From 17e932b230803cc70a529226c2ae9acedf649918 Mon Sep 17 00:00:00 2001
From: Enrico Pozzobon
Date: Fri, 12 Jun 2020 15:56:25 +0200
Subject: [PATCH] Merged rhys and rhys-avr into the same directory

---
 ace/Implementations/crypto_aead/aceae128v1/rhys-avr/ace.c | 339 ---
 ace/Implementations/crypto_aead/aceae128v1/rhys-avr/ace.h | 197 --
 ace/Implementations/crypto_aead/aceae128v1/rhys-avr/aead-common.c | 69 -
 ace/Implementations/crypto_aead/aceae128v1/rhys-avr/aead-common.h | 256 --
 ace/Implementations/crypto_aead/aceae128v1/rhys-avr/api.h | 5 -----
 ace/Implementations/crypto_aead/aceae128v1/rhys-avr/encrypt.c | 26 -
 ace/Implementations/crypto_aead/aceae128v1/rhys-avr/internal-sliscp-320-avr.S | 1767 ---------------
 ace/Implementations/crypto_aead/aceae128v1/rhys-avr/internal-sliscp-light.c | 413 ---
 ace/Implementations/crypto_aead/aceae128v1/rhys-avr/internal-sliscp-light.h | 168 -
 ace/Implementations/crypto_aead/aceae128v1/rhys-avr/internal-util.h | 702 ------
 ace/Implementations/crypto_aead/aceae128v1/rhys/internal-sliscp-320-avr.S | 1767 +++++++++++++++
 ace/Implementations/crypto_aead/aceae128v1/rhys/internal-sliscp-light.c | 9 +++++++--
 ace/Implementations/crypto_aead/aceae128v1/rhys/internal-sliscp-light.h | 3 +--
 ace/Implementations/crypto_aead/aceae128v1/rhys/internal-util.h | 145 +
 ace/Implementations/crypto_hash/acehash256v1/rhys-avr/ace.c | 339 ---
 ace/Implementations/crypto_hash/acehash256v1/rhys-avr/ace.h | 197 --
 ace/Implementations/crypto_hash/acehash256v1/rhys-avr/aead-common.c | 69 -
 ace/Implementations/crypto_hash/acehash256v1/rhys-avr/aead-common.h | 256 --
 ace/Implementations/crypto_hash/acehash256v1/rhys-avr/api.h | 1 -
 ace/Implementations/crypto_hash/acehash256v1/rhys-avr/hash.c | 8 --------
 ace/Implementations/crypto_hash/acehash256v1/rhys-avr/internal-sliscp-320-avr.S | 1767 ---------------
 ace/Implementations/crypto_hash/acehash256v1/rhys-avr/internal-sliscp-light.c | 413 ---
 ace/Implementations/crypto_hash/acehash256v1/rhys-avr/internal-sliscp-light.h | 168 -
 ace/Implementations/crypto_hash/acehash256v1/rhys-avr/internal-util.h | 702 ------
 ace/Implementations/crypto_hash/acehash256v1/rhys/ace.c | 339 +++
 ace/Implementations/crypto_hash/acehash256v1/rhys/ace.h | 197 ++
 ace/Implementations/crypto_hash/acehash256v1/rhys/aead-common.c | 69 +
 ace/Implementations/crypto_hash/acehash256v1/rhys/aead-common.h | 256 ++
 ace/Implementations/crypto_hash/acehash256v1/rhys/api.h | 1 +
 ace/Implementations/crypto_hash/acehash256v1/rhys/hash.c | 8 ++++++++
 ace/Implementations/crypto_hash/acehash256v1/rhys/internal-sliscp-320-avr.S | 1767 +++++++++++++++
 ace/Implementations/crypto_hash/acehash256v1/rhys/internal-sliscp-light.c | 413 +++
 ace/Implementations/crypto_hash/acehash256v1/rhys/internal-sliscp-light.h | 168 +
 ace/Implementations/crypto_hash/acehash256v1/rhys/internal-util.h | 702 ++++++
 ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/aead-common.c | 69 -
 ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/aead-common.h | 256 --
 ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/api.h | 5 -----
 ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/ascon128.c | 383 ---
 ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/ascon128.h | 408 ---
 ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/encrypt.c | 26 -
 ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/internal-ascon-avr.S | 778 ------
 ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/internal-ascon.c | 80 -
 ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/internal-ascon.h | 64 -
 ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/internal-util.h | 702 ------
 ascon/Implementations/crypto_aead/ascon128av12/rhys/internal-ascon-avr.S | 778 ++++++
 ascon/Implementations/crypto_aead/ascon128av12/rhys/internal-ascon.c | 4 ++++
 ascon/Implementations/crypto_aead/ascon128av12/rhys/internal-util.h | 145 +
 ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/aead-common.c | 69 -
 ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/aead-common.h | 256 --
 ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/api.h | 5 -----
 ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/ascon128.c | 383 ---
 ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/ascon128.h | 408 ---
 ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/encrypt.c | 26 -
 ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/internal-ascon-avr.S | 778 ------
 ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/internal-ascon.c | 80 -
 ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/internal-ascon.h | 64 -
 ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/internal-util.h | 702 ------
 ascon/Implementations/crypto_aead/ascon128v12/rhys/internal-ascon-avr.S | 778 ++++++
 ascon/Implementations/crypto_aead/ascon128v12/rhys/internal-ascon.c | 4 ++++
 ascon/Implementations/crypto_aead/ascon128v12/rhys/internal-util.h | 145 +
 ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/aead-common.c | 69 -
 ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/aead-common.h | 256 --
 ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/api.h | 5 -----
 ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/ascon128.c | 383 ---
 ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/ascon128.h | 408 ---
 ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/encrypt.c | 26 -
 ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/internal-ascon-avr.S | 778 ------
 ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/internal-ascon.c | 80 -
 ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/internal-ascon.h | 64 -
 ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/internal-util.h | 702 ------
 ascon/Implementations/crypto_aead/ascon80pqv12/rhys/internal-ascon-avr.S | 778 ++++++
 ascon/Implementations/crypto_aead/ascon80pqv12/rhys/internal-ascon.c | 4 ++++
 ascon/Implementations/crypto_aead/ascon80pqv12/rhys/internal-util.h | 145 +
 ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/aead-common.c | 69 -
 ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/aead-common.h | 256 --
 ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/api.h | 1 -
 ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/ascon-hash.c | 118 -
 ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/ascon128.h | 408 ---
 ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/hash.c | 8 --------
 ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/internal-ascon-avr.S | 778 ------
 ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/internal-ascon.c | 80 -
 ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/internal-ascon.h | 64 -
 ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/internal-util.h | 702 ------
 ascon/Implementations/crypto_hash/asconhashv12/rhys/aead-common.c | 69 +
 ascon/Implementations/crypto_hash/asconhashv12/rhys/aead-common.h | 256 ++
 ascon/Implementations/crypto_hash/asconhashv12/rhys/api.h | 1 +
 ascon/Implementations/crypto_hash/asconhashv12/rhys/ascon-hash.c | 118 +
 ascon/Implementations/crypto_hash/asconhashv12/rhys/ascon128.h | 408 +++
 ascon/Implementations/crypto_hash/asconhashv12/rhys/hash.c | 8 ++++++++
 ascon/Implementations/crypto_hash/asconhashv12/rhys/internal-ascon-avr.S | 778 ++++++
 ascon/Implementations/crypto_hash/asconhashv12/rhys/internal-ascon.c | 80 +
 ascon/Implementations/crypto_hash/asconhashv12/rhys/internal-ascon.h | 64 +
 ascon/Implementations/crypto_hash/asconhashv12/rhys/internal-util.h | 702 ++++++
 ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/aead-common.c | 69 -
 ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/aead-common.h | 256 --
 ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/api.h | 1 -
 ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/ascon-xof.c | 123 -
 ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/ascon128.h | 408 ---
 ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/hash.c | 8 --------
 ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/internal-ascon-avr.S | 778 ------
 ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/internal-ascon.c | 80 -
 ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/internal-ascon.h | 64 -
 ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/internal-util.h | 702 ------
 ascon/Implementations/crypto_hash/asconxofv12/rhys/aead-common.c | 69 +
 ascon/Implementations/crypto_hash/asconxofv12/rhys/aead-common.h | 256 ++
 ascon/Implementations/crypto_hash/asconxofv12/rhys/api.h | 1 +
 ascon/Implementations/crypto_hash/asconxofv12/rhys/ascon-xof.c | 123 +
 ascon/Implementations/crypto_hash/asconxofv12/rhys/ascon128.h | 408 +++
 ascon/Implementations/crypto_hash/asconxofv12/rhys/hash.c | 8 ++++++++
 ascon/Implementations/crypto_hash/asconxofv12/rhys/internal-ascon-avr.S | 778 ++++++
 ascon/Implementations/crypto_hash/asconxofv12/rhys/internal-ascon.c | 80 +
 ascon/Implementations/crypto_hash/asconxofv12/rhys/internal-ascon.h | 64 +
 ascon/Implementations/crypto_hash/asconxofv12/rhys/internal-util.h | 702 ++++++
 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/aead-common.c | 69 -
 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/aead-common.h | 256 --
 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/api.h | 5 -----
 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/comet.c | 556 -----
 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/comet.h | 274 --
 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/encrypt.c | 26 -
 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-cham-avr.S | 915 --------
 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-cham.c | 138 -
 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-cham.h | 67 -
 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-speck64-avr.S | 272 --
 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-speck64.c | 70 -
 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-speck64.h | 56 -
 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-util.h | 702 ------
 comet/Implementations/crypto_aead/comet128chamv1/rhys/comet.c | 69 +++++++++------------------------------------------------------------
 comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-cham-avr.S | 915 ++++++++
 comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-cham.c | 4 ++++
 comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-speck64-avr.S | 272 ++
 comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-speck64.c | 70 +
 comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-speck64.h | 56 +
 comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-util.h | 145 +
 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/aead-common.c | 69 -
 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/aead-common.h | 256 --
 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/api.h | 5 -----
 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/comet.c | 556 -----
 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/comet.h | 274 --
 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/encrypt.c | 26 -
 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-cham-avr.S | 915 --------
 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-cham.c | 138 -
 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-cham.h | 67 -
 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-speck64-avr.S | 272 --
 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-speck64.c | 70 -
 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-speck64.h | 56 -
 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-util.h | 702 ------
 comet/Implementations/crypto_aead/comet64chamv1/rhys/comet.c | 69 +++++++++------------------------------------------------------------
 comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-cham-avr.S | 915 ++++++++
 comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-cham.c | 4 ++++
 comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-speck64-avr.S | 272 ++
 comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-speck64.c | 70 +
 comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-speck64.h | 56 +
 comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-util.h | 145 +
 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/aead-common.c | 69 -
 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/aead-common.h | 256 --
 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/api.h | 5 -----
 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/comet.c | 556 -----
 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/comet.h | 274 --
 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/encrypt.c | 26 -
 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-cham-avr.S | 915 --------
 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-cham.c | 138 -
 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-cham.h | 67 -
 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-speck64-avr.S | 272 --
 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-speck64.c | 70 -
 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-speck64.h | 56 -
 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-util.h | 702 ------
 comet/Implementations/crypto_aead/comet64speckv1/rhys/comet.c | 69 +++++++++------------------------------------------------------------
 comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-cham-avr.S | 915 ++++++++
 comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-cham.c | 4 ++++
 comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-speck64-avr.S | 272 ++
 comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-speck64.c | 70 +
 comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-speck64.h | 56 +
 comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-util.h | 145 +
 drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/aead-common.c | 69 -
 drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/aead-common.h | 256 --
 drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/api.h | 5 -----
 drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/drygascon.c | 421
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/drygascon.h | 264 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/encrypt.c | 26 -------------------------- drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/internal-drysponge-avr.S | 5092 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/internal-drysponge.c | 611 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/internal-drysponge.h | 345 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ drygascon/Implementations/crypto_aead/drygascon128/rhys/internal-drysponge-avr.S | 5092 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ drygascon/Implementations/crypto_aead/drygascon128/rhys/internal-drysponge.c | 11 +++++++++++ drygascon/Implementations/crypto_aead/drygascon128/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/aead-common.h | 256 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/api.h | 5 ----- drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/drygascon.c | 421 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/drygascon.h | 264 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/encrypt.c | 26 -------------------------- drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/internal-drysponge-avr.S | 5092 -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/internal-drysponge.c | 611 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/internal-drysponge.h | 345 --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ drygascon/Implementations/crypto_aead/drygascon256/rhys/internal-drysponge-avr.S | 5092 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ drygascon/Implementations/crypto_aead/drygascon256/rhys/internal-drysponge.c | 11 +++++++++++ drygascon/Implementations/crypto_aead/drygascon256/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/api.h | 1 - drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/drygascon.c | 421 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/drygascon.h | 264 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/hash.c | 8 -------- drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/internal-drysponge-avr.S | 5092 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/internal-drysponge.c | 611 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/internal-drysponge.h | 345 --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ drygascon/Implementations/crypto_hash/drygascon128/rhys/aead-common.c | 69 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ drygascon/Implementations/crypto_hash/drygascon128/rhys/aead-common.h | 256 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ drygascon/Implementations/crypto_hash/drygascon128/rhys/api.h | 1 + drygascon/Implementations/crypto_hash/drygascon128/rhys/drygascon.c | 421 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ drygascon/Implementations/crypto_hash/drygascon128/rhys/drygascon.h | 264 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ drygascon/Implementations/crypto_hash/drygascon128/rhys/hash.c | 8 ++++++++ drygascon/Implementations/crypto_hash/drygascon128/rhys/internal-drysponge-avr.S | 5092 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ drygascon/Implementations/crypto_hash/drygascon128/rhys/internal-drysponge.c | 611 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ drygascon/Implementations/crypto_hash/drygascon128/rhys/internal-drysponge.h | 345 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ drygascon/Implementations/crypto_hash/drygascon128/rhys/internal-util.h | 702 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/api.h | 1 - drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/drygascon.c | 421 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/drygascon.h | 264 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/hash.c | 8 -------- drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/internal-drysponge-avr.S | 5092 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/internal-drysponge.c | 611 --------------------
 drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/internal-drysponge.h | 345 --------------------
 drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/internal-util.h | 702 --------------------
 drygascon/Implementations/crypto_hash/drygascon256/rhys/aead-common.c | 69 ++++++++++++++++++++
 drygascon/Implementations/crypto_hash/drygascon256/rhys/aead-common.h | 256 ++++++++++++++++++++
 drygascon/Implementations/crypto_hash/drygascon256/rhys/api.h | 1 +
 drygascon/Implementations/crypto_hash/drygascon256/rhys/drygascon.c | 421 ++++++++++++++++++++
 drygascon/Implementations/crypto_hash/drygascon256/rhys/drygascon.h | 264 ++++++++++++++++++++
 drygascon/Implementations/crypto_hash/drygascon256/rhys/hash.c | 8 ++++++++
 drygascon/Implementations/crypto_hash/drygascon256/rhys/internal-drysponge-avr.S | 5092 ++++++++++++++++++++
 drygascon/Implementations/crypto_hash/drygascon256/rhys/internal-drysponge.c | 611 ++++++++++++++++++++
 drygascon/Implementations/crypto_hash/drygascon256/rhys/internal-drysponge.h | 345 ++++++++++++++++++++
 drygascon/Implementations/crypto_hash/drygascon256/rhys/internal-util.h | 702 ++++++++++++++++++++
 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/aead-common.c | 69 --------------------
 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/aead-common.h | 256 --------------------
 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/api.h | 5 -----
 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/elephant.c | 881 --------------------
 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/elephant.h | 291 --------------------
 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/encrypt.c | 26 --------------------------
 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-keccak-avr.S | 1552 --------------------
 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-keccak.c | 214 --------------------
 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-keccak.h | 87 --------------------
 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-spongent-avr.S | 1677 --------------------
 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-spongent.c | 350 --------------------
 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-spongent.h | 91 --------------------
 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-util.h | 702 --------------------
 elephant/Implementations/crypto_aead/elephant160v1/rhys/elephant.c | 28 ++++++++++++++--------------
 elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-keccak-avr.S | 1552 ++++++++++++++++++++
 elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-keccak.c | 162 +++++++++++---------
 elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-keccak.h | 3 +--
 elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-spongent-avr.S | 1677 ++++++++++++++++++++
 elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-spongent.c | 4 ++++
 elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-util.h | 145 ++++++++++++++++++++
 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/aead-common.c | 69 --------------------
 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/aead-common.h | 256 --------------------
 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/api.h | 5 -----
 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/elephant.c | 881 --------------------
 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/elephant.h | 291 --------------------
 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/encrypt.c | 26 --------------------------
 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-keccak-avr.S | 1552 --------------------
 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-keccak.c | 214 --------------------
 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-keccak.h | 87 --------------------
 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-spongent-avr.S | 1677 --------------------
 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-spongent.c | 350 --------------------
 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-spongent.h | 91 --------------------
 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-util.h | 702 --------------------
 elephant/Implementations/crypto_aead/elephant176v1/rhys/elephant.c | 28 ++++++++++++++--------------
 elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-keccak-avr.S | 1552 ++++++++++++++++++++
 elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-keccak.c | 162 +++++++++++---------
 elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-keccak.h | 3 +--
 elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-spongent-avr.S | 1677 ++++++++++++++++++++
 elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-spongent.c | 4 ++++
 elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-util.h | 145 ++++++++++++++++++++
 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/aead-common.c | 69 --------------------
 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/aead-common.h | 256 --------------------
 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/api.h | 5 -----
 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/elephant.c | 881 --------------------
 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/elephant.h | 291 --------------------
 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/encrypt.c | 26 --------------------------
 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-keccak-avr.S | 1552 --------------------
 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-keccak.c | 214 --------------------
 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-keccak.h | 87 --------------------
 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-spongent-avr.S | 1677 --------------------
 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-spongent.c | 350 --------------------
 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-spongent.h | 91 --------------------
 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-util.h | 702 --------------------
 elephant/Implementations/crypto_aead/elephant200v1/rhys/elephant.c | 28 ++++++++++++++--------------
 elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-keccak-avr.S | 1552 ++++++++++++++++++++
 elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-keccak.c | 162 +++++++++++---------
 elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-keccak.h | 3 +--
 elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-spongent-avr.S | 1677 ++++++++++++++++++++
 elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-spongent.c | 4 ++++
 elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-util.h | 145 ++++++++++++++++++++
 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/aead-common.c | 69 --------------------
 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/aead-common.h | 256 --------------------
 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/api.h | 5 -----
 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/encrypt.c | 26 --------------------------
 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/estate.c | 199 --------------------
 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/estate.h | 137 --------------------
 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128-config.h | 80 --------------------
 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128.c | 1498 --------------------
 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128.h | 246 --------------------
 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128n-avr.S | 4712 --------------------
 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128n-full-avr.S | 8173 --------------------
 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128n-small-avr.S | 9331 --------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128n-tiny-avr.S | 9480 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ estate/Implementations/crypto_aead/estatetwegift128v1/rhys/estate.c | 29 ++++++++++++++++------------- estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128-config.h | 80 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128.c | 1061 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128.h | 57 +++++++++++++++++++++++++++++++++++++-------------------- estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128n-avr.S | 4712 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128n-full-avr.S | 8173 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128n-small-avr.S | 9331 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128n-tiny-avr.S | 9480 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-util.h | 145 ++++
 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/aead-common.c | 69 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/aead-common.h | 256 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/api.h | 5 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/encrypt.c | 26 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/forkae.c | 140 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/forkae.h | 551 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-forkae-paef.h | 273 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-forkae-saef.h | 251 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-forkskinny.c | 988 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-forkskinny.h | 141 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-skinnyutil.h | 328 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-util.h | 702 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys/internal-util.h | 145 ++++
 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/aead-common.c | 69 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/aead-common.h | 256 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/api.h | 5 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/encrypt.c | 26 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/forkae.c | 140 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/forkae.h | 551 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-forkae-paef.h | 273 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-forkae-saef.h | 251 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-forkskinny.c | 988 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-forkskinny.h | 141 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-skinnyutil.h | 328 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-util.h | 702 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys/internal-util.h | 145 ++++
 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/aead-common.c | 69 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/aead-common.h | 256 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/api.h | 5 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/encrypt.c | 26 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/forkae.c | 140 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/forkae.h | 551 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-forkae-paef.h | 273 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-forkae-saef.h | 251 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-forkskinny.c | 988 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-forkskinny.h | 141 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-skinnyutil.h | 328 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-util.h | 702 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys/internal-util.h | 145 ++++
 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/aead-common.c | 69 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/aead-common.h | 256 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/api.h | 5 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/encrypt.c | 26 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/forkae.c | 140 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/forkae.h | 551 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-forkae-paef.h | 273 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-forkae-saef.h | 251 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-forkskinny.c | 988 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-forkskinny.h | 141 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-skinnyutil.h | 328 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-util.h | 702 ----
 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys/internal-util.h | 145 ++++
 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/aead-common.c | 69 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/aead-common.h | 256 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/api.h | 5 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/encrypt.c | 26 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/forkae.c | 140 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/forkae.h | 551 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-forkae-paef.h | 273 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-forkae-saef.h | 251 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-forkskinny.c | 988 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-forkskinny.h | 141 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-skinnyutil.h | 328 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-util.h | 702 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys/internal-util.h | 145 ++++
 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/aead-common.c | 69 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/aead-common.h | 256 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/api.h | 5 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/encrypt.c | 26 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/forkae.c | 140 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/forkae.h | 551 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-forkae-paef.h | 273 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-forkae-saef.h | 251 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-forkskinny.c | 988 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-forkskinny.h | 141 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-skinnyutil.h | 328 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-util.h | 702 ----
 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys/internal-util.h | 145 ++++
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/aead-common.c | 69 ----
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/aead-common.h | 256 ----
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/api.h | 5 ----
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/encrypt.c | 26 ----
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/gift-cofb.c | 405 ----
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/gift-cofb.h | 127 ----
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128-config.h | 80 ----
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128.c | 1498 ----
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128.h | 246 ----
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128b-avr.S | 2104 ----
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128b-full-avr.S | 5037 ----
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128b-small-avr.S | 6053 ----
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128b-tiny-avr.S | 6766 ----
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-util.h | 702 ----
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/gift-cofb.c | 6 ++----
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128-config.h | 80 ++++
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128.c | 1061 ++++----
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128.h | 57 ++++----
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128b-avr.S | 2104 ++++
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128b-full-avr.S | 5037 ++++
 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128b-small-avr.S | 6053 ++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128b-tiny-avr.S | 6766 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/api.h | 5 ----- gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/encrypt.c | 26 -------------------------- gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/gimli24.c | 330 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/gimli24.h | 220 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/internal-gimli24-avr.S | 9419 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/internal-gimli24.c | 142 ---------------------------------------------------------------------------------------------------------------------------------------------- gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/internal-gimli24.h | 52 ---------------------------------------------------- gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ gimli/Implementations/crypto_aead/gimli24v1/rhys/internal-gimli24-avr.S | 9419 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ gimli/Implementations/crypto_aead/gimli24v1/rhys/internal-gimli24.c | 4 ++++ gimli/Implementations/crypto_aead/gimli24v1/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/api.h | 1 - gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/gimli24.c | 330 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 
gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/gimli24.h | 220 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/hash.c | 8 -------- gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/internal-gimli24-avr.S | 9419 -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/internal-gimli24.c | 142 ---------------------------------------------------------------------------------------------------------------------------------------------- gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/internal-gimli24.h | 52 ---------------------------------------------------- gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/internal-util.h | 702 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ gimli/Implementations/crypto_hash/gimli24v1/rhys/aead-common.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ gimli/Implementations/crypto_hash/gimli24v1/rhys/aead-common.h | 256 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ gimli/Implementations/crypto_hash/gimli24v1/rhys/api.h | 1 + gimli/Implementations/crypto_hash/gimli24v1/rhys/gimli24.c | 330 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ gimli/Implementations/crypto_hash/gimli24v1/rhys/gimli24.h | 220 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ gimli/Implementations/crypto_hash/gimli24v1/rhys/hash.c | 8 ++++++++ gimli/Implementations/crypto_hash/gimli24v1/rhys/internal-gimli24-avr.S | 9419 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 gimli/Implementations/crypto_hash/gimli24v1/rhys/internal-gimli24.c | 142 +
 gimli/Implementations/crypto_hash/gimli24v1/rhys/internal-gimli24.h | 52 +
 gimli/Implementations/crypto_hash/gimli24v1/rhys/internal-util.h | 702 ++++
 grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/aead-common.c | 69 -
 grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/aead-common.h | 256 --
 grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/api.h | 5 -
 grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/encrypt.c | 26 -
 grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/grain128.c | 151 -
 grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/grain128.h | 125 -
 grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/internal-grain128.c | 411 ---
 grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/internal-grain128.h | 113 -
 grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/internal-util.h | 702 ----
 grain-128aead/Implementations/crypto_aead/grain128aead/rhys/internal-util.h | 145 +
 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/aead-common.c | 69 -
 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/aead-common.h | 256 --
 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/api.h | 5 -
 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/encrypt.c | 26 -
 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/hyena.c | 293 --
 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/hyena.h | 126 -
 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128-config.h | 80 -
 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128.c | 1498 --------
 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128.h | 246 --
 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128n-avr.S | 4712 -------------------------
 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128n-full-avr.S | 8173 -------------------------------------------
 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128n-small-avr.S | 9331 -------------------------------------------------
 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128n-tiny-avr.S | 9480 --------------------------------------------------
 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-util.h | 702 ----
 hyena/Implementations/crypto_aead/hyenav1/rhys/hyena.c | 48 +-
 hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128-config.h | 80 +
 hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128.c | 1061 ++++--
 hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128.h | 57 +-
 hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128n-avr.S | 4712 +++++++++++++++++++++++++
 hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128n-full-avr.S | 8173 +++++++++++++++++++++++++++++++++++++++++++
 hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128n-small-avr.S | 9331 +++++++++++++++++++++++++++++++++++++++++++++++++
 hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128n-tiny-avr.S | 9480 ++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ hyena/Implementations/crypto_aead/hyenav1/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ isap/Implementations/crypto_aead/isapa128av20/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- isap/Implementations/crypto_aead/isapa128av20/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapa128av20/rhys-avr/api.h | 5 ----- isap/Implementations/crypto_aead/isapa128av20/rhys-avr/encrypt.c | 26 -------------------------- isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-ascon-avr.S | 778 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-ascon.c | 80 -------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-ascon.h | 64 ---------------------------------------------------------------- isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-isap.h | 249 --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-keccak-avr.S | 1552 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-keccak.c | 214 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-keccak.h | 87 --------------------------------------------------------------------------------------- 
isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ isap/Implementations/crypto_aead/isapa128av20/rhys-avr/isap.c | 110 -------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapa128av20/rhys-avr/isap.h | 330 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ isap/Implementations/crypto_aead/isapa128av20/rhys/internal-ascon-avr.S | 778 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ isap/Implementations/crypto_aead/isapa128av20/rhys/internal-ascon.c | 4 ++++ isap/Implementations/crypto_aead/isapa128av20/rhys/internal-keccak-avr.S | 1552 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ isap/Implementations/crypto_aead/isapa128av20/rhys/internal-keccak.c | 162 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapa128av20/rhys/internal-keccak.h | 3 +-- isap/Implementations/crypto_aead/isapa128av20/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ isap/Implementations/crypto_aead/isapa128v20/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- isap/Implementations/crypto_aead/isapa128v20/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapa128v20/rhys-avr/api.h | 5 ----- isap/Implementations/crypto_aead/isapa128v20/rhys-avr/encrypt.c | 26 -------------------------- isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-ascon-avr.S | 778 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-ascon.c | 80 -------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-ascon.h | 64 ---------------------------------------------------------------- isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-isap.h | 249 --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-keccak-avr.S | 1552 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-keccak.c | 214 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-keccak.h | 87 --------------------------------------------------------------------------------------- 
isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ isap/Implementations/crypto_aead/isapa128v20/rhys-avr/isap.c | 110 -------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapa128v20/rhys-avr/isap.h | 330 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ isap/Implementations/crypto_aead/isapa128v20/rhys/internal-ascon-avr.S | 778 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ isap/Implementations/crypto_aead/isapa128v20/rhys/internal-ascon.c | 4 ++++ isap/Implementations/crypto_aead/isapa128v20/rhys/internal-keccak-avr.S | 1552 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ isap/Implementations/crypto_aead/isapa128v20/rhys/internal-keccak.c | 162 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapa128v20/rhys/internal-keccak.h | 3 +-- isap/Implementations/crypto_aead/isapa128v20/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ isap/Implementations/crypto_aead/isapk128av20/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- isap/Implementations/crypto_aead/isapk128av20/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapk128av20/rhys-avr/api.h | 5 ----- isap/Implementations/crypto_aead/isapk128av20/rhys-avr/encrypt.c | 26 -------------------------- isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-ascon-avr.S | 778 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-ascon.c | 80 -------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-ascon.h | 64 ---------------------------------------------------------------- isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-isap.h | 249 --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-keccak-avr.S | 1552 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-keccak.c | 214 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-keccak.h | 87 --------------------------------------------------------------------------------------- 
isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ isap/Implementations/crypto_aead/isapk128av20/rhys-avr/isap.c | 110 -------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapk128av20/rhys-avr/isap.h | 330 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ isap/Implementations/crypto_aead/isapk128av20/rhys/internal-ascon-avr.S | 778 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ isap/Implementations/crypto_aead/isapk128av20/rhys/internal-ascon.c | 4 ++++ isap/Implementations/crypto_aead/isapk128av20/rhys/internal-keccak-avr.S | 1552 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ isap/Implementations/crypto_aead/isapk128av20/rhys/internal-keccak.c | 162 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapk128av20/rhys/internal-keccak.h | 3 +-- isap/Implementations/crypto_aead/isapk128av20/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ isap/Implementations/crypto_aead/isapk128v20/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- isap/Implementations/crypto_aead/isapk128v20/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapk128v20/rhys-avr/api.h | 5 ----- isap/Implementations/crypto_aead/isapk128v20/rhys-avr/encrypt.c | 26 -------------------------- isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-ascon-avr.S | 778 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-ascon.c | 80 -------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-ascon.h | 64 ---------------------------------------------------------------- isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-isap.h | 249 --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-keccak-avr.S | 1552 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-keccak.c | 214 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-keccak.h | 87 --------------------------------------------------------------------------------------- 
isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ isap/Implementations/crypto_aead/isapk128v20/rhys-avr/isap.c | 110 -------------------------------------------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapk128v20/rhys-avr/isap.h | 330 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ isap/Implementations/crypto_aead/isapk128v20/rhys/internal-ascon-avr.S | 778 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ isap/Implementations/crypto_aead/isapk128v20/rhys/internal-ascon.c | 4 ++++ isap/Implementations/crypto_aead/isapk128v20/rhys/internal-keccak-avr.S | 1552 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ isap/Implementations/crypto_aead/isapk128v20/rhys/internal-keccak.c | 162 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------------------------------------------------------------------- isap/Implementations/crypto_aead/isapk128v20/rhys/internal-keccak.h | 3 +-- isap/Implementations/crypto_aead/isapk128v20/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_aead/knot128v1/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- knot/Implementations/crypto_aead/knot128v1/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot128v1/rhys-avr/api.h | 5 ----- knot/Implementations/crypto_aead/knot128v1/rhys-avr/encrypt.c | 26 -------------------------- knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot-256-avr.S | 1093 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot-384-avr.S | 833 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot-512-avr.S | 2315 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot.c | 301 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot.h | 130 ---------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-util.h | 702 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ knot/Implementations/crypto_aead/knot128v1/rhys-avr/knot-aead.c | 503 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot128v1/rhys-avr/knot.h | 459 --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot128v1/rhys/internal-knot-256-avr.S | 1093 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_aead/knot128v1/rhys/internal-knot-384-avr.S | 833 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_aead/knot128v1/rhys/internal-knot-512-avr.S | 2315 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_aead/knot128v1/rhys/internal-knot.c | 4 ++++ knot/Implementations/crypto_aead/knot128v1/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 
knot/Implementations/crypto_aead/knot128v2/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- knot/Implementations/crypto_aead/knot128v2/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot128v2/rhys-avr/api.h | 5 ----- knot/Implementations/crypto_aead/knot128v2/rhys-avr/encrypt.c | 26 -------------------------- knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot-256-avr.S | 1093 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot-384-avr.S | 833 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot-512-avr.S | 2315 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot.c | 301 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot.h | 130 ---------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-util.h | 702 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ knot/Implementations/crypto_aead/knot128v2/rhys-avr/knot-aead.c | 503 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot128v2/rhys-avr/knot.h | 459 --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot128v2/rhys/internal-knot-256-avr.S | 1093 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_aead/knot128v2/rhys/internal-knot-384-avr.S | 833 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_aead/knot128v2/rhys/internal-knot-512-avr.S | 2315 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_aead/knot128v2/rhys/internal-knot.c | 4 ++++ knot/Implementations/crypto_aead/knot128v2/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 
knot/Implementations/crypto_aead/knot192/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- knot/Implementations/crypto_aead/knot192/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot192/rhys-avr/api.h | 5 ----- knot/Implementations/crypto_aead/knot192/rhys-avr/encrypt.c | 26 -------------------------- knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot-256-avr.S | 1093 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot-384-avr.S | 833 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot-512-avr.S | 2315 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot.c | 301 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot.h | 130 ---------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot192/rhys-avr/internal-util.h | 702 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ knot/Implementations/crypto_aead/knot192/rhys-avr/knot-aead.c | 503 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot192/rhys-avr/knot.h | 459 --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot192/rhys/internal-knot-256-avr.S | 1093 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_aead/knot192/rhys/internal-knot-384-avr.S | 833 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_aead/knot192/rhys/internal-knot-512-avr.S | 2315 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_aead/knot192/rhys/internal-knot.c | 4 ++++ knot/Implementations/crypto_aead/knot192/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 
knot/Implementations/crypto_aead/knot256/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- knot/Implementations/crypto_aead/knot256/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot256/rhys-avr/api.h | 5 ----- knot/Implementations/crypto_aead/knot256/rhys-avr/encrypt.c | 26 -------------------------- knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot-256-avr.S | 1093 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot-384-avr.S | 833 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot-512-avr.S | 2315 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot.c | 301 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot.h | 130 ---------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot256/rhys-avr/internal-util.h | 702 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ knot/Implementations/crypto_aead/knot256/rhys-avr/knot-aead.c | 503 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot256/rhys-avr/knot.h | 459 --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_aead/knot256/rhys/internal-knot-256-avr.S | 1093 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_aead/knot256/rhys/internal-knot-384-avr.S | 833 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_aead/knot256/rhys/internal-knot-512-avr.S | 2315 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_aead/knot256/rhys/internal-knot.c | 4 ++++ knot/Implementations/crypto_aead/knot256/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 
knot/Implementations/crypto_hash/knot256v1/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- knot/Implementations/crypto_hash/knot256v1/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_hash/knot256v1/rhys-avr/api.h | 1 - knot/Implementations/crypto_hash/knot256v1/rhys-avr/hash.c | 8 -------- knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot-256-avr.S | 1093 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot-384-avr.S | 833 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot-512-avr.S | 2315 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot.c | 301 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot.h | 130 ---------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-util.h | 702 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ knot/Implementations/crypto_hash/knot256v1/rhys-avr/knot-hash.c | 186 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ knot/Implementations/crypto_hash/knot256v1/rhys-avr/knot.h | 459 --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_hash/knot256v1/rhys/aead-common.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_hash/knot256v1/rhys/aead-common.h | 256 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_hash/knot256v1/rhys/api.h | 1 + knot/Implementations/crypto_hash/knot256v1/rhys/hash.c | 8 ++++++++ knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot-256-avr.S | 1093 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot-384-avr.S | 833 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot-512-avr.S | 2315 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot.c | 301 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot.h | 130 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_hash/knot256v1/rhys/internal-util.h | 702 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_hash/knot256v1/rhys/knot-hash.c | 186 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_hash/knot256v1/rhys/knot.h | 459 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ knot/Implementations/crypto_hash/knot256v2/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- knot/Implementations/crypto_hash/knot256v2/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knot/Implementations/crypto_hash/knot256v2/rhys-avr/api.h | 1 - knot/Implementations/crypto_hash/knot256v2/rhys-avr/hash.c | 8 -------- knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot-256-avr.S | 1093 
 knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot-384-avr.S | 833 ----------
 knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot-512-avr.S | 2315 ----------
 knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot.c | 301 ----------
 knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot.h | 130 ----------
 knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-util.h | 702 ----------
 knot/Implementations/crypto_hash/knot256v2/rhys-avr/knot-hash.c | 186 ----------
 knot/Implementations/crypto_hash/knot256v2/rhys-avr/knot.h | 459 ----------
 knot/Implementations/crypto_hash/knot256v2/rhys/aead-common.c | 69 ++++++++++
 knot/Implementations/crypto_hash/knot256v2/rhys/aead-common.h | 256 ++++++++++
 knot/Implementations/crypto_hash/knot256v2/rhys/api.h | 1 +
 knot/Implementations/crypto_hash/knot256v2/rhys/hash.c | 8 ++++++++
 knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot-256-avr.S | 1093 ++++++++++
 knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot-384-avr.S | 833 ++++++++++
 knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot-512-avr.S | 2315 ++++++++++
 knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot.c | 301 ++++++++++
 knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot.h | 130 ++++++++++
 knot/Implementations/crypto_hash/knot256v2/rhys/internal-util.h | 702 ++++++++++
 knot/Implementations/crypto_hash/knot256v2/rhys/knot-hash.c | 186 ++++++++++
 knot/Implementations/crypto_hash/knot256v2/rhys/knot.h | 459 ++++++++++
 knot/Implementations/crypto_hash/knot384/rhys-avr/aead-common.c | 69 ----------
 knot/Implementations/crypto_hash/knot384/rhys-avr/aead-common.h | 256 ----------
 knot/Implementations/crypto_hash/knot384/rhys-avr/api.h | 1 -
 knot/Implementations/crypto_hash/knot384/rhys-avr/hash.c | 8 --------
 knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot-256-avr.S | 1093 ----------
 knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot-384-avr.S | 833 ----------
 knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot-512-avr.S | 2315 ----------
 knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot.c | 301 ----------
 knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot.h | 130 ----------
 knot/Implementations/crypto_hash/knot384/rhys-avr/internal-util.h | 702 ----------
 knot/Implementations/crypto_hash/knot384/rhys-avr/knot-hash.c | 186 ----------
 knot/Implementations/crypto_hash/knot384/rhys-avr/knot.h | 459 ----------
 knot/Implementations/crypto_hash/knot384/rhys/aead-common.c | 69 ++++++++++
 knot/Implementations/crypto_hash/knot384/rhys/aead-common.h | 256 ++++++++++
 knot/Implementations/crypto_hash/knot384/rhys/api.h | 1 +
 knot/Implementations/crypto_hash/knot384/rhys/hash.c | 8 ++++++++
 knot/Implementations/crypto_hash/knot384/rhys/internal-knot-256-avr.S | 1093 ++++++++++
 knot/Implementations/crypto_hash/knot384/rhys/internal-knot-384-avr.S | 833 ++++++++++
 knot/Implementations/crypto_hash/knot384/rhys/internal-knot-512-avr.S | 2315 ++++++++++
 knot/Implementations/crypto_hash/knot384/rhys/internal-knot.c | 301 ++++++++++
 knot/Implementations/crypto_hash/knot384/rhys/internal-knot.h | 130 ++++++++++
 knot/Implementations/crypto_hash/knot384/rhys/internal-util.h | 702 ++++++++++
 knot/Implementations/crypto_hash/knot384/rhys/knot-hash.c | 186 ++++++++++
 knot/Implementations/crypto_hash/knot384/rhys/knot.h | 459 ++++++++++
 knot/Implementations/crypto_hash/knot512/rhys-avr/aead-common.c | 69 ----------
 knot/Implementations/crypto_hash/knot512/rhys-avr/aead-common.h | 256 ----------
 knot/Implementations/crypto_hash/knot512/rhys-avr/api.h | 1 -
 knot/Implementations/crypto_hash/knot512/rhys-avr/hash.c | 8 --------
 knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot-256-avr.S | 1093 ----------
 knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot-384-avr.S | 833 ----------
 knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot-512-avr.S | 2315 ----------
 knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot.c | 301 ----------
 knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot.h | 130 ----------
 knot/Implementations/crypto_hash/knot512/rhys-avr/internal-util.h | 702 ----------
 knot/Implementations/crypto_hash/knot512/rhys-avr/knot-hash.c | 186 ----------
 knot/Implementations/crypto_hash/knot512/rhys-avr/knot.h | 459 ----------
 knot/Implementations/crypto_hash/knot512/rhys/aead-common.c | 69 ++++++++++
 knot/Implementations/crypto_hash/knot512/rhys/aead-common.h | 256 ++++++++++
 knot/Implementations/crypto_hash/knot512/rhys/api.h | 1 +
 knot/Implementations/crypto_hash/knot512/rhys/hash.c | 8 ++++++++
 knot/Implementations/crypto_hash/knot512/rhys/internal-knot-256-avr.S | 1093 ++++++++++
 knot/Implementations/crypto_hash/knot512/rhys/internal-knot-384-avr.S | 833 ++++++++++
 knot/Implementations/crypto_hash/knot512/rhys/internal-knot-512-avr.S | 2315 ++++++++++
 knot/Implementations/crypto_hash/knot512/rhys/internal-knot.c | 301 ++++++++++
 knot/Implementations/crypto_hash/knot512/rhys/internal-knot.h | 130 ++++++++++
 knot/Implementations/crypto_hash/knot512/rhys/internal-util.h | 702 ++++++++++
 knot/Implementations/crypto_hash/knot512/rhys/knot-hash.c | 186 ++++++++++
 knot/Implementations/crypto_hash/knot512/rhys/knot.h | 459 ++++++++++
 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/aead-common.c | 69 ----------
 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/aead-common.h | 256 ----------
 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/api.h | 5 -----
 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/encrypt.c | 26 ----------
 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/internal-gift64-avr.S | 6047 ----------
 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/internal-gift64.c | 1205 ----------
 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/internal-gift64.h | 191 ----------
 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/internal-util.h | 702 ----------
 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/lotus-locus.c | 436 ----------
 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/lotus-locus.h | 223 ----------
 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/internal-gift64-avr.S | 6047 ++++++++++
 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/internal-gift64.c | 670 ++++++++--
 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/internal-gift64.h | 115 +++++-----
 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/internal-util.h | 145 ++++++++++
 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/lotus-locus.c | 66 +++++-----
 lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/aead-common.c | 69 ----------
 lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/aead-common.h | 256 ----------
 lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/api.h | 5 -----
 lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/encrypt.c | 26 ----------
 lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/internal-gift64-avr.S | 6047 ----------
-------------------------------------- lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/internal-gift64.c | 1205 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/internal-gift64.h | 191 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/lotus-locus.c | 436 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/lotus-locus.h | 223 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/internal-gift64-avr.S | 6047 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/internal-gift64.c | 670 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--------------------------------------------------------------------------------------------------------- lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/internal-gift64.h | 115 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------------------------------------------------------- lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/internal-util.h | 145 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/lotus-locus.c | 66 +++++++++++++++++++++++++++++++++--------------------------------- orange/Implementations/crypto_aead/orangezestv1/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- orange/Implementations/crypto_aead/orangezestv1/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- orange/Implementations/crypto_aead/orangezestv1/rhys-avr/api.h | 5 ----- orange/Implementations/crypto_aead/orangezestv1/rhys-avr/encrypt.c | 26 -------------------------- orange/Implementations/crypto_aead/orangezestv1/rhys-avr/internal-photon256.c | 479 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- orange/Implementations/crypto_aead/orangezestv1/rhys-avr/internal-photon256.h | 54 ------------------------------------------------------ orange/Implementations/crypto_aead/orangezestv1/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ orange/Implementations/crypto_aead/orangezestv1/rhys-avr/orange.c | 384 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ orange/Implementations/crypto_aead/orangezestv1/rhys-avr/orange.h | 153 --------------------------------------------------------------------------------------------------------------------------------------------------------- orange/Implementations/crypto_aead/orangezestv1/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ orange/Implementations/crypto_hash/orangishv1/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- orange/Implementations/crypto_hash/orangishv1/rhys-avr/aead-common.h | 
256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- orange/Implementations/crypto_hash/orangishv1/rhys-avr/api.h | 1 - orange/Implementations/crypto_hash/orangishv1/rhys-avr/hash.c | 8 -------- orange/Implementations/crypto_hash/orangishv1/rhys-avr/internal-photon256.c | 479 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- orange/Implementations/crypto_hash/orangishv1/rhys-avr/internal-photon256.h | 54 ------------------------------------------------------ orange/Implementations/crypto_hash/orangishv1/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ orange/Implementations/crypto_hash/orangishv1/rhys-avr/orange.c | 384 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ orange/Implementations/crypto_hash/orangishv1/rhys-avr/orange.h | 153 --------------------------------------------------------------------------------------------------------------------------------------------------------- orange/Implementations/crypto_hash/orangishv1/rhys/aead-common.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ orange/Implementations/crypto_hash/orangishv1/rhys/aead-common.h | 256 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ orange/Implementations/crypto_hash/orangishv1/rhys/api.h | 1 + orange/Implementations/crypto_hash/orangishv1/rhys/hash.c | 8 ++++++++ orange/Implementations/crypto_hash/orangishv1/rhys/internal-photon256.c | 479 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ orange/Implementations/crypto_hash/orangishv1/rhys/internal-photon256.h | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ orange/Implementations/crypto_hash/orangishv1/rhys/internal-util.h | 702 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ orange/Implementations/crypto_hash/orangishv1/rhys/orange.c | 384 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ orange/Implementations/crypto_hash/orangishv1/rhys/orange.h | 153 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/api.h | 5 ----- oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/encrypt.c | 26 -------------------------- oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/internal-simp-avr.S | 949 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/internal-simp.c | 172 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/internal-simp.h | 74 -------------------------------------------------------------------------- oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/oribatida.c | 480 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/oribatida.h | 212 -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- oribatida/Implementations/crypto_aead/oribatida192v12/rhys/internal-simp-avr.S | 949 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ oribatida/Implementations/crypto_aead/oribatida192v12/rhys/internal-simp.c | 4 ++++ oribatida/Implementations/crypto_aead/oribatida192v12/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/api.h | 5 ----- oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/encrypt.c | 26 -------------------------- oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/internal-simp-avr.S | 949 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/internal-simp.c | 172 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/internal-simp.h | 74 -------------------------------------------------------------------------- oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/internal-util.h | 702 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/oribatida.c | 480 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/oribatida.h | 212 -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- oribatida/Implementations/crypto_aead/oribatida256v12/rhys/internal-simp-avr.S | 949 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ oribatida/Implementations/crypto_aead/oribatida256v12/rhys/internal-simp.c | 4 ++++ oribatida/Implementations/crypto_aead/oribatida256v12/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/api.h | 5 ----- 
photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/encrypt.c | 26 -------------------------- photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/internal-photon256.c | 479 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/internal-photon256.h | 54 ------------------------------------------------------ photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/photon-beetle.c | 451 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/photon-beetle.h | 224 -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/api.h | 5 ----- photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/encrypt.c | 26 -------------------------- 
photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/internal-photon256.c | 479 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/internal-photon256.h | 54 ------------------------------------------------------ photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/photon-beetle.c | 451 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/photon-beetle.h | 224 -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/api.h | 1 - photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/hash.c | 8 -------- photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/internal-photon256.c | 479 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/internal-photon256.h | 54 ------------------------------------------------------ photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/photon-beetle.c | 451 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/photon-beetle.h | 224 -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/aead-common.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/aead-common.h | 256 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/api.h | 1 + photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/hash.c | 8 ++++++++ photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/internal-photon256.c | 479 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/internal-photon256.h | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/internal-util.h | 702 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/photon-beetle.c | 451 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/photon-beetle.h | 224 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/api.h | 5 ----- pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/encrypt.c | 26 -------------------------- pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-ocb.h | 355 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-pyjamask-avr.S | 8883 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-pyjamask.c | 356 --------------------
 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-pyjamask.h | 253 ----------------
 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-util.h | 702 ------------------------
 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/pyjamask-128.c | 44 ------------
 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/pyjamask.h | 335 --------------------
 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-ocb.h | 22 +++++++++++++++++++++-
 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-pyjamask-avr.S | 8883 ++++++++++++++++++++++++++++++++
 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-pyjamask.c | 69 ++++++++++--
 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-pyjamask.h | 72 +++++++++---
 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-util.h | 145 ++++++++++++++++
 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/pyjamask-128.c | 4 ++--
 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/aead-common.c | 69 ------------
 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/aead-common.h | 256 ----------------
 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/api.h | 5 -----
 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/encrypt.c | 26 --------------------------
 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-ocb.h | 355 --------------------
 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-pyjamask-avr.S | 8883 --------------------------------
 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-pyjamask.c | 356 --------------------
 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-pyjamask.h | 253 ----------------
 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-util.h | 702 ------------------------
 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/pyjamask-96.c | 44 ------------
 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/pyjamask.h | 335 --------------------
 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-ocb.h | 22 +++++++++++++++++++++-
 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-pyjamask-avr.S | 8883 ++++++++++++++++++++++++++++++++
 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-pyjamask.c | 69 ++++++++++--
 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-pyjamask.h | 72 +++++++++---
 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-util.h | 145 ++++++++++++++++
 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/pyjamask-96.c | 17 ++---------------
 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/aead-common.c | 69 ------------
 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/aead-common.h | 256 ----------------
 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/api.h | 5 -----
 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/encrypt.c | 26 --------------------------
 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-skinny128-avr.S | 10099 --------------------------------
 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-skinny128.c | 801 ------------------------
 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-skinny128.h | 244 ----------------
 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-skinnyutil.h | 328 --------------------
 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-util.h | 702 ------------------------
 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/romulus.c | 1974 ----------------------------
 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/romulus.h | 476 --------------------
 romulus/Implementations/crypto_aead/romulusm1/rhys/internal-skinny128-avr.S | 10099 ++++++++++++++++++++++++++++++++
 romulus/Implementations/crypto_aead/romulusm1/rhys/internal-skinny128.c | 442 ++++++++++++--------
 romulus/Implementations/crypto_aead/romulusm1/rhys/internal-skinny128.h | 137 ++++------------
 romulus/Implementations/crypto_aead/romulusm1/rhys/internal-util.h | 145 ++++++++++++++++
 romulus/Implementations/crypto_aead/romulusm1/rhys/romulus.c | 47 +++++++++++++++++++++++++++++------------------
 romulus/Implementations/crypto_aead/romulusm2/rhys-avr/aead-common.c | 69 ------------
 romulus/Implementations/crypto_aead/romulusm2/rhys-avr/aead-common.h | 256 ----------------
 romulus/Implementations/crypto_aead/romulusm2/rhys-avr/api.h | 5 -----
 romulus/Implementations/crypto_aead/romulusm2/rhys-avr/encrypt.c | 26 --------------------------
 romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-skinny128-avr.S | 10099 --------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-skinny128.c | 801 --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-skinny128.h | 244 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-skinnyutil.h | 328 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ romulus/Implementations/crypto_aead/romulusm2/rhys-avr/romulus.c | 1974 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ romulus/Implementations/crypto_aead/romulusm2/rhys-avr/romulus.h | 476 -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- romulus/Implementations/crypto_aead/romulusm2/rhys/internal-skinny128-avr.S | 10099 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ romulus/Implementations/crypto_aead/romulusm2/rhys/internal-skinny128.c | 442 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 
romulus/Implementations/crypto_aead/romulusm2/rhys/internal-skinny128.h | 137 +++++++++++++++++++++++++++++++++-------------------------------------------------------------------------------------------------------- romulus/Implementations/crypto_aead/romulusm2/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ romulus/Implementations/crypto_aead/romulusm2/rhys/romulus.c | 47 +++++++++++++++++++++++++++++------------------ romulus/Implementations/crypto_aead/romulusm3/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- romulus/Implementations/crypto_aead/romulusm3/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- romulus/Implementations/crypto_aead/romulusm3/rhys-avr/api.h | 5 ----- romulus/Implementations/crypto_aead/romulusm3/rhys-avr/encrypt.c | 26 -------------------------- romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-skinny128-avr.S | 10099 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-skinny128.c | 801 --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-skinny128.h | 244 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-skinnyutil.h | 328 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ romulus/Implementations/crypto_aead/romulusm3/rhys-avr/romulus.c | 1974 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ romulus/Implementations/crypto_aead/romulusm3/rhys-avr/romulus.h | 476 -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- romulus/Implementations/crypto_aead/romulusm3/rhys/internal-skinny128-avr.S | 10099 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ romulus/Implementations/crypto_aead/romulusm3/rhys/internal-skinny128.c | 442 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 
romulus/Implementations/crypto_aead/romulusm3/rhys/internal-skinny128.h | 137 +++------
romulus/Implementations/crypto_aead/romulusm3/rhys/internal-util.h | 145 +++++++++
romulus/Implementations/crypto_aead/romulusm3/rhys/romulus.c | 47 +++++----
romulus/Implementations/crypto_aead/romulusn1/rhys-avr/aead-common.c | 69 ---------
romulus/Implementations/crypto_aead/romulusn1/rhys-avr/aead-common.h | 256 ---------
romulus/Implementations/crypto_aead/romulusn1/rhys-avr/api.h | 5 -----
romulus/Implementations/crypto_aead/romulusn1/rhys-avr/encrypt.c | 26 ---------
romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-skinny128-avr.S | 10099 ---------
romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-skinny128.c | 801 ---------
romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-skinny128.h | 244 ---------
romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-skinnyutil.h | 328 ---------
romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-util.h | 702 ---------
romulus/Implementations/crypto_aead/romulusn1/rhys-avr/romulus.c | 1974 ---------
romulus/Implementations/crypto_aead/romulusn1/rhys-avr/romulus.h | 476 ---------
romulus/Implementations/crypto_aead/romulusn1/rhys/internal-skinny128-avr.S | 10099 +++++++++
romulus/Implementations/crypto_aead/romulusn1/rhys/internal-skinny128.c | 442 +++++----
romulus/Implementations/crypto_aead/romulusn1/rhys/internal-skinny128.h | 137 +++------
romulus/Implementations/crypto_aead/romulusn1/rhys/internal-util.h | 145 +++++++++
romulus/Implementations/crypto_aead/romulusn1/rhys/romulus.c | 47 +++++----
romulus/Implementations/crypto_aead/romulusn2/rhys-avr/aead-common.c | 69 ---------
romulus/Implementations/crypto_aead/romulusn2/rhys-avr/aead-common.h | 256 ---------
romulus/Implementations/crypto_aead/romulusn2/rhys-avr/api.h | 5 -----
romulus/Implementations/crypto_aead/romulusn2/rhys-avr/encrypt.c | 26 ---------
romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-skinny128-avr.S | 10099 ---------
romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-skinny128.c | 801 ---------
romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-skinny128.h | 244 ---------
romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-skinnyutil.h | 328 ---------
romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-util.h | 702 ---------
romulus/Implementations/crypto_aead/romulusn2/rhys-avr/romulus.c | 1974 ---------
romulus/Implementations/crypto_aead/romulusn2/rhys-avr/romulus.h | 476 ---------
romulus/Implementations/crypto_aead/romulusn2/rhys/internal-skinny128-avr.S | 10099 +++++++++
romulus/Implementations/crypto_aead/romulusn2/rhys/internal-skinny128.c | 442 +++++----
romulus/Implementations/crypto_aead/romulusn2/rhys/internal-skinny128.h | 137 +++------
romulus/Implementations/crypto_aead/romulusn2/rhys/internal-util.h | 145 +++++++++
romulus/Implementations/crypto_aead/romulusn2/rhys/romulus.c | 47 +++++----
romulus/Implementations/crypto_aead/romulusn3/rhys-avr/aead-common.c | 69 ---------
romulus/Implementations/crypto_aead/romulusn3/rhys-avr/aead-common.h | 256 ---------
romulus/Implementations/crypto_aead/romulusn3/rhys-avr/api.h | 5 -----
romulus/Implementations/crypto_aead/romulusn3/rhys-avr/encrypt.c | 26 ---------
romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-skinny128-avr.S | 10099 ---------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-skinny128.c | 801 --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-skinny128.h | 244 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-skinnyutil.h | 328 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ romulus/Implementations/crypto_aead/romulusn3/rhys-avr/romulus.c | 1974 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ romulus/Implementations/crypto_aead/romulusn3/rhys-avr/romulus.h | 476 -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- romulus/Implementations/crypto_aead/romulusn3/rhys/internal-skinny128-avr.S | 10099 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ romulus/Implementations/crypto_aead/romulusn3/rhys/internal-skinny128.c | 442 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 
romulus/Implementations/crypto_aead/romulusn3/rhys/internal-skinny128.h | 137 +++++++++++++++++++++++++++++++++-------------------------------------------------------------------------------------------------------- romulus/Implementations/crypto_aead/romulusn3/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ romulus/Implementations/crypto_aead/romulusn3/rhys/romulus.c | 47 +++++++++++++++++++++++++++++------------------ saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/api.h | 5 ----- saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/encrypt.c | 26 -------------------------- saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/saturnin.c | 781 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/saturnin.h | 270 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 
saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/api.h | 5 ----- saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/encrypt.c | 26 -------------------------- saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/saturnin.c | 781 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/saturnin.h | 270 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ saturnin/Implementations/crypto_aead/saturninshortv2/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/api.h | 1 - saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/hash.c | 8 -------- 
saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/saturnin.c | 781 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/saturnin.h | 270 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ saturnin/Implementations/crypto_hash/saturninhashv2/rhys/aead-common.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ saturnin/Implementations/crypto_hash/saturninhashv2/rhys/aead-common.h | 256 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ saturnin/Implementations/crypto_hash/saturninhashv2/rhys/api.h | 1 + saturnin/Implementations/crypto_hash/saturninhashv2/rhys/hash.c | 8 ++++++++ saturnin/Implementations/crypto_hash/saturninhashv2/rhys/internal-util.h | 702 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ saturnin/Implementations/crypto_hash/saturninhashv2/rhys/saturnin.c | 781 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ saturnin/Implementations/crypto_hash/saturninhashv2/rhys/saturnin.h | 270 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/api.h | 5 ----- skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/encrypt.c | 26 -------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-skinny128-avr.S | 10099 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-skinny128.c | 801 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-skinny128.h | 244 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-skinnyutil.h | 328 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/skinny-aead.c | 804 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/skinny-aead.h | 518 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/internal-skinny128-avr.S | 10099 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/internal-skinny128.c | 442 ++++++++++----------------
 skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/internal-skinny128.h | 137 ++++++--------
 skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/internal-util.h | 145 ++++++++++++
 skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/skinny-aead.c | 15 ++++++++-------
 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/aead-common.c | 69 ----------
 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/aead-common.h | 256 ------------
 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/api.h | 5 -----
 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/encrypt.c | 26 --------
 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-skinny128-avr.S | 10099 ----------------
 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-skinny128.c | 801 --------------
 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-skinny128.h | 244 ------------
 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-skinnyutil.h | 328 ------------
 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-util.h | 702 -------------
 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/skinny-aead.c | 804 --------------
 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/skinny-aead.h | 518 -------------
 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/internal-skinny128-avr.S | 10099 ++++++++++++++++
 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/internal-skinny128.c | 442 ++++++++++----------------
 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/internal-skinny128.h | 137 ++++++--------
 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/internal-util.h | 145 ++++++++++++
 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/skinny-aead.c | 15 ++++++++-------
 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/aead-common.c | 69 ----------
 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/aead-common.h | 256 ------------
 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/api.h | 5 -----
 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/encrypt.c | 26 --------
 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-skinny128-avr.S | 10099 ----------------
 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-skinny128.c | 801 --------------
 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-skinny128.h | 244 ------------
 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-skinnyutil.h | 328 ------------
 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-util.h | 702 -------------
 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/skinny-aead.c | 804 --------------
 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/skinny-aead.h | 518 -------------
 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/internal-skinny128-avr.S | 10099 ++++++++++++++++
 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/internal-skinny128.c | 442 ++++++++++----------------
 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/internal-skinny128.h | 137 ++++++--------
 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/internal-util.h | 145 ++++++++++++
 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/skinny-aead.c | 15 ++++++++-------
 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/aead-common.c | 69 ----------
 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/aead-common.h | 256 ------------
 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/api.h | 5 -----
 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/encrypt.c | 26 --------
 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-skinny128-avr.S | 10099 ----------------
 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-skinny128.c | 801 --------------
 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-skinny128.h | 244 ------------
 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-skinnyutil.h | 328 ------------
 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-util.h | 702 -------------
 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/skinny-aead.c | 804 --------------
 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/skinny-aead.h | 518 -------------
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/internal-skinny128-avr.S | 10099 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++ skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/internal-skinny128.c | 442 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/internal-skinny128.h | 137 +++++++++++++++++++++++++++++++++-------------------------------------------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/skinny-aead.c | 15 ++++++++------- skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/api.h | 5 ----- skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/encrypt.c | 26 -------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-skinny128-avr.S | 10099 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-skinny128.c | 801 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-skinny128.h | 244 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-skinnyutil.h | 328 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/skinny-aead.c | 804 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/skinny-aead.h | 518 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/internal-skinny128-avr.S | 10099 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++ skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/internal-skinny128.c | 442 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/internal-skinny128.h | 137 +++++++++++++++++++++++++++++++++-------------------------------------------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/skinny-aead.c | 15 ++++++++------- skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/api.h | 5 ----- skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/encrypt.c | 26 -------------------------- skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-skinny128-avr.S | 10099 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-skinny128.c | 801 --------
 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-skinny128.h | 244 ---
 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-skinnyutil.h | 328 ----
 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-util.h | 702 -------
 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/skinny-aead.c | 804 --------
 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/skinny-aead.h | 518 -----
 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/internal-skinny128-avr.S | 10099 ++++++++++++
 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/internal-skinny128.c | 442 +++++----
 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/internal-skinny128.h | 137 ++--
 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/internal-util.h | 145 ++
 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/skinny-aead.c | 15 ++++++++-------
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/aead-common.c | 69 -
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/aead-common.h | 256 ---
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/api.h | 1 -
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/hash.c | 8 --------
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-skinny128-avr.S | 10099 ------------
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-skinny128.c | 801 --------
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-skinny128.h | 244 ---
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-skinnyutil.h | 328 ----
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-util.h | 702 -------
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/skinny-hash.c | 174 --
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/skinny-hash.h | 96 -
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/aead-common.c | 69 +
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/aead-common.h | 256 +++
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/api.h | 1 +
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/hash.c | 8 ++++++++
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-skinny128-avr.S | 10099 ++++++++++++
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-skinny128.c | 801 ++++++++
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-skinny128.h | 244 +++
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-skinnyutil.h | 328 ++++
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-util.h | 702 +++++++
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/skinny-hash.c | 174 ++
 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/skinny-hash.h | 96 +
 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/aead-common.c | 69 -
 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/aead-common.h | 256 ---
 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/api.h | 1 -
 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/hash.c | 8 --------
 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-skinny128-avr.S | 10099 ------------
 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-skinny128.c | 801 --------
 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-skinny128.h | 244 ---
 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-skinnyutil.h | 328 ----
 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-util.h | 702 -------
 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/skinny-hash.c | 174 --
 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/skinny-hash.h | 96 -
 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/aead-common.c | 69 +
 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/aead-common.h | 256 +++
 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/api.h | 1 +
 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/hash.c | 8 ++++++++
 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-skinny128-avr.S | 10099 ++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-skinny128.c | 801 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-skinny128.h | 244 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-skinnyutil.h | 328 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-util.h | 702 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/skinny-hash.c | 174 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/skinny-hash.h | 96 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/api.h | 5 ----- sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/encrypt.c | 26 -------------------------- sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/internal-sparkle-avr.S | 2887 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/internal-sparkle.c | 382 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/internal-sparkle.h | 82 ---------------------------------------------------------------------------------- 
sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/sparkle.c | 1135 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/sparkle.h | 515 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys/internal-sparkle-avr.S | 2887 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys/internal-sparkle.c | 94 +++++++++++++++++++++++++++++++++++++++++++++++++++++++--------------------------------------- sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys/sparkle.c | 168 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------------------------------------------------------------ 
sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/api.h | 5 ----- sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/encrypt.c | 26 -------------------------- sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/internal-sparkle-avr.S | 2887 -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------ sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/internal-sparkle.c | 382 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/internal-sparkle.h | 82 ---------------------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/sparkle.c | 1135 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/sparkle.h | 515 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys/internal-sparkle-avr.S | 2887 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys/internal-sparkle.c | 94 +++++++++++++++++++++++++++++++++++++++++++++++++++++++--------------------------------------- sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys/sparkle.c | 168 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------------------------------------------------------------ 
sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/api.h | 5 ----- sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/encrypt.c | 26 -------------------------- sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/internal-sparkle-avr.S | 2887 -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------ sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/internal-sparkle.c | 382 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/internal-sparkle.h | 82 ---------------------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/sparkle.c | 1135 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/sparkle.h | 515 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys/internal-sparkle-avr.S | 2887 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys/internal-sparkle.c | 94 +++++++++++++++++++++++++++++++++++++++++++++++++++++++--------------------------------------- sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys/sparkle.c | 168 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------------------------------------------------------------ 
sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/api.h | 5 ----- sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/encrypt.c | 26 -------------------------- sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/internal-sparkle-avr.S | 2887 -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------ sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/internal-sparkle.c | 382 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/internal-sparkle.h | 82 ---------------------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/sparkle.c | 1135 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/sparkle.h | 515 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys/internal-sparkle-avr.S | 2887 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys/internal-sparkle.c | 94 +++++++++++++++++++++++++++++++++++++++++++++++++++++++--------------------------------------- sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys/sparkle.c | 168 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------------------------------------------------------------ 
 sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/aead-common.c | 69 ----------------
 sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/aead-common.h | 256 ----------------
 sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/api.h | 1 -
 sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/hash.c | 8 --------
 sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/internal-sparkle-avr.S | 2887 ----------------
 sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/internal-sparkle.c | 382 ----------------
 sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/internal-sparkle.h | 82 ----------------
 sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/internal-util.h | 702 ----------------
 sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/sparkle.c | 1135 ----------------
 sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/sparkle.h | 515 ----------------
 sparkle/Implementations/crypto_hash/esch256v1/rhys/aead-common.c | 69 ++++++++++++++++
 sparkle/Implementations/crypto_hash/esch256v1/rhys/aead-common.h | 256 ++++++++++++++++
 sparkle/Implementations/crypto_hash/esch256v1/rhys/api.h | 1 +
 sparkle/Implementations/crypto_hash/esch256v1/rhys/hash.c | 8 ++++++++
 sparkle/Implementations/crypto_hash/esch256v1/rhys/internal-sparkle-avr.S | 2887 ++++++++++++++++
 sparkle/Implementations/crypto_hash/esch256v1/rhys/internal-sparkle.c | 382 ++++++++++++++++
 sparkle/Implementations/crypto_hash/esch256v1/rhys/internal-sparkle.h | 82 ++++++++++++++++
 sparkle/Implementations/crypto_hash/esch256v1/rhys/internal-util.h | 702 ++++++++++++++++
 sparkle/Implementations/crypto_hash/esch256v1/rhys/sparkle.c | 1135 ++++++++++++++++
 sparkle/Implementations/crypto_hash/esch256v1/rhys/sparkle.h | 515 ++++++++++++++++
 sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/aead-common.c | 69 ----------------
 sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/aead-common.h | 256 ----------------
 sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/api.h | 1 -
 sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/hash.c | 8 --------
 sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/internal-sparkle-avr.S | 2887 ----------------
 sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/internal-sparkle.c | 382 ----------------
 sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/internal-sparkle.h | 82 ----------------
 sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/internal-util.h | 702 ----------------
 sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/sparkle.c | 1135 ----------------
 sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/sparkle.h | 515 ----------------
 sparkle/Implementations/crypto_hash/esch384v1/rhys/aead-common.c | 69 ++++++++++++++++
 sparkle/Implementations/crypto_hash/esch384v1/rhys/aead-common.h | 256 ++++++++++++++++
 sparkle/Implementations/crypto_hash/esch384v1/rhys/api.h | 1 +
 sparkle/Implementations/crypto_hash/esch384v1/rhys/hash.c | 8 ++++++++
 sparkle/Implementations/crypto_hash/esch384v1/rhys/internal-sparkle-avr.S | 2887 ++++++++++++++++
 sparkle/Implementations/crypto_hash/esch384v1/rhys/internal-sparkle.c | 382 ++++++++++++++++
 sparkle/Implementations/crypto_hash/esch384v1/rhys/internal-sparkle.h | 82 ++++++++++++++++
 sparkle/Implementations/crypto_hash/esch384v1/rhys/internal-util.h | 702 ++++++++++++++++
 sparkle/Implementations/crypto_hash/esch384v1/rhys/sparkle.c | 1135 ++++++++++++++++
 sparkle/Implementations/crypto_hash/esch384v1/rhys/sparkle.h | 515 ++++++++++++++++
 spix/Implementations/crypto_aead/spix128v1/rhys-avr/aead-common.c | 69 ----------------
 spix/Implementations/crypto_aead/spix128v1/rhys-avr/aead-common.h | 256 ----------------
 spix/Implementations/crypto_aead/spix128v1/rhys-avr/api.h | 5 -----
 spix/Implementations/crypto_aead/spix128v1/rhys-avr/encrypt.c | 26 ----------------
 spix/Implementations/crypto_aead/spix128v1/rhys-avr/internal-sliscp-256-spix-avr.S | 1129 ----------------
 spix/Implementations/crypto_aead/spix128v1/rhys-avr/internal-sliscp-light.c | 413 ----------------
 spix/Implementations/crypto_aead/spix128v1/rhys-avr/internal-sliscp-light.h | 168 ----------------
 spix/Implementations/crypto_aead/spix128v1/rhys-avr/internal-util.h | 702 ----------------
 spix/Implementations/crypto_aead/spix128v1/rhys-avr/spix.c | 211 ----------------
 spix/Implementations/crypto_aead/spix128v1/rhys-avr/spix.h | 126 ----------------
 spix/Implementations/crypto_aead/spix128v1/rhys/internal-sliscp-256-spix-avr.S | 1129 ++++++++++++++++
 spix/Implementations/crypto_aead/spix128v1/rhys/internal-sliscp-light.c | 9 +++++++--
 spix/Implementations/crypto_aead/spix128v1/rhys/internal-sliscp-light.h | 3 +--
 spix/Implementations/crypto_aead/spix128v1/rhys/internal-util.h | 145 ++++++++++++++++
 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/aead-common.c | 69 ----------------
 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/aead-common.h | 256 ----------------
 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/api.h | 5 -----
 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/encrypt.c | 26 ----------------
 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-sliscp-192-avr.S | 794 ----------------
 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-sliscp-256-spoc-avr.S | 1142 ----------------
 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-sliscp-light.c | 413 ----------------
 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-sliscp-light.h | 168 ----------------
 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-util.h | 702 ----------------
 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/spoc.c | 406 ----------------
 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/spoc.h | 204 ----------------
 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-sliscp-192-avr.S | 794 ++++++++++++++++
 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-sliscp-256-spoc-avr.S | 1142 ++++++++++++++++
 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-sliscp-light.c | 9 +++++++--
 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-sliscp-light.h | 3 +--
 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-util.h | 145 ++++++++++++++++
 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/spoc.c | 14 +++++++-------
 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/aead-common.c | 69 ----------------
 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/aead-common.h | 256 ----------------
 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/api.h | 5 -----
 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/encrypt.c | 26 ----------------
 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-sliscp-192-avr.S | 794 ----------------
 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-sliscp-256-spoc-avr.S | 1142 ----------------
 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-sliscp-light.c | 413 ----------------
 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-sliscp-light.h | 168 ----------------
 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-util.h | 702 ----------------
 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/spoc.c | 406 ----------------
 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/spoc.h | 204 ----------------
 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-sliscp-192-avr.S | 794 ++++++++++++++++
 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-sliscp-256-spoc-avr.S | 1142 ++++++++++++++++
 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-sliscp-light.c | 9 +++++++--
 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-sliscp-light.h | 3 +--
 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-util.h | 145 ++++++++++++++++
 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/spoc.c | 14 +++++++-------
 spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/aead-common.c | 69 ----------------
 spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/aead-common.h | 256 ----------------
 spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/api.h | 5 -----
 spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/encrypt.c | 26 ----------------
 spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/internal-spook.c | 557 ----------------
 spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/internal-spook.h | 146 ----------------
 spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/internal-util.h | 702 ----------------
 spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/spook.c | 552 ----------------
 spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/spook.h | 344 ----------------
 spook/Implementations/crypto_aead/spook128mu384v1/rhys/internal-util.h | 145 ++++++++++++++++
 spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/aead-common.c | 69 ----------------
 spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/aead-common.h | 256 ----------------
 spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/api.h | 5 -----
 spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/encrypt.c | 26 ----------------
 spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/internal-spook.c | 557 ----------------
 spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/internal-spook.h | 146 ----------------
 spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/internal-util.h | 702 ----------------
 spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/spook.c | 552 ----------------
 spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/spook.h | 344 ----------------
 spook/Implementations/crypto_aead/spook128mu512v1/rhys/internal-util.h | 145 ++++++++++++++++
 spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/aead-common.c | 69 ----------------
 spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/aead-common.h | 256 ----------------
 spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/api.h | 5 -----
 spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/encrypt.c | 26 ----------------
 spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/internal-spook.c | 557 ----------------
 spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/internal-spook.h | 146 ----------------
 spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/internal-util.h | 702 ----------------
 spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/spook.c | 552 ----------------
 spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/spook.h | 344 ----------------
 spook/Implementations/crypto_aead/spook128su384v1/rhys/internal-util.h | 145 ++++++++++++++++
 spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/aead-common.c | 69 ----------------
 spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/aead-common.h | 256 ----------------
 spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/api.h | 5 -----
 spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/encrypt.c | 26 ----------------
 spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/internal-spook.c | 557 ----------------
 spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/internal-spook.h | 146 ----------------
 spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/internal-util.h | 702 ----------------
 spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/spook.c | 552 ----------------
 spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/spook.h | 344 ----------------
 spook/Implementations/crypto_aead/spook128su512v1/rhys/internal-util.h | 145 ++++++++++++++++
 subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/aead-common.c | 69 ----------------
 subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/aead-common.h | 256 ----------------
 subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/api.h | 5 -----
 subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/encrypt.c | 26 ----------------
 subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/internal-subterranean.c | 441 ----------------
 subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/internal-subterranean.h | 144 ----------------
 subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/internal-util.h | 702 ----------------
 subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/subterranean.c | 228 ----------------
 subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/subterranean.h | 200 ----------------
 subterranean/Implementations/crypto_aead/subterraneanv1/rhys/internal-util.h | 145 ++++++++++++++++
 subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/aead-common.c | 69 ----------------
 subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/aead-common.h | 256 ----------------
 subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/api.h | 1 -
 subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/hash.c | 8 --------
 subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/internal-subterranean.c | 441 ----------------
 subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/internal-subterranean.h | 144 ----------------
 subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/internal-util.h | 702 ----------------
 subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/subterranean.c | 228 ----------------
 subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/subterranean.h | 200 ----------------
 subterranean/Implementations/crypto_hash/subterraneanv1/rhys/aead-common.c | 69 ++++++++++++++++
 subterranean/Implementations/crypto_hash/subterraneanv1/rhys/aead-common.h | 256 ++++++++++++++++
 subterranean/Implementations/crypto_hash/subterraneanv1/rhys/api.h | 1 +
 subterranean/Implementations/crypto_hash/subterraneanv1/rhys/hash.c | 8 ++++++++
 subterranean/Implementations/crypto_hash/subterraneanv1/rhys/internal-subterranean.c | 441 ++++++++++++++++
 subterranean/Implementations/crypto_hash/subterraneanv1/rhys/internal-subterranean.h | 144 ++++++++++++++++
 subterranean/Implementations/crypto_hash/subterraneanv1/rhys/internal-util.h | 702 ++++++++++++++++
 subterranean/Implementations/crypto_hash/subterraneanv1/rhys/subterranean.c | 228 ++++++++++++++++
 subterranean/Implementations/crypto_hash/subterraneanv1/rhys/subterranean.h | 200 ++++++++++++++++
 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/aead-common.c | 69 ----------------
 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/aead-common.h | 256 ----------------
 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/api.h | 5 -----
 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/encrypt.c | 26 ----------------
 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128-config.h | 80 ----------------
 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128.c | 1498 ----------------
 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128.h | 246 ----------------
 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128b-avr.S | 2104 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128b-full-avr.S | 5037 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128b-small-avr.S | 6053 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128b-tiny-avr.S | 6766 -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/sundae-gift.c | 356 -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/sundae-gift.h | 341 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128-config.h | 80 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128.c | 1061 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128.h | 57 +++++++++++++++++++++++++++++++++++++-------------------- sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128b-avr.S | 2104 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128b-full-avr.S | 5037 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128b-small-avr.S | 6053 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128b-tiny-avr.S | 6766 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/sundae-gift.c | 6 ++---- sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/api.h | 5 ----- sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/encrypt.c | 26 -------------------------- sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128-config.h | 80 -------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128.c | 1498 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128.h | 246 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128b-avr.S | 2104 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128b-full-avr.S | 5037 
 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128b-small-avr.S | 6053 ----------------
 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128b-tiny-avr.S | 6766 ----------------
 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-util.h | 702 ----------------
 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/sundae-gift.c | 356 ----------------
 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/sundae-gift.h | 341 ----------------
 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128-config.h | 80 ++++++++++++++++
 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128.c | 1061 ++++++++++++--------
 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128.h | 57 +++++++++++++++++++++--------------------
 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128b-avr.S | 2104 ++++++++++++++++
 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128b-full-avr.S | 5037 ++++++++++++++++
 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128b-small-avr.S | 6053 ++++++++++++++++
 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128b-tiny-avr.S | 6766 ++++++++++++++++
 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-util.h | 145 ++++++++++++++++
 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/sundae-gift.c | 6 ++----
 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/aead-common.c | 69 ----------------
 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/aead-common.h | 256 ----------------
 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/api.h | 5 -----
 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/encrypt.c | 26 --------------------------
 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128-config.h | 80 ----------------
 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128.c | 1498 ----------------
 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128.h | 246 ----------------
 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128b-avr.S | 2104 ----------------
 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128b-full-avr.S | 5037 ----------------
 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128b-small-avr.S | 6053 ----------------
 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128b-tiny-avr.S | 6766 ----------------
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/sundae-gift.c | 356 -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/sundae-gift.h | 341 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128-config.h | 80 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128.c | 1061 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128.h | 57 +++++++++++++++++++++++++++++++++++++-------------------- sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128b-avr.S | 2104 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128b-full-avr.S | 5037 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128b-small-avr.S | 6053 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128b-tiny-avr.S | 6766 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-util.h | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/sundae-gift.c | 6 ++---- sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/aead-common.c | 69 --------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/aead-common.h | 256 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/api.h | 5 ----- sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/encrypt.c | 26 -------------------------- sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128-config.h | 80 -------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128.c | 1498 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128.h | 246 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128b-avr.S | 2104 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128b-full-avr.S | 5037 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128b-small-avr.S | 6053 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128b-tiny-avr.S | 6766 -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-util.h | 702 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/sundae-gift.c | 356 -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/sundae-gift.h | 341 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128-config.h | 80 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128.c | 1061 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128.h | 57 +++++++++++++++++++++++++++++++++++++-------------------- sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128b-avr.S | 2104 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128b-full-avr.S | 5037 
 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128b-small-avr.S | 6053 ++++++++++++++++++++
 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128b-tiny-avr.S | 6766 ++++++++++++++++++++
 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-util.h | 145 ++++++++++++++++++++
 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/sundae-gift.c | 6 ++----
 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/aead-common.c | 69 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/aead-common.h | 256 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/api.h | 5 -----
 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/encrypt.c | 26 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/internal-tinyjambu-avr.S | 471 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/internal-tinyjambu.c | 70 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/internal-tinyjambu.h | 72 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/internal-util.h | 702 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/tinyjambu.c | 487 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/tinyjambu.h | 270 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys/internal-tinyjambu-avr.S | 471 ++++++++++++++++++++
 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys/internal-tinyjambu.c | 4 ++++
 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys/internal-util.h | 145 ++++++++++++++++++++
 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/aead-common.c | 69 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/aead-common.h | 256 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/api.h | 5 -----
 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/encrypt.c | 26 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/internal-tinyjambu-avr.S | 471 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/internal-tinyjambu.c | 70 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/internal-tinyjambu.h | 72 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/internal-util.h | 702 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/tinyjambu.c | 487 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/tinyjambu.h | 270 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys/internal-tinyjambu-avr.S | 471 ++++++++++++++++++++
 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys/internal-tinyjambu.c | 4 ++++
 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys/internal-util.h | 145 ++++++++++++++++++++
 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/aead-common.c | 69 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/aead-common.h | 256 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/api.h | 5 -----
 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/encrypt.c | 26 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/internal-tinyjambu-avr.S | 471 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/internal-tinyjambu.c | 70 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/internal-tinyjambu.h | 72 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/internal-util.h | 702 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/tinyjambu.c | 487 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/tinyjambu.h | 270 --------------------
 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys/internal-tinyjambu-avr.S | 471 ++++++++++++++++++++
 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys/internal-tinyjambu.c | 4 ++++
 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys/internal-util.h | 145 ++++++++++++++++++++
 wage/Implementations/crypto_aead/wageae128v1/rhys-avr/aead-common.c | 69 --------------------
 wage/Implementations/crypto_aead/wageae128v1/rhys-avr/aead-common.h | 256 --------------------
 wage/Implementations/crypto_aead/wageae128v1/rhys-avr/api.h | 5 -----
 wage/Implementations/crypto_aead/wageae128v1/rhys-avr/encrypt.c | 26 --------------------
 wage/Implementations/crypto_aead/wageae128v1/rhys-avr/internal-util.h | 702 --------------------
 wage/Implementations/crypto_aead/wageae128v1/rhys-avr/internal-wage.c | 512 --------------------
 wage/Implementations/crypto_aead/wageae128v1/rhys-avr/internal-wage.h | 117 --------------------
 wage/Implementations/crypto_aead/wageae128v1/rhys-avr/wage.c | 168 --------------------
 wage/Implementations/crypto_aead/wageae128v1/rhys-avr/wage.h | 127 --------------------
 wage/Implementations/crypto_aead/wageae128v1/rhys/internal-util.h | 145 ++++++++++++++++++++
 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/aead-common.c | 69 --------------------
 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/aead-common.h | 256 --------------------
 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/api.h | 5 -----
 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/encrypt.c | 26 --------------------
 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/internal-util.h | 702 --------------------
 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/internal-xoodoo-avr.S | 935 --------------------
 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/internal-xoodoo.c | 166 --------------------
 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/internal-xoodoo.h | 80 --------------------
 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/xoodyak.c | 321 --------------------
 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/xoodyak.h | 226 --------------------
 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys/internal-util.h | 145 ++++++++++++++++++++
 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys/internal-xoodoo-avr.S | 935 ++++++++++++++++++++
 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys/internal-xoodoo.c | 4 ++++
 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/aead-common.c | 69 --------------------
 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/aead-common.h | 256 --------------------
 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/api.h | 1 -
 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/hash.c | 8 --------
 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/internal-util.h | 702 --------------------
 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/internal-xoodoo-avr.S | 935 --------------------
 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/internal-xoodoo.c | 166 --------------------
 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/internal-xoodoo.h | 80 --------------------
 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/xoodyak.c | 321 --------------------
 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/xoodyak.h | 226 --------------------
 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/aead-common.c | 69 ++++++++++++++++++++
 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/aead-common.h | 256 ++++++++++++++++++++
 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/api.h | 1 +
 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/hash.c | 8 ++++++++
 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/internal-util.h | 702 ++++++++++++++++++++
 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/internal-xoodoo-avr.S | 935 ++++++++++++++++++++
 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/internal-xoodoo.c | 166 ++++++++++++++++++++
 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/internal-xoodoo.h | 80 ++++++++++++++++++++
 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/xoodyak.c | 321 ++++++++++++++++++++
 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/xoodyak.h | 226 ++++++++++++++++++++
 1541 files changed, 532521 insertions(+), 732921 deletions(-)
 delete mode 100644 ace/Implementations/crypto_aead/aceae128v1/rhys-avr/ace.c
 delete mode 100644 ace/Implementations/crypto_aead/aceae128v1/rhys-avr/ace.h
 delete mode 100644 ace/Implementations/crypto_aead/aceae128v1/rhys-avr/aead-common.c
 delete mode 100644 ace/Implementations/crypto_aead/aceae128v1/rhys-avr/aead-common.h
 delete mode 100644 ace/Implementations/crypto_aead/aceae128v1/rhys-avr/api.h
 delete mode 100644 ace/Implementations/crypto_aead/aceae128v1/rhys-avr/encrypt.c
 delete mode 100644 ace/Implementations/crypto_aead/aceae128v1/rhys-avr/internal-sliscp-320-avr.S
 delete mode 100644 ace/Implementations/crypto_aead/aceae128v1/rhys-avr/internal-sliscp-light.c
 delete mode 100644 ace/Implementations/crypto_aead/aceae128v1/rhys-avr/internal-sliscp-light.h
 delete mode 100644 ace/Implementations/crypto_aead/aceae128v1/rhys-avr/internal-util.h
 create mode 100644 ace/Implementations/crypto_aead/aceae128v1/rhys/internal-sliscp-320-avr.S
 delete mode 100644 ace/Implementations/crypto_hash/acehash256v1/rhys-avr/ace.c
 delete mode 100644 ace/Implementations/crypto_hash/acehash256v1/rhys-avr/ace.h
 delete mode 100644 ace/Implementations/crypto_hash/acehash256v1/rhys-avr/aead-common.c
 delete mode 100644 ace/Implementations/crypto_hash/acehash256v1/rhys-avr/aead-common.h
 delete mode 100644 ace/Implementations/crypto_hash/acehash256v1/rhys-avr/api.h
 delete mode 100644 ace/Implementations/crypto_hash/acehash256v1/rhys-avr/hash.c
 delete mode 100644 ace/Implementations/crypto_hash/acehash256v1/rhys-avr/internal-sliscp-320-avr.S
 delete mode 100644 ace/Implementations/crypto_hash/acehash256v1/rhys-avr/internal-sliscp-light.c
 delete mode 100644 ace/Implementations/crypto_hash/acehash256v1/rhys-avr/internal-sliscp-light.h
 delete mode 100644 ace/Implementations/crypto_hash/acehash256v1/rhys-avr/internal-util.h
 create mode 100644 ace/Implementations/crypto_hash/acehash256v1/rhys/ace.c
 create mode 100644 ace/Implementations/crypto_hash/acehash256v1/rhys/ace.h
 create mode 100644 ace/Implementations/crypto_hash/acehash256v1/rhys/aead-common.c
 create mode 100644 ace/Implementations/crypto_hash/acehash256v1/rhys/aead-common.h
 create mode 100644 ace/Implementations/crypto_hash/acehash256v1/rhys/api.h
 create mode 100644 ace/Implementations/crypto_hash/acehash256v1/rhys/hash.c
 create mode 100644 ace/Implementations/crypto_hash/acehash256v1/rhys/internal-sliscp-320-avr.S
 create mode 100644 ace/Implementations/crypto_hash/acehash256v1/rhys/internal-sliscp-light.c
 create mode 100644 ace/Implementations/crypto_hash/acehash256v1/rhys/internal-sliscp-light.h
 create mode 100644 ace/Implementations/crypto_hash/acehash256v1/rhys/internal-util.h
 delete mode 100644 ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/aead-common.c
 delete mode 100644 ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/aead-common.h
 delete mode 100644 ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/api.h
 delete mode 100644 ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/ascon128.c
 delete mode 100644 ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/ascon128.h
 delete mode 100644 ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/encrypt.c
 delete mode 100644 ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/internal-ascon-avr.S
 delete mode 100644 ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/internal-ascon.c
 delete mode 100644 ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/internal-ascon.h
 delete mode 100644 ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/internal-util.h
 create mode 100644 ascon/Implementations/crypto_aead/ascon128av12/rhys/internal-ascon-avr.S
 delete mode 100644 ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/aead-common.c
 delete mode 100644 ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/aead-common.h
 delete mode 100644 ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/api.h
 delete mode 100644 ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/ascon128.c
 delete mode 100644 ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/ascon128.h
 delete mode 100644 ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/encrypt.c
 delete mode 100644 ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/internal-ascon-avr.S
 delete mode 100644 ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/internal-ascon.c
 delete mode 100644 ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/internal-ascon.h
 delete mode 100644 ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/internal-util.h
 create mode 100644 ascon/Implementations/crypto_aead/ascon128v12/rhys/internal-ascon-avr.S
 delete mode 100644 ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/aead-common.c
 delete mode 100644 ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/aead-common.h
 delete mode 100644 ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/api.h
 delete mode 100644 ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/ascon128.c
 delete mode 100644 ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/ascon128.h
 delete mode 100644 ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/encrypt.c
 delete mode 100644 ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/internal-ascon-avr.S
 delete mode 100644 ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/internal-ascon.c
 delete mode 100644 ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/internal-ascon.h
 delete mode 100644 ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/internal-util.h
 create mode 100644 ascon/Implementations/crypto_aead/ascon80pqv12/rhys/internal-ascon-avr.S
 delete mode 100644 ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/aead-common.c
 delete mode 100644 ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/aead-common.h
 delete mode 100644 ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/api.h
 delete mode 100644 ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/ascon-hash.c
 delete mode 100644 ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/ascon128.h
 delete mode 100644 ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/hash.c
 delete mode 100644 ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/internal-ascon-avr.S
 delete mode 100644 ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/internal-ascon.c
 delete mode 100644 ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/internal-ascon.h
 delete mode 100644 ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/internal-util.h
 create mode 100644 ascon/Implementations/crypto_hash/asconhashv12/rhys/aead-common.c
 create mode 100644 ascon/Implementations/crypto_hash/asconhashv12/rhys/aead-common.h
 create mode 100644 ascon/Implementations/crypto_hash/asconhashv12/rhys/api.h
 create mode 100644 ascon/Implementations/crypto_hash/asconhashv12/rhys/ascon-hash.c
 create mode 100644 ascon/Implementations/crypto_hash/asconhashv12/rhys/ascon128.h
 create mode 100644 ascon/Implementations/crypto_hash/asconhashv12/rhys/hash.c
 create mode 100644 ascon/Implementations/crypto_hash/asconhashv12/rhys/internal-ascon-avr.S
 create mode 100644 ascon/Implementations/crypto_hash/asconhashv12/rhys/internal-ascon.c
 create mode 100644 ascon/Implementations/crypto_hash/asconhashv12/rhys/internal-ascon.h
 create mode 100644 ascon/Implementations/crypto_hash/asconhashv12/rhys/internal-util.h
 delete mode 100644 ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/aead-common.c
 delete mode 100644 ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/aead-common.h
 delete mode 100644 ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/api.h
 delete mode 100644 ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/ascon-xof.c
 delete mode 100644 ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/ascon128.h
 delete mode 100644 ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/hash.c
 delete mode 100644 ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/internal-ascon-avr.S
 delete mode 100644 ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/internal-ascon.c
 delete mode 100644 ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/internal-ascon.h
 delete mode 100644 ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/internal-util.h
 create mode 100644 ascon/Implementations/crypto_hash/asconxofv12/rhys/aead-common.c
 create mode 100644 ascon/Implementations/crypto_hash/asconxofv12/rhys/aead-common.h
 create mode 100644 ascon/Implementations/crypto_hash/asconxofv12/rhys/api.h
 create mode 100644 ascon/Implementations/crypto_hash/asconxofv12/rhys/ascon-xof.c
 create mode 100644 ascon/Implementations/crypto_hash/asconxofv12/rhys/ascon128.h
 create mode 100644 ascon/Implementations/crypto_hash/asconxofv12/rhys/hash.c
 create mode 100644 ascon/Implementations/crypto_hash/asconxofv12/rhys/internal-ascon-avr.S
 create mode 100644 ascon/Implementations/crypto_hash/asconxofv12/rhys/internal-ascon.c
 create mode 100644 ascon/Implementations/crypto_hash/asconxofv12/rhys/internal-ascon.h
 create mode 100644 ascon/Implementations/crypto_hash/asconxofv12/rhys/internal-util.h
 delete mode 100644 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/aead-common.c
 delete mode 100644 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/aead-common.h
 delete mode 100644 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/api.h
 delete mode 100644 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/comet.c
 delete mode 100644 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/comet.h
 delete mode 100644 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/encrypt.c
 delete mode 100644 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-cham-avr.S
 delete mode 100644 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-cham.c
 delete mode 100644 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-cham.h
 delete mode 100644 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-speck64-avr.S
 delete mode 100644 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-speck64.c
 delete mode 100644 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-speck64.h
 delete mode 100644 comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-util.h
 create mode 100644 comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-cham-avr.S
 create mode 100644 comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-speck64-avr.S
 create mode 100644 comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-speck64.c
 create mode 100644 comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-speck64.h
 delete mode 100644 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/aead-common.c
 delete mode 100644 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/aead-common.h
 delete mode 100644 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/api.h
 delete mode 100644 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/comet.c
 delete mode 100644 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/comet.h
 delete mode 100644 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/encrypt.c
 delete mode 100644 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-cham-avr.S
 delete mode 100644 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-cham.c
 delete mode 100644 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-cham.h
 delete mode 100644 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-speck64-avr.S
 delete mode 100644 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-speck64.c
 delete mode 100644 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-speck64.h
 delete mode 100644 comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-util.h
 create mode 100644 comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-cham-avr.S
 create mode 100644 comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-speck64-avr.S
 create mode 100644 comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-speck64.c
 create mode 100644 comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-speck64.h
 delete mode 100644 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/aead-common.c
 delete mode 100644 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/aead-common.h
 delete mode 100644 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/api.h
 delete mode 100644 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/comet.c
 delete mode 100644 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/comet.h
 delete mode 100644 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/encrypt.c
 delete mode 100644 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-cham-avr.S
 delete mode 100644 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-cham.c
 delete mode 100644 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-cham.h
 delete mode 100644 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-speck64-avr.S
 delete mode 100644 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-speck64.c
 delete mode 100644 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-speck64.h
 delete mode 100644 comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-util.h
 create mode 100644 comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-cham-avr.S
 create mode 100644 comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-speck64-avr.S
 create mode 100644 comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-speck64.c
 create mode 100644 comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-speck64.h
 delete mode 100644 drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/aead-common.c
 delete mode 100644 drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/aead-common.h
 delete mode 100644 drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/api.h
 delete mode 100644 drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/drygascon.c
 delete mode 100644 drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/drygascon.h
 delete mode 100644 drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/encrypt.c
 delete mode 100644 drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/internal-drysponge-avr.S
 delete mode 100644 drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/internal-drysponge.c
 delete mode 100644 drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/internal-drysponge.h
 delete mode 100644 drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/internal-util.h
 create mode 100644 drygascon/Implementations/crypto_aead/drygascon128/rhys/internal-drysponge-avr.S
 delete mode 100644 drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/aead-common.c
 delete mode 100644 drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/aead-common.h
 delete mode 100644 drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/api.h
 delete mode 100644 drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/drygascon.c
 delete mode 100644 drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/drygascon.h
 delete mode 100644 drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/encrypt.c
 delete mode 100644 drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/internal-drysponge-avr.S
 delete mode 100644 drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/internal-drysponge.c
 delete mode 100644 drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/internal-drysponge.h
 delete mode 100644 drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/internal-util.h
 create mode 100644 drygascon/Implementations/crypto_aead/drygascon256/rhys/internal-drysponge-avr.S
 delete mode 100644 drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/aead-common.c
 delete mode 100644 drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/aead-common.h
 delete mode 100644 drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/api.h
 delete mode 100644 drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/drygascon.c
 delete mode 100644 drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/drygascon.h
 delete mode 100644 drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/hash.c
 delete mode 100644 drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/internal-drysponge-avr.S
 delete mode 100644 drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/internal-drysponge.c
 delete mode 100644 drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/internal-drysponge.h
 delete mode 100644 drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/internal-util.h
 create mode 100644 drygascon/Implementations/crypto_hash/drygascon128/rhys/aead-common.c
 create mode 100644 drygascon/Implementations/crypto_hash/drygascon128/rhys/aead-common.h
 create mode 100644 drygascon/Implementations/crypto_hash/drygascon128/rhys/api.h
 create mode 100644 drygascon/Implementations/crypto_hash/drygascon128/rhys/drygascon.c
 create mode 100644 drygascon/Implementations/crypto_hash/drygascon128/rhys/drygascon.h
 create mode 100644 drygascon/Implementations/crypto_hash/drygascon128/rhys/hash.c
 create mode 100644 drygascon/Implementations/crypto_hash/drygascon128/rhys/internal-drysponge-avr.S
 create mode 100644 drygascon/Implementations/crypto_hash/drygascon128/rhys/internal-drysponge.c
 create mode 100644 drygascon/Implementations/crypto_hash/drygascon128/rhys/internal-drysponge.h
 create mode 100644 drygascon/Implementations/crypto_hash/drygascon128/rhys/internal-util.h
 delete mode 100644 drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/aead-common.c
 delete mode 100644 drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/aead-common.h
 delete mode 100644 drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/api.h
 delete mode 100644 drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/drygascon.c
 delete mode 100644 drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/drygascon.h
 delete mode 100644 drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/hash.c
 delete mode 100644 drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/internal-drysponge-avr.S
 delete mode 100644 drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/internal-drysponge.c
 delete mode 100644 drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/internal-drysponge.h
 delete mode 100644 drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/internal-util.h
 create mode 100644 drygascon/Implementations/crypto_hash/drygascon256/rhys/aead-common.c
 create mode 100644 drygascon/Implementations/crypto_hash/drygascon256/rhys/aead-common.h
 create mode 100644 drygascon/Implementations/crypto_hash/drygascon256/rhys/api.h
 create mode 100644 drygascon/Implementations/crypto_hash/drygascon256/rhys/drygascon.c
 create mode 100644 drygascon/Implementations/crypto_hash/drygascon256/rhys/drygascon.h
 create mode 100644 drygascon/Implementations/crypto_hash/drygascon256/rhys/hash.c
 create mode 100644 drygascon/Implementations/crypto_hash/drygascon256/rhys/internal-drysponge-avr.S
 create mode 100644 drygascon/Implementations/crypto_hash/drygascon256/rhys/internal-drysponge.c
 create mode 100644 drygascon/Implementations/crypto_hash/drygascon256/rhys/internal-drysponge.h
 create mode 100644 drygascon/Implementations/crypto_hash/drygascon256/rhys/internal-util.h
 delete mode 100644 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/aead-common.c
 delete mode 100644 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/aead-common.h
 delete mode 100644 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/api.h
 delete mode 100644 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/elephant.c
 delete mode 100644 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/elephant.h
 delete mode 100644 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/encrypt.c
 delete mode 100644 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-keccak-avr.S
 delete mode 100644 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-keccak.c
 delete mode 100644 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-keccak.h
 delete mode 100644 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-spongent-avr.S
 delete mode 100644 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-spongent.c
 delete mode 100644 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-spongent.h
 delete mode 100644 elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-util.h
 create mode 100644 elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-keccak-avr.S
 create mode 100644 elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-spongent-avr.S
 delete mode 100644 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/aead-common.c
 delete mode 100644 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/aead-common.h
 delete mode 100644 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/api.h
 delete mode 100644 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/elephant.c
 delete mode 100644 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/elephant.h
 delete mode 100644 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/encrypt.c
 delete mode 100644 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-keccak-avr.S
 delete mode 100644 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-keccak.c
 delete mode 100644 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-keccak.h
 delete mode 100644 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-spongent-avr.S
 delete mode 100644 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-spongent.c
 delete mode 100644 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-spongent.h
 delete mode 100644 elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-util.h
 create mode 100644 elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-keccak-avr.S
 create mode 100644 elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-spongent-avr.S
 delete mode 100644 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/aead-common.c
 delete mode 100644 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/aead-common.h
 delete mode 100644 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/api.h
 delete mode 100644 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/elephant.c
 delete mode 100644 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/elephant.h
 delete mode 100644 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/encrypt.c
 delete mode 100644 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-keccak-avr.S
 delete mode 100644 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-keccak.c
 delete mode 100644 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-keccak.h
 delete mode 100644 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-spongent-avr.S
 delete mode 100644 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-spongent.c
 delete mode 100644 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-spongent.h
 delete mode 100644 elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-util.h
 create mode 100644 elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-keccak-avr.S
 create mode
100644 elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-spongent-avr.S delete mode 100644 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/aead-common.c delete mode 100644 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/aead-common.h delete mode 100644 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/api.h delete mode 100644 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/encrypt.c delete mode 100644 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/estate.c delete mode 100644 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/estate.h delete mode 100644 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128-config.h delete mode 100644 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128.c delete mode 100644 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128.h delete mode 100644 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128n-avr.S delete mode 100644 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128n-full-avr.S delete mode 100644 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128n-small-avr.S delete mode 100644 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128n-tiny-avr.S delete mode 100644 estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-util.h create mode 100644 estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128-config.h create mode 100644 estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128n-avr.S create mode 100644 estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128n-full-avr.S create mode 100644 estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128n-small-avr.S create mode 100644 estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128n-tiny-avr.S delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/aead-common.c delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/aead-common.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/api.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/encrypt.c delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/forkae.c delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/forkae.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-forkae-paef.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-forkae-saef.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-forkskinny.c delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-forkskinny.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-skinnyutil.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-util.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/aead-common.c delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/aead-common.h delete mode 100644 
forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/api.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/encrypt.c delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/forkae.c delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/forkae.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-forkae-paef.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-forkae-saef.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-forkskinny.c delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-forkskinny.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-skinnyutil.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-util.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/aead-common.c delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/aead-common.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/api.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/encrypt.c delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/forkae.c delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/forkae.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-forkae-paef.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-forkae-saef.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-forkskinny.c delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-forkskinny.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-skinnyutil.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-util.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/aead-common.c delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/aead-common.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/api.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/encrypt.c delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/forkae.c delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/forkae.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-forkae-paef.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-forkae-saef.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-forkskinny.c delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-forkskinny.h delete mode 100644 forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-skinnyutil.h delete mode 100644 
forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-util.h delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/aead-common.c delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/aead-common.h delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/api.h delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/encrypt.c delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/forkae.c delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/forkae.h delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-forkae-paef.h delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-forkae-saef.h delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-forkskinny.c delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-forkskinny.h delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-skinnyutil.h delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-util.h delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/aead-common.c delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/aead-common.h delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/api.h delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/encrypt.c delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/forkae.c delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/forkae.h delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-forkae-paef.h delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-forkae-saef.h delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-forkskinny.c delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-forkskinny.h delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-skinnyutil.h delete mode 100644 forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-util.h delete mode 100644 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/aead-common.c delete mode 100644 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/aead-common.h delete mode 100644 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/api.h delete mode 100644 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/encrypt.c delete mode 100644 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/gift-cofb.c delete mode 100644 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/gift-cofb.h delete mode 100644 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128-config.h delete mode 100644 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128.c delete mode 100644 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128.h delete mode 100644 
gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128b-avr.S delete mode 100644 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128b-full-avr.S delete mode 100644 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128b-small-avr.S delete mode 100644 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128b-tiny-avr.S delete mode 100644 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-util.h create mode 100644 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128-config.h create mode 100644 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128b-avr.S create mode 100644 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128b-full-avr.S create mode 100644 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128b-small-avr.S create mode 100644 gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128b-tiny-avr.S delete mode 100644 gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/aead-common.c delete mode 100644 gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/aead-common.h delete mode 100644 gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/api.h delete mode 100644 gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/encrypt.c delete mode 100644 gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/gimli24.c delete mode 100644 gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/gimli24.h delete mode 100644 gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/internal-gimli24-avr.S delete mode 100644 gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/internal-gimli24.c delete mode 100644 gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/internal-gimli24.h delete mode 100644 gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/internal-util.h create mode 100644 gimli/Implementations/crypto_aead/gimli24v1/rhys/internal-gimli24-avr.S delete mode 100644 gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/aead-common.c delete mode 100644 gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/aead-common.h delete mode 100644 gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/api.h delete mode 100644 gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/gimli24.c delete mode 100644 gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/gimli24.h delete mode 100644 gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/hash.c delete mode 100644 gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/internal-gimli24-avr.S delete mode 100644 gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/internal-gimli24.c delete mode 100644 gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/internal-gimli24.h delete mode 100644 gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/internal-util.h create mode 100644 gimli/Implementations/crypto_hash/gimli24v1/rhys/aead-common.c create mode 100644 gimli/Implementations/crypto_hash/gimli24v1/rhys/aead-common.h create mode 100644 gimli/Implementations/crypto_hash/gimli24v1/rhys/api.h create mode 100644 gimli/Implementations/crypto_hash/gimli24v1/rhys/gimli24.c create mode 100644 gimli/Implementations/crypto_hash/gimli24v1/rhys/gimli24.h create mode 100644 gimli/Implementations/crypto_hash/gimli24v1/rhys/hash.c create mode 100644 gimli/Implementations/crypto_hash/gimli24v1/rhys/internal-gimli24-avr.S create mode 100644 gimli/Implementations/crypto_hash/gimli24v1/rhys/internal-gimli24.c create mode 100644 
gimli/Implementations/crypto_hash/gimli24v1/rhys/internal-gimli24.h create mode 100644 gimli/Implementations/crypto_hash/gimli24v1/rhys/internal-util.h delete mode 100644 grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/aead-common.c delete mode 100644 grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/aead-common.h delete mode 100644 grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/api.h delete mode 100644 grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/encrypt.c delete mode 100644 grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/grain128.c delete mode 100644 grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/grain128.h delete mode 100644 grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/internal-grain128.c delete mode 100644 grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/internal-grain128.h delete mode 100644 grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/internal-util.h delete mode 100644 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/aead-common.c delete mode 100644 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/aead-common.h delete mode 100644 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/api.h delete mode 100644 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/encrypt.c delete mode 100644 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/hyena.c delete mode 100644 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/hyena.h delete mode 100644 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128-config.h delete mode 100644 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128.c delete mode 100644 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128.h delete mode 100644 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128n-avr.S delete mode 100644 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128n-full-avr.S delete mode 100644 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128n-small-avr.S delete mode 100644 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128n-tiny-avr.S delete mode 100644 hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-util.h create mode 100644 hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128-config.h create mode 100644 hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128n-avr.S create mode 100644 hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128n-full-avr.S create mode 100644 hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128n-small-avr.S create mode 100644 hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128n-tiny-avr.S delete mode 100644 isap/Implementations/crypto_aead/isapa128av20/rhys-avr/aead-common.c delete mode 100644 isap/Implementations/crypto_aead/isapa128av20/rhys-avr/aead-common.h delete mode 100644 isap/Implementations/crypto_aead/isapa128av20/rhys-avr/api.h delete mode 100644 isap/Implementations/crypto_aead/isapa128av20/rhys-avr/encrypt.c delete mode 100644 isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-ascon-avr.S delete mode 100644 isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-ascon.c delete mode 100644 isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-ascon.h delete mode 100644 isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-isap.h delete mode 100644 isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-keccak-avr.S delete mode 100644 
isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-keccak.c delete mode 100644 isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-keccak.h delete mode 100644 isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-util.h delete mode 100644 isap/Implementations/crypto_aead/isapa128av20/rhys-avr/isap.c delete mode 100644 isap/Implementations/crypto_aead/isapa128av20/rhys-avr/isap.h create mode 100644 isap/Implementations/crypto_aead/isapa128av20/rhys/internal-ascon-avr.S create mode 100644 isap/Implementations/crypto_aead/isapa128av20/rhys/internal-keccak-avr.S delete mode 100644 isap/Implementations/crypto_aead/isapa128v20/rhys-avr/aead-common.c delete mode 100644 isap/Implementations/crypto_aead/isapa128v20/rhys-avr/aead-common.h delete mode 100644 isap/Implementations/crypto_aead/isapa128v20/rhys-avr/api.h delete mode 100644 isap/Implementations/crypto_aead/isapa128v20/rhys-avr/encrypt.c delete mode 100644 isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-ascon-avr.S delete mode 100644 isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-ascon.c delete mode 100644 isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-ascon.h delete mode 100644 isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-isap.h delete mode 100644 isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-keccak-avr.S delete mode 100644 isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-keccak.c delete mode 100644 isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-keccak.h delete mode 100644 isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-util.h delete mode 100644 isap/Implementations/crypto_aead/isapa128v20/rhys-avr/isap.c delete mode 100644 isap/Implementations/crypto_aead/isapa128v20/rhys-avr/isap.h create mode 100644 isap/Implementations/crypto_aead/isapa128v20/rhys/internal-ascon-avr.S create mode 100644 isap/Implementations/crypto_aead/isapa128v20/rhys/internal-keccak-avr.S delete mode 100644 isap/Implementations/crypto_aead/isapk128av20/rhys-avr/aead-common.c delete mode 100644 isap/Implementations/crypto_aead/isapk128av20/rhys-avr/aead-common.h delete mode 100644 isap/Implementations/crypto_aead/isapk128av20/rhys-avr/api.h delete mode 100644 isap/Implementations/crypto_aead/isapk128av20/rhys-avr/encrypt.c delete mode 100644 isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-ascon-avr.S delete mode 100644 isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-ascon.c delete mode 100644 isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-ascon.h delete mode 100644 isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-isap.h delete mode 100644 isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-keccak-avr.S delete mode 100644 isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-keccak.c delete mode 100644 isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-keccak.h delete mode 100644 isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-util.h delete mode 100644 isap/Implementations/crypto_aead/isapk128av20/rhys-avr/isap.c delete mode 100644 isap/Implementations/crypto_aead/isapk128av20/rhys-avr/isap.h create mode 100644 isap/Implementations/crypto_aead/isapk128av20/rhys/internal-ascon-avr.S create mode 100644 isap/Implementations/crypto_aead/isapk128av20/rhys/internal-keccak-avr.S delete mode 100644 isap/Implementations/crypto_aead/isapk128v20/rhys-avr/aead-common.c delete mode 100644 
isap/Implementations/crypto_aead/isapk128v20/rhys-avr/aead-common.h delete mode 100644 isap/Implementations/crypto_aead/isapk128v20/rhys-avr/api.h delete mode 100644 isap/Implementations/crypto_aead/isapk128v20/rhys-avr/encrypt.c delete mode 100644 isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-ascon-avr.S delete mode 100644 isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-ascon.c delete mode 100644 isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-ascon.h delete mode 100644 isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-isap.h delete mode 100644 isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-keccak-avr.S delete mode 100644 isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-keccak.c delete mode 100644 isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-keccak.h delete mode 100644 isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-util.h delete mode 100644 isap/Implementations/crypto_aead/isapk128v20/rhys-avr/isap.c delete mode 100644 isap/Implementations/crypto_aead/isapk128v20/rhys-avr/isap.h create mode 100644 isap/Implementations/crypto_aead/isapk128v20/rhys/internal-ascon-avr.S create mode 100644 isap/Implementations/crypto_aead/isapk128v20/rhys/internal-keccak-avr.S delete mode 100644 knot/Implementations/crypto_aead/knot128v1/rhys-avr/aead-common.c delete mode 100644 knot/Implementations/crypto_aead/knot128v1/rhys-avr/aead-common.h delete mode 100644 knot/Implementations/crypto_aead/knot128v1/rhys-avr/api.h delete mode 100644 knot/Implementations/crypto_aead/knot128v1/rhys-avr/encrypt.c delete mode 100644 knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot-256-avr.S delete mode 100644 knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot-384-avr.S delete mode 100644 knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot-512-avr.S delete mode 100644 knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot.c delete mode 100644 knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot.h delete mode 100644 knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-util.h delete mode 100644 knot/Implementations/crypto_aead/knot128v1/rhys-avr/knot-aead.c delete mode 100644 knot/Implementations/crypto_aead/knot128v1/rhys-avr/knot.h create mode 100644 knot/Implementations/crypto_aead/knot128v1/rhys/internal-knot-256-avr.S create mode 100644 knot/Implementations/crypto_aead/knot128v1/rhys/internal-knot-384-avr.S create mode 100644 knot/Implementations/crypto_aead/knot128v1/rhys/internal-knot-512-avr.S delete mode 100644 knot/Implementations/crypto_aead/knot128v2/rhys-avr/aead-common.c delete mode 100644 knot/Implementations/crypto_aead/knot128v2/rhys-avr/aead-common.h delete mode 100644 knot/Implementations/crypto_aead/knot128v2/rhys-avr/api.h delete mode 100644 knot/Implementations/crypto_aead/knot128v2/rhys-avr/encrypt.c delete mode 100644 knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot-256-avr.S delete mode 100644 knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot-384-avr.S delete mode 100644 knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot-512-avr.S delete mode 100644 knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot.c delete mode 100644 knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot.h delete mode 100644 knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-util.h delete mode 100644 
knot/Implementations/crypto_aead/knot128v2/rhys-avr/knot-aead.c delete mode 100644 knot/Implementations/crypto_aead/knot128v2/rhys-avr/knot.h create mode 100644 knot/Implementations/crypto_aead/knot128v2/rhys/internal-knot-256-avr.S create mode 100644 knot/Implementations/crypto_aead/knot128v2/rhys/internal-knot-384-avr.S create mode 100644 knot/Implementations/crypto_aead/knot128v2/rhys/internal-knot-512-avr.S delete mode 100644 knot/Implementations/crypto_aead/knot192/rhys-avr/aead-common.c delete mode 100644 knot/Implementations/crypto_aead/knot192/rhys-avr/aead-common.h delete mode 100644 knot/Implementations/crypto_aead/knot192/rhys-avr/api.h delete mode 100644 knot/Implementations/crypto_aead/knot192/rhys-avr/encrypt.c delete mode 100644 knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot-256-avr.S delete mode 100644 knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot-384-avr.S delete mode 100644 knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot-512-avr.S delete mode 100644 knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot.c delete mode 100644 knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot.h delete mode 100644 knot/Implementations/crypto_aead/knot192/rhys-avr/internal-util.h delete mode 100644 knot/Implementations/crypto_aead/knot192/rhys-avr/knot-aead.c delete mode 100644 knot/Implementations/crypto_aead/knot192/rhys-avr/knot.h create mode 100644 knot/Implementations/crypto_aead/knot192/rhys/internal-knot-256-avr.S create mode 100644 knot/Implementations/crypto_aead/knot192/rhys/internal-knot-384-avr.S create mode 100644 knot/Implementations/crypto_aead/knot192/rhys/internal-knot-512-avr.S delete mode 100644 knot/Implementations/crypto_aead/knot256/rhys-avr/aead-common.c delete mode 100644 knot/Implementations/crypto_aead/knot256/rhys-avr/aead-common.h delete mode 100644 knot/Implementations/crypto_aead/knot256/rhys-avr/api.h delete mode 100644 knot/Implementations/crypto_aead/knot256/rhys-avr/encrypt.c delete mode 100644 knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot-256-avr.S delete mode 100644 knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot-384-avr.S delete mode 100644 knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot-512-avr.S delete mode 100644 knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot.c delete mode 100644 knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot.h delete mode 100644 knot/Implementations/crypto_aead/knot256/rhys-avr/internal-util.h delete mode 100644 knot/Implementations/crypto_aead/knot256/rhys-avr/knot-aead.c delete mode 100644 knot/Implementations/crypto_aead/knot256/rhys-avr/knot.h create mode 100644 knot/Implementations/crypto_aead/knot256/rhys/internal-knot-256-avr.S create mode 100644 knot/Implementations/crypto_aead/knot256/rhys/internal-knot-384-avr.S create mode 100644 knot/Implementations/crypto_aead/knot256/rhys/internal-knot-512-avr.S delete mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys-avr/aead-common.c delete mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys-avr/aead-common.h delete mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys-avr/api.h delete mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys-avr/hash.c delete mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot-256-avr.S delete mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot-384-avr.S delete mode 100644 
knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot-512-avr.S delete mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot.c delete mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot.h delete mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-util.h delete mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys-avr/knot-hash.c delete mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys-avr/knot.h create mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys/aead-common.c create mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys/aead-common.h create mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys/api.h create mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys/hash.c create mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot-256-avr.S create mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot-384-avr.S create mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot-512-avr.S create mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot.c create mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot.h create mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys/internal-util.h create mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys/knot-hash.c create mode 100644 knot/Implementations/crypto_hash/knot256v1/rhys/knot.h delete mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys-avr/aead-common.c delete mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys-avr/aead-common.h delete mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys-avr/api.h delete mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys-avr/hash.c delete mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot-256-avr.S delete mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot-384-avr.S delete mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot-512-avr.S delete mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot.c delete mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot.h delete mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-util.h delete mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys-avr/knot-hash.c delete mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys-avr/knot.h create mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys/aead-common.c create mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys/aead-common.h create mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys/api.h create mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys/hash.c create mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot-256-avr.S create mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot-384-avr.S create mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot-512-avr.S create mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot.c create mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot.h create mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys/internal-util.h create mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys/knot-hash.c create mode 100644 knot/Implementations/crypto_hash/knot256v2/rhys/knot.h delete mode 100644 
knot/Implementations/crypto_hash/knot384/rhys-avr/aead-common.c delete mode 100644 knot/Implementations/crypto_hash/knot384/rhys-avr/aead-common.h delete mode 100644 knot/Implementations/crypto_hash/knot384/rhys-avr/api.h delete mode 100644 knot/Implementations/crypto_hash/knot384/rhys-avr/hash.c delete mode 100644 knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot-256-avr.S delete mode 100644 knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot-384-avr.S delete mode 100644 knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot-512-avr.S delete mode 100644 knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot.c delete mode 100644 knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot.h delete mode 100644 knot/Implementations/crypto_hash/knot384/rhys-avr/internal-util.h delete mode 100644 knot/Implementations/crypto_hash/knot384/rhys-avr/knot-hash.c delete mode 100644 knot/Implementations/crypto_hash/knot384/rhys-avr/knot.h create mode 100644 knot/Implementations/crypto_hash/knot384/rhys/aead-common.c create mode 100644 knot/Implementations/crypto_hash/knot384/rhys/aead-common.h create mode 100644 knot/Implementations/crypto_hash/knot384/rhys/api.h create mode 100644 knot/Implementations/crypto_hash/knot384/rhys/hash.c create mode 100644 knot/Implementations/crypto_hash/knot384/rhys/internal-knot-256-avr.S create mode 100644 knot/Implementations/crypto_hash/knot384/rhys/internal-knot-384-avr.S create mode 100644 knot/Implementations/crypto_hash/knot384/rhys/internal-knot-512-avr.S create mode 100644 knot/Implementations/crypto_hash/knot384/rhys/internal-knot.c create mode 100644 knot/Implementations/crypto_hash/knot384/rhys/internal-knot.h create mode 100644 knot/Implementations/crypto_hash/knot384/rhys/internal-util.h create mode 100644 knot/Implementations/crypto_hash/knot384/rhys/knot-hash.c create mode 100644 knot/Implementations/crypto_hash/knot384/rhys/knot.h delete mode 100644 knot/Implementations/crypto_hash/knot512/rhys-avr/aead-common.c delete mode 100644 knot/Implementations/crypto_hash/knot512/rhys-avr/aead-common.h delete mode 100644 knot/Implementations/crypto_hash/knot512/rhys-avr/api.h delete mode 100644 knot/Implementations/crypto_hash/knot512/rhys-avr/hash.c delete mode 100644 knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot-256-avr.S delete mode 100644 knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot-384-avr.S delete mode 100644 knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot-512-avr.S delete mode 100644 knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot.c delete mode 100644 knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot.h delete mode 100644 knot/Implementations/crypto_hash/knot512/rhys-avr/internal-util.h delete mode 100644 knot/Implementations/crypto_hash/knot512/rhys-avr/knot-hash.c delete mode 100644 knot/Implementations/crypto_hash/knot512/rhys-avr/knot.h create mode 100644 knot/Implementations/crypto_hash/knot512/rhys/aead-common.c create mode 100644 knot/Implementations/crypto_hash/knot512/rhys/aead-common.h create mode 100644 knot/Implementations/crypto_hash/knot512/rhys/api.h create mode 100644 knot/Implementations/crypto_hash/knot512/rhys/hash.c create mode 100644 knot/Implementations/crypto_hash/knot512/rhys/internal-knot-256-avr.S create mode 100644 knot/Implementations/crypto_hash/knot512/rhys/internal-knot-384-avr.S create mode 100644 knot/Implementations/crypto_hash/knot512/rhys/internal-knot-512-avr.S create mode 100644 
knot/Implementations/crypto_hash/knot512/rhys/internal-knot.c create mode 100644 knot/Implementations/crypto_hash/knot512/rhys/internal-knot.h create mode 100644 knot/Implementations/crypto_hash/knot512/rhys/internal-util.h create mode 100644 knot/Implementations/crypto_hash/knot512/rhys/knot-hash.c create mode 100644 knot/Implementations/crypto_hash/knot512/rhys/knot.h delete mode 100644 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/aead-common.c delete mode 100644 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/aead-common.h delete mode 100644 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/api.h delete mode 100644 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/encrypt.c delete mode 100644 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/internal-gift64-avr.S delete mode 100644 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/internal-gift64.c delete mode 100644 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/internal-gift64.h delete mode 100644 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/internal-util.h delete mode 100644 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/lotus-locus.c delete mode 100644 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/lotus-locus.h create mode 100644 lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/internal-gift64-avr.S delete mode 100644 lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/aead-common.c delete mode 100644 lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/aead-common.h delete mode 100644 lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/api.h delete mode 100644 lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/encrypt.c delete mode 100644 lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/internal-gift64-avr.S delete mode 100644 lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/internal-gift64.c delete mode 100644 lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/internal-gift64.h delete mode 100644 lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/internal-util.h delete mode 100644 lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/lotus-locus.c delete mode 100644 lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/lotus-locus.h create mode 100644 lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/internal-gift64-avr.S delete mode 100644 orange/Implementations/crypto_aead/orangezestv1/rhys-avr/aead-common.c delete mode 100644 orange/Implementations/crypto_aead/orangezestv1/rhys-avr/aead-common.h delete mode 100644 orange/Implementations/crypto_aead/orangezestv1/rhys-avr/api.h delete mode 100644 orange/Implementations/crypto_aead/orangezestv1/rhys-avr/encrypt.c delete mode 100644 orange/Implementations/crypto_aead/orangezestv1/rhys-avr/internal-photon256.c delete mode 100644 orange/Implementations/crypto_aead/orangezestv1/rhys-avr/internal-photon256.h delete mode 100644 orange/Implementations/crypto_aead/orangezestv1/rhys-avr/internal-util.h delete mode 100644 orange/Implementations/crypto_aead/orangezestv1/rhys-avr/orange.c delete mode 100644 orange/Implementations/crypto_aead/orangezestv1/rhys-avr/orange.h delete mode 100644 orange/Implementations/crypto_hash/orangishv1/rhys-avr/aead-common.c 
delete mode 100644 orange/Implementations/crypto_hash/orangishv1/rhys-avr/aead-common.h delete mode 100644 orange/Implementations/crypto_hash/orangishv1/rhys-avr/api.h delete mode 100644 orange/Implementations/crypto_hash/orangishv1/rhys-avr/hash.c delete mode 100644 orange/Implementations/crypto_hash/orangishv1/rhys-avr/internal-photon256.c delete mode 100644 orange/Implementations/crypto_hash/orangishv1/rhys-avr/internal-photon256.h delete mode 100644 orange/Implementations/crypto_hash/orangishv1/rhys-avr/internal-util.h delete mode 100644 orange/Implementations/crypto_hash/orangishv1/rhys-avr/orange.c delete mode 100644 orange/Implementations/crypto_hash/orangishv1/rhys-avr/orange.h create mode 100644 orange/Implementations/crypto_hash/orangishv1/rhys/aead-common.c create mode 100644 orange/Implementations/crypto_hash/orangishv1/rhys/aead-common.h create mode 100644 orange/Implementations/crypto_hash/orangishv1/rhys/api.h create mode 100644 orange/Implementations/crypto_hash/orangishv1/rhys/hash.c create mode 100644 orange/Implementations/crypto_hash/orangishv1/rhys/internal-photon256.c create mode 100644 orange/Implementations/crypto_hash/orangishv1/rhys/internal-photon256.h create mode 100644 orange/Implementations/crypto_hash/orangishv1/rhys/internal-util.h create mode 100644 orange/Implementations/crypto_hash/orangishv1/rhys/orange.c create mode 100644 orange/Implementations/crypto_hash/orangishv1/rhys/orange.h delete mode 100644 oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/aead-common.c delete mode 100644 oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/aead-common.h delete mode 100644 oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/api.h delete mode 100644 oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/encrypt.c delete mode 100644 oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/internal-simp-avr.S delete mode 100644 oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/internal-simp.c delete mode 100644 oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/internal-simp.h delete mode 100644 oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/internal-util.h delete mode 100644 oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/oribatida.c delete mode 100644 oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/oribatida.h create mode 100644 oribatida/Implementations/crypto_aead/oribatida192v12/rhys/internal-simp-avr.S delete mode 100644 oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/aead-common.c delete mode 100644 oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/aead-common.h delete mode 100644 oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/api.h delete mode 100644 oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/encrypt.c delete mode 100644 oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/internal-simp-avr.S delete mode 100644 oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/internal-simp.c delete mode 100644 oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/internal-simp.h delete mode 100644 oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/internal-util.h delete mode 100644 oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/oribatida.c delete mode 100644 oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/oribatida.h create mode 100644 oribatida/Implementations/crypto_aead/oribatida256v12/rhys/internal-simp-avr.S 
delete mode 100644 photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/aead-common.c delete mode 100644 photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/aead-common.h delete mode 100644 photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/api.h delete mode 100644 photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/encrypt.c delete mode 100644 photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/internal-photon256.c delete mode 100644 photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/internal-photon256.h delete mode 100644 photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/internal-util.h delete mode 100644 photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/photon-beetle.c delete mode 100644 photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/photon-beetle.h delete mode 100644 photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/aead-common.c delete mode 100644 photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/aead-common.h delete mode 100644 photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/api.h delete mode 100644 photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/encrypt.c delete mode 100644 photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/internal-photon256.c delete mode 100644 photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/internal-photon256.h delete mode 100644 photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/internal-util.h delete mode 100644 photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/photon-beetle.c delete mode 100644 photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/photon-beetle.h delete mode 100644 photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/aead-common.c delete mode 100644 photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/aead-common.h delete mode 100644 photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/api.h delete mode 100644 photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/hash.c delete mode 100644 photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/internal-photon256.c delete mode 100644 photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/internal-photon256.h delete mode 100644 photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/internal-util.h delete mode 100644 photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/photon-beetle.c delete mode 100644 photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/photon-beetle.h create mode 100644 photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/aead-common.c create mode 100644 photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/aead-common.h create mode 100644 photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/api.h create mode 100644 photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/hash.c create mode 100644 
photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/internal-photon256.c create mode 100644 photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/internal-photon256.h create mode 100644 photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/internal-util.h create mode 100644 photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/photon-beetle.c create mode 100644 photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/photon-beetle.h delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/aead-common.c delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/aead-common.h delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/api.h delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/encrypt.c delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-ocb.h delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-pyjamask-avr.S delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-pyjamask.c delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-pyjamask.h delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-util.h delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/pyjamask-128.c delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/pyjamask.h create mode 100644 pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-pyjamask-avr.S delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/aead-common.c delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/aead-common.h delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/api.h delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/encrypt.c delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-ocb.h delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-pyjamask-avr.S delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-pyjamask.c delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-pyjamask.h delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-util.h delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/pyjamask-96.c delete mode 100644 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/pyjamask.h create mode 100644 pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-pyjamask-avr.S delete mode 100644 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/aead-common.c delete mode 100644 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/aead-common.h delete mode 100644 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/api.h delete mode 100644 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/encrypt.c delete mode 100644 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-skinny128-avr.S delete mode 100644 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-skinny128.c delete mode 100644 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-skinny128.h delete mode 100644 
romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-skinnyutil.h delete mode 100644 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-util.h delete mode 100644 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/romulus.c delete mode 100644 romulus/Implementations/crypto_aead/romulusm1/rhys-avr/romulus.h create mode 100644 romulus/Implementations/crypto_aead/romulusm1/rhys/internal-skinny128-avr.S delete mode 100644 romulus/Implementations/crypto_aead/romulusm2/rhys-avr/aead-common.c delete mode 100644 romulus/Implementations/crypto_aead/romulusm2/rhys-avr/aead-common.h delete mode 100644 romulus/Implementations/crypto_aead/romulusm2/rhys-avr/api.h delete mode 100644 romulus/Implementations/crypto_aead/romulusm2/rhys-avr/encrypt.c delete mode 100644 romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-skinny128-avr.S delete mode 100644 romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-skinny128.c delete mode 100644 romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-skinny128.h delete mode 100644 romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-skinnyutil.h delete mode 100644 romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-util.h delete mode 100644 romulus/Implementations/crypto_aead/romulusm2/rhys-avr/romulus.c delete mode 100644 romulus/Implementations/crypto_aead/romulusm2/rhys-avr/romulus.h create mode 100644 romulus/Implementations/crypto_aead/romulusm2/rhys/internal-skinny128-avr.S delete mode 100644 romulus/Implementations/crypto_aead/romulusm3/rhys-avr/aead-common.c delete mode 100644 romulus/Implementations/crypto_aead/romulusm3/rhys-avr/aead-common.h delete mode 100644 romulus/Implementations/crypto_aead/romulusm3/rhys-avr/api.h delete mode 100644 romulus/Implementations/crypto_aead/romulusm3/rhys-avr/encrypt.c delete mode 100644 romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-skinny128-avr.S delete mode 100644 romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-skinny128.c delete mode 100644 romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-skinny128.h delete mode 100644 romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-skinnyutil.h delete mode 100644 romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-util.h delete mode 100644 romulus/Implementations/crypto_aead/romulusm3/rhys-avr/romulus.c delete mode 100644 romulus/Implementations/crypto_aead/romulusm3/rhys-avr/romulus.h create mode 100644 romulus/Implementations/crypto_aead/romulusm3/rhys/internal-skinny128-avr.S delete mode 100644 romulus/Implementations/crypto_aead/romulusn1/rhys-avr/aead-common.c delete mode 100644 romulus/Implementations/crypto_aead/romulusn1/rhys-avr/aead-common.h delete mode 100644 romulus/Implementations/crypto_aead/romulusn1/rhys-avr/api.h delete mode 100644 romulus/Implementations/crypto_aead/romulusn1/rhys-avr/encrypt.c delete mode 100644 romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-skinny128-avr.S delete mode 100644 romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-skinny128.c delete mode 100644 romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-skinny128.h delete mode 100644 romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-skinnyutil.h delete mode 100644 romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-util.h delete mode 100644 romulus/Implementations/crypto_aead/romulusn1/rhys-avr/romulus.c delete mode 100644 
romulus/Implementations/crypto_aead/romulusn1/rhys-avr/romulus.h create mode 100644 romulus/Implementations/crypto_aead/romulusn1/rhys/internal-skinny128-avr.S delete mode 100644 romulus/Implementations/crypto_aead/romulusn2/rhys-avr/aead-common.c delete mode 100644 romulus/Implementations/crypto_aead/romulusn2/rhys-avr/aead-common.h delete mode 100644 romulus/Implementations/crypto_aead/romulusn2/rhys-avr/api.h delete mode 100644 romulus/Implementations/crypto_aead/romulusn2/rhys-avr/encrypt.c delete mode 100644 romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-skinny128-avr.S delete mode 100644 romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-skinny128.c delete mode 100644 romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-skinny128.h delete mode 100644 romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-skinnyutil.h delete mode 100644 romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-util.h delete mode 100644 romulus/Implementations/crypto_aead/romulusn2/rhys-avr/romulus.c delete mode 100644 romulus/Implementations/crypto_aead/romulusn2/rhys-avr/romulus.h create mode 100644 romulus/Implementations/crypto_aead/romulusn2/rhys/internal-skinny128-avr.S delete mode 100644 romulus/Implementations/crypto_aead/romulusn3/rhys-avr/aead-common.c delete mode 100644 romulus/Implementations/crypto_aead/romulusn3/rhys-avr/aead-common.h delete mode 100644 romulus/Implementations/crypto_aead/romulusn3/rhys-avr/api.h delete mode 100644 romulus/Implementations/crypto_aead/romulusn3/rhys-avr/encrypt.c delete mode 100644 romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-skinny128-avr.S delete mode 100644 romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-skinny128.c delete mode 100644 romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-skinny128.h delete mode 100644 romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-skinnyutil.h delete mode 100644 romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-util.h delete mode 100644 romulus/Implementations/crypto_aead/romulusn3/rhys-avr/romulus.c delete mode 100644 romulus/Implementations/crypto_aead/romulusn3/rhys-avr/romulus.h create mode 100644 romulus/Implementations/crypto_aead/romulusn3/rhys/internal-skinny128-avr.S delete mode 100644 saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/aead-common.c delete mode 100644 saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/aead-common.h delete mode 100644 saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/api.h delete mode 100644 saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/encrypt.c delete mode 100644 saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/internal-util.h delete mode 100644 saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/saturnin.c delete mode 100644 saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/saturnin.h delete mode 100644 saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/aead-common.c delete mode 100644 saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/aead-common.h delete mode 100644 saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/api.h delete mode 100644 saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/encrypt.c delete mode 100644 saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/internal-util.h delete mode 100644 
saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/saturnin.c delete mode 100644 saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/saturnin.h delete mode 100644 saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/aead-common.c delete mode 100644 saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/aead-common.h delete mode 100644 saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/api.h delete mode 100644 saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/hash.c delete mode 100644 saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/internal-util.h delete mode 100644 saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/saturnin.c delete mode 100644 saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/saturnin.h create mode 100644 saturnin/Implementations/crypto_hash/saturninhashv2/rhys/aead-common.c create mode 100644 saturnin/Implementations/crypto_hash/saturninhashv2/rhys/aead-common.h create mode 100644 saturnin/Implementations/crypto_hash/saturninhashv2/rhys/api.h create mode 100644 saturnin/Implementations/crypto_hash/saturninhashv2/rhys/hash.c create mode 100644 saturnin/Implementations/crypto_hash/saturninhashv2/rhys/internal-util.h create mode 100644 saturnin/Implementations/crypto_hash/saturninhashv2/rhys/saturnin.c create mode 100644 saturnin/Implementations/crypto_hash/saturninhashv2/rhys/saturnin.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/aead-common.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/aead-common.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/api.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/encrypt.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-skinny128-avr.S delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-skinny128.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-skinny128.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-skinnyutil.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-util.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/skinny-aead.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/skinny-aead.h create mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/internal-skinny128-avr.S delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/aead-common.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/aead-common.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/api.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/encrypt.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-skinny128-avr.S delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-skinny128.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-skinny128.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-skinnyutil.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-util.h delete mode 100644 
skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/skinny-aead.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/skinny-aead.h create mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/internal-skinny128-avr.S delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/aead-common.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/aead-common.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/api.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/encrypt.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-skinny128-avr.S delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-skinny128.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-skinny128.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-skinnyutil.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-util.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/skinny-aead.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/skinny-aead.h create mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/internal-skinny128-avr.S delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/aead-common.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/aead-common.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/api.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/encrypt.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-skinny128-avr.S delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-skinny128.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-skinny128.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-skinnyutil.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-util.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/skinny-aead.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/skinny-aead.h create mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/internal-skinny128-avr.S delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/aead-common.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/aead-common.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/api.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/encrypt.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-skinny128-avr.S delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-skinny128.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-skinny128.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-skinnyutil.h delete mode 100644 
skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-util.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/skinny-aead.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/skinny-aead.h create mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/internal-skinny128-avr.S delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/aead-common.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/aead-common.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/api.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/encrypt.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-skinny128-avr.S delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-skinny128.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-skinny128.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-skinnyutil.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-util.h delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/skinny-aead.c delete mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/skinny-aead.h create mode 100644 skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/internal-skinny128-avr.S delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/aead-common.c delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/aead-common.h delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/api.h delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/hash.c delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-skinny128-avr.S delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-skinny128.c delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-skinny128.h delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-skinnyutil.h delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-util.h delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/skinny-hash.c delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/skinny-hash.h create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/aead-common.c create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/aead-common.h create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/api.h create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/hash.c create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-skinny128-avr.S create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-skinny128.c create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-skinny128.h create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-skinnyutil.h create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-util.h create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/skinny-hash.c create mode 100644 
skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/skinny-hash.h delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/aead-common.c delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/aead-common.h delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/api.h delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/hash.c delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-skinny128-avr.S delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-skinny128.c delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-skinny128.h delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-skinnyutil.h delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-util.h delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/skinny-hash.c delete mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/skinny-hash.h create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/aead-common.c create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/aead-common.h create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/api.h create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/hash.c create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-skinny128-avr.S create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-skinny128.c create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-skinny128.h create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-skinnyutil.h create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-util.h create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/skinny-hash.c create mode 100644 skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/skinny-hash.h delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/aead-common.c delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/aead-common.h delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/api.h delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/encrypt.c delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/internal-sparkle-avr.S delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/internal-sparkle.c delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/internal-sparkle.h delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/internal-util.h delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/sparkle.c delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/sparkle.h create mode 100644 sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys/internal-sparkle-avr.S delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/aead-common.c delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/aead-common.h delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/api.h delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/encrypt.c delete mode 100644 
sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/internal-sparkle-avr.S delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/internal-sparkle.c delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/internal-sparkle.h delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/internal-util.h delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/sparkle.c delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/sparkle.h create mode 100644 sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys/internal-sparkle-avr.S delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/aead-common.c delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/aead-common.h delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/api.h delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/encrypt.c delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/internal-sparkle-avr.S delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/internal-sparkle.c delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/internal-sparkle.h delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/internal-util.h delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/sparkle.c delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/sparkle.h create mode 100644 sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys/internal-sparkle-avr.S delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/aead-common.c delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/aead-common.h delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/api.h delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/encrypt.c delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/internal-sparkle-avr.S delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/internal-sparkle.c delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/internal-sparkle.h delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/internal-util.h delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/sparkle.c delete mode 100644 sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/sparkle.h create mode 100644 sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys/internal-sparkle-avr.S delete mode 100644 sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/aead-common.c delete mode 100644 sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/aead-common.h delete mode 100644 sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/api.h delete mode 100644 sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/hash.c delete mode 100644 sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/internal-sparkle-avr.S delete mode 100644 sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/internal-sparkle.c delete mode 100644 sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/internal-sparkle.h delete mode 100644 sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/internal-util.h delete mode 100644 
sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/sparkle.c delete mode 100644 sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/sparkle.h create mode 100644 sparkle/Implementations/crypto_hash/esch256v1/rhys/aead-common.c create mode 100644 sparkle/Implementations/crypto_hash/esch256v1/rhys/aead-common.h create mode 100644 sparkle/Implementations/crypto_hash/esch256v1/rhys/api.h create mode 100644 sparkle/Implementations/crypto_hash/esch256v1/rhys/hash.c create mode 100644 sparkle/Implementations/crypto_hash/esch256v1/rhys/internal-sparkle-avr.S create mode 100644 sparkle/Implementations/crypto_hash/esch256v1/rhys/internal-sparkle.c create mode 100644 sparkle/Implementations/crypto_hash/esch256v1/rhys/internal-sparkle.h create mode 100644 sparkle/Implementations/crypto_hash/esch256v1/rhys/internal-util.h create mode 100644 sparkle/Implementations/crypto_hash/esch256v1/rhys/sparkle.c create mode 100644 sparkle/Implementations/crypto_hash/esch256v1/rhys/sparkle.h delete mode 100644 sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/aead-common.c delete mode 100644 sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/aead-common.h delete mode 100644 sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/api.h delete mode 100644 sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/hash.c delete mode 100644 sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/internal-sparkle-avr.S delete mode 100644 sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/internal-sparkle.c delete mode 100644 sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/internal-sparkle.h delete mode 100644 sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/internal-util.h delete mode 100644 sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/sparkle.c delete mode 100644 sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/sparkle.h create mode 100644 sparkle/Implementations/crypto_hash/esch384v1/rhys/aead-common.c create mode 100644 sparkle/Implementations/crypto_hash/esch384v1/rhys/aead-common.h create mode 100644 sparkle/Implementations/crypto_hash/esch384v1/rhys/api.h create mode 100644 sparkle/Implementations/crypto_hash/esch384v1/rhys/hash.c create mode 100644 sparkle/Implementations/crypto_hash/esch384v1/rhys/internal-sparkle-avr.S create mode 100644 sparkle/Implementations/crypto_hash/esch384v1/rhys/internal-sparkle.c create mode 100644 sparkle/Implementations/crypto_hash/esch384v1/rhys/internal-sparkle.h create mode 100644 sparkle/Implementations/crypto_hash/esch384v1/rhys/internal-util.h create mode 100644 sparkle/Implementations/crypto_hash/esch384v1/rhys/sparkle.c create mode 100644 sparkle/Implementations/crypto_hash/esch384v1/rhys/sparkle.h delete mode 100644 spix/Implementations/crypto_aead/spix128v1/rhys-avr/aead-common.c delete mode 100644 spix/Implementations/crypto_aead/spix128v1/rhys-avr/aead-common.h delete mode 100644 spix/Implementations/crypto_aead/spix128v1/rhys-avr/api.h delete mode 100644 spix/Implementations/crypto_aead/spix128v1/rhys-avr/encrypt.c delete mode 100644 spix/Implementations/crypto_aead/spix128v1/rhys-avr/internal-sliscp-256-spix-avr.S delete mode 100644 spix/Implementations/crypto_aead/spix128v1/rhys-avr/internal-sliscp-light.c delete mode 100644 spix/Implementations/crypto_aead/spix128v1/rhys-avr/internal-sliscp-light.h delete mode 100644 spix/Implementations/crypto_aead/spix128v1/rhys-avr/internal-util.h delete mode 100644 spix/Implementations/crypto_aead/spix128v1/rhys-avr/spix.c delete mode 100644 
spix/Implementations/crypto_aead/spix128v1/rhys-avr/spix.h create mode 100644 spix/Implementations/crypto_aead/spix128v1/rhys/internal-sliscp-256-spix-avr.S delete mode 100644 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/aead-common.c delete mode 100644 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/aead-common.h delete mode 100644 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/api.h delete mode 100644 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/encrypt.c delete mode 100644 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-sliscp-192-avr.S delete mode 100644 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-sliscp-256-spoc-avr.S delete mode 100644 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-sliscp-light.c delete mode 100644 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-sliscp-light.h delete mode 100644 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-util.h delete mode 100644 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/spoc.c delete mode 100644 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/spoc.h create mode 100644 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-sliscp-192-avr.S create mode 100644 spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-sliscp-256-spoc-avr.S delete mode 100644 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/aead-common.c delete mode 100644 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/aead-common.h delete mode 100644 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/api.h delete mode 100644 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/encrypt.c delete mode 100644 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-sliscp-192-avr.S delete mode 100644 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-sliscp-256-spoc-avr.S delete mode 100644 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-sliscp-light.c delete mode 100644 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-sliscp-light.h delete mode 100644 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-util.h delete mode 100644 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/spoc.c delete mode 100644 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/spoc.h create mode 100644 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-sliscp-192-avr.S create mode 100644 spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-sliscp-256-spoc-avr.S delete mode 100644 spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/aead-common.c delete mode 100644 spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/aead-common.h delete mode 100644 spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/api.h delete mode 100644 spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/encrypt.c delete mode 100644 spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/internal-spook.c delete mode 100644 spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/internal-spook.h delete mode 100644 spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/internal-util.h delete mode 100644 spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/spook.c delete mode 100644 
spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/spook.h delete mode 100644 spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/aead-common.c delete mode 100644 spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/aead-common.h delete mode 100644 spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/api.h delete mode 100644 spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/encrypt.c delete mode 100644 spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/internal-spook.c delete mode 100644 spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/internal-spook.h delete mode 100644 spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/internal-util.h delete mode 100644 spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/spook.c delete mode 100644 spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/spook.h delete mode 100644 spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/aead-common.c delete mode 100644 spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/aead-common.h delete mode 100644 spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/api.h delete mode 100644 spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/encrypt.c delete mode 100644 spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/internal-spook.c delete mode 100644 spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/internal-spook.h delete mode 100644 spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/internal-util.h delete mode 100644 spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/spook.c delete mode 100644 spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/spook.h delete mode 100644 spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/aead-common.c delete mode 100644 spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/aead-common.h delete mode 100644 spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/api.h delete mode 100644 spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/encrypt.c delete mode 100644 spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/internal-spook.c delete mode 100644 spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/internal-spook.h delete mode 100644 spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/internal-util.h delete mode 100644 spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/spook.c delete mode 100644 spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/spook.h delete mode 100644 subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/aead-common.c delete mode 100644 subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/aead-common.h delete mode 100644 subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/api.h delete mode 100644 subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/encrypt.c delete mode 100644 subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/internal-subterranean.c delete mode 100644 subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/internal-subterranean.h delete mode 100644 subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/internal-util.h delete mode 100644 subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/subterranean.c delete mode 100644 subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/subterranean.h delete mode 100644 subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/aead-common.c delete mode 100644 
subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/aead-common.h delete mode 100644 subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/api.h delete mode 100644 subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/hash.c delete mode 100644 subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/internal-subterranean.c delete mode 100644 subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/internal-subterranean.h delete mode 100644 subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/internal-util.h delete mode 100644 subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/subterranean.c delete mode 100644 subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/subterranean.h create mode 100644 subterranean/Implementations/crypto_hash/subterraneanv1/rhys/aead-common.c create mode 100644 subterranean/Implementations/crypto_hash/subterraneanv1/rhys/aead-common.h create mode 100644 subterranean/Implementations/crypto_hash/subterraneanv1/rhys/api.h create mode 100644 subterranean/Implementations/crypto_hash/subterraneanv1/rhys/hash.c create mode 100644 subterranean/Implementations/crypto_hash/subterraneanv1/rhys/internal-subterranean.c create mode 100644 subterranean/Implementations/crypto_hash/subterraneanv1/rhys/internal-subterranean.h create mode 100644 subterranean/Implementations/crypto_hash/subterraneanv1/rhys/internal-util.h create mode 100644 subterranean/Implementations/crypto_hash/subterraneanv1/rhys/subterranean.c create mode 100644 subterranean/Implementations/crypto_hash/subterraneanv1/rhys/subterranean.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/aead-common.c delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/aead-common.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/api.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/encrypt.c delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128-config.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128.c delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128b-avr.S delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128b-full-avr.S delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128b-small-avr.S delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128b-tiny-avr.S delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-util.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/sundae-gift.c delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/sundae-gift.h create mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128-config.h create mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128b-avr.S create mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128b-full-avr.S create mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128b-small-avr.S create mode 100644 
sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128b-tiny-avr.S delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/aead-common.c delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/aead-common.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/api.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/encrypt.c delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128-config.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128.c delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128b-avr.S delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128b-full-avr.S delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128b-small-avr.S delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128b-tiny-avr.S delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-util.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/sundae-gift.c delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/sundae-gift.h create mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128-config.h create mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128b-avr.S create mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128b-full-avr.S create mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128b-small-avr.S create mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128b-tiny-avr.S delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/aead-common.c delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/aead-common.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/api.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/encrypt.c delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128-config.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128.c delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128b-avr.S delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128b-full-avr.S delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128b-small-avr.S delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128b-tiny-avr.S delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-util.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/sundae-gift.c delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/sundae-gift.h create mode 100644 
sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128-config.h create mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128b-avr.S create mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128b-full-avr.S create mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128b-small-avr.S create mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128b-tiny-avr.S delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/aead-common.c delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/aead-common.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/api.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/encrypt.c delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128-config.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128.c delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128b-avr.S delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128b-full-avr.S delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128b-small-avr.S delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128b-tiny-avr.S delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-util.h delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/sundae-gift.c delete mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/sundae-gift.h create mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128-config.h create mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128b-avr.S create mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128b-full-avr.S create mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128b-small-avr.S create mode 100644 sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128b-tiny-avr.S delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/aead-common.c delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/aead-common.h delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/api.h delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/encrypt.c delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/internal-tinyjambu-avr.S delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/internal-tinyjambu.c delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/internal-tinyjambu.h delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/internal-util.h delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/tinyjambu.c delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/tinyjambu.h create mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys/internal-tinyjambu-avr.S delete mode 100644 
tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/aead-common.c delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/aead-common.h delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/api.h delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/encrypt.c delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/internal-tinyjambu-avr.S delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/internal-tinyjambu.c delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/internal-tinyjambu.h delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/internal-util.h delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/tinyjambu.c delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/tinyjambu.h create mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys/internal-tinyjambu-avr.S delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/aead-common.c delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/aead-common.h delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/api.h delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/encrypt.c delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/internal-tinyjambu-avr.S delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/internal-tinyjambu.c delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/internal-tinyjambu.h delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/internal-util.h delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/tinyjambu.c delete mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/tinyjambu.h create mode 100644 tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys/internal-tinyjambu-avr.S delete mode 100644 wage/Implementations/crypto_aead/wageae128v1/rhys-avr/aead-common.c delete mode 100644 wage/Implementations/crypto_aead/wageae128v1/rhys-avr/aead-common.h delete mode 100644 wage/Implementations/crypto_aead/wageae128v1/rhys-avr/api.h delete mode 100644 wage/Implementations/crypto_aead/wageae128v1/rhys-avr/encrypt.c delete mode 100644 wage/Implementations/crypto_aead/wageae128v1/rhys-avr/internal-util.h delete mode 100644 wage/Implementations/crypto_aead/wageae128v1/rhys-avr/internal-wage.c delete mode 100644 wage/Implementations/crypto_aead/wageae128v1/rhys-avr/internal-wage.h delete mode 100644 wage/Implementations/crypto_aead/wageae128v1/rhys-avr/wage.c delete mode 100644 wage/Implementations/crypto_aead/wageae128v1/rhys-avr/wage.h delete mode 100644 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/aead-common.c delete mode 100644 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/aead-common.h delete mode 100644 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/api.h delete mode 100644 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/encrypt.c delete mode 100644 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/internal-util.h delete mode 100644 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/internal-xoodoo-avr.S delete mode 100644 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/internal-xoodoo.c delete mode 100644 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/internal-xoodoo.h delete mode 100644 
xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/xoodyak.c delete mode 100644 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/xoodyak.h create mode 100644 xoodyak/Implementations/crypto_aead/xoodyakv1/rhys/internal-xoodoo-avr.S delete mode 100644 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/aead-common.c delete mode 100644 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/aead-common.h delete mode 100644 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/api.h delete mode 100644 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/hash.c delete mode 100644 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/internal-util.h delete mode 100644 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/internal-xoodoo-avr.S delete mode 100644 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/internal-xoodoo.c delete mode 100644 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/internal-xoodoo.h delete mode 100644 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/xoodyak.c delete mode 100644 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/xoodyak.h create mode 100644 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/aead-common.c create mode 100644 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/aead-common.h create mode 100644 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/api.h create mode 100644 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/hash.c create mode 100644 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/internal-util.h create mode 100644 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/internal-xoodoo-avr.S create mode 100644 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/internal-xoodoo.c create mode 100644 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/internal-xoodoo.h create mode 100644 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/xoodyak.c create mode 100644 xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/xoodyak.h diff --git a/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/ace.c b/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/ace.c deleted file mode 100644 index 7a68306..0000000 --- a/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/ace.c +++ /dev/null @@ -1,339 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "ace.h" -#include "internal-sliscp-light.h" -#include "internal-util.h" -#include - -/** - * \brief Size of the state for the internal ACE permutation. 
- */ -#define ACE_STATE_SIZE SLISCP_LIGHT320_STATE_SIZE - -/** - * \brief Rate for absorbing data into the ACE state and for - * squeezing data out again. - */ -#define ACE_RATE 8 - -aead_cipher_t const ace_cipher = { - "ACE", - ACE_KEY_SIZE, - ACE_NONCE_SIZE, - ACE_TAG_SIZE, - AEAD_FLAG_NONE, - ace_aead_encrypt, - ace_aead_decrypt -}; - -aead_hash_algorithm_t const ace_hash_algorithm = { - "ACE-HASH", - sizeof(ace_hash_state_t), - ACE_HASH_SIZE, - AEAD_FLAG_NONE, - ace_hash, - (aead_hash_init_t)ace_hash_init, - (aead_hash_update_t)ace_hash_update, - (aead_hash_finalize_t)ace_hash_finalize, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/* Indices of where a rate byte is located in the state. We don't - * need this array any more because sliscp_light320_permute() operates - * on byte-swapped states where the rate bytes are contiguous in the - * first 8 bytes */ -/* -static unsigned char const ace_rate_posn[8] = { - 0, 1, 2, 3, 16, 17, 18, 19 -}; -*/ - -/** - * \brief Initializes the ACE state. - * - * \param state ACE permutation state. - * \param k Points to the 128-bit key. - * \param npub Points to the 128-bit nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - */ -static void ace_init - (unsigned char state[ACE_STATE_SIZE], - const unsigned char *k, const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned temp; - - /* Initialize the state by interleaving the key and nonce */ - memcpy(state, k, 8); - memcpy(state + 8, npub, 8); - memcpy(state + 16, k + 8, 8); - memset(state + 24, 0, 8); - memcpy(state + 32, npub + 8, 8); - - /* Swap some of the state bytes to make the rate bytes contiguous */ - sliscp_light320_swap(state); - - /* Run the permutation to scramble the initial state */ - sliscp_light320_permute(state); - - /* Absorb the key in two further permutation operations */ - lw_xor_block(state, k, 8); - sliscp_light320_permute(state); - lw_xor_block(state, k + 8, 8); - sliscp_light320_permute(state); - - /* Absorb the associated data into the state */ - if (adlen != 0) { - while (adlen >= ACE_RATE) { - lw_xor_block(state, ad, ACE_RATE); - state[ACE_STATE_SIZE - 1] ^= 0x01; /* domain separation */ - sliscp_light320_permute(state); - ad += ACE_RATE; - adlen -= ACE_RATE; - } - temp = (unsigned)adlen; - lw_xor_block(state, ad, temp); - state[temp] ^= 0x80; /* padding */ - state[ACE_STATE_SIZE - 1] ^= 0x01; /* domain separation */ - sliscp_light320_permute(state); - } -} - -/** - * \brief Finalizes the ACE encryption or decryption operation. - * - * \param state ACE permutation state. - * \param k Points to the 128-bit key. - * \param tag Points to the 16 byte buffer to receive the computed tag. 
- */ -static void ace_finalize - (unsigned char state[ACE_STATE_SIZE], const unsigned char *k, - unsigned char *tag) -{ - /* Absorb the key into the state again */ - lw_xor_block(state, k, 8); - sliscp_light320_permute(state); - lw_xor_block(state, k + 8, 8); - sliscp_light320_permute(state); - - /* Swap the state bytes back to the canonical order */ - sliscp_light320_swap(state); - - /* Copy out the authentication tag */ - memcpy(tag, state, 8); - memcpy(tag + 8, state + 16, 8); -} - -int ace_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[ACE_STATE_SIZE]; - unsigned temp; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ACE_TAG_SIZE; - - /* Initialize the ACE state and absorb the associated data */ - ace_init(state, k, npub, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - while (mlen >= ACE_RATE) { - lw_xor_block_2_dest(c, state, m, ACE_RATE); - state[ACE_STATE_SIZE - 1] ^= 0x02; /* domain separation */ - sliscp_light320_permute(state); - c += ACE_RATE; - m += ACE_RATE; - mlen -= ACE_RATE; - } - temp = (unsigned)mlen; - lw_xor_block_2_dest(c, state, m, temp); - state[temp] ^= 0x80; /* padding */ - state[ACE_STATE_SIZE - 1] ^= 0x02; /* domain separation */ - sliscp_light320_permute(state); - c += mlen; - - /* Generate the authentication tag */ - ace_finalize(state, k, c); - return 0; -} - -int ace_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[ACE_STATE_SIZE]; - unsigned char *mtemp = m; - unsigned temp; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ACE_TAG_SIZE) - return -1; - *mlen = clen - ACE_TAG_SIZE; - - /* Initialize the ACE state and absorb the associated data */ - ace_init(state, k, npub, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= ACE_TAG_SIZE; - while (clen >= ACE_RATE) { - lw_xor_block_swap(m, state, c, ACE_RATE); - state[ACE_STATE_SIZE - 1] ^= 0x02; /* domain separation */ - sliscp_light320_permute(state); - c += ACE_RATE; - m += ACE_RATE; - clen -= ACE_RATE; - } - temp = (unsigned)clen; - lw_xor_block_swap(m, state, c, temp); - state[temp] ^= 0x80; /* padding */ - state[ACE_STATE_SIZE - 1] ^= 0x02; /* domain separation */ - sliscp_light320_permute(state); - c += clen; - - /* Finalize the ACE state and compare against the authentication tag */ - ace_finalize(state, k, state); - return aead_check_tag(mtemp, *mlen, state, c, ACE_TAG_SIZE); -} - -/* Pre-hashed version of the ACE-HASH initialization vector */ -static unsigned char const ace_hash_iv[ACE_STATE_SIZE] = { - 0xb9, 0x7d, 0xda, 0x3f, 0x66, 0x2c, 0xd1, 0xa6, - 0x65, 0xd1, 0x80, 0xd6, 0x49, 0xdc, 0xa1, 0x8c, - 0x0c, 0x5f, 0x0e, 0xca, 0x70, 0x37, 0x58, 0x75, - 0x29, 0x7d, 0xb0, 0xb0, 0x72, 0x73, 0xce, 0xa8, - 0x99, 0x71, 0xde, 0x8a, 0x9a, 0x65, 0x72, 0x24 -}; - -int ace_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - unsigned char state[ACE_STATE_SIZE]; - unsigned temp; - - /* Load the initialization vector and hash it, which can be pre-computed */ - /* - memset(state, 0, sizeof(state)); - state[8] = 0x80; 
- state[9] = 0x40; - state[10] = 0x40; - sliscp_light320_swap(state); - sliscp_light320_permute(state); - */ - memcpy(state, ace_hash_iv, ACE_STATE_SIZE); - - /* Absorb the input data */ - while (inlen >= ACE_RATE) { - lw_xor_block(state, in, ACE_RATE); - sliscp_light320_permute(state); - in += ACE_RATE; - inlen -= ACE_RATE; - } - temp = (unsigned)inlen; - lw_xor_block(state, in, temp); - state[temp] ^= 0x80; /* padding */ - sliscp_light320_permute(state); - - /* Squeeze out the hash value */ - memcpy(out, state, 8); - for (temp = 0; temp < 3; ++temp) { - out += 8; - sliscp_light320_permute(state); - memcpy(out, state, 8); - } - return 0; -} - -void ace_hash_init(ace_hash_state_t *state) -{ - memcpy(state->s.state, ace_hash_iv, ACE_STATE_SIZE); - state->s.count = 0; -} - -void ace_hash_update - (ace_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - unsigned len; - - /* Handle the left-over rate block from last time */ - if (state->s.count != 0) { - len = ACE_RATE - state->s.count; - if (len > inlen) - len = (unsigned)inlen; - lw_xor_block(state->s.state + state->s.count, in, len); - in += len; - inlen -= len; - state->s.count += len; - if (state->s.count >= ACE_RATE) { - sliscp_light320_permute(state->s.state); - state->s.count = 0; - } else { - /* Not enough input data yet to fill up the whole block */ - return; - } - } - - /* Process as many full rate blocks as we can */ - while (inlen >= ACE_RATE) { - lw_xor_block(state->s.state, in, ACE_RATE); - sliscp_light320_permute(state->s.state); - in += ACE_RATE; - inlen -= ACE_RATE; - } - - /* Handle any left-over data */ - len = (unsigned)inlen; - lw_xor_block(state->s.state, in, len); - state->s.count = len; -} - -void ace_hash_finalize(ace_hash_state_t *state, unsigned char *out) -{ - unsigned temp; - - /* Pad and hash the final input block */ - state->s.state[state->s.count] ^= 0x80; - sliscp_light320_permute(state->s.state); - state->s.count = 0; - - /* Squeeze out the hash value */ - memcpy(out, state->s.state, 9); - for (temp = 0; temp < 3; ++temp) { - out += 8; - sliscp_light320_permute(state->s.state); - memcpy(out, state->s.state, 8); - } -} diff --git a/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/ace.h b/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/ace.h deleted file mode 100644 index 4497927..0000000 --- a/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/ace.h +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ACE_H -#define LWCRYPTO_ACE_H - -#include "aead-common.h" - -/** - * \file ace.h - * \brief ACE authenticated encryption algorithm. - * - * ACE is an authenticated encryption algorithm with a 128-bit key, - * a 128-bit nonce, and a 128-bit tag. It uses a duplex construction - * on top of a 320-bit permutation. The permutation is a generalised - * version of sLiSCP-light, extended from 256 bits to 320 bits. - * ACE also has a companion hash algorithm with a 256-bit output. - * - * References: https://uwaterloo.ca/communications-security-lab/lwc/ace - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for ACE. - */ -#define ACE_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for ACE. - */ -#define ACE_TAG_SIZE 16 - -/** - * \brief Size of the nonce for ACE. - */ -#define ACE_NONCE_SIZE 16 - -/** - * \brief Size of the hash output for ACE-HASH. - */ -#define ACE_HASH_SIZE 32 - -/** - * \brief Meta-information block for the ACE cipher. - */ -extern aead_cipher_t const ace_cipher; - -/** - * \brief Meta-information block for the ACE-HASH hash algorithm. - */ -extern aead_hash_algorithm_t const ace_hash_algorithm; - -/** - * \brief State information for the ACE-HASH incremental hash mode. - */ -typedef union -{ - struct { - unsigned char state[40]; /**< Current hash state */ - unsigned char count; /**< Number of bytes in the current block */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} ace_hash_state_t; - -/** - * \brief Encrypts and authenticates a packet with ACE. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa ace_aead_decrypt() - */ -int ace_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ACE. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa ace_aead_encrypt() - */ -int ace_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with ACE-HASH to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * ACE_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int ace_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an ACE-HASH hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa ace_hash_update(), ace_hash_finalize(), ace_hash() - */ -void ace_hash_init(ace_hash_state_t *state); - -/** - * \brief Updates the ACE-HASH state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa ace_hash_init(), ace_hash_finalize() - */ -void ace_hash_update - (ace_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an ACE-HASH hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 32-byte hash value. - * - * \sa ace_hash_init(), ace_hash_update() - */ -void ace_hash_finalize(ace_hash_state_t *state, unsigned char *out); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/aead-common.c b/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
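The ace.h header above documents the complete AEAD interface. A minimal round-trip sketch (not part of this patch) showing how a caller is expected to drive ace_aead_encrypt() and ace_aead_decrypt() follows; the key, nonce, associated data, and message are placeholder demo values, and a real caller must never reuse a nonce under the same key.

    /* Illustrative only: round-trips a short message through the ACE AEAD
     * API declared in ace.h.  Key, nonce, associated data and message are
     * placeholder demo values. */
    #include <stdio.h>
    #include <string.h>
    #include "ace.h"

    int main(void)
    {
        unsigned char key[ACE_KEY_SIZE] = {0};     /* demo key: all zeroes */
        unsigned char nonce[ACE_NONCE_SIZE] = {0}; /* demo nonce: unique per key in practice */
        static const unsigned char ad[] = "header";
        static const unsigned char msg[] = "attack at dawn";
        unsigned char ct[sizeof(msg) - 1 + ACE_TAG_SIZE];
        unsigned char pt[sizeof(msg) - 1];
        unsigned long long ctlen, ptlen;

        /* Encrypt: the 16-byte tag is appended to the ciphertext */
        ace_aead_encrypt(ct, &ctlen, msg, sizeof(msg) - 1,
                         ad, sizeof(ad) - 1, NULL, nonce, key);

        /* Decrypt and verify: returns 0 only if the tag checks out */
        if (ace_aead_decrypt(pt, &ptlen, NULL, ct, ctlen,
                             ad, sizeof(ad) - 1, nonce, key) != 0) {
            printf("tag check failed\n");
            return 1;
        }
        printf("recovered %llu bytes\n", ptlen);
        return 0;
    }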
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/aead-common.h b/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. 
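aead_check_tag() above folds the byte comparison into a single mask so that neither the running time nor the surviving plaintext reveals where the tags differ: if every byte matches, accum stays 0, the (accum - 1) >> 8 step yields an all-ones mask that leaves the plaintext alone and the function returns 0; any mismatch makes the mask zero, wiping the plaintext and returning -1. A small sketch (not part of this patch) of the observable behaviour, using made-up tag and plaintext bytes:

    /* Illustrative only: shows the observable behaviour of aead_check_tag()
     * from aead-common.h on a matching and a non-matching tag pair. */
    #include <stdio.h>
    #include "aead-common.h"

    int main(void)
    {
        unsigned char tag[4]  = {0xde, 0xad, 0xbe, 0xef};
        unsigned char good[4] = {0xde, 0xad, 0xbe, 0xef};
        unsigned char bad[4]  = {0xde, 0xad, 0xbe, 0xee}; /* last byte differs */
        unsigned char pt1[4]  = {'d', 'a', 't', 'a'};
        unsigned char pt2[4]  = {'d', 'a', 't', 'a'};

        /* Matching tags: returns 0 and leaves the plaintext intact */
        int ok  = aead_check_tag(pt1, sizeof(pt1), tag, good, 4);

        /* Mismatched tags: returns -1 and zeroes the plaintext so the
         * caller cannot accidentally use unauthenticated data */
        int err = aead_check_tag(pt2, sizeof(pt2), tag, bad, 4);

        printf("match: %d, first byte %02x\n", ok, pt1[0]);
        printf("mismatch: %d, first byte %02x\n", err, pt2[0]);
        return 0;
    }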
- * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
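The aead_cipher_t and aead_hash_algorithm_t records above are how each algorithm advertises its parameters and entry points to the benchmarking harness. As a hypothetical illustration only (the actual ace_cipher definition in ace.c may differ in detail), a record for ACE filled in with the field order documented above could look like this:

    /* Hypothetical sketch: an aead_cipher_t record for ACE, following the
     * field order documented in aead-common.h.  The actual ace_cipher
     * definition in ace.c may differ in detail. */
    #include "aead-common.h"
    #include "ace.h"

    static const aead_cipher_t ace_cipher_sketch = {
        "ACE",             /* name */
        ACE_KEY_SIZE,      /* key_len   = 16 */
        ACE_NONCE_SIZE,    /* nonce_len = 16 */
        ACE_TAG_SIZE,      /* tag_len   = 16 */
        AEAD_FLAG_NONE,    /* flags */
        ace_aead_encrypt,  /* encrypt */
        ace_aead_decrypt   /* decrypt */
    };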
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/api.h b/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/encrypt.c b/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/encrypt.c deleted file mode 100644 index 99cb7f3..0000000 --- a/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "ace.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return ace_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return ace_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/internal-sliscp-320-avr.S b/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/internal-sliscp-320-avr.S deleted file mode 100644 index 2522d5c..0000000 --- a/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/internal-sliscp-320-avr.S +++ /dev/null @@ -1,1767 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 96 -table_0: - .byte 7 - .byte 83 - .byte 67 - .byte 80 - .byte 40 - .byte 20 - .byte 10 - .byte 93 - .byte 228 - .byte 92 - .byte 174 - .byte 87 - .byte 155 - .byte 73 - .byte 94 - .byte 145 - .byte 72 - .byte 36 - .byte 224 - .byte 127 - .byte 204 - .byte 141 - .byte 198 - .byte 99 - .byte 209 - .byte 190 - .byte 50 - .byte 83 - .byte 169 - .byte 84 - .byte 26 - .byte 29 - .byte 78 - .byte 96 - .byte 48 - .byte 24 - .byte 34 - .byte 40 - .byte 117 - .byte 104 - .byte 52 - .byte 154 - .byte 247 - .byte 108 - .byte 37 - .byte 225 - .byte 112 - .byte 56 - .byte 98 - .byte 130 - .byte 253 - .byte 246 - .byte 123 - .byte 189 - .byte 150 - .byte 71 - .byte 249 - .byte 157 - .byte 206 - .byte 103 - .byte 113 - .byte 107 - .byte 118 - .byte 64 - .byte 32 - .byte 16 - .byte 170 - .byte 136 - .byte 160 - .byte 79 - .byte 39 - .byte 19 - .byte 43 - .byte 220 - 
.byte 176 - .byte 190 - .byte 95 - .byte 47 - .byte 233 - .byte 139 - .byte 9 - .byte 91 - .byte 173 - .byte 214 - .byte 207 - .byte 89 - .byte 30 - .byte 233 - .byte 116 - .byte 186 - .byte 183 - .byte 198 - .byte 173 - .byte 127 - .byte 63 - .byte 31 - - .text -.global sliscp_light320_permute - .type sliscp_light320_permute, @function -sliscp_light320_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 34 - ldd r21,Z+8 - ldd r20,Z+9 - ldd r19,Z+10 - ldd r18,Z+11 - ldd r27,Z+12 - ldd r26,Z+13 - ldd r23,Z+14 - ldd r22,Z+15 - ldd r5,Z+24 - ldd r4,Z+25 - ldd r3,Z+26 - ldd r2,Z+27 - ldd r9,Z+28 - ldd r8,Z+29 - ldd r7,Z+30 - ldd r6,Z+31 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - std Y+5,r22 - std Y+6,r23 - std Y+7,r26 - std Y+8,r27 - std Y+9,r2 - std Y+10,r3 - std Y+11,r4 - std Y+12,r5 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ld r21,Z - ldd r20,Z+1 - ldd r19,Z+2 - ldd r18,Z+3 - ldd r27,Z+16 - ldd r26,Z+17 - ldd r23,Z+18 - ldd r22,Z+19 - ldd r5,Z+4 - ldd r4,Z+5 - ldd r3,Z+6 - ldd r2,Z+7 - ldd r9,Z+20 - ldd r8,Z+21 - ldd r7,Z+22 - ldd r6,Z+23 - ldd r13,Z+32 - ldd r12,Z+33 - ldd r11,Z+34 - ldd r10,Z+35 - ldd r25,Z+36 - ldd r24,Z+37 - ldd r15,Z+38 - ldd r14,Z+39 - push r31 - push r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r16,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r16 -#endif - ldi r30,0 -60: -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - inc r30 - push r30 - mov r30,r16 - movw r16,r18 - mov r1,r20 - mov r0,r21 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r18 - and r17,r19 - and r1,r20 - and r0,r21 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - com r23 - com r26 - com r27 - ldi r16,255 - lsr r30 - rol r16 - eor r22,r16 - movw r16,r22 - mov r1,r26 - mov r0,r27 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r22 - and r17,r23 - and r1,r26 - and r0,r27 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - com r19 - com r20 - com r21 - ldi r16,255 - lsr r30 - rol r16 - eor r18,r16 - movw r16,r18 - mov r1,r20 - mov r0,r21 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r18 - and r17,r19 - and r1,r20 - and r0,r21 - eor r22,r16 - eor r23,r17 - eor r26,r1 - 
eor r27,r0 - com r23 - com r26 - com r27 - ldi r16,255 - lsr r30 - rol r16 - eor r22,r16 - movw r16,r22 - mov r1,r26 - mov r0,r27 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r22 - and r17,r23 - and r1,r26 - and r0,r27 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - com r19 - com r20 - com r21 - ldi r16,255 - lsr r30 - rol r16 - eor r18,r16 - movw r16,r18 - mov r1,r20 - mov r0,r21 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r18 - and r17,r19 - and r1,r20 - and r0,r21 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - com r23 - com r26 - com r27 - ldi r16,255 - lsr r30 - rol r16 - eor r22,r16 - movw r16,r22 - mov r1,r26 - mov r0,r27 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r22 - and r17,r23 - and r1,r26 - and r0,r27 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - com r19 - com r20 - com r21 - ldi r16,255 - lsr r30 - rol r16 - eor r18,r16 - movw r16,r18 - mov r1,r20 - mov r0,r21 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r18 - and r17,r19 - and r1,r20 - and r0,r21 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - com r23 - com r26 - com r27 - ldi r16,255 - lsr r30 - rol r16 - eor r22,r16 - movw r16,r22 - mov r1,r26 - mov r0,r27 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r22 - and r17,r23 - and r1,r26 - and r0,r27 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - com r19 - com r20 - com r21 - ldi r16,255 - lsr r30 - rol r16 - eor r18,r16 - pop r30 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - inc r30 - push r30 - mov r30,r16 - movw r16,r2 - mov r1,r4 - mov r0,r5 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r6,r16 - eor r7,r17 - eor r8,r1 - eor r9,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst 
r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r2 - and r17,r3 - and r1,r4 - and r0,r5 - eor r6,r16 - eor r7,r17 - eor r8,r1 - eor r9,r0 - com r7 - com r8 - com r9 - ldi r16,255 - lsr r30 - rol r16 - eor r6,r16 - movw r16,r6 - mov r1,r8 - mov r0,r9 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r2,r16 - eor r3,r17 - eor r4,r1 - eor r5,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r6 - and r17,r7 - and r1,r8 - and r0,r9 - eor r2,r16 - eor r3,r17 - eor r4,r1 - eor r5,r0 - com r3 - com r4 - com r5 - ldi r16,255 - lsr r30 - rol r16 - eor r2,r16 - movw r16,r2 - mov r1,r4 - mov r0,r5 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r6,r16 - eor r7,r17 - eor r8,r1 - eor r9,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r2 - and r17,r3 - and r1,r4 - and r0,r5 - eor r6,r16 - eor r7,r17 - eor r8,r1 - eor r9,r0 - com r7 - com r8 - com r9 - ldi r16,255 - lsr r30 - rol r16 - eor r6,r16 - movw r16,r6 - mov r1,r8 - mov r0,r9 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r2,r16 - eor r3,r17 - eor r4,r1 - eor r5,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r6 - and r17,r7 - and r1,r8 - and r0,r9 - eor r2,r16 - eor r3,r17 - eor r4,r1 - eor r5,r0 - com r3 - com r4 - com r5 - ldi r16,255 - lsr r30 - rol r16 - eor r2,r16 - movw r16,r2 - mov r1,r4 - mov r0,r5 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r6,r16 - eor r7,r17 - eor r8,r1 - eor r9,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r2 - and r17,r3 - and r1,r4 - and r0,r5 - eor r6,r16 - eor r7,r17 - eor r8,r1 - eor r9,r0 - com r7 - com r8 - com r9 - ldi r16,255 - lsr r30 - rol r16 - eor r6,r16 - movw r16,r6 - mov r1,r8 - mov r0,r9 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r2,r16 - eor r3,r17 - eor r4,r1 - eor r5,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r6 - and r17,r7 - and r1,r8 - and r0,r9 - eor r2,r16 - eor r3,r17 - eor r4,r1 - eor r5,r0 - com r3 - com r4 - com r5 - ldi r16,255 - lsr r30 - rol r16 - eor r2,r16 - movw r16,r2 - mov r1,r4 - mov r0,r5 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r6,r16 - eor r7,r17 - eor r8,r1 - eor r9,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r2 - and r17,r3 - and r1,r4 - and r0,r5 - eor r6,r16 - eor r7,r17 - eor r8,r1 - eor 
r9,r0 - com r7 - com r8 - com r9 - ldi r16,255 - lsr r30 - rol r16 - eor r6,r16 - movw r16,r6 - mov r1,r8 - mov r0,r9 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r2,r16 - eor r3,r17 - eor r4,r1 - eor r5,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r6 - and r17,r7 - and r1,r8 - and r0,r9 - eor r2,r16 - eor r3,r17 - eor r4,r1 - eor r5,r0 - com r3 - com r4 - com r5 - ldi r16,255 - lsr r30 - rol r16 - eor r2,r16 - pop r30 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - inc r30 - push r30 - mov r30,r16 - movw r16,r10 - mov r1,r12 - mov r0,r13 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r14,r16 - eor r15,r17 - eor r24,r1 - eor r25,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r10 - and r17,r11 - and r1,r12 - and r0,r13 - eor r14,r16 - eor r15,r17 - eor r24,r1 - eor r25,r0 - com r15 - com r24 - com r25 - ldi r16,255 - lsr r30 - rol r16 - eor r14,r16 - movw r16,r14 - mov r1,r24 - mov r0,r25 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r10,r16 - eor r11,r17 - eor r12,r1 - eor r13,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r14 - and r17,r15 - and r1,r24 - and r0,r25 - eor r10,r16 - eor r11,r17 - eor r12,r1 - eor r13,r0 - com r11 - com r12 - com r13 - ldi r16,255 - lsr r30 - rol r16 - eor r10,r16 - movw r16,r10 - mov r1,r12 - mov r0,r13 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r14,r16 - eor r15,r17 - eor r24,r1 - eor r25,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r10 - and r17,r11 - and r1,r12 - and r0,r13 - eor r14,r16 - eor r15,r17 - eor r24,r1 - eor r25,r0 - com r15 - com r24 - com r25 - ldi r16,255 - lsr r30 - rol r16 - eor r14,r16 - movw r16,r14 - mov r1,r24 - mov r0,r25 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r10,r16 - eor r11,r17 - eor r12,r1 - eor r13,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r14 - and r17,r15 - and r1,r24 - and r0,r25 - eor r10,r16 - eor r11,r17 - eor r12,r1 - eor r13,r0 - com r11 - com r12 - com r13 - ldi r16,255 - lsr r30 - rol r16 - eor r10,r16 - movw r16,r10 - mov r1,r12 - mov r0,r13 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r14,r16 - eor r15,r17 - eor r24,r1 - eor r25,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol 
r17 - rol r1 - rol r0 - bld r16,0 - and r16,r10 - and r17,r11 - and r1,r12 - and r0,r13 - eor r14,r16 - eor r15,r17 - eor r24,r1 - eor r25,r0 - com r15 - com r24 - com r25 - ldi r16,255 - lsr r30 - rol r16 - eor r14,r16 - movw r16,r14 - mov r1,r24 - mov r0,r25 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r10,r16 - eor r11,r17 - eor r12,r1 - eor r13,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r14 - and r17,r15 - and r1,r24 - and r0,r25 - eor r10,r16 - eor r11,r17 - eor r12,r1 - eor r13,r0 - com r11 - com r12 - com r13 - ldi r16,255 - lsr r30 - rol r16 - eor r10,r16 - movw r16,r10 - mov r1,r12 - mov r0,r13 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r14,r16 - eor r15,r17 - eor r24,r1 - eor r25,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r10 - and r17,r11 - and r1,r12 - and r0,r13 - eor r14,r16 - eor r15,r17 - eor r24,r1 - eor r25,r0 - com r15 - com r24 - com r25 - ldi r16,255 - lsr r30 - rol r16 - eor r14,r16 - movw r16,r14 - mov r1,r24 - mov r0,r25 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r10,r16 - eor r11,r17 - eor r12,r1 - eor r13,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r14 - and r17,r15 - and r1,r24 - and r0,r25 - eor r10,r16 - eor r11,r17 - eor r12,r1 - eor r13,r0 - com r11 - com r12 - com r13 - ldi r16,255 - lsr r30 - rol r16 - eor r10,r16 - pop r30 - ldd r16,Y+1 - ldd r17,Y+2 - ldd r1,Y+3 - ldd r0,Y+4 - eor r16,r2 - eor r17,r3 - eor r1,r4 - eor r0,r5 - com r16 - com r17 - com r1 - com r0 - std Y+1,r16 - std Y+2,r17 - std Y+3,r1 - std Y+4,r0 - ldd r16,Y+5 - ldd r17,Y+6 - ldd r1,Y+7 - ldd r0,Y+8 - eor r16,r6 - eor r17,r7 - eor r1,r8 - eor r0,r9 - com r17 - com r1 - com r0 - std Y+6,r17 - std Y+7,r1 - std Y+8,r0 -#if defined(RAMPZ) - elpm r0,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r0,Z -#elif defined(__AVR_TINY__) - ld r0,Z -#else - lpm -#endif - inc r30 - eor r16,r0 - std Y+5,r16 - ldd r16,Y+9 - ldd r17,Y+10 - ldd r1,Y+11 - ldd r0,Y+12 - eor r16,r10 - eor r17,r11 - eor r1,r12 - eor r0,r13 - com r16 - com r17 - com r1 - com r0 - std Y+9,r16 - std Y+10,r17 - std Y+11,r1 - std Y+12,r0 - ldd r16,Y+13 - ldd r17,Y+14 - ldd r1,Y+15 - ldd r0,Y+16 - eor r16,r14 - eor r17,r15 - eor r1,r24 - eor r0,r25 - com r17 - com r1 - com r0 - std Y+14,r17 - std Y+15,r1 - std Y+16,r0 -#if defined(RAMPZ) - elpm r0,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r0,Z -#elif defined(__AVR_TINY__) - ld r0,Z -#else - lpm -#endif - inc r30 - eor r16,r0 - std Y+13,r16 - eor r10,r18 - eor r11,r19 - eor r12,r20 - eor r13,r21 - com r10 - com r11 - com r12 - com r13 - eor r14,r22 - eor r15,r23 - eor r24,r26 - eor r25,r27 - com r15 - com r24 - com r25 -#if defined(RAMPZ) - elpm r0,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r0,Z -#elif defined(__AVR_TINY__) - ld r0,Z -#else - lpm -#endif - inc r30 - eor r14,r0 - movw r16,r10 - mov r1,r12 - mov r0,r13 - ldd r10,Y+1 - ldd r11,Y+2 - ldd r12,Y+3 - ldd r13,Y+4 - std Y+1,r2 - std 
Y+2,r3 - std Y+3,r4 - std Y+4,r5 - movw r2,r18 - movw r4,r20 - ldd r18,Y+9 - ldd r19,Y+10 - ldd r20,Y+11 - ldd r21,Y+12 - std Y+9,r16 - std Y+10,r17 - std Y+11,r1 - std Y+12,r0 - movw r16,r14 - mov r1,r24 - mov r0,r25 - ldd r14,Y+5 - ldd r15,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - movw r6,r22 - movw r8,r26 - ldd r22,Y+13 - ldd r23,Y+14 - ldd r26,Y+15 - ldd r27,Y+16 - std Y+13,r16 - std Y+14,r17 - std Y+15,r1 - std Y+16,r0 - ldi r17,96 - cpse r30,r17 - rjmp 60b -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - st Z,r21 - std Z+1,r20 - std Z+2,r19 - std Z+3,r18 - std Z+16,r27 - std Z+17,r26 - std Z+18,r23 - std Z+19,r22 - std Z+4,r5 - std Z+5,r4 - std Z+6,r3 - std Z+7,r2 - std Z+20,r9 - std Z+21,r8 - std Z+22,r7 - std Z+23,r6 - std Z+32,r13 - std Z+33,r12 - std Z+34,r11 - std Z+35,r10 - std Z+36,r25 - std Z+37,r24 - std Z+38,r15 - std Z+39,r14 - ldd r18,Y+1 - ldd r19,Y+2 - ldd r20,Y+3 - ldd r21,Y+4 - ldd r22,Y+5 - ldd r23,Y+6 - ldd r26,Y+7 - ldd r27,Y+8 - ldd r2,Y+9 - ldd r3,Y+10 - ldd r4,Y+11 - ldd r5,Y+12 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - std Z+8,r21 - std Z+9,r20 - std Z+10,r19 - std Z+11,r18 - std Z+12,r27 - std Z+13,r26 - std Z+14,r23 - std Z+15,r22 - std Z+24,r5 - std Z+25,r4 - std Z+26,r3 - std Z+27,r2 - std Z+28,r9 - std Z+29,r8 - std Z+30,r7 - std Z+31,r6 - adiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - eor r1,r1 - ret - .size sliscp_light320_permute, .-sliscp_light320_permute - - .text -.global sliscp_light320_swap - .type sliscp_light320_swap, @function -sliscp_light320_swap: - movw r30,r24 -.L__stack_usage = 2 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - std Z+4,r22 - std Z+5,r23 - std Z+6,r26 - std Z+7,r27 - ret - .size sliscp_light320_swap, .-sliscp_light320_swap - -#endif diff --git a/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/internal-sliscp-light.c b/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/internal-sliscp-light.c deleted file mode 100644 index dd3a688..0000000 --- a/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/internal-sliscp-light.c +++ /dev/null @@ -1,413 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-sliscp-light.h" - -#if !defined(__AVR__) - -/** - * \brief Performs one round of the Simeck-64 block cipher. - * - * \param x Left half of the 64-bit block. - * \param y Right half of the 64-bit block. - */ -#define simeck64_round(x, y) \ - do { \ - (y) ^= (leftRotate5((x)) & (x)) ^ leftRotate1((x)) ^ \ - 0xFFFFFFFEU ^ (_rc & 1); \ - _rc >>= 1; \ - } while (0) - -/** - * \brief Encrypts a 64-bit block with the 8 round version of Simeck-64. - * - * \param x Left half of the 64-bit block. - * \param y Right half of the 64-bit block. - * \param rc Round constants for the 8 rounds, 1 bit per round. - * - * It is assumed that the two halves have already been converted from - * big-endian to host byte order before calling this function. The output - * halves will also be in host byte order. - */ -#define simeck64_box(x, y, rc) \ - do { \ - unsigned char _rc = (rc); \ - simeck64_round(x, y); /* Round 1 */ \ - simeck64_round(y, x); /* Round 2 */ \ - simeck64_round(x, y); /* Round 3 */ \ - simeck64_round(y, x); /* Round 4 */ \ - simeck64_round(x, y); /* Round 5 */ \ - simeck64_round(y, x); /* Round 6 */ \ - simeck64_round(x, y); /* Round 7 */ \ - simeck64_round(y, x); /* Round 8 */ \ - } while (0) - -/* Helper macros for 48-bit left rotations */ -#define leftRotate5_48(x) (((x) << 5) | ((x) >> 19)) -#define leftRotate1_48(x) (((x) << 1) | ((x) >> 23)) - -/** - * \brief Performs one round of the Simeck-48 block cipher. - * - * \param x Left half of the 48-bit block. - * \param y Right half of the 48-bit block. - */ -#define simeck48_round(x, y) \ - do { \ - (y) ^= (leftRotate5_48((x)) & (x)) ^ leftRotate1_48((x)) ^ \ - 0x00FFFFFEU ^ (_rc & 1); \ - (y) &= 0x00FFFFFFU; \ - _rc >>= 1; \ - } while (0) - -/** - * \brief Encrypts a 48-bit block with the 6 round version of Simeck-48. - * - * \param x Left half of the 48-bit block. - * \param y Right half of the 48-bit block. - * \param rc Round constants for the 8 rounds, 1 bit per round. - * - * It is assumed that the two halves have already been converted from - * big-endian to host byte order before calling this function. The output - * halves will also be in host byte order. 
- */ -#define simeck48_box(x, y, rc) \ - do { \ - unsigned char _rc = (rc); \ - simeck48_round(x, y); /* Round 1 */ \ - simeck48_round(y, x); /* Round 2 */ \ - simeck48_round(x, y); /* Round 3 */ \ - simeck48_round(y, x); /* Round 4 */ \ - simeck48_round(x, y); /* Round 5 */ \ - simeck48_round(y, x); /* Round 6 */ \ - } while (0) - -/* Interleaved rc0, rc1, sc0, and sc1 values for each round */ -static unsigned char const sliscp_light256_RC[18 * 4] = { - 0x0f, 0x47, 0x08, 0x64, 0x04, 0xb2, 0x86, 0x6b, - 0x43, 0xb5, 0xe2, 0x6f, 0xf1, 0x37, 0x89, 0x2c, - 0x44, 0x96, 0xe6, 0xdd, 0x73, 0xee, 0xca, 0x99, - 0xe5, 0x4c, 0x17, 0xea, 0x0b, 0xf5, 0x8e, 0x0f, - 0x47, 0x07, 0x64, 0x04, 0xb2, 0x82, 0x6b, 0x43, - 0xb5, 0xa1, 0x6f, 0xf1, 0x37, 0x78, 0x2c, 0x44, - 0x96, 0xa2, 0xdd, 0x73, 0xee, 0xb9, 0x99, 0xe5, - 0x4c, 0xf2, 0xea, 0x0b, 0xf5, 0x85, 0x0f, 0x47, - 0x07, 0x23, 0x04, 0xb2, 0x82, 0xd9, 0x43, 0xb5 -}; - -void sliscp_light256_permute_spix(unsigned char block[32], unsigned rounds) -{ - const unsigned char *rc = sliscp_light256_RC; - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t t0, t1; - - /* Load the block into local state variables */ - x0 = be_load_word32(block); - x1 = be_load_word32(block + 4); - x2 = be_load_word32(block + 8); - x3 = be_load_word32(block + 24); /* Assumes the block is pre-swapped */ - x4 = be_load_word32(block + 16); - x5 = be_load_word32(block + 20); - x6 = be_load_word32(block + 12); - x7 = be_load_word32(block + 28); - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds, rc += 4) { - /* Apply Simeck-64 to two of the 64-bit sub-blocks */ - simeck64_box(x2, x3, rc[0]); - simeck64_box(x6, x7, rc[1]); - - /* Add step constants */ - x0 ^= 0xFFFFFFFFU; - x1 ^= 0xFFFFFF00U ^ rc[2]; - x4 ^= 0xFFFFFFFFU; - x5 ^= 0xFFFFFF00U ^ rc[3]; - - /* Mix the sub-blocks */ - t0 = x0 ^ x2; - t1 = x1 ^ x3; - x0 = x2; - x1 = x3; - x2 = x4 ^ x6; - x3 = x5 ^ x7; - x4 = x6; - x5 = x7; - x6 = t0; - x7 = t1; - } - - /* Store the state back into the block */ - be_store_word32(block, x0); - be_store_word32(block + 4, x1); - be_store_word32(block + 8, x2); - be_store_word32(block + 24, x3); /* Assumes the block is pre-swapped */ - be_store_word32(block + 16, x4); - be_store_word32(block + 20, x5); - be_store_word32(block + 12, x6); - be_store_word32(block + 28, x7); -} - -void sliscp_light256_swap_spix(unsigned char block[32]) -{ - uint32_t t1, t2; - t1 = le_load_word32(block + 12); - t2 = le_load_word32(block + 24); - le_store_word32(block + 24, t1); - le_store_word32(block + 12, t2); -} - -void sliscp_light256_permute_spoc(unsigned char block[32]) -{ - const unsigned char *rc = sliscp_light256_RC; - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t t0, t1; - unsigned round; - - /* Load the block into local state variables */ - x0 = be_load_word32(block); - x1 = be_load_word32(block + 4); - x2 = be_load_word32(block + 16); /* Assumes the block is pre-swapped */ - x3 = be_load_word32(block + 20); - x4 = be_load_word32(block + 8); - x5 = be_load_word32(block + 12); - x6 = be_load_word32(block + 24); - x7 = be_load_word32(block + 28); - - /* Perform all permutation rounds */ - for (round = 0; round < 18; ++round, rc += 4) { - /* Apply Simeck-64 to two of the 64-bit sub-blocks */ - simeck64_box(x2, x3, rc[0]); - simeck64_box(x6, x7, rc[1]); - - /* Add step constants */ - x0 ^= 0xFFFFFFFFU; - x1 ^= 0xFFFFFF00U ^ rc[2]; - x4 ^= 0xFFFFFFFFU; - x5 ^= 0xFFFFFF00U ^ rc[3]; - - /* Mix the sub-blocks */ - t0 = x0 ^ x2; - t1 = x1 ^ x3; - x0 = x2; - x1 = x3; - x2 = x4 ^ x6; - x3 = x5 ^ x7; 
- x4 = x6; - x5 = x7; - x6 = t0; - x7 = t1; - } - - /* Store the state back into the block */ - be_store_word32(block, x0); - be_store_word32(block + 4, x1); - be_store_word32(block + 16, x2); /* Assumes the block is pre-swapped */ - be_store_word32(block + 20, x3); - be_store_word32(block + 8, x4); - be_store_word32(block + 12, x5); - be_store_word32(block + 24, x6); - be_store_word32(block + 28, x7); -} - -void sliscp_light256_swap_spoc(unsigned char block[32]) -{ - uint64_t t1, t2; - t1 = le_load_word64(block + 8); - t2 = le_load_word64(block + 16); - le_store_word64(block + 16, t1); - le_store_word64(block + 8, t2); -} - -/* Load a big-endian 24-bit word from a byte buffer */ -#define be_load_word24(ptr) \ - ((((uint32_t)((ptr)[0])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[2]))) - -/* Store a big-endian 24-bit word into a byte buffer */ -#define be_store_word24(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 16); \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)_x; \ - } while (0) - -void sliscp_light192_permute(unsigned char block[24]) -{ - /* Interleaved rc0, rc1, sc0, and sc1 values for each round */ - static unsigned char const RC[18 * 4] = { - 0x07, 0x27, 0x08, 0x29, 0x04, 0x34, 0x0c, 0x1d, - 0x06, 0x2e, 0x0a, 0x33, 0x25, 0x19, 0x2f, 0x2a, - 0x17, 0x35, 0x38, 0x1f, 0x1c, 0x0f, 0x24, 0x10, - 0x12, 0x08, 0x36, 0x18, 0x3b, 0x0c, 0x0d, 0x14, - 0x26, 0x0a, 0x2b, 0x1e, 0x15, 0x2f, 0x3e, 0x31, - 0x3f, 0x38, 0x01, 0x09, 0x20, 0x24, 0x21, 0x2d, - 0x30, 0x36, 0x11, 0x1b, 0x28, 0x0d, 0x39, 0x16, - 0x3c, 0x2b, 0x05, 0x3d, 0x22, 0x3e, 0x27, 0x03, - 0x13, 0x01, 0x34, 0x02, 0x1a, 0x21, 0x2e, 0x23 - }; - const unsigned char *rc = RC; - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t t0, t1; - unsigned round; - - /* Load the block into local state variables. 
Each 24-bit block is - * placed into a separate 32-bit word which improves efficiency below */ - x0 = be_load_word24(block); - x1 = be_load_word24(block + 3); - x2 = be_load_word24(block + 6); - x3 = be_load_word24(block + 9); - x4 = be_load_word24(block + 12); - x5 = be_load_word24(block + 15); - x6 = be_load_word24(block + 18); - x7 = be_load_word24(block + 21); - - /* Perform all permutation rounds */ - for (round = 0; round < 18; ++round, rc += 4) { - /* Apply Simeck-48 to two of the 48-bit sub-blocks */ - simeck48_box(x2, x3, rc[0]); - simeck48_box(x6, x7, rc[1]); - - /* Add step constants */ - x0 ^= 0x00FFFFFFU; - x1 ^= 0x00FFFF00U ^ rc[2]; - x4 ^= 0x00FFFFFFU; - x5 ^= 0x00FFFF00U ^ rc[3]; - - /* Mix the sub-blocks */ - t0 = x0 ^ x2; - t1 = x1 ^ x3; - x0 = x2; - x1 = x3; - x2 = x4 ^ x6; - x3 = x5 ^ x7; - x4 = x6; - x5 = x7; - x6 = t0; - x7 = t1; - } - - /* Store the state back into the block */ - be_store_word24(block, x0); - be_store_word24(block + 3, x1); - be_store_word24(block + 6, x2); - be_store_word24(block + 9, x3); - be_store_word24(block + 12, x4); - be_store_word24(block + 15, x5); - be_store_word24(block + 18, x6); - be_store_word24(block + 21, x7); -} - -void sliscp_light320_permute(unsigned char block[40]) -{ - /* Interleaved rc0, rc1, rc2, sc0, sc1, and sc2 values for each round */ - static unsigned char const RC[16 * 6] = { - 0x07, 0x53, 0x43, 0x50, 0x28, 0x14, 0x0a, 0x5d, - 0xe4, 0x5c, 0xae, 0x57, 0x9b, 0x49, 0x5e, 0x91, - 0x48, 0x24, 0xe0, 0x7f, 0xcc, 0x8d, 0xc6, 0x63, - 0xd1, 0xbe, 0x32, 0x53, 0xa9, 0x54, 0x1a, 0x1d, - 0x4e, 0x60, 0x30, 0x18, 0x22, 0x28, 0x75, 0x68, - 0x34, 0x9a, 0xf7, 0x6c, 0x25, 0xe1, 0x70, 0x38, - 0x62, 0x82, 0xfd, 0xf6, 0x7b, 0xbd, 0x96, 0x47, - 0xf9, 0x9d, 0xce, 0x67, 0x71, 0x6b, 0x76, 0x40, - 0x20, 0x10, 0xaa, 0x88, 0xa0, 0x4f, 0x27, 0x13, - 0x2b, 0xdc, 0xb0, 0xbe, 0x5f, 0x2f, 0xe9, 0x8b, - 0x09, 0x5b, 0xad, 0xd6, 0xcf, 0x59, 0x1e, 0xe9, - 0x74, 0xba, 0xb7, 0xc6, 0xad, 0x7f, 0x3f, 0x1f - }; - const unsigned char *rc = RC; - uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9; - uint32_t t0, t1; - unsigned round; - - /* Load the block into local state variables */ - x0 = be_load_word32(block); - x1 = be_load_word32(block + 16); /* Assumes the block is pre-swapped */ - x2 = be_load_word32(block + 8); - x3 = be_load_word32(block + 12); - x4 = be_load_word32(block + 4); - x5 = be_load_word32(block + 20); - x6 = be_load_word32(block + 24); - x7 = be_load_word32(block + 28); - x8 = be_load_word32(block + 32); - x9 = be_load_word32(block + 36); - - /* Perform all permutation rounds */ - for (round = 0; round < 16; ++round, rc += 6) { - /* Apply Simeck-64 to three of the 64-bit sub-blocks */ - simeck64_box(x0, x1, rc[0]); - simeck64_box(x4, x5, rc[1]); - simeck64_box(x8, x9, rc[2]); - x6 ^= x8; - x7 ^= x9; - x2 ^= x4; - x3 ^= x5; - x8 ^= x0; - x9 ^= x1; - - /* Add step constants */ - x2 ^= 0xFFFFFFFFU; - x3 ^= 0xFFFFFF00U ^ rc[3]; - x6 ^= 0xFFFFFFFFU; - x7 ^= 0xFFFFFF00U ^ rc[4]; - x8 ^= 0xFFFFFFFFU; - x9 ^= 0xFFFFFF00U ^ rc[5]; - - /* Rotate the sub-blocks */ - t0 = x8; - t1 = x9; - x8 = x2; - x9 = x3; - x2 = x4; - x3 = x5; - x4 = x0; - x5 = x1; - x0 = x6; - x1 = x7; - x6 = t0; - x7 = t1; - } - - /* Store the state back into the block */ - be_store_word32(block, x0); - be_store_word32(block + 16, x1); /* Assumes the block is pre-swapped */ - be_store_word32(block + 8, x2); - be_store_word32(block + 12, x3); - be_store_word32(block + 4, x4); - be_store_word32(block + 20, x5); - be_store_word32(block + 24, x6); - be_store_word32(block + 28, x7); - 
be_store_word32(block + 32, x8); - be_store_word32(block + 36, x9); -} - -void sliscp_light320_swap(unsigned char block[40]) -{ - uint32_t t1, t2; - t1 = le_load_word32(block + 4); - t2 = le_load_word32(block + 16); - le_store_word32(block + 16, t1); - le_store_word32(block + 4, t2); -} - -#endif /* !__AVR__ */ diff --git a/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/internal-sliscp-light.h b/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/internal-sliscp-light.h deleted file mode 100644 index 8a5e8d5..0000000 --- a/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/internal-sliscp-light.h +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SLISCP_LIGHT_H -#define LW_INTERNAL_SLISCP_LIGHT_H - -/** - * \file internal-sliscp-light.h - * \brief sLiSCP-light permutation - * - * There are three variants of sLiSCP-light in use in the NIST submissions: - * - * \li sLiSCP-light-256 with a 256-bit block size, used in SPIX and SpoC. - * \li sLiSCP-light-192 with a 192-bit block size, used in SpoC. - * \li sLiSCP-light-320 with a 320-bit block size, used in ACE. - * - * References: https://uwaterloo.ca/communications-security-lab/lwc/ace, - * https://uwaterloo.ca/communications-security-lab/lwc/spix, - * https://uwaterloo.ca/communications-security-lab/lwc/spoc - */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the state for sLiSCP-light-256. - */ -#define SLISCP_LIGHT256_STATE_SIZE 32 - -/** - * \brief Size of the state for sLiSCP-light-192. - */ -#define SLISCP_LIGHT192_STATE_SIZE 24 - -/** - * \brief Size of the state for sLiSCP-light-320. - */ -#define SLISCP_LIGHT320_STATE_SIZE 40 - -/** - * \brief Performs the sLiSCP-light permutation on a 256-bit block. - * - * \param block Points to the block to be permuted. - * \param rounds Number of rounds to be performed, usually 9 or 18. - * - * The bytes of the block are assumed to be rearranged to match the - * requirements of the SPIX cipher. SPIX places the rate bytes at - * positions 8, 9, 10, 11, 24, 25, 26, and 27. - * - * This function assumes that bytes 24-27 have been pre-swapped with - * bytes 12-15 so that the rate portion of the state is contiguous. - * - * The sliscp_light256_swap_spix() function can be used to switch - * between the canonical order and the pre-swapped order. 
- * - * \sa sliscp_light256_swap_spix() - */ -void sliscp_light256_permute_spix(unsigned char block[32], unsigned rounds); - -/** - * \brief Swaps rate bytes in a sLiSCP-light 256-bit block for SPIX. - * - * \param block Points to the block to be rate-swapped. - * - * \sa sliscp_light256_permute_spix() - */ -void sliscp_light256_swap_spix(unsigned char block[32]); - -/** - * \brief Performs the sLiSCP-light permutation on a 256-bit block. - * - * \param block Points to the block to be permuted. - * - * The bytes of the block are assumed to be rearranged to match the - * requirements of the SpoC-128 cipher. SpoC-128 interleaves the - * rate bytes and the mask bytes. This version assumes that the - * rate and mask are in contiguous bytes of the state. - * - * SpoC-128 absorbs bytes using the mask bytes of the state at offsets - * 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, and 31. - * It squeezes bytes using the rate bytes of the state at offsets - * 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, and 23. - * - * This function assumes that bytes 8-15 have been pre-swapped with 16-23 - * so that the rate and mask portions of the state are contiguous. - * - * The sliscp_light256_swap_spoc() function can be used to switch - * between the canonical order and the pre-swapped order. - * - * \sa sliscp_light256_swap_spoc() - */ -void sliscp_light256_permute_spoc(unsigned char block[32]); - -/** - * \brief Swaps rate bytes in a sLiSCP-light 256-bit block for SpoC-128. - * - * \param block Points to the block to be rate-swapped. - * - * \sa sliscp_light256_permute_spoc() - */ -void sliscp_light256_swap_spoc(unsigned char block[32]); - -/** - * \brief Performs the sLiSCP-light permutation on a 192-bit block. - * - * \param block Points to the block to be permuted. - */ -void sliscp_light192_permute(unsigned char block[24]); - -/** - * \brief Performs the sLiSCP-light permutation on a 320-bit block. - * - * \param block Points to the block to be permuted. - * - * The ACE specification refers to this permutation as "ACE" but that - * can be confused with the name of the AEAD mode so we call this - * permutation "sLiSCP-light-320" instead. - * - * ACE absorbs and squeezes data at the rate bytes 0, 1, 2, 3, 16, 17, 18, 19. - * Efficiency can suffer because of the discontinuity in rate byte positions. - * - * To counteract this, we assume that the input to the permutation has been - * pre-swapped: bytes 4, 5, 6, 7 are swapped with bytes 16, 17, 18, 19 so - * that the rate is contiguous at the start of the state. - * - * The sliscp_light320_swap() function can be used to switch between the - * canonical order and the pre-swapped order. - * - * \sa sliscp_light320_swap() - */ -void sliscp_light320_permute(unsigned char block[40]); - -/** - * \brief Swaps rate bytes in a sLiSCP-light 320-bit block. - * - * \param block Points to the block to be rate-swapped. - * - * \sa sliscp_light320_permute() - */ -void sliscp_light320_swap(unsigned char block[40]); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/internal-util.h b/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/ace/Implementations/crypto_aead/aceae128v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) 
| \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = 
(src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/ace/Implementations/crypto_aead/aceae128v1/rhys/internal-sliscp-320-avr.S b/ace/Implementations/crypto_aead/aceae128v1/rhys/internal-sliscp-320-avr.S new file mode 100644 index 0000000..2522d5c --- /dev/null +++ b/ace/Implementations/crypto_aead/aceae128v1/rhys/internal-sliscp-320-avr.S @@ -0,0 +1,1767 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 96 +table_0: + .byte 7 + .byte 83 + .byte 67 + .byte 80 + .byte 40 + .byte 20 + .byte 10 + .byte 93 + .byte 228 + .byte 92 + .byte 174 + .byte 87 + .byte 155 + .byte 73 + .byte 94 + .byte 145 + .byte 72 + .byte 36 + .byte 224 + .byte 127 + .byte 204 + .byte 141 + .byte 198 + .byte 99 + .byte 209 + .byte 190 + .byte 50 + .byte 83 + .byte 169 + .byte 84 + .byte 26 + .byte 29 + .byte 78 + .byte 96 + .byte 48 + .byte 24 + .byte 34 + .byte 40 + .byte 117 + .byte 104 + .byte 52 + .byte 154 + .byte 247 + .byte 108 + .byte 37 + .byte 225 + .byte 112 + .byte 56 + .byte 98 + .byte 130 + .byte 253 + .byte 246 + .byte 123 + .byte 189 + .byte 150 + .byte 71 + .byte 249 + .byte 157 + .byte 206 + .byte 103 + .byte 113 + .byte 107 + .byte 118 + .byte 64 + .byte 32 + .byte 16 + .byte 170 + .byte 136 + .byte 160 + .byte 79 + .byte 39 + .byte 19 + .byte 43 + .byte 220 + .byte 176 + .byte 190 + .byte 95 + .byte 47 + .byte 233 + .byte 139 + .byte 9 + .byte 91 + .byte 173 + .byte 214 + .byte 207 + .byte 89 + .byte 30 + .byte 233 + .byte 116 + .byte 186 + .byte 183 + .byte 198 + .byte 173 + .byte 127 + .byte 63 + .byte 31 + + .text +.global sliscp_light320_permute + .type sliscp_light320_permute, @function +sliscp_light320_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 34 + ldd r21,Z+8 + ldd r20,Z+9 + ldd r19,Z+10 + ldd r18,Z+11 + ldd r27,Z+12 + ldd r26,Z+13 + ldd r23,Z+14 + ldd r22,Z+15 + ldd r5,Z+24 + ldd r4,Z+25 + ldd r3,Z+26 + ldd r2,Z+27 + ldd r9,Z+28 + ldd r8,Z+29 + ldd r7,Z+30 + ldd r6,Z+31 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + std Y+5,r22 + std Y+6,r23 + std Y+7,r26 + std Y+8,r27 + std Y+9,r2 + std Y+10,r3 + std Y+11,r4 + std Y+12,r5 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ld r21,Z + ldd r20,Z+1 + ldd r19,Z+2 + ldd r18,Z+3 + ldd r27,Z+16 + ldd r26,Z+17 + 
ldd r23,Z+18 + ldd r22,Z+19 + ldd r5,Z+4 + ldd r4,Z+5 + ldd r3,Z+6 + ldd r2,Z+7 + ldd r9,Z+20 + ldd r8,Z+21 + ldd r7,Z+22 + ldd r6,Z+23 + ldd r13,Z+32 + ldd r12,Z+33 + ldd r11,Z+34 + ldd r10,Z+35 + ldd r25,Z+36 + ldd r24,Z+37 + ldd r15,Z+38 + ldd r14,Z+39 + push r31 + push r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r16,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r16 +#endif + ldi r30,0 +60: +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + inc r30 + push r30 + mov r30,r16 + movw r16,r18 + mov r1,r20 + mov r0,r21 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r18 + and r17,r19 + and r1,r20 + and r0,r21 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + com r23 + com r26 + com r27 + ldi r16,255 + lsr r30 + rol r16 + eor r22,r16 + movw r16,r22 + mov r1,r26 + mov r0,r27 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r22 + and r17,r23 + and r1,r26 + and r0,r27 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + com r19 + com r20 + com r21 + ldi r16,255 + lsr r30 + rol r16 + eor r18,r16 + movw r16,r18 + mov r1,r20 + mov r0,r21 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r18 + and r17,r19 + and r1,r20 + and r0,r21 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + com r23 + com r26 + com r27 + ldi r16,255 + lsr r30 + rol r16 + eor r22,r16 + movw r16,r22 + mov r1,r26 + mov r0,r27 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r22 + and r17,r23 + and r1,r26 + and r0,r27 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + com r19 + com r20 + com r21 + ldi r16,255 + lsr r30 + rol r16 + eor r18,r16 + movw r16,r18 + mov r1,r20 + mov r0,r21 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r18 + and r17,r19 + and r1,r20 + and r0,r21 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + com r23 + com r26 + com r27 + ldi r16,255 + lsr r30 
+ rol r16 + eor r22,r16 + movw r16,r22 + mov r1,r26 + mov r0,r27 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r22 + and r17,r23 + and r1,r26 + and r0,r27 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + com r19 + com r20 + com r21 + ldi r16,255 + lsr r30 + rol r16 + eor r18,r16 + movw r16,r18 + mov r1,r20 + mov r0,r21 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r18 + and r17,r19 + and r1,r20 + and r0,r21 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + com r23 + com r26 + com r27 + ldi r16,255 + lsr r30 + rol r16 + eor r22,r16 + movw r16,r22 + mov r1,r26 + mov r0,r27 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r22 + and r17,r23 + and r1,r26 + and r0,r27 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + com r19 + com r20 + com r21 + ldi r16,255 + lsr r30 + rol r16 + eor r18,r16 + pop r30 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + inc r30 + push r30 + mov r30,r16 + movw r16,r2 + mov r1,r4 + mov r0,r5 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r6,r16 + eor r7,r17 + eor r8,r1 + eor r9,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r2 + and r17,r3 + and r1,r4 + and r0,r5 + eor r6,r16 + eor r7,r17 + eor r8,r1 + eor r9,r0 + com r7 + com r8 + com r9 + ldi r16,255 + lsr r30 + rol r16 + eor r6,r16 + movw r16,r6 + mov r1,r8 + mov r0,r9 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r2,r16 + eor r3,r17 + eor r4,r1 + eor r5,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r6 + and r17,r7 + and r1,r8 + and r0,r9 + eor r2,r16 + eor r3,r17 + eor r4,r1 + eor r5,r0 + com r3 + com r4 + com r5 + ldi r16,255 + lsr r30 + rol r16 + eor r2,r16 + movw r16,r2 + mov r1,r4 + mov r0,r5 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r6,r16 + eor r7,r17 + eor r8,r1 + eor r9,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r2 + and r17,r3 + and r1,r4 + and 
r0,r5 + eor r6,r16 + eor r7,r17 + eor r8,r1 + eor r9,r0 + com r7 + com r8 + com r9 + ldi r16,255 + lsr r30 + rol r16 + eor r6,r16 + movw r16,r6 + mov r1,r8 + mov r0,r9 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r2,r16 + eor r3,r17 + eor r4,r1 + eor r5,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r6 + and r17,r7 + and r1,r8 + and r0,r9 + eor r2,r16 + eor r3,r17 + eor r4,r1 + eor r5,r0 + com r3 + com r4 + com r5 + ldi r16,255 + lsr r30 + rol r16 + eor r2,r16 + movw r16,r2 + mov r1,r4 + mov r0,r5 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r6,r16 + eor r7,r17 + eor r8,r1 + eor r9,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r2 + and r17,r3 + and r1,r4 + and r0,r5 + eor r6,r16 + eor r7,r17 + eor r8,r1 + eor r9,r0 + com r7 + com r8 + com r9 + ldi r16,255 + lsr r30 + rol r16 + eor r6,r16 + movw r16,r6 + mov r1,r8 + mov r0,r9 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r2,r16 + eor r3,r17 + eor r4,r1 + eor r5,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r6 + and r17,r7 + and r1,r8 + and r0,r9 + eor r2,r16 + eor r3,r17 + eor r4,r1 + eor r5,r0 + com r3 + com r4 + com r5 + ldi r16,255 + lsr r30 + rol r16 + eor r2,r16 + movw r16,r2 + mov r1,r4 + mov r0,r5 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r6,r16 + eor r7,r17 + eor r8,r1 + eor r9,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r2 + and r17,r3 + and r1,r4 + and r0,r5 + eor r6,r16 + eor r7,r17 + eor r8,r1 + eor r9,r0 + com r7 + com r8 + com r9 + ldi r16,255 + lsr r30 + rol r16 + eor r6,r16 + movw r16,r6 + mov r1,r8 + mov r0,r9 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r2,r16 + eor r3,r17 + eor r4,r1 + eor r5,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r6 + and r17,r7 + and r1,r8 + and r0,r9 + eor r2,r16 + eor r3,r17 + eor r4,r1 + eor r5,r0 + com r3 + com r4 + com r5 + ldi r16,255 + lsr r30 + rol r16 + eor r2,r16 + pop r30 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + inc r30 + push r30 + mov r30,r16 + movw r16,r10 + mov r1,r12 + mov r0,r13 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r14,r16 + eor r15,r17 + eor r24,r1 + eor r25,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + 
bld r16,0 + and r16,r10 + and r17,r11 + and r1,r12 + and r0,r13 + eor r14,r16 + eor r15,r17 + eor r24,r1 + eor r25,r0 + com r15 + com r24 + com r25 + ldi r16,255 + lsr r30 + rol r16 + eor r14,r16 + movw r16,r14 + mov r1,r24 + mov r0,r25 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r10,r16 + eor r11,r17 + eor r12,r1 + eor r13,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r14 + and r17,r15 + and r1,r24 + and r0,r25 + eor r10,r16 + eor r11,r17 + eor r12,r1 + eor r13,r0 + com r11 + com r12 + com r13 + ldi r16,255 + lsr r30 + rol r16 + eor r10,r16 + movw r16,r10 + mov r1,r12 + mov r0,r13 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r14,r16 + eor r15,r17 + eor r24,r1 + eor r25,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r10 + and r17,r11 + and r1,r12 + and r0,r13 + eor r14,r16 + eor r15,r17 + eor r24,r1 + eor r25,r0 + com r15 + com r24 + com r25 + ldi r16,255 + lsr r30 + rol r16 + eor r14,r16 + movw r16,r14 + mov r1,r24 + mov r0,r25 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r10,r16 + eor r11,r17 + eor r12,r1 + eor r13,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r14 + and r17,r15 + and r1,r24 + and r0,r25 + eor r10,r16 + eor r11,r17 + eor r12,r1 + eor r13,r0 + com r11 + com r12 + com r13 + ldi r16,255 + lsr r30 + rol r16 + eor r10,r16 + movw r16,r10 + mov r1,r12 + mov r0,r13 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r14,r16 + eor r15,r17 + eor r24,r1 + eor r25,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r10 + and r17,r11 + and r1,r12 + and r0,r13 + eor r14,r16 + eor r15,r17 + eor r24,r1 + eor r25,r0 + com r15 + com r24 + com r25 + ldi r16,255 + lsr r30 + rol r16 + eor r14,r16 + movw r16,r14 + mov r1,r24 + mov r0,r25 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r10,r16 + eor r11,r17 + eor r12,r1 + eor r13,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r14 + and r17,r15 + and r1,r24 + and r0,r25 + eor r10,r16 + eor r11,r17 + eor r12,r1 + eor r13,r0 + com r11 + com r12 + com r13 + ldi r16,255 + lsr r30 + rol r16 + eor r10,r16 + movw r16,r10 + mov r1,r12 + mov r0,r13 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r14,r16 + eor r15,r17 + eor r24,r1 + eor r25,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r10 + and r17,r11 
+ and r1,r12 + and r0,r13 + eor r14,r16 + eor r15,r17 + eor r24,r1 + eor r25,r0 + com r15 + com r24 + com r25 + ldi r16,255 + lsr r30 + rol r16 + eor r14,r16 + movw r16,r14 + mov r1,r24 + mov r0,r25 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r10,r16 + eor r11,r17 + eor r12,r1 + eor r13,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r14 + and r17,r15 + and r1,r24 + and r0,r25 + eor r10,r16 + eor r11,r17 + eor r12,r1 + eor r13,r0 + com r11 + com r12 + com r13 + ldi r16,255 + lsr r30 + rol r16 + eor r10,r16 + pop r30 + ldd r16,Y+1 + ldd r17,Y+2 + ldd r1,Y+3 + ldd r0,Y+4 + eor r16,r2 + eor r17,r3 + eor r1,r4 + eor r0,r5 + com r16 + com r17 + com r1 + com r0 + std Y+1,r16 + std Y+2,r17 + std Y+3,r1 + std Y+4,r0 + ldd r16,Y+5 + ldd r17,Y+6 + ldd r1,Y+7 + ldd r0,Y+8 + eor r16,r6 + eor r17,r7 + eor r1,r8 + eor r0,r9 + com r17 + com r1 + com r0 + std Y+6,r17 + std Y+7,r1 + std Y+8,r0 +#if defined(RAMPZ) + elpm r0,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r0,Z +#elif defined(__AVR_TINY__) + ld r0,Z +#else + lpm +#endif + inc r30 + eor r16,r0 + std Y+5,r16 + ldd r16,Y+9 + ldd r17,Y+10 + ldd r1,Y+11 + ldd r0,Y+12 + eor r16,r10 + eor r17,r11 + eor r1,r12 + eor r0,r13 + com r16 + com r17 + com r1 + com r0 + std Y+9,r16 + std Y+10,r17 + std Y+11,r1 + std Y+12,r0 + ldd r16,Y+13 + ldd r17,Y+14 + ldd r1,Y+15 + ldd r0,Y+16 + eor r16,r14 + eor r17,r15 + eor r1,r24 + eor r0,r25 + com r17 + com r1 + com r0 + std Y+14,r17 + std Y+15,r1 + std Y+16,r0 +#if defined(RAMPZ) + elpm r0,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r0,Z +#elif defined(__AVR_TINY__) + ld r0,Z +#else + lpm +#endif + inc r30 + eor r16,r0 + std Y+13,r16 + eor r10,r18 + eor r11,r19 + eor r12,r20 + eor r13,r21 + com r10 + com r11 + com r12 + com r13 + eor r14,r22 + eor r15,r23 + eor r24,r26 + eor r25,r27 + com r15 + com r24 + com r25 +#if defined(RAMPZ) + elpm r0,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r0,Z +#elif defined(__AVR_TINY__) + ld r0,Z +#else + lpm +#endif + inc r30 + eor r14,r0 + movw r16,r10 + mov r1,r12 + mov r0,r13 + ldd r10,Y+1 + ldd r11,Y+2 + ldd r12,Y+3 + ldd r13,Y+4 + std Y+1,r2 + std Y+2,r3 + std Y+3,r4 + std Y+4,r5 + movw r2,r18 + movw r4,r20 + ldd r18,Y+9 + ldd r19,Y+10 + ldd r20,Y+11 + ldd r21,Y+12 + std Y+9,r16 + std Y+10,r17 + std Y+11,r1 + std Y+12,r0 + movw r16,r14 + mov r1,r24 + mov r0,r25 + ldd r14,Y+5 + ldd r15,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + movw r6,r22 + movw r8,r26 + ldd r22,Y+13 + ldd r23,Y+14 + ldd r26,Y+15 + ldd r27,Y+16 + std Y+13,r16 + std Y+14,r17 + std Y+15,r1 + std Y+16,r0 + ldi r17,96 + cpse r30,r17 + rjmp 60b +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + st Z,r21 + std Z+1,r20 + std Z+2,r19 + std Z+3,r18 + std Z+16,r27 + std Z+17,r26 + std Z+18,r23 + std Z+19,r22 + std Z+4,r5 + std Z+5,r4 + std Z+6,r3 + std Z+7,r2 + std Z+20,r9 + std Z+21,r8 + std Z+22,r7 + std Z+23,r6 + std Z+32,r13 + std Z+33,r12 + std Z+34,r11 + std Z+35,r10 + std Z+36,r25 + std Z+37,r24 + std Z+38,r15 + std Z+39,r14 + ldd r18,Y+1 + ldd r19,Y+2 + ldd r20,Y+3 + ldd r21,Y+4 + ldd r22,Y+5 + ldd r23,Y+6 + ldd r26,Y+7 + ldd r27,Y+8 + ldd r2,Y+9 + ldd r3,Y+10 + ldd r4,Y+11 + ldd r5,Y+12 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + std Z+8,r21 + std Z+9,r20 + std Z+10,r19 + std Z+11,r18 + std Z+12,r27 + 
std Z+13,r26 + std Z+14,r23 + std Z+15,r22 + std Z+24,r5 + std Z+25,r4 + std Z+26,r3 + std Z+27,r2 + std Z+28,r9 + std Z+29,r8 + std Z+30,r7 + std Z+31,r6 + adiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + eor r1,r1 + ret + .size sliscp_light320_permute, .-sliscp_light320_permute + + .text +.global sliscp_light320_swap + .type sliscp_light320_swap, @function +sliscp_light320_swap: + movw r30,r24 +.L__stack_usage = 2 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + std Z+4,r22 + std Z+5,r23 + std Z+6,r26 + std Z+7,r27 + ret + .size sliscp_light320_swap, .-sliscp_light320_swap + +#endif diff --git a/ace/Implementations/crypto_aead/aceae128v1/rhys/internal-sliscp-light.c b/ace/Implementations/crypto_aead/aceae128v1/rhys/internal-sliscp-light.c index 69b4519..dd3a688 100644 --- a/ace/Implementations/crypto_aead/aceae128v1/rhys/internal-sliscp-light.c +++ b/ace/Implementations/crypto_aead/aceae128v1/rhys/internal-sliscp-light.c @@ -22,6 +22,8 @@ #include "internal-sliscp-light.h" +#if !defined(__AVR__) + /** * \brief Performs one round of the Simeck-64 block cipher. * @@ -173,11 +175,12 @@ void sliscp_light256_swap_spix(unsigned char block[32]) le_store_word32(block + 12, t2); } -void sliscp_light256_permute_spoc(unsigned char block[32], unsigned rounds) +void sliscp_light256_permute_spoc(unsigned char block[32]) { const unsigned char *rc = sliscp_light256_RC; uint32_t x0, x1, x2, x3, x4, x5, x6, x7; uint32_t t0, t1; + unsigned round; /* Load the block into local state variables */ x0 = be_load_word32(block); @@ -190,7 +193,7 @@ void sliscp_light256_permute_spoc(unsigned char block[32], unsigned rounds) x7 = be_load_word32(block + 28); /* Perform all permutation rounds */ - for (; rounds > 0; --rounds, rc += 4) { + for (round = 0; round < 18; ++round, rc += 4) { /* Apply Simeck-64 to two of the 64-bit sub-blocks */ simeck64_box(x2, x3, rc[0]); simeck64_box(x6, x7, rc[1]); @@ -406,3 +409,5 @@ void sliscp_light320_swap(unsigned char block[40]) le_store_word32(block + 16, t1); le_store_word32(block + 4, t2); } + +#endif /* !__AVR__ */ diff --git a/ace/Implementations/crypto_aead/aceae128v1/rhys/internal-sliscp-light.h b/ace/Implementations/crypto_aead/aceae128v1/rhys/internal-sliscp-light.h index fa6b9ba..8a5e8d5 100644 --- a/ace/Implementations/crypto_aead/aceae128v1/rhys/internal-sliscp-light.h +++ b/ace/Implementations/crypto_aead/aceae128v1/rhys/internal-sliscp-light.h @@ -92,7 +92,6 @@ void sliscp_light256_swap_spix(unsigned char block[32]); * \brief Performs the sLiSCP-light permutation on a 256-bit block. * * \param block Points to the block to be permuted. - * \param rounds Number of rounds to be performed, usually 9 or 18. * * The bytes of the block are assumed to be rearranged to match the * requirements of the SpoC-128 cipher. SpoC-128 interleaves the @@ -112,7 +111,7 @@ void sliscp_light256_swap_spix(unsigned char block[32]); * * \sa sliscp_light256_swap_spoc() */ -void sliscp_light256_permute_spoc(unsigned char block[32], unsigned rounds); +void sliscp_light256_permute_spoc(unsigned char block[32]); /** * \brief Swaps rate bytes in a sLiSCP-light 256-bit block for SpoC-128. 
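The hunks above drop the "rounds" parameter from sliscp_light256_permute_spoc(); the permutation now always performs the 18 rounds used by SpoC-128, and the matching change to internal-sliscp-light.c hard-codes that count in the round loop. A minimal caller sketch, using a hypothetical helper that is not part of this patch, shows how call sites change under the new prototype:

    #include "internal-sliscp-light.h"

    /* Hypothetical helper, for illustration only: run the SpoC-128 variant of
     * the sLiSCP-light-256 permutation on a 32-byte state.  Before this patch
     * the call would have been sliscp_light256_permute_spoc(state, 18); the
     * round count is now fixed inside the permutation itself. */
    static void example_spoc_step(unsigned char state[32])
    {
        sliscp_light256_permute_spoc(state); /* always 18 rounds */
    }
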
diff --git a/ace/Implementations/crypto_aead/aceae128v1/rhys/internal-util.h b/ace/Implementations/crypto_aead/aceae128v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/ace/Implementations/crypto_aead/aceae128v1/rhys/internal-util.h +++ b/ace/Implementations/crypto_aead/aceae128v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate 
by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/ace.c b/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/ace.c deleted file mode 100644 index 7a68306..0000000 --- a/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/ace.c +++ /dev/null @@ 
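As a concrete check of the composition scheme above, the following standalone program (not part of the patch; rotl13_composed and the test value are illustrative) verifies that "rotate left by 16, then right by 1 three times" equals a direct left rotation by 13, which is the identity the leftRotate13 macro depends on:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t rotl32(uint32_t x, unsigned n) { return (x << n) | (x >> (32u - n)); }
static uint32_t rotr32(uint32_t x, unsigned n) { return (x >> n) | (x << (32u - n)); }

/* Composed form: one 16-bit half swap plus three 1-bit right rotations,
 * the only rotation amounts that are cheap on AVR. */
static uint32_t rotl13_composed(uint32_t x)
{
    return rotr32(rotr32(rotr32(rotl32(x, 16), 1), 1), 1);
}

int main(void)
{
    uint32_t v = 0x12345678u;
    /* Both columns print the same value. */
    printf("%08" PRIx32 " %08" PRIx32 "\n", rotl32(v, 13), rotl13_composed(v));
    return 0;
}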
-1,339 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "ace.h" -#include "internal-sliscp-light.h" -#include "internal-util.h" -#include - -/** - * \brief Size of the state for the internal ACE permutation. - */ -#define ACE_STATE_SIZE SLISCP_LIGHT320_STATE_SIZE - -/** - * \brief Rate for absorbing data into the ACE state and for - * squeezing data out again. - */ -#define ACE_RATE 8 - -aead_cipher_t const ace_cipher = { - "ACE", - ACE_KEY_SIZE, - ACE_NONCE_SIZE, - ACE_TAG_SIZE, - AEAD_FLAG_NONE, - ace_aead_encrypt, - ace_aead_decrypt -}; - -aead_hash_algorithm_t const ace_hash_algorithm = { - "ACE-HASH", - sizeof(ace_hash_state_t), - ACE_HASH_SIZE, - AEAD_FLAG_NONE, - ace_hash, - (aead_hash_init_t)ace_hash_init, - (aead_hash_update_t)ace_hash_update, - (aead_hash_finalize_t)ace_hash_finalize, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/* Indices of where a rate byte is located in the state. We don't - * need this array any more because sliscp_light320_permute() operates - * on byte-swapped states where the rate bytes are contiguous in the - * first 8 bytes */ -/* -static unsigned char const ace_rate_posn[8] = { - 0, 1, 2, 3, 16, 17, 18, 19 -}; -*/ - -/** - * \brief Initializes the ACE state. - * - * \param state ACE permutation state. - * \param k Points to the 128-bit key. - * \param npub Points to the 128-bit nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
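The commented-out ace_rate_posn table above records why the swap exists: the eight rate bytes originally live at offsets 0..3 and 16..19 of the 40-byte state, and sliscp_light320_swap() moves them so they are contiguous at offsets 0..7. A byte-level sketch of that swap, matching the loads and stores of the AVR routine earlier in this patch (the _sketch suffix is illustrative):

#include <string.h>

/* Exchange state bytes 4..7 with bytes 16..19 of the sLiSCP-light-320
 * state so the eight rate bytes end up at offsets 0..7. */
static void sliscp_light320_swap_sketch(unsigned char block[40])
{
    unsigned char tmp[4];
    memcpy(tmp, block + 4, 4);
    memcpy(block + 4, block + 16, 4);
    memcpy(block + 16, tmp, 4);
}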
- */ -static void ace_init - (unsigned char state[ACE_STATE_SIZE], - const unsigned char *k, const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned temp; - - /* Initialize the state by interleaving the key and nonce */ - memcpy(state, k, 8); - memcpy(state + 8, npub, 8); - memcpy(state + 16, k + 8, 8); - memset(state + 24, 0, 8); - memcpy(state + 32, npub + 8, 8); - - /* Swap some of the state bytes to make the rate bytes contiguous */ - sliscp_light320_swap(state); - - /* Run the permutation to scramble the initial state */ - sliscp_light320_permute(state); - - /* Absorb the key in two further permutation operations */ - lw_xor_block(state, k, 8); - sliscp_light320_permute(state); - lw_xor_block(state, k + 8, 8); - sliscp_light320_permute(state); - - /* Absorb the associated data into the state */ - if (adlen != 0) { - while (adlen >= ACE_RATE) { - lw_xor_block(state, ad, ACE_RATE); - state[ACE_STATE_SIZE - 1] ^= 0x01; /* domain separation */ - sliscp_light320_permute(state); - ad += ACE_RATE; - adlen -= ACE_RATE; - } - temp = (unsigned)adlen; - lw_xor_block(state, ad, temp); - state[temp] ^= 0x80; /* padding */ - state[ACE_STATE_SIZE - 1] ^= 0x01; /* domain separation */ - sliscp_light320_permute(state); - } -} - -/** - * \brief Finalizes the ACE encryption or decryption operation. - * - * \param state ACE permutation state. - * \param k Points to the 128-bit key. - * \param tag Points to the 16 byte buffer to receive the computed tag. - */ -static void ace_finalize - (unsigned char state[ACE_STATE_SIZE], const unsigned char *k, - unsigned char *tag) -{ - /* Absorb the key into the state again */ - lw_xor_block(state, k, 8); - sliscp_light320_permute(state); - lw_xor_block(state, k + 8, 8); - sliscp_light320_permute(state); - - /* Swap the state bytes back to the canonical order */ - sliscp_light320_swap(state); - - /* Copy out the authentication tag */ - memcpy(tag, state, 8); - memcpy(tag + 8, state + 16, 8); -} - -int ace_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[ACE_STATE_SIZE]; - unsigned temp; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ACE_TAG_SIZE; - - /* Initialize the ACE state and absorb the associated data */ - ace_init(state, k, npub, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - while (mlen >= ACE_RATE) { - lw_xor_block_2_dest(c, state, m, ACE_RATE); - state[ACE_STATE_SIZE - 1] ^= 0x02; /* domain separation */ - sliscp_light320_permute(state); - c += ACE_RATE; - m += ACE_RATE; - mlen -= ACE_RATE; - } - temp = (unsigned)mlen; - lw_xor_block_2_dest(c, state, m, temp); - state[temp] ^= 0x80; /* padding */ - state[ACE_STATE_SIZE - 1] ^= 0x02; /* domain separation */ - sliscp_light320_permute(state); - c += mlen; - - /* Generate the authentication tag */ - ace_finalize(state, k, c); - return 0; -} - -int ace_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[ACE_STATE_SIZE]; - unsigned char *mtemp = m; - unsigned temp; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ACE_TAG_SIZE) - 
return -1; - *mlen = clen - ACE_TAG_SIZE; - - /* Initialize the ACE state and absorb the associated data */ - ace_init(state, k, npub, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= ACE_TAG_SIZE; - while (clen >= ACE_RATE) { - lw_xor_block_swap(m, state, c, ACE_RATE); - state[ACE_STATE_SIZE - 1] ^= 0x02; /* domain separation */ - sliscp_light320_permute(state); - c += ACE_RATE; - m += ACE_RATE; - clen -= ACE_RATE; - } - temp = (unsigned)clen; - lw_xor_block_swap(m, state, c, temp); - state[temp] ^= 0x80; /* padding */ - state[ACE_STATE_SIZE - 1] ^= 0x02; /* domain separation */ - sliscp_light320_permute(state); - c += clen; - - /* Finalize the ACE state and compare against the authentication tag */ - ace_finalize(state, k, state); - return aead_check_tag(mtemp, *mlen, state, c, ACE_TAG_SIZE); -} - -/* Pre-hashed version of the ACE-HASH initialization vector */ -static unsigned char const ace_hash_iv[ACE_STATE_SIZE] = { - 0xb9, 0x7d, 0xda, 0x3f, 0x66, 0x2c, 0xd1, 0xa6, - 0x65, 0xd1, 0x80, 0xd6, 0x49, 0xdc, 0xa1, 0x8c, - 0x0c, 0x5f, 0x0e, 0xca, 0x70, 0x37, 0x58, 0x75, - 0x29, 0x7d, 0xb0, 0xb0, 0x72, 0x73, 0xce, 0xa8, - 0x99, 0x71, 0xde, 0x8a, 0x9a, 0x65, 0x72, 0x24 -}; - -int ace_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - unsigned char state[ACE_STATE_SIZE]; - unsigned temp; - - /* Load the initialization vector and hash it, which can be pre-computed */ - /* - memset(state, 0, sizeof(state)); - state[8] = 0x80; - state[9] = 0x40; - state[10] = 0x40; - sliscp_light320_swap(state); - sliscp_light320_permute(state); - */ - memcpy(state, ace_hash_iv, ACE_STATE_SIZE); - - /* Absorb the input data */ - while (inlen >= ACE_RATE) { - lw_xor_block(state, in, ACE_RATE); - sliscp_light320_permute(state); - in += ACE_RATE; - inlen -= ACE_RATE; - } - temp = (unsigned)inlen; - lw_xor_block(state, in, temp); - state[temp] ^= 0x80; /* padding */ - sliscp_light320_permute(state); - - /* Squeeze out the hash value */ - memcpy(out, state, 8); - for (temp = 0; temp < 3; ++temp) { - out += 8; - sliscp_light320_permute(state); - memcpy(out, state, 8); - } - return 0; -} - -void ace_hash_init(ace_hash_state_t *state) -{ - memcpy(state->s.state, ace_hash_iv, ACE_STATE_SIZE); - state->s.count = 0; -} - -void ace_hash_update - (ace_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - unsigned len; - - /* Handle the left-over rate block from last time */ - if (state->s.count != 0) { - len = ACE_RATE - state->s.count; - if (len > inlen) - len = (unsigned)inlen; - lw_xor_block(state->s.state + state->s.count, in, len); - in += len; - inlen -= len; - state->s.count += len; - if (state->s.count >= ACE_RATE) { - sliscp_light320_permute(state->s.state); - state->s.count = 0; - } else { - /* Not enough input data yet to fill up the whole block */ - return; - } - } - - /* Process as many full rate blocks as we can */ - while (inlen >= ACE_RATE) { - lw_xor_block(state->s.state, in, ACE_RATE); - sliscp_light320_permute(state->s.state); - in += ACE_RATE; - inlen -= ACE_RATE; - } - - /* Handle any left-over data */ - len = (unsigned)inlen; - lw_xor_block(state->s.state, in, len); - state->s.count = len; -} - -void ace_hash_finalize(ace_hash_state_t *state, unsigned char *out) -{ - unsigned temp; - - /* Pad and hash the final input block */ - state->s.state[state->s.count] ^= 0x80; - sliscp_light320_permute(state->s.state); - state->s.count = 0; - - /* Squeeze out the hash value */ - memcpy(out, state->s.state, 9); - for (temp = 0; 
temp < 3; ++temp) { - out += 8; - sliscp_light320_permute(state->s.state); - memcpy(out, state->s.state, 8); - } -} diff --git a/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/ace.h b/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/ace.h deleted file mode 100644 index 4497927..0000000 --- a/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/ace.h +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ACE_H -#define LWCRYPTO_ACE_H - -#include "aead-common.h" - -/** - * \file ace.h - * \brief ACE authenticated encryption algorithm. - * - * ACE is an authenticated encryption algorithm with a 128-bit key, - * a 128-bit nonce, and a 128-bit tag. It uses a duplex construction - * on top of a 320-bit permutation. The permutation is a generalised - * version of sLiSCP-light, extended from 256 bits to 320 bits. - * ACE also has a companion hash algorithm with a 256-bit output. - * - * References: https://uwaterloo.ca/communications-security-lab/lwc/ace - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for ACE. - */ -#define ACE_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for ACE. - */ -#define ACE_TAG_SIZE 16 - -/** - * \brief Size of the nonce for ACE. - */ -#define ACE_NONCE_SIZE 16 - -/** - * \brief Size of the hash output for ACE-HASH. - */ -#define ACE_HASH_SIZE 32 - -/** - * \brief Meta-information block for the ACE cipher. - */ -extern aead_cipher_t const ace_cipher; - -/** - * \brief Meta-information block for the ACE-HASH hash algorithm. - */ -extern aead_hash_algorithm_t const ace_hash_algorithm; - -/** - * \brief State information for the ACE-HASH incremental hash mode. - */ -typedef union -{ - struct { - unsigned char state[40]; /**< Current hash state */ - unsigned char count; /**< Number of bytes in the current block */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} ace_hash_state_t; - -/** - * \brief Encrypts and authenticates a packet with ACE. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. 
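Putting these declarations together, a round trip through ACE looks as follows; this is only a usage sketch (the helper name and buffers are illustrative), relying on the fact that the ciphertext is the message length plus the 16-byte tag:

#include "ace.h"

/* Encrypt a message and immediately decrypt it again; returns 0 on success
 * or a negative value if encryption fails or the tag does not verify. */
static int ace_roundtrip(const unsigned char key[ACE_KEY_SIZE],
                         const unsigned char nonce[ACE_NONCE_SIZE],
                         const unsigned char *msg, unsigned long long mlen,
                         unsigned char *ct,   /* at least mlen + ACE_TAG_SIZE bytes */
                         unsigned char *pt)   /* at least mlen bytes */
{
    unsigned long long clen, plen;
    if (ace_aead_encrypt(ct, &clen, msg, mlen, 0, 0, 0, nonce, key) != 0)
        return -1;
    /* clen is now mlen + ACE_TAG_SIZE */
    return ace_aead_decrypt(pt, &plen, 0, ct, clen, 0, 0, nonce, key);
}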
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa ace_aead_decrypt() - */ -int ace_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ACE. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa ace_aead_encrypt() - */ -int ace_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with ACE-HASH to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * ACE_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int ace_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an ACE-HASH hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa ace_hash_update(), ace_hash_finalize(), ace_hash() - */ -void ace_hash_init(ace_hash_state_t *state); - -/** - * \brief Updates the ACE-HASH state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa ace_hash_init(), ace_hash_finalize() - */ -void ace_hash_update - (ace_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an ACE-HASH hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 32-byte hash value. 
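The incremental interface documented here follows the usual init/update/finalize order; a brief sketch (the helper name and the split into two parts are illustrative, the calls are the ones declared in this header):

#include "ace.h"

/* Hash a message supplied in two pieces and write the 32-byte digest. */
static void ace_hash_two_parts(unsigned char digest[ACE_HASH_SIZE],
                               const unsigned char *part1, unsigned long long len1,
                               const unsigned char *part2, unsigned long long len2)
{
    ace_hash_state_t st;
    ace_hash_init(&st);
    ace_hash_update(&st, part1, len1);
    ace_hash_update(&st, part2, len2);
    ace_hash_finalize(&st, digest);
}

/* The one-shot equivalent is: ace_hash(digest, msg, msglen); */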
- * - * \sa ace_hash_init(), ace_hash_update() - */ -void ace_hash_finalize(ace_hash_state_t *state, unsigned char *out); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/aead-common.c b/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/aead-common.h b/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. 
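Because aead_cipher_t carries the encryption and decryption entry points as function pointers, calling code can be written once against the meta block and handed ace_cipher (defined in ace.c above) or any other instance. A minimal dispatch sketch (the helper name is illustrative):

#include "aead-common.h"

/* Encrypt with whichever cipher the meta block describes; no associated
 * data and no secret nonce are supplied. */
static int encrypt_with(const aead_cipher_t *cipher,
                        unsigned char *c, unsigned long long *clen,
                        const unsigned char *m, unsigned long long mlen,
                        const unsigned char *npub, const unsigned char *k)
{
    return cipher->encrypt(c, clen, m, mlen, 0, 0, 0, npub, k);
}

/* e.g. encrypt_with(&ace_cipher, ct, &clen, msg, mlen, nonce, key); */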
Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
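The constant-time behaviour described here rests on the masking trick visible in aead-common.c above: XOR differences between the two tags are OR-ed into accum, and (accum - 1) >> 8 becomes an all-ones mask only when accum is zero. A small standalone illustration (assuming the usual arithmetic right shift on signed int, which the library code itself relies on):

#include <stdio.h>

int main(void)
{
    int equal_accum = 0;     /* tags identical: no byte differed */
    int diff_accum  = 0x5a;  /* tags differ: some XOR bits set */

    int keep_mask = (equal_accum - 1) >> 8;  /* all ones: plaintext kept */
    int zero_mask = (diff_accum  - 1) >> 8;  /* zero: plaintext wiped */

    /* ~mask is the return value: prints "0 -1" */
    printf("%d %d\n", ~keep_mask, ~zero_mask);
    return 0;
}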
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/api.h b/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/api.h deleted file mode 100644 index ae8c7f6..0000000 --- a/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/api.h +++ /dev/null @@ -1 +0,0 @@ -#define CRYPTO_BYTES 32 diff --git a/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/hash.c b/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/hash.c deleted file mode 100644 index 388f638..0000000 --- a/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/hash.c +++ /dev/null @@ -1,8 +0,0 @@ - -#include "ace.h" - -int crypto_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - return ace_hash(out, in, inlen); -} diff --git a/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/internal-sliscp-320-avr.S b/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/internal-sliscp-320-avr.S deleted file mode 100644 index 2522d5c..0000000 --- a/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/internal-sliscp-320-avr.S +++ /dev/null @@ -1,1767 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 96 -table_0: - .byte 7 - .byte 83 - .byte 67 - .byte 80 - .byte 40 - .byte 20 - .byte 10 - .byte 93 - .byte 228 - .byte 92 - .byte 174 - .byte 87 - .byte 155 - .byte 73 - .byte 94 - .byte 145 - .byte 72 - .byte 36 - .byte 224 - .byte 127 - .byte 204 - .byte 141 - .byte 198 - .byte 99 - .byte 209 - .byte 190 - .byte 50 - .byte 83 - .byte 169 - .byte 84 - .byte 26 - .byte 29 - .byte 78 - .byte 96 - .byte 48 - .byte 24 - .byte 34 - .byte 40 - .byte 117 - .byte 104 - .byte 52 - .byte 154 - .byte 247 - .byte 108 - .byte 37 - .byte 225 - .byte 112 - .byte 56 - .byte 98 - .byte 130 - .byte 253 - .byte 246 - .byte 123 - .byte 189 - .byte 150 - .byte 71 - .byte 249 - .byte 157 - .byte 206 - .byte 103 - .byte 113 - .byte 107 - .byte 118 - .byte 64 - .byte 32 - .byte 16 - .byte 170 - .byte 136 - .byte 160 - .byte 79 - .byte 39 - .byte 19 - .byte 43 - .byte 220 - .byte 176 - .byte 190 - .byte 95 - .byte 47 - .byte 233 - .byte 139 - .byte 9 - .byte 91 - .byte 173 - .byte 214 - .byte 207 - .byte 89 - .byte 30 - .byte 233 - .byte 116 - .byte 186 - .byte 183 - .byte 198 - .byte 173 - .byte 127 - .byte 63 - .byte 31 - - .text -.global sliscp_light320_permute - .type sliscp_light320_permute, @function -sliscp_light320_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 34 - ldd r21,Z+8 - ldd r20,Z+9 - ldd r19,Z+10 - ldd r18,Z+11 - ldd r27,Z+12 - ldd r26,Z+13 - ldd r23,Z+14 - ldd r22,Z+15 - ldd r5,Z+24 - ldd r4,Z+25 - ldd r3,Z+26 - ldd r2,Z+27 - ldd r9,Z+28 - ldd r8,Z+29 - ldd r7,Z+30 - ldd r6,Z+31 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - std Y+5,r22 - std Y+6,r23 - std Y+7,r26 - std Y+8,r27 - std Y+9,r2 - std Y+10,r3 - std Y+11,r4 - std Y+12,r5 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ld r21,Z - ldd r20,Z+1 - ldd r19,Z+2 - ldd r18,Z+3 - 
ldd r27,Z+16 - ldd r26,Z+17 - ldd r23,Z+18 - ldd r22,Z+19 - ldd r5,Z+4 - ldd r4,Z+5 - ldd r3,Z+6 - ldd r2,Z+7 - ldd r9,Z+20 - ldd r8,Z+21 - ldd r7,Z+22 - ldd r6,Z+23 - ldd r13,Z+32 - ldd r12,Z+33 - ldd r11,Z+34 - ldd r10,Z+35 - ldd r25,Z+36 - ldd r24,Z+37 - ldd r15,Z+38 - ldd r14,Z+39 - push r31 - push r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r16,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r16 -#endif - ldi r30,0 -60: -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - inc r30 - push r30 - mov r30,r16 - movw r16,r18 - mov r1,r20 - mov r0,r21 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r18 - and r17,r19 - and r1,r20 - and r0,r21 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - com r23 - com r26 - com r27 - ldi r16,255 - lsr r30 - rol r16 - eor r22,r16 - movw r16,r22 - mov r1,r26 - mov r0,r27 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r22 - and r17,r23 - and r1,r26 - and r0,r27 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - com r19 - com r20 - com r21 - ldi r16,255 - lsr r30 - rol r16 - eor r18,r16 - movw r16,r18 - mov r1,r20 - mov r0,r21 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r18 - and r17,r19 - and r1,r20 - and r0,r21 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - com r23 - com r26 - com r27 - ldi r16,255 - lsr r30 - rol r16 - eor r22,r16 - movw r16,r22 - mov r1,r26 - mov r0,r27 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r22 - and r17,r23 - and r1,r26 - and r0,r27 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - com r19 - com r20 - com r21 - ldi r16,255 - lsr r30 - rol r16 - eor r18,r16 - movw r16,r18 - mov r1,r20 - mov r0,r21 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r18 - and r17,r19 - and r1,r20 - and r0,r21 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - com r23 - com r26 - 
com r27 - ldi r16,255 - lsr r30 - rol r16 - eor r22,r16 - movw r16,r22 - mov r1,r26 - mov r0,r27 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r22 - and r17,r23 - and r1,r26 - and r0,r27 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - com r19 - com r20 - com r21 - ldi r16,255 - lsr r30 - rol r16 - eor r18,r16 - movw r16,r18 - mov r1,r20 - mov r0,r21 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r18 - and r17,r19 - and r1,r20 - and r0,r21 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - com r23 - com r26 - com r27 - ldi r16,255 - lsr r30 - rol r16 - eor r22,r16 - movw r16,r22 - mov r1,r26 - mov r0,r27 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r22 - and r17,r23 - and r1,r26 - and r0,r27 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - com r19 - com r20 - com r21 - ldi r16,255 - lsr r30 - rol r16 - eor r18,r16 - pop r30 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - inc r30 - push r30 - mov r30,r16 - movw r16,r2 - mov r1,r4 - mov r0,r5 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r6,r16 - eor r7,r17 - eor r8,r1 - eor r9,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r2 - and r17,r3 - and r1,r4 - and r0,r5 - eor r6,r16 - eor r7,r17 - eor r8,r1 - eor r9,r0 - com r7 - com r8 - com r9 - ldi r16,255 - lsr r30 - rol r16 - eor r6,r16 - movw r16,r6 - mov r1,r8 - mov r0,r9 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r2,r16 - eor r3,r17 - eor r4,r1 - eor r5,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r6 - and r17,r7 - and r1,r8 - and r0,r9 - eor r2,r16 - eor r3,r17 - eor r4,r1 - eor r5,r0 - com r3 - com r4 - com r5 - ldi r16,255 - lsr r30 - rol r16 - eor r2,r16 - movw r16,r2 - mov r1,r4 - mov r0,r5 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r6,r16 - eor r7,r17 - eor r8,r1 - eor r9,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r2 - 
and r17,r3 - and r1,r4 - and r0,r5 - eor r6,r16 - eor r7,r17 - eor r8,r1 - eor r9,r0 - com r7 - com r8 - com r9 - ldi r16,255 - lsr r30 - rol r16 - eor r6,r16 - movw r16,r6 - mov r1,r8 - mov r0,r9 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r2,r16 - eor r3,r17 - eor r4,r1 - eor r5,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r6 - and r17,r7 - and r1,r8 - and r0,r9 - eor r2,r16 - eor r3,r17 - eor r4,r1 - eor r5,r0 - com r3 - com r4 - com r5 - ldi r16,255 - lsr r30 - rol r16 - eor r2,r16 - movw r16,r2 - mov r1,r4 - mov r0,r5 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r6,r16 - eor r7,r17 - eor r8,r1 - eor r9,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r2 - and r17,r3 - and r1,r4 - and r0,r5 - eor r6,r16 - eor r7,r17 - eor r8,r1 - eor r9,r0 - com r7 - com r8 - com r9 - ldi r16,255 - lsr r30 - rol r16 - eor r6,r16 - movw r16,r6 - mov r1,r8 - mov r0,r9 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r2,r16 - eor r3,r17 - eor r4,r1 - eor r5,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r6 - and r17,r7 - and r1,r8 - and r0,r9 - eor r2,r16 - eor r3,r17 - eor r4,r1 - eor r5,r0 - com r3 - com r4 - com r5 - ldi r16,255 - lsr r30 - rol r16 - eor r2,r16 - movw r16,r2 - mov r1,r4 - mov r0,r5 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r6,r16 - eor r7,r17 - eor r8,r1 - eor r9,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r2 - and r17,r3 - and r1,r4 - and r0,r5 - eor r6,r16 - eor r7,r17 - eor r8,r1 - eor r9,r0 - com r7 - com r8 - com r9 - ldi r16,255 - lsr r30 - rol r16 - eor r6,r16 - movw r16,r6 - mov r1,r8 - mov r0,r9 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r2,r16 - eor r3,r17 - eor r4,r1 - eor r5,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r6 - and r17,r7 - and r1,r8 - and r0,r9 - eor r2,r16 - eor r3,r17 - eor r4,r1 - eor r5,r0 - com r3 - com r4 - com r5 - ldi r16,255 - lsr r30 - rol r16 - eor r2,r16 - pop r30 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - inc r30 - push r30 - mov r30,r16 - movw r16,r10 - mov r1,r12 - mov r0,r13 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r14,r16 - eor r15,r17 - eor r24,r1 - eor r25,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - 
rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r10 - and r17,r11 - and r1,r12 - and r0,r13 - eor r14,r16 - eor r15,r17 - eor r24,r1 - eor r25,r0 - com r15 - com r24 - com r25 - ldi r16,255 - lsr r30 - rol r16 - eor r14,r16 - movw r16,r14 - mov r1,r24 - mov r0,r25 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r10,r16 - eor r11,r17 - eor r12,r1 - eor r13,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r14 - and r17,r15 - and r1,r24 - and r0,r25 - eor r10,r16 - eor r11,r17 - eor r12,r1 - eor r13,r0 - com r11 - com r12 - com r13 - ldi r16,255 - lsr r30 - rol r16 - eor r10,r16 - movw r16,r10 - mov r1,r12 - mov r0,r13 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r14,r16 - eor r15,r17 - eor r24,r1 - eor r25,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r10 - and r17,r11 - and r1,r12 - and r0,r13 - eor r14,r16 - eor r15,r17 - eor r24,r1 - eor r25,r0 - com r15 - com r24 - com r25 - ldi r16,255 - lsr r30 - rol r16 - eor r14,r16 - movw r16,r14 - mov r1,r24 - mov r0,r25 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r10,r16 - eor r11,r17 - eor r12,r1 - eor r13,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r14 - and r17,r15 - and r1,r24 - and r0,r25 - eor r10,r16 - eor r11,r17 - eor r12,r1 - eor r13,r0 - com r11 - com r12 - com r13 - ldi r16,255 - lsr r30 - rol r16 - eor r10,r16 - movw r16,r10 - mov r1,r12 - mov r0,r13 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r14,r16 - eor r15,r17 - eor r24,r1 - eor r25,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r10 - and r17,r11 - and r1,r12 - and r0,r13 - eor r14,r16 - eor r15,r17 - eor r24,r1 - eor r25,r0 - com r15 - com r24 - com r25 - ldi r16,255 - lsr r30 - rol r16 - eor r14,r16 - movw r16,r14 - mov r1,r24 - mov r0,r25 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r10,r16 - eor r11,r17 - eor r12,r1 - eor r13,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r14 - and r17,r15 - and r1,r24 - and r0,r25 - eor r10,r16 - eor r11,r17 - eor r12,r1 - eor r13,r0 - com r11 - com r12 - com r13 - ldi r16,255 - lsr r30 - rol r16 - eor r10,r16 - movw r16,r10 - mov r1,r12 - mov r0,r13 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r14,r16 - eor r15,r17 - eor r24,r1 - eor r25,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 
- and r16,r10 - and r17,r11 - and r1,r12 - and r0,r13 - eor r14,r16 - eor r15,r17 - eor r24,r1 - eor r25,r0 - com r15 - com r24 - com r25 - ldi r16,255 - lsr r30 - rol r16 - eor r14,r16 - movw r16,r14 - mov r1,r24 - mov r0,r25 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - eor r10,r16 - eor r11,r17 - eor r12,r1 - eor r13,r0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - bst r0,7 - lsl r16 - rol r17 - rol r1 - rol r0 - bld r16,0 - and r16,r14 - and r17,r15 - and r1,r24 - and r0,r25 - eor r10,r16 - eor r11,r17 - eor r12,r1 - eor r13,r0 - com r11 - com r12 - com r13 - ldi r16,255 - lsr r30 - rol r16 - eor r10,r16 - pop r30 - ldd r16,Y+1 - ldd r17,Y+2 - ldd r1,Y+3 - ldd r0,Y+4 - eor r16,r2 - eor r17,r3 - eor r1,r4 - eor r0,r5 - com r16 - com r17 - com r1 - com r0 - std Y+1,r16 - std Y+2,r17 - std Y+3,r1 - std Y+4,r0 - ldd r16,Y+5 - ldd r17,Y+6 - ldd r1,Y+7 - ldd r0,Y+8 - eor r16,r6 - eor r17,r7 - eor r1,r8 - eor r0,r9 - com r17 - com r1 - com r0 - std Y+6,r17 - std Y+7,r1 - std Y+8,r0 -#if defined(RAMPZ) - elpm r0,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r0,Z -#elif defined(__AVR_TINY__) - ld r0,Z -#else - lpm -#endif - inc r30 - eor r16,r0 - std Y+5,r16 - ldd r16,Y+9 - ldd r17,Y+10 - ldd r1,Y+11 - ldd r0,Y+12 - eor r16,r10 - eor r17,r11 - eor r1,r12 - eor r0,r13 - com r16 - com r17 - com r1 - com r0 - std Y+9,r16 - std Y+10,r17 - std Y+11,r1 - std Y+12,r0 - ldd r16,Y+13 - ldd r17,Y+14 - ldd r1,Y+15 - ldd r0,Y+16 - eor r16,r14 - eor r17,r15 - eor r1,r24 - eor r0,r25 - com r17 - com r1 - com r0 - std Y+14,r17 - std Y+15,r1 - std Y+16,r0 -#if defined(RAMPZ) - elpm r0,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r0,Z -#elif defined(__AVR_TINY__) - ld r0,Z -#else - lpm -#endif - inc r30 - eor r16,r0 - std Y+13,r16 - eor r10,r18 - eor r11,r19 - eor r12,r20 - eor r13,r21 - com r10 - com r11 - com r12 - com r13 - eor r14,r22 - eor r15,r23 - eor r24,r26 - eor r25,r27 - com r15 - com r24 - com r25 -#if defined(RAMPZ) - elpm r0,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r0,Z -#elif defined(__AVR_TINY__) - ld r0,Z -#else - lpm -#endif - inc r30 - eor r14,r0 - movw r16,r10 - mov r1,r12 - mov r0,r13 - ldd r10,Y+1 - ldd r11,Y+2 - ldd r12,Y+3 - ldd r13,Y+4 - std Y+1,r2 - std Y+2,r3 - std Y+3,r4 - std Y+4,r5 - movw r2,r18 - movw r4,r20 - ldd r18,Y+9 - ldd r19,Y+10 - ldd r20,Y+11 - ldd r21,Y+12 - std Y+9,r16 - std Y+10,r17 - std Y+11,r1 - std Y+12,r0 - movw r16,r14 - mov r1,r24 - mov r0,r25 - ldd r14,Y+5 - ldd r15,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - movw r6,r22 - movw r8,r26 - ldd r22,Y+13 - ldd r23,Y+14 - ldd r26,Y+15 - ldd r27,Y+16 - std Y+13,r16 - std Y+14,r17 - std Y+15,r1 - std Y+16,r0 - ldi r17,96 - cpse r30,r17 - rjmp 60b -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - st Z,r21 - std Z+1,r20 - std Z+2,r19 - std Z+3,r18 - std Z+16,r27 - std Z+17,r26 - std Z+18,r23 - std Z+19,r22 - std Z+4,r5 - std Z+5,r4 - std Z+6,r3 - std Z+7,r2 - std Z+20,r9 - std Z+21,r8 - std Z+22,r7 - std Z+23,r6 - std Z+32,r13 - std Z+33,r12 - std Z+34,r11 - std Z+35,r10 - std Z+36,r25 - std Z+37,r24 - std Z+38,r15 - std Z+39,r14 - ldd r18,Y+1 - ldd r19,Y+2 - ldd r20,Y+3 - ldd r21,Y+4 - ldd r22,Y+5 - ldd r23,Y+6 - ldd r26,Y+7 - ldd r27,Y+8 - ldd r2,Y+9 - ldd r3,Y+10 - ldd r4,Y+11 - ldd r5,Y+12 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - std Z+8,r21 - std Z+9,r20 - std Z+10,r19 - 
std Z+11,r18 - std Z+12,r27 - std Z+13,r26 - std Z+14,r23 - std Z+15,r22 - std Z+24,r5 - std Z+25,r4 - std Z+26,r3 - std Z+27,r2 - std Z+28,r9 - std Z+29,r8 - std Z+30,r7 - std Z+31,r6 - adiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - eor r1,r1 - ret - .size sliscp_light320_permute, .-sliscp_light320_permute - - .text -.global sliscp_light320_swap - .type sliscp_light320_swap, @function -sliscp_light320_swap: - movw r30,r24 -.L__stack_usage = 2 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - std Z+4,r22 - std Z+5,r23 - std Z+6,r26 - std Z+7,r27 - ret - .size sliscp_light320_swap, .-sliscp_light320_swap - -#endif diff --git a/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/internal-sliscp-light.c b/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/internal-sliscp-light.c deleted file mode 100644 index dd3a688..0000000 --- a/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/internal-sliscp-light.c +++ /dev/null @@ -1,413 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-sliscp-light.h" - -#if !defined(__AVR__) - -/** - * \brief Performs one round of the Simeck-64 block cipher. - * - * \param x Left half of the 64-bit block. - * \param y Right half of the 64-bit block. - */ -#define simeck64_round(x, y) \ - do { \ - (y) ^= (leftRotate5((x)) & (x)) ^ leftRotate1((x)) ^ \ - 0xFFFFFFFEU ^ (_rc & 1); \ - _rc >>= 1; \ - } while (0) - -/** - * \brief Encrypts a 64-bit block with the 8 round version of Simeck-64. - * - * \param x Left half of the 64-bit block. - * \param y Right half of the 64-bit block. - * \param rc Round constants for the 8 rounds, 1 bit per round. - * - * It is assumed that the two halves have already been converted from - * big-endian to host byte order before calling this function. The output - * halves will also be in host byte order. 
- */ -#define simeck64_box(x, y, rc) \ - do { \ - unsigned char _rc = (rc); \ - simeck64_round(x, y); /* Round 1 */ \ - simeck64_round(y, x); /* Round 2 */ \ - simeck64_round(x, y); /* Round 3 */ \ - simeck64_round(y, x); /* Round 4 */ \ - simeck64_round(x, y); /* Round 5 */ \ - simeck64_round(y, x); /* Round 6 */ \ - simeck64_round(x, y); /* Round 7 */ \ - simeck64_round(y, x); /* Round 8 */ \ - } while (0) - -/* Helper macros for 48-bit left rotations */ -#define leftRotate5_48(x) (((x) << 5) | ((x) >> 19)) -#define leftRotate1_48(x) (((x) << 1) | ((x) >> 23)) - -/** - * \brief Performs one round of the Simeck-48 block cipher. - * - * \param x Left half of the 48-bit block. - * \param y Right half of the 48-bit block. - */ -#define simeck48_round(x, y) \ - do { \ - (y) ^= (leftRotate5_48((x)) & (x)) ^ leftRotate1_48((x)) ^ \ - 0x00FFFFFEU ^ (_rc & 1); \ - (y) &= 0x00FFFFFFU; \ - _rc >>= 1; \ - } while (0) - -/** - * \brief Encrypts a 48-bit block with the 6 round version of Simeck-48. - * - * \param x Left half of the 48-bit block. - * \param y Right half of the 48-bit block. - * \param rc Round constants for the 8 rounds, 1 bit per round. - * - * It is assumed that the two halves have already been converted from - * big-endian to host byte order before calling this function. The output - * halves will also be in host byte order. - */ -#define simeck48_box(x, y, rc) \ - do { \ - unsigned char _rc = (rc); \ - simeck48_round(x, y); /* Round 1 */ \ - simeck48_round(y, x); /* Round 2 */ \ - simeck48_round(x, y); /* Round 3 */ \ - simeck48_round(y, x); /* Round 4 */ \ - simeck48_round(x, y); /* Round 5 */ \ - simeck48_round(y, x); /* Round 6 */ \ - } while (0) - -/* Interleaved rc0, rc1, sc0, and sc1 values for each round */ -static unsigned char const sliscp_light256_RC[18 * 4] = { - 0x0f, 0x47, 0x08, 0x64, 0x04, 0xb2, 0x86, 0x6b, - 0x43, 0xb5, 0xe2, 0x6f, 0xf1, 0x37, 0x89, 0x2c, - 0x44, 0x96, 0xe6, 0xdd, 0x73, 0xee, 0xca, 0x99, - 0xe5, 0x4c, 0x17, 0xea, 0x0b, 0xf5, 0x8e, 0x0f, - 0x47, 0x07, 0x64, 0x04, 0xb2, 0x82, 0x6b, 0x43, - 0xb5, 0xa1, 0x6f, 0xf1, 0x37, 0x78, 0x2c, 0x44, - 0x96, 0xa2, 0xdd, 0x73, 0xee, 0xb9, 0x99, 0xe5, - 0x4c, 0xf2, 0xea, 0x0b, 0xf5, 0x85, 0x0f, 0x47, - 0x07, 0x23, 0x04, 0xb2, 0x82, 0xd9, 0x43, 0xb5 -}; - -void sliscp_light256_permute_spix(unsigned char block[32], unsigned rounds) -{ - const unsigned char *rc = sliscp_light256_RC; - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t t0, t1; - - /* Load the block into local state variables */ - x0 = be_load_word32(block); - x1 = be_load_word32(block + 4); - x2 = be_load_word32(block + 8); - x3 = be_load_word32(block + 24); /* Assumes the block is pre-swapped */ - x4 = be_load_word32(block + 16); - x5 = be_load_word32(block + 20); - x6 = be_load_word32(block + 12); - x7 = be_load_word32(block + 28); - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds, rc += 4) { - /* Apply Simeck-64 to two of the 64-bit sub-blocks */ - simeck64_box(x2, x3, rc[0]); - simeck64_box(x6, x7, rc[1]); - - /* Add step constants */ - x0 ^= 0xFFFFFFFFU; - x1 ^= 0xFFFFFF00U ^ rc[2]; - x4 ^= 0xFFFFFFFFU; - x5 ^= 0xFFFFFF00U ^ rc[3]; - - /* Mix the sub-blocks */ - t0 = x0 ^ x2; - t1 = x1 ^ x3; - x0 = x2; - x1 = x3; - x2 = x4 ^ x6; - x3 = x5 ^ x7; - x4 = x6; - x5 = x7; - x6 = t0; - x7 = t1; - } - - /* Store the state back into the block */ - be_store_word32(block, x0); - be_store_word32(block + 4, x1); - be_store_word32(block + 8, x2); - be_store_word32(block + 24, x3); /* Assumes the block is pre-swapped */ - be_store_word32(block 
+ 16, x4); - be_store_word32(block + 20, x5); - be_store_word32(block + 12, x6); - be_store_word32(block + 28, x7); -} - -void sliscp_light256_swap_spix(unsigned char block[32]) -{ - uint32_t t1, t2; - t1 = le_load_word32(block + 12); - t2 = le_load_word32(block + 24); - le_store_word32(block + 24, t1); - le_store_word32(block + 12, t2); -} - -void sliscp_light256_permute_spoc(unsigned char block[32]) -{ - const unsigned char *rc = sliscp_light256_RC; - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t t0, t1; - unsigned round; - - /* Load the block into local state variables */ - x0 = be_load_word32(block); - x1 = be_load_word32(block + 4); - x2 = be_load_word32(block + 16); /* Assumes the block is pre-swapped */ - x3 = be_load_word32(block + 20); - x4 = be_load_word32(block + 8); - x5 = be_load_word32(block + 12); - x6 = be_load_word32(block + 24); - x7 = be_load_word32(block + 28); - - /* Perform all permutation rounds */ - for (round = 0; round < 18; ++round, rc += 4) { - /* Apply Simeck-64 to two of the 64-bit sub-blocks */ - simeck64_box(x2, x3, rc[0]); - simeck64_box(x6, x7, rc[1]); - - /* Add step constants */ - x0 ^= 0xFFFFFFFFU; - x1 ^= 0xFFFFFF00U ^ rc[2]; - x4 ^= 0xFFFFFFFFU; - x5 ^= 0xFFFFFF00U ^ rc[3]; - - /* Mix the sub-blocks */ - t0 = x0 ^ x2; - t1 = x1 ^ x3; - x0 = x2; - x1 = x3; - x2 = x4 ^ x6; - x3 = x5 ^ x7; - x4 = x6; - x5 = x7; - x6 = t0; - x7 = t1; - } - - /* Store the state back into the block */ - be_store_word32(block, x0); - be_store_word32(block + 4, x1); - be_store_word32(block + 16, x2); /* Assumes the block is pre-swapped */ - be_store_word32(block + 20, x3); - be_store_word32(block + 8, x4); - be_store_word32(block + 12, x5); - be_store_word32(block + 24, x6); - be_store_word32(block + 28, x7); -} - -void sliscp_light256_swap_spoc(unsigned char block[32]) -{ - uint64_t t1, t2; - t1 = le_load_word64(block + 8); - t2 = le_load_word64(block + 16); - le_store_word64(block + 16, t1); - le_store_word64(block + 8, t2); -} - -/* Load a big-endian 24-bit word from a byte buffer */ -#define be_load_word24(ptr) \ - ((((uint32_t)((ptr)[0])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[2]))) - -/* Store a big-endian 24-bit word into a byte buffer */ -#define be_store_word24(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 16); \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)_x; \ - } while (0) - -void sliscp_light192_permute(unsigned char block[24]) -{ - /* Interleaved rc0, rc1, sc0, and sc1 values for each round */ - static unsigned char const RC[18 * 4] = { - 0x07, 0x27, 0x08, 0x29, 0x04, 0x34, 0x0c, 0x1d, - 0x06, 0x2e, 0x0a, 0x33, 0x25, 0x19, 0x2f, 0x2a, - 0x17, 0x35, 0x38, 0x1f, 0x1c, 0x0f, 0x24, 0x10, - 0x12, 0x08, 0x36, 0x18, 0x3b, 0x0c, 0x0d, 0x14, - 0x26, 0x0a, 0x2b, 0x1e, 0x15, 0x2f, 0x3e, 0x31, - 0x3f, 0x38, 0x01, 0x09, 0x20, 0x24, 0x21, 0x2d, - 0x30, 0x36, 0x11, 0x1b, 0x28, 0x0d, 0x39, 0x16, - 0x3c, 0x2b, 0x05, 0x3d, 0x22, 0x3e, 0x27, 0x03, - 0x13, 0x01, 0x34, 0x02, 0x1a, 0x21, 0x2e, 0x23 - }; - const unsigned char *rc = RC; - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t t0, t1; - unsigned round; - - /* Load the block into local state variables. 
Each 24-bit block is - * placed into a separate 32-bit word which improves efficiency below */ - x0 = be_load_word24(block); - x1 = be_load_word24(block + 3); - x2 = be_load_word24(block + 6); - x3 = be_load_word24(block + 9); - x4 = be_load_word24(block + 12); - x5 = be_load_word24(block + 15); - x6 = be_load_word24(block + 18); - x7 = be_load_word24(block + 21); - - /* Perform all permutation rounds */ - for (round = 0; round < 18; ++round, rc += 4) { - /* Apply Simeck-48 to two of the 48-bit sub-blocks */ - simeck48_box(x2, x3, rc[0]); - simeck48_box(x6, x7, rc[1]); - - /* Add step constants */ - x0 ^= 0x00FFFFFFU; - x1 ^= 0x00FFFF00U ^ rc[2]; - x4 ^= 0x00FFFFFFU; - x5 ^= 0x00FFFF00U ^ rc[3]; - - /* Mix the sub-blocks */ - t0 = x0 ^ x2; - t1 = x1 ^ x3; - x0 = x2; - x1 = x3; - x2 = x4 ^ x6; - x3 = x5 ^ x7; - x4 = x6; - x5 = x7; - x6 = t0; - x7 = t1; - } - - /* Store the state back into the block */ - be_store_word24(block, x0); - be_store_word24(block + 3, x1); - be_store_word24(block + 6, x2); - be_store_word24(block + 9, x3); - be_store_word24(block + 12, x4); - be_store_word24(block + 15, x5); - be_store_word24(block + 18, x6); - be_store_word24(block + 21, x7); -} - -void sliscp_light320_permute(unsigned char block[40]) -{ - /* Interleaved rc0, rc1, rc2, sc0, sc1, and sc2 values for each round */ - static unsigned char const RC[16 * 6] = { - 0x07, 0x53, 0x43, 0x50, 0x28, 0x14, 0x0a, 0x5d, - 0xe4, 0x5c, 0xae, 0x57, 0x9b, 0x49, 0x5e, 0x91, - 0x48, 0x24, 0xe0, 0x7f, 0xcc, 0x8d, 0xc6, 0x63, - 0xd1, 0xbe, 0x32, 0x53, 0xa9, 0x54, 0x1a, 0x1d, - 0x4e, 0x60, 0x30, 0x18, 0x22, 0x28, 0x75, 0x68, - 0x34, 0x9a, 0xf7, 0x6c, 0x25, 0xe1, 0x70, 0x38, - 0x62, 0x82, 0xfd, 0xf6, 0x7b, 0xbd, 0x96, 0x47, - 0xf9, 0x9d, 0xce, 0x67, 0x71, 0x6b, 0x76, 0x40, - 0x20, 0x10, 0xaa, 0x88, 0xa0, 0x4f, 0x27, 0x13, - 0x2b, 0xdc, 0xb0, 0xbe, 0x5f, 0x2f, 0xe9, 0x8b, - 0x09, 0x5b, 0xad, 0xd6, 0xcf, 0x59, 0x1e, 0xe9, - 0x74, 0xba, 0xb7, 0xc6, 0xad, 0x7f, 0x3f, 0x1f - }; - const unsigned char *rc = RC; - uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9; - uint32_t t0, t1; - unsigned round; - - /* Load the block into local state variables */ - x0 = be_load_word32(block); - x1 = be_load_word32(block + 16); /* Assumes the block is pre-swapped */ - x2 = be_load_word32(block + 8); - x3 = be_load_word32(block + 12); - x4 = be_load_word32(block + 4); - x5 = be_load_word32(block + 20); - x6 = be_load_word32(block + 24); - x7 = be_load_word32(block + 28); - x8 = be_load_word32(block + 32); - x9 = be_load_word32(block + 36); - - /* Perform all permutation rounds */ - for (round = 0; round < 16; ++round, rc += 6) { - /* Apply Simeck-64 to three of the 64-bit sub-blocks */ - simeck64_box(x0, x1, rc[0]); - simeck64_box(x4, x5, rc[1]); - simeck64_box(x8, x9, rc[2]); - x6 ^= x8; - x7 ^= x9; - x2 ^= x4; - x3 ^= x5; - x8 ^= x0; - x9 ^= x1; - - /* Add step constants */ - x2 ^= 0xFFFFFFFFU; - x3 ^= 0xFFFFFF00U ^ rc[3]; - x6 ^= 0xFFFFFFFFU; - x7 ^= 0xFFFFFF00U ^ rc[4]; - x8 ^= 0xFFFFFFFFU; - x9 ^= 0xFFFFFF00U ^ rc[5]; - - /* Rotate the sub-blocks */ - t0 = x8; - t1 = x9; - x8 = x2; - x9 = x3; - x2 = x4; - x3 = x5; - x4 = x0; - x5 = x1; - x0 = x6; - x1 = x7; - x6 = t0; - x7 = t1; - } - - /* Store the state back into the block */ - be_store_word32(block, x0); - be_store_word32(block + 16, x1); /* Assumes the block is pre-swapped */ - be_store_word32(block + 8, x2); - be_store_word32(block + 12, x3); - be_store_word32(block + 4, x4); - be_store_word32(block + 20, x5); - be_store_word32(block + 24, x6); - be_store_word32(block + 28, x7); - 
be_store_word32(block + 32, x8); - be_store_word32(block + 36, x9); -} - -void sliscp_light320_swap(unsigned char block[40]) -{ - uint32_t t1, t2; - t1 = le_load_word32(block + 4); - t2 = le_load_word32(block + 16); - le_store_word32(block + 16, t1); - le_store_word32(block + 4, t2); -} - -#endif /* !__AVR__ */ diff --git a/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/internal-sliscp-light.h b/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/internal-sliscp-light.h deleted file mode 100644 index 8a5e8d5..0000000 --- a/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/internal-sliscp-light.h +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SLISCP_LIGHT_H -#define LW_INTERNAL_SLISCP_LIGHT_H - -/** - * \file internal-sliscp-light.h - * \brief sLiSCP-light permutation - * - * There are three variants of sLiSCP-light in use in the NIST submissions: - * - * \li sLiSCP-light-256 with a 256-bit block size, used in SPIX and SpoC. - * \li sLiSCP-light-192 with a 192-bit block size, used in SpoC. - * \li sLiSCP-light-320 with a 320-bit block size, used in ACE. - * - * References: https://uwaterloo.ca/communications-security-lab/lwc/ace, - * https://uwaterloo.ca/communications-security-lab/lwc/spix, - * https://uwaterloo.ca/communications-security-lab/lwc/spoc - */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the state for sLiSCP-light-256. - */ -#define SLISCP_LIGHT256_STATE_SIZE 32 - -/** - * \brief Size of the state for sLiSCP-light-192. - */ -#define SLISCP_LIGHT192_STATE_SIZE 24 - -/** - * \brief Size of the state for sLiSCP-light-320. - */ -#define SLISCP_LIGHT320_STATE_SIZE 40 - -/** - * \brief Performs the sLiSCP-light permutation on a 256-bit block. - * - * \param block Points to the block to be permuted. - * \param rounds Number of rounds to be performed, usually 9 or 18. - * - * The bytes of the block are assumed to be rearranged to match the - * requirements of the SPIX cipher. SPIX places the rate bytes at - * positions 8, 9, 10, 11, 24, 25, 26, and 27. - * - * This function assumes that bytes 24-27 have been pre-swapped with - * bytes 12-15 so that the rate portion of the state is contiguous. - * - * The sliscp_light256_swap_spix() function can be used to switch - * between the canonical order and the pre-swapped order. 
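As a quick illustration of the pre-swapped calling convention described above, the following sketch shows one way a caller might absorb 8-byte rate blocks for SPIX: switch to the pre-swapped layout once, XOR data into the now-contiguous rate bytes 8..15, permute, and swap back at the end. The helper names spix_absorb_block and spix_absorb_example are hypothetical and the 18-round count simply follows the "usually 9 or 18" note above.

#include "internal-sliscp-light.h"

/* Absorb one 8-byte block into the pre-swapped SPIX state */
static void spix_absorb_block(unsigned char state[32],
                              const unsigned char block[8],
                              unsigned rounds)
{
    unsigned i;
    /* After sliscp_light256_swap_spix(), the SPIX rate is bytes 8..15 */
    for (i = 0; i < 8; ++i)
        state[8 + i] ^= block[i];
    sliscp_light256_permute_spix(state, rounds);
}

/* Typical flow: swap into the working layout, absorb, swap back */
void spix_absorb_example(unsigned char state[32],
                         const unsigned char *data, unsigned nblocks)
{
    unsigned i;
    sliscp_light256_swap_spix(state);      /* canonical -> pre-swapped */
    for (i = 0; i < nblocks; ++i)
        spix_absorb_block(state, data + 8 * i, 18);
    sliscp_light256_swap_spix(state);      /* pre-swapped -> canonical */
}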
- * - * \sa sliscp_light256_swap_spix() - */ -void sliscp_light256_permute_spix(unsigned char block[32], unsigned rounds); - -/** - * \brief Swaps rate bytes in a sLiSCP-light 256-bit block for SPIX. - * - * \param block Points to the block to be rate-swapped. - * - * \sa sliscp_light256_permute_spix() - */ -void sliscp_light256_swap_spix(unsigned char block[32]); - -/** - * \brief Performs the sLiSCP-light permutation on a 256-bit block. - * - * \param block Points to the block to be permuted. - * - * The bytes of the block are assumed to be rearranged to match the - * requirements of the SpoC-128 cipher. SpoC-128 interleaves the - * rate bytes and the mask bytes. This version assumes that the - * rate and mask are in contiguous bytes of the state. - * - * SpoC-128 absorbs bytes using the mask bytes of the state at offsets - * 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, and 31. - * It squeezes bytes using the rate bytes of the state at offsets - * 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, and 23. - * - * This function assumes that bytes 8-15 have been pre-swapped with 16-23 - * so that the rate and mask portions of the state are contiguous. - * - * The sliscp_light256_swap_spoc() function can be used to switch - * between the canonical order and the pre-swapped order. - * - * \sa sliscp_light256_swap_spoc() - */ -void sliscp_light256_permute_spoc(unsigned char block[32]); - -/** - * \brief Swaps rate bytes in a sLiSCP-light 256-bit block for SpoC-128. - * - * \param block Points to the block to be rate-swapped. - * - * \sa sliscp_light256_permute_spoc() - */ -void sliscp_light256_swap_spoc(unsigned char block[32]); - -/** - * \brief Performs the sLiSCP-light permutation on a 192-bit block. - * - * \param block Points to the block to be permuted. - */ -void sliscp_light192_permute(unsigned char block[24]); - -/** - * \brief Performs the sLiSCP-light permutation on a 320-bit block. - * - * \param block Points to the block to be permuted. - * - * The ACE specification refers to this permutation as "ACE" but that - * can be confused with the name of the AEAD mode so we call this - * permutation "sLiSCP-light-320" instead. - * - * ACE absorbs and squeezes data at the rate bytes 0, 1, 2, 3, 16, 17, 18, 19. - * Efficiency can suffer because of the discontinuity in rate byte positions. - * - * To counteract this, we assume that the input to the permutation has been - * pre-swapped: bytes 4, 5, 6, 7 are swapped with bytes 16, 17, 18, 19 so - * that the rate is contiguous at the start of the state. - * - * The sliscp_light320_swap() function can be used to switch between the - * canonical order and the pre-swapped order. - * - * \sa sliscp_light320_swap() - */ -void sliscp_light320_permute(unsigned char block[40]); - -/** - * \brief Swaps rate bytes in a sLiSCP-light 320-bit block. - * - * \param block Points to the block to be rate-swapped. - * - * \sa sliscp_light320_permute() - */ -void sliscp_light320_swap(unsigned char block[40]); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/internal-util.h b/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/ace/Implementations/crypto_hash/acehash256v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
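The SpoC-128 layout documented in the header above can likewise be pictured with a short sketch: after sliscp_light256_swap_spoc(), the squeeze (rate) bytes occupy offsets 0..15 and the absorb (mask) bytes occupy offsets 16..31. This is a layout illustration only; it ignores SpoC's domain-separation and control bytes, and the helper names spoc128_absorb_block and spoc128_squeeze_block are hypothetical.

#include <string.h>
#include "internal-sliscp-light.h"

/* XOR a 16-byte block into the mask half, then run the permutation */
static void spoc128_absorb_block(unsigned char state[32],
                                 const unsigned char block[16])
{
    unsigned i;
    for (i = 0; i < 16; ++i)
        state[16 + i] ^= block[i];   /* mask bytes are contiguous post-swap */
    sliscp_light256_permute_spoc(state);
}

/* Copy out the 16 squeeze bytes, contiguous at the start post-swap */
static void spoc128_squeeze_block(unsigned char out[16],
                                  const unsigned char state[32])
{
    memcpy(out, state, 16);
}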
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) 
| \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = 
(src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/ace/Implementations/crypto_hash/acehash256v1/rhys/ace.c b/ace/Implementations/crypto_hash/acehash256v1/rhys/ace.c new file mode 100644 index 0000000..7a68306 --- /dev/null +++ b/ace/Implementations/crypto_hash/acehash256v1/rhys/ace.c @@ -0,0 +1,339 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "ace.h" +#include "internal-sliscp-light.h" +#include "internal-util.h" +#include + +/** + * \brief Size of the state for the internal ACE permutation. + */ +#define ACE_STATE_SIZE SLISCP_LIGHT320_STATE_SIZE + +/** + * \brief Rate for absorbing data into the ACE state and for + * squeezing data out again. + */ +#define ACE_RATE 8 + +aead_cipher_t const ace_cipher = { + "ACE", + ACE_KEY_SIZE, + ACE_NONCE_SIZE, + ACE_TAG_SIZE, + AEAD_FLAG_NONE, + ace_aead_encrypt, + ace_aead_decrypt +}; + +aead_hash_algorithm_t const ace_hash_algorithm = { + "ACE-HASH", + sizeof(ace_hash_state_t), + ACE_HASH_SIZE, + AEAD_FLAG_NONE, + ace_hash, + (aead_hash_init_t)ace_hash_init, + (aead_hash_update_t)ace_hash_update, + (aead_hash_finalize_t)ace_hash_finalize, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +/* Indices of where a rate byte is located in the state. 
We don't + * need this array any more because sliscp_light320_permute() operates + * on byte-swapped states where the rate bytes are contiguous in the + * first 8 bytes */ +/* +static unsigned char const ace_rate_posn[8] = { + 0, 1, 2, 3, 16, 17, 18, 19 +}; +*/ + +/** + * \brief Initializes the ACE state. + * + * \param state ACE permutation state. + * \param k Points to the 128-bit key. + * \param npub Points to the 128-bit nonce. + * \param ad Points to the associated data. + * \param adlen Length of the associated data in bytes. + */ +static void ace_init + (unsigned char state[ACE_STATE_SIZE], + const unsigned char *k, const unsigned char *npub, + const unsigned char *ad, unsigned long long adlen) +{ + unsigned temp; + + /* Initialize the state by interleaving the key and nonce */ + memcpy(state, k, 8); + memcpy(state + 8, npub, 8); + memcpy(state + 16, k + 8, 8); + memset(state + 24, 0, 8); + memcpy(state + 32, npub + 8, 8); + + /* Swap some of the state bytes to make the rate bytes contiguous */ + sliscp_light320_swap(state); + + /* Run the permutation to scramble the initial state */ + sliscp_light320_permute(state); + + /* Absorb the key in two further permutation operations */ + lw_xor_block(state, k, 8); + sliscp_light320_permute(state); + lw_xor_block(state, k + 8, 8); + sliscp_light320_permute(state); + + /* Absorb the associated data into the state */ + if (adlen != 0) { + while (adlen >= ACE_RATE) { + lw_xor_block(state, ad, ACE_RATE); + state[ACE_STATE_SIZE - 1] ^= 0x01; /* domain separation */ + sliscp_light320_permute(state); + ad += ACE_RATE; + adlen -= ACE_RATE; + } + temp = (unsigned)adlen; + lw_xor_block(state, ad, temp); + state[temp] ^= 0x80; /* padding */ + state[ACE_STATE_SIZE - 1] ^= 0x01; /* domain separation */ + sliscp_light320_permute(state); + } +} + +/** + * \brief Finalizes the ACE encryption or decryption operation. + * + * \param state ACE permutation state. + * \param k Points to the 128-bit key. + * \param tag Points to the 16 byte buffer to receive the computed tag. 
+ */ +static void ace_finalize + (unsigned char state[ACE_STATE_SIZE], const unsigned char *k, + unsigned char *tag) +{ + /* Absorb the key into the state again */ + lw_xor_block(state, k, 8); + sliscp_light320_permute(state); + lw_xor_block(state, k + 8, 8); + sliscp_light320_permute(state); + + /* Swap the state bytes back to the canonical order */ + sliscp_light320_swap(state); + + /* Copy out the authentication tag */ + memcpy(tag, state, 8); + memcpy(tag + 8, state + 16, 8); +} + +int ace_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + unsigned char state[ACE_STATE_SIZE]; + unsigned temp; + (void)nsec; + + /* Set the length of the returned ciphertext */ + *clen = mlen + ACE_TAG_SIZE; + + /* Initialize the ACE state and absorb the associated data */ + ace_init(state, k, npub, ad, adlen); + + /* Encrypt the plaintext to produce the ciphertext */ + while (mlen >= ACE_RATE) { + lw_xor_block_2_dest(c, state, m, ACE_RATE); + state[ACE_STATE_SIZE - 1] ^= 0x02; /* domain separation */ + sliscp_light320_permute(state); + c += ACE_RATE; + m += ACE_RATE; + mlen -= ACE_RATE; + } + temp = (unsigned)mlen; + lw_xor_block_2_dest(c, state, m, temp); + state[temp] ^= 0x80; /* padding */ + state[ACE_STATE_SIZE - 1] ^= 0x02; /* domain separation */ + sliscp_light320_permute(state); + c += mlen; + + /* Generate the authentication tag */ + ace_finalize(state, k, c); + return 0; +} + +int ace_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + unsigned char state[ACE_STATE_SIZE]; + unsigned char *mtemp = m; + unsigned temp; + (void)nsec; + + /* Validate the ciphertext length and set the return "mlen" value */ + if (clen < ACE_TAG_SIZE) + return -1; + *mlen = clen - ACE_TAG_SIZE; + + /* Initialize the ACE state and absorb the associated data */ + ace_init(state, k, npub, ad, adlen); + + /* Decrypt the ciphertext to produce the plaintext */ + clen -= ACE_TAG_SIZE; + while (clen >= ACE_RATE) { + lw_xor_block_swap(m, state, c, ACE_RATE); + state[ACE_STATE_SIZE - 1] ^= 0x02; /* domain separation */ + sliscp_light320_permute(state); + c += ACE_RATE; + m += ACE_RATE; + clen -= ACE_RATE; + } + temp = (unsigned)clen; + lw_xor_block_swap(m, state, c, temp); + state[temp] ^= 0x80; /* padding */ + state[ACE_STATE_SIZE - 1] ^= 0x02; /* domain separation */ + sliscp_light320_permute(state); + c += clen; + + /* Finalize the ACE state and compare against the authentication tag */ + ace_finalize(state, k, state); + return aead_check_tag(mtemp, *mlen, state, c, ACE_TAG_SIZE); +} + +/* Pre-hashed version of the ACE-HASH initialization vector */ +static unsigned char const ace_hash_iv[ACE_STATE_SIZE] = { + 0xb9, 0x7d, 0xda, 0x3f, 0x66, 0x2c, 0xd1, 0xa6, + 0x65, 0xd1, 0x80, 0xd6, 0x49, 0xdc, 0xa1, 0x8c, + 0x0c, 0x5f, 0x0e, 0xca, 0x70, 0x37, 0x58, 0x75, + 0x29, 0x7d, 0xb0, 0xb0, 0x72, 0x73, 0xce, 0xa8, + 0x99, 0x71, 0xde, 0x8a, 0x9a, 0x65, 0x72, 0x24 +}; + +int ace_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + unsigned char state[ACE_STATE_SIZE]; + unsigned temp; + + /* Load the initialization vector and hash it, which can be pre-computed */ + /* + memset(state, 0, sizeof(state)); + state[8] = 0x80; 
+ state[9] = 0x40; + state[10] = 0x40; + sliscp_light320_swap(state); + sliscp_light320_permute(state); + */ + memcpy(state, ace_hash_iv, ACE_STATE_SIZE); + + /* Absorb the input data */ + while (inlen >= ACE_RATE) { + lw_xor_block(state, in, ACE_RATE); + sliscp_light320_permute(state); + in += ACE_RATE; + inlen -= ACE_RATE; + } + temp = (unsigned)inlen; + lw_xor_block(state, in, temp); + state[temp] ^= 0x80; /* padding */ + sliscp_light320_permute(state); + + /* Squeeze out the hash value */ + memcpy(out, state, 8); + for (temp = 0; temp < 3; ++temp) { + out += 8; + sliscp_light320_permute(state); + memcpy(out, state, 8); + } + return 0; +} + +void ace_hash_init(ace_hash_state_t *state) +{ + memcpy(state->s.state, ace_hash_iv, ACE_STATE_SIZE); + state->s.count = 0; +} + +void ace_hash_update + (ace_hash_state_t *state, const unsigned char *in, + unsigned long long inlen) +{ + unsigned len; + + /* Handle the left-over rate block from last time */ + if (state->s.count != 0) { + len = ACE_RATE - state->s.count; + if (len > inlen) + len = (unsigned)inlen; + lw_xor_block(state->s.state + state->s.count, in, len); + in += len; + inlen -= len; + state->s.count += len; + if (state->s.count >= ACE_RATE) { + sliscp_light320_permute(state->s.state); + state->s.count = 0; + } else { + /* Not enough input data yet to fill up the whole block */ + return; + } + } + + /* Process as many full rate blocks as we can */ + while (inlen >= ACE_RATE) { + lw_xor_block(state->s.state, in, ACE_RATE); + sliscp_light320_permute(state->s.state); + in += ACE_RATE; + inlen -= ACE_RATE; + } + + /* Handle any left-over data */ + len = (unsigned)inlen; + lw_xor_block(state->s.state, in, len); + state->s.count = len; +} + +void ace_hash_finalize(ace_hash_state_t *state, unsigned char *out) +{ + unsigned temp; + + /* Pad and hash the final input block */ + state->s.state[state->s.count] ^= 0x80; + sliscp_light320_permute(state->s.state); + state->s.count = 0; + + /* Squeeze out the hash value */ + memcpy(out, state->s.state, 9); + for (temp = 0; temp < 3; ++temp) { + out += 8; + sliscp_light320_permute(state->s.state); + memcpy(out, state->s.state, 8); + } +} diff --git a/ace/Implementations/crypto_hash/acehash256v1/rhys/ace.h b/ace/Implementations/crypto_hash/acehash256v1/rhys/ace.h new file mode 100644 index 0000000..4497927 --- /dev/null +++ b/ace/Implementations/crypto_hash/acehash256v1/rhys/ace.h @@ -0,0 +1,197 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
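The pre-computed ace_hash_iv table above replaces the commented-out initialization so that the hash setup saves one permutation call. As a sanity-check sketch, the table can be re-derived at runtime as shown below; the helper name ace_hash_iv_check is hypothetical, and since ace_hash_iv is declared static it would have to live in the same ace.c translation unit.

#include <string.h>
#include "internal-sliscp-light.h"

/* Recompute the ACE-HASH initialization vector and compare it against
 * the pre-computed ace_hash_iv table; returns non-zero on a match */
static int ace_hash_iv_check(void)
{
    unsigned char state[40];
    memset(state, 0, sizeof(state));
    state[8]  = 0x80;   /* domain parameters from the commented-out code */
    state[9]  = 0x40;
    state[10] = 0x40;
    sliscp_light320_swap(state);
    sliscp_light320_permute(state);
    return memcmp(state, ace_hash_iv, sizeof(state)) == 0;
}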
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_ACE_H +#define LWCRYPTO_ACE_H + +#include "aead-common.h" + +/** + * \file ace.h + * \brief ACE authenticated encryption algorithm. + * + * ACE is an authenticated encryption algorithm with a 128-bit key, + * a 128-bit nonce, and a 128-bit tag. It uses a duplex construction + * on top of a 320-bit permutation. The permutation is a generalised + * version of sLiSCP-light, extended from 256 bits to 320 bits. + * ACE also has a companion hash algorithm with a 256-bit output. + * + * References: https://uwaterloo.ca/communications-security-lab/lwc/ace + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the key for ACE. + */ +#define ACE_KEY_SIZE 16 + +/** + * \brief Size of the authentication tag for ACE. + */ +#define ACE_TAG_SIZE 16 + +/** + * \brief Size of the nonce for ACE. + */ +#define ACE_NONCE_SIZE 16 + +/** + * \brief Size of the hash output for ACE-HASH. + */ +#define ACE_HASH_SIZE 32 + +/** + * \brief Meta-information block for the ACE cipher. + */ +extern aead_cipher_t const ace_cipher; + +/** + * \brief Meta-information block for the ACE-HASH hash algorithm. + */ +extern aead_hash_algorithm_t const ace_hash_algorithm; + +/** + * \brief State information for the ACE-HASH incremental hash mode. + */ +typedef union +{ + struct { + unsigned char state[40]; /**< Current hash state */ + unsigned char count; /**< Number of bytes in the current block */ + } s; /**< State */ + unsigned long long align; /**< For alignment of this structure */ + +} ace_hash_state_t; + +/** + * \brief Encrypts and authenticates a packet with ACE. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa ace_aead_decrypt() + */ +int ace_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with ACE. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. 
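+ *
+ * A minimal round-trip sketch (hypothetical application code, not part of
+ * this header; it assumes that a null/empty associated-data input and a
+ * null nsec pointer are acceptable, as they are for ACE):
+ * \code
+ * #include "ace.h"
+ *
+ * unsigned char key[ACE_KEY_SIZE] = {0};      // 16-byte key
+ * unsigned char nonce[ACE_NONCE_SIZE] = {0};  // 16-byte public nonce
+ * unsigned char msg[4] = {'t', 'e', 's', 't'};
+ * unsigned char ct[sizeof(msg) + ACE_TAG_SIZE];
+ * unsigned char pt[sizeof(msg)];
+ * unsigned long long ctlen, ptlen;
+ *
+ * ace_aead_encrypt(ct, &ctlen, msg, sizeof(msg), 0, 0, 0, nonce, key);
+ * if (ace_aead_decrypt(pt, &ptlen, 0, ct, ctlen, 0, 0, nonce, key) == 0) {
+ *     // tag verified: pt now holds ptlen == sizeof(msg) plaintext bytes
+ * }
+ * \endcode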
+ * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa ace_aead_encrypt() + */ +int ace_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data with ACE-HASH to generate a hash value. + * + * \param out Buffer to receive the hash output which must be at least + * ACE_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int ace_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for an ACE-HASH hashing operation. + * + * \param state Hash state to be initialized. + * + * \sa ace_hash_update(), ace_hash_finalize(), ace_hash() + */ +void ace_hash_init(ace_hash_state_t *state); + +/** + * \brief Updates the ACE-HASH state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + * + * \sa ace_hash_init(), ace_hash_finalize() + */ +void ace_hash_update + (ace_hash_state_t *state, const unsigned char *in, + unsigned long long inlen); + +/** + * \brief Returns the final hash value from an ACE-HASH hashing operation. + * + * \param state Hash state to be finalized. + * \param out Points to the output buffer to receive the 32-byte hash value. + * + * \sa ace_hash_init(), ace_hash_update() + */ +void ace_hash_finalize(ace_hash_state_t *state, unsigned char *out); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ace/Implementations/crypto_hash/acehash256v1/rhys/aead-common.c b/ace/Implementations/crypto_hash/acehash256v1/rhys/aead-common.c new file mode 100644 index 0000000..84fc53a --- /dev/null +++ b/ace/Implementations/crypto_hash/acehash256v1/rhys/aead-common.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "aead-common.h" + +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = (accum - 1) >> 8; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} + +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size, int precheck) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = ((accum - 1) >> 8) & precheck; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} diff --git a/ace/Implementations/crypto_hash/acehash256v1/rhys/aead-common.h b/ace/Implementations/crypto_hash/acehash256v1/rhys/aead-common.h new file mode 100644 index 0000000..2be95eb --- /dev/null +++ b/ace/Implementations/crypto_hash/acehash256v1/rhys/aead-common.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_AEAD_COMMON_H +#define LWCRYPTO_AEAD_COMMON_H + +#include + +/** + * \file aead-common.h + * \brief Definitions that are common across AEAD schemes. + * + * AEAD stands for "Authenticated Encryption with Associated Data". + * It is a standard API pattern for securely encrypting and + * authenticating packets of data. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts and authenticates a packet with an AEAD scheme. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the authentication tag. 
+ * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + */ +typedef int (*aead_cipher_encrypt_t) + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with an AEAD scheme. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + */ +typedef int (*aead_cipher_decrypt_t) + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data. + * + * \param out Buffer to receive the hash output. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +typedef int (*aead_hash_t) + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a hashing operation. + * + * \param state Hash state to be initialized. + */ +typedef void (*aead_hash_init_t)(void *state); + +/** + * \brief Updates a hash state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + */ +typedef void (*aead_hash_update_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Returns the final hash value from a hashing operation. + * + * \param state Hash state to be finalized. + * \param out Points to the output buffer to receive the hash value. + */ +typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); + +/** + * \brief Absorbs more input data into an XOF state. + * + * \param state XOF state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. 
+ * + * \sa ascon_xof_init(), ascon_xof_squeeze() + */ +typedef void (*aead_xof_absorb_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Squeezes output data from an XOF state. + * + * \param state XOF state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + */ +typedef void (*aead_xof_squeeze_t) + (void *state, unsigned char *out, unsigned long long outlen); + +/** + * \brief No special AEAD features. + */ +#define AEAD_FLAG_NONE 0x0000 + +/** + * \brief The natural byte order of the AEAD cipher is little-endian. + * + * If this flag is not present, then the natural byte order of the + * AEAD cipher should be assumed to be big-endian. + * + * The natural byte order may be useful when formatting packet sequence + * numbers as nonces. The application needs to know whether the sequence + * number should be packed into the leading or trailing bytes of the nonce. + */ +#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 + +/** + * \brief Meta-information about an AEAD cipher. + */ +typedef struct +{ + const char *name; /**< Name of the cipher */ + unsigned key_len; /**< Length of the key in bytes */ + unsigned nonce_len; /**< Length of the nonce in bytes */ + unsigned tag_len; /**< Length of the tag in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ + aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ + +} aead_cipher_t; + +/** + * \brief Meta-information about a hash algorithm that is related to an AEAD. + * + * Regular hash algorithms should provide the "hash", "init", "update", + * and "finalize" functions. Extensible Output Functions (XOFs) should + * provide the "hash", "init", "absorb", and "squeeze" functions. + */ +typedef struct +{ + const char *name; /**< Name of the hash algorithm */ + size_t state_size; /**< Size of the incremental state structure */ + unsigned hash_len; /**< Length of the hash in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_hash_t hash; /**< All in one hashing function */ + aead_hash_init_t init; /**< Incremental hash/XOF init function */ + aead_hash_update_t update; /**< Incremental hash update function */ + aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ + aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ + aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ + +} aead_hash_algorithm_t; + +/** + * \brief Check an authentication tag in constant time. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + */ +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len); + +/** + * \brief Check an authentication tag in constant time with a previous check. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. 
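+ *
+ * For illustration, a decryption routine that also needs to validate some
+ * other property of the packet can fold that result into the constant-time
+ * tag comparison through the precheck parameter. A hypothetical sketch
+ * (the names header_ok_mask, computed_tag, received_tag and TAG_LEN are
+ * illustrative only, not part of this API; a real caller would also derive
+ * the mask in constant time rather than with a branch):
+ * \code
+ * int header_ok_mask = header_ok ? -1 : 0;   // -1 = valid, 0 = invalid
+ * return aead_check_tag_precheck
+ *     (plaintext, plaintext_len, computed_tag, received_tag,
+ *      TAG_LEN, header_ok_mask);
+ * \endcode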
+ * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * \param precheck Set to -1 if previous check succeeded or 0 if it failed. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + * + * This version can be used to incorporate other information about the + * correctness of the plaintext into the final result. + */ +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len, int precheck); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ace/Implementations/crypto_hash/acehash256v1/rhys/api.h b/ace/Implementations/crypto_hash/acehash256v1/rhys/api.h new file mode 100644 index 0000000..ae8c7f6 --- /dev/null +++ b/ace/Implementations/crypto_hash/acehash256v1/rhys/api.h @@ -0,0 +1 @@ +#define CRYPTO_BYTES 32 diff --git a/ace/Implementations/crypto_hash/acehash256v1/rhys/hash.c b/ace/Implementations/crypto_hash/acehash256v1/rhys/hash.c new file mode 100644 index 0000000..388f638 --- /dev/null +++ b/ace/Implementations/crypto_hash/acehash256v1/rhys/hash.c @@ -0,0 +1,8 @@ + +#include "ace.h" + +int crypto_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + return ace_hash(out, in, inlen); +} diff --git a/ace/Implementations/crypto_hash/acehash256v1/rhys/internal-sliscp-320-avr.S b/ace/Implementations/crypto_hash/acehash256v1/rhys/internal-sliscp-320-avr.S new file mode 100644 index 0000000..2522d5c --- /dev/null +++ b/ace/Implementations/crypto_hash/acehash256v1/rhys/internal-sliscp-320-avr.S @@ -0,0 +1,1767 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 96 +table_0: + .byte 7 + .byte 83 + .byte 67 + .byte 80 + .byte 40 + .byte 20 + .byte 10 + .byte 93 + .byte 228 + .byte 92 + .byte 174 + .byte 87 + .byte 155 + .byte 73 + .byte 94 + .byte 145 + .byte 72 + .byte 36 + .byte 224 + .byte 127 + .byte 204 + .byte 141 + .byte 198 + .byte 99 + .byte 209 + .byte 190 + .byte 50 + .byte 83 + .byte 169 + .byte 84 + .byte 26 + .byte 29 + .byte 78 + .byte 96 + .byte 48 + .byte 24 + .byte 34 + .byte 40 + .byte 117 + .byte 104 + .byte 52 + .byte 154 + .byte 247 + .byte 108 + .byte 37 + .byte 225 + .byte 112 + .byte 56 + .byte 98 + .byte 130 + .byte 253 + .byte 246 + .byte 123 + .byte 189 + .byte 150 + .byte 71 + .byte 249 + .byte 157 + .byte 206 + .byte 103 + .byte 113 + .byte 107 + .byte 118 + .byte 64 + .byte 32 + .byte 16 + .byte 170 + .byte 136 + .byte 160 + .byte 79 + .byte 39 + .byte 19 + .byte 43 + .byte 220 + .byte 176 + .byte 190 + .byte 95 + .byte 47 + .byte 233 + .byte 139 + .byte 9 + .byte 91 + .byte 173 + .byte 214 + .byte 207 + .byte 89 + .byte 30 + .byte 233 + .byte 116 + .byte 186 + .byte 183 + .byte 198 + .byte 173 + .byte 127 + .byte 63 + .byte 31 + + .text +.global sliscp_light320_permute + .type sliscp_light320_permute, @function +sliscp_light320_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 34 + 
ldd r21,Z+8 + ldd r20,Z+9 + ldd r19,Z+10 + ldd r18,Z+11 + ldd r27,Z+12 + ldd r26,Z+13 + ldd r23,Z+14 + ldd r22,Z+15 + ldd r5,Z+24 + ldd r4,Z+25 + ldd r3,Z+26 + ldd r2,Z+27 + ldd r9,Z+28 + ldd r8,Z+29 + ldd r7,Z+30 + ldd r6,Z+31 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + std Y+5,r22 + std Y+6,r23 + std Y+7,r26 + std Y+8,r27 + std Y+9,r2 + std Y+10,r3 + std Y+11,r4 + std Y+12,r5 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ld r21,Z + ldd r20,Z+1 + ldd r19,Z+2 + ldd r18,Z+3 + ldd r27,Z+16 + ldd r26,Z+17 + ldd r23,Z+18 + ldd r22,Z+19 + ldd r5,Z+4 + ldd r4,Z+5 + ldd r3,Z+6 + ldd r2,Z+7 + ldd r9,Z+20 + ldd r8,Z+21 + ldd r7,Z+22 + ldd r6,Z+23 + ldd r13,Z+32 + ldd r12,Z+33 + ldd r11,Z+34 + ldd r10,Z+35 + ldd r25,Z+36 + ldd r24,Z+37 + ldd r15,Z+38 + ldd r14,Z+39 + push r31 + push r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r16,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r16 +#endif + ldi r30,0 +60: +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + inc r30 + push r30 + mov r30,r16 + movw r16,r18 + mov r1,r20 + mov r0,r21 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r18 + and r17,r19 + and r1,r20 + and r0,r21 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + com r23 + com r26 + com r27 + ldi r16,255 + lsr r30 + rol r16 + eor r22,r16 + movw r16,r22 + mov r1,r26 + mov r0,r27 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r22 + and r17,r23 + and r1,r26 + and r0,r27 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + com r19 + com r20 + com r21 + ldi r16,255 + lsr r30 + rol r16 + eor r18,r16 + movw r16,r18 + mov r1,r20 + mov r0,r21 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r18 + and r17,r19 + and r1,r20 + and r0,r21 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + com r23 + com r26 + com r27 + ldi r16,255 + lsr r30 + rol r16 + eor r22,r16 + movw r16,r22 + mov r1,r26 + mov r0,r27 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r22 + and r17,r23 + and r1,r26 + and r0,r27 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + com r19 + com r20 + com r21 + ldi r16,255 + lsr r30 + rol r16 + eor r18,r16 + movw r16,r18 + mov 
r1,r20 + mov r0,r21 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r18 + and r17,r19 + and r1,r20 + and r0,r21 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + com r23 + com r26 + com r27 + ldi r16,255 + lsr r30 + rol r16 + eor r22,r16 + movw r16,r22 + mov r1,r26 + mov r0,r27 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r22 + and r17,r23 + and r1,r26 + and r0,r27 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + com r19 + com r20 + com r21 + ldi r16,255 + lsr r30 + rol r16 + eor r18,r16 + movw r16,r18 + mov r1,r20 + mov r0,r21 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r18 + and r17,r19 + and r1,r20 + and r0,r21 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + com r23 + com r26 + com r27 + ldi r16,255 + lsr r30 + rol r16 + eor r22,r16 + movw r16,r22 + mov r1,r26 + mov r0,r27 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r22 + and r17,r23 + and r1,r26 + and r0,r27 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + com r19 + com r20 + com r21 + ldi r16,255 + lsr r30 + rol r16 + eor r18,r16 + pop r30 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + inc r30 + push r30 + mov r30,r16 + movw r16,r2 + mov r1,r4 + mov r0,r5 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r6,r16 + eor r7,r17 + eor r8,r1 + eor r9,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r2 + and r17,r3 + and r1,r4 + and r0,r5 + eor r6,r16 + eor r7,r17 + eor r8,r1 + eor r9,r0 + com r7 + com r8 + com r9 + ldi r16,255 + lsr r30 + rol r16 + eor r6,r16 + movw r16,r6 + mov r1,r8 + mov r0,r9 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r2,r16 + eor r3,r17 + eor r4,r1 + eor r5,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r6 + and r17,r7 + and r1,r8 + and r0,r9 + eor r2,r16 + eor 
r3,r17 + eor r4,r1 + eor r5,r0 + com r3 + com r4 + com r5 + ldi r16,255 + lsr r30 + rol r16 + eor r2,r16 + movw r16,r2 + mov r1,r4 + mov r0,r5 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r6,r16 + eor r7,r17 + eor r8,r1 + eor r9,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r2 + and r17,r3 + and r1,r4 + and r0,r5 + eor r6,r16 + eor r7,r17 + eor r8,r1 + eor r9,r0 + com r7 + com r8 + com r9 + ldi r16,255 + lsr r30 + rol r16 + eor r6,r16 + movw r16,r6 + mov r1,r8 + mov r0,r9 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r2,r16 + eor r3,r17 + eor r4,r1 + eor r5,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r6 + and r17,r7 + and r1,r8 + and r0,r9 + eor r2,r16 + eor r3,r17 + eor r4,r1 + eor r5,r0 + com r3 + com r4 + com r5 + ldi r16,255 + lsr r30 + rol r16 + eor r2,r16 + movw r16,r2 + mov r1,r4 + mov r0,r5 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r6,r16 + eor r7,r17 + eor r8,r1 + eor r9,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r2 + and r17,r3 + and r1,r4 + and r0,r5 + eor r6,r16 + eor r7,r17 + eor r8,r1 + eor r9,r0 + com r7 + com r8 + com r9 + ldi r16,255 + lsr r30 + rol r16 + eor r6,r16 + movw r16,r6 + mov r1,r8 + mov r0,r9 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r2,r16 + eor r3,r17 + eor r4,r1 + eor r5,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r6 + and r17,r7 + and r1,r8 + and r0,r9 + eor r2,r16 + eor r3,r17 + eor r4,r1 + eor r5,r0 + com r3 + com r4 + com r5 + ldi r16,255 + lsr r30 + rol r16 + eor r2,r16 + movw r16,r2 + mov r1,r4 + mov r0,r5 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r6,r16 + eor r7,r17 + eor r8,r1 + eor r9,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r2 + and r17,r3 + and r1,r4 + and r0,r5 + eor r6,r16 + eor r7,r17 + eor r8,r1 + eor r9,r0 + com r7 + com r8 + com r9 + ldi r16,255 + lsr r30 + rol r16 + eor r6,r16 + movw r16,r6 + mov r1,r8 + mov r0,r9 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r2,r16 + eor r3,r17 + eor r4,r1 + eor r5,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r6 + and r17,r7 + and r1,r8 + and r0,r9 + eor r2,r16 + eor r3,r17 + eor r4,r1 + eor r5,r0 + com r3 + com r4 + com r5 + ldi r16,255 + lsr r30 + rol r16 + eor r2,r16 + pop r30 +#if defined(RAMPZ) + elpm r16,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + inc r30 + push r30 + mov r30,r16 + movw r16,r10 + mov r1,r12 + mov r0,r13 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r14,r16 + eor r15,r17 + eor r24,r1 + eor r25,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r10 + and r17,r11 + and r1,r12 + and r0,r13 + eor r14,r16 + eor r15,r17 + eor r24,r1 + eor r25,r0 + com r15 + com r24 + com r25 + ldi r16,255 + lsr r30 + rol r16 + eor r14,r16 + movw r16,r14 + mov r1,r24 + mov r0,r25 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r10,r16 + eor r11,r17 + eor r12,r1 + eor r13,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r14 + and r17,r15 + and r1,r24 + and r0,r25 + eor r10,r16 + eor r11,r17 + eor r12,r1 + eor r13,r0 + com r11 + com r12 + com r13 + ldi r16,255 + lsr r30 + rol r16 + eor r10,r16 + movw r16,r10 + mov r1,r12 + mov r0,r13 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r14,r16 + eor r15,r17 + eor r24,r1 + eor r25,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r10 + and r17,r11 + and r1,r12 + and r0,r13 + eor r14,r16 + eor r15,r17 + eor r24,r1 + eor r25,r0 + com r15 + com r24 + com r25 + ldi r16,255 + lsr r30 + rol r16 + eor r14,r16 + movw r16,r14 + mov r1,r24 + mov r0,r25 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r10,r16 + eor r11,r17 + eor r12,r1 + eor r13,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r14 + and r17,r15 + and r1,r24 + and r0,r25 + eor r10,r16 + eor r11,r17 + eor r12,r1 + eor r13,r0 + com r11 + com r12 + com r13 + ldi r16,255 + lsr r30 + rol r16 + eor r10,r16 + movw r16,r10 + mov r1,r12 + mov r0,r13 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r14,r16 + eor r15,r17 + eor r24,r1 + eor r25,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r10 + and r17,r11 + and r1,r12 + and r0,r13 + eor r14,r16 + eor r15,r17 + eor r24,r1 + eor r25,r0 + com r15 + com r24 + com r25 + ldi r16,255 + lsr r30 + rol r16 + eor r14,r16 + movw r16,r14 + mov r1,r24 + mov r0,r25 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r10,r16 + eor r11,r17 + eor r12,r1 + eor r13,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r14 + and r17,r15 + and r1,r24 + and r0,r25 + eor r10,r16 + 
eor r11,r17 + eor r12,r1 + eor r13,r0 + com r11 + com r12 + com r13 + ldi r16,255 + lsr r30 + rol r16 + eor r10,r16 + movw r16,r10 + mov r1,r12 + mov r0,r13 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r14,r16 + eor r15,r17 + eor r24,r1 + eor r25,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r10 + and r17,r11 + and r1,r12 + and r0,r13 + eor r14,r16 + eor r15,r17 + eor r24,r1 + eor r25,r0 + com r15 + com r24 + com r25 + ldi r16,255 + lsr r30 + rol r16 + eor r14,r16 + movw r16,r14 + mov r1,r24 + mov r0,r25 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + eor r10,r16 + eor r11,r17 + eor r12,r1 + eor r13,r0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + bst r0,7 + lsl r16 + rol r17 + rol r1 + rol r0 + bld r16,0 + and r16,r14 + and r17,r15 + and r1,r24 + and r0,r25 + eor r10,r16 + eor r11,r17 + eor r12,r1 + eor r13,r0 + com r11 + com r12 + com r13 + ldi r16,255 + lsr r30 + rol r16 + eor r10,r16 + pop r30 + ldd r16,Y+1 + ldd r17,Y+2 + ldd r1,Y+3 + ldd r0,Y+4 + eor r16,r2 + eor r17,r3 + eor r1,r4 + eor r0,r5 + com r16 + com r17 + com r1 + com r0 + std Y+1,r16 + std Y+2,r17 + std Y+3,r1 + std Y+4,r0 + ldd r16,Y+5 + ldd r17,Y+6 + ldd r1,Y+7 + ldd r0,Y+8 + eor r16,r6 + eor r17,r7 + eor r1,r8 + eor r0,r9 + com r17 + com r1 + com r0 + std Y+6,r17 + std Y+7,r1 + std Y+8,r0 +#if defined(RAMPZ) + elpm r0,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r0,Z +#elif defined(__AVR_TINY__) + ld r0,Z +#else + lpm +#endif + inc r30 + eor r16,r0 + std Y+5,r16 + ldd r16,Y+9 + ldd r17,Y+10 + ldd r1,Y+11 + ldd r0,Y+12 + eor r16,r10 + eor r17,r11 + eor r1,r12 + eor r0,r13 + com r16 + com r17 + com r1 + com r0 + std Y+9,r16 + std Y+10,r17 + std Y+11,r1 + std Y+12,r0 + ldd r16,Y+13 + ldd r17,Y+14 + ldd r1,Y+15 + ldd r0,Y+16 + eor r16,r14 + eor r17,r15 + eor r1,r24 + eor r0,r25 + com r17 + com r1 + com r0 + std Y+14,r17 + std Y+15,r1 + std Y+16,r0 +#if defined(RAMPZ) + elpm r0,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r0,Z +#elif defined(__AVR_TINY__) + ld r0,Z +#else + lpm +#endif + inc r30 + eor r16,r0 + std Y+13,r16 + eor r10,r18 + eor r11,r19 + eor r12,r20 + eor r13,r21 + com r10 + com r11 + com r12 + com r13 + eor r14,r22 + eor r15,r23 + eor r24,r26 + eor r25,r27 + com r15 + com r24 + com r25 +#if defined(RAMPZ) + elpm r0,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r0,Z +#elif defined(__AVR_TINY__) + ld r0,Z +#else + lpm +#endif + inc r30 + eor r14,r0 + movw r16,r10 + mov r1,r12 + mov r0,r13 + ldd r10,Y+1 + ldd r11,Y+2 + ldd r12,Y+3 + ldd r13,Y+4 + std Y+1,r2 + std Y+2,r3 + std Y+3,r4 + std Y+4,r5 + movw r2,r18 + movw r4,r20 + ldd r18,Y+9 + ldd r19,Y+10 + ldd r20,Y+11 + ldd r21,Y+12 + std Y+9,r16 + std Y+10,r17 + std Y+11,r1 + std Y+12,r0 + movw r16,r14 + mov r1,r24 + mov r0,r25 + ldd r14,Y+5 + ldd r15,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + movw r6,r22 + movw r8,r26 + ldd r22,Y+13 + ldd r23,Y+14 + ldd r26,Y+15 + ldd r27,Y+16 + std Y+13,r16 + std Y+14,r17 + std Y+15,r1 + std Y+16,r0 + ldi r17,96 + cpse r30,r17 + rjmp 60b +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + st Z,r21 + std Z+1,r20 + std Z+2,r19 + std Z+3,r18 + std Z+16,r27 + std Z+17,r26 + std 
Z+18,r23 + std Z+19,r22 + std Z+4,r5 + std Z+5,r4 + std Z+6,r3 + std Z+7,r2 + std Z+20,r9 + std Z+21,r8 + std Z+22,r7 + std Z+23,r6 + std Z+32,r13 + std Z+33,r12 + std Z+34,r11 + std Z+35,r10 + std Z+36,r25 + std Z+37,r24 + std Z+38,r15 + std Z+39,r14 + ldd r18,Y+1 + ldd r19,Y+2 + ldd r20,Y+3 + ldd r21,Y+4 + ldd r22,Y+5 + ldd r23,Y+6 + ldd r26,Y+7 + ldd r27,Y+8 + ldd r2,Y+9 + ldd r3,Y+10 + ldd r4,Y+11 + ldd r5,Y+12 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + std Z+8,r21 + std Z+9,r20 + std Z+10,r19 + std Z+11,r18 + std Z+12,r27 + std Z+13,r26 + std Z+14,r23 + std Z+15,r22 + std Z+24,r5 + std Z+25,r4 + std Z+26,r3 + std Z+27,r2 + std Z+28,r9 + std Z+29,r8 + std Z+30,r7 + std Z+31,r6 + adiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + eor r1,r1 + ret + .size sliscp_light320_permute, .-sliscp_light320_permute + + .text +.global sliscp_light320_swap + .type sliscp_light320_swap, @function +sliscp_light320_swap: + movw r30,r24 +.L__stack_usage = 2 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + std Z+4,r22 + std Z+5,r23 + std Z+6,r26 + std Z+7,r27 + ret + .size sliscp_light320_swap, .-sliscp_light320_swap + +#endif diff --git a/ace/Implementations/crypto_hash/acehash256v1/rhys/internal-sliscp-light.c b/ace/Implementations/crypto_hash/acehash256v1/rhys/internal-sliscp-light.c new file mode 100644 index 0000000..dd3a688 --- /dev/null +++ b/ace/Implementations/crypto_hash/acehash256v1/rhys/internal-sliscp-light.c @@ -0,0 +1,413 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "internal-sliscp-light.h" + +#if !defined(__AVR__) + +/** + * \brief Performs one round of the Simeck-64 block cipher. + * + * \param x Left half of the 64-bit block. + * \param y Right half of the 64-bit block. + */ +#define simeck64_round(x, y) \ + do { \ + (y) ^= (leftRotate5((x)) & (x)) ^ leftRotate1((x)) ^ \ + 0xFFFFFFFEU ^ (_rc & 1); \ + _rc >>= 1; \ + } while (0) + +/** + * \brief Encrypts a 64-bit block with the 8 round version of Simeck-64. + * + * \param x Left half of the 64-bit block. + * \param y Right half of the 64-bit block. 
+ * \param rc Round constants for the 8 rounds, 1 bit per round. + * + * It is assumed that the two halves have already been converted from + * big-endian to host byte order before calling this function. The output + * halves will also be in host byte order. + */ +#define simeck64_box(x, y, rc) \ + do { \ + unsigned char _rc = (rc); \ + simeck64_round(x, y); /* Round 1 */ \ + simeck64_round(y, x); /* Round 2 */ \ + simeck64_round(x, y); /* Round 3 */ \ + simeck64_round(y, x); /* Round 4 */ \ + simeck64_round(x, y); /* Round 5 */ \ + simeck64_round(y, x); /* Round 6 */ \ + simeck64_round(x, y); /* Round 7 */ \ + simeck64_round(y, x); /* Round 8 */ \ + } while (0) + +/* Helper macros for 48-bit left rotations */ +#define leftRotate5_48(x) (((x) << 5) | ((x) >> 19)) +#define leftRotate1_48(x) (((x) << 1) | ((x) >> 23)) + +/** + * \brief Performs one round of the Simeck-48 block cipher. + * + * \param x Left half of the 48-bit block. + * \param y Right half of the 48-bit block. + */ +#define simeck48_round(x, y) \ + do { \ + (y) ^= (leftRotate5_48((x)) & (x)) ^ leftRotate1_48((x)) ^ \ + 0x00FFFFFEU ^ (_rc & 1); \ + (y) &= 0x00FFFFFFU; \ + _rc >>= 1; \ + } while (0) + +/** + * \brief Encrypts a 48-bit block with the 6 round version of Simeck-48. + * + * \param x Left half of the 48-bit block. + * \param y Right half of the 48-bit block. + * \param rc Round constants for the 8 rounds, 1 bit per round. + * + * It is assumed that the two halves have already been converted from + * big-endian to host byte order before calling this function. The output + * halves will also be in host byte order. + */ +#define simeck48_box(x, y, rc) \ + do { \ + unsigned char _rc = (rc); \ + simeck48_round(x, y); /* Round 1 */ \ + simeck48_round(y, x); /* Round 2 */ \ + simeck48_round(x, y); /* Round 3 */ \ + simeck48_round(y, x); /* Round 4 */ \ + simeck48_round(x, y); /* Round 5 */ \ + simeck48_round(y, x); /* Round 6 */ \ + } while (0) + +/* Interleaved rc0, rc1, sc0, and sc1 values for each round */ +static unsigned char const sliscp_light256_RC[18 * 4] = { + 0x0f, 0x47, 0x08, 0x64, 0x04, 0xb2, 0x86, 0x6b, + 0x43, 0xb5, 0xe2, 0x6f, 0xf1, 0x37, 0x89, 0x2c, + 0x44, 0x96, 0xe6, 0xdd, 0x73, 0xee, 0xca, 0x99, + 0xe5, 0x4c, 0x17, 0xea, 0x0b, 0xf5, 0x8e, 0x0f, + 0x47, 0x07, 0x64, 0x04, 0xb2, 0x82, 0x6b, 0x43, + 0xb5, 0xa1, 0x6f, 0xf1, 0x37, 0x78, 0x2c, 0x44, + 0x96, 0xa2, 0xdd, 0x73, 0xee, 0xb9, 0x99, 0xe5, + 0x4c, 0xf2, 0xea, 0x0b, 0xf5, 0x85, 0x0f, 0x47, + 0x07, 0x23, 0x04, 0xb2, 0x82, 0xd9, 0x43, 0xb5 +}; + +void sliscp_light256_permute_spix(unsigned char block[32], unsigned rounds) +{ + const unsigned char *rc = sliscp_light256_RC; + uint32_t x0, x1, x2, x3, x4, x5, x6, x7; + uint32_t t0, t1; + + /* Load the block into local state variables */ + x0 = be_load_word32(block); + x1 = be_load_word32(block + 4); + x2 = be_load_word32(block + 8); + x3 = be_load_word32(block + 24); /* Assumes the block is pre-swapped */ + x4 = be_load_word32(block + 16); + x5 = be_load_word32(block + 20); + x6 = be_load_word32(block + 12); + x7 = be_load_word32(block + 28); + + /* Perform all permutation rounds */ + for (; rounds > 0; --rounds, rc += 4) { + /* Apply Simeck-64 to two of the 64-bit sub-blocks */ + simeck64_box(x2, x3, rc[0]); + simeck64_box(x6, x7, rc[1]); + + /* Add step constants */ + x0 ^= 0xFFFFFFFFU; + x1 ^= 0xFFFFFF00U ^ rc[2]; + x4 ^= 0xFFFFFFFFU; + x5 ^= 0xFFFFFF00U ^ rc[3]; + + /* Mix the sub-blocks */ + t0 = x0 ^ x2; + t1 = x1 ^ x3; + x0 = x2; + x1 = x3; + x2 = x4 ^ x6; + x3 = x5 ^ x7; + x4 = x6; + x5 = x7; + x6 = 
t0; + x7 = t1; + } + + /* Store the state back into the block */ + be_store_word32(block, x0); + be_store_word32(block + 4, x1); + be_store_word32(block + 8, x2); + be_store_word32(block + 24, x3); /* Assumes the block is pre-swapped */ + be_store_word32(block + 16, x4); + be_store_word32(block + 20, x5); + be_store_word32(block + 12, x6); + be_store_word32(block + 28, x7); +} + +void sliscp_light256_swap_spix(unsigned char block[32]) +{ + uint32_t t1, t2; + t1 = le_load_word32(block + 12); + t2 = le_load_word32(block + 24); + le_store_word32(block + 24, t1); + le_store_word32(block + 12, t2); +} + +void sliscp_light256_permute_spoc(unsigned char block[32]) +{ + const unsigned char *rc = sliscp_light256_RC; + uint32_t x0, x1, x2, x3, x4, x5, x6, x7; + uint32_t t0, t1; + unsigned round; + + /* Load the block into local state variables */ + x0 = be_load_word32(block); + x1 = be_load_word32(block + 4); + x2 = be_load_word32(block + 16); /* Assumes the block is pre-swapped */ + x3 = be_load_word32(block + 20); + x4 = be_load_word32(block + 8); + x5 = be_load_word32(block + 12); + x6 = be_load_word32(block + 24); + x7 = be_load_word32(block + 28); + + /* Perform all permutation rounds */ + for (round = 0; round < 18; ++round, rc += 4) { + /* Apply Simeck-64 to two of the 64-bit sub-blocks */ + simeck64_box(x2, x3, rc[0]); + simeck64_box(x6, x7, rc[1]); + + /* Add step constants */ + x0 ^= 0xFFFFFFFFU; + x1 ^= 0xFFFFFF00U ^ rc[2]; + x4 ^= 0xFFFFFFFFU; + x5 ^= 0xFFFFFF00U ^ rc[3]; + + /* Mix the sub-blocks */ + t0 = x0 ^ x2; + t1 = x1 ^ x3; + x0 = x2; + x1 = x3; + x2 = x4 ^ x6; + x3 = x5 ^ x7; + x4 = x6; + x5 = x7; + x6 = t0; + x7 = t1; + } + + /* Store the state back into the block */ + be_store_word32(block, x0); + be_store_word32(block + 4, x1); + be_store_word32(block + 16, x2); /* Assumes the block is pre-swapped */ + be_store_word32(block + 20, x3); + be_store_word32(block + 8, x4); + be_store_word32(block + 12, x5); + be_store_word32(block + 24, x6); + be_store_word32(block + 28, x7); +} + +void sliscp_light256_swap_spoc(unsigned char block[32]) +{ + uint64_t t1, t2; + t1 = le_load_word64(block + 8); + t2 = le_load_word64(block + 16); + le_store_word64(block + 16, t1); + le_store_word64(block + 8, t2); +} + +/* Load a big-endian 24-bit word from a byte buffer */ +#define be_load_word24(ptr) \ + ((((uint32_t)((ptr)[0])) << 16) | \ + (((uint32_t)((ptr)[1])) << 8) | \ + ((uint32_t)((ptr)[2]))) + +/* Store a big-endian 24-bit word into a byte buffer */ +#define be_store_word24(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 16); \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)_x; \ + } while (0) + +void sliscp_light192_permute(unsigned char block[24]) +{ + /* Interleaved rc0, rc1, sc0, and sc1 values for each round */ + static unsigned char const RC[18 * 4] = { + 0x07, 0x27, 0x08, 0x29, 0x04, 0x34, 0x0c, 0x1d, + 0x06, 0x2e, 0x0a, 0x33, 0x25, 0x19, 0x2f, 0x2a, + 0x17, 0x35, 0x38, 0x1f, 0x1c, 0x0f, 0x24, 0x10, + 0x12, 0x08, 0x36, 0x18, 0x3b, 0x0c, 0x0d, 0x14, + 0x26, 0x0a, 0x2b, 0x1e, 0x15, 0x2f, 0x3e, 0x31, + 0x3f, 0x38, 0x01, 0x09, 0x20, 0x24, 0x21, 0x2d, + 0x30, 0x36, 0x11, 0x1b, 0x28, 0x0d, 0x39, 0x16, + 0x3c, 0x2b, 0x05, 0x3d, 0x22, 0x3e, 0x27, 0x03, + 0x13, 0x01, 0x34, 0x02, 0x1a, 0x21, 0x2e, 0x23 + }; + const unsigned char *rc = RC; + uint32_t x0, x1, x2, x3, x4, x5, x6, x7; + uint32_t t0, t1; + unsigned round; + + /* Load the block into local state variables. 
Each 24-bit block is + * placed into a separate 32-bit word which improves efficiency below */ + x0 = be_load_word24(block); + x1 = be_load_word24(block + 3); + x2 = be_load_word24(block + 6); + x3 = be_load_word24(block + 9); + x4 = be_load_word24(block + 12); + x5 = be_load_word24(block + 15); + x6 = be_load_word24(block + 18); + x7 = be_load_word24(block + 21); + + /* Perform all permutation rounds */ + for (round = 0; round < 18; ++round, rc += 4) { + /* Apply Simeck-48 to two of the 48-bit sub-blocks */ + simeck48_box(x2, x3, rc[0]); + simeck48_box(x6, x7, rc[1]); + + /* Add step constants */ + x0 ^= 0x00FFFFFFU; + x1 ^= 0x00FFFF00U ^ rc[2]; + x4 ^= 0x00FFFFFFU; + x5 ^= 0x00FFFF00U ^ rc[3]; + + /* Mix the sub-blocks */ + t0 = x0 ^ x2; + t1 = x1 ^ x3; + x0 = x2; + x1 = x3; + x2 = x4 ^ x6; + x3 = x5 ^ x7; + x4 = x6; + x5 = x7; + x6 = t0; + x7 = t1; + } + + /* Store the state back into the block */ + be_store_word24(block, x0); + be_store_word24(block + 3, x1); + be_store_word24(block + 6, x2); + be_store_word24(block + 9, x3); + be_store_word24(block + 12, x4); + be_store_word24(block + 15, x5); + be_store_word24(block + 18, x6); + be_store_word24(block + 21, x7); +} + +void sliscp_light320_permute(unsigned char block[40]) +{ + /* Interleaved rc0, rc1, rc2, sc0, sc1, and sc2 values for each round */ + static unsigned char const RC[16 * 6] = { + 0x07, 0x53, 0x43, 0x50, 0x28, 0x14, 0x0a, 0x5d, + 0xe4, 0x5c, 0xae, 0x57, 0x9b, 0x49, 0x5e, 0x91, + 0x48, 0x24, 0xe0, 0x7f, 0xcc, 0x8d, 0xc6, 0x63, + 0xd1, 0xbe, 0x32, 0x53, 0xa9, 0x54, 0x1a, 0x1d, + 0x4e, 0x60, 0x30, 0x18, 0x22, 0x28, 0x75, 0x68, + 0x34, 0x9a, 0xf7, 0x6c, 0x25, 0xe1, 0x70, 0x38, + 0x62, 0x82, 0xfd, 0xf6, 0x7b, 0xbd, 0x96, 0x47, + 0xf9, 0x9d, 0xce, 0x67, 0x71, 0x6b, 0x76, 0x40, + 0x20, 0x10, 0xaa, 0x88, 0xa0, 0x4f, 0x27, 0x13, + 0x2b, 0xdc, 0xb0, 0xbe, 0x5f, 0x2f, 0xe9, 0x8b, + 0x09, 0x5b, 0xad, 0xd6, 0xcf, 0x59, 0x1e, 0xe9, + 0x74, 0xba, 0xb7, 0xc6, 0xad, 0x7f, 0x3f, 0x1f + }; + const unsigned char *rc = RC; + uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9; + uint32_t t0, t1; + unsigned round; + + /* Load the block into local state variables */ + x0 = be_load_word32(block); + x1 = be_load_word32(block + 16); /* Assumes the block is pre-swapped */ + x2 = be_load_word32(block + 8); + x3 = be_load_word32(block + 12); + x4 = be_load_word32(block + 4); + x5 = be_load_word32(block + 20); + x6 = be_load_word32(block + 24); + x7 = be_load_word32(block + 28); + x8 = be_load_word32(block + 32); + x9 = be_load_word32(block + 36); + + /* Perform all permutation rounds */ + for (round = 0; round < 16; ++round, rc += 6) { + /* Apply Simeck-64 to three of the 64-bit sub-blocks */ + simeck64_box(x0, x1, rc[0]); + simeck64_box(x4, x5, rc[1]); + simeck64_box(x8, x9, rc[2]); + x6 ^= x8; + x7 ^= x9; + x2 ^= x4; + x3 ^= x5; + x8 ^= x0; + x9 ^= x1; + + /* Add step constants */ + x2 ^= 0xFFFFFFFFU; + x3 ^= 0xFFFFFF00U ^ rc[3]; + x6 ^= 0xFFFFFFFFU; + x7 ^= 0xFFFFFF00U ^ rc[4]; + x8 ^= 0xFFFFFFFFU; + x9 ^= 0xFFFFFF00U ^ rc[5]; + + /* Rotate the sub-blocks */ + t0 = x8; + t1 = x9; + x8 = x2; + x9 = x3; + x2 = x4; + x3 = x5; + x4 = x0; + x5 = x1; + x0 = x6; + x1 = x7; + x6 = t0; + x7 = t1; + } + + /* Store the state back into the block */ + be_store_word32(block, x0); + be_store_word32(block + 16, x1); /* Assumes the block is pre-swapped */ + be_store_word32(block + 8, x2); + be_store_word32(block + 12, x3); + be_store_word32(block + 4, x4); + be_store_word32(block + 20, x5); + be_store_word32(block + 24, x6); + be_store_word32(block + 28, x7); + 
be_store_word32(block + 32, x8); + be_store_word32(block + 36, x9); +} + +void sliscp_light320_swap(unsigned char block[40]) +{ + uint32_t t1, t2; + t1 = le_load_word32(block + 4); + t2 = le_load_word32(block + 16); + le_store_word32(block + 16, t1); + le_store_word32(block + 4, t2); +} + +#endif /* !__AVR__ */ diff --git a/ace/Implementations/crypto_hash/acehash256v1/rhys/internal-sliscp-light.h b/ace/Implementations/crypto_hash/acehash256v1/rhys/internal-sliscp-light.h new file mode 100644 index 0000000..8a5e8d5 --- /dev/null +++ b/ace/Implementations/crypto_hash/acehash256v1/rhys/internal-sliscp-light.h @@ -0,0 +1,168 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_SLISCP_LIGHT_H +#define LW_INTERNAL_SLISCP_LIGHT_H + +/** + * \file internal-sliscp-light.h + * \brief sLiSCP-light permutation + * + * There are three variants of sLiSCP-light in use in the NIST submissions: + * + * \li sLiSCP-light-256 with a 256-bit block size, used in SPIX and SpoC. + * \li sLiSCP-light-192 with a 192-bit block size, used in SpoC. + * \li sLiSCP-light-320 with a 320-bit block size, used in ACE. + * + * References: https://uwaterloo.ca/communications-security-lab/lwc/ace, + * https://uwaterloo.ca/communications-security-lab/lwc/spix, + * https://uwaterloo.ca/communications-security-lab/lwc/spoc + */ + +#include "internal-util.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the state for sLiSCP-light-256. + */ +#define SLISCP_LIGHT256_STATE_SIZE 32 + +/** + * \brief Size of the state for sLiSCP-light-192. + */ +#define SLISCP_LIGHT192_STATE_SIZE 24 + +/** + * \brief Size of the state for sLiSCP-light-320. + */ +#define SLISCP_LIGHT320_STATE_SIZE 40 + +/** + * \brief Performs the sLiSCP-light permutation on a 256-bit block. + * + * \param block Points to the block to be permuted. + * \param rounds Number of rounds to be performed, usually 9 or 18. + * + * The bytes of the block are assumed to be rearranged to match the + * requirements of the SPIX cipher. SPIX places the rate bytes at + * positions 8, 9, 10, 11, 24, 25, 26, and 27. + * + * This function assumes that bytes 24-27 have been pre-swapped with + * bytes 12-15 so that the rate portion of the state is contiguous. + * + * The sliscp_light256_swap_spix() function can be used to switch + * between the canonical order and the pre-swapped order. 
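+ *
+ * A minimal sketch of the intended call pattern (hypothetical caller code;
+ * callers normally keep the state in the pre-swapped order across
+ * consecutive permutation calls and only convert at the boundaries, much as
+ * ace_finalize() does for the 320-bit variant):
+ * \code
+ * unsigned char state[SLISCP_LIGHT256_STATE_SIZE];
+ * // ... fill "state" in the canonical byte order ...
+ * sliscp_light256_swap_spix(state);         // canonical -> pre-swapped
+ * sliscp_light256_permute_spix(state, 18);  // 18-round permutation
+ * sliscp_light256_swap_spix(state);         // pre-swapped -> canonical
+ * \endcode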
+ * + * \sa sliscp_light256_swap_spix() + */ +void sliscp_light256_permute_spix(unsigned char block[32], unsigned rounds); + +/** + * \brief Swaps rate bytes in a sLiSCP-light 256-bit block for SPIX. + * + * \param block Points to the block to be rate-swapped. + * + * \sa sliscp_light256_permute_spix() + */ +void sliscp_light256_swap_spix(unsigned char block[32]); + +/** + * \brief Performs the sLiSCP-light permutation on a 256-bit block. + * + * \param block Points to the block to be permuted. + * + * The bytes of the block are assumed to be rearranged to match the + * requirements of the SpoC-128 cipher. SpoC-128 interleaves the + * rate bytes and the mask bytes. This version assumes that the + * rate and mask are in contiguous bytes of the state. + * + * SpoC-128 absorbs bytes using the mask bytes of the state at offsets + * 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, and 31. + * It squeezes bytes using the rate bytes of the state at offsets + * 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, and 23. + * + * This function assumes that bytes 8-15 have been pre-swapped with 16-23 + * so that the rate and mask portions of the state are contiguous. + * + * The sliscp_light256_swap_spoc() function can be used to switch + * between the canonical order and the pre-swapped order. + * + * \sa sliscp_light256_swap_spoc() + */ +void sliscp_light256_permute_spoc(unsigned char block[32]); + +/** + * \brief Swaps rate bytes in a sLiSCP-light 256-bit block for SpoC-128. + * + * \param block Points to the block to be rate-swapped. + * + * \sa sliscp_light256_permute_spoc() + */ +void sliscp_light256_swap_spoc(unsigned char block[32]); + +/** + * \brief Performs the sLiSCP-light permutation on a 192-bit block. + * + * \param block Points to the block to be permuted. + */ +void sliscp_light192_permute(unsigned char block[24]); + +/** + * \brief Performs the sLiSCP-light permutation on a 320-bit block. + * + * \param block Points to the block to be permuted. + * + * The ACE specification refers to this permutation as "ACE" but that + * can be confused with the name of the AEAD mode so we call this + * permutation "sLiSCP-light-320" instead. + * + * ACE absorbs and squeezes data at the rate bytes 0, 1, 2, 3, 16, 17, 18, 19. + * Efficiency can suffer because of the discontinuity in rate byte positions. + * + * To counteract this, we assume that the input to the permutation has been + * pre-swapped: bytes 4, 5, 6, 7 are swapped with bytes 16, 17, 18, 19 so + * that the rate is contiguous at the start of the state. + * + * The sliscp_light320_swap() function can be used to switch between the + * canonical order and the pre-swapped order. + * + * \sa sliscp_light320_swap() + */ +void sliscp_light320_permute(unsigned char block[40]); + +/** + * \brief Swaps rate bytes in a sLiSCP-light 320-bit block. + * + * \param block Points to the block to be rate-swapped. + * + * \sa sliscp_light320_permute() + */ +void sliscp_light320_swap(unsigned char block[40]); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ace/Implementations/crypto_hash/acehash256v1/rhys/internal-util.h b/ace/Implementations/crypto_hash/acehash256v1/rhys/internal-util.h new file mode 100644 index 0000000..e30166d --- /dev/null +++ b/ace/Implementations/crypto_hash/acehash256v1/rhys/internal-util.h @@ -0,0 +1,702 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef LW_INTERNAL_UTIL_H
+#define LW_INTERNAL_UTIL_H
+
+#include <stdint.h>
+
+/* Figure out how to inline functions using this C compiler */
+#if defined(__STDC__) && __STDC_VERSION__ >= 199901L
+#define STATIC_INLINE static inline
+#elif defined(__GNUC__) || defined(__clang__)
+#define STATIC_INLINE static __inline__
+#else
+#define STATIC_INLINE static
+#endif
+
+/* Try to figure out whether the CPU is little-endian or big-endian.
+ * May need to modify this to include new compiler-specific defines.
+ * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your
+ * compiler flags when you compile this library */
+#if defined(__x86_64) || defined(__x86_64__) || \
+    defined(__i386) || defined(__i386__) || \
+    defined(__AVR__) || defined(__arm) || defined(__arm__) || \
+    defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \
+    defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \
+    (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \
+    defined(__LITTLE_ENDIAN__)
+#define LW_UTIL_LITTLE_ENDIAN 1
+#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \
+      defined(__BIG_ENDIAN__)
+/* Big endian */
+#else
+#error "Cannot determine the endianness of this platform"
+#endif
+
+/* Helper macros to load and store values while converting endian-ness */
+
+/* Load a big-endian 32-bit word from a byte buffer */
+#define be_load_word32(ptr) \
+    ((((uint32_t)((ptr)[0])) << 24) | \
+     (((uint32_t)((ptr)[1])) << 16) | \
+     (((uint32_t)((ptr)[2])) << 8) | \
+     ((uint32_t)((ptr)[3])))
+
+/* Store a big-endian 32-bit word into a byte buffer */
+#define be_store_word32(ptr, x) \
+    do { \
+        uint32_t _x = (x); \
+        (ptr)[0] = (uint8_t)(_x >> 24); \
+        (ptr)[1] = (uint8_t)(_x >> 16); \
+        (ptr)[2] = (uint8_t)(_x >> 8); \
+        (ptr)[3] = (uint8_t)_x; \
+    } while (0)
+
+/* Load a little-endian 32-bit word from a byte buffer */
+#define le_load_word32(ptr) \
+    ((((uint32_t)((ptr)[3])) << 24) | \
+     (((uint32_t)((ptr)[2])) << 16) | \
+     (((uint32_t)((ptr)[1])) << 8) | \
+     ((uint32_t)((ptr)[0])))
+
+/* Store a little-endian 32-bit word into a byte buffer */
+#define le_store_word32(ptr, x) \
+    do { \
+        uint32_t _x = (x); \
+        (ptr)[0] = (uint8_t)_x; \
+        (ptr)[1] = (uint8_t)(_x >> 8); \
+        (ptr)[2] = (uint8_t)(_x >> 16); \
+        (ptr)[3] = (uint8_t)(_x >> 24); \
+    } while (0)
+
+/* Load a big-endian 64-bit word from a byte buffer */
+#define be_load_word64(ptr) \
+    ((((uint64_t)((ptr)[0])) << 56) | \
+     (((uint64_t)((ptr)[1])) << 48)
| \ + (((uint64_t)((ptr)[2])) << 40) | \ + (((uint64_t)((ptr)[3])) << 32) | \ + (((uint64_t)((ptr)[4])) << 24) | \ + (((uint64_t)((ptr)[5])) << 16) | \ + (((uint64_t)((ptr)[6])) << 8) | \ + ((uint64_t)((ptr)[7]))) + +/* Store a big-endian 64-bit word into a byte buffer */ +#define be_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 56); \ + (ptr)[1] = (uint8_t)(_x >> 48); \ + (ptr)[2] = (uint8_t)(_x >> 40); \ + (ptr)[3] = (uint8_t)(_x >> 32); \ + (ptr)[4] = (uint8_t)(_x >> 24); \ + (ptr)[5] = (uint8_t)(_x >> 16); \ + (ptr)[6] = (uint8_t)(_x >> 8); \ + (ptr)[7] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 64-bit word from a byte buffer */ +#define le_load_word64(ptr) \ + ((((uint64_t)((ptr)[7])) << 56) | \ + (((uint64_t)((ptr)[6])) << 48) | \ + (((uint64_t)((ptr)[5])) << 40) | \ + (((uint64_t)((ptr)[4])) << 32) | \ + (((uint64_t)((ptr)[3])) << 24) | \ + (((uint64_t)((ptr)[2])) << 16) | \ + (((uint64_t)((ptr)[1])) << 8) | \ + ((uint64_t)((ptr)[0]))) + +/* Store a little-endian 64-bit word into a byte buffer */ +#define le_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + (ptr)[4] = (uint8_t)(_x >> 32); \ + (ptr)[5] = (uint8_t)(_x >> 40); \ + (ptr)[6] = (uint8_t)(_x >> 48); \ + (ptr)[7] = (uint8_t)(_x >> 56); \ + } while (0) + +/* Load a big-endian 16-bit word from a byte buffer */ +#define be_load_word16(ptr) \ + ((((uint16_t)((ptr)[0])) << 8) | \ + ((uint16_t)((ptr)[1]))) + +/* Store a big-endian 16-bit word into a byte buffer */ +#define be_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 8); \ + (ptr)[1] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 16-bit word from a byte buffer */ +#define le_load_word16(ptr) \ + ((((uint16_t)((ptr)[1])) << 8) | \ + ((uint16_t)((ptr)[0]))) + +/* Store a little-endian 16-bit word into a byte buffer */ +#define le_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + } while (0) + +/* XOR a source byte buffer against a destination */ +#define lw_xor_block(dest, src, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ ^= *_src++; \ + --_len; \ + } \ + } while (0) + +/* XOR two source byte buffers and put the result in a destination buffer */ +#define lw_xor_block_2_src(dest, src1, src2, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ = *_src1++ ^ *_src2++; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time */ +#define lw_xor_block_2_dest(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest2++ = (*_dest++ ^= *_src++); \ + --_len; \ + } \ + } while (0) + +/* XOR two byte buffers and write to a destination which at the same + * time copying the contents of src2 to dest2 */ +#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = 
(src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src2++; \ + *_dest2++ = _temp; \ + *_dest++ = *_src1++ ^ _temp; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time. This version swaps the source value + * into the "dest" buffer */ +#define lw_xor_block_swap(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src++; \ + *_dest2++ = *_dest ^ _temp; \ + *_dest++ = _temp; \ + --_len; \ + } \ + } while (0) + +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + +/* Rotation macros for 32-bit arguments */ + +/* Generic left rotate */ +#define leftRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (32 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (32 - (bits))); \ + })) + +#if !LW_CRYPTO_ROTATE32_COMPOSED + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1(a) (leftRotate((a), 1)) +#define leftRotate2(a) (leftRotate((a), 2)) +#define leftRotate3(a) (leftRotate((a), 3)) +#define leftRotate4(a) (leftRotate((a), 4)) +#define leftRotate5(a) (leftRotate((a), 5)) +#define leftRotate6(a) (leftRotate((a), 6)) +#define leftRotate7(a) (leftRotate((a), 7)) +#define leftRotate8(a) (leftRotate((a), 8)) +#define leftRotate9(a) (leftRotate((a), 9)) +#define leftRotate10(a) (leftRotate((a), 10)) +#define leftRotate11(a) (leftRotate((a), 11)) +#define leftRotate12(a) (leftRotate((a), 12)) +#define leftRotate13(a) (leftRotate((a), 13)) +#define leftRotate14(a) (leftRotate((a), 14)) +#define leftRotate15(a) (leftRotate((a), 15)) +#define leftRotate16(a) (leftRotate((a), 16)) +#define leftRotate17(a) (leftRotate((a), 17)) +#define leftRotate18(a) (leftRotate((a), 18)) +#define leftRotate19(a) (leftRotate((a), 19)) +#define leftRotate20(a) (leftRotate((a), 20)) +#define leftRotate21(a) (leftRotate((a), 21)) +#define leftRotate22(a) (leftRotate((a), 22)) +#define leftRotate23(a) (leftRotate((a), 23)) +#define leftRotate24(a) (leftRotate((a), 24)) +#define leftRotate25(a) (leftRotate((a), 25)) +#define leftRotate26(a) (leftRotate((a), 26)) +#define leftRotate27(a) (leftRotate((a), 27)) +#define leftRotate28(a) (leftRotate((a), 28)) +#define leftRotate29(a) (leftRotate((a), 29)) +#define leftRotate30(a) (leftRotate((a), 30)) +#define leftRotate31(a) (leftRotate((a), 31)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1(a) (rightRotate((a), 1)) +#define rightRotate2(a) (rightRotate((a), 2)) +#define rightRotate3(a) (rightRotate((a), 3)) +#define rightRotate4(a) (rightRotate((a), 4)) +#define rightRotate5(a) (rightRotate((a), 5)) +#define rightRotate6(a) (rightRotate((a), 6)) +#define rightRotate7(a) (rightRotate((a), 7)) +#define rightRotate8(a) (rightRotate((a), 8)) +#define rightRotate9(a) (rightRotate((a), 9)) +#define rightRotate10(a) (rightRotate((a), 10)) +#define rightRotate11(a) (rightRotate((a), 11)) +#define rightRotate12(a) (rightRotate((a), 12)) +#define rightRotate13(a) (rightRotate((a), 13)) +#define rightRotate14(a) (rightRotate((a), 14)) +#define rightRotate15(a) (rightRotate((a), 15)) +#define rightRotate16(a) (rightRotate((a), 16)) +#define rightRotate17(a) (rightRotate((a), 17)) +#define rightRotate18(a) (rightRotate((a), 18)) +#define rightRotate19(a) (rightRotate((a), 19)) +#define rightRotate20(a) (rightRotate((a), 20)) +#define rightRotate21(a) (rightRotate((a), 21)) +#define rightRotate22(a) (rightRotate((a), 22)) +#define rightRotate23(a) (rightRotate((a), 23)) +#define rightRotate24(a) (rightRotate((a), 24)) +#define rightRotate25(a) (rightRotate((a), 25)) +#define rightRotate26(a) (rightRotate((a), 26)) +#define rightRotate27(a) (rightRotate((a), 27)) +#define rightRotate28(a) (rightRotate((a), 28)) +#define rightRotate29(a) (rightRotate((a), 29)) +#define rightRotate30(a) (rightRotate((a), 30)) +#define rightRotate31(a) (rightRotate((a), 31)) + +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) 
(leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Rotation macros for 64-bit arguments */ + +/* Generic left rotate */ +#define leftRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (64 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (64 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_64(a) (leftRotate_64((a), 1)) +#define leftRotate2_64(a) (leftRotate_64((a), 2)) +#define leftRotate3_64(a) (leftRotate_64((a), 3)) +#define leftRotate4_64(a) (leftRotate_64((a), 4)) +#define leftRotate5_64(a) (leftRotate_64((a), 5)) +#define leftRotate6_64(a) (leftRotate_64((a), 6)) +#define leftRotate7_64(a) (leftRotate_64((a), 7)) +#define leftRotate8_64(a) (leftRotate_64((a), 8)) +#define leftRotate9_64(a) (leftRotate_64((a), 9)) +#define leftRotate10_64(a) (leftRotate_64((a), 10)) +#define leftRotate11_64(a) (leftRotate_64((a), 11)) +#define leftRotate12_64(a) (leftRotate_64((a), 12)) +#define leftRotate13_64(a) (leftRotate_64((a), 13)) +#define leftRotate14_64(a) (leftRotate_64((a), 14)) +#define leftRotate15_64(a) (leftRotate_64((a), 15)) +#define leftRotate16_64(a) (leftRotate_64((a), 16)) +#define leftRotate17_64(a) (leftRotate_64((a), 17)) +#define leftRotate18_64(a) (leftRotate_64((a), 18)) +#define leftRotate19_64(a) (leftRotate_64((a), 19)) +#define leftRotate20_64(a) (leftRotate_64((a), 20)) +#define leftRotate21_64(a) (leftRotate_64((a), 21)) +#define leftRotate22_64(a) (leftRotate_64((a), 22)) +#define leftRotate23_64(a) (leftRotate_64((a), 23)) +#define leftRotate24_64(a) (leftRotate_64((a), 24)) +#define leftRotate25_64(a) (leftRotate_64((a), 25)) +#define leftRotate26_64(a) (leftRotate_64((a), 26)) +#define leftRotate27_64(a) (leftRotate_64((a), 27)) +#define leftRotate28_64(a) (leftRotate_64((a), 28)) +#define leftRotate29_64(a) (leftRotate_64((a), 29)) +#define leftRotate30_64(a) (leftRotate_64((a), 30)) +#define leftRotate31_64(a) (leftRotate_64((a), 31)) +#define leftRotate32_64(a) (leftRotate_64((a), 32)) +#define leftRotate33_64(a) (leftRotate_64((a), 33)) +#define leftRotate34_64(a) (leftRotate_64((a), 34)) +#define leftRotate35_64(a) (leftRotate_64((a), 35)) +#define leftRotate36_64(a) (leftRotate_64((a), 36)) +#define leftRotate37_64(a) (leftRotate_64((a), 37)) +#define leftRotate38_64(a) (leftRotate_64((a), 38)) +#define leftRotate39_64(a) (leftRotate_64((a), 39)) +#define leftRotate40_64(a) (leftRotate_64((a), 40)) +#define leftRotate41_64(a) (leftRotate_64((a), 41)) +#define leftRotate42_64(a) (leftRotate_64((a), 42)) +#define leftRotate43_64(a) (leftRotate_64((a), 43)) +#define leftRotate44_64(a) (leftRotate_64((a), 44)) +#define leftRotate45_64(a) (leftRotate_64((a), 45)) +#define leftRotate46_64(a) (leftRotate_64((a), 46)) +#define leftRotate47_64(a) (leftRotate_64((a), 47)) +#define leftRotate48_64(a) (leftRotate_64((a), 48)) +#define leftRotate49_64(a) (leftRotate_64((a), 49)) +#define leftRotate50_64(a) (leftRotate_64((a), 50)) +#define leftRotate51_64(a) (leftRotate_64((a), 51)) +#define leftRotate52_64(a) (leftRotate_64((a), 52)) +#define leftRotate53_64(a) (leftRotate_64((a), 53)) +#define leftRotate54_64(a) (leftRotate_64((a), 54)) +#define leftRotate55_64(a) (leftRotate_64((a), 55)) +#define leftRotate56_64(a) (leftRotate_64((a), 56)) +#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) +#define leftRotate58_64(a) (leftRotate_64((a), 58)) +#define leftRotate59_64(a) (leftRotate_64((a), 59)) +#define leftRotate60_64(a) (leftRotate_64((a), 60)) +#define leftRotate61_64(a) (leftRotate_64((a), 61)) +#define leftRotate62_64(a) (leftRotate_64((a), 62)) +#define leftRotate63_64(a) (leftRotate_64((a), 63)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_64(a) (rightRotate_64((a), 1)) +#define rightRotate2_64(a) (rightRotate_64((a), 2)) +#define rightRotate3_64(a) (rightRotate_64((a), 3)) +#define rightRotate4_64(a) (rightRotate_64((a), 4)) +#define rightRotate5_64(a) (rightRotate_64((a), 5)) +#define rightRotate6_64(a) (rightRotate_64((a), 6)) +#define rightRotate7_64(a) (rightRotate_64((a), 7)) +#define rightRotate8_64(a) (rightRotate_64((a), 8)) +#define rightRotate9_64(a) (rightRotate_64((a), 9)) +#define rightRotate10_64(a) (rightRotate_64((a), 10)) +#define rightRotate11_64(a) (rightRotate_64((a), 11)) +#define rightRotate12_64(a) (rightRotate_64((a), 12)) +#define rightRotate13_64(a) (rightRotate_64((a), 13)) +#define rightRotate14_64(a) (rightRotate_64((a), 14)) +#define rightRotate15_64(a) (rightRotate_64((a), 15)) +#define rightRotate16_64(a) (rightRotate_64((a), 16)) +#define rightRotate17_64(a) (rightRotate_64((a), 17)) +#define rightRotate18_64(a) (rightRotate_64((a), 18)) +#define rightRotate19_64(a) (rightRotate_64((a), 19)) +#define rightRotate20_64(a) (rightRotate_64((a), 20)) +#define rightRotate21_64(a) (rightRotate_64((a), 21)) +#define rightRotate22_64(a) (rightRotate_64((a), 22)) +#define rightRotate23_64(a) (rightRotate_64((a), 23)) +#define rightRotate24_64(a) (rightRotate_64((a), 24)) +#define rightRotate25_64(a) (rightRotate_64((a), 25)) +#define rightRotate26_64(a) (rightRotate_64((a), 26)) +#define rightRotate27_64(a) (rightRotate_64((a), 27)) +#define rightRotate28_64(a) (rightRotate_64((a), 28)) +#define rightRotate29_64(a) (rightRotate_64((a), 29)) +#define rightRotate30_64(a) (rightRotate_64((a), 30)) +#define rightRotate31_64(a) (rightRotate_64((a), 31)) +#define rightRotate32_64(a) (rightRotate_64((a), 32)) +#define rightRotate33_64(a) (rightRotate_64((a), 33)) +#define rightRotate34_64(a) (rightRotate_64((a), 34)) +#define rightRotate35_64(a) (rightRotate_64((a), 35)) +#define rightRotate36_64(a) (rightRotate_64((a), 36)) +#define rightRotate37_64(a) (rightRotate_64((a), 37)) +#define rightRotate38_64(a) (rightRotate_64((a), 38)) +#define rightRotate39_64(a) (rightRotate_64((a), 39)) +#define rightRotate40_64(a) (rightRotate_64((a), 40)) +#define rightRotate41_64(a) (rightRotate_64((a), 41)) +#define rightRotate42_64(a) (rightRotate_64((a), 42)) +#define rightRotate43_64(a) (rightRotate_64((a), 43)) +#define rightRotate44_64(a) (rightRotate_64((a), 44)) +#define rightRotate45_64(a) (rightRotate_64((a), 45)) +#define rightRotate46_64(a) (rightRotate_64((a), 46)) +#define rightRotate47_64(a) (rightRotate_64((a), 47)) +#define rightRotate48_64(a) (rightRotate_64((a), 48)) +#define rightRotate49_64(a) (rightRotate_64((a), 49)) +#define rightRotate50_64(a) (rightRotate_64((a), 50)) +#define rightRotate51_64(a) (rightRotate_64((a), 51)) +#define rightRotate52_64(a) (rightRotate_64((a), 52)) +#define rightRotate53_64(a) (rightRotate_64((a), 53)) +#define rightRotate54_64(a) (rightRotate_64((a), 54)) +#define rightRotate55_64(a) (rightRotate_64((a), 55)) +#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) +#define rightRotate57_64(a) (rightRotate_64((a), 57)) +#define rightRotate58_64(a) (rightRotate_64((a), 58)) +#define rightRotate59_64(a) (rightRotate_64((a), 59)) +#define rightRotate60_64(a) (rightRotate_64((a), 60)) +#define rightRotate61_64(a) (rightRotate_64((a), 61)) +#define rightRotate62_64(a) (rightRotate_64((a), 62)) +#define rightRotate63_64(a) (rightRotate_64((a), 63)) + +/* Rotate a 16-bit value left by a number of bits */ +#define leftRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (16 - (bits))); \ + })) + +/* Rotate a 16-bit value right by a number of bits */ +#define rightRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (16 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_16(a) (leftRotate_16((a), 1)) +#define leftRotate2_16(a) (leftRotate_16((a), 2)) +#define leftRotate3_16(a) (leftRotate_16((a), 3)) +#define leftRotate4_16(a) (leftRotate_16((a), 4)) +#define leftRotate5_16(a) (leftRotate_16((a), 5)) +#define leftRotate6_16(a) (leftRotate_16((a), 6)) +#define leftRotate7_16(a) (leftRotate_16((a), 7)) +#define leftRotate8_16(a) (leftRotate_16((a), 8)) +#define leftRotate9_16(a) (leftRotate_16((a), 9)) +#define leftRotate10_16(a) (leftRotate_16((a), 10)) +#define leftRotate11_16(a) (leftRotate_16((a), 11)) +#define leftRotate12_16(a) (leftRotate_16((a), 12)) +#define leftRotate13_16(a) (leftRotate_16((a), 13)) +#define leftRotate14_16(a) (leftRotate_16((a), 14)) +#define leftRotate15_16(a) (leftRotate_16((a), 15)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_16(a) (rightRotate_16((a), 1)) +#define rightRotate2_16(a) (rightRotate_16((a), 2)) +#define rightRotate3_16(a) (rightRotate_16((a), 3)) +#define rightRotate4_16(a) (rightRotate_16((a), 4)) +#define rightRotate5_16(a) (rightRotate_16((a), 5)) +#define rightRotate6_16(a) (rightRotate_16((a), 6)) +#define rightRotate7_16(a) (rightRotate_16((a), 7)) +#define rightRotate8_16(a) (rightRotate_16((a), 8)) +#define rightRotate9_16(a) (rightRotate_16((a), 9)) +#define rightRotate10_16(a) (rightRotate_16((a), 10)) +#define rightRotate11_16(a) (rightRotate_16((a), 11)) +#define rightRotate12_16(a) (rightRotate_16((a), 12)) +#define rightRotate13_16(a) (rightRotate_16((a), 13)) +#define rightRotate14_16(a) (rightRotate_16((a), 14)) +#define rightRotate15_16(a) (rightRotate_16((a), 15)) + +/* Rotate an 8-bit value left by a number of bits */ +#define leftRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (8 - (bits))); \ + })) + +/* Rotate an 8-bit value right by a number of bits */ +#define rightRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (8 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_8(a) (leftRotate_8((a), 1)) +#define leftRotate2_8(a) (leftRotate_8((a), 2)) +#define leftRotate3_8(a) (leftRotate_8((a), 3)) +#define leftRotate4_8(a) (leftRotate_8((a), 4)) +#define leftRotate5_8(a) (leftRotate_8((a), 5)) +#define leftRotate6_8(a) (leftRotate_8((a), 6)) +#define leftRotate7_8(a) (leftRotate_8((a), 7)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_8(a) (rightRotate_8((a), 1)) +#define rightRotate2_8(a) (rightRotate_8((a), 2)) +#define rightRotate3_8(a) (rightRotate_8((a), 3)) +#define rightRotate4_8(a) (rightRotate_8((a), 4)) +#define rightRotate5_8(a) (rightRotate_8((a), 5)) +#define rightRotate6_8(a) (rightRotate_8((a), 6)) +#define rightRotate7_8(a) (rightRotate_8((a), 7)) + +#endif diff --git a/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/aead-common.c b/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/aead-common.h b/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. 
- * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
- * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/api.h b/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/ascon128.c b/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/ascon128.c deleted file mode 100644 index 80b2e46..0000000 --- a/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/ascon128.c +++ /dev/null @@ -1,383 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "ascon128.h" -#include "internal-ascon.h" -#include - -/** - * \brief Initialization vector for ASCON-128. - */ -#define ASCON128_IV 0x80400c0600000000ULL - -/** - * \brief Initialization vector for ASCON-128a. - */ -#define ASCON128a_IV 0x80800c0800000000ULL - -/** - * \brief Initialization vector for ASCON-80pq. - */ -#define ASCON80PQ_IV 0xa0400c06U - -aead_cipher_t const ascon128_cipher = { - "ASCON-128", - ASCON128_KEY_SIZE, - ASCON128_NONCE_SIZE, - ASCON128_TAG_SIZE, - AEAD_FLAG_NONE, - ascon128_aead_encrypt, - ascon128_aead_decrypt -}; - -aead_cipher_t const ascon128a_cipher = { - "ASCON-128a", - ASCON128_KEY_SIZE, - ASCON128_NONCE_SIZE, - ASCON128_TAG_SIZE, - AEAD_FLAG_NONE, - ascon128a_aead_encrypt, - ascon128a_aead_decrypt -}; - -aead_cipher_t const ascon80pq_cipher = { - "ASCON-80pq", - ASCON80PQ_KEY_SIZE, - ASCON80PQ_NONCE_SIZE, - ASCON80PQ_TAG_SIZE, - AEAD_FLAG_NONE, - ascon80pq_aead_encrypt, - ascon80pq_aead_decrypt -}; - -/** - * \brief Absorbs data into an ASCON state. 
- * - * \param state The state to absorb the data into. - * \param data Points to the data to be absorbed. - * \param len Length of the data to be absorbed. - * \param rate Block rate, which is either 8 or 16. - * \param first_round First round of the permutation to apply each block. - */ -static void ascon_absorb - (ascon_state_t *state, const unsigned char *data, - unsigned long long len, uint8_t rate, uint8_t first_round) -{ - while (len >= rate) { - lw_xor_block(state->B, data, rate); - ascon_permute(state, first_round); - data += rate; - len -= rate; - } - lw_xor_block(state->B, data, (unsigned)len); - state->B[(unsigned)len] ^= 0x80; - ascon_permute(state, first_round); -} - -/** - * \brief Encrypts a block of data with an ASCON state. - * - * \param state The state to encrypt with. - * \param dest Points to the destination buffer. - * \param src Points to the source buffer. - * \param len Length of the data to encrypt from \a src into \a dest. - * \param rate Block rate, which is either 8 or 16. - * \param first_round First round of the permutation to apply each block. - */ -static void ascon_encrypt - (ascon_state_t *state, unsigned char *dest, - const unsigned char *src, unsigned long long len, - uint8_t rate, uint8_t first_round) -{ - while (len >= rate) { - lw_xor_block_2_dest(dest, state->B, src, rate); - ascon_permute(state, first_round); - dest += rate; - src += rate; - len -= rate; - } - lw_xor_block_2_dest(dest, state->B, src, (unsigned)len); - state->B[(unsigned)len] ^= 0x80; -} - -/** - * \brief Decrypts a block of data with an ASCON state. - * - * \param state The state to decrypt with. - * \param dest Points to the destination buffer. - * \param src Points to the source buffer. - * \param len Length of the data to decrypt from \a src into \a dest. - * \param rate Block rate, which is either 8 or 16. - * \param first_round First round of the permutation to apply each block. 
- */ -static void ascon_decrypt - (ascon_state_t *state, unsigned char *dest, - const unsigned char *src, unsigned long long len, - uint8_t rate, uint8_t first_round) -{ - while (len >= rate) { - lw_xor_block_swap(dest, state->B, src, rate); - ascon_permute(state, first_round); - dest += rate; - src += rate; - len -= rate; - } - lw_xor_block_swap(dest, state->B, src, (unsigned)len); - state->B[(unsigned)len] ^= 0x80; -} - -int ascon128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - ascon_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ASCON128_TAG_SIZE; - - /* Initialize the ASCON state */ - be_store_word64(state.B, ASCON128_IV); - memcpy(state.B + 8, k, ASCON128_KEY_SIZE); - memcpy(state.B + 24, npub, ASCON128_NONCE_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k, ASCON128_KEY_SIZE); - - /* Absorb the associated data into the state */ - if (adlen > 0) - ascon_absorb(&state, ad, adlen, 8, 6); - - /* Separator between the associated data and the payload */ - state.B[39] ^= 0x01; - - /* Encrypt the plaintext to create the ciphertext */ - ascon_encrypt(&state, c, m, mlen, 8, 6); - - /* Finalize and compute the authentication tag */ - lw_xor_block(state.B + 8, k, ASCON128_KEY_SIZE); - ascon_permute(&state, 0); - lw_xor_block_2_src(c + mlen, state.B + 24, k, 16); - return 0; -} - -int ascon128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - ascon_state_t state; - (void)nsec; - - /* Set the length of the returned plaintext */ - if (clen < ASCON128_TAG_SIZE) - return -1; - *mlen = clen - ASCON128_TAG_SIZE; - - /* Initialize the ASCON state */ - be_store_word64(state.B, ASCON128_IV); - memcpy(state.B + 8, k, ASCON128_KEY_SIZE); - memcpy(state.B + 24, npub, ASCON128_NONCE_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k, ASCON128_KEY_SIZE); - - /* Absorb the associated data into the state */ - if (adlen > 0) - ascon_absorb(&state, ad, adlen, 8, 6); - - /* Separator between the associated data and the payload */ - state.B[39] ^= 0x01; - - /* Decrypt the ciphertext to create the plaintext */ - ascon_decrypt(&state, m, c, *mlen, 8, 6); - - /* Finalize and check the authentication tag */ - lw_xor_block(state.B + 8, k, ASCON128_KEY_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k, 16); - return aead_check_tag - (m, *mlen, state.B + 24, c + *mlen, ASCON128_TAG_SIZE); -} - -int ascon128a_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - ascon_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ASCON128_TAG_SIZE; - - /* Initialize the ASCON state */ - be_store_word64(state.B, ASCON128a_IV); - memcpy(state.B + 8, k, ASCON128_KEY_SIZE); - memcpy(state.B + 24, npub, ASCON128_NONCE_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k, ASCON128_KEY_SIZE); - - /* Absorb the associated data into the state */ - if (adlen > 0) - ascon_absorb(&state, ad, adlen, 
16, 4); - - /* Separator between the associated data and the payload */ - state.B[39] ^= 0x01; - - /* Encrypt the plaintext to create the ciphertext */ - ascon_encrypt(&state, c, m, mlen, 16, 4); - - /* Finalize and compute the authentication tag */ - lw_xor_block(state.B + 16, k, ASCON128_KEY_SIZE); - ascon_permute(&state, 0); - lw_xor_block_2_src(c + mlen, state.B + 24, k, 16); - return 0; -} - -int ascon128a_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - ascon_state_t state; - (void)nsec; - - /* Set the length of the returned plaintext */ - if (clen < ASCON128_TAG_SIZE) - return -1; - *mlen = clen - ASCON128_TAG_SIZE; - - /* Initialize the ASCON state */ - be_store_word64(state.B, ASCON128a_IV); - memcpy(state.B + 8, k, ASCON128_KEY_SIZE); - memcpy(state.B + 24, npub, ASCON128_NONCE_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k, ASCON128_KEY_SIZE); - - /* Absorb the associated data into the state */ - if (adlen > 0) - ascon_absorb(&state, ad, adlen, 16, 4); - - /* Separator between the associated data and the payload */ - state.B[39] ^= 0x01; - - /* Decrypt the ciphertext to create the plaintext */ - ascon_decrypt(&state, m, c, *mlen, 16, 4); - - /* Finalize and check the authentication tag */ - lw_xor_block(state.B + 16, k, ASCON128_KEY_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k, 16); - return aead_check_tag - (m, *mlen, state.B + 24, c + *mlen, ASCON128_TAG_SIZE); -} - -int ascon80pq_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - ascon_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ASCON80PQ_TAG_SIZE; - - /* Initialize the ASCON state */ - be_store_word32(state.B, ASCON80PQ_IV); - memcpy(state.B + 4, k, ASCON80PQ_KEY_SIZE); - memcpy(state.B + 24, npub, ASCON80PQ_NONCE_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 20, k, ASCON80PQ_KEY_SIZE); - - /* Absorb the associated data into the state */ - if (adlen > 0) - ascon_absorb(&state, ad, adlen, 8, 6); - - /* Separator between the associated data and the payload */ - state.B[39] ^= 0x01; - - /* Encrypt the plaintext to create the ciphertext */ - ascon_encrypt(&state, c, m, mlen, 8, 6); - - /* Finalize and compute the authentication tag */ - lw_xor_block(state.B + 8, k, ASCON80PQ_KEY_SIZE); - ascon_permute(&state, 0); - lw_xor_block_2_src(c + mlen, state.B + 24, k + 4, 16); - return 0; -} - -int ascon80pq_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - ascon_state_t state; - (void)nsec; - - /* Set the length of the returned plaintext */ - if (clen < ASCON80PQ_TAG_SIZE) - return -1; - *mlen = clen - ASCON80PQ_TAG_SIZE; - - /* Initialize the ASCON state */ - be_store_word32(state.B, ASCON80PQ_IV); - memcpy(state.B + 4, k, ASCON80PQ_KEY_SIZE); - memcpy(state.B + 24, npub, ASCON80PQ_NONCE_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 20, k, ASCON80PQ_KEY_SIZE); - - /* Absorb the associated data into the state */ - if (adlen > 0) - 
ascon_absorb(&state, ad, adlen, 8, 6); - - /* Separator between the associated data and the payload */ - state.B[39] ^= 0x01; - - /* Decrypt the ciphertext to create the plaintext */ - ascon_decrypt(&state, m, c, *mlen, 8, 6); - - /* Finalize and check the authentication tag */ - lw_xor_block(state.B + 8, k, ASCON80PQ_KEY_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k + 4, 16); - return aead_check_tag - (m, *mlen, state.B + 24, c + *mlen, ASCON80PQ_TAG_SIZE); -} diff --git a/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/ascon128.h b/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/ascon128.h deleted file mode 100644 index fd9db13..0000000 --- a/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/ascon128.h +++ /dev/null @@ -1,408 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ASCON_H -#define LWCRYPTO_ASCON_H - -#include "aead-common.h" - -/** - * \file ascon128.h - * \brief ASCON-128 encryption algorithm and related family members. - * - * The ASCON family consists of several related algorithms: - * - * \li ASCON-128 with a 128-bit key, a 128-bit nonce, a 128-bit authentication - * tag, and a block rate of 64 bits. - * \li ASCON-128a with a 128-bit key, a 128-bit nonce, a 128-bit authentication - * tag, and a block rate of 128 bits. This is faster than ASCON-128 but may - * not be as secure. - * \li ASCON-80pq with a 160-bit key, a 128-bit nonce, a 128-bit authentication - * tag, and a block rate of 64 bits. This is similar to ASCON-128 but has a - * 160-bit key instead which may be more resistant against quantum computers. - * \li ASCON-HASH with a 256-bit hash output. - * - * References: https://ascon.iaik.tugraz.at/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for ASCON-128 and ASCON-128a. - */ -#define ASCON128_KEY_SIZE 16 - -/** - * \brief Size of the nonce for ASCON-128 and ASCON-128a. - */ -#define ASCON128_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for ASCON-128 and ASCON-128a. - */ -#define ASCON128_TAG_SIZE 16 - -/** - * \brief Size of the key for ASCON-80pq. - */ -#define ASCON80PQ_KEY_SIZE 20 - -/** - * \brief Size of the nonce for ASCON-80pq. - */ -#define ASCON80PQ_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for ASCON-80pq. - */ -#define ASCON80PQ_TAG_SIZE 16 - -/** - * \brief Size of the hash output for ASCON-HASH. 
- */ -#define ASCON_HASH_SIZE 32 - -/** - * \brief State information for ASCON-HASH and ASCON-XOF incremental modes. - */ -typedef union -{ - struct { - unsigned char state[40]; /**< Current hash state */ - unsigned char count; /**< Number of bytes in the current block */ - unsigned char mode; /**< Hash mode: 0 for absorb, 1 for squeeze */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} ascon_hash_state_t; - -/** - * \brief Meta-information block for the ASCON-128 cipher. - */ -extern aead_cipher_t const ascon128_cipher; - -/** - * \brief Meta-information block for the ASCON-128a cipher. - */ -extern aead_cipher_t const ascon128a_cipher; - -/** - * \brief Meta-information block for the ASCON-80pq cipher. - */ -extern aead_cipher_t const ascon80pq_cipher; - -/** - * \brief Meta-information block for the ASCON-HASH algorithm. - */ -extern aead_hash_algorithm_t const ascon_hash_algorithm; - -/** - * \brief Meta-information block for the ASCON-XOF algorithm. - */ -extern aead_hash_algorithm_t const ascon_xof_algorithm; - -/** - * \brief Encrypts and authenticates a packet with ASCON-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa ascon128_aead_decrypt() - */ -int ascon128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ASCON-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- * - * \sa ascon128_aead_encrypt() - */ -int ascon128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ASCON-128a. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa ascon128a_aead_decrypt() - */ -int ascon128a_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ASCON-128a. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa ascon128a_aead_encrypt() - */ -int ascon128a_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ASCON-80pq. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. 
- * \param k Points to the 20 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa ascon80pq_aead_decrypt() - */ -int ascon80pq_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ASCON-80pq. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 20 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa ascon80pq_aead_encrypt() - */ -int ascon80pq_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with ASCON-HASH. - * - * \param out Buffer to receive the hash output which must be at least - * ASCON_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - * - * \sa ascon_hash_init(), ascon_hash_absorb(), ascon_hash_squeeze() - */ -int ascon_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an ASCON-HASH hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa ascon_hash_update(), ascon_hash_finalize(), ascon_hash() - */ -void ascon_hash_init(ascon_hash_state_t *state); - -/** - * \brief Updates an ASCON-HASH state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa ascon_hash_init(), ascon_hash_finalize() - */ -void ascon_hash_update - (ascon_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an ASCON-HASH hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 32-byte hash value. - * - * \sa ascon_hash_init(), ascon_hash_update() - */ -void ascon_hash_finalize - (ascon_hash_state_t *state, unsigned char *out); - -/** - * \brief Hashes a block of input data with ASCON-XOF and generates a - * fixed-length 32 byte output. 
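
The incremental hashing functions above (ascon_hash_init / ascon_hash_update / ascon_hash_finalize) produce the same digest as the one-shot ascon_hash(). A small sketch, assuming this header; hash_both_ways is a hypothetical helper name.

    #include <string.h>
    #include "ascon128.h"

    int hash_both_ways(const unsigned char *data, unsigned long long len,
                       unsigned char digest[ASCON_HASH_SIZE])
    {
        unsigned char check[ASCON_HASH_SIZE];
        ascon_hash_state_t state;

        /* One-shot hashing */
        ascon_hash(digest, data, len);

        /* Incremental hashing over two halves of the same input */
        ascon_hash_init(&state);
        ascon_hash_update(&state, data, len / 2);
        ascon_hash_update(&state, data + len / 2, len - len / 2);
        ascon_hash_finalize(&state, check);

        /* Both paths must agree on the 32-byte digest */
        return memcmp(digest, check, ASCON_HASH_SIZE) == 0 ? 0 : -1;
    }
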
- * - * \param out Buffer to receive the hash output which must be at least - * ASCON_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - * - * Use ascon_xof_squeeze() instead if you need variable-length XOF ouutput. - * - * \sa ascon_xof_init(), ascon_xof_absorb(), ascon_xof_squeeze() - */ -int ascon_xof - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an ASCON-XOF hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa ascon_xof_absorb(), ascon_xof_squeeze(), ascon_xof() - */ -void ascon_xof_init(ascon_hash_state_t *state); - -/** - * \brief Aborbs more input data into an ASCON-XOF state. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -void ascon_xof_absorb - (ascon_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Squeezes output data from an ASCON-XOF state. - * - * \param state Hash state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - * - * \sa ascon_xof_init(), ascon_xof_update() - */ -void ascon_xof_squeeze - (ascon_hash_state_t *state, unsigned char *out, unsigned long long outlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/encrypt.c b/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/encrypt.c deleted file mode 100644 index 4f35480..0000000 --- a/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "ascon128.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return ascon128a_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return ascon128a_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/internal-ascon-avr.S b/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/internal-ascon-avr.S deleted file mode 100644 index e8a4fb4..0000000 --- a/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/internal-ascon-avr.S +++ /dev/null @@ -1,778 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global ascon_permute - .type ascon_permute, @function -ascon_permute: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ldi r18,15 - sub r18,r22 - swap r18 - or r22,r18 - ldd r3,Z+16 - ldd r2,Z+17 - ldd r27,Z+18 - ldd r26,Z+19 - ldd r21,Z+20 - ldd r20,Z+21 - ldd r19,Z+22 
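
The ASCON-XOF interface above extends the fixed 32-byte ascon_hash output to arbitrary lengths: absorb the input, then squeeze as many bytes as needed. A sketch assuming the same header; derive_okm is a hypothetical helper, and the split into two squeeze calls is only to show that output can be taken in several chunks.

    #include "ascon128.h"

    void derive_okm(const unsigned char *seed, unsigned long long seedlen,
                    unsigned char *okm, unsigned long long okmlen)
    {
        ascon_hash_state_t xof;

        ascon_xof_init(&xof);
        ascon_xof_absorb(&xof, seed, seedlen);

        /* The state's mode field tracks the absorb/squeeze transition,
         * so repeated squeeze calls continue the same output stream. */
        ascon_xof_squeeze(&xof, okm, okmlen / 2);
        ascon_xof_squeeze(&xof, okm + okmlen / 2, okmlen - okmlen / 2);
    }
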
- ldd r18,Z+23 - ldd r11,Z+32 - ldd r10,Z+33 - ldd r9,Z+34 - ldd r8,Z+35 - ldd r7,Z+36 - ldd r6,Z+37 - ldd r5,Z+38 - ldd r4,Z+39 -20: - eor r18,r22 - ldd r23,Z+7 - ldd r12,Z+15 - ldd r13,Z+31 - eor r23,r4 - eor r4,r13 - eor r18,r12 - mov r14,r23 - mov r15,r12 - mov r24,r18 - mov r25,r13 - mov r16,r4 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r18 - and r24,r13 - and r25,r4 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r18,r25 - eor r13,r16 - eor r4,r14 - eor r12,r23 - eor r23,r4 - eor r13,r18 - com r18 - std Z+7,r23 - std Z+15,r12 - std Z+31,r13 - std Z+39,r4 - ldd r23,Z+6 - ldd r12,Z+14 - ldd r13,Z+30 - eor r23,r5 - eor r5,r13 - eor r19,r12 - mov r14,r23 - mov r15,r12 - mov r24,r19 - mov r25,r13 - mov r16,r5 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r19 - and r24,r13 - and r25,r5 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r19,r25 - eor r13,r16 - eor r5,r14 - eor r12,r23 - eor r23,r5 - eor r13,r19 - com r19 - std Z+6,r23 - std Z+14,r12 - std Z+30,r13 - std Z+38,r5 - ldd r23,Z+5 - ldd r12,Z+13 - ldd r13,Z+29 - eor r23,r6 - eor r6,r13 - eor r20,r12 - mov r14,r23 - mov r15,r12 - mov r24,r20 - mov r25,r13 - mov r16,r6 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r20 - and r24,r13 - and r25,r6 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r20,r25 - eor r13,r16 - eor r6,r14 - eor r12,r23 - eor r23,r6 - eor r13,r20 - com r20 - std Z+5,r23 - std Z+13,r12 - std Z+29,r13 - std Z+37,r6 - ldd r23,Z+4 - ldd r12,Z+12 - ldd r13,Z+28 - eor r23,r7 - eor r7,r13 - eor r21,r12 - mov r14,r23 - mov r15,r12 - mov r24,r21 - mov r25,r13 - mov r16,r7 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r21 - and r24,r13 - and r25,r7 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r21,r25 - eor r13,r16 - eor r7,r14 - eor r12,r23 - eor r23,r7 - eor r13,r21 - com r21 - std Z+4,r23 - std Z+12,r12 - std Z+28,r13 - std Z+36,r7 - ldd r23,Z+3 - ldd r12,Z+11 - ldd r13,Z+27 - eor r23,r8 - eor r8,r13 - eor r26,r12 - mov r14,r23 - mov r15,r12 - mov r24,r26 - mov r25,r13 - mov r16,r8 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r26 - and r24,r13 - and r25,r8 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r26,r25 - eor r13,r16 - eor r8,r14 - eor r12,r23 - eor r23,r8 - eor r13,r26 - com r26 - std Z+3,r23 - std Z+11,r12 - std Z+27,r13 - std Z+35,r8 - ldd r23,Z+2 - ldd r12,Z+10 - ldd r13,Z+26 - eor r23,r9 - eor r9,r13 - eor r27,r12 - mov r14,r23 - mov r15,r12 - mov r24,r27 - mov r25,r13 - mov r16,r9 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r27 - and r24,r13 - and r25,r9 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r27,r25 - eor r13,r16 - eor r9,r14 - eor r12,r23 - eor r23,r9 - eor r13,r27 - com r27 - std Z+2,r23 - std Z+10,r12 - std Z+26,r13 - std Z+34,r9 - ldd r23,Z+1 - ldd r12,Z+9 - ldd r13,Z+25 - eor r23,r10 - eor r10,r13 - eor r2,r12 - mov r14,r23 - mov r15,r12 - mov r24,r2 - mov r25,r13 - mov r16,r10 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r2 - and r24,r13 - and r25,r10 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r2,r25 - eor r13,r16 - eor r10,r14 - eor r12,r23 - eor r23,r10 - eor r13,r2 - com r2 - std Z+1,r23 - std Z+9,r12 - std Z+25,r13 - std Z+33,r10 - ld r23,Z - ldd r12,Z+8 - ldd r13,Z+24 - eor r23,r11 - eor r11,r13 - eor r3,r12 - mov r14,r23 - mov r15,r12 - mov r24,r3 - mov r25,r13 - mov r16,r11 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r3 - and r24,r13 - and r25,r11 - 
and r16,r23 - eor r23,r15 - eor r12,r24 - eor r3,r25 - eor r13,r16 - eor r11,r14 - eor r12,r23 - eor r23,r11 - eor r13,r3 - com r3 - st Z,r23 - std Z+8,r12 - std Z+24,r13 - std Z+32,r11 - ld r11,Z - ldd r10,Z+1 - ldd r9,Z+2 - ldd r8,Z+3 - ldd r7,Z+4 - ldd r6,Z+5 - ldd r5,Z+6 - ldd r4,Z+7 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r14 - mov r14,r24 - mov r24,r16 - mov r16,r0 - mov r0,r13 - mov r13,r15 - mov r15,r25 - mov r25,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r17,r0 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r4 - mov r0,r5 - push r6 - mov r4,r7 - mov r5,r8 - mov r6,r9 - mov r7,r10 - mov r8,r11 - pop r11 - mov r10,r0 - mov r9,r23 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - st Z,r11 - std Z+1,r10 - std Z+2,r9 - std Z+3,r8 - std Z+4,r7 - std Z+5,r6 - std Z+6,r5 - std Z+7,r4 - ldd r11,Z+8 - ldd r10,Z+9 - ldd r9,Z+10 - ldd r8,Z+11 - ldd r7,Z+12 - ldd r6,Z+13 - ldd r5,Z+14 - ldd r4,Z+15 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r9 - mov r0,r10 - push r11 - mov r11,r8 - mov r10,r7 - mov r9,r6 - mov r8,r5 - mov r7,r4 - pop r6 - mov r5,r0 - mov r4,r23 - lsl r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - adc r4,r1 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - std Z+8,r11 - std Z+9,r10 - std Z+10,r9 - std Z+11,r8 - std Z+12,r7 - std Z+13,r6 - std Z+14,r5 - std Z+15,r4 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - bst r12,0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - bld r17,7 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - eor r24,r26 - eor r25,r27 - eor r16,r2 - eor r17,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r2 - rol r3 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r2 - rol r3 - adc r18,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - eor r26,r24 - eor r27,r25 - eor r2,r16 - eor r3,r17 - ldd r11,Z+24 - ldd r10,Z+25 - ldd r9,Z+26 - ldd r8,Z+27 - ldd r7,Z+28 - ldd r6,Z+29 - ldd r5,Z+30 - ldd r4,Z+31 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - mov 
r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r17,r0 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r0,r4 - mov r4,r6 - mov r6,r8 - mov r8,r10 - mov r10,r0 - mov r0,r5 - mov r5,r7 - mov r7,r9 - mov r9,r11 - mov r11,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - std Z+24,r11 - std Z+25,r10 - std Z+26,r9 - std Z+27,r8 - std Z+28,r7 - std Z+29,r6 - std Z+30,r5 - std Z+31,r4 - ldd r11,Z+32 - ldd r10,Z+33 - ldd r9,Z+34 - ldd r8,Z+35 - ldd r7,Z+36 - ldd r6,Z+37 - ldd r5,Z+38 - ldd r4,Z+39 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r9 - mov r0,r10 - push r11 - mov r11,r8 - mov r10,r7 - mov r9,r6 - mov r8,r5 - mov r7,r4 - pop r6 - mov r5,r0 - mov r4,r23 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - subi r22,15 - ldi r25,60 - cpse r22,r25 - rjmp 20b - std Z+16,r3 - std Z+17,r2 - std Z+18,r27 - std Z+19,r26 - std Z+20,r21 - std Z+21,r20 - std Z+22,r19 - std Z+23,r18 - std Z+32,r11 - std Z+33,r10 - std Z+34,r9 - std Z+35,r8 - std Z+36,r7 - std Z+37,r6 - std Z+38,r5 - std Z+39,r4 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size ascon_permute, .-ascon_permute - -#endif diff --git a/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/internal-ascon.c b/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/internal-ascon.c deleted file mode 100644 index 657aabe..0000000 --- a/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/internal-ascon.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-ascon.h" - -#if !defined(__AVR__) - -void ascon_permute(ascon_state_t *state, uint8_t first_round) -{ - uint64_t t0, t1, t2, t3, t4; -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = be_load_word64(state->B); - uint64_t x1 = be_load_word64(state->B + 8); - uint64_t x2 = be_load_word64(state->B + 16); - uint64_t x3 = be_load_word64(state->B + 24); - uint64_t x4 = be_load_word64(state->B + 32); -#else - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; -#endif - while (first_round < 12) { - /* Add the round constant to the state */ - x2 ^= ((0x0F - first_round) << 4) | first_round; - - /* Substitution layer - apply the s-box using bit-slicing - * according to the algorithm recommended in the specification */ - x0 ^= x4; x4 ^= x3; x2 ^= x1; - t0 = ~x0; t1 = ~x1; t2 = ~x2; t3 = ~x3; t4 = ~x4; - t0 &= x1; t1 &= x2; t2 &= x3; t3 &= x4; t4 &= x0; - x0 ^= t1; x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t0; - x1 ^= x0; x0 ^= x4; x3 ^= x2; x2 = ~x2; - - /* Linear diffusion layer */ - x0 ^= rightRotate19_64(x0) ^ rightRotate28_64(x0); - x1 ^= rightRotate61_64(x1) ^ rightRotate39_64(x1); - x2 ^= rightRotate1_64(x2) ^ rightRotate6_64(x2); - x3 ^= rightRotate10_64(x3) ^ rightRotate17_64(x3); - x4 ^= rightRotate7_64(x4) ^ rightRotate41_64(x4); - - /* Move onto the next round */ - ++first_round; - } -#if defined(LW_UTIL_LITTLE_ENDIAN) - be_store_word64(state->B, x0); - be_store_word64(state->B + 8, x1); - be_store_word64(state->B + 16, x2); - be_store_word64(state->B + 24, x3); - be_store_word64(state->B + 32, x4); -#else - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; -#endif -} - -#endif /* !__AVR__ */ diff --git a/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/internal-ascon.h b/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/internal-ascon.h deleted file mode 100644 index d3fa3ca..0000000 --- a/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/internal-ascon.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
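
The portable ascon_permute() above runs rounds first_round..11; each round adds the constant ((0x0F - round) << 4) | round into x2, applies the bit-sliced 5-bit S-box, and finishes with the per-word rotation pairs of the linear layer. As a reading aid, the same round restated on five plain 64-bit words, without the endianness handling (ascon_round and ROR64 are local names, not part of the library):

    #include <stdint.h>

    #define ROR64(x, n) (((x) >> (n)) | ((x) << (64 - (n))))

    static void ascon_round(uint64_t x[5], uint8_t round) /* round in 0..11 */
    {
        uint64_t t0, t1, t2, t3, t4;

        /* Round constant */
        x[2] ^= (uint64_t)(((0x0F - round) << 4) | round);

        /* Bit-sliced 5-bit S-box */
        x[0] ^= x[4]; x[4] ^= x[3]; x[2] ^= x[1];
        t0 = (~x[0]) & x[1]; t1 = (~x[1]) & x[2]; t2 = (~x[2]) & x[3];
        t3 = (~x[3]) & x[4]; t4 = (~x[4]) & x[0];
        x[0] ^= t1; x[1] ^= t2; x[2] ^= t3; x[3] ^= t4; x[4] ^= t0;
        x[1] ^= x[0]; x[0] ^= x[4]; x[3] ^= x[2]; x[2] = ~x[2];

        /* Linear diffusion layer: same rotation amounts as above */
        x[0] ^= ROR64(x[0], 19) ^ ROR64(x[0], 28);
        x[1] ^= ROR64(x[1], 61) ^ ROR64(x[1], 39);
        x[2] ^= ROR64(x[2],  1) ^ ROR64(x[2],  6);
        x[3] ^= ROR64(x[3], 10) ^ ROR64(x[3], 17);
        x[4] ^= ROR64(x[4],  7) ^ ROR64(x[4], 41);
    }
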
- */ - -#ifndef LW_INTERNAL_ASCON_H -#define LW_INTERNAL_ASCON_H - -#include "internal-util.h" - -/** - * \file internal-ascon.h - * \brief Internal implementation of the ASCON permutation. - * - * References: http://competitions.cr.yp.to/round3/asconv12.pdf, - * http://ascon.iaik.tugraz.at/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Structure of the internal state of the ASCON permutation. - */ -typedef union -{ - uint64_t S[5]; /**< Words of the state */ - uint8_t B[40]; /**< Bytes of the state */ - -} ascon_state_t; - -/** - * \brief Permutes the ASCON state. - * - * \param state The ASCON state to be permuted. - * \param first_round The first round (of 12) to be performed; 0, 4, or 6. - * - * The input and output \a state will be in big-endian byte order. - */ -void ascon_permute(ascon_state_t *state, uint8_t first_round); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/internal-util.h b/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/ascon/Implementations/crypto_aead/ascon128av12/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
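
The helpers above pair the endian-converting load/store macros with byte-wise XOR loops. A self-contained check, assuming internal-util.h (util_demo is a hypothetical name): the big-endian store/load macros must round-trip, and lw_xor_block_2_src / lw_xor_block give the usual XOR-keystream behaviour.

    #include <stdint.h>
    #include <string.h>
    #include "internal-util.h"

    int util_demo(void)
    {
        unsigned char buf[8];
        unsigned char ks[8] = {1, 2, 3, 4, 5, 6, 7, 8}; /* example keystream */
        unsigned char pt[8] = "ABCDEFG";                /* example plaintext */
        unsigned char ct[8];
        uint64_t v = 0x0123456789ABCDEFULL;

        /* be_store_word64 writes the most significant byte first,
         * so be_load_word64 recovers the original value. */
        be_store_word64(buf, v);
        if (be_load_word64(buf) != v)
            return -1;

        /* ct = pt XOR ks, one byte at a time */
        lw_xor_block_2_src(ct, pt, ks, 8);

        /* XOR-ing the keystream in again restores the plaintext in place */
        lw_xor_block(ct, ks, 8);
        return memcmp(ct, pt, 8) == 0 ? 0 : -1;
    }
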
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
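
The composed forms above rely on 1-bit and 8-bit rotations being the cheap cases on AVR, so for example leftRotate5 is built as "rotate left by 8, then right by 1 three times". A quick host-side equivalence check of that particular composition (check_leftRotate5 and rotl32 are local names used only for illustration):

    #include <stdint.h>

    static uint32_t rotl32(uint32_t x, unsigned n) /* n must be 1..31 */
    {
        return (x << n) | (x >> (32 - n));
    }

    int check_leftRotate5(uint32_t x)
    {
        /* Same steps as leftRotate5(a):
         * leftRotate((a), 8), then three single-bit right rotations */
        uint32_t composed = rotl32(x, 8);
        composed = (composed >> 1) | (composed << 31);
        composed = (composed >> 1) | (composed << 31);
        composed = (composed >> 1) | (composed << 31);
        return composed == rotl32(x, 5);
    }
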
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/ascon/Implementations/crypto_aead/ascon128av12/rhys/internal-ascon-avr.S b/ascon/Implementations/crypto_aead/ascon128av12/rhys/internal-ascon-avr.S new file mode 100644 index 0000000..e8a4fb4 --- /dev/null +++ b/ascon/Implementations/crypto_aead/ascon128av12/rhys/internal-ascon-avr.S @@ -0,0 +1,778 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global ascon_permute + .type ascon_permute, @function +ascon_permute: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ldi r18,15 + sub r18,r22 + swap r18 + or r22,r18 + ldd r3,Z+16 + ldd r2,Z+17 + ldd r27,Z+18 + ldd r26,Z+19 + ldd r21,Z+20 + ldd r20,Z+21 + ldd r19,Z+22 + ldd r18,Z+23 + ldd r11,Z+32 + ldd r10,Z+33 + ldd r9,Z+34 + ldd r8,Z+35 + ldd r7,Z+36 + ldd r6,Z+37 + ldd r5,Z+38 + ldd r4,Z+39 +20: + eor r18,r22 + ldd r23,Z+7 + ldd r12,Z+15 + ldd r13,Z+31 + eor r23,r4 + eor r4,r13 + eor r18,r12 + mov r14,r23 + mov r15,r12 + mov r24,r18 + mov r25,r13 + mov r16,r4 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r18 + and r24,r13 + and r25,r4 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r18,r25 + eor r13,r16 + eor r4,r14 + eor r12,r23 + eor r23,r4 + eor r13,r18 + com r18 + std Z+7,r23 + std Z+15,r12 + std Z+31,r13 + std Z+39,r4 + ldd r23,Z+6 + ldd r12,Z+14 + ldd r13,Z+30 + eor r23,r5 + eor r5,r13 + eor r19,r12 + mov r14,r23 + mov r15,r12 + mov r24,r19 + mov r25,r13 + mov r16,r5 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r19 + and r24,r13 + and r25,r5 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r19,r25 + eor r13,r16 + eor r5,r14 + eor r12,r23 + eor r23,r5 + eor r13,r19 + com r19 + std Z+6,r23 + std Z+14,r12 + std Z+30,r13 + std Z+38,r5 + ldd r23,Z+5 + ldd r12,Z+13 + ldd r13,Z+29 + eor r23,r6 + eor r6,r13 + eor r20,r12 + mov r14,r23 + mov r15,r12 + mov r24,r20 + mov r25,r13 + mov r16,r6 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r20 + and r24,r13 + and r25,r6 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r20,r25 + eor r13,r16 + eor r6,r14 + eor r12,r23 + eor r23,r6 + eor r13,r20 + com r20 + std Z+5,r23 + std Z+13,r12 + std Z+29,r13 + std Z+37,r6 + ldd r23,Z+4 + ldd r12,Z+12 + ldd r13,Z+28 + eor r23,r7 + eor r7,r13 + eor r21,r12 + mov r14,r23 + mov r15,r12 + mov r24,r21 + mov r25,r13 + mov r16,r7 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r21 + and r24,r13 + and r25,r7 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r21,r25 + eor r13,r16 + eor r7,r14 + eor r12,r23 + eor r23,r7 + eor r13,r21 + com r21 + std Z+4,r23 + std Z+12,r12 + std Z+28,r13 + std Z+36,r7 + ldd r23,Z+3 + ldd r12,Z+11 + ldd r13,Z+27 + eor r23,r8 + eor r8,r13 + eor r26,r12 + mov r14,r23 + mov r15,r12 + mov r24,r26 + mov r25,r13 + mov r16,r8 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r26 + and r24,r13 + and r25,r8 + and r16,r23 + eor r23,r15 + eor r12,r24 + 
eor r26,r25 + eor r13,r16 + eor r8,r14 + eor r12,r23 + eor r23,r8 + eor r13,r26 + com r26 + std Z+3,r23 + std Z+11,r12 + std Z+27,r13 + std Z+35,r8 + ldd r23,Z+2 + ldd r12,Z+10 + ldd r13,Z+26 + eor r23,r9 + eor r9,r13 + eor r27,r12 + mov r14,r23 + mov r15,r12 + mov r24,r27 + mov r25,r13 + mov r16,r9 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r27 + and r24,r13 + and r25,r9 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r27,r25 + eor r13,r16 + eor r9,r14 + eor r12,r23 + eor r23,r9 + eor r13,r27 + com r27 + std Z+2,r23 + std Z+10,r12 + std Z+26,r13 + std Z+34,r9 + ldd r23,Z+1 + ldd r12,Z+9 + ldd r13,Z+25 + eor r23,r10 + eor r10,r13 + eor r2,r12 + mov r14,r23 + mov r15,r12 + mov r24,r2 + mov r25,r13 + mov r16,r10 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r2 + and r24,r13 + and r25,r10 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r2,r25 + eor r13,r16 + eor r10,r14 + eor r12,r23 + eor r23,r10 + eor r13,r2 + com r2 + std Z+1,r23 + std Z+9,r12 + std Z+25,r13 + std Z+33,r10 + ld r23,Z + ldd r12,Z+8 + ldd r13,Z+24 + eor r23,r11 + eor r11,r13 + eor r3,r12 + mov r14,r23 + mov r15,r12 + mov r24,r3 + mov r25,r13 + mov r16,r11 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r3 + and r24,r13 + and r25,r11 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r3,r25 + eor r13,r16 + eor r11,r14 + eor r12,r23 + eor r23,r11 + eor r13,r3 + com r3 + st Z,r23 + std Z+8,r12 + std Z+24,r13 + std Z+32,r11 + ld r11,Z + ldd r10,Z+1 + ldd r9,Z+2 + ldd r8,Z+3 + ldd r7,Z+4 + ldd r6,Z+5 + ldd r5,Z+6 + ldd r4,Z+7 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r14 + mov r14,r24 + mov r24,r16 + mov r16,r0 + mov r0,r13 + mov r13,r15 + mov r15,r25 + mov r25,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r17,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r4 + mov r0,r5 + push r6 + mov r4,r7 + mov r5,r8 + mov r6,r9 + mov r7,r10 + mov r8,r11 + pop r11 + mov r10,r0 + mov r9,r23 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + st Z,r11 + std Z+1,r10 + std Z+2,r9 + std Z+3,r8 + std Z+4,r7 + std Z+5,r6 + std Z+6,r5 + std Z+7,r4 + ldd r11,Z+8 + ldd r10,Z+9 + ldd r9,Z+10 + ldd r8,Z+11 + ldd r7,Z+12 + ldd r6,Z+13 + ldd r5,Z+14 + ldd r4,Z+15 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r9 + mov r0,r10 + push r11 + mov r11,r8 + mov r10,r7 + mov r9,r6 + mov r8,r5 + mov r7,r4 + pop r6 + mov r5,r0 
+ mov r4,r23 + lsl r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + adc r4,r1 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + std Z+8,r11 + std Z+9,r10 + std Z+10,r9 + std Z+11,r8 + std Z+12,r7 + std Z+13,r6 + std Z+14,r5 + std Z+15,r4 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + bst r12,0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + bld r17,7 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + eor r24,r26 + eor r25,r27 + eor r16,r2 + eor r17,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r2 + rol r3 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r2 + rol r3 + adc r18,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + eor r26,r24 + eor r27,r25 + eor r2,r16 + eor r3,r17 + ldd r11,Z+24 + ldd r10,Z+25 + ldd r9,Z+26 + ldd r8,Z+27 + ldd r7,Z+28 + ldd r6,Z+29 + ldd r5,Z+30 + ldd r4,Z+31 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r17,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r0,r4 + mov r4,r6 + mov r6,r8 + mov r8,r10 + mov r10,r0 + mov r0,r5 + mov r5,r7 + mov r7,r9 + mov r9,r11 + mov r11,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + std Z+24,r11 + std Z+25,r10 + std Z+26,r9 + std Z+27,r8 + std Z+28,r7 + std Z+29,r6 + std Z+30,r5 + std Z+31,r4 + ldd r11,Z+32 + ldd r10,Z+33 + ldd r9,Z+34 + ldd r8,Z+35 + ldd r7,Z+36 + ldd r6,Z+37 + ldd r5,Z+38 + ldd r4,Z+39 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r9 + mov r0,r10 + push r11 + mov r11,r8 + mov r10,r7 + mov r9,r6 + mov r8,r5 + mov r7,r4 + pop r6 + mov r5,r0 + mov r4,r23 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + subi r22,15 + ldi r25,60 + cpse r22,r25 + rjmp 20b + std Z+16,r3 + std Z+17,r2 + std Z+18,r27 + std Z+19,r26 + std Z+20,r21 + std Z+21,r20 + std Z+22,r19 + std Z+23,r18 + std Z+32,r11 + std Z+33,r10 + std Z+34,r9 + std Z+35,r8 + std Z+36,r7 + std Z+37,r6 + std Z+38,r5 + std Z+39,r4 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size ascon_permute, .-ascon_permute + +#endif diff --git a/ascon/Implementations/crypto_aead/ascon128av12/rhys/internal-ascon.c b/ascon/Implementations/crypto_aead/ascon128av12/rhys/internal-ascon.c index 
12a8ec6..657aabe 100644 --- a/ascon/Implementations/crypto_aead/ascon128av12/rhys/internal-ascon.c +++ b/ascon/Implementations/crypto_aead/ascon128av12/rhys/internal-ascon.c @@ -22,6 +22,8 @@ #include "internal-ascon.h" +#if !defined(__AVR__) + void ascon_permute(ascon_state_t *state, uint8_t first_round) { uint64_t t0, t1, t2, t3, t4; @@ -74,3 +76,5 @@ void ascon_permute(ascon_state_t *state, uint8_t first_round) state->S[4] = x4; #endif } + +#endif /* !__AVR__ */ diff --git a/ascon/Implementations/crypto_aead/ascon128av12/rhys/internal-util.h b/ascon/Implementations/crypto_aead/ascon128av12/rhys/internal-util.h index e79158c..e30166d 100644 --- a/ascon/Implementations/crypto_aead/ascon128av12/rhys/internal-util.h +++ b/ascon/Implementations/crypto_aead/ascon128av12/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 
1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) 
(leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/aead-common.c b/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/aead-common.h b/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
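A minimal standalone C sketch of the identity behind the composed rotation macros above: rotation counts add modulo the word size, so rotating left by 8 and then right by 1 three times matches a direct left rotation by 5. The ROTL32/ROTR32 helpers are local to this sketch and stand in for the generic leftRotate()/rightRotate() macros; on AVR, rotations by 1 or by a multiple of 8 are the cheap cases the composition is built from.

#include <stdint.h>
#include <stdio.h>

/* Generic barrel-shifter rotations, local to this sketch. */
#define ROTL32(x, n) ((uint32_t)(((x) << (n)) | ((x) >> (32 - (n)))))
#define ROTR32(x, n) ((uint32_t)(((x) >> (n)) | ((x) << (32 - (n)))))

int main(void)
{
    uint32_t x = 0x12345678u;
    /* Direct rotation by 5 versus the AVR-friendly composition used by
     * leftRotate5(): left by 8 (a byte move), then right by 1 three times. */
    uint32_t direct = ROTL32(x, 5);
    uint32_t composed = ROTR32(ROTR32(ROTR32(ROTL32(x, 8), 1), 1), 1);
    printf("%08lx %08lx\n", (unsigned long)direct, (unsigned long)composed);
    return 0; /* both values print as 468acf02 */
}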
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Absorbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions.
Extensible Output Functions (XOF's) should - provide the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/api.h b/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/ascon128.c b/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/ascon128.c deleted file mode 100644 index 80b2e46..0000000 --- a/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/ascon128.c +++ /dev/null @@ -1,383 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd.
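A standalone sketch of the constant-time masking arithmetic that aead_check_tag(), documented above, relies on; the tag and plaintext values here are placeholders chosen for illustration, and, like the library code, the sketch assumes an arithmetic right shift of negative integers. The XOR accumulator is zero only when every tag byte agrees, so (accum - 1) >> 8 yields an all-ones mask on a match and zero on a mismatch.

#include <stdio.h>

int main(void)
{
    const unsigned char tag1[4] = {0xDE, 0xAD, 0xBE, 0xEF};
    const unsigned char tag2[4] = {0xDE, 0xAD, 0xBE, 0xEF}; /* flip a byte to see the failure path */
    unsigned char plaintext[5] = "test";
    int accum = 0;
    unsigned i;
    for (i = 0; i < 4; ++i)
        accum |= tag1[i] ^ tag2[i]; /* non-zero iff any tag byte differs */
    accum = (accum - 1) >> 8;       /* -1 (all ones) on a match, 0 on a mismatch */
    for (i = 0; i < 4; ++i)
        plaintext[i] &= accum;      /* plaintext survives only when the tags match */
    printf("return=%d plaintext=\"%s\"\n", ~accum, (const char *)plaintext);
    return 0;                       /* prints: return=0 plaintext="test" */
}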
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "ascon128.h" -#include "internal-ascon.h" -#include - -/** - * \brief Initialization vector for ASCON-128. - */ -#define ASCON128_IV 0x80400c0600000000ULL - -/** - * \brief Initialization vector for ASCON-128a. - */ -#define ASCON128a_IV 0x80800c0800000000ULL - -/** - * \brief Initialization vector for ASCON-80pq. - */ -#define ASCON80PQ_IV 0xa0400c06U - -aead_cipher_t const ascon128_cipher = { - "ASCON-128", - ASCON128_KEY_SIZE, - ASCON128_NONCE_SIZE, - ASCON128_TAG_SIZE, - AEAD_FLAG_NONE, - ascon128_aead_encrypt, - ascon128_aead_decrypt -}; - -aead_cipher_t const ascon128a_cipher = { - "ASCON-128a", - ASCON128_KEY_SIZE, - ASCON128_NONCE_SIZE, - ASCON128_TAG_SIZE, - AEAD_FLAG_NONE, - ascon128a_aead_encrypt, - ascon128a_aead_decrypt -}; - -aead_cipher_t const ascon80pq_cipher = { - "ASCON-80pq", - ASCON80PQ_KEY_SIZE, - ASCON80PQ_NONCE_SIZE, - ASCON80PQ_TAG_SIZE, - AEAD_FLAG_NONE, - ascon80pq_aead_encrypt, - ascon80pq_aead_decrypt -}; - -/** - * \brief Absorbs data into an ASCON state. - * - * \param state The state to absorb the data into. - * \param data Points to the data to be absorbed. - * \param len Length of the data to be absorbed. - * \param rate Block rate, which is either 8 or 16. - * \param first_round First round of the permutation to apply each block. - */ -static void ascon_absorb - (ascon_state_t *state, const unsigned char *data, - unsigned long long len, uint8_t rate, uint8_t first_round) -{ - while (len >= rate) { - lw_xor_block(state->B, data, rate); - ascon_permute(state, first_round); - data += rate; - len -= rate; - } - lw_xor_block(state->B, data, (unsigned)len); - state->B[(unsigned)len] ^= 0x80; - ascon_permute(state, first_round); -} - -/** - * \brief Encrypts a block of data with an ASCON state. - * - * \param state The state to encrypt with. - * \param dest Points to the destination buffer. - * \param src Points to the source buffer. - * \param len Length of the data to encrypt from \a src into \a dest. - * \param rate Block rate, which is either 8 or 16. - * \param first_round First round of the permutation to apply each block. 
- */ -static void ascon_encrypt - (ascon_state_t *state, unsigned char *dest, - const unsigned char *src, unsigned long long len, - uint8_t rate, uint8_t first_round) -{ - while (len >= rate) { - lw_xor_block_2_dest(dest, state->B, src, rate); - ascon_permute(state, first_round); - dest += rate; - src += rate; - len -= rate; - } - lw_xor_block_2_dest(dest, state->B, src, (unsigned)len); - state->B[(unsigned)len] ^= 0x80; -} - -/** - * \brief Decrypts a block of data with an ASCON state. - * - * \param state The state to decrypt with. - * \param dest Points to the destination buffer. - * \param src Points to the source buffer. - * \param len Length of the data to decrypt from \a src into \a dest. - * \param rate Block rate, which is either 8 or 16. - * \param first_round First round of the permutation to apply each block. - */ -static void ascon_decrypt - (ascon_state_t *state, unsigned char *dest, - const unsigned char *src, unsigned long long len, - uint8_t rate, uint8_t first_round) -{ - while (len >= rate) { - lw_xor_block_swap(dest, state->B, src, rate); - ascon_permute(state, first_round); - dest += rate; - src += rate; - len -= rate; - } - lw_xor_block_swap(dest, state->B, src, (unsigned)len); - state->B[(unsigned)len] ^= 0x80; -} - -int ascon128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - ascon_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ASCON128_TAG_SIZE; - - /* Initialize the ASCON state */ - be_store_word64(state.B, ASCON128_IV); - memcpy(state.B + 8, k, ASCON128_KEY_SIZE); - memcpy(state.B + 24, npub, ASCON128_NONCE_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k, ASCON128_KEY_SIZE); - - /* Absorb the associated data into the state */ - if (adlen > 0) - ascon_absorb(&state, ad, adlen, 8, 6); - - /* Separator between the associated data and the payload */ - state.B[39] ^= 0x01; - - /* Encrypt the plaintext to create the ciphertext */ - ascon_encrypt(&state, c, m, mlen, 8, 6); - - /* Finalize and compute the authentication tag */ - lw_xor_block(state.B + 8, k, ASCON128_KEY_SIZE); - ascon_permute(&state, 0); - lw_xor_block_2_src(c + mlen, state.B + 24, k, 16); - return 0; -} - -int ascon128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - ascon_state_t state; - (void)nsec; - - /* Set the length of the returned plaintext */ - if (clen < ASCON128_TAG_SIZE) - return -1; - *mlen = clen - ASCON128_TAG_SIZE; - - /* Initialize the ASCON state */ - be_store_word64(state.B, ASCON128_IV); - memcpy(state.B + 8, k, ASCON128_KEY_SIZE); - memcpy(state.B + 24, npub, ASCON128_NONCE_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k, ASCON128_KEY_SIZE); - - /* Absorb the associated data into the state */ - if (adlen > 0) - ascon_absorb(&state, ad, adlen, 8, 6); - - /* Separator between the associated data and the payload */ - state.B[39] ^= 0x01; - - /* Decrypt the ciphertext to create the plaintext */ - ascon_decrypt(&state, m, c, *mlen, 8, 6); - - /* Finalize and check the authentication tag */ - lw_xor_block(state.B + 8, k, ASCON128_KEY_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k, 16); - 
return aead_check_tag - (m, *mlen, state.B + 24, c + *mlen, ASCON128_TAG_SIZE); -} - -int ascon128a_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - ascon_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ASCON128_TAG_SIZE; - - /* Initialize the ASCON state */ - be_store_word64(state.B, ASCON128a_IV); - memcpy(state.B + 8, k, ASCON128_KEY_SIZE); - memcpy(state.B + 24, npub, ASCON128_NONCE_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k, ASCON128_KEY_SIZE); - - /* Absorb the associated data into the state */ - if (adlen > 0) - ascon_absorb(&state, ad, adlen, 16, 4); - - /* Separator between the associated data and the payload */ - state.B[39] ^= 0x01; - - /* Encrypt the plaintext to create the ciphertext */ - ascon_encrypt(&state, c, m, mlen, 16, 4); - - /* Finalize and compute the authentication tag */ - lw_xor_block(state.B + 16, k, ASCON128_KEY_SIZE); - ascon_permute(&state, 0); - lw_xor_block_2_src(c + mlen, state.B + 24, k, 16); - return 0; -} - -int ascon128a_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - ascon_state_t state; - (void)nsec; - - /* Set the length of the returned plaintext */ - if (clen < ASCON128_TAG_SIZE) - return -1; - *mlen = clen - ASCON128_TAG_SIZE; - - /* Initialize the ASCON state */ - be_store_word64(state.B, ASCON128a_IV); - memcpy(state.B + 8, k, ASCON128_KEY_SIZE); - memcpy(state.B + 24, npub, ASCON128_NONCE_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k, ASCON128_KEY_SIZE); - - /* Absorb the associated data into the state */ - if (adlen > 0) - ascon_absorb(&state, ad, adlen, 16, 4); - - /* Separator between the associated data and the payload */ - state.B[39] ^= 0x01; - - /* Decrypt the ciphertext to create the plaintext */ - ascon_decrypt(&state, m, c, *mlen, 16, 4); - - /* Finalize and check the authentication tag */ - lw_xor_block(state.B + 16, k, ASCON128_KEY_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k, 16); - return aead_check_tag - (m, *mlen, state.B + 24, c + *mlen, ASCON128_TAG_SIZE); -} - -int ascon80pq_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - ascon_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ASCON80PQ_TAG_SIZE; - - /* Initialize the ASCON state */ - be_store_word32(state.B, ASCON80PQ_IV); - memcpy(state.B + 4, k, ASCON80PQ_KEY_SIZE); - memcpy(state.B + 24, npub, ASCON80PQ_NONCE_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 20, k, ASCON80PQ_KEY_SIZE); - - /* Absorb the associated data into the state */ - if (adlen > 0) - ascon_absorb(&state, ad, adlen, 8, 6); - - /* Separator between the associated data and the payload */ - state.B[39] ^= 0x01; - - /* Encrypt the plaintext to create the ciphertext */ - ascon_encrypt(&state, c, m, mlen, 8, 6); - - /* Finalize and compute the authentication tag */ - lw_xor_block(state.B + 8, k, ASCON80PQ_KEY_SIZE); - ascon_permute(&state, 0); 
- lw_xor_block_2_src(c + mlen, state.B + 24, k + 4, 16); - return 0; -} - -int ascon80pq_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - ascon_state_t state; - (void)nsec; - - /* Set the length of the returned plaintext */ - if (clen < ASCON80PQ_TAG_SIZE) - return -1; - *mlen = clen - ASCON80PQ_TAG_SIZE; - - /* Initialize the ASCON state */ - be_store_word32(state.B, ASCON80PQ_IV); - memcpy(state.B + 4, k, ASCON80PQ_KEY_SIZE); - memcpy(state.B + 24, npub, ASCON80PQ_NONCE_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 20, k, ASCON80PQ_KEY_SIZE); - - /* Absorb the associated data into the state */ - if (adlen > 0) - ascon_absorb(&state, ad, adlen, 8, 6); - - /* Separator between the associated data and the payload */ - state.B[39] ^= 0x01; - - /* Decrypt the ciphertext to create the plaintext */ - ascon_decrypt(&state, m, c, *mlen, 8, 6); - - /* Finalize and check the authentication tag */ - lw_xor_block(state.B + 8, k, ASCON80PQ_KEY_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k + 4, 16); - return aead_check_tag - (m, *mlen, state.B + 24, c + *mlen, ASCON80PQ_TAG_SIZE); -} diff --git a/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/ascon128.h b/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/ascon128.h deleted file mode 100644 index fd9db13..0000000 --- a/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/ascon128.h +++ /dev/null @@ -1,408 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ASCON_H -#define LWCRYPTO_ASCON_H - -#include "aead-common.h" - -/** - * \file ascon128.h - * \brief ASCON-128 encryption algorithm and related family members. - * - * The ASCON family consists of several related algorithms: - * - * \li ASCON-128 with a 128-bit key, a 128-bit nonce, a 128-bit authentication - * tag, and a block rate of 64 bits. - * \li ASCON-128a with a 128-bit key, a 128-bit nonce, a 128-bit authentication - * tag, and a block rate of 128 bits. This is faster than ASCON-128 but may - * not be as secure. - * \li ASCON-80pq with a 160-bit key, a 128-bit nonce, a 128-bit authentication - * tag, and a block rate of 64 bits. 
This is similar to ASCON-128 but has a - * 160-bit key instead which may be more resistant against quantum computers. - * \li ASCON-HASH with a 256-bit hash output. - * - * References: https://ascon.iaik.tugraz.at/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for ASCON-128 and ASCON-128a. - */ -#define ASCON128_KEY_SIZE 16 - -/** - * \brief Size of the nonce for ASCON-128 and ASCON-128a. - */ -#define ASCON128_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for ASCON-128 and ASCON-128a. - */ -#define ASCON128_TAG_SIZE 16 - -/** - * \brief Size of the key for ASCON-80pq. - */ -#define ASCON80PQ_KEY_SIZE 20 - -/** - * \brief Size of the nonce for ASCON-80pq. - */ -#define ASCON80PQ_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for ASCON-80pq. - */ -#define ASCON80PQ_TAG_SIZE 16 - -/** - * \brief Size of the hash output for ASCON-HASH. - */ -#define ASCON_HASH_SIZE 32 - -/** - * \brief State information for ASCON-HASH and ASCON-XOF incremental modes. - */ -typedef union -{ - struct { - unsigned char state[40]; /**< Current hash state */ - unsigned char count; /**< Number of bytes in the current block */ - unsigned char mode; /**< Hash mode: 0 for absorb, 1 for squeeze */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} ascon_hash_state_t; - -/** - * \brief Meta-information block for the ASCON-128 cipher. - */ -extern aead_cipher_t const ascon128_cipher; - -/** - * \brief Meta-information block for the ASCON-128a cipher. - */ -extern aead_cipher_t const ascon128a_cipher; - -/** - * \brief Meta-information block for the ASCON-80pq cipher. - */ -extern aead_cipher_t const ascon80pq_cipher; - -/** - * \brief Meta-information block for the ASCON-HASH algorithm. - */ -extern aead_hash_algorithm_t const ascon_hash_algorithm; - -/** - * \brief Meta-information block for the ASCON-XOF algorithm. - */ -extern aead_hash_algorithm_t const ascon_xof_algorithm; - -/** - * \brief Encrypts and authenticates a packet with ASCON-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa ascon128_aead_decrypt() - */ -int ascon128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ASCON-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. 
- * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa ascon128_aead_encrypt() - */ -int ascon128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ASCON-128a. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa ascon128a_aead_decrypt() - */ -int ascon128a_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ASCON-128a. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa ascon128a_aead_encrypt() - */ -int ascon128a_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ASCON-80pq. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 20 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa ascon80pq_aead_decrypt() - */ -int ascon80pq_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ASCON-80pq. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 20 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa ascon80pq_aead_encrypt() - */ -int ascon80pq_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with ASCON-HASH. - * - * \param out Buffer to receive the hash output which must be at least - * ASCON_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - * - * \sa ascon_hash_init(), ascon_hash_absorb(), ascon_hash_squeeze() - */ -int ascon_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an ASCON-HASH hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa ascon_hash_update(), ascon_hash_finalize(), ascon_hash() - */ -void ascon_hash_init(ascon_hash_state_t *state); - -/** - * \brief Updates an ASCON-HASH state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. 
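A usage sketch for the ASCON-128 functions declared above, assuming ascon128.h is on the include path and the rhys implementation is compiled and linked in; the all-zero key and nonce and the short message are demonstration placeholders only (a real caller must use a secret key and a fresh nonce per packet).

#include <stdio.h>
#include "ascon128.h"

int main(void)
{
    unsigned char key[ASCON128_KEY_SIZE] = {0};    /* placeholder key */
    unsigned char npub[ASCON128_NONCE_SIZE] = {0}; /* placeholder nonce; never reuse with the same key */
    const unsigned char msg[] = "hello";
    const unsigned char ad[] = "header";
    unsigned char c[sizeof(msg) + ASCON128_TAG_SIZE];
    unsigned char m[sizeof(msg)];
    unsigned long long clen = 0, mlen = 0;

    /* Encrypt, then decrypt and verify the 16-byte tag appended to c. */
    ascon128_aead_encrypt(c, &clen, msg, sizeof(msg), ad, sizeof(ad),
                          NULL, npub, key);
    if (ascon128_aead_decrypt(m, &mlen, NULL, c, clen, ad, sizeof(ad),
                              npub, key) == 0)
        printf("ok: %llu plaintext bytes recovered\n", mlen);
    else
        printf("authentication failed\n");
    return 0;
}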
- * - * \sa ascon_hash_init(), ascon_hash_finalize() - */ -void ascon_hash_update - (ascon_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an ASCON-HASH hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 32-byte hash value. - * - * \sa ascon_hash_init(), ascon_hash_update() - */ -void ascon_hash_finalize - (ascon_hash_state_t *state, unsigned char *out); - -/** - * \brief Hashes a block of input data with ASCON-XOF and generates a - * fixed-length 32 byte output. - * - * \param out Buffer to receive the hash output which must be at least - * ASCON_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - * - * Use ascon_xof_squeeze() instead if you need variable-length XOF output. - * - * \sa ascon_xof_init(), ascon_xof_absorb(), ascon_xof_squeeze() - */ -int ascon_xof - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an ASCON-XOF hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa ascon_xof_absorb(), ascon_xof_squeeze(), ascon_xof() - */ -void ascon_xof_init(ascon_hash_state_t *state); - -/** - * \brief Absorbs more input data into an ASCON-XOF state. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -void ascon_xof_absorb - (ascon_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Squeezes output data from an ASCON-XOF state. - * - * \param state Hash state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state.
- * - * \sa ascon_xof_init(), ascon_xof_update() - */ -void ascon_xof_squeeze - (ascon_hash_state_t *state, unsigned char *out, unsigned long long outlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/encrypt.c b/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/encrypt.c deleted file mode 100644 index f32284a..0000000 --- a/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "ascon128.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return ascon128_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return ascon128_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/internal-ascon-avr.S b/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/internal-ascon-avr.S deleted file mode 100644 index e8a4fb4..0000000 --- a/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/internal-ascon-avr.S +++ /dev/null @@ -1,778 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global ascon_permute - .type ascon_permute, @function -ascon_permute: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ldi r18,15 - sub r18,r22 - swap r18 - or r22,r18 - ldd r3,Z+16 - ldd r2,Z+17 - ldd r27,Z+18 - ldd r26,Z+19 - ldd r21,Z+20 - ldd r20,Z+21 - ldd r19,Z+22 - ldd r18,Z+23 - ldd r11,Z+32 - ldd r10,Z+33 - ldd r9,Z+34 - ldd r8,Z+35 - ldd r7,Z+36 - ldd r6,Z+37 - ldd r5,Z+38 - ldd r4,Z+39 -20: - eor r18,r22 - ldd r23,Z+7 - ldd r12,Z+15 - ldd r13,Z+31 - eor r23,r4 - eor r4,r13 - eor r18,r12 - mov r14,r23 - mov r15,r12 - mov r24,r18 - mov r25,r13 - mov r16,r4 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r18 - and r24,r13 - and r25,r4 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r18,r25 - eor r13,r16 - eor r4,r14 - eor r12,r23 - eor r23,r4 - eor r13,r18 - com r18 - std Z+7,r23 - std Z+15,r12 - std Z+31,r13 - std Z+39,r4 - ldd r23,Z+6 - ldd r12,Z+14 - ldd r13,Z+30 - eor r23,r5 - eor r5,r13 - eor r19,r12 - mov r14,r23 - mov r15,r12 - mov r24,r19 - mov r25,r13 - mov r16,r5 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r19 - and r24,r13 - and r25,r5 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r19,r25 - eor r13,r16 - eor r5,r14 - eor r12,r23 - eor r23,r5 - eor r13,r19 - com r19 - std Z+6,r23 - std Z+14,r12 - std Z+30,r13 - std Z+38,r5 - ldd r23,Z+5 - ldd r12,Z+13 - ldd r13,Z+29 - eor r23,r6 - eor r6,r13 - eor r20,r12 - mov r14,r23 - mov r15,r12 - mov r24,r20 - mov r25,r13 - mov r16,r6 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r20 - and r24,r13 - and r25,r6 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r20,r25 - eor r13,r16 - eor r6,r14 - eor r12,r23 - eor r23,r6 - eor r13,r20 - com r20 - std Z+5,r23 - std Z+13,r12 - std Z+29,r13 - std Z+37,r6 - 
ldd r23,Z+4 - ldd r12,Z+12 - ldd r13,Z+28 - eor r23,r7 - eor r7,r13 - eor r21,r12 - mov r14,r23 - mov r15,r12 - mov r24,r21 - mov r25,r13 - mov r16,r7 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r21 - and r24,r13 - and r25,r7 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r21,r25 - eor r13,r16 - eor r7,r14 - eor r12,r23 - eor r23,r7 - eor r13,r21 - com r21 - std Z+4,r23 - std Z+12,r12 - std Z+28,r13 - std Z+36,r7 - ldd r23,Z+3 - ldd r12,Z+11 - ldd r13,Z+27 - eor r23,r8 - eor r8,r13 - eor r26,r12 - mov r14,r23 - mov r15,r12 - mov r24,r26 - mov r25,r13 - mov r16,r8 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r26 - and r24,r13 - and r25,r8 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r26,r25 - eor r13,r16 - eor r8,r14 - eor r12,r23 - eor r23,r8 - eor r13,r26 - com r26 - std Z+3,r23 - std Z+11,r12 - std Z+27,r13 - std Z+35,r8 - ldd r23,Z+2 - ldd r12,Z+10 - ldd r13,Z+26 - eor r23,r9 - eor r9,r13 - eor r27,r12 - mov r14,r23 - mov r15,r12 - mov r24,r27 - mov r25,r13 - mov r16,r9 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r27 - and r24,r13 - and r25,r9 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r27,r25 - eor r13,r16 - eor r9,r14 - eor r12,r23 - eor r23,r9 - eor r13,r27 - com r27 - std Z+2,r23 - std Z+10,r12 - std Z+26,r13 - std Z+34,r9 - ldd r23,Z+1 - ldd r12,Z+9 - ldd r13,Z+25 - eor r23,r10 - eor r10,r13 - eor r2,r12 - mov r14,r23 - mov r15,r12 - mov r24,r2 - mov r25,r13 - mov r16,r10 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r2 - and r24,r13 - and r25,r10 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r2,r25 - eor r13,r16 - eor r10,r14 - eor r12,r23 - eor r23,r10 - eor r13,r2 - com r2 - std Z+1,r23 - std Z+9,r12 - std Z+25,r13 - std Z+33,r10 - ld r23,Z - ldd r12,Z+8 - ldd r13,Z+24 - eor r23,r11 - eor r11,r13 - eor r3,r12 - mov r14,r23 - mov r15,r12 - mov r24,r3 - mov r25,r13 - mov r16,r11 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r3 - and r24,r13 - and r25,r11 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r3,r25 - eor r13,r16 - eor r11,r14 - eor r12,r23 - eor r23,r11 - eor r13,r3 - com r3 - st Z,r23 - std Z+8,r12 - std Z+24,r13 - std Z+32,r11 - ld r11,Z - ldd r10,Z+1 - ldd r9,Z+2 - ldd r8,Z+3 - ldd r7,Z+4 - ldd r6,Z+5 - ldd r5,Z+6 - ldd r4,Z+7 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r14 - mov r14,r24 - mov r24,r16 - mov r16,r0 - mov r0,r13 - mov r13,r15 - mov r15,r25 - mov r25,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r17,r0 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r4 - mov r0,r5 - push r6 - mov r4,r7 - mov r5,r8 - mov r6,r9 - mov r7,r10 - mov r8,r11 - pop r11 - mov r10,r0 - mov r9,r23 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - st Z,r11 - std Z+1,r10 - 
std Z+2,r9 - std Z+3,r8 - std Z+4,r7 - std Z+5,r6 - std Z+6,r5 - std Z+7,r4 - ldd r11,Z+8 - ldd r10,Z+9 - ldd r9,Z+10 - ldd r8,Z+11 - ldd r7,Z+12 - ldd r6,Z+13 - ldd r5,Z+14 - ldd r4,Z+15 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r9 - mov r0,r10 - push r11 - mov r11,r8 - mov r10,r7 - mov r9,r6 - mov r8,r5 - mov r7,r4 - pop r6 - mov r5,r0 - mov r4,r23 - lsl r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - adc r4,r1 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - std Z+8,r11 - std Z+9,r10 - std Z+10,r9 - std Z+11,r8 - std Z+12,r7 - std Z+13,r6 - std Z+14,r5 - std Z+15,r4 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - bst r12,0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - bld r17,7 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - eor r24,r26 - eor r25,r27 - eor r16,r2 - eor r17,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r2 - rol r3 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r2 - rol r3 - adc r18,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - eor r26,r24 - eor r27,r25 - eor r2,r16 - eor r3,r17 - ldd r11,Z+24 - ldd r10,Z+25 - ldd r9,Z+26 - ldd r8,Z+27 - ldd r7,Z+28 - ldd r6,Z+29 - ldd r5,Z+30 - ldd r4,Z+31 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r17,r0 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r0,r4 - mov r4,r6 - mov r6,r8 - mov r8,r10 - mov r10,r0 - mov r0,r5 - mov r5,r7 - mov r7,r9 - mov r9,r11 - mov r11,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - std Z+24,r11 - std Z+25,r10 - std Z+26,r9 - std Z+27,r8 - std Z+28,r7 - std Z+29,r6 - std Z+30,r5 - std Z+31,r4 - ldd r11,Z+32 - ldd r10,Z+33 - ldd r9,Z+34 - ldd r8,Z+35 - ldd r7,Z+36 - ldd r6,Z+37 - ldd r5,Z+38 - ldd r4,Z+39 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r9 - mov r0,r10 - push r11 - mov r11,r8 - mov r10,r7 - mov r9,r6 - mov r8,r5 - mov r7,r4 - pop r6 - mov r5,r0 - mov r4,r23 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - 
eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - subi r22,15 - ldi r25,60 - cpse r22,r25 - rjmp 20b - std Z+16,r3 - std Z+17,r2 - std Z+18,r27 - std Z+19,r26 - std Z+20,r21 - std Z+21,r20 - std Z+22,r19 - std Z+23,r18 - std Z+32,r11 - std Z+33,r10 - std Z+34,r9 - std Z+35,r8 - std Z+36,r7 - std Z+37,r6 - std Z+38,r5 - std Z+39,r4 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size ascon_permute, .-ascon_permute - -#endif diff --git a/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/internal-ascon.c b/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/internal-ascon.c deleted file mode 100644 index 657aabe..0000000 --- a/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/internal-ascon.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "internal-ascon.h" - -#if !defined(__AVR__) - -void ascon_permute(ascon_state_t *state, uint8_t first_round) -{ - uint64_t t0, t1, t2, t3, t4; -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = be_load_word64(state->B); - uint64_t x1 = be_load_word64(state->B + 8); - uint64_t x2 = be_load_word64(state->B + 16); - uint64_t x3 = be_load_word64(state->B + 24); - uint64_t x4 = be_load_word64(state->B + 32); -#else - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; -#endif - while (first_round < 12) { - /* Add the round constant to the state */ - x2 ^= ((0x0F - first_round) << 4) | first_round; - - /* Substitution layer - apply the s-box using bit-slicing - * according to the algorithm recommended in the specification */ - x0 ^= x4; x4 ^= x3; x2 ^= x1; - t0 = ~x0; t1 = ~x1; t2 = ~x2; t3 = ~x3; t4 = ~x4; - t0 &= x1; t1 &= x2; t2 &= x3; t3 &= x4; t4 &= x0; - x0 ^= t1; x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t0; - x1 ^= x0; x0 ^= x4; x3 ^= x2; x2 = ~x2; - - /* Linear diffusion layer */ - x0 ^= rightRotate19_64(x0) ^ rightRotate28_64(x0); - x1 ^= rightRotate61_64(x1) ^ rightRotate39_64(x1); - x2 ^= rightRotate1_64(x2) ^ rightRotate6_64(x2); - x3 ^= rightRotate10_64(x3) ^ rightRotate17_64(x3); - x4 ^= rightRotate7_64(x4) ^ rightRotate41_64(x4); - - /* Move onto the next round */ - ++first_round; - } -#if defined(LW_UTIL_LITTLE_ENDIAN) - be_store_word64(state->B, x0); - be_store_word64(state->B + 8, x1); - be_store_word64(state->B + 16, x2); - be_store_word64(state->B + 24, x3); - be_store_word64(state->B + 32, x4); -#else - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; -#endif -} - -#endif /* !__AVR__ */ diff --git a/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/internal-ascon.h b/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/internal-ascon.h deleted file mode 100644 index d3fa3ca..0000000 --- a/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/internal-ascon.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_ASCON_H -#define LW_INTERNAL_ASCON_H - -#include "internal-util.h" - -/** - * \file internal-ascon.h - * \brief Internal implementation of the ASCON permutation. 
- * - * References: http://competitions.cr.yp.to/round3/asconv12.pdf, - * http://ascon.iaik.tugraz.at/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Structure of the internal state of the ASCON permutation. - */ -typedef union -{ - uint64_t S[5]; /**< Words of the state */ - uint8_t B[40]; /**< Bytes of the state */ - -} ascon_state_t; - -/** - * \brief Permutes the ASCON state. - * - * \param state The ASCON state to be permuted. - * \param first_round The first round (of 12) to be performed; 0, 4, or 6. - * - * The input and output \a state will be in big-endian byte order. - */ -void ascon_permute(ascon_state_t *state, uint8_t first_round); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/internal-util.h b/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/ascon/Implementations/crypto_aead/ascon128v12/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/ascon/Implementations/crypto_aead/ascon128v12/rhys/internal-ascon-avr.S b/ascon/Implementations/crypto_aead/ascon128v12/rhys/internal-ascon-avr.S new file mode 100644 index 0000000..e8a4fb4 --- /dev/null +++ b/ascon/Implementations/crypto_aead/ascon128v12/rhys/internal-ascon-avr.S @@ -0,0 +1,778 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global ascon_permute + .type ascon_permute, @function +ascon_permute: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ldi r18,15 + sub r18,r22 + swap r18 + or r22,r18 + ldd r3,Z+16 + ldd r2,Z+17 + ldd r27,Z+18 + ldd r26,Z+19 + ldd r21,Z+20 + ldd r20,Z+21 + ldd r19,Z+22 + ldd r18,Z+23 + ldd r11,Z+32 + ldd r10,Z+33 + ldd r9,Z+34 + ldd r8,Z+35 + ldd r7,Z+36 + ldd r6,Z+37 + ldd r5,Z+38 + ldd r4,Z+39 +20: + eor r18,r22 + ldd r23,Z+7 + ldd r12,Z+15 + ldd r13,Z+31 + eor r23,r4 + eor r4,r13 + eor r18,r12 + mov r14,r23 + mov r15,r12 + mov r24,r18 + mov r25,r13 + mov r16,r4 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r18 + and r24,r13 + and r25,r4 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r18,r25 + eor r13,r16 + eor r4,r14 + eor r12,r23 + eor r23,r4 + eor r13,r18 + com r18 + std Z+7,r23 + std Z+15,r12 + std Z+31,r13 + std Z+39,r4 + ldd r23,Z+6 + ldd r12,Z+14 + ldd r13,Z+30 + eor r23,r5 + eor r5,r13 + eor r19,r12 + mov r14,r23 + mov r15,r12 + mov r24,r19 + mov r25,r13 + mov r16,r5 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r19 + and r24,r13 + and r25,r5 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r19,r25 + eor r13,r16 + eor r5,r14 + eor r12,r23 + eor r23,r5 + eor r13,r19 + com r19 + std Z+6,r23 + std Z+14,r12 + std Z+30,r13 + std Z+38,r5 + ldd r23,Z+5 + ldd r12,Z+13 + ldd r13,Z+29 + eor r23,r6 + eor r6,r13 + eor r20,r12 + mov r14,r23 + mov r15,r12 + mov r24,r20 + mov r25,r13 + mov r16,r6 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r20 + and r24,r13 + and r25,r6 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r20,r25 + eor r13,r16 + eor r6,r14 + eor r12,r23 + eor r23,r6 + eor r13,r20 + com r20 + std Z+5,r23 + std Z+13,r12 + std Z+29,r13 + std Z+37,r6 + ldd r23,Z+4 + ldd r12,Z+12 + ldd r13,Z+28 + eor r23,r7 + eor r7,r13 + eor r21,r12 + mov r14,r23 + mov r15,r12 + mov r24,r21 + mov r25,r13 + mov r16,r7 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r21 + and r24,r13 + and r25,r7 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r21,r25 + eor r13,r16 + eor r7,r14 + eor r12,r23 + eor r23,r7 + eor r13,r21 + com r21 + std Z+4,r23 + std Z+12,r12 + std Z+28,r13 + std Z+36,r7 + ldd r23,Z+3 + ldd r12,Z+11 + ldd r13,Z+27 + eor r23,r8 + eor r8,r13 + eor r26,r12 + mov r14,r23 + mov r15,r12 + mov r24,r26 + mov r25,r13 + mov r16,r8 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r26 + and r24,r13 + and r25,r8 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor 
r26,r25 + eor r13,r16 + eor r8,r14 + eor r12,r23 + eor r23,r8 + eor r13,r26 + com r26 + std Z+3,r23 + std Z+11,r12 + std Z+27,r13 + std Z+35,r8 + ldd r23,Z+2 + ldd r12,Z+10 + ldd r13,Z+26 + eor r23,r9 + eor r9,r13 + eor r27,r12 + mov r14,r23 + mov r15,r12 + mov r24,r27 + mov r25,r13 + mov r16,r9 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r27 + and r24,r13 + and r25,r9 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r27,r25 + eor r13,r16 + eor r9,r14 + eor r12,r23 + eor r23,r9 + eor r13,r27 + com r27 + std Z+2,r23 + std Z+10,r12 + std Z+26,r13 + std Z+34,r9 + ldd r23,Z+1 + ldd r12,Z+9 + ldd r13,Z+25 + eor r23,r10 + eor r10,r13 + eor r2,r12 + mov r14,r23 + mov r15,r12 + mov r24,r2 + mov r25,r13 + mov r16,r10 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r2 + and r24,r13 + and r25,r10 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r2,r25 + eor r13,r16 + eor r10,r14 + eor r12,r23 + eor r23,r10 + eor r13,r2 + com r2 + std Z+1,r23 + std Z+9,r12 + std Z+25,r13 + std Z+33,r10 + ld r23,Z + ldd r12,Z+8 + ldd r13,Z+24 + eor r23,r11 + eor r11,r13 + eor r3,r12 + mov r14,r23 + mov r15,r12 + mov r24,r3 + mov r25,r13 + mov r16,r11 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r3 + and r24,r13 + and r25,r11 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r3,r25 + eor r13,r16 + eor r11,r14 + eor r12,r23 + eor r23,r11 + eor r13,r3 + com r3 + st Z,r23 + std Z+8,r12 + std Z+24,r13 + std Z+32,r11 + ld r11,Z + ldd r10,Z+1 + ldd r9,Z+2 + ldd r8,Z+3 + ldd r7,Z+4 + ldd r6,Z+5 + ldd r5,Z+6 + ldd r4,Z+7 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r14 + mov r14,r24 + mov r24,r16 + mov r16,r0 + mov r0,r13 + mov r13,r15 + mov r15,r25 + mov r25,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r17,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r4 + mov r0,r5 + push r6 + mov r4,r7 + mov r5,r8 + mov r6,r9 + mov r7,r10 + mov r8,r11 + pop r11 + mov r10,r0 + mov r9,r23 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + st Z,r11 + std Z+1,r10 + std Z+2,r9 + std Z+3,r8 + std Z+4,r7 + std Z+5,r6 + std Z+6,r5 + std Z+7,r4 + ldd r11,Z+8 + ldd r10,Z+9 + ldd r9,Z+10 + ldd r8,Z+11 + ldd r7,Z+12 + ldd r6,Z+13 + ldd r5,Z+14 + ldd r4,Z+15 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r9 + mov r0,r10 + push r11 + mov r11,r8 + mov r10,r7 + mov r9,r6 + mov r8,r5 + mov r7,r4 + pop r6 + mov r5,r0 + 
mov r4,r23 + lsl r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + adc r4,r1 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + std Z+8,r11 + std Z+9,r10 + std Z+10,r9 + std Z+11,r8 + std Z+12,r7 + std Z+13,r6 + std Z+14,r5 + std Z+15,r4 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + bst r12,0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + bld r17,7 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + eor r24,r26 + eor r25,r27 + eor r16,r2 + eor r17,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r2 + rol r3 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r2 + rol r3 + adc r18,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + eor r26,r24 + eor r27,r25 + eor r2,r16 + eor r3,r17 + ldd r11,Z+24 + ldd r10,Z+25 + ldd r9,Z+26 + ldd r8,Z+27 + ldd r7,Z+28 + ldd r6,Z+29 + ldd r5,Z+30 + ldd r4,Z+31 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r17,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r0,r4 + mov r4,r6 + mov r6,r8 + mov r8,r10 + mov r10,r0 + mov r0,r5 + mov r5,r7 + mov r7,r9 + mov r9,r11 + mov r11,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + std Z+24,r11 + std Z+25,r10 + std Z+26,r9 + std Z+27,r8 + std Z+28,r7 + std Z+29,r6 + std Z+30,r5 + std Z+31,r4 + ldd r11,Z+32 + ldd r10,Z+33 + ldd r9,Z+34 + ldd r8,Z+35 + ldd r7,Z+36 + ldd r6,Z+37 + ldd r5,Z+38 + ldd r4,Z+39 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r9 + mov r0,r10 + push r11 + mov r11,r8 + mov r10,r7 + mov r9,r6 + mov r8,r5 + mov r7,r4 + pop r6 + mov r5,r0 + mov r4,r23 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + subi r22,15 + ldi r25,60 + cpse r22,r25 + rjmp 20b + std Z+16,r3 + std Z+17,r2 + std Z+18,r27 + std Z+19,r26 + std Z+20,r21 + std Z+21,r20 + std Z+22,r19 + std Z+23,r18 + std Z+32,r11 + std Z+33,r10 + std Z+34,r9 + std Z+35,r8 + std Z+36,r7 + std Z+37,r6 + std Z+38,r5 + std Z+39,r4 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size ascon_permute, .-ascon_permute + +#endif diff --git a/ascon/Implementations/crypto_aead/ascon128v12/rhys/internal-ascon.c b/ascon/Implementations/crypto_aead/ascon128v12/rhys/internal-ascon.c index 
12a8ec6..657aabe 100644 --- a/ascon/Implementations/crypto_aead/ascon128v12/rhys/internal-ascon.c +++ b/ascon/Implementations/crypto_aead/ascon128v12/rhys/internal-ascon.c @@ -22,6 +22,8 @@ #include "internal-ascon.h" +#if !defined(__AVR__) + void ascon_permute(ascon_state_t *state, uint8_t first_round) { uint64_t t0, t1, t2, t3, t4; @@ -74,3 +76,5 @@ void ascon_permute(ascon_state_t *state, uint8_t first_round) state->S[4] = x4; #endif } + +#endif /* !__AVR__ */ diff --git a/ascon/Implementations/crypto_aead/ascon128v12/rhys/internal-util.h b/ascon/Implementations/crypto_aead/ascon128v12/rhys/internal-util.h index e79158c..e30166d 100644 --- a/ascon/Implementations/crypto_aead/ascon128v12/rhys/internal-util.h +++ b/ascon/Implementations/crypto_aead/ascon128v12/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + 
+/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) 
(leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/aead-common.c b/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/aead-common.h b/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
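The aead_check_tag implementation removed above folds every tag byte difference into one accumulator and converts it into an all-ones or all-zero mask without branching, so the comparison takes the same time whether or not the tags match. A small standalone sketch of that masking step (illustrative only; ct_mask_from_diff is a hypothetical name, and like the original code it assumes arithmetic right shift of a negative int):

#include <stdio.h>

/* Fold the byte differences into "accum" and derive an all-ones mask
 * (tags equal) or an all-zero mask (tags differ), with no data-dependent
 * branches. */
static int ct_mask_from_diff(const unsigned char *a,
                             const unsigned char *b, unsigned len)
{
    int accum = 0;
    while (len > 0) {
        accum |= (*a++ ^ *b++);
        --len;
    }
    return (accum - 1) >> 8;   /* -1 if equal, 0 if different */
}

int main(void)
{
    unsigned char t1[4] = {1, 2, 3, 4};
    unsigned char t2[4] = {1, 2, 3, 4};
    unsigned char t3[4] = {1, 2, 3, 5};
    printf("equal: %d, different: %d\n",
           ct_mask_from_diff(t1, t2, 4), ct_mask_from_diff(t1, t3, 4));
    return 0;
}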
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. 
Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/api.h b/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/api.h deleted file mode 100644 index f99b349..0000000 --- a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 20 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/ascon128.c b/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/ascon128.c deleted file mode 100644 index 80b2e46..0000000 --- a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/ascon128.c +++ /dev/null @@ -1,383 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "ascon128.h" -#include "internal-ascon.h" -#include - -/** - * \brief Initialization vector for ASCON-128. - */ -#define ASCON128_IV 0x80400c0600000000ULL - -/** - * \brief Initialization vector for ASCON-128a. - */ -#define ASCON128a_IV 0x80800c0800000000ULL - -/** - * \brief Initialization vector for ASCON-80pq. - */ -#define ASCON80PQ_IV 0xa0400c06U - -aead_cipher_t const ascon128_cipher = { - "ASCON-128", - ASCON128_KEY_SIZE, - ASCON128_NONCE_SIZE, - ASCON128_TAG_SIZE, - AEAD_FLAG_NONE, - ascon128_aead_encrypt, - ascon128_aead_decrypt -}; - -aead_cipher_t const ascon128a_cipher = { - "ASCON-128a", - ASCON128_KEY_SIZE, - ASCON128_NONCE_SIZE, - ASCON128_TAG_SIZE, - AEAD_FLAG_NONE, - ascon128a_aead_encrypt, - ascon128a_aead_decrypt -}; - -aead_cipher_t const ascon80pq_cipher = { - "ASCON-80pq", - ASCON80PQ_KEY_SIZE, - ASCON80PQ_NONCE_SIZE, - ASCON80PQ_TAG_SIZE, - AEAD_FLAG_NONE, - ascon80pq_aead_encrypt, - ascon80pq_aead_decrypt -}; - -/** - * \brief Absorbs data into an ASCON state. - * - * \param state The state to absorb the data into. - * \param data Points to the data to be absorbed. - * \param len Length of the data to be absorbed. - * \param rate Block rate, which is either 8 or 16. - * \param first_round First round of the permutation to apply each block. - */ -static void ascon_absorb - (ascon_state_t *state, const unsigned char *data, - unsigned long long len, uint8_t rate, uint8_t first_round) -{ - while (len >= rate) { - lw_xor_block(state->B, data, rate); - ascon_permute(state, first_round); - data += rate; - len -= rate; - } - lw_xor_block(state->B, data, (unsigned)len); - state->B[(unsigned)len] ^= 0x80; - ascon_permute(state, first_round); -} - -/** - * \brief Encrypts a block of data with an ASCON state. - * - * \param state The state to encrypt with. - * \param dest Points to the destination buffer. - * \param src Points to the source buffer. - * \param len Length of the data to encrypt from \a src into \a dest. - * \param rate Block rate, which is either 8 or 16. - * \param first_round First round of the permutation to apply each block. 
- */ -static void ascon_encrypt - (ascon_state_t *state, unsigned char *dest, - const unsigned char *src, unsigned long long len, - uint8_t rate, uint8_t first_round) -{ - while (len >= rate) { - lw_xor_block_2_dest(dest, state->B, src, rate); - ascon_permute(state, first_round); - dest += rate; - src += rate; - len -= rate; - } - lw_xor_block_2_dest(dest, state->B, src, (unsigned)len); - state->B[(unsigned)len] ^= 0x80; -} - -/** - * \brief Decrypts a block of data with an ASCON state. - * - * \param state The state to decrypt with. - * \param dest Points to the destination buffer. - * \param src Points to the source buffer. - * \param len Length of the data to decrypt from \a src into \a dest. - * \param rate Block rate, which is either 8 or 16. - * \param first_round First round of the permutation to apply each block. - */ -static void ascon_decrypt - (ascon_state_t *state, unsigned char *dest, - const unsigned char *src, unsigned long long len, - uint8_t rate, uint8_t first_round) -{ - while (len >= rate) { - lw_xor_block_swap(dest, state->B, src, rate); - ascon_permute(state, first_round); - dest += rate; - src += rate; - len -= rate; - } - lw_xor_block_swap(dest, state->B, src, (unsigned)len); - state->B[(unsigned)len] ^= 0x80; -} - -int ascon128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - ascon_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ASCON128_TAG_SIZE; - - /* Initialize the ASCON state */ - be_store_word64(state.B, ASCON128_IV); - memcpy(state.B + 8, k, ASCON128_KEY_SIZE); - memcpy(state.B + 24, npub, ASCON128_NONCE_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k, ASCON128_KEY_SIZE); - - /* Absorb the associated data into the state */ - if (adlen > 0) - ascon_absorb(&state, ad, adlen, 8, 6); - - /* Separator between the associated data and the payload */ - state.B[39] ^= 0x01; - - /* Encrypt the plaintext to create the ciphertext */ - ascon_encrypt(&state, c, m, mlen, 8, 6); - - /* Finalize and compute the authentication tag */ - lw_xor_block(state.B + 8, k, ASCON128_KEY_SIZE); - ascon_permute(&state, 0); - lw_xor_block_2_src(c + mlen, state.B + 24, k, 16); - return 0; -} - -int ascon128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - ascon_state_t state; - (void)nsec; - - /* Set the length of the returned plaintext */ - if (clen < ASCON128_TAG_SIZE) - return -1; - *mlen = clen - ASCON128_TAG_SIZE; - - /* Initialize the ASCON state */ - be_store_word64(state.B, ASCON128_IV); - memcpy(state.B + 8, k, ASCON128_KEY_SIZE); - memcpy(state.B + 24, npub, ASCON128_NONCE_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k, ASCON128_KEY_SIZE); - - /* Absorb the associated data into the state */ - if (adlen > 0) - ascon_absorb(&state, ad, adlen, 8, 6); - - /* Separator between the associated data and the payload */ - state.B[39] ^= 0x01; - - /* Decrypt the ciphertext to create the plaintext */ - ascon_decrypt(&state, m, c, *mlen, 8, 6); - - /* Finalize and check the authentication tag */ - lw_xor_block(state.B + 8, k, ASCON128_KEY_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k, 16); - 
return aead_check_tag - (m, *mlen, state.B + 24, c + *mlen, ASCON128_TAG_SIZE); -} - -int ascon128a_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - ascon_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ASCON128_TAG_SIZE; - - /* Initialize the ASCON state */ - be_store_word64(state.B, ASCON128a_IV); - memcpy(state.B + 8, k, ASCON128_KEY_SIZE); - memcpy(state.B + 24, npub, ASCON128_NONCE_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k, ASCON128_KEY_SIZE); - - /* Absorb the associated data into the state */ - if (adlen > 0) - ascon_absorb(&state, ad, adlen, 16, 4); - - /* Separator between the associated data and the payload */ - state.B[39] ^= 0x01; - - /* Encrypt the plaintext to create the ciphertext */ - ascon_encrypt(&state, c, m, mlen, 16, 4); - - /* Finalize and compute the authentication tag */ - lw_xor_block(state.B + 16, k, ASCON128_KEY_SIZE); - ascon_permute(&state, 0); - lw_xor_block_2_src(c + mlen, state.B + 24, k, 16); - return 0; -} - -int ascon128a_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - ascon_state_t state; - (void)nsec; - - /* Set the length of the returned plaintext */ - if (clen < ASCON128_TAG_SIZE) - return -1; - *mlen = clen - ASCON128_TAG_SIZE; - - /* Initialize the ASCON state */ - be_store_word64(state.B, ASCON128a_IV); - memcpy(state.B + 8, k, ASCON128_KEY_SIZE); - memcpy(state.B + 24, npub, ASCON128_NONCE_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k, ASCON128_KEY_SIZE); - - /* Absorb the associated data into the state */ - if (adlen > 0) - ascon_absorb(&state, ad, adlen, 16, 4); - - /* Separator between the associated data and the payload */ - state.B[39] ^= 0x01; - - /* Decrypt the ciphertext to create the plaintext */ - ascon_decrypt(&state, m, c, *mlen, 16, 4); - - /* Finalize and check the authentication tag */ - lw_xor_block(state.B + 16, k, ASCON128_KEY_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k, 16); - return aead_check_tag - (m, *mlen, state.B + 24, c + *mlen, ASCON128_TAG_SIZE); -} - -int ascon80pq_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - ascon_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ASCON80PQ_TAG_SIZE; - - /* Initialize the ASCON state */ - be_store_word32(state.B, ASCON80PQ_IV); - memcpy(state.B + 4, k, ASCON80PQ_KEY_SIZE); - memcpy(state.B + 24, npub, ASCON80PQ_NONCE_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 20, k, ASCON80PQ_KEY_SIZE); - - /* Absorb the associated data into the state */ - if (adlen > 0) - ascon_absorb(&state, ad, adlen, 8, 6); - - /* Separator between the associated data and the payload */ - state.B[39] ^= 0x01; - - /* Encrypt the plaintext to create the ciphertext */ - ascon_encrypt(&state, c, m, mlen, 8, 6); - - /* Finalize and compute the authentication tag */ - lw_xor_block(state.B + 8, k, ASCON80PQ_KEY_SIZE); - ascon_permute(&state, 0); 
- lw_xor_block_2_src(c + mlen, state.B + 24, k + 4, 16); - return 0; -} - -int ascon80pq_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - ascon_state_t state; - (void)nsec; - - /* Set the length of the returned plaintext */ - if (clen < ASCON80PQ_TAG_SIZE) - return -1; - *mlen = clen - ASCON80PQ_TAG_SIZE; - - /* Initialize the ASCON state */ - be_store_word32(state.B, ASCON80PQ_IV); - memcpy(state.B + 4, k, ASCON80PQ_KEY_SIZE); - memcpy(state.B + 24, npub, ASCON80PQ_NONCE_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 20, k, ASCON80PQ_KEY_SIZE); - - /* Absorb the associated data into the state */ - if (adlen > 0) - ascon_absorb(&state, ad, adlen, 8, 6); - - /* Separator between the associated data and the payload */ - state.B[39] ^= 0x01; - - /* Decrypt the ciphertext to create the plaintext */ - ascon_decrypt(&state, m, c, *mlen, 8, 6); - - /* Finalize and check the authentication tag */ - lw_xor_block(state.B + 8, k, ASCON80PQ_KEY_SIZE); - ascon_permute(&state, 0); - lw_xor_block(state.B + 24, k + 4, 16); - return aead_check_tag - (m, *mlen, state.B + 24, c + *mlen, ASCON80PQ_TAG_SIZE); -} diff --git a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/ascon128.h b/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/ascon128.h deleted file mode 100644 index fd9db13..0000000 --- a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/ascon128.h +++ /dev/null @@ -1,408 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ASCON_H -#define LWCRYPTO_ASCON_H - -#include "aead-common.h" - -/** - * \file ascon128.h - * \brief ASCON-128 encryption algorithm and related family members. - * - * The ASCON family consists of several related algorithms: - * - * \li ASCON-128 with a 128-bit key, a 128-bit nonce, a 128-bit authentication - * tag, and a block rate of 64 bits. - * \li ASCON-128a with a 128-bit key, a 128-bit nonce, a 128-bit authentication - * tag, and a block rate of 128 bits. This is faster than ASCON-128 but may - * not be as secure. - * \li ASCON-80pq with a 160-bit key, a 128-bit nonce, a 128-bit authentication - * tag, and a block rate of 64 bits. 
This is similar to ASCON-128 but has a - * 160-bit key instead which may be more resistant against quantum computers. - * \li ASCON-HASH with a 256-bit hash output. - * - * References: https://ascon.iaik.tugraz.at/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for ASCON-128 and ASCON-128a. - */ -#define ASCON128_KEY_SIZE 16 - -/** - * \brief Size of the nonce for ASCON-128 and ASCON-128a. - */ -#define ASCON128_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for ASCON-128 and ASCON-128a. - */ -#define ASCON128_TAG_SIZE 16 - -/** - * \brief Size of the key for ASCON-80pq. - */ -#define ASCON80PQ_KEY_SIZE 20 - -/** - * \brief Size of the nonce for ASCON-80pq. - */ -#define ASCON80PQ_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for ASCON-80pq. - */ -#define ASCON80PQ_TAG_SIZE 16 - -/** - * \brief Size of the hash output for ASCON-HASH. - */ -#define ASCON_HASH_SIZE 32 - -/** - * \brief State information for ASCON-HASH and ASCON-XOF incremental modes. - */ -typedef union -{ - struct { - unsigned char state[40]; /**< Current hash state */ - unsigned char count; /**< Number of bytes in the current block */ - unsigned char mode; /**< Hash mode: 0 for absorb, 1 for squeeze */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} ascon_hash_state_t; - -/** - * \brief Meta-information block for the ASCON-128 cipher. - */ -extern aead_cipher_t const ascon128_cipher; - -/** - * \brief Meta-information block for the ASCON-128a cipher. - */ -extern aead_cipher_t const ascon128a_cipher; - -/** - * \brief Meta-information block for the ASCON-80pq cipher. - */ -extern aead_cipher_t const ascon80pq_cipher; - -/** - * \brief Meta-information block for the ASCON-HASH algorithm. - */ -extern aead_hash_algorithm_t const ascon_hash_algorithm; - -/** - * \brief Meta-information block for the ASCON-XOF algorithm. - */ -extern aead_hash_algorithm_t const ascon_xof_algorithm; - -/** - * \brief Encrypts and authenticates a packet with ASCON-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa ascon128_aead_decrypt() - */ -int ascon128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ASCON-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. 
- * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa ascon128_aead_encrypt() - */ -int ascon128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ASCON-128a. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa ascon128a_aead_decrypt() - */ -int ascon128a_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ASCON-128a. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa ascon128a_aead_encrypt() - */ -int ascon128a_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ASCON-80pq. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 20 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa ascon80pq_aead_decrypt() - */ -int ascon80pq_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ASCON-80pq. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 20 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa ascon80pq_aead_encrypt() - */ -int ascon80pq_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with ASCON-HASH. - * - * \param out Buffer to receive the hash output which must be at least - * ASCON_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - * - * \sa ascon_hash_init(), ascon_hash_absorb(), ascon_hash_squeeze() - */ -int ascon_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an ASCON-HASH hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa ascon_hash_update(), ascon_hash_finalize(), ascon_hash() - */ -void ascon_hash_init(ascon_hash_state_t *state); - -/** - * \brief Updates an ASCON-HASH state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. 
- * - * \sa ascon_hash_init(), ascon_hash_finalize() - */ -void ascon_hash_update - (ascon_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an ASCON-HASH hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 32-byte hash value. - * - * \sa ascon_hash_init(), ascon_hash_update() - */ -void ascon_hash_finalize - (ascon_hash_state_t *state, unsigned char *out); - -/** - * \brief Hashes a block of input data with ASCON-XOF and generates a - * fixed-length 32 byte output. - * - * \param out Buffer to receive the hash output which must be at least - * ASCON_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - * - * Use ascon_xof_squeeze() instead if you need variable-length XOF ouutput. - * - * \sa ascon_xof_init(), ascon_xof_absorb(), ascon_xof_squeeze() - */ -int ascon_xof - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an ASCON-XOF hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa ascon_xof_absorb(), ascon_xof_squeeze(), ascon_xof() - */ -void ascon_xof_init(ascon_hash_state_t *state); - -/** - * \brief Aborbs more input data into an ASCON-XOF state. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -void ascon_xof_absorb - (ascon_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Squeezes output data from an ASCON-XOF state. - * - * \param state Hash state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. 
- * - * \sa ascon_xof_init(), ascon_xof_update() - */ -void ascon_xof_squeeze - (ascon_hash_state_t *state, unsigned char *out, unsigned long long outlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/encrypt.c b/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/encrypt.c deleted file mode 100644 index 08b7dc9..0000000 --- a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "ascon128.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return ascon80pq_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return ascon80pq_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/internal-ascon-avr.S b/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/internal-ascon-avr.S deleted file mode 100644 index e8a4fb4..0000000 --- a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/internal-ascon-avr.S +++ /dev/null @@ -1,778 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global ascon_permute - .type ascon_permute, @function -ascon_permute: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ldi r18,15 - sub r18,r22 - swap r18 - or r22,r18 - ldd r3,Z+16 - ldd r2,Z+17 - ldd r27,Z+18 - ldd r26,Z+19 - ldd r21,Z+20 - ldd r20,Z+21 - ldd r19,Z+22 - ldd r18,Z+23 - ldd r11,Z+32 - ldd r10,Z+33 - ldd r9,Z+34 - ldd r8,Z+35 - ldd r7,Z+36 - ldd r6,Z+37 - ldd r5,Z+38 - ldd r4,Z+39 -20: - eor r18,r22 - ldd r23,Z+7 - ldd r12,Z+15 - ldd r13,Z+31 - eor r23,r4 - eor r4,r13 - eor r18,r12 - mov r14,r23 - mov r15,r12 - mov r24,r18 - mov r25,r13 - mov r16,r4 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r18 - and r24,r13 - and r25,r4 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r18,r25 - eor r13,r16 - eor r4,r14 - eor r12,r23 - eor r23,r4 - eor r13,r18 - com r18 - std Z+7,r23 - std Z+15,r12 - std Z+31,r13 - std Z+39,r4 - ldd r23,Z+6 - ldd r12,Z+14 - ldd r13,Z+30 - eor r23,r5 - eor r5,r13 - eor r19,r12 - mov r14,r23 - mov r15,r12 - mov r24,r19 - mov r25,r13 - mov r16,r5 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r19 - and r24,r13 - and r25,r5 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r19,r25 - eor r13,r16 - eor r5,r14 - eor r12,r23 - eor r23,r5 - eor r13,r19 - com r19 - std Z+6,r23 - std Z+14,r12 - std Z+30,r13 - std Z+38,r5 - ldd r23,Z+5 - ldd r12,Z+13 - ldd r13,Z+29 - eor r23,r6 - eor r6,r13 - eor r20,r12 - mov r14,r23 - mov r15,r12 - mov r24,r20 - mov r25,r13 - mov r16,r6 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r20 - and r24,r13 - and r25,r6 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r20,r25 - eor r13,r16 - eor r6,r14 - eor r12,r23 - eor r23,r6 - eor r13,r20 - com r20 - std Z+5,r23 - std Z+13,r12 - std Z+29,r13 - std 
Z+37,r6 - ldd r23,Z+4 - ldd r12,Z+12 - ldd r13,Z+28 - eor r23,r7 - eor r7,r13 - eor r21,r12 - mov r14,r23 - mov r15,r12 - mov r24,r21 - mov r25,r13 - mov r16,r7 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r21 - and r24,r13 - and r25,r7 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r21,r25 - eor r13,r16 - eor r7,r14 - eor r12,r23 - eor r23,r7 - eor r13,r21 - com r21 - std Z+4,r23 - std Z+12,r12 - std Z+28,r13 - std Z+36,r7 - ldd r23,Z+3 - ldd r12,Z+11 - ldd r13,Z+27 - eor r23,r8 - eor r8,r13 - eor r26,r12 - mov r14,r23 - mov r15,r12 - mov r24,r26 - mov r25,r13 - mov r16,r8 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r26 - and r24,r13 - and r25,r8 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r26,r25 - eor r13,r16 - eor r8,r14 - eor r12,r23 - eor r23,r8 - eor r13,r26 - com r26 - std Z+3,r23 - std Z+11,r12 - std Z+27,r13 - std Z+35,r8 - ldd r23,Z+2 - ldd r12,Z+10 - ldd r13,Z+26 - eor r23,r9 - eor r9,r13 - eor r27,r12 - mov r14,r23 - mov r15,r12 - mov r24,r27 - mov r25,r13 - mov r16,r9 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r27 - and r24,r13 - and r25,r9 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r27,r25 - eor r13,r16 - eor r9,r14 - eor r12,r23 - eor r23,r9 - eor r13,r27 - com r27 - std Z+2,r23 - std Z+10,r12 - std Z+26,r13 - std Z+34,r9 - ldd r23,Z+1 - ldd r12,Z+9 - ldd r13,Z+25 - eor r23,r10 - eor r10,r13 - eor r2,r12 - mov r14,r23 - mov r15,r12 - mov r24,r2 - mov r25,r13 - mov r16,r10 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r2 - and r24,r13 - and r25,r10 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r2,r25 - eor r13,r16 - eor r10,r14 - eor r12,r23 - eor r23,r10 - eor r13,r2 - com r2 - std Z+1,r23 - std Z+9,r12 - std Z+25,r13 - std Z+33,r10 - ld r23,Z - ldd r12,Z+8 - ldd r13,Z+24 - eor r23,r11 - eor r11,r13 - eor r3,r12 - mov r14,r23 - mov r15,r12 - mov r24,r3 - mov r25,r13 - mov r16,r11 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r3 - and r24,r13 - and r25,r11 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r3,r25 - eor r13,r16 - eor r11,r14 - eor r12,r23 - eor r23,r11 - eor r13,r3 - com r3 - st Z,r23 - std Z+8,r12 - std Z+24,r13 - std Z+32,r11 - ld r11,Z - ldd r10,Z+1 - ldd r9,Z+2 - ldd r8,Z+3 - ldd r7,Z+4 - ldd r6,Z+5 - ldd r5,Z+6 - ldd r4,Z+7 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r14 - mov r14,r24 - mov r24,r16 - mov r16,r0 - mov r0,r13 - mov r13,r15 - mov r15,r25 - mov r25,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r17,r0 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r4 - mov r0,r5 - push r6 - mov r4,r7 - mov r5,r8 - mov r6,r9 - mov r7,r10 - mov r8,r11 - pop r11 - mov r10,r0 - mov r9,r23 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - st Z,r11 - std 
Z+1,r10 - std Z+2,r9 - std Z+3,r8 - std Z+4,r7 - std Z+5,r6 - std Z+6,r5 - std Z+7,r4 - ldd r11,Z+8 - ldd r10,Z+9 - ldd r9,Z+10 - ldd r8,Z+11 - ldd r7,Z+12 - ldd r6,Z+13 - ldd r5,Z+14 - ldd r4,Z+15 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r9 - mov r0,r10 - push r11 - mov r11,r8 - mov r10,r7 - mov r9,r6 - mov r8,r5 - mov r7,r4 - pop r6 - mov r5,r0 - mov r4,r23 - lsl r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - adc r4,r1 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - std Z+8,r11 - std Z+9,r10 - std Z+10,r9 - std Z+11,r8 - std Z+12,r7 - std Z+13,r6 - std Z+14,r5 - std Z+15,r4 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - bst r12,0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - bld r17,7 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - eor r24,r26 - eor r25,r27 - eor r16,r2 - eor r17,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r2 - rol r3 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r2 - rol r3 - adc r18,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - eor r26,r24 - eor r27,r25 - eor r2,r16 - eor r3,r17 - ldd r11,Z+24 - ldd r10,Z+25 - ldd r9,Z+26 - ldd r8,Z+27 - ldd r7,Z+28 - ldd r6,Z+29 - ldd r5,Z+30 - ldd r4,Z+31 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r17,r0 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r0,r4 - mov r4,r6 - mov r6,r8 - mov r8,r10 - mov r10,r0 - mov r0,r5 - mov r5,r7 - mov r7,r9 - mov r9,r11 - mov r11,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - std Z+24,r11 - std Z+25,r10 - std Z+26,r9 - std Z+27,r8 - std Z+28,r7 - std Z+29,r6 - std Z+30,r5 - std Z+31,r4 - ldd r11,Z+32 - ldd r10,Z+33 - ldd r9,Z+34 - ldd r8,Z+35 - ldd r7,Z+36 - ldd r6,Z+37 - ldd r5,Z+38 - ldd r4,Z+39 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r9 - mov r0,r10 - push r11 - mov r11,r8 - mov r10,r7 - mov r9,r6 - mov r8,r5 - mov r7,r4 - pop r6 - mov r5,r0 - mov r4,r23 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - 
eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - subi r22,15 - ldi r25,60 - cpse r22,r25 - rjmp 20b - std Z+16,r3 - std Z+17,r2 - std Z+18,r27 - std Z+19,r26 - std Z+20,r21 - std Z+21,r20 - std Z+22,r19 - std Z+23,r18 - std Z+32,r11 - std Z+33,r10 - std Z+34,r9 - std Z+35,r8 - std Z+36,r7 - std Z+37,r6 - std Z+38,r5 - std Z+39,r4 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size ascon_permute, .-ascon_permute - -#endif diff --git a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/internal-ascon.c b/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/internal-ascon.c deleted file mode 100644 index 657aabe..0000000 --- a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/internal-ascon.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "internal-ascon.h" - -#if !defined(__AVR__) - -void ascon_permute(ascon_state_t *state, uint8_t first_round) -{ - uint64_t t0, t1, t2, t3, t4; -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = be_load_word64(state->B); - uint64_t x1 = be_load_word64(state->B + 8); - uint64_t x2 = be_load_word64(state->B + 16); - uint64_t x3 = be_load_word64(state->B + 24); - uint64_t x4 = be_load_word64(state->B + 32); -#else - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; -#endif - while (first_round < 12) { - /* Add the round constant to the state */ - x2 ^= ((0x0F - first_round) << 4) | first_round; - - /* Substitution layer - apply the s-box using bit-slicing - * according to the algorithm recommended in the specification */ - x0 ^= x4; x4 ^= x3; x2 ^= x1; - t0 = ~x0; t1 = ~x1; t2 = ~x2; t3 = ~x3; t4 = ~x4; - t0 &= x1; t1 &= x2; t2 &= x3; t3 &= x4; t4 &= x0; - x0 ^= t1; x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t0; - x1 ^= x0; x0 ^= x4; x3 ^= x2; x2 = ~x2; - - /* Linear diffusion layer */ - x0 ^= rightRotate19_64(x0) ^ rightRotate28_64(x0); - x1 ^= rightRotate61_64(x1) ^ rightRotate39_64(x1); - x2 ^= rightRotate1_64(x2) ^ rightRotate6_64(x2); - x3 ^= rightRotate10_64(x3) ^ rightRotate17_64(x3); - x4 ^= rightRotate7_64(x4) ^ rightRotate41_64(x4); - - /* Move onto the next round */ - ++first_round; - } -#if defined(LW_UTIL_LITTLE_ENDIAN) - be_store_word64(state->B, x0); - be_store_word64(state->B + 8, x1); - be_store_word64(state->B + 16, x2); - be_store_word64(state->B + 24, x3); - be_store_word64(state->B + 32, x4); -#else - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; -#endif -} - -#endif /* !__AVR__ */ diff --git a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/internal-ascon.h b/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/internal-ascon.h deleted file mode 100644 index d3fa3ca..0000000 --- a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/internal-ascon.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_ASCON_H -#define LW_INTERNAL_ASCON_H - -#include "internal-util.h" - -/** - * \file internal-ascon.h - * \brief Internal implementation of the ASCON permutation. 
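The portable ascon_permute above computes each round constant on the fly: the low nibble is the round index and the high nibble is 0x0F minus it, giving the sequence 0xf0 down to 0x4b over the twelve rounds. A tiny standalone sketch (illustrative only, not part of the patch) that prints the constants this expression generates:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Same expression as in ascon_permute: the constants for the full
     * 12-round permutation come out as 0xf0, 0xe1, ..., 0x4b. */
    for (uint8_t round = 0; round < 12; ++round) {
        uint8_t rc = (uint8_t)(((0x0F - round) << 4) | round);
        printf("round %2u: 0x%02x\n", (unsigned)round, (unsigned)rc);
    }
    return 0;
}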
- * - * References: http://competitions.cr.yp.to/round3/asconv12.pdf, - * http://ascon.iaik.tugraz.at/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Structure of the internal state of the ASCON permutation. - */ -typedef union -{ - uint64_t S[5]; /**< Words of the state */ - uint8_t B[40]; /**< Bytes of the state */ - -} ascon_state_t; - -/** - * \brief Permutes the ASCON state. - * - * \param state The ASCON state to be permuted. - * \param first_round The first round (of 12) to be performed; 0, 4, or 6. - * - * The input and output \a state will be in big-endian byte order. - */ -void ascon_permute(ascon_state_t *state, uint8_t first_round); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/internal-util.h b/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys/internal-ascon-avr.S b/ascon/Implementations/crypto_aead/ascon80pqv12/rhys/internal-ascon-avr.S new file mode 100644 index 0000000..e8a4fb4 --- /dev/null +++ b/ascon/Implementations/crypto_aead/ascon80pqv12/rhys/internal-ascon-avr.S @@ -0,0 +1,778 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global ascon_permute + .type ascon_permute, @function +ascon_permute: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ldi r18,15 + sub r18,r22 + swap r18 + or r22,r18 + ldd r3,Z+16 + ldd r2,Z+17 + ldd r27,Z+18 + ldd r26,Z+19 + ldd r21,Z+20 + ldd r20,Z+21 + ldd r19,Z+22 + ldd r18,Z+23 + ldd r11,Z+32 + ldd r10,Z+33 + ldd r9,Z+34 + ldd r8,Z+35 + ldd r7,Z+36 + ldd r6,Z+37 + ldd r5,Z+38 + ldd r4,Z+39 +20: + eor r18,r22 + ldd r23,Z+7 + ldd r12,Z+15 + ldd r13,Z+31 + eor r23,r4 + eor r4,r13 + eor r18,r12 + mov r14,r23 + mov r15,r12 + mov r24,r18 + mov r25,r13 + mov r16,r4 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r18 + and r24,r13 + and r25,r4 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r18,r25 + eor r13,r16 + eor r4,r14 + eor r12,r23 + eor r23,r4 + eor r13,r18 + com r18 + std Z+7,r23 + std Z+15,r12 + std Z+31,r13 + std Z+39,r4 + ldd r23,Z+6 + ldd r12,Z+14 + ldd r13,Z+30 + eor r23,r5 + eor r5,r13 + eor r19,r12 + mov r14,r23 + mov r15,r12 + mov r24,r19 + mov r25,r13 + mov r16,r5 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r19 + and r24,r13 + and r25,r5 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r19,r25 + eor r13,r16 + eor r5,r14 + eor r12,r23 + eor r23,r5 + eor r13,r19 + com r19 + std Z+6,r23 + std Z+14,r12 + std Z+30,r13 + std Z+38,r5 + ldd r23,Z+5 + ldd r12,Z+13 + ldd r13,Z+29 + eor r23,r6 + eor r6,r13 + eor r20,r12 + mov r14,r23 + mov r15,r12 + mov r24,r20 + mov r25,r13 + mov r16,r6 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r20 + and r24,r13 + and r25,r6 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r20,r25 + eor r13,r16 + eor r6,r14 + eor r12,r23 + eor r23,r6 + eor r13,r20 + com r20 + std Z+5,r23 + std Z+13,r12 + std Z+29,r13 + std Z+37,r6 + ldd r23,Z+4 + ldd r12,Z+12 + ldd r13,Z+28 + eor r23,r7 + eor r7,r13 + eor r21,r12 + mov r14,r23 + mov r15,r12 + mov r24,r21 + mov r25,r13 + mov r16,r7 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r21 + and r24,r13 + and r25,r7 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r21,r25 + eor r13,r16 + eor r7,r14 + eor r12,r23 + eor r23,r7 + eor r13,r21 + com r21 + std Z+4,r23 + std Z+12,r12 + std Z+28,r13 + std Z+36,r7 + ldd r23,Z+3 + ldd r12,Z+11 + ldd r13,Z+27 + eor r23,r8 + eor r8,r13 + eor r26,r12 + mov r14,r23 + mov r15,r12 + mov r24,r26 + mov r25,r13 + mov r16,r8 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r26 + and r24,r13 + and r25,r8 + and r16,r23 + eor r23,r15 + eor r12,r24 + 
eor r26,r25 + eor r13,r16 + eor r8,r14 + eor r12,r23 + eor r23,r8 + eor r13,r26 + com r26 + std Z+3,r23 + std Z+11,r12 + std Z+27,r13 + std Z+35,r8 + ldd r23,Z+2 + ldd r12,Z+10 + ldd r13,Z+26 + eor r23,r9 + eor r9,r13 + eor r27,r12 + mov r14,r23 + mov r15,r12 + mov r24,r27 + mov r25,r13 + mov r16,r9 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r27 + and r24,r13 + and r25,r9 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r27,r25 + eor r13,r16 + eor r9,r14 + eor r12,r23 + eor r23,r9 + eor r13,r27 + com r27 + std Z+2,r23 + std Z+10,r12 + std Z+26,r13 + std Z+34,r9 + ldd r23,Z+1 + ldd r12,Z+9 + ldd r13,Z+25 + eor r23,r10 + eor r10,r13 + eor r2,r12 + mov r14,r23 + mov r15,r12 + mov r24,r2 + mov r25,r13 + mov r16,r10 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r2 + and r24,r13 + and r25,r10 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r2,r25 + eor r13,r16 + eor r10,r14 + eor r12,r23 + eor r23,r10 + eor r13,r2 + com r2 + std Z+1,r23 + std Z+9,r12 + std Z+25,r13 + std Z+33,r10 + ld r23,Z + ldd r12,Z+8 + ldd r13,Z+24 + eor r23,r11 + eor r11,r13 + eor r3,r12 + mov r14,r23 + mov r15,r12 + mov r24,r3 + mov r25,r13 + mov r16,r11 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r3 + and r24,r13 + and r25,r11 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r3,r25 + eor r13,r16 + eor r11,r14 + eor r12,r23 + eor r23,r11 + eor r13,r3 + com r3 + st Z,r23 + std Z+8,r12 + std Z+24,r13 + std Z+32,r11 + ld r11,Z + ldd r10,Z+1 + ldd r9,Z+2 + ldd r8,Z+3 + ldd r7,Z+4 + ldd r6,Z+5 + ldd r5,Z+6 + ldd r4,Z+7 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r14 + mov r14,r24 + mov r24,r16 + mov r16,r0 + mov r0,r13 + mov r13,r15 + mov r15,r25 + mov r25,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r17,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r4 + mov r0,r5 + push r6 + mov r4,r7 + mov r5,r8 + mov r6,r9 + mov r7,r10 + mov r8,r11 + pop r11 + mov r10,r0 + mov r9,r23 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + st Z,r11 + std Z+1,r10 + std Z+2,r9 + std Z+3,r8 + std Z+4,r7 + std Z+5,r6 + std Z+6,r5 + std Z+7,r4 + ldd r11,Z+8 + ldd r10,Z+9 + ldd r9,Z+10 + ldd r8,Z+11 + ldd r7,Z+12 + ldd r6,Z+13 + ldd r5,Z+14 + ldd r4,Z+15 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r9 + mov r0,r10 + push r11 + mov r11,r8 + mov r10,r7 + mov r9,r6 + mov r8,r5 + mov r7,r4 + pop r6 + mov r5,r0 
+ mov r4,r23 + lsl r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + adc r4,r1 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + std Z+8,r11 + std Z+9,r10 + std Z+10,r9 + std Z+11,r8 + std Z+12,r7 + std Z+13,r6 + std Z+14,r5 + std Z+15,r4 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + bst r12,0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + bld r17,7 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + eor r24,r26 + eor r25,r27 + eor r16,r2 + eor r17,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r2 + rol r3 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r2 + rol r3 + adc r18,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + eor r26,r24 + eor r27,r25 + eor r2,r16 + eor r3,r17 + ldd r11,Z+24 + ldd r10,Z+25 + ldd r9,Z+26 + ldd r8,Z+27 + ldd r7,Z+28 + ldd r6,Z+29 + ldd r5,Z+30 + ldd r4,Z+31 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r17,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r0,r4 + mov r4,r6 + mov r6,r8 + mov r8,r10 + mov r10,r0 + mov r0,r5 + mov r5,r7 + mov r7,r9 + mov r9,r11 + mov r11,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + std Z+24,r11 + std Z+25,r10 + std Z+26,r9 + std Z+27,r8 + std Z+28,r7 + std Z+29,r6 + std Z+30,r5 + std Z+31,r4 + ldd r11,Z+32 + ldd r10,Z+33 + ldd r9,Z+34 + ldd r8,Z+35 + ldd r7,Z+36 + ldd r6,Z+37 + ldd r5,Z+38 + ldd r4,Z+39 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r9 + mov r0,r10 + push r11 + mov r11,r8 + mov r10,r7 + mov r9,r6 + mov r8,r5 + mov r7,r4 + pop r6 + mov r5,r0 + mov r4,r23 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + subi r22,15 + ldi r25,60 + cpse r22,r25 + rjmp 20b + std Z+16,r3 + std Z+17,r2 + std Z+18,r27 + std Z+19,r26 + std Z+20,r21 + std Z+21,r20 + std Z+22,r19 + std Z+23,r18 + std Z+32,r11 + std Z+33,r10 + std Z+34,r9 + std Z+35,r8 + std Z+36,r7 + std Z+37,r6 + std Z+38,r5 + std Z+39,r4 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size ascon_permute, .-ascon_permute + +#endif diff --git a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys/internal-ascon.c b/ascon/Implementations/crypto_aead/ascon80pqv12/rhys/internal-ascon.c index 
12a8ec6..657aabe 100644 --- a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys/internal-ascon.c +++ b/ascon/Implementations/crypto_aead/ascon80pqv12/rhys/internal-ascon.c @@ -22,6 +22,8 @@ #include "internal-ascon.h" +#if !defined(__AVR__) + void ascon_permute(ascon_state_t *state, uint8_t first_round) { uint64_t t0, t1, t2, t3, t4; @@ -74,3 +76,5 @@ void ascon_permute(ascon_state_t *state, uint8_t first_round) state->S[4] = x4; #endif } + +#endif /* !__AVR__ */ diff --git a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys/internal-util.h b/ascon/Implementations/crypto_aead/ascon80pqv12/rhys/internal-util.h index e79158c..e30166d 100644 --- a/ascon/Implementations/crypto_aead/ascon80pqv12/rhys/internal-util.h +++ b/ascon/Implementations/crypto_aead/ascon80pqv12/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 
1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) 
(leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/aead-common.c b/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/aead-common.h b/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
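The composed forms above rely on the fact that any 32-bit rotation can be built from a byte-aligned rotation (cheap on AVR) plus at most four single-bit rotations; for example leftRotate5 is a left rotation by 8 followed by a right rotation by 3. A standalone C99 check of that particular composition for one sample word, illustrative and independent of the library:

    #include <stdio.h>
    #include <stdint.h>

    /* Plain 32-bit rotations, mirroring the generic leftRotate/rightRotate macros */
    static uint32_t rotl32(uint32_t x, unsigned bits)
    {
        return (x << bits) | (x >> (32u - bits));
    }
    static uint32_t rotr32(uint32_t x, unsigned bits)
    {
        return (x >> bits) | (x << (32u - bits));
    }

    int main(void)
    {
        uint32_t x = 0x12345678;
        uint32_t direct   = rotl32(x, 5);               /* leftRotate5 the direct way */
        uint32_t composed = rotr32(rotl32(x, 8), 3);    /* rotate left 8, then right 3 */
        printf("direct   %08lx\ncomposed %08lx\n",
               (unsigned long)direct, (unsigned long)composed);
        return 0;   /* both lines print 468acf02 */
    }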
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. 
Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/api.h b/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/api.h deleted file mode 100644 index ae8c7f6..0000000 --- a/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/api.h +++ /dev/null @@ -1 +0,0 @@ -#define CRYPTO_BYTES 32 diff --git a/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/ascon-hash.c b/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/ascon-hash.c deleted file mode 100644 index b2c570d..0000000 --- a/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/ascon-hash.c +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
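aead_check_tag, declared above, folds the tag comparison and the destroy-on-failure step into one constant-time call: it returns 0 when the two tags match and -1 otherwise, zeroing the plaintext in the failure case. A short usage sketch with made-up tag values (real tags would come from the cipher's encrypt and decrypt paths), assuming it links against aead-common.c:

    #include <stdio.h>
    #include "aead-common.h"

    int main(void)
    {
        /* Hypothetical decrypted plaintext plus the tag recomputed over it */
        unsigned char plaintext[16]    = "attack at dawn.";
        unsigned char computed_tag[16] = {0};   /* placeholder value */
        unsigned char received_tag[16] = {1};   /* deliberately different here */

        /* Constant-time comparison; wipes the plaintext when the tags differ */
        int result = aead_check_tag(plaintext, sizeof(plaintext),
                                    computed_tag, received_tag,
                                    sizeof(computed_tag));

        printf("check %s, first plaintext byte is now %u\n",
               (result == 0) ? "succeeded" : "failed",
               (unsigned)plaintext[0]);
        return 0;   /* prints: check failed, first plaintext byte is now 0 */
    }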
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "ascon128.h" -#include "internal-ascon.h" -#include - -#define ASCON_HASH_RATE 8 -#define ascon_hash_permute() \ - ascon_permute((ascon_state_t *)(state->s.state), 0) - -aead_hash_algorithm_t const ascon_hash_algorithm = { - "ASCON-HASH", - sizeof(ascon_hash_state_t), - ASCON_HASH_SIZE, - AEAD_FLAG_NONE, - ascon_hash, - (aead_hash_init_t)ascon_hash_init, - (aead_hash_update_t)ascon_hash_update, - (aead_hash_finalize_t)ascon_hash_finalize, - 0, /* absorb */ - 0 /* squeeze */ -}; - -int ascon_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - ascon_hash_state_t state; - ascon_hash_init(&state); - ascon_hash_update(&state, in, inlen); - ascon_hash_finalize(&state, out); - return 0; -} - -void ascon_hash_init(ascon_hash_state_t *state) -{ - static unsigned char const hash_iv[40] = { - 0xee, 0x93, 0x98, 0xaa, 0xdb, 0x67, 0xf0, 0x3d, - 0x8b, 0xb2, 0x18, 0x31, 0xc6, 0x0f, 0x10, 0x02, - 0xb4, 0x8a, 0x92, 0xdb, 0x98, 0xd5, 0xda, 0x62, - 0x43, 0x18, 0x99, 0x21, 0xb8, 0xf8, 0xe3, 0xe8, - 0x34, 0x8f, 0xa5, 0xc9, 0xd5, 0x25, 0xe1, 0x40 - }; - memcpy(state->s.state, hash_iv, sizeof(hash_iv)); - state->s.count = 0; - state->s.mode = 0; -} - -void ascon_hash_update - (ascon_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - unsigned temp; - - /* Handle the partial left-over block from last time */ - if (state->s.count) { - temp = ASCON_HASH_RATE - state->s.count; - if (temp > inlen) { - temp = (unsigned)inlen; - lw_xor_block(state->s.state + state->s.count, in, temp); - state->s.count += temp; - return; - } - lw_xor_block(state->s.state + state->s.count, in, temp); - state->s.count = 0; - in += temp; - inlen -= temp; - ascon_hash_permute(); - } - - /* Process full blocks that are aligned at state->s.count == 0 */ - while (inlen >= ASCON_HASH_RATE) { - lw_xor_block(state->s.state, in, ASCON_HASH_RATE); - in += ASCON_HASH_RATE; - inlen -= ASCON_HASH_RATE; - ascon_hash_permute(); - } - - /* Process the left-over block at the end of the input */ - temp = (unsigned)inlen; - lw_xor_block(state->s.state, in, temp); - state->s.count = temp; -} - -void ascon_hash_finalize - (ascon_hash_state_t *state, unsigned char *out) -{ - unsigned index; - - /* Pad the final block */ - state->s.state[state->s.count] ^= 0x80; - - /* Squeeze out the finalized hash value */ - for (index = 0; index < ASCON_HASH_SIZE; index += ASCON_HASH_RATE) { - ascon_hash_permute(); - memcpy(out, 
state->s.state, ASCON_HASH_RATE); - out += ASCON_HASH_RATE; - } -} diff --git a/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/ascon128.h b/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/ascon128.h deleted file mode 100644 index fd9db13..0000000 --- a/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/ascon128.h +++ /dev/null @@ -1,408 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ASCON_H -#define LWCRYPTO_ASCON_H - -#include "aead-common.h" - -/** - * \file ascon128.h - * \brief ASCON-128 encryption algorithm and related family members. - * - * The ASCON family consists of several related algorithms: - * - * \li ASCON-128 with a 128-bit key, a 128-bit nonce, a 128-bit authentication - * tag, and a block rate of 64 bits. - * \li ASCON-128a with a 128-bit key, a 128-bit nonce, a 128-bit authentication - * tag, and a block rate of 128 bits. This is faster than ASCON-128 but may - * not be as secure. - * \li ASCON-80pq with a 160-bit key, a 128-bit nonce, a 128-bit authentication - * tag, and a block rate of 64 bits. This is similar to ASCON-128 but has a - * 160-bit key instead which may be more resistant against quantum computers. - * \li ASCON-HASH with a 256-bit hash output. - * - * References: https://ascon.iaik.tugraz.at/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for ASCON-128 and ASCON-128a. - */ -#define ASCON128_KEY_SIZE 16 - -/** - * \brief Size of the nonce for ASCON-128 and ASCON-128a. - */ -#define ASCON128_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for ASCON-128 and ASCON-128a. - */ -#define ASCON128_TAG_SIZE 16 - -/** - * \brief Size of the key for ASCON-80pq. - */ -#define ASCON80PQ_KEY_SIZE 20 - -/** - * \brief Size of the nonce for ASCON-80pq. - */ -#define ASCON80PQ_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for ASCON-80pq. - */ -#define ASCON80PQ_TAG_SIZE 16 - -/** - * \brief Size of the hash output for ASCON-HASH. - */ -#define ASCON_HASH_SIZE 32 - -/** - * \brief State information for ASCON-HASH and ASCON-XOF incremental modes. 
- */ -typedef union -{ - struct { - unsigned char state[40]; /**< Current hash state */ - unsigned char count; /**< Number of bytes in the current block */ - unsigned char mode; /**< Hash mode: 0 for absorb, 1 for squeeze */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} ascon_hash_state_t; - -/** - * \brief Meta-information block for the ASCON-128 cipher. - */ -extern aead_cipher_t const ascon128_cipher; - -/** - * \brief Meta-information block for the ASCON-128a cipher. - */ -extern aead_cipher_t const ascon128a_cipher; - -/** - * \brief Meta-information block for the ASCON-80pq cipher. - */ -extern aead_cipher_t const ascon80pq_cipher; - -/** - * \brief Meta-information block for the ASCON-HASH algorithm. - */ -extern aead_hash_algorithm_t const ascon_hash_algorithm; - -/** - * \brief Meta-information block for the ASCON-XOF algorithm. - */ -extern aead_hash_algorithm_t const ascon_xof_algorithm; - -/** - * \brief Encrypts and authenticates a packet with ASCON-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa ascon128_aead_decrypt() - */ -int ascon128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ASCON-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa ascon128_aead_encrypt() - */ -int ascon128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ASCON-128a. 
- * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa ascon128a_aead_decrypt() - */ -int ascon128a_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ASCON-128a. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa ascon128a_aead_encrypt() - */ -int ascon128a_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ASCON-80pq. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 20 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa ascon80pq_aead_decrypt() - */ -int ascon80pq_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ASCON-80pq. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 20 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa ascon80pq_aead_encrypt() - */ -int ascon80pq_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with ASCON-HASH. - * - * \param out Buffer to receive the hash output which must be at least - * ASCON_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - * - * \sa ascon_hash_init(), ascon_hash_absorb(), ascon_hash_squeeze() - */ -int ascon_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an ASCON-HASH hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa ascon_hash_update(), ascon_hash_finalize(), ascon_hash() - */ -void ascon_hash_init(ascon_hash_state_t *state); - -/** - * \brief Updates an ASCON-HASH state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa ascon_hash_init(), ascon_hash_finalize() - */ -void ascon_hash_update - (ascon_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an ASCON-HASH hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 32-byte hash value. - * - * \sa ascon_hash_init(), ascon_hash_update() - */ -void ascon_hash_finalize - (ascon_hash_state_t *state, unsigned char *out); - -/** - * \brief Hashes a block of input data with ASCON-XOF and generates a - * fixed-length 32 byte output. - * - * \param out Buffer to receive the hash output which must be at least - * ASCON_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. 
- * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - * - * Use ascon_xof_squeeze() instead if you need variable-length XOF ouutput. - * - * \sa ascon_xof_init(), ascon_xof_absorb(), ascon_xof_squeeze() - */ -int ascon_xof - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an ASCON-XOF hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa ascon_xof_absorb(), ascon_xof_squeeze(), ascon_xof() - */ -void ascon_xof_init(ascon_hash_state_t *state); - -/** - * \brief Aborbs more input data into an ASCON-XOF state. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -void ascon_xof_absorb - (ascon_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Squeezes output data from an ASCON-XOF state. - * - * \param state Hash state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - * - * \sa ascon_xof_init(), ascon_xof_update() - */ -void ascon_xof_squeeze - (ascon_hash_state_t *state, unsigned char *out, unsigned long long outlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/hash.c b/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/hash.c deleted file mode 100644 index 5c69526..0000000 --- a/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/hash.c +++ /dev/null @@ -1,8 +0,0 @@ - -#include "ascon128.h" - -int crypto_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - return ascon_hash(out, in, inlen); -} diff --git a/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/internal-ascon-avr.S b/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/internal-ascon-avr.S deleted file mode 100644 index e8a4fb4..0000000 --- a/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/internal-ascon-avr.S +++ /dev/null @@ -1,778 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global ascon_permute - .type ascon_permute, @function -ascon_permute: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ldi r18,15 - sub r18,r22 - swap r18 - or r22,r18 - ldd r3,Z+16 - ldd r2,Z+17 - ldd r27,Z+18 - ldd r26,Z+19 - ldd r21,Z+20 - ldd r20,Z+21 - ldd r19,Z+22 - ldd r18,Z+23 - ldd r11,Z+32 - ldd r10,Z+33 - ldd r9,Z+34 - ldd r8,Z+35 - ldd r7,Z+36 - ldd r6,Z+37 - ldd r5,Z+38 - ldd r4,Z+39 -20: - eor r18,r22 - ldd r23,Z+7 - ldd r12,Z+15 - ldd r13,Z+31 - eor r23,r4 - eor r4,r13 - eor r18,r12 - mov r14,r23 - mov r15,r12 - mov r24,r18 - mov r25,r13 - mov r16,r4 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r18 - and r24,r13 - and r25,r4 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r18,r25 - eor r13,r16 - eor r4,r14 - eor r12,r23 - eor r23,r4 - eor r13,r18 - com r18 - std Z+7,r23 - std Z+15,r12 - std Z+31,r13 - std Z+39,r4 - ldd r23,Z+6 - ldd r12,Z+14 - ldd r13,Z+30 - eor r23,r5 - eor r5,r13 - eor r19,r12 - mov r14,r23 - mov r15,r12 - mov r24,r19 - mov r25,r13 - mov r16,r5 - com r14 - com r15 - com r24 - 
com r25 - com r16 - and r14,r12 - and r15,r19 - and r24,r13 - and r25,r5 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r19,r25 - eor r13,r16 - eor r5,r14 - eor r12,r23 - eor r23,r5 - eor r13,r19 - com r19 - std Z+6,r23 - std Z+14,r12 - std Z+30,r13 - std Z+38,r5 - ldd r23,Z+5 - ldd r12,Z+13 - ldd r13,Z+29 - eor r23,r6 - eor r6,r13 - eor r20,r12 - mov r14,r23 - mov r15,r12 - mov r24,r20 - mov r25,r13 - mov r16,r6 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r20 - and r24,r13 - and r25,r6 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r20,r25 - eor r13,r16 - eor r6,r14 - eor r12,r23 - eor r23,r6 - eor r13,r20 - com r20 - std Z+5,r23 - std Z+13,r12 - std Z+29,r13 - std Z+37,r6 - ldd r23,Z+4 - ldd r12,Z+12 - ldd r13,Z+28 - eor r23,r7 - eor r7,r13 - eor r21,r12 - mov r14,r23 - mov r15,r12 - mov r24,r21 - mov r25,r13 - mov r16,r7 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r21 - and r24,r13 - and r25,r7 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r21,r25 - eor r13,r16 - eor r7,r14 - eor r12,r23 - eor r23,r7 - eor r13,r21 - com r21 - std Z+4,r23 - std Z+12,r12 - std Z+28,r13 - std Z+36,r7 - ldd r23,Z+3 - ldd r12,Z+11 - ldd r13,Z+27 - eor r23,r8 - eor r8,r13 - eor r26,r12 - mov r14,r23 - mov r15,r12 - mov r24,r26 - mov r25,r13 - mov r16,r8 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r26 - and r24,r13 - and r25,r8 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r26,r25 - eor r13,r16 - eor r8,r14 - eor r12,r23 - eor r23,r8 - eor r13,r26 - com r26 - std Z+3,r23 - std Z+11,r12 - std Z+27,r13 - std Z+35,r8 - ldd r23,Z+2 - ldd r12,Z+10 - ldd r13,Z+26 - eor r23,r9 - eor r9,r13 - eor r27,r12 - mov r14,r23 - mov r15,r12 - mov r24,r27 - mov r25,r13 - mov r16,r9 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r27 - and r24,r13 - and r25,r9 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r27,r25 - eor r13,r16 - eor r9,r14 - eor r12,r23 - eor r23,r9 - eor r13,r27 - com r27 - std Z+2,r23 - std Z+10,r12 - std Z+26,r13 - std Z+34,r9 - ldd r23,Z+1 - ldd r12,Z+9 - ldd r13,Z+25 - eor r23,r10 - eor r10,r13 - eor r2,r12 - mov r14,r23 - mov r15,r12 - mov r24,r2 - mov r25,r13 - mov r16,r10 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r2 - and r24,r13 - and r25,r10 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r2,r25 - eor r13,r16 - eor r10,r14 - eor r12,r23 - eor r23,r10 - eor r13,r2 - com r2 - std Z+1,r23 - std Z+9,r12 - std Z+25,r13 - std Z+33,r10 - ld r23,Z - ldd r12,Z+8 - ldd r13,Z+24 - eor r23,r11 - eor r11,r13 - eor r3,r12 - mov r14,r23 - mov r15,r12 - mov r24,r3 - mov r25,r13 - mov r16,r11 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r3 - and r24,r13 - and r25,r11 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r3,r25 - eor r13,r16 - eor r11,r14 - eor r12,r23 - eor r23,r11 - eor r13,r3 - com r3 - st Z,r23 - std Z+8,r12 - std Z+24,r13 - std Z+32,r11 - ld r11,Z - ldd r10,Z+1 - ldd r9,Z+2 - ldd r8,Z+3 - ldd r7,Z+4 - ldd r6,Z+5 - ldd r5,Z+6 - ldd r4,Z+7 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r14 - mov r14,r24 - mov r24,r16 - mov r16,r0 - mov r0,r13 - mov r13,r15 - mov r15,r25 - mov r25,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r17,r0 - eor 
r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r4 - mov r0,r5 - push r6 - mov r4,r7 - mov r5,r8 - mov r6,r9 - mov r7,r10 - mov r8,r11 - pop r11 - mov r10,r0 - mov r9,r23 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - st Z,r11 - std Z+1,r10 - std Z+2,r9 - std Z+3,r8 - std Z+4,r7 - std Z+5,r6 - std Z+6,r5 - std Z+7,r4 - ldd r11,Z+8 - ldd r10,Z+9 - ldd r9,Z+10 - ldd r8,Z+11 - ldd r7,Z+12 - ldd r6,Z+13 - ldd r5,Z+14 - ldd r4,Z+15 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r9 - mov r0,r10 - push r11 - mov r11,r8 - mov r10,r7 - mov r9,r6 - mov r8,r5 - mov r7,r4 - pop r6 - mov r5,r0 - mov r4,r23 - lsl r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - adc r4,r1 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - std Z+8,r11 - std Z+9,r10 - std Z+10,r9 - std Z+11,r8 - std Z+12,r7 - std Z+13,r6 - std Z+14,r5 - std Z+15,r4 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - bst r12,0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - bld r17,7 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - eor r24,r26 - eor r25,r27 - eor r16,r2 - eor r17,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r2 - rol r3 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r2 - rol r3 - adc r18,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - eor r26,r24 - eor r27,r25 - eor r2,r16 - eor r3,r17 - ldd r11,Z+24 - ldd r10,Z+25 - ldd r9,Z+26 - ldd r8,Z+27 - ldd r7,Z+28 - ldd r6,Z+29 - ldd r5,Z+30 - ldd r4,Z+31 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r17,r0 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r0,r4 - mov r4,r6 - mov r6,r8 - mov r8,r10 - mov r10,r0 - mov r0,r5 - mov r5,r7 - mov r7,r9 - mov r9,r11 - mov r11,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - std Z+24,r11 - std Z+25,r10 - std Z+26,r9 - std Z+27,r8 - std Z+28,r7 - std Z+29,r6 - std Z+30,r5 - std Z+31,r4 - ldd r11,Z+32 - ldd 
r10,Z+33 - ldd r9,Z+34 - ldd r8,Z+35 - ldd r7,Z+36 - ldd r6,Z+37 - ldd r5,Z+38 - ldd r4,Z+39 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r9 - mov r0,r10 - push r11 - mov r11,r8 - mov r10,r7 - mov r9,r6 - mov r8,r5 - mov r7,r4 - pop r6 - mov r5,r0 - mov r4,r23 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - subi r22,15 - ldi r25,60 - cpse r22,r25 - rjmp 20b - std Z+16,r3 - std Z+17,r2 - std Z+18,r27 - std Z+19,r26 - std Z+20,r21 - std Z+21,r20 - std Z+22,r19 - std Z+23,r18 - std Z+32,r11 - std Z+33,r10 - std Z+34,r9 - std Z+35,r8 - std Z+36,r7 - std Z+37,r6 - std Z+38,r5 - std Z+39,r4 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size ascon_permute, .-ascon_permute - -#endif diff --git a/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/internal-ascon.c b/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/internal-ascon.c deleted file mode 100644 index 657aabe..0000000 --- a/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/internal-ascon.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "internal-ascon.h" - -#if !defined(__AVR__) - -void ascon_permute(ascon_state_t *state, uint8_t first_round) -{ - uint64_t t0, t1, t2, t3, t4; -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = be_load_word64(state->B); - uint64_t x1 = be_load_word64(state->B + 8); - uint64_t x2 = be_load_word64(state->B + 16); - uint64_t x3 = be_load_word64(state->B + 24); - uint64_t x4 = be_load_word64(state->B + 32); -#else - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; -#endif - while (first_round < 12) { - /* Add the round constant to the state */ - x2 ^= ((0x0F - first_round) << 4) | first_round; - - /* Substitution layer - apply the s-box using bit-slicing - * according to the algorithm recommended in the specification */ - x0 ^= x4; x4 ^= x3; x2 ^= x1; - t0 = ~x0; t1 = ~x1; t2 = ~x2; t3 = ~x3; t4 = ~x4; - t0 &= x1; t1 &= x2; t2 &= x3; t3 &= x4; t4 &= x0; - x0 ^= t1; x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t0; - x1 ^= x0; x0 ^= x4; x3 ^= x2; x2 = ~x2; - - /* Linear diffusion layer */ - x0 ^= rightRotate19_64(x0) ^ rightRotate28_64(x0); - x1 ^= rightRotate61_64(x1) ^ rightRotate39_64(x1); - x2 ^= rightRotate1_64(x2) ^ rightRotate6_64(x2); - x3 ^= rightRotate10_64(x3) ^ rightRotate17_64(x3); - x4 ^= rightRotate7_64(x4) ^ rightRotate41_64(x4); - - /* Move onto the next round */ - ++first_round; - } -#if defined(LW_UTIL_LITTLE_ENDIAN) - be_store_word64(state->B, x0); - be_store_word64(state->B + 8, x1); - be_store_word64(state->B + 16, x2); - be_store_word64(state->B + 24, x3); - be_store_word64(state->B + 32, x4); -#else - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; -#endif -} - -#endif /* !__AVR__ */ diff --git a/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/internal-ascon.h b/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/internal-ascon.h deleted file mode 100644 index d3fa3ca..0000000 --- a/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/internal-ascon.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_ASCON_H -#define LW_INTERNAL_ASCON_H - -#include "internal-util.h" - -/** - * \file internal-ascon.h - * \brief Internal implementation of the ASCON permutation. 
- * - * References: http://competitions.cr.yp.to/round3/asconv12.pdf, - * http://ascon.iaik.tugraz.at/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Structure of the internal state of the ASCON permutation. - */ -typedef union -{ - uint64_t S[5]; /**< Words of the state */ - uint8_t B[40]; /**< Bytes of the state */ - -} ascon_state_t; - -/** - * \brief Permutes the ASCON state. - * - * \param state The ASCON state to be permuted. - * \param first_round The first round (of 12) to be performed; 0, 4, or 6. - * - * The input and output \a state will be in big-endian byte order. - */ -void ascon_permute(ascon_state_t *state, uint8_t first_round); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/internal-util.h b/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/ascon/Implementations/crypto_hash/asconhashv12/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/ascon/Implementations/crypto_hash/asconhashv12/rhys/aead-common.c b/ascon/Implementations/crypto_hash/asconhashv12/rhys/aead-common.c new file mode 100644 index 0000000..84fc53a --- /dev/null +++ b/ascon/Implementations/crypto_hash/asconhashv12/rhys/aead-common.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "aead-common.h" + +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = (accum - 1) >> 8; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} + +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size, int precheck) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = ((accum - 1) >> 8) & precheck; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} diff --git a/ascon/Implementations/crypto_hash/asconhashv12/rhys/aead-common.h b/ascon/Implementations/crypto_hash/asconhashv12/rhys/aead-common.h new file mode 100644 index 0000000..2be95eb --- /dev/null +++ b/ascon/Implementations/crypto_hash/asconhashv12/rhys/aead-common.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
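The two tag-checking routines above share one constant-time idiom: fold the byte-wise differences into an accumulator, then turn "no difference" into an all-ones mask and "any difference" into an all-zeros mask, so the plaintext can be kept or wiped without branching on secret data. A minimal standalone sketch of just that idiom follows; the names are illustrative only, and like the library code it assumes an arithmetic right shift of negative values.

    #include <assert.h>

    /* Fold byte differences into "accum", then map:
     *   accum == 0  (tags equal)  -> mask = -1 (all ones)
     *   accum != 0  (tags differ) -> mask =  0
     * This mirrors the computation inside aead_check_tag(). */
    static int tag_mask(const unsigned char *tag1, const unsigned char *tag2,
                        unsigned size)
    {
        int accum = 0;
        while (size > 0) {
            accum |= (*tag1++ ^ *tag2++);
            --size;
        }
        return (accum - 1) >> 8;
    }

    int main(void)
    {
        unsigned char a[4] = {0xDE, 0xAD, 0xBE, 0xEF};
        unsigned char b[4] = {0xDE, 0xAD, 0xBE, 0xEF};
        unsigned char c[4] = {0xDE, 0xAD, 0xBE, 0xEE};
        assert(tag_mask(a, b, 4) == -1);  /* equal: keep the plaintext */
        assert(tag_mask(a, c, 4) == 0);   /* different: wipe the plaintext */
        return 0;
    }

The callers above then AND every plaintext byte with the mask and return ~mask, which yields 0 on success and -1 on failure as documented.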
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_AEAD_COMMON_H +#define LWCRYPTO_AEAD_COMMON_H + +#include + +/** + * \file aead-common.h + * \brief Definitions that are common across AEAD schemes. + * + * AEAD stands for "Authenticated Encryption with Associated Data". + * It is a standard API pattern for securely encrypting and + * authenticating packets of data. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts and authenticates a packet with an AEAD scheme. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + */ +typedef int (*aead_cipher_encrypt_t) + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with an AEAD scheme. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. 
+ */ +typedef int (*aead_cipher_decrypt_t) + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data. + * + * \param out Buffer to receive the hash output. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +typedef int (*aead_hash_t) + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a hashing operation. + * + * \param state Hash state to be initialized. + */ +typedef void (*aead_hash_init_t)(void *state); + +/** + * \brief Updates a hash state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + */ +typedef void (*aead_hash_update_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Returns the final hash value from a hashing operation. + * + * \param Hash state to be finalized. + * \param out Points to the output buffer to receive the hash value. + */ +typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); + +/** + * \brief Aborbs more input data into an XOF state. + * + * \param state XOF state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. + * + * \sa ascon_xof_init(), ascon_xof_squeeze() + */ +typedef void (*aead_xof_absorb_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Squeezes output data from an XOF state. + * + * \param state XOF state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + */ +typedef void (*aead_xof_squeeze_t) + (void *state, unsigned char *out, unsigned long long outlen); + +/** + * \brief No special AEAD features. + */ +#define AEAD_FLAG_NONE 0x0000 + +/** + * \brief The natural byte order of the AEAD cipher is little-endian. + * + * If this flag is not present, then the natural byte order of the + * AEAD cipher should be assumed to be big-endian. + * + * The natural byte order may be useful when formatting packet sequence + * numbers as nonces. The application needs to know whether the sequence + * number should be packed into the leading or trailing bytes of the nonce. + */ +#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 + +/** + * \brief Meta-information about an AEAD cipher. + */ +typedef struct +{ + const char *name; /**< Name of the cipher */ + unsigned key_len; /**< Length of the key in bytes */ + unsigned nonce_len; /**< Length of the nonce in bytes */ + unsigned tag_len; /**< Length of the tag in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ + aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ + +} aead_cipher_t; + +/** + * \brief Meta-information about a hash algorithm that is related to an AEAD. + * + * Regular hash algorithms should provide the "hash", "init", "update", + * and "finalize" functions. 
Extensible Output Functions (XOF's) should + * proivde the "hash", "init", "absorb", and "squeeze" functions. + */ +typedef struct +{ + const char *name; /**< Name of the hash algorithm */ + size_t state_size; /**< Size of the incremental state structure */ + unsigned hash_len; /**< Length of the hash in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_hash_t hash; /**< All in one hashing function */ + aead_hash_init_t init; /**< Incremental hash/XOF init function */ + aead_hash_update_t update; /**< Incremental hash update function */ + aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ + aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ + aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ + +} aead_hash_algorithm_t; + +/** + * \brief Check an authentication tag in constant time. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + */ +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len); + +/** + * \brief Check an authentication tag in constant time with a previous check. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * \param precheck Set to -1 if previous check succeeded or 0 if it failed. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + * + * This version can be used to incorporate other information about the + * correctness of the plaintext into the final result. + */ +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len, int precheck); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ascon/Implementations/crypto_hash/asconhashv12/rhys/api.h b/ascon/Implementations/crypto_hash/asconhashv12/rhys/api.h new file mode 100644 index 0000000..ae8c7f6 --- /dev/null +++ b/ascon/Implementations/crypto_hash/asconhashv12/rhys/api.h @@ -0,0 +1 @@ +#define CRYPTO_BYTES 32 diff --git a/ascon/Implementations/crypto_hash/asconhashv12/rhys/ascon-hash.c b/ascon/Implementations/crypto_hash/asconhashv12/rhys/ascon-hash.c new file mode 100644 index 0000000..b2c570d --- /dev/null +++ b/ascon/Implementations/crypto_hash/asconhashv12/rhys/ascon-hash.c @@ -0,0 +1,118 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
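The aead_hash_algorithm_t block lets a harness drive any hash in the library without knowing its concrete type; state_size says how much memory an incremental state needs, and the init/update/finalize pointers cover the incremental path. A rough sketch of such a generic driver, written against the ascon_hash_algorithm descriptor that ascon-hash.c defines a little further down in this patch (the sketch itself is not part of the patch):

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include "ascon128.h"   /* declares ascon_hash_algorithm */

    /* Hash "in" twice: once with the all-in-one entry point and once
     * incrementally through the descriptor, and check that both agree. */
    static int hash_both_ways(const aead_hash_algorithm_t *alg,
                              const unsigned char *in, unsigned long long inlen)
    {
        unsigned char out1[64], out2[64];   /* large enough for hash_len here */
        void *state = malloc(alg->state_size);
        if (!state)
            return -1;

        if (alg->hash(out1, in, inlen) != 0) {
            free(state);
            return -1;
        }

        alg->init(state);
        alg->update(state, in, inlen / 2);
        alg->update(state, in + inlen / 2, inlen - inlen / 2);
        alg->finalize(state, out2);

        free(state);
        return memcmp(out1, out2, alg->hash_len) == 0 ? 0 : -1;
    }

    int main(void)
    {
        static const unsigned char msg[] = "abc";
        assert(hash_both_ways(&ascon_hash_algorithm, msg, 3) == 0);
        printf("%s ok\n", ascon_hash_algorithm.name);
        return 0;
    }

For XOF-style descriptors the update/finalize pointers may be null and the absorb/squeeze pointers used instead, as the structure comments above describe.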
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "ascon128.h" +#include "internal-ascon.h" +#include + +#define ASCON_HASH_RATE 8 +#define ascon_hash_permute() \ + ascon_permute((ascon_state_t *)(state->s.state), 0) + +aead_hash_algorithm_t const ascon_hash_algorithm = { + "ASCON-HASH", + sizeof(ascon_hash_state_t), + ASCON_HASH_SIZE, + AEAD_FLAG_NONE, + ascon_hash, + (aead_hash_init_t)ascon_hash_init, + (aead_hash_update_t)ascon_hash_update, + (aead_hash_finalize_t)ascon_hash_finalize, + 0, /* absorb */ + 0 /* squeeze */ +}; + +int ascon_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + ascon_hash_state_t state; + ascon_hash_init(&state); + ascon_hash_update(&state, in, inlen); + ascon_hash_finalize(&state, out); + return 0; +} + +void ascon_hash_init(ascon_hash_state_t *state) +{ + static unsigned char const hash_iv[40] = { + 0xee, 0x93, 0x98, 0xaa, 0xdb, 0x67, 0xf0, 0x3d, + 0x8b, 0xb2, 0x18, 0x31, 0xc6, 0x0f, 0x10, 0x02, + 0xb4, 0x8a, 0x92, 0xdb, 0x98, 0xd5, 0xda, 0x62, + 0x43, 0x18, 0x99, 0x21, 0xb8, 0xf8, 0xe3, 0xe8, + 0x34, 0x8f, 0xa5, 0xc9, 0xd5, 0x25, 0xe1, 0x40 + }; + memcpy(state->s.state, hash_iv, sizeof(hash_iv)); + state->s.count = 0; + state->s.mode = 0; +} + +void ascon_hash_update + (ascon_hash_state_t *state, const unsigned char *in, + unsigned long long inlen) +{ + unsigned temp; + + /* Handle the partial left-over block from last time */ + if (state->s.count) { + temp = ASCON_HASH_RATE - state->s.count; + if (temp > inlen) { + temp = (unsigned)inlen; + lw_xor_block(state->s.state + state->s.count, in, temp); + state->s.count += temp; + return; + } + lw_xor_block(state->s.state + state->s.count, in, temp); + state->s.count = 0; + in += temp; + inlen -= temp; + ascon_hash_permute(); + } + + /* Process full blocks that are aligned at state->s.count == 0 */ + while (inlen >= ASCON_HASH_RATE) { + lw_xor_block(state->s.state, in, ASCON_HASH_RATE); + in += ASCON_HASH_RATE; + inlen -= ASCON_HASH_RATE; + ascon_hash_permute(); + } + + /* Process the left-over block at the end of the input */ + temp = (unsigned)inlen; + lw_xor_block(state->s.state, in, temp); + state->s.count = temp; +} + +void ascon_hash_finalize + (ascon_hash_state_t *state, unsigned char *out) +{ + unsigned index; + + /* Pad the final block */ + state->s.state[state->s.count] ^= 0x80; + + /* Squeeze out the finalized hash value */ + for (index = 0; index < ASCON_HASH_SIZE; index += ASCON_HASH_RATE) { + ascon_hash_permute(); + memcpy(out, 
state->s.state, ASCON_HASH_RATE); + out += ASCON_HASH_RATE; + } +} diff --git a/ascon/Implementations/crypto_hash/asconhashv12/rhys/ascon128.h b/ascon/Implementations/crypto_hash/asconhashv12/rhys/ascon128.h new file mode 100644 index 0000000..fd9db13 --- /dev/null +++ b/ascon/Implementations/crypto_hash/asconhashv12/rhys/ascon128.h @@ -0,0 +1,408 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_ASCON_H +#define LWCRYPTO_ASCON_H + +#include "aead-common.h" + +/** + * \file ascon128.h + * \brief ASCON-128 encryption algorithm and related family members. + * + * The ASCON family consists of several related algorithms: + * + * \li ASCON-128 with a 128-bit key, a 128-bit nonce, a 128-bit authentication + * tag, and a block rate of 64 bits. + * \li ASCON-128a with a 128-bit key, a 128-bit nonce, a 128-bit authentication + * tag, and a block rate of 128 bits. This is faster than ASCON-128 but may + * not be as secure. + * \li ASCON-80pq with a 160-bit key, a 128-bit nonce, a 128-bit authentication + * tag, and a block rate of 64 bits. This is similar to ASCON-128 but has a + * 160-bit key instead which may be more resistant against quantum computers. + * \li ASCON-HASH with a 256-bit hash output. + * + * References: https://ascon.iaik.tugraz.at/ + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the key for ASCON-128 and ASCON-128a. + */ +#define ASCON128_KEY_SIZE 16 + +/** + * \brief Size of the nonce for ASCON-128 and ASCON-128a. + */ +#define ASCON128_NONCE_SIZE 16 + +/** + * \brief Size of the authentication tag for ASCON-128 and ASCON-128a. + */ +#define ASCON128_TAG_SIZE 16 + +/** + * \brief Size of the key for ASCON-80pq. + */ +#define ASCON80PQ_KEY_SIZE 20 + +/** + * \brief Size of the nonce for ASCON-80pq. + */ +#define ASCON80PQ_NONCE_SIZE 16 + +/** + * \brief Size of the authentication tag for ASCON-80pq. + */ +#define ASCON80PQ_TAG_SIZE 16 + +/** + * \brief Size of the hash output for ASCON-HASH. + */ +#define ASCON_HASH_SIZE 32 + +/** + * \brief State information for ASCON-HASH and ASCON-XOF incremental modes. 
+ */ +typedef union +{ + struct { + unsigned char state[40]; /**< Current hash state */ + unsigned char count; /**< Number of bytes in the current block */ + unsigned char mode; /**< Hash mode: 0 for absorb, 1 for squeeze */ + } s; /**< State */ + unsigned long long align; /**< For alignment of this structure */ + +} ascon_hash_state_t; + +/** + * \brief Meta-information block for the ASCON-128 cipher. + */ +extern aead_cipher_t const ascon128_cipher; + +/** + * \brief Meta-information block for the ASCON-128a cipher. + */ +extern aead_cipher_t const ascon128a_cipher; + +/** + * \brief Meta-information block for the ASCON-80pq cipher. + */ +extern aead_cipher_t const ascon80pq_cipher; + +/** + * \brief Meta-information block for the ASCON-HASH algorithm. + */ +extern aead_hash_algorithm_t const ascon_hash_algorithm; + +/** + * \brief Meta-information block for the ASCON-XOF algorithm. + */ +extern aead_hash_algorithm_t const ascon_xof_algorithm; + +/** + * \brief Encrypts and authenticates a packet with ASCON-128. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa ascon128_aead_decrypt() + */ +int ascon128_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with ASCON-128. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa ascon128_aead_encrypt() + */ +int ascon128_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with ASCON-128a. 
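The prototypes above pair in the obvious way: the ciphertext buffer must hold mlen plus ASCON128_TAG_SIZE bytes, and decryption returns -1 (and wipes the recovered plaintext) when the tag does not verify. A short sketch of that contract; the all-zero key and nonce are placeholders only, and the AEAD source files from the full Rhys library, which are not part of this hash-only directory, would need to be linked in for it to build.

    #include <assert.h>
    #include <string.h>
    #include "ascon128.h"

    int main(void)
    {
        unsigned char key[ASCON128_KEY_SIZE] = {0};     /* placeholder key */
        unsigned char nonce[ASCON128_NONCE_SIZE] = {0}; /* must be unique per packet */
        unsigned char ad[] = "header";
        unsigned char msg[] = "hello ascon";
        unsigned char ct[sizeof(msg) + ASCON128_TAG_SIZE];
        unsigned char pt[sizeof(msg)];
        unsigned long long ctlen, ptlen;

        assert(ascon128_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                                     ad, sizeof(ad), 0, nonce, key) == 0);
        assert(ctlen == sizeof(msg) + ASCON128_TAG_SIZE);

        assert(ascon128_aead_decrypt(pt, &ptlen, 0, ct, ctlen,
                                     ad, sizeof(ad), nonce, key) == 0);
        assert(ptlen == sizeof(msg) && memcmp(pt, msg, sizeof(msg)) == 0);

        ct[0] ^= 1;  /* flip one ciphertext bit: the tag check must now fail */
        assert(ascon128_aead_decrypt(pt, &ptlen, 0, ct, ctlen,
                                     ad, sizeof(ad), nonce, key) == -1);
        return 0;
    }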
+ * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa ascon128a_aead_decrypt() + */ +int ascon128a_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with ASCON-128a. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa ascon128a_aead_encrypt() + */ +int ascon128a_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with ASCON-80pq. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 20 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. 
+ * + * \sa ascon80pq_aead_decrypt() + */ +int ascon80pq_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with ASCON-80pq. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 20 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa ascon80pq_aead_encrypt() + */ +int ascon80pq_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data with ASCON-HASH. + * + * \param out Buffer to receive the hash output which must be at least + * ASCON_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + * + * \sa ascon_hash_init(), ascon_hash_absorb(), ascon_hash_squeeze() + */ +int ascon_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for an ASCON-HASH hashing operation. + * + * \param state Hash state to be initialized. + * + * \sa ascon_hash_update(), ascon_hash_finalize(), ascon_hash() + */ +void ascon_hash_init(ascon_hash_state_t *state); + +/** + * \brief Updates an ASCON-HASH state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + * + * \sa ascon_hash_init(), ascon_hash_finalize() + */ +void ascon_hash_update + (ascon_hash_state_t *state, const unsigned char *in, + unsigned long long inlen); + +/** + * \brief Returns the final hash value from an ASCON-HASH hashing operation. + * + * \param state Hash state to be finalized. + * \param out Points to the output buffer to receive the 32-byte hash value. + * + * \sa ascon_hash_init(), ascon_hash_update() + */ +void ascon_hash_finalize + (ascon_hash_state_t *state, unsigned char *out); + +/** + * \brief Hashes a block of input data with ASCON-XOF and generates a + * fixed-length 32 byte output. + * + * \param out Buffer to receive the hash output which must be at least + * ASCON_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. 
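The XOF entry points declared below differ from the fixed-output hash in that output can be squeezed in arbitrary-sized pieces. A small sketch of that property, assuming the ASCON-XOF implementation from the same library (not shown in this hunk) is linked in: squeezing in two small calls must produce the same byte stream as one large call over the same absorbed input.

    #include <assert.h>
    #include <string.h>
    #include "ascon128.h"

    int main(void)
    {
        static const unsigned char msg[] = "squeeze me";
        ascon_hash_state_t st;
        unsigned char a[32], b[32];

        ascon_xof_init(&st);
        ascon_xof_absorb(&st, msg, sizeof(msg) - 1);
        ascon_xof_squeeze(&st, a, 16);        /* first 16 bytes ...   */
        ascon_xof_squeeze(&st, a + 16, 16);   /* ... then the next 16 */

        ascon_xof_init(&st);
        ascon_xof_absorb(&st, msg, sizeof(msg) - 1);
        ascon_xof_squeeze(&st, b, 32);        /* all 32 bytes at once */

        assert(memcmp(a, b, 32) == 0);
        return 0;
    }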
+ * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + * + * Use ascon_xof_squeeze() instead if you need variable-length XOF ouutput. + * + * \sa ascon_xof_init(), ascon_xof_absorb(), ascon_xof_squeeze() + */ +int ascon_xof + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for an ASCON-XOF hashing operation. + * + * \param state Hash state to be initialized. + * + * \sa ascon_xof_absorb(), ascon_xof_squeeze(), ascon_xof() + */ +void ascon_xof_init(ascon_hash_state_t *state); + +/** + * \brief Aborbs more input data into an ASCON-XOF state. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. + * + * \sa ascon_xof_init(), ascon_xof_squeeze() + */ +void ascon_xof_absorb + (ascon_hash_state_t *state, const unsigned char *in, + unsigned long long inlen); + +/** + * \brief Squeezes output data from an ASCON-XOF state. + * + * \param state Hash state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + * + * \sa ascon_xof_init(), ascon_xof_update() + */ +void ascon_xof_squeeze + (ascon_hash_state_t *state, unsigned char *out, unsigned long long outlen); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ascon/Implementations/crypto_hash/asconhashv12/rhys/hash.c b/ascon/Implementations/crypto_hash/asconhashv12/rhys/hash.c new file mode 100644 index 0000000..5c69526 --- /dev/null +++ b/ascon/Implementations/crypto_hash/asconhashv12/rhys/hash.c @@ -0,0 +1,8 @@ + +#include "ascon128.h" + +int crypto_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + return ascon_hash(out, in, inlen); +} diff --git a/ascon/Implementations/crypto_hash/asconhashv12/rhys/internal-ascon-avr.S b/ascon/Implementations/crypto_hash/asconhashv12/rhys/internal-ascon-avr.S new file mode 100644 index 0000000..e8a4fb4 --- /dev/null +++ b/ascon/Implementations/crypto_hash/asconhashv12/rhys/internal-ascon-avr.S @@ -0,0 +1,778 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global ascon_permute + .type ascon_permute, @function +ascon_permute: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ldi r18,15 + sub r18,r22 + swap r18 + or r22,r18 + ldd r3,Z+16 + ldd r2,Z+17 + ldd r27,Z+18 + ldd r26,Z+19 + ldd r21,Z+20 + ldd r20,Z+21 + ldd r19,Z+22 + ldd r18,Z+23 + ldd r11,Z+32 + ldd r10,Z+33 + ldd r9,Z+34 + ldd r8,Z+35 + ldd r7,Z+36 + ldd r6,Z+37 + ldd r5,Z+38 + ldd r4,Z+39 +20: + eor r18,r22 + ldd r23,Z+7 + ldd r12,Z+15 + ldd r13,Z+31 + eor r23,r4 + eor r4,r13 + eor r18,r12 + mov r14,r23 + mov r15,r12 + mov r24,r18 + mov r25,r13 + mov r16,r4 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r18 + and r24,r13 + and r25,r4 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r18,r25 + eor r13,r16 + eor r4,r14 + eor r12,r23 + eor r23,r4 + eor r13,r18 + com r18 + std Z+7,r23 + std Z+15,r12 + std Z+31,r13 + std Z+39,r4 + ldd r23,Z+6 + ldd r12,Z+14 + ldd r13,Z+30 + eor r23,r5 + eor r5,r13 + eor r19,r12 + mov r14,r23 + mov r15,r12 + mov r24,r19 + mov r25,r13 + mov r16,r5 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 
+ and r15,r19 + and r24,r13 + and r25,r5 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r19,r25 + eor r13,r16 + eor r5,r14 + eor r12,r23 + eor r23,r5 + eor r13,r19 + com r19 + std Z+6,r23 + std Z+14,r12 + std Z+30,r13 + std Z+38,r5 + ldd r23,Z+5 + ldd r12,Z+13 + ldd r13,Z+29 + eor r23,r6 + eor r6,r13 + eor r20,r12 + mov r14,r23 + mov r15,r12 + mov r24,r20 + mov r25,r13 + mov r16,r6 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r20 + and r24,r13 + and r25,r6 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r20,r25 + eor r13,r16 + eor r6,r14 + eor r12,r23 + eor r23,r6 + eor r13,r20 + com r20 + std Z+5,r23 + std Z+13,r12 + std Z+29,r13 + std Z+37,r6 + ldd r23,Z+4 + ldd r12,Z+12 + ldd r13,Z+28 + eor r23,r7 + eor r7,r13 + eor r21,r12 + mov r14,r23 + mov r15,r12 + mov r24,r21 + mov r25,r13 + mov r16,r7 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r21 + and r24,r13 + and r25,r7 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r21,r25 + eor r13,r16 + eor r7,r14 + eor r12,r23 + eor r23,r7 + eor r13,r21 + com r21 + std Z+4,r23 + std Z+12,r12 + std Z+28,r13 + std Z+36,r7 + ldd r23,Z+3 + ldd r12,Z+11 + ldd r13,Z+27 + eor r23,r8 + eor r8,r13 + eor r26,r12 + mov r14,r23 + mov r15,r12 + mov r24,r26 + mov r25,r13 + mov r16,r8 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r26 + and r24,r13 + and r25,r8 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r26,r25 + eor r13,r16 + eor r8,r14 + eor r12,r23 + eor r23,r8 + eor r13,r26 + com r26 + std Z+3,r23 + std Z+11,r12 + std Z+27,r13 + std Z+35,r8 + ldd r23,Z+2 + ldd r12,Z+10 + ldd r13,Z+26 + eor r23,r9 + eor r9,r13 + eor r27,r12 + mov r14,r23 + mov r15,r12 + mov r24,r27 + mov r25,r13 + mov r16,r9 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r27 + and r24,r13 + and r25,r9 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r27,r25 + eor r13,r16 + eor r9,r14 + eor r12,r23 + eor r23,r9 + eor r13,r27 + com r27 + std Z+2,r23 + std Z+10,r12 + std Z+26,r13 + std Z+34,r9 + ldd r23,Z+1 + ldd r12,Z+9 + ldd r13,Z+25 + eor r23,r10 + eor r10,r13 + eor r2,r12 + mov r14,r23 + mov r15,r12 + mov r24,r2 + mov r25,r13 + mov r16,r10 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r2 + and r24,r13 + and r25,r10 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r2,r25 + eor r13,r16 + eor r10,r14 + eor r12,r23 + eor r23,r10 + eor r13,r2 + com r2 + std Z+1,r23 + std Z+9,r12 + std Z+25,r13 + std Z+33,r10 + ld r23,Z + ldd r12,Z+8 + ldd r13,Z+24 + eor r23,r11 + eor r11,r13 + eor r3,r12 + mov r14,r23 + mov r15,r12 + mov r24,r3 + mov r25,r13 + mov r16,r11 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r3 + and r24,r13 + and r25,r11 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r3,r25 + eor r13,r16 + eor r11,r14 + eor r12,r23 + eor r23,r11 + eor r13,r3 + com r3 + st Z,r23 + std Z+8,r12 + std Z+24,r13 + std Z+32,r11 + ld r11,Z + ldd r10,Z+1 + ldd r9,Z+2 + ldd r8,Z+3 + ldd r7,Z+4 + ldd r6,Z+5 + ldd r5,Z+6 + ldd r4,Z+7 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r14 + mov r14,r24 + mov r24,r16 + mov r16,r0 + mov r0,r13 + mov r13,r15 + mov r15,r25 + mov r25,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r17,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + 
eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r4 + mov r0,r5 + push r6 + mov r4,r7 + mov r5,r8 + mov r6,r9 + mov r7,r10 + mov r8,r11 + pop r11 + mov r10,r0 + mov r9,r23 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + st Z,r11 + std Z+1,r10 + std Z+2,r9 + std Z+3,r8 + std Z+4,r7 + std Z+5,r6 + std Z+6,r5 + std Z+7,r4 + ldd r11,Z+8 + ldd r10,Z+9 + ldd r9,Z+10 + ldd r8,Z+11 + ldd r7,Z+12 + ldd r6,Z+13 + ldd r5,Z+14 + ldd r4,Z+15 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r9 + mov r0,r10 + push r11 + mov r11,r8 + mov r10,r7 + mov r9,r6 + mov r8,r5 + mov r7,r4 + pop r6 + mov r5,r0 + mov r4,r23 + lsl r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + adc r4,r1 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + std Z+8,r11 + std Z+9,r10 + std Z+10,r9 + std Z+11,r8 + std Z+12,r7 + std Z+13,r6 + std Z+14,r5 + std Z+15,r4 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + bst r12,0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + bld r17,7 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + eor r24,r26 + eor r25,r27 + eor r16,r2 + eor r17,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r2 + rol r3 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r2 + rol r3 + adc r18,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + eor r26,r24 + eor r27,r25 + eor r2,r16 + eor r3,r17 + ldd r11,Z+24 + ldd r10,Z+25 + ldd r9,Z+26 + ldd r8,Z+27 + ldd r7,Z+28 + ldd r6,Z+29 + ldd r5,Z+30 + ldd r4,Z+31 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r17,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r0,r4 + mov r4,r6 + mov r6,r8 + mov r8,r10 + mov r10,r0 + mov r0,r5 + mov r5,r7 + mov r7,r9 + mov r9,r11 + mov r11,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + std Z+24,r11 + std Z+25,r10 + std Z+26,r9 + std Z+27,r8 + std Z+28,r7 + std Z+29,r6 + std Z+30,r5 + std Z+31,r4 + ldd r11,Z+32 + ldd r10,Z+33 + ldd r9,Z+34 + ldd r8,Z+35 + 
ldd r7,Z+36 + ldd r6,Z+37 + ldd r5,Z+38 + ldd r4,Z+39 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r9 + mov r0,r10 + push r11 + mov r11,r8 + mov r10,r7 + mov r9,r6 + mov r8,r5 + mov r7,r4 + pop r6 + mov r5,r0 + mov r4,r23 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + subi r22,15 + ldi r25,60 + cpse r22,r25 + rjmp 20b + std Z+16,r3 + std Z+17,r2 + std Z+18,r27 + std Z+19,r26 + std Z+20,r21 + std Z+21,r20 + std Z+22,r19 + std Z+23,r18 + std Z+32,r11 + std Z+33,r10 + std Z+34,r9 + std Z+35,r8 + std Z+36,r7 + std Z+37,r6 + std Z+38,r5 + std Z+39,r4 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size ascon_permute, .-ascon_permute + +#endif diff --git a/ascon/Implementations/crypto_hash/asconhashv12/rhys/internal-ascon.c b/ascon/Implementations/crypto_hash/asconhashv12/rhys/internal-ascon.c new file mode 100644 index 0000000..657aabe --- /dev/null +++ b/ascon/Implementations/crypto_hash/asconhashv12/rhys/internal-ascon.c @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "internal-ascon.h" + +#if !defined(__AVR__) + +void ascon_permute(ascon_state_t *state, uint8_t first_round) +{ + uint64_t t0, t1, t2, t3, t4; +#if defined(LW_UTIL_LITTLE_ENDIAN) + uint64_t x0 = be_load_word64(state->B); + uint64_t x1 = be_load_word64(state->B + 8); + uint64_t x2 = be_load_word64(state->B + 16); + uint64_t x3 = be_load_word64(state->B + 24); + uint64_t x4 = be_load_word64(state->B + 32); +#else + uint64_t x0 = state->S[0]; + uint64_t x1 = state->S[1]; + uint64_t x2 = state->S[2]; + uint64_t x3 = state->S[3]; + uint64_t x4 = state->S[4]; +#endif + while (first_round < 12) { + /* Add the round constant to the state */ + x2 ^= ((0x0F - first_round) << 4) | first_round; + + /* Substitution layer - apply the s-box using bit-slicing + * according to the algorithm recommended in the specification */ + x0 ^= x4; x4 ^= x3; x2 ^= x1; + t0 = ~x0; t1 = ~x1; t2 = ~x2; t3 = ~x3; t4 = ~x4; + t0 &= x1; t1 &= x2; t2 &= x3; t3 &= x4; t4 &= x0; + x0 ^= t1; x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t0; + x1 ^= x0; x0 ^= x4; x3 ^= x2; x2 = ~x2; + + /* Linear diffusion layer */ + x0 ^= rightRotate19_64(x0) ^ rightRotate28_64(x0); + x1 ^= rightRotate61_64(x1) ^ rightRotate39_64(x1); + x2 ^= rightRotate1_64(x2) ^ rightRotate6_64(x2); + x3 ^= rightRotate10_64(x3) ^ rightRotate17_64(x3); + x4 ^= rightRotate7_64(x4) ^ rightRotate41_64(x4); + + /* Move onto the next round */ + ++first_round; + } +#if defined(LW_UTIL_LITTLE_ENDIAN) + be_store_word64(state->B, x0); + be_store_word64(state->B + 8, x1); + be_store_word64(state->B + 16, x2); + be_store_word64(state->B + 24, x3); + be_store_word64(state->B + 32, x4); +#else + state->S[0] = x0; + state->S[1] = x1; + state->S[2] = x2; + state->S[3] = x3; + state->S[4] = x4; +#endif +} + +#endif /* !__AVR__ */ diff --git a/ascon/Implementations/crypto_hash/asconhashv12/rhys/internal-ascon.h b/ascon/Implementations/crypto_hash/asconhashv12/rhys/internal-ascon.h new file mode 100644 index 0000000..d3fa3ca --- /dev/null +++ b/ascon/Implementations/crypto_hash/asconhashv12/rhys/internal-ascon.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_ASCON_H +#define LW_INTERNAL_ASCON_H + +#include "internal-util.h" + +/** + * \file internal-ascon.h + * \brief Internal implementation of the ASCON permutation. 
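One detail of ascon_permute() above that is easy to miss: the constant for round r is ((0x0F - r) << 4) | r, so consecutive constants differ by exactly 0x0F. That is why the AVR listing earlier in this patch can combine the constant into r22 once, step to the next round's constant with a single subi r22,15, and stop when the value reaches 60 (0x3C, one step past the final constant 0x4B). A small sketch that simply enumerates the constants for rounds 0..11:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Round constants of the ASCON permutation for rounds 0..11 */
        static const uint8_t expected[12] = {
            0xF0, 0xE1, 0xD2, 0xC3, 0xB4, 0xA5,
            0x96, 0x87, 0x78, 0x69, 0x5A, 0x4B
        };
        unsigned round;
        for (round = 0; round < 12; ++round) {
            uint8_t rc = (uint8_t)(((0x0F - round) << 4) | round);
            assert(rc == expected[round]);
            if (round > 0)   /* constants descend in steps of 0x0F */
                assert((uint8_t)(expected[round - 1] - rc) == 0x0F);
            printf("round %2u: 0x%02X\n", round, rc);
        }
        return 0;
    }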
+ * + * References: http://competitions.cr.yp.to/round3/asconv12.pdf, + * http://ascon.iaik.tugraz.at/ + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Structure of the internal state of the ASCON permutation. + */ +typedef union +{ + uint64_t S[5]; /**< Words of the state */ + uint8_t B[40]; /**< Bytes of the state */ + +} ascon_state_t; + +/** + * \brief Permutes the ASCON state. + * + * \param state The ASCON state to be permuted. + * \param first_round The first round (of 12) to be performed; 0, 4, or 6. + * + * The input and output \a state will be in big-endian byte order. + */ +void ascon_permute(ascon_state_t *state, uint8_t first_round); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ascon/Implementations/crypto_hash/asconhashv12/rhys/internal-util.h b/ascon/Implementations/crypto_hash/asconhashv12/rhys/internal-util.h new file mode 100644 index 0000000..e30166d --- /dev/null +++ b/ascon/Implementations/crypto_hash/asconhashv12/rhys/internal-util.h @@ -0,0 +1,702 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_UTIL_H +#define LW_INTERNAL_UTIL_H + +#include + +/* Figure out how to inline functions using this C compiler */ +#if defined(__STDC__) && __STDC_VERSION__ >= 199901L +#define STATIC_INLINE static inline +#elif defined(__GNUC__) || defined(__clang__) +#define STATIC_INLINE static __inline__ +#else +#define STATIC_INLINE static +#endif + +/* Try to figure out whether the CPU is little-endian or big-endian. + * May need to modify this to include new compiler-specific defines. 
+ * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your + * compiler flags when you compile this library */ +#if defined(__x86_64) || defined(__x86_64__) || \ + defined(__i386) || defined(__i386__) || \ + defined(__AVR__) || defined(__arm) || defined(__arm__) || \ + defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ + defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ + defined(__LITTLE_ENDIAN__) +#define LW_UTIL_LITTLE_ENDIAN 1 +#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ + defined(__BIG_ENDIAN__) +/* Big endian */ +#else +#error "Cannot determine the endianess of this platform" +#endif + +/* Helper macros to load and store values while converting endian-ness */ + +/* Load a big-endian 32-bit word from a byte buffer */ +#define be_load_word32(ptr) \ + ((((uint32_t)((ptr)[0])) << 24) | \ + (((uint32_t)((ptr)[1])) << 16) | \ + (((uint32_t)((ptr)[2])) << 8) | \ + ((uint32_t)((ptr)[3]))) + +/* Store a big-endian 32-bit word into a byte buffer */ +#define be_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 24); \ + (ptr)[1] = (uint8_t)(_x >> 16); \ + (ptr)[2] = (uint8_t)(_x >> 8); \ + (ptr)[3] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 32-bit word from a byte buffer */ +#define le_load_word32(ptr) \ + ((((uint32_t)((ptr)[3])) << 24) | \ + (((uint32_t)((ptr)[2])) << 16) | \ + (((uint32_t)((ptr)[1])) << 8) | \ + ((uint32_t)((ptr)[0]))) + +/* Store a little-endian 32-bit word into a byte buffer */ +#define le_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + } while (0) + +/* Load a big-endian 64-bit word from a byte buffer */ +#define be_load_word64(ptr) \ + ((((uint64_t)((ptr)[0])) << 56) | \ + (((uint64_t)((ptr)[1])) << 48) | \ + (((uint64_t)((ptr)[2])) << 40) | \ + (((uint64_t)((ptr)[3])) << 32) | \ + (((uint64_t)((ptr)[4])) << 24) | \ + (((uint64_t)((ptr)[5])) << 16) | \ + (((uint64_t)((ptr)[6])) << 8) | \ + ((uint64_t)((ptr)[7]))) + +/* Store a big-endian 64-bit word into a byte buffer */ +#define be_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 56); \ + (ptr)[1] = (uint8_t)(_x >> 48); \ + (ptr)[2] = (uint8_t)(_x >> 40); \ + (ptr)[3] = (uint8_t)(_x >> 32); \ + (ptr)[4] = (uint8_t)(_x >> 24); \ + (ptr)[5] = (uint8_t)(_x >> 16); \ + (ptr)[6] = (uint8_t)(_x >> 8); \ + (ptr)[7] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 64-bit word from a byte buffer */ +#define le_load_word64(ptr) \ + ((((uint64_t)((ptr)[7])) << 56) | \ + (((uint64_t)((ptr)[6])) << 48) | \ + (((uint64_t)((ptr)[5])) << 40) | \ + (((uint64_t)((ptr)[4])) << 32) | \ + (((uint64_t)((ptr)[3])) << 24) | \ + (((uint64_t)((ptr)[2])) << 16) | \ + (((uint64_t)((ptr)[1])) << 8) | \ + ((uint64_t)((ptr)[0]))) + +/* Store a little-endian 64-bit word into a byte buffer */ +#define le_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + (ptr)[4] = (uint8_t)(_x >> 32); \ + (ptr)[5] = (uint8_t)(_x >> 40); \ + (ptr)[6] = (uint8_t)(_x >> 48); \ + (ptr)[7] = (uint8_t)(_x >> 56); \ + } while (0) + +/* Load a big-endian 16-bit word from a byte buffer */ +#define be_load_word16(ptr) \ + ((((uint16_t)((ptr)[0])) << 8) | \ + ((uint16_t)((ptr)[1]))) + +/* Store a 
big-endian 16-bit word into a byte buffer */ +#define be_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 8); \ + (ptr)[1] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 16-bit word from a byte buffer */ +#define le_load_word16(ptr) \ + ((((uint16_t)((ptr)[1])) << 8) | \ + ((uint16_t)((ptr)[0]))) + +/* Store a little-endian 16-bit word into a byte buffer */ +#define le_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + } while (0) + +/* XOR a source byte buffer against a destination */ +#define lw_xor_block(dest, src, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ ^= *_src++; \ + --_len; \ + } \ + } while (0) + +/* XOR two source byte buffers and put the result in a destination buffer */ +#define lw_xor_block_2_src(dest, src1, src2, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ = *_src1++ ^ *_src2++; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time */ +#define lw_xor_block_2_dest(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest2++ = (*_dest++ ^= *_src++); \ + --_len; \ + } \ + } while (0) + +/* XOR two byte buffers and write to a destination while at the same + * time copying the contents of src2 to dest2 */ +#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src2++; \ + *_dest2++ = _temp; \ + *_dest++ = *_src1++ ^ _temp; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time. This version swaps the source value + * into the "dest" buffer */ +#define lw_xor_block_swap(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src++; \ + *_dest2++ = *_dest ^ _temp; \ + *_dest++ = _temp; \ + --_len; \ + } \ + } while (0) + +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + +/* Rotation macros for 32-bit arguments */ + +/* Generic left rotate */ +#define leftRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (32 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (32 - (bits))); \ + })) + +#if !LW_CRYPTO_ROTATE32_COMPOSED + +/* Left rotate by a specific number of bits.
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1(a) (leftRotate((a), 1)) +#define leftRotate2(a) (leftRotate((a), 2)) +#define leftRotate3(a) (leftRotate((a), 3)) +#define leftRotate4(a) (leftRotate((a), 4)) +#define leftRotate5(a) (leftRotate((a), 5)) +#define leftRotate6(a) (leftRotate((a), 6)) +#define leftRotate7(a) (leftRotate((a), 7)) +#define leftRotate8(a) (leftRotate((a), 8)) +#define leftRotate9(a) (leftRotate((a), 9)) +#define leftRotate10(a) (leftRotate((a), 10)) +#define leftRotate11(a) (leftRotate((a), 11)) +#define leftRotate12(a) (leftRotate((a), 12)) +#define leftRotate13(a) (leftRotate((a), 13)) +#define leftRotate14(a) (leftRotate((a), 14)) +#define leftRotate15(a) (leftRotate((a), 15)) +#define leftRotate16(a) (leftRotate((a), 16)) +#define leftRotate17(a) (leftRotate((a), 17)) +#define leftRotate18(a) (leftRotate((a), 18)) +#define leftRotate19(a) (leftRotate((a), 19)) +#define leftRotate20(a) (leftRotate((a), 20)) +#define leftRotate21(a) (leftRotate((a), 21)) +#define leftRotate22(a) (leftRotate((a), 22)) +#define leftRotate23(a) (leftRotate((a), 23)) +#define leftRotate24(a) (leftRotate((a), 24)) +#define leftRotate25(a) (leftRotate((a), 25)) +#define leftRotate26(a) (leftRotate((a), 26)) +#define leftRotate27(a) (leftRotate((a), 27)) +#define leftRotate28(a) (leftRotate((a), 28)) +#define leftRotate29(a) (leftRotate((a), 29)) +#define leftRotate30(a) (leftRotate((a), 30)) +#define leftRotate31(a) (leftRotate((a), 31)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1(a) (rightRotate((a), 1)) +#define rightRotate2(a) (rightRotate((a), 2)) +#define rightRotate3(a) (rightRotate((a), 3)) +#define rightRotate4(a) (rightRotate((a), 4)) +#define rightRotate5(a) (rightRotate((a), 5)) +#define rightRotate6(a) (rightRotate((a), 6)) +#define rightRotate7(a) (rightRotate((a), 7)) +#define rightRotate8(a) (rightRotate((a), 8)) +#define rightRotate9(a) (rightRotate((a), 9)) +#define rightRotate10(a) (rightRotate((a), 10)) +#define rightRotate11(a) (rightRotate((a), 11)) +#define rightRotate12(a) (rightRotate((a), 12)) +#define rightRotate13(a) (rightRotate((a), 13)) +#define rightRotate14(a) (rightRotate((a), 14)) +#define rightRotate15(a) (rightRotate((a), 15)) +#define rightRotate16(a) (rightRotate((a), 16)) +#define rightRotate17(a) (rightRotate((a), 17)) +#define rightRotate18(a) (rightRotate((a), 18)) +#define rightRotate19(a) (rightRotate((a), 19)) +#define rightRotate20(a) (rightRotate((a), 20)) +#define rightRotate21(a) (rightRotate((a), 21)) +#define rightRotate22(a) (rightRotate((a), 22)) +#define rightRotate23(a) (rightRotate((a), 23)) +#define rightRotate24(a) (rightRotate((a), 24)) +#define rightRotate25(a) (rightRotate((a), 25)) +#define rightRotate26(a) (rightRotate((a), 26)) +#define rightRotate27(a) (rightRotate((a), 27)) +#define rightRotate28(a) (rightRotate((a), 28)) +#define rightRotate29(a) (rightRotate((a), 29)) +#define rightRotate30(a) (rightRotate((a), 30)) +#define rightRotate31(a) (rightRotate((a), 31)) + +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ 
+#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Rotation macros for 64-bit arguments */ + +/* Generic left rotate */ +#define leftRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (64 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (64 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_64(a) (leftRotate_64((a), 1)) +#define leftRotate2_64(a) (leftRotate_64((a), 2)) +#define leftRotate3_64(a) (leftRotate_64((a), 3)) +#define leftRotate4_64(a) (leftRotate_64((a), 4)) +#define leftRotate5_64(a) (leftRotate_64((a), 5)) +#define leftRotate6_64(a) (leftRotate_64((a), 6)) +#define leftRotate7_64(a) (leftRotate_64((a), 7)) +#define leftRotate8_64(a) (leftRotate_64((a), 8)) +#define leftRotate9_64(a) (leftRotate_64((a), 9)) +#define leftRotate10_64(a) (leftRotate_64((a), 10)) +#define leftRotate11_64(a) (leftRotate_64((a), 11)) +#define leftRotate12_64(a) (leftRotate_64((a), 12)) +#define leftRotate13_64(a) (leftRotate_64((a), 13)) +#define leftRotate14_64(a) (leftRotate_64((a), 14)) +#define leftRotate15_64(a) (leftRotate_64((a), 15)) +#define leftRotate16_64(a) (leftRotate_64((a), 16)) +#define leftRotate17_64(a) (leftRotate_64((a), 17)) +#define leftRotate18_64(a) (leftRotate_64((a), 18)) +#define leftRotate19_64(a) (leftRotate_64((a), 19)) +#define leftRotate20_64(a) (leftRotate_64((a), 20)) +#define leftRotate21_64(a) (leftRotate_64((a), 21)) +#define leftRotate22_64(a) (leftRotate_64((a), 22)) +#define leftRotate23_64(a) (leftRotate_64((a), 23)) +#define leftRotate24_64(a) (leftRotate_64((a), 24)) +#define leftRotate25_64(a) (leftRotate_64((a), 25)) +#define leftRotate26_64(a) (leftRotate_64((a), 26)) +#define leftRotate27_64(a) (leftRotate_64((a), 27)) +#define leftRotate28_64(a) (leftRotate_64((a), 28)) +#define leftRotate29_64(a) (leftRotate_64((a), 29)) +#define leftRotate30_64(a) (leftRotate_64((a), 30)) +#define leftRotate31_64(a) (leftRotate_64((a), 31)) +#define leftRotate32_64(a) (leftRotate_64((a), 32)) +#define leftRotate33_64(a) (leftRotate_64((a), 33)) +#define leftRotate34_64(a) (leftRotate_64((a), 34)) +#define leftRotate35_64(a) (leftRotate_64((a), 35)) +#define leftRotate36_64(a) (leftRotate_64((a), 36)) +#define leftRotate37_64(a) (leftRotate_64((a), 37)) +#define leftRotate38_64(a) (leftRotate_64((a), 38)) +#define leftRotate39_64(a) (leftRotate_64((a), 39)) +#define leftRotate40_64(a) (leftRotate_64((a), 40)) +#define leftRotate41_64(a) (leftRotate_64((a), 41)) +#define leftRotate42_64(a) (leftRotate_64((a), 42)) +#define leftRotate43_64(a) (leftRotate_64((a), 43)) +#define leftRotate44_64(a) (leftRotate_64((a), 44)) +#define leftRotate45_64(a) (leftRotate_64((a), 45)) +#define leftRotate46_64(a) (leftRotate_64((a), 46)) +#define leftRotate47_64(a) (leftRotate_64((a), 47)) +#define leftRotate48_64(a) (leftRotate_64((a), 48)) +#define leftRotate49_64(a) (leftRotate_64((a), 49)) +#define leftRotate50_64(a) (leftRotate_64((a), 50)) +#define leftRotate51_64(a) (leftRotate_64((a), 51)) +#define leftRotate52_64(a) (leftRotate_64((a), 52)) +#define leftRotate53_64(a) (leftRotate_64((a), 53)) +#define leftRotate54_64(a) (leftRotate_64((a), 54)) +#define leftRotate55_64(a) (leftRotate_64((a), 55)) +#define leftRotate56_64(a) (leftRotate_64((a), 56)) +#define leftRotate57_64(a) (leftRotate_64((a), 57)) +#define leftRotate58_64(a) (leftRotate_64((a), 58)) +#define leftRotate59_64(a) (leftRotate_64((a), 59)) +#define leftRotate60_64(a) (leftRotate_64((a), 60)) +#define leftRotate61_64(a) (leftRotate_64((a), 61)) +#define leftRotate62_64(a) (leftRotate_64((a), 62)) +#define leftRotate63_64(a) (leftRotate_64((a), 63)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_64(a) (rightRotate_64((a), 1)) +#define rightRotate2_64(a) (rightRotate_64((a), 2)) +#define rightRotate3_64(a) (rightRotate_64((a), 3)) +#define rightRotate4_64(a) (rightRotate_64((a), 4)) +#define rightRotate5_64(a) (rightRotate_64((a), 5)) +#define rightRotate6_64(a) (rightRotate_64((a), 6)) +#define rightRotate7_64(a) (rightRotate_64((a), 7)) +#define rightRotate8_64(a) (rightRotate_64((a), 8)) +#define rightRotate9_64(a) (rightRotate_64((a), 9)) +#define rightRotate10_64(a) (rightRotate_64((a), 10)) +#define rightRotate11_64(a) (rightRotate_64((a), 11)) +#define rightRotate12_64(a) (rightRotate_64((a), 12)) +#define rightRotate13_64(a) (rightRotate_64((a), 13)) +#define rightRotate14_64(a) (rightRotate_64((a), 14)) +#define rightRotate15_64(a) (rightRotate_64((a), 15)) +#define rightRotate16_64(a) (rightRotate_64((a), 16)) +#define rightRotate17_64(a) (rightRotate_64((a), 17)) +#define rightRotate18_64(a) (rightRotate_64((a), 18)) +#define rightRotate19_64(a) (rightRotate_64((a), 19)) +#define rightRotate20_64(a) (rightRotate_64((a), 20)) +#define rightRotate21_64(a) (rightRotate_64((a), 21)) +#define rightRotate22_64(a) (rightRotate_64((a), 22)) +#define rightRotate23_64(a) (rightRotate_64((a), 23)) +#define rightRotate24_64(a) (rightRotate_64((a), 24)) +#define rightRotate25_64(a) (rightRotate_64((a), 25)) +#define rightRotate26_64(a) (rightRotate_64((a), 26)) +#define rightRotate27_64(a) (rightRotate_64((a), 27)) +#define rightRotate28_64(a) (rightRotate_64((a), 28)) +#define rightRotate29_64(a) (rightRotate_64((a), 29)) +#define rightRotate30_64(a) (rightRotate_64((a), 30)) +#define rightRotate31_64(a) (rightRotate_64((a), 31)) +#define rightRotate32_64(a) (rightRotate_64((a), 32)) +#define rightRotate33_64(a) (rightRotate_64((a), 33)) +#define rightRotate34_64(a) (rightRotate_64((a), 34)) +#define rightRotate35_64(a) (rightRotate_64((a), 35)) +#define rightRotate36_64(a) (rightRotate_64((a), 36)) +#define rightRotate37_64(a) (rightRotate_64((a), 37)) +#define rightRotate38_64(a) (rightRotate_64((a), 38)) +#define rightRotate39_64(a) (rightRotate_64((a), 39)) +#define rightRotate40_64(a) (rightRotate_64((a), 40)) +#define rightRotate41_64(a) (rightRotate_64((a), 41)) +#define rightRotate42_64(a) (rightRotate_64((a), 42)) +#define rightRotate43_64(a) (rightRotate_64((a), 43)) +#define rightRotate44_64(a) (rightRotate_64((a), 44)) +#define rightRotate45_64(a) (rightRotate_64((a), 45)) +#define rightRotate46_64(a) (rightRotate_64((a), 46)) +#define rightRotate47_64(a) (rightRotate_64((a), 47)) +#define rightRotate48_64(a) (rightRotate_64((a), 48)) +#define rightRotate49_64(a) (rightRotate_64((a), 49)) +#define rightRotate50_64(a) (rightRotate_64((a), 50)) +#define rightRotate51_64(a) (rightRotate_64((a), 51)) +#define rightRotate52_64(a) (rightRotate_64((a), 52)) +#define rightRotate53_64(a) (rightRotate_64((a), 53)) +#define rightRotate54_64(a) (rightRotate_64((a), 54)) +#define rightRotate55_64(a) (rightRotate_64((a), 55)) +#define rightRotate56_64(a) (rightRotate_64((a), 56)) +#define rightRotate57_64(a) (rightRotate_64((a), 57)) +#define rightRotate58_64(a) (rightRotate_64((a), 58)) +#define rightRotate59_64(a) (rightRotate_64((a), 59)) +#define rightRotate60_64(a) (rightRotate_64((a), 60)) +#define rightRotate61_64(a) (rightRotate_64((a), 61)) +#define rightRotate62_64(a) (rightRotate_64((a), 62)) +#define rightRotate63_64(a) (rightRotate_64((a), 63)) + +/* 
Rotate a 16-bit value left by a number of bits */ +#define leftRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (16 - (bits))); \ + })) + +/* Rotate a 16-bit value right by a number of bits */ +#define rightRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (16 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_16(a) (leftRotate_16((a), 1)) +#define leftRotate2_16(a) (leftRotate_16((a), 2)) +#define leftRotate3_16(a) (leftRotate_16((a), 3)) +#define leftRotate4_16(a) (leftRotate_16((a), 4)) +#define leftRotate5_16(a) (leftRotate_16((a), 5)) +#define leftRotate6_16(a) (leftRotate_16((a), 6)) +#define leftRotate7_16(a) (leftRotate_16((a), 7)) +#define leftRotate8_16(a) (leftRotate_16((a), 8)) +#define leftRotate9_16(a) (leftRotate_16((a), 9)) +#define leftRotate10_16(a) (leftRotate_16((a), 10)) +#define leftRotate11_16(a) (leftRotate_16((a), 11)) +#define leftRotate12_16(a) (leftRotate_16((a), 12)) +#define leftRotate13_16(a) (leftRotate_16((a), 13)) +#define leftRotate14_16(a) (leftRotate_16((a), 14)) +#define leftRotate15_16(a) (leftRotate_16((a), 15)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_16(a) (rightRotate_16((a), 1)) +#define rightRotate2_16(a) (rightRotate_16((a), 2)) +#define rightRotate3_16(a) (rightRotate_16((a), 3)) +#define rightRotate4_16(a) (rightRotate_16((a), 4)) +#define rightRotate5_16(a) (rightRotate_16((a), 5)) +#define rightRotate6_16(a) (rightRotate_16((a), 6)) +#define rightRotate7_16(a) (rightRotate_16((a), 7)) +#define rightRotate8_16(a) (rightRotate_16((a), 8)) +#define rightRotate9_16(a) (rightRotate_16((a), 9)) +#define rightRotate10_16(a) (rightRotate_16((a), 10)) +#define rightRotate11_16(a) (rightRotate_16((a), 11)) +#define rightRotate12_16(a) (rightRotate_16((a), 12)) +#define rightRotate13_16(a) (rightRotate_16((a), 13)) +#define rightRotate14_16(a) (rightRotate_16((a), 14)) +#define rightRotate15_16(a) (rightRotate_16((a), 15)) + +/* Rotate an 8-bit value left by a number of bits */ +#define leftRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (8 - (bits))); \ + })) + +/* Rotate an 8-bit value right by a number of bits */ +#define rightRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (8 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_8(a) (leftRotate_8((a), 1)) +#define leftRotate2_8(a) (leftRotate_8((a), 2)) +#define leftRotate3_8(a) (leftRotate_8((a), 3)) +#define leftRotate4_8(a) (leftRotate_8((a), 4)) +#define leftRotate5_8(a) (leftRotate_8((a), 5)) +#define leftRotate6_8(a) (leftRotate_8((a), 6)) +#define leftRotate7_8(a) (leftRotate_8((a), 7)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_8(a) (rightRotate_8((a), 1)) +#define rightRotate2_8(a) (rightRotate_8((a), 2)) +#define rightRotate3_8(a) (rightRotate_8((a), 3)) +#define rightRotate4_8(a) (rightRotate_8((a), 4)) +#define rightRotate5_8(a) (rightRotate_8((a), 5)) +#define rightRotate6_8(a) (rightRotate_8((a), 6)) +#define rightRotate7_8(a) (rightRotate_8((a), 7)) + +#endif diff --git a/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/aead-common.c b/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/aead-common.h b/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include <stddef.h> - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters.
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Absorbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions.
Extensible Output Functions (XOF's) should - * provide the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/api.h b/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/api.h deleted file mode 100644 index ae8c7f6..0000000 --- a/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/api.h +++ /dev/null @@ -1 +0,0 @@ -#define CRYPTO_BYTES 32 diff --git a/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/ascon-xof.c b/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/ascon-xof.c deleted file mode 100644 index 1d1e71f..0000000 --- a/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/ascon-xof.c +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd.
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "ascon128.h" -#include "internal-ascon.h" -#include <string.h> - -#define ASCON_XOF_RATE 8 -#define ascon_xof_permute() \ - ascon_permute((ascon_state_t *)(state->s.state), 0) - -aead_hash_algorithm_t const ascon_xof_algorithm = { - "ASCON-XOF", - sizeof(ascon_hash_state_t), - ASCON_HASH_SIZE, - AEAD_FLAG_NONE, - ascon_xof, - (aead_hash_init_t)ascon_xof_init, - 0, /* update */ - 0, /* finalize */ - (aead_xof_absorb_t)ascon_xof_absorb, - (aead_xof_squeeze_t)ascon_xof_squeeze -}; - -int ascon_xof - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - ascon_hash_state_t state; - ascon_xof_init(&state); - ascon_xof_absorb(&state, in, inlen); - ascon_xof_squeeze(&state, out, ASCON_HASH_SIZE); - return 0; -} - -void ascon_xof_init(ascon_hash_state_t *state) -{ - static unsigned char const xof_iv[40] = { - 0xb5, 0x7e, 0x27, 0x3b, 0x81, 0x4c, 0xd4, 0x16, - 0x2b, 0x51, 0x04, 0x25, 0x62, 0xae, 0x24, 0x20, - 0x66, 0xa3, 0xa7, 0x76, 0x8d, 0xdf, 0x22, 0x18, - 0x5a, 0xad, 0x0a, 0x7a, 0x81, 0x53, 0x65, 0x0c, - 0x4f, 0x3e, 0x0e, 0x32, 0x53, 0x94, 0x93, 0xb6 - }; - memcpy(state->s.state, xof_iv, sizeof(xof_iv)); - state->s.count = 0; - state->s.mode = 0; -} - -void ascon_xof_absorb - (ascon_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - if (state->s.mode) { - /* We were squeezing output - go back to the absorb phase */ - state->s.mode = 0; - state->s.count = 0; - ascon_xof_permute(); - } - ascon_hash_update(state, in, inlen); -} - -void ascon_xof_squeeze - (ascon_hash_state_t *state, unsigned char *out, unsigned long long outlen) -{ - unsigned temp; - - /* Pad the final input block if we were still in the absorb phase */ - if (!state->s.mode) { - state->s.state[state->s.count] ^= 0x80; - state->s.count = 0; - state->s.mode = 1; - } - - /* Handle left-over partial blocks from last time */ - if (state->s.count) { - temp = ASCON_XOF_RATE - state->s.count; - if (temp > outlen) { - temp = (unsigned)outlen; - memcpy(out, state->s.state + state->s.count, temp); - state->s.count += temp; - return; - } - memcpy(out, state->s.state + state->s.count, temp); - out += temp; - outlen -= temp; - state->s.count = 0; - } - - /* Handle full blocks */ - while (outlen >= ASCON_XOF_RATE) { - ascon_xof_permute(); - memcpy(out, state->s.state, ASCON_XOF_RATE); - out += ASCON_XOF_RATE; - outlen -= ASCON_XOF_RATE; - } - - /* Handle the left-over block */ - if (outlen > 0) { - temp =
(unsigned)outlen; - ascon_xof_permute(); - memcpy(out, state->s.state, temp); - state->s.count = temp; - } -} diff --git a/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/ascon128.h b/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/ascon128.h deleted file mode 100644 index fd9db13..0000000 --- a/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/ascon128.h +++ /dev/null @@ -1,408 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ASCON_H -#define LWCRYPTO_ASCON_H - -#include "aead-common.h" - -/** - * \file ascon128.h - * \brief ASCON-128 encryption algorithm and related family members. - * - * The ASCON family consists of several related algorithms: - * - * \li ASCON-128 with a 128-bit key, a 128-bit nonce, a 128-bit authentication - * tag, and a block rate of 64 bits. - * \li ASCON-128a with a 128-bit key, a 128-bit nonce, a 128-bit authentication - * tag, and a block rate of 128 bits. This is faster than ASCON-128 but may - * not be as secure. - * \li ASCON-80pq with a 160-bit key, a 128-bit nonce, a 128-bit authentication - * tag, and a block rate of 64 bits. This is similar to ASCON-128 but has a - * 160-bit key instead which may be more resistant against quantum computers. - * \li ASCON-HASH with a 256-bit hash output. - * - * References: https://ascon.iaik.tugraz.at/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for ASCON-128 and ASCON-128a. - */ -#define ASCON128_KEY_SIZE 16 - -/** - * \brief Size of the nonce for ASCON-128 and ASCON-128a. - */ -#define ASCON128_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for ASCON-128 and ASCON-128a. - */ -#define ASCON128_TAG_SIZE 16 - -/** - * \brief Size of the key for ASCON-80pq. - */ -#define ASCON80PQ_KEY_SIZE 20 - -/** - * \brief Size of the nonce for ASCON-80pq. - */ -#define ASCON80PQ_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for ASCON-80pq. - */ -#define ASCON80PQ_TAG_SIZE 16 - -/** - * \brief Size of the hash output for ASCON-HASH. - */ -#define ASCON_HASH_SIZE 32 - -/** - * \brief State information for ASCON-HASH and ASCON-XOF incremental modes. 
- */ -typedef union -{ - struct { - unsigned char state[40]; /**< Current hash state */ - unsigned char count; /**< Number of bytes in the current block */ - unsigned char mode; /**< Hash mode: 0 for absorb, 1 for squeeze */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} ascon_hash_state_t; - -/** - * \brief Meta-information block for the ASCON-128 cipher. - */ -extern aead_cipher_t const ascon128_cipher; - -/** - * \brief Meta-information block for the ASCON-128a cipher. - */ -extern aead_cipher_t const ascon128a_cipher; - -/** - * \brief Meta-information block for the ASCON-80pq cipher. - */ -extern aead_cipher_t const ascon80pq_cipher; - -/** - * \brief Meta-information block for the ASCON-HASH algorithm. - */ -extern aead_hash_algorithm_t const ascon_hash_algorithm; - -/** - * \brief Meta-information block for the ASCON-XOF algorithm. - */ -extern aead_hash_algorithm_t const ascon_xof_algorithm; - -/** - * \brief Encrypts and authenticates a packet with ASCON-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa ascon128_aead_decrypt() - */ -int ascon128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ASCON-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa ascon128_aead_encrypt() - */ -int ascon128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ASCON-128a. 
- * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa ascon128a_aead_decrypt() - */ -int ascon128a_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ASCON-128a. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa ascon128a_aead_encrypt() - */ -int ascon128a_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ASCON-80pq. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 20 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa ascon80pq_aead_decrypt() - */ -int ascon80pq_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ASCON-80pq. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 20 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa ascon80pq_aead_encrypt() - */ -int ascon80pq_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with ASCON-HASH. - * - * \param out Buffer to receive the hash output which must be at least - * ASCON_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - * - * \sa ascon_hash_init(), ascon_hash_absorb(), ascon_hash_squeeze() - */ -int ascon_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an ASCON-HASH hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa ascon_hash_update(), ascon_hash_finalize(), ascon_hash() - */ -void ascon_hash_init(ascon_hash_state_t *state); - -/** - * \brief Updates an ASCON-HASH state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa ascon_hash_init(), ascon_hash_finalize() - */ -void ascon_hash_update - (ascon_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an ASCON-HASH hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 32-byte hash value. - * - * \sa ascon_hash_init(), ascon_hash_update() - */ -void ascon_hash_finalize - (ascon_hash_state_t *state, unsigned char *out); - -/** - * \brief Hashes a block of input data with ASCON-XOF and generates a - * fixed-length 32 byte output. - * - * \param out Buffer to receive the hash output which must be at least - * ASCON_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. 
- * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - * - * Use ascon_xof_squeeze() instead if you need variable-length XOF ouutput. - * - * \sa ascon_xof_init(), ascon_xof_absorb(), ascon_xof_squeeze() - */ -int ascon_xof - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an ASCON-XOF hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa ascon_xof_absorb(), ascon_xof_squeeze(), ascon_xof() - */ -void ascon_xof_init(ascon_hash_state_t *state); - -/** - * \brief Aborbs more input data into an ASCON-XOF state. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -void ascon_xof_absorb - (ascon_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Squeezes output data from an ASCON-XOF state. - * - * \param state Hash state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - * - * \sa ascon_xof_init(), ascon_xof_update() - */ -void ascon_xof_squeeze - (ascon_hash_state_t *state, unsigned char *out, unsigned long long outlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/hash.c b/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/hash.c deleted file mode 100644 index 6f37a9d..0000000 --- a/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/hash.c +++ /dev/null @@ -1,8 +0,0 @@ - -#include "ascon128.h" - -int crypto_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - return ascon_xof(out, in, inlen); -} diff --git a/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/internal-ascon-avr.S b/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/internal-ascon-avr.S deleted file mode 100644 index e8a4fb4..0000000 --- a/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/internal-ascon-avr.S +++ /dev/null @@ -1,778 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global ascon_permute - .type ascon_permute, @function -ascon_permute: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ldi r18,15 - sub r18,r22 - swap r18 - or r22,r18 - ldd r3,Z+16 - ldd r2,Z+17 - ldd r27,Z+18 - ldd r26,Z+19 - ldd r21,Z+20 - ldd r20,Z+21 - ldd r19,Z+22 - ldd r18,Z+23 - ldd r11,Z+32 - ldd r10,Z+33 - ldd r9,Z+34 - ldd r8,Z+35 - ldd r7,Z+36 - ldd r6,Z+37 - ldd r5,Z+38 - ldd r4,Z+39 -20: - eor r18,r22 - ldd r23,Z+7 - ldd r12,Z+15 - ldd r13,Z+31 - eor r23,r4 - eor r4,r13 - eor r18,r12 - mov r14,r23 - mov r15,r12 - mov r24,r18 - mov r25,r13 - mov r16,r4 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r18 - and r24,r13 - and r25,r4 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r18,r25 - eor r13,r16 - eor r4,r14 - eor r12,r23 - eor r23,r4 - eor r13,r18 - com r18 - std Z+7,r23 - std Z+15,r12 - std Z+31,r13 - std Z+39,r4 - ldd r23,Z+6 - ldd r12,Z+14 - ldd r13,Z+30 - eor r23,r5 - eor r5,r13 - eor r19,r12 - mov r14,r23 - mov r15,r12 - mov r24,r19 - mov r25,r13 - mov r16,r5 - com r14 - com r15 - com r24 - com 
r25 - com r16 - and r14,r12 - and r15,r19 - and r24,r13 - and r25,r5 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r19,r25 - eor r13,r16 - eor r5,r14 - eor r12,r23 - eor r23,r5 - eor r13,r19 - com r19 - std Z+6,r23 - std Z+14,r12 - std Z+30,r13 - std Z+38,r5 - ldd r23,Z+5 - ldd r12,Z+13 - ldd r13,Z+29 - eor r23,r6 - eor r6,r13 - eor r20,r12 - mov r14,r23 - mov r15,r12 - mov r24,r20 - mov r25,r13 - mov r16,r6 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r20 - and r24,r13 - and r25,r6 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r20,r25 - eor r13,r16 - eor r6,r14 - eor r12,r23 - eor r23,r6 - eor r13,r20 - com r20 - std Z+5,r23 - std Z+13,r12 - std Z+29,r13 - std Z+37,r6 - ldd r23,Z+4 - ldd r12,Z+12 - ldd r13,Z+28 - eor r23,r7 - eor r7,r13 - eor r21,r12 - mov r14,r23 - mov r15,r12 - mov r24,r21 - mov r25,r13 - mov r16,r7 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r21 - and r24,r13 - and r25,r7 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r21,r25 - eor r13,r16 - eor r7,r14 - eor r12,r23 - eor r23,r7 - eor r13,r21 - com r21 - std Z+4,r23 - std Z+12,r12 - std Z+28,r13 - std Z+36,r7 - ldd r23,Z+3 - ldd r12,Z+11 - ldd r13,Z+27 - eor r23,r8 - eor r8,r13 - eor r26,r12 - mov r14,r23 - mov r15,r12 - mov r24,r26 - mov r25,r13 - mov r16,r8 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r26 - and r24,r13 - and r25,r8 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r26,r25 - eor r13,r16 - eor r8,r14 - eor r12,r23 - eor r23,r8 - eor r13,r26 - com r26 - std Z+3,r23 - std Z+11,r12 - std Z+27,r13 - std Z+35,r8 - ldd r23,Z+2 - ldd r12,Z+10 - ldd r13,Z+26 - eor r23,r9 - eor r9,r13 - eor r27,r12 - mov r14,r23 - mov r15,r12 - mov r24,r27 - mov r25,r13 - mov r16,r9 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r27 - and r24,r13 - and r25,r9 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r27,r25 - eor r13,r16 - eor r9,r14 - eor r12,r23 - eor r23,r9 - eor r13,r27 - com r27 - std Z+2,r23 - std Z+10,r12 - std Z+26,r13 - std Z+34,r9 - ldd r23,Z+1 - ldd r12,Z+9 - ldd r13,Z+25 - eor r23,r10 - eor r10,r13 - eor r2,r12 - mov r14,r23 - mov r15,r12 - mov r24,r2 - mov r25,r13 - mov r16,r10 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r2 - and r24,r13 - and r25,r10 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r2,r25 - eor r13,r16 - eor r10,r14 - eor r12,r23 - eor r23,r10 - eor r13,r2 - com r2 - std Z+1,r23 - std Z+9,r12 - std Z+25,r13 - std Z+33,r10 - ld r23,Z - ldd r12,Z+8 - ldd r13,Z+24 - eor r23,r11 - eor r11,r13 - eor r3,r12 - mov r14,r23 - mov r15,r12 - mov r24,r3 - mov r25,r13 - mov r16,r11 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r3 - and r24,r13 - and r25,r11 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r3,r25 - eor r13,r16 - eor r11,r14 - eor r12,r23 - eor r23,r11 - eor r13,r3 - com r3 - st Z,r23 - std Z+8,r12 - std Z+24,r13 - std Z+32,r11 - ld r11,Z - ldd r10,Z+1 - ldd r9,Z+2 - ldd r8,Z+3 - ldd r7,Z+4 - ldd r6,Z+5 - ldd r5,Z+6 - ldd r4,Z+7 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r14 - mov r14,r24 - mov r24,r16 - mov r16,r0 - mov r0,r13 - mov r13,r15 - mov r15,r25 - mov r25,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r17,r0 - eor r12,r4 - 
eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r4 - mov r0,r5 - push r6 - mov r4,r7 - mov r5,r8 - mov r6,r9 - mov r7,r10 - mov r8,r11 - pop r11 - mov r10,r0 - mov r9,r23 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - st Z,r11 - std Z+1,r10 - std Z+2,r9 - std Z+3,r8 - std Z+4,r7 - std Z+5,r6 - std Z+6,r5 - std Z+7,r4 - ldd r11,Z+8 - ldd r10,Z+9 - ldd r9,Z+10 - ldd r8,Z+11 - ldd r7,Z+12 - ldd r6,Z+13 - ldd r5,Z+14 - ldd r4,Z+15 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r9 - mov r0,r10 - push r11 - mov r11,r8 - mov r10,r7 - mov r9,r6 - mov r8,r5 - mov r7,r4 - pop r6 - mov r5,r0 - mov r4,r23 - lsl r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - adc r4,r1 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - std Z+8,r11 - std Z+9,r10 - std Z+10,r9 - std Z+11,r8 - std Z+12,r7 - std Z+13,r6 - std Z+14,r5 - std Z+15,r4 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - bst r12,0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - bld r17,7 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - eor r24,r26 - eor r25,r27 - eor r16,r2 - eor r17,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r2 - rol r3 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r2 - rol r3 - adc r18,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - eor r26,r24 - eor r27,r25 - eor r2,r16 - eor r3,r17 - ldd r11,Z+24 - ldd r10,Z+25 - ldd r9,Z+26 - ldd r8,Z+27 - ldd r7,Z+28 - ldd r6,Z+29 - ldd r5,Z+30 - ldd r4,Z+31 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r17,r0 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r0,r4 - mov r4,r6 - mov r6,r8 - mov r8,r10 - mov r10,r0 - mov r0,r5 - mov r5,r7 - mov r7,r9 - mov r9,r11 - mov r11,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - std Z+24,r11 - std Z+25,r10 - std Z+26,r9 - std Z+27,r8 - std Z+28,r7 - std Z+29,r6 - std Z+30,r5 - std Z+31,r4 - ldd r11,Z+32 - ldd r10,Z+33 - ldd 
r9,Z+34 - ldd r8,Z+35 - ldd r7,Z+36 - ldd r6,Z+37 - ldd r5,Z+38 - ldd r4,Z+39 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r9 - mov r0,r10 - push r11 - mov r11,r8 - mov r10,r7 - mov r9,r6 - mov r8,r5 - mov r7,r4 - pop r6 - mov r5,r0 - mov r4,r23 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - subi r22,15 - ldi r25,60 - cpse r22,r25 - rjmp 20b - std Z+16,r3 - std Z+17,r2 - std Z+18,r27 - std Z+19,r26 - std Z+20,r21 - std Z+21,r20 - std Z+22,r19 - std Z+23,r18 - std Z+32,r11 - std Z+33,r10 - std Z+34,r9 - std Z+35,r8 - std Z+36,r7 - std Z+37,r6 - std Z+38,r5 - std Z+39,r4 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size ascon_permute, .-ascon_permute - -#endif diff --git a/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/internal-ascon.c b/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/internal-ascon.c deleted file mode 100644 index 657aabe..0000000 --- a/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/internal-ascon.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "internal-ascon.h" - -#if !defined(__AVR__) - -void ascon_permute(ascon_state_t *state, uint8_t first_round) -{ - uint64_t t0, t1, t2, t3, t4; -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = be_load_word64(state->B); - uint64_t x1 = be_load_word64(state->B + 8); - uint64_t x2 = be_load_word64(state->B + 16); - uint64_t x3 = be_load_word64(state->B + 24); - uint64_t x4 = be_load_word64(state->B + 32); -#else - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; -#endif - while (first_round < 12) { - /* Add the round constant to the state */ - x2 ^= ((0x0F - first_round) << 4) | first_round; - - /* Substitution layer - apply the s-box using bit-slicing - * according to the algorithm recommended in the specification */ - x0 ^= x4; x4 ^= x3; x2 ^= x1; - t0 = ~x0; t1 = ~x1; t2 = ~x2; t3 = ~x3; t4 = ~x4; - t0 &= x1; t1 &= x2; t2 &= x3; t3 &= x4; t4 &= x0; - x0 ^= t1; x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t0; - x1 ^= x0; x0 ^= x4; x3 ^= x2; x2 = ~x2; - - /* Linear diffusion layer */ - x0 ^= rightRotate19_64(x0) ^ rightRotate28_64(x0); - x1 ^= rightRotate61_64(x1) ^ rightRotate39_64(x1); - x2 ^= rightRotate1_64(x2) ^ rightRotate6_64(x2); - x3 ^= rightRotate10_64(x3) ^ rightRotate17_64(x3); - x4 ^= rightRotate7_64(x4) ^ rightRotate41_64(x4); - - /* Move onto the next round */ - ++first_round; - } -#if defined(LW_UTIL_LITTLE_ENDIAN) - be_store_word64(state->B, x0); - be_store_word64(state->B + 8, x1); - be_store_word64(state->B + 16, x2); - be_store_word64(state->B + 24, x3); - be_store_word64(state->B + 32, x4); -#else - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; -#endif -} - -#endif /* !__AVR__ */ diff --git a/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/internal-ascon.h b/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/internal-ascon.h deleted file mode 100644 index d3fa3ca..0000000 --- a/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/internal-ascon.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_ASCON_H -#define LW_INTERNAL_ASCON_H - -#include "internal-util.h" - -/** - * \file internal-ascon.h - * \brief Internal implementation of the ASCON permutation. 
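For reference, the round constant that the C permutation above XORs into x2 is ((0x0F - round) << 4) | round, which produces the usual ASCON constants 0xF0, 0xE1, ..., 0x4B, and starting the loop at first_round = 0, 4 or 6 selects the 12, 8 or 6 round variants. The following standalone snippet is illustrative only, uses nothing from the patch, and simply prints that constant schedule.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    for (unsigned round = 0; round < 12; ++round) {
        /* Same formula as ascon_permute(): ((0x0F - round) << 4) | round */
        uint8_t rc = (uint8_t)(((0x0F - round) << 4) | round);
        printf("round %2u: constant 0x%02X\n", round, rc);
    }
    return 0;
}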
- * - * References: http://competitions.cr.yp.to/round3/asconv12.pdf, - * http://ascon.iaik.tugraz.at/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Structure of the internal state of the ASCON permutation. - */ -typedef union -{ - uint64_t S[5]; /**< Words of the state */ - uint8_t B[40]; /**< Bytes of the state */ - -} ascon_state_t; - -/** - * \brief Permutes the ASCON state. - * - * \param state The ASCON state to be permuted. - * \param first_round The first round (of 12) to be performed; 0, 4, or 6. - * - * The input and output \a state will be in big-endian byte order. - */ -void ascon_permute(ascon_state_t *state, uint8_t first_round); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/internal-util.h b/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/ascon/Implementations/crypto_hash/asconxofv12/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
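The lw_xor_block*() helpers above are plain byte-wise XOR loops; they are how these implementations combine the cipher state with message data. A standalone illustration follows; it assumes nothing from the patch and the buffer contents are made up.

#include <stdio.h>

/* Equivalent of lw_xor_block(dest, src, len): XOR src into dest in place */
static void xor_block(unsigned char *dest, const unsigned char *src, unsigned len)
{
    while (len > 0) {
        *dest++ ^= *src++;
        --len;
    }
}

int main(void)
{
    unsigned char block[4] = { 0x41, 0x42, 0x43, 0x44 };   /* "ABCD" */
    const unsigned char pad[4] = { 0x01, 0x01, 0x01, 0x01 };
    xor_block(block, pad, 4);
    printf("%02x %02x %02x %02x\n", block[0], block[1], block[2], block[3]);
    /* prints "40 43 42 45" */
    return 0;
}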
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
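The LW_CRYPTO_ROTATE32_COMPOSED block above builds arbitrary rotation counts out of the cases that are cheap on AVR (1 bit and multiples of 8 bits). A quick standalone check of one of those compositions, illustrative only and independent of the patch:

#include <stdint.h>
#include <stdio.h>

static uint32_t rotl32(uint32_t v, unsigned bits) { return (v << bits) | (v >> (32u - bits)); }
static uint32_t rotr32(uint32_t v, unsigned bits) { return (v >> bits) | (v << (32u - bits)); }

int main(void)
{
    /* leftRotate5(a) in the composed scheme: rotate left by 8, then right by 1 three times */
    uint32_t x = 0xDEADBEEFu;
    uint32_t direct   = rotl32(x, 5);
    uint32_t composed = rotr32(rotr32(rotr32(rotl32(x, 8), 1), 1), 1);
    printf("%08lX %08lX %s\n",
           (unsigned long)direct, (unsigned long)composed,
           direct == composed ? "match" : "mismatch");
    return 0;
}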
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/ascon/Implementations/crypto_hash/asconxofv12/rhys/aead-common.c b/ascon/Implementations/crypto_hash/asconxofv12/rhys/aead-common.c new file mode 100644 index 0000000..84fc53a --- /dev/null +++ b/ascon/Implementations/crypto_hash/asconxofv12/rhys/aead-common.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "aead-common.h" + +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = (accum - 1) >> 8; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} + +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size, int precheck) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = ((accum - 1) >> 8) & precheck; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} diff --git a/ascon/Implementations/crypto_hash/asconxofv12/rhys/aead-common.h b/ascon/Implementations/crypto_hash/asconxofv12/rhys/aead-common.h new file mode 100644 index 0000000..2be95eb --- /dev/null +++ b/ascon/Implementations/crypto_hash/asconxofv12/rhys/aead-common.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
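In aead_check_tag() above, accum collects the OR of every byte difference between the two tags, so (accum - 1) >> 8 becomes an all-ones mask only when the tags were identical; that mask both keeps or zeroes the plaintext and produces the 0 / -1 return value. A standalone illustration of the masking step, assuming the usual arithmetic right shift of negative values, which the code above also relies on:

#include <stdio.h>

int main(void)
{
    int equal_accum = 0x00;   /* every tag byte matched */
    int diff_accum  = 0x5A;   /* at least one byte differed */

    int mask_equal = (equal_accum - 1) >> 8;  /* -1: plaintext kept   */
    int mask_diff  = (diff_accum  - 1) >> 8;  /*  0: plaintext zeroed */

    /* The function returns ~mask: 0 for success, -1 for a failed check */
    printf("%d %d\n", ~mask_equal, ~mask_diff);   /* prints "0 -1" */
    return 0;
}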
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_AEAD_COMMON_H +#define LWCRYPTO_AEAD_COMMON_H + +#include + +/** + * \file aead-common.h + * \brief Definitions that are common across AEAD schemes. + * + * AEAD stands for "Authenticated Encryption with Associated Data". + * It is a standard API pattern for securely encrypting and + * authenticating packets of data. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts and authenticates a packet with an AEAD scheme. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + */ +typedef int (*aead_cipher_encrypt_t) + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with an AEAD scheme. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. 
+ */ +typedef int (*aead_cipher_decrypt_t) + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data. + * + * \param out Buffer to receive the hash output. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +typedef int (*aead_hash_t) + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a hashing operation. + * + * \param state Hash state to be initialized. + */ +typedef void (*aead_hash_init_t)(void *state); + +/** + * \brief Updates a hash state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + */ +typedef void (*aead_hash_update_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Returns the final hash value from a hashing operation. + * + * \param Hash state to be finalized. + * \param out Points to the output buffer to receive the hash value. + */ +typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); + +/** + * \brief Aborbs more input data into an XOF state. + * + * \param state XOF state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. + * + * \sa ascon_xof_init(), ascon_xof_squeeze() + */ +typedef void (*aead_xof_absorb_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Squeezes output data from an XOF state. + * + * \param state XOF state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + */ +typedef void (*aead_xof_squeeze_t) + (void *state, unsigned char *out, unsigned long long outlen); + +/** + * \brief No special AEAD features. + */ +#define AEAD_FLAG_NONE 0x0000 + +/** + * \brief The natural byte order of the AEAD cipher is little-endian. + * + * If this flag is not present, then the natural byte order of the + * AEAD cipher should be assumed to be big-endian. + * + * The natural byte order may be useful when formatting packet sequence + * numbers as nonces. The application needs to know whether the sequence + * number should be packed into the leading or trailing bytes of the nonce. + */ +#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 + +/** + * \brief Meta-information about an AEAD cipher. + */ +typedef struct +{ + const char *name; /**< Name of the cipher */ + unsigned key_len; /**< Length of the key in bytes */ + unsigned nonce_len; /**< Length of the nonce in bytes */ + unsigned tag_len; /**< Length of the tag in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ + aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ + +} aead_cipher_t; + +/** + * \brief Meta-information about a hash algorithm that is related to an AEAD. + * + * Regular hash algorithms should provide the "hash", "init", "update", + * and "finalize" functions. 
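The aead_cipher_t block above lets callers drive any AEAD member of the family through the same two function pointers. A usage sketch, not part of the patch: the descriptor could be, for example, &ascon128_cipher from ascon128.h, and the key, nonce, and message below are placeholders that must match the lengths reported in the descriptor.

#include "aead-common.h"
#include <string.h>

/* Encrypt and then decrypt a small message through a generic descriptor.
 * Returns 0 if the round trip succeeded. */
static int aead_roundtrip(const aead_cipher_t *cipher,
                          const unsigned char *key,
                          const unsigned char *nonce)
{
    unsigned char pt[16] = "round trip test";
    unsigned char ct[sizeof(pt) + 32];      /* room for the authentication tag */
    unsigned char out[sizeof(pt)];
    unsigned long long clen, mlen;

    if (cipher->encrypt(ct, &clen, pt, sizeof(pt), 0, 0, 0, nonce, key) != 0)
        return -1;
    if (cipher->decrypt(out, &mlen, 0, ct, clen, 0, 0, nonce, key) != 0)
        return -1;
    return (mlen == sizeof(pt) && memcmp(out, pt, sizeof(pt)) == 0) ? 0 : -1;
}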
Extensible Output Functions (XOF's) should + * proivde the "hash", "init", "absorb", and "squeeze" functions. + */ +typedef struct +{ + const char *name; /**< Name of the hash algorithm */ + size_t state_size; /**< Size of the incremental state structure */ + unsigned hash_len; /**< Length of the hash in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_hash_t hash; /**< All in one hashing function */ + aead_hash_init_t init; /**< Incremental hash/XOF init function */ + aead_hash_update_t update; /**< Incremental hash update function */ + aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ + aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ + aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ + +} aead_hash_algorithm_t; + +/** + * \brief Check an authentication tag in constant time. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + */ +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len); + +/** + * \brief Check an authentication tag in constant time with a previous check. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * \param precheck Set to -1 if previous check succeeded or 0 if it failed. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + * + * This version can be used to incorporate other information about the + * correctness of the plaintext into the final result. + */ +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len, int precheck); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ascon/Implementations/crypto_hash/asconxofv12/rhys/api.h b/ascon/Implementations/crypto_hash/asconxofv12/rhys/api.h new file mode 100644 index 0000000..ae8c7f6 --- /dev/null +++ b/ascon/Implementations/crypto_hash/asconxofv12/rhys/api.h @@ -0,0 +1 @@ +#define CRYPTO_BYTES 32 diff --git a/ascon/Implementations/crypto_hash/asconxofv12/rhys/ascon-xof.c b/ascon/Implementations/crypto_hash/asconxofv12/rhys/ascon-xof.c new file mode 100644 index 0000000..1d1e71f --- /dev/null +++ b/ascon/Implementations/crypto_hash/asconxofv12/rhys/ascon-xof.c @@ -0,0 +1,123 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "ascon128.h" +#include "internal-ascon.h" +#include + +#define ASCON_XOF_RATE 8 +#define ascon_xof_permute() \ + ascon_permute((ascon_state_t *)(state->s.state), 0) + +aead_hash_algorithm_t const ascon_xof_algorithm = { + "ASCON-XOF", + sizeof(ascon_hash_state_t), + ASCON_HASH_SIZE, + AEAD_FLAG_NONE, + ascon_xof, + (aead_hash_init_t)ascon_xof_init, + 0, /* update */ + 0, /* finalize */ + (aead_xof_absorb_t)ascon_xof_absorb, + (aead_xof_squeeze_t)ascon_xof_squeeze +}; + +int ascon_xof + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + ascon_hash_state_t state; + ascon_xof_init(&state); + ascon_xof_absorb(&state, in, inlen); + ascon_xof_squeeze(&state, out, ASCON_HASH_SIZE); + return 0; +} + +void ascon_xof_init(ascon_hash_state_t *state) +{ + static unsigned char const xof_iv[40] = { + 0xb5, 0x7e, 0x27, 0x3b, 0x81, 0x4c, 0xd4, 0x16, + 0x2b, 0x51, 0x04, 0x25, 0x62, 0xae, 0x24, 0x20, + 0x66, 0xa3, 0xa7, 0x76, 0x8d, 0xdf, 0x22, 0x18, + 0x5a, 0xad, 0x0a, 0x7a, 0x81, 0x53, 0x65, 0x0c, + 0x4f, 0x3e, 0x0e, 0x32, 0x53, 0x94, 0x93, 0xb6 + }; + memcpy(state->s.state, xof_iv, sizeof(xof_iv)); + state->s.count = 0; + state->s.mode = 0; +} + +void ascon_xof_absorb + (ascon_hash_state_t *state, const unsigned char *in, + unsigned long long inlen) +{ + if (state->s.mode) { + /* We were squeezing output - go back to the absorb phase */ + state->s.mode = 0; + state->s.count = 0; + ascon_xof_permute(); + } + ascon_hash_update(state, in, inlen); +} + +void ascon_xof_squeeze + (ascon_hash_state_t *state, unsigned char *out, unsigned long long outlen) +{ + unsigned temp; + + /* Pad the final input block if we were still in the absorb phase */ + if (!state->s.mode) { + state->s.state[state->s.count] ^= 0x80; + state->s.count = 0; + state->s.mode = 1; + } + + /* Handle left-over partial blocks from last time */ + if (state->s.count) { + temp = ASCON_XOF_RATE - state->s.count; + if (temp > outlen) { + temp = (unsigned)outlen; + memcpy(out, state->s.state + state->s.count, temp); + state->s.count += temp; + return; + } + memcpy(out, state->s.state + state->s.count, temp); + out += temp; + outlen -= temp; + state->s.count = 0; + } + + /* Handle full blocks */ + while (outlen >= ASCON_XOF_RATE) { + ascon_xof_permute(); + memcpy(out, state->s.state, ASCON_XOF_RATE); + out += ASCON_XOF_RATE; + outlen -= ASCON_XOF_RATE; + } + + /* Handle the left-over block */ + if (outlen > 0) { + temp = 
(unsigned)outlen; + ascon_xof_permute(); + memcpy(out, state->s.state, temp); + state->s.count = temp; + } +} diff --git a/ascon/Implementations/crypto_hash/asconxofv12/rhys/ascon128.h b/ascon/Implementations/crypto_hash/asconxofv12/rhys/ascon128.h new file mode 100644 index 0000000..fd9db13 --- /dev/null +++ b/ascon/Implementations/crypto_hash/asconxofv12/rhys/ascon128.h @@ -0,0 +1,408 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_ASCON_H +#define LWCRYPTO_ASCON_H + +#include "aead-common.h" + +/** + * \file ascon128.h + * \brief ASCON-128 encryption algorithm and related family members. + * + * The ASCON family consists of several related algorithms: + * + * \li ASCON-128 with a 128-bit key, a 128-bit nonce, a 128-bit authentication + * tag, and a block rate of 64 bits. + * \li ASCON-128a with a 128-bit key, a 128-bit nonce, a 128-bit authentication + * tag, and a block rate of 128 bits. This is faster than ASCON-128 but may + * not be as secure. + * \li ASCON-80pq with a 160-bit key, a 128-bit nonce, a 128-bit authentication + * tag, and a block rate of 64 bits. This is similar to ASCON-128 but has a + * 160-bit key instead which may be more resistant against quantum computers. + * \li ASCON-HASH with a 256-bit hash output. + * + * References: https://ascon.iaik.tugraz.at/ + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the key for ASCON-128 and ASCON-128a. + */ +#define ASCON128_KEY_SIZE 16 + +/** + * \brief Size of the nonce for ASCON-128 and ASCON-128a. + */ +#define ASCON128_NONCE_SIZE 16 + +/** + * \brief Size of the authentication tag for ASCON-128 and ASCON-128a. + */ +#define ASCON128_TAG_SIZE 16 + +/** + * \brief Size of the key for ASCON-80pq. + */ +#define ASCON80PQ_KEY_SIZE 20 + +/** + * \brief Size of the nonce for ASCON-80pq. + */ +#define ASCON80PQ_NONCE_SIZE 16 + +/** + * \brief Size of the authentication tag for ASCON-80pq. + */ +#define ASCON80PQ_TAG_SIZE 16 + +/** + * \brief Size of the hash output for ASCON-HASH. + */ +#define ASCON_HASH_SIZE 32 + +/** + * \brief State information for ASCON-HASH and ASCON-XOF incremental modes. 
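As a usage illustration for the incremental functions implemented above, the following sketch absorbs a message in two pieces and squeezes 64 bytes of output in two calls. The helper name and buffer sizes are assumptions; the first 32 bytes of output should match a single ascon_xof() call over the same input.

#include "ascon128.h"

/* Sketch: streaming use of ASCON-XOF via init/absorb/squeeze. */
static void xof_stream_example
    (unsigned char out[64],
     const unsigned char *part1, unsigned long long len1,
     const unsigned char *part2, unsigned long long len2)
{
    ascon_hash_state_t state;
    ascon_xof_init(&state);
    ascon_xof_absorb(&state, part1, len1);    /* input may be split freely */
    ascon_xof_absorb(&state, part2, len2);
    ascon_xof_squeeze(&state, out, 32);       /* output may also be split */
    ascon_xof_squeeze(&state, out + 32, 32);
}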
+ */ +typedef union +{ + struct { + unsigned char state[40]; /**< Current hash state */ + unsigned char count; /**< Number of bytes in the current block */ + unsigned char mode; /**< Hash mode: 0 for absorb, 1 for squeeze */ + } s; /**< State */ + unsigned long long align; /**< For alignment of this structure */ + +} ascon_hash_state_t; + +/** + * \brief Meta-information block for the ASCON-128 cipher. + */ +extern aead_cipher_t const ascon128_cipher; + +/** + * \brief Meta-information block for the ASCON-128a cipher. + */ +extern aead_cipher_t const ascon128a_cipher; + +/** + * \brief Meta-information block for the ASCON-80pq cipher. + */ +extern aead_cipher_t const ascon80pq_cipher; + +/** + * \brief Meta-information block for the ASCON-HASH algorithm. + */ +extern aead_hash_algorithm_t const ascon_hash_algorithm; + +/** + * \brief Meta-information block for the ASCON-XOF algorithm. + */ +extern aead_hash_algorithm_t const ascon_xof_algorithm; + +/** + * \brief Encrypts and authenticates a packet with ASCON-128. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa ascon128_aead_decrypt() + */ +int ascon128_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with ASCON-128. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa ascon128_aead_encrypt() + */ +int ascon128_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with ASCON-128a. 
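A hedged round-trip sketch of the ASCON-128 AEAD functions documented above; the all-zero key and nonce and the tiny message are placeholders only, and a real caller must use a fresh nonce for every packet.

#include "ascon128.h"

/* Sketch: encrypt a packet with ASCON-128 and decrypt it again.
 * Returns 0 if both calls succeed and the tag verifies. */
static int ascon128_roundtrip_example(void)
{
    static const unsigned char key[ASCON128_KEY_SIZE] = {0};     /* placeholder */
    static const unsigned char nonce[ASCON128_NONCE_SIZE] = {0}; /* placeholder */
    const unsigned char msg[4] = {'t', 'e', 's', 't'};
    const unsigned char ad[2]  = {'a', 'd'};
    unsigned char c[sizeof(msg) + ASCON128_TAG_SIZE];
    unsigned char m[sizeof(msg)];
    unsigned long long clen, mlen;

    if (ascon128_aead_encrypt(c, &clen, msg, sizeof(msg),
                              ad, sizeof(ad), 0, nonce, key) != 0)
        return -1;
    /* clen is now sizeof(msg) + 16 (ciphertext plus tag) */
    return ascon128_aead_decrypt(m, &mlen, 0, c, clen,
                                 ad, sizeof(ad), nonce, key);
}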
+ * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa ascon128a_aead_decrypt() + */ +int ascon128a_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with ASCON-128a. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa ascon128a_aead_encrypt() + */ +int ascon128a_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with ASCON-80pq. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 20 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. 
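The meta-information blocks declared above (ascon_hash_algorithm, ascon_xof_algorithm) make algorithm-agnostic drivers possible. The sketch below assumes the function-pointer signatures from the earlier part of aead-common.h match the concrete functions they are cast from; the helper name hash_via_meta_block is hypothetical.

#include <stdlib.h>
#include "aead-common.h"

/* Sketch: hash a buffer through an aead_hash_algorithm_t without naming
 * the algorithm directly.  XOF-style entries (such as ascon_xof_algorithm)
 * leave update/finalize null and provide absorb/squeeze instead. */
static int hash_via_meta_block
    (const aead_hash_algorithm_t *alg, unsigned char *out,
     const unsigned char *in, unsigned long long inlen)
{
    if (alg->absorb && alg->squeeze) {
        void *state = malloc(alg->state_size);
        if (!state)
            return -1;
        alg->init(state);
        alg->absorb(state, in, inlen);
        alg->squeeze(state, out, alg->hash_len);
        free(state);
        return 0;
    }
    /* Fall back to the all-in-one entry point */
    return alg->hash(out, in, inlen);
}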
+ * + * \sa ascon80pq_aead_decrypt() + */ +int ascon80pq_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with ASCON-80pq. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 20 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa ascon80pq_aead_encrypt() + */ +int ascon80pq_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data with ASCON-HASH. + * + * \param out Buffer to receive the hash output which must be at least + * ASCON_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + * + * \sa ascon_hash_init(), ascon_hash_absorb(), ascon_hash_squeeze() + */ +int ascon_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for an ASCON-HASH hashing operation. + * + * \param state Hash state to be initialized. + * + * \sa ascon_hash_update(), ascon_hash_finalize(), ascon_hash() + */ +void ascon_hash_init(ascon_hash_state_t *state); + +/** + * \brief Updates an ASCON-HASH state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + * + * \sa ascon_hash_init(), ascon_hash_finalize() + */ +void ascon_hash_update + (ascon_hash_state_t *state, const unsigned char *in, + unsigned long long inlen); + +/** + * \brief Returns the final hash value from an ASCON-HASH hashing operation. + * + * \param state Hash state to be finalized. + * \param out Points to the output buffer to receive the 32-byte hash value. + * + * \sa ascon_hash_init(), ascon_hash_update() + */ +void ascon_hash_finalize + (ascon_hash_state_t *state, unsigned char *out); + +/** + * \brief Hashes a block of input data with ASCON-XOF and generates a + * fixed-length 32 byte output. + * + * \param out Buffer to receive the hash output which must be at least + * ASCON_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. 
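A brief sketch of the incremental ASCON-HASH interface documented above, hashing a message that arrives in two fragments; the helper name is an assumption, and the result should equal one ascon_hash() call over the concatenated input.

#include "ascon128.h"

/* Sketch: incremental hashing with init/update/finalize. */
static void hash_two_fragments
    (unsigned char out[ASCON_HASH_SIZE],
     const unsigned char *frag1, unsigned long long len1,
     const unsigned char *frag2, unsigned long long len2)
{
    ascon_hash_state_t state;
    ascon_hash_init(&state);
    ascon_hash_update(&state, frag1, len1);
    ascon_hash_update(&state, frag2, len2);
    ascon_hash_finalize(&state, out);   /* writes the 32-byte digest */
}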
+ * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + * + * Use ascon_xof_squeeze() instead if you need variable-length XOF ouutput. + * + * \sa ascon_xof_init(), ascon_xof_absorb(), ascon_xof_squeeze() + */ +int ascon_xof + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for an ASCON-XOF hashing operation. + * + * \param state Hash state to be initialized. + * + * \sa ascon_xof_absorb(), ascon_xof_squeeze(), ascon_xof() + */ +void ascon_xof_init(ascon_hash_state_t *state); + +/** + * \brief Aborbs more input data into an ASCON-XOF state. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. + * + * \sa ascon_xof_init(), ascon_xof_squeeze() + */ +void ascon_xof_absorb + (ascon_hash_state_t *state, const unsigned char *in, + unsigned long long inlen); + +/** + * \brief Squeezes output data from an ASCON-XOF state. + * + * \param state Hash state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + * + * \sa ascon_xof_init(), ascon_xof_update() + */ +void ascon_xof_squeeze + (ascon_hash_state_t *state, unsigned char *out, unsigned long long outlen); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ascon/Implementations/crypto_hash/asconxofv12/rhys/hash.c b/ascon/Implementations/crypto_hash/asconxofv12/rhys/hash.c new file mode 100644 index 0000000..6f37a9d --- /dev/null +++ b/ascon/Implementations/crypto_hash/asconxofv12/rhys/hash.c @@ -0,0 +1,8 @@ + +#include "ascon128.h" + +int crypto_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + return ascon_xof(out, in, inlen); +} diff --git a/ascon/Implementations/crypto_hash/asconxofv12/rhys/internal-ascon-avr.S b/ascon/Implementations/crypto_hash/asconxofv12/rhys/internal-ascon-avr.S new file mode 100644 index 0000000..e8a4fb4 --- /dev/null +++ b/ascon/Implementations/crypto_hash/asconxofv12/rhys/internal-ascon-avr.S @@ -0,0 +1,778 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global ascon_permute + .type ascon_permute, @function +ascon_permute: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ldi r18,15 + sub r18,r22 + swap r18 + or r22,r18 + ldd r3,Z+16 + ldd r2,Z+17 + ldd r27,Z+18 + ldd r26,Z+19 + ldd r21,Z+20 + ldd r20,Z+21 + ldd r19,Z+22 + ldd r18,Z+23 + ldd r11,Z+32 + ldd r10,Z+33 + ldd r9,Z+34 + ldd r8,Z+35 + ldd r7,Z+36 + ldd r6,Z+37 + ldd r5,Z+38 + ldd r4,Z+39 +20: + eor r18,r22 + ldd r23,Z+7 + ldd r12,Z+15 + ldd r13,Z+31 + eor r23,r4 + eor r4,r13 + eor r18,r12 + mov r14,r23 + mov r15,r12 + mov r24,r18 + mov r25,r13 + mov r16,r4 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r18 + and r24,r13 + and r25,r4 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r18,r25 + eor r13,r16 + eor r4,r14 + eor r12,r23 + eor r23,r4 + eor r13,r18 + com r18 + std Z+7,r23 + std Z+15,r12 + std Z+31,r13 + std Z+39,r4 + ldd r23,Z+6 + ldd r12,Z+14 + ldd r13,Z+30 + eor r23,r5 + eor r5,r13 + eor r19,r12 + mov r14,r23 + mov r15,r12 + mov r24,r19 + mov r25,r13 + mov r16,r5 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and 
r15,r19 + and r24,r13 + and r25,r5 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r19,r25 + eor r13,r16 + eor r5,r14 + eor r12,r23 + eor r23,r5 + eor r13,r19 + com r19 + std Z+6,r23 + std Z+14,r12 + std Z+30,r13 + std Z+38,r5 + ldd r23,Z+5 + ldd r12,Z+13 + ldd r13,Z+29 + eor r23,r6 + eor r6,r13 + eor r20,r12 + mov r14,r23 + mov r15,r12 + mov r24,r20 + mov r25,r13 + mov r16,r6 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r20 + and r24,r13 + and r25,r6 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r20,r25 + eor r13,r16 + eor r6,r14 + eor r12,r23 + eor r23,r6 + eor r13,r20 + com r20 + std Z+5,r23 + std Z+13,r12 + std Z+29,r13 + std Z+37,r6 + ldd r23,Z+4 + ldd r12,Z+12 + ldd r13,Z+28 + eor r23,r7 + eor r7,r13 + eor r21,r12 + mov r14,r23 + mov r15,r12 + mov r24,r21 + mov r25,r13 + mov r16,r7 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r21 + and r24,r13 + and r25,r7 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r21,r25 + eor r13,r16 + eor r7,r14 + eor r12,r23 + eor r23,r7 + eor r13,r21 + com r21 + std Z+4,r23 + std Z+12,r12 + std Z+28,r13 + std Z+36,r7 + ldd r23,Z+3 + ldd r12,Z+11 + ldd r13,Z+27 + eor r23,r8 + eor r8,r13 + eor r26,r12 + mov r14,r23 + mov r15,r12 + mov r24,r26 + mov r25,r13 + mov r16,r8 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r26 + and r24,r13 + and r25,r8 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r26,r25 + eor r13,r16 + eor r8,r14 + eor r12,r23 + eor r23,r8 + eor r13,r26 + com r26 + std Z+3,r23 + std Z+11,r12 + std Z+27,r13 + std Z+35,r8 + ldd r23,Z+2 + ldd r12,Z+10 + ldd r13,Z+26 + eor r23,r9 + eor r9,r13 + eor r27,r12 + mov r14,r23 + mov r15,r12 + mov r24,r27 + mov r25,r13 + mov r16,r9 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r27 + and r24,r13 + and r25,r9 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r27,r25 + eor r13,r16 + eor r9,r14 + eor r12,r23 + eor r23,r9 + eor r13,r27 + com r27 + std Z+2,r23 + std Z+10,r12 + std Z+26,r13 + std Z+34,r9 + ldd r23,Z+1 + ldd r12,Z+9 + ldd r13,Z+25 + eor r23,r10 + eor r10,r13 + eor r2,r12 + mov r14,r23 + mov r15,r12 + mov r24,r2 + mov r25,r13 + mov r16,r10 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r2 + and r24,r13 + and r25,r10 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r2,r25 + eor r13,r16 + eor r10,r14 + eor r12,r23 + eor r23,r10 + eor r13,r2 + com r2 + std Z+1,r23 + std Z+9,r12 + std Z+25,r13 + std Z+33,r10 + ld r23,Z + ldd r12,Z+8 + ldd r13,Z+24 + eor r23,r11 + eor r11,r13 + eor r3,r12 + mov r14,r23 + mov r15,r12 + mov r24,r3 + mov r25,r13 + mov r16,r11 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r3 + and r24,r13 + and r25,r11 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r3,r25 + eor r13,r16 + eor r11,r14 + eor r12,r23 + eor r23,r11 + eor r13,r3 + com r3 + st Z,r23 + std Z+8,r12 + std Z+24,r13 + std Z+32,r11 + ld r11,Z + ldd r10,Z+1 + ldd r9,Z+2 + ldd r8,Z+3 + ldd r7,Z+4 + ldd r6,Z+5 + ldd r5,Z+6 + ldd r4,Z+7 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r14 + mov r14,r24 + mov r24,r16 + mov r16,r0 + mov r0,r13 + mov r13,r15 + mov r15,r25 + mov r25,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r17,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor 
r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r4 + mov r0,r5 + push r6 + mov r4,r7 + mov r5,r8 + mov r6,r9 + mov r7,r10 + mov r8,r11 + pop r11 + mov r10,r0 + mov r9,r23 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + st Z,r11 + std Z+1,r10 + std Z+2,r9 + std Z+3,r8 + std Z+4,r7 + std Z+5,r6 + std Z+6,r5 + std Z+7,r4 + ldd r11,Z+8 + ldd r10,Z+9 + ldd r9,Z+10 + ldd r8,Z+11 + ldd r7,Z+12 + ldd r6,Z+13 + ldd r5,Z+14 + ldd r4,Z+15 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r9 + mov r0,r10 + push r11 + mov r11,r8 + mov r10,r7 + mov r9,r6 + mov r8,r5 + mov r7,r4 + pop r6 + mov r5,r0 + mov r4,r23 + lsl r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + adc r4,r1 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + std Z+8,r11 + std Z+9,r10 + std Z+10,r9 + std Z+11,r8 + std Z+12,r7 + std Z+13,r6 + std Z+14,r5 + std Z+15,r4 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + bst r12,0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + bld r17,7 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + eor r24,r26 + eor r25,r27 + eor r16,r2 + eor r17,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r2 + rol r3 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r2 + rol r3 + adc r18,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + eor r26,r24 + eor r27,r25 + eor r2,r16 + eor r3,r17 + ldd r11,Z+24 + ldd r10,Z+25 + ldd r9,Z+26 + ldd r8,Z+27 + ldd r7,Z+28 + ldd r6,Z+29 + ldd r5,Z+30 + ldd r4,Z+31 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r17,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r0,r4 + mov r4,r6 + mov r6,r8 + mov r8,r10 + mov r10,r0 + mov r0,r5 + mov r5,r7 + mov r7,r9 + mov r9,r11 + mov r11,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + std Z+24,r11 + std Z+25,r10 + std Z+26,r9 + std Z+27,r8 + std Z+28,r7 + std Z+29,r6 + std Z+30,r5 + std Z+31,r4 + ldd r11,Z+32 + ldd r10,Z+33 + ldd r9,Z+34 + ldd r8,Z+35 + ldd 
r7,Z+36 + ldd r6,Z+37 + ldd r5,Z+38 + ldd r4,Z+39 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r9 + mov r0,r10 + push r11 + mov r11,r8 + mov r10,r7 + mov r9,r6 + mov r8,r5 + mov r7,r4 + pop r6 + mov r5,r0 + mov r4,r23 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + subi r22,15 + ldi r25,60 + cpse r22,r25 + rjmp 20b + std Z+16,r3 + std Z+17,r2 + std Z+18,r27 + std Z+19,r26 + std Z+20,r21 + std Z+21,r20 + std Z+22,r19 + std Z+23,r18 + std Z+32,r11 + std Z+33,r10 + std Z+34,r9 + std Z+35,r8 + std Z+36,r7 + std Z+37,r6 + std Z+38,r5 + std Z+39,r4 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size ascon_permute, .-ascon_permute + +#endif diff --git a/ascon/Implementations/crypto_hash/asconxofv12/rhys/internal-ascon.c b/ascon/Implementations/crypto_hash/asconxofv12/rhys/internal-ascon.c new file mode 100644 index 0000000..657aabe --- /dev/null +++ b/ascon/Implementations/crypto_hash/asconxofv12/rhys/internal-ascon.c @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "internal-ascon.h" + +#if !defined(__AVR__) + +void ascon_permute(ascon_state_t *state, uint8_t first_round) +{ + uint64_t t0, t1, t2, t3, t4; +#if defined(LW_UTIL_LITTLE_ENDIAN) + uint64_t x0 = be_load_word64(state->B); + uint64_t x1 = be_load_word64(state->B + 8); + uint64_t x2 = be_load_word64(state->B + 16); + uint64_t x3 = be_load_word64(state->B + 24); + uint64_t x4 = be_load_word64(state->B + 32); +#else + uint64_t x0 = state->S[0]; + uint64_t x1 = state->S[1]; + uint64_t x2 = state->S[2]; + uint64_t x3 = state->S[3]; + uint64_t x4 = state->S[4]; +#endif + while (first_round < 12) { + /* Add the round constant to the state */ + x2 ^= ((0x0F - first_round) << 4) | first_round; + + /* Substitution layer - apply the s-box using bit-slicing + * according to the algorithm recommended in the specification */ + x0 ^= x4; x4 ^= x3; x2 ^= x1; + t0 = ~x0; t1 = ~x1; t2 = ~x2; t3 = ~x3; t4 = ~x4; + t0 &= x1; t1 &= x2; t2 &= x3; t3 &= x4; t4 &= x0; + x0 ^= t1; x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t0; + x1 ^= x0; x0 ^= x4; x3 ^= x2; x2 = ~x2; + + /* Linear diffusion layer */ + x0 ^= rightRotate19_64(x0) ^ rightRotate28_64(x0); + x1 ^= rightRotate61_64(x1) ^ rightRotate39_64(x1); + x2 ^= rightRotate1_64(x2) ^ rightRotate6_64(x2); + x3 ^= rightRotate10_64(x3) ^ rightRotate17_64(x3); + x4 ^= rightRotate7_64(x4) ^ rightRotate41_64(x4); + + /* Move onto the next round */ + ++first_round; + } +#if defined(LW_UTIL_LITTLE_ENDIAN) + be_store_word64(state->B, x0); + be_store_word64(state->B + 8, x1); + be_store_word64(state->B + 16, x2); + be_store_word64(state->B + 24, x3); + be_store_word64(state->B + 32, x4); +#else + state->S[0] = x0; + state->S[1] = x1; + state->S[2] = x2; + state->S[3] = x3; + state->S[4] = x4; +#endif +} + +#endif /* !__AVR__ */ diff --git a/ascon/Implementations/crypto_hash/asconxofv12/rhys/internal-ascon.h b/ascon/Implementations/crypto_hash/asconxofv12/rhys/internal-ascon.h new file mode 100644 index 0000000..d3fa3ca --- /dev/null +++ b/ascon/Implementations/crypto_hash/asconxofv12/rhys/internal-ascon.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_ASCON_H +#define LW_INTERNAL_ASCON_H + +#include "internal-util.h" + +/** + * \file internal-ascon.h + * \brief Internal implementation of the ASCON permutation. 
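The round-constant expression in the permutation above is compact enough to deserve a worked example: for first_round = 0 through 11 it produces 0xF0, 0xE1, 0xD2, 0xC3, 0xB4, 0xA5, 0x96, 0x87, 0x78, 0x69, 0x5A and 0x4B, the twelve ASCON round constants. A minimal self-check, offered as a sketch:

#include <assert.h>
#include <stdint.h>

/* Sketch: verify the round-constant formula used in ascon_permute(). */
static void check_ascon_round_constants(void)
{
    static const uint8_t expected[12] = {
        0xF0, 0xE1, 0xD2, 0xC3, 0xB4, 0xA5,
        0x96, 0x87, 0x78, 0x69, 0x5A, 0x4B
    };
    unsigned round;
    for (round = 0; round < 12; ++round)
        assert((uint8_t)(((0x0F - round) << 4) | round) == expected[round]);
}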
+ * + * References: http://competitions.cr.yp.to/round3/asconv12.pdf, + * http://ascon.iaik.tugraz.at/ + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Structure of the internal state of the ASCON permutation. + */ +typedef union +{ + uint64_t S[5]; /**< Words of the state */ + uint8_t B[40]; /**< Bytes of the state */ + +} ascon_state_t; + +/** + * \brief Permutes the ASCON state. + * + * \param state The ASCON state to be permuted. + * \param first_round The first round (of 12) to be performed; 0, 4, or 6. + * + * The input and output \a state will be in big-endian byte order. + */ +void ascon_permute(ascon_state_t *state, uint8_t first_round); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ascon/Implementations/crypto_hash/asconxofv12/rhys/internal-util.h b/ascon/Implementations/crypto_hash/asconxofv12/rhys/internal-util.h new file mode 100644 index 0000000..e30166d --- /dev/null +++ b/ascon/Implementations/crypto_hash/asconxofv12/rhys/internal-util.h @@ -0,0 +1,702 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_UTIL_H +#define LW_INTERNAL_UTIL_H + +#include + +/* Figure out how to inline functions using this C compiler */ +#if defined(__STDC__) && __STDC_VERSION__ >= 199901L +#define STATIC_INLINE static inline +#elif defined(__GNUC__) || defined(__clang__) +#define STATIC_INLINE static __inline__ +#else +#define STATIC_INLINE static +#endif + +/* Try to figure out whether the CPU is little-endian or big-endian. + * May need to modify this to include new compiler-specific defines. 
+ * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your + * compiler flags when you compile this library */ +#if defined(__x86_64) || defined(__x86_64__) || \ + defined(__i386) || defined(__i386__) || \ + defined(__AVR__) || defined(__arm) || defined(__arm__) || \ + defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ + defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ + defined(__LITTLE_ENDIAN__) +#define LW_UTIL_LITTLE_ENDIAN 1 +#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ + defined(__BIG_ENDIAN__) +/* Big endian */ +#else +#error "Cannot determine the endianess of this platform" +#endif + +/* Helper macros to load and store values while converting endian-ness */ + +/* Load a big-endian 32-bit word from a byte buffer */ +#define be_load_word32(ptr) \ + ((((uint32_t)((ptr)[0])) << 24) | \ + (((uint32_t)((ptr)[1])) << 16) | \ + (((uint32_t)((ptr)[2])) << 8) | \ + ((uint32_t)((ptr)[3]))) + +/* Store a big-endian 32-bit word into a byte buffer */ +#define be_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 24); \ + (ptr)[1] = (uint8_t)(_x >> 16); \ + (ptr)[2] = (uint8_t)(_x >> 8); \ + (ptr)[3] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 32-bit word from a byte buffer */ +#define le_load_word32(ptr) \ + ((((uint32_t)((ptr)[3])) << 24) | \ + (((uint32_t)((ptr)[2])) << 16) | \ + (((uint32_t)((ptr)[1])) << 8) | \ + ((uint32_t)((ptr)[0]))) + +/* Store a little-endian 32-bit word into a byte buffer */ +#define le_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + } while (0) + +/* Load a big-endian 64-bit word from a byte buffer */ +#define be_load_word64(ptr) \ + ((((uint64_t)((ptr)[0])) << 56) | \ + (((uint64_t)((ptr)[1])) << 48) | \ + (((uint64_t)((ptr)[2])) << 40) | \ + (((uint64_t)((ptr)[3])) << 32) | \ + (((uint64_t)((ptr)[4])) << 24) | \ + (((uint64_t)((ptr)[5])) << 16) | \ + (((uint64_t)((ptr)[6])) << 8) | \ + ((uint64_t)((ptr)[7]))) + +/* Store a big-endian 64-bit word into a byte buffer */ +#define be_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 56); \ + (ptr)[1] = (uint8_t)(_x >> 48); \ + (ptr)[2] = (uint8_t)(_x >> 40); \ + (ptr)[3] = (uint8_t)(_x >> 32); \ + (ptr)[4] = (uint8_t)(_x >> 24); \ + (ptr)[5] = (uint8_t)(_x >> 16); \ + (ptr)[6] = (uint8_t)(_x >> 8); \ + (ptr)[7] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 64-bit word from a byte buffer */ +#define le_load_word64(ptr) \ + ((((uint64_t)((ptr)[7])) << 56) | \ + (((uint64_t)((ptr)[6])) << 48) | \ + (((uint64_t)((ptr)[5])) << 40) | \ + (((uint64_t)((ptr)[4])) << 32) | \ + (((uint64_t)((ptr)[3])) << 24) | \ + (((uint64_t)((ptr)[2])) << 16) | \ + (((uint64_t)((ptr)[1])) << 8) | \ + ((uint64_t)((ptr)[0]))) + +/* Store a little-endian 64-bit word into a byte buffer */ +#define le_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + (ptr)[4] = (uint8_t)(_x >> 32); \ + (ptr)[5] = (uint8_t)(_x >> 40); \ + (ptr)[6] = (uint8_t)(_x >> 48); \ + (ptr)[7] = (uint8_t)(_x >> 56); \ + } while (0) + +/* Load a big-endian 16-bit word from a byte buffer */ +#define be_load_word16(ptr) \ + ((((uint16_t)((ptr)[0])) << 8) | \ + ((uint16_t)((ptr)[1]))) + +/* Store a 
big-endian 16-bit word into a byte buffer */ +#define be_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 8); \ + (ptr)[1] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 16-bit word from a byte buffer */ +#define le_load_word16(ptr) \ + ((((uint16_t)((ptr)[1])) << 8) | \ + ((uint16_t)((ptr)[0]))) + +/* Store a little-endian 16-bit word into a byte buffer */ +#define le_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + } while (0) + +/* XOR a source byte buffer against a destination */ +#define lw_xor_block(dest, src, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ ^= *_src++; \ + --_len; \ + } \ + } while (0) + +/* XOR two source byte buffers and put the result in a destination buffer */ +#define lw_xor_block_2_src(dest, src1, src2, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ = *_src1++ ^ *_src2++; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time */ +#define lw_xor_block_2_dest(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest2++ = (*_dest++ ^= *_src++); \ + --_len; \ + } \ + } while (0) + +/* XOR two byte buffers and write to a destination which at the same + * time copying the contents of src2 to dest2 */ +#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src2++; \ + *_dest2++ = _temp; \ + *_dest++ = *_src1++ ^ _temp; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time. This version swaps the source value + * into the "dest" buffer */ +#define lw_xor_block_swap(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src++; \ + *_dest2++ = *_dest ^ _temp; \ + *_dest++ = _temp; \ + --_len; \ + } \ + } while (0) + +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + +/* Rotation macros for 32-bit arguments */ + +/* Generic left rotate */ +#define leftRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (32 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (32 - (bits))); \ + })) + +#if !LW_CRYPTO_ROTATE32_COMPOSED + +/* Left rotate by a specific number of bits. 
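A small, hedged self-check of the endian helpers and XOR-block macros defined above; the byte values are arbitrary and the helper name is illustrative.

#include <assert.h>
#include <stdint.h>
#include "internal-util.h"

/* Sketch: spot-check the be/le loads and the two-source XOR macro. */
static void check_util_macros(void)
{
    const uint8_t buf[4] = {0x01, 0x02, 0x03, 0x04};
    uint8_t a[4] = {0xFF, 0x00, 0xFF, 0x00};
    uint8_t b[4] = {0x0F, 0x0F, 0x0F, 0x0F};
    uint8_t c[4];

    assert(be_load_word32(buf) == 0x01020304U); /* first byte is most significant */
    assert(le_load_word32(buf) == 0x04030201U); /* first byte is least significant */

    lw_xor_block_2_src(c, a, b, 4);             /* c = a ^ b, byte by byte */
    assert(c[0] == 0xF0 && c[1] == 0x0F && c[2] == 0xF0 && c[3] == 0x0F);
}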
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1(a) (leftRotate((a), 1)) +#define leftRotate2(a) (leftRotate((a), 2)) +#define leftRotate3(a) (leftRotate((a), 3)) +#define leftRotate4(a) (leftRotate((a), 4)) +#define leftRotate5(a) (leftRotate((a), 5)) +#define leftRotate6(a) (leftRotate((a), 6)) +#define leftRotate7(a) (leftRotate((a), 7)) +#define leftRotate8(a) (leftRotate((a), 8)) +#define leftRotate9(a) (leftRotate((a), 9)) +#define leftRotate10(a) (leftRotate((a), 10)) +#define leftRotate11(a) (leftRotate((a), 11)) +#define leftRotate12(a) (leftRotate((a), 12)) +#define leftRotate13(a) (leftRotate((a), 13)) +#define leftRotate14(a) (leftRotate((a), 14)) +#define leftRotate15(a) (leftRotate((a), 15)) +#define leftRotate16(a) (leftRotate((a), 16)) +#define leftRotate17(a) (leftRotate((a), 17)) +#define leftRotate18(a) (leftRotate((a), 18)) +#define leftRotate19(a) (leftRotate((a), 19)) +#define leftRotate20(a) (leftRotate((a), 20)) +#define leftRotate21(a) (leftRotate((a), 21)) +#define leftRotate22(a) (leftRotate((a), 22)) +#define leftRotate23(a) (leftRotate((a), 23)) +#define leftRotate24(a) (leftRotate((a), 24)) +#define leftRotate25(a) (leftRotate((a), 25)) +#define leftRotate26(a) (leftRotate((a), 26)) +#define leftRotate27(a) (leftRotate((a), 27)) +#define leftRotate28(a) (leftRotate((a), 28)) +#define leftRotate29(a) (leftRotate((a), 29)) +#define leftRotate30(a) (leftRotate((a), 30)) +#define leftRotate31(a) (leftRotate((a), 31)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1(a) (rightRotate((a), 1)) +#define rightRotate2(a) (rightRotate((a), 2)) +#define rightRotate3(a) (rightRotate((a), 3)) +#define rightRotate4(a) (rightRotate((a), 4)) +#define rightRotate5(a) (rightRotate((a), 5)) +#define rightRotate6(a) (rightRotate((a), 6)) +#define rightRotate7(a) (rightRotate((a), 7)) +#define rightRotate8(a) (rightRotate((a), 8)) +#define rightRotate9(a) (rightRotate((a), 9)) +#define rightRotate10(a) (rightRotate((a), 10)) +#define rightRotate11(a) (rightRotate((a), 11)) +#define rightRotate12(a) (rightRotate((a), 12)) +#define rightRotate13(a) (rightRotate((a), 13)) +#define rightRotate14(a) (rightRotate((a), 14)) +#define rightRotate15(a) (rightRotate((a), 15)) +#define rightRotate16(a) (rightRotate((a), 16)) +#define rightRotate17(a) (rightRotate((a), 17)) +#define rightRotate18(a) (rightRotate((a), 18)) +#define rightRotate19(a) (rightRotate((a), 19)) +#define rightRotate20(a) (rightRotate((a), 20)) +#define rightRotate21(a) (rightRotate((a), 21)) +#define rightRotate22(a) (rightRotate((a), 22)) +#define rightRotate23(a) (rightRotate((a), 23)) +#define rightRotate24(a) (rightRotate((a), 24)) +#define rightRotate25(a) (rightRotate((a), 25)) +#define rightRotate26(a) (rightRotate((a), 26)) +#define rightRotate27(a) (rightRotate((a), 27)) +#define rightRotate28(a) (rightRotate((a), 28)) +#define rightRotate29(a) (rightRotate((a), 29)) +#define rightRotate30(a) (rightRotate((a), 30)) +#define rightRotate31(a) (rightRotate((a), 31)) + +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ 
+#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Rotation macros for 64-bit arguments */ + +/* Generic left rotate */ +#define leftRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (64 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (64 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. 
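The composed rotations above trade a single large shift for chains of 1-bit and 8-bit rotations, which are the cheap cases on AVR. A hedged sanity check that the composition agrees with the generic form, whichever definition LW_CRYPTO_ROTATE32_COMPOSED selects:

#include <assert.h>
#include <stdint.h>
#include "internal-util.h"

/* Sketch: composed rotations must agree with the generic rotations. */
static void check_composed_rotation(void)
{
    uint32_t x = 0x80000001U;
    assert(leftRotate5(x) == leftRotate(x, 5));     /* left 8 then right 3 */
    assert(rightRotate28(x) == rightRotate(x, 28)); /* equals a left rotation by 4 */
    /* Concretely: bit 31 wraps around to bit 4 and bit 0 moves up to bit 5 */
    assert(leftRotate(x, 5) == 0x00000030U);
}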
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_64(a) (leftRotate_64((a), 1)) +#define leftRotate2_64(a) (leftRotate_64((a), 2)) +#define leftRotate3_64(a) (leftRotate_64((a), 3)) +#define leftRotate4_64(a) (leftRotate_64((a), 4)) +#define leftRotate5_64(a) (leftRotate_64((a), 5)) +#define leftRotate6_64(a) (leftRotate_64((a), 6)) +#define leftRotate7_64(a) (leftRotate_64((a), 7)) +#define leftRotate8_64(a) (leftRotate_64((a), 8)) +#define leftRotate9_64(a) (leftRotate_64((a), 9)) +#define leftRotate10_64(a) (leftRotate_64((a), 10)) +#define leftRotate11_64(a) (leftRotate_64((a), 11)) +#define leftRotate12_64(a) (leftRotate_64((a), 12)) +#define leftRotate13_64(a) (leftRotate_64((a), 13)) +#define leftRotate14_64(a) (leftRotate_64((a), 14)) +#define leftRotate15_64(a) (leftRotate_64((a), 15)) +#define leftRotate16_64(a) (leftRotate_64((a), 16)) +#define leftRotate17_64(a) (leftRotate_64((a), 17)) +#define leftRotate18_64(a) (leftRotate_64((a), 18)) +#define leftRotate19_64(a) (leftRotate_64((a), 19)) +#define leftRotate20_64(a) (leftRotate_64((a), 20)) +#define leftRotate21_64(a) (leftRotate_64((a), 21)) +#define leftRotate22_64(a) (leftRotate_64((a), 22)) +#define leftRotate23_64(a) (leftRotate_64((a), 23)) +#define leftRotate24_64(a) (leftRotate_64((a), 24)) +#define leftRotate25_64(a) (leftRotate_64((a), 25)) +#define leftRotate26_64(a) (leftRotate_64((a), 26)) +#define leftRotate27_64(a) (leftRotate_64((a), 27)) +#define leftRotate28_64(a) (leftRotate_64((a), 28)) +#define leftRotate29_64(a) (leftRotate_64((a), 29)) +#define leftRotate30_64(a) (leftRotate_64((a), 30)) +#define leftRotate31_64(a) (leftRotate_64((a), 31)) +#define leftRotate32_64(a) (leftRotate_64((a), 32)) +#define leftRotate33_64(a) (leftRotate_64((a), 33)) +#define leftRotate34_64(a) (leftRotate_64((a), 34)) +#define leftRotate35_64(a) (leftRotate_64((a), 35)) +#define leftRotate36_64(a) (leftRotate_64((a), 36)) +#define leftRotate37_64(a) (leftRotate_64((a), 37)) +#define leftRotate38_64(a) (leftRotate_64((a), 38)) +#define leftRotate39_64(a) (leftRotate_64((a), 39)) +#define leftRotate40_64(a) (leftRotate_64((a), 40)) +#define leftRotate41_64(a) (leftRotate_64((a), 41)) +#define leftRotate42_64(a) (leftRotate_64((a), 42)) +#define leftRotate43_64(a) (leftRotate_64((a), 43)) +#define leftRotate44_64(a) (leftRotate_64((a), 44)) +#define leftRotate45_64(a) (leftRotate_64((a), 45)) +#define leftRotate46_64(a) (leftRotate_64((a), 46)) +#define leftRotate47_64(a) (leftRotate_64((a), 47)) +#define leftRotate48_64(a) (leftRotate_64((a), 48)) +#define leftRotate49_64(a) (leftRotate_64((a), 49)) +#define leftRotate50_64(a) (leftRotate_64((a), 50)) +#define leftRotate51_64(a) (leftRotate_64((a), 51)) +#define leftRotate52_64(a) (leftRotate_64((a), 52)) +#define leftRotate53_64(a) (leftRotate_64((a), 53)) +#define leftRotate54_64(a) (leftRotate_64((a), 54)) +#define leftRotate55_64(a) (leftRotate_64((a), 55)) +#define leftRotate56_64(a) (leftRotate_64((a), 56)) +#define leftRotate57_64(a) (leftRotate_64((a), 57)) +#define leftRotate58_64(a) (leftRotate_64((a), 58)) +#define leftRotate59_64(a) (leftRotate_64((a), 59)) +#define leftRotate60_64(a) (leftRotate_64((a), 60)) +#define leftRotate61_64(a) (leftRotate_64((a), 61)) +#define leftRotate62_64(a) (leftRotate_64((a), 62)) +#define leftRotate63_64(a) (leftRotate_64((a), 63)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_64(a) (rightRotate_64((a), 1)) +#define rightRotate2_64(a) (rightRotate_64((a), 2)) +#define rightRotate3_64(a) (rightRotate_64((a), 3)) +#define rightRotate4_64(a) (rightRotate_64((a), 4)) +#define rightRotate5_64(a) (rightRotate_64((a), 5)) +#define rightRotate6_64(a) (rightRotate_64((a), 6)) +#define rightRotate7_64(a) (rightRotate_64((a), 7)) +#define rightRotate8_64(a) (rightRotate_64((a), 8)) +#define rightRotate9_64(a) (rightRotate_64((a), 9)) +#define rightRotate10_64(a) (rightRotate_64((a), 10)) +#define rightRotate11_64(a) (rightRotate_64((a), 11)) +#define rightRotate12_64(a) (rightRotate_64((a), 12)) +#define rightRotate13_64(a) (rightRotate_64((a), 13)) +#define rightRotate14_64(a) (rightRotate_64((a), 14)) +#define rightRotate15_64(a) (rightRotate_64((a), 15)) +#define rightRotate16_64(a) (rightRotate_64((a), 16)) +#define rightRotate17_64(a) (rightRotate_64((a), 17)) +#define rightRotate18_64(a) (rightRotate_64((a), 18)) +#define rightRotate19_64(a) (rightRotate_64((a), 19)) +#define rightRotate20_64(a) (rightRotate_64((a), 20)) +#define rightRotate21_64(a) (rightRotate_64((a), 21)) +#define rightRotate22_64(a) (rightRotate_64((a), 22)) +#define rightRotate23_64(a) (rightRotate_64((a), 23)) +#define rightRotate24_64(a) (rightRotate_64((a), 24)) +#define rightRotate25_64(a) (rightRotate_64((a), 25)) +#define rightRotate26_64(a) (rightRotate_64((a), 26)) +#define rightRotate27_64(a) (rightRotate_64((a), 27)) +#define rightRotate28_64(a) (rightRotate_64((a), 28)) +#define rightRotate29_64(a) (rightRotate_64((a), 29)) +#define rightRotate30_64(a) (rightRotate_64((a), 30)) +#define rightRotate31_64(a) (rightRotate_64((a), 31)) +#define rightRotate32_64(a) (rightRotate_64((a), 32)) +#define rightRotate33_64(a) (rightRotate_64((a), 33)) +#define rightRotate34_64(a) (rightRotate_64((a), 34)) +#define rightRotate35_64(a) (rightRotate_64((a), 35)) +#define rightRotate36_64(a) (rightRotate_64((a), 36)) +#define rightRotate37_64(a) (rightRotate_64((a), 37)) +#define rightRotate38_64(a) (rightRotate_64((a), 38)) +#define rightRotate39_64(a) (rightRotate_64((a), 39)) +#define rightRotate40_64(a) (rightRotate_64((a), 40)) +#define rightRotate41_64(a) (rightRotate_64((a), 41)) +#define rightRotate42_64(a) (rightRotate_64((a), 42)) +#define rightRotate43_64(a) (rightRotate_64((a), 43)) +#define rightRotate44_64(a) (rightRotate_64((a), 44)) +#define rightRotate45_64(a) (rightRotate_64((a), 45)) +#define rightRotate46_64(a) (rightRotate_64((a), 46)) +#define rightRotate47_64(a) (rightRotate_64((a), 47)) +#define rightRotate48_64(a) (rightRotate_64((a), 48)) +#define rightRotate49_64(a) (rightRotate_64((a), 49)) +#define rightRotate50_64(a) (rightRotate_64((a), 50)) +#define rightRotate51_64(a) (rightRotate_64((a), 51)) +#define rightRotate52_64(a) (rightRotate_64((a), 52)) +#define rightRotate53_64(a) (rightRotate_64((a), 53)) +#define rightRotate54_64(a) (rightRotate_64((a), 54)) +#define rightRotate55_64(a) (rightRotate_64((a), 55)) +#define rightRotate56_64(a) (rightRotate_64((a), 56)) +#define rightRotate57_64(a) (rightRotate_64((a), 57)) +#define rightRotate58_64(a) (rightRotate_64((a), 58)) +#define rightRotate59_64(a) (rightRotate_64((a), 59)) +#define rightRotate60_64(a) (rightRotate_64((a), 60)) +#define rightRotate61_64(a) (rightRotate_64((a), 61)) +#define rightRotate62_64(a) (rightRotate_64((a), 62)) +#define rightRotate63_64(a) (rightRotate_64((a), 63)) + +/* 
Rotate a 16-bit value left by a number of bits */ +#define leftRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (16 - (bits))); \ + })) + +/* Rotate a 16-bit value right by a number of bits */ +#define rightRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (16 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_16(a) (leftRotate_16((a), 1)) +#define leftRotate2_16(a) (leftRotate_16((a), 2)) +#define leftRotate3_16(a) (leftRotate_16((a), 3)) +#define leftRotate4_16(a) (leftRotate_16((a), 4)) +#define leftRotate5_16(a) (leftRotate_16((a), 5)) +#define leftRotate6_16(a) (leftRotate_16((a), 6)) +#define leftRotate7_16(a) (leftRotate_16((a), 7)) +#define leftRotate8_16(a) (leftRotate_16((a), 8)) +#define leftRotate9_16(a) (leftRotate_16((a), 9)) +#define leftRotate10_16(a) (leftRotate_16((a), 10)) +#define leftRotate11_16(a) (leftRotate_16((a), 11)) +#define leftRotate12_16(a) (leftRotate_16((a), 12)) +#define leftRotate13_16(a) (leftRotate_16((a), 13)) +#define leftRotate14_16(a) (leftRotate_16((a), 14)) +#define leftRotate15_16(a) (leftRotate_16((a), 15)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_16(a) (rightRotate_16((a), 1)) +#define rightRotate2_16(a) (rightRotate_16((a), 2)) +#define rightRotate3_16(a) (rightRotate_16((a), 3)) +#define rightRotate4_16(a) (rightRotate_16((a), 4)) +#define rightRotate5_16(a) (rightRotate_16((a), 5)) +#define rightRotate6_16(a) (rightRotate_16((a), 6)) +#define rightRotate7_16(a) (rightRotate_16((a), 7)) +#define rightRotate8_16(a) (rightRotate_16((a), 8)) +#define rightRotate9_16(a) (rightRotate_16((a), 9)) +#define rightRotate10_16(a) (rightRotate_16((a), 10)) +#define rightRotate11_16(a) (rightRotate_16((a), 11)) +#define rightRotate12_16(a) (rightRotate_16((a), 12)) +#define rightRotate13_16(a) (rightRotate_16((a), 13)) +#define rightRotate14_16(a) (rightRotate_16((a), 14)) +#define rightRotate15_16(a) (rightRotate_16((a), 15)) + +/* Rotate an 8-bit value left by a number of bits */ +#define leftRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (8 - (bits))); \ + })) + +/* Rotate an 8-bit value right by a number of bits */ +#define rightRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (8 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_8(a) (leftRotate_8((a), 1)) +#define leftRotate2_8(a) (leftRotate_8((a), 2)) +#define leftRotate3_8(a) (leftRotate_8((a), 3)) +#define leftRotate4_8(a) (leftRotate_8((a), 4)) +#define leftRotate5_8(a) (leftRotate_8((a), 5)) +#define leftRotate6_8(a) (leftRotate_8((a), 6)) +#define leftRotate7_8(a) (leftRotate_8((a), 7)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_8(a) (rightRotate_8((a), 1)) +#define rightRotate2_8(a) (rightRotate_8((a), 2)) +#define rightRotate3_8(a) (rightRotate_8((a), 3)) +#define rightRotate4_8(a) (rightRotate_8((a), 4)) +#define rightRotate5_8(a) (rightRotate_8((a), 5)) +#define rightRotate6_8(a) (rightRotate_8((a), 6)) +#define rightRotate7_8(a) (rightRotate_8((a), 7)) + +#endif diff --git a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/aead-common.c b/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/aead-common.h b/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
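/* Illustrative sketch: the generic rotate macros above all expand to a shift/OR pair, which is a single cycle on cores with a barrel shifter but costly on small MCUs. As the comments note, they can be replaced per platform; a rotation by a whole byte, for instance, is just a byte permutation. The override below is a sketch only, kept disabled by the #if 0 guard; the macro name comes from this header. */
#if 0
#undef leftRotate8_16
#define leftRotate8_16(a) \
    (__extension__ ({ \
        uint16_t _temp = (a); \
        (uint16_t)((_temp << 8) | (_temp >> 8)); /* rotate by 8 == swap the two bytes */ \
    }))
#endif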
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
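/* Illustrative sketch: how a caller drives one of these function pointers. The helper name and buffer sizes are ours, not the library's; a real caller would size the key, nonce and tag buffers from the aead_cipher_t meta block defined later in this header. */
static int example_encrypt_call(aead_cipher_encrypt_t encrypt,
                                const unsigned char *key,
                                const unsigned char *nonce)
{
    unsigned char msg[4] = {'t', 'e', 's', 't'};
    unsigned char out[4 + 32];      /* plaintext plus room for the tag */
    unsigned long long outlen = 0;

    /* On success the callee sets outlen to mlen plus the tag length */
    return encrypt(out, &outlen, msg, sizeof(msg),
                   NULL, 0,         /* no associated data */
                   NULL,            /* nsec: unused by these schemes */
                   nonce, key);
}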
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Absorbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions.
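/* Illustrative sketch: the point of aead_cipher_t is that generic test or benchmark drivers can treat every cipher uniformly through its meta block. The helper below is ours (stdio.h assumed); it could be called, for example, as example_describe_cipher(&comet_128_cham_cipher) for the cipher defined later in this patch. */
#include <stdio.h>

static void example_describe_cipher(const aead_cipher_t *cipher)
{
    printf("%s: key=%u nonce=%u tag=%u bytes, %s-endian nonce convention\n",
           cipher->name, cipher->key_len, cipher->nonce_len, cipher->tag_len,
           (cipher->flags & AEAD_FLAG_LITTLE_ENDIAN) ? "little" : "big");
}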
Extensible Output Functions (XOFs) should - * provide the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/api.h b/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/comet.c b/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/comet.c deleted file mode 100644 index ceb0fd6..0000000 --- a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/comet.c +++ /dev/null @@ -1,556 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd.
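/* Illustrative sketch: the masking arithmetic behind aead_check_tag, restated. "accum" is the OR of the XOR of every tag byte pair, so it is 0 exactly when the tags match and somewhere in 1..255 otherwise; the shift turns that into an all-ones or all-zero mask with no data-dependent branch. The helper name is ours, the arithmetic is the one shown in aead-common.c above. */
static int example_tag_mask(int accum)
{
    /* accum == 0: (0 - 1) >> 8 == -1, so the plaintext mask is all-ones and
     *             the return value ~(-1) == 0 signals success.
     * accum != 0: (accum - 1) >> 8 == 0, so "plaintext &= mask" wipes the
     *             buffer and the return value ~0 == -1 signals failure. */
    int mask = (accum - 1) >> 8;
    return ~mask;
}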
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "comet.h" -#include "internal-cham.h" -#include "internal-speck64.h" -#include "internal-util.h" -#include - -aead_cipher_t const comet_128_cham_cipher = { - "COMET-128_CHAM-128/128", - COMET_KEY_SIZE, - COMET_128_NONCE_SIZE, - COMET_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - comet_128_cham_aead_encrypt, - comet_128_cham_aead_decrypt -}; - -aead_cipher_t const comet_64_cham_cipher = { - "COMET-64_CHAM-64/128", - COMET_KEY_SIZE, - COMET_64_NONCE_SIZE, - COMET_64_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - comet_64_cham_aead_encrypt, - comet_64_cham_aead_decrypt -}; - -aead_cipher_t const comet_64_speck_cipher = { - "COMET-64_SPECK-64/128", - COMET_KEY_SIZE, - COMET_64_NONCE_SIZE, - COMET_64_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - comet_64_speck_aead_encrypt, - comet_64_speck_aead_decrypt -}; - -/** - * \brief Adjusts the Z state to generate the key to use on the next block. - * - * \param Z The Z state to be adjusted. - */ -static void comet_adjust_block_key(unsigned char Z[16]) -{ - /* Doubles the 64-bit prefix to Z in the F(2^64) field */ - unsigned index; - unsigned char mask = (unsigned char)(((signed char)(Z[7])) >> 7); - for (index = 7; index > 0; --index) - Z[index] = (Z[index] << 1) | (Z[index - 1] >> 7); - Z[0] = (Z[0] << 1) ^ (mask & 0x1B); -} - -/* Function prototype for the encrypt function of the underlying cipher */ -typedef void (*comet_encrypt_block_t) - (const unsigned char *key, unsigned char *output, - const unsigned char *input); - -/** - * \brief Processes the associated data for COMET. - * - * \param Y Internal COMET block state of \a block_size bytes in size. - * \param Z Internal COMET key state of 16 bytes in size. - * \param block_size Size of the block for the underlying cipher. - * \param encrypt Encryption function for the underlying cipher. - * \param ad Points to the associated data. - * \param adlen Number of bytes of associated data; must be >= 1.
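/* Illustrative sketch: comet_adjust_block_key above is "doubling" in GF(2^64) with reduction polynomial x^64 + x^4 + x^3 + x + 1, which is where the 0x1B constant comes from. On a 64-bit word (assuming the little-endian byte packing implied by the loop, with Z[7] holding the top byte) the same update is: */
#include <stdint.h>

static uint64_t example_gf64_double(uint64_t z)
{
    uint64_t carry = z >> 63;                         /* top bit before the shift */
    return (z << 1) ^ (((uint64_t)0 - carry) & 0x1B); /* branch-free reduction */
}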
- */ -static void comet_process_ad - (unsigned char *Y, unsigned char Z[16], unsigned block_size, - comet_encrypt_block_t encrypt, const unsigned char *ad, - unsigned long long adlen) -{ - /* Domain separator for associated data */ - Z[15] ^= 0x08; - - /* Process all associated data blocks except the last partial block */ - while (adlen >= block_size) { - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - lw_xor_block(Y, ad, block_size); - ad += block_size; - adlen -= block_size; - } - - /* Pad and process the partial block on the end */ - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - Z[15] ^= 0x10; - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - lw_xor_block(Y, ad, temp); - Y[temp] ^= 0x01; - } -} - -/** - * \brief Shuffles the words in a 128-bit block. - * - * \param out The output block after shuffling. - * \param in The input block to be shuffled. - */ -STATIC_INLINE void comet_shuffle_block_128 - (unsigned char out[16], const unsigned char in[16]) -{ - uint32_t x0, x1, x2, x3; - x0 = le_load_word32(in); - x1 = le_load_word32(in + 4); - x2 = le_load_word32(in + 8); - x3 = le_load_word32(in + 12); - le_store_word32(out, x3); - le_store_word32(out + 4, rightRotate1(x2)); - le_store_word32(out + 8, x0); - le_store_word32(out + 12, x1); -} - -/** - * \brief Shuffles the words in a 64-bit block. - * - * \param out The output block after shuffling. - * \param in The input block to be shuffled. - */ -STATIC_INLINE void comet_shuffle_block_64 - (unsigned char out[8], const unsigned char in[8]) -{ - uint32_t x01 = le_load_word32(in); - uint16_t x2 = ((uint16_t)(in[4])) | (((uint16_t)(in[5])) << 8); - out[0] = in[6]; - out[1] = in[7]; - x2 = (x2 >> 1) | (x2 << 15); - out[2] = (uint8_t)x2; - out[3] = (uint8_t)(x2 >> 8); - le_store_word32(out + 4, x01); -} - -/** - * \brief Encrypts the plaintext with COMET-128 to produce the ciphertext. - * - * \param Y Internal COMET block state of 16 bytes in size. - * \param Z Internal COMET key state of 16 bytes in size. - * \param encrypt Encryption function for the underlying cipher. - * \param c Ciphertext on output. - * \param m Plaintext message on input. - * \param mlen Length of the plaintext message and the ciphertext. - */ -static void comet_encrypt_128 - (unsigned char Y[16], unsigned char Z[16], - comet_encrypt_block_t encrypt, unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char Ys[16]; - - /* Domain separator for payload data */ - Z[15] ^= 0x20; - - /* Process all payload data blocks except the last partial block */ - while (mlen >= 16) { - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_128(Ys, Y); - lw_xor_block(Y, m, 16); - lw_xor_block_2_src(c, m, Ys, 16); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the partial block on the end */ - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - Z[15] ^= 0x40; - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_128(Ys, Y); - lw_xor_block(Y, m, temp); - lw_xor_block_2_src(c, m, Ys, temp); - Y[temp] ^= 0x01; - } -} - -/** - * \brief Encrypts the plaintext with COMET-64 to produce the ciphertext. - * - * \param Y Internal COMET block state of 8 bytes in size. - * \param Z Internal COMET key state of 16 bytes in size. - * \param encrypt Encryption function for the underlying cipher. - * \param c Ciphertext on output. - * \param m Plaintext message on input. - * \param mlen Length of the plaintext message and the ciphertext. 
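/* Illustrative sketch: COMET steers the mode with single bits toggled into the top byte Z[15] of the key state. The values below are the ones used throughout this file; the names are ours and are only here to make the flow easier to follow. */
#define EXAMPLE_COMET_CTRL_AD          0x08 /* associated data follows        */
#define EXAMPLE_COMET_CTRL_PARTIAL_AD  0x10 /* final AD block needed padding  */
#define EXAMPLE_COMET_CTRL_PAYLOAD     0x20 /* message payload follows        */
#define EXAMPLE_COMET_CTRL_PARTIAL_MSG 0x40 /* final payload block was padded */
#define EXAMPLE_COMET_CTRL_TAG         0x80 /* tag generation is next         */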
- */ -static void comet_encrypt_64 - (unsigned char Y[8], unsigned char Z[16], - comet_encrypt_block_t encrypt, unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char Ys[8]; - - /* Domain separator for payload data */ - Z[15] ^= 0x20; - - /* Process all payload data blocks except the last partial block */ - while (mlen >= 8) { - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_64(Ys, Y); - lw_xor_block(Y, m, 8); - lw_xor_block_2_src(c, m, Ys, 8); - c += 8; - m += 8; - mlen -= 8; - } - - /* Pad and process the partial block on the end */ - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - Z[15] ^= 0x40; - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_64(Ys, Y); - lw_xor_block(Y, m, temp); - lw_xor_block_2_src(c, m, Ys, temp); - Y[temp] ^= 0x01; - } -} - -/** - * \brief Decrypts the ciphertext with COMET-128 to produce the plaintext. - * - * \param Y Internal COMET block state of 16 bytes in size. - * \param Z Internal COMET key state of 16 bytes in size. - * \param encrypt Encryption function for the underlying cipher. - * \param m Plaintext message on output. - * \param c Ciphertext on input. - * \param mlen Length of the plaintext message and the ciphertext. - */ -static void comet_decrypt_128 - (unsigned char Y[16], unsigned char Z[16], - comet_encrypt_block_t encrypt, unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - unsigned char Ys[16]; - - /* Domain separator for payload data */ - Z[15] ^= 0x20; - - /* Process all payload data blocks except the last partial block */ - while (mlen >= 16) { - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_128(Ys, Y); - lw_xor_block_2_src(m, c, Ys, 16); - lw_xor_block(Y, m, 16); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the partial block on the end */ - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - Z[15] ^= 0x40; - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_128(Ys, Y); - lw_xor_block_2_src(m, c, Ys, temp); - lw_xor_block(Y, m, temp); - Y[temp] ^= 0x01; - } -} - -/** - * \brief Decrypts the ciphertext with COMET-64 to produce the plaintext. - * - * \param Y Internal COMET block state of 8 bytes in size. - * \param Z Internal COMET key state of 16 bytes in size. - * \param encrypt Encryption function for the underlying cipher. - * \param m Plaintext message on output. - * \param c Ciphertext on input. - * \param mlen Length of the plaintext message and the ciphertext. 
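/* Illustrative sketch: the lw_xor_block* helpers used throughout this file live in internal-util.h. Reference versions with the behaviour implied by the call sites are given below; the names and the size_t parameter are ours, not the library's exact signatures. */
#include <stddef.h>

static void example_xor_block(unsigned char *dest,
                              const unsigned char *src, size_t len)
{
    while (len-- > 0)
        *dest++ ^= *src++;           /* dest ^= src, byte by byte */
}

static void example_xor_block_2_src(unsigned char *dest,
                                    const unsigned char *src1,
                                    const unsigned char *src2, size_t len)
{
    while (len-- > 0)
        *dest++ = *src1++ ^ *src2++; /* dest = src1 ^ src2 */
}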
- */ -static void comet_decrypt_64 - (unsigned char Y[8], unsigned char Z[16], - comet_encrypt_block_t encrypt, unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - unsigned char Ys[8]; - - /* Domain separator for payload data */ - Z[15] ^= 0x20; - - /* Process all payload data blocks except the last partial block */ - while (mlen >= 8) { - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_64(Ys, Y); - lw_xor_block_2_src(m, c, Ys, 8); - lw_xor_block(Y, m, 8); - c += 8; - m += 8; - mlen -= 8; - } - - /* Pad and process the partial block on the end */ - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - Z[15] ^= 0x40; - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_64(Ys, Y); - lw_xor_block_2_src(m, c, Ys, temp); - lw_xor_block(Y, m, temp); - Y[temp] ^= 0x01; - } -} - -int comet_128_cham_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char Y[16]; - unsigned char Z[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + COMET_128_TAG_SIZE; - - /* Set up the initial state of Y and Z */ - memcpy(Y, k, 16); - cham128_128_encrypt(Y, Z, npub); - - /* Process the associated data */ - if (adlen > 0) - comet_process_ad(Y, Z, 16, cham128_128_encrypt, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) - comet_encrypt_128(Y, Z, cham128_128_encrypt, c, m, mlen); - - /* Generate the authentication tag */ - Z[15] ^= 0x80; - comet_adjust_block_key(Z); - cham128_128_encrypt(Z, c + mlen, Y); - return 0; -} - -int comet_128_cham_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char Y[16]; - unsigned char Z[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < COMET_128_TAG_SIZE) - return -1; - *mlen = clen - COMET_128_TAG_SIZE; - - /* Set up the initial state of Y and Z */ - memcpy(Y, k, 16); - cham128_128_encrypt(Y, Z, npub); - - /* Process the associated data */ - if (adlen > 0) - comet_process_ad(Y, Z, 16, cham128_128_encrypt, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > COMET_128_TAG_SIZE) - comet_decrypt_128(Y, Z, cham128_128_encrypt, m, c, *mlen); - - /* Check the authentication tag */ - Z[15] ^= 0x80; - comet_adjust_block_key(Z); - cham128_128_encrypt(Z, Y, Y); - return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_128_TAG_SIZE); -} - -int comet_64_cham_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char Y[8]; - unsigned char Z[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + COMET_64_TAG_SIZE; - - /* Set up the initial state of Y and Z */ - memset(Y, 0, 8); - cham64_128_encrypt(k, Y, Y); - memcpy(Z, npub, 15); - Z[15] = 0; - lw_xor_block(Z, k, 16); - - /* Process the associated data */ - if (adlen > 0) - comet_process_ad(Y, Z, 8, cham64_128_encrypt, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if 
(mlen > 0) - comet_encrypt_64(Y, Z, cham64_128_encrypt, c, m, mlen); - - /* Generate the authentication tag */ - Z[15] ^= 0x80; - comet_adjust_block_key(Z); - cham64_128_encrypt(Z, c + mlen, Y); - return 0; -} - -int comet_64_cham_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char Y[8]; - unsigned char Z[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < COMET_64_TAG_SIZE) - return -1; - *mlen = clen - COMET_64_TAG_SIZE; - - /* Set up the initial state of Y and Z */ - memset(Y, 0, 8); - cham64_128_encrypt(k, Y, Y); - memcpy(Z, npub, 15); - Z[15] = 0; - lw_xor_block(Z, k, 16); - - /* Process the associated data */ - if (adlen > 0) - comet_process_ad(Y, Z, 8, cham64_128_encrypt, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > COMET_64_TAG_SIZE) - comet_decrypt_64(Y, Z, cham64_128_encrypt, m, c, *mlen); - - /* Check the authentication tag */ - Z[15] ^= 0x80; - comet_adjust_block_key(Z); - cham64_128_encrypt(Z, Y, Y); - return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_64_TAG_SIZE); -} - -int comet_64_speck_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char Y[8]; - unsigned char Z[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + COMET_64_TAG_SIZE; - - /* Set up the initial state of Y and Z */ - memset(Y, 0, 8); - speck64_128_encrypt(k, Y, Y); - memcpy(Z, npub, 15); - Z[15] = 0; - lw_xor_block(Z, k, 16); - - /* Process the associated data */ - if (adlen > 0) - comet_process_ad(Y, Z, 8, speck64_128_encrypt, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) - comet_encrypt_64(Y, Z, speck64_128_encrypt, c, m, mlen); - - /* Generate the authentication tag */ - Z[15] ^= 0x80; - comet_adjust_block_key(Z); - speck64_128_encrypt(Z, c + mlen, Y); - return 0; -} - -int comet_64_speck_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char Y[8]; - unsigned char Z[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < COMET_64_TAG_SIZE) - return -1; - *mlen = clen - COMET_64_TAG_SIZE; - - /* Set up the initial state of Y and Z */ - memset(Y, 0, 8); - speck64_128_encrypt(k, Y, Y); - memcpy(Z, npub, 15); - Z[15] = 0; - lw_xor_block(Z, k, 16); - - /* Process the associated data */ - if (adlen > 0) - comet_process_ad(Y, Z, 8, speck64_128_encrypt, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > COMET_64_TAG_SIZE) - comet_decrypt_64(Y, Z, speck64_128_encrypt, m, c, *mlen); - - /* Check the authentication tag */ - Z[15] ^= 0x80; - comet_adjust_block_key(Z); - speck64_128_encrypt(Z, Y, Y); - return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_64_TAG_SIZE); -} diff --git a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/comet.h b/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/comet.h deleted file mode 100644 index 
d1b24a6..0000000 --- a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/comet.h +++ /dev/null @@ -1,274 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_COMET_H -#define LWCRYPTO_COMET_H - -#include "aead-common.h" - -/** - * \file comet.h - * \brief COMET authenticated encryption algorithm. - * - * COMET is a family of authenticated encryption algorithms that are - * built around an underlying block cipher. This library implements - * three members of the family: - * - * \li COMET-128_CHAM-128/128 which has a 128-bit key, a 128-bit nonce, - * and a 128-bit tag, built around the CHAM-128/128 block cipher. - * \li COMET-64_CHAM-64/128 which has a 128-bit key, a 120-bit nonce, - * and a 64-bit tag, built around the CHAM-64/128 block cipher. - * \li COMET-64_SPECK-64/128 which has a 128-bit key, a 120-bit nonce, - * and a 64-bit tag, built around the SPECK-64/128 block cipher. - * - * There is also another family member that is built around AES but - * this library does not implement that version. - * - * References: https://www.isical.ac.in/~lightweight/comet/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all COMET family members. - */ -#define COMET_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for the 128-bit versions of COMET. - */ -#define COMET_128_TAG_SIZE 16 - -/** - * \brief Size of the authentication tag for the 64-bit versions of COMET. - */ -#define COMET_64_TAG_SIZE 8 - -/** - * \brief Size of the nonce for the 128-bit versions of COMET. - */ -#define COMET_128_NONCE_SIZE 16 - -/** - * \brief Size of the nonce for the 64-bit versions of COMET. - */ -#define COMET_64_NONCE_SIZE 15 - -/** - * \brief Meta-information block for the COMET-128_CHAM-128/128 cipher. - */ -extern aead_cipher_t const comet_128_cham_cipher; - -/** - * \brief Meta-information block for the COMET-64_CHAM-64/128 cipher. - */ -extern aead_cipher_t const comet_64_cham_cipher; - -/** - * \brief Meta-information block for the COMET-64_SPECK-64/128 cipher. - */ -extern aead_cipher_t const comet_64_speck_cipher; - -/** - * \brief Encrypts and authenticates a packet with COMET-128_CHAM-128/128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. 
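/* Illustrative sketch: the three variants implemented here share the 16-byte key and differ only in nonce and tag width. Sizing buffers from the constants above looks like this; the 64-byte message length is just an example. */
static unsigned char example_key[COMET_KEY_SIZE];            /* 16 bytes, all variants */
static unsigned char example_nonce128[COMET_128_NONCE_SIZE]; /* 16 bytes               */
static unsigned char example_nonce64[COMET_64_NONCE_SIZE];   /* 15 bytes               */
static unsigned char example_ct128[64 + COMET_128_TAG_SIZE]; /* message + 16-byte tag  */
static unsigned char example_ct64[64 + COMET_64_TAG_SIZE];   /* message + 8-byte tag   */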
- * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa comet_128_cham_aead_decrypt() - */ -int comet_128_cham_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with COMET-128_CHAM-128/128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa comet_128_cham_aead_encrypt() - */ -int comet_128_cham_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with COMET-64_CHAM-64/128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa comet_64_cham_aead_decrypt() - */ -int comet_64_cham_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with COMET-64_CHAM-64/128. 
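/* Illustrative sketch: the expected calling pattern for the ..._aead_decrypt functions declared in this header. On a tag mismatch the library has already wiped the plaintext buffer, so the caller only needs to honour the return code. The helper name is ours. */
static int example_open_packet(unsigned char *plaintext,
                               unsigned long long *plaintext_len,
                               const unsigned char *packet,
                               unsigned long long packet_len,
                               const unsigned char *nonce,
                               const unsigned char *key)
{
    int err = comet_128_cham_aead_decrypt(plaintext, plaintext_len, NULL,
                                          packet, packet_len,
                                          NULL, 0,   /* no associated data */
                                          nonce, key);
    return err; /* 0: ok, -1: bad tag, other negative: bad parameters */
}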
- * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa comet_64_cham_aead_encrypt() - */ -int comet_64_cham_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with COMET-64_SPECK-64/128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa comet_64_speck_aead_decrypt() - */ -int comet_64_speck_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with COMET-64_SPECK-64/128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- * - * \sa comet_64_speck_aead_encrypt() - */ -int comet_64_speck_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/encrypt.c b/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/encrypt.c deleted file mode 100644 index 66c5ad7..0000000 --- a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "comet.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return comet_128_cham_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return comet_128_cham_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-cham-avr.S b/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-cham-avr.S deleted file mode 100644 index 514a09a..0000000 --- a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-cham-avr.S +++ /dev/null @@ -1,915 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global cham128_128_encrypt - .type cham128_128_encrypt, @function -cham128_128_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 48 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - movw r22,r18 - movw r24,r20 - movw r6,r18 - movw r8,r20 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r18,r6 - eor r19,r7 - eor r20,r8 - eor r21,r9 - movw r2,r18 - movw r4,r20 - eor r18,r25 - eor r19,r22 - eor r20,r23 - eor r21,r24 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r2,r9 - eor r3,r6 - eor r4,r7 - eor r5,r8 - std Y+21,r2 - std Y+22,r3 - std Y+23,r4 - std Y+24,r5 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - movw r22,r18 - movw r24,r20 - movw r6,r18 - movw r8,r20 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r18,r6 - eor r19,r7 - eor r20,r8 - eor r21,r9 - movw r2,r18 - movw r4,r20 - eor r18,r25 - eor r19,r22 - eor r20,r23 - eor r21,r24 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r2,r9 - eor r3,r6 - eor r4,r7 - eor r5,r8 - std Y+17,r2 - std Y+18,r3 - std Y+19,r4 - std Y+20,r5 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - movw r22,r18 - movw r24,r20 - movw r6,r18 - movw r8,r20 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r18,r6 - eor r19,r7 - eor r20,r8 - eor r21,r9 - movw r2,r18 - 
movw r4,r20 - eor r18,r25 - eor r19,r22 - eor r20,r23 - eor r21,r24 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r2,r9 - eor r3,r6 - eor r4,r7 - eor r5,r8 - std Y+29,r2 - std Y+30,r3 - std Y+31,r4 - std Y+32,r5 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - movw r22,r18 - movw r24,r20 - movw r6,r18 - movw r8,r20 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r18,r6 - eor r19,r7 - eor r20,r8 - eor r21,r9 - movw r2,r18 - movw r4,r20 - eor r18,r25 - eor r19,r22 - eor r20,r23 - eor r21,r24 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r2,r9 - eor r3,r6 - eor r4,r7 - eor r5,r8 - std Y+25,r2 - std Y+26,r3 - std Y+27,r4 - std Y+28,r5 - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r18,X+ - ld r22,X+ - ld r23,X+ - ld r24,X+ - ld r25,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - mov r30,r1 -197: - eor r19,r30 - movw r10,r22 - movw r12,r24 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - ldd r0,Y+1 - eor r10,r0 - ldd r0,Y+2 - eor r11,r0 - ldd r0,Y+3 - eor r12,r0 - ldd r0,Y+4 - eor r13,r0 - add r19,r10 - adc r20,r11 - adc r21,r12 - adc r18,r13 - inc r30 - eor r22,r30 - mov r0,r5 - mov r5,r4 - mov r4,r3 - mov r3,r2 - mov r2,r0 - ldd r10,Y+5 - ldd r11,Y+6 - ldd r12,Y+7 - ldd r13,Y+8 - eor r10,r2 - eor r11,r3 - eor r12,r4 - eor r13,r5 - add r22,r10 - adc r23,r11 - adc r24,r12 - adc r25,r13 - lsl r22 - rol r23 - rol r24 - rol r25 - adc r22,r1 - inc r30 - eor r3,r30 - movw r10,r6 - movw r12,r8 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - ldd r0,Y+9 - eor r10,r0 - ldd r0,Y+10 - eor r11,r0 - ldd r0,Y+11 - eor r12,r0 - ldd r0,Y+12 - eor r13,r0 - add r3,r10 - adc r4,r11 - adc r5,r12 - adc r2,r13 - inc r30 - eor r6,r30 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r10,Y+13 - ldd r11,Y+14 - ldd r12,Y+15 - ldd r13,Y+16 - eor r10,r18 - eor r11,r19 - eor r12,r20 - eor r13,r21 - add r6,r10 - adc r7,r11 - adc r8,r12 - adc r9,r13 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - inc r30 - eor r19,r30 - movw r10,r22 - movw r12,r24 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - ldd r0,Y+17 - eor r10,r0 - ldd r0,Y+18 - eor r11,r0 - ldd r0,Y+19 - eor r12,r0 - ldd r0,Y+20 - eor r13,r0 - add r19,r10 - adc r20,r11 - adc r21,r12 - adc r18,r13 - inc r30 - eor r22,r30 - mov r0,r5 - mov r5,r4 - mov r4,r3 - mov r3,r2 - mov r2,r0 - ldd r10,Y+21 - ldd r11,Y+22 - ldd r12,Y+23 - ldd r13,Y+24 - eor r10,r2 - eor r11,r3 - eor r12,r4 - eor r13,r5 - add r22,r10 - adc r23,r11 - adc r24,r12 - adc r25,r13 - lsl r22 - rol r23 - rol r24 - rol r25 - adc r22,r1 - inc r30 - eor r3,r30 - movw r10,r6 - movw r12,r8 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - ldd r0,Y+25 - eor r10,r0 - ldd r0,Y+26 - eor r11,r0 - ldd r0,Y+27 - eor r12,r0 - ldd r0,Y+28 - eor r13,r0 - add r3,r10 - adc r4,r11 - adc r5,r12 - adc r2,r13 - inc r30 - eor r6,r30 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r10,Y+29 - ldd r11,Y+30 - ldd r12,Y+31 - ldd r13,Y+32 - eor r10,r18 - eor r11,r19 - eor r12,r20 - eor r13,r21 - add r6,r10 - adc r7,r11 - adc r8,r12 - adc r9,r13 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - inc r30 - ldi r31,80 - cpse r30,r31 - rjmp 197b - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r18 - st X+,r22 - st X+,r23 - st X+,r24 - st X+,r25 - st X+,r2 
- st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size cham128_128_encrypt, .-cham128_128_encrypt - - .text -.global cham64_128_encrypt - .type cham64_128_encrypt, @function -cham64_128_encrypt: - push r28 - push r29 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 38 - ld r18,Z - ldd r19,Z+1 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+1,r18 - std Y+2,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+19,r22 - std Y+20,r23 - ldd r18,Z+2 - ldd r19,Z+3 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+3,r18 - std Y+4,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+17,r22 - std Y+18,r23 - ldd r18,Z+4 - ldd r19,Z+5 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+5,r18 - std Y+6,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+23,r22 - std Y+24,r23 - ldd r18,Z+6 - ldd r19,Z+7 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+7,r18 - std Y+8,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+21,r22 - std Y+22,r23 - ldd r18,Z+8 - ldd r19,Z+9 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+9,r18 - std Y+10,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+27,r22 - std Y+28,r23 - ldd r18,Z+10 - ldd r19,Z+11 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+11,r18 - std Y+12,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+25,r22 - std Y+26,r23 - ldd r18,Z+12 - ldd r19,Z+13 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+13,r18 - std Y+14,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+31,r22 - std Y+32,r23 - ldd r18,Z+14 - ldd r19,Z+15 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+15,r18 - std Y+16,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+29,r22 - std Y+30,r23 - ld r19,X+ - ld r18,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r24,X+ - ld r25,X+ - mov r16,r1 -201: - eor r19,r16 - movw r30,r20 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+1 - eor r30,r0 - ldd r0,Y+2 - eor r31,r0 - add r19,r30 - adc r18,r31 - inc r16 - eor r20,r16 - mov r0,r23 - mov r23,r22 - mov r22,r0 - ldd r30,Y+3 - ldd r31,Y+4 - eor r30,r22 - eor 
r31,r23 - add r20,r30 - adc r21,r31 - lsl r20 - rol r21 - adc r20,r1 - inc r16 - eor r23,r16 - movw r30,r24 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+5 - eor r30,r0 - ldd r0,Y+6 - eor r31,r0 - add r23,r30 - adc r22,r31 - inc r16 - eor r24,r16 - mov r0,r19 - mov r19,r18 - mov r18,r0 - ldd r30,Y+7 - ldd r31,Y+8 - eor r30,r18 - eor r31,r19 - add r24,r30 - adc r25,r31 - lsl r24 - rol r25 - adc r24,r1 - inc r16 - eor r19,r16 - movw r30,r20 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+9 - eor r30,r0 - ldd r0,Y+10 - eor r31,r0 - add r19,r30 - adc r18,r31 - inc r16 - eor r20,r16 - mov r0,r23 - mov r23,r22 - mov r22,r0 - ldd r30,Y+11 - ldd r31,Y+12 - eor r30,r22 - eor r31,r23 - add r20,r30 - adc r21,r31 - lsl r20 - rol r21 - adc r20,r1 - inc r16 - eor r23,r16 - movw r30,r24 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+13 - eor r30,r0 - ldd r0,Y+14 - eor r31,r0 - add r23,r30 - adc r22,r31 - inc r16 - eor r24,r16 - mov r0,r19 - mov r19,r18 - mov r18,r0 - ldd r30,Y+15 - ldd r31,Y+16 - eor r30,r18 - eor r31,r19 - add r24,r30 - adc r25,r31 - lsl r24 - rol r25 - adc r24,r1 - inc r16 - eor r19,r16 - movw r30,r20 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+17 - eor r30,r0 - ldd r0,Y+18 - eor r31,r0 - add r19,r30 - adc r18,r31 - inc r16 - eor r20,r16 - mov r0,r23 - mov r23,r22 - mov r22,r0 - ldd r30,Y+19 - ldd r31,Y+20 - eor r30,r22 - eor r31,r23 - add r20,r30 - adc r21,r31 - lsl r20 - rol r21 - adc r20,r1 - inc r16 - eor r23,r16 - movw r30,r24 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+21 - eor r30,r0 - ldd r0,Y+22 - eor r31,r0 - add r23,r30 - adc r22,r31 - inc r16 - eor r24,r16 - mov r0,r19 - mov r19,r18 - mov r18,r0 - ldd r30,Y+23 - ldd r31,Y+24 - eor r30,r18 - eor r31,r19 - add r24,r30 - adc r25,r31 - lsl r24 - rol r25 - adc r24,r1 - inc r16 - eor r19,r16 - movw r30,r20 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+25 - eor r30,r0 - ldd r0,Y+26 - eor r31,r0 - add r19,r30 - adc r18,r31 - inc r16 - eor r20,r16 - mov r0,r23 - mov r23,r22 - mov r22,r0 - ldd r30,Y+27 - ldd r31,Y+28 - eor r30,r22 - eor r31,r23 - add r20,r30 - adc r21,r31 - lsl r20 - rol r21 - adc r20,r1 - inc r16 - eor r23,r16 - movw r30,r24 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+29 - eor r30,r0 - ldd r0,Y+30 - eor r31,r0 - add r23,r30 - adc r22,r31 - inc r16 - eor r24,r16 - mov r0,r19 - mov r19,r18 - mov r18,r0 - ldd r30,Y+31 - ldd r31,Y+32 - eor r30,r18 - eor r31,r19 - add r24,r30 - adc r25,r31 - lsl r24 - rol r25 - adc r24,r1 - inc r16 - ldi r17,80 - cpse r16,r17 - rjmp 201b - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r19 - st X+,r18 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r24 - st X+,r25 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r29 - pop r28 - ret - .size cham64_128_encrypt, .-cham64_128_encrypt - -#endif diff --git a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-cham.c b/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-cham.c deleted file mode 100644 index 23351a3..0000000 --- a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-cham.c +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
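/* Illustrative sketch: the AVR assembly above and the C reference code that follows implement the same CHAM round function. One even/odd round pair of CHAM-128/128, written with the generic rotate macros, looks like this; the fragment is kept disabled and assumes k[] is the 8-word key schedule. */
#if 0
x0 = leftRotate8((x0 ^ round)       + (leftRotate1(x1) ^ k[(round)     % 8]));
x1 = leftRotate1((x1 ^ (round + 1)) + (leftRotate8(x2) ^ k[(round + 1) % 8]));
#endif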
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-cham.h" -#include "internal-util.h" - -#if !defined(__AVR__) - -void cham128_128_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input) -{ - uint32_t x0, x1, x2, x3; - uint32_t k[8]; - uint8_t round; - - /* Unpack the key and generate the key schedule */ - k[0] = le_load_word32(key); - k[1] = le_load_word32(key + 4); - k[2] = le_load_word32(key + 8); - k[3] = le_load_word32(key + 12); - k[4] = k[1] ^ leftRotate1(k[1]) ^ leftRotate11(k[1]); - k[5] = k[0] ^ leftRotate1(k[0]) ^ leftRotate11(k[0]); - k[6] = k[3] ^ leftRotate1(k[3]) ^ leftRotate11(k[3]); - k[7] = k[2] ^ leftRotate1(k[2]) ^ leftRotate11(k[2]); - k[0] ^= leftRotate1(k[0]) ^ leftRotate8(k[0]); - k[1] ^= leftRotate1(k[1]) ^ leftRotate8(k[1]); - k[2] ^= leftRotate1(k[2]) ^ leftRotate8(k[2]); - k[3] ^= leftRotate1(k[3]) ^ leftRotate8(k[3]); - - /* Unpack the input block */ - x0 = le_load_word32(input); - x1 = le_load_word32(input + 4); - x2 = le_load_word32(input + 8); - x3 = le_load_word32(input + 12); - - /* Perform the 80 rounds eight at a time */ - for (round = 0; round < 80; round += 8) { - x0 = leftRotate8((x0 ^ round) + (leftRotate1(x1) ^ k[0])); - x1 = leftRotate1((x1 ^ (round + 1)) + (leftRotate8(x2) ^ k[1])); - x2 = leftRotate8((x2 ^ (round + 2)) + (leftRotate1(x3) ^ k[2])); - x3 = leftRotate1((x3 ^ (round + 3)) + (leftRotate8(x0) ^ k[3])); - x0 = leftRotate8((x0 ^ (round + 4)) + (leftRotate1(x1) ^ k[4])); - x1 = leftRotate1((x1 ^ (round + 5)) + (leftRotate8(x2) ^ k[5])); - x2 = leftRotate8((x2 ^ (round + 6)) + (leftRotate1(x3) ^ k[6])); - x3 = leftRotate1((x3 ^ (round + 7)) + (leftRotate8(x0) ^ k[7])); - } - - /* Pack the state into the output block */ - le_store_word32(output, x0); - le_store_word32(output + 4, x1); - le_store_word32(output + 8, x2); - le_store_word32(output + 12, x3); -} - -void cham64_128_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input) -{ - uint16_t x0, x1, x2, x3; - uint16_t k[16]; - uint8_t round; - - /* Unpack the key and generate the key schedule */ - k[0] = le_load_word16(key); - k[1] = le_load_word16(key + 2); - k[2] = le_load_word16(key + 4); - k[3] = le_load_word16(key + 6); - k[4] = le_load_word16(key + 8); - k[5] = le_load_word16(key + 10); - k[6] = le_load_word16(key + 12); - k[7] = le_load_word16(key + 14); - k[8] = k[1] ^ leftRotate1_16(k[1]) ^ leftRotate11_16(k[1]); - k[9] = k[0] ^ leftRotate1_16(k[0]) ^ 
leftRotate11_16(k[0]); - k[10] = k[3] ^ leftRotate1_16(k[3]) ^ leftRotate11_16(k[3]); - k[11] = k[2] ^ leftRotate1_16(k[2]) ^ leftRotate11_16(k[2]); - k[12] = k[5] ^ leftRotate1_16(k[5]) ^ leftRotate11_16(k[5]); - k[13] = k[4] ^ leftRotate1_16(k[4]) ^ leftRotate11_16(k[4]); - k[14] = k[7] ^ leftRotate1_16(k[7]) ^ leftRotate11_16(k[7]); - k[15] = k[6] ^ leftRotate1_16(k[6]) ^ leftRotate11_16(k[6]); - k[0] ^= leftRotate1_16(k[0]) ^ leftRotate8_16(k[0]); - k[1] ^= leftRotate1_16(k[1]) ^ leftRotate8_16(k[1]); - k[2] ^= leftRotate1_16(k[2]) ^ leftRotate8_16(k[2]); - k[3] ^= leftRotate1_16(k[3]) ^ leftRotate8_16(k[3]); - k[4] ^= leftRotate1_16(k[4]) ^ leftRotate8_16(k[4]); - k[5] ^= leftRotate1_16(k[5]) ^ leftRotate8_16(k[5]); - k[6] ^= leftRotate1_16(k[6]) ^ leftRotate8_16(k[6]); - k[7] ^= leftRotate1_16(k[7]) ^ leftRotate8_16(k[7]); - - /* Unpack the input block */ - x0 = le_load_word16(input); - x1 = le_load_word16(input + 2); - x2 = le_load_word16(input + 4); - x3 = le_load_word16(input + 6); - - /* Perform the 80 rounds four at a time */ - for (round = 0; round < 80; round += 4) { - x0 = leftRotate8_16 - ((x0 ^ round) + - (leftRotate1_16(x1) ^ k[round % 16])); - x1 = leftRotate1_16 - ((x1 ^ (round + 1)) + - (leftRotate8_16(x2) ^ k[(round + 1) % 16])); - x2 = leftRotate8_16 - ((x2 ^ (round + 2)) + - (leftRotate1_16(x3) ^ k[(round + 2) % 16])); - x3 = leftRotate1_16 - ((x3 ^ (round + 3)) + - (leftRotate8_16(x0) ^ k[(round + 3) % 16])); - } - - /* Pack the state into the output block */ - le_store_word16(output, x0); - le_store_word16(output + 2, x1); - le_store_word16(output + 4, x2); - le_store_word16(output + 6, x3); -} - -#endif diff --git a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-cham.h b/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-cham.h deleted file mode 100644 index 29d5ccf..0000000 --- a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-cham.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_CHAM_H -#define LW_INTERNAL_CHAM_H - -/** - * \file internal-cham.h - * \brief CHAM block cipher. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts a 128-bit block with CHAM-128-128. - * - * \param key Points to the 16 bytes of the key. - * \param output Output buffer which must be at least 16 bytes in length. 
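
    The unrolled loops above apply the CHAM round function several rounds per
    iteration. As a minimal sketch of the same computation written one round at
    a time (the helper name cham128_128_rounds and the sliding-window form are
    illustrative only; k[] is assumed to hold the 8-word key schedule expanded
    exactly as in the code above):

    #include <stdint.h>

    #define ROTL32(x, n) (((uint32_t)(x) << (n)) | ((uint32_t)(x) >> (32 - (n))))

    /* One-round-at-a-time form of the 80 CHAM-128/128 rounds unrolled above.
     * Even rounds mix with a 1-bit rotation and finish with an 8-bit rotation;
     * odd rounds swap the two rotation amounts.  x[] is the 4-word state. */
    static void cham128_128_rounds(uint32_t x[4], const uint32_t k[8])
    {
        uint8_t round;
        for (round = 0; round < 80; ++round) {
            uint32_t t;
            if ((round & 1) == 0)
                t = ROTL32((x[0] ^ round) + (ROTL32(x[1], 1) ^ k[round % 8]), 8);
            else
                t = ROTL32((x[0] ^ round) + (ROTL32(x[1], 8) ^ k[round % 8]), 1);
            /* Slide the state window: the freshly computed word becomes x[3] */
            x[0] = x[1]; x[1] = x[2]; x[2] = x[3]; x[3] = t;
        }
    }

    The CHAM-64/128 variant above follows the same pattern on 16-bit words,
    with a 16-word key schedule and the index taken modulo 16.
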
- * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void cham128_128_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 64-bit block with CHAM-64-128. - * - * \param key Points to the 16 bytes of the key. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void cham64_128_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-speck64-avr.S b/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-speck64-avr.S deleted file mode 100644 index d8d641e..0000000 --- a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-speck64-avr.S +++ /dev/null @@ -1,272 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global speck64_128_encrypt - .type speck64_128_encrypt, @function -speck64_128_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 20 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ld r14,X+ - ld r15,X+ - ld r24,X+ - ld r25,X+ - ld r30,X+ - ld r31,X+ - ld r12,X+ - ld r13,X+ - mov r16,r1 -25: - add r31,r14 - adc r12,r15 - adc r13,r24 - adc r30,r25 - eor r31,r18 - eor r12,r19 - eor r13,r20 - eor r30,r21 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r31 - eor r15,r12 - eor r24,r13 - eor r25,r30 - mov r0,r22 - mov r22,r23 - add r22,r18 - mov r23,r2 - adc r23,r19 - mov r2,r3 - adc r2,r20 - mov r3,r0 - adc r3,r21 - eor r22,r16 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - inc r16 - add r12,r14 - adc r13,r15 - adc r30,r24 - adc r31,r25 - eor r12,r18 - eor r13,r19 - eor r30,r20 - eor r31,r21 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r12 - eor r15,r13 - eor r24,r30 - eor r25,r31 - mov r0,r4 - mov r4,r5 - add r4,r18 - mov r5,r6 - adc r5,r19 - mov r6,r7 - adc r6,r20 - mov r7,r0 - adc r7,r21 - eor r4,r16 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - inc r16 - add r13,r14 - adc r30,r15 - adc r31,r24 - adc r12,r25 - eor r13,r18 - eor r30,r19 - eor r31,r20 - eor r12,r21 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - 
adc r14,r1 - eor r14,r13 - eor r15,r30 - eor r24,r31 - eor r25,r12 - mov r0,r12 - mov r12,r31 - mov r31,r30 - mov r30,r13 - mov r13,r0 - mov r0,r8 - mov r8,r9 - add r8,r18 - mov r9,r10 - adc r9,r19 - mov r10,r11 - adc r10,r20 - mov r11,r0 - adc r11,r21 - eor r8,r16 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - inc r16 - ldi r17,27 - cpse r16,r17 - rjmp 25b - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r14 - st X+,r15 - st X+,r24 - st X+,r25 - st X+,r30 - st X+,r31 - st X+,r12 - st X+,r13 - pop r0 - pop r0 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size speck64_128_encrypt, .-speck64_128_encrypt - -#endif diff --git a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-speck64.c b/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-speck64.c deleted file mode 100644 index 494c801..0000000 --- a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-speck64.c +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "internal-speck64.h" -#include "internal-util.h" - -#if !defined(__AVR__) - -void speck64_128_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input) -{ - uint32_t l0, l1, l2, s; - uint32_t x, y; - uint8_t round; - - /* Unpack the key and the input block */ - s = le_load_word32(key); - l0 = le_load_word32(key + 4); - l1 = le_load_word32(key + 8); - l2 = le_load_word32(key + 12); - y = le_load_word32(input); - x = le_load_word32(input + 4); - - /* Perform all 27 encryption rounds, in groups of 3 */ - #define round_xy() \ - do { \ - x = (rightRotate8(x) + y) ^ s; \ - y = leftRotate3(y) ^ x; \ - } while (0) - #define schedule(l) \ - do { \ - l = (s + rightRotate8(l)) ^ round; \ - s = leftRotate3(s) ^ l; \ - ++round; \ - } while (0) - for (round = 0; round < 27; ) { - round_xy(); - schedule(l0); - round_xy(); - schedule(l1); - round_xy(); - schedule(l2); - } - - /* Write the result to the output */ - le_store_word32(output, y); - le_store_word32(output + 4, x); -} - -#endif diff --git a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-speck64.h b/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-speck64.h deleted file mode 100644 index fdf840a..0000000 --- a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-speck64.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SPECK64_H -#define LW_INTERNAL_SPECK64_H - -/** - * \file internal-speck64.h - * \brief SPECK-64 block cipher. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts a 64-bit block with SPECK-64-128 in COMET byte order. - * - * \param key Points to the 16 bytes of the key. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * \note This version expects the key, input, and output to be in - * little-endian byte order, as expected by the COMET specification. 
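
    The round_xy() and schedule() macros in internal-speck64.c above contain
    the whole cipher. Written out as a plain function, one data round plus one
    key-schedule step looks like this (a sketch only; the helper name and the
    pointer-based interface are illustrative):

    #include <stdint.h>

    #define ROTL32(x, n) (((uint32_t)(x) << (n)) | ((uint32_t)(x) >> (32 - (n))))
    #define ROTR32(x, n) (((uint32_t)(x) >> (n)) | ((uint32_t)(x) << (32 - (n))))

    /* One SPECK-64 data round followed by one key-schedule step, expanded
     * from the round_xy()/schedule() macros above.  s is the current round
     * key, l one of the three schedule words l0..l2, i the round counter. */
    static void speck64_round_and_schedule
        (uint32_t *x, uint32_t *y, uint32_t *s, uint32_t *l, uint8_t i)
    {
        *x = (ROTR32(*x, 8) + *y) ^ *s;   /* mix the block halves with the round key */
        *y = ROTL32(*y, 3) ^ *x;
        *l = (*s + ROTR32(*l, 8)) ^ i;    /* derive the next round key from s and l */
        *s = ROTL32(*s, 3) ^ *l;
    }

    Calling this 27 times, with l cycling through l0, l1, l2 and i counting
    from 0 to 26, reproduces the grouped loop in the C source above.
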
- */ -void speck64_128_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-util.h b/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/comet/Implementations/crypto_aead/comet128chamv1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
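
    The probe that follows decides the byte order purely from predefined
    compiler macros. As a standalone sketch of the same question asked at run
    time (not part of this header; shown only to make the intent concrete):

    #include <stdint.h>
    #include <stdio.h>

    /* Prints what the running CPU actually does, which is what the
     * LW_UTIL_LITTLE_ENDIAN compile-time probe below is trying to predict. */
    int main(void)
    {
        uint32_t probe = 0x01020304;
        const uint8_t *bytes = (const uint8_t *)&probe;
        printf("runtime byte order: %s\n",
               bytes[0] == 0x04 ? "little-endian" : "big-endian");
        return 0;
    }
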
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
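
    The lw_xor_block* helpers defined above are the workhorses of the AEAD
    modes. A minimal usage sketch, with illustrative data and a hypothetical
    demo function name (the include path assumes this header's usual location
    in the library source tree):

    #include "internal-util.h"   /* lw_xor_block* macros shown above */

    /* XOR an 8-byte pad into a block in place, then XOR the same pad again
     * into a second buffer to recover the original bytes. */
    static void xor_block_demo(void)
    {
        unsigned char block[8] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77};
        unsigned char pad[8]   = {0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA};
        unsigned char copy[8];
        lw_xor_block(block, pad, 8);               /* block ^= pad */
        lw_xor_block_2_src(copy, block, pad, 8);   /* copy = block ^ pad */
    }
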
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
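
    On AVR, every 32-bit rotation in the composed section above is built from
    1-bit and 8-bit steps, since those are the only cheap rotations on that
    architecture. A small standalone self-check of two of the compositions
    (rotl/rotr are illustrative helpers, not part of the header):

    #include <stdint.h>
    #include <assert.h>

    static uint32_t rotl(uint32_t x, unsigned n) { return (x << n) | (x >> (32 - n)); }
    static uint32_t rotr(uint32_t x, unsigned n) { return (x >> n) | (x << (32 - n)); }

    /* Spot-check two of the composed 32-bit rotations defined above. */
    int main(void)
    {
        uint32_t x = 0x12345678;
        assert(rotr(rotl(x, 8), 3) == rotl(x, 5));   /* leftRotate5: left 8, then right 3 */
        assert(rotl(rotl(x, 24), 2) == rotr(x, 6));  /* rightRotate6 == leftRotate26 */
        return 0;
    }
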
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/comet/Implementations/crypto_aead/comet128chamv1/rhys/comet.c b/comet/Implementations/crypto_aead/comet128chamv1/rhys/comet.c index d068de2..ceb0fd6 100644 --- a/comet/Implementations/crypto_aead/comet128chamv1/rhys/comet.c +++ b/comet/Implementations/crypto_aead/comet128chamv1/rhys/comet.c @@ -22,6 +22,7 @@ #include "comet.h" #include "internal-cham.h" +#include "internal-speck64.h" #include "internal-util.h" #include @@ -478,58 +479,6 @@ int comet_64_cham_aead_decrypt return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_64_TAG_SIZE); } -/** - * \brief Encrypts a 64-bit block with SPECK-64-128 in COMET byte order. - * - * \param key Points to the 16 bytes of the key. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * \note This version differs from standard SPECK-64 in that it uses the - * little-endian byte order from the COMET specification which is different - * from the big-endian byte order from the original SPECK paper. - */ -static void speck64_128_comet_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input) -{ - uint32_t l[4]; - uint32_t x, y, s; - uint8_t round; - uint8_t li_in = 0; - uint8_t li_out = 3; - - /* Unpack the key and the input block */ - s = le_load_word32(key); - l[0] = le_load_word32(key + 4); - l[1] = le_load_word32(key + 8); - l[2] = le_load_word32(key + 12); - y = le_load_word32(input); - x = le_load_word32(input + 4); - - /* Perform all encryption rounds except the last */ - for (round = 0; round < 26; ++round) { - /* Perform the round with the current key schedule word */ - x = (rightRotate8(x) + y) ^ s; - y = leftRotate3(y) ^ x; - - /* Calculate the next key schedule word */ - l[li_out] = (s + rightRotate8(l[li_in])) ^ round; - s = leftRotate3(s) ^ l[li_out]; - li_in = (li_in + 1) & 0x03; - li_out = (li_out + 1) & 0x03; - } - - /* Perform the last encryption round and write the result to the output */ - x = (rightRotate8(x) + y) ^ s; - y = leftRotate3(y) ^ x; - le_store_word32(output, y); - le_store_word32(output + 4, x); -} - int comet_64_speck_aead_encrypt (unsigned char *c, unsigned long long *clen, const unsigned char *m, unsigned long long mlen, @@ -547,23 +496,23 @@ int comet_64_speck_aead_encrypt /* Set up the initial state of Y and Z */ memset(Y, 0, 8); - speck64_128_comet_encrypt(k, Y, Y); + speck64_128_encrypt(k, Y, Y); memcpy(Z, npub, 15); Z[15] = 0; lw_xor_block(Z, k, 16); /* Process the associated data */ if (adlen > 0) - comet_process_ad(Y, Z, 8, speck64_128_comet_encrypt, ad, adlen); + comet_process_ad(Y, Z, 8, speck64_128_encrypt, ad, adlen); /* Encrypt the plaintext to produce the ciphertext */ if (mlen > 0) - comet_encrypt_64(Y, Z, speck64_128_comet_encrypt, c, m, mlen); + comet_encrypt_64(Y, Z, speck64_128_encrypt, c, m, mlen); /* Generate the authentication tag */ Z[15] ^= 0x80; comet_adjust_block_key(Z); - speck64_128_comet_encrypt(Z, c + 
mlen, Y); + speck64_128_encrypt(Z, c + mlen, Y); return 0; } @@ -586,22 +535,22 @@ int comet_64_speck_aead_decrypt /* Set up the initial state of Y and Z */ memset(Y, 0, 8); - speck64_128_comet_encrypt(k, Y, Y); + speck64_128_encrypt(k, Y, Y); memcpy(Z, npub, 15); Z[15] = 0; lw_xor_block(Z, k, 16); /* Process the associated data */ if (adlen > 0) - comet_process_ad(Y, Z, 8, speck64_128_comet_encrypt, ad, adlen); + comet_process_ad(Y, Z, 8, speck64_128_encrypt, ad, adlen); /* Decrypt the ciphertext to produce the plaintext */ if (clen > COMET_64_TAG_SIZE) - comet_decrypt_64(Y, Z, speck64_128_comet_encrypt, m, c, *mlen); + comet_decrypt_64(Y, Z, speck64_128_encrypt, m, c, *mlen); /* Check the authentication tag */ Z[15] ^= 0x80; comet_adjust_block_key(Z); - speck64_128_comet_encrypt(Z, Y, Y); + speck64_128_encrypt(Z, Y, Y); return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_64_TAG_SIZE); } diff --git a/comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-cham-avr.S b/comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-cham-avr.S new file mode 100644 index 0000000..514a09a --- /dev/null +++ b/comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-cham-avr.S @@ -0,0 +1,915 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global cham128_128_encrypt + .type cham128_128_encrypt, @function +cham128_128_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 48 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + movw r22,r18 + movw r24,r20 + movw r6,r18 + movw r8,r20 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r18,r6 + eor r19,r7 + eor r20,r8 + eor r21,r9 + movw r2,r18 + movw r4,r20 + eor r18,r25 + eor r19,r22 + eor r20,r23 + eor r21,r24 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r2,r9 + eor r3,r6 + eor r4,r7 + eor r5,r8 + std Y+21,r2 + std Y+22,r3 + std Y+23,r4 + std Y+24,r5 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + movw r22,r18 + movw r24,r20 + movw r6,r18 + movw r8,r20 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r18,r6 + eor r19,r7 + eor r20,r8 + eor r21,r9 + movw r2,r18 + movw r4,r20 + eor r18,r25 + eor r19,r22 + eor r20,r23 + eor r21,r24 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r2,r9 + eor r3,r6 + eor r4,r7 + eor r5,r8 + std Y+17,r2 + std Y+18,r3 + std Y+19,r4 + std Y+20,r5 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + movw r22,r18 + movw r24,r20 + movw r6,r18 + movw r8,r20 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r18,r6 + eor r19,r7 + eor r20,r8 + eor r21,r9 + movw r2,r18 + movw r4,r20 + eor r18,r25 + eor r19,r22 + eor r20,r23 + eor r21,r24 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r2,r9 + eor r3,r6 + eor r4,r7 + eor r5,r8 + std Y+29,r2 + std Y+30,r3 + std Y+31,r4 + std Y+32,r5 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + movw r22,r18 + movw r24,r20 + movw r6,r18 + movw r8,r20 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + 
eor r18,r6 + eor r19,r7 + eor r20,r8 + eor r21,r9 + movw r2,r18 + movw r4,r20 + eor r18,r25 + eor r19,r22 + eor r20,r23 + eor r21,r24 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r2,r9 + eor r3,r6 + eor r4,r7 + eor r5,r8 + std Y+25,r2 + std Y+26,r3 + std Y+27,r4 + std Y+28,r5 + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r18,X+ + ld r22,X+ + ld r23,X+ + ld r24,X+ + ld r25,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + mov r30,r1 +197: + eor r19,r30 + movw r10,r22 + movw r12,r24 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + ldd r0,Y+1 + eor r10,r0 + ldd r0,Y+2 + eor r11,r0 + ldd r0,Y+3 + eor r12,r0 + ldd r0,Y+4 + eor r13,r0 + add r19,r10 + adc r20,r11 + adc r21,r12 + adc r18,r13 + inc r30 + eor r22,r30 + mov r0,r5 + mov r5,r4 + mov r4,r3 + mov r3,r2 + mov r2,r0 + ldd r10,Y+5 + ldd r11,Y+6 + ldd r12,Y+7 + ldd r13,Y+8 + eor r10,r2 + eor r11,r3 + eor r12,r4 + eor r13,r5 + add r22,r10 + adc r23,r11 + adc r24,r12 + adc r25,r13 + lsl r22 + rol r23 + rol r24 + rol r25 + adc r22,r1 + inc r30 + eor r3,r30 + movw r10,r6 + movw r12,r8 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + ldd r0,Y+9 + eor r10,r0 + ldd r0,Y+10 + eor r11,r0 + ldd r0,Y+11 + eor r12,r0 + ldd r0,Y+12 + eor r13,r0 + add r3,r10 + adc r4,r11 + adc r5,r12 + adc r2,r13 + inc r30 + eor r6,r30 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r10,Y+13 + ldd r11,Y+14 + ldd r12,Y+15 + ldd r13,Y+16 + eor r10,r18 + eor r11,r19 + eor r12,r20 + eor r13,r21 + add r6,r10 + adc r7,r11 + adc r8,r12 + adc r9,r13 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + inc r30 + eor r19,r30 + movw r10,r22 + movw r12,r24 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + ldd r0,Y+17 + eor r10,r0 + ldd r0,Y+18 + eor r11,r0 + ldd r0,Y+19 + eor r12,r0 + ldd r0,Y+20 + eor r13,r0 + add r19,r10 + adc r20,r11 + adc r21,r12 + adc r18,r13 + inc r30 + eor r22,r30 + mov r0,r5 + mov r5,r4 + mov r4,r3 + mov r3,r2 + mov r2,r0 + ldd r10,Y+21 + ldd r11,Y+22 + ldd r12,Y+23 + ldd r13,Y+24 + eor r10,r2 + eor r11,r3 + eor r12,r4 + eor r13,r5 + add r22,r10 + adc r23,r11 + adc r24,r12 + adc r25,r13 + lsl r22 + rol r23 + rol r24 + rol r25 + adc r22,r1 + inc r30 + eor r3,r30 + movw r10,r6 + movw r12,r8 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + ldd r0,Y+25 + eor r10,r0 + ldd r0,Y+26 + eor r11,r0 + ldd r0,Y+27 + eor r12,r0 + ldd r0,Y+28 + eor r13,r0 + add r3,r10 + adc r4,r11 + adc r5,r12 + adc r2,r13 + inc r30 + eor r6,r30 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r10,Y+29 + ldd r11,Y+30 + ldd r12,Y+31 + ldd r13,Y+32 + eor r10,r18 + eor r11,r19 + eor r12,r20 + eor r13,r21 + add r6,r10 + adc r7,r11 + adc r8,r12 + adc r9,r13 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + inc r30 + ldi r31,80 + cpse r30,r31 + rjmp 197b + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r18 + st X+,r22 + st X+,r23 + st X+,r24 + st X+,r25 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size cham128_128_encrypt, .-cham128_128_encrypt + + .text +.global cham64_128_encrypt + .type cham64_128_encrypt, @function +cham64_128_encrypt: + push r28 + push r29 + push r16 + push r17 + push r23 
+ push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 38 + ld r18,Z + ldd r19,Z+1 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+1,r18 + std Y+2,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+19,r22 + std Y+20,r23 + ldd r18,Z+2 + ldd r19,Z+3 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+3,r18 + std Y+4,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+17,r22 + std Y+18,r23 + ldd r18,Z+4 + ldd r19,Z+5 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+5,r18 + std Y+6,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+23,r22 + std Y+24,r23 + ldd r18,Z+6 + ldd r19,Z+7 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+7,r18 + std Y+8,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+21,r22 + std Y+22,r23 + ldd r18,Z+8 + ldd r19,Z+9 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+9,r18 + std Y+10,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+27,r22 + std Y+28,r23 + ldd r18,Z+10 + ldd r19,Z+11 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+11,r18 + std Y+12,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+25,r22 + std Y+26,r23 + ldd r18,Z+12 + ldd r19,Z+13 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+13,r18 + std Y+14,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+31,r22 + std Y+32,r23 + ldd r18,Z+14 + ldd r19,Z+15 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+15,r18 + std Y+16,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+29,r22 + std Y+30,r23 + ld r19,X+ + ld r18,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r24,X+ + ld r25,X+ + mov r16,r1 +201: + eor r19,r16 + movw r30,r20 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+1 + eor r30,r0 + ldd r0,Y+2 + eor r31,r0 + add r19,r30 + adc r18,r31 + inc r16 + eor r20,r16 + mov r0,r23 + mov r23,r22 + mov r22,r0 + ldd r30,Y+3 + ldd r31,Y+4 + eor r30,r22 + eor r31,r23 + add r20,r30 + adc r21,r31 + lsl r20 + rol r21 + adc r20,r1 + inc r16 + eor r23,r16 + movw r30,r24 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+5 + eor r30,r0 + ldd r0,Y+6 + eor r31,r0 + add r23,r30 + adc r22,r31 + inc r16 + eor r24,r16 + mov r0,r19 + mov r19,r18 + mov r18,r0 + ldd r30,Y+7 + ldd r31,Y+8 + eor r30,r18 + eor r31,r19 + add r24,r30 + adc r25,r31 + lsl r24 + rol r25 + adc r24,r1 + inc r16 + eor r19,r16 + movw r30,r20 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+9 + eor 
r30,r0 + ldd r0,Y+10 + eor r31,r0 + add r19,r30 + adc r18,r31 + inc r16 + eor r20,r16 + mov r0,r23 + mov r23,r22 + mov r22,r0 + ldd r30,Y+11 + ldd r31,Y+12 + eor r30,r22 + eor r31,r23 + add r20,r30 + adc r21,r31 + lsl r20 + rol r21 + adc r20,r1 + inc r16 + eor r23,r16 + movw r30,r24 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+13 + eor r30,r0 + ldd r0,Y+14 + eor r31,r0 + add r23,r30 + adc r22,r31 + inc r16 + eor r24,r16 + mov r0,r19 + mov r19,r18 + mov r18,r0 + ldd r30,Y+15 + ldd r31,Y+16 + eor r30,r18 + eor r31,r19 + add r24,r30 + adc r25,r31 + lsl r24 + rol r25 + adc r24,r1 + inc r16 + eor r19,r16 + movw r30,r20 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+17 + eor r30,r0 + ldd r0,Y+18 + eor r31,r0 + add r19,r30 + adc r18,r31 + inc r16 + eor r20,r16 + mov r0,r23 + mov r23,r22 + mov r22,r0 + ldd r30,Y+19 + ldd r31,Y+20 + eor r30,r22 + eor r31,r23 + add r20,r30 + adc r21,r31 + lsl r20 + rol r21 + adc r20,r1 + inc r16 + eor r23,r16 + movw r30,r24 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+21 + eor r30,r0 + ldd r0,Y+22 + eor r31,r0 + add r23,r30 + adc r22,r31 + inc r16 + eor r24,r16 + mov r0,r19 + mov r19,r18 + mov r18,r0 + ldd r30,Y+23 + ldd r31,Y+24 + eor r30,r18 + eor r31,r19 + add r24,r30 + adc r25,r31 + lsl r24 + rol r25 + adc r24,r1 + inc r16 + eor r19,r16 + movw r30,r20 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+25 + eor r30,r0 + ldd r0,Y+26 + eor r31,r0 + add r19,r30 + adc r18,r31 + inc r16 + eor r20,r16 + mov r0,r23 + mov r23,r22 + mov r22,r0 + ldd r30,Y+27 + ldd r31,Y+28 + eor r30,r22 + eor r31,r23 + add r20,r30 + adc r21,r31 + lsl r20 + rol r21 + adc r20,r1 + inc r16 + eor r23,r16 + movw r30,r24 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+29 + eor r30,r0 + ldd r0,Y+30 + eor r31,r0 + add r23,r30 + adc r22,r31 + inc r16 + eor r24,r16 + mov r0,r19 + mov r19,r18 + mov r18,r0 + ldd r30,Y+31 + ldd r31,Y+32 + eor r30,r18 + eor r31,r19 + add r24,r30 + adc r25,r31 + lsl r24 + rol r25 + adc r24,r1 + inc r16 + ldi r17,80 + cpse r16,r17 + rjmp 201b + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r19 + st X+,r18 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r24 + st X+,r25 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r29 + pop r28 + ret + .size cham64_128_encrypt, .-cham64_128_encrypt + +#endif diff --git a/comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-cham.c b/comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-cham.c index e097dbd..23351a3 100644 --- a/comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-cham.c +++ b/comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-cham.c @@ -23,6 +23,8 @@ #include "internal-cham.h" #include "internal-util.h" +#if !defined(__AVR__) + void cham128_128_encrypt (const unsigned char *key, unsigned char *output, const unsigned char *input) @@ -132,3 +134,5 @@ void cham64_128_encrypt le_store_word16(output + 4, x2); le_store_word16(output + 6, x3); } + +#endif diff --git a/comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-speck64-avr.S b/comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-speck64-avr.S new file mode 100644 index 0000000..d8d641e --- /dev/null +++ b/comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-speck64-avr.S @@ -0,0 +1,272 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global speck64_128_encrypt + .type speck64_128_encrypt, @function +speck64_128_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 
+ push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 20 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ld r14,X+ + ld r15,X+ + ld r24,X+ + ld r25,X+ + ld r30,X+ + ld r31,X+ + ld r12,X+ + ld r13,X+ + mov r16,r1 +25: + add r31,r14 + adc r12,r15 + adc r13,r24 + adc r30,r25 + eor r31,r18 + eor r12,r19 + eor r13,r20 + eor r30,r21 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r31 + eor r15,r12 + eor r24,r13 + eor r25,r30 + mov r0,r22 + mov r22,r23 + add r22,r18 + mov r23,r2 + adc r23,r19 + mov r2,r3 + adc r2,r20 + mov r3,r0 + adc r3,r21 + eor r22,r16 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + inc r16 + add r12,r14 + adc r13,r15 + adc r30,r24 + adc r31,r25 + eor r12,r18 + eor r13,r19 + eor r30,r20 + eor r31,r21 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r12 + eor r15,r13 + eor r24,r30 + eor r25,r31 + mov r0,r4 + mov r4,r5 + add r4,r18 + mov r5,r6 + adc r5,r19 + mov r6,r7 + adc r6,r20 + mov r7,r0 + adc r7,r21 + eor r4,r16 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + inc r16 + add r13,r14 + adc r30,r15 + adc r31,r24 + adc r12,r25 + eor r13,r18 + eor r30,r19 + eor r31,r20 + eor r12,r21 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r13 + eor r15,r30 + eor r24,r31 + eor r25,r12 + mov r0,r12 + mov r12,r31 + mov r31,r30 + mov r30,r13 + mov r13,r0 + mov r0,r8 + mov r8,r9 + add r8,r18 + mov r9,r10 + adc r9,r19 + mov r10,r11 + adc r10,r20 + mov r11,r0 + adc r11,r21 + eor r8,r16 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + inc r16 + ldi r17,27 + cpse r16,r17 + rjmp 25b + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r14 + st X+,r15 + st X+,r24 + st X+,r25 + st X+,r30 + st X+,r31 + st X+,r12 + st X+,r13 + pop r0 + pop r0 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size speck64_128_encrypt, .-speck64_128_encrypt + +#endif diff --git a/comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-speck64.c b/comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-speck64.c new file mode 100644 index 0000000..494c801 --- /dev/null +++ b/comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-speck64.c @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "internal-speck64.h" +#include "internal-util.h" + +#if !defined(__AVR__) + +void speck64_128_encrypt + (const unsigned char *key, unsigned char *output, + const unsigned char *input) +{ + uint32_t l0, l1, l2, s; + uint32_t x, y; + uint8_t round; + + /* Unpack the key and the input block */ + s = le_load_word32(key); + l0 = le_load_word32(key + 4); + l1 = le_load_word32(key + 8); + l2 = le_load_word32(key + 12); + y = le_load_word32(input); + x = le_load_word32(input + 4); + + /* Perform all 27 encryption rounds, in groups of 3 */ + #define round_xy() \ + do { \ + x = (rightRotate8(x) + y) ^ s; \ + y = leftRotate3(y) ^ x; \ + } while (0) + #define schedule(l) \ + do { \ + l = (s + rightRotate8(l)) ^ round; \ + s = leftRotate3(s) ^ l; \ + ++round; \ + } while (0) + for (round = 0; round < 27; ) { + round_xy(); + schedule(l0); + round_xy(); + schedule(l1); + round_xy(); + schedule(l2); + } + + /* Write the result to the output */ + le_store_word32(output, y); + le_store_word32(output + 4, x); +} + +#endif diff --git a/comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-speck64.h b/comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-speck64.h new file mode 100644 index 0000000..fdf840a --- /dev/null +++ b/comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-speck64.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
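Reviewer note, not part of the patch: the portable speck64_128_encrypt added above takes a 16-byte key and processes a single 8-byte block, and the header below states that the input and output pointers may alias. A minimal hypothetical caller, assuming the new internal-speck64.h/.c pair is compiled into the build, could look like this:

#include <stdio.h>
#include "internal-speck64.h"

int main(void)
{
    /* Arbitrary illustrative values; these are not official SPECK test vectors. */
    unsigned char key[16] = {
        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
        0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
    };
    unsigned char block[8] = {
        0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
    };
    unsigned index;

    /* Encrypt the block in place; output and input may be the same buffer. */
    speck64_128_encrypt(key, block, block);

    for (index = 0; index < 8; ++index)
        printf("%02x", block[index]);
    printf("\n");
    return 0;
}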
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_SPECK64_H +#define LW_INTERNAL_SPECK64_H + +/** + * \file internal-speck64.h + * \brief SPECK-64 block cipher. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts a 64-bit block with SPECK-64-128 in COMET byte order. + * + * \param key Points to the 16 bytes of the key. + * \param output Output buffer which must be at least 8 bytes in length. + * \param input Input buffer which must be at least 8 bytes in length. + * + * The \a input and \a output buffers can be the same buffer for + * in-place encryption. + * + * \note This version expects the key, input, and output to be in + * little-endian byte order, as expected by the COMET specification. + */ +void speck64_128_encrypt + (const unsigned char *key, unsigned char *output, + const unsigned char *input); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-util.h b/comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-util.h +++ b/comet/Implementations/crypto_aead/comet128chamv1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/aead-common.c b/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
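Side note for reviewers, not part of the patch: the composed-rotation macros above rely on the fact that any 32-bit rotation can be expressed as a byte-granular rotation (8, 16 or 24 bits) followed by a few single-bit rotations, which are the cheap cases on AVR. A small stand-alone check of one such identity (rotate left by 8, then right by 3, equals rotate left by 5), using plain C rotations rather than the patch's macros:

#include <stdint.h>
#include <stdio.h>

/* Plain 32-bit rotations, used here only for the illustration. */
static uint32_t rotl(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32 - bits));
}
static uint32_t rotr(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32 - bits));
}

int main(void)
{
    uint32_t x = 0x12345678;

    /* leftRotate5 is composed above as "rotate left by 8, then right by 3". */
    uint32_t composed = rotr(rotl(x, 8), 3);
    uint32_t direct   = rotl(x, 5);

    printf("%s\n", composed == direct ? "equal" : "different");
    return 0;
}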
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/aead-common.h b/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. 
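Aside, not part of the patch: the aead_check_tag code in the removed file above avoids branching on secret data by folding every tag-byte difference into accum and then turning the result into a mask with (accum - 1) >> 8. A tiny demonstration of that arithmetic, assuming the usual arithmetic right shift on signed int:

#include <stdio.h>

/* Reproduces the mask trick used by aead_check_tag: returns -1 (all ones)
 * when the two tags are equal and 0 when they differ, without branching
 * on the tag contents. */
static int tag_mask(const unsigned char *tag1, const unsigned char *tag2,
                    unsigned size)
{
    int accum = 0;
    while (size > 0) {
        accum |= (*tag1++ ^ *tag2++);
        --size;
    }
    return (accum - 1) >> 8;   /* 0 - 1 = -1 -> all ones; 1..255 -> 0 */
}

int main(void)
{
    unsigned char a[4] = {1, 2, 3, 4};
    unsigned char b[4] = {1, 2, 3, 4};
    unsigned char c[4] = {1, 2, 3, 5};
    printf("match: %d, mismatch: %d\n", tag_mask(a, b, 4), tag_mask(a, c, 4));
    return 0;
}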
- * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/api.h b/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/api.h deleted file mode 100644 index 9f9959f..0000000 --- a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 15 -#define CRYPTO_ABYTES 8 -#define CRYPTO_NOOVERLAP 1 diff --git a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/comet.c b/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/comet.c deleted file mode 100644 index ceb0fd6..0000000 --- a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/comet.c +++ /dev/null @@ -1,556 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "comet.h" -#include "internal-cham.h" -#include "internal-speck64.h" -#include "internal-util.h" -#include - -aead_cipher_t const comet_128_cham_cipher = { - "COMET-128_CHAM-128/128", - COMET_KEY_SIZE, - COMET_128_NONCE_SIZE, - COMET_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - comet_128_cham_aead_encrypt, - comet_128_cham_aead_decrypt -}; - -aead_cipher_t const comet_64_cham_cipher = { - "COMET-64_CHAM-64/128", - COMET_KEY_SIZE, - COMET_64_NONCE_SIZE, - COMET_64_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - comet_64_cham_aead_encrypt, - comet_64_cham_aead_decrypt -}; - -aead_cipher_t const comet_64_speck_cipher = { - "COMET-64_SPECK-64/128", - COMET_KEY_SIZE, - COMET_64_NONCE_SIZE, - COMET_64_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - comet_64_speck_aead_encrypt, - comet_64_speck_aead_decrypt -}; - -/** - * \brief Adjusts the Z state to generate the key to use on the next block. - * - * \param Z The Z state to be adjusted. - */ -static void comet_adjust_block_key(unsigned char Z[16]) -{ - /* Doubles the 64-bit prefix to Z in the F(2^64) field */ - unsigned index; - unsigned char mask = (unsigned char)(((signed char)(Z[7])) >> 7); - for (index = 7; index > 0; --index) - Z[index] = (Z[index] << 1) | (Z[index - 1] >> 7); - Z[0] = (Z[0] << 1) ^ (mask & 0x1B); -} - -/* Function prototype for the encrypt function of the underyling cipher */ -typedef void (*comet_encrypt_block_t) - (const unsigned char *key, unsigned char *output, - const unsigned char *input); - -/** - * \brief Processes the associated data for COMET. - * - * \param Y Internal COMET block state of \a block_size bytes in size. - * \param Z Internal COMET key state of 16 bytes in size. - * \param block_size Size of the block for the underlying cipher. - * \param encrypt Encryption function for the underlying cipher. - * \param ad Points to the associated data. - * \param adlen Number of bytes of associated data; must be >= 1. - */ -static void comet_process_ad - (unsigned char *Y, unsigned char Z[16], unsigned block_size, - comet_encrypt_block_t encrypt, const unsigned char *ad, - unsigned long long adlen) -{ - /* Domain separator for associated data */ - Z[15] ^= 0x08; - - /* Process all associated data blocks except the last partial block */ - while (adlen >= block_size) { - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - lw_xor_block(Y, ad, block_size); - ad += block_size; - adlen -= block_size; - } - - /* Pad and process the partial block on the end */ - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - Z[15] ^= 0x10; - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - lw_xor_block(Y, ad, temp); - Y[temp] ^= 0x01; - } -} - -/** - * \brief Shuffles the words in a 128-bit block. - * - * \param out The output block after shuffling. - * \param in The input block to be shuffled. - */ -STATIC_INLINE void comet_shuffle_block_128 - (unsigned char out[16], const unsigned char in[16]) -{ - uint32_t x0, x1, x2, x3; - x0 = le_load_word32(in); - x1 = le_load_word32(in + 4); - x2 = le_load_word32(in + 8); - x3 = le_load_word32(in + 12); - le_store_word32(out, x3); - le_store_word32(out + 4, rightRotate1(x2)); - le_store_word32(out + 8, x0); - le_store_word32(out + 12, x1); -} - -/** - * \brief Shuffles the words in a 64-bit block. - * - * \param out The output block after shuffling. - * \param in The input block to be shuffled. 
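Aside, not part of the patch: the byte loop in comet_adjust_block_key above treats Z[0..7] as a little-endian 64-bit value, shifts it left by one bit, and XORs the constant 0x1B into the low byte when the top bit falls off. A word-oriented sketch of the same doubling, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Doubles the 64-bit prefix of Z the same way as comet_adjust_block_key,
 * but using one 64-bit word instead of a byte-by-byte carry loop. */
static void double_z_prefix(unsigned char Z[16])
{
    uint64_t z = 0;
    unsigned index;
    for (index = 0; index < 8; ++index)        /* little-endian load */
        z |= ((uint64_t)Z[index]) << (8 * index);
    z = (z << 1) ^ ((z >> 63) ? 0x1B : 0);     /* shift left, reduce on carry */
    for (index = 0; index < 8; ++index)        /* little-endian store */
        Z[index] = (unsigned char)(z >> (8 * index));
}

int main(void)
{
    unsigned char Z[16] = {0x80, 0, 0, 0, 0, 0, 0, 0x80};
    double_z_prefix(Z);
    printf("%02x .. %02x\n", Z[0], Z[7]);      /* low byte picks up 0x1B ^ 0x00 */
    return 0;
}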
- */ -STATIC_INLINE void comet_shuffle_block_64 - (unsigned char out[8], const unsigned char in[8]) -{ - uint32_t x01 = le_load_word32(in); - uint16_t x2 = ((uint16_t)(in[4])) | (((uint16_t)(in[5])) << 8); - out[0] = in[6]; - out[1] = in[7]; - x2 = (x2 >> 1) | (x2 << 15); - out[2] = (uint8_t)x2; - out[3] = (uint8_t)(x2 >> 8); - le_store_word32(out + 4, x01); -} - -/** - * \brief Encrypts the plaintext with COMET-128 to produce the ciphertext. - * - * \param Y Internal COMET block state of 16 bytes in size. - * \param Z Internal COMET key state of 16 bytes in size. - * \param encrypt Encryption function for the underlying cipher. - * \param c Ciphertext on output. - * \param m Plaintext message on input. - * \param mlen Length of the plaintext message and the ciphertext. - */ -static void comet_encrypt_128 - (unsigned char Y[16], unsigned char Z[16], - comet_encrypt_block_t encrypt, unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char Ys[16]; - - /* Domain separator for payload data */ - Z[15] ^= 0x20; - - /* Process all payload data blocks except the last partial block */ - while (mlen >= 16) { - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_128(Ys, Y); - lw_xor_block(Y, m, 16); - lw_xor_block_2_src(c, m, Ys, 16); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the partial block on the end */ - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - Z[15] ^= 0x40; - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_128(Ys, Y); - lw_xor_block(Y, m, temp); - lw_xor_block_2_src(c, m, Ys, temp); - Y[temp] ^= 0x01; - } -} - -/** - * \brief Encrypts the plaintext with COMET-64 to produce the ciphertext. - * - * \param Y Internal COMET block state of 8 bytes in size. - * \param Z Internal COMET key state of 16 bytes in size. - * \param encrypt Encryption function for the underlying cipher. - * \param c Ciphertext on output. - * \param m Plaintext message on input. - * \param mlen Length of the plaintext message and the ciphertext. - */ -static void comet_encrypt_64 - (unsigned char Y[8], unsigned char Z[16], - comet_encrypt_block_t encrypt, unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char Ys[8]; - - /* Domain separator for payload data */ - Z[15] ^= 0x20; - - /* Process all payload data blocks except the last partial block */ - while (mlen >= 8) { - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_64(Ys, Y); - lw_xor_block(Y, m, 8); - lw_xor_block_2_src(c, m, Ys, 8); - c += 8; - m += 8; - mlen -= 8; - } - - /* Pad and process the partial block on the end */ - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - Z[15] ^= 0x40; - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_64(Ys, Y); - lw_xor_block(Y, m, temp); - lw_xor_block_2_src(c, m, Ys, temp); - Y[temp] ^= 0x01; - } -} - -/** - * \brief Decrypts the ciphertext with COMET-128 to produce the plaintext. - * - * \param Y Internal COMET block state of 16 bytes in size. - * \param Z Internal COMET key state of 16 bytes in size. - * \param encrypt Encryption function for the underlying cipher. - * \param m Plaintext message on output. - * \param c Ciphertext on input. - * \param mlen Length of the plaintext message and the ciphertext. 
- */ -static void comet_decrypt_128 - (unsigned char Y[16], unsigned char Z[16], - comet_encrypt_block_t encrypt, unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - unsigned char Ys[16]; - - /* Domain separator for payload data */ - Z[15] ^= 0x20; - - /* Process all payload data blocks except the last partial block */ - while (mlen >= 16) { - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_128(Ys, Y); - lw_xor_block_2_src(m, c, Ys, 16); - lw_xor_block(Y, m, 16); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the partial block on the end */ - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - Z[15] ^= 0x40; - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_128(Ys, Y); - lw_xor_block_2_src(m, c, Ys, temp); - lw_xor_block(Y, m, temp); - Y[temp] ^= 0x01; - } -} - -/** - * \brief Decrypts the ciphertext with COMET-64 to produce the plaintext. - * - * \param Y Internal COMET block state of 8 bytes in size. - * \param Z Internal COMET key state of 16 bytes in size. - * \param encrypt Encryption function for the underlying cipher. - * \param m Plaintext message on output. - * \param c Ciphertext on input. - * \param mlen Length of the plaintext message and the ciphertext. - */ -static void comet_decrypt_64 - (unsigned char Y[8], unsigned char Z[16], - comet_encrypt_block_t encrypt, unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - unsigned char Ys[8]; - - /* Domain separator for payload data */ - Z[15] ^= 0x20; - - /* Process all payload data blocks except the last partial block */ - while (mlen >= 8) { - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_64(Ys, Y); - lw_xor_block_2_src(m, c, Ys, 8); - lw_xor_block(Y, m, 8); - c += 8; - m += 8; - mlen -= 8; - } - - /* Pad and process the partial block on the end */ - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - Z[15] ^= 0x40; - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_64(Ys, Y); - lw_xor_block_2_src(m, c, Ys, temp); - lw_xor_block(Y, m, temp); - Y[temp] ^= 0x01; - } -} - -int comet_128_cham_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char Y[16]; - unsigned char Z[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + COMET_128_TAG_SIZE; - - /* Set up the initial state of Y and Z */ - memcpy(Y, k, 16); - cham128_128_encrypt(Y, Z, npub); - - /* Process the associated data */ - if (adlen > 0) - comet_process_ad(Y, Z, 16, cham128_128_encrypt, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) - comet_encrypt_128(Y, Z, cham128_128_encrypt, c, m, mlen); - - /* Generate the authentication tag */ - Z[15] ^= 0x80; - comet_adjust_block_key(Z); - cham128_128_encrypt(Z, c + mlen, Y); - return 0; -} - -int comet_128_cham_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char Y[16]; - unsigned char Z[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < COMET_128_TAG_SIZE) - return -1; - *mlen = clen - COMET_128_TAG_SIZE; - - /* Set up the initial state of Y and Z */ - 
memcpy(Y, k, 16); - cham128_128_encrypt(Y, Z, npub); - - /* Process the associated data */ - if (adlen > 0) - comet_process_ad(Y, Z, 16, cham128_128_encrypt, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > COMET_128_TAG_SIZE) - comet_decrypt_128(Y, Z, cham128_128_encrypt, m, c, *mlen); - - /* Check the authentication tag */ - Z[15] ^= 0x80; - comet_adjust_block_key(Z); - cham128_128_encrypt(Z, Y, Y); - return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_128_TAG_SIZE); -} - -int comet_64_cham_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char Y[8]; - unsigned char Z[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + COMET_64_TAG_SIZE; - - /* Set up the initial state of Y and Z */ - memset(Y, 0, 8); - cham64_128_encrypt(k, Y, Y); - memcpy(Z, npub, 15); - Z[15] = 0; - lw_xor_block(Z, k, 16); - - /* Process the associated data */ - if (adlen > 0) - comet_process_ad(Y, Z, 8, cham64_128_encrypt, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) - comet_encrypt_64(Y, Z, cham64_128_encrypt, c, m, mlen); - - /* Generate the authentication tag */ - Z[15] ^= 0x80; - comet_adjust_block_key(Z); - cham64_128_encrypt(Z, c + mlen, Y); - return 0; -} - -int comet_64_cham_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char Y[8]; - unsigned char Z[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < COMET_64_TAG_SIZE) - return -1; - *mlen = clen - COMET_64_TAG_SIZE; - - /* Set up the initial state of Y and Z */ - memset(Y, 0, 8); - cham64_128_encrypt(k, Y, Y); - memcpy(Z, npub, 15); - Z[15] = 0; - lw_xor_block(Z, k, 16); - - /* Process the associated data */ - if (adlen > 0) - comet_process_ad(Y, Z, 8, cham64_128_encrypt, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > COMET_64_TAG_SIZE) - comet_decrypt_64(Y, Z, cham64_128_encrypt, m, c, *mlen); - - /* Check the authentication tag */ - Z[15] ^= 0x80; - comet_adjust_block_key(Z); - cham64_128_encrypt(Z, Y, Y); - return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_64_TAG_SIZE); -} - -int comet_64_speck_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char Y[8]; - unsigned char Z[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + COMET_64_TAG_SIZE; - - /* Set up the initial state of Y and Z */ - memset(Y, 0, 8); - speck64_128_encrypt(k, Y, Y); - memcpy(Z, npub, 15); - Z[15] = 0; - lw_xor_block(Z, k, 16); - - /* Process the associated data */ - if (adlen > 0) - comet_process_ad(Y, Z, 8, speck64_128_encrypt, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) - comet_encrypt_64(Y, Z, speck64_128_encrypt, c, m, mlen); - - /* Generate the authentication tag */ - Z[15] ^= 0x80; - comet_adjust_block_key(Z); - speck64_128_encrypt(Z, c + mlen, Y); - return 0; -} - -int 
comet_64_speck_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char Y[8]; - unsigned char Z[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < COMET_64_TAG_SIZE) - return -1; - *mlen = clen - COMET_64_TAG_SIZE; - - /* Set up the initial state of Y and Z */ - memset(Y, 0, 8); - speck64_128_encrypt(k, Y, Y); - memcpy(Z, npub, 15); - Z[15] = 0; - lw_xor_block(Z, k, 16); - - /* Process the associated data */ - if (adlen > 0) - comet_process_ad(Y, Z, 8, speck64_128_encrypt, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > COMET_64_TAG_SIZE) - comet_decrypt_64(Y, Z, speck64_128_encrypt, m, c, *mlen); - - /* Check the authentication tag */ - Z[15] ^= 0x80; - comet_adjust_block_key(Z); - speck64_128_encrypt(Z, Y, Y); - return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_64_TAG_SIZE); -} diff --git a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/comet.h b/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/comet.h deleted file mode 100644 index d1b24a6..0000000 --- a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/comet.h +++ /dev/null @@ -1,274 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_COMET_H -#define LWCRYPTO_COMET_H - -#include "aead-common.h" - -/** - * \file comet.h - * \brief COMET authenticated encryption algorithm. - * - * COMET is a family of authenticated encryption algorithms that are - * built around an underlying block cipher. This library implements - * three members of the family: - * - * \li COMET-128_CHAM-128/128 which has a 128-bit key, a 128-bit nonce, - * and a 128-bit tag, built around the CHAM-128/128 block cipher. - * \li COMET-64_CHAM-64/128 which has a 128-bit key, a 120-bit nonce, - * and a 64-bit tag, built around the CHAM-64/128 block cipher. - * \li COMET-64_SPECK-64/128 which has a 128-bit key, a 120-bit nonce, - * and a 64-bit tag, built around the SPECK-64/128 block cipher. - * - * There is also another family member that is built around AES but - * this library does not implement that version. 
- * - * References: https://www.isical.ac.in/~lightweight/comet/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all COMET family members. - */ -#define COMET_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for the 128-bit versions of COMET. - */ -#define COMET_128_TAG_SIZE 16 - -/** - * \brief Size of the authentication tag for the 64-bit versions of COMET. - */ -#define COMET_64_TAG_SIZE 8 - -/** - * \brief Size of the nonce for the 128-bit versions of COMET. - */ -#define COMET_128_NONCE_SIZE 16 - -/** - * \brief Size of the nonce for the 64-bit versions of COMET. - */ -#define COMET_64_NONCE_SIZE 15 - -/** - * \brief Meta-information block for the COMET-128_CHAM-128/128 cipher. - */ -extern aead_cipher_t const comet_128_cham_cipher; - -/** - * \brief Meta-information block for the COMET-64_CHAM-64/128 cipher. - */ -extern aead_cipher_t const comet_64_cham_cipher; - -/** - * \brief Meta-information block for the COMET-64_SPECK-64/128 cipher. - */ -extern aead_cipher_t const comet_64_speck_cipher; - -/** - * \brief Encrypts and authenticates a packet with COMET-128_CHAM-128/128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa comet_128_cham_aead_decrypt() - */ -int comet_128_cham_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with COMET-128_CHAM-128/128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- * - * \sa comet_128_cham_aead_encrypt() - */ -int comet_128_cham_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with COMET-64_CHAM-64/128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa comet_64_cham_aead_decrypt() - */ -int comet_64_cham_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with COMET-64_CHAM-64/128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa comet_64_cham_aead_encrypt() - */ -int comet_64_cham_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with COMET-64_SPECK-64/128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. 
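Aside, not part of the patch: the removed header documents the COMET-64 calling convention (16-byte key, 15-byte nonce, 8-byte tag appended to the ciphertext). A hypothetical round-trip against that API, with illustrative all-zero key and nonce values, might look like this:

#include <stdio.h>
#include "comet.h"

int main(void)
{
    unsigned char key[COMET_KEY_SIZE] = {0};        /* 16 bytes */
    unsigned char nonce[COMET_64_NONCE_SIZE] = {0}; /* 15 bytes */
    unsigned char msg[] = "hello";
    unsigned char ad[] = "header";
    unsigned char ct[sizeof(msg) + COMET_64_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    /* Encrypt: the ciphertext plus an 8-byte tag is written to ct. */
    comet_64_cham_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                               ad, sizeof(ad), NULL, nonce, key);

    /* Decrypt: returns 0 when the tag verifies, -1 otherwise. */
    if (comet_64_cham_aead_decrypt(pt, &ptlen, NULL, ct, ctlen,
                                   ad, sizeof(ad), nonce, key) == 0)
        printf("tag OK, %llu bytes recovered\n", ptlen);
    else
        printf("tag check failed\n");
    return 0;
}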
- * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa comet_64_speck_aead_decrypt() - */ -int comet_64_speck_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with COMET-64_SPECK-64/128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa comet_64_speck_aead_encrypt() - */ -int comet_64_speck_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/encrypt.c b/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/encrypt.c deleted file mode 100644 index e832eac..0000000 --- a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "comet.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return comet_64_cham_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return comet_64_cham_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-cham-avr.S b/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-cham-avr.S deleted file mode 100644 index 514a09a..0000000 --- a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-cham-avr.S +++ /dev/null @@ -1,915 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global cham128_128_encrypt - .type cham128_128_encrypt, @function -cham128_128_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - 
push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 48 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - movw r22,r18 - movw r24,r20 - movw r6,r18 - movw r8,r20 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r18,r6 - eor r19,r7 - eor r20,r8 - eor r21,r9 - movw r2,r18 - movw r4,r20 - eor r18,r25 - eor r19,r22 - eor r20,r23 - eor r21,r24 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r2,r9 - eor r3,r6 - eor r4,r7 - eor r5,r8 - std Y+21,r2 - std Y+22,r3 - std Y+23,r4 - std Y+24,r5 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - movw r22,r18 - movw r24,r20 - movw r6,r18 - movw r8,r20 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r18,r6 - eor r19,r7 - eor r20,r8 - eor r21,r9 - movw r2,r18 - movw r4,r20 - eor r18,r25 - eor r19,r22 - eor r20,r23 - eor r21,r24 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r2,r9 - eor r3,r6 - eor r4,r7 - eor r5,r8 - std Y+17,r2 - std Y+18,r3 - std Y+19,r4 - std Y+20,r5 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - movw r22,r18 - movw r24,r20 - movw r6,r18 - movw r8,r20 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r18,r6 - eor r19,r7 - eor r20,r8 - eor r21,r9 - movw r2,r18 - movw r4,r20 - eor r18,r25 - eor r19,r22 - eor r20,r23 - eor r21,r24 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r2,r9 - eor r3,r6 - eor r4,r7 - eor r5,r8 - std Y+29,r2 - std Y+30,r3 - std Y+31,r4 - std Y+32,r5 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - movw r22,r18 - movw r24,r20 - movw r6,r18 - movw r8,r20 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r18,r6 - eor r19,r7 - eor r20,r8 - eor r21,r9 - movw r2,r18 - movw r4,r20 - eor r18,r25 - eor r19,r22 - eor r20,r23 - eor r21,r24 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r2,r9 - eor r3,r6 - eor r4,r7 - eor r5,r8 - std Y+25,r2 - std Y+26,r3 - std Y+27,r4 - std Y+28,r5 - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r18,X+ - ld r22,X+ - ld r23,X+ - ld r24,X+ - ld r25,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - mov r30,r1 -197: - eor r19,r30 - movw r10,r22 - movw r12,r24 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - ldd r0,Y+1 - eor r10,r0 - ldd r0,Y+2 - eor r11,r0 - ldd r0,Y+3 - eor r12,r0 - ldd r0,Y+4 - eor r13,r0 - add r19,r10 - adc r20,r11 - adc r21,r12 - adc r18,r13 - inc r30 - eor r22,r30 - mov r0,r5 - mov r5,r4 - mov r4,r3 - mov r3,r2 - mov r2,r0 - ldd r10,Y+5 - ldd r11,Y+6 - ldd r12,Y+7 - ldd r13,Y+8 - eor r10,r2 - eor r11,r3 - eor r12,r4 - eor r13,r5 - add r22,r10 - adc r23,r11 - adc r24,r12 - adc r25,r13 - lsl r22 - rol r23 - rol r24 - rol r25 - adc r22,r1 - inc r30 - eor r3,r30 - movw r10,r6 - movw r12,r8 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - ldd r0,Y+9 - eor r10,r0 - ldd r0,Y+10 - eor r11,r0 - ldd r0,Y+11 - eor r12,r0 - ldd r0,Y+12 - eor r13,r0 - add r3,r10 - adc r4,r11 - adc r5,r12 - adc r2,r13 - inc r30 - eor r6,r30 - mov r0,r21 - mov r21,r20 - mov r20,r19 
- mov r19,r18 - mov r18,r0 - ldd r10,Y+13 - ldd r11,Y+14 - ldd r12,Y+15 - ldd r13,Y+16 - eor r10,r18 - eor r11,r19 - eor r12,r20 - eor r13,r21 - add r6,r10 - adc r7,r11 - adc r8,r12 - adc r9,r13 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - inc r30 - eor r19,r30 - movw r10,r22 - movw r12,r24 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - ldd r0,Y+17 - eor r10,r0 - ldd r0,Y+18 - eor r11,r0 - ldd r0,Y+19 - eor r12,r0 - ldd r0,Y+20 - eor r13,r0 - add r19,r10 - adc r20,r11 - adc r21,r12 - adc r18,r13 - inc r30 - eor r22,r30 - mov r0,r5 - mov r5,r4 - mov r4,r3 - mov r3,r2 - mov r2,r0 - ldd r10,Y+21 - ldd r11,Y+22 - ldd r12,Y+23 - ldd r13,Y+24 - eor r10,r2 - eor r11,r3 - eor r12,r4 - eor r13,r5 - add r22,r10 - adc r23,r11 - adc r24,r12 - adc r25,r13 - lsl r22 - rol r23 - rol r24 - rol r25 - adc r22,r1 - inc r30 - eor r3,r30 - movw r10,r6 - movw r12,r8 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - ldd r0,Y+25 - eor r10,r0 - ldd r0,Y+26 - eor r11,r0 - ldd r0,Y+27 - eor r12,r0 - ldd r0,Y+28 - eor r13,r0 - add r3,r10 - adc r4,r11 - adc r5,r12 - adc r2,r13 - inc r30 - eor r6,r30 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r10,Y+29 - ldd r11,Y+30 - ldd r12,Y+31 - ldd r13,Y+32 - eor r10,r18 - eor r11,r19 - eor r12,r20 - eor r13,r21 - add r6,r10 - adc r7,r11 - adc r8,r12 - adc r9,r13 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - inc r30 - ldi r31,80 - cpse r30,r31 - rjmp 197b - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r18 - st X+,r22 - st X+,r23 - st X+,r24 - st X+,r25 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size cham128_128_encrypt, .-cham128_128_encrypt - - .text -.global cham64_128_encrypt - .type cham64_128_encrypt, @function -cham64_128_encrypt: - push r28 - push r29 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 38 - ld r18,Z - ldd r19,Z+1 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+1,r18 - std Y+2,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+19,r22 - std Y+20,r23 - ldd r18,Z+2 - ldd r19,Z+3 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+3,r18 - std Y+4,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+17,r22 - std Y+18,r23 - ldd r18,Z+4 - ldd r19,Z+5 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+5,r18 - std Y+6,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+23,r22 - std Y+24,r23 - ldd r18,Z+6 - ldd r19,Z+7 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+7,r18 - std Y+8,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+21,r22 - std Y+22,r23 - ldd r18,Z+8 - ldd r19,Z+9 - movw r20,r18 - movw r24,r18 - 
lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+9,r18 - std Y+10,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+27,r22 - std Y+28,r23 - ldd r18,Z+10 - ldd r19,Z+11 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+11,r18 - std Y+12,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+25,r22 - std Y+26,r23 - ldd r18,Z+12 - ldd r19,Z+13 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+13,r18 - std Y+14,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+31,r22 - std Y+32,r23 - ldd r18,Z+14 - ldd r19,Z+15 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+15,r18 - std Y+16,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+29,r22 - std Y+30,r23 - ld r19,X+ - ld r18,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r24,X+ - ld r25,X+ - mov r16,r1 -201: - eor r19,r16 - movw r30,r20 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+1 - eor r30,r0 - ldd r0,Y+2 - eor r31,r0 - add r19,r30 - adc r18,r31 - inc r16 - eor r20,r16 - mov r0,r23 - mov r23,r22 - mov r22,r0 - ldd r30,Y+3 - ldd r31,Y+4 - eor r30,r22 - eor r31,r23 - add r20,r30 - adc r21,r31 - lsl r20 - rol r21 - adc r20,r1 - inc r16 - eor r23,r16 - movw r30,r24 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+5 - eor r30,r0 - ldd r0,Y+6 - eor r31,r0 - add r23,r30 - adc r22,r31 - inc r16 - eor r24,r16 - mov r0,r19 - mov r19,r18 - mov r18,r0 - ldd r30,Y+7 - ldd r31,Y+8 - eor r30,r18 - eor r31,r19 - add r24,r30 - adc r25,r31 - lsl r24 - rol r25 - adc r24,r1 - inc r16 - eor r19,r16 - movw r30,r20 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+9 - eor r30,r0 - ldd r0,Y+10 - eor r31,r0 - add r19,r30 - adc r18,r31 - inc r16 - eor r20,r16 - mov r0,r23 - mov r23,r22 - mov r22,r0 - ldd r30,Y+11 - ldd r31,Y+12 - eor r30,r22 - eor r31,r23 - add r20,r30 - adc r21,r31 - lsl r20 - rol r21 - adc r20,r1 - inc r16 - eor r23,r16 - movw r30,r24 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+13 - eor r30,r0 - ldd r0,Y+14 - eor r31,r0 - add r23,r30 - adc r22,r31 - inc r16 - eor r24,r16 - mov r0,r19 - mov r19,r18 - mov r18,r0 - ldd r30,Y+15 - ldd r31,Y+16 - eor r30,r18 - eor r31,r19 - add r24,r30 - adc r25,r31 - lsl r24 - rol r25 - adc r24,r1 - inc r16 - eor r19,r16 - movw r30,r20 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+17 - eor r30,r0 - ldd r0,Y+18 - eor r31,r0 - add r19,r30 - adc r18,r31 - inc r16 - eor r20,r16 - mov r0,r23 - mov r23,r22 - mov r22,r0 - ldd r30,Y+19 - ldd r31,Y+20 - eor r30,r22 - eor r31,r23 - add r20,r30 - adc r21,r31 - lsl r20 - rol r21 - adc r20,r1 - inc r16 - eor r23,r16 - movw r30,r24 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+21 - eor r30,r0 - ldd r0,Y+22 - eor r31,r0 - add r23,r30 - adc r22,r31 - inc r16 - eor r24,r16 - mov r0,r19 - mov r19,r18 - mov r18,r0 - ldd r30,Y+23 - ldd r31,Y+24 - eor r30,r18 - eor r31,r19 - add r24,r30 - adc r25,r31 - lsl r24 - rol r25 - adc r24,r1 - inc r16 - eor r19,r16 - movw r30,r20 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+25 - eor r30,r0 - ldd r0,Y+26 - eor r31,r0 - add r19,r30 - adc r18,r31 - inc r16 - eor r20,r16 - mov r0,r23 - mov r23,r22 - mov r22,r0 - ldd 
r30,Y+27 - ldd r31,Y+28 - eor r30,r22 - eor r31,r23 - add r20,r30 - adc r21,r31 - lsl r20 - rol r21 - adc r20,r1 - inc r16 - eor r23,r16 - movw r30,r24 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+29 - eor r30,r0 - ldd r0,Y+30 - eor r31,r0 - add r23,r30 - adc r22,r31 - inc r16 - eor r24,r16 - mov r0,r19 - mov r19,r18 - mov r18,r0 - ldd r30,Y+31 - ldd r31,Y+32 - eor r30,r18 - eor r31,r19 - add r24,r30 - adc r25,r31 - lsl r24 - rol r25 - adc r24,r1 - inc r16 - ldi r17,80 - cpse r16,r17 - rjmp 201b - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r19 - st X+,r18 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r24 - st X+,r25 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r29 - pop r28 - ret - .size cham64_128_encrypt, .-cham64_128_encrypt - -#endif diff --git a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-cham.c b/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-cham.c deleted file mode 100644 index 23351a3..0000000 --- a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-cham.c +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
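A reading aid for the generated cham128_128_encrypt assembly above: the routine keeps the 16 state bytes in registers and spills the expanded key schedule into the 32-byte Y-indexed frame it reserves with sbiw r28,32, holding the same eight 32-bit words that the portable C implementation below computes into its k[] array; the cham64_128_encrypt routine appears to do the same with sixteen 16-bit schedule words. A rough, assumed layout, inferred from the std Y+n stores (illustrative only, not part of the original files):

    #include <stdint.h>

    /* Hypothetical sketch of the Y-relative frame used by cham128_128_encrypt,
     * inferred from its std Y+n stores; offsets are a reading, not normative. */
    struct cham128_frame {
        uint32_t k_lo[4]; /* Y+1 ..Y+16: k[0..3] = key[i] ^ rol1(key[i]) ^ rol8(key[i])   */
        uint32_t k_hi[4]; /* Y+17..Y+32: k[4..7] = key[i^1] ^ rol1 ^ rol11 (pair-swapped) */
    };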
- */ - -#include "internal-cham.h" -#include "internal-util.h" - -#if !defined(__AVR__) - -void cham128_128_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input) -{ - uint32_t x0, x1, x2, x3; - uint32_t k[8]; - uint8_t round; - - /* Unpack the key and generate the key schedule */ - k[0] = le_load_word32(key); - k[1] = le_load_word32(key + 4); - k[2] = le_load_word32(key + 8); - k[3] = le_load_word32(key + 12); - k[4] = k[1] ^ leftRotate1(k[1]) ^ leftRotate11(k[1]); - k[5] = k[0] ^ leftRotate1(k[0]) ^ leftRotate11(k[0]); - k[6] = k[3] ^ leftRotate1(k[3]) ^ leftRotate11(k[3]); - k[7] = k[2] ^ leftRotate1(k[2]) ^ leftRotate11(k[2]); - k[0] ^= leftRotate1(k[0]) ^ leftRotate8(k[0]); - k[1] ^= leftRotate1(k[1]) ^ leftRotate8(k[1]); - k[2] ^= leftRotate1(k[2]) ^ leftRotate8(k[2]); - k[3] ^= leftRotate1(k[3]) ^ leftRotate8(k[3]); - - /* Unpack the input block */ - x0 = le_load_word32(input); - x1 = le_load_word32(input + 4); - x2 = le_load_word32(input + 8); - x3 = le_load_word32(input + 12); - - /* Perform the 80 rounds eight at a time */ - for (round = 0; round < 80; round += 8) { - x0 = leftRotate8((x0 ^ round) + (leftRotate1(x1) ^ k[0])); - x1 = leftRotate1((x1 ^ (round + 1)) + (leftRotate8(x2) ^ k[1])); - x2 = leftRotate8((x2 ^ (round + 2)) + (leftRotate1(x3) ^ k[2])); - x3 = leftRotate1((x3 ^ (round + 3)) + (leftRotate8(x0) ^ k[3])); - x0 = leftRotate8((x0 ^ (round + 4)) + (leftRotate1(x1) ^ k[4])); - x1 = leftRotate1((x1 ^ (round + 5)) + (leftRotate8(x2) ^ k[5])); - x2 = leftRotate8((x2 ^ (round + 6)) + (leftRotate1(x3) ^ k[6])); - x3 = leftRotate1((x3 ^ (round + 7)) + (leftRotate8(x0) ^ k[7])); - } - - /* Pack the state into the output block */ - le_store_word32(output, x0); - le_store_word32(output + 4, x1); - le_store_word32(output + 8, x2); - le_store_word32(output + 12, x3); -} - -void cham64_128_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input) -{ - uint16_t x0, x1, x2, x3; - uint16_t k[16]; - uint8_t round; - - /* Unpack the key and generate the key schedule */ - k[0] = le_load_word16(key); - k[1] = le_load_word16(key + 2); - k[2] = le_load_word16(key + 4); - k[3] = le_load_word16(key + 6); - k[4] = le_load_word16(key + 8); - k[5] = le_load_word16(key + 10); - k[6] = le_load_word16(key + 12); - k[7] = le_load_word16(key + 14); - k[8] = k[1] ^ leftRotate1_16(k[1]) ^ leftRotate11_16(k[1]); - k[9] = k[0] ^ leftRotate1_16(k[0]) ^ leftRotate11_16(k[0]); - k[10] = k[3] ^ leftRotate1_16(k[3]) ^ leftRotate11_16(k[3]); - k[11] = k[2] ^ leftRotate1_16(k[2]) ^ leftRotate11_16(k[2]); - k[12] = k[5] ^ leftRotate1_16(k[5]) ^ leftRotate11_16(k[5]); - k[13] = k[4] ^ leftRotate1_16(k[4]) ^ leftRotate11_16(k[4]); - k[14] = k[7] ^ leftRotate1_16(k[7]) ^ leftRotate11_16(k[7]); - k[15] = k[6] ^ leftRotate1_16(k[6]) ^ leftRotate11_16(k[6]); - k[0] ^= leftRotate1_16(k[0]) ^ leftRotate8_16(k[0]); - k[1] ^= leftRotate1_16(k[1]) ^ leftRotate8_16(k[1]); - k[2] ^= leftRotate1_16(k[2]) ^ leftRotate8_16(k[2]); - k[3] ^= leftRotate1_16(k[3]) ^ leftRotate8_16(k[3]); - k[4] ^= leftRotate1_16(k[4]) ^ leftRotate8_16(k[4]); - k[5] ^= leftRotate1_16(k[5]) ^ leftRotate8_16(k[5]); - k[6] ^= leftRotate1_16(k[6]) ^ leftRotate8_16(k[6]); - k[7] ^= leftRotate1_16(k[7]) ^ leftRotate8_16(k[7]); - - /* Unpack the input block */ - x0 = le_load_word16(input); - x1 = le_load_word16(input + 2); - x2 = le_load_word16(input + 4); - x3 = le_load_word16(input + 6); - - /* Perform the 80 rounds four at a time */ - for (round = 0; round < 80; round += 4) { - x0 = 
leftRotate8_16 - ((x0 ^ round) + - (leftRotate1_16(x1) ^ k[round % 16])); - x1 = leftRotate1_16 - ((x1 ^ (round + 1)) + - (leftRotate8_16(x2) ^ k[(round + 1) % 16])); - x2 = leftRotate8_16 - ((x2 ^ (round + 2)) + - (leftRotate1_16(x3) ^ k[(round + 2) % 16])); - x3 = leftRotate1_16 - ((x3 ^ (round + 3)) + - (leftRotate8_16(x0) ^ k[(round + 3) % 16])); - } - - /* Pack the state into the output block */ - le_store_word16(output, x0); - le_store_word16(output + 2, x1); - le_store_word16(output + 4, x2); - le_store_word16(output + 6, x3); -} - -#endif diff --git a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-cham.h b/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-cham.h deleted file mode 100644 index 29d5ccf..0000000 --- a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-cham.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_CHAM_H -#define LW_INTERNAL_CHAM_H - -/** - * \file internal-cham.h - * \brief CHAM block cipher. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts a 128-bit block with CHAM-128-128. - * - * \param key Points to the 16 bytes of the key. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void cham128_128_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 64-bit block with CHAM-64-128. - * - * \param key Points to the 16 bytes of the key. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. 
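The prototypes documented in this header take a 16-byte key and operate on a single block; a minimal usage sketch (values are illustrative only, not test vectors from this patch):

    #include "internal-cham.h"

    /* Illustrative only: encrypt one block with each CHAM variant. */
    static void cham_demo(void)
    {
        unsigned char key[16] = {0};  /* 128-bit key (all zero here) */
        unsigned char b128[16] = {0}; /* one CHAM-128-128 block      */
        unsigned char b64[8]   = {0}; /* one CHAM-64-128 block       */

        /* Input and output may alias, so in-place encryption is fine. */
        cham128_128_encrypt(key, b128, b128);
        cham64_128_encrypt(key, b64, b64);
    }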
- */ -void cham64_128_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-speck64-avr.S b/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-speck64-avr.S deleted file mode 100644 index d8d641e..0000000 --- a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-speck64-avr.S +++ /dev/null @@ -1,272 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global speck64_128_encrypt - .type speck64_128_encrypt, @function -speck64_128_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 20 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ld r14,X+ - ld r15,X+ - ld r24,X+ - ld r25,X+ - ld r30,X+ - ld r31,X+ - ld r12,X+ - ld r13,X+ - mov r16,r1 -25: - add r31,r14 - adc r12,r15 - adc r13,r24 - adc r30,r25 - eor r31,r18 - eor r12,r19 - eor r13,r20 - eor r30,r21 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r31 - eor r15,r12 - eor r24,r13 - eor r25,r30 - mov r0,r22 - mov r22,r23 - add r22,r18 - mov r23,r2 - adc r23,r19 - mov r2,r3 - adc r2,r20 - mov r3,r0 - adc r3,r21 - eor r22,r16 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - inc r16 - add r12,r14 - adc r13,r15 - adc r30,r24 - adc r31,r25 - eor r12,r18 - eor r13,r19 - eor r30,r20 - eor r31,r21 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r12 - eor r15,r13 - eor r24,r30 - eor r25,r31 - mov r0,r4 - mov r4,r5 - add r4,r18 - mov r5,r6 - adc r5,r19 - mov r6,r7 - adc r6,r20 - mov r7,r0 - adc r7,r21 - eor r4,r16 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - inc r16 - add r13,r14 - adc r30,r15 - adc r31,r24 - adc r12,r25 - eor r13,r18 - eor r30,r19 - eor r31,r20 - eor r12,r21 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r13 - eor r15,r30 - eor r24,r31 - eor r25,r12 - mov r0,r12 - mov r12,r31 - mov r31,r30 - mov r30,r13 - mov r13,r0 - mov r0,r8 - mov r8,r9 - add r8,r18 - mov r9,r10 - adc r9,r19 - mov r10,r11 - adc r10,r20 - mov r11,r0 - adc r11,r21 - eor r8,r16 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - inc r16 - ldi r17,27 - cpse r16,r17 - rjmp 25b - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r14 - st X+,r15 - st X+,r24 - st X+,r25 - st X+,r30 - st X+,r31 - st X+,r12 
- st X+,r13 - pop r0 - pop r0 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size speck64_128_encrypt, .-speck64_128_encrypt - -#endif diff --git a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-speck64.c b/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-speck64.c deleted file mode 100644 index 494c801..0000000 --- a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-speck64.c +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-speck64.h" -#include "internal-util.h" - -#if !defined(__AVR__) - -void speck64_128_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input) -{ - uint32_t l0, l1, l2, s; - uint32_t x, y; - uint8_t round; - - /* Unpack the key and the input block */ - s = le_load_word32(key); - l0 = le_load_word32(key + 4); - l1 = le_load_word32(key + 8); - l2 = le_load_word32(key + 12); - y = le_load_word32(input); - x = le_load_word32(input + 4); - - /* Perform all 27 encryption rounds, in groups of 3 */ - #define round_xy() \ - do { \ - x = (rightRotate8(x) + y) ^ s; \ - y = leftRotate3(y) ^ x; \ - } while (0) - #define schedule(l) \ - do { \ - l = (s + rightRotate8(l)) ^ round; \ - s = leftRotate3(s) ^ l; \ - ++round; \ - } while (0) - for (round = 0; round < 27; ) { - round_xy(); - schedule(l0); - round_xy(); - schedule(l1); - round_xy(); - schedule(l2); - } - - /* Write the result to the output */ - le_store_word32(output, y); - le_store_word32(output + 4, x); -} - -#endif diff --git a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-speck64.h b/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-speck64.h deleted file mode 100644 index fdf840a..0000000 --- a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-speck64.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SPECK64_H -#define LW_INTERNAL_SPECK64_H - -/** - * \file internal-speck64.h - * \brief SPECK-64 block cipher. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts a 64-bit block with SPECK-64-128 in COMET byte order. - * - * \param key Points to the 16 bytes of the key. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * \note This version expects the key, input, and output to be in - * little-endian byte order, as expected by the COMET specification. - */ -void speck64_128_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-util.h b/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/comet/Implementations/crypto_aead/comet64chamv1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
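The speck64_128_encrypt implementation above performs its 27 rounds in groups of three because the inline key schedule cycles through exactly three l-words. The following self-contained sketch (helper names are illustrative, not from the patch) writes the same computation as a single plain loop to make that structure explicit:

    #include <stdint.h>

    #define ROTR32(x, n) (((x) >> (n)) | ((x) << (32 - (n))))
    #define ROTL32(x, n) (((x) << (n)) | ((x) >> (32 - (n))))

    /* Sketch: the 27 SPECK-64/128 rounds as one loop, equivalent to the
     * grouped round_xy()/schedule() macros above. */
    static void speck64_128_sketch(uint32_t s, uint32_t l[3],
                                   uint32_t *x, uint32_t *y)
    {
        uint8_t round;
        for (round = 0; round < 27; ++round) {
            *x = (ROTR32(*x, 8) + *y) ^ s;   /* round function with key word s */
            *y = ROTL32(*y, 3) ^ *x;
            /* Inline key schedule: cycle through l[0], l[1], l[2]. */
            l[round % 3] = (s + ROTR32(l[round % 3], 8)) ^ round;
            s = ROTL32(s, 3) ^ l[round % 3];
        }
    }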
- */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define 
le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. 
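A small worked check of the composition idea described in this comment (helper names below are illustrative; the library's own macros follow): rotating left by 8 and then right by 3 equals rotating left by 5, which is exactly how leftRotate5() is built in the composed variant.

    #include <assert.h>
    #include <stdint.h>

    static uint32_t rotl32(uint32_t x, unsigned n) { return (x << n) | (x >> (32 - n)); }
    static uint32_t rotr32(uint32_t x, unsigned n) { return (x >> n) | (x << (32 - n)); }

    /* Compose a rotate-left-by-5 from one byte rotate and three bit rotates. */
    static void rotate_composition_demo(void)
    {
        uint32_t x = 0x12345678u;
        uint32_t composed = rotr32(rotr32(rotr32(rotl32(x, 8), 1), 1), 1);
        assert(composed == rotl32(x, 5)); /* left 8 then right 3 == left 5 */
    }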
*/ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/comet/Implementations/crypto_aead/comet64chamv1/rhys/comet.c b/comet/Implementations/crypto_aead/comet64chamv1/rhys/comet.c index d068de2..ceb0fd6 100644 --- a/comet/Implementations/crypto_aead/comet64chamv1/rhys/comet.c +++ b/comet/Implementations/crypto_aead/comet64chamv1/rhys/comet.c @@ -22,6 +22,7 @@ #include "comet.h" #include "internal-cham.h" +#include "internal-speck64.h" #include "internal-util.h" #include @@ -478,58 +479,6 @@ int comet_64_cham_aead_decrypt return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_64_TAG_SIZE); } -/** - * \brief Encrypts a 64-bit block with SPECK-64-128 in COMET byte order. - * - * \param key Points to the 16 bytes of the key. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * \note This version differs from standard SPECK-64 in that it uses the - * little-endian byte order from the COMET specification which is different - * from the big-endian byte order from the original SPECK paper. 
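Both the static helper removed in this hunk and the shared speck64_128_encrypt that replaces it (declared in internal-speck64.h above) use the little-endian COMET convention this comment describes. A tiny illustration of what that means for the first schedule word, matching le_load_word32() from internal-util.h (values are illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* The first key byte becomes the least significant byte of the word. */
    static void comet_byte_order_demo(void)
    {
        const uint8_t key[4] = { 0x01, 0x02, 0x03, 0x04 };
        uint32_t s = (uint32_t)key[0]
                   | ((uint32_t)key[1] << 8)
                   | ((uint32_t)key[2] << 16)
                   | ((uint32_t)key[3] << 24);
        assert(s == 0x04030201u);
    }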
- */ -static void speck64_128_comet_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input) -{ - uint32_t l[4]; - uint32_t x, y, s; - uint8_t round; - uint8_t li_in = 0; - uint8_t li_out = 3; - - /* Unpack the key and the input block */ - s = le_load_word32(key); - l[0] = le_load_word32(key + 4); - l[1] = le_load_word32(key + 8); - l[2] = le_load_word32(key + 12); - y = le_load_word32(input); - x = le_load_word32(input + 4); - - /* Perform all encryption rounds except the last */ - for (round = 0; round < 26; ++round) { - /* Perform the round with the current key schedule word */ - x = (rightRotate8(x) + y) ^ s; - y = leftRotate3(y) ^ x; - - /* Calculate the next key schedule word */ - l[li_out] = (s + rightRotate8(l[li_in])) ^ round; - s = leftRotate3(s) ^ l[li_out]; - li_in = (li_in + 1) & 0x03; - li_out = (li_out + 1) & 0x03; - } - - /* Perform the last encryption round and write the result to the output */ - x = (rightRotate8(x) + y) ^ s; - y = leftRotate3(y) ^ x; - le_store_word32(output, y); - le_store_word32(output + 4, x); -} - int comet_64_speck_aead_encrypt (unsigned char *c, unsigned long long *clen, const unsigned char *m, unsigned long long mlen, @@ -547,23 +496,23 @@ int comet_64_speck_aead_encrypt /* Set up the initial state of Y and Z */ memset(Y, 0, 8); - speck64_128_comet_encrypt(k, Y, Y); + speck64_128_encrypt(k, Y, Y); memcpy(Z, npub, 15); Z[15] = 0; lw_xor_block(Z, k, 16); /* Process the associated data */ if (adlen > 0) - comet_process_ad(Y, Z, 8, speck64_128_comet_encrypt, ad, adlen); + comet_process_ad(Y, Z, 8, speck64_128_encrypt, ad, adlen); /* Encrypt the plaintext to produce the ciphertext */ if (mlen > 0) - comet_encrypt_64(Y, Z, speck64_128_comet_encrypt, c, m, mlen); + comet_encrypt_64(Y, Z, speck64_128_encrypt, c, m, mlen); /* Generate the authentication tag */ Z[15] ^= 0x80; comet_adjust_block_key(Z); - speck64_128_comet_encrypt(Z, c + mlen, Y); + speck64_128_encrypt(Z, c + mlen, Y); return 0; } @@ -586,22 +535,22 @@ int comet_64_speck_aead_decrypt /* Set up the initial state of Y and Z */ memset(Y, 0, 8); - speck64_128_comet_encrypt(k, Y, Y); + speck64_128_encrypt(k, Y, Y); memcpy(Z, npub, 15); Z[15] = 0; lw_xor_block(Z, k, 16); /* Process the associated data */ if (adlen > 0) - comet_process_ad(Y, Z, 8, speck64_128_comet_encrypt, ad, adlen); + comet_process_ad(Y, Z, 8, speck64_128_encrypt, ad, adlen); /* Decrypt the ciphertext to produce the plaintext */ if (clen > COMET_64_TAG_SIZE) - comet_decrypt_64(Y, Z, speck64_128_comet_encrypt, m, c, *mlen); + comet_decrypt_64(Y, Z, speck64_128_encrypt, m, c, *mlen); /* Check the authentication tag */ Z[15] ^= 0x80; comet_adjust_block_key(Z); - speck64_128_comet_encrypt(Z, Y, Y); + speck64_128_encrypt(Z, Y, Y); return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_64_TAG_SIZE); } diff --git a/comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-cham-avr.S b/comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-cham-avr.S new file mode 100644 index 0000000..514a09a --- /dev/null +++ b/comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-cham-avr.S @@ -0,0 +1,915 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global cham128_128_encrypt + .type cham128_128_encrypt, @function +cham128_128_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in 
r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 48 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + movw r22,r18 + movw r24,r20 + movw r6,r18 + movw r8,r20 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r18,r6 + eor r19,r7 + eor r20,r8 + eor r21,r9 + movw r2,r18 + movw r4,r20 + eor r18,r25 + eor r19,r22 + eor r20,r23 + eor r21,r24 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r2,r9 + eor r3,r6 + eor r4,r7 + eor r5,r8 + std Y+21,r2 + std Y+22,r3 + std Y+23,r4 + std Y+24,r5 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + movw r22,r18 + movw r24,r20 + movw r6,r18 + movw r8,r20 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r18,r6 + eor r19,r7 + eor r20,r8 + eor r21,r9 + movw r2,r18 + movw r4,r20 + eor r18,r25 + eor r19,r22 + eor r20,r23 + eor r21,r24 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r2,r9 + eor r3,r6 + eor r4,r7 + eor r5,r8 + std Y+17,r2 + std Y+18,r3 + std Y+19,r4 + std Y+20,r5 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + movw r22,r18 + movw r24,r20 + movw r6,r18 + movw r8,r20 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r18,r6 + eor r19,r7 + eor r20,r8 + eor r21,r9 + movw r2,r18 + movw r4,r20 + eor r18,r25 + eor r19,r22 + eor r20,r23 + eor r21,r24 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r2,r9 + eor r3,r6 + eor r4,r7 + eor r5,r8 + std Y+29,r2 + std Y+30,r3 + std Y+31,r4 + std Y+32,r5 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + movw r22,r18 + movw r24,r20 + movw r6,r18 + movw r8,r20 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r18,r6 + eor r19,r7 + eor r20,r8 + eor r21,r9 + movw r2,r18 + movw r4,r20 + eor r18,r25 + eor r19,r22 + eor r20,r23 + eor r21,r24 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r2,r9 + eor r3,r6 + eor r4,r7 + eor r5,r8 + std Y+25,r2 + std Y+26,r3 + std Y+27,r4 + std Y+28,r5 + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r18,X+ + ld r22,X+ + ld r23,X+ + ld r24,X+ + ld r25,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + mov r30,r1 +197: + eor r19,r30 + movw r10,r22 + movw r12,r24 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + ldd r0,Y+1 + eor r10,r0 + ldd r0,Y+2 + eor r11,r0 + ldd r0,Y+3 + eor r12,r0 + ldd r0,Y+4 + eor r13,r0 + add r19,r10 + adc r20,r11 + adc r21,r12 + adc r18,r13 + inc r30 + eor r22,r30 + mov r0,r5 + mov r5,r4 + mov r4,r3 + mov r3,r2 + mov r2,r0 + ldd r10,Y+5 + ldd r11,Y+6 + ldd r12,Y+7 + ldd r13,Y+8 + eor r10,r2 + eor r11,r3 + eor r12,r4 + eor r13,r5 + add r22,r10 + adc r23,r11 + adc r24,r12 + adc r25,r13 + lsl r22 + rol r23 + rol r24 + rol r25 + adc r22,r1 + inc r30 + eor r3,r30 + movw r10,r6 + movw r12,r8 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + ldd r0,Y+9 + eor r10,r0 + ldd r0,Y+10 + eor r11,r0 + ldd r0,Y+11 + eor r12,r0 + ldd r0,Y+12 + eor r13,r0 + add r3,r10 + adc r4,r11 + adc r5,r12 + adc r2,r13 + inc r30 + eor r6,r30 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r10,Y+13 + ldd r11,Y+14 + ldd r12,Y+15 + ldd r13,Y+16 + eor r10,r18 + eor r11,r19 + eor r12,r20 + 
eor r13,r21 + add r6,r10 + adc r7,r11 + adc r8,r12 + adc r9,r13 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + inc r30 + eor r19,r30 + movw r10,r22 + movw r12,r24 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + ldd r0,Y+17 + eor r10,r0 + ldd r0,Y+18 + eor r11,r0 + ldd r0,Y+19 + eor r12,r0 + ldd r0,Y+20 + eor r13,r0 + add r19,r10 + adc r20,r11 + adc r21,r12 + adc r18,r13 + inc r30 + eor r22,r30 + mov r0,r5 + mov r5,r4 + mov r4,r3 + mov r3,r2 + mov r2,r0 + ldd r10,Y+21 + ldd r11,Y+22 + ldd r12,Y+23 + ldd r13,Y+24 + eor r10,r2 + eor r11,r3 + eor r12,r4 + eor r13,r5 + add r22,r10 + adc r23,r11 + adc r24,r12 + adc r25,r13 + lsl r22 + rol r23 + rol r24 + rol r25 + adc r22,r1 + inc r30 + eor r3,r30 + movw r10,r6 + movw r12,r8 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + ldd r0,Y+25 + eor r10,r0 + ldd r0,Y+26 + eor r11,r0 + ldd r0,Y+27 + eor r12,r0 + ldd r0,Y+28 + eor r13,r0 + add r3,r10 + adc r4,r11 + adc r5,r12 + adc r2,r13 + inc r30 + eor r6,r30 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r10,Y+29 + ldd r11,Y+30 + ldd r12,Y+31 + ldd r13,Y+32 + eor r10,r18 + eor r11,r19 + eor r12,r20 + eor r13,r21 + add r6,r10 + adc r7,r11 + adc r8,r12 + adc r9,r13 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + inc r30 + ldi r31,80 + cpse r30,r31 + rjmp 197b + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r18 + st X+,r22 + st X+,r23 + st X+,r24 + st X+,r25 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size cham128_128_encrypt, .-cham128_128_encrypt + + .text +.global cham64_128_encrypt + .type cham64_128_encrypt, @function +cham64_128_encrypt: + push r28 + push r29 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 38 + ld r18,Z + ldd r19,Z+1 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+1,r18 + std Y+2,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+19,r22 + std Y+20,r23 + ldd r18,Z+2 + ldd r19,Z+3 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+3,r18 + std Y+4,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+17,r22 + std Y+18,r23 + ldd r18,Z+4 + ldd r19,Z+5 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+5,r18 + std Y+6,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+23,r22 + std Y+24,r23 + ldd r18,Z+6 + ldd r19,Z+7 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+7,r18 + std Y+8,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+21,r22 + std Y+22,r23 + ldd r18,Z+8 + ldd r19,Z+9 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+9,r18 + std Y+10,r19 + 
lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+27,r22 + std Y+28,r23 + ldd r18,Z+10 + ldd r19,Z+11 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+11,r18 + std Y+12,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+25,r22 + std Y+26,r23 + ldd r18,Z+12 + ldd r19,Z+13 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+13,r18 + std Y+14,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+31,r22 + std Y+32,r23 + ldd r18,Z+14 + ldd r19,Z+15 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+15,r18 + std Y+16,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+29,r22 + std Y+30,r23 + ld r19,X+ + ld r18,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r24,X+ + ld r25,X+ + mov r16,r1 +201: + eor r19,r16 + movw r30,r20 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+1 + eor r30,r0 + ldd r0,Y+2 + eor r31,r0 + add r19,r30 + adc r18,r31 + inc r16 + eor r20,r16 + mov r0,r23 + mov r23,r22 + mov r22,r0 + ldd r30,Y+3 + ldd r31,Y+4 + eor r30,r22 + eor r31,r23 + add r20,r30 + adc r21,r31 + lsl r20 + rol r21 + adc r20,r1 + inc r16 + eor r23,r16 + movw r30,r24 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+5 + eor r30,r0 + ldd r0,Y+6 + eor r31,r0 + add r23,r30 + adc r22,r31 + inc r16 + eor r24,r16 + mov r0,r19 + mov r19,r18 + mov r18,r0 + ldd r30,Y+7 + ldd r31,Y+8 + eor r30,r18 + eor r31,r19 + add r24,r30 + adc r25,r31 + lsl r24 + rol r25 + adc r24,r1 + inc r16 + eor r19,r16 + movw r30,r20 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+9 + eor r30,r0 + ldd r0,Y+10 + eor r31,r0 + add r19,r30 + adc r18,r31 + inc r16 + eor r20,r16 + mov r0,r23 + mov r23,r22 + mov r22,r0 + ldd r30,Y+11 + ldd r31,Y+12 + eor r30,r22 + eor r31,r23 + add r20,r30 + adc r21,r31 + lsl r20 + rol r21 + adc r20,r1 + inc r16 + eor r23,r16 + movw r30,r24 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+13 + eor r30,r0 + ldd r0,Y+14 + eor r31,r0 + add r23,r30 + adc r22,r31 + inc r16 + eor r24,r16 + mov r0,r19 + mov r19,r18 + mov r18,r0 + ldd r30,Y+15 + ldd r31,Y+16 + eor r30,r18 + eor r31,r19 + add r24,r30 + adc r25,r31 + lsl r24 + rol r25 + adc r24,r1 + inc r16 + eor r19,r16 + movw r30,r20 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+17 + eor r30,r0 + ldd r0,Y+18 + eor r31,r0 + add r19,r30 + adc r18,r31 + inc r16 + eor r20,r16 + mov r0,r23 + mov r23,r22 + mov r22,r0 + ldd r30,Y+19 + ldd r31,Y+20 + eor r30,r22 + eor r31,r23 + add r20,r30 + adc r21,r31 + lsl r20 + rol r21 + adc r20,r1 + inc r16 + eor r23,r16 + movw r30,r24 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+21 + eor r30,r0 + ldd r0,Y+22 + eor r31,r0 + add r23,r30 + adc r22,r31 + inc r16 + eor r24,r16 + mov r0,r19 + mov r19,r18 + mov r18,r0 + ldd r30,Y+23 + ldd r31,Y+24 + eor r30,r18 + eor r31,r19 + add r24,r30 + adc r25,r31 + lsl r24 + rol r25 + adc r24,r1 + inc r16 + eor r19,r16 + movw r30,r20 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+25 + eor r30,r0 + ldd r0,Y+26 + eor r31,r0 + add r19,r30 + adc r18,r31 + inc r16 + eor r20,r16 + mov r0,r23 + mov r23,r22 + mov r22,r0 + ldd r30,Y+27 + ldd r31,Y+28 + eor r30,r22 + eor r31,r23 + add r20,r30 + adc r21,r31 + lsl r20 + rol r21 + adc r20,r1 + inc r16 + eor r23,r16 
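/* Each "lsl ; rol ; adc rX,r1" sequence in this routine rotates the 16-bit
   half-word held in the named register pair left by one bit: the bit shifted
   out of the top is carried back into bit 0 (r1 is the fixed zero register
   under the avr-gcc ABI).  The key expansion earlier in this function cached
   the CHAM-64/128 round keys at Y+1..Y+32 on the stack frame, and r16 is the
   round counter that runs to 80 and is XORed into a state word at the start
   of each round. */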
+ movw r30,r24 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+29 + eor r30,r0 + ldd r0,Y+30 + eor r31,r0 + add r23,r30 + adc r22,r31 + inc r16 + eor r24,r16 + mov r0,r19 + mov r19,r18 + mov r18,r0 + ldd r30,Y+31 + ldd r31,Y+32 + eor r30,r18 + eor r31,r19 + add r24,r30 + adc r25,r31 + lsl r24 + rol r25 + adc r24,r1 + inc r16 + ldi r17,80 + cpse r16,r17 + rjmp 201b + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r19 + st X+,r18 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r24 + st X+,r25 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r29 + pop r28 + ret + .size cham64_128_encrypt, .-cham64_128_encrypt + +#endif diff --git a/comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-cham.c b/comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-cham.c index e097dbd..23351a3 100644 --- a/comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-cham.c +++ b/comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-cham.c @@ -23,6 +23,8 @@ #include "internal-cham.h" #include "internal-util.h" +#if !defined(__AVR__) + void cham128_128_encrypt (const unsigned char *key, unsigned char *output, const unsigned char *input) @@ -132,3 +134,5 @@ void cham64_128_encrypt le_store_word16(output + 4, x2); le_store_word16(output + 6, x3); } + +#endif diff --git a/comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-speck64-avr.S b/comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-speck64-avr.S new file mode 100644 index 0000000..d8d641e --- /dev/null +++ b/comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-speck64-avr.S @@ -0,0 +1,272 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global speck64_128_encrypt + .type speck64_128_encrypt, @function +speck64_128_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 20 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ld r14,X+ + ld r15,X+ + ld r24,X+ + ld r25,X+ + ld r30,X+ + ld r31,X+ + ld r12,X+ + ld r13,X+ + mov r16,r1 +25: + add r31,r14 + adc r12,r15 + adc r13,r24 + adc r30,r25 + eor r31,r18 + eor r12,r19 + eor r13,r20 + eor r30,r21 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r31 + eor r15,r12 + eor r24,r13 + eor r25,r30 + mov r0,r22 + mov r22,r23 + add r22,r18 + mov r23,r2 + adc r23,r19 + mov r2,r3 + adc r2,r20 + mov r3,r0 + adc r3,r21 + eor r22,r16 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + inc r16 + add r12,r14 + adc r13,r15 + adc r30,r24 + adc r31,r25 + eor r12,r18 + eor r13,r19 + eor r30,r20 + eor r31,r21 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r12 + eor r15,r13 + eor r24,r30 + eor r25,r31 + mov r0,r4 + mov r4,r5 + add r4,r18 + mov r5,r6 + adc r5,r19 + mov r6,r7 + adc r6,r20 + mov 
r7,r0 + adc r7,r21 + eor r4,r16 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + inc r16 + add r13,r14 + adc r30,r15 + adc r31,r24 + adc r12,r25 + eor r13,r18 + eor r30,r19 + eor r31,r20 + eor r12,r21 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r13 + eor r15,r30 + eor r24,r31 + eor r25,r12 + mov r0,r12 + mov r12,r31 + mov r31,r30 + mov r30,r13 + mov r13,r0 + mov r0,r8 + mov r8,r9 + add r8,r18 + mov r9,r10 + adc r9,r19 + mov r10,r11 + adc r10,r20 + mov r11,r0 + adc r11,r21 + eor r8,r16 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + inc r16 + ldi r17,27 + cpse r16,r17 + rjmp 25b + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r14 + st X+,r15 + st X+,r24 + st X+,r25 + st X+,r30 + st X+,r31 + st X+,r12 + st X+,r13 + pop r0 + pop r0 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size speck64_128_encrypt, .-speck64_128_encrypt + +#endif diff --git a/comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-speck64.c b/comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-speck64.c new file mode 100644 index 0000000..494c801 --- /dev/null +++ b/comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-speck64.c @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "internal-speck64.h" +#include "internal-util.h" + +#if !defined(__AVR__) + +void speck64_128_encrypt + (const unsigned char *key, unsigned char *output, + const unsigned char *input) +{ + uint32_t l0, l1, l2, s; + uint32_t x, y; + uint8_t round; + + /* Unpack the key and the input block */ + s = le_load_word32(key); + l0 = le_load_word32(key + 4); + l1 = le_load_word32(key + 8); + l2 = le_load_word32(key + 12); + y = le_load_word32(input); + x = le_load_word32(input + 4); + + /* Perform all 27 encryption rounds, in groups of 3 */ + #define round_xy() \ + do { \ + x = (rightRotate8(x) + y) ^ s; \ + y = leftRotate3(y) ^ x; \ + } while (0) + #define schedule(l) \ + do { \ + l = (s + rightRotate8(l)) ^ round; \ + s = leftRotate3(s) ^ l; \ + ++round; \ + } while (0) + for (round = 0; round < 27; ) { + round_xy(); + schedule(l0); + round_xy(); + schedule(l1); + round_xy(); + schedule(l2); + } + + /* Write the result to the output */ + le_store_word32(output, y); + le_store_word32(output + 4, x); +} + +#endif diff --git a/comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-speck64.h b/comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-speck64.h new file mode 100644 index 0000000..fdf840a --- /dev/null +++ b/comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-speck64.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_SPECK64_H +#define LW_INTERNAL_SPECK64_H + +/** + * \file internal-speck64.h + * \brief SPECK-64 block cipher. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts a 64-bit block with SPECK-64-128 in COMET byte order. + * + * \param key Points to the 16 bytes of the key. + * \param output Output buffer which must be at least 8 bytes in length. + * \param input Input buffer which must be at least 8 bytes in length. + * + * The \a input and \a output buffers can be the same buffer for + * in-place encryption. + * + * \note This version expects the key, input, and output to be in + * little-endian byte order, as expected by the COMET specification. 
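A minimal usage sketch for this function, with placeholder key and block values (the example function name is only illustrative):

#include "internal-speck64.h"

/* Sketch: encrypt one 8-byte block in place with a 16-byte key. */
static void speck64_usage_sketch(void)
{
    unsigned char key[16] = {0};   /* placeholder key bytes */
    unsigned char block[8] = {0};  /* placeholder plaintext block */
    speck64_128_encrypt(key, block, block); /* output may alias input */
}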
+ */ +void speck64_128_encrypt + (const unsigned char *key, unsigned char *output, + const unsigned char *input); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-util.h b/comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-util.h +++ b/comet/Implementations/crypto_aead/comet64chamv1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) 
(leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git 
a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/aead-common.c b/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/aead-common.h b/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
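The aead_check_tag() logic above turns a byte-wise comparison into an all-ones or all-zeros mask; a standalone sketch of the same trick, assuming the two's-complement arithmetic right shift that the original code also relies on (helper name is hypothetical):

/* Sketch: constant-time tag comparison folded into a mask. */
static int tag_mask_sketch(const unsigned char *a, const unsigned char *b,
                           unsigned len)
{
    int accum = 0;
    while (len > 0) {
        accum |= (*a++ ^ *b++);  /* becomes non-zero if any byte differs */
        --len;
    }
    /* accum is in 0..255, so (accum - 1) >> 8 is -1 (all ones) only when
       every byte matched, and 0 otherwise; the caller ANDs it into the
       plaintext and returns ~mask as the 0 / -1 result code. */
    return (accum - 1) >> 8;
}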
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. 
Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/api.h b/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/api.h deleted file mode 100644 index 9f9959f..0000000 --- a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 15 -#define CRYPTO_ABYTES 8 -#define CRYPTO_NOOVERLAP 1 diff --git a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/comet.c b/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/comet.c deleted file mode 100644 index ceb0fd6..0000000 --- a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/comet.c +++ /dev/null @@ -1,556 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "comet.h" -#include "internal-cham.h" -#include "internal-speck64.h" -#include "internal-util.h" -#include - -aead_cipher_t const comet_128_cham_cipher = { - "COMET-128_CHAM-128/128", - COMET_KEY_SIZE, - COMET_128_NONCE_SIZE, - COMET_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - comet_128_cham_aead_encrypt, - comet_128_cham_aead_decrypt -}; - -aead_cipher_t const comet_64_cham_cipher = { - "COMET-64_CHAM-64/128", - COMET_KEY_SIZE, - COMET_64_NONCE_SIZE, - COMET_64_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - comet_64_cham_aead_encrypt, - comet_64_cham_aead_decrypt -}; - -aead_cipher_t const comet_64_speck_cipher = { - "COMET-64_SPECK-64/128", - COMET_KEY_SIZE, - COMET_64_NONCE_SIZE, - COMET_64_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - comet_64_speck_aead_encrypt, - comet_64_speck_aead_decrypt -}; - -/** - * \brief Adjusts the Z state to generate the key to use on the next block. - * - * \param Z The Z state to be adjusted. - */ -static void comet_adjust_block_key(unsigned char Z[16]) -{ - /* Doubles the 64-bit prefix to Z in the F(2^64) field */ - unsigned index; - unsigned char mask = (unsigned char)(((signed char)(Z[7])) >> 7); - for (index = 7; index > 0; --index) - Z[index] = (Z[index] << 1) | (Z[index - 1] >> 7); - Z[0] = (Z[0] << 1) ^ (mask & 0x1B); -} - -/* Function prototype for the encrypt function of the underyling cipher */ -typedef void (*comet_encrypt_block_t) - (const unsigned char *key, unsigned char *output, - const unsigned char *input); - -/** - * \brief Processes the associated data for COMET. - * - * \param Y Internal COMET block state of \a block_size bytes in size. - * \param Z Internal COMET key state of 16 bytes in size. - * \param block_size Size of the block for the underlying cipher. - * \param encrypt Encryption function for the underlying cipher. - * \param ad Points to the associated data. - * \param adlen Number of bytes of associated data; must be >= 1. 
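The comet_adjust_block_key() doubling above is multiplication by x in GF(2^64): 0x1B is the low byte of the reduction polynomial x^64 + x^4 + x^3 + x + 1. A minimal sketch of the same operation on a value packed into one little-endian uint64_t (hypothetical helper name):

#include <stdint.h>

/* Sketch: double z in GF(2^64), reducing by x^64 + x^4 + x^3 + x + 1. */
static uint64_t gf64_double_sketch(uint64_t z)
{
    uint64_t mask = (uint64_t)0 - (z >> 63);  /* all ones if the top bit was set */
    return (z << 1) ^ (mask & 0x1B);
}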
- */ -static void comet_process_ad - (unsigned char *Y, unsigned char Z[16], unsigned block_size, - comet_encrypt_block_t encrypt, const unsigned char *ad, - unsigned long long adlen) -{ - /* Domain separator for associated data */ - Z[15] ^= 0x08; - - /* Process all associated data blocks except the last partial block */ - while (adlen >= block_size) { - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - lw_xor_block(Y, ad, block_size); - ad += block_size; - adlen -= block_size; - } - - /* Pad and process the partial block on the end */ - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - Z[15] ^= 0x10; - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - lw_xor_block(Y, ad, temp); - Y[temp] ^= 0x01; - } -} - -/** - * \brief Shuffles the words in a 128-bit block. - * - * \param out The output block after shuffling. - * \param in The input block to be shuffled. - */ -STATIC_INLINE void comet_shuffle_block_128 - (unsigned char out[16], const unsigned char in[16]) -{ - uint32_t x0, x1, x2, x3; - x0 = le_load_word32(in); - x1 = le_load_word32(in + 4); - x2 = le_load_word32(in + 8); - x3 = le_load_word32(in + 12); - le_store_word32(out, x3); - le_store_word32(out + 4, rightRotate1(x2)); - le_store_word32(out + 8, x0); - le_store_word32(out + 12, x1); -} - -/** - * \brief Shuffles the words in a 64-bit block. - * - * \param out The output block after shuffling. - * \param in The input block to be shuffled. - */ -STATIC_INLINE void comet_shuffle_block_64 - (unsigned char out[8], const unsigned char in[8]) -{ - uint32_t x01 = le_load_word32(in); - uint16_t x2 = ((uint16_t)(in[4])) | (((uint16_t)(in[5])) << 8); - out[0] = in[6]; - out[1] = in[7]; - x2 = (x2 >> 1) | (x2 << 15); - out[2] = (uint8_t)x2; - out[3] = (uint8_t)(x2 >> 8); - le_store_word32(out + 4, x01); -} - -/** - * \brief Encrypts the plaintext with COMET-128 to produce the ciphertext. - * - * \param Y Internal COMET block state of 16 bytes in size. - * \param Z Internal COMET key state of 16 bytes in size. - * \param encrypt Encryption function for the underlying cipher. - * \param c Ciphertext on output. - * \param m Plaintext message on input. - * \param mlen Length of the plaintext message and the ciphertext. - */ -static void comet_encrypt_128 - (unsigned char Y[16], unsigned char Z[16], - comet_encrypt_block_t encrypt, unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char Ys[16]; - - /* Domain separator for payload data */ - Z[15] ^= 0x20; - - /* Process all payload data blocks except the last partial block */ - while (mlen >= 16) { - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_128(Ys, Y); - lw_xor_block(Y, m, 16); - lw_xor_block_2_src(c, m, Ys, 16); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the partial block on the end */ - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - Z[15] ^= 0x40; - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_128(Ys, Y); - lw_xor_block(Y, m, temp); - lw_xor_block_2_src(c, m, Ys, temp); - Y[temp] ^= 0x01; - } -} - -/** - * \brief Encrypts the plaintext with COMET-64 to produce the ciphertext. - * - * \param Y Internal COMET block state of 8 bytes in size. - * \param Z Internal COMET key state of 16 bytes in size. - * \param encrypt Encryption function for the underlying cipher. - * \param c Ciphertext on output. - * \param m Plaintext message on input. - * \param mlen Length of the plaintext message and the ciphertext. 
- */ -static void comet_encrypt_64 - (unsigned char Y[8], unsigned char Z[16], - comet_encrypt_block_t encrypt, unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char Ys[8]; - - /* Domain separator for payload data */ - Z[15] ^= 0x20; - - /* Process all payload data blocks except the last partial block */ - while (mlen >= 8) { - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_64(Ys, Y); - lw_xor_block(Y, m, 8); - lw_xor_block_2_src(c, m, Ys, 8); - c += 8; - m += 8; - mlen -= 8; - } - - /* Pad and process the partial block on the end */ - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - Z[15] ^= 0x40; - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_64(Ys, Y); - lw_xor_block(Y, m, temp); - lw_xor_block_2_src(c, m, Ys, temp); - Y[temp] ^= 0x01; - } -} - -/** - * \brief Decrypts the ciphertext with COMET-128 to produce the plaintext. - * - * \param Y Internal COMET block state of 16 bytes in size. - * \param Z Internal COMET key state of 16 bytes in size. - * \param encrypt Encryption function for the underlying cipher. - * \param m Plaintext message on output. - * \param c Ciphertext on input. - * \param mlen Length of the plaintext message and the ciphertext. - */ -static void comet_decrypt_128 - (unsigned char Y[16], unsigned char Z[16], - comet_encrypt_block_t encrypt, unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - unsigned char Ys[16]; - - /* Domain separator for payload data */ - Z[15] ^= 0x20; - - /* Process all payload data blocks except the last partial block */ - while (mlen >= 16) { - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_128(Ys, Y); - lw_xor_block_2_src(m, c, Ys, 16); - lw_xor_block(Y, m, 16); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the partial block on the end */ - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - Z[15] ^= 0x40; - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_128(Ys, Y); - lw_xor_block_2_src(m, c, Ys, temp); - lw_xor_block(Y, m, temp); - Y[temp] ^= 0x01; - } -} - -/** - * \brief Decrypts the ciphertext with COMET-64 to produce the plaintext. - * - * \param Y Internal COMET block state of 8 bytes in size. - * \param Z Internal COMET key state of 16 bytes in size. - * \param encrypt Encryption function for the underlying cipher. - * \param m Plaintext message on output. - * \param c Ciphertext on input. - * \param mlen Length of the plaintext message and the ciphertext. 
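/* Summary of the Z[15] domain-separation constants used by the COMET
   routines in this file: 0x08 is XORed in when associated data is
   processed, 0x10 for a partial final AD block, 0x20 when payload data
   is processed, 0x40 for a partial final payload block, and 0x80 just
   before the block-key adjustment that produces the tag. */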
- */ -static void comet_decrypt_64 - (unsigned char Y[8], unsigned char Z[16], - comet_encrypt_block_t encrypt, unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - unsigned char Ys[8]; - - /* Domain separator for payload data */ - Z[15] ^= 0x20; - - /* Process all payload data blocks except the last partial block */ - while (mlen >= 8) { - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_64(Ys, Y); - lw_xor_block_2_src(m, c, Ys, 8); - lw_xor_block(Y, m, 8); - c += 8; - m += 8; - mlen -= 8; - } - - /* Pad and process the partial block on the end */ - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - Z[15] ^= 0x40; - comet_adjust_block_key(Z); - encrypt(Z, Y, Y); - comet_shuffle_block_64(Ys, Y); - lw_xor_block_2_src(m, c, Ys, temp); - lw_xor_block(Y, m, temp); - Y[temp] ^= 0x01; - } -} - -int comet_128_cham_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char Y[16]; - unsigned char Z[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + COMET_128_TAG_SIZE; - - /* Set up the initial state of Y and Z */ - memcpy(Y, k, 16); - cham128_128_encrypt(Y, Z, npub); - - /* Process the associated data */ - if (adlen > 0) - comet_process_ad(Y, Z, 16, cham128_128_encrypt, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) - comet_encrypt_128(Y, Z, cham128_128_encrypt, c, m, mlen); - - /* Generate the authentication tag */ - Z[15] ^= 0x80; - comet_adjust_block_key(Z); - cham128_128_encrypt(Z, c + mlen, Y); - return 0; -} - -int comet_128_cham_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char Y[16]; - unsigned char Z[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < COMET_128_TAG_SIZE) - return -1; - *mlen = clen - COMET_128_TAG_SIZE; - - /* Set up the initial state of Y and Z */ - memcpy(Y, k, 16); - cham128_128_encrypt(Y, Z, npub); - - /* Process the associated data */ - if (adlen > 0) - comet_process_ad(Y, Z, 16, cham128_128_encrypt, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > COMET_128_TAG_SIZE) - comet_decrypt_128(Y, Z, cham128_128_encrypt, m, c, *mlen); - - /* Check the authentication tag */ - Z[15] ^= 0x80; - comet_adjust_block_key(Z); - cham128_128_encrypt(Z, Y, Y); - return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_128_TAG_SIZE); -} - -int comet_64_cham_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char Y[8]; - unsigned char Z[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + COMET_64_TAG_SIZE; - - /* Set up the initial state of Y and Z */ - memset(Y, 0, 8); - cham64_128_encrypt(k, Y, Y); - memcpy(Z, npub, 15); - Z[15] = 0; - lw_xor_block(Z, k, 16); - - /* Process the associated data */ - if (adlen > 0) - comet_process_ad(Y, Z, 8, cham64_128_encrypt, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if 
(mlen > 0) - comet_encrypt_64(Y, Z, cham64_128_encrypt, c, m, mlen); - - /* Generate the authentication tag */ - Z[15] ^= 0x80; - comet_adjust_block_key(Z); - cham64_128_encrypt(Z, c + mlen, Y); - return 0; -} - -int comet_64_cham_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char Y[8]; - unsigned char Z[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < COMET_64_TAG_SIZE) - return -1; - *mlen = clen - COMET_64_TAG_SIZE; - - /* Set up the initial state of Y and Z */ - memset(Y, 0, 8); - cham64_128_encrypt(k, Y, Y); - memcpy(Z, npub, 15); - Z[15] = 0; - lw_xor_block(Z, k, 16); - - /* Process the associated data */ - if (adlen > 0) - comet_process_ad(Y, Z, 8, cham64_128_encrypt, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > COMET_64_TAG_SIZE) - comet_decrypt_64(Y, Z, cham64_128_encrypt, m, c, *mlen); - - /* Check the authentication tag */ - Z[15] ^= 0x80; - comet_adjust_block_key(Z); - cham64_128_encrypt(Z, Y, Y); - return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_64_TAG_SIZE); -} - -int comet_64_speck_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char Y[8]; - unsigned char Z[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + COMET_64_TAG_SIZE; - - /* Set up the initial state of Y and Z */ - memset(Y, 0, 8); - speck64_128_encrypt(k, Y, Y); - memcpy(Z, npub, 15); - Z[15] = 0; - lw_xor_block(Z, k, 16); - - /* Process the associated data */ - if (adlen > 0) - comet_process_ad(Y, Z, 8, speck64_128_encrypt, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) - comet_encrypt_64(Y, Z, speck64_128_encrypt, c, m, mlen); - - /* Generate the authentication tag */ - Z[15] ^= 0x80; - comet_adjust_block_key(Z); - speck64_128_encrypt(Z, c + mlen, Y); - return 0; -} - -int comet_64_speck_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char Y[8]; - unsigned char Z[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < COMET_64_TAG_SIZE) - return -1; - *mlen = clen - COMET_64_TAG_SIZE; - - /* Set up the initial state of Y and Z */ - memset(Y, 0, 8); - speck64_128_encrypt(k, Y, Y); - memcpy(Z, npub, 15); - Z[15] = 0; - lw_xor_block(Z, k, 16); - - /* Process the associated data */ - if (adlen > 0) - comet_process_ad(Y, Z, 8, speck64_128_encrypt, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > COMET_64_TAG_SIZE) - comet_decrypt_64(Y, Z, speck64_128_encrypt, m, c, *mlen); - - /* Check the authentication tag */ - Z[15] ^= 0x80; - comet_adjust_block_key(Z); - speck64_128_encrypt(Z, Y, Y); - return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_64_TAG_SIZE); -} diff --git a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/comet.h b/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/comet.h deleted file mode 100644 index 
d1b24a6..0000000 --- a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/comet.h +++ /dev/null @@ -1,274 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_COMET_H -#define LWCRYPTO_COMET_H - -#include "aead-common.h" - -/** - * \file comet.h - * \brief COMET authenticated encryption algorithm. - * - * COMET is a family of authenticated encryption algorithms that are - * built around an underlying block cipher. This library implements - * three members of the family: - * - * \li COMET-128_CHAM-128/128 which has a 128-bit key, a 128-bit nonce, - * and a 128-bit tag, built around the CHAM-128/128 block cipher. - * \li COMET-64_CHAM-64/128 which has a 128-bit key, a 120-bit nonce, - * and a 64-bit tag, built around the CHAM-64/128 block cipher. - * \li COMET-64_SPECK-64/128 which has a 128-bit key, a 120-bit nonce, - * and a 64-bit tag, built around the SPECK-64/128 block cipher. - * - * There is also another family member that is built around AES but - * this library does not implement that version. - * - * References: https://www.isical.ac.in/~lightweight/comet/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all COMET family members. - */ -#define COMET_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for the 128-bit versions of COMET. - */ -#define COMET_128_TAG_SIZE 16 - -/** - * \brief Size of the authentication tag for the 64-bit versions of COMET. - */ -#define COMET_64_TAG_SIZE 8 - -/** - * \brief Size of the nonce for the 128-bit versions of COMET. - */ -#define COMET_128_NONCE_SIZE 16 - -/** - * \brief Size of the nonce for the 64-bit versions of COMET. - */ -#define COMET_64_NONCE_SIZE 15 - -/** - * \brief Meta-information block for the COMET-128_CHAM-128/128 cipher. - */ -extern aead_cipher_t const comet_128_cham_cipher; - -/** - * \brief Meta-information block for the COMET-64_CHAM-64/128 cipher. - */ -extern aead_cipher_t const comet_64_cham_cipher; - -/** - * \brief Meta-information block for the COMET-64_SPECK-64/128 cipher. - */ -extern aead_cipher_t const comet_64_speck_cipher; - -/** - * \brief Encrypts and authenticates a packet with COMET-128_CHAM-128/128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. 
- * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa comet_128_cham_aead_decrypt() - */ -int comet_128_cham_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with COMET-128_CHAM-128/128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa comet_128_cham_aead_encrypt() - */ -int comet_128_cham_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with COMET-64_CHAM-64/128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa comet_64_cham_aead_decrypt() - */ -int comet_64_cham_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with COMET-64_CHAM-64/128. 
- * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa comet_64_cham_aead_encrypt() - */ -int comet_64_cham_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with COMET-64_SPECK-64/128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa comet_64_speck_aead_decrypt() - */ -int comet_64_speck_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with COMET-64_SPECK-64/128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- * - * \sa comet_64_speck_aead_encrypt() - */ -int comet_64_speck_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/encrypt.c b/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/encrypt.c deleted file mode 100644 index dc4f508..0000000 --- a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "comet.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return comet_64_speck_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return comet_64_speck_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-cham-avr.S b/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-cham-avr.S deleted file mode 100644 index 514a09a..0000000 --- a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-cham-avr.S +++ /dev/null @@ -1,915 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global cham128_128_encrypt - .type cham128_128_encrypt, @function -cham128_128_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 48 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - movw r22,r18 - movw r24,r20 - movw r6,r18 - movw r8,r20 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r18,r6 - eor r19,r7 - eor r20,r8 - eor r21,r9 - movw r2,r18 - movw r4,r20 - eor r18,r25 - eor r19,r22 - eor r20,r23 - eor r21,r24 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r2,r9 - eor r3,r6 - eor r4,r7 - eor r5,r8 - std Y+21,r2 - std Y+22,r3 - std Y+23,r4 - std Y+24,r5 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - movw r22,r18 - movw r24,r20 - movw r6,r18 - movw r8,r20 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r18,r6 - eor r19,r7 - eor r20,r8 - eor r21,r9 - movw r2,r18 - movw r4,r20 - eor r18,r25 - eor r19,r22 - eor r20,r23 - eor r21,r24 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r2,r9 - eor r3,r6 - eor r4,r7 - eor r5,r8 - std Y+17,r2 - std Y+18,r3 - std Y+19,r4 - std Y+20,r5 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - movw r22,r18 - movw r24,r20 - movw r6,r18 - movw r8,r20 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r18,r6 - eor r19,r7 - eor r20,r8 - eor r21,r9 - movw r2,r18 - 
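For reference, the following is a minimal, hypothetical caller of the COMET-64_SPECK-64/128 interface declared above (the key, nonce, and message values are placeholders; the function and macro names come from comet.h as shown, and comet_64_speck_roundtrip is an illustrative name):

#include <string.h>

#include "comet.h"

/* Round-trip sketch: encrypt a short message, then decrypt and verify it.
 * Returns 0 on success, non-zero if encryption, decryption, or the final
 * comparison fails. */
static int comet_64_speck_roundtrip(void)
{
    unsigned char key[COMET_KEY_SIZE] = {0};        /* 16-byte key (placeholder) */
    unsigned char nonce[COMET_64_NONCE_SIZE] = {0}; /* 15-byte nonce (placeholder) */
    unsigned char msg[4] = {'d', 'e', 'm', 'o'};
    unsigned char ct[sizeof(msg) + COMET_64_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    /* The ciphertext output is the encrypted message followed by the 8-byte tag. */
    if (comet_64_speck_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                                    NULL, 0, NULL, nonce, key) != 0)
        return -1;

    /* Decrypt and check the tag; a non-zero return means authentication failed. */
    if (comet_64_speck_aead_decrypt(pt, &ptlen, NULL, ct, ctlen,
                                    NULL, 0, nonce, key) != 0)
        return -1;

    return memcmp(pt, msg, sizeof(msg)) != 0;
}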
movw r4,r20 - eor r18,r25 - eor r19,r22 - eor r20,r23 - eor r21,r24 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r2,r9 - eor r3,r6 - eor r4,r7 - eor r5,r8 - std Y+29,r2 - std Y+30,r3 - std Y+31,r4 - std Y+32,r5 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - movw r22,r18 - movw r24,r20 - movw r6,r18 - movw r8,r20 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r18,r6 - eor r19,r7 - eor r20,r8 - eor r21,r9 - movw r2,r18 - movw r4,r20 - eor r18,r25 - eor r19,r22 - eor r20,r23 - eor r21,r24 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - eor r2,r9 - eor r3,r6 - eor r4,r7 - eor r5,r8 - std Y+25,r2 - std Y+26,r3 - std Y+27,r4 - std Y+28,r5 - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r18,X+ - ld r22,X+ - ld r23,X+ - ld r24,X+ - ld r25,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - mov r30,r1 -197: - eor r19,r30 - movw r10,r22 - movw r12,r24 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - ldd r0,Y+1 - eor r10,r0 - ldd r0,Y+2 - eor r11,r0 - ldd r0,Y+3 - eor r12,r0 - ldd r0,Y+4 - eor r13,r0 - add r19,r10 - adc r20,r11 - adc r21,r12 - adc r18,r13 - inc r30 - eor r22,r30 - mov r0,r5 - mov r5,r4 - mov r4,r3 - mov r3,r2 - mov r2,r0 - ldd r10,Y+5 - ldd r11,Y+6 - ldd r12,Y+7 - ldd r13,Y+8 - eor r10,r2 - eor r11,r3 - eor r12,r4 - eor r13,r5 - add r22,r10 - adc r23,r11 - adc r24,r12 - adc r25,r13 - lsl r22 - rol r23 - rol r24 - rol r25 - adc r22,r1 - inc r30 - eor r3,r30 - movw r10,r6 - movw r12,r8 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - ldd r0,Y+9 - eor r10,r0 - ldd r0,Y+10 - eor r11,r0 - ldd r0,Y+11 - eor r12,r0 - ldd r0,Y+12 - eor r13,r0 - add r3,r10 - adc r4,r11 - adc r5,r12 - adc r2,r13 - inc r30 - eor r6,r30 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r10,Y+13 - ldd r11,Y+14 - ldd r12,Y+15 - ldd r13,Y+16 - eor r10,r18 - eor r11,r19 - eor r12,r20 - eor r13,r21 - add r6,r10 - adc r7,r11 - adc r8,r12 - adc r9,r13 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - inc r30 - eor r19,r30 - movw r10,r22 - movw r12,r24 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - ldd r0,Y+17 - eor r10,r0 - ldd r0,Y+18 - eor r11,r0 - ldd r0,Y+19 - eor r12,r0 - ldd r0,Y+20 - eor r13,r0 - add r19,r10 - adc r20,r11 - adc r21,r12 - adc r18,r13 - inc r30 - eor r22,r30 - mov r0,r5 - mov r5,r4 - mov r4,r3 - mov r3,r2 - mov r2,r0 - ldd r10,Y+21 - ldd r11,Y+22 - ldd r12,Y+23 - ldd r13,Y+24 - eor r10,r2 - eor r11,r3 - eor r12,r4 - eor r13,r5 - add r22,r10 - adc r23,r11 - adc r24,r12 - adc r25,r13 - lsl r22 - rol r23 - rol r24 - rol r25 - adc r22,r1 - inc r30 - eor r3,r30 - movw r10,r6 - movw r12,r8 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - ldd r0,Y+25 - eor r10,r0 - ldd r0,Y+26 - eor r11,r0 - ldd r0,Y+27 - eor r12,r0 - ldd r0,Y+28 - eor r13,r0 - add r3,r10 - adc r4,r11 - adc r5,r12 - adc r2,r13 - inc r30 - eor r6,r30 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r10,Y+29 - ldd r11,Y+30 - ldd r12,Y+31 - ldd r13,Y+32 - eor r10,r18 - eor r11,r19 - eor r12,r20 - eor r13,r21 - add r6,r10 - adc r7,r11 - adc r8,r12 - adc r9,r13 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - inc r30 - ldi r31,80 - cpse r30,r31 - rjmp 197b - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r18 - st X+,r22 - st X+,r23 - st X+,r24 - st X+,r25 - st X+,r2 
- st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size cham128_128_encrypt, .-cham128_128_encrypt - - .text -.global cham64_128_encrypt - .type cham64_128_encrypt, @function -cham64_128_encrypt: - push r28 - push r29 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 38 - ld r18,Z - ldd r19,Z+1 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+1,r18 - std Y+2,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+19,r22 - std Y+20,r23 - ldd r18,Z+2 - ldd r19,Z+3 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+3,r18 - std Y+4,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+17,r22 - std Y+18,r23 - ldd r18,Z+4 - ldd r19,Z+5 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+5,r18 - std Y+6,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+23,r22 - std Y+24,r23 - ldd r18,Z+6 - ldd r19,Z+7 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+7,r18 - std Y+8,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+21,r22 - std Y+22,r23 - ldd r18,Z+8 - ldd r19,Z+9 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+9,r18 - std Y+10,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+27,r22 - std Y+28,r23 - ldd r18,Z+10 - ldd r19,Z+11 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+11,r18 - std Y+12,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+25,r22 - std Y+26,r23 - ldd r18,Z+12 - ldd r19,Z+13 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+13,r18 - std Y+14,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+31,r22 - std Y+32,r23 - ldd r18,Z+14 - ldd r19,Z+15 - movw r20,r18 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r18,r24 - eor r19,r25 - movw r22,r18 - eor r18,r21 - eor r19,r20 - std Y+15,r18 - std Y+16,r19 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - eor r22,r25 - eor r23,r24 - std Y+29,r22 - std Y+30,r23 - ld r19,X+ - ld r18,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r24,X+ - ld r25,X+ - mov r16,r1 -201: - eor r19,r16 - movw r30,r20 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+1 - eor r30,r0 - ldd r0,Y+2 - eor r31,r0 - add r19,r30 - adc r18,r31 - inc r16 - eor r20,r16 - mov r0,r23 - mov r23,r22 - mov r22,r0 - ldd r30,Y+3 - ldd r31,Y+4 - eor r30,r22 - eor 
r31,r23 - add r20,r30 - adc r21,r31 - lsl r20 - rol r21 - adc r20,r1 - inc r16 - eor r23,r16 - movw r30,r24 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+5 - eor r30,r0 - ldd r0,Y+6 - eor r31,r0 - add r23,r30 - adc r22,r31 - inc r16 - eor r24,r16 - mov r0,r19 - mov r19,r18 - mov r18,r0 - ldd r30,Y+7 - ldd r31,Y+8 - eor r30,r18 - eor r31,r19 - add r24,r30 - adc r25,r31 - lsl r24 - rol r25 - adc r24,r1 - inc r16 - eor r19,r16 - movw r30,r20 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+9 - eor r30,r0 - ldd r0,Y+10 - eor r31,r0 - add r19,r30 - adc r18,r31 - inc r16 - eor r20,r16 - mov r0,r23 - mov r23,r22 - mov r22,r0 - ldd r30,Y+11 - ldd r31,Y+12 - eor r30,r22 - eor r31,r23 - add r20,r30 - adc r21,r31 - lsl r20 - rol r21 - adc r20,r1 - inc r16 - eor r23,r16 - movw r30,r24 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+13 - eor r30,r0 - ldd r0,Y+14 - eor r31,r0 - add r23,r30 - adc r22,r31 - inc r16 - eor r24,r16 - mov r0,r19 - mov r19,r18 - mov r18,r0 - ldd r30,Y+15 - ldd r31,Y+16 - eor r30,r18 - eor r31,r19 - add r24,r30 - adc r25,r31 - lsl r24 - rol r25 - adc r24,r1 - inc r16 - eor r19,r16 - movw r30,r20 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+17 - eor r30,r0 - ldd r0,Y+18 - eor r31,r0 - add r19,r30 - adc r18,r31 - inc r16 - eor r20,r16 - mov r0,r23 - mov r23,r22 - mov r22,r0 - ldd r30,Y+19 - ldd r31,Y+20 - eor r30,r22 - eor r31,r23 - add r20,r30 - adc r21,r31 - lsl r20 - rol r21 - adc r20,r1 - inc r16 - eor r23,r16 - movw r30,r24 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+21 - eor r30,r0 - ldd r0,Y+22 - eor r31,r0 - add r23,r30 - adc r22,r31 - inc r16 - eor r24,r16 - mov r0,r19 - mov r19,r18 - mov r18,r0 - ldd r30,Y+23 - ldd r31,Y+24 - eor r30,r18 - eor r31,r19 - add r24,r30 - adc r25,r31 - lsl r24 - rol r25 - adc r24,r1 - inc r16 - eor r19,r16 - movw r30,r20 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+25 - eor r30,r0 - ldd r0,Y+26 - eor r31,r0 - add r19,r30 - adc r18,r31 - inc r16 - eor r20,r16 - mov r0,r23 - mov r23,r22 - mov r22,r0 - ldd r30,Y+27 - ldd r31,Y+28 - eor r30,r22 - eor r31,r23 - add r20,r30 - adc r21,r31 - lsl r20 - rol r21 - adc r20,r1 - inc r16 - eor r23,r16 - movw r30,r24 - lsl r30 - rol r31 - adc r30,r1 - ldd r0,Y+29 - eor r30,r0 - ldd r0,Y+30 - eor r31,r0 - add r23,r30 - adc r22,r31 - inc r16 - eor r24,r16 - mov r0,r19 - mov r19,r18 - mov r18,r0 - ldd r30,Y+31 - ldd r31,Y+32 - eor r30,r18 - eor r31,r19 - add r24,r30 - adc r25,r31 - lsl r24 - rol r25 - adc r24,r1 - inc r16 - ldi r17,80 - cpse r16,r17 - rjmp 201b - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r19 - st X+,r18 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r24 - st X+,r25 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r29 - pop r28 - ret - .size cham64_128_encrypt, .-cham64_128_encrypt - -#endif diff --git a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-cham.c b/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-cham.c deleted file mode 100644 index 23351a3..0000000 --- a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-cham.c +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-cham.h" -#include "internal-util.h" - -#if !defined(__AVR__) - -void cham128_128_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input) -{ - uint32_t x0, x1, x2, x3; - uint32_t k[8]; - uint8_t round; - - /* Unpack the key and generate the key schedule */ - k[0] = le_load_word32(key); - k[1] = le_load_word32(key + 4); - k[2] = le_load_word32(key + 8); - k[3] = le_load_word32(key + 12); - k[4] = k[1] ^ leftRotate1(k[1]) ^ leftRotate11(k[1]); - k[5] = k[0] ^ leftRotate1(k[0]) ^ leftRotate11(k[0]); - k[6] = k[3] ^ leftRotate1(k[3]) ^ leftRotate11(k[3]); - k[7] = k[2] ^ leftRotate1(k[2]) ^ leftRotate11(k[2]); - k[0] ^= leftRotate1(k[0]) ^ leftRotate8(k[0]); - k[1] ^= leftRotate1(k[1]) ^ leftRotate8(k[1]); - k[2] ^= leftRotate1(k[2]) ^ leftRotate8(k[2]); - k[3] ^= leftRotate1(k[3]) ^ leftRotate8(k[3]); - - /* Unpack the input block */ - x0 = le_load_word32(input); - x1 = le_load_word32(input + 4); - x2 = le_load_word32(input + 8); - x3 = le_load_word32(input + 12); - - /* Perform the 80 rounds eight at a time */ - for (round = 0; round < 80; round += 8) { - x0 = leftRotate8((x0 ^ round) + (leftRotate1(x1) ^ k[0])); - x1 = leftRotate1((x1 ^ (round + 1)) + (leftRotate8(x2) ^ k[1])); - x2 = leftRotate8((x2 ^ (round + 2)) + (leftRotate1(x3) ^ k[2])); - x3 = leftRotate1((x3 ^ (round + 3)) + (leftRotate8(x0) ^ k[3])); - x0 = leftRotate8((x0 ^ (round + 4)) + (leftRotate1(x1) ^ k[4])); - x1 = leftRotate1((x1 ^ (round + 5)) + (leftRotate8(x2) ^ k[5])); - x2 = leftRotate8((x2 ^ (round + 6)) + (leftRotate1(x3) ^ k[6])); - x3 = leftRotate1((x3 ^ (round + 7)) + (leftRotate8(x0) ^ k[7])); - } - - /* Pack the state into the output block */ - le_store_word32(output, x0); - le_store_word32(output + 4, x1); - le_store_word32(output + 8, x2); - le_store_word32(output + 12, x3); -} - -void cham64_128_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input) -{ - uint16_t x0, x1, x2, x3; - uint16_t k[16]; - uint8_t round; - - /* Unpack the key and generate the key schedule */ - k[0] = le_load_word16(key); - k[1] = le_load_word16(key + 2); - k[2] = le_load_word16(key + 4); - k[3] = le_load_word16(key + 6); - k[4] = le_load_word16(key + 8); - k[5] = le_load_word16(key + 10); - k[6] = le_load_word16(key + 12); - k[7] = le_load_word16(key + 14); - k[8] = k[1] ^ leftRotate1_16(k[1]) ^ leftRotate11_16(k[1]); - k[9] = k[0] ^ leftRotate1_16(k[0]) ^ 
leftRotate11_16(k[0]); - k[10] = k[3] ^ leftRotate1_16(k[3]) ^ leftRotate11_16(k[3]); - k[11] = k[2] ^ leftRotate1_16(k[2]) ^ leftRotate11_16(k[2]); - k[12] = k[5] ^ leftRotate1_16(k[5]) ^ leftRotate11_16(k[5]); - k[13] = k[4] ^ leftRotate1_16(k[4]) ^ leftRotate11_16(k[4]); - k[14] = k[7] ^ leftRotate1_16(k[7]) ^ leftRotate11_16(k[7]); - k[15] = k[6] ^ leftRotate1_16(k[6]) ^ leftRotate11_16(k[6]); - k[0] ^= leftRotate1_16(k[0]) ^ leftRotate8_16(k[0]); - k[1] ^= leftRotate1_16(k[1]) ^ leftRotate8_16(k[1]); - k[2] ^= leftRotate1_16(k[2]) ^ leftRotate8_16(k[2]); - k[3] ^= leftRotate1_16(k[3]) ^ leftRotate8_16(k[3]); - k[4] ^= leftRotate1_16(k[4]) ^ leftRotate8_16(k[4]); - k[5] ^= leftRotate1_16(k[5]) ^ leftRotate8_16(k[5]); - k[6] ^= leftRotate1_16(k[6]) ^ leftRotate8_16(k[6]); - k[7] ^= leftRotate1_16(k[7]) ^ leftRotate8_16(k[7]); - - /* Unpack the input block */ - x0 = le_load_word16(input); - x1 = le_load_word16(input + 2); - x2 = le_load_word16(input + 4); - x3 = le_load_word16(input + 6); - - /* Perform the 80 rounds four at a time */ - for (round = 0; round < 80; round += 4) { - x0 = leftRotate8_16 - ((x0 ^ round) + - (leftRotate1_16(x1) ^ k[round % 16])); - x1 = leftRotate1_16 - ((x1 ^ (round + 1)) + - (leftRotate8_16(x2) ^ k[(round + 1) % 16])); - x2 = leftRotate8_16 - ((x2 ^ (round + 2)) + - (leftRotate1_16(x3) ^ k[(round + 2) % 16])); - x3 = leftRotate1_16 - ((x3 ^ (round + 3)) + - (leftRotate8_16(x0) ^ k[(round + 3) % 16])); - } - - /* Pack the state into the output block */ - le_store_word16(output, x0); - le_store_word16(output + 2, x1); - le_store_word16(output + 4, x2); - le_store_word16(output + 6, x3); -} - -#endif diff --git a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-cham.h b/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-cham.h deleted file mode 100644 index 29d5ccf..0000000 --- a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-cham.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_CHAM_H -#define LW_INTERNAL_CHAM_H - -/** - * \file internal-cham.h - * \brief CHAM block cipher. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts a 128-bit block with CHAM-128-128. - * - * \param key Points to the 16 bytes of the key. - * \param output Output buffer which must be at least 16 bytes in length. 
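Every iteration of the 80-round loops above applies the same add-rotate-XOR step, with the two rotation amounts swapping between even and odd rounds. The sketch below restates one CHAM-64/128 round for readability; it is not code from this library, and rol16 and cham64_round are illustrative names:

#include <stdint.h>

/* 16-bit left rotation, equivalent to the library's 16-bit rotate helpers. */
static uint16_t rol16(uint16_t v, unsigned bits)
{
    return (uint16_t)((v << bits) | (v >> (16 - bits)));
}

/* One CHAM-64/128 round: even rounds rotate the mixed-in word by 1 and the
 * result by 8; odd rounds swap the two rotation amounts.  x is the word
 * being updated, y the next state word, rk the round key, i the round index. */
static uint16_t cham64_round(uint16_t x, uint16_t y, uint16_t rk, uint8_t i)
{
    uint16_t mixed = (i & 1) ? rol16(y, 8) : rol16(y, 1);
    uint16_t sum   = (uint16_t)((x ^ i) + (mixed ^ rk));
    return (i & 1) ? rol16(sum, 1) : rol16(sum, 8);
}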
- * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void cham128_128_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 64-bit block with CHAM-64-128. - * - * \param key Points to the 16 bytes of the key. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void cham64_128_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-speck64-avr.S b/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-speck64-avr.S deleted file mode 100644 index d8d641e..0000000 --- a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-speck64-avr.S +++ /dev/null @@ -1,272 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global speck64_128_encrypt - .type speck64_128_encrypt, @function -speck64_128_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 20 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ld r14,X+ - ld r15,X+ - ld r24,X+ - ld r25,X+ - ld r30,X+ - ld r31,X+ - ld r12,X+ - ld r13,X+ - mov r16,r1 -25: - add r31,r14 - adc r12,r15 - adc r13,r24 - adc r30,r25 - eor r31,r18 - eor r12,r19 - eor r13,r20 - eor r30,r21 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r31 - eor r15,r12 - eor r24,r13 - eor r25,r30 - mov r0,r22 - mov r22,r23 - add r22,r18 - mov r23,r2 - adc r23,r19 - mov r2,r3 - adc r2,r20 - mov r3,r0 - adc r3,r21 - eor r22,r16 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - inc r16 - add r12,r14 - adc r13,r15 - adc r30,r24 - adc r31,r25 - eor r12,r18 - eor r13,r19 - eor r30,r20 - eor r31,r21 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r12 - eor r15,r13 - eor r24,r30 - eor r25,r31 - mov r0,r4 - mov r4,r5 - add r4,r18 - mov r5,r6 - adc r5,r19 - mov r6,r7 - adc r6,r20 - mov r7,r0 - adc r7,r21 - eor r4,r16 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - inc r16 - add r13,r14 - adc r30,r15 - adc r31,r24 - adc r12,r25 - eor r13,r18 - eor r30,r19 - eor r31,r20 - eor r12,r21 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - 
adc r14,r1 - eor r14,r13 - eor r15,r30 - eor r24,r31 - eor r25,r12 - mov r0,r12 - mov r12,r31 - mov r31,r30 - mov r30,r13 - mov r13,r0 - mov r0,r8 - mov r8,r9 - add r8,r18 - mov r9,r10 - adc r9,r19 - mov r10,r11 - adc r10,r20 - mov r11,r0 - adc r11,r21 - eor r8,r16 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - inc r16 - ldi r17,27 - cpse r16,r17 - rjmp 25b - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r14 - st X+,r15 - st X+,r24 - st X+,r25 - st X+,r30 - st X+,r31 - st X+,r12 - st X+,r13 - pop r0 - pop r0 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size speck64_128_encrypt, .-speck64_128_encrypt - -#endif diff --git a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-speck64.c b/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-speck64.c deleted file mode 100644 index 494c801..0000000 --- a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-speck64.c +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "internal-speck64.h" -#include "internal-util.h" - -#if !defined(__AVR__) - -void speck64_128_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input) -{ - uint32_t l0, l1, l2, s; - uint32_t x, y; - uint8_t round; - - /* Unpack the key and the input block */ - s = le_load_word32(key); - l0 = le_load_word32(key + 4); - l1 = le_load_word32(key + 8); - l2 = le_load_word32(key + 12); - y = le_load_word32(input); - x = le_load_word32(input + 4); - - /* Perform all 27 encryption rounds, in groups of 3 */ - #define round_xy() \ - do { \ - x = (rightRotate8(x) + y) ^ s; \ - y = leftRotate3(y) ^ x; \ - } while (0) - #define schedule(l) \ - do { \ - l = (s + rightRotate8(l)) ^ round; \ - s = leftRotate3(s) ^ l; \ - ++round; \ - } while (0) - for (round = 0; round < 27; ) { - round_xy(); - schedule(l0); - round_xy(); - schedule(l1); - round_xy(); - schedule(l2); - } - - /* Write the result to the output */ - le_store_word32(output, y); - le_store_word32(output + 4, x); -} - -#endif diff --git a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-speck64.h b/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-speck64.h deleted file mode 100644 index fdf840a..0000000 --- a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-speck64.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SPECK64_H -#define LW_INTERNAL_SPECK64_H - -/** - * \file internal-speck64.h - * \brief SPECK-64 block cipher. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts a 64-bit block with SPECK-64-128 in COMET byte order. - * - * \param key Points to the 16 bytes of the key. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * \note This version expects the key, input, and output to be in - * little-endian byte order, as expected by the COMET specification. 
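The round_xy() and schedule() macros above can also be read as a pair of helper functions; the sketch below restates them under that reading (speck64_round, speck64_schedule, rol32, and ror32 are illustrative names, not part of this library):

#include <stdint.h>

static uint32_t rol32(uint32_t v, unsigned bits) { return (v << bits) | (v >> (32 - bits)); }
static uint32_t ror32(uint32_t v, unsigned bits) { return (v >> bits) | (v << (32 - bits)); }

/* One SPECK-64 encryption round with the current round key s. */
static void speck64_round(uint32_t *x, uint32_t *y, uint32_t s)
{
    *x = (ror32(*x, 8) + *y) ^ s;
    *y = rol32(*y, 3) ^ *x;
}

/* One key-schedule step: update key word l and derive the next round key s;
 * the caller increments the round counter, as in the loop above. */
static void speck64_schedule(uint32_t *l, uint32_t *s, uint8_t round)
{
    *l = (*s + ror32(*l, 8)) ^ round;
    *s = rol32(*s, 3) ^ *l;
}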
- */ -void speck64_128_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-util.h b/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/comet/Implementations/crypto_aead/comet64speckv1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
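As a quick illustration of the two byte orders handled by the macros above, the little-endian loader treats ptr[0] as the least significant byte, while the big-endian loader treats it as the most significant one. A small self-check sketch (check_byte_order is an illustrative name):

#include <assert.h>
#include <stdint.h>
#include "internal-util.h"

static void check_byte_order(void)
{
    static const uint8_t bytes[4] = {0x78, 0x56, 0x34, 0x12};
    assert(le_load_word32(bytes) == (uint32_t)0x12345678);
    assert(be_load_word32(bytes) == (uint32_t)0x78563412);
}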
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
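Either definition of the 32-bit rotations above yields the same result; for example, the composed form of leftRotate5 (rotate left by 8, then right by 1 three times) must agree with the generic 5-bit rotation. A spot check along the following lines can confirm this (check_left_rotate5 is an illustrative name, not library code):

#include <assert.h>
#include <stdint.h>
#include "internal-util.h"

static void check_left_rotate5(uint32_t a)
{
    /* On AVR this exercises the composed form; on other platforms both
     * sides reduce to the same generic shift-and-or expression. */
    assert(leftRotate5(a) == leftRotate(a, 5));
}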
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/comet/Implementations/crypto_aead/comet64speckv1/rhys/comet.c b/comet/Implementations/crypto_aead/comet64speckv1/rhys/comet.c index d068de2..ceb0fd6 100644 --- a/comet/Implementations/crypto_aead/comet64speckv1/rhys/comet.c +++ b/comet/Implementations/crypto_aead/comet64speckv1/rhys/comet.c @@ -22,6 +22,7 @@ #include "comet.h" #include "internal-cham.h" +#include "internal-speck64.h" #include "internal-util.h" #include @@ -478,58 +479,6 @@ int comet_64_cham_aead_decrypt return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_64_TAG_SIZE); } -/** - * \brief Encrypts a 64-bit block with SPECK-64-128 in COMET byte order. - * - * \param key Points to the 16 bytes of the key. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * \note This version differs from standard SPECK-64 in that it uses the - * little-endian byte order from the COMET specification which is different - * from the big-endian byte order from the original SPECK paper. - */ -static void speck64_128_comet_encrypt - (const unsigned char *key, unsigned char *output, - const unsigned char *input) -{ - uint32_t l[4]; - uint32_t x, y, s; - uint8_t round; - uint8_t li_in = 0; - uint8_t li_out = 3; - - /* Unpack the key and the input block */ - s = le_load_word32(key); - l[0] = le_load_word32(key + 4); - l[1] = le_load_word32(key + 8); - l[2] = le_load_word32(key + 12); - y = le_load_word32(input); - x = le_load_word32(input + 4); - - /* Perform all encryption rounds except the last */ - for (round = 0; round < 26; ++round) { - /* Perform the round with the current key schedule word */ - x = (rightRotate8(x) + y) ^ s; - y = leftRotate3(y) ^ x; - - /* Calculate the next key schedule word */ - l[li_out] = (s + rightRotate8(l[li_in])) ^ round; - s = leftRotate3(s) ^ l[li_out]; - li_in = (li_in + 1) & 0x03; - li_out = (li_out + 1) & 0x03; - } - - /* Perform the last encryption round and write the result to the output */ - x = (rightRotate8(x) + y) ^ s; - y = leftRotate3(y) ^ x; - le_store_word32(output, y); - le_store_word32(output + 4, x); -} - int comet_64_speck_aead_encrypt (unsigned char *c, unsigned long long *clen, const unsigned char *m, unsigned long long mlen, @@ -547,23 +496,23 @@ int comet_64_speck_aead_encrypt /* Set up the initial state of Y and Z */ memset(Y, 0, 8); - speck64_128_comet_encrypt(k, Y, Y); + speck64_128_encrypt(k, Y, Y); memcpy(Z, npub, 15); Z[15] = 0; lw_xor_block(Z, k, 16); /* Process the associated data */ if (adlen > 0) - comet_process_ad(Y, Z, 8, speck64_128_comet_encrypt, ad, adlen); + comet_process_ad(Y, Z, 8, speck64_128_encrypt, ad, adlen); /* Encrypt the plaintext to produce the ciphertext */ if (mlen > 0) - comet_encrypt_64(Y, Z, speck64_128_comet_encrypt, c, m, mlen); + comet_encrypt_64(Y, Z, speck64_128_encrypt, c, m, mlen); /* Generate the authentication tag */ Z[15] ^= 0x80; comet_adjust_block_key(Z); - speck64_128_comet_encrypt(Z, c + 
mlen, Y); + speck64_128_encrypt(Z, c + mlen, Y); return 0; } @@ -586,22 +535,22 @@ int comet_64_speck_aead_decrypt /* Set up the initial state of Y and Z */ memset(Y, 0, 8); - speck64_128_comet_encrypt(k, Y, Y); + speck64_128_encrypt(k, Y, Y); memcpy(Z, npub, 15); Z[15] = 0; lw_xor_block(Z, k, 16); /* Process the associated data */ if (adlen > 0) - comet_process_ad(Y, Z, 8, speck64_128_comet_encrypt, ad, adlen); + comet_process_ad(Y, Z, 8, speck64_128_encrypt, ad, adlen); /* Decrypt the ciphertext to produce the plaintext */ if (clen > COMET_64_TAG_SIZE) - comet_decrypt_64(Y, Z, speck64_128_comet_encrypt, m, c, *mlen); + comet_decrypt_64(Y, Z, speck64_128_encrypt, m, c, *mlen); /* Check the authentication tag */ Z[15] ^= 0x80; comet_adjust_block_key(Z); - speck64_128_comet_encrypt(Z, Y, Y); + speck64_128_encrypt(Z, Y, Y); return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_64_TAG_SIZE); } diff --git a/comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-cham-avr.S b/comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-cham-avr.S new file mode 100644 index 0000000..514a09a --- /dev/null +++ b/comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-cham-avr.S @@ -0,0 +1,915 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global cham128_128_encrypt + .type cham128_128_encrypt, @function +cham128_128_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 48 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + movw r22,r18 + movw r24,r20 + movw r6,r18 + movw r8,r20 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r18,r6 + eor r19,r7 + eor r20,r8 + eor r21,r9 + movw r2,r18 + movw r4,r20 + eor r18,r25 + eor r19,r22 + eor r20,r23 + eor r21,r24 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r2,r9 + eor r3,r6 + eor r4,r7 + eor r5,r8 + std Y+21,r2 + std Y+22,r3 + std Y+23,r4 + std Y+24,r5 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + movw r22,r18 + movw r24,r20 + movw r6,r18 + movw r8,r20 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r18,r6 + eor r19,r7 + eor r20,r8 + eor r21,r9 + movw r2,r18 + movw r4,r20 + eor r18,r25 + eor r19,r22 + eor r20,r23 + eor r21,r24 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r2,r9 + eor r3,r6 + eor r4,r7 + eor r5,r8 + std Y+17,r2 + std Y+18,r3 + std Y+19,r4 + std Y+20,r5 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + movw r22,r18 + movw r24,r20 + movw r6,r18 + movw r8,r20 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r18,r6 + eor r19,r7 + eor r20,r8 + eor r21,r9 + movw r2,r18 + movw r4,r20 + eor r18,r25 + eor r19,r22 + eor r20,r23 + eor r21,r24 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r2,r9 + eor r3,r6 + eor r4,r7 + eor r5,r8 + std Y+29,r2 + std Y+30,r3 + std Y+31,r4 + std Y+32,r5 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + movw r22,r18 + movw r24,r20 + movw r6,r18 + movw r8,r20 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + 
eor r18,r6 + eor r19,r7 + eor r20,r8 + eor r21,r9 + movw r2,r18 + movw r4,r20 + eor r18,r25 + eor r19,r22 + eor r20,r23 + eor r21,r24 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + eor r2,r9 + eor r3,r6 + eor r4,r7 + eor r5,r8 + std Y+25,r2 + std Y+26,r3 + std Y+27,r4 + std Y+28,r5 + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r18,X+ + ld r22,X+ + ld r23,X+ + ld r24,X+ + ld r25,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + mov r30,r1 +197: + eor r19,r30 + movw r10,r22 + movw r12,r24 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + ldd r0,Y+1 + eor r10,r0 + ldd r0,Y+2 + eor r11,r0 + ldd r0,Y+3 + eor r12,r0 + ldd r0,Y+4 + eor r13,r0 + add r19,r10 + adc r20,r11 + adc r21,r12 + adc r18,r13 + inc r30 + eor r22,r30 + mov r0,r5 + mov r5,r4 + mov r4,r3 + mov r3,r2 + mov r2,r0 + ldd r10,Y+5 + ldd r11,Y+6 + ldd r12,Y+7 + ldd r13,Y+8 + eor r10,r2 + eor r11,r3 + eor r12,r4 + eor r13,r5 + add r22,r10 + adc r23,r11 + adc r24,r12 + adc r25,r13 + lsl r22 + rol r23 + rol r24 + rol r25 + adc r22,r1 + inc r30 + eor r3,r30 + movw r10,r6 + movw r12,r8 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + ldd r0,Y+9 + eor r10,r0 + ldd r0,Y+10 + eor r11,r0 + ldd r0,Y+11 + eor r12,r0 + ldd r0,Y+12 + eor r13,r0 + add r3,r10 + adc r4,r11 + adc r5,r12 + adc r2,r13 + inc r30 + eor r6,r30 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r10,Y+13 + ldd r11,Y+14 + ldd r12,Y+15 + ldd r13,Y+16 + eor r10,r18 + eor r11,r19 + eor r12,r20 + eor r13,r21 + add r6,r10 + adc r7,r11 + adc r8,r12 + adc r9,r13 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + inc r30 + eor r19,r30 + movw r10,r22 + movw r12,r24 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + ldd r0,Y+17 + eor r10,r0 + ldd r0,Y+18 + eor r11,r0 + ldd r0,Y+19 + eor r12,r0 + ldd r0,Y+20 + eor r13,r0 + add r19,r10 + adc r20,r11 + adc r21,r12 + adc r18,r13 + inc r30 + eor r22,r30 + mov r0,r5 + mov r5,r4 + mov r4,r3 + mov r3,r2 + mov r2,r0 + ldd r10,Y+21 + ldd r11,Y+22 + ldd r12,Y+23 + ldd r13,Y+24 + eor r10,r2 + eor r11,r3 + eor r12,r4 + eor r13,r5 + add r22,r10 + adc r23,r11 + adc r24,r12 + adc r25,r13 + lsl r22 + rol r23 + rol r24 + rol r25 + adc r22,r1 + inc r30 + eor r3,r30 + movw r10,r6 + movw r12,r8 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + ldd r0,Y+25 + eor r10,r0 + ldd r0,Y+26 + eor r11,r0 + ldd r0,Y+27 + eor r12,r0 + ldd r0,Y+28 + eor r13,r0 + add r3,r10 + adc r4,r11 + adc r5,r12 + adc r2,r13 + inc r30 + eor r6,r30 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r10,Y+29 + ldd r11,Y+30 + ldd r12,Y+31 + ldd r13,Y+32 + eor r10,r18 + eor r11,r19 + eor r12,r20 + eor r13,r21 + add r6,r10 + adc r7,r11 + adc r8,r12 + adc r9,r13 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + inc r30 + ldi r31,80 + cpse r30,r31 + rjmp 197b + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r18 + st X+,r22 + st X+,r23 + st X+,r24 + st X+,r25 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size cham128_128_encrypt, .-cham128_128_encrypt + + .text +.global cham64_128_encrypt + .type cham64_128_encrypt, @function +cham64_128_encrypt: + push r28 + push r29 + push r16 + push r17 + push r23 
+ push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 38 + ld r18,Z + ldd r19,Z+1 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+1,r18 + std Y+2,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+19,r22 + std Y+20,r23 + ldd r18,Z+2 + ldd r19,Z+3 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+3,r18 + std Y+4,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+17,r22 + std Y+18,r23 + ldd r18,Z+4 + ldd r19,Z+5 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+5,r18 + std Y+6,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+23,r22 + std Y+24,r23 + ldd r18,Z+6 + ldd r19,Z+7 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+7,r18 + std Y+8,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+21,r22 + std Y+22,r23 + ldd r18,Z+8 + ldd r19,Z+9 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+9,r18 + std Y+10,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+27,r22 + std Y+28,r23 + ldd r18,Z+10 + ldd r19,Z+11 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+11,r18 + std Y+12,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+25,r22 + std Y+26,r23 + ldd r18,Z+12 + ldd r19,Z+13 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+13,r18 + std Y+14,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+31,r22 + std Y+32,r23 + ldd r18,Z+14 + ldd r19,Z+15 + movw r20,r18 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r18,r24 + eor r19,r25 + movw r22,r18 + eor r18,r21 + eor r19,r20 + std Y+15,r18 + std Y+16,r19 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + eor r22,r25 + eor r23,r24 + std Y+29,r22 + std Y+30,r23 + ld r19,X+ + ld r18,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r24,X+ + ld r25,X+ + mov r16,r1 +201: + eor r19,r16 + movw r30,r20 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+1 + eor r30,r0 + ldd r0,Y+2 + eor r31,r0 + add r19,r30 + adc r18,r31 + inc r16 + eor r20,r16 + mov r0,r23 + mov r23,r22 + mov r22,r0 + ldd r30,Y+3 + ldd r31,Y+4 + eor r30,r22 + eor r31,r23 + add r20,r30 + adc r21,r31 + lsl r20 + rol r21 + adc r20,r1 + inc r16 + eor r23,r16 + movw r30,r24 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+5 + eor r30,r0 + ldd r0,Y+6 + eor r31,r0 + add r23,r30 + adc r22,r31 + inc r16 + eor r24,r16 + mov r0,r19 + mov r19,r18 + mov r18,r0 + ldd r30,Y+7 + ldd r31,Y+8 + eor r30,r18 + eor r31,r19 + add r24,r30 + adc r25,r31 + lsl r24 + rol r25 + adc r24,r1 + inc r16 + eor r19,r16 + movw r30,r20 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+9 + eor 
r30,r0 + ldd r0,Y+10 + eor r31,r0 + add r19,r30 + adc r18,r31 + inc r16 + eor r20,r16 + mov r0,r23 + mov r23,r22 + mov r22,r0 + ldd r30,Y+11 + ldd r31,Y+12 + eor r30,r22 + eor r31,r23 + add r20,r30 + adc r21,r31 + lsl r20 + rol r21 + adc r20,r1 + inc r16 + eor r23,r16 + movw r30,r24 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+13 + eor r30,r0 + ldd r0,Y+14 + eor r31,r0 + add r23,r30 + adc r22,r31 + inc r16 + eor r24,r16 + mov r0,r19 + mov r19,r18 + mov r18,r0 + ldd r30,Y+15 + ldd r31,Y+16 + eor r30,r18 + eor r31,r19 + add r24,r30 + adc r25,r31 + lsl r24 + rol r25 + adc r24,r1 + inc r16 + eor r19,r16 + movw r30,r20 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+17 + eor r30,r0 + ldd r0,Y+18 + eor r31,r0 + add r19,r30 + adc r18,r31 + inc r16 + eor r20,r16 + mov r0,r23 + mov r23,r22 + mov r22,r0 + ldd r30,Y+19 + ldd r31,Y+20 + eor r30,r22 + eor r31,r23 + add r20,r30 + adc r21,r31 + lsl r20 + rol r21 + adc r20,r1 + inc r16 + eor r23,r16 + movw r30,r24 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+21 + eor r30,r0 + ldd r0,Y+22 + eor r31,r0 + add r23,r30 + adc r22,r31 + inc r16 + eor r24,r16 + mov r0,r19 + mov r19,r18 + mov r18,r0 + ldd r30,Y+23 + ldd r31,Y+24 + eor r30,r18 + eor r31,r19 + add r24,r30 + adc r25,r31 + lsl r24 + rol r25 + adc r24,r1 + inc r16 + eor r19,r16 + movw r30,r20 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+25 + eor r30,r0 + ldd r0,Y+26 + eor r31,r0 + add r19,r30 + adc r18,r31 + inc r16 + eor r20,r16 + mov r0,r23 + mov r23,r22 + mov r22,r0 + ldd r30,Y+27 + ldd r31,Y+28 + eor r30,r22 + eor r31,r23 + add r20,r30 + adc r21,r31 + lsl r20 + rol r21 + adc r20,r1 + inc r16 + eor r23,r16 + movw r30,r24 + lsl r30 + rol r31 + adc r30,r1 + ldd r0,Y+29 + eor r30,r0 + ldd r0,Y+30 + eor r31,r0 + add r23,r30 + adc r22,r31 + inc r16 + eor r24,r16 + mov r0,r19 + mov r19,r18 + mov r18,r0 + ldd r30,Y+31 + ldd r31,Y+32 + eor r30,r18 + eor r31,r19 + add r24,r30 + adc r25,r31 + lsl r24 + rol r25 + adc r24,r1 + inc r16 + ldi r17,80 + cpse r16,r17 + rjmp 201b + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r19 + st X+,r18 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r24 + st X+,r25 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r29 + pop r28 + ret + .size cham64_128_encrypt, .-cham64_128_encrypt + +#endif diff --git a/comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-cham.c b/comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-cham.c index e097dbd..23351a3 100644 --- a/comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-cham.c +++ b/comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-cham.c @@ -23,6 +23,8 @@ #include "internal-cham.h" #include "internal-util.h" +#if !defined(__AVR__) + void cham128_128_encrypt (const unsigned char *key, unsigned char *output, const unsigned char *input) @@ -132,3 +134,5 @@ void cham64_128_encrypt le_store_word16(output + 4, x2); le_store_word16(output + 6, x3); } + +#endif diff --git a/comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-speck64-avr.S b/comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-speck64-avr.S new file mode 100644 index 0000000..d8d641e --- /dev/null +++ b/comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-speck64-avr.S @@ -0,0 +1,272 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global speck64_128_encrypt + .type speck64_128_encrypt, @function +speck64_128_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 
+ push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 20 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ld r14,X+ + ld r15,X+ + ld r24,X+ + ld r25,X+ + ld r30,X+ + ld r31,X+ + ld r12,X+ + ld r13,X+ + mov r16,r1 +25: + add r31,r14 + adc r12,r15 + adc r13,r24 + adc r30,r25 + eor r31,r18 + eor r12,r19 + eor r13,r20 + eor r30,r21 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r31 + eor r15,r12 + eor r24,r13 + eor r25,r30 + mov r0,r22 + mov r22,r23 + add r22,r18 + mov r23,r2 + adc r23,r19 + mov r2,r3 + adc r2,r20 + mov r3,r0 + adc r3,r21 + eor r22,r16 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + inc r16 + add r12,r14 + adc r13,r15 + adc r30,r24 + adc r31,r25 + eor r12,r18 + eor r13,r19 + eor r30,r20 + eor r31,r21 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r12 + eor r15,r13 + eor r24,r30 + eor r25,r31 + mov r0,r4 + mov r4,r5 + add r4,r18 + mov r5,r6 + adc r5,r19 + mov r6,r7 + adc r6,r20 + mov r7,r0 + adc r7,r21 + eor r4,r16 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + inc r16 + add r13,r14 + adc r30,r15 + adc r31,r24 + adc r12,r25 + eor r13,r18 + eor r30,r19 + eor r31,r20 + eor r12,r21 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r13 + eor r15,r30 + eor r24,r31 + eor r25,r12 + mov r0,r12 + mov r12,r31 + mov r31,r30 + mov r30,r13 + mov r13,r0 + mov r0,r8 + mov r8,r9 + add r8,r18 + mov r9,r10 + adc r9,r19 + mov r10,r11 + adc r10,r20 + mov r11,r0 + adc r11,r21 + eor r8,r16 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + inc r16 + ldi r17,27 + cpse r16,r17 + rjmp 25b + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r14 + st X+,r15 + st X+,r24 + st X+,r25 + st X+,r30 + st X+,r31 + st X+,r12 + st X+,r13 + pop r0 + pop r0 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size speck64_128_encrypt, .-speck64_128_encrypt + +#endif diff --git a/comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-speck64.c b/comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-speck64.c new file mode 100644 index 0000000..494c801 --- /dev/null +++ b/comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-speck64.c @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "internal-speck64.h" +#include "internal-util.h" + +#if !defined(__AVR__) + +void speck64_128_encrypt + (const unsigned char *key, unsigned char *output, + const unsigned char *input) +{ + uint32_t l0, l1, l2, s; + uint32_t x, y; + uint8_t round; + + /* Unpack the key and the input block */ + s = le_load_word32(key); + l0 = le_load_word32(key + 4); + l1 = le_load_word32(key + 8); + l2 = le_load_word32(key + 12); + y = le_load_word32(input); + x = le_load_word32(input + 4); + + /* Perform all 27 encryption rounds, in groups of 3 */ + #define round_xy() \ + do { \ + x = (rightRotate8(x) + y) ^ s; \ + y = leftRotate3(y) ^ x; \ + } while (0) + #define schedule(l) \ + do { \ + l = (s + rightRotate8(l)) ^ round; \ + s = leftRotate3(s) ^ l; \ + ++round; \ + } while (0) + for (round = 0; round < 27; ) { + round_xy(); + schedule(l0); + round_xy(); + schedule(l1); + round_xy(); + schedule(l2); + } + + /* Write the result to the output */ + le_store_word32(output, y); + le_store_word32(output + 4, x); +} + +#endif diff --git a/comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-speck64.h b/comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-speck64.h new file mode 100644 index 0000000..fdf840a --- /dev/null +++ b/comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-speck64.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
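The portable speck64_128_encrypt added above interleaves one SPECK-64 cipher round with one key-schedule step, cycling through the three l words. Below is a minimal stand-alone sketch of that round structure; the helper name speck64_round_sketch and the explicit shift expressions are illustrative assumptions rather than code from the patch, and the little-endian load/store handling of the real function is omitted.

#include <stdint.h>

/* One SPECK-64-128 cipher round followed by one key-schedule step.
 * x, y : state words (x receives the round key)
 * s    : current round key word
 * l    : key-schedule word updated this round
 * i    : round counter mixed into the schedule
 * Mirrors the round_xy()/schedule() macros in internal-speck64.c. */
static void speck64_round_sketch(uint32_t *x, uint32_t *y,
                                 uint32_t *s, uint32_t *l, uint32_t i)
{
    *x = (((*x >> 8) | (*x << 24)) + *y) ^ *s;  /* x = (x >>> 8) + y, XOR round key */
    *y = ((*y << 3) | (*y >> 29)) ^ *x;         /* y = (y <<< 3) XOR x              */
    *l = (*s + ((*l >> 8) | (*l << 24))) ^ i;   /* l = (s + (l >>> 8)) XOR i        */
    *s = ((*s << 3) | (*s >> 29)) ^ *l;         /* s = (s <<< 3) XOR l              */
}

Calling this 27 times with i = 0..26 while l cycles through l0, l1, l2 reproduces the loop in speck64_128_encrypt.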
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_SPECK64_H +#define LW_INTERNAL_SPECK64_H + +/** + * \file internal-speck64.h + * \brief SPECK-64 block cipher. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts a 64-bit block with SPECK-64-128 in COMET byte order. + * + * \param key Points to the 16 bytes of the key. + * \param output Output buffer which must be at least 8 bytes in length. + * \param input Input buffer which must be at least 8 bytes in length. + * + * The \a input and \a output buffers can be the same buffer for + * in-place encryption. + * + * \note This version expects the key, input, and output to be in + * little-endian byte order, as expected by the COMET specification. + */ +void speck64_128_encrypt + (const unsigned char *key, unsigned char *output, + const unsigned char *input); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-util.h b/comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-util.h +++ b/comet/Implementations/crypto_aead/comet64speckv1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/aead-common.c b/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
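The LW_CRYPTO_ROTATE32_COMPOSED macros added to internal-util.h above build every 32-bit rotation from 1-bit rotations and rotations by multiples of 8, the distances that AVR handles cheaply (register byte moves plus rotate-through-carry). A short sketch of the equivalence follows; the helper names rotl32, rotr32 and rotl32_12_composed are hypothetical and not part of the patch.

#include <assert.h>
#include <stdint.h>

/* Generic 32-bit rotates, corresponding to leftRotate()/rightRotate(). */
static inline uint32_t rotl32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32 - bits));
}
static inline uint32_t rotr32(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32 - bits));
}

/* Composed form of leftRotate12: rotate left by 16 (a byte-aligned move),
 * then take back 4 bits with single-bit right rotations. */
static inline uint32_t rotl32_12_composed(uint32_t x)
{
    x = rotl32(x, 16);
    x = rotr32(x, 1);
    x = rotr32(x, 1);
    x = rotr32(x, 1);
    x = rotr32(x, 1);
    return x;
}

int main(void)
{
    uint32_t v = 0x12345678;
    /* Same result as a direct 12-bit rotate, but built only from steps
     * that are cheap on a CPU without a barrel shifter. */
    assert(rotl32_12_composed(v) == rotl32(v, 12));
    return 0;
}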
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/aead-common.h b/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. 
- * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. 
- * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/api.h b/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/drygascon.c b/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/drygascon.c deleted file mode 100644 index e963903..0000000 --- a/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/drygascon.c +++ /dev/null @@ -1,421 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "drygascon.h" -#include "internal-drysponge.h" -#include - -aead_cipher_t const drygascon128_cipher = { - "DryGASCON128", - DRYGASCON128_KEY_SIZE, - DRYGASCON128_NONCE_SIZE, - DRYGASCON128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - drygascon128_aead_encrypt, - drygascon128_aead_decrypt -}; - -aead_cipher_t const drygascon256_cipher = { - "DryGASCON256", - DRYGASCON256_KEY_SIZE, - DRYGASCON256_NONCE_SIZE, - DRYGASCON256_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - drygascon256_aead_encrypt, - drygascon256_aead_decrypt -}; - -aead_hash_algorithm_t const drygascon128_hash_algorithm = { - "DryGASCON128-HASH", - sizeof(int), - DRYGASCON128_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - drygascon128_hash, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const drygascon256_hash_algorithm = { - "DryGASCON256-HASH", - sizeof(int), - DRYGASCON256_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - drygascon256_hash, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \brief Processes associated data for DryGASCON128. - * - * \param state DrySPONGE128 sponge state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data, must not be zero. - * \param finalize Non-zero to finalize packet processing because - * the message is zero-length. - */ -static void drygascon128_process_ad - (drysponge128_state_t *state, const unsigned char *ad, - unsigned long long adlen, int finalize) -{ - /* Process all blocks except the last one */ - while (adlen > DRYSPONGE128_RATE) { - drysponge128_f_absorb(state, ad, DRYSPONGE128_RATE); - drysponge128_g_core(state); - ad += DRYSPONGE128_RATE; - adlen -= DRYSPONGE128_RATE; - } - - /* Process the last block with domain separation and padding */ - state->domain = DRYDOMAIN128_ASSOC_DATA; - if (finalize) - state->domain |= DRYDOMAIN128_FINAL; - if (adlen < DRYSPONGE128_RATE) - state->domain |= DRYDOMAIN128_PADDED; - drysponge128_f_absorb(state, ad, (unsigned)adlen); - drysponge128_g(state); -} - -/** - * \brief Processes associated data for DryGASCON256. - * - * \param state DrySPONGE256 sponge state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data, must not be zero. - * \param finalize Non-zero to finalize packet processing because - * the message is zero-length. 
- */ -static void drygascon256_process_ad - (drysponge256_state_t *state, const unsigned char *ad, - unsigned long long adlen, int finalize) -{ - /* Process all blocks except the last one */ - while (adlen > DRYSPONGE256_RATE) { - drysponge256_f_absorb(state, ad, DRYSPONGE256_RATE); - drysponge256_g_core(state); - ad += DRYSPONGE256_RATE; - adlen -= DRYSPONGE256_RATE; - } - - /* Process the last block with domain separation and padding */ - state->domain = DRYDOMAIN256_ASSOC_DATA; - if (finalize) - state->domain |= DRYDOMAIN256_FINAL; - if (adlen < DRYSPONGE256_RATE) - state->domain |= DRYDOMAIN256_PADDED; - drysponge256_f_absorb(state, ad, (unsigned)adlen); - drysponge256_g(state); -} - -int drygascon128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - drysponge128_state_t state; - unsigned temp; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + DRYGASCON128_TAG_SIZE; - - /* Initialize the sponge state with the key and nonce */ - drysponge128_setup(&state, k, npub, adlen == 0 && mlen == 0); - - /* Process the associated data */ - if (adlen > 0) - drygascon128_process_ad(&state, ad, adlen, mlen == 0); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - /* Processs all blocks except the last one */ - while (mlen > DRYSPONGE128_RATE) { - drysponge128_f_absorb(&state, m, DRYSPONGE128_RATE); - lw_xor_block_2_src(c, m, state.r.B, DRYSPONGE128_RATE); - drysponge128_g(&state); - c += DRYSPONGE128_RATE; - m += DRYSPONGE128_RATE; - mlen -= DRYSPONGE128_RATE; - } - - /* Process the last block with domain separation and padding */ - state.domain = DRYDOMAIN128_MESSAGE | DRYDOMAIN128_FINAL; - if (mlen < DRYSPONGE128_RATE) - state.domain |= DRYDOMAIN128_PADDED; - temp = (unsigned)mlen; - drysponge128_f_absorb(&state, m, temp); - lw_xor_block_2_src(c, m, state.r.B, temp); - drysponge128_g(&state); - c += temp; - } - - /* Generate the authentication tag */ - memcpy(c, state.r.B, DRYGASCON128_TAG_SIZE); - return 0; -} - -int drygascon128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - drysponge128_state_t state; - unsigned char *mtemp = m; - unsigned temp; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < DRYGASCON128_TAG_SIZE) - return -1; - *mlen = clen - DRYGASCON128_TAG_SIZE; - - /* Initialize the sponge state with the key and nonce */ - clen -= DRYGASCON128_TAG_SIZE; - drysponge128_setup(&state, k, npub, adlen == 0 && clen == 0); - - /* Process the associated data */ - if (adlen > 0) - drygascon128_process_ad(&state, ad, adlen, clen == 0); - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > 0) { - /* Processs all blocks except the last one */ - while (clen > DRYSPONGE128_RATE) { - lw_xor_block_2_src(m, c, state.r.B, DRYSPONGE128_RATE); - drysponge128_f_absorb(&state, m, DRYSPONGE128_RATE); - drysponge128_g(&state); - c += DRYSPONGE128_RATE; - m += DRYSPONGE128_RATE; - clen -= DRYSPONGE128_RATE; - } - - /* Process the last block with domain separation and padding */ - state.domain = DRYDOMAIN128_MESSAGE | DRYDOMAIN128_FINAL; - if (clen < DRYSPONGE128_RATE) - state.domain |= 
-int drygascon128_aead_decrypt
-    (unsigned char *m, unsigned long long *mlen,
-     unsigned char *nsec,
-     const unsigned char *c, unsigned long long clen,
-     const unsigned char *ad, unsigned long long adlen,
-     const unsigned char *npub,
-     const unsigned char *k)
-{
-    drysponge128_state_t state;
-    unsigned char *mtemp = m;
-    unsigned temp;
-    (void)nsec;
-
-    /* Validate the ciphertext length and set the return "mlen" value */
-    if (clen < DRYGASCON128_TAG_SIZE)
-        return -1;
-    *mlen = clen - DRYGASCON128_TAG_SIZE;
-
-    /* Initialize the sponge state with the key and nonce */
-    clen -= DRYGASCON128_TAG_SIZE;
-    drysponge128_setup(&state, k, npub, adlen == 0 && clen == 0);
-
-    /* Process the associated data */
-    if (adlen > 0)
-        drygascon128_process_ad(&state, ad, adlen, clen == 0);
-
-    /* Decrypt the ciphertext to produce the plaintext */
-    if (clen > 0) {
-        /* Process all blocks except the last one */
-        while (clen > DRYSPONGE128_RATE) {
-            lw_xor_block_2_src(m, c, state.r.B, DRYSPONGE128_RATE);
-            drysponge128_f_absorb(&state, m, DRYSPONGE128_RATE);
-            drysponge128_g(&state);
-            c += DRYSPONGE128_RATE;
-            m += DRYSPONGE128_RATE;
-            clen -= DRYSPONGE128_RATE;
-        }
-
-        /* Process the last block with domain separation and padding */
-        state.domain = DRYDOMAIN128_MESSAGE | DRYDOMAIN128_FINAL;
-        if (clen < DRYSPONGE128_RATE)
-            state.domain |= DRYDOMAIN128_PADDED;
-        temp = (unsigned)clen;
-        lw_xor_block_2_src(m, c, state.r.B, temp);
-        drysponge128_f_absorb(&state, m, temp);
-        drysponge128_g(&state);
-        c += temp;
-    }
-
-    /* Check the authentication tag */
-    return aead_check_tag(mtemp, *mlen, state.r.B, c, DRYGASCON128_TAG_SIZE);
-}
-
-int drygascon256_aead_encrypt
-    (unsigned char *c, unsigned long long *clen,
-     const unsigned char *m, unsigned long long mlen,
-     const unsigned char *ad, unsigned long long adlen,
-     const unsigned char *nsec,
-     const unsigned char *npub,
-     const unsigned char *k)
-{
-    drysponge256_state_t state;
-    unsigned temp;
-    (void)nsec;
-
-    /* Set the length of the returned ciphertext */
-    *clen = mlen + DRYGASCON256_TAG_SIZE;
-
-    /* Initialize the sponge state with the key and nonce */
-    drysponge256_setup(&state, k, npub, adlen == 0 && mlen == 0);
-
-    /* Process the associated data */
-    if (adlen > 0)
-        drygascon256_process_ad(&state, ad, adlen, mlen == 0);
-
-    /* Encrypt the plaintext to produce the ciphertext */
-    if (mlen > 0) {
-        /* Process all blocks except the last one */
-        while (mlen > DRYSPONGE256_RATE) {
-            drysponge256_f_absorb(&state, m, DRYSPONGE256_RATE);
-            lw_xor_block_2_src(c, m, state.r.B, DRYSPONGE256_RATE);
-            drysponge256_g(&state);
-            c += DRYSPONGE256_RATE;
-            m += DRYSPONGE256_RATE;
-            mlen -= DRYSPONGE256_RATE;
-        }
-
-        /* Process the last block with domain separation and padding */
-        state.domain = DRYDOMAIN256_MESSAGE | DRYDOMAIN256_FINAL;
-        if (mlen < DRYSPONGE256_RATE)
-            state.domain |= DRYDOMAIN256_PADDED;
-        temp = (unsigned)mlen;
-        drysponge256_f_absorb(&state, m, temp);
-        lw_xor_block_2_src(c, m, state.r.B, temp);
-        drysponge256_g(&state);
-        c += temp;
-    }
-
-    /* Generate the authentication tag */
-    memcpy(c, state.r.B, 16);
-    drysponge256_g(&state);
-    memcpy(c + 16, state.r.B, 16);
-    return 0;
-}
-
-int drygascon256_aead_decrypt
-    (unsigned char *m, unsigned long long *mlen,
-     unsigned char *nsec,
-     const unsigned char *c, unsigned long long clen,
-     const unsigned char *ad, unsigned long long adlen,
-     const unsigned char *npub,
-     const unsigned char *k)
-{
-    drysponge256_state_t state;
-    unsigned char *mtemp = m;
-    unsigned temp;
-    int result;
-    (void)nsec;
-
-    /* Validate the ciphertext length and set the return "mlen" value */
-    if (clen < DRYGASCON256_TAG_SIZE)
-        return -1;
-    *mlen = clen - DRYGASCON256_TAG_SIZE;
-
-    /* Initialize the sponge state with the key and nonce */
-    clen -= DRYGASCON256_TAG_SIZE;
-    drysponge256_setup(&state, k, npub, adlen == 0 && clen == 0);
-
-    /* Process the associated data */
-    if (adlen > 0)
-        drygascon256_process_ad(&state, ad, adlen, clen == 0);
-
-    /* Decrypt the ciphertext to produce the plaintext */
-    if (clen > 0) {
-        /* Process all blocks except the last one */
-        while (clen > DRYSPONGE256_RATE) {
-            lw_xor_block_2_src(m, c, state.r.B, DRYSPONGE256_RATE);
-            drysponge256_f_absorb(&state, m, DRYSPONGE256_RATE);
-            drysponge256_g(&state);
-            c += DRYSPONGE256_RATE;
-            m += DRYSPONGE256_RATE;
-            clen -= DRYSPONGE256_RATE;
-        }
-
-        /* Process the last block with domain separation and padding */
-        state.domain = DRYDOMAIN256_MESSAGE | DRYDOMAIN256_FINAL;
-        if (clen < DRYSPONGE256_RATE)
-            state.domain |= DRYDOMAIN256_PADDED;
-        temp = (unsigned)clen;
-        lw_xor_block_2_src(m, c, state.r.B, temp);
-        drysponge256_f_absorb(&state, m, temp);
-        drysponge256_g(&state);
-        c += temp;
-    }
-
-    /* Check the authentication tag which is split into two pieces */
-    result = aead_check_tag(0, 0, state.r.B, c, 16);
-    drysponge256_g(&state);
-    return aead_check_tag_precheck
-        (mtemp, *mlen, state.r.B, c + 16, 16, ~result);
-}
-
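/*
 * Editorial note (not part of the patch): the 256-bit tag above is produced
 * and verified in two 16-byte halves, with the two verdicts combined so the
 * comparison stays constant-time.  The helper below is a generic sketch of
 * that idea; it is not the library's aead_check_tag()/aead_check_tag_precheck()
 * and the function name is an assumption made for illustration.
 */
#include <stddef.h>

/* Returns 0 if both 16-byte tag halves match, -1 otherwise, without
 * branching on secret data before the single final reduction. */
int check_split_tag(const unsigned char expected[32],
                    const unsigned char actual[32])
{
    unsigned char diff = 0;
    size_t i;
    for (i = 0; i < 16; ++i)        /* first half */
        diff |= expected[i] ^ actual[i];
    for (i = 16; i < 32; ++i)       /* second half */
        diff |= expected[i] ^ actual[i];
    /* Collapse the accumulated difference into 0 (match) or -1 (mismatch) */
    return -(int)((diff | (unsigned char)-diff) >> 7);
}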
-/**
- * \brief Precomputed initialization vector for DryGASCON128-HASH.
- *
- * This is the CST_H value from the DryGASCON specification after it
- * has been processed by the key setup function for DrySPONGE128.
- */
-static unsigned char const drygascon128_hash_init[] = {
-    /* c */
-    0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3,
-    0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44,
-    0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3,
-    0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44,
-    0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3,
-    /* x */
-    0xa4, 0x09, 0x38, 0x22, 0x29, 0x9f, 0x31, 0xd0,
-    0x08, 0x2e, 0xfa, 0x98, 0xec, 0x4e, 0x6c, 0x89
-};
-
-int drygascon128_hash
-    (unsigned char *out, const unsigned char *in, unsigned long long inlen)
-{
-    drysponge128_state_t state;
-    memcpy(state.c.B, drygascon128_hash_init, sizeof(state.c.B));
-    memcpy(state.x.B, drygascon128_hash_init + sizeof(state.c.B),
-           sizeof(state.x.B));
-    state.domain = 0;
-    state.rounds = DRYSPONGE128_ROUNDS;
-    drygascon128_process_ad(&state, in, inlen, 1);
-    memcpy(out, state.r.B, 16);
-    drysponge128_g(&state);
-    memcpy(out + 16, state.r.B, 16);
-    return 0;
-}
-
-/**
- * \brief Precomputed initialization vector for DryGASCON256-HASH.
- *
- * This is the CST_H value from the DryGASCON specification after it
- * has been processed by the key setup function for DrySPONGE256.
- */
-static unsigned char const drygascon256_hash_init[] = {
-    /* c */
-    0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3,
-    0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44,
-    0xa4, 0x09, 0x38, 0x22, 0x29, 0x9f, 0x31, 0xd0,
-    0x08, 0x2e, 0xfa, 0x98, 0xec, 0x4e, 0x6c, 0x89,
-    0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3,
-    0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44,
-    0xa4, 0x09, 0x38, 0x22, 0x29, 0x9f, 0x31, 0xd0,
-    0x08, 0x2e, 0xfa, 0x98, 0xec, 0x4e, 0x6c, 0x89,
-    0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3,
-    /* x */
-    0x45, 0x28, 0x21, 0xe6, 0x38, 0xd0, 0x13, 0x77,
-    0xbe, 0x54, 0x66, 0xcf, 0x34, 0xe9, 0x0c, 0x6c
-};
-
-int drygascon256_hash
-    (unsigned char *out, const unsigned char *in, unsigned long long inlen)
-{
-    drysponge256_state_t state;
-    memcpy(state.c.B, drygascon256_hash_init, sizeof(state.c.B));
-    memcpy(state.x.B, drygascon256_hash_init + sizeof(state.c.B),
-           sizeof(state.x.B));
-    state.domain = 0;
-    state.rounds = DRYSPONGE256_ROUNDS;
-    drygascon256_process_ad(&state, in, inlen, 1);
-    memcpy(out, state.r.B, 16);
-    drysponge256_g(&state);
-    memcpy(out + 16, state.r.B, 16);
-    drysponge256_g(&state);
-    memcpy(out + 32, state.r.B, 16);
-    drysponge256_g(&state);
-    memcpy(out + 48, state.r.B, 16);
-    return 0;
-}
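/*
 * Editorial note (not part of the patch): both hash functions above squeeze
 * their digest 16 bytes at a time, running the g permutation between reads of
 * the rate portion of the state.  Below is a generic sketch of that squeeze
 * loop; permute() and the rate[] buffer are stand-ins for drysponge*_g() and
 * state.r.B, not the library's API.
 */
#include <string.h>

#define RATE_BYTES 16

extern void permute(unsigned char rate[RATE_BYTES]);   /* stand-in permutation */

/* Produce outlen bytes by repeatedly reading the rate and permuting. */
void squeeze(unsigned char *out, size_t outlen,
             unsigned char rate[RATE_BYTES])
{
    while (outlen > RATE_BYTES) {
        memcpy(out, rate, RATE_BYTES);
        permute(rate);              /* refresh the rate before the next read */
        out += RATE_BYTES;
        outlen -= RATE_BYTES;
    }
    memcpy(out, rate, outlen);      /* final (possibly partial) block */
}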
diff --git a/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/drygascon.h b/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/drygascon.h
deleted file mode 100644
index 12e18c3..0000000
--- a/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/drygascon.h
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright (C) 2020 Southern Storm Software, Pty Ltd.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef LWCRYPTO_DRYGASCON_H
-#define LWCRYPTO_DRYGASCON_H
-
-#include "aead-common.h"
-
-/**
- * \file drygascon.h
- * \brief DryGASCON authenticated encryption algorithm.
- *
- * DryGASCON is a family of authenticated encryption algorithms based
- * around a generalised version of the ASCON permutation. DryGASCON
- * is designed to provide some protection against power analysis.
- *
- * There are four algorithms in the DryGASCON family:
- *
- * \li DryGASCON128 is an authenticated encryption algorithm with a
- * 128-bit key, a 128-bit nonce, and a 128-bit authentication tag.
- * \li DryGASCON256 is an authenticated encryption algorithm with a
- * 256-bit key, a 128-bit nonce, and a 256-bit authentication tag.
- * \li DryGASCON128-HASH is a hash algorithm with a 256-bit output.
- * \li DryGASCON256-HASH is a hash algorithm with a 512-bit output.
- *
- * DryGASCON128 and DryGASCON128-HASH are the primary members of the family.
- *
- * References: https://github.com/sebastien-riou/DryGASCON
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * \brief Size of the key for DryGASCON128.
- */
-#define DRYGASCON128_KEY_SIZE 16
-
-/**
- * \brief Size of the authentication tag for DryGASCON128.
- */
-#define DRYGASCON128_TAG_SIZE 16
-
-/**
- * \brief Size of the nonce for DryGASCON128.
- */
-#define DRYGASCON128_NONCE_SIZE 16
-
-/**
- * \brief Size of the hash output for DryGASCON128-HASH.
- */
-#define DRYGASCON128_HASH_SIZE 32
-
-/**
- * \brief Size of the key for DryGASCON256.
- */
-#define DRYGASCON256_KEY_SIZE 32
-
-/**
- * \brief Size of the authentication tag for DryGASCON256.
- */
-#define DRYGASCON256_TAG_SIZE 32
-
-/**
- * \brief Size of the nonce for DryGASCON256.
- */
-#define DRYGASCON256_NONCE_SIZE 16
-
-/**
- * \brief Size of the hash output for DryGASCON256-HASH.
- */
-#define DRYGASCON256_HASH_SIZE 64
-
-/**
- * \brief Meta-information block for the DryGASCON128 cipher.
- */
-extern aead_cipher_t const drygascon128_cipher;
-
-/**
- * \brief Meta-information block for the DryGASCON256 cipher.
- */
-extern aead_cipher_t const drygascon256_cipher;
-
-/**
- * \brief Meta-information block for DryGASCON128-HASH.
- */
-extern aead_hash_algorithm_t const drygascon128_hash_algorithm;
-
-/**
- * \brief Meta-information block for DryGASCON256-HASH.
- */
-extern aead_hash_algorithm_t const drygascon256_hash_algorithm;
-
-/**
- * \brief Encrypts and authenticates a packet with DryGASCON128.
- * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa drygascon128_aead_decrypt() - */ -int drygascon128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with DryGASCON128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa drygascon128_aead_encrypt() - */ -int drygascon128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with DryGASCON256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa drygascon256_aead_decrypt() - */ -int drygascon256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with DryGASCON256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa drygascon256_aead_encrypt() - */ -int drygascon256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with DRYGASCON128. - * - * \param out Buffer to receive the hash output which must be at least - * DRYGASCON128_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int drygascon128_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with DRYGASCON256. - * - * \param out Buffer to receive the hash output which must be at least - * DRYGASCON256_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. 
- */ -int drygascon256_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/encrypt.c b/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/encrypt.c deleted file mode 100644 index 663de84..0000000 --- a/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "drygascon.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return drygascon128_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return drygascon128_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/internal-drysponge-avr.S b/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/internal-drysponge-avr.S deleted file mode 100644 index 84d0ff8..0000000 --- a/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/internal-drysponge-avr.S +++ /dev/null @@ -1,5092 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global gascon128_core_round - .type gascon128_core_round, @function -gascon128_core_round: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ldi r18,15 - sub r18,r22 - swap r18 - or r22,r18 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+16 - ldd r5,Z+17 - ldd r6,Z+18 - ldd r7,Z+19 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - eor r4,r22 - ldd r23,Z+8 - ldd r12,Z+24 - ldd r13,Z+32 - eor r18,r13 - eor r4,r23 - eor r13,r12 - mov r14,r23 - mov r0,r18 - com r0 - and r14,r0 - mov r15,r4 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r4 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r18 - mov r0,r13 - com r0 - and r16,r0 - eor r18,r15 - eor r23,r24 - eor r4,r25 - eor r12,r16 - eor r13,r14 - eor r23,r18 - eor r12,r4 - eor r18,r13 - com r4 - st Z,r18 - std Z+8,r23 - std Z+24,r12 - std Z+32,r13 - ldd r23,Z+9 - ldd r12,Z+25 - ldd r13,Z+33 - eor r19,r13 - eor r5,r23 - eor r13,r12 - mov r14,r23 - mov r0,r19 - com r0 - and r14,r0 - mov r15,r5 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r5 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r19 - mov r0,r13 - com r0 - and r16,r0 - eor r19,r15 - eor r23,r24 - eor r5,r25 - eor r12,r16 - eor r13,r14 - eor r23,r19 - eor r12,r5 - eor r19,r13 - com r5 - std Z+1,r19 - std Z+9,r23 - std Z+25,r12 - std Z+33,r13 - ldd r23,Z+10 - ldd r12,Z+26 - ldd r13,Z+34 - eor r20,r13 - eor r6,r23 - eor r13,r12 - mov r14,r23 - mov r0,r20 - com r0 - and r14,r0 - mov r15,r6 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r6 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r20 - mov r0,r13 - com r0 - and r16,r0 - eor r20,r15 
- eor r23,r24 - eor r6,r25 - eor r12,r16 - eor r13,r14 - eor r23,r20 - eor r12,r6 - eor r20,r13 - com r6 - std Z+2,r20 - std Z+10,r23 - std Z+26,r12 - std Z+34,r13 - ldd r23,Z+11 - ldd r12,Z+27 - ldd r13,Z+35 - eor r21,r13 - eor r7,r23 - eor r13,r12 - mov r14,r23 - mov r0,r21 - com r0 - and r14,r0 - mov r15,r7 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r7 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r21 - mov r0,r13 - com r0 - and r16,r0 - eor r21,r15 - eor r23,r24 - eor r7,r25 - eor r12,r16 - eor r13,r14 - eor r23,r21 - eor r12,r7 - eor r21,r13 - com r7 - std Z+3,r21 - std Z+11,r23 - std Z+27,r12 - std Z+35,r13 - ldd r23,Z+12 - ldd r12,Z+28 - ldd r13,Z+36 - eor r26,r13 - eor r8,r23 - eor r13,r12 - mov r14,r23 - mov r0,r26 - com r0 - and r14,r0 - mov r15,r8 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r8 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r26 - mov r0,r13 - com r0 - and r16,r0 - eor r26,r15 - eor r23,r24 - eor r8,r25 - eor r12,r16 - eor r13,r14 - eor r23,r26 - eor r12,r8 - eor r26,r13 - com r8 - std Z+4,r26 - std Z+12,r23 - std Z+28,r12 - std Z+36,r13 - ldd r23,Z+13 - ldd r12,Z+29 - ldd r13,Z+37 - eor r27,r13 - eor r9,r23 - eor r13,r12 - mov r14,r23 - mov r0,r27 - com r0 - and r14,r0 - mov r15,r9 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r9 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r27 - mov r0,r13 - com r0 - and r16,r0 - eor r27,r15 - eor r23,r24 - eor r9,r25 - eor r12,r16 - eor r13,r14 - eor r23,r27 - eor r12,r9 - eor r27,r13 - com r9 - std Z+5,r27 - std Z+13,r23 - std Z+29,r12 - std Z+37,r13 - ldd r23,Z+14 - ldd r12,Z+30 - ldd r13,Z+38 - eor r2,r13 - eor r10,r23 - eor r13,r12 - mov r14,r23 - mov r0,r2 - com r0 - and r14,r0 - mov r15,r10 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r10 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r2 - mov r0,r13 - com r0 - and r16,r0 - eor r2,r15 - eor r23,r24 - eor r10,r25 - eor r12,r16 - eor r13,r14 - eor r23,r2 - eor r12,r10 - eor r2,r13 - com r10 - std Z+6,r2 - std Z+14,r23 - std Z+30,r12 - std Z+38,r13 - ldd r23,Z+15 - ldd r12,Z+31 - ldd r13,Z+39 - eor r3,r13 - eor r11,r23 - eor r13,r12 - mov r14,r23 - mov r0,r3 - com r0 - and r14,r0 - mov r15,r11 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r11 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r3 - mov r0,r13 - com r0 - and r16,r0 - eor r3,r15 - eor r23,r24 - eor r11,r25 - eor r12,r16 - eor r13,r14 - eor r23,r3 - eor r12,r11 - eor r3,r13 - com r11 - std Z+7,r3 - std Z+15,r23 - std Z+31,r12 - std Z+39,r13 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r24 - rol r25 - rol r16 - rol r17 - adc r24,r1 - lsl r24 - rol r25 - rol r16 - rol r17 - adc r24,r1 - eor r24,r18 - eor r25,r19 - eor r16,r20 - eor r17,r21 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 
- ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r18,r24 - eor r19,r25 - eor r20,r16 - eor r21,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - std Z+12,r26 - std Z+13,r27 - std Z+14,r2 - std Z+15,r3 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - bst r12,0 - lsr r15 - ror r14 - ror r13 - ror r12 - bld r15,7 - eor r24,r4 - eor r25,r5 - eor r16,r6 - eor r17,r7 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r1 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r7,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - or r11,r0 - eor r4,r24 - eor r5,r25 - eor r6,r16 - eor r7,r17 - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - eor r24,r18 - eor r25,r19 - eor r16,r20 - eor r17,r21 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r24 - eor r19,r25 - eor r20,r16 - eor r21,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+24,r18 - std Z+25,r19 - std Z+26,r20 - std Z+27,r21 - std Z+28,r26 - std Z+29,r27 - std Z+30,r2 - std Z+31,r3 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - or r17,r0 - eor r24,r18 - eor r25,r19 - eor r16,r20 - eor r17,r21 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r18,r24 - eor r19,r25 - eor r20,r16 - eor r21,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+32,r18 - std Z+33,r19 - std Z+34,r20 - std 
Z+35,r21 - std Z+36,r26 - std Z+37,r27 - std Z+38,r2 - std Z+39,r3 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - or r17,r0 - eor r24,r18 - eor r25,r19 - eor r16,r20 - eor r17,r21 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r24 - eor r19,r25 - eor r20,r16 - eor r21,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - std Z+16,r4 - std Z+17,r5 - std Z+18,r6 - std Z+19,r7 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size gascon128_core_round, .-gascon128_core_round - - .text -.global drysponge128_g - .type drysponge128_g, @function -drysponge128_g: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - subi r30,180 - sbci r31,255 - ld r19,Z - subi r30,76 - sbc r31,r1 - ldi r18,240 - std Z+40,r1 - std Z+41,r1 - std Z+42,r1 - std Z+43,r1 - std Z+44,r1 - std Z+45,r1 - std Z+46,r1 - std Z+47,r1 - std Z+48,r1 - std Z+49,r1 - std Z+50,r1 - std Z+51,r1 - std Z+52,r1 - std Z+53,r1 - std Z+54,r1 - std Z+55,r1 - ld r20,Z - ldd r21,Z+1 - ldd r22,Z+2 - ldd r23,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+16 - ldd r5,Z+17 - ldd r6,Z+18 - ldd r7,Z+19 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 -38: - eor r4,r18 - ldd r12,Z+8 - ldd r13,Z+24 - ldd r14,Z+32 - eor r20,r14 - eor r4,r12 - eor r14,r13 - mov r15,r12 - mov r0,r20 - com r0 - and r15,r0 - mov r24,r4 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r4 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r20 - mov r0,r14 - com r0 - and r17,r0 - eor r20,r24 - eor r12,r25 - eor r4,r16 - eor r13,r17 - eor r14,r15 - eor r12,r20 - eor r13,r4 - eor r20,r14 - com r4 - st Z,r20 - std Z+8,r12 - std Z+24,r13 - std Z+32,r14 - ldd r12,Z+9 - ldd r13,Z+25 - ldd r14,Z+33 - eor r21,r14 - eor r5,r12 - eor r14,r13 - mov r15,r12 - mov r0,r21 - com r0 - and r15,r0 - mov r24,r5 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r5 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r21 - mov r0,r14 - com r0 - and r17,r0 - eor r21,r24 - eor r12,r25 - eor r5,r16 - eor r13,r17 - eor r14,r15 - eor r12,r21 - eor r13,r5 - eor r21,r14 - com r5 - std Z+1,r21 - std Z+9,r12 - std Z+25,r13 - std Z+33,r14 - ldd r12,Z+10 - ldd r13,Z+26 - ldd r14,Z+34 - eor r22,r14 - eor r6,r12 - eor r14,r13 - mov r15,r12 - mov r0,r22 - com r0 - and 
r15,r0 - mov r24,r6 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r6 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r22 - mov r0,r14 - com r0 - and r17,r0 - eor r22,r24 - eor r12,r25 - eor r6,r16 - eor r13,r17 - eor r14,r15 - eor r12,r22 - eor r13,r6 - eor r22,r14 - com r6 - std Z+2,r22 - std Z+10,r12 - std Z+26,r13 - std Z+34,r14 - ldd r12,Z+11 - ldd r13,Z+27 - ldd r14,Z+35 - eor r23,r14 - eor r7,r12 - eor r14,r13 - mov r15,r12 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r7 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r7 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r23 - mov r0,r14 - com r0 - and r17,r0 - eor r23,r24 - eor r12,r25 - eor r7,r16 - eor r13,r17 - eor r14,r15 - eor r12,r23 - eor r13,r7 - eor r23,r14 - com r7 - std Z+3,r23 - std Z+11,r12 - std Z+27,r13 - std Z+35,r14 - ldd r12,Z+12 - ldd r13,Z+28 - ldd r14,Z+36 - eor r26,r14 - eor r8,r12 - eor r14,r13 - mov r15,r12 - mov r0,r26 - com r0 - and r15,r0 - mov r24,r8 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r8 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r26 - mov r0,r14 - com r0 - and r17,r0 - eor r26,r24 - eor r12,r25 - eor r8,r16 - eor r13,r17 - eor r14,r15 - eor r12,r26 - eor r13,r8 - eor r26,r14 - com r8 - std Z+4,r26 - std Z+12,r12 - std Z+28,r13 - std Z+36,r14 - ldd r12,Z+13 - ldd r13,Z+29 - ldd r14,Z+37 - eor r27,r14 - eor r9,r12 - eor r14,r13 - mov r15,r12 - mov r0,r27 - com r0 - and r15,r0 - mov r24,r9 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r9 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r27 - mov r0,r14 - com r0 - and r17,r0 - eor r27,r24 - eor r12,r25 - eor r9,r16 - eor r13,r17 - eor r14,r15 - eor r12,r27 - eor r13,r9 - eor r27,r14 - com r9 - std Z+5,r27 - std Z+13,r12 - std Z+29,r13 - std Z+37,r14 - ldd r12,Z+14 - ldd r13,Z+30 - ldd r14,Z+38 - eor r2,r14 - eor r10,r12 - eor r14,r13 - mov r15,r12 - mov r0,r2 - com r0 - and r15,r0 - mov r24,r10 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r10 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r2 - mov r0,r14 - com r0 - and r17,r0 - eor r2,r24 - eor r12,r25 - eor r10,r16 - eor r13,r17 - eor r14,r15 - eor r12,r2 - eor r13,r10 - eor r2,r14 - com r10 - std Z+6,r2 - std Z+14,r12 - std Z+30,r13 - std Z+38,r14 - ldd r12,Z+15 - ldd r13,Z+31 - ldd r14,Z+39 - eor r3,r14 - eor r11,r12 - eor r14,r13 - mov r15,r12 - mov r0,r3 - com r0 - and r15,r0 - mov r24,r11 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r11 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r3 - mov r0,r14 - com r0 - and r17,r0 - eor r3,r24 - eor r12,r25 - eor r11,r16 - eor r13,r17 - eor r14,r15 - eor r12,r3 - eor r13,r11 - eor r3,r14 - com r11 - std Z+7,r3 - std Z+15,r12 - std Z+31,r13 - std Z+39,r14 - ldd r20,Z+8 - ldd r21,Z+9 - ldd r22,Z+10 - ldd r23,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r12,r20 - movw r14,r22 - movw r24,r26 - movw r16,r2 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r24 - rol r25 - rol r16 - rol r17 - adc r24,r1 - lsl r24 - rol r25 - rol r16 - rol r17 - adc r24,r1 - eor r24,r20 - eor r25,r21 - eor r16,r22 - eor r17,r23 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r20 - mov r20,r22 - mov r22,r0 - mov r0,r21 - mov r21,r23 - mov r23,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr 
r23 - ror r22 - ror r21 - ror r20 - ror r0 - or r23,r0 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+8,r20 - std Z+9,r21 - std Z+10,r22 - std Z+11,r23 - std Z+12,r26 - std Z+13,r27 - std Z+14,r2 - std Z+15,r3 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - bst r12,0 - lsr r15 - ror r14 - ror r13 - ror r12 - bld r15,7 - eor r24,r4 - eor r25,r5 - eor r16,r6 - eor r17,r7 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r1 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r7,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - or r11,r0 - eor r4,r24 - eor r5,r25 - eor r6,r16 - eor r7,r17 - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - ldd r20,Z+24 - ldd r21,Z+25 - ldd r22,Z+26 - ldd r23,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r12,r20 - movw r14,r22 - movw r24,r26 - movw r16,r2 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - eor r24,r20 - eor r25,r21 - eor r16,r22 - eor r17,r23 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r20 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r0 - lsl r20 - rol r21 - rol r22 - rol r23 - adc r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - adc r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - adc r20,r1 - mov r0,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+24,r20 - std Z+25,r21 - std Z+26,r22 - std Z+27,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r2 - std Z+31,r3 - ldd r20,Z+32 - ldd r21,Z+33 - ldd r22,Z+34 - ldd r23,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - movw r12,r20 - movw r14,r22 - movw r24,r26 - movw r16,r2 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - or r17,r0 - eor r24,r20 - eor r25,r21 - eor r16,r22 - eor r17,r23 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r20 - mov r20,r22 - mov r22,r0 - mov r0,r21 - mov r21,r23 - mov r23,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - or r23,r0 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - 
lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+32,r20 - std Z+33,r21 - std Z+34,r22 - std Z+35,r23 - std Z+36,r26 - std Z+37,r27 - std Z+38,r2 - std Z+39,r3 - ld r20,Z - ldd r21,Z+1 - ldd r22,Z+2 - ldd r23,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r12,r20 - movw r14,r22 - movw r24,r26 - movw r16,r2 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - or r17,r0 - eor r24,r20 - eor r25,r21 - eor r16,r22 - eor r17,r23 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r20 - mov r20,r22 - mov r22,r0 - mov r0,r21 - mov r21,r23 - mov r23,r0 - lsl r20 - rol r21 - rol r22 - rol r23 - adc r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - adc r20,r1 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - ldd r12,Z+40 - ldd r13,Z+41 - ldd r14,Z+42 - ldd r15,Z+43 - eor r12,r20 - eor r13,r21 - eor r14,r22 - eor r15,r23 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - std Z+40,r12 - std Z+41,r13 - std Z+42,r14 - std Z+43,r15 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - ldd r0,Z+24 - eor r12,r0 - ldd r0,Z+25 - eor r13,r0 - ldd r0,Z+26 - eor r14,r0 - ldd r0,Z+27 - eor r15,r0 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - ldd r12,Z+48 - ldd r13,Z+49 - ldd r14,Z+50 - ldd r15,Z+51 - ldd r0,Z+8 - eor r12,r0 - ldd r0,Z+9 - eor r13,r0 - ldd r0,Z+10 - eor r14,r0 - ldd r0,Z+11 - eor r15,r0 - ldd r0,Z+28 - eor r12,r0 - ldd r0,Z+29 - eor r13,r0 - ldd r0,Z+30 - eor r14,r0 - ldd r0,Z+31 - eor r15,r0 - std Z+48,r12 - std Z+49,r13 - std Z+50,r14 - std Z+51,r15 - ldd r12,Z+52 - ldd r13,Z+53 - ldd r14,Z+54 - ldd r15,Z+55 - ldd r0,Z+12 - eor r12,r0 - ldd r0,Z+13 - eor r13,r0 - ldd r0,Z+14 - eor r14,r0 - ldd r0,Z+15 - eor r15,r0 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - std Z+52,r12 - std Z+53,r13 - std Z+54,r14 - std Z+55,r15 - subi r18,15 - dec r19 - breq 5904f - rjmp 38b -5904: - st Z,r20 - std Z+1,r21 - std Z+2,r22 - std Z+3,r23 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - std Z+16,r4 - std Z+17,r5 - std Z+18,r6 - std Z+19,r7 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size drysponge128_g, .-drysponge128_g - - .text -.global gascon256_core_round - .type gascon256_core_round, @function -gascon256_core_round: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,8 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 26 - ldi r18,15 - sub r18,r22 - swap r18 - or r22,r18 - ld r18,Z+ - ld r19,Z+ - ld r20,Z+ - ld r21,Z+ - ld r26,Z+ - ld r27,Z+ - ld r2,Z+ - ld r3,Z+ - ldd 
r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - eor r4,r22 - ld r22,Z - ldd r23,Z+8 - ldd r12,Z+16 - ldd r13,Z+32 - ldd r14,Z+40 - ldd r15,Z+48 - ldd r24,Z+56 - eor r18,r24 - eor r23,r22 - eor r4,r12 - eor r14,r13 - eor r24,r15 - mov r17,r18 - mov r25,r22 - mov r0,r18 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r18,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r4 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r4 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r4,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r18 - eor r12,r23 - eor r13,r4 - eor r15,r14 - eor r18,r24 - com r4 - std Y+1,r18 - st Z,r22 - std Z+8,r23 - std Z+16,r12 - std Z+32,r13 - std Z+40,r14 - std Z+48,r15 - std Z+56,r24 - ldd r22,Z+1 - ldd r23,Z+9 - ldd r12,Z+17 - ldd r13,Z+33 - ldd r14,Z+41 - ldd r15,Z+49 - ldd r24,Z+57 - eor r19,r24 - eor r23,r22 - eor r5,r12 - eor r14,r13 - eor r24,r15 - mov r17,r19 - mov r25,r22 - mov r0,r19 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r19,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r5 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r5 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r5,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r19 - eor r12,r23 - eor r13,r5 - eor r15,r14 - eor r19,r24 - com r5 - std Y+2,r19 - std Z+1,r22 - std Z+9,r23 - std Z+17,r12 - std Z+33,r13 - std Z+41,r14 - std Z+49,r15 - std Z+57,r24 - ldd r22,Z+2 - ldd r23,Z+10 - ldd r12,Z+18 - ldd r13,Z+34 - ldd r14,Z+42 - ldd r15,Z+50 - ldd r24,Z+58 - eor r20,r24 - eor r23,r22 - eor r6,r12 - eor r14,r13 - eor r24,r15 - mov r17,r20 - mov r25,r22 - mov r0,r20 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r20,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r6 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r6 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r6,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r20 - eor r12,r23 - eor r13,r6 - eor r15,r14 - eor r20,r24 - com r6 - std Y+3,r20 - std Z+2,r22 - std Z+10,r23 - std Z+18,r12 - std Z+34,r13 - std Z+42,r14 - std Z+50,r15 - std Z+58,r24 - ldd r22,Z+3 - ldd r23,Z+11 - ldd r12,Z+19 - ldd r13,Z+35 - ldd r14,Z+43 - ldd r15,Z+51 - ldd r24,Z+59 - eor r21,r24 - eor r23,r22 - eor r7,r12 - eor r14,r13 - eor r24,r15 - mov r17,r21 - mov r25,r22 - mov r0,r21 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r21,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r7 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r7 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r7,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com 
r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r21 - eor r12,r23 - eor r13,r7 - eor r15,r14 - eor r21,r24 - com r7 - std Y+4,r21 - std Z+3,r22 - std Z+11,r23 - std Z+19,r12 - std Z+35,r13 - std Z+43,r14 - std Z+51,r15 - std Z+59,r24 - ldd r22,Z+4 - ldd r23,Z+12 - ldd r12,Z+20 - ldd r13,Z+36 - ldd r14,Z+44 - ldd r15,Z+52 - ldd r24,Z+60 - eor r26,r24 - eor r23,r22 - eor r8,r12 - eor r14,r13 - eor r24,r15 - mov r17,r26 - mov r25,r22 - mov r0,r26 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r26,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r8 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r8 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r8,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r26 - eor r12,r23 - eor r13,r8 - eor r15,r14 - eor r26,r24 - com r8 - std Y+5,r26 - std Z+4,r22 - std Z+12,r23 - std Z+20,r12 - std Z+36,r13 - std Z+44,r14 - std Z+52,r15 - std Z+60,r24 - ldd r22,Z+5 - ldd r23,Z+13 - ldd r12,Z+21 - ldd r13,Z+37 - ldd r14,Z+45 - ldd r15,Z+53 - ldd r24,Z+61 - eor r27,r24 - eor r23,r22 - eor r9,r12 - eor r14,r13 - eor r24,r15 - mov r17,r27 - mov r25,r22 - mov r0,r27 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r27,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r9 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r9 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r9,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r27 - eor r12,r23 - eor r13,r9 - eor r15,r14 - eor r27,r24 - com r9 - std Y+6,r27 - std Z+5,r22 - std Z+13,r23 - std Z+21,r12 - std Z+37,r13 - std Z+45,r14 - std Z+53,r15 - std Z+61,r24 - ldd r22,Z+6 - ldd r23,Z+14 - ldd r12,Z+22 - ldd r13,Z+38 - ldd r14,Z+46 - ldd r15,Z+54 - ldd r24,Z+62 - eor r2,r24 - eor r23,r22 - eor r10,r12 - eor r14,r13 - eor r24,r15 - mov r17,r2 - mov r25,r22 - mov r0,r2 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r2,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r10 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r10 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r2 - eor r12,r23 - eor r13,r10 - eor r15,r14 - eor r2,r24 - com r10 - std Y+7,r2 - std Z+6,r22 - std Z+14,r23 - std Z+22,r12 - std Z+38,r13 - std Z+46,r14 - std Z+54,r15 - std Z+62,r24 - ldd r22,Z+7 - ldd r23,Z+15 - ldd r12,Z+23 - ldd r13,Z+39 - ldd r14,Z+47 - ldd r15,Z+55 - ldd r24,Z+63 - eor r3,r24 - eor r23,r22 - eor r11,r12 - eor r14,r13 - eor r24,r15 - mov r17,r3 - mov r25,r22 - mov r0,r3 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r3,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r11 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r11 - com r0 - and 
r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r3 - eor r12,r23 - eor r13,r11 - eor r15,r14 - eor r3,r24 - com r11 - std Y+8,r3 - std Z+7,r22 - std Z+15,r23 - std Z+23,r12 - std Z+39,r13 - std Z+47,r14 - std Z+55,r15 - std Z+63,r24 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - bst r22,0 - lsr r13 - ror r12 - ror r23 - ror r22 - bld r13,7 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - std Z+12,r26 - std Z+13,r27 - std Z+14,r2 - std Z+15,r3 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r22 - mov r22,r23 - mov r23,r12 - mov r12,r13 - mov r13,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - or r13,r0 - mov r0,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor 
r2,r12 - eor r3,r13 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - std Z+20,r26 - std Z+21,r27 - std Z+22,r2 - std Z+23,r3 - movw r22,r4 - movw r12,r6 - movw r14,r8 - movw r24,r10 - mov r0,r1 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - or r13,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r4 - eor r15,r5 - eor r24,r6 - eor r25,r7 - eor r22,r8 - eor r23,r9 - eor r12,r10 - eor r13,r11 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r1 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - or r11,r0 - eor r4,r14 - eor r5,r15 - eor r6,r24 - eor r7,r25 - eor r8,r22 - eor r9,r23 - eor r10,r12 - eor r11,r13 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r22 - mov r22,r12 - mov r12,r0 - mov r0,r23 - mov r23,r13 - mov r13,r0 - mov r0,r14 - mov r14,r24 - mov r24,r0 - mov r0,r15 - mov r15,r25 - mov r25,r0 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+32,r18 - std Z+33,r19 - std Z+34,r20 - std Z+35,r21 - std Z+36,r26 - std Z+37,r27 - std Z+38,r2 - std Z+39,r3 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - ldd r26,Z+44 - ldd r27,Z+45 - ldd r2,Z+46 - ldd r3,Z+47 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r13 - mov r13,r12 - mov r12,r23 - mov r23,r22 - mov r22,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - or r13,r0 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 
- rol r2 - rol r3 - adc r26,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+40,r18 - std Z+41,r19 - std Z+42,r20 - std Z+43,r21 - std Z+44,r26 - std Z+45,r27 - std Z+46,r2 - std Z+47,r3 - ldd r18,Z+48 - ldd r19,Z+49 - ldd r20,Z+50 - ldd r21,Z+51 - ldd r26,Z+52 - ldd r27,Z+53 - ldd r2,Z+54 - ldd r3,Z+55 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r22 - mov r22,r23 - mov r23,r12 - mov r12,r13 - mov r13,r0 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r3 - mov r3,r2 - mov r2,r27 - mov r27,r26 - mov r26,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+48,r18 - std Z+49,r19 - std Z+50,r20 - std Z+51,r21 - std Z+52,r26 - std Z+53,r27 - std Z+54,r2 - std Z+55,r3 - ldd r18,Z+56 - ldd r19,Z+57 - ldd r20,Z+58 - ldd r21,Z+59 - ldd r26,Z+60 - ldd r27,Z+61 - ldd r2,Z+62 - ldd r3,Z+63 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r13 - mov r13,r12 - mov r12,r23 - mov r23,r22 - mov r22,r0 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+56,r18 - std Z+57,r19 - std Z+58,r20 - std Z+59,r21 - std Z+60,r26 - std Z+61,r27 - std Z+62,r2 - std Z+63,r3 - ldd r18,Y+1 - ldd r19,Y+2 - ldd r20,Y+3 - ldd r21,Y+4 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r2,Y+7 - ldd r3,Y+8 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r22 - mov r22,r23 - mov r23,r12 - mov r12,r13 - mov r13,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - or r13,r0 - mov r0,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol 
r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+24,r4 - std Z+25,r5 - std Z+26,r6 - std Z+27,r7 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - st -Z,r3 - st -Z,r2 - st -Z,r27 - st -Z,r26 - st -Z,r21 - st -Z,r20 - st -Z,r19 - st -Z,r18 - adiw r28,8 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gascon256_core_round, .-gascon256_core_round - - .text -.global drysponge256_g - .type drysponge256_g, @function -drysponge256_g: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,26 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 44 - subi r30,148 - sbci r31,255 - ld r19,Z - subi r30,108 - sbc r31,r1 - ldi r18,240 - std Y+25,r19 - std Y+26,r18 - std Y+9,r1 - std Y+10,r1 - std Y+11,r1 - std Y+12,r1 - std Y+13,r1 - std Y+14,r1 - std Y+15,r1 - std Y+16,r1 - std Y+17,r1 - std Y+18,r1 - std Y+19,r1 - std Y+20,r1 - std Y+21,r1 - std Y+22,r1 - std Y+23,r1 - std Y+24,r1 - ld r18,Z+ - ld r19,Z+ - ld r20,Z+ - ld r21,Z+ - ld r22,Z+ - ld r23,Z+ - ld r26,Z+ - ld r27,Z+ - ldd r2,Z+24 - ldd r3,Z+25 - ldd r4,Z+26 - ldd r5,Z+27 - ldd r6,Z+28 - ldd r7,Z+29 - ldd r8,Z+30 - ldd r9,Z+31 -40: - ldd r24,Y+26 - eor r2,r24 - subi r24,15 - std Y+26,r24 - ld r10,Z - ldd r11,Z+8 - ldd r12,Z+16 - ldd r13,Z+32 - ldd r14,Z+40 - ldd r15,Z+48 - ldd r24,Z+56 - eor r18,r24 - eor r11,r10 - eor r2,r12 - eor r14,r13 - eor r24,r15 - mov r17,r18 - mov r25,r10 - mov r0,r18 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r18,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r2 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r2 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r2,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r18 - eor r12,r11 - eor r13,r2 - eor r15,r14 - eor r18,r24 - com r2 - std Y+1,r18 - st Z,r10 - std Z+8,r11 - std Z+16,r12 - std Z+32,r13 - std Z+40,r14 - std Z+48,r15 - std Z+56,r24 - ldd r10,Z+1 - ldd r11,Z+9 - ldd r12,Z+17 - ldd r13,Z+33 - ldd r14,Z+41 - ldd r15,Z+49 - ldd r24,Z+57 - eor r19,r24 - eor r11,r10 - eor r3,r12 - eor r14,r13 - eor r24,r15 - mov r17,r19 - mov r25,r10 - mov r0,r19 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r19,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r3 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r3 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r3,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r19 - eor r12,r11 - eor r13,r3 - eor r15,r14 - eor r19,r24 - com r3 - std Y+2,r19 - std Z+1,r10 - std Z+9,r11 - std Z+17,r12 - std Z+33,r13 - std Z+41,r14 - std 
Z+49,r15 - std Z+57,r24 - ldd r10,Z+2 - ldd r11,Z+10 - ldd r12,Z+18 - ldd r13,Z+34 - ldd r14,Z+42 - ldd r15,Z+50 - ldd r24,Z+58 - eor r20,r24 - eor r11,r10 - eor r4,r12 - eor r14,r13 - eor r24,r15 - mov r17,r20 - mov r25,r10 - mov r0,r20 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r20,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r4 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r4 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r4,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r20 - eor r12,r11 - eor r13,r4 - eor r15,r14 - eor r20,r24 - com r4 - std Y+3,r20 - std Z+2,r10 - std Z+10,r11 - std Z+18,r12 - std Z+34,r13 - std Z+42,r14 - std Z+50,r15 - std Z+58,r24 - ldd r10,Z+3 - ldd r11,Z+11 - ldd r12,Z+19 - ldd r13,Z+35 - ldd r14,Z+43 - ldd r15,Z+51 - ldd r24,Z+59 - eor r21,r24 - eor r11,r10 - eor r5,r12 - eor r14,r13 - eor r24,r15 - mov r17,r21 - mov r25,r10 - mov r0,r21 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r21,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r5 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r5 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r5,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r21 - eor r12,r11 - eor r13,r5 - eor r15,r14 - eor r21,r24 - com r5 - std Y+4,r21 - std Z+3,r10 - std Z+11,r11 - std Z+19,r12 - std Z+35,r13 - std Z+43,r14 - std Z+51,r15 - std Z+59,r24 - ldd r10,Z+4 - ldd r11,Z+12 - ldd r12,Z+20 - ldd r13,Z+36 - ldd r14,Z+44 - ldd r15,Z+52 - ldd r24,Z+60 - eor r22,r24 - eor r11,r10 - eor r6,r12 - eor r14,r13 - eor r24,r15 - mov r17,r22 - mov r25,r10 - mov r0,r22 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r6 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r6 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r6,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r22 - eor r12,r11 - eor r13,r6 - eor r15,r14 - eor r22,r24 - com r6 - std Y+5,r22 - std Z+4,r10 - std Z+12,r11 - std Z+20,r12 - std Z+36,r13 - std Z+44,r14 - std Z+52,r15 - std Z+60,r24 - ldd r10,Z+5 - ldd r11,Z+13 - ldd r12,Z+21 - ldd r13,Z+37 - ldd r14,Z+45 - ldd r15,Z+53 - ldd r24,Z+61 - eor r23,r24 - eor r11,r10 - eor r7,r12 - eor r14,r13 - eor r24,r15 - mov r17,r23 - mov r25,r10 - mov r0,r23 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r7 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r7 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r7,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor 
r24,r25 - eor r10,r23 - eor r12,r11 - eor r13,r7 - eor r15,r14 - eor r23,r24 - com r7 - std Y+6,r23 - std Z+5,r10 - std Z+13,r11 - std Z+21,r12 - std Z+37,r13 - std Z+45,r14 - std Z+53,r15 - std Z+61,r24 - ldd r10,Z+6 - ldd r11,Z+14 - ldd r12,Z+22 - ldd r13,Z+38 - ldd r14,Z+46 - ldd r15,Z+54 - ldd r24,Z+62 - eor r26,r24 - eor r11,r10 - eor r8,r12 - eor r14,r13 - eor r24,r15 - mov r17,r26 - mov r25,r10 - mov r0,r26 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r26,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r8 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r8 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r8,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r26 - eor r12,r11 - eor r13,r8 - eor r15,r14 - eor r26,r24 - com r8 - std Y+7,r26 - std Z+6,r10 - std Z+14,r11 - std Z+22,r12 - std Z+38,r13 - std Z+46,r14 - std Z+54,r15 - std Z+62,r24 - ldd r10,Z+7 - ldd r11,Z+15 - ldd r12,Z+23 - ldd r13,Z+39 - ldd r14,Z+47 - ldd r15,Z+55 - ldd r24,Z+63 - eor r27,r24 - eor r11,r10 - eor r9,r12 - eor r14,r13 - eor r24,r15 - mov r17,r27 - mov r25,r10 - mov r0,r27 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r27,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r9 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r9 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r9,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r27 - eor r12,r11 - eor r13,r9 - eor r15,r14 - eor r27,r24 - com r9 - std Y+8,r27 - std Z+7,r10 - std Z+15,r11 - std Z+23,r12 - std Z+39,r13 - std Z+47,r14 - std Z+55,r15 - std Z+63,r24 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r26,Z+6 - ldd r27,Z+7 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r22 - mov r22,r26 - mov r26,r0 - mov r0,r23 - mov r23,r27 - mov r27,r0 - mov r0,r1 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - or r27,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r22 - std Z+5,r23 - std Z+6,r26 - std Z+7,r27 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r26,Z+14 - ldd r27,Z+15 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - bst r10,0 - lsr r13 - ror r12 - ror r11 - ror r10 - bld r13,7 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor 
r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r1 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - or r27,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - std Z+12,r22 - std Z+13,r23 - std Z+14,r26 - std Z+15,r27 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r26,Z+22 - ldd r27,Z+23 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r10 - mov r10,r11 - mov r11,r12 - mov r12,r13 - mov r13,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - or r13,r0 - mov r0,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r22 - mov r22,r23 - mov r23,r26 - mov r26,r27 - mov r27,r0 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - std Z+20,r22 - std Z+21,r23 - std Z+22,r26 - std Z+23,r27 - movw r10,r2 - movw r12,r4 - movw r14,r6 - movw r24,r8 - mov r0,r1 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - or r13,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r2 - eor r15,r3 - eor r24,r4 - eor r25,r5 - eor r10,r6 - eor r11,r7 - eor r12,r8 - eor r13,r9 - mov r0,r2 - mov r2,r4 - mov r4,r0 - mov r0,r3 - mov r3,r5 - mov r5,r0 - mov r0,r1 - lsr r5 - ror r4 - ror r3 - ror r2 - ror r0 - lsr r5 - ror r4 - ror r3 - ror r2 - ror r0 - lsr r5 - ror r4 - ror r3 - ror r2 - ror r0 - lsr r5 - ror r4 - ror r3 - ror r2 - ror r0 - or r5,r0 - mov r0,r6 - mov r6,r8 - mov r8,r0 - mov r0,r7 - mov r7,r9 - mov r9,r0 - mov r0,r1 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - or r9,r0 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - eor r6,r10 - eor r7,r11 - eor r8,r12 - eor r9,r13 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r22,Z+36 - ldd r23,Z+37 - ldd r26,Z+38 - ldd r27,Z+39 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r10 - mov r10,r12 - mov r12,r0 - mov r0,r11 - mov r11,r13 - mov r13,r0 - mov r0,r14 - mov r14,r24 - mov r24,r0 - mov r0,r15 - mov r15,r25 - mov r25,r0 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov 
r0,r19 - mov r19,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r22 - mov r22,r26 - mov r26,r0 - mov r0,r23 - mov r23,r27 - mov r27,r0 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+32,r18 - std Z+33,r19 - std Z+34,r20 - std Z+35,r21 - std Z+36,r22 - std Z+37,r23 - std Z+38,r26 - std Z+39,r27 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - ldd r22,Z+44 - ldd r23,Z+45 - ldd r26,Z+46 - ldd r27,Z+47 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r13 - mov r13,r12 - mov r12,r11 - mov r11,r10 - mov r10,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - or r13,r0 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+40,r18 - std Z+41,r19 - std Z+42,r20 - std Z+43,r21 - std Z+44,r22 - std Z+45,r23 - std Z+46,r26 - std Z+47,r27 - ldd r18,Z+48 - ldd r19,Z+49 - ldd r20,Z+50 - ldd r21,Z+51 - ldd r22,Z+52 - ldd r23,Z+53 - ldd r26,Z+54 - ldd r27,Z+55 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r10 - mov r10,r11 - mov r11,r12 - mov r12,r13 - mov r13,r0 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r27 - mov r27,r26 - mov r26,r23 - mov r23,r22 - mov r22,r0 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+48,r18 - std Z+49,r19 - std Z+50,r20 - std Z+51,r21 - std Z+52,r22 - std Z+53,r23 - std Z+54,r26 - std Z+55,r27 - ldd r18,Z+56 - ldd r19,Z+57 - ldd r20,Z+58 - ldd r21,Z+59 - ldd r22,Z+60 - ldd r23,Z+61 - ldd r26,Z+62 - ldd r27,Z+63 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r13 - mov r13,r12 - mov r12,r11 - mov r11,r10 - mov r10,r0 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov 
r15,r14 - mov r14,r0 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r27 - mov r27,r26 - mov r26,r23 - mov r23,r22 - mov r22,r0 - mov r0,r1 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - or r27,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+56,r18 - std Z+57,r19 - std Z+58,r20 - std Z+59,r21 - std Z+60,r22 - std Z+61,r23 - std Z+62,r26 - std Z+63,r27 - ldd r18,Y+1 - ldd r19,Y+2 - ldd r20,Y+3 - ldd r21,Y+4 - ldd r22,Y+5 - ldd r23,Y+6 - ldd r26,Y+7 - ldd r27,Y+8 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r10 - mov r10,r11 - mov r11,r12 - mov r12,r13 - mov r13,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - or r13,r0 - mov r0,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r22 - mov r22,r26 - mov r26,r0 - mov r0,r23 - mov r23,r27 - mov r27,r0 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - ldd r10,Y+9 - ldd r11,Y+10 - ldd r12,Y+11 - ldd r13,Y+12 - eor r10,r18 - eor r11,r19 - eor r12,r20 - eor r13,r21 - ldd r0,Z+12 - eor r10,r0 - ldd r0,Z+13 - eor r11,r0 - ldd r0,Z+14 - eor r12,r0 - ldd r0,Z+15 - eor r13,r0 - ldd r0,Z+32 - eor r10,r0 - ldd r0,Z+33 - eor r11,r0 - ldd r0,Z+34 - eor r12,r0 - ldd r0,Z+35 - eor r13,r0 - ldd r0,Z+52 - eor r10,r0 - ldd r0,Z+53 - eor r11,r0 - ldd r0,Z+54 - eor r12,r0 - ldd r0,Z+55 - eor r13,r0 - std Y+9,r10 - std Y+10,r11 - std Y+11,r12 - std Y+12,r13 - ldd r10,Y+13 - ldd r11,Y+14 - ldd r12,Y+15 - ldd r13,Y+16 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - ldd r0,Z+16 - eor r10,r0 - ldd r0,Z+17 - eor r11,r0 - ldd r0,Z+18 - eor r12,r0 - ldd r0,Z+19 - eor r13,r0 - ldd r0,Z+36 - eor r10,r0 - ldd r0,Z+37 - eor r11,r0 - ldd r0,Z+38 - eor r12,r0 - ldd r0,Z+39 - eor r13,r0 - ldd r0,Z+40 - eor r10,r0 - ldd r0,Z+41 - eor r11,r0 - ldd r0,Z+42 - eor r12,r0 - ldd r0,Z+43 - eor r13,r0 - std Y+13,r10 - std Y+14,r11 - std Y+15,r12 - std Y+16,r13 - ldd r10,Y+17 - ldd r11,Y+18 - ldd r12,Y+19 - ldd r13,Y+20 - eor r10,r2 - eor r11,r3 - eor r12,r4 - eor r13,r5 - ld r0,Z - eor r10,r0 - ldd r0,Z+1 - eor r11,r0 - ldd r0,Z+2 - eor r12,r0 - ldd r0,Z+3 - eor r13,r0 - ldd r0,Z+20 - eor r10,r0 - ldd r0,Z+21 - eor r11,r0 - ldd r0,Z+22 - eor r12,r0 - ldd r0,Z+23 - eor r13,r0 - ldd r0,Z+44 - eor r10,r0 - ldd r0,Z+45 - eor r11,r0 - ldd r0,Z+46 - eor r12,r0 - ldd r0,Z+47 - eor r13,r0 - std Y+17,r10 - std Y+18,r11 - std Y+19,r12 - std Y+20,r13 - ldd r10,Y+21 - ldd r11,Y+22 - ldd r12,Y+23 - ldd r13,Y+24 - eor r10,r6 - eor r11,r7 - eor r12,r8 - eor r13,r9 - ldd r0,Z+4 - eor r10,r0 - ldd r0,Z+5 - eor 
r11,r0 - ldd r0,Z+6 - eor r12,r0 - ldd r0,Z+7 - eor r13,r0 - ldd r0,Z+8 - eor r10,r0 - ldd r0,Z+9 - eor r11,r0 - ldd r0,Z+10 - eor r12,r0 - ldd r0,Z+11 - eor r13,r0 - ldd r0,Z+48 - eor r10,r0 - ldd r0,Z+49 - eor r11,r0 - ldd r0,Z+50 - eor r12,r0 - ldd r0,Z+51 - eor r13,r0 - std Y+21,r10 - std Y+22,r11 - std Y+23,r12 - std Y+24,r13 - ldd r10,Y+25 - dec r10 - std Y+25,r10 - breq 6623f - rjmp 40b -6623: - std Z+24,r2 - std Z+25,r3 - std Z+26,r4 - std Z+27,r5 - std Z+28,r6 - std Z+29,r7 - std Z+30,r8 - std Z+31,r9 - st -Z,r27 - st -Z,r26 - st -Z,r23 - st -Z,r22 - st -Z,r21 - st -Z,r20 - st -Z,r19 - st -Z,r18 - ldi r25,72 - add r30,r25 - adc r31,r1 - ldd r18,Y+9 - ldd r19,Y+10 - ldd r20,Y+11 - ldd r21,Y+12 - ldd r22,Y+13 - ldd r23,Y+14 - ldd r26,Y+15 - ldd r27,Y+16 - ldd r2,Y+17 - ldd r3,Y+18 - ldd r4,Y+19 - ldd r5,Y+20 - ldd r6,Y+21 - ldd r7,Y+22 - ldd r8,Y+23 - ldd r9,Y+24 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r22 - std Z+5,r23 - std Z+6,r26 - std Z+7,r27 - std Z+8,r2 - std Z+9,r3 - std Z+10,r4 - std Z+11,r5 - std Z+12,r6 - std Z+13,r7 - std Z+14,r8 - std Z+15,r9 - adiw r28,26 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size drysponge256_g, .-drysponge256_g - -#endif diff --git a/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/internal-drysponge.c b/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/internal-drysponge.c deleted file mode 100644 index 6dfe48c..0000000 --- a/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/internal-drysponge.c +++ /dev/null @@ -1,611 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "internal-drysponge.h" -#include - -#if !defined(__AVR__) - -/* Right rotations in bit-interleaved format */ -#define intRightRotateEven(x,bits) \ - (__extension__ ({ \ - uint32_t _x0 = (uint32_t)(x); \ - uint32_t _x1 = (uint32_t)((x) >> 32); \ - _x0 = rightRotate(_x0, (bits)); \ - _x1 = rightRotate(_x1, (bits)); \ - _x0 | (((uint64_t)_x1) << 32); \ - })) -#define intRightRotateOdd(x,bits) \ - (__extension__ ({ \ - uint32_t _x0 = (uint32_t)(x); \ - uint32_t _x1 = (uint32_t)((x) >> 32); \ - _x0 = rightRotate(_x0, ((bits) + 1) % 32); \ - _x1 = rightRotate(_x1, (bits)); \ - _x1 | (((uint64_t)_x0) << 32); \ - })) -#define intRightRotate1_64(x) \ - (__extension__ ({ \ - uint32_t _x0 = (uint32_t)(x); \ - uint32_t _x1 = (uint32_t)((x) >> 32); \ - _x0 = rightRotate1(_x0); \ - _x1 | (((uint64_t)_x0) << 32); \ - })) -#define intRightRotate2_64(x) (intRightRotateEven((x), 1)) -#define intRightRotate3_64(x) (intRightRotateOdd((x), 1)) -#define intRightRotate4_64(x) (intRightRotateEven((x), 2)) -#define intRightRotate5_64(x) (intRightRotateOdd((x), 2)) -#define intRightRotate6_64(x) (intRightRotateEven((x), 3)) -#define intRightRotate7_64(x) (intRightRotateOdd((x), 3)) -#define intRightRotate8_64(x) (intRightRotateEven((x), 4)) -#define intRightRotate9_64(x) (intRightRotateOdd((x), 4)) -#define intRightRotate10_64(x) (intRightRotateEven((x), 5)) -#define intRightRotate11_64(x) (intRightRotateOdd((x), 5)) -#define intRightRotate12_64(x) (intRightRotateEven((x), 6)) -#define intRightRotate13_64(x) (intRightRotateOdd((x), 6)) -#define intRightRotate14_64(x) (intRightRotateEven((x), 7)) -#define intRightRotate15_64(x) (intRightRotateOdd((x), 7)) -#define intRightRotate16_64(x) (intRightRotateEven((x), 8)) -#define intRightRotate17_64(x) (intRightRotateOdd((x), 8)) -#define intRightRotate18_64(x) (intRightRotateEven((x), 9)) -#define intRightRotate19_64(x) (intRightRotateOdd((x), 9)) -#define intRightRotate20_64(x) (intRightRotateEven((x), 10)) -#define intRightRotate21_64(x) (intRightRotateOdd((x), 10)) -#define intRightRotate22_64(x) (intRightRotateEven((x), 11)) -#define intRightRotate23_64(x) (intRightRotateOdd((x), 11)) -#define intRightRotate24_64(x) (intRightRotateEven((x), 12)) -#define intRightRotate25_64(x) (intRightRotateOdd((x), 12)) -#define intRightRotate26_64(x) (intRightRotateEven((x), 13)) -#define intRightRotate27_64(x) (intRightRotateOdd((x), 13)) -#define intRightRotate28_64(x) (intRightRotateEven((x), 14)) -#define intRightRotate29_64(x) (intRightRotateOdd((x), 14)) -#define intRightRotate30_64(x) (intRightRotateEven((x), 15)) -#define intRightRotate31_64(x) (intRightRotateOdd((x), 15)) -#define intRightRotate32_64(x) (intRightRotateEven((x), 16)) -#define intRightRotate33_64(x) (intRightRotateOdd((x), 16)) -#define intRightRotate34_64(x) (intRightRotateEven((x), 17)) -#define intRightRotate35_64(x) (intRightRotateOdd((x), 17)) -#define intRightRotate36_64(x) (intRightRotateEven((x), 18)) -#define intRightRotate37_64(x) (intRightRotateOdd((x), 18)) -#define intRightRotate38_64(x) (intRightRotateEven((x), 19)) -#define intRightRotate39_64(x) (intRightRotateOdd((x), 19)) -#define intRightRotate40_64(x) (intRightRotateEven((x), 20)) -#define intRightRotate41_64(x) (intRightRotateOdd((x), 20)) -#define intRightRotate42_64(x) (intRightRotateEven((x), 21)) -#define intRightRotate43_64(x) (intRightRotateOdd((x), 21)) -#define intRightRotate44_64(x) (intRightRotateEven((x), 22)) -#define intRightRotate45_64(x) (intRightRotateOdd((x), 22)) -#define intRightRotate46_64(x) 
(intRightRotateEven((x), 23)) -#define intRightRotate47_64(x) (intRightRotateOdd((x), 23)) -#define intRightRotate48_64(x) (intRightRotateEven((x), 24)) -#define intRightRotate49_64(x) (intRightRotateOdd((x), 24)) -#define intRightRotate50_64(x) (intRightRotateEven((x), 25)) -#define intRightRotate51_64(x) (intRightRotateOdd((x), 25)) -#define intRightRotate52_64(x) (intRightRotateEven((x), 26)) -#define intRightRotate53_64(x) (intRightRotateOdd((x), 26)) -#define intRightRotate54_64(x) (intRightRotateEven((x), 27)) -#define intRightRotate55_64(x) (intRightRotateOdd((x), 27)) -#define intRightRotate56_64(x) (intRightRotateEven((x), 28)) -#define intRightRotate57_64(x) (intRightRotateOdd((x), 28)) -#define intRightRotate58_64(x) (intRightRotateEven((x), 29)) -#define intRightRotate59_64(x) (intRightRotateOdd((x), 29)) -#define intRightRotate60_64(x) (intRightRotateEven((x), 30)) -#define intRightRotate61_64(x) (intRightRotateOdd((x), 30)) -#define intRightRotate62_64(x) (intRightRotateEven((x), 31)) -#define intRightRotate63_64(x) (intRightRotateOdd((x), 31)) - -void gascon128_core_round(gascon128_state_t *state, uint8_t round) -{ - uint64_t t0, t1, t2, t3, t4; - - /* Load the state into local varaibles */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); - uint64_t x4 = le_load_word64(state->B + 32); -#endif - - /* Add the round constant to the middle of the state */ - x2 ^= ((0x0F - round) << 4) | round; - - /* Substitution layer */ - x0 ^= x4; x2 ^= x1; x4 ^= x3; t0 = (~x0) & x1; t1 = (~x1) & x2; - t2 = (~x2) & x3; t3 = (~x3) & x4; t4 = (~x4) & x0; x0 ^= t1; - x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t0; x1 ^= x0; x3 ^= x2; - x0 ^= x4; x2 = ~x2; - - /* Linear diffusion layer */ - x0 ^= intRightRotate19_64(x0) ^ intRightRotate28_64(x0); - x1 ^= intRightRotate61_64(x1) ^ intRightRotate38_64(x1); - x2 ^= intRightRotate1_64(x2) ^ intRightRotate6_64(x2); - x3 ^= intRightRotate10_64(x3) ^ intRightRotate17_64(x3); - x4 ^= intRightRotate7_64(x4) ^ intRightRotate40_64(x4); - - /* Write the local variables back to the state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); - le_store_word64(state->B + 32, x4); -#endif -} - -void gascon256_core_round(gascon256_state_t *state, uint8_t round) -{ - uint64_t t0, t1, t2, t3, t4, t5, t6, t7, t8; - - /* Load the state into local varaibles */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; - uint64_t x5 = state->S[5]; - uint64_t x6 = state->S[6]; - uint64_t x7 = state->S[7]; - uint64_t x8 = state->S[8]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); - uint64_t x4 = le_load_word64(state->B + 32); - uint64_t x5 = le_load_word64(state->B + 40); - uint64_t x6 = le_load_word64(state->B + 48); - uint64_t x7 = le_load_word64(state->B + 56); - 
uint64_t x8 = le_load_word64(state->B + 64); -#endif - - /* Add the round constant to the middle of the state */ - x4 ^= ((0x0F - round) << 4) | round; - - /* Substitution layer */ - x0 ^= x8; x2 ^= x1; x4 ^= x3; x6 ^= x5; x8 ^= x7; t0 = (~x0) & x1; - t1 = (~x1) & x2; t2 = (~x2) & x3; t3 = (~x3) & x4; t4 = (~x4) & x5; - t5 = (~x5) & x6; t6 = (~x6) & x7; t7 = (~x7) & x8; t8 = (~x8) & x0; - x0 ^= t1; x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t5; x5 ^= t6; x6 ^= t7; - x7 ^= t8; x8 ^= t0; x1 ^= x0; x3 ^= x2; x5 ^= x4; x7 ^= x6; x0 ^= x8; - x4 = ~x4; - - /* Linear diffusion layer */ - x0 ^= intRightRotate19_64(x0) ^ intRightRotate28_64(x0); - x1 ^= intRightRotate61_64(x1) ^ intRightRotate38_64(x1); - x2 ^= intRightRotate1_64(x2) ^ intRightRotate6_64(x2); - x3 ^= intRightRotate10_64(x3) ^ intRightRotate17_64(x3); - x4 ^= intRightRotate7_64(x4) ^ intRightRotate40_64(x4); - x5 ^= intRightRotate31_64(x5) ^ intRightRotate26_64(x5); - x6 ^= intRightRotate53_64(x6) ^ intRightRotate58_64(x6); - x7 ^= intRightRotate9_64(x7) ^ intRightRotate46_64(x7); - x8 ^= intRightRotate43_64(x8) ^ intRightRotate50_64(x8); - - /* Write the local variables back to the state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; - state->S[5] = x5; - state->S[6] = x6; - state->S[7] = x7; - state->S[8] = x8; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); - le_store_word64(state->B + 32, x4); - le_store_word64(state->B + 40, x5); - le_store_word64(state->B + 48, x6); - le_store_word64(state->B + 56, x7); - le_store_word64(state->B + 64, x8); -#endif -} - -void drysponge128_g(drysponge128_state_t *state) -{ - unsigned round; - - /* Perform the first round. For each round we XOR the 16 bytes of - * the output data with the first 16 bytes of the state. And then - * XOR with the next 16 bytes of the state, rotated by 4 bytes */ - gascon128_core_round(&(state->c), 0); - state->r.W[0] = state->c.W[0] ^ state->c.W[5]; - state->r.W[1] = state->c.W[1] ^ state->c.W[6]; - state->r.W[2] = state->c.W[2] ^ state->c.W[7]; - state->r.W[3] = state->c.W[3] ^ state->c.W[4]; - - /* Perform the rest of the rounds */ - for (round = 1; round < state->rounds; ++round) { - gascon128_core_round(&(state->c), round); - state->r.W[0] ^= state->c.W[0] ^ state->c.W[5]; - state->r.W[1] ^= state->c.W[1] ^ state->c.W[6]; - state->r.W[2] ^= state->c.W[2] ^ state->c.W[7]; - state->r.W[3] ^= state->c.W[3] ^ state->c.W[4]; - } -} - -void drysponge256_g(drysponge256_state_t *state) -{ - unsigned round; - - /* Perform the first round. For each round we XOR the 16 bytes of - * the output data with the first 16 bytes of the state. And then - * XOR with the next 16 bytes of the state, rotated by 4 bytes. - * And so on for a total of 64 bytes XOR'ed into the output data. 
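Editor's note: the bit-interleaved rotation macros used by the core rounds above are dense. The following standalone sketch (not part of the patch; rightRotate32 and int_right_rotate_odd are local stand-ins for the library's rightRotate and intRightRotateOdd macros) spells out the same idea: in the interleaved representation the low half holds the even-numbered bits and the high half the odd-numbered bits, so a rotation by the odd amount 2*bits+1 swaps the roles of the two halves and rotates the wrapping half one extra position.

#include <stdint.h>
#include <stdio.h>

/* 32-bit right rotation, standing in for the library's rightRotate macro. */
static uint32_t rightRotate32(uint32_t x, unsigned bits)
{
    bits &= 31;
    return bits ? ((x >> bits) | (x << (32 - bits))) : x;
}

/* Right rotation by the odd amount 2*bits + 1 of a 64-bit word stored in
 * bit-interleaved form, mirroring intRightRotateOdd() above: the halves
 * swap, and the half that crosses the boundary rotates one bit further. */
static uint64_t int_right_rotate_odd(uint64_t x, unsigned bits)
{
    uint32_t x0 = (uint32_t)x;          /* even-numbered bits */
    uint32_t x1 = (uint32_t)(x >> 32);  /* odd-numbered bits  */
    x0 = rightRotate32(x0, (bits + 1) % 32);
    x1 = rightRotate32(x1, bits);
    return x1 | (((uint64_t)x0) << 32);
}

int main(void)
{
    /* intRightRotate7_64(x) corresponds to int_right_rotate_odd(x, 3). */
    uint64_t x = 0x0123456789abcdefULL;
    printf("%016llx\n", (unsigned long long)int_right_rotate_odd(x, 3));
    return 0;
}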
*/ - gascon256_core_round(&(state->c), 0); - state->r.W[0] = state->c.W[0] ^ state->c.W[5] ^ - state->c.W[10] ^ state->c.W[15]; - state->r.W[1] = state->c.W[1] ^ state->c.W[6] ^ - state->c.W[11] ^ state->c.W[12]; - state->r.W[2] = state->c.W[2] ^ state->c.W[7] ^ - state->c.W[8] ^ state->c.W[13]; - state->r.W[3] = state->c.W[3] ^ state->c.W[4] ^ - state->c.W[9] ^ state->c.W[14]; - - /* Perform the rest of the rounds */ - for (round = 1; round < state->rounds; ++round) { - gascon256_core_round(&(state->c), round); - state->r.W[0] ^= state->c.W[0] ^ state->c.W[5] ^ - state->c.W[10] ^ state->c.W[15]; - state->r.W[1] ^= state->c.W[1] ^ state->c.W[6] ^ - state->c.W[11] ^ state->c.W[12]; - state->r.W[2] ^= state->c.W[2] ^ state->c.W[7] ^ - state->c.W[8] ^ state->c.W[13]; - state->r.W[3] ^= state->c.W[3] ^ state->c.W[4] ^ - state->c.W[9] ^ state->c.W[14]; - } -} - -#endif /* !__AVR__ */ - -void drysponge128_g_core(drysponge128_state_t *state) -{ - unsigned round; - for (round = 0; round < state->rounds; ++round) - gascon128_core_round(&(state->c), round); -} - -void drysponge256_g_core(drysponge256_state_t *state) -{ - unsigned round; - for (round = 0; round < state->rounds; ++round) - gascon256_core_round(&(state->c), round); -} - -/** - * \fn uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index) - * \brief Selects an element of x in constant time. - * - * \param x Points to the four elements of x. - * \param index Index of which element to extract between 0 and 3. - * - * \return The selected element of x. - */ -#if !defined(__AVR__) -STATIC_INLINE uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index) -{ - /* We need to be careful how we select each element of x because - * we are doing a data-dependent fetch here. Do the fetch in a way - * that should avoid cache timing issues by fetching every element - * of x and masking away the ones we don't want. - * - * There is a possible side channel here with respect to power analysis. - * The "mask" value will be all-ones for the selected index and all-zeroes - * for the other indexes. This may show up as different power consumption - * for the "result ^= x[i] & mask" statement when i is the selected index. - * Such a side channel could in theory allow reading the plaintext input - * to the cipher by analysing the CPU's power consumption. - * - * The DryGASCON specification acknowledges the possibility of plaintext - * recovery in section 7.4. For software mitigation the specification - * suggests randomization of the indexes into c and x and randomization - * of the order of processing words. We aren't doing that here yet. - * Patches welcome to fix this. - */ - uint32_t mask = -((uint32_t)((0x04 - index) >> 2)); - uint32_t result = x[0] & mask; - mask = -((uint32_t)((0x04 - (index ^ 0x01)) >> 2)); - result ^= x[1] & mask; - mask = -((uint32_t)((0x04 - (index ^ 0x02)) >> 2)); - result ^= x[2] & mask; - mask = -((uint32_t)((0x04 - (index ^ 0x03)) >> 2)); - return result ^ (x[3] & mask); -} -#else -/* AVR is more or less immune to cache timing issues because it doesn't - * have anything like an L1 or L2 cache. Select the word directly */ -#define drysponge_select_x(x, index) ((x)[(index)]) -#endif - -/** - * \brief Mixes a 32-bit value into the DrySPONGE128 state. - * - * \param state DrySPONGE128 state. - * \param data The data to be mixed in the bottom 10 bits. 
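Editor's note: the constant-time masking in drysponge_select_x above can be checked with a short standalone sketch (the select_x helper and the test harness below are illustrative, not part of the patch). For an index in 0..3, the expression (0x04 - (index ^ i)) >> 2 is 1 exactly when index == i, so negating it yields an all-ones mask for the selected word and all-zeroes for the others; the original unrolls this, the sketch uses a loop form of the same idea.

#include <stdint.h>
#include <stdio.h>

/* Constant-time selection of x[index] for index in 0..3: fetch every word
 * and mask away all but the selected one. */
static uint32_t select_x(const uint32_t x[4], uint8_t index)
{
    uint32_t result = 0;
    for (uint8_t i = 0; i < 4; ++i) {
        /* (0x04 - (index ^ i)) >> 2 is 1 iff index == i; negate for a mask. */
        uint32_t mask = -((uint32_t)((0x04 - (index ^ i)) >> 2));
        result ^= x[i] & mask;
    }
    return result;
}

int main(void)
{
    const uint32_t x[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
    for (uint8_t i = 0; i < 4; ++i)
        printf("x[%u] -> %08lx\n", (unsigned)i, (unsigned long)select_x(x, i));
    return 0;
}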
- */ -static void drysponge128_mix_phase_round - (drysponge128_state_t *state, uint32_t data) -{ - /* Mix in elements from x according to the 2-bit indexes in the data */ - state->c.W[0] ^= drysponge_select_x(state->x.W, data & 0x03); - state->c.W[2] ^= drysponge_select_x(state->x.W, (data >> 2) & 0x03); - state->c.W[4] ^= drysponge_select_x(state->x.W, (data >> 4) & 0x03); - state->c.W[6] ^= drysponge_select_x(state->x.W, (data >> 6) & 0x03); - state->c.W[8] ^= drysponge_select_x(state->x.W, (data >> 8) & 0x03); -} - -/** - * \brief Mixes a 32-bit value into the DrySPONGE256 state. - * - * \param state DrySPONGE256 state. - * \param data The data to be mixed in the bottom 18 bits. - */ -static void drysponge256_mix_phase_round - (drysponge256_state_t *state, uint32_t data) -{ - /* Mix in elements from x according to the 2-bit indexes in the data */ - state->c.W[0] ^= drysponge_select_x(state->x.W, data & 0x03); - state->c.W[2] ^= drysponge_select_x(state->x.W, (data >> 2) & 0x03); - state->c.W[4] ^= drysponge_select_x(state->x.W, (data >> 4) & 0x03); - state->c.W[6] ^= drysponge_select_x(state->x.W, (data >> 6) & 0x03); - state->c.W[8] ^= drysponge_select_x(state->x.W, (data >> 8) & 0x03); - state->c.W[10] ^= drysponge_select_x(state->x.W, (data >> 10) & 0x03); - state->c.W[12] ^= drysponge_select_x(state->x.W, (data >> 12) & 0x03); - state->c.W[14] ^= drysponge_select_x(state->x.W, (data >> 14) & 0x03); - state->c.W[16] ^= drysponge_select_x(state->x.W, (data >> 16) & 0x03); -} - -/** - * \brief Mixes an input block into a DrySPONGE128 state. - * - * \param state The DrySPONGE128 state. - * \param data Full rate block containing the input data. - */ -static void drysponge128_mix_phase - (drysponge128_state_t *state, const unsigned char data[DRYSPONGE128_RATE]) -{ - /* Mix 10-bit groups into the output, with the domain - * separator added to the last two groups */ - drysponge128_mix_phase_round - (state, data[0] | (((uint32_t)(data[1])) << 8)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[1] >> 2) | (((uint32_t)(data[2])) << 6)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[2] >> 4) | (((uint32_t)(data[3])) << 4)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[3] >> 6) | (((uint32_t)(data[4])) << 2)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, data[5] | (((uint32_t)(data[6])) << 8)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[6] >> 2) | (((uint32_t)(data[7])) << 6)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[7] >> 4) | (((uint32_t)(data[8])) << 4)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[8] >> 6) | (((uint32_t)(data[9])) << 2)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, data[10] | (((uint32_t)(data[11])) << 8)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[11] >> 2) | (((uint32_t)(data[12])) << 6)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[12] >> 4) | (((uint32_t)(data[13])) << 4)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, ((data[13] >> 6) | (((uint32_t)(data[14])) << 2))); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round(state, data[15] ^ state->domain); - gascon128_core_round(&(state->c), 0); - 
drysponge128_mix_phase_round(state, state->domain >> 10); - - /* Revert to the default domain separator for the next block */ - state->domain = 0; -} - -/** - * \brief Mixes an input block into a DrySPONGE256 state. - * - * \param state The DrySPONGE256 state. - * \param data Full rate block containing the input data. - */ -static void drysponge256_mix_phase - (drysponge256_state_t *state, const unsigned char data[DRYSPONGE256_RATE]) -{ - /* Mix 18-bit groups into the output, with the domain in the last group */ - drysponge256_mix_phase_round - (state, data[0] | (((uint32_t)(data[1])) << 8) | - (((uint32_t)(data[2])) << 16)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[2] >> 2) | (((uint32_t)(data[3])) << 6) | - (((uint32_t)(data[4])) << 14)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[4] >> 4) | (((uint32_t)(data[5])) << 4) | - (((uint32_t)(data[6])) << 12)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[6] >> 6) | (((uint32_t)(data[7])) << 2) | - (((uint32_t)(data[8])) << 10)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, data[9] | (((uint32_t)(data[10])) << 8) | - (((uint32_t)(data[11])) << 16)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[11] >> 2) | (((uint32_t)(data[12])) << 6) | - (((uint32_t)(data[13])) << 14)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[13] >> 4) | (((uint32_t)(data[14])) << 4) | - (((uint32_t)(data[15])) << 12)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[15] >> 6) ^ state->domain); - - /* Revert to the default domain separator for the next block */ - state->domain = 0; -} - -void drysponge128_f_absorb - (drysponge128_state_t *state, const unsigned char *input, unsigned len) -{ - if (len >= DRYSPONGE128_RATE) { - drysponge128_mix_phase(state, input); - } else { - unsigned char padded[DRYSPONGE128_RATE]; - memcpy(padded, input, len); - padded[len] = 0x01; - memset(padded + len + 1, 0, DRYSPONGE128_RATE - len - 1); - drysponge128_mix_phase(state, padded); - } -} - -void drysponge256_f_absorb - (drysponge256_state_t *state, const unsigned char *input, unsigned len) -{ - if (len >= DRYSPONGE256_RATE) { - drysponge256_mix_phase(state, input); - } else { - unsigned char padded[DRYSPONGE256_RATE]; - memcpy(padded, input, len); - padded[len] = 0x01; - memset(padded + len + 1, 0, DRYSPONGE256_RATE - len - 1); - drysponge256_mix_phase(state, padded); - } -} - -/** - * \brief Determine if some of the words of an "x" value are identical. - * - * \param x Points to the "x" buffer to check. - * - * \return Non-zero if some of the words are the same, zero if they are - * distinct from each other. - * - * We try to perform the check in constant time to avoid giving away - * any information about the value of the key. 
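Editor's note: stepping back to the absorb functions above, the short-block padding used by drysponge128_f_absorb (copy the data, append a single 0x01 byte, zero-fill to the 16-byte rate) can be sketched on its own as follows; RATE and pad_rate_block are illustrative names, not identifiers from the patch.

#include <string.h>
#include <stdio.h>

#define RATE 16  /* DRYSPONGE128_RATE in the code above */

/* Pad a partial input block of "len" bytes (len < RATE) into a full rate
 * block: data, then 0x01, then zeroes. */
static void pad_rate_block(unsigned char padded[RATE],
                           const unsigned char *input, unsigned len)
{
    memcpy(padded, input, len);
    padded[len] = 0x01;
    memset(padded + len + 1, 0, RATE - len - 1);
}

int main(void)
{
    unsigned char block[RATE];
    pad_rate_block(block, (const unsigned char *)"abc", 3);
    for (unsigned i = 0; i < RATE; ++i)
        printf("%02x ", block[i]);   /* 61 62 63 01 00 ... 00 */
    printf("\n");
    return 0;
}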
- */ -static int drysponge_x_words_are_same(const uint32_t x[4]) -{ - unsigned i, j; - int result = 0; - for (i = 0; i < 3; ++i) { - for (j = i + 1; j < 4; ++j) { - uint32_t check = x[i] ^ x[j]; - result |= (int)((0x100000000ULL - check) >> 32); - } - } - return result; -} - -void drysponge128_setup - (drysponge128_state_t *state, const unsigned char *key, - const unsigned char *nonce, int final_block) -{ - /* Fill the GASCON-128 state with repeated copies of the key */ - memcpy(state->c.B, key, 16); - memcpy(state->c.B + 16, key, 16); - memcpy(state->c.B + 32, key, 8); - - /* Generate the "x" value for the state. All four words of "x" - * must be unique because they will be used in drysponge_select_x() - * as stand-ins for the bit pairs 00, 01, 10, and 11. - * - * Run the core block operation over and over until "x" is unique. - * Technically the runtime here is key-dependent and not constant. - * If the input key is randomized, this should only take 1 round - * on average so it is "almost constant time". - */ - do { - gascon128_core_round(&(state->c), 0); - } while (drysponge_x_words_are_same(state->c.W)); - memcpy(state->x.W, state->c.W, sizeof(state->x)); - - /* Replace the generated "x" value in the state with the key prefix */ - memcpy(state->c.W, key, sizeof(state->x)); - - /* Absorb the nonce into the state with an increased number of rounds */ - state->rounds = DRYSPONGE128_INIT_ROUNDS; - state->domain = DRYDOMAIN128_NONCE; - if (final_block) - state->domain |= DRYDOMAIN128_FINAL; - drysponge128_f_absorb(state, nonce, 16); - drysponge128_g(state); - - /* Set up the normal number of rounds for future operations */ - state->rounds = DRYSPONGE128_ROUNDS; -} - -void drysponge256_setup - (drysponge256_state_t *state, const unsigned char *key, - const unsigned char *nonce, int final_block) -{ - /* Fill the GASCON-256 state with repeated copies of the key */ - memcpy(state->c.B, key, 32); - memcpy(state->c.B + 32, key, 32); - memcpy(state->c.B + 64, key, 8); - - /* Generate the "x" value for the state */ - do { - gascon256_core_round(&(state->c), 0); - } while (drysponge_x_words_are_same(state->c.W)); - memcpy(state->x.W, state->c.W, sizeof(state->x)); - - /* Replace the generated "x" value in the state with the key prefix */ - memcpy(state->c.W, key, sizeof(state->x)); - - /* Absorb the nonce into the state with an increased number of rounds */ - state->rounds = DRYSPONGE256_INIT_ROUNDS; - state->domain = DRYDOMAIN256_NONCE; - if (final_block) - state->domain |= DRYDOMAIN256_FINAL; - drysponge256_f_absorb(state, nonce, 16); - drysponge256_g(state); - - /* Set up the normal number of rounds for future operations */ - state->rounds = DRYSPONGE256_ROUNDS; -} diff --git a/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/internal-drysponge.h b/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/internal-drysponge.h deleted file mode 100644 index 05b0c16..0000000 --- a/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/internal-drysponge.h +++ /dev/null @@ -1,345 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_DRYSPONGE_H -#define LW_INTERNAL_DRYSPONGE_H - -#include "internal-util.h" - -/** - * \file internal-drysponge.h - * \brief Internal implementation of DrySPONGE for the DryGASCON cipher. - * - * References: https://github.com/sebastien-riou/DryGASCON - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the GASCON-128 permutation state in bytes. - */ -#define GASCON128_STATE_SIZE 40 - -/** - * \brief Size of the GASCON-256 permutation state in bytes. - */ -#define GASCON256_STATE_SIZE 72 - -/** - * \brief Rate of absorption and squeezing for DrySPONGE128. - */ -#define DRYSPONGE128_RATE 16 - -/** - * \brief Rate of absorption and squeezing for DrySPONGE256. - */ -#define DRYSPONGE256_RATE 16 - -/** - * \brief Size of the "x" value for DrySPONGE128. - */ -#define DRYSPONGE128_XSIZE 16 - -/** - * \brief Size of the "x" value for DrySPONGE256. - */ -#define DRYSPONGE256_XSIZE 16 - -/** - * \brief Normal number of rounds for DrySPONGE128 when absorbing - * and squeezing data. - */ -#define DRYSPONGE128_ROUNDS 7 - -/** - * \brief Number of rounds for DrySPONGE128 during initialization. - */ -#define DRYSPONGE128_INIT_ROUNDS 11 - -/** - * \brief Normal number of rounds for DrySPONGE256 when absorbing - * and squeezing data. - */ -#define DRYSPONGE256_ROUNDS 8 - -/** - * \brief Number of rounds for DrySPONGE256 during initialization. - */ -#define DRYSPONGE256_INIT_ROUNDS 12 - -/** - * \brief DrySPONGE128 domain bit for a padded block. - */ -#define DRYDOMAIN128_PADDED (1 << 8) - -/** - * \brief DrySPONGE128 domain bit for a final block. - */ -#define DRYDOMAIN128_FINAL (1 << 9) - -/** - * \brief DrySPONGE128 domain value for processing the nonce. - */ -#define DRYDOMAIN128_NONCE (1 << 10) - -/** - * \brief DrySPONGE128 domain value for processing the associated data. - */ -#define DRYDOMAIN128_ASSOC_DATA (2 << 10) - -/** - * \brief DrySPONGE128 domain value for processing the message. - */ -#define DRYDOMAIN128_MESSAGE (3 << 10) - -/** - * \brief DrySPONGE256 domain bit for a padded block. - */ -#define DRYDOMAIN256_PADDED (1 << 2) - -/** - * \brief DrySPONGE256 domain bit for a final block. - */ -#define DRYDOMAIN256_FINAL (1 << 3) - -/** - * \brief DrySPONGE256 domain value for processing the nonce. - */ -#define DRYDOMAIN256_NONCE (1 << 4) - -/** - * \brief DrySPONGE256 domain value for processing the associated data. 
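Editor's note: as a concrete illustration of how the DrySPONGE128 domain bits defined above combine (constants copied from those definitions; the composition mirrors what drysponge128_setup does when the nonce is also the final block), a minimal sketch:

#include <stdint.h>
#include <stdio.h>

#define DRYDOMAIN128_PADDED (1 << 8)
#define DRYDOMAIN128_FINAL  (1 << 9)
#define DRYDOMAIN128_NONCE  (1 << 10)

int main(void)
{
    /* Domain for a nonce block that is also the last block of input. */
    uint32_t domain = DRYDOMAIN128_NONCE | DRYDOMAIN128_FINAL;
    printf("domain = 0x%03lx\n", (unsigned long)domain);  /* prints 0x600 */
    return 0;
}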
- */ -#define DRYDOMAIN256_ASSOC_DATA (2 << 4) - -/** - * \brief DrySPONGE256 domain value for processing the message. - */ -#define DRYDOMAIN256_MESSAGE (3 << 4) - -/** - * \brief Internal state of the GASCON-128 permutation. - */ -typedef union -{ - uint64_t S[GASCON128_STATE_SIZE / 8]; /**< 64-bit words of the state */ - uint32_t W[GASCON128_STATE_SIZE / 4]; /**< 32-bit words of the state */ - uint8_t B[GASCON128_STATE_SIZE]; /**< Bytes of the state */ - -} gascon128_state_t; - -/** - * \brief Internal state of the GASCON-256 permutation. - */ -typedef union -{ - uint64_t S[GASCON256_STATE_SIZE / 8]; /**< 64-bit words of the state */ - uint32_t W[GASCON256_STATE_SIZE / 4]; /**< 32-bit words of the state */ - uint8_t B[GASCON256_STATE_SIZE]; /**< Bytes of the state */ - -} gascon256_state_t; - -/** - * \brief Structure of a rate block for DrySPONGE128. - */ -typedef union -{ - uint64_t S[DRYSPONGE128_RATE / 8]; /**< 64-bit words of the rate */ - uint32_t W[DRYSPONGE128_RATE / 4]; /**< 32-bit words of the rate */ - uint8_t B[DRYSPONGE128_RATE]; /**< Bytes of the rate */ - -} drysponge128_rate_t; - -/** - * \brief Structure of a rate block for DrySPONGE256. - */ -typedef union -{ - uint64_t S[DRYSPONGE256_RATE / 8]; /**< 64-bit words of the rate */ - uint32_t W[DRYSPONGE256_RATE / 4]; /**< 32-bit words of the rate */ - uint8_t B[DRYSPONGE256_RATE]; /**< Bytes of the rate */ - -} drysponge256_rate_t; - -/** - * \brief Structure of the "x" value for DrySPONGE128. - */ -typedef union -{ - uint64_t S[DRYSPONGE128_XSIZE / 8]; /**< 64-bit words of the rate */ - uint32_t W[DRYSPONGE128_XSIZE / 4]; /**< 32-bit words of the rate */ - uint8_t B[DRYSPONGE128_XSIZE]; /**< Bytes of the rate */ - -} drysponge128_x_t; - -/** - * \brief Structure of the "x" value for DrySPONGE256. - */ -typedef union -{ - uint64_t S[DRYSPONGE256_XSIZE / 8]; /**< 64-bit words of the rate */ - uint32_t W[DRYSPONGE256_XSIZE / 4]; /**< 32-bit words of the rate */ - uint8_t B[DRYSPONGE256_XSIZE]; /**< Bytes of the rate */ - -} drysponge256_x_t; - -/** - * \brief Structure of the rolling DrySPONGE128 state. - */ -typedef struct -{ - gascon128_state_t c; /**< GASCON-128 state for the capacity */ - drysponge128_rate_t r; /**< Buffer for a rate block of data */ - drysponge128_x_t x; /**< "x" value for the sponge */ - uint32_t domain; /**< Domain value to mix on next F call */ - uint32_t rounds; /**< Number of rounds for next G call */ - -} drysponge128_state_t; - -/** - * \brief Structure of the rolling DrySPONGE256 state. - */ -typedef struct -{ - gascon256_state_t c; /**< GASCON-256 state for the capacity */ - drysponge256_rate_t r; /**< Buffer for a rate block of data */ - drysponge256_x_t x; /**< "x" value for the sponge */ - uint32_t domain; /**< Domain value to mix on next F call */ - uint32_t rounds; /**< Number of rounds for next G call */ - -} drysponge256_state_t; - -/** - * \brief Permutes the GASCON-128 state using one iteration of CoreRound. - * - * \param state The GASCON-128 state to be permuted. - * \param round The round number. - * - * The input and output \a state will be in little-endian byte order. - */ -void gascon128_core_round(gascon128_state_t *state, uint8_t round); - -/** - * \brief Permutes the GASCON-256 state using one iteration of CoreRound. - * - * \param state The GASCON-256 state to be permuted. - * \param round The round number. - * - * The input and output \a state will be in little-endian byte order. 
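Editor's note: the state unions above overlay one buffer with 64-bit, 32-bit and byte views. The sketch below (STATE_SIZE and state_t are illustrative names; the overlay matches gascon128_state_t) shows the aliasing, on the assumption of a little-endian host, which is the layout the LW_UTIL_LITTLE_ENDIAN fast path relies on.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STATE_SIZE 40  /* GASCON128_STATE_SIZE above */

/* Same overlay idea as gascon128_state_t: one buffer viewed as 64-bit
 * words, 32-bit words, or raw bytes. */
typedef union {
    uint64_t S[STATE_SIZE / 8];
    uint32_t W[STATE_SIZE / 4];
    uint8_t  B[STATE_SIZE];
} state_t;

int main(void)
{
    state_t st;
    memset(&st, 0, sizeof(st));
    st.B[0] = 0x01;
    st.B[1] = 0x02;
    /* On a little-endian host this prints 00000201. */
    printf("%08lx\n", (unsigned long)st.W[0]);
    return 0;
}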
- */ -void gascon256_core_round(gascon256_state_t *state, uint8_t round); - -/** - * \brief Performs the DrySPONGE128 G function which runs the core - * rounds and squeezes data out of the GASGON-128 state. - * - * \param state The DrySPONGE128 state. - * - * The data that is squeezed out will be in state->r on exit. - */ -void drysponge128_g(drysponge128_state_t *state); - -/** - * \brief Performs the DrySPONGE256 G function which runs the core - * rounds and squeezes data out of the GASGON-256 state. - * - * \param state The DrySPONGE256 state. - * - * The data that is squeezed out will be in state->r on exit. - */ -void drysponge256_g(drysponge256_state_t *state); - -/** - * \brief Performs the DrySPONGE128 G function which runs the core - * rounds but does not squeeze out any output. - * - * \param state The DrySPONGE128 state. - */ -void drysponge128_g_core(drysponge128_state_t *state); - -/** - * \brief Performs the DrySPONGE256 G function which runs the core - * rounds but does not squeeze out any output. - * - * \param state The DrySPONGE256 state. - */ -void drysponge256_g_core(drysponge256_state_t *state); - -/** - * \brief Performs the absorption phase of the DrySPONGE128 F function. - * - * \param state The DrySPONGE128 state. - * \param input The block of input data to incorporate into the state. - * \param len The length of the input block, which must be less than - * or equal to DRYSPONGE128_RATE. Smaller input blocks will be padded. - * - * This function must be followed by a call to drysponge128_g() or - * drysponge128_g_core() to perform the full F operation. - */ -void drysponge128_f_absorb - (drysponge128_state_t *state, const unsigned char *input, unsigned len); - -/** - * \brief Performs the absorption phase of the DrySPONGE256 F function. - * - * \param state The DrySPONGE256 state. - * \param input The block of input data to incorporate into the state. - * \param len The length of the input block, which must be less than - * or equal to DRYSPONGE256_RATE. Smaller input blocks will be padded. - * - * This function must be followed by a call to drysponge256_g() or - * drysponge256_g_core() to perform the full F operation. - */ -void drysponge256_f_absorb - (drysponge256_state_t *state, const unsigned char *input, unsigned len); - -/** - * \brief Set up a DrySPONGE128 state to begin encryption or decryption. - * - * \param state The DrySPONGE128 state. - * \param key Points to the 16 bytes of the key. - * \param nonce Points to the 16 bytes of the nonce. - * \param final_block Non-zero if after key setup there will be no more blocks. - */ -void drysponge128_setup - (drysponge128_state_t *state, const unsigned char *key, - const unsigned char *nonce, int final_block); - -/** - * \brief Set up a DrySPONGE256 state to begin encryption or decryption. - * - * \param state The DrySPONGE256 state. - * \param key Points to the 32 bytes of the key. - * \param nonce Points to the 16 bytes of the nonce. - * \param final_block Non-zero if after key setup there will be no more blocks. 
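Editor's note: the documentation above states that each call to drysponge128_f_absorb must be followed by drysponge128_g or drysponge128_g_core to complete the F operation, with the squeezed output left in state->r. A minimal usage sketch of that pattern (absorb_one_block_example is a hypothetical helper; it assumes the retained rhys copy of internal-drysponge.h is on the include path, and it shows only the absorb-then-G pattern, not the full AEAD flow):

#include "internal-drysponge.h"

/* Set up with a 16-byte key and nonce, absorb one full rate block, then
 * run G so the squeezed data lands in state.r, as documented above. */
static void absorb_one_block_example(const unsigned char key[16],
                                     const unsigned char nonce[16],
                                     const unsigned char block[16])
{
    drysponge128_state_t state;
    drysponge128_setup(&state, key, nonce, 0);  /* more blocks will follow */
    drysponge128_f_absorb(&state, block, 16);   /* absorption phase of F   */
    drysponge128_g(&state);                     /* output is now in state.r */
}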
- */ -void drysponge256_setup - (drysponge256_state_t *state, const unsigned char *key, - const unsigned char *nonce, int final_block); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/internal-util.h b/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/drygascon/Implementations/crypto_aead/drygascon128/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include <stdint.h> - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines.
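As the comments in the deleted internal-drysponge.h above spell out, one DrySPONGE "F" step is absorb-then-G: drysponge128_f_absorb() only folds the input block into the state, and the following drysponge128_g() (or drysponge128_g_core() when no squeezed output is needed) runs the core rounds. A minimal calling-pattern sketch, assuming those declarations; the wrapper name and buffer handling are illustrative and not part of the library:

/* Illustrative only: one full F step on a single input block. */
static void drysponge128_f_step
    (drysponge128_state_t *state, const unsigned char *block,
     unsigned len, int want_output)
{
    /* len must be <= DRYSPONGE128_RATE; shorter blocks are padded inside */
    drysponge128_f_absorb(state, block, len);
    if (want_output)
        drysponge128_g(state);      /* squeezed data ends up in state->r */
    else
        drysponge128_g_core(state); /* rounds only, nothing squeezed */
}

A typical session would first call drysponge128_setup(&state, key, nonce, final_block) and then apply this step to each associated-data and message block, with the squeezed block left in state->r as the comments above describe.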
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
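The load/store helpers above are plain shift-and-mask macros, so they behave identically on little- and big-endian hosts; the endianness probe earlier in this header only matters for code that wants to reinterpret byte buffers directly. A short round-trip sketch, assuming the macros above (the values are arbitrary):

/* Illustrative only: round-trip through the little-endian helpers and
 * combine two buffers with lw_xor_block_2_src(). */
static void util_macros_demo(void)
{
    unsigned char buf[4];
    unsigned char a[8] = {0}, b[8] = {1, 1, 1, 1, 1, 1, 1, 1}, out[8];
    uint32_t x = 0x11223344, y;

    le_store_word32(buf, x);            /* buf = 44 33 22 11 */
    y = le_load_word32(buf);            /* y == 0x11223344 again */

    lw_xor_block_2_src(out, a, b, 8);   /* out[i] = a[i] ^ b[i] */
    (void)y;
    (void)out;
}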
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
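The composed variants above lean on two identities: any rotation can be built from a byte-aligned rotation (cheap on AVR) plus a few single-bit rotations (at most four in the macros above), and rotating left by n is the same as rotating right by 32 - n. One caveat worth keeping in mind is that the generic leftRotate()/rightRotate() forms are only valid for 1 <= bits <= 31, since a shift by 32 is undefined behaviour in C. A small consistency sketch, assuming the macros above:

/* Illustrative only: with the composed macros, leftRotate5(x) is
 * "rotate left by 8, then right by 1 three times", which must agree
 * with a direct rotate-left by 5. */
static int left_rotate5_matches(uint32_t x)
{
    uint32_t direct   = (x << 5) | (x >> 27);
    uint32_t composed = leftRotate5(x);
    return direct == composed;   /* expected to hold for every x */
}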
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/drygascon/Implementations/crypto_aead/drygascon128/rhys/internal-drysponge-avr.S b/drygascon/Implementations/crypto_aead/drygascon128/rhys/internal-drysponge-avr.S new file mode 100644 index 0000000..84d0ff8 --- /dev/null +++ b/drygascon/Implementations/crypto_aead/drygascon128/rhys/internal-drysponge-avr.S @@ -0,0 +1,5092 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global gascon128_core_round + .type gascon128_core_round, @function +gascon128_core_round: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ldi r18,15 + sub r18,r22 + swap r18 + or r22,r18 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+16 + ldd r5,Z+17 + ldd r6,Z+18 + ldd r7,Z+19 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + eor r4,r22 + ldd r23,Z+8 + ldd r12,Z+24 + ldd r13,Z+32 + eor r18,r13 + eor r4,r23 + eor r13,r12 + mov r14,r23 + mov r0,r18 + com r0 + and r14,r0 + mov r15,r4 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r4 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r18 + mov r0,r13 + com r0 + and r16,r0 + eor r18,r15 + eor r23,r24 + eor r4,r25 + eor r12,r16 + eor r13,r14 + eor r23,r18 + eor r12,r4 + eor r18,r13 + com r4 + st Z,r18 + std Z+8,r23 + std Z+24,r12 + std Z+32,r13 + ldd r23,Z+9 + ldd r12,Z+25 + ldd r13,Z+33 + eor r19,r13 + eor r5,r23 + eor r13,r12 + mov r14,r23 + mov r0,r19 + com r0 + and r14,r0 + mov r15,r5 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r5 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r19 + mov r0,r13 + com r0 + and r16,r0 + eor r19,r15 + eor r23,r24 + eor r5,r25 + eor r12,r16 + eor r13,r14 + eor r23,r19 + eor r12,r5 + eor r19,r13 + com r5 + std Z+1,r19 + std Z+9,r23 + std Z+25,r12 + std Z+33,r13 + ldd r23,Z+10 + ldd r12,Z+26 + ldd r13,Z+34 + eor r20,r13 + eor r6,r23 + eor r13,r12 + mov r14,r23 + mov r0,r20 + com r0 + and r14,r0 + mov r15,r6 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r6 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r20 + mov r0,r13 + com r0 + and r16,r0 + eor r20,r15 + eor r23,r24 + eor r6,r25 + eor r12,r16 + eor r13,r14 + eor r23,r20 + eor r12,r6 + eor r20,r13 + com r6 + std Z+2,r20 + std Z+10,r23 + std Z+26,r12 + std Z+34,r13 + ldd r23,Z+11 + ldd r12,Z+27 + ldd r13,Z+35 + eor r21,r13 + eor r7,r23 + eor r13,r12 + mov r14,r23 + mov r0,r21 + com r0 + and r14,r0 + mov r15,r7 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r7 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r21 + mov r0,r13 + com r0 + and r16,r0 + eor r21,r15 + eor r23,r24 + eor r7,r25 + eor r12,r16 + eor r13,r14 + eor r23,r21 + eor r12,r7 + eor r21,r13 + com r7 + std Z+3,r21 + std Z+11,r23 + std Z+27,r12 + std Z+35,r13 + ldd r23,Z+12 + ldd r12,Z+28 + ldd r13,Z+36 + eor 
r26,r13 + eor r8,r23 + eor r13,r12 + mov r14,r23 + mov r0,r26 + com r0 + and r14,r0 + mov r15,r8 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r8 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r26 + mov r0,r13 + com r0 + and r16,r0 + eor r26,r15 + eor r23,r24 + eor r8,r25 + eor r12,r16 + eor r13,r14 + eor r23,r26 + eor r12,r8 + eor r26,r13 + com r8 + std Z+4,r26 + std Z+12,r23 + std Z+28,r12 + std Z+36,r13 + ldd r23,Z+13 + ldd r12,Z+29 + ldd r13,Z+37 + eor r27,r13 + eor r9,r23 + eor r13,r12 + mov r14,r23 + mov r0,r27 + com r0 + and r14,r0 + mov r15,r9 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r9 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r27 + mov r0,r13 + com r0 + and r16,r0 + eor r27,r15 + eor r23,r24 + eor r9,r25 + eor r12,r16 + eor r13,r14 + eor r23,r27 + eor r12,r9 + eor r27,r13 + com r9 + std Z+5,r27 + std Z+13,r23 + std Z+29,r12 + std Z+37,r13 + ldd r23,Z+14 + ldd r12,Z+30 + ldd r13,Z+38 + eor r2,r13 + eor r10,r23 + eor r13,r12 + mov r14,r23 + mov r0,r2 + com r0 + and r14,r0 + mov r15,r10 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r10 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r2 + mov r0,r13 + com r0 + and r16,r0 + eor r2,r15 + eor r23,r24 + eor r10,r25 + eor r12,r16 + eor r13,r14 + eor r23,r2 + eor r12,r10 + eor r2,r13 + com r10 + std Z+6,r2 + std Z+14,r23 + std Z+30,r12 + std Z+38,r13 + ldd r23,Z+15 + ldd r12,Z+31 + ldd r13,Z+39 + eor r3,r13 + eor r11,r23 + eor r13,r12 + mov r14,r23 + mov r0,r3 + com r0 + and r14,r0 + mov r15,r11 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r11 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r3 + mov r0,r13 + com r0 + and r16,r0 + eor r3,r15 + eor r23,r24 + eor r11,r25 + eor r12,r16 + eor r13,r14 + eor r23,r3 + eor r12,r11 + eor r3,r13 + com r11 + std Z+7,r3 + std Z+15,r23 + std Z+31,r12 + std Z+39,r13 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r24 + rol r25 + rol r16 + rol r17 + adc r24,r1 + lsl r24 + rol r25 + rol r16 + rol r17 + adc r24,r1 + eor r24,r18 + eor r25,r19 + eor r16,r20 + eor r17,r21 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r18,r24 + eor r19,r25 + eor r20,r16 + eor r21,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + std Z+12,r26 + std Z+13,r27 + std Z+14,r2 + std Z+15,r3 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + bst r12,0 + lsr r15 + ror r14 + ror r13 + ror r12 + bld r15,7 + eor r24,r4 + eor r25,r5 + eor r16,r6 + eor r17,r7 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r1 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r7,r0 + mov r0,r1 + lsr r11 + ror r10 + 
ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + or r11,r0 + eor r4,r24 + eor r5,r25 + eor r6,r16 + eor r7,r17 + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + eor r24,r18 + eor r25,r19 + eor r16,r20 + eor r17,r21 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r24 + eor r19,r25 + eor r20,r16 + eor r21,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+24,r18 + std Z+25,r19 + std Z+26,r20 + std Z+27,r21 + std Z+28,r26 + std Z+29,r27 + std Z+30,r2 + std Z+31,r3 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + or r17,r0 + eor r24,r18 + eor r25,r19 + eor r16,r20 + eor r17,r21 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r18,r24 + eor r19,r25 + eor r20,r16 + eor r21,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+32,r18 + std Z+33,r19 + std Z+34,r20 + std Z+35,r21 + std Z+36,r26 + std Z+37,r27 + std Z+38,r2 + std Z+39,r3 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + or r17,r0 + eor r24,r18 + eor r25,r19 + eor r16,r20 + eor r17,r21 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov 
r0,r19 + mov r19,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r24 + eor r19,r25 + eor r20,r16 + eor r21,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + std Z+16,r4 + std Z+17,r5 + std Z+18,r6 + std Z+19,r7 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size gascon128_core_round, .-gascon128_core_round + + .text +.global drysponge128_g + .type drysponge128_g, @function +drysponge128_g: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + subi r30,180 + sbci r31,255 + ld r19,Z + subi r30,76 + sbc r31,r1 + ldi r18,240 + std Z+40,r1 + std Z+41,r1 + std Z+42,r1 + std Z+43,r1 + std Z+44,r1 + std Z+45,r1 + std Z+46,r1 + std Z+47,r1 + std Z+48,r1 + std Z+49,r1 + std Z+50,r1 + std Z+51,r1 + std Z+52,r1 + std Z+53,r1 + std Z+54,r1 + std Z+55,r1 + ld r20,Z + ldd r21,Z+1 + ldd r22,Z+2 + ldd r23,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+16 + ldd r5,Z+17 + ldd r6,Z+18 + ldd r7,Z+19 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 +38: + eor r4,r18 + ldd r12,Z+8 + ldd r13,Z+24 + ldd r14,Z+32 + eor r20,r14 + eor r4,r12 + eor r14,r13 + mov r15,r12 + mov r0,r20 + com r0 + and r15,r0 + mov r24,r4 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r4 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r20 + mov r0,r14 + com r0 + and r17,r0 + eor r20,r24 + eor r12,r25 + eor r4,r16 + eor r13,r17 + eor r14,r15 + eor r12,r20 + eor r13,r4 + eor r20,r14 + com r4 + st Z,r20 + std Z+8,r12 + std Z+24,r13 + std Z+32,r14 + ldd r12,Z+9 + ldd r13,Z+25 + ldd r14,Z+33 + eor r21,r14 + eor r5,r12 + eor r14,r13 + mov r15,r12 + mov r0,r21 + com r0 + and r15,r0 + mov r24,r5 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r5 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r21 + mov r0,r14 + com r0 + and r17,r0 + eor r21,r24 + eor r12,r25 + eor r5,r16 + eor r13,r17 + eor r14,r15 + eor r12,r21 + eor r13,r5 + eor r21,r14 + com r5 + std Z+1,r21 + std Z+9,r12 + std Z+25,r13 + std Z+33,r14 + ldd r12,Z+10 + ldd r13,Z+26 + ldd r14,Z+34 + eor r22,r14 + eor r6,r12 + eor r14,r13 + mov r15,r12 + mov r0,r22 + com r0 + and r15,r0 + mov r24,r6 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r6 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r22 + mov r0,r14 + com r0 + and r17,r0 + eor r22,r24 + eor r12,r25 + eor r6,r16 + eor r13,r17 + eor r14,r15 + eor r12,r22 + eor r13,r6 + eor r22,r14 + com r6 + std Z+2,r22 + std Z+10,r12 + std Z+26,r13 + std Z+34,r14 + ldd r12,Z+11 + ldd r13,Z+27 + ldd r14,Z+35 + eor r23,r14 + eor r7,r12 + eor r14,r13 + mov r15,r12 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r7 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r7 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r23 + mov r0,r14 + com r0 + and r17,r0 + eor r23,r24 + 
eor r12,r25 + eor r7,r16 + eor r13,r17 + eor r14,r15 + eor r12,r23 + eor r13,r7 + eor r23,r14 + com r7 + std Z+3,r23 + std Z+11,r12 + std Z+27,r13 + std Z+35,r14 + ldd r12,Z+12 + ldd r13,Z+28 + ldd r14,Z+36 + eor r26,r14 + eor r8,r12 + eor r14,r13 + mov r15,r12 + mov r0,r26 + com r0 + and r15,r0 + mov r24,r8 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r8 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r26 + mov r0,r14 + com r0 + and r17,r0 + eor r26,r24 + eor r12,r25 + eor r8,r16 + eor r13,r17 + eor r14,r15 + eor r12,r26 + eor r13,r8 + eor r26,r14 + com r8 + std Z+4,r26 + std Z+12,r12 + std Z+28,r13 + std Z+36,r14 + ldd r12,Z+13 + ldd r13,Z+29 + ldd r14,Z+37 + eor r27,r14 + eor r9,r12 + eor r14,r13 + mov r15,r12 + mov r0,r27 + com r0 + and r15,r0 + mov r24,r9 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r9 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r27 + mov r0,r14 + com r0 + and r17,r0 + eor r27,r24 + eor r12,r25 + eor r9,r16 + eor r13,r17 + eor r14,r15 + eor r12,r27 + eor r13,r9 + eor r27,r14 + com r9 + std Z+5,r27 + std Z+13,r12 + std Z+29,r13 + std Z+37,r14 + ldd r12,Z+14 + ldd r13,Z+30 + ldd r14,Z+38 + eor r2,r14 + eor r10,r12 + eor r14,r13 + mov r15,r12 + mov r0,r2 + com r0 + and r15,r0 + mov r24,r10 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r10 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r2 + mov r0,r14 + com r0 + and r17,r0 + eor r2,r24 + eor r12,r25 + eor r10,r16 + eor r13,r17 + eor r14,r15 + eor r12,r2 + eor r13,r10 + eor r2,r14 + com r10 + std Z+6,r2 + std Z+14,r12 + std Z+30,r13 + std Z+38,r14 + ldd r12,Z+15 + ldd r13,Z+31 + ldd r14,Z+39 + eor r3,r14 + eor r11,r12 + eor r14,r13 + mov r15,r12 + mov r0,r3 + com r0 + and r15,r0 + mov r24,r11 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r11 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r3 + mov r0,r14 + com r0 + and r17,r0 + eor r3,r24 + eor r12,r25 + eor r11,r16 + eor r13,r17 + eor r14,r15 + eor r12,r3 + eor r13,r11 + eor r3,r14 + com r11 + std Z+7,r3 + std Z+15,r12 + std Z+31,r13 + std Z+39,r14 + ldd r20,Z+8 + ldd r21,Z+9 + ldd r22,Z+10 + ldd r23,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r12,r20 + movw r14,r22 + movw r24,r26 + movw r16,r2 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r24 + rol r25 + rol r16 + rol r17 + adc r24,r1 + lsl r24 + rol r25 + rol r16 + rol r17 + adc r24,r1 + eor r24,r20 + eor r25,r21 + eor r16,r22 + eor r17,r23 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r20 + mov r20,r22 + mov r22,r0 + mov r0,r21 + mov r21,r23 + mov r23,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + or r23,r0 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+8,r20 + std Z+9,r21 + std Z+10,r22 + std Z+11,r23 + std Z+12,r26 + std Z+13,r27 + std Z+14,r2 + std Z+15,r3 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + bst r12,0 + lsr r15 + ror r14 + ror r13 + ror r12 + bld r15,7 + eor r24,r4 + eor r25,r5 + eor r16,r6 + eor r17,r7 + eor r12,r8 + eor r13,r9 + eor 
r14,r10 + eor r15,r11 + mov r0,r1 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r7,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + or r11,r0 + eor r4,r24 + eor r5,r25 + eor r6,r16 + eor r7,r17 + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + ldd r20,Z+24 + ldd r21,Z+25 + ldd r22,Z+26 + ldd r23,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r12,r20 + movw r14,r22 + movw r24,r26 + movw r16,r2 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + eor r24,r20 + eor r25,r21 + eor r16,r22 + eor r17,r23 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r20 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r0 + lsl r20 + rol r21 + rol r22 + rol r23 + adc r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + adc r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + adc r20,r1 + mov r0,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+24,r20 + std Z+25,r21 + std Z+26,r22 + std Z+27,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r2 + std Z+31,r3 + ldd r20,Z+32 + ldd r21,Z+33 + ldd r22,Z+34 + ldd r23,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + movw r12,r20 + movw r14,r22 + movw r24,r26 + movw r16,r2 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + or r17,r0 + eor r24,r20 + eor r25,r21 + eor r16,r22 + eor r17,r23 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r20 + mov r20,r22 + mov r22,r0 + mov r0,r21 + mov r21,r23 + mov r23,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + or r23,r0 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+32,r20 + std Z+33,r21 + std Z+34,r22 + std Z+35,r23 + std Z+36,r26 + std Z+37,r27 + std Z+38,r2 + std Z+39,r3 + ld r20,Z + ldd r21,Z+1 + ldd r22,Z+2 + ldd r23,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r12,r20 + movw r14,r22 + movw r24,r26 + movw r16,r2 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + mov r0,r1 + lsr 
r17 + ror r16 + ror r25 + ror r24 + ror r0 + or r17,r0 + eor r24,r20 + eor r25,r21 + eor r16,r22 + eor r17,r23 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r20 + mov r20,r22 + mov r22,r0 + mov r0,r21 + mov r21,r23 + mov r23,r0 + lsl r20 + rol r21 + rol r22 + rol r23 + adc r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + adc r20,r1 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + ldd r12,Z+40 + ldd r13,Z+41 + ldd r14,Z+42 + ldd r15,Z+43 + eor r12,r20 + eor r13,r21 + eor r14,r22 + eor r15,r23 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + std Z+40,r12 + std Z+41,r13 + std Z+42,r14 + std Z+43,r15 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + ldd r0,Z+24 + eor r12,r0 + ldd r0,Z+25 + eor r13,r0 + ldd r0,Z+26 + eor r14,r0 + ldd r0,Z+27 + eor r15,r0 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + ldd r12,Z+48 + ldd r13,Z+49 + ldd r14,Z+50 + ldd r15,Z+51 + ldd r0,Z+8 + eor r12,r0 + ldd r0,Z+9 + eor r13,r0 + ldd r0,Z+10 + eor r14,r0 + ldd r0,Z+11 + eor r15,r0 + ldd r0,Z+28 + eor r12,r0 + ldd r0,Z+29 + eor r13,r0 + ldd r0,Z+30 + eor r14,r0 + ldd r0,Z+31 + eor r15,r0 + std Z+48,r12 + std Z+49,r13 + std Z+50,r14 + std Z+51,r15 + ldd r12,Z+52 + ldd r13,Z+53 + ldd r14,Z+54 + ldd r15,Z+55 + ldd r0,Z+12 + eor r12,r0 + ldd r0,Z+13 + eor r13,r0 + ldd r0,Z+14 + eor r14,r0 + ldd r0,Z+15 + eor r15,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + std Z+52,r12 + std Z+53,r13 + std Z+54,r14 + std Z+55,r15 + subi r18,15 + dec r19 + breq 5904f + rjmp 38b +5904: + st Z,r20 + std Z+1,r21 + std Z+2,r22 + std Z+3,r23 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + std Z+16,r4 + std Z+17,r5 + std Z+18,r6 + std Z+19,r7 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size drysponge128_g, .-drysponge128_g + + .text +.global gascon256_core_round + .type gascon256_core_round, @function +gascon256_core_round: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,8 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 26 + ldi r18,15 + sub r18,r22 + swap r18 + or r22,r18 + ld r18,Z+ + ld r19,Z+ + ld r20,Z+ + ld r21,Z+ + ld r26,Z+ + ld r27,Z+ + ld r2,Z+ + ld r3,Z+ + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + eor r4,r22 + ld r22,Z + ldd r23,Z+8 + ldd r12,Z+16 + ldd r13,Z+32 + ldd r14,Z+40 + ldd r15,Z+48 + ldd r24,Z+56 + eor r18,r24 + eor r23,r22 + eor r4,r12 + eor r14,r13 + eor r24,r15 + mov r17,r18 + mov r25,r22 + mov r0,r18 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r18,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r4 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r4 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r4,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + 
mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r18 + eor r12,r23 + eor r13,r4 + eor r15,r14 + eor r18,r24 + com r4 + std Y+1,r18 + st Z,r22 + std Z+8,r23 + std Z+16,r12 + std Z+32,r13 + std Z+40,r14 + std Z+48,r15 + std Z+56,r24 + ldd r22,Z+1 + ldd r23,Z+9 + ldd r12,Z+17 + ldd r13,Z+33 + ldd r14,Z+41 + ldd r15,Z+49 + ldd r24,Z+57 + eor r19,r24 + eor r23,r22 + eor r5,r12 + eor r14,r13 + eor r24,r15 + mov r17,r19 + mov r25,r22 + mov r0,r19 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r19,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r5 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r5 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r5,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r19 + eor r12,r23 + eor r13,r5 + eor r15,r14 + eor r19,r24 + com r5 + std Y+2,r19 + std Z+1,r22 + std Z+9,r23 + std Z+17,r12 + std Z+33,r13 + std Z+41,r14 + std Z+49,r15 + std Z+57,r24 + ldd r22,Z+2 + ldd r23,Z+10 + ldd r12,Z+18 + ldd r13,Z+34 + ldd r14,Z+42 + ldd r15,Z+50 + ldd r24,Z+58 + eor r20,r24 + eor r23,r22 + eor r6,r12 + eor r14,r13 + eor r24,r15 + mov r17,r20 + mov r25,r22 + mov r0,r20 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r20,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r6 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r6 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r6,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r20 + eor r12,r23 + eor r13,r6 + eor r15,r14 + eor r20,r24 + com r6 + std Y+3,r20 + std Z+2,r22 + std Z+10,r23 + std Z+18,r12 + std Z+34,r13 + std Z+42,r14 + std Z+50,r15 + std Z+58,r24 + ldd r22,Z+3 + ldd r23,Z+11 + ldd r12,Z+19 + ldd r13,Z+35 + ldd r14,Z+43 + ldd r15,Z+51 + ldd r24,Z+59 + eor r21,r24 + eor r23,r22 + eor r7,r12 + eor r14,r13 + eor r24,r15 + mov r17,r21 + mov r25,r22 + mov r0,r21 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r21,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r7 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r7 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r7,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r21 + eor r12,r23 + eor r13,r7 + eor r15,r14 + eor r21,r24 + com r7 + std Y+4,r21 + std Z+3,r22 + std Z+11,r23 + std Z+19,r12 + std Z+35,r13 + std Z+43,r14 + std Z+51,r15 + std Z+59,r24 + ldd r22,Z+4 + ldd r23,Z+12 + ldd r12,Z+20 + ldd r13,Z+36 + ldd r14,Z+44 + ldd r15,Z+52 + ldd r24,Z+60 + eor r26,r24 + eor r23,r22 + eor r8,r12 + eor r14,r13 + eor r24,r15 + mov r17,r26 + mov r25,r22 + mov r0,r26 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r26,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r8 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + 
mov r0,r8 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r8,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r26 + eor r12,r23 + eor r13,r8 + eor r15,r14 + eor r26,r24 + com r8 + std Y+5,r26 + std Z+4,r22 + std Z+12,r23 + std Z+20,r12 + std Z+36,r13 + std Z+44,r14 + std Z+52,r15 + std Z+60,r24 + ldd r22,Z+5 + ldd r23,Z+13 + ldd r12,Z+21 + ldd r13,Z+37 + ldd r14,Z+45 + ldd r15,Z+53 + ldd r24,Z+61 + eor r27,r24 + eor r23,r22 + eor r9,r12 + eor r14,r13 + eor r24,r15 + mov r17,r27 + mov r25,r22 + mov r0,r27 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r27,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r9 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r9 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r9,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r27 + eor r12,r23 + eor r13,r9 + eor r15,r14 + eor r27,r24 + com r9 + std Y+6,r27 + std Z+5,r22 + std Z+13,r23 + std Z+21,r12 + std Z+37,r13 + std Z+45,r14 + std Z+53,r15 + std Z+61,r24 + ldd r22,Z+6 + ldd r23,Z+14 + ldd r12,Z+22 + ldd r13,Z+38 + ldd r14,Z+46 + ldd r15,Z+54 + ldd r24,Z+62 + eor r2,r24 + eor r23,r22 + eor r10,r12 + eor r14,r13 + eor r24,r15 + mov r17,r2 + mov r25,r22 + mov r0,r2 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r2,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r10 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r10 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r2 + eor r12,r23 + eor r13,r10 + eor r15,r14 + eor r2,r24 + com r10 + std Y+7,r2 + std Z+6,r22 + std Z+14,r23 + std Z+22,r12 + std Z+38,r13 + std Z+46,r14 + std Z+54,r15 + std Z+62,r24 + ldd r22,Z+7 + ldd r23,Z+15 + ldd r12,Z+23 + ldd r13,Z+39 + ldd r14,Z+47 + ldd r15,Z+55 + ldd r24,Z+63 + eor r3,r24 + eor r23,r22 + eor r11,r12 + eor r14,r13 + eor r24,r15 + mov r17,r3 + mov r25,r22 + mov r0,r3 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r3,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r11 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r11 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r3 + eor r12,r23 + eor r13,r11 + eor r15,r14 + eor r3,r24 + com r11 + std Y+8,r3 + std Z+7,r22 + std Z+15,r23 + std Z+23,r12 + std Z+39,r13 + std Z+47,r14 + std Z+55,r15 + std Z+63,r24 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc 
r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + bst r22,0 + lsr r13 + ror r12 + ror r23 + ror r22 + bld r13,7 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + std Z+12,r26 + std Z+13,r27 + std Z+14,r2 + std Z+15,r3 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r22 + mov r22,r23 + mov r23,r12 + mov r12,r13 + mov r13,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + or r13,r0 + mov r0,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + std Z+20,r26 + std Z+21,r27 + std Z+22,r2 + std Z+23,r3 + movw r22,r4 + movw r12,r6 + movw r14,r8 + movw r24,r10 + mov r0,r1 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + or r13,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r4 + eor r15,r5 + eor r24,r6 + eor r25,r7 + eor r22,r8 + eor r23,r9 + eor r12,r10 + eor r13,r11 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov 
r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r1 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + or r11,r0 + eor r4,r14 + eor r5,r15 + eor r6,r24 + eor r7,r25 + eor r8,r22 + eor r9,r23 + eor r10,r12 + eor r11,r13 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r22 + mov r22,r12 + mov r12,r0 + mov r0,r23 + mov r23,r13 + mov r13,r0 + mov r0,r14 + mov r14,r24 + mov r24,r0 + mov r0,r15 + mov r15,r25 + mov r25,r0 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+32,r18 + std Z+33,r19 + std Z+34,r20 + std Z+35,r21 + std Z+36,r26 + std Z+37,r27 + std Z+38,r2 + std Z+39,r3 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + ldd r26,Z+44 + ldd r27,Z+45 + ldd r2,Z+46 + ldd r3,Z+47 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r13 + mov r13,r12 + mov r12,r23 + mov r23,r22 + mov r22,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + or r13,r0 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+40,r18 + std Z+41,r19 + std Z+42,r20 + std Z+43,r21 + std Z+44,r26 + std Z+45,r27 + std Z+46,r2 + std Z+47,r3 + ldd r18,Z+48 + ldd r19,Z+49 + ldd r20,Z+50 + ldd r21,Z+51 + ldd r26,Z+52 + ldd r27,Z+53 + ldd r2,Z+54 + ldd r3,Z+55 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r22 + mov r22,r23 + mov r23,r12 + mov r12,r13 + mov r13,r0 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 
+ ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r3 + mov r3,r2 + mov r2,r27 + mov r27,r26 + mov r26,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+48,r18 + std Z+49,r19 + std Z+50,r20 + std Z+51,r21 + std Z+52,r26 + std Z+53,r27 + std Z+54,r2 + std Z+55,r3 + ldd r18,Z+56 + ldd r19,Z+57 + ldd r20,Z+58 + ldd r21,Z+59 + ldd r26,Z+60 + ldd r27,Z+61 + ldd r2,Z+62 + ldd r3,Z+63 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r13 + mov r13,r12 + mov r12,r23 + mov r23,r22 + mov r22,r0 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+56,r18 + std Z+57,r19 + std Z+58,r20 + std Z+59,r21 + std Z+60,r26 + std Z+61,r27 + std Z+62,r2 + std Z+63,r3 + ldd r18,Y+1 + ldd r19,Y+2 + ldd r20,Y+3 + ldd r21,Y+4 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r2,Y+7 + ldd r3,Y+8 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r22 + mov r22,r23 + mov r23,r12 + mov r12,r13 + mov r13,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + or r13,r0 + mov r0,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+24,r4 + std Z+25,r5 + std Z+26,r6 + std Z+27,r7 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + st -Z,r3 + st -Z,r2 + st -Z,r27 + st -Z,r26 + st -Z,r21 + st -Z,r20 + st -Z,r19 + st -Z,r18 + adiw r28,8 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gascon256_core_round, .-gascon256_core_round + + .text +.global drysponge256_g + .type 
drysponge256_g, @function +drysponge256_g: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,26 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 44 + subi r30,148 + sbci r31,255 + ld r19,Z + subi r30,108 + sbc r31,r1 + ldi r18,240 + std Y+25,r19 + std Y+26,r18 + std Y+9,r1 + std Y+10,r1 + std Y+11,r1 + std Y+12,r1 + std Y+13,r1 + std Y+14,r1 + std Y+15,r1 + std Y+16,r1 + std Y+17,r1 + std Y+18,r1 + std Y+19,r1 + std Y+20,r1 + std Y+21,r1 + std Y+22,r1 + std Y+23,r1 + std Y+24,r1 + ld r18,Z+ + ld r19,Z+ + ld r20,Z+ + ld r21,Z+ + ld r22,Z+ + ld r23,Z+ + ld r26,Z+ + ld r27,Z+ + ldd r2,Z+24 + ldd r3,Z+25 + ldd r4,Z+26 + ldd r5,Z+27 + ldd r6,Z+28 + ldd r7,Z+29 + ldd r8,Z+30 + ldd r9,Z+31 +40: + ldd r24,Y+26 + eor r2,r24 + subi r24,15 + std Y+26,r24 + ld r10,Z + ldd r11,Z+8 + ldd r12,Z+16 + ldd r13,Z+32 + ldd r14,Z+40 + ldd r15,Z+48 + ldd r24,Z+56 + eor r18,r24 + eor r11,r10 + eor r2,r12 + eor r14,r13 + eor r24,r15 + mov r17,r18 + mov r25,r10 + mov r0,r18 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r18,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r2 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r2 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r2,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r18 + eor r12,r11 + eor r13,r2 + eor r15,r14 + eor r18,r24 + com r2 + std Y+1,r18 + st Z,r10 + std Z+8,r11 + std Z+16,r12 + std Z+32,r13 + std Z+40,r14 + std Z+48,r15 + std Z+56,r24 + ldd r10,Z+1 + ldd r11,Z+9 + ldd r12,Z+17 + ldd r13,Z+33 + ldd r14,Z+41 + ldd r15,Z+49 + ldd r24,Z+57 + eor r19,r24 + eor r11,r10 + eor r3,r12 + eor r14,r13 + eor r24,r15 + mov r17,r19 + mov r25,r10 + mov r0,r19 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r19,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r3 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r3 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r3,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r19 + eor r12,r11 + eor r13,r3 + eor r15,r14 + eor r19,r24 + com r3 + std Y+2,r19 + std Z+1,r10 + std Z+9,r11 + std Z+17,r12 + std Z+33,r13 + std Z+41,r14 + std Z+49,r15 + std Z+57,r24 + ldd r10,Z+2 + ldd r11,Z+10 + ldd r12,Z+18 + ldd r13,Z+34 + ldd r14,Z+42 + ldd r15,Z+50 + ldd r24,Z+58 + eor r20,r24 + eor r11,r10 + eor r4,r12 + eor r14,r13 + eor r24,r15 + mov r17,r20 + mov r25,r10 + mov r0,r20 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r20,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r4 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r4 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r4,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor 
r15,r17 + eor r24,r25 + eor r10,r20 + eor r12,r11 + eor r13,r4 + eor r15,r14 + eor r20,r24 + com r4 + std Y+3,r20 + std Z+2,r10 + std Z+10,r11 + std Z+18,r12 + std Z+34,r13 + std Z+42,r14 + std Z+50,r15 + std Z+58,r24 + ldd r10,Z+3 + ldd r11,Z+11 + ldd r12,Z+19 + ldd r13,Z+35 + ldd r14,Z+43 + ldd r15,Z+51 + ldd r24,Z+59 + eor r21,r24 + eor r11,r10 + eor r5,r12 + eor r14,r13 + eor r24,r15 + mov r17,r21 + mov r25,r10 + mov r0,r21 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r21,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r5 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r5 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r5,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r21 + eor r12,r11 + eor r13,r5 + eor r15,r14 + eor r21,r24 + com r5 + std Y+4,r21 + std Z+3,r10 + std Z+11,r11 + std Z+19,r12 + std Z+35,r13 + std Z+43,r14 + std Z+51,r15 + std Z+59,r24 + ldd r10,Z+4 + ldd r11,Z+12 + ldd r12,Z+20 + ldd r13,Z+36 + ldd r14,Z+44 + ldd r15,Z+52 + ldd r24,Z+60 + eor r22,r24 + eor r11,r10 + eor r6,r12 + eor r14,r13 + eor r24,r15 + mov r17,r22 + mov r25,r10 + mov r0,r22 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r6 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r6 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r6,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r22 + eor r12,r11 + eor r13,r6 + eor r15,r14 + eor r22,r24 + com r6 + std Y+5,r22 + std Z+4,r10 + std Z+12,r11 + std Z+20,r12 + std Z+36,r13 + std Z+44,r14 + std Z+52,r15 + std Z+60,r24 + ldd r10,Z+5 + ldd r11,Z+13 + ldd r12,Z+21 + ldd r13,Z+37 + ldd r14,Z+45 + ldd r15,Z+53 + ldd r24,Z+61 + eor r23,r24 + eor r11,r10 + eor r7,r12 + eor r14,r13 + eor r24,r15 + mov r17,r23 + mov r25,r10 + mov r0,r23 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r7 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r7 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r7,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r23 + eor r12,r11 + eor r13,r7 + eor r15,r14 + eor r23,r24 + com r7 + std Y+6,r23 + std Z+5,r10 + std Z+13,r11 + std Z+21,r12 + std Z+37,r13 + std Z+45,r14 + std Z+53,r15 + std Z+61,r24 + ldd r10,Z+6 + ldd r11,Z+14 + ldd r12,Z+22 + ldd r13,Z+38 + ldd r14,Z+46 + ldd r15,Z+54 + ldd r24,Z+62 + eor r26,r24 + eor r11,r10 + eor r8,r12 + eor r14,r13 + eor r24,r15 + mov r17,r26 + mov r25,r10 + mov r0,r26 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r26,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r8 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r8 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + 
eor r8,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r26 + eor r12,r11 + eor r13,r8 + eor r15,r14 + eor r26,r24 + com r8 + std Y+7,r26 + std Z+6,r10 + std Z+14,r11 + std Z+22,r12 + std Z+38,r13 + std Z+46,r14 + std Z+54,r15 + std Z+62,r24 + ldd r10,Z+7 + ldd r11,Z+15 + ldd r12,Z+23 + ldd r13,Z+39 + ldd r14,Z+47 + ldd r15,Z+55 + ldd r24,Z+63 + eor r27,r24 + eor r11,r10 + eor r9,r12 + eor r14,r13 + eor r24,r15 + mov r17,r27 + mov r25,r10 + mov r0,r27 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r27,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r9 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r9 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r9,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r27 + eor r12,r11 + eor r13,r9 + eor r15,r14 + eor r27,r24 + com r9 + std Y+8,r27 + std Z+7,r10 + std Z+15,r11 + std Z+23,r12 + std Z+39,r13 + std Z+47,r14 + std Z+55,r15 + std Z+63,r24 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r26,Z+6 + ldd r27,Z+7 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r22 + mov r22,r26 + mov r26,r0 + mov r0,r23 + mov r23,r27 + mov r27,r0 + mov r0,r1 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + or r27,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r22 + std Z+5,r23 + std Z+6,r26 + std Z+7,r27 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r26,Z+14 + ldd r27,Z+15 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + bst r10,0 + lsr r13 + ror r12 + ror r11 + ror r10 + bld r13,7 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r1 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + or r27,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + std Z+12,r22 + std Z+13,r23 + std Z+14,r26 + std Z+15,r27 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r26,Z+22 + ldd r27,Z+23 + movw r10,r18 + 
movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r10 + mov r10,r11 + mov r11,r12 + mov r12,r13 + mov r13,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + or r13,r0 + mov r0,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r22 + mov r22,r23 + mov r23,r26 + mov r26,r27 + mov r27,r0 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + std Z+20,r22 + std Z+21,r23 + std Z+22,r26 + std Z+23,r27 + movw r10,r2 + movw r12,r4 + movw r14,r6 + movw r24,r8 + mov r0,r1 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + or r13,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r2 + eor r15,r3 + eor r24,r4 + eor r25,r5 + eor r10,r6 + eor r11,r7 + eor r12,r8 + eor r13,r9 + mov r0,r2 + mov r2,r4 + mov r4,r0 + mov r0,r3 + mov r3,r5 + mov r5,r0 + mov r0,r1 + lsr r5 + ror r4 + ror r3 + ror r2 + ror r0 + lsr r5 + ror r4 + ror r3 + ror r2 + ror r0 + lsr r5 + ror r4 + ror r3 + ror r2 + ror r0 + lsr r5 + ror r4 + ror r3 + ror r2 + ror r0 + or r5,r0 + mov r0,r6 + mov r6,r8 + mov r8,r0 + mov r0,r7 + mov r7,r9 + mov r9,r0 + mov r0,r1 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + or r9,r0 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + eor r6,r10 + eor r7,r11 + eor r8,r12 + eor r9,r13 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r22,Z+36 + ldd r23,Z+37 + ldd r26,Z+38 + ldd r27,Z+39 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r10 + mov r10,r12 + mov r12,r0 + mov r0,r11 + mov r11,r13 + mov r13,r0 + mov r0,r14 + mov r14,r24 + mov r24,r0 + mov r0,r15 + mov r15,r25 + mov r25,r0 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r22 + mov r22,r26 + mov r26,r0 + mov r0,r23 + mov r23,r27 + mov r27,r0 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+32,r18 + std Z+33,r19 + std Z+34,r20 + std Z+35,r21 + std Z+36,r22 + std Z+37,r23 + std Z+38,r26 + std Z+39,r27 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + ldd r22,Z+44 + ldd 
r23,Z+45 + ldd r26,Z+46 + ldd r27,Z+47 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r13 + mov r13,r12 + mov r12,r11 + mov r11,r10 + mov r10,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + or r13,r0 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+40,r18 + std Z+41,r19 + std Z+42,r20 + std Z+43,r21 + std Z+44,r22 + std Z+45,r23 + std Z+46,r26 + std Z+47,r27 + ldd r18,Z+48 + ldd r19,Z+49 + ldd r20,Z+50 + ldd r21,Z+51 + ldd r22,Z+52 + ldd r23,Z+53 + ldd r26,Z+54 + ldd r27,Z+55 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r10 + mov r10,r11 + mov r11,r12 + mov r12,r13 + mov r13,r0 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r27 + mov r27,r26 + mov r26,r23 + mov r23,r22 + mov r22,r0 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+48,r18 + std Z+49,r19 + std Z+50,r20 + std Z+51,r21 + std Z+52,r22 + std Z+53,r23 + std Z+54,r26 + std Z+55,r27 + ldd r18,Z+56 + ldd r19,Z+57 + ldd r20,Z+58 + ldd r21,Z+59 + ldd r22,Z+60 + ldd r23,Z+61 + ldd r26,Z+62 + ldd r27,Z+63 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r13 + mov r13,r12 + mov r12,r11 + mov r11,r10 + mov r10,r0 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r27 + mov r27,r26 + mov r26,r23 + mov r23,r22 + mov r22,r0 + mov r0,r1 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + or r27,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+56,r18 + std Z+57,r19 + std Z+58,r20 + std Z+59,r21 + 
std Z+60,r22 + std Z+61,r23 + std Z+62,r26 + std Z+63,r27 + ldd r18,Y+1 + ldd r19,Y+2 + ldd r20,Y+3 + ldd r21,Y+4 + ldd r22,Y+5 + ldd r23,Y+6 + ldd r26,Y+7 + ldd r27,Y+8 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r10 + mov r10,r11 + mov r11,r12 + mov r12,r13 + mov r13,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + or r13,r0 + mov r0,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r22 + mov r22,r26 + mov r26,r0 + mov r0,r23 + mov r23,r27 + mov r27,r0 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + ldd r10,Y+9 + ldd r11,Y+10 + ldd r12,Y+11 + ldd r13,Y+12 + eor r10,r18 + eor r11,r19 + eor r12,r20 + eor r13,r21 + ldd r0,Z+12 + eor r10,r0 + ldd r0,Z+13 + eor r11,r0 + ldd r0,Z+14 + eor r12,r0 + ldd r0,Z+15 + eor r13,r0 + ldd r0,Z+32 + eor r10,r0 + ldd r0,Z+33 + eor r11,r0 + ldd r0,Z+34 + eor r12,r0 + ldd r0,Z+35 + eor r13,r0 + ldd r0,Z+52 + eor r10,r0 + ldd r0,Z+53 + eor r11,r0 + ldd r0,Z+54 + eor r12,r0 + ldd r0,Z+55 + eor r13,r0 + std Y+9,r10 + std Y+10,r11 + std Y+11,r12 + std Y+12,r13 + ldd r10,Y+13 + ldd r11,Y+14 + ldd r12,Y+15 + ldd r13,Y+16 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + ldd r0,Z+16 + eor r10,r0 + ldd r0,Z+17 + eor r11,r0 + ldd r0,Z+18 + eor r12,r0 + ldd r0,Z+19 + eor r13,r0 + ldd r0,Z+36 + eor r10,r0 + ldd r0,Z+37 + eor r11,r0 + ldd r0,Z+38 + eor r12,r0 + ldd r0,Z+39 + eor r13,r0 + ldd r0,Z+40 + eor r10,r0 + ldd r0,Z+41 + eor r11,r0 + ldd r0,Z+42 + eor r12,r0 + ldd r0,Z+43 + eor r13,r0 + std Y+13,r10 + std Y+14,r11 + std Y+15,r12 + std Y+16,r13 + ldd r10,Y+17 + ldd r11,Y+18 + ldd r12,Y+19 + ldd r13,Y+20 + eor r10,r2 + eor r11,r3 + eor r12,r4 + eor r13,r5 + ld r0,Z + eor r10,r0 + ldd r0,Z+1 + eor r11,r0 + ldd r0,Z+2 + eor r12,r0 + ldd r0,Z+3 + eor r13,r0 + ldd r0,Z+20 + eor r10,r0 + ldd r0,Z+21 + eor r11,r0 + ldd r0,Z+22 + eor r12,r0 + ldd r0,Z+23 + eor r13,r0 + ldd r0,Z+44 + eor r10,r0 + ldd r0,Z+45 + eor r11,r0 + ldd r0,Z+46 + eor r12,r0 + ldd r0,Z+47 + eor r13,r0 + std Y+17,r10 + std Y+18,r11 + std Y+19,r12 + std Y+20,r13 + ldd r10,Y+21 + ldd r11,Y+22 + ldd r12,Y+23 + ldd r13,Y+24 + eor r10,r6 + eor r11,r7 + eor r12,r8 + eor r13,r9 + ldd r0,Z+4 + eor r10,r0 + ldd r0,Z+5 + eor r11,r0 + ldd r0,Z+6 + eor r12,r0 + ldd r0,Z+7 + eor r13,r0 + ldd r0,Z+8 + eor r10,r0 + ldd r0,Z+9 + eor r11,r0 + ldd r0,Z+10 + eor r12,r0 + ldd r0,Z+11 + eor r13,r0 + ldd r0,Z+48 + eor r10,r0 + ldd r0,Z+49 + eor r11,r0 + ldd r0,Z+50 + eor r12,r0 + ldd r0,Z+51 + eor r13,r0 + std Y+21,r10 + std Y+22,r11 + std Y+23,r12 + std Y+24,r13 + ldd r10,Y+25 + dec r10 + std Y+25,r10 + breq 6623f + rjmp 40b +6623: + std Z+24,r2 + std Z+25,r3 + std Z+26,r4 + std Z+27,r5 + std Z+28,r6 + std Z+29,r7 + std Z+30,r8 + std Z+31,r9 + st -Z,r27 + st -Z,r26 + st -Z,r23 + st -Z,r22 + st -Z,r21 + st -Z,r20 + st -Z,r19 + st -Z,r18 + ldi r25,72 + add r30,r25 + adc r31,r1 + ldd r18,Y+9 + ldd r19,Y+10 + ldd r20,Y+11 + ldd r21,Y+12 + ldd r22,Y+13 + ldd r23,Y+14 + ldd 
r26,Y+15 + ldd r27,Y+16 + ldd r2,Y+17 + ldd r3,Y+18 + ldd r4,Y+19 + ldd r5,Y+20 + ldd r6,Y+21 + ldd r7,Y+22 + ldd r8,Y+23 + ldd r9,Y+24 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r22 + std Z+5,r23 + std Z+6,r26 + std Z+7,r27 + std Z+8,r2 + std Z+9,r3 + std Z+10,r4 + std Z+11,r5 + std Z+12,r6 + std Z+13,r7 + std Z+14,r8 + std Z+15,r9 + adiw r28,26 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size drysponge256_g, .-drysponge256_g + +#endif diff --git a/drygascon/Implementations/crypto_aead/drygascon128/rhys/internal-drysponge.c b/drygascon/Implementations/crypto_aead/drygascon128/rhys/internal-drysponge.c index 67f1b27..6dfe48c 100644 --- a/drygascon/Implementations/crypto_aead/drygascon128/rhys/internal-drysponge.c +++ b/drygascon/Implementations/crypto_aead/drygascon128/rhys/internal-drysponge.c @@ -23,6 +23,8 @@ #include "internal-drysponge.h" #include +#if !defined(__AVR__) + /* Right rotations in bit-interleaved format */ #define intRightRotateEven(x,bits) \ (__extension__ ({ \ @@ -289,6 +291,8 @@ void drysponge256_g(drysponge256_state_t *state) } } +#endif /* !__AVR__ */ + void drysponge128_g_core(drysponge128_state_t *state) { unsigned round; @@ -304,6 +308,7 @@ void drysponge256_g_core(drysponge256_state_t *state) } /** + * \fn uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index) * \brief Selects an element of x in constant time. * * \param x Points to the four elements of x. @@ -311,6 +316,7 @@ void drysponge256_g_core(drysponge256_state_t *state) * * \return The selected element of x. */ +#if !defined(__AVR__) STATIC_INLINE uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index) { /* We need to be careful how we select each element of x because @@ -340,6 +346,11 @@ STATIC_INLINE uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index) mask = -((uint32_t)((0x04 - (index ^ 0x03)) >> 2)); return result ^ (x[3] & mask); } +#else +/* AVR is more or less immune to cache timing issues because it doesn't + * have anything like an L1 or L2 cache. Select the word directly */ +#define drysponge_select_x(x, index) ((x)[(index)]) +#endif /** * \brief Mixes a 32-bit value into the DrySPONGE128 state. diff --git a/drygascon/Implementations/crypto_aead/drygascon128/rhys/internal-util.h b/drygascon/Implementations/crypto_aead/drygascon128/rhys/internal-util.h index e79158c..e30166d 100644 --- a/drygascon/Implementations/crypto_aead/drygascon128/rhys/internal-util.h +++ b/drygascon/Implementations/crypto_aead/drygascon128/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/aead-common.c b/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
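
[Editor's note on the composed rotation macros above: on AVR only rotations by 1 bit or by a whole number of bytes are cheap, so every other rotation count is built from a byte rotation plus a few single-bit rotations, exactly as the macro comments describe (e.g. "left by 5" = left by 8, then right by 3). The stand-alone C sketch below is not part of the patch; the file name rotate-check.c and the helper names rol32/ror32/rol5_composed are illustrative only. It just verifies one composed case against the generic rotation.]

    /* rotate-check.c - hypothetical self-check for the composed-rotation idea. */
    #include <stdint.h>
    #include <stdio.h>

    /* Generic 32-bit rotations; bits must be in 1..31. */
    static uint32_t rol32(uint32_t x, unsigned bits) { return (x << bits) | (x >> (32 - bits)); }
    static uint32_t ror32(uint32_t x, unsigned bits) { return (x >> bits) | (x << (32 - bits)); }

    /* Composed "rotate left by 5": rotate left by 8, then right by 1 three times,
     * mirroring the leftRotate5() macro in the hunk above. */
    static uint32_t rol5_composed(uint32_t x)
    {
        x = rol32(x, 8);
        x = ror32(x, 1);
        x = ror32(x, 1);
        x = ror32(x, 1);
        return x;
    }

    int main(void)
    {
        uint32_t x = 0x12345678;
        printf("direct: %08lx, composed: %08lx\n",
               (unsigned long)rol32(x, 5), (unsigned long)rol5_composed(x));
        return 0; /* both lines print 468acf02 */
    }
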
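[Editor's note on the drysponge_select_x() change a little earlier in this patch: on AVR the constant-time masked lookup is replaced by a plain array index, because the CPU has no data cache whose timing could leak the index. For the other platforms the library keeps a mask-based select; the rough illustration below follows the mask pattern visible in the quoted source, but the function name select_x_masked is mine and this is a sketch, not the library's exact code.]

    #include <stdint.h>

    /* Select x[index] for 0 <= index <= 3 without an index-dependent memory
     * access pattern: every word is read, and each mask is all-ones only for
     * the word that the index actually selects. */
    static uint32_t select_x_masked(const uint32_t x[4], uint8_t index)
    {
        uint32_t result;
        uint32_t mask;
        mask = -((uint32_t)((0x04 - index) >> 2));          /* -1 iff index == 0 */
        result = x[0] & mask;
        mask = -((uint32_t)((0x04 - (index ^ 0x01)) >> 2)); /* -1 iff index == 1 */
        result ^= x[1] & mask;
        mask = -((uint32_t)((0x04 - (index ^ 0x02)) >> 2)); /* -1 iff index == 2 */
        result ^= x[2] & mask;
        mask = -((uint32_t)((0x04 - (index ^ 0x03)) >> 2)); /* -1 iff index == 3 */
        return result ^ (x[3] & mask);
    }
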
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/aead-common.h b/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. 
- * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. 
- * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/api.h b/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/api.h deleted file mode 100644 index 75fabd7..0000000 --- a/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 32 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 32 -#define CRYPTO_NOOVERLAP 1 diff --git a/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/drygascon.c b/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/drygascon.c deleted file mode 100644 index e963903..0000000 --- a/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/drygascon.c +++ /dev/null @@ -1,421 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
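
[Editor's note: aead_check_tag(), declared in the aead-common.h hunk above and implemented in the deleted aead-common.c earlier in this patch, is the helper the AEAD decrypt routines call after recomputing the tag. The hedged usage sketch below is not taken from the patch; the wrapper name finish_decrypt and its parameter names are illustrative, and only the aead_check_tag() call itself comes from the quoted source.]

    #include "aead-common.h"

    /* At the end of an AEAD decrypt: compare the recomputed and received tags
     * in constant time, wiping the plaintext on mismatch.
     * Returns 0 on success or -1 on tag failure, matching aead_check_tag(). */
    static int finish_decrypt(unsigned char *m, unsigned long long mlen,
                              const unsigned char *computed_tag,
                              const unsigned char *received_tag,
                              unsigned tag_len)
    {
        /* aead_check_tag() XORs the two tags byte by byte, folds the result
         * into a single 0 / -1 mask, and ANDs the mask over the plaintext so
         * no branch or memory access depends on secret data. */
        return aead_check_tag(m, mlen, computed_tag, received_tag, tag_len);
    }
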
- */ - -#include "drygascon.h" -#include "internal-drysponge.h" -#include - -aead_cipher_t const drygascon128_cipher = { - "DryGASCON128", - DRYGASCON128_KEY_SIZE, - DRYGASCON128_NONCE_SIZE, - DRYGASCON128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - drygascon128_aead_encrypt, - drygascon128_aead_decrypt -}; - -aead_cipher_t const drygascon256_cipher = { - "DryGASCON256", - DRYGASCON256_KEY_SIZE, - DRYGASCON256_NONCE_SIZE, - DRYGASCON256_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - drygascon256_aead_encrypt, - drygascon256_aead_decrypt -}; - -aead_hash_algorithm_t const drygascon128_hash_algorithm = { - "DryGASCON128-HASH", - sizeof(int), - DRYGASCON128_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - drygascon128_hash, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const drygascon256_hash_algorithm = { - "DryGASCON256-HASH", - sizeof(int), - DRYGASCON256_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - drygascon256_hash, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \brief Processes associated data for DryGASCON128. - * - * \param state DrySPONGE128 sponge state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data, must not be zero. - * \param finalize Non-zero to finalize packet processing because - * the message is zero-length. - */ -static void drygascon128_process_ad - (drysponge128_state_t *state, const unsigned char *ad, - unsigned long long adlen, int finalize) -{ - /* Process all blocks except the last one */ - while (adlen > DRYSPONGE128_RATE) { - drysponge128_f_absorb(state, ad, DRYSPONGE128_RATE); - drysponge128_g_core(state); - ad += DRYSPONGE128_RATE; - adlen -= DRYSPONGE128_RATE; - } - - /* Process the last block with domain separation and padding */ - state->domain = DRYDOMAIN128_ASSOC_DATA; - if (finalize) - state->domain |= DRYDOMAIN128_FINAL; - if (adlen < DRYSPONGE128_RATE) - state->domain |= DRYDOMAIN128_PADDED; - drysponge128_f_absorb(state, ad, (unsigned)adlen); - drysponge128_g(state); -} - -/** - * \brief Processes associated data for DryGASCON256. - * - * \param state DrySPONGE256 sponge state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data, must not be zero. - * \param finalize Non-zero to finalize packet processing because - * the message is zero-length. 
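The helper above and the DrySPONGE256 variant below follow the same last-block rule; a minimal sketch of the block accounting behind the strict "adlen > rate" loop condition, with the rate supplied as an ordinary parameter rather than the real DRYSPONGE128_RATE constant:

#include <stdio.h>

/* Because the loop uses a strict comparison, an input that is an exact
 * multiple of the rate still leaves one full block behind for the final,
 * domain-separated absorption, so the PADDED flag is only needed when the
 * final block is short. */
static void ad_schedule(unsigned long long adlen, unsigned rate)
{
    unsigned long long intermediate = 0;
    while (adlen > rate) {   /* same condition as drygascon*_process_ad */
        ++intermediate;
        adlen -= rate;
    }
    printf("%llu intermediate g_core call(s), final block of %llu byte(s)%s\n",
           intermediate, adlen, adlen < rate ? " (padded)" : "");
}

int main(void)
{
    ad_schedule(16, 16); /* exactly one rate block: no g_core, unpadded final block */
    ad_schedule(17, 16); /* one full block absorbed, then a 1-byte padded final block */
    ad_schedule(32, 16); /* two rate blocks: one g_core, then a full unpadded final block */
    return 0;
}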
- */ -static void drygascon256_process_ad - (drysponge256_state_t *state, const unsigned char *ad, - unsigned long long adlen, int finalize) -{ - /* Process all blocks except the last one */ - while (adlen > DRYSPONGE256_RATE) { - drysponge256_f_absorb(state, ad, DRYSPONGE256_RATE); - drysponge256_g_core(state); - ad += DRYSPONGE256_RATE; - adlen -= DRYSPONGE256_RATE; - } - - /* Process the last block with domain separation and padding */ - state->domain = DRYDOMAIN256_ASSOC_DATA; - if (finalize) - state->domain |= DRYDOMAIN256_FINAL; - if (adlen < DRYSPONGE256_RATE) - state->domain |= DRYDOMAIN256_PADDED; - drysponge256_f_absorb(state, ad, (unsigned)adlen); - drysponge256_g(state); -} - -int drygascon128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - drysponge128_state_t state; - unsigned temp; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + DRYGASCON128_TAG_SIZE; - - /* Initialize the sponge state with the key and nonce */ - drysponge128_setup(&state, k, npub, adlen == 0 && mlen == 0); - - /* Process the associated data */ - if (adlen > 0) - drygascon128_process_ad(&state, ad, adlen, mlen == 0); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - /* Processs all blocks except the last one */ - while (mlen > DRYSPONGE128_RATE) { - drysponge128_f_absorb(&state, m, DRYSPONGE128_RATE); - lw_xor_block_2_src(c, m, state.r.B, DRYSPONGE128_RATE); - drysponge128_g(&state); - c += DRYSPONGE128_RATE; - m += DRYSPONGE128_RATE; - mlen -= DRYSPONGE128_RATE; - } - - /* Process the last block with domain separation and padding */ - state.domain = DRYDOMAIN128_MESSAGE | DRYDOMAIN128_FINAL; - if (mlen < DRYSPONGE128_RATE) - state.domain |= DRYDOMAIN128_PADDED; - temp = (unsigned)mlen; - drysponge128_f_absorb(&state, m, temp); - lw_xor_block_2_src(c, m, state.r.B, temp); - drysponge128_g(&state); - c += temp; - } - - /* Generate the authentication tag */ - memcpy(c, state.r.B, DRYGASCON128_TAG_SIZE); - return 0; -} - -int drygascon128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - drysponge128_state_t state; - unsigned char *mtemp = m; - unsigned temp; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < DRYGASCON128_TAG_SIZE) - return -1; - *mlen = clen - DRYGASCON128_TAG_SIZE; - - /* Initialize the sponge state with the key and nonce */ - clen -= DRYGASCON128_TAG_SIZE; - drysponge128_setup(&state, k, npub, adlen == 0 && clen == 0); - - /* Process the associated data */ - if (adlen > 0) - drygascon128_process_ad(&state, ad, adlen, clen == 0); - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > 0) { - /* Processs all blocks except the last one */ - while (clen > DRYSPONGE128_RATE) { - lw_xor_block_2_src(m, c, state.r.B, DRYSPONGE128_RATE); - drysponge128_f_absorb(&state, m, DRYSPONGE128_RATE); - drysponge128_g(&state); - c += DRYSPONGE128_RATE; - m += DRYSPONGE128_RATE; - clen -= DRYSPONGE128_RATE; - } - - /* Process the last block with domain separation and padding */ - state.domain = DRYDOMAIN128_MESSAGE | DRYDOMAIN128_FINAL; - if (clen < DRYSPONGE128_RATE) - state.domain |= 
DRYDOMAIN128_PADDED; - temp = (unsigned)clen; - lw_xor_block_2_src(m, c, state.r.B, temp); - drysponge128_f_absorb(&state, m, temp); - drysponge128_g(&state); - c += temp; - } - - /* Check the authentication tag */ - return aead_check_tag(mtemp, *mlen, state.r.B, c, DRYGASCON128_TAG_SIZE); -} - -int drygascon256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - drysponge256_state_t state; - unsigned temp; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + DRYGASCON256_TAG_SIZE; - - /* Initialize the sponge state with the key and nonce */ - drysponge256_setup(&state, k, npub, adlen == 0 && mlen == 0); - - /* Process the associated data */ - if (adlen > 0) - drygascon256_process_ad(&state, ad, adlen, mlen == 0); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - /* Processs all blocks except the last one */ - while (mlen > DRYSPONGE256_RATE) { - drysponge256_f_absorb(&state, m, DRYSPONGE256_RATE); - lw_xor_block_2_src(c, m, state.r.B, DRYSPONGE256_RATE); - drysponge256_g(&state); - c += DRYSPONGE256_RATE; - m += DRYSPONGE256_RATE; - mlen -= DRYSPONGE256_RATE; - } - - /* Process the last block with domain separation and padding */ - state.domain = DRYDOMAIN256_MESSAGE | DRYDOMAIN256_FINAL; - if (mlen < DRYSPONGE256_RATE) - state.domain |= DRYDOMAIN256_PADDED; - temp = (unsigned)mlen; - drysponge256_f_absorb(&state, m, temp); - lw_xor_block_2_src(c, m, state.r.B, temp); - drysponge256_g(&state); - c += temp; - } - - /* Generate the authentication tag */ - memcpy(c, state.r.B, 16); - drysponge256_g(&state); - memcpy(c + 16, state.r.B, 16); - return 0; -} - -int drygascon256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - drysponge256_state_t state; - unsigned char *mtemp = m; - unsigned temp; - int result; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < DRYGASCON256_TAG_SIZE) - return -1; - *mlen = clen - DRYGASCON256_TAG_SIZE; - - /* Initialize the sponge state with the key and nonce */ - clen -= DRYGASCON256_TAG_SIZE; - drysponge256_setup(&state, k, npub, adlen == 0 && clen == 0); - - /* Process the associated data */ - if (adlen > 0) - drygascon256_process_ad(&state, ad, adlen, clen == 0); - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > 0) { - /* Processs all blocks except the last one */ - while (clen > DRYSPONGE256_RATE) { - lw_xor_block_2_src(m, c, state.r.B, DRYSPONGE256_RATE); - drysponge256_f_absorb(&state, m, DRYSPONGE256_RATE); - drysponge256_g(&state); - c += DRYSPONGE256_RATE; - m += DRYSPONGE256_RATE; - clen -= DRYSPONGE256_RATE; - } - - /* Process the last block with domain separation and padding */ - state.domain = DRYDOMAIN256_MESSAGE | DRYDOMAIN256_FINAL; - if (clen < DRYSPONGE256_RATE) - state.domain |= DRYDOMAIN256_PADDED; - temp = (unsigned)clen; - lw_xor_block_2_src(m, c, state.r.B, temp); - drysponge256_f_absorb(&state, m, temp); - drysponge256_g(&state); - c += temp; - } - - /* Check the authentication tag which is split into two pieces */ - result = aead_check_tag(0, 0, state.r.B, c, 16); - drysponge256_g(&state); - return 
aead_check_tag_precheck - (mtemp, *mlen, state.r.B, c + 16, 16, ~result); -} - -/** - * \brief Precomputed initialization vector for DryGASCON128-HASH. - * - * This is the CST_H value from the DryGASCON specification after it - * has been processed by the key setup function for DrySPONGE128. - */ -static unsigned char const drygascon128_hash_init[] = { - /* c */ - 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, - 0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44, - 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, - 0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44, - 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, - /* x */ - 0xa4, 0x09, 0x38, 0x22, 0x29, 0x9f, 0x31, 0xd0, - 0x08, 0x2e, 0xfa, 0x98, 0xec, 0x4e, 0x6c, 0x89 -}; - -int drygascon128_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - drysponge128_state_t state; - memcpy(state.c.B, drygascon128_hash_init, sizeof(state.c.B)); - memcpy(state.x.B, drygascon128_hash_init + sizeof(state.c.B), - sizeof(state.x.B)); - state.domain = 0; - state.rounds = DRYSPONGE128_ROUNDS; - drygascon128_process_ad(&state, in, inlen, 1); - memcpy(out, state.r.B, 16); - drysponge128_g(&state); - memcpy(out + 16, state.r.B, 16); - return 0; -} - -/** - * \brief Precomputed initialization vector for DryGASCON256-HASH. - * - * This is the CST_H value from the DryGASCON specification after it - * has been processed by the key setup function for DrySPONGE256. - */ -static unsigned char const drygascon256_hash_init[] = { - /* c */ - 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, - 0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44, - 0xa4, 0x09, 0x38, 0x22, 0x29, 0x9f, 0x31, 0xd0, - 0x08, 0x2e, 0xfa, 0x98, 0xec, 0x4e, 0x6c, 0x89, - 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, - 0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44, - 0xa4, 0x09, 0x38, 0x22, 0x29, 0x9f, 0x31, 0xd0, - 0x08, 0x2e, 0xfa, 0x98, 0xec, 0x4e, 0x6c, 0x89, - 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, - /* x */ - 0x45, 0x28, 0x21, 0xe6, 0x38, 0xd0, 0x13, 0x77, - 0xbe, 0x54, 0x66, 0xcf, 0x34, 0xe9, 0x0c, 0x6c -}; - -int drygascon256_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - drysponge256_state_t state; - memcpy(state.c.B, drygascon256_hash_init, sizeof(state.c.B)); - memcpy(state.x.B, drygascon256_hash_init + sizeof(state.c.B), - sizeof(state.x.B)); - state.domain = 0; - state.rounds = DRYSPONGE256_ROUNDS; - drygascon256_process_ad(&state, in, inlen, 1); - memcpy(out, state.r.B, 16); - drysponge256_g(&state); - memcpy(out + 16, state.r.B, 16); - drysponge256_g(&state); - memcpy(out + 32, state.r.B, 16); - drysponge256_g(&state); - memcpy(out + 48, state.r.B, 16); - return 0; -} diff --git a/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/drygascon.h b/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/drygascon.h deleted file mode 100644 index 12e18c3..0000000 --- a/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/drygascon.h +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_DRYGASCON_H -#define LWCRYPTO_DRYGASCON_H - -#include "aead-common.h" - -/** - * \file drygascon.h - * \brief DryGASCON authenticated encryption algorithm. - * - * DryGASCON is a family of authenticated encryption algorithms based - * around a generalised version of the ASCON permutation. DryGASCON - * is designed to provide some protection against power analysis. - * - * There are four algorithms in the DryGASCON family: - * - * \li DryGASCON128 is an authenticated encryption algorithm with a - * 128-bit key, a 128-bit nonce, and a 128-bit authentication tag. - * \li DryGASCON256 is an authenticated encryption algorithm with a - * 256-bit key, a 128-bit nonce, and a 256-bit authentication tag. - * \li DryGASCON128-HASH is a hash algorithm with a 256-bit output. - * \li DryGASCON256-HASH is a hash algorithm with a 512-bit output. - * - * DryGASCON128 and DryGASCON128-HASH are the primary members of the family. - * - * References: https://github.com/sebastien-riou/DryGASCON - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for DryGASCON128. - */ -#define DRYGASCON128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for DryGASCON128. - */ -#define DRYGASCON128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for DryGASCON128. - */ -#define DRYGASCON128_NONCE_SIZE 16 - -/** - * \brief Size of the hash output for DryGASCON128-HASH. - */ -#define DRYGASCON128_HASH_SIZE 32 - -/** - * \brief Size of the key for DryGASCON256. - */ -#define DRYGASCON256_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for DryGASCON256. - */ -#define DRYGASCON256_TAG_SIZE 32 - -/** - * \brief Size of the nonce for DryGASCON256. - */ -#define DRYGASCON256_NONCE_SIZE 16 - -/** - * \brief Size of the hash output for DryGASCON256-HASH. - */ -#define DRYGASCON256_HASH_SIZE 64 - -/** - * \brief Meta-information block for the DryGASCON128 cipher. - */ -extern aead_cipher_t const drygascon128_cipher; - -/** - * \brief Meta-information block for the DryGASCON256 cipher. - */ -extern aead_cipher_t const drygascon256_cipher; - -/** - * \brief Meta-information block for DryGASCON128-HASH. - */ -extern aead_hash_algorithm_t const drygascon128_hash_algorithm; - -/** - * \brief Meta-information block for DryGASCON256-HASH. - */ -extern aead_hash_algorithm_t const drygascon256_hash_algorithm; - -/** - * \brief Encrypts and authenticates a packet with DryGASCON128.
- * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa drygascon128_aead_decrypt() - */ -int drygascon128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with DryGASCON128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa drygascon128_aead_encrypt() - */ -int drygascon128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with DryGASCON256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa drygascon256_aead_decrypt() - */ -int drygascon256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with DryGASCON256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa drygascon256_aead_encrypt() - */ -int drygascon256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with DRYGASCON128. - * - * \param out Buffer to receive the hash output which must be at least - * DRYGASCON128_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int drygascon128_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with DRYGASCON256. - * - * \param out Buffer to receive the hash output which must be at least - * DRYGASCON256_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. 
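A minimal usage sketch for the DryGASCON128 AEAD entry points declared above, relying only on the sizes and prototypes from this header; the drygascon128_roundtrip() helper and its test buffers are hypothetical:

#include <string.h>
#include "drygascon.h"

static int drygascon128_roundtrip(void)
{
    unsigned char key[DRYGASCON128_KEY_SIZE] = {0};
    unsigned char nonce[DRYGASCON128_NONCE_SIZE] = {0};
    unsigned char ad[4] = {1, 2, 3, 4};
    unsigned char msg[12] = "hello world";                 /* 11 chars + NUL */
    unsigned char ct[sizeof(msg) + DRYGASCON128_TAG_SIZE]; /* room for the tag */
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    drygascon128_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                              ad, sizeof(ad), 0, nonce, key);
    /* ctlen is now sizeof(msg) + DRYGASCON128_TAG_SIZE */
    if (drygascon128_aead_decrypt(pt, &ptlen, 0, ct, ctlen,
                                  ad, sizeof(ad), nonce, key) != 0)
        return -1; /* tag mismatch: pt has been zeroed by the tag check */
    return memcmp(pt, msg, (size_t)ptlen) == 0 ? 0 : -1;
}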
- */ -int drygascon256_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/encrypt.c b/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/encrypt.c deleted file mode 100644 index 9f3c373..0000000 --- a/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "drygascon.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return drygascon256_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return drygascon256_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/internal-drysponge-avr.S b/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/internal-drysponge-avr.S deleted file mode 100644 index 84d0ff8..0000000 --- a/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/internal-drysponge-avr.S +++ /dev/null @@ -1,5092 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global gascon128_core_round - .type gascon128_core_round, @function -gascon128_core_round: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ldi r18,15 - sub r18,r22 - swap r18 - or r22,r18 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+16 - ldd r5,Z+17 - ldd r6,Z+18 - ldd r7,Z+19 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - eor r4,r22 - ldd r23,Z+8 - ldd r12,Z+24 - ldd r13,Z+32 - eor r18,r13 - eor r4,r23 - eor r13,r12 - mov r14,r23 - mov r0,r18 - com r0 - and r14,r0 - mov r15,r4 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r4 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r18 - mov r0,r13 - com r0 - and r16,r0 - eor r18,r15 - eor r23,r24 - eor r4,r25 - eor r12,r16 - eor r13,r14 - eor r23,r18 - eor r12,r4 - eor r18,r13 - com r4 - st Z,r18 - std Z+8,r23 - std Z+24,r12 - std Z+32,r13 - ldd r23,Z+9 - ldd r12,Z+25 - ldd r13,Z+33 - eor r19,r13 - eor r5,r23 - eor r13,r12 - mov r14,r23 - mov r0,r19 - com r0 - and r14,r0 - mov r15,r5 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r5 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r19 - mov r0,r13 - com r0 - and r16,r0 - eor r19,r15 - eor r23,r24 - eor r5,r25 - eor r12,r16 - eor r13,r14 - eor r23,r19 - eor r12,r5 - eor r19,r13 - com r5 - std Z+1,r19 - std Z+9,r23 - std Z+25,r12 - std Z+33,r13 - ldd r23,Z+10 - ldd r12,Z+26 - ldd r13,Z+34 - eor r20,r13 - eor r6,r23 - eor r13,r12 - mov r14,r23 - mov r0,r20 - com r0 - and r14,r0 - mov r15,r6 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r6 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r20 - mov r0,r13 - com r0 - and r16,r0 - eor r20,r15 
- eor r23,r24 - eor r6,r25 - eor r12,r16 - eor r13,r14 - eor r23,r20 - eor r12,r6 - eor r20,r13 - com r6 - std Z+2,r20 - std Z+10,r23 - std Z+26,r12 - std Z+34,r13 - ldd r23,Z+11 - ldd r12,Z+27 - ldd r13,Z+35 - eor r21,r13 - eor r7,r23 - eor r13,r12 - mov r14,r23 - mov r0,r21 - com r0 - and r14,r0 - mov r15,r7 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r7 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r21 - mov r0,r13 - com r0 - and r16,r0 - eor r21,r15 - eor r23,r24 - eor r7,r25 - eor r12,r16 - eor r13,r14 - eor r23,r21 - eor r12,r7 - eor r21,r13 - com r7 - std Z+3,r21 - std Z+11,r23 - std Z+27,r12 - std Z+35,r13 - ldd r23,Z+12 - ldd r12,Z+28 - ldd r13,Z+36 - eor r26,r13 - eor r8,r23 - eor r13,r12 - mov r14,r23 - mov r0,r26 - com r0 - and r14,r0 - mov r15,r8 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r8 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r26 - mov r0,r13 - com r0 - and r16,r0 - eor r26,r15 - eor r23,r24 - eor r8,r25 - eor r12,r16 - eor r13,r14 - eor r23,r26 - eor r12,r8 - eor r26,r13 - com r8 - std Z+4,r26 - std Z+12,r23 - std Z+28,r12 - std Z+36,r13 - ldd r23,Z+13 - ldd r12,Z+29 - ldd r13,Z+37 - eor r27,r13 - eor r9,r23 - eor r13,r12 - mov r14,r23 - mov r0,r27 - com r0 - and r14,r0 - mov r15,r9 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r9 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r27 - mov r0,r13 - com r0 - and r16,r0 - eor r27,r15 - eor r23,r24 - eor r9,r25 - eor r12,r16 - eor r13,r14 - eor r23,r27 - eor r12,r9 - eor r27,r13 - com r9 - std Z+5,r27 - std Z+13,r23 - std Z+29,r12 - std Z+37,r13 - ldd r23,Z+14 - ldd r12,Z+30 - ldd r13,Z+38 - eor r2,r13 - eor r10,r23 - eor r13,r12 - mov r14,r23 - mov r0,r2 - com r0 - and r14,r0 - mov r15,r10 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r10 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r2 - mov r0,r13 - com r0 - and r16,r0 - eor r2,r15 - eor r23,r24 - eor r10,r25 - eor r12,r16 - eor r13,r14 - eor r23,r2 - eor r12,r10 - eor r2,r13 - com r10 - std Z+6,r2 - std Z+14,r23 - std Z+30,r12 - std Z+38,r13 - ldd r23,Z+15 - ldd r12,Z+31 - ldd r13,Z+39 - eor r3,r13 - eor r11,r23 - eor r13,r12 - mov r14,r23 - mov r0,r3 - com r0 - and r14,r0 - mov r15,r11 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r11 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r3 - mov r0,r13 - com r0 - and r16,r0 - eor r3,r15 - eor r23,r24 - eor r11,r25 - eor r12,r16 - eor r13,r14 - eor r23,r3 - eor r12,r11 - eor r3,r13 - com r11 - std Z+7,r3 - std Z+15,r23 - std Z+31,r12 - std Z+39,r13 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r24 - rol r25 - rol r16 - rol r17 - adc r24,r1 - lsl r24 - rol r25 - rol r16 - rol r17 - adc r24,r1 - eor r24,r18 - eor r25,r19 - eor r16,r20 - eor r17,r21 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 
- ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r18,r24 - eor r19,r25 - eor r20,r16 - eor r21,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - std Z+12,r26 - std Z+13,r27 - std Z+14,r2 - std Z+15,r3 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - bst r12,0 - lsr r15 - ror r14 - ror r13 - ror r12 - bld r15,7 - eor r24,r4 - eor r25,r5 - eor r16,r6 - eor r17,r7 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r1 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r7,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - or r11,r0 - eor r4,r24 - eor r5,r25 - eor r6,r16 - eor r7,r17 - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - eor r24,r18 - eor r25,r19 - eor r16,r20 - eor r17,r21 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r24 - eor r19,r25 - eor r20,r16 - eor r21,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+24,r18 - std Z+25,r19 - std Z+26,r20 - std Z+27,r21 - std Z+28,r26 - std Z+29,r27 - std Z+30,r2 - std Z+31,r3 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - or r17,r0 - eor r24,r18 - eor r25,r19 - eor r16,r20 - eor r17,r21 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r18,r24 - eor r19,r25 - eor r20,r16 - eor r21,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+32,r18 - std Z+33,r19 - std Z+34,r20 - std 
Z+35,r21 - std Z+36,r26 - std Z+37,r27 - std Z+38,r2 - std Z+39,r3 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - or r17,r0 - eor r24,r18 - eor r25,r19 - eor r16,r20 - eor r17,r21 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r24 - eor r19,r25 - eor r20,r16 - eor r21,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - std Z+16,r4 - std Z+17,r5 - std Z+18,r6 - std Z+19,r7 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size gascon128_core_round, .-gascon128_core_round - - .text -.global drysponge128_g - .type drysponge128_g, @function -drysponge128_g: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - subi r30,180 - sbci r31,255 - ld r19,Z - subi r30,76 - sbc r31,r1 - ldi r18,240 - std Z+40,r1 - std Z+41,r1 - std Z+42,r1 - std Z+43,r1 - std Z+44,r1 - std Z+45,r1 - std Z+46,r1 - std Z+47,r1 - std Z+48,r1 - std Z+49,r1 - std Z+50,r1 - std Z+51,r1 - std Z+52,r1 - std Z+53,r1 - std Z+54,r1 - std Z+55,r1 - ld r20,Z - ldd r21,Z+1 - ldd r22,Z+2 - ldd r23,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+16 - ldd r5,Z+17 - ldd r6,Z+18 - ldd r7,Z+19 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 -38: - eor r4,r18 - ldd r12,Z+8 - ldd r13,Z+24 - ldd r14,Z+32 - eor r20,r14 - eor r4,r12 - eor r14,r13 - mov r15,r12 - mov r0,r20 - com r0 - and r15,r0 - mov r24,r4 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r4 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r20 - mov r0,r14 - com r0 - and r17,r0 - eor r20,r24 - eor r12,r25 - eor r4,r16 - eor r13,r17 - eor r14,r15 - eor r12,r20 - eor r13,r4 - eor r20,r14 - com r4 - st Z,r20 - std Z+8,r12 - std Z+24,r13 - std Z+32,r14 - ldd r12,Z+9 - ldd r13,Z+25 - ldd r14,Z+33 - eor r21,r14 - eor r5,r12 - eor r14,r13 - mov r15,r12 - mov r0,r21 - com r0 - and r15,r0 - mov r24,r5 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r5 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r21 - mov r0,r14 - com r0 - and r17,r0 - eor r21,r24 - eor r12,r25 - eor r5,r16 - eor r13,r17 - eor r14,r15 - eor r12,r21 - eor r13,r5 - eor r21,r14 - com r5 - std Z+1,r21 - std Z+9,r12 - std Z+25,r13 - std Z+33,r14 - ldd r12,Z+10 - ldd r13,Z+26 - ldd r14,Z+34 - eor r22,r14 - eor r6,r12 - eor r14,r13 - mov r15,r12 - mov r0,r22 - com r0 - and 
r15,r0 - mov r24,r6 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r6 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r22 - mov r0,r14 - com r0 - and r17,r0 - eor r22,r24 - eor r12,r25 - eor r6,r16 - eor r13,r17 - eor r14,r15 - eor r12,r22 - eor r13,r6 - eor r22,r14 - com r6 - std Z+2,r22 - std Z+10,r12 - std Z+26,r13 - std Z+34,r14 - ldd r12,Z+11 - ldd r13,Z+27 - ldd r14,Z+35 - eor r23,r14 - eor r7,r12 - eor r14,r13 - mov r15,r12 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r7 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r7 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r23 - mov r0,r14 - com r0 - and r17,r0 - eor r23,r24 - eor r12,r25 - eor r7,r16 - eor r13,r17 - eor r14,r15 - eor r12,r23 - eor r13,r7 - eor r23,r14 - com r7 - std Z+3,r23 - std Z+11,r12 - std Z+27,r13 - std Z+35,r14 - ldd r12,Z+12 - ldd r13,Z+28 - ldd r14,Z+36 - eor r26,r14 - eor r8,r12 - eor r14,r13 - mov r15,r12 - mov r0,r26 - com r0 - and r15,r0 - mov r24,r8 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r8 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r26 - mov r0,r14 - com r0 - and r17,r0 - eor r26,r24 - eor r12,r25 - eor r8,r16 - eor r13,r17 - eor r14,r15 - eor r12,r26 - eor r13,r8 - eor r26,r14 - com r8 - std Z+4,r26 - std Z+12,r12 - std Z+28,r13 - std Z+36,r14 - ldd r12,Z+13 - ldd r13,Z+29 - ldd r14,Z+37 - eor r27,r14 - eor r9,r12 - eor r14,r13 - mov r15,r12 - mov r0,r27 - com r0 - and r15,r0 - mov r24,r9 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r9 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r27 - mov r0,r14 - com r0 - and r17,r0 - eor r27,r24 - eor r12,r25 - eor r9,r16 - eor r13,r17 - eor r14,r15 - eor r12,r27 - eor r13,r9 - eor r27,r14 - com r9 - std Z+5,r27 - std Z+13,r12 - std Z+29,r13 - std Z+37,r14 - ldd r12,Z+14 - ldd r13,Z+30 - ldd r14,Z+38 - eor r2,r14 - eor r10,r12 - eor r14,r13 - mov r15,r12 - mov r0,r2 - com r0 - and r15,r0 - mov r24,r10 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r10 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r2 - mov r0,r14 - com r0 - and r17,r0 - eor r2,r24 - eor r12,r25 - eor r10,r16 - eor r13,r17 - eor r14,r15 - eor r12,r2 - eor r13,r10 - eor r2,r14 - com r10 - std Z+6,r2 - std Z+14,r12 - std Z+30,r13 - std Z+38,r14 - ldd r12,Z+15 - ldd r13,Z+31 - ldd r14,Z+39 - eor r3,r14 - eor r11,r12 - eor r14,r13 - mov r15,r12 - mov r0,r3 - com r0 - and r15,r0 - mov r24,r11 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r11 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r3 - mov r0,r14 - com r0 - and r17,r0 - eor r3,r24 - eor r12,r25 - eor r11,r16 - eor r13,r17 - eor r14,r15 - eor r12,r3 - eor r13,r11 - eor r3,r14 - com r11 - std Z+7,r3 - std Z+15,r12 - std Z+31,r13 - std Z+39,r14 - ldd r20,Z+8 - ldd r21,Z+9 - ldd r22,Z+10 - ldd r23,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r12,r20 - movw r14,r22 - movw r24,r26 - movw r16,r2 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r24 - rol r25 - rol r16 - rol r17 - adc r24,r1 - lsl r24 - rol r25 - rol r16 - rol r17 - adc r24,r1 - eor r24,r20 - eor r25,r21 - eor r16,r22 - eor r17,r23 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r20 - mov r20,r22 - mov r22,r0 - mov r0,r21 - mov r21,r23 - mov r23,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr 
r23 - ror r22 - ror r21 - ror r20 - ror r0 - or r23,r0 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+8,r20 - std Z+9,r21 - std Z+10,r22 - std Z+11,r23 - std Z+12,r26 - std Z+13,r27 - std Z+14,r2 - std Z+15,r3 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - bst r12,0 - lsr r15 - ror r14 - ror r13 - ror r12 - bld r15,7 - eor r24,r4 - eor r25,r5 - eor r16,r6 - eor r17,r7 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r1 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r7,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - or r11,r0 - eor r4,r24 - eor r5,r25 - eor r6,r16 - eor r7,r17 - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - ldd r20,Z+24 - ldd r21,Z+25 - ldd r22,Z+26 - ldd r23,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r12,r20 - movw r14,r22 - movw r24,r26 - movw r16,r2 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - eor r24,r20 - eor r25,r21 - eor r16,r22 - eor r17,r23 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r20 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r0 - lsl r20 - rol r21 - rol r22 - rol r23 - adc r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - adc r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - adc r20,r1 - mov r0,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+24,r20 - std Z+25,r21 - std Z+26,r22 - std Z+27,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r2 - std Z+31,r3 - ldd r20,Z+32 - ldd r21,Z+33 - ldd r22,Z+34 - ldd r23,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - movw r12,r20 - movw r14,r22 - movw r24,r26 - movw r16,r2 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - or r17,r0 - eor r24,r20 - eor r25,r21 - eor r16,r22 - eor r17,r23 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r20 - mov r20,r22 - mov r22,r0 - mov r0,r21 - mov r21,r23 - mov r23,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - or r23,r0 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - 
lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+32,r20 - std Z+33,r21 - std Z+34,r22 - std Z+35,r23 - std Z+36,r26 - std Z+37,r27 - std Z+38,r2 - std Z+39,r3 - ld r20,Z - ldd r21,Z+1 - ldd r22,Z+2 - ldd r23,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r12,r20 - movw r14,r22 - movw r24,r26 - movw r16,r2 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - or r17,r0 - eor r24,r20 - eor r25,r21 - eor r16,r22 - eor r17,r23 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r20 - mov r20,r22 - mov r22,r0 - mov r0,r21 - mov r21,r23 - mov r23,r0 - lsl r20 - rol r21 - rol r22 - rol r23 - adc r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - adc r20,r1 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - ldd r12,Z+40 - ldd r13,Z+41 - ldd r14,Z+42 - ldd r15,Z+43 - eor r12,r20 - eor r13,r21 - eor r14,r22 - eor r15,r23 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - std Z+40,r12 - std Z+41,r13 - std Z+42,r14 - std Z+43,r15 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - ldd r0,Z+24 - eor r12,r0 - ldd r0,Z+25 - eor r13,r0 - ldd r0,Z+26 - eor r14,r0 - ldd r0,Z+27 - eor r15,r0 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - ldd r12,Z+48 - ldd r13,Z+49 - ldd r14,Z+50 - ldd r15,Z+51 - ldd r0,Z+8 - eor r12,r0 - ldd r0,Z+9 - eor r13,r0 - ldd r0,Z+10 - eor r14,r0 - ldd r0,Z+11 - eor r15,r0 - ldd r0,Z+28 - eor r12,r0 - ldd r0,Z+29 - eor r13,r0 - ldd r0,Z+30 - eor r14,r0 - ldd r0,Z+31 - eor r15,r0 - std Z+48,r12 - std Z+49,r13 - std Z+50,r14 - std Z+51,r15 - ldd r12,Z+52 - ldd r13,Z+53 - ldd r14,Z+54 - ldd r15,Z+55 - ldd r0,Z+12 - eor r12,r0 - ldd r0,Z+13 - eor r13,r0 - ldd r0,Z+14 - eor r14,r0 - ldd r0,Z+15 - eor r15,r0 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - std Z+52,r12 - std Z+53,r13 - std Z+54,r14 - std Z+55,r15 - subi r18,15 - dec r19 - breq 5904f - rjmp 38b -5904: - st Z,r20 - std Z+1,r21 - std Z+2,r22 - std Z+3,r23 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - std Z+16,r4 - std Z+17,r5 - std Z+18,r6 - std Z+19,r7 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size drysponge128_g, .-drysponge128_g - - .text -.global gascon256_core_round - .type gascon256_core_round, @function -gascon256_core_round: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,8 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 26 - ldi r18,15 - sub r18,r22 - swap r18 - or r22,r18 - ld r18,Z+ - ld r19,Z+ - ld r20,Z+ - ld r21,Z+ - ld r26,Z+ - ld r27,Z+ - ld r2,Z+ - ld r3,Z+ - ldd 
r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - eor r4,r22 - ld r22,Z - ldd r23,Z+8 - ldd r12,Z+16 - ldd r13,Z+32 - ldd r14,Z+40 - ldd r15,Z+48 - ldd r24,Z+56 - eor r18,r24 - eor r23,r22 - eor r4,r12 - eor r14,r13 - eor r24,r15 - mov r17,r18 - mov r25,r22 - mov r0,r18 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r18,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r4 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r4 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r4,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r18 - eor r12,r23 - eor r13,r4 - eor r15,r14 - eor r18,r24 - com r4 - std Y+1,r18 - st Z,r22 - std Z+8,r23 - std Z+16,r12 - std Z+32,r13 - std Z+40,r14 - std Z+48,r15 - std Z+56,r24 - ldd r22,Z+1 - ldd r23,Z+9 - ldd r12,Z+17 - ldd r13,Z+33 - ldd r14,Z+41 - ldd r15,Z+49 - ldd r24,Z+57 - eor r19,r24 - eor r23,r22 - eor r5,r12 - eor r14,r13 - eor r24,r15 - mov r17,r19 - mov r25,r22 - mov r0,r19 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r19,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r5 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r5 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r5,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r19 - eor r12,r23 - eor r13,r5 - eor r15,r14 - eor r19,r24 - com r5 - std Y+2,r19 - std Z+1,r22 - std Z+9,r23 - std Z+17,r12 - std Z+33,r13 - std Z+41,r14 - std Z+49,r15 - std Z+57,r24 - ldd r22,Z+2 - ldd r23,Z+10 - ldd r12,Z+18 - ldd r13,Z+34 - ldd r14,Z+42 - ldd r15,Z+50 - ldd r24,Z+58 - eor r20,r24 - eor r23,r22 - eor r6,r12 - eor r14,r13 - eor r24,r15 - mov r17,r20 - mov r25,r22 - mov r0,r20 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r20,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r6 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r6 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r6,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r20 - eor r12,r23 - eor r13,r6 - eor r15,r14 - eor r20,r24 - com r6 - std Y+3,r20 - std Z+2,r22 - std Z+10,r23 - std Z+18,r12 - std Z+34,r13 - std Z+42,r14 - std Z+50,r15 - std Z+58,r24 - ldd r22,Z+3 - ldd r23,Z+11 - ldd r12,Z+19 - ldd r13,Z+35 - ldd r14,Z+43 - ldd r15,Z+51 - ldd r24,Z+59 - eor r21,r24 - eor r23,r22 - eor r7,r12 - eor r14,r13 - eor r24,r15 - mov r17,r21 - mov r25,r22 - mov r0,r21 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r21,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r7 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r7 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r7,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com 
r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r21 - eor r12,r23 - eor r13,r7 - eor r15,r14 - eor r21,r24 - com r7 - std Y+4,r21 - std Z+3,r22 - std Z+11,r23 - std Z+19,r12 - std Z+35,r13 - std Z+43,r14 - std Z+51,r15 - std Z+59,r24 - ldd r22,Z+4 - ldd r23,Z+12 - ldd r12,Z+20 - ldd r13,Z+36 - ldd r14,Z+44 - ldd r15,Z+52 - ldd r24,Z+60 - eor r26,r24 - eor r23,r22 - eor r8,r12 - eor r14,r13 - eor r24,r15 - mov r17,r26 - mov r25,r22 - mov r0,r26 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r26,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r8 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r8 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r8,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r26 - eor r12,r23 - eor r13,r8 - eor r15,r14 - eor r26,r24 - com r8 - std Y+5,r26 - std Z+4,r22 - std Z+12,r23 - std Z+20,r12 - std Z+36,r13 - std Z+44,r14 - std Z+52,r15 - std Z+60,r24 - ldd r22,Z+5 - ldd r23,Z+13 - ldd r12,Z+21 - ldd r13,Z+37 - ldd r14,Z+45 - ldd r15,Z+53 - ldd r24,Z+61 - eor r27,r24 - eor r23,r22 - eor r9,r12 - eor r14,r13 - eor r24,r15 - mov r17,r27 - mov r25,r22 - mov r0,r27 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r27,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r9 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r9 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r9,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r27 - eor r12,r23 - eor r13,r9 - eor r15,r14 - eor r27,r24 - com r9 - std Y+6,r27 - std Z+5,r22 - std Z+13,r23 - std Z+21,r12 - std Z+37,r13 - std Z+45,r14 - std Z+53,r15 - std Z+61,r24 - ldd r22,Z+6 - ldd r23,Z+14 - ldd r12,Z+22 - ldd r13,Z+38 - ldd r14,Z+46 - ldd r15,Z+54 - ldd r24,Z+62 - eor r2,r24 - eor r23,r22 - eor r10,r12 - eor r14,r13 - eor r24,r15 - mov r17,r2 - mov r25,r22 - mov r0,r2 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r2,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r10 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r10 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r2 - eor r12,r23 - eor r13,r10 - eor r15,r14 - eor r2,r24 - com r10 - std Y+7,r2 - std Z+6,r22 - std Z+14,r23 - std Z+22,r12 - std Z+38,r13 - std Z+46,r14 - std Z+54,r15 - std Z+62,r24 - ldd r22,Z+7 - ldd r23,Z+15 - ldd r12,Z+23 - ldd r13,Z+39 - ldd r14,Z+47 - ldd r15,Z+55 - ldd r24,Z+63 - eor r3,r24 - eor r23,r22 - eor r11,r12 - eor r14,r13 - eor r24,r15 - mov r17,r3 - mov r25,r22 - mov r0,r3 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r3,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r11 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r11 - com r0 - and 
r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r3 - eor r12,r23 - eor r13,r11 - eor r15,r14 - eor r3,r24 - com r11 - std Y+8,r3 - std Z+7,r22 - std Z+15,r23 - std Z+23,r12 - std Z+39,r13 - std Z+47,r14 - std Z+55,r15 - std Z+63,r24 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - bst r22,0 - lsr r13 - ror r12 - ror r23 - ror r22 - bld r13,7 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - std Z+12,r26 - std Z+13,r27 - std Z+14,r2 - std Z+15,r3 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r22 - mov r22,r23 - mov r23,r12 - mov r12,r13 - mov r13,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - or r13,r0 - mov r0,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor 
r2,r12 - eor r3,r13 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - std Z+20,r26 - std Z+21,r27 - std Z+22,r2 - std Z+23,r3 - movw r22,r4 - movw r12,r6 - movw r14,r8 - movw r24,r10 - mov r0,r1 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - or r13,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r4 - eor r15,r5 - eor r24,r6 - eor r25,r7 - eor r22,r8 - eor r23,r9 - eor r12,r10 - eor r13,r11 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r1 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - or r11,r0 - eor r4,r14 - eor r5,r15 - eor r6,r24 - eor r7,r25 - eor r8,r22 - eor r9,r23 - eor r10,r12 - eor r11,r13 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r22 - mov r22,r12 - mov r12,r0 - mov r0,r23 - mov r23,r13 - mov r13,r0 - mov r0,r14 - mov r14,r24 - mov r24,r0 - mov r0,r15 - mov r15,r25 - mov r25,r0 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+32,r18 - std Z+33,r19 - std Z+34,r20 - std Z+35,r21 - std Z+36,r26 - std Z+37,r27 - std Z+38,r2 - std Z+39,r3 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - ldd r26,Z+44 - ldd r27,Z+45 - ldd r2,Z+46 - ldd r3,Z+47 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r13 - mov r13,r12 - mov r12,r23 - mov r23,r22 - mov r22,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - or r13,r0 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 
- rol r2 - rol r3 - adc r26,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+40,r18 - std Z+41,r19 - std Z+42,r20 - std Z+43,r21 - std Z+44,r26 - std Z+45,r27 - std Z+46,r2 - std Z+47,r3 - ldd r18,Z+48 - ldd r19,Z+49 - ldd r20,Z+50 - ldd r21,Z+51 - ldd r26,Z+52 - ldd r27,Z+53 - ldd r2,Z+54 - ldd r3,Z+55 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r22 - mov r22,r23 - mov r23,r12 - mov r12,r13 - mov r13,r0 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r3 - mov r3,r2 - mov r2,r27 - mov r27,r26 - mov r26,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+48,r18 - std Z+49,r19 - std Z+50,r20 - std Z+51,r21 - std Z+52,r26 - std Z+53,r27 - std Z+54,r2 - std Z+55,r3 - ldd r18,Z+56 - ldd r19,Z+57 - ldd r20,Z+58 - ldd r21,Z+59 - ldd r26,Z+60 - ldd r27,Z+61 - ldd r2,Z+62 - ldd r3,Z+63 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r13 - mov r13,r12 - mov r12,r23 - mov r23,r22 - mov r22,r0 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+56,r18 - std Z+57,r19 - std Z+58,r20 - std Z+59,r21 - std Z+60,r26 - std Z+61,r27 - std Z+62,r2 - std Z+63,r3 - ldd r18,Y+1 - ldd r19,Y+2 - ldd r20,Y+3 - ldd r21,Y+4 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r2,Y+7 - ldd r3,Y+8 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r22 - mov r22,r23 - mov r23,r12 - mov r12,r13 - mov r13,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - or r13,r0 - mov r0,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol 
r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+24,r4 - std Z+25,r5 - std Z+26,r6 - std Z+27,r7 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - st -Z,r3 - st -Z,r2 - st -Z,r27 - st -Z,r26 - st -Z,r21 - st -Z,r20 - st -Z,r19 - st -Z,r18 - adiw r28,8 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gascon256_core_round, .-gascon256_core_round - - .text -.global drysponge256_g - .type drysponge256_g, @function -drysponge256_g: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,26 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 44 - subi r30,148 - sbci r31,255 - ld r19,Z - subi r30,108 - sbc r31,r1 - ldi r18,240 - std Y+25,r19 - std Y+26,r18 - std Y+9,r1 - std Y+10,r1 - std Y+11,r1 - std Y+12,r1 - std Y+13,r1 - std Y+14,r1 - std Y+15,r1 - std Y+16,r1 - std Y+17,r1 - std Y+18,r1 - std Y+19,r1 - std Y+20,r1 - std Y+21,r1 - std Y+22,r1 - std Y+23,r1 - std Y+24,r1 - ld r18,Z+ - ld r19,Z+ - ld r20,Z+ - ld r21,Z+ - ld r22,Z+ - ld r23,Z+ - ld r26,Z+ - ld r27,Z+ - ldd r2,Z+24 - ldd r3,Z+25 - ldd r4,Z+26 - ldd r5,Z+27 - ldd r6,Z+28 - ldd r7,Z+29 - ldd r8,Z+30 - ldd r9,Z+31 -40: - ldd r24,Y+26 - eor r2,r24 - subi r24,15 - std Y+26,r24 - ld r10,Z - ldd r11,Z+8 - ldd r12,Z+16 - ldd r13,Z+32 - ldd r14,Z+40 - ldd r15,Z+48 - ldd r24,Z+56 - eor r18,r24 - eor r11,r10 - eor r2,r12 - eor r14,r13 - eor r24,r15 - mov r17,r18 - mov r25,r10 - mov r0,r18 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r18,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r2 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r2 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r2,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r18 - eor r12,r11 - eor r13,r2 - eor r15,r14 - eor r18,r24 - com r2 - std Y+1,r18 - st Z,r10 - std Z+8,r11 - std Z+16,r12 - std Z+32,r13 - std Z+40,r14 - std Z+48,r15 - std Z+56,r24 - ldd r10,Z+1 - ldd r11,Z+9 - ldd r12,Z+17 - ldd r13,Z+33 - ldd r14,Z+41 - ldd r15,Z+49 - ldd r24,Z+57 - eor r19,r24 - eor r11,r10 - eor r3,r12 - eor r14,r13 - eor r24,r15 - mov r17,r19 - mov r25,r10 - mov r0,r19 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r19,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r3 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r3 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r3,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r19 - eor r12,r11 - eor r13,r3 - eor r15,r14 - eor r19,r24 - com r3 - std Y+2,r19 - std Z+1,r10 - std Z+9,r11 - std Z+17,r12 - std Z+33,r13 - std Z+41,r14 - std 
Z+49,r15 - std Z+57,r24 - ldd r10,Z+2 - ldd r11,Z+10 - ldd r12,Z+18 - ldd r13,Z+34 - ldd r14,Z+42 - ldd r15,Z+50 - ldd r24,Z+58 - eor r20,r24 - eor r11,r10 - eor r4,r12 - eor r14,r13 - eor r24,r15 - mov r17,r20 - mov r25,r10 - mov r0,r20 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r20,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r4 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r4 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r4,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r20 - eor r12,r11 - eor r13,r4 - eor r15,r14 - eor r20,r24 - com r4 - std Y+3,r20 - std Z+2,r10 - std Z+10,r11 - std Z+18,r12 - std Z+34,r13 - std Z+42,r14 - std Z+50,r15 - std Z+58,r24 - ldd r10,Z+3 - ldd r11,Z+11 - ldd r12,Z+19 - ldd r13,Z+35 - ldd r14,Z+43 - ldd r15,Z+51 - ldd r24,Z+59 - eor r21,r24 - eor r11,r10 - eor r5,r12 - eor r14,r13 - eor r24,r15 - mov r17,r21 - mov r25,r10 - mov r0,r21 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r21,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r5 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r5 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r5,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r21 - eor r12,r11 - eor r13,r5 - eor r15,r14 - eor r21,r24 - com r5 - std Y+4,r21 - std Z+3,r10 - std Z+11,r11 - std Z+19,r12 - std Z+35,r13 - std Z+43,r14 - std Z+51,r15 - std Z+59,r24 - ldd r10,Z+4 - ldd r11,Z+12 - ldd r12,Z+20 - ldd r13,Z+36 - ldd r14,Z+44 - ldd r15,Z+52 - ldd r24,Z+60 - eor r22,r24 - eor r11,r10 - eor r6,r12 - eor r14,r13 - eor r24,r15 - mov r17,r22 - mov r25,r10 - mov r0,r22 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r6 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r6 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r6,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r22 - eor r12,r11 - eor r13,r6 - eor r15,r14 - eor r22,r24 - com r6 - std Y+5,r22 - std Z+4,r10 - std Z+12,r11 - std Z+20,r12 - std Z+36,r13 - std Z+44,r14 - std Z+52,r15 - std Z+60,r24 - ldd r10,Z+5 - ldd r11,Z+13 - ldd r12,Z+21 - ldd r13,Z+37 - ldd r14,Z+45 - ldd r15,Z+53 - ldd r24,Z+61 - eor r23,r24 - eor r11,r10 - eor r7,r12 - eor r14,r13 - eor r24,r15 - mov r17,r23 - mov r25,r10 - mov r0,r23 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r7 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r7 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r7,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor 
r24,r25 - eor r10,r23 - eor r12,r11 - eor r13,r7 - eor r15,r14 - eor r23,r24 - com r7 - std Y+6,r23 - std Z+5,r10 - std Z+13,r11 - std Z+21,r12 - std Z+37,r13 - std Z+45,r14 - std Z+53,r15 - std Z+61,r24 - ldd r10,Z+6 - ldd r11,Z+14 - ldd r12,Z+22 - ldd r13,Z+38 - ldd r14,Z+46 - ldd r15,Z+54 - ldd r24,Z+62 - eor r26,r24 - eor r11,r10 - eor r8,r12 - eor r14,r13 - eor r24,r15 - mov r17,r26 - mov r25,r10 - mov r0,r26 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r26,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r8 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r8 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r8,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r26 - eor r12,r11 - eor r13,r8 - eor r15,r14 - eor r26,r24 - com r8 - std Y+7,r26 - std Z+6,r10 - std Z+14,r11 - std Z+22,r12 - std Z+38,r13 - std Z+46,r14 - std Z+54,r15 - std Z+62,r24 - ldd r10,Z+7 - ldd r11,Z+15 - ldd r12,Z+23 - ldd r13,Z+39 - ldd r14,Z+47 - ldd r15,Z+55 - ldd r24,Z+63 - eor r27,r24 - eor r11,r10 - eor r9,r12 - eor r14,r13 - eor r24,r15 - mov r17,r27 - mov r25,r10 - mov r0,r27 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r27,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r9 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r9 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r9,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r27 - eor r12,r11 - eor r13,r9 - eor r15,r14 - eor r27,r24 - com r9 - std Y+8,r27 - std Z+7,r10 - std Z+15,r11 - std Z+23,r12 - std Z+39,r13 - std Z+47,r14 - std Z+55,r15 - std Z+63,r24 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r26,Z+6 - ldd r27,Z+7 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r22 - mov r22,r26 - mov r26,r0 - mov r0,r23 - mov r23,r27 - mov r27,r0 - mov r0,r1 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - or r27,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r22 - std Z+5,r23 - std Z+6,r26 - std Z+7,r27 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r26,Z+14 - ldd r27,Z+15 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - bst r10,0 - lsr r13 - ror r12 - ror r11 - ror r10 - bld r13,7 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor 
r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r1 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - or r27,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - std Z+12,r22 - std Z+13,r23 - std Z+14,r26 - std Z+15,r27 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r26,Z+22 - ldd r27,Z+23 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r10 - mov r10,r11 - mov r11,r12 - mov r12,r13 - mov r13,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - or r13,r0 - mov r0,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r22 - mov r22,r23 - mov r23,r26 - mov r26,r27 - mov r27,r0 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - std Z+20,r22 - std Z+21,r23 - std Z+22,r26 - std Z+23,r27 - movw r10,r2 - movw r12,r4 - movw r14,r6 - movw r24,r8 - mov r0,r1 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - or r13,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r2 - eor r15,r3 - eor r24,r4 - eor r25,r5 - eor r10,r6 - eor r11,r7 - eor r12,r8 - eor r13,r9 - mov r0,r2 - mov r2,r4 - mov r4,r0 - mov r0,r3 - mov r3,r5 - mov r5,r0 - mov r0,r1 - lsr r5 - ror r4 - ror r3 - ror r2 - ror r0 - lsr r5 - ror r4 - ror r3 - ror r2 - ror r0 - lsr r5 - ror r4 - ror r3 - ror r2 - ror r0 - lsr r5 - ror r4 - ror r3 - ror r2 - ror r0 - or r5,r0 - mov r0,r6 - mov r6,r8 - mov r8,r0 - mov r0,r7 - mov r7,r9 - mov r9,r0 - mov r0,r1 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - or r9,r0 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - eor r6,r10 - eor r7,r11 - eor r8,r12 - eor r9,r13 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r22,Z+36 - ldd r23,Z+37 - ldd r26,Z+38 - ldd r27,Z+39 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r10 - mov r10,r12 - mov r12,r0 - mov r0,r11 - mov r11,r13 - mov r13,r0 - mov r0,r14 - mov r14,r24 - mov r24,r0 - mov r0,r15 - mov r15,r25 - mov r25,r0 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov 
r0,r19 - mov r19,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r22 - mov r22,r26 - mov r26,r0 - mov r0,r23 - mov r23,r27 - mov r27,r0 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+32,r18 - std Z+33,r19 - std Z+34,r20 - std Z+35,r21 - std Z+36,r22 - std Z+37,r23 - std Z+38,r26 - std Z+39,r27 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - ldd r22,Z+44 - ldd r23,Z+45 - ldd r26,Z+46 - ldd r27,Z+47 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r13 - mov r13,r12 - mov r12,r11 - mov r11,r10 - mov r10,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - or r13,r0 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+40,r18 - std Z+41,r19 - std Z+42,r20 - std Z+43,r21 - std Z+44,r22 - std Z+45,r23 - std Z+46,r26 - std Z+47,r27 - ldd r18,Z+48 - ldd r19,Z+49 - ldd r20,Z+50 - ldd r21,Z+51 - ldd r22,Z+52 - ldd r23,Z+53 - ldd r26,Z+54 - ldd r27,Z+55 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r10 - mov r10,r11 - mov r11,r12 - mov r12,r13 - mov r13,r0 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r27 - mov r27,r26 - mov r26,r23 - mov r23,r22 - mov r22,r0 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+48,r18 - std Z+49,r19 - std Z+50,r20 - std Z+51,r21 - std Z+52,r22 - std Z+53,r23 - std Z+54,r26 - std Z+55,r27 - ldd r18,Z+56 - ldd r19,Z+57 - ldd r20,Z+58 - ldd r21,Z+59 - ldd r22,Z+60 - ldd r23,Z+61 - ldd r26,Z+62 - ldd r27,Z+63 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r13 - mov r13,r12 - mov r12,r11 - mov r11,r10 - mov r10,r0 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov 
r15,r14 - mov r14,r0 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r27 - mov r27,r26 - mov r26,r23 - mov r23,r22 - mov r22,r0 - mov r0,r1 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - or r27,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+56,r18 - std Z+57,r19 - std Z+58,r20 - std Z+59,r21 - std Z+60,r22 - std Z+61,r23 - std Z+62,r26 - std Z+63,r27 - ldd r18,Y+1 - ldd r19,Y+2 - ldd r20,Y+3 - ldd r21,Y+4 - ldd r22,Y+5 - ldd r23,Y+6 - ldd r26,Y+7 - ldd r27,Y+8 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r10 - mov r10,r11 - mov r11,r12 - mov r12,r13 - mov r13,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - or r13,r0 - mov r0,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r22 - mov r22,r26 - mov r26,r0 - mov r0,r23 - mov r23,r27 - mov r27,r0 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - ldd r10,Y+9 - ldd r11,Y+10 - ldd r12,Y+11 - ldd r13,Y+12 - eor r10,r18 - eor r11,r19 - eor r12,r20 - eor r13,r21 - ldd r0,Z+12 - eor r10,r0 - ldd r0,Z+13 - eor r11,r0 - ldd r0,Z+14 - eor r12,r0 - ldd r0,Z+15 - eor r13,r0 - ldd r0,Z+32 - eor r10,r0 - ldd r0,Z+33 - eor r11,r0 - ldd r0,Z+34 - eor r12,r0 - ldd r0,Z+35 - eor r13,r0 - ldd r0,Z+52 - eor r10,r0 - ldd r0,Z+53 - eor r11,r0 - ldd r0,Z+54 - eor r12,r0 - ldd r0,Z+55 - eor r13,r0 - std Y+9,r10 - std Y+10,r11 - std Y+11,r12 - std Y+12,r13 - ldd r10,Y+13 - ldd r11,Y+14 - ldd r12,Y+15 - ldd r13,Y+16 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - ldd r0,Z+16 - eor r10,r0 - ldd r0,Z+17 - eor r11,r0 - ldd r0,Z+18 - eor r12,r0 - ldd r0,Z+19 - eor r13,r0 - ldd r0,Z+36 - eor r10,r0 - ldd r0,Z+37 - eor r11,r0 - ldd r0,Z+38 - eor r12,r0 - ldd r0,Z+39 - eor r13,r0 - ldd r0,Z+40 - eor r10,r0 - ldd r0,Z+41 - eor r11,r0 - ldd r0,Z+42 - eor r12,r0 - ldd r0,Z+43 - eor r13,r0 - std Y+13,r10 - std Y+14,r11 - std Y+15,r12 - std Y+16,r13 - ldd r10,Y+17 - ldd r11,Y+18 - ldd r12,Y+19 - ldd r13,Y+20 - eor r10,r2 - eor r11,r3 - eor r12,r4 - eor r13,r5 - ld r0,Z - eor r10,r0 - ldd r0,Z+1 - eor r11,r0 - ldd r0,Z+2 - eor r12,r0 - ldd r0,Z+3 - eor r13,r0 - ldd r0,Z+20 - eor r10,r0 - ldd r0,Z+21 - eor r11,r0 - ldd r0,Z+22 - eor r12,r0 - ldd r0,Z+23 - eor r13,r0 - ldd r0,Z+44 - eor r10,r0 - ldd r0,Z+45 - eor r11,r0 - ldd r0,Z+46 - eor r12,r0 - ldd r0,Z+47 - eor r13,r0 - std Y+17,r10 - std Y+18,r11 - std Y+19,r12 - std Y+20,r13 - ldd r10,Y+21 - ldd r11,Y+22 - ldd r12,Y+23 - ldd r13,Y+24 - eor r10,r6 - eor r11,r7 - eor r12,r8 - eor r13,r9 - ldd r0,Z+4 - eor r10,r0 - ldd r0,Z+5 - eor 
r11,r0 - ldd r0,Z+6 - eor r12,r0 - ldd r0,Z+7 - eor r13,r0 - ldd r0,Z+8 - eor r10,r0 - ldd r0,Z+9 - eor r11,r0 - ldd r0,Z+10 - eor r12,r0 - ldd r0,Z+11 - eor r13,r0 - ldd r0,Z+48 - eor r10,r0 - ldd r0,Z+49 - eor r11,r0 - ldd r0,Z+50 - eor r12,r0 - ldd r0,Z+51 - eor r13,r0 - std Y+21,r10 - std Y+22,r11 - std Y+23,r12 - std Y+24,r13 - ldd r10,Y+25 - dec r10 - std Y+25,r10 - breq 6623f - rjmp 40b -6623: - std Z+24,r2 - std Z+25,r3 - std Z+26,r4 - std Z+27,r5 - std Z+28,r6 - std Z+29,r7 - std Z+30,r8 - std Z+31,r9 - st -Z,r27 - st -Z,r26 - st -Z,r23 - st -Z,r22 - st -Z,r21 - st -Z,r20 - st -Z,r19 - st -Z,r18 - ldi r25,72 - add r30,r25 - adc r31,r1 - ldd r18,Y+9 - ldd r19,Y+10 - ldd r20,Y+11 - ldd r21,Y+12 - ldd r22,Y+13 - ldd r23,Y+14 - ldd r26,Y+15 - ldd r27,Y+16 - ldd r2,Y+17 - ldd r3,Y+18 - ldd r4,Y+19 - ldd r5,Y+20 - ldd r6,Y+21 - ldd r7,Y+22 - ldd r8,Y+23 - ldd r9,Y+24 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r22 - std Z+5,r23 - std Z+6,r26 - std Z+7,r27 - std Z+8,r2 - std Z+9,r3 - std Z+10,r4 - std Z+11,r5 - std Z+12,r6 - std Z+13,r7 - std Z+14,r8 - std Z+15,r9 - adiw r28,26 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size drysponge256_g, .-drysponge256_g - -#endif diff --git a/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/internal-drysponge.c b/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/internal-drysponge.c deleted file mode 100644 index 6dfe48c..0000000 --- a/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/internal-drysponge.c +++ /dev/null @@ -1,611 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "internal-drysponge.h" -#include - -#if !defined(__AVR__) - -/* Right rotations in bit-interleaved format */ -#define intRightRotateEven(x,bits) \ - (__extension__ ({ \ - uint32_t _x0 = (uint32_t)(x); \ - uint32_t _x1 = (uint32_t)((x) >> 32); \ - _x0 = rightRotate(_x0, (bits)); \ - _x1 = rightRotate(_x1, (bits)); \ - _x0 | (((uint64_t)_x1) << 32); \ - })) -#define intRightRotateOdd(x,bits) \ - (__extension__ ({ \ - uint32_t _x0 = (uint32_t)(x); \ - uint32_t _x1 = (uint32_t)((x) >> 32); \ - _x0 = rightRotate(_x0, ((bits) + 1) % 32); \ - _x1 = rightRotate(_x1, (bits)); \ - _x1 | (((uint64_t)_x0) << 32); \ - })) -#define intRightRotate1_64(x) \ - (__extension__ ({ \ - uint32_t _x0 = (uint32_t)(x); \ - uint32_t _x1 = (uint32_t)((x) >> 32); \ - _x0 = rightRotate1(_x0); \ - _x1 | (((uint64_t)_x0) << 32); \ - })) -#define intRightRotate2_64(x) (intRightRotateEven((x), 1)) -#define intRightRotate3_64(x) (intRightRotateOdd((x), 1)) -#define intRightRotate4_64(x) (intRightRotateEven((x), 2)) -#define intRightRotate5_64(x) (intRightRotateOdd((x), 2)) -#define intRightRotate6_64(x) (intRightRotateEven((x), 3)) -#define intRightRotate7_64(x) (intRightRotateOdd((x), 3)) -#define intRightRotate8_64(x) (intRightRotateEven((x), 4)) -#define intRightRotate9_64(x) (intRightRotateOdd((x), 4)) -#define intRightRotate10_64(x) (intRightRotateEven((x), 5)) -#define intRightRotate11_64(x) (intRightRotateOdd((x), 5)) -#define intRightRotate12_64(x) (intRightRotateEven((x), 6)) -#define intRightRotate13_64(x) (intRightRotateOdd((x), 6)) -#define intRightRotate14_64(x) (intRightRotateEven((x), 7)) -#define intRightRotate15_64(x) (intRightRotateOdd((x), 7)) -#define intRightRotate16_64(x) (intRightRotateEven((x), 8)) -#define intRightRotate17_64(x) (intRightRotateOdd((x), 8)) -#define intRightRotate18_64(x) (intRightRotateEven((x), 9)) -#define intRightRotate19_64(x) (intRightRotateOdd((x), 9)) -#define intRightRotate20_64(x) (intRightRotateEven((x), 10)) -#define intRightRotate21_64(x) (intRightRotateOdd((x), 10)) -#define intRightRotate22_64(x) (intRightRotateEven((x), 11)) -#define intRightRotate23_64(x) (intRightRotateOdd((x), 11)) -#define intRightRotate24_64(x) (intRightRotateEven((x), 12)) -#define intRightRotate25_64(x) (intRightRotateOdd((x), 12)) -#define intRightRotate26_64(x) (intRightRotateEven((x), 13)) -#define intRightRotate27_64(x) (intRightRotateOdd((x), 13)) -#define intRightRotate28_64(x) (intRightRotateEven((x), 14)) -#define intRightRotate29_64(x) (intRightRotateOdd((x), 14)) -#define intRightRotate30_64(x) (intRightRotateEven((x), 15)) -#define intRightRotate31_64(x) (intRightRotateOdd((x), 15)) -#define intRightRotate32_64(x) (intRightRotateEven((x), 16)) -#define intRightRotate33_64(x) (intRightRotateOdd((x), 16)) -#define intRightRotate34_64(x) (intRightRotateEven((x), 17)) -#define intRightRotate35_64(x) (intRightRotateOdd((x), 17)) -#define intRightRotate36_64(x) (intRightRotateEven((x), 18)) -#define intRightRotate37_64(x) (intRightRotateOdd((x), 18)) -#define intRightRotate38_64(x) (intRightRotateEven((x), 19)) -#define intRightRotate39_64(x) (intRightRotateOdd((x), 19)) -#define intRightRotate40_64(x) (intRightRotateEven((x), 20)) -#define intRightRotate41_64(x) (intRightRotateOdd((x), 20)) -#define intRightRotate42_64(x) (intRightRotateEven((x), 21)) -#define intRightRotate43_64(x) (intRightRotateOdd((x), 21)) -#define intRightRotate44_64(x) (intRightRotateEven((x), 22)) -#define intRightRotate45_64(x) (intRightRotateOdd((x), 22)) -#define intRightRotate46_64(x) 
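The intRightRotate*_64 macros above implement 64-bit rotation for DryGASCON's bit-interleaved word layout: one 32-bit half of each 64-bit value carries the even-numbered bits and the other half the odd-numbered bits, so a rotation by an even amount 2k rotates both halves by k in place, while an odd amount 2k+1 swaps the halves and rotates the half that wraps across the interleave boundary by k+1. A self-contained sketch of the same mapping (ror32 and int_right_rotate_64 are illustrative names, not part of the source):

#include <stdint.h>

/* Rotate a 32-bit word right; written out so the sketch stands alone. */
static uint32_t ror32(uint32_t x, unsigned bits)
{
    return bits ? ((x >> bits) | (x << (32 - bits))) : x;
}

/* Generic form of the unrolled intRightRotateN_64 macros: rotate a
 * bit-interleaved 64-bit value right by 'bits' in the range 1..63. */
static uint64_t int_right_rotate_64(uint64_t x, unsigned bits)
{
    uint32_t lo = (uint32_t)x;
    uint32_t hi = (uint32_t)(x >> 32);
    if ((bits & 1) == 0) {
        /* Even rotation 2k: both halves rotate by k and stay in place */
        lo = ror32(lo, bits / 2);
        hi = ror32(hi, bits / 2);
        return lo | (((uint64_t)hi) << 32);
    } else {
        /* Odd rotation 2k+1: the halves swap; the wrapping half moves k+1 */
        lo = ror32(lo, (bits / 2 + 1) % 32);
        hi = ror32(hi, bits / 2);
        return hi | (((uint64_t)lo) << 32);
    }
}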
(intRightRotateEven((x), 23)) -#define intRightRotate47_64(x) (intRightRotateOdd((x), 23)) -#define intRightRotate48_64(x) (intRightRotateEven((x), 24)) -#define intRightRotate49_64(x) (intRightRotateOdd((x), 24)) -#define intRightRotate50_64(x) (intRightRotateEven((x), 25)) -#define intRightRotate51_64(x) (intRightRotateOdd((x), 25)) -#define intRightRotate52_64(x) (intRightRotateEven((x), 26)) -#define intRightRotate53_64(x) (intRightRotateOdd((x), 26)) -#define intRightRotate54_64(x) (intRightRotateEven((x), 27)) -#define intRightRotate55_64(x) (intRightRotateOdd((x), 27)) -#define intRightRotate56_64(x) (intRightRotateEven((x), 28)) -#define intRightRotate57_64(x) (intRightRotateOdd((x), 28)) -#define intRightRotate58_64(x) (intRightRotateEven((x), 29)) -#define intRightRotate59_64(x) (intRightRotateOdd((x), 29)) -#define intRightRotate60_64(x) (intRightRotateEven((x), 30)) -#define intRightRotate61_64(x) (intRightRotateOdd((x), 30)) -#define intRightRotate62_64(x) (intRightRotateEven((x), 31)) -#define intRightRotate63_64(x) (intRightRotateOdd((x), 31)) - -void gascon128_core_round(gascon128_state_t *state, uint8_t round) -{ - uint64_t t0, t1, t2, t3, t4; - - /* Load the state into local varaibles */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); - uint64_t x4 = le_load_word64(state->B + 32); -#endif - - /* Add the round constant to the middle of the state */ - x2 ^= ((0x0F - round) << 4) | round; - - /* Substitution layer */ - x0 ^= x4; x2 ^= x1; x4 ^= x3; t0 = (~x0) & x1; t1 = (~x1) & x2; - t2 = (~x2) & x3; t3 = (~x3) & x4; t4 = (~x4) & x0; x0 ^= t1; - x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t0; x1 ^= x0; x3 ^= x2; - x0 ^= x4; x2 = ~x2; - - /* Linear diffusion layer */ - x0 ^= intRightRotate19_64(x0) ^ intRightRotate28_64(x0); - x1 ^= intRightRotate61_64(x1) ^ intRightRotate38_64(x1); - x2 ^= intRightRotate1_64(x2) ^ intRightRotate6_64(x2); - x3 ^= intRightRotate10_64(x3) ^ intRightRotate17_64(x3); - x4 ^= intRightRotate7_64(x4) ^ intRightRotate40_64(x4); - - /* Write the local variables back to the state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); - le_store_word64(state->B + 32, x4); -#endif -} - -void gascon256_core_round(gascon256_state_t *state, uint8_t round) -{ - uint64_t t0, t1, t2, t3, t4, t5, t6, t7, t8; - - /* Load the state into local varaibles */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; - uint64_t x5 = state->S[5]; - uint64_t x6 = state->S[6]; - uint64_t x7 = state->S[7]; - uint64_t x8 = state->S[8]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); - uint64_t x4 = le_load_word64(state->B + 32); - uint64_t x5 = le_load_word64(state->B + 40); - uint64_t x6 = le_load_word64(state->B + 48); - uint64_t x7 = le_load_word64(state->B + 56); - 
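The substitution layer in gascon128_core_round above is the ASCON-style 5-bit S-box applied bit-sliced: for every bit position j, the five bits x0[j]..x4[j] pass through the same S-box. A compact loop restatement of the unrolled sequence, for illustration only (gascon256_core_round continued below applies the same pattern to nine words, again with the round constant added to the middle word first):

#include <stdint.h>

/* Illustrative restatement of the 5-word substitution layer. */
static void gascon_sbox_5(uint64_t x[5])
{
    uint64_t t[5];
    int i;
    x[0] ^= x[4]; x[2] ^= x[1]; x[4] ^= x[3];
    for (i = 0; i < 5; ++i)
        t[i] = (~x[i]) & x[(i + 1) % 5];   /* t_i = ~x_i & x_{i+1} */
    for (i = 0; i < 5; ++i)
        x[i] ^= t[(i + 1) % 5];            /* x_i ^= t_{i+1} */
    x[1] ^= x[0]; x[3] ^= x[2]; x[0] ^= x[4];
    x[2] = ~x[2];
}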
uint64_t x8 = le_load_word64(state->B + 64); -#endif - - /* Add the round constant to the middle of the state */ - x4 ^= ((0x0F - round) << 4) | round; - - /* Substitution layer */ - x0 ^= x8; x2 ^= x1; x4 ^= x3; x6 ^= x5; x8 ^= x7; t0 = (~x0) & x1; - t1 = (~x1) & x2; t2 = (~x2) & x3; t3 = (~x3) & x4; t4 = (~x4) & x5; - t5 = (~x5) & x6; t6 = (~x6) & x7; t7 = (~x7) & x8; t8 = (~x8) & x0; - x0 ^= t1; x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t5; x5 ^= t6; x6 ^= t7; - x7 ^= t8; x8 ^= t0; x1 ^= x0; x3 ^= x2; x5 ^= x4; x7 ^= x6; x0 ^= x8; - x4 = ~x4; - - /* Linear diffusion layer */ - x0 ^= intRightRotate19_64(x0) ^ intRightRotate28_64(x0); - x1 ^= intRightRotate61_64(x1) ^ intRightRotate38_64(x1); - x2 ^= intRightRotate1_64(x2) ^ intRightRotate6_64(x2); - x3 ^= intRightRotate10_64(x3) ^ intRightRotate17_64(x3); - x4 ^= intRightRotate7_64(x4) ^ intRightRotate40_64(x4); - x5 ^= intRightRotate31_64(x5) ^ intRightRotate26_64(x5); - x6 ^= intRightRotate53_64(x6) ^ intRightRotate58_64(x6); - x7 ^= intRightRotate9_64(x7) ^ intRightRotate46_64(x7); - x8 ^= intRightRotate43_64(x8) ^ intRightRotate50_64(x8); - - /* Write the local variables back to the state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; - state->S[5] = x5; - state->S[6] = x6; - state->S[7] = x7; - state->S[8] = x8; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); - le_store_word64(state->B + 32, x4); - le_store_word64(state->B + 40, x5); - le_store_word64(state->B + 48, x6); - le_store_word64(state->B + 56, x7); - le_store_word64(state->B + 64, x8); -#endif -} - -void drysponge128_g(drysponge128_state_t *state) -{ - unsigned round; - - /* Perform the first round. For each round we XOR the 16 bytes of - * the output data with the first 16 bytes of the state. And then - * XOR with the next 16 bytes of the state, rotated by 4 bytes */ - gascon128_core_round(&(state->c), 0); - state->r.W[0] = state->c.W[0] ^ state->c.W[5]; - state->r.W[1] = state->c.W[1] ^ state->c.W[6]; - state->r.W[2] = state->c.W[2] ^ state->c.W[7]; - state->r.W[3] = state->c.W[3] ^ state->c.W[4]; - - /* Perform the rest of the rounds */ - for (round = 1; round < state->rounds; ++round) { - gascon128_core_round(&(state->c), round); - state->r.W[0] ^= state->c.W[0] ^ state->c.W[5]; - state->r.W[1] ^= state->c.W[1] ^ state->c.W[6]; - state->r.W[2] ^= state->c.W[2] ^ state->c.W[7]; - state->r.W[3] ^= state->c.W[3] ^ state->c.W[4]; - } -} - -void drysponge256_g(drysponge256_state_t *state) -{ - unsigned round; - - /* Perform the first round. For each round we XOR the 16 bytes of - * the output data with the first 16 bytes of the state. And then - * XOR with the next 16 bytes of the state, rotated by 4 bytes. - * And so on for a total of 64 bytes XOR'ed into the output data. 
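The squeeze in drysponge128_g above combines words 0..3 of the GASCON state with words 4..7 rotated by one 32-bit word (the "16 bytes rotated by 4 bytes" of the comment), accumulating the result into the rate buffer r across the rounds. The same combination in loop form, as a sketch only; the helper name and the first-round flag are illustrative, and the types from internal-drysponge.h are assumed:

static void drysponge128_squeeze_round(drysponge128_state_t *state, int first_round)
{
    int i;
    for (i = 0; i < 4; ++i) {
        uint32_t w = state->c.W[i] ^ state->c.W[4 + ((i + 1) & 3)];
        if (first_round)
            state->r.W[i] = w;    /* round 0 initialises r */
        else
            state->r.W[i] ^= w;   /* later rounds accumulate into r */
    }
}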
*/ - gascon256_core_round(&(state->c), 0); - state->r.W[0] = state->c.W[0] ^ state->c.W[5] ^ - state->c.W[10] ^ state->c.W[15]; - state->r.W[1] = state->c.W[1] ^ state->c.W[6] ^ - state->c.W[11] ^ state->c.W[12]; - state->r.W[2] = state->c.W[2] ^ state->c.W[7] ^ - state->c.W[8] ^ state->c.W[13]; - state->r.W[3] = state->c.W[3] ^ state->c.W[4] ^ - state->c.W[9] ^ state->c.W[14]; - - /* Perform the rest of the rounds */ - for (round = 1; round < state->rounds; ++round) { - gascon256_core_round(&(state->c), round); - state->r.W[0] ^= state->c.W[0] ^ state->c.W[5] ^ - state->c.W[10] ^ state->c.W[15]; - state->r.W[1] ^= state->c.W[1] ^ state->c.W[6] ^ - state->c.W[11] ^ state->c.W[12]; - state->r.W[2] ^= state->c.W[2] ^ state->c.W[7] ^ - state->c.W[8] ^ state->c.W[13]; - state->r.W[3] ^= state->c.W[3] ^ state->c.W[4] ^ - state->c.W[9] ^ state->c.W[14]; - } -} - -#endif /* !__AVR__ */ - -void drysponge128_g_core(drysponge128_state_t *state) -{ - unsigned round; - for (round = 0; round < state->rounds; ++round) - gascon128_core_round(&(state->c), round); -} - -void drysponge256_g_core(drysponge256_state_t *state) -{ - unsigned round; - for (round = 0; round < state->rounds; ++round) - gascon256_core_round(&(state->c), round); -} - -/** - * \fn uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index) - * \brief Selects an element of x in constant time. - * - * \param x Points to the four elements of x. - * \param index Index of which element to extract between 0 and 3. - * - * \return The selected element of x. - */ -#if !defined(__AVR__) -STATIC_INLINE uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index) -{ - /* We need to be careful how we select each element of x because - * we are doing a data-dependent fetch here. Do the fetch in a way - * that should avoid cache timing issues by fetching every element - * of x and masking away the ones we don't want. - * - * There is a possible side channel here with respect to power analysis. - * The "mask" value will be all-ones for the selected index and all-zeroes - * for the other indexes. This may show up as different power consumption - * for the "result ^= x[i] & mask" statement when i is the selected index. - * Such a side channel could in theory allow reading the plaintext input - * to the cipher by analysing the CPU's power consumption. - * - * The DryGASCON specification acknowledges the possibility of plaintext - * recovery in section 7.4. For software mitigation the specification - * suggests randomization of the indexes into c and x and randomization - * of the order of processing words. We aren't doing that here yet. - * Patches welcome to fix this. - */ - uint32_t mask = -((uint32_t)((0x04 - index) >> 2)); - uint32_t result = x[0] & mask; - mask = -((uint32_t)((0x04 - (index ^ 0x01)) >> 2)); - result ^= x[1] & mask; - mask = -((uint32_t)((0x04 - (index ^ 0x02)) >> 2)); - result ^= x[2] & mask; - mask = -((uint32_t)((0x04 - (index ^ 0x03)) >> 2)); - return result ^ (x[3] & mask); -} -#else -/* AVR is more or less immune to cache timing issues because it doesn't - * have anything like an L1 or L2 cache. Select the word directly */ -#define drysponge_select_x(x, index) ((x)[(index)]) -#endif - -/** - * \brief Mixes a 32-bit value into the DrySPONGE128 state. - * - * \param state DrySPONGE128 state. - * \param data The data to be mixed in the bottom 10 bits. 
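The masks in drysponge_select_x above rest on a small identity: for 2-bit values, (0x04 - (index ^ i)) >> 2 equals 1 exactly when index == i and 0 otherwise, so negating it gives an all-ones mask for the selected word and zero for the other three. A loop-form sketch of the same constant-time select (the name select_word_ct is illustrative):

#include <stdint.h>

static uint32_t select_word_ct(const uint32_t x[4], uint8_t index)
{
    uint32_t result = 0;
    uint8_t i;
    for (i = 0; i < 4; ++i) {
        /* all-ones when index == i, all-zeroes otherwise */
        uint32_t mask = -(uint32_t)((0x04 - (index ^ i)) >> 2);
        result ^= x[i] & mask;
    }
    return result;
}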
- */ -static void drysponge128_mix_phase_round - (drysponge128_state_t *state, uint32_t data) -{ - /* Mix in elements from x according to the 2-bit indexes in the data */ - state->c.W[0] ^= drysponge_select_x(state->x.W, data & 0x03); - state->c.W[2] ^= drysponge_select_x(state->x.W, (data >> 2) & 0x03); - state->c.W[4] ^= drysponge_select_x(state->x.W, (data >> 4) & 0x03); - state->c.W[6] ^= drysponge_select_x(state->x.W, (data >> 6) & 0x03); - state->c.W[8] ^= drysponge_select_x(state->x.W, (data >> 8) & 0x03); -} - -/** - * \brief Mixes a 32-bit value into the DrySPONGE256 state. - * - * \param state DrySPONGE256 state. - * \param data The data to be mixed in the bottom 18 bits. - */ -static void drysponge256_mix_phase_round - (drysponge256_state_t *state, uint32_t data) -{ - /* Mix in elements from x according to the 2-bit indexes in the data */ - state->c.W[0] ^= drysponge_select_x(state->x.W, data & 0x03); - state->c.W[2] ^= drysponge_select_x(state->x.W, (data >> 2) & 0x03); - state->c.W[4] ^= drysponge_select_x(state->x.W, (data >> 4) & 0x03); - state->c.W[6] ^= drysponge_select_x(state->x.W, (data >> 6) & 0x03); - state->c.W[8] ^= drysponge_select_x(state->x.W, (data >> 8) & 0x03); - state->c.W[10] ^= drysponge_select_x(state->x.W, (data >> 10) & 0x03); - state->c.W[12] ^= drysponge_select_x(state->x.W, (data >> 12) & 0x03); - state->c.W[14] ^= drysponge_select_x(state->x.W, (data >> 14) & 0x03); - state->c.W[16] ^= drysponge_select_x(state->x.W, (data >> 16) & 0x03); -} - -/** - * \brief Mixes an input block into a DrySPONGE128 state. - * - * \param state The DrySPONGE128 state. - * \param data Full rate block containing the input data. - */ -static void drysponge128_mix_phase - (drysponge128_state_t *state, const unsigned char data[DRYSPONGE128_RATE]) -{ - /* Mix 10-bit groups into the output, with the domain - * separator added to the last two groups */ - drysponge128_mix_phase_round - (state, data[0] | (((uint32_t)(data[1])) << 8)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[1] >> 2) | (((uint32_t)(data[2])) << 6)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[2] >> 4) | (((uint32_t)(data[3])) << 4)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[3] >> 6) | (((uint32_t)(data[4])) << 2)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, data[5] | (((uint32_t)(data[6])) << 8)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[6] >> 2) | (((uint32_t)(data[7])) << 6)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[7] >> 4) | (((uint32_t)(data[8])) << 4)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[8] >> 6) | (((uint32_t)(data[9])) << 2)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, data[10] | (((uint32_t)(data[11])) << 8)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[11] >> 2) | (((uint32_t)(data[12])) << 6)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[12] >> 4) | (((uint32_t)(data[13])) << 4)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, ((data[13] >> 6) | (((uint32_t)(data[14])) << 2))); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round(state, data[15] ^ state->domain); - gascon128_core_round(&(state->c), 0); - 
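drysponge128_mix_phase above consumes the 16-byte rate block as consecutive 10-bit groups, each group's five 2-bit pairs selecting words of x to fold into the state, with a GASCON core round between groups. The unrolled shifts compute the groups sketched below for i = 0..11; the final two groups fold in the domain separator exactly as written in the code. The helper name is illustrative:

/* Extract the i-th 10-bit group of the rate block, valid for i = 0..11.
 * Only the low 10 bits are consumed by drysponge128_mix_phase_round. */
static uint32_t rate_group_10(const unsigned char data[16], unsigned i)
{
    unsigned bit = i * 10;                        /* starting bit offset */
    uint32_t v = (uint32_t)(data[bit / 8] >> (bit % 8));
    v |= ((uint32_t)data[bit / 8 + 1]) << (8 - (bit % 8));
    return v & 0x3FF;
}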
drysponge128_mix_phase_round(state, state->domain >> 10); - - /* Revert to the default domain separator for the next block */ - state->domain = 0; -} - -/** - * \brief Mixes an input block into a DrySPONGE256 state. - * - * \param state The DrySPONGE256 state. - * \param data Full rate block containing the input data. - */ -static void drysponge256_mix_phase - (drysponge256_state_t *state, const unsigned char data[DRYSPONGE256_RATE]) -{ - /* Mix 18-bit groups into the output, with the domain in the last group */ - drysponge256_mix_phase_round - (state, data[0] | (((uint32_t)(data[1])) << 8) | - (((uint32_t)(data[2])) << 16)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[2] >> 2) | (((uint32_t)(data[3])) << 6) | - (((uint32_t)(data[4])) << 14)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[4] >> 4) | (((uint32_t)(data[5])) << 4) | - (((uint32_t)(data[6])) << 12)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[6] >> 6) | (((uint32_t)(data[7])) << 2) | - (((uint32_t)(data[8])) << 10)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, data[9] | (((uint32_t)(data[10])) << 8) | - (((uint32_t)(data[11])) << 16)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[11] >> 2) | (((uint32_t)(data[12])) << 6) | - (((uint32_t)(data[13])) << 14)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[13] >> 4) | (((uint32_t)(data[14])) << 4) | - (((uint32_t)(data[15])) << 12)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[15] >> 6) ^ state->domain); - - /* Revert to the default domain separator for the next block */ - state->domain = 0; -} - -void drysponge128_f_absorb - (drysponge128_state_t *state, const unsigned char *input, unsigned len) -{ - if (len >= DRYSPONGE128_RATE) { - drysponge128_mix_phase(state, input); - } else { - unsigned char padded[DRYSPONGE128_RATE]; - memcpy(padded, input, len); - padded[len] = 0x01; - memset(padded + len + 1, 0, DRYSPONGE128_RATE - len - 1); - drysponge128_mix_phase(state, padded); - } -} - -void drysponge256_f_absorb - (drysponge256_state_t *state, const unsigned char *input, unsigned len) -{ - if (len >= DRYSPONGE256_RATE) { - drysponge256_mix_phase(state, input); - } else { - unsigned char padded[DRYSPONGE256_RATE]; - memcpy(padded, input, len); - padded[len] = 0x01; - memset(padded + len + 1, 0, DRYSPONGE256_RATE - len - 1); - drysponge256_mix_phase(state, padded); - } -} - -/** - * \brief Determine if some of the words of an "x" value are identical. - * - * \param x Points to the "x" buffer to check. - * - * \return Non-zero if some of the words are the same, zero if they are - * distinct from each other. - * - * We try to perform the check in constant time to avoid giving away - * any information about the value of the key. 
- */ -static int drysponge_x_words_are_same(const uint32_t x[4]) -{ - unsigned i, j; - int result = 0; - for (i = 0; i < 3; ++i) { - for (j = i + 1; j < 4; ++j) { - uint32_t check = x[i] ^ x[j]; - result |= (int)((0x100000000ULL - check) >> 32); - } - } - return result; -} - -void drysponge128_setup - (drysponge128_state_t *state, const unsigned char *key, - const unsigned char *nonce, int final_block) -{ - /* Fill the GASCON-128 state with repeated copies of the key */ - memcpy(state->c.B, key, 16); - memcpy(state->c.B + 16, key, 16); - memcpy(state->c.B + 32, key, 8); - - /* Generate the "x" value for the state. All four words of "x" - * must be unique because they will be used in drysponge_select_x() - * as stand-ins for the bit pairs 00, 01, 10, and 11. - * - * Run the core block operation over and over until "x" is unique. - * Technically the runtime here is key-dependent and not constant. - * If the input key is randomized, this should only take 1 round - * on average so it is "almost constant time". - */ - do { - gascon128_core_round(&(state->c), 0); - } while (drysponge_x_words_are_same(state->c.W)); - memcpy(state->x.W, state->c.W, sizeof(state->x)); - - /* Replace the generated "x" value in the state with the key prefix */ - memcpy(state->c.W, key, sizeof(state->x)); - - /* Absorb the nonce into the state with an increased number of rounds */ - state->rounds = DRYSPONGE128_INIT_ROUNDS; - state->domain = DRYDOMAIN128_NONCE; - if (final_block) - state->domain |= DRYDOMAIN128_FINAL; - drysponge128_f_absorb(state, nonce, 16); - drysponge128_g(state); - - /* Set up the normal number of rounds for future operations */ - state->rounds = DRYSPONGE128_ROUNDS; -} - -void drysponge256_setup - (drysponge256_state_t *state, const unsigned char *key, - const unsigned char *nonce, int final_block) -{ - /* Fill the GASCON-256 state with repeated copies of the key */ - memcpy(state->c.B, key, 32); - memcpy(state->c.B + 32, key, 32); - memcpy(state->c.B + 64, key, 8); - - /* Generate the "x" value for the state */ - do { - gascon256_core_round(&(state->c), 0); - } while (drysponge_x_words_are_same(state->c.W)); - memcpy(state->x.W, state->c.W, sizeof(state->x)); - - /* Replace the generated "x" value in the state with the key prefix */ - memcpy(state->c.W, key, sizeof(state->x)); - - /* Absorb the nonce into the state with an increased number of rounds */ - state->rounds = DRYSPONGE256_INIT_ROUNDS; - state->domain = DRYDOMAIN256_NONCE; - if (final_block) - state->domain |= DRYDOMAIN256_FINAL; - drysponge256_f_absorb(state, nonce, 16); - drysponge256_g(state); - - /* Set up the normal number of rounds for future operations */ - state->rounds = DRYSPONGE256_ROUNDS; -} diff --git a/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/internal-drysponge.h b/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/internal-drysponge.h deleted file mode 100644 index 05b0c16..0000000 --- a/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/internal-drysponge.h +++ /dev/null @@ -1,345 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
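The accumulation inside drysponge_x_words_are_same above uses a branch-free zero test: for a 32-bit value check, (0x100000000ULL - check) >> 32 is 1 when check is zero and 0 for every non-zero value, so each pair comparison contributes 1 exactly when x[i] == x[j]. The same test as a small helper (sketch with an illustrative name):

#include <stdint.h>

/* Returns 1 if a == b, 0 otherwise, without a data-dependent branch. */
static int words_equal_ct(uint32_t a, uint32_t b)
{
    uint64_t check = (uint64_t)(a ^ b);
    return (int)((0x100000000ULL - check) >> 32);
}

drysponge128_setup and drysponge256_setup above keep applying the core round until this check reports four distinct words, because drysponge_select_x needs the four words of x to stand in for the distinct bit pairs 00, 01, 10 and 11.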
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_DRYSPONGE_H -#define LW_INTERNAL_DRYSPONGE_H - -#include "internal-util.h" - -/** - * \file internal-drysponge.h - * \brief Internal implementation of DrySPONGE for the DryGASCON cipher. - * - * References: https://github.com/sebastien-riou/DryGASCON - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the GASCON-128 permutation state in bytes. - */ -#define GASCON128_STATE_SIZE 40 - -/** - * \brief Size of the GASCON-256 permutation state in bytes. - */ -#define GASCON256_STATE_SIZE 72 - -/** - * \brief Rate of absorption and squeezing for DrySPONGE128. - */ -#define DRYSPONGE128_RATE 16 - -/** - * \brief Rate of absorption and squeezing for DrySPONGE256. - */ -#define DRYSPONGE256_RATE 16 - -/** - * \brief Size of the "x" value for DrySPONGE128. - */ -#define DRYSPONGE128_XSIZE 16 - -/** - * \brief Size of the "x" value for DrySPONGE256. - */ -#define DRYSPONGE256_XSIZE 16 - -/** - * \brief Normal number of rounds for DrySPONGE128 when absorbing - * and squeezing data. - */ -#define DRYSPONGE128_ROUNDS 7 - -/** - * \brief Number of rounds for DrySPONGE128 during initialization. - */ -#define DRYSPONGE128_INIT_ROUNDS 11 - -/** - * \brief Normal number of rounds for DrySPONGE256 when absorbing - * and squeezing data. - */ -#define DRYSPONGE256_ROUNDS 8 - -/** - * \brief Number of rounds for DrySPONGE256 during initialization. - */ -#define DRYSPONGE256_INIT_ROUNDS 12 - -/** - * \brief DrySPONGE128 domain bit for a padded block. - */ -#define DRYDOMAIN128_PADDED (1 << 8) - -/** - * \brief DrySPONGE128 domain bit for a final block. - */ -#define DRYDOMAIN128_FINAL (1 << 9) - -/** - * \brief DrySPONGE128 domain value for processing the nonce. - */ -#define DRYDOMAIN128_NONCE (1 << 10) - -/** - * \brief DrySPONGE128 domain value for processing the associated data. - */ -#define DRYDOMAIN128_ASSOC_DATA (2 << 10) - -/** - * \brief DrySPONGE128 domain value for processing the message. - */ -#define DRYDOMAIN128_MESSAGE (3 << 10) - -/** - * \brief DrySPONGE256 domain bit for a padded block. - */ -#define DRYDOMAIN256_PADDED (1 << 2) - -/** - * \brief DrySPONGE256 domain bit for a final block. - */ -#define DRYDOMAIN256_FINAL (1 << 3) - -/** - * \brief DrySPONGE256 domain value for processing the nonce. - */ -#define DRYDOMAIN256_NONCE (1 << 4) - -/** - * \brief DrySPONGE256 domain value for processing the associated data. 
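The DrySPONGE128 domain values are positioned to line up with the tail of drysponge128_mix_phase shown earlier: bits 8 and 9 (PADDED, FINAL) share the last 10-bit data group with the final rate byte, and the block-type bits land in the extra group fed from state->domain >> 10. A worked example using the macro values:

/* domain = DRYDOMAIN128_NONCE | DRYDOMAIN128_FINAL
 *        = (1 << 10) | (1 << 9)
 *        = 0x600
 *
 * last data group : data[15] ^ 0x600    -> rate byte plus FINAL in bit 9
 * extra group     : 0x600 >> 10 = 1     -> block type = nonce
 */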
- */ -#define DRYDOMAIN256_ASSOC_DATA (2 << 4) - -/** - * \brief DrySPONGE256 domain value for processing the message. - */ -#define DRYDOMAIN256_MESSAGE (3 << 4) - -/** - * \brief Internal state of the GASCON-128 permutation. - */ -typedef union -{ - uint64_t S[GASCON128_STATE_SIZE / 8]; /**< 64-bit words of the state */ - uint32_t W[GASCON128_STATE_SIZE / 4]; /**< 32-bit words of the state */ - uint8_t B[GASCON128_STATE_SIZE]; /**< Bytes of the state */ - -} gascon128_state_t; - -/** - * \brief Internal state of the GASCON-256 permutation. - */ -typedef union -{ - uint64_t S[GASCON256_STATE_SIZE / 8]; /**< 64-bit words of the state */ - uint32_t W[GASCON256_STATE_SIZE / 4]; /**< 32-bit words of the state */ - uint8_t B[GASCON256_STATE_SIZE]; /**< Bytes of the state */ - -} gascon256_state_t; - -/** - * \brief Structure of a rate block for DrySPONGE128. - */ -typedef union -{ - uint64_t S[DRYSPONGE128_RATE / 8]; /**< 64-bit words of the rate */ - uint32_t W[DRYSPONGE128_RATE / 4]; /**< 32-bit words of the rate */ - uint8_t B[DRYSPONGE128_RATE]; /**< Bytes of the rate */ - -} drysponge128_rate_t; - -/** - * \brief Structure of a rate block for DrySPONGE256. - */ -typedef union -{ - uint64_t S[DRYSPONGE256_RATE / 8]; /**< 64-bit words of the rate */ - uint32_t W[DRYSPONGE256_RATE / 4]; /**< 32-bit words of the rate */ - uint8_t B[DRYSPONGE256_RATE]; /**< Bytes of the rate */ - -} drysponge256_rate_t; - -/** - * \brief Structure of the "x" value for DrySPONGE128. - */ -typedef union -{ - uint64_t S[DRYSPONGE128_XSIZE / 8]; /**< 64-bit words of the rate */ - uint32_t W[DRYSPONGE128_XSIZE / 4]; /**< 32-bit words of the rate */ - uint8_t B[DRYSPONGE128_XSIZE]; /**< Bytes of the rate */ - -} drysponge128_x_t; - -/** - * \brief Structure of the "x" value for DrySPONGE256. - */ -typedef union -{ - uint64_t S[DRYSPONGE256_XSIZE / 8]; /**< 64-bit words of the rate */ - uint32_t W[DRYSPONGE256_XSIZE / 4]; /**< 32-bit words of the rate */ - uint8_t B[DRYSPONGE256_XSIZE]; /**< Bytes of the rate */ - -} drysponge256_x_t; - -/** - * \brief Structure of the rolling DrySPONGE128 state. - */ -typedef struct -{ - gascon128_state_t c; /**< GASCON-128 state for the capacity */ - drysponge128_rate_t r; /**< Buffer for a rate block of data */ - drysponge128_x_t x; /**< "x" value for the sponge */ - uint32_t domain; /**< Domain value to mix on next F call */ - uint32_t rounds; /**< Number of rounds for next G call */ - -} drysponge128_state_t; - -/** - * \brief Structure of the rolling DrySPONGE256 state. - */ -typedef struct -{ - gascon256_state_t c; /**< GASCON-256 state for the capacity */ - drysponge256_rate_t r; /**< Buffer for a rate block of data */ - drysponge256_x_t x; /**< "x" value for the sponge */ - uint32_t domain; /**< Domain value to mix on next F call */ - uint32_t rounds; /**< Number of rounds for next G call */ - -} drysponge256_state_t; - -/** - * \brief Permutes the GASCON-128 state using one iteration of CoreRound. - * - * \param state The GASCON-128 state to be permuted. - * \param round The round number. - * - * The input and output \a state will be in little-endian byte order. - */ -void gascon128_core_round(gascon128_state_t *state, uint8_t round); - -/** - * \brief Permutes the GASCON-256 state using one iteration of CoreRound. - * - * \param state The GASCON-256 state to be permuted. - * \param round The round number. - * - * The input and output \a state will be in little-endian byte order. 
- */ -void gascon256_core_round(gascon256_state_t *state, uint8_t round); - -/** - * \brief Performs the DrySPONGE128 G function which runs the core - * rounds and squeezes data out of the GASCON-128 state. - * - * \param state The DrySPONGE128 state. - * - * The data that is squeezed out will be in state->r on exit. - */ -void drysponge128_g(drysponge128_state_t *state); - -/** - * \brief Performs the DrySPONGE256 G function which runs the core - * rounds and squeezes data out of the GASCON-256 state. - * - * \param state The DrySPONGE256 state. - * - * The data that is squeezed out will be in state->r on exit. - */ -void drysponge256_g(drysponge256_state_t *state); - -/** - * \brief Performs the DrySPONGE128 G function which runs the core - * rounds but does not squeeze out any output. - * - * \param state The DrySPONGE128 state. - */ -void drysponge128_g_core(drysponge128_state_t *state); - -/** - * \brief Performs the DrySPONGE256 G function which runs the core - * rounds but does not squeeze out any output. - * - * \param state The DrySPONGE256 state. - */ -void drysponge256_g_core(drysponge256_state_t *state); - -/** - * \brief Performs the absorption phase of the DrySPONGE128 F function. - * - * \param state The DrySPONGE128 state. - * \param input The block of input data to incorporate into the state. - * \param len The length of the input block, which must be less than - * or equal to DRYSPONGE128_RATE. Smaller input blocks will be padded. - * - * This function must be followed by a call to drysponge128_g() or - * drysponge128_g_core() to perform the full F operation. - */ -void drysponge128_f_absorb - (drysponge128_state_t *state, const unsigned char *input, unsigned len); - -/** - * \brief Performs the absorption phase of the DrySPONGE256 F function. - * - * \param state The DrySPONGE256 state. - * \param input The block of input data to incorporate into the state. - * \param len The length of the input block, which must be less than - * or equal to DRYSPONGE256_RATE. Smaller input blocks will be padded. - * - * This function must be followed by a call to drysponge256_g() or - * drysponge256_g_core() to perform the full F operation. - */ -void drysponge256_f_absorb - (drysponge256_state_t *state, const unsigned char *input, unsigned len); - -/** - * \brief Set up a DrySPONGE128 state to begin encryption or decryption. - * - * \param state The DrySPONGE128 state. - * \param key Points to the 16 bytes of the key. - * \param nonce Points to the 16 bytes of the nonce. - * \param final_block Non-zero if after key setup there will be no more blocks. - */ -void drysponge128_setup - (drysponge128_state_t *state, const unsigned char *key, - const unsigned char *nonce, int final_block); - -/** - * \brief Set up a DrySPONGE256 state to begin encryption or decryption. - * - * \param state The DrySPONGE256 state. - * \param key Points to the 32 bytes of the key. - * \param nonce Points to the 16 bytes of the nonce. - * \param final_block Non-zero if after key setup there will be no more blocks. 
- */ -void drysponge256_setup - (drysponge256_state_t *state, const unsigned char *key, - const unsigned char *nonce, int final_block); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/internal-util.h b/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/drygascon/Implementations/crypto_aead/drygascon256/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include <stdint.h> - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianness of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination while at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/drygascon/Implementations/crypto_aead/drygascon256/rhys/internal-drysponge-avr.S b/drygascon/Implementations/crypto_aead/drygascon256/rhys/internal-drysponge-avr.S new file mode 100644 index 0000000..84d0ff8 --- /dev/null +++ b/drygascon/Implementations/crypto_aead/drygascon256/rhys/internal-drysponge-avr.S @@ -0,0 +1,5092 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global gascon128_core_round + .type gascon128_core_round, @function +gascon128_core_round: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ldi r18,15 + sub r18,r22 + swap r18 + or r22,r18 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+16 + ldd r5,Z+17 + ldd r6,Z+18 + ldd r7,Z+19 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + eor r4,r22 + ldd r23,Z+8 + ldd r12,Z+24 + ldd r13,Z+32 + eor r18,r13 + eor r4,r23 + eor r13,r12 + mov r14,r23 + mov r0,r18 + com r0 + and r14,r0 + mov r15,r4 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r4 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r18 + mov r0,r13 + com r0 + and r16,r0 + eor r18,r15 + eor r23,r24 + eor r4,r25 + eor r12,r16 + eor r13,r14 + eor r23,r18 + eor r12,r4 + eor r18,r13 + com r4 + st Z,r18 + std Z+8,r23 + std Z+24,r12 + std Z+32,r13 + ldd r23,Z+9 + ldd r12,Z+25 + ldd r13,Z+33 + eor r19,r13 + eor r5,r23 + eor r13,r12 + mov r14,r23 + mov r0,r19 + com r0 + and r14,r0 + mov r15,r5 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r5 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r19 + mov r0,r13 + com r0 + and r16,r0 + eor r19,r15 + eor r23,r24 + eor r5,r25 + eor r12,r16 + eor r13,r14 + eor r23,r19 + eor r12,r5 + eor r19,r13 + com r5 + std Z+1,r19 + std Z+9,r23 + std Z+25,r12 + std Z+33,r13 + ldd r23,Z+10 + ldd r12,Z+26 + ldd r13,Z+34 + eor r20,r13 + eor r6,r23 + eor r13,r12 + mov r14,r23 + mov r0,r20 + com r0 + and r14,r0 + mov r15,r6 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r6 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r20 + mov r0,r13 + com r0 + and r16,r0 + eor r20,r15 + eor r23,r24 + eor r6,r25 + eor r12,r16 + eor r13,r14 + eor r23,r20 + eor r12,r6 + eor r20,r13 + com r6 + std Z+2,r20 + std Z+10,r23 + std Z+26,r12 + std Z+34,r13 + ldd r23,Z+11 + ldd r12,Z+27 + ldd r13,Z+35 + eor r21,r13 + eor r7,r23 + eor r13,r12 + mov r14,r23 + mov r0,r21 + com r0 + and r14,r0 + mov r15,r7 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r7 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r21 + mov r0,r13 + com r0 + and r16,r0 + eor r21,r15 + eor r23,r24 + eor r7,r25 + eor r12,r16 + eor r13,r14 + eor r23,r21 + eor r12,r7 + eor r21,r13 + com r7 + std Z+3,r21 + std Z+11,r23 + std Z+27,r12 + std Z+35,r13 + ldd r23,Z+12 + ldd r12,Z+28 + ldd r13,Z+36 + eor 
r26,r13 + eor r8,r23 + eor r13,r12 + mov r14,r23 + mov r0,r26 + com r0 + and r14,r0 + mov r15,r8 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r8 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r26 + mov r0,r13 + com r0 + and r16,r0 + eor r26,r15 + eor r23,r24 + eor r8,r25 + eor r12,r16 + eor r13,r14 + eor r23,r26 + eor r12,r8 + eor r26,r13 + com r8 + std Z+4,r26 + std Z+12,r23 + std Z+28,r12 + std Z+36,r13 + ldd r23,Z+13 + ldd r12,Z+29 + ldd r13,Z+37 + eor r27,r13 + eor r9,r23 + eor r13,r12 + mov r14,r23 + mov r0,r27 + com r0 + and r14,r0 + mov r15,r9 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r9 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r27 + mov r0,r13 + com r0 + and r16,r0 + eor r27,r15 + eor r23,r24 + eor r9,r25 + eor r12,r16 + eor r13,r14 + eor r23,r27 + eor r12,r9 + eor r27,r13 + com r9 + std Z+5,r27 + std Z+13,r23 + std Z+29,r12 + std Z+37,r13 + ldd r23,Z+14 + ldd r12,Z+30 + ldd r13,Z+38 + eor r2,r13 + eor r10,r23 + eor r13,r12 + mov r14,r23 + mov r0,r2 + com r0 + and r14,r0 + mov r15,r10 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r10 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r2 + mov r0,r13 + com r0 + and r16,r0 + eor r2,r15 + eor r23,r24 + eor r10,r25 + eor r12,r16 + eor r13,r14 + eor r23,r2 + eor r12,r10 + eor r2,r13 + com r10 + std Z+6,r2 + std Z+14,r23 + std Z+30,r12 + std Z+38,r13 + ldd r23,Z+15 + ldd r12,Z+31 + ldd r13,Z+39 + eor r3,r13 + eor r11,r23 + eor r13,r12 + mov r14,r23 + mov r0,r3 + com r0 + and r14,r0 + mov r15,r11 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r11 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r3 + mov r0,r13 + com r0 + and r16,r0 + eor r3,r15 + eor r23,r24 + eor r11,r25 + eor r12,r16 + eor r13,r14 + eor r23,r3 + eor r12,r11 + eor r3,r13 + com r11 + std Z+7,r3 + std Z+15,r23 + std Z+31,r12 + std Z+39,r13 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r24 + rol r25 + rol r16 + rol r17 + adc r24,r1 + lsl r24 + rol r25 + rol r16 + rol r17 + adc r24,r1 + eor r24,r18 + eor r25,r19 + eor r16,r20 + eor r17,r21 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r18,r24 + eor r19,r25 + eor r20,r16 + eor r21,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + std Z+12,r26 + std Z+13,r27 + std Z+14,r2 + std Z+15,r3 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + bst r12,0 + lsr r15 + ror r14 + ror r13 + ror r12 + bld r15,7 + eor r24,r4 + eor r25,r5 + eor r16,r6 + eor r17,r7 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r1 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r7,r0 + mov r0,r1 + lsr r11 + ror r10 + 
ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + or r11,r0 + eor r4,r24 + eor r5,r25 + eor r6,r16 + eor r7,r17 + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + eor r24,r18 + eor r25,r19 + eor r16,r20 + eor r17,r21 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r24 + eor r19,r25 + eor r20,r16 + eor r21,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+24,r18 + std Z+25,r19 + std Z+26,r20 + std Z+27,r21 + std Z+28,r26 + std Z+29,r27 + std Z+30,r2 + std Z+31,r3 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + or r17,r0 + eor r24,r18 + eor r25,r19 + eor r16,r20 + eor r17,r21 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r18,r24 + eor r19,r25 + eor r20,r16 + eor r21,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+32,r18 + std Z+33,r19 + std Z+34,r20 + std Z+35,r21 + std Z+36,r26 + std Z+37,r27 + std Z+38,r2 + std Z+39,r3 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + or r17,r0 + eor r24,r18 + eor r25,r19 + eor r16,r20 + eor r17,r21 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov 
r0,r19 + mov r19,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r24 + eor r19,r25 + eor r20,r16 + eor r21,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + std Z+16,r4 + std Z+17,r5 + std Z+18,r6 + std Z+19,r7 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size gascon128_core_round, .-gascon128_core_round + + .text +.global drysponge128_g + .type drysponge128_g, @function +drysponge128_g: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + subi r30,180 + sbci r31,255 + ld r19,Z + subi r30,76 + sbc r31,r1 + ldi r18,240 + std Z+40,r1 + std Z+41,r1 + std Z+42,r1 + std Z+43,r1 + std Z+44,r1 + std Z+45,r1 + std Z+46,r1 + std Z+47,r1 + std Z+48,r1 + std Z+49,r1 + std Z+50,r1 + std Z+51,r1 + std Z+52,r1 + std Z+53,r1 + std Z+54,r1 + std Z+55,r1 + ld r20,Z + ldd r21,Z+1 + ldd r22,Z+2 + ldd r23,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+16 + ldd r5,Z+17 + ldd r6,Z+18 + ldd r7,Z+19 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 +38: + eor r4,r18 + ldd r12,Z+8 + ldd r13,Z+24 + ldd r14,Z+32 + eor r20,r14 + eor r4,r12 + eor r14,r13 + mov r15,r12 + mov r0,r20 + com r0 + and r15,r0 + mov r24,r4 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r4 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r20 + mov r0,r14 + com r0 + and r17,r0 + eor r20,r24 + eor r12,r25 + eor r4,r16 + eor r13,r17 + eor r14,r15 + eor r12,r20 + eor r13,r4 + eor r20,r14 + com r4 + st Z,r20 + std Z+8,r12 + std Z+24,r13 + std Z+32,r14 + ldd r12,Z+9 + ldd r13,Z+25 + ldd r14,Z+33 + eor r21,r14 + eor r5,r12 + eor r14,r13 + mov r15,r12 + mov r0,r21 + com r0 + and r15,r0 + mov r24,r5 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r5 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r21 + mov r0,r14 + com r0 + and r17,r0 + eor r21,r24 + eor r12,r25 + eor r5,r16 + eor r13,r17 + eor r14,r15 + eor r12,r21 + eor r13,r5 + eor r21,r14 + com r5 + std Z+1,r21 + std Z+9,r12 + std Z+25,r13 + std Z+33,r14 + ldd r12,Z+10 + ldd r13,Z+26 + ldd r14,Z+34 + eor r22,r14 + eor r6,r12 + eor r14,r13 + mov r15,r12 + mov r0,r22 + com r0 + and r15,r0 + mov r24,r6 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r6 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r22 + mov r0,r14 + com r0 + and r17,r0 + eor r22,r24 + eor r12,r25 + eor r6,r16 + eor r13,r17 + eor r14,r15 + eor r12,r22 + eor r13,r6 + eor r22,r14 + com r6 + std Z+2,r22 + std Z+10,r12 + std Z+26,r13 + std Z+34,r14 + ldd r12,Z+11 + ldd r13,Z+27 + ldd r14,Z+35 + eor r23,r14 + eor r7,r12 + eor r14,r13 + mov r15,r12 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r7 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r7 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r23 + mov r0,r14 + com r0 + and r17,r0 + eor r23,r24 + 
eor r12,r25 + eor r7,r16 + eor r13,r17 + eor r14,r15 + eor r12,r23 + eor r13,r7 + eor r23,r14 + com r7 + std Z+3,r23 + std Z+11,r12 + std Z+27,r13 + std Z+35,r14 + ldd r12,Z+12 + ldd r13,Z+28 + ldd r14,Z+36 + eor r26,r14 + eor r8,r12 + eor r14,r13 + mov r15,r12 + mov r0,r26 + com r0 + and r15,r0 + mov r24,r8 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r8 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r26 + mov r0,r14 + com r0 + and r17,r0 + eor r26,r24 + eor r12,r25 + eor r8,r16 + eor r13,r17 + eor r14,r15 + eor r12,r26 + eor r13,r8 + eor r26,r14 + com r8 + std Z+4,r26 + std Z+12,r12 + std Z+28,r13 + std Z+36,r14 + ldd r12,Z+13 + ldd r13,Z+29 + ldd r14,Z+37 + eor r27,r14 + eor r9,r12 + eor r14,r13 + mov r15,r12 + mov r0,r27 + com r0 + and r15,r0 + mov r24,r9 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r9 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r27 + mov r0,r14 + com r0 + and r17,r0 + eor r27,r24 + eor r12,r25 + eor r9,r16 + eor r13,r17 + eor r14,r15 + eor r12,r27 + eor r13,r9 + eor r27,r14 + com r9 + std Z+5,r27 + std Z+13,r12 + std Z+29,r13 + std Z+37,r14 + ldd r12,Z+14 + ldd r13,Z+30 + ldd r14,Z+38 + eor r2,r14 + eor r10,r12 + eor r14,r13 + mov r15,r12 + mov r0,r2 + com r0 + and r15,r0 + mov r24,r10 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r10 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r2 + mov r0,r14 + com r0 + and r17,r0 + eor r2,r24 + eor r12,r25 + eor r10,r16 + eor r13,r17 + eor r14,r15 + eor r12,r2 + eor r13,r10 + eor r2,r14 + com r10 + std Z+6,r2 + std Z+14,r12 + std Z+30,r13 + std Z+38,r14 + ldd r12,Z+15 + ldd r13,Z+31 + ldd r14,Z+39 + eor r3,r14 + eor r11,r12 + eor r14,r13 + mov r15,r12 + mov r0,r3 + com r0 + and r15,r0 + mov r24,r11 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r11 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r3 + mov r0,r14 + com r0 + and r17,r0 + eor r3,r24 + eor r12,r25 + eor r11,r16 + eor r13,r17 + eor r14,r15 + eor r12,r3 + eor r13,r11 + eor r3,r14 + com r11 + std Z+7,r3 + std Z+15,r12 + std Z+31,r13 + std Z+39,r14 + ldd r20,Z+8 + ldd r21,Z+9 + ldd r22,Z+10 + ldd r23,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r12,r20 + movw r14,r22 + movw r24,r26 + movw r16,r2 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r24 + rol r25 + rol r16 + rol r17 + adc r24,r1 + lsl r24 + rol r25 + rol r16 + rol r17 + adc r24,r1 + eor r24,r20 + eor r25,r21 + eor r16,r22 + eor r17,r23 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r20 + mov r20,r22 + mov r22,r0 + mov r0,r21 + mov r21,r23 + mov r23,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + or r23,r0 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+8,r20 + std Z+9,r21 + std Z+10,r22 + std Z+11,r23 + std Z+12,r26 + std Z+13,r27 + std Z+14,r2 + std Z+15,r3 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + bst r12,0 + lsr r15 + ror r14 + ror r13 + ror r12 + bld r15,7 + eor r24,r4 + eor r25,r5 + eor r16,r6 + eor r17,r7 + eor r12,r8 + eor r13,r9 + eor 
r14,r10 + eor r15,r11 + mov r0,r1 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r7,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + or r11,r0 + eor r4,r24 + eor r5,r25 + eor r6,r16 + eor r7,r17 + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + ldd r20,Z+24 + ldd r21,Z+25 + ldd r22,Z+26 + ldd r23,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r12,r20 + movw r14,r22 + movw r24,r26 + movw r16,r2 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + eor r24,r20 + eor r25,r21 + eor r16,r22 + eor r17,r23 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r20 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r0 + lsl r20 + rol r21 + rol r22 + rol r23 + adc r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + adc r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + adc r20,r1 + mov r0,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+24,r20 + std Z+25,r21 + std Z+26,r22 + std Z+27,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r2 + std Z+31,r3 + ldd r20,Z+32 + ldd r21,Z+33 + ldd r22,Z+34 + ldd r23,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + movw r12,r20 + movw r14,r22 + movw r24,r26 + movw r16,r2 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + or r17,r0 + eor r24,r20 + eor r25,r21 + eor r16,r22 + eor r17,r23 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r20 + mov r20,r22 + mov r22,r0 + mov r0,r21 + mov r21,r23 + mov r23,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + or r23,r0 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+32,r20 + std Z+33,r21 + std Z+34,r22 + std Z+35,r23 + std Z+36,r26 + std Z+37,r27 + std Z+38,r2 + std Z+39,r3 + ld r20,Z + ldd r21,Z+1 + ldd r22,Z+2 + ldd r23,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r12,r20 + movw r14,r22 + movw r24,r26 + movw r16,r2 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + mov r0,r1 + lsr 
r17 + ror r16 + ror r25 + ror r24 + ror r0 + or r17,r0 + eor r24,r20 + eor r25,r21 + eor r16,r22 + eor r17,r23 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r20 + mov r20,r22 + mov r22,r0 + mov r0,r21 + mov r21,r23 + mov r23,r0 + lsl r20 + rol r21 + rol r22 + rol r23 + adc r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + adc r20,r1 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + ldd r12,Z+40 + ldd r13,Z+41 + ldd r14,Z+42 + ldd r15,Z+43 + eor r12,r20 + eor r13,r21 + eor r14,r22 + eor r15,r23 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + std Z+40,r12 + std Z+41,r13 + std Z+42,r14 + std Z+43,r15 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + ldd r0,Z+24 + eor r12,r0 + ldd r0,Z+25 + eor r13,r0 + ldd r0,Z+26 + eor r14,r0 + ldd r0,Z+27 + eor r15,r0 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + ldd r12,Z+48 + ldd r13,Z+49 + ldd r14,Z+50 + ldd r15,Z+51 + ldd r0,Z+8 + eor r12,r0 + ldd r0,Z+9 + eor r13,r0 + ldd r0,Z+10 + eor r14,r0 + ldd r0,Z+11 + eor r15,r0 + ldd r0,Z+28 + eor r12,r0 + ldd r0,Z+29 + eor r13,r0 + ldd r0,Z+30 + eor r14,r0 + ldd r0,Z+31 + eor r15,r0 + std Z+48,r12 + std Z+49,r13 + std Z+50,r14 + std Z+51,r15 + ldd r12,Z+52 + ldd r13,Z+53 + ldd r14,Z+54 + ldd r15,Z+55 + ldd r0,Z+12 + eor r12,r0 + ldd r0,Z+13 + eor r13,r0 + ldd r0,Z+14 + eor r14,r0 + ldd r0,Z+15 + eor r15,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + std Z+52,r12 + std Z+53,r13 + std Z+54,r14 + std Z+55,r15 + subi r18,15 + dec r19 + breq 5904f + rjmp 38b +5904: + st Z,r20 + std Z+1,r21 + std Z+2,r22 + std Z+3,r23 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + std Z+16,r4 + std Z+17,r5 + std Z+18,r6 + std Z+19,r7 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size drysponge128_g, .-drysponge128_g + + .text +.global gascon256_core_round + .type gascon256_core_round, @function +gascon256_core_round: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,8 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 26 + ldi r18,15 + sub r18,r22 + swap r18 + or r22,r18 + ld r18,Z+ + ld r19,Z+ + ld r20,Z+ + ld r21,Z+ + ld r26,Z+ + ld r27,Z+ + ld r2,Z+ + ld r3,Z+ + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + eor r4,r22 + ld r22,Z + ldd r23,Z+8 + ldd r12,Z+16 + ldd r13,Z+32 + ldd r14,Z+40 + ldd r15,Z+48 + ldd r24,Z+56 + eor r18,r24 + eor r23,r22 + eor r4,r12 + eor r14,r13 + eor r24,r15 + mov r17,r18 + mov r25,r22 + mov r0,r18 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r18,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r4 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r4 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r4,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + 
mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r18 + eor r12,r23 + eor r13,r4 + eor r15,r14 + eor r18,r24 + com r4 + std Y+1,r18 + st Z,r22 + std Z+8,r23 + std Z+16,r12 + std Z+32,r13 + std Z+40,r14 + std Z+48,r15 + std Z+56,r24 + ldd r22,Z+1 + ldd r23,Z+9 + ldd r12,Z+17 + ldd r13,Z+33 + ldd r14,Z+41 + ldd r15,Z+49 + ldd r24,Z+57 + eor r19,r24 + eor r23,r22 + eor r5,r12 + eor r14,r13 + eor r24,r15 + mov r17,r19 + mov r25,r22 + mov r0,r19 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r19,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r5 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r5 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r5,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r19 + eor r12,r23 + eor r13,r5 + eor r15,r14 + eor r19,r24 + com r5 + std Y+2,r19 + std Z+1,r22 + std Z+9,r23 + std Z+17,r12 + std Z+33,r13 + std Z+41,r14 + std Z+49,r15 + std Z+57,r24 + ldd r22,Z+2 + ldd r23,Z+10 + ldd r12,Z+18 + ldd r13,Z+34 + ldd r14,Z+42 + ldd r15,Z+50 + ldd r24,Z+58 + eor r20,r24 + eor r23,r22 + eor r6,r12 + eor r14,r13 + eor r24,r15 + mov r17,r20 + mov r25,r22 + mov r0,r20 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r20,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r6 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r6 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r6,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r20 + eor r12,r23 + eor r13,r6 + eor r15,r14 + eor r20,r24 + com r6 + std Y+3,r20 + std Z+2,r22 + std Z+10,r23 + std Z+18,r12 + std Z+34,r13 + std Z+42,r14 + std Z+50,r15 + std Z+58,r24 + ldd r22,Z+3 + ldd r23,Z+11 + ldd r12,Z+19 + ldd r13,Z+35 + ldd r14,Z+43 + ldd r15,Z+51 + ldd r24,Z+59 + eor r21,r24 + eor r23,r22 + eor r7,r12 + eor r14,r13 + eor r24,r15 + mov r17,r21 + mov r25,r22 + mov r0,r21 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r21,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r7 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r7 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r7,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r21 + eor r12,r23 + eor r13,r7 + eor r15,r14 + eor r21,r24 + com r7 + std Y+4,r21 + std Z+3,r22 + std Z+11,r23 + std Z+19,r12 + std Z+35,r13 + std Z+43,r14 + std Z+51,r15 + std Z+59,r24 + ldd r22,Z+4 + ldd r23,Z+12 + ldd r12,Z+20 + ldd r13,Z+36 + ldd r14,Z+44 + ldd r15,Z+52 + ldd r24,Z+60 + eor r26,r24 + eor r23,r22 + eor r8,r12 + eor r14,r13 + eor r24,r15 + mov r17,r26 + mov r25,r22 + mov r0,r26 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r26,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r8 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + 
mov r0,r8 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r8,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r26 + eor r12,r23 + eor r13,r8 + eor r15,r14 + eor r26,r24 + com r8 + std Y+5,r26 + std Z+4,r22 + std Z+12,r23 + std Z+20,r12 + std Z+36,r13 + std Z+44,r14 + std Z+52,r15 + std Z+60,r24 + ldd r22,Z+5 + ldd r23,Z+13 + ldd r12,Z+21 + ldd r13,Z+37 + ldd r14,Z+45 + ldd r15,Z+53 + ldd r24,Z+61 + eor r27,r24 + eor r23,r22 + eor r9,r12 + eor r14,r13 + eor r24,r15 + mov r17,r27 + mov r25,r22 + mov r0,r27 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r27,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r9 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r9 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r9,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r27 + eor r12,r23 + eor r13,r9 + eor r15,r14 + eor r27,r24 + com r9 + std Y+6,r27 + std Z+5,r22 + std Z+13,r23 + std Z+21,r12 + std Z+37,r13 + std Z+45,r14 + std Z+53,r15 + std Z+61,r24 + ldd r22,Z+6 + ldd r23,Z+14 + ldd r12,Z+22 + ldd r13,Z+38 + ldd r14,Z+46 + ldd r15,Z+54 + ldd r24,Z+62 + eor r2,r24 + eor r23,r22 + eor r10,r12 + eor r14,r13 + eor r24,r15 + mov r17,r2 + mov r25,r22 + mov r0,r2 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r2,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r10 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r10 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r2 + eor r12,r23 + eor r13,r10 + eor r15,r14 + eor r2,r24 + com r10 + std Y+7,r2 + std Z+6,r22 + std Z+14,r23 + std Z+22,r12 + std Z+38,r13 + std Z+46,r14 + std Z+54,r15 + std Z+62,r24 + ldd r22,Z+7 + ldd r23,Z+15 + ldd r12,Z+23 + ldd r13,Z+39 + ldd r14,Z+47 + ldd r15,Z+55 + ldd r24,Z+63 + eor r3,r24 + eor r23,r22 + eor r11,r12 + eor r14,r13 + eor r24,r15 + mov r17,r3 + mov r25,r22 + mov r0,r3 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r3,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r11 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r11 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r3 + eor r12,r23 + eor r13,r11 + eor r15,r14 + eor r3,r24 + com r11 + std Y+8,r3 + std Z+7,r22 + std Z+15,r23 + std Z+23,r12 + std Z+39,r13 + std Z+47,r14 + std Z+55,r15 + std Z+63,r24 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc 
r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + bst r22,0 + lsr r13 + ror r12 + ror r23 + ror r22 + bld r13,7 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + std Z+12,r26 + std Z+13,r27 + std Z+14,r2 + std Z+15,r3 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r22 + mov r22,r23 + mov r23,r12 + mov r12,r13 + mov r13,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + or r13,r0 + mov r0,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + std Z+20,r26 + std Z+21,r27 + std Z+22,r2 + std Z+23,r3 + movw r22,r4 + movw r12,r6 + movw r14,r8 + movw r24,r10 + mov r0,r1 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + or r13,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r4 + eor r15,r5 + eor r24,r6 + eor r25,r7 + eor r22,r8 + eor r23,r9 + eor r12,r10 + eor r13,r11 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov 
r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r1 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + or r11,r0 + eor r4,r14 + eor r5,r15 + eor r6,r24 + eor r7,r25 + eor r8,r22 + eor r9,r23 + eor r10,r12 + eor r11,r13 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r22 + mov r22,r12 + mov r12,r0 + mov r0,r23 + mov r23,r13 + mov r13,r0 + mov r0,r14 + mov r14,r24 + mov r24,r0 + mov r0,r15 + mov r15,r25 + mov r25,r0 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+32,r18 + std Z+33,r19 + std Z+34,r20 + std Z+35,r21 + std Z+36,r26 + std Z+37,r27 + std Z+38,r2 + std Z+39,r3 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + ldd r26,Z+44 + ldd r27,Z+45 + ldd r2,Z+46 + ldd r3,Z+47 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r13 + mov r13,r12 + mov r12,r23 + mov r23,r22 + mov r22,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + or r13,r0 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+40,r18 + std Z+41,r19 + std Z+42,r20 + std Z+43,r21 + std Z+44,r26 + std Z+45,r27 + std Z+46,r2 + std Z+47,r3 + ldd r18,Z+48 + ldd r19,Z+49 + ldd r20,Z+50 + ldd r21,Z+51 + ldd r26,Z+52 + ldd r27,Z+53 + ldd r2,Z+54 + ldd r3,Z+55 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r22 + mov r22,r23 + mov r23,r12 + mov r12,r13 + mov r13,r0 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 
+ ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r3 + mov r3,r2 + mov r2,r27 + mov r27,r26 + mov r26,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+48,r18 + std Z+49,r19 + std Z+50,r20 + std Z+51,r21 + std Z+52,r26 + std Z+53,r27 + std Z+54,r2 + std Z+55,r3 + ldd r18,Z+56 + ldd r19,Z+57 + ldd r20,Z+58 + ldd r21,Z+59 + ldd r26,Z+60 + ldd r27,Z+61 + ldd r2,Z+62 + ldd r3,Z+63 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r13 + mov r13,r12 + mov r12,r23 + mov r23,r22 + mov r22,r0 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+56,r18 + std Z+57,r19 + std Z+58,r20 + std Z+59,r21 + std Z+60,r26 + std Z+61,r27 + std Z+62,r2 + std Z+63,r3 + ldd r18,Y+1 + ldd r19,Y+2 + ldd r20,Y+3 + ldd r21,Y+4 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r2,Y+7 + ldd r3,Y+8 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r22 + mov r22,r23 + mov r23,r12 + mov r12,r13 + mov r13,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + or r13,r0 + mov r0,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+24,r4 + std Z+25,r5 + std Z+26,r6 + std Z+27,r7 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + st -Z,r3 + st -Z,r2 + st -Z,r27 + st -Z,r26 + st -Z,r21 + st -Z,r20 + st -Z,r19 + st -Z,r18 + adiw r28,8 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gascon256_core_round, .-gascon256_core_round + + .text +.global drysponge256_g + .type 
drysponge256_g, @function +drysponge256_g: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,26 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 44 + subi r30,148 + sbci r31,255 + ld r19,Z + subi r30,108 + sbc r31,r1 + ldi r18,240 + std Y+25,r19 + std Y+26,r18 + std Y+9,r1 + std Y+10,r1 + std Y+11,r1 + std Y+12,r1 + std Y+13,r1 + std Y+14,r1 + std Y+15,r1 + std Y+16,r1 + std Y+17,r1 + std Y+18,r1 + std Y+19,r1 + std Y+20,r1 + std Y+21,r1 + std Y+22,r1 + std Y+23,r1 + std Y+24,r1 + ld r18,Z+ + ld r19,Z+ + ld r20,Z+ + ld r21,Z+ + ld r22,Z+ + ld r23,Z+ + ld r26,Z+ + ld r27,Z+ + ldd r2,Z+24 + ldd r3,Z+25 + ldd r4,Z+26 + ldd r5,Z+27 + ldd r6,Z+28 + ldd r7,Z+29 + ldd r8,Z+30 + ldd r9,Z+31 +40: + ldd r24,Y+26 + eor r2,r24 + subi r24,15 + std Y+26,r24 + ld r10,Z + ldd r11,Z+8 + ldd r12,Z+16 + ldd r13,Z+32 + ldd r14,Z+40 + ldd r15,Z+48 + ldd r24,Z+56 + eor r18,r24 + eor r11,r10 + eor r2,r12 + eor r14,r13 + eor r24,r15 + mov r17,r18 + mov r25,r10 + mov r0,r18 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r18,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r2 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r2 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r2,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r18 + eor r12,r11 + eor r13,r2 + eor r15,r14 + eor r18,r24 + com r2 + std Y+1,r18 + st Z,r10 + std Z+8,r11 + std Z+16,r12 + std Z+32,r13 + std Z+40,r14 + std Z+48,r15 + std Z+56,r24 + ldd r10,Z+1 + ldd r11,Z+9 + ldd r12,Z+17 + ldd r13,Z+33 + ldd r14,Z+41 + ldd r15,Z+49 + ldd r24,Z+57 + eor r19,r24 + eor r11,r10 + eor r3,r12 + eor r14,r13 + eor r24,r15 + mov r17,r19 + mov r25,r10 + mov r0,r19 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r19,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r3 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r3 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r3,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r19 + eor r12,r11 + eor r13,r3 + eor r15,r14 + eor r19,r24 + com r3 + std Y+2,r19 + std Z+1,r10 + std Z+9,r11 + std Z+17,r12 + std Z+33,r13 + std Z+41,r14 + std Z+49,r15 + std Z+57,r24 + ldd r10,Z+2 + ldd r11,Z+10 + ldd r12,Z+18 + ldd r13,Z+34 + ldd r14,Z+42 + ldd r15,Z+50 + ldd r24,Z+58 + eor r20,r24 + eor r11,r10 + eor r4,r12 + eor r14,r13 + eor r24,r15 + mov r17,r20 + mov r25,r10 + mov r0,r20 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r20,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r4 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r4 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r4,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor 
r15,r17 + eor r24,r25 + eor r10,r20 + eor r12,r11 + eor r13,r4 + eor r15,r14 + eor r20,r24 + com r4 + std Y+3,r20 + std Z+2,r10 + std Z+10,r11 + std Z+18,r12 + std Z+34,r13 + std Z+42,r14 + std Z+50,r15 + std Z+58,r24 + ldd r10,Z+3 + ldd r11,Z+11 + ldd r12,Z+19 + ldd r13,Z+35 + ldd r14,Z+43 + ldd r15,Z+51 + ldd r24,Z+59 + eor r21,r24 + eor r11,r10 + eor r5,r12 + eor r14,r13 + eor r24,r15 + mov r17,r21 + mov r25,r10 + mov r0,r21 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r21,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r5 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r5 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r5,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r21 + eor r12,r11 + eor r13,r5 + eor r15,r14 + eor r21,r24 + com r5 + std Y+4,r21 + std Z+3,r10 + std Z+11,r11 + std Z+19,r12 + std Z+35,r13 + std Z+43,r14 + std Z+51,r15 + std Z+59,r24 + ldd r10,Z+4 + ldd r11,Z+12 + ldd r12,Z+20 + ldd r13,Z+36 + ldd r14,Z+44 + ldd r15,Z+52 + ldd r24,Z+60 + eor r22,r24 + eor r11,r10 + eor r6,r12 + eor r14,r13 + eor r24,r15 + mov r17,r22 + mov r25,r10 + mov r0,r22 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r6 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r6 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r6,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r22 + eor r12,r11 + eor r13,r6 + eor r15,r14 + eor r22,r24 + com r6 + std Y+5,r22 + std Z+4,r10 + std Z+12,r11 + std Z+20,r12 + std Z+36,r13 + std Z+44,r14 + std Z+52,r15 + std Z+60,r24 + ldd r10,Z+5 + ldd r11,Z+13 + ldd r12,Z+21 + ldd r13,Z+37 + ldd r14,Z+45 + ldd r15,Z+53 + ldd r24,Z+61 + eor r23,r24 + eor r11,r10 + eor r7,r12 + eor r14,r13 + eor r24,r15 + mov r17,r23 + mov r25,r10 + mov r0,r23 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r7 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r7 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r7,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r23 + eor r12,r11 + eor r13,r7 + eor r15,r14 + eor r23,r24 + com r7 + std Y+6,r23 + std Z+5,r10 + std Z+13,r11 + std Z+21,r12 + std Z+37,r13 + std Z+45,r14 + std Z+53,r15 + std Z+61,r24 + ldd r10,Z+6 + ldd r11,Z+14 + ldd r12,Z+22 + ldd r13,Z+38 + ldd r14,Z+46 + ldd r15,Z+54 + ldd r24,Z+62 + eor r26,r24 + eor r11,r10 + eor r8,r12 + eor r14,r13 + eor r24,r15 + mov r17,r26 + mov r25,r10 + mov r0,r26 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r26,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r8 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r8 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + 
eor r8,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r26 + eor r12,r11 + eor r13,r8 + eor r15,r14 + eor r26,r24 + com r8 + std Y+7,r26 + std Z+6,r10 + std Z+14,r11 + std Z+22,r12 + std Z+38,r13 + std Z+46,r14 + std Z+54,r15 + std Z+62,r24 + ldd r10,Z+7 + ldd r11,Z+15 + ldd r12,Z+23 + ldd r13,Z+39 + ldd r14,Z+47 + ldd r15,Z+55 + ldd r24,Z+63 + eor r27,r24 + eor r11,r10 + eor r9,r12 + eor r14,r13 + eor r24,r15 + mov r17,r27 + mov r25,r10 + mov r0,r27 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r27,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r9 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r9 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r9,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r27 + eor r12,r11 + eor r13,r9 + eor r15,r14 + eor r27,r24 + com r9 + std Y+8,r27 + std Z+7,r10 + std Z+15,r11 + std Z+23,r12 + std Z+39,r13 + std Z+47,r14 + std Z+55,r15 + std Z+63,r24 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r26,Z+6 + ldd r27,Z+7 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r22 + mov r22,r26 + mov r26,r0 + mov r0,r23 + mov r23,r27 + mov r27,r0 + mov r0,r1 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + or r27,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r22 + std Z+5,r23 + std Z+6,r26 + std Z+7,r27 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r26,Z+14 + ldd r27,Z+15 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + bst r10,0 + lsr r13 + ror r12 + ror r11 + ror r10 + bld r13,7 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r1 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + or r27,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + std Z+12,r22 + std Z+13,r23 + std Z+14,r26 + std Z+15,r27 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r26,Z+22 + ldd r27,Z+23 + movw r10,r18 + 
movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r10 + mov r10,r11 + mov r11,r12 + mov r12,r13 + mov r13,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + or r13,r0 + mov r0,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r22 + mov r22,r23 + mov r23,r26 + mov r26,r27 + mov r27,r0 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + std Z+20,r22 + std Z+21,r23 + std Z+22,r26 + std Z+23,r27 + movw r10,r2 + movw r12,r4 + movw r14,r6 + movw r24,r8 + mov r0,r1 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + or r13,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r2 + eor r15,r3 + eor r24,r4 + eor r25,r5 + eor r10,r6 + eor r11,r7 + eor r12,r8 + eor r13,r9 + mov r0,r2 + mov r2,r4 + mov r4,r0 + mov r0,r3 + mov r3,r5 + mov r5,r0 + mov r0,r1 + lsr r5 + ror r4 + ror r3 + ror r2 + ror r0 + lsr r5 + ror r4 + ror r3 + ror r2 + ror r0 + lsr r5 + ror r4 + ror r3 + ror r2 + ror r0 + lsr r5 + ror r4 + ror r3 + ror r2 + ror r0 + or r5,r0 + mov r0,r6 + mov r6,r8 + mov r8,r0 + mov r0,r7 + mov r7,r9 + mov r9,r0 + mov r0,r1 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + or r9,r0 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + eor r6,r10 + eor r7,r11 + eor r8,r12 + eor r9,r13 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r22,Z+36 + ldd r23,Z+37 + ldd r26,Z+38 + ldd r27,Z+39 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r10 + mov r10,r12 + mov r12,r0 + mov r0,r11 + mov r11,r13 + mov r13,r0 + mov r0,r14 + mov r14,r24 + mov r24,r0 + mov r0,r15 + mov r15,r25 + mov r25,r0 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r22 + mov r22,r26 + mov r26,r0 + mov r0,r23 + mov r23,r27 + mov r27,r0 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+32,r18 + std Z+33,r19 + std Z+34,r20 + std Z+35,r21 + std Z+36,r22 + std Z+37,r23 + std Z+38,r26 + std Z+39,r27 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + ldd r22,Z+44 + ldd 
r23,Z+45 + ldd r26,Z+46 + ldd r27,Z+47 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r13 + mov r13,r12 + mov r12,r11 + mov r11,r10 + mov r10,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + or r13,r0 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+40,r18 + std Z+41,r19 + std Z+42,r20 + std Z+43,r21 + std Z+44,r22 + std Z+45,r23 + std Z+46,r26 + std Z+47,r27 + ldd r18,Z+48 + ldd r19,Z+49 + ldd r20,Z+50 + ldd r21,Z+51 + ldd r22,Z+52 + ldd r23,Z+53 + ldd r26,Z+54 + ldd r27,Z+55 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r10 + mov r10,r11 + mov r11,r12 + mov r12,r13 + mov r13,r0 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r27 + mov r27,r26 + mov r26,r23 + mov r23,r22 + mov r22,r0 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+48,r18 + std Z+49,r19 + std Z+50,r20 + std Z+51,r21 + std Z+52,r22 + std Z+53,r23 + std Z+54,r26 + std Z+55,r27 + ldd r18,Z+56 + ldd r19,Z+57 + ldd r20,Z+58 + ldd r21,Z+59 + ldd r22,Z+60 + ldd r23,Z+61 + ldd r26,Z+62 + ldd r27,Z+63 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r13 + mov r13,r12 + mov r12,r11 + mov r11,r10 + mov r10,r0 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r27 + mov r27,r26 + mov r26,r23 + mov r23,r22 + mov r22,r0 + mov r0,r1 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + or r27,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+56,r18 + std Z+57,r19 + std Z+58,r20 + std Z+59,r21 + 
std Z+60,r22 + std Z+61,r23 + std Z+62,r26 + std Z+63,r27 + ldd r18,Y+1 + ldd r19,Y+2 + ldd r20,Y+3 + ldd r21,Y+4 + ldd r22,Y+5 + ldd r23,Y+6 + ldd r26,Y+7 + ldd r27,Y+8 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r10 + mov r10,r11 + mov r11,r12 + mov r12,r13 + mov r13,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + or r13,r0 + mov r0,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r22 + mov r22,r26 + mov r26,r0 + mov r0,r23 + mov r23,r27 + mov r27,r0 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + ldd r10,Y+9 + ldd r11,Y+10 + ldd r12,Y+11 + ldd r13,Y+12 + eor r10,r18 + eor r11,r19 + eor r12,r20 + eor r13,r21 + ldd r0,Z+12 + eor r10,r0 + ldd r0,Z+13 + eor r11,r0 + ldd r0,Z+14 + eor r12,r0 + ldd r0,Z+15 + eor r13,r0 + ldd r0,Z+32 + eor r10,r0 + ldd r0,Z+33 + eor r11,r0 + ldd r0,Z+34 + eor r12,r0 + ldd r0,Z+35 + eor r13,r0 + ldd r0,Z+52 + eor r10,r0 + ldd r0,Z+53 + eor r11,r0 + ldd r0,Z+54 + eor r12,r0 + ldd r0,Z+55 + eor r13,r0 + std Y+9,r10 + std Y+10,r11 + std Y+11,r12 + std Y+12,r13 + ldd r10,Y+13 + ldd r11,Y+14 + ldd r12,Y+15 + ldd r13,Y+16 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + ldd r0,Z+16 + eor r10,r0 + ldd r0,Z+17 + eor r11,r0 + ldd r0,Z+18 + eor r12,r0 + ldd r0,Z+19 + eor r13,r0 + ldd r0,Z+36 + eor r10,r0 + ldd r0,Z+37 + eor r11,r0 + ldd r0,Z+38 + eor r12,r0 + ldd r0,Z+39 + eor r13,r0 + ldd r0,Z+40 + eor r10,r0 + ldd r0,Z+41 + eor r11,r0 + ldd r0,Z+42 + eor r12,r0 + ldd r0,Z+43 + eor r13,r0 + std Y+13,r10 + std Y+14,r11 + std Y+15,r12 + std Y+16,r13 + ldd r10,Y+17 + ldd r11,Y+18 + ldd r12,Y+19 + ldd r13,Y+20 + eor r10,r2 + eor r11,r3 + eor r12,r4 + eor r13,r5 + ld r0,Z + eor r10,r0 + ldd r0,Z+1 + eor r11,r0 + ldd r0,Z+2 + eor r12,r0 + ldd r0,Z+3 + eor r13,r0 + ldd r0,Z+20 + eor r10,r0 + ldd r0,Z+21 + eor r11,r0 + ldd r0,Z+22 + eor r12,r0 + ldd r0,Z+23 + eor r13,r0 + ldd r0,Z+44 + eor r10,r0 + ldd r0,Z+45 + eor r11,r0 + ldd r0,Z+46 + eor r12,r0 + ldd r0,Z+47 + eor r13,r0 + std Y+17,r10 + std Y+18,r11 + std Y+19,r12 + std Y+20,r13 + ldd r10,Y+21 + ldd r11,Y+22 + ldd r12,Y+23 + ldd r13,Y+24 + eor r10,r6 + eor r11,r7 + eor r12,r8 + eor r13,r9 + ldd r0,Z+4 + eor r10,r0 + ldd r0,Z+5 + eor r11,r0 + ldd r0,Z+6 + eor r12,r0 + ldd r0,Z+7 + eor r13,r0 + ldd r0,Z+8 + eor r10,r0 + ldd r0,Z+9 + eor r11,r0 + ldd r0,Z+10 + eor r12,r0 + ldd r0,Z+11 + eor r13,r0 + ldd r0,Z+48 + eor r10,r0 + ldd r0,Z+49 + eor r11,r0 + ldd r0,Z+50 + eor r12,r0 + ldd r0,Z+51 + eor r13,r0 + std Y+21,r10 + std Y+22,r11 + std Y+23,r12 + std Y+24,r13 + ldd r10,Y+25 + dec r10 + std Y+25,r10 + breq 6623f + rjmp 40b +6623: + std Z+24,r2 + std Z+25,r3 + std Z+26,r4 + std Z+27,r5 + std Z+28,r6 + std Z+29,r7 + std Z+30,r8 + std Z+31,r9 + st -Z,r27 + st -Z,r26 + st -Z,r23 + st -Z,r22 + st -Z,r21 + st -Z,r20 + st -Z,r19 + st -Z,r18 + ldi r25,72 + add r30,r25 + adc r31,r1 + ldd r18,Y+9 + ldd r19,Y+10 + ldd r20,Y+11 + ldd r21,Y+12 + ldd r22,Y+13 + ldd r23,Y+14 + ldd 
r26,Y+15 + ldd r27,Y+16 + ldd r2,Y+17 + ldd r3,Y+18 + ldd r4,Y+19 + ldd r5,Y+20 + ldd r6,Y+21 + ldd r7,Y+22 + ldd r8,Y+23 + ldd r9,Y+24 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r22 + std Z+5,r23 + std Z+6,r26 + std Z+7,r27 + std Z+8,r2 + std Z+9,r3 + std Z+10,r4 + std Z+11,r5 + std Z+12,r6 + std Z+13,r7 + std Z+14,r8 + std Z+15,r9 + adiw r28,26 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size drysponge256_g, .-drysponge256_g + +#endif diff --git a/drygascon/Implementations/crypto_aead/drygascon256/rhys/internal-drysponge.c b/drygascon/Implementations/crypto_aead/drygascon256/rhys/internal-drysponge.c index 67f1b27..6dfe48c 100644 --- a/drygascon/Implementations/crypto_aead/drygascon256/rhys/internal-drysponge.c +++ b/drygascon/Implementations/crypto_aead/drygascon256/rhys/internal-drysponge.c @@ -23,6 +23,8 @@ #include "internal-drysponge.h" #include +#if !defined(__AVR__) + /* Right rotations in bit-interleaved format */ #define intRightRotateEven(x,bits) \ (__extension__ ({ \ @@ -289,6 +291,8 @@ void drysponge256_g(drysponge256_state_t *state) } } +#endif /* !__AVR__ */ + void drysponge128_g_core(drysponge128_state_t *state) { unsigned round; @@ -304,6 +308,7 @@ void drysponge256_g_core(drysponge256_state_t *state) } /** + * \fn uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index) * \brief Selects an element of x in constant time. * * \param x Points to the four elements of x. @@ -311,6 +316,7 @@ void drysponge256_g_core(drysponge256_state_t *state) * * \return The selected element of x. */ +#if !defined(__AVR__) STATIC_INLINE uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index) { /* We need to be careful how we select each element of x because @@ -340,6 +346,11 @@ STATIC_INLINE uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index) mask = -((uint32_t)((0x04 - (index ^ 0x03)) >> 2)); return result ^ (x[3] & mask); } +#else +/* AVR is more or less immune to cache timing issues because it doesn't + * have anything like an L1 or L2 cache. Select the word directly */ +#define drysponge_select_x(x, index) ((x)[(index)]) +#endif /** * \brief Mixes a 32-bit value into the DrySPONGE128 state. diff --git a/drygascon/Implementations/crypto_aead/drygascon256/rhys/internal-util.h b/drygascon/Implementations/crypto_aead/drygascon256/rhys/internal-util.h index e79158c..e30166d 100644 --- a/drygascon/Implementations/crypto_aead/drygascon256/rhys/internal-util.h +++ b/drygascon/Implementations/crypto_aead/drygascon256/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
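For readers following the C hunk above: drysponge_select_x keeps its masked, constant-time form on cached CPUs and becomes a plain array index on AVR, where there is no data cache through which the index could leak. The non-AVR path reads all four words of x and keeps only the one whose mask is all-ones. A self-contained restatement of that masking trick (the helper name ct_select4 is illustrative, not from the patch) looks like this:

#include <stdint.h>

/* Constant-time selection of x[index] for index in 0..3.  Every word is
 * read and combined under a mask, so the memory access pattern does not
 * depend on the secret index.  Mirrors the masking kept by
 * drysponge_select_x in internal-drysponge.c for non-AVR builds. */
static uint32_t ct_select4(const uint32_t x[4], uint8_t index)
{
    uint32_t result = 0;
    uint32_t mask;
    mask = -((uint32_t)((0x04 - index) >> 2));          /* all-ones iff index == 0 */
    result ^= x[0] & mask;
    mask = -((uint32_t)((0x04 - (index ^ 0x01)) >> 2)); /* all-ones iff index == 1 */
    result ^= x[1] & mask;
    mask = -((uint32_t)((0x04 - (index ^ 0x02)) >> 2)); /* all-ones iff index == 2 */
    result ^= x[2] & mask;
    mask = -((uint32_t)((0x04 - (index ^ 0x03)) >> 2)); /* all-ones iff index == 3 */
    result ^= x[3] & mask;
    return result;
}

Exactly one of the four masks is nonzero, so XOR-accumulating the masked words returns the requested element without a data-dependent load.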
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/aead-common.c b/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
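The composed-rotation block added to internal-util.h builds every 32-bit rotation out of the two operations that are cheap on AVR: a rotation by a whole number of bytes (pure register moves) and a rotation by a single bit. A count such as 12 therefore becomes "left by 16, then right by 4", exactly as the leftRotate12 macro above is written. A quick standalone check of that equivalence, using plain C stand-ins for the macros, might look like this:

#include <assert.h>
#include <stdint.h>

/* Plain-C equivalents of the generic leftRotate/rightRotate macros. */
static uint32_t rotl32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32u - bits));
}

static uint32_t rotr32(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32u - bits));
}

int main(void)
{
    uint32_t x = 0x12345678u;

    /* leftRotate12(a): one byte-aligned rotate left by 16,
     * followed by four single-bit rotates right. */
    uint32_t composed = rotl32(x, 16);
    for (int i = 0; i < 4; ++i)
        composed = rotr32(composed, 1);

    assert(composed == rotl32(x, 12));
    return 0;
}

On targets with a barrel shifter the generic single-shift rotations are used instead, which is why the whole composed block is guarded by LW_CRYPTO_ROTATE32_COMPOSED and only enabled when __AVR__ is defined.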
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/aead-common.h b/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. 
- * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. 
- * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/api.h b/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/api.h deleted file mode 100644 index ae8c7f6..0000000 --- a/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/api.h +++ /dev/null @@ -1 +0,0 @@ -#define CRYPTO_BYTES 32 diff --git a/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/drygascon.c b/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/drygascon.c deleted file mode 100644 index e963903..0000000 --- a/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/drygascon.c +++ /dev/null @@ -1,421 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
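The aead_check_tag() and aead_check_tag_precheck() contracts above come down to comparing two tags without data-dependent branches and wiping the plaintext when they differ. A minimal sketch of that idea, using a stand-alone helper named sketch_check_tag that is purely illustrative and is not the routine shipped in aead-common.c, might look like this:

/* Illustrative constant-time tag comparison: accumulate the XOR of every
 * byte, spread the result into an all-zero/all-one mask, then use the mask
 * to wipe the plaintext and to build the return value. */
static int sketch_check_tag
    (unsigned char *plaintext, unsigned long long plaintext_len,
     const unsigned char *tag1, const unsigned char *tag2, unsigned tag_len)
{
    unsigned char accum = 0;
    unsigned char mask;
    unsigned index;
    unsigned long long posn;

    for (index = 0; index < tag_len; ++index)
        accum |= (unsigned char)(tag1[index] ^ tag2[index]);

    /* mask is 0x00 when the tags match and 0xFF when they differ. */
    mask = (unsigned char)(((0x100u - accum) >> 8) - 1u);

    /* Zero the plaintext on failure so the caller cannot use it by mistake. */
    for (posn = 0; posn < plaintext_len; ++posn)
        plaintext[posn] &= (unsigned char)~mask;

    return -(int)(mask & 1u);   /* 0 on success, -1 on failure */
}

The "precheck" variant documented above simply folds the result of an earlier check into the same final mask, so a failure in either step zeroes the plaintext and returns -1.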
- */ - -#include "drygascon.h" -#include "internal-drysponge.h" -#include - -aead_cipher_t const drygascon128_cipher = { - "DryGASCON128", - DRYGASCON128_KEY_SIZE, - DRYGASCON128_NONCE_SIZE, - DRYGASCON128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - drygascon128_aead_encrypt, - drygascon128_aead_decrypt -}; - -aead_cipher_t const drygascon256_cipher = { - "DryGASCON256", - DRYGASCON256_KEY_SIZE, - DRYGASCON256_NONCE_SIZE, - DRYGASCON256_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - drygascon256_aead_encrypt, - drygascon256_aead_decrypt -}; - -aead_hash_algorithm_t const drygascon128_hash_algorithm = { - "DryGASCON128-HASH", - sizeof(int), - DRYGASCON128_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - drygascon128_hash, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const drygascon256_hash_algorithm = { - "DryGASCON256-HASH", - sizeof(int), - DRYGASCON256_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - drygascon256_hash, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \brief Processes associated data for DryGASCON128. - * - * \param state DrySPONGE128 sponge state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data, must not be zero. - * \param finalize Non-zero to finalize packet processing because - * the message is zero-length. - */ -static void drygascon128_process_ad - (drysponge128_state_t *state, const unsigned char *ad, - unsigned long long adlen, int finalize) -{ - /* Process all blocks except the last one */ - while (adlen > DRYSPONGE128_RATE) { - drysponge128_f_absorb(state, ad, DRYSPONGE128_RATE); - drysponge128_g_core(state); - ad += DRYSPONGE128_RATE; - adlen -= DRYSPONGE128_RATE; - } - - /* Process the last block with domain separation and padding */ - state->domain = DRYDOMAIN128_ASSOC_DATA; - if (finalize) - state->domain |= DRYDOMAIN128_FINAL; - if (adlen < DRYSPONGE128_RATE) - state->domain |= DRYDOMAIN128_PADDED; - drysponge128_f_absorb(state, ad, (unsigned)adlen); - drysponge128_g(state); -} - -/** - * \brief Processes associated data for DryGASCON256. - * - * \param state DrySPONGE256 sponge state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data, must not be zero. - * \param finalize Non-zero to finalize packet processing because - * the message is zero-length. 
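The drygascon128_cipher and drygascon256_cipher blocks defined above exist so that generic code can drive any AEAD in the suite through the aead_cipher_t function pointers. A rough usage sketch follows; the harness, its buffer sizes and the demo_roundtrip name are illustrative assumptions and not part of this patch:

#include <stdio.h>
#include <string.h>
#include "aead-common.h"
#include "drygascon.h"

/* Encrypt and then decrypt a fixed message through the function pointers
 * in an aead_cipher_t meta-information block. */
static int demo_roundtrip(const aead_cipher_t *cipher)
{
    unsigned char key[32] = {0};     /* assumes key_len <= 32 */
    unsigned char nonce[16] = {0};   /* assumes nonce_len <= 16 */
    unsigned char msg[16] = "example message";
    unsigned char ct[16 + 32];       /* message plus the largest tag */
    unsigned char pt[16];
    unsigned long long ctlen, ptlen;

    if (cipher->key_len > sizeof(key) || cipher->nonce_len > sizeof(nonce))
        return -1;
    if (cipher->encrypt(ct, &ctlen, msg, sizeof(msg), 0, 0, 0, nonce, key) != 0)
        return -1;
    /* decrypt() verifies the tag and returns -1 if it does not match. */
    if (cipher->decrypt(pt, &ptlen, 0, ct, ctlen, 0, 0, nonce, key) != 0)
        return -1;
    return memcmp(pt, msg, (size_t)ptlen) == 0 ? 0 : -1;
}

int main(void)
{
    const aead_cipher_t *cipher = &drygascon128_cipher;
    printf("%s round trip: %s\n", cipher->name,
           demo_roundtrip(cipher) == 0 ? "ok" : "failed");
    return 0;
}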
- */ -static void drygascon256_process_ad - (drysponge256_state_t *state, const unsigned char *ad, - unsigned long long adlen, int finalize) -{ - /* Process all blocks except the last one */ - while (adlen > DRYSPONGE256_RATE) { - drysponge256_f_absorb(state, ad, DRYSPONGE256_RATE); - drysponge256_g_core(state); - ad += DRYSPONGE256_RATE; - adlen -= DRYSPONGE256_RATE; - } - - /* Process the last block with domain separation and padding */ - state->domain = DRYDOMAIN256_ASSOC_DATA; - if (finalize) - state->domain |= DRYDOMAIN256_FINAL; - if (adlen < DRYSPONGE256_RATE) - state->domain |= DRYDOMAIN256_PADDED; - drysponge256_f_absorb(state, ad, (unsigned)adlen); - drysponge256_g(state); -} - -int drygascon128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - drysponge128_state_t state; - unsigned temp; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + DRYGASCON128_TAG_SIZE; - - /* Initialize the sponge state with the key and nonce */ - drysponge128_setup(&state, k, npub, adlen == 0 && mlen == 0); - - /* Process the associated data */ - if (adlen > 0) - drygascon128_process_ad(&state, ad, adlen, mlen == 0); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - /* Processs all blocks except the last one */ - while (mlen > DRYSPONGE128_RATE) { - drysponge128_f_absorb(&state, m, DRYSPONGE128_RATE); - lw_xor_block_2_src(c, m, state.r.B, DRYSPONGE128_RATE); - drysponge128_g(&state); - c += DRYSPONGE128_RATE; - m += DRYSPONGE128_RATE; - mlen -= DRYSPONGE128_RATE; - } - - /* Process the last block with domain separation and padding */ - state.domain = DRYDOMAIN128_MESSAGE | DRYDOMAIN128_FINAL; - if (mlen < DRYSPONGE128_RATE) - state.domain |= DRYDOMAIN128_PADDED; - temp = (unsigned)mlen; - drysponge128_f_absorb(&state, m, temp); - lw_xor_block_2_src(c, m, state.r.B, temp); - drysponge128_g(&state); - c += temp; - } - - /* Generate the authentication tag */ - memcpy(c, state.r.B, DRYGASCON128_TAG_SIZE); - return 0; -} - -int drygascon128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - drysponge128_state_t state; - unsigned char *mtemp = m; - unsigned temp; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < DRYGASCON128_TAG_SIZE) - return -1; - *mlen = clen - DRYGASCON128_TAG_SIZE; - - /* Initialize the sponge state with the key and nonce */ - clen -= DRYGASCON128_TAG_SIZE; - drysponge128_setup(&state, k, npub, adlen == 0 && clen == 0); - - /* Process the associated data */ - if (adlen > 0) - drygascon128_process_ad(&state, ad, adlen, clen == 0); - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > 0) { - /* Processs all blocks except the last one */ - while (clen > DRYSPONGE128_RATE) { - lw_xor_block_2_src(m, c, state.r.B, DRYSPONGE128_RATE); - drysponge128_f_absorb(&state, m, DRYSPONGE128_RATE); - drysponge128_g(&state); - c += DRYSPONGE128_RATE; - m += DRYSPONGE128_RATE; - clen -= DRYSPONGE128_RATE; - } - - /* Process the last block with domain separation and padding */ - state.domain = DRYDOMAIN128_MESSAGE | DRYDOMAIN128_FINAL; - if (clen < DRYSPONGE128_RATE) - state.domain |= 
DRYDOMAIN128_PADDED; - temp = (unsigned)clen; - lw_xor_block_2_src(m, c, state.r.B, temp); - drysponge128_f_absorb(&state, m, temp); - drysponge128_g(&state); - c += temp; - } - - /* Check the authentication tag */ - return aead_check_tag(mtemp, *mlen, state.r.B, c, DRYGASCON128_TAG_SIZE); -} - -int drygascon256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - drysponge256_state_t state; - unsigned temp; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + DRYGASCON256_TAG_SIZE; - - /* Initialize the sponge state with the key and nonce */ - drysponge256_setup(&state, k, npub, adlen == 0 && mlen == 0); - - /* Process the associated data */ - if (adlen > 0) - drygascon256_process_ad(&state, ad, adlen, mlen == 0); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - /* Processs all blocks except the last one */ - while (mlen > DRYSPONGE256_RATE) { - drysponge256_f_absorb(&state, m, DRYSPONGE256_RATE); - lw_xor_block_2_src(c, m, state.r.B, DRYSPONGE256_RATE); - drysponge256_g(&state); - c += DRYSPONGE256_RATE; - m += DRYSPONGE256_RATE; - mlen -= DRYSPONGE256_RATE; - } - - /* Process the last block with domain separation and padding */ - state.domain = DRYDOMAIN256_MESSAGE | DRYDOMAIN256_FINAL; - if (mlen < DRYSPONGE256_RATE) - state.domain |= DRYDOMAIN256_PADDED; - temp = (unsigned)mlen; - drysponge256_f_absorb(&state, m, temp); - lw_xor_block_2_src(c, m, state.r.B, temp); - drysponge256_g(&state); - c += temp; - } - - /* Generate the authentication tag */ - memcpy(c, state.r.B, 16); - drysponge256_g(&state); - memcpy(c + 16, state.r.B, 16); - return 0; -} - -int drygascon256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - drysponge256_state_t state; - unsigned char *mtemp = m; - unsigned temp; - int result; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < DRYGASCON256_TAG_SIZE) - return -1; - *mlen = clen - DRYGASCON256_TAG_SIZE; - - /* Initialize the sponge state with the key and nonce */ - clen -= DRYGASCON256_TAG_SIZE; - drysponge256_setup(&state, k, npub, adlen == 0 && clen == 0); - - /* Process the associated data */ - if (adlen > 0) - drygascon256_process_ad(&state, ad, adlen, clen == 0); - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > 0) { - /* Processs all blocks except the last one */ - while (clen > DRYSPONGE256_RATE) { - lw_xor_block_2_src(m, c, state.r.B, DRYSPONGE256_RATE); - drysponge256_f_absorb(&state, m, DRYSPONGE256_RATE); - drysponge256_g(&state); - c += DRYSPONGE256_RATE; - m += DRYSPONGE256_RATE; - clen -= DRYSPONGE256_RATE; - } - - /* Process the last block with domain separation and padding */ - state.domain = DRYDOMAIN256_MESSAGE | DRYDOMAIN256_FINAL; - if (clen < DRYSPONGE256_RATE) - state.domain |= DRYDOMAIN256_PADDED; - temp = (unsigned)clen; - lw_xor_block_2_src(m, c, state.r.B, temp); - drysponge256_f_absorb(&state, m, temp); - drysponge256_g(&state); - c += temp; - } - - /* Check the authentication tag which is split into two pieces */ - result = aead_check_tag(0, 0, state.r.B, c, 16); - drysponge256_g(&state); - return 
aead_check_tag_precheck - (mtemp, *mlen, state.r.B, c + 16, 16, ~result); -} - -/** - * \brief Precomputed initialization vector for DryGASCON128-HASH. - * - * This is the CST_H value from the DryGASCON specification after it - * has been processed by the key setup function for DrySPONGE128. - */ -static unsigned char const drygascon128_hash_init[] = { - /* c */ - 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, - 0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44, - 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, - 0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44, - 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, - /* x */ - 0xa4, 0x09, 0x38, 0x22, 0x29, 0x9f, 0x31, 0xd0, - 0x08, 0x2e, 0xfa, 0x98, 0xec, 0x4e, 0x6c, 0x89 -}; - -int drygascon128_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - drysponge128_state_t state; - memcpy(state.c.B, drygascon128_hash_init, sizeof(state.c.B)); - memcpy(state.x.B, drygascon128_hash_init + sizeof(state.c.B), - sizeof(state.x.B)); - state.domain = 0; - state.rounds = DRYSPONGE128_ROUNDS; - drygascon128_process_ad(&state, in, inlen, 1); - memcpy(out, state.r.B, 16); - drysponge128_g(&state); - memcpy(out + 16, state.r.B, 16); - return 0; -} - -/** - * \brief Precomputed initialization vector for DryGASCON256-HASH. - * - * This is the CST_H value from the DryGASCON specification after it - * has been processed by the key setup function for DrySPONGE256. - */ -static unsigned char const drygascon256_hash_init[] = { - /* c */ - 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, - 0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44, - 0xa4, 0x09, 0x38, 0x22, 0x29, 0x9f, 0x31, 0xd0, - 0x08, 0x2e, 0xfa, 0x98, 0xec, 0x4e, 0x6c, 0x89, - 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, - 0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44, - 0xa4, 0x09, 0x38, 0x22, 0x29, 0x9f, 0x31, 0xd0, - 0x08, 0x2e, 0xfa, 0x98, 0xec, 0x4e, 0x6c, 0x89, - 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, - /* x */ - 0x45, 0x28, 0x21, 0xe6, 0x38, 0xd0, 0x13, 0x77, - 0xbe, 0x54, 0x66, 0xcf, 0x34, 0xe9, 0x0c, 0x6c -}; - -int drygascon256_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - drysponge256_state_t state; - memcpy(state.c.B, drygascon256_hash_init, sizeof(state.c.B)); - memcpy(state.x.B, drygascon256_hash_init + sizeof(state.c.B), - sizeof(state.x.B)); - state.domain = 0; - state.rounds = DRYSPONGE256_ROUNDS; - drygascon256_process_ad(&state, in, inlen, 1); - memcpy(out, state.r.B, 16); - drysponge256_g(&state); - memcpy(out + 16, state.r.B, 16); - drysponge256_g(&state); - memcpy(out + 32, state.r.B, 16); - drysponge256_g(&state); - memcpy(out + 48, state.r.B, 16); - return 0; -} diff --git a/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/drygascon.h b/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/drygascon.h deleted file mode 100644 index 12e18c3..0000000 --- a/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/drygascon.h +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef LWCRYPTO_DRYGASCON_H
-#define LWCRYPTO_DRYGASCON_H
-
-#include "aead-common.h"
-
-/**
- * \file drygascon.h
- * \brief DryGASCON authenticated encryption algorithm.
- *
- * DryGASCON is a family of authenticated encryption algorithms based
- * around a generalised version of the ASCON permutation. DryGASCON
- * is designed to provide some protection against power analysis.
- *
- * There are four algorithms in the DryGASCON family:
- *
- * \li DryGASCON128 is an authenticated encryption algorithm with a
- * 128-bit key, a 128-bit nonce, and a 128-bit authentication tag.
- * \li DryGASCON256 is an authenticated encryption algorithm with a
- * 256-bit key, a 128-bit nonce, and a 256-bit authentication tag.
- * \li DryGASCON128-HASH is a hash algorithm with a 256-bit output.
- * \li DryGASCON256-HASH is a hash algorithm with a 512-bit output.
- *
- * DryGASCON128 and DryGASCON128-HASH are the primary members of the family.
- *
- * References: https://github.com/sebastien-riou/DryGASCON
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * \brief Size of the key for DryGASCON128.
- */
-#define DRYGASCON128_KEY_SIZE 16
-
-/**
- * \brief Size of the authentication tag for DryGASCON128.
- */
-#define DRYGASCON128_TAG_SIZE 16
-
-/**
- * \brief Size of the nonce for DryGASCON128.
- */
-#define DRYGASCON128_NONCE_SIZE 16
-
-/**
- * \brief Size of the hash output for DryGASCON128-HASH.
- */
-#define DRYGASCON128_HASH_SIZE 32
-
-/**
- * \brief Size of the key for DryGASCON256.
- */
-#define DRYGASCON256_KEY_SIZE 32
-
-/**
- * \brief Size of the authentication tag for DryGASCON256.
- */
-#define DRYGASCON256_TAG_SIZE 32
-
-/**
- * \brief Size of the nonce for DryGASCON256.
- */
-#define DRYGASCON256_NONCE_SIZE 16
-
-/**
- * \brief Size of the hash output for DryGASCON256-HASH.
- */
-#define DRYGASCON256_HASH_SIZE 64
-
-/**
- * \brief Meta-information block for the DryGASCON128 cipher.
- */
-extern aead_cipher_t const drygascon128_cipher;
-
-/**
- * \brief Meta-information block for the DryGASCON256 cipher.
- */
-extern aead_cipher_t const drygascon256_cipher;
-
-/**
- * \brief Meta-information block for DryGASCON128-HASH.
- */
-extern aead_hash_algorithm_t const drygascon128_hash_algorithm;
-
-/**
- * \brief Meta-information block for DryGASCON256-HASH.
- */
-extern aead_hash_algorithm_t const drygascon256_hash_algorithm;
-
-/**
- * \brief Encrypts and authenticates a packet with DryGASCON128.
- *
- * \param c Buffer to receive the output.
- * \param clen On exit, set to the length of the output which includes
- * the ciphertext and the 16 byte authentication tag.
- * \param m Buffer that contains the plaintext message to encrypt.
- * \param mlen Length of the plaintext message in bytes.
- * \param ad Buffer that contains associated data to authenticate
- * along with the packet but which does not need to be encrypted.
- * \param adlen Length of the associated data in bytes.
- * \param nsec Secret nonce - not used by this algorithm.
- * \param npub Points to the public nonce for the packet which must
- * be 16 bytes in length.
- * \param k Points to the 16 bytes of the key to use to encrypt the packet.
- *
- * \return 0 on success, or a negative value if there was an error in
- * the parameters.
- *
- * \sa drygascon128_aead_decrypt()
- */
-int drygascon128_aead_encrypt
-    (unsigned char *c, unsigned long long *clen,
-     const unsigned char *m, unsigned long long mlen,
-     const unsigned char *ad, unsigned long long adlen,
-     const unsigned char *nsec,
-     const unsigned char *npub,
-     const unsigned char *k);
-
-/**
- * \brief Decrypts and authenticates a packet with DryGASCON128.
- *
- * \param m Buffer to receive the plaintext message on output.
- * \param mlen Receives the length of the plaintext message on output.
- * \param nsec Secret nonce - not used by this algorithm.
- * \param c Buffer that contains the ciphertext and authentication
- * tag to decrypt.
- * \param clen Length of the input data in bytes, which includes the
- * ciphertext and the 16 byte authentication tag.
- * \param ad Buffer that contains associated data to authenticate
- * along with the packet but which does not need to be encrypted.
- * \param adlen Length of the associated data in bytes.
- * \param npub Points to the public nonce for the packet which must
- * be 16 bytes in length.
- * \param k Points to the 16 bytes of the key to use to decrypt the packet.
- *
- * \return 0 on success, -1 if the authentication tag was incorrect,
- * or some other negative number if there was an error in the parameters.
- *
- * \sa drygascon128_aead_encrypt()
- */
-int drygascon128_aead_decrypt
-    (unsigned char *m, unsigned long long *mlen,
-     unsigned char *nsec,
-     const unsigned char *c, unsigned long long clen,
-     const unsigned char *ad, unsigned long long adlen,
-     const unsigned char *npub,
-     const unsigned char *k);
-
-/**
- * \brief Encrypts and authenticates a packet with DryGASCON256.
- *
- * \param c Buffer to receive the output.
- * \param clen On exit, set to the length of the output which includes
- * the ciphertext and the 32 byte authentication tag.
- * \param m Buffer that contains the plaintext message to encrypt.
- * \param mlen Length of the plaintext message in bytes.
- * \param ad Buffer that contains associated data to authenticate
- * along with the packet but which does not need to be encrypted.
- * \param adlen Length of the associated data in bytes.
- * \param nsec Secret nonce - not used by this algorithm.
- * \param npub Points to the public nonce for the packet which must
- * be 16 bytes in length.
- * \param k Points to the 32 bytes of the key to use to encrypt the packet.
- *
- * \return 0 on success, or a negative value if there was an error in
- * the parameters.
- *
- * \sa drygascon256_aead_decrypt()
- */
-int drygascon256_aead_encrypt
-    (unsigned char *c, unsigned long long *clen,
-     const unsigned char *m, unsigned long long mlen,
-     const unsigned char *ad, unsigned long long adlen,
-     const unsigned char *nsec,
-     const unsigned char *npub,
-     const unsigned char *k);
-
-/**
- * \brief Decrypts and authenticates a packet with DryGASCON256.
- *
- * \param m Buffer to receive the plaintext message on output.
- * \param mlen Receives the length of the plaintext message on output.
- * \param nsec Secret nonce - not used by this algorithm.
- * \param c Buffer that contains the ciphertext and authentication
- * tag to decrypt.
- * \param clen Length of the input data in bytes, which includes the
- * ciphertext and the 32 byte authentication tag.
- * \param ad Buffer that contains associated data to authenticate
- * along with the packet but which does not need to be encrypted.
- * \param adlen Length of the associated data in bytes.
- * \param npub Points to the public nonce for the packet which must
- * be 16 bytes in length.
- * \param k Points to the 32 bytes of the key to use to decrypt the packet.
- *
- * \return 0 on success, -1 if the authentication tag was incorrect,
- * or some other negative number if there was an error in the parameters.
- *
- * \sa drygascon256_aead_encrypt()
- */
-int drygascon256_aead_decrypt
-    (unsigned char *m, unsigned long long *mlen,
-     unsigned char *nsec,
-     const unsigned char *c, unsigned long long clen,
-     const unsigned char *ad, unsigned long long adlen,
-     const unsigned char *npub,
-     const unsigned char *k);
-
-/**
- * \brief Hashes a block of input data with DRYGASCON128.
- *
- * \param out Buffer to receive the hash output which must be at least
- * DRYGASCON128_HASH_SIZE bytes in length.
- * \param in Points to the input data to be hashed.
- * \param inlen Length of the input data in bytes.
- *
- * \return Returns zero on success or -1 if there was an error in the
- * parameters.
- */
-int drygascon128_hash
-    (unsigned char *out, const unsigned char *in, unsigned long long inlen);
-
-/**
- * \brief Hashes a block of input data with DRYGASCON256.
- *
- * \param out Buffer to receive the hash output which must be at least
- * DRYGASCON256_HASH_SIZE bytes in length.
- * \param in Points to the input data to be hashed.
- * \param inlen Length of the input data in bytes.
- *
- * \return Returns zero on success or -1 if there was an error in the
- * parameters.
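The one-shot hashing entry points in this header take an arbitrary input buffer and write DRYGASCON128_HASH_SIZE or DRYGASCON256_HASH_SIZE bytes of output. A small, hypothetical usage sketch for drygascon128_hash(), which is declared above, could read:

#include <stdio.h>
#include "drygascon.h"

int main(void)
{
    static const unsigned char data[3] = {'a', 'b', 'c'};
    unsigned char digest[DRYGASCON128_HASH_SIZE];
    unsigned i;

    /* drygascon128_hash() returns zero on success, as documented above. */
    if (drygascon128_hash(digest, data, sizeof(data)) != 0)
        return 1;
    for (i = 0; i < DRYGASCON128_HASH_SIZE; ++i)
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}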
- */ -int drygascon256_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/hash.c b/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/hash.c deleted file mode 100644 index 34464d6..0000000 --- a/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/hash.c +++ /dev/null @@ -1,8 +0,0 @@ - -#include "drygascon.h" - -int crypto_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - return drygascon128_hash(out, in, inlen); -} diff --git a/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/internal-drysponge-avr.S b/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/internal-drysponge-avr.S deleted file mode 100644 index 84d0ff8..0000000 --- a/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/internal-drysponge-avr.S +++ /dev/null @@ -1,5092 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global gascon128_core_round - .type gascon128_core_round, @function -gascon128_core_round: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ldi r18,15 - sub r18,r22 - swap r18 - or r22,r18 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+16 - ldd r5,Z+17 - ldd r6,Z+18 - ldd r7,Z+19 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - eor r4,r22 - ldd r23,Z+8 - ldd r12,Z+24 - ldd r13,Z+32 - eor r18,r13 - eor r4,r23 - eor r13,r12 - mov r14,r23 - mov r0,r18 - com r0 - and r14,r0 - mov r15,r4 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r4 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r18 - mov r0,r13 - com r0 - and r16,r0 - eor r18,r15 - eor r23,r24 - eor r4,r25 - eor r12,r16 - eor r13,r14 - eor r23,r18 - eor r12,r4 - eor r18,r13 - com r4 - st Z,r18 - std Z+8,r23 - std Z+24,r12 - std Z+32,r13 - ldd r23,Z+9 - ldd r12,Z+25 - ldd r13,Z+33 - eor r19,r13 - eor r5,r23 - eor r13,r12 - mov r14,r23 - mov r0,r19 - com r0 - and r14,r0 - mov r15,r5 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r5 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r19 - mov r0,r13 - com r0 - and r16,r0 - eor r19,r15 - eor r23,r24 - eor r5,r25 - eor r12,r16 - eor r13,r14 - eor r23,r19 - eor r12,r5 - eor r19,r13 - com r5 - std Z+1,r19 - std Z+9,r23 - std Z+25,r12 - std Z+33,r13 - ldd r23,Z+10 - ldd r12,Z+26 - ldd r13,Z+34 - eor r20,r13 - eor r6,r23 - eor r13,r12 - mov r14,r23 - mov r0,r20 - com r0 - and r14,r0 - mov r15,r6 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r6 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r20 - mov r0,r13 - com r0 - and r16,r0 - eor r20,r15 - eor r23,r24 - eor r6,r25 - eor r12,r16 - eor r13,r14 - eor r23,r20 - eor r12,r6 - eor r20,r13 - com r6 - std Z+2,r20 - std Z+10,r23 - std Z+26,r12 - std Z+34,r13 - ldd r23,Z+11 - ldd r12,Z+27 - ldd r13,Z+35 - eor r21,r13 - eor r7,r23 - eor r13,r12 - mov r14,r23 - mov r0,r21 - com r0 - and r14,r0 - mov r15,r7 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r7 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r21 - mov r0,r13 - com r0 - and r16,r0 - eor r21,r15 - eor r23,r24 - eor r7,r25 - eor r12,r16 - eor r13,r14 
- eor r23,r21 - eor r12,r7 - eor r21,r13 - com r7 - std Z+3,r21 - std Z+11,r23 - std Z+27,r12 - std Z+35,r13 - ldd r23,Z+12 - ldd r12,Z+28 - ldd r13,Z+36 - eor r26,r13 - eor r8,r23 - eor r13,r12 - mov r14,r23 - mov r0,r26 - com r0 - and r14,r0 - mov r15,r8 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r8 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r26 - mov r0,r13 - com r0 - and r16,r0 - eor r26,r15 - eor r23,r24 - eor r8,r25 - eor r12,r16 - eor r13,r14 - eor r23,r26 - eor r12,r8 - eor r26,r13 - com r8 - std Z+4,r26 - std Z+12,r23 - std Z+28,r12 - std Z+36,r13 - ldd r23,Z+13 - ldd r12,Z+29 - ldd r13,Z+37 - eor r27,r13 - eor r9,r23 - eor r13,r12 - mov r14,r23 - mov r0,r27 - com r0 - and r14,r0 - mov r15,r9 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r9 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r27 - mov r0,r13 - com r0 - and r16,r0 - eor r27,r15 - eor r23,r24 - eor r9,r25 - eor r12,r16 - eor r13,r14 - eor r23,r27 - eor r12,r9 - eor r27,r13 - com r9 - std Z+5,r27 - std Z+13,r23 - std Z+29,r12 - std Z+37,r13 - ldd r23,Z+14 - ldd r12,Z+30 - ldd r13,Z+38 - eor r2,r13 - eor r10,r23 - eor r13,r12 - mov r14,r23 - mov r0,r2 - com r0 - and r14,r0 - mov r15,r10 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r10 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r2 - mov r0,r13 - com r0 - and r16,r0 - eor r2,r15 - eor r23,r24 - eor r10,r25 - eor r12,r16 - eor r13,r14 - eor r23,r2 - eor r12,r10 - eor r2,r13 - com r10 - std Z+6,r2 - std Z+14,r23 - std Z+30,r12 - std Z+38,r13 - ldd r23,Z+15 - ldd r12,Z+31 - ldd r13,Z+39 - eor r3,r13 - eor r11,r23 - eor r13,r12 - mov r14,r23 - mov r0,r3 - com r0 - and r14,r0 - mov r15,r11 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r11 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r3 - mov r0,r13 - com r0 - and r16,r0 - eor r3,r15 - eor r23,r24 - eor r11,r25 - eor r12,r16 - eor r13,r14 - eor r23,r3 - eor r12,r11 - eor r3,r13 - com r11 - std Z+7,r3 - std Z+15,r23 - std Z+31,r12 - std Z+39,r13 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r24 - rol r25 - rol r16 - rol r17 - adc r24,r1 - lsl r24 - rol r25 - rol r16 - rol r17 - adc r24,r1 - eor r24,r18 - eor r25,r19 - eor r16,r20 - eor r17,r21 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r18,r24 - eor r19,r25 - eor r20,r16 - eor r21,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - std Z+12,r26 - std Z+13,r27 - std Z+14,r2 - std Z+15,r3 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - bst r12,0 - lsr r15 - ror r14 - ror r13 - ror r12 - bld r15,7 - eor r24,r4 - eor r25,r5 - eor r16,r6 - eor r17,r7 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r1 - lsr r7 - ror r6 - 
ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r7,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - or r11,r0 - eor r4,r24 - eor r5,r25 - eor r6,r16 - eor r7,r17 - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - eor r24,r18 - eor r25,r19 - eor r16,r20 - eor r17,r21 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r24 - eor r19,r25 - eor r20,r16 - eor r21,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+24,r18 - std Z+25,r19 - std Z+26,r20 - std Z+27,r21 - std Z+28,r26 - std Z+29,r27 - std Z+30,r2 - std Z+31,r3 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - or r17,r0 - eor r24,r18 - eor r25,r19 - eor r16,r20 - eor r17,r21 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r18,r24 - eor r19,r25 - eor r20,r16 - eor r21,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+32,r18 - std Z+33,r19 - std Z+34,r20 - std Z+35,r21 - std Z+36,r26 - std Z+37,r27 - std Z+38,r2 - std Z+39,r3 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - or r17,r0 
- eor r24,r18 - eor r25,r19 - eor r16,r20 - eor r17,r21 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r24 - eor r19,r25 - eor r20,r16 - eor r21,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - std Z+16,r4 - std Z+17,r5 - std Z+18,r6 - std Z+19,r7 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size gascon128_core_round, .-gascon128_core_round - - .text -.global drysponge128_g - .type drysponge128_g, @function -drysponge128_g: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - subi r30,180 - sbci r31,255 - ld r19,Z - subi r30,76 - sbc r31,r1 - ldi r18,240 - std Z+40,r1 - std Z+41,r1 - std Z+42,r1 - std Z+43,r1 - std Z+44,r1 - std Z+45,r1 - std Z+46,r1 - std Z+47,r1 - std Z+48,r1 - std Z+49,r1 - std Z+50,r1 - std Z+51,r1 - std Z+52,r1 - std Z+53,r1 - std Z+54,r1 - std Z+55,r1 - ld r20,Z - ldd r21,Z+1 - ldd r22,Z+2 - ldd r23,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+16 - ldd r5,Z+17 - ldd r6,Z+18 - ldd r7,Z+19 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 -38: - eor r4,r18 - ldd r12,Z+8 - ldd r13,Z+24 - ldd r14,Z+32 - eor r20,r14 - eor r4,r12 - eor r14,r13 - mov r15,r12 - mov r0,r20 - com r0 - and r15,r0 - mov r24,r4 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r4 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r20 - mov r0,r14 - com r0 - and r17,r0 - eor r20,r24 - eor r12,r25 - eor r4,r16 - eor r13,r17 - eor r14,r15 - eor r12,r20 - eor r13,r4 - eor r20,r14 - com r4 - st Z,r20 - std Z+8,r12 - std Z+24,r13 - std Z+32,r14 - ldd r12,Z+9 - ldd r13,Z+25 - ldd r14,Z+33 - eor r21,r14 - eor r5,r12 - eor r14,r13 - mov r15,r12 - mov r0,r21 - com r0 - and r15,r0 - mov r24,r5 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r5 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r21 - mov r0,r14 - com r0 - and r17,r0 - eor r21,r24 - eor r12,r25 - eor r5,r16 - eor r13,r17 - eor r14,r15 - eor r12,r21 - eor r13,r5 - eor r21,r14 - com r5 - std Z+1,r21 - std Z+9,r12 - std Z+25,r13 - std Z+33,r14 - ldd r12,Z+10 - ldd r13,Z+26 - ldd r14,Z+34 - eor r22,r14 - eor r6,r12 - eor r14,r13 - mov r15,r12 - mov r0,r22 - com r0 - and r15,r0 - mov r24,r6 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r6 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r22 - mov r0,r14 - com r0 - and r17,r0 - eor r22,r24 - eor r12,r25 - eor r6,r16 - eor r13,r17 - eor r14,r15 - eor r12,r22 - eor r13,r6 - eor r22,r14 - com r6 - std Z+2,r22 - std Z+10,r12 - std Z+26,r13 - std Z+34,r14 - ldd r12,Z+11 - ldd r13,Z+27 - ldd r14,Z+35 - eor r23,r14 - eor r7,r12 - eor r14,r13 - mov r15,r12 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r7 - mov r0,r12 - com r0 - and r24,r0 - mov 
r25,r13 - mov r0,r7 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r23 - mov r0,r14 - com r0 - and r17,r0 - eor r23,r24 - eor r12,r25 - eor r7,r16 - eor r13,r17 - eor r14,r15 - eor r12,r23 - eor r13,r7 - eor r23,r14 - com r7 - std Z+3,r23 - std Z+11,r12 - std Z+27,r13 - std Z+35,r14 - ldd r12,Z+12 - ldd r13,Z+28 - ldd r14,Z+36 - eor r26,r14 - eor r8,r12 - eor r14,r13 - mov r15,r12 - mov r0,r26 - com r0 - and r15,r0 - mov r24,r8 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r8 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r26 - mov r0,r14 - com r0 - and r17,r0 - eor r26,r24 - eor r12,r25 - eor r8,r16 - eor r13,r17 - eor r14,r15 - eor r12,r26 - eor r13,r8 - eor r26,r14 - com r8 - std Z+4,r26 - std Z+12,r12 - std Z+28,r13 - std Z+36,r14 - ldd r12,Z+13 - ldd r13,Z+29 - ldd r14,Z+37 - eor r27,r14 - eor r9,r12 - eor r14,r13 - mov r15,r12 - mov r0,r27 - com r0 - and r15,r0 - mov r24,r9 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r9 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r27 - mov r0,r14 - com r0 - and r17,r0 - eor r27,r24 - eor r12,r25 - eor r9,r16 - eor r13,r17 - eor r14,r15 - eor r12,r27 - eor r13,r9 - eor r27,r14 - com r9 - std Z+5,r27 - std Z+13,r12 - std Z+29,r13 - std Z+37,r14 - ldd r12,Z+14 - ldd r13,Z+30 - ldd r14,Z+38 - eor r2,r14 - eor r10,r12 - eor r14,r13 - mov r15,r12 - mov r0,r2 - com r0 - and r15,r0 - mov r24,r10 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r10 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r2 - mov r0,r14 - com r0 - and r17,r0 - eor r2,r24 - eor r12,r25 - eor r10,r16 - eor r13,r17 - eor r14,r15 - eor r12,r2 - eor r13,r10 - eor r2,r14 - com r10 - std Z+6,r2 - std Z+14,r12 - std Z+30,r13 - std Z+38,r14 - ldd r12,Z+15 - ldd r13,Z+31 - ldd r14,Z+39 - eor r3,r14 - eor r11,r12 - eor r14,r13 - mov r15,r12 - mov r0,r3 - com r0 - and r15,r0 - mov r24,r11 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r11 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r3 - mov r0,r14 - com r0 - and r17,r0 - eor r3,r24 - eor r12,r25 - eor r11,r16 - eor r13,r17 - eor r14,r15 - eor r12,r3 - eor r13,r11 - eor r3,r14 - com r11 - std Z+7,r3 - std Z+15,r12 - std Z+31,r13 - std Z+39,r14 - ldd r20,Z+8 - ldd r21,Z+9 - ldd r22,Z+10 - ldd r23,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r12,r20 - movw r14,r22 - movw r24,r26 - movw r16,r2 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r24 - rol r25 - rol r16 - rol r17 - adc r24,r1 - lsl r24 - rol r25 - rol r16 - rol r17 - adc r24,r1 - eor r24,r20 - eor r25,r21 - eor r16,r22 - eor r17,r23 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r20 - mov r20,r22 - mov r22,r0 - mov r0,r21 - mov r21,r23 - mov r23,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - or r23,r0 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+8,r20 - std Z+9,r21 - std Z+10,r22 - std Z+11,r23 - std Z+12,r26 - std Z+13,r27 - std Z+14,r2 - std Z+15,r3 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw 
r16,r10 - bst r12,0 - lsr r15 - ror r14 - ror r13 - ror r12 - bld r15,7 - eor r24,r4 - eor r25,r5 - eor r16,r6 - eor r17,r7 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r1 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r7,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - or r11,r0 - eor r4,r24 - eor r5,r25 - eor r6,r16 - eor r7,r17 - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - ldd r20,Z+24 - ldd r21,Z+25 - ldd r22,Z+26 - ldd r23,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r12,r20 - movw r14,r22 - movw r24,r26 - movw r16,r2 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - eor r24,r20 - eor r25,r21 - eor r16,r22 - eor r17,r23 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r20 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r0 - lsl r20 - rol r21 - rol r22 - rol r23 - adc r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - adc r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - adc r20,r1 - mov r0,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+24,r20 - std Z+25,r21 - std Z+26,r22 - std Z+27,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r2 - std Z+31,r3 - ldd r20,Z+32 - ldd r21,Z+33 - ldd r22,Z+34 - ldd r23,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - movw r12,r20 - movw r14,r22 - movw r24,r26 - movw r16,r2 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - or r17,r0 - eor r24,r20 - eor r25,r21 - eor r16,r22 - eor r17,r23 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r20 - mov r20,r22 - mov r22,r0 - mov r0,r21 - mov r21,r23 - mov r23,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - or r23,r0 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+32,r20 - std Z+33,r21 - std Z+34,r22 - std Z+35,r23 - std Z+36,r26 - std Z+37,r27 - std Z+38,r2 - std Z+39,r3 - ld r20,Z - ldd r21,Z+1 - ldd r22,Z+2 - ldd r23,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r12,r20 - movw r14,r22 - movw r24,r26 - movw r16,r2 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 
- ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - or r17,r0 - eor r24,r20 - eor r25,r21 - eor r16,r22 - eor r17,r23 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r20 - mov r20,r22 - mov r22,r0 - mov r0,r21 - mov r21,r23 - mov r23,r0 - lsl r20 - rol r21 - rol r22 - rol r23 - adc r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - adc r20,r1 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - ldd r12,Z+40 - ldd r13,Z+41 - ldd r14,Z+42 - ldd r15,Z+43 - eor r12,r20 - eor r13,r21 - eor r14,r22 - eor r15,r23 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - std Z+40,r12 - std Z+41,r13 - std Z+42,r14 - std Z+43,r15 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - ldd r0,Z+24 - eor r12,r0 - ldd r0,Z+25 - eor r13,r0 - ldd r0,Z+26 - eor r14,r0 - ldd r0,Z+27 - eor r15,r0 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - ldd r12,Z+48 - ldd r13,Z+49 - ldd r14,Z+50 - ldd r15,Z+51 - ldd r0,Z+8 - eor r12,r0 - ldd r0,Z+9 - eor r13,r0 - ldd r0,Z+10 - eor r14,r0 - ldd r0,Z+11 - eor r15,r0 - ldd r0,Z+28 - eor r12,r0 - ldd r0,Z+29 - eor r13,r0 - ldd r0,Z+30 - eor r14,r0 - ldd r0,Z+31 - eor r15,r0 - std Z+48,r12 - std Z+49,r13 - std Z+50,r14 - std Z+51,r15 - ldd r12,Z+52 - ldd r13,Z+53 - ldd r14,Z+54 - ldd r15,Z+55 - ldd r0,Z+12 - eor r12,r0 - ldd r0,Z+13 - eor r13,r0 - ldd r0,Z+14 - eor r14,r0 - ldd r0,Z+15 - eor r15,r0 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - std Z+52,r12 - std Z+53,r13 - std Z+54,r14 - std Z+55,r15 - subi r18,15 - dec r19 - breq 5904f - rjmp 38b -5904: - st Z,r20 - std Z+1,r21 - std Z+2,r22 - std Z+3,r23 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - std Z+16,r4 - std Z+17,r5 - std Z+18,r6 - std Z+19,r7 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size drysponge128_g, .-drysponge128_g - - .text -.global gascon256_core_round - .type gascon256_core_round, @function -gascon256_core_round: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,8 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 26 - ldi r18,15 - sub r18,r22 - swap r18 - or r22,r18 - ld r18,Z+ - ld r19,Z+ - ld r20,Z+ - ld r21,Z+ - ld r26,Z+ - ld r27,Z+ - ld r2,Z+ - ld r3,Z+ - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - eor r4,r22 - ld r22,Z - ldd r23,Z+8 - ldd r12,Z+16 - ldd r13,Z+32 - ldd r14,Z+40 - ldd r15,Z+48 - ldd r24,Z+56 - eor r18,r24 - eor r23,r22 - eor r4,r12 - eor r14,r13 - eor r24,r15 - mov r17,r18 - mov r25,r22 - mov r0,r18 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r18,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r4 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r4 - com 
r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r4,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r18 - eor r12,r23 - eor r13,r4 - eor r15,r14 - eor r18,r24 - com r4 - std Y+1,r18 - st Z,r22 - std Z+8,r23 - std Z+16,r12 - std Z+32,r13 - std Z+40,r14 - std Z+48,r15 - std Z+56,r24 - ldd r22,Z+1 - ldd r23,Z+9 - ldd r12,Z+17 - ldd r13,Z+33 - ldd r14,Z+41 - ldd r15,Z+49 - ldd r24,Z+57 - eor r19,r24 - eor r23,r22 - eor r5,r12 - eor r14,r13 - eor r24,r15 - mov r17,r19 - mov r25,r22 - mov r0,r19 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r19,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r5 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r5 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r5,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r19 - eor r12,r23 - eor r13,r5 - eor r15,r14 - eor r19,r24 - com r5 - std Y+2,r19 - std Z+1,r22 - std Z+9,r23 - std Z+17,r12 - std Z+33,r13 - std Z+41,r14 - std Z+49,r15 - std Z+57,r24 - ldd r22,Z+2 - ldd r23,Z+10 - ldd r12,Z+18 - ldd r13,Z+34 - ldd r14,Z+42 - ldd r15,Z+50 - ldd r24,Z+58 - eor r20,r24 - eor r23,r22 - eor r6,r12 - eor r14,r13 - eor r24,r15 - mov r17,r20 - mov r25,r22 - mov r0,r20 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r20,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r6 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r6 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r6,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r20 - eor r12,r23 - eor r13,r6 - eor r15,r14 - eor r20,r24 - com r6 - std Y+3,r20 - std Z+2,r22 - std Z+10,r23 - std Z+18,r12 - std Z+34,r13 - std Z+42,r14 - std Z+50,r15 - std Z+58,r24 - ldd r22,Z+3 - ldd r23,Z+11 - ldd r12,Z+19 - ldd r13,Z+35 - ldd r14,Z+43 - ldd r15,Z+51 - ldd r24,Z+59 - eor r21,r24 - eor r23,r22 - eor r7,r12 - eor r14,r13 - eor r24,r15 - mov r17,r21 - mov r25,r22 - mov r0,r21 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r21,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r7 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r7 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r7,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r21 - eor r12,r23 - eor r13,r7 - eor r15,r14 - eor r21,r24 - com r7 - std Y+4,r21 - std Z+3,r22 - std Z+11,r23 - std Z+19,r12 - std Z+35,r13 - std Z+43,r14 - std Z+51,r15 - std Z+59,r24 - ldd r22,Z+4 - ldd r23,Z+12 - ldd r12,Z+20 - ldd r13,Z+36 - ldd r14,Z+44 - ldd r15,Z+52 - ldd r24,Z+60 - eor r26,r24 - eor r23,r22 - eor r8,r12 - eor r14,r13 - eor r24,r15 - mov r17,r26 - mov r25,r22 - mov r0,r26 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 
- eor r26,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r8 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r8 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r8,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r26 - eor r12,r23 - eor r13,r8 - eor r15,r14 - eor r26,r24 - com r8 - std Y+5,r26 - std Z+4,r22 - std Z+12,r23 - std Z+20,r12 - std Z+36,r13 - std Z+44,r14 - std Z+52,r15 - std Z+60,r24 - ldd r22,Z+5 - ldd r23,Z+13 - ldd r12,Z+21 - ldd r13,Z+37 - ldd r14,Z+45 - ldd r15,Z+53 - ldd r24,Z+61 - eor r27,r24 - eor r23,r22 - eor r9,r12 - eor r14,r13 - eor r24,r15 - mov r17,r27 - mov r25,r22 - mov r0,r27 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r27,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r9 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r9 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r9,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r27 - eor r12,r23 - eor r13,r9 - eor r15,r14 - eor r27,r24 - com r9 - std Y+6,r27 - std Z+5,r22 - std Z+13,r23 - std Z+21,r12 - std Z+37,r13 - std Z+45,r14 - std Z+53,r15 - std Z+61,r24 - ldd r22,Z+6 - ldd r23,Z+14 - ldd r12,Z+22 - ldd r13,Z+38 - ldd r14,Z+46 - ldd r15,Z+54 - ldd r24,Z+62 - eor r2,r24 - eor r23,r22 - eor r10,r12 - eor r14,r13 - eor r24,r15 - mov r17,r2 - mov r25,r22 - mov r0,r2 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r2,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r10 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r10 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r2 - eor r12,r23 - eor r13,r10 - eor r15,r14 - eor r2,r24 - com r10 - std Y+7,r2 - std Z+6,r22 - std Z+14,r23 - std Z+22,r12 - std Z+38,r13 - std Z+46,r14 - std Z+54,r15 - std Z+62,r24 - ldd r22,Z+7 - ldd r23,Z+15 - ldd r12,Z+23 - ldd r13,Z+39 - ldd r14,Z+47 - ldd r15,Z+55 - ldd r24,Z+63 - eor r3,r24 - eor r23,r22 - eor r11,r12 - eor r14,r13 - eor r24,r15 - mov r17,r3 - mov r25,r22 - mov r0,r3 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r3,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r11 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r11 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r3 - eor r12,r23 - eor r13,r11 - eor r15,r14 - eor r3,r24 - com r11 - std Y+8,r3 - std Z+7,r22 - std Z+15,r23 - std Z+23,r12 - std Z+39,r13 - std Z+47,r14 - std Z+55,r15 - std Z+63,r24 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - 
movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - bst r22,0 - lsr r13 - ror r12 - ror r23 - ror r22 - bld r13,7 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - std Z+12,r26 - std Z+13,r27 - std Z+14,r2 - std Z+15,r3 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r22 - mov r22,r23 - mov r23,r12 - mov r12,r13 - mov r13,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - or r13,r0 - mov r0,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - std Z+20,r26 - std Z+21,r27 - std Z+22,r2 - std Z+23,r3 - movw r22,r4 - movw r12,r6 - movw r14,r8 - movw r24,r10 - mov r0,r1 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - or r13,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or 
r25,r0 - eor r14,r4 - eor r15,r5 - eor r24,r6 - eor r25,r7 - eor r22,r8 - eor r23,r9 - eor r12,r10 - eor r13,r11 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r1 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - or r11,r0 - eor r4,r14 - eor r5,r15 - eor r6,r24 - eor r7,r25 - eor r8,r22 - eor r9,r23 - eor r10,r12 - eor r11,r13 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r22 - mov r22,r12 - mov r12,r0 - mov r0,r23 - mov r23,r13 - mov r13,r0 - mov r0,r14 - mov r14,r24 - mov r24,r0 - mov r0,r15 - mov r15,r25 - mov r25,r0 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+32,r18 - std Z+33,r19 - std Z+34,r20 - std Z+35,r21 - std Z+36,r26 - std Z+37,r27 - std Z+38,r2 - std Z+39,r3 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - ldd r26,Z+44 - ldd r27,Z+45 - ldd r2,Z+46 - ldd r3,Z+47 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r13 - mov r13,r12 - mov r12,r23 - mov r23,r22 - mov r22,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - or r13,r0 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+40,r18 - std Z+41,r19 - std Z+42,r20 - std Z+43,r21 - std Z+44,r26 - std Z+45,r27 - std Z+46,r2 - std Z+47,r3 - ldd r18,Z+48 - ldd r19,Z+49 - ldd r20,Z+50 - ldd r21,Z+51 - ldd r26,Z+52 - ldd r27,Z+53 - ldd r2,Z+54 - ldd r3,Z+55 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r22 - mov r22,r23 - mov r23,r12 - mov r12,r13 - mov r13,r0 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - lsl r22 - rol r23 - 
rol r12 - rol r13 - adc r22,r1 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r3 - mov r3,r2 - mov r2,r27 - mov r27,r26 - mov r26,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+48,r18 - std Z+49,r19 - std Z+50,r20 - std Z+51,r21 - std Z+52,r26 - std Z+53,r27 - std Z+54,r2 - std Z+55,r3 - ldd r18,Z+56 - ldd r19,Z+57 - ldd r20,Z+58 - ldd r21,Z+59 - ldd r26,Z+60 - ldd r27,Z+61 - ldd r2,Z+62 - ldd r3,Z+63 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r13 - mov r13,r12 - mov r12,r23 - mov r23,r22 - mov r22,r0 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+56,r18 - std Z+57,r19 - std Z+58,r20 - std Z+59,r21 - std Z+60,r26 - std Z+61,r27 - std Z+62,r2 - std Z+63,r3 - ldd r18,Y+1 - ldd r19,Y+2 - ldd r20,Y+3 - ldd r21,Y+4 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r2,Y+7 - ldd r3,Y+8 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r22 - mov r22,r23 - mov r23,r12 - mov r12,r13 - mov r13,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - or r13,r0 - mov r0,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+24,r4 - std Z+25,r5 - std Z+26,r6 - std Z+27,r7 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - st -Z,r3 - st -Z,r2 - st -Z,r27 - st -Z,r26 - st -Z,r21 - st -Z,r20 - st -Z,r19 - st -Z,r18 - adiw r28,8 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - 
pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gascon256_core_round, .-gascon256_core_round - - .text -.global drysponge256_g - .type drysponge256_g, @function -drysponge256_g: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,26 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 44 - subi r30,148 - sbci r31,255 - ld r19,Z - subi r30,108 - sbc r31,r1 - ldi r18,240 - std Y+25,r19 - std Y+26,r18 - std Y+9,r1 - std Y+10,r1 - std Y+11,r1 - std Y+12,r1 - std Y+13,r1 - std Y+14,r1 - std Y+15,r1 - std Y+16,r1 - std Y+17,r1 - std Y+18,r1 - std Y+19,r1 - std Y+20,r1 - std Y+21,r1 - std Y+22,r1 - std Y+23,r1 - std Y+24,r1 - ld r18,Z+ - ld r19,Z+ - ld r20,Z+ - ld r21,Z+ - ld r22,Z+ - ld r23,Z+ - ld r26,Z+ - ld r27,Z+ - ldd r2,Z+24 - ldd r3,Z+25 - ldd r4,Z+26 - ldd r5,Z+27 - ldd r6,Z+28 - ldd r7,Z+29 - ldd r8,Z+30 - ldd r9,Z+31 -40: - ldd r24,Y+26 - eor r2,r24 - subi r24,15 - std Y+26,r24 - ld r10,Z - ldd r11,Z+8 - ldd r12,Z+16 - ldd r13,Z+32 - ldd r14,Z+40 - ldd r15,Z+48 - ldd r24,Z+56 - eor r18,r24 - eor r11,r10 - eor r2,r12 - eor r14,r13 - eor r24,r15 - mov r17,r18 - mov r25,r10 - mov r0,r18 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r18,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r2 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r2 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r2,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r18 - eor r12,r11 - eor r13,r2 - eor r15,r14 - eor r18,r24 - com r2 - std Y+1,r18 - st Z,r10 - std Z+8,r11 - std Z+16,r12 - std Z+32,r13 - std Z+40,r14 - std Z+48,r15 - std Z+56,r24 - ldd r10,Z+1 - ldd r11,Z+9 - ldd r12,Z+17 - ldd r13,Z+33 - ldd r14,Z+41 - ldd r15,Z+49 - ldd r24,Z+57 - eor r19,r24 - eor r11,r10 - eor r3,r12 - eor r14,r13 - eor r24,r15 - mov r17,r19 - mov r25,r10 - mov r0,r19 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r19,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r3 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r3 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r3,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r19 - eor r12,r11 - eor r13,r3 - eor r15,r14 - eor r19,r24 - com r3 - std Y+2,r19 - std Z+1,r10 - std Z+9,r11 - std Z+17,r12 - std Z+33,r13 - std Z+41,r14 - std Z+49,r15 - std Z+57,r24 - ldd r10,Z+2 - ldd r11,Z+10 - ldd r12,Z+18 - ldd r13,Z+34 - ldd r14,Z+42 - ldd r15,Z+50 - ldd r24,Z+58 - eor r20,r24 - eor r11,r10 - eor r4,r12 - eor r14,r13 - eor r24,r15 - mov r17,r20 - mov r25,r10 - mov r0,r20 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r20,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r4 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r4 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r4,r16 - mov r16,r15 
- mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r20 - eor r12,r11 - eor r13,r4 - eor r15,r14 - eor r20,r24 - com r4 - std Y+3,r20 - std Z+2,r10 - std Z+10,r11 - std Z+18,r12 - std Z+34,r13 - std Z+42,r14 - std Z+50,r15 - std Z+58,r24 - ldd r10,Z+3 - ldd r11,Z+11 - ldd r12,Z+19 - ldd r13,Z+35 - ldd r14,Z+43 - ldd r15,Z+51 - ldd r24,Z+59 - eor r21,r24 - eor r11,r10 - eor r5,r12 - eor r14,r13 - eor r24,r15 - mov r17,r21 - mov r25,r10 - mov r0,r21 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r21,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r5 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r5 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r5,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r21 - eor r12,r11 - eor r13,r5 - eor r15,r14 - eor r21,r24 - com r5 - std Y+4,r21 - std Z+3,r10 - std Z+11,r11 - std Z+19,r12 - std Z+35,r13 - std Z+43,r14 - std Z+51,r15 - std Z+59,r24 - ldd r10,Z+4 - ldd r11,Z+12 - ldd r12,Z+20 - ldd r13,Z+36 - ldd r14,Z+44 - ldd r15,Z+52 - ldd r24,Z+60 - eor r22,r24 - eor r11,r10 - eor r6,r12 - eor r14,r13 - eor r24,r15 - mov r17,r22 - mov r25,r10 - mov r0,r22 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r6 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r6 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r6,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r22 - eor r12,r11 - eor r13,r6 - eor r15,r14 - eor r22,r24 - com r6 - std Y+5,r22 - std Z+4,r10 - std Z+12,r11 - std Z+20,r12 - std Z+36,r13 - std Z+44,r14 - std Z+52,r15 - std Z+60,r24 - ldd r10,Z+5 - ldd r11,Z+13 - ldd r12,Z+21 - ldd r13,Z+37 - ldd r14,Z+45 - ldd r15,Z+53 - ldd r24,Z+61 - eor r23,r24 - eor r11,r10 - eor r7,r12 - eor r14,r13 - eor r24,r15 - mov r17,r23 - mov r25,r10 - mov r0,r23 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r7 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r7 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r7,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r23 - eor r12,r11 - eor r13,r7 - eor r15,r14 - eor r23,r24 - com r7 - std Y+6,r23 - std Z+5,r10 - std Z+13,r11 - std Z+21,r12 - std Z+37,r13 - std Z+45,r14 - std Z+53,r15 - std Z+61,r24 - ldd r10,Z+6 - ldd r11,Z+14 - ldd r12,Z+22 - ldd r13,Z+38 - ldd r14,Z+46 - ldd r15,Z+54 - ldd r24,Z+62 - eor r26,r24 - eor r11,r10 - eor r8,r12 - eor r14,r13 - eor r24,r15 - mov r17,r26 - mov r25,r10 - mov r0,r26 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r26,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r8 - mov 
r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r8 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r8,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r26 - eor r12,r11 - eor r13,r8 - eor r15,r14 - eor r26,r24 - com r8 - std Y+7,r26 - std Z+6,r10 - std Z+14,r11 - std Z+22,r12 - std Z+38,r13 - std Z+46,r14 - std Z+54,r15 - std Z+62,r24 - ldd r10,Z+7 - ldd r11,Z+15 - ldd r12,Z+23 - ldd r13,Z+39 - ldd r14,Z+47 - ldd r15,Z+55 - ldd r24,Z+63 - eor r27,r24 - eor r11,r10 - eor r9,r12 - eor r14,r13 - eor r24,r15 - mov r17,r27 - mov r25,r10 - mov r0,r27 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r27,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r9 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r9 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r9,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r27 - eor r12,r11 - eor r13,r9 - eor r15,r14 - eor r27,r24 - com r9 - std Y+8,r27 - std Z+7,r10 - std Z+15,r11 - std Z+23,r12 - std Z+39,r13 - std Z+47,r14 - std Z+55,r15 - std Z+63,r24 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r26,Z+6 - ldd r27,Z+7 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r22 - mov r22,r26 - mov r26,r0 - mov r0,r23 - mov r23,r27 - mov r27,r0 - mov r0,r1 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - or r27,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r22 - std Z+5,r23 - std Z+6,r26 - std Z+7,r27 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r26,Z+14 - ldd r27,Z+15 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - bst r10,0 - lsr r13 - ror r12 - ror r11 - ror r10 - bld r13,7 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r1 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - or r27,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - std Z+12,r22 - std Z+13,r23 - std 
Z+14,r26 - std Z+15,r27 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r26,Z+22 - ldd r27,Z+23 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r10 - mov r10,r11 - mov r11,r12 - mov r12,r13 - mov r13,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - or r13,r0 - mov r0,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r22 - mov r22,r23 - mov r23,r26 - mov r26,r27 - mov r27,r0 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - std Z+20,r22 - std Z+21,r23 - std Z+22,r26 - std Z+23,r27 - movw r10,r2 - movw r12,r4 - movw r14,r6 - movw r24,r8 - mov r0,r1 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - or r13,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r2 - eor r15,r3 - eor r24,r4 - eor r25,r5 - eor r10,r6 - eor r11,r7 - eor r12,r8 - eor r13,r9 - mov r0,r2 - mov r2,r4 - mov r4,r0 - mov r0,r3 - mov r3,r5 - mov r5,r0 - mov r0,r1 - lsr r5 - ror r4 - ror r3 - ror r2 - ror r0 - lsr r5 - ror r4 - ror r3 - ror r2 - ror r0 - lsr r5 - ror r4 - ror r3 - ror r2 - ror r0 - lsr r5 - ror r4 - ror r3 - ror r2 - ror r0 - or r5,r0 - mov r0,r6 - mov r6,r8 - mov r8,r0 - mov r0,r7 - mov r7,r9 - mov r9,r0 - mov r0,r1 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - or r9,r0 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - eor r6,r10 - eor r7,r11 - eor r8,r12 - eor r9,r13 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r22,Z+36 - ldd r23,Z+37 - ldd r26,Z+38 - ldd r27,Z+39 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r10 - mov r10,r12 - mov r12,r0 - mov r0,r11 - mov r11,r13 - mov r13,r0 - mov r0,r14 - mov r14,r24 - mov r24,r0 - mov r0,r15 - mov r15,r25 - mov r25,r0 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r22 - mov r22,r26 - mov r26,r0 - mov r0,r23 - mov r23,r27 - mov r27,r0 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+32,r18 - std Z+33,r19 - std 
Z+34,r20 - std Z+35,r21 - std Z+36,r22 - std Z+37,r23 - std Z+38,r26 - std Z+39,r27 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - ldd r22,Z+44 - ldd r23,Z+45 - ldd r26,Z+46 - ldd r27,Z+47 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r13 - mov r13,r12 - mov r12,r11 - mov r11,r10 - mov r10,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - or r13,r0 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+40,r18 - std Z+41,r19 - std Z+42,r20 - std Z+43,r21 - std Z+44,r22 - std Z+45,r23 - std Z+46,r26 - std Z+47,r27 - ldd r18,Z+48 - ldd r19,Z+49 - ldd r20,Z+50 - ldd r21,Z+51 - ldd r22,Z+52 - ldd r23,Z+53 - ldd r26,Z+54 - ldd r27,Z+55 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r10 - mov r10,r11 - mov r11,r12 - mov r12,r13 - mov r13,r0 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r27 - mov r27,r26 - mov r26,r23 - mov r23,r22 - mov r22,r0 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+48,r18 - std Z+49,r19 - std Z+50,r20 - std Z+51,r21 - std Z+52,r22 - std Z+53,r23 - std Z+54,r26 - std Z+55,r27 - ldd r18,Z+56 - ldd r19,Z+57 - ldd r20,Z+58 - ldd r21,Z+59 - ldd r22,Z+60 - ldd r23,Z+61 - ldd r26,Z+62 - ldd r27,Z+63 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r13 - mov r13,r12 - mov r12,r11 - mov r11,r10 - mov r10,r0 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r27 - mov r27,r26 - mov r26,r23 - mov r23,r22 - mov r22,r0 - mov r0,r1 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - or r27,r0 - eor 
r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+56,r18 - std Z+57,r19 - std Z+58,r20 - std Z+59,r21 - std Z+60,r22 - std Z+61,r23 - std Z+62,r26 - std Z+63,r27 - ldd r18,Y+1 - ldd r19,Y+2 - ldd r20,Y+3 - ldd r21,Y+4 - ldd r22,Y+5 - ldd r23,Y+6 - ldd r26,Y+7 - ldd r27,Y+8 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r10 - mov r10,r11 - mov r11,r12 - mov r12,r13 - mov r13,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - or r13,r0 - mov r0,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r22 - mov r22,r26 - mov r26,r0 - mov r0,r23 - mov r23,r27 - mov r27,r0 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - ldd r10,Y+9 - ldd r11,Y+10 - ldd r12,Y+11 - ldd r13,Y+12 - eor r10,r18 - eor r11,r19 - eor r12,r20 - eor r13,r21 - ldd r0,Z+12 - eor r10,r0 - ldd r0,Z+13 - eor r11,r0 - ldd r0,Z+14 - eor r12,r0 - ldd r0,Z+15 - eor r13,r0 - ldd r0,Z+32 - eor r10,r0 - ldd r0,Z+33 - eor r11,r0 - ldd r0,Z+34 - eor r12,r0 - ldd r0,Z+35 - eor r13,r0 - ldd r0,Z+52 - eor r10,r0 - ldd r0,Z+53 - eor r11,r0 - ldd r0,Z+54 - eor r12,r0 - ldd r0,Z+55 - eor r13,r0 - std Y+9,r10 - std Y+10,r11 - std Y+11,r12 - std Y+12,r13 - ldd r10,Y+13 - ldd r11,Y+14 - ldd r12,Y+15 - ldd r13,Y+16 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - ldd r0,Z+16 - eor r10,r0 - ldd r0,Z+17 - eor r11,r0 - ldd r0,Z+18 - eor r12,r0 - ldd r0,Z+19 - eor r13,r0 - ldd r0,Z+36 - eor r10,r0 - ldd r0,Z+37 - eor r11,r0 - ldd r0,Z+38 - eor r12,r0 - ldd r0,Z+39 - eor r13,r0 - ldd r0,Z+40 - eor r10,r0 - ldd r0,Z+41 - eor r11,r0 - ldd r0,Z+42 - eor r12,r0 - ldd r0,Z+43 - eor r13,r0 - std Y+13,r10 - std Y+14,r11 - std Y+15,r12 - std Y+16,r13 - ldd r10,Y+17 - ldd r11,Y+18 - ldd r12,Y+19 - ldd r13,Y+20 - eor r10,r2 - eor r11,r3 - eor r12,r4 - eor r13,r5 - ld r0,Z - eor r10,r0 - ldd r0,Z+1 - eor r11,r0 - ldd r0,Z+2 - eor r12,r0 - ldd r0,Z+3 - eor r13,r0 - ldd r0,Z+20 - eor r10,r0 - ldd r0,Z+21 - eor r11,r0 - ldd r0,Z+22 - eor r12,r0 - ldd r0,Z+23 - eor r13,r0 - ldd r0,Z+44 - eor r10,r0 - ldd r0,Z+45 - eor r11,r0 - ldd r0,Z+46 - eor r12,r0 - ldd r0,Z+47 - eor r13,r0 - std Y+17,r10 - std Y+18,r11 - std Y+19,r12 - std Y+20,r13 - ldd r10,Y+21 - ldd r11,Y+22 - ldd r12,Y+23 - ldd r13,Y+24 - eor r10,r6 - eor r11,r7 - eor r12,r8 - eor r13,r9 - ldd r0,Z+4 - eor r10,r0 - ldd r0,Z+5 - eor r11,r0 - ldd r0,Z+6 - eor r12,r0 - ldd r0,Z+7 - eor r13,r0 - ldd r0,Z+8 - eor r10,r0 - ldd r0,Z+9 - eor r11,r0 - ldd r0,Z+10 - eor r12,r0 - ldd r0,Z+11 - eor r13,r0 - ldd r0,Z+48 - eor r10,r0 - ldd r0,Z+49 - eor r11,r0 - ldd r0,Z+50 - eor r12,r0 - ldd r0,Z+51 - eor r13,r0 - std Y+21,r10 - std Y+22,r11 - std Y+23,r12 - std Y+24,r13 - ldd r10,Y+25 - dec r10 - std Y+25,r10 - breq 6623f - rjmp 40b -6623: - std Z+24,r2 - std Z+25,r3 - std Z+26,r4 - std Z+27,r5 - std Z+28,r6 - std Z+29,r7 - std Z+30,r8 - std Z+31,r9 - st -Z,r27 - st -Z,r26 - st -Z,r23 - st -Z,r22 - st -Z,r21 - st 
-Z,r20 - st -Z,r19 - st -Z,r18 - ldi r25,72 - add r30,r25 - adc r31,r1 - ldd r18,Y+9 - ldd r19,Y+10 - ldd r20,Y+11 - ldd r21,Y+12 - ldd r22,Y+13 - ldd r23,Y+14 - ldd r26,Y+15 - ldd r27,Y+16 - ldd r2,Y+17 - ldd r3,Y+18 - ldd r4,Y+19 - ldd r5,Y+20 - ldd r6,Y+21 - ldd r7,Y+22 - ldd r8,Y+23 - ldd r9,Y+24 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r22 - std Z+5,r23 - std Z+6,r26 - std Z+7,r27 - std Z+8,r2 - std Z+9,r3 - std Z+10,r4 - std Z+11,r5 - std Z+12,r6 - std Z+13,r7 - std Z+14,r8 - std Z+15,r9 - adiw r28,26 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size drysponge256_g, .-drysponge256_g - -#endif diff --git a/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/internal-drysponge.c b/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/internal-drysponge.c deleted file mode 100644 index 6dfe48c..0000000 --- a/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/internal-drysponge.c +++ /dev/null @@ -1,611 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "internal-drysponge.h" -#include - -#if !defined(__AVR__) - -/* Right rotations in bit-interleaved format */ -#define intRightRotateEven(x,bits) \ - (__extension__ ({ \ - uint32_t _x0 = (uint32_t)(x); \ - uint32_t _x1 = (uint32_t)((x) >> 32); \ - _x0 = rightRotate(_x0, (bits)); \ - _x1 = rightRotate(_x1, (bits)); \ - _x0 | (((uint64_t)_x1) << 32); \ - })) -#define intRightRotateOdd(x,bits) \ - (__extension__ ({ \ - uint32_t _x0 = (uint32_t)(x); \ - uint32_t _x1 = (uint32_t)((x) >> 32); \ - _x0 = rightRotate(_x0, ((bits) + 1) % 32); \ - _x1 = rightRotate(_x1, (bits)); \ - _x1 | (((uint64_t)_x0) << 32); \ - })) -#define intRightRotate1_64(x) \ - (__extension__ ({ \ - uint32_t _x0 = (uint32_t)(x); \ - uint32_t _x1 = (uint32_t)((x) >> 32); \ - _x0 = rightRotate1(_x0); \ - _x1 | (((uint64_t)_x0) << 32); \ - })) -#define intRightRotate2_64(x) (intRightRotateEven((x), 1)) -#define intRightRotate3_64(x) (intRightRotateOdd((x), 1)) -#define intRightRotate4_64(x) (intRightRotateEven((x), 2)) -#define intRightRotate5_64(x) (intRightRotateOdd((x), 2)) -#define intRightRotate6_64(x) (intRightRotateEven((x), 3)) -#define intRightRotate7_64(x) (intRightRotateOdd((x), 3)) -#define intRightRotate8_64(x) (intRightRotateEven((x), 4)) -#define intRightRotate9_64(x) (intRightRotateOdd((x), 4)) -#define intRightRotate10_64(x) (intRightRotateEven((x), 5)) -#define intRightRotate11_64(x) (intRightRotateOdd((x), 5)) -#define intRightRotate12_64(x) (intRightRotateEven((x), 6)) -#define intRightRotate13_64(x) (intRightRotateOdd((x), 6)) -#define intRightRotate14_64(x) (intRightRotateEven((x), 7)) -#define intRightRotate15_64(x) (intRightRotateOdd((x), 7)) -#define intRightRotate16_64(x) (intRightRotateEven((x), 8)) -#define intRightRotate17_64(x) (intRightRotateOdd((x), 8)) -#define intRightRotate18_64(x) (intRightRotateEven((x), 9)) -#define intRightRotate19_64(x) (intRightRotateOdd((x), 9)) -#define intRightRotate20_64(x) (intRightRotateEven((x), 10)) -#define intRightRotate21_64(x) (intRightRotateOdd((x), 10)) -#define intRightRotate22_64(x) (intRightRotateEven((x), 11)) -#define intRightRotate23_64(x) (intRightRotateOdd((x), 11)) -#define intRightRotate24_64(x) (intRightRotateEven((x), 12)) -#define intRightRotate25_64(x) (intRightRotateOdd((x), 12)) -#define intRightRotate26_64(x) (intRightRotateEven((x), 13)) -#define intRightRotate27_64(x) (intRightRotateOdd((x), 13)) -#define intRightRotate28_64(x) (intRightRotateEven((x), 14)) -#define intRightRotate29_64(x) (intRightRotateOdd((x), 14)) -#define intRightRotate30_64(x) (intRightRotateEven((x), 15)) -#define intRightRotate31_64(x) (intRightRotateOdd((x), 15)) -#define intRightRotate32_64(x) (intRightRotateEven((x), 16)) -#define intRightRotate33_64(x) (intRightRotateOdd((x), 16)) -#define intRightRotate34_64(x) (intRightRotateEven((x), 17)) -#define intRightRotate35_64(x) (intRightRotateOdd((x), 17)) -#define intRightRotate36_64(x) (intRightRotateEven((x), 18)) -#define intRightRotate37_64(x) (intRightRotateOdd((x), 18)) -#define intRightRotate38_64(x) (intRightRotateEven((x), 19)) -#define intRightRotate39_64(x) (intRightRotateOdd((x), 19)) -#define intRightRotate40_64(x) (intRightRotateEven((x), 20)) -#define intRightRotate41_64(x) (intRightRotateOdd((x), 20)) -#define intRightRotate42_64(x) (intRightRotateEven((x), 21)) -#define intRightRotate43_64(x) (intRightRotateOdd((x), 21)) -#define intRightRotate44_64(x) (intRightRotateEven((x), 22)) -#define intRightRotate45_64(x) (intRightRotateOdd((x), 22)) -#define intRightRotate46_64(x) 
(intRightRotateEven((x), 23)) -#define intRightRotate47_64(x) (intRightRotateOdd((x), 23)) -#define intRightRotate48_64(x) (intRightRotateEven((x), 24)) -#define intRightRotate49_64(x) (intRightRotateOdd((x), 24)) -#define intRightRotate50_64(x) (intRightRotateEven((x), 25)) -#define intRightRotate51_64(x) (intRightRotateOdd((x), 25)) -#define intRightRotate52_64(x) (intRightRotateEven((x), 26)) -#define intRightRotate53_64(x) (intRightRotateOdd((x), 26)) -#define intRightRotate54_64(x) (intRightRotateEven((x), 27)) -#define intRightRotate55_64(x) (intRightRotateOdd((x), 27)) -#define intRightRotate56_64(x) (intRightRotateEven((x), 28)) -#define intRightRotate57_64(x) (intRightRotateOdd((x), 28)) -#define intRightRotate58_64(x) (intRightRotateEven((x), 29)) -#define intRightRotate59_64(x) (intRightRotateOdd((x), 29)) -#define intRightRotate60_64(x) (intRightRotateEven((x), 30)) -#define intRightRotate61_64(x) (intRightRotateOdd((x), 30)) -#define intRightRotate62_64(x) (intRightRotateEven((x), 31)) -#define intRightRotate63_64(x) (intRightRotateOdd((x), 31)) - -void gascon128_core_round(gascon128_state_t *state, uint8_t round) -{ - uint64_t t0, t1, t2, t3, t4; - - /* Load the state into local varaibles */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); - uint64_t x4 = le_load_word64(state->B + 32); -#endif - - /* Add the round constant to the middle of the state */ - x2 ^= ((0x0F - round) << 4) | round; - - /* Substitution layer */ - x0 ^= x4; x2 ^= x1; x4 ^= x3; t0 = (~x0) & x1; t1 = (~x1) & x2; - t2 = (~x2) & x3; t3 = (~x3) & x4; t4 = (~x4) & x0; x0 ^= t1; - x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t0; x1 ^= x0; x3 ^= x2; - x0 ^= x4; x2 = ~x2; - - /* Linear diffusion layer */ - x0 ^= intRightRotate19_64(x0) ^ intRightRotate28_64(x0); - x1 ^= intRightRotate61_64(x1) ^ intRightRotate38_64(x1); - x2 ^= intRightRotate1_64(x2) ^ intRightRotate6_64(x2); - x3 ^= intRightRotate10_64(x3) ^ intRightRotate17_64(x3); - x4 ^= intRightRotate7_64(x4) ^ intRightRotate40_64(x4); - - /* Write the local variables back to the state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); - le_store_word64(state->B + 32, x4); -#endif -} - -void gascon256_core_round(gascon256_state_t *state, uint8_t round) -{ - uint64_t t0, t1, t2, t3, t4, t5, t6, t7, t8; - - /* Load the state into local varaibles */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; - uint64_t x5 = state->S[5]; - uint64_t x6 = state->S[6]; - uint64_t x7 = state->S[7]; - uint64_t x8 = state->S[8]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); - uint64_t x4 = le_load_word64(state->B + 32); - uint64_t x5 = le_load_word64(state->B + 40); - uint64_t x6 = le_load_word64(state->B + 48); - uint64_t x7 = le_load_word64(state->B + 56); - 
uint64_t x8 = le_load_word64(state->B + 64); -#endif - - /* Add the round constant to the middle of the state */ - x4 ^= ((0x0F - round) << 4) | round; - - /* Substitution layer */ - x0 ^= x8; x2 ^= x1; x4 ^= x3; x6 ^= x5; x8 ^= x7; t0 = (~x0) & x1; - t1 = (~x1) & x2; t2 = (~x2) & x3; t3 = (~x3) & x4; t4 = (~x4) & x5; - t5 = (~x5) & x6; t6 = (~x6) & x7; t7 = (~x7) & x8; t8 = (~x8) & x0; - x0 ^= t1; x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t5; x5 ^= t6; x6 ^= t7; - x7 ^= t8; x8 ^= t0; x1 ^= x0; x3 ^= x2; x5 ^= x4; x7 ^= x6; x0 ^= x8; - x4 = ~x4; - - /* Linear diffusion layer */ - x0 ^= intRightRotate19_64(x0) ^ intRightRotate28_64(x0); - x1 ^= intRightRotate61_64(x1) ^ intRightRotate38_64(x1); - x2 ^= intRightRotate1_64(x2) ^ intRightRotate6_64(x2); - x3 ^= intRightRotate10_64(x3) ^ intRightRotate17_64(x3); - x4 ^= intRightRotate7_64(x4) ^ intRightRotate40_64(x4); - x5 ^= intRightRotate31_64(x5) ^ intRightRotate26_64(x5); - x6 ^= intRightRotate53_64(x6) ^ intRightRotate58_64(x6); - x7 ^= intRightRotate9_64(x7) ^ intRightRotate46_64(x7); - x8 ^= intRightRotate43_64(x8) ^ intRightRotate50_64(x8); - - /* Write the local variables back to the state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; - state->S[5] = x5; - state->S[6] = x6; - state->S[7] = x7; - state->S[8] = x8; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); - le_store_word64(state->B + 32, x4); - le_store_word64(state->B + 40, x5); - le_store_word64(state->B + 48, x6); - le_store_word64(state->B + 56, x7); - le_store_word64(state->B + 64, x8); -#endif -} - -void drysponge128_g(drysponge128_state_t *state) -{ - unsigned round; - - /* Perform the first round. For each round we XOR the 16 bytes of - * the output data with the first 16 bytes of the state. And then - * XOR with the next 16 bytes of the state, rotated by 4 bytes */ - gascon128_core_round(&(state->c), 0); - state->r.W[0] = state->c.W[0] ^ state->c.W[5]; - state->r.W[1] = state->c.W[1] ^ state->c.W[6]; - state->r.W[2] = state->c.W[2] ^ state->c.W[7]; - state->r.W[3] = state->c.W[3] ^ state->c.W[4]; - - /* Perform the rest of the rounds */ - for (round = 1; round < state->rounds; ++round) { - gascon128_core_round(&(state->c), round); - state->r.W[0] ^= state->c.W[0] ^ state->c.W[5]; - state->r.W[1] ^= state->c.W[1] ^ state->c.W[6]; - state->r.W[2] ^= state->c.W[2] ^ state->c.W[7]; - state->r.W[3] ^= state->c.W[3] ^ state->c.W[4]; - } -} - -void drysponge256_g(drysponge256_state_t *state) -{ - unsigned round; - - /* Perform the first round. For each round we XOR the 16 bytes of - * the output data with the first 16 bytes of the state. And then - * XOR with the next 16 bytes of the state, rotated by 4 bytes. - * And so on for a total of 64 bytes XOR'ed into the output data. 
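Both core-round functions above derive their round constant with the same formula, ((0x0F - round) << 4) | round, and the AVR drysponge256_g code earlier in this patch appears to track the same sequence by loading 0xF0 (ldi r18,240) and subtracting 15 on each pass of the loop (subi r24,15). The following standalone sketch is not part of the deleted files and assumes nothing beyond the standard C library; it simply prints the constants so the C and assembly representations can be checked against each other.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t round;
        for (round = 0; round < 12; ++round) {
            /* Same formula as in gascon128_core_round()/gascon256_core_round() */
            uint8_t rc = (uint8_t)(((0x0F - round) << 4) | round);
            printf("round %2u: 0x%02X\n", (unsigned)round, (unsigned)rc);
            /* Prints 0xF0, 0xE1, 0xD2, ... - each value is the previous one minus 0x0F */
        }
        return 0;
    }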
*/ - gascon256_core_round(&(state->c), 0); - state->r.W[0] = state->c.W[0] ^ state->c.W[5] ^ - state->c.W[10] ^ state->c.W[15]; - state->r.W[1] = state->c.W[1] ^ state->c.W[6] ^ - state->c.W[11] ^ state->c.W[12]; - state->r.W[2] = state->c.W[2] ^ state->c.W[7] ^ - state->c.W[8] ^ state->c.W[13]; - state->r.W[3] = state->c.W[3] ^ state->c.W[4] ^ - state->c.W[9] ^ state->c.W[14]; - - /* Perform the rest of the rounds */ - for (round = 1; round < state->rounds; ++round) { - gascon256_core_round(&(state->c), round); - state->r.W[0] ^= state->c.W[0] ^ state->c.W[5] ^ - state->c.W[10] ^ state->c.W[15]; - state->r.W[1] ^= state->c.W[1] ^ state->c.W[6] ^ - state->c.W[11] ^ state->c.W[12]; - state->r.W[2] ^= state->c.W[2] ^ state->c.W[7] ^ - state->c.W[8] ^ state->c.W[13]; - state->r.W[3] ^= state->c.W[3] ^ state->c.W[4] ^ - state->c.W[9] ^ state->c.W[14]; - } -} - -#endif /* !__AVR__ */ - -void drysponge128_g_core(drysponge128_state_t *state) -{ - unsigned round; - for (round = 0; round < state->rounds; ++round) - gascon128_core_round(&(state->c), round); -} - -void drysponge256_g_core(drysponge256_state_t *state) -{ - unsigned round; - for (round = 0; round < state->rounds; ++round) - gascon256_core_round(&(state->c), round); -} - -/** - * \fn uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index) - * \brief Selects an element of x in constant time. - * - * \param x Points to the four elements of x. - * \param index Index of which element to extract between 0 and 3. - * - * \return The selected element of x. - */ -#if !defined(__AVR__) -STATIC_INLINE uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index) -{ - /* We need to be careful how we select each element of x because - * we are doing a data-dependent fetch here. Do the fetch in a way - * that should avoid cache timing issues by fetching every element - * of x and masking away the ones we don't want. - * - * There is a possible side channel here with respect to power analysis. - * The "mask" value will be all-ones for the selected index and all-zeroes - * for the other indexes. This may show up as different power consumption - * for the "result ^= x[i] & mask" statement when i is the selected index. - * Such a side channel could in theory allow reading the plaintext input - * to the cipher by analysing the CPU's power consumption. - * - * The DryGASCON specification acknowledges the possibility of plaintext - * recovery in section 7.4. For software mitigation the specification - * suggests randomization of the indexes into c and x and randomization - * of the order of processing words. We aren't doing that here yet. - * Patches welcome to fix this. - */ - uint32_t mask = -((uint32_t)((0x04 - index) >> 2)); - uint32_t result = x[0] & mask; - mask = -((uint32_t)((0x04 - (index ^ 0x01)) >> 2)); - result ^= x[1] & mask; - mask = -((uint32_t)((0x04 - (index ^ 0x02)) >> 2)); - result ^= x[2] & mask; - mask = -((uint32_t)((0x04 - (index ^ 0x03)) >> 2)); - return result ^ (x[3] & mask); -} -#else -/* AVR is more or less immune to cache timing issues because it doesn't - * have anything like an L1 or L2 cache. Select the word directly */ -#define drysponge_select_x(x, index) ((x)[(index)]) -#endif - -/** - * \brief Mixes a 32-bit value into the DrySPONGE128 state. - * - * \param state DrySPONGE128 state. - * \param data The data to be mixed in the bottom 10 bits. 
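The masking trick in drysponge_select_x() above works because -((uint32_t)((0x04 - (index ^ i)) >> 2)) is an all-ones word exactly when index equals i (both values lie in 0..3) and all-zeroes otherwise, so XOR-accumulating x[i] & mask over the four candidates yields x[index] without a data-dependent load. The short self-check below is not part of the deleted files; its inner loop is just a rolled-up rendering of the four unrolled mask-and-XOR steps.

    #include <assert.h>
    #include <stdint.h>

    /* All-ones when index == i, all-zeroes for the other three indexes (0..3). */
    static uint32_t select_mask(uint32_t index, uint32_t i)
    {
        return (uint32_t)(-((uint32_t)((0x04 - (index ^ i)) >> 2)));
    }

    int main(void)
    {
        static const uint32_t x[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
        uint32_t index, i;
        for (index = 0; index < 4; ++index) {
            uint32_t result = 0;
            for (i = 0; i < 4; ++i)
                result ^= x[i] & select_mask(index, i);
            assert(result == x[index]); /* same answer as a direct x[index] fetch */
        }
        return 0;
    }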
- */ -static void drysponge128_mix_phase_round - (drysponge128_state_t *state, uint32_t data) -{ - /* Mix in elements from x according to the 2-bit indexes in the data */ - state->c.W[0] ^= drysponge_select_x(state->x.W, data & 0x03); - state->c.W[2] ^= drysponge_select_x(state->x.W, (data >> 2) & 0x03); - state->c.W[4] ^= drysponge_select_x(state->x.W, (data >> 4) & 0x03); - state->c.W[6] ^= drysponge_select_x(state->x.W, (data >> 6) & 0x03); - state->c.W[8] ^= drysponge_select_x(state->x.W, (data >> 8) & 0x03); -} - -/** - * \brief Mixes a 32-bit value into the DrySPONGE256 state. - * - * \param state DrySPONGE256 state. - * \param data The data to be mixed in the bottom 18 bits. - */ -static void drysponge256_mix_phase_round - (drysponge256_state_t *state, uint32_t data) -{ - /* Mix in elements from x according to the 2-bit indexes in the data */ - state->c.W[0] ^= drysponge_select_x(state->x.W, data & 0x03); - state->c.W[2] ^= drysponge_select_x(state->x.W, (data >> 2) & 0x03); - state->c.W[4] ^= drysponge_select_x(state->x.W, (data >> 4) & 0x03); - state->c.W[6] ^= drysponge_select_x(state->x.W, (data >> 6) & 0x03); - state->c.W[8] ^= drysponge_select_x(state->x.W, (data >> 8) & 0x03); - state->c.W[10] ^= drysponge_select_x(state->x.W, (data >> 10) & 0x03); - state->c.W[12] ^= drysponge_select_x(state->x.W, (data >> 12) & 0x03); - state->c.W[14] ^= drysponge_select_x(state->x.W, (data >> 14) & 0x03); - state->c.W[16] ^= drysponge_select_x(state->x.W, (data >> 16) & 0x03); -} - -/** - * \brief Mixes an input block into a DrySPONGE128 state. - * - * \param state The DrySPONGE128 state. - * \param data Full rate block containing the input data. - */ -static void drysponge128_mix_phase - (drysponge128_state_t *state, const unsigned char data[DRYSPONGE128_RATE]) -{ - /* Mix 10-bit groups into the output, with the domain - * separator added to the last two groups */ - drysponge128_mix_phase_round - (state, data[0] | (((uint32_t)(data[1])) << 8)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[1] >> 2) | (((uint32_t)(data[2])) << 6)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[2] >> 4) | (((uint32_t)(data[3])) << 4)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[3] >> 6) | (((uint32_t)(data[4])) << 2)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, data[5] | (((uint32_t)(data[6])) << 8)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[6] >> 2) | (((uint32_t)(data[7])) << 6)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[7] >> 4) | (((uint32_t)(data[8])) << 4)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[8] >> 6) | (((uint32_t)(data[9])) << 2)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, data[10] | (((uint32_t)(data[11])) << 8)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[11] >> 2) | (((uint32_t)(data[12])) << 6)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[12] >> 4) | (((uint32_t)(data[13])) << 4)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, ((data[13] >> 6) | (((uint32_t)(data[14])) << 2))); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round(state, data[15] ^ state->domain); - gascon128_core_round(&(state->c), 0); - 
drysponge128_mix_phase_round(state, state->domain >> 10); - - /* Revert to the default domain separator for the next block */ - state->domain = 0; -} - -/** - * \brief Mixes an input block into a DrySPONGE256 state. - * - * \param state The DrySPONGE256 state. - * \param data Full rate block containing the input data. - */ -static void drysponge256_mix_phase - (drysponge256_state_t *state, const unsigned char data[DRYSPONGE256_RATE]) -{ - /* Mix 18-bit groups into the output, with the domain in the last group */ - drysponge256_mix_phase_round - (state, data[0] | (((uint32_t)(data[1])) << 8) | - (((uint32_t)(data[2])) << 16)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[2] >> 2) | (((uint32_t)(data[3])) << 6) | - (((uint32_t)(data[4])) << 14)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[4] >> 4) | (((uint32_t)(data[5])) << 4) | - (((uint32_t)(data[6])) << 12)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[6] >> 6) | (((uint32_t)(data[7])) << 2) | - (((uint32_t)(data[8])) << 10)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, data[9] | (((uint32_t)(data[10])) << 8) | - (((uint32_t)(data[11])) << 16)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[11] >> 2) | (((uint32_t)(data[12])) << 6) | - (((uint32_t)(data[13])) << 14)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[13] >> 4) | (((uint32_t)(data[14])) << 4) | - (((uint32_t)(data[15])) << 12)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[15] >> 6) ^ state->domain); - - /* Revert to the default domain separator for the next block */ - state->domain = 0; -} - -void drysponge128_f_absorb - (drysponge128_state_t *state, const unsigned char *input, unsigned len) -{ - if (len >= DRYSPONGE128_RATE) { - drysponge128_mix_phase(state, input); - } else { - unsigned char padded[DRYSPONGE128_RATE]; - memcpy(padded, input, len); - padded[len] = 0x01; - memset(padded + len + 1, 0, DRYSPONGE128_RATE - len - 1); - drysponge128_mix_phase(state, padded); - } -} - -void drysponge256_f_absorb - (drysponge256_state_t *state, const unsigned char *input, unsigned len) -{ - if (len >= DRYSPONGE256_RATE) { - drysponge256_mix_phase(state, input); - } else { - unsigned char padded[DRYSPONGE256_RATE]; - memcpy(padded, input, len); - padded[len] = 0x01; - memset(padded + len + 1, 0, DRYSPONGE256_RATE - len - 1); - drysponge256_mix_phase(state, padded); - } -} - -/** - * \brief Determine if some of the words of an "x" value are identical. - * - * \param x Points to the "x" buffer to check. - * - * \return Non-zero if some of the words are the same, zero if they are - * distinct from each other. - * - * We try to perform the check in constant time to avoid giving away - * any information about the value of the key. 
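The long run of unrolled shifts in drysponge128_mix_phase() above is essentially a little-endian bit-stream reader: the 16-byte rate block is consumed 10 bits at a time (18 bits at a time in drysponge256_mix_phase()), and the last two groups carry the final data byte together with the domain bits. The sketch below is not part of the deleted files; it computes the purely data-carrying groups (k = 0..11) generically, and the original calls merely pass a few extra high bits that drysponge128_mix_phase_round() ignores, so the values mixed into the state come out the same.

    #include <stdint.h>

    /* k-th 10-bit group of a 16-byte little-endian stream, for k = 0..11. */
    static uint32_t group10(const unsigned char data[16], unsigned k)
    {
        unsigned bit = 10 * k;          /* first bit of the group */
        unsigned byte = bit / 8;        /* a 10-bit group spans at most 3 bytes */
        uint32_t v = (uint32_t)data[byte]
                   | ((uint32_t)data[byte + 1] << 8)
                   | ((uint32_t)data[byte + 2] << 16);
        return (v >> (bit % 8)) & 0x3FF;
    }

    int main(void)
    {
        unsigned char data[16];
        unsigned i;
        for (i = 0; i < 16; ++i)
            data[i] = (unsigned char)(i * 17 + 3);   /* arbitrary test pattern */
        /* Group 1 matches the second unrolled call: (data[1] >> 2) | (data[2] << 6) */
        uint32_t unrolled = (((uint32_t)(data[1] >> 2)) | ((uint32_t)data[2] << 6)) & 0x3FF;
        return group10(data, 1) == unrolled ? 0 : 1;
    }

As an aside, the drysponge_x_words_are_same() helper that follows relies on a related integer trick: (0x100000000ULL - check) >> 32 evaluates to 1 exactly when check is 0, i.e. when two words of the candidate "x" value collide.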
- */ -static int drysponge_x_words_are_same(const uint32_t x[4]) -{ - unsigned i, j; - int result = 0; - for (i = 0; i < 3; ++i) { - for (j = i + 1; j < 4; ++j) { - uint32_t check = x[i] ^ x[j]; - result |= (int)((0x100000000ULL - check) >> 32); - } - } - return result; -} - -void drysponge128_setup - (drysponge128_state_t *state, const unsigned char *key, - const unsigned char *nonce, int final_block) -{ - /* Fill the GASCON-128 state with repeated copies of the key */ - memcpy(state->c.B, key, 16); - memcpy(state->c.B + 16, key, 16); - memcpy(state->c.B + 32, key, 8); - - /* Generate the "x" value for the state. All four words of "x" - * must be unique because they will be used in drysponge_select_x() - * as stand-ins for the bit pairs 00, 01, 10, and 11. - * - * Run the core block operation over and over until "x" is unique. - * Technically the runtime here is key-dependent and not constant. - * If the input key is randomized, this should only take 1 round - * on average so it is "almost constant time". - */ - do { - gascon128_core_round(&(state->c), 0); - } while (drysponge_x_words_are_same(state->c.W)); - memcpy(state->x.W, state->c.W, sizeof(state->x)); - - /* Replace the generated "x" value in the state with the key prefix */ - memcpy(state->c.W, key, sizeof(state->x)); - - /* Absorb the nonce into the state with an increased number of rounds */ - state->rounds = DRYSPONGE128_INIT_ROUNDS; - state->domain = DRYDOMAIN128_NONCE; - if (final_block) - state->domain |= DRYDOMAIN128_FINAL; - drysponge128_f_absorb(state, nonce, 16); - drysponge128_g(state); - - /* Set up the normal number of rounds for future operations */ - state->rounds = DRYSPONGE128_ROUNDS; -} - -void drysponge256_setup - (drysponge256_state_t *state, const unsigned char *key, - const unsigned char *nonce, int final_block) -{ - /* Fill the GASCON-256 state with repeated copies of the key */ - memcpy(state->c.B, key, 32); - memcpy(state->c.B + 32, key, 32); - memcpy(state->c.B + 64, key, 8); - - /* Generate the "x" value for the state */ - do { - gascon256_core_round(&(state->c), 0); - } while (drysponge_x_words_are_same(state->c.W)); - memcpy(state->x.W, state->c.W, sizeof(state->x)); - - /* Replace the generated "x" value in the state with the key prefix */ - memcpy(state->c.W, key, sizeof(state->x)); - - /* Absorb the nonce into the state with an increased number of rounds */ - state->rounds = DRYSPONGE256_INIT_ROUNDS; - state->domain = DRYDOMAIN256_NONCE; - if (final_block) - state->domain |= DRYDOMAIN256_FINAL; - drysponge256_f_absorb(state, nonce, 16); - drysponge256_g(state); - - /* Set up the normal number of rounds for future operations */ - state->rounds = DRYSPONGE256_ROUNDS; -} diff --git a/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/internal-drysponge.h b/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/internal-drysponge.h deleted file mode 100644 index 05b0c16..0000000 --- a/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/internal-drysponge.h +++ /dev/null @@ -1,345 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_DRYSPONGE_H -#define LW_INTERNAL_DRYSPONGE_H - -#include "internal-util.h" - -/** - * \file internal-drysponge.h - * \brief Internal implementation of DrySPONGE for the DryGASCON cipher. - * - * References: https://github.com/sebastien-riou/DryGASCON - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the GASCON-128 permutation state in bytes. - */ -#define GASCON128_STATE_SIZE 40 - -/** - * \brief Size of the GASCON-256 permutation state in bytes. - */ -#define GASCON256_STATE_SIZE 72 - -/** - * \brief Rate of absorption and squeezing for DrySPONGE128. - */ -#define DRYSPONGE128_RATE 16 - -/** - * \brief Rate of absorption and squeezing for DrySPONGE256. - */ -#define DRYSPONGE256_RATE 16 - -/** - * \brief Size of the "x" value for DrySPONGE128. - */ -#define DRYSPONGE128_XSIZE 16 - -/** - * \brief Size of the "x" value for DrySPONGE256. - */ -#define DRYSPONGE256_XSIZE 16 - -/** - * \brief Normal number of rounds for DrySPONGE128 when absorbing - * and squeezing data. - */ -#define DRYSPONGE128_ROUNDS 7 - -/** - * \brief Number of rounds for DrySPONGE128 during initialization. - */ -#define DRYSPONGE128_INIT_ROUNDS 11 - -/** - * \brief Normal number of rounds for DrySPONGE256 when absorbing - * and squeezing data. - */ -#define DRYSPONGE256_ROUNDS 8 - -/** - * \brief Number of rounds for DrySPONGE256 during initialization. - */ -#define DRYSPONGE256_INIT_ROUNDS 12 - -/** - * \brief DrySPONGE128 domain bit for a padded block. - */ -#define DRYDOMAIN128_PADDED (1 << 8) - -/** - * \brief DrySPONGE128 domain bit for a final block. - */ -#define DRYDOMAIN128_FINAL (1 << 9) - -/** - * \brief DrySPONGE128 domain value for processing the nonce. - */ -#define DRYDOMAIN128_NONCE (1 << 10) - -/** - * \brief DrySPONGE128 domain value for processing the associated data. - */ -#define DRYDOMAIN128_ASSOC_DATA (2 << 10) - -/** - * \brief DrySPONGE128 domain value for processing the message. - */ -#define DRYDOMAIN128_MESSAGE (3 << 10) - -/** - * \brief DrySPONGE256 domain bit for a padded block. - */ -#define DRYDOMAIN256_PADDED (1 << 2) - -/** - * \brief DrySPONGE256 domain bit for a final block. - */ -#define DRYDOMAIN256_FINAL (1 << 3) - -/** - * \brief DrySPONGE256 domain value for processing the nonce. - */ -#define DRYDOMAIN256_NONCE (1 << 4) - -/** - * \brief DrySPONGE256 domain value for processing the associated data. 
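The two sets of domain constants above differ only in where the bits sit. DrySPONGE128 mixes 10-bit groups, so after the 128 data bits of a rate block the padded/final flags land at bits 8..9 of the second-to-last group and the nonce/AD/message tag at bits 10..11, mixed separately via domain >> 10; DrySPONGE256 mixes 18-bit groups, leaving only 2 data bits (data[15] >> 6) in the last group, so the same fields start at bit 2. The short program below is not part of the deleted files: the two constants are copied from the definitions above for a standalone build, and the data byte is a made-up example. It reproduces the last two groups of the 128-bit variant the way drysponge128_mix_phase() forms them.

    #include <stdint.h>
    #include <stdio.h>

    /* Copied from the header definitions above so the sketch builds on its own. */
    #define DRYDOMAIN128_FINAL (1 << 9)
    #define DRYDOMAIN128_NONCE (1 << 10)

    int main(void)
    {
        uint32_t domain = DRYDOMAIN128_NONCE | DRYDOMAIN128_FINAL;
        unsigned char last = 0xAB;                            /* hypothetical data[15] */
        uint32_t group12 = ((uint32_t)last ^ domain) & 0x3FF; /* data bits 0..7, flags at 8..9 */
        uint32_t group13 = domain >> 10;                      /* nonce/AD/message tag at bits 0..1 */
        printf("group12=0x%03lX group13=0x%lX\n",
               (unsigned long)group12, (unsigned long)group13);
        return 0;
    }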
- */ -#define DRYDOMAIN256_ASSOC_DATA (2 << 4) - -/** - * \brief DrySPONGE256 domain value for processing the message. - */ -#define DRYDOMAIN256_MESSAGE (3 << 4) - -/** - * \brief Internal state of the GASCON-128 permutation. - */ -typedef union -{ - uint64_t S[GASCON128_STATE_SIZE / 8]; /**< 64-bit words of the state */ - uint32_t W[GASCON128_STATE_SIZE / 4]; /**< 32-bit words of the state */ - uint8_t B[GASCON128_STATE_SIZE]; /**< Bytes of the state */ - -} gascon128_state_t; - -/** - * \brief Internal state of the GASCON-256 permutation. - */ -typedef union -{ - uint64_t S[GASCON256_STATE_SIZE / 8]; /**< 64-bit words of the state */ - uint32_t W[GASCON256_STATE_SIZE / 4]; /**< 32-bit words of the state */ - uint8_t B[GASCON256_STATE_SIZE]; /**< Bytes of the state */ - -} gascon256_state_t; - -/** - * \brief Structure of a rate block for DrySPONGE128. - */ -typedef union -{ - uint64_t S[DRYSPONGE128_RATE / 8]; /**< 64-bit words of the rate */ - uint32_t W[DRYSPONGE128_RATE / 4]; /**< 32-bit words of the rate */ - uint8_t B[DRYSPONGE128_RATE]; /**< Bytes of the rate */ - -} drysponge128_rate_t; - -/** - * \brief Structure of a rate block for DrySPONGE256. - */ -typedef union -{ - uint64_t S[DRYSPONGE256_RATE / 8]; /**< 64-bit words of the rate */ - uint32_t W[DRYSPONGE256_RATE / 4]; /**< 32-bit words of the rate */ - uint8_t B[DRYSPONGE256_RATE]; /**< Bytes of the rate */ - -} drysponge256_rate_t; - -/** - * \brief Structure of the "x" value for DrySPONGE128. - */ -typedef union -{ - uint64_t S[DRYSPONGE128_XSIZE / 8]; /**< 64-bit words of the rate */ - uint32_t W[DRYSPONGE128_XSIZE / 4]; /**< 32-bit words of the rate */ - uint8_t B[DRYSPONGE128_XSIZE]; /**< Bytes of the rate */ - -} drysponge128_x_t; - -/** - * \brief Structure of the "x" value for DrySPONGE256. - */ -typedef union -{ - uint64_t S[DRYSPONGE256_XSIZE / 8]; /**< 64-bit words of the rate */ - uint32_t W[DRYSPONGE256_XSIZE / 4]; /**< 32-bit words of the rate */ - uint8_t B[DRYSPONGE256_XSIZE]; /**< Bytes of the rate */ - -} drysponge256_x_t; - -/** - * \brief Structure of the rolling DrySPONGE128 state. - */ -typedef struct -{ - gascon128_state_t c; /**< GASCON-128 state for the capacity */ - drysponge128_rate_t r; /**< Buffer for a rate block of data */ - drysponge128_x_t x; /**< "x" value for the sponge */ - uint32_t domain; /**< Domain value to mix on next F call */ - uint32_t rounds; /**< Number of rounds for next G call */ - -} drysponge128_state_t; - -/** - * \brief Structure of the rolling DrySPONGE256 state. - */ -typedef struct -{ - gascon256_state_t c; /**< GASCON-256 state for the capacity */ - drysponge256_rate_t r; /**< Buffer for a rate block of data */ - drysponge256_x_t x; /**< "x" value for the sponge */ - uint32_t domain; /**< Domain value to mix on next F call */ - uint32_t rounds; /**< Number of rounds for next G call */ - -} drysponge256_state_t; - -/** - * \brief Permutes the GASCON-128 state using one iteration of CoreRound. - * - * \param state The GASCON-128 state to be permuted. - * \param round The round number. - * - * The input and output \a state will be in little-endian byte order. - */ -void gascon128_core_round(gascon128_state_t *state, uint8_t round); - -/** - * \brief Permutes the GASCON-256 state using one iteration of CoreRound. - * - * \param state The GASCON-256 state to be permuted. - * \param round The round number. - * - * The input and output \a state will be in little-endian byte order. 
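The unions above expose one block of memory as bytes, 32-bit words, and 64-bit words so that byte-wise absorption and word-wise permutation code can share a state without copying; the layout is only meaningful on the little-endian targets the library assumes. A self-contained sketch of the aliasing, using a cut-down stand-in type rather than the real gascon128_state_t:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Cut-down stand-in for gascon128_state_t: three views of 40 bytes. */
    typedef union {
        uint64_t S[40 / 8];
        uint32_t W[40 / 4];
        uint8_t  B[40];
    } state_view_t;

    int main(void)
    {
        state_view_t st;
        memset(st.B, 0, sizeof(st.B));
        st.B[0] = 0x01;
        st.B[1] = 0x02;
        /* On a little-endian machine the first 32-bit view reads 0x00000201. */
        printf("W[0] = 0x%08lx\n", (unsigned long)st.W[0]);
        return 0;
    }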
- */ -void gascon256_core_round(gascon256_state_t *state, uint8_t round); - -/** - * \brief Performs the DrySPONGE128 G function which runs the core - * rounds and squeezes data out of the GASGON-128 state. - * - * \param state The DrySPONGE128 state. - * - * The data that is squeezed out will be in state->r on exit. - */ -void drysponge128_g(drysponge128_state_t *state); - -/** - * \brief Performs the DrySPONGE256 G function which runs the core - * rounds and squeezes data out of the GASGON-256 state. - * - * \param state The DrySPONGE256 state. - * - * The data that is squeezed out will be in state->r on exit. - */ -void drysponge256_g(drysponge256_state_t *state); - -/** - * \brief Performs the DrySPONGE128 G function which runs the core - * rounds but does not squeeze out any output. - * - * \param state The DrySPONGE128 state. - */ -void drysponge128_g_core(drysponge128_state_t *state); - -/** - * \brief Performs the DrySPONGE256 G function which runs the core - * rounds but does not squeeze out any output. - * - * \param state The DrySPONGE256 state. - */ -void drysponge256_g_core(drysponge256_state_t *state); - -/** - * \brief Performs the absorption phase of the DrySPONGE128 F function. - * - * \param state The DrySPONGE128 state. - * \param input The block of input data to incorporate into the state. - * \param len The length of the input block, which must be less than - * or equal to DRYSPONGE128_RATE. Smaller input blocks will be padded. - * - * This function must be followed by a call to drysponge128_g() or - * drysponge128_g_core() to perform the full F operation. - */ -void drysponge128_f_absorb - (drysponge128_state_t *state, const unsigned char *input, unsigned len); - -/** - * \brief Performs the absorption phase of the DrySPONGE256 F function. - * - * \param state The DrySPONGE256 state. - * \param input The block of input data to incorporate into the state. - * \param len The length of the input block, which must be less than - * or equal to DRYSPONGE256_RATE. Smaller input blocks will be padded. - * - * This function must be followed by a call to drysponge256_g() or - * drysponge256_g_core() to perform the full F operation. - */ -void drysponge256_f_absorb - (drysponge256_state_t *state, const unsigned char *input, unsigned len); - -/** - * \brief Set up a DrySPONGE128 state to begin encryption or decryption. - * - * \param state The DrySPONGE128 state. - * \param key Points to the 16 bytes of the key. - * \param nonce Points to the 16 bytes of the nonce. - * \param final_block Non-zero if after key setup there will be no more blocks. - */ -void drysponge128_setup - (drysponge128_state_t *state, const unsigned char *key, - const unsigned char *nonce, int final_block); - -/** - * \brief Set up a DrySPONGE256 state to begin encryption or decryption. - * - * \param state The DrySPONGE256 state. - * \param key Points to the 32 bytes of the key. - * \param nonce Points to the 16 bytes of the nonce. - * \param final_block Non-zero if after key setup there will be no more blocks. 
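Taken together, the declarations above describe one sponge step: absorb up to a rate block with the F absorption function, then call G to run the core rounds and leave squeezed output in state->r. A hedged usage sketch, assuming this header and its matching C file are on the include and link path; the key, nonce, and data below are placeholders only:

    #include <stdio.h>
    #include "internal-drysponge.h"

    int main(void)
    {
        static const unsigned char key[16]   = {0}; /* placeholder, not a real key */
        static const unsigned char nonce[16] = {0}; /* placeholder nonce */
        static const unsigned char block[DRYSPONGE128_RATE] = {0};
        drysponge128_state_t st;
        unsigned i;

        /* Key and nonce setup; final_block = 0 because more data follows. */
        drysponge128_setup(&st, key, nonce, 0);

        /* One F step: absorb a full rate block, then run G to mix the state
         * and squeeze DRYSPONGE128_RATE bytes of output into st.r. */
        drysponge128_f_absorb(&st, block, DRYSPONGE128_RATE);
        drysponge128_g(&st);

        for (i = 0; i < DRYSPONGE128_RATE; ++i)
            printf("%02x", st.r.B[i]);
        printf("\n");
        return 0;
    }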
- */ -void drysponge256_setup - (drysponge256_state_t *state, const unsigned char *key, - const unsigned char *nonce, int final_block); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/internal-util.h b/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/drygascon/Implementations/crypto_hash/drygascon128/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
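The endianness detection that follows is entirely compile-time; when none of the listed macros matches a toolchain, the comment's advice is to pass -D__LITTLE_ENDIAN__ or -D__BIG_ENDIAN__ explicitly. A small runtime cross-check, useful in a test build to confirm that the compile-time choice matches the hardware (this check is not part of the library):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const uint32_t probe = 0x01020304u;
        unsigned char bytes[4];

        memcpy(bytes, &probe, sizeof(bytes));
        if (bytes[0] == 0x04)
            printf("runtime check: little-endian\n");
        else if (bytes[0] == 0x01)
            printf("runtime check: big-endian\n");
        else
            printf("runtime check: unusual byte order\n");
        return 0;
    }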
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
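The lw_xor_block_* helpers above are how the sponge's squeezed rate block gets combined with plaintext and ciphertext later in this patch. A minimal sketch of that pattern using a local copy of lw_xor_block_2_src and made-up 8-byte buffers: the same XOR that produces the ciphertext also recovers the plaintext.

    #include <assert.h>
    #include <string.h>

    /* Local copy of lw_xor_block_2_src from internal-util.h above. */
    #define lw_xor_block_2_src(dest, src1, src2, len) \
        do { \
            unsigned char *_dest = (dest); \
            const unsigned char *_src1 = (src1); \
            const unsigned char *_src2 = (src2); \
            unsigned _len = (len); \
            while (_len > 0) { \
                *_dest++ = *_src1++ ^ *_src2++; \
                --_len; \
            } \
        } while (0)

    int main(void)
    {
        unsigned char keystream[8] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x11, 0x22, 0x33};
        unsigned char plaintext[8] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'};
        unsigned char ciphertext[8], recovered[8];

        lw_xor_block_2_src(ciphertext, plaintext, keystream, 8); /* "encrypt" */
        lw_xor_block_2_src(recovered, ciphertext, keystream, 8); /* "decrypt" */
        assert(memcmp(recovered, plaintext, 8) == 0);
        return 0;
    }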
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
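The composed forms above exist because an AVR core can only rotate cheaply by 1 bit or by whole bytes, so, for example, leftRotate5 is expressed as a left rotate by 8 followed by three right rotates by 1. A quick self-contained sanity check that the composition equals the generic barrel-shifter form:

    #include <assert.h>
    #include <stdint.h>

    /* Generic rotates, matching the leftRotate/rightRotate macros above. */
    static uint32_t rol32(uint32_t a, unsigned bits)
    {
        return (a << bits) | (a >> (32 - bits));
    }

    static uint32_t ror32(uint32_t a, unsigned bits)
    {
        return (a >> bits) | (a << (32 - bits));
    }

    int main(void)
    {
        uint32_t x = 0x12345678u;
        /* leftRotate5 composed as: left by 8, then right by 1 three times. */
        uint32_t composed = ror32(ror32(ror32(rol32(x, 8), 1), 1), 1);
        assert(composed == rol32(x, 5));
        return 0;
    }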
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/drygascon/Implementations/crypto_hash/drygascon128/rhys/aead-common.c b/drygascon/Implementations/crypto_hash/drygascon128/rhys/aead-common.c new file mode 100644 index 0000000..84fc53a --- /dev/null +++ b/drygascon/Implementations/crypto_hash/drygascon128/rhys/aead-common.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "aead-common.h" + +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = (accum - 1) >> 8; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} + +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size, int precheck) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = ((accum - 1) >> 8) & precheck; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} diff --git a/drygascon/Implementations/crypto_hash/drygascon128/rhys/aead-common.h b/drygascon/Implementations/crypto_hash/drygascon128/rhys/aead-common.h new file mode 100644 index 0000000..2be95eb --- /dev/null +++ b/drygascon/Implementations/crypto_hash/drygascon128/rhys/aead-common.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
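aead_check_tag above compares the tags without an early exit and then reuses the resulting all-ones or all-zeroes mask both as the return value and to wipe the plaintext when verification fails. A short demonstration of that behaviour, assuming the aead-common.c just added is compiled into the test program:

    #include <assert.h>
    #include "aead-common.h"

    int main(void)
    {
        unsigned char tag[4]  = {1, 2, 3, 4};
        unsigned char good[4] = {1, 2, 3, 4};
        unsigned char bad[4]  = {1, 2, 3, 5};
        unsigned char plaintext[4] = {0xAA, 0xBB, 0xCC, 0xDD};

        /* Matching tags: returns 0 and leaves the plaintext untouched. */
        assert(aead_check_tag(plaintext, 4, tag, good, 4) == 0);
        assert(plaintext[0] == 0xAA);

        /* Mismatched tags: returns -1 and zeroes the plaintext. */
        assert(aead_check_tag(plaintext, 4, tag, bad, 4) == -1);
        assert(plaintext[0] == 0 && plaintext[3] == 0);
        return 0;
    }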
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_AEAD_COMMON_H +#define LWCRYPTO_AEAD_COMMON_H + +#include + +/** + * \file aead-common.h + * \brief Definitions that are common across AEAD schemes. + * + * AEAD stands for "Authenticated Encryption with Associated Data". + * It is a standard API pattern for securely encrypting and + * authenticating packets of data. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts and authenticates a packet with an AEAD scheme. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + */ +typedef int (*aead_cipher_encrypt_t) + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with an AEAD scheme. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. 
+ */ +typedef int (*aead_cipher_decrypt_t) + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data. + * + * \param out Buffer to receive the hash output. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +typedef int (*aead_hash_t) + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a hashing operation. + * + * \param state Hash state to be initialized. + */ +typedef void (*aead_hash_init_t)(void *state); + +/** + * \brief Updates a hash state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + */ +typedef void (*aead_hash_update_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Returns the final hash value from a hashing operation. + * + * \param state Hash state to be finalized. + * \param out Points to the output buffer to receive the hash value. + */ +typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); + +/** + * \brief Absorbs more input data into an XOF state. + * + * \param state XOF state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. + * + * \sa ascon_xof_init(), ascon_xof_squeeze() + */ +typedef void (*aead_xof_absorb_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Squeezes output data from an XOF state. + * + * \param state XOF state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + */ +typedef void (*aead_xof_squeeze_t) + (void *state, unsigned char *out, unsigned long long outlen); + +/** + * \brief No special AEAD features. + */ +#define AEAD_FLAG_NONE 0x0000 + +/** + * \brief The natural byte order of the AEAD cipher is little-endian. + * + * If this flag is not present, then the natural byte order of the + * AEAD cipher should be assumed to be big-endian. + * + * The natural byte order may be useful when formatting packet sequence + * numbers as nonces. The application needs to know whether the sequence + * number should be packed into the leading or trailing bytes of the nonce. + */ +#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 + +/** + * \brief Meta-information about an AEAD cipher. + */ +typedef struct +{ + const char *name; /**< Name of the cipher */ + unsigned key_len; /**< Length of the key in bytes */ + unsigned nonce_len; /**< Length of the nonce in bytes */ + unsigned tag_len; /**< Length of the tag in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ + aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ + +} aead_cipher_t; + +/** + * \brief Meta-information about a hash algorithm that is related to an AEAD. + * + * Regular hash algorithms should provide the "hash", "init", "update", + * and "finalize" functions.
Extensible Output Functions (XOF's) should + * provide the "hash", "init", "absorb", and "squeeze" functions. + */ +typedef struct +{ + const char *name; /**< Name of the hash algorithm */ + size_t state_size; /**< Size of the incremental state structure */ + unsigned hash_len; /**< Length of the hash in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_hash_t hash; /**< All in one hashing function */ + aead_hash_init_t init; /**< Incremental hash/XOF init function */ + aead_hash_update_t update; /**< Incremental hash update function */ + aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ + aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ + aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ + +} aead_hash_algorithm_t; + +/** + * \brief Check an authentication tag in constant time. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + */ +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len); + +/** + * \brief Check an authentication tag in constant time with a previous check. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * \param precheck Set to -1 if previous check succeeded or 0 if it failed. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + * + * This version can be used to incorporate other information about the + * correctness of the plaintext into the final result. + */ +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len, int precheck); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drygascon/Implementations/crypto_hash/drygascon128/rhys/api.h b/drygascon/Implementations/crypto_hash/drygascon128/rhys/api.h new file mode 100644 index 0000000..ae8c7f6 --- /dev/null +++ b/drygascon/Implementations/crypto_hash/drygascon128/rhys/api.h @@ -0,0 +1 @@ +#define CRYPTO_BYTES 32 diff --git a/drygascon/Implementations/crypto_hash/drygascon128/rhys/drygascon.c b/drygascon/Implementations/crypto_hash/drygascon128/rhys/drygascon.c new file mode 100644 index 0000000..e963903 --- /dev/null +++ b/drygascon/Implementations/crypto_hash/drygascon128/rhys/drygascon.c @@ -0,0 +1,421 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd.
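The aead_cipher_t block lets callers drive any cipher in the suite through a uniform table of sizes and function pointers. A hedged round-trip sketch using drygascon128_cipher (defined in the drygascon.c/drygascon.h files added below); the all-zero key and nonce are placeholders only, and a real application must use a secret key and a unique nonce per packet:

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>
    #include "drygascon.h"

    int main(void)
    {
        const aead_cipher_t *cipher = &drygascon128_cipher;
        unsigned char key[DRYGASCON128_KEY_SIZE]     = {0}; /* placeholder */
        unsigned char nonce[DRYGASCON128_NONCE_SIZE] = {0}; /* placeholder */
        unsigned char msg[11] = "hello aead";
        unsigned char ct[sizeof(msg) + DRYGASCON128_TAG_SIZE];
        unsigned char pt[sizeof(msg)];
        unsigned long long ctlen = 0, ptlen = 0;

        assert(cipher->key_len == 16 && cipher->nonce_len == 16 && cipher->tag_len == 16);

        cipher->encrypt(ct, &ctlen, msg, sizeof(msg), NULL, 0, NULL, nonce, key);
        cipher->decrypt(pt, &ptlen, NULL, ct, ctlen, NULL, 0, nonce, key);

        assert(ptlen == sizeof(msg) && memcmp(pt, msg, sizeof(msg)) == 0);
        printf("round trip ok (%llu ciphertext bytes)\n", ctlen);
        return 0;
    }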
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "drygascon.h" +#include "internal-drysponge.h" +#include + +aead_cipher_t const drygascon128_cipher = { + "DryGASCON128", + DRYGASCON128_KEY_SIZE, + DRYGASCON128_NONCE_SIZE, + DRYGASCON128_TAG_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + drygascon128_aead_encrypt, + drygascon128_aead_decrypt +}; + +aead_cipher_t const drygascon256_cipher = { + "DryGASCON256", + DRYGASCON256_KEY_SIZE, + DRYGASCON256_NONCE_SIZE, + DRYGASCON256_TAG_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + drygascon256_aead_encrypt, + drygascon256_aead_decrypt +}; + +aead_hash_algorithm_t const drygascon128_hash_algorithm = { + "DryGASCON128-HASH", + sizeof(int), + DRYGASCON128_HASH_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + drygascon128_hash, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +aead_hash_algorithm_t const drygascon256_hash_algorithm = { + "DryGASCON256-HASH", + sizeof(int), + DRYGASCON256_HASH_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + drygascon256_hash, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +/** + * \brief Processes associated data for DryGASCON128. + * + * \param state DrySPONGE128 sponge state. + * \param ad Points to the associated data. + * \param adlen Length of the associated data, must not be zero. + * \param finalize Non-zero to finalize packet processing because + * the message is zero-length. + */ +static void drygascon128_process_ad + (drysponge128_state_t *state, const unsigned char *ad, + unsigned long long adlen, int finalize) +{ + /* Process all blocks except the last one */ + while (adlen > DRYSPONGE128_RATE) { + drysponge128_f_absorb(state, ad, DRYSPONGE128_RATE); + drysponge128_g_core(state); + ad += DRYSPONGE128_RATE; + adlen -= DRYSPONGE128_RATE; + } + + /* Process the last block with domain separation and padding */ + state->domain = DRYDOMAIN128_ASSOC_DATA; + if (finalize) + state->domain |= DRYDOMAIN128_FINAL; + if (adlen < DRYSPONGE128_RATE) + state->domain |= DRYDOMAIN128_PADDED; + drysponge128_f_absorb(state, ad, (unsigned)adlen); + drysponge128_g(state); +} + +/** + * \brief Processes associated data for DryGASCON256. + * + * \param state DrySPONGE256 sponge state. + * \param ad Points to the associated data. + * \param adlen Length of the associated data, must not be zero. 
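The loop in drygascon128_process_ad uses a strict "greater than" test, so the last block, even a full 16-byte one, always drops through to the domain-separated path below the loop. A tiny sketch of how an associated-data length splits under that rule (the rate value is copied from internal-drysponge.h):

    #include <stdio.h>

    #define DRYSPONGE128_RATE 16 /* mirrors internal-drysponge.h */

    int main(void)
    {
        unsigned long long adlen = 20; /* example: 20 bytes of associated data */
        unsigned full_blocks = 0;

        /* Mirrors drygascon128_process_ad: peel off full blocks with g_core,
         * keep the last block for the domain-separated final absorb. */
        while (adlen > DRYSPONGE128_RATE) {
            ++full_blocks;
            adlen -= DRYSPONGE128_RATE;
        }
        printf("%u full block(s), then a final %llu-byte block%s\n",
               full_blocks, adlen,
               adlen < DRYSPONGE128_RATE ? " with padding" : "");
        return 0;
    }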
+ * \param finalize Non-zero to finalize packet processing because + * the message is zero-length. + */ +static void drygascon256_process_ad + (drysponge256_state_t *state, const unsigned char *ad, + unsigned long long adlen, int finalize) +{ + /* Process all blocks except the last one */ + while (adlen > DRYSPONGE256_RATE) { + drysponge256_f_absorb(state, ad, DRYSPONGE256_RATE); + drysponge256_g_core(state); + ad += DRYSPONGE256_RATE; + adlen -= DRYSPONGE256_RATE; + } + + /* Process the last block with domain separation and padding */ + state->domain = DRYDOMAIN256_ASSOC_DATA; + if (finalize) + state->domain |= DRYDOMAIN256_FINAL; + if (adlen < DRYSPONGE256_RATE) + state->domain |= DRYDOMAIN256_PADDED; + drysponge256_f_absorb(state, ad, (unsigned)adlen); + drysponge256_g(state); +} + +int drygascon128_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + drysponge128_state_t state; + unsigned temp; + (void)nsec; + + /* Set the length of the returned ciphertext */ + *clen = mlen + DRYGASCON128_TAG_SIZE; + + /* Initialize the sponge state with the key and nonce */ + drysponge128_setup(&state, k, npub, adlen == 0 && mlen == 0); + + /* Process the associated data */ + if (adlen > 0) + drygascon128_process_ad(&state, ad, adlen, mlen == 0); + + /* Encrypt the plaintext to produce the ciphertext */ + if (mlen > 0) { + /* Processs all blocks except the last one */ + while (mlen > DRYSPONGE128_RATE) { + drysponge128_f_absorb(&state, m, DRYSPONGE128_RATE); + lw_xor_block_2_src(c, m, state.r.B, DRYSPONGE128_RATE); + drysponge128_g(&state); + c += DRYSPONGE128_RATE; + m += DRYSPONGE128_RATE; + mlen -= DRYSPONGE128_RATE; + } + + /* Process the last block with domain separation and padding */ + state.domain = DRYDOMAIN128_MESSAGE | DRYDOMAIN128_FINAL; + if (mlen < DRYSPONGE128_RATE) + state.domain |= DRYDOMAIN128_PADDED; + temp = (unsigned)mlen; + drysponge128_f_absorb(&state, m, temp); + lw_xor_block_2_src(c, m, state.r.B, temp); + drysponge128_g(&state); + c += temp; + } + + /* Generate the authentication tag */ + memcpy(c, state.r.B, DRYGASCON128_TAG_SIZE); + return 0; +} + +int drygascon128_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + drysponge128_state_t state; + unsigned char *mtemp = m; + unsigned temp; + (void)nsec; + + /* Validate the ciphertext length and set the return "mlen" value */ + if (clen < DRYGASCON128_TAG_SIZE) + return -1; + *mlen = clen - DRYGASCON128_TAG_SIZE; + + /* Initialize the sponge state with the key and nonce */ + clen -= DRYGASCON128_TAG_SIZE; + drysponge128_setup(&state, k, npub, adlen == 0 && clen == 0); + + /* Process the associated data */ + if (adlen > 0) + drygascon128_process_ad(&state, ad, adlen, clen == 0); + + /* Decrypt the ciphertext to produce the plaintext */ + if (clen > 0) { + /* Processs all blocks except the last one */ + while (clen > DRYSPONGE128_RATE) { + lw_xor_block_2_src(m, c, state.r.B, DRYSPONGE128_RATE); + drysponge128_f_absorb(&state, m, DRYSPONGE128_RATE); + drysponge128_g(&state); + c += DRYSPONGE128_RATE; + m += DRYSPONGE128_RATE; + clen -= DRYSPONGE128_RATE; + } + + /* Process the last block with domain separation and padding */ + state.domain = 
DRYDOMAIN128_MESSAGE | DRYDOMAIN128_FINAL; + if (clen < DRYSPONGE128_RATE) + state.domain |= DRYDOMAIN128_PADDED; + temp = (unsigned)clen; + lw_xor_block_2_src(m, c, state.r.B, temp); + drysponge128_f_absorb(&state, m, temp); + drysponge128_g(&state); + c += temp; + } + + /* Check the authentication tag */ + return aead_check_tag(mtemp, *mlen, state.r.B, c, DRYGASCON128_TAG_SIZE); +} + +int drygascon256_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + drysponge256_state_t state; + unsigned temp; + (void)nsec; + + /* Set the length of the returned ciphertext */ + *clen = mlen + DRYGASCON256_TAG_SIZE; + + /* Initialize the sponge state with the key and nonce */ + drysponge256_setup(&state, k, npub, adlen == 0 && mlen == 0); + + /* Process the associated data */ + if (adlen > 0) + drygascon256_process_ad(&state, ad, adlen, mlen == 0); + + /* Encrypt the plaintext to produce the ciphertext */ + if (mlen > 0) { + /* Processs all blocks except the last one */ + while (mlen > DRYSPONGE256_RATE) { + drysponge256_f_absorb(&state, m, DRYSPONGE256_RATE); + lw_xor_block_2_src(c, m, state.r.B, DRYSPONGE256_RATE); + drysponge256_g(&state); + c += DRYSPONGE256_RATE; + m += DRYSPONGE256_RATE; + mlen -= DRYSPONGE256_RATE; + } + + /* Process the last block with domain separation and padding */ + state.domain = DRYDOMAIN256_MESSAGE | DRYDOMAIN256_FINAL; + if (mlen < DRYSPONGE256_RATE) + state.domain |= DRYDOMAIN256_PADDED; + temp = (unsigned)mlen; + drysponge256_f_absorb(&state, m, temp); + lw_xor_block_2_src(c, m, state.r.B, temp); + drysponge256_g(&state); + c += temp; + } + + /* Generate the authentication tag */ + memcpy(c, state.r.B, 16); + drysponge256_g(&state); + memcpy(c + 16, state.r.B, 16); + return 0; +} + +int drygascon256_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + drysponge256_state_t state; + unsigned char *mtemp = m; + unsigned temp; + int result; + (void)nsec; + + /* Validate the ciphertext length and set the return "mlen" value */ + if (clen < DRYGASCON256_TAG_SIZE) + return -1; + *mlen = clen - DRYGASCON256_TAG_SIZE; + + /* Initialize the sponge state with the key and nonce */ + clen -= DRYGASCON256_TAG_SIZE; + drysponge256_setup(&state, k, npub, adlen == 0 && clen == 0); + + /* Process the associated data */ + if (adlen > 0) + drygascon256_process_ad(&state, ad, adlen, clen == 0); + + /* Decrypt the ciphertext to produce the plaintext */ + if (clen > 0) { + /* Processs all blocks except the last one */ + while (clen > DRYSPONGE256_RATE) { + lw_xor_block_2_src(m, c, state.r.B, DRYSPONGE256_RATE); + drysponge256_f_absorb(&state, m, DRYSPONGE256_RATE); + drysponge256_g(&state); + c += DRYSPONGE256_RATE; + m += DRYSPONGE256_RATE; + clen -= DRYSPONGE256_RATE; + } + + /* Process the last block with domain separation and padding */ + state.domain = DRYDOMAIN256_MESSAGE | DRYDOMAIN256_FINAL; + if (clen < DRYSPONGE256_RATE) + state.domain |= DRYDOMAIN256_PADDED; + temp = (unsigned)clen; + lw_xor_block_2_src(m, c, state.r.B, temp); + drysponge256_f_absorb(&state, m, temp); + drysponge256_g(&state); + c += temp; + } + + /* Check the authentication tag which is split into two pieces */ + result = 
aead_check_tag(0, 0, state.r.B, c, 16); + drysponge256_g(&state); + return aead_check_tag_precheck + (mtemp, *mlen, state.r.B, c + 16, 16, ~result); +} + +/** + * \brief Precomputed initialization vector for DryGASCON128-HASH. + * + * This is the CST_H value from the DryGASCON specification after it + * has been processed by the key setup function for DrySPONGE128. + */ +static unsigned char const drygascon128_hash_init[] = { + /* c */ + 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, + 0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44, + 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, + 0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44, + 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, + /* x */ + 0xa4, 0x09, 0x38, 0x22, 0x29, 0x9f, 0x31, 0xd0, + 0x08, 0x2e, 0xfa, 0x98, 0xec, 0x4e, 0x6c, 0x89 +}; + +int drygascon128_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + drysponge128_state_t state; + memcpy(state.c.B, drygascon128_hash_init, sizeof(state.c.B)); + memcpy(state.x.B, drygascon128_hash_init + sizeof(state.c.B), + sizeof(state.x.B)); + state.domain = 0; + state.rounds = DRYSPONGE128_ROUNDS; + drygascon128_process_ad(&state, in, inlen, 1); + memcpy(out, state.r.B, 16); + drysponge128_g(&state); + memcpy(out + 16, state.r.B, 16); + return 0; +} + +/** + * \brief Precomputed initialization vector for DryGASCON256-HASH. + * + * This is the CST_H value from the DryGASCON specification after it + * has been processed by the key setup function for DrySPONGE256. + */ +static unsigned char const drygascon256_hash_init[] = { + /* c */ + 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, + 0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44, + 0xa4, 0x09, 0x38, 0x22, 0x29, 0x9f, 0x31, 0xd0, + 0x08, 0x2e, 0xfa, 0x98, 0xec, 0x4e, 0x6c, 0x89, + 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, + 0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44, + 0xa4, 0x09, 0x38, 0x22, 0x29, 0x9f, 0x31, 0xd0, + 0x08, 0x2e, 0xfa, 0x98, 0xec, 0x4e, 0x6c, 0x89, + 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, + /* x */ + 0x45, 0x28, 0x21, 0xe6, 0x38, 0xd0, 0x13, 0x77, + 0xbe, 0x54, 0x66, 0xcf, 0x34, 0xe9, 0x0c, 0x6c +}; + +int drygascon256_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + drysponge256_state_t state; + memcpy(state.c.B, drygascon256_hash_init, sizeof(state.c.B)); + memcpy(state.x.B, drygascon256_hash_init + sizeof(state.c.B), + sizeof(state.x.B)); + state.domain = 0; + state.rounds = DRYSPONGE256_ROUNDS; + drygascon256_process_ad(&state, in, inlen, 1); + memcpy(out, state.r.B, 16); + drysponge256_g(&state); + memcpy(out + 16, state.r.B, 16); + drysponge256_g(&state); + memcpy(out + 32, state.r.B, 16); + drysponge256_g(&state); + memcpy(out + 48, state.r.B, 16); + return 0; +} diff --git a/drygascon/Implementations/crypto_hash/drygascon128/rhys/drygascon.h b/drygascon/Implementations/crypto_hash/drygascon128/rhys/drygascon.h new file mode 100644 index 0000000..12e18c3 --- /dev/null +++ b/drygascon/Implementations/crypto_hash/drygascon128/rhys/drygascon.h @@ -0,0 +1,264 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_DRYGASCON_H +#define LWCRYPTO_DRYGASCON_H + +#include "aead-common.h" + +/** + * \file drygascon.h + * \brief DryGASCON authenticated encryption algorithm. + * + * DryGASCON is a family of authenticated encryption algorithms based + * around a generalised version of the ASCON permutation. DryGASCON + * is designed to provide some protection against power analysis. + * + * There are four algorithms in the DryGASCON family: + * + * \li DryGASCON128 is an authenticated encryption algorithm with a + * 128-bit key, a 128-bit nonce, and a 128-bit authentication tag. + * \li DryGASCON256 is an authenticated encryption algorithm with a + * 256-bit key, a 128-bit nonce, and a 256-bit authentication tag. + * \li DryGASCON128-HASH is a hash algorithm with a 256-bit output. + * \li DryGASCON256-HASH is a hash algorithm with a 512-bit output. + * + * DryGASCON128 and DryGASCON128-HASH are the primary members of the family. + * + * References: https://github.com/sebastien-riou/DryGASCON + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the key for DryGASCON128. + */ +#define DRYGASCON128_KEY_SIZE 16 + +/** + * \brief Size of the authentication tag for DryGASCON128. + */ +#define DRYGASCON128_TAG_SIZE 16 + +/** + * \brief Size of the nonce for DryGASCON128. + */ +#define DRYGASCON128_NONCE_SIZE 16 + +/** + * \brief Size of the hash output for DryGASCON128-HASH. + */ +#define DRYGASCON128_HASH_SIZE 32 + +/** + * \brief Size of the key for DryGASCON256. + */ +#define DRYGASCON256_KEY_SIZE 32 + +/** + * \brief Size of the authentication tag for DryGASCON256. + */ +#define DRYGASCON256_TAG_SIZE 32 + +/** + * \brief Size of the nonce for DryGASCON256. + */ +#define DRYGASCON256_NONCE_SIZE 16 + +/** + * \brief Size of the hash output for DryGASCON256-HASH. + */ +#define DRYGASCON256_HASH_SIZE 64 + +/** + * \brief Meta-information block for the DryGASCON128 cipher. + */ +extern aead_cipher_t const drygascon128_cipher; + +/** + * \brief Meta-information block for the DryGASCON256 cipher. + */ +extern aead_cipher_t const drygascon256_cipher; + +/** + * \brief Meta-information block for DryGASCON128-HASH. + */ +extern aead_hash_algorithm_t const drygascon128_hash_algorithm; + +/** + * \brief Meta-information block for DryGASCON256-HASH. + */ +extern aead_hash_algorithm_t const drygascon256_hash_algorithm; + +/** + * \brief Encrypts and authenticates a packet with DryGASCON128. 
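+ *
+ * A minimal usage sketch (assumed caller-side names, not part of this API:
+ * "key" is a 16 byte key, "npub" a 16 byte nonce, "msg"/"mlen" the
+ * plaintext, and MAX_MSG_LEN a caller-chosen bound on the plaintext length):
+ *
+ * \code
+ * unsigned char ct[MAX_MSG_LEN + DRYGASCON128_TAG_SIZE];
+ * unsigned long long ctlen = 0;
+ * if (drygascon128_aead_encrypt(ct, &ctlen, msg, mlen,
+ *                               0, 0, 0, npub, key) == 0) {
+ *     // ctlen is now mlen + DRYGASCON128_TAG_SIZE
+ * }
+ * \endcode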
+ * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa drygascon128_aead_decrypt() + */ +int drygascon128_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with DryGASCON128. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa drygascon128_aead_encrypt() + */ +int drygascon128_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with DryGASCON256. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 32 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 32 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. 
+ * + * \sa drygascon256_aead_decrypt() + */ +int drygascon256_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with DryGASCON256. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 32 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 32 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa drygascon256_aead_encrypt() + */ +int drygascon256_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data with DRYGASCON128. + * + * \param out Buffer to receive the hash output which must be at least + * DRYGASCON128_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int drygascon128_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Hashes a block of input data with DRYGASCON256. + * + * \param out Buffer to receive the hash output which must be at least + * DRYGASCON256_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. 
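+ *
+ * A minimal usage sketch (assumed caller-side names, not part of this API:
+ * "msg" and "msglen" hold the data to be hashed):
+ *
+ * \code
+ * unsigned char digest[DRYGASCON256_HASH_SIZE];
+ * drygascon256_hash(digest, msg, msglen);
+ * // digest now holds the 64 byte DryGASCON256-HASH output
+ * \endcode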
+ */ +int drygascon256_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drygascon/Implementations/crypto_hash/drygascon128/rhys/hash.c b/drygascon/Implementations/crypto_hash/drygascon128/rhys/hash.c new file mode 100644 index 0000000..34464d6 --- /dev/null +++ b/drygascon/Implementations/crypto_hash/drygascon128/rhys/hash.c @@ -0,0 +1,8 @@ + +#include "drygascon.h" + +int crypto_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + return drygascon128_hash(out, in, inlen); +} diff --git a/drygascon/Implementations/crypto_hash/drygascon128/rhys/internal-drysponge-avr.S b/drygascon/Implementations/crypto_hash/drygascon128/rhys/internal-drysponge-avr.S new file mode 100644 index 0000000..84d0ff8 --- /dev/null +++ b/drygascon/Implementations/crypto_hash/drygascon128/rhys/internal-drysponge-avr.S @@ -0,0 +1,5092 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global gascon128_core_round + .type gascon128_core_round, @function +gascon128_core_round: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ldi r18,15 + sub r18,r22 + swap r18 + or r22,r18 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+16 + ldd r5,Z+17 + ldd r6,Z+18 + ldd r7,Z+19 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + eor r4,r22 + ldd r23,Z+8 + ldd r12,Z+24 + ldd r13,Z+32 + eor r18,r13 + eor r4,r23 + eor r13,r12 + mov r14,r23 + mov r0,r18 + com r0 + and r14,r0 + mov r15,r4 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r4 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r18 + mov r0,r13 + com r0 + and r16,r0 + eor r18,r15 + eor r23,r24 + eor r4,r25 + eor r12,r16 + eor r13,r14 + eor r23,r18 + eor r12,r4 + eor r18,r13 + com r4 + st Z,r18 + std Z+8,r23 + std Z+24,r12 + std Z+32,r13 + ldd r23,Z+9 + ldd r12,Z+25 + ldd r13,Z+33 + eor r19,r13 + eor r5,r23 + eor r13,r12 + mov r14,r23 + mov r0,r19 + com r0 + and r14,r0 + mov r15,r5 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r5 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r19 + mov r0,r13 + com r0 + and r16,r0 + eor r19,r15 + eor r23,r24 + eor r5,r25 + eor r12,r16 + eor r13,r14 + eor r23,r19 + eor r12,r5 + eor r19,r13 + com r5 + std Z+1,r19 + std Z+9,r23 + std Z+25,r12 + std Z+33,r13 + ldd r23,Z+10 + ldd r12,Z+26 + ldd r13,Z+34 + eor r20,r13 + eor r6,r23 + eor r13,r12 + mov r14,r23 + mov r0,r20 + com r0 + and r14,r0 + mov r15,r6 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r6 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r20 + mov r0,r13 + com r0 + and r16,r0 + eor r20,r15 + eor r23,r24 + eor r6,r25 + eor r12,r16 + eor r13,r14 + eor r23,r20 + eor r12,r6 + eor r20,r13 + com r6 + std Z+2,r20 + std Z+10,r23 + std Z+26,r12 + std Z+34,r13 + ldd r23,Z+11 + ldd r12,Z+27 + ldd r13,Z+35 + eor r21,r13 + eor r7,r23 + eor r13,r12 + mov r14,r23 + mov r0,r21 + com r0 + and r14,r0 + mov r15,r7 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r7 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r21 + mov r0,r13 + com r0 + and r16,r0 + eor r21,r15 + eor r23,r24 + eor r7,r25 + eor r12,r16 + eor r13,r14 + eor r23,r21 + eor r12,r7 + eor 
r21,r13 + com r7 + std Z+3,r21 + std Z+11,r23 + std Z+27,r12 + std Z+35,r13 + ldd r23,Z+12 + ldd r12,Z+28 + ldd r13,Z+36 + eor r26,r13 + eor r8,r23 + eor r13,r12 + mov r14,r23 + mov r0,r26 + com r0 + and r14,r0 + mov r15,r8 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r8 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r26 + mov r0,r13 + com r0 + and r16,r0 + eor r26,r15 + eor r23,r24 + eor r8,r25 + eor r12,r16 + eor r13,r14 + eor r23,r26 + eor r12,r8 + eor r26,r13 + com r8 + std Z+4,r26 + std Z+12,r23 + std Z+28,r12 + std Z+36,r13 + ldd r23,Z+13 + ldd r12,Z+29 + ldd r13,Z+37 + eor r27,r13 + eor r9,r23 + eor r13,r12 + mov r14,r23 + mov r0,r27 + com r0 + and r14,r0 + mov r15,r9 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r9 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r27 + mov r0,r13 + com r0 + and r16,r0 + eor r27,r15 + eor r23,r24 + eor r9,r25 + eor r12,r16 + eor r13,r14 + eor r23,r27 + eor r12,r9 + eor r27,r13 + com r9 + std Z+5,r27 + std Z+13,r23 + std Z+29,r12 + std Z+37,r13 + ldd r23,Z+14 + ldd r12,Z+30 + ldd r13,Z+38 + eor r2,r13 + eor r10,r23 + eor r13,r12 + mov r14,r23 + mov r0,r2 + com r0 + and r14,r0 + mov r15,r10 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r10 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r2 + mov r0,r13 + com r0 + and r16,r0 + eor r2,r15 + eor r23,r24 + eor r10,r25 + eor r12,r16 + eor r13,r14 + eor r23,r2 + eor r12,r10 + eor r2,r13 + com r10 + std Z+6,r2 + std Z+14,r23 + std Z+30,r12 + std Z+38,r13 + ldd r23,Z+15 + ldd r12,Z+31 + ldd r13,Z+39 + eor r3,r13 + eor r11,r23 + eor r13,r12 + mov r14,r23 + mov r0,r3 + com r0 + and r14,r0 + mov r15,r11 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r11 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r3 + mov r0,r13 + com r0 + and r16,r0 + eor r3,r15 + eor r23,r24 + eor r11,r25 + eor r12,r16 + eor r13,r14 + eor r23,r3 + eor r12,r11 + eor r3,r13 + com r11 + std Z+7,r3 + std Z+15,r23 + std Z+31,r12 + std Z+39,r13 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r24 + rol r25 + rol r16 + rol r17 + adc r24,r1 + lsl r24 + rol r25 + rol r16 + rol r17 + adc r24,r1 + eor r24,r18 + eor r25,r19 + eor r16,r20 + eor r17,r21 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r18,r24 + eor r19,r25 + eor r20,r16 + eor r21,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + std Z+12,r26 + std Z+13,r27 + std Z+14,r2 + std Z+15,r3 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + bst r12,0 + lsr r15 + ror r14 + ror r13 + ror r12 + bld r15,7 + eor r24,r4 + eor r25,r5 + eor r16,r6 + eor r17,r7 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r1 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr 
r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r7,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + or r11,r0 + eor r4,r24 + eor r5,r25 + eor r6,r16 + eor r7,r17 + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + eor r24,r18 + eor r25,r19 + eor r16,r20 + eor r17,r21 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r24 + eor r19,r25 + eor r20,r16 + eor r21,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+24,r18 + std Z+25,r19 + std Z+26,r20 + std Z+27,r21 + std Z+28,r26 + std Z+29,r27 + std Z+30,r2 + std Z+31,r3 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + or r17,r0 + eor r24,r18 + eor r25,r19 + eor r16,r20 + eor r17,r21 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r18,r24 + eor r19,r25 + eor r20,r16 + eor r21,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+32,r18 + std Z+33,r19 + std Z+34,r20 + std Z+35,r21 + std Z+36,r26 + std Z+37,r27 + std Z+38,r2 + std Z+39,r3 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + or r17,r0 + eor r24,r18 + eor r25,r19 + 
eor r16,r20 + eor r17,r21 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r24 + eor r19,r25 + eor r20,r16 + eor r21,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + std Z+16,r4 + std Z+17,r5 + std Z+18,r6 + std Z+19,r7 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size gascon128_core_round, .-gascon128_core_round + + .text +.global drysponge128_g + .type drysponge128_g, @function +drysponge128_g: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + subi r30,180 + sbci r31,255 + ld r19,Z + subi r30,76 + sbc r31,r1 + ldi r18,240 + std Z+40,r1 + std Z+41,r1 + std Z+42,r1 + std Z+43,r1 + std Z+44,r1 + std Z+45,r1 + std Z+46,r1 + std Z+47,r1 + std Z+48,r1 + std Z+49,r1 + std Z+50,r1 + std Z+51,r1 + std Z+52,r1 + std Z+53,r1 + std Z+54,r1 + std Z+55,r1 + ld r20,Z + ldd r21,Z+1 + ldd r22,Z+2 + ldd r23,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+16 + ldd r5,Z+17 + ldd r6,Z+18 + ldd r7,Z+19 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 +38: + eor r4,r18 + ldd r12,Z+8 + ldd r13,Z+24 + ldd r14,Z+32 + eor r20,r14 + eor r4,r12 + eor r14,r13 + mov r15,r12 + mov r0,r20 + com r0 + and r15,r0 + mov r24,r4 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r4 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r20 + mov r0,r14 + com r0 + and r17,r0 + eor r20,r24 + eor r12,r25 + eor r4,r16 + eor r13,r17 + eor r14,r15 + eor r12,r20 + eor r13,r4 + eor r20,r14 + com r4 + st Z,r20 + std Z+8,r12 + std Z+24,r13 + std Z+32,r14 + ldd r12,Z+9 + ldd r13,Z+25 + ldd r14,Z+33 + eor r21,r14 + eor r5,r12 + eor r14,r13 + mov r15,r12 + mov r0,r21 + com r0 + and r15,r0 + mov r24,r5 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r5 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r21 + mov r0,r14 + com r0 + and r17,r0 + eor r21,r24 + eor r12,r25 + eor r5,r16 + eor r13,r17 + eor r14,r15 + eor r12,r21 + eor r13,r5 + eor r21,r14 + com r5 + std Z+1,r21 + std Z+9,r12 + std Z+25,r13 + std Z+33,r14 + ldd r12,Z+10 + ldd r13,Z+26 + ldd r14,Z+34 + eor r22,r14 + eor r6,r12 + eor r14,r13 + mov r15,r12 + mov r0,r22 + com r0 + and r15,r0 + mov r24,r6 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r6 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r22 + mov r0,r14 + com r0 + and r17,r0 + eor r22,r24 + eor r12,r25 + eor r6,r16 + eor r13,r17 + eor r14,r15 + eor r12,r22 + eor r13,r6 + eor r22,r14 + com r6 + std Z+2,r22 + std Z+10,r12 + std Z+26,r13 + std Z+34,r14 + ldd r12,Z+11 + ldd r13,Z+27 + ldd r14,Z+35 + eor r23,r14 + eor r7,r12 + eor r14,r13 + mov r15,r12 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r7 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r7 + com r0 + 
and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r23 + mov r0,r14 + com r0 + and r17,r0 + eor r23,r24 + eor r12,r25 + eor r7,r16 + eor r13,r17 + eor r14,r15 + eor r12,r23 + eor r13,r7 + eor r23,r14 + com r7 + std Z+3,r23 + std Z+11,r12 + std Z+27,r13 + std Z+35,r14 + ldd r12,Z+12 + ldd r13,Z+28 + ldd r14,Z+36 + eor r26,r14 + eor r8,r12 + eor r14,r13 + mov r15,r12 + mov r0,r26 + com r0 + and r15,r0 + mov r24,r8 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r8 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r26 + mov r0,r14 + com r0 + and r17,r0 + eor r26,r24 + eor r12,r25 + eor r8,r16 + eor r13,r17 + eor r14,r15 + eor r12,r26 + eor r13,r8 + eor r26,r14 + com r8 + std Z+4,r26 + std Z+12,r12 + std Z+28,r13 + std Z+36,r14 + ldd r12,Z+13 + ldd r13,Z+29 + ldd r14,Z+37 + eor r27,r14 + eor r9,r12 + eor r14,r13 + mov r15,r12 + mov r0,r27 + com r0 + and r15,r0 + mov r24,r9 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r9 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r27 + mov r0,r14 + com r0 + and r17,r0 + eor r27,r24 + eor r12,r25 + eor r9,r16 + eor r13,r17 + eor r14,r15 + eor r12,r27 + eor r13,r9 + eor r27,r14 + com r9 + std Z+5,r27 + std Z+13,r12 + std Z+29,r13 + std Z+37,r14 + ldd r12,Z+14 + ldd r13,Z+30 + ldd r14,Z+38 + eor r2,r14 + eor r10,r12 + eor r14,r13 + mov r15,r12 + mov r0,r2 + com r0 + and r15,r0 + mov r24,r10 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r10 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r2 + mov r0,r14 + com r0 + and r17,r0 + eor r2,r24 + eor r12,r25 + eor r10,r16 + eor r13,r17 + eor r14,r15 + eor r12,r2 + eor r13,r10 + eor r2,r14 + com r10 + std Z+6,r2 + std Z+14,r12 + std Z+30,r13 + std Z+38,r14 + ldd r12,Z+15 + ldd r13,Z+31 + ldd r14,Z+39 + eor r3,r14 + eor r11,r12 + eor r14,r13 + mov r15,r12 + mov r0,r3 + com r0 + and r15,r0 + mov r24,r11 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r11 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r3 + mov r0,r14 + com r0 + and r17,r0 + eor r3,r24 + eor r12,r25 + eor r11,r16 + eor r13,r17 + eor r14,r15 + eor r12,r3 + eor r13,r11 + eor r3,r14 + com r11 + std Z+7,r3 + std Z+15,r12 + std Z+31,r13 + std Z+39,r14 + ldd r20,Z+8 + ldd r21,Z+9 + ldd r22,Z+10 + ldd r23,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r12,r20 + movw r14,r22 + movw r24,r26 + movw r16,r2 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r24 + rol r25 + rol r16 + rol r17 + adc r24,r1 + lsl r24 + rol r25 + rol r16 + rol r17 + adc r24,r1 + eor r24,r20 + eor r25,r21 + eor r16,r22 + eor r17,r23 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r20 + mov r20,r22 + mov r22,r0 + mov r0,r21 + mov r21,r23 + mov r23,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + or r23,r0 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+8,r20 + std Z+9,r21 + std Z+10,r22 + std Z+11,r23 + std Z+12,r26 + std Z+13,r27 + std Z+14,r2 + std Z+15,r3 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + bst r12,0 + lsr r15 + 
ror r14 + ror r13 + ror r12 + bld r15,7 + eor r24,r4 + eor r25,r5 + eor r16,r6 + eor r17,r7 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r1 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r7,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + or r11,r0 + eor r4,r24 + eor r5,r25 + eor r6,r16 + eor r7,r17 + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + ldd r20,Z+24 + ldd r21,Z+25 + ldd r22,Z+26 + ldd r23,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r12,r20 + movw r14,r22 + movw r24,r26 + movw r16,r2 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + eor r24,r20 + eor r25,r21 + eor r16,r22 + eor r17,r23 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r20 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r0 + lsl r20 + rol r21 + rol r22 + rol r23 + adc r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + adc r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + adc r20,r1 + mov r0,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+24,r20 + std Z+25,r21 + std Z+26,r22 + std Z+27,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r2 + std Z+31,r3 + ldd r20,Z+32 + ldd r21,Z+33 + ldd r22,Z+34 + ldd r23,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + movw r12,r20 + movw r14,r22 + movw r24,r26 + movw r16,r2 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + or r17,r0 + eor r24,r20 + eor r25,r21 + eor r16,r22 + eor r17,r23 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r20 + mov r20,r22 + mov r22,r0 + mov r0,r21 + mov r21,r23 + mov r23,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + or r23,r0 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+32,r20 + std Z+33,r21 + std Z+34,r22 + std Z+35,r23 + std Z+36,r26 + std Z+37,r27 + std Z+38,r2 + std Z+39,r3 + ld r20,Z + ldd r21,Z+1 + ldd r22,Z+2 + ldd r23,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r12,r20 + movw r14,r22 + movw r24,r26 + movw r16,r2 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + 
ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + or r17,r0 + eor r24,r20 + eor r25,r21 + eor r16,r22 + eor r17,r23 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r20 + mov r20,r22 + mov r22,r0 + mov r0,r21 + mov r21,r23 + mov r23,r0 + lsl r20 + rol r21 + rol r22 + rol r23 + adc r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + adc r20,r1 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + ldd r12,Z+40 + ldd r13,Z+41 + ldd r14,Z+42 + ldd r15,Z+43 + eor r12,r20 + eor r13,r21 + eor r14,r22 + eor r15,r23 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + std Z+40,r12 + std Z+41,r13 + std Z+42,r14 + std Z+43,r15 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + ldd r0,Z+24 + eor r12,r0 + ldd r0,Z+25 + eor r13,r0 + ldd r0,Z+26 + eor r14,r0 + ldd r0,Z+27 + eor r15,r0 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + ldd r12,Z+48 + ldd r13,Z+49 + ldd r14,Z+50 + ldd r15,Z+51 + ldd r0,Z+8 + eor r12,r0 + ldd r0,Z+9 + eor r13,r0 + ldd r0,Z+10 + eor r14,r0 + ldd r0,Z+11 + eor r15,r0 + ldd r0,Z+28 + eor r12,r0 + ldd r0,Z+29 + eor r13,r0 + ldd r0,Z+30 + eor r14,r0 + ldd r0,Z+31 + eor r15,r0 + std Z+48,r12 + std Z+49,r13 + std Z+50,r14 + std Z+51,r15 + ldd r12,Z+52 + ldd r13,Z+53 + ldd r14,Z+54 + ldd r15,Z+55 + ldd r0,Z+12 + eor r12,r0 + ldd r0,Z+13 + eor r13,r0 + ldd r0,Z+14 + eor r14,r0 + ldd r0,Z+15 + eor r15,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + std Z+52,r12 + std Z+53,r13 + std Z+54,r14 + std Z+55,r15 + subi r18,15 + dec r19 + breq 5904f + rjmp 38b +5904: + st Z,r20 + std Z+1,r21 + std Z+2,r22 + std Z+3,r23 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + std Z+16,r4 + std Z+17,r5 + std Z+18,r6 + std Z+19,r7 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size drysponge128_g, .-drysponge128_g + + .text +.global gascon256_core_round + .type gascon256_core_round, @function +gascon256_core_round: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,8 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 26 + ldi r18,15 + sub r18,r22 + swap r18 + or r22,r18 + ld r18,Z+ + ld r19,Z+ + ld r20,Z+ + ld r21,Z+ + ld r26,Z+ + ld r27,Z+ + ld r2,Z+ + ld r3,Z+ + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + eor r4,r22 + ld r22,Z + ldd r23,Z+8 + ldd r12,Z+16 + ldd r13,Z+32 + ldd r14,Z+40 + ldd r15,Z+48 + ldd r24,Z+56 + eor r18,r24 + eor r23,r22 + eor r4,r12 + eor r14,r13 + eor r24,r15 + mov r17,r18 + mov r25,r22 + mov r0,r18 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r18,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r4 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r4 + com r0 + and r16,r0 + eor r12,r16 + 
mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r4,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r18 + eor r12,r23 + eor r13,r4 + eor r15,r14 + eor r18,r24 + com r4 + std Y+1,r18 + st Z,r22 + std Z+8,r23 + std Z+16,r12 + std Z+32,r13 + std Z+40,r14 + std Z+48,r15 + std Z+56,r24 + ldd r22,Z+1 + ldd r23,Z+9 + ldd r12,Z+17 + ldd r13,Z+33 + ldd r14,Z+41 + ldd r15,Z+49 + ldd r24,Z+57 + eor r19,r24 + eor r23,r22 + eor r5,r12 + eor r14,r13 + eor r24,r15 + mov r17,r19 + mov r25,r22 + mov r0,r19 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r19,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r5 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r5 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r5,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r19 + eor r12,r23 + eor r13,r5 + eor r15,r14 + eor r19,r24 + com r5 + std Y+2,r19 + std Z+1,r22 + std Z+9,r23 + std Z+17,r12 + std Z+33,r13 + std Z+41,r14 + std Z+49,r15 + std Z+57,r24 + ldd r22,Z+2 + ldd r23,Z+10 + ldd r12,Z+18 + ldd r13,Z+34 + ldd r14,Z+42 + ldd r15,Z+50 + ldd r24,Z+58 + eor r20,r24 + eor r23,r22 + eor r6,r12 + eor r14,r13 + eor r24,r15 + mov r17,r20 + mov r25,r22 + mov r0,r20 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r20,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r6 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r6 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r6,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r20 + eor r12,r23 + eor r13,r6 + eor r15,r14 + eor r20,r24 + com r6 + std Y+3,r20 + std Z+2,r22 + std Z+10,r23 + std Z+18,r12 + std Z+34,r13 + std Z+42,r14 + std Z+50,r15 + std Z+58,r24 + ldd r22,Z+3 + ldd r23,Z+11 + ldd r12,Z+19 + ldd r13,Z+35 + ldd r14,Z+43 + ldd r15,Z+51 + ldd r24,Z+59 + eor r21,r24 + eor r23,r22 + eor r7,r12 + eor r14,r13 + eor r24,r15 + mov r17,r21 + mov r25,r22 + mov r0,r21 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r21,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r7 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r7 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r7,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r21 + eor r12,r23 + eor r13,r7 + eor r15,r14 + eor r21,r24 + com r7 + std Y+4,r21 + std Z+3,r22 + std Z+11,r23 + std Z+19,r12 + std Z+35,r13 + std Z+43,r14 + std Z+51,r15 + std Z+59,r24 + ldd r22,Z+4 + ldd r23,Z+12 + ldd r12,Z+20 + ldd r13,Z+36 + ldd r14,Z+44 + ldd r15,Z+52 + ldd r24,Z+60 + eor r26,r24 + eor r23,r22 + eor r8,r12 + eor r14,r13 + eor r24,r15 + mov r17,r26 + mov r25,r22 + mov r0,r26 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r26,r16 + mov r16,r12 + 
mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r8 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r8 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r8,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r26 + eor r12,r23 + eor r13,r8 + eor r15,r14 + eor r26,r24 + com r8 + std Y+5,r26 + std Z+4,r22 + std Z+12,r23 + std Z+20,r12 + std Z+36,r13 + std Z+44,r14 + std Z+52,r15 + std Z+60,r24 + ldd r22,Z+5 + ldd r23,Z+13 + ldd r12,Z+21 + ldd r13,Z+37 + ldd r14,Z+45 + ldd r15,Z+53 + ldd r24,Z+61 + eor r27,r24 + eor r23,r22 + eor r9,r12 + eor r14,r13 + eor r24,r15 + mov r17,r27 + mov r25,r22 + mov r0,r27 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r27,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r9 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r9 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r9,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r27 + eor r12,r23 + eor r13,r9 + eor r15,r14 + eor r27,r24 + com r9 + std Y+6,r27 + std Z+5,r22 + std Z+13,r23 + std Z+21,r12 + std Z+37,r13 + std Z+45,r14 + std Z+53,r15 + std Z+61,r24 + ldd r22,Z+6 + ldd r23,Z+14 + ldd r12,Z+22 + ldd r13,Z+38 + ldd r14,Z+46 + ldd r15,Z+54 + ldd r24,Z+62 + eor r2,r24 + eor r23,r22 + eor r10,r12 + eor r14,r13 + eor r24,r15 + mov r17,r2 + mov r25,r22 + mov r0,r2 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r2,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r10 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r10 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r2 + eor r12,r23 + eor r13,r10 + eor r15,r14 + eor r2,r24 + com r10 + std Y+7,r2 + std Z+6,r22 + std Z+14,r23 + std Z+22,r12 + std Z+38,r13 + std Z+46,r14 + std Z+54,r15 + std Z+62,r24 + ldd r22,Z+7 + ldd r23,Z+15 + ldd r12,Z+23 + ldd r13,Z+39 + ldd r14,Z+47 + ldd r15,Z+55 + ldd r24,Z+63 + eor r3,r24 + eor r23,r22 + eor r11,r12 + eor r14,r13 + eor r24,r15 + mov r17,r3 + mov r25,r22 + mov r0,r3 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r3,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r11 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r11 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r3 + eor r12,r23 + eor r13,r11 + eor r15,r14 + eor r3,r24 + com r11 + std Y+8,r3 + std Z+7,r22 + std Z+15,r23 + std Z+23,r12 + std Z+39,r13 + std Z+47,r14 + std Z+55,r15 + std Z+63,r24 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r22,r18 + movw r12,r20 + 
movw r14,r26 + movw r24,r2 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + bst r22,0 + lsr r13 + ror r12 + ror r23 + ror r22 + bld r13,7 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + std Z+12,r26 + std Z+13,r27 + std Z+14,r2 + std Z+15,r3 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r22 + mov r22,r23 + mov r23,r12 + mov r12,r13 + mov r13,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + or r13,r0 + mov r0,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + std Z+20,r26 + std Z+21,r27 + std Z+22,r2 + std Z+23,r3 + movw r22,r4 + movw r12,r6 + movw r14,r8 + movw r24,r10 + mov r0,r1 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + or r13,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r4 + eor 
r15,r5 + eor r24,r6 + eor r25,r7 + eor r22,r8 + eor r23,r9 + eor r12,r10 + eor r13,r11 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r1 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + or r11,r0 + eor r4,r14 + eor r5,r15 + eor r6,r24 + eor r7,r25 + eor r8,r22 + eor r9,r23 + eor r10,r12 + eor r11,r13 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r22 + mov r22,r12 + mov r12,r0 + mov r0,r23 + mov r23,r13 + mov r13,r0 + mov r0,r14 + mov r14,r24 + mov r24,r0 + mov r0,r15 + mov r15,r25 + mov r25,r0 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+32,r18 + std Z+33,r19 + std Z+34,r20 + std Z+35,r21 + std Z+36,r26 + std Z+37,r27 + std Z+38,r2 + std Z+39,r3 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + ldd r26,Z+44 + ldd r27,Z+45 + ldd r2,Z+46 + ldd r3,Z+47 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r13 + mov r13,r12 + mov r12,r23 + mov r23,r22 + mov r22,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + or r13,r0 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+40,r18 + std Z+41,r19 + std Z+42,r20 + std Z+43,r21 + std Z+44,r26 + std Z+45,r27 + std Z+46,r2 + std Z+47,r3 + ldd r18,Z+48 + ldd r19,Z+49 + ldd r20,Z+50 + ldd r21,Z+51 + ldd r26,Z+52 + ldd r27,Z+53 + ldd r2,Z+54 + ldd r3,Z+55 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r22 + mov r22,r23 + mov r23,r12 + mov r12,r13 + mov r13,r0 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + lsl r22 + rol r23 + rol r12 + rol r13 + adc 
r22,r1 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r3 + mov r3,r2 + mov r2,r27 + mov r27,r26 + mov r26,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+48,r18 + std Z+49,r19 + std Z+50,r20 + std Z+51,r21 + std Z+52,r26 + std Z+53,r27 + std Z+54,r2 + std Z+55,r3 + ldd r18,Z+56 + ldd r19,Z+57 + ldd r20,Z+58 + ldd r21,Z+59 + ldd r26,Z+60 + ldd r27,Z+61 + ldd r2,Z+62 + ldd r3,Z+63 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r13 + mov r13,r12 + mov r12,r23 + mov r23,r22 + mov r22,r0 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+56,r18 + std Z+57,r19 + std Z+58,r20 + std Z+59,r21 + std Z+60,r26 + std Z+61,r27 + std Z+62,r2 + std Z+63,r3 + ldd r18,Y+1 + ldd r19,Y+2 + ldd r20,Y+3 + ldd r21,Y+4 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r2,Y+7 + ldd r3,Y+8 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r22 + mov r22,r23 + mov r23,r12 + mov r12,r13 + mov r13,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + or r13,r0 + mov r0,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+24,r4 + std Z+25,r5 + std Z+26,r6 + std Z+27,r7 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + st -Z,r3 + st -Z,r2 + st -Z,r27 + st -Z,r26 + st -Z,r21 + st -Z,r20 + st -Z,r19 + st -Z,r18 + adiw r28,8 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop 
r3 + pop r2 + pop r29 + pop r28 + ret + .size gascon256_core_round, .-gascon256_core_round + + .text +.global drysponge256_g + .type drysponge256_g, @function +drysponge256_g: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,26 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 44 + subi r30,148 + sbci r31,255 + ld r19,Z + subi r30,108 + sbc r31,r1 + ldi r18,240 + std Y+25,r19 + std Y+26,r18 + std Y+9,r1 + std Y+10,r1 + std Y+11,r1 + std Y+12,r1 + std Y+13,r1 + std Y+14,r1 + std Y+15,r1 + std Y+16,r1 + std Y+17,r1 + std Y+18,r1 + std Y+19,r1 + std Y+20,r1 + std Y+21,r1 + std Y+22,r1 + std Y+23,r1 + std Y+24,r1 + ld r18,Z+ + ld r19,Z+ + ld r20,Z+ + ld r21,Z+ + ld r22,Z+ + ld r23,Z+ + ld r26,Z+ + ld r27,Z+ + ldd r2,Z+24 + ldd r3,Z+25 + ldd r4,Z+26 + ldd r5,Z+27 + ldd r6,Z+28 + ldd r7,Z+29 + ldd r8,Z+30 + ldd r9,Z+31 +40: + ldd r24,Y+26 + eor r2,r24 + subi r24,15 + std Y+26,r24 + ld r10,Z + ldd r11,Z+8 + ldd r12,Z+16 + ldd r13,Z+32 + ldd r14,Z+40 + ldd r15,Z+48 + ldd r24,Z+56 + eor r18,r24 + eor r11,r10 + eor r2,r12 + eor r14,r13 + eor r24,r15 + mov r17,r18 + mov r25,r10 + mov r0,r18 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r18,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r2 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r2 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r2,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r18 + eor r12,r11 + eor r13,r2 + eor r15,r14 + eor r18,r24 + com r2 + std Y+1,r18 + st Z,r10 + std Z+8,r11 + std Z+16,r12 + std Z+32,r13 + std Z+40,r14 + std Z+48,r15 + std Z+56,r24 + ldd r10,Z+1 + ldd r11,Z+9 + ldd r12,Z+17 + ldd r13,Z+33 + ldd r14,Z+41 + ldd r15,Z+49 + ldd r24,Z+57 + eor r19,r24 + eor r11,r10 + eor r3,r12 + eor r14,r13 + eor r24,r15 + mov r17,r19 + mov r25,r10 + mov r0,r19 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r19,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r3 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r3 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r3,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r19 + eor r12,r11 + eor r13,r3 + eor r15,r14 + eor r19,r24 + com r3 + std Y+2,r19 + std Z+1,r10 + std Z+9,r11 + std Z+17,r12 + std Z+33,r13 + std Z+41,r14 + std Z+49,r15 + std Z+57,r24 + ldd r10,Z+2 + ldd r11,Z+10 + ldd r12,Z+18 + ldd r13,Z+34 + ldd r14,Z+42 + ldd r15,Z+50 + ldd r24,Z+58 + eor r20,r24 + eor r11,r10 + eor r4,r12 + eor r14,r13 + eor r24,r15 + mov r17,r20 + mov r25,r10 + mov r0,r20 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r20,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r4 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r4 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r4,r16 + mov r16,r15 + mov r0,r14 + com r0 
+ and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r20 + eor r12,r11 + eor r13,r4 + eor r15,r14 + eor r20,r24 + com r4 + std Y+3,r20 + std Z+2,r10 + std Z+10,r11 + std Z+18,r12 + std Z+34,r13 + std Z+42,r14 + std Z+50,r15 + std Z+58,r24 + ldd r10,Z+3 + ldd r11,Z+11 + ldd r12,Z+19 + ldd r13,Z+35 + ldd r14,Z+43 + ldd r15,Z+51 + ldd r24,Z+59 + eor r21,r24 + eor r11,r10 + eor r5,r12 + eor r14,r13 + eor r24,r15 + mov r17,r21 + mov r25,r10 + mov r0,r21 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r21,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r5 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r5 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r5,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r21 + eor r12,r11 + eor r13,r5 + eor r15,r14 + eor r21,r24 + com r5 + std Y+4,r21 + std Z+3,r10 + std Z+11,r11 + std Z+19,r12 + std Z+35,r13 + std Z+43,r14 + std Z+51,r15 + std Z+59,r24 + ldd r10,Z+4 + ldd r11,Z+12 + ldd r12,Z+20 + ldd r13,Z+36 + ldd r14,Z+44 + ldd r15,Z+52 + ldd r24,Z+60 + eor r22,r24 + eor r11,r10 + eor r6,r12 + eor r14,r13 + eor r24,r15 + mov r17,r22 + mov r25,r10 + mov r0,r22 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r6 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r6 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r6,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r22 + eor r12,r11 + eor r13,r6 + eor r15,r14 + eor r22,r24 + com r6 + std Y+5,r22 + std Z+4,r10 + std Z+12,r11 + std Z+20,r12 + std Z+36,r13 + std Z+44,r14 + std Z+52,r15 + std Z+60,r24 + ldd r10,Z+5 + ldd r11,Z+13 + ldd r12,Z+21 + ldd r13,Z+37 + ldd r14,Z+45 + ldd r15,Z+53 + ldd r24,Z+61 + eor r23,r24 + eor r11,r10 + eor r7,r12 + eor r14,r13 + eor r24,r15 + mov r17,r23 + mov r25,r10 + mov r0,r23 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r7 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r7 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r7,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r23 + eor r12,r11 + eor r13,r7 + eor r15,r14 + eor r23,r24 + com r7 + std Y+6,r23 + std Z+5,r10 + std Z+13,r11 + std Z+21,r12 + std Z+37,r13 + std Z+45,r14 + std Z+53,r15 + std Z+61,r24 + ldd r10,Z+6 + ldd r11,Z+14 + ldd r12,Z+22 + ldd r13,Z+38 + ldd r14,Z+46 + ldd r15,Z+54 + ldd r24,Z+62 + eor r26,r24 + eor r11,r10 + eor r8,r12 + eor r14,r13 + eor r24,r15 + mov r17,r26 + mov r25,r10 + mov r0,r26 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r26,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r8 + mov r0,r12 + com r0 + and 
r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r8 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r8,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r26 + eor r12,r11 + eor r13,r8 + eor r15,r14 + eor r26,r24 + com r8 + std Y+7,r26 + std Z+6,r10 + std Z+14,r11 + std Z+22,r12 + std Z+38,r13 + std Z+46,r14 + std Z+54,r15 + std Z+62,r24 + ldd r10,Z+7 + ldd r11,Z+15 + ldd r12,Z+23 + ldd r13,Z+39 + ldd r14,Z+47 + ldd r15,Z+55 + ldd r24,Z+63 + eor r27,r24 + eor r11,r10 + eor r9,r12 + eor r14,r13 + eor r24,r15 + mov r17,r27 + mov r25,r10 + mov r0,r27 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r27,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r9 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r9 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r9,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r27 + eor r12,r11 + eor r13,r9 + eor r15,r14 + eor r27,r24 + com r9 + std Y+8,r27 + std Z+7,r10 + std Z+15,r11 + std Z+23,r12 + std Z+39,r13 + std Z+47,r14 + std Z+55,r15 + std Z+63,r24 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r26,Z+6 + ldd r27,Z+7 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r22 + mov r22,r26 + mov r26,r0 + mov r0,r23 + mov r23,r27 + mov r27,r0 + mov r0,r1 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + or r27,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r22 + std Z+5,r23 + std Z+6,r26 + std Z+7,r27 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r26,Z+14 + ldd r27,Z+15 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + bst r10,0 + lsr r13 + ror r12 + ror r11 + ror r10 + bld r13,7 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r1 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + or r27,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + std Z+12,r22 + std Z+13,r23 + std Z+14,r26 + std Z+15,r27 + 
ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r26,Z+22 + ldd r27,Z+23 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r10 + mov r10,r11 + mov r11,r12 + mov r12,r13 + mov r13,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + or r13,r0 + mov r0,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r22 + mov r22,r23 + mov r23,r26 + mov r26,r27 + mov r27,r0 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + std Z+20,r22 + std Z+21,r23 + std Z+22,r26 + std Z+23,r27 + movw r10,r2 + movw r12,r4 + movw r14,r6 + movw r24,r8 + mov r0,r1 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + or r13,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r2 + eor r15,r3 + eor r24,r4 + eor r25,r5 + eor r10,r6 + eor r11,r7 + eor r12,r8 + eor r13,r9 + mov r0,r2 + mov r2,r4 + mov r4,r0 + mov r0,r3 + mov r3,r5 + mov r5,r0 + mov r0,r1 + lsr r5 + ror r4 + ror r3 + ror r2 + ror r0 + lsr r5 + ror r4 + ror r3 + ror r2 + ror r0 + lsr r5 + ror r4 + ror r3 + ror r2 + ror r0 + lsr r5 + ror r4 + ror r3 + ror r2 + ror r0 + or r5,r0 + mov r0,r6 + mov r6,r8 + mov r8,r0 + mov r0,r7 + mov r7,r9 + mov r9,r0 + mov r0,r1 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + or r9,r0 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + eor r6,r10 + eor r7,r11 + eor r8,r12 + eor r9,r13 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r22,Z+36 + ldd r23,Z+37 + ldd r26,Z+38 + ldd r27,Z+39 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r10 + mov r10,r12 + mov r12,r0 + mov r0,r11 + mov r11,r13 + mov r13,r0 + mov r0,r14 + mov r14,r24 + mov r24,r0 + mov r0,r15 + mov r15,r25 + mov r25,r0 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r22 + mov r22,r26 + mov r26,r0 + mov r0,r23 + mov r23,r27 + mov r27,r0 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+32,r18 + std Z+33,r19 + std Z+34,r20 + std Z+35,r21 + std 
Z+36,r22 + std Z+37,r23 + std Z+38,r26 + std Z+39,r27 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + ldd r22,Z+44 + ldd r23,Z+45 + ldd r26,Z+46 + ldd r27,Z+47 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r13 + mov r13,r12 + mov r12,r11 + mov r11,r10 + mov r10,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + or r13,r0 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+40,r18 + std Z+41,r19 + std Z+42,r20 + std Z+43,r21 + std Z+44,r22 + std Z+45,r23 + std Z+46,r26 + std Z+47,r27 + ldd r18,Z+48 + ldd r19,Z+49 + ldd r20,Z+50 + ldd r21,Z+51 + ldd r22,Z+52 + ldd r23,Z+53 + ldd r26,Z+54 + ldd r27,Z+55 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r10 + mov r10,r11 + mov r11,r12 + mov r12,r13 + mov r13,r0 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r27 + mov r27,r26 + mov r26,r23 + mov r23,r22 + mov r22,r0 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+48,r18 + std Z+49,r19 + std Z+50,r20 + std Z+51,r21 + std Z+52,r22 + std Z+53,r23 + std Z+54,r26 + std Z+55,r27 + ldd r18,Z+56 + ldd r19,Z+57 + ldd r20,Z+58 + ldd r21,Z+59 + ldd r22,Z+60 + ldd r23,Z+61 + ldd r26,Z+62 + ldd r27,Z+63 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r13 + mov r13,r12 + mov r12,r11 + mov r11,r10 + mov r10,r0 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r27 + mov r27,r26 + mov r26,r23 + mov r23,r22 + mov r22,r0 + mov r0,r1 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + or r27,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 
+ eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+56,r18 + std Z+57,r19 + std Z+58,r20 + std Z+59,r21 + std Z+60,r22 + std Z+61,r23 + std Z+62,r26 + std Z+63,r27 + ldd r18,Y+1 + ldd r19,Y+2 + ldd r20,Y+3 + ldd r21,Y+4 + ldd r22,Y+5 + ldd r23,Y+6 + ldd r26,Y+7 + ldd r27,Y+8 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r10 + mov r10,r11 + mov r11,r12 + mov r12,r13 + mov r13,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + or r13,r0 + mov r0,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r22 + mov r22,r26 + mov r26,r0 + mov r0,r23 + mov r23,r27 + mov r27,r0 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + ldd r10,Y+9 + ldd r11,Y+10 + ldd r12,Y+11 + ldd r13,Y+12 + eor r10,r18 + eor r11,r19 + eor r12,r20 + eor r13,r21 + ldd r0,Z+12 + eor r10,r0 + ldd r0,Z+13 + eor r11,r0 + ldd r0,Z+14 + eor r12,r0 + ldd r0,Z+15 + eor r13,r0 + ldd r0,Z+32 + eor r10,r0 + ldd r0,Z+33 + eor r11,r0 + ldd r0,Z+34 + eor r12,r0 + ldd r0,Z+35 + eor r13,r0 + ldd r0,Z+52 + eor r10,r0 + ldd r0,Z+53 + eor r11,r0 + ldd r0,Z+54 + eor r12,r0 + ldd r0,Z+55 + eor r13,r0 + std Y+9,r10 + std Y+10,r11 + std Y+11,r12 + std Y+12,r13 + ldd r10,Y+13 + ldd r11,Y+14 + ldd r12,Y+15 + ldd r13,Y+16 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + ldd r0,Z+16 + eor r10,r0 + ldd r0,Z+17 + eor r11,r0 + ldd r0,Z+18 + eor r12,r0 + ldd r0,Z+19 + eor r13,r0 + ldd r0,Z+36 + eor r10,r0 + ldd r0,Z+37 + eor r11,r0 + ldd r0,Z+38 + eor r12,r0 + ldd r0,Z+39 + eor r13,r0 + ldd r0,Z+40 + eor r10,r0 + ldd r0,Z+41 + eor r11,r0 + ldd r0,Z+42 + eor r12,r0 + ldd r0,Z+43 + eor r13,r0 + std Y+13,r10 + std Y+14,r11 + std Y+15,r12 + std Y+16,r13 + ldd r10,Y+17 + ldd r11,Y+18 + ldd r12,Y+19 + ldd r13,Y+20 + eor r10,r2 + eor r11,r3 + eor r12,r4 + eor r13,r5 + ld r0,Z + eor r10,r0 + ldd r0,Z+1 + eor r11,r0 + ldd r0,Z+2 + eor r12,r0 + ldd r0,Z+3 + eor r13,r0 + ldd r0,Z+20 + eor r10,r0 + ldd r0,Z+21 + eor r11,r0 + ldd r0,Z+22 + eor r12,r0 + ldd r0,Z+23 + eor r13,r0 + ldd r0,Z+44 + eor r10,r0 + ldd r0,Z+45 + eor r11,r0 + ldd r0,Z+46 + eor r12,r0 + ldd r0,Z+47 + eor r13,r0 + std Y+17,r10 + std Y+18,r11 + std Y+19,r12 + std Y+20,r13 + ldd r10,Y+21 + ldd r11,Y+22 + ldd r12,Y+23 + ldd r13,Y+24 + eor r10,r6 + eor r11,r7 + eor r12,r8 + eor r13,r9 + ldd r0,Z+4 + eor r10,r0 + ldd r0,Z+5 + eor r11,r0 + ldd r0,Z+6 + eor r12,r0 + ldd r0,Z+7 + eor r13,r0 + ldd r0,Z+8 + eor r10,r0 + ldd r0,Z+9 + eor r11,r0 + ldd r0,Z+10 + eor r12,r0 + ldd r0,Z+11 + eor r13,r0 + ldd r0,Z+48 + eor r10,r0 + ldd r0,Z+49 + eor r11,r0 + ldd r0,Z+50 + eor r12,r0 + ldd r0,Z+51 + eor r13,r0 + std Y+21,r10 + std Y+22,r11 + std Y+23,r12 + std Y+24,r13 + ldd r10,Y+25 + dec r10 + std Y+25,r10 + breq 6623f + rjmp 40b +6623: + std Z+24,r2 + std Z+25,r3 + std Z+26,r4 + std Z+27,r5 + std Z+28,r6 + std Z+29,r7 + std Z+30,r8 + std Z+31,r9 + st -Z,r27 + st -Z,r26 + st -Z,r23 + st -Z,r22 + st -Z,r21 + st -Z,r20 + st -Z,r19 + st -Z,r18 + ldi 
r25,72 + add r30,r25 + adc r31,r1 + ldd r18,Y+9 + ldd r19,Y+10 + ldd r20,Y+11 + ldd r21,Y+12 + ldd r22,Y+13 + ldd r23,Y+14 + ldd r26,Y+15 + ldd r27,Y+16 + ldd r2,Y+17 + ldd r3,Y+18 + ldd r4,Y+19 + ldd r5,Y+20 + ldd r6,Y+21 + ldd r7,Y+22 + ldd r8,Y+23 + ldd r9,Y+24 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r22 + std Z+5,r23 + std Z+6,r26 + std Z+7,r27 + std Z+8,r2 + std Z+9,r3 + std Z+10,r4 + std Z+11,r5 + std Z+12,r6 + std Z+13,r7 + std Z+14,r8 + std Z+15,r9 + adiw r28,26 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size drysponge256_g, .-drysponge256_g + +#endif diff --git a/drygascon/Implementations/crypto_hash/drygascon128/rhys/internal-drysponge.c b/drygascon/Implementations/crypto_hash/drygascon128/rhys/internal-drysponge.c new file mode 100644 index 0000000..6dfe48c --- /dev/null +++ b/drygascon/Implementations/crypto_hash/drygascon128/rhys/internal-drysponge.c @@ -0,0 +1,611 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "internal-drysponge.h" +#include <string.h> + +#if !defined(__AVR__) + +/* Right rotations in bit-interleaved format */ +#define intRightRotateEven(x,bits) \ + (__extension__ ({ \ + uint32_t _x0 = (uint32_t)(x); \ + uint32_t _x1 = (uint32_t)((x) >> 32); \ + _x0 = rightRotate(_x0, (bits)); \ + _x1 = rightRotate(_x1, (bits)); \ + _x0 | (((uint64_t)_x1) << 32); \ + })) +#define intRightRotateOdd(x,bits) \ + (__extension__ ({ \ + uint32_t _x0 = (uint32_t)(x); \ + uint32_t _x1 = (uint32_t)((x) >> 32); \ + _x0 = rightRotate(_x0, ((bits) + 1) % 32); \ + _x1 = rightRotate(_x1, (bits)); \ + _x1 | (((uint64_t)_x0) << 32); \ + })) +#define intRightRotate1_64(x) \ + (__extension__ ({ \ + uint32_t _x0 = (uint32_t)(x); \ + uint32_t _x1 = (uint32_t)((x) >> 32); \ + _x0 = rightRotate1(_x0); \ + _x1 | (((uint64_t)_x0) << 32); \ + })) +#define intRightRotate2_64(x) (intRightRotateEven((x), 1)) +#define intRightRotate3_64(x) (intRightRotateOdd((x), 1)) +#define intRightRotate4_64(x) (intRightRotateEven((x), 2)) +#define intRightRotate5_64(x) (intRightRotateOdd((x), 2)) +#define intRightRotate6_64(x) (intRightRotateEven((x), 3)) +#define intRightRotate7_64(x) (intRightRotateOdd((x), 3)) +#define intRightRotate8_64(x) (intRightRotateEven((x), 4)) +#define intRightRotate9_64(x) (intRightRotateOdd((x), 4)) +#define intRightRotate10_64(x) (intRightRotateEven((x), 5)) +#define intRightRotate11_64(x) (intRightRotateOdd((x), 5)) +#define intRightRotate12_64(x) (intRightRotateEven((x), 6)) +#define intRightRotate13_64(x) (intRightRotateOdd((x), 6)) +#define intRightRotate14_64(x) (intRightRotateEven((x), 7)) +#define intRightRotate15_64(x) (intRightRotateOdd((x), 7)) +#define intRightRotate16_64(x) (intRightRotateEven((x), 8)) +#define intRightRotate17_64(x) (intRightRotateOdd((x), 8)) +#define intRightRotate18_64(x) (intRightRotateEven((x), 9)) +#define intRightRotate19_64(x) (intRightRotateOdd((x), 9)) +#define intRightRotate20_64(x) (intRightRotateEven((x), 10)) +#define intRightRotate21_64(x) (intRightRotateOdd((x), 10)) +#define intRightRotate22_64(x) (intRightRotateEven((x), 11)) +#define intRightRotate23_64(x) (intRightRotateOdd((x), 11)) +#define intRightRotate24_64(x) (intRightRotateEven((x), 12)) +#define intRightRotate25_64(x) (intRightRotateOdd((x), 12)) +#define intRightRotate26_64(x) (intRightRotateEven((x), 13)) +#define intRightRotate27_64(x) (intRightRotateOdd((x), 13)) +#define intRightRotate28_64(x) (intRightRotateEven((x), 14)) +#define intRightRotate29_64(x) (intRightRotateOdd((x), 14)) +#define intRightRotate30_64(x) (intRightRotateEven((x), 15)) +#define intRightRotate31_64(x) (intRightRotateOdd((x), 15)) +#define intRightRotate32_64(x) (intRightRotateEven((x), 16)) +#define intRightRotate33_64(x) (intRightRotateOdd((x), 16)) +#define intRightRotate34_64(x) (intRightRotateEven((x), 17)) +#define intRightRotate35_64(x) (intRightRotateOdd((x), 17)) +#define intRightRotate36_64(x) (intRightRotateEven((x), 18)) +#define intRightRotate37_64(x) (intRightRotateOdd((x), 18)) +#define intRightRotate38_64(x) (intRightRotateEven((x), 19)) +#define intRightRotate39_64(x) (intRightRotateOdd((x), 19)) +#define intRightRotate40_64(x) (intRightRotateEven((x), 20)) +#define intRightRotate41_64(x) (intRightRotateOdd((x), 20)) +#define intRightRotate42_64(x) (intRightRotateEven((x), 21)) +#define intRightRotate43_64(x) (intRightRotateOdd((x), 21)) +#define intRightRotate44_64(x) (intRightRotateEven((x), 22)) +#define intRightRotate45_64(x) (intRightRotateOdd((x), 22)) +#define intRightRotate46_64(x)
(intRightRotateEven((x), 23)) +#define intRightRotate47_64(x) (intRightRotateOdd((x), 23)) +#define intRightRotate48_64(x) (intRightRotateEven((x), 24)) +#define intRightRotate49_64(x) (intRightRotateOdd((x), 24)) +#define intRightRotate50_64(x) (intRightRotateEven((x), 25)) +#define intRightRotate51_64(x) (intRightRotateOdd((x), 25)) +#define intRightRotate52_64(x) (intRightRotateEven((x), 26)) +#define intRightRotate53_64(x) (intRightRotateOdd((x), 26)) +#define intRightRotate54_64(x) (intRightRotateEven((x), 27)) +#define intRightRotate55_64(x) (intRightRotateOdd((x), 27)) +#define intRightRotate56_64(x) (intRightRotateEven((x), 28)) +#define intRightRotate57_64(x) (intRightRotateOdd((x), 28)) +#define intRightRotate58_64(x) (intRightRotateEven((x), 29)) +#define intRightRotate59_64(x) (intRightRotateOdd((x), 29)) +#define intRightRotate60_64(x) (intRightRotateEven((x), 30)) +#define intRightRotate61_64(x) (intRightRotateOdd((x), 30)) +#define intRightRotate62_64(x) (intRightRotateEven((x), 31)) +#define intRightRotate63_64(x) (intRightRotateOdd((x), 31)) + +void gascon128_core_round(gascon128_state_t *state, uint8_t round) +{ + uint64_t t0, t1, t2, t3, t4; + + /* Load the state into local varaibles */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + uint64_t x0 = state->S[0]; + uint64_t x1 = state->S[1]; + uint64_t x2 = state->S[2]; + uint64_t x3 = state->S[3]; + uint64_t x4 = state->S[4]; +#else + uint64_t x0 = le_load_word64(state->B); + uint64_t x1 = le_load_word64(state->B + 8); + uint64_t x2 = le_load_word64(state->B + 16); + uint64_t x3 = le_load_word64(state->B + 24); + uint64_t x4 = le_load_word64(state->B + 32); +#endif + + /* Add the round constant to the middle of the state */ + x2 ^= ((0x0F - round) << 4) | round; + + /* Substitution layer */ + x0 ^= x4; x2 ^= x1; x4 ^= x3; t0 = (~x0) & x1; t1 = (~x1) & x2; + t2 = (~x2) & x3; t3 = (~x3) & x4; t4 = (~x4) & x0; x0 ^= t1; + x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t0; x1 ^= x0; x3 ^= x2; + x0 ^= x4; x2 = ~x2; + + /* Linear diffusion layer */ + x0 ^= intRightRotate19_64(x0) ^ intRightRotate28_64(x0); + x1 ^= intRightRotate61_64(x1) ^ intRightRotate38_64(x1); + x2 ^= intRightRotate1_64(x2) ^ intRightRotate6_64(x2); + x3 ^= intRightRotate10_64(x3) ^ intRightRotate17_64(x3); + x4 ^= intRightRotate7_64(x4) ^ intRightRotate40_64(x4); + + /* Write the local variables back to the state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + state->S[0] = x0; + state->S[1] = x1; + state->S[2] = x2; + state->S[3] = x3; + state->S[4] = x4; +#else + le_store_word64(state->B, x0); + le_store_word64(state->B + 8, x1); + le_store_word64(state->B + 16, x2); + le_store_word64(state->B + 24, x3); + le_store_word64(state->B + 32, x4); +#endif +} + +void gascon256_core_round(gascon256_state_t *state, uint8_t round) +{ + uint64_t t0, t1, t2, t3, t4, t5, t6, t7, t8; + + /* Load the state into local varaibles */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + uint64_t x0 = state->S[0]; + uint64_t x1 = state->S[1]; + uint64_t x2 = state->S[2]; + uint64_t x3 = state->S[3]; + uint64_t x4 = state->S[4]; + uint64_t x5 = state->S[5]; + uint64_t x6 = state->S[6]; + uint64_t x7 = state->S[7]; + uint64_t x8 = state->S[8]; +#else + uint64_t x0 = le_load_word64(state->B); + uint64_t x1 = le_load_word64(state->B + 8); + uint64_t x2 = le_load_word64(state->B + 16); + uint64_t x3 = le_load_word64(state->B + 24); + uint64_t x4 = le_load_word64(state->B + 32); + uint64_t x5 = le_load_word64(state->B + 40); + uint64_t x6 = le_load_word64(state->B + 48); + uint64_t x7 = le_load_word64(state->B + 56); + 
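[Aside: an illustrative sketch, not part of the patch. The intRightRotate*_64 macros defined above operate on DryGASCON's bit-interleaved word layout; judging from the macros themselves, the low 32-bit half of each 64-bit word holds the even-numbered bits and the high half the odd-numbered bits, so a 64-bit rotation by 2k reduces to two 32-bit rotations by k, and a rotation by 2k+1 additionally swaps the halves, rotating them by k and k+1. The helper below is hypothetical and only illustrates that correspondence.]

/* Sketch: build the interleaved form of a plain 64-bit word, with even
 * bits in the low half and odd bits in the high half. */
static uint64_t interleave_example(uint64_t plain)
{
    uint32_t even = 0, odd = 0;
    unsigned i;
    for (i = 0; i < 32; ++i) {
        even |= (uint32_t)((plain >> (2 * i)) & 1) << i;
        odd  |= (uint32_t)((plain >> (2 * i + 1)) & 1) << i;
    }
    return (((uint64_t)odd) << 32) | even;
}
/* With this helper, interleaving a plain 64-bit right-rotation of p by 3
 * gives the same value as intRightRotate3_64(interleave_example(p)), and
 * likewise for the other rotation amounts. */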
uint64_t x8 = le_load_word64(state->B + 64); +#endif + + /* Add the round constant to the middle of the state */ + x4 ^= ((0x0F - round) << 4) | round; + + /* Substitution layer */ + x0 ^= x8; x2 ^= x1; x4 ^= x3; x6 ^= x5; x8 ^= x7; t0 = (~x0) & x1; + t1 = (~x1) & x2; t2 = (~x2) & x3; t3 = (~x3) & x4; t4 = (~x4) & x5; + t5 = (~x5) & x6; t6 = (~x6) & x7; t7 = (~x7) & x8; t8 = (~x8) & x0; + x0 ^= t1; x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t5; x5 ^= t6; x6 ^= t7; + x7 ^= t8; x8 ^= t0; x1 ^= x0; x3 ^= x2; x5 ^= x4; x7 ^= x6; x0 ^= x8; + x4 = ~x4; + + /* Linear diffusion layer */ + x0 ^= intRightRotate19_64(x0) ^ intRightRotate28_64(x0); + x1 ^= intRightRotate61_64(x1) ^ intRightRotate38_64(x1); + x2 ^= intRightRotate1_64(x2) ^ intRightRotate6_64(x2); + x3 ^= intRightRotate10_64(x3) ^ intRightRotate17_64(x3); + x4 ^= intRightRotate7_64(x4) ^ intRightRotate40_64(x4); + x5 ^= intRightRotate31_64(x5) ^ intRightRotate26_64(x5); + x6 ^= intRightRotate53_64(x6) ^ intRightRotate58_64(x6); + x7 ^= intRightRotate9_64(x7) ^ intRightRotate46_64(x7); + x8 ^= intRightRotate43_64(x8) ^ intRightRotate50_64(x8); + + /* Write the local variables back to the state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + state->S[0] = x0; + state->S[1] = x1; + state->S[2] = x2; + state->S[3] = x3; + state->S[4] = x4; + state->S[5] = x5; + state->S[6] = x6; + state->S[7] = x7; + state->S[8] = x8; +#else + le_store_word64(state->B, x0); + le_store_word64(state->B + 8, x1); + le_store_word64(state->B + 16, x2); + le_store_word64(state->B + 24, x3); + le_store_word64(state->B + 32, x4); + le_store_word64(state->B + 40, x5); + le_store_word64(state->B + 48, x6); + le_store_word64(state->B + 56, x7); + le_store_word64(state->B + 64, x8); +#endif +} + +void drysponge128_g(drysponge128_state_t *state) +{ + unsigned round; + + /* Perform the first round. For each round we XOR the 16 bytes of + * the output data with the first 16 bytes of the state. And then + * XOR with the next 16 bytes of the state, rotated by 4 bytes */ + gascon128_core_round(&(state->c), 0); + state->r.W[0] = state->c.W[0] ^ state->c.W[5]; + state->r.W[1] = state->c.W[1] ^ state->c.W[6]; + state->r.W[2] = state->c.W[2] ^ state->c.W[7]; + state->r.W[3] = state->c.W[3] ^ state->c.W[4]; + + /* Perform the rest of the rounds */ + for (round = 1; round < state->rounds; ++round) { + gascon128_core_round(&(state->c), round); + state->r.W[0] ^= state->c.W[0] ^ state->c.W[5]; + state->r.W[1] ^= state->c.W[1] ^ state->c.W[6]; + state->r.W[2] ^= state->c.W[2] ^ state->c.W[7]; + state->r.W[3] ^= state->c.W[3] ^ state->c.W[4]; + } +} + +void drysponge256_g(drysponge256_state_t *state) +{ + unsigned round; + + /* Perform the first round. For each round we XOR the 16 bytes of + * the output data with the first 16 bytes of the state. And then + * XOR with the next 16 bytes of the state, rotated by 4 bytes. + * And so on for a total of 64 bytes XOR'ed into the output data. 
*/ + gascon256_core_round(&(state->c), 0); + state->r.W[0] = state->c.W[0] ^ state->c.W[5] ^ + state->c.W[10] ^ state->c.W[15]; + state->r.W[1] = state->c.W[1] ^ state->c.W[6] ^ + state->c.W[11] ^ state->c.W[12]; + state->r.W[2] = state->c.W[2] ^ state->c.W[7] ^ + state->c.W[8] ^ state->c.W[13]; + state->r.W[3] = state->c.W[3] ^ state->c.W[4] ^ + state->c.W[9] ^ state->c.W[14]; + + /* Perform the rest of the rounds */ + for (round = 1; round < state->rounds; ++round) { + gascon256_core_round(&(state->c), round); + state->r.W[0] ^= state->c.W[0] ^ state->c.W[5] ^ + state->c.W[10] ^ state->c.W[15]; + state->r.W[1] ^= state->c.W[1] ^ state->c.W[6] ^ + state->c.W[11] ^ state->c.W[12]; + state->r.W[2] ^= state->c.W[2] ^ state->c.W[7] ^ + state->c.W[8] ^ state->c.W[13]; + state->r.W[3] ^= state->c.W[3] ^ state->c.W[4] ^ + state->c.W[9] ^ state->c.W[14]; + } +} + +#endif /* !__AVR__ */ + +void drysponge128_g_core(drysponge128_state_t *state) +{ + unsigned round; + for (round = 0; round < state->rounds; ++round) + gascon128_core_round(&(state->c), round); +} + +void drysponge256_g_core(drysponge256_state_t *state) +{ + unsigned round; + for (round = 0; round < state->rounds; ++round) + gascon256_core_round(&(state->c), round); +} + +/** + * \fn uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index) + * \brief Selects an element of x in constant time. + * + * \param x Points to the four elements of x. + * \param index Index of which element to extract between 0 and 3. + * + * \return The selected element of x. + */ +#if !defined(__AVR__) +STATIC_INLINE uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index) +{ + /* We need to be careful how we select each element of x because + * we are doing a data-dependent fetch here. Do the fetch in a way + * that should avoid cache timing issues by fetching every element + * of x and masking away the ones we don't want. + * + * There is a possible side channel here with respect to power analysis. + * The "mask" value will be all-ones for the selected index and all-zeroes + * for the other indexes. This may show up as different power consumption + * for the "result ^= x[i] & mask" statement when i is the selected index. + * Such a side channel could in theory allow reading the plaintext input + * to the cipher by analysing the CPU's power consumption. + * + * The DryGASCON specification acknowledges the possibility of plaintext + * recovery in section 7.4. For software mitigation the specification + * suggests randomization of the indexes into c and x and randomization + * of the order of processing words. We aren't doing that here yet. + * Patches welcome to fix this. + */ + uint32_t mask = -((uint32_t)((0x04 - index) >> 2)); + uint32_t result = x[0] & mask; + mask = -((uint32_t)((0x04 - (index ^ 0x01)) >> 2)); + result ^= x[1] & mask; + mask = -((uint32_t)((0x04 - (index ^ 0x02)) >> 2)); + result ^= x[2] & mask; + mask = -((uint32_t)((0x04 - (index ^ 0x03)) >> 2)); + return result ^ (x[3] & mask); +} +#else +/* AVR is more or less immune to cache timing issues because it doesn't + * have anything like an L1 or L2 cache. Select the word directly */ +#define drysponge_select_x(x, index) ((x)[(index)]) +#endif + +/** + * \brief Mixes a 32-bit value into the DrySPONGE128 state. + * + * \param state DrySPONGE128 state. + * \param data The data to be mixed in the bottom 10 bits. 
+ */ +static void drysponge128_mix_phase_round + (drysponge128_state_t *state, uint32_t data) +{ + /* Mix in elements from x according to the 2-bit indexes in the data */ + state->c.W[0] ^= drysponge_select_x(state->x.W, data & 0x03); + state->c.W[2] ^= drysponge_select_x(state->x.W, (data >> 2) & 0x03); + state->c.W[4] ^= drysponge_select_x(state->x.W, (data >> 4) & 0x03); + state->c.W[6] ^= drysponge_select_x(state->x.W, (data >> 6) & 0x03); + state->c.W[8] ^= drysponge_select_x(state->x.W, (data >> 8) & 0x03); +} + +/** + * \brief Mixes a 32-bit value into the DrySPONGE256 state. + * + * \param state DrySPONGE256 state. + * \param data The data to be mixed in the bottom 18 bits. + */ +static void drysponge256_mix_phase_round + (drysponge256_state_t *state, uint32_t data) +{ + /* Mix in elements from x according to the 2-bit indexes in the data */ + state->c.W[0] ^= drysponge_select_x(state->x.W, data & 0x03); + state->c.W[2] ^= drysponge_select_x(state->x.W, (data >> 2) & 0x03); + state->c.W[4] ^= drysponge_select_x(state->x.W, (data >> 4) & 0x03); + state->c.W[6] ^= drysponge_select_x(state->x.W, (data >> 6) & 0x03); + state->c.W[8] ^= drysponge_select_x(state->x.W, (data >> 8) & 0x03); + state->c.W[10] ^= drysponge_select_x(state->x.W, (data >> 10) & 0x03); + state->c.W[12] ^= drysponge_select_x(state->x.W, (data >> 12) & 0x03); + state->c.W[14] ^= drysponge_select_x(state->x.W, (data >> 14) & 0x03); + state->c.W[16] ^= drysponge_select_x(state->x.W, (data >> 16) & 0x03); +} + +/** + * \brief Mixes an input block into a DrySPONGE128 state. + * + * \param state The DrySPONGE128 state. + * \param data Full rate block containing the input data. + */ +static void drysponge128_mix_phase + (drysponge128_state_t *state, const unsigned char data[DRYSPONGE128_RATE]) +{ + /* Mix 10-bit groups into the output, with the domain + * separator added to the last two groups */ + drysponge128_mix_phase_round + (state, data[0] | (((uint32_t)(data[1])) << 8)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, (data[1] >> 2) | (((uint32_t)(data[2])) << 6)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, (data[2] >> 4) | (((uint32_t)(data[3])) << 4)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, (data[3] >> 6) | (((uint32_t)(data[4])) << 2)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, data[5] | (((uint32_t)(data[6])) << 8)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, (data[6] >> 2) | (((uint32_t)(data[7])) << 6)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, (data[7] >> 4) | (((uint32_t)(data[8])) << 4)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, (data[8] >> 6) | (((uint32_t)(data[9])) << 2)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, data[10] | (((uint32_t)(data[11])) << 8)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, (data[11] >> 2) | (((uint32_t)(data[12])) << 6)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, (data[12] >> 4) | (((uint32_t)(data[13])) << 4)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, ((data[13] >> 6) | (((uint32_t)(data[14])) << 2))); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round(state, data[15] ^ state->domain); + gascon128_core_round(&(state->c), 0); + 
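[Aside: an illustrative sketch, not part of the patch. This function consumes the 16-byte rate block as 10-bit groups, low bits first. For groups 0 to 11 the unrolled expressions above are exactly the generic slice computed by the hypothetical helper below; group 12 is data[15] with domain bits 8..9 folded in by the XOR above, and the call that follows feeds the remaining domain bits (state->domain >> 10) as the final group, which is how the domain separator lands in the last two groups as described at the top of the function.]

/* Hypothetical helper, equivalent to what the unrolled calls compute for
 * groups 0..11 of the 16-byte rate block. */
static uint32_t drysponge128_group_example(const unsigned char data[16], unsigned g)
{
    unsigned bit = 10 * g; /* first bit of group g */
    return ((data[bit / 8] >> (bit % 8)) |
            (((uint32_t)data[bit / 8 + 1]) << (8 - bit % 8))) & 0x3FF;
    /* The real code does not mask; the mix round only uses the low 10 bits. */
}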
drysponge128_mix_phase_round(state, state->domain >> 10); + + /* Revert to the default domain separator for the next block */ + state->domain = 0; +} + +/** + * \brief Mixes an input block into a DrySPONGE256 state. + * + * \param state The DrySPONGE256 state. + * \param data Full rate block containing the input data. + */ +static void drysponge256_mix_phase + (drysponge256_state_t *state, const unsigned char data[DRYSPONGE256_RATE]) +{ + /* Mix 18-bit groups into the output, with the domain in the last group */ + drysponge256_mix_phase_round + (state, data[0] | (((uint32_t)(data[1])) << 8) | + (((uint32_t)(data[2])) << 16)); + gascon256_core_round(&(state->c), 0); + drysponge256_mix_phase_round + (state, (data[2] >> 2) | (((uint32_t)(data[3])) << 6) | + (((uint32_t)(data[4])) << 14)); + gascon256_core_round(&(state->c), 0); + drysponge256_mix_phase_round + (state, (data[4] >> 4) | (((uint32_t)(data[5])) << 4) | + (((uint32_t)(data[6])) << 12)); + gascon256_core_round(&(state->c), 0); + drysponge256_mix_phase_round + (state, (data[6] >> 6) | (((uint32_t)(data[7])) << 2) | + (((uint32_t)(data[8])) << 10)); + gascon256_core_round(&(state->c), 0); + drysponge256_mix_phase_round + (state, data[9] | (((uint32_t)(data[10])) << 8) | + (((uint32_t)(data[11])) << 16)); + gascon256_core_round(&(state->c), 0); + drysponge256_mix_phase_round + (state, (data[11] >> 2) | (((uint32_t)(data[12])) << 6) | + (((uint32_t)(data[13])) << 14)); + gascon256_core_round(&(state->c), 0); + drysponge256_mix_phase_round + (state, (data[13] >> 4) | (((uint32_t)(data[14])) << 4) | + (((uint32_t)(data[15])) << 12)); + gascon256_core_round(&(state->c), 0); + drysponge256_mix_phase_round + (state, (data[15] >> 6) ^ state->domain); + + /* Revert to the default domain separator for the next block */ + state->domain = 0; +} + +void drysponge128_f_absorb + (drysponge128_state_t *state, const unsigned char *input, unsigned len) +{ + if (len >= DRYSPONGE128_RATE) { + drysponge128_mix_phase(state, input); + } else { + unsigned char padded[DRYSPONGE128_RATE]; + memcpy(padded, input, len); + padded[len] = 0x01; + memset(padded + len + 1, 0, DRYSPONGE128_RATE - len - 1); + drysponge128_mix_phase(state, padded); + } +} + +void drysponge256_f_absorb + (drysponge256_state_t *state, const unsigned char *input, unsigned len) +{ + if (len >= DRYSPONGE256_RATE) { + drysponge256_mix_phase(state, input); + } else { + unsigned char padded[DRYSPONGE256_RATE]; + memcpy(padded, input, len); + padded[len] = 0x01; + memset(padded + len + 1, 0, DRYSPONGE256_RATE - len - 1); + drysponge256_mix_phase(state, padded); + } +} + +/** + * \brief Determine if some of the words of an "x" value are identical. + * + * \param x Points to the "x" buffer to check. + * + * \return Non-zero if some of the words are the same, zero if they are + * distinct from each other. + * + * We try to perform the check in constant time to avoid giving away + * any information about the value of the key. 
+ */ +static int drysponge_x_words_are_same(const uint32_t x[4]) +{ + unsigned i, j; + int result = 0; + for (i = 0; i < 3; ++i) { + for (j = i + 1; j < 4; ++j) { + uint32_t check = x[i] ^ x[j]; + result |= (int)((0x100000000ULL - check) >> 32); + } + } + return result; +} + +void drysponge128_setup + (drysponge128_state_t *state, const unsigned char *key, + const unsigned char *nonce, int final_block) +{ + /* Fill the GASCON-128 state with repeated copies of the key */ + memcpy(state->c.B, key, 16); + memcpy(state->c.B + 16, key, 16); + memcpy(state->c.B + 32, key, 8); + + /* Generate the "x" value for the state. All four words of "x" + * must be unique because they will be used in drysponge_select_x() + * as stand-ins for the bit pairs 00, 01, 10, and 11. + * + * Run the core block operation over and over until "x" is unique. + * Technically the runtime here is key-dependent and not constant. + * If the input key is randomized, this should only take 1 round + * on average so it is "almost constant time". + */ + do { + gascon128_core_round(&(state->c), 0); + } while (drysponge_x_words_are_same(state->c.W)); + memcpy(state->x.W, state->c.W, sizeof(state->x)); + + /* Replace the generated "x" value in the state with the key prefix */ + memcpy(state->c.W, key, sizeof(state->x)); + + /* Absorb the nonce into the state with an increased number of rounds */ + state->rounds = DRYSPONGE128_INIT_ROUNDS; + state->domain = DRYDOMAIN128_NONCE; + if (final_block) + state->domain |= DRYDOMAIN128_FINAL; + drysponge128_f_absorb(state, nonce, 16); + drysponge128_g(state); + + /* Set up the normal number of rounds for future operations */ + state->rounds = DRYSPONGE128_ROUNDS; +} + +void drysponge256_setup + (drysponge256_state_t *state, const unsigned char *key, + const unsigned char *nonce, int final_block) +{ + /* Fill the GASCON-256 state with repeated copies of the key */ + memcpy(state->c.B, key, 32); + memcpy(state->c.B + 32, key, 32); + memcpy(state->c.B + 64, key, 8); + + /* Generate the "x" value for the state */ + do { + gascon256_core_round(&(state->c), 0); + } while (drysponge_x_words_are_same(state->c.W)); + memcpy(state->x.W, state->c.W, sizeof(state->x)); + + /* Replace the generated "x" value in the state with the key prefix */ + memcpy(state->c.W, key, sizeof(state->x)); + + /* Absorb the nonce into the state with an increased number of rounds */ + state->rounds = DRYSPONGE256_INIT_ROUNDS; + state->domain = DRYDOMAIN256_NONCE; + if (final_block) + state->domain |= DRYDOMAIN256_FINAL; + drysponge256_f_absorb(state, nonce, 16); + drysponge256_g(state); + + /* Set up the normal number of rounds for future operations */ + state->rounds = DRYSPONGE256_ROUNDS; +} diff --git a/drygascon/Implementations/crypto_hash/drygascon128/rhys/internal-drysponge.h b/drygascon/Implementations/crypto_hash/drygascon128/rhys/internal-drysponge.h new file mode 100644 index 0000000..05b0c16 --- /dev/null +++ b/drygascon/Implementations/crypto_hash/drygascon128/rhys/internal-drysponge.h @@ -0,0 +1,345 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_DRYSPONGE_H +#define LW_INTERNAL_DRYSPONGE_H + +#include "internal-util.h" + +/** + * \file internal-drysponge.h + * \brief Internal implementation of DrySPONGE for the DryGASCON cipher. + * + * References: https://github.com/sebastien-riou/DryGASCON + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the GASCON-128 permutation state in bytes. + */ +#define GASCON128_STATE_SIZE 40 + +/** + * \brief Size of the GASCON-256 permutation state in bytes. + */ +#define GASCON256_STATE_SIZE 72 + +/** + * \brief Rate of absorption and squeezing for DrySPONGE128. + */ +#define DRYSPONGE128_RATE 16 + +/** + * \brief Rate of absorption and squeezing for DrySPONGE256. + */ +#define DRYSPONGE256_RATE 16 + +/** + * \brief Size of the "x" value for DrySPONGE128. + */ +#define DRYSPONGE128_XSIZE 16 + +/** + * \brief Size of the "x" value for DrySPONGE256. + */ +#define DRYSPONGE256_XSIZE 16 + +/** + * \brief Normal number of rounds for DrySPONGE128 when absorbing + * and squeezing data. + */ +#define DRYSPONGE128_ROUNDS 7 + +/** + * \brief Number of rounds for DrySPONGE128 during initialization. + */ +#define DRYSPONGE128_INIT_ROUNDS 11 + +/** + * \brief Normal number of rounds for DrySPONGE256 when absorbing + * and squeezing data. + */ +#define DRYSPONGE256_ROUNDS 8 + +/** + * \brief Number of rounds for DrySPONGE256 during initialization. + */ +#define DRYSPONGE256_INIT_ROUNDS 12 + +/** + * \brief DrySPONGE128 domain bit for a padded block. + */ +#define DRYDOMAIN128_PADDED (1 << 8) + +/** + * \brief DrySPONGE128 domain bit for a final block. + */ +#define DRYDOMAIN128_FINAL (1 << 9) + +/** + * \brief DrySPONGE128 domain value for processing the nonce. + */ +#define DRYDOMAIN128_NONCE (1 << 10) + +/** + * \brief DrySPONGE128 domain value for processing the associated data. + */ +#define DRYDOMAIN128_ASSOC_DATA (2 << 10) + +/** + * \brief DrySPONGE128 domain value for processing the message. + */ +#define DRYDOMAIN128_MESSAGE (3 << 10) + +/** + * \brief DrySPONGE256 domain bit for a padded block. + */ +#define DRYDOMAIN256_PADDED (1 << 2) + +/** + * \brief DrySPONGE256 domain bit for a final block. + */ +#define DRYDOMAIN256_FINAL (1 << 3) + +/** + * \brief DrySPONGE256 domain value for processing the nonce. + */ +#define DRYDOMAIN256_NONCE (1 << 4) + +/** + * \brief DrySPONGE256 domain value for processing the associated data. 
+ */ +#define DRYDOMAIN256_ASSOC_DATA (2 << 4) + +/** + * \brief DrySPONGE256 domain value for processing the message. + */ +#define DRYDOMAIN256_MESSAGE (3 << 4) + +/** + * \brief Internal state of the GASCON-128 permutation. + */ +typedef union +{ + uint64_t S[GASCON128_STATE_SIZE / 8]; /**< 64-bit words of the state */ + uint32_t W[GASCON128_STATE_SIZE / 4]; /**< 32-bit words of the state */ + uint8_t B[GASCON128_STATE_SIZE]; /**< Bytes of the state */ + +} gascon128_state_t; + +/** + * \brief Internal state of the GASCON-256 permutation. + */ +typedef union +{ + uint64_t S[GASCON256_STATE_SIZE / 8]; /**< 64-bit words of the state */ + uint32_t W[GASCON256_STATE_SIZE / 4]; /**< 32-bit words of the state */ + uint8_t B[GASCON256_STATE_SIZE]; /**< Bytes of the state */ + +} gascon256_state_t; + +/** + * \brief Structure of a rate block for DrySPONGE128. + */ +typedef union +{ + uint64_t S[DRYSPONGE128_RATE / 8]; /**< 64-bit words of the rate */ + uint32_t W[DRYSPONGE128_RATE / 4]; /**< 32-bit words of the rate */ + uint8_t B[DRYSPONGE128_RATE]; /**< Bytes of the rate */ + +} drysponge128_rate_t; + +/** + * \brief Structure of a rate block for DrySPONGE256. + */ +typedef union +{ + uint64_t S[DRYSPONGE256_RATE / 8]; /**< 64-bit words of the rate */ + uint32_t W[DRYSPONGE256_RATE / 4]; /**< 32-bit words of the rate */ + uint8_t B[DRYSPONGE256_RATE]; /**< Bytes of the rate */ + +} drysponge256_rate_t; + +/** + * \brief Structure of the "x" value for DrySPONGE128. + */ +typedef union +{ + uint64_t S[DRYSPONGE128_XSIZE / 8]; /**< 64-bit words of the rate */ + uint32_t W[DRYSPONGE128_XSIZE / 4]; /**< 32-bit words of the rate */ + uint8_t B[DRYSPONGE128_XSIZE]; /**< Bytes of the rate */ + +} drysponge128_x_t; + +/** + * \brief Structure of the "x" value for DrySPONGE256. + */ +typedef union +{ + uint64_t S[DRYSPONGE256_XSIZE / 8]; /**< 64-bit words of the rate */ + uint32_t W[DRYSPONGE256_XSIZE / 4]; /**< 32-bit words of the rate */ + uint8_t B[DRYSPONGE256_XSIZE]; /**< Bytes of the rate */ + +} drysponge256_x_t; + +/** + * \brief Structure of the rolling DrySPONGE128 state. + */ +typedef struct +{ + gascon128_state_t c; /**< GASCON-128 state for the capacity */ + drysponge128_rate_t r; /**< Buffer for a rate block of data */ + drysponge128_x_t x; /**< "x" value for the sponge */ + uint32_t domain; /**< Domain value to mix on next F call */ + uint32_t rounds; /**< Number of rounds for next G call */ + +} drysponge128_state_t; + +/** + * \brief Structure of the rolling DrySPONGE256 state. + */ +typedef struct +{ + gascon256_state_t c; /**< GASCON-256 state for the capacity */ + drysponge256_rate_t r; /**< Buffer for a rate block of data */ + drysponge256_x_t x; /**< "x" value for the sponge */ + uint32_t domain; /**< Domain value to mix on next F call */ + uint32_t rounds; /**< Number of rounds for next G call */ + +} drysponge256_state_t; + +/** + * \brief Permutes the GASCON-128 state using one iteration of CoreRound. + * + * \param state The GASCON-128 state to be permuted. + * \param round The round number. + * + * The input and output \a state will be in little-endian byte order. + */ +void gascon128_core_round(gascon128_state_t *state, uint8_t round); + +/** + * \brief Permutes the GASCON-256 state using one iteration of CoreRound. + * + * \param state The GASCON-256 state to be permuted. + * \param round The round number. + * + * The input and output \a state will be in little-endian byte order. 
+ */ +void gascon256_core_round(gascon256_state_t *state, uint8_t round); + +/** + * \brief Performs the DrySPONGE128 G function which runs the core + * rounds and squeezes data out of the GASCON-128 state. + * + * \param state The DrySPONGE128 state. + * + * The data that is squeezed out will be in state->r on exit. + */ +void drysponge128_g(drysponge128_state_t *state); + +/** + * \brief Performs the DrySPONGE256 G function which runs the core + * rounds and squeezes data out of the GASCON-256 state. + * + * \param state The DrySPONGE256 state. + * + * The data that is squeezed out will be in state->r on exit. + */ +void drysponge256_g(drysponge256_state_t *state); + +/** + * \brief Performs the DrySPONGE128 G function which runs the core + * rounds but does not squeeze out any output. + * + * \param state The DrySPONGE128 state. + */ +void drysponge128_g_core(drysponge128_state_t *state); + +/** + * \brief Performs the DrySPONGE256 G function which runs the core + * rounds but does not squeeze out any output. + * + * \param state The DrySPONGE256 state. + */ +void drysponge256_g_core(drysponge256_state_t *state); + +/** + * \brief Performs the absorption phase of the DrySPONGE128 F function. + * + * \param state The DrySPONGE128 state. + * \param input The block of input data to incorporate into the state. + * \param len The length of the input block, which must be less than + * or equal to DRYSPONGE128_RATE. Smaller input blocks will be padded. + * + * This function must be followed by a call to drysponge128_g() or + * drysponge128_g_core() to perform the full F operation. + */ +void drysponge128_f_absorb + (drysponge128_state_t *state, const unsigned char *input, unsigned len); + +/** + * \brief Performs the absorption phase of the DrySPONGE256 F function. + * + * \param state The DrySPONGE256 state. + * \param input The block of input data to incorporate into the state. + * \param len The length of the input block, which must be less than + * or equal to DRYSPONGE256_RATE. Smaller input blocks will be padded. + * + * This function must be followed by a call to drysponge256_g() or + * drysponge256_g_core() to perform the full F operation. + */ +void drysponge256_f_absorb + (drysponge256_state_t *state, const unsigned char *input, unsigned len); + +/** + * \brief Set up a DrySPONGE128 state to begin encryption or decryption. + * + * \param state The DrySPONGE128 state. + * \param key Points to the 16 bytes of the key. + * \param nonce Points to the 16 bytes of the nonce. + * \param final_block Non-zero if after key setup there will be no more blocks. + */ +void drysponge128_setup + (drysponge128_state_t *state, const unsigned char *key, + const unsigned char *nonce, int final_block); + +/** + * \brief Set up a DrySPONGE256 state to begin encryption or decryption. + * + * \param state The DrySPONGE256 state. + * \param key Points to the 32 bytes of the key. + * \param nonce Points to the 16 bytes of the nonce. + * \param final_block Non-zero if after key setup there will be no more blocks.
+ */ +void drysponge256_setup + (drysponge256_state_t *state, const unsigned char *key, + const unsigned char *nonce, int final_block); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drygascon/Implementations/crypto_hash/drygascon128/rhys/internal-util.h b/drygascon/Implementations/crypto_hash/drygascon128/rhys/internal-util.h new file mode 100644 index 0000000..e30166d --- /dev/null +++ b/drygascon/Implementations/crypto_hash/drygascon128/rhys/internal-util.h @@ -0,0 +1,702 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_UTIL_H +#define LW_INTERNAL_UTIL_H + +#include <stdint.h> + +/* Figure out how to inline functions using this C compiler */ +#if defined(__STDC__) && __STDC_VERSION__ >= 199901L +#define STATIC_INLINE static inline +#elif defined(__GNUC__) || defined(__clang__) +#define STATIC_INLINE static __inline__ +#else +#define STATIC_INLINE static +#endif + +/* Try to figure out whether the CPU is little-endian or big-endian. + * May need to modify this to include new compiler-specific defines.
+ * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your + * compiler flags when you compile this library */ +#if defined(__x86_64) || defined(__x86_64__) || \ + defined(__i386) || defined(__i386__) || \ + defined(__AVR__) || defined(__arm) || defined(__arm__) || \ + defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ + defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ + defined(__LITTLE_ENDIAN__) +#define LW_UTIL_LITTLE_ENDIAN 1 +#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ + defined(__BIG_ENDIAN__) +/* Big endian */ +#else +#error "Cannot determine the endianess of this platform" +#endif + +/* Helper macros to load and store values while converting endian-ness */ + +/* Load a big-endian 32-bit word from a byte buffer */ +#define be_load_word32(ptr) \ + ((((uint32_t)((ptr)[0])) << 24) | \ + (((uint32_t)((ptr)[1])) << 16) | \ + (((uint32_t)((ptr)[2])) << 8) | \ + ((uint32_t)((ptr)[3]))) + +/* Store a big-endian 32-bit word into a byte buffer */ +#define be_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 24); \ + (ptr)[1] = (uint8_t)(_x >> 16); \ + (ptr)[2] = (uint8_t)(_x >> 8); \ + (ptr)[3] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 32-bit word from a byte buffer */ +#define le_load_word32(ptr) \ + ((((uint32_t)((ptr)[3])) << 24) | \ + (((uint32_t)((ptr)[2])) << 16) | \ + (((uint32_t)((ptr)[1])) << 8) | \ + ((uint32_t)((ptr)[0]))) + +/* Store a little-endian 32-bit word into a byte buffer */ +#define le_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + } while (0) + +/* Load a big-endian 64-bit word from a byte buffer */ +#define be_load_word64(ptr) \ + ((((uint64_t)((ptr)[0])) << 56) | \ + (((uint64_t)((ptr)[1])) << 48) | \ + (((uint64_t)((ptr)[2])) << 40) | \ + (((uint64_t)((ptr)[3])) << 32) | \ + (((uint64_t)((ptr)[4])) << 24) | \ + (((uint64_t)((ptr)[5])) << 16) | \ + (((uint64_t)((ptr)[6])) << 8) | \ + ((uint64_t)((ptr)[7]))) + +/* Store a big-endian 64-bit word into a byte buffer */ +#define be_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 56); \ + (ptr)[1] = (uint8_t)(_x >> 48); \ + (ptr)[2] = (uint8_t)(_x >> 40); \ + (ptr)[3] = (uint8_t)(_x >> 32); \ + (ptr)[4] = (uint8_t)(_x >> 24); \ + (ptr)[5] = (uint8_t)(_x >> 16); \ + (ptr)[6] = (uint8_t)(_x >> 8); \ + (ptr)[7] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 64-bit word from a byte buffer */ +#define le_load_word64(ptr) \ + ((((uint64_t)((ptr)[7])) << 56) | \ + (((uint64_t)((ptr)[6])) << 48) | \ + (((uint64_t)((ptr)[5])) << 40) | \ + (((uint64_t)((ptr)[4])) << 32) | \ + (((uint64_t)((ptr)[3])) << 24) | \ + (((uint64_t)((ptr)[2])) << 16) | \ + (((uint64_t)((ptr)[1])) << 8) | \ + ((uint64_t)((ptr)[0]))) + +/* Store a little-endian 64-bit word into a byte buffer */ +#define le_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + (ptr)[4] = (uint8_t)(_x >> 32); \ + (ptr)[5] = (uint8_t)(_x >> 40); \ + (ptr)[6] = (uint8_t)(_x >> 48); \ + (ptr)[7] = (uint8_t)(_x >> 56); \ + } while (0) + +/* Load a big-endian 16-bit word from a byte buffer */ +#define be_load_word16(ptr) \ + ((((uint16_t)((ptr)[0])) << 8) | \ + ((uint16_t)((ptr)[1]))) + +/* Store a 
big-endian 16-bit word into a byte buffer */ +#define be_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 8); \ + (ptr)[1] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 16-bit word from a byte buffer */ +#define le_load_word16(ptr) \ + ((((uint16_t)((ptr)[1])) << 8) | \ + ((uint16_t)((ptr)[0]))) + +/* Store a little-endian 16-bit word into a byte buffer */ +#define le_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + } while (0) + +/* XOR a source byte buffer against a destination */ +#define lw_xor_block(dest, src, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ ^= *_src++; \ + --_len; \ + } \ + } while (0) + +/* XOR two source byte buffers and put the result in a destination buffer */ +#define lw_xor_block_2_src(dest, src1, src2, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ = *_src1++ ^ *_src2++; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time */ +#define lw_xor_block_2_dest(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest2++ = (*_dest++ ^= *_src++); \ + --_len; \ + } \ + } while (0) + +/* XOR two byte buffers and write to a destination which at the same + * time copying the contents of src2 to dest2 */ +#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src2++; \ + *_dest2++ = _temp; \ + *_dest++ = *_src1++ ^ _temp; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time. This version swaps the source value + * into the "dest" buffer */ +#define lw_xor_block_swap(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src++; \ + *_dest2++ = *_dest ^ _temp; \ + *_dest++ = _temp; \ + --_len; \ + } \ + } while (0) + +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + +/* Rotation macros for 32-bit arguments */ + +/* Generic left rotate */ +#define leftRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (32 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (32 - (bits))); \ + })) + +#if !LW_CRYPTO_ROTATE32_COMPOSED + +/* Left rotate by a specific number of bits. 
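As a rough sketch of how the endian and XOR helpers above are typically used (illustrative only, not part of the patch; the function name is made up and a hosted build with <assert.h> and <string.h> is assumed):

#include <assert.h>
#include <string.h>

static void lw_util_demo(void)
{
    const unsigned char bytes[4] = {0x01, 0x02, 0x03, 0x04};
    unsigned char tmp[4];
    unsigned char a[4] = {0xAA, 0xBB, 0xCC, 0xDD};
    unsigned char b[4] = {0x0F, 0x0F, 0x0F, 0x0F};
    unsigned char out[4];

    /* The same four bytes read as a big-endian or little-endian word */
    assert(be_load_word32(bytes) == 0x01020304u);
    assert(le_load_word32(bytes) == 0x04030201u);

    /* A store/load round-trip preserves the value */
    le_store_word32(tmp, 0xDEADBEEFu);
    assert(le_load_word32(tmp) == 0xDEADBEEFu);

    /* XOR two source buffers into a third: out[i] = a[i] ^ b[i] */
    lw_xor_block_2_src(out, a, b, 4);
    assert(out[0] == (0xAA ^ 0x0F));

    /* XOR b into a in place: a[i] ^= b[i], so a now equals out */
    lw_xor_block(a, b, 4);
    assert(memcmp(a, out, 4) == 0);
}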
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1(a) (leftRotate((a), 1)) +#define leftRotate2(a) (leftRotate((a), 2)) +#define leftRotate3(a) (leftRotate((a), 3)) +#define leftRotate4(a) (leftRotate((a), 4)) +#define leftRotate5(a) (leftRotate((a), 5)) +#define leftRotate6(a) (leftRotate((a), 6)) +#define leftRotate7(a) (leftRotate((a), 7)) +#define leftRotate8(a) (leftRotate((a), 8)) +#define leftRotate9(a) (leftRotate((a), 9)) +#define leftRotate10(a) (leftRotate((a), 10)) +#define leftRotate11(a) (leftRotate((a), 11)) +#define leftRotate12(a) (leftRotate((a), 12)) +#define leftRotate13(a) (leftRotate((a), 13)) +#define leftRotate14(a) (leftRotate((a), 14)) +#define leftRotate15(a) (leftRotate((a), 15)) +#define leftRotate16(a) (leftRotate((a), 16)) +#define leftRotate17(a) (leftRotate((a), 17)) +#define leftRotate18(a) (leftRotate((a), 18)) +#define leftRotate19(a) (leftRotate((a), 19)) +#define leftRotate20(a) (leftRotate((a), 20)) +#define leftRotate21(a) (leftRotate((a), 21)) +#define leftRotate22(a) (leftRotate((a), 22)) +#define leftRotate23(a) (leftRotate((a), 23)) +#define leftRotate24(a) (leftRotate((a), 24)) +#define leftRotate25(a) (leftRotate((a), 25)) +#define leftRotate26(a) (leftRotate((a), 26)) +#define leftRotate27(a) (leftRotate((a), 27)) +#define leftRotate28(a) (leftRotate((a), 28)) +#define leftRotate29(a) (leftRotate((a), 29)) +#define leftRotate30(a) (leftRotate((a), 30)) +#define leftRotate31(a) (leftRotate((a), 31)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1(a) (rightRotate((a), 1)) +#define rightRotate2(a) (rightRotate((a), 2)) +#define rightRotate3(a) (rightRotate((a), 3)) +#define rightRotate4(a) (rightRotate((a), 4)) +#define rightRotate5(a) (rightRotate((a), 5)) +#define rightRotate6(a) (rightRotate((a), 6)) +#define rightRotate7(a) (rightRotate((a), 7)) +#define rightRotate8(a) (rightRotate((a), 8)) +#define rightRotate9(a) (rightRotate((a), 9)) +#define rightRotate10(a) (rightRotate((a), 10)) +#define rightRotate11(a) (rightRotate((a), 11)) +#define rightRotate12(a) (rightRotate((a), 12)) +#define rightRotate13(a) (rightRotate((a), 13)) +#define rightRotate14(a) (rightRotate((a), 14)) +#define rightRotate15(a) (rightRotate((a), 15)) +#define rightRotate16(a) (rightRotate((a), 16)) +#define rightRotate17(a) (rightRotate((a), 17)) +#define rightRotate18(a) (rightRotate((a), 18)) +#define rightRotate19(a) (rightRotate((a), 19)) +#define rightRotate20(a) (rightRotate((a), 20)) +#define rightRotate21(a) (rightRotate((a), 21)) +#define rightRotate22(a) (rightRotate((a), 22)) +#define rightRotate23(a) (rightRotate((a), 23)) +#define rightRotate24(a) (rightRotate((a), 24)) +#define rightRotate25(a) (rightRotate((a), 25)) +#define rightRotate26(a) (rightRotate((a), 26)) +#define rightRotate27(a) (rightRotate((a), 27)) +#define rightRotate28(a) (rightRotate((a), 28)) +#define rightRotate29(a) (rightRotate((a), 29)) +#define rightRotate30(a) (rightRotate((a), 30)) +#define rightRotate31(a) (rightRotate((a), 31)) + +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ 
+#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Rotation macros for 64-bit arguments */ + +/* Generic left rotate */ +#define leftRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (64 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (64 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. 
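A minimal sanity check of the identities behind the composed rotations above (illustrative only; the helper name is hypothetical): rotations compose additively modulo 32, so a left rotate by 5 equals a left rotate by 8 followed by a right rotate by 3, and a right rotate by n equals a left rotate by 32 - n.

#include <assert.h>

static void lw_rotate_demo(void)
{
    uint32_t x = 0x12345678u;

    /* On AVR, leftRotate5 is built from one 8-bit left rotate and three
     * 1-bit right rotates; both routes agree with the generic macro. */
    assert(leftRotate5(x) == leftRotate(x, 5));
    assert(leftRotate5(x) == rightRotate(leftRotate(x, 8), 3));

    /* A right rotate by 7 is the same as a left rotate by 25 */
    assert(rightRotate7(x) == leftRotate(x, 25));
}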
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_64(a) (leftRotate_64((a), 1)) +#define leftRotate2_64(a) (leftRotate_64((a), 2)) +#define leftRotate3_64(a) (leftRotate_64((a), 3)) +#define leftRotate4_64(a) (leftRotate_64((a), 4)) +#define leftRotate5_64(a) (leftRotate_64((a), 5)) +#define leftRotate6_64(a) (leftRotate_64((a), 6)) +#define leftRotate7_64(a) (leftRotate_64((a), 7)) +#define leftRotate8_64(a) (leftRotate_64((a), 8)) +#define leftRotate9_64(a) (leftRotate_64((a), 9)) +#define leftRotate10_64(a) (leftRotate_64((a), 10)) +#define leftRotate11_64(a) (leftRotate_64((a), 11)) +#define leftRotate12_64(a) (leftRotate_64((a), 12)) +#define leftRotate13_64(a) (leftRotate_64((a), 13)) +#define leftRotate14_64(a) (leftRotate_64((a), 14)) +#define leftRotate15_64(a) (leftRotate_64((a), 15)) +#define leftRotate16_64(a) (leftRotate_64((a), 16)) +#define leftRotate17_64(a) (leftRotate_64((a), 17)) +#define leftRotate18_64(a) (leftRotate_64((a), 18)) +#define leftRotate19_64(a) (leftRotate_64((a), 19)) +#define leftRotate20_64(a) (leftRotate_64((a), 20)) +#define leftRotate21_64(a) (leftRotate_64((a), 21)) +#define leftRotate22_64(a) (leftRotate_64((a), 22)) +#define leftRotate23_64(a) (leftRotate_64((a), 23)) +#define leftRotate24_64(a) (leftRotate_64((a), 24)) +#define leftRotate25_64(a) (leftRotate_64((a), 25)) +#define leftRotate26_64(a) (leftRotate_64((a), 26)) +#define leftRotate27_64(a) (leftRotate_64((a), 27)) +#define leftRotate28_64(a) (leftRotate_64((a), 28)) +#define leftRotate29_64(a) (leftRotate_64((a), 29)) +#define leftRotate30_64(a) (leftRotate_64((a), 30)) +#define leftRotate31_64(a) (leftRotate_64((a), 31)) +#define leftRotate32_64(a) (leftRotate_64((a), 32)) +#define leftRotate33_64(a) (leftRotate_64((a), 33)) +#define leftRotate34_64(a) (leftRotate_64((a), 34)) +#define leftRotate35_64(a) (leftRotate_64((a), 35)) +#define leftRotate36_64(a) (leftRotate_64((a), 36)) +#define leftRotate37_64(a) (leftRotate_64((a), 37)) +#define leftRotate38_64(a) (leftRotate_64((a), 38)) +#define leftRotate39_64(a) (leftRotate_64((a), 39)) +#define leftRotate40_64(a) (leftRotate_64((a), 40)) +#define leftRotate41_64(a) (leftRotate_64((a), 41)) +#define leftRotate42_64(a) (leftRotate_64((a), 42)) +#define leftRotate43_64(a) (leftRotate_64((a), 43)) +#define leftRotate44_64(a) (leftRotate_64((a), 44)) +#define leftRotate45_64(a) (leftRotate_64((a), 45)) +#define leftRotate46_64(a) (leftRotate_64((a), 46)) +#define leftRotate47_64(a) (leftRotate_64((a), 47)) +#define leftRotate48_64(a) (leftRotate_64((a), 48)) +#define leftRotate49_64(a) (leftRotate_64((a), 49)) +#define leftRotate50_64(a) (leftRotate_64((a), 50)) +#define leftRotate51_64(a) (leftRotate_64((a), 51)) +#define leftRotate52_64(a) (leftRotate_64((a), 52)) +#define leftRotate53_64(a) (leftRotate_64((a), 53)) +#define leftRotate54_64(a) (leftRotate_64((a), 54)) +#define leftRotate55_64(a) (leftRotate_64((a), 55)) +#define leftRotate56_64(a) (leftRotate_64((a), 56)) +#define leftRotate57_64(a) (leftRotate_64((a), 57)) +#define leftRotate58_64(a) (leftRotate_64((a), 58)) +#define leftRotate59_64(a) (leftRotate_64((a), 59)) +#define leftRotate60_64(a) (leftRotate_64((a), 60)) +#define leftRotate61_64(a) (leftRotate_64((a), 61)) +#define leftRotate62_64(a) (leftRotate_64((a), 62)) +#define leftRotate63_64(a) (leftRotate_64((a), 63)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_64(a) (rightRotate_64((a), 1)) +#define rightRotate2_64(a) (rightRotate_64((a), 2)) +#define rightRotate3_64(a) (rightRotate_64((a), 3)) +#define rightRotate4_64(a) (rightRotate_64((a), 4)) +#define rightRotate5_64(a) (rightRotate_64((a), 5)) +#define rightRotate6_64(a) (rightRotate_64((a), 6)) +#define rightRotate7_64(a) (rightRotate_64((a), 7)) +#define rightRotate8_64(a) (rightRotate_64((a), 8)) +#define rightRotate9_64(a) (rightRotate_64((a), 9)) +#define rightRotate10_64(a) (rightRotate_64((a), 10)) +#define rightRotate11_64(a) (rightRotate_64((a), 11)) +#define rightRotate12_64(a) (rightRotate_64((a), 12)) +#define rightRotate13_64(a) (rightRotate_64((a), 13)) +#define rightRotate14_64(a) (rightRotate_64((a), 14)) +#define rightRotate15_64(a) (rightRotate_64((a), 15)) +#define rightRotate16_64(a) (rightRotate_64((a), 16)) +#define rightRotate17_64(a) (rightRotate_64((a), 17)) +#define rightRotate18_64(a) (rightRotate_64((a), 18)) +#define rightRotate19_64(a) (rightRotate_64((a), 19)) +#define rightRotate20_64(a) (rightRotate_64((a), 20)) +#define rightRotate21_64(a) (rightRotate_64((a), 21)) +#define rightRotate22_64(a) (rightRotate_64((a), 22)) +#define rightRotate23_64(a) (rightRotate_64((a), 23)) +#define rightRotate24_64(a) (rightRotate_64((a), 24)) +#define rightRotate25_64(a) (rightRotate_64((a), 25)) +#define rightRotate26_64(a) (rightRotate_64((a), 26)) +#define rightRotate27_64(a) (rightRotate_64((a), 27)) +#define rightRotate28_64(a) (rightRotate_64((a), 28)) +#define rightRotate29_64(a) (rightRotate_64((a), 29)) +#define rightRotate30_64(a) (rightRotate_64((a), 30)) +#define rightRotate31_64(a) (rightRotate_64((a), 31)) +#define rightRotate32_64(a) (rightRotate_64((a), 32)) +#define rightRotate33_64(a) (rightRotate_64((a), 33)) +#define rightRotate34_64(a) (rightRotate_64((a), 34)) +#define rightRotate35_64(a) (rightRotate_64((a), 35)) +#define rightRotate36_64(a) (rightRotate_64((a), 36)) +#define rightRotate37_64(a) (rightRotate_64((a), 37)) +#define rightRotate38_64(a) (rightRotate_64((a), 38)) +#define rightRotate39_64(a) (rightRotate_64((a), 39)) +#define rightRotate40_64(a) (rightRotate_64((a), 40)) +#define rightRotate41_64(a) (rightRotate_64((a), 41)) +#define rightRotate42_64(a) (rightRotate_64((a), 42)) +#define rightRotate43_64(a) (rightRotate_64((a), 43)) +#define rightRotate44_64(a) (rightRotate_64((a), 44)) +#define rightRotate45_64(a) (rightRotate_64((a), 45)) +#define rightRotate46_64(a) (rightRotate_64((a), 46)) +#define rightRotate47_64(a) (rightRotate_64((a), 47)) +#define rightRotate48_64(a) (rightRotate_64((a), 48)) +#define rightRotate49_64(a) (rightRotate_64((a), 49)) +#define rightRotate50_64(a) (rightRotate_64((a), 50)) +#define rightRotate51_64(a) (rightRotate_64((a), 51)) +#define rightRotate52_64(a) (rightRotate_64((a), 52)) +#define rightRotate53_64(a) (rightRotate_64((a), 53)) +#define rightRotate54_64(a) (rightRotate_64((a), 54)) +#define rightRotate55_64(a) (rightRotate_64((a), 55)) +#define rightRotate56_64(a) (rightRotate_64((a), 56)) +#define rightRotate57_64(a) (rightRotate_64((a), 57)) +#define rightRotate58_64(a) (rightRotate_64((a), 58)) +#define rightRotate59_64(a) (rightRotate_64((a), 59)) +#define rightRotate60_64(a) (rightRotate_64((a), 60)) +#define rightRotate61_64(a) (rightRotate_64((a), 61)) +#define rightRotate62_64(a) (rightRotate_64((a), 62)) +#define rightRotate63_64(a) (rightRotate_64((a), 63)) + +/* 
Rotate a 16-bit value left by a number of bits */ +#define leftRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (16 - (bits))); \ + })) + +/* Rotate a 16-bit value right by a number of bits */ +#define rightRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (16 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_16(a) (leftRotate_16((a), 1)) +#define leftRotate2_16(a) (leftRotate_16((a), 2)) +#define leftRotate3_16(a) (leftRotate_16((a), 3)) +#define leftRotate4_16(a) (leftRotate_16((a), 4)) +#define leftRotate5_16(a) (leftRotate_16((a), 5)) +#define leftRotate6_16(a) (leftRotate_16((a), 6)) +#define leftRotate7_16(a) (leftRotate_16((a), 7)) +#define leftRotate8_16(a) (leftRotate_16((a), 8)) +#define leftRotate9_16(a) (leftRotate_16((a), 9)) +#define leftRotate10_16(a) (leftRotate_16((a), 10)) +#define leftRotate11_16(a) (leftRotate_16((a), 11)) +#define leftRotate12_16(a) (leftRotate_16((a), 12)) +#define leftRotate13_16(a) (leftRotate_16((a), 13)) +#define leftRotate14_16(a) (leftRotate_16((a), 14)) +#define leftRotate15_16(a) (leftRotate_16((a), 15)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_16(a) (rightRotate_16((a), 1)) +#define rightRotate2_16(a) (rightRotate_16((a), 2)) +#define rightRotate3_16(a) (rightRotate_16((a), 3)) +#define rightRotate4_16(a) (rightRotate_16((a), 4)) +#define rightRotate5_16(a) (rightRotate_16((a), 5)) +#define rightRotate6_16(a) (rightRotate_16((a), 6)) +#define rightRotate7_16(a) (rightRotate_16((a), 7)) +#define rightRotate8_16(a) (rightRotate_16((a), 8)) +#define rightRotate9_16(a) (rightRotate_16((a), 9)) +#define rightRotate10_16(a) (rightRotate_16((a), 10)) +#define rightRotate11_16(a) (rightRotate_16((a), 11)) +#define rightRotate12_16(a) (rightRotate_16((a), 12)) +#define rightRotate13_16(a) (rightRotate_16((a), 13)) +#define rightRotate14_16(a) (rightRotate_16((a), 14)) +#define rightRotate15_16(a) (rightRotate_16((a), 15)) + +/* Rotate an 8-bit value left by a number of bits */ +#define leftRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (8 - (bits))); \ + })) + +/* Rotate an 8-bit value right by a number of bits */ +#define rightRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (8 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_8(a) (leftRotate_8((a), 1)) +#define leftRotate2_8(a) (leftRotate_8((a), 2)) +#define leftRotate3_8(a) (leftRotate_8((a), 3)) +#define leftRotate4_8(a) (leftRotate_8((a), 4)) +#define leftRotate5_8(a) (leftRotate_8((a), 5)) +#define leftRotate6_8(a) (leftRotate_8((a), 6)) +#define leftRotate7_8(a) (leftRotate_8((a), 7)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_8(a) (rightRotate_8((a), 1)) +#define rightRotate2_8(a) (rightRotate_8((a), 2)) +#define rightRotate3_8(a) (rightRotate_8((a), 3)) +#define rightRotate4_8(a) (rightRotate_8((a), 4)) +#define rightRotate5_8(a) (rightRotate_8((a), 5)) +#define rightRotate6_8(a) (rightRotate_8((a), 6)) +#define rightRotate7_8(a) (rightRotate_8((a), 7)) + +#endif diff --git a/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/aead-common.c b/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/aead-common.h b/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
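A brief note on the constant-time comparison used by aead_check_tag above (illustrative only; it assumes an arithmetic right shift of a negative int, which this code already relies on): the OR-accumulator is zero only when every tag byte matched, and (accum - 1) >> 8 turns that into an all-ones mask on a match or zero on a mismatch, with no data-dependent branch.

#include <assert.h>

static void tag_mask_demo(void)
{
    int accum_match = 0;     /* all tag bytes were equal */
    int accum_diff  = 0x5A;  /* at least one byte differed (value 1..255) */

    /* Match: mask is all ones, plaintext is kept, ~mask == 0 is returned */
    assert(((accum_match - 1) >> 8) == -1);
    /* Mismatch: mask is zero, plaintext is wiped, ~mask == -1 is returned */
    assert(((accum_diff - 1) >> 8) == 0);
}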
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. 
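As a rough sketch of how a meta-information block is meant to be consumed by a generic driver (illustrative only, not part of the patch; drygascon128_cipher defined elsewhere in this patch is one such block, and the helper name here is hypothetical):

/* Encrypt a message through whatever cipher the table describes. */
static int encrypt_with(const aead_cipher_t *cipher,
                        unsigned char *out, unsigned long long *outlen,
                        const unsigned char *msg, unsigned long long msglen,
                        const unsigned char *nonce, const unsigned char *key)
{
    /* nonce must be cipher->nonce_len bytes, key cipher->key_len bytes,
     * and out must have room for msglen + cipher->tag_len bytes. */
    return cipher->encrypt(out, outlen, msg, msglen,
                           0, 0,   /* no associated data */
                           0,      /* nsec is unused */
                           nonce, key);
}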
Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/api.h b/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/api.h deleted file mode 100644 index de9380d..0000000 --- a/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/api.h +++ /dev/null @@ -1 +0,0 @@ -#define CRYPTO_BYTES 64 diff --git a/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/drygascon.c b/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/drygascon.c deleted file mode 100644 index e963903..0000000 --- a/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/drygascon.c +++ /dev/null @@ -1,421 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "drygascon.h" -#include "internal-drysponge.h" -#include - -aead_cipher_t const drygascon128_cipher = { - "DryGASCON128", - DRYGASCON128_KEY_SIZE, - DRYGASCON128_NONCE_SIZE, - DRYGASCON128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - drygascon128_aead_encrypt, - drygascon128_aead_decrypt -}; - -aead_cipher_t const drygascon256_cipher = { - "DryGASCON256", - DRYGASCON256_KEY_SIZE, - DRYGASCON256_NONCE_SIZE, - DRYGASCON256_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - drygascon256_aead_encrypt, - drygascon256_aead_decrypt -}; - -aead_hash_algorithm_t const drygascon128_hash_algorithm = { - "DryGASCON128-HASH", - sizeof(int), - DRYGASCON128_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - drygascon128_hash, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const drygascon256_hash_algorithm = { - "DryGASCON256-HASH", - sizeof(int), - DRYGASCON256_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - drygascon256_hash, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \brief Processes associated data for DryGASCON128. - * - * \param state DrySPONGE128 sponge state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data, must not be zero. - * \param finalize Non-zero to finalize packet processing because - * the message is zero-length. - */ -static void drygascon128_process_ad - (drysponge128_state_t *state, const unsigned char *ad, - unsigned long long adlen, int finalize) -{ - /* Process all blocks except the last one */ - while (adlen > DRYSPONGE128_RATE) { - drysponge128_f_absorb(state, ad, DRYSPONGE128_RATE); - drysponge128_g_core(state); - ad += DRYSPONGE128_RATE; - adlen -= DRYSPONGE128_RATE; - } - - /* Process the last block with domain separation and padding */ - state->domain = DRYDOMAIN128_ASSOC_DATA; - if (finalize) - state->domain |= DRYDOMAIN128_FINAL; - if (adlen < DRYSPONGE128_RATE) - state->domain |= DRYDOMAIN128_PADDED; - drysponge128_f_absorb(state, ad, (unsigned)adlen); - drysponge128_g(state); -} - -/** - * \brief Processes associated data for DryGASCON256. - * - * \param state DrySPONGE256 sponge state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data, must not be zero. 
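To make the block loop in drygascon128_process_ad concrete (a worked example, assuming DRYSPONGE128_RATE is 16 bytes as defined by this library's DrySPONGE128 code): a 40-byte associated-data input is absorbed as two full 16-byte blocks through drysponge128_g_core(), and the remaining 8 bytes form the final block, which picks up the ASSOC_DATA domain bits plus the PADDED flag because 8 < 16; a 32-byte input instead ends with a full 16-byte final block and no PADDED flag. The shape of that split can be mirrored directly (illustrative helper, not in the library):

/* Number of g_core() calls and the final block size for a given adlen,
 * mirroring the "while (adlen > RATE)" loop above; adlen must be non-zero. */
static void ad_block_shape(unsigned long long adlen,
                           unsigned *full_blocks, unsigned *last_block)
{
    *full_blocks = 0;
    while (adlen > DRYSPONGE128_RATE) {   /* strictly greater: the final
                                             block is never empty */
        ++(*full_blocks);
        adlen -= DRYSPONGE128_RATE;
    }
    *last_block = (unsigned)adlen;        /* 1..RATE; < RATE means PADDED */
}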
- * \param finalize Non-zero to finalize packet processing because - * the message is zero-length. - */ -static void drygascon256_process_ad - (drysponge256_state_t *state, const unsigned char *ad, - unsigned long long adlen, int finalize) -{ - /* Process all blocks except the last one */ - while (adlen > DRYSPONGE256_RATE) { - drysponge256_f_absorb(state, ad, DRYSPONGE256_RATE); - drysponge256_g_core(state); - ad += DRYSPONGE256_RATE; - adlen -= DRYSPONGE256_RATE; - } - - /* Process the last block with domain separation and padding */ - state->domain = DRYDOMAIN256_ASSOC_DATA; - if (finalize) - state->domain |= DRYDOMAIN256_FINAL; - if (adlen < DRYSPONGE256_RATE) - state->domain |= DRYDOMAIN256_PADDED; - drysponge256_f_absorb(state, ad, (unsigned)adlen); - drysponge256_g(state); -} - -int drygascon128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - drysponge128_state_t state; - unsigned temp; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + DRYGASCON128_TAG_SIZE; - - /* Initialize the sponge state with the key and nonce */ - drysponge128_setup(&state, k, npub, adlen == 0 && mlen == 0); - - /* Process the associated data */ - if (adlen > 0) - drygascon128_process_ad(&state, ad, adlen, mlen == 0); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - /* Processs all blocks except the last one */ - while (mlen > DRYSPONGE128_RATE) { - drysponge128_f_absorb(&state, m, DRYSPONGE128_RATE); - lw_xor_block_2_src(c, m, state.r.B, DRYSPONGE128_RATE); - drysponge128_g(&state); - c += DRYSPONGE128_RATE; - m += DRYSPONGE128_RATE; - mlen -= DRYSPONGE128_RATE; - } - - /* Process the last block with domain separation and padding */ - state.domain = DRYDOMAIN128_MESSAGE | DRYDOMAIN128_FINAL; - if (mlen < DRYSPONGE128_RATE) - state.domain |= DRYDOMAIN128_PADDED; - temp = (unsigned)mlen; - drysponge128_f_absorb(&state, m, temp); - lw_xor_block_2_src(c, m, state.r.B, temp); - drysponge128_g(&state); - c += temp; - } - - /* Generate the authentication tag */ - memcpy(c, state.r.B, DRYGASCON128_TAG_SIZE); - return 0; -} - -int drygascon128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - drysponge128_state_t state; - unsigned char *mtemp = m; - unsigned temp; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < DRYGASCON128_TAG_SIZE) - return -1; - *mlen = clen - DRYGASCON128_TAG_SIZE; - - /* Initialize the sponge state with the key and nonce */ - clen -= DRYGASCON128_TAG_SIZE; - drysponge128_setup(&state, k, npub, adlen == 0 && clen == 0); - - /* Process the associated data */ - if (adlen > 0) - drygascon128_process_ad(&state, ad, adlen, clen == 0); - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > 0) { - /* Processs all blocks except the last one */ - while (clen > DRYSPONGE128_RATE) { - lw_xor_block_2_src(m, c, state.r.B, DRYSPONGE128_RATE); - drysponge128_f_absorb(&state, m, DRYSPONGE128_RATE); - drysponge128_g(&state); - c += DRYSPONGE128_RATE; - m += DRYSPONGE128_RATE; - clen -= DRYSPONGE128_RATE; - } - - /* Process the last block with domain separation and padding */ - state.domain = 
DRYDOMAIN128_MESSAGE | DRYDOMAIN128_FINAL; - if (clen < DRYSPONGE128_RATE) - state.domain |= DRYDOMAIN128_PADDED; - temp = (unsigned)clen; - lw_xor_block_2_src(m, c, state.r.B, temp); - drysponge128_f_absorb(&state, m, temp); - drysponge128_g(&state); - c += temp; - } - - /* Check the authentication tag */ - return aead_check_tag(mtemp, *mlen, state.r.B, c, DRYGASCON128_TAG_SIZE); -} - -int drygascon256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - drysponge256_state_t state; - unsigned temp; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + DRYGASCON256_TAG_SIZE; - - /* Initialize the sponge state with the key and nonce */ - drysponge256_setup(&state, k, npub, adlen == 0 && mlen == 0); - - /* Process the associated data */ - if (adlen > 0) - drygascon256_process_ad(&state, ad, adlen, mlen == 0); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - /* Processs all blocks except the last one */ - while (mlen > DRYSPONGE256_RATE) { - drysponge256_f_absorb(&state, m, DRYSPONGE256_RATE); - lw_xor_block_2_src(c, m, state.r.B, DRYSPONGE256_RATE); - drysponge256_g(&state); - c += DRYSPONGE256_RATE; - m += DRYSPONGE256_RATE; - mlen -= DRYSPONGE256_RATE; - } - - /* Process the last block with domain separation and padding */ - state.domain = DRYDOMAIN256_MESSAGE | DRYDOMAIN256_FINAL; - if (mlen < DRYSPONGE256_RATE) - state.domain |= DRYDOMAIN256_PADDED; - temp = (unsigned)mlen; - drysponge256_f_absorb(&state, m, temp); - lw_xor_block_2_src(c, m, state.r.B, temp); - drysponge256_g(&state); - c += temp; - } - - /* Generate the authentication tag */ - memcpy(c, state.r.B, 16); - drysponge256_g(&state); - memcpy(c + 16, state.r.B, 16); - return 0; -} - -int drygascon256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - drysponge256_state_t state; - unsigned char *mtemp = m; - unsigned temp; - int result; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < DRYGASCON256_TAG_SIZE) - return -1; - *mlen = clen - DRYGASCON256_TAG_SIZE; - - /* Initialize the sponge state with the key and nonce */ - clen -= DRYGASCON256_TAG_SIZE; - drysponge256_setup(&state, k, npub, adlen == 0 && clen == 0); - - /* Process the associated data */ - if (adlen > 0) - drygascon256_process_ad(&state, ad, adlen, clen == 0); - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > 0) { - /* Processs all blocks except the last one */ - while (clen > DRYSPONGE256_RATE) { - lw_xor_block_2_src(m, c, state.r.B, DRYSPONGE256_RATE); - drysponge256_f_absorb(&state, m, DRYSPONGE256_RATE); - drysponge256_g(&state); - c += DRYSPONGE256_RATE; - m += DRYSPONGE256_RATE; - clen -= DRYSPONGE256_RATE; - } - - /* Process the last block with domain separation and padding */ - state.domain = DRYDOMAIN256_MESSAGE | DRYDOMAIN256_FINAL; - if (clen < DRYSPONGE256_RATE) - state.domain |= DRYDOMAIN256_PADDED; - temp = (unsigned)clen; - lw_xor_block_2_src(m, c, state.r.B, temp); - drysponge256_f_absorb(&state, m, temp); - drysponge256_g(&state); - c += temp; - } - - /* Check the authentication tag which is split into two pieces */ - result = 
aead_check_tag(0, 0, state.r.B, c, 16); - drysponge256_g(&state); - return aead_check_tag_precheck - (mtemp, *mlen, state.r.B, c + 16, 16, ~result); -} - -/** - * \brief Precomputed initialization vector for DryGASCON128-HASH. - * - * This is the CST_H value from the DryGASCON specification after it - * has been processed by the key setup function for DrySPONGE128. - */ -static unsigned char const drygascon128_hash_init[] = { - /* c */ - 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, - 0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44, - 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, - 0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44, - 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, - /* x */ - 0xa4, 0x09, 0x38, 0x22, 0x29, 0x9f, 0x31, 0xd0, - 0x08, 0x2e, 0xfa, 0x98, 0xec, 0x4e, 0x6c, 0x89 -}; - -int drygascon128_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - drysponge128_state_t state; - memcpy(state.c.B, drygascon128_hash_init, sizeof(state.c.B)); - memcpy(state.x.B, drygascon128_hash_init + sizeof(state.c.B), - sizeof(state.x.B)); - state.domain = 0; - state.rounds = DRYSPONGE128_ROUNDS; - drygascon128_process_ad(&state, in, inlen, 1); - memcpy(out, state.r.B, 16); - drysponge128_g(&state); - memcpy(out + 16, state.r.B, 16); - return 0; -} - -/** - * \brief Precomputed initialization vector for DryGASCON256-HASH. - * - * This is the CST_H value from the DryGASCON specification after it - * has been processed by the key setup function for DrySPONGE256. - */ -static unsigned char const drygascon256_hash_init[] = { - /* c */ - 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, - 0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44, - 0xa4, 0x09, 0x38, 0x22, 0x29, 0x9f, 0x31, 0xd0, - 0x08, 0x2e, 0xfa, 0x98, 0xec, 0x4e, 0x6c, 0x89, - 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, - 0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44, - 0xa4, 0x09, 0x38, 0x22, 0x29, 0x9f, 0x31, 0xd0, - 0x08, 0x2e, 0xfa, 0x98, 0xec, 0x4e, 0x6c, 0x89, - 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, - /* x */ - 0x45, 0x28, 0x21, 0xe6, 0x38, 0xd0, 0x13, 0x77, - 0xbe, 0x54, 0x66, 0xcf, 0x34, 0xe9, 0x0c, 0x6c -}; - -int drygascon256_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - drysponge256_state_t state; - memcpy(state.c.B, drygascon256_hash_init, sizeof(state.c.B)); - memcpy(state.x.B, drygascon256_hash_init + sizeof(state.c.B), - sizeof(state.x.B)); - state.domain = 0; - state.rounds = DRYSPONGE256_ROUNDS; - drygascon256_process_ad(&state, in, inlen, 1); - memcpy(out, state.r.B, 16); - drysponge256_g(&state); - memcpy(out + 16, state.r.B, 16); - drysponge256_g(&state); - memcpy(out + 32, state.r.B, 16); - drysponge256_g(&state); - memcpy(out + 48, state.r.B, 16); - return 0; -} diff --git a/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/drygascon.h b/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/drygascon.h deleted file mode 100644 index 12e18c3..0000000 --- a/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/drygascon.h +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_DRYGASCON_H -#define LWCRYPTO_DRYGASCON_H - -#include "aead-common.h" - -/** - * \file drygascon.h - * \brief DryGASCON authenticated encryption algorithm. - * - * DryGASCON is a family of authenticated encryption algorithms based - * around a generalised version of the ASCON permutation. DryGASCON - * is designed to provide some protection against power analysis. - * - * There are four algorithms in the DryGASCON family: - * - * \li DryGASCON128 is an authenticated encryption algorithm with a - * 128-bit key, a 128-bit nonce, and a 128-bit authentication tag. - * \li DryGASCON256 is an authenticated encryption algorithm with a - * 256-bit key, a 128-bit nonce, and a 128-256 authentication tag. - * \li DryGASCON128-HASH is a hash algorithm with a 256-bit output. - * \li DryGASCON256-HASH is a hash algorithm with a 512-bit output. - * - * DryGASCON128 and DryGASCON128-HASH are the primary members of the family. - * - * References: https://github.com/sebastien-riou/DryGASCON - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for DryGASCON128. - */ -#define DRYGASCON128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for DryGASCON128. - */ -#define DRYGASCON128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for DryGASCON128. - */ -#define DRYGASCON128_NONCE_SIZE 16 - -/** - * \brief Size of the hash output for DryGASCON128-HASH. - */ -#define DRYGASCON128_HASH_SIZE 32 - -/** - * \brief Size of the key for DryGASCON256. - */ -#define DRYGASCON256_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for DryGASCON256. - */ -#define DRYGASCON256_TAG_SIZE 32 - -/** - * \brief Size of the nonce for DryGASCON256. - */ -#define DRYGASCON256_NONCE_SIZE 16 - -/** - * \brief Size of the hash output for DryGASCON256-HASH. - */ -#define DRYGASCON256_HASH_SIZE 64 - -/** - * \brief Meta-information block for the DryGASCON128 cipher. - */ -extern aead_cipher_t const drygascon128_cipher; - -/** - * \brief Meta-information block for the DryGASCON256 cipher. - */ -extern aead_cipher_t const drygascon256_cipher; - -/** - * \brief Meta-information block for DryGASCON128-HASH. - */ -extern aead_hash_algorithm_t const drygascon128_hash_algorithm; - -/** - * \brief Meta-information block for DryGASCON256-HASH. - */ -extern aead_hash_algorithm_t const drygascon256_hash_algorithm; - -/** - * \brief Encrypts and authenticates a packet with DryGASCON128. 
- * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa drygascon128_aead_decrypt() - */ -int drygascon128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with DryGASCON128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa drygascon128_aead_encrypt() - */ -int drygascon128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with DryGASCON256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
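A minimal round-trip sketch for the DryGASCON128 AEAD API declared above (illustrative only, not part of the patch; the all-zero key and nonce are placeholders, and a real application must use a secret key and a fresh nonce per message):

#include <string.h>

static int drygascon128_roundtrip_demo(void)
{
    unsigned char key[DRYGASCON128_KEY_SIZE] = {0};    /* placeholder key */
    unsigned char npub[DRYGASCON128_NONCE_SIZE] = {0}; /* placeholder nonce */
    unsigned char msg[12] = "hello world";
    unsigned char ct[sizeof(msg) + DRYGASCON128_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    if (drygascon128_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                                  0, 0, 0, npub, key) != 0)
        return -1;
    if (drygascon128_aead_decrypt(pt, &ptlen, 0, ct, ctlen,
                                  0, 0, npub, key) != 0)
        return -1;   /* authentication failure or bad parameters */
    return memcmp(pt, msg, sizeof(msg)) == 0 ? 0 : -1;
}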
- * - * \sa drygascon256_aead_decrypt() - */ -int drygascon256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with DryGASCON256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa drygascon256_aead_encrypt() - */ -int drygascon256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with DRYGASCON128. - * - * \param out Buffer to receive the hash output which must be at least - * DRYGASCON128_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int drygascon128_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with DRYGASCON256. - * - * \param out Buffer to receive the hash output which must be at least - * DRYGASCON256_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. 
- */ -int drygascon256_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/hash.c b/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/hash.c deleted file mode 100644 index 6383146..0000000 --- a/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/hash.c +++ /dev/null @@ -1,8 +0,0 @@ - -#include "drygascon.h" - -int crypto_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - return drygascon256_hash(out, in, inlen); -} diff --git a/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/internal-drysponge-avr.S b/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/internal-drysponge-avr.S deleted file mode 100644 index 84d0ff8..0000000 --- a/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/internal-drysponge-avr.S +++ /dev/null @@ -1,5092 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global gascon128_core_round - .type gascon128_core_round, @function -gascon128_core_round: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ldi r18,15 - sub r18,r22 - swap r18 - or r22,r18 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+16 - ldd r5,Z+17 - ldd r6,Z+18 - ldd r7,Z+19 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - eor r4,r22 - ldd r23,Z+8 - ldd r12,Z+24 - ldd r13,Z+32 - eor r18,r13 - eor r4,r23 - eor r13,r12 - mov r14,r23 - mov r0,r18 - com r0 - and r14,r0 - mov r15,r4 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r4 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r18 - mov r0,r13 - com r0 - and r16,r0 - eor r18,r15 - eor r23,r24 - eor r4,r25 - eor r12,r16 - eor r13,r14 - eor r23,r18 - eor r12,r4 - eor r18,r13 - com r4 - st Z,r18 - std Z+8,r23 - std Z+24,r12 - std Z+32,r13 - ldd r23,Z+9 - ldd r12,Z+25 - ldd r13,Z+33 - eor r19,r13 - eor r5,r23 - eor r13,r12 - mov r14,r23 - mov r0,r19 - com r0 - and r14,r0 - mov r15,r5 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r5 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r19 - mov r0,r13 - com r0 - and r16,r0 - eor r19,r15 - eor r23,r24 - eor r5,r25 - eor r12,r16 - eor r13,r14 - eor r23,r19 - eor r12,r5 - eor r19,r13 - com r5 - std Z+1,r19 - std Z+9,r23 - std Z+25,r12 - std Z+33,r13 - ldd r23,Z+10 - ldd r12,Z+26 - ldd r13,Z+34 - eor r20,r13 - eor r6,r23 - eor r13,r12 - mov r14,r23 - mov r0,r20 - com r0 - and r14,r0 - mov r15,r6 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r6 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r20 - mov r0,r13 - com r0 - and r16,r0 - eor r20,r15 - eor r23,r24 - eor r6,r25 - eor r12,r16 - eor r13,r14 - eor r23,r20 - eor r12,r6 - eor r20,r13 - com r6 - std Z+2,r20 - std Z+10,r23 - std Z+26,r12 - std Z+34,r13 - ldd r23,Z+11 - ldd r12,Z+27 - ldd r13,Z+35 - eor r21,r13 - eor r7,r23 - eor r13,r12 - mov r14,r23 - mov r0,r21 - com r0 - and r14,r0 - mov r15,r7 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r7 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r21 - mov r0,r13 - com r0 - and r16,r0 - eor r21,r15 - eor r23,r24 - eor r7,r25 - eor r12,r16 - eor r13,r14 
- eor r23,r21 - eor r12,r7 - eor r21,r13 - com r7 - std Z+3,r21 - std Z+11,r23 - std Z+27,r12 - std Z+35,r13 - ldd r23,Z+12 - ldd r12,Z+28 - ldd r13,Z+36 - eor r26,r13 - eor r8,r23 - eor r13,r12 - mov r14,r23 - mov r0,r26 - com r0 - and r14,r0 - mov r15,r8 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r8 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r26 - mov r0,r13 - com r0 - and r16,r0 - eor r26,r15 - eor r23,r24 - eor r8,r25 - eor r12,r16 - eor r13,r14 - eor r23,r26 - eor r12,r8 - eor r26,r13 - com r8 - std Z+4,r26 - std Z+12,r23 - std Z+28,r12 - std Z+36,r13 - ldd r23,Z+13 - ldd r12,Z+29 - ldd r13,Z+37 - eor r27,r13 - eor r9,r23 - eor r13,r12 - mov r14,r23 - mov r0,r27 - com r0 - and r14,r0 - mov r15,r9 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r9 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r27 - mov r0,r13 - com r0 - and r16,r0 - eor r27,r15 - eor r23,r24 - eor r9,r25 - eor r12,r16 - eor r13,r14 - eor r23,r27 - eor r12,r9 - eor r27,r13 - com r9 - std Z+5,r27 - std Z+13,r23 - std Z+29,r12 - std Z+37,r13 - ldd r23,Z+14 - ldd r12,Z+30 - ldd r13,Z+38 - eor r2,r13 - eor r10,r23 - eor r13,r12 - mov r14,r23 - mov r0,r2 - com r0 - and r14,r0 - mov r15,r10 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r10 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r2 - mov r0,r13 - com r0 - and r16,r0 - eor r2,r15 - eor r23,r24 - eor r10,r25 - eor r12,r16 - eor r13,r14 - eor r23,r2 - eor r12,r10 - eor r2,r13 - com r10 - std Z+6,r2 - std Z+14,r23 - std Z+30,r12 - std Z+38,r13 - ldd r23,Z+15 - ldd r12,Z+31 - ldd r13,Z+39 - eor r3,r13 - eor r11,r23 - eor r13,r12 - mov r14,r23 - mov r0,r3 - com r0 - and r14,r0 - mov r15,r11 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r12 - mov r0,r11 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r12 - com r0 - and r25,r0 - mov r16,r3 - mov r0,r13 - com r0 - and r16,r0 - eor r3,r15 - eor r23,r24 - eor r11,r25 - eor r12,r16 - eor r13,r14 - eor r23,r3 - eor r12,r11 - eor r3,r13 - com r11 - std Z+7,r3 - std Z+15,r23 - std Z+31,r12 - std Z+39,r13 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r24 - rol r25 - rol r16 - rol r17 - adc r24,r1 - lsl r24 - rol r25 - rol r16 - rol r17 - adc r24,r1 - eor r24,r18 - eor r25,r19 - eor r16,r20 - eor r17,r21 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r18,r24 - eor r19,r25 - eor r20,r16 - eor r21,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - std Z+12,r26 - std Z+13,r27 - std Z+14,r2 - std Z+15,r3 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - bst r12,0 - lsr r15 - ror r14 - ror r13 - ror r12 - bld r15,7 - eor r24,r4 - eor r25,r5 - eor r16,r6 - eor r17,r7 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r1 - lsr r7 - ror r6 - 
ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r7,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - or r11,r0 - eor r4,r24 - eor r5,r25 - eor r6,r16 - eor r7,r17 - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - eor r24,r18 - eor r25,r19 - eor r16,r20 - eor r17,r21 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r24 - eor r19,r25 - eor r20,r16 - eor r21,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+24,r18 - std Z+25,r19 - std Z+26,r20 - std Z+27,r21 - std Z+28,r26 - std Z+29,r27 - std Z+30,r2 - std Z+31,r3 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - or r17,r0 - eor r24,r18 - eor r25,r19 - eor r16,r20 - eor r17,r21 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r18,r24 - eor r19,r25 - eor r20,r16 - eor r21,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+32,r18 - std Z+33,r19 - std Z+34,r20 - std Z+35,r21 - std Z+36,r26 - std Z+37,r27 - std Z+38,r2 - std Z+39,r3 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - or r17,r0 
- eor r24,r18 - eor r25,r19 - eor r16,r20 - eor r17,r21 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r24 - eor r19,r25 - eor r20,r16 - eor r21,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - std Z+16,r4 - std Z+17,r5 - std Z+18,r6 - std Z+19,r7 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size gascon128_core_round, .-gascon128_core_round - - .text -.global drysponge128_g - .type drysponge128_g, @function -drysponge128_g: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - subi r30,180 - sbci r31,255 - ld r19,Z - subi r30,76 - sbc r31,r1 - ldi r18,240 - std Z+40,r1 - std Z+41,r1 - std Z+42,r1 - std Z+43,r1 - std Z+44,r1 - std Z+45,r1 - std Z+46,r1 - std Z+47,r1 - std Z+48,r1 - std Z+49,r1 - std Z+50,r1 - std Z+51,r1 - std Z+52,r1 - std Z+53,r1 - std Z+54,r1 - std Z+55,r1 - ld r20,Z - ldd r21,Z+1 - ldd r22,Z+2 - ldd r23,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+16 - ldd r5,Z+17 - ldd r6,Z+18 - ldd r7,Z+19 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 -38: - eor r4,r18 - ldd r12,Z+8 - ldd r13,Z+24 - ldd r14,Z+32 - eor r20,r14 - eor r4,r12 - eor r14,r13 - mov r15,r12 - mov r0,r20 - com r0 - and r15,r0 - mov r24,r4 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r4 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r20 - mov r0,r14 - com r0 - and r17,r0 - eor r20,r24 - eor r12,r25 - eor r4,r16 - eor r13,r17 - eor r14,r15 - eor r12,r20 - eor r13,r4 - eor r20,r14 - com r4 - st Z,r20 - std Z+8,r12 - std Z+24,r13 - std Z+32,r14 - ldd r12,Z+9 - ldd r13,Z+25 - ldd r14,Z+33 - eor r21,r14 - eor r5,r12 - eor r14,r13 - mov r15,r12 - mov r0,r21 - com r0 - and r15,r0 - mov r24,r5 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r5 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r21 - mov r0,r14 - com r0 - and r17,r0 - eor r21,r24 - eor r12,r25 - eor r5,r16 - eor r13,r17 - eor r14,r15 - eor r12,r21 - eor r13,r5 - eor r21,r14 - com r5 - std Z+1,r21 - std Z+9,r12 - std Z+25,r13 - std Z+33,r14 - ldd r12,Z+10 - ldd r13,Z+26 - ldd r14,Z+34 - eor r22,r14 - eor r6,r12 - eor r14,r13 - mov r15,r12 - mov r0,r22 - com r0 - and r15,r0 - mov r24,r6 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r6 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r22 - mov r0,r14 - com r0 - and r17,r0 - eor r22,r24 - eor r12,r25 - eor r6,r16 - eor r13,r17 - eor r14,r15 - eor r12,r22 - eor r13,r6 - eor r22,r14 - com r6 - std Z+2,r22 - std Z+10,r12 - std Z+26,r13 - std Z+34,r14 - ldd r12,Z+11 - ldd r13,Z+27 - ldd r14,Z+35 - eor r23,r14 - eor r7,r12 - eor r14,r13 - mov r15,r12 - mov r0,r23 - com r0 - and r15,r0 - mov r24,r7 - mov r0,r12 - com r0 - and r24,r0 - mov 
r25,r13 - mov r0,r7 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r23 - mov r0,r14 - com r0 - and r17,r0 - eor r23,r24 - eor r12,r25 - eor r7,r16 - eor r13,r17 - eor r14,r15 - eor r12,r23 - eor r13,r7 - eor r23,r14 - com r7 - std Z+3,r23 - std Z+11,r12 - std Z+27,r13 - std Z+35,r14 - ldd r12,Z+12 - ldd r13,Z+28 - ldd r14,Z+36 - eor r26,r14 - eor r8,r12 - eor r14,r13 - mov r15,r12 - mov r0,r26 - com r0 - and r15,r0 - mov r24,r8 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r8 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r26 - mov r0,r14 - com r0 - and r17,r0 - eor r26,r24 - eor r12,r25 - eor r8,r16 - eor r13,r17 - eor r14,r15 - eor r12,r26 - eor r13,r8 - eor r26,r14 - com r8 - std Z+4,r26 - std Z+12,r12 - std Z+28,r13 - std Z+36,r14 - ldd r12,Z+13 - ldd r13,Z+29 - ldd r14,Z+37 - eor r27,r14 - eor r9,r12 - eor r14,r13 - mov r15,r12 - mov r0,r27 - com r0 - and r15,r0 - mov r24,r9 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r9 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r27 - mov r0,r14 - com r0 - and r17,r0 - eor r27,r24 - eor r12,r25 - eor r9,r16 - eor r13,r17 - eor r14,r15 - eor r12,r27 - eor r13,r9 - eor r27,r14 - com r9 - std Z+5,r27 - std Z+13,r12 - std Z+29,r13 - std Z+37,r14 - ldd r12,Z+14 - ldd r13,Z+30 - ldd r14,Z+38 - eor r2,r14 - eor r10,r12 - eor r14,r13 - mov r15,r12 - mov r0,r2 - com r0 - and r15,r0 - mov r24,r10 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r10 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r2 - mov r0,r14 - com r0 - and r17,r0 - eor r2,r24 - eor r12,r25 - eor r10,r16 - eor r13,r17 - eor r14,r15 - eor r12,r2 - eor r13,r10 - eor r2,r14 - com r10 - std Z+6,r2 - std Z+14,r12 - std Z+30,r13 - std Z+38,r14 - ldd r12,Z+15 - ldd r13,Z+31 - ldd r14,Z+39 - eor r3,r14 - eor r11,r12 - eor r14,r13 - mov r15,r12 - mov r0,r3 - com r0 - and r15,r0 - mov r24,r11 - mov r0,r12 - com r0 - and r24,r0 - mov r25,r13 - mov r0,r11 - com r0 - and r25,r0 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - mov r17,r3 - mov r0,r14 - com r0 - and r17,r0 - eor r3,r24 - eor r12,r25 - eor r11,r16 - eor r13,r17 - eor r14,r15 - eor r12,r3 - eor r13,r11 - eor r3,r14 - com r11 - std Z+7,r3 - std Z+15,r12 - std Z+31,r13 - std Z+39,r14 - ldd r20,Z+8 - ldd r21,Z+9 - ldd r22,Z+10 - ldd r23,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r12,r20 - movw r14,r22 - movw r24,r26 - movw r16,r2 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r24 - rol r25 - rol r16 - rol r17 - adc r24,r1 - lsl r24 - rol r25 - rol r16 - rol r17 - adc r24,r1 - eor r24,r20 - eor r25,r21 - eor r16,r22 - eor r17,r23 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r20 - mov r20,r22 - mov r22,r0 - mov r0,r21 - mov r21,r23 - mov r23,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - or r23,r0 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+8,r20 - std Z+9,r21 - std Z+10,r22 - std Z+11,r23 - std Z+12,r26 - std Z+13,r27 - std Z+14,r2 - std Z+15,r3 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw 
r16,r10 - bst r12,0 - lsr r15 - ror r14 - ror r13 - ror r12 - bld r15,7 - eor r24,r4 - eor r25,r5 - eor r16,r6 - eor r17,r7 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r1 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r7,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - or r11,r0 - eor r4,r24 - eor r5,r25 - eor r6,r16 - eor r7,r17 - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - ldd r20,Z+24 - ldd r21,Z+25 - ldd r22,Z+26 - ldd r23,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r12,r20 - movw r14,r22 - movw r24,r26 - movw r16,r2 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - eor r24,r20 - eor r25,r21 - eor r16,r22 - eor r17,r23 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r20 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r0 - lsl r20 - rol r21 - rol r22 - rol r23 - adc r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - adc r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - adc r20,r1 - mov r0,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+24,r20 - std Z+25,r21 - std Z+26,r22 - std Z+27,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r2 - std Z+31,r3 - ldd r20,Z+32 - ldd r21,Z+33 - ldd r22,Z+34 - ldd r23,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - movw r12,r20 - movw r14,r22 - movw r24,r26 - movw r16,r2 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - or r17,r0 - eor r24,r20 - eor r25,r21 - eor r16,r22 - eor r17,r23 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r20 - mov r20,r22 - mov r22,r0 - mov r0,r21 - mov r21,r23 - mov r23,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - or r23,r0 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - std Z+32,r20 - std Z+33,r21 - std Z+34,r22 - std Z+35,r23 - std Z+36,r26 - std Z+37,r27 - std Z+38,r2 - std Z+39,r3 - ld r20,Z - ldd r21,Z+1 - ldd r22,Z+2 - ldd r23,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r12,r20 - movw r14,r22 - movw r24,r26 - movw r16,r2 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r1 - lsr r15 - ror r14 - ror r13 - ror r12 
- ror r0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r15,r0 - mov r0,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r0 - or r17,r0 - eor r24,r20 - eor r25,r21 - eor r16,r22 - eor r17,r23 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - mov r0,r20 - mov r20,r22 - mov r22,r0 - mov r0,r21 - mov r21,r23 - mov r23,r0 - lsl r20 - rol r21 - rol r22 - rol r23 - adc r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - adc r20,r1 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - ldd r12,Z+40 - ldd r13,Z+41 - ldd r14,Z+42 - ldd r15,Z+43 - eor r12,r20 - eor r13,r21 - eor r14,r22 - eor r15,r23 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - std Z+40,r12 - std Z+41,r13 - std Z+42,r14 - std Z+43,r15 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - ldd r0,Z+24 - eor r12,r0 - ldd r0,Z+25 - eor r13,r0 - ldd r0,Z+26 - eor r14,r0 - ldd r0,Z+27 - eor r15,r0 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - ldd r12,Z+48 - ldd r13,Z+49 - ldd r14,Z+50 - ldd r15,Z+51 - ldd r0,Z+8 - eor r12,r0 - ldd r0,Z+9 - eor r13,r0 - ldd r0,Z+10 - eor r14,r0 - ldd r0,Z+11 - eor r15,r0 - ldd r0,Z+28 - eor r12,r0 - ldd r0,Z+29 - eor r13,r0 - ldd r0,Z+30 - eor r14,r0 - ldd r0,Z+31 - eor r15,r0 - std Z+48,r12 - std Z+49,r13 - std Z+50,r14 - std Z+51,r15 - ldd r12,Z+52 - ldd r13,Z+53 - ldd r14,Z+54 - ldd r15,Z+55 - ldd r0,Z+12 - eor r12,r0 - ldd r0,Z+13 - eor r13,r0 - ldd r0,Z+14 - eor r14,r0 - ldd r0,Z+15 - eor r15,r0 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - std Z+52,r12 - std Z+53,r13 - std Z+54,r14 - std Z+55,r15 - subi r18,15 - dec r19 - breq 5904f - rjmp 38b -5904: - st Z,r20 - std Z+1,r21 - std Z+2,r22 - std Z+3,r23 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - std Z+16,r4 - std Z+17,r5 - std Z+18,r6 - std Z+19,r7 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size drysponge128_g, .-drysponge128_g - - .text -.global gascon256_core_round - .type gascon256_core_round, @function -gascon256_core_round: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,8 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 26 - ldi r18,15 - sub r18,r22 - swap r18 - or r22,r18 - ld r18,Z+ - ld r19,Z+ - ld r20,Z+ - ld r21,Z+ - ld r26,Z+ - ld r27,Z+ - ld r2,Z+ - ld r3,Z+ - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - eor r4,r22 - ld r22,Z - ldd r23,Z+8 - ldd r12,Z+16 - ldd r13,Z+32 - ldd r14,Z+40 - ldd r15,Z+48 - ldd r24,Z+56 - eor r18,r24 - eor r23,r22 - eor r4,r12 - eor r14,r13 - eor r24,r15 - mov r17,r18 - mov r25,r22 - mov r0,r18 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r18,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r4 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r4 - com 
r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r4,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r18 - eor r12,r23 - eor r13,r4 - eor r15,r14 - eor r18,r24 - com r4 - std Y+1,r18 - st Z,r22 - std Z+8,r23 - std Z+16,r12 - std Z+32,r13 - std Z+40,r14 - std Z+48,r15 - std Z+56,r24 - ldd r22,Z+1 - ldd r23,Z+9 - ldd r12,Z+17 - ldd r13,Z+33 - ldd r14,Z+41 - ldd r15,Z+49 - ldd r24,Z+57 - eor r19,r24 - eor r23,r22 - eor r5,r12 - eor r14,r13 - eor r24,r15 - mov r17,r19 - mov r25,r22 - mov r0,r19 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r19,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r5 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r5 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r5,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r19 - eor r12,r23 - eor r13,r5 - eor r15,r14 - eor r19,r24 - com r5 - std Y+2,r19 - std Z+1,r22 - std Z+9,r23 - std Z+17,r12 - std Z+33,r13 - std Z+41,r14 - std Z+49,r15 - std Z+57,r24 - ldd r22,Z+2 - ldd r23,Z+10 - ldd r12,Z+18 - ldd r13,Z+34 - ldd r14,Z+42 - ldd r15,Z+50 - ldd r24,Z+58 - eor r20,r24 - eor r23,r22 - eor r6,r12 - eor r14,r13 - eor r24,r15 - mov r17,r20 - mov r25,r22 - mov r0,r20 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r20,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r6 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r6 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r6,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r20 - eor r12,r23 - eor r13,r6 - eor r15,r14 - eor r20,r24 - com r6 - std Y+3,r20 - std Z+2,r22 - std Z+10,r23 - std Z+18,r12 - std Z+34,r13 - std Z+42,r14 - std Z+50,r15 - std Z+58,r24 - ldd r22,Z+3 - ldd r23,Z+11 - ldd r12,Z+19 - ldd r13,Z+35 - ldd r14,Z+43 - ldd r15,Z+51 - ldd r24,Z+59 - eor r21,r24 - eor r23,r22 - eor r7,r12 - eor r14,r13 - eor r24,r15 - mov r17,r21 - mov r25,r22 - mov r0,r21 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r21,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r7 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r7 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r7,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r21 - eor r12,r23 - eor r13,r7 - eor r15,r14 - eor r21,r24 - com r7 - std Y+4,r21 - std Z+3,r22 - std Z+11,r23 - std Z+19,r12 - std Z+35,r13 - std Z+43,r14 - std Z+51,r15 - std Z+59,r24 - ldd r22,Z+4 - ldd r23,Z+12 - ldd r12,Z+20 - ldd r13,Z+36 - ldd r14,Z+44 - ldd r15,Z+52 - ldd r24,Z+60 - eor r26,r24 - eor r23,r22 - eor r8,r12 - eor r14,r13 - eor r24,r15 - mov r17,r26 - mov r25,r22 - mov r0,r26 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 
- eor r26,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r8 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r8 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r8,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r26 - eor r12,r23 - eor r13,r8 - eor r15,r14 - eor r26,r24 - com r8 - std Y+5,r26 - std Z+4,r22 - std Z+12,r23 - std Z+20,r12 - std Z+36,r13 - std Z+44,r14 - std Z+52,r15 - std Z+60,r24 - ldd r22,Z+5 - ldd r23,Z+13 - ldd r12,Z+21 - ldd r13,Z+37 - ldd r14,Z+45 - ldd r15,Z+53 - ldd r24,Z+61 - eor r27,r24 - eor r23,r22 - eor r9,r12 - eor r14,r13 - eor r24,r15 - mov r17,r27 - mov r25,r22 - mov r0,r27 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r27,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r9 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r9 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r9,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r27 - eor r12,r23 - eor r13,r9 - eor r15,r14 - eor r27,r24 - com r9 - std Y+6,r27 - std Z+5,r22 - std Z+13,r23 - std Z+21,r12 - std Z+37,r13 - std Z+45,r14 - std Z+53,r15 - std Z+61,r24 - ldd r22,Z+6 - ldd r23,Z+14 - ldd r12,Z+22 - ldd r13,Z+38 - ldd r14,Z+46 - ldd r15,Z+54 - ldd r24,Z+62 - eor r2,r24 - eor r23,r22 - eor r10,r12 - eor r14,r13 - eor r24,r15 - mov r17,r2 - mov r25,r22 - mov r0,r2 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r2,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r10 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r10 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r2 - eor r12,r23 - eor r13,r10 - eor r15,r14 - eor r2,r24 - com r10 - std Y+7,r2 - std Z+6,r22 - std Z+14,r23 - std Z+22,r12 - std Z+38,r13 - std Z+46,r14 - std Z+54,r15 - std Z+62,r24 - ldd r22,Z+7 - ldd r23,Z+15 - ldd r12,Z+23 - ldd r13,Z+39 - ldd r14,Z+47 - ldd r15,Z+55 - ldd r24,Z+63 - eor r3,r24 - eor r23,r22 - eor r11,r12 - eor r14,r13 - eor r24,r15 - mov r17,r3 - mov r25,r22 - mov r0,r3 - com r0 - and r25,r0 - mov r16,r23 - mov r0,r22 - com r0 - and r16,r0 - eor r3,r16 - mov r16,r12 - mov r0,r23 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r11 - mov r0,r12 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r13 - mov r0,r11 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r22,r3 - eor r12,r23 - eor r13,r11 - eor r15,r14 - eor r3,r24 - com r11 - std Y+8,r3 - std Z+7,r22 - std Z+15,r23 - std Z+23,r12 - std Z+39,r13 - std Z+47,r14 - std Z+55,r15 - std Z+63,r24 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - 
movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - bst r22,0 - lsr r13 - ror r12 - ror r23 - ror r22 - bld r13,7 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - std Z+12,r26 - std Z+13,r27 - std Z+14,r2 - std Z+15,r3 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r22 - mov r22,r23 - mov r23,r12 - mov r12,r13 - mov r13,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - or r13,r0 - mov r0,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - std Z+20,r26 - std Z+21,r27 - std Z+22,r2 - std Z+23,r3 - movw r22,r4 - movw r12,r6 - movw r14,r8 - movw r24,r10 - mov r0,r1 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - or r13,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or 
r25,r0 - eor r14,r4 - eor r15,r5 - eor r24,r6 - eor r25,r7 - eor r22,r8 - eor r23,r9 - eor r12,r10 - eor r13,r11 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r1 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - or r11,r0 - eor r4,r14 - eor r5,r15 - eor r6,r24 - eor r7,r25 - eor r8,r22 - eor r9,r23 - eor r10,r12 - eor r11,r13 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r22 - mov r22,r12 - mov r12,r0 - mov r0,r23 - mov r23,r13 - mov r13,r0 - mov r0,r14 - mov r14,r24 - mov r24,r0 - mov r0,r15 - mov r15,r25 - mov r25,r0 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+32,r18 - std Z+33,r19 - std Z+34,r20 - std Z+35,r21 - std Z+36,r26 - std Z+37,r27 - std Z+38,r2 - std Z+39,r3 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - ldd r26,Z+44 - ldd r27,Z+45 - ldd r2,Z+46 - ldd r3,Z+47 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r13 - mov r13,r12 - mov r12,r23 - mov r23,r22 - mov r22,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - or r13,r0 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+40,r18 - std Z+41,r19 - std Z+42,r20 - std Z+43,r21 - std Z+44,r26 - std Z+45,r27 - std Z+46,r2 - std Z+47,r3 - ldd r18,Z+48 - ldd r19,Z+49 - ldd r20,Z+50 - ldd r21,Z+51 - ldd r26,Z+52 - ldd r27,Z+53 - ldd r2,Z+54 - ldd r3,Z+55 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r22 - mov r22,r23 - mov r23,r12 - mov r12,r13 - mov r13,r0 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - lsl r22 - rol r23 - 
rol r12 - rol r13 - adc r22,r1 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r3 - mov r3,r2 - mov r2,r27 - mov r27,r26 - mov r26,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+48,r18 - std Z+49,r19 - std Z+50,r20 - std Z+51,r21 - std Z+52,r26 - std Z+53,r27 - std Z+54,r2 - std Z+55,r3 - ldd r18,Z+56 - ldd r19,Z+57 - ldd r20,Z+58 - ldd r21,Z+59 - ldd r26,Z+60 - ldd r27,Z+61 - ldd r2,Z+62 - ldd r3,Z+63 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r13 - mov r13,r12 - mov r12,r23 - mov r23,r22 - mov r22,r0 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - lsl r22 - rol r23 - rol r12 - rol r13 - adc r22,r1 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r27 - ror r26 - ror r0 - or r3,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+56,r18 - std Z+57,r19 - std Z+58,r20 - std Z+59,r21 - std Z+60,r26 - std Z+61,r27 - std Z+62,r2 - std Z+63,r3 - ldd r18,Y+1 - ldd r19,Y+2 - ldd r20,Y+3 - ldd r21,Y+4 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r2,Y+7 - ldd r3,Y+8 - movw r22,r18 - movw r12,r20 - movw r14,r26 - movw r24,r2 - mov r0,r22 - mov r22,r23 - mov r23,r12 - mov r12,r13 - mov r13,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - lsr r13 - ror r12 - ror r23 - ror r22 - ror r0 - or r13,r0 - mov r0,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r22,r26 - eor r23,r27 - eor r12,r2 - eor r13,r3 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r26 - mov r26,r2 - mov r2,r0 - mov r0,r27 - mov r27,r3 - mov r3,r0 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - adc r26,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r26,r22 - eor r27,r23 - eor r2,r12 - eor r3,r13 - std Z+24,r4 - std Z+25,r5 - std Z+26,r6 - std Z+27,r7 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - st -Z,r3 - st -Z,r2 - st -Z,r27 - st -Z,r26 - st -Z,r21 - st -Z,r20 - st -Z,r19 - st -Z,r18 - adiw r28,8 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - 
pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gascon256_core_round, .-gascon256_core_round - - .text -.global drysponge256_g - .type drysponge256_g, @function -drysponge256_g: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,26 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 44 - subi r30,148 - sbci r31,255 - ld r19,Z - subi r30,108 - sbc r31,r1 - ldi r18,240 - std Y+25,r19 - std Y+26,r18 - std Y+9,r1 - std Y+10,r1 - std Y+11,r1 - std Y+12,r1 - std Y+13,r1 - std Y+14,r1 - std Y+15,r1 - std Y+16,r1 - std Y+17,r1 - std Y+18,r1 - std Y+19,r1 - std Y+20,r1 - std Y+21,r1 - std Y+22,r1 - std Y+23,r1 - std Y+24,r1 - ld r18,Z+ - ld r19,Z+ - ld r20,Z+ - ld r21,Z+ - ld r22,Z+ - ld r23,Z+ - ld r26,Z+ - ld r27,Z+ - ldd r2,Z+24 - ldd r3,Z+25 - ldd r4,Z+26 - ldd r5,Z+27 - ldd r6,Z+28 - ldd r7,Z+29 - ldd r8,Z+30 - ldd r9,Z+31 -40: - ldd r24,Y+26 - eor r2,r24 - subi r24,15 - std Y+26,r24 - ld r10,Z - ldd r11,Z+8 - ldd r12,Z+16 - ldd r13,Z+32 - ldd r14,Z+40 - ldd r15,Z+48 - ldd r24,Z+56 - eor r18,r24 - eor r11,r10 - eor r2,r12 - eor r14,r13 - eor r24,r15 - mov r17,r18 - mov r25,r10 - mov r0,r18 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r18,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r2 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r2 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r2,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r18 - eor r12,r11 - eor r13,r2 - eor r15,r14 - eor r18,r24 - com r2 - std Y+1,r18 - st Z,r10 - std Z+8,r11 - std Z+16,r12 - std Z+32,r13 - std Z+40,r14 - std Z+48,r15 - std Z+56,r24 - ldd r10,Z+1 - ldd r11,Z+9 - ldd r12,Z+17 - ldd r13,Z+33 - ldd r14,Z+41 - ldd r15,Z+49 - ldd r24,Z+57 - eor r19,r24 - eor r11,r10 - eor r3,r12 - eor r14,r13 - eor r24,r15 - mov r17,r19 - mov r25,r10 - mov r0,r19 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r19,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r3 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r3 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r3,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r19 - eor r12,r11 - eor r13,r3 - eor r15,r14 - eor r19,r24 - com r3 - std Y+2,r19 - std Z+1,r10 - std Z+9,r11 - std Z+17,r12 - std Z+33,r13 - std Z+41,r14 - std Z+49,r15 - std Z+57,r24 - ldd r10,Z+2 - ldd r11,Z+10 - ldd r12,Z+18 - ldd r13,Z+34 - ldd r14,Z+42 - ldd r15,Z+50 - ldd r24,Z+58 - eor r20,r24 - eor r11,r10 - eor r4,r12 - eor r14,r13 - eor r24,r15 - mov r17,r20 - mov r25,r10 - mov r0,r20 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r20,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r4 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r4 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r4,r16 - mov r16,r15 
- mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r20 - eor r12,r11 - eor r13,r4 - eor r15,r14 - eor r20,r24 - com r4 - std Y+3,r20 - std Z+2,r10 - std Z+10,r11 - std Z+18,r12 - std Z+34,r13 - std Z+42,r14 - std Z+50,r15 - std Z+58,r24 - ldd r10,Z+3 - ldd r11,Z+11 - ldd r12,Z+19 - ldd r13,Z+35 - ldd r14,Z+43 - ldd r15,Z+51 - ldd r24,Z+59 - eor r21,r24 - eor r11,r10 - eor r5,r12 - eor r14,r13 - eor r24,r15 - mov r17,r21 - mov r25,r10 - mov r0,r21 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r21,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r5 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r5 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r5,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r21 - eor r12,r11 - eor r13,r5 - eor r15,r14 - eor r21,r24 - com r5 - std Y+4,r21 - std Z+3,r10 - std Z+11,r11 - std Z+19,r12 - std Z+35,r13 - std Z+43,r14 - std Z+51,r15 - std Z+59,r24 - ldd r10,Z+4 - ldd r11,Z+12 - ldd r12,Z+20 - ldd r13,Z+36 - ldd r14,Z+44 - ldd r15,Z+52 - ldd r24,Z+60 - eor r22,r24 - eor r11,r10 - eor r6,r12 - eor r14,r13 - eor r24,r15 - mov r17,r22 - mov r25,r10 - mov r0,r22 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r22,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r6 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r6 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r6,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r22 - eor r12,r11 - eor r13,r6 - eor r15,r14 - eor r22,r24 - com r6 - std Y+5,r22 - std Z+4,r10 - std Z+12,r11 - std Z+20,r12 - std Z+36,r13 - std Z+44,r14 - std Z+52,r15 - std Z+60,r24 - ldd r10,Z+5 - ldd r11,Z+13 - ldd r12,Z+21 - ldd r13,Z+37 - ldd r14,Z+45 - ldd r15,Z+53 - ldd r24,Z+61 - eor r23,r24 - eor r11,r10 - eor r7,r12 - eor r14,r13 - eor r24,r15 - mov r17,r23 - mov r25,r10 - mov r0,r23 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r23,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r7 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r7 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r7,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r23 - eor r12,r11 - eor r13,r7 - eor r15,r14 - eor r23,r24 - com r7 - std Y+6,r23 - std Z+5,r10 - std Z+13,r11 - std Z+21,r12 - std Z+37,r13 - std Z+45,r14 - std Z+53,r15 - std Z+61,r24 - ldd r10,Z+6 - ldd r11,Z+14 - ldd r12,Z+22 - ldd r13,Z+38 - ldd r14,Z+46 - ldd r15,Z+54 - ldd r24,Z+62 - eor r26,r24 - eor r11,r10 - eor r8,r12 - eor r14,r13 - eor r24,r15 - mov r17,r26 - mov r25,r10 - mov r0,r26 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r26,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r8 - mov 
r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r8 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r8,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r26 - eor r12,r11 - eor r13,r8 - eor r15,r14 - eor r26,r24 - com r8 - std Y+7,r26 - std Z+6,r10 - std Z+14,r11 - std Z+22,r12 - std Z+38,r13 - std Z+46,r14 - std Z+54,r15 - std Z+62,r24 - ldd r10,Z+7 - ldd r11,Z+15 - ldd r12,Z+23 - ldd r13,Z+39 - ldd r14,Z+47 - ldd r15,Z+55 - ldd r24,Z+63 - eor r27,r24 - eor r11,r10 - eor r9,r12 - eor r14,r13 - eor r24,r15 - mov r17,r27 - mov r25,r10 - mov r0,r27 - com r0 - and r25,r0 - mov r16,r11 - mov r0,r10 - com r0 - and r16,r0 - eor r27,r16 - mov r16,r12 - mov r0,r11 - com r0 - and r16,r0 - eor r10,r16 - mov r16,r9 - mov r0,r12 - com r0 - and r16,r0 - eor r11,r16 - mov r16,r13 - mov r0,r9 - com r0 - and r16,r0 - eor r12,r16 - mov r16,r14 - mov r0,r13 - com r0 - and r16,r0 - eor r9,r16 - mov r16,r15 - mov r0,r14 - com r0 - and r16,r0 - eor r13,r16 - mov r16,r24 - mov r0,r15 - com r0 - and r16,r0 - eor r14,r16 - mov r0,r24 - com r0 - and r17,r0 - eor r15,r17 - eor r24,r25 - eor r10,r27 - eor r12,r11 - eor r13,r9 - eor r15,r14 - eor r27,r24 - com r9 - std Y+8,r27 - std Z+7,r10 - std Z+15,r11 - std Z+23,r12 - std Z+39,r13 - std Z+47,r14 - std Z+55,r15 - std Z+63,r24 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r26,Z+6 - ldd r27,Z+7 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r22 - mov r22,r26 - mov r26,r0 - mov r0,r23 - mov r23,r27 - mov r27,r0 - mov r0,r1 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - or r27,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r22 - std Z+5,r23 - std Z+6,r26 - std Z+7,r27 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r26,Z+14 - ldd r27,Z+15 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - bst r10,0 - lsr r13 - ror r12 - ror r11 - ror r10 - bld r13,7 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r1 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - or r27,r0 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - std Z+12,r22 - std Z+13,r23 - std 
Z+14,r26 - std Z+15,r27 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r26,Z+22 - ldd r27,Z+23 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r10 - mov r10,r11 - mov r11,r12 - mov r12,r13 - mov r13,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - or r13,r0 - mov r0,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r22 - mov r22,r23 - mov r23,r26 - mov r26,r27 - mov r27,r0 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - std Z+20,r22 - std Z+21,r23 - std Z+22,r26 - std Z+23,r27 - movw r10,r2 - movw r12,r4 - movw r14,r6 - movw r24,r8 - mov r0,r1 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - or r13,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r2 - eor r15,r3 - eor r24,r4 - eor r25,r5 - eor r10,r6 - eor r11,r7 - eor r12,r8 - eor r13,r9 - mov r0,r2 - mov r2,r4 - mov r4,r0 - mov r0,r3 - mov r3,r5 - mov r5,r0 - mov r0,r1 - lsr r5 - ror r4 - ror r3 - ror r2 - ror r0 - lsr r5 - ror r4 - ror r3 - ror r2 - ror r0 - lsr r5 - ror r4 - ror r3 - ror r2 - ror r0 - lsr r5 - ror r4 - ror r3 - ror r2 - ror r0 - or r5,r0 - mov r0,r6 - mov r6,r8 - mov r8,r0 - mov r0,r7 - mov r7,r9 - mov r9,r0 - mov r0,r1 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - or r9,r0 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - eor r6,r10 - eor r7,r11 - eor r8,r12 - eor r9,r13 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r22,Z+36 - ldd r23,Z+37 - ldd r26,Z+38 - ldd r27,Z+39 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r10 - mov r10,r12 - mov r12,r0 - mov r0,r11 - mov r11,r13 - mov r13,r0 - mov r0,r14 - mov r14,r24 - mov r24,r0 - mov r0,r15 - mov r15,r25 - mov r25,r0 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r22 - mov r22,r26 - mov r26,r0 - mov r0,r23 - mov r23,r27 - mov r27,r0 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+32,r18 - std Z+33,r19 - std 
Z+34,r20 - std Z+35,r21 - std Z+36,r22 - std Z+37,r23 - std Z+38,r26 - std Z+39,r27 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - ldd r22,Z+44 - ldd r23,Z+45 - ldd r26,Z+46 - ldd r27,Z+47 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r13 - mov r13,r12 - mov r12,r11 - mov r11,r10 - mov r10,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - or r13,r0 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+40,r18 - std Z+41,r19 - std Z+42,r20 - std Z+43,r21 - std Z+44,r22 - std Z+45,r23 - std Z+46,r26 - std Z+47,r27 - ldd r18,Z+48 - ldd r19,Z+49 - ldd r20,Z+50 - ldd r21,Z+51 - ldd r22,Z+52 - ldd r23,Z+53 - ldd r26,Z+54 - ldd r27,Z+55 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r10 - mov r10,r11 - mov r11,r12 - mov r12,r13 - mov r13,r0 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r27 - mov r27,r26 - mov r26,r23 - mov r23,r22 - mov r22,r0 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+48,r18 - std Z+49,r19 - std Z+50,r20 - std Z+51,r21 - std Z+52,r22 - std Z+53,r23 - std Z+54,r26 - std Z+55,r27 - ldd r18,Z+56 - ldd r19,Z+57 - ldd r20,Z+58 - ldd r21,Z+59 - ldd r22,Z+60 - ldd r23,Z+61 - ldd r26,Z+62 - ldd r27,Z+63 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r13 - mov r13,r12 - mov r12,r11 - mov r11,r10 - mov r10,r0 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - lsl r10 - rol r11 - rol r12 - rol r13 - adc r10,r1 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r21,r0 - mov r0,r27 - mov r27,r26 - mov r26,r23 - mov r23,r22 - mov r22,r0 - mov r0,r1 - lsr r27 - ror r26 - ror r23 - ror r22 - ror r0 - or r27,r0 - eor 
r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - std Z+56,r18 - std Z+57,r19 - std Z+58,r20 - std Z+59,r21 - std Z+60,r22 - std Z+61,r23 - std Z+62,r26 - std Z+63,r27 - ldd r18,Y+1 - ldd r19,Y+2 - ldd r20,Y+3 - ldd r21,Y+4 - ldd r22,Y+5 - ldd r23,Y+6 - ldd r26,Y+7 - ldd r27,Y+8 - movw r10,r18 - movw r12,r20 - movw r14,r22 - movw r24,r26 - mov r0,r10 - mov r10,r11 - mov r11,r12 - mov r12,r13 - mov r13,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - or r13,r0 - mov r0,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r14,r18 - eor r15,r19 - eor r24,r20 - eor r25,r21 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - mov r0,r22 - mov r22,r26 - mov r26,r0 - mov r0,r23 - mov r23,r27 - mov r27,r0 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - lsl r22 - rol r23 - rol r26 - rol r27 - adc r22,r1 - eor r18,r14 - eor r19,r15 - eor r20,r24 - eor r21,r25 - eor r22,r10 - eor r23,r11 - eor r26,r12 - eor r27,r13 - ldd r10,Y+9 - ldd r11,Y+10 - ldd r12,Y+11 - ldd r13,Y+12 - eor r10,r18 - eor r11,r19 - eor r12,r20 - eor r13,r21 - ldd r0,Z+12 - eor r10,r0 - ldd r0,Z+13 - eor r11,r0 - ldd r0,Z+14 - eor r12,r0 - ldd r0,Z+15 - eor r13,r0 - ldd r0,Z+32 - eor r10,r0 - ldd r0,Z+33 - eor r11,r0 - ldd r0,Z+34 - eor r12,r0 - ldd r0,Z+35 - eor r13,r0 - ldd r0,Z+52 - eor r10,r0 - ldd r0,Z+53 - eor r11,r0 - ldd r0,Z+54 - eor r12,r0 - ldd r0,Z+55 - eor r13,r0 - std Y+9,r10 - std Y+10,r11 - std Y+11,r12 - std Y+12,r13 - ldd r10,Y+13 - ldd r11,Y+14 - ldd r12,Y+15 - ldd r13,Y+16 - eor r10,r22 - eor r11,r23 - eor r12,r26 - eor r13,r27 - ldd r0,Z+16 - eor r10,r0 - ldd r0,Z+17 - eor r11,r0 - ldd r0,Z+18 - eor r12,r0 - ldd r0,Z+19 - eor r13,r0 - ldd r0,Z+36 - eor r10,r0 - ldd r0,Z+37 - eor r11,r0 - ldd r0,Z+38 - eor r12,r0 - ldd r0,Z+39 - eor r13,r0 - ldd r0,Z+40 - eor r10,r0 - ldd r0,Z+41 - eor r11,r0 - ldd r0,Z+42 - eor r12,r0 - ldd r0,Z+43 - eor r13,r0 - std Y+13,r10 - std Y+14,r11 - std Y+15,r12 - std Y+16,r13 - ldd r10,Y+17 - ldd r11,Y+18 - ldd r12,Y+19 - ldd r13,Y+20 - eor r10,r2 - eor r11,r3 - eor r12,r4 - eor r13,r5 - ld r0,Z - eor r10,r0 - ldd r0,Z+1 - eor r11,r0 - ldd r0,Z+2 - eor r12,r0 - ldd r0,Z+3 - eor r13,r0 - ldd r0,Z+20 - eor r10,r0 - ldd r0,Z+21 - eor r11,r0 - ldd r0,Z+22 - eor r12,r0 - ldd r0,Z+23 - eor r13,r0 - ldd r0,Z+44 - eor r10,r0 - ldd r0,Z+45 - eor r11,r0 - ldd r0,Z+46 - eor r12,r0 - ldd r0,Z+47 - eor r13,r0 - std Y+17,r10 - std Y+18,r11 - std Y+19,r12 - std Y+20,r13 - ldd r10,Y+21 - ldd r11,Y+22 - ldd r12,Y+23 - ldd r13,Y+24 - eor r10,r6 - eor r11,r7 - eor r12,r8 - eor r13,r9 - ldd r0,Z+4 - eor r10,r0 - ldd r0,Z+5 - eor r11,r0 - ldd r0,Z+6 - eor r12,r0 - ldd r0,Z+7 - eor r13,r0 - ldd r0,Z+8 - eor r10,r0 - ldd r0,Z+9 - eor r11,r0 - ldd r0,Z+10 - eor r12,r0 - ldd r0,Z+11 - eor r13,r0 - ldd r0,Z+48 - eor r10,r0 - ldd r0,Z+49 - eor r11,r0 - ldd r0,Z+50 - eor r12,r0 - ldd r0,Z+51 - eor r13,r0 - std Y+21,r10 - std Y+22,r11 - std Y+23,r12 - std Y+24,r13 - ldd r10,Y+25 - dec r10 - std Y+25,r10 - breq 6623f - rjmp 40b -6623: - std Z+24,r2 - std Z+25,r3 - std Z+26,r4 - std Z+27,r5 - std Z+28,r6 - std Z+29,r7 - std Z+30,r8 - std Z+31,r9 - st -Z,r27 - st -Z,r26 - st -Z,r23 - st -Z,r22 - st -Z,r21 - st 
-Z,r20 - st -Z,r19 - st -Z,r18 - ldi r25,72 - add r30,r25 - adc r31,r1 - ldd r18,Y+9 - ldd r19,Y+10 - ldd r20,Y+11 - ldd r21,Y+12 - ldd r22,Y+13 - ldd r23,Y+14 - ldd r26,Y+15 - ldd r27,Y+16 - ldd r2,Y+17 - ldd r3,Y+18 - ldd r4,Y+19 - ldd r5,Y+20 - ldd r6,Y+21 - ldd r7,Y+22 - ldd r8,Y+23 - ldd r9,Y+24 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r22 - std Z+5,r23 - std Z+6,r26 - std Z+7,r27 - std Z+8,r2 - std Z+9,r3 - std Z+10,r4 - std Z+11,r5 - std Z+12,r6 - std Z+13,r7 - std Z+14,r8 - std Z+15,r9 - adiw r28,26 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size drysponge256_g, .-drysponge256_g - -#endif diff --git a/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/internal-drysponge.c b/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/internal-drysponge.c deleted file mode 100644 index 6dfe48c..0000000 --- a/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/internal-drysponge.c +++ /dev/null @@ -1,611 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "internal-drysponge.h" -#include - -#if !defined(__AVR__) - -/* Right rotations in bit-interleaved format */ -#define intRightRotateEven(x,bits) \ - (__extension__ ({ \ - uint32_t _x0 = (uint32_t)(x); \ - uint32_t _x1 = (uint32_t)((x) >> 32); \ - _x0 = rightRotate(_x0, (bits)); \ - _x1 = rightRotate(_x1, (bits)); \ - _x0 | (((uint64_t)_x1) << 32); \ - })) -#define intRightRotateOdd(x,bits) \ - (__extension__ ({ \ - uint32_t _x0 = (uint32_t)(x); \ - uint32_t _x1 = (uint32_t)((x) >> 32); \ - _x0 = rightRotate(_x0, ((bits) + 1) % 32); \ - _x1 = rightRotate(_x1, (bits)); \ - _x1 | (((uint64_t)_x0) << 32); \ - })) -#define intRightRotate1_64(x) \ - (__extension__ ({ \ - uint32_t _x0 = (uint32_t)(x); \ - uint32_t _x1 = (uint32_t)((x) >> 32); \ - _x0 = rightRotate1(_x0); \ - _x1 | (((uint64_t)_x0) << 32); \ - })) -#define intRightRotate2_64(x) (intRightRotateEven((x), 1)) -#define intRightRotate3_64(x) (intRightRotateOdd((x), 1)) -#define intRightRotate4_64(x) (intRightRotateEven((x), 2)) -#define intRightRotate5_64(x) (intRightRotateOdd((x), 2)) -#define intRightRotate6_64(x) (intRightRotateEven((x), 3)) -#define intRightRotate7_64(x) (intRightRotateOdd((x), 3)) -#define intRightRotate8_64(x) (intRightRotateEven((x), 4)) -#define intRightRotate9_64(x) (intRightRotateOdd((x), 4)) -#define intRightRotate10_64(x) (intRightRotateEven((x), 5)) -#define intRightRotate11_64(x) (intRightRotateOdd((x), 5)) -#define intRightRotate12_64(x) (intRightRotateEven((x), 6)) -#define intRightRotate13_64(x) (intRightRotateOdd((x), 6)) -#define intRightRotate14_64(x) (intRightRotateEven((x), 7)) -#define intRightRotate15_64(x) (intRightRotateOdd((x), 7)) -#define intRightRotate16_64(x) (intRightRotateEven((x), 8)) -#define intRightRotate17_64(x) (intRightRotateOdd((x), 8)) -#define intRightRotate18_64(x) (intRightRotateEven((x), 9)) -#define intRightRotate19_64(x) (intRightRotateOdd((x), 9)) -#define intRightRotate20_64(x) (intRightRotateEven((x), 10)) -#define intRightRotate21_64(x) (intRightRotateOdd((x), 10)) -#define intRightRotate22_64(x) (intRightRotateEven((x), 11)) -#define intRightRotate23_64(x) (intRightRotateOdd((x), 11)) -#define intRightRotate24_64(x) (intRightRotateEven((x), 12)) -#define intRightRotate25_64(x) (intRightRotateOdd((x), 12)) -#define intRightRotate26_64(x) (intRightRotateEven((x), 13)) -#define intRightRotate27_64(x) (intRightRotateOdd((x), 13)) -#define intRightRotate28_64(x) (intRightRotateEven((x), 14)) -#define intRightRotate29_64(x) (intRightRotateOdd((x), 14)) -#define intRightRotate30_64(x) (intRightRotateEven((x), 15)) -#define intRightRotate31_64(x) (intRightRotateOdd((x), 15)) -#define intRightRotate32_64(x) (intRightRotateEven((x), 16)) -#define intRightRotate33_64(x) (intRightRotateOdd((x), 16)) -#define intRightRotate34_64(x) (intRightRotateEven((x), 17)) -#define intRightRotate35_64(x) (intRightRotateOdd((x), 17)) -#define intRightRotate36_64(x) (intRightRotateEven((x), 18)) -#define intRightRotate37_64(x) (intRightRotateOdd((x), 18)) -#define intRightRotate38_64(x) (intRightRotateEven((x), 19)) -#define intRightRotate39_64(x) (intRightRotateOdd((x), 19)) -#define intRightRotate40_64(x) (intRightRotateEven((x), 20)) -#define intRightRotate41_64(x) (intRightRotateOdd((x), 20)) -#define intRightRotate42_64(x) (intRightRotateEven((x), 21)) -#define intRightRotate43_64(x) (intRightRotateOdd((x), 21)) -#define intRightRotate44_64(x) (intRightRotateEven((x), 22)) -#define intRightRotate45_64(x) (intRightRotateOdd((x), 22)) -#define intRightRotate46_64(x) 
(intRightRotateEven((x), 23)) -#define intRightRotate47_64(x) (intRightRotateOdd((x), 23)) -#define intRightRotate48_64(x) (intRightRotateEven((x), 24)) -#define intRightRotate49_64(x) (intRightRotateOdd((x), 24)) -#define intRightRotate50_64(x) (intRightRotateEven((x), 25)) -#define intRightRotate51_64(x) (intRightRotateOdd((x), 25)) -#define intRightRotate52_64(x) (intRightRotateEven((x), 26)) -#define intRightRotate53_64(x) (intRightRotateOdd((x), 26)) -#define intRightRotate54_64(x) (intRightRotateEven((x), 27)) -#define intRightRotate55_64(x) (intRightRotateOdd((x), 27)) -#define intRightRotate56_64(x) (intRightRotateEven((x), 28)) -#define intRightRotate57_64(x) (intRightRotateOdd((x), 28)) -#define intRightRotate58_64(x) (intRightRotateEven((x), 29)) -#define intRightRotate59_64(x) (intRightRotateOdd((x), 29)) -#define intRightRotate60_64(x) (intRightRotateEven((x), 30)) -#define intRightRotate61_64(x) (intRightRotateOdd((x), 30)) -#define intRightRotate62_64(x) (intRightRotateEven((x), 31)) -#define intRightRotate63_64(x) (intRightRotateOdd((x), 31)) - -void gascon128_core_round(gascon128_state_t *state, uint8_t round) -{ - uint64_t t0, t1, t2, t3, t4; - - /* Load the state into local varaibles */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); - uint64_t x4 = le_load_word64(state->B + 32); -#endif - - /* Add the round constant to the middle of the state */ - x2 ^= ((0x0F - round) << 4) | round; - - /* Substitution layer */ - x0 ^= x4; x2 ^= x1; x4 ^= x3; t0 = (~x0) & x1; t1 = (~x1) & x2; - t2 = (~x2) & x3; t3 = (~x3) & x4; t4 = (~x4) & x0; x0 ^= t1; - x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t0; x1 ^= x0; x3 ^= x2; - x0 ^= x4; x2 = ~x2; - - /* Linear diffusion layer */ - x0 ^= intRightRotate19_64(x0) ^ intRightRotate28_64(x0); - x1 ^= intRightRotate61_64(x1) ^ intRightRotate38_64(x1); - x2 ^= intRightRotate1_64(x2) ^ intRightRotate6_64(x2); - x3 ^= intRightRotate10_64(x3) ^ intRightRotate17_64(x3); - x4 ^= intRightRotate7_64(x4) ^ intRightRotate40_64(x4); - - /* Write the local variables back to the state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); - le_store_word64(state->B + 32, x4); -#endif -} - -void gascon256_core_round(gascon256_state_t *state, uint8_t round) -{ - uint64_t t0, t1, t2, t3, t4, t5, t6, t7, t8; - - /* Load the state into local varaibles */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; - uint64_t x5 = state->S[5]; - uint64_t x6 = state->S[6]; - uint64_t x7 = state->S[7]; - uint64_t x8 = state->S[8]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); - uint64_t x4 = le_load_word64(state->B + 32); - uint64_t x5 = le_load_word64(state->B + 40); - uint64_t x6 = le_load_word64(state->B + 48); - uint64_t x7 = le_load_word64(state->B + 56); - 
uint64_t x8 = le_load_word64(state->B + 64); -#endif - - /* Add the round constant to the middle of the state */ - x4 ^= ((0x0F - round) << 4) | round; - - /* Substitution layer */ - x0 ^= x8; x2 ^= x1; x4 ^= x3; x6 ^= x5; x8 ^= x7; t0 = (~x0) & x1; - t1 = (~x1) & x2; t2 = (~x2) & x3; t3 = (~x3) & x4; t4 = (~x4) & x5; - t5 = (~x5) & x6; t6 = (~x6) & x7; t7 = (~x7) & x8; t8 = (~x8) & x0; - x0 ^= t1; x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t5; x5 ^= t6; x6 ^= t7; - x7 ^= t8; x8 ^= t0; x1 ^= x0; x3 ^= x2; x5 ^= x4; x7 ^= x6; x0 ^= x8; - x4 = ~x4; - - /* Linear diffusion layer */ - x0 ^= intRightRotate19_64(x0) ^ intRightRotate28_64(x0); - x1 ^= intRightRotate61_64(x1) ^ intRightRotate38_64(x1); - x2 ^= intRightRotate1_64(x2) ^ intRightRotate6_64(x2); - x3 ^= intRightRotate10_64(x3) ^ intRightRotate17_64(x3); - x4 ^= intRightRotate7_64(x4) ^ intRightRotate40_64(x4); - x5 ^= intRightRotate31_64(x5) ^ intRightRotate26_64(x5); - x6 ^= intRightRotate53_64(x6) ^ intRightRotate58_64(x6); - x7 ^= intRightRotate9_64(x7) ^ intRightRotate46_64(x7); - x8 ^= intRightRotate43_64(x8) ^ intRightRotate50_64(x8); - - /* Write the local variables back to the state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; - state->S[5] = x5; - state->S[6] = x6; - state->S[7] = x7; - state->S[8] = x8; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); - le_store_word64(state->B + 32, x4); - le_store_word64(state->B + 40, x5); - le_store_word64(state->B + 48, x6); - le_store_word64(state->B + 56, x7); - le_store_word64(state->B + 64, x8); -#endif -} - -void drysponge128_g(drysponge128_state_t *state) -{ - unsigned round; - - /* Perform the first round. For each round we XOR the 16 bytes of - * the output data with the first 16 bytes of the state. And then - * XOR with the next 16 bytes of the state, rotated by 4 bytes */ - gascon128_core_round(&(state->c), 0); - state->r.W[0] = state->c.W[0] ^ state->c.W[5]; - state->r.W[1] = state->c.W[1] ^ state->c.W[6]; - state->r.W[2] = state->c.W[2] ^ state->c.W[7]; - state->r.W[3] = state->c.W[3] ^ state->c.W[4]; - - /* Perform the rest of the rounds */ - for (round = 1; round < state->rounds; ++round) { - gascon128_core_round(&(state->c), round); - state->r.W[0] ^= state->c.W[0] ^ state->c.W[5]; - state->r.W[1] ^= state->c.W[1] ^ state->c.W[6]; - state->r.W[2] ^= state->c.W[2] ^ state->c.W[7]; - state->r.W[3] ^= state->c.W[3] ^ state->c.W[4]; - } -} - -void drysponge256_g(drysponge256_state_t *state) -{ - unsigned round; - - /* Perform the first round. For each round we XOR the 16 bytes of - * the output data with the first 16 bytes of the state. And then - * XOR with the next 16 bytes of the state, rotated by 4 bytes. - * And so on for a total of 64 bytes XOR'ed into the output data. 
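For reference, the squeeze pattern described in that comment can be expressed as one generic loop: each successive 16-byte chunk of the capacity is XORed into the rate, rotated by one more 32-bit word per chunk. The following is a minimal standalone sketch (not part of the deleted source), with hypothetical arrays c[] and r[] standing in for state->c.W and state->r.W; the original code assigns on the first round and XORs on later rounds, but the word-index pattern is the same.

#include <stdint.h>

/* Hypothetical sketch of one DrySPONGE256 squeeze step: c[] holds the
 * first sixteen 32-bit capacity words, r[] the four 32-bit rate words.
 * Chunk k of the capacity is folded into the rate rotated by k words,
 * which reproduces the index pattern used in drysponge256_g(). */
static void drysponge256_squeeze_step(uint32_t r[4], const uint32_t c[16])
{
    unsigned chunk, i;
    for (chunk = 0; chunk < 4; ++chunk) {
        for (i = 0; i < 4; ++i)
            r[i] ^= c[4 * chunk + ((i + chunk) & 3)];
    }
}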
*/ - gascon256_core_round(&(state->c), 0); - state->r.W[0] = state->c.W[0] ^ state->c.W[5] ^ - state->c.W[10] ^ state->c.W[15]; - state->r.W[1] = state->c.W[1] ^ state->c.W[6] ^ - state->c.W[11] ^ state->c.W[12]; - state->r.W[2] = state->c.W[2] ^ state->c.W[7] ^ - state->c.W[8] ^ state->c.W[13]; - state->r.W[3] = state->c.W[3] ^ state->c.W[4] ^ - state->c.W[9] ^ state->c.W[14]; - - /* Perform the rest of the rounds */ - for (round = 1; round < state->rounds; ++round) { - gascon256_core_round(&(state->c), round); - state->r.W[0] ^= state->c.W[0] ^ state->c.W[5] ^ - state->c.W[10] ^ state->c.W[15]; - state->r.W[1] ^= state->c.W[1] ^ state->c.W[6] ^ - state->c.W[11] ^ state->c.W[12]; - state->r.W[2] ^= state->c.W[2] ^ state->c.W[7] ^ - state->c.W[8] ^ state->c.W[13]; - state->r.W[3] ^= state->c.W[3] ^ state->c.W[4] ^ - state->c.W[9] ^ state->c.W[14]; - } -} - -#endif /* !__AVR__ */ - -void drysponge128_g_core(drysponge128_state_t *state) -{ - unsigned round; - for (round = 0; round < state->rounds; ++round) - gascon128_core_round(&(state->c), round); -} - -void drysponge256_g_core(drysponge256_state_t *state) -{ - unsigned round; - for (round = 0; round < state->rounds; ++round) - gascon256_core_round(&(state->c), round); -} - -/** - * \fn uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index) - * \brief Selects an element of x in constant time. - * - * \param x Points to the four elements of x. - * \param index Index of which element to extract between 0 and 3. - * - * \return The selected element of x. - */ -#if !defined(__AVR__) -STATIC_INLINE uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index) -{ - /* We need to be careful how we select each element of x because - * we are doing a data-dependent fetch here. Do the fetch in a way - * that should avoid cache timing issues by fetching every element - * of x and masking away the ones we don't want. - * - * There is a possible side channel here with respect to power analysis. - * The "mask" value will be all-ones for the selected index and all-zeroes - * for the other indexes. This may show up as different power consumption - * for the "result ^= x[i] & mask" statement when i is the selected index. - * Such a side channel could in theory allow reading the plaintext input - * to the cipher by analysing the CPU's power consumption. - * - * The DryGASCON specification acknowledges the possibility of plaintext - * recovery in section 7.4. For software mitigation the specification - * suggests randomization of the indexes into c and x and randomization - * of the order of processing words. We aren't doing that here yet. - * Patches welcome to fix this. - */ - uint32_t mask = -((uint32_t)((0x04 - index) >> 2)); - uint32_t result = x[0] & mask; - mask = -((uint32_t)((0x04 - (index ^ 0x01)) >> 2)); - result ^= x[1] & mask; - mask = -((uint32_t)((0x04 - (index ^ 0x02)) >> 2)); - result ^= x[2] & mask; - mask = -((uint32_t)((0x04 - (index ^ 0x03)) >> 2)); - return result ^ (x[3] & mask); -} -#else -/* AVR is more or less immune to cache timing issues because it doesn't - * have anything like an L1 or L2 cache. Select the word directly */ -#define drysponge_select_x(x, index) ((x)[(index)]) -#endif - -/** - * \brief Mixes a 32-bit value into the DrySPONGE128 state. - * - * \param state DrySPONGE128 state. - * \param data The data to be mixed in the bottom 10 bits. 
- */ -static void drysponge128_mix_phase_round - (drysponge128_state_t *state, uint32_t data) -{ - /* Mix in elements from x according to the 2-bit indexes in the data */ - state->c.W[0] ^= drysponge_select_x(state->x.W, data & 0x03); - state->c.W[2] ^= drysponge_select_x(state->x.W, (data >> 2) & 0x03); - state->c.W[4] ^= drysponge_select_x(state->x.W, (data >> 4) & 0x03); - state->c.W[6] ^= drysponge_select_x(state->x.W, (data >> 6) & 0x03); - state->c.W[8] ^= drysponge_select_x(state->x.W, (data >> 8) & 0x03); -} - -/** - * \brief Mixes a 32-bit value into the DrySPONGE256 state. - * - * \param state DrySPONGE256 state. - * \param data The data to be mixed in the bottom 18 bits. - */ -static void drysponge256_mix_phase_round - (drysponge256_state_t *state, uint32_t data) -{ - /* Mix in elements from x according to the 2-bit indexes in the data */ - state->c.W[0] ^= drysponge_select_x(state->x.W, data & 0x03); - state->c.W[2] ^= drysponge_select_x(state->x.W, (data >> 2) & 0x03); - state->c.W[4] ^= drysponge_select_x(state->x.W, (data >> 4) & 0x03); - state->c.W[6] ^= drysponge_select_x(state->x.W, (data >> 6) & 0x03); - state->c.W[8] ^= drysponge_select_x(state->x.W, (data >> 8) & 0x03); - state->c.W[10] ^= drysponge_select_x(state->x.W, (data >> 10) & 0x03); - state->c.W[12] ^= drysponge_select_x(state->x.W, (data >> 12) & 0x03); - state->c.W[14] ^= drysponge_select_x(state->x.W, (data >> 14) & 0x03); - state->c.W[16] ^= drysponge_select_x(state->x.W, (data >> 16) & 0x03); -} - -/** - * \brief Mixes an input block into a DrySPONGE128 state. - * - * \param state The DrySPONGE128 state. - * \param data Full rate block containing the input data. - */ -static void drysponge128_mix_phase - (drysponge128_state_t *state, const unsigned char data[DRYSPONGE128_RATE]) -{ - /* Mix 10-bit groups into the output, with the domain - * separator added to the last two groups */ - drysponge128_mix_phase_round - (state, data[0] | (((uint32_t)(data[1])) << 8)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[1] >> 2) | (((uint32_t)(data[2])) << 6)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[2] >> 4) | (((uint32_t)(data[3])) << 4)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[3] >> 6) | (((uint32_t)(data[4])) << 2)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, data[5] | (((uint32_t)(data[6])) << 8)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[6] >> 2) | (((uint32_t)(data[7])) << 6)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[7] >> 4) | (((uint32_t)(data[8])) << 4)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[8] >> 6) | (((uint32_t)(data[9])) << 2)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, data[10] | (((uint32_t)(data[11])) << 8)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[11] >> 2) | (((uint32_t)(data[12])) << 6)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, (data[12] >> 4) | (((uint32_t)(data[13])) << 4)); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round - (state, ((data[13] >> 6) | (((uint32_t)(data[14])) << 2))); - gascon128_core_round(&(state->c), 0); - drysponge128_mix_phase_round(state, data[15] ^ state->domain); - gascon128_core_round(&(state->c), 0); - 
drysponge128_mix_phase_round(state, state->domain >> 10); - - /* Revert to the default domain separator for the next block */ - state->domain = 0; -} - -/** - * \brief Mixes an input block into a DrySPONGE256 state. - * - * \param state The DrySPONGE256 state. - * \param data Full rate block containing the input data. - */ -static void drysponge256_mix_phase - (drysponge256_state_t *state, const unsigned char data[DRYSPONGE256_RATE]) -{ - /* Mix 18-bit groups into the output, with the domain in the last group */ - drysponge256_mix_phase_round - (state, data[0] | (((uint32_t)(data[1])) << 8) | - (((uint32_t)(data[2])) << 16)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[2] >> 2) | (((uint32_t)(data[3])) << 6) | - (((uint32_t)(data[4])) << 14)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[4] >> 4) | (((uint32_t)(data[5])) << 4) | - (((uint32_t)(data[6])) << 12)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[6] >> 6) | (((uint32_t)(data[7])) << 2) | - (((uint32_t)(data[8])) << 10)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, data[9] | (((uint32_t)(data[10])) << 8) | - (((uint32_t)(data[11])) << 16)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[11] >> 2) | (((uint32_t)(data[12])) << 6) | - (((uint32_t)(data[13])) << 14)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[13] >> 4) | (((uint32_t)(data[14])) << 4) | - (((uint32_t)(data[15])) << 12)); - gascon256_core_round(&(state->c), 0); - drysponge256_mix_phase_round - (state, (data[15] >> 6) ^ state->domain); - - /* Revert to the default domain separator for the next block */ - state->domain = 0; -} - -void drysponge128_f_absorb - (drysponge128_state_t *state, const unsigned char *input, unsigned len) -{ - if (len >= DRYSPONGE128_RATE) { - drysponge128_mix_phase(state, input); - } else { - unsigned char padded[DRYSPONGE128_RATE]; - memcpy(padded, input, len); - padded[len] = 0x01; - memset(padded + len + 1, 0, DRYSPONGE128_RATE - len - 1); - drysponge128_mix_phase(state, padded); - } -} - -void drysponge256_f_absorb - (drysponge256_state_t *state, const unsigned char *input, unsigned len) -{ - if (len >= DRYSPONGE256_RATE) { - drysponge256_mix_phase(state, input); - } else { - unsigned char padded[DRYSPONGE256_RATE]; - memcpy(padded, input, len); - padded[len] = 0x01; - memset(padded + len + 1, 0, DRYSPONGE256_RATE - len - 1); - drysponge256_mix_phase(state, padded); - } -} - -/** - * \brief Determine if some of the words of an "x" value are identical. - * - * \param x Points to the "x" buffer to check. - * - * \return Non-zero if some of the words are the same, zero if they are - * distinct from each other. - * - * We try to perform the check in constant time to avoid giving away - * any information about the value of the key. 
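The constant-time check that follows relies on a small arithmetic trick: for a 32-bit value v, the expression (0x100000000ULL - v) >> 32 is 1 exactly when v is zero and 0 otherwise, so equality of two words can be accumulated into the result without branching on key-dependent data. A minimal standalone illustration (a hypothetical helper, not part of the deleted source):

#include <stdint.h>

/* Returns 1 if v == 0 and 0 otherwise, without branching: only when v
 * is zero does the subtraction reach 2^32, so only then does bit 32
 * survive the right shift. */
static int word_is_zero(uint32_t v)
{
    return (int)((0x100000000ULL - v) >> 32);
}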
- */ -static int drysponge_x_words_are_same(const uint32_t x[4]) -{ - unsigned i, j; - int result = 0; - for (i = 0; i < 3; ++i) { - for (j = i + 1; j < 4; ++j) { - uint32_t check = x[i] ^ x[j]; - result |= (int)((0x100000000ULL - check) >> 32); - } - } - return result; -} - -void drysponge128_setup - (drysponge128_state_t *state, const unsigned char *key, - const unsigned char *nonce, int final_block) -{ - /* Fill the GASCON-128 state with repeated copies of the key */ - memcpy(state->c.B, key, 16); - memcpy(state->c.B + 16, key, 16); - memcpy(state->c.B + 32, key, 8); - - /* Generate the "x" value for the state. All four words of "x" - * must be unique because they will be used in drysponge_select_x() - * as stand-ins for the bit pairs 00, 01, 10, and 11. - * - * Run the core block operation over and over until "x" is unique. - * Technically the runtime here is key-dependent and not constant. - * If the input key is randomized, this should only take 1 round - * on average so it is "almost constant time". - */ - do { - gascon128_core_round(&(state->c), 0); - } while (drysponge_x_words_are_same(state->c.W)); - memcpy(state->x.W, state->c.W, sizeof(state->x)); - - /* Replace the generated "x" value in the state with the key prefix */ - memcpy(state->c.W, key, sizeof(state->x)); - - /* Absorb the nonce into the state with an increased number of rounds */ - state->rounds = DRYSPONGE128_INIT_ROUNDS; - state->domain = DRYDOMAIN128_NONCE; - if (final_block) - state->domain |= DRYDOMAIN128_FINAL; - drysponge128_f_absorb(state, nonce, 16); - drysponge128_g(state); - - /* Set up the normal number of rounds for future operations */ - state->rounds = DRYSPONGE128_ROUNDS; -} - -void drysponge256_setup - (drysponge256_state_t *state, const unsigned char *key, - const unsigned char *nonce, int final_block) -{ - /* Fill the GASCON-256 state with repeated copies of the key */ - memcpy(state->c.B, key, 32); - memcpy(state->c.B + 32, key, 32); - memcpy(state->c.B + 64, key, 8); - - /* Generate the "x" value for the state */ - do { - gascon256_core_round(&(state->c), 0); - } while (drysponge_x_words_are_same(state->c.W)); - memcpy(state->x.W, state->c.W, sizeof(state->x)); - - /* Replace the generated "x" value in the state with the key prefix */ - memcpy(state->c.W, key, sizeof(state->x)); - - /* Absorb the nonce into the state with an increased number of rounds */ - state->rounds = DRYSPONGE256_INIT_ROUNDS; - state->domain = DRYDOMAIN256_NONCE; - if (final_block) - state->domain |= DRYDOMAIN256_FINAL; - drysponge256_f_absorb(state, nonce, 16); - drysponge256_g(state); - - /* Set up the normal number of rounds for future operations */ - state->rounds = DRYSPONGE256_ROUNDS; -} diff --git a/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/internal-drysponge.h b/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/internal-drysponge.h deleted file mode 100644 index 05b0c16..0000000 --- a/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/internal-drysponge.h +++ /dev/null @@ -1,345 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_DRYSPONGE_H -#define LW_INTERNAL_DRYSPONGE_H - -#include "internal-util.h" - -/** - * \file internal-drysponge.h - * \brief Internal implementation of DrySPONGE for the DryGASCON cipher. - * - * References: https://github.com/sebastien-riou/DryGASCON - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the GASCON-128 permutation state in bytes. - */ -#define GASCON128_STATE_SIZE 40 - -/** - * \brief Size of the GASCON-256 permutation state in bytes. - */ -#define GASCON256_STATE_SIZE 72 - -/** - * \brief Rate of absorption and squeezing for DrySPONGE128. - */ -#define DRYSPONGE128_RATE 16 - -/** - * \brief Rate of absorption and squeezing for DrySPONGE256. - */ -#define DRYSPONGE256_RATE 16 - -/** - * \brief Size of the "x" value for DrySPONGE128. - */ -#define DRYSPONGE128_XSIZE 16 - -/** - * \brief Size of the "x" value for DrySPONGE256. - */ -#define DRYSPONGE256_XSIZE 16 - -/** - * \brief Normal number of rounds for DrySPONGE128 when absorbing - * and squeezing data. - */ -#define DRYSPONGE128_ROUNDS 7 - -/** - * \brief Number of rounds for DrySPONGE128 during initialization. - */ -#define DRYSPONGE128_INIT_ROUNDS 11 - -/** - * \brief Normal number of rounds for DrySPONGE256 when absorbing - * and squeezing data. - */ -#define DRYSPONGE256_ROUNDS 8 - -/** - * \brief Number of rounds for DrySPONGE256 during initialization. - */ -#define DRYSPONGE256_INIT_ROUNDS 12 - -/** - * \brief DrySPONGE128 domain bit for a padded block. - */ -#define DRYDOMAIN128_PADDED (1 << 8) - -/** - * \brief DrySPONGE128 domain bit for a final block. - */ -#define DRYDOMAIN128_FINAL (1 << 9) - -/** - * \brief DrySPONGE128 domain value for processing the nonce. - */ -#define DRYDOMAIN128_NONCE (1 << 10) - -/** - * \brief DrySPONGE128 domain value for processing the associated data. - */ -#define DRYDOMAIN128_ASSOC_DATA (2 << 10) - -/** - * \brief DrySPONGE128 domain value for processing the message. - */ -#define DRYDOMAIN128_MESSAGE (3 << 10) - -/** - * \brief DrySPONGE256 domain bit for a padded block. - */ -#define DRYDOMAIN256_PADDED (1 << 2) - -/** - * \brief DrySPONGE256 domain bit for a final block. - */ -#define DRYDOMAIN256_FINAL (1 << 3) - -/** - * \brief DrySPONGE256 domain value for processing the nonce. - */ -#define DRYDOMAIN256_NONCE (1 << 4) - -/** - * \brief DrySPONGE256 domain value for processing the associated data. 
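These domain constants are positioned so that they fall inside the bit groups consumed by the mix phase: DrySPONGE128 folds its domain value into the last two 10-bit groups (data[15] ^ domain, then domain >> 10), while DrySPONGE256 fits the whole value into the final 18-bit group alongside the two leftover bits of data[15]. A small hypothetical illustration of the 128-bit case, assuming a final associated-data block and using the same bit positions as the #defines in this header:

#include <stdint.h>

/* Hypothetical helper showing how a DrySPONGE128 domain value is split
 * across the last two 10-bit mix groups.  FINAL is bit 9 and
 * ASSOC_DATA is 2 << 10, as defined above. */
static void domain_groups_128(uint32_t out[2], uint8_t last_rate_byte)
{
    uint32_t domain = (2u << 10) | (1u << 9);    /* final associated-data block */
    out[0] = (last_rate_byte ^ domain) & 0x3FF;  /* rate byte plus the FINAL flag */
    out[1] = domain >> 10;                       /* 2 selects "associated data" */
}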
- */ -#define DRYDOMAIN256_ASSOC_DATA (2 << 4) - -/** - * \brief DrySPONGE256 domain value for processing the message. - */ -#define DRYDOMAIN256_MESSAGE (3 << 4) - -/** - * \brief Internal state of the GASCON-128 permutation. - */ -typedef union -{ - uint64_t S[GASCON128_STATE_SIZE / 8]; /**< 64-bit words of the state */ - uint32_t W[GASCON128_STATE_SIZE / 4]; /**< 32-bit words of the state */ - uint8_t B[GASCON128_STATE_SIZE]; /**< Bytes of the state */ - -} gascon128_state_t; - -/** - * \brief Internal state of the GASCON-256 permutation. - */ -typedef union -{ - uint64_t S[GASCON256_STATE_SIZE / 8]; /**< 64-bit words of the state */ - uint32_t W[GASCON256_STATE_SIZE / 4]; /**< 32-bit words of the state */ - uint8_t B[GASCON256_STATE_SIZE]; /**< Bytes of the state */ - -} gascon256_state_t; - -/** - * \brief Structure of a rate block for DrySPONGE128. - */ -typedef union -{ - uint64_t S[DRYSPONGE128_RATE / 8]; /**< 64-bit words of the rate */ - uint32_t W[DRYSPONGE128_RATE / 4]; /**< 32-bit words of the rate */ - uint8_t B[DRYSPONGE128_RATE]; /**< Bytes of the rate */ - -} drysponge128_rate_t; - -/** - * \brief Structure of a rate block for DrySPONGE256. - */ -typedef union -{ - uint64_t S[DRYSPONGE256_RATE / 8]; /**< 64-bit words of the rate */ - uint32_t W[DRYSPONGE256_RATE / 4]; /**< 32-bit words of the rate */ - uint8_t B[DRYSPONGE256_RATE]; /**< Bytes of the rate */ - -} drysponge256_rate_t; - -/** - * \brief Structure of the "x" value for DrySPONGE128. - */ -typedef union -{ - uint64_t S[DRYSPONGE128_XSIZE / 8]; /**< 64-bit words of the rate */ - uint32_t W[DRYSPONGE128_XSIZE / 4]; /**< 32-bit words of the rate */ - uint8_t B[DRYSPONGE128_XSIZE]; /**< Bytes of the rate */ - -} drysponge128_x_t; - -/** - * \brief Structure of the "x" value for DrySPONGE256. - */ -typedef union -{ - uint64_t S[DRYSPONGE256_XSIZE / 8]; /**< 64-bit words of the rate */ - uint32_t W[DRYSPONGE256_XSIZE / 4]; /**< 32-bit words of the rate */ - uint8_t B[DRYSPONGE256_XSIZE]; /**< Bytes of the rate */ - -} drysponge256_x_t; - -/** - * \brief Structure of the rolling DrySPONGE128 state. - */ -typedef struct -{ - gascon128_state_t c; /**< GASCON-128 state for the capacity */ - drysponge128_rate_t r; /**< Buffer for a rate block of data */ - drysponge128_x_t x; /**< "x" value for the sponge */ - uint32_t domain; /**< Domain value to mix on next F call */ - uint32_t rounds; /**< Number of rounds for next G call */ - -} drysponge128_state_t; - -/** - * \brief Structure of the rolling DrySPONGE256 state. - */ -typedef struct -{ - gascon256_state_t c; /**< GASCON-256 state for the capacity */ - drysponge256_rate_t r; /**< Buffer for a rate block of data */ - drysponge256_x_t x; /**< "x" value for the sponge */ - uint32_t domain; /**< Domain value to mix on next F call */ - uint32_t rounds; /**< Number of rounds for next G call */ - -} drysponge256_state_t; - -/** - * \brief Permutes the GASCON-128 state using one iteration of CoreRound. - * - * \param state The GASCON-128 state to be permuted. - * \param round The round number. - * - * The input and output \a state will be in little-endian byte order. - */ -void gascon128_core_round(gascon128_state_t *state, uint8_t round); - -/** - * \brief Permutes the GASCON-256 state using one iteration of CoreRound. - * - * \param state The GASCON-256 state to be permuted. - * \param round The round number. - * - * The input and output \a state will be in little-endian byte order. 
- */ -void gascon256_core_round(gascon256_state_t *state, uint8_t round); - -/** - * \brief Performs the DrySPONGE128 G function which runs the core - * rounds and squeezes data out of the GASGON-128 state. - * - * \param state The DrySPONGE128 state. - * - * The data that is squeezed out will be in state->r on exit. - */ -void drysponge128_g(drysponge128_state_t *state); - -/** - * \brief Performs the DrySPONGE256 G function which runs the core - * rounds and squeezes data out of the GASGON-256 state. - * - * \param state The DrySPONGE256 state. - * - * The data that is squeezed out will be in state->r on exit. - */ -void drysponge256_g(drysponge256_state_t *state); - -/** - * \brief Performs the DrySPONGE128 G function which runs the core - * rounds but does not squeeze out any output. - * - * \param state The DrySPONGE128 state. - */ -void drysponge128_g_core(drysponge128_state_t *state); - -/** - * \brief Performs the DrySPONGE256 G function which runs the core - * rounds but does not squeeze out any output. - * - * \param state The DrySPONGE256 state. - */ -void drysponge256_g_core(drysponge256_state_t *state); - -/** - * \brief Performs the absorption phase of the DrySPONGE128 F function. - * - * \param state The DrySPONGE128 state. - * \param input The block of input data to incorporate into the state. - * \param len The length of the input block, which must be less than - * or equal to DRYSPONGE128_RATE. Smaller input blocks will be padded. - * - * This function must be followed by a call to drysponge128_g() or - * drysponge128_g_core() to perform the full F operation. - */ -void drysponge128_f_absorb - (drysponge128_state_t *state, const unsigned char *input, unsigned len); - -/** - * \brief Performs the absorption phase of the DrySPONGE256 F function. - * - * \param state The DrySPONGE256 state. - * \param input The block of input data to incorporate into the state. - * \param len The length of the input block, which must be less than - * or equal to DRYSPONGE256_RATE. Smaller input blocks will be padded. - * - * This function must be followed by a call to drysponge256_g() or - * drysponge256_g_core() to perform the full F operation. - */ -void drysponge256_f_absorb - (drysponge256_state_t *state, const unsigned char *input, unsigned len); - -/** - * \brief Set up a DrySPONGE128 state to begin encryption or decryption. - * - * \param state The DrySPONGE128 state. - * \param key Points to the 16 bytes of the key. - * \param nonce Points to the 16 bytes of the nonce. - * \param final_block Non-zero if after key setup there will be no more blocks. - */ -void drysponge128_setup - (drysponge128_state_t *state, const unsigned char *key, - const unsigned char *nonce, int final_block); - -/** - * \brief Set up a DrySPONGE256 state to begin encryption or decryption. - * - * \param state The DrySPONGE256 state. - * \param key Points to the 32 bytes of the key. - * \param nonce Points to the 16 bytes of the nonce. - * \param final_block Non-zero if after key setup there will be no more blocks. 
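Taken together, the functions declared in this header follow a simple absorb/squeeze call pattern. The sketch below is a hypothetical caller (not part of the deleted header or of the higher-level AEAD code) that keys a DrySPONGE128 state and absorbs one 16-byte block, mirroring the sequence that drysponge128_setup() itself applies to the nonce.

#include "internal-drysponge.h"   /* the header declared above */

/* Hypothetical usage sketch: key the sponge, absorb one block of
 * associated data with the matching domain tag, then run G. */
static void absorb_one_block_example(const unsigned char key[16],
                                     const unsigned char nonce[16],
                                     const unsigned char block[16])
{
    drysponge128_state_t state;
    drysponge128_setup(&state, key, nonce, 0);   /* more blocks will follow */
    state.domain = DRYDOMAIN128_ASSOC_DATA;      /* tag the block type */
    drysponge128_f_absorb(&state, block, 16);    /* F: mix the block in */
    drysponge128_g(&state);                      /* G: core rounds plus squeeze */
    /* state.r.B now holds DRYSPONGE128_RATE bytes of squeezed output */
}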
- */ -void drysponge256_setup - (drysponge256_state_t *state, const unsigned char *key, - const unsigned char *nonce, int final_block); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/internal-util.h b/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/drygascon/Implementations/crypto_hash/drygascon256/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/drygascon/Implementations/crypto_hash/drygascon256/rhys/aead-common.c b/drygascon/Implementations/crypto_hash/drygascon256/rhys/aead-common.c new file mode 100644 index 0000000..84fc53a --- /dev/null +++ b/drygascon/Implementations/crypto_hash/drygascon256/rhys/aead-common.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "aead-common.h" + +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = (accum - 1) >> 8; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} + +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size, int precheck) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = ((accum - 1) >> 8) & precheck; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} diff --git a/drygascon/Implementations/crypto_hash/drygascon256/rhys/aead-common.h b/drygascon/Implementations/crypto_hash/drygascon256/rhys/aead-common.h new file mode 100644 index 0000000..2be95eb --- /dev/null +++ b/drygascon/Implementations/crypto_hash/drygascon256/rhys/aead-common.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_AEAD_COMMON_H +#define LWCRYPTO_AEAD_COMMON_H + +#include <stddef.h> + +/** + * \file aead-common.h + * \brief Definitions that are common across AEAD schemes. + * + * AEAD stands for "Authenticated Encryption with Associated Data". + * It is a standard API pattern for securely encrypting and + * authenticating packets of data. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts and authenticates a packet with an AEAD scheme. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + */ +typedef int (*aead_cipher_encrypt_t) + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with an AEAD scheme. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. 
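+ * + * As a minimal usage sketch (assuming \c cipher points to a descriptor such + * as \c drygascon128_cipher from drygascon.h, and that every buffer is sized + * according to that descriptor's key_len, nonce_len and tag_len fields): + * + * \code + * unsigned long long mlen; + * if (cipher->decrypt(m, &mlen, 0, c, clen, ad, adlen, npub, k) != 0) { + *     // tag check failed; the plaintext buffer has been zeroed + * } + * \endcode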
+ */ +typedef int (*aead_cipher_decrypt_t) + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data. + * + * \param out Buffer to receive the hash output. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +typedef int (*aead_hash_t) + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a hashing operation. + * + * \param state Hash state to be initialized. + */ +typedef void (*aead_hash_init_t)(void *state); + +/** + * \brief Updates a hash state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + */ +typedef void (*aead_hash_update_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Returns the final hash value from a hashing operation. + * + * \param state Hash state to be finalized. + * \param out Points to the output buffer to receive the hash value. + */ +typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); + +/** + * \brief Absorbs more input data into an XOF state. + * + * \param state XOF state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. + * + * \sa ascon_xof_init(), ascon_xof_squeeze() + */ +typedef void (*aead_xof_absorb_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Squeezes output data from an XOF state. + * + * \param state XOF state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + */ +typedef void (*aead_xof_squeeze_t) + (void *state, unsigned char *out, unsigned long long outlen); + +/** + * \brief No special AEAD features. + */ +#define AEAD_FLAG_NONE 0x0000 + +/** + * \brief The natural byte order of the AEAD cipher is little-endian. + * + * If this flag is not present, then the natural byte order of the + * AEAD cipher should be assumed to be big-endian. + * + * The natural byte order may be useful when formatting packet sequence + * numbers as nonces. The application needs to know whether the sequence + * number should be packed into the leading or trailing bytes of the nonce. + */ +#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 + +/** + * \brief Meta-information about an AEAD cipher. + */ +typedef struct +{ + const char *name; /**< Name of the cipher */ + unsigned key_len; /**< Length of the key in bytes */ + unsigned nonce_len; /**< Length of the nonce in bytes */ + unsigned tag_len; /**< Length of the tag in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ + aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ + +} aead_cipher_t; + +/** + * \brief Meta-information about a hash algorithm that is related to an AEAD. + * + * Regular hash algorithms should provide the "hash", "init", "update", + * and "finalize" functions. 
Extensible Output Functions (XOF's) should + * provide the "hash", "init", "absorb", and "squeeze" functions. + */ +typedef struct +{ + const char *name; /**< Name of the hash algorithm */ + size_t state_size; /**< Size of the incremental state structure */ + unsigned hash_len; /**< Length of the hash in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_hash_t hash; /**< All in one hashing function */ + aead_hash_init_t init; /**< Incremental hash/XOF init function */ + aead_hash_update_t update; /**< Incremental hash update function */ + aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ + aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ + aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ + +} aead_hash_algorithm_t; + +/** + * \brief Check an authentication tag in constant time. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + */ +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len); + +/** + * \brief Check an authentication tag in constant time with a previous check. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * \param precheck Set to -1 if previous check succeeded or 0 if it failed. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + * + * This version can be used to incorporate other information about the + * correctness of the plaintext into the final result. + */ +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len, int precheck); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drygascon/Implementations/crypto_hash/drygascon256/rhys/api.h b/drygascon/Implementations/crypto_hash/drygascon256/rhys/api.h new file mode 100644 index 0000000..de9380d --- /dev/null +++ b/drygascon/Implementations/crypto_hash/drygascon256/rhys/api.h @@ -0,0 +1 @@ +#define CRYPTO_BYTES 64 diff --git a/drygascon/Implementations/crypto_hash/drygascon256/rhys/drygascon.c b/drygascon/Implementations/crypto_hash/drygascon256/rhys/drygascon.c new file mode 100644 index 0000000..e963903 --- /dev/null +++ b/drygascon/Implementations/crypto_hash/drygascon256/rhys/drygascon.c @@ -0,0 +1,421 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "drygascon.h" +#include "internal-drysponge.h" +#include <string.h> + +aead_cipher_t const drygascon128_cipher = { + "DryGASCON128", + DRYGASCON128_KEY_SIZE, + DRYGASCON128_NONCE_SIZE, + DRYGASCON128_TAG_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + drygascon128_aead_encrypt, + drygascon128_aead_decrypt +}; + +aead_cipher_t const drygascon256_cipher = { + "DryGASCON256", + DRYGASCON256_KEY_SIZE, + DRYGASCON256_NONCE_SIZE, + DRYGASCON256_TAG_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + drygascon256_aead_encrypt, + drygascon256_aead_decrypt +}; + +aead_hash_algorithm_t const drygascon128_hash_algorithm = { + "DryGASCON128-HASH", + sizeof(int), + DRYGASCON128_HASH_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + drygascon128_hash, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +aead_hash_algorithm_t const drygascon256_hash_algorithm = { + "DryGASCON256-HASH", + sizeof(int), + DRYGASCON256_HASH_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + drygascon256_hash, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +/** + * \brief Processes associated data for DryGASCON128. + * + * \param state DrySPONGE128 sponge state. + * \param ad Points to the associated data. + * \param adlen Length of the associated data, must not be zero. + * \param finalize Non-zero to finalize packet processing because + * the message is zero-length. + */ +static void drygascon128_process_ad + (drysponge128_state_t *state, const unsigned char *ad, + unsigned long long adlen, int finalize) +{ + /* Process all blocks except the last one */ + while (adlen > DRYSPONGE128_RATE) { + drysponge128_f_absorb(state, ad, DRYSPONGE128_RATE); + drysponge128_g_core(state); + ad += DRYSPONGE128_RATE; + adlen -= DRYSPONGE128_RATE; + } + + /* Process the last block with domain separation and padding */ + state->domain = DRYDOMAIN128_ASSOC_DATA; + if (finalize) + state->domain |= DRYDOMAIN128_FINAL; + if (adlen < DRYSPONGE128_RATE) + state->domain |= DRYDOMAIN128_PADDED; + drysponge128_f_absorb(state, ad, (unsigned)adlen); + drysponge128_g(state); +} + +/** + * \brief Processes associated data for DryGASCON256. + * + * \param state DrySPONGE256 sponge state. + * \param ad Points to the associated data. + * \param adlen Length of the associated data, must not be zero. 
+ * \param finalize Non-zero to finalize packet processing because + * the message is zero-length. + */ +static void drygascon256_process_ad + (drysponge256_state_t *state, const unsigned char *ad, + unsigned long long adlen, int finalize) +{ + /* Process all blocks except the last one */ + while (adlen > DRYSPONGE256_RATE) { + drysponge256_f_absorb(state, ad, DRYSPONGE256_RATE); + drysponge256_g_core(state); + ad += DRYSPONGE256_RATE; + adlen -= DRYSPONGE256_RATE; + } + + /* Process the last block with domain separation and padding */ + state->domain = DRYDOMAIN256_ASSOC_DATA; + if (finalize) + state->domain |= DRYDOMAIN256_FINAL; + if (adlen < DRYSPONGE256_RATE) + state->domain |= DRYDOMAIN256_PADDED; + drysponge256_f_absorb(state, ad, (unsigned)adlen); + drysponge256_g(state); +} + +int drygascon128_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + drysponge128_state_t state; + unsigned temp; + (void)nsec; + + /* Set the length of the returned ciphertext */ + *clen = mlen + DRYGASCON128_TAG_SIZE; + + /* Initialize the sponge state with the key and nonce */ + drysponge128_setup(&state, k, npub, adlen == 0 && mlen == 0); + + /* Process the associated data */ + if (adlen > 0) + drygascon128_process_ad(&state, ad, adlen, mlen == 0); + + /* Encrypt the plaintext to produce the ciphertext */ + if (mlen > 0) { + /* Process all blocks except the last one */ + while (mlen > DRYSPONGE128_RATE) { + drysponge128_f_absorb(&state, m, DRYSPONGE128_RATE); + lw_xor_block_2_src(c, m, state.r.B, DRYSPONGE128_RATE); + drysponge128_g(&state); + c += DRYSPONGE128_RATE; + m += DRYSPONGE128_RATE; + mlen -= DRYSPONGE128_RATE; + } + + /* Process the last block with domain separation and padding */ + state.domain = DRYDOMAIN128_MESSAGE | DRYDOMAIN128_FINAL; + if (mlen < DRYSPONGE128_RATE) + state.domain |= DRYDOMAIN128_PADDED; + temp = (unsigned)mlen; + drysponge128_f_absorb(&state, m, temp); + lw_xor_block_2_src(c, m, state.r.B, temp); + drysponge128_g(&state); + c += temp; + } + + /* Generate the authentication tag */ + memcpy(c, state.r.B, DRYGASCON128_TAG_SIZE); + return 0; +} + +int drygascon128_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + drysponge128_state_t state; + unsigned char *mtemp = m; + unsigned temp; + (void)nsec; + + /* Validate the ciphertext length and set the return "mlen" value */ + if (clen < DRYGASCON128_TAG_SIZE) + return -1; + *mlen = clen - DRYGASCON128_TAG_SIZE; + + /* Initialize the sponge state with the key and nonce */ + clen -= DRYGASCON128_TAG_SIZE; + drysponge128_setup(&state, k, npub, adlen == 0 && clen == 0); + + /* Process the associated data */ + if (adlen > 0) + drygascon128_process_ad(&state, ad, adlen, clen == 0); + + /* Decrypt the ciphertext to produce the plaintext */ + if (clen > 0) { + /* Process all blocks except the last one */ + while (clen > DRYSPONGE128_RATE) { + lw_xor_block_2_src(m, c, state.r.B, DRYSPONGE128_RATE); + drysponge128_f_absorb(&state, m, DRYSPONGE128_RATE); + drysponge128_g(&state); + c += DRYSPONGE128_RATE; + m += DRYSPONGE128_RATE; + clen -= DRYSPONGE128_RATE; + } + + /* Process the last block with domain separation and padding */ + state.domain = 
DRYDOMAIN128_MESSAGE | DRYDOMAIN128_FINAL; + if (clen < DRYSPONGE128_RATE) + state.domain |= DRYDOMAIN128_PADDED; + temp = (unsigned)clen; + lw_xor_block_2_src(m, c, state.r.B, temp); + drysponge128_f_absorb(&state, m, temp); + drysponge128_g(&state); + c += temp; + } + + /* Check the authentication tag */ + return aead_check_tag(mtemp, *mlen, state.r.B, c, DRYGASCON128_TAG_SIZE); +} + +int drygascon256_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + drysponge256_state_t state; + unsigned temp; + (void)nsec; + + /* Set the length of the returned ciphertext */ + *clen = mlen + DRYGASCON256_TAG_SIZE; + + /* Initialize the sponge state with the key and nonce */ + drysponge256_setup(&state, k, npub, adlen == 0 && mlen == 0); + + /* Process the associated data */ + if (adlen > 0) + drygascon256_process_ad(&state, ad, adlen, mlen == 0); + + /* Encrypt the plaintext to produce the ciphertext */ + if (mlen > 0) { + /* Process all blocks except the last one */ + while (mlen > DRYSPONGE256_RATE) { + drysponge256_f_absorb(&state, m, DRYSPONGE256_RATE); + lw_xor_block_2_src(c, m, state.r.B, DRYSPONGE256_RATE); + drysponge256_g(&state); + c += DRYSPONGE256_RATE; + m += DRYSPONGE256_RATE; + mlen -= DRYSPONGE256_RATE; + } + + /* Process the last block with domain separation and padding */ + state.domain = DRYDOMAIN256_MESSAGE | DRYDOMAIN256_FINAL; + if (mlen < DRYSPONGE256_RATE) + state.domain |= DRYDOMAIN256_PADDED; + temp = (unsigned)mlen; + drysponge256_f_absorb(&state, m, temp); + lw_xor_block_2_src(c, m, state.r.B, temp); + drysponge256_g(&state); + c += temp; + } + + /* Generate the authentication tag */ + memcpy(c, state.r.B, 16); + drysponge256_g(&state); + memcpy(c + 16, state.r.B, 16); + return 0; +} + +int drygascon256_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + drysponge256_state_t state; + unsigned char *mtemp = m; + unsigned temp; + int result; + (void)nsec; + + /* Validate the ciphertext length and set the return "mlen" value */ + if (clen < DRYGASCON256_TAG_SIZE) + return -1; + *mlen = clen - DRYGASCON256_TAG_SIZE; + + /* Initialize the sponge state with the key and nonce */ + clen -= DRYGASCON256_TAG_SIZE; + drysponge256_setup(&state, k, npub, adlen == 0 && clen == 0); + + /* Process the associated data */ + if (adlen > 0) + drygascon256_process_ad(&state, ad, adlen, clen == 0); + + /* Decrypt the ciphertext to produce the plaintext */ + if (clen > 0) { + /* Process all blocks except the last one */ + while (clen > DRYSPONGE256_RATE) { + lw_xor_block_2_src(m, c, state.r.B, DRYSPONGE256_RATE); + drysponge256_f_absorb(&state, m, DRYSPONGE256_RATE); + drysponge256_g(&state); + c += DRYSPONGE256_RATE; + m += DRYSPONGE256_RATE; + clen -= DRYSPONGE256_RATE; + } + + /* Process the last block with domain separation and padding */ + state.domain = DRYDOMAIN256_MESSAGE | DRYDOMAIN256_FINAL; + if (clen < DRYSPONGE256_RATE) + state.domain |= DRYDOMAIN256_PADDED; + temp = (unsigned)clen; + lw_xor_block_2_src(m, c, state.r.B, temp); + drysponge256_f_absorb(&state, m, temp); + drysponge256_g(&state); + c += temp; + } + + /* Check the authentication tag which is split into two pieces */ + result = 
aead_check_tag(0, 0, state.r.B, c, 16); + drysponge256_g(&state); + return aead_check_tag_precheck + (mtemp, *mlen, state.r.B, c + 16, 16, ~result); +} + +/** + * \brief Precomputed initialization vector for DryGASCON128-HASH. + * + * This is the CST_H value from the DryGASCON specification after it + * has been processed by the key setup function for DrySPONGE128. + */ +static unsigned char const drygascon128_hash_init[] = { + /* c */ + 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, + 0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44, + 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, + 0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44, + 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, + /* x */ + 0xa4, 0x09, 0x38, 0x22, 0x29, 0x9f, 0x31, 0xd0, + 0x08, 0x2e, 0xfa, 0x98, 0xec, 0x4e, 0x6c, 0x89 +}; + +int drygascon128_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + drysponge128_state_t state; + memcpy(state.c.B, drygascon128_hash_init, sizeof(state.c.B)); + memcpy(state.x.B, drygascon128_hash_init + sizeof(state.c.B), + sizeof(state.x.B)); + state.domain = 0; + state.rounds = DRYSPONGE128_ROUNDS; + drygascon128_process_ad(&state, in, inlen, 1); + memcpy(out, state.r.B, 16); + drysponge128_g(&state); + memcpy(out + 16, state.r.B, 16); + return 0; +} + +/** + * \brief Precomputed initialization vector for DryGASCON256-HASH. + * + * This is the CST_H value from the DryGASCON specification after it + * has been processed by the key setup function for DrySPONGE256. + */ +static unsigned char const drygascon256_hash_init[] = { + /* c */ + 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, + 0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44, + 0xa4, 0x09, 0x38, 0x22, 0x29, 0x9f, 0x31, 0xd0, + 0x08, 0x2e, 0xfa, 0x98, 0xec, 0x4e, 0x6c, 0x89, + 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, + 0x13, 0x19, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x44, + 0xa4, 0x09, 0x38, 0x22, 0x29, 0x9f, 0x31, 0xd0, + 0x08, 0x2e, 0xfa, 0x98, 0xec, 0x4e, 0x6c, 0x89, + 0x24, 0x3f, 0x6a, 0x88, 0x85, 0xa3, 0x08, 0xd3, + /* x */ + 0x45, 0x28, 0x21, 0xe6, 0x38, 0xd0, 0x13, 0x77, + 0xbe, 0x54, 0x66, 0xcf, 0x34, 0xe9, 0x0c, 0x6c +}; + +int drygascon256_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + drysponge256_state_t state; + memcpy(state.c.B, drygascon256_hash_init, sizeof(state.c.B)); + memcpy(state.x.B, drygascon256_hash_init + sizeof(state.c.B), + sizeof(state.x.B)); + state.domain = 0; + state.rounds = DRYSPONGE256_ROUNDS; + drygascon256_process_ad(&state, in, inlen, 1); + memcpy(out, state.r.B, 16); + drysponge256_g(&state); + memcpy(out + 16, state.r.B, 16); + drysponge256_g(&state); + memcpy(out + 32, state.r.B, 16); + drysponge256_g(&state); + memcpy(out + 48, state.r.B, 16); + return 0; +} diff --git a/drygascon/Implementations/crypto_hash/drygascon256/rhys/drygascon.h b/drygascon/Implementations/crypto_hash/drygascon256/rhys/drygascon.h new file mode 100644 index 0000000..12e18c3 --- /dev/null +++ b/drygascon/Implementations/crypto_hash/drygascon256/rhys/drygascon.h @@ -0,0 +1,264 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_DRYGASCON_H +#define LWCRYPTO_DRYGASCON_H + +#include "aead-common.h" + +/** + * \file drygascon.h + * \brief DryGASCON authenticated encryption algorithm. + * + * DryGASCON is a family of authenticated encryption algorithms based + * around a generalised version of the ASCON permutation. DryGASCON + * is designed to provide some protection against power analysis. + * + * There are four algorithms in the DryGASCON family: + * + * \li DryGASCON128 is an authenticated encryption algorithm with a + * 128-bit key, a 128-bit nonce, and a 128-bit authentication tag. + * \li DryGASCON256 is an authenticated encryption algorithm with a + * 256-bit key, a 128-bit nonce, and a 256-bit authentication tag. + * \li DryGASCON128-HASH is a hash algorithm with a 256-bit output. + * \li DryGASCON256-HASH is a hash algorithm with a 512-bit output. + * + * DryGASCON128 and DryGASCON128-HASH are the primary members of the family. + * + * References: https://github.com/sebastien-riou/DryGASCON + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the key for DryGASCON128. + */ +#define DRYGASCON128_KEY_SIZE 16 + +/** + * \brief Size of the authentication tag for DryGASCON128. + */ +#define DRYGASCON128_TAG_SIZE 16 + +/** + * \brief Size of the nonce for DryGASCON128. + */ +#define DRYGASCON128_NONCE_SIZE 16 + +/** + * \brief Size of the hash output for DryGASCON128-HASH. + */ +#define DRYGASCON128_HASH_SIZE 32 + +/** + * \brief Size of the key for DryGASCON256. + */ +#define DRYGASCON256_KEY_SIZE 32 + +/** + * \brief Size of the authentication tag for DryGASCON256. + */ +#define DRYGASCON256_TAG_SIZE 32 + +/** + * \brief Size of the nonce for DryGASCON256. + */ +#define DRYGASCON256_NONCE_SIZE 16 + +/** + * \brief Size of the hash output for DryGASCON256-HASH. + */ +#define DRYGASCON256_HASH_SIZE 64 + +/** + * \brief Meta-information block for the DryGASCON128 cipher. + */ +extern aead_cipher_t const drygascon128_cipher; + +/** + * \brief Meta-information block for the DryGASCON256 cipher. + */ +extern aead_cipher_t const drygascon256_cipher; + +/** + * \brief Meta-information block for DryGASCON128-HASH. + */ +extern aead_hash_algorithm_t const drygascon128_hash_algorithm; + +/** + * \brief Meta-information block for DryGASCON256-HASH. + */ +extern aead_hash_algorithm_t const drygascon256_hash_algorithm; + +/** + * \brief Encrypts and authenticates a packet with DryGASCON128. 
+ * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa drygascon128_aead_decrypt() + */ +int drygascon128_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with DryGASCON128. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa drygascon128_aead_encrypt() + */ +int drygascon128_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with DryGASCON256. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 32 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 32 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. 
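+ * + * A minimal calling sketch, assuming \c key and \c nonce were already filled + * with DRYGASCON256_KEY_SIZE and DRYGASCON256_NONCE_SIZE bytes and \c msg + * holds 64 bytes of plaintext: + * + * \code + * unsigned char out[64 + DRYGASCON256_TAG_SIZE]; + * unsigned long long outlen; + * drygascon256_aead_encrypt(out, &outlen, msg, 64, 0, 0, 0, nonce, key); + * \endcode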
+ * + * \sa drygascon256_aead_decrypt() + */ +int drygascon256_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with DryGASCON256. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 32 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 32 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa drygascon256_aead_encrypt() + */ +int drygascon256_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data with DRYGASCON128. + * + * \param out Buffer to receive the hash output which must be at least + * DRYGASCON128_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int drygascon128_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Hashes a block of input data with DRYGASCON256. + * + * \param out Buffer to receive the hash output which must be at least + * DRYGASCON256_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. 
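+ * + * A minimal calling sketch, assuming \c data and \c data_len describe the + * input buffer to be hashed: + * + * \code + * unsigned char digest[DRYGASCON256_HASH_SIZE]; + * drygascon256_hash(digest, data, data_len); + * \endcode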
+ */ +int drygascon256_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drygascon/Implementations/crypto_hash/drygascon256/rhys/hash.c b/drygascon/Implementations/crypto_hash/drygascon256/rhys/hash.c new file mode 100644 index 0000000..6383146 --- /dev/null +++ b/drygascon/Implementations/crypto_hash/drygascon256/rhys/hash.c @@ -0,0 +1,8 @@ + +#include "drygascon.h" + +int crypto_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + return drygascon256_hash(out, in, inlen); +} diff --git a/drygascon/Implementations/crypto_hash/drygascon256/rhys/internal-drysponge-avr.S b/drygascon/Implementations/crypto_hash/drygascon256/rhys/internal-drysponge-avr.S new file mode 100644 index 0000000..84d0ff8 --- /dev/null +++ b/drygascon/Implementations/crypto_hash/drygascon256/rhys/internal-drysponge-avr.S @@ -0,0 +1,5092 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global gascon128_core_round + .type gascon128_core_round, @function +gascon128_core_round: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ldi r18,15 + sub r18,r22 + swap r18 + or r22,r18 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+16 + ldd r5,Z+17 + ldd r6,Z+18 + ldd r7,Z+19 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + eor r4,r22 + ldd r23,Z+8 + ldd r12,Z+24 + ldd r13,Z+32 + eor r18,r13 + eor r4,r23 + eor r13,r12 + mov r14,r23 + mov r0,r18 + com r0 + and r14,r0 + mov r15,r4 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r4 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r18 + mov r0,r13 + com r0 + and r16,r0 + eor r18,r15 + eor r23,r24 + eor r4,r25 + eor r12,r16 + eor r13,r14 + eor r23,r18 + eor r12,r4 + eor r18,r13 + com r4 + st Z,r18 + std Z+8,r23 + std Z+24,r12 + std Z+32,r13 + ldd r23,Z+9 + ldd r12,Z+25 + ldd r13,Z+33 + eor r19,r13 + eor r5,r23 + eor r13,r12 + mov r14,r23 + mov r0,r19 + com r0 + and r14,r0 + mov r15,r5 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r5 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r19 + mov r0,r13 + com r0 + and r16,r0 + eor r19,r15 + eor r23,r24 + eor r5,r25 + eor r12,r16 + eor r13,r14 + eor r23,r19 + eor r12,r5 + eor r19,r13 + com r5 + std Z+1,r19 + std Z+9,r23 + std Z+25,r12 + std Z+33,r13 + ldd r23,Z+10 + ldd r12,Z+26 + ldd r13,Z+34 + eor r20,r13 + eor r6,r23 + eor r13,r12 + mov r14,r23 + mov r0,r20 + com r0 + and r14,r0 + mov r15,r6 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r6 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r20 + mov r0,r13 + com r0 + and r16,r0 + eor r20,r15 + eor r23,r24 + eor r6,r25 + eor r12,r16 + eor r13,r14 + eor r23,r20 + eor r12,r6 + eor r20,r13 + com r6 + std Z+2,r20 + std Z+10,r23 + std Z+26,r12 + std Z+34,r13 + ldd r23,Z+11 + ldd r12,Z+27 + ldd r13,Z+35 + eor r21,r13 + eor r7,r23 + eor r13,r12 + mov r14,r23 + mov r0,r21 + com r0 + and r14,r0 + mov r15,r7 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r7 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r21 + mov r0,r13 + com r0 + and r16,r0 + eor r21,r15 + eor r23,r24 + eor r7,r25 + eor r12,r16 + eor r13,r14 + eor r23,r21 + eor r12,r7 + eor 
r21,r13 + com r7 + std Z+3,r21 + std Z+11,r23 + std Z+27,r12 + std Z+35,r13 + ldd r23,Z+12 + ldd r12,Z+28 + ldd r13,Z+36 + eor r26,r13 + eor r8,r23 + eor r13,r12 + mov r14,r23 + mov r0,r26 + com r0 + and r14,r0 + mov r15,r8 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r8 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r26 + mov r0,r13 + com r0 + and r16,r0 + eor r26,r15 + eor r23,r24 + eor r8,r25 + eor r12,r16 + eor r13,r14 + eor r23,r26 + eor r12,r8 + eor r26,r13 + com r8 + std Z+4,r26 + std Z+12,r23 + std Z+28,r12 + std Z+36,r13 + ldd r23,Z+13 + ldd r12,Z+29 + ldd r13,Z+37 + eor r27,r13 + eor r9,r23 + eor r13,r12 + mov r14,r23 + mov r0,r27 + com r0 + and r14,r0 + mov r15,r9 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r9 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r27 + mov r0,r13 + com r0 + and r16,r0 + eor r27,r15 + eor r23,r24 + eor r9,r25 + eor r12,r16 + eor r13,r14 + eor r23,r27 + eor r12,r9 + eor r27,r13 + com r9 + std Z+5,r27 + std Z+13,r23 + std Z+29,r12 + std Z+37,r13 + ldd r23,Z+14 + ldd r12,Z+30 + ldd r13,Z+38 + eor r2,r13 + eor r10,r23 + eor r13,r12 + mov r14,r23 + mov r0,r2 + com r0 + and r14,r0 + mov r15,r10 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r10 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r2 + mov r0,r13 + com r0 + and r16,r0 + eor r2,r15 + eor r23,r24 + eor r10,r25 + eor r12,r16 + eor r13,r14 + eor r23,r2 + eor r12,r10 + eor r2,r13 + com r10 + std Z+6,r2 + std Z+14,r23 + std Z+30,r12 + std Z+38,r13 + ldd r23,Z+15 + ldd r12,Z+31 + ldd r13,Z+39 + eor r3,r13 + eor r11,r23 + eor r13,r12 + mov r14,r23 + mov r0,r3 + com r0 + and r14,r0 + mov r15,r11 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r12 + mov r0,r11 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r12 + com r0 + and r25,r0 + mov r16,r3 + mov r0,r13 + com r0 + and r16,r0 + eor r3,r15 + eor r23,r24 + eor r11,r25 + eor r12,r16 + eor r13,r14 + eor r23,r3 + eor r12,r11 + eor r3,r13 + com r11 + std Z+7,r3 + std Z+15,r23 + std Z+31,r12 + std Z+39,r13 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r24 + rol r25 + rol r16 + rol r17 + adc r24,r1 + lsl r24 + rol r25 + rol r16 + rol r17 + adc r24,r1 + eor r24,r18 + eor r25,r19 + eor r16,r20 + eor r17,r21 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r18,r24 + eor r19,r25 + eor r20,r16 + eor r21,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + std Z+12,r26 + std Z+13,r27 + std Z+14,r2 + std Z+15,r3 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + bst r12,0 + lsr r15 + ror r14 + ror r13 + ror r12 + bld r15,7 + eor r24,r4 + eor r25,r5 + eor r16,r6 + eor r17,r7 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r1 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr 
r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r7,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + or r11,r0 + eor r4,r24 + eor r5,r25 + eor r6,r16 + eor r7,r17 + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + eor r24,r18 + eor r25,r19 + eor r16,r20 + eor r17,r21 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r24 + eor r19,r25 + eor r20,r16 + eor r21,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+24,r18 + std Z+25,r19 + std Z+26,r20 + std Z+27,r21 + std Z+28,r26 + std Z+29,r27 + std Z+30,r2 + std Z+31,r3 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + or r17,r0 + eor r24,r18 + eor r25,r19 + eor r16,r20 + eor r17,r21 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r18,r24 + eor r19,r25 + eor r20,r16 + eor r21,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+32,r18 + std Z+33,r19 + std Z+34,r20 + std Z+35,r21 + std Z+36,r26 + std Z+37,r27 + std Z+38,r2 + std Z+39,r3 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + or r17,r0 + eor r24,r18 + eor r25,r19 + 
eor r16,r20 + eor r17,r21 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r24 + eor r19,r25 + eor r20,r16 + eor r21,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + std Z+16,r4 + std Z+17,r5 + std Z+18,r6 + std Z+19,r7 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size gascon128_core_round, .-gascon128_core_round + + .text +.global drysponge128_g + .type drysponge128_g, @function +drysponge128_g: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + subi r30,180 + sbci r31,255 + ld r19,Z + subi r30,76 + sbc r31,r1 + ldi r18,240 + std Z+40,r1 + std Z+41,r1 + std Z+42,r1 + std Z+43,r1 + std Z+44,r1 + std Z+45,r1 + std Z+46,r1 + std Z+47,r1 + std Z+48,r1 + std Z+49,r1 + std Z+50,r1 + std Z+51,r1 + std Z+52,r1 + std Z+53,r1 + std Z+54,r1 + std Z+55,r1 + ld r20,Z + ldd r21,Z+1 + ldd r22,Z+2 + ldd r23,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+16 + ldd r5,Z+17 + ldd r6,Z+18 + ldd r7,Z+19 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 +38: + eor r4,r18 + ldd r12,Z+8 + ldd r13,Z+24 + ldd r14,Z+32 + eor r20,r14 + eor r4,r12 + eor r14,r13 + mov r15,r12 + mov r0,r20 + com r0 + and r15,r0 + mov r24,r4 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r4 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r20 + mov r0,r14 + com r0 + and r17,r0 + eor r20,r24 + eor r12,r25 + eor r4,r16 + eor r13,r17 + eor r14,r15 + eor r12,r20 + eor r13,r4 + eor r20,r14 + com r4 + st Z,r20 + std Z+8,r12 + std Z+24,r13 + std Z+32,r14 + ldd r12,Z+9 + ldd r13,Z+25 + ldd r14,Z+33 + eor r21,r14 + eor r5,r12 + eor r14,r13 + mov r15,r12 + mov r0,r21 + com r0 + and r15,r0 + mov r24,r5 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r5 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r21 + mov r0,r14 + com r0 + and r17,r0 + eor r21,r24 + eor r12,r25 + eor r5,r16 + eor r13,r17 + eor r14,r15 + eor r12,r21 + eor r13,r5 + eor r21,r14 + com r5 + std Z+1,r21 + std Z+9,r12 + std Z+25,r13 + std Z+33,r14 + ldd r12,Z+10 + ldd r13,Z+26 + ldd r14,Z+34 + eor r22,r14 + eor r6,r12 + eor r14,r13 + mov r15,r12 + mov r0,r22 + com r0 + and r15,r0 + mov r24,r6 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r6 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r22 + mov r0,r14 + com r0 + and r17,r0 + eor r22,r24 + eor r12,r25 + eor r6,r16 + eor r13,r17 + eor r14,r15 + eor r12,r22 + eor r13,r6 + eor r22,r14 + com r6 + std Z+2,r22 + std Z+10,r12 + std Z+26,r13 + std Z+34,r14 + ldd r12,Z+11 + ldd r13,Z+27 + ldd r14,Z+35 + eor r23,r14 + eor r7,r12 + eor r14,r13 + mov r15,r12 + mov r0,r23 + com r0 + and r15,r0 + mov r24,r7 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r7 + com r0 + 
and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r23 + mov r0,r14 + com r0 + and r17,r0 + eor r23,r24 + eor r12,r25 + eor r7,r16 + eor r13,r17 + eor r14,r15 + eor r12,r23 + eor r13,r7 + eor r23,r14 + com r7 + std Z+3,r23 + std Z+11,r12 + std Z+27,r13 + std Z+35,r14 + ldd r12,Z+12 + ldd r13,Z+28 + ldd r14,Z+36 + eor r26,r14 + eor r8,r12 + eor r14,r13 + mov r15,r12 + mov r0,r26 + com r0 + and r15,r0 + mov r24,r8 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r8 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r26 + mov r0,r14 + com r0 + and r17,r0 + eor r26,r24 + eor r12,r25 + eor r8,r16 + eor r13,r17 + eor r14,r15 + eor r12,r26 + eor r13,r8 + eor r26,r14 + com r8 + std Z+4,r26 + std Z+12,r12 + std Z+28,r13 + std Z+36,r14 + ldd r12,Z+13 + ldd r13,Z+29 + ldd r14,Z+37 + eor r27,r14 + eor r9,r12 + eor r14,r13 + mov r15,r12 + mov r0,r27 + com r0 + and r15,r0 + mov r24,r9 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r9 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r27 + mov r0,r14 + com r0 + and r17,r0 + eor r27,r24 + eor r12,r25 + eor r9,r16 + eor r13,r17 + eor r14,r15 + eor r12,r27 + eor r13,r9 + eor r27,r14 + com r9 + std Z+5,r27 + std Z+13,r12 + std Z+29,r13 + std Z+37,r14 + ldd r12,Z+14 + ldd r13,Z+30 + ldd r14,Z+38 + eor r2,r14 + eor r10,r12 + eor r14,r13 + mov r15,r12 + mov r0,r2 + com r0 + and r15,r0 + mov r24,r10 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r10 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r2 + mov r0,r14 + com r0 + and r17,r0 + eor r2,r24 + eor r12,r25 + eor r10,r16 + eor r13,r17 + eor r14,r15 + eor r12,r2 + eor r13,r10 + eor r2,r14 + com r10 + std Z+6,r2 + std Z+14,r12 + std Z+30,r13 + std Z+38,r14 + ldd r12,Z+15 + ldd r13,Z+31 + ldd r14,Z+39 + eor r3,r14 + eor r11,r12 + eor r14,r13 + mov r15,r12 + mov r0,r3 + com r0 + and r15,r0 + mov r24,r11 + mov r0,r12 + com r0 + and r24,r0 + mov r25,r13 + mov r0,r11 + com r0 + and r25,r0 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + mov r17,r3 + mov r0,r14 + com r0 + and r17,r0 + eor r3,r24 + eor r12,r25 + eor r11,r16 + eor r13,r17 + eor r14,r15 + eor r12,r3 + eor r13,r11 + eor r3,r14 + com r11 + std Z+7,r3 + std Z+15,r12 + std Z+31,r13 + std Z+39,r14 + ldd r20,Z+8 + ldd r21,Z+9 + ldd r22,Z+10 + ldd r23,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r12,r20 + movw r14,r22 + movw r24,r26 + movw r16,r2 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r24 + rol r25 + rol r16 + rol r17 + adc r24,r1 + lsl r24 + rol r25 + rol r16 + rol r17 + adc r24,r1 + eor r24,r20 + eor r25,r21 + eor r16,r22 + eor r17,r23 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r20 + mov r20,r22 + mov r22,r0 + mov r0,r21 + mov r21,r23 + mov r23,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + or r23,r0 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+8,r20 + std Z+9,r21 + std Z+10,r22 + std Z+11,r23 + std Z+12,r26 + std Z+13,r27 + std Z+14,r2 + std Z+15,r3 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + bst r12,0 + lsr r15 + 
ror r14 + ror r13 + ror r12 + bld r15,7 + eor r24,r4 + eor r25,r5 + eor r16,r6 + eor r17,r7 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r1 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r7,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + or r11,r0 + eor r4,r24 + eor r5,r25 + eor r6,r16 + eor r7,r17 + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + ldd r20,Z+24 + ldd r21,Z+25 + ldd r22,Z+26 + ldd r23,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r12,r20 + movw r14,r22 + movw r24,r26 + movw r16,r2 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + eor r24,r20 + eor r25,r21 + eor r16,r22 + eor r17,r23 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r20 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r0 + lsl r20 + rol r21 + rol r22 + rol r23 + adc r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + adc r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + adc r20,r1 + mov r0,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+24,r20 + std Z+25,r21 + std Z+26,r22 + std Z+27,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r2 + std Z+31,r3 + ldd r20,Z+32 + ldd r21,Z+33 + ldd r22,Z+34 + ldd r23,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + movw r12,r20 + movw r14,r22 + movw r24,r26 + movw r16,r2 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + or r17,r0 + eor r24,r20 + eor r25,r21 + eor r16,r22 + eor r17,r23 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r20 + mov r20,r22 + mov r22,r0 + mov r0,r21 + mov r21,r23 + mov r23,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + or r23,r0 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + std Z+32,r20 + std Z+33,r21 + std Z+34,r22 + std Z+35,r23 + std Z+36,r26 + std Z+37,r27 + std Z+38,r2 + std Z+39,r3 + ld r20,Z + ldd r21,Z+1 + ldd r22,Z+2 + ldd r23,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r12,r20 + movw r14,r22 + movw r24,r26 + movw r16,r2 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r1 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r15 + ror r14 + 
ror r13 + ror r12 + ror r0 + or r15,r0 + mov r0,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r0 + or r17,r0 + eor r24,r20 + eor r25,r21 + eor r16,r22 + eor r17,r23 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + mov r0,r20 + mov r20,r22 + mov r22,r0 + mov r0,r21 + mov r21,r23 + mov r23,r0 + lsl r20 + rol r21 + rol r22 + rol r23 + adc r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + adc r20,r1 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + ldd r12,Z+40 + ldd r13,Z+41 + ldd r14,Z+42 + ldd r15,Z+43 + eor r12,r20 + eor r13,r21 + eor r14,r22 + eor r15,r23 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + std Z+40,r12 + std Z+41,r13 + std Z+42,r14 + std Z+43,r15 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + ldd r0,Z+24 + eor r12,r0 + ldd r0,Z+25 + eor r13,r0 + ldd r0,Z+26 + eor r14,r0 + ldd r0,Z+27 + eor r15,r0 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + ldd r12,Z+48 + ldd r13,Z+49 + ldd r14,Z+50 + ldd r15,Z+51 + ldd r0,Z+8 + eor r12,r0 + ldd r0,Z+9 + eor r13,r0 + ldd r0,Z+10 + eor r14,r0 + ldd r0,Z+11 + eor r15,r0 + ldd r0,Z+28 + eor r12,r0 + ldd r0,Z+29 + eor r13,r0 + ldd r0,Z+30 + eor r14,r0 + ldd r0,Z+31 + eor r15,r0 + std Z+48,r12 + std Z+49,r13 + std Z+50,r14 + std Z+51,r15 + ldd r12,Z+52 + ldd r13,Z+53 + ldd r14,Z+54 + ldd r15,Z+55 + ldd r0,Z+12 + eor r12,r0 + ldd r0,Z+13 + eor r13,r0 + ldd r0,Z+14 + eor r14,r0 + ldd r0,Z+15 + eor r15,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + std Z+52,r12 + std Z+53,r13 + std Z+54,r14 + std Z+55,r15 + subi r18,15 + dec r19 + breq 5904f + rjmp 38b +5904: + st Z,r20 + std Z+1,r21 + std Z+2,r22 + std Z+3,r23 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + std Z+16,r4 + std Z+17,r5 + std Z+18,r6 + std Z+19,r7 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size drysponge128_g, .-drysponge128_g + + .text +.global gascon256_core_round + .type gascon256_core_round, @function +gascon256_core_round: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,8 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 26 + ldi r18,15 + sub r18,r22 + swap r18 + or r22,r18 + ld r18,Z+ + ld r19,Z+ + ld r20,Z+ + ld r21,Z+ + ld r26,Z+ + ld r27,Z+ + ld r2,Z+ + ld r3,Z+ + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + eor r4,r22 + ld r22,Z + ldd r23,Z+8 + ldd r12,Z+16 + ldd r13,Z+32 + ldd r14,Z+40 + ldd r15,Z+48 + ldd r24,Z+56 + eor r18,r24 + eor r23,r22 + eor r4,r12 + eor r14,r13 + eor r24,r15 + mov r17,r18 + mov r25,r22 + mov r0,r18 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r18,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r4 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r4 + com r0 + and r16,r0 + eor r12,r16 + 
mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r4,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r18 + eor r12,r23 + eor r13,r4 + eor r15,r14 + eor r18,r24 + com r4 + std Y+1,r18 + st Z,r22 + std Z+8,r23 + std Z+16,r12 + std Z+32,r13 + std Z+40,r14 + std Z+48,r15 + std Z+56,r24 + ldd r22,Z+1 + ldd r23,Z+9 + ldd r12,Z+17 + ldd r13,Z+33 + ldd r14,Z+41 + ldd r15,Z+49 + ldd r24,Z+57 + eor r19,r24 + eor r23,r22 + eor r5,r12 + eor r14,r13 + eor r24,r15 + mov r17,r19 + mov r25,r22 + mov r0,r19 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r19,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r5 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r5 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r5,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r19 + eor r12,r23 + eor r13,r5 + eor r15,r14 + eor r19,r24 + com r5 + std Y+2,r19 + std Z+1,r22 + std Z+9,r23 + std Z+17,r12 + std Z+33,r13 + std Z+41,r14 + std Z+49,r15 + std Z+57,r24 + ldd r22,Z+2 + ldd r23,Z+10 + ldd r12,Z+18 + ldd r13,Z+34 + ldd r14,Z+42 + ldd r15,Z+50 + ldd r24,Z+58 + eor r20,r24 + eor r23,r22 + eor r6,r12 + eor r14,r13 + eor r24,r15 + mov r17,r20 + mov r25,r22 + mov r0,r20 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r20,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r6 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r6 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r6,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r20 + eor r12,r23 + eor r13,r6 + eor r15,r14 + eor r20,r24 + com r6 + std Y+3,r20 + std Z+2,r22 + std Z+10,r23 + std Z+18,r12 + std Z+34,r13 + std Z+42,r14 + std Z+50,r15 + std Z+58,r24 + ldd r22,Z+3 + ldd r23,Z+11 + ldd r12,Z+19 + ldd r13,Z+35 + ldd r14,Z+43 + ldd r15,Z+51 + ldd r24,Z+59 + eor r21,r24 + eor r23,r22 + eor r7,r12 + eor r14,r13 + eor r24,r15 + mov r17,r21 + mov r25,r22 + mov r0,r21 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r21,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r7 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r7 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r7,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r21 + eor r12,r23 + eor r13,r7 + eor r15,r14 + eor r21,r24 + com r7 + std Y+4,r21 + std Z+3,r22 + std Z+11,r23 + std Z+19,r12 + std Z+35,r13 + std Z+43,r14 + std Z+51,r15 + std Z+59,r24 + ldd r22,Z+4 + ldd r23,Z+12 + ldd r12,Z+20 + ldd r13,Z+36 + ldd r14,Z+44 + ldd r15,Z+52 + ldd r24,Z+60 + eor r26,r24 + eor r23,r22 + eor r8,r12 + eor r14,r13 + eor r24,r15 + mov r17,r26 + mov r25,r22 + mov r0,r26 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r26,r16 + mov r16,r12 + 
mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r8 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r8 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r8,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r26 + eor r12,r23 + eor r13,r8 + eor r15,r14 + eor r26,r24 + com r8 + std Y+5,r26 + std Z+4,r22 + std Z+12,r23 + std Z+20,r12 + std Z+36,r13 + std Z+44,r14 + std Z+52,r15 + std Z+60,r24 + ldd r22,Z+5 + ldd r23,Z+13 + ldd r12,Z+21 + ldd r13,Z+37 + ldd r14,Z+45 + ldd r15,Z+53 + ldd r24,Z+61 + eor r27,r24 + eor r23,r22 + eor r9,r12 + eor r14,r13 + eor r24,r15 + mov r17,r27 + mov r25,r22 + mov r0,r27 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r27,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r9 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r9 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r9,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r27 + eor r12,r23 + eor r13,r9 + eor r15,r14 + eor r27,r24 + com r9 + std Y+6,r27 + std Z+5,r22 + std Z+13,r23 + std Z+21,r12 + std Z+37,r13 + std Z+45,r14 + std Z+53,r15 + std Z+61,r24 + ldd r22,Z+6 + ldd r23,Z+14 + ldd r12,Z+22 + ldd r13,Z+38 + ldd r14,Z+46 + ldd r15,Z+54 + ldd r24,Z+62 + eor r2,r24 + eor r23,r22 + eor r10,r12 + eor r14,r13 + eor r24,r15 + mov r17,r2 + mov r25,r22 + mov r0,r2 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r2,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r10 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r10 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r2 + eor r12,r23 + eor r13,r10 + eor r15,r14 + eor r2,r24 + com r10 + std Y+7,r2 + std Z+6,r22 + std Z+14,r23 + std Z+22,r12 + std Z+38,r13 + std Z+46,r14 + std Z+54,r15 + std Z+62,r24 + ldd r22,Z+7 + ldd r23,Z+15 + ldd r12,Z+23 + ldd r13,Z+39 + ldd r14,Z+47 + ldd r15,Z+55 + ldd r24,Z+63 + eor r3,r24 + eor r23,r22 + eor r11,r12 + eor r14,r13 + eor r24,r15 + mov r17,r3 + mov r25,r22 + mov r0,r3 + com r0 + and r25,r0 + mov r16,r23 + mov r0,r22 + com r0 + and r16,r0 + eor r3,r16 + mov r16,r12 + mov r0,r23 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r11 + mov r0,r12 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r13 + mov r0,r11 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r22,r3 + eor r12,r23 + eor r13,r11 + eor r15,r14 + eor r3,r24 + com r11 + std Y+8,r3 + std Z+7,r22 + std Z+15,r23 + std Z+23,r12 + std Z+39,r13 + std Z+47,r14 + std Z+55,r15 + std Z+63,r24 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r22,r18 + movw r12,r20 + 
movw r14,r26 + movw r24,r2 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + bst r22,0 + lsr r13 + ror r12 + ror r23 + ror r22 + bld r13,7 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + std Z+12,r26 + std Z+13,r27 + std Z+14,r2 + std Z+15,r3 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r22 + mov r22,r23 + mov r23,r12 + mov r12,r13 + mov r13,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + or r13,r0 + mov r0,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + std Z+20,r26 + std Z+21,r27 + std Z+22,r2 + std Z+23,r3 + movw r22,r4 + movw r12,r6 + movw r14,r8 + movw r24,r10 + mov r0,r1 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + or r13,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r4 + eor 
r15,r5 + eor r24,r6 + eor r25,r7 + eor r22,r8 + eor r23,r9 + eor r12,r10 + eor r13,r11 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r1 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + or r11,r0 + eor r4,r14 + eor r5,r15 + eor r6,r24 + eor r7,r25 + eor r8,r22 + eor r9,r23 + eor r10,r12 + eor r11,r13 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r22 + mov r22,r12 + mov r12,r0 + mov r0,r23 + mov r23,r13 + mov r13,r0 + mov r0,r14 + mov r14,r24 + mov r24,r0 + mov r0,r15 + mov r15,r25 + mov r25,r0 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+32,r18 + std Z+33,r19 + std Z+34,r20 + std Z+35,r21 + std Z+36,r26 + std Z+37,r27 + std Z+38,r2 + std Z+39,r3 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + ldd r26,Z+44 + ldd r27,Z+45 + ldd r2,Z+46 + ldd r3,Z+47 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r13 + mov r13,r12 + mov r12,r23 + mov r23,r22 + mov r22,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + or r13,r0 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+40,r18 + std Z+41,r19 + std Z+42,r20 + std Z+43,r21 + std Z+44,r26 + std Z+45,r27 + std Z+46,r2 + std Z+47,r3 + ldd r18,Z+48 + ldd r19,Z+49 + ldd r20,Z+50 + ldd r21,Z+51 + ldd r26,Z+52 + ldd r27,Z+53 + ldd r2,Z+54 + ldd r3,Z+55 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r22 + mov r22,r23 + mov r23,r12 + mov r12,r13 + mov r13,r0 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + lsl r22 + rol r23 + rol r12 + rol r13 + adc 
r22,r1 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r3 + mov r3,r2 + mov r2,r27 + mov r27,r26 + mov r26,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+48,r18 + std Z+49,r19 + std Z+50,r20 + std Z+51,r21 + std Z+52,r26 + std Z+53,r27 + std Z+54,r2 + std Z+55,r3 + ldd r18,Z+56 + ldd r19,Z+57 + ldd r20,Z+58 + ldd r21,Z+59 + ldd r26,Z+60 + ldd r27,Z+61 + ldd r2,Z+62 + ldd r3,Z+63 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r13 + mov r13,r12 + mov r12,r23 + mov r23,r22 + mov r22,r0 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + lsl r22 + rol r23 + rol r12 + rol r13 + adc r22,r1 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r27 + ror r26 + ror r0 + or r3,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+56,r18 + std Z+57,r19 + std Z+58,r20 + std Z+59,r21 + std Z+60,r26 + std Z+61,r27 + std Z+62,r2 + std Z+63,r3 + ldd r18,Y+1 + ldd r19,Y+2 + ldd r20,Y+3 + ldd r21,Y+4 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r2,Y+7 + ldd r3,Y+8 + movw r22,r18 + movw r12,r20 + movw r14,r26 + movw r24,r2 + mov r0,r22 + mov r22,r23 + mov r23,r12 + mov r12,r13 + mov r13,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + lsr r13 + ror r12 + ror r23 + ror r22 + ror r0 + or r13,r0 + mov r0,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r22,r26 + eor r23,r27 + eor r12,r2 + eor r13,r3 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r26 + mov r26,r2 + mov r2,r0 + mov r0,r27 + mov r27,r3 + mov r3,r0 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + adc r26,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r26,r22 + eor r27,r23 + eor r2,r12 + eor r3,r13 + std Z+24,r4 + std Z+25,r5 + std Z+26,r6 + std Z+27,r7 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + st -Z,r3 + st -Z,r2 + st -Z,r27 + st -Z,r26 + st -Z,r21 + st -Z,r20 + st -Z,r19 + st -Z,r18 + adiw r28,8 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop 
r3 + pop r2 + pop r29 + pop r28 + ret + .size gascon256_core_round, .-gascon256_core_round + + .text +.global drysponge256_g + .type drysponge256_g, @function +drysponge256_g: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,26 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 44 + subi r30,148 + sbci r31,255 + ld r19,Z + subi r30,108 + sbc r31,r1 + ldi r18,240 + std Y+25,r19 + std Y+26,r18 + std Y+9,r1 + std Y+10,r1 + std Y+11,r1 + std Y+12,r1 + std Y+13,r1 + std Y+14,r1 + std Y+15,r1 + std Y+16,r1 + std Y+17,r1 + std Y+18,r1 + std Y+19,r1 + std Y+20,r1 + std Y+21,r1 + std Y+22,r1 + std Y+23,r1 + std Y+24,r1 + ld r18,Z+ + ld r19,Z+ + ld r20,Z+ + ld r21,Z+ + ld r22,Z+ + ld r23,Z+ + ld r26,Z+ + ld r27,Z+ + ldd r2,Z+24 + ldd r3,Z+25 + ldd r4,Z+26 + ldd r5,Z+27 + ldd r6,Z+28 + ldd r7,Z+29 + ldd r8,Z+30 + ldd r9,Z+31 +40: + ldd r24,Y+26 + eor r2,r24 + subi r24,15 + std Y+26,r24 + ld r10,Z + ldd r11,Z+8 + ldd r12,Z+16 + ldd r13,Z+32 + ldd r14,Z+40 + ldd r15,Z+48 + ldd r24,Z+56 + eor r18,r24 + eor r11,r10 + eor r2,r12 + eor r14,r13 + eor r24,r15 + mov r17,r18 + mov r25,r10 + mov r0,r18 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r18,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r2 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r2 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r2,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r18 + eor r12,r11 + eor r13,r2 + eor r15,r14 + eor r18,r24 + com r2 + std Y+1,r18 + st Z,r10 + std Z+8,r11 + std Z+16,r12 + std Z+32,r13 + std Z+40,r14 + std Z+48,r15 + std Z+56,r24 + ldd r10,Z+1 + ldd r11,Z+9 + ldd r12,Z+17 + ldd r13,Z+33 + ldd r14,Z+41 + ldd r15,Z+49 + ldd r24,Z+57 + eor r19,r24 + eor r11,r10 + eor r3,r12 + eor r14,r13 + eor r24,r15 + mov r17,r19 + mov r25,r10 + mov r0,r19 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r19,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r3 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r3 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r3,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r19 + eor r12,r11 + eor r13,r3 + eor r15,r14 + eor r19,r24 + com r3 + std Y+2,r19 + std Z+1,r10 + std Z+9,r11 + std Z+17,r12 + std Z+33,r13 + std Z+41,r14 + std Z+49,r15 + std Z+57,r24 + ldd r10,Z+2 + ldd r11,Z+10 + ldd r12,Z+18 + ldd r13,Z+34 + ldd r14,Z+42 + ldd r15,Z+50 + ldd r24,Z+58 + eor r20,r24 + eor r11,r10 + eor r4,r12 + eor r14,r13 + eor r24,r15 + mov r17,r20 + mov r25,r10 + mov r0,r20 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r20,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r4 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r4 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r4,r16 + mov r16,r15 + mov r0,r14 + com r0 
+ and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r20 + eor r12,r11 + eor r13,r4 + eor r15,r14 + eor r20,r24 + com r4 + std Y+3,r20 + std Z+2,r10 + std Z+10,r11 + std Z+18,r12 + std Z+34,r13 + std Z+42,r14 + std Z+50,r15 + std Z+58,r24 + ldd r10,Z+3 + ldd r11,Z+11 + ldd r12,Z+19 + ldd r13,Z+35 + ldd r14,Z+43 + ldd r15,Z+51 + ldd r24,Z+59 + eor r21,r24 + eor r11,r10 + eor r5,r12 + eor r14,r13 + eor r24,r15 + mov r17,r21 + mov r25,r10 + mov r0,r21 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r21,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r5 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r5 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r5,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r21 + eor r12,r11 + eor r13,r5 + eor r15,r14 + eor r21,r24 + com r5 + std Y+4,r21 + std Z+3,r10 + std Z+11,r11 + std Z+19,r12 + std Z+35,r13 + std Z+43,r14 + std Z+51,r15 + std Z+59,r24 + ldd r10,Z+4 + ldd r11,Z+12 + ldd r12,Z+20 + ldd r13,Z+36 + ldd r14,Z+44 + ldd r15,Z+52 + ldd r24,Z+60 + eor r22,r24 + eor r11,r10 + eor r6,r12 + eor r14,r13 + eor r24,r15 + mov r17,r22 + mov r25,r10 + mov r0,r22 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r22,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r6 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r6 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r6,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r22 + eor r12,r11 + eor r13,r6 + eor r15,r14 + eor r22,r24 + com r6 + std Y+5,r22 + std Z+4,r10 + std Z+12,r11 + std Z+20,r12 + std Z+36,r13 + std Z+44,r14 + std Z+52,r15 + std Z+60,r24 + ldd r10,Z+5 + ldd r11,Z+13 + ldd r12,Z+21 + ldd r13,Z+37 + ldd r14,Z+45 + ldd r15,Z+53 + ldd r24,Z+61 + eor r23,r24 + eor r11,r10 + eor r7,r12 + eor r14,r13 + eor r24,r15 + mov r17,r23 + mov r25,r10 + mov r0,r23 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r23,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r7 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r7 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r7,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r23 + eor r12,r11 + eor r13,r7 + eor r15,r14 + eor r23,r24 + com r7 + std Y+6,r23 + std Z+5,r10 + std Z+13,r11 + std Z+21,r12 + std Z+37,r13 + std Z+45,r14 + std Z+53,r15 + std Z+61,r24 + ldd r10,Z+6 + ldd r11,Z+14 + ldd r12,Z+22 + ldd r13,Z+38 + ldd r14,Z+46 + ldd r15,Z+54 + ldd r24,Z+62 + eor r26,r24 + eor r11,r10 + eor r8,r12 + eor r14,r13 + eor r24,r15 + mov r17,r26 + mov r25,r10 + mov r0,r26 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r26,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r8 + mov r0,r12 + com r0 + and 
r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r8 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r8,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r26 + eor r12,r11 + eor r13,r8 + eor r15,r14 + eor r26,r24 + com r8 + std Y+7,r26 + std Z+6,r10 + std Z+14,r11 + std Z+22,r12 + std Z+38,r13 + std Z+46,r14 + std Z+54,r15 + std Z+62,r24 + ldd r10,Z+7 + ldd r11,Z+15 + ldd r12,Z+23 + ldd r13,Z+39 + ldd r14,Z+47 + ldd r15,Z+55 + ldd r24,Z+63 + eor r27,r24 + eor r11,r10 + eor r9,r12 + eor r14,r13 + eor r24,r15 + mov r17,r27 + mov r25,r10 + mov r0,r27 + com r0 + and r25,r0 + mov r16,r11 + mov r0,r10 + com r0 + and r16,r0 + eor r27,r16 + mov r16,r12 + mov r0,r11 + com r0 + and r16,r0 + eor r10,r16 + mov r16,r9 + mov r0,r12 + com r0 + and r16,r0 + eor r11,r16 + mov r16,r13 + mov r0,r9 + com r0 + and r16,r0 + eor r12,r16 + mov r16,r14 + mov r0,r13 + com r0 + and r16,r0 + eor r9,r16 + mov r16,r15 + mov r0,r14 + com r0 + and r16,r0 + eor r13,r16 + mov r16,r24 + mov r0,r15 + com r0 + and r16,r0 + eor r14,r16 + mov r0,r24 + com r0 + and r17,r0 + eor r15,r17 + eor r24,r25 + eor r10,r27 + eor r12,r11 + eor r13,r9 + eor r15,r14 + eor r27,r24 + com r9 + std Y+8,r27 + std Z+7,r10 + std Z+15,r11 + std Z+23,r12 + std Z+39,r13 + std Z+47,r14 + std Z+55,r15 + std Z+63,r24 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r26,Z+6 + ldd r27,Z+7 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r22 + mov r22,r26 + mov r26,r0 + mov r0,r23 + mov r23,r27 + mov r27,r0 + mov r0,r1 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + or r27,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r22 + std Z+5,r23 + std Z+6,r26 + std Z+7,r27 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r26,Z+14 + ldd r27,Z+15 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + bst r10,0 + lsr r13 + ror r12 + ror r11 + ror r10 + bld r13,7 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r1 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + or r27,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + std Z+12,r22 + std Z+13,r23 + std Z+14,r26 + std Z+15,r27 + 
ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r26,Z+22 + ldd r27,Z+23 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r10 + mov r10,r11 + mov r11,r12 + mov r12,r13 + mov r13,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + or r13,r0 + mov r0,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r22 + mov r22,r23 + mov r23,r26 + mov r26,r27 + mov r27,r0 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + std Z+20,r22 + std Z+21,r23 + std Z+22,r26 + std Z+23,r27 + movw r10,r2 + movw r12,r4 + movw r14,r6 + movw r24,r8 + mov r0,r1 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + or r13,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r2 + eor r15,r3 + eor r24,r4 + eor r25,r5 + eor r10,r6 + eor r11,r7 + eor r12,r8 + eor r13,r9 + mov r0,r2 + mov r2,r4 + mov r4,r0 + mov r0,r3 + mov r3,r5 + mov r5,r0 + mov r0,r1 + lsr r5 + ror r4 + ror r3 + ror r2 + ror r0 + lsr r5 + ror r4 + ror r3 + ror r2 + ror r0 + lsr r5 + ror r4 + ror r3 + ror r2 + ror r0 + lsr r5 + ror r4 + ror r3 + ror r2 + ror r0 + or r5,r0 + mov r0,r6 + mov r6,r8 + mov r8,r0 + mov r0,r7 + mov r7,r9 + mov r9,r0 + mov r0,r1 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + or r9,r0 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + eor r6,r10 + eor r7,r11 + eor r8,r12 + eor r9,r13 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r22,Z+36 + ldd r23,Z+37 + ldd r26,Z+38 + ldd r27,Z+39 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r10 + mov r10,r12 + mov r12,r0 + mov r0,r11 + mov r11,r13 + mov r13,r0 + mov r0,r14 + mov r14,r24 + mov r24,r0 + mov r0,r15 + mov r15,r25 + mov r25,r0 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r22 + mov r22,r26 + mov r26,r0 + mov r0,r23 + mov r23,r27 + mov r27,r0 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+32,r18 + std Z+33,r19 + std Z+34,r20 + std Z+35,r21 + std 
Z+36,r22 + std Z+37,r23 + std Z+38,r26 + std Z+39,r27 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + ldd r22,Z+44 + ldd r23,Z+45 + ldd r26,Z+46 + ldd r27,Z+47 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r13 + mov r13,r12 + mov r12,r11 + mov r11,r10 + mov r10,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + or r13,r0 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+40,r18 + std Z+41,r19 + std Z+42,r20 + std Z+43,r21 + std Z+44,r22 + std Z+45,r23 + std Z+46,r26 + std Z+47,r27 + ldd r18,Z+48 + ldd r19,Z+49 + ldd r20,Z+50 + ldd r21,Z+51 + ldd r22,Z+52 + ldd r23,Z+53 + ldd r26,Z+54 + ldd r27,Z+55 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r10 + mov r10,r11 + mov r11,r12 + mov r12,r13 + mov r13,r0 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r27 + mov r27,r26 + mov r26,r23 + mov r23,r22 + mov r22,r0 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+48,r18 + std Z+49,r19 + std Z+50,r20 + std Z+51,r21 + std Z+52,r22 + std Z+53,r23 + std Z+54,r26 + std Z+55,r27 + ldd r18,Z+56 + ldd r19,Z+57 + ldd r20,Z+58 + ldd r21,Z+59 + ldd r22,Z+60 + ldd r23,Z+61 + ldd r26,Z+62 + ldd r27,Z+63 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r13 + mov r13,r12 + mov r12,r11 + mov r11,r10 + mov r10,r0 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + lsl r10 + rol r11 + rol r12 + rol r13 + adc r10,r1 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r21,r0 + mov r0,r27 + mov r27,r26 + mov r26,r23 + mov r23,r22 + mov r22,r0 + mov r0,r1 + lsr r27 + ror r26 + ror r23 + ror r22 + ror r0 + or r27,r0 + eor r18,r14 + eor r19,r15 + eor r20,r24 
+ eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + std Z+56,r18 + std Z+57,r19 + std Z+58,r20 + std Z+59,r21 + std Z+60,r22 + std Z+61,r23 + std Z+62,r26 + std Z+63,r27 + ldd r18,Y+1 + ldd r19,Y+2 + ldd r20,Y+3 + ldd r21,Y+4 + ldd r22,Y+5 + ldd r23,Y+6 + ldd r26,Y+7 + ldd r27,Y+8 + movw r10,r18 + movw r12,r20 + movw r14,r22 + movw r24,r26 + mov r0,r10 + mov r10,r11 + mov r11,r12 + mov r12,r13 + mov r13,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + or r13,r0 + mov r0,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r14,r18 + eor r15,r19 + eor r24,r20 + eor r25,r21 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + mov r0,r22 + mov r22,r26 + mov r26,r0 + mov r0,r23 + mov r23,r27 + mov r27,r0 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + lsl r22 + rol r23 + rol r26 + rol r27 + adc r22,r1 + eor r18,r14 + eor r19,r15 + eor r20,r24 + eor r21,r25 + eor r22,r10 + eor r23,r11 + eor r26,r12 + eor r27,r13 + ldd r10,Y+9 + ldd r11,Y+10 + ldd r12,Y+11 + ldd r13,Y+12 + eor r10,r18 + eor r11,r19 + eor r12,r20 + eor r13,r21 + ldd r0,Z+12 + eor r10,r0 + ldd r0,Z+13 + eor r11,r0 + ldd r0,Z+14 + eor r12,r0 + ldd r0,Z+15 + eor r13,r0 + ldd r0,Z+32 + eor r10,r0 + ldd r0,Z+33 + eor r11,r0 + ldd r0,Z+34 + eor r12,r0 + ldd r0,Z+35 + eor r13,r0 + ldd r0,Z+52 + eor r10,r0 + ldd r0,Z+53 + eor r11,r0 + ldd r0,Z+54 + eor r12,r0 + ldd r0,Z+55 + eor r13,r0 + std Y+9,r10 + std Y+10,r11 + std Y+11,r12 + std Y+12,r13 + ldd r10,Y+13 + ldd r11,Y+14 + ldd r12,Y+15 + ldd r13,Y+16 + eor r10,r22 + eor r11,r23 + eor r12,r26 + eor r13,r27 + ldd r0,Z+16 + eor r10,r0 + ldd r0,Z+17 + eor r11,r0 + ldd r0,Z+18 + eor r12,r0 + ldd r0,Z+19 + eor r13,r0 + ldd r0,Z+36 + eor r10,r0 + ldd r0,Z+37 + eor r11,r0 + ldd r0,Z+38 + eor r12,r0 + ldd r0,Z+39 + eor r13,r0 + ldd r0,Z+40 + eor r10,r0 + ldd r0,Z+41 + eor r11,r0 + ldd r0,Z+42 + eor r12,r0 + ldd r0,Z+43 + eor r13,r0 + std Y+13,r10 + std Y+14,r11 + std Y+15,r12 + std Y+16,r13 + ldd r10,Y+17 + ldd r11,Y+18 + ldd r12,Y+19 + ldd r13,Y+20 + eor r10,r2 + eor r11,r3 + eor r12,r4 + eor r13,r5 + ld r0,Z + eor r10,r0 + ldd r0,Z+1 + eor r11,r0 + ldd r0,Z+2 + eor r12,r0 + ldd r0,Z+3 + eor r13,r0 + ldd r0,Z+20 + eor r10,r0 + ldd r0,Z+21 + eor r11,r0 + ldd r0,Z+22 + eor r12,r0 + ldd r0,Z+23 + eor r13,r0 + ldd r0,Z+44 + eor r10,r0 + ldd r0,Z+45 + eor r11,r0 + ldd r0,Z+46 + eor r12,r0 + ldd r0,Z+47 + eor r13,r0 + std Y+17,r10 + std Y+18,r11 + std Y+19,r12 + std Y+20,r13 + ldd r10,Y+21 + ldd r11,Y+22 + ldd r12,Y+23 + ldd r13,Y+24 + eor r10,r6 + eor r11,r7 + eor r12,r8 + eor r13,r9 + ldd r0,Z+4 + eor r10,r0 + ldd r0,Z+5 + eor r11,r0 + ldd r0,Z+6 + eor r12,r0 + ldd r0,Z+7 + eor r13,r0 + ldd r0,Z+8 + eor r10,r0 + ldd r0,Z+9 + eor r11,r0 + ldd r0,Z+10 + eor r12,r0 + ldd r0,Z+11 + eor r13,r0 + ldd r0,Z+48 + eor r10,r0 + ldd r0,Z+49 + eor r11,r0 + ldd r0,Z+50 + eor r12,r0 + ldd r0,Z+51 + eor r13,r0 + std Y+21,r10 + std Y+22,r11 + std Y+23,r12 + std Y+24,r13 + ldd r10,Y+25 + dec r10 + std Y+25,r10 + breq 6623f + rjmp 40b +6623: + std Z+24,r2 + std Z+25,r3 + std Z+26,r4 + std Z+27,r5 + std Z+28,r6 + std Z+29,r7 + std Z+30,r8 + std Z+31,r9 + st -Z,r27 + st -Z,r26 + st -Z,r23 + st -Z,r22 + st -Z,r21 + st -Z,r20 + st -Z,r19 + st -Z,r18 + ldi 
r25,72 + add r30,r25 + adc r31,r1 + ldd r18,Y+9 + ldd r19,Y+10 + ldd r20,Y+11 + ldd r21,Y+12 + ldd r22,Y+13 + ldd r23,Y+14 + ldd r26,Y+15 + ldd r27,Y+16 + ldd r2,Y+17 + ldd r3,Y+18 + ldd r4,Y+19 + ldd r5,Y+20 + ldd r6,Y+21 + ldd r7,Y+22 + ldd r8,Y+23 + ldd r9,Y+24 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r22 + std Z+5,r23 + std Z+6,r26 + std Z+7,r27 + std Z+8,r2 + std Z+9,r3 + std Z+10,r4 + std Z+11,r5 + std Z+12,r6 + std Z+13,r7 + std Z+14,r8 + std Z+15,r9 + adiw r28,26 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size drysponge256_g, .-drysponge256_g + +#endif diff --git a/drygascon/Implementations/crypto_hash/drygascon256/rhys/internal-drysponge.c b/drygascon/Implementations/crypto_hash/drygascon256/rhys/internal-drysponge.c new file mode 100644 index 0000000..6dfe48c --- /dev/null +++ b/drygascon/Implementations/crypto_hash/drygascon256/rhys/internal-drysponge.c @@ -0,0 +1,611 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "internal-drysponge.h" +#include + +#if !defined(__AVR__) + +/* Right rotations in bit-interleaved format */ +#define intRightRotateEven(x,bits) \ + (__extension__ ({ \ + uint32_t _x0 = (uint32_t)(x); \ + uint32_t _x1 = (uint32_t)((x) >> 32); \ + _x0 = rightRotate(_x0, (bits)); \ + _x1 = rightRotate(_x1, (bits)); \ + _x0 | (((uint64_t)_x1) << 32); \ + })) +#define intRightRotateOdd(x,bits) \ + (__extension__ ({ \ + uint32_t _x0 = (uint32_t)(x); \ + uint32_t _x1 = (uint32_t)((x) >> 32); \ + _x0 = rightRotate(_x0, ((bits) + 1) % 32); \ + _x1 = rightRotate(_x1, (bits)); \ + _x1 | (((uint64_t)_x0) << 32); \ + })) +#define intRightRotate1_64(x) \ + (__extension__ ({ \ + uint32_t _x0 = (uint32_t)(x); \ + uint32_t _x1 = (uint32_t)((x) >> 32); \ + _x0 = rightRotate1(_x0); \ + _x1 | (((uint64_t)_x0) << 32); \ + })) +#define intRightRotate2_64(x) (intRightRotateEven((x), 1)) +#define intRightRotate3_64(x) (intRightRotateOdd((x), 1)) +#define intRightRotate4_64(x) (intRightRotateEven((x), 2)) +#define intRightRotate5_64(x) (intRightRotateOdd((x), 2)) +#define intRightRotate6_64(x) (intRightRotateEven((x), 3)) +#define intRightRotate7_64(x) (intRightRotateOdd((x), 3)) +#define intRightRotate8_64(x) (intRightRotateEven((x), 4)) +#define intRightRotate9_64(x) (intRightRotateOdd((x), 4)) +#define intRightRotate10_64(x) (intRightRotateEven((x), 5)) +#define intRightRotate11_64(x) (intRightRotateOdd((x), 5)) +#define intRightRotate12_64(x) (intRightRotateEven((x), 6)) +#define intRightRotate13_64(x) (intRightRotateOdd((x), 6)) +#define intRightRotate14_64(x) (intRightRotateEven((x), 7)) +#define intRightRotate15_64(x) (intRightRotateOdd((x), 7)) +#define intRightRotate16_64(x) (intRightRotateEven((x), 8)) +#define intRightRotate17_64(x) (intRightRotateOdd((x), 8)) +#define intRightRotate18_64(x) (intRightRotateEven((x), 9)) +#define intRightRotate19_64(x) (intRightRotateOdd((x), 9)) +#define intRightRotate20_64(x) (intRightRotateEven((x), 10)) +#define intRightRotate21_64(x) (intRightRotateOdd((x), 10)) +#define intRightRotate22_64(x) (intRightRotateEven((x), 11)) +#define intRightRotate23_64(x) (intRightRotateOdd((x), 11)) +#define intRightRotate24_64(x) (intRightRotateEven((x), 12)) +#define intRightRotate25_64(x) (intRightRotateOdd((x), 12)) +#define intRightRotate26_64(x) (intRightRotateEven((x), 13)) +#define intRightRotate27_64(x) (intRightRotateOdd((x), 13)) +#define intRightRotate28_64(x) (intRightRotateEven((x), 14)) +#define intRightRotate29_64(x) (intRightRotateOdd((x), 14)) +#define intRightRotate30_64(x) (intRightRotateEven((x), 15)) +#define intRightRotate31_64(x) (intRightRotateOdd((x), 15)) +#define intRightRotate32_64(x) (intRightRotateEven((x), 16)) +#define intRightRotate33_64(x) (intRightRotateOdd((x), 16)) +#define intRightRotate34_64(x) (intRightRotateEven((x), 17)) +#define intRightRotate35_64(x) (intRightRotateOdd((x), 17)) +#define intRightRotate36_64(x) (intRightRotateEven((x), 18)) +#define intRightRotate37_64(x) (intRightRotateOdd((x), 18)) +#define intRightRotate38_64(x) (intRightRotateEven((x), 19)) +#define intRightRotate39_64(x) (intRightRotateOdd((x), 19)) +#define intRightRotate40_64(x) (intRightRotateEven((x), 20)) +#define intRightRotate41_64(x) (intRightRotateOdd((x), 20)) +#define intRightRotate42_64(x) (intRightRotateEven((x), 21)) +#define intRightRotate43_64(x) (intRightRotateOdd((x), 21)) +#define intRightRotate44_64(x) (intRightRotateEven((x), 22)) +#define intRightRotate45_64(x) (intRightRotateOdd((x), 22)) +#define intRightRotate46_64(x) 
(intRightRotateEven((x), 23)) +#define intRightRotate47_64(x) (intRightRotateOdd((x), 23)) +#define intRightRotate48_64(x) (intRightRotateEven((x), 24)) +#define intRightRotate49_64(x) (intRightRotateOdd((x), 24)) +#define intRightRotate50_64(x) (intRightRotateEven((x), 25)) +#define intRightRotate51_64(x) (intRightRotateOdd((x), 25)) +#define intRightRotate52_64(x) (intRightRotateEven((x), 26)) +#define intRightRotate53_64(x) (intRightRotateOdd((x), 26)) +#define intRightRotate54_64(x) (intRightRotateEven((x), 27)) +#define intRightRotate55_64(x) (intRightRotateOdd((x), 27)) +#define intRightRotate56_64(x) (intRightRotateEven((x), 28)) +#define intRightRotate57_64(x) (intRightRotateOdd((x), 28)) +#define intRightRotate58_64(x) (intRightRotateEven((x), 29)) +#define intRightRotate59_64(x) (intRightRotateOdd((x), 29)) +#define intRightRotate60_64(x) (intRightRotateEven((x), 30)) +#define intRightRotate61_64(x) (intRightRotateOdd((x), 30)) +#define intRightRotate62_64(x) (intRightRotateEven((x), 31)) +#define intRightRotate63_64(x) (intRightRotateOdd((x), 31)) + +void gascon128_core_round(gascon128_state_t *state, uint8_t round) +{ + uint64_t t0, t1, t2, t3, t4; + + /* Load the state into local varaibles */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + uint64_t x0 = state->S[0]; + uint64_t x1 = state->S[1]; + uint64_t x2 = state->S[2]; + uint64_t x3 = state->S[3]; + uint64_t x4 = state->S[4]; +#else + uint64_t x0 = le_load_word64(state->B); + uint64_t x1 = le_load_word64(state->B + 8); + uint64_t x2 = le_load_word64(state->B + 16); + uint64_t x3 = le_load_word64(state->B + 24); + uint64_t x4 = le_load_word64(state->B + 32); +#endif + + /* Add the round constant to the middle of the state */ + x2 ^= ((0x0F - round) << 4) | round; + + /* Substitution layer */ + x0 ^= x4; x2 ^= x1; x4 ^= x3; t0 = (~x0) & x1; t1 = (~x1) & x2; + t2 = (~x2) & x3; t3 = (~x3) & x4; t4 = (~x4) & x0; x0 ^= t1; + x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t0; x1 ^= x0; x3 ^= x2; + x0 ^= x4; x2 = ~x2; + + /* Linear diffusion layer */ + x0 ^= intRightRotate19_64(x0) ^ intRightRotate28_64(x0); + x1 ^= intRightRotate61_64(x1) ^ intRightRotate38_64(x1); + x2 ^= intRightRotate1_64(x2) ^ intRightRotate6_64(x2); + x3 ^= intRightRotate10_64(x3) ^ intRightRotate17_64(x3); + x4 ^= intRightRotate7_64(x4) ^ intRightRotate40_64(x4); + + /* Write the local variables back to the state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + state->S[0] = x0; + state->S[1] = x1; + state->S[2] = x2; + state->S[3] = x3; + state->S[4] = x4; +#else + le_store_word64(state->B, x0); + le_store_word64(state->B + 8, x1); + le_store_word64(state->B + 16, x2); + le_store_word64(state->B + 24, x3); + le_store_word64(state->B + 32, x4); +#endif +} + +void gascon256_core_round(gascon256_state_t *state, uint8_t round) +{ + uint64_t t0, t1, t2, t3, t4, t5, t6, t7, t8; + + /* Load the state into local varaibles */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + uint64_t x0 = state->S[0]; + uint64_t x1 = state->S[1]; + uint64_t x2 = state->S[2]; + uint64_t x3 = state->S[3]; + uint64_t x4 = state->S[4]; + uint64_t x5 = state->S[5]; + uint64_t x6 = state->S[6]; + uint64_t x7 = state->S[7]; + uint64_t x8 = state->S[8]; +#else + uint64_t x0 = le_load_word64(state->B); + uint64_t x1 = le_load_word64(state->B + 8); + uint64_t x2 = le_load_word64(state->B + 16); + uint64_t x3 = le_load_word64(state->B + 24); + uint64_t x4 = le_load_word64(state->B + 32); + uint64_t x5 = le_load_word64(state->B + 40); + uint64_t x6 = le_load_word64(state->B + 48); + uint64_t x7 = le_load_word64(state->B + 56); + 
uint64_t x8 = le_load_word64(state->B + 64); +#endif + + /* Add the round constant to the middle of the state */ + x4 ^= ((0x0F - round) << 4) | round; + + /* Substitution layer */ + x0 ^= x8; x2 ^= x1; x4 ^= x3; x6 ^= x5; x8 ^= x7; t0 = (~x0) & x1; + t1 = (~x1) & x2; t2 = (~x2) & x3; t3 = (~x3) & x4; t4 = (~x4) & x5; + t5 = (~x5) & x6; t6 = (~x6) & x7; t7 = (~x7) & x8; t8 = (~x8) & x0; + x0 ^= t1; x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t5; x5 ^= t6; x6 ^= t7; + x7 ^= t8; x8 ^= t0; x1 ^= x0; x3 ^= x2; x5 ^= x4; x7 ^= x6; x0 ^= x8; + x4 = ~x4; + + /* Linear diffusion layer */ + x0 ^= intRightRotate19_64(x0) ^ intRightRotate28_64(x0); + x1 ^= intRightRotate61_64(x1) ^ intRightRotate38_64(x1); + x2 ^= intRightRotate1_64(x2) ^ intRightRotate6_64(x2); + x3 ^= intRightRotate10_64(x3) ^ intRightRotate17_64(x3); + x4 ^= intRightRotate7_64(x4) ^ intRightRotate40_64(x4); + x5 ^= intRightRotate31_64(x5) ^ intRightRotate26_64(x5); + x6 ^= intRightRotate53_64(x6) ^ intRightRotate58_64(x6); + x7 ^= intRightRotate9_64(x7) ^ intRightRotate46_64(x7); + x8 ^= intRightRotate43_64(x8) ^ intRightRotate50_64(x8); + + /* Write the local variables back to the state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + state->S[0] = x0; + state->S[1] = x1; + state->S[2] = x2; + state->S[3] = x3; + state->S[4] = x4; + state->S[5] = x5; + state->S[6] = x6; + state->S[7] = x7; + state->S[8] = x8; +#else + le_store_word64(state->B, x0); + le_store_word64(state->B + 8, x1); + le_store_word64(state->B + 16, x2); + le_store_word64(state->B + 24, x3); + le_store_word64(state->B + 32, x4); + le_store_word64(state->B + 40, x5); + le_store_word64(state->B + 48, x6); + le_store_word64(state->B + 56, x7); + le_store_word64(state->B + 64, x8); +#endif +} + +void drysponge128_g(drysponge128_state_t *state) +{ + unsigned round; + + /* Perform the first round. For each round we XOR the 16 bytes of + * the output data with the first 16 bytes of the state. And then + * XOR with the next 16 bytes of the state, rotated by 4 bytes */ + gascon128_core_round(&(state->c), 0); + state->r.W[0] = state->c.W[0] ^ state->c.W[5]; + state->r.W[1] = state->c.W[1] ^ state->c.W[6]; + state->r.W[2] = state->c.W[2] ^ state->c.W[7]; + state->r.W[3] = state->c.W[3] ^ state->c.W[4]; + + /* Perform the rest of the rounds */ + for (round = 1; round < state->rounds; ++round) { + gascon128_core_round(&(state->c), round); + state->r.W[0] ^= state->c.W[0] ^ state->c.W[5]; + state->r.W[1] ^= state->c.W[1] ^ state->c.W[6]; + state->r.W[2] ^= state->c.W[2] ^ state->c.W[7]; + state->r.W[3] ^= state->c.W[3] ^ state->c.W[4]; + } +} + +void drysponge256_g(drysponge256_state_t *state) +{ + unsigned round; + + /* Perform the first round. For each round we XOR the 16 bytes of + * the output data with the first 16 bytes of the state. And then + * XOR with the next 16 bytes of the state, rotated by 4 bytes. + * And so on for a total of 64 bytes XOR'ed into the output data. 
*/ + gascon256_core_round(&(state->c), 0); + state->r.W[0] = state->c.W[0] ^ state->c.W[5] ^ + state->c.W[10] ^ state->c.W[15]; + state->r.W[1] = state->c.W[1] ^ state->c.W[6] ^ + state->c.W[11] ^ state->c.W[12]; + state->r.W[2] = state->c.W[2] ^ state->c.W[7] ^ + state->c.W[8] ^ state->c.W[13]; + state->r.W[3] = state->c.W[3] ^ state->c.W[4] ^ + state->c.W[9] ^ state->c.W[14]; + + /* Perform the rest of the rounds */ + for (round = 1; round < state->rounds; ++round) { + gascon256_core_round(&(state->c), round); + state->r.W[0] ^= state->c.W[0] ^ state->c.W[5] ^ + state->c.W[10] ^ state->c.W[15]; + state->r.W[1] ^= state->c.W[1] ^ state->c.W[6] ^ + state->c.W[11] ^ state->c.W[12]; + state->r.W[2] ^= state->c.W[2] ^ state->c.W[7] ^ + state->c.W[8] ^ state->c.W[13]; + state->r.W[3] ^= state->c.W[3] ^ state->c.W[4] ^ + state->c.W[9] ^ state->c.W[14]; + } +} + +#endif /* !__AVR__ */ + +void drysponge128_g_core(drysponge128_state_t *state) +{ + unsigned round; + for (round = 0; round < state->rounds; ++round) + gascon128_core_round(&(state->c), round); +} + +void drysponge256_g_core(drysponge256_state_t *state) +{ + unsigned round; + for (round = 0; round < state->rounds; ++round) + gascon256_core_round(&(state->c), round); +} + +/** + * \fn uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index) + * \brief Selects an element of x in constant time. + * + * \param x Points to the four elements of x. + * \param index Index of which element to extract between 0 and 3. + * + * \return The selected element of x. + */ +#if !defined(__AVR__) +STATIC_INLINE uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index) +{ + /* We need to be careful how we select each element of x because + * we are doing a data-dependent fetch here. Do the fetch in a way + * that should avoid cache timing issues by fetching every element + * of x and masking away the ones we don't want. + * + * There is a possible side channel here with respect to power analysis. + * The "mask" value will be all-ones for the selected index and all-zeroes + * for the other indexes. This may show up as different power consumption + * for the "result ^= x[i] & mask" statement when i is the selected index. + * Such a side channel could in theory allow reading the plaintext input + * to the cipher by analysing the CPU's power consumption. + * + * The DryGASCON specification acknowledges the possibility of plaintext + * recovery in section 7.4. For software mitigation the specification + * suggests randomization of the indexes into c and x and randomization + * of the order of processing words. We aren't doing that here yet. + * Patches welcome to fix this. + */ + uint32_t mask = -((uint32_t)((0x04 - index) >> 2)); + uint32_t result = x[0] & mask; + mask = -((uint32_t)((0x04 - (index ^ 0x01)) >> 2)); + result ^= x[1] & mask; + mask = -((uint32_t)((0x04 - (index ^ 0x02)) >> 2)); + result ^= x[2] & mask; + mask = -((uint32_t)((0x04 - (index ^ 0x03)) >> 2)); + return result ^ (x[3] & mask); +} +#else +/* AVR is more or less immune to cache timing issues because it doesn't + * have anything like an L1 or L2 cache. Select the word directly */ +#define drysponge_select_x(x, index) ((x)[(index)]) +#endif + +/** + * \brief Mixes a 32-bit value into the DrySPONGE128 state. + * + * \param state DrySPONGE128 state. + * \param data The data to be mixed in the bottom 10 bits. 
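The masking above is what makes drysponge_select_x() constant time with respect to memory access: (0x04 - v) >> 2 equals 1 exactly when v is 0, so negating it yields an all-ones mask for the selected word and zero for the other three. A minimal sketch of the same idea, written as a loop for clarity; the helper name select4_ct is illustrative and not part of this patch, and the fixed loop bound keeps the access pattern independent of the secret index:

#include <stdint.h>

/* Illustrative restatement of the masking used by drysponge_select_x():
 * touch every word of x and keep only the one whose mask is all-ones. */
static uint32_t select4_ct(const uint32_t x[4], uint8_t index)
{
    uint32_t result = 0;
    uint8_t i;
    for (i = 0; i < 4; ++i) {
        /* (0x04 - (index ^ i)) >> 2 is 1 if and only if i == index */
        uint32_t mask = (uint32_t)(-(uint32_t)((0x04 - (index ^ i)) >> 2));
        result ^= x[i] & mask;
    }
    return result;
}

The library version unrolls this loop by hand; the result is the same, and the power-analysis caveat described in the comment above applies to both forms.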
+ */ +static void drysponge128_mix_phase_round + (drysponge128_state_t *state, uint32_t data) +{ + /* Mix in elements from x according to the 2-bit indexes in the data */ + state->c.W[0] ^= drysponge_select_x(state->x.W, data & 0x03); + state->c.W[2] ^= drysponge_select_x(state->x.W, (data >> 2) & 0x03); + state->c.W[4] ^= drysponge_select_x(state->x.W, (data >> 4) & 0x03); + state->c.W[6] ^= drysponge_select_x(state->x.W, (data >> 6) & 0x03); + state->c.W[8] ^= drysponge_select_x(state->x.W, (data >> 8) & 0x03); +} + +/** + * \brief Mixes a 32-bit value into the DrySPONGE256 state. + * + * \param state DrySPONGE256 state. + * \param data The data to be mixed in the bottom 18 bits. + */ +static void drysponge256_mix_phase_round + (drysponge256_state_t *state, uint32_t data) +{ + /* Mix in elements from x according to the 2-bit indexes in the data */ + state->c.W[0] ^= drysponge_select_x(state->x.W, data & 0x03); + state->c.W[2] ^= drysponge_select_x(state->x.W, (data >> 2) & 0x03); + state->c.W[4] ^= drysponge_select_x(state->x.W, (data >> 4) & 0x03); + state->c.W[6] ^= drysponge_select_x(state->x.W, (data >> 6) & 0x03); + state->c.W[8] ^= drysponge_select_x(state->x.W, (data >> 8) & 0x03); + state->c.W[10] ^= drysponge_select_x(state->x.W, (data >> 10) & 0x03); + state->c.W[12] ^= drysponge_select_x(state->x.W, (data >> 12) & 0x03); + state->c.W[14] ^= drysponge_select_x(state->x.W, (data >> 14) & 0x03); + state->c.W[16] ^= drysponge_select_x(state->x.W, (data >> 16) & 0x03); +} + +/** + * \brief Mixes an input block into a DrySPONGE128 state. + * + * \param state The DrySPONGE128 state. + * \param data Full rate block containing the input data. + */ +static void drysponge128_mix_phase + (drysponge128_state_t *state, const unsigned char data[DRYSPONGE128_RATE]) +{ + /* Mix 10-bit groups into the output, with the domain + * separator added to the last two groups */ + drysponge128_mix_phase_round + (state, data[0] | (((uint32_t)(data[1])) << 8)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, (data[1] >> 2) | (((uint32_t)(data[2])) << 6)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, (data[2] >> 4) | (((uint32_t)(data[3])) << 4)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, (data[3] >> 6) | (((uint32_t)(data[4])) << 2)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, data[5] | (((uint32_t)(data[6])) << 8)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, (data[6] >> 2) | (((uint32_t)(data[7])) << 6)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, (data[7] >> 4) | (((uint32_t)(data[8])) << 4)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, (data[8] >> 6) | (((uint32_t)(data[9])) << 2)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, data[10] | (((uint32_t)(data[11])) << 8)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, (data[11] >> 2) | (((uint32_t)(data[12])) << 6)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, (data[12] >> 4) | (((uint32_t)(data[13])) << 4)); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round + (state, ((data[13] >> 6) | (((uint32_t)(data[14])) << 2))); + gascon128_core_round(&(state->c), 0); + drysponge128_mix_phase_round(state, data[15] ^ state->domain); + gascon128_core_round(&(state->c), 0); + 
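drysponge128_mix_phase() above walks the 16-byte rate block ten bits at a time: each mix round consumes five 2-bit indexes, hence the staggered byte shifts. As a rough cross-check, the first twelve groups match the generic extraction sketched here, assuming a hypothetical helper name; the original code passes a few extra high bits that the mix round ignores, and the final two rounds additionally fold in the domain separator (the data[15] ^ state->domain and state->domain >> 10 calls surrounding this note):

#include <stdint.h>

/* Sketch: the i-th 10-bit group of a 16-byte rate block, least
 * significant bit first.  Valid for i = 0..11 (never reads past data[15]). */
static uint32_t extract_group_10(const unsigned char *data, unsigned i)
{
    unsigned bit = i * 10;                    /* starting bit offset */
    uint32_t chunk = (uint32_t)data[bit / 8] |
                     ((uint32_t)data[bit / 8 + 1] << 8) |
                     ((uint32_t)data[bit / 8 + 2] << 16);
    return (chunk >> (bit % 8)) & 0x03FF;     /* keep ten bits */
}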
drysponge128_mix_phase_round(state, state->domain >> 10); + + /* Revert to the default domain separator for the next block */ + state->domain = 0; +} + +/** + * \brief Mixes an input block into a DrySPONGE256 state. + * + * \param state The DrySPONGE256 state. + * \param data Full rate block containing the input data. + */ +static void drysponge256_mix_phase + (drysponge256_state_t *state, const unsigned char data[DRYSPONGE256_RATE]) +{ + /* Mix 18-bit groups into the output, with the domain in the last group */ + drysponge256_mix_phase_round + (state, data[0] | (((uint32_t)(data[1])) << 8) | + (((uint32_t)(data[2])) << 16)); + gascon256_core_round(&(state->c), 0); + drysponge256_mix_phase_round + (state, (data[2] >> 2) | (((uint32_t)(data[3])) << 6) | + (((uint32_t)(data[4])) << 14)); + gascon256_core_round(&(state->c), 0); + drysponge256_mix_phase_round + (state, (data[4] >> 4) | (((uint32_t)(data[5])) << 4) | + (((uint32_t)(data[6])) << 12)); + gascon256_core_round(&(state->c), 0); + drysponge256_mix_phase_round + (state, (data[6] >> 6) | (((uint32_t)(data[7])) << 2) | + (((uint32_t)(data[8])) << 10)); + gascon256_core_round(&(state->c), 0); + drysponge256_mix_phase_round + (state, data[9] | (((uint32_t)(data[10])) << 8) | + (((uint32_t)(data[11])) << 16)); + gascon256_core_round(&(state->c), 0); + drysponge256_mix_phase_round + (state, (data[11] >> 2) | (((uint32_t)(data[12])) << 6) | + (((uint32_t)(data[13])) << 14)); + gascon256_core_round(&(state->c), 0); + drysponge256_mix_phase_round + (state, (data[13] >> 4) | (((uint32_t)(data[14])) << 4) | + (((uint32_t)(data[15])) << 12)); + gascon256_core_round(&(state->c), 0); + drysponge256_mix_phase_round + (state, (data[15] >> 6) ^ state->domain); + + /* Revert to the default domain separator for the next block */ + state->domain = 0; +} + +void drysponge128_f_absorb + (drysponge128_state_t *state, const unsigned char *input, unsigned len) +{ + if (len >= DRYSPONGE128_RATE) { + drysponge128_mix_phase(state, input); + } else { + unsigned char padded[DRYSPONGE128_RATE]; + memcpy(padded, input, len); + padded[len] = 0x01; + memset(padded + len + 1, 0, DRYSPONGE128_RATE - len - 1); + drysponge128_mix_phase(state, padded); + } +} + +void drysponge256_f_absorb + (drysponge256_state_t *state, const unsigned char *input, unsigned len) +{ + if (len >= DRYSPONGE256_RATE) { + drysponge256_mix_phase(state, input); + } else { + unsigned char padded[DRYSPONGE256_RATE]; + memcpy(padded, input, len); + padded[len] = 0x01; + memset(padded + len + 1, 0, DRYSPONGE256_RATE - len - 1); + drysponge256_mix_phase(state, padded); + } +} + +/** + * \brief Determine if some of the words of an "x" value are identical. + * + * \param x Points to the "x" buffer to check. + * + * \return Non-zero if some of the words are the same, zero if they are + * distinct from each other. + * + * We try to perform the check in constant time to avoid giving away + * any information about the value of the key. 
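Taken together, drysponge128_f_absorb() followed by drysponge128_g() forms one full F step: the mix phase injects a padded rate block, the core rounds run, and the squeezed rate block is left in state->r. A rough usage sketch under that assumption; the wrapper name is invented here, and the real DryGASCON AEAD and hash code built on top of this file (not shown in this hunk) also deals with domain separation and authentication tags:

#include <string.h>
#include "internal-drysponge.h"

/* Sketch only: absorb one input block, run G, and copy out the
 * squeezed rate block. */
static void drysponge128_absorb_squeeze
    (drysponge128_state_t *state, const unsigned char *in, unsigned len,
     unsigned char out[DRYSPONGE128_RATE])
{
    drysponge128_f_absorb(state, in, len);  /* mix phase; pads short blocks */
    drysponge128_g(state);                  /* core rounds, squeeze into r */
    memcpy(out, state->r.B, DRYSPONGE128_RATE);
}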
+ */ +static int drysponge_x_words_are_same(const uint32_t x[4]) +{ + unsigned i, j; + int result = 0; + for (i = 0; i < 3; ++i) { + for (j = i + 1; j < 4; ++j) { + uint32_t check = x[i] ^ x[j]; + result |= (int)((0x100000000ULL - check) >> 32); + } + } + return result; +} + +void drysponge128_setup + (drysponge128_state_t *state, const unsigned char *key, + const unsigned char *nonce, int final_block) +{ + /* Fill the GASCON-128 state with repeated copies of the key */ + memcpy(state->c.B, key, 16); + memcpy(state->c.B + 16, key, 16); + memcpy(state->c.B + 32, key, 8); + + /* Generate the "x" value for the state. All four words of "x" + * must be unique because they will be used in drysponge_select_x() + * as stand-ins for the bit pairs 00, 01, 10, and 11. + * + * Run the core block operation over and over until "x" is unique. + * Technically the runtime here is key-dependent and not constant. + * If the input key is randomized, this should only take 1 round + * on average so it is "almost constant time". + */ + do { + gascon128_core_round(&(state->c), 0); + } while (drysponge_x_words_are_same(state->c.W)); + memcpy(state->x.W, state->c.W, sizeof(state->x)); + + /* Replace the generated "x" value in the state with the key prefix */ + memcpy(state->c.W, key, sizeof(state->x)); + + /* Absorb the nonce into the state with an increased number of rounds */ + state->rounds = DRYSPONGE128_INIT_ROUNDS; + state->domain = DRYDOMAIN128_NONCE; + if (final_block) + state->domain |= DRYDOMAIN128_FINAL; + drysponge128_f_absorb(state, nonce, 16); + drysponge128_g(state); + + /* Set up the normal number of rounds for future operations */ + state->rounds = DRYSPONGE128_ROUNDS; +} + +void drysponge256_setup + (drysponge256_state_t *state, const unsigned char *key, + const unsigned char *nonce, int final_block) +{ + /* Fill the GASCON-256 state with repeated copies of the key */ + memcpy(state->c.B, key, 32); + memcpy(state->c.B + 32, key, 32); + memcpy(state->c.B + 64, key, 8); + + /* Generate the "x" value for the state */ + do { + gascon256_core_round(&(state->c), 0); + } while (drysponge_x_words_are_same(state->c.W)); + memcpy(state->x.W, state->c.W, sizeof(state->x)); + + /* Replace the generated "x" value in the state with the key prefix */ + memcpy(state->c.W, key, sizeof(state->x)); + + /* Absorb the nonce into the state with an increased number of rounds */ + state->rounds = DRYSPONGE256_INIT_ROUNDS; + state->domain = DRYDOMAIN256_NONCE; + if (final_block) + state->domain |= DRYDOMAIN256_FINAL; + drysponge256_f_absorb(state, nonce, 16); + drysponge256_g(state); + + /* Set up the normal number of rounds for future operations */ + state->rounds = DRYSPONGE256_ROUNDS; +} diff --git a/drygascon/Implementations/crypto_hash/drygascon256/rhys/internal-drysponge.h b/drygascon/Implementations/crypto_hash/drygascon256/rhys/internal-drysponge.h new file mode 100644 index 0000000..05b0c16 --- /dev/null +++ b/drygascon/Implementations/crypto_hash/drygascon256/rhys/internal-drysponge.h @@ -0,0 +1,345 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_DRYSPONGE_H +#define LW_INTERNAL_DRYSPONGE_H + +#include "internal-util.h" + +/** + * \file internal-drysponge.h + * \brief Internal implementation of DrySPONGE for the DryGASCON cipher. + * + * References: https://github.com/sebastien-riou/DryGASCON + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the GASCON-128 permutation state in bytes. + */ +#define GASCON128_STATE_SIZE 40 + +/** + * \brief Size of the GASCON-256 permutation state in bytes. + */ +#define GASCON256_STATE_SIZE 72 + +/** + * \brief Rate of absorption and squeezing for DrySPONGE128. + */ +#define DRYSPONGE128_RATE 16 + +/** + * \brief Rate of absorption and squeezing for DrySPONGE256. + */ +#define DRYSPONGE256_RATE 16 + +/** + * \brief Size of the "x" value for DrySPONGE128. + */ +#define DRYSPONGE128_XSIZE 16 + +/** + * \brief Size of the "x" value for DrySPONGE256. + */ +#define DRYSPONGE256_XSIZE 16 + +/** + * \brief Normal number of rounds for DrySPONGE128 when absorbing + * and squeezing data. + */ +#define DRYSPONGE128_ROUNDS 7 + +/** + * \brief Number of rounds for DrySPONGE128 during initialization. + */ +#define DRYSPONGE128_INIT_ROUNDS 11 + +/** + * \brief Normal number of rounds for DrySPONGE256 when absorbing + * and squeezing data. + */ +#define DRYSPONGE256_ROUNDS 8 + +/** + * \brief Number of rounds for DrySPONGE256 during initialization. + */ +#define DRYSPONGE256_INIT_ROUNDS 12 + +/** + * \brief DrySPONGE128 domain bit for a padded block. + */ +#define DRYDOMAIN128_PADDED (1 << 8) + +/** + * \brief DrySPONGE128 domain bit for a final block. + */ +#define DRYDOMAIN128_FINAL (1 << 9) + +/** + * \brief DrySPONGE128 domain value for processing the nonce. + */ +#define DRYDOMAIN128_NONCE (1 << 10) + +/** + * \brief DrySPONGE128 domain value for processing the associated data. + */ +#define DRYDOMAIN128_ASSOC_DATA (2 << 10) + +/** + * \brief DrySPONGE128 domain value for processing the message. + */ +#define DRYDOMAIN128_MESSAGE (3 << 10) + +/** + * \brief DrySPONGE256 domain bit for a padded block. + */ +#define DRYDOMAIN256_PADDED (1 << 2) + +/** + * \brief DrySPONGE256 domain bit for a final block. + */ +#define DRYDOMAIN256_FINAL (1 << 3) + +/** + * \brief DrySPONGE256 domain value for processing the nonce. + */ +#define DRYDOMAIN256_NONCE (1 << 4) + +/** + * \brief DrySPONGE256 domain value for processing the associated data. 
+ */ +#define DRYDOMAIN256_ASSOC_DATA (2 << 4) + +/** + * \brief DrySPONGE256 domain value for processing the message. + */ +#define DRYDOMAIN256_MESSAGE (3 << 4) + +/** + * \brief Internal state of the GASCON-128 permutation. + */ +typedef union +{ + uint64_t S[GASCON128_STATE_SIZE / 8]; /**< 64-bit words of the state */ + uint32_t W[GASCON128_STATE_SIZE / 4]; /**< 32-bit words of the state */ + uint8_t B[GASCON128_STATE_SIZE]; /**< Bytes of the state */ + +} gascon128_state_t; + +/** + * \brief Internal state of the GASCON-256 permutation. + */ +typedef union +{ + uint64_t S[GASCON256_STATE_SIZE / 8]; /**< 64-bit words of the state */ + uint32_t W[GASCON256_STATE_SIZE / 4]; /**< 32-bit words of the state */ + uint8_t B[GASCON256_STATE_SIZE]; /**< Bytes of the state */ + +} gascon256_state_t; + +/** + * \brief Structure of a rate block for DrySPONGE128. + */ +typedef union +{ + uint64_t S[DRYSPONGE128_RATE / 8]; /**< 64-bit words of the rate */ + uint32_t W[DRYSPONGE128_RATE / 4]; /**< 32-bit words of the rate */ + uint8_t B[DRYSPONGE128_RATE]; /**< Bytes of the rate */ + +} drysponge128_rate_t; + +/** + * \brief Structure of a rate block for DrySPONGE256. + */ +typedef union +{ + uint64_t S[DRYSPONGE256_RATE / 8]; /**< 64-bit words of the rate */ + uint32_t W[DRYSPONGE256_RATE / 4]; /**< 32-bit words of the rate */ + uint8_t B[DRYSPONGE256_RATE]; /**< Bytes of the rate */ + +} drysponge256_rate_t; + +/** + * \brief Structure of the "x" value for DrySPONGE128. + */ +typedef union +{ + uint64_t S[DRYSPONGE128_XSIZE / 8]; /**< 64-bit words of the rate */ + uint32_t W[DRYSPONGE128_XSIZE / 4]; /**< 32-bit words of the rate */ + uint8_t B[DRYSPONGE128_XSIZE]; /**< Bytes of the rate */ + +} drysponge128_x_t; + +/** + * \brief Structure of the "x" value for DrySPONGE256. + */ +typedef union +{ + uint64_t S[DRYSPONGE256_XSIZE / 8]; /**< 64-bit words of the rate */ + uint32_t W[DRYSPONGE256_XSIZE / 4]; /**< 32-bit words of the rate */ + uint8_t B[DRYSPONGE256_XSIZE]; /**< Bytes of the rate */ + +} drysponge256_x_t; + +/** + * \brief Structure of the rolling DrySPONGE128 state. + */ +typedef struct +{ + gascon128_state_t c; /**< GASCON-128 state for the capacity */ + drysponge128_rate_t r; /**< Buffer for a rate block of data */ + drysponge128_x_t x; /**< "x" value for the sponge */ + uint32_t domain; /**< Domain value to mix on next F call */ + uint32_t rounds; /**< Number of rounds for next G call */ + +} drysponge128_state_t; + +/** + * \brief Structure of the rolling DrySPONGE256 state. + */ +typedef struct +{ + gascon256_state_t c; /**< GASCON-256 state for the capacity */ + drysponge256_rate_t r; /**< Buffer for a rate block of data */ + drysponge256_x_t x; /**< "x" value for the sponge */ + uint32_t domain; /**< Domain value to mix on next F call */ + uint32_t rounds; /**< Number of rounds for next G call */ + +} drysponge256_state_t; + +/** + * \brief Permutes the GASCON-128 state using one iteration of CoreRound. + * + * \param state The GASCON-128 state to be permuted. + * \param round The round number. + * + * The input and output \a state will be in little-endian byte order. + */ +void gascon128_core_round(gascon128_state_t *state, uint8_t round); + +/** + * \brief Permutes the GASCON-256 state using one iteration of CoreRound. + * + * \param state The GASCON-256 state to be permuted. + * \param round The round number. + * + * The input and output \a state will be in little-endian byte order. 
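The DRYDOMAIN128 values above are aligned with the 10-bit mix groups: bits 8 and 9 (padded and final) travel with the last data byte, while the type bits from bit 10 upward are mixed as a trailing group of their own, matching the data[15] ^ state->domain and state->domain >> 10 calls in internal-drysponge.c earlier in this patch. A small worked example; the function name and the byte value are purely illustrative:

#include <assert.h>
#include "internal-drysponge.h"

/* Worked example (illustrative only): domain encoding for a final
 * nonce block in DrySPONGE128. */
static void drydomain128_example(void)
{
    uint32_t domain = DRYDOMAIN128_NONCE | DRYDOMAIN128_FINAL;  /* 0x600 */
    unsigned char last_byte = 0xAB;  /* arbitrary 16th byte of the block */

    /* Low ten bits of the last mixed group: the data byte plus the FINAL bit */
    assert(((last_byte ^ domain) & 0x03FF) == (0x200u | last_byte));

    /* The trailing group carries only the domain type (NONCE >> 10 == 1) */
    assert((domain >> 10) == 1);
}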
+ */ +void gascon256_core_round(gascon256_state_t *state, uint8_t round); + +/** + * \brief Performs the DrySPONGE128 G function which runs the core + * rounds and squeezes data out of the GASGON-128 state. + * + * \param state The DrySPONGE128 state. + * + * The data that is squeezed out will be in state->r on exit. + */ +void drysponge128_g(drysponge128_state_t *state); + +/** + * \brief Performs the DrySPONGE256 G function which runs the core + * rounds and squeezes data out of the GASGON-256 state. + * + * \param state The DrySPONGE256 state. + * + * The data that is squeezed out will be in state->r on exit. + */ +void drysponge256_g(drysponge256_state_t *state); + +/** + * \brief Performs the DrySPONGE128 G function which runs the core + * rounds but does not squeeze out any output. + * + * \param state The DrySPONGE128 state. + */ +void drysponge128_g_core(drysponge128_state_t *state); + +/** + * \brief Performs the DrySPONGE256 G function which runs the core + * rounds but does not squeeze out any output. + * + * \param state The DrySPONGE256 state. + */ +void drysponge256_g_core(drysponge256_state_t *state); + +/** + * \brief Performs the absorption phase of the DrySPONGE128 F function. + * + * \param state The DrySPONGE128 state. + * \param input The block of input data to incorporate into the state. + * \param len The length of the input block, which must be less than + * or equal to DRYSPONGE128_RATE. Smaller input blocks will be padded. + * + * This function must be followed by a call to drysponge128_g() or + * drysponge128_g_core() to perform the full F operation. + */ +void drysponge128_f_absorb + (drysponge128_state_t *state, const unsigned char *input, unsigned len); + +/** + * \brief Performs the absorption phase of the DrySPONGE256 F function. + * + * \param state The DrySPONGE256 state. + * \param input The block of input data to incorporate into the state. + * \param len The length of the input block, which must be less than + * or equal to DRYSPONGE256_RATE. Smaller input blocks will be padded. + * + * This function must be followed by a call to drysponge256_g() or + * drysponge256_g_core() to perform the full F operation. + */ +void drysponge256_f_absorb + (drysponge256_state_t *state, const unsigned char *input, unsigned len); + +/** + * \brief Set up a DrySPONGE128 state to begin encryption or decryption. + * + * \param state The DrySPONGE128 state. + * \param key Points to the 16 bytes of the key. + * \param nonce Points to the 16 bytes of the nonce. + * \param final_block Non-zero if after key setup there will be no more blocks. + */ +void drysponge128_setup + (drysponge128_state_t *state, const unsigned char *key, + const unsigned char *nonce, int final_block); + +/** + * \brief Set up a DrySPONGE256 state to begin encryption or decryption. + * + * \param state The DrySPONGE256 state. + * \param key Points to the 32 bytes of the key. + * \param nonce Points to the 16 bytes of the nonce. + * \param final_block Non-zero if after key setup there will be no more blocks. 
+ */ +void drysponge256_setup + (drysponge256_state_t *state, const unsigned char *key, + const unsigned char *nonce, int final_block); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drygascon/Implementations/crypto_hash/drygascon256/rhys/internal-util.h b/drygascon/Implementations/crypto_hash/drygascon256/rhys/internal-util.h new file mode 100644 index 0000000..e30166d --- /dev/null +++ b/drygascon/Implementations/crypto_hash/drygascon256/rhys/internal-util.h @@ -0,0 +1,702 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_UTIL_H +#define LW_INTERNAL_UTIL_H + +#include <stdint.h> + +/* Figure out how to inline functions using this C compiler */ +#if defined(__STDC__) && __STDC_VERSION__ >= 199901L +#define STATIC_INLINE static inline +#elif defined(__GNUC__) || defined(__clang__) +#define STATIC_INLINE static __inline__ +#else +#define STATIC_INLINE static +#endif + +/* Try to figure out whether the CPU is little-endian or big-endian. + * May need to modify this to include new compiler-specific defines.
+ * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your + * compiler flags when you compile this library */ +#if defined(__x86_64) || defined(__x86_64__) || \ + defined(__i386) || defined(__i386__) || \ + defined(__AVR__) || defined(__arm) || defined(__arm__) || \ + defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ + defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ + defined(__LITTLE_ENDIAN__) +#define LW_UTIL_LITTLE_ENDIAN 1 +#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ + defined(__BIG_ENDIAN__) +/* Big endian */ +#else +#error "Cannot determine the endianess of this platform" +#endif + +/* Helper macros to load and store values while converting endian-ness */ + +/* Load a big-endian 32-bit word from a byte buffer */ +#define be_load_word32(ptr) \ + ((((uint32_t)((ptr)[0])) << 24) | \ + (((uint32_t)((ptr)[1])) << 16) | \ + (((uint32_t)((ptr)[2])) << 8) | \ + ((uint32_t)((ptr)[3]))) + +/* Store a big-endian 32-bit word into a byte buffer */ +#define be_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 24); \ + (ptr)[1] = (uint8_t)(_x >> 16); \ + (ptr)[2] = (uint8_t)(_x >> 8); \ + (ptr)[3] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 32-bit word from a byte buffer */ +#define le_load_word32(ptr) \ + ((((uint32_t)((ptr)[3])) << 24) | \ + (((uint32_t)((ptr)[2])) << 16) | \ + (((uint32_t)((ptr)[1])) << 8) | \ + ((uint32_t)((ptr)[0]))) + +/* Store a little-endian 32-bit word into a byte buffer */ +#define le_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + } while (0) + +/* Load a big-endian 64-bit word from a byte buffer */ +#define be_load_word64(ptr) \ + ((((uint64_t)((ptr)[0])) << 56) | \ + (((uint64_t)((ptr)[1])) << 48) | \ + (((uint64_t)((ptr)[2])) << 40) | \ + (((uint64_t)((ptr)[3])) << 32) | \ + (((uint64_t)((ptr)[4])) << 24) | \ + (((uint64_t)((ptr)[5])) << 16) | \ + (((uint64_t)((ptr)[6])) << 8) | \ + ((uint64_t)((ptr)[7]))) + +/* Store a big-endian 64-bit word into a byte buffer */ +#define be_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 56); \ + (ptr)[1] = (uint8_t)(_x >> 48); \ + (ptr)[2] = (uint8_t)(_x >> 40); \ + (ptr)[3] = (uint8_t)(_x >> 32); \ + (ptr)[4] = (uint8_t)(_x >> 24); \ + (ptr)[5] = (uint8_t)(_x >> 16); \ + (ptr)[6] = (uint8_t)(_x >> 8); \ + (ptr)[7] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 64-bit word from a byte buffer */ +#define le_load_word64(ptr) \ + ((((uint64_t)((ptr)[7])) << 56) | \ + (((uint64_t)((ptr)[6])) << 48) | \ + (((uint64_t)((ptr)[5])) << 40) | \ + (((uint64_t)((ptr)[4])) << 32) | \ + (((uint64_t)((ptr)[3])) << 24) | \ + (((uint64_t)((ptr)[2])) << 16) | \ + (((uint64_t)((ptr)[1])) << 8) | \ + ((uint64_t)((ptr)[0]))) + +/* Store a little-endian 64-bit word into a byte buffer */ +#define le_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + (ptr)[4] = (uint8_t)(_x >> 32); \ + (ptr)[5] = (uint8_t)(_x >> 40); \ + (ptr)[6] = (uint8_t)(_x >> 48); \ + (ptr)[7] = (uint8_t)(_x >> 56); \ + } while (0) + +/* Load a big-endian 16-bit word from a byte buffer */ +#define be_load_word16(ptr) \ + ((((uint16_t)((ptr)[0])) << 8) | \ + ((uint16_t)((ptr)[1]))) + +/* Store a 
big-endian 16-bit word into a byte buffer */ +#define be_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 8); \ + (ptr)[1] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 16-bit word from a byte buffer */ +#define le_load_word16(ptr) \ + ((((uint16_t)((ptr)[1])) << 8) | \ + ((uint16_t)((ptr)[0]))) + +/* Store a little-endian 16-bit word into a byte buffer */ +#define le_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + } while (0) + +/* XOR a source byte buffer against a destination */ +#define lw_xor_block(dest, src, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ ^= *_src++; \ + --_len; \ + } \ + } while (0) + +/* XOR two source byte buffers and put the result in a destination buffer */ +#define lw_xor_block_2_src(dest, src1, src2, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ = *_src1++ ^ *_src2++; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time */ +#define lw_xor_block_2_dest(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest2++ = (*_dest++ ^= *_src++); \ + --_len; \ + } \ + } while (0) + +/* XOR two byte buffers and write to a destination which at the same + * time copying the contents of src2 to dest2 */ +#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src2++; \ + *_dest2++ = _temp; \ + *_dest++ = *_src1++ ^ _temp; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time. This version swaps the source value + * into the "dest" buffer */ +#define lw_xor_block_swap(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src++; \ + *_dest2++ = *_dest ^ _temp; \ + *_dest++ = _temp; \ + --_len; \ + } \ + } while (0) + +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + +/* Rotation macros for 32-bit arguments */ + +/* Generic left rotate */ +#define leftRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (32 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (32 - (bits))); \ + })) + +#if !LW_CRYPTO_ROTATE32_COMPOSED + +/* Left rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1(a) (leftRotate((a), 1)) +#define leftRotate2(a) (leftRotate((a), 2)) +#define leftRotate3(a) (leftRotate((a), 3)) +#define leftRotate4(a) (leftRotate((a), 4)) +#define leftRotate5(a) (leftRotate((a), 5)) +#define leftRotate6(a) (leftRotate((a), 6)) +#define leftRotate7(a) (leftRotate((a), 7)) +#define leftRotate8(a) (leftRotate((a), 8)) +#define leftRotate9(a) (leftRotate((a), 9)) +#define leftRotate10(a) (leftRotate((a), 10)) +#define leftRotate11(a) (leftRotate((a), 11)) +#define leftRotate12(a) (leftRotate((a), 12)) +#define leftRotate13(a) (leftRotate((a), 13)) +#define leftRotate14(a) (leftRotate((a), 14)) +#define leftRotate15(a) (leftRotate((a), 15)) +#define leftRotate16(a) (leftRotate((a), 16)) +#define leftRotate17(a) (leftRotate((a), 17)) +#define leftRotate18(a) (leftRotate((a), 18)) +#define leftRotate19(a) (leftRotate((a), 19)) +#define leftRotate20(a) (leftRotate((a), 20)) +#define leftRotate21(a) (leftRotate((a), 21)) +#define leftRotate22(a) (leftRotate((a), 22)) +#define leftRotate23(a) (leftRotate((a), 23)) +#define leftRotate24(a) (leftRotate((a), 24)) +#define leftRotate25(a) (leftRotate((a), 25)) +#define leftRotate26(a) (leftRotate((a), 26)) +#define leftRotate27(a) (leftRotate((a), 27)) +#define leftRotate28(a) (leftRotate((a), 28)) +#define leftRotate29(a) (leftRotate((a), 29)) +#define leftRotate30(a) (leftRotate((a), 30)) +#define leftRotate31(a) (leftRotate((a), 31)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1(a) (rightRotate((a), 1)) +#define rightRotate2(a) (rightRotate((a), 2)) +#define rightRotate3(a) (rightRotate((a), 3)) +#define rightRotate4(a) (rightRotate((a), 4)) +#define rightRotate5(a) (rightRotate((a), 5)) +#define rightRotate6(a) (rightRotate((a), 6)) +#define rightRotate7(a) (rightRotate((a), 7)) +#define rightRotate8(a) (rightRotate((a), 8)) +#define rightRotate9(a) (rightRotate((a), 9)) +#define rightRotate10(a) (rightRotate((a), 10)) +#define rightRotate11(a) (rightRotate((a), 11)) +#define rightRotate12(a) (rightRotate((a), 12)) +#define rightRotate13(a) (rightRotate((a), 13)) +#define rightRotate14(a) (rightRotate((a), 14)) +#define rightRotate15(a) (rightRotate((a), 15)) +#define rightRotate16(a) (rightRotate((a), 16)) +#define rightRotate17(a) (rightRotate((a), 17)) +#define rightRotate18(a) (rightRotate((a), 18)) +#define rightRotate19(a) (rightRotate((a), 19)) +#define rightRotate20(a) (rightRotate((a), 20)) +#define rightRotate21(a) (rightRotate((a), 21)) +#define rightRotate22(a) (rightRotate((a), 22)) +#define rightRotate23(a) (rightRotate((a), 23)) +#define rightRotate24(a) (rightRotate((a), 24)) +#define rightRotate25(a) (rightRotate((a), 25)) +#define rightRotate26(a) (rightRotate((a), 26)) +#define rightRotate27(a) (rightRotate((a), 27)) +#define rightRotate28(a) (rightRotate((a), 28)) +#define rightRotate29(a) (rightRotate((a), 29)) +#define rightRotate30(a) (rightRotate((a), 30)) +#define rightRotate31(a) (rightRotate((a), 31)) + +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ 
+#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Rotation macros for 64-bit arguments */ + +/* Generic left rotate */ +#define leftRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (64 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (64 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_64(a) (leftRotate_64((a), 1)) +#define leftRotate2_64(a) (leftRotate_64((a), 2)) +#define leftRotate3_64(a) (leftRotate_64((a), 3)) +#define leftRotate4_64(a) (leftRotate_64((a), 4)) +#define leftRotate5_64(a) (leftRotate_64((a), 5)) +#define leftRotate6_64(a) (leftRotate_64((a), 6)) +#define leftRotate7_64(a) (leftRotate_64((a), 7)) +#define leftRotate8_64(a) (leftRotate_64((a), 8)) +#define leftRotate9_64(a) (leftRotate_64((a), 9)) +#define leftRotate10_64(a) (leftRotate_64((a), 10)) +#define leftRotate11_64(a) (leftRotate_64((a), 11)) +#define leftRotate12_64(a) (leftRotate_64((a), 12)) +#define leftRotate13_64(a) (leftRotate_64((a), 13)) +#define leftRotate14_64(a) (leftRotate_64((a), 14)) +#define leftRotate15_64(a) (leftRotate_64((a), 15)) +#define leftRotate16_64(a) (leftRotate_64((a), 16)) +#define leftRotate17_64(a) (leftRotate_64((a), 17)) +#define leftRotate18_64(a) (leftRotate_64((a), 18)) +#define leftRotate19_64(a) (leftRotate_64((a), 19)) +#define leftRotate20_64(a) (leftRotate_64((a), 20)) +#define leftRotate21_64(a) (leftRotate_64((a), 21)) +#define leftRotate22_64(a) (leftRotate_64((a), 22)) +#define leftRotate23_64(a) (leftRotate_64((a), 23)) +#define leftRotate24_64(a) (leftRotate_64((a), 24)) +#define leftRotate25_64(a) (leftRotate_64((a), 25)) +#define leftRotate26_64(a) (leftRotate_64((a), 26)) +#define leftRotate27_64(a) (leftRotate_64((a), 27)) +#define leftRotate28_64(a) (leftRotate_64((a), 28)) +#define leftRotate29_64(a) (leftRotate_64((a), 29)) +#define leftRotate30_64(a) (leftRotate_64((a), 30)) +#define leftRotate31_64(a) (leftRotate_64((a), 31)) +#define leftRotate32_64(a) (leftRotate_64((a), 32)) +#define leftRotate33_64(a) (leftRotate_64((a), 33)) +#define leftRotate34_64(a) (leftRotate_64((a), 34)) +#define leftRotate35_64(a) (leftRotate_64((a), 35)) +#define leftRotate36_64(a) (leftRotate_64((a), 36)) +#define leftRotate37_64(a) (leftRotate_64((a), 37)) +#define leftRotate38_64(a) (leftRotate_64((a), 38)) +#define leftRotate39_64(a) (leftRotate_64((a), 39)) +#define leftRotate40_64(a) (leftRotate_64((a), 40)) +#define leftRotate41_64(a) (leftRotate_64((a), 41)) +#define leftRotate42_64(a) (leftRotate_64((a), 42)) +#define leftRotate43_64(a) (leftRotate_64((a), 43)) +#define leftRotate44_64(a) (leftRotate_64((a), 44)) +#define leftRotate45_64(a) (leftRotate_64((a), 45)) +#define leftRotate46_64(a) (leftRotate_64((a), 46)) +#define leftRotate47_64(a) (leftRotate_64((a), 47)) +#define leftRotate48_64(a) (leftRotate_64((a), 48)) +#define leftRotate49_64(a) (leftRotate_64((a), 49)) +#define leftRotate50_64(a) (leftRotate_64((a), 50)) +#define leftRotate51_64(a) (leftRotate_64((a), 51)) +#define leftRotate52_64(a) (leftRotate_64((a), 52)) +#define leftRotate53_64(a) (leftRotate_64((a), 53)) +#define leftRotate54_64(a) (leftRotate_64((a), 54)) +#define leftRotate55_64(a) (leftRotate_64((a), 55)) +#define leftRotate56_64(a) (leftRotate_64((a), 56)) +#define leftRotate57_64(a) (leftRotate_64((a), 57)) +#define leftRotate58_64(a) (leftRotate_64((a), 58)) +#define leftRotate59_64(a) (leftRotate_64((a), 59)) +#define leftRotate60_64(a) (leftRotate_64((a), 60)) +#define leftRotate61_64(a) (leftRotate_64((a), 61)) +#define leftRotate62_64(a) (leftRotate_64((a), 62)) +#define leftRotate63_64(a) (leftRotate_64((a), 63)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_64(a) (rightRotate_64((a), 1)) +#define rightRotate2_64(a) (rightRotate_64((a), 2)) +#define rightRotate3_64(a) (rightRotate_64((a), 3)) +#define rightRotate4_64(a) (rightRotate_64((a), 4)) +#define rightRotate5_64(a) (rightRotate_64((a), 5)) +#define rightRotate6_64(a) (rightRotate_64((a), 6)) +#define rightRotate7_64(a) (rightRotate_64((a), 7)) +#define rightRotate8_64(a) (rightRotate_64((a), 8)) +#define rightRotate9_64(a) (rightRotate_64((a), 9)) +#define rightRotate10_64(a) (rightRotate_64((a), 10)) +#define rightRotate11_64(a) (rightRotate_64((a), 11)) +#define rightRotate12_64(a) (rightRotate_64((a), 12)) +#define rightRotate13_64(a) (rightRotate_64((a), 13)) +#define rightRotate14_64(a) (rightRotate_64((a), 14)) +#define rightRotate15_64(a) (rightRotate_64((a), 15)) +#define rightRotate16_64(a) (rightRotate_64((a), 16)) +#define rightRotate17_64(a) (rightRotate_64((a), 17)) +#define rightRotate18_64(a) (rightRotate_64((a), 18)) +#define rightRotate19_64(a) (rightRotate_64((a), 19)) +#define rightRotate20_64(a) (rightRotate_64((a), 20)) +#define rightRotate21_64(a) (rightRotate_64((a), 21)) +#define rightRotate22_64(a) (rightRotate_64((a), 22)) +#define rightRotate23_64(a) (rightRotate_64((a), 23)) +#define rightRotate24_64(a) (rightRotate_64((a), 24)) +#define rightRotate25_64(a) (rightRotate_64((a), 25)) +#define rightRotate26_64(a) (rightRotate_64((a), 26)) +#define rightRotate27_64(a) (rightRotate_64((a), 27)) +#define rightRotate28_64(a) (rightRotate_64((a), 28)) +#define rightRotate29_64(a) (rightRotate_64((a), 29)) +#define rightRotate30_64(a) (rightRotate_64((a), 30)) +#define rightRotate31_64(a) (rightRotate_64((a), 31)) +#define rightRotate32_64(a) (rightRotate_64((a), 32)) +#define rightRotate33_64(a) (rightRotate_64((a), 33)) +#define rightRotate34_64(a) (rightRotate_64((a), 34)) +#define rightRotate35_64(a) (rightRotate_64((a), 35)) +#define rightRotate36_64(a) (rightRotate_64((a), 36)) +#define rightRotate37_64(a) (rightRotate_64((a), 37)) +#define rightRotate38_64(a) (rightRotate_64((a), 38)) +#define rightRotate39_64(a) (rightRotate_64((a), 39)) +#define rightRotate40_64(a) (rightRotate_64((a), 40)) +#define rightRotate41_64(a) (rightRotate_64((a), 41)) +#define rightRotate42_64(a) (rightRotate_64((a), 42)) +#define rightRotate43_64(a) (rightRotate_64((a), 43)) +#define rightRotate44_64(a) (rightRotate_64((a), 44)) +#define rightRotate45_64(a) (rightRotate_64((a), 45)) +#define rightRotate46_64(a) (rightRotate_64((a), 46)) +#define rightRotate47_64(a) (rightRotate_64((a), 47)) +#define rightRotate48_64(a) (rightRotate_64((a), 48)) +#define rightRotate49_64(a) (rightRotate_64((a), 49)) +#define rightRotate50_64(a) (rightRotate_64((a), 50)) +#define rightRotate51_64(a) (rightRotate_64((a), 51)) +#define rightRotate52_64(a) (rightRotate_64((a), 52)) +#define rightRotate53_64(a) (rightRotate_64((a), 53)) +#define rightRotate54_64(a) (rightRotate_64((a), 54)) +#define rightRotate55_64(a) (rightRotate_64((a), 55)) +#define rightRotate56_64(a) (rightRotate_64((a), 56)) +#define rightRotate57_64(a) (rightRotate_64((a), 57)) +#define rightRotate58_64(a) (rightRotate_64((a), 58)) +#define rightRotate59_64(a) (rightRotate_64((a), 59)) +#define rightRotate60_64(a) (rightRotate_64((a), 60)) +#define rightRotate61_64(a) (rightRotate_64((a), 61)) +#define rightRotate62_64(a) (rightRotate_64((a), 62)) +#define rightRotate63_64(a) (rightRotate_64((a), 63)) + +/* 
Rotate a 16-bit value left by a number of bits */ +#define leftRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (16 - (bits))); \ + })) + +/* Rotate a 16-bit value right by a number of bits */ +#define rightRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (16 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_16(a) (leftRotate_16((a), 1)) +#define leftRotate2_16(a) (leftRotate_16((a), 2)) +#define leftRotate3_16(a) (leftRotate_16((a), 3)) +#define leftRotate4_16(a) (leftRotate_16((a), 4)) +#define leftRotate5_16(a) (leftRotate_16((a), 5)) +#define leftRotate6_16(a) (leftRotate_16((a), 6)) +#define leftRotate7_16(a) (leftRotate_16((a), 7)) +#define leftRotate8_16(a) (leftRotate_16((a), 8)) +#define leftRotate9_16(a) (leftRotate_16((a), 9)) +#define leftRotate10_16(a) (leftRotate_16((a), 10)) +#define leftRotate11_16(a) (leftRotate_16((a), 11)) +#define leftRotate12_16(a) (leftRotate_16((a), 12)) +#define leftRotate13_16(a) (leftRotate_16((a), 13)) +#define leftRotate14_16(a) (leftRotate_16((a), 14)) +#define leftRotate15_16(a) (leftRotate_16((a), 15)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_16(a) (rightRotate_16((a), 1)) +#define rightRotate2_16(a) (rightRotate_16((a), 2)) +#define rightRotate3_16(a) (rightRotate_16((a), 3)) +#define rightRotate4_16(a) (rightRotate_16((a), 4)) +#define rightRotate5_16(a) (rightRotate_16((a), 5)) +#define rightRotate6_16(a) (rightRotate_16((a), 6)) +#define rightRotate7_16(a) (rightRotate_16((a), 7)) +#define rightRotate8_16(a) (rightRotate_16((a), 8)) +#define rightRotate9_16(a) (rightRotate_16((a), 9)) +#define rightRotate10_16(a) (rightRotate_16((a), 10)) +#define rightRotate11_16(a) (rightRotate_16((a), 11)) +#define rightRotate12_16(a) (rightRotate_16((a), 12)) +#define rightRotate13_16(a) (rightRotate_16((a), 13)) +#define rightRotate14_16(a) (rightRotate_16((a), 14)) +#define rightRotate15_16(a) (rightRotate_16((a), 15)) + +/* Rotate an 8-bit value left by a number of bits */ +#define leftRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (8 - (bits))); \ + })) + +/* Rotate an 8-bit value right by a number of bits */ +#define rightRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (8 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_8(a) (leftRotate_8((a), 1)) +#define leftRotate2_8(a) (leftRotate_8((a), 2)) +#define leftRotate3_8(a) (leftRotate_8((a), 3)) +#define leftRotate4_8(a) (leftRotate_8((a), 4)) +#define leftRotate5_8(a) (leftRotate_8((a), 5)) +#define leftRotate6_8(a) (leftRotate_8((a), 6)) +#define leftRotate7_8(a) (leftRotate_8((a), 7)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_8(a) (rightRotate_8((a), 1)) +#define rightRotate2_8(a) (rightRotate_8((a), 2)) +#define rightRotate3_8(a) (rightRotate_8((a), 3)) +#define rightRotate4_8(a) (rightRotate_8((a), 4)) +#define rightRotate5_8(a) (rightRotate_8((a), 5)) +#define rightRotate6_8(a) (rightRotate_8((a), 6)) +#define rightRotate7_8(a) (rightRotate_8((a), 7)) + +#endif diff --git a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/aead-common.c b/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/aead-common.h b/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
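The constant-time tag comparison in the aead-common.c hunk above relies on a small masking trick: the byte differences between the two tags are OR-ed into an accumulator, and (accum - 1) >> 8 becomes an all-ones mask only when every byte matched (assuming the usual arithmetic right shift on signed integers), so the plaintext survives the AND on success and is wiped on failure. A minimal standalone sketch of that trick, separate from the library code, is:

    #include <stdio.h>
    #include <string.h>

    /* Demo of the constant-time check: wipes "plaintext" and returns -1 on
     * mismatch, leaves it alone and returns 0 on match. */
    static int check_tag_demo(unsigned char *plaintext, size_t plaintext_len,
                              const unsigned char *tag1,
                              const unsigned char *tag2, unsigned size)
    {
        int accum = 0;
        while (size > 0) {
            accum |= (*tag1++ ^ *tag2++);   /* non-zero iff any byte differs */
            --size;
        }
        accum = (accum - 1) >> 8;           /* all ones if equal, 0 otherwise */
        while (plaintext_len > 0) {
            *plaintext++ &= (unsigned char)accum;
            --plaintext_len;
        }
        return ~accum;                      /* 0 on success, -1 on failure */
    }

    int main(void)
    {
        unsigned char pt[4] = { 'd', 'a', 't', 'a' };
        unsigned char t1[8] = { 0 }, t2[8] = { 0 };
        printf("match:    %d\n", check_tag_demo(pt, sizeof(pt), t1, t2, 8));
        t2[7] ^= 1;
        printf("mismatch: %d\n", check_tag_demo(pt, sizeof(pt), t1, t2, 8));
        return 0;
    }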
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. 
Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/api.h b/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/api.h deleted file mode 100644 index 32c9622..0000000 --- a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 12 -#define CRYPTO_ABYTES 8 -#define CRYPTO_NOOVERLAP 1 diff --git a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/elephant.c b/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/elephant.c deleted file mode 100644 index 2f7abb3..0000000 --- a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/elephant.c +++ /dev/null @@ -1,881 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
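The AEAD_FLAG_LITTLE_ENDIAN flag declared in the header above only tells the application which end of the nonce a packet sequence number should occupy; it does not do the formatting itself. One plausible helper, assuming the counter goes into the leading bytes for little-endian ciphers and the trailing bytes otherwise (seqno_to_nonce is a hypothetical name, not part of the library), might look like:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NONCE_SIZE 12   /* all Elephant members use a 96-bit nonce */

    /* Hypothetical helper: place a 64-bit sequence number into the nonce,
     * leading bytes for a little-endian cipher, trailing bytes otherwise. */
    static void seqno_to_nonce(unsigned char nonce[NONCE_SIZE],
                               uint64_t seq, int little_endian)
    {
        memset(nonce, 0, NONCE_SIZE);
        for (int i = 0; i < 8; ++i) {
            unsigned char b = (unsigned char)(seq >> (8 * i));
            if (little_endian)
                nonce[i] = b;                   /* counter at the front */
            else
                nonce[NONCE_SIZE - 1 - i] = b;  /* counter at the back */
        }
    }

    int main(void)
    {
        unsigned char nonce[NONCE_SIZE];
        seqno_to_nonce(nonce, 42, 1);   /* Elephant sets AEAD_FLAG_LITTLE_ENDIAN */
        for (int i = 0; i < NONCE_SIZE; ++i)
            printf("%02x", nonce[i]);
        printf("\n");
        return 0;
    }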
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "elephant.h" -#include "internal-keccak.h" -#include "internal-spongent.h" -#include - -aead_cipher_t const dumbo_cipher = { - "Dumbo", - DUMBO_KEY_SIZE, - DUMBO_NONCE_SIZE, - DUMBO_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - dumbo_aead_encrypt, - dumbo_aead_decrypt -}; - -aead_cipher_t const jumbo_cipher = { - "Jumbo", - JUMBO_KEY_SIZE, - JUMBO_NONCE_SIZE, - JUMBO_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - jumbo_aead_encrypt, - jumbo_aead_decrypt -}; - -aead_cipher_t const delirium_cipher = { - "Delirium", - DELIRIUM_KEY_SIZE, - DELIRIUM_NONCE_SIZE, - DELIRIUM_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - delirium_aead_encrypt, - delirium_aead_decrypt -}; - -/** - * \brief Applies the Dumbo LFSR to the mask. - * - * \param out The output mask. - * \param in The input mask. - */ -static void dumbo_lfsr - (unsigned char out[SPONGENT160_STATE_SIZE], - const unsigned char in[SPONGENT160_STATE_SIZE]) -{ - unsigned char temp = - leftRotate3_8(in[0]) ^ (in[3] << 7) ^ (in[13] >> 7); - unsigned index; - for (index = 0; index < SPONGENT160_STATE_SIZE - 1; ++index) - out[index] = in[index + 1]; - out[SPONGENT160_STATE_SIZE - 1] = temp; -} - -/** - * \brief Processes the nonce and associated data for Dumbo. - * - * \param state Points to the Spongent-pi[160] state. - * \param mask Points to the initial mask value. - * \param next Points to the next mask value. - * \param tag Points to the ongoing tag that is being computed. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data. - */ -static void dumbo_process_ad - (spongent160_state_t *state, - unsigned char mask[SPONGENT160_STATE_SIZE], - unsigned char next[SPONGENT160_STATE_SIZE], - unsigned char tag[DUMBO_TAG_SIZE], - const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned posn, size; - - /* We need the "previous" and "next" masks in each step. 
- * Compare the first such values */ - dumbo_lfsr(next, mask); - dumbo_lfsr(next, next); - - /* Absorb the nonce into the state */ - lw_xor_block_2_src(state->B, mask, next, SPONGENT160_STATE_SIZE); - lw_xor_block(state->B, npub, DUMBO_NONCE_SIZE); - - /* Absorb the rest of the associated data */ - posn = DUMBO_NONCE_SIZE; - while (adlen > 0) { - size = SPONGENT160_STATE_SIZE - posn; - if (size <= adlen) { - /* Process a complete block */ - lw_xor_block(state->B + posn, ad, size); - spongent160_permute(state); - lw_xor_block(state->B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state->B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state->B, DUMBO_TAG_SIZE); - dumbo_lfsr(mask, mask); - dumbo_lfsr(next, next); - lw_xor_block_2_src(state->B, mask, next, SPONGENT160_STATE_SIZE); - posn = 0; - } else { - /* Process the partial block at the end of the associated data */ - size = (unsigned)adlen; - lw_xor_block(state->B + posn, ad, size); - posn += size; - } - ad += size; - adlen -= size; - } - - /* Pad and absorb the final block */ - state->B[posn] ^= 0x01; - spongent160_permute(state); - lw_xor_block(state->B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state->B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state->B, DUMBO_TAG_SIZE); -} - -int dumbo_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - spongent160_state_t state; - unsigned char start[SPONGENT160_STATE_SIZE]; - unsigned char mask[SPONGENT160_STATE_SIZE]; - unsigned char next[SPONGENT160_STATE_SIZE]; - unsigned char tag[DUMBO_TAG_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + DUMBO_TAG_SIZE; - - /* Hash the key and generate the initial mask */ - memcpy(state.B, k, DUMBO_KEY_SIZE); - memset(state.B + DUMBO_KEY_SIZE, 0, sizeof(state.B) - DUMBO_KEY_SIZE); - spongent160_permute(&state); - memcpy(mask, state.B, DUMBO_KEY_SIZE); - memset(mask + DUMBO_KEY_SIZE, 0, sizeof(mask) - DUMBO_KEY_SIZE); - memcpy(start, mask, sizeof(mask)); - - /* Tag starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Authenticate the nonce and the associated data */ - dumbo_process_ad(&state, mask, next, tag, npub, ad, adlen); - - /* Reset back to the starting mask for the encryption phase */ - memcpy(mask, start, sizeof(mask)); - - /* Encrypt and authenticate the payload */ - while (mlen >= SPONGENT160_STATE_SIZE) { - /* Encrypt using the current mask */ - memcpy(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, npub, DUMBO_NONCE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, m, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, mask, SPONGENT160_STATE_SIZE); - memcpy(c, state.B, SPONGENT160_STATE_SIZE); - - /* Authenticate using the next mask */ - dumbo_lfsr(next, mask); - lw_xor_block(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, next, SPONGENT160_STATE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state.B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, DUMBO_TAG_SIZE); - - /* Advance to the next block */ - memcpy(mask, next, SPONGENT160_STATE_SIZE); - c += SPONGENT160_STATE_SIZE; - m += SPONGENT160_STATE_SIZE; - mlen -= SPONGENT160_STATE_SIZE; - } - if (mlen > 0) { - /* Encrypt the last block using the current mask */ - unsigned temp = (unsigned)mlen; - memcpy(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, 
npub, DUMBO_NONCE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, m, temp); - lw_xor_block(state.B, mask, SPONGENT160_STATE_SIZE); - memcpy(c, state.B, temp); - - /* Authenticate the last block using the next mask */ - dumbo_lfsr(next, mask); - state.B[temp] = 0x01; - memset(state.B + temp + 1, 0, SPONGENT160_STATE_SIZE - temp - 1); - lw_xor_block(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, next, SPONGENT160_STATE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state.B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, DUMBO_TAG_SIZE); - c += temp; - } else if (*clen != DUMBO_TAG_SIZE) { - /* Pad and authenticate when the last block is aligned */ - dumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT160_STATE_SIZE); - state.B[0] ^= 0x01; - spongent160_permute(&state); - lw_xor_block(state.B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state.B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, DUMBO_TAG_SIZE); - } - - /* Generate the authentication tag */ - memcpy(c, tag, DUMBO_TAG_SIZE); - return 0; -} - -int dumbo_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - spongent160_state_t state; - unsigned char *mtemp = m; - unsigned char start[SPONGENT160_STATE_SIZE]; - unsigned char mask[SPONGENT160_STATE_SIZE]; - unsigned char next[SPONGENT160_STATE_SIZE]; - unsigned char tag[DUMBO_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < DUMBO_TAG_SIZE) - return -1; - *mlen = clen - DUMBO_TAG_SIZE; - - /* Hash the key and generate the initial mask */ - memcpy(state.B, k, DUMBO_KEY_SIZE); - memset(state.B + DUMBO_KEY_SIZE, 0, sizeof(state.B) - DUMBO_KEY_SIZE); - spongent160_permute(&state); - memcpy(mask, state.B, DUMBO_KEY_SIZE); - memset(mask + DUMBO_KEY_SIZE, 0, sizeof(mask) - DUMBO_KEY_SIZE); - memcpy(start, mask, sizeof(mask)); - - /* Tag starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Authenticate the nonce and the associated data */ - dumbo_process_ad(&state, mask, next, tag, npub, ad, adlen); - - /* Reset back to the starting mask for the encryption phase */ - memcpy(mask, start, sizeof(mask)); - - /* Decrypt and authenticate the payload */ - clen -= DUMBO_TAG_SIZE; - while (clen >= SPONGENT160_STATE_SIZE) { - /* Authenticate using the next mask */ - dumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, c, SPONGENT160_STATE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state.B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, DUMBO_TAG_SIZE); - - /* Decrypt using the current mask */ - memcpy(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, npub, DUMBO_NONCE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block_2_src(m, state.B, c, SPONGENT160_STATE_SIZE); - - /* Advance to the next block */ - memcpy(mask, next, SPONGENT160_STATE_SIZE); - c += SPONGENT160_STATE_SIZE; - m += SPONGENT160_STATE_SIZE; - clen -= SPONGENT160_STATE_SIZE; - } - if (clen > 0) { - /* Authenticate the last block using the next mask */ - unsigned temp = (unsigned)clen; - dumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT160_STATE_SIZE); - 
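The dumbo_lfsr() step above derives each successive mask from the previous one: a feedback byte is computed from bytes 0, 3 and 13, the 20-byte mask is shifted down by one byte, and the feedback byte becomes the last byte. A self-contained sketch of the same step, written with a plain C rotate instead of the leftRotate3_8() macro (STATE_SIZE and dumbo_lfsr_demo are illustrative names), is:

    #include <stdint.h>
    #include <stdio.h>

    #define STATE_SIZE 20   /* SPONGENT160_STATE_SIZE: 160-bit mask */

    /* Illustrative copy of the mask-update step: compute the feedback byte,
     * shift the mask down by one byte, append the feedback byte. */
    static void dumbo_lfsr_demo(uint8_t out[STATE_SIZE], const uint8_t in[STATE_SIZE])
    {
        uint8_t fb = (uint8_t)((in[0] << 3) | (in[0] >> 5));  /* rotl8(in[0], 3) */
        fb ^= (uint8_t)(in[3] << 7);
        fb ^= (uint8_t)(in[13] >> 7);
        for (int i = 0; i < STATE_SIZE - 1; ++i)
            out[i] = in[i + 1];
        out[STATE_SIZE - 1] = fb;
    }

    int main(void)
    {
        uint8_t mask[STATE_SIZE] = { 0x80, 0x01 };  /* arbitrary starting mask */
        uint8_t next[STATE_SIZE];
        dumbo_lfsr_demo(next, mask);    /* "next" mask, as in dumbo_process_ad() */
        printf("feedback byte: 0x%02x\n", next[STATE_SIZE - 1]);
        return 0;
    }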
lw_xor_block(state.B, c, temp); - state.B[temp] ^= 0x01; - spongent160_permute(&state); - lw_xor_block(state.B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state.B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, DUMBO_TAG_SIZE); - - /* Decrypt the last block using the current mask */ - memcpy(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, npub, DUMBO_NONCE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, mask, temp); - lw_xor_block_2_src(m, state.B, c, temp); - c += temp; - } else if (*mlen != 0) { - /* Pad and authenticate when the last block is aligned */ - dumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT160_STATE_SIZE); - state.B[0] ^= 0x01; - spongent160_permute(&state); - lw_xor_block(state.B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state.B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, DUMBO_TAG_SIZE); - } - - /* Check the authentication tag */ - return aead_check_tag(mtemp, *mlen, tag, c, DUMBO_TAG_SIZE); -} - -/** - * \brief Applies the Jumbo LFSR to the mask. - * - * \param out The output mask. - * \param in The input mask. - */ -static void jumbo_lfsr - (unsigned char out[SPONGENT176_STATE_SIZE], - const unsigned char in[SPONGENT176_STATE_SIZE]) -{ - unsigned char temp = - leftRotate1_8(in[0]) ^ (in[3] << 7) ^ (in[19] >> 7); - unsigned index; - for (index = 0; index < SPONGENT176_STATE_SIZE - 1; ++index) - out[index] = in[index + 1]; - out[SPONGENT176_STATE_SIZE - 1] = temp; -} - -/** - * \brief Processes the nonce and associated data for Jumbo. - * - * \param state Points to the Spongent-pi[170] state. - * \param mask Points to the initial mask value. - * \param next Points to the next mask value. - * \param tag Points to the ongoing tag that is being computed. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data. - */ -static void jumbo_process_ad - (spongent176_state_t *state, - unsigned char mask[SPONGENT176_STATE_SIZE], - unsigned char next[SPONGENT176_STATE_SIZE], - unsigned char tag[JUMBO_TAG_SIZE], - const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned posn, size; - - /* We need the "previous" and "next" masks in each step. 
- * Compare the first such values */ - jumbo_lfsr(next, mask); - jumbo_lfsr(next, next); - - /* Absorb the nonce into the state */ - lw_xor_block_2_src(state->B, mask, next, SPONGENT176_STATE_SIZE); - lw_xor_block(state->B, npub, JUMBO_NONCE_SIZE); - - /* Absorb the rest of the associated data */ - posn = JUMBO_NONCE_SIZE; - while (adlen > 0) { - size = SPONGENT176_STATE_SIZE - posn; - if (size <= adlen) { - /* Process a complete block */ - lw_xor_block(state->B + posn, ad, size); - spongent176_permute(state); - lw_xor_block(state->B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state->B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state->B, JUMBO_TAG_SIZE); - jumbo_lfsr(mask, mask); - jumbo_lfsr(next, next); - lw_xor_block_2_src(state->B, mask, next, SPONGENT176_STATE_SIZE); - posn = 0; - } else { - /* Process the partial block at the end of the associated data */ - size = (unsigned)adlen; - lw_xor_block(state->B + posn, ad, size); - posn += size; - } - ad += size; - adlen -= size; - } - - /* Pad and absorb the final block */ - state->B[posn] ^= 0x01; - spongent176_permute(state); - lw_xor_block(state->B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state->B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state->B, JUMBO_TAG_SIZE); -} - -int jumbo_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - spongent176_state_t state; - unsigned char start[SPONGENT176_STATE_SIZE]; - unsigned char mask[SPONGENT176_STATE_SIZE]; - unsigned char next[SPONGENT176_STATE_SIZE]; - unsigned char tag[JUMBO_TAG_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + JUMBO_TAG_SIZE; - - /* Hash the key and generate the initial mask */ - memcpy(state.B, k, JUMBO_KEY_SIZE); - memset(state.B + JUMBO_KEY_SIZE, 0, sizeof(state.B) - JUMBO_KEY_SIZE); - spongent176_permute(&state); - memcpy(mask, state.B, JUMBO_KEY_SIZE); - memset(mask + JUMBO_KEY_SIZE, 0, sizeof(mask) - JUMBO_KEY_SIZE); - memcpy(start, mask, sizeof(mask)); - - /* Tag starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Authenticate the nonce and the associated data */ - jumbo_process_ad(&state, mask, next, tag, npub, ad, adlen); - - /* Reset back to the starting mask for the encryption phase */ - memcpy(mask, start, sizeof(mask)); - - /* Encrypt and authenticate the payload */ - while (mlen >= SPONGENT176_STATE_SIZE) { - /* Encrypt using the current mask */ - memcpy(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, npub, JUMBO_NONCE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, m, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, mask, SPONGENT176_STATE_SIZE); - memcpy(c, state.B, SPONGENT176_STATE_SIZE); - - /* Authenticate using the next mask */ - jumbo_lfsr(next, mask); - lw_xor_block(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, next, SPONGENT176_STATE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state.B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, JUMBO_TAG_SIZE); - - /* Advance to the next block */ - memcpy(mask, next, SPONGENT176_STATE_SIZE); - c += SPONGENT176_STATE_SIZE; - m += SPONGENT176_STATE_SIZE; - mlen -= SPONGENT176_STATE_SIZE; - } - if (mlen > 0) { - /* Encrypt the last block using the current mask */ - unsigned temp = (unsigned)mlen; - memcpy(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, 
npub, JUMBO_NONCE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, m, temp); - lw_xor_block(state.B, mask, SPONGENT176_STATE_SIZE); - memcpy(c, state.B, temp); - - /* Authenticate the last block using the next mask */ - jumbo_lfsr(next, mask); - state.B[temp] = 0x01; - memset(state.B + temp + 1, 0, SPONGENT176_STATE_SIZE - temp - 1); - lw_xor_block(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, next, SPONGENT176_STATE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state.B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, JUMBO_TAG_SIZE); - c += temp; - } else if (*clen != JUMBO_TAG_SIZE) { - /* Pad and authenticate when the last block is aligned */ - jumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT176_STATE_SIZE); - state.B[0] ^= 0x01; - spongent176_permute(&state); - lw_xor_block(state.B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state.B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, JUMBO_TAG_SIZE); - } - - /* Generate the authentication tag */ - memcpy(c, tag, JUMBO_TAG_SIZE); - return 0; -} - -int jumbo_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - spongent176_state_t state; - unsigned char *mtemp = m; - unsigned char start[SPONGENT176_STATE_SIZE]; - unsigned char mask[SPONGENT176_STATE_SIZE]; - unsigned char next[SPONGENT176_STATE_SIZE]; - unsigned char tag[JUMBO_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < JUMBO_TAG_SIZE) - return -1; - *mlen = clen - JUMBO_TAG_SIZE; - - /* Hash the key and generate the initial mask */ - memcpy(state.B, k, JUMBO_KEY_SIZE); - memset(state.B + JUMBO_KEY_SIZE, 0, sizeof(state.B) - JUMBO_KEY_SIZE); - spongent176_permute(&state); - memcpy(mask, state.B, JUMBO_KEY_SIZE); - memset(mask + JUMBO_KEY_SIZE, 0, sizeof(mask) - JUMBO_KEY_SIZE); - memcpy(start, mask, sizeof(mask)); - - /* Tag starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Authenticate the nonce and the associated data */ - jumbo_process_ad(&state, mask, next, tag, npub, ad, adlen); - - /* Reset back to the starting mask for the encryption phase */ - memcpy(mask, start, sizeof(mask)); - - /* Decrypt and authenticate the payload */ - clen -= JUMBO_TAG_SIZE; - while (clen >= SPONGENT176_STATE_SIZE) { - /* Authenticate using the next mask */ - jumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, c, SPONGENT176_STATE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state.B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, JUMBO_TAG_SIZE); - - /* Decrypt using the current mask */ - memcpy(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, npub, JUMBO_NONCE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block_2_src(m, state.B, c, SPONGENT176_STATE_SIZE); - - /* Advance to the next block */ - memcpy(mask, next, SPONGENT176_STATE_SIZE); - c += SPONGENT176_STATE_SIZE; - m += SPONGENT176_STATE_SIZE; - clen -= SPONGENT176_STATE_SIZE; - } - if (clen > 0) { - /* Authenticate the last block using the next mask */ - unsigned temp = (unsigned)clen; - jumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT176_STATE_SIZE); - 
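The encryption and decryption loops above lean on two small XOR helpers from internal-util.h that are not shown in this hunk. Their behaviour, inferred here from how they are called (so this is an assumption, not the library's actual definitions), is dest ^= src and dest = src1 ^ src2 over len bytes:

    #include <stdio.h>

    /* dest ^= src over len bytes (as lw_xor_block appears to behave) */
    static void xor_block(unsigned char *dest, const unsigned char *src, unsigned len)
    {
        while (len-- > 0)
            *dest++ ^= *src++;
    }

    /* dest = src1 ^ src2 over len bytes (as lw_xor_block_2_src appears to behave) */
    static void xor_block_2_src(unsigned char *dest, const unsigned char *src1,
                                const unsigned char *src2, unsigned len)
    {
        while (len-- > 0)
            *dest++ = *src1++ ^ *src2++;
    }

    int main(void)
    {
        unsigned char a[4] = { 1, 2, 3, 4 }, b[4] = { 4, 3, 2, 1 }, c[4];
        xor_block_2_src(c, a, b, 4);    /* c = a ^ b */
        xor_block(c, b, 4);             /* c ^= b, so c == a again */
        printf("%d %d %d %d\n", c[0], c[1], c[2], c[3]);
        return 0;
    }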
lw_xor_block(state.B, c, temp); - state.B[temp] ^= 0x01; - spongent176_permute(&state); - lw_xor_block(state.B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state.B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, JUMBO_TAG_SIZE); - - /* Decrypt the last block using the current mask */ - memcpy(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, npub, JUMBO_NONCE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, mask, temp); - lw_xor_block_2_src(m, state.B, c, temp); - c += temp; - } else if (*mlen != 0) { - /* Pad and authenticate when the last block is aligned */ - jumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT176_STATE_SIZE); - state.B[0] ^= 0x01; - spongent176_permute(&state); - lw_xor_block(state.B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state.B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, JUMBO_TAG_SIZE); - } - - /* Check the authentication tag */ - return aead_check_tag(mtemp, *mlen, tag, c, JUMBO_TAG_SIZE); -} - -/** - * \brief Applies the Delirium LFSR to the mask. - * - * \param out The output mask. - * \param in The input mask. - */ -static void delirium_lfsr - (unsigned char out[KECCAKP_200_STATE_SIZE], - const unsigned char in[KECCAKP_200_STATE_SIZE]) -{ - unsigned char temp = - leftRotate1_8(in[0]) ^ leftRotate1_8(in[2]) ^ (in[13] << 1); - unsigned index; - for (index = 0; index < KECCAKP_200_STATE_SIZE - 1; ++index) - out[index] = in[index + 1]; - out[KECCAKP_200_STATE_SIZE - 1] = temp; -} - -/** - * \brief Processes the nonce and associated data for Delirium. - * - * \param state Points to the Keccak[200] state. - * \param mask Points to the initial mask value. - * \param next Points to the next mask value. - * \param tag Points to the ongoing tag that is being computed. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data. - */ -static void delirium_process_ad - (keccakp_200_state_t *state, - unsigned char mask[KECCAKP_200_STATE_SIZE], - unsigned char next[KECCAKP_200_STATE_SIZE], - unsigned char tag[DELIRIUM_TAG_SIZE], - const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned posn, size; - - /* We need the "previous" and "next" masks in each step. 
- * Compare the first such values */ - delirium_lfsr(next, mask); - delirium_lfsr(next, next); - - /* Absorb the nonce into the state */ - lw_xor_block_2_src(state->B, mask, next, KECCAKP_200_STATE_SIZE); - lw_xor_block(state->B, npub, DELIRIUM_NONCE_SIZE); - - /* Absorb the rest of the associated data */ - posn = DELIRIUM_NONCE_SIZE; - while (adlen > 0) { - size = KECCAKP_200_STATE_SIZE - posn; - if (size <= adlen) { - /* Process a complete block */ - lw_xor_block(state->B + posn, ad, size); - keccakp_200_permute(state); - lw_xor_block(state->B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state->B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state->B, DELIRIUM_TAG_SIZE); - delirium_lfsr(mask, mask); - delirium_lfsr(next, next); - lw_xor_block_2_src(state->B, mask, next, KECCAKP_200_STATE_SIZE); - posn = 0; - } else { - /* Process the partial block at the end of the associated data */ - size = (unsigned)adlen; - lw_xor_block(state->B + posn, ad, size); - posn += size; - } - ad += size; - adlen -= size; - } - - /* Pad and absorb the final block */ - state->B[posn] ^= 0x01; - keccakp_200_permute(state); - lw_xor_block(state->B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state->B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state->B, DELIRIUM_TAG_SIZE); -} - -int delirium_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - keccakp_200_state_t state; - unsigned char start[KECCAKP_200_STATE_SIZE]; - unsigned char mask[KECCAKP_200_STATE_SIZE]; - unsigned char next[KECCAKP_200_STATE_SIZE]; - unsigned char tag[DELIRIUM_TAG_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + DELIRIUM_TAG_SIZE; - - /* Hash the key and generate the initial mask */ - memcpy(state.B, k, DELIRIUM_KEY_SIZE); - memset(state.B + DELIRIUM_KEY_SIZE, 0, sizeof(state.B) - DELIRIUM_KEY_SIZE); - keccakp_200_permute(&state); - memcpy(mask, state.B, DELIRIUM_KEY_SIZE); - memset(mask + DELIRIUM_KEY_SIZE, 0, sizeof(mask) - DELIRIUM_KEY_SIZE); - memcpy(start, mask, sizeof(mask)); - - /* Tag starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Authenticate the nonce and the associated data */ - delirium_process_ad(&state, mask, next, tag, npub, ad, adlen); - - /* Reset back to the starting mask for the encryption phase */ - memcpy(mask, start, sizeof(mask)); - - /* Encrypt and authenticate the payload */ - while (mlen >= KECCAKP_200_STATE_SIZE) { - /* Encrypt using the current mask */ - memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, m, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); - memcpy(c, state.B, KECCAKP_200_STATE_SIZE); - - /* Authenticate using the next mask */ - delirium_lfsr(next, mask); - lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, next, KECCAKP_200_STATE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); - - /* Advance to the next block */ - memcpy(mask, next, KECCAKP_200_STATE_SIZE); - c += KECCAKP_200_STATE_SIZE; - m += KECCAKP_200_STATE_SIZE; - mlen -= KECCAKP_200_STATE_SIZE; - } - if (mlen > 0) { - /* Encrypt the last block using the current mask */ - unsigned temp = 
(unsigned)mlen; - memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, m, temp); - lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); - memcpy(c, state.B, temp); - - /* Authenticate the last block using the next mask */ - delirium_lfsr(next, mask); - state.B[temp] = 0x01; - memset(state.B + temp + 1, 0, KECCAKP_200_STATE_SIZE - temp - 1); - lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, next, KECCAKP_200_STATE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); - c += temp; - } else if (*clen != DELIRIUM_TAG_SIZE) { - /* Pad and authenticate when the last block is aligned */ - delirium_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); - state.B[0] ^= 0x01; - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); - } - - /* Generate the authentication tag */ - memcpy(c, tag, DELIRIUM_TAG_SIZE); - return 0; -} - -int delirium_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - keccakp_200_state_t state; - unsigned char *mtemp = m; - unsigned char start[KECCAKP_200_STATE_SIZE]; - unsigned char mask[KECCAKP_200_STATE_SIZE]; - unsigned char next[KECCAKP_200_STATE_SIZE]; - unsigned char tag[DELIRIUM_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < DELIRIUM_TAG_SIZE) - return -1; - *mlen = clen - DELIRIUM_TAG_SIZE; - - /* Hash the key and generate the initial mask */ - memcpy(state.B, k, DELIRIUM_KEY_SIZE); - memset(state.B + DELIRIUM_KEY_SIZE, 0, sizeof(state.B) - DELIRIUM_KEY_SIZE); - keccakp_200_permute(&state); - memcpy(mask, state.B, DELIRIUM_KEY_SIZE); - memset(mask + DELIRIUM_KEY_SIZE, 0, sizeof(mask) - DELIRIUM_KEY_SIZE); - memcpy(start, mask, sizeof(mask)); - - /* Tag starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Authenticate the nonce and the associated data */ - delirium_process_ad(&state, mask, next, tag, npub, ad, adlen); - - /* Reset back to the starting mask for the encryption phase */ - memcpy(mask, start, sizeof(mask)); - - /* Decrypt and authenticate the payload */ - clen -= DELIRIUM_TAG_SIZE; - while (clen >= KECCAKP_200_STATE_SIZE) { - /* Authenticate using the next mask */ - delirium_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, c, KECCAKP_200_STATE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); - - /* Decrypt using the current mask */ - memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block_2_src(m, state.B, c, KECCAKP_200_STATE_SIZE); - - /* Advance to the next block */ - memcpy(mask, next, KECCAKP_200_STATE_SIZE); - c += KECCAKP_200_STATE_SIZE; - m += KECCAKP_200_STATE_SIZE; - clen -= KECCAKP_200_STATE_SIZE; - } - if (clen > 0) { - /* Authenticate the 
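Dumbo, Jumbo and Delirium all encrypt a full message block the same way; only the permutation and the state size change. The schematic below abstracts the permutation behind a function pointer to make that shared data flow explicit (elephant_encrypt_block and the dummy permutation in main are illustrative, not library code):

    #include <stdio.h>
    #include <string.h>

    typedef void (*permute_fn)(unsigned char *state);

    /* Schematic per-block encryption: c = m ^ P(mask ^ nonce) ^ mask, where
     * only the leading nonce_size bytes of the state are XORed with the nonce. */
    static void elephant_encrypt_block(
        unsigned char *c, const unsigned char *m,
        const unsigned char *mask, const unsigned char *npub,
        unsigned nonce_size, unsigned state_size, permute_fn permute)
    {
        unsigned char state[64];            /* large enough for all members */
        memcpy(state, mask, state_size);
        for (unsigned i = 0; i < nonce_size; ++i)
            state[i] ^= npub[i];
        permute(state);
        for (unsigned i = 0; i < state_size; ++i)
            c[i] = m[i] ^ state[i] ^ mask[i];
    }

    /* Dummy stand-in permutation so the sketch runs; the real code uses
     * spongent160_permute / spongent176_permute / keccakp_200_permute. */
    static void dummy_permute(unsigned char *state)
    {
        for (unsigned i = 0; i < 20; ++i)
            state[i] = (unsigned char)(state[i] + i + 1);
    }

    int main(void)
    {
        unsigned char m[20] = "0123456789abcdefghi", c[20];
        unsigned char mask[20] = { 0 }, nonce[12] = { 0 };
        elephant_encrypt_block(c, m, mask, nonce, 12, 20, dummy_permute);
        printf("first ciphertext byte: 0x%02x\n", c[0]);
        return 0;
    }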
last block using the next mask */ - unsigned temp = (unsigned)clen; - delirium_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, c, temp); - state.B[temp] ^= 0x01; - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); - - /* Decrypt the last block using the current mask */ - memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, temp); - lw_xor_block_2_src(m, state.B, c, temp); - c += temp; - } else if (*mlen != 0) { - /* Pad and authenticate when the last block is aligned */ - delirium_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); - state.B[0] ^= 0x01; - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); - } - - /* Check the authentication tag */ - return aead_check_tag(mtemp, *mlen, tag, c, DELIRIUM_TAG_SIZE); -} diff --git a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/elephant.h b/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/elephant.h deleted file mode 100644 index f775e3d..0000000 --- a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/elephant.h +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ELEPHANT_H -#define LWCRYPTO_ELEPHANT_H - -#include "aead-common.h" - -/** - * \file elephant.h - * \brief Elephant authenticated encryption algorithm family. - * - * Elephant is a family of authenticated encryption algorithms based - * around the Spongent-pi and Keccak permutations. - * - * \li Dumbo has a 128-bit key, a 96-bit nonce, and a 64-bit authentication - * tag. It is based around the Spongent-pi[160] permutation. This is - * the primary member of the family. - * \li Jumbo has a 128-bit key, a 96-bit nonce, and a 64-bit authentication - * tag. It is based around the Spongent-pi[176] permutation. - * \li Delirium has a 128-bit key, a 96-bit nonce, and a 128-bit authentication - * tag. It is based around the Keccak[200] permutation. 
- * - * References: https://www.esat.kuleuven.be/cosic/elephant/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for Dumbo. - */ -#define DUMBO_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Dumbo. - */ -#define DUMBO_TAG_SIZE 8 - -/** - * \brief Size of the nonce for Dumbo. - */ -#define DUMBO_NONCE_SIZE 12 - -/** - * \brief Size of the key for Jumbo. - */ -#define JUMBO_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Jumbo. - */ -#define JUMBO_TAG_SIZE 8 - -/** - * \brief Size of the nonce for Jumbo. - */ -#define JUMBO_NONCE_SIZE 12 - -/** - * \brief Size of the key for Delirium. - */ -#define DELIRIUM_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Delirium. - */ -#define DELIRIUM_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Delirium. - */ -#define DELIRIUM_NONCE_SIZE 12 - -/** - * \brief Meta-information block for the Dumbo cipher. - */ -extern aead_cipher_t const dumbo_cipher; - -/** - * \brief Meta-information block for the Jumbo cipher. - */ -extern aead_cipher_t const jumbo_cipher; - -/** - * \brief Meta-information block for the Delirium cipher. - */ -extern aead_cipher_t const delirium_cipher; - -/** - * \brief Encrypts and authenticates a packet with Dumbo. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa dumbo_aead_decrypt() - */ -int dumbo_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Dumbo. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- * - * \sa dumbo_aead_encrypt() - */ -int dumbo_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Jumbo. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa jumbo_aead_decrypt() - */ -int jumbo_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Jumbo. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa jumbo_aead_encrypt() - */ -int jumbo_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Delirium. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. 
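As a usage sketch for the API declared in elephant.h, the meta-information blocks make it possible to drive any member of the family through one code path. The example below assumes aead-common.h, elephant.h and the corresponding sources are on the include/link path; roundtrip() is a hypothetical test helper, not part of the library:

    #include <stdio.h>
    #include <string.h>
    #include "aead-common.h"
    #include "elephant.h"

    /* Hypothetical round-trip test driven entirely by the aead_cipher_t
     * meta-information block: key/nonce lengths and the encrypt/decrypt
     * function pointers all come from the cipher descriptor. */
    static int roundtrip(const aead_cipher_t *cipher)
    {
        unsigned char key[32] = { 0 }, nonce[32] = { 0 };
        unsigned char msg[16] = "hello, elephant";
        unsigned char ct[sizeof(msg) + 32], pt[sizeof(msg)];
        unsigned long long clen, plen;

        if (cipher->key_len > sizeof(key) || cipher->nonce_len > sizeof(nonce))
            return -1;
        cipher->encrypt(ct, &clen, msg, sizeof(msg), NULL, 0, NULL, nonce, key);
        if (cipher->decrypt(pt, &plen, NULL, ct, clen, NULL, 0, nonce, key) != 0)
            return -1;                      /* tag check failed */
        return memcmp(pt, msg, sizeof(msg)) == 0 ? 0 : -1;
    }

    int main(void)
    {
        printf("%s: %s\n", dumbo_cipher.name,
               roundtrip(&dumbo_cipher) == 0 ? "ok" : "failed");
        printf("%s: %s\n", delirium_cipher.name,
               roundtrip(&delirium_cipher) == 0 ? "ok" : "failed");
        return 0;
    }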
- * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa delirium_aead_decrypt() - */ -int delirium_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Delirium. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa delirium_aead_encrypt() - */ -int delirium_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/encrypt.c b/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/encrypt.c deleted file mode 100644 index df2a4b5..0000000 --- a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "elephant.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return dumbo_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return dumbo_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-keccak-avr.S b/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-keccak-avr.S deleted file mode 100644 index e50ccaf..0000000 --- a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-keccak-avr.S +++ /dev/null @@ -1,1552 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global keccakp_200_permute - .type keccakp_200_permute, @function -keccakp_200_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - 
ldd r22,Z+4 - ldd r23,Z+5 - ldd r26,Z+6 - ldd r27,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r4,Z+12 - ldd r5,Z+13 - ldd r6,Z+14 - ldd r7,Z+15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - ldd r24,Z+24 - push r31 - push r30 - rcall 82f - ldi r30,1 - eor r18,r30 - rcall 82f - ldi r30,130 - eor r18,r30 - rcall 82f - ldi r30,138 - eor r18,r30 - rcall 82f - mov r30,r1 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,1 - eor r18,r30 - rcall 82f - ldi r30,129 - eor r18,r30 - rcall 82f - ldi r30,9 - eor r18,r30 - rcall 82f - ldi r30,138 - eor r18,r30 - rcall 82f - ldi r30,136 - eor r18,r30 - rcall 82f - ldi r30,9 - eor r18,r30 - rcall 82f - ldi r30,10 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,137 - eor r18,r30 - rcall 82f - ldi r30,3 - eor r18,r30 - rcall 82f - ldi r30,2 - eor r18,r30 - rcall 82f - ldi r30,128 - eor r18,r30 - rjmp 420f -82: - mov r30,r18 - eor r30,r23 - eor r30,r2 - eor r30,r7 - eor r30,r12 - mov r31,r19 - eor r31,r26 - eor r31,r3 - eor r31,r8 - eor r31,r13 - mov r25,r20 - eor r25,r27 - eor r25,r4 - eor r25,r9 - eor r25,r14 - mov r16,r21 - eor r16,r28 - eor r16,r5 - eor r16,r10 - eor r16,r15 - mov r17,r22 - eor r17,r29 - eor r17,r6 - eor r17,r11 - eor r17,r24 - mov r0,r31 - lsl r0 - adc r0,r1 - eor r0,r17 - eor r18,r0 - eor r23,r0 - eor r2,r0 - eor r7,r0 - eor r12,r0 - mov r0,r25 - lsl r0 - adc r0,r1 - eor r0,r30 - eor r19,r0 - eor r26,r0 - eor r3,r0 - eor r8,r0 - eor r13,r0 - mov r0,r16 - lsl r0 - adc r0,r1 - eor r0,r31 - eor r20,r0 - eor r27,r0 - eor r4,r0 - eor r9,r0 - eor r14,r0 - mov r0,r17 - lsl r0 - adc r0,r1 - eor r0,r25 - eor r21,r0 - eor r28,r0 - eor r5,r0 - eor r10,r0 - eor r15,r0 - mov r0,r30 - lsl r0 - adc r0,r1 - eor r0,r16 - eor r22,r0 - eor r29,r0 - eor r6,r0 - eor r11,r0 - eor r24,r0 - mov r30,r19 - swap r26 - mov r19,r26 - swap r29 - mov r26,r29 - mov r0,r1 - lsr r14 - ror r0 - lsr r14 - ror r0 - lsr r14 - ror r0 - or r14,r0 - mov r29,r14 - bst r6,0 - lsr r6 - bld r6,7 - mov r14,r6 - lsl r12 - adc r12,r1 - lsl r12 - adc r12,r1 - mov r6,r12 - mov r0,r1 - lsr r20 - ror r0 - lsr r20 - ror r0 - or r20,r0 - mov r12,r20 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - mov r20,r4 - lsl r5 - adc r5,r1 - mov r4,r5 - mov r5,r11 - mov r11,r15 - lsl r7 - adc r7,r1 - mov r15,r7 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - mov r7,r22 - mov r0,r1 - lsr r24 - ror r0 - lsr r24 - ror r0 - or r24,r0 - mov r22,r24 - lsl r13 - adc r13,r1 - lsl r13 - adc r13,r1 - mov r24,r13 - bst r28,0 - lsr r28 - bld r28,7 - mov r13,r28 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r28,r8 - swap r23 - mov r8,r23 - swap r21 - mov r23,r21 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r21,r10 - bst r9,0 - lsr r9 - bld r9,7 - mov r10,r9 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - mov r9,r3 - mov r0,r1 - lsr r27 - ror r0 - lsr r27 - ror r0 - or r27,r0 - mov r3,r27 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - mov r27,r2 - lsl r30 - adc r30,r1 - mov r2,r30 - mov r30,r18 - mov r31,r19 - mov r25,r20 - mov r16,r21 - mov r17,r22 - mov r18,r25 - mov r0,r31 - com r0 - and r18,r0 - eor r18,r30 - mov r19,r16 - mov r0,r25 - com r0 - and r19,r0 - eor r19,r31 - mov r20,r17 - mov r0,r16 - com r0 - and r20,r0 - eor r20,r25 - mov r21,r30 - mov r0,r17 - com r0 - and r21,r0 - eor r21,r16 - 
mov r22,r31 - mov r0,r30 - com r0 - and r22,r0 - eor r22,r17 - mov r30,r23 - mov r31,r26 - mov r25,r27 - mov r16,r28 - mov r17,r29 - mov r23,r25 - mov r0,r31 - com r0 - and r23,r0 - eor r23,r30 - mov r26,r16 - mov r0,r25 - com r0 - and r26,r0 - eor r26,r31 - mov r27,r17 - mov r0,r16 - com r0 - and r27,r0 - eor r27,r25 - mov r28,r30 - mov r0,r17 - com r0 - and r28,r0 - eor r28,r16 - mov r29,r31 - mov r0,r30 - com r0 - and r29,r0 - eor r29,r17 - mov r30,r2 - mov r31,r3 - mov r25,r4 - mov r16,r5 - mov r17,r6 - mov r2,r25 - mov r0,r31 - com r0 - and r2,r0 - eor r2,r30 - mov r3,r16 - mov r0,r25 - com r0 - and r3,r0 - eor r3,r31 - mov r4,r17 - mov r0,r16 - com r0 - and r4,r0 - eor r4,r25 - mov r5,r30 - mov r0,r17 - com r0 - and r5,r0 - eor r5,r16 - mov r6,r31 - mov r0,r30 - com r0 - and r6,r0 - eor r6,r17 - mov r30,r7 - mov r31,r8 - mov r25,r9 - mov r16,r10 - mov r17,r11 - mov r7,r25 - mov r0,r31 - com r0 - and r7,r0 - eor r7,r30 - mov r8,r16 - mov r0,r25 - com r0 - and r8,r0 - eor r8,r31 - mov r9,r17 - mov r0,r16 - com r0 - and r9,r0 - eor r9,r25 - mov r10,r30 - mov r0,r17 - com r0 - and r10,r0 - eor r10,r16 - mov r11,r31 - mov r0,r30 - com r0 - and r11,r0 - eor r11,r17 - mov r30,r12 - mov r31,r13 - mov r25,r14 - mov r16,r15 - mov r17,r24 - mov r12,r25 - mov r0,r31 - com r0 - and r12,r0 - eor r12,r30 - mov r13,r16 - mov r0,r25 - com r0 - and r13,r0 - eor r13,r31 - mov r14,r17 - mov r0,r16 - com r0 - and r14,r0 - eor r14,r25 - mov r15,r30 - mov r0,r17 - com r0 - and r15,r0 - eor r15,r16 - mov r24,r31 - mov r0,r30 - com r0 - and r24,r0 - eor r24,r17 - ret -420: - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r22 - std Z+5,r23 - std Z+6,r26 - std Z+7,r27 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r4 - std Z+13,r5 - std Z+14,r6 - std Z+15,r7 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - std Z+24,r24 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size keccakp_200_permute, .-keccakp_200_permute - - .text -.global keccakp_400_permute - .type keccakp_400_permute, @function -keccakp_400_permute: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - movw r30,r24 -.L__stack_usage = 17 - ld r6,Z - ldd r7,Z+1 - ldd r8,Z+2 - ldd r9,Z+3 - ldd r10,Z+4 - ldd r11,Z+5 - ldd r12,Z+6 - ldd r13,Z+7 - ldd r14,Z+8 - ldd r15,Z+9 - cpi r22,20 - brcs 15f - rcall 153f - ldi r23,1 - eor r6,r23 -15: - cpi r22,19 - brcs 23f - rcall 153f - ldi r23,130 - eor r6,r23 - ldi r17,128 - eor r7,r17 -23: - cpi r22,18 - brcs 31f - rcall 153f - ldi r23,138 - eor r6,r23 - ldi r17,128 - eor r7,r17 -31: - cpi r22,17 - brcs 37f - rcall 153f - ldi r23,128 - eor r7,r23 -37: - cpi r22,16 - brcs 45f - rcall 153f - ldi r23,139 - eor r6,r23 - ldi r17,128 - eor r7,r17 -45: - cpi r22,15 - brcs 51f - rcall 153f - ldi r23,1 - eor r6,r23 -51: - cpi r22,14 - brcs 59f - rcall 153f - ldi r23,129 - eor r6,r23 - ldi r17,128 - eor r7,r17 -59: - cpi r22,13 - brcs 67f - rcall 153f - ldi r23,9 - eor r6,r23 - ldi r17,128 - eor r7,r17 -67: - cpi r22,12 - brcs 73f - rcall 153f - ldi r23,138 - eor r6,r23 -73: - cpi r22,11 - brcs 79f - rcall 153f - ldi r23,136 - eor r6,r23 -79: - cpi r22,10 - brcs 87f - rcall 153f - ldi r23,9 - eor r6,r23 - ldi r17,128 - eor r7,r17 -87: - cpi r22,9 - brcs 93f - rcall 
153f - ldi r23,10 - eor r6,r23 -93: - cpi r22,8 - brcs 101f - rcall 153f - ldi r23,139 - eor r6,r23 - ldi r17,128 - eor r7,r17 -101: - cpi r22,7 - brcs 107f - rcall 153f - ldi r23,139 - eor r6,r23 -107: - cpi r22,6 - brcs 115f - rcall 153f - ldi r23,137 - eor r6,r23 - ldi r17,128 - eor r7,r17 -115: - cpi r22,5 - brcs 123f - rcall 153f - ldi r23,3 - eor r6,r23 - ldi r17,128 - eor r7,r17 -123: - cpi r22,4 - brcs 131f - rcall 153f - ldi r23,2 - eor r6,r23 - ldi r17,128 - eor r7,r17 -131: - cpi r22,3 - brcs 137f - rcall 153f - ldi r23,128 - eor r6,r23 -137: - cpi r22,2 - brcs 145f - rcall 153f - ldi r23,10 - eor r6,r23 - ldi r17,128 - eor r7,r17 -145: - cpi r22,1 - brcs 151f - rcall 153f - ldi r23,10 - eor r6,r23 -151: - rjmp 1004f -153: - movw r18,r6 - ldd r0,Z+10 - eor r18,r0 - ldd r0,Z+11 - eor r19,r0 - ldd r0,Z+20 - eor r18,r0 - ldd r0,Z+21 - eor r19,r0 - ldd r0,Z+30 - eor r18,r0 - ldd r0,Z+31 - eor r19,r0 - ldd r0,Z+40 - eor r18,r0 - ldd r0,Z+41 - eor r19,r0 - movw r20,r8 - ldd r0,Z+12 - eor r20,r0 - ldd r0,Z+13 - eor r21,r0 - ldd r0,Z+22 - eor r20,r0 - ldd r0,Z+23 - eor r21,r0 - ldd r0,Z+32 - eor r20,r0 - ldd r0,Z+33 - eor r21,r0 - ldd r0,Z+42 - eor r20,r0 - ldd r0,Z+43 - eor r21,r0 - movw r26,r10 - ldd r0,Z+14 - eor r26,r0 - ldd r0,Z+15 - eor r27,r0 - ldd r0,Z+24 - eor r26,r0 - ldd r0,Z+25 - eor r27,r0 - ldd r0,Z+34 - eor r26,r0 - ldd r0,Z+35 - eor r27,r0 - ldd r0,Z+44 - eor r26,r0 - ldd r0,Z+45 - eor r27,r0 - movw r2,r12 - ldd r0,Z+16 - eor r2,r0 - ldd r0,Z+17 - eor r3,r0 - ldd r0,Z+26 - eor r2,r0 - ldd r0,Z+27 - eor r3,r0 - ldd r0,Z+36 - eor r2,r0 - ldd r0,Z+37 - eor r3,r0 - ldd r0,Z+46 - eor r2,r0 - ldd r0,Z+47 - eor r3,r0 - movw r4,r14 - ldd r0,Z+18 - eor r4,r0 - ldd r0,Z+19 - eor r5,r0 - ldd r0,Z+28 - eor r4,r0 - ldd r0,Z+29 - eor r5,r0 - ldd r0,Z+38 - eor r4,r0 - ldd r0,Z+39 - eor r5,r0 - ldd r0,Z+48 - eor r4,r0 - ldd r0,Z+49 - eor r5,r0 - movw r24,r20 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r4 - eor r25,r5 - eor r6,r24 - eor r7,r25 - ldd r0,Z+10 - eor r0,r24 - std Z+10,r0 - ldd r0,Z+11 - eor r0,r25 - std Z+11,r0 - ldd r0,Z+20 - eor r0,r24 - std Z+20,r0 - ldd r0,Z+21 - eor r0,r25 - std Z+21,r0 - ldd r0,Z+30 - eor r0,r24 - std Z+30,r0 - ldd r0,Z+31 - eor r0,r25 - std Z+31,r0 - ldd r0,Z+40 - eor r0,r24 - std Z+40,r0 - ldd r0,Z+41 - eor r0,r25 - std Z+41,r0 - movw r24,r26 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r18 - eor r25,r19 - eor r8,r24 - eor r9,r25 - ldd r0,Z+12 - eor r0,r24 - std Z+12,r0 - ldd r0,Z+13 - eor r0,r25 - std Z+13,r0 - ldd r0,Z+22 - eor r0,r24 - std Z+22,r0 - ldd r0,Z+23 - eor r0,r25 - std Z+23,r0 - ldd r0,Z+32 - eor r0,r24 - std Z+32,r0 - ldd r0,Z+33 - eor r0,r25 - std Z+33,r0 - ldd r0,Z+42 - eor r0,r24 - std Z+42,r0 - ldd r0,Z+43 - eor r0,r25 - std Z+43,r0 - movw r24,r2 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r20 - eor r25,r21 - eor r10,r24 - eor r11,r25 - ldd r0,Z+14 - eor r0,r24 - std Z+14,r0 - ldd r0,Z+15 - eor r0,r25 - std Z+15,r0 - ldd r0,Z+24 - eor r0,r24 - std Z+24,r0 - ldd r0,Z+25 - eor r0,r25 - std Z+25,r0 - ldd r0,Z+34 - eor r0,r24 - std Z+34,r0 - ldd r0,Z+35 - eor r0,r25 - std Z+35,r0 - ldd r0,Z+44 - eor r0,r24 - std Z+44,r0 - ldd r0,Z+45 - eor r0,r25 - std Z+45,r0 - movw r24,r4 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r26 - eor r25,r27 - eor r12,r24 - eor r13,r25 - ldd r0,Z+16 - eor r0,r24 - std Z+16,r0 - ldd r0,Z+17 - eor r0,r25 - std Z+17,r0 - ldd r0,Z+26 - eor r0,r24 - std Z+26,r0 - ldd r0,Z+27 - eor r0,r25 - std Z+27,r0 - ldd r0,Z+36 - eor r0,r24 - std Z+36,r0 - ldd r0,Z+37 - eor r0,r25 - std Z+37,r0 - ldd r0,Z+46 - eor r0,r24 - std 
Z+46,r0 - ldd r0,Z+47 - eor r0,r25 - std Z+47,r0 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r2 - eor r25,r3 - eor r14,r24 - eor r15,r25 - ldd r0,Z+18 - eor r0,r24 - std Z+18,r0 - ldd r0,Z+19 - eor r0,r25 - std Z+19,r0 - ldd r0,Z+28 - eor r0,r24 - std Z+28,r0 - ldd r0,Z+29 - eor r0,r25 - std Z+29,r0 - ldd r0,Z+38 - eor r0,r24 - std Z+38,r0 - ldd r0,Z+39 - eor r0,r25 - std Z+39,r0 - ldd r0,Z+48 - eor r0,r24 - std Z+48,r0 - ldd r0,Z+49 - eor r0,r25 - std Z+49,r0 - movw r24,r8 - ldd r8,Z+12 - ldd r9,Z+13 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldd r18,Z+18 - ldd r19,Z+19 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+12,r18 - std Z+13,r19 - ldd r18,Z+44 - ldd r19,Z+45 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+18,r18 - std Z+19,r19 - ldd r18,Z+28 - ldd r19,Z+29 - mov r0,r19 - mov r19,r18 - mov r18,r0 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std Z+44,r18 - std Z+45,r19 - ldd r18,Z+40 - ldd r19,Z+41 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+28,r18 - std Z+29,r19 - movw r18,r10 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+40,r18 - std Z+41,r19 - ldd r10,Z+24 - ldd r11,Z+25 - mov r0,r11 - mov r11,r10 - mov r10,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldd r18,Z+26 - ldd r19,Z+27 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - std Z+24,r18 - std Z+25,r19 - ldd r18,Z+38 - ldd r19,Z+39 - mov r0,r19 - mov r19,r18 - mov r18,r0 - std Z+26,r18 - std Z+27,r19 - ldd r18,Z+46 - ldd r19,Z+47 - mov r0,r19 - mov r19,r18 - mov r18,r0 - std Z+38,r18 - std Z+39,r19 - ldd r18,Z+30 - ldd r19,Z+31 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - std Z+46,r18 - std Z+47,r19 - movw r18,r14 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+30,r18 - std Z+31,r19 - ldd r14,Z+48 - ldd r15,Z+49 - mov r0,r1 - lsr r15 - ror r14 - ror r0 - lsr r15 - ror r14 - ror r0 - or r15,r0 - ldd r18,Z+42 - ldd r19,Z+43 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+48,r18 - std Z+49,r19 - ldd r18,Z+16 - ldd r19,Z+17 - mov r0,r19 - mov r19,r18 - mov r18,r0 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std Z+42,r18 - std Z+43,r19 - ldd r18,Z+32 - ldd r19,Z+33 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+16,r18 - std Z+17,r19 - ldd r18,Z+10 - ldd r19,Z+11 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+32,r18 - std Z+33,r19 - movw r18,r12 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+10,r18 - std Z+11,r19 - ldd r12,Z+36 - ldd r13,Z+37 - mov r0,r13 - mov r13,r12 - mov r12,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - or r13,r0 - ldd r18,Z+34 - ldd r19,Z+35 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std Z+36,r18 - std Z+37,r19 - ldd r18,Z+22 - ldd r19,Z+23 - mov r0,r19 - mov 
r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+34,r18 - std Z+35,r19 - ldd r18,Z+14 - ldd r19,Z+15 - mov r0,r19 - mov r19,r18 - mov r18,r0 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+22,r18 - std Z+23,r19 - ldd r18,Z+20 - ldd r19,Z+21 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+14,r18 - std Z+15,r19 - lsl r24 - rol r25 - adc r24,r1 - std Z+20,r24 - std Z+21,r25 - movw r18,r6 - movw r20,r8 - movw r26,r10 - movw r2,r12 - movw r4,r14 - movw r6,r26 - mov r0,r20 - com r0 - and r6,r0 - mov r0,r21 - com r0 - and r7,r0 - eor r6,r18 - eor r7,r19 - movw r8,r2 - mov r0,r26 - com r0 - and r8,r0 - mov r0,r27 - com r0 - and r9,r0 - eor r8,r20 - eor r9,r21 - movw r10,r4 - mov r0,r2 - com r0 - and r10,r0 - mov r0,r3 - com r0 - and r11,r0 - eor r10,r26 - eor r11,r27 - movw r12,r18 - mov r0,r4 - com r0 - and r12,r0 - mov r0,r5 - com r0 - and r13,r0 - eor r12,r2 - eor r13,r3 - movw r14,r20 - mov r0,r18 - com r0 - and r14,r0 - mov r0,r19 - com r0 - and r15,r0 - eor r14,r4 - eor r15,r5 - ldd r18,Z+10 - ldd r19,Z+11 - ldd r20,Z+12 - ldd r21,Z+13 - ldd r26,Z+14 - ldd r27,Z+15 - ldd r2,Z+16 - ldd r3,Z+17 - ldd r4,Z+18 - ldd r5,Z+19 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+10,r24 - std Z+11,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+12,r24 - std Z+13,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+14,r24 - std Z+15,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+16,r24 - std Z+17,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+18,r24 - std Z+19,r25 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - ldd r26,Z+24 - ldd r27,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r4,Z+28 - ldd r5,Z+29 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+20,r24 - std Z+21,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+22,r24 - std Z+23,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+24,r24 - std Z+25,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+26,r24 - std Z+27,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+28,r24 - std Z+29,r25 - ldd r18,Z+30 - ldd r19,Z+31 - ldd r20,Z+32 - ldd r21,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - ldd r2,Z+36 - ldd r3,Z+37 - ldd r4,Z+38 - ldd r5,Z+39 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+30,r24 - std Z+31,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+32,r24 - std Z+33,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+34,r24 - std Z+35,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+36,r24 - std Z+37,r25 - 
movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+38,r24 - std Z+39,r25 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - ldd r26,Z+44 - ldd r27,Z+45 - ldd r2,Z+46 - ldd r3,Z+47 - ldd r4,Z+48 - ldd r5,Z+49 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+40,r24 - std Z+41,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+42,r24 - std Z+43,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+44,r24 - std Z+45,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+46,r24 - std Z+47,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+48,r24 - std Z+49,r25 - ret -1004: - st Z,r6 - std Z+1,r7 - std Z+2,r8 - std Z+3,r9 - std Z+4,r10 - std Z+5,r11 - std Z+6,r12 - std Z+7,r13 - std Z+8,r14 - std Z+9,r15 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size keccakp_400_permute, .-keccakp_400_permute - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-keccak.c b/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-keccak.c deleted file mode 100644 index 60539df..0000000 --- a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-keccak.c +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-keccak.h" - -#if !defined(__AVR__) - -/* Faster method to compute ((x + y) % 5) that avoids the division */ -static unsigned char const addMod5Table[9] = { - 0, 1, 2, 3, 4, 0, 1, 2, 3 -}; -#define addMod5(x, y) (addMod5Table[(x) + (y)]) - -void keccakp_200_permute(keccakp_200_state_t *state) -{ - static uint8_t const RC[18] = { - 0x01, 0x82, 0x8A, 0x00, 0x8B, 0x01, 0x81, 0x09, - 0x8A, 0x88, 0x09, 0x0A, 0x8B, 0x8B, 0x89, 0x03, - 0x02, 0x80 - }; - uint8_t C[5]; - uint8_t D; - unsigned round; - unsigned index, index2; - for (round = 0; round < 18; ++round) { - /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. 
Compute D on the fly */ - for (index = 0; index < 5; ++index) { - C[index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; - } - for (index = 0; index < 5; ++index) { - D = C[addMod5(index, 4)] ^ - leftRotate1_8(C[addMod5(index, 1)]); - for (index2 = 0; index2 < 5; ++index2) - state->A[index2][index] ^= D; - } - - /* Step mapping rho and pi combined into a single step. - * Rotate all lanes by a specific offset and rearrange */ - D = state->A[0][1]; - state->A[0][1] = leftRotate4_8(state->A[1][1]); - state->A[1][1] = leftRotate4_8(state->A[1][4]); - state->A[1][4] = leftRotate5_8(state->A[4][2]); - state->A[4][2] = leftRotate7_8(state->A[2][4]); - state->A[2][4] = leftRotate2_8(state->A[4][0]); - state->A[4][0] = leftRotate6_8(state->A[0][2]); - state->A[0][2] = leftRotate3_8(state->A[2][2]); - state->A[2][2] = leftRotate1_8(state->A[2][3]); - state->A[2][3] = state->A[3][4]; - state->A[3][4] = state->A[4][3]; - state->A[4][3] = leftRotate1_8(state->A[3][0]); - state->A[3][0] = leftRotate3_8(state->A[0][4]); - state->A[0][4] = leftRotate6_8(state->A[4][4]); - state->A[4][4] = leftRotate2_8(state->A[4][1]); - state->A[4][1] = leftRotate7_8(state->A[1][3]); - state->A[1][3] = leftRotate5_8(state->A[3][1]); - state->A[3][1] = leftRotate4_8(state->A[1][0]); - state->A[1][0] = leftRotate4_8(state->A[0][3]); - state->A[0][3] = leftRotate5_8(state->A[3][3]); - state->A[3][3] = leftRotate7_8(state->A[3][2]); - state->A[3][2] = leftRotate2_8(state->A[2][1]); - state->A[2][1] = leftRotate6_8(state->A[1][2]); - state->A[1][2] = leftRotate3_8(state->A[2][0]); - state->A[2][0] = leftRotate1_8(D); - - /* Step mapping chi. Combine each lane with two others in its row */ - for (index = 0; index < 5; ++index) { - C[0] = state->A[index][0]; - C[1] = state->A[index][1]; - C[2] = state->A[index][2]; - C[3] = state->A[index][3]; - C[4] = state->A[index][4]; - for (index2 = 0; index2 < 5; ++index2) { - state->A[index][index2] = - C[index2] ^ - ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); - } - } - - /* Step mapping iota. XOR A[0][0] with the round constant */ - state->A[0][0] ^= RC[round]; - } -} - -#if defined(LW_UTIL_LITTLE_ENDIAN) -#define keccakp_400_permute_host keccakp_400_permute -#endif - -/* Keccak-p[400] that assumes that the input is already in host byte order */ -void keccakp_400_permute_host(keccakp_400_state_t *state, unsigned rounds) -{ - static uint16_t const RC[20] = { - 0x0001, 0x8082, 0x808A, 0x8000, 0x808B, 0x0001, 0x8081, 0x8009, - 0x008A, 0x0088, 0x8009, 0x000A, 0x808B, 0x008B, 0x8089, 0x8003, - 0x8002, 0x0080, 0x800A, 0x000A - }; - uint16_t C[5]; - uint16_t D; - unsigned round; - unsigned index, index2; - for (round = 20 - rounds; round < 20; ++round) { - /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. Compute D on the fly */ - for (index = 0; index < 5; ++index) { - C[index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; - } - for (index = 0; index < 5; ++index) { - D = C[addMod5(index, 4)] ^ - leftRotate1_16(C[addMod5(index, 1)]); - for (index2 = 0; index2 < 5; ++index2) - state->A[index2][index] ^= D; - } - - /* Step mapping rho and pi combined into a single step. 
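The addMod5Table used throughout keccakp_200_permute above replaces ((x + y) % 5) with a 9-entry lookup, which works because both operands are already reduced modulo 5, so the sum can only be 0..8. A minimal standalone check of that identity (illustrative only; the main() harness is not part of the library):

    #include <assert.h>
    #include <stdio.h>

    /* Same table as in the deleted internal-keccak.c: covers sums 0..8. */
    static unsigned char const addMod5Table[9] = { 0, 1, 2, 3, 4, 0, 1, 2, 3 };
    #define addMod5(x, y) (addMod5Table[(x) + (y)])

    int main(void)
    {
        unsigned x, y;
        for (x = 0; x < 5; ++x) {
            for (y = 0; y < 5; ++y) {
                /* The lookup must agree with the plain modulo expression. */
                assert(addMod5(x, y) == (x + y) % 5);
            }
        }
        printf("addMod5 table matches (x + y) %% 5 for all x, y in 0..4\n");
        return 0;
    }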
- * Rotate all lanes by a specific offset and rearrange */ - D = state->A[0][1]; - state->A[0][1] = leftRotate12_16(state->A[1][1]); - state->A[1][1] = leftRotate4_16 (state->A[1][4]); - state->A[1][4] = leftRotate13_16(state->A[4][2]); - state->A[4][2] = leftRotate7_16 (state->A[2][4]); - state->A[2][4] = leftRotate2_16 (state->A[4][0]); - state->A[4][0] = leftRotate14_16(state->A[0][2]); - state->A[0][2] = leftRotate11_16(state->A[2][2]); - state->A[2][2] = leftRotate9_16 (state->A[2][3]); - state->A[2][3] = leftRotate8_16 (state->A[3][4]); - state->A[3][4] = leftRotate8_16 (state->A[4][3]); - state->A[4][3] = leftRotate9_16 (state->A[3][0]); - state->A[3][0] = leftRotate11_16(state->A[0][4]); - state->A[0][4] = leftRotate14_16(state->A[4][4]); - state->A[4][4] = leftRotate2_16 (state->A[4][1]); - state->A[4][1] = leftRotate7_16 (state->A[1][3]); - state->A[1][3] = leftRotate13_16(state->A[3][1]); - state->A[3][1] = leftRotate4_16 (state->A[1][0]); - state->A[1][0] = leftRotate12_16(state->A[0][3]); - state->A[0][3] = leftRotate5_16 (state->A[3][3]); - state->A[3][3] = leftRotate15_16(state->A[3][2]); - state->A[3][2] = leftRotate10_16(state->A[2][1]); - state->A[2][1] = leftRotate6_16 (state->A[1][2]); - state->A[1][2] = leftRotate3_16 (state->A[2][0]); - state->A[2][0] = leftRotate1_16(D); - - /* Step mapping chi. Combine each lane with two others in its row */ - for (index = 0; index < 5; ++index) { - C[0] = state->A[index][0]; - C[1] = state->A[index][1]; - C[2] = state->A[index][2]; - C[3] = state->A[index][3]; - C[4] = state->A[index][4]; - for (index2 = 0; index2 < 5; ++index2) { - state->A[index][index2] = - C[index2] ^ - ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); - } - } - - /* Step mapping iota. XOR A[0][0] with the round constant */ - state->A[0][0] ^= RC[round]; - } -} - -#if !defined(LW_UTIL_LITTLE_ENDIAN) - -/** - * \brief Reverses the bytes in a Keccak-p[400] state. - * - * \param state The Keccak-p[400] state to apply byte-reversal to. - */ -static void keccakp_400_reverse_bytes(keccakp_400_state_t *state) -{ - unsigned index; - unsigned char temp1; - unsigned char temp2; - for (index = 0; index < 50; index += 2) { - temp1 = state->B[index]; - temp2 = state->B[index + 1]; - state->B[index] = temp2; - state->B[index + 1] = temp1; - } -} - -/* Keccak-p[400] that requires byte reversal on input and output */ -void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds) -{ - keccakp_400_reverse_bytes(state); - keccakp_400_permute_host(state, rounds); - keccakp_400_reverse_bytes(state); -} - -#endif - -#endif /* !__AVR__ */ diff --git a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-keccak.h b/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-keccak.h deleted file mode 100644 index 2ffef42..0000000 --- a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-keccak.h +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
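For orientation, a hedged usage sketch of the two Keccak-p permutations defined in the deleted internal-keccak.c above; the state unions, size macros, and prototypes come from the internal-keccak.h hunk that follows. The wrapper function name is hypothetical and the zeroed buffers are example data only, not test vectors:

    #include <string.h>
    #include "internal-keccak.h"   /* header removed by this patch */

    static void example_keccak_calls(void)
    {
        keccakp_200_state_t s200;
        keccakp_400_state_t s400;

        /* Keccak-p[200]: 25-byte state, fixed 18 rounds. */
        memset(s200.B, 0, KECCAKP_200_STATE_SIZE);
        keccakp_200_permute(&s200);

        /* Keccak-p[400]: 50-byte state in little-endian lane order;
           the caller chooses the round count, up to 20. */
        memset(s400.B, 0, KECCAKP_400_STATE_SIZE);
        keccakp_400_permute(&s400, 20);
    }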
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_KECCAK_H -#define LW_INTERNAL_KECCAK_H - -#include "internal-util.h" - -/** - * \file internal-keccak.h - * \brief Internal implementation of the Keccak-p permutation. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the state for the Keccak-p[200] permutation. - */ -#define KECCAKP_200_STATE_SIZE 25 - -/** - * \brief Size of the state for the Keccak-p[400] permutation. - */ -#define KECCAKP_400_STATE_SIZE 50 - -/** - * \brief Structure of the internal state of the Keccak-p[200] permutation. - */ -typedef union -{ - uint8_t A[5][5]; /**< Keccak-p[200] state as a 5x5 array of lanes */ - uint8_t B[25]; /**< Keccak-p[200] state as a byte array */ - -} keccakp_200_state_t; - -/** - * \brief Structure of the internal state of the Keccak-p[400] permutation. - */ -typedef union -{ - uint16_t A[5][5]; /**< Keccak-p[400] state as a 5x5 array of lanes */ - uint8_t B[50]; /**< Keccak-p[400] state as a byte array */ - -} keccakp_400_state_t; - -/** - * \brief Permutes the Keccak-p[200] state. - * - * \param state The Keccak-p[200] state to be permuted. - */ -void keccakp_200_permute(keccakp_200_state_t *state); - -/** - * \brief Permutes the Keccak-p[400] state, which is assumed to be in - * little-endian byte order. - * - * \param state The Keccak-p[400] state to be permuted. - * \param rounds The number of rounds to perform (up to 20). 
- */ -void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-spongent-avr.S b/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-spongent-avr.S deleted file mode 100644 index 4a43458..0000000 --- a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-spongent-avr.S +++ /dev/null @@ -1,1677 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 256 -table_0: - .byte 238 - .byte 237 - .byte 235 - .byte 224 - .byte 226 - .byte 225 - .byte 228 - .byte 239 - .byte 231 - .byte 234 - .byte 232 - .byte 229 - .byte 233 - .byte 236 - .byte 227 - .byte 230 - .byte 222 - .byte 221 - .byte 219 - .byte 208 - .byte 210 - .byte 209 - .byte 212 - .byte 223 - .byte 215 - .byte 218 - .byte 216 - .byte 213 - .byte 217 - .byte 220 - .byte 211 - .byte 214 - .byte 190 - .byte 189 - .byte 187 - .byte 176 - .byte 178 - .byte 177 - .byte 180 - .byte 191 - .byte 183 - .byte 186 - .byte 184 - .byte 181 - .byte 185 - .byte 188 - .byte 179 - .byte 182 - .byte 14 - .byte 13 - .byte 11 - .byte 0 - .byte 2 - .byte 1 - .byte 4 - .byte 15 - .byte 7 - .byte 10 - .byte 8 - .byte 5 - .byte 9 - .byte 12 - .byte 3 - .byte 6 - .byte 46 - .byte 45 - .byte 43 - .byte 32 - .byte 34 - .byte 33 - .byte 36 - .byte 47 - .byte 39 - .byte 42 - .byte 40 - .byte 37 - .byte 41 - .byte 44 - .byte 35 - .byte 38 - .byte 30 - .byte 29 - .byte 27 - .byte 16 - .byte 18 - .byte 17 - .byte 20 - .byte 31 - .byte 23 - .byte 26 - .byte 24 - .byte 21 - .byte 25 - .byte 28 - .byte 19 - .byte 22 - .byte 78 - .byte 77 - .byte 75 - .byte 64 - .byte 66 - .byte 65 - .byte 68 - .byte 79 - .byte 71 - .byte 74 - .byte 72 - .byte 69 - .byte 73 - .byte 76 - .byte 67 - .byte 70 - .byte 254 - .byte 253 - .byte 251 - .byte 240 - .byte 242 - .byte 241 - .byte 244 - .byte 255 - .byte 247 - .byte 250 - .byte 248 - .byte 245 - .byte 249 - .byte 252 - .byte 243 - .byte 246 - .byte 126 - .byte 125 - .byte 123 - .byte 112 - .byte 114 - .byte 113 - .byte 116 - .byte 127 - .byte 119 - .byte 122 - .byte 120 - .byte 117 - .byte 121 - .byte 124 - .byte 115 - .byte 118 - .byte 174 - .byte 173 - .byte 171 - .byte 160 - .byte 162 - .byte 161 - .byte 164 - .byte 175 - .byte 167 - .byte 170 - .byte 168 - .byte 165 - .byte 169 - .byte 172 - .byte 163 - .byte 166 - .byte 142 - .byte 141 - .byte 139 - .byte 128 - .byte 130 - .byte 129 - .byte 132 - .byte 143 - .byte 135 - .byte 138 - .byte 136 - .byte 133 - .byte 137 - .byte 140 - .byte 131 - .byte 134 - .byte 94 - .byte 93 - .byte 91 - .byte 80 - .byte 82 - .byte 81 - .byte 84 - .byte 95 - .byte 87 - .byte 90 - .byte 88 - .byte 85 - .byte 89 - .byte 92 - .byte 83 - .byte 86 - .byte 158 - .byte 157 - .byte 155 - .byte 144 - .byte 146 - .byte 145 - .byte 148 - .byte 159 - .byte 151 - .byte 154 - .byte 152 - .byte 149 - .byte 153 - .byte 156 - .byte 147 - .byte 150 - .byte 206 - .byte 205 - .byte 203 - .byte 192 - .byte 194 - .byte 193 - .byte 196 - .byte 207 - .byte 199 - .byte 202 - .byte 200 - .byte 197 - .byte 201 - .byte 204 - .byte 195 - .byte 198 - .byte 62 - .byte 61 - .byte 59 - .byte 48 - .byte 50 - .byte 49 - .byte 52 - .byte 63 - .byte 55 - .byte 58 - .byte 56 - .byte 53 - .byte 57 - .byte 60 - .byte 51 - .byte 54 - .byte 110 - .byte 109 - .byte 107 - .byte 96 - .byte 98 - .byte 97 - .byte 100 - .byte 111 - .byte 103 - .byte 106 - 
.byte 104 - .byte 101 - .byte 105 - .byte 108 - .byte 99 - .byte 102 - - .text -.global spongent160_permute - .type spongent160_permute, @function -spongent160_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 -.L__stack_usage = 16 - ld r22,Z - ldd r23,Z+1 - ldd r28,Z+2 - ldd r29,Z+3 - ldd r2,Z+4 - ldd r3,Z+5 - ldd r4,Z+6 - ldd r5,Z+7 - ldd r6,Z+8 - ldd r7,Z+9 - ldd r8,Z+10 - ldd r9,Z+11 - ldd r10,Z+12 - ldd r11,Z+13 - ldd r12,Z+14 - ldd r13,Z+15 - ldd r14,Z+16 - ldd r15,Z+17 - ldd r24,Z+18 - ldd r25,Z+19 - movw r26,r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r21,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r21 -#endif - ldi r18,80 - ldi r19,117 - ldi r20,174 -25: - eor r22,r19 - eor r25,r20 - lsl r19 - bst r19,7 - bld r19,0 - mov r0,r1 - bst r19,6 - bld r0,0 - eor r19,r0 - andi r19,127 - lsr r20 - bst r20,0 - bld r20,7 - mov r0,r1 - bst r20,1 - bld r0,7 - eor r20,r0 - andi r20,254 - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r28 -#if defined(RAMPZ) - elpm r28,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r28,Z -#elif defined(__AVR_TINY__) - ld r28,Z -#else - lpm - mov r28,r0 -#endif - mov r30,r29 -#if defined(RAMPZ) - elpm r29,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r29,Z -#elif defined(__AVR_TINY__) - ld r29,Z -#else - lpm - mov r29,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov 
r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - bst r22,1 - bld r0,0 - bst r22,4 - bld r22,1 - bst r28,0 - bld r22,4 - bst r6,0 - bld r28,0 - bst r10,1 - bld r6,0 - bst r6,6 - bld r10,1 - bst r13,1 - bld r6,6 - bst r22,7 - bld r13,1 - bst r29,4 - bld r22,7 - bst r12,0 - bld r29,4 - bst r14,2 - bld r12,0 - bst r3,3 - bld r14,2 - bst r23,5 - bld r3,3 - bst r4,4 - bld r23,5 - bst r4,1 - bld r4,4 - bst r2,5 - bld r4,1 - bst r24,4 - bld r2,5 - bst r12,3 - bld r24,4 - bst r15,6 - bld r12,3 - bst r9,3 - bld r15,6 - bst r3,6 - bld r9,3 - bst r29,1 - bld r3,6 - bst r10,4 - bld r29,1 - bst r8,2 - bld r10,4 - bst r23,2 - bld r8,2 - bst r3,0 - bld r23,2 - bst r0,0 - bld r3,0 - bst r22,2 - bld r0,0 - bst r23,0 - bld r22,2 - bst r2,0 - bld r23,0 - bst r14,0 - bld r2,0 - bst r2,3 - bld r14,0 - bst r15,4 - bld r2,3 - bst r8,3 - bld r15,4 - bst r23,6 - bld r8,3 - bst r5,0 - bld r23,6 - bst r6,1 - bld r5,0 - bst r10,5 - bld r6,1 - bst r8,6 - bld r10,5 - bst r29,2 - bld r8,6 - bst r11,0 - bld r29,2 - bst r10,2 - bld r11,0 - bst r7,2 - bld r10,2 - bst r15,1 - bld r7,2 - bst r6,7 - bld r15,1 - bst r13,5 - bld r6,7 - bst r28,7 - bld r13,5 - bst r9,4 - bld r28,7 - bst r4,2 - bld r9,4 - bst r3,1 - bld r4,2 - bst r22,5 - bld r3,1 - bst r28,4 - bld r22,5 - bst r8,0 - bld r28,4 - bst r0,0 - bld r8,0 - bst r22,3 - bld r0,0 - bst r23,4 - bld r22,3 - bst r4,0 - bld r23,4 - bst r2,1 - bld r4,0 - bst r14,4 - bld r2,1 - bst r4,3 - bld r14,4 - bst r3,5 - bld r4,3 - bst r28,5 - bld r3,5 - bst r8,4 - bld r28,5 - bst r28,2 - bld r8,4 - bst r7,0 - bld r28,2 - bst r14,1 - bld r7,0 - bst r2,7 - bld r14,1 - bst r25,4 - bld r2,7 - bst r24,3 - bld r25,4 - bst r11,7 - bld r24,3 - bst r13,6 - bld r11,7 - bst r29,3 - bld r13,6 - bst r11,4 - bld r29,3 - bst r12,2 - bld r11,4 - bst r15,2 - bld r12,2 - bst r7,3 - bld r15,2 - bst r15,5 - bld r7,3 - bst r8,7 - bld r15,5 - bst r29,6 - bld r8,7 - bst r13,0 - bld r29,6 - bst r0,0 - bld r13,0 - bst r22,6 - bld r0,0 - bst r29,0 - bld r22,6 - bst r10,0 - bld r29,0 - bst r6,2 - bld r10,0 - bst r11,1 - bld r6,2 - bst r10,6 - bld r11,1 - bst r9,2 - bld r10,6 - bst r3,2 - bld r9,2 - bst r23,1 - bld r3,2 - bst r2,4 - bld r23,1 - bst r24,0 - bld r2,4 - bst r10,3 - bld r24,0 - bst r7,6 - bld r10,3 - bst r25,1 - bld r7,6 - bst r14,7 - bld r25,1 - bst r5,7 - bld r14,7 - bst r9,5 - bld r5,7 - bst r4,6 - bld r9,5 - bst r5,1 - bld r4,6 - bst r6,5 - bld r5,1 - bst r12,5 - bld r6,5 - bst r24,6 - bld r12,5 - bst r13,3 - bld r24,6 - bst r23,7 - bld r13,3 - bst r5,4 - bld r23,7 - bst r8,1 - bld r5,4 - bst r0,0 - bld r8,1 - bst r23,3 - bld r0,0 - bst r3,4 - bld r23,3 - bst r28,1 - bld r3,4 - bst r6,4 - bld r28,1 - bst r12,1 - bld r6,4 - bst r14,6 - bld r12,1 - bst r5,3 - bld r14,6 - bst r7,5 - bld r5,3 - bst r24,5 - bld r7,5 
- bst r12,7 - bld r24,5 - bst r25,6 - bld r12,7 - bst r25,3 - bld r25,6 - bst r15,7 - bld r25,3 - bst r9,7 - bld r15,7 - bst r5,6 - bld r9,7 - bst r9,1 - bld r5,6 - bst r2,6 - bld r9,1 - bst r25,0 - bld r2,6 - bst r14,3 - bld r25,0 - bst r3,7 - bld r14,3 - bst r29,5 - bld r3,7 - bst r12,4 - bld r29,5 - bst r24,2 - bld r12,4 - bst r11,3 - bld r24,2 - bst r11,6 - bld r11,3 - bst r13,2 - bld r11,6 - bst r0,0 - bld r13,2 - bst r28,3 - bld r0,0 - bst r7,4 - bld r28,3 - bst r24,1 - bld r7,4 - bst r10,7 - bld r24,1 - bst r9,6 - bld r10,7 - bst r5,2 - bld r9,6 - bst r7,1 - bld r5,2 - bst r14,5 - bld r7,1 - bst r4,7 - bld r14,5 - bst r5,5 - bld r4,7 - bst r8,5 - bld r5,5 - bst r28,6 - bld r8,5 - bst r9,0 - bld r28,6 - bst r2,2 - bld r9,0 - bst r15,0 - bld r2,2 - bst r6,3 - bld r15,0 - bst r11,5 - bld r6,3 - bst r12,6 - bld r11,5 - bst r25,2 - bld r12,6 - bst r15,3 - bld r25,2 - bst r7,7 - bld r15,3 - bst r25,5 - bld r7,7 - bst r24,7 - bld r25,5 - bst r13,7 - bld r24,7 - bst r29,7 - bld r13,7 - bst r13,4 - bld r29,7 - bst r0,0 - bld r13,4 - dec r18 - breq 5389f - rjmp 25b -5389: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - st X+,r22 - st X+,r23 - st X+,r28 - st X+,r29 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - st X+,r24 - st X+,r25 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size spongent160_permute, .-spongent160_permute - - .text -.global spongent176_permute - .type spongent176_permute, @function -spongent176_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ld r22,Z - ldd r23,Z+1 - ldd r28,Z+2 - ldd r29,Z+3 - ldd r2,Z+4 - ldd r3,Z+5 - ldd r4,Z+6 - ldd r5,Z+7 - ldd r6,Z+8 - ldd r7,Z+9 - ldd r8,Z+10 - ldd r9,Z+11 - ldd r10,Z+12 - ldd r11,Z+13 - ldd r12,Z+14 - ldd r13,Z+15 - ldd r14,Z+16 - ldd r15,Z+17 - ldd r24,Z+18 - ldd r25,Z+19 - ldd r16,Z+20 - ldd r17,Z+21 - movw r26,r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r21,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r21 -#endif - ldi r18,90 - ldi r19,69 - ldi r20,162 -27: - eor r22,r19 - eor r17,r20 - lsl r19 - bst r19,7 - bld r19,0 - mov r0,r1 - bst r19,6 - bld r0,0 - eor r19,r0 - andi r19,127 - lsr r20 - bst r20,0 - bld r20,7 - mov r0,r1 - bst r20,1 - bld r0,7 - eor r20,r0 - andi r20,254 - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r28 -#if defined(RAMPZ) - elpm r28,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r28,Z -#elif defined(__AVR_TINY__) - ld r28,Z -#else - lpm - mov r28,r0 -#endif - mov r30,r29 -#if defined(RAMPZ) - elpm r29,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r29,Z -#elif defined(__AVR_TINY__) - ld r29,Z -#else - lpm - mov r29,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z 
-#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - bst r22,1 - bld r0,0 - bst r22,4 - bld r22,1 - bst r28,0 - bld r22,4 - bst r6,0 - bld r28,0 - bst r8,1 - bld r6,0 - bst r24,5 - bld r8,1 - bst r6,7 - bld r24,5 - bst r11,5 - bld r6,7 - bst r8,6 - bld r11,5 - bst r17,1 - bld r8,6 - bst r24,7 - bld r17,1 - bst r7,7 - bld r24,7 - bst r15,5 - bld r7,7 - bst r2,7 - bld r15,5 - bst r25,4 - bld r2,7 - bst r10,3 - bld r25,4 - bst r3,6 - bld r10,3 - bst r23,1 - bld r3,6 - bst r2,4 - bld r23,1 - bst r24,0 - bld r2,4 - bst r4,3 - bld r24,0 - bst r29,5 - bld r4,3 - bst r12,4 - bld r29,5 - bst r12,2 - bld r12,4 - bst r11,2 - bld r12,2 - bst r7,2 - bld r11,2 - bst r13,1 - bld r7,2 - bst r14,6 - bld r13,1 - bst r23,3 - bld r14,6 - bst r3,4 - bld r23,3 - bst r0,0 - bld r3,4 - bst r22,2 - bld r0,0 - bst r23,0 - bld r22,2 - bst r2,0 - bld r23,0 - bst 
r14,0 - bld r2,0 - bst r16,2 - bld r14,0 - bst r13,3 - bld r16,2 - bst r15,6 - bld r13,3 - bst r3,3 - bld r15,6 - bst r17,4 - bld r3,3 - bst r16,3 - bld r17,4 - bst r13,7 - bld r16,3 - bst r25,6 - bld r13,7 - bst r11,3 - bld r25,6 - bst r7,6 - bld r11,3 - bst r15,1 - bld r7,6 - bst r28,7 - bld r15,1 - bst r9,4 - bld r28,7 - bst r28,2 - bld r9,4 - bst r7,0 - bld r28,2 - bst r12,1 - bld r7,0 - bst r10,6 - bld r12,1 - bst r5,2 - bld r10,6 - bst r5,1 - bld r5,2 - bst r4,5 - bld r5,1 - bst r2,5 - bld r4,5 - bst r24,4 - bld r2,5 - bst r6,3 - bld r24,4 - bst r9,5 - bld r6,3 - bst r28,6 - bld r9,5 - bst r9,0 - bld r28,6 - bst r0,0 - bld r9,0 - bst r22,3 - bld r0,0 - bst r23,4 - bld r22,3 - bst r4,0 - bld r23,4 - bst r28,1 - bld r4,0 - bst r6,4 - bld r28,1 - bst r10,1 - bld r6,4 - bst r2,6 - bld r10,1 - bst r25,0 - bld r2,6 - bst r8,3 - bld r25,0 - bst r25,5 - bld r8,3 - bst r10,7 - bld r25,5 - bst r5,6 - bld r10,7 - bst r7,1 - bld r5,6 - bst r12,5 - bld r7,1 - bst r12,6 - bld r12,5 - bst r13,2 - bld r12,6 - bst r15,2 - bld r13,2 - bst r29,3 - bld r15,2 - bst r11,4 - bld r29,3 - bst r8,2 - bld r11,4 - bst r25,1 - bld r8,2 - bst r8,7 - bld r25,1 - bst r17,5 - bld r8,7 - bst r16,7 - bld r17,5 - bst r15,7 - bld r16,7 - bst r3,7 - bld r15,7 - bst r23,5 - bld r3,7 - bst r4,4 - bld r23,5 - bst r2,1 - bld r4,4 - bst r14,4 - bld r2,1 - bst r0,0 - bld r14,4 - bst r22,5 - bld r0,0 - bst r28,4 - bld r22,5 - bst r8,0 - bld r28,4 - bst r24,1 - bld r8,0 - bst r4,7 - bld r24,1 - bst r3,5 - bld r4,7 - bst r0,0 - bld r3,5 - bst r22,6 - bld r0,0 - bst r29,0 - bld r22,6 - bst r10,0 - bld r29,0 - bst r2,2 - bld r10,0 - bst r15,0 - bld r2,2 - bst r28,3 - bld r15,0 - bst r7,4 - bld r28,3 - bst r14,1 - bld r7,4 - bst r16,6 - bld r14,1 - bst r15,3 - bld r16,6 - bst r29,7 - bld r15,3 - bst r13,4 - bld r29,7 - bst r24,2 - bld r13,4 - bst r5,3 - bld r24,2 - bst r5,5 - bld r5,3 - bst r6,5 - bld r5,5 - bst r10,5 - bld r6,5 - bst r4,6 - bld r10,5 - bst r3,1 - bld r4,6 - bst r16,4 - bld r3,1 - bst r14,3 - bld r16,4 - bst r17,6 - bld r14,3 - bst r17,3 - bld r17,6 - bst r25,7 - bld r17,3 - bst r11,7 - bld r25,7 - bst r9,6 - bld r11,7 - bst r29,2 - bld r9,6 - bst r11,0 - bld r29,2 - bst r6,2 - bld r11,0 - bst r9,1 - bld r6,2 - bst r0,0 - bld r9,1 - bst r22,7 - bld r0,0 - bst r29,4 - bld r22,7 - bst r12,0 - bld r29,4 - bst r10,2 - bld r12,0 - bst r3,2 - bld r10,2 - bst r17,0 - bld r3,2 - bst r24,3 - bld r17,0 - bst r5,7 - bld r24,3 - bst r7,5 - bld r5,7 - bst r14,5 - bld r7,5 - bst r0,0 - bld r14,5 - bst r23,2 - bld r0,0 - bst r3,0 - bld r23,2 - bst r16,0 - bld r3,0 - bst r12,3 - bld r16,0 - bst r11,6 - bld r12,3 - bst r9,2 - bld r11,6 - bst r0,0 - bld r9,2 - bst r23,6 - bld r0,0 - bst r5,0 - bld r23,6 - bst r4,1 - bld r5,0 - bst r28,5 - bld r4,1 - bst r8,4 - bld r28,5 - bst r16,1 - bld r8,4 - bst r12,7 - bld r16,1 - bst r13,6 - bld r12,7 - bst r25,2 - bld r13,6 - bst r9,3 - bld r25,2 - bst r0,0 - bld r9,3 - bst r23,7 - bld r0,0 - bst r5,4 - bld r23,7 - bst r6,1 - bld r5,4 - bst r8,5 - bld r6,1 - bst r16,5 - bld r8,5 - bst r14,7 - bld r16,5 - bst r0,0 - bld r14,7 - bst r29,1 - bld r0,0 - bst r10,4 - bld r29,1 - bst r4,2 - bld r10,4 - bst r0,0 - bld r4,2 - bst r29,6 - bld r0,0 - bst r13,0 - bld r29,6 - bst r14,2 - bld r13,0 - bst r17,2 - bld r14,2 - bst r25,3 - bld r17,2 - bst r9,7 - bld r25,3 - bst r0,0 - bld r9,7 - bst r2,3 - bld r0,0 - bst r15,4 - bld r2,3 - bst r0,0 - bld r15,4 - bst r6,6 - bld r0,0 - bst r11,1 - bld r6,6 - bst r0,0 - bld r11,1 - bst r7,3 - bld r0,0 - bst r13,5 - bld r7,3 - bst r24,6 - bld r13,5 - bst r0,0 - bld 
r24,6 - dec r18 - breq 5445f - rjmp 27b -5445: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - st X+,r22 - st X+,r23 - st X+,r28 - st X+,r29 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - st X+,r24 - st X+,r25 - st X+,r16 - st X+,r17 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size spongent176_permute, .-spongent176_permute - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-spongent.c b/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-spongent.c deleted file mode 100644 index 8e0d57d..0000000 --- a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-spongent.c +++ /dev/null @@ -1,350 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-spongent.h" - -#if !defined(__AVR__) - -/** - * \brief Applies the Spongent-pi S-box in parallel to the 8 nibbles - * of a 32-bit word. - * - * \param x3 The input values to the parallel S-boxes. - * - * \return The output values from the parallel S-boxes. - * - * Based on the bit-sliced S-box implementation from here: - * https://github.com/DadaIsCrazy/usuba/blob/master/data/sboxes/spongent.ua - * - * Note that spongent.ua numbers bits from highest to lowest, so x0 is the - * high bit of each nibble and x3 is the low bit. 
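The 256-entry table_0 in the deleted internal-spongent-avr.S above is the 4-bit Spongent S-box applied to both nibbles of a byte at once: entry i holds S[i >> 4] in the high nibble and S[i & 0xF] in the low nibble. The nibble values below are read off the first rows of that table (0xEE, 0xED, 0xEB, 0xE0, ...); a small sketch that regenerates the table in .byte form for comparison, offered as an illustration rather than part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    /* 4-bit Spongent S-box, as read from the high/low nibbles of table_0. */
    static const uint8_t SBOX4[16] = {
        0xE, 0xD, 0xB, 0x0, 0x2, 0x1, 0x4, 0xF,
        0x7, 0xA, 0x8, 0x5, 0x9, 0xC, 0x3, 0x6
    };

    int main(void)
    {
        unsigned i;
        for (i = 0; i < 256; ++i) {
            /* table_0[i] == (S-box(high nibble) << 4) | S-box(low nibble) */
            uint8_t b = (uint8_t)((SBOX4[i >> 4] << 4) | SBOX4[i & 0x0F]);
            printf("    .byte %u\n", b);
        }
        return 0;
    }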
- */ -static uint32_t spongent_sbox(uint32_t x3) -{ - uint32_t q0, q1, q2, q3, t0, t1, t2, t3; - uint32_t x2 = (x3 >> 1); - uint32_t x1 = (x2 >> 1); - uint32_t x0 = (x1 >> 1); - q0 = x0 ^ x2; - q1 = x1 ^ x2; - t0 = q0 & q1; - q2 = ~(x0 ^ x1 ^ x3 ^ t0); - t1 = q2 & ~x0; - q3 = x1 ^ t1; - t2 = q3 & (q3 ^ x2 ^ x3 ^ t0); - t3 = (x2 ^ t0) & ~(x1 ^ t0); - q0 = x1 ^ x2 ^ x3 ^ t2; - q1 = x0 ^ x2 ^ x3 ^ t0 ^ t1; - q2 = x0 ^ x1 ^ x2 ^ t1; - q3 = x0 ^ x3 ^ t0 ^ t3; - return ((q0 << 3) & 0x88888888U) | ((q1 << 2) & 0x44444444U) | - ((q2 << 1) & 0x22222222U) | (q3 & 0x11111111U); -} - -void spongent160_permute(spongent160_state_t *state) -{ - static uint8_t const RC[] = { - /* Round constants for Spongent-pi[160] */ - 0x75, 0xae, 0x6a, 0x56, 0x54, 0x2a, 0x29, 0x94, - 0x53, 0xca, 0x27, 0xe4, 0x4f, 0xf2, 0x1f, 0xf8, - 0x3e, 0x7c, 0x7d, 0xbe, 0x7a, 0x5e, 0x74, 0x2e, - 0x68, 0x16, 0x50, 0x0a, 0x21, 0x84, 0x43, 0xc2, - 0x07, 0xe0, 0x0e, 0x70, 0x1c, 0x38, 0x38, 0x1c, - 0x71, 0x8e, 0x62, 0x46, 0x44, 0x22, 0x09, 0x90, - 0x12, 0x48, 0x24, 0x24, 0x49, 0x92, 0x13, 0xc8, - 0x26, 0x64, 0x4d, 0xb2, 0x1b, 0xd8, 0x36, 0x6c, - 0x6d, 0xb6, 0x5a, 0x5a, 0x35, 0xac, 0x6b, 0xd6, - 0x56, 0x6a, 0x2d, 0xb4, 0x5b, 0xda, 0x37, 0xec, - 0x6f, 0xf6, 0x5e, 0x7a, 0x3d, 0xbc, 0x7b, 0xde, - 0x76, 0x6e, 0x6c, 0x36, 0x58, 0x1a, 0x31, 0x8c, - 0x63, 0xc6, 0x46, 0x62, 0x0d, 0xb0, 0x1a, 0x58, - 0x34, 0x2c, 0x69, 0x96, 0x52, 0x4a, 0x25, 0xa4, - 0x4b, 0xd2, 0x17, 0xe8, 0x2e, 0x74, 0x5d, 0xba, - 0x3b, 0xdc, 0x77, 0xee, 0x6e, 0x76, 0x5c, 0x3a, - 0x39, 0x9c, 0x73, 0xce, 0x66, 0x66, 0x4c, 0x32, - 0x19, 0x98, 0x32, 0x4c, 0x65, 0xa6, 0x4a, 0x52, - 0x15, 0xa8, 0x2a, 0x54, 0x55, 0xaa, 0x2b, 0xd4, - 0x57, 0xea, 0x2f, 0xf4, 0x5f, 0xfa, 0x3f, 0xfc - }; - const uint8_t *rc = RC; - uint32_t x0, x1, x2, x3, x4; - uint32_t t0, t1, t2, t3, t4; - uint8_t round; - - /* Load the state into local variables and convert from little-endian */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = state->W[0]; - x1 = state->W[1]; - x2 = state->W[2]; - x3 = state->W[3]; - x4 = state->W[4]; -#else - x0 = le_load_word32(state->B); - x1 = le_load_word32(state->B + 4); - x2 = le_load_word32(state->B + 8); - x3 = le_load_word32(state->B + 12); - x4 = le_load_word32(state->B + 16); -#endif - - /* Perform the 80 rounds of Spongent-pi[160] */ - for (round = 0; round < 80; ++round, rc += 2) { - /* Add the round constant to front and back of the state */ - x0 ^= rc[0]; - x4 ^= ((uint32_t)(rc[1])) << 24; - - /* Apply the S-box to all 4-bit groups in the state */ - t0 = spongent_sbox(x0); - t1 = spongent_sbox(x1); - t2 = spongent_sbox(x2); - t3 = spongent_sbox(x3); - t4 = spongent_sbox(x4); - - /* Permute the bits of the state. Bit i is moved to (40 * i) % 159 - * for all bits except the last which is left where it is. 
- * BCP = bit copy, BUP = move bit up, BDN = move bit down */ - #define BCP(x, bit) ((x) & (((uint32_t)1) << (bit))) - #define BUP(x, from, to) \ - (((x) << ((to) - (from))) & (((uint32_t)1) << (to))) - #define BDN(x, from, to) \ - (((x) >> ((from) - (to))) & (((uint32_t)1) << (to))) - x0 = BCP(t0, 0) ^ BDN(t0, 4, 1) ^ BDN(t0, 8, 2) ^ - BDN(t0, 12, 3) ^ BDN(t0, 16, 4) ^ BDN(t0, 20, 5) ^ - BDN(t0, 24, 6) ^ BDN(t0, 28, 7) ^ BUP(t1, 0, 8) ^ - BUP(t1, 4, 9) ^ BUP(t1, 8, 10) ^ BDN(t1, 12, 11) ^ - BDN(t1, 16, 12) ^ BDN(t1, 20, 13) ^ BDN(t1, 24, 14) ^ - BDN(t1, 28, 15) ^ BUP(t2, 0, 16) ^ BUP(t2, 4, 17) ^ - BUP(t2, 8, 18) ^ BUP(t2, 12, 19) ^ BUP(t2, 16, 20) ^ - BUP(t2, 20, 21) ^ BDN(t2, 24, 22) ^ BDN(t2, 28, 23) ^ - BUP(t3, 0, 24) ^ BUP(t3, 4, 25) ^ BUP(t3, 8, 26) ^ - BUP(t3, 12, 27) ^ BUP(t3, 16, 28) ^ BUP(t3, 20, 29) ^ - BUP(t3, 24, 30) ^ BUP(t3, 28, 31); - x1 = BUP(t0, 1, 8) ^ BUP(t0, 5, 9) ^ BUP(t0, 9, 10) ^ - BDN(t0, 13, 11) ^ BDN(t0, 17, 12) ^ BDN(t0, 21, 13) ^ - BDN(t0, 25, 14) ^ BDN(t0, 29, 15) ^ BUP(t1, 1, 16) ^ - BUP(t1, 5, 17) ^ BUP(t1, 9, 18) ^ BUP(t1, 13, 19) ^ - BUP(t1, 17, 20) ^ BCP(t1, 21) ^ BDN(t1, 25, 22) ^ - BDN(t1, 29, 23) ^ BUP(t2, 1, 24) ^ BUP(t2, 5, 25) ^ - BUP(t2, 9, 26) ^ BUP(t2, 13, 27) ^ BUP(t2, 17, 28) ^ - BUP(t2, 21, 29) ^ BUP(t2, 25, 30) ^ BUP(t2, 29, 31) ^ - BCP(t4, 0) ^ BDN(t4, 4, 1) ^ BDN(t4, 8, 2) ^ - BDN(t4, 12, 3) ^ BDN(t4, 16, 4) ^ BDN(t4, 20, 5) ^ - BDN(t4, 24, 6) ^ BDN(t4, 28, 7); - x2 = BUP(t0, 2, 16) ^ BUP(t0, 6, 17) ^ BUP(t0, 10, 18) ^ - BUP(t0, 14, 19) ^ BUP(t0, 18, 20) ^ BDN(t0, 22, 21) ^ - BDN(t0, 26, 22) ^ BDN(t0, 30, 23) ^ BUP(t1, 2, 24) ^ - BUP(t1, 6, 25) ^ BUP(t1, 10, 26) ^ BUP(t1, 14, 27) ^ - BUP(t1, 18, 28) ^ BUP(t1, 22, 29) ^ BUP(t1, 26, 30) ^ - BUP(t1, 30, 31) ^ BDN(t3, 1, 0) ^ BDN(t3, 5, 1) ^ - BDN(t3, 9, 2) ^ BDN(t3, 13, 3) ^ BDN(t3, 17, 4) ^ - BDN(t3, 21, 5) ^ BDN(t3, 25, 6) ^ BDN(t3, 29, 7) ^ - BUP(t4, 1, 8) ^ BUP(t4, 5, 9) ^ BUP(t4, 9, 10) ^ - BDN(t4, 13, 11) ^ BDN(t4, 17, 12) ^ BDN(t4, 21, 13) ^ - BDN(t4, 25, 14) ^ BDN(t4, 29, 15); - x3 = BUP(t0, 3, 24) ^ BUP(t0, 7, 25) ^ BUP(t0, 11, 26) ^ - BUP(t0, 15, 27) ^ BUP(t0, 19, 28) ^ BUP(t0, 23, 29) ^ - BUP(t0, 27, 30) ^ BCP(t0, 31) ^ BDN(t2, 2, 0) ^ - BDN(t2, 6, 1) ^ BDN(t2, 10, 2) ^ BDN(t2, 14, 3) ^ - BDN(t2, 18, 4) ^ BDN(t2, 22, 5) ^ BDN(t2, 26, 6) ^ - BDN(t2, 30, 7) ^ BUP(t3, 2, 8) ^ BUP(t3, 6, 9) ^ - BCP(t3, 10) ^ BDN(t3, 14, 11) ^ BDN(t3, 18, 12) ^ - BDN(t3, 22, 13) ^ BDN(t3, 26, 14) ^ BDN(t3, 30, 15) ^ - BUP(t4, 2, 16) ^ BUP(t4, 6, 17) ^ BUP(t4, 10, 18) ^ - BUP(t4, 14, 19) ^ BUP(t4, 18, 20) ^ BDN(t4, 22, 21) ^ - BDN(t4, 26, 22) ^ BDN(t4, 30, 23); - x4 = BDN(t1, 3, 0) ^ BDN(t1, 7, 1) ^ BDN(t1, 11, 2) ^ - BDN(t1, 15, 3) ^ BDN(t1, 19, 4) ^ BDN(t1, 23, 5) ^ - BDN(t1, 27, 6) ^ BDN(t1, 31, 7) ^ BUP(t2, 3, 8) ^ - BUP(t2, 7, 9) ^ BDN(t2, 11, 10) ^ BDN(t2, 15, 11) ^ - BDN(t2, 19, 12) ^ BDN(t2, 23, 13) ^ BDN(t2, 27, 14) ^ - BDN(t2, 31, 15) ^ BUP(t3, 3, 16) ^ BUP(t3, 7, 17) ^ - BUP(t3, 11, 18) ^ BUP(t3, 15, 19) ^ BUP(t3, 19, 20) ^ - BDN(t3, 23, 21) ^ BDN(t3, 27, 22) ^ BDN(t3, 31, 23) ^ - BUP(t4, 3, 24) ^ BUP(t4, 7, 25) ^ BUP(t4, 11, 26) ^ - BUP(t4, 15, 27) ^ BUP(t4, 19, 28) ^ BUP(t4, 23, 29) ^ - BUP(t4, 27, 30) ^ BCP(t4, 31); - } - - /* Store the local variables back to the state in little-endian order */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->W[0] = x0; - state->W[1] = x1; - state->W[2] = x2; - state->W[3] = x3; - state->W[4] = x4; -#else - le_store_word32(state->B, x0); - le_store_word32(state->B + 4, x1); - le_store_word32(state->B + 8, x2); - le_store_word32(state->B + 12, x3); - 
le_store_word32(state->B + 16, x4); -#endif -} - -void spongent176_permute(spongent176_state_t *state) -{ - static uint8_t const RC[] = { - /* Round constants for Spongent-pi[176] */ - 0x45, 0xa2, 0x0b, 0xd0, 0x16, 0x68, 0x2c, 0x34, - 0x59, 0x9a, 0x33, 0xcc, 0x67, 0xe6, 0x4e, 0x72, - 0x1d, 0xb8, 0x3a, 0x5c, 0x75, 0xae, 0x6a, 0x56, - 0x54, 0x2a, 0x29, 0x94, 0x53, 0xca, 0x27, 0xe4, - 0x4f, 0xf2, 0x1f, 0xf8, 0x3e, 0x7c, 0x7d, 0xbe, - 0x7a, 0x5e, 0x74, 0x2e, 0x68, 0x16, 0x50, 0x0a, - 0x21, 0x84, 0x43, 0xc2, 0x07, 0xe0, 0x0e, 0x70, - 0x1c, 0x38, 0x38, 0x1c, 0x71, 0x8e, 0x62, 0x46, - 0x44, 0x22, 0x09, 0x90, 0x12, 0x48, 0x24, 0x24, - 0x49, 0x92, 0x13, 0xc8, 0x26, 0x64, 0x4d, 0xb2, - 0x1b, 0xd8, 0x36, 0x6c, 0x6d, 0xb6, 0x5a, 0x5a, - 0x35, 0xac, 0x6b, 0xd6, 0x56, 0x6a, 0x2d, 0xb4, - 0x5b, 0xda, 0x37, 0xec, 0x6f, 0xf6, 0x5e, 0x7a, - 0x3d, 0xbc, 0x7b, 0xde, 0x76, 0x6e, 0x6c, 0x36, - 0x58, 0x1a, 0x31, 0x8c, 0x63, 0xc6, 0x46, 0x62, - 0x0d, 0xb0, 0x1a, 0x58, 0x34, 0x2c, 0x69, 0x96, - 0x52, 0x4a, 0x25, 0xa4, 0x4b, 0xd2, 0x17, 0xe8, - 0x2e, 0x74, 0x5d, 0xba, 0x3b, 0xdc, 0x77, 0xee, - 0x6e, 0x76, 0x5c, 0x3a, 0x39, 0x9c, 0x73, 0xce, - 0x66, 0x66, 0x4c, 0x32, 0x19, 0x98, 0x32, 0x4c, - 0x65, 0xa6, 0x4a, 0x52, 0x15, 0xa8, 0x2a, 0x54, - 0x55, 0xaa, 0x2b, 0xd4, 0x57, 0xea, 0x2f, 0xf4, - 0x5f, 0xfa, 0x3f, 0xfc - }; - const uint8_t *rc = RC; - uint32_t x0, x1, x2, x3, x4, x5; - uint32_t t0, t1, t2, t3, t4, t5; - uint8_t round; - - /* Load the state into local variables and convert from little-endian */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = state->W[0]; - x1 = state->W[1]; - x2 = state->W[2]; - x3 = state->W[3]; - x4 = state->W[4]; - x5 = state->W[5]; -#else - x0 = le_load_word32(state->B); - x1 = le_load_word32(state->B + 4); - x2 = le_load_word32(state->B + 8); - x3 = le_load_word32(state->B + 12); - x4 = le_load_word32(state->B + 16); - x5 = le_load_word16(state->B + 20); /* Last word is only 16 bits */ -#endif - - /* Perform the 90 rounds of Spongent-pi[176] */ - for (round = 0; round < 90; ++round, rc += 2) { - /* Add the round constant to front and back of the state */ - x0 ^= rc[0]; - x5 ^= ((uint32_t)(rc[1])) << 8; - - /* Apply the S-box to all 4-bit groups in the state */ - t0 = spongent_sbox(x0); - t1 = spongent_sbox(x1); - t2 = spongent_sbox(x2); - t3 = spongent_sbox(x3); - t4 = spongent_sbox(x4); - t5 = spongent_sbox(x5); - - /* Permute the bits of the state. Bit i is moved to (44 * i) % 175 - * for all bits except the last which is left where it is. 
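The unrolled BCP/BUP/BDN expressions in spongent160_permute above implement the layer described in its comment: bit i moves to (40 * i) % 159, with bit 159 left in place. A slow but readable equivalent is sketched below, assuming the same ordering the macros use (bit i lives in word i / 32 at position i % 32); the helper names are hypothetical and the loop form is for illustration, not a drop-in replacement:

    #include <stdint.h>
    #include <string.h>

    /* Read / write bit i of a 5-word (160-bit) state. */
    static int get_bit(const uint32_t w[5], unsigned i)
    {
        return (int)((w[i / 32] >> (i % 32)) & 1);
    }
    static void set_bit(uint32_t w[5], unsigned i, int b)
    {
        if (b)
            w[i / 32] |= (uint32_t)1 << (i % 32);
    }

    /* Reference form of the Spongent-pi[160] bit layer per the comment above:
       bit i -> (40 * i) % 159 for i < 159, bit 159 stays where it is. */
    static void spongent160_bit_layer_ref(uint32_t x[5])
    {
        uint32_t out[5] = { 0, 0, 0, 0, 0 };
        unsigned i;
        for (i = 0; i < 159; ++i)
            set_bit(out, (40U * i) % 159U, get_bit(x, i));
        set_bit(out, 159, get_bit(x, 159));
        memcpy(x, out, sizeof(out));
    }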
- * BCP = bit copy, BUP = move bit up, BDN = move bit down */ - x0 = BCP(t0, 0) ^ BDN(t0, 4, 1) ^ BDN(t0, 8, 2) ^ - BDN(t0, 12, 3) ^ BDN(t0, 16, 4) ^ BDN(t0, 20, 5) ^ - BDN(t0, 24, 6) ^ BDN(t0, 28, 7) ^ BUP(t1, 0, 8) ^ - BUP(t1, 4, 9) ^ BUP(t1, 8, 10) ^ BDN(t1, 12, 11) ^ - BDN(t1, 16, 12) ^ BDN(t1, 20, 13) ^ BDN(t1, 24, 14) ^ - BDN(t1, 28, 15) ^ BUP(t2, 0, 16) ^ BUP(t2, 4, 17) ^ - BUP(t2, 8, 18) ^ BUP(t2, 12, 19) ^ BUP(t2, 16, 20) ^ - BUP(t2, 20, 21) ^ BDN(t2, 24, 22) ^ BDN(t2, 28, 23) ^ - BUP(t3, 0, 24) ^ BUP(t3, 4, 25) ^ BUP(t3, 8, 26) ^ - BUP(t3, 12, 27) ^ BUP(t3, 16, 28) ^ BUP(t3, 20, 29) ^ - BUP(t3, 24, 30) ^ BUP(t3, 28, 31); - x1 = BUP(t0, 1, 12) ^ BUP(t0, 5, 13) ^ BUP(t0, 9, 14) ^ - BUP(t0, 13, 15) ^ BDN(t0, 17, 16) ^ BDN(t0, 21, 17) ^ - BDN(t0, 25, 18) ^ BDN(t0, 29, 19) ^ BUP(t1, 1, 20) ^ - BUP(t1, 5, 21) ^ BUP(t1, 9, 22) ^ BUP(t1, 13, 23) ^ - BUP(t1, 17, 24) ^ BUP(t1, 21, 25) ^ BUP(t1, 25, 26) ^ - BDN(t1, 29, 27) ^ BUP(t2, 1, 28) ^ BUP(t2, 5, 29) ^ - BUP(t2, 9, 30) ^ BUP(t2, 13, 31) ^ BCP(t4, 0) ^ - BDN(t4, 4, 1) ^ BDN(t4, 8, 2) ^ BDN(t4, 12, 3) ^ - BDN(t4, 16, 4) ^ BDN(t4, 20, 5) ^ BDN(t4, 24, 6) ^ - BDN(t4, 28, 7) ^ BUP(t5, 0, 8) ^ BUP(t5, 4, 9) ^ - BUP(t5, 8, 10) ^ BDN(t5, 12, 11); - x2 = BUP(t0, 2, 24) ^ BUP(t0, 6, 25) ^ BUP(t0, 10, 26) ^ - BUP(t0, 14, 27) ^ BUP(t0, 18, 28) ^ BUP(t0, 22, 29) ^ - BUP(t0, 26, 30) ^ BUP(t0, 30, 31) ^ BDN(t2, 17, 0) ^ - BDN(t2, 21, 1) ^ BDN(t2, 25, 2) ^ BDN(t2, 29, 3) ^ - BUP(t3, 1, 4) ^ BCP(t3, 5) ^ BDN(t3, 9, 6) ^ - BDN(t3, 13, 7) ^ BDN(t3, 17, 8) ^ BDN(t3, 21, 9) ^ - BDN(t3, 25, 10) ^ BDN(t3, 29, 11) ^ BUP(t4, 1, 12) ^ - BUP(t4, 5, 13) ^ BUP(t4, 9, 14) ^ BUP(t4, 13, 15) ^ - BDN(t4, 17, 16) ^ BDN(t4, 21, 17) ^ BDN(t4, 25, 18) ^ - BDN(t4, 29, 19) ^ BUP(t5, 1, 20) ^ BUP(t5, 5, 21) ^ - BUP(t5, 9, 22) ^ BUP(t5, 13, 23); - x3 = BDN(t1, 2, 0) ^ BDN(t1, 6, 1) ^ BDN(t1, 10, 2) ^ - BDN(t1, 14, 3) ^ BDN(t1, 18, 4) ^ BDN(t1, 22, 5) ^ - BDN(t1, 26, 6) ^ BDN(t1, 30, 7) ^ BUP(t2, 2, 8) ^ - BUP(t2, 6, 9) ^ BCP(t2, 10) ^ BDN(t2, 14, 11) ^ - BDN(t2, 18, 12) ^ BDN(t2, 22, 13) ^ BDN(t2, 26, 14) ^ - BDN(t2, 30, 15) ^ BUP(t3, 2, 16) ^ BUP(t3, 6, 17) ^ - BUP(t3, 10, 18) ^ BUP(t3, 14, 19) ^ BUP(t3, 18, 20) ^ - BDN(t3, 22, 21) ^ BDN(t3, 26, 22) ^ BDN(t3, 30, 23) ^ - BUP(t4, 2, 24) ^ BUP(t4, 6, 25) ^ BUP(t4, 10, 26) ^ - BUP(t4, 14, 27) ^ BUP(t4, 18, 28) ^ BUP(t4, 22, 29) ^ - BUP(t4, 26, 30) ^ BUP(t4, 30, 31); - x4 = BUP(t0, 3, 4) ^ BDN(t0, 7, 5) ^ BDN(t0, 11, 6) ^ - BDN(t0, 15, 7) ^ BDN(t0, 19, 8) ^ BDN(t0, 23, 9) ^ - BDN(t0, 27, 10) ^ BDN(t0, 31, 11) ^ BUP(t1, 3, 12) ^ - BUP(t1, 7, 13) ^ BUP(t1, 11, 14) ^ BCP(t1, 15) ^ - BDN(t1, 19, 16) ^ BDN(t1, 23, 17) ^ BDN(t1, 27, 18) ^ - BDN(t1, 31, 19) ^ BUP(t2, 3, 20) ^ BUP(t2, 7, 21) ^ - BUP(t2, 11, 22) ^ BUP(t2, 15, 23) ^ BUP(t2, 19, 24) ^ - BUP(t2, 23, 25) ^ BDN(t2, 27, 26) ^ BDN(t2, 31, 27) ^ - BUP(t3, 3, 28) ^ BUP(t3, 7, 29) ^ BUP(t3, 11, 30) ^ - BUP(t3, 15, 31) ^ BDN(t5, 2, 0) ^ BDN(t5, 6, 1) ^ - BDN(t5, 10, 2) ^ BDN(t5, 14, 3); - x5 = BDN(t3, 19, 0) ^ BDN(t3, 23, 1) ^ BDN(t3, 27, 2) ^ - BDN(t3, 31, 3) ^ BUP(t4, 3, 4) ^ BDN(t4, 7, 5) ^ - BDN(t4, 11, 6) ^ BDN(t4, 15, 7) ^ BDN(t4, 19, 8) ^ - BDN(t4, 23, 9) ^ BDN(t4, 27, 10) ^ BDN(t4, 31, 11) ^ - BUP(t5, 3, 12) ^ BUP(t5, 7, 13) ^ BUP(t5, 11, 14) ^ - BCP(t5, 15); - } - - /* Store the local variables back to the state in little-endian order */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->W[0] = x0; - state->W[1] = x1; - state->W[2] = x2; - state->W[3] = x3; - state->W[4] = x4; - state->W[5] = x5; -#else - le_store_word32(state->B, x0); - le_store_word32(state->B + 4, x1); - 
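The unrolled BCP/BUP/BDN chains above implement the bit permutation described in the preceding comment: bit i of the 176-bit state moves to position (44 * i) % 175, and the last bit stays put. As a readability aid only, a loop-based cross-check might look like the sketch below; get_bit/set_bit are hypothetical helpers, and the 22-byte buffers correspond to the little-endian byte view of the state.

#include <stdint.h>
#include <string.h>

/* Hypothetical helpers: bit i lives in byte i/8 at position i%8, which matches
 * the little-endian word layout assumed by the unrolled macros above. */
static int get_bit(const uint8_t *b, unsigned i)
{
    return (b[i / 8] >> (i % 8)) & 1;
}

static void set_bit(uint8_t *b, unsigned i, int v)
{
    b[i / 8] = (uint8_t)((b[i / 8] & (uint8_t)~(1u << (i % 8))) |
                         (uint8_t)(v << (i % 8)));
}

/* Hedged sketch of the Spongent-pi[176] bit permutation layer, intended only
 * as a reference against which the unrolled BCP/BUP/BDN form can be checked. */
static void spongent176_permute_bits_ref(uint8_t out[22], const uint8_t in[22])
{
    unsigned i;
    memset(out, 0, 22);
    for (i = 0; i < 175; ++i)
        set_bit(out, (44 * i) % 175, get_bit(in, i)); /* bit i -> 44*i mod 175 */
    set_bit(out, 175, get_bit(in, 175));              /* bit 175 is fixed */
}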
le_store_word32(state->B + 8, x2); - le_store_word32(state->B + 12, x3); - le_store_word32(state->B + 16, x4); - le_store_word16(state->B + 20, x5); /* Last word is only 16 bits */ -#endif -} - -#endif /* !__AVR__ */ diff --git a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-spongent.h b/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-spongent.h deleted file mode 100644 index bb9823f..0000000 --- a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-spongent.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SPONGENT_H -#define LW_INTERNAL_SPONGENT_H - -#include "internal-util.h" - -/** - * \file internal-spongent.h - * \brief Internal implementation of the Spongent-pi permutation. - * - * References: https://www.esat.kuleuven.be/cosic/elephant/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the Spongent-pi[160] state in bytes. - */ -#define SPONGENT160_STATE_SIZE 20 - -/** - * \brief Size of the Spongent-pi[176] state in bytes. - */ -#define SPONGENT176_STATE_SIZE 22 - -/** - * \brief Structure of the internal state of the Spongent-pi[160] permutation. - */ -typedef union -{ - uint32_t W[5]; /**< Spongent-pi[160] state as 32-bit words */ - uint8_t B[20]; /**< Spongent-pi[160] state as bytes */ - -} spongent160_state_t; - -/** - * \brief Structure of the internal state of the Spongent-pi[176] permutation. - * - * Note: The state is technically only 176 bits, but we increase it to - * 192 bits so that we can use 32-bit word operations to manipulate the - * state. The extra bits in the last word are fixed to zero. - */ -typedef union -{ - uint32_t W[6]; /**< Spongent-pi[176] state as 32-bit words */ - uint8_t B[24]; /**< Spongent-pi[176] state as bytes */ - -} spongent176_state_t; - -/** - * \brief Permutes the Spongent-pi[160] state. - * - * \param state The Spongent-pi[160] state to be permuted. - */ -void spongent160_permute(spongent160_state_t *state); - -/** - * \brief Permutes the Spongent-pi[176] state. - * - * \param state The Spongent-pi[176] state to be permuted. 
- */ -void spongent176_permute(spongent176_state_t *state); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-util.h b/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/elephant/Implementations/crypto_aead/elephant160v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
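The spongent176_state_t union declared in the header above is padded from 176 to 192 bits so that 32-bit word operations can be used, with the extra bits required to stay zero. A minimal, hedged usage sketch of that API (the absorb step is only indicated, not implemented):

#include <string.h>
#include "internal-spongent.h"

/* Hedged sketch: zero-initialise the padded state, copy in up to 22 bytes of
 * data as a stand-in for a real absorb step, then run the permutation. */
static void spongent176_example(const unsigned char *block, unsigned len)
{
    spongent176_state_t state;
    memset(state.B, 0, sizeof(state.B));      /* also clears the 16 padding bits */
    if (len > SPONGENT176_STATE_SIZE)
        len = SPONGENT176_STATE_SIZE;         /* at most 22 bytes per call */
    memcpy(state.B, block, len);
    spongent176_permute(&state);
}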
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
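The load/store helpers defined above are plain shift-and-mask macros, so they behave the same on little- and big-endian hosts. A hedged self-check of the 32-bit little-endian pair (the function name is illustrative only):

#include <assert.h>
#include <stdint.h>

/* Hedged sketch: le_store_word32 emits the low byte first and
 * le_load_word32 reassembles the same value from those bytes. */
static void check_le_word32(void)
{
    uint8_t buf[4];
    const uint32_t value = 0x11223344;
    le_store_word32(buf, value);
    assert(buf[0] == 0x44 && buf[1] == 0x33 && buf[2] == 0x22 && buf[3] == 0x11);
    assert(le_load_word32(buf) == value);
}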
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
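The lw_xor_block family above is how the AEAD code later in this patch combines masks, message bytes and state bytes. The two-source variant simply XORs two equal-length buffers into a third; a hedged one-liner example:

#include <stdint.h>

/* Hedged sketch: dest[i] = src1[i] ^ src2[i] for 0 <= i < 8,
 * using the lw_xor_block_2_src macro defined above. */
static void xor_8_bytes(uint8_t dest[8], const uint8_t src1[8], const uint8_t src2[8])
{
    lw_xor_block_2_src(dest, src1, src2, 8);
}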
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
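As the comment above explains, AVR only has cheap rotations by 1 bit or by whole bytes, so every other 32-bit rotation is composed from a byte-granular rotate plus at most four single-bit fix-ups (leftRotate12, for instance, is a rotate left by 16 followed by four rotates right by 1). A hedged sketch of the same decomposition as a loop, purely to make the strategy visible:

#include <stdint.h>

/* Hedged sketch of the composition strategy behind the macros above:
 * rotate left to the nearest multiple of 8 bits, then correct with at
 * most four single-bit rotates.  The real macros are fully unrolled. */
static uint32_t left_rotate_composed(uint32_t x, unsigned bits) /* 0 < bits < 32 */
{
    unsigned base = ((bits + 4) / 8) * 8;   /* nearest multiple of 8: 0, 8, ..., 32 */
    int fixup = (int)bits - (int)base;      /* always in the range -4 .. +3 */
    unsigned shift = base % 32;             /* a rotate by 32 is a no-op */

    if (shift != 0)
        x = (x << shift) | (x >> (32 - shift));
    while (fixup > 0) { x = (x << 1) | (x >> 31); --fixup; }   /* rotate left 1  */
    while (fixup < 0) { x = (x >> 1) | (x << 31); ++fixup; }   /* rotate right 1 */
    return x;
}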
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant160v1/rhys/elephant.c b/elephant/Implementations/crypto_aead/elephant160v1/rhys/elephant.c index 770f568..2f7abb3 100644 --- a/elephant/Implementations/crypto_aead/elephant160v1/rhys/elephant.c +++ b/elephant/Implementations/crypto_aead/elephant160v1/rhys/elephant.c @@ -660,7 +660,7 @@ static void delirium_process_ad if (size <= adlen) { /* Process a complete block */ lw_xor_block(state->B + posn, ad, size); - keccakp_200_permute(state, 18); + keccakp_200_permute(state); lw_xor_block(state->B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state->B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state->B, DELIRIUM_TAG_SIZE); @@ -680,7 +680,7 @@ static void delirium_process_ad /* Pad and absorb the final block */ state->B[posn] ^= 0x01; - keccakp_200_permute(state, 18); + keccakp_200_permute(state); lw_xor_block(state->B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state->B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state->B, DELIRIUM_TAG_SIZE); @@ -707,7 +707,7 @@ int delirium_aead_encrypt /* Hash the key and generate the initial mask */ memcpy(state.B, k, DELIRIUM_KEY_SIZE); memset(state.B + DELIRIUM_KEY_SIZE, 0, sizeof(state.B) - DELIRIUM_KEY_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); memcpy(mask, state.B, DELIRIUM_KEY_SIZE); memset(mask + DELIRIUM_KEY_SIZE, 0, sizeof(mask) - DELIRIUM_KEY_SIZE); memcpy(start, mask, sizeof(mask)); @@ -726,7 +726,7 @@ int delirium_aead_encrypt /* Encrypt using the current mask */ memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, m, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); memcpy(c, state.B, KECCAKP_200_STATE_SIZE); @@ -735,7 +735,7 @@ int delirium_aead_encrypt delirium_lfsr(next, mask); lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, next, KECCAKP_200_STATE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); @@ -751,7 +751,7 @@ int delirium_aead_encrypt unsigned temp = (unsigned)mlen; memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, m, temp); lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); memcpy(c, state.B, temp); @@ -762,7 +762,7 @@ int delirium_aead_encrypt memset(state.B + temp + 1, 0, KECCAKP_200_STATE_SIZE - temp - 1); lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, next, KECCAKP_200_STATE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); @@ -772,7 +772,7 @@ int delirium_aead_encrypt delirium_lfsr(next, mask); 
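The elephant.c hunks above and below all make the same change: the explicit round count disappears from keccakp_200_permute. Delirium always ran the permutation with the full 18 rounds, and that fixed-round form is what the new AVR assembly added later in this patch implements, so the C implementation drops the parameter as well. A hedged sketch of the call-site difference:

#include "internal-keccak.h"

/* Hedged sketch of the interface change applied throughout elephant.c. */
static void permute_delirium_state(keccakp_200_state_t *state)
{
    /* before this patch: keccakp_200_permute(state, 18); */
    keccakp_200_permute(state);   /* after: the 18 rounds are implicit */
}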
lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); state.B[0] ^= 0x01; - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); @@ -807,7 +807,7 @@ int delirium_aead_decrypt /* Hash the key and generate the initial mask */ memcpy(state.B, k, DELIRIUM_KEY_SIZE); memset(state.B + DELIRIUM_KEY_SIZE, 0, sizeof(state.B) - DELIRIUM_KEY_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); memcpy(mask, state.B, DELIRIUM_KEY_SIZE); memset(mask + DELIRIUM_KEY_SIZE, 0, sizeof(mask) - DELIRIUM_KEY_SIZE); memcpy(start, mask, sizeof(mask)); @@ -828,7 +828,7 @@ int delirium_aead_decrypt delirium_lfsr(next, mask); lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, c, KECCAKP_200_STATE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); @@ -836,7 +836,7 @@ int delirium_aead_decrypt /* Decrypt using the current mask */ memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block_2_src(m, state.B, c, KECCAKP_200_STATE_SIZE); @@ -853,7 +853,7 @@ int delirium_aead_decrypt lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, c, temp); state.B[temp] ^= 0x01; - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); @@ -861,7 +861,7 @@ int delirium_aead_decrypt /* Decrypt the last block using the current mask */ memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, temp); lw_xor_block_2_src(m, state.B, c, temp); c += temp; @@ -870,7 +870,7 @@ int delirium_aead_decrypt delirium_lfsr(next, mask); lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); state.B[0] ^= 0x01; - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); diff --git a/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-keccak-avr.S b/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-keccak-avr.S new file mode 100644 index 0000000..e50ccaf --- /dev/null +++ b/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-keccak-avr.S @@ -0,0 +1,1552 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global keccakp_200_permute + .type keccakp_200_permute, @function +keccakp_200_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r26,Z+6 + ldd r27,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r4,Z+12 + ldd r5,Z+13 + ldd r6,Z+14 + ldd r7,Z+15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd 
r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + ldd r24,Z+24 + push r31 + push r30 + rcall 82f + ldi r30,1 + eor r18,r30 + rcall 82f + ldi r30,130 + eor r18,r30 + rcall 82f + ldi r30,138 + eor r18,r30 + rcall 82f + mov r30,r1 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,1 + eor r18,r30 + rcall 82f + ldi r30,129 + eor r18,r30 + rcall 82f + ldi r30,9 + eor r18,r30 + rcall 82f + ldi r30,138 + eor r18,r30 + rcall 82f + ldi r30,136 + eor r18,r30 + rcall 82f + ldi r30,9 + eor r18,r30 + rcall 82f + ldi r30,10 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,137 + eor r18,r30 + rcall 82f + ldi r30,3 + eor r18,r30 + rcall 82f + ldi r30,2 + eor r18,r30 + rcall 82f + ldi r30,128 + eor r18,r30 + rjmp 420f +82: + mov r30,r18 + eor r30,r23 + eor r30,r2 + eor r30,r7 + eor r30,r12 + mov r31,r19 + eor r31,r26 + eor r31,r3 + eor r31,r8 + eor r31,r13 + mov r25,r20 + eor r25,r27 + eor r25,r4 + eor r25,r9 + eor r25,r14 + mov r16,r21 + eor r16,r28 + eor r16,r5 + eor r16,r10 + eor r16,r15 + mov r17,r22 + eor r17,r29 + eor r17,r6 + eor r17,r11 + eor r17,r24 + mov r0,r31 + lsl r0 + adc r0,r1 + eor r0,r17 + eor r18,r0 + eor r23,r0 + eor r2,r0 + eor r7,r0 + eor r12,r0 + mov r0,r25 + lsl r0 + adc r0,r1 + eor r0,r30 + eor r19,r0 + eor r26,r0 + eor r3,r0 + eor r8,r0 + eor r13,r0 + mov r0,r16 + lsl r0 + adc r0,r1 + eor r0,r31 + eor r20,r0 + eor r27,r0 + eor r4,r0 + eor r9,r0 + eor r14,r0 + mov r0,r17 + lsl r0 + adc r0,r1 + eor r0,r25 + eor r21,r0 + eor r28,r0 + eor r5,r0 + eor r10,r0 + eor r15,r0 + mov r0,r30 + lsl r0 + adc r0,r1 + eor r0,r16 + eor r22,r0 + eor r29,r0 + eor r6,r0 + eor r11,r0 + eor r24,r0 + mov r30,r19 + swap r26 + mov r19,r26 + swap r29 + mov r26,r29 + mov r0,r1 + lsr r14 + ror r0 + lsr r14 + ror r0 + lsr r14 + ror r0 + or r14,r0 + mov r29,r14 + bst r6,0 + lsr r6 + bld r6,7 + mov r14,r6 + lsl r12 + adc r12,r1 + lsl r12 + adc r12,r1 + mov r6,r12 + mov r0,r1 + lsr r20 + ror r0 + lsr r20 + ror r0 + or r20,r0 + mov r12,r20 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + mov r20,r4 + lsl r5 + adc r5,r1 + mov r4,r5 + mov r5,r11 + mov r11,r15 + lsl r7 + adc r7,r1 + mov r15,r7 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + mov r7,r22 + mov r0,r1 + lsr r24 + ror r0 + lsr r24 + ror r0 + or r24,r0 + mov r22,r24 + lsl r13 + adc r13,r1 + lsl r13 + adc r13,r1 + mov r24,r13 + bst r28,0 + lsr r28 + bld r28,7 + mov r13,r28 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r28,r8 + swap r23 + mov r8,r23 + swap r21 + mov r23,r21 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r21,r10 + bst r9,0 + lsr r9 + bld r9,7 + mov r10,r9 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + mov r9,r3 + mov r0,r1 + lsr r27 + ror r0 + lsr r27 + ror r0 + or r27,r0 + mov r3,r27 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + mov r27,r2 + lsl r30 + adc r30,r1 + mov r2,r30 + mov r30,r18 + mov r31,r19 + mov r25,r20 + mov r16,r21 + mov r17,r22 + mov r18,r25 + mov r0,r31 + com r0 + and r18,r0 + eor r18,r30 + mov r19,r16 + mov r0,r25 + com r0 + and r19,r0 + eor r19,r31 + mov r20,r17 + mov r0,r16 + com r0 + and r20,r0 + eor r20,r25 + mov r21,r30 + mov r0,r17 + com r0 + and r21,r0 + eor r21,r16 + mov r22,r31 + mov r0,r30 + com r0 + and r22,r0 + eor r22,r17 + mov r30,r23 + mov r31,r26 + mov r25,r27 + mov r16,r28 + mov r17,r29 + mov r23,r25 + mov r0,r31 + com r0 + and r23,r0 + eor r23,r30 + mov 
r26,r16 + mov r0,r25 + com r0 + and r26,r0 + eor r26,r31 + mov r27,r17 + mov r0,r16 + com r0 + and r27,r0 + eor r27,r25 + mov r28,r30 + mov r0,r17 + com r0 + and r28,r0 + eor r28,r16 + mov r29,r31 + mov r0,r30 + com r0 + and r29,r0 + eor r29,r17 + mov r30,r2 + mov r31,r3 + mov r25,r4 + mov r16,r5 + mov r17,r6 + mov r2,r25 + mov r0,r31 + com r0 + and r2,r0 + eor r2,r30 + mov r3,r16 + mov r0,r25 + com r0 + and r3,r0 + eor r3,r31 + mov r4,r17 + mov r0,r16 + com r0 + and r4,r0 + eor r4,r25 + mov r5,r30 + mov r0,r17 + com r0 + and r5,r0 + eor r5,r16 + mov r6,r31 + mov r0,r30 + com r0 + and r6,r0 + eor r6,r17 + mov r30,r7 + mov r31,r8 + mov r25,r9 + mov r16,r10 + mov r17,r11 + mov r7,r25 + mov r0,r31 + com r0 + and r7,r0 + eor r7,r30 + mov r8,r16 + mov r0,r25 + com r0 + and r8,r0 + eor r8,r31 + mov r9,r17 + mov r0,r16 + com r0 + and r9,r0 + eor r9,r25 + mov r10,r30 + mov r0,r17 + com r0 + and r10,r0 + eor r10,r16 + mov r11,r31 + mov r0,r30 + com r0 + and r11,r0 + eor r11,r17 + mov r30,r12 + mov r31,r13 + mov r25,r14 + mov r16,r15 + mov r17,r24 + mov r12,r25 + mov r0,r31 + com r0 + and r12,r0 + eor r12,r30 + mov r13,r16 + mov r0,r25 + com r0 + and r13,r0 + eor r13,r31 + mov r14,r17 + mov r0,r16 + com r0 + and r14,r0 + eor r14,r25 + mov r15,r30 + mov r0,r17 + com r0 + and r15,r0 + eor r15,r16 + mov r24,r31 + mov r0,r30 + com r0 + and r24,r0 + eor r24,r17 + ret +420: + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r22 + std Z+5,r23 + std Z+6,r26 + std Z+7,r27 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r4 + std Z+13,r5 + std Z+14,r6 + std Z+15,r7 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + std Z+24,r24 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size keccakp_200_permute, .-keccakp_200_permute + + .text +.global keccakp_400_permute + .type keccakp_400_permute, @function +keccakp_400_permute: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + movw r30,r24 +.L__stack_usage = 17 + ld r6,Z + ldd r7,Z+1 + ldd r8,Z+2 + ldd r9,Z+3 + ldd r10,Z+4 + ldd r11,Z+5 + ldd r12,Z+6 + ldd r13,Z+7 + ldd r14,Z+8 + ldd r15,Z+9 + cpi r22,20 + brcs 15f + rcall 153f + ldi r23,1 + eor r6,r23 +15: + cpi r22,19 + brcs 23f + rcall 153f + ldi r23,130 + eor r6,r23 + ldi r17,128 + eor r7,r17 +23: + cpi r22,18 + brcs 31f + rcall 153f + ldi r23,138 + eor r6,r23 + ldi r17,128 + eor r7,r17 +31: + cpi r22,17 + brcs 37f + rcall 153f + ldi r23,128 + eor r7,r23 +37: + cpi r22,16 + brcs 45f + rcall 153f + ldi r23,139 + eor r6,r23 + ldi r17,128 + eor r7,r17 +45: + cpi r22,15 + brcs 51f + rcall 153f + ldi r23,1 + eor r6,r23 +51: + cpi r22,14 + brcs 59f + rcall 153f + ldi r23,129 + eor r6,r23 + ldi r17,128 + eor r7,r17 +59: + cpi r22,13 + brcs 67f + rcall 153f + ldi r23,9 + eor r6,r23 + ldi r17,128 + eor r7,r17 +67: + cpi r22,12 + brcs 73f + rcall 153f + ldi r23,138 + eor r6,r23 +73: + cpi r22,11 + brcs 79f + rcall 153f + ldi r23,136 + eor r6,r23 +79: + cpi r22,10 + brcs 87f + rcall 153f + ldi r23,9 + eor r6,r23 + ldi r17,128 + eor r7,r17 +87: + cpi r22,9 + brcs 93f + rcall 153f + ldi r23,10 + eor r6,r23 +93: + cpi r22,8 + brcs 101f + rcall 153f + ldi r23,139 + eor r6,r23 + ldi r17,128 + eor r7,r17 +101: + cpi r22,7 + brcs 107f + rcall 153f + ldi r23,139 + eor r6,r23 
+107: + cpi r22,6 + brcs 115f + rcall 153f + ldi r23,137 + eor r6,r23 + ldi r17,128 + eor r7,r17 +115: + cpi r22,5 + brcs 123f + rcall 153f + ldi r23,3 + eor r6,r23 + ldi r17,128 + eor r7,r17 +123: + cpi r22,4 + brcs 131f + rcall 153f + ldi r23,2 + eor r6,r23 + ldi r17,128 + eor r7,r17 +131: + cpi r22,3 + brcs 137f + rcall 153f + ldi r23,128 + eor r6,r23 +137: + cpi r22,2 + brcs 145f + rcall 153f + ldi r23,10 + eor r6,r23 + ldi r17,128 + eor r7,r17 +145: + cpi r22,1 + brcs 151f + rcall 153f + ldi r23,10 + eor r6,r23 +151: + rjmp 1004f +153: + movw r18,r6 + ldd r0,Z+10 + eor r18,r0 + ldd r0,Z+11 + eor r19,r0 + ldd r0,Z+20 + eor r18,r0 + ldd r0,Z+21 + eor r19,r0 + ldd r0,Z+30 + eor r18,r0 + ldd r0,Z+31 + eor r19,r0 + ldd r0,Z+40 + eor r18,r0 + ldd r0,Z+41 + eor r19,r0 + movw r20,r8 + ldd r0,Z+12 + eor r20,r0 + ldd r0,Z+13 + eor r21,r0 + ldd r0,Z+22 + eor r20,r0 + ldd r0,Z+23 + eor r21,r0 + ldd r0,Z+32 + eor r20,r0 + ldd r0,Z+33 + eor r21,r0 + ldd r0,Z+42 + eor r20,r0 + ldd r0,Z+43 + eor r21,r0 + movw r26,r10 + ldd r0,Z+14 + eor r26,r0 + ldd r0,Z+15 + eor r27,r0 + ldd r0,Z+24 + eor r26,r0 + ldd r0,Z+25 + eor r27,r0 + ldd r0,Z+34 + eor r26,r0 + ldd r0,Z+35 + eor r27,r0 + ldd r0,Z+44 + eor r26,r0 + ldd r0,Z+45 + eor r27,r0 + movw r2,r12 + ldd r0,Z+16 + eor r2,r0 + ldd r0,Z+17 + eor r3,r0 + ldd r0,Z+26 + eor r2,r0 + ldd r0,Z+27 + eor r3,r0 + ldd r0,Z+36 + eor r2,r0 + ldd r0,Z+37 + eor r3,r0 + ldd r0,Z+46 + eor r2,r0 + ldd r0,Z+47 + eor r3,r0 + movw r4,r14 + ldd r0,Z+18 + eor r4,r0 + ldd r0,Z+19 + eor r5,r0 + ldd r0,Z+28 + eor r4,r0 + ldd r0,Z+29 + eor r5,r0 + ldd r0,Z+38 + eor r4,r0 + ldd r0,Z+39 + eor r5,r0 + ldd r0,Z+48 + eor r4,r0 + ldd r0,Z+49 + eor r5,r0 + movw r24,r20 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r4 + eor r25,r5 + eor r6,r24 + eor r7,r25 + ldd r0,Z+10 + eor r0,r24 + std Z+10,r0 + ldd r0,Z+11 + eor r0,r25 + std Z+11,r0 + ldd r0,Z+20 + eor r0,r24 + std Z+20,r0 + ldd r0,Z+21 + eor r0,r25 + std Z+21,r0 + ldd r0,Z+30 + eor r0,r24 + std Z+30,r0 + ldd r0,Z+31 + eor r0,r25 + std Z+31,r0 + ldd r0,Z+40 + eor r0,r24 + std Z+40,r0 + ldd r0,Z+41 + eor r0,r25 + std Z+41,r0 + movw r24,r26 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r18 + eor r25,r19 + eor r8,r24 + eor r9,r25 + ldd r0,Z+12 + eor r0,r24 + std Z+12,r0 + ldd r0,Z+13 + eor r0,r25 + std Z+13,r0 + ldd r0,Z+22 + eor r0,r24 + std Z+22,r0 + ldd r0,Z+23 + eor r0,r25 + std Z+23,r0 + ldd r0,Z+32 + eor r0,r24 + std Z+32,r0 + ldd r0,Z+33 + eor r0,r25 + std Z+33,r0 + ldd r0,Z+42 + eor r0,r24 + std Z+42,r0 + ldd r0,Z+43 + eor r0,r25 + std Z+43,r0 + movw r24,r2 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r20 + eor r25,r21 + eor r10,r24 + eor r11,r25 + ldd r0,Z+14 + eor r0,r24 + std Z+14,r0 + ldd r0,Z+15 + eor r0,r25 + std Z+15,r0 + ldd r0,Z+24 + eor r0,r24 + std Z+24,r0 + ldd r0,Z+25 + eor r0,r25 + std Z+25,r0 + ldd r0,Z+34 + eor r0,r24 + std Z+34,r0 + ldd r0,Z+35 + eor r0,r25 + std Z+35,r0 + ldd r0,Z+44 + eor r0,r24 + std Z+44,r0 + ldd r0,Z+45 + eor r0,r25 + std Z+45,r0 + movw r24,r4 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r26 + eor r25,r27 + eor r12,r24 + eor r13,r25 + ldd r0,Z+16 + eor r0,r24 + std Z+16,r0 + ldd r0,Z+17 + eor r0,r25 + std Z+17,r0 + ldd r0,Z+26 + eor r0,r24 + std Z+26,r0 + ldd r0,Z+27 + eor r0,r25 + std Z+27,r0 + ldd r0,Z+36 + eor r0,r24 + std Z+36,r0 + ldd r0,Z+37 + eor r0,r25 + std Z+37,r0 + ldd r0,Z+46 + eor r0,r24 + std Z+46,r0 + ldd r0,Z+47 + eor r0,r25 + std Z+47,r0 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r2 + eor r25,r3 + eor r14,r24 + eor r15,r25 + ldd r0,Z+18 + eor r0,r24 + std Z+18,r0 + ldd 
r0,Z+19 + eor r0,r25 + std Z+19,r0 + ldd r0,Z+28 + eor r0,r24 + std Z+28,r0 + ldd r0,Z+29 + eor r0,r25 + std Z+29,r0 + ldd r0,Z+38 + eor r0,r24 + std Z+38,r0 + ldd r0,Z+39 + eor r0,r25 + std Z+39,r0 + ldd r0,Z+48 + eor r0,r24 + std Z+48,r0 + ldd r0,Z+49 + eor r0,r25 + std Z+49,r0 + movw r24,r8 + ldd r8,Z+12 + ldd r9,Z+13 + mov r0,r9 + mov r9,r8 + mov r8,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldd r18,Z+18 + ldd r19,Z+19 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+12,r18 + std Z+13,r19 + ldd r18,Z+44 + ldd r19,Z+45 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+18,r18 + std Z+19,r19 + ldd r18,Z+28 + ldd r19,Z+29 + mov r0,r19 + mov r19,r18 + mov r18,r0 + bst r18,0 + lsr r19 + ror r18 + bld r19,7 + std Z+44,r18 + std Z+45,r19 + ldd r18,Z+40 + ldd r19,Z+41 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+28,r18 + std Z+29,r19 + movw r18,r10 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+40,r18 + std Z+41,r19 + ldd r10,Z+24 + ldd r11,Z+25 + mov r0,r11 + mov r11,r10 + mov r10,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldd r18,Z+26 + ldd r19,Z+27 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + std Z+24,r18 + std Z+25,r19 + ldd r18,Z+38 + ldd r19,Z+39 + mov r0,r19 + mov r19,r18 + mov r18,r0 + std Z+26,r18 + std Z+27,r19 + ldd r18,Z+46 + ldd r19,Z+47 + mov r0,r19 + mov r19,r18 + mov r18,r0 + std Z+38,r18 + std Z+39,r19 + ldd r18,Z+30 + ldd r19,Z+31 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + std Z+46,r18 + std Z+47,r19 + movw r18,r14 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+30,r18 + std Z+31,r19 + ldd r14,Z+48 + ldd r15,Z+49 + mov r0,r1 + lsr r15 + ror r14 + ror r0 + lsr r15 + ror r14 + ror r0 + or r15,r0 + ldd r18,Z+42 + ldd r19,Z+43 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+48,r18 + std Z+49,r19 + ldd r18,Z+16 + ldd r19,Z+17 + mov r0,r19 + mov r19,r18 + mov r18,r0 + bst r18,0 + lsr r19 + ror r18 + bld r19,7 + std Z+42,r18 + std Z+43,r19 + ldd r18,Z+32 + ldd r19,Z+33 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+16,r18 + std Z+17,r19 + ldd r18,Z+10 + ldd r19,Z+11 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+32,r18 + std Z+33,r19 + movw r18,r12 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+10,r18 + std Z+11,r19 + ldd r12,Z+36 + ldd r13,Z+37 + mov r0,r13 + mov r13,r12 + mov r12,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + or r13,r0 + ldd r18,Z+34 + ldd r19,Z+35 + bst r18,0 + lsr r19 + ror r18 + bld r19,7 + std Z+36,r18 + std Z+37,r19 + ldd r18,Z+22 + ldd r19,Z+23 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+34,r18 + std Z+35,r19 + ldd r18,Z+14 + ldd r19,Z+15 + mov r0,r19 + mov r19,r18 + mov r18,r0 + mov r0,r1 + 
lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+22,r18 + std Z+23,r19 + ldd r18,Z+20 + ldd r19,Z+21 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+14,r18 + std Z+15,r19 + lsl r24 + rol r25 + adc r24,r1 + std Z+20,r24 + std Z+21,r25 + movw r18,r6 + movw r20,r8 + movw r26,r10 + movw r2,r12 + movw r4,r14 + movw r6,r26 + mov r0,r20 + com r0 + and r6,r0 + mov r0,r21 + com r0 + and r7,r0 + eor r6,r18 + eor r7,r19 + movw r8,r2 + mov r0,r26 + com r0 + and r8,r0 + mov r0,r27 + com r0 + and r9,r0 + eor r8,r20 + eor r9,r21 + movw r10,r4 + mov r0,r2 + com r0 + and r10,r0 + mov r0,r3 + com r0 + and r11,r0 + eor r10,r26 + eor r11,r27 + movw r12,r18 + mov r0,r4 + com r0 + and r12,r0 + mov r0,r5 + com r0 + and r13,r0 + eor r12,r2 + eor r13,r3 + movw r14,r20 + mov r0,r18 + com r0 + and r14,r0 + mov r0,r19 + com r0 + and r15,r0 + eor r14,r4 + eor r15,r5 + ldd r18,Z+10 + ldd r19,Z+11 + ldd r20,Z+12 + ldd r21,Z+13 + ldd r26,Z+14 + ldd r27,Z+15 + ldd r2,Z+16 + ldd r3,Z+17 + ldd r4,Z+18 + ldd r5,Z+19 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+10,r24 + std Z+11,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+12,r24 + std Z+13,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+14,r24 + std Z+15,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+16,r24 + std Z+17,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+18,r24 + std Z+19,r25 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + ldd r26,Z+24 + ldd r27,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r4,Z+28 + ldd r5,Z+29 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+20,r24 + std Z+21,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+22,r24 + std Z+23,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+24,r24 + std Z+25,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+26,r24 + std Z+27,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+28,r24 + std Z+29,r25 + ldd r18,Z+30 + ldd r19,Z+31 + ldd r20,Z+32 + ldd r21,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + ldd r2,Z+36 + ldd r3,Z+37 + ldd r4,Z+38 + ldd r5,Z+39 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+30,r24 + std Z+31,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+32,r24 + std Z+33,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+34,r24 + std Z+35,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+36,r24 + std Z+37,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+38,r24 + std Z+39,r25 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + ldd 
r26,Z+44 + ldd r27,Z+45 + ldd r2,Z+46 + ldd r3,Z+47 + ldd r4,Z+48 + ldd r5,Z+49 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+40,r24 + std Z+41,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+42,r24 + std Z+43,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+44,r24 + std Z+45,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+46,r24 + std Z+47,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+48,r24 + std Z+49,r25 + ret +1004: + st Z,r6 + std Z+1,r7 + std Z+2,r8 + std Z+3,r9 + std Z+4,r10 + std Z+5,r11 + std Z+6,r12 + std Z+7,r13 + std Z+8,r14 + std Z+9,r15 + pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size keccakp_400_permute, .-keccakp_400_permute + +#endif diff --git a/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-keccak.c b/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-keccak.c index c3c4011..60539df 100644 --- a/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-keccak.c +++ b/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-keccak.c @@ -22,74 +22,79 @@ #include "internal-keccak.h" +#if !defined(__AVR__) + /* Faster method to compute ((x + y) % 5) that avoids the division */ static unsigned char const addMod5Table[9] = { 0, 1, 2, 3, 4, 0, 1, 2, 3 }; #define addMod5(x, y) (addMod5Table[(x) + (y)]) -void keccakp_200_permute(keccakp_200_state_t *state, unsigned rounds) +void keccakp_200_permute(keccakp_200_state_t *state) { static uint8_t const RC[18] = { 0x01, 0x82, 0x8A, 0x00, 0x8B, 0x01, 0x81, 0x09, 0x8A, 0x88, 0x09, 0x0A, 0x8B, 0x8B, 0x89, 0x03, 0x02, 0x80 }; - uint8_t B[5][5]; + uint8_t C[5]; uint8_t D; unsigned round; unsigned index, index2; - for (round = 18 - rounds; round < 18; ++round) { + for (round = 0; round < 18; ++round) { /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. To save a bit of memory, - * we use the first row of B to store C and compute D on the fly */ + * arrays of size 5 called C and D. Compute D on the fly */ for (index = 0; index < 5; ++index) { - B[0][index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; + C[index] = state->A[0][index] ^ state->A[1][index] ^ + state->A[2][index] ^ state->A[3][index] ^ + state->A[4][index]; } for (index = 0; index < 5; ++index) { - D = B[0][addMod5(index, 4)] ^ - leftRotate1_8(B[0][addMod5(index, 1)]); + D = C[addMod5(index, 4)] ^ + leftRotate1_8(C[addMod5(index, 1)]); for (index2 = 0; index2 < 5; ++index2) state->A[index2][index] ^= D; } /* Step mapping rho and pi combined into a single step. 
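For readers skimming the hunk above: the theta rewrite keeps the five column parities in a small C[5] buffer and derives D on the fly instead of materialising a full 5x5 B array. A minimal standalone C sketch of that step for the 8-bit lanes of Keccak-p[200] follows; leftRotate1_8 is spelled out here so the snippet compiles on its own, and plain % 5 stands in for the library's addMod5 lookup table.

#include <stdint.h>

#define leftRotate1_8(x) ((uint8_t)(((uint8_t)((x) << 1)) | ((uint8_t)(x) >> 7)))

static void keccakp_200_theta(uint8_t A[5][5])
{
    uint8_t C[5], D;
    unsigned x, y;

    /* Column parities */
    for (x = 0; x < 5; ++x)
        C[x] = A[0][x] ^ A[1][x] ^ A[2][x] ^ A[3][x] ^ A[4][x];

    /* D[x] = C[x-1] ^ rot1(C[x+1]), folded straight into the state */
    for (x = 0; x < 5; ++x) {
        D = C[(x + 4) % 5] ^ leftRotate1_8(C[(x + 1) % 5]);
        for (y = 0; y < 5; ++y)
            A[y][x] ^= D;
    }
}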
* Rotate all lanes by a specific offset and rearrange */ - B[0][0] = state->A[0][0]; - B[1][0] = leftRotate4_8(state->A[0][3]); - B[2][0] = leftRotate1_8(state->A[0][1]); - B[3][0] = leftRotate3_8(state->A[0][4]); - B[4][0] = leftRotate6_8(state->A[0][2]); - B[0][1] = leftRotate4_8(state->A[1][1]); - B[1][1] = leftRotate4_8(state->A[1][4]); - B[2][1] = leftRotate6_8(state->A[1][2]); - B[3][1] = leftRotate4_8(state->A[1][0]); - B[4][1] = leftRotate7_8(state->A[1][3]); - B[0][2] = leftRotate3_8(state->A[2][2]); - B[1][2] = leftRotate3_8(state->A[2][0]); - B[2][2] = leftRotate1_8(state->A[2][3]); - B[3][2] = leftRotate2_8(state->A[2][1]); - B[4][2] = leftRotate7_8(state->A[2][4]); - B[0][3] = leftRotate5_8(state->A[3][3]); - B[1][3] = leftRotate5_8(state->A[3][1]); - B[2][3] = state->A[3][4]; - B[3][3] = leftRotate7_8(state->A[3][2]); - B[4][3] = leftRotate1_8(state->A[3][0]); - B[0][4] = leftRotate6_8(state->A[4][4]); - B[1][4] = leftRotate5_8(state->A[4][2]); - B[2][4] = leftRotate2_8(state->A[4][0]); - B[3][4] = state->A[4][3]; - B[4][4] = leftRotate2_8(state->A[4][1]); + D = state->A[0][1]; + state->A[0][1] = leftRotate4_8(state->A[1][1]); + state->A[1][1] = leftRotate4_8(state->A[1][4]); + state->A[1][4] = leftRotate5_8(state->A[4][2]); + state->A[4][2] = leftRotate7_8(state->A[2][4]); + state->A[2][4] = leftRotate2_8(state->A[4][0]); + state->A[4][0] = leftRotate6_8(state->A[0][2]); + state->A[0][2] = leftRotate3_8(state->A[2][2]); + state->A[2][2] = leftRotate1_8(state->A[2][3]); + state->A[2][3] = state->A[3][4]; + state->A[3][4] = state->A[4][3]; + state->A[4][3] = leftRotate1_8(state->A[3][0]); + state->A[3][0] = leftRotate3_8(state->A[0][4]); + state->A[0][4] = leftRotate6_8(state->A[4][4]); + state->A[4][4] = leftRotate2_8(state->A[4][1]); + state->A[4][1] = leftRotate7_8(state->A[1][3]); + state->A[1][3] = leftRotate5_8(state->A[3][1]); + state->A[3][1] = leftRotate4_8(state->A[1][0]); + state->A[1][0] = leftRotate4_8(state->A[0][3]); + state->A[0][3] = leftRotate5_8(state->A[3][3]); + state->A[3][3] = leftRotate7_8(state->A[3][2]); + state->A[3][2] = leftRotate2_8(state->A[2][1]); + state->A[2][1] = leftRotate6_8(state->A[1][2]); + state->A[1][2] = leftRotate3_8(state->A[2][0]); + state->A[2][0] = leftRotate1_8(D); /* Step mapping chi. Combine each lane with two others in its row */ for (index = 0; index < 5; ++index) { + C[0] = state->A[index][0]; + C[1] = state->A[index][1]; + C[2] = state->A[index][2]; + C[3] = state->A[index][3]; + C[4] = state->A[index][4]; for (index2 = 0; index2 < 5; ++index2) { - state->A[index2][index] = - B[index2][index] ^ - ((~B[index2][addMod5(index, 1)]) & - B[index2][addMod5(index, 2)]); + state->A[index][index2] = + C[index2] ^ + ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); } } @@ -110,61 +115,64 @@ void keccakp_400_permute_host(keccakp_400_state_t *state, unsigned rounds) 0x008A, 0x0088, 0x8009, 0x000A, 0x808B, 0x008B, 0x8089, 0x8003, 0x8002, 0x0080, 0x800A, 0x000A }; - uint16_t B[5][5]; + uint16_t C[5]; uint16_t D; unsigned round; unsigned index, index2; for (round = 20 - rounds; round < 20; ++round) { /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. To save a bit of memory, - * we use the first row of B to store C and compute D on the fly */ + * arrays of size 5 called C and D. 
Compute D on the fly */ for (index = 0; index < 5; ++index) { - B[0][index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; + C[index] = state->A[0][index] ^ state->A[1][index] ^ + state->A[2][index] ^ state->A[3][index] ^ + state->A[4][index]; } for (index = 0; index < 5; ++index) { - D = B[0][addMod5(index, 4)] ^ - leftRotate1_16(B[0][addMod5(index, 1)]); + D = C[addMod5(index, 4)] ^ + leftRotate1_16(C[addMod5(index, 1)]); for (index2 = 0; index2 < 5; ++index2) state->A[index2][index] ^= D; } /* Step mapping rho and pi combined into a single step. * Rotate all lanes by a specific offset and rearrange */ - B[0][0] = state->A[0][0]; - B[1][0] = leftRotate12_16(state->A[0][3]); - B[2][0] = leftRotate1_16 (state->A[0][1]); - B[3][0] = leftRotate11_16(state->A[0][4]); - B[4][0] = leftRotate14_16(state->A[0][2]); - B[0][1] = leftRotate12_16(state->A[1][1]); - B[1][1] = leftRotate4_16 (state->A[1][4]); - B[2][1] = leftRotate6_16 (state->A[1][2]); - B[3][1] = leftRotate4_16 (state->A[1][0]); - B[4][1] = leftRotate7_16 (state->A[1][3]); - B[0][2] = leftRotate11_16(state->A[2][2]); - B[1][2] = leftRotate3_16 (state->A[2][0]); - B[2][2] = leftRotate9_16 (state->A[2][3]); - B[3][2] = leftRotate10_16(state->A[2][1]); - B[4][2] = leftRotate7_16 (state->A[2][4]); - B[0][3] = leftRotate5_16 (state->A[3][3]); - B[1][3] = leftRotate13_16(state->A[3][1]); - B[2][3] = leftRotate8_16 (state->A[3][4]); - B[3][3] = leftRotate15_16(state->A[3][2]); - B[4][3] = leftRotate9_16 (state->A[3][0]); - B[0][4] = leftRotate14_16(state->A[4][4]); - B[1][4] = leftRotate13_16(state->A[4][2]); - B[2][4] = leftRotate2_16 (state->A[4][0]); - B[3][4] = leftRotate8_16 (state->A[4][3]); - B[4][4] = leftRotate2_16 (state->A[4][1]); + D = state->A[0][1]; + state->A[0][1] = leftRotate12_16(state->A[1][1]); + state->A[1][1] = leftRotate4_16 (state->A[1][4]); + state->A[1][4] = leftRotate13_16(state->A[4][2]); + state->A[4][2] = leftRotate7_16 (state->A[2][4]); + state->A[2][4] = leftRotate2_16 (state->A[4][0]); + state->A[4][0] = leftRotate14_16(state->A[0][2]); + state->A[0][2] = leftRotate11_16(state->A[2][2]); + state->A[2][2] = leftRotate9_16 (state->A[2][3]); + state->A[2][3] = leftRotate8_16 (state->A[3][4]); + state->A[3][4] = leftRotate8_16 (state->A[4][3]); + state->A[4][3] = leftRotate9_16 (state->A[3][0]); + state->A[3][0] = leftRotate11_16(state->A[0][4]); + state->A[0][4] = leftRotate14_16(state->A[4][4]); + state->A[4][4] = leftRotate2_16 (state->A[4][1]); + state->A[4][1] = leftRotate7_16 (state->A[1][3]); + state->A[1][3] = leftRotate13_16(state->A[3][1]); + state->A[3][1] = leftRotate4_16 (state->A[1][0]); + state->A[1][0] = leftRotate12_16(state->A[0][3]); + state->A[0][3] = leftRotate5_16 (state->A[3][3]); + state->A[3][3] = leftRotate15_16(state->A[3][2]); + state->A[3][2] = leftRotate10_16(state->A[2][1]); + state->A[2][1] = leftRotate6_16 (state->A[1][2]); + state->A[1][2] = leftRotate3_16 (state->A[2][0]); + state->A[2][0] = leftRotate1_16(D); /* Step mapping chi. 
Combine each lane with two others in its row */ for (index = 0; index < 5; ++index) { + C[0] = state->A[index][0]; + C[1] = state->A[index][1]; + C[2] = state->A[index][2]; + C[3] = state->A[index][3]; + C[4] = state->A[index][4]; for (index2 = 0; index2 < 5; ++index2) { - state->A[index2][index] = - B[index2][index] ^ - ((~B[index2][addMod5(index, 1)]) & - B[index2][addMod5(index, 2)]); + state->A[index][index2] = + C[index2] ^ + ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); } } @@ -202,3 +210,5 @@ void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds) } #endif + +#endif /* !__AVR__ */ diff --git a/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-keccak.h b/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-keccak.h index 026da50..2ffef42 100644 --- a/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-keccak.h +++ b/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-keccak.h @@ -68,9 +68,8 @@ typedef union * \brief Permutes the Keccak-p[200] state. * * \param state The Keccak-p[200] state to be permuted. - * \param rounds The number of rounds to perform (up to 18). */ -void keccakp_200_permute(keccakp_200_state_t *state, unsigned rounds); +void keccakp_200_permute(keccakp_200_state_t *state); /** * \brief Permutes the Keccak-p[400] state, which is assumed to be in diff --git a/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-spongent-avr.S b/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-spongent-avr.S new file mode 100644 index 0000000..4a43458 --- /dev/null +++ b/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-spongent-avr.S @@ -0,0 +1,1677 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 256 +table_0: + .byte 238 + .byte 237 + .byte 235 + .byte 224 + .byte 226 + .byte 225 + .byte 228 + .byte 239 + .byte 231 + .byte 234 + .byte 232 + .byte 229 + .byte 233 + .byte 236 + .byte 227 + .byte 230 + .byte 222 + .byte 221 + .byte 219 + .byte 208 + .byte 210 + .byte 209 + .byte 212 + .byte 223 + .byte 215 + .byte 218 + .byte 216 + .byte 213 + .byte 217 + .byte 220 + .byte 211 + .byte 214 + .byte 190 + .byte 189 + .byte 187 + .byte 176 + .byte 178 + .byte 177 + .byte 180 + .byte 191 + .byte 183 + .byte 186 + .byte 184 + .byte 181 + .byte 185 + .byte 188 + .byte 179 + .byte 182 + .byte 14 + .byte 13 + .byte 11 + .byte 0 + .byte 2 + .byte 1 + .byte 4 + .byte 15 + .byte 7 + .byte 10 + .byte 8 + .byte 5 + .byte 9 + .byte 12 + .byte 3 + .byte 6 + .byte 46 + .byte 45 + .byte 43 + .byte 32 + .byte 34 + .byte 33 + .byte 36 + .byte 47 + .byte 39 + .byte 42 + .byte 40 + .byte 37 + .byte 41 + .byte 44 + .byte 35 + .byte 38 + .byte 30 + .byte 29 + .byte 27 + .byte 16 + .byte 18 + .byte 17 + .byte 20 + .byte 31 + .byte 23 + .byte 26 + .byte 24 + .byte 21 + .byte 25 + .byte 28 + .byte 19 + .byte 22 + .byte 78 + .byte 77 + .byte 75 + .byte 64 + .byte 66 + .byte 65 + .byte 68 + .byte 79 + .byte 71 + .byte 74 + .byte 72 + .byte 69 + .byte 73 + .byte 76 + .byte 67 + .byte 70 + .byte 254 + .byte 253 + .byte 251 + .byte 240 + .byte 242 + .byte 241 + .byte 244 + .byte 255 + .byte 247 + .byte 250 + .byte 248 + .byte 245 + .byte 249 + .byte 252 + .byte 243 + .byte 246 + .byte 126 + .byte 125 + .byte 123 + .byte 112 + .byte 114 + .byte 113 + .byte 116 + .byte 127 + .byte 119 + .byte 122 + .byte 120 + .byte 117 + .byte 121 + .byte 124 + .byte 115 + .byte 118 + .byte 
174 + .byte 173 + .byte 171 + .byte 160 + .byte 162 + .byte 161 + .byte 164 + .byte 175 + .byte 167 + .byte 170 + .byte 168 + .byte 165 + .byte 169 + .byte 172 + .byte 163 + .byte 166 + .byte 142 + .byte 141 + .byte 139 + .byte 128 + .byte 130 + .byte 129 + .byte 132 + .byte 143 + .byte 135 + .byte 138 + .byte 136 + .byte 133 + .byte 137 + .byte 140 + .byte 131 + .byte 134 + .byte 94 + .byte 93 + .byte 91 + .byte 80 + .byte 82 + .byte 81 + .byte 84 + .byte 95 + .byte 87 + .byte 90 + .byte 88 + .byte 85 + .byte 89 + .byte 92 + .byte 83 + .byte 86 + .byte 158 + .byte 157 + .byte 155 + .byte 144 + .byte 146 + .byte 145 + .byte 148 + .byte 159 + .byte 151 + .byte 154 + .byte 152 + .byte 149 + .byte 153 + .byte 156 + .byte 147 + .byte 150 + .byte 206 + .byte 205 + .byte 203 + .byte 192 + .byte 194 + .byte 193 + .byte 196 + .byte 207 + .byte 199 + .byte 202 + .byte 200 + .byte 197 + .byte 201 + .byte 204 + .byte 195 + .byte 198 + .byte 62 + .byte 61 + .byte 59 + .byte 48 + .byte 50 + .byte 49 + .byte 52 + .byte 63 + .byte 55 + .byte 58 + .byte 56 + .byte 53 + .byte 57 + .byte 60 + .byte 51 + .byte 54 + .byte 110 + .byte 109 + .byte 107 + .byte 96 + .byte 98 + .byte 97 + .byte 100 + .byte 111 + .byte 103 + .byte 106 + .byte 104 + .byte 101 + .byte 105 + .byte 108 + .byte 99 + .byte 102 + + .text +.global spongent160_permute + .type spongent160_permute, @function +spongent160_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 +.L__stack_usage = 16 + ld r22,Z + ldd r23,Z+1 + ldd r28,Z+2 + ldd r29,Z+3 + ldd r2,Z+4 + ldd r3,Z+5 + ldd r4,Z+6 + ldd r5,Z+7 + ldd r6,Z+8 + ldd r7,Z+9 + ldd r8,Z+10 + ldd r9,Z+11 + ldd r10,Z+12 + ldd r11,Z+13 + ldd r12,Z+14 + ldd r13,Z+15 + ldd r14,Z+16 + ldd r15,Z+17 + ldd r24,Z+18 + ldd r25,Z+19 + movw r26,r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r21,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r21 +#endif + ldi r18,80 + ldi r19,117 + ldi r20,174 +25: + eor r22,r19 + eor r25,r20 + lsl r19 + bst r19,7 + bld r19,0 + mov r0,r1 + bst r19,6 + bld r0,0 + eor r19,r0 + andi r19,127 + lsr r20 + bst r20,0 + bld r20,7 + mov r0,r1 + bst r20,1 + bld r0,7 + eor r20,r0 + andi r20,254 + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r28 +#if defined(RAMPZ) + elpm r28,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r28,Z +#elif defined(__AVR_TINY__) + ld r28,Z +#else + lpm + mov r28,r0 +#endif + mov r30,r29 +#if defined(RAMPZ) + elpm r29,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r29,Z +#elif defined(__AVR_TINY__) + ld r29,Z +#else + lpm + mov r29,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + bst r22,1 + bld r0,0 + bst r22,4 + bld r22,1 + bst r28,0 + bld r22,4 + bst r6,0 + bld r28,0 + bst r10,1 + bld r6,0 + bst r6,6 + bld r10,1 + bst r13,1 + bld r6,6 + bst r22,7 + bld r13,1 + bst r29,4 + bld r22,7 + bst r12,0 + bld r29,4 + bst r14,2 + bld r12,0 + bst r3,3 + bld r14,2 + bst r23,5 + bld r3,3 + bst r4,4 + bld r23,5 + bst r4,1 + bld r4,4 + bst r2,5 + bld r4,1 + bst r24,4 + bld r2,5 + bst r12,3 + bld r24,4 + bst r15,6 + bld r12,3 + bst r9,3 + bld r15,6 + bst r3,6 + bld r9,3 + bst r29,1 + bld r3,6 + bst r10,4 + bld r29,1 + bst r8,2 + bld r10,4 + bst r23,2 + bld r8,2 + bst r3,0 + bld r23,2 + bst r0,0 + bld r3,0 + bst r22,2 + bld r0,0 + bst r23,0 + bld r22,2 + bst r2,0 + bld r23,0 + bst r14,0 + bld r2,0 + bst r2,3 + bld r14,0 + bst r15,4 + bld r2,3 + bst r8,3 + bld r15,4 + bst r23,6 + bld r8,3 + bst r5,0 + bld r23,6 + bst r6,1 + bld r5,0 + bst r10,5 + bld r6,1 + bst r8,6 + bld r10,5 + bst r29,2 + bld r8,6 + bst r11,0 + bld r29,2 + bst r10,2 + bld r11,0 + bst r7,2 + bld r10,2 + bst r15,1 + bld r7,2 + bst r6,7 + bld r15,1 + bst r13,5 + bld r6,7 + bst r28,7 + bld r13,5 + bst r9,4 + bld r28,7 + bst r4,2 + bld r9,4 + bst r3,1 + bld r4,2 + bst r22,5 + bld r3,1 + bst r28,4 + bld r22,5 + bst r8,0 + bld r28,4 + bst r0,0 + bld r8,0 + bst r22,3 + bld r0,0 + bst r23,4 + bld r22,3 + bst r4,0 + bld r23,4 + bst r2,1 + bld r4,0 + bst r14,4 + bld r2,1 + bst r4,3 + bld r14,4 + bst r3,5 + bld r4,3 + bst r28,5 + bld r3,5 + bst r8,4 + bld r28,5 + bst 
r28,2 + bld r8,4 + bst r7,0 + bld r28,2 + bst r14,1 + bld r7,0 + bst r2,7 + bld r14,1 + bst r25,4 + bld r2,7 + bst r24,3 + bld r25,4 + bst r11,7 + bld r24,3 + bst r13,6 + bld r11,7 + bst r29,3 + bld r13,6 + bst r11,4 + bld r29,3 + bst r12,2 + bld r11,4 + bst r15,2 + bld r12,2 + bst r7,3 + bld r15,2 + bst r15,5 + bld r7,3 + bst r8,7 + bld r15,5 + bst r29,6 + bld r8,7 + bst r13,0 + bld r29,6 + bst r0,0 + bld r13,0 + bst r22,6 + bld r0,0 + bst r29,0 + bld r22,6 + bst r10,0 + bld r29,0 + bst r6,2 + bld r10,0 + bst r11,1 + bld r6,2 + bst r10,6 + bld r11,1 + bst r9,2 + bld r10,6 + bst r3,2 + bld r9,2 + bst r23,1 + bld r3,2 + bst r2,4 + bld r23,1 + bst r24,0 + bld r2,4 + bst r10,3 + bld r24,0 + bst r7,6 + bld r10,3 + bst r25,1 + bld r7,6 + bst r14,7 + bld r25,1 + bst r5,7 + bld r14,7 + bst r9,5 + bld r5,7 + bst r4,6 + bld r9,5 + bst r5,1 + bld r4,6 + bst r6,5 + bld r5,1 + bst r12,5 + bld r6,5 + bst r24,6 + bld r12,5 + bst r13,3 + bld r24,6 + bst r23,7 + bld r13,3 + bst r5,4 + bld r23,7 + bst r8,1 + bld r5,4 + bst r0,0 + bld r8,1 + bst r23,3 + bld r0,0 + bst r3,4 + bld r23,3 + bst r28,1 + bld r3,4 + bst r6,4 + bld r28,1 + bst r12,1 + bld r6,4 + bst r14,6 + bld r12,1 + bst r5,3 + bld r14,6 + bst r7,5 + bld r5,3 + bst r24,5 + bld r7,5 + bst r12,7 + bld r24,5 + bst r25,6 + bld r12,7 + bst r25,3 + bld r25,6 + bst r15,7 + bld r25,3 + bst r9,7 + bld r15,7 + bst r5,6 + bld r9,7 + bst r9,1 + bld r5,6 + bst r2,6 + bld r9,1 + bst r25,0 + bld r2,6 + bst r14,3 + bld r25,0 + bst r3,7 + bld r14,3 + bst r29,5 + bld r3,7 + bst r12,4 + bld r29,5 + bst r24,2 + bld r12,4 + bst r11,3 + bld r24,2 + bst r11,6 + bld r11,3 + bst r13,2 + bld r11,6 + bst r0,0 + bld r13,2 + bst r28,3 + bld r0,0 + bst r7,4 + bld r28,3 + bst r24,1 + bld r7,4 + bst r10,7 + bld r24,1 + bst r9,6 + bld r10,7 + bst r5,2 + bld r9,6 + bst r7,1 + bld r5,2 + bst r14,5 + bld r7,1 + bst r4,7 + bld r14,5 + bst r5,5 + bld r4,7 + bst r8,5 + bld r5,5 + bst r28,6 + bld r8,5 + bst r9,0 + bld r28,6 + bst r2,2 + bld r9,0 + bst r15,0 + bld r2,2 + bst r6,3 + bld r15,0 + bst r11,5 + bld r6,3 + bst r12,6 + bld r11,5 + bst r25,2 + bld r12,6 + bst r15,3 + bld r25,2 + bst r7,7 + bld r15,3 + bst r25,5 + bld r7,7 + bst r24,7 + bld r25,5 + bst r13,7 + bld r24,7 + bst r29,7 + bld r13,7 + bst r13,4 + bld r29,7 + bst r0,0 + bld r13,4 + dec r18 + breq 5389f + rjmp 25b +5389: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + st X+,r22 + st X+,r23 + st X+,r28 + st X+,r29 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + st X+,r24 + st X+,r25 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size spongent160_permute, .-spongent160_permute + + .text +.global spongent176_permute + .type spongent176_permute, @function +spongent176_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ld r22,Z + ldd r23,Z+1 + ldd r28,Z+2 + ldd r29,Z+3 + ldd r2,Z+4 + ldd r3,Z+5 + ldd r4,Z+6 + ldd r5,Z+7 + ldd r6,Z+8 + ldd r7,Z+9 + ldd r8,Z+10 + ldd r9,Z+11 + ldd r10,Z+12 + ldd r11,Z+13 + ldd r12,Z+14 + ldd r13,Z+15 + ldd r14,Z+16 + ldd r15,Z+17 + ldd r24,Z+18 + ldd r25,Z+19 + ldd r16,Z+20 + ldd r17,Z+21 + movw r26,r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) 
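The byte-wide lookups in the generated code above (mov r30,<byte> followed by lpm/elpm) replace each state byte with table_0[byte]. table_0 is the 4-bit Spongent S-box applied to both nibbles of the index at once, and it is 256-byte aligned (.p2align 8) so only the low byte of the Z pointer changes between lookups; the RAMPZ save/restore is only needed on parts with more than 64K of flash. An illustrative C equivalent of one such lookup, with the nibble S-box read off from table_0 itself:

#include <stdint.h>

static const uint8_t sbox[16] = {
    0xE, 0xD, 0xB, 0x0, 0x2, 0x1, 0x4, 0xF,
    0x7, 0xA, 0x8, 0x5, 0x9, 0xC, 0x3, 0x6
};

/* table_0[b] == sbox_byte(b): both nibbles pass through the S-box in one table read */
static uint8_t sbox_byte(uint8_t b)
{
    return (uint8_t)((sbox[b >> 4] << 4) | sbox[b & 0x0F]);
}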
+ ldi r21,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r21 +#endif + ldi r18,90 + ldi r19,69 + ldi r20,162 +27: + eor r22,r19 + eor r17,r20 + lsl r19 + bst r19,7 + bld r19,0 + mov r0,r1 + bst r19,6 + bld r0,0 + eor r19,r0 + andi r19,127 + lsr r20 + bst r20,0 + bld r20,7 + mov r0,r1 + bst r20,1 + bld r0,7 + eor r20,r0 + andi r20,254 + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r28 +#if defined(RAMPZ) + elpm r28,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r28,Z +#elif defined(__AVR_TINY__) + ld r28,Z +#else + lpm + mov r28,r0 +#endif + mov r30,r29 +#if defined(RAMPZ) + elpm r29,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r29,Z +#elif defined(__AVR_TINY__) + ld r29,Z +#else + lpm + mov r29,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + bst r22,1 + bld r0,0 + bst r22,4 + bld r22,1 + bst r28,0 + bld r22,4 + bst r6,0 + bld r28,0 + bst r8,1 + bld r6,0 + bst r24,5 + bld r8,1 + bst r6,7 + bld r24,5 + bst r11,5 + bld r6,7 + bst r8,6 + bld r11,5 + bst r17,1 + bld r8,6 + bst r24,7 + bld r17,1 + bst r7,7 + bld r24,7 + bst r15,5 + bld r7,7 + bst r2,7 + bld r15,5 + bst r25,4 + bld r2,7 + bst r10,3 + bld r25,4 + bst r3,6 + bld r10,3 + bst r23,1 + bld r3,6 + bst r2,4 + bld r23,1 + bst r24,0 + bld r2,4 + bst r4,3 + bld r24,0 + bst r29,5 + bld r4,3 + bst r12,4 + bld r29,5 + bst r12,2 + bld r12,4 + bst r11,2 + bld r12,2 + bst r7,2 + bld r11,2 + bst r13,1 + bld r7,2 + bst r14,6 + bld r13,1 + bst r23,3 + bld r14,6 + bst r3,4 + bld r23,3 + bst r0,0 + bld r3,4 + bst r22,2 + bld r0,0 + bst r23,0 + bld r22,2 + bst r2,0 + bld r23,0 + bst r14,0 + bld r2,0 + bst r16,2 + bld r14,0 + bst r13,3 + bld r16,2 + bst r15,6 + bld r13,3 + bst r3,3 + bld r15,6 + bst r17,4 + bld r3,3 + bst r16,3 + bld r17,4 + bst r13,7 + bld r16,3 + bst r25,6 + bld r13,7 + bst r11,3 + bld r25,6 + bst r7,6 + bld r11,3 + bst r15,1 + bld r7,6 + bst r28,7 + bld r15,1 + bst r9,4 + bld r28,7 + bst r28,2 + bld r9,4 + bst r7,0 + bld r28,2 + bst r12,1 + bld r7,0 + bst r10,6 + bld r12,1 + bst r5,2 + bld r10,6 + bst r5,1 + bld r5,2 + bst r4,5 + bld r5,1 + bst r2,5 + bld r4,5 + bst r24,4 + bld r2,5 + bst r6,3 + bld r24,4 + bst r9,5 + bld r6,3 + bst r28,6 + bld r9,5 + bst r9,0 + bld r28,6 + bst r0,0 + bld r9,0 + bst r22,3 + bld r0,0 + bst r23,4 + bld r22,3 + bst r4,0 + bld r23,4 + bst r28,1 + bld r4,0 + bst r6,4 + bld r28,1 + bst r10,1 + bld r6,4 + bst r2,6 + bld r10,1 + bst r25,0 + bld r2,6 + bst r8,3 + bld r25,0 + bst r25,5 + bld r8,3 + bst r10,7 + bld r25,5 + bst r5,6 + bld r10,7 + bst r7,1 + bld r5,6 + bst r12,5 + bld r7,1 + bst r12,6 + bld r12,5 + bst r13,2 + bld r12,6 + bst r15,2 + bld r13,2 + bst r29,3 + bld r15,2 + bst r11,4 + bld r29,3 + bst r8,2 + bld r11,4 + bst r25,1 + bld r8,2 + bst r8,7 + bld r25,1 + bst r17,5 + bld r8,7 + bst r16,7 + bld r17,5 + bst r15,7 + bld r16,7 + bst r3,7 + bld r15,7 + bst r23,5 + bld r3,7 + bst r4,4 + bld r23,5 + bst r2,1 + bld r4,4 + bst r14,4 + bld r2,1 + bst r0,0 + bld r14,4 + bst r22,5 + bld r0,0 + bst r28,4 + bld r22,5 + bst r8,0 + bld r28,4 + bst r24,1 + bld r8,0 + bst r4,7 + bld r24,1 + bst r3,5 + bld r4,7 + bst r0,0 + bld r3,5 + bst r22,6 + bld r0,0 + bst r29,0 + bld r22,6 + bst r10,0 + bld r29,0 + bst r2,2 + bld r10,0 + bst r15,0 + bld r2,2 + bst r28,3 + bld r15,0 + bst r7,4 + bld r28,3 + bst r14,1 + bld r7,4 + bst r16,6 + bld r14,1 + bst r15,3 + bld r16,6 + bst r29,7 + bld r15,3 + bst r13,4 + bld r29,7 + bst r24,2 + bld r13,4 + bst r5,3 + bld r24,2 + bst r5,5 + bld r5,3 + bst r6,5 + bld r5,5 + bst r10,5 + bld r6,5 + bst r4,6 + bld r10,5 + bst r3,1 + bld r4,6 + bst r16,4 + bld r3,1 + bst r14,3 + bld r16,4 + bst r17,6 + bld r14,3 + bst r17,3 + bld r17,6 + bst r25,7 + bld r17,3 + bst r11,7 + bld r25,7 + bst r9,6 + bld r11,7 + bst r29,2 + bld r9,6 + bst r11,0 + bld r29,2 + bst r6,2 + bld r11,0 + bst r9,1 + bld r6,2 + bst r0,0 + bld r9,1 + bst r22,7 + bld r0,0 + bst r29,4 + bld r22,7 + bst r12,0 + bld 
r29,4 + bst r10,2 + bld r12,0 + bst r3,2 + bld r10,2 + bst r17,0 + bld r3,2 + bst r24,3 + bld r17,0 + bst r5,7 + bld r24,3 + bst r7,5 + bld r5,7 + bst r14,5 + bld r7,5 + bst r0,0 + bld r14,5 + bst r23,2 + bld r0,0 + bst r3,0 + bld r23,2 + bst r16,0 + bld r3,0 + bst r12,3 + bld r16,0 + bst r11,6 + bld r12,3 + bst r9,2 + bld r11,6 + bst r0,0 + bld r9,2 + bst r23,6 + bld r0,0 + bst r5,0 + bld r23,6 + bst r4,1 + bld r5,0 + bst r28,5 + bld r4,1 + bst r8,4 + bld r28,5 + bst r16,1 + bld r8,4 + bst r12,7 + bld r16,1 + bst r13,6 + bld r12,7 + bst r25,2 + bld r13,6 + bst r9,3 + bld r25,2 + bst r0,0 + bld r9,3 + bst r23,7 + bld r0,0 + bst r5,4 + bld r23,7 + bst r6,1 + bld r5,4 + bst r8,5 + bld r6,1 + bst r16,5 + bld r8,5 + bst r14,7 + bld r16,5 + bst r0,0 + bld r14,7 + bst r29,1 + bld r0,0 + bst r10,4 + bld r29,1 + bst r4,2 + bld r10,4 + bst r0,0 + bld r4,2 + bst r29,6 + bld r0,0 + bst r13,0 + bld r29,6 + bst r14,2 + bld r13,0 + bst r17,2 + bld r14,2 + bst r25,3 + bld r17,2 + bst r9,7 + bld r25,3 + bst r0,0 + bld r9,7 + bst r2,3 + bld r0,0 + bst r15,4 + bld r2,3 + bst r0,0 + bld r15,4 + bst r6,6 + bld r0,0 + bst r11,1 + bld r6,6 + bst r0,0 + bld r11,1 + bst r7,3 + bld r0,0 + bst r13,5 + bld r7,3 + bst r24,6 + bld r13,5 + bst r0,0 + bld r24,6 + dec r18 + breq 5445f + rjmp 27b +5445: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + st X+,r22 + st X+,r23 + st X+,r28 + st X+,r29 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + st X+,r24 + st X+,r25 + st X+,r16 + st X+,r17 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size spongent176_permute, .-spongent176_permute + +#endif diff --git a/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-spongent.c b/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-spongent.c index 69a8ecb..8e0d57d 100644 --- a/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-spongent.c +++ b/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-spongent.c @@ -22,6 +22,8 @@ #include "internal-spongent.h" +#if !defined(__AVR__) + /** * \brief Applies the Spongent-pi S-box in parallel to the 8 nibbles * of a 32-bit word. @@ -344,3 +346,5 @@ void spongent176_permute(spongent176_state_t *state) le_store_word16(state->B + 20, x5); /* Last word is only 16 bits */ #endif } + +#endif /* !__AVR__ */ diff --git a/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-util.h b/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-util.h +++ b/elephant/Implementations/crypto_aead/elephant160v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
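The comment above explains the scheme used by the composed macros: on AVR only 1-bit rotations and whole-byte moves are cheap, so every other rotation count is built from those. A self-contained illustration of the idea, where a rotate-left-by-12 is composed from a rotate-left-by-16 followed by four rotate-right-by-1 steps; rotl32/rotr32 are generic stand-ins for the leftRotate/rightRotate macros:

#include <assert.h>
#include <stdint.h>

static uint32_t rotl32(uint32_t x, unsigned b) { return (x << b) | (x >> (32 - b)); }
static uint32_t rotr32(uint32_t x, unsigned b) { return (x >> b) | (x << (32 - b)); }

int main(void)
{
    uint32_t x = 0x12345678;
    uint32_t composed = rotr32(rotr32(rotr32(rotr32(rotl32(x, 16), 1), 1), 1), 1);
    assert(composed == rotl32(x, 12));   /* same result, cheaper on an 8-bit core */
    return 0;
}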
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/aead-common.c b/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/aead-common.h b/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. 
- * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. 
- * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/api.h b/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/api.h deleted file mode 100644 index 32c9622..0000000 --- a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 12 -#define CRYPTO_ABYTES 8 -#define CRYPTO_NOOVERLAP 1 diff --git a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/elephant.c b/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/elephant.c deleted file mode 100644 index 2f7abb3..0000000 --- a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/elephant.c +++ /dev/null @@ -1,881 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
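The tag-checking helpers removed above rely on one trick that is easy to miss: the XOR of every pair of tag bytes is OR-accumulated, and the accumulator is then turned into a match/mismatch mask without ever branching on secret data, so the comparison takes the same time whether or not the tags agree. A standalone sketch of that trick using unsigned arithmetic (the library code itself uses a signed shift so the same mask can also clear the plaintext on failure):

#include <stdint.h>

/* Returns 1 if the two tags are equal, 0 otherwise, in constant time. */
static int tags_equal_ct(const uint8_t *a, const uint8_t *b, unsigned len)
{
    unsigned accum = 0;
    while (len-- > 0)
        accum |= (unsigned)(*a++ ^ *b++);
    /* accum is 0 only if every byte matched; the wrap-around of (accum - 1)
       then sets bit 8, otherwise (accum is at most 0xFF) the shift gives 0. */
    return (int)(((accum - 1) >> 8) & 1U);
}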
- */ - -#include "elephant.h" -#include "internal-keccak.h" -#include "internal-spongent.h" -#include - -aead_cipher_t const dumbo_cipher = { - "Dumbo", - DUMBO_KEY_SIZE, - DUMBO_NONCE_SIZE, - DUMBO_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - dumbo_aead_encrypt, - dumbo_aead_decrypt -}; - -aead_cipher_t const jumbo_cipher = { - "Jumbo", - JUMBO_KEY_SIZE, - JUMBO_NONCE_SIZE, - JUMBO_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - jumbo_aead_encrypt, - jumbo_aead_decrypt -}; - -aead_cipher_t const delirium_cipher = { - "Delirium", - DELIRIUM_KEY_SIZE, - DELIRIUM_NONCE_SIZE, - DELIRIUM_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - delirium_aead_encrypt, - delirium_aead_decrypt -}; - -/** - * \brief Applies the Dumbo LFSR to the mask. - * - * \param out The output mask. - * \param in The input mask. - */ -static void dumbo_lfsr - (unsigned char out[SPONGENT160_STATE_SIZE], - const unsigned char in[SPONGENT160_STATE_SIZE]) -{ - unsigned char temp = - leftRotate3_8(in[0]) ^ (in[3] << 7) ^ (in[13] >> 7); - unsigned index; - for (index = 0; index < SPONGENT160_STATE_SIZE - 1; ++index) - out[index] = in[index + 1]; - out[SPONGENT160_STATE_SIZE - 1] = temp; -} - -/** - * \brief Processes the nonce and associated data for Dumbo. - * - * \param state Points to the Spongent-pi[160] state. - * \param mask Points to the initial mask value. - * \param next Points to the next mask value. - * \param tag Points to the ongoing tag that is being computed. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data. - */ -static void dumbo_process_ad - (spongent160_state_t *state, - unsigned char mask[SPONGENT160_STATE_SIZE], - unsigned char next[SPONGENT160_STATE_SIZE], - unsigned char tag[DUMBO_TAG_SIZE], - const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned posn, size; - - /* We need the "previous" and "next" masks in each step. 
- * Compare the first such values */ - dumbo_lfsr(next, mask); - dumbo_lfsr(next, next); - - /* Absorb the nonce into the state */ - lw_xor_block_2_src(state->B, mask, next, SPONGENT160_STATE_SIZE); - lw_xor_block(state->B, npub, DUMBO_NONCE_SIZE); - - /* Absorb the rest of the associated data */ - posn = DUMBO_NONCE_SIZE; - while (adlen > 0) { - size = SPONGENT160_STATE_SIZE - posn; - if (size <= adlen) { - /* Process a complete block */ - lw_xor_block(state->B + posn, ad, size); - spongent160_permute(state); - lw_xor_block(state->B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state->B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state->B, DUMBO_TAG_SIZE); - dumbo_lfsr(mask, mask); - dumbo_lfsr(next, next); - lw_xor_block_2_src(state->B, mask, next, SPONGENT160_STATE_SIZE); - posn = 0; - } else { - /* Process the partial block at the end of the associated data */ - size = (unsigned)adlen; - lw_xor_block(state->B + posn, ad, size); - posn += size; - } - ad += size; - adlen -= size; - } - - /* Pad and absorb the final block */ - state->B[posn] ^= 0x01; - spongent160_permute(state); - lw_xor_block(state->B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state->B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state->B, DUMBO_TAG_SIZE); -} - -int dumbo_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - spongent160_state_t state; - unsigned char start[SPONGENT160_STATE_SIZE]; - unsigned char mask[SPONGENT160_STATE_SIZE]; - unsigned char next[SPONGENT160_STATE_SIZE]; - unsigned char tag[DUMBO_TAG_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + DUMBO_TAG_SIZE; - - /* Hash the key and generate the initial mask */ - memcpy(state.B, k, DUMBO_KEY_SIZE); - memset(state.B + DUMBO_KEY_SIZE, 0, sizeof(state.B) - DUMBO_KEY_SIZE); - spongent160_permute(&state); - memcpy(mask, state.B, DUMBO_KEY_SIZE); - memset(mask + DUMBO_KEY_SIZE, 0, sizeof(mask) - DUMBO_KEY_SIZE); - memcpy(start, mask, sizeof(mask)); - - /* Tag starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Authenticate the nonce and the associated data */ - dumbo_process_ad(&state, mask, next, tag, npub, ad, adlen); - - /* Reset back to the starting mask for the encryption phase */ - memcpy(mask, start, sizeof(mask)); - - /* Encrypt and authenticate the payload */ - while (mlen >= SPONGENT160_STATE_SIZE) { - /* Encrypt using the current mask */ - memcpy(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, npub, DUMBO_NONCE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, m, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, mask, SPONGENT160_STATE_SIZE); - memcpy(c, state.B, SPONGENT160_STATE_SIZE); - - /* Authenticate using the next mask */ - dumbo_lfsr(next, mask); - lw_xor_block(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, next, SPONGENT160_STATE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state.B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, DUMBO_TAG_SIZE); - - /* Advance to the next block */ - memcpy(mask, next, SPONGENT160_STATE_SIZE); - c += SPONGENT160_STATE_SIZE; - m += SPONGENT160_STATE_SIZE; - mlen -= SPONGENT160_STATE_SIZE; - } - if (mlen > 0) { - /* Encrypt the last block using the current mask */ - unsigned temp = (unsigned)mlen; - memcpy(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, 
npub, DUMBO_NONCE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, m, temp); - lw_xor_block(state.B, mask, SPONGENT160_STATE_SIZE); - memcpy(c, state.B, temp); - - /* Authenticate the last block using the next mask */ - dumbo_lfsr(next, mask); - state.B[temp] = 0x01; - memset(state.B + temp + 1, 0, SPONGENT160_STATE_SIZE - temp - 1); - lw_xor_block(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, next, SPONGENT160_STATE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state.B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, DUMBO_TAG_SIZE); - c += temp; - } else if (*clen != DUMBO_TAG_SIZE) { - /* Pad and authenticate when the last block is aligned */ - dumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT160_STATE_SIZE); - state.B[0] ^= 0x01; - spongent160_permute(&state); - lw_xor_block(state.B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state.B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, DUMBO_TAG_SIZE); - } - - /* Generate the authentication tag */ - memcpy(c, tag, DUMBO_TAG_SIZE); - return 0; -} - -int dumbo_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - spongent160_state_t state; - unsigned char *mtemp = m; - unsigned char start[SPONGENT160_STATE_SIZE]; - unsigned char mask[SPONGENT160_STATE_SIZE]; - unsigned char next[SPONGENT160_STATE_SIZE]; - unsigned char tag[DUMBO_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < DUMBO_TAG_SIZE) - return -1; - *mlen = clen - DUMBO_TAG_SIZE; - - /* Hash the key and generate the initial mask */ - memcpy(state.B, k, DUMBO_KEY_SIZE); - memset(state.B + DUMBO_KEY_SIZE, 0, sizeof(state.B) - DUMBO_KEY_SIZE); - spongent160_permute(&state); - memcpy(mask, state.B, DUMBO_KEY_SIZE); - memset(mask + DUMBO_KEY_SIZE, 0, sizeof(mask) - DUMBO_KEY_SIZE); - memcpy(start, mask, sizeof(mask)); - - /* Tag starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Authenticate the nonce and the associated data */ - dumbo_process_ad(&state, mask, next, tag, npub, ad, adlen); - - /* Reset back to the starting mask for the encryption phase */ - memcpy(mask, start, sizeof(mask)); - - /* Decrypt and authenticate the payload */ - clen -= DUMBO_TAG_SIZE; - while (clen >= SPONGENT160_STATE_SIZE) { - /* Authenticate using the next mask */ - dumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, c, SPONGENT160_STATE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state.B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, DUMBO_TAG_SIZE); - - /* Decrypt using the current mask */ - memcpy(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, npub, DUMBO_NONCE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block_2_src(m, state.B, c, SPONGENT160_STATE_SIZE); - - /* Advance to the next block */ - memcpy(mask, next, SPONGENT160_STATE_SIZE); - c += SPONGENT160_STATE_SIZE; - m += SPONGENT160_STATE_SIZE; - clen -= SPONGENT160_STATE_SIZE; - } - if (clen > 0) { - /* Authenticate the last block using the next mask */ - unsigned temp = (unsigned)clen; - dumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT160_STATE_SIZE); - 
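/* Note on the partial-block path that follows in dumbo_aead_decrypt: the
   remaining "temp" ciphertext bytes are absorbed into the masked state, a
   0x01 padding byte is XORed in at position temp, and after the permutation
   the first DUMBO_TAG_SIZE bytes of the re-masked output are folded into the
   running tag; the same ciphertext bytes are then decrypted separately under
   the current mask. */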
lw_xor_block(state.B, c, temp); - state.B[temp] ^= 0x01; - spongent160_permute(&state); - lw_xor_block(state.B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state.B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, DUMBO_TAG_SIZE); - - /* Decrypt the last block using the current mask */ - memcpy(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, npub, DUMBO_NONCE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, mask, temp); - lw_xor_block_2_src(m, state.B, c, temp); - c += temp; - } else if (*mlen != 0) { - /* Pad and authenticate when the last block is aligned */ - dumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT160_STATE_SIZE); - state.B[0] ^= 0x01; - spongent160_permute(&state); - lw_xor_block(state.B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state.B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, DUMBO_TAG_SIZE); - } - - /* Check the authentication tag */ - return aead_check_tag(mtemp, *mlen, tag, c, DUMBO_TAG_SIZE); -} - -/** - * \brief Applies the Jumbo LFSR to the mask. - * - * \param out The output mask. - * \param in The input mask. - */ -static void jumbo_lfsr - (unsigned char out[SPONGENT176_STATE_SIZE], - const unsigned char in[SPONGENT176_STATE_SIZE]) -{ - unsigned char temp = - leftRotate1_8(in[0]) ^ (in[3] << 7) ^ (in[19] >> 7); - unsigned index; - for (index = 0; index < SPONGENT176_STATE_SIZE - 1; ++index) - out[index] = in[index + 1]; - out[SPONGENT176_STATE_SIZE - 1] = temp; -} - -/** - * \brief Processes the nonce and associated data for Jumbo. - * - * \param state Points to the Spongent-pi[170] state. - * \param mask Points to the initial mask value. - * \param next Points to the next mask value. - * \param tag Points to the ongoing tag that is being computed. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data. - */ -static void jumbo_process_ad - (spongent176_state_t *state, - unsigned char mask[SPONGENT176_STATE_SIZE], - unsigned char next[SPONGENT176_STATE_SIZE], - unsigned char tag[JUMBO_TAG_SIZE], - const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned posn, size; - - /* We need the "previous" and "next" masks in each step. 
- * Compare the first such values */ - jumbo_lfsr(next, mask); - jumbo_lfsr(next, next); - - /* Absorb the nonce into the state */ - lw_xor_block_2_src(state->B, mask, next, SPONGENT176_STATE_SIZE); - lw_xor_block(state->B, npub, JUMBO_NONCE_SIZE); - - /* Absorb the rest of the associated data */ - posn = JUMBO_NONCE_SIZE; - while (adlen > 0) { - size = SPONGENT176_STATE_SIZE - posn; - if (size <= adlen) { - /* Process a complete block */ - lw_xor_block(state->B + posn, ad, size); - spongent176_permute(state); - lw_xor_block(state->B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state->B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state->B, JUMBO_TAG_SIZE); - jumbo_lfsr(mask, mask); - jumbo_lfsr(next, next); - lw_xor_block_2_src(state->B, mask, next, SPONGENT176_STATE_SIZE); - posn = 0; - } else { - /* Process the partial block at the end of the associated data */ - size = (unsigned)adlen; - lw_xor_block(state->B + posn, ad, size); - posn += size; - } - ad += size; - adlen -= size; - } - - /* Pad and absorb the final block */ - state->B[posn] ^= 0x01; - spongent176_permute(state); - lw_xor_block(state->B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state->B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state->B, JUMBO_TAG_SIZE); -} - -int jumbo_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - spongent176_state_t state; - unsigned char start[SPONGENT176_STATE_SIZE]; - unsigned char mask[SPONGENT176_STATE_SIZE]; - unsigned char next[SPONGENT176_STATE_SIZE]; - unsigned char tag[JUMBO_TAG_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + JUMBO_TAG_SIZE; - - /* Hash the key and generate the initial mask */ - memcpy(state.B, k, JUMBO_KEY_SIZE); - memset(state.B + JUMBO_KEY_SIZE, 0, sizeof(state.B) - JUMBO_KEY_SIZE); - spongent176_permute(&state); - memcpy(mask, state.B, JUMBO_KEY_SIZE); - memset(mask + JUMBO_KEY_SIZE, 0, sizeof(mask) - JUMBO_KEY_SIZE); - memcpy(start, mask, sizeof(mask)); - - /* Tag starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Authenticate the nonce and the associated data */ - jumbo_process_ad(&state, mask, next, tag, npub, ad, adlen); - - /* Reset back to the starting mask for the encryption phase */ - memcpy(mask, start, sizeof(mask)); - - /* Encrypt and authenticate the payload */ - while (mlen >= SPONGENT176_STATE_SIZE) { - /* Encrypt using the current mask */ - memcpy(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, npub, JUMBO_NONCE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, m, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, mask, SPONGENT176_STATE_SIZE); - memcpy(c, state.B, SPONGENT176_STATE_SIZE); - - /* Authenticate using the next mask */ - jumbo_lfsr(next, mask); - lw_xor_block(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, next, SPONGENT176_STATE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state.B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, JUMBO_TAG_SIZE); - - /* Advance to the next block */ - memcpy(mask, next, SPONGENT176_STATE_SIZE); - c += SPONGENT176_STATE_SIZE; - m += SPONGENT176_STATE_SIZE; - mlen -= SPONGENT176_STATE_SIZE; - } - if (mlen > 0) { - /* Encrypt the last block using the current mask */ - unsigned temp = (unsigned)mlen; - memcpy(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, 
npub, JUMBO_NONCE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, m, temp); - lw_xor_block(state.B, mask, SPONGENT176_STATE_SIZE); - memcpy(c, state.B, temp); - - /* Authenticate the last block using the next mask */ - jumbo_lfsr(next, mask); - state.B[temp] = 0x01; - memset(state.B + temp + 1, 0, SPONGENT176_STATE_SIZE - temp - 1); - lw_xor_block(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, next, SPONGENT176_STATE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state.B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, JUMBO_TAG_SIZE); - c += temp; - } else if (*clen != JUMBO_TAG_SIZE) { - /* Pad and authenticate when the last block is aligned */ - jumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT176_STATE_SIZE); - state.B[0] ^= 0x01; - spongent176_permute(&state); - lw_xor_block(state.B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state.B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, JUMBO_TAG_SIZE); - } - - /* Generate the authentication tag */ - memcpy(c, tag, JUMBO_TAG_SIZE); - return 0; -} - -int jumbo_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - spongent176_state_t state; - unsigned char *mtemp = m; - unsigned char start[SPONGENT176_STATE_SIZE]; - unsigned char mask[SPONGENT176_STATE_SIZE]; - unsigned char next[SPONGENT176_STATE_SIZE]; - unsigned char tag[JUMBO_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < JUMBO_TAG_SIZE) - return -1; - *mlen = clen - JUMBO_TAG_SIZE; - - /* Hash the key and generate the initial mask */ - memcpy(state.B, k, JUMBO_KEY_SIZE); - memset(state.B + JUMBO_KEY_SIZE, 0, sizeof(state.B) - JUMBO_KEY_SIZE); - spongent176_permute(&state); - memcpy(mask, state.B, JUMBO_KEY_SIZE); - memset(mask + JUMBO_KEY_SIZE, 0, sizeof(mask) - JUMBO_KEY_SIZE); - memcpy(start, mask, sizeof(mask)); - - /* Tag starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Authenticate the nonce and the associated data */ - jumbo_process_ad(&state, mask, next, tag, npub, ad, adlen); - - /* Reset back to the starting mask for the encryption phase */ - memcpy(mask, start, sizeof(mask)); - - /* Decrypt and authenticate the payload */ - clen -= JUMBO_TAG_SIZE; - while (clen >= SPONGENT176_STATE_SIZE) { - /* Authenticate using the next mask */ - jumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, c, SPONGENT176_STATE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state.B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, JUMBO_TAG_SIZE); - - /* Decrypt using the current mask */ - memcpy(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, npub, JUMBO_NONCE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block_2_src(m, state.B, c, SPONGENT176_STATE_SIZE); - - /* Advance to the next block */ - memcpy(mask, next, SPONGENT176_STATE_SIZE); - c += SPONGENT176_STATE_SIZE; - m += SPONGENT176_STATE_SIZE; - clen -= SPONGENT176_STATE_SIZE; - } - if (clen > 0) { - /* Authenticate the last block using the next mask */ - unsigned temp = (unsigned)clen; - jumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT176_STATE_SIZE); - 
lw_xor_block(state.B, c, temp); - state.B[temp] ^= 0x01; - spongent176_permute(&state); - lw_xor_block(state.B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state.B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, JUMBO_TAG_SIZE); - - /* Decrypt the last block using the current mask */ - memcpy(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, npub, JUMBO_NONCE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, mask, temp); - lw_xor_block_2_src(m, state.B, c, temp); - c += temp; - } else if (*mlen != 0) { - /* Pad and authenticate when the last block is aligned */ - jumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT176_STATE_SIZE); - state.B[0] ^= 0x01; - spongent176_permute(&state); - lw_xor_block(state.B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state.B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, JUMBO_TAG_SIZE); - } - - /* Check the authentication tag */ - return aead_check_tag(mtemp, *mlen, tag, c, JUMBO_TAG_SIZE); -} - -/** - * \brief Applies the Delirium LFSR to the mask. - * - * \param out The output mask. - * \param in The input mask. - */ -static void delirium_lfsr - (unsigned char out[KECCAKP_200_STATE_SIZE], - const unsigned char in[KECCAKP_200_STATE_SIZE]) -{ - unsigned char temp = - leftRotate1_8(in[0]) ^ leftRotate1_8(in[2]) ^ (in[13] << 1); - unsigned index; - for (index = 0; index < KECCAKP_200_STATE_SIZE - 1; ++index) - out[index] = in[index + 1]; - out[KECCAKP_200_STATE_SIZE - 1] = temp; -} - -/** - * \brief Processes the nonce and associated data for Delirium. - * - * \param state Points to the Keccak[200] state. - * \param mask Points to the initial mask value. - * \param next Points to the next mask value. - * \param tag Points to the ongoing tag that is being computed. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data. - */ -static void delirium_process_ad - (keccakp_200_state_t *state, - unsigned char mask[KECCAKP_200_STATE_SIZE], - unsigned char next[KECCAKP_200_STATE_SIZE], - unsigned char tag[DELIRIUM_TAG_SIZE], - const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned posn, size; - - /* We need the "previous" and "next" masks in each step. 
- * Compare the first such values */ - delirium_lfsr(next, mask); - delirium_lfsr(next, next); - - /* Absorb the nonce into the state */ - lw_xor_block_2_src(state->B, mask, next, KECCAKP_200_STATE_SIZE); - lw_xor_block(state->B, npub, DELIRIUM_NONCE_SIZE); - - /* Absorb the rest of the associated data */ - posn = DELIRIUM_NONCE_SIZE; - while (adlen > 0) { - size = KECCAKP_200_STATE_SIZE - posn; - if (size <= adlen) { - /* Process a complete block */ - lw_xor_block(state->B + posn, ad, size); - keccakp_200_permute(state); - lw_xor_block(state->B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state->B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state->B, DELIRIUM_TAG_SIZE); - delirium_lfsr(mask, mask); - delirium_lfsr(next, next); - lw_xor_block_2_src(state->B, mask, next, KECCAKP_200_STATE_SIZE); - posn = 0; - } else { - /* Process the partial block at the end of the associated data */ - size = (unsigned)adlen; - lw_xor_block(state->B + posn, ad, size); - posn += size; - } - ad += size; - adlen -= size; - } - - /* Pad and absorb the final block */ - state->B[posn] ^= 0x01; - keccakp_200_permute(state); - lw_xor_block(state->B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state->B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state->B, DELIRIUM_TAG_SIZE); -} - -int delirium_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - keccakp_200_state_t state; - unsigned char start[KECCAKP_200_STATE_SIZE]; - unsigned char mask[KECCAKP_200_STATE_SIZE]; - unsigned char next[KECCAKP_200_STATE_SIZE]; - unsigned char tag[DELIRIUM_TAG_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + DELIRIUM_TAG_SIZE; - - /* Hash the key and generate the initial mask */ - memcpy(state.B, k, DELIRIUM_KEY_SIZE); - memset(state.B + DELIRIUM_KEY_SIZE, 0, sizeof(state.B) - DELIRIUM_KEY_SIZE); - keccakp_200_permute(&state); - memcpy(mask, state.B, DELIRIUM_KEY_SIZE); - memset(mask + DELIRIUM_KEY_SIZE, 0, sizeof(mask) - DELIRIUM_KEY_SIZE); - memcpy(start, mask, sizeof(mask)); - - /* Tag starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Authenticate the nonce and the associated data */ - delirium_process_ad(&state, mask, next, tag, npub, ad, adlen); - - /* Reset back to the starting mask for the encryption phase */ - memcpy(mask, start, sizeof(mask)); - - /* Encrypt and authenticate the payload */ - while (mlen >= KECCAKP_200_STATE_SIZE) { - /* Encrypt using the current mask */ - memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, m, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); - memcpy(c, state.B, KECCAKP_200_STATE_SIZE); - - /* Authenticate using the next mask */ - delirium_lfsr(next, mask); - lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, next, KECCAKP_200_STATE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); - - /* Advance to the next block */ - memcpy(mask, next, KECCAKP_200_STATE_SIZE); - c += KECCAKP_200_STATE_SIZE; - m += KECCAKP_200_STATE_SIZE; - mlen -= KECCAKP_200_STATE_SIZE; - } - if (mlen > 0) { - /* Encrypt the last block using the current mask */ - unsigned temp = 
(unsigned)mlen; - memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, m, temp); - lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); - memcpy(c, state.B, temp); - - /* Authenticate the last block using the next mask */ - delirium_lfsr(next, mask); - state.B[temp] = 0x01; - memset(state.B + temp + 1, 0, KECCAKP_200_STATE_SIZE - temp - 1); - lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, next, KECCAKP_200_STATE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); - c += temp; - } else if (*clen != DELIRIUM_TAG_SIZE) { - /* Pad and authenticate when the last block is aligned */ - delirium_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); - state.B[0] ^= 0x01; - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); - } - - /* Generate the authentication tag */ - memcpy(c, tag, DELIRIUM_TAG_SIZE); - return 0; -} - -int delirium_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - keccakp_200_state_t state; - unsigned char *mtemp = m; - unsigned char start[KECCAKP_200_STATE_SIZE]; - unsigned char mask[KECCAKP_200_STATE_SIZE]; - unsigned char next[KECCAKP_200_STATE_SIZE]; - unsigned char tag[DELIRIUM_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < DELIRIUM_TAG_SIZE) - return -1; - *mlen = clen - DELIRIUM_TAG_SIZE; - - /* Hash the key and generate the initial mask */ - memcpy(state.B, k, DELIRIUM_KEY_SIZE); - memset(state.B + DELIRIUM_KEY_SIZE, 0, sizeof(state.B) - DELIRIUM_KEY_SIZE); - keccakp_200_permute(&state); - memcpy(mask, state.B, DELIRIUM_KEY_SIZE); - memset(mask + DELIRIUM_KEY_SIZE, 0, sizeof(mask) - DELIRIUM_KEY_SIZE); - memcpy(start, mask, sizeof(mask)); - - /* Tag starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Authenticate the nonce and the associated data */ - delirium_process_ad(&state, mask, next, tag, npub, ad, adlen); - - /* Reset back to the starting mask for the encryption phase */ - memcpy(mask, start, sizeof(mask)); - - /* Decrypt and authenticate the payload */ - clen -= DELIRIUM_TAG_SIZE; - while (clen >= KECCAKP_200_STATE_SIZE) { - /* Authenticate using the next mask */ - delirium_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, c, KECCAKP_200_STATE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); - - /* Decrypt using the current mask */ - memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block_2_src(m, state.B, c, KECCAKP_200_STATE_SIZE); - - /* Advance to the next block */ - memcpy(mask, next, KECCAKP_200_STATE_SIZE); - c += KECCAKP_200_STATE_SIZE; - m += KECCAKP_200_STATE_SIZE; - clen -= KECCAKP_200_STATE_SIZE; - } - if (clen > 0) { - /* Authenticate the 
last block using the next mask */ - unsigned temp = (unsigned)clen; - delirium_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, c, temp); - state.B[temp] ^= 0x01; - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); - - /* Decrypt the last block using the current mask */ - memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, temp); - lw_xor_block_2_src(m, state.B, c, temp); - c += temp; - } else if (*mlen != 0) { - /* Pad and authenticate when the last block is aligned */ - delirium_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); - state.B[0] ^= 0x01; - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); - } - - /* Check the authentication tag */ - return aead_check_tag(mtemp, *mlen, tag, c, DELIRIUM_TAG_SIZE); -} diff --git a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/elephant.h b/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/elephant.h deleted file mode 100644 index f775e3d..0000000 --- a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/elephant.h +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ELEPHANT_H -#define LWCRYPTO_ELEPHANT_H - -#include "aead-common.h" - -/** - * \file elephant.h - * \brief Elephant authenticated encryption algorithm family. - * - * Elephant is a family of authenticated encryption algorithms based - * around the Spongent-pi and Keccak permutations. - * - * \li Dumbo has a 128-bit key, a 96-bit nonce, and a 64-bit authentication - * tag. It is based around the Spongent-pi[160] permutation. This is - * the primary member of the family. - * \li Jumbo has a 128-bit key, a 96-bit nonce, and a 64-bit authentication - * tag. It is based around the Spongent-pi[176] permutation. - * \li Delirium has a 128-bit key, a 96-bit nonce, and a 128-bit authentication - * tag. It is based around the Keccak[200] permutation. 
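All three variants share the same calling convention and differ only in the sizes listed above; a minimal round-trip sketch of the Dumbo interface follows, assuming only the dumbo_aead_encrypt()/dumbo_aead_decrypt() prototypes and the DUMBO_* size macros declared in this header (the all-zero key and nonce are placeholders for illustration only).

#include <stdio.h>
#include "elephant.h"

int main(void)
{
    /* Placeholder key/nonce; real callers must use a secret key and a
       fresh 12-byte nonce for every packet. */
    unsigned char key[DUMBO_KEY_SIZE] = {0};      /* 16 bytes */
    unsigned char nonce[DUMBO_NONCE_SIZE] = {0};  /* 12 bytes */
    unsigned char msg[] = "hello";
    unsigned char ad[] = "header";
    unsigned char ct[sizeof(msg) + DUMBO_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long clen, mlen;

    /* Encrypt: the output is the ciphertext followed by the 8-byte tag. */
    if (dumbo_aead_encrypt(ct, &clen, msg, sizeof(msg), ad, sizeof(ad),
                           NULL, nonce, key) != 0)
        return 1;

    /* Decrypt: returns 0 only if the authentication tag verifies. */
    if (dumbo_aead_decrypt(pt, &mlen, NULL, ct, clen,
                           ad, sizeof(ad), nonce, key) == 0)
        printf("verified %llu bytes of plaintext\n", mlen);
    return 0;
}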
- * - * References: https://www.esat.kuleuven.be/cosic/elephant/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for Dumbo. - */ -#define DUMBO_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Dumbo. - */ -#define DUMBO_TAG_SIZE 8 - -/** - * \brief Size of the nonce for Dumbo. - */ -#define DUMBO_NONCE_SIZE 12 - -/** - * \brief Size of the key for Jumbo. - */ -#define JUMBO_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Jumbo. - */ -#define JUMBO_TAG_SIZE 8 - -/** - * \brief Size of the nonce for Jumbo. - */ -#define JUMBO_NONCE_SIZE 12 - -/** - * \brief Size of the key for Delirium. - */ -#define DELIRIUM_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Delirium. - */ -#define DELIRIUM_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Delirium. - */ -#define DELIRIUM_NONCE_SIZE 12 - -/** - * \brief Meta-information block for the Dumbo cipher. - */ -extern aead_cipher_t const dumbo_cipher; - -/** - * \brief Meta-information block for the Jumbo cipher. - */ -extern aead_cipher_t const jumbo_cipher; - -/** - * \brief Meta-information block for the Delirium cipher. - */ -extern aead_cipher_t const delirium_cipher; - -/** - * \brief Encrypts and authenticates a packet with Dumbo. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa dumbo_aead_decrypt() - */ -int dumbo_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Dumbo. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- * - * \sa dumbo_aead_encrypt() - */ -int dumbo_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Jumbo. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa jumbo_aead_decrypt() - */ -int jumbo_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Jumbo. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa jumbo_aead_encrypt() - */ -int jumbo_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Delirium. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. 
- * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa delirium_aead_decrypt() - */ -int delirium_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Delirium. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa delirium_aead_encrypt() - */ -int delirium_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/encrypt.c b/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/encrypt.c deleted file mode 100644 index 89b60ae..0000000 --- a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "elephant.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return jumbo_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return jumbo_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-keccak-avr.S b/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-keccak-avr.S deleted file mode 100644 index e50ccaf..0000000 --- a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-keccak-avr.S +++ /dev/null @@ -1,1552 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global keccakp_200_permute - .type keccakp_200_permute, @function -keccakp_200_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - 
ldd r22,Z+4 - ldd r23,Z+5 - ldd r26,Z+6 - ldd r27,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r4,Z+12 - ldd r5,Z+13 - ldd r6,Z+14 - ldd r7,Z+15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - ldd r24,Z+24 - push r31 - push r30 - rcall 82f - ldi r30,1 - eor r18,r30 - rcall 82f - ldi r30,130 - eor r18,r30 - rcall 82f - ldi r30,138 - eor r18,r30 - rcall 82f - mov r30,r1 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,1 - eor r18,r30 - rcall 82f - ldi r30,129 - eor r18,r30 - rcall 82f - ldi r30,9 - eor r18,r30 - rcall 82f - ldi r30,138 - eor r18,r30 - rcall 82f - ldi r30,136 - eor r18,r30 - rcall 82f - ldi r30,9 - eor r18,r30 - rcall 82f - ldi r30,10 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,137 - eor r18,r30 - rcall 82f - ldi r30,3 - eor r18,r30 - rcall 82f - ldi r30,2 - eor r18,r30 - rcall 82f - ldi r30,128 - eor r18,r30 - rjmp 420f -82: - mov r30,r18 - eor r30,r23 - eor r30,r2 - eor r30,r7 - eor r30,r12 - mov r31,r19 - eor r31,r26 - eor r31,r3 - eor r31,r8 - eor r31,r13 - mov r25,r20 - eor r25,r27 - eor r25,r4 - eor r25,r9 - eor r25,r14 - mov r16,r21 - eor r16,r28 - eor r16,r5 - eor r16,r10 - eor r16,r15 - mov r17,r22 - eor r17,r29 - eor r17,r6 - eor r17,r11 - eor r17,r24 - mov r0,r31 - lsl r0 - adc r0,r1 - eor r0,r17 - eor r18,r0 - eor r23,r0 - eor r2,r0 - eor r7,r0 - eor r12,r0 - mov r0,r25 - lsl r0 - adc r0,r1 - eor r0,r30 - eor r19,r0 - eor r26,r0 - eor r3,r0 - eor r8,r0 - eor r13,r0 - mov r0,r16 - lsl r0 - adc r0,r1 - eor r0,r31 - eor r20,r0 - eor r27,r0 - eor r4,r0 - eor r9,r0 - eor r14,r0 - mov r0,r17 - lsl r0 - adc r0,r1 - eor r0,r25 - eor r21,r0 - eor r28,r0 - eor r5,r0 - eor r10,r0 - eor r15,r0 - mov r0,r30 - lsl r0 - adc r0,r1 - eor r0,r16 - eor r22,r0 - eor r29,r0 - eor r6,r0 - eor r11,r0 - eor r24,r0 - mov r30,r19 - swap r26 - mov r19,r26 - swap r29 - mov r26,r29 - mov r0,r1 - lsr r14 - ror r0 - lsr r14 - ror r0 - lsr r14 - ror r0 - or r14,r0 - mov r29,r14 - bst r6,0 - lsr r6 - bld r6,7 - mov r14,r6 - lsl r12 - adc r12,r1 - lsl r12 - adc r12,r1 - mov r6,r12 - mov r0,r1 - lsr r20 - ror r0 - lsr r20 - ror r0 - or r20,r0 - mov r12,r20 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - mov r20,r4 - lsl r5 - adc r5,r1 - mov r4,r5 - mov r5,r11 - mov r11,r15 - lsl r7 - adc r7,r1 - mov r15,r7 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - mov r7,r22 - mov r0,r1 - lsr r24 - ror r0 - lsr r24 - ror r0 - or r24,r0 - mov r22,r24 - lsl r13 - adc r13,r1 - lsl r13 - adc r13,r1 - mov r24,r13 - bst r28,0 - lsr r28 - bld r28,7 - mov r13,r28 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r28,r8 - swap r23 - mov r8,r23 - swap r21 - mov r23,r21 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r21,r10 - bst r9,0 - lsr r9 - bld r9,7 - mov r10,r9 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - mov r9,r3 - mov r0,r1 - lsr r27 - ror r0 - lsr r27 - ror r0 - or r27,r0 - mov r3,r27 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - mov r27,r2 - lsl r30 - adc r30,r1 - mov r2,r30 - mov r30,r18 - mov r31,r19 - mov r25,r20 - mov r16,r21 - mov r17,r22 - mov r18,r25 - mov r0,r31 - com r0 - and r18,r0 - eor r18,r30 - mov r19,r16 - mov r0,r25 - com r0 - and r19,r0 - eor r19,r31 - mov r20,r17 - mov r0,r16 - com r0 - and r20,r0 - eor r20,r25 - mov r21,r30 - mov r0,r17 - com r0 - and r21,r0 - eor r21,r16 - 
mov r22,r31 - mov r0,r30 - com r0 - and r22,r0 - eor r22,r17 - mov r30,r23 - mov r31,r26 - mov r25,r27 - mov r16,r28 - mov r17,r29 - mov r23,r25 - mov r0,r31 - com r0 - and r23,r0 - eor r23,r30 - mov r26,r16 - mov r0,r25 - com r0 - and r26,r0 - eor r26,r31 - mov r27,r17 - mov r0,r16 - com r0 - and r27,r0 - eor r27,r25 - mov r28,r30 - mov r0,r17 - com r0 - and r28,r0 - eor r28,r16 - mov r29,r31 - mov r0,r30 - com r0 - and r29,r0 - eor r29,r17 - mov r30,r2 - mov r31,r3 - mov r25,r4 - mov r16,r5 - mov r17,r6 - mov r2,r25 - mov r0,r31 - com r0 - and r2,r0 - eor r2,r30 - mov r3,r16 - mov r0,r25 - com r0 - and r3,r0 - eor r3,r31 - mov r4,r17 - mov r0,r16 - com r0 - and r4,r0 - eor r4,r25 - mov r5,r30 - mov r0,r17 - com r0 - and r5,r0 - eor r5,r16 - mov r6,r31 - mov r0,r30 - com r0 - and r6,r0 - eor r6,r17 - mov r30,r7 - mov r31,r8 - mov r25,r9 - mov r16,r10 - mov r17,r11 - mov r7,r25 - mov r0,r31 - com r0 - and r7,r0 - eor r7,r30 - mov r8,r16 - mov r0,r25 - com r0 - and r8,r0 - eor r8,r31 - mov r9,r17 - mov r0,r16 - com r0 - and r9,r0 - eor r9,r25 - mov r10,r30 - mov r0,r17 - com r0 - and r10,r0 - eor r10,r16 - mov r11,r31 - mov r0,r30 - com r0 - and r11,r0 - eor r11,r17 - mov r30,r12 - mov r31,r13 - mov r25,r14 - mov r16,r15 - mov r17,r24 - mov r12,r25 - mov r0,r31 - com r0 - and r12,r0 - eor r12,r30 - mov r13,r16 - mov r0,r25 - com r0 - and r13,r0 - eor r13,r31 - mov r14,r17 - mov r0,r16 - com r0 - and r14,r0 - eor r14,r25 - mov r15,r30 - mov r0,r17 - com r0 - and r15,r0 - eor r15,r16 - mov r24,r31 - mov r0,r30 - com r0 - and r24,r0 - eor r24,r17 - ret -420: - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r22 - std Z+5,r23 - std Z+6,r26 - std Z+7,r27 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r4 - std Z+13,r5 - std Z+14,r6 - std Z+15,r7 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - std Z+24,r24 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size keccakp_200_permute, .-keccakp_200_permute - - .text -.global keccakp_400_permute - .type keccakp_400_permute, @function -keccakp_400_permute: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - movw r30,r24 -.L__stack_usage = 17 - ld r6,Z - ldd r7,Z+1 - ldd r8,Z+2 - ldd r9,Z+3 - ldd r10,Z+4 - ldd r11,Z+5 - ldd r12,Z+6 - ldd r13,Z+7 - ldd r14,Z+8 - ldd r15,Z+9 - cpi r22,20 - brcs 15f - rcall 153f - ldi r23,1 - eor r6,r23 -15: - cpi r22,19 - brcs 23f - rcall 153f - ldi r23,130 - eor r6,r23 - ldi r17,128 - eor r7,r17 -23: - cpi r22,18 - brcs 31f - rcall 153f - ldi r23,138 - eor r6,r23 - ldi r17,128 - eor r7,r17 -31: - cpi r22,17 - brcs 37f - rcall 153f - ldi r23,128 - eor r7,r23 -37: - cpi r22,16 - brcs 45f - rcall 153f - ldi r23,139 - eor r6,r23 - ldi r17,128 - eor r7,r17 -45: - cpi r22,15 - brcs 51f - rcall 153f - ldi r23,1 - eor r6,r23 -51: - cpi r22,14 - brcs 59f - rcall 153f - ldi r23,129 - eor r6,r23 - ldi r17,128 - eor r7,r17 -59: - cpi r22,13 - brcs 67f - rcall 153f - ldi r23,9 - eor r6,r23 - ldi r17,128 - eor r7,r17 -67: - cpi r22,12 - brcs 73f - rcall 153f - ldi r23,138 - eor r6,r23 -73: - cpi r22,11 - brcs 79f - rcall 153f - ldi r23,136 - eor r6,r23 -79: - cpi r22,10 - brcs 87f - rcall 153f - ldi r23,9 - eor r6,r23 - ldi r17,128 - eor r7,r17 -87: - cpi r22,9 - brcs 93f - rcall 
153f - ldi r23,10 - eor r6,r23 -93: - cpi r22,8 - brcs 101f - rcall 153f - ldi r23,139 - eor r6,r23 - ldi r17,128 - eor r7,r17 -101: - cpi r22,7 - brcs 107f - rcall 153f - ldi r23,139 - eor r6,r23 -107: - cpi r22,6 - brcs 115f - rcall 153f - ldi r23,137 - eor r6,r23 - ldi r17,128 - eor r7,r17 -115: - cpi r22,5 - brcs 123f - rcall 153f - ldi r23,3 - eor r6,r23 - ldi r17,128 - eor r7,r17 -123: - cpi r22,4 - brcs 131f - rcall 153f - ldi r23,2 - eor r6,r23 - ldi r17,128 - eor r7,r17 -131: - cpi r22,3 - brcs 137f - rcall 153f - ldi r23,128 - eor r6,r23 -137: - cpi r22,2 - brcs 145f - rcall 153f - ldi r23,10 - eor r6,r23 - ldi r17,128 - eor r7,r17 -145: - cpi r22,1 - brcs 151f - rcall 153f - ldi r23,10 - eor r6,r23 -151: - rjmp 1004f -153: - movw r18,r6 - ldd r0,Z+10 - eor r18,r0 - ldd r0,Z+11 - eor r19,r0 - ldd r0,Z+20 - eor r18,r0 - ldd r0,Z+21 - eor r19,r0 - ldd r0,Z+30 - eor r18,r0 - ldd r0,Z+31 - eor r19,r0 - ldd r0,Z+40 - eor r18,r0 - ldd r0,Z+41 - eor r19,r0 - movw r20,r8 - ldd r0,Z+12 - eor r20,r0 - ldd r0,Z+13 - eor r21,r0 - ldd r0,Z+22 - eor r20,r0 - ldd r0,Z+23 - eor r21,r0 - ldd r0,Z+32 - eor r20,r0 - ldd r0,Z+33 - eor r21,r0 - ldd r0,Z+42 - eor r20,r0 - ldd r0,Z+43 - eor r21,r0 - movw r26,r10 - ldd r0,Z+14 - eor r26,r0 - ldd r0,Z+15 - eor r27,r0 - ldd r0,Z+24 - eor r26,r0 - ldd r0,Z+25 - eor r27,r0 - ldd r0,Z+34 - eor r26,r0 - ldd r0,Z+35 - eor r27,r0 - ldd r0,Z+44 - eor r26,r0 - ldd r0,Z+45 - eor r27,r0 - movw r2,r12 - ldd r0,Z+16 - eor r2,r0 - ldd r0,Z+17 - eor r3,r0 - ldd r0,Z+26 - eor r2,r0 - ldd r0,Z+27 - eor r3,r0 - ldd r0,Z+36 - eor r2,r0 - ldd r0,Z+37 - eor r3,r0 - ldd r0,Z+46 - eor r2,r0 - ldd r0,Z+47 - eor r3,r0 - movw r4,r14 - ldd r0,Z+18 - eor r4,r0 - ldd r0,Z+19 - eor r5,r0 - ldd r0,Z+28 - eor r4,r0 - ldd r0,Z+29 - eor r5,r0 - ldd r0,Z+38 - eor r4,r0 - ldd r0,Z+39 - eor r5,r0 - ldd r0,Z+48 - eor r4,r0 - ldd r0,Z+49 - eor r5,r0 - movw r24,r20 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r4 - eor r25,r5 - eor r6,r24 - eor r7,r25 - ldd r0,Z+10 - eor r0,r24 - std Z+10,r0 - ldd r0,Z+11 - eor r0,r25 - std Z+11,r0 - ldd r0,Z+20 - eor r0,r24 - std Z+20,r0 - ldd r0,Z+21 - eor r0,r25 - std Z+21,r0 - ldd r0,Z+30 - eor r0,r24 - std Z+30,r0 - ldd r0,Z+31 - eor r0,r25 - std Z+31,r0 - ldd r0,Z+40 - eor r0,r24 - std Z+40,r0 - ldd r0,Z+41 - eor r0,r25 - std Z+41,r0 - movw r24,r26 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r18 - eor r25,r19 - eor r8,r24 - eor r9,r25 - ldd r0,Z+12 - eor r0,r24 - std Z+12,r0 - ldd r0,Z+13 - eor r0,r25 - std Z+13,r0 - ldd r0,Z+22 - eor r0,r24 - std Z+22,r0 - ldd r0,Z+23 - eor r0,r25 - std Z+23,r0 - ldd r0,Z+32 - eor r0,r24 - std Z+32,r0 - ldd r0,Z+33 - eor r0,r25 - std Z+33,r0 - ldd r0,Z+42 - eor r0,r24 - std Z+42,r0 - ldd r0,Z+43 - eor r0,r25 - std Z+43,r0 - movw r24,r2 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r20 - eor r25,r21 - eor r10,r24 - eor r11,r25 - ldd r0,Z+14 - eor r0,r24 - std Z+14,r0 - ldd r0,Z+15 - eor r0,r25 - std Z+15,r0 - ldd r0,Z+24 - eor r0,r24 - std Z+24,r0 - ldd r0,Z+25 - eor r0,r25 - std Z+25,r0 - ldd r0,Z+34 - eor r0,r24 - std Z+34,r0 - ldd r0,Z+35 - eor r0,r25 - std Z+35,r0 - ldd r0,Z+44 - eor r0,r24 - std Z+44,r0 - ldd r0,Z+45 - eor r0,r25 - std Z+45,r0 - movw r24,r4 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r26 - eor r25,r27 - eor r12,r24 - eor r13,r25 - ldd r0,Z+16 - eor r0,r24 - std Z+16,r0 - ldd r0,Z+17 - eor r0,r25 - std Z+17,r0 - ldd r0,Z+26 - eor r0,r24 - std Z+26,r0 - ldd r0,Z+27 - eor r0,r25 - std Z+27,r0 - ldd r0,Z+36 - eor r0,r24 - std Z+36,r0 - ldd r0,Z+37 - eor r0,r25 - std Z+37,r0 - ldd r0,Z+46 - eor r0,r24 - std 
Z+46,r0 - ldd r0,Z+47 - eor r0,r25 - std Z+47,r0 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r2 - eor r25,r3 - eor r14,r24 - eor r15,r25 - ldd r0,Z+18 - eor r0,r24 - std Z+18,r0 - ldd r0,Z+19 - eor r0,r25 - std Z+19,r0 - ldd r0,Z+28 - eor r0,r24 - std Z+28,r0 - ldd r0,Z+29 - eor r0,r25 - std Z+29,r0 - ldd r0,Z+38 - eor r0,r24 - std Z+38,r0 - ldd r0,Z+39 - eor r0,r25 - std Z+39,r0 - ldd r0,Z+48 - eor r0,r24 - std Z+48,r0 - ldd r0,Z+49 - eor r0,r25 - std Z+49,r0 - movw r24,r8 - ldd r8,Z+12 - ldd r9,Z+13 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldd r18,Z+18 - ldd r19,Z+19 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+12,r18 - std Z+13,r19 - ldd r18,Z+44 - ldd r19,Z+45 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+18,r18 - std Z+19,r19 - ldd r18,Z+28 - ldd r19,Z+29 - mov r0,r19 - mov r19,r18 - mov r18,r0 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std Z+44,r18 - std Z+45,r19 - ldd r18,Z+40 - ldd r19,Z+41 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+28,r18 - std Z+29,r19 - movw r18,r10 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+40,r18 - std Z+41,r19 - ldd r10,Z+24 - ldd r11,Z+25 - mov r0,r11 - mov r11,r10 - mov r10,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldd r18,Z+26 - ldd r19,Z+27 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - std Z+24,r18 - std Z+25,r19 - ldd r18,Z+38 - ldd r19,Z+39 - mov r0,r19 - mov r19,r18 - mov r18,r0 - std Z+26,r18 - std Z+27,r19 - ldd r18,Z+46 - ldd r19,Z+47 - mov r0,r19 - mov r19,r18 - mov r18,r0 - std Z+38,r18 - std Z+39,r19 - ldd r18,Z+30 - ldd r19,Z+31 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - std Z+46,r18 - std Z+47,r19 - movw r18,r14 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+30,r18 - std Z+31,r19 - ldd r14,Z+48 - ldd r15,Z+49 - mov r0,r1 - lsr r15 - ror r14 - ror r0 - lsr r15 - ror r14 - ror r0 - or r15,r0 - ldd r18,Z+42 - ldd r19,Z+43 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+48,r18 - std Z+49,r19 - ldd r18,Z+16 - ldd r19,Z+17 - mov r0,r19 - mov r19,r18 - mov r18,r0 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std Z+42,r18 - std Z+43,r19 - ldd r18,Z+32 - ldd r19,Z+33 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+16,r18 - std Z+17,r19 - ldd r18,Z+10 - ldd r19,Z+11 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+32,r18 - std Z+33,r19 - movw r18,r12 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+10,r18 - std Z+11,r19 - ldd r12,Z+36 - ldd r13,Z+37 - mov r0,r13 - mov r13,r12 - mov r12,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - or r13,r0 - ldd r18,Z+34 - ldd r19,Z+35 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std Z+36,r18 - std Z+37,r19 - ldd r18,Z+22 - ldd r19,Z+23 - mov r0,r19 - mov 
r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+34,r18 - std Z+35,r19 - ldd r18,Z+14 - ldd r19,Z+15 - mov r0,r19 - mov r19,r18 - mov r18,r0 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+22,r18 - std Z+23,r19 - ldd r18,Z+20 - ldd r19,Z+21 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+14,r18 - std Z+15,r19 - lsl r24 - rol r25 - adc r24,r1 - std Z+20,r24 - std Z+21,r25 - movw r18,r6 - movw r20,r8 - movw r26,r10 - movw r2,r12 - movw r4,r14 - movw r6,r26 - mov r0,r20 - com r0 - and r6,r0 - mov r0,r21 - com r0 - and r7,r0 - eor r6,r18 - eor r7,r19 - movw r8,r2 - mov r0,r26 - com r0 - and r8,r0 - mov r0,r27 - com r0 - and r9,r0 - eor r8,r20 - eor r9,r21 - movw r10,r4 - mov r0,r2 - com r0 - and r10,r0 - mov r0,r3 - com r0 - and r11,r0 - eor r10,r26 - eor r11,r27 - movw r12,r18 - mov r0,r4 - com r0 - and r12,r0 - mov r0,r5 - com r0 - and r13,r0 - eor r12,r2 - eor r13,r3 - movw r14,r20 - mov r0,r18 - com r0 - and r14,r0 - mov r0,r19 - com r0 - and r15,r0 - eor r14,r4 - eor r15,r5 - ldd r18,Z+10 - ldd r19,Z+11 - ldd r20,Z+12 - ldd r21,Z+13 - ldd r26,Z+14 - ldd r27,Z+15 - ldd r2,Z+16 - ldd r3,Z+17 - ldd r4,Z+18 - ldd r5,Z+19 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+10,r24 - std Z+11,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+12,r24 - std Z+13,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+14,r24 - std Z+15,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+16,r24 - std Z+17,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+18,r24 - std Z+19,r25 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - ldd r26,Z+24 - ldd r27,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r4,Z+28 - ldd r5,Z+29 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+20,r24 - std Z+21,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+22,r24 - std Z+23,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+24,r24 - std Z+25,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+26,r24 - std Z+27,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+28,r24 - std Z+29,r25 - ldd r18,Z+30 - ldd r19,Z+31 - ldd r20,Z+32 - ldd r21,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - ldd r2,Z+36 - ldd r3,Z+37 - ldd r4,Z+38 - ldd r5,Z+39 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+30,r24 - std Z+31,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+32,r24 - std Z+33,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+34,r24 - std Z+35,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+36,r24 - std Z+37,r25 - 
movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+38,r24 - std Z+39,r25 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - ldd r26,Z+44 - ldd r27,Z+45 - ldd r2,Z+46 - ldd r3,Z+47 - ldd r4,Z+48 - ldd r5,Z+49 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+40,r24 - std Z+41,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+42,r24 - std Z+43,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+44,r24 - std Z+45,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+46,r24 - std Z+47,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+48,r24 - std Z+49,r25 - ret -1004: - st Z,r6 - std Z+1,r7 - std Z+2,r8 - std Z+3,r9 - std Z+4,r10 - std Z+5,r11 - std Z+6,r12 - std Z+7,r13 - std Z+8,r14 - std Z+9,r15 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size keccakp_400_permute, .-keccakp_400_permute - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-keccak.c b/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-keccak.c deleted file mode 100644 index 60539df..0000000 --- a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-keccak.c +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-keccak.h" - -#if !defined(__AVR__) - -/* Faster method to compute ((x + y) % 5) that avoids the division */ -static unsigned char const addMod5Table[9] = { - 0, 1, 2, 3, 4, 0, 1, 2, 3 -}; -#define addMod5(x, y) (addMod5Table[(x) + (y)]) - -void keccakp_200_permute(keccakp_200_state_t *state) -{ - static uint8_t const RC[18] = { - 0x01, 0x82, 0x8A, 0x00, 0x8B, 0x01, 0x81, 0x09, - 0x8A, 0x88, 0x09, 0x0A, 0x8B, 0x8B, 0x89, 0x03, - 0x02, 0x80 - }; - uint8_t C[5]; - uint8_t D; - unsigned round; - unsigned index, index2; - for (round = 0; round < 18; ++round) { - /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. 
Compute D on the fly */ - for (index = 0; index < 5; ++index) { - C[index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; - } - for (index = 0; index < 5; ++index) { - D = C[addMod5(index, 4)] ^ - leftRotate1_8(C[addMod5(index, 1)]); - for (index2 = 0; index2 < 5; ++index2) - state->A[index2][index] ^= D; - } - - /* Step mapping rho and pi combined into a single step. - * Rotate all lanes by a specific offset and rearrange */ - D = state->A[0][1]; - state->A[0][1] = leftRotate4_8(state->A[1][1]); - state->A[1][1] = leftRotate4_8(state->A[1][4]); - state->A[1][4] = leftRotate5_8(state->A[4][2]); - state->A[4][2] = leftRotate7_8(state->A[2][4]); - state->A[2][4] = leftRotate2_8(state->A[4][0]); - state->A[4][0] = leftRotate6_8(state->A[0][2]); - state->A[0][2] = leftRotate3_8(state->A[2][2]); - state->A[2][2] = leftRotate1_8(state->A[2][3]); - state->A[2][3] = state->A[3][4]; - state->A[3][4] = state->A[4][3]; - state->A[4][3] = leftRotate1_8(state->A[3][0]); - state->A[3][0] = leftRotate3_8(state->A[0][4]); - state->A[0][4] = leftRotate6_8(state->A[4][4]); - state->A[4][4] = leftRotate2_8(state->A[4][1]); - state->A[4][1] = leftRotate7_8(state->A[1][3]); - state->A[1][3] = leftRotate5_8(state->A[3][1]); - state->A[3][1] = leftRotate4_8(state->A[1][0]); - state->A[1][0] = leftRotate4_8(state->A[0][3]); - state->A[0][3] = leftRotate5_8(state->A[3][3]); - state->A[3][3] = leftRotate7_8(state->A[3][2]); - state->A[3][2] = leftRotate2_8(state->A[2][1]); - state->A[2][1] = leftRotate6_8(state->A[1][2]); - state->A[1][2] = leftRotate3_8(state->A[2][0]); - state->A[2][0] = leftRotate1_8(D); - - /* Step mapping chi. Combine each lane with two others in its row */ - for (index = 0; index < 5; ++index) { - C[0] = state->A[index][0]; - C[1] = state->A[index][1]; - C[2] = state->A[index][2]; - C[3] = state->A[index][3]; - C[4] = state->A[index][4]; - for (index2 = 0; index2 < 5; ++index2) { - state->A[index][index2] = - C[index2] ^ - ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); - } - } - - /* Step mapping iota. XOR A[0][0] with the round constant */ - state->A[0][0] ^= RC[round]; - } -} - -#if defined(LW_UTIL_LITTLE_ENDIAN) -#define keccakp_400_permute_host keccakp_400_permute -#endif - -/* Keccak-p[400] that assumes that the input is already in host byte order */ -void keccakp_400_permute_host(keccakp_400_state_t *state, unsigned rounds) -{ - static uint16_t const RC[20] = { - 0x0001, 0x8082, 0x808A, 0x8000, 0x808B, 0x0001, 0x8081, 0x8009, - 0x008A, 0x0088, 0x8009, 0x000A, 0x808B, 0x008B, 0x8089, 0x8003, - 0x8002, 0x0080, 0x800A, 0x000A - }; - uint16_t C[5]; - uint16_t D; - unsigned round; - unsigned index, index2; - for (round = 20 - rounds; round < 20; ++round) { - /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. Compute D on the fly */ - for (index = 0; index < 5; ++index) { - C[index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; - } - for (index = 0; index < 5; ++index) { - D = C[addMod5(index, 4)] ^ - leftRotate1_16(C[addMod5(index, 1)]); - for (index2 = 0; index2 < 5; ++index2) - state->A[index2][index] ^= D; - } - - /* Step mapping rho and pi combined into a single step. 
- * Rotate all lanes by a specific offset and rearrange */ - D = state->A[0][1]; - state->A[0][1] = leftRotate12_16(state->A[1][1]); - state->A[1][1] = leftRotate4_16 (state->A[1][4]); - state->A[1][4] = leftRotate13_16(state->A[4][2]); - state->A[4][2] = leftRotate7_16 (state->A[2][4]); - state->A[2][4] = leftRotate2_16 (state->A[4][0]); - state->A[4][0] = leftRotate14_16(state->A[0][2]); - state->A[0][2] = leftRotate11_16(state->A[2][2]); - state->A[2][2] = leftRotate9_16 (state->A[2][3]); - state->A[2][3] = leftRotate8_16 (state->A[3][4]); - state->A[3][4] = leftRotate8_16 (state->A[4][3]); - state->A[4][3] = leftRotate9_16 (state->A[3][0]); - state->A[3][0] = leftRotate11_16(state->A[0][4]); - state->A[0][4] = leftRotate14_16(state->A[4][4]); - state->A[4][4] = leftRotate2_16 (state->A[4][1]); - state->A[4][1] = leftRotate7_16 (state->A[1][3]); - state->A[1][3] = leftRotate13_16(state->A[3][1]); - state->A[3][1] = leftRotate4_16 (state->A[1][0]); - state->A[1][0] = leftRotate12_16(state->A[0][3]); - state->A[0][3] = leftRotate5_16 (state->A[3][3]); - state->A[3][3] = leftRotate15_16(state->A[3][2]); - state->A[3][2] = leftRotate10_16(state->A[2][1]); - state->A[2][1] = leftRotate6_16 (state->A[1][2]); - state->A[1][2] = leftRotate3_16 (state->A[2][0]); - state->A[2][0] = leftRotate1_16(D); - - /* Step mapping chi. Combine each lane with two others in its row */ - for (index = 0; index < 5; ++index) { - C[0] = state->A[index][0]; - C[1] = state->A[index][1]; - C[2] = state->A[index][2]; - C[3] = state->A[index][3]; - C[4] = state->A[index][4]; - for (index2 = 0; index2 < 5; ++index2) { - state->A[index][index2] = - C[index2] ^ - ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); - } - } - - /* Step mapping iota. XOR A[0][0] with the round constant */ - state->A[0][0] ^= RC[round]; - } -} - -#if !defined(LW_UTIL_LITTLE_ENDIAN) - -/** - * \brief Reverses the bytes in a Keccak-p[400] state. - * - * \param state The Keccak-p[400] state to apply byte-reversal to. - */ -static void keccakp_400_reverse_bytes(keccakp_400_state_t *state) -{ - unsigned index; - unsigned char temp1; - unsigned char temp2; - for (index = 0; index < 50; index += 2) { - temp1 = state->B[index]; - temp2 = state->B[index + 1]; - state->B[index] = temp2; - state->B[index + 1] = temp1; - } -} - -/* Keccak-p[400] that requires byte reversal on input and output */ -void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds) -{ - keccakp_400_reverse_bytes(state); - keccakp_400_permute_host(state, rounds); - keccakp_400_reverse_bytes(state); -} - -#endif - -#endif /* !__AVR__ */ diff --git a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-keccak.h b/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-keccak.h deleted file mode 100644 index 2ffef42..0000000 --- a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-keccak.h +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
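The addMod5 table near the top of internal-keccak.c above replaces a modulo-5 in the theta and chi index arithmetic: both operands are always in the range 0..4, so their sum never exceeds 8 and a 9-entry lookup suffices. A minimal, self-contained C sketch of the same trick, written here only as an illustrative test harness (not part of the deleted file):

    #include <assert.h>

    /* 9-entry table giving (x + y) % 5 for x, y in 0..4 without a division */
    static const unsigned char addMod5Table[9] = { 0, 1, 2, 3, 4, 0, 1, 2, 3 };
    #define addMod5(x, y) (addMod5Table[(x) + (y)])

    int main(void)
    {
        unsigned x, y;
        for (x = 0; x < 5; ++x)
            for (y = 0; y < 5; ++y)
                assert(addMod5(x, y) == (x + y) % 5);
        return 0;
    }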
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_KECCAK_H -#define LW_INTERNAL_KECCAK_H - -#include "internal-util.h" - -/** - * \file internal-keccak.h - * \brief Internal implementation of the Keccak-p permutation. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the state for the Keccak-p[200] permutation. - */ -#define KECCAKP_200_STATE_SIZE 25 - -/** - * \brief Size of the state for the Keccak-p[400] permutation. - */ -#define KECCAKP_400_STATE_SIZE 50 - -/** - * \brief Structure of the internal state of the Keccak-p[200] permutation. - */ -typedef union -{ - uint8_t A[5][5]; /**< Keccak-p[200] state as a 5x5 array of lanes */ - uint8_t B[25]; /**< Keccak-p[200] state as a byte array */ - -} keccakp_200_state_t; - -/** - * \brief Structure of the internal state of the Keccak-p[400] permutation. - */ -typedef union -{ - uint16_t A[5][5]; /**< Keccak-p[400] state as a 5x5 array of lanes */ - uint8_t B[50]; /**< Keccak-p[400] state as a byte array */ - -} keccakp_400_state_t; - -/** - * \brief Permutes the Keccak-p[200] state. - * - * \param state The Keccak-p[200] state to be permuted. - */ -void keccakp_200_permute(keccakp_200_state_t *state); - -/** - * \brief Permutes the Keccak-p[400] state, which is assumed to be in - * little-endian byte order. - * - * \param state The Keccak-p[400] state to be permuted. - * \param rounds The number of rounds to perform (up to 20). 
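For reference, a short usage sketch of the two permutation entry points declared in this header. The zero-initialised states and the 20-round count are arbitrary choices for illustration only; keccak_demo is a hypothetical helper, not part of the deleted file:

    #include <string.h>
    #include "internal-keccak.h"

    static void keccak_demo(void)
    {
        keccakp_200_state_t s200;
        keccakp_400_state_t s400;

        /* Start from all-zero states purely for the sake of the example */
        memset(s200.B, 0, KECCAKP_200_STATE_SIZE);
        memset(s400.B, 0, KECCAKP_400_STATE_SIZE);

        /* Keccak-p[200] always runs its full 18 rounds */
        keccakp_200_permute(&s200);

        /* Keccak-p[400] takes the round count as a parameter (up to 20) */
        keccakp_400_permute(&s400, 20);
    }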
- */ -void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-spongent-avr.S b/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-spongent-avr.S deleted file mode 100644 index 4a43458..0000000 --- a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-spongent-avr.S +++ /dev/null @@ -1,1677 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 256 -table_0: - .byte 238 - .byte 237 - .byte 235 - .byte 224 - .byte 226 - .byte 225 - .byte 228 - .byte 239 - .byte 231 - .byte 234 - .byte 232 - .byte 229 - .byte 233 - .byte 236 - .byte 227 - .byte 230 - .byte 222 - .byte 221 - .byte 219 - .byte 208 - .byte 210 - .byte 209 - .byte 212 - .byte 223 - .byte 215 - .byte 218 - .byte 216 - .byte 213 - .byte 217 - .byte 220 - .byte 211 - .byte 214 - .byte 190 - .byte 189 - .byte 187 - .byte 176 - .byte 178 - .byte 177 - .byte 180 - .byte 191 - .byte 183 - .byte 186 - .byte 184 - .byte 181 - .byte 185 - .byte 188 - .byte 179 - .byte 182 - .byte 14 - .byte 13 - .byte 11 - .byte 0 - .byte 2 - .byte 1 - .byte 4 - .byte 15 - .byte 7 - .byte 10 - .byte 8 - .byte 5 - .byte 9 - .byte 12 - .byte 3 - .byte 6 - .byte 46 - .byte 45 - .byte 43 - .byte 32 - .byte 34 - .byte 33 - .byte 36 - .byte 47 - .byte 39 - .byte 42 - .byte 40 - .byte 37 - .byte 41 - .byte 44 - .byte 35 - .byte 38 - .byte 30 - .byte 29 - .byte 27 - .byte 16 - .byte 18 - .byte 17 - .byte 20 - .byte 31 - .byte 23 - .byte 26 - .byte 24 - .byte 21 - .byte 25 - .byte 28 - .byte 19 - .byte 22 - .byte 78 - .byte 77 - .byte 75 - .byte 64 - .byte 66 - .byte 65 - .byte 68 - .byte 79 - .byte 71 - .byte 74 - .byte 72 - .byte 69 - .byte 73 - .byte 76 - .byte 67 - .byte 70 - .byte 254 - .byte 253 - .byte 251 - .byte 240 - .byte 242 - .byte 241 - .byte 244 - .byte 255 - .byte 247 - .byte 250 - .byte 248 - .byte 245 - .byte 249 - .byte 252 - .byte 243 - .byte 246 - .byte 126 - .byte 125 - .byte 123 - .byte 112 - .byte 114 - .byte 113 - .byte 116 - .byte 127 - .byte 119 - .byte 122 - .byte 120 - .byte 117 - .byte 121 - .byte 124 - .byte 115 - .byte 118 - .byte 174 - .byte 173 - .byte 171 - .byte 160 - .byte 162 - .byte 161 - .byte 164 - .byte 175 - .byte 167 - .byte 170 - .byte 168 - .byte 165 - .byte 169 - .byte 172 - .byte 163 - .byte 166 - .byte 142 - .byte 141 - .byte 139 - .byte 128 - .byte 130 - .byte 129 - .byte 132 - .byte 143 - .byte 135 - .byte 138 - .byte 136 - .byte 133 - .byte 137 - .byte 140 - .byte 131 - .byte 134 - .byte 94 - .byte 93 - .byte 91 - .byte 80 - .byte 82 - .byte 81 - .byte 84 - .byte 95 - .byte 87 - .byte 90 - .byte 88 - .byte 85 - .byte 89 - .byte 92 - .byte 83 - .byte 86 - .byte 158 - .byte 157 - .byte 155 - .byte 144 - .byte 146 - .byte 145 - .byte 148 - .byte 159 - .byte 151 - .byte 154 - .byte 152 - .byte 149 - .byte 153 - .byte 156 - .byte 147 - .byte 150 - .byte 206 - .byte 205 - .byte 203 - .byte 192 - .byte 194 - .byte 193 - .byte 196 - .byte 207 - .byte 199 - .byte 202 - .byte 200 - .byte 197 - .byte 201 - .byte 204 - .byte 195 - .byte 198 - .byte 62 - .byte 61 - .byte 59 - .byte 48 - .byte 50 - .byte 49 - .byte 52 - .byte 63 - .byte 55 - .byte 58 - .byte 56 - .byte 53 - .byte 57 - .byte 60 - .byte 51 - .byte 54 - .byte 110 - .byte 109 - .byte 107 - .byte 96 - .byte 98 - .byte 97 - .byte 100 - .byte 111 - .byte 103 - .byte 106 - 
.byte 104 - .byte 101 - .byte 105 - .byte 108 - .byte 99 - .byte 102 - - .text -.global spongent160_permute - .type spongent160_permute, @function -spongent160_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 -.L__stack_usage = 16 - ld r22,Z - ldd r23,Z+1 - ldd r28,Z+2 - ldd r29,Z+3 - ldd r2,Z+4 - ldd r3,Z+5 - ldd r4,Z+6 - ldd r5,Z+7 - ldd r6,Z+8 - ldd r7,Z+9 - ldd r8,Z+10 - ldd r9,Z+11 - ldd r10,Z+12 - ldd r11,Z+13 - ldd r12,Z+14 - ldd r13,Z+15 - ldd r14,Z+16 - ldd r15,Z+17 - ldd r24,Z+18 - ldd r25,Z+19 - movw r26,r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r21,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r21 -#endif - ldi r18,80 - ldi r19,117 - ldi r20,174 -25: - eor r22,r19 - eor r25,r20 - lsl r19 - bst r19,7 - bld r19,0 - mov r0,r1 - bst r19,6 - bld r0,0 - eor r19,r0 - andi r19,127 - lsr r20 - bst r20,0 - bld r20,7 - mov r0,r1 - bst r20,1 - bld r0,7 - eor r20,r0 - andi r20,254 - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r28 -#if defined(RAMPZ) - elpm r28,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r28,Z -#elif defined(__AVR_TINY__) - ld r28,Z -#else - lpm - mov r28,r0 -#endif - mov r30,r29 -#if defined(RAMPZ) - elpm r29,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r29,Z -#elif defined(__AVR_TINY__) - ld r29,Z -#else - lpm - mov r29,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov 
r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - bst r22,1 - bld r0,0 - bst r22,4 - bld r22,1 - bst r28,0 - bld r22,4 - bst r6,0 - bld r28,0 - bst r10,1 - bld r6,0 - bst r6,6 - bld r10,1 - bst r13,1 - bld r6,6 - bst r22,7 - bld r13,1 - bst r29,4 - bld r22,7 - bst r12,0 - bld r29,4 - bst r14,2 - bld r12,0 - bst r3,3 - bld r14,2 - bst r23,5 - bld r3,3 - bst r4,4 - bld r23,5 - bst r4,1 - bld r4,4 - bst r2,5 - bld r4,1 - bst r24,4 - bld r2,5 - bst r12,3 - bld r24,4 - bst r15,6 - bld r12,3 - bst r9,3 - bld r15,6 - bst r3,6 - bld r9,3 - bst r29,1 - bld r3,6 - bst r10,4 - bld r29,1 - bst r8,2 - bld r10,4 - bst r23,2 - bld r8,2 - bst r3,0 - bld r23,2 - bst r0,0 - bld r3,0 - bst r22,2 - bld r0,0 - bst r23,0 - bld r22,2 - bst r2,0 - bld r23,0 - bst r14,0 - bld r2,0 - bst r2,3 - bld r14,0 - bst r15,4 - bld r2,3 - bst r8,3 - bld r15,4 - bst r23,6 - bld r8,3 - bst r5,0 - bld r23,6 - bst r6,1 - bld r5,0 - bst r10,5 - bld r6,1 - bst r8,6 - bld r10,5 - bst r29,2 - bld r8,6 - bst r11,0 - bld r29,2 - bst r10,2 - bld r11,0 - bst r7,2 - bld r10,2 - bst r15,1 - bld r7,2 - bst r6,7 - bld r15,1 - bst r13,5 - bld r6,7 - bst r28,7 - bld r13,5 - bst r9,4 - bld r28,7 - bst r4,2 - bld r9,4 - bst r3,1 - bld r4,2 - bst r22,5 - bld r3,1 - bst r28,4 - bld r22,5 - bst r8,0 - bld r28,4 - bst r0,0 - bld r8,0 - bst r22,3 - bld r0,0 - bst r23,4 - bld r22,3 - bst r4,0 - bld r23,4 - bst r2,1 - bld r4,0 - bst r14,4 - bld r2,1 - bst r4,3 - bld r14,4 - bst r3,5 - bld r4,3 - bst r28,5 - bld r3,5 - bst r8,4 - bld r28,5 - bst r28,2 - bld r8,4 - bst r7,0 - bld r28,2 - bst r14,1 - bld r7,0 - bst r2,7 - bld r14,1 - bst r25,4 - bld r2,7 - bst r24,3 - bld r25,4 - bst r11,7 - bld r24,3 - bst r13,6 - bld r11,7 - bst r29,3 - bld r13,6 - bst r11,4 - bld r29,3 - bst r12,2 - bld r11,4 - bst r15,2 - bld r12,2 - bst r7,3 - bld r15,2 - bst r15,5 - bld r7,3 - bst r8,7 - bld r15,5 - bst r29,6 - bld r8,7 - bst r13,0 - bld r29,6 - bst r0,0 - bld r13,0 - bst r22,6 - bld r0,0 - bst r29,0 - bld r22,6 - bst r10,0 - bld r29,0 - bst r6,2 - bld r10,0 - bst r11,1 - bld r6,2 - bst r10,6 - bld r11,1 - bst r9,2 - bld r10,6 - bst r3,2 - bld r9,2 - bst r23,1 - bld r3,2 - bst r2,4 - bld r23,1 - bst r24,0 - bld r2,4 - bst r10,3 - bld r24,0 - bst r7,6 - bld r10,3 - bst r25,1 - bld r7,6 - bst r14,7 - bld r25,1 - bst r5,7 - bld r14,7 - bst r9,5 - bld r5,7 - bst r4,6 - bld r9,5 - bst r5,1 - bld r4,6 - bst r6,5 - bld r5,1 - bst r12,5 - bld r6,5 - bst r24,6 - bld r12,5 - bst r13,3 - bld r24,6 - bst r23,7 - bld r13,3 - bst r5,4 - bld r23,7 - bst r8,1 - bld r5,4 - bst r0,0 - bld r8,1 - bst r23,3 - bld r0,0 - bst r3,4 - bld r23,3 - bst r28,1 - bld r3,4 - bst r6,4 - bld r28,1 - bst r12,1 - bld r6,4 - bst r14,6 - bld r12,1 - bst r5,3 - bld r14,6 - bst r7,5 - bld r5,3 - bst r24,5 - bld r7,5 
- bst r12,7 - bld r24,5 - bst r25,6 - bld r12,7 - bst r25,3 - bld r25,6 - bst r15,7 - bld r25,3 - bst r9,7 - bld r15,7 - bst r5,6 - bld r9,7 - bst r9,1 - bld r5,6 - bst r2,6 - bld r9,1 - bst r25,0 - bld r2,6 - bst r14,3 - bld r25,0 - bst r3,7 - bld r14,3 - bst r29,5 - bld r3,7 - bst r12,4 - bld r29,5 - bst r24,2 - bld r12,4 - bst r11,3 - bld r24,2 - bst r11,6 - bld r11,3 - bst r13,2 - bld r11,6 - bst r0,0 - bld r13,2 - bst r28,3 - bld r0,0 - bst r7,4 - bld r28,3 - bst r24,1 - bld r7,4 - bst r10,7 - bld r24,1 - bst r9,6 - bld r10,7 - bst r5,2 - bld r9,6 - bst r7,1 - bld r5,2 - bst r14,5 - bld r7,1 - bst r4,7 - bld r14,5 - bst r5,5 - bld r4,7 - bst r8,5 - bld r5,5 - bst r28,6 - bld r8,5 - bst r9,0 - bld r28,6 - bst r2,2 - bld r9,0 - bst r15,0 - bld r2,2 - bst r6,3 - bld r15,0 - bst r11,5 - bld r6,3 - bst r12,6 - bld r11,5 - bst r25,2 - bld r12,6 - bst r15,3 - bld r25,2 - bst r7,7 - bld r15,3 - bst r25,5 - bld r7,7 - bst r24,7 - bld r25,5 - bst r13,7 - bld r24,7 - bst r29,7 - bld r13,7 - bst r13,4 - bld r29,7 - bst r0,0 - bld r13,4 - dec r18 - breq 5389f - rjmp 25b -5389: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - st X+,r22 - st X+,r23 - st X+,r28 - st X+,r29 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - st X+,r24 - st X+,r25 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size spongent160_permute, .-spongent160_permute - - .text -.global spongent176_permute - .type spongent176_permute, @function -spongent176_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ld r22,Z - ldd r23,Z+1 - ldd r28,Z+2 - ldd r29,Z+3 - ldd r2,Z+4 - ldd r3,Z+5 - ldd r4,Z+6 - ldd r5,Z+7 - ldd r6,Z+8 - ldd r7,Z+9 - ldd r8,Z+10 - ldd r9,Z+11 - ldd r10,Z+12 - ldd r11,Z+13 - ldd r12,Z+14 - ldd r13,Z+15 - ldd r14,Z+16 - ldd r15,Z+17 - ldd r24,Z+18 - ldd r25,Z+19 - ldd r16,Z+20 - ldd r17,Z+21 - movw r26,r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r21,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r21 -#endif - ldi r18,90 - ldi r19,69 - ldi r20,162 -27: - eor r22,r19 - eor r17,r20 - lsl r19 - bst r19,7 - bld r19,0 - mov r0,r1 - bst r19,6 - bld r0,0 - eor r19,r0 - andi r19,127 - lsr r20 - bst r20,0 - bld r20,7 - mov r0,r1 - bst r20,1 - bld r0,7 - eor r20,r0 - andi r20,254 - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r28 -#if defined(RAMPZ) - elpm r28,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r28,Z -#elif defined(__AVR_TINY__) - ld r28,Z -#else - lpm - mov r28,r0 -#endif - mov r30,r29 -#if defined(RAMPZ) - elpm r29,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r29,Z -#elif defined(__AVR_TINY__) - ld r29,Z -#else - lpm - mov r29,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z 
-#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - bst r22,1 - bld r0,0 - bst r22,4 - bld r22,1 - bst r28,0 - bld r22,4 - bst r6,0 - bld r28,0 - bst r8,1 - bld r6,0 - bst r24,5 - bld r8,1 - bst r6,7 - bld r24,5 - bst r11,5 - bld r6,7 - bst r8,6 - bld r11,5 - bst r17,1 - bld r8,6 - bst r24,7 - bld r17,1 - bst r7,7 - bld r24,7 - bst r15,5 - bld r7,7 - bst r2,7 - bld r15,5 - bst r25,4 - bld r2,7 - bst r10,3 - bld r25,4 - bst r3,6 - bld r10,3 - bst r23,1 - bld r3,6 - bst r2,4 - bld r23,1 - bst r24,0 - bld r2,4 - bst r4,3 - bld r24,0 - bst r29,5 - bld r4,3 - bst r12,4 - bld r29,5 - bst r12,2 - bld r12,4 - bst r11,2 - bld r12,2 - bst r7,2 - bld r11,2 - bst r13,1 - bld r7,2 - bst r14,6 - bld r13,1 - bst r23,3 - bld r14,6 - bst r3,4 - bld r23,3 - bst r0,0 - bld r3,4 - bst r22,2 - bld r0,0 - bst r23,0 - bld r22,2 - bst r2,0 - bld r23,0 - bst 
r14,0 - bld r2,0 - bst r16,2 - bld r14,0 - bst r13,3 - bld r16,2 - bst r15,6 - bld r13,3 - bst r3,3 - bld r15,6 - bst r17,4 - bld r3,3 - bst r16,3 - bld r17,4 - bst r13,7 - bld r16,3 - bst r25,6 - bld r13,7 - bst r11,3 - bld r25,6 - bst r7,6 - bld r11,3 - bst r15,1 - bld r7,6 - bst r28,7 - bld r15,1 - bst r9,4 - bld r28,7 - bst r28,2 - bld r9,4 - bst r7,0 - bld r28,2 - bst r12,1 - bld r7,0 - bst r10,6 - bld r12,1 - bst r5,2 - bld r10,6 - bst r5,1 - bld r5,2 - bst r4,5 - bld r5,1 - bst r2,5 - bld r4,5 - bst r24,4 - bld r2,5 - bst r6,3 - bld r24,4 - bst r9,5 - bld r6,3 - bst r28,6 - bld r9,5 - bst r9,0 - bld r28,6 - bst r0,0 - bld r9,0 - bst r22,3 - bld r0,0 - bst r23,4 - bld r22,3 - bst r4,0 - bld r23,4 - bst r28,1 - bld r4,0 - bst r6,4 - bld r28,1 - bst r10,1 - bld r6,4 - bst r2,6 - bld r10,1 - bst r25,0 - bld r2,6 - bst r8,3 - bld r25,0 - bst r25,5 - bld r8,3 - bst r10,7 - bld r25,5 - bst r5,6 - bld r10,7 - bst r7,1 - bld r5,6 - bst r12,5 - bld r7,1 - bst r12,6 - bld r12,5 - bst r13,2 - bld r12,6 - bst r15,2 - bld r13,2 - bst r29,3 - bld r15,2 - bst r11,4 - bld r29,3 - bst r8,2 - bld r11,4 - bst r25,1 - bld r8,2 - bst r8,7 - bld r25,1 - bst r17,5 - bld r8,7 - bst r16,7 - bld r17,5 - bst r15,7 - bld r16,7 - bst r3,7 - bld r15,7 - bst r23,5 - bld r3,7 - bst r4,4 - bld r23,5 - bst r2,1 - bld r4,4 - bst r14,4 - bld r2,1 - bst r0,0 - bld r14,4 - bst r22,5 - bld r0,0 - bst r28,4 - bld r22,5 - bst r8,0 - bld r28,4 - bst r24,1 - bld r8,0 - bst r4,7 - bld r24,1 - bst r3,5 - bld r4,7 - bst r0,0 - bld r3,5 - bst r22,6 - bld r0,0 - bst r29,0 - bld r22,6 - bst r10,0 - bld r29,0 - bst r2,2 - bld r10,0 - bst r15,0 - bld r2,2 - bst r28,3 - bld r15,0 - bst r7,4 - bld r28,3 - bst r14,1 - bld r7,4 - bst r16,6 - bld r14,1 - bst r15,3 - bld r16,6 - bst r29,7 - bld r15,3 - bst r13,4 - bld r29,7 - bst r24,2 - bld r13,4 - bst r5,3 - bld r24,2 - bst r5,5 - bld r5,3 - bst r6,5 - bld r5,5 - bst r10,5 - bld r6,5 - bst r4,6 - bld r10,5 - bst r3,1 - bld r4,6 - bst r16,4 - bld r3,1 - bst r14,3 - bld r16,4 - bst r17,6 - bld r14,3 - bst r17,3 - bld r17,6 - bst r25,7 - bld r17,3 - bst r11,7 - bld r25,7 - bst r9,6 - bld r11,7 - bst r29,2 - bld r9,6 - bst r11,0 - bld r29,2 - bst r6,2 - bld r11,0 - bst r9,1 - bld r6,2 - bst r0,0 - bld r9,1 - bst r22,7 - bld r0,0 - bst r29,4 - bld r22,7 - bst r12,0 - bld r29,4 - bst r10,2 - bld r12,0 - bst r3,2 - bld r10,2 - bst r17,0 - bld r3,2 - bst r24,3 - bld r17,0 - bst r5,7 - bld r24,3 - bst r7,5 - bld r5,7 - bst r14,5 - bld r7,5 - bst r0,0 - bld r14,5 - bst r23,2 - bld r0,0 - bst r3,0 - bld r23,2 - bst r16,0 - bld r3,0 - bst r12,3 - bld r16,0 - bst r11,6 - bld r12,3 - bst r9,2 - bld r11,6 - bst r0,0 - bld r9,2 - bst r23,6 - bld r0,0 - bst r5,0 - bld r23,6 - bst r4,1 - bld r5,0 - bst r28,5 - bld r4,1 - bst r8,4 - bld r28,5 - bst r16,1 - bld r8,4 - bst r12,7 - bld r16,1 - bst r13,6 - bld r12,7 - bst r25,2 - bld r13,6 - bst r9,3 - bld r25,2 - bst r0,0 - bld r9,3 - bst r23,7 - bld r0,0 - bst r5,4 - bld r23,7 - bst r6,1 - bld r5,4 - bst r8,5 - bld r6,1 - bst r16,5 - bld r8,5 - bst r14,7 - bld r16,5 - bst r0,0 - bld r14,7 - bst r29,1 - bld r0,0 - bst r10,4 - bld r29,1 - bst r4,2 - bld r10,4 - bst r0,0 - bld r4,2 - bst r29,6 - bld r0,0 - bst r13,0 - bld r29,6 - bst r14,2 - bld r13,0 - bst r17,2 - bld r14,2 - bst r25,3 - bld r17,2 - bst r9,7 - bld r25,3 - bst r0,0 - bld r9,7 - bst r2,3 - bld r0,0 - bst r15,4 - bld r2,3 - bst r0,0 - bld r15,4 - bst r6,6 - bld r0,0 - bst r11,1 - bld r6,6 - bst r0,0 - bld r11,1 - bst r7,3 - bld r0,0 - bst r13,5 - bld r7,3 - bst r24,6 - bld r13,5 - bst r0,0 - bld 
r24,6 - dec r18 - breq 5445f - rjmp 27b -5445: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - st X+,r22 - st X+,r23 - st X+,r28 - st X+,r29 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - st X+,r24 - st X+,r25 - st X+,r16 - st X+,r17 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size spongent176_permute, .-spongent176_permute - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-spongent.c b/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-spongent.c deleted file mode 100644 index 8e0d57d..0000000 --- a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-spongent.c +++ /dev/null @@ -1,350 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-spongent.h" - -#if !defined(__AVR__) - -/** - * \brief Applies the Spongent-pi S-box in parallel to the 8 nibbles - * of a 32-bit word. - * - * \param x3 The input values to the parallel S-boxes. - * - * \return The output values from the parallel S-boxes. - * - * Based on the bit-sliced S-box implementation from here: - * https://github.com/DadaIsCrazy/usuba/blob/master/data/sboxes/spongent.ua - * - * Note that spongent.ua numbers bits from highest to lowest, so x0 is the - * high bit of each nibble and x3 is the low bit. 
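Two S-box strategies appear side by side here: the AVR assembly above substitutes a whole byte at a time through the 256-entry table_0 in program memory, while the C implementation that follows uses a bit-sliced formula. Each table_0 entry is the 4-bit Spongent S-box applied to both nibbles of the index (0x00 -> 0xEE, 0x01 -> 0xED, and so on), which can be read directly off the first 16 entries. A hypothetical generator for such a byte-wide table, assuming exactly that S-box:

    #include <stdint.h>
    #include <stdio.h>

    /* 4-bit Spongent S-box, as implied by the first 16 entries of table_0 */
    static const uint8_t SBOX4[16] = {
        0xE, 0xD, 0xB, 0x0, 0x2, 0x1, 0x4, 0xF,
        0x7, 0xA, 0x8, 0x5, 0x9, 0xC, 0x3, 0x6
    };

    int main(void)
    {
        /* Substitute both nibbles of every possible byte value at once */
        unsigned b;
        for (b = 0; b < 256; ++b)
            printf("\t.byte %u\n",
                   (unsigned)((SBOX4[b >> 4] << 4) | SBOX4[b & 0x0F]));
        return 0;
    }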
- */ -static uint32_t spongent_sbox(uint32_t x3) -{ - uint32_t q0, q1, q2, q3, t0, t1, t2, t3; - uint32_t x2 = (x3 >> 1); - uint32_t x1 = (x2 >> 1); - uint32_t x0 = (x1 >> 1); - q0 = x0 ^ x2; - q1 = x1 ^ x2; - t0 = q0 & q1; - q2 = ~(x0 ^ x1 ^ x3 ^ t0); - t1 = q2 & ~x0; - q3 = x1 ^ t1; - t2 = q3 & (q3 ^ x2 ^ x3 ^ t0); - t3 = (x2 ^ t0) & ~(x1 ^ t0); - q0 = x1 ^ x2 ^ x3 ^ t2; - q1 = x0 ^ x2 ^ x3 ^ t0 ^ t1; - q2 = x0 ^ x1 ^ x2 ^ t1; - q3 = x0 ^ x3 ^ t0 ^ t3; - return ((q0 << 3) & 0x88888888U) | ((q1 << 2) & 0x44444444U) | - ((q2 << 1) & 0x22222222U) | (q3 & 0x11111111U); -} - -void spongent160_permute(spongent160_state_t *state) -{ - static uint8_t const RC[] = { - /* Round constants for Spongent-pi[160] */ - 0x75, 0xae, 0x6a, 0x56, 0x54, 0x2a, 0x29, 0x94, - 0x53, 0xca, 0x27, 0xe4, 0x4f, 0xf2, 0x1f, 0xf8, - 0x3e, 0x7c, 0x7d, 0xbe, 0x7a, 0x5e, 0x74, 0x2e, - 0x68, 0x16, 0x50, 0x0a, 0x21, 0x84, 0x43, 0xc2, - 0x07, 0xe0, 0x0e, 0x70, 0x1c, 0x38, 0x38, 0x1c, - 0x71, 0x8e, 0x62, 0x46, 0x44, 0x22, 0x09, 0x90, - 0x12, 0x48, 0x24, 0x24, 0x49, 0x92, 0x13, 0xc8, - 0x26, 0x64, 0x4d, 0xb2, 0x1b, 0xd8, 0x36, 0x6c, - 0x6d, 0xb6, 0x5a, 0x5a, 0x35, 0xac, 0x6b, 0xd6, - 0x56, 0x6a, 0x2d, 0xb4, 0x5b, 0xda, 0x37, 0xec, - 0x6f, 0xf6, 0x5e, 0x7a, 0x3d, 0xbc, 0x7b, 0xde, - 0x76, 0x6e, 0x6c, 0x36, 0x58, 0x1a, 0x31, 0x8c, - 0x63, 0xc6, 0x46, 0x62, 0x0d, 0xb0, 0x1a, 0x58, - 0x34, 0x2c, 0x69, 0x96, 0x52, 0x4a, 0x25, 0xa4, - 0x4b, 0xd2, 0x17, 0xe8, 0x2e, 0x74, 0x5d, 0xba, - 0x3b, 0xdc, 0x77, 0xee, 0x6e, 0x76, 0x5c, 0x3a, - 0x39, 0x9c, 0x73, 0xce, 0x66, 0x66, 0x4c, 0x32, - 0x19, 0x98, 0x32, 0x4c, 0x65, 0xa6, 0x4a, 0x52, - 0x15, 0xa8, 0x2a, 0x54, 0x55, 0xaa, 0x2b, 0xd4, - 0x57, 0xea, 0x2f, 0xf4, 0x5f, 0xfa, 0x3f, 0xfc - }; - const uint8_t *rc = RC; - uint32_t x0, x1, x2, x3, x4; - uint32_t t0, t1, t2, t3, t4; - uint8_t round; - - /* Load the state into local variables and convert from little-endian */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = state->W[0]; - x1 = state->W[1]; - x2 = state->W[2]; - x3 = state->W[3]; - x4 = state->W[4]; -#else - x0 = le_load_word32(state->B); - x1 = le_load_word32(state->B + 4); - x2 = le_load_word32(state->B + 8); - x3 = le_load_word32(state->B + 12); - x4 = le_load_word32(state->B + 16); -#endif - - /* Perform the 80 rounds of Spongent-pi[160] */ - for (round = 0; round < 80; ++round, rc += 2) { - /* Add the round constant to front and back of the state */ - x0 ^= rc[0]; - x4 ^= ((uint32_t)(rc[1])) << 24; - - /* Apply the S-box to all 4-bit groups in the state */ - t0 = spongent_sbox(x0); - t1 = spongent_sbox(x1); - t2 = spongent_sbox(x2); - t3 = spongent_sbox(x3); - t4 = spongent_sbox(x4); - - /* Permute the bits of the state. Bit i is moved to (40 * i) % 159 - * for all bits except the last which is left where it is. 
- * BCP = bit copy, BUP = move bit up, BDN = move bit down */ - #define BCP(x, bit) ((x) & (((uint32_t)1) << (bit))) - #define BUP(x, from, to) \ - (((x) << ((to) - (from))) & (((uint32_t)1) << (to))) - #define BDN(x, from, to) \ - (((x) >> ((from) - (to))) & (((uint32_t)1) << (to))) - x0 = BCP(t0, 0) ^ BDN(t0, 4, 1) ^ BDN(t0, 8, 2) ^ - BDN(t0, 12, 3) ^ BDN(t0, 16, 4) ^ BDN(t0, 20, 5) ^ - BDN(t0, 24, 6) ^ BDN(t0, 28, 7) ^ BUP(t1, 0, 8) ^ - BUP(t1, 4, 9) ^ BUP(t1, 8, 10) ^ BDN(t1, 12, 11) ^ - BDN(t1, 16, 12) ^ BDN(t1, 20, 13) ^ BDN(t1, 24, 14) ^ - BDN(t1, 28, 15) ^ BUP(t2, 0, 16) ^ BUP(t2, 4, 17) ^ - BUP(t2, 8, 18) ^ BUP(t2, 12, 19) ^ BUP(t2, 16, 20) ^ - BUP(t2, 20, 21) ^ BDN(t2, 24, 22) ^ BDN(t2, 28, 23) ^ - BUP(t3, 0, 24) ^ BUP(t3, 4, 25) ^ BUP(t3, 8, 26) ^ - BUP(t3, 12, 27) ^ BUP(t3, 16, 28) ^ BUP(t3, 20, 29) ^ - BUP(t3, 24, 30) ^ BUP(t3, 28, 31); - x1 = BUP(t0, 1, 8) ^ BUP(t0, 5, 9) ^ BUP(t0, 9, 10) ^ - BDN(t0, 13, 11) ^ BDN(t0, 17, 12) ^ BDN(t0, 21, 13) ^ - BDN(t0, 25, 14) ^ BDN(t0, 29, 15) ^ BUP(t1, 1, 16) ^ - BUP(t1, 5, 17) ^ BUP(t1, 9, 18) ^ BUP(t1, 13, 19) ^ - BUP(t1, 17, 20) ^ BCP(t1, 21) ^ BDN(t1, 25, 22) ^ - BDN(t1, 29, 23) ^ BUP(t2, 1, 24) ^ BUP(t2, 5, 25) ^ - BUP(t2, 9, 26) ^ BUP(t2, 13, 27) ^ BUP(t2, 17, 28) ^ - BUP(t2, 21, 29) ^ BUP(t2, 25, 30) ^ BUP(t2, 29, 31) ^ - BCP(t4, 0) ^ BDN(t4, 4, 1) ^ BDN(t4, 8, 2) ^ - BDN(t4, 12, 3) ^ BDN(t4, 16, 4) ^ BDN(t4, 20, 5) ^ - BDN(t4, 24, 6) ^ BDN(t4, 28, 7); - x2 = BUP(t0, 2, 16) ^ BUP(t0, 6, 17) ^ BUP(t0, 10, 18) ^ - BUP(t0, 14, 19) ^ BUP(t0, 18, 20) ^ BDN(t0, 22, 21) ^ - BDN(t0, 26, 22) ^ BDN(t0, 30, 23) ^ BUP(t1, 2, 24) ^ - BUP(t1, 6, 25) ^ BUP(t1, 10, 26) ^ BUP(t1, 14, 27) ^ - BUP(t1, 18, 28) ^ BUP(t1, 22, 29) ^ BUP(t1, 26, 30) ^ - BUP(t1, 30, 31) ^ BDN(t3, 1, 0) ^ BDN(t3, 5, 1) ^ - BDN(t3, 9, 2) ^ BDN(t3, 13, 3) ^ BDN(t3, 17, 4) ^ - BDN(t3, 21, 5) ^ BDN(t3, 25, 6) ^ BDN(t3, 29, 7) ^ - BUP(t4, 1, 8) ^ BUP(t4, 5, 9) ^ BUP(t4, 9, 10) ^ - BDN(t4, 13, 11) ^ BDN(t4, 17, 12) ^ BDN(t4, 21, 13) ^ - BDN(t4, 25, 14) ^ BDN(t4, 29, 15); - x3 = BUP(t0, 3, 24) ^ BUP(t0, 7, 25) ^ BUP(t0, 11, 26) ^ - BUP(t0, 15, 27) ^ BUP(t0, 19, 28) ^ BUP(t0, 23, 29) ^ - BUP(t0, 27, 30) ^ BCP(t0, 31) ^ BDN(t2, 2, 0) ^ - BDN(t2, 6, 1) ^ BDN(t2, 10, 2) ^ BDN(t2, 14, 3) ^ - BDN(t2, 18, 4) ^ BDN(t2, 22, 5) ^ BDN(t2, 26, 6) ^ - BDN(t2, 30, 7) ^ BUP(t3, 2, 8) ^ BUP(t3, 6, 9) ^ - BCP(t3, 10) ^ BDN(t3, 14, 11) ^ BDN(t3, 18, 12) ^ - BDN(t3, 22, 13) ^ BDN(t3, 26, 14) ^ BDN(t3, 30, 15) ^ - BUP(t4, 2, 16) ^ BUP(t4, 6, 17) ^ BUP(t4, 10, 18) ^ - BUP(t4, 14, 19) ^ BUP(t4, 18, 20) ^ BDN(t4, 22, 21) ^ - BDN(t4, 26, 22) ^ BDN(t4, 30, 23); - x4 = BDN(t1, 3, 0) ^ BDN(t1, 7, 1) ^ BDN(t1, 11, 2) ^ - BDN(t1, 15, 3) ^ BDN(t1, 19, 4) ^ BDN(t1, 23, 5) ^ - BDN(t1, 27, 6) ^ BDN(t1, 31, 7) ^ BUP(t2, 3, 8) ^ - BUP(t2, 7, 9) ^ BDN(t2, 11, 10) ^ BDN(t2, 15, 11) ^ - BDN(t2, 19, 12) ^ BDN(t2, 23, 13) ^ BDN(t2, 27, 14) ^ - BDN(t2, 31, 15) ^ BUP(t3, 3, 16) ^ BUP(t3, 7, 17) ^ - BUP(t3, 11, 18) ^ BUP(t3, 15, 19) ^ BUP(t3, 19, 20) ^ - BDN(t3, 23, 21) ^ BDN(t3, 27, 22) ^ BDN(t3, 31, 23) ^ - BUP(t4, 3, 24) ^ BUP(t4, 7, 25) ^ BUP(t4, 11, 26) ^ - BUP(t4, 15, 27) ^ BUP(t4, 19, 28) ^ BUP(t4, 23, 29) ^ - BUP(t4, 27, 30) ^ BCP(t4, 31); - } - - /* Store the local variables back to the state in little-endian order */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->W[0] = x0; - state->W[1] = x1; - state->W[2] = x2; - state->W[3] = x3; - state->W[4] = x4; -#else - le_store_word32(state->B, x0); - le_store_word32(state->B + 4, x1); - le_store_word32(state->B + 8, x2); - le_store_word32(state->B + 12, x3); - 
le_store_word32(state->B + 16, x4); -#endif -} - -void spongent176_permute(spongent176_state_t *state) -{ - static uint8_t const RC[] = { - /* Round constants for Spongent-pi[176] */ - 0x45, 0xa2, 0x0b, 0xd0, 0x16, 0x68, 0x2c, 0x34, - 0x59, 0x9a, 0x33, 0xcc, 0x67, 0xe6, 0x4e, 0x72, - 0x1d, 0xb8, 0x3a, 0x5c, 0x75, 0xae, 0x6a, 0x56, - 0x54, 0x2a, 0x29, 0x94, 0x53, 0xca, 0x27, 0xe4, - 0x4f, 0xf2, 0x1f, 0xf8, 0x3e, 0x7c, 0x7d, 0xbe, - 0x7a, 0x5e, 0x74, 0x2e, 0x68, 0x16, 0x50, 0x0a, - 0x21, 0x84, 0x43, 0xc2, 0x07, 0xe0, 0x0e, 0x70, - 0x1c, 0x38, 0x38, 0x1c, 0x71, 0x8e, 0x62, 0x46, - 0x44, 0x22, 0x09, 0x90, 0x12, 0x48, 0x24, 0x24, - 0x49, 0x92, 0x13, 0xc8, 0x26, 0x64, 0x4d, 0xb2, - 0x1b, 0xd8, 0x36, 0x6c, 0x6d, 0xb6, 0x5a, 0x5a, - 0x35, 0xac, 0x6b, 0xd6, 0x56, 0x6a, 0x2d, 0xb4, - 0x5b, 0xda, 0x37, 0xec, 0x6f, 0xf6, 0x5e, 0x7a, - 0x3d, 0xbc, 0x7b, 0xde, 0x76, 0x6e, 0x6c, 0x36, - 0x58, 0x1a, 0x31, 0x8c, 0x63, 0xc6, 0x46, 0x62, - 0x0d, 0xb0, 0x1a, 0x58, 0x34, 0x2c, 0x69, 0x96, - 0x52, 0x4a, 0x25, 0xa4, 0x4b, 0xd2, 0x17, 0xe8, - 0x2e, 0x74, 0x5d, 0xba, 0x3b, 0xdc, 0x77, 0xee, - 0x6e, 0x76, 0x5c, 0x3a, 0x39, 0x9c, 0x73, 0xce, - 0x66, 0x66, 0x4c, 0x32, 0x19, 0x98, 0x32, 0x4c, - 0x65, 0xa6, 0x4a, 0x52, 0x15, 0xa8, 0x2a, 0x54, - 0x55, 0xaa, 0x2b, 0xd4, 0x57, 0xea, 0x2f, 0xf4, - 0x5f, 0xfa, 0x3f, 0xfc - }; - const uint8_t *rc = RC; - uint32_t x0, x1, x2, x3, x4, x5; - uint32_t t0, t1, t2, t3, t4, t5; - uint8_t round; - - /* Load the state into local variables and convert from little-endian */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = state->W[0]; - x1 = state->W[1]; - x2 = state->W[2]; - x3 = state->W[3]; - x4 = state->W[4]; - x5 = state->W[5]; -#else - x0 = le_load_word32(state->B); - x1 = le_load_word32(state->B + 4); - x2 = le_load_word32(state->B + 8); - x3 = le_load_word32(state->B + 12); - x4 = le_load_word32(state->B + 16); - x5 = le_load_word16(state->B + 20); /* Last word is only 16 bits */ -#endif - - /* Perform the 90 rounds of Spongent-pi[176] */ - for (round = 0; round < 90; ++round, rc += 2) { - /* Add the round constant to front and back of the state */ - x0 ^= rc[0]; - x5 ^= ((uint32_t)(rc[1])) << 8; - - /* Apply the S-box to all 4-bit groups in the state */ - t0 = spongent_sbox(x0); - t1 = spongent_sbox(x1); - t2 = spongent_sbox(x2); - t3 = spongent_sbox(x3); - t4 = spongent_sbox(x4); - t5 = spongent_sbox(x5); - - /* Permute the bits of the state. Bit i is moved to (44 * i) % 175 - * for all bits except the last which is left where it is. 
- * BCP = bit copy, BUP = move bit up, BDN = move bit down */ - x0 = BCP(t0, 0) ^ BDN(t0, 4, 1) ^ BDN(t0, 8, 2) ^ - BDN(t0, 12, 3) ^ BDN(t0, 16, 4) ^ BDN(t0, 20, 5) ^ - BDN(t0, 24, 6) ^ BDN(t0, 28, 7) ^ BUP(t1, 0, 8) ^ - BUP(t1, 4, 9) ^ BUP(t1, 8, 10) ^ BDN(t1, 12, 11) ^ - BDN(t1, 16, 12) ^ BDN(t1, 20, 13) ^ BDN(t1, 24, 14) ^ - BDN(t1, 28, 15) ^ BUP(t2, 0, 16) ^ BUP(t2, 4, 17) ^ - BUP(t2, 8, 18) ^ BUP(t2, 12, 19) ^ BUP(t2, 16, 20) ^ - BUP(t2, 20, 21) ^ BDN(t2, 24, 22) ^ BDN(t2, 28, 23) ^ - BUP(t3, 0, 24) ^ BUP(t3, 4, 25) ^ BUP(t3, 8, 26) ^ - BUP(t3, 12, 27) ^ BUP(t3, 16, 28) ^ BUP(t3, 20, 29) ^ - BUP(t3, 24, 30) ^ BUP(t3, 28, 31); - x1 = BUP(t0, 1, 12) ^ BUP(t0, 5, 13) ^ BUP(t0, 9, 14) ^ - BUP(t0, 13, 15) ^ BDN(t0, 17, 16) ^ BDN(t0, 21, 17) ^ - BDN(t0, 25, 18) ^ BDN(t0, 29, 19) ^ BUP(t1, 1, 20) ^ - BUP(t1, 5, 21) ^ BUP(t1, 9, 22) ^ BUP(t1, 13, 23) ^ - BUP(t1, 17, 24) ^ BUP(t1, 21, 25) ^ BUP(t1, 25, 26) ^ - BDN(t1, 29, 27) ^ BUP(t2, 1, 28) ^ BUP(t2, 5, 29) ^ - BUP(t2, 9, 30) ^ BUP(t2, 13, 31) ^ BCP(t4, 0) ^ - BDN(t4, 4, 1) ^ BDN(t4, 8, 2) ^ BDN(t4, 12, 3) ^ - BDN(t4, 16, 4) ^ BDN(t4, 20, 5) ^ BDN(t4, 24, 6) ^ - BDN(t4, 28, 7) ^ BUP(t5, 0, 8) ^ BUP(t5, 4, 9) ^ - BUP(t5, 8, 10) ^ BDN(t5, 12, 11); - x2 = BUP(t0, 2, 24) ^ BUP(t0, 6, 25) ^ BUP(t0, 10, 26) ^ - BUP(t0, 14, 27) ^ BUP(t0, 18, 28) ^ BUP(t0, 22, 29) ^ - BUP(t0, 26, 30) ^ BUP(t0, 30, 31) ^ BDN(t2, 17, 0) ^ - BDN(t2, 21, 1) ^ BDN(t2, 25, 2) ^ BDN(t2, 29, 3) ^ - BUP(t3, 1, 4) ^ BCP(t3, 5) ^ BDN(t3, 9, 6) ^ - BDN(t3, 13, 7) ^ BDN(t3, 17, 8) ^ BDN(t3, 21, 9) ^ - BDN(t3, 25, 10) ^ BDN(t3, 29, 11) ^ BUP(t4, 1, 12) ^ - BUP(t4, 5, 13) ^ BUP(t4, 9, 14) ^ BUP(t4, 13, 15) ^ - BDN(t4, 17, 16) ^ BDN(t4, 21, 17) ^ BDN(t4, 25, 18) ^ - BDN(t4, 29, 19) ^ BUP(t5, 1, 20) ^ BUP(t5, 5, 21) ^ - BUP(t5, 9, 22) ^ BUP(t5, 13, 23); - x3 = BDN(t1, 2, 0) ^ BDN(t1, 6, 1) ^ BDN(t1, 10, 2) ^ - BDN(t1, 14, 3) ^ BDN(t1, 18, 4) ^ BDN(t1, 22, 5) ^ - BDN(t1, 26, 6) ^ BDN(t1, 30, 7) ^ BUP(t2, 2, 8) ^ - BUP(t2, 6, 9) ^ BCP(t2, 10) ^ BDN(t2, 14, 11) ^ - BDN(t2, 18, 12) ^ BDN(t2, 22, 13) ^ BDN(t2, 26, 14) ^ - BDN(t2, 30, 15) ^ BUP(t3, 2, 16) ^ BUP(t3, 6, 17) ^ - BUP(t3, 10, 18) ^ BUP(t3, 14, 19) ^ BUP(t3, 18, 20) ^ - BDN(t3, 22, 21) ^ BDN(t3, 26, 22) ^ BDN(t3, 30, 23) ^ - BUP(t4, 2, 24) ^ BUP(t4, 6, 25) ^ BUP(t4, 10, 26) ^ - BUP(t4, 14, 27) ^ BUP(t4, 18, 28) ^ BUP(t4, 22, 29) ^ - BUP(t4, 26, 30) ^ BUP(t4, 30, 31); - x4 = BUP(t0, 3, 4) ^ BDN(t0, 7, 5) ^ BDN(t0, 11, 6) ^ - BDN(t0, 15, 7) ^ BDN(t0, 19, 8) ^ BDN(t0, 23, 9) ^ - BDN(t0, 27, 10) ^ BDN(t0, 31, 11) ^ BUP(t1, 3, 12) ^ - BUP(t1, 7, 13) ^ BUP(t1, 11, 14) ^ BCP(t1, 15) ^ - BDN(t1, 19, 16) ^ BDN(t1, 23, 17) ^ BDN(t1, 27, 18) ^ - BDN(t1, 31, 19) ^ BUP(t2, 3, 20) ^ BUP(t2, 7, 21) ^ - BUP(t2, 11, 22) ^ BUP(t2, 15, 23) ^ BUP(t2, 19, 24) ^ - BUP(t2, 23, 25) ^ BDN(t2, 27, 26) ^ BDN(t2, 31, 27) ^ - BUP(t3, 3, 28) ^ BUP(t3, 7, 29) ^ BUP(t3, 11, 30) ^ - BUP(t3, 15, 31) ^ BDN(t5, 2, 0) ^ BDN(t5, 6, 1) ^ - BDN(t5, 10, 2) ^ BDN(t5, 14, 3); - x5 = BDN(t3, 19, 0) ^ BDN(t3, 23, 1) ^ BDN(t3, 27, 2) ^ - BDN(t3, 31, 3) ^ BUP(t4, 3, 4) ^ BDN(t4, 7, 5) ^ - BDN(t4, 11, 6) ^ BDN(t4, 15, 7) ^ BDN(t4, 19, 8) ^ - BDN(t4, 23, 9) ^ BDN(t4, 27, 10) ^ BDN(t4, 31, 11) ^ - BUP(t5, 3, 12) ^ BUP(t5, 7, 13) ^ BUP(t5, 11, 14) ^ - BCP(t5, 15); - } - - /* Store the local variables back to the state in little-endian order */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->W[0] = x0; - state->W[1] = x1; - state->W[2] = x2; - state->W[3] = x3; - state->W[4] = x4; - state->W[5] = x5; -#else - le_store_word32(state->B, x0); - le_store_word32(state->B + 4, x1); - 
le_store_word32(state->B + 8, x2); - le_store_word32(state->B + 12, x3); - le_store_word32(state->B + 16, x4); - le_store_word16(state->B + 20, x5); /* Last word is only 16 bits */ -#endif -} - -#endif /* !__AVR__ */ diff --git a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-spongent.h b/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-spongent.h deleted file mode 100644 index bb9823f..0000000 --- a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-spongent.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SPONGENT_H -#define LW_INTERNAL_SPONGENT_H - -#include "internal-util.h" - -/** - * \file internal-spongent.h - * \brief Internal implementation of the Spongent-pi permutation. - * - * References: https://www.esat.kuleuven.be/cosic/elephant/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the Spongent-pi[160] state in bytes. - */ -#define SPONGENT160_STATE_SIZE 20 - -/** - * \brief Size of the Spongent-pi[176] state in bytes. - */ -#define SPONGENT176_STATE_SIZE 22 - -/** - * \brief Structure of the internal state of the Spongent-pi[160] permutation. - */ -typedef union -{ - uint32_t W[5]; /**< Spongent-pi[160] state as 32-bit words */ - uint8_t B[20]; /**< Spongent-pi[160] state as bytes */ - -} spongent160_state_t; - -/** - * \brief Structure of the internal state of the Spongent-pi[176] permutation. - * - * Note: The state is technically only 176 bits, but we increase it to - * 192 bits so that we can use 32-bit word operations to manipulate the - * state. The extra bits in the last word are fixed to zero. - */ -typedef union -{ - uint32_t W[6]; /**< Spongent-pi[176] state as 32-bit words */ - uint8_t B[24]; /**< Spongent-pi[176] state as bytes */ - -} spongent176_state_t; - -/** - * \brief Permutes the Spongent-pi[160] state. - * - * \param state The Spongent-pi[160] state to be permuted. - */ -void spongent160_permute(spongent160_state_t *state); - -/** - * \brief Permutes the Spongent-pi[176] state. - * - * \param state The Spongent-pi[176] state to be permuted. 
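The long BCP/BUP/BDN expressions in internal-spongent.c above are an unrolled form of the bit permutation described in its comments: bit i moves to (40 * i) % 159 for Spongent-pi[160] and to (44 * i) % 175 for Spongent-pi[176], with the final bit left in place. A slow bit-by-bit reference for the 160-bit case, useful only for cross-checking the unrolled words; this helper is hypothetical and assumes the same little-endian bit numbering as the code above:

    #include <stdint.h>
    #include <string.h>

    /* Reference Spongent-pi[160] bit permutation over a 20-byte state:
     * input bit i goes to output bit (40 * i) % 159, except bit 159
     * which stays where it is. */
    static void spongent160_bit_permute_ref(uint8_t out[20], const uint8_t in[20])
    {
        unsigned i, j, bit;
        memset(out, 0, 20);
        for (i = 0; i < 160; ++i) {
            j = (i == 159) ? 159 : (40 * i) % 159;
            bit = (in[i / 8] >> (i % 8)) & 1;
            out[j / 8] |= (uint8_t)(bit << (j % 8));
        }
    }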
- */ -void spongent176_permute(spongent176_state_t *state); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-util.h b/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/elephant/Implementations/crypto_aead/elephant176v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
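This endianness detection relies entirely on compile-time defines. On a platform that the #if chain below does not cover, a quick runtime probe can show which branch ought to be selected before adding a new define; a standalone sketch, not part of the header:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint32_t probe = 0x01020304;
        const unsigned char *b = (const unsigned char *)&probe;
        if (b[0] == 0x04)
            printf("little-endian: LW_UTIL_LITTLE_ENDIAN applies\n");
        else
            printf("big-endian\n");
        return 0;
    }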
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
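For illustration, a minimal check of one composed case against a plain rotation, in standard C only (rotl32 and check_leftRotate12 are made-up names):

    #include <stdint.h>
    #include <assert.h>

    static uint32_t rotl32(uint32_t x, unsigned n)   /* n must be 1..31 here */
    {
        return (x << n) | (x >> (32u - n));
    }

    static void check_leftRotate12(uint32_t x)
    {
        /* leftRotate12 above: rotate left by 16 (cheap on AVR), then right by 4;
         * a right rotate by 4 is the same as a left rotate by 28. */
        assert(rotl32(rotl32(x, 16), 28) == rotl32(x, 12));
    }
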
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant176v1/rhys/elephant.c b/elephant/Implementations/crypto_aead/elephant176v1/rhys/elephant.c index 770f568..2f7abb3 100644 --- a/elephant/Implementations/crypto_aead/elephant176v1/rhys/elephant.c +++ b/elephant/Implementations/crypto_aead/elephant176v1/rhys/elephant.c @@ -660,7 +660,7 @@ static void delirium_process_ad if (size <= adlen) { /* Process a complete block */ lw_xor_block(state->B + posn, ad, size); - keccakp_200_permute(state, 18); + keccakp_200_permute(state); lw_xor_block(state->B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state->B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state->B, DELIRIUM_TAG_SIZE); @@ -680,7 +680,7 @@ static void delirium_process_ad /* Pad and absorb the final block */ state->B[posn] ^= 0x01; - keccakp_200_permute(state, 18); + keccakp_200_permute(state); lw_xor_block(state->B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state->B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state->B, DELIRIUM_TAG_SIZE); @@ -707,7 +707,7 @@ int delirium_aead_encrypt /* Hash the key and generate the initial mask */ memcpy(state.B, k, DELIRIUM_KEY_SIZE); memset(state.B + DELIRIUM_KEY_SIZE, 0, sizeof(state.B) - DELIRIUM_KEY_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); memcpy(mask, state.B, DELIRIUM_KEY_SIZE); memset(mask + DELIRIUM_KEY_SIZE, 0, sizeof(mask) - DELIRIUM_KEY_SIZE); memcpy(start, mask, sizeof(mask)); @@ -726,7 +726,7 @@ int delirium_aead_encrypt /* Encrypt using the current mask */ memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, m, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); memcpy(c, state.B, KECCAKP_200_STATE_SIZE); @@ -735,7 +735,7 @@ int delirium_aead_encrypt delirium_lfsr(next, mask); lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, next, KECCAKP_200_STATE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); @@ -751,7 +751,7 @@ int delirium_aead_encrypt unsigned temp = (unsigned)mlen; memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, m, temp); lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); memcpy(c, state.B, temp); @@ -762,7 +762,7 @@ int delirium_aead_encrypt memset(state.B + temp + 1, 0, KECCAKP_200_STATE_SIZE - temp - 1); lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, next, KECCAKP_200_STATE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); @@ -772,7 +772,7 @@ int delirium_aead_encrypt delirium_lfsr(next, mask); 
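All of these call-site hunks make the same change: keccakp_200_permute now always performs the full 18 rounds and no longer takes a round count (the matching prototype change is in the internal-keccak.h hunk further down). In caller terms:

    /* before */
    keccakp_200_permute(&state, 18);

    /* after */
    keccakp_200_permute(&state);
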
lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); state.B[0] ^= 0x01; - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); @@ -807,7 +807,7 @@ int delirium_aead_decrypt /* Hash the key and generate the initial mask */ memcpy(state.B, k, DELIRIUM_KEY_SIZE); memset(state.B + DELIRIUM_KEY_SIZE, 0, sizeof(state.B) - DELIRIUM_KEY_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); memcpy(mask, state.B, DELIRIUM_KEY_SIZE); memset(mask + DELIRIUM_KEY_SIZE, 0, sizeof(mask) - DELIRIUM_KEY_SIZE); memcpy(start, mask, sizeof(mask)); @@ -828,7 +828,7 @@ int delirium_aead_decrypt delirium_lfsr(next, mask); lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, c, KECCAKP_200_STATE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); @@ -836,7 +836,7 @@ int delirium_aead_decrypt /* Decrypt using the current mask */ memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block_2_src(m, state.B, c, KECCAKP_200_STATE_SIZE); @@ -853,7 +853,7 @@ int delirium_aead_decrypt lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, c, temp); state.B[temp] ^= 0x01; - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); @@ -861,7 +861,7 @@ int delirium_aead_decrypt /* Decrypt the last block using the current mask */ memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, temp); lw_xor_block_2_src(m, state.B, c, temp); c += temp; @@ -870,7 +870,7 @@ int delirium_aead_decrypt delirium_lfsr(next, mask); lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); state.B[0] ^= 0x01; - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); diff --git a/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-keccak-avr.S b/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-keccak-avr.S new file mode 100644 index 0000000..e50ccaf --- /dev/null +++ b/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-keccak-avr.S @@ -0,0 +1,1552 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global keccakp_200_permute + .type keccakp_200_permute, @function +keccakp_200_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r26,Z+6 + ldd r27,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r4,Z+12 + ldd r5,Z+13 + ldd r6,Z+14 + ldd r7,Z+15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd 
r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + ldd r24,Z+24 + push r31 + push r30 + rcall 82f + ldi r30,1 + eor r18,r30 + rcall 82f + ldi r30,130 + eor r18,r30 + rcall 82f + ldi r30,138 + eor r18,r30 + rcall 82f + mov r30,r1 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,1 + eor r18,r30 + rcall 82f + ldi r30,129 + eor r18,r30 + rcall 82f + ldi r30,9 + eor r18,r30 + rcall 82f + ldi r30,138 + eor r18,r30 + rcall 82f + ldi r30,136 + eor r18,r30 + rcall 82f + ldi r30,9 + eor r18,r30 + rcall 82f + ldi r30,10 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,137 + eor r18,r30 + rcall 82f + ldi r30,3 + eor r18,r30 + rcall 82f + ldi r30,2 + eor r18,r30 + rcall 82f + ldi r30,128 + eor r18,r30 + rjmp 420f +82: + mov r30,r18 + eor r30,r23 + eor r30,r2 + eor r30,r7 + eor r30,r12 + mov r31,r19 + eor r31,r26 + eor r31,r3 + eor r31,r8 + eor r31,r13 + mov r25,r20 + eor r25,r27 + eor r25,r4 + eor r25,r9 + eor r25,r14 + mov r16,r21 + eor r16,r28 + eor r16,r5 + eor r16,r10 + eor r16,r15 + mov r17,r22 + eor r17,r29 + eor r17,r6 + eor r17,r11 + eor r17,r24 + mov r0,r31 + lsl r0 + adc r0,r1 + eor r0,r17 + eor r18,r0 + eor r23,r0 + eor r2,r0 + eor r7,r0 + eor r12,r0 + mov r0,r25 + lsl r0 + adc r0,r1 + eor r0,r30 + eor r19,r0 + eor r26,r0 + eor r3,r0 + eor r8,r0 + eor r13,r0 + mov r0,r16 + lsl r0 + adc r0,r1 + eor r0,r31 + eor r20,r0 + eor r27,r0 + eor r4,r0 + eor r9,r0 + eor r14,r0 + mov r0,r17 + lsl r0 + adc r0,r1 + eor r0,r25 + eor r21,r0 + eor r28,r0 + eor r5,r0 + eor r10,r0 + eor r15,r0 + mov r0,r30 + lsl r0 + adc r0,r1 + eor r0,r16 + eor r22,r0 + eor r29,r0 + eor r6,r0 + eor r11,r0 + eor r24,r0 + mov r30,r19 + swap r26 + mov r19,r26 + swap r29 + mov r26,r29 + mov r0,r1 + lsr r14 + ror r0 + lsr r14 + ror r0 + lsr r14 + ror r0 + or r14,r0 + mov r29,r14 + bst r6,0 + lsr r6 + bld r6,7 + mov r14,r6 + lsl r12 + adc r12,r1 + lsl r12 + adc r12,r1 + mov r6,r12 + mov r0,r1 + lsr r20 + ror r0 + lsr r20 + ror r0 + or r20,r0 + mov r12,r20 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + mov r20,r4 + lsl r5 + adc r5,r1 + mov r4,r5 + mov r5,r11 + mov r11,r15 + lsl r7 + adc r7,r1 + mov r15,r7 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + mov r7,r22 + mov r0,r1 + lsr r24 + ror r0 + lsr r24 + ror r0 + or r24,r0 + mov r22,r24 + lsl r13 + adc r13,r1 + lsl r13 + adc r13,r1 + mov r24,r13 + bst r28,0 + lsr r28 + bld r28,7 + mov r13,r28 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r28,r8 + swap r23 + mov r8,r23 + swap r21 + mov r23,r21 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r21,r10 + bst r9,0 + lsr r9 + bld r9,7 + mov r10,r9 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + mov r9,r3 + mov r0,r1 + lsr r27 + ror r0 + lsr r27 + ror r0 + or r27,r0 + mov r3,r27 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + mov r27,r2 + lsl r30 + adc r30,r1 + mov r2,r30 + mov r30,r18 + mov r31,r19 + mov r25,r20 + mov r16,r21 + mov r17,r22 + mov r18,r25 + mov r0,r31 + com r0 + and r18,r0 + eor r18,r30 + mov r19,r16 + mov r0,r25 + com r0 + and r19,r0 + eor r19,r31 + mov r20,r17 + mov r0,r16 + com r0 + and r20,r0 + eor r20,r25 + mov r21,r30 + mov r0,r17 + com r0 + and r21,r0 + eor r21,r16 + mov r22,r31 + mov r0,r30 + com r0 + and r22,r0 + eor r22,r17 + mov r30,r23 + mov r31,r26 + mov r25,r27 + mov r16,r28 + mov r17,r29 + mov r23,r25 + mov r0,r31 + com r0 + and r23,r0 + eor r23,r30 + mov 
r26,r16 + mov r0,r25 + com r0 + and r26,r0 + eor r26,r31 + mov r27,r17 + mov r0,r16 + com r0 + and r27,r0 + eor r27,r25 + mov r28,r30 + mov r0,r17 + com r0 + and r28,r0 + eor r28,r16 + mov r29,r31 + mov r0,r30 + com r0 + and r29,r0 + eor r29,r17 + mov r30,r2 + mov r31,r3 + mov r25,r4 + mov r16,r5 + mov r17,r6 + mov r2,r25 + mov r0,r31 + com r0 + and r2,r0 + eor r2,r30 + mov r3,r16 + mov r0,r25 + com r0 + and r3,r0 + eor r3,r31 + mov r4,r17 + mov r0,r16 + com r0 + and r4,r0 + eor r4,r25 + mov r5,r30 + mov r0,r17 + com r0 + and r5,r0 + eor r5,r16 + mov r6,r31 + mov r0,r30 + com r0 + and r6,r0 + eor r6,r17 + mov r30,r7 + mov r31,r8 + mov r25,r9 + mov r16,r10 + mov r17,r11 + mov r7,r25 + mov r0,r31 + com r0 + and r7,r0 + eor r7,r30 + mov r8,r16 + mov r0,r25 + com r0 + and r8,r0 + eor r8,r31 + mov r9,r17 + mov r0,r16 + com r0 + and r9,r0 + eor r9,r25 + mov r10,r30 + mov r0,r17 + com r0 + and r10,r0 + eor r10,r16 + mov r11,r31 + mov r0,r30 + com r0 + and r11,r0 + eor r11,r17 + mov r30,r12 + mov r31,r13 + mov r25,r14 + mov r16,r15 + mov r17,r24 + mov r12,r25 + mov r0,r31 + com r0 + and r12,r0 + eor r12,r30 + mov r13,r16 + mov r0,r25 + com r0 + and r13,r0 + eor r13,r31 + mov r14,r17 + mov r0,r16 + com r0 + and r14,r0 + eor r14,r25 + mov r15,r30 + mov r0,r17 + com r0 + and r15,r0 + eor r15,r16 + mov r24,r31 + mov r0,r30 + com r0 + and r24,r0 + eor r24,r17 + ret +420: + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r22 + std Z+5,r23 + std Z+6,r26 + std Z+7,r27 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r4 + std Z+13,r5 + std Z+14,r6 + std Z+15,r7 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + std Z+24,r24 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size keccakp_200_permute, .-keccakp_200_permute + + .text +.global keccakp_400_permute + .type keccakp_400_permute, @function +keccakp_400_permute: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + movw r30,r24 +.L__stack_usage = 17 + ld r6,Z + ldd r7,Z+1 + ldd r8,Z+2 + ldd r9,Z+3 + ldd r10,Z+4 + ldd r11,Z+5 + ldd r12,Z+6 + ldd r13,Z+7 + ldd r14,Z+8 + ldd r15,Z+9 + cpi r22,20 + brcs 15f + rcall 153f + ldi r23,1 + eor r6,r23 +15: + cpi r22,19 + brcs 23f + rcall 153f + ldi r23,130 + eor r6,r23 + ldi r17,128 + eor r7,r17 +23: + cpi r22,18 + brcs 31f + rcall 153f + ldi r23,138 + eor r6,r23 + ldi r17,128 + eor r7,r17 +31: + cpi r22,17 + brcs 37f + rcall 153f + ldi r23,128 + eor r7,r23 +37: + cpi r22,16 + brcs 45f + rcall 153f + ldi r23,139 + eor r6,r23 + ldi r17,128 + eor r7,r17 +45: + cpi r22,15 + brcs 51f + rcall 153f + ldi r23,1 + eor r6,r23 +51: + cpi r22,14 + brcs 59f + rcall 153f + ldi r23,129 + eor r6,r23 + ldi r17,128 + eor r7,r17 +59: + cpi r22,13 + brcs 67f + rcall 153f + ldi r23,9 + eor r6,r23 + ldi r17,128 + eor r7,r17 +67: + cpi r22,12 + brcs 73f + rcall 153f + ldi r23,138 + eor r6,r23 +73: + cpi r22,11 + brcs 79f + rcall 153f + ldi r23,136 + eor r6,r23 +79: + cpi r22,10 + brcs 87f + rcall 153f + ldi r23,9 + eor r6,r23 + ldi r17,128 + eor r7,r17 +87: + cpi r22,9 + brcs 93f + rcall 153f + ldi r23,10 + eor r6,r23 +93: + cpi r22,8 + brcs 101f + rcall 153f + ldi r23,139 + eor r6,r23 + ldi r17,128 + eor r7,r17 +101: + cpi r22,7 + brcs 107f + rcall 153f + ldi r23,139 + eor r6,r23 
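; Reading aid (hand-written note): r22 holds the requested round count, and each
; "cpi r22,N" / "brcs" pair above and below skips one round (the "rcall 153f"
; round body plus the iota constant EORs) when fewer than N rounds were
; requested, so only the last `rounds` of the 20 rounds are executed, matching
; the C loop "for (round = 20 - rounds; round < 20; ++round)".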
+107: + cpi r22,6 + brcs 115f + rcall 153f + ldi r23,137 + eor r6,r23 + ldi r17,128 + eor r7,r17 +115: + cpi r22,5 + brcs 123f + rcall 153f + ldi r23,3 + eor r6,r23 + ldi r17,128 + eor r7,r17 +123: + cpi r22,4 + brcs 131f + rcall 153f + ldi r23,2 + eor r6,r23 + ldi r17,128 + eor r7,r17 +131: + cpi r22,3 + brcs 137f + rcall 153f + ldi r23,128 + eor r6,r23 +137: + cpi r22,2 + brcs 145f + rcall 153f + ldi r23,10 + eor r6,r23 + ldi r17,128 + eor r7,r17 +145: + cpi r22,1 + brcs 151f + rcall 153f + ldi r23,10 + eor r6,r23 +151: + rjmp 1004f +153: + movw r18,r6 + ldd r0,Z+10 + eor r18,r0 + ldd r0,Z+11 + eor r19,r0 + ldd r0,Z+20 + eor r18,r0 + ldd r0,Z+21 + eor r19,r0 + ldd r0,Z+30 + eor r18,r0 + ldd r0,Z+31 + eor r19,r0 + ldd r0,Z+40 + eor r18,r0 + ldd r0,Z+41 + eor r19,r0 + movw r20,r8 + ldd r0,Z+12 + eor r20,r0 + ldd r0,Z+13 + eor r21,r0 + ldd r0,Z+22 + eor r20,r0 + ldd r0,Z+23 + eor r21,r0 + ldd r0,Z+32 + eor r20,r0 + ldd r0,Z+33 + eor r21,r0 + ldd r0,Z+42 + eor r20,r0 + ldd r0,Z+43 + eor r21,r0 + movw r26,r10 + ldd r0,Z+14 + eor r26,r0 + ldd r0,Z+15 + eor r27,r0 + ldd r0,Z+24 + eor r26,r0 + ldd r0,Z+25 + eor r27,r0 + ldd r0,Z+34 + eor r26,r0 + ldd r0,Z+35 + eor r27,r0 + ldd r0,Z+44 + eor r26,r0 + ldd r0,Z+45 + eor r27,r0 + movw r2,r12 + ldd r0,Z+16 + eor r2,r0 + ldd r0,Z+17 + eor r3,r0 + ldd r0,Z+26 + eor r2,r0 + ldd r0,Z+27 + eor r3,r0 + ldd r0,Z+36 + eor r2,r0 + ldd r0,Z+37 + eor r3,r0 + ldd r0,Z+46 + eor r2,r0 + ldd r0,Z+47 + eor r3,r0 + movw r4,r14 + ldd r0,Z+18 + eor r4,r0 + ldd r0,Z+19 + eor r5,r0 + ldd r0,Z+28 + eor r4,r0 + ldd r0,Z+29 + eor r5,r0 + ldd r0,Z+38 + eor r4,r0 + ldd r0,Z+39 + eor r5,r0 + ldd r0,Z+48 + eor r4,r0 + ldd r0,Z+49 + eor r5,r0 + movw r24,r20 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r4 + eor r25,r5 + eor r6,r24 + eor r7,r25 + ldd r0,Z+10 + eor r0,r24 + std Z+10,r0 + ldd r0,Z+11 + eor r0,r25 + std Z+11,r0 + ldd r0,Z+20 + eor r0,r24 + std Z+20,r0 + ldd r0,Z+21 + eor r0,r25 + std Z+21,r0 + ldd r0,Z+30 + eor r0,r24 + std Z+30,r0 + ldd r0,Z+31 + eor r0,r25 + std Z+31,r0 + ldd r0,Z+40 + eor r0,r24 + std Z+40,r0 + ldd r0,Z+41 + eor r0,r25 + std Z+41,r0 + movw r24,r26 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r18 + eor r25,r19 + eor r8,r24 + eor r9,r25 + ldd r0,Z+12 + eor r0,r24 + std Z+12,r0 + ldd r0,Z+13 + eor r0,r25 + std Z+13,r0 + ldd r0,Z+22 + eor r0,r24 + std Z+22,r0 + ldd r0,Z+23 + eor r0,r25 + std Z+23,r0 + ldd r0,Z+32 + eor r0,r24 + std Z+32,r0 + ldd r0,Z+33 + eor r0,r25 + std Z+33,r0 + ldd r0,Z+42 + eor r0,r24 + std Z+42,r0 + ldd r0,Z+43 + eor r0,r25 + std Z+43,r0 + movw r24,r2 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r20 + eor r25,r21 + eor r10,r24 + eor r11,r25 + ldd r0,Z+14 + eor r0,r24 + std Z+14,r0 + ldd r0,Z+15 + eor r0,r25 + std Z+15,r0 + ldd r0,Z+24 + eor r0,r24 + std Z+24,r0 + ldd r0,Z+25 + eor r0,r25 + std Z+25,r0 + ldd r0,Z+34 + eor r0,r24 + std Z+34,r0 + ldd r0,Z+35 + eor r0,r25 + std Z+35,r0 + ldd r0,Z+44 + eor r0,r24 + std Z+44,r0 + ldd r0,Z+45 + eor r0,r25 + std Z+45,r0 + movw r24,r4 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r26 + eor r25,r27 + eor r12,r24 + eor r13,r25 + ldd r0,Z+16 + eor r0,r24 + std Z+16,r0 + ldd r0,Z+17 + eor r0,r25 + std Z+17,r0 + ldd r0,Z+26 + eor r0,r24 + std Z+26,r0 + ldd r0,Z+27 + eor r0,r25 + std Z+27,r0 + ldd r0,Z+36 + eor r0,r24 + std Z+36,r0 + ldd r0,Z+37 + eor r0,r25 + std Z+37,r0 + ldd r0,Z+46 + eor r0,r24 + std Z+46,r0 + ldd r0,Z+47 + eor r0,r25 + std Z+47,r0 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r2 + eor r25,r3 + eor r14,r24 + eor r15,r25 + ldd r0,Z+18 + eor r0,r24 + std Z+18,r0 + ldd 
r0,Z+19 + eor r0,r25 + std Z+19,r0 + ldd r0,Z+28 + eor r0,r24 + std Z+28,r0 + ldd r0,Z+29 + eor r0,r25 + std Z+29,r0 + ldd r0,Z+38 + eor r0,r24 + std Z+38,r0 + ldd r0,Z+39 + eor r0,r25 + std Z+39,r0 + ldd r0,Z+48 + eor r0,r24 + std Z+48,r0 + ldd r0,Z+49 + eor r0,r25 + std Z+49,r0 + movw r24,r8 + ldd r8,Z+12 + ldd r9,Z+13 + mov r0,r9 + mov r9,r8 + mov r8,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldd r18,Z+18 + ldd r19,Z+19 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+12,r18 + std Z+13,r19 + ldd r18,Z+44 + ldd r19,Z+45 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+18,r18 + std Z+19,r19 + ldd r18,Z+28 + ldd r19,Z+29 + mov r0,r19 + mov r19,r18 + mov r18,r0 + bst r18,0 + lsr r19 + ror r18 + bld r19,7 + std Z+44,r18 + std Z+45,r19 + ldd r18,Z+40 + ldd r19,Z+41 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+28,r18 + std Z+29,r19 + movw r18,r10 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+40,r18 + std Z+41,r19 + ldd r10,Z+24 + ldd r11,Z+25 + mov r0,r11 + mov r11,r10 + mov r10,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldd r18,Z+26 + ldd r19,Z+27 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + std Z+24,r18 + std Z+25,r19 + ldd r18,Z+38 + ldd r19,Z+39 + mov r0,r19 + mov r19,r18 + mov r18,r0 + std Z+26,r18 + std Z+27,r19 + ldd r18,Z+46 + ldd r19,Z+47 + mov r0,r19 + mov r19,r18 + mov r18,r0 + std Z+38,r18 + std Z+39,r19 + ldd r18,Z+30 + ldd r19,Z+31 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + std Z+46,r18 + std Z+47,r19 + movw r18,r14 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+30,r18 + std Z+31,r19 + ldd r14,Z+48 + ldd r15,Z+49 + mov r0,r1 + lsr r15 + ror r14 + ror r0 + lsr r15 + ror r14 + ror r0 + or r15,r0 + ldd r18,Z+42 + ldd r19,Z+43 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+48,r18 + std Z+49,r19 + ldd r18,Z+16 + ldd r19,Z+17 + mov r0,r19 + mov r19,r18 + mov r18,r0 + bst r18,0 + lsr r19 + ror r18 + bld r19,7 + std Z+42,r18 + std Z+43,r19 + ldd r18,Z+32 + ldd r19,Z+33 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+16,r18 + std Z+17,r19 + ldd r18,Z+10 + ldd r19,Z+11 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+32,r18 + std Z+33,r19 + movw r18,r12 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+10,r18 + std Z+11,r19 + ldd r12,Z+36 + ldd r13,Z+37 + mov r0,r13 + mov r13,r12 + mov r12,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + or r13,r0 + ldd r18,Z+34 + ldd r19,Z+35 + bst r18,0 + lsr r19 + ror r18 + bld r19,7 + std Z+36,r18 + std Z+37,r19 + ldd r18,Z+22 + ldd r19,Z+23 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+34,r18 + std Z+35,r19 + ldd r18,Z+14 + ldd r19,Z+15 + mov r0,r19 + mov r19,r18 + mov r18,r0 + mov r0,r1 + 
lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+22,r18 + std Z+23,r19 + ldd r18,Z+20 + ldd r19,Z+21 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+14,r18 + std Z+15,r19 + lsl r24 + rol r25 + adc r24,r1 + std Z+20,r24 + std Z+21,r25 + movw r18,r6 + movw r20,r8 + movw r26,r10 + movw r2,r12 + movw r4,r14 + movw r6,r26 + mov r0,r20 + com r0 + and r6,r0 + mov r0,r21 + com r0 + and r7,r0 + eor r6,r18 + eor r7,r19 + movw r8,r2 + mov r0,r26 + com r0 + and r8,r0 + mov r0,r27 + com r0 + and r9,r0 + eor r8,r20 + eor r9,r21 + movw r10,r4 + mov r0,r2 + com r0 + and r10,r0 + mov r0,r3 + com r0 + and r11,r0 + eor r10,r26 + eor r11,r27 + movw r12,r18 + mov r0,r4 + com r0 + and r12,r0 + mov r0,r5 + com r0 + and r13,r0 + eor r12,r2 + eor r13,r3 + movw r14,r20 + mov r0,r18 + com r0 + and r14,r0 + mov r0,r19 + com r0 + and r15,r0 + eor r14,r4 + eor r15,r5 + ldd r18,Z+10 + ldd r19,Z+11 + ldd r20,Z+12 + ldd r21,Z+13 + ldd r26,Z+14 + ldd r27,Z+15 + ldd r2,Z+16 + ldd r3,Z+17 + ldd r4,Z+18 + ldd r5,Z+19 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+10,r24 + std Z+11,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+12,r24 + std Z+13,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+14,r24 + std Z+15,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+16,r24 + std Z+17,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+18,r24 + std Z+19,r25 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + ldd r26,Z+24 + ldd r27,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r4,Z+28 + ldd r5,Z+29 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+20,r24 + std Z+21,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+22,r24 + std Z+23,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+24,r24 + std Z+25,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+26,r24 + std Z+27,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+28,r24 + std Z+29,r25 + ldd r18,Z+30 + ldd r19,Z+31 + ldd r20,Z+32 + ldd r21,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + ldd r2,Z+36 + ldd r3,Z+37 + ldd r4,Z+38 + ldd r5,Z+39 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+30,r24 + std Z+31,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+32,r24 + std Z+33,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+34,r24 + std Z+35,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+36,r24 + std Z+37,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+38,r24 + std Z+39,r25 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + ldd 
r26,Z+44 + ldd r27,Z+45 + ldd r2,Z+46 + ldd r3,Z+47 + ldd r4,Z+48 + ldd r5,Z+49 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+40,r24 + std Z+41,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+42,r24 + std Z+43,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+44,r24 + std Z+45,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+46,r24 + std Z+47,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+48,r24 + std Z+49,r25 + ret +1004: + st Z,r6 + std Z+1,r7 + std Z+2,r8 + std Z+3,r9 + std Z+4,r10 + std Z+5,r11 + std Z+6,r12 + std Z+7,r13 + std Z+8,r14 + std Z+9,r15 + pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size keccakp_400_permute, .-keccakp_400_permute + +#endif diff --git a/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-keccak.c b/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-keccak.c index c3c4011..60539df 100644 --- a/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-keccak.c +++ b/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-keccak.c @@ -22,74 +22,79 @@ #include "internal-keccak.h" +#if !defined(__AVR__) + /* Faster method to compute ((x + y) % 5) that avoids the division */ static unsigned char const addMod5Table[9] = { 0, 1, 2, 3, 4, 0, 1, 2, 3 }; #define addMod5(x, y) (addMod5Table[(x) + (y)]) -void keccakp_200_permute(keccakp_200_state_t *state, unsigned rounds) +void keccakp_200_permute(keccakp_200_state_t *state) { static uint8_t const RC[18] = { 0x01, 0x82, 0x8A, 0x00, 0x8B, 0x01, 0x81, 0x09, 0x8A, 0x88, 0x09, 0x0A, 0x8B, 0x8B, 0x89, 0x03, 0x02, 0x80 }; - uint8_t B[5][5]; + uint8_t C[5]; uint8_t D; unsigned round; unsigned index, index2; - for (round = 18 - rounds; round < 18; ++round) { + for (round = 0; round < 18; ++round) { /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. To save a bit of memory, - * we use the first row of B to store C and compute D on the fly */ + * arrays of size 5 called C and D. Compute D on the fly */ for (index = 0; index < 5; ++index) { - B[0][index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; + C[index] = state->A[0][index] ^ state->A[1][index] ^ + state->A[2][index] ^ state->A[3][index] ^ + state->A[4][index]; } for (index = 0; index < 5; ++index) { - D = B[0][addMod5(index, 4)] ^ - leftRotate1_8(B[0][addMod5(index, 1)]); + D = C[addMod5(index, 4)] ^ + leftRotate1_8(C[addMod5(index, 1)]); for (index2 = 0; index2 < 5; ++index2) state->A[index2][index] ^= D; } /* Step mapping rho and pi combined into a single step. 
* Rotate all lanes by a specific offset and rearrange */ - B[0][0] = state->A[0][0]; - B[1][0] = leftRotate4_8(state->A[0][3]); - B[2][0] = leftRotate1_8(state->A[0][1]); - B[3][0] = leftRotate3_8(state->A[0][4]); - B[4][0] = leftRotate6_8(state->A[0][2]); - B[0][1] = leftRotate4_8(state->A[1][1]); - B[1][1] = leftRotate4_8(state->A[1][4]); - B[2][1] = leftRotate6_8(state->A[1][2]); - B[3][1] = leftRotate4_8(state->A[1][0]); - B[4][1] = leftRotate7_8(state->A[1][3]); - B[0][2] = leftRotate3_8(state->A[2][2]); - B[1][2] = leftRotate3_8(state->A[2][0]); - B[2][2] = leftRotate1_8(state->A[2][3]); - B[3][2] = leftRotate2_8(state->A[2][1]); - B[4][2] = leftRotate7_8(state->A[2][4]); - B[0][3] = leftRotate5_8(state->A[3][3]); - B[1][3] = leftRotate5_8(state->A[3][1]); - B[2][3] = state->A[3][4]; - B[3][3] = leftRotate7_8(state->A[3][2]); - B[4][3] = leftRotate1_8(state->A[3][0]); - B[0][4] = leftRotate6_8(state->A[4][4]); - B[1][4] = leftRotate5_8(state->A[4][2]); - B[2][4] = leftRotate2_8(state->A[4][0]); - B[3][4] = state->A[4][3]; - B[4][4] = leftRotate2_8(state->A[4][1]); + D = state->A[0][1]; + state->A[0][1] = leftRotate4_8(state->A[1][1]); + state->A[1][1] = leftRotate4_8(state->A[1][4]); + state->A[1][4] = leftRotate5_8(state->A[4][2]); + state->A[4][2] = leftRotate7_8(state->A[2][4]); + state->A[2][4] = leftRotate2_8(state->A[4][0]); + state->A[4][0] = leftRotate6_8(state->A[0][2]); + state->A[0][2] = leftRotate3_8(state->A[2][2]); + state->A[2][2] = leftRotate1_8(state->A[2][3]); + state->A[2][3] = state->A[3][4]; + state->A[3][4] = state->A[4][3]; + state->A[4][3] = leftRotate1_8(state->A[3][0]); + state->A[3][0] = leftRotate3_8(state->A[0][4]); + state->A[0][4] = leftRotate6_8(state->A[4][4]); + state->A[4][4] = leftRotate2_8(state->A[4][1]); + state->A[4][1] = leftRotate7_8(state->A[1][3]); + state->A[1][3] = leftRotate5_8(state->A[3][1]); + state->A[3][1] = leftRotate4_8(state->A[1][0]); + state->A[1][0] = leftRotate4_8(state->A[0][3]); + state->A[0][3] = leftRotate5_8(state->A[3][3]); + state->A[3][3] = leftRotate7_8(state->A[3][2]); + state->A[3][2] = leftRotate2_8(state->A[2][1]); + state->A[2][1] = leftRotate6_8(state->A[1][2]); + state->A[1][2] = leftRotate3_8(state->A[2][0]); + state->A[2][0] = leftRotate1_8(D); /* Step mapping chi. Combine each lane with two others in its row */ for (index = 0; index < 5; ++index) { + C[0] = state->A[index][0]; + C[1] = state->A[index][1]; + C[2] = state->A[index][2]; + C[3] = state->A[index][3]; + C[4] = state->A[index][4]; for (index2 = 0; index2 < 5; ++index2) { - state->A[index2][index] = - B[index2][index] ^ - ((~B[index2][addMod5(index, 1)]) & - B[index2][addMod5(index, 2)]); + state->A[index][index2] = + C[index2] ^ + ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); } } @@ -110,61 +115,64 @@ void keccakp_400_permute_host(keccakp_400_state_t *state, unsigned rounds) 0x008A, 0x0088, 0x8009, 0x000A, 0x808B, 0x008B, 0x8089, 0x8003, 0x8002, 0x0080, 0x800A, 0x000A }; - uint16_t B[5][5]; + uint16_t C[5]; uint16_t D; unsigned round; unsigned index, index2; for (round = 20 - rounds; round < 20; ++round) { /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. To save a bit of memory, - * we use the first row of B to store C and compute D on the fly */ + * arrays of size 5 called C and D. 
Compute D on the fly */ for (index = 0; index < 5; ++index) { - B[0][index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; + C[index] = state->A[0][index] ^ state->A[1][index] ^ + state->A[2][index] ^ state->A[3][index] ^ + state->A[4][index]; } for (index = 0; index < 5; ++index) { - D = B[0][addMod5(index, 4)] ^ - leftRotate1_16(B[0][addMod5(index, 1)]); + D = C[addMod5(index, 4)] ^ + leftRotate1_16(C[addMod5(index, 1)]); for (index2 = 0; index2 < 5; ++index2) state->A[index2][index] ^= D; } /* Step mapping rho and pi combined into a single step. * Rotate all lanes by a specific offset and rearrange */ - B[0][0] = state->A[0][0]; - B[1][0] = leftRotate12_16(state->A[0][3]); - B[2][0] = leftRotate1_16 (state->A[0][1]); - B[3][0] = leftRotate11_16(state->A[0][4]); - B[4][0] = leftRotate14_16(state->A[0][2]); - B[0][1] = leftRotate12_16(state->A[1][1]); - B[1][1] = leftRotate4_16 (state->A[1][4]); - B[2][1] = leftRotate6_16 (state->A[1][2]); - B[3][1] = leftRotate4_16 (state->A[1][0]); - B[4][1] = leftRotate7_16 (state->A[1][3]); - B[0][2] = leftRotate11_16(state->A[2][2]); - B[1][2] = leftRotate3_16 (state->A[2][0]); - B[2][2] = leftRotate9_16 (state->A[2][3]); - B[3][2] = leftRotate10_16(state->A[2][1]); - B[4][2] = leftRotate7_16 (state->A[2][4]); - B[0][3] = leftRotate5_16 (state->A[3][3]); - B[1][3] = leftRotate13_16(state->A[3][1]); - B[2][3] = leftRotate8_16 (state->A[3][4]); - B[3][3] = leftRotate15_16(state->A[3][2]); - B[4][3] = leftRotate9_16 (state->A[3][0]); - B[0][4] = leftRotate14_16(state->A[4][4]); - B[1][4] = leftRotate13_16(state->A[4][2]); - B[2][4] = leftRotate2_16 (state->A[4][0]); - B[3][4] = leftRotate8_16 (state->A[4][3]); - B[4][4] = leftRotate2_16 (state->A[4][1]); + D = state->A[0][1]; + state->A[0][1] = leftRotate12_16(state->A[1][1]); + state->A[1][1] = leftRotate4_16 (state->A[1][4]); + state->A[1][4] = leftRotate13_16(state->A[4][2]); + state->A[4][2] = leftRotate7_16 (state->A[2][4]); + state->A[2][4] = leftRotate2_16 (state->A[4][0]); + state->A[4][0] = leftRotate14_16(state->A[0][2]); + state->A[0][2] = leftRotate11_16(state->A[2][2]); + state->A[2][2] = leftRotate9_16 (state->A[2][3]); + state->A[2][3] = leftRotate8_16 (state->A[3][4]); + state->A[3][4] = leftRotate8_16 (state->A[4][3]); + state->A[4][3] = leftRotate9_16 (state->A[3][0]); + state->A[3][0] = leftRotate11_16(state->A[0][4]); + state->A[0][4] = leftRotate14_16(state->A[4][4]); + state->A[4][4] = leftRotate2_16 (state->A[4][1]); + state->A[4][1] = leftRotate7_16 (state->A[1][3]); + state->A[1][3] = leftRotate13_16(state->A[3][1]); + state->A[3][1] = leftRotate4_16 (state->A[1][0]); + state->A[1][0] = leftRotate12_16(state->A[0][3]); + state->A[0][3] = leftRotate5_16 (state->A[3][3]); + state->A[3][3] = leftRotate15_16(state->A[3][2]); + state->A[3][2] = leftRotate10_16(state->A[2][1]); + state->A[2][1] = leftRotate6_16 (state->A[1][2]); + state->A[1][2] = leftRotate3_16 (state->A[2][0]); + state->A[2][0] = leftRotate1_16(D); /* Step mapping chi. 
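For illustration, the row-local form of chi that the rewritten code relies on: each output lane depends only on lanes in its own row, so a 5-lane copy of the row is enough scratch. A minimal sketch for the 16-bit-lane variant (assuming the addMod5 helper defined earlier in this file; chi_row is a made-up name):

    #include <stdint.h>

    static void chi_row(uint16_t row[5])
    {
        uint16_t C[5];
        unsigned i;
        for (i = 0; i < 5; ++i)
            C[i] = row[i];
        for (i = 0; i < 5; ++i)
            row[i] = C[i] ^ ((~C[addMod5(i, 1)]) & C[addMod5(i, 2)]);
    }
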
Combine each lane with two others in its row */ for (index = 0; index < 5; ++index) { + C[0] = state->A[index][0]; + C[1] = state->A[index][1]; + C[2] = state->A[index][2]; + C[3] = state->A[index][3]; + C[4] = state->A[index][4]; for (index2 = 0; index2 < 5; ++index2) { - state->A[index2][index] = - B[index2][index] ^ - ((~B[index2][addMod5(index, 1)]) & - B[index2][addMod5(index, 2)]); + state->A[index][index2] = + C[index2] ^ + ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); } } @@ -202,3 +210,5 @@ void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds) } #endif + +#endif /* !__AVR__ */ diff --git a/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-keccak.h b/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-keccak.h index 026da50..2ffef42 100644 --- a/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-keccak.h +++ b/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-keccak.h @@ -68,9 +68,8 @@ typedef union * \brief Permutes the Keccak-p[200] state. * * \param state The Keccak-p[200] state to be permuted. - * \param rounds The number of rounds to perform (up to 18). */ -void keccakp_200_permute(keccakp_200_state_t *state, unsigned rounds); +void keccakp_200_permute(keccakp_200_state_t *state); /** * \brief Permutes the Keccak-p[400] state, which is assumed to be in diff --git a/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-spongent-avr.S b/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-spongent-avr.S new file mode 100644 index 0000000..4a43458 --- /dev/null +++ b/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-spongent-avr.S @@ -0,0 +1,1677 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 256 +table_0: + .byte 238 + .byte 237 + .byte 235 + .byte 224 + .byte 226 + .byte 225 + .byte 228 + .byte 239 + .byte 231 + .byte 234 + .byte 232 + .byte 229 + .byte 233 + .byte 236 + .byte 227 + .byte 230 + .byte 222 + .byte 221 + .byte 219 + .byte 208 + .byte 210 + .byte 209 + .byte 212 + .byte 223 + .byte 215 + .byte 218 + .byte 216 + .byte 213 + .byte 217 + .byte 220 + .byte 211 + .byte 214 + .byte 190 + .byte 189 + .byte 187 + .byte 176 + .byte 178 + .byte 177 + .byte 180 + .byte 191 + .byte 183 + .byte 186 + .byte 184 + .byte 181 + .byte 185 + .byte 188 + .byte 179 + .byte 182 + .byte 14 + .byte 13 + .byte 11 + .byte 0 + .byte 2 + .byte 1 + .byte 4 + .byte 15 + .byte 7 + .byte 10 + .byte 8 + .byte 5 + .byte 9 + .byte 12 + .byte 3 + .byte 6 + .byte 46 + .byte 45 + .byte 43 + .byte 32 + .byte 34 + .byte 33 + .byte 36 + .byte 47 + .byte 39 + .byte 42 + .byte 40 + .byte 37 + .byte 41 + .byte 44 + .byte 35 + .byte 38 + .byte 30 + .byte 29 + .byte 27 + .byte 16 + .byte 18 + .byte 17 + .byte 20 + .byte 31 + .byte 23 + .byte 26 + .byte 24 + .byte 21 + .byte 25 + .byte 28 + .byte 19 + .byte 22 + .byte 78 + .byte 77 + .byte 75 + .byte 64 + .byte 66 + .byte 65 + .byte 68 + .byte 79 + .byte 71 + .byte 74 + .byte 72 + .byte 69 + .byte 73 + .byte 76 + .byte 67 + .byte 70 + .byte 254 + .byte 253 + .byte 251 + .byte 240 + .byte 242 + .byte 241 + .byte 244 + .byte 255 + .byte 247 + .byte 250 + .byte 248 + .byte 245 + .byte 249 + .byte 252 + .byte 243 + .byte 246 + .byte 126 + .byte 125 + .byte 123 + .byte 112 + .byte 114 + .byte 113 + .byte 116 + .byte 127 + .byte 119 + .byte 122 + .byte 120 + .byte 117 + .byte 121 + .byte 124 + .byte 115 + .byte 118 + .byte 
174 + .byte 173 + .byte 171 + .byte 160 + .byte 162 + .byte 161 + .byte 164 + .byte 175 + .byte 167 + .byte 170 + .byte 168 + .byte 165 + .byte 169 + .byte 172 + .byte 163 + .byte 166 + .byte 142 + .byte 141 + .byte 139 + .byte 128 + .byte 130 + .byte 129 + .byte 132 + .byte 143 + .byte 135 + .byte 138 + .byte 136 + .byte 133 + .byte 137 + .byte 140 + .byte 131 + .byte 134 + .byte 94 + .byte 93 + .byte 91 + .byte 80 + .byte 82 + .byte 81 + .byte 84 + .byte 95 + .byte 87 + .byte 90 + .byte 88 + .byte 85 + .byte 89 + .byte 92 + .byte 83 + .byte 86 + .byte 158 + .byte 157 + .byte 155 + .byte 144 + .byte 146 + .byte 145 + .byte 148 + .byte 159 + .byte 151 + .byte 154 + .byte 152 + .byte 149 + .byte 153 + .byte 156 + .byte 147 + .byte 150 + .byte 206 + .byte 205 + .byte 203 + .byte 192 + .byte 194 + .byte 193 + .byte 196 + .byte 207 + .byte 199 + .byte 202 + .byte 200 + .byte 197 + .byte 201 + .byte 204 + .byte 195 + .byte 198 + .byte 62 + .byte 61 + .byte 59 + .byte 48 + .byte 50 + .byte 49 + .byte 52 + .byte 63 + .byte 55 + .byte 58 + .byte 56 + .byte 53 + .byte 57 + .byte 60 + .byte 51 + .byte 54 + .byte 110 + .byte 109 + .byte 107 + .byte 96 + .byte 98 + .byte 97 + .byte 100 + .byte 111 + .byte 103 + .byte 106 + .byte 104 + .byte 101 + .byte 105 + .byte 108 + .byte 99 + .byte 102 + + .text +.global spongent160_permute + .type spongent160_permute, @function +spongent160_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 +.L__stack_usage = 16 + ld r22,Z + ldd r23,Z+1 + ldd r28,Z+2 + ldd r29,Z+3 + ldd r2,Z+4 + ldd r3,Z+5 + ldd r4,Z+6 + ldd r5,Z+7 + ldd r6,Z+8 + ldd r7,Z+9 + ldd r8,Z+10 + ldd r9,Z+11 + ldd r10,Z+12 + ldd r11,Z+13 + ldd r12,Z+14 + ldd r13,Z+15 + ldd r14,Z+16 + ldd r15,Z+17 + ldd r24,Z+18 + ldd r25,Z+19 + movw r26,r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r21,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r21 +#endif + ldi r18,80 + ldi r19,117 + ldi r20,174 +25: + eor r22,r19 + eor r25,r20 + lsl r19 + bst r19,7 + bld r19,0 + mov r0,r1 + bst r19,6 + bld r0,0 + eor r19,r0 + andi r19,127 + lsr r20 + bst r20,0 + bld r20,7 + mov r0,r1 + bst r20,1 + bld r0,7 + eor r20,r0 + andi r20,254 + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r28 +#if defined(RAMPZ) + elpm r28,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r28,Z +#elif defined(__AVR_TINY__) + ld r28,Z +#else + lpm + mov r28,r0 +#endif + mov r30,r29 +#if defined(RAMPZ) + elpm r29,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r29,Z +#elif defined(__AVR_TINY__) + ld r29,Z +#else + lpm + mov r29,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + bst r22,1 + bld r0,0 + bst r22,4 + bld r22,1 + bst r28,0 + bld r22,4 + bst r6,0 + bld r28,0 + bst r10,1 + bld r6,0 + bst r6,6 + bld r10,1 + bst r13,1 + bld r6,6 + bst r22,7 + bld r13,1 + bst r29,4 + bld r22,7 + bst r12,0 + bld r29,4 + bst r14,2 + bld r12,0 + bst r3,3 + bld r14,2 + bst r23,5 + bld r3,3 + bst r4,4 + bld r23,5 + bst r4,1 + bld r4,4 + bst r2,5 + bld r4,1 + bst r24,4 + bld r2,5 + bst r12,3 + bld r24,4 + bst r15,6 + bld r12,3 + bst r9,3 + bld r15,6 + bst r3,6 + bld r9,3 + bst r29,1 + bld r3,6 + bst r10,4 + bld r29,1 + bst r8,2 + bld r10,4 + bst r23,2 + bld r8,2 + bst r3,0 + bld r23,2 + bst r0,0 + bld r3,0 + bst r22,2 + bld r0,0 + bst r23,0 + bld r22,2 + bst r2,0 + bld r23,0 + bst r14,0 + bld r2,0 + bst r2,3 + bld r14,0 + bst r15,4 + bld r2,3 + bst r8,3 + bld r15,4 + bst r23,6 + bld r8,3 + bst r5,0 + bld r23,6 + bst r6,1 + bld r5,0 + bst r10,5 + bld r6,1 + bst r8,6 + bld r10,5 + bst r29,2 + bld r8,6 + bst r11,0 + bld r29,2 + bst r10,2 + bld r11,0 + bst r7,2 + bld r10,2 + bst r15,1 + bld r7,2 + bst r6,7 + bld r15,1 + bst r13,5 + bld r6,7 + bst r28,7 + bld r13,5 + bst r9,4 + bld r28,7 + bst r4,2 + bld r9,4 + bst r3,1 + bld r4,2 + bst r22,5 + bld r3,1 + bst r28,4 + bld r22,5 + bst r8,0 + bld r28,4 + bst r0,0 + bld r8,0 + bst r22,3 + bld r0,0 + bst r23,4 + bld r22,3 + bst r4,0 + bld r23,4 + bst r2,1 + bld r4,0 + bst r14,4 + bld r2,1 + bst r4,3 + bld r14,4 + bst r3,5 + bld r4,3 + bst r28,5 + bld r3,5 + bst r8,4 + bld r28,5 + bst 
r28,2 + bld r8,4 + bst r7,0 + bld r28,2 + bst r14,1 + bld r7,0 + bst r2,7 + bld r14,1 + bst r25,4 + bld r2,7 + bst r24,3 + bld r25,4 + bst r11,7 + bld r24,3 + bst r13,6 + bld r11,7 + bst r29,3 + bld r13,6 + bst r11,4 + bld r29,3 + bst r12,2 + bld r11,4 + bst r15,2 + bld r12,2 + bst r7,3 + bld r15,2 + bst r15,5 + bld r7,3 + bst r8,7 + bld r15,5 + bst r29,6 + bld r8,7 + bst r13,0 + bld r29,6 + bst r0,0 + bld r13,0 + bst r22,6 + bld r0,0 + bst r29,0 + bld r22,6 + bst r10,0 + bld r29,0 + bst r6,2 + bld r10,0 + bst r11,1 + bld r6,2 + bst r10,6 + bld r11,1 + bst r9,2 + bld r10,6 + bst r3,2 + bld r9,2 + bst r23,1 + bld r3,2 + bst r2,4 + bld r23,1 + bst r24,0 + bld r2,4 + bst r10,3 + bld r24,0 + bst r7,6 + bld r10,3 + bst r25,1 + bld r7,6 + bst r14,7 + bld r25,1 + bst r5,7 + bld r14,7 + bst r9,5 + bld r5,7 + bst r4,6 + bld r9,5 + bst r5,1 + bld r4,6 + bst r6,5 + bld r5,1 + bst r12,5 + bld r6,5 + bst r24,6 + bld r12,5 + bst r13,3 + bld r24,6 + bst r23,7 + bld r13,3 + bst r5,4 + bld r23,7 + bst r8,1 + bld r5,4 + bst r0,0 + bld r8,1 + bst r23,3 + bld r0,0 + bst r3,4 + bld r23,3 + bst r28,1 + bld r3,4 + bst r6,4 + bld r28,1 + bst r12,1 + bld r6,4 + bst r14,6 + bld r12,1 + bst r5,3 + bld r14,6 + bst r7,5 + bld r5,3 + bst r24,5 + bld r7,5 + bst r12,7 + bld r24,5 + bst r25,6 + bld r12,7 + bst r25,3 + bld r25,6 + bst r15,7 + bld r25,3 + bst r9,7 + bld r15,7 + bst r5,6 + bld r9,7 + bst r9,1 + bld r5,6 + bst r2,6 + bld r9,1 + bst r25,0 + bld r2,6 + bst r14,3 + bld r25,0 + bst r3,7 + bld r14,3 + bst r29,5 + bld r3,7 + bst r12,4 + bld r29,5 + bst r24,2 + bld r12,4 + bst r11,3 + bld r24,2 + bst r11,6 + bld r11,3 + bst r13,2 + bld r11,6 + bst r0,0 + bld r13,2 + bst r28,3 + bld r0,0 + bst r7,4 + bld r28,3 + bst r24,1 + bld r7,4 + bst r10,7 + bld r24,1 + bst r9,6 + bld r10,7 + bst r5,2 + bld r9,6 + bst r7,1 + bld r5,2 + bst r14,5 + bld r7,1 + bst r4,7 + bld r14,5 + bst r5,5 + bld r4,7 + bst r8,5 + bld r5,5 + bst r28,6 + bld r8,5 + bst r9,0 + bld r28,6 + bst r2,2 + bld r9,0 + bst r15,0 + bld r2,2 + bst r6,3 + bld r15,0 + bst r11,5 + bld r6,3 + bst r12,6 + bld r11,5 + bst r25,2 + bld r12,6 + bst r15,3 + bld r25,2 + bst r7,7 + bld r15,3 + bst r25,5 + bld r7,7 + bst r24,7 + bld r25,5 + bst r13,7 + bld r24,7 + bst r29,7 + bld r13,7 + bst r13,4 + bld r29,7 + bst r0,0 + bld r13,4 + dec r18 + breq 5389f + rjmp 25b +5389: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + st X+,r22 + st X+,r23 + st X+,r28 + st X+,r29 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + st X+,r24 + st X+,r25 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size spongent160_permute, .-spongent160_permute + + .text +.global spongent176_permute + .type spongent176_permute, @function +spongent176_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ld r22,Z + ldd r23,Z+1 + ldd r28,Z+2 + ldd r29,Z+3 + ldd r2,Z+4 + ldd r3,Z+5 + ldd r4,Z+6 + ldd r5,Z+7 + ldd r6,Z+8 + ldd r7,Z+9 + ldd r8,Z+10 + ldd r9,Z+11 + ldd r10,Z+12 + ldd r11,Z+13 + ldd r12,Z+14 + ldd r13,Z+15 + ldd r14,Z+16 + ldd r15,Z+17 + ldd r24,Z+18 + ldd r25,Z+19 + ldd r16,Z+20 + ldd r17,Z+21 + movw r26,r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) 
+ ldi r21,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r21 +#endif + ldi r18,90 + ldi r19,69 + ldi r20,162 +27: + eor r22,r19 + eor r17,r20 + lsl r19 + bst r19,7 + bld r19,0 + mov r0,r1 + bst r19,6 + bld r0,0 + eor r19,r0 + andi r19,127 + lsr r20 + bst r20,0 + bld r20,7 + mov r0,r1 + bst r20,1 + bld r0,7 + eor r20,r0 + andi r20,254 + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r28 +#if defined(RAMPZ) + elpm r28,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r28,Z +#elif defined(__AVR_TINY__) + ld r28,Z +#else + lpm + mov r28,r0 +#endif + mov r30,r29 +#if defined(RAMPZ) + elpm r29,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r29,Z +#elif defined(__AVR_TINY__) + ld r29,Z +#else + lpm + mov r29,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + bst r22,1 + bld r0,0 + bst r22,4 + bld r22,1 + bst r28,0 + bld r22,4 + bst r6,0 + bld r28,0 + bst r8,1 + bld r6,0 + bst r24,5 + bld r8,1 + bst r6,7 + bld r24,5 + bst r11,5 + bld r6,7 + bst r8,6 + bld r11,5 + bst r17,1 + bld r8,6 + bst r24,7 + bld r17,1 + bst r7,7 + bld r24,7 + bst r15,5 + bld r7,7 + bst r2,7 + bld r15,5 + bst r25,4 + bld r2,7 + bst r10,3 + bld r25,4 + bst r3,6 + bld r10,3 + bst r23,1 + bld r3,6 + bst r2,4 + bld r23,1 + bst r24,0 + bld r2,4 + bst r4,3 + bld r24,0 + bst r29,5 + bld r4,3 + bst r12,4 + bld r29,5 + bst r12,2 + bld r12,4 + bst r11,2 + bld r12,2 + bst r7,2 + bld r11,2 + bst r13,1 + bld r7,2 + bst r14,6 + bld r13,1 + bst r23,3 + bld r14,6 + bst r3,4 + bld r23,3 + bst r0,0 + bld r3,4 + bst r22,2 + bld r0,0 + bst r23,0 + bld r22,2 + bst r2,0 + bld r23,0 + bst r14,0 + bld r2,0 + bst r16,2 + bld r14,0 + bst r13,3 + bld r16,2 + bst r15,6 + bld r13,3 + bst r3,3 + bld r15,6 + bst r17,4 + bld r3,3 + bst r16,3 + bld r17,4 + bst r13,7 + bld r16,3 + bst r25,6 + bld r13,7 + bst r11,3 + bld r25,6 + bst r7,6 + bld r11,3 + bst r15,1 + bld r7,6 + bst r28,7 + bld r15,1 + bst r9,4 + bld r28,7 + bst r28,2 + bld r9,4 + bst r7,0 + bld r28,2 + bst r12,1 + bld r7,0 + bst r10,6 + bld r12,1 + bst r5,2 + bld r10,6 + bst r5,1 + bld r5,2 + bst r4,5 + bld r5,1 + bst r2,5 + bld r4,5 + bst r24,4 + bld r2,5 + bst r6,3 + bld r24,4 + bst r9,5 + bld r6,3 + bst r28,6 + bld r9,5 + bst r9,0 + bld r28,6 + bst r0,0 + bld r9,0 + bst r22,3 + bld r0,0 + bst r23,4 + bld r22,3 + bst r4,0 + bld r23,4 + bst r28,1 + bld r4,0 + bst r6,4 + bld r28,1 + bst r10,1 + bld r6,4 + bst r2,6 + bld r10,1 + bst r25,0 + bld r2,6 + bst r8,3 + bld r25,0 + bst r25,5 + bld r8,3 + bst r10,7 + bld r25,5 + bst r5,6 + bld r10,7 + bst r7,1 + bld r5,6 + bst r12,5 + bld r7,1 + bst r12,6 + bld r12,5 + bst r13,2 + bld r12,6 + bst r15,2 + bld r13,2 + bst r29,3 + bld r15,2 + bst r11,4 + bld r29,3 + bst r8,2 + bld r11,4 + bst r25,1 + bld r8,2 + bst r8,7 + bld r25,1 + bst r17,5 + bld r8,7 + bst r16,7 + bld r17,5 + bst r15,7 + bld r16,7 + bst r3,7 + bld r15,7 + bst r23,5 + bld r3,7 + bst r4,4 + bld r23,5 + bst r2,1 + bld r4,4 + bst r14,4 + bld r2,1 + bst r0,0 + bld r14,4 + bst r22,5 + bld r0,0 + bst r28,4 + bld r22,5 + bst r8,0 + bld r28,4 + bst r24,1 + bld r8,0 + bst r4,7 + bld r24,1 + bst r3,5 + bld r4,7 + bst r0,0 + bld r3,5 + bst r22,6 + bld r0,0 + bst r29,0 + bld r22,6 + bst r10,0 + bld r29,0 + bst r2,2 + bld r10,0 + bst r15,0 + bld r2,2 + bst r28,3 + bld r15,0 + bst r7,4 + bld r28,3 + bst r14,1 + bld r7,4 + bst r16,6 + bld r14,1 + bst r15,3 + bld r16,6 + bst r29,7 + bld r15,3 + bst r13,4 + bld r29,7 + bst r24,2 + bld r13,4 + bst r5,3 + bld r24,2 + bst r5,5 + bld r5,3 + bst r6,5 + bld r5,5 + bst r10,5 + bld r6,5 + bst r4,6 + bld r10,5 + bst r3,1 + bld r4,6 + bst r16,4 + bld r3,1 + bst r14,3 + bld r16,4 + bst r17,6 + bld r14,3 + bst r17,3 + bld r17,6 + bst r25,7 + bld r17,3 + bst r11,7 + bld r25,7 + bst r9,6 + bld r11,7 + bst r29,2 + bld r9,6 + bst r11,0 + bld r29,2 + bst r6,2 + bld r11,0 + bst r9,1 + bld r6,2 + bst r0,0 + bld r9,1 + bst r22,7 + bld r0,0 + bst r29,4 + bld r22,7 + bst r12,0 + bld 
r29,4 + bst r10,2 + bld r12,0 + bst r3,2 + bld r10,2 + bst r17,0 + bld r3,2 + bst r24,3 + bld r17,0 + bst r5,7 + bld r24,3 + bst r7,5 + bld r5,7 + bst r14,5 + bld r7,5 + bst r0,0 + bld r14,5 + bst r23,2 + bld r0,0 + bst r3,0 + bld r23,2 + bst r16,0 + bld r3,0 + bst r12,3 + bld r16,0 + bst r11,6 + bld r12,3 + bst r9,2 + bld r11,6 + bst r0,0 + bld r9,2 + bst r23,6 + bld r0,0 + bst r5,0 + bld r23,6 + bst r4,1 + bld r5,0 + bst r28,5 + bld r4,1 + bst r8,4 + bld r28,5 + bst r16,1 + bld r8,4 + bst r12,7 + bld r16,1 + bst r13,6 + bld r12,7 + bst r25,2 + bld r13,6 + bst r9,3 + bld r25,2 + bst r0,0 + bld r9,3 + bst r23,7 + bld r0,0 + bst r5,4 + bld r23,7 + bst r6,1 + bld r5,4 + bst r8,5 + bld r6,1 + bst r16,5 + bld r8,5 + bst r14,7 + bld r16,5 + bst r0,0 + bld r14,7 + bst r29,1 + bld r0,0 + bst r10,4 + bld r29,1 + bst r4,2 + bld r10,4 + bst r0,0 + bld r4,2 + bst r29,6 + bld r0,0 + bst r13,0 + bld r29,6 + bst r14,2 + bld r13,0 + bst r17,2 + bld r14,2 + bst r25,3 + bld r17,2 + bst r9,7 + bld r25,3 + bst r0,0 + bld r9,7 + bst r2,3 + bld r0,0 + bst r15,4 + bld r2,3 + bst r0,0 + bld r15,4 + bst r6,6 + bld r0,0 + bst r11,1 + bld r6,6 + bst r0,0 + bld r11,1 + bst r7,3 + bld r0,0 + bst r13,5 + bld r7,3 + bst r24,6 + bld r13,5 + bst r0,0 + bld r24,6 + dec r18 + breq 5445f + rjmp 27b +5445: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + st X+,r22 + st X+,r23 + st X+,r28 + st X+,r29 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + st X+,r24 + st X+,r25 + st X+,r16 + st X+,r17 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size spongent176_permute, .-spongent176_permute + +#endif diff --git a/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-spongent.c b/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-spongent.c index 69a8ecb..8e0d57d 100644 --- a/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-spongent.c +++ b/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-spongent.c @@ -22,6 +22,8 @@ #include "internal-spongent.h" +#if !defined(__AVR__) + /** * \brief Applies the Spongent-pi S-box in parallel to the 8 nibbles * of a 32-bit word. @@ -344,3 +346,5 @@ void spongent176_permute(spongent176_state_t *state) le_store_word16(state->B + 20, x5); /* Last word is only 16 bits */ #endif } + +#endif /* !__AVR__ */ diff --git a/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-util.h b/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-util.h +++ b/elephant/Implementations/crypto_aead/elephant176v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
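/* Editor's illustrative sketch -- not part of the patch.  The comment
 * above explains that on AVR only rotates by 1 bit or by whole bytes are
 * cheap, so the LW_CRYPTO_ROTATE32_COMPOSED macros in this header build
 * every other count from those primitives; e.g. a left rotate by 11 is a
 * left rotate by 8 followed by three left rotates by 1.  The helpers
 * below (rotl32, rotl32_by_11_composed) are hypothetical stand-ins for
 * those macros. */
#include <stdint.h>

static uint32_t rotl32(uint32_t x, unsigned bits)   /* generic rotate */
{
    return (x << bits) | (x >> (32U - bits));
}

static uint32_t rotl32_by_11_composed(uint32_t x)
{
    x = rotl32(x, 8);   /* cheap on AVR: moves whole bytes */
    x = rotl32(x, 1);   /* cheap on AVR: single-bit rotate */
    x = rotl32(x, 1);
    x = rotl32(x, 1);
    return x;
}
/* Both forms agree for every input by construction (8 + 1 + 1 + 1 = 11),
 * e.g. rotl32_by_11_composed(x) == rotl32(x, 11) for any x. */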
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/aead-common.c b/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/aead-common.h b/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. 
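/* Editor's illustrative note -- not part of the patch.  aead_check_tag
 * above compares the tags in constant time: all byte differences are
 * OR-ed into "accum", and (accum - 1) >> 8 then yields -1 (all ones)
 * when accum == 0 and 0 when accum is in 1..255, so "&= accum" wipes the
 * plaintext exactly when the tags differ and "~accum" maps success to 0
 * and failure to -1 (this relies on arithmetic right shift of negative
 * ints, as the original does).  ct_tags_equal_mask below is a
 * hypothetical stand-alone model of just the mask derivation. */
static int ct_tags_equal_mask(const unsigned char *a, const unsigned char *b,
                              unsigned len)
{
    int accum = 0;
    while (len > 0) {
        accum |= *a++ ^ *b++;   /* nonzero iff any byte differs */
        --len;
    }
    return (accum - 1) >> 8;    /* -1 if the tags match, 0 otherwise */
}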
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. 
- * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. 
- * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/api.h b/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/api.h deleted file mode 100644 index c3c0a27..0000000 --- a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 12 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/elephant.c b/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/elephant.c deleted file mode 100644 index 2f7abb3..0000000 --- a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/elephant.c +++ /dev/null @@ -1,881 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "elephant.h" -#include "internal-keccak.h" -#include "internal-spongent.h" -#include - -aead_cipher_t const dumbo_cipher = { - "Dumbo", - DUMBO_KEY_SIZE, - DUMBO_NONCE_SIZE, - DUMBO_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - dumbo_aead_encrypt, - dumbo_aead_decrypt -}; - -aead_cipher_t const jumbo_cipher = { - "Jumbo", - JUMBO_KEY_SIZE, - JUMBO_NONCE_SIZE, - JUMBO_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - jumbo_aead_encrypt, - jumbo_aead_decrypt -}; - -aead_cipher_t const delirium_cipher = { - "Delirium", - DELIRIUM_KEY_SIZE, - DELIRIUM_NONCE_SIZE, - DELIRIUM_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - delirium_aead_encrypt, - delirium_aead_decrypt -}; - -/** - * \brief Applies the Dumbo LFSR to the mask. - * - * \param out The output mask. - * \param in The input mask. - */ -static void dumbo_lfsr - (unsigned char out[SPONGENT160_STATE_SIZE], - const unsigned char in[SPONGENT160_STATE_SIZE]) -{ - unsigned char temp = - leftRotate3_8(in[0]) ^ (in[3] << 7) ^ (in[13] >> 7); - unsigned index; - for (index = 0; index < SPONGENT160_STATE_SIZE - 1; ++index) - out[index] = in[index + 1]; - out[SPONGENT160_STATE_SIZE - 1] = temp; -} - -/** - * \brief Processes the nonce and associated data for Dumbo. - * - * \param state Points to the Spongent-pi[160] state. - * \param mask Points to the initial mask value. - * \param next Points to the next mask value. - * \param tag Points to the ongoing tag that is being computed. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data. - */ -static void dumbo_process_ad - (spongent160_state_t *state, - unsigned char mask[SPONGENT160_STATE_SIZE], - unsigned char next[SPONGENT160_STATE_SIZE], - unsigned char tag[DUMBO_TAG_SIZE], - const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned posn, size; - - /* We need the "previous" and "next" masks in each step. 
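/* Editor's illustrative sketch -- not part of the patch.  dumbo_lfsr
 * above advances the Elephant mask: a feedback byte is computed from a
 * few taps on the old mask, every byte shifts down one position, and the
 * feedback byte becomes the new last byte.  The names below (STATE_BYTES,
 * rotl8_3, mask_lfsr_step) are hypothetical; the taps mirror the Dumbo
 * variant with its 20-byte Spongent-pi[160] state. */
#include <string.h>

#define STATE_BYTES 20  /* stands in for SPONGENT160_STATE_SIZE */

static unsigned char rotl8_3(unsigned char x)   /* like leftRotate3_8 */
{
    return (unsigned char)((x << 3) | (x >> 5));
}

static void mask_lfsr_step(unsigned char out[STATE_BYTES],
                           const unsigned char in[STATE_BYTES])
{
    /* Feedback byte from taps on the previous mask, computed before
     * any byte of "in" is overwritten. */
    unsigned char fb = (unsigned char)
        (rotl8_3(in[0]) ^ (unsigned char)(in[3] << 7) ^ (in[13] >> 7));
    /* Shift the remaining bytes down; memmove stays correct when the
     * caller passes the same buffer for out and in (mask, mask). */
    memmove(out, in + 1, STATE_BYTES - 1);
    out[STATE_BYTES - 1] = fb;
}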
- * Compare the first such values */ - dumbo_lfsr(next, mask); - dumbo_lfsr(next, next); - - /* Absorb the nonce into the state */ - lw_xor_block_2_src(state->B, mask, next, SPONGENT160_STATE_SIZE); - lw_xor_block(state->B, npub, DUMBO_NONCE_SIZE); - - /* Absorb the rest of the associated data */ - posn = DUMBO_NONCE_SIZE; - while (adlen > 0) { - size = SPONGENT160_STATE_SIZE - posn; - if (size <= adlen) { - /* Process a complete block */ - lw_xor_block(state->B + posn, ad, size); - spongent160_permute(state); - lw_xor_block(state->B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state->B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state->B, DUMBO_TAG_SIZE); - dumbo_lfsr(mask, mask); - dumbo_lfsr(next, next); - lw_xor_block_2_src(state->B, mask, next, SPONGENT160_STATE_SIZE); - posn = 0; - } else { - /* Process the partial block at the end of the associated data */ - size = (unsigned)adlen; - lw_xor_block(state->B + posn, ad, size); - posn += size; - } - ad += size; - adlen -= size; - } - - /* Pad and absorb the final block */ - state->B[posn] ^= 0x01; - spongent160_permute(state); - lw_xor_block(state->B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state->B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state->B, DUMBO_TAG_SIZE); -} - -int dumbo_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - spongent160_state_t state; - unsigned char start[SPONGENT160_STATE_SIZE]; - unsigned char mask[SPONGENT160_STATE_SIZE]; - unsigned char next[SPONGENT160_STATE_SIZE]; - unsigned char tag[DUMBO_TAG_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + DUMBO_TAG_SIZE; - - /* Hash the key and generate the initial mask */ - memcpy(state.B, k, DUMBO_KEY_SIZE); - memset(state.B + DUMBO_KEY_SIZE, 0, sizeof(state.B) - DUMBO_KEY_SIZE); - spongent160_permute(&state); - memcpy(mask, state.B, DUMBO_KEY_SIZE); - memset(mask + DUMBO_KEY_SIZE, 0, sizeof(mask) - DUMBO_KEY_SIZE); - memcpy(start, mask, sizeof(mask)); - - /* Tag starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Authenticate the nonce and the associated data */ - dumbo_process_ad(&state, mask, next, tag, npub, ad, adlen); - - /* Reset back to the starting mask for the encryption phase */ - memcpy(mask, start, sizeof(mask)); - - /* Encrypt and authenticate the payload */ - while (mlen >= SPONGENT160_STATE_SIZE) { - /* Encrypt using the current mask */ - memcpy(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, npub, DUMBO_NONCE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, m, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, mask, SPONGENT160_STATE_SIZE); - memcpy(c, state.B, SPONGENT160_STATE_SIZE); - - /* Authenticate using the next mask */ - dumbo_lfsr(next, mask); - lw_xor_block(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, next, SPONGENT160_STATE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state.B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, DUMBO_TAG_SIZE); - - /* Advance to the next block */ - memcpy(mask, next, SPONGENT160_STATE_SIZE); - c += SPONGENT160_STATE_SIZE; - m += SPONGENT160_STATE_SIZE; - mlen -= SPONGENT160_STATE_SIZE; - } - if (mlen > 0) { - /* Encrypt the last block using the current mask */ - unsigned temp = (unsigned)mlen; - memcpy(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, 
npub, DUMBO_NONCE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, m, temp); - lw_xor_block(state.B, mask, SPONGENT160_STATE_SIZE); - memcpy(c, state.B, temp); - - /* Authenticate the last block using the next mask */ - dumbo_lfsr(next, mask); - state.B[temp] = 0x01; - memset(state.B + temp + 1, 0, SPONGENT160_STATE_SIZE - temp - 1); - lw_xor_block(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, next, SPONGENT160_STATE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state.B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, DUMBO_TAG_SIZE); - c += temp; - } else if (*clen != DUMBO_TAG_SIZE) { - /* Pad and authenticate when the last block is aligned */ - dumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT160_STATE_SIZE); - state.B[0] ^= 0x01; - spongent160_permute(&state); - lw_xor_block(state.B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state.B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, DUMBO_TAG_SIZE); - } - - /* Generate the authentication tag */ - memcpy(c, tag, DUMBO_TAG_SIZE); - return 0; -} - -int dumbo_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - spongent160_state_t state; - unsigned char *mtemp = m; - unsigned char start[SPONGENT160_STATE_SIZE]; - unsigned char mask[SPONGENT160_STATE_SIZE]; - unsigned char next[SPONGENT160_STATE_SIZE]; - unsigned char tag[DUMBO_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < DUMBO_TAG_SIZE) - return -1; - *mlen = clen - DUMBO_TAG_SIZE; - - /* Hash the key and generate the initial mask */ - memcpy(state.B, k, DUMBO_KEY_SIZE); - memset(state.B + DUMBO_KEY_SIZE, 0, sizeof(state.B) - DUMBO_KEY_SIZE); - spongent160_permute(&state); - memcpy(mask, state.B, DUMBO_KEY_SIZE); - memset(mask + DUMBO_KEY_SIZE, 0, sizeof(mask) - DUMBO_KEY_SIZE); - memcpy(start, mask, sizeof(mask)); - - /* Tag starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Authenticate the nonce and the associated data */ - dumbo_process_ad(&state, mask, next, tag, npub, ad, adlen); - - /* Reset back to the starting mask for the encryption phase */ - memcpy(mask, start, sizeof(mask)); - - /* Decrypt and authenticate the payload */ - clen -= DUMBO_TAG_SIZE; - while (clen >= SPONGENT160_STATE_SIZE) { - /* Authenticate using the next mask */ - dumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, c, SPONGENT160_STATE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state.B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, DUMBO_TAG_SIZE); - - /* Decrypt using the current mask */ - memcpy(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, npub, DUMBO_NONCE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block_2_src(m, state.B, c, SPONGENT160_STATE_SIZE); - - /* Advance to the next block */ - memcpy(mask, next, SPONGENT160_STATE_SIZE); - c += SPONGENT160_STATE_SIZE; - m += SPONGENT160_STATE_SIZE; - clen -= SPONGENT160_STATE_SIZE; - } - if (clen > 0) { - /* Authenticate the last block using the next mask */ - unsigned temp = (unsigned)clen; - dumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT160_STATE_SIZE); - 
lw_xor_block(state.B, c, temp); - state.B[temp] ^= 0x01; - spongent160_permute(&state); - lw_xor_block(state.B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state.B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, DUMBO_TAG_SIZE); - - /* Decrypt the last block using the current mask */ - memcpy(state.B, mask, SPONGENT160_STATE_SIZE); - lw_xor_block(state.B, npub, DUMBO_NONCE_SIZE); - spongent160_permute(&state); - lw_xor_block(state.B, mask, temp); - lw_xor_block_2_src(m, state.B, c, temp); - c += temp; - } else if (*mlen != 0) { - /* Pad and authenticate when the last block is aligned */ - dumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT160_STATE_SIZE); - state.B[0] ^= 0x01; - spongent160_permute(&state); - lw_xor_block(state.B, mask, DUMBO_TAG_SIZE); - lw_xor_block(state.B, next, DUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, DUMBO_TAG_SIZE); - } - - /* Check the authentication tag */ - return aead_check_tag(mtemp, *mlen, tag, c, DUMBO_TAG_SIZE); -} - -/** - * \brief Applies the Jumbo LFSR to the mask. - * - * \param out The output mask. - * \param in The input mask. - */ -static void jumbo_lfsr - (unsigned char out[SPONGENT176_STATE_SIZE], - const unsigned char in[SPONGENT176_STATE_SIZE]) -{ - unsigned char temp = - leftRotate1_8(in[0]) ^ (in[3] << 7) ^ (in[19] >> 7); - unsigned index; - for (index = 0; index < SPONGENT176_STATE_SIZE - 1; ++index) - out[index] = in[index + 1]; - out[SPONGENT176_STATE_SIZE - 1] = temp; -} - -/** - * \brief Processes the nonce and associated data for Jumbo. - * - * \param state Points to the Spongent-pi[170] state. - * \param mask Points to the initial mask value. - * \param next Points to the next mask value. - * \param tag Points to the ongoing tag that is being computed. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data. - */ -static void jumbo_process_ad - (spongent176_state_t *state, - unsigned char mask[SPONGENT176_STATE_SIZE], - unsigned char next[SPONGENT176_STATE_SIZE], - unsigned char tag[JUMBO_TAG_SIZE], - const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned posn, size; - - /* We need the "previous" and "next" masks in each step. 
- * Compare the first such values */ - jumbo_lfsr(next, mask); - jumbo_lfsr(next, next); - - /* Absorb the nonce into the state */ - lw_xor_block_2_src(state->B, mask, next, SPONGENT176_STATE_SIZE); - lw_xor_block(state->B, npub, JUMBO_NONCE_SIZE); - - /* Absorb the rest of the associated data */ - posn = JUMBO_NONCE_SIZE; - while (adlen > 0) { - size = SPONGENT176_STATE_SIZE - posn; - if (size <= adlen) { - /* Process a complete block */ - lw_xor_block(state->B + posn, ad, size); - spongent176_permute(state); - lw_xor_block(state->B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state->B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state->B, JUMBO_TAG_SIZE); - jumbo_lfsr(mask, mask); - jumbo_lfsr(next, next); - lw_xor_block_2_src(state->B, mask, next, SPONGENT176_STATE_SIZE); - posn = 0; - } else { - /* Process the partial block at the end of the associated data */ - size = (unsigned)adlen; - lw_xor_block(state->B + posn, ad, size); - posn += size; - } - ad += size; - adlen -= size; - } - - /* Pad and absorb the final block */ - state->B[posn] ^= 0x01; - spongent176_permute(state); - lw_xor_block(state->B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state->B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state->B, JUMBO_TAG_SIZE); -} - -int jumbo_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - spongent176_state_t state; - unsigned char start[SPONGENT176_STATE_SIZE]; - unsigned char mask[SPONGENT176_STATE_SIZE]; - unsigned char next[SPONGENT176_STATE_SIZE]; - unsigned char tag[JUMBO_TAG_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + JUMBO_TAG_SIZE; - - /* Hash the key and generate the initial mask */ - memcpy(state.B, k, JUMBO_KEY_SIZE); - memset(state.B + JUMBO_KEY_SIZE, 0, sizeof(state.B) - JUMBO_KEY_SIZE); - spongent176_permute(&state); - memcpy(mask, state.B, JUMBO_KEY_SIZE); - memset(mask + JUMBO_KEY_SIZE, 0, sizeof(mask) - JUMBO_KEY_SIZE); - memcpy(start, mask, sizeof(mask)); - - /* Tag starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Authenticate the nonce and the associated data */ - jumbo_process_ad(&state, mask, next, tag, npub, ad, adlen); - - /* Reset back to the starting mask for the encryption phase */ - memcpy(mask, start, sizeof(mask)); - - /* Encrypt and authenticate the payload */ - while (mlen >= SPONGENT176_STATE_SIZE) { - /* Encrypt using the current mask */ - memcpy(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, npub, JUMBO_NONCE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, m, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, mask, SPONGENT176_STATE_SIZE); - memcpy(c, state.B, SPONGENT176_STATE_SIZE); - - /* Authenticate using the next mask */ - jumbo_lfsr(next, mask); - lw_xor_block(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, next, SPONGENT176_STATE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state.B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, JUMBO_TAG_SIZE); - - /* Advance to the next block */ - memcpy(mask, next, SPONGENT176_STATE_SIZE); - c += SPONGENT176_STATE_SIZE; - m += SPONGENT176_STATE_SIZE; - mlen -= SPONGENT176_STATE_SIZE; - } - if (mlen > 0) { - /* Encrypt the last block using the current mask */ - unsigned temp = (unsigned)mlen; - memcpy(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, 
npub, JUMBO_NONCE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, m, temp); - lw_xor_block(state.B, mask, SPONGENT176_STATE_SIZE); - memcpy(c, state.B, temp); - - /* Authenticate the last block using the next mask */ - jumbo_lfsr(next, mask); - state.B[temp] = 0x01; - memset(state.B + temp + 1, 0, SPONGENT176_STATE_SIZE - temp - 1); - lw_xor_block(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, next, SPONGENT176_STATE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state.B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, JUMBO_TAG_SIZE); - c += temp; - } else if (*clen != JUMBO_TAG_SIZE) { - /* Pad and authenticate when the last block is aligned */ - jumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT176_STATE_SIZE); - state.B[0] ^= 0x01; - spongent176_permute(&state); - lw_xor_block(state.B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state.B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, JUMBO_TAG_SIZE); - } - - /* Generate the authentication tag */ - memcpy(c, tag, JUMBO_TAG_SIZE); - return 0; -} - -int jumbo_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - spongent176_state_t state; - unsigned char *mtemp = m; - unsigned char start[SPONGENT176_STATE_SIZE]; - unsigned char mask[SPONGENT176_STATE_SIZE]; - unsigned char next[SPONGENT176_STATE_SIZE]; - unsigned char tag[JUMBO_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < JUMBO_TAG_SIZE) - return -1; - *mlen = clen - JUMBO_TAG_SIZE; - - /* Hash the key and generate the initial mask */ - memcpy(state.B, k, JUMBO_KEY_SIZE); - memset(state.B + JUMBO_KEY_SIZE, 0, sizeof(state.B) - JUMBO_KEY_SIZE); - spongent176_permute(&state); - memcpy(mask, state.B, JUMBO_KEY_SIZE); - memset(mask + JUMBO_KEY_SIZE, 0, sizeof(mask) - JUMBO_KEY_SIZE); - memcpy(start, mask, sizeof(mask)); - - /* Tag starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Authenticate the nonce and the associated data */ - jumbo_process_ad(&state, mask, next, tag, npub, ad, adlen); - - /* Reset back to the starting mask for the encryption phase */ - memcpy(mask, start, sizeof(mask)); - - /* Decrypt and authenticate the payload */ - clen -= JUMBO_TAG_SIZE; - while (clen >= SPONGENT176_STATE_SIZE) { - /* Authenticate using the next mask */ - jumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, c, SPONGENT176_STATE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state.B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, JUMBO_TAG_SIZE); - - /* Decrypt using the current mask */ - memcpy(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, npub, JUMBO_NONCE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block_2_src(m, state.B, c, SPONGENT176_STATE_SIZE); - - /* Advance to the next block */ - memcpy(mask, next, SPONGENT176_STATE_SIZE); - c += SPONGENT176_STATE_SIZE; - m += SPONGENT176_STATE_SIZE; - clen -= SPONGENT176_STATE_SIZE; - } - if (clen > 0) { - /* Authenticate the last block using the next mask */ - unsigned temp = (unsigned)clen; - jumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT176_STATE_SIZE); - 
lw_xor_block(state.B, c, temp); - state.B[temp] ^= 0x01; - spongent176_permute(&state); - lw_xor_block(state.B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state.B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, JUMBO_TAG_SIZE); - - /* Decrypt the last block using the current mask */ - memcpy(state.B, mask, SPONGENT176_STATE_SIZE); - lw_xor_block(state.B, npub, JUMBO_NONCE_SIZE); - spongent176_permute(&state); - lw_xor_block(state.B, mask, temp); - lw_xor_block_2_src(m, state.B, c, temp); - c += temp; - } else if (*mlen != 0) { - /* Pad and authenticate when the last block is aligned */ - jumbo_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, SPONGENT176_STATE_SIZE); - state.B[0] ^= 0x01; - spongent176_permute(&state); - lw_xor_block(state.B, mask, JUMBO_TAG_SIZE); - lw_xor_block(state.B, next, JUMBO_TAG_SIZE); - lw_xor_block(tag, state.B, JUMBO_TAG_SIZE); - } - - /* Check the authentication tag */ - return aead_check_tag(mtemp, *mlen, tag, c, JUMBO_TAG_SIZE); -} - -/** - * \brief Applies the Delirium LFSR to the mask. - * - * \param out The output mask. - * \param in The input mask. - */ -static void delirium_lfsr - (unsigned char out[KECCAKP_200_STATE_SIZE], - const unsigned char in[KECCAKP_200_STATE_SIZE]) -{ - unsigned char temp = - leftRotate1_8(in[0]) ^ leftRotate1_8(in[2]) ^ (in[13] << 1); - unsigned index; - for (index = 0; index < KECCAKP_200_STATE_SIZE - 1; ++index) - out[index] = in[index + 1]; - out[KECCAKP_200_STATE_SIZE - 1] = temp; -} - -/** - * \brief Processes the nonce and associated data for Delirium. - * - * \param state Points to the Keccak[200] state. - * \param mask Points to the initial mask value. - * \param next Points to the next mask value. - * \param tag Points to the ongoing tag that is being computed. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data. - */ -static void delirium_process_ad - (keccakp_200_state_t *state, - unsigned char mask[KECCAKP_200_STATE_SIZE], - unsigned char next[KECCAKP_200_STATE_SIZE], - unsigned char tag[DELIRIUM_TAG_SIZE], - const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned posn, size; - - /* We need the "previous" and "next" masks in each step. 
- * Compare the first such values */ - delirium_lfsr(next, mask); - delirium_lfsr(next, next); - - /* Absorb the nonce into the state */ - lw_xor_block_2_src(state->B, mask, next, KECCAKP_200_STATE_SIZE); - lw_xor_block(state->B, npub, DELIRIUM_NONCE_SIZE); - - /* Absorb the rest of the associated data */ - posn = DELIRIUM_NONCE_SIZE; - while (adlen > 0) { - size = KECCAKP_200_STATE_SIZE - posn; - if (size <= adlen) { - /* Process a complete block */ - lw_xor_block(state->B + posn, ad, size); - keccakp_200_permute(state); - lw_xor_block(state->B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state->B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state->B, DELIRIUM_TAG_SIZE); - delirium_lfsr(mask, mask); - delirium_lfsr(next, next); - lw_xor_block_2_src(state->B, mask, next, KECCAKP_200_STATE_SIZE); - posn = 0; - } else { - /* Process the partial block at the end of the associated data */ - size = (unsigned)adlen; - lw_xor_block(state->B + posn, ad, size); - posn += size; - } - ad += size; - adlen -= size; - } - - /* Pad and absorb the final block */ - state->B[posn] ^= 0x01; - keccakp_200_permute(state); - lw_xor_block(state->B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state->B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state->B, DELIRIUM_TAG_SIZE); -} - -int delirium_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - keccakp_200_state_t state; - unsigned char start[KECCAKP_200_STATE_SIZE]; - unsigned char mask[KECCAKP_200_STATE_SIZE]; - unsigned char next[KECCAKP_200_STATE_SIZE]; - unsigned char tag[DELIRIUM_TAG_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + DELIRIUM_TAG_SIZE; - - /* Hash the key and generate the initial mask */ - memcpy(state.B, k, DELIRIUM_KEY_SIZE); - memset(state.B + DELIRIUM_KEY_SIZE, 0, sizeof(state.B) - DELIRIUM_KEY_SIZE); - keccakp_200_permute(&state); - memcpy(mask, state.B, DELIRIUM_KEY_SIZE); - memset(mask + DELIRIUM_KEY_SIZE, 0, sizeof(mask) - DELIRIUM_KEY_SIZE); - memcpy(start, mask, sizeof(mask)); - - /* Tag starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Authenticate the nonce and the associated data */ - delirium_process_ad(&state, mask, next, tag, npub, ad, adlen); - - /* Reset back to the starting mask for the encryption phase */ - memcpy(mask, start, sizeof(mask)); - - /* Encrypt and authenticate the payload */ - while (mlen >= KECCAKP_200_STATE_SIZE) { - /* Encrypt using the current mask */ - memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, m, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); - memcpy(c, state.B, KECCAKP_200_STATE_SIZE); - - /* Authenticate using the next mask */ - delirium_lfsr(next, mask); - lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, next, KECCAKP_200_STATE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); - - /* Advance to the next block */ - memcpy(mask, next, KECCAKP_200_STATE_SIZE); - c += KECCAKP_200_STATE_SIZE; - m += KECCAKP_200_STATE_SIZE; - mlen -= KECCAKP_200_STATE_SIZE; - } - if (mlen > 0) { - /* Encrypt the last block using the current mask */ - unsigned temp = 
(unsigned)mlen; - memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, m, temp); - lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); - memcpy(c, state.B, temp); - - /* Authenticate the last block using the next mask */ - delirium_lfsr(next, mask); - state.B[temp] = 0x01; - memset(state.B + temp + 1, 0, KECCAKP_200_STATE_SIZE - temp - 1); - lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, next, KECCAKP_200_STATE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); - c += temp; - } else if (*clen != DELIRIUM_TAG_SIZE) { - /* Pad and authenticate when the last block is aligned */ - delirium_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); - state.B[0] ^= 0x01; - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); - } - - /* Generate the authentication tag */ - memcpy(c, tag, DELIRIUM_TAG_SIZE); - return 0; -} - -int delirium_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - keccakp_200_state_t state; - unsigned char *mtemp = m; - unsigned char start[KECCAKP_200_STATE_SIZE]; - unsigned char mask[KECCAKP_200_STATE_SIZE]; - unsigned char next[KECCAKP_200_STATE_SIZE]; - unsigned char tag[DELIRIUM_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < DELIRIUM_TAG_SIZE) - return -1; - *mlen = clen - DELIRIUM_TAG_SIZE; - - /* Hash the key and generate the initial mask */ - memcpy(state.B, k, DELIRIUM_KEY_SIZE); - memset(state.B + DELIRIUM_KEY_SIZE, 0, sizeof(state.B) - DELIRIUM_KEY_SIZE); - keccakp_200_permute(&state); - memcpy(mask, state.B, DELIRIUM_KEY_SIZE); - memset(mask + DELIRIUM_KEY_SIZE, 0, sizeof(mask) - DELIRIUM_KEY_SIZE); - memcpy(start, mask, sizeof(mask)); - - /* Tag starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Authenticate the nonce and the associated data */ - delirium_process_ad(&state, mask, next, tag, npub, ad, adlen); - - /* Reset back to the starting mask for the encryption phase */ - memcpy(mask, start, sizeof(mask)); - - /* Decrypt and authenticate the payload */ - clen -= DELIRIUM_TAG_SIZE; - while (clen >= KECCAKP_200_STATE_SIZE) { - /* Authenticate using the next mask */ - delirium_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, c, KECCAKP_200_STATE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); - - /* Decrypt using the current mask */ - memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block_2_src(m, state.B, c, KECCAKP_200_STATE_SIZE); - - /* Advance to the next block */ - memcpy(mask, next, KECCAKP_200_STATE_SIZE); - c += KECCAKP_200_STATE_SIZE; - m += KECCAKP_200_STATE_SIZE; - clen -= KECCAKP_200_STATE_SIZE; - } - if (clen > 0) { - /* Authenticate the 
last block using the next mask */ - unsigned temp = (unsigned)clen; - delirium_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, c, temp); - state.B[temp] ^= 0x01; - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); - - /* Decrypt the last block using the current mask */ - memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); - lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, temp); - lw_xor_block_2_src(m, state.B, c, temp); - c += temp; - } else if (*mlen != 0) { - /* Pad and authenticate when the last block is aligned */ - delirium_lfsr(next, mask); - lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); - state.B[0] ^= 0x01; - keccakp_200_permute(&state); - lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); - lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); - lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); - } - - /* Check the authentication tag */ - return aead_check_tag(mtemp, *mlen, tag, c, DELIRIUM_TAG_SIZE); -} diff --git a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/elephant.h b/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/elephant.h deleted file mode 100644 index f775e3d..0000000 --- a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/elephant.h +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ELEPHANT_H -#define LWCRYPTO_ELEPHANT_H - -#include "aead-common.h" - -/** - * \file elephant.h - * \brief Elephant authenticated encryption algorithm family. - * - * Elephant is a family of authenticated encryption algorithms based - * around the Spongent-pi and Keccak permutations. - * - * \li Dumbo has a 128-bit key, a 96-bit nonce, and a 64-bit authentication - * tag. It is based around the Spongent-pi[160] permutation. This is - * the primary member of the family. - * \li Jumbo has a 128-bit key, a 96-bit nonce, and a 64-bit authentication - * tag. It is based around the Spongent-pi[176] permutation. - * \li Delirium has a 128-bit key, a 96-bit nonce, and a 128-bit authentication - * tag. It is based around the Keccak[200] permutation. 
- * - * References: https://www.esat.kuleuven.be/cosic/elephant/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for Dumbo. - */ -#define DUMBO_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Dumbo. - */ -#define DUMBO_TAG_SIZE 8 - -/** - * \brief Size of the nonce for Dumbo. - */ -#define DUMBO_NONCE_SIZE 12 - -/** - * \brief Size of the key for Jumbo. - */ -#define JUMBO_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Jumbo. - */ -#define JUMBO_TAG_SIZE 8 - -/** - * \brief Size of the nonce for Jumbo. - */ -#define JUMBO_NONCE_SIZE 12 - -/** - * \brief Size of the key for Delirium. - */ -#define DELIRIUM_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Delirium. - */ -#define DELIRIUM_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Delirium. - */ -#define DELIRIUM_NONCE_SIZE 12 - -/** - * \brief Meta-information block for the Dumbo cipher. - */ -extern aead_cipher_t const dumbo_cipher; - -/** - * \brief Meta-information block for the Jumbo cipher. - */ -extern aead_cipher_t const jumbo_cipher; - -/** - * \brief Meta-information block for the Delirium cipher. - */ -extern aead_cipher_t const delirium_cipher; - -/** - * \brief Encrypts and authenticates a packet with Dumbo. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa dumbo_aead_decrypt() - */ -int dumbo_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Dumbo. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
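
[Editorial sketch, not part of the original patch.] Tying the documentation above together, a one-shot Dumbo encryption call only needs a 16-byte key, a 12-byte nonce, and an output buffer sized for the plaintext plus the 8-byte tag; the helper name below is hypothetical:

#include <string.h>
#include "elephant.h"

/* Illustrative only: encrypts a fixed buffer with Dumbo and no associated data */
static int demo_dumbo_encrypt(void)
{
    unsigned char key[DUMBO_KEY_SIZE] = {0};      /* replace with a real secret key  */
    unsigned char nonce[DUMBO_NONCE_SIZE] = {0};  /* must be unique for every packet */
    unsigned char msg[32] = "example plaintext";
    unsigned char out[sizeof(msg) + DUMBO_TAG_SIZE];
    unsigned long long outlen = 0;

    /* nsec is unused by the algorithm, so NULL is fine; adlen of 0 skips the AD */
    return dumbo_aead_encrypt(out, &outlen, msg, sizeof(msg),
                              NULL, 0, NULL, nonce, key);
}
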
- * - * \sa dumbo_aead_encrypt() - */ -int dumbo_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Jumbo. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa jumbo_aead_decrypt() - */ -int jumbo_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Jumbo. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa jumbo_aead_encrypt() - */ -int jumbo_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Delirium. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. 
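
[Editorial sketch, not part of the original patch.] On the decrypt side, the functions in this header all follow the same pattern: the caller passes the ciphertext with its trailing tag, receives the plaintext length back through mlen, and gets -1 if the tag does not verify. A hedged Jumbo illustration, with a hypothetical wrapper name:

#include "elephant.h"

/* Illustrative only: unwraps a Jumbo packet laid out as ciphertext || 8-byte tag */
static int demo_jumbo_open
    (unsigned char *plain, unsigned long long *plainlen,
     const unsigned char *packet, unsigned long long packetlen,
     const unsigned char nonce[JUMBO_NONCE_SIZE],
     const unsigned char key[JUMBO_KEY_SIZE])
{
    /* Returns 0 and sets *plainlen = packetlen - JUMBO_TAG_SIZE on success,
       or a negative value such as -1 when the authentication tag is wrong. */
    return jumbo_aead_decrypt(plain, plainlen, NULL,
                              packet, packetlen, NULL, 0, nonce, key);
}
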
- * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa delirium_aead_decrypt() - */ -int delirium_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Delirium. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa delirium_aead_encrypt() - */ -int delirium_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/encrypt.c b/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/encrypt.c deleted file mode 100644 index bf6840c..0000000 --- a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "elephant.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return delirium_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return delirium_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-keccak-avr.S b/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-keccak-avr.S deleted file mode 100644 index e50ccaf..0000000 --- a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-keccak-avr.S +++ /dev/null @@ -1,1552 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global keccakp_200_permute - .type keccakp_200_permute, @function -keccakp_200_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd 
r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r26,Z+6 - ldd r27,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r4,Z+12 - ldd r5,Z+13 - ldd r6,Z+14 - ldd r7,Z+15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - ldd r24,Z+24 - push r31 - push r30 - rcall 82f - ldi r30,1 - eor r18,r30 - rcall 82f - ldi r30,130 - eor r18,r30 - rcall 82f - ldi r30,138 - eor r18,r30 - rcall 82f - mov r30,r1 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,1 - eor r18,r30 - rcall 82f - ldi r30,129 - eor r18,r30 - rcall 82f - ldi r30,9 - eor r18,r30 - rcall 82f - ldi r30,138 - eor r18,r30 - rcall 82f - ldi r30,136 - eor r18,r30 - rcall 82f - ldi r30,9 - eor r18,r30 - rcall 82f - ldi r30,10 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,137 - eor r18,r30 - rcall 82f - ldi r30,3 - eor r18,r30 - rcall 82f - ldi r30,2 - eor r18,r30 - rcall 82f - ldi r30,128 - eor r18,r30 - rjmp 420f -82: - mov r30,r18 - eor r30,r23 - eor r30,r2 - eor r30,r7 - eor r30,r12 - mov r31,r19 - eor r31,r26 - eor r31,r3 - eor r31,r8 - eor r31,r13 - mov r25,r20 - eor r25,r27 - eor r25,r4 - eor r25,r9 - eor r25,r14 - mov r16,r21 - eor r16,r28 - eor r16,r5 - eor r16,r10 - eor r16,r15 - mov r17,r22 - eor r17,r29 - eor r17,r6 - eor r17,r11 - eor r17,r24 - mov r0,r31 - lsl r0 - adc r0,r1 - eor r0,r17 - eor r18,r0 - eor r23,r0 - eor r2,r0 - eor r7,r0 - eor r12,r0 - mov r0,r25 - lsl r0 - adc r0,r1 - eor r0,r30 - eor r19,r0 - eor r26,r0 - eor r3,r0 - eor r8,r0 - eor r13,r0 - mov r0,r16 - lsl r0 - adc r0,r1 - eor r0,r31 - eor r20,r0 - eor r27,r0 - eor r4,r0 - eor r9,r0 - eor r14,r0 - mov r0,r17 - lsl r0 - adc r0,r1 - eor r0,r25 - eor r21,r0 - eor r28,r0 - eor r5,r0 - eor r10,r0 - eor r15,r0 - mov r0,r30 - lsl r0 - adc r0,r1 - eor r0,r16 - eor r22,r0 - eor r29,r0 - eor r6,r0 - eor r11,r0 - eor r24,r0 - mov r30,r19 - swap r26 - mov r19,r26 - swap r29 - mov r26,r29 - mov r0,r1 - lsr r14 - ror r0 - lsr r14 - ror r0 - lsr r14 - ror r0 - or r14,r0 - mov r29,r14 - bst r6,0 - lsr r6 - bld r6,7 - mov r14,r6 - lsl r12 - adc r12,r1 - lsl r12 - adc r12,r1 - mov r6,r12 - mov r0,r1 - lsr r20 - ror r0 - lsr r20 - ror r0 - or r20,r0 - mov r12,r20 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - mov r20,r4 - lsl r5 - adc r5,r1 - mov r4,r5 - mov r5,r11 - mov r11,r15 - lsl r7 - adc r7,r1 - mov r15,r7 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - mov r7,r22 - mov r0,r1 - lsr r24 - ror r0 - lsr r24 - ror r0 - or r24,r0 - mov r22,r24 - lsl r13 - adc r13,r1 - lsl r13 - adc r13,r1 - mov r24,r13 - bst r28,0 - lsr r28 - bld r28,7 - mov r13,r28 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r28,r8 - swap r23 - mov r8,r23 - swap r21 - mov r23,r21 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r21,r10 - bst r9,0 - lsr r9 - bld r9,7 - mov r10,r9 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - mov r9,r3 - mov r0,r1 - lsr r27 - ror r0 - lsr r27 - ror r0 - or r27,r0 - mov r3,r27 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - mov r27,r2 - lsl r30 - adc r30,r1 - mov r2,r30 - mov r30,r18 - mov r31,r19 - mov r25,r20 - mov r16,r21 - mov r17,r22 - mov r18,r25 - mov r0,r31 - com r0 - and r18,r0 - eor r18,r30 - mov r19,r16 - mov r0,r25 - com r0 - and r19,r0 - eor r19,r31 - mov r20,r17 - mov r0,r16 - com r0 - and r20,r0 - eor r20,r25 - mov r21,r30 - mov r0,r17 - com r0 - and r21,r0 - eor 
r21,r16 - mov r22,r31 - mov r0,r30 - com r0 - and r22,r0 - eor r22,r17 - mov r30,r23 - mov r31,r26 - mov r25,r27 - mov r16,r28 - mov r17,r29 - mov r23,r25 - mov r0,r31 - com r0 - and r23,r0 - eor r23,r30 - mov r26,r16 - mov r0,r25 - com r0 - and r26,r0 - eor r26,r31 - mov r27,r17 - mov r0,r16 - com r0 - and r27,r0 - eor r27,r25 - mov r28,r30 - mov r0,r17 - com r0 - and r28,r0 - eor r28,r16 - mov r29,r31 - mov r0,r30 - com r0 - and r29,r0 - eor r29,r17 - mov r30,r2 - mov r31,r3 - mov r25,r4 - mov r16,r5 - mov r17,r6 - mov r2,r25 - mov r0,r31 - com r0 - and r2,r0 - eor r2,r30 - mov r3,r16 - mov r0,r25 - com r0 - and r3,r0 - eor r3,r31 - mov r4,r17 - mov r0,r16 - com r0 - and r4,r0 - eor r4,r25 - mov r5,r30 - mov r0,r17 - com r0 - and r5,r0 - eor r5,r16 - mov r6,r31 - mov r0,r30 - com r0 - and r6,r0 - eor r6,r17 - mov r30,r7 - mov r31,r8 - mov r25,r9 - mov r16,r10 - mov r17,r11 - mov r7,r25 - mov r0,r31 - com r0 - and r7,r0 - eor r7,r30 - mov r8,r16 - mov r0,r25 - com r0 - and r8,r0 - eor r8,r31 - mov r9,r17 - mov r0,r16 - com r0 - and r9,r0 - eor r9,r25 - mov r10,r30 - mov r0,r17 - com r0 - and r10,r0 - eor r10,r16 - mov r11,r31 - mov r0,r30 - com r0 - and r11,r0 - eor r11,r17 - mov r30,r12 - mov r31,r13 - mov r25,r14 - mov r16,r15 - mov r17,r24 - mov r12,r25 - mov r0,r31 - com r0 - and r12,r0 - eor r12,r30 - mov r13,r16 - mov r0,r25 - com r0 - and r13,r0 - eor r13,r31 - mov r14,r17 - mov r0,r16 - com r0 - and r14,r0 - eor r14,r25 - mov r15,r30 - mov r0,r17 - com r0 - and r15,r0 - eor r15,r16 - mov r24,r31 - mov r0,r30 - com r0 - and r24,r0 - eor r24,r17 - ret -420: - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r22 - std Z+5,r23 - std Z+6,r26 - std Z+7,r27 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r4 - std Z+13,r5 - std Z+14,r6 - std Z+15,r7 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - std Z+24,r24 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size keccakp_200_permute, .-keccakp_200_permute - - .text -.global keccakp_400_permute - .type keccakp_400_permute, @function -keccakp_400_permute: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - movw r30,r24 -.L__stack_usage = 17 - ld r6,Z - ldd r7,Z+1 - ldd r8,Z+2 - ldd r9,Z+3 - ldd r10,Z+4 - ldd r11,Z+5 - ldd r12,Z+6 - ldd r13,Z+7 - ldd r14,Z+8 - ldd r15,Z+9 - cpi r22,20 - brcs 15f - rcall 153f - ldi r23,1 - eor r6,r23 -15: - cpi r22,19 - brcs 23f - rcall 153f - ldi r23,130 - eor r6,r23 - ldi r17,128 - eor r7,r17 -23: - cpi r22,18 - brcs 31f - rcall 153f - ldi r23,138 - eor r6,r23 - ldi r17,128 - eor r7,r17 -31: - cpi r22,17 - brcs 37f - rcall 153f - ldi r23,128 - eor r7,r23 -37: - cpi r22,16 - brcs 45f - rcall 153f - ldi r23,139 - eor r6,r23 - ldi r17,128 - eor r7,r17 -45: - cpi r22,15 - brcs 51f - rcall 153f - ldi r23,1 - eor r6,r23 -51: - cpi r22,14 - brcs 59f - rcall 153f - ldi r23,129 - eor r6,r23 - ldi r17,128 - eor r7,r17 -59: - cpi r22,13 - brcs 67f - rcall 153f - ldi r23,9 - eor r6,r23 - ldi r17,128 - eor r7,r17 -67: - cpi r22,12 - brcs 73f - rcall 153f - ldi r23,138 - eor r6,r23 -73: - cpi r22,11 - brcs 79f - rcall 153f - ldi r23,136 - eor r6,r23 -79: - cpi r22,10 - brcs 87f - rcall 153f - ldi r23,9 - eor r6,r23 - ldi r17,128 - eor r7,r17 -87: - cpi r22,9 - brcs 
93f - rcall 153f - ldi r23,10 - eor r6,r23 -93: - cpi r22,8 - brcs 101f - rcall 153f - ldi r23,139 - eor r6,r23 - ldi r17,128 - eor r7,r17 -101: - cpi r22,7 - brcs 107f - rcall 153f - ldi r23,139 - eor r6,r23 -107: - cpi r22,6 - brcs 115f - rcall 153f - ldi r23,137 - eor r6,r23 - ldi r17,128 - eor r7,r17 -115: - cpi r22,5 - brcs 123f - rcall 153f - ldi r23,3 - eor r6,r23 - ldi r17,128 - eor r7,r17 -123: - cpi r22,4 - brcs 131f - rcall 153f - ldi r23,2 - eor r6,r23 - ldi r17,128 - eor r7,r17 -131: - cpi r22,3 - brcs 137f - rcall 153f - ldi r23,128 - eor r6,r23 -137: - cpi r22,2 - brcs 145f - rcall 153f - ldi r23,10 - eor r6,r23 - ldi r17,128 - eor r7,r17 -145: - cpi r22,1 - brcs 151f - rcall 153f - ldi r23,10 - eor r6,r23 -151: - rjmp 1004f -153: - movw r18,r6 - ldd r0,Z+10 - eor r18,r0 - ldd r0,Z+11 - eor r19,r0 - ldd r0,Z+20 - eor r18,r0 - ldd r0,Z+21 - eor r19,r0 - ldd r0,Z+30 - eor r18,r0 - ldd r0,Z+31 - eor r19,r0 - ldd r0,Z+40 - eor r18,r0 - ldd r0,Z+41 - eor r19,r0 - movw r20,r8 - ldd r0,Z+12 - eor r20,r0 - ldd r0,Z+13 - eor r21,r0 - ldd r0,Z+22 - eor r20,r0 - ldd r0,Z+23 - eor r21,r0 - ldd r0,Z+32 - eor r20,r0 - ldd r0,Z+33 - eor r21,r0 - ldd r0,Z+42 - eor r20,r0 - ldd r0,Z+43 - eor r21,r0 - movw r26,r10 - ldd r0,Z+14 - eor r26,r0 - ldd r0,Z+15 - eor r27,r0 - ldd r0,Z+24 - eor r26,r0 - ldd r0,Z+25 - eor r27,r0 - ldd r0,Z+34 - eor r26,r0 - ldd r0,Z+35 - eor r27,r0 - ldd r0,Z+44 - eor r26,r0 - ldd r0,Z+45 - eor r27,r0 - movw r2,r12 - ldd r0,Z+16 - eor r2,r0 - ldd r0,Z+17 - eor r3,r0 - ldd r0,Z+26 - eor r2,r0 - ldd r0,Z+27 - eor r3,r0 - ldd r0,Z+36 - eor r2,r0 - ldd r0,Z+37 - eor r3,r0 - ldd r0,Z+46 - eor r2,r0 - ldd r0,Z+47 - eor r3,r0 - movw r4,r14 - ldd r0,Z+18 - eor r4,r0 - ldd r0,Z+19 - eor r5,r0 - ldd r0,Z+28 - eor r4,r0 - ldd r0,Z+29 - eor r5,r0 - ldd r0,Z+38 - eor r4,r0 - ldd r0,Z+39 - eor r5,r0 - ldd r0,Z+48 - eor r4,r0 - ldd r0,Z+49 - eor r5,r0 - movw r24,r20 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r4 - eor r25,r5 - eor r6,r24 - eor r7,r25 - ldd r0,Z+10 - eor r0,r24 - std Z+10,r0 - ldd r0,Z+11 - eor r0,r25 - std Z+11,r0 - ldd r0,Z+20 - eor r0,r24 - std Z+20,r0 - ldd r0,Z+21 - eor r0,r25 - std Z+21,r0 - ldd r0,Z+30 - eor r0,r24 - std Z+30,r0 - ldd r0,Z+31 - eor r0,r25 - std Z+31,r0 - ldd r0,Z+40 - eor r0,r24 - std Z+40,r0 - ldd r0,Z+41 - eor r0,r25 - std Z+41,r0 - movw r24,r26 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r18 - eor r25,r19 - eor r8,r24 - eor r9,r25 - ldd r0,Z+12 - eor r0,r24 - std Z+12,r0 - ldd r0,Z+13 - eor r0,r25 - std Z+13,r0 - ldd r0,Z+22 - eor r0,r24 - std Z+22,r0 - ldd r0,Z+23 - eor r0,r25 - std Z+23,r0 - ldd r0,Z+32 - eor r0,r24 - std Z+32,r0 - ldd r0,Z+33 - eor r0,r25 - std Z+33,r0 - ldd r0,Z+42 - eor r0,r24 - std Z+42,r0 - ldd r0,Z+43 - eor r0,r25 - std Z+43,r0 - movw r24,r2 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r20 - eor r25,r21 - eor r10,r24 - eor r11,r25 - ldd r0,Z+14 - eor r0,r24 - std Z+14,r0 - ldd r0,Z+15 - eor r0,r25 - std Z+15,r0 - ldd r0,Z+24 - eor r0,r24 - std Z+24,r0 - ldd r0,Z+25 - eor r0,r25 - std Z+25,r0 - ldd r0,Z+34 - eor r0,r24 - std Z+34,r0 - ldd r0,Z+35 - eor r0,r25 - std Z+35,r0 - ldd r0,Z+44 - eor r0,r24 - std Z+44,r0 - ldd r0,Z+45 - eor r0,r25 - std Z+45,r0 - movw r24,r4 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r26 - eor r25,r27 - eor r12,r24 - eor r13,r25 - ldd r0,Z+16 - eor r0,r24 - std Z+16,r0 - ldd r0,Z+17 - eor r0,r25 - std Z+17,r0 - ldd r0,Z+26 - eor r0,r24 - std Z+26,r0 - ldd r0,Z+27 - eor r0,r25 - std Z+27,r0 - ldd r0,Z+36 - eor r0,r24 - std Z+36,r0 - ldd r0,Z+37 - eor r0,r25 - std Z+37,r0 - ldd r0,Z+46 - eor 
r0,r24 - std Z+46,r0 - ldd r0,Z+47 - eor r0,r25 - std Z+47,r0 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r2 - eor r25,r3 - eor r14,r24 - eor r15,r25 - ldd r0,Z+18 - eor r0,r24 - std Z+18,r0 - ldd r0,Z+19 - eor r0,r25 - std Z+19,r0 - ldd r0,Z+28 - eor r0,r24 - std Z+28,r0 - ldd r0,Z+29 - eor r0,r25 - std Z+29,r0 - ldd r0,Z+38 - eor r0,r24 - std Z+38,r0 - ldd r0,Z+39 - eor r0,r25 - std Z+39,r0 - ldd r0,Z+48 - eor r0,r24 - std Z+48,r0 - ldd r0,Z+49 - eor r0,r25 - std Z+49,r0 - movw r24,r8 - ldd r8,Z+12 - ldd r9,Z+13 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldd r18,Z+18 - ldd r19,Z+19 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+12,r18 - std Z+13,r19 - ldd r18,Z+44 - ldd r19,Z+45 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+18,r18 - std Z+19,r19 - ldd r18,Z+28 - ldd r19,Z+29 - mov r0,r19 - mov r19,r18 - mov r18,r0 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std Z+44,r18 - std Z+45,r19 - ldd r18,Z+40 - ldd r19,Z+41 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+28,r18 - std Z+29,r19 - movw r18,r10 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+40,r18 - std Z+41,r19 - ldd r10,Z+24 - ldd r11,Z+25 - mov r0,r11 - mov r11,r10 - mov r10,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldd r18,Z+26 - ldd r19,Z+27 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - std Z+24,r18 - std Z+25,r19 - ldd r18,Z+38 - ldd r19,Z+39 - mov r0,r19 - mov r19,r18 - mov r18,r0 - std Z+26,r18 - std Z+27,r19 - ldd r18,Z+46 - ldd r19,Z+47 - mov r0,r19 - mov r19,r18 - mov r18,r0 - std Z+38,r18 - std Z+39,r19 - ldd r18,Z+30 - ldd r19,Z+31 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - std Z+46,r18 - std Z+47,r19 - movw r18,r14 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+30,r18 - std Z+31,r19 - ldd r14,Z+48 - ldd r15,Z+49 - mov r0,r1 - lsr r15 - ror r14 - ror r0 - lsr r15 - ror r14 - ror r0 - or r15,r0 - ldd r18,Z+42 - ldd r19,Z+43 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+48,r18 - std Z+49,r19 - ldd r18,Z+16 - ldd r19,Z+17 - mov r0,r19 - mov r19,r18 - mov r18,r0 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std Z+42,r18 - std Z+43,r19 - ldd r18,Z+32 - ldd r19,Z+33 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+16,r18 - std Z+17,r19 - ldd r18,Z+10 - ldd r19,Z+11 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+32,r18 - std Z+33,r19 - movw r18,r12 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+10,r18 - std Z+11,r19 - ldd r12,Z+36 - ldd r13,Z+37 - mov r0,r13 - mov r13,r12 - mov r12,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - or r13,r0 - ldd r18,Z+34 - ldd r19,Z+35 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std Z+36,r18 - std Z+37,r19 - ldd r18,Z+22 - ldd r19,Z+23 - mov 
r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+34,r18 - std Z+35,r19 - ldd r18,Z+14 - ldd r19,Z+15 - mov r0,r19 - mov r19,r18 - mov r18,r0 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+22,r18 - std Z+23,r19 - ldd r18,Z+20 - ldd r19,Z+21 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+14,r18 - std Z+15,r19 - lsl r24 - rol r25 - adc r24,r1 - std Z+20,r24 - std Z+21,r25 - movw r18,r6 - movw r20,r8 - movw r26,r10 - movw r2,r12 - movw r4,r14 - movw r6,r26 - mov r0,r20 - com r0 - and r6,r0 - mov r0,r21 - com r0 - and r7,r0 - eor r6,r18 - eor r7,r19 - movw r8,r2 - mov r0,r26 - com r0 - and r8,r0 - mov r0,r27 - com r0 - and r9,r0 - eor r8,r20 - eor r9,r21 - movw r10,r4 - mov r0,r2 - com r0 - and r10,r0 - mov r0,r3 - com r0 - and r11,r0 - eor r10,r26 - eor r11,r27 - movw r12,r18 - mov r0,r4 - com r0 - and r12,r0 - mov r0,r5 - com r0 - and r13,r0 - eor r12,r2 - eor r13,r3 - movw r14,r20 - mov r0,r18 - com r0 - and r14,r0 - mov r0,r19 - com r0 - and r15,r0 - eor r14,r4 - eor r15,r5 - ldd r18,Z+10 - ldd r19,Z+11 - ldd r20,Z+12 - ldd r21,Z+13 - ldd r26,Z+14 - ldd r27,Z+15 - ldd r2,Z+16 - ldd r3,Z+17 - ldd r4,Z+18 - ldd r5,Z+19 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+10,r24 - std Z+11,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+12,r24 - std Z+13,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+14,r24 - std Z+15,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+16,r24 - std Z+17,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+18,r24 - std Z+19,r25 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - ldd r26,Z+24 - ldd r27,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r4,Z+28 - ldd r5,Z+29 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+20,r24 - std Z+21,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+22,r24 - std Z+23,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+24,r24 - std Z+25,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+26,r24 - std Z+27,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+28,r24 - std Z+29,r25 - ldd r18,Z+30 - ldd r19,Z+31 - ldd r20,Z+32 - ldd r21,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - ldd r2,Z+36 - ldd r3,Z+37 - ldd r4,Z+38 - ldd r5,Z+39 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+30,r24 - std Z+31,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+32,r24 - std Z+33,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+34,r24 - std Z+35,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+36,r24 - std 
Z+37,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+38,r24 - std Z+39,r25 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - ldd r26,Z+44 - ldd r27,Z+45 - ldd r2,Z+46 - ldd r3,Z+47 - ldd r4,Z+48 - ldd r5,Z+49 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+40,r24 - std Z+41,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+42,r24 - std Z+43,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+44,r24 - std Z+45,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+46,r24 - std Z+47,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+48,r24 - std Z+49,r25 - ret -1004: - st Z,r6 - std Z+1,r7 - std Z+2,r8 - std Z+3,r9 - std Z+4,r10 - std Z+5,r11 - std Z+6,r12 - std Z+7,r13 - std Z+8,r14 - std Z+9,r15 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size keccakp_400_permute, .-keccakp_400_permute - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-keccak.c b/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-keccak.c deleted file mode 100644 index 60539df..0000000 --- a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-keccak.c +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-keccak.h" - -#if !defined(__AVR__) - -/* Faster method to compute ((x + y) % 5) that avoids the division */ -static unsigned char const addMod5Table[9] = { - 0, 1, 2, 3, 4, 0, 1, 2, 3 -}; -#define addMod5(x, y) (addMod5Table[(x) + (y)]) - -void keccakp_200_permute(keccakp_200_state_t *state) -{ - static uint8_t const RC[18] = { - 0x01, 0x82, 0x8A, 0x00, 0x8B, 0x01, 0x81, 0x09, - 0x8A, 0x88, 0x09, 0x0A, 0x8B, 0x8B, 0x89, 0x03, - 0x02, 0x80 - }; - uint8_t C[5]; - uint8_t D; - unsigned round; - unsigned index, index2; - for (round = 0; round < 18; ++round) { - /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. 
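
[Editorial aside, not part of the original patch.] The addMod5Table trick defined earlier in internal-keccak.c replaces "(x + y) % 5" with a table lookup to avoid a division; for the in-range indices used by the permutation the two forms are interchangeable, as this small hypothetical self-check shows:

#include <assert.h>

/* Illustrative only: mirrors the table and macro from internal-keccak.c */
static unsigned char const addMod5Table[9] = { 0, 1, 2, 3, 4, 0, 1, 2, 3 };
#define addMod5(x, y) (addMod5Table[(x) + (y)])

static void check_addMod5(void)
{
    unsigned x, y;
    for (x = 0; x < 5; ++x)
        for (y = 0; y < 5; ++y)
            assert(addMod5(x, y) == (x + y) % 5);
}
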
Compute D on the fly */ - for (index = 0; index < 5; ++index) { - C[index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; - } - for (index = 0; index < 5; ++index) { - D = C[addMod5(index, 4)] ^ - leftRotate1_8(C[addMod5(index, 1)]); - for (index2 = 0; index2 < 5; ++index2) - state->A[index2][index] ^= D; - } - - /* Step mapping rho and pi combined into a single step. - * Rotate all lanes by a specific offset and rearrange */ - D = state->A[0][1]; - state->A[0][1] = leftRotate4_8(state->A[1][1]); - state->A[1][1] = leftRotate4_8(state->A[1][4]); - state->A[1][4] = leftRotate5_8(state->A[4][2]); - state->A[4][2] = leftRotate7_8(state->A[2][4]); - state->A[2][4] = leftRotate2_8(state->A[4][0]); - state->A[4][0] = leftRotate6_8(state->A[0][2]); - state->A[0][2] = leftRotate3_8(state->A[2][2]); - state->A[2][2] = leftRotate1_8(state->A[2][3]); - state->A[2][3] = state->A[3][4]; - state->A[3][4] = state->A[4][3]; - state->A[4][3] = leftRotate1_8(state->A[3][0]); - state->A[3][0] = leftRotate3_8(state->A[0][4]); - state->A[0][4] = leftRotate6_8(state->A[4][4]); - state->A[4][4] = leftRotate2_8(state->A[4][1]); - state->A[4][1] = leftRotate7_8(state->A[1][3]); - state->A[1][3] = leftRotate5_8(state->A[3][1]); - state->A[3][1] = leftRotate4_8(state->A[1][0]); - state->A[1][0] = leftRotate4_8(state->A[0][3]); - state->A[0][3] = leftRotate5_8(state->A[3][3]); - state->A[3][3] = leftRotate7_8(state->A[3][2]); - state->A[3][2] = leftRotate2_8(state->A[2][1]); - state->A[2][1] = leftRotate6_8(state->A[1][2]); - state->A[1][2] = leftRotate3_8(state->A[2][0]); - state->A[2][0] = leftRotate1_8(D); - - /* Step mapping chi. Combine each lane with two others in its row */ - for (index = 0; index < 5; ++index) { - C[0] = state->A[index][0]; - C[1] = state->A[index][1]; - C[2] = state->A[index][2]; - C[3] = state->A[index][3]; - C[4] = state->A[index][4]; - for (index2 = 0; index2 < 5; ++index2) { - state->A[index][index2] = - C[index2] ^ - ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); - } - } - - /* Step mapping iota. XOR A[0][0] with the round constant */ - state->A[0][0] ^= RC[round]; - } -} - -#if defined(LW_UTIL_LITTLE_ENDIAN) -#define keccakp_400_permute_host keccakp_400_permute -#endif - -/* Keccak-p[400] that assumes that the input is already in host byte order */ -void keccakp_400_permute_host(keccakp_400_state_t *state, unsigned rounds) -{ - static uint16_t const RC[20] = { - 0x0001, 0x8082, 0x808A, 0x8000, 0x808B, 0x0001, 0x8081, 0x8009, - 0x008A, 0x0088, 0x8009, 0x000A, 0x808B, 0x008B, 0x8089, 0x8003, - 0x8002, 0x0080, 0x800A, 0x000A - }; - uint16_t C[5]; - uint16_t D; - unsigned round; - unsigned index, index2; - for (round = 20 - rounds; round < 20; ++round) { - /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. Compute D on the fly */ - for (index = 0; index < 5; ++index) { - C[index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; - } - for (index = 0; index < 5; ++index) { - D = C[addMod5(index, 4)] ^ - leftRotate1_16(C[addMod5(index, 1)]); - for (index2 = 0; index2 < 5; ++index2) - state->A[index2][index] ^= D; - } - - /* Step mapping rho and pi combined into a single step. 
- * Rotate all lanes by a specific offset and rearrange */ - D = state->A[0][1]; - state->A[0][1] = leftRotate12_16(state->A[1][1]); - state->A[1][1] = leftRotate4_16 (state->A[1][4]); - state->A[1][4] = leftRotate13_16(state->A[4][2]); - state->A[4][2] = leftRotate7_16 (state->A[2][4]); - state->A[2][4] = leftRotate2_16 (state->A[4][0]); - state->A[4][0] = leftRotate14_16(state->A[0][2]); - state->A[0][2] = leftRotate11_16(state->A[2][2]); - state->A[2][2] = leftRotate9_16 (state->A[2][3]); - state->A[2][3] = leftRotate8_16 (state->A[3][4]); - state->A[3][4] = leftRotate8_16 (state->A[4][3]); - state->A[4][3] = leftRotate9_16 (state->A[3][0]); - state->A[3][0] = leftRotate11_16(state->A[0][4]); - state->A[0][4] = leftRotate14_16(state->A[4][4]); - state->A[4][4] = leftRotate2_16 (state->A[4][1]); - state->A[4][1] = leftRotate7_16 (state->A[1][3]); - state->A[1][3] = leftRotate13_16(state->A[3][1]); - state->A[3][1] = leftRotate4_16 (state->A[1][0]); - state->A[1][0] = leftRotate12_16(state->A[0][3]); - state->A[0][3] = leftRotate5_16 (state->A[3][3]); - state->A[3][3] = leftRotate15_16(state->A[3][2]); - state->A[3][2] = leftRotate10_16(state->A[2][1]); - state->A[2][1] = leftRotate6_16 (state->A[1][2]); - state->A[1][2] = leftRotate3_16 (state->A[2][0]); - state->A[2][0] = leftRotate1_16(D); - - /* Step mapping chi. Combine each lane with two others in its row */ - for (index = 0; index < 5; ++index) { - C[0] = state->A[index][0]; - C[1] = state->A[index][1]; - C[2] = state->A[index][2]; - C[3] = state->A[index][3]; - C[4] = state->A[index][4]; - for (index2 = 0; index2 < 5; ++index2) { - state->A[index][index2] = - C[index2] ^ - ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); - } - } - - /* Step mapping iota. XOR A[0][0] with the round constant */ - state->A[0][0] ^= RC[round]; - } -} - -#if !defined(LW_UTIL_LITTLE_ENDIAN) - -/** - * \brief Reverses the bytes in a Keccak-p[400] state. - * - * \param state The Keccak-p[400] state to apply byte-reversal to. - */ -static void keccakp_400_reverse_bytes(keccakp_400_state_t *state) -{ - unsigned index; - unsigned char temp1; - unsigned char temp2; - for (index = 0; index < 50; index += 2) { - temp1 = state->B[index]; - temp2 = state->B[index + 1]; - state->B[index] = temp2; - state->B[index + 1] = temp1; - } -} - -/* Keccak-p[400] that requires byte reversal on input and output */ -void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds) -{ - keccakp_400_reverse_bytes(state); - keccakp_400_permute_host(state, rounds); - keccakp_400_reverse_bytes(state); -} - -#endif - -#endif /* !__AVR__ */ diff --git a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-keccak.h b/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-keccak.h deleted file mode 100644 index 2ffef42..0000000 --- a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-keccak.h +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_KECCAK_H -#define LW_INTERNAL_KECCAK_H - -#include "internal-util.h" - -/** - * \file internal-keccak.h - * \brief Internal implementation of the Keccak-p permutation. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the state for the Keccak-p[200] permutation. - */ -#define KECCAKP_200_STATE_SIZE 25 - -/** - * \brief Size of the state for the Keccak-p[400] permutation. - */ -#define KECCAKP_400_STATE_SIZE 50 - -/** - * \brief Structure of the internal state of the Keccak-p[200] permutation. - */ -typedef union -{ - uint8_t A[5][5]; /**< Keccak-p[200] state as a 5x5 array of lanes */ - uint8_t B[25]; /**< Keccak-p[200] state as a byte array */ - -} keccakp_200_state_t; - -/** - * \brief Structure of the internal state of the Keccak-p[400] permutation. - */ -typedef union -{ - uint16_t A[5][5]; /**< Keccak-p[400] state as a 5x5 array of lanes */ - uint8_t B[50]; /**< Keccak-p[400] state as a byte array */ - -} keccakp_400_state_t; - -/** - * \brief Permutes the Keccak-p[200] state. - * - * \param state The Keccak-p[200] state to be permuted. - */ -void keccakp_200_permute(keccakp_200_state_t *state); - -/** - * \brief Permutes the Keccak-p[400] state, which is assumed to be in - * little-endian byte order. - * - * \param state The Keccak-p[400] state to be permuted. - * \param rounds The number of rounds to perform (up to 20). 
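
[Editorial sketch, not part of the original patch.] Because the state types above are unions, callers can fill the state byte-wise through B and read it lane-wise through A; a minimal hypothetical use of the Keccak-p[200] permutation declared in this header might be:

#include <string.h>
#include "internal-keccak.h"

/* Illustrative only: permute an all-zero Keccak-p[200] state */
static void demo_keccakp_200(void)
{
    keccakp_200_state_t state;
    memset(state.B, 0, KECCAKP_200_STATE_SIZE);  /* byte view of the 25-byte state */
    keccakp_200_permute(&state);                 /* fixed 18-round permutation     */
    (void)state.A[0][0];                         /* lane view of the same storage  */
}
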
- */ -void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-spongent-avr.S b/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-spongent-avr.S deleted file mode 100644 index 4a43458..0000000 --- a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-spongent-avr.S +++ /dev/null @@ -1,1677 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 256 -table_0: - .byte 238 - .byte 237 - .byte 235 - .byte 224 - .byte 226 - .byte 225 - .byte 228 - .byte 239 - .byte 231 - .byte 234 - .byte 232 - .byte 229 - .byte 233 - .byte 236 - .byte 227 - .byte 230 - .byte 222 - .byte 221 - .byte 219 - .byte 208 - .byte 210 - .byte 209 - .byte 212 - .byte 223 - .byte 215 - .byte 218 - .byte 216 - .byte 213 - .byte 217 - .byte 220 - .byte 211 - .byte 214 - .byte 190 - .byte 189 - .byte 187 - .byte 176 - .byte 178 - .byte 177 - .byte 180 - .byte 191 - .byte 183 - .byte 186 - .byte 184 - .byte 181 - .byte 185 - .byte 188 - .byte 179 - .byte 182 - .byte 14 - .byte 13 - .byte 11 - .byte 0 - .byte 2 - .byte 1 - .byte 4 - .byte 15 - .byte 7 - .byte 10 - .byte 8 - .byte 5 - .byte 9 - .byte 12 - .byte 3 - .byte 6 - .byte 46 - .byte 45 - .byte 43 - .byte 32 - .byte 34 - .byte 33 - .byte 36 - .byte 47 - .byte 39 - .byte 42 - .byte 40 - .byte 37 - .byte 41 - .byte 44 - .byte 35 - .byte 38 - .byte 30 - .byte 29 - .byte 27 - .byte 16 - .byte 18 - .byte 17 - .byte 20 - .byte 31 - .byte 23 - .byte 26 - .byte 24 - .byte 21 - .byte 25 - .byte 28 - .byte 19 - .byte 22 - .byte 78 - .byte 77 - .byte 75 - .byte 64 - .byte 66 - .byte 65 - .byte 68 - .byte 79 - .byte 71 - .byte 74 - .byte 72 - .byte 69 - .byte 73 - .byte 76 - .byte 67 - .byte 70 - .byte 254 - .byte 253 - .byte 251 - .byte 240 - .byte 242 - .byte 241 - .byte 244 - .byte 255 - .byte 247 - .byte 250 - .byte 248 - .byte 245 - .byte 249 - .byte 252 - .byte 243 - .byte 246 - .byte 126 - .byte 125 - .byte 123 - .byte 112 - .byte 114 - .byte 113 - .byte 116 - .byte 127 - .byte 119 - .byte 122 - .byte 120 - .byte 117 - .byte 121 - .byte 124 - .byte 115 - .byte 118 - .byte 174 - .byte 173 - .byte 171 - .byte 160 - .byte 162 - .byte 161 - .byte 164 - .byte 175 - .byte 167 - .byte 170 - .byte 168 - .byte 165 - .byte 169 - .byte 172 - .byte 163 - .byte 166 - .byte 142 - .byte 141 - .byte 139 - .byte 128 - .byte 130 - .byte 129 - .byte 132 - .byte 143 - .byte 135 - .byte 138 - .byte 136 - .byte 133 - .byte 137 - .byte 140 - .byte 131 - .byte 134 - .byte 94 - .byte 93 - .byte 91 - .byte 80 - .byte 82 - .byte 81 - .byte 84 - .byte 95 - .byte 87 - .byte 90 - .byte 88 - .byte 85 - .byte 89 - .byte 92 - .byte 83 - .byte 86 - .byte 158 - .byte 157 - .byte 155 - .byte 144 - .byte 146 - .byte 145 - .byte 148 - .byte 159 - .byte 151 - .byte 154 - .byte 152 - .byte 149 - .byte 153 - .byte 156 - .byte 147 - .byte 150 - .byte 206 - .byte 205 - .byte 203 - .byte 192 - .byte 194 - .byte 193 - .byte 196 - .byte 207 - .byte 199 - .byte 202 - .byte 200 - .byte 197 - .byte 201 - .byte 204 - .byte 195 - .byte 198 - .byte 62 - .byte 61 - .byte 59 - .byte 48 - .byte 50 - .byte 49 - .byte 52 - .byte 63 - .byte 55 - .byte 58 - .byte 56 - .byte 53 - .byte 57 - .byte 60 - .byte 51 - .byte 54 - .byte 110 - .byte 109 - .byte 107 - .byte 96 - .byte 98 - .byte 97 - .byte 100 - .byte 111 - .byte 103 - .byte 106 - 
.byte 104 - .byte 101 - .byte 105 - .byte 108 - .byte 99 - .byte 102 - - .text -.global spongent160_permute - .type spongent160_permute, @function -spongent160_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 -.L__stack_usage = 16 - ld r22,Z - ldd r23,Z+1 - ldd r28,Z+2 - ldd r29,Z+3 - ldd r2,Z+4 - ldd r3,Z+5 - ldd r4,Z+6 - ldd r5,Z+7 - ldd r6,Z+8 - ldd r7,Z+9 - ldd r8,Z+10 - ldd r9,Z+11 - ldd r10,Z+12 - ldd r11,Z+13 - ldd r12,Z+14 - ldd r13,Z+15 - ldd r14,Z+16 - ldd r15,Z+17 - ldd r24,Z+18 - ldd r25,Z+19 - movw r26,r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r21,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r21 -#endif - ldi r18,80 - ldi r19,117 - ldi r20,174 -25: - eor r22,r19 - eor r25,r20 - lsl r19 - bst r19,7 - bld r19,0 - mov r0,r1 - bst r19,6 - bld r0,0 - eor r19,r0 - andi r19,127 - lsr r20 - bst r20,0 - bld r20,7 - mov r0,r1 - bst r20,1 - bld r0,7 - eor r20,r0 - andi r20,254 - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r28 -#if defined(RAMPZ) - elpm r28,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r28,Z -#elif defined(__AVR_TINY__) - ld r28,Z -#else - lpm - mov r28,r0 -#endif - mov r30,r29 -#if defined(RAMPZ) - elpm r29,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r29,Z -#elif defined(__AVR_TINY__) - ld r29,Z -#else - lpm - mov r29,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov 
r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - bst r22,1 - bld r0,0 - bst r22,4 - bld r22,1 - bst r28,0 - bld r22,4 - bst r6,0 - bld r28,0 - bst r10,1 - bld r6,0 - bst r6,6 - bld r10,1 - bst r13,1 - bld r6,6 - bst r22,7 - bld r13,1 - bst r29,4 - bld r22,7 - bst r12,0 - bld r29,4 - bst r14,2 - bld r12,0 - bst r3,3 - bld r14,2 - bst r23,5 - bld r3,3 - bst r4,4 - bld r23,5 - bst r4,1 - bld r4,4 - bst r2,5 - bld r4,1 - bst r24,4 - bld r2,5 - bst r12,3 - bld r24,4 - bst r15,6 - bld r12,3 - bst r9,3 - bld r15,6 - bst r3,6 - bld r9,3 - bst r29,1 - bld r3,6 - bst r10,4 - bld r29,1 - bst r8,2 - bld r10,4 - bst r23,2 - bld r8,2 - bst r3,0 - bld r23,2 - bst r0,0 - bld r3,0 - bst r22,2 - bld r0,0 - bst r23,0 - bld r22,2 - bst r2,0 - bld r23,0 - bst r14,0 - bld r2,0 - bst r2,3 - bld r14,0 - bst r15,4 - bld r2,3 - bst r8,3 - bld r15,4 - bst r23,6 - bld r8,3 - bst r5,0 - bld r23,6 - bst r6,1 - bld r5,0 - bst r10,5 - bld r6,1 - bst r8,6 - bld r10,5 - bst r29,2 - bld r8,6 - bst r11,0 - bld r29,2 - bst r10,2 - bld r11,0 - bst r7,2 - bld r10,2 - bst r15,1 - bld r7,2 - bst r6,7 - bld r15,1 - bst r13,5 - bld r6,7 - bst r28,7 - bld r13,5 - bst r9,4 - bld r28,7 - bst r4,2 - bld r9,4 - bst r3,1 - bld r4,2 - bst r22,5 - bld r3,1 - bst r28,4 - bld r22,5 - bst r8,0 - bld r28,4 - bst r0,0 - bld r8,0 - bst r22,3 - bld r0,0 - bst r23,4 - bld r22,3 - bst r4,0 - bld r23,4 - bst r2,1 - bld r4,0 - bst r14,4 - bld r2,1 - bst r4,3 - bld r14,4 - bst r3,5 - bld r4,3 - bst r28,5 - bld r3,5 - bst r8,4 - bld r28,5 - bst r28,2 - bld r8,4 - bst r7,0 - bld r28,2 - bst r14,1 - bld r7,0 - bst r2,7 - bld r14,1 - bst r25,4 - bld r2,7 - bst r24,3 - bld r25,4 - bst r11,7 - bld r24,3 - bst r13,6 - bld r11,7 - bst r29,3 - bld r13,6 - bst r11,4 - bld r29,3 - bst r12,2 - bld r11,4 - bst r15,2 - bld r12,2 - bst r7,3 - bld r15,2 - bst r15,5 - bld r7,3 - bst r8,7 - bld r15,5 - bst r29,6 - bld r8,7 - bst r13,0 - bld r29,6 - bst r0,0 - bld r13,0 - bst r22,6 - bld r0,0 - bst r29,0 - bld r22,6 - bst r10,0 - bld r29,0 - bst r6,2 - bld r10,0 - bst r11,1 - bld r6,2 - bst r10,6 - bld r11,1 - bst r9,2 - bld r10,6 - bst r3,2 - bld r9,2 - bst r23,1 - bld r3,2 - bst r2,4 - bld r23,1 - bst r24,0 - bld r2,4 - bst r10,3 - bld r24,0 - bst r7,6 - bld r10,3 - bst r25,1 - bld r7,6 - bst r14,7 - bld r25,1 - bst r5,7 - bld r14,7 - bst r9,5 - bld r5,7 - bst r4,6 - bld r9,5 - bst r5,1 - bld r4,6 - bst r6,5 - bld r5,1 - bst r12,5 - bld r6,5 - bst r24,6 - bld r12,5 - bst r13,3 - bld r24,6 - bst r23,7 - bld r13,3 - bst r5,4 - bld r23,7 - bst r8,1 - bld r5,4 - bst r0,0 - bld r8,1 - bst r23,3 - bld r0,0 - bst r3,4 - bld r23,3 - bst r28,1 - bld r3,4 - bst r6,4 - bld r28,1 - bst r12,1 - bld r6,4 - bst r14,6 - bld r12,1 - bst r5,3 - bld r14,6 - bst r7,5 - bld r5,3 - bst r24,5 - bld r7,5 
- bst r12,7 - bld r24,5 - bst r25,6 - bld r12,7 - bst r25,3 - bld r25,6 - bst r15,7 - bld r25,3 - bst r9,7 - bld r15,7 - bst r5,6 - bld r9,7 - bst r9,1 - bld r5,6 - bst r2,6 - bld r9,1 - bst r25,0 - bld r2,6 - bst r14,3 - bld r25,0 - bst r3,7 - bld r14,3 - bst r29,5 - bld r3,7 - bst r12,4 - bld r29,5 - bst r24,2 - bld r12,4 - bst r11,3 - bld r24,2 - bst r11,6 - bld r11,3 - bst r13,2 - bld r11,6 - bst r0,0 - bld r13,2 - bst r28,3 - bld r0,0 - bst r7,4 - bld r28,3 - bst r24,1 - bld r7,4 - bst r10,7 - bld r24,1 - bst r9,6 - bld r10,7 - bst r5,2 - bld r9,6 - bst r7,1 - bld r5,2 - bst r14,5 - bld r7,1 - bst r4,7 - bld r14,5 - bst r5,5 - bld r4,7 - bst r8,5 - bld r5,5 - bst r28,6 - bld r8,5 - bst r9,0 - bld r28,6 - bst r2,2 - bld r9,0 - bst r15,0 - bld r2,2 - bst r6,3 - bld r15,0 - bst r11,5 - bld r6,3 - bst r12,6 - bld r11,5 - bst r25,2 - bld r12,6 - bst r15,3 - bld r25,2 - bst r7,7 - bld r15,3 - bst r25,5 - bld r7,7 - bst r24,7 - bld r25,5 - bst r13,7 - bld r24,7 - bst r29,7 - bld r13,7 - bst r13,4 - bld r29,7 - bst r0,0 - bld r13,4 - dec r18 - breq 5389f - rjmp 25b -5389: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - st X+,r22 - st X+,r23 - st X+,r28 - st X+,r29 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - st X+,r24 - st X+,r25 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size spongent160_permute, .-spongent160_permute - - .text -.global spongent176_permute - .type spongent176_permute, @function -spongent176_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ld r22,Z - ldd r23,Z+1 - ldd r28,Z+2 - ldd r29,Z+3 - ldd r2,Z+4 - ldd r3,Z+5 - ldd r4,Z+6 - ldd r5,Z+7 - ldd r6,Z+8 - ldd r7,Z+9 - ldd r8,Z+10 - ldd r9,Z+11 - ldd r10,Z+12 - ldd r11,Z+13 - ldd r12,Z+14 - ldd r13,Z+15 - ldd r14,Z+16 - ldd r15,Z+17 - ldd r24,Z+18 - ldd r25,Z+19 - ldd r16,Z+20 - ldd r17,Z+21 - movw r26,r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r21,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r21 -#endif - ldi r18,90 - ldi r19,69 - ldi r20,162 -27: - eor r22,r19 - eor r17,r20 - lsl r19 - bst r19,7 - bld r19,0 - mov r0,r1 - bst r19,6 - bld r0,0 - eor r19,r0 - andi r19,127 - lsr r20 - bst r20,0 - bld r20,7 - mov r0,r1 - bst r20,1 - bld r0,7 - eor r20,r0 - andi r20,254 - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r28 -#if defined(RAMPZ) - elpm r28,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r28,Z -#elif defined(__AVR_TINY__) - ld r28,Z -#else - lpm - mov r28,r0 -#endif - mov r30,r29 -#if defined(RAMPZ) - elpm r29,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r29,Z -#elif defined(__AVR_TINY__) - ld r29,Z -#else - lpm - mov r29,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z 
-#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - bst r22,1 - bld r0,0 - bst r22,4 - bld r22,1 - bst r28,0 - bld r22,4 - bst r6,0 - bld r28,0 - bst r8,1 - bld r6,0 - bst r24,5 - bld r8,1 - bst r6,7 - bld r24,5 - bst r11,5 - bld r6,7 - bst r8,6 - bld r11,5 - bst r17,1 - bld r8,6 - bst r24,7 - bld r17,1 - bst r7,7 - bld r24,7 - bst r15,5 - bld r7,7 - bst r2,7 - bld r15,5 - bst r25,4 - bld r2,7 - bst r10,3 - bld r25,4 - bst r3,6 - bld r10,3 - bst r23,1 - bld r3,6 - bst r2,4 - bld r23,1 - bst r24,0 - bld r2,4 - bst r4,3 - bld r24,0 - bst r29,5 - bld r4,3 - bst r12,4 - bld r29,5 - bst r12,2 - bld r12,4 - bst r11,2 - bld r12,2 - bst r7,2 - bld r11,2 - bst r13,1 - bld r7,2 - bst r14,6 - bld r13,1 - bst r23,3 - bld r14,6 - bst r3,4 - bld r23,3 - bst r0,0 - bld r3,4 - bst r22,2 - bld r0,0 - bst r23,0 - bld r22,2 - bst r2,0 - bld r23,0 - bst 
r14,0 - bld r2,0 - bst r16,2 - bld r14,0 - bst r13,3 - bld r16,2 - bst r15,6 - bld r13,3 - bst r3,3 - bld r15,6 - bst r17,4 - bld r3,3 - bst r16,3 - bld r17,4 - bst r13,7 - bld r16,3 - bst r25,6 - bld r13,7 - bst r11,3 - bld r25,6 - bst r7,6 - bld r11,3 - bst r15,1 - bld r7,6 - bst r28,7 - bld r15,1 - bst r9,4 - bld r28,7 - bst r28,2 - bld r9,4 - bst r7,0 - bld r28,2 - bst r12,1 - bld r7,0 - bst r10,6 - bld r12,1 - bst r5,2 - bld r10,6 - bst r5,1 - bld r5,2 - bst r4,5 - bld r5,1 - bst r2,5 - bld r4,5 - bst r24,4 - bld r2,5 - bst r6,3 - bld r24,4 - bst r9,5 - bld r6,3 - bst r28,6 - bld r9,5 - bst r9,0 - bld r28,6 - bst r0,0 - bld r9,0 - bst r22,3 - bld r0,0 - bst r23,4 - bld r22,3 - bst r4,0 - bld r23,4 - bst r28,1 - bld r4,0 - bst r6,4 - bld r28,1 - bst r10,1 - bld r6,4 - bst r2,6 - bld r10,1 - bst r25,0 - bld r2,6 - bst r8,3 - bld r25,0 - bst r25,5 - bld r8,3 - bst r10,7 - bld r25,5 - bst r5,6 - bld r10,7 - bst r7,1 - bld r5,6 - bst r12,5 - bld r7,1 - bst r12,6 - bld r12,5 - bst r13,2 - bld r12,6 - bst r15,2 - bld r13,2 - bst r29,3 - bld r15,2 - bst r11,4 - bld r29,3 - bst r8,2 - bld r11,4 - bst r25,1 - bld r8,2 - bst r8,7 - bld r25,1 - bst r17,5 - bld r8,7 - bst r16,7 - bld r17,5 - bst r15,7 - bld r16,7 - bst r3,7 - bld r15,7 - bst r23,5 - bld r3,7 - bst r4,4 - bld r23,5 - bst r2,1 - bld r4,4 - bst r14,4 - bld r2,1 - bst r0,0 - bld r14,4 - bst r22,5 - bld r0,0 - bst r28,4 - bld r22,5 - bst r8,0 - bld r28,4 - bst r24,1 - bld r8,0 - bst r4,7 - bld r24,1 - bst r3,5 - bld r4,7 - bst r0,0 - bld r3,5 - bst r22,6 - bld r0,0 - bst r29,0 - bld r22,6 - bst r10,0 - bld r29,0 - bst r2,2 - bld r10,0 - bst r15,0 - bld r2,2 - bst r28,3 - bld r15,0 - bst r7,4 - bld r28,3 - bst r14,1 - bld r7,4 - bst r16,6 - bld r14,1 - bst r15,3 - bld r16,6 - bst r29,7 - bld r15,3 - bst r13,4 - bld r29,7 - bst r24,2 - bld r13,4 - bst r5,3 - bld r24,2 - bst r5,5 - bld r5,3 - bst r6,5 - bld r5,5 - bst r10,5 - bld r6,5 - bst r4,6 - bld r10,5 - bst r3,1 - bld r4,6 - bst r16,4 - bld r3,1 - bst r14,3 - bld r16,4 - bst r17,6 - bld r14,3 - bst r17,3 - bld r17,6 - bst r25,7 - bld r17,3 - bst r11,7 - bld r25,7 - bst r9,6 - bld r11,7 - bst r29,2 - bld r9,6 - bst r11,0 - bld r29,2 - bst r6,2 - bld r11,0 - bst r9,1 - bld r6,2 - bst r0,0 - bld r9,1 - bst r22,7 - bld r0,0 - bst r29,4 - bld r22,7 - bst r12,0 - bld r29,4 - bst r10,2 - bld r12,0 - bst r3,2 - bld r10,2 - bst r17,0 - bld r3,2 - bst r24,3 - bld r17,0 - bst r5,7 - bld r24,3 - bst r7,5 - bld r5,7 - bst r14,5 - bld r7,5 - bst r0,0 - bld r14,5 - bst r23,2 - bld r0,0 - bst r3,0 - bld r23,2 - bst r16,0 - bld r3,0 - bst r12,3 - bld r16,0 - bst r11,6 - bld r12,3 - bst r9,2 - bld r11,6 - bst r0,0 - bld r9,2 - bst r23,6 - bld r0,0 - bst r5,0 - bld r23,6 - bst r4,1 - bld r5,0 - bst r28,5 - bld r4,1 - bst r8,4 - bld r28,5 - bst r16,1 - bld r8,4 - bst r12,7 - bld r16,1 - bst r13,6 - bld r12,7 - bst r25,2 - bld r13,6 - bst r9,3 - bld r25,2 - bst r0,0 - bld r9,3 - bst r23,7 - bld r0,0 - bst r5,4 - bld r23,7 - bst r6,1 - bld r5,4 - bst r8,5 - bld r6,1 - bst r16,5 - bld r8,5 - bst r14,7 - bld r16,5 - bst r0,0 - bld r14,7 - bst r29,1 - bld r0,0 - bst r10,4 - bld r29,1 - bst r4,2 - bld r10,4 - bst r0,0 - bld r4,2 - bst r29,6 - bld r0,0 - bst r13,0 - bld r29,6 - bst r14,2 - bld r13,0 - bst r17,2 - bld r14,2 - bst r25,3 - bld r17,2 - bst r9,7 - bld r25,3 - bst r0,0 - bld r9,7 - bst r2,3 - bld r0,0 - bst r15,4 - bld r2,3 - bst r0,0 - bld r15,4 - bst r6,6 - bld r0,0 - bst r11,1 - bld r6,6 - bst r0,0 - bld r11,1 - bst r7,3 - bld r0,0 - bst r13,5 - bld r7,3 - bst r24,6 - bld r13,5 - bst r0,0 - bld 
r24,6 - dec r18 - breq 5445f - rjmp 27b -5445: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - st X+,r22 - st X+,r23 - st X+,r28 - st X+,r29 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - st X+,r24 - st X+,r25 - st X+,r16 - st X+,r17 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size spongent176_permute, .-spongent176_permute - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-spongent.c b/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-spongent.c deleted file mode 100644 index 8e0d57d..0000000 --- a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-spongent.c +++ /dev/null @@ -1,350 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-spongent.h" - -#if !defined(__AVR__) - -/** - * \brief Applies the Spongent-pi S-box in parallel to the 8 nibbles - * of a 32-bit word. - * - * \param x3 The input values to the parallel S-boxes. - * - * \return The output values from the parallel S-boxes. - * - * Based on the bit-sliced S-box implementation from here: - * https://github.com/DadaIsCrazy/usuba/blob/master/data/sboxes/spongent.ua - * - * Note that spongent.ua numbers bits from highest to lowest, so x0 is the - * high bit of each nibble and x3 is the low bit. 
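/* Editor's note - illustrative sketch only, not part of the original file.
 * A naive nibble-at-a-time equivalent of the bit-sliced S-box defined just
 * below, using the Spongent S-box values that also appear (applied to whole
 * bytes) as table_0 in the AVR assembly above. Useful as a reference when
 * checking the bit-sliced version. */
static uint32_t spongent_sbox_reference(uint32_t x)
{
    static const uint8_t S[16] = {
        0xE, 0xD, 0xB, 0x0, 0x2, 0x1, 0x4, 0xF,
        0x7, 0xA, 0x8, 0x5, 0x9, 0xC, 0x3, 0x6
    };
    uint32_t y = 0;
    int i;
    /* Apply the 4-bit S-box to each of the 8 nibbles independently */
    for (i = 0; i < 32; i += 4)
        y |= ((uint32_t)S[(x >> i) & 0xF]) << i;
    return y;
}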
- */ -static uint32_t spongent_sbox(uint32_t x3) -{ - uint32_t q0, q1, q2, q3, t0, t1, t2, t3; - uint32_t x2 = (x3 >> 1); - uint32_t x1 = (x2 >> 1); - uint32_t x0 = (x1 >> 1); - q0 = x0 ^ x2; - q1 = x1 ^ x2; - t0 = q0 & q1; - q2 = ~(x0 ^ x1 ^ x3 ^ t0); - t1 = q2 & ~x0; - q3 = x1 ^ t1; - t2 = q3 & (q3 ^ x2 ^ x3 ^ t0); - t3 = (x2 ^ t0) & ~(x1 ^ t0); - q0 = x1 ^ x2 ^ x3 ^ t2; - q1 = x0 ^ x2 ^ x3 ^ t0 ^ t1; - q2 = x0 ^ x1 ^ x2 ^ t1; - q3 = x0 ^ x3 ^ t0 ^ t3; - return ((q0 << 3) & 0x88888888U) | ((q1 << 2) & 0x44444444U) | - ((q2 << 1) & 0x22222222U) | (q3 & 0x11111111U); -} - -void spongent160_permute(spongent160_state_t *state) -{ - static uint8_t const RC[] = { - /* Round constants for Spongent-pi[160] */ - 0x75, 0xae, 0x6a, 0x56, 0x54, 0x2a, 0x29, 0x94, - 0x53, 0xca, 0x27, 0xe4, 0x4f, 0xf2, 0x1f, 0xf8, - 0x3e, 0x7c, 0x7d, 0xbe, 0x7a, 0x5e, 0x74, 0x2e, - 0x68, 0x16, 0x50, 0x0a, 0x21, 0x84, 0x43, 0xc2, - 0x07, 0xe0, 0x0e, 0x70, 0x1c, 0x38, 0x38, 0x1c, - 0x71, 0x8e, 0x62, 0x46, 0x44, 0x22, 0x09, 0x90, - 0x12, 0x48, 0x24, 0x24, 0x49, 0x92, 0x13, 0xc8, - 0x26, 0x64, 0x4d, 0xb2, 0x1b, 0xd8, 0x36, 0x6c, - 0x6d, 0xb6, 0x5a, 0x5a, 0x35, 0xac, 0x6b, 0xd6, - 0x56, 0x6a, 0x2d, 0xb4, 0x5b, 0xda, 0x37, 0xec, - 0x6f, 0xf6, 0x5e, 0x7a, 0x3d, 0xbc, 0x7b, 0xde, - 0x76, 0x6e, 0x6c, 0x36, 0x58, 0x1a, 0x31, 0x8c, - 0x63, 0xc6, 0x46, 0x62, 0x0d, 0xb0, 0x1a, 0x58, - 0x34, 0x2c, 0x69, 0x96, 0x52, 0x4a, 0x25, 0xa4, - 0x4b, 0xd2, 0x17, 0xe8, 0x2e, 0x74, 0x5d, 0xba, - 0x3b, 0xdc, 0x77, 0xee, 0x6e, 0x76, 0x5c, 0x3a, - 0x39, 0x9c, 0x73, 0xce, 0x66, 0x66, 0x4c, 0x32, - 0x19, 0x98, 0x32, 0x4c, 0x65, 0xa6, 0x4a, 0x52, - 0x15, 0xa8, 0x2a, 0x54, 0x55, 0xaa, 0x2b, 0xd4, - 0x57, 0xea, 0x2f, 0xf4, 0x5f, 0xfa, 0x3f, 0xfc - }; - const uint8_t *rc = RC; - uint32_t x0, x1, x2, x3, x4; - uint32_t t0, t1, t2, t3, t4; - uint8_t round; - - /* Load the state into local variables and convert from little-endian */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = state->W[0]; - x1 = state->W[1]; - x2 = state->W[2]; - x3 = state->W[3]; - x4 = state->W[4]; -#else - x0 = le_load_word32(state->B); - x1 = le_load_word32(state->B + 4); - x2 = le_load_word32(state->B + 8); - x3 = le_load_word32(state->B + 12); - x4 = le_load_word32(state->B + 16); -#endif - - /* Perform the 80 rounds of Spongent-pi[160] */ - for (round = 0; round < 80; ++round, rc += 2) { - /* Add the round constant to front and back of the state */ - x0 ^= rc[0]; - x4 ^= ((uint32_t)(rc[1])) << 24; - - /* Apply the S-box to all 4-bit groups in the state */ - t0 = spongent_sbox(x0); - t1 = spongent_sbox(x1); - t2 = spongent_sbox(x2); - t3 = spongent_sbox(x3); - t4 = spongent_sbox(x4); - - /* Permute the bits of the state. Bit i is moved to (40 * i) % 159 - * for all bits except the last which is left where it is. 
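/* Editor's note - illustrative sketch only, not part of the original file.
 * This is the generic form of the bit permutation that the unrolled
 * BCP/BUP/BDN expressions below implement, treating the 160-bit state as
 * bits 0..159 in the little-endian byte order used by the loads above.
 * The Spongent-pi[176] code later in this file follows the same pattern
 * with (44 * i) % 175. */
static void spongent160_bit_permutation_reference
    (uint8_t out[20], const uint8_t in[20])
{
    int i, j;
    for (i = 0; i < 20; ++i)
        out[i] = 0;
    for (i = 0; i < 160; ++i) {
        /* Bit i moves to (40 * i) % 159; bit 159 stays where it is */
        j = (i == 159) ? 159 : (40 * i) % 159;
        out[j / 8] |= (uint8_t)(((in[i / 8] >> (i % 8)) & 1) << (j % 8));
    }
}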
- * BCP = bit copy, BUP = move bit up, BDN = move bit down */ - #define BCP(x, bit) ((x) & (((uint32_t)1) << (bit))) - #define BUP(x, from, to) \ - (((x) << ((to) - (from))) & (((uint32_t)1) << (to))) - #define BDN(x, from, to) \ - (((x) >> ((from) - (to))) & (((uint32_t)1) << (to))) - x0 = BCP(t0, 0) ^ BDN(t0, 4, 1) ^ BDN(t0, 8, 2) ^ - BDN(t0, 12, 3) ^ BDN(t0, 16, 4) ^ BDN(t0, 20, 5) ^ - BDN(t0, 24, 6) ^ BDN(t0, 28, 7) ^ BUP(t1, 0, 8) ^ - BUP(t1, 4, 9) ^ BUP(t1, 8, 10) ^ BDN(t1, 12, 11) ^ - BDN(t1, 16, 12) ^ BDN(t1, 20, 13) ^ BDN(t1, 24, 14) ^ - BDN(t1, 28, 15) ^ BUP(t2, 0, 16) ^ BUP(t2, 4, 17) ^ - BUP(t2, 8, 18) ^ BUP(t2, 12, 19) ^ BUP(t2, 16, 20) ^ - BUP(t2, 20, 21) ^ BDN(t2, 24, 22) ^ BDN(t2, 28, 23) ^ - BUP(t3, 0, 24) ^ BUP(t3, 4, 25) ^ BUP(t3, 8, 26) ^ - BUP(t3, 12, 27) ^ BUP(t3, 16, 28) ^ BUP(t3, 20, 29) ^ - BUP(t3, 24, 30) ^ BUP(t3, 28, 31); - x1 = BUP(t0, 1, 8) ^ BUP(t0, 5, 9) ^ BUP(t0, 9, 10) ^ - BDN(t0, 13, 11) ^ BDN(t0, 17, 12) ^ BDN(t0, 21, 13) ^ - BDN(t0, 25, 14) ^ BDN(t0, 29, 15) ^ BUP(t1, 1, 16) ^ - BUP(t1, 5, 17) ^ BUP(t1, 9, 18) ^ BUP(t1, 13, 19) ^ - BUP(t1, 17, 20) ^ BCP(t1, 21) ^ BDN(t1, 25, 22) ^ - BDN(t1, 29, 23) ^ BUP(t2, 1, 24) ^ BUP(t2, 5, 25) ^ - BUP(t2, 9, 26) ^ BUP(t2, 13, 27) ^ BUP(t2, 17, 28) ^ - BUP(t2, 21, 29) ^ BUP(t2, 25, 30) ^ BUP(t2, 29, 31) ^ - BCP(t4, 0) ^ BDN(t4, 4, 1) ^ BDN(t4, 8, 2) ^ - BDN(t4, 12, 3) ^ BDN(t4, 16, 4) ^ BDN(t4, 20, 5) ^ - BDN(t4, 24, 6) ^ BDN(t4, 28, 7); - x2 = BUP(t0, 2, 16) ^ BUP(t0, 6, 17) ^ BUP(t0, 10, 18) ^ - BUP(t0, 14, 19) ^ BUP(t0, 18, 20) ^ BDN(t0, 22, 21) ^ - BDN(t0, 26, 22) ^ BDN(t0, 30, 23) ^ BUP(t1, 2, 24) ^ - BUP(t1, 6, 25) ^ BUP(t1, 10, 26) ^ BUP(t1, 14, 27) ^ - BUP(t1, 18, 28) ^ BUP(t1, 22, 29) ^ BUP(t1, 26, 30) ^ - BUP(t1, 30, 31) ^ BDN(t3, 1, 0) ^ BDN(t3, 5, 1) ^ - BDN(t3, 9, 2) ^ BDN(t3, 13, 3) ^ BDN(t3, 17, 4) ^ - BDN(t3, 21, 5) ^ BDN(t3, 25, 6) ^ BDN(t3, 29, 7) ^ - BUP(t4, 1, 8) ^ BUP(t4, 5, 9) ^ BUP(t4, 9, 10) ^ - BDN(t4, 13, 11) ^ BDN(t4, 17, 12) ^ BDN(t4, 21, 13) ^ - BDN(t4, 25, 14) ^ BDN(t4, 29, 15); - x3 = BUP(t0, 3, 24) ^ BUP(t0, 7, 25) ^ BUP(t0, 11, 26) ^ - BUP(t0, 15, 27) ^ BUP(t0, 19, 28) ^ BUP(t0, 23, 29) ^ - BUP(t0, 27, 30) ^ BCP(t0, 31) ^ BDN(t2, 2, 0) ^ - BDN(t2, 6, 1) ^ BDN(t2, 10, 2) ^ BDN(t2, 14, 3) ^ - BDN(t2, 18, 4) ^ BDN(t2, 22, 5) ^ BDN(t2, 26, 6) ^ - BDN(t2, 30, 7) ^ BUP(t3, 2, 8) ^ BUP(t3, 6, 9) ^ - BCP(t3, 10) ^ BDN(t3, 14, 11) ^ BDN(t3, 18, 12) ^ - BDN(t3, 22, 13) ^ BDN(t3, 26, 14) ^ BDN(t3, 30, 15) ^ - BUP(t4, 2, 16) ^ BUP(t4, 6, 17) ^ BUP(t4, 10, 18) ^ - BUP(t4, 14, 19) ^ BUP(t4, 18, 20) ^ BDN(t4, 22, 21) ^ - BDN(t4, 26, 22) ^ BDN(t4, 30, 23); - x4 = BDN(t1, 3, 0) ^ BDN(t1, 7, 1) ^ BDN(t1, 11, 2) ^ - BDN(t1, 15, 3) ^ BDN(t1, 19, 4) ^ BDN(t1, 23, 5) ^ - BDN(t1, 27, 6) ^ BDN(t1, 31, 7) ^ BUP(t2, 3, 8) ^ - BUP(t2, 7, 9) ^ BDN(t2, 11, 10) ^ BDN(t2, 15, 11) ^ - BDN(t2, 19, 12) ^ BDN(t2, 23, 13) ^ BDN(t2, 27, 14) ^ - BDN(t2, 31, 15) ^ BUP(t3, 3, 16) ^ BUP(t3, 7, 17) ^ - BUP(t3, 11, 18) ^ BUP(t3, 15, 19) ^ BUP(t3, 19, 20) ^ - BDN(t3, 23, 21) ^ BDN(t3, 27, 22) ^ BDN(t3, 31, 23) ^ - BUP(t4, 3, 24) ^ BUP(t4, 7, 25) ^ BUP(t4, 11, 26) ^ - BUP(t4, 15, 27) ^ BUP(t4, 19, 28) ^ BUP(t4, 23, 29) ^ - BUP(t4, 27, 30) ^ BCP(t4, 31); - } - - /* Store the local variables back to the state in little-endian order */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->W[0] = x0; - state->W[1] = x1; - state->W[2] = x2; - state->W[3] = x3; - state->W[4] = x4; -#else - le_store_word32(state->B, x0); - le_store_word32(state->B + 4, x1); - le_store_word32(state->B + 8, x2); - le_store_word32(state->B + 12, x3); - 
le_store_word32(state->B + 16, x4); -#endif -} - -void spongent176_permute(spongent176_state_t *state) -{ - static uint8_t const RC[] = { - /* Round constants for Spongent-pi[176] */ - 0x45, 0xa2, 0x0b, 0xd0, 0x16, 0x68, 0x2c, 0x34, - 0x59, 0x9a, 0x33, 0xcc, 0x67, 0xe6, 0x4e, 0x72, - 0x1d, 0xb8, 0x3a, 0x5c, 0x75, 0xae, 0x6a, 0x56, - 0x54, 0x2a, 0x29, 0x94, 0x53, 0xca, 0x27, 0xe4, - 0x4f, 0xf2, 0x1f, 0xf8, 0x3e, 0x7c, 0x7d, 0xbe, - 0x7a, 0x5e, 0x74, 0x2e, 0x68, 0x16, 0x50, 0x0a, - 0x21, 0x84, 0x43, 0xc2, 0x07, 0xe0, 0x0e, 0x70, - 0x1c, 0x38, 0x38, 0x1c, 0x71, 0x8e, 0x62, 0x46, - 0x44, 0x22, 0x09, 0x90, 0x12, 0x48, 0x24, 0x24, - 0x49, 0x92, 0x13, 0xc8, 0x26, 0x64, 0x4d, 0xb2, - 0x1b, 0xd8, 0x36, 0x6c, 0x6d, 0xb6, 0x5a, 0x5a, - 0x35, 0xac, 0x6b, 0xd6, 0x56, 0x6a, 0x2d, 0xb4, - 0x5b, 0xda, 0x37, 0xec, 0x6f, 0xf6, 0x5e, 0x7a, - 0x3d, 0xbc, 0x7b, 0xde, 0x76, 0x6e, 0x6c, 0x36, - 0x58, 0x1a, 0x31, 0x8c, 0x63, 0xc6, 0x46, 0x62, - 0x0d, 0xb0, 0x1a, 0x58, 0x34, 0x2c, 0x69, 0x96, - 0x52, 0x4a, 0x25, 0xa4, 0x4b, 0xd2, 0x17, 0xe8, - 0x2e, 0x74, 0x5d, 0xba, 0x3b, 0xdc, 0x77, 0xee, - 0x6e, 0x76, 0x5c, 0x3a, 0x39, 0x9c, 0x73, 0xce, - 0x66, 0x66, 0x4c, 0x32, 0x19, 0x98, 0x32, 0x4c, - 0x65, 0xa6, 0x4a, 0x52, 0x15, 0xa8, 0x2a, 0x54, - 0x55, 0xaa, 0x2b, 0xd4, 0x57, 0xea, 0x2f, 0xf4, - 0x5f, 0xfa, 0x3f, 0xfc - }; - const uint8_t *rc = RC; - uint32_t x0, x1, x2, x3, x4, x5; - uint32_t t0, t1, t2, t3, t4, t5; - uint8_t round; - - /* Load the state into local variables and convert from little-endian */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = state->W[0]; - x1 = state->W[1]; - x2 = state->W[2]; - x3 = state->W[3]; - x4 = state->W[4]; - x5 = state->W[5]; -#else - x0 = le_load_word32(state->B); - x1 = le_load_word32(state->B + 4); - x2 = le_load_word32(state->B + 8); - x3 = le_load_word32(state->B + 12); - x4 = le_load_word32(state->B + 16); - x5 = le_load_word16(state->B + 20); /* Last word is only 16 bits */ -#endif - - /* Perform the 90 rounds of Spongent-pi[176] */ - for (round = 0; round < 90; ++round, rc += 2) { - /* Add the round constant to front and back of the state */ - x0 ^= rc[0]; - x5 ^= ((uint32_t)(rc[1])) << 8; - - /* Apply the S-box to all 4-bit groups in the state */ - t0 = spongent_sbox(x0); - t1 = spongent_sbox(x1); - t2 = spongent_sbox(x2); - t3 = spongent_sbox(x3); - t4 = spongent_sbox(x4); - t5 = spongent_sbox(x5); - - /* Permute the bits of the state. Bit i is moved to (44 * i) % 175 - * for all bits except the last which is left where it is. 
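/* Editor's note - illustrative observation, not part of the original file.
 * In the RC tables above, each pair appears to be a 7-bit constant followed
 * by its bit-reversal (for example 0x45 = 01000101 reversed is 10100010 =
 * 0xa2), which is why rc[0] is XORed into the low end of the state and
 * rc[1] into the high end. A small helper to verify that relationship: */
static uint8_t spongent_reverse8(uint8_t b)
{
    uint8_t r = 0;
    int i;
    for (i = 0; i < 8; ++i)
        r |= (uint8_t)(((b >> i) & 1) << (7 - i));
    return r; /* spongent_reverse8(RC[2 * n]) == RC[2 * n + 1] for each round n */
}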
- * BCP = bit copy, BUP = move bit up, BDN = move bit down */ - x0 = BCP(t0, 0) ^ BDN(t0, 4, 1) ^ BDN(t0, 8, 2) ^ - BDN(t0, 12, 3) ^ BDN(t0, 16, 4) ^ BDN(t0, 20, 5) ^ - BDN(t0, 24, 6) ^ BDN(t0, 28, 7) ^ BUP(t1, 0, 8) ^ - BUP(t1, 4, 9) ^ BUP(t1, 8, 10) ^ BDN(t1, 12, 11) ^ - BDN(t1, 16, 12) ^ BDN(t1, 20, 13) ^ BDN(t1, 24, 14) ^ - BDN(t1, 28, 15) ^ BUP(t2, 0, 16) ^ BUP(t2, 4, 17) ^ - BUP(t2, 8, 18) ^ BUP(t2, 12, 19) ^ BUP(t2, 16, 20) ^ - BUP(t2, 20, 21) ^ BDN(t2, 24, 22) ^ BDN(t2, 28, 23) ^ - BUP(t3, 0, 24) ^ BUP(t3, 4, 25) ^ BUP(t3, 8, 26) ^ - BUP(t3, 12, 27) ^ BUP(t3, 16, 28) ^ BUP(t3, 20, 29) ^ - BUP(t3, 24, 30) ^ BUP(t3, 28, 31); - x1 = BUP(t0, 1, 12) ^ BUP(t0, 5, 13) ^ BUP(t0, 9, 14) ^ - BUP(t0, 13, 15) ^ BDN(t0, 17, 16) ^ BDN(t0, 21, 17) ^ - BDN(t0, 25, 18) ^ BDN(t0, 29, 19) ^ BUP(t1, 1, 20) ^ - BUP(t1, 5, 21) ^ BUP(t1, 9, 22) ^ BUP(t1, 13, 23) ^ - BUP(t1, 17, 24) ^ BUP(t1, 21, 25) ^ BUP(t1, 25, 26) ^ - BDN(t1, 29, 27) ^ BUP(t2, 1, 28) ^ BUP(t2, 5, 29) ^ - BUP(t2, 9, 30) ^ BUP(t2, 13, 31) ^ BCP(t4, 0) ^ - BDN(t4, 4, 1) ^ BDN(t4, 8, 2) ^ BDN(t4, 12, 3) ^ - BDN(t4, 16, 4) ^ BDN(t4, 20, 5) ^ BDN(t4, 24, 6) ^ - BDN(t4, 28, 7) ^ BUP(t5, 0, 8) ^ BUP(t5, 4, 9) ^ - BUP(t5, 8, 10) ^ BDN(t5, 12, 11); - x2 = BUP(t0, 2, 24) ^ BUP(t0, 6, 25) ^ BUP(t0, 10, 26) ^ - BUP(t0, 14, 27) ^ BUP(t0, 18, 28) ^ BUP(t0, 22, 29) ^ - BUP(t0, 26, 30) ^ BUP(t0, 30, 31) ^ BDN(t2, 17, 0) ^ - BDN(t2, 21, 1) ^ BDN(t2, 25, 2) ^ BDN(t2, 29, 3) ^ - BUP(t3, 1, 4) ^ BCP(t3, 5) ^ BDN(t3, 9, 6) ^ - BDN(t3, 13, 7) ^ BDN(t3, 17, 8) ^ BDN(t3, 21, 9) ^ - BDN(t3, 25, 10) ^ BDN(t3, 29, 11) ^ BUP(t4, 1, 12) ^ - BUP(t4, 5, 13) ^ BUP(t4, 9, 14) ^ BUP(t4, 13, 15) ^ - BDN(t4, 17, 16) ^ BDN(t4, 21, 17) ^ BDN(t4, 25, 18) ^ - BDN(t4, 29, 19) ^ BUP(t5, 1, 20) ^ BUP(t5, 5, 21) ^ - BUP(t5, 9, 22) ^ BUP(t5, 13, 23); - x3 = BDN(t1, 2, 0) ^ BDN(t1, 6, 1) ^ BDN(t1, 10, 2) ^ - BDN(t1, 14, 3) ^ BDN(t1, 18, 4) ^ BDN(t1, 22, 5) ^ - BDN(t1, 26, 6) ^ BDN(t1, 30, 7) ^ BUP(t2, 2, 8) ^ - BUP(t2, 6, 9) ^ BCP(t2, 10) ^ BDN(t2, 14, 11) ^ - BDN(t2, 18, 12) ^ BDN(t2, 22, 13) ^ BDN(t2, 26, 14) ^ - BDN(t2, 30, 15) ^ BUP(t3, 2, 16) ^ BUP(t3, 6, 17) ^ - BUP(t3, 10, 18) ^ BUP(t3, 14, 19) ^ BUP(t3, 18, 20) ^ - BDN(t3, 22, 21) ^ BDN(t3, 26, 22) ^ BDN(t3, 30, 23) ^ - BUP(t4, 2, 24) ^ BUP(t4, 6, 25) ^ BUP(t4, 10, 26) ^ - BUP(t4, 14, 27) ^ BUP(t4, 18, 28) ^ BUP(t4, 22, 29) ^ - BUP(t4, 26, 30) ^ BUP(t4, 30, 31); - x4 = BUP(t0, 3, 4) ^ BDN(t0, 7, 5) ^ BDN(t0, 11, 6) ^ - BDN(t0, 15, 7) ^ BDN(t0, 19, 8) ^ BDN(t0, 23, 9) ^ - BDN(t0, 27, 10) ^ BDN(t0, 31, 11) ^ BUP(t1, 3, 12) ^ - BUP(t1, 7, 13) ^ BUP(t1, 11, 14) ^ BCP(t1, 15) ^ - BDN(t1, 19, 16) ^ BDN(t1, 23, 17) ^ BDN(t1, 27, 18) ^ - BDN(t1, 31, 19) ^ BUP(t2, 3, 20) ^ BUP(t2, 7, 21) ^ - BUP(t2, 11, 22) ^ BUP(t2, 15, 23) ^ BUP(t2, 19, 24) ^ - BUP(t2, 23, 25) ^ BDN(t2, 27, 26) ^ BDN(t2, 31, 27) ^ - BUP(t3, 3, 28) ^ BUP(t3, 7, 29) ^ BUP(t3, 11, 30) ^ - BUP(t3, 15, 31) ^ BDN(t5, 2, 0) ^ BDN(t5, 6, 1) ^ - BDN(t5, 10, 2) ^ BDN(t5, 14, 3); - x5 = BDN(t3, 19, 0) ^ BDN(t3, 23, 1) ^ BDN(t3, 27, 2) ^ - BDN(t3, 31, 3) ^ BUP(t4, 3, 4) ^ BDN(t4, 7, 5) ^ - BDN(t4, 11, 6) ^ BDN(t4, 15, 7) ^ BDN(t4, 19, 8) ^ - BDN(t4, 23, 9) ^ BDN(t4, 27, 10) ^ BDN(t4, 31, 11) ^ - BUP(t5, 3, 12) ^ BUP(t5, 7, 13) ^ BUP(t5, 11, 14) ^ - BCP(t5, 15); - } - - /* Store the local variables back to the state in little-endian order */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->W[0] = x0; - state->W[1] = x1; - state->W[2] = x2; - state->W[3] = x3; - state->W[4] = x4; - state->W[5] = x5; -#else - le_store_word32(state->B, x0); - le_store_word32(state->B + 4, x1); - 
le_store_word32(state->B + 8, x2); - le_store_word32(state->B + 12, x3); - le_store_word32(state->B + 16, x4); - le_store_word16(state->B + 20, x5); /* Last word is only 16 bits */ -#endif -} - -#endif /* !__AVR__ */ diff --git a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-spongent.h b/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-spongent.h deleted file mode 100644 index bb9823f..0000000 --- a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-spongent.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SPONGENT_H -#define LW_INTERNAL_SPONGENT_H - -#include "internal-util.h" - -/** - * \file internal-spongent.h - * \brief Internal implementation of the Spongent-pi permutation. - * - * References: https://www.esat.kuleuven.be/cosic/elephant/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the Spongent-pi[160] state in bytes. - */ -#define SPONGENT160_STATE_SIZE 20 - -/** - * \brief Size of the Spongent-pi[176] state in bytes. - */ -#define SPONGENT176_STATE_SIZE 22 - -/** - * \brief Structure of the internal state of the Spongent-pi[160] permutation. - */ -typedef union -{ - uint32_t W[5]; /**< Spongent-pi[160] state as 32-bit words */ - uint8_t B[20]; /**< Spongent-pi[160] state as bytes */ - -} spongent160_state_t; - -/** - * \brief Structure of the internal state of the Spongent-pi[176] permutation. - * - * Note: The state is technically only 176 bits, but we increase it to - * 192 bits so that we can use 32-bit word operations to manipulate the - * state. The extra bits in the last word are fixed to zero. - */ -typedef union -{ - uint32_t W[6]; /**< Spongent-pi[176] state as 32-bit words */ - uint8_t B[24]; /**< Spongent-pi[176] state as bytes */ - -} spongent176_state_t; - -/** - * \brief Permutes the Spongent-pi[160] state. - * - * \param state The Spongent-pi[160] state to be permuted. - */ -void spongent160_permute(spongent160_state_t *state); - -/** - * \brief Permutes the Spongent-pi[176] state. - * - * \param state The Spongent-pi[176] state to be permuted. 
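/* Editor's note - illustrative usage sketch, not part of the original
 * header; it assumes the spongent176_permute() declaration just below and
 * the hypothetical helper name is the editor's. Callers operate on the
 * first SPONGENT176_STATE_SIZE (22) bytes of B; zeroing the whole 24-byte
 * union first keeps the 16 padding bits of W[5] at zero, as the structure
 * comment above requires. */
static void spongent176_permute_bytes(unsigned char buf[SPONGENT176_STATE_SIZE])
{
    spongent176_state_t st;
    unsigned i;
    for (i = 0; i < sizeof(st.B); ++i)
        st.B[i] = 0;                            /* clears the padding bits too */
    for (i = 0; i < SPONGENT176_STATE_SIZE; ++i)
        st.B[i] = buf[i];
    spongent176_permute(&st);
    for (i = 0; i < SPONGENT176_STATE_SIZE; ++i)
        buf[i] = st.B[i];
}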
- */ -void spongent176_permute(spongent176_state_t *state); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-util.h b/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/elephant/Implementations/crypto_aead/elephant200v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
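/* Editor's note - illustrative sketch only, not part of the original
 * header. A runtime probe (hypothetical helper) that a test build could
 * use to confirm the compile-time endianness guess made just below. */
static int lw_util_runtime_is_little_endian(void)
{
    union { uint16_t w; uint8_t b[2]; } probe;
    probe.w = 0x0102;
    return probe.b[0] == 0x02; /* low byte stored first => little-endian */
}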
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
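/* Editor's note - illustrative check only, not part of the original
 * header. One concrete instance of the composition used above: a left
 * rotate by 5 is a left rotate by 8 followed by three right rotates by 1,
 * since 5 = 8 - 3, and those steps are the cheap ones on AVR. */
static int lw_util_rotate_composition_check(void)
{
    uint32_t x = 0x80000001U;
    uint32_t direct = leftRotate(x, 5);   /* 0x00000030 */
    uint32_t composed =
        rightRotate(rightRotate(rightRotate(leftRotate(x, 8), 1), 1), 1);
    return direct == composed;            /* expected: 1 */
}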
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/elephant/Implementations/crypto_aead/elephant200v1/rhys/elephant.c b/elephant/Implementations/crypto_aead/elephant200v1/rhys/elephant.c index 770f568..2f7abb3 100644 --- a/elephant/Implementations/crypto_aead/elephant200v1/rhys/elephant.c +++ b/elephant/Implementations/crypto_aead/elephant200v1/rhys/elephant.c @@ -660,7 +660,7 @@ static void delirium_process_ad if (size <= adlen) { /* Process a complete block */ lw_xor_block(state->B + posn, ad, size); - keccakp_200_permute(state, 18); + keccakp_200_permute(state); lw_xor_block(state->B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state->B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state->B, DELIRIUM_TAG_SIZE); @@ -680,7 +680,7 @@ static void delirium_process_ad /* Pad and absorb the final block */ state->B[posn] ^= 0x01; - keccakp_200_permute(state, 18); + keccakp_200_permute(state); lw_xor_block(state->B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state->B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state->B, DELIRIUM_TAG_SIZE); @@ -707,7 +707,7 @@ int delirium_aead_encrypt /* Hash the key and generate the initial mask */ memcpy(state.B, k, DELIRIUM_KEY_SIZE); memset(state.B + DELIRIUM_KEY_SIZE, 0, sizeof(state.B) - DELIRIUM_KEY_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); memcpy(mask, state.B, DELIRIUM_KEY_SIZE); memset(mask + DELIRIUM_KEY_SIZE, 0, sizeof(mask) - DELIRIUM_KEY_SIZE); memcpy(start, mask, sizeof(mask)); @@ -726,7 +726,7 @@ int delirium_aead_encrypt /* Encrypt using the current mask */ memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, m, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); memcpy(c, state.B, KECCAKP_200_STATE_SIZE); @@ -735,7 +735,7 @@ int delirium_aead_encrypt delirium_lfsr(next, mask); lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, next, KECCAKP_200_STATE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); @@ -751,7 +751,7 @@ int delirium_aead_encrypt unsigned temp = (unsigned)mlen; memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, m, temp); lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); memcpy(c, state.B, temp); @@ -762,7 +762,7 @@ int delirium_aead_encrypt memset(state.B + temp + 1, 0, KECCAKP_200_STATE_SIZE - temp - 1); lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, next, KECCAKP_200_STATE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); @@ -772,7 +772,7 @@ int delirium_aead_encrypt delirium_lfsr(next, mask); 
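/* [Editorial note, not part of the patch] The rhys-avr copy of
 * internal-util.h deleted above derives every fixed-distance rotation
 * from a generic rotate, and its composed 32-bit variant rewrites right
 * rotations as left rotations of the complementary distance (for
 * example, rightRotate31(a) is defined as leftRotate1(a), and
 * rightRotate24(a) as leftRotate8(a)).  The stand-alone sketch below
 * only illustrates that identity; rotl32 and rotr32 are hypothetical
 * helper names, not part of the library. */
#include <assert.h>
#include <stdint.h>

static uint32_t rotl32(uint32_t x, unsigned n)
{
    /* Rotate left by n bits, 0 < n < 32 */
    return (x << n) | (x >> (32u - n));
}

static uint32_t rotr32(uint32_t x, unsigned n)
{
    /* Rotate right by n bits, 0 < n < 32 */
    return (x >> n) | (x << (32u - n));
}

int main(void)
{
    uint32_t x = 0x12345678u;
    /* A right rotation by n equals a left rotation by 32 - n, which is
     * why the composed AVR variant only needs the rotations that are
     * cheap on that platform (1 bit, or a multiple of 8 bits) plus this
     * identity to cover every other distance. */
    assert(rotr32(x, 31) == rotl32(x, 1));
    assert(rotr32(x, 24) == rotl32(x, 8));
    return 0;
}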
lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); state.B[0] ^= 0x01; - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); @@ -807,7 +807,7 @@ int delirium_aead_decrypt /* Hash the key and generate the initial mask */ memcpy(state.B, k, DELIRIUM_KEY_SIZE); memset(state.B + DELIRIUM_KEY_SIZE, 0, sizeof(state.B) - DELIRIUM_KEY_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); memcpy(mask, state.B, DELIRIUM_KEY_SIZE); memset(mask + DELIRIUM_KEY_SIZE, 0, sizeof(mask) - DELIRIUM_KEY_SIZE); memcpy(start, mask, sizeof(mask)); @@ -828,7 +828,7 @@ int delirium_aead_decrypt delirium_lfsr(next, mask); lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, c, KECCAKP_200_STATE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); @@ -836,7 +836,7 @@ int delirium_aead_decrypt /* Decrypt using the current mask */ memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block_2_src(m, state.B, c, KECCAKP_200_STATE_SIZE); @@ -853,7 +853,7 @@ int delirium_aead_decrypt lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, c, temp); state.B[temp] ^= 0x01; - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); @@ -861,7 +861,7 @@ int delirium_aead_decrypt /* Decrypt the last block using the current mask */ memcpy(state.B, mask, KECCAKP_200_STATE_SIZE); lw_xor_block(state.B, npub, DELIRIUM_NONCE_SIZE); - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, temp); lw_xor_block_2_src(m, state.B, c, temp); c += temp; @@ -870,7 +870,7 @@ int delirium_aead_decrypt delirium_lfsr(next, mask); lw_xor_block_2_src(state.B, mask, next, KECCAKP_200_STATE_SIZE); state.B[0] ^= 0x01; - keccakp_200_permute(&state, 18); + keccakp_200_permute(&state); lw_xor_block(state.B, mask, DELIRIUM_TAG_SIZE); lw_xor_block(state.B, next, DELIRIUM_TAG_SIZE); lw_xor_block(tag, state.B, DELIRIUM_TAG_SIZE); diff --git a/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-keccak-avr.S b/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-keccak-avr.S new file mode 100644 index 0000000..e50ccaf --- /dev/null +++ b/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-keccak-avr.S @@ -0,0 +1,1552 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global keccakp_200_permute + .type keccakp_200_permute, @function +keccakp_200_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r26,Z+6 + ldd r27,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r4,Z+12 + ldd r5,Z+13 + ldd r6,Z+14 + ldd r7,Z+15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd 
r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + ldd r24,Z+24 + push r31 + push r30 + rcall 82f + ldi r30,1 + eor r18,r30 + rcall 82f + ldi r30,130 + eor r18,r30 + rcall 82f + ldi r30,138 + eor r18,r30 + rcall 82f + mov r30,r1 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,1 + eor r18,r30 + rcall 82f + ldi r30,129 + eor r18,r30 + rcall 82f + ldi r30,9 + eor r18,r30 + rcall 82f + ldi r30,138 + eor r18,r30 + rcall 82f + ldi r30,136 + eor r18,r30 + rcall 82f + ldi r30,9 + eor r18,r30 + rcall 82f + ldi r30,10 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,137 + eor r18,r30 + rcall 82f + ldi r30,3 + eor r18,r30 + rcall 82f + ldi r30,2 + eor r18,r30 + rcall 82f + ldi r30,128 + eor r18,r30 + rjmp 420f +82: + mov r30,r18 + eor r30,r23 + eor r30,r2 + eor r30,r7 + eor r30,r12 + mov r31,r19 + eor r31,r26 + eor r31,r3 + eor r31,r8 + eor r31,r13 + mov r25,r20 + eor r25,r27 + eor r25,r4 + eor r25,r9 + eor r25,r14 + mov r16,r21 + eor r16,r28 + eor r16,r5 + eor r16,r10 + eor r16,r15 + mov r17,r22 + eor r17,r29 + eor r17,r6 + eor r17,r11 + eor r17,r24 + mov r0,r31 + lsl r0 + adc r0,r1 + eor r0,r17 + eor r18,r0 + eor r23,r0 + eor r2,r0 + eor r7,r0 + eor r12,r0 + mov r0,r25 + lsl r0 + adc r0,r1 + eor r0,r30 + eor r19,r0 + eor r26,r0 + eor r3,r0 + eor r8,r0 + eor r13,r0 + mov r0,r16 + lsl r0 + adc r0,r1 + eor r0,r31 + eor r20,r0 + eor r27,r0 + eor r4,r0 + eor r9,r0 + eor r14,r0 + mov r0,r17 + lsl r0 + adc r0,r1 + eor r0,r25 + eor r21,r0 + eor r28,r0 + eor r5,r0 + eor r10,r0 + eor r15,r0 + mov r0,r30 + lsl r0 + adc r0,r1 + eor r0,r16 + eor r22,r0 + eor r29,r0 + eor r6,r0 + eor r11,r0 + eor r24,r0 + mov r30,r19 + swap r26 + mov r19,r26 + swap r29 + mov r26,r29 + mov r0,r1 + lsr r14 + ror r0 + lsr r14 + ror r0 + lsr r14 + ror r0 + or r14,r0 + mov r29,r14 + bst r6,0 + lsr r6 + bld r6,7 + mov r14,r6 + lsl r12 + adc r12,r1 + lsl r12 + adc r12,r1 + mov r6,r12 + mov r0,r1 + lsr r20 + ror r0 + lsr r20 + ror r0 + or r20,r0 + mov r12,r20 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + mov r20,r4 + lsl r5 + adc r5,r1 + mov r4,r5 + mov r5,r11 + mov r11,r15 + lsl r7 + adc r7,r1 + mov r15,r7 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + mov r7,r22 + mov r0,r1 + lsr r24 + ror r0 + lsr r24 + ror r0 + or r24,r0 + mov r22,r24 + lsl r13 + adc r13,r1 + lsl r13 + adc r13,r1 + mov r24,r13 + bst r28,0 + lsr r28 + bld r28,7 + mov r13,r28 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r28,r8 + swap r23 + mov r8,r23 + swap r21 + mov r23,r21 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r21,r10 + bst r9,0 + lsr r9 + bld r9,7 + mov r10,r9 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + mov r9,r3 + mov r0,r1 + lsr r27 + ror r0 + lsr r27 + ror r0 + or r27,r0 + mov r3,r27 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + mov r27,r2 + lsl r30 + adc r30,r1 + mov r2,r30 + mov r30,r18 + mov r31,r19 + mov r25,r20 + mov r16,r21 + mov r17,r22 + mov r18,r25 + mov r0,r31 + com r0 + and r18,r0 + eor r18,r30 + mov r19,r16 + mov r0,r25 + com r0 + and r19,r0 + eor r19,r31 + mov r20,r17 + mov r0,r16 + com r0 + and r20,r0 + eor r20,r25 + mov r21,r30 + mov r0,r17 + com r0 + and r21,r0 + eor r21,r16 + mov r22,r31 + mov r0,r30 + com r0 + and r22,r0 + eor r22,r17 + mov r30,r23 + mov r31,r26 + mov r25,r27 + mov r16,r28 + mov r17,r29 + mov r23,r25 + mov r0,r31 + com r0 + and r23,r0 + eor r23,r30 + mov 
r26,r16 + mov r0,r25 + com r0 + and r26,r0 + eor r26,r31 + mov r27,r17 + mov r0,r16 + com r0 + and r27,r0 + eor r27,r25 + mov r28,r30 + mov r0,r17 + com r0 + and r28,r0 + eor r28,r16 + mov r29,r31 + mov r0,r30 + com r0 + and r29,r0 + eor r29,r17 + mov r30,r2 + mov r31,r3 + mov r25,r4 + mov r16,r5 + mov r17,r6 + mov r2,r25 + mov r0,r31 + com r0 + and r2,r0 + eor r2,r30 + mov r3,r16 + mov r0,r25 + com r0 + and r3,r0 + eor r3,r31 + mov r4,r17 + mov r0,r16 + com r0 + and r4,r0 + eor r4,r25 + mov r5,r30 + mov r0,r17 + com r0 + and r5,r0 + eor r5,r16 + mov r6,r31 + mov r0,r30 + com r0 + and r6,r0 + eor r6,r17 + mov r30,r7 + mov r31,r8 + mov r25,r9 + mov r16,r10 + mov r17,r11 + mov r7,r25 + mov r0,r31 + com r0 + and r7,r0 + eor r7,r30 + mov r8,r16 + mov r0,r25 + com r0 + and r8,r0 + eor r8,r31 + mov r9,r17 + mov r0,r16 + com r0 + and r9,r0 + eor r9,r25 + mov r10,r30 + mov r0,r17 + com r0 + and r10,r0 + eor r10,r16 + mov r11,r31 + mov r0,r30 + com r0 + and r11,r0 + eor r11,r17 + mov r30,r12 + mov r31,r13 + mov r25,r14 + mov r16,r15 + mov r17,r24 + mov r12,r25 + mov r0,r31 + com r0 + and r12,r0 + eor r12,r30 + mov r13,r16 + mov r0,r25 + com r0 + and r13,r0 + eor r13,r31 + mov r14,r17 + mov r0,r16 + com r0 + and r14,r0 + eor r14,r25 + mov r15,r30 + mov r0,r17 + com r0 + and r15,r0 + eor r15,r16 + mov r24,r31 + mov r0,r30 + com r0 + and r24,r0 + eor r24,r17 + ret +420: + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r22 + std Z+5,r23 + std Z+6,r26 + std Z+7,r27 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r4 + std Z+13,r5 + std Z+14,r6 + std Z+15,r7 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + std Z+24,r24 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size keccakp_200_permute, .-keccakp_200_permute + + .text +.global keccakp_400_permute + .type keccakp_400_permute, @function +keccakp_400_permute: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + movw r30,r24 +.L__stack_usage = 17 + ld r6,Z + ldd r7,Z+1 + ldd r8,Z+2 + ldd r9,Z+3 + ldd r10,Z+4 + ldd r11,Z+5 + ldd r12,Z+6 + ldd r13,Z+7 + ldd r14,Z+8 + ldd r15,Z+9 + cpi r22,20 + brcs 15f + rcall 153f + ldi r23,1 + eor r6,r23 +15: + cpi r22,19 + brcs 23f + rcall 153f + ldi r23,130 + eor r6,r23 + ldi r17,128 + eor r7,r17 +23: + cpi r22,18 + brcs 31f + rcall 153f + ldi r23,138 + eor r6,r23 + ldi r17,128 + eor r7,r17 +31: + cpi r22,17 + brcs 37f + rcall 153f + ldi r23,128 + eor r7,r23 +37: + cpi r22,16 + brcs 45f + rcall 153f + ldi r23,139 + eor r6,r23 + ldi r17,128 + eor r7,r17 +45: + cpi r22,15 + brcs 51f + rcall 153f + ldi r23,1 + eor r6,r23 +51: + cpi r22,14 + brcs 59f + rcall 153f + ldi r23,129 + eor r6,r23 + ldi r17,128 + eor r7,r17 +59: + cpi r22,13 + brcs 67f + rcall 153f + ldi r23,9 + eor r6,r23 + ldi r17,128 + eor r7,r17 +67: + cpi r22,12 + brcs 73f + rcall 153f + ldi r23,138 + eor r6,r23 +73: + cpi r22,11 + brcs 79f + rcall 153f + ldi r23,136 + eor r6,r23 +79: + cpi r22,10 + brcs 87f + rcall 153f + ldi r23,9 + eor r6,r23 + ldi r17,128 + eor r7,r17 +87: + cpi r22,9 + brcs 93f + rcall 153f + ldi r23,10 + eor r6,r23 +93: + cpi r22,8 + brcs 101f + rcall 153f + ldi r23,139 + eor r6,r23 + ldi r17,128 + eor r7,r17 +101: + cpi r22,7 + brcs 107f + rcall 153f + ldi r23,139 + eor r6,r23 
+107: + cpi r22,6 + brcs 115f + rcall 153f + ldi r23,137 + eor r6,r23 + ldi r17,128 + eor r7,r17 +115: + cpi r22,5 + brcs 123f + rcall 153f + ldi r23,3 + eor r6,r23 + ldi r17,128 + eor r7,r17 +123: + cpi r22,4 + brcs 131f + rcall 153f + ldi r23,2 + eor r6,r23 + ldi r17,128 + eor r7,r17 +131: + cpi r22,3 + brcs 137f + rcall 153f + ldi r23,128 + eor r6,r23 +137: + cpi r22,2 + brcs 145f + rcall 153f + ldi r23,10 + eor r6,r23 + ldi r17,128 + eor r7,r17 +145: + cpi r22,1 + brcs 151f + rcall 153f + ldi r23,10 + eor r6,r23 +151: + rjmp 1004f +153: + movw r18,r6 + ldd r0,Z+10 + eor r18,r0 + ldd r0,Z+11 + eor r19,r0 + ldd r0,Z+20 + eor r18,r0 + ldd r0,Z+21 + eor r19,r0 + ldd r0,Z+30 + eor r18,r0 + ldd r0,Z+31 + eor r19,r0 + ldd r0,Z+40 + eor r18,r0 + ldd r0,Z+41 + eor r19,r0 + movw r20,r8 + ldd r0,Z+12 + eor r20,r0 + ldd r0,Z+13 + eor r21,r0 + ldd r0,Z+22 + eor r20,r0 + ldd r0,Z+23 + eor r21,r0 + ldd r0,Z+32 + eor r20,r0 + ldd r0,Z+33 + eor r21,r0 + ldd r0,Z+42 + eor r20,r0 + ldd r0,Z+43 + eor r21,r0 + movw r26,r10 + ldd r0,Z+14 + eor r26,r0 + ldd r0,Z+15 + eor r27,r0 + ldd r0,Z+24 + eor r26,r0 + ldd r0,Z+25 + eor r27,r0 + ldd r0,Z+34 + eor r26,r0 + ldd r0,Z+35 + eor r27,r0 + ldd r0,Z+44 + eor r26,r0 + ldd r0,Z+45 + eor r27,r0 + movw r2,r12 + ldd r0,Z+16 + eor r2,r0 + ldd r0,Z+17 + eor r3,r0 + ldd r0,Z+26 + eor r2,r0 + ldd r0,Z+27 + eor r3,r0 + ldd r0,Z+36 + eor r2,r0 + ldd r0,Z+37 + eor r3,r0 + ldd r0,Z+46 + eor r2,r0 + ldd r0,Z+47 + eor r3,r0 + movw r4,r14 + ldd r0,Z+18 + eor r4,r0 + ldd r0,Z+19 + eor r5,r0 + ldd r0,Z+28 + eor r4,r0 + ldd r0,Z+29 + eor r5,r0 + ldd r0,Z+38 + eor r4,r0 + ldd r0,Z+39 + eor r5,r0 + ldd r0,Z+48 + eor r4,r0 + ldd r0,Z+49 + eor r5,r0 + movw r24,r20 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r4 + eor r25,r5 + eor r6,r24 + eor r7,r25 + ldd r0,Z+10 + eor r0,r24 + std Z+10,r0 + ldd r0,Z+11 + eor r0,r25 + std Z+11,r0 + ldd r0,Z+20 + eor r0,r24 + std Z+20,r0 + ldd r0,Z+21 + eor r0,r25 + std Z+21,r0 + ldd r0,Z+30 + eor r0,r24 + std Z+30,r0 + ldd r0,Z+31 + eor r0,r25 + std Z+31,r0 + ldd r0,Z+40 + eor r0,r24 + std Z+40,r0 + ldd r0,Z+41 + eor r0,r25 + std Z+41,r0 + movw r24,r26 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r18 + eor r25,r19 + eor r8,r24 + eor r9,r25 + ldd r0,Z+12 + eor r0,r24 + std Z+12,r0 + ldd r0,Z+13 + eor r0,r25 + std Z+13,r0 + ldd r0,Z+22 + eor r0,r24 + std Z+22,r0 + ldd r0,Z+23 + eor r0,r25 + std Z+23,r0 + ldd r0,Z+32 + eor r0,r24 + std Z+32,r0 + ldd r0,Z+33 + eor r0,r25 + std Z+33,r0 + ldd r0,Z+42 + eor r0,r24 + std Z+42,r0 + ldd r0,Z+43 + eor r0,r25 + std Z+43,r0 + movw r24,r2 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r20 + eor r25,r21 + eor r10,r24 + eor r11,r25 + ldd r0,Z+14 + eor r0,r24 + std Z+14,r0 + ldd r0,Z+15 + eor r0,r25 + std Z+15,r0 + ldd r0,Z+24 + eor r0,r24 + std Z+24,r0 + ldd r0,Z+25 + eor r0,r25 + std Z+25,r0 + ldd r0,Z+34 + eor r0,r24 + std Z+34,r0 + ldd r0,Z+35 + eor r0,r25 + std Z+35,r0 + ldd r0,Z+44 + eor r0,r24 + std Z+44,r0 + ldd r0,Z+45 + eor r0,r25 + std Z+45,r0 + movw r24,r4 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r26 + eor r25,r27 + eor r12,r24 + eor r13,r25 + ldd r0,Z+16 + eor r0,r24 + std Z+16,r0 + ldd r0,Z+17 + eor r0,r25 + std Z+17,r0 + ldd r0,Z+26 + eor r0,r24 + std Z+26,r0 + ldd r0,Z+27 + eor r0,r25 + std Z+27,r0 + ldd r0,Z+36 + eor r0,r24 + std Z+36,r0 + ldd r0,Z+37 + eor r0,r25 + std Z+37,r0 + ldd r0,Z+46 + eor r0,r24 + std Z+46,r0 + ldd r0,Z+47 + eor r0,r25 + std Z+47,r0 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r2 + eor r25,r3 + eor r14,r24 + eor r15,r25 + ldd r0,Z+18 + eor r0,r24 + std Z+18,r0 + ldd 
r0,Z+19 + eor r0,r25 + std Z+19,r0 + ldd r0,Z+28 + eor r0,r24 + std Z+28,r0 + ldd r0,Z+29 + eor r0,r25 + std Z+29,r0 + ldd r0,Z+38 + eor r0,r24 + std Z+38,r0 + ldd r0,Z+39 + eor r0,r25 + std Z+39,r0 + ldd r0,Z+48 + eor r0,r24 + std Z+48,r0 + ldd r0,Z+49 + eor r0,r25 + std Z+49,r0 + movw r24,r8 + ldd r8,Z+12 + ldd r9,Z+13 + mov r0,r9 + mov r9,r8 + mov r8,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldd r18,Z+18 + ldd r19,Z+19 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+12,r18 + std Z+13,r19 + ldd r18,Z+44 + ldd r19,Z+45 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+18,r18 + std Z+19,r19 + ldd r18,Z+28 + ldd r19,Z+29 + mov r0,r19 + mov r19,r18 + mov r18,r0 + bst r18,0 + lsr r19 + ror r18 + bld r19,7 + std Z+44,r18 + std Z+45,r19 + ldd r18,Z+40 + ldd r19,Z+41 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+28,r18 + std Z+29,r19 + movw r18,r10 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+40,r18 + std Z+41,r19 + ldd r10,Z+24 + ldd r11,Z+25 + mov r0,r11 + mov r11,r10 + mov r10,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldd r18,Z+26 + ldd r19,Z+27 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + std Z+24,r18 + std Z+25,r19 + ldd r18,Z+38 + ldd r19,Z+39 + mov r0,r19 + mov r19,r18 + mov r18,r0 + std Z+26,r18 + std Z+27,r19 + ldd r18,Z+46 + ldd r19,Z+47 + mov r0,r19 + mov r19,r18 + mov r18,r0 + std Z+38,r18 + std Z+39,r19 + ldd r18,Z+30 + ldd r19,Z+31 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + std Z+46,r18 + std Z+47,r19 + movw r18,r14 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+30,r18 + std Z+31,r19 + ldd r14,Z+48 + ldd r15,Z+49 + mov r0,r1 + lsr r15 + ror r14 + ror r0 + lsr r15 + ror r14 + ror r0 + or r15,r0 + ldd r18,Z+42 + ldd r19,Z+43 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+48,r18 + std Z+49,r19 + ldd r18,Z+16 + ldd r19,Z+17 + mov r0,r19 + mov r19,r18 + mov r18,r0 + bst r18,0 + lsr r19 + ror r18 + bld r19,7 + std Z+42,r18 + std Z+43,r19 + ldd r18,Z+32 + ldd r19,Z+33 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+16,r18 + std Z+17,r19 + ldd r18,Z+10 + ldd r19,Z+11 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+32,r18 + std Z+33,r19 + movw r18,r12 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+10,r18 + std Z+11,r19 + ldd r12,Z+36 + ldd r13,Z+37 + mov r0,r13 + mov r13,r12 + mov r12,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + or r13,r0 + ldd r18,Z+34 + ldd r19,Z+35 + bst r18,0 + lsr r19 + ror r18 + bld r19,7 + std Z+36,r18 + std Z+37,r19 + ldd r18,Z+22 + ldd r19,Z+23 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+34,r18 + std Z+35,r19 + ldd r18,Z+14 + ldd r19,Z+15 + mov r0,r19 + mov r19,r18 + mov r18,r0 + mov r0,r1 + 
lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+22,r18 + std Z+23,r19 + ldd r18,Z+20 + ldd r19,Z+21 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+14,r18 + std Z+15,r19 + lsl r24 + rol r25 + adc r24,r1 + std Z+20,r24 + std Z+21,r25 + movw r18,r6 + movw r20,r8 + movw r26,r10 + movw r2,r12 + movw r4,r14 + movw r6,r26 + mov r0,r20 + com r0 + and r6,r0 + mov r0,r21 + com r0 + and r7,r0 + eor r6,r18 + eor r7,r19 + movw r8,r2 + mov r0,r26 + com r0 + and r8,r0 + mov r0,r27 + com r0 + and r9,r0 + eor r8,r20 + eor r9,r21 + movw r10,r4 + mov r0,r2 + com r0 + and r10,r0 + mov r0,r3 + com r0 + and r11,r0 + eor r10,r26 + eor r11,r27 + movw r12,r18 + mov r0,r4 + com r0 + and r12,r0 + mov r0,r5 + com r0 + and r13,r0 + eor r12,r2 + eor r13,r3 + movw r14,r20 + mov r0,r18 + com r0 + and r14,r0 + mov r0,r19 + com r0 + and r15,r0 + eor r14,r4 + eor r15,r5 + ldd r18,Z+10 + ldd r19,Z+11 + ldd r20,Z+12 + ldd r21,Z+13 + ldd r26,Z+14 + ldd r27,Z+15 + ldd r2,Z+16 + ldd r3,Z+17 + ldd r4,Z+18 + ldd r5,Z+19 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+10,r24 + std Z+11,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+12,r24 + std Z+13,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+14,r24 + std Z+15,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+16,r24 + std Z+17,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+18,r24 + std Z+19,r25 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + ldd r26,Z+24 + ldd r27,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r4,Z+28 + ldd r5,Z+29 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+20,r24 + std Z+21,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+22,r24 + std Z+23,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+24,r24 + std Z+25,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+26,r24 + std Z+27,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+28,r24 + std Z+29,r25 + ldd r18,Z+30 + ldd r19,Z+31 + ldd r20,Z+32 + ldd r21,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + ldd r2,Z+36 + ldd r3,Z+37 + ldd r4,Z+38 + ldd r5,Z+39 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+30,r24 + std Z+31,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+32,r24 + std Z+33,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+34,r24 + std Z+35,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+36,r24 + std Z+37,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+38,r24 + std Z+39,r25 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + ldd 
r26,Z+44 + ldd r27,Z+45 + ldd r2,Z+46 + ldd r3,Z+47 + ldd r4,Z+48 + ldd r5,Z+49 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+40,r24 + std Z+41,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+42,r24 + std Z+43,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+44,r24 + std Z+45,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+46,r24 + std Z+47,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+48,r24 + std Z+49,r25 + ret +1004: + st Z,r6 + std Z+1,r7 + std Z+2,r8 + std Z+3,r9 + std Z+4,r10 + std Z+5,r11 + std Z+6,r12 + std Z+7,r13 + std Z+8,r14 + std Z+9,r15 + pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size keccakp_400_permute, .-keccakp_400_permute + +#endif diff --git a/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-keccak.c b/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-keccak.c index c3c4011..60539df 100644 --- a/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-keccak.c +++ b/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-keccak.c @@ -22,74 +22,79 @@ #include "internal-keccak.h" +#if !defined(__AVR__) + /* Faster method to compute ((x + y) % 5) that avoids the division */ static unsigned char const addMod5Table[9] = { 0, 1, 2, 3, 4, 0, 1, 2, 3 }; #define addMod5(x, y) (addMod5Table[(x) + (y)]) -void keccakp_200_permute(keccakp_200_state_t *state, unsigned rounds) +void keccakp_200_permute(keccakp_200_state_t *state) { static uint8_t const RC[18] = { 0x01, 0x82, 0x8A, 0x00, 0x8B, 0x01, 0x81, 0x09, 0x8A, 0x88, 0x09, 0x0A, 0x8B, 0x8B, 0x89, 0x03, 0x02, 0x80 }; - uint8_t B[5][5]; + uint8_t C[5]; uint8_t D; unsigned round; unsigned index, index2; - for (round = 18 - rounds; round < 18; ++round) { + for (round = 0; round < 18; ++round) { /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. To save a bit of memory, - * we use the first row of B to store C and compute D on the fly */ + * arrays of size 5 called C and D. Compute D on the fly */ for (index = 0; index < 5; ++index) { - B[0][index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; + C[index] = state->A[0][index] ^ state->A[1][index] ^ + state->A[2][index] ^ state->A[3][index] ^ + state->A[4][index]; } for (index = 0; index < 5; ++index) { - D = B[0][addMod5(index, 4)] ^ - leftRotate1_8(B[0][addMod5(index, 1)]); + D = C[addMod5(index, 4)] ^ + leftRotate1_8(C[addMod5(index, 1)]); for (index2 = 0; index2 < 5; ++index2) state->A[index2][index] ^= D; } /* Step mapping rho and pi combined into a single step. 
* Rotate all lanes by a specific offset and rearrange */ - B[0][0] = state->A[0][0]; - B[1][0] = leftRotate4_8(state->A[0][3]); - B[2][0] = leftRotate1_8(state->A[0][1]); - B[3][0] = leftRotate3_8(state->A[0][4]); - B[4][0] = leftRotate6_8(state->A[0][2]); - B[0][1] = leftRotate4_8(state->A[1][1]); - B[1][1] = leftRotate4_8(state->A[1][4]); - B[2][1] = leftRotate6_8(state->A[1][2]); - B[3][1] = leftRotate4_8(state->A[1][0]); - B[4][1] = leftRotate7_8(state->A[1][3]); - B[0][2] = leftRotate3_8(state->A[2][2]); - B[1][2] = leftRotate3_8(state->A[2][0]); - B[2][2] = leftRotate1_8(state->A[2][3]); - B[3][2] = leftRotate2_8(state->A[2][1]); - B[4][2] = leftRotate7_8(state->A[2][4]); - B[0][3] = leftRotate5_8(state->A[3][3]); - B[1][3] = leftRotate5_8(state->A[3][1]); - B[2][3] = state->A[3][4]; - B[3][3] = leftRotate7_8(state->A[3][2]); - B[4][3] = leftRotate1_8(state->A[3][0]); - B[0][4] = leftRotate6_8(state->A[4][4]); - B[1][4] = leftRotate5_8(state->A[4][2]); - B[2][4] = leftRotate2_8(state->A[4][0]); - B[3][4] = state->A[4][3]; - B[4][4] = leftRotate2_8(state->A[4][1]); + D = state->A[0][1]; + state->A[0][1] = leftRotate4_8(state->A[1][1]); + state->A[1][1] = leftRotate4_8(state->A[1][4]); + state->A[1][4] = leftRotate5_8(state->A[4][2]); + state->A[4][2] = leftRotate7_8(state->A[2][4]); + state->A[2][4] = leftRotate2_8(state->A[4][0]); + state->A[4][0] = leftRotate6_8(state->A[0][2]); + state->A[0][2] = leftRotate3_8(state->A[2][2]); + state->A[2][2] = leftRotate1_8(state->A[2][3]); + state->A[2][3] = state->A[3][4]; + state->A[3][4] = state->A[4][3]; + state->A[4][3] = leftRotate1_8(state->A[3][0]); + state->A[3][0] = leftRotate3_8(state->A[0][4]); + state->A[0][4] = leftRotate6_8(state->A[4][4]); + state->A[4][4] = leftRotate2_8(state->A[4][1]); + state->A[4][1] = leftRotate7_8(state->A[1][3]); + state->A[1][3] = leftRotate5_8(state->A[3][1]); + state->A[3][1] = leftRotate4_8(state->A[1][0]); + state->A[1][0] = leftRotate4_8(state->A[0][3]); + state->A[0][3] = leftRotate5_8(state->A[3][3]); + state->A[3][3] = leftRotate7_8(state->A[3][2]); + state->A[3][2] = leftRotate2_8(state->A[2][1]); + state->A[2][1] = leftRotate6_8(state->A[1][2]); + state->A[1][2] = leftRotate3_8(state->A[2][0]); + state->A[2][0] = leftRotate1_8(D); /* Step mapping chi. Combine each lane with two others in its row */ for (index = 0; index < 5; ++index) { + C[0] = state->A[index][0]; + C[1] = state->A[index][1]; + C[2] = state->A[index][2]; + C[3] = state->A[index][3]; + C[4] = state->A[index][4]; for (index2 = 0; index2 < 5; ++index2) { - state->A[index2][index] = - B[index2][index] ^ - ((~B[index2][addMod5(index, 1)]) & - B[index2][addMod5(index, 2)]); + state->A[index][index2] = + C[index2] ^ + ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); } } @@ -110,61 +115,64 @@ void keccakp_400_permute_host(keccakp_400_state_t *state, unsigned rounds) 0x008A, 0x0088, 0x8009, 0x000A, 0x808B, 0x008B, 0x8089, 0x8003, 0x8002, 0x0080, 0x800A, 0x000A }; - uint16_t B[5][5]; + uint16_t C[5]; uint16_t D; unsigned round; unsigned index, index2; for (round = 20 - rounds; round < 20; ++round) { /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. To save a bit of memory, - * we use the first row of B to store C and compute D on the fly */ + * arrays of size 5 called C and D. 
Compute D on the fly */ for (index = 0; index < 5; ++index) { - B[0][index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; + C[index] = state->A[0][index] ^ state->A[1][index] ^ + state->A[2][index] ^ state->A[3][index] ^ + state->A[4][index]; } for (index = 0; index < 5; ++index) { - D = B[0][addMod5(index, 4)] ^ - leftRotate1_16(B[0][addMod5(index, 1)]); + D = C[addMod5(index, 4)] ^ + leftRotate1_16(C[addMod5(index, 1)]); for (index2 = 0; index2 < 5; ++index2) state->A[index2][index] ^= D; } /* Step mapping rho and pi combined into a single step. * Rotate all lanes by a specific offset and rearrange */ - B[0][0] = state->A[0][0]; - B[1][0] = leftRotate12_16(state->A[0][3]); - B[2][0] = leftRotate1_16 (state->A[0][1]); - B[3][0] = leftRotate11_16(state->A[0][4]); - B[4][0] = leftRotate14_16(state->A[0][2]); - B[0][1] = leftRotate12_16(state->A[1][1]); - B[1][1] = leftRotate4_16 (state->A[1][4]); - B[2][1] = leftRotate6_16 (state->A[1][2]); - B[3][1] = leftRotate4_16 (state->A[1][0]); - B[4][1] = leftRotate7_16 (state->A[1][3]); - B[0][2] = leftRotate11_16(state->A[2][2]); - B[1][2] = leftRotate3_16 (state->A[2][0]); - B[2][2] = leftRotate9_16 (state->A[2][3]); - B[3][2] = leftRotate10_16(state->A[2][1]); - B[4][2] = leftRotate7_16 (state->A[2][4]); - B[0][3] = leftRotate5_16 (state->A[3][3]); - B[1][3] = leftRotate13_16(state->A[3][1]); - B[2][3] = leftRotate8_16 (state->A[3][4]); - B[3][3] = leftRotate15_16(state->A[3][2]); - B[4][3] = leftRotate9_16 (state->A[3][0]); - B[0][4] = leftRotate14_16(state->A[4][4]); - B[1][4] = leftRotate13_16(state->A[4][2]); - B[2][4] = leftRotate2_16 (state->A[4][0]); - B[3][4] = leftRotate8_16 (state->A[4][3]); - B[4][4] = leftRotate2_16 (state->A[4][1]); + D = state->A[0][1]; + state->A[0][1] = leftRotate12_16(state->A[1][1]); + state->A[1][1] = leftRotate4_16 (state->A[1][4]); + state->A[1][4] = leftRotate13_16(state->A[4][2]); + state->A[4][2] = leftRotate7_16 (state->A[2][4]); + state->A[2][4] = leftRotate2_16 (state->A[4][0]); + state->A[4][0] = leftRotate14_16(state->A[0][2]); + state->A[0][2] = leftRotate11_16(state->A[2][2]); + state->A[2][2] = leftRotate9_16 (state->A[2][3]); + state->A[2][3] = leftRotate8_16 (state->A[3][4]); + state->A[3][4] = leftRotate8_16 (state->A[4][3]); + state->A[4][3] = leftRotate9_16 (state->A[3][0]); + state->A[3][0] = leftRotate11_16(state->A[0][4]); + state->A[0][4] = leftRotate14_16(state->A[4][4]); + state->A[4][4] = leftRotate2_16 (state->A[4][1]); + state->A[4][1] = leftRotate7_16 (state->A[1][3]); + state->A[1][3] = leftRotate13_16(state->A[3][1]); + state->A[3][1] = leftRotate4_16 (state->A[1][0]); + state->A[1][0] = leftRotate12_16(state->A[0][3]); + state->A[0][3] = leftRotate5_16 (state->A[3][3]); + state->A[3][3] = leftRotate15_16(state->A[3][2]); + state->A[3][2] = leftRotate10_16(state->A[2][1]); + state->A[2][1] = leftRotate6_16 (state->A[1][2]); + state->A[1][2] = leftRotate3_16 (state->A[2][0]); + state->A[2][0] = leftRotate1_16(D); /* Step mapping chi. 
Combine each lane with two others in its row */ for (index = 0; index < 5; ++index) { + C[0] = state->A[index][0]; + C[1] = state->A[index][1]; + C[2] = state->A[index][2]; + C[3] = state->A[index][3]; + C[4] = state->A[index][4]; for (index2 = 0; index2 < 5; ++index2) { - state->A[index2][index] = - B[index2][index] ^ - ((~B[index2][addMod5(index, 1)]) & - B[index2][addMod5(index, 2)]); + state->A[index][index2] = + C[index2] ^ + ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); } } @@ -202,3 +210,5 @@ void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds) } #endif + +#endif /* !__AVR__ */ diff --git a/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-keccak.h b/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-keccak.h index 026da50..2ffef42 100644 --- a/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-keccak.h +++ b/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-keccak.h @@ -68,9 +68,8 @@ typedef union * \brief Permutes the Keccak-p[200] state. * * \param state The Keccak-p[200] state to be permuted. - * \param rounds The number of rounds to perform (up to 18). */ -void keccakp_200_permute(keccakp_200_state_t *state, unsigned rounds); +void keccakp_200_permute(keccakp_200_state_t *state); /** * \brief Permutes the Keccak-p[400] state, which is assumed to be in diff --git a/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-spongent-avr.S b/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-spongent-avr.S new file mode 100644 index 0000000..4a43458 --- /dev/null +++ b/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-spongent-avr.S @@ -0,0 +1,1677 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 256 +table_0: + .byte 238 + .byte 237 + .byte 235 + .byte 224 + .byte 226 + .byte 225 + .byte 228 + .byte 239 + .byte 231 + .byte 234 + .byte 232 + .byte 229 + .byte 233 + .byte 236 + .byte 227 + .byte 230 + .byte 222 + .byte 221 + .byte 219 + .byte 208 + .byte 210 + .byte 209 + .byte 212 + .byte 223 + .byte 215 + .byte 218 + .byte 216 + .byte 213 + .byte 217 + .byte 220 + .byte 211 + .byte 214 + .byte 190 + .byte 189 + .byte 187 + .byte 176 + .byte 178 + .byte 177 + .byte 180 + .byte 191 + .byte 183 + .byte 186 + .byte 184 + .byte 181 + .byte 185 + .byte 188 + .byte 179 + .byte 182 + .byte 14 + .byte 13 + .byte 11 + .byte 0 + .byte 2 + .byte 1 + .byte 4 + .byte 15 + .byte 7 + .byte 10 + .byte 8 + .byte 5 + .byte 9 + .byte 12 + .byte 3 + .byte 6 + .byte 46 + .byte 45 + .byte 43 + .byte 32 + .byte 34 + .byte 33 + .byte 36 + .byte 47 + .byte 39 + .byte 42 + .byte 40 + .byte 37 + .byte 41 + .byte 44 + .byte 35 + .byte 38 + .byte 30 + .byte 29 + .byte 27 + .byte 16 + .byte 18 + .byte 17 + .byte 20 + .byte 31 + .byte 23 + .byte 26 + .byte 24 + .byte 21 + .byte 25 + .byte 28 + .byte 19 + .byte 22 + .byte 78 + .byte 77 + .byte 75 + .byte 64 + .byte 66 + .byte 65 + .byte 68 + .byte 79 + .byte 71 + .byte 74 + .byte 72 + .byte 69 + .byte 73 + .byte 76 + .byte 67 + .byte 70 + .byte 254 + .byte 253 + .byte 251 + .byte 240 + .byte 242 + .byte 241 + .byte 244 + .byte 255 + .byte 247 + .byte 250 + .byte 248 + .byte 245 + .byte 249 + .byte 252 + .byte 243 + .byte 246 + .byte 126 + .byte 125 + .byte 123 + .byte 112 + .byte 114 + .byte 113 + .byte 116 + .byte 127 + .byte 119 + .byte 122 + .byte 120 + .byte 117 + .byte 121 + .byte 124 + .byte 115 + .byte 118 + .byte 
174 + .byte 173 + .byte 171 + .byte 160 + .byte 162 + .byte 161 + .byte 164 + .byte 175 + .byte 167 + .byte 170 + .byte 168 + .byte 165 + .byte 169 + .byte 172 + .byte 163 + .byte 166 + .byte 142 + .byte 141 + .byte 139 + .byte 128 + .byte 130 + .byte 129 + .byte 132 + .byte 143 + .byte 135 + .byte 138 + .byte 136 + .byte 133 + .byte 137 + .byte 140 + .byte 131 + .byte 134 + .byte 94 + .byte 93 + .byte 91 + .byte 80 + .byte 82 + .byte 81 + .byte 84 + .byte 95 + .byte 87 + .byte 90 + .byte 88 + .byte 85 + .byte 89 + .byte 92 + .byte 83 + .byte 86 + .byte 158 + .byte 157 + .byte 155 + .byte 144 + .byte 146 + .byte 145 + .byte 148 + .byte 159 + .byte 151 + .byte 154 + .byte 152 + .byte 149 + .byte 153 + .byte 156 + .byte 147 + .byte 150 + .byte 206 + .byte 205 + .byte 203 + .byte 192 + .byte 194 + .byte 193 + .byte 196 + .byte 207 + .byte 199 + .byte 202 + .byte 200 + .byte 197 + .byte 201 + .byte 204 + .byte 195 + .byte 198 + .byte 62 + .byte 61 + .byte 59 + .byte 48 + .byte 50 + .byte 49 + .byte 52 + .byte 63 + .byte 55 + .byte 58 + .byte 56 + .byte 53 + .byte 57 + .byte 60 + .byte 51 + .byte 54 + .byte 110 + .byte 109 + .byte 107 + .byte 96 + .byte 98 + .byte 97 + .byte 100 + .byte 111 + .byte 103 + .byte 106 + .byte 104 + .byte 101 + .byte 105 + .byte 108 + .byte 99 + .byte 102 + + .text +.global spongent160_permute + .type spongent160_permute, @function +spongent160_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 +.L__stack_usage = 16 + ld r22,Z + ldd r23,Z+1 + ldd r28,Z+2 + ldd r29,Z+3 + ldd r2,Z+4 + ldd r3,Z+5 + ldd r4,Z+6 + ldd r5,Z+7 + ldd r6,Z+8 + ldd r7,Z+9 + ldd r8,Z+10 + ldd r9,Z+11 + ldd r10,Z+12 + ldd r11,Z+13 + ldd r12,Z+14 + ldd r13,Z+15 + ldd r14,Z+16 + ldd r15,Z+17 + ldd r24,Z+18 + ldd r25,Z+19 + movw r26,r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r21,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r21 +#endif + ldi r18,80 + ldi r19,117 + ldi r20,174 +25: + eor r22,r19 + eor r25,r20 + lsl r19 + bst r19,7 + bld r19,0 + mov r0,r1 + bst r19,6 + bld r0,0 + eor r19,r0 + andi r19,127 + lsr r20 + bst r20,0 + bld r20,7 + mov r0,r1 + bst r20,1 + bld r0,7 + eor r20,r0 + andi r20,254 + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r28 +#if defined(RAMPZ) + elpm r28,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r28,Z +#elif defined(__AVR_TINY__) + ld r28,Z +#else + lpm + mov r28,r0 +#endif + mov r30,r29 +#if defined(RAMPZ) + elpm r29,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r29,Z +#elif defined(__AVR_TINY__) + ld r29,Z +#else + lpm + mov r29,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + bst r22,1 + bld r0,0 + bst r22,4 + bld r22,1 + bst r28,0 + bld r22,4 + bst r6,0 + bld r28,0 + bst r10,1 + bld r6,0 + bst r6,6 + bld r10,1 + bst r13,1 + bld r6,6 + bst r22,7 + bld r13,1 + bst r29,4 + bld r22,7 + bst r12,0 + bld r29,4 + bst r14,2 + bld r12,0 + bst r3,3 + bld r14,2 + bst r23,5 + bld r3,3 + bst r4,4 + bld r23,5 + bst r4,1 + bld r4,4 + bst r2,5 + bld r4,1 + bst r24,4 + bld r2,5 + bst r12,3 + bld r24,4 + bst r15,6 + bld r12,3 + bst r9,3 + bld r15,6 + bst r3,6 + bld r9,3 + bst r29,1 + bld r3,6 + bst r10,4 + bld r29,1 + bst r8,2 + bld r10,4 + bst r23,2 + bld r8,2 + bst r3,0 + bld r23,2 + bst r0,0 + bld r3,0 + bst r22,2 + bld r0,0 + bst r23,0 + bld r22,2 + bst r2,0 + bld r23,0 + bst r14,0 + bld r2,0 + bst r2,3 + bld r14,0 + bst r15,4 + bld r2,3 + bst r8,3 + bld r15,4 + bst r23,6 + bld r8,3 + bst r5,0 + bld r23,6 + bst r6,1 + bld r5,0 + bst r10,5 + bld r6,1 + bst r8,6 + bld r10,5 + bst r29,2 + bld r8,6 + bst r11,0 + bld r29,2 + bst r10,2 + bld r11,0 + bst r7,2 + bld r10,2 + bst r15,1 + bld r7,2 + bst r6,7 + bld r15,1 + bst r13,5 + bld r6,7 + bst r28,7 + bld r13,5 + bst r9,4 + bld r28,7 + bst r4,2 + bld r9,4 + bst r3,1 + bld r4,2 + bst r22,5 + bld r3,1 + bst r28,4 + bld r22,5 + bst r8,0 + bld r28,4 + bst r0,0 + bld r8,0 + bst r22,3 + bld r0,0 + bst r23,4 + bld r22,3 + bst r4,0 + bld r23,4 + bst r2,1 + bld r4,0 + bst r14,4 + bld r2,1 + bst r4,3 + bld r14,4 + bst r3,5 + bld r4,3 + bst r28,5 + bld r3,5 + bst r8,4 + bld r28,5 + bst 
r28,2 + bld r8,4 + bst r7,0 + bld r28,2 + bst r14,1 + bld r7,0 + bst r2,7 + bld r14,1 + bst r25,4 + bld r2,7 + bst r24,3 + bld r25,4 + bst r11,7 + bld r24,3 + bst r13,6 + bld r11,7 + bst r29,3 + bld r13,6 + bst r11,4 + bld r29,3 + bst r12,2 + bld r11,4 + bst r15,2 + bld r12,2 + bst r7,3 + bld r15,2 + bst r15,5 + bld r7,3 + bst r8,7 + bld r15,5 + bst r29,6 + bld r8,7 + bst r13,0 + bld r29,6 + bst r0,0 + bld r13,0 + bst r22,6 + bld r0,0 + bst r29,0 + bld r22,6 + bst r10,0 + bld r29,0 + bst r6,2 + bld r10,0 + bst r11,1 + bld r6,2 + bst r10,6 + bld r11,1 + bst r9,2 + bld r10,6 + bst r3,2 + bld r9,2 + bst r23,1 + bld r3,2 + bst r2,4 + bld r23,1 + bst r24,0 + bld r2,4 + bst r10,3 + bld r24,0 + bst r7,6 + bld r10,3 + bst r25,1 + bld r7,6 + bst r14,7 + bld r25,1 + bst r5,7 + bld r14,7 + bst r9,5 + bld r5,7 + bst r4,6 + bld r9,5 + bst r5,1 + bld r4,6 + bst r6,5 + bld r5,1 + bst r12,5 + bld r6,5 + bst r24,6 + bld r12,5 + bst r13,3 + bld r24,6 + bst r23,7 + bld r13,3 + bst r5,4 + bld r23,7 + bst r8,1 + bld r5,4 + bst r0,0 + bld r8,1 + bst r23,3 + bld r0,0 + bst r3,4 + bld r23,3 + bst r28,1 + bld r3,4 + bst r6,4 + bld r28,1 + bst r12,1 + bld r6,4 + bst r14,6 + bld r12,1 + bst r5,3 + bld r14,6 + bst r7,5 + bld r5,3 + bst r24,5 + bld r7,5 + bst r12,7 + bld r24,5 + bst r25,6 + bld r12,7 + bst r25,3 + bld r25,6 + bst r15,7 + bld r25,3 + bst r9,7 + bld r15,7 + bst r5,6 + bld r9,7 + bst r9,1 + bld r5,6 + bst r2,6 + bld r9,1 + bst r25,0 + bld r2,6 + bst r14,3 + bld r25,0 + bst r3,7 + bld r14,3 + bst r29,5 + bld r3,7 + bst r12,4 + bld r29,5 + bst r24,2 + bld r12,4 + bst r11,3 + bld r24,2 + bst r11,6 + bld r11,3 + bst r13,2 + bld r11,6 + bst r0,0 + bld r13,2 + bst r28,3 + bld r0,0 + bst r7,4 + bld r28,3 + bst r24,1 + bld r7,4 + bst r10,7 + bld r24,1 + bst r9,6 + bld r10,7 + bst r5,2 + bld r9,6 + bst r7,1 + bld r5,2 + bst r14,5 + bld r7,1 + bst r4,7 + bld r14,5 + bst r5,5 + bld r4,7 + bst r8,5 + bld r5,5 + bst r28,6 + bld r8,5 + bst r9,0 + bld r28,6 + bst r2,2 + bld r9,0 + bst r15,0 + bld r2,2 + bst r6,3 + bld r15,0 + bst r11,5 + bld r6,3 + bst r12,6 + bld r11,5 + bst r25,2 + bld r12,6 + bst r15,3 + bld r25,2 + bst r7,7 + bld r15,3 + bst r25,5 + bld r7,7 + bst r24,7 + bld r25,5 + bst r13,7 + bld r24,7 + bst r29,7 + bld r13,7 + bst r13,4 + bld r29,7 + bst r0,0 + bld r13,4 + dec r18 + breq 5389f + rjmp 25b +5389: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + st X+,r22 + st X+,r23 + st X+,r28 + st X+,r29 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + st X+,r24 + st X+,r25 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size spongent160_permute, .-spongent160_permute + + .text +.global spongent176_permute + .type spongent176_permute, @function +spongent176_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ld r22,Z + ldd r23,Z+1 + ldd r28,Z+2 + ldd r29,Z+3 + ldd r2,Z+4 + ldd r3,Z+5 + ldd r4,Z+6 + ldd r5,Z+7 + ldd r6,Z+8 + ldd r7,Z+9 + ldd r8,Z+10 + ldd r9,Z+11 + ldd r10,Z+12 + ldd r11,Z+13 + ldd r12,Z+14 + ldd r13,Z+15 + ldd r14,Z+16 + ldd r15,Z+17 + ldd r24,Z+18 + ldd r25,Z+19 + ldd r16,Z+20 + ldd r17,Z+21 + movw r26,r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) 
+ ldi r21,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r21 +#endif + ldi r18,90 + ldi r19,69 + ldi r20,162 +27: + eor r22,r19 + eor r17,r20 + lsl r19 + bst r19,7 + bld r19,0 + mov r0,r1 + bst r19,6 + bld r0,0 + eor r19,r0 + andi r19,127 + lsr r20 + bst r20,0 + bld r20,7 + mov r0,r1 + bst r20,1 + bld r0,7 + eor r20,r0 + andi r20,254 + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r28 +#if defined(RAMPZ) + elpm r28,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r28,Z +#elif defined(__AVR_TINY__) + ld r28,Z +#else + lpm + mov r28,r0 +#endif + mov r30,r29 +#if defined(RAMPZ) + elpm r29,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r29,Z +#elif defined(__AVR_TINY__) + ld r29,Z +#else + lpm + mov r29,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + bst r22,1 + bld r0,0 + bst r22,4 + bld r22,1 + bst r28,0 + bld r22,4 + bst r6,0 + bld r28,0 + bst r8,1 + bld r6,0 + bst r24,5 + bld r8,1 + bst r6,7 + bld r24,5 + bst r11,5 + bld r6,7 + bst r8,6 + bld r11,5 + bst r17,1 + bld r8,6 + bst r24,7 + bld r17,1 + bst r7,7 + bld r24,7 + bst r15,5 + bld r7,7 + bst r2,7 + bld r15,5 + bst r25,4 + bld r2,7 + bst r10,3 + bld r25,4 + bst r3,6 + bld r10,3 + bst r23,1 + bld r3,6 + bst r2,4 + bld r23,1 + bst r24,0 + bld r2,4 + bst r4,3 + bld r24,0 + bst r29,5 + bld r4,3 + bst r12,4 + bld r29,5 + bst r12,2 + bld r12,4 + bst r11,2 + bld r12,2 + bst r7,2 + bld r11,2 + bst r13,1 + bld r7,2 + bst r14,6 + bld r13,1 + bst r23,3 + bld r14,6 + bst r3,4 + bld r23,3 + bst r0,0 + bld r3,4 + bst r22,2 + bld r0,0 + bst r23,0 + bld r22,2 + bst r2,0 + bld r23,0 + bst r14,0 + bld r2,0 + bst r16,2 + bld r14,0 + bst r13,3 + bld r16,2 + bst r15,6 + bld r13,3 + bst r3,3 + bld r15,6 + bst r17,4 + bld r3,3 + bst r16,3 + bld r17,4 + bst r13,7 + bld r16,3 + bst r25,6 + bld r13,7 + bst r11,3 + bld r25,6 + bst r7,6 + bld r11,3 + bst r15,1 + bld r7,6 + bst r28,7 + bld r15,1 + bst r9,4 + bld r28,7 + bst r28,2 + bld r9,4 + bst r7,0 + bld r28,2 + bst r12,1 + bld r7,0 + bst r10,6 + bld r12,1 + bst r5,2 + bld r10,6 + bst r5,1 + bld r5,2 + bst r4,5 + bld r5,1 + bst r2,5 + bld r4,5 + bst r24,4 + bld r2,5 + bst r6,3 + bld r24,4 + bst r9,5 + bld r6,3 + bst r28,6 + bld r9,5 + bst r9,0 + bld r28,6 + bst r0,0 + bld r9,0 + bst r22,3 + bld r0,0 + bst r23,4 + bld r22,3 + bst r4,0 + bld r23,4 + bst r28,1 + bld r4,0 + bst r6,4 + bld r28,1 + bst r10,1 + bld r6,4 + bst r2,6 + bld r10,1 + bst r25,0 + bld r2,6 + bst r8,3 + bld r25,0 + bst r25,5 + bld r8,3 + bst r10,7 + bld r25,5 + bst r5,6 + bld r10,7 + bst r7,1 + bld r5,6 + bst r12,5 + bld r7,1 + bst r12,6 + bld r12,5 + bst r13,2 + bld r12,6 + bst r15,2 + bld r13,2 + bst r29,3 + bld r15,2 + bst r11,4 + bld r29,3 + bst r8,2 + bld r11,4 + bst r25,1 + bld r8,2 + bst r8,7 + bld r25,1 + bst r17,5 + bld r8,7 + bst r16,7 + bld r17,5 + bst r15,7 + bld r16,7 + bst r3,7 + bld r15,7 + bst r23,5 + bld r3,7 + bst r4,4 + bld r23,5 + bst r2,1 + bld r4,4 + bst r14,4 + bld r2,1 + bst r0,0 + bld r14,4 + bst r22,5 + bld r0,0 + bst r28,4 + bld r22,5 + bst r8,0 + bld r28,4 + bst r24,1 + bld r8,0 + bst r4,7 + bld r24,1 + bst r3,5 + bld r4,7 + bst r0,0 + bld r3,5 + bst r22,6 + bld r0,0 + bst r29,0 + bld r22,6 + bst r10,0 + bld r29,0 + bst r2,2 + bld r10,0 + bst r15,0 + bld r2,2 + bst r28,3 + bld r15,0 + bst r7,4 + bld r28,3 + bst r14,1 + bld r7,4 + bst r16,6 + bld r14,1 + bst r15,3 + bld r16,6 + bst r29,7 + bld r15,3 + bst r13,4 + bld r29,7 + bst r24,2 + bld r13,4 + bst r5,3 + bld r24,2 + bst r5,5 + bld r5,3 + bst r6,5 + bld r5,5 + bst r10,5 + bld r6,5 + bst r4,6 + bld r10,5 + bst r3,1 + bld r4,6 + bst r16,4 + bld r3,1 + bst r14,3 + bld r16,4 + bst r17,6 + bld r14,3 + bst r17,3 + bld r17,6 + bst r25,7 + bld r17,3 + bst r11,7 + bld r25,7 + bst r9,6 + bld r11,7 + bst r29,2 + bld r9,6 + bst r11,0 + bld r29,2 + bst r6,2 + bld r11,0 + bst r9,1 + bld r6,2 + bst r0,0 + bld r9,1 + bst r22,7 + bld r0,0 + bst r29,4 + bld r22,7 + bst r12,0 + bld 
r29,4 + bst r10,2 + bld r12,0 + bst r3,2 + bld r10,2 + bst r17,0 + bld r3,2 + bst r24,3 + bld r17,0 + bst r5,7 + bld r24,3 + bst r7,5 + bld r5,7 + bst r14,5 + bld r7,5 + bst r0,0 + bld r14,5 + bst r23,2 + bld r0,0 + bst r3,0 + bld r23,2 + bst r16,0 + bld r3,0 + bst r12,3 + bld r16,0 + bst r11,6 + bld r12,3 + bst r9,2 + bld r11,6 + bst r0,0 + bld r9,2 + bst r23,6 + bld r0,0 + bst r5,0 + bld r23,6 + bst r4,1 + bld r5,0 + bst r28,5 + bld r4,1 + bst r8,4 + bld r28,5 + bst r16,1 + bld r8,4 + bst r12,7 + bld r16,1 + bst r13,6 + bld r12,7 + bst r25,2 + bld r13,6 + bst r9,3 + bld r25,2 + bst r0,0 + bld r9,3 + bst r23,7 + bld r0,0 + bst r5,4 + bld r23,7 + bst r6,1 + bld r5,4 + bst r8,5 + bld r6,1 + bst r16,5 + bld r8,5 + bst r14,7 + bld r16,5 + bst r0,0 + bld r14,7 + bst r29,1 + bld r0,0 + bst r10,4 + bld r29,1 + bst r4,2 + bld r10,4 + bst r0,0 + bld r4,2 + bst r29,6 + bld r0,0 + bst r13,0 + bld r29,6 + bst r14,2 + bld r13,0 + bst r17,2 + bld r14,2 + bst r25,3 + bld r17,2 + bst r9,7 + bld r25,3 + bst r0,0 + bld r9,7 + bst r2,3 + bld r0,0 + bst r15,4 + bld r2,3 + bst r0,0 + bld r15,4 + bst r6,6 + bld r0,0 + bst r11,1 + bld r6,6 + bst r0,0 + bld r11,1 + bst r7,3 + bld r0,0 + bst r13,5 + bld r7,3 + bst r24,6 + bld r13,5 + bst r0,0 + bld r24,6 + dec r18 + breq 5445f + rjmp 27b +5445: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + st X+,r22 + st X+,r23 + st X+,r28 + st X+,r29 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + st X+,r24 + st X+,r25 + st X+,r16 + st X+,r17 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size spongent176_permute, .-spongent176_permute + +#endif diff --git a/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-spongent.c b/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-spongent.c index 69a8ecb..8e0d57d 100644 --- a/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-spongent.c +++ b/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-spongent.c @@ -22,6 +22,8 @@ #include "internal-spongent.h" +#if !defined(__AVR__) + /** * \brief Applies the Spongent-pi S-box in parallel to the 8 nibbles * of a 32-bit word. @@ -344,3 +346,5 @@ void spongent176_permute(spongent176_state_t *state) le_store_word16(state->B + 20, x5); /* Last word is only 16 bits */ #endif } + +#endif /* !__AVR__ */ diff --git a/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-util.h b/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-util.h +++ b/elephant/Implementations/crypto_aead/elephant200v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/aead-common.c b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
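Note on the composed rotation macros in the internal-util.h hunk above: they rely on the fact that any 32-bit rotation count can be built from rotations by 8, 16 or 24 (cheap byte moves on AVR) plus a few single-bit rotations. A minimal standalone C sketch of that equivalence follows; the helper names rotl32/rotr32 are illustrative only and are not part of the patch.

/* Check that "rotate left by 8, then right by 1 three times" equals a
 * generic rotate left by 5, mirroring the leftRotate5() macro above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t rotl32(uint32_t x, unsigned bits) { return (x << bits) | (x >> (32 - bits)); }
static uint32_t rotr32(uint32_t x, unsigned bits) { return (x >> bits) | (x << (32 - bits)); }

int main(void)
{
    uint32_t x = 0x12345678;
    uint32_t composed = rotr32(rotr32(rotr32(rotl32(x, 8), 1), 1), 1);
    printf("%s\n", composed == rotl32(x, 5) ? "match" : "mismatch");
    return 0;
}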
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/aead-common.h b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. 
- * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. 
- * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/api.h b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/encrypt.c b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/encrypt.c deleted file mode 100644 index daa5139..0000000 --- a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "estate.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return estate_twegift_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return estate_twegift_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/estate.c b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/estate.c deleted file mode 100644 index a570791..0000000 --- a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/estate.c +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "estate.h" -#include "internal-gift128.h" -#include "internal-util.h" -#include - -aead_cipher_t const estate_twegift_cipher = { - "ESTATE_TweGIFT-128", - ESTATE_TWEGIFT_KEY_SIZE, - ESTATE_TWEGIFT_NONCE_SIZE, - ESTATE_TWEGIFT_TAG_SIZE, - AEAD_FLAG_NONE, - estate_twegift_aead_encrypt, - estate_twegift_aead_decrypt -}; - -/** - * \brief Generates the FCBC MAC for a packet using ESTATE_TweGIFT-128. - * - * \param ks The key schedule for TweGIFT-128. - * \param tag Rolling state of the authentication tag. - * \param m Message to be authenticated. - * \param mlen Length of the message to be authenticated; must be >= 1. - * \param tweak1 Tweak value to use when the last block is full. - * \param tweak2 Tweak value to use when the last block is partial. - */ -static void estate_twegift_fcbc - (const gift128n_key_schedule_t *ks, unsigned char tag[16], - const unsigned char *m, unsigned long long mlen, - uint32_t tweak1, uint32_t tweak2) -{ - while (mlen > 16) { - lw_xor_block(tag, m, 16); - gift128n_encrypt(ks, tag, tag); - m += 16; - mlen -= 16; - } - if (mlen == 16) { - lw_xor_block(tag, m, 16); - gift128t_encrypt(ks, tag, tag, tweak1); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block(tag, m, temp); - tag[temp] ^= 0x01; - gift128t_encrypt(ks, tag, tag, tweak2); - } -} - -/** - * \brief Generates the MAC for a packet using ESTATE_TweGIFT-128. - * - * \param ks The key schedule for TweGIFT-128. - * \param tag Rolling state of the authentication tag. - * \param m Message to be authenticated. - * \param mlen Length of the message to be authenticated. - * \param ad Associated data to be authenticated. - * \param adlen Length of the associated data to be authenticated. - */ -static void estate_twegift_authenticate - (const gift128n_key_schedule_t *ks, unsigned char tag[16], - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen) -{ - /* Handle the case where both the message and associated data are empty */ - if (mlen == 0 && adlen == 0) { - gift128t_encrypt(ks, tag, tag, GIFT128T_TWEAK_8); - return; - } - - /* Encrypt the nonce */ - gift128t_encrypt(ks, tag, tag, GIFT128T_TWEAK_1); - - /* Compute the FCBC MAC over the associated data */ - if (adlen != 0) { - if (mlen != 0) { - estate_twegift_fcbc - (ks, tag, ad, adlen, GIFT128T_TWEAK_2, GIFT128T_TWEAK_3); - } else { - estate_twegift_fcbc - (ks, tag, ad, adlen, GIFT128T_TWEAK_6, GIFT128T_TWEAK_7); - } - } - - /* Compute the FCBC MAC over the message data */ - if (mlen != 0) { - estate_twegift_fcbc - (ks, tag, m, mlen, GIFT128T_TWEAK_4, GIFT128T_TWEAK_5); - } -} - -/** - * \brief Encrypts (or decrypts) a payload using ESTATE_TweGIFT-128. - * - * \param ks The key schedule for TweGIFT-128. - * \param tag Pre-computed authentication tag for the packet. - * \param c Ciphertext after encryption. - * \param m Plaintext to be encrypted. - * \param mlen Length of the plaintext to be encrypted. 
- */ -static void estate_twegift_encrypt - (const gift128n_key_schedule_t *ks, const unsigned char tag[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - unsigned char block[16]; - memcpy(block, tag, 16); - while (mlen >= 16) { - gift128n_encrypt(ks, block, block); - lw_xor_block_2_src(c, block, m, 16); - c += 16; - m += 16; - mlen -= 16; - } - if (mlen > 0) { - gift128n_encrypt(ks, block, block); - lw_xor_block_2_src(c, block, m, (unsigned)mlen); - } -} - -int estate_twegift_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - gift128n_key_schedule_t ks; - unsigned char tag[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ESTATE_TWEGIFT_TAG_SIZE; - - /* Set up the key schedule and copy the nonce into the tag */ - gift128n_init(&ks, k); - memcpy(tag, npub, 16); - - /* Authenticate the associated data and plaintext */ - estate_twegift_authenticate(&ks, tag, m, mlen, ad, adlen); - - /* Encrypt the plaintext to generate the ciphertext */ - estate_twegift_encrypt(&ks, tag, c, m, mlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, tag, 16); - return 0; -} - -int estate_twegift_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - gift128n_key_schedule_t ks; - unsigned char tag[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ESTATE_TWEGIFT_TAG_SIZE) - return -1; - *mlen = clen - ESTATE_TWEGIFT_TAG_SIZE; - - /* Set up the key schedule and copy the nonce into the tag */ - gift128n_init(&ks, k); - memcpy(tag, npub, 16); - - /* Decrypt the ciphertext to generate the plaintext */ - estate_twegift_encrypt(&ks, c + *mlen, m, c, *mlen); - - /* Authenticate the associated data and plaintext */ - estate_twegift_authenticate(&ks, tag, m, *mlen, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, tag, c + *mlen, 16); -} diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/estate.h b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/estate.h deleted file mode 100644 index d38ee16..0000000 --- a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/estate.h +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ESTATE_H -#define LWCRYPTO_ESTATE_H - -#include "aead-common.h" - -/** - * \file estate.h - * \brief ESTATE authenticated encryption algorithm. - * - * ESTATE_TweGIFT-128 is an authenticated encryption algorithm with a - * 128-bit key, a 128-bit nonce, and a 128-bit tag. It is a two-pass - * algorithm that is built around a tweaked version of the GIFT-128 block - * cipher, the FCBC authentication mode, and the OFB encryption mode. - * - * ESTATE is resistant against nonce reuse as long as the combination - * of the associated data and plaintext is unique. - * - * If a nonce is reused then two packets with the same nonce, associated data, - * and plaintext will encrypt to the same ciphertext. This will leak that - * the same plaintext has been sent for a second time but will not reveal - * the plaintext itself. - * - * The ESTATE family also includes variants build around tweaked versions - * of the AES block cipher. We do not implement those variants in this - * library. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for ESTATE_TweGIFT-128. - */ -#define ESTATE_TWEGIFT_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for ESTATE_TweGIFT-128. - */ -#define ESTATE_TWEGIFT_TAG_SIZE 16 - -/** - * \brief Size of the nonce for ESTATE_TweGIFT-128. - */ -#define ESTATE_TWEGIFT_NONCE_SIZE 16 - -/** - * \brief Meta-information block for the ESTATE_TweGIFT-128 cipher. - */ -extern aead_cipher_t const estate_twegift_cipher; - -/** - * \brief Encrypts and authenticates a packet with ESTATE_TweGIFT-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa estate_twegift_aead_decrypt() - */ -int estate_twegift_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ESTATE_TweGIFT-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa estate_twegift_aead_encrypt() - */ -int estate_twegift_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128-config.h b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128-config.h deleted file mode 100644 index 62131ba..0000000 --- a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128-config.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_GIFT128_CONFIG_H -#define LW_INTERNAL_GIFT128_CONFIG_H - -/** - * \file internal-gift128-config.h - * \brief Configures the variant of GIFT-128 to use. - */ - -/** - * \brief Select the full variant of GIFT-128. - * - * The full variant requires 320 bytes for the key schedule and uses the - * fixslicing method to implement encryption and decryption. - */ -#define GIFT128_VARIANT_FULL 0 - -/** - * \brief Select the small variant of GIFT-128. - * - * The small variant requires 80 bytes for the key schedule. The rest - * of the key schedule is expanded on the fly during encryption. - * - * The fixslicing method is used to implement encryption and the slower - * bitslicing method is used to implement decryption. The small variant - * is suitable when memory is at a premium, decryption is not needed, - * but encryption performance is still important. - */ -#define GIFT128_VARIANT_SMALL 1 - -/** - * \brief Select the tiny variant of GIFT-128. - * - * The tiny variant requires 16 bytes for the key schedule and uses the - * bitslicing method to implement encryption and decryption. 
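The estate.h header removed above declares one-shot AEAD entry points with the usual SUPERCOP-style signature. A minimal caller sketch using only those declarations is shown below; the key and nonce values are placeholders, not test vectors.

#include <stdio.h>
#include "estate.h"

int main(void)
{
    unsigned char key[ESTATE_TWEGIFT_KEY_SIZE] = {0};
    unsigned char nonce[ESTATE_TWEGIFT_NONCE_SIZE] = {0};
    unsigned char msg[] = "hello";
    unsigned char ad[] = "header";
    unsigned char ct[sizeof(msg) + ESTATE_TWEGIFT_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    /* Encrypt: output is ciphertext followed by the 16-byte tag. */
    estate_twegift_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                                ad, sizeof(ad), NULL, nonce, key);

    /* Decrypt: returns 0 only if the authentication tag verifies. */
    if (estate_twegift_aead_decrypt(pt, &ptlen, NULL, ct, ctlen,
                                    ad, sizeof(ad), nonce, key) == 0)
        printf("tag verified, %llu bytes recovered\n", ptlen);
    return 0;
}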
It is suitable - * for use when memory is very tight and performance is not critical. - */ -#define GIFT128_VARIANT_TINY 2 - -/** - * \def GIFT128_VARIANT - * \brief Selects the default variant of GIFT-128 to use on this platform. - */ -/** - * \def GIFT128_VARIANT_ASM - * \brief Defined to 1 if the GIFT-128 implementation has been replaced - * with an assembly code version. - */ -#if defined(__AVR__) && !defined(GIFT128_VARIANT_ASM) -#define GIFT128_VARIANT_ASM 1 -#endif -#if !defined(GIFT128_VARIANT) -#define GIFT128_VARIANT GIFT128_VARIANT_FULL -#endif -#if !defined(GIFT128_VARIANT_ASM) -#define GIFT128_VARIANT_ASM 0 -#endif - -#endif diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128.c b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128.c deleted file mode 100644 index c6ac5ec..0000000 --- a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128.c +++ /dev/null @@ -1,1498 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
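The internal-gift128-config.h header above selects GIFT128_VARIANT_FULL by default unless the build defines GIFT128_VARIANT first. Assuming a conventional build, the small (80-byte key schedule) variant could therefore be selected from the compiler command line, for example:

cc -DGIFT128_VARIANT=GIFT128_VARIANT_SMALL -c internal-gift128.c

The command-line macro is only expanded where GIFT128_VARIANT is used, after the header has defined GIFT128_VARIANT_SMALL, so it resolves to the intended value.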
- */ - -#include "internal-gift128.h" -#include "internal-util.h" - -#if !GIFT128_VARIANT_ASM - -#if GIFT128_VARIANT != GIFT128_VARIANT_TINY - -/* Round constants for GIFT-128 in the fixsliced representation */ -static uint32_t const GIFT128_RC_fixsliced[40] = { - 0x10000008, 0x80018000, 0x54000002, 0x01010181, 0x8000001f, 0x10888880, - 0x6001e000, 0x51500002, 0x03030180, 0x8000002f, 0x10088880, 0x60016000, - 0x41500002, 0x03030080, 0x80000027, 0x10008880, 0x4001e000, 0x11500002, - 0x03020180, 0x8000002b, 0x10080880, 0x60014000, 0x01400002, 0x02020080, - 0x80000021, 0x10000080, 0x0001c000, 0x51000002, 0x03010180, 0x8000002e, - 0x10088800, 0x60012000, 0x40500002, 0x01030080, 0x80000006, 0x10008808, - 0xc001a000, 0x14500002, 0x01020181, 0x8000001a -}; - -#endif - -#if GIFT128_VARIANT != GIFT128_VARIANT_FULL - -/* Round constants for GIFT-128 in the bitsliced representation */ -static uint8_t const GIFT128_RC[40] = { - 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, - 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, - 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, - 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38, - 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A -}; - -#endif - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) - -/* - * The permutation below was generated by the online permuation generator at - * "http://programming.sirrida.de/calcperm.php". - * - * All of the permutuations are essentially the same, except that each is - * rotated by 8 bits with respect to the next: - * - * P0: 0 24 16 8 1 25 17 9 2 26 18 10 3 27 19 11 4 28 20 12 5 29 21 13 6 30 22 14 7 31 23 15 - * P1: 8 0 24 16 9 1 25 17 10 2 26 18 11 3 27 19 12 4 28 20 13 5 29 21 14 6 30 22 15 7 31 23 - * P2: 16 8 0 24 17 9 1 25 18 10 2 26 19 11 3 27 20 12 4 28 21 13 5 29 22 14 6 30 23 15 7 31 - * P3: 24 16 8 0 25 17 9 1 26 18 10 2 27 19 11 3 28 20 12 4 29 21 13 5 30 22 14 6 31 23 15 7 - * - * The most efficient permutation from the online generator was P3, so we - * perform it as the core of the others, and then perform a final rotation. - * - * It is possible to do slightly better than "P3 then rotate" on desktop and - * server architectures for the other permutations. But the advantage isn't - * as evident on embedded platforms so we keep things simple. 
- */ -#define PERM3_INNER(x) \ - do { \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x000000ff, 24); \ - } while (0) -#define PERM0(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate8(_x); \ - } while (0) -#define PERM1(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate16(_x); \ - } while (0) -#define PERM2(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate24(_x); \ - } while (0) -#define PERM3(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) - -#define INV_PERM3_INNER(x) \ - do { \ - bit_permute_step(x, 0x00550055, 9); \ - bit_permute_step(x, 0x00003333, 18); \ - bit_permute_step(x, 0x000f000f, 12); \ - bit_permute_step(x, 0x000000ff, 24); \ - } while (0) -#define INV_PERM0(x) \ - do { \ - uint32_t _x = rightRotate8(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM1(x) \ - do { \ - uint32_t _x = rightRotate16(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM2(x) \ - do { \ - uint32_t _x = rightRotate24(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM3(x) \ - do { \ - uint32_t _x = (x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) - -/** - * \brief Converts the GIFT-128 nibble-based representation into word-based. - * - * \param output Output buffer to write the word-based version to. - * \param input Input buffer to read the nibble-based version from. - * - * The \a input and \a output buffers can be the same buffer. - */ -static void gift128n_to_words - (unsigned char *output, const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Load the input buffer into 32-bit words. We use the nibble order - * from the HYENA submission to NIST which is byte-reversed with respect - * to the nibble order of the original GIFT-128 paper. Nibble zero is in - * the first byte instead of the last, which means little-endian order. */ - s0 = le_load_word32(input + 12); - s1 = le_load_word32(input + 8); - s2 = le_load_word32(input + 4); - s3 = le_load_word32(input); - - /* Rearrange the bits so that bits 0..3 of each nibble are - * scattered to bytes 0..3 of each word. The permutation is: - * - * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 - * - * Generated with "http://programming.sirrida.de/calcperm.php". - */ - #define PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - PERM_WORDS(s0); - PERM_WORDS(s1); - PERM_WORDS(s2); - PERM_WORDS(s3); - - /* Rearrange the bytes and write them to the output buffer */ - output[0] = (uint8_t)s0; - output[1] = (uint8_t)s1; - output[2] = (uint8_t)s2; - output[3] = (uint8_t)s3; - output[4] = (uint8_t)(s0 >> 8); - output[5] = (uint8_t)(s1 >> 8); - output[6] = (uint8_t)(s2 >> 8); - output[7] = (uint8_t)(s3 >> 8); - output[8] = (uint8_t)(s0 >> 16); - output[9] = (uint8_t)(s1 >> 16); - output[10] = (uint8_t)(s2 >> 16); - output[11] = (uint8_t)(s3 >> 16); - output[12] = (uint8_t)(s0 >> 24); - output[13] = (uint8_t)(s1 >> 24); - output[14] = (uint8_t)(s2 >> 24); - output[15] = (uint8_t)(s3 >> 24); -} - -/** - * \brief Converts the GIFT-128 word-based representation into nibble-based. 
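The bit_permute_step() macro quoted a little earlier in internal-gift128.c is the standard delta-swap: it exchanges bit i with bit i+shift wherever the mask has a 1 bit, so applying the same mask/shift twice restores the input. A short standalone sketch of that property (the macro is copied from the source; the test value is arbitrary):

#include <stdint.h>
#include <stdio.h>

#define bit_permute_step(_y, mask, shift) \
    do { \
        uint32_t y = (_y); \
        uint32_t t = ((y >> (shift)) ^ y) & (mask); \
        (_y) = (y ^ t) ^ (t << (shift)); \
    } while (0)

int main(void)
{
    uint32_t x = 0xDEADBEEF, orig = x;
    bit_permute_step(x, 0x0a0a0a0a, 3);  /* first step of PERM3_INNER */
    bit_permute_step(x, 0x0a0a0a0a, 3);  /* the same swap undoes itself */
    printf("%s\n", x == orig ? "restored" : "changed");
    return 0;
}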
- * - * \param output Output buffer to write the nibble-based version to. - * \param input Input buffer to read the word-based version from. - */ -static void gift128n_to_nibbles - (unsigned char *output, const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Load the input bytes and rearrange them so that s0 contains the - * most significant nibbles and s3 contains the least significant */ - s0 = (((uint32_t)(input[12])) << 24) | - (((uint32_t)(input[8])) << 16) | - (((uint32_t)(input[4])) << 8) | - ((uint32_t)(input[0])); - s1 = (((uint32_t)(input[13])) << 24) | - (((uint32_t)(input[9])) << 16) | - (((uint32_t)(input[5])) << 8) | - ((uint32_t)(input[1])); - s2 = (((uint32_t)(input[14])) << 24) | - (((uint32_t)(input[10])) << 16) | - (((uint32_t)(input[6])) << 8) | - ((uint32_t)(input[2])); - s3 = (((uint32_t)(input[15])) << 24) | - (((uint32_t)(input[11])) << 16) | - (((uint32_t)(input[7])) << 8) | - ((uint32_t)(input[3])); - - /* Apply the inverse of PERM_WORDS() from the function above */ - #define INV_PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x00aa00aa, 7); \ - bit_permute_step(x, 0x0000cccc, 14); \ - bit_permute_step(x, 0x00f000f0, 4); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - INV_PERM_WORDS(s0); - INV_PERM_WORDS(s1); - INV_PERM_WORDS(s2); - INV_PERM_WORDS(s3); - - /* Store the result into the output buffer as 32-bit words */ - le_store_word32(output + 12, s0); - le_store_word32(output + 8, s1); - le_store_word32(output + 4, s2); - le_store_word32(output, s3); -} - -void gift128n_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - gift128n_to_words(output, input); - gift128b_encrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} - -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - gift128n_to_words(output, input); - gift128b_decrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} - -#if GIFT128_VARIANT != GIFT128_VARIANT_TINY - -/** - * \brief Swaps bits within two words. - * - * \param a The first word. - * \param b The second word. - * \param mask Mask for the bits to shift. - * \param shift Shift amount in bits. - */ -#define gift128b_swap_move(a, b, mask, shift) \ - do { \ - uint32_t tmp = ((b) ^ ((a) >> (shift))) & (mask); \ - (b) ^= tmp; \ - (a) ^= tmp << (shift); \ - } while (0) - -/** - * \brief Derives the next 10 fixsliced keys in the key schedule. - * - * \param next Points to the buffer to receive the next 10 keys. - * \param prev Points to the buffer holding the previous 10 keys. - * - * The \a next and \a prev buffers are allowed to be the same. 
- */ -#define gift128b_derive_keys(next, prev) \ - do { \ - /* Key 0 */ \ - uint32_t s = (prev)[0]; \ - uint32_t t = (prev)[1]; \ - gift128b_swap_move(t, t, 0x00003333U, 16); \ - gift128b_swap_move(t, t, 0x55554444U, 1); \ - (next)[0] = t; \ - /* Key 1 */ \ - s = leftRotate8(s & 0x33333333U) | leftRotate16(s & 0xCCCCCCCCU); \ - gift128b_swap_move(s, s, 0x55551100U, 1); \ - (next)[1] = s; \ - /* Key 2 */ \ - s = (prev)[2]; \ - t = (prev)[3]; \ - (next)[2] = ((t >> 4) & 0x0F000F00U) | ((t & 0x0F000F00U) << 4) | \ - ((t >> 6) & 0x00030003U) | ((t & 0x003F003FU) << 2); \ - /* Key 3 */ \ - (next)[3] = ((s >> 6) & 0x03000300U) | ((s & 0x3F003F00U) << 2) | \ - ((s >> 5) & 0x00070007U) | ((s & 0x001F001FU) << 3); \ - /* Key 4 */ \ - s = (prev)[4]; \ - t = (prev)[5]; \ - (next)[4] = leftRotate8(t & 0xAAAAAAAAU) | \ - leftRotate16(t & 0x55555555U); \ - /* Key 5 */ \ - (next)[5] = leftRotate8(s & 0x55555555U) | \ - leftRotate12(s & 0xAAAAAAAAU); \ - /* Key 6 */ \ - s = (prev)[6]; \ - t = (prev)[7]; \ - (next)[6] = ((t >> 2) & 0x03030303U) | ((t & 0x03030303U) << 2) | \ - ((t >> 1) & 0x70707070U) | ((t & 0x10101010U) << 3); \ - /* Key 7 */ \ - (next)[7] = ((s >> 18) & 0x00003030U) | ((s & 0x01010101U) << 3) | \ - ((s >> 14) & 0x0000C0C0U) | ((s & 0x0000E0E0U) << 15) | \ - ((s >> 1) & 0x07070707U) | ((s & 0x00001010U) << 19); \ - /* Key 8 */ \ - s = (prev)[8]; \ - t = (prev)[9]; \ - (next)[8] = ((t >> 4) & 0x0FFF0000U) | ((t & 0x000F0000U) << 12) | \ - ((t >> 8) & 0x000000FFU) | ((t & 0x000000FFU) << 8); \ - /* Key 9 */ \ - (next)[9] = ((s >> 6) & 0x03FF0000U) | ((s & 0x003F0000U) << 10) | \ - ((s >> 4) & 0x00000FFFU) | ((s & 0x0000000FU) << 12); \ - } while (0) - -/** - * \brief Compute the round keys for GIFT-128 in the fixsliced representation. - * - * \param ks Points to the key schedule to initialize. - * \param k0 First key word. - * \param k1 Second key word. - * \param k2 Third key word. - * \param k3 Fourth key word. 
- */ -static void gift128b_compute_round_keys - (gift128b_key_schedule_t *ks, - uint32_t k0, uint32_t k1, uint32_t k2, uint32_t k3) -{ - unsigned index; - uint32_t temp; - - /* Set the regular key with k0 and k3 pre-swapped for the round function */ - ks->k[0] = k3; - ks->k[1] = k1; - ks->k[2] = k2; - ks->k[3] = k0; - - /* Pre-compute the keys for rounds 3..10 and permute into fixsliced form */ - for (index = 4; index < 20; index += 2) { - ks->k[index] = ks->k[index - 3]; - temp = ks->k[index - 4]; - temp = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - ks->k[index + 1] = temp; - } - for (index = 0; index < 20; index += 10) { - /* Keys 0 and 10 */ - temp = ks->k[index]; - gift128b_swap_move(temp, temp, 0x00550055U, 9); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index] = temp; - - /* Keys 1 and 11 */ - temp = ks->k[index + 1]; - gift128b_swap_move(temp, temp, 0x00550055U, 9); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 1] = temp; - - /* Keys 2 and 12 */ - temp = ks->k[index + 2]; - gift128b_swap_move(temp, temp, 0x11111111U, 3); - gift128b_swap_move(temp, temp, 0x03030303U, 6); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 2] = temp; - - /* Keys 3 and 13 */ - temp = ks->k[index + 3]; - gift128b_swap_move(temp, temp, 0x11111111U, 3); - gift128b_swap_move(temp, temp, 0x03030303U, 6); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 3] = temp; - - /* Keys 4 and 14 */ - temp = ks->k[index + 4]; - gift128b_swap_move(temp, temp, 0x0000AAAAU, 15); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 4] = temp; - - /* Keys 5 and 15 */ - temp = ks->k[index + 5]; - gift128b_swap_move(temp, temp, 0x0000AAAAU, 15); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 5] = temp; - - /* Keys 6 and 16 */ - temp = ks->k[index + 6]; - gift128b_swap_move(temp, temp, 0x0A0A0A0AU, 3); - gift128b_swap_move(temp, temp, 0x00CC00CCU, 6); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 6] = temp; - - /* Keys 7 and 17 */ - temp = ks->k[index + 7]; - gift128b_swap_move(temp, temp, 0x0A0A0A0AU, 3); - gift128b_swap_move(temp, temp, 0x00CC00CCU, 6); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 7] = temp; - - /* Keys 8, 9, 18, and 19 do not need any adjustment */ - } - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - /* Derive the fixsliced keys for the remaining rounds 11..40 */ - for (index = 20; index < 80; index += 10) { - gift128b_derive_keys(ks->k + index, ks->k + index - 20); - } -#endif -} - -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) -{ - gift128b_compute_round_keys - (ks, be_load_word32(key), be_load_word32(key + 4), - be_load_word32(key + 8), be_load_word32(key + 12)); -} - -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) -{ - /* Use 
the little-endian key byte order from the HYENA submission */ - gift128b_compute_round_keys - (ks, le_load_word32(key + 12), le_load_word32(key + 8), - le_load_word32(key + 4), le_load_word32(key)); -} - -/** - * \brief Performs the GIFT-128 S-box on the bit-sliced state. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_sbox(s0, s1, s2, s3) \ - do { \ - s1 ^= s0 & s2; \ - s0 ^= s1 & s3; \ - s2 ^= s0 | s1; \ - s3 ^= s2; \ - s1 ^= s3; \ - s3 ^= 0xFFFFFFFFU; \ - s2 ^= s0 & s1; \ - } while (0) - -/** - * \brief Performs the inverse of the GIFT-128 S-box on the bit-sliced state. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_sbox(s0, s1, s2, s3) \ - do { \ - s2 ^= s3 & s1; \ - s0 ^= 0xFFFFFFFFU; \ - s1 ^= s0; \ - s0 ^= s2; \ - s2 ^= s3 | s1; \ - s3 ^= s1 & s0; \ - s1 ^= s3 & s2; \ - } while (0) - -/** - * \brief Permutes the GIFT-128 state between the 1st and 2nd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_1(s0, s1, s2, s3) \ - do { \ - s1 = ((s1 >> 2) & 0x33333333U) | ((s1 & 0x33333333U) << 2); \ - s2 = ((s2 >> 3) & 0x11111111U) | ((s2 & 0x77777777U) << 1); \ - s3 = ((s3 >> 1) & 0x77777777U) | ((s3 & 0x11111111U) << 3); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 2nd and 3rd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_2(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 4) & 0x0FFF0FFFU) | ((s0 & 0x000F000FU) << 12); \ - s1 = ((s1 >> 8) & 0x00FF00FFU) | ((s1 & 0x00FF00FFU) << 8); \ - s2 = ((s2 >> 12) & 0x000F000FU) | ((s2 & 0x0FFF0FFFU) << 4); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 3rd and 4th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_3(s0, s1, s2, s3) \ - do { \ - gift128b_swap_move(s1, s1, 0x55555555U, 1); \ - s2 = leftRotate16(s2); \ - gift128b_swap_move(s2, s2, 0x00005555U, 1); \ - s3 = leftRotate16(s3); \ - gift128b_swap_move(s3, s3, 0x55550000U, 1); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 4th and 5th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_4(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 6) & 0x03030303U) | ((s0 & 0x3F3F3F3FU) << 2); \ - s1 = ((s1 >> 4) & 0x0F0F0F0FU) | ((s1 & 0x0F0F0F0FU) << 4); \ - s2 = ((s2 >> 2) & 0x3F3F3F3FU) | ((s2 & 0x03030303U) << 6); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 5th and 1st mini-rounds. 
- * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_5(s0, s1, s2, s3) \ - do { \ - s1 = leftRotate16(s1); \ - s2 = rightRotate8(s2); \ - s3 = leftRotate8(s3); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 1st and 2nd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_1(s0, s1, s2, s3) \ - do { \ - s1 = ((s1 >> 2) & 0x33333333U) | ((s1 & 0x33333333U) << 2); \ - s2 = ((s2 >> 1) & 0x77777777U) | ((s2 & 0x11111111U) << 3); \ - s3 = ((s3 >> 3) & 0x11111111U) | ((s3 & 0x77777777U) << 1); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 2nd and 3rd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_2(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 12) & 0x000F000FU) | ((s0 & 0x0FFF0FFFU) << 4); \ - s1 = ((s1 >> 8) & 0x00FF00FFU) | ((s1 & 0x00FF00FFU) << 8); \ - s2 = ((s2 >> 4) & 0x0FFF0FFFU) | ((s2 & 0x000F000FU) << 12); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 3rd and 4th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_3(s0, s1, s2, s3) \ - do { \ - gift128b_swap_move(s1, s1, 0x55555555U, 1); \ - gift128b_swap_move(s2, s2, 0x00005555U, 1); \ - s2 = leftRotate16(s2); \ - gift128b_swap_move(s3, s3, 0x55550000U, 1); \ - s3 = leftRotate16(s3); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 4th and 5th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_4(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 2) & 0x3F3F3F3FU) | ((s0 & 0x03030303U) << 6); \ - s1 = ((s1 >> 4) & 0x0F0F0F0FU) | ((s1 & 0x0F0F0F0FU) << 4); \ - s2 = ((s2 >> 6) & 0x03030303U) | ((s2 & 0x3F3F3F3FU) << 2); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 5th and 1st mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_5(s0, s1, s2, s3) \ - do { \ - s1 = leftRotate16(s1); \ - s2 = leftRotate8(s2); \ - s3 = rightRotate8(s3); \ - } while (0); - -/** - * \brief Performs five fixsliced encryption rounds for GIFT-128. - * - * \param rk Points to the 10 round keys for these rounds. - * \param rc Points to the round constants for these rounds. - * - * We perform all 40 rounds of the fixsliced GIFT-128 five at a time. 
- * - * The permutation is restructured so that one of the words each round - * does not need to be permuted, with the others rotating left, up, right, - * and down to keep the bits in line with their non-moving counterparts. - * This reduces the number of shifts required significantly. - * - * At the end of five rounds, the bit ordering will return to the - * original position. We then repeat the process for the next 5 rounds. - */ -#define gift128b_encrypt_5_rounds(rk, rc) \ - do { \ - /* 1st round - S-box, rotate left, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_1(s0, s1, s2, s3); \ - s1 ^= (rk)[0]; \ - s2 ^= (rk)[1]; \ - s0 ^= (rc)[0]; \ - \ - /* 2nd round - S-box, rotate up, add round key */ \ - gift128b_sbox(s3, s1, s2, s0); \ - gift128b_permute_state_2(s0, s1, s2, s3); \ - s1 ^= (rk)[2]; \ - s2 ^= (rk)[3]; \ - s3 ^= (rc)[1]; \ - \ - /* 3rd round - S-box, swap columns, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_3(s0, s1, s2, s3); \ - s1 ^= (rk)[4]; \ - s2 ^= (rk)[5]; \ - s0 ^= (rc)[2]; \ - \ - /* 4th round - S-box, rotate left and swap rows, add round key */ \ - gift128b_sbox(s3, s1, s2, s0); \ - gift128b_permute_state_4(s0, s1, s2, s3); \ - s1 ^= (rk)[6]; \ - s2 ^= (rk)[7]; \ - s3 ^= (rc)[3]; \ - \ - /* 5th round - S-box, rotate up, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_5(s0, s1, s2, s3); \ - s1 ^= (rk)[8]; \ - s2 ^= (rk)[9]; \ - s0 ^= (rc)[4]; \ - \ - /* Swap s0 and s3 in preparation for the next 1st round */ \ - s0 ^= s3; \ - s3 ^= s0; \ - s0 ^= s3; \ - } while (0) - -/** - * \brief Performs five fixsliced decryption rounds for GIFT-128. - * - * \param rk Points to the 10 round keys for these rounds. - * \param rc Points to the round constants for these rounds. - * - * We perform all 40 rounds of the fixsliced GIFT-128 five at a time. 
- */ -#define gift128b_decrypt_5_rounds(rk, rc) \ - do { \ - /* Swap s0 and s3 in preparation for the next 5th round */ \ - s0 ^= s3; \ - s3 ^= s0; \ - s0 ^= s3; \ - \ - /* 5th round - S-box, rotate down, add round key */ \ - s1 ^= (rk)[8]; \ - s2 ^= (rk)[9]; \ - s0 ^= (rc)[4]; \ - gift128b_inv_permute_state_5(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - \ - /* 4th round - S-box, rotate right and swap rows, add round key */ \ - s1 ^= (rk)[6]; \ - s2 ^= (rk)[7]; \ - s3 ^= (rc)[3]; \ - gift128b_inv_permute_state_4(s0, s1, s2, s3); \ - gift128b_inv_sbox(s0, s1, s2, s3); \ - \ - /* 3rd round - S-box, swap columns, add round key */ \ - s1 ^= (rk)[4]; \ - s2 ^= (rk)[5]; \ - s0 ^= (rc)[2]; \ - gift128b_inv_permute_state_3(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - \ - /* 2nd round - S-box, rotate down, add round key */ \ - s1 ^= (rk)[2]; \ - s2 ^= (rk)[3]; \ - s3 ^= (rc)[1]; \ - gift128b_inv_permute_state_2(s0, s1, s2, s3); \ - gift128b_inv_sbox(s0, s1, s2, s3); \ - \ - /* 1st round - S-box, rotate right, add round key */ \ - s1 ^= (rk)[0]; \ - s2 ^= (rk)[1]; \ - s0 ^= (rc)[0]; \ - gift128b_inv_permute_state_1(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - } while (0) - -#else /* GIFT128_VARIANT_TINY */ - -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) -{ - /* Mirror the fixslicing word order of 3, 1, 2, 0 */ - ks->k[0] = be_load_word32(key + 12); - ks->k[1] = be_load_word32(key + 4); - ks->k[2] = be_load_word32(key + 8); - ks->k[3] = be_load_word32(key); -} - -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) -{ - /* Use the little-endian key byte order from the HYENA submission - * and mirror the fixslicing word order of 3, 1, 2, 0 */ - ks->k[0] = le_load_word32(key); - ks->k[1] = le_load_word32(key + 8); - ks->k[2] = le_load_word32(key + 4); - ks->k[3] = le_load_word32(key + 12); -} - -#endif /* GIFT128_VARIANT_TINY */ - -#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext 
into local variables */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. - * Every 5 rounds except the last we add the tweak value to the state */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#elif GIFT128_VARIANT == GIFT128_VARIANT_FULL - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_encrypt_5_rounds(ks->k 
+ 60, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into local variables */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. 
- * Every 5 rounds except the last we add the tweak value to the state */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#else /* GIFT128_VARIANT_TINY */ - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 
0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* AddTweak - XOR in the tweak every 5 rounds except the last */ - if (((round + 1) % 5) == 0 && round < 39) - s0 ^= tweak; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#endif /* GIFT128_VARIANT_TINY */ - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the ciphertext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = 
be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. - * Every 5 rounds except the first we add the tweak value to the state */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - - /* Pack the state into the plaintext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#else /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ - -/* The small variant uses fixslicing for encryption, but we need to change - * to bitslicing for decryption because of the difficulty of fast-forwarding - * the fixsliced key schedule to the end. So the tiny variant is used for - * decryption when the small variant is selected. Since the NIST AEAD modes - * for GIFT-128 only use the block encrypt operation, the inefficiencies - * in decryption don't matter all that much */ - -/** - * \def gift128b_load_and_forward_schedule() - * \brief Generate the decryption key at the end of the last round. - * - * To do that, we run the block operation forward to determine the - * final state of the key schedule after the last round: - * - * w0 = ks->k[0]; - * w1 = ks->k[1]; - * w2 = ks->k[2]; - * w3 = ks->k[3]; - * for (round = 0; round < 40; ++round) { - * temp = w3; - * w3 = w2; - * w2 = w1; - * w1 = w0; - * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - * } - * - * We can short-cut all of the above by noticing that we don't need - * to do the word rotations. Every 4 rounds, the rotation alignment - * returns to the original position and each word has been rotated - * by applying the "2 right and 4 left" bit-rotation step to it. - * We then repeat that 10 times for the full 40 rounds. The overall - * effect is to apply a "20 right and 40 left" bit-rotation to every - * word in the key schedule. That is equivalent to "4 right and 8 left" - * on the 16-bit sub-words. 
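The equivalence above can be checked in isolation: rotating a key word once every 4 rounds for 40 rounds ("2 right, 4 left" on the 16-bit halves, ten times in total) gives the same result as the single "4 right, 8 left" rotation that gift128b_load_and_forward_schedule() applies below. A minimal standalone sketch of that check, using the illustrative helper names key_word_rotate and key_word_fast_forward (not part of the library):

    #include <assert.h>
    #include <stdint.h>

    /* One key-schedule rotation, applied to each word once every 4 rounds:
     * top 16 bits rotated right by 2, bottom 16 bits rotated left by 4. */
    static uint32_t key_word_rotate(uint32_t w)
    {
        return ((w & 0xFFFC0000U) >> 2) | ((w & 0x00030000U) << 14) |
               ((w & 0x00000FFFU) << 4) | ((w & 0x0000F000U) >> 12);
    }

    /* Fast-forward over all 40 rounds in one step: top 16 bits rotated
     * right by 4, bottom 16 bits rotated left by 8. */
    static uint32_t key_word_fast_forward(uint32_t w)
    {
        return ((w & 0xFFF00000U) >> 4) | ((w & 0x000F0000U) << 12) |
               ((w & 0x000000FFU) << 8) | ((w & 0x0000FF00U) >> 8);
    }

    int main(void)
    {
        uint32_t w = 0x12345678U;     /* arbitrary test word */
        uint32_t forwarded = w;
        int i;
        for (i = 0; i < 10; ++i)      /* 40 rounds = 10 rotations per word */
            forwarded = key_word_rotate(forwarded);
        assert(forwarded == key_word_fast_forward(w));
        return 0;
    }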
- */ -#if GIFT128_VARIANT != GIFT128_VARIANT_SMALL -#define gift128b_load_and_forward_schedule() \ - do { \ - w0 = ks->k[3]; \ - w1 = ks->k[1]; \ - w2 = ks->k[2]; \ - w3 = ks->k[0]; \ - w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ - ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ - w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ - ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ - w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ - ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ - w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ - ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ - } while (0) -#else -/* The small variant needs to also undo some of the rotations that were - * done to generate the fixsliced version of the key schedule */ -#define gift128b_load_and_forward_schedule() \ - do { \ - w0 = ks->k[3]; \ - w1 = ks->k[1]; \ - w2 = ks->k[2]; \ - w3 = ks->k[0]; \ - gift128b_swap_move(w3, w3, 0x000000FFU, 24); \ - gift128b_swap_move(w3, w3, 0x00003333U, 18); \ - gift128b_swap_move(w3, w3, 0x000F000FU, 12); \ - gift128b_swap_move(w3, w3, 0x00550055U, 9); \ - gift128b_swap_move(w1, w1, 0x000000FFU, 24); \ - gift128b_swap_move(w1, w1, 0x00003333U, 18); \ - gift128b_swap_move(w1, w1, 0x000F000FU, 12); \ - gift128b_swap_move(w1, w1, 0x00550055U, 9); \ - gift128b_swap_move(w2, w2, 0x000000FFU, 24); \ - gift128b_swap_move(w2, w2, 0x000F000FU, 12); \ - gift128b_swap_move(w2, w2, 0x03030303U, 6); \ - gift128b_swap_move(w2, w2, 0x11111111U, 3); \ - gift128b_swap_move(w0, w0, 0x000000FFU, 24); \ - gift128b_swap_move(w0, w0, 0x000F000FU, 12); \ - gift128b_swap_move(w0, w0, 0x03030303U, 6); \ - gift128b_swap_move(w0, w0, 0x11111111U, 3); \ - w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ - ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ - w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ - ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ - w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ - ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ - w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ - ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ - } while (0) -#endif - -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the ciphertext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Generate the decryption key at the end of the last round */ - gift128b_load_and_forward_schedule(); - - /* Perform all 40 rounds */ - for (round = 40; round > 0; --round) { - /* Rotate the key schedule backwards */ - temp = w0; - w0 = w1; - w1 = w2; - w2 = w3; - w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | - ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; - - /* InvPermBits - apply the inverse of the 128-bit permutation */ - INV_PERM0(s0); - INV_PERM1(s1); - INV_PERM2(s2); - INV_PERM3(s3); - - /* InvSubCells - apply the inverse of the S-box */ - temp = s0; - s0 = s3; - s3 = temp; - s2 ^= s0 & s1; - s3 ^= 0xFFFFFFFFU; - s1 ^= s3; - s3 ^= s2; - s2 ^= s0 | s1; - s0 ^= s1 
& s3; - s1 ^= s0 & s2; - } - - /* Pack the state into the plaintext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Generate the decryption key at the end of the last round */ - gift128b_load_and_forward_schedule(); - - /* Perform all 40 rounds */ - for (round = 40; round > 0; --round) { - /* Rotate the key schedule backwards */ - temp = w0; - w0 = w1; - w1 = w2; - w2 = w3; - w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | - ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); - - /* AddTweak - XOR in the tweak every 5 rounds except the last */ - if ((round % 5) == 0 && round < 40) - s0 ^= tweak; - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; - - /* InvPermBits - apply the inverse of the 128-bit permutation */ - INV_PERM0(s0); - INV_PERM1(s1); - INV_PERM2(s2); - INV_PERM3(s3); - - /* InvSubCells - apply the inverse of the S-box */ - temp = s0; - s0 = s3; - s3 = temp; - s2 ^= s0 & s1; - s3 ^= 0xFFFFFFFFU; - s1 ^= s3; - s3 ^= s2; - s2 ^= s0 | s1; - s0 ^= s1 & s3; - s1 ^= s0 & s2; - } - - /* Pack the state into the plaintext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#endif /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ - -#endif /* !GIFT128_VARIANT_ASM */ diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128.h b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128.h deleted file mode 100644 index f57d143..0000000 --- a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128.h +++ /dev/null @@ -1,246 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LW_INTERNAL_GIFT128_H -#define LW_INTERNAL_GIFT128_H - -/** - * \file internal-gift128.h - * \brief GIFT-128 block cipher. - * - * There are three versions of GIFT-128 in use within the second round - * submissions to the NIST lightweight cryptography competition. - * - * The most efficient version for 32-bit software implementation is the - * GIFT-128-b bit-sliced version from GIFT-COFB and SUNDAE-GIFT. - * - * The second is the nibble-based version from HYENA. We implement the - * HYENA version as a wrapper around the bit-sliced version. - * - * The third version is a variant on the HYENA nibble-based version that - * includes a 4-bit tweak value for domain separation. It is used by - * the ESTATE submission to NIST. - * - * Technically there is a fourth version of GIFT-128 which is the one that - * appeared in the original GIFT-128 paper. It is almost the same as the - * HYENA version except that the byte ordering is big-endian instead of - * HYENA's little-endian. The original version of GIFT-128 doesn't appear - * in any of the NIST submissions so we don't bother with it in this library. - * - * References: https://eprint.iacr.org/2017/622.pdf, - * https://eprint.iacr.org/2020/412.pdf, - * https://giftcipher.github.io/gift/ - */ - -#include -#include -#include "internal-gift128-config.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of a GIFT-128 block in bytes. - */ -#define GIFT128_BLOCK_SIZE 16 - -/** - * \var GIFT128_ROUND_KEYS - * \brief Number of round keys for the GIFT-128 key schedule. - */ -#if GIFT128_VARIANT == GIFT128_VARIANT_TINY -#define GIFT128_ROUND_KEYS 4 -#elif GIFT128_VARIANT == GIFT128_VARIANT_SMALL -#define GIFT128_ROUND_KEYS 20 -#else -#define GIFT128_ROUND_KEYS 80 -#endif - -/** - * \brief Structure of the key schedule for GIFT-128 (bit-sliced). - */ -typedef struct -{ - /** Pre-computed round keys for bit-sliced GIFT-128 */ - uint32_t k[GIFT128_ROUND_KEYS]; - -} gift128b_key_schedule_t; - -/** - * \brief Initializes the key schedule for GIFT-128 (bit-sliced). - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the 16 bytes of the key data. - */ -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced and pre-loaded). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version assumes that the input has already been pre-loaded from - * big-endian into host byte order in the supplied word array. The output - * is delivered in the same way. - */ -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]); - -/** - * \brief Decrypts a 128-bit block with GIFT-128 (bit-sliced). 
- * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - */ -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Structure of the key schedule for GIFT-128 (nibble-based). - */ -typedef gift128b_key_schedule_t gift128n_key_schedule_t; - -/** - * \brief Initializes the key schedule for GIFT-128 (nibble-based). - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the 16 bytes of the key data. - */ -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (nibble-based). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void gift128n_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with GIFT-128 (nibble-based). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - */ -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/* 4-bit tweak values expanded to 32-bit for TweGIFT-128 */ -#define GIFT128T_TWEAK_0 0x00000000 /**< TweGIFT-128 tweak value 0 */ -#define GIFT128T_TWEAK_1 0xe1e1e1e1 /**< TweGIFT-128 tweak value 1 */ -#define GIFT128T_TWEAK_2 0xd2d2d2d2 /**< TweGIFT-128 tweak value 2 */ -#define GIFT128T_TWEAK_3 0x33333333 /**< TweGIFT-128 tweak value 3 */ -#define GIFT128T_TWEAK_4 0xb4b4b4b4 /**< TweGIFT-128 tweak value 4 */ -#define GIFT128T_TWEAK_5 0x55555555 /**< TweGIFT-128 tweak value 5 */ -#define GIFT128T_TWEAK_6 0x66666666 /**< TweGIFT-128 tweak value 6 */ -#define GIFT128T_TWEAK_7 0x87878787 /**< TweGIFT-128 tweak value 7 */ -#define GIFT128T_TWEAK_8 0x78787878 /**< TweGIFT-128 tweak value 8 */ -#define GIFT128T_TWEAK_9 0x99999999 /**< TweGIFT-128 tweak value 9 */ -#define GIFT128T_TWEAK_10 0xaaaaaaaa /**< TweGIFT-128 tweak value 10 */ -#define GIFT128T_TWEAK_11 0x4b4b4b4b /**< TweGIFT-128 tweak value 11 */ -#define GIFT128T_TWEAK_12 0xcccccccc /**< TweGIFT-128 tweak value 12 */ -#define GIFT128T_TWEAK_13 0x2d2d2d2d /**< TweGIFT-128 tweak value 13 */ -#define GIFT128T_TWEAK_14 0x1e1e1e1e /**< TweGIFT-128 tweak value 14 */ -#define GIFT128T_TWEAK_15 0xffffffff /**< TweGIFT-128 tweak value 15 */ - -/** - * \brief Encrypts a 128-bit block with TweGIFT-128 (tweakable variant). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value expanded to 32-bit. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. 
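The GIFT128T_TWEAK_* constants listed above follow a regular pattern: every byte carries the 4-bit tweak value in its low nibble, and the high nibble is the same value when it has even parity, or its bitwise complement when it has odd parity. A minimal sketch that reproduces the table from that observation, using the illustrative helper name expand_tweak4 (not part of the library):

    #include <stdint.h>
    #include <stdio.h>

    /* Expand a 4-bit tweak into the replicated 32-bit form used by the
     * GIFT128T_TWEAK_* constants: low nibble = tweak, high nibble = tweak
     * if its parity is even, complement of the tweak if its parity is odd. */
    static uint32_t expand_tweak4(uint32_t t)
    {
        uint32_t parity = (t ^ (t >> 1) ^ (t >> 2) ^ (t >> 3)) & 1;
        uint32_t high = (parity ? ~t : t) & 0x0F;
        uint32_t byte = (high << 4) | (t & 0x0F);
        return byte * 0x01010101U;    /* replicate across all four bytes */
    }

    int main(void)
    {
        /* Prints 0xe1e1e1e1 and 0x4b4b4b4b, matching GIFT128T_TWEAK_1
         * and GIFT128T_TWEAK_11 above. */
        printf("0x%08lx\n", (unsigned long)expand_tweak4(1));
        printf("0x%08lx\n", (unsigned long)expand_tweak4(11));
        return 0;
    }

A caller passes one of the pre-expanded constants directly, e.g. gift128t_encrypt(&ks, output, input, GIFT128T_TWEAK_1).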
- * - * This variant of GIFT-128 is used by the ESTATE submission to the - * NIST Lightweight Cryptography Competition. A 4-bit tweak is added to - * some of the rounds to provide domain separation. If the tweak is - * zero, then this function is identical to gift128n_encrypt(). - */ -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak); - -/** - * \brief Decrypts a 128-bit block with TweGIFT-128 (tweakable variant). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value expanded to 32-bit. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This variant of GIFT-128 is used by the ESTATE submission to the - * NIST Lightweight Cryptography Competition. A 4-bit tweak is added to - * some of the rounds to provide domain separation. If the tweak is - * zero, then this function is identical to gift128n_encrypt(). - */ -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128n-avr.S b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128n-avr.S deleted file mode 100644 index 2aae304..0000000 --- a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128n-avr.S +++ /dev/null @@ -1,4712 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 40 -table_0: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - - .text -.global gift128n_init - .type gift128n_init, @function -gift128n_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - std Z+12,r18 - std Z+13,r19 - std Z+14,r20 - std Z+15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - std Z+4,r18 - std Z+5,r19 - std Z+6,r20 - std Z+7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - ret - .size gift128n_init, .-gift128n_init - - .text -.global gift128n_encrypt - .type gift128n_encrypt, @function -gift128n_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 36 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r22,0 - bst r18,1 - bld r4,0 - bst r18,2 - bld r8,0 - bst r18,3 - bld r12,0 - bst r18,4 - bld r22,1 - bst r18,5 - bld r4,1 - bst r18,6 - 
bld r8,1 - bst r18,7 - bld r12,1 - bst r19,0 - bld r22,2 - bst r19,1 - bld r4,2 - bst r19,2 - bld r8,2 - bst r19,3 - bld r12,2 - bst r19,4 - bld r22,3 - bst r19,5 - bld r4,3 - bst r19,6 - bld r8,3 - bst r19,7 - bld r12,3 - bst r20,0 - bld r22,4 - bst r20,1 - bld r4,4 - bst r20,2 - bld r8,4 - bst r20,3 - bld r12,4 - bst r20,4 - bld r22,5 - bst r20,5 - bld r4,5 - bst r20,6 - bld r8,5 - bst r20,7 - bld r12,5 - bst r21,0 - bld r22,6 - bst r21,1 - bld r4,6 - bst r21,2 - bld r8,6 - bst r21,3 - bld r12,6 - bst r21,4 - bld r22,7 - bst r21,5 - bld r4,7 - bst r21,6 - bld r8,7 - bst r21,7 - bld r12,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r23,0 - bst r18,1 - bld r5,0 - bst r18,2 - bld r9,0 - bst r18,3 - bld r13,0 - bst r18,4 - bld r23,1 - bst r18,5 - bld r5,1 - bst r18,6 - bld r9,1 - bst r18,7 - bld r13,1 - bst r19,0 - bld r23,2 - bst r19,1 - bld r5,2 - bst r19,2 - bld r9,2 - bst r19,3 - bld r13,2 - bst r19,4 - bld r23,3 - bst r19,5 - bld r5,3 - bst r19,6 - bld r9,3 - bst r19,7 - bld r13,3 - bst r20,0 - bld r23,4 - bst r20,1 - bld r5,4 - bst r20,2 - bld r9,4 - bst r20,3 - bld r13,4 - bst r20,4 - bld r23,5 - bst r20,5 - bld r5,5 - bst r20,6 - bld r9,5 - bst r20,7 - bld r13,5 - bst r21,0 - bld r23,6 - bst r21,1 - bld r5,6 - bst r21,2 - bld r9,6 - bst r21,3 - bld r13,6 - bst r21,4 - bld r23,7 - bst r21,5 - bld r5,7 - bst r21,6 - bld r9,7 - bst r21,7 - bld r13,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r2,0 - bst r18,1 - bld r6,0 - bst r18,2 - bld r10,0 - bst r18,3 - bld r14,0 - bst r18,4 - bld r2,1 - bst r18,5 - bld r6,1 - bst r18,6 - bld r10,1 - bst r18,7 - bld r14,1 - bst r19,0 - bld r2,2 - bst r19,1 - bld r6,2 - bst r19,2 - bld r10,2 - bst r19,3 - bld r14,2 - bst r19,4 - bld r2,3 - bst r19,5 - bld r6,3 - bst r19,6 - bld r10,3 - bst r19,7 - bld r14,3 - bst r20,0 - bld r2,4 - bst r20,1 - bld r6,4 - bst r20,2 - bld r10,4 - bst r20,3 - bld r14,4 - bst r20,4 - bld r2,5 - bst r20,5 - bld r6,5 - bst r20,6 - bld r10,5 - bst r20,7 - bld r14,5 - bst r21,0 - bld r2,6 - bst r21,1 - bld r6,6 - bst r21,2 - bld r10,6 - bst r21,3 - bld r14,6 - bst r21,4 - bld r2,7 - bst r21,5 - bld r6,7 - bst r21,6 - bld r10,7 - bst r21,7 - bld r14,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r3,0 - bst r18,1 - bld r7,0 - bst r18,2 - bld r11,0 - bst r18,3 - bld r15,0 - bst r18,4 - bld r3,1 - bst r18,5 - bld r7,1 - bst r18,6 - bld r11,1 - bst r18,7 - bld r15,1 - bst r19,0 - bld r3,2 - bst r19,1 - bld r7,2 - bst r19,2 - bld r11,2 - bst r19,3 - bld r15,2 - bst r19,4 - bld r3,3 - bst r19,5 - bld r7,3 - bst r19,6 - bld r11,3 - bst r19,7 - bld r15,3 - bst r20,0 - bld r3,4 - bst r20,1 - bld r7,4 - bst r20,2 - bld r11,4 - bst r20,3 - bld r15,4 - bst r20,4 - bld r3,5 - bst r20,5 - bld r7,5 - bst r20,6 - bld r11,5 - bst r20,7 - bld r15,5 - bst r21,0 - bld r3,6 - bst r21,1 - bld r7,6 - bst r21,2 - bld r11,6 - bst r21,3 - bld r15,6 - bst r21,4 - bld r3,7 - bst r21,5 - bld r7,7 - bst r21,6 - bld r11,7 - bst r21,7 - bld r15,7 - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r17,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 
-#endif - mov r16,r1 -302: - rcall 455f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - rcall 455f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - rcall 455f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - rcall 455f - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - ldi r17,40 - cpse r16,r17 - rjmp 302b - rjmp 804f -455: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - movw r18,r22 - movw r20,r2 - mov r0,r4 - and r0,r18 - eor r8,r0 - mov r0,r5 - and r0,r19 - eor r9,r0 - mov r0,r6 - and r0,r20 - eor r10,r0 - mov r0,r7 - and r0,r21 - eor r11,r0 - movw r22,r12 - movw r2,r14 - movw r12,r18 - movw r14,r20 - bst r22,1 - bld r0,0 - bst r22,4 - bld r22,1 - bst r2,0 - bld r22,4 - bst r22,2 - bld r2,0 - bst r23,0 - bld r22,2 - bst r22,3 - bld r23,0 - bst r23,4 - bld r22,3 - bst r2,3 - bld r23,4 - bst r23,6 - bld r2,3 - bst r3,3 - bld r23,6 - bst r23,5 - bld r3,3 - bst r2,7 - bld r23,5 - bst r3,6 - bld r2,7 - bst r3,1 - bld r3,6 - bst r22,5 - bld r3,1 - bst r2,4 - bld r22,5 - bst r2,2 - bld r2,4 - bst r23,2 - bld r2,2 - bst r23,3 - bld r23,2 - bst r23,7 - bld r23,3 - bst r3,7 - bld r23,7 - bst r3,5 - bld r3,7 - bst r2,5 - bld r3,5 - bst r2,6 - bld r2,5 - bst r3,2 - bld r2,6 - bst r23,1 - bld r3,2 - bst r22,7 - bld r23,1 - bst r3,4 - bld r22,7 - bst r2,1 - bld r3,4 - bst r22,6 - bld r2,1 - bst r3,0 - bld r22,6 - bst r0,0 - bld r3,0 - bst r4,0 - bld r0,0 - bst r4,1 - bld r4,0 - bst r4,5 - bld r4,1 - bst r6,5 - bld r4,5 - bst r6,7 - bld r6,5 - bst r7,7 - 
bld r6,7 - bst r7,6 - bld r7,7 - bst r7,2 - bld r7,6 - bst r5,2 - bld r7,2 - bst r5,0 - bld r5,2 - bst r0,0 - bld r5,0 - bst r4,2 - bld r0,0 - bst r5,1 - bld r4,2 - bst r4,4 - bld r5,1 - bst r6,1 - bld r4,4 - bst r4,7 - bld r6,1 - bst r7,5 - bld r4,7 - bst r6,6 - bld r7,5 - bst r7,3 - bld r6,6 - bst r5,6 - bld r7,3 - bst r7,0 - bld r5,6 - bst r0,0 - bld r7,0 - bst r4,3 - bld r0,0 - bst r5,5 - bld r4,3 - bst r6,4 - bld r5,5 - bst r6,3 - bld r6,4 - bst r5,7 - bld r6,3 - bst r7,4 - bld r5,7 - bst r6,2 - bld r7,4 - bst r5,3 - bld r6,2 - bst r5,4 - bld r5,3 - bst r6,0 - bld r5,4 - bst r0,0 - bld r6,0 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r8,2 - bld r8,0 - bst r9,2 - bld r8,2 - bst r9,1 - bld r9,2 - bst r8,5 - bld r9,1 - bst r10,6 - bld r8,5 - bst r11,0 - bld r10,6 - bst r8,3 - bld r11,0 - bst r9,6 - bld r8,3 - bst r11,1 - bld r9,6 - bst r8,7 - bld r11,1 - bst r11,6 - bld r8,7 - bst r11,3 - bld r11,6 - bst r9,7 - bld r11,3 - bst r11,5 - bld r9,7 - bst r10,7 - bld r11,5 - bst r11,4 - bld r10,7 - bst r10,3 - bld r11,4 - bst r9,4 - bld r10,3 - bst r10,1 - bld r9,4 - bst r8,4 - bld r10,1 - bst r10,2 - bld r8,4 - bst r9,0 - bld r10,2 - bst r8,1 - bld r9,0 - bst r8,6 - bld r8,1 - bst r11,2 - bld r8,6 - bst r9,3 - bld r11,2 - bst r9,5 - bld r9,3 - bst r10,5 - bld r9,5 - bst r10,4 - bld r10,5 - bst r10,0 - bld r10,4 - bst r0,0 - bld r10,0 - bst r12,0 - bld r0,0 - bst r12,3 - bld r12,0 - bst r13,7 - bld r12,3 - bst r15,6 - bld r13,7 - bst r15,0 - bld r15,6 - bst r0,0 - bld r15,0 - bst r12,1 - bld r0,0 - bst r12,7 - bld r12,1 - bst r15,7 - bld r12,7 - bst r15,4 - bld r15,7 - bst r14,0 - bld r15,4 - bst r0,0 - bld r14,0 - bst r12,2 - bld r0,0 - bst r13,3 - bld r12,2 - bst r13,6 - bld r13,3 - bst r15,2 - bld r13,6 - bst r13,0 - bld r15,2 - bst r0,0 - bld r13,0 - bst r12,4 - bld r0,0 - bst r14,3 - bld r12,4 - bst r13,5 - bld r14,3 - bst r14,6 - bld r13,5 - bst r15,1 - bld r14,6 - bst r0,0 - bld r15,1 - bst r12,5 - bld r0,0 - bst r14,7 - bld r12,5 - bst r15,5 - bld r14,7 - bst r14,4 - bld r15,5 - bst r14,1 - bld r14,4 - bst r0,0 - bld r14,1 - bst r12,6 - bld r0,0 - bst r15,3 - bld r12,6 - bst r13,4 - bld r15,3 - bst r14,2 - bld r13,4 - bst r13,1 - bld r14,2 - bst r0,0 - bld r13,1 - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - inc r16 - ret -804: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - bst r22,0 - bld r18,0 - bst r4,0 - bld r18,1 - bst r8,0 - bld r18,2 - bst r12,0 - bld r18,3 - bst r22,1 - bld r18,4 - bst r4,1 - bld r18,5 - bst r8,1 - bld r18,6 - bst r12,1 - bld r18,7 - bst r22,2 - bld r19,0 - bst r4,2 - bld r19,1 - bst r8,2 - bld r19,2 - bst r12,2 - bld r19,3 - bst r22,3 - bld r19,4 - bst r4,3 - bld r19,5 - bst r8,3 - bld r19,6 - bst r12,3 - bld r19,7 - bst r22,4 - bld r20,0 - bst r4,4 - bld r20,1 - bst r8,4 - bld r20,2 - bst r12,4 - bld r20,3 - bst r22,5 - bld r20,4 - bst r4,5 - bld r20,5 - bst r8,5 - bld r20,6 - bst r12,5 - bld r20,7 - bst r22,6 - bld r21,0 - bst r4,6 - bld r21,1 - bst r8,6 - bld r21,2 - bst r12,6 - bld r21,3 - bst r22,7 - bld r21,4 - bst r4,7 - bld r21,5 - bst r8,7 - bld r21,6 - bst r12,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r23,0 - bld r18,0 - bst r5,0 - bld r18,1 - bst r9,0 - bld r18,2 - bst r13,0 - bld r18,3 - bst r23,1 - bld r18,4 - 
bst r5,1 - bld r18,5 - bst r9,1 - bld r18,6 - bst r13,1 - bld r18,7 - bst r23,2 - bld r19,0 - bst r5,2 - bld r19,1 - bst r9,2 - bld r19,2 - bst r13,2 - bld r19,3 - bst r23,3 - bld r19,4 - bst r5,3 - bld r19,5 - bst r9,3 - bld r19,6 - bst r13,3 - bld r19,7 - bst r23,4 - bld r20,0 - bst r5,4 - bld r20,1 - bst r9,4 - bld r20,2 - bst r13,4 - bld r20,3 - bst r23,5 - bld r20,4 - bst r5,5 - bld r20,5 - bst r9,5 - bld r20,6 - bst r13,5 - bld r20,7 - bst r23,6 - bld r21,0 - bst r5,6 - bld r21,1 - bst r9,6 - bld r21,2 - bst r13,6 - bld r21,3 - bst r23,7 - bld r21,4 - bst r5,7 - bld r21,5 - bst r9,7 - bld r21,6 - bst r13,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r2,0 - bld r18,0 - bst r6,0 - bld r18,1 - bst r10,0 - bld r18,2 - bst r14,0 - bld r18,3 - bst r2,1 - bld r18,4 - bst r6,1 - bld r18,5 - bst r10,1 - bld r18,6 - bst r14,1 - bld r18,7 - bst r2,2 - bld r19,0 - bst r6,2 - bld r19,1 - bst r10,2 - bld r19,2 - bst r14,2 - bld r19,3 - bst r2,3 - bld r19,4 - bst r6,3 - bld r19,5 - bst r10,3 - bld r19,6 - bst r14,3 - bld r19,7 - bst r2,4 - bld r20,0 - bst r6,4 - bld r20,1 - bst r10,4 - bld r20,2 - bst r14,4 - bld r20,3 - bst r2,5 - bld r20,4 - bst r6,5 - bld r20,5 - bst r10,5 - bld r20,6 - bst r14,5 - bld r20,7 - bst r2,6 - bld r21,0 - bst r6,6 - bld r21,1 - bst r10,6 - bld r21,2 - bst r14,6 - bld r21,3 - bst r2,7 - bld r21,4 - bst r6,7 - bld r21,5 - bst r10,7 - bld r21,6 - bst r14,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r3,0 - bld r18,0 - bst r7,0 - bld r18,1 - bst r11,0 - bld r18,2 - bst r15,0 - bld r18,3 - bst r3,1 - bld r18,4 - bst r7,1 - bld r18,5 - bst r11,1 - bld r18,6 - bst r15,1 - bld r18,7 - bst r3,2 - bld r19,0 - bst r7,2 - bld r19,1 - bst r11,2 - bld r19,2 - bst r15,2 - bld r19,3 - bst r3,3 - bld r19,4 - bst r7,3 - bld r19,5 - bst r11,3 - bld r19,6 - bst r15,3 - bld r19,7 - bst r3,4 - bld r20,0 - bst r7,4 - bld r20,1 - bst r11,4 - bld r20,2 - bst r15,4 - bld r20,3 - bst r3,5 - bld r20,4 - bst r7,5 - bld r20,5 - bst r11,5 - bld r20,6 - bst r15,5 - bld r20,7 - bst r3,6 - bld r21,0 - bst r7,6 - bld r21,1 - bst r11,6 - bld r21,2 - bst r15,6 - bld r21,3 - bst r3,7 - bld r21,4 - bst r7,7 - bld r21,5 - bst r11,7 - bld r21,6 - bst r15,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128n_encrypt, .-gift128n_encrypt - - .text -.global gift128n_decrypt - .type gift128n_decrypt, @function -gift128n_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r22,0 - bst r18,1 - bld r4,0 - bst r18,2 - bld r8,0 - bst r18,3 - bld r12,0 - bst r18,4 - bld r22,1 - bst r18,5 - bld r4,1 - bst r18,6 - bld r8,1 - bst r18,7 - bld r12,1 - bst r19,0 - bld r22,2 - bst r19,1 - bld r4,2 - bst r19,2 - bld r8,2 - bst r19,3 - bld r12,2 - bst r19,4 - bld r22,3 - bst r19,5 - bld r4,3 - bst r19,6 - bld r8,3 - bst r19,7 - bld r12,3 - bst r20,0 - bld r22,4 - bst r20,1 - bld r4,4 - bst r20,2 - bld r8,4 - bst r20,3 - bld r12,4 - bst 
r20,4 - bld r22,5 - bst r20,5 - bld r4,5 - bst r20,6 - bld r8,5 - bst r20,7 - bld r12,5 - bst r21,0 - bld r22,6 - bst r21,1 - bld r4,6 - bst r21,2 - bld r8,6 - bst r21,3 - bld r12,6 - bst r21,4 - bld r22,7 - bst r21,5 - bld r4,7 - bst r21,6 - bld r8,7 - bst r21,7 - bld r12,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r23,0 - bst r18,1 - bld r5,0 - bst r18,2 - bld r9,0 - bst r18,3 - bld r13,0 - bst r18,4 - bld r23,1 - bst r18,5 - bld r5,1 - bst r18,6 - bld r9,1 - bst r18,7 - bld r13,1 - bst r19,0 - bld r23,2 - bst r19,1 - bld r5,2 - bst r19,2 - bld r9,2 - bst r19,3 - bld r13,2 - bst r19,4 - bld r23,3 - bst r19,5 - bld r5,3 - bst r19,6 - bld r9,3 - bst r19,7 - bld r13,3 - bst r20,0 - bld r23,4 - bst r20,1 - bld r5,4 - bst r20,2 - bld r9,4 - bst r20,3 - bld r13,4 - bst r20,4 - bld r23,5 - bst r20,5 - bld r5,5 - bst r20,6 - bld r9,5 - bst r20,7 - bld r13,5 - bst r21,0 - bld r23,6 - bst r21,1 - bld r5,6 - bst r21,2 - bld r9,6 - bst r21,3 - bld r13,6 - bst r21,4 - bld r23,7 - bst r21,5 - bld r5,7 - bst r21,6 - bld r9,7 - bst r21,7 - bld r13,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r2,0 - bst r18,1 - bld r6,0 - bst r18,2 - bld r10,0 - bst r18,3 - bld r14,0 - bst r18,4 - bld r2,1 - bst r18,5 - bld r6,1 - bst r18,6 - bld r10,1 - bst r18,7 - bld r14,1 - bst r19,0 - bld r2,2 - bst r19,1 - bld r6,2 - bst r19,2 - bld r10,2 - bst r19,3 - bld r14,2 - bst r19,4 - bld r2,3 - bst r19,5 - bld r6,3 - bst r19,6 - bld r10,3 - bst r19,7 - bld r14,3 - bst r20,0 - bld r2,4 - bst r20,1 - bld r6,4 - bst r20,2 - bld r10,4 - bst r20,3 - bld r14,4 - bst r20,4 - bld r2,5 - bst r20,5 - bld r6,5 - bst r20,6 - bld r10,5 - bst r20,7 - bld r14,5 - bst r21,0 - bld r2,6 - bst r21,1 - bld r6,6 - bst r21,2 - bld r10,6 - bst r21,3 - bld r14,6 - bst r21,4 - bld r2,7 - bst r21,5 - bld r6,7 - bst r21,6 - bld r10,7 - bst r21,7 - bld r14,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r3,0 - bst r18,1 - bld r7,0 - bst r18,2 - bld r11,0 - bst r18,3 - bld r15,0 - bst r18,4 - bld r3,1 - bst r18,5 - bld r7,1 - bst r18,6 - bld r11,1 - bst r18,7 - bld r15,1 - bst r19,0 - bld r3,2 - bst r19,1 - bld r7,2 - bst r19,2 - bld r11,2 - bst r19,3 - bld r15,2 - bst r19,4 - bld r3,3 - bst r19,5 - bld r7,3 - bst r19,6 - bld r11,3 - bst r19,7 - bld r15,3 - bst r20,0 - bld r3,4 - bst r20,1 - bld r7,4 - bst r20,2 - bld r11,4 - bst r20,3 - bld r15,4 - bst r20,4 - bld r3,5 - bst r20,5 - bld r7,5 - bst r20,6 - bld r11,5 - bst r20,7 - bld r15,5 - bst r21,0 - bld r3,6 - bst r21,1 - bld r7,6 - bst r21,2 - bld r11,6 - bst r21,3 - bld r15,6 - bst r21,4 - bld r3,7 - bst r21,5 - bld r7,7 - bst r21,6 - bld r11,7 - bst r21,7 - bld r15,7 - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 
- ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r17,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -370: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 522f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 522f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 522f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 522f - cpse r16,r1 - rjmp 370b - rjmp 867f -522: - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld 
r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - bld r11,3 - bst r8,7 - bld r11,6 - bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst r9,1 - bld r8,5 - bst r9,2 - bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -867: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - bst r22,0 - bld r18,0 - bst r4,0 - bld r18,1 - bst r8,0 - bld r18,2 - bst r12,0 - bld r18,3 - bst r22,1 - bld r18,4 - bst r4,1 - bld r18,5 - bst r8,1 - bld r18,6 - bst r12,1 - bld r18,7 - bst r22,2 - bld r19,0 - bst r4,2 - bld r19,1 - bst r8,2 - bld r19,2 - bst r12,2 - bld r19,3 - bst r22,3 - bld r19,4 - bst r4,3 - bld r19,5 - bst r8,3 - bld r19,6 - bst r12,3 - bld r19,7 - bst r22,4 - bld r20,0 - bst r4,4 - bld r20,1 - bst r8,4 - bld r20,2 - bst r12,4 - bld r20,3 - bst r22,5 - bld r20,4 - bst r4,5 - bld r20,5 - bst r8,5 - bld r20,6 - bst r12,5 - bld r20,7 - bst r22,6 - bld r21,0 - bst r4,6 - bld r21,1 
- bst r8,6 - bld r21,2 - bst r12,6 - bld r21,3 - bst r22,7 - bld r21,4 - bst r4,7 - bld r21,5 - bst r8,7 - bld r21,6 - bst r12,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r23,0 - bld r18,0 - bst r5,0 - bld r18,1 - bst r9,0 - bld r18,2 - bst r13,0 - bld r18,3 - bst r23,1 - bld r18,4 - bst r5,1 - bld r18,5 - bst r9,1 - bld r18,6 - bst r13,1 - bld r18,7 - bst r23,2 - bld r19,0 - bst r5,2 - bld r19,1 - bst r9,2 - bld r19,2 - bst r13,2 - bld r19,3 - bst r23,3 - bld r19,4 - bst r5,3 - bld r19,5 - bst r9,3 - bld r19,6 - bst r13,3 - bld r19,7 - bst r23,4 - bld r20,0 - bst r5,4 - bld r20,1 - bst r9,4 - bld r20,2 - bst r13,4 - bld r20,3 - bst r23,5 - bld r20,4 - bst r5,5 - bld r20,5 - bst r9,5 - bld r20,6 - bst r13,5 - bld r20,7 - bst r23,6 - bld r21,0 - bst r5,6 - bld r21,1 - bst r9,6 - bld r21,2 - bst r13,6 - bld r21,3 - bst r23,7 - bld r21,4 - bst r5,7 - bld r21,5 - bst r9,7 - bld r21,6 - bst r13,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r2,0 - bld r18,0 - bst r6,0 - bld r18,1 - bst r10,0 - bld r18,2 - bst r14,0 - bld r18,3 - bst r2,1 - bld r18,4 - bst r6,1 - bld r18,5 - bst r10,1 - bld r18,6 - bst r14,1 - bld r18,7 - bst r2,2 - bld r19,0 - bst r6,2 - bld r19,1 - bst r10,2 - bld r19,2 - bst r14,2 - bld r19,3 - bst r2,3 - bld r19,4 - bst r6,3 - bld r19,5 - bst r10,3 - bld r19,6 - bst r14,3 - bld r19,7 - bst r2,4 - bld r20,0 - bst r6,4 - bld r20,1 - bst r10,4 - bld r20,2 - bst r14,4 - bld r20,3 - bst r2,5 - bld r20,4 - bst r6,5 - bld r20,5 - bst r10,5 - bld r20,6 - bst r14,5 - bld r20,7 - bst r2,6 - bld r21,0 - bst r6,6 - bld r21,1 - bst r10,6 - bld r21,2 - bst r14,6 - bld r21,3 - bst r2,7 - bld r21,4 - bst r6,7 - bld r21,5 - bst r10,7 - bld r21,6 - bst r14,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r3,0 - bld r18,0 - bst r7,0 - bld r18,1 - bst r11,0 - bld r18,2 - bst r15,0 - bld r18,3 - bst r3,1 - bld r18,4 - bst r7,1 - bld r18,5 - bst r11,1 - bld r18,6 - bst r15,1 - bld r18,7 - bst r3,2 - bld r19,0 - bst r7,2 - bld r19,1 - bst r11,2 - bld r19,2 - bst r15,2 - bld r19,3 - bst r3,3 - bld r19,4 - bst r7,3 - bld r19,5 - bst r11,3 - bld r19,6 - bst r15,3 - bld r19,7 - bst r3,4 - bld r20,0 - bst r7,4 - bld r20,1 - bst r11,4 - bld r20,2 - bst r15,4 - bld r20,3 - bst r3,5 - bld r20,4 - bst r7,5 - bld r20,5 - bst r11,5 - bld r20,6 - bst r15,5 - bld r20,7 - bst r3,6 - bld r21,0 - bst r7,6 - bld r21,1 - bst r11,6 - bld r21,2 - bst r15,6 - bld r21,3 - bst r3,7 - bld r21,4 - bst r7,7 - bld r21,5 - bst r11,7 - bld r21,6 - bst r15,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128n_decrypt, .-gift128n_decrypt - - .text -.global gift128t_encrypt - .type gift128t_encrypt, @function -gift128t_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 36 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r2,0 - bst r20,1 - bld r6,0 - bst r20,2 - bld r10,0 - bst r20,3 - bld r14,0 - bst r20,4 - bld r2,1 - bst r20,5 - bld r6,1 - bst r20,6 - bld r10,1 - 
bst r20,7 - bld r14,1 - bst r21,0 - bld r2,2 - bst r21,1 - bld r6,2 - bst r21,2 - bld r10,2 - bst r21,3 - bld r14,2 - bst r21,4 - bld r2,3 - bst r21,5 - bld r6,3 - bst r21,6 - bld r10,3 - bst r21,7 - bld r14,3 - bst r22,0 - bld r2,4 - bst r22,1 - bld r6,4 - bst r22,2 - bld r10,4 - bst r22,3 - bld r14,4 - bst r22,4 - bld r2,5 - bst r22,5 - bld r6,5 - bst r22,6 - bld r10,5 - bst r22,7 - bld r14,5 - bst r23,0 - bld r2,6 - bst r23,1 - bld r6,6 - bst r23,2 - bld r10,6 - bst r23,3 - bld r14,6 - bst r23,4 - bld r2,7 - bst r23,5 - bld r6,7 - bst r23,6 - bld r10,7 - bst r23,7 - bld r14,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r3,0 - bst r20,1 - bld r7,0 - bst r20,2 - bld r11,0 - bst r20,3 - bld r15,0 - bst r20,4 - bld r3,1 - bst r20,5 - bld r7,1 - bst r20,6 - bld r11,1 - bst r20,7 - bld r15,1 - bst r21,0 - bld r3,2 - bst r21,1 - bld r7,2 - bst r21,2 - bld r11,2 - bst r21,3 - bld r15,2 - bst r21,4 - bld r3,3 - bst r21,5 - bld r7,3 - bst r21,6 - bld r11,3 - bst r21,7 - bld r15,3 - bst r22,0 - bld r3,4 - bst r22,1 - bld r7,4 - bst r22,2 - bld r11,4 - bst r22,3 - bld r15,4 - bst r22,4 - bld r3,5 - bst r22,5 - bld r7,5 - bst r22,6 - bld r11,5 - bst r22,7 - bld r15,5 - bst r23,0 - bld r3,6 - bst r23,1 - bld r7,6 - bst r23,2 - bld r11,6 - bst r23,3 - bld r15,6 - bst r23,4 - bld r3,7 - bst r23,5 - bld r7,7 - bst r23,6 - bld r11,7 - bst r23,7 - bld r15,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r4,0 - bst r20,1 - bld r8,0 - bst r20,2 - bld r12,0 - bst r20,3 - bld r24,0 - bst r20,4 - bld r4,1 - bst r20,5 - bld r8,1 - bst r20,6 - bld r12,1 - bst r20,7 - bld r24,1 - bst r21,0 - bld r4,2 - bst r21,1 - bld r8,2 - bst r21,2 - bld r12,2 - bst r21,3 - bld r24,2 - bst r21,4 - bld r4,3 - bst r21,5 - bld r8,3 - bst r21,6 - bld r12,3 - bst r21,7 - bld r24,3 - bst r22,0 - bld r4,4 - bst r22,1 - bld r8,4 - bst r22,2 - bld r12,4 - bst r22,3 - bld r24,4 - bst r22,4 - bld r4,5 - bst r22,5 - bld r8,5 - bst r22,6 - bld r12,5 - bst r22,7 - bld r24,5 - bst r23,0 - bld r4,6 - bst r23,1 - bld r8,6 - bst r23,2 - bld r12,6 - bst r23,3 - bld r24,6 - bst r23,4 - bld r4,7 - bst r23,5 - bld r8,7 - bst r23,6 - bld r12,7 - bst r23,7 - bld r24,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r5,0 - bst r20,1 - bld r9,0 - bst r20,2 - bld r13,0 - bst r20,3 - bld r25,0 - bst r20,4 - bld r5,1 - bst r20,5 - bld r9,1 - bst r20,6 - bld r13,1 - bst r20,7 - bld r25,1 - bst r21,0 - bld r5,2 - bst r21,1 - bld r9,2 - bst r21,2 - bld r13,2 - bst r21,3 - bld r25,2 - bst r21,4 - bld r5,3 - bst r21,5 - bld r9,3 - bst r21,6 - bld r13,3 - bst r21,7 - bld r25,3 - bst r22,0 - bld r5,4 - bst r22,1 - bld r9,4 - bst r22,2 - bld r13,4 - bst r22,3 - bld r25,4 - bst r22,4 - bld r5,5 - bst r22,5 - bld r9,5 - bst r22,6 - bld r13,5 - bst r22,7 - bld r25,5 - bst r23,0 - bld r5,6 - bst r23,1 - bld r9,6 - bst r23,2 - bld r13,6 - bst r23,3 - bld r25,6 - bst r23,4 - bld r5,7 - bst r23,5 - bld r9,7 - bst r23,6 - bld r13,7 - bst r23,7 - bld r25,7 - ld r26,Z - ldd r27,Z+1 - ldd r16,Z+2 - ldd r17,Z+3 - std Y+1,r26 - std Y+2,r27 - std Y+3,r16 - std Y+4,r17 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r16,Z+6 - ldd r17,Z+7 - std Y+5,r26 - std Y+6,r27 - std Y+7,r16 - std Y+8,r17 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r16,Z+10 - ldd r17,Z+11 - std Y+9,r26 - std Y+10,r27 - std Y+11,r16 - std Y+12,r17 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r16,Z+14 - ldd r17,Z+15 - std Y+13,r26 - std Y+14,r27 - std Y+15,r16 - std Y+16,r17 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in 
r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r19,r1 - mov r26,r1 -307: - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - com r14 - com r15 - com r24 - com r25 - movw r20,r2 - movw r22,r4 - mov r0,r6 - and r0,r20 - eor r10,r0 - mov r0,r7 - and r0,r21 - eor r11,r0 - mov r0,r8 - and r0,r22 - eor r12,r0 - mov r0,r9 - and r0,r23 - eor r13,r0 - movw r2,r14 - movw r4,r24 - movw r14,r20 - movw r24,r22 - bst r2,1 - bld r0,0 - bst r2,4 - bld r2,1 - bst r4,0 - bld r2,4 - bst r2,2 - bld r4,0 - bst r3,0 - bld r2,2 - bst r2,3 - bld r3,0 - bst r3,4 - bld r2,3 - bst r4,3 - bld r3,4 - bst r3,6 - bld r4,3 - bst r5,3 - bld r3,6 - bst r3,5 - bld r5,3 - bst r4,7 - bld r3,5 - bst r5,6 - bld r4,7 - bst r5,1 - bld r5,6 - bst r2,5 - bld r5,1 - bst r4,4 - bld r2,5 - bst r4,2 - bld r4,4 - bst r3,2 - bld r4,2 - bst r3,3 - bld r3,2 - bst r3,7 - bld r3,3 - bst r5,7 - bld r3,7 - bst r5,5 - bld r5,7 - bst r4,5 - bld r5,5 - bst r4,6 - bld r4,5 - bst r5,2 - bld r4,6 - bst r3,1 - bld r5,2 - bst r2,7 - bld r3,1 - bst r5,4 - bld r2,7 - bst r4,1 - bld r5,4 - bst r2,6 - bld r4,1 - bst r5,0 - bld r2,6 - bst r0,0 - bld r5,0 - bst r6,0 - bld r0,0 - bst r6,1 - bld r6,0 - bst r6,5 - bld r6,1 - bst r8,5 - bld r6,5 - bst r8,7 - bld r8,5 - bst r9,7 - bld r8,7 - bst r9,6 - bld r9,7 - bst r9,2 - bld r9,6 - bst r7,2 - bld r9,2 - bst r7,0 - bld r7,2 - bst r0,0 - bld r7,0 - bst r6,2 - bld r0,0 - bst r7,1 - bld r6,2 - bst r6,4 - bld r7,1 - bst r8,1 - bld r6,4 - bst r6,7 - bld r8,1 - bst r9,5 - bld r6,7 - bst r8,6 - bld r9,5 - bst r9,3 - bld r8,6 - bst r7,6 - bld r9,3 - bst r9,0 - bld r7,6 - bst r0,0 - bld r9,0 - bst r6,3 - bld r0,0 - bst r7,5 - bld r6,3 - bst r8,4 - bld r7,5 - bst r8,3 - bld r8,4 - bst r7,7 - bld r8,3 - bst r9,4 - bld r7,7 - bst r8,2 - bld r9,4 - bst r7,3 - bld r8,2 - bst r7,4 - bld r7,3 - bst r8,0 - bld r7,4 - bst r0,0 - bld r8,0 - bst r6,6 - bld r0,0 - bst r9,1 - bld r6,6 - bst r0,0 - bld r9,1 - bst r10,0 - bld r0,0 - bst r10,2 - bld r10,0 - bst r11,2 - bld r10,2 - bst r11,1 - bld r11,2 - bst r10,5 - bld r11,1 - bst r12,6 - bld r10,5 - bst r13,0 - bld r12,6 - bst r10,3 - bld r13,0 - bst r11,6 - bld r10,3 - bst r13,1 - bld r11,6 - bst r10,7 - bld r13,1 - bst r13,6 - bld r10,7 - bst r13,3 - bld r13,6 - bst r11,7 - bld r13,3 - bst r13,5 - bld r11,7 - bst r12,7 - bld r13,5 - bst r13,4 - bld r12,7 - bst r12,3 - bld r13,4 - bst r11,4 - bld r12,3 - bst r12,1 - bld r11,4 - bst r10,4 - bld r12,1 - bst r12,2 - bld r10,4 - bst r11,0 - bld r12,2 - bst r10,1 - bld r11,0 - bst r10,6 - bld r10,1 - bst r13,2 - bld r10,6 - bst r11,3 - bld r13,2 - bst r11,5 - bld r11,3 - bst r12,5 - bld r11,5 - bst r12,4 - bld r12,5 - bst r12,0 - bld r12,4 - bst r0,0 - bld r12,0 - bst r14,0 - bld r0,0 - bst r14,3 - bld r14,0 - bst r15,7 - bld r14,3 - bst r25,6 - bld r15,7 - bst r25,0 - bld r25,6 - bst r0,0 - bld r25,0 - bst r14,1 - bld r0,0 - bst r14,7 - bld r14,1 - bst r25,7 - bld r14,7 - bst r25,4 - bld r25,7 - bst r24,0 - bld r25,4 - bst r0,0 - bld r24,0 - bst r14,2 - bld r0,0 - bst r15,3 - bld r14,2 - bst r15,6 - bld r15,3 - bst 
r25,2 - bld r15,6 - bst r15,0 - bld r25,2 - bst r0,0 - bld r15,0 - bst r14,4 - bld r0,0 - bst r24,3 - bld r14,4 - bst r15,5 - bld r24,3 - bst r24,6 - bld r15,5 - bst r25,1 - bld r24,6 - bst r0,0 - bld r25,1 - bst r14,5 - bld r0,0 - bst r24,7 - bld r14,5 - bst r25,5 - bld r24,7 - bst r24,4 - bld r25,5 - bst r24,1 - bld r24,4 - bst r0,0 - bld r24,1 - bst r14,6 - bld r0,0 - bst r25,3 - bld r14,6 - bst r15,4 - bld r25,3 - bst r24,2 - bld r15,4 - bst r15,1 - bld r24,2 - bst r0,0 - bld r15,1 - ldd r0,Y+5 - eor r10,r0 - ldd r0,Y+6 - eor r11,r0 - ldd r0,Y+7 - eor r12,r0 - ldd r0,Y+8 - eor r13,r0 - ldd r20,Y+13 - ldd r21,Y+14 - ldd r22,Y+15 - ldd r23,Y+16 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - lsl r20 - rol r21 - adc r20,r1 - lsl r20 - rol r21 - adc r20,r1 - lsl r20 - rol r21 - adc r20,r1 - lsl r20 - rol r21 - adc r20,r1 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - ldd r0,Y+1 - std Y+1,r20 - ldd r20,Y+5 - std Y+5,r0 - ldd r0,Y+9 - std Y+9,r20 - std Y+13,r0 - ldd r0,Y+2 - std Y+2,r21 - ldd r21,Y+6 - std Y+6,r0 - ldd r0,Y+10 - std Y+10,r21 - std Y+14,r0 - ldd r0,Y+3 - std Y+3,r22 - ldd r22,Y+7 - std Y+7,r0 - ldd r0,Y+11 - std Y+11,r22 - std Y+15,r0 - ldd r0,Y+4 - std Y+4,r23 - ldd r23,Y+8 - std Y+8,r0 - ldd r0,Y+12 - std Y+12,r23 - std Y+16,r0 - ldi r20,128 - eor r25,r20 - mov r30,r19 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - eor r14,r20 - inc r19 - cpi r19,40 - breq 727f - inc r26 - ldi r27,5 - cpse r26,r27 - rjmp 307b - mov r26,r1 - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rjmp 307b -727: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - bst r2,0 - bld r20,0 - bst r6,0 - bld r20,1 - bst r10,0 - bld r20,2 - bst r14,0 - bld r20,3 - bst r2,1 - bld r20,4 - bst r6,1 - bld r20,5 - bst r10,1 - bld r20,6 - bst r14,1 - bld r20,7 - bst r2,2 - bld r21,0 - bst r6,2 - bld r21,1 - bst r10,2 - bld r21,2 - bst r14,2 - bld r21,3 - bst r2,3 - bld r21,4 - bst r6,3 - bld r21,5 - bst r10,3 - bld r21,6 - bst r14,3 - bld r21,7 - bst r2,4 - bld r22,0 - bst r6,4 - bld r22,1 - bst r10,4 - bld r22,2 - bst r14,4 - bld r22,3 - bst r2,5 - bld r22,4 - bst r6,5 - bld r22,5 - bst r10,5 - bld r22,6 - bst r14,5 - bld r22,7 - bst r2,6 - bld r23,0 - bst r6,6 - bld r23,1 - bst r10,6 - bld r23,2 - bst r14,6 - bld r23,3 - bst r2,7 - bld r23,4 - bst r6,7 - bld r23,5 - bst r10,7 - bld r23,6 - bst r14,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r3,0 - bld r20,0 - bst r7,0 - bld r20,1 - bst r11,0 - bld r20,2 - bst r15,0 - bld r20,3 - bst r3,1 - bld r20,4 - bst r7,1 - bld r20,5 - bst r11,1 - bld r20,6 - bst r15,1 - bld r20,7 - bst r3,2 - bld r21,0 - bst r7,2 - bld r21,1 - bst r11,2 - bld r21,2 - bst r15,2 - bld r21,3 - bst r3,3 - bld r21,4 - bst r7,3 - bld r21,5 - bst r11,3 - bld r21,6 - bst r15,3 - bld r21,7 - bst r3,4 - bld r22,0 - bst r7,4 - bld r22,1 - bst r11,4 - bld r22,2 - bst r15,4 - bld r22,3 - bst r3,5 - bld r22,4 - bst r7,5 - bld r22,5 - bst r11,5 - bld r22,6 - bst r15,5 - bld r22,7 - bst r3,6 - bld r23,0 - bst r7,6 - bld r23,1 - bst r11,6 - bld r23,2 - bst r15,6 - bld r23,3 - bst r3,7 - bld r23,4 - bst r7,7 - bld r23,5 - bst r11,7 - bld r23,6 - bst r15,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r4,0 - bld r20,0 - bst r8,0 - bld r20,1 - bst r12,0 - bld r20,2 - bst r24,0 - bld r20,3 - bst r4,1 - bld r20,4 - bst r8,1 - bld r20,5 - bst r12,1 - bld r20,6 - bst r24,1 
- bld r20,7 - bst r4,2 - bld r21,0 - bst r8,2 - bld r21,1 - bst r12,2 - bld r21,2 - bst r24,2 - bld r21,3 - bst r4,3 - bld r21,4 - bst r8,3 - bld r21,5 - bst r12,3 - bld r21,6 - bst r24,3 - bld r21,7 - bst r4,4 - bld r22,0 - bst r8,4 - bld r22,1 - bst r12,4 - bld r22,2 - bst r24,4 - bld r22,3 - bst r4,5 - bld r22,4 - bst r8,5 - bld r22,5 - bst r12,5 - bld r22,6 - bst r24,5 - bld r22,7 - bst r4,6 - bld r23,0 - bst r8,6 - bld r23,1 - bst r12,6 - bld r23,2 - bst r24,6 - bld r23,3 - bst r4,7 - bld r23,4 - bst r8,7 - bld r23,5 - bst r12,7 - bld r23,6 - bst r24,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r5,0 - bld r20,0 - bst r9,0 - bld r20,1 - bst r13,0 - bld r20,2 - bst r25,0 - bld r20,3 - bst r5,1 - bld r20,4 - bst r9,1 - bld r20,5 - bst r13,1 - bld r20,6 - bst r25,1 - bld r20,7 - bst r5,2 - bld r21,0 - bst r9,2 - bld r21,1 - bst r13,2 - bld r21,2 - bst r25,2 - bld r21,3 - bst r5,3 - bld r21,4 - bst r9,3 - bld r21,5 - bst r13,3 - bld r21,6 - bst r25,3 - bld r21,7 - bst r5,4 - bld r22,0 - bst r9,4 - bld r22,1 - bst r13,4 - bld r22,2 - bst r25,4 - bld r22,3 - bst r5,5 - bld r22,4 - bst r9,5 - bld r22,5 - bst r13,5 - bld r22,6 - bst r25,5 - bld r22,7 - bst r5,6 - bld r23,0 - bst r9,6 - bld r23,1 - bst r13,6 - bld r23,2 - bst r25,6 - bld r23,3 - bst r5,7 - bld r23,4 - bst r9,7 - bld r23,5 - bst r13,7 - bld r23,6 - bst r25,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128t_encrypt, .-gift128t_encrypt - - .text -.global gift128t_decrypt - .type gift128t_decrypt, @function -gift128t_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 36 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r2,0 - bst r20,1 - bld r6,0 - bst r20,2 - bld r10,0 - bst r20,3 - bld r14,0 - bst r20,4 - bld r2,1 - bst r20,5 - bld r6,1 - bst r20,6 - bld r10,1 - bst r20,7 - bld r14,1 - bst r21,0 - bld r2,2 - bst r21,1 - bld r6,2 - bst r21,2 - bld r10,2 - bst r21,3 - bld r14,2 - bst r21,4 - bld r2,3 - bst r21,5 - bld r6,3 - bst r21,6 - bld r10,3 - bst r21,7 - bld r14,3 - bst r22,0 - bld r2,4 - bst r22,1 - bld r6,4 - bst r22,2 - bld r10,4 - bst r22,3 - bld r14,4 - bst r22,4 - bld r2,5 - bst r22,5 - bld r6,5 - bst r22,6 - bld r10,5 - bst r22,7 - bld r14,5 - bst r23,0 - bld r2,6 - bst r23,1 - bld r6,6 - bst r23,2 - bld r10,6 - bst r23,3 - bld r14,6 - bst r23,4 - bld r2,7 - bst r23,5 - bld r6,7 - bst r23,6 - bld r10,7 - bst r23,7 - bld r14,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r3,0 - bst r20,1 - bld r7,0 - bst r20,2 - bld r11,0 - bst r20,3 - bld r15,0 - bst r20,4 - bld r3,1 - bst r20,5 - bld r7,1 - bst r20,6 - bld r11,1 - bst r20,7 - bld r15,1 - bst r21,0 - bld r3,2 - bst r21,1 - bld r7,2 - bst r21,2 - bld r11,2 - bst r21,3 - bld r15,2 - bst r21,4 - bld r3,3 - bst r21,5 - bld r7,3 - bst r21,6 - bld r11,3 - bst r21,7 - bld r15,3 - bst r22,0 - bld r3,4 - bst r22,1 - bld r7,4 - bst r22,2 - bld r11,4 - bst r22,3 - bld r15,4 - bst r22,4 - bld r3,5 - bst r22,5 - bld r7,5 - bst 
r22,6 - bld r11,5 - bst r22,7 - bld r15,5 - bst r23,0 - bld r3,6 - bst r23,1 - bld r7,6 - bst r23,2 - bld r11,6 - bst r23,3 - bld r15,6 - bst r23,4 - bld r3,7 - bst r23,5 - bld r7,7 - bst r23,6 - bld r11,7 - bst r23,7 - bld r15,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r4,0 - bst r20,1 - bld r8,0 - bst r20,2 - bld r12,0 - bst r20,3 - bld r24,0 - bst r20,4 - bld r4,1 - bst r20,5 - bld r8,1 - bst r20,6 - bld r12,1 - bst r20,7 - bld r24,1 - bst r21,0 - bld r4,2 - bst r21,1 - bld r8,2 - bst r21,2 - bld r12,2 - bst r21,3 - bld r24,2 - bst r21,4 - bld r4,3 - bst r21,5 - bld r8,3 - bst r21,6 - bld r12,3 - bst r21,7 - bld r24,3 - bst r22,0 - bld r4,4 - bst r22,1 - bld r8,4 - bst r22,2 - bld r12,4 - bst r22,3 - bld r24,4 - bst r22,4 - bld r4,5 - bst r22,5 - bld r8,5 - bst r22,6 - bld r12,5 - bst r22,7 - bld r24,5 - bst r23,0 - bld r4,6 - bst r23,1 - bld r8,6 - bst r23,2 - bld r12,6 - bst r23,3 - bld r24,6 - bst r23,4 - bld r4,7 - bst r23,5 - bld r8,7 - bst r23,6 - bld r12,7 - bst r23,7 - bld r24,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r5,0 - bst r20,1 - bld r9,0 - bst r20,2 - bld r13,0 - bst r20,3 - bld r25,0 - bst r20,4 - bld r5,1 - bst r20,5 - bld r9,1 - bst r20,6 - bld r13,1 - bst r20,7 - bld r25,1 - bst r21,0 - bld r5,2 - bst r21,1 - bld r9,2 - bst r21,2 - bld r13,2 - bst r21,3 - bld r25,2 - bst r21,4 - bld r5,3 - bst r21,5 - bld r9,3 - bst r21,6 - bld r13,3 - bst r21,7 - bld r25,3 - bst r22,0 - bld r5,4 - bst r22,1 - bld r9,4 - bst r22,2 - bld r13,4 - bst r22,3 - bld r25,4 - bst r22,4 - bld r5,5 - bst r22,5 - bld r9,5 - bst r22,6 - bld r13,5 - bst r22,7 - bld r25,5 - bst r23,0 - bld r5,6 - bst r23,1 - bld r9,6 - bst r23,2 - bld r13,6 - bst r23,3 - bld r25,6 - bst r23,4 - bld r5,7 - bst r23,5 - bld r9,7 - bst r23,6 - bld r13,7 - bst r23,7 - bld r25,7 - ld r26,Z - ldd r27,Z+1 - ldd r16,Z+2 - ldd r17,Z+3 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r16 - std Y+4,r17 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r16,Z+6 - ldd r17,Z+7 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r16 - std Y+8,r17 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r16,Z+10 - ldd r17,Z+11 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r16 - std Y+12,r17 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r16,Z+14 - ldd r17,Z+15 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r16 - std Y+16,r17 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r19,40 - mov r26,r1 -375: - ldd r0,Y+13 - ldd r20,Y+9 - std Y+9,r0 - ldd r0,Y+5 - std Y+5,r20 - ldd r20,Y+1 - std Y+1,r0 - ldd r0,Y+14 - ldd r21,Y+10 - std Y+10,r0 - ldd r0,Y+6 - std Y+6,r21 - ldd r21,Y+2 - std Y+2,r0 - ldd r0,Y+15 - ldd r22,Y+11 - std Y+11,r0 - ldd r0,Y+7 - std Y+7,r22 - ldd r22,Y+3 - std Y+3,r0 - ldd r0,Y+16 - ldd r23,Y+12 - std Y+12,r0 - ldd r0,Y+8 - 
std Y+8,r23 - ldd r23,Y+4 - std Y+4,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r0 - lsr r21 - ror r20 - ror r0 - lsr r21 - ror r20 - ror r0 - lsr r21 - ror r20 - ror r0 - or r21,r0 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - std Y+13,r20 - std Y+14,r21 - std Y+15,r22 - std Y+16,r23 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ldd r0,Y+5 - eor r10,r0 - ldd r0,Y+6 - eor r11,r0 - ldd r0,Y+7 - eor r12,r0 - ldd r0,Y+8 - eor r13,r0 - ldi r20,128 - eor r25,r20 - dec r19 - mov r30,r19 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - eor r14,r20 - bst r2,1 - bld r0,0 - bst r5,0 - bld r2,1 - bst r2,6 - bld r5,0 - bst r4,1 - bld r2,6 - bst r5,4 - bld r4,1 - bst r2,7 - bld r5,4 - bst r3,1 - bld r2,7 - bst r5,2 - bld r3,1 - bst r4,6 - bld r5,2 - bst r4,5 - bld r4,6 - bst r5,5 - bld r4,5 - bst r5,7 - bld r5,5 - bst r3,7 - bld r5,7 - bst r3,3 - bld r3,7 - bst r3,2 - bld r3,3 - bst r4,2 - bld r3,2 - bst r4,4 - bld r4,2 - bst r2,5 - bld r4,4 - bst r5,1 - bld r2,5 - bst r5,6 - bld r5,1 - bst r4,7 - bld r5,6 - bst r3,5 - bld r4,7 - bst r5,3 - bld r3,5 - bst r3,6 - bld r5,3 - bst r4,3 - bld r3,6 - bst r3,4 - bld r4,3 - bst r2,3 - bld r3,4 - bst r3,0 - bld r2,3 - bst r2,2 - bld r3,0 - bst r4,0 - bld r2,2 - bst r2,4 - bld r4,0 - bst r0,0 - bld r2,4 - bst r6,0 - bld r0,0 - bst r7,0 - bld r6,0 - bst r7,2 - bld r7,0 - bst r9,2 - bld r7,2 - bst r9,6 - bld r9,2 - bst r9,7 - bld r9,6 - bst r8,7 - bld r9,7 - bst r8,5 - bld r8,7 - bst r6,5 - bld r8,5 - bst r6,1 - bld r6,5 - bst r0,0 - bld r6,1 - bst r6,2 - bld r0,0 - bst r9,0 - bld r6,2 - bst r7,6 - bld r9,0 - bst r9,3 - bld r7,6 - bst r8,6 - bld r9,3 - bst r9,5 - bld r8,6 - bst r6,7 - bld r9,5 - bst r8,1 - bld r6,7 - bst r6,4 - bld r8,1 - bst r7,1 - bld r6,4 - bst r0,0 - bld r7,1 - bst r6,3 - bld r0,0 - bst r8,0 - bld r6,3 - bst r7,4 - bld r8,0 - bst r7,3 - bld r7,4 - bst r8,2 - bld r7,3 - bst r9,4 - bld r8,2 - bst r7,7 - bld r9,4 - bst r8,3 - bld r7,7 - bst r8,4 - bld r8,3 - bst r7,5 - bld r8,4 - bst r0,0 - bld r7,5 - bst r6,6 - bld r0,0 - bst r9,1 - bld r6,6 - bst r0,0 - bld r9,1 - bst r10,0 - bld r0,0 - bst r12,0 - bld r10,0 - bst r12,4 - bld r12,0 - bst r12,5 - bld r12,4 - bst r11,5 - bld r12,5 - bst r11,3 - bld r11,5 - bst r13,2 - bld r11,3 - bst r10,6 - bld r13,2 - bst r10,1 - bld r10,6 - bst r11,0 - bld r10,1 - bst r12,2 - bld r11,0 - bst r10,4 - bld r12,2 - bst r12,1 - bld r10,4 - bst r11,4 - bld r12,1 - bst r12,3 - bld r11,4 - bst r13,4 - bld r12,3 - bst r12,7 - bld r13,4 - bst r13,5 - bld r12,7 - bst r11,7 - bld r13,5 - bst r13,3 - bld r11,7 - bst r13,6 - bld r13,3 - bst r10,7 - bld r13,6 - bst r13,1 - bld r10,7 - bst r11,6 - bld r13,1 - bst r10,3 - bld r11,6 - bst r13,0 - bld r10,3 - bst r12,6 - bld r13,0 - bst r10,5 - bld r12,6 - bst r11,1 - bld r10,5 - bst r11,2 - bld r11,1 - bst r10,2 - bld r11,2 - bst r0,0 - bld r10,2 - bst r14,0 - bld r0,0 - bst r25,0 - bld r14,0 - bst r25,6 - bld r25,0 - bst r15,7 - bld r25,6 - bst r14,3 - bld r15,7 - bst r0,0 - bld r14,3 - bst r14,1 - bld r0,0 - bst r24,0 - bld r14,1 - bst r25,4 - bld r24,0 - bst r25,7 - bld r25,4 - bst r14,7 - bld r25,7 - bst r0,0 - bld r14,7 - bst r14,2 - bld r0,0 - bst r15,0 - bld r14,2 - bst r25,2 - bld r15,0 - bst r15,6 - bld r25,2 - bst r15,3 - bld r15,6 - bst r0,0 - bld r15,3 - bst r14,4 - bld r0,0 - bst r25,1 - bld r14,4 - bst r24,6 - bld r25,1 - bst r15,5 - bld r24,6 - bst r24,3 - bld r15,5 - bst r0,0 - bld r24,3 - bst r14,5 - bld r0,0 - bst r24,1 - bld 
r14,5 - bst r24,4 - bld r24,1 - bst r25,5 - bld r24,4 - bst r24,7 - bld r25,5 - bst r0,0 - bld r24,7 - bst r14,6 - bld r0,0 - bst r15,1 - bld r14,6 - bst r24,2 - bld r15,1 - bst r15,4 - bld r24,2 - bst r25,3 - bld r15,4 - bst r0,0 - bld r25,3 - movw r20,r14 - movw r22,r24 - movw r14,r2 - movw r24,r4 - movw r2,r20 - movw r4,r22 - and r20,r6 - and r21,r7 - and r22,r8 - and r23,r9 - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - com r14 - com r15 - com r24 - com r25 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - cp r19,r1 - breq 791f - inc r26 - ldi r27,5 - cpse r26,r27 - rjmp 375b - mov r26,r1 - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rjmp 375b -791: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - bst r2,0 - bld r20,0 - bst r6,0 - bld r20,1 - bst r10,0 - bld r20,2 - bst r14,0 - bld r20,3 - bst r2,1 - bld r20,4 - bst r6,1 - bld r20,5 - bst r10,1 - bld r20,6 - bst r14,1 - bld r20,7 - bst r2,2 - bld r21,0 - bst r6,2 - bld r21,1 - bst r10,2 - bld r21,2 - bst r14,2 - bld r21,3 - bst r2,3 - bld r21,4 - bst r6,3 - bld r21,5 - bst r10,3 - bld r21,6 - bst r14,3 - bld r21,7 - bst r2,4 - bld r22,0 - bst r6,4 - bld r22,1 - bst r10,4 - bld r22,2 - bst r14,4 - bld r22,3 - bst r2,5 - bld r22,4 - bst r6,5 - bld r22,5 - bst r10,5 - bld r22,6 - bst r14,5 - bld r22,7 - bst r2,6 - bld r23,0 - bst r6,6 - bld r23,1 - bst r10,6 - bld r23,2 - bst r14,6 - bld r23,3 - bst r2,7 - bld r23,4 - bst r6,7 - bld r23,5 - bst r10,7 - bld r23,6 - bst r14,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r3,0 - bld r20,0 - bst r7,0 - bld r20,1 - bst r11,0 - bld r20,2 - bst r15,0 - bld r20,3 - bst r3,1 - bld r20,4 - bst r7,1 - bld r20,5 - bst r11,1 - bld r20,6 - bst r15,1 - bld r20,7 - bst r3,2 - bld r21,0 - bst r7,2 - bld r21,1 - bst r11,2 - bld r21,2 - bst r15,2 - bld r21,3 - bst r3,3 - bld r21,4 - bst r7,3 - bld r21,5 - bst r11,3 - bld r21,6 - bst r15,3 - bld r21,7 - bst r3,4 - bld r22,0 - bst r7,4 - bld r22,1 - bst r11,4 - bld r22,2 - bst r15,4 - bld r22,3 - bst r3,5 - bld r22,4 - bst r7,5 - bld r22,5 - bst r11,5 - bld r22,6 - bst r15,5 - bld r22,7 - bst r3,6 - bld r23,0 - bst r7,6 - bld r23,1 - bst r11,6 - bld r23,2 - bst r15,6 - bld r23,3 - bst r3,7 - bld r23,4 - bst r7,7 - bld r23,5 - bst r11,7 - bld r23,6 - bst r15,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r4,0 - bld r20,0 - bst r8,0 - bld r20,1 - bst r12,0 - bld r20,2 - bst r24,0 - bld r20,3 - bst r4,1 - bld r20,4 - bst r8,1 - bld r20,5 - bst r12,1 - bld r20,6 - bst r24,1 - bld r20,7 - bst r4,2 - bld r21,0 - bst r8,2 - bld r21,1 - bst r12,2 - bld r21,2 - bst r24,2 - bld r21,3 - bst r4,3 - bld r21,4 - bst r8,3 - bld r21,5 - bst r12,3 - bld r21,6 - bst r24,3 - bld r21,7 - bst r4,4 - bld r22,0 - bst r8,4 - bld r22,1 - bst r12,4 - bld r22,2 - bst r24,4 - bld r22,3 - bst r4,5 - bld r22,4 - bst r8,5 - bld r22,5 - bst r12,5 - bld r22,6 - bst r24,5 - bld r22,7 - bst r4,6 - bld r23,0 - bst r8,6 - bld r23,1 - bst r12,6 - bld r23,2 - bst r24,6 - bld r23,3 - bst r4,7 - bld 
r23,4 - bst r8,7 - bld r23,5 - bst r12,7 - bld r23,6 - bst r24,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r5,0 - bld r20,0 - bst r9,0 - bld r20,1 - bst r13,0 - bld r20,2 - bst r25,0 - bld r20,3 - bst r5,1 - bld r20,4 - bst r9,1 - bld r20,5 - bst r13,1 - bld r20,6 - bst r25,1 - bld r20,7 - bst r5,2 - bld r21,0 - bst r9,2 - bld r21,1 - bst r13,2 - bld r21,2 - bst r25,2 - bld r21,3 - bst r5,3 - bld r21,4 - bst r9,3 - bld r21,5 - bst r13,3 - bld r21,6 - bst r25,3 - bld r21,7 - bst r5,4 - bld r22,0 - bst r9,4 - bld r22,1 - bst r13,4 - bld r22,2 - bst r25,4 - bld r22,3 - bst r5,5 - bld r22,4 - bst r9,5 - bld r22,5 - bst r13,5 - bld r22,6 - bst r25,5 - bld r22,7 - bst r5,6 - bld r23,0 - bst r9,6 - bld r23,1 - bst r13,6 - bld r23,2 - bst r25,6 - bld r23,3 - bst r5,7 - bld r23,4 - bst r9,7 - bld r23,5 - bst r13,7 - bld r23,6 - bst r25,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128t_decrypt, .-gift128t_decrypt - -#endif diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128n-full-avr.S b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128n-full-avr.S deleted file mode 100644 index 3a7e6fb..0000000 --- a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128n-full-avr.S +++ /dev/null @@ -1,8173 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - .byte 3 - .byte 3 - .byte 39 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - .byte 2 - .byte 0 - .byte 80 - .byte 20 - .byte 129 - .byte 1 - .byte 2 - .byte 1 - 
.byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128n_init - .type gift128n_init, @function -gift128n_init: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 18 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - st Z+,r22 - st Z+,r23 - st Z+,r28 - st Z+,r29 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - ldi r24,4 -33: - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r29 - ror r28 - ror r0 - lsr r29 - ror r28 - ror r0 - or r29,r0 - st Z+,r22 - st Z+,r23 - st Z+,r28 - st Z+,r29 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r28 - mov r28,r4 - mov r4,r0 - mov r0,r29 - mov r29,r5 - mov r5,r0 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - mov r0,r6 - mov r6,r10 - mov r10,r0 - mov r0,r7 - mov r7,r11 - mov r11,r0 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - dec r24 - breq 5115f - rjmp 33b -5115: - subi r30,80 - sbc r31,r1 - ldi r24,2 -119: - ld r22,Z - ldd r23,Z+1 - ldd r28,Z+2 - ldd r29,Z+3 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - st Z,r29 - std Z+1,r23 - std Z+2,r28 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r28,Z+6 - ldd r29,Z+7 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 
- ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+4,r29 - std Z+5,r23 - std Z+6,r28 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r28,Z+10 - ldd r29,Z+11 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+8,r29 - std Z+9,r23 - std Z+10,r28 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r28,Z+14 - ldd r29,Z+15 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 
- ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+12,r29 - std Z+13,r23 - std Z+14,r28 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r28,Z+18 - ldd r29,Z+19 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+16,r29 - std Z+17,r23 - std Z+18,r28 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r28,Z+22 - ldd r29,Z+23 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - 
mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+20,r29 - std Z+21,r23 - std Z+22,r28 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r28,Z+26 - ldd r29,Z+27 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+24,r29 - std Z+25,r23 - std Z+26,r28 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r28,Z+30 - ldd r29,Z+31 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - 
rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+28,r29 - std Z+29,r23 - std Z+30,r28 - std Z+31,r22 - dec r24 - breq 1268f - adiw r30,40 - rjmp 119b -1268: - adiw r30,40 - movw r26,r30 - subi r26,80 - sbc r27,r1 - ldi r24,6 -1274: - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r2 - eor r19,r3 - andi r18,51 - andi r19,51 - eor r2,r18 - eor r3,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - st Z,r2 - std Z+1,r3 - std Z+2,r4 - std Z+3,r5 - movw r18,r22 - movw r20,r28 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - andi r28,204 - andi r29,204 - or r28,r21 - or r29,r18 - or r22,r19 - or r23,r20 - movw r18,r28 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r28 - eor r19,r29 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r28,r18 - eor r29,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r28,r18 - eor r29,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r28 - std Z+5,r29 - std Z+6,r22 - std Z+7,r23 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - swap r3 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - swap r5 - std Z+8,r2 - std Z+9,r3 - std Z+10,r4 - std Z+11,r5 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r28 - adc r28,r1 - lsl r28 - adc r28,r1 - lsl r28 - adc r28,r1 - lsl r29 - adc r29,r1 - lsl r29 - adc r29,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r28 - std Z+15,r29 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - andi r18,170 - andi r19,170 - andi r20,170 - andi 
r21,170 - ldi r25,85 - and r2,r25 - and r3,r25 - and r4,r25 - and r5,r25 - or r2,r19 - or r3,r20 - or r4,r21 - or r5,r18 - std Z+16,r4 - std Z+17,r5 - std Z+18,r2 - std Z+19,r3 - movw r18,r22 - movw r20,r28 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - andi r28,170 - andi r29,170 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - or r22,r18 - or r23,r19 - or r28,r20 - or r29,r21 - std Z+20,r29 - std Z+21,r22 - std Z+22,r23 - std Z+23,r28 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r14,r18 - movw r16,r20 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - eor r14,r18 - eor r15,r19 - eor r16,r20 - eor r17,r21 - ldi r25,8 - and r14,r25 - and r15,r25 - andi r16,8 - andi r17,8 - eor r18,r14 - eor r19,r15 - eor r20,r16 - eor r21,r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - eor r18,r14 - eor r19,r15 - eor r20,r16 - eor r21,r17 - ldi r17,15 - and r2,r17 - and r3,r17 - and r4,r17 - and r5,r17 - or r2,r18 - or r3,r19 - or r4,r20 - or r5,r21 - std Z+24,r2 - std Z+25,r3 - std Z+26,r4 - std Z+27,r5 - movw r18,r28 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r2,r22 - movw r4,r28 - ldi r16,1 - and r2,r16 - and r3,r16 - and r4,r16 - and r5,r16 - lsl r2 - rol r3 - rol r4 - rol r5 - lsl r2 - rol r3 - rol r4 - rol r5 - lsl r2 - rol r3 - rol r4 - rol r5 - or r2,r18 - or r3,r19 - movw r18,r28 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r2,r18 - or r3,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r4,r18 - or r5,r19 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r2,r18 - or r3,r19 - or r4,r20 - or r5,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r4,r22 - or r5,r23 - std Z+28,r2 - std Z+29,r3 - std Z+30,r4 - std Z+31,r5 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - mov r0,r1 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - or r5,r0 - std Z+32,r3 - std Z+33,r2 - std Z+34,r4 - std Z+35,r5 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r28 - mov r28,r29 - mov r29,r0 - lsl r28 - rol r29 - adc r28,r1 - lsl r28 - rol r29 - adc r28,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r28 - std Z+39,r29 - dec r24 - breq 1733f - adiw r30,40 - rjmp 1274b -1733: - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - 
pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128n_init, .-gift128n_init - - .text -.global gift128n_encrypt - .type gift128n_encrypt, @function -gift128n_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r22,0 - bst r18,1 - bld r4,0 - bst r18,2 - bld r8,0 - bst r18,3 - bld r12,0 - bst r18,4 - bld r22,1 - bst r18,5 - bld r4,1 - bst r18,6 - bld r8,1 - bst r18,7 - bld r12,1 - bst r19,0 - bld r22,2 - bst r19,1 - bld r4,2 - bst r19,2 - bld r8,2 - bst r19,3 - bld r12,2 - bst r19,4 - bld r22,3 - bst r19,5 - bld r4,3 - bst r19,6 - bld r8,3 - bst r19,7 - bld r12,3 - bst r20,0 - bld r22,4 - bst r20,1 - bld r4,4 - bst r20,2 - bld r8,4 - bst r20,3 - bld r12,4 - bst r20,4 - bld r22,5 - bst r20,5 - bld r4,5 - bst r20,6 - bld r8,5 - bst r20,7 - bld r12,5 - bst r21,0 - bld r22,6 - bst r21,1 - bld r4,6 - bst r21,2 - bld r8,6 - bst r21,3 - bld r12,6 - bst r21,4 - bld r22,7 - bst r21,5 - bld r4,7 - bst r21,6 - bld r8,7 - bst r21,7 - bld r12,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r23,0 - bst r18,1 - bld r5,0 - bst r18,2 - bld r9,0 - bst r18,3 - bld r13,0 - bst r18,4 - bld r23,1 - bst r18,5 - bld r5,1 - bst r18,6 - bld r9,1 - bst r18,7 - bld r13,1 - bst r19,0 - bld r23,2 - bst r19,1 - bld r5,2 - bst r19,2 - bld r9,2 - bst r19,3 - bld r13,2 - bst r19,4 - bld r23,3 - bst r19,5 - bld r5,3 - bst r19,6 - bld r9,3 - bst r19,7 - bld r13,3 - bst r20,0 - bld r23,4 - bst r20,1 - bld r5,4 - bst r20,2 - bld r9,4 - bst r20,3 - bld r13,4 - bst r20,4 - bld r23,5 - bst r20,5 - bld r5,5 - bst r20,6 - bld r9,5 - bst r20,7 - bld r13,5 - bst r21,0 - bld r23,6 - bst r21,1 - bld r5,6 - bst r21,2 - bld r9,6 - bst r21,3 - bld r13,6 - bst r21,4 - bld r23,7 - bst r21,5 - bld r5,7 - bst r21,6 - bld r9,7 - bst r21,7 - bld r13,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r2,0 - bst r18,1 - bld r6,0 - bst r18,2 - bld r10,0 - bst r18,3 - bld r14,0 - bst r18,4 - bld r2,1 - bst r18,5 - bld r6,1 - bst r18,6 - bld r10,1 - bst r18,7 - bld r14,1 - bst r19,0 - bld r2,2 - bst r19,1 - bld r6,2 - bst r19,2 - bld r10,2 - bst r19,3 - bld r14,2 - bst r19,4 - bld r2,3 - bst r19,5 - bld r6,3 - bst r19,6 - bld r10,3 - bst r19,7 - bld r14,3 - bst r20,0 - bld r2,4 - bst r20,1 - bld r6,4 - bst r20,2 - bld r10,4 - bst r20,3 - bld r14,4 - bst r20,4 - bld r2,5 - bst r20,5 - bld r6,5 - bst r20,6 - bld r10,5 - bst r20,7 - bld r14,5 - bst r21,0 - bld r2,6 - bst r21,1 - bld r6,6 - bst r21,2 - bld r10,6 - bst r21,3 - bld r14,6 - bst r21,4 - bld r2,7 - bst r21,5 - bld r6,7 - bst r21,6 - bld r10,7 - bst r21,7 - bld r14,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r3,0 - bst r18,1 - bld r7,0 - bst r18,2 - bld r11,0 - bst r18,3 - bld r15,0 - bst r18,4 - bld r3,1 - bst r18,5 - bld r7,1 - bst r18,6 - bld r11,1 - bst r18,7 - bld r15,1 - bst r19,0 - bld r3,2 - bst r19,1 - bld r7,2 - bst r19,2 - bld r11,2 - bst r19,3 - bld r15,2 - bst r19,4 - bld r3,3 - bst r19,5 - bld r7,3 - bst r19,6 - bld r11,3 - bst r19,7 - bld r15,3 - bst r20,0 - bld r3,4 - bst r20,1 - bld r7,4 - bst r20,2 - bld r11,4 - bst r20,3 - bld r15,4 - bst r20,4 - bld r3,5 - bst r20,5 - bld r7,5 - bst r20,6 - bld r11,5 - bst r20,7 - bld r15,5 - bst 
r21,0 - bld r3,6 - bst r21,1 - bld r7,6 - bst r21,2 - bld r11,6 - bst r21,3 - bld r15,6 - bst r21,4 - bld r3,7 - bst r21,5 - bld r7,7 - bst r21,6 - bld r11,7 - bst r21,7 - bld r15,7 - movw r26,r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 283f - rcall 283f - rcall 283f - rcall 283f - rcall 283f - rcall 283f - rcall 283f - rcall 283f - rjmp 1021f -283: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - 
mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - 
ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor 
r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -1021: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - bst r22,0 - bld r18,0 - bst r4,0 - bld r18,1 - bst r8,0 - bld r18,2 - bst r12,0 - bld r18,3 - bst r22,1 - bld r18,4 - bst r4,1 - bld r18,5 - bst r8,1 - bld r18,6 - bst r12,1 - bld r18,7 - bst r22,2 - bld r19,0 - bst r4,2 - bld r19,1 - bst r8,2 - bld r19,2 - bst r12,2 - bld r19,3 - bst r22,3 - bld r19,4 - bst r4,3 - bld r19,5 - bst r8,3 - bld r19,6 - bst r12,3 - bld r19,7 - bst r22,4 - bld r20,0 - bst r4,4 - bld r20,1 - bst r8,4 - bld r20,2 - bst r12,4 - bld r20,3 - bst r22,5 - bld r20,4 - bst r4,5 - bld r20,5 - bst r8,5 - bld r20,6 - bst r12,5 - bld r20,7 - bst r22,6 - bld r21,0 - bst r4,6 - bld r21,1 - bst r8,6 - bld r21,2 - bst r12,6 - bld r21,3 - bst r22,7 - bld r21,4 - bst r4,7 - bld r21,5 - bst r8,7 - bld r21,6 - bst r12,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r23,0 - bld r18,0 - bst r5,0 - bld r18,1 - bst r9,0 - bld r18,2 - bst r13,0 - bld r18,3 - bst r23,1 - bld r18,4 - bst r5,1 - bld r18,5 - bst r9,1 - bld r18,6 - bst r13,1 - bld r18,7 - bst r23,2 - bld r19,0 - bst r5,2 - bld r19,1 - bst r9,2 - bld r19,2 - bst r13,2 - bld r19,3 - bst r23,3 - bld r19,4 - bst r5,3 - bld r19,5 - bst r9,3 - bld r19,6 - bst r13,3 - bld r19,7 - bst r23,4 - bld r20,0 - bst r5,4 - bld r20,1 - bst r9,4 - bld r20,2 - bst r13,4 - bld r20,3 - bst r23,5 - bld r20,4 - bst r5,5 - bld r20,5 - bst r9,5 - bld r20,6 - bst r13,5 - bld r20,7 - bst r23,6 - bld r21,0 - bst r5,6 - bld r21,1 - bst r9,6 - bld r21,2 - bst r13,6 - bld r21,3 - bst r23,7 - bld r21,4 - bst r5,7 - bld r21,5 - bst r9,7 - bld r21,6 - bst r13,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r2,0 - bld r18,0 - bst r6,0 - bld r18,1 - bst r10,0 - bld r18,2 - bst r14,0 - bld r18,3 - bst r2,1 - bld r18,4 - bst r6,1 - bld r18,5 - bst r10,1 - bld r18,6 - bst r14,1 - bld r18,7 - bst r2,2 - bld r19,0 - bst r6,2 - bld r19,1 - bst r10,2 - bld r19,2 - bst r14,2 - bld r19,3 - bst r2,3 - bld r19,4 - bst r6,3 - bld r19,5 - bst r10,3 - bld r19,6 - bst r14,3 - bld r19,7 - 
bst r2,4 - bld r20,0 - bst r6,4 - bld r20,1 - bst r10,4 - bld r20,2 - bst r14,4 - bld r20,3 - bst r2,5 - bld r20,4 - bst r6,5 - bld r20,5 - bst r10,5 - bld r20,6 - bst r14,5 - bld r20,7 - bst r2,6 - bld r21,0 - bst r6,6 - bld r21,1 - bst r10,6 - bld r21,2 - bst r14,6 - bld r21,3 - bst r2,7 - bld r21,4 - bst r6,7 - bld r21,5 - bst r10,7 - bld r21,6 - bst r14,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r3,0 - bld r18,0 - bst r7,0 - bld r18,1 - bst r11,0 - bld r18,2 - bst r15,0 - bld r18,3 - bst r3,1 - bld r18,4 - bst r7,1 - bld r18,5 - bst r11,1 - bld r18,6 - bst r15,1 - bld r18,7 - bst r3,2 - bld r19,0 - bst r7,2 - bld r19,1 - bst r11,2 - bld r19,2 - bst r15,2 - bld r19,3 - bst r3,3 - bld r19,4 - bst r7,3 - bld r19,5 - bst r11,3 - bld r19,6 - bst r15,3 - bld r19,7 - bst r3,4 - bld r20,0 - bst r7,4 - bld r20,1 - bst r11,4 - bld r20,2 - bst r15,4 - bld r20,3 - bst r3,5 - bld r20,4 - bst r7,5 - bld r20,5 - bst r11,5 - bld r20,6 - bst r15,5 - bld r20,7 - bst r3,6 - bld r21,0 - bst r7,6 - bld r21,1 - bst r11,6 - bld r21,2 - bst r15,6 - bld r21,3 - bst r3,7 - bld r21,4 - bst r7,7 - bld r21,5 - bst r11,7 - bld r21,6 - bst r15,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - pop r0 - pop r0 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128n_encrypt, .-gift128n_encrypt - - .text -.global gift128n_decrypt - .type gift128n_decrypt, @function -gift128n_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r22,0 - bst r18,1 - bld r4,0 - bst r18,2 - bld r8,0 - bst r18,3 - bld r12,0 - bst r18,4 - bld r22,1 - bst r18,5 - bld r4,1 - bst r18,6 - bld r8,1 - bst r18,7 - bld r12,1 - bst r19,0 - bld r22,2 - bst r19,1 - bld r4,2 - bst r19,2 - bld r8,2 - bst r19,3 - bld r12,2 - bst r19,4 - bld r22,3 - bst r19,5 - bld r4,3 - bst r19,6 - bld r8,3 - bst r19,7 - bld r12,3 - bst r20,0 - bld r22,4 - bst r20,1 - bld r4,4 - bst r20,2 - bld r8,4 - bst r20,3 - bld r12,4 - bst r20,4 - bld r22,5 - bst r20,5 - bld r4,5 - bst r20,6 - bld r8,5 - bst r20,7 - bld r12,5 - bst r21,0 - bld r22,6 - bst r21,1 - bld r4,6 - bst r21,2 - bld r8,6 - bst r21,3 - bld r12,6 - bst r21,4 - bld r22,7 - bst r21,5 - bld r4,7 - bst r21,6 - bld r8,7 - bst r21,7 - bld r12,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r23,0 - bst r18,1 - bld r5,0 - bst r18,2 - bld r9,0 - bst r18,3 - bld r13,0 - bst r18,4 - bld r23,1 - bst r18,5 - bld r5,1 - bst r18,6 - bld r9,1 - bst r18,7 - bld r13,1 - bst r19,0 - bld r23,2 - bst r19,1 - bld r5,2 - bst r19,2 - bld r9,2 - bst r19,3 - bld r13,2 - bst r19,4 - bld r23,3 - bst r19,5 - bld r5,3 - bst r19,6 - bld r9,3 - bst r19,7 - bld r13,3 - bst r20,0 - bld r23,4 - bst r20,1 - bld r5,4 - bst r20,2 - bld r9,4 - bst r20,3 - bld r13,4 - bst r20,4 - bld r23,5 - bst r20,5 - bld r5,5 - bst r20,6 - bld r9,5 - bst r20,7 - bld r13,5 - bst r21,0 - bld r23,6 - bst r21,1 - bld r5,6 - bst r21,2 - bld r9,6 - bst r21,3 - bld r13,6 - bst r21,4 - bld r23,7 - bst r21,5 - bld r5,7 - bst r21,6 - bld r9,7 - bst r21,7 - bld r13,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r2,0 - bst r18,1 - bld r6,0 - bst r18,2 - bld r10,0 - bst r18,3 
- bld r14,0 - bst r18,4 - bld r2,1 - bst r18,5 - bld r6,1 - bst r18,6 - bld r10,1 - bst r18,7 - bld r14,1 - bst r19,0 - bld r2,2 - bst r19,1 - bld r6,2 - bst r19,2 - bld r10,2 - bst r19,3 - bld r14,2 - bst r19,4 - bld r2,3 - bst r19,5 - bld r6,3 - bst r19,6 - bld r10,3 - bst r19,7 - bld r14,3 - bst r20,0 - bld r2,4 - bst r20,1 - bld r6,4 - bst r20,2 - bld r10,4 - bst r20,3 - bld r14,4 - bst r20,4 - bld r2,5 - bst r20,5 - bld r6,5 - bst r20,6 - bld r10,5 - bst r20,7 - bld r14,5 - bst r21,0 - bld r2,6 - bst r21,1 - bld r6,6 - bst r21,2 - bld r10,6 - bst r21,3 - bld r14,6 - bst r21,4 - bld r2,7 - bst r21,5 - bld r6,7 - bst r21,6 - bld r10,7 - bst r21,7 - bld r14,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r3,0 - bst r18,1 - bld r7,0 - bst r18,2 - bld r11,0 - bst r18,3 - bld r15,0 - bst r18,4 - bld r3,1 - bst r18,5 - bld r7,1 - bst r18,6 - bld r11,1 - bst r18,7 - bld r15,1 - bst r19,0 - bld r3,2 - bst r19,1 - bld r7,2 - bst r19,2 - bld r11,2 - bst r19,3 - bld r15,2 - bst r19,4 - bld r3,3 - bst r19,5 - bld r7,3 - bst r19,6 - bld r11,3 - bst r19,7 - bld r15,3 - bst r20,0 - bld r3,4 - bst r20,1 - bld r7,4 - bst r20,2 - bld r11,4 - bst r20,3 - bld r15,4 - bst r20,4 - bld r3,5 - bst r20,5 - bld r7,5 - bst r20,6 - bld r11,5 - bst r20,7 - bld r15,5 - bst r21,0 - bld r3,6 - bst r21,1 - bld r7,6 - bst r21,2 - bld r11,6 - bst r21,3 - bld r15,6 - bst r21,4 - bld r3,7 - bst r21,5 - bld r7,7 - bst r21,6 - bld r11,7 - bst r21,7 - bld r15,7 - movw r26,r30 - subi r26,192 - sbci r27,254 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,160 - rcall 286f - rcall 286f - rcall 286f - rcall 286f - rcall 286f - rcall 286f - rcall 286f - rcall 286f - rjmp 1024f -286: - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 
- eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r1 - lsr r22 - ror r0 - lsr r22 - ror r0 - or r22,r0 - mov r0,r1 - lsr r23 - ror r0 - lsr r23 - ror r0 - or r23,r0 - mov r0,r1 - lsr r2 - ror r0 - lsr r2 - ror r0 - or r2,r0 - mov r0,r1 - lsr r3 - ror r0 - lsr r3 - ror r0 - or r3,r0 - swap r4 - swap r5 - swap r6 - swap r7 - lsl r8 - adc r8,r1 - lsl r8 - adc r8,r1 - lsl r9 - adc r9,r1 - lsl r9 - adc r9,r1 - lsl r10 - adc r10,r1 - lsl r10 - adc r10,r1 - lsl r11 - adc r11,r1 - lsl r11 - adc r11,r1 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - com r22 - com r23 - com r2 - com r3 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - 
eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - com r22 - com r23 - com r2 - com r3 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov 
r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,119 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r15 - ror r14 - ror r13 - ror r12 - lsr r15 - ror r14 - ror r13 - ror r12 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,17 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -1024: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - bst r22,0 - bld r18,0 - bst r4,0 - bld r18,1 - bst r8,0 - bld r18,2 - bst r12,0 - bld r18,3 - bst r22,1 - bld r18,4 - bst r4,1 - bld r18,5 - bst r8,1 - bld r18,6 - bst r12,1 - bld r18,7 - bst r22,2 - bld r19,0 - bst r4,2 - bld r19,1 - bst r8,2 - bld r19,2 - bst r12,2 - bld r19,3 - bst r22,3 - bld r19,4 - bst r4,3 - bld r19,5 - bst r8,3 - bld r19,6 - bst r12,3 - bld r19,7 - bst r22,4 - bld r20,0 - bst r4,4 - bld r20,1 - bst r8,4 - bld r20,2 - bst r12,4 - bld r20,3 - bst r22,5 - bld r20,4 - bst r4,5 - bld r20,5 - bst r8,5 - bld r20,6 - bst r12,5 - bld r20,7 - bst r22,6 - bld r21,0 - bst r4,6 - bld r21,1 - bst r8,6 - bld r21,2 - bst 
r12,6 - bld r21,3 - bst r22,7 - bld r21,4 - bst r4,7 - bld r21,5 - bst r8,7 - bld r21,6 - bst r12,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r23,0 - bld r18,0 - bst r5,0 - bld r18,1 - bst r9,0 - bld r18,2 - bst r13,0 - bld r18,3 - bst r23,1 - bld r18,4 - bst r5,1 - bld r18,5 - bst r9,1 - bld r18,6 - bst r13,1 - bld r18,7 - bst r23,2 - bld r19,0 - bst r5,2 - bld r19,1 - bst r9,2 - bld r19,2 - bst r13,2 - bld r19,3 - bst r23,3 - bld r19,4 - bst r5,3 - bld r19,5 - bst r9,3 - bld r19,6 - bst r13,3 - bld r19,7 - bst r23,4 - bld r20,0 - bst r5,4 - bld r20,1 - bst r9,4 - bld r20,2 - bst r13,4 - bld r20,3 - bst r23,5 - bld r20,4 - bst r5,5 - bld r20,5 - bst r9,5 - bld r20,6 - bst r13,5 - bld r20,7 - bst r23,6 - bld r21,0 - bst r5,6 - bld r21,1 - bst r9,6 - bld r21,2 - bst r13,6 - bld r21,3 - bst r23,7 - bld r21,4 - bst r5,7 - bld r21,5 - bst r9,7 - bld r21,6 - bst r13,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r2,0 - bld r18,0 - bst r6,0 - bld r18,1 - bst r10,0 - bld r18,2 - bst r14,0 - bld r18,3 - bst r2,1 - bld r18,4 - bst r6,1 - bld r18,5 - bst r10,1 - bld r18,6 - bst r14,1 - bld r18,7 - bst r2,2 - bld r19,0 - bst r6,2 - bld r19,1 - bst r10,2 - bld r19,2 - bst r14,2 - bld r19,3 - bst r2,3 - bld r19,4 - bst r6,3 - bld r19,5 - bst r10,3 - bld r19,6 - bst r14,3 - bld r19,7 - bst r2,4 - bld r20,0 - bst r6,4 - bld r20,1 - bst r10,4 - bld r20,2 - bst r14,4 - bld r20,3 - bst r2,5 - bld r20,4 - bst r6,5 - bld r20,5 - bst r10,5 - bld r20,6 - bst r14,5 - bld r20,7 - bst r2,6 - bld r21,0 - bst r6,6 - bld r21,1 - bst r10,6 - bld r21,2 - bst r14,6 - bld r21,3 - bst r2,7 - bld r21,4 - bst r6,7 - bld r21,5 - bst r10,7 - bld r21,6 - bst r14,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r3,0 - bld r18,0 - bst r7,0 - bld r18,1 - bst r11,0 - bld r18,2 - bst r15,0 - bld r18,3 - bst r3,1 - bld r18,4 - bst r7,1 - bld r18,5 - bst r11,1 - bld r18,6 - bst r15,1 - bld r18,7 - bst r3,2 - bld r19,0 - bst r7,2 - bld r19,1 - bst r11,2 - bld r19,2 - bst r15,2 - bld r19,3 - bst r3,3 - bld r19,4 - bst r7,3 - bld r19,5 - bst r11,3 - bld r19,6 - bst r15,3 - bld r19,7 - bst r3,4 - bld r20,0 - bst r7,4 - bld r20,1 - bst r11,4 - bld r20,2 - bst r15,4 - bld r20,3 - bst r3,5 - bld r20,4 - bst r7,5 - bld r20,5 - bst r11,5 - bld r20,6 - bst r15,5 - bld r20,7 - bst r3,6 - bld r21,0 - bst r7,6 - bld r21,1 - bst r11,6 - bld r21,2 - bst r15,6 - bld r21,3 - bst r3,7 - bld r21,4 - bst r7,7 - bld r21,5 - bst r11,7 - bld r21,6 - bst r15,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - pop r0 - pop r0 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128n_decrypt, .-gift128n_decrypt - - .text -.global gift128t_encrypt - .type gift128t_encrypt, @function -gift128t_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 20 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r2,0 - bst r20,1 - bld r6,0 - bst r20,2 - bld r10,0 - bst r20,3 - bld r14,0 - bst r20,4 - bld r2,1 - bst r20,5 - bld r6,1 - bst r20,6 - bld r10,1 - bst r20,7 - bld r14,1 - bst r21,0 - bld r2,2 - bst r21,1 - bld r6,2 - bst r21,2 - bld r10,2 - bst r21,3 - bld r14,2 - bst r21,4 - bld r2,3 - bst r21,5 - bld r6,3 - 
bst r21,6 - bld r10,3 - bst r21,7 - bld r14,3 - bst r22,0 - bld r2,4 - bst r22,1 - bld r6,4 - bst r22,2 - bld r10,4 - bst r22,3 - bld r14,4 - bst r22,4 - bld r2,5 - bst r22,5 - bld r6,5 - bst r22,6 - bld r10,5 - bst r22,7 - bld r14,5 - bst r23,0 - bld r2,6 - bst r23,1 - bld r6,6 - bst r23,2 - bld r10,6 - bst r23,3 - bld r14,6 - bst r23,4 - bld r2,7 - bst r23,5 - bld r6,7 - bst r23,6 - bld r10,7 - bst r23,7 - bld r14,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r3,0 - bst r20,1 - bld r7,0 - bst r20,2 - bld r11,0 - bst r20,3 - bld r15,0 - bst r20,4 - bld r3,1 - bst r20,5 - bld r7,1 - bst r20,6 - bld r11,1 - bst r20,7 - bld r15,1 - bst r21,0 - bld r3,2 - bst r21,1 - bld r7,2 - bst r21,2 - bld r11,2 - bst r21,3 - bld r15,2 - bst r21,4 - bld r3,3 - bst r21,5 - bld r7,3 - bst r21,6 - bld r11,3 - bst r21,7 - bld r15,3 - bst r22,0 - bld r3,4 - bst r22,1 - bld r7,4 - bst r22,2 - bld r11,4 - bst r22,3 - bld r15,4 - bst r22,4 - bld r3,5 - bst r22,5 - bld r7,5 - bst r22,6 - bld r11,5 - bst r22,7 - bld r15,5 - bst r23,0 - bld r3,6 - bst r23,1 - bld r7,6 - bst r23,2 - bld r11,6 - bst r23,3 - bld r15,6 - bst r23,4 - bld r3,7 - bst r23,5 - bld r7,7 - bst r23,6 - bld r11,7 - bst r23,7 - bld r15,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r4,0 - bst r20,1 - bld r8,0 - bst r20,2 - bld r12,0 - bst r20,3 - bld r24,0 - bst r20,4 - bld r4,1 - bst r20,5 - bld r8,1 - bst r20,6 - bld r12,1 - bst r20,7 - bld r24,1 - bst r21,0 - bld r4,2 - bst r21,1 - bld r8,2 - bst r21,2 - bld r12,2 - bst r21,3 - bld r24,2 - bst r21,4 - bld r4,3 - bst r21,5 - bld r8,3 - bst r21,6 - bld r12,3 - bst r21,7 - bld r24,3 - bst r22,0 - bld r4,4 - bst r22,1 - bld r8,4 - bst r22,2 - bld r12,4 - bst r22,3 - bld r24,4 - bst r22,4 - bld r4,5 - bst r22,5 - bld r8,5 - bst r22,6 - bld r12,5 - bst r22,7 - bld r24,5 - bst r23,0 - bld r4,6 - bst r23,1 - bld r8,6 - bst r23,2 - bld r12,6 - bst r23,3 - bld r24,6 - bst r23,4 - bld r4,7 - bst r23,5 - bld r8,7 - bst r23,6 - bld r12,7 - bst r23,7 - bld r24,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r5,0 - bst r20,1 - bld r9,0 - bst r20,2 - bld r13,0 - bst r20,3 - bld r25,0 - bst r20,4 - bld r5,1 - bst r20,5 - bld r9,1 - bst r20,6 - bld r13,1 - bst r20,7 - bld r25,1 - bst r21,0 - bld r5,2 - bst r21,1 - bld r9,2 - bst r21,2 - bld r13,2 - bst r21,3 - bld r25,2 - bst r21,4 - bld r5,3 - bst r21,5 - bld r9,3 - bst r21,6 - bld r13,3 - bst r21,7 - bld r25,3 - bst r22,0 - bld r5,4 - bst r22,1 - bld r9,4 - bst r22,2 - bld r13,4 - bst r22,3 - bld r25,4 - bst r22,4 - bld r5,5 - bst r22,5 - bld r9,5 - bst r22,6 - bld r13,5 - bst r22,7 - bld r25,5 - bst r23,0 - bld r5,6 - bst r23,1 - bld r9,6 - bst r23,2 - bld r13,6 - bst r23,3 - bld r25,6 - bst r23,4 - bld r5,7 - bst r23,5 - bld r9,7 - bst r23,6 - bld r13,7 - bst r23,7 - bld r25,7 - movw r26,r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - rcall 311f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 311f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 311f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 311f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 311f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 311f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 311f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 311f - rjmp 1049f -311: - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - 
eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - com r14 - com r15 - com r24 - com r25 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - movw r20,r6 - movw r22,r8 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,204 - andi r21,204 - andi r22,204 - andi r23,204 - lsr r9 - ror r8 - ror r7 - ror r6 - lsr r9 - ror r8 - ror r7 - ror r6 - ldi r19,51 - and r6,r19 - and r7,r19 - and r8,r19 - and r9,r19 - or r6,r20 - or r7,r21 - or r8,r22 - or r9,r23 - movw r20,r10 - movw r22,r12 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,238 - andi r21,238 - andi r22,238 - andi r23,238 - lsr r13 - ror r12 - ror r11 - ror r10 - lsr r13 - ror r12 - ror r11 - ror r10 - lsr r13 - ror r12 - ror r11 - ror r10 - ldi r17,17 - and r10,r17 - and r11,r17 - and r12,r17 - and r13,r17 - or r10,r20 - or r11,r21 - or r12,r22 - or r13,r23 - movw r20,r14 - movw r22,r24 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,136 - andi r21,136 - andi r22,136 - andi r23,136 - lsr r25 - ror r24 - ror r15 - ror r14 - ldi r16,119 - and r14,r16 - and r15,r16 - andi r24,119 - andi r25,119 - or r14,r20 - or r15,r21 - or r24,r22 - or r25,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r24 - and r0,r12 - eor r8,r0 - mov r0,r25 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r8 - and r0,r4 - eor r24,r0 - mov r0,r9 - and r0,r5 - eor r25,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r24 - or r0,r8 - eor r12,r0 - mov r0,r25 - or r0,r9 - eor r13,r0 - eor r2,r10 - eor r3,r11 - eor r4,r12 - eor r5,r13 - eor r6,r2 - eor r7,r3 - eor r8,r4 - eor r9,r5 - com r2 - com r3 - com r4 - com r5 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r24 - and r0,r8 - eor r12,r0 - mov r0,r25 - and r0,r9 - eor r13,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov 
r0,r1 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - or r5,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r12 - rol r13 - adc r12,r1 - lsl r12 - rol r13 - adc r12,r1 - lsl r12 - rol r13 - adc r12,r1 - lsl r12 - rol r13 - adc r12,r1 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r14,r20 - eor r15,r21 - eor r24,r22 - eor r25,r23 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - com r14 - com r15 - com r24 - com r25 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - movw r20,r6 - movw r22,r8 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - andi r20,85 - andi r21,85 - andi r22,85 - andi r23,85 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - mov r0,r12 - mov r12,r10 - mov r10,r0 - mov r0,r13 - mov r13,r11 - mov r11,r0 - movw r20,r10 - movw r22,r12 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r10 - eor r21,r11 - andi r20,85 - andi r21,85 - eor r10,r20 - eor r11,r21 - mov r22,r1 - mov r23,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - mov r0,r24 - mov r24,r14 - mov r14,r0 - mov r0,r25 - mov r25,r15 - mov r15,r0 - movw r20,r24 - lsr r21 - ror r20 - eor r20,r24 - eor r21,r25 - andi r20,85 - andi r21,85 - eor r24,r20 - eor r25,r21 - lsl r20 - rol r21 - eor r24,r20 - eor r25,r21 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - 
inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r24 - and r0,r12 - eor r8,r0 - mov r0,r25 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r8 - and r0,r4 - eor r24,r0 - mov r0,r9 - and r0,r5 - eor r25,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r24 - or r0,r8 - eor r12,r0 - mov r0,r25 - or r0,r9 - eor r13,r0 - eor r2,r10 - eor r3,r11 - eor r4,r12 - eor r5,r13 - eor r6,r2 - eor r7,r3 - eor r8,r4 - eor r9,r5 - com r2 - com r3 - com r4 - com r5 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r24 - and r0,r8 - eor r12,r0 - mov r0,r25 - and r0,r9 - eor r13,r0 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r5 - adc r5,r1 - lsl r5 - adc r5,r1 - swap r6 - swap r7 - swap r8 - swap r9 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - mov r0,r1 - lsr r12 - ror r0 - lsr r12 - ror r0 - or r12,r0 - mov r0,r1 - lsr r13 - ror r0 - lsr r13 - ror r0 - or r13,r0 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r14,r20 - eor r15,r21 - eor r24,r22 - eor r25,r23 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - com r14 - com r15 - com r24 - com r25 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - mov r0,r8 - mov r8,r6 - mov r6,r0 - mov r0,r9 - mov r9,r7 - mov r7,r0 - mov r0,r10 - mov r10,r11 - mov r11,r12 - mov r12,r13 - mov r13,r0 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld 
r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - eor r14,r2 - eor r15,r3 - eor r24,r4 - eor r25,r5 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - ret -1049: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - bst r2,0 - bld r20,0 - bst r6,0 - bld r20,1 - bst r10,0 - bld r20,2 - bst r14,0 - bld r20,3 - bst r2,1 - bld r20,4 - bst r6,1 - bld r20,5 - bst r10,1 - bld r20,6 - bst r14,1 - bld r20,7 - bst r2,2 - bld r21,0 - bst r6,2 - bld r21,1 - bst r10,2 - bld r21,2 - bst r14,2 - bld r21,3 - bst r2,3 - bld r21,4 - bst r6,3 - bld r21,5 - bst r10,3 - bld r21,6 - bst r14,3 - bld r21,7 - bst r2,4 - bld r22,0 - bst r6,4 - bld r22,1 - bst r10,4 - bld r22,2 - bst r14,4 - bld r22,3 - bst r2,5 - bld r22,4 - bst r6,5 - bld r22,5 - bst r10,5 - bld r22,6 - bst r14,5 - bld r22,7 - bst r2,6 - bld r23,0 - bst r6,6 - bld r23,1 - bst r10,6 - bld r23,2 - bst r14,6 - bld r23,3 - bst r2,7 - bld r23,4 - bst r6,7 - bld r23,5 - bst r10,7 - bld r23,6 - bst r14,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r3,0 - bld r20,0 - bst r7,0 - bld r20,1 - bst r11,0 - bld r20,2 - bst r15,0 - bld r20,3 - bst r3,1 - bld r20,4 - bst r7,1 - bld r20,5 - bst r11,1 - bld r20,6 - bst r15,1 - bld r20,7 - bst r3,2 - bld r21,0 - bst r7,2 - bld r21,1 - bst r11,2 - bld r21,2 - bst r15,2 - bld r21,3 - bst r3,3 - bld r21,4 - bst r7,3 - bld r21,5 - bst r11,3 - bld r21,6 - bst r15,3 - bld r21,7 - bst r3,4 - bld r22,0 - bst r7,4 - bld r22,1 - bst r11,4 - bld r22,2 - bst r15,4 - bld r22,3 - bst r3,5 - bld r22,4 - bst r7,5 - bld r22,5 - bst r11,5 - bld r22,6 - bst r15,5 - bld r22,7 - bst r3,6 - bld r23,0 - bst r7,6 - bld r23,1 - bst r11,6 - bld r23,2 - bst r15,6 - bld r23,3 - bst r3,7 - bld r23,4 - bst r7,7 - bld r23,5 - bst r11,7 - bld r23,6 - bst r15,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r4,0 - bld r20,0 - bst r8,0 - bld r20,1 - bst r12,0 - bld r20,2 - bst r24,0 - bld r20,3 - bst r4,1 - bld r20,4 - bst r8,1 - bld r20,5 - bst r12,1 - bld r20,6 - bst r24,1 - bld r20,7 - bst r4,2 - bld r21,0 - bst r8,2 - bld r21,1 - bst r12,2 - bld r21,2 - bst r24,2 - bld r21,3 - bst r4,3 - bld r21,4 - bst r8,3 - bld r21,5 - bst r12,3 - bld r21,6 - bst r24,3 - bld r21,7 - bst r4,4 - bld r22,0 - bst r8,4 - bld r22,1 - bst r12,4 - bld r22,2 - bst r24,4 - bld r22,3 - bst r4,5 - bld r22,4 - bst r8,5 - bld r22,5 - bst r12,5 - bld r22,6 - bst r24,5 - bld r22,7 - bst r4,6 - bld r23,0 - bst r8,6 - bld r23,1 - bst r12,6 - bld r23,2 - bst r24,6 - bld r23,3 - bst r4,7 - bld r23,4 - bst r8,7 - bld r23,5 - bst r12,7 - bld r23,6 - bst r24,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r5,0 - bld r20,0 - bst r9,0 - bld r20,1 - bst r13,0 - bld r20,2 - bst r25,0 - bld r20,3 - bst 
r5,1 - bld r20,4 - bst r9,1 - bld r20,5 - bst r13,1 - bld r20,6 - bst r25,1 - bld r20,7 - bst r5,2 - bld r21,0 - bst r9,2 - bld r21,1 - bst r13,2 - bld r21,2 - bst r25,2 - bld r21,3 - bst r5,3 - bld r21,4 - bst r9,3 - bld r21,5 - bst r13,3 - bld r21,6 - bst r25,3 - bld r21,7 - bst r5,4 - bld r22,0 - bst r9,4 - bld r22,1 - bst r13,4 - bld r22,2 - bst r25,4 - bld r22,3 - bst r5,5 - bld r22,4 - bst r9,5 - bld r22,5 - bst r13,5 - bld r22,6 - bst r25,5 - bld r22,7 - bst r5,6 - bld r23,0 - bst r9,6 - bld r23,1 - bst r13,6 - bld r23,2 - bst r25,6 - bld r23,3 - bst r5,7 - bld r23,4 - bst r9,7 - bld r23,5 - bst r13,7 - bld r23,6 - bst r25,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - pop r0 - pop r0 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128t_encrypt, .-gift128t_encrypt - - .text -.global gift128t_decrypt - .type gift128t_decrypt, @function -gift128t_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 20 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r2,0 - bst r20,1 - bld r6,0 - bst r20,2 - bld r10,0 - bst r20,3 - bld r14,0 - bst r20,4 - bld r2,1 - bst r20,5 - bld r6,1 - bst r20,6 - bld r10,1 - bst r20,7 - bld r14,1 - bst r21,0 - bld r2,2 - bst r21,1 - bld r6,2 - bst r21,2 - bld r10,2 - bst r21,3 - bld r14,2 - bst r21,4 - bld r2,3 - bst r21,5 - bld r6,3 - bst r21,6 - bld r10,3 - bst r21,7 - bld r14,3 - bst r22,0 - bld r2,4 - bst r22,1 - bld r6,4 - bst r22,2 - bld r10,4 - bst r22,3 - bld r14,4 - bst r22,4 - bld r2,5 - bst r22,5 - bld r6,5 - bst r22,6 - bld r10,5 - bst r22,7 - bld r14,5 - bst r23,0 - bld r2,6 - bst r23,1 - bld r6,6 - bst r23,2 - bld r10,6 - bst r23,3 - bld r14,6 - bst r23,4 - bld r2,7 - bst r23,5 - bld r6,7 - bst r23,6 - bld r10,7 - bst r23,7 - bld r14,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r3,0 - bst r20,1 - bld r7,0 - bst r20,2 - bld r11,0 - bst r20,3 - bld r15,0 - bst r20,4 - bld r3,1 - bst r20,5 - bld r7,1 - bst r20,6 - bld r11,1 - bst r20,7 - bld r15,1 - bst r21,0 - bld r3,2 - bst r21,1 - bld r7,2 - bst r21,2 - bld r11,2 - bst r21,3 - bld r15,2 - bst r21,4 - bld r3,3 - bst r21,5 - bld r7,3 - bst r21,6 - bld r11,3 - bst r21,7 - bld r15,3 - bst r22,0 - bld r3,4 - bst r22,1 - bld r7,4 - bst r22,2 - bld r11,4 - bst r22,3 - bld r15,4 - bst r22,4 - bld r3,5 - bst r22,5 - bld r7,5 - bst r22,6 - bld r11,5 - bst r22,7 - bld r15,5 - bst r23,0 - bld r3,6 - bst r23,1 - bld r7,6 - bst r23,2 - bld r11,6 - bst r23,3 - bld r15,6 - bst r23,4 - bld r3,7 - bst r23,5 - bld r7,7 - bst r23,6 - bld r11,7 - bst r23,7 - bld r15,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r4,0 - bst r20,1 - bld r8,0 - bst r20,2 - bld r12,0 - bst r20,3 - bld r24,0 - bst r20,4 - bld r4,1 - bst r20,5 - bld r8,1 - bst r20,6 - bld r12,1 - bst r20,7 - bld r24,1 - bst r21,0 - bld r4,2 - bst r21,1 - bld r8,2 - bst r21,2 - bld r12,2 - bst r21,3 - bld r24,2 - bst r21,4 - bld r4,3 - bst r21,5 - bld r8,3 - bst r21,6 - bld r12,3 - bst r21,7 - bld r24,3 - bst r22,0 - bld r4,4 - bst r22,1 - bld r8,4 - bst r22,2 - bld r12,4 - bst r22,3 - bld r24,4 - bst r22,4 - bld r4,5 - bst r22,5 - bld r8,5 - bst r22,6 - bld r12,5 - bst r22,7 - bld r24,5 - bst r23,0 - bld 
r4,6 - bst r23,1 - bld r8,6 - bst r23,2 - bld r12,6 - bst r23,3 - bld r24,6 - bst r23,4 - bld r4,7 - bst r23,5 - bld r8,7 - bst r23,6 - bld r12,7 - bst r23,7 - bld r24,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r5,0 - bst r20,1 - bld r9,0 - bst r20,2 - bld r13,0 - bst r20,3 - bld r25,0 - bst r20,4 - bld r5,1 - bst r20,5 - bld r9,1 - bst r20,6 - bld r13,1 - bst r20,7 - bld r25,1 - bst r21,0 - bld r5,2 - bst r21,1 - bld r9,2 - bst r21,2 - bld r13,2 - bst r21,3 - bld r25,2 - bst r21,4 - bld r5,3 - bst r21,5 - bld r9,3 - bst r21,6 - bld r13,3 - bst r21,7 - bld r25,3 - bst r22,0 - bld r5,4 - bst r22,1 - bld r9,4 - bst r22,2 - bld r13,4 - bst r22,3 - bld r25,4 - bst r22,4 - bld r5,5 - bst r22,5 - bld r9,5 - bst r22,6 - bld r13,5 - bst r22,7 - bld r25,5 - bst r23,0 - bld r5,6 - bst r23,1 - bld r9,6 - bst r23,2 - bld r13,6 - bst r23,3 - bld r25,6 - bst r23,4 - bld r5,7 - bst r23,5 - bld r9,7 - bst r23,6 - bld r13,7 - bst r23,7 - bld r25,7 - movw r26,r30 - subi r26,192 - sbci r27,254 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,160 - rcall 314f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 314f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 314f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 314f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 314f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 314f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 314f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 314f - rjmp 1052f -314: - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - eor r14,r2 - eor r15,r3 - eor r24,r4 - eor r25,r5 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - dec r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - ld r23,-X - ld r22,-X - ld r21,-X - ld r20,-X - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - ld r23,-X - ld r22,-X - ld r21,-X - ld r20,-X - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - mov r0,r8 - mov r8,r6 - mov r6,r0 - mov r0,r9 - mov r9,r7 - mov r7,r0 - mov r0,r13 - mov r13,r12 - mov r12,r11 - mov r11,r10 - mov r10,r0 - mov r0,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - com r14 - com r15 - com r24 - com r25 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - 
and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - dec r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - eor r14,r20 - eor r15,r21 - eor r24,r22 - eor r25,r23 - ld r23,-X - ld r22,-X - ld r21,-X - ld r20,-X - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - ld r23,-X - ld r22,-X - ld r21,-X - ld r20,-X - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - mov r0,r1 - lsr r2 - ror r0 - lsr r2 - ror r0 - or r2,r0 - mov r0,r1 - lsr r3 - ror r0 - lsr r3 - ror r0 - or r3,r0 - mov r0,r1 - lsr r4 - ror r0 - lsr r4 - ror r0 - or r4,r0 - mov r0,r1 - lsr r5 - ror r0 - lsr r5 - ror r0 - or r5,r0 - swap r6 - swap r7 - swap r8 - swap r9 - lsl r10 - adc r10,r1 - lsl r10 - adc r10,r1 - lsl r11 - adc r11,r1 - lsl r11 - adc r11,r1 - lsl r12 - adc r12,r1 - lsl r12 - adc r12,r1 - lsl r13 - adc r13,r1 - lsl r13 - adc r13,r1 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r24 - and r0,r8 - eor r12,r0 - mov r0,r25 - and r0,r9 - eor r13,r0 - com r2 - com r3 - com r4 - com r5 - eor r6,r2 - eor r7,r3 - eor r8,r4 - eor r9,r5 - eor r2,r10 - eor r3,r11 - eor r4,r12 - eor r5,r13 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r24 - or r0,r8 - eor r12,r0 - mov r0,r25 - or r0,r9 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r8 - and r0,r4 - eor r24,r0 - mov r0,r9 - and r0,r5 - eor r25,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r24 - and r0,r12 - eor r8,r0 - mov r0,r25 - and r0,r13 - eor r9,r0 - dec r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - ld r23,-X - ld r22,-X - ld r21,-X - ld r20,-X - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - ld r23,-X - ld r22,-X - ld r21,-X - ld r20,-X - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - movw r20,r6 - movw r22,r8 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - andi r20,85 - andi r21,85 - andi r22,85 - andi r23,85 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - movw r20,r10 - movw r22,r12 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r10 
- eor r21,r11 - andi r20,85 - andi r21,85 - eor r10,r20 - eor r11,r21 - mov r22,r1 - mov r23,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - mov r0,r12 - mov r12,r10 - mov r10,r0 - mov r0,r13 - mov r13,r11 - mov r11,r0 - movw r20,r24 - lsr r21 - ror r20 - eor r20,r24 - eor r21,r25 - andi r20,85 - andi r21,85 - eor r24,r20 - eor r25,r21 - lsl r20 - rol r21 - eor r24,r20 - eor r25,r21 - mov r0,r24 - mov r24,r14 - mov r14,r0 - mov r0,r25 - mov r25,r15 - mov r15,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - com r14 - com r15 - com r24 - com r25 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - dec r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - eor r14,r20 - eor r15,r21 - eor r24,r22 - eor r25,r23 - ld r23,-X - ld r22,-X - ld r21,-X - ld r20,-X - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - ld r23,-X - ld r22,-X - ld r21,-X - ld r20,-X - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r4 - rol r5 - adc r4,r1 - lsl r4 - rol r5 - adc r4,r1 - lsl r4 - rol r5 - adc r4,r1 - lsl r4 - rol r5 - adc r4,r1 - mov r0,r7 - mov r7,r6 - mov r6,r0 - mov r0,r9 - mov r9,r8 - mov r8,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - or r13,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r24 - and r0,r8 - eor r12,r0 - mov r0,r25 - and r0,r9 - eor r13,r0 - com r2 - com r3 - com r4 - com r5 - eor r6,r2 - eor r7,r3 - eor r8,r4 - eor r9,r5 - eor r2,r10 - eor r3,r11 - eor r4,r12 - eor r5,r13 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r24 - or r0,r8 - eor r12,r0 - mov r0,r25 - or r0,r9 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r8 - and r0,r4 - eor r24,r0 - mov r0,r9 - and r0,r5 - eor r25,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r24 - and r0,r12 - eor r8,r0 - mov r0,r25 - and r0,r13 - eor r9,r0 - dec r30 -#if defined(RAMPZ) - elpm r23,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - ld r23,-X - ld r22,-X - ld r21,-X - ld r20,-X - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - ld r23,-X - ld r22,-X - ld r21,-X - ld r20,-X - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - movw r20,r6 - movw r22,r8 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,204 - andi r21,204 - andi r22,204 - andi r23,204 - lsr r9 - ror r8 - ror r7 - ror r6 - lsr r9 - ror r8 - ror r7 - ror r6 - ldi r19,51 - and r6,r19 - and r7,r19 - and r8,r19 - and r9,r19 - or r6,r20 - or r7,r21 - or r8,r22 - or r9,r23 - movw r20,r10 - movw r22,r12 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,136 - andi r21,136 - andi r22,136 - andi r23,136 - lsr r13 - ror r12 - ror r11 - ror r10 - ldi r17,119 - and r10,r17 - and r11,r17 - and r12,r17 - and r13,r17 - or r10,r20 - or r11,r21 - or r12,r22 - or r13,r23 - movw r20,r14 - movw r22,r24 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,238 - andi r21,238 - andi r22,238 - andi r23,238 - lsr r25 - ror r24 - ror r15 - ror r14 - lsr r25 - ror r24 - ror r15 - ror r14 - lsr r25 - ror r24 - ror r15 - ror r14 - ldi r16,17 - and r14,r16 - and r15,r16 - andi r24,17 - andi r25,17 - or r14,r20 - or r15,r21 - or r24,r22 - or r25,r23 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - com r14 - com r15 - com r24 - com r25 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - ret -1052: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - bst r2,0 - bld r20,0 - bst r6,0 - bld r20,1 - bst r10,0 - bld r20,2 - bst r14,0 - bld r20,3 - bst r2,1 - bld r20,4 - bst r6,1 - bld r20,5 - bst r10,1 - bld r20,6 - bst r14,1 - bld r20,7 - bst r2,2 - bld r21,0 - bst r6,2 - bld r21,1 - bst r10,2 - bld r21,2 - bst r14,2 - bld r21,3 - bst r2,3 - bld r21,4 - bst r6,3 - bld r21,5 - bst r10,3 - bld r21,6 - bst r14,3 - bld r21,7 - bst r2,4 - bld r22,0 - bst r6,4 - bld r22,1 - bst r10,4 - bld r22,2 - bst r14,4 - bld r22,3 - bst r2,5 - bld r22,4 - bst r6,5 - bld r22,5 - bst r10,5 - bld r22,6 - bst r14,5 - bld r22,7 - bst r2,6 - bld r23,0 - bst r6,6 - bld r23,1 - bst r10,6 - bld r23,2 - bst r14,6 - bld r23,3 - bst r2,7 - bld r23,4 - bst r6,7 - bld r23,5 - bst r10,7 - bld r23,6 - bst r14,7 - bld r23,7 - st 
X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r3,0 - bld r20,0 - bst r7,0 - bld r20,1 - bst r11,0 - bld r20,2 - bst r15,0 - bld r20,3 - bst r3,1 - bld r20,4 - bst r7,1 - bld r20,5 - bst r11,1 - bld r20,6 - bst r15,1 - bld r20,7 - bst r3,2 - bld r21,0 - bst r7,2 - bld r21,1 - bst r11,2 - bld r21,2 - bst r15,2 - bld r21,3 - bst r3,3 - bld r21,4 - bst r7,3 - bld r21,5 - bst r11,3 - bld r21,6 - bst r15,3 - bld r21,7 - bst r3,4 - bld r22,0 - bst r7,4 - bld r22,1 - bst r11,4 - bld r22,2 - bst r15,4 - bld r22,3 - bst r3,5 - bld r22,4 - bst r7,5 - bld r22,5 - bst r11,5 - bld r22,6 - bst r15,5 - bld r22,7 - bst r3,6 - bld r23,0 - bst r7,6 - bld r23,1 - bst r11,6 - bld r23,2 - bst r15,6 - bld r23,3 - bst r3,7 - bld r23,4 - bst r7,7 - bld r23,5 - bst r11,7 - bld r23,6 - bst r15,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r4,0 - bld r20,0 - bst r8,0 - bld r20,1 - bst r12,0 - bld r20,2 - bst r24,0 - bld r20,3 - bst r4,1 - bld r20,4 - bst r8,1 - bld r20,5 - bst r12,1 - bld r20,6 - bst r24,1 - bld r20,7 - bst r4,2 - bld r21,0 - bst r8,2 - bld r21,1 - bst r12,2 - bld r21,2 - bst r24,2 - bld r21,3 - bst r4,3 - bld r21,4 - bst r8,3 - bld r21,5 - bst r12,3 - bld r21,6 - bst r24,3 - bld r21,7 - bst r4,4 - bld r22,0 - bst r8,4 - bld r22,1 - bst r12,4 - bld r22,2 - bst r24,4 - bld r22,3 - bst r4,5 - bld r22,4 - bst r8,5 - bld r22,5 - bst r12,5 - bld r22,6 - bst r24,5 - bld r22,7 - bst r4,6 - bld r23,0 - bst r8,6 - bld r23,1 - bst r12,6 - bld r23,2 - bst r24,6 - bld r23,3 - bst r4,7 - bld r23,4 - bst r8,7 - bld r23,5 - bst r12,7 - bld r23,6 - bst r24,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r5,0 - bld r20,0 - bst r9,0 - bld r20,1 - bst r13,0 - bld r20,2 - bst r25,0 - bld r20,3 - bst r5,1 - bld r20,4 - bst r9,1 - bld r20,5 - bst r13,1 - bld r20,6 - bst r25,1 - bld r20,7 - bst r5,2 - bld r21,0 - bst r9,2 - bld r21,1 - bst r13,2 - bld r21,2 - bst r25,2 - bld r21,3 - bst r5,3 - bld r21,4 - bst r9,3 - bld r21,5 - bst r13,3 - bld r21,6 - bst r25,3 - bld r21,7 - bst r5,4 - bld r22,0 - bst r9,4 - bld r22,1 - bst r13,4 - bld r22,2 - bst r25,4 - bld r22,3 - bst r5,5 - bld r22,4 - bst r9,5 - bld r22,5 - bst r13,5 - bld r22,6 - bst r25,5 - bld r22,7 - bst r5,6 - bld r23,0 - bst r9,6 - bld r23,1 - bst r13,6 - bld r23,2 - bst r25,6 - bld r23,3 - bst r5,7 - bld r23,4 - bst r9,7 - bld r23,5 - bst r13,7 - bld r23,6 - bst r25,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - pop r0 - pop r0 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128t_decrypt, .-gift128t_decrypt - -#endif - -#endif diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128n-small-avr.S b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128n-small-avr.S deleted file mode 100644 index 6f2d68b..0000000 --- a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128n-small-avr.S +++ /dev/null @@ -1,9331 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - 
.byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - .byte 3 - .byte 3 - .byte 39 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - .byte 2 - .byte 0 - .byte 80 - .byte 20 - .byte 129 - .byte 1 - .byte 2 - .byte 1 - .byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128n_init - .type gift128n_init, @function -gift128n_init: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 16 - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - ldi r24,4 -33: - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - mov r0,r22 - mov r22,r4 - mov r4,r0 - mov r0,r23 - mov r23,r5 - mov r5,r0 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - dec r24 - breq 5115f - rjmp 33b -5115: - subi r30,80 - sbc r31,r1 - ldi r24,2 -119: - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 
- lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - st Z,r3 - std Z+1,r23 - std Z+2,r2 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+4,r3 - std Z+5,r23 - std Z+6,r2 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - 
rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+8,r3 - std Z+9,r23 - std Z+10,r2 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+12,r3 - std Z+13,r23 - std Z+14,r2 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov 
r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r3 - std Z+17,r23 - std Z+18,r2 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+20,r3 - std Z+21,r23 - std Z+22,r2 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 
- ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+24,r3 - std Z+25,r23 - std Z+26,r2 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+28,r3 - std Z+29,r23 - std Z+30,r2 - std Z+31,r22 - dec r24 - breq 1268f - adiw r30,40 - rjmp 119b -1268: - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size gift128n_init, .-gift128n_init - - .text -.global gift128n_encrypt - .type gift128n_encrypt, @function -gift128n_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ldi r24,20 -1: - ld r22,Z+ - ld r23,Z+ - ld r2,Z+ - ld r3,Z+ - std Y+1,r22 - std Y+2,r23 - std Y+3,r2 - std Y+4,r3 - adiw r28,4 - dec r24 - brne 1b - subi r28,80 - sbc r29,r1 - ld 
r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r22,0 - bst r18,1 - bld r4,0 - bst r18,2 - bld r8,0 - bst r18,3 - bld r12,0 - bst r18,4 - bld r22,1 - bst r18,5 - bld r4,1 - bst r18,6 - bld r8,1 - bst r18,7 - bld r12,1 - bst r19,0 - bld r22,2 - bst r19,1 - bld r4,2 - bst r19,2 - bld r8,2 - bst r19,3 - bld r12,2 - bst r19,4 - bld r22,3 - bst r19,5 - bld r4,3 - bst r19,6 - bld r8,3 - bst r19,7 - bld r12,3 - bst r20,0 - bld r22,4 - bst r20,1 - bld r4,4 - bst r20,2 - bld r8,4 - bst r20,3 - bld r12,4 - bst r20,4 - bld r22,5 - bst r20,5 - bld r4,5 - bst r20,6 - bld r8,5 - bst r20,7 - bld r12,5 - bst r21,0 - bld r22,6 - bst r21,1 - bld r4,6 - bst r21,2 - bld r8,6 - bst r21,3 - bld r12,6 - bst r21,4 - bld r22,7 - bst r21,5 - bld r4,7 - bst r21,6 - bld r8,7 - bst r21,7 - bld r12,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r23,0 - bst r18,1 - bld r5,0 - bst r18,2 - bld r9,0 - bst r18,3 - bld r13,0 - bst r18,4 - bld r23,1 - bst r18,5 - bld r5,1 - bst r18,6 - bld r9,1 - bst r18,7 - bld r13,1 - bst r19,0 - bld r23,2 - bst r19,1 - bld r5,2 - bst r19,2 - bld r9,2 - bst r19,3 - bld r13,2 - bst r19,4 - bld r23,3 - bst r19,5 - bld r5,3 - bst r19,6 - bld r9,3 - bst r19,7 - bld r13,3 - bst r20,0 - bld r23,4 - bst r20,1 - bld r5,4 - bst r20,2 - bld r9,4 - bst r20,3 - bld r13,4 - bst r20,4 - bld r23,5 - bst r20,5 - bld r5,5 - bst r20,6 - bld r9,5 - bst r20,7 - bld r13,5 - bst r21,0 - bld r23,6 - bst r21,1 - bld r5,6 - bst r21,2 - bld r9,6 - bst r21,3 - bld r13,6 - bst r21,4 - bld r23,7 - bst r21,5 - bld r5,7 - bst r21,6 - bld r9,7 - bst r21,7 - bld r13,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r2,0 - bst r18,1 - bld r6,0 - bst r18,2 - bld r10,0 - bst r18,3 - bld r14,0 - bst r18,4 - bld r2,1 - bst r18,5 - bld r6,1 - bst r18,6 - bld r10,1 - bst r18,7 - bld r14,1 - bst r19,0 - bld r2,2 - bst r19,1 - bld r6,2 - bst r19,2 - bld r10,2 - bst r19,3 - bld r14,2 - bst r19,4 - bld r2,3 - bst r19,5 - bld r6,3 - bst r19,6 - bld r10,3 - bst r19,7 - bld r14,3 - bst r20,0 - bld r2,4 - bst r20,1 - bld r6,4 - bst r20,2 - bld r10,4 - bst r20,3 - bld r14,4 - bst r20,4 - bld r2,5 - bst r20,5 - bld r6,5 - bst r20,6 - bld r10,5 - bst r20,7 - bld r14,5 - bst r21,0 - bld r2,6 - bst r21,1 - bld r6,6 - bst r21,2 - bld r10,6 - bst r21,3 - bld r14,6 - bst r21,4 - bld r2,7 - bst r21,5 - bld r6,7 - bst r21,6 - bld r10,7 - bst r21,7 - bld r14,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r3,0 - bst r18,1 - bld r7,0 - bst r18,2 - bld r11,0 - bst r18,3 - bld r15,0 - bst r18,4 - bld r3,1 - bst r18,5 - bld r7,1 - bst r18,6 - bld r11,1 - bst r18,7 - bld r15,1 - bst r19,0 - bld r3,2 - bst r19,1 - bld r7,2 - bst r19,2 - bld r11,2 - bst r19,3 - bld r15,2 - bst r19,4 - bld r3,3 - bst r19,5 - bld r7,3 - bst r19,6 - bld r11,3 - bst r19,7 - bld r15,3 - bst r20,0 - bld r3,4 - bst r20,1 - bld r7,4 - bst r20,2 - bld r11,4 - bst r20,3 - bld r15,4 - bst r20,4 - bld r3,5 - bst r20,5 - bld r7,5 - bst r20,6 - bld r11,5 - bst r20,7 - bld r15,5 - bst r21,0 - bld r3,6 - bst r21,1 - bld r7,6 - bst r21,2 - bld r11,6 - bst r21,3 - bld r15,6 - bst r21,4 - bld r3,7 - bst r21,5 - bld r7,7 - bst r21,6 - bld r11,7 - bst r21,7 - bld r15,7 - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi 
r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 329f - rcall 329f - rjmp 1541f -329: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld 
r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor 
r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov 
r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -1067: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi 
r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - 
rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -1541: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - bst r22,0 - bld r18,0 - bst r4,0 - bld r18,1 - bst r8,0 - bld r18,2 - bst r12,0 - bld r18,3 - bst r22,1 - bld r18,4 - bst r4,1 - bld r18,5 - bst r8,1 - bld r18,6 - bst r12,1 - bld r18,7 - bst r22,2 - bld r19,0 - bst r4,2 - bld r19,1 - bst r8,2 - bld r19,2 - bst r12,2 - bld r19,3 - bst r22,3 - bld r19,4 - bst r4,3 - bld r19,5 - bst r8,3 - bld r19,6 - bst r12,3 - bld r19,7 - bst r22,4 - bld r20,0 - bst r4,4 - bld r20,1 - bst r8,4 - bld r20,2 - bst r12,4 - bld r20,3 - bst r22,5 - bld r20,4 - bst r4,5 - bld r20,5 - bst r8,5 - bld r20,6 - bst r12,5 - bld r20,7 - bst r22,6 - bld r21,0 - bst r4,6 - bld r21,1 - bst r8,6 - bld r21,2 - bst r12,6 - bld r21,3 - bst r22,7 - bld r21,4 - bst r4,7 - bld r21,5 - bst r8,7 - bld r21,6 - bst r12,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r23,0 - bld r18,0 - bst r5,0 - bld r18,1 - bst r9,0 - bld r18,2 - bst r13,0 - bld r18,3 - bst r23,1 - bld r18,4 - bst r5,1 - bld r18,5 - bst r9,1 - bld r18,6 - bst r13,1 - bld r18,7 - bst r23,2 - bld r19,0 - bst r5,2 - bld r19,1 - bst r9,2 - bld r19,2 - bst r13,2 - bld r19,3 - bst r23,3 - bld r19,4 - bst r5,3 - bld r19,5 - bst r9,3 - bld r19,6 - bst r13,3 - bld r19,7 - bst r23,4 - bld r20,0 - bst r5,4 - bld r20,1 - bst r9,4 - bld r20,2 - bst r13,4 - bld r20,3 - bst r23,5 - bld r20,4 - bst r5,5 - bld r20,5 - bst r9,5 - bld r20,6 - bst r13,5 - bld r20,7 - bst r23,6 - bld r21,0 - bst r5,6 - bld r21,1 - bst r9,6 - bld r21,2 - bst r13,6 - bld r21,3 - bst r23,7 - bld r21,4 - bst r5,7 - bld r21,5 - bst r9,7 - bld r21,6 - bst r13,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r2,0 - bld r18,0 - bst r6,0 - bld r18,1 - bst r10,0 - bld r18,2 - bst r14,0 - bld r18,3 - bst r2,1 - bld r18,4 - bst r6,1 - bld r18,5 - bst r10,1 - bld r18,6 - bst r14,1 - bld r18,7 - bst r2,2 - bld r19,0 - bst r6,2 - bld r19,1 - bst r10,2 - bld r19,2 - bst r14,2 - bld r19,3 - bst r2,3 - bld r19,4 - bst r6,3 - bld r19,5 - bst r10,3 - bld r19,6 - bst r14,3 - bld r19,7 - bst r2,4 - bld r20,0 - bst r6,4 - bld r20,1 - bst r10,4 - bld r20,2 - bst r14,4 - bld r20,3 - bst r2,5 - bld r20,4 - bst r6,5 - bld 
r20,5 - bst r10,5 - bld r20,6 - bst r14,5 - bld r20,7 - bst r2,6 - bld r21,0 - bst r6,6 - bld r21,1 - bst r10,6 - bld r21,2 - bst r14,6 - bld r21,3 - bst r2,7 - bld r21,4 - bst r6,7 - bld r21,5 - bst r10,7 - bld r21,6 - bst r14,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r3,0 - bld r18,0 - bst r7,0 - bld r18,1 - bst r11,0 - bld r18,2 - bst r15,0 - bld r18,3 - bst r3,1 - bld r18,4 - bst r7,1 - bld r18,5 - bst r11,1 - bld r18,6 - bst r15,1 - bld r18,7 - bst r3,2 - bld r19,0 - bst r7,2 - bld r19,1 - bst r11,2 - bld r19,2 - bst r15,2 - bld r19,3 - bst r3,3 - bld r19,4 - bst r7,3 - bld r19,5 - bst r11,3 - bld r19,6 - bst r15,3 - bld r19,7 - bst r3,4 - bld r20,0 - bst r7,4 - bld r20,1 - bst r11,4 - bld r20,2 - bst r15,4 - bld r20,3 - bst r3,5 - bld r20,4 - bst r7,5 - bld r20,5 - bst r11,5 - bld r20,6 - bst r15,5 - bld r20,7 - bst r3,6 - bld r21,0 - bst r7,6 - bld r21,1 - bst r11,6 - bld r21,2 - bst r15,6 - bld r21,3 - bst r3,7 - bld r21,4 - bst r7,7 - bld r21,5 - bst r11,7 - bld r21,6 - bst r15,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128n_encrypt, .-gift128n_encrypt - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 40 -table_1: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - - .text -.global gift128n_decrypt - .type gift128n_decrypt, @function -gift128n_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r22,0 - bst r18,1 - bld r4,0 - bst r18,2 - bld r8,0 - bst r18,3 - bld r12,0 - bst r18,4 - bld r22,1 - bst r18,5 - bld r4,1 - bst r18,6 - bld r8,1 - bst r18,7 - bld r12,1 - bst r19,0 - bld r22,2 - bst r19,1 - bld r4,2 - bst r19,2 - bld r8,2 - bst r19,3 - bld r12,2 - bst r19,4 - bld r22,3 - bst r19,5 - bld r4,3 - bst r19,6 - bld r8,3 - bst r19,7 - bld r12,3 - bst r20,0 - bld r22,4 - bst r20,1 - bld r4,4 - bst r20,2 - bld r8,4 - bst r20,3 - bld r12,4 - bst r20,4 - bld r22,5 - bst r20,5 - bld r4,5 - bst r20,6 - bld r8,5 - bst r20,7 - bld r12,5 - bst r21,0 - bld r22,6 - bst r21,1 - bld r4,6 - bst r21,2 - bld r8,6 - bst r21,3 - bld r12,6 - bst r21,4 - bld r22,7 - bst r21,5 - bld r4,7 - bst r21,6 - bld r8,7 - bst r21,7 - bld r12,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r23,0 - bst r18,1 - bld r5,0 - bst r18,2 - bld r9,0 - bst r18,3 - bld r13,0 - bst r18,4 - bld r23,1 - bst r18,5 - bld r5,1 - bst r18,6 - bld r9,1 - bst r18,7 - bld r13,1 - bst r19,0 - bld r23,2 - bst r19,1 - bld r5,2 - bst r19,2 - bld r9,2 - bst r19,3 - bld r13,2 - bst r19,4 - bld r23,3 - 
bst r19,5 - bld r5,3 - bst r19,6 - bld r9,3 - bst r19,7 - bld r13,3 - bst r20,0 - bld r23,4 - bst r20,1 - bld r5,4 - bst r20,2 - bld r9,4 - bst r20,3 - bld r13,4 - bst r20,4 - bld r23,5 - bst r20,5 - bld r5,5 - bst r20,6 - bld r9,5 - bst r20,7 - bld r13,5 - bst r21,0 - bld r23,6 - bst r21,1 - bld r5,6 - bst r21,2 - bld r9,6 - bst r21,3 - bld r13,6 - bst r21,4 - bld r23,7 - bst r21,5 - bld r5,7 - bst r21,6 - bld r9,7 - bst r21,7 - bld r13,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r2,0 - bst r18,1 - bld r6,0 - bst r18,2 - bld r10,0 - bst r18,3 - bld r14,0 - bst r18,4 - bld r2,1 - bst r18,5 - bld r6,1 - bst r18,6 - bld r10,1 - bst r18,7 - bld r14,1 - bst r19,0 - bld r2,2 - bst r19,1 - bld r6,2 - bst r19,2 - bld r10,2 - bst r19,3 - bld r14,2 - bst r19,4 - bld r2,3 - bst r19,5 - bld r6,3 - bst r19,6 - bld r10,3 - bst r19,7 - bld r14,3 - bst r20,0 - bld r2,4 - bst r20,1 - bld r6,4 - bst r20,2 - bld r10,4 - bst r20,3 - bld r14,4 - bst r20,4 - bld r2,5 - bst r20,5 - bld r6,5 - bst r20,6 - bld r10,5 - bst r20,7 - bld r14,5 - bst r21,0 - bld r2,6 - bst r21,1 - bld r6,6 - bst r21,2 - bld r10,6 - bst r21,3 - bld r14,6 - bst r21,4 - bld r2,7 - bst r21,5 - bld r6,7 - bst r21,6 - bld r10,7 - bst r21,7 - bld r14,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r3,0 - bst r18,1 - bld r7,0 - bst r18,2 - bld r11,0 - bst r18,3 - bld r15,0 - bst r18,4 - bld r3,1 - bst r18,5 - bld r7,1 - bst r18,6 - bld r11,1 - bst r18,7 - bld r15,1 - bst r19,0 - bld r3,2 - bst r19,1 - bld r7,2 - bst r19,2 - bld r11,2 - bst r19,3 - bld r15,2 - bst r19,4 - bld r3,3 - bst r19,5 - bld r7,3 - bst r19,6 - bld r11,3 - bst r19,7 - bld r15,3 - bst r20,0 - bld r3,4 - bst r20,1 - bld r7,4 - bst r20,2 - bld r11,4 - bst r20,3 - bld r15,4 - bst r20,4 - bld r3,5 - bst r20,5 - bld r7,5 - bst r20,6 - bld r11,5 - bst r20,7 - bld r15,5 - bst r21,0 - bld r3,6 - bst r21,1 - bld r7,6 - bst r21,2 - bld r11,6 - bst r21,3 - bld r15,6 - bst r21,4 - bld r3,7 - bst r21,5 - bld r7,7 - bst r21,6 - bld r11,7 - bst r21,7 - bld r15,7 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror 
r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi 
r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r17,hh8(table_1) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -934: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 1086f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 
- ldd r0,Y+16 - eor r11,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 1086f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 1086f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 1086f - cpse r16,r1 - rjmp 934b - rjmp 1431f -1086: - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - 
bld r11,3 - bst r8,7 - bld r11,6 - bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst r9,1 - bld r8,5 - bst r9,2 - bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -1431: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - bst r22,0 - bld r18,0 - bst r4,0 - bld r18,1 - bst r8,0 - bld r18,2 - bst r12,0 - bld r18,3 - bst r22,1 - bld r18,4 - bst r4,1 - bld r18,5 - bst r8,1 - bld r18,6 - bst r12,1 - bld r18,7 - bst r22,2 - bld r19,0 - bst r4,2 - bld r19,1 - bst r8,2 - bld r19,2 - bst r12,2 - bld r19,3 - bst r22,3 - bld r19,4 - bst r4,3 - bld r19,5 - bst r8,3 - bld r19,6 - bst r12,3 - bld r19,7 - bst r22,4 - bld r20,0 - bst r4,4 - bld r20,1 - bst r8,4 - bld r20,2 - bst r12,4 - bld r20,3 - bst r22,5 - bld r20,4 - bst r4,5 - bld r20,5 - bst r8,5 - bld r20,6 - bst r12,5 - bld r20,7 - bst r22,6 - bld r21,0 - bst r4,6 - bld r21,1 - bst r8,6 - bld r21,2 - bst r12,6 - bld r21,3 - bst r22,7 - bld r21,4 - bst r4,7 - bld r21,5 - bst r8,7 - bld r21,6 - bst r12,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r23,0 - bld r18,0 - bst r5,0 - bld r18,1 - bst r9,0 - bld r18,2 - bst r13,0 - bld r18,3 - bst r23,1 - bld r18,4 - bst r5,1 - bld r18,5 - bst r9,1 - bld r18,6 - bst r13,1 - bld r18,7 - bst r23,2 - bld r19,0 - bst r5,2 - bld r19,1 - bst r9,2 - bld r19,2 - bst r13,2 - bld r19,3 - bst r23,3 - bld r19,4 - bst r5,3 - bld r19,5 - bst r9,3 - bld r19,6 - bst r13,3 - bld r19,7 - bst r23,4 - bld r20,0 - bst r5,4 - bld r20,1 - bst r9,4 - bld r20,2 - bst r13,4 - bld r20,3 - bst r23,5 - bld r20,4 - bst r5,5 - bld r20,5 - bst r9,5 - bld r20,6 - bst r13,5 - bld r20,7 - bst r23,6 - bld r21,0 - bst r5,6 - bld r21,1 - bst r9,6 - bld r21,2 - bst r13,6 - bld r21,3 - bst r23,7 - bld r21,4 - bst r5,7 - bld r21,5 - bst r9,7 - bld r21,6 - bst r13,7 - bld r21,7 
- st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r2,0 - bld r18,0 - bst r6,0 - bld r18,1 - bst r10,0 - bld r18,2 - bst r14,0 - bld r18,3 - bst r2,1 - bld r18,4 - bst r6,1 - bld r18,5 - bst r10,1 - bld r18,6 - bst r14,1 - bld r18,7 - bst r2,2 - bld r19,0 - bst r6,2 - bld r19,1 - bst r10,2 - bld r19,2 - bst r14,2 - bld r19,3 - bst r2,3 - bld r19,4 - bst r6,3 - bld r19,5 - bst r10,3 - bld r19,6 - bst r14,3 - bld r19,7 - bst r2,4 - bld r20,0 - bst r6,4 - bld r20,1 - bst r10,4 - bld r20,2 - bst r14,4 - bld r20,3 - bst r2,5 - bld r20,4 - bst r6,5 - bld r20,5 - bst r10,5 - bld r20,6 - bst r14,5 - bld r20,7 - bst r2,6 - bld r21,0 - bst r6,6 - bld r21,1 - bst r10,6 - bld r21,2 - bst r14,6 - bld r21,3 - bst r2,7 - bld r21,4 - bst r6,7 - bld r21,5 - bst r10,7 - bld r21,6 - bst r14,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r3,0 - bld r18,0 - bst r7,0 - bld r18,1 - bst r11,0 - bld r18,2 - bst r15,0 - bld r18,3 - bst r3,1 - bld r18,4 - bst r7,1 - bld r18,5 - bst r11,1 - bld r18,6 - bst r15,1 - bld r18,7 - bst r3,2 - bld r19,0 - bst r7,2 - bld r19,1 - bst r11,2 - bld r19,2 - bst r15,2 - bld r19,3 - bst r3,3 - bld r19,4 - bst r7,3 - bld r19,5 - bst r11,3 - bld r19,6 - bst r15,3 - bld r19,7 - bst r3,4 - bld r20,0 - bst r7,4 - bld r20,1 - bst r11,4 - bld r20,2 - bst r15,4 - bld r20,3 - bst r3,5 - bld r20,4 - bst r7,5 - bld r20,5 - bst r11,5 - bld r20,6 - bst r15,5 - bld r20,7 - bst r3,6 - bld r21,0 - bst r7,6 - bld r21,1 - bst r11,6 - bld r21,2 - bst r15,6 - bld r21,3 - bst r3,7 - bld r21,4 - bst r7,7 - bld r21,5 - bst r11,7 - bld r21,6 - bst r15,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128n_decrypt, .-gift128n_decrypt - - .text -.global gift128t_encrypt - .type gift128t_encrypt, @function -gift128t_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ldi r19,20 -1: - ld r2,Z+ - ld r3,Z+ - ld r4,Z+ - ld r5,Z+ - std Y+1,r2 - std Y+2,r3 - std Y+3,r4 - std Y+4,r5 - adiw r28,4 - dec r19 - brne 1b - subi r28,80 - sbc r29,r1 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r2,0 - bst r20,1 - bld r6,0 - bst r20,2 - bld r10,0 - bst r20,3 - bld r14,0 - bst r20,4 - bld r2,1 - bst r20,5 - bld r6,1 - bst r20,6 - bld r10,1 - bst r20,7 - bld r14,1 - bst r21,0 - bld r2,2 - bst r21,1 - bld r6,2 - bst r21,2 - bld r10,2 - bst r21,3 - bld r14,2 - bst r21,4 - bld r2,3 - bst r21,5 - bld r6,3 - bst r21,6 - bld r10,3 - bst r21,7 - bld r14,3 - bst r22,0 - bld r2,4 - bst r22,1 - bld r6,4 - bst r22,2 - bld r10,4 - bst r22,3 - bld r14,4 - bst r22,4 - bld r2,5 - bst r22,5 - bld r6,5 - bst r22,6 - bld r10,5 - bst r22,7 - bld r14,5 - bst r23,0 - bld r2,6 - bst r23,1 - bld r6,6 - bst r23,2 - bld r10,6 - bst r23,3 - bld r14,6 - bst r23,4 - bld r2,7 - bst r23,5 - bld r6,7 - bst r23,6 - bld r10,7 - bst r23,7 - bld r14,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r3,0 - bst r20,1 - bld r7,0 - bst r20,2 - bld r11,0 - bst r20,3 - bld r15,0 - bst r20,4 - bld r3,1 - 
bst r20,5 - bld r7,1 - bst r20,6 - bld r11,1 - bst r20,7 - bld r15,1 - bst r21,0 - bld r3,2 - bst r21,1 - bld r7,2 - bst r21,2 - bld r11,2 - bst r21,3 - bld r15,2 - bst r21,4 - bld r3,3 - bst r21,5 - bld r7,3 - bst r21,6 - bld r11,3 - bst r21,7 - bld r15,3 - bst r22,0 - bld r3,4 - bst r22,1 - bld r7,4 - bst r22,2 - bld r11,4 - bst r22,3 - bld r15,4 - bst r22,4 - bld r3,5 - bst r22,5 - bld r7,5 - bst r22,6 - bld r11,5 - bst r22,7 - bld r15,5 - bst r23,0 - bld r3,6 - bst r23,1 - bld r7,6 - bst r23,2 - bld r11,6 - bst r23,3 - bld r15,6 - bst r23,4 - bld r3,7 - bst r23,5 - bld r7,7 - bst r23,6 - bld r11,7 - bst r23,7 - bld r15,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r4,0 - bst r20,1 - bld r8,0 - bst r20,2 - bld r12,0 - bst r20,3 - bld r24,0 - bst r20,4 - bld r4,1 - bst r20,5 - bld r8,1 - bst r20,6 - bld r12,1 - bst r20,7 - bld r24,1 - bst r21,0 - bld r4,2 - bst r21,1 - bld r8,2 - bst r21,2 - bld r12,2 - bst r21,3 - bld r24,2 - bst r21,4 - bld r4,3 - bst r21,5 - bld r8,3 - bst r21,6 - bld r12,3 - bst r21,7 - bld r24,3 - bst r22,0 - bld r4,4 - bst r22,1 - bld r8,4 - bst r22,2 - bld r12,4 - bst r22,3 - bld r24,4 - bst r22,4 - bld r4,5 - bst r22,5 - bld r8,5 - bst r22,6 - bld r12,5 - bst r22,7 - bld r24,5 - bst r23,0 - bld r4,6 - bst r23,1 - bld r8,6 - bst r23,2 - bld r12,6 - bst r23,3 - bld r24,6 - bst r23,4 - bld r4,7 - bst r23,5 - bld r8,7 - bst r23,6 - bld r12,7 - bst r23,7 - bld r24,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r5,0 - bst r20,1 - bld r9,0 - bst r20,2 - bld r13,0 - bst r20,3 - bld r25,0 - bst r20,4 - bld r5,1 - bst r20,5 - bld r9,1 - bst r20,6 - bld r13,1 - bst r20,7 - bld r25,1 - bst r21,0 - bld r5,2 - bst r21,1 - bld r9,2 - bst r21,2 - bld r13,2 - bst r21,3 - bld r25,2 - bst r21,4 - bld r5,3 - bst r21,5 - bld r9,3 - bst r21,6 - bld r13,3 - bst r21,7 - bld r25,3 - bst r22,0 - bld r5,4 - bst r22,1 - bld r9,4 - bst r22,2 - bld r13,4 - bst r22,3 - bld r25,4 - bst r22,4 - bld r5,5 - bst r22,5 - bld r9,5 - bst r22,6 - bld r13,5 - bst r22,7 - bld r25,5 - bst r23,0 - bld r5,6 - bst r23,1 - bld r9,6 - bst r23,2 - bld r13,6 - bst r23,3 - bld r25,6 - bst r23,4 - bld r5,7 - bst r23,5 - bld r9,7 - bst r23,6 - bld r13,7 - bst r23,7 - bld r25,7 - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - rcall 357f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1095f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,20 - adiw r26,40 - rcall 357f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1095f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,40 - sbiw r26,40 - rcall 357f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1095f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,60 - adiw r26,40 - rcall 357f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out 
_SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1095f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,80 - sbiw r26,40 - rcall 357f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1095f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,100 - adiw r26,40 - rcall 357f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1095f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,120 - sbiw r26,40 - rcall 357f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 357f - rjmp 1570f -357: - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - com r14 - com r15 - com r24 - com r25 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - movw r20,r6 - movw r22,r8 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,204 - andi r21,204 - andi r22,204 - andi r23,204 - lsr r9 - ror r8 - ror r7 - ror r6 - lsr r9 - ror r8 - ror r7 - ror r6 - ldi r19,51 - and r6,r19 - and r7,r19 - and r8,r19 - and r9,r19 - or r6,r20 - or r7,r21 - or r8,r22 - or r9,r23 - movw r20,r10 - movw r22,r12 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,238 - andi r21,238 - andi r22,238 - andi r23,238 - lsr r13 - ror r12 - ror r11 - ror r10 - lsr r13 - ror r12 - ror r11 - ror r10 - lsr r13 - ror r12 - ror r11 - ror r10 - ldi r17,17 - and r10,r17 - and r11,r17 - and r12,r17 - and r13,r17 - or r10,r20 - or r11,r21 - or r12,r22 - or r13,r23 - movw r20,r14 - movw r22,r24 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,136 - andi r21,136 - andi r22,136 - andi r23,136 - lsr r25 - ror r24 - ror r15 - ror r14 - ldi r16,119 - and r14,r16 - and r15,r16 - andi r24,119 - andi r25,119 - or r14,r20 - or r15,r21 - or r24,r22 - or r25,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if 
defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r24 - and r0,r12 - eor r8,r0 - mov r0,r25 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r8 - and r0,r4 - eor r24,r0 - mov r0,r9 - and r0,r5 - eor r25,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r24 - or r0,r8 - eor r12,r0 - mov r0,r25 - or r0,r9 - eor r13,r0 - eor r2,r10 - eor r3,r11 - eor r4,r12 - eor r5,r13 - eor r6,r2 - eor r7,r3 - eor r8,r4 - eor r9,r5 - com r2 - com r3 - com r4 - com r5 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r24 - and r0,r8 - eor r12,r0 - mov r0,r25 - and r0,r9 - eor r13,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r1 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - or r5,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r12 - rol r13 - adc r12,r1 - lsl r12 - rol r13 - adc r12,r1 - lsl r12 - rol r13 - adc r12,r1 - lsl r12 - rol r13 - adc r12,r1 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r14,r20 - eor r15,r21 - eor r24,r22 - eor r25,r23 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - com r14 - com r15 - com r24 - com r25 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - movw r20,r6 - movw r22,r8 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - andi r20,85 - andi r21,85 - andi r22,85 - andi r23,85 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - mov r0,r12 - mov r12,r10 - mov r10,r0 - 
mov r0,r13 - mov r13,r11 - mov r11,r0 - movw r20,r10 - movw r22,r12 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r10 - eor r21,r11 - andi r20,85 - andi r21,85 - eor r10,r20 - eor r11,r21 - mov r22,r1 - mov r23,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - mov r0,r24 - mov r24,r14 - mov r14,r0 - mov r0,r25 - mov r25,r15 - mov r15,r0 - movw r20,r24 - lsr r21 - ror r20 - eor r20,r24 - eor r21,r25 - andi r20,85 - andi r21,85 - eor r24,r20 - eor r25,r21 - lsl r20 - rol r21 - eor r24,r20 - eor r25,r21 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r24 - and r0,r12 - eor r8,r0 - mov r0,r25 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r8 - and r0,r4 - eor r24,r0 - mov r0,r9 - and r0,r5 - eor r25,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r24 - or r0,r8 - eor r12,r0 - mov r0,r25 - or r0,r9 - eor r13,r0 - eor r2,r10 - eor r3,r11 - eor r4,r12 - eor r5,r13 - eor r6,r2 - eor r7,r3 - eor r8,r4 - eor r9,r5 - com r2 - com r3 - com r4 - com r5 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r24 - and r0,r8 - eor r12,r0 - mov r0,r25 - and r0,r9 - eor r13,r0 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r5 - adc r5,r1 - lsl r5 - adc r5,r1 - swap r6 - swap r7 - swap r8 - swap r9 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - mov r0,r1 - lsr r12 - ror r0 - lsr r12 - ror r0 - or r12,r0 - mov r0,r1 - lsr r13 - ror r0 - lsr r13 - ror r0 - or r13,r0 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r14,r20 - eor r15,r21 - eor 
r24,r22 - eor r25,r23 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - com r14 - com r15 - com r24 - com r25 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - mov r0,r8 - mov r8,r6 - mov r6,r0 - mov r0,r9 - mov r9,r7 - mov r7,r0 - mov r0,r10 - mov r10,r11 - mov r11,r12 - mov r12,r13 - mov r13,r0 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - eor r14,r2 - eor r15,r3 - eor r24,r4 - eor r25,r5 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - ret -1095: - movw r30,r26 - sbiw r30,40 - push r5 - push r4 - push r3 - push r2 - push r9 - push r8 - push r7 - push r6 - ld r2,Z - ldd r3,Z+1 - ldd r4,Z+2 - ldd r5,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r16,Z+6 - ldd r17,Z+7 - movw r20,r26 - movw r22,r16 - movw r20,r22 - mov r22,r1 - mov r23,r1 - eor r20,r26 - eor r21,r27 - andi r20,51 - andi r21,51 - eor r26,r20 - eor r27,r21 - mov r22,r1 - mov r23,r1 - movw r22,r20 - mov r20,r1 - mov r21,r1 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,68 - andi r21,68 - andi r22,85 - andi r23,85 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - st Z,r26 - std Z+1,r27 - std Z+2,r16 - std Z+3,r17 - movw r20,r2 - movw r22,r4 - andi r20,51 - andi r21,51 - andi r22,51 - andi r23,51 - ldi r19,204 - and r2,r19 - and r3,r19 - and r4,r19 - and r5,r19 - or r4,r23 - or r5,r20 - or r2,r21 - or r3,r22 - movw r20,r4 - movw r22,r2 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r4 - eor r21,r5 - eor r22,r2 - eor r23,r3 - mov r20,r1 - andi r21,17 - andi r22,85 - andi r23,85 - eor r4,r20 - eor r5,r21 - eor r2,r22 - eor r3,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r4,r20 - eor r5,r21 - eor r2,r22 - eor r3,r23 - std Z+4,r4 - std Z+5,r5 - std Z+6,r2 - std Z+7,r3 - ldd r2,Z+8 - ldd r3,Z+9 - ldd r4,Z+10 - ldd r5,Z+11 - ldd 
r26,Z+12 - ldd r27,Z+13 - ldd r16,Z+14 - ldd r17,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r16 - adc r16,r1 - lsl r16 - adc r16,r1 - swap r17 - std Z+8,r26 - std Z+9,r27 - std Z+10,r16 - std Z+11,r17 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r5 - adc r5,r1 - lsl r5 - adc r5,r1 - std Z+12,r2 - std Z+13,r3 - std Z+14,r4 - std Z+15,r5 - ldd r2,Z+16 - ldd r3,Z+17 - ldd r4,Z+18 - ldd r5,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r16,Z+22 - ldd r17,Z+23 - movw r20,r26 - movw r22,r16 - andi r20,170 - andi r21,170 - andi r22,170 - andi r23,170 - andi r26,85 - andi r27,85 - andi r16,85 - andi r17,85 - or r26,r21 - or r27,r22 - or r16,r23 - or r17,r20 - std Z+16,r16 - std Z+17,r17 - std Z+18,r26 - std Z+19,r27 - movw r20,r2 - movw r22,r4 - andi r20,85 - andi r21,85 - andi r22,85 - andi r23,85 - ldi r19,170 - and r2,r19 - and r3,r19 - and r4,r19 - and r5,r19 - lsl r2 - rol r3 - rol r4 - rol r5 - adc r2,r1 - lsl r2 - rol r3 - rol r4 - rol r5 - adc r2,r1 - lsl r2 - rol r3 - rol r4 - rol r5 - adc r2,r1 - lsl r2 - rol r3 - rol r4 - rol r5 - adc r2,r1 - or r2,r20 - or r3,r21 - or r4,r22 - or r5,r23 - std Z+20,r5 - std Z+21,r2 - std Z+22,r3 - std Z+23,r4 - ldd r2,Z+24 - ldd r3,Z+25 - ldd r4,Z+26 - ldd r5,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r16,Z+30 - ldd r17,Z+31 - movw r20,r26 - movw r22,r16 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,3 - andi r21,3 - andi r22,3 - andi r23,3 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - lsr r23 - ror r22 - ror r21 - ror r20 - andi r20,120 - andi r21,120 - andi r22,120 - andi r23,120 - movw r6,r20 - movw r8,r22 - lsr r9 - ror r8 - ror r7 - ror r6 - lsr r9 - ror r8 - ror r7 - ror r6 - lsr r9 - ror r8 - ror r7 - ror r6 - lsr r9 - ror r8 - ror r7 - ror r6 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ldi r19,8 - and r6,r19 - and r7,r19 - and r8,r19 - and r9,r19 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - lsl r6 - rol r7 - rol r8 - rol r9 - lsl r6 - rol r7 - rol r8 - rol r9 - lsl r6 - rol r7 - rol r8 - rol r9 - lsl r6 - rol r7 - rol r8 - rol r9 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - andi r26,15 - andi r27,15 - andi r16,15 - andi r17,15 - or r26,r20 - or r27,r21 - or r16,r22 - or r17,r23 - std Z+24,r26 - std Z+25,r27 - std Z+26,r16 - std Z+27,r17 - movw r20,r4 - lsr r21 - ror r20 - lsr r21 - ror r20 - andi r20,48 - andi r21,48 - movw r26,r2 - movw r16,r4 - andi r26,1 - andi r27,1 - andi r16,1 - andi r17,1 - lsl r26 - rol r27 - rol r16 - rol r17 - lsl r26 - rol r27 - rol r16 - rol r17 - lsl r26 - rol r27 - rol r16 - rol r17 - or r26,r20 - or r27,r21 - movw r20,r4 - lsl r20 - rol r21 - lsl r20 - rol r21 - andi r20,192 - andi r21,192 - or r26,r20 - or r27,r21 - movw r20,r2 - andi r20,224 - andi r21,224 - lsr r21 - ror r20 - or r16,r20 - or r17,r21 - movw r20,r2 - movw r22,r4 - lsr r23 - ror r22 - ror r21 - ror r20 - andi r20,7 - andi r21,7 - andi r22,7 - andi r23,7 - or r26,r20 - or r27,r21 - or r16,r22 - or r17,r23 - ldi r19,16 - and r2,r19 - and r3,r19 - lsl r2 - rol r3 - lsl r2 - rol r3 - lsl r2 - rol r3 - or r16,r2 - or r17,r3 - std Z+28,r26 - std Z+29,r27 - std Z+30,r16 - std Z+31,r17 - ldd r2,Z+32 - ldd 
r3,Z+33 - ldd r4,Z+34 - ldd r5,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r16,Z+38 - ldd r17,Z+39 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r16 - std Z+35,r17 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r4 - mov r4,r5 - mov r5,r0 - lsl r4 - rol r5 - adc r4,r1 - lsl r4 - rol r5 - adc r4,r1 - std Z+36,r2 - std Z+37,r3 - std Z+38,r4 - std Z+39,r5 - pop r6 - pop r7 - pop r8 - pop r9 - pop r2 - pop r3 - pop r4 - pop r5 - movw r26,r30 - ret -1570: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - bst r2,0 - bld r20,0 - bst r6,0 - bld r20,1 - bst r10,0 - bld r20,2 - bst r14,0 - bld r20,3 - bst r2,1 - bld r20,4 - bst r6,1 - bld r20,5 - bst r10,1 - bld r20,6 - bst r14,1 - bld r20,7 - bst r2,2 - bld r21,0 - bst r6,2 - bld r21,1 - bst r10,2 - bld r21,2 - bst r14,2 - bld r21,3 - bst r2,3 - bld r21,4 - bst r6,3 - bld r21,5 - bst r10,3 - bld r21,6 - bst r14,3 - bld r21,7 - bst r2,4 - bld r22,0 - bst r6,4 - bld r22,1 - bst r10,4 - bld r22,2 - bst r14,4 - bld r22,3 - bst r2,5 - bld r22,4 - bst r6,5 - bld r22,5 - bst r10,5 - bld r22,6 - bst r14,5 - bld r22,7 - bst r2,6 - bld r23,0 - bst r6,6 - bld r23,1 - bst r10,6 - bld r23,2 - bst r14,6 - bld r23,3 - bst r2,7 - bld r23,4 - bst r6,7 - bld r23,5 - bst r10,7 - bld r23,6 - bst r14,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r3,0 - bld r20,0 - bst r7,0 - bld r20,1 - bst r11,0 - bld r20,2 - bst r15,0 - bld r20,3 - bst r3,1 - bld r20,4 - bst r7,1 - bld r20,5 - bst r11,1 - bld r20,6 - bst r15,1 - bld r20,7 - bst r3,2 - bld r21,0 - bst r7,2 - bld r21,1 - bst r11,2 - bld r21,2 - bst r15,2 - bld r21,3 - bst r3,3 - bld r21,4 - bst r7,3 - bld r21,5 - bst r11,3 - bld r21,6 - bst r15,3 - bld r21,7 - bst r3,4 - bld r22,0 - bst r7,4 - bld r22,1 - bst r11,4 - bld r22,2 - bst r15,4 - bld r22,3 - bst r3,5 - bld r22,4 - bst r7,5 - bld r22,5 - bst r11,5 - bld r22,6 - bst r15,5 - bld r22,7 - bst r3,6 - bld r23,0 - bst r7,6 - bld r23,1 - bst r11,6 - bld r23,2 - bst r15,6 - bld r23,3 - bst r3,7 - bld r23,4 - bst r7,7 - bld r23,5 - bst r11,7 - bld r23,6 - bst r15,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r4,0 - bld r20,0 - bst r8,0 - bld r20,1 - bst r12,0 - bld r20,2 - bst r24,0 - bld r20,3 - bst r4,1 - bld r20,4 - bst r8,1 - bld r20,5 - bst r12,1 - bld r20,6 - bst r24,1 - bld r20,7 - bst r4,2 - bld r21,0 - bst r8,2 - bld r21,1 - bst r12,2 - bld r21,2 - bst r24,2 - bld r21,3 - bst r4,3 - bld r21,4 - bst r8,3 - bld r21,5 - bst r12,3 - bld r21,6 - bst r24,3 - bld r21,7 - bst r4,4 - bld r22,0 - bst r8,4 - bld r22,1 - bst r12,4 - bld r22,2 - bst r24,4 - bld r22,3 - bst r4,5 - bld r22,4 - bst r8,5 - bld r22,5 - bst r12,5 - bld r22,6 - bst r24,5 - bld r22,7 - bst r4,6 - bld r23,0 - bst r8,6 - bld r23,1 - bst r12,6 - bld r23,2 - bst r24,6 - bld r23,3 - bst r4,7 - bld r23,4 - bst r8,7 - bld r23,5 - bst r12,7 - bld r23,6 - bst r24,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r5,0 - bld r20,0 - bst r9,0 - bld r20,1 - bst r13,0 - bld r20,2 - bst r25,0 - bld r20,3 - bst r5,1 - bld r20,4 - bst r9,1 - bld r20,5 - bst r13,1 - bld r20,6 - bst r25,1 - bld r20,7 - bst r5,2 - bld r21,0 - bst r9,2 - bld r21,1 - bst r13,2 - bld r21,2 - bst r25,2 - bld r21,3 - bst r5,3 - bld r21,4 - bst r9,3 - bld r21,5 - 
bst r13,3 - bld r21,6 - bst r25,3 - bld r21,7 - bst r5,4 - bld r22,0 - bst r9,4 - bld r22,1 - bst r13,4 - bld r22,2 - bst r25,4 - bld r22,3 - bst r5,5 - bld r22,4 - bst r9,5 - bld r22,5 - bst r13,5 - bld r22,6 - bst r25,5 - bld r22,7 - bst r5,6 - bld r23,0 - bst r9,6 - bld r23,1 - bst r13,6 - bld r23,2 - bst r25,6 - bld r23,3 - bst r5,7 - bld r23,4 - bst r9,7 - bld r23,5 - bst r13,7 - bld r23,6 - bst r25,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128t_encrypt, .-gift128t_encrypt - - .text -.global gift128t_decrypt - .type gift128t_decrypt, @function -gift128t_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 36 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r2,0 - bst r20,1 - bld r6,0 - bst r20,2 - bld r10,0 - bst r20,3 - bld r14,0 - bst r20,4 - bld r2,1 - bst r20,5 - bld r6,1 - bst r20,6 - bld r10,1 - bst r20,7 - bld r14,1 - bst r21,0 - bld r2,2 - bst r21,1 - bld r6,2 - bst r21,2 - bld r10,2 - bst r21,3 - bld r14,2 - bst r21,4 - bld r2,3 - bst r21,5 - bld r6,3 - bst r21,6 - bld r10,3 - bst r21,7 - bld r14,3 - bst r22,0 - bld r2,4 - bst r22,1 - bld r6,4 - bst r22,2 - bld r10,4 - bst r22,3 - bld r14,4 - bst r22,4 - bld r2,5 - bst r22,5 - bld r6,5 - bst r22,6 - bld r10,5 - bst r22,7 - bld r14,5 - bst r23,0 - bld r2,6 - bst r23,1 - bld r6,6 - bst r23,2 - bld r10,6 - bst r23,3 - bld r14,6 - bst r23,4 - bld r2,7 - bst r23,5 - bld r6,7 - bst r23,6 - bld r10,7 - bst r23,7 - bld r14,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r3,0 - bst r20,1 - bld r7,0 - bst r20,2 - bld r11,0 - bst r20,3 - bld r15,0 - bst r20,4 - bld r3,1 - bst r20,5 - bld r7,1 - bst r20,6 - bld r11,1 - bst r20,7 - bld r15,1 - bst r21,0 - bld r3,2 - bst r21,1 - bld r7,2 - bst r21,2 - bld r11,2 - bst r21,3 - bld r15,2 - bst r21,4 - bld r3,3 - bst r21,5 - bld r7,3 - bst r21,6 - bld r11,3 - bst r21,7 - bld r15,3 - bst r22,0 - bld r3,4 - bst r22,1 - bld r7,4 - bst r22,2 - bld r11,4 - bst r22,3 - bld r15,4 - bst r22,4 - bld r3,5 - bst r22,5 - bld r7,5 - bst r22,6 - bld r11,5 - bst r22,7 - bld r15,5 - bst r23,0 - bld r3,6 - bst r23,1 - bld r7,6 - bst r23,2 - bld r11,6 - bst r23,3 - bld r15,6 - bst r23,4 - bld r3,7 - bst r23,5 - bld r7,7 - bst r23,6 - bld r11,7 - bst r23,7 - bld r15,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r4,0 - bst r20,1 - bld r8,0 - bst r20,2 - bld r12,0 - bst r20,3 - bld r24,0 - bst r20,4 - bld r4,1 - bst r20,5 - bld r8,1 - bst r20,6 - bld r12,1 - bst r20,7 - bld r24,1 - bst r21,0 - bld r4,2 - bst r21,1 - bld r8,2 - bst r21,2 - bld r12,2 - bst r21,3 - bld r24,2 - bst r21,4 - bld r4,3 - bst r21,5 - bld r8,3 - bst r21,6 - bld r12,3 - bst r21,7 - bld r24,3 - bst r22,0 - bld r4,4 - bst r22,1 - bld r8,4 - bst r22,2 - bld r12,4 - bst r22,3 - bld r24,4 - bst r22,4 - bld r4,5 - bst r22,5 - bld r8,5 - bst r22,6 - bld r12,5 - bst r22,7 - bld r24,5 - bst r23,0 - bld r4,6 - bst r23,1 - bld r8,6 - bst r23,2 - bld r12,6 - bst r23,3 - bld r24,6 - 
bst r23,4 - bld r4,7 - bst r23,5 - bld r8,7 - bst r23,6 - bld r12,7 - bst r23,7 - bld r24,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r5,0 - bst r20,1 - bld r9,0 - bst r20,2 - bld r13,0 - bst r20,3 - bld r25,0 - bst r20,4 - bld r5,1 - bst r20,5 - bld r9,1 - bst r20,6 - bld r13,1 - bst r20,7 - bld r25,1 - bst r21,0 - bld r5,2 - bst r21,1 - bld r9,2 - bst r21,2 - bld r13,2 - bst r21,3 - bld r25,2 - bst r21,4 - bld r5,3 - bst r21,5 - bld r9,3 - bst r21,6 - bld r13,3 - bst r21,7 - bld r25,3 - bst r22,0 - bld r5,4 - bst r22,1 - bld r9,4 - bst r22,2 - bld r13,4 - bst r22,3 - bld r25,4 - bst r22,4 - bld r5,5 - bst r22,5 - bld r9,5 - bst r22,6 - bld r13,5 - bst r22,7 - bld r25,5 - bst r23,0 - bld r5,6 - bst r23,1 - bld r9,6 - bst r23,2 - bld r13,6 - bst r23,3 - bld r25,6 - bst r23,4 - bld r5,7 - bst r23,5 - bld r9,7 - bst r23,6 - bld r13,7 - bst r23,7 - bld r25,7 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r16,Z+14 - ldd r17,Z+15 - mov r0,r17 - mov r17,r26 - mov r26,r0 - movw r20,r26 - movw r22,r16 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,15 - mov r21,r1 - andi r22,15 - mov r23,r1 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - mov r0,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r0 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,3 - andi r21,3 - andi r22,3 - andi r23,3 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r0,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r0 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,17 - andi r21,17 - andi r22,17 - andi r23,17 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r16 - std Y+4,r17 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r16,Z+6 - ldd r17,Z+7 - mov r0,r17 - mov r17,r26 - mov r26,r0 - movw r20,r26 - movw r22,r16 - movw r20,r22 - mov r22,r1 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - andi r20,51 - andi r21,51 - eor r26,r20 - eor r27,r21 - mov r22,r1 - mov r23,r1 - movw r22,r20 - mov r20,r1 - mov r21,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - mov 
r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,15 - mov r21,r1 - andi r22,15 - mov r23,r1 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,85 - mov r21,r1 - andi r22,85 - mov r23,r1 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r16 - std Y+8,r17 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r16,Z+10 - ldd r17,Z+11 - mov r0,r17 - mov r17,r26 - mov r26,r0 - movw r20,r26 - movw r22,r16 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,15 - mov r21,r1 - andi r22,15 - mov r23,r1 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - mov r0,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r0 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,3 - andi r21,3 - andi r22,3 - andi r23,3 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r0,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r0 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,17 - andi r21,17 - andi r22,17 - andi r23,17 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r16 - std Y+12,r17 - ld r26,Z - ldd r27,Z+1 - ldd r16,Z+2 - ldd r17,Z+3 - mov r0,r17 - mov r17,r26 - mov r26,r0 - movw r20,r26 - movw r22,r16 - movw r20,r22 
- mov r22,r1 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - andi r20,51 - andi r21,51 - eor r26,r20 - eor r27,r21 - mov r22,r1 - mov r23,r1 - movw r22,r20 - mov r20,r1 - mov r21,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,15 - mov r21,r1 - andi r22,15 - mov r23,r1 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,85 - mov r21,r1 - andi r22,85 - mov r23,r1 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r16 - std Y+16,r17 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r26,hh8(table_1) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r19,40 - mov r26,r1 -939: - ldd r0,Y+13 - ldd r20,Y+9 - std Y+9,r0 - ldd r0,Y+5 - std Y+5,r20 - ldd r20,Y+1 - std Y+1,r0 - ldd r0,Y+14 - ldd r21,Y+10 - std Y+10,r0 - ldd r0,Y+6 - std Y+6,r21 - ldd r21,Y+2 - std Y+2,r0 - ldd r0,Y+15 - ldd r22,Y+11 - std Y+11,r0 - ldd r0,Y+7 - std Y+7,r22 - ldd r22,Y+3 - std Y+3,r0 - ldd r0,Y+16 - ldd r23,Y+12 - std Y+12,r0 - ldd r0,Y+8 - std Y+8,r23 - ldd r23,Y+4 - std Y+4,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r0 - lsr r21 - ror r20 - ror r0 - lsr r21 - ror r20 - ror r0 - lsr r21 - ror r20 - ror r0 - or r21,r0 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - std Y+13,r20 - std Y+14,r21 - std Y+15,r22 - std Y+16,r23 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ldd r0,Y+5 - eor r10,r0 - ldd r0,Y+6 - eor r11,r0 - ldd r0,Y+7 - eor r12,r0 - ldd r0,Y+8 - eor r13,r0 - ldi r20,128 - eor r25,r20 - dec r19 - mov r30,r19 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - eor r14,r20 - bst r2,1 - bld r0,0 - bst r5,0 - bld r2,1 - bst r2,6 - bld r5,0 - bst r4,1 - bld r2,6 - bst r5,4 - bld r4,1 - bst r2,7 - bld r5,4 - bst r3,1 - bld r2,7 - bst r5,2 - bld r3,1 - bst r4,6 - bld r5,2 - bst r4,5 - bld r4,6 - bst r5,5 - bld r4,5 - bst r5,7 - bld r5,5 - bst r3,7 - bld r5,7 - bst r3,3 - bld r3,7 - bst r3,2 - bld r3,3 - bst r4,2 - bld r3,2 - bst r4,4 - bld r4,2 - bst r2,5 - bld r4,4 - bst r5,1 - bld r2,5 - bst r5,6 - bld r5,1 - bst r4,7 - bld r5,6 - bst r3,5 - bld r4,7 - bst r5,3 - bld r3,5 - bst r3,6 - bld r5,3 - bst r4,3 - bld r3,6 - bst r3,4 - bld r4,3 - 
bst r2,3 - bld r3,4 - bst r3,0 - bld r2,3 - bst r2,2 - bld r3,0 - bst r4,0 - bld r2,2 - bst r2,4 - bld r4,0 - bst r0,0 - bld r2,4 - bst r6,0 - bld r0,0 - bst r7,0 - bld r6,0 - bst r7,2 - bld r7,0 - bst r9,2 - bld r7,2 - bst r9,6 - bld r9,2 - bst r9,7 - bld r9,6 - bst r8,7 - bld r9,7 - bst r8,5 - bld r8,7 - bst r6,5 - bld r8,5 - bst r6,1 - bld r6,5 - bst r0,0 - bld r6,1 - bst r6,2 - bld r0,0 - bst r9,0 - bld r6,2 - bst r7,6 - bld r9,0 - bst r9,3 - bld r7,6 - bst r8,6 - bld r9,3 - bst r9,5 - bld r8,6 - bst r6,7 - bld r9,5 - bst r8,1 - bld r6,7 - bst r6,4 - bld r8,1 - bst r7,1 - bld r6,4 - bst r0,0 - bld r7,1 - bst r6,3 - bld r0,0 - bst r8,0 - bld r6,3 - bst r7,4 - bld r8,0 - bst r7,3 - bld r7,4 - bst r8,2 - bld r7,3 - bst r9,4 - bld r8,2 - bst r7,7 - bld r9,4 - bst r8,3 - bld r7,7 - bst r8,4 - bld r8,3 - bst r7,5 - bld r8,4 - bst r0,0 - bld r7,5 - bst r6,6 - bld r0,0 - bst r9,1 - bld r6,6 - bst r0,0 - bld r9,1 - bst r10,0 - bld r0,0 - bst r12,0 - bld r10,0 - bst r12,4 - bld r12,0 - bst r12,5 - bld r12,4 - bst r11,5 - bld r12,5 - bst r11,3 - bld r11,5 - bst r13,2 - bld r11,3 - bst r10,6 - bld r13,2 - bst r10,1 - bld r10,6 - bst r11,0 - bld r10,1 - bst r12,2 - bld r11,0 - bst r10,4 - bld r12,2 - bst r12,1 - bld r10,4 - bst r11,4 - bld r12,1 - bst r12,3 - bld r11,4 - bst r13,4 - bld r12,3 - bst r12,7 - bld r13,4 - bst r13,5 - bld r12,7 - bst r11,7 - bld r13,5 - bst r13,3 - bld r11,7 - bst r13,6 - bld r13,3 - bst r10,7 - bld r13,6 - bst r13,1 - bld r10,7 - bst r11,6 - bld r13,1 - bst r10,3 - bld r11,6 - bst r13,0 - bld r10,3 - bst r12,6 - bld r13,0 - bst r10,5 - bld r12,6 - bst r11,1 - bld r10,5 - bst r11,2 - bld r11,1 - bst r10,2 - bld r11,2 - bst r0,0 - bld r10,2 - bst r14,0 - bld r0,0 - bst r25,0 - bld r14,0 - bst r25,6 - bld r25,0 - bst r15,7 - bld r25,6 - bst r14,3 - bld r15,7 - bst r0,0 - bld r14,3 - bst r14,1 - bld r0,0 - bst r24,0 - bld r14,1 - bst r25,4 - bld r24,0 - bst r25,7 - bld r25,4 - bst r14,7 - bld r25,7 - bst r0,0 - bld r14,7 - bst r14,2 - bld r0,0 - bst r15,0 - bld r14,2 - bst r25,2 - bld r15,0 - bst r15,6 - bld r25,2 - bst r15,3 - bld r15,6 - bst r0,0 - bld r15,3 - bst r14,4 - bld r0,0 - bst r25,1 - bld r14,4 - bst r24,6 - bld r25,1 - bst r15,5 - bld r24,6 - bst r24,3 - bld r15,5 - bst r0,0 - bld r24,3 - bst r14,5 - bld r0,0 - bst r24,1 - bld r14,5 - bst r24,4 - bld r24,1 - bst r25,5 - bld r24,4 - bst r24,7 - bld r25,5 - bst r0,0 - bld r24,7 - bst r14,6 - bld r0,0 - bst r15,1 - bld r14,6 - bst r24,2 - bld r15,1 - bst r15,4 - bld r24,2 - bst r25,3 - bld r15,4 - bst r0,0 - bld r25,3 - movw r20,r14 - movw r22,r24 - movw r14,r2 - movw r24,r4 - movw r2,r20 - movw r4,r22 - and r20,r6 - and r21,r7 - and r22,r8 - and r23,r9 - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - com r14 - com r15 - com r24 - com r25 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - cp r19,r1 - breq 1355f - inc r26 - ldi r27,5 - cpse r26,r27 - rjmp 939b - mov r26,r1 - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rjmp 939b -1355: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd 
r26,Y+17 - ldd r27,Y+18 - bst r2,0 - bld r20,0 - bst r6,0 - bld r20,1 - bst r10,0 - bld r20,2 - bst r14,0 - bld r20,3 - bst r2,1 - bld r20,4 - bst r6,1 - bld r20,5 - bst r10,1 - bld r20,6 - bst r14,1 - bld r20,7 - bst r2,2 - bld r21,0 - bst r6,2 - bld r21,1 - bst r10,2 - bld r21,2 - bst r14,2 - bld r21,3 - bst r2,3 - bld r21,4 - bst r6,3 - bld r21,5 - bst r10,3 - bld r21,6 - bst r14,3 - bld r21,7 - bst r2,4 - bld r22,0 - bst r6,4 - bld r22,1 - bst r10,4 - bld r22,2 - bst r14,4 - bld r22,3 - bst r2,5 - bld r22,4 - bst r6,5 - bld r22,5 - bst r10,5 - bld r22,6 - bst r14,5 - bld r22,7 - bst r2,6 - bld r23,0 - bst r6,6 - bld r23,1 - bst r10,6 - bld r23,2 - bst r14,6 - bld r23,3 - bst r2,7 - bld r23,4 - bst r6,7 - bld r23,5 - bst r10,7 - bld r23,6 - bst r14,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r3,0 - bld r20,0 - bst r7,0 - bld r20,1 - bst r11,0 - bld r20,2 - bst r15,0 - bld r20,3 - bst r3,1 - bld r20,4 - bst r7,1 - bld r20,5 - bst r11,1 - bld r20,6 - bst r15,1 - bld r20,7 - bst r3,2 - bld r21,0 - bst r7,2 - bld r21,1 - bst r11,2 - bld r21,2 - bst r15,2 - bld r21,3 - bst r3,3 - bld r21,4 - bst r7,3 - bld r21,5 - bst r11,3 - bld r21,6 - bst r15,3 - bld r21,7 - bst r3,4 - bld r22,0 - bst r7,4 - bld r22,1 - bst r11,4 - bld r22,2 - bst r15,4 - bld r22,3 - bst r3,5 - bld r22,4 - bst r7,5 - bld r22,5 - bst r11,5 - bld r22,6 - bst r15,5 - bld r22,7 - bst r3,6 - bld r23,0 - bst r7,6 - bld r23,1 - bst r11,6 - bld r23,2 - bst r15,6 - bld r23,3 - bst r3,7 - bld r23,4 - bst r7,7 - bld r23,5 - bst r11,7 - bld r23,6 - bst r15,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r4,0 - bld r20,0 - bst r8,0 - bld r20,1 - bst r12,0 - bld r20,2 - bst r24,0 - bld r20,3 - bst r4,1 - bld r20,4 - bst r8,1 - bld r20,5 - bst r12,1 - bld r20,6 - bst r24,1 - bld r20,7 - bst r4,2 - bld r21,0 - bst r8,2 - bld r21,1 - bst r12,2 - bld r21,2 - bst r24,2 - bld r21,3 - bst r4,3 - bld r21,4 - bst r8,3 - bld r21,5 - bst r12,3 - bld r21,6 - bst r24,3 - bld r21,7 - bst r4,4 - bld r22,0 - bst r8,4 - bld r22,1 - bst r12,4 - bld r22,2 - bst r24,4 - bld r22,3 - bst r4,5 - bld r22,4 - bst r8,5 - bld r22,5 - bst r12,5 - bld r22,6 - bst r24,5 - bld r22,7 - bst r4,6 - bld r23,0 - bst r8,6 - bld r23,1 - bst r12,6 - bld r23,2 - bst r24,6 - bld r23,3 - bst r4,7 - bld r23,4 - bst r8,7 - bld r23,5 - bst r12,7 - bld r23,6 - bst r24,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r5,0 - bld r20,0 - bst r9,0 - bld r20,1 - bst r13,0 - bld r20,2 - bst r25,0 - bld r20,3 - bst r5,1 - bld r20,4 - bst r9,1 - bld r20,5 - bst r13,1 - bld r20,6 - bst r25,1 - bld r20,7 - bst r5,2 - bld r21,0 - bst r9,2 - bld r21,1 - bst r13,2 - bld r21,2 - bst r25,2 - bld r21,3 - bst r5,3 - bld r21,4 - bst r9,3 - bld r21,5 - bst r13,3 - bld r21,6 - bst r25,3 - bld r21,7 - bst r5,4 - bld r22,0 - bst r9,4 - bld r22,1 - bst r13,4 - bld r22,2 - bst r25,4 - bld r22,3 - bst r5,5 - bld r22,4 - bst r9,5 - bld r22,5 - bst r13,5 - bld r22,6 - bst r25,5 - bld r22,7 - bst r5,6 - bld r23,0 - bst r9,6 - bld r23,1 - bst r13,6 - bld r23,2 - bst r25,6 - bld r23,3 - bst r5,7 - bld r23,4 - bst r9,7 - bld r23,5 - bst r13,7 - bld r23,6 - bst r25,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128t_decrypt, .-gift128t_decrypt - -#endif - -#endif diff --git 
a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128n-tiny-avr.S b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128n-tiny-avr.S deleted file mode 100644 index dd1f7b9..0000000 --- a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-gift128n-tiny-avr.S +++ /dev/null @@ -1,9480 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_TINY - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - .byte 3 - .byte 3 - .byte 39 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - .byte 2 - .byte 0 - .byte 80 - .byte 20 - .byte 129 - .byte 1 - .byte 2 - .byte 1 - .byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128n_init - .type gift128n_init, @function -gift128n_init: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 16 - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - st Z,r22 - std Z+1,r23 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size gift128n_init, .-gift128n_init - - .text -.global gift128n_encrypt - .type gift128n_encrypt, @function -gift128n_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - 
in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - movw r30,r28 - adiw r30,1 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - ldi r24,4 -35: - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - mov r0,r22 - mov r22,r4 - mov r4,r0 - mov r0,r23 - mov r23,r5 - mov r5,r0 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - dec r24 - breq 5117f - rjmp 35b -5117: - subi r30,80 - sbc r31,r1 - ldi r24,2 -121: - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - st Z,r3 - std Z+1,r23 - std Z+2,r2 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov 
r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+4,r3 - std Z+5,r23 - std Z+6,r2 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+8,r3 - std Z+9,r23 - std Z+10,r2 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 
- rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+12,r3 - std Z+13,r23 - std Z+14,r2 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r3 - std Z+17,r23 - std Z+18,r2 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - 
lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+20,r3 - std Z+21,r23 - std Z+22,r2 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+24,r3 - std Z+25,r23 - std Z+26,r2 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - 
eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+28,r3 - std Z+29,r23 - std Z+30,r2 - std Z+31,r22 - dec r24 - breq 1270f - adiw r30,40 - rjmp 121b -1270: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r22,0 - bst r18,1 - bld r4,0 - bst r18,2 - bld r8,0 - bst r18,3 - bld r12,0 - bst r18,4 - bld r22,1 - bst r18,5 - bld r4,1 - bst r18,6 - bld r8,1 - bst r18,7 - bld r12,1 - bst r19,0 - bld r22,2 - bst r19,1 - bld r4,2 - bst r19,2 - bld r8,2 - bst r19,3 - bld r12,2 - bst r19,4 - bld r22,3 - bst r19,5 - bld r4,3 - bst r19,6 - bld r8,3 - bst r19,7 - bld r12,3 - bst r20,0 - bld r22,4 - bst r20,1 - bld r4,4 - bst r20,2 - bld r8,4 - bst r20,3 - bld r12,4 - bst r20,4 - bld r22,5 - bst r20,5 - bld r4,5 - bst r20,6 - bld r8,5 - bst r20,7 - bld r12,5 - bst r21,0 - bld r22,6 - bst r21,1 - bld r4,6 - bst r21,2 - bld r8,6 - bst r21,3 - bld r12,6 - bst r21,4 - bld r22,7 - bst r21,5 - bld r4,7 - bst r21,6 - bld r8,7 - bst r21,7 - bld r12,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r23,0 - bst r18,1 - bld r5,0 - bst r18,2 - bld r9,0 - bst r18,3 - bld r13,0 - bst r18,4 - bld r23,1 - bst r18,5 - bld r5,1 - bst r18,6 - bld r9,1 - bst r18,7 - bld r13,1 - bst r19,0 - bld r23,2 - bst r19,1 - bld r5,2 - bst r19,2 - bld r9,2 - bst r19,3 - bld r13,2 - bst r19,4 - bld r23,3 - bst r19,5 - bld r5,3 - bst r19,6 - bld r9,3 - bst r19,7 - bld r13,3 - bst r20,0 - bld r23,4 - bst r20,1 - bld r5,4 - bst r20,2 - bld r9,4 - bst r20,3 - bld r13,4 - bst r20,4 - bld r23,5 - bst r20,5 - bld r5,5 - bst r20,6 - bld r9,5 - bst r20,7 - bld r13,5 - bst r21,0 - bld r23,6 - bst r21,1 - bld r5,6 - bst r21,2 - bld r9,6 - bst r21,3 - bld r13,6 - bst r21,4 - bld r23,7 - bst r21,5 - bld r5,7 - bst r21,6 - bld r9,7 - bst r21,7 - bld r13,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r2,0 - bst r18,1 - bld r6,0 - bst r18,2 - bld r10,0 - bst r18,3 - bld r14,0 - bst r18,4 - bld r2,1 - bst r18,5 - bld r6,1 - bst r18,6 - bld r10,1 - bst r18,7 - bld r14,1 - bst r19,0 - bld r2,2 - bst r19,1 - bld r6,2 - bst r19,2 - bld r10,2 - bst r19,3 - bld r14,2 - bst r19,4 - bld r2,3 - bst r19,5 - bld r6,3 - bst r19,6 - bld r10,3 - bst r19,7 - bld r14,3 - bst r20,0 - bld r2,4 - bst r20,1 - bld r6,4 - bst r20,2 - bld r10,4 - bst r20,3 - bld r14,4 - bst r20,4 - bld r2,5 - bst r20,5 - bld r6,5 - bst r20,6 - bld r10,5 - bst r20,7 - bld r14,5 - bst r21,0 - bld r2,6 - bst r21,1 - bld r6,6 - bst r21,2 - bld r10,6 - bst r21,3 - bld r14,6 - bst r21,4 - bld r2,7 - bst r21,5 - bld r6,7 - bst r21,6 - bld r10,7 - bst r21,7 - bld r14,7 - ld r18,X+ - ld r19,X+ - ld 
r20,X+ - ld r21,X+ - bst r18,0 - bld r3,0 - bst r18,1 - bld r7,0 - bst r18,2 - bld r11,0 - bst r18,3 - bld r15,0 - bst r18,4 - bld r3,1 - bst r18,5 - bld r7,1 - bst r18,6 - bld r11,1 - bst r18,7 - bld r15,1 - bst r19,0 - bld r3,2 - bst r19,1 - bld r7,2 - bst r19,2 - bld r11,2 - bst r19,3 - bld r15,2 - bst r19,4 - bld r3,3 - bst r19,5 - bld r7,3 - bst r19,6 - bld r11,3 - bst r19,7 - bld r15,3 - bst r20,0 - bld r3,4 - bst r20,1 - bld r7,4 - bst r20,2 - bld r11,4 - bst r20,3 - bld r15,4 - bst r20,4 - bld r3,5 - bst r20,5 - bld r7,5 - bst r20,6 - bld r11,5 - bst r20,7 - bld r15,5 - bst r21,0 - bld r3,6 - bst r21,1 - bld r7,6 - bst r21,2 - bld r11,6 - bst r21,3 - bld r15,6 - bst r21,4 - bld r3,7 - bst r21,5 - bld r7,7 - bst r21,6 - bld r11,7 - bst r21,7 - bld r15,7 - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 1585f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2323f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 1585f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2323f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 1585f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2323f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 1585f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2323f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 1585f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2323f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 1585f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2323f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 1585f - rcall 1585f - rjmp 2797f -1585: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 
- rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld 
r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor 
r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - 
eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -2323: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - 
eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -2797: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - bst r22,0 - bld r18,0 - bst r4,0 - bld r18,1 - bst r8,0 - bld r18,2 - bst r12,0 - bld r18,3 - bst r22,1 - bld r18,4 - bst r4,1 - bld r18,5 - bst r8,1 - bld r18,6 - bst r12,1 - bld r18,7 - bst r22,2 - bld r19,0 - bst r4,2 - bld r19,1 - bst r8,2 - bld r19,2 - bst r12,2 - bld r19,3 - bst r22,3 - bld r19,4 - bst r4,3 - bld r19,5 - bst r8,3 - bld r19,6 - bst r12,3 - bld r19,7 - bst r22,4 - bld r20,0 - bst r4,4 - bld r20,1 - bst r8,4 - bld r20,2 - bst r12,4 - bld r20,3 - bst r22,5 - bld r20,4 - bst r4,5 - bld r20,5 - bst r8,5 - bld r20,6 - bst r12,5 - bld r20,7 - bst r22,6 - bld r21,0 - bst r4,6 - bld r21,1 - bst r8,6 - bld r21,2 - bst r12,6 - bld r21,3 - bst r22,7 - bld r21,4 - bst r4,7 - bld r21,5 - bst r8,7 - bld r21,6 - bst r12,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r23,0 - bld r18,0 - bst r5,0 - bld r18,1 - bst r9,0 - bld r18,2 - bst r13,0 - bld r18,3 - bst r23,1 - bld r18,4 - bst r5,1 - bld r18,5 - bst r9,1 - bld r18,6 - bst r13,1 - 
bld r18,7 - bst r23,2 - bld r19,0 - bst r5,2 - bld r19,1 - bst r9,2 - bld r19,2 - bst r13,2 - bld r19,3 - bst r23,3 - bld r19,4 - bst r5,3 - bld r19,5 - bst r9,3 - bld r19,6 - bst r13,3 - bld r19,7 - bst r23,4 - bld r20,0 - bst r5,4 - bld r20,1 - bst r9,4 - bld r20,2 - bst r13,4 - bld r20,3 - bst r23,5 - bld r20,4 - bst r5,5 - bld r20,5 - bst r9,5 - bld r20,6 - bst r13,5 - bld r20,7 - bst r23,6 - bld r21,0 - bst r5,6 - bld r21,1 - bst r9,6 - bld r21,2 - bst r13,6 - bld r21,3 - bst r23,7 - bld r21,4 - bst r5,7 - bld r21,5 - bst r9,7 - bld r21,6 - bst r13,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r2,0 - bld r18,0 - bst r6,0 - bld r18,1 - bst r10,0 - bld r18,2 - bst r14,0 - bld r18,3 - bst r2,1 - bld r18,4 - bst r6,1 - bld r18,5 - bst r10,1 - bld r18,6 - bst r14,1 - bld r18,7 - bst r2,2 - bld r19,0 - bst r6,2 - bld r19,1 - bst r10,2 - bld r19,2 - bst r14,2 - bld r19,3 - bst r2,3 - bld r19,4 - bst r6,3 - bld r19,5 - bst r10,3 - bld r19,6 - bst r14,3 - bld r19,7 - bst r2,4 - bld r20,0 - bst r6,4 - bld r20,1 - bst r10,4 - bld r20,2 - bst r14,4 - bld r20,3 - bst r2,5 - bld r20,4 - bst r6,5 - bld r20,5 - bst r10,5 - bld r20,6 - bst r14,5 - bld r20,7 - bst r2,6 - bld r21,0 - bst r6,6 - bld r21,1 - bst r10,6 - bld r21,2 - bst r14,6 - bld r21,3 - bst r2,7 - bld r21,4 - bst r6,7 - bld r21,5 - bst r10,7 - bld r21,6 - bst r14,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r3,0 - bld r18,0 - bst r7,0 - bld r18,1 - bst r11,0 - bld r18,2 - bst r15,0 - bld r18,3 - bst r3,1 - bld r18,4 - bst r7,1 - bld r18,5 - bst r11,1 - bld r18,6 - bst r15,1 - bld r18,7 - bst r3,2 - bld r19,0 - bst r7,2 - bld r19,1 - bst r11,2 - bld r19,2 - bst r15,2 - bld r19,3 - bst r3,3 - bld r19,4 - bst r7,3 - bld r19,5 - bst r11,3 - bld r19,6 - bst r15,3 - bld r19,7 - bst r3,4 - bld r20,0 - bst r7,4 - bld r20,1 - bst r11,4 - bld r20,2 - bst r15,4 - bld r20,3 - bst r3,5 - bld r20,4 - bst r7,5 - bld r20,5 - bst r11,5 - bld r20,6 - bst r15,5 - bld r20,7 - bst r3,6 - bld r21,0 - bst r7,6 - bld r21,1 - bst r11,6 - bld r21,2 - bst r15,6 - bld r21,3 - bst r3,7 - bld r21,4 - bst r7,7 - bld r21,5 - bst r11,7 - bld r21,6 - bst r15,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128n_encrypt, .-gift128n_encrypt - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 40 -table_1: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - - .text -.global gift128n_decrypt - .type gift128n_decrypt, @function -gift128n_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r18,X+ - ld r19,X+ 
- ld r20,X+ - ld r21,X+ - bst r18,0 - bld r22,0 - bst r18,1 - bld r4,0 - bst r18,2 - bld r8,0 - bst r18,3 - bld r12,0 - bst r18,4 - bld r22,1 - bst r18,5 - bld r4,1 - bst r18,6 - bld r8,1 - bst r18,7 - bld r12,1 - bst r19,0 - bld r22,2 - bst r19,1 - bld r4,2 - bst r19,2 - bld r8,2 - bst r19,3 - bld r12,2 - bst r19,4 - bld r22,3 - bst r19,5 - bld r4,3 - bst r19,6 - bld r8,3 - bst r19,7 - bld r12,3 - bst r20,0 - bld r22,4 - bst r20,1 - bld r4,4 - bst r20,2 - bld r8,4 - bst r20,3 - bld r12,4 - bst r20,4 - bld r22,5 - bst r20,5 - bld r4,5 - bst r20,6 - bld r8,5 - bst r20,7 - bld r12,5 - bst r21,0 - bld r22,6 - bst r21,1 - bld r4,6 - bst r21,2 - bld r8,6 - bst r21,3 - bld r12,6 - bst r21,4 - bld r22,7 - bst r21,5 - bld r4,7 - bst r21,6 - bld r8,7 - bst r21,7 - bld r12,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r23,0 - bst r18,1 - bld r5,0 - bst r18,2 - bld r9,0 - bst r18,3 - bld r13,0 - bst r18,4 - bld r23,1 - bst r18,5 - bld r5,1 - bst r18,6 - bld r9,1 - bst r18,7 - bld r13,1 - bst r19,0 - bld r23,2 - bst r19,1 - bld r5,2 - bst r19,2 - bld r9,2 - bst r19,3 - bld r13,2 - bst r19,4 - bld r23,3 - bst r19,5 - bld r5,3 - bst r19,6 - bld r9,3 - bst r19,7 - bld r13,3 - bst r20,0 - bld r23,4 - bst r20,1 - bld r5,4 - bst r20,2 - bld r9,4 - bst r20,3 - bld r13,4 - bst r20,4 - bld r23,5 - bst r20,5 - bld r5,5 - bst r20,6 - bld r9,5 - bst r20,7 - bld r13,5 - bst r21,0 - bld r23,6 - bst r21,1 - bld r5,6 - bst r21,2 - bld r9,6 - bst r21,3 - bld r13,6 - bst r21,4 - bld r23,7 - bst r21,5 - bld r5,7 - bst r21,6 - bld r9,7 - bst r21,7 - bld r13,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r2,0 - bst r18,1 - bld r6,0 - bst r18,2 - bld r10,0 - bst r18,3 - bld r14,0 - bst r18,4 - bld r2,1 - bst r18,5 - bld r6,1 - bst r18,6 - bld r10,1 - bst r18,7 - bld r14,1 - bst r19,0 - bld r2,2 - bst r19,1 - bld r6,2 - bst r19,2 - bld r10,2 - bst r19,3 - bld r14,2 - bst r19,4 - bld r2,3 - bst r19,5 - bld r6,3 - bst r19,6 - bld r10,3 - bst r19,7 - bld r14,3 - bst r20,0 - bld r2,4 - bst r20,1 - bld r6,4 - bst r20,2 - bld r10,4 - bst r20,3 - bld r14,4 - bst r20,4 - bld r2,5 - bst r20,5 - bld r6,5 - bst r20,6 - bld r10,5 - bst r20,7 - bld r14,5 - bst r21,0 - bld r2,6 - bst r21,1 - bld r6,6 - bst r21,2 - bld r10,6 - bst r21,3 - bld r14,6 - bst r21,4 - bld r2,7 - bst r21,5 - bld r6,7 - bst r21,6 - bld r10,7 - bst r21,7 - bld r14,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r3,0 - bst r18,1 - bld r7,0 - bst r18,2 - bld r11,0 - bst r18,3 - bld r15,0 - bst r18,4 - bld r3,1 - bst r18,5 - bld r7,1 - bst r18,6 - bld r11,1 - bst r18,7 - bld r15,1 - bst r19,0 - bld r3,2 - bst r19,1 - bld r7,2 - bst r19,2 - bld r11,2 - bst r19,3 - bld r15,2 - bst r19,4 - bld r3,3 - bst r19,5 - bld r7,3 - bst r19,6 - bld r11,3 - bst r19,7 - bld r15,3 - bst r20,0 - bld r3,4 - bst r20,1 - bld r7,4 - bst r20,2 - bld r11,4 - bst r20,3 - bld r15,4 - bst r20,4 - bld r3,5 - bst r20,5 - bld r7,5 - bst r20,6 - bld r11,5 - bst r20,7 - bld r15,5 - bst r21,0 - bld r3,6 - bst r21,1 - bld r7,6 - bst r21,2 - bld r11,6 - bst r21,3 - bld r15,6 - bst r21,4 - bld r3,7 - bst r21,5 - bld r7,7 - bst r21,6 - bld r11,7 - bst r21,7 - bld r15,7 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r27 - mov r27,r26 
- mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r17,hh8(table_1) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -370: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 522f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 522f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 522f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 522f - cpse r16,r1 - rjmp 370b - rjmp 867f -522: - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld 
r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - bld r11,3 - bst r8,7 - bld r11,6 - bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst r9,1 - bld r8,5 - bst r9,2 - bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -867: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - bst r22,0 - bld r18,0 - bst r4,0 - bld r18,1 - bst r8,0 - bld r18,2 - bst r12,0 - bld r18,3 - bst r22,1 - bld 
r18,4 - bst r4,1 - bld r18,5 - bst r8,1 - bld r18,6 - bst r12,1 - bld r18,7 - bst r22,2 - bld r19,0 - bst r4,2 - bld r19,1 - bst r8,2 - bld r19,2 - bst r12,2 - bld r19,3 - bst r22,3 - bld r19,4 - bst r4,3 - bld r19,5 - bst r8,3 - bld r19,6 - bst r12,3 - bld r19,7 - bst r22,4 - bld r20,0 - bst r4,4 - bld r20,1 - bst r8,4 - bld r20,2 - bst r12,4 - bld r20,3 - bst r22,5 - bld r20,4 - bst r4,5 - bld r20,5 - bst r8,5 - bld r20,6 - bst r12,5 - bld r20,7 - bst r22,6 - bld r21,0 - bst r4,6 - bld r21,1 - bst r8,6 - bld r21,2 - bst r12,6 - bld r21,3 - bst r22,7 - bld r21,4 - bst r4,7 - bld r21,5 - bst r8,7 - bld r21,6 - bst r12,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r23,0 - bld r18,0 - bst r5,0 - bld r18,1 - bst r9,0 - bld r18,2 - bst r13,0 - bld r18,3 - bst r23,1 - bld r18,4 - bst r5,1 - bld r18,5 - bst r9,1 - bld r18,6 - bst r13,1 - bld r18,7 - bst r23,2 - bld r19,0 - bst r5,2 - bld r19,1 - bst r9,2 - bld r19,2 - bst r13,2 - bld r19,3 - bst r23,3 - bld r19,4 - bst r5,3 - bld r19,5 - bst r9,3 - bld r19,6 - bst r13,3 - bld r19,7 - bst r23,4 - bld r20,0 - bst r5,4 - bld r20,1 - bst r9,4 - bld r20,2 - bst r13,4 - bld r20,3 - bst r23,5 - bld r20,4 - bst r5,5 - bld r20,5 - bst r9,5 - bld r20,6 - bst r13,5 - bld r20,7 - bst r23,6 - bld r21,0 - bst r5,6 - bld r21,1 - bst r9,6 - bld r21,2 - bst r13,6 - bld r21,3 - bst r23,7 - bld r21,4 - bst r5,7 - bld r21,5 - bst r9,7 - bld r21,6 - bst r13,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r2,0 - bld r18,0 - bst r6,0 - bld r18,1 - bst r10,0 - bld r18,2 - bst r14,0 - bld r18,3 - bst r2,1 - bld r18,4 - bst r6,1 - bld r18,5 - bst r10,1 - bld r18,6 - bst r14,1 - bld r18,7 - bst r2,2 - bld r19,0 - bst r6,2 - bld r19,1 - bst r10,2 - bld r19,2 - bst r14,2 - bld r19,3 - bst r2,3 - bld r19,4 - bst r6,3 - bld r19,5 - bst r10,3 - bld r19,6 - bst r14,3 - bld r19,7 - bst r2,4 - bld r20,0 - bst r6,4 - bld r20,1 - bst r10,4 - bld r20,2 - bst r14,4 - bld r20,3 - bst r2,5 - bld r20,4 - bst r6,5 - bld r20,5 - bst r10,5 - bld r20,6 - bst r14,5 - bld r20,7 - bst r2,6 - bld r21,0 - bst r6,6 - bld r21,1 - bst r10,6 - bld r21,2 - bst r14,6 - bld r21,3 - bst r2,7 - bld r21,4 - bst r6,7 - bld r21,5 - bst r10,7 - bld r21,6 - bst r14,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r3,0 - bld r18,0 - bst r7,0 - bld r18,1 - bst r11,0 - bld r18,2 - bst r15,0 - bld r18,3 - bst r3,1 - bld r18,4 - bst r7,1 - bld r18,5 - bst r11,1 - bld r18,6 - bst r15,1 - bld r18,7 - bst r3,2 - bld r19,0 - bst r7,2 - bld r19,1 - bst r11,2 - bld r19,2 - bst r15,2 - bld r19,3 - bst r3,3 - bld r19,4 - bst r7,3 - bld r19,5 - bst r11,3 - bld r19,6 - bst r15,3 - bld r19,7 - bst r3,4 - bld r20,0 - bst r7,4 - bld r20,1 - bst r11,4 - bld r20,2 - bst r15,4 - bld r20,3 - bst r3,5 - bld r20,4 - bst r7,5 - bld r20,5 - bst r11,5 - bld r20,6 - bst r15,5 - bld r20,7 - bst r3,6 - bld r21,0 - bst r7,6 - bld r21,1 - bst r11,6 - bld r21,2 - bst r15,6 - bld r21,3 - bst r3,7 - bld r21,4 - bst r7,7 - bld r21,5 - bst r11,7 - bld r21,6 - bst r15,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128n_decrypt, .-gift128n_decrypt - - .text -.global gift128t_encrypt - .type gift128t_encrypt, @function -gift128t_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - 
push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ld r2,Z - ldd r3,Z+1 - ldd r4,Z+2 - ldd r5,Z+3 - ldd r6,Z+4 - ldd r7,Z+5 - ldd r8,Z+6 - ldd r9,Z+7 - ldd r10,Z+8 - ldd r11,Z+9 - ldd r12,Z+10 - ldd r13,Z+11 - ldd r14,Z+12 - ldd r15,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - movw r30,r28 - adiw r30,1 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - st Z+,r24 - st Z+,r25 - ldi r19,4 -35: - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - mov r0,r1 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - or r5,r0 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - mov r0,r4 - mov r4,r8 - mov r8,r0 - mov r0,r5 - mov r5,r9 - mov r9,r0 - st Z+,r14 - st Z+,r15 - st Z+,r24 - st Z+,r25 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - mov r0,r1 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - or r13,r0 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - mov r0,r12 - mov r12,r24 - mov r24,r0 - mov r0,r13 - mov r13,r25 - mov r25,r0 - dec r19 - breq 5117f - rjmp 35b -5117: - subi r30,80 - sbc r31,r1 - ldi r19,2 -121: - ld r2,Z - ldd r3,Z+1 - ldd r4,Z+2 - ldd r5,Z+3 - movw r20,r2 - movw r22,r4 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,85 - mov r21,r1 - andi r22,85 - mov r23,r1 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,15 - mov r21,r1 - andi r22,15 - mov r23,r1 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - movw r20,r22 - mov r22,r1 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - andi r20,51 - andi r21,51 - eor r2,r20 - eor r3,r21 - mov r22,r1 - mov r23,r1 - movw r22,r20 - mov r20,r1 - mov r21,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - st Z,r5 - std Z+1,r3 - std Z+2,r4 - std Z+3,r2 - ldd r2,Z+4 - ldd r3,Z+5 - ldd r4,Z+6 - ldd r5,Z+7 - movw r20,r2 - movw r22,r4 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 
- eor r22,r4 - eor r23,r5 - andi r20,85 - mov r21,r1 - andi r22,85 - mov r23,r1 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,15 - mov r21,r1 - andi r22,15 - mov r23,r1 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - movw r20,r22 - mov r22,r1 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - andi r20,51 - andi r21,51 - eor r2,r20 - eor r3,r21 - mov r22,r1 - mov r23,r1 - movw r22,r20 - mov r20,r1 - mov r21,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - std Z+4,r5 - std Z+5,r3 - std Z+6,r4 - std Z+7,r2 - ldd r2,Z+8 - ldd r3,Z+9 - ldd r4,Z+10 - ldd r5,Z+11 - movw r20,r2 - movw r22,r4 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,17 - andi r21,17 - andi r22,17 - andi r23,17 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r0,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r0 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,3 - andi r21,3 - andi r22,3 - andi r23,3 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r0,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r0 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,15 - mov r21,r1 - andi r22,15 - mov r23,r1 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - std Z+8,r5 - std Z+9,r3 - std Z+10,r4 - std Z+11,r2 - ldd r2,Z+12 - ldd r3,Z+13 - ldd r4,Z+14 - ldd r5,Z+15 - movw r20,r2 - movw r22,r4 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,17 - andi r21,17 - andi r22,17 - andi 
r23,17 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r0,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r0 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,3 - andi r21,3 - andi r22,3 - andi r23,3 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r0,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r0 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,15 - mov r21,r1 - andi r22,15 - mov r23,r1 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - std Z+12,r5 - std Z+13,r3 - std Z+14,r4 - std Z+15,r2 - ldd r2,Z+16 - ldd r3,Z+17 - ldd r4,Z+18 - ldd r5,Z+19 - movw r20,r2 - movw r22,r4 - mov r0,r1 - lsl r21 - rol r22 - rol r23 - rol r0 - movw r20,r22 - mov r22,r0 - mov r23,r1 - eor r20,r2 - eor r21,r3 - andi r20,170 - andi r21,170 - eor r2,r20 - eor r3,r21 - mov r22,r1 - mov r23,r1 - mov r0,r1 - lsr r22 - ror r21 - ror r20 - ror r0 - movw r22,r20 - mov r21,r0 - mov r20,r1 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - movw r20,r22 - mov r22,r1 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - andi r20,51 - andi r21,51 - eor r2,r20 - eor r3,r21 - mov r22,r1 - mov r23,r1 - movw r22,r20 - mov r20,r1 - mov r21,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - andi r20,240 - andi r21,240 - eor r2,r20 - eor r3,r21 - mov r22,r1 - mov r23,r1 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - std Z+16,r5 - std Z+17,r3 - std Z+18,r4 - std Z+19,r2 - ldd r2,Z+20 - ldd r3,Z+21 - ldd r4,Z+22 - ldd r5,Z+23 - movw r20,r2 - movw r22,r4 - mov r0,r1 - lsl r21 - rol r22 - rol r23 - rol r0 - movw r20,r22 - mov r22,r0 - mov r23,r1 - eor r20,r2 - eor r21,r3 - andi r20,170 - andi r21,170 - eor r2,r20 - eor r3,r21 - mov r22,r1 - mov r23,r1 - mov r0,r1 - lsr r22 - ror r21 - ror r20 - ror r0 - movw r22,r20 - mov r21,r0 - mov r20,r1 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - movw r20,r22 - mov r22,r1 - mov r23,r1 - lsr 
r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - andi r20,51 - andi r21,51 - eor r2,r20 - eor r3,r21 - mov r22,r1 - mov r23,r1 - movw r22,r20 - mov r20,r1 - mov r21,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - andi r20,240 - andi r21,240 - eor r2,r20 - eor r3,r21 - mov r22,r1 - mov r23,r1 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - std Z+20,r5 - std Z+21,r3 - std Z+22,r4 - std Z+23,r2 - ldd r2,Z+24 - ldd r3,Z+25 - ldd r4,Z+26 - ldd r5,Z+27 - movw r20,r2 - movw r22,r4 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,10 - andi r21,10 - andi r22,10 - andi r23,10 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r0,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r0 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,204 - mov r21,r1 - andi r22,204 - mov r23,r1 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r0,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r0 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - andi r20,240 - andi r21,240 - eor r2,r20 - eor r3,r21 - mov r22,r1 - mov r23,r1 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - std Z+24,r5 - std Z+25,r3 - std Z+26,r4 - std Z+27,r2 - ldd r2,Z+28 - ldd r3,Z+29 - ldd r4,Z+30 - ldd r5,Z+31 - movw r20,r2 - movw r22,r4 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,10 - andi r21,10 - andi r22,10 - andi r23,10 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r0,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r0 - eor r20,r2 - eor 
r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,204 - mov r21,r1 - andi r22,204 - mov r23,r1 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r0,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r0 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - andi r20,240 - andi r21,240 - eor r2,r20 - eor r3,r21 - mov r22,r1 - mov r23,r1 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - std Z+28,r5 - std Z+29,r3 - std Z+30,r4 - std Z+31,r2 - dec r19 - breq 1270f - adiw r30,40 - rjmp 121b -1270: - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r2,0 - bst r20,1 - bld r6,0 - bst r20,2 - bld r10,0 - bst r20,3 - bld r14,0 - bst r20,4 - bld r2,1 - bst r20,5 - bld r6,1 - bst r20,6 - bld r10,1 - bst r20,7 - bld r14,1 - bst r21,0 - bld r2,2 - bst r21,1 - bld r6,2 - bst r21,2 - bld r10,2 - bst r21,3 - bld r14,2 - bst r21,4 - bld r2,3 - bst r21,5 - bld r6,3 - bst r21,6 - bld r10,3 - bst r21,7 - bld r14,3 - bst r22,0 - bld r2,4 - bst r22,1 - bld r6,4 - bst r22,2 - bld r10,4 - bst r22,3 - bld r14,4 - bst r22,4 - bld r2,5 - bst r22,5 - bld r6,5 - bst r22,6 - bld r10,5 - bst r22,7 - bld r14,5 - bst r23,0 - bld r2,6 - bst r23,1 - bld r6,6 - bst r23,2 - bld r10,6 - bst r23,3 - bld r14,6 - bst r23,4 - bld r2,7 - bst r23,5 - bld r6,7 - bst r23,6 - bld r10,7 - bst r23,7 - bld r14,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r3,0 - bst r20,1 - bld r7,0 - bst r20,2 - bld r11,0 - bst r20,3 - bld r15,0 - bst r20,4 - bld r3,1 - bst r20,5 - bld r7,1 - bst r20,6 - bld r11,1 - bst r20,7 - bld r15,1 - bst r21,0 - bld r3,2 - bst r21,1 - bld r7,2 - bst r21,2 - bld r11,2 - bst r21,3 - bld r15,2 - bst r21,4 - bld r3,3 - bst r21,5 - bld r7,3 - bst r21,6 - bld r11,3 - bst r21,7 - bld r15,3 - bst r22,0 - bld r3,4 - bst r22,1 - bld r7,4 - bst r22,2 - bld r11,4 - bst r22,3 - bld r15,4 - bst r22,4 - bld r3,5 - bst r22,5 - bld r7,5 - bst r22,6 - bld r11,5 - bst r22,7 - bld r15,5 - bst r23,0 - bld r3,6 - bst r23,1 - bld r7,6 - bst r23,2 - bld r11,6 - bst r23,3 - bld r15,6 - bst r23,4 - bld r3,7 - bst r23,5 - bld r7,7 - bst r23,6 - bld r11,7 - bst r23,7 - bld r15,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r4,0 - bst r20,1 - bld r8,0 - bst r20,2 - bld r12,0 - bst r20,3 - bld r24,0 - bst r20,4 - bld r4,1 - bst r20,5 - bld r8,1 - bst r20,6 - bld r12,1 - bst r20,7 - bld r24,1 - bst r21,0 - bld r4,2 - bst r21,1 - bld r8,2 - bst r21,2 - bld r12,2 - bst r21,3 - bld r24,2 - bst r21,4 - bld r4,3 - bst r21,5 - bld r8,3 - bst r21,6 - bld r12,3 - bst r21,7 - bld r24,3 - bst r22,0 - bld r4,4 - bst r22,1 - bld r8,4 - bst r22,2 - bld r12,4 - bst r22,3 - bld r24,4 - bst r22,4 - bld r4,5 - bst r22,5 - bld r8,5 - bst r22,6 - bld r12,5 - bst r22,7 - bld r24,5 - bst r23,0 - bld r4,6 - bst r23,1 - bld r8,6 - bst r23,2 - bld r12,6 - bst r23,3 - bld r24,6 - bst r23,4 - bld r4,7 - bst r23,5 - bld r8,7 - bst r23,6 - bld r12,7 - bst r23,7 - bld r24,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld 
r5,0 - bst r20,1 - bld r9,0 - bst r20,2 - bld r13,0 - bst r20,3 - bld r25,0 - bst r20,4 - bld r5,1 - bst r20,5 - bld r9,1 - bst r20,6 - bld r13,1 - bst r20,7 - bld r25,1 - bst r21,0 - bld r5,2 - bst r21,1 - bld r9,2 - bst r21,2 - bld r13,2 - bst r21,3 - bld r25,2 - bst r21,4 - bld r5,3 - bst r21,5 - bld r9,3 - bst r21,6 - bld r13,3 - bst r21,7 - bld r25,3 - bst r22,0 - bld r5,4 - bst r22,1 - bld r9,4 - bst r22,2 - bld r13,4 - bst r22,3 - bld r25,4 - bst r22,4 - bld r5,5 - bst r22,5 - bld r9,5 - bst r22,6 - bld r13,5 - bst r22,7 - bld r25,5 - bst r23,0 - bld r5,6 - bst r23,1 - bld r9,6 - bst r23,2 - bld r13,6 - bst r23,3 - bld r25,6 - bst r23,4 - bld r5,7 - bst r23,5 - bld r9,7 - bst r23,6 - bld r13,7 - bst r23,7 - bld r25,7 - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - rcall 1613f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2351f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,20 - adiw r26,40 - rcall 1613f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2351f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,40 - sbiw r26,40 - rcall 1613f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2351f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,60 - adiw r26,40 - rcall 1613f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2351f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,80 - sbiw r26,40 - rcall 1613f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2351f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,100 - adiw r26,40 - rcall 1613f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2351f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,120 - sbiw r26,40 - rcall 1613f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 1613f - rjmp 2826f -1613: - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - 
eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - com r14 - com r15 - com r24 - com r25 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - movw r20,r6 - movw r22,r8 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,204 - andi r21,204 - andi r22,204 - andi r23,204 - lsr r9 - ror r8 - ror r7 - ror r6 - lsr r9 - ror r8 - ror r7 - ror r6 - ldi r19,51 - and r6,r19 - and r7,r19 - and r8,r19 - and r9,r19 - or r6,r20 - or r7,r21 - or r8,r22 - or r9,r23 - movw r20,r10 - movw r22,r12 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,238 - andi r21,238 - andi r22,238 - andi r23,238 - lsr r13 - ror r12 - ror r11 - ror r10 - lsr r13 - ror r12 - ror r11 - ror r10 - lsr r13 - ror r12 - ror r11 - ror r10 - ldi r17,17 - and r10,r17 - and r11,r17 - and r12,r17 - and r13,r17 - or r10,r20 - or r11,r21 - or r12,r22 - or r13,r23 - movw r20,r14 - movw r22,r24 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,136 - andi r21,136 - andi r22,136 - andi r23,136 - lsr r25 - ror r24 - ror r15 - ror r14 - ldi r16,119 - and r14,r16 - and r15,r16 - andi r24,119 - andi r25,119 - or r14,r20 - or r15,r21 - or r24,r22 - or r25,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r24 - and r0,r12 - eor r8,r0 - mov r0,r25 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r8 - and r0,r4 - eor r24,r0 - mov r0,r9 - and r0,r5 - eor r25,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r24 - or r0,r8 - eor r12,r0 - mov r0,r25 - or r0,r9 - eor r13,r0 - eor r2,r10 - eor r3,r11 - eor r4,r12 - eor r5,r13 - eor r6,r2 - eor r7,r3 - eor r8,r4 - eor r9,r5 - com r2 - com r3 - com r4 - com r5 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r24 - and r0,r8 - eor r12,r0 - mov r0,r25 - and r0,r9 - eor r13,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r1 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - or r5,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r12 - rol r13 - adc r12,r1 - lsl r12 - rol r13 - adc r12,r1 - lsl r12 - rol r13 - adc r12,r1 - lsl r12 
- rol r13 - adc r12,r1 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r14,r20 - eor r15,r21 - eor r24,r22 - eor r25,r23 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - com r14 - com r15 - com r24 - com r25 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - movw r20,r6 - movw r22,r8 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - andi r20,85 - andi r21,85 - andi r22,85 - andi r23,85 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - mov r0,r12 - mov r12,r10 - mov r10,r0 - mov r0,r13 - mov r13,r11 - mov r11,r0 - movw r20,r10 - movw r22,r12 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r10 - eor r21,r11 - andi r20,85 - andi r21,85 - eor r10,r20 - eor r11,r21 - mov r22,r1 - mov r23,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - mov r0,r24 - mov r24,r14 - mov r14,r0 - mov r0,r25 - mov r25,r15 - mov r15,r0 - movw r20,r24 - lsr r21 - ror r20 - eor r20,r24 - eor r21,r25 - andi r20,85 - andi r21,85 - eor r24,r20 - eor r25,r21 - lsl r20 - rol r21 - eor r24,r20 - eor r25,r21 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - 
and r0,r11 - eor r7,r0 - mov r0,r24 - and r0,r12 - eor r8,r0 - mov r0,r25 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r8 - and r0,r4 - eor r24,r0 - mov r0,r9 - and r0,r5 - eor r25,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r24 - or r0,r8 - eor r12,r0 - mov r0,r25 - or r0,r9 - eor r13,r0 - eor r2,r10 - eor r3,r11 - eor r4,r12 - eor r5,r13 - eor r6,r2 - eor r7,r3 - eor r8,r4 - eor r9,r5 - com r2 - com r3 - com r4 - com r5 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r24 - and r0,r8 - eor r12,r0 - mov r0,r25 - and r0,r9 - eor r13,r0 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r5 - adc r5,r1 - lsl r5 - adc r5,r1 - swap r6 - swap r7 - swap r8 - swap r9 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - mov r0,r1 - lsr r12 - ror r0 - lsr r12 - ror r0 - or r12,r0 - mov r0,r1 - lsr r13 - ror r0 - lsr r13 - ror r0 - or r13,r0 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r14,r20 - eor r15,r21 - eor r24,r22 - eor r25,r23 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - com r14 - com r15 - com r24 - com r25 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - mov r0,r8 - mov r8,r6 - mov r6,r0 - mov r0,r9 - mov r9,r7 - mov r7,r0 - mov r0,r10 - mov r10,r11 - mov r11,r12 - mov r12,r13 - mov r13,r0 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm 
r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - eor r14,r2 - eor r15,r3 - eor r24,r4 - eor r25,r5 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - ret -2351: - movw r30,r26 - sbiw r30,40 - push r5 - push r4 - push r3 - push r2 - push r9 - push r8 - push r7 - push r6 - ld r2,Z - ldd r3,Z+1 - ldd r4,Z+2 - ldd r5,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r16,Z+6 - ldd r17,Z+7 - movw r20,r26 - movw r22,r16 - movw r20,r22 - mov r22,r1 - mov r23,r1 - eor r20,r26 - eor r21,r27 - andi r20,51 - andi r21,51 - eor r26,r20 - eor r27,r21 - mov r22,r1 - mov r23,r1 - movw r22,r20 - mov r20,r1 - mov r21,r1 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,68 - andi r21,68 - andi r22,85 - andi r23,85 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - st Z,r26 - std Z+1,r27 - std Z+2,r16 - std Z+3,r17 - movw r20,r2 - movw r22,r4 - andi r20,51 - andi r21,51 - andi r22,51 - andi r23,51 - ldi r19,204 - and r2,r19 - and r3,r19 - and r4,r19 - and r5,r19 - or r4,r23 - or r5,r20 - or r2,r21 - or r3,r22 - movw r20,r4 - movw r22,r2 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r4 - eor r21,r5 - eor r22,r2 - eor r23,r3 - mov r20,r1 - andi r21,17 - andi r22,85 - andi r23,85 - eor r4,r20 - eor r5,r21 - eor r2,r22 - eor r3,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r4,r20 - eor r5,r21 - eor r2,r22 - eor r3,r23 - std Z+4,r4 - std Z+5,r5 - std Z+6,r2 - std Z+7,r3 - ldd r2,Z+8 - ldd r3,Z+9 - ldd r4,Z+10 - ldd r5,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r16,Z+14 - ldd r17,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r16 - adc r16,r1 - lsl r16 - adc r16,r1 - swap r17 - std Z+8,r26 - std Z+9,r27 - std Z+10,r16 - std Z+11,r17 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r5 - adc r5,r1 - lsl r5 - adc r5,r1 - std Z+12,r2 - std Z+13,r3 - std Z+14,r4 - std Z+15,r5 - ldd r2,Z+16 - ldd r3,Z+17 - ldd r4,Z+18 - ldd r5,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r16,Z+22 - ldd r17,Z+23 - movw r20,r26 - movw r22,r16 - andi r20,170 - andi r21,170 - andi r22,170 - andi r23,170 - andi r26,85 - andi r27,85 - andi r16,85 - andi r17,85 - or r26,r21 - or r27,r22 - or r16,r23 - or r17,r20 - std Z+16,r16 - std Z+17,r17 - std Z+18,r26 - std Z+19,r27 - movw r20,r2 - movw r22,r4 - andi r20,85 - andi r21,85 - andi r22,85 - andi r23,85 - ldi r19,170 - and r2,r19 - and r3,r19 - and r4,r19 - and r5,r19 - lsl r2 - rol r3 - rol r4 - rol r5 - adc r2,r1 - lsl r2 - rol r3 - rol r4 - rol r5 - adc r2,r1 - lsl r2 - rol r3 - rol r4 - rol r5 - adc r2,r1 - lsl r2 - rol r3 - rol r4 - rol r5 - adc r2,r1 - or r2,r20 - or r3,r21 - or r4,r22 - or r5,r23 - std Z+20,r5 - std Z+21,r2 - std Z+22,r3 - std Z+23,r4 - ldd r2,Z+24 - ldd r3,Z+25 - ldd r4,Z+26 - ldd r5,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r16,Z+30 - ldd r17,Z+31 - movw r20,r26 - movw r22,r16 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror 
r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,3 - andi r21,3 - andi r22,3 - andi r23,3 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - lsr r23 - ror r22 - ror r21 - ror r20 - andi r20,120 - andi r21,120 - andi r22,120 - andi r23,120 - movw r6,r20 - movw r8,r22 - lsr r9 - ror r8 - ror r7 - ror r6 - lsr r9 - ror r8 - ror r7 - ror r6 - lsr r9 - ror r8 - ror r7 - ror r6 - lsr r9 - ror r8 - ror r7 - ror r6 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ldi r19,8 - and r6,r19 - and r7,r19 - and r8,r19 - and r9,r19 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - lsl r6 - rol r7 - rol r8 - rol r9 - lsl r6 - rol r7 - rol r8 - rol r9 - lsl r6 - rol r7 - rol r8 - rol r9 - lsl r6 - rol r7 - rol r8 - rol r9 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - andi r26,15 - andi r27,15 - andi r16,15 - andi r17,15 - or r26,r20 - or r27,r21 - or r16,r22 - or r17,r23 - std Z+24,r26 - std Z+25,r27 - std Z+26,r16 - std Z+27,r17 - movw r20,r4 - lsr r21 - ror r20 - lsr r21 - ror r20 - andi r20,48 - andi r21,48 - movw r26,r2 - movw r16,r4 - andi r26,1 - andi r27,1 - andi r16,1 - andi r17,1 - lsl r26 - rol r27 - rol r16 - rol r17 - lsl r26 - rol r27 - rol r16 - rol r17 - lsl r26 - rol r27 - rol r16 - rol r17 - or r26,r20 - or r27,r21 - movw r20,r4 - lsl r20 - rol r21 - lsl r20 - rol r21 - andi r20,192 - andi r21,192 - or r26,r20 - or r27,r21 - movw r20,r2 - andi r20,224 - andi r21,224 - lsr r21 - ror r20 - or r16,r20 - or r17,r21 - movw r20,r2 - movw r22,r4 - lsr r23 - ror r22 - ror r21 - ror r20 - andi r20,7 - andi r21,7 - andi r22,7 - andi r23,7 - or r26,r20 - or r27,r21 - or r16,r22 - or r17,r23 - ldi r19,16 - and r2,r19 - and r3,r19 - lsl r2 - rol r3 - lsl r2 - rol r3 - lsl r2 - rol r3 - or r16,r2 - or r17,r3 - std Z+28,r26 - std Z+29,r27 - std Z+30,r16 - std Z+31,r17 - ldd r2,Z+32 - ldd r3,Z+33 - ldd r4,Z+34 - ldd r5,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r16,Z+38 - ldd r17,Z+39 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r16 - std Z+35,r17 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r4 - mov r4,r5 - mov r5,r0 - lsl r4 - rol r5 - adc r4,r1 - lsl r4 - rol r5 - adc r4,r1 - std Z+36,r2 - std Z+37,r3 - std Z+38,r4 - std Z+39,r5 - pop r6 - pop r7 - pop r8 - pop r9 - pop r2 - pop r3 - pop r4 - pop r5 - movw r26,r30 - ret -2826: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - bst r2,0 - bld r20,0 - bst r6,0 - bld r20,1 - bst r10,0 - bld r20,2 - bst r14,0 - bld r20,3 - bst r2,1 - bld r20,4 - bst r6,1 - bld r20,5 - bst r10,1 - bld r20,6 - bst r14,1 - bld r20,7 - bst r2,2 - bld r21,0 - bst r6,2 - bld r21,1 - bst r10,2 - bld r21,2 - bst r14,2 - bld r21,3 - bst r2,3 - bld r21,4 - bst r6,3 - bld r21,5 - bst r10,3 - bld r21,6 - bst r14,3 - bld r21,7 - bst r2,4 - bld r22,0 - bst r6,4 - bld r22,1 - bst r10,4 - bld r22,2 - bst r14,4 - bld r22,3 - bst r2,5 - bld r22,4 - bst r6,5 - bld r22,5 - bst r10,5 - bld r22,6 - bst r14,5 - bld r22,7 - bst r2,6 - bld r23,0 - bst r6,6 - bld r23,1 - bst r10,6 - bld r23,2 - bst r14,6 - bld r23,3 - bst r2,7 - bld r23,4 - bst r6,7 - bld r23,5 
- bst r10,7 - bld r23,6 - bst r14,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r3,0 - bld r20,0 - bst r7,0 - bld r20,1 - bst r11,0 - bld r20,2 - bst r15,0 - bld r20,3 - bst r3,1 - bld r20,4 - bst r7,1 - bld r20,5 - bst r11,1 - bld r20,6 - bst r15,1 - bld r20,7 - bst r3,2 - bld r21,0 - bst r7,2 - bld r21,1 - bst r11,2 - bld r21,2 - bst r15,2 - bld r21,3 - bst r3,3 - bld r21,4 - bst r7,3 - bld r21,5 - bst r11,3 - bld r21,6 - bst r15,3 - bld r21,7 - bst r3,4 - bld r22,0 - bst r7,4 - bld r22,1 - bst r11,4 - bld r22,2 - bst r15,4 - bld r22,3 - bst r3,5 - bld r22,4 - bst r7,5 - bld r22,5 - bst r11,5 - bld r22,6 - bst r15,5 - bld r22,7 - bst r3,6 - bld r23,0 - bst r7,6 - bld r23,1 - bst r11,6 - bld r23,2 - bst r15,6 - bld r23,3 - bst r3,7 - bld r23,4 - bst r7,7 - bld r23,5 - bst r11,7 - bld r23,6 - bst r15,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r4,0 - bld r20,0 - bst r8,0 - bld r20,1 - bst r12,0 - bld r20,2 - bst r24,0 - bld r20,3 - bst r4,1 - bld r20,4 - bst r8,1 - bld r20,5 - bst r12,1 - bld r20,6 - bst r24,1 - bld r20,7 - bst r4,2 - bld r21,0 - bst r8,2 - bld r21,1 - bst r12,2 - bld r21,2 - bst r24,2 - bld r21,3 - bst r4,3 - bld r21,4 - bst r8,3 - bld r21,5 - bst r12,3 - bld r21,6 - bst r24,3 - bld r21,7 - bst r4,4 - bld r22,0 - bst r8,4 - bld r22,1 - bst r12,4 - bld r22,2 - bst r24,4 - bld r22,3 - bst r4,5 - bld r22,4 - bst r8,5 - bld r22,5 - bst r12,5 - bld r22,6 - bst r24,5 - bld r22,7 - bst r4,6 - bld r23,0 - bst r8,6 - bld r23,1 - bst r12,6 - bld r23,2 - bst r24,6 - bld r23,3 - bst r4,7 - bld r23,4 - bst r8,7 - bld r23,5 - bst r12,7 - bld r23,6 - bst r24,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r5,0 - bld r20,0 - bst r9,0 - bld r20,1 - bst r13,0 - bld r20,2 - bst r25,0 - bld r20,3 - bst r5,1 - bld r20,4 - bst r9,1 - bld r20,5 - bst r13,1 - bld r20,6 - bst r25,1 - bld r20,7 - bst r5,2 - bld r21,0 - bst r9,2 - bld r21,1 - bst r13,2 - bld r21,2 - bst r25,2 - bld r21,3 - bst r5,3 - bld r21,4 - bst r9,3 - bld r21,5 - bst r13,3 - bld r21,6 - bst r25,3 - bld r21,7 - bst r5,4 - bld r22,0 - bst r9,4 - bld r22,1 - bst r13,4 - bld r22,2 - bst r25,4 - bld r22,3 - bst r5,5 - bld r22,4 - bst r9,5 - bld r22,5 - bst r13,5 - bld r22,6 - bst r25,5 - bld r22,7 - bst r5,6 - bld r23,0 - bst r9,6 - bld r23,1 - bst r13,6 - bld r23,2 - bst r25,6 - bld r23,3 - bst r5,7 - bld r23,4 - bst r9,7 - bld r23,5 - bst r13,7 - bld r23,6 - bst r25,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128t_encrypt, .-gift128t_encrypt - - .text -.global gift128t_decrypt - .type gift128t_decrypt, @function -gift128t_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 36 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r2,0 - bst r20,1 - bld r6,0 - bst r20,2 - bld r10,0 - bst r20,3 - bld r14,0 - bst r20,4 - bld r2,1 - bst r20,5 - bld r6,1 - bst r20,6 - bld r10,1 - bst r20,7 - bld r14,1 - bst r21,0 - bld r2,2 - bst r21,1 - bld r6,2 
- bst r21,2 - bld r10,2 - bst r21,3 - bld r14,2 - bst r21,4 - bld r2,3 - bst r21,5 - bld r6,3 - bst r21,6 - bld r10,3 - bst r21,7 - bld r14,3 - bst r22,0 - bld r2,4 - bst r22,1 - bld r6,4 - bst r22,2 - bld r10,4 - bst r22,3 - bld r14,4 - bst r22,4 - bld r2,5 - bst r22,5 - bld r6,5 - bst r22,6 - bld r10,5 - bst r22,7 - bld r14,5 - bst r23,0 - bld r2,6 - bst r23,1 - bld r6,6 - bst r23,2 - bld r10,6 - bst r23,3 - bld r14,6 - bst r23,4 - bld r2,7 - bst r23,5 - bld r6,7 - bst r23,6 - bld r10,7 - bst r23,7 - bld r14,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r3,0 - bst r20,1 - bld r7,0 - bst r20,2 - bld r11,0 - bst r20,3 - bld r15,0 - bst r20,4 - bld r3,1 - bst r20,5 - bld r7,1 - bst r20,6 - bld r11,1 - bst r20,7 - bld r15,1 - bst r21,0 - bld r3,2 - bst r21,1 - bld r7,2 - bst r21,2 - bld r11,2 - bst r21,3 - bld r15,2 - bst r21,4 - bld r3,3 - bst r21,5 - bld r7,3 - bst r21,6 - bld r11,3 - bst r21,7 - bld r15,3 - bst r22,0 - bld r3,4 - bst r22,1 - bld r7,4 - bst r22,2 - bld r11,4 - bst r22,3 - bld r15,4 - bst r22,4 - bld r3,5 - bst r22,5 - bld r7,5 - bst r22,6 - bld r11,5 - bst r22,7 - bld r15,5 - bst r23,0 - bld r3,6 - bst r23,1 - bld r7,6 - bst r23,2 - bld r11,6 - bst r23,3 - bld r15,6 - bst r23,4 - bld r3,7 - bst r23,5 - bld r7,7 - bst r23,6 - bld r11,7 - bst r23,7 - bld r15,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r4,0 - bst r20,1 - bld r8,0 - bst r20,2 - bld r12,0 - bst r20,3 - bld r24,0 - bst r20,4 - bld r4,1 - bst r20,5 - bld r8,1 - bst r20,6 - bld r12,1 - bst r20,7 - bld r24,1 - bst r21,0 - bld r4,2 - bst r21,1 - bld r8,2 - bst r21,2 - bld r12,2 - bst r21,3 - bld r24,2 - bst r21,4 - bld r4,3 - bst r21,5 - bld r8,3 - bst r21,6 - bld r12,3 - bst r21,7 - bld r24,3 - bst r22,0 - bld r4,4 - bst r22,1 - bld r8,4 - bst r22,2 - bld r12,4 - bst r22,3 - bld r24,4 - bst r22,4 - bld r4,5 - bst r22,5 - bld r8,5 - bst r22,6 - bld r12,5 - bst r22,7 - bld r24,5 - bst r23,0 - bld r4,6 - bst r23,1 - bld r8,6 - bst r23,2 - bld r12,6 - bst r23,3 - bld r24,6 - bst r23,4 - bld r4,7 - bst r23,5 - bld r8,7 - bst r23,6 - bld r12,7 - bst r23,7 - bld r24,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r5,0 - bst r20,1 - bld r9,0 - bst r20,2 - bld r13,0 - bst r20,3 - bld r25,0 - bst r20,4 - bld r5,1 - bst r20,5 - bld r9,1 - bst r20,6 - bld r13,1 - bst r20,7 - bld r25,1 - bst r21,0 - bld r5,2 - bst r21,1 - bld r9,2 - bst r21,2 - bld r13,2 - bst r21,3 - bld r25,2 - bst r21,4 - bld r5,3 - bst r21,5 - bld r9,3 - bst r21,6 - bld r13,3 - bst r21,7 - bld r25,3 - bst r22,0 - bld r5,4 - bst r22,1 - bld r9,4 - bst r22,2 - bld r13,4 - bst r22,3 - bld r25,4 - bst r22,4 - bld r5,5 - bst r22,5 - bld r9,5 - bst r22,6 - bld r13,5 - bst r22,7 - bld r25,5 - bst r23,0 - bld r5,6 - bst r23,1 - bld r9,6 - bst r23,2 - bld r13,6 - bst r23,3 - bld r25,6 - bst r23,4 - bld r5,7 - bst r23,5 - bld r9,7 - bst r23,6 - bld r13,7 - bst r23,7 - bld r25,7 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r16,Z+14 - ldd r17,Z+15 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r16 - std Y+4,r17 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r16,Z+6 - ldd r17,Z+7 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r16 - std Y+8,r17 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r16,Z+10 - ldd 
r17,Z+11 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r16 - std Y+12,r17 - ld r26,Z - ldd r27,Z+1 - ldd r16,Z+2 - ldd r17,Z+3 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r16 - std Y+16,r17 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r26,hh8(table_1) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r19,40 - mov r26,r1 -375: - ldd r0,Y+13 - ldd r20,Y+9 - std Y+9,r0 - ldd r0,Y+5 - std Y+5,r20 - ldd r20,Y+1 - std Y+1,r0 - ldd r0,Y+14 - ldd r21,Y+10 - std Y+10,r0 - ldd r0,Y+6 - std Y+6,r21 - ldd r21,Y+2 - std Y+2,r0 - ldd r0,Y+15 - ldd r22,Y+11 - std Y+11,r0 - ldd r0,Y+7 - std Y+7,r22 - ldd r22,Y+3 - std Y+3,r0 - ldd r0,Y+16 - ldd r23,Y+12 - std Y+12,r0 - ldd r0,Y+8 - std Y+8,r23 - ldd r23,Y+4 - std Y+4,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r0 - lsr r21 - ror r20 - ror r0 - lsr r21 - ror r20 - ror r0 - lsr r21 - ror r20 - ror r0 - or r21,r0 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - std Y+13,r20 - std Y+14,r21 - std Y+15,r22 - std Y+16,r23 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ldd r0,Y+5 - eor r10,r0 - ldd r0,Y+6 - eor r11,r0 - ldd r0,Y+7 - eor r12,r0 - ldd r0,Y+8 - eor r13,r0 - ldi r20,128 - eor r25,r20 - dec r19 - mov r30,r19 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - eor r14,r20 - bst r2,1 - bld r0,0 - bst r5,0 - bld r2,1 - bst r2,6 - bld r5,0 - bst r4,1 - bld r2,6 - bst r5,4 - bld r4,1 - bst r2,7 - bld r5,4 - bst r3,1 - bld r2,7 - bst r5,2 - bld r3,1 - bst r4,6 - bld r5,2 - bst r4,5 - bld r4,6 - bst r5,5 - bld r4,5 - bst r5,7 - bld r5,5 - bst r3,7 - bld r5,7 - bst r3,3 - bld r3,7 - bst r3,2 - bld r3,3 - bst r4,2 - bld r3,2 - bst r4,4 - bld r4,2 - bst r2,5 - bld r4,4 - bst r5,1 - bld r2,5 - bst r5,6 - bld r5,1 - bst r4,7 - bld r5,6 - bst r3,5 - bld r4,7 - bst r5,3 - bld r3,5 - bst r3,6 - bld r5,3 - bst r4,3 - bld r3,6 - bst r3,4 - bld r4,3 - bst r2,3 - bld r3,4 - bst r3,0 - bld r2,3 - bst r2,2 - bld r3,0 - bst r4,0 - bld r2,2 - bst r2,4 - bld r4,0 - bst r0,0 - bld r2,4 - bst r6,0 - bld r0,0 - bst r7,0 - bld r6,0 - bst r7,2 - bld r7,0 - bst r9,2 - bld r7,2 - bst r9,6 - bld r9,2 - bst r9,7 - bld r9,6 - bst r8,7 - bld r9,7 - bst r8,5 - bld r8,7 - bst r6,5 - bld r8,5 - bst r6,1 - bld r6,5 - bst r0,0 - bld r6,1 - bst r6,2 - bld r0,0 - bst r9,0 - bld r6,2 - bst r7,6 - bld r9,0 - bst r9,3 - bld r7,6 - bst r8,6 - bld r9,3 - bst r9,5 - bld r8,6 - bst r6,7 - bld r9,5 - bst r8,1 - bld r6,7 - bst r6,4 - bld r8,1 - bst r7,1 - bld r6,4 - bst r0,0 - bld r7,1 - bst r6,3 - bld r0,0 - bst r8,0 - bld r6,3 - bst r7,4 - bld r8,0 - bst r7,3 - bld r7,4 - bst r8,2 - bld r7,3 - bst r9,4 - bld r8,2 - bst r7,7 - bld r9,4 - bst r8,3 - bld r7,7 - bst r8,4 - bld r8,3 - bst r7,5 - bld r8,4 - bst r0,0 - bld r7,5 - bst r6,6 - bld r0,0 - bst r9,1 - bld r6,6 - bst r0,0 - bld r9,1 - bst r10,0 - bld r0,0 - bst r12,0 - bld r10,0 - bst r12,4 - bld r12,0 - bst r12,5 - bld r12,4 - bst r11,5 - bld r12,5 - bst r11,3 - bld r11,5 - bst r13,2 - bld r11,3 - bst r10,6 - bld r13,2 - bst r10,1 - bld r10,6 - bst r11,0 - bld r10,1 - bst r12,2 - bld r11,0 - bst r10,4 - bld r12,2 - bst 
r12,1 - bld r10,4 - bst r11,4 - bld r12,1 - bst r12,3 - bld r11,4 - bst r13,4 - bld r12,3 - bst r12,7 - bld r13,4 - bst r13,5 - bld r12,7 - bst r11,7 - bld r13,5 - bst r13,3 - bld r11,7 - bst r13,6 - bld r13,3 - bst r10,7 - bld r13,6 - bst r13,1 - bld r10,7 - bst r11,6 - bld r13,1 - bst r10,3 - bld r11,6 - bst r13,0 - bld r10,3 - bst r12,6 - bld r13,0 - bst r10,5 - bld r12,6 - bst r11,1 - bld r10,5 - bst r11,2 - bld r11,1 - bst r10,2 - bld r11,2 - bst r0,0 - bld r10,2 - bst r14,0 - bld r0,0 - bst r25,0 - bld r14,0 - bst r25,6 - bld r25,0 - bst r15,7 - bld r25,6 - bst r14,3 - bld r15,7 - bst r0,0 - bld r14,3 - bst r14,1 - bld r0,0 - bst r24,0 - bld r14,1 - bst r25,4 - bld r24,0 - bst r25,7 - bld r25,4 - bst r14,7 - bld r25,7 - bst r0,0 - bld r14,7 - bst r14,2 - bld r0,0 - bst r15,0 - bld r14,2 - bst r25,2 - bld r15,0 - bst r15,6 - bld r25,2 - bst r15,3 - bld r15,6 - bst r0,0 - bld r15,3 - bst r14,4 - bld r0,0 - bst r25,1 - bld r14,4 - bst r24,6 - bld r25,1 - bst r15,5 - bld r24,6 - bst r24,3 - bld r15,5 - bst r0,0 - bld r24,3 - bst r14,5 - bld r0,0 - bst r24,1 - bld r14,5 - bst r24,4 - bld r24,1 - bst r25,5 - bld r24,4 - bst r24,7 - bld r25,5 - bst r0,0 - bld r24,7 - bst r14,6 - bld r0,0 - bst r15,1 - bld r14,6 - bst r24,2 - bld r15,1 - bst r15,4 - bld r24,2 - bst r25,3 - bld r15,4 - bst r0,0 - bld r25,3 - movw r20,r14 - movw r22,r24 - movw r14,r2 - movw r24,r4 - movw r2,r20 - movw r4,r22 - and r20,r6 - and r21,r7 - and r22,r8 - and r23,r9 - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - com r14 - com r15 - com r24 - com r25 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - cp r19,r1 - breq 791f - inc r26 - ldi r27,5 - cpse r26,r27 - rjmp 375b - mov r26,r1 - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rjmp 375b -791: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - bst r2,0 - bld r20,0 - bst r6,0 - bld r20,1 - bst r10,0 - bld r20,2 - bst r14,0 - bld r20,3 - bst r2,1 - bld r20,4 - bst r6,1 - bld r20,5 - bst r10,1 - bld r20,6 - bst r14,1 - bld r20,7 - bst r2,2 - bld r21,0 - bst r6,2 - bld r21,1 - bst r10,2 - bld r21,2 - bst r14,2 - bld r21,3 - bst r2,3 - bld r21,4 - bst r6,3 - bld r21,5 - bst r10,3 - bld r21,6 - bst r14,3 - bld r21,7 - bst r2,4 - bld r22,0 - bst r6,4 - bld r22,1 - bst r10,4 - bld r22,2 - bst r14,4 - bld r22,3 - bst r2,5 - bld r22,4 - bst r6,5 - bld r22,5 - bst r10,5 - bld r22,6 - bst r14,5 - bld r22,7 - bst r2,6 - bld r23,0 - bst r6,6 - bld r23,1 - bst r10,6 - bld r23,2 - bst r14,6 - bld r23,3 - bst r2,7 - bld r23,4 - bst r6,7 - bld r23,5 - bst r10,7 - bld r23,6 - bst r14,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r3,0 - bld r20,0 - bst r7,0 - bld r20,1 - bst r11,0 - bld r20,2 - bst r15,0 - bld r20,3 - bst r3,1 - bld r20,4 - bst r7,1 - bld r20,5 - bst r11,1 - bld r20,6 - bst r15,1 - bld r20,7 - bst r3,2 - bld r21,0 - bst r7,2 - bld r21,1 - bst r11,2 - bld r21,2 - bst r15,2 - bld r21,3 - bst r3,3 - bld r21,4 - bst r7,3 - bld r21,5 - bst r11,3 - bld r21,6 - bst r15,3 - bld r21,7 - bst r3,4 - bld 
r22,0 - bst r7,4 - bld r22,1 - bst r11,4 - bld r22,2 - bst r15,4 - bld r22,3 - bst r3,5 - bld r22,4 - bst r7,5 - bld r22,5 - bst r11,5 - bld r22,6 - bst r15,5 - bld r22,7 - bst r3,6 - bld r23,0 - bst r7,6 - bld r23,1 - bst r11,6 - bld r23,2 - bst r15,6 - bld r23,3 - bst r3,7 - bld r23,4 - bst r7,7 - bld r23,5 - bst r11,7 - bld r23,6 - bst r15,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r4,0 - bld r20,0 - bst r8,0 - bld r20,1 - bst r12,0 - bld r20,2 - bst r24,0 - bld r20,3 - bst r4,1 - bld r20,4 - bst r8,1 - bld r20,5 - bst r12,1 - bld r20,6 - bst r24,1 - bld r20,7 - bst r4,2 - bld r21,0 - bst r8,2 - bld r21,1 - bst r12,2 - bld r21,2 - bst r24,2 - bld r21,3 - bst r4,3 - bld r21,4 - bst r8,3 - bld r21,5 - bst r12,3 - bld r21,6 - bst r24,3 - bld r21,7 - bst r4,4 - bld r22,0 - bst r8,4 - bld r22,1 - bst r12,4 - bld r22,2 - bst r24,4 - bld r22,3 - bst r4,5 - bld r22,4 - bst r8,5 - bld r22,5 - bst r12,5 - bld r22,6 - bst r24,5 - bld r22,7 - bst r4,6 - bld r23,0 - bst r8,6 - bld r23,1 - bst r12,6 - bld r23,2 - bst r24,6 - bld r23,3 - bst r4,7 - bld r23,4 - bst r8,7 - bld r23,5 - bst r12,7 - bld r23,6 - bst r24,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r5,0 - bld r20,0 - bst r9,0 - bld r20,1 - bst r13,0 - bld r20,2 - bst r25,0 - bld r20,3 - bst r5,1 - bld r20,4 - bst r9,1 - bld r20,5 - bst r13,1 - bld r20,6 - bst r25,1 - bld r20,7 - bst r5,2 - bld r21,0 - bst r9,2 - bld r21,1 - bst r13,2 - bld r21,2 - bst r25,2 - bld r21,3 - bst r5,3 - bld r21,4 - bst r9,3 - bld r21,5 - bst r13,3 - bld r21,6 - bst r25,3 - bld r21,7 - bst r5,4 - bld r22,0 - bst r9,4 - bld r22,1 - bst r13,4 - bld r22,2 - bst r25,4 - bld r22,3 - bst r5,5 - bld r22,4 - bst r9,5 - bld r22,5 - bst r13,5 - bld r22,6 - bst r25,5 - bld r22,7 - bst r5,6 - bld r23,0 - bst r9,6 - bld r23,1 - bst r13,6 - bld r23,2 - bst r25,6 - bld r23,3 - bst r5,7 - bld r23,4 - bst r9,7 - bld r23,5 - bst r13,7 - bld r23,6 - bst r25,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128t_decrypt, .-gift128t_decrypt - -#endif - -#endif diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-util.h b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - 
(((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. 
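The load/store helpers above are ordinary byte-buffer macros, so their behaviour can be checked on any host. A minimal standalone sketch using plain-function equivalents of le_load_word32()/le_store_word32() and the byte-wise XOR performed by lw_xor_block(); all names below are local to the sketch, not part of the library:

    #include <stdint.h>
    #include <assert.h>

    /* Plain-function equivalents of le_load_word32()/le_store_word32() above. */
    static uint32_t load_le32(const unsigned char *p)
    {
        return ((uint32_t)p[0]) |
               (((uint32_t)p[1]) << 8) |
               (((uint32_t)p[2]) << 16) |
               (((uint32_t)p[3]) << 24);
    }

    static void store_le32(unsigned char *p, uint32_t x)
    {
        p[0] = (unsigned char)x;
        p[1] = (unsigned char)(x >> 8);
        p[2] = (unsigned char)(x >> 16);
        p[3] = (unsigned char)(x >> 24);
    }

    int main(void)
    {
        unsigned char buf[4];
        store_le32(buf, 0x11223344);            /* buf holds 44 33 22 11 */
        assert(load_le32(buf) == 0x11223344);   /* load undoes store     */

        /* lw_xor_block(dest, src, 4) has the same effect as this loop:
         * XOR src into dest, byte by byte, in place. */
        unsigned char dest[4] = { 0xAA, 0xBB, 0xCC, 0xDD };
        unsigned char src[4]  = { 0x01, 0x02, 0x03, 0x04 };
        for (unsigned i = 0; i < 4; ++i)
            dest[i] ^= src[i];
        assert(dest[0] == 0xAB);
        return 0;
    }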
- * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
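The composed forms only pay off on AVR, where 1-bit and 8-bit rotations are the cheap ones, but the identities they rely on can be checked anywhere. A small host-side sketch of two of the compositions used above, written with generic rotates (function names are local to the sketch):

    #include <stdint.h>
    #include <assert.h>

    /* Generic 32-bit rotations, as defined earlier in this header. */
    static uint32_t rol32(uint32_t x, unsigned bits) { return (x << bits) | (x >> (32 - bits)); }
    static uint32_t ror32(uint32_t x, unsigned bits) { return (x >> bits) | (x << (32 - bits)); }

    int main(void)
    {
        uint32_t x = 0x80402011;
        /* leftRotate5: rotate left by 8, then right by 3 (three 1-bit steps) */
        assert(rol32(x, 5) == ror32(rol32(x, 8), 3));
        /* leftRotate12: rotate left by 16, then right by 4 */
        assert(rol32(x, 12) == ror32(rol32(x, 16), 4));
        /* leftRotate31: a single rotate right by 1 */
        assert(rol32(x, 31) == ror32(x, 1));
        return 0;
    }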
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/estate.c b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/estate.c index 355aa92..a570791 100644 --- a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/estate.c +++ b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/estate.c @@ -48,7 +48,7 @@ aead_cipher_t const estate_twegift_cipher = { static void estate_twegift_fcbc (const gift128n_key_schedule_t *ks, unsigned char tag[16], const unsigned char *m, unsigned long long mlen, - unsigned char tweak1, unsigned char tweak2) + uint32_t tweak1, uint32_t tweak2) { while (mlen > 16) { lw_xor_block(tag, m, 16); @@ -84,24 +84,29 @@ static void estate_twegift_authenticate { /* Handle the case where both the message and associated data are empty */ if (mlen == 0 && adlen == 0) { - gift128t_encrypt(ks, tag, tag, /*tweak=*/8); + gift128t_encrypt(ks, tag, tag, GIFT128T_TWEAK_8); return; } /* Encrypt the nonce */ - gift128t_encrypt(ks, tag, tag, /*tweak=*/1); + gift128t_encrypt(ks, tag, tag, GIFT128T_TWEAK_1); /* Compute the FCBC MAC over the associated data */ if (adlen != 0) { - if (mlen != 0) - estate_twegift_fcbc(ks, tag, ad, adlen, /*tweak1=*/2, /*tweak2=*/3); - else - estate_twegift_fcbc(ks, tag, ad, adlen, /*tweak1=*/6, /*tweak2=*/7); + if (mlen != 0) { + estate_twegift_fcbc + (ks, tag, ad, adlen, GIFT128T_TWEAK_2, GIFT128T_TWEAK_3); + } else { + estate_twegift_fcbc + (ks, tag, ad, adlen, GIFT128T_TWEAK_6, GIFT128T_TWEAK_7); + } } /* Compute the FCBC MAC over the message data */ - if (mlen != 0) - estate_twegift_fcbc(ks, tag, m, mlen, /*tweak1=*/4, /*tweak2=*/5); + if (mlen != 0) { + estate_twegift_fcbc + (ks, tag, m, mlen, GIFT128T_TWEAK_4, GIFT128T_TWEAK_5); + } } /** @@ -148,8 +153,7 @@ int estate_twegift_aead_encrypt *clen = mlen + ESTATE_TWEGIFT_TAG_SIZE; /* Set up the key schedule and copy the nonce into the tag */ - if (!gift128n_init(&ks, k, ESTATE_TWEGIFT_KEY_SIZE)) - return -1; + gift128n_init(&ks, k); memcpy(tag, npub, 16); /* Authenticate the associated data and plaintext */ @@ -181,8 +185,7 @@ int estate_twegift_aead_decrypt *mlen = clen - ESTATE_TWEGIFT_TAG_SIZE; /* Set up the key schedule and copy the nonce into the tag */ - if (!gift128n_init(&ks, k, ESTATE_TWEGIFT_KEY_SIZE)) - return -1; + gift128n_init(&ks, k); memcpy(tag, npub, 16); /* Decrypt the ciphertext to generate the plaintext */ diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128-config.h b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128-config.h new file mode 
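The authenticate routine in the estate.c hunk above picks a different tweak pair for each kind of data. A sketch of that selection pulled out on its own; the small integers are only the indices in the GIFT128T_TWEAK_* names (the actual constant values live in internal-gift128.h and are not reproduced here), and the struct and function names are illustrative:

    /* The two tweak indices handed to estate_twegift_fcbc() as tweak1/tweak2. */
    struct tweak_pair { unsigned tweak1, tweak2; };

    /* Mirrors the branches in estate_twegift_authenticate() above:
     *   - associated data uses indices 2/3 when message data follows, 6/7 otherwise;
     *   - message data always uses 4/5;
     *   - the nonce is encrypted with index 1, and the "both empty" case makes a
     *     single call with index 8. */
    struct tweak_pair estate_ad_tweak_indices(int message_nonempty)
    {
        struct tweak_pair t;
        if (message_nonempty) {
            t.tweak1 = 2; t.tweak2 = 3;
        } else {
            t.tweak1 = 6; t.tweak2 = 7;
        }
        return t;
    }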
100644 index 0000000..62131ba --- /dev/null +++ b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128-config.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_GIFT128_CONFIG_H +#define LW_INTERNAL_GIFT128_CONFIG_H + +/** + * \file internal-gift128-config.h + * \brief Configures the variant of GIFT-128 to use. + */ + +/** + * \brief Select the full variant of GIFT-128. + * + * The full variant requires 320 bytes for the key schedule and uses the + * fixslicing method to implement encryption and decryption. + */ +#define GIFT128_VARIANT_FULL 0 + +/** + * \brief Select the small variant of GIFT-128. + * + * The small variant requires 80 bytes for the key schedule. The rest + * of the key schedule is expanded on the fly during encryption. + * + * The fixslicing method is used to implement encryption and the slower + * bitslicing method is used to implement decryption. The small variant + * is suitable when memory is at a premium, decryption is not needed, + * but encryption performance is still important. + */ +#define GIFT128_VARIANT_SMALL 1 + +/** + * \brief Select the tiny variant of GIFT-128. + * + * The tiny variant requires 16 bytes for the key schedule and uses the + * bitslicing method to implement encryption and decryption. It is suitable + * for use when memory is very tight and performance is not critical. + */ +#define GIFT128_VARIANT_TINY 2 + +/** + * \def GIFT128_VARIANT + * \brief Selects the default variant of GIFT-128 to use on this platform. + */ +/** + * \def GIFT128_VARIANT_ASM + * \brief Defined to 1 if the GIFT-128 implementation has been replaced + * with an assembly code version. 
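The three variants documented above trade key-schedule storage for speed (320, 80 and 16 bytes respectively). A minimal sketch of selecting a variant at build time; the configuration header falls back to the full variant when nothing is defined, and the printing program here is purely illustrative:

    #include <stdio.h>

    /* Variant identifiers and key-schedule sizes as documented above. */
    #define GIFT128_VARIANT_FULL  0   /* 320-byte schedule, fixsliced enc + dec         */
    #define GIFT128_VARIANT_SMALL 1   /* 80-byte schedule, fixsliced enc, bitsliced dec */
    #define GIFT128_VARIANT_TINY  2   /* 16-byte schedule, bitsliced enc + dec          */

    /* A build would normally pass e.g. -DGIFT128_VARIANT=GIFT128_VARIANT_SMALL;
     * without that, the configuration header defaults to the full variant. */
    #ifndef GIFT128_VARIANT
    #define GIFT128_VARIANT GIFT128_VARIANT_FULL
    #endif

    int main(void)
    {
    #if GIFT128_VARIANT == GIFT128_VARIANT_TINY
        printf("tiny variant: 16-byte key schedule\n");
    #elif GIFT128_VARIANT == GIFT128_VARIANT_SMALL
        printf("small variant: 80-byte key schedule\n");
    #else
        printf("full variant: 320-byte key schedule\n");
    #endif
        return 0;
    }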
+ */ +#if defined(__AVR__) && !defined(GIFT128_VARIANT_ASM) +#define GIFT128_VARIANT_ASM 1 +#endif +#if !defined(GIFT128_VARIANT) +#define GIFT128_VARIANT GIFT128_VARIANT_FULL +#endif +#if !defined(GIFT128_VARIANT_ASM) +#define GIFT128_VARIANT_ASM 0 +#endif + +#endif diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128.c b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128.c index 681dbc8..c6ac5ec 100644 --- a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128.c +++ b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128.c @@ -23,8 +23,12 @@ #include "internal-gift128.h" #include "internal-util.h" +#if !GIFT128_VARIANT_ASM + +#if GIFT128_VARIANT != GIFT128_VARIANT_TINY + /* Round constants for GIFT-128 in the fixsliced representation */ -static uint32_t const GIFT128_RC[40] = { +static uint32_t const GIFT128_RC_fixsliced[40] = { 0x10000008, 0x80018000, 0x54000002, 0x01010181, 0x8000001f, 0x10888880, 0x6001e000, 0x51500002, 0x03030180, 0x8000002f, 0x10088880, 0x60016000, 0x41500002, 0x03030080, 0x80000027, 0x10008880, 0x4001e000, 0x11500002, @@ -34,6 +38,246 @@ static uint32_t const GIFT128_RC[40] = { 0xc001a000, 0x14500002, 0x01020181, 0x8000001a }; +#endif + +#if GIFT128_VARIANT != GIFT128_VARIANT_FULL + +/* Round constants for GIFT-128 in the bitsliced representation */ +static uint8_t const GIFT128_RC[40] = { + 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, + 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, + 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, + 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38, + 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A +}; + +#endif + +/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ +#define bit_permute_step(_y, mask, shift) \ + do { \ + uint32_t y = (_y); \ + uint32_t t = ((y >> (shift)) ^ y) & (mask); \ + (_y) = (y ^ t) ^ (t << (shift)); \ + } while (0) + +/* + * The permutation below was generated by the online permuation generator at + * "http://programming.sirrida.de/calcperm.php". + * + * All of the permutuations are essentially the same, except that each is + * rotated by 8 bits with respect to the next: + * + * P0: 0 24 16 8 1 25 17 9 2 26 18 10 3 27 19 11 4 28 20 12 5 29 21 13 6 30 22 14 7 31 23 15 + * P1: 8 0 24 16 9 1 25 17 10 2 26 18 11 3 27 19 12 4 28 20 13 5 29 21 14 6 30 22 15 7 31 23 + * P2: 16 8 0 24 17 9 1 25 18 10 2 26 19 11 3 27 20 12 4 28 21 13 5 29 22 14 6 30 23 15 7 31 + * P3: 24 16 8 0 25 17 9 1 26 18 10 2 27 19 11 3 28 20 12 4 29 21 13 5 30 22 14 6 31 23 15 7 + * + * The most efficient permutation from the online generator was P3, so we + * perform it as the core of the others, and then perform a final rotation. + * + * It is possible to do slightly better than "P3 then rotate" on desktop and + * server architectures for the other permutations. But the advantage isn't + * as evident on embedded platforms so we keep things simple. 
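bit_permute_step() is the standard delta-swap from the page cited above: for a given (mask, shift) it exchanges the bit field selected by mask with the field shift positions higher, and for disjoint fields applying the same step twice restores the input. A standalone sketch with arbitrary test values:

    #include <stdint.h>
    #include <assert.h>

    /* The same delta-swap as the bit_permute_step() macro, as a function. */
    static uint32_t delta_swap32(uint32_t y, uint32_t mask, unsigned shift)
    {
        uint32_t t = ((y >> shift) ^ y) & mask;
        return (y ^ t) ^ (t << shift);
    }

    int main(void)
    {
        /* mask 0x000000ff, shift 24: the lowest and highest bytes trade places */
        assert(delta_swap32(0x11223344, 0x000000ff, 24) == 0x44223311);

        /* A delta-swap over disjoint fields is its own inverse, which is why the
         * forward and inverse permutations above can reuse the same primitive
         * with different (mask, shift) constants. */
        uint32_t x = 0xCAFEBABE;
        assert(delta_swap32(delta_swap32(x, 0x0a0a0a0a, 3), 0x0a0a0a0a, 3) == x);
        return 0;
    }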
+ */ +#define PERM3_INNER(x) \ + do { \ + bit_permute_step(x, 0x0a0a0a0a, 3); \ + bit_permute_step(x, 0x00cc00cc, 6); \ + bit_permute_step(x, 0x0000f0f0, 12); \ + bit_permute_step(x, 0x000000ff, 24); \ + } while (0) +#define PERM0(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate8(_x); \ + } while (0) +#define PERM1(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate16(_x); \ + } while (0) +#define PERM2(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate24(_x); \ + } while (0) +#define PERM3(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) + +#define INV_PERM3_INNER(x) \ + do { \ + bit_permute_step(x, 0x00550055, 9); \ + bit_permute_step(x, 0x00003333, 18); \ + bit_permute_step(x, 0x000f000f, 12); \ + bit_permute_step(x, 0x000000ff, 24); \ + } while (0) +#define INV_PERM0(x) \ + do { \ + uint32_t _x = rightRotate8(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM1(x) \ + do { \ + uint32_t _x = rightRotate16(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM2(x) \ + do { \ + uint32_t _x = rightRotate24(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM3(x) \ + do { \ + uint32_t _x = (x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) + +/** + * \brief Converts the GIFT-128 nibble-based representation into word-based. + * + * \param output Output buffer to write the word-based version to. + * \param input Input buffer to read the nibble-based version from. + * + * The \a input and \a output buffers can be the same buffer. + */ +static void gift128n_to_words + (unsigned char *output, const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + + /* Load the input buffer into 32-bit words. We use the nibble order + * from the HYENA submission to NIST which is byte-reversed with respect + * to the nibble order of the original GIFT-128 paper. Nibble zero is in + * the first byte instead of the last, which means little-endian order. */ + s0 = le_load_word32(input + 12); + s1 = le_load_word32(input + 8); + s2 = le_load_word32(input + 4); + s3 = le_load_word32(input); + + /* Rearrange the bits so that bits 0..3 of each nibble are + * scattered to bytes 0..3 of each word. The permutation is: + * + * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 + * + * Generated with "http://programming.sirrida.de/calcperm.php". + */ + #define PERM_WORDS(_x) \ + do { \ + uint32_t x = (_x); \ + bit_permute_step(x, 0x0a0a0a0a, 3); \ + bit_permute_step(x, 0x00cc00cc, 6); \ + bit_permute_step(x, 0x0000f0f0, 12); \ + bit_permute_step(x, 0x0000ff00, 8); \ + (_x) = x; \ + } while (0) + PERM_WORDS(s0); + PERM_WORDS(s1); + PERM_WORDS(s2); + PERM_WORDS(s3); + + /* Rearrange the bytes and write them to the output buffer */ + output[0] = (uint8_t)s0; + output[1] = (uint8_t)s1; + output[2] = (uint8_t)s2; + output[3] = (uint8_t)s3; + output[4] = (uint8_t)(s0 >> 8); + output[5] = (uint8_t)(s1 >> 8); + output[6] = (uint8_t)(s2 >> 8); + output[7] = (uint8_t)(s3 >> 8); + output[8] = (uint8_t)(s0 >> 16); + output[9] = (uint8_t)(s1 >> 16); + output[10] = (uint8_t)(s2 >> 16); + output[11] = (uint8_t)(s3 >> 16); + output[12] = (uint8_t)(s0 >> 24); + output[13] = (uint8_t)(s1 >> 24); + output[14] = (uint8_t)(s2 >> 24); + output[15] = (uint8_t)(s3 >> 24); +} + +/** + * \brief Converts the GIFT-128 word-based representation into nibble-based. 
+ * + * \param output Output buffer to write the nibble-based version to. + * \param input Input buffer to read the word-based version from. + */ +static void gift128n_to_nibbles + (unsigned char *output, const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + + /* Load the input bytes and rearrange them so that s0 contains the + * most significant nibbles and s3 contains the least significant */ + s0 = (((uint32_t)(input[12])) << 24) | + (((uint32_t)(input[8])) << 16) | + (((uint32_t)(input[4])) << 8) | + ((uint32_t)(input[0])); + s1 = (((uint32_t)(input[13])) << 24) | + (((uint32_t)(input[9])) << 16) | + (((uint32_t)(input[5])) << 8) | + ((uint32_t)(input[1])); + s2 = (((uint32_t)(input[14])) << 24) | + (((uint32_t)(input[10])) << 16) | + (((uint32_t)(input[6])) << 8) | + ((uint32_t)(input[2])); + s3 = (((uint32_t)(input[15])) << 24) | + (((uint32_t)(input[11])) << 16) | + (((uint32_t)(input[7])) << 8) | + ((uint32_t)(input[3])); + + /* Apply the inverse of PERM_WORDS() from the function above */ + #define INV_PERM_WORDS(_x) \ + do { \ + uint32_t x = (_x); \ + bit_permute_step(x, 0x00aa00aa, 7); \ + bit_permute_step(x, 0x0000cccc, 14); \ + bit_permute_step(x, 0x00f000f0, 4); \ + bit_permute_step(x, 0x0000ff00, 8); \ + (_x) = x; \ + } while (0) + INV_PERM_WORDS(s0); + INV_PERM_WORDS(s1); + INV_PERM_WORDS(s2); + INV_PERM_WORDS(s3); + + /* Store the result into the output buffer as 32-bit words */ + le_store_word32(output + 12, s0); + le_store_word32(output + 8, s1); + le_store_word32(output + 4, s2); + le_store_word32(output, s3); +} + +void gift128n_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + gift128n_to_words(output, input); + gift128b_encrypt(ks, output, output); + gift128n_to_nibbles(output, output); +} + +void gift128n_decrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + gift128n_to_words(output, input); + gift128b_decrypt(ks, output, output); + gift128n_to_nibbles(output, output); +} + +#if GIFT128_VARIANT != GIFT128_VARIANT_TINY + /** * \brief Swaps bits within two words. 
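gift128n_encrypt() and gift128n_decrypt() above are thin wrappers that convert between the HYENA nibble order and the word-based state around the bit-sliced core, so an encrypt followed by a decrypt must return the original block. A usage sketch against the interface added in this patch; the key and plaintext values are illustrative:

    #include <string.h>
    #include "internal-gift128.h"

    int gift128n_roundtrip(void)
    {
        static const unsigned char key[16] = {
            0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
            0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
        };
        unsigned char block[16] = { 0 };
        unsigned char copy[16];
        gift128n_key_schedule_t ks;

        memcpy(copy, block, 16);
        gift128n_init(&ks, key);              /* void-returning init from this patch */
        gift128n_encrypt(&ks, block, block);  /* nibbles -> words -> core -> nibbles */
        gift128n_decrypt(&ks, block, block);
        return memcmp(block, copy, 16) == 0;  /* expected: 1 */
    }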
* @@ -202,21 +446,27 @@ static void gift128b_compute_round_keys /* Keys 8, 9, 18, and 19 do not need any adjustment */ } +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL /* Derive the fixsliced keys for the remaining rounds 11..40 */ for (index = 20; index < 80; index += 10) { gift128b_derive_keys(ks->k + index, ks->k + index - 20); } +#endif } -int gift128b_init - (gift128b_key_schedule_t *ks, const unsigned char *key, size_t key_len) +void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) { - if (!ks || !key || key_len != 16) - return 0; gift128b_compute_round_keys (ks, be_load_word32(key), be_load_word32(key + 4), be_load_word32(key + 8), be_load_word32(key + 12)); - return 1; +} + +void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) +{ + /* Use the little-endian key byte order from the HYENA submission */ + gift128b_compute_round_keys + (ks, le_load_word32(key + 12), le_load_word32(key + 8), + le_load_word32(key + 4), le_load_word32(key)); } /** @@ -521,11 +771,37 @@ int gift128b_init gift128b_inv_sbox(s3, s1, s2, s0); \ } while (0) +#else /* GIFT128_VARIANT_TINY */ + +void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) +{ + /* Mirror the fixslicing word order of 3, 1, 2, 0 */ + ks->k[0] = be_load_word32(key + 12); + ks->k[1] = be_load_word32(key + 4); + ks->k[2] = be_load_word32(key + 8); + ks->k[3] = be_load_word32(key); +} + +void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) +{ + /* Use the little-endian key byte order from the HYENA submission + * and mirror the fixslicing word order of 3, 1, 2, 0 */ + ks->k[0] = le_load_word32(key); + ks->k[1] = le_load_word32(key + 8); + ks->k[2] = le_load_word32(key + 4); + ks->k[3] = le_load_word32(key + 12); +} + +#endif /* GIFT128_VARIANT_TINY */ + +#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL + void gift128b_encrypt (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { uint32_t s0, s1, s2, s3; + uint32_t k[20]; /* Copy the plaintext into the state buffer and convert from big endian */ s0 = be_load_word32(input); @@ -534,14 +810,20 @@ void gift128b_encrypt s3 = be_load_word32(input + 12); /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer in big endian */ be_store_word32(output, s0); @@ -555,6 +837,7 @@ void 
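Comparing gift128b_init() and gift128n_init() above shows that the nibble-based key order is simply the byte reversal of the bit-sliced order. A host-side sketch of that relationship; the function name is illustrative, and only the first four schedule words are compared so the sketch stays independent of the variant's schedule length:

    #include "internal-gift128.h"

    /* If rev[] is key[] with its 16 bytes reversed, gift128b_init(&b, rev) and
     * gift128n_init(&n, key) feed the same four words into the key schedule,
     * so the two schedules come out identical. */
    int gift128_key_order_check(const unsigned char key[16])
    {
        unsigned char rev[16];
        gift128n_key_schedule_t n;
        gift128b_key_schedule_t b;
        int i;
        for (i = 0; i < 16; ++i)
            rev[i] = key[15 - i];
        gift128n_init(&n, key);
        gift128b_init(&b, rev);
        return n.k[0] == b.k[0] && n.k[1] == b.k[1] &&
               n.k[2] == b.k[2] && n.k[3] == b.k[3];   /* expected: 1 */
    }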
gift128b_encrypt_preloaded const uint32_t input[4]) { uint32_t s0, s1, s2, s3; + uint32_t k[20]; /* Copy the plaintext into local variables */ s0 = input[0]; @@ -563,14 +846,20 @@ void gift128b_encrypt_preloaded s3 = input[3]; /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer */ output[0] = s0; @@ -579,7 +868,55 @@ void gift128b_encrypt_preloaded output[3] = s3; } -void gift128b_decrypt +void gift128t_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, uint32_t tweak) +{ + uint32_t s0, s1, s2, s3; + uint32_t k[20]; + + /* Copy the plaintext into the state buffer and convert from nibbles */ + gift128n_to_words(output, input); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* Perform all 40 rounds five at a time using the fixsliced method. 
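In the GIFT128_VARIANT_SMALL paths above, only the first 20 round-key words are stored and every later block of ten rounds derives its keys into a 20-word stack buffer. A structural sketch of that pattern; derive_block() and run_ten_rounds() are placeholders standing in for the pairs of gift128b_derive_keys() and gift128b_encrypt_5_rounds() calls, not real library functions:

    #include <stdint.h>
    #include <string.h>

    /* Placeholders for the key-derivation and round steps used above. */
    static void derive_block(uint32_t k[20])         { (void)k; }
    static void run_ten_rounds(const uint32_t k[20]) { (void)k; }

    void small_variant_shape(const uint32_t stored[20])
    {
        uint32_t k[20];
        int block;

        run_ten_rounds(stored);          /* rounds 1..10 use the stored schedule  */
        memcpy(k, stored, sizeof(k));
        for (block = 1; block < 4; ++block) {
            derive_block(k);             /* expand the next 20 key words in place */
            run_ten_rounds(k);           /* rounds 10*block+1 .. 10*block+10      */
        }
    }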
+ * Every 5 rounds except the last we add the tweak value to the state */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + s0 ^= tweak; + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + s0 ^= tweak; + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + s0 ^= tweak; + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); + gift128n_to_nibbles(output, output); +} + +#elif GIFT128_VARIANT == GIFT128_VARIANT_FULL + +void gift128b_encrypt (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { @@ -592,14 +929,14 @@ void gift128b_decrypt s3 = be_load_word32(input + 12); /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer in big endian */ be_store_word32(output, s0); @@ -608,173 +945,308 @@ void gift128b_decrypt be_store_word32(output + 12, s3); } -int gift128n_init - (gift128n_key_schedule_t *ks, const unsigned char *key, size_t key_len) +void gift128b_encrypt_preloaded + (const gift128b_key_schedule_t *ks, uint32_t output[4], + const uint32_t input[4]) { - /* Use the little-endian key byte order from the HYENA submission */ - if (!ks || !key || key_len != 16) - return 0; - gift128b_compute_round_keys - (ks, le_load_word32(key + 12), le_load_word32(key + 8), - le_load_word32(key + 4), le_load_word32(key)); - return 1; + uint32_t s0, s1, s2, s3; + + /* Copy the plaintext into local variables */ + s0 = input[0]; + s1 = input[1]; + s2 = input[2]; + s3 = input[3]; + + /* Perform all 40 rounds five at a time using the fixsliced method */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(ks->k + 30, 
GIFT128_RC_fixsliced + 15); + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer */ + output[0] = s0; + output[1] = s1; + output[2] = s2; + output[3] = s3; } -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) +void gift128t_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, uint32_t tweak) +{ + uint32_t s0, s1, s2, s3; -/** - * \brief Converts the GIFT-128 nibble-based representation into word-based. - * - * \param output Output buffer to write the word-based version to. - * \param input Input buffer to read the nibble-based version from. - * - * The \a input and \a output buffers can be the same buffer. - */ -static void gift128n_to_words - (unsigned char *output, const unsigned char *input) + /* Copy the plaintext into the state buffer and convert from nibbles */ + gift128n_to_words(output, input); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* Perform all 40 rounds five at a time using the fixsliced method. + * Every 5 rounds except the last we add the tweak value to the state */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); + gift128n_to_nibbles(output, output); +} + +#else /* GIFT128_VARIANT_TINY */ + +void gift128b_encrypt + (const gift128b_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) { uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Load the input buffer into 32-bit words. We use the nibble order - * from the HYENA submission to NIST which is byte-reversed with respect - * to the nibble order of the original GIFT-128 paper. Nibble zero is in - * the first byte instead of the last, which means little-endian order. */ - s0 = le_load_word32(input + 12); - s1 = le_load_word32(input + 8); - s2 = le_load_word32(input + 4); - s3 = le_load_word32(input); + /* Copy the plaintext into the state buffer and convert from big endian */ + s0 = be_load_word32(input); + s1 = be_load_word32(input + 4); + s2 = be_load_word32(input + 8); + s3 = be_load_word32(input + 12); - /* Rearrange the bits so that bits 0..3 of each nibble are - * scattered to bytes 0..3 of each word. 
The permutation is: - * - * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 - * - * Generated with "http://programming.sirrida.de/calcperm.php". - */ - #define PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - PERM_WORDS(s0); - PERM_WORDS(s1); - PERM_WORDS(s2); - PERM_WORDS(s3); + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } - /* Rearrange the bytes and write them to the output buffer */ - output[0] = (uint8_t)s0; - output[1] = (uint8_t)s1; - output[2] = (uint8_t)s2; - output[3] = (uint8_t)s3; - output[4] = (uint8_t)(s0 >> 8); - output[5] = (uint8_t)(s1 >> 8); - output[6] = (uint8_t)(s2 >> 8); - output[7] = (uint8_t)(s3 >> 8); - output[8] = (uint8_t)(s0 >> 16); - output[9] = (uint8_t)(s1 >> 16); - output[10] = (uint8_t)(s2 >> 16); - output[11] = (uint8_t)(s3 >> 16); - output[12] = (uint8_t)(s0 >> 24); - output[13] = (uint8_t)(s1 >> 24); - output[14] = (uint8_t)(s2 >> 24); - output[15] = (uint8_t)(s3 >> 24); + /* Pack the state into the ciphertext buffer in big endian */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); } -/** - * \brief Converts the GIFT-128 word-based representation into nibble-based. - * - * \param output Output buffer to write the nibble-based version to. - * \param input Input buffer to read the word-based version from. 
- */ -static void gift128n_to_nibbles - (unsigned char *output, const unsigned char *input) +void gift128b_encrypt_preloaded + (const gift128b_key_schedule_t *ks, uint32_t output[4], + const uint32_t input[4]) { uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Load the input bytes and rearrange them so that s0 contains the - * most significant nibbles and s3 contains the least significant */ - s0 = (((uint32_t)(input[12])) << 24) | - (((uint32_t)(input[8])) << 16) | - (((uint32_t)(input[4])) << 8) | - ((uint32_t)(input[0])); - s1 = (((uint32_t)(input[13])) << 24) | - (((uint32_t)(input[9])) << 16) | - (((uint32_t)(input[5])) << 8) | - ((uint32_t)(input[1])); - s2 = (((uint32_t)(input[14])) << 24) | - (((uint32_t)(input[10])) << 16) | - (((uint32_t)(input[6])) << 8) | - ((uint32_t)(input[2])); - s3 = (((uint32_t)(input[15])) << 24) | - (((uint32_t)(input[11])) << 16) | - (((uint32_t)(input[7])) << 8) | - ((uint32_t)(input[3])); + /* Copy the plaintext into the state buffer */ + s0 = input[0]; + s1 = input[1]; + s2 = input[2]; + s3 = input[3]; - /* Apply the inverse of PERM_WORDS() from the function above */ - #define INV_PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x00aa00aa, 7); \ - bit_permute_step(x, 0x0000cccc, 14); \ - bit_permute_step(x, 0x00f000f0, 4); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - INV_PERM_WORDS(s0); - INV_PERM_WORDS(s1); - INV_PERM_WORDS(s2); - INV_PERM_WORDS(s3); + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } - /* Store the result into the output buffer as 32-bit words */ - le_store_word32(output + 12, s0); - le_store_word32(output + 8, s1); - le_store_word32(output + 4, s2); - le_store_word32(output, s3); + /* Pack the state into the ciphertext buffer */ + output[0] = s0; + output[1] = s1; + output[2] = s2; + output[3] = s3; } -void gift128n_encrypt +void gift128t_encrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) + const unsigned char *input, uint32_t tweak) { + uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; + + /* Copy the plaintext into the state buffer and convert from nibbles */ gift128n_to_words(output, input); - gift128b_encrypt(ks, output, output); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 
0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* AddTweak - XOR in the tweak every 5 rounds except the last */ + if (((round + 1) % 5) == 0 && round < 39) + s0 ^= tweak; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); gift128n_to_nibbles(output, output); } -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, +#endif /* GIFT128_VARIANT_TINY */ + +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL + +void gift128b_decrypt + (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { - gift128n_to_words(output, input); - gift128b_decrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} + uint32_t s0, s1, s2, s3; -/* 4-bit tweak values expanded to 32-bit */ -static uint32_t const GIFT128_tweaks[16] = { - 0x00000000, 0xe1e1e1e1, 0xd2d2d2d2, 0x33333333, - 0xb4b4b4b4, 0x55555555, 0x66666666, 0x87878787, - 0x78787878, 0x99999999, 0xaaaaaaaa, 0x4b4b4b4b, - 0xcccccccc, 0x2d2d2d2d, 0x1e1e1e1e, 0xffffffff -}; + /* Copy the plaintext into the state buffer and convert from big endian */ + s0 = be_load_word32(input); + s1 = be_load_word32(input + 4); + s2 = be_load_word32(input + 8); + s3 = be_load_word32(input + 12); -void gift128t_encrypt + /* Perform all 40 rounds five at a time using the fixsliced method */ + gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + + /* Pack the state into the ciphertext buffer in big endian */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); +} + +void gift128t_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak) + const unsigned char *input, uint32_t tweak) { - uint32_t s0, s1, s2, s3, tword; + uint32_t s0, s1, s2, s3; - /* Copy the plaintext into the state buffer and convert from nibbles */ + /* Copy the ciphertext into the state buffer and convert from nibbles */ gift128n_to_words(output, input); s0 = be_load_word32(output); s1 = be_load_word32(output + 4); @@ -782,25 +1254,24 @@ void gift128t_encrypt s3 = be_load_word32(output + 12); /* Perform all 40 rounds five at a time using the fixsliced method. 
- * Every 5 rounds except the last we add the tweak value to the state */ - tword = GIFT128_tweaks[tweak]; - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); + * Every 5 rounds except the first we add the tweak value to the state */ + gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - /* Pack the state into the ciphertext buffer in nibble form */ + /* Pack the state into the plaintext buffer in nibble form */ be_store_word32(output, s0); be_store_word32(output + 4, s1); be_store_word32(output + 8, s2); @@ -808,37 +1279,211 @@ void gift128t_encrypt gift128n_to_nibbles(output, output); } +#else /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ + +/* The small variant uses fixslicing for encryption, but we need to change + * to bitslicing for decryption because of the difficulty of fast-forwarding + * the fixsliced key schedule to the end. So the tiny variant is used for + * decryption when the small variant is selected. Since the NIST AEAD modes + * for GIFT-128 only use the block encrypt operation, the inefficiencies + * in decryption don't matter all that much */ + +/** + * \def gift128b_load_and_forward_schedule() + * \brief Generate the decryption key at the end of the last round. + * + * To do that, we run the block operation forward to determine the + * final state of the key schedule after the last round: + * + * w0 = ks->k[0]; + * w1 = ks->k[1]; + * w2 = ks->k[2]; + * w3 = ks->k[3]; + * for (round = 0; round < 40; ++round) { + * temp = w3; + * w3 = w2; + * w2 = w1; + * w1 = w0; + * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + * } + * + * We can short-cut all of the above by noticing that we don't need + * to do the word rotations. Every 4 rounds, the rotation alignment + * returns to the original position and each word has been rotated + * by applying the "2 right and 4 left" bit-rotation step to it. + * We then repeat that 10 times for the full 40 rounds. The overall + * effect is to apply a "20 right and 40 left" bit-rotation to every + * word in the key schedule. That is equivalent to "4 right and 8 left" + * on the 16-bit sub-words. 
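+ *
+ * As an illustrative check (a sketch only, not code used by this file),
+ * ten applications of the per-round bit-rotation collapse to the single
+ * "4 right and 8 left" rotation that the macros below apply.  For any
+ * 32-bit word w:
+ *
+ *     x = w;
+ *     for (round = 0; round < 10; ++round) {
+ *         x = ((x & 0xFFFC0000U) >> 2) | ((x & 0x00030000U) << 14) |
+ *             ((x & 0x00000FFFU) << 4) | ((x & 0x0000F000U) >> 12);
+ *     }
+ *     y = ((w & 0xFFF00000U) >> 4) | ((w & 0x000F0000U) << 12) |
+ *         ((w & 0x000000FFU) << 8) | ((w & 0x0000FF00U) >> 8);
+ *
+ * after which x and y are equal: the upper half has been rotated right
+ * by 20 = 4 (mod 16) bits and the lower half left by 40 = 8 (mod 16) bits.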
+ */ +#if GIFT128_VARIANT != GIFT128_VARIANT_SMALL +#define gift128b_load_and_forward_schedule() \ + do { \ + w0 = ks->k[3]; \ + w1 = ks->k[1]; \ + w2 = ks->k[2]; \ + w3 = ks->k[0]; \ + w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ + ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ + w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ + ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ + w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ + ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ + w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ + ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ + } while (0) +#else +/* The small variant needs to also undo some of the rotations that were + * done to generate the fixsliced version of the key schedule */ +#define gift128b_load_and_forward_schedule() \ + do { \ + w0 = ks->k[3]; \ + w1 = ks->k[1]; \ + w2 = ks->k[2]; \ + w3 = ks->k[0]; \ + gift128b_swap_move(w3, w3, 0x000000FFU, 24); \ + gift128b_swap_move(w3, w3, 0x00003333U, 18); \ + gift128b_swap_move(w3, w3, 0x000F000FU, 12); \ + gift128b_swap_move(w3, w3, 0x00550055U, 9); \ + gift128b_swap_move(w1, w1, 0x000000FFU, 24); \ + gift128b_swap_move(w1, w1, 0x00003333U, 18); \ + gift128b_swap_move(w1, w1, 0x000F000FU, 12); \ + gift128b_swap_move(w1, w1, 0x00550055U, 9); \ + gift128b_swap_move(w2, w2, 0x000000FFU, 24); \ + gift128b_swap_move(w2, w2, 0x000F000FU, 12); \ + gift128b_swap_move(w2, w2, 0x03030303U, 6); \ + gift128b_swap_move(w2, w2, 0x11111111U, 3); \ + gift128b_swap_move(w0, w0, 0x000000FFU, 24); \ + gift128b_swap_move(w0, w0, 0x000F000FU, 12); \ + gift128b_swap_move(w0, w0, 0x03030303U, 6); \ + gift128b_swap_move(w0, w0, 0x11111111U, 3); \ + w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ + ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ + w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ + ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ + w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ + ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ + w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ + ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ + } while (0) +#endif + +void gift128b_decrypt + (const gift128b_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; + + /* Copy the ciphertext into the state buffer and convert from big endian */ + s0 = be_load_word32(input); + s1 = be_load_word32(input + 4); + s2 = be_load_word32(input + 8); + s3 = be_load_word32(input + 12); + + /* Generate the decryption key at the end of the last round */ + gift128b_load_and_forward_schedule(); + + /* Perform all 40 rounds */ + for (round = 40; round > 0; --round) { + /* Rotate the key schedule backwards */ + temp = w0; + w0 = w1; + w1 = w2; + w2 = w3; + w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | + ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; + + /* InvPermBits - apply the inverse of the 128-bit permutation */ + INV_PERM0(s0); + INV_PERM1(s1); + INV_PERM2(s2); + INV_PERM3(s3); + + /* InvSubCells - apply the inverse of the S-box */ + temp = s0; + s0 = s3; + s3 = temp; + s2 ^= s0 & s1; + s3 ^= 0xFFFFFFFFU; + s1 ^= s3; + s3 ^= s2; + s2 ^= s0 | s1; + s0 ^= s1 
& s3; + s1 ^= s0 & s2; + } + + /* Pack the state into the plaintext buffer in big endian */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); +} + void gift128t_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak) + const unsigned char *input, uint32_t tweak) { - uint32_t s0, s1, s2, s3, tword; + uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Copy the ciphertext into the state buffer and convert from nibbles */ + /* Copy the plaintext into the state buffer and convert from nibbles */ gift128n_to_words(output, input); s0 = be_load_word32(output); s1 = be_load_word32(output + 4); s2 = be_load_word32(output + 8); s3 = be_load_word32(output + 12); - /* Perform all 40 rounds five at a time using the fixsliced method. - * Every 5 rounds except the first we add the tweak value to the state */ - tword = GIFT128_tweaks[tweak]; - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC); + /* Generate the decryption key at the end of the last round */ + gift128b_load_and_forward_schedule(); + + /* Perform all 40 rounds */ + for (round = 40; round > 0; --round) { + /* Rotate the key schedule backwards */ + temp = w0; + w0 = w1; + w1 = w2; + w2 = w3; + w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | + ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); + + /* AddTweak - XOR in the tweak every 5 rounds except the last */ + if ((round % 5) == 0 && round < 40) + s0 ^= tweak; + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; + + /* InvPermBits - apply the inverse of the 128-bit permutation */ + INV_PERM0(s0); + INV_PERM1(s1); + INV_PERM2(s2); + INV_PERM3(s3); + + /* InvSubCells - apply the inverse of the S-box */ + temp = s0; + s0 = s3; + s3 = temp; + s2 ^= s0 & s1; + s3 ^= 0xFFFFFFFFU; + s1 ^= s3; + s3 ^= s2; + s2 ^= s0 | s1; + s0 ^= s1 & s3; + s1 ^= s0 & s2; + } /* Pack the state into the plaintext buffer in nibble form */ be_store_word32(output, s0); @@ -847,3 +1492,7 @@ void gift128t_decrypt be_store_word32(output + 12, s3); gift128n_to_nibbles(output, output); } + +#endif /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ + +#endif /* !GIFT128_VARIANT_ASM */ diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128.h b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128.h index 1ac40e5..f57d143 100644 --- a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128.h +++ b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128.h @@ -47,11 +47,13 @@ * in any of the NIST submissions so we don't bother with it in this library. 
* * References: https://eprint.iacr.org/2017/622.pdf, + * https://eprint.iacr.org/2020/412.pdf, * https://giftcipher.github.io/gift/ */ #include #include +#include "internal-gift128-config.h" #ifdef __cplusplus extern "C" { @@ -63,16 +65,23 @@ extern "C" { #define GIFT128_BLOCK_SIZE 16 /** - * \brief Number of round keys for the fixsliced representation of GIFT-128. + * \var GIFT128_ROUND_KEYS + * \brief Number of round keys for the GIFT-128 key schedule. */ +#if GIFT128_VARIANT == GIFT128_VARIANT_TINY +#define GIFT128_ROUND_KEYS 4 +#elif GIFT128_VARIANT == GIFT128_VARIANT_SMALL +#define GIFT128_ROUND_KEYS 20 +#else #define GIFT128_ROUND_KEYS 80 +#endif /** * \brief Structure of the key schedule for GIFT-128 (bit-sliced). */ typedef struct { - /** Pre-computed round keys in the fixsliced form */ + /** Pre-computed round keys for bit-sliced GIFT-128 */ uint32_t k[GIFT128_ROUND_KEYS]; } gift128b_key_schedule_t; @@ -81,14 +90,9 @@ typedef struct * \brief Initializes the key schedule for GIFT-128 (bit-sliced). * * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. + * \param key Points to the 16 bytes of the key data. */ -int gift128b_init - (gift128b_key_schedule_t *ks, const unsigned char *key, size_t key_len); +void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key); /** * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced). @@ -145,14 +149,9 @@ typedef gift128b_key_schedule_t gift128n_key_schedule_t; * \brief Initializes the key schedule for GIFT-128 (nibble-based). * * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. + * \param key Points to the 16 bytes of the key data. */ -int gift128n_init - (gift128n_key_schedule_t *ks, const unsigned char *key, size_t key_len); +void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key); /** * \brief Encrypts a 128-bit block with GIFT-128 (nibble-based). 
@@ -182,13 +181,31 @@ void gift128n_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, const unsigned char *input); +/* 4-bit tweak values expanded to 32-bit for TweGIFT-128 */ +#define GIFT128T_TWEAK_0 0x00000000 /**< TweGIFT-128 tweak value 0 */ +#define GIFT128T_TWEAK_1 0xe1e1e1e1 /**< TweGIFT-128 tweak value 1 */ +#define GIFT128T_TWEAK_2 0xd2d2d2d2 /**< TweGIFT-128 tweak value 2 */ +#define GIFT128T_TWEAK_3 0x33333333 /**< TweGIFT-128 tweak value 3 */ +#define GIFT128T_TWEAK_4 0xb4b4b4b4 /**< TweGIFT-128 tweak value 4 */ +#define GIFT128T_TWEAK_5 0x55555555 /**< TweGIFT-128 tweak value 5 */ +#define GIFT128T_TWEAK_6 0x66666666 /**< TweGIFT-128 tweak value 6 */ +#define GIFT128T_TWEAK_7 0x87878787 /**< TweGIFT-128 tweak value 7 */ +#define GIFT128T_TWEAK_8 0x78787878 /**< TweGIFT-128 tweak value 8 */ +#define GIFT128T_TWEAK_9 0x99999999 /**< TweGIFT-128 tweak value 9 */ +#define GIFT128T_TWEAK_10 0xaaaaaaaa /**< TweGIFT-128 tweak value 10 */ +#define GIFT128T_TWEAK_11 0x4b4b4b4b /**< TweGIFT-128 tweak value 11 */ +#define GIFT128T_TWEAK_12 0xcccccccc /**< TweGIFT-128 tweak value 12 */ +#define GIFT128T_TWEAK_13 0x2d2d2d2d /**< TweGIFT-128 tweak value 13 */ +#define GIFT128T_TWEAK_14 0x1e1e1e1e /**< TweGIFT-128 tweak value 14 */ +#define GIFT128T_TWEAK_15 0xffffffff /**< TweGIFT-128 tweak value 15 */ + /** * \brief Encrypts a 128-bit block with TweGIFT-128 (tweakable variant). * * \param ks Points to the GIFT-128 key schedule. * \param output Output buffer which must be at least 16 bytes in length. * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value. + * \param tweak 4-bit tweak value expanded to 32-bit. * * The \a input and \a output buffers can be the same buffer for * in-place encryption. @@ -200,7 +217,7 @@ void gift128n_decrypt */ void gift128t_encrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak); + const unsigned char *input, uint32_t tweak); /** * \brief Decrypts a 128-bit block with TweGIFT-128 (tweakable variant). @@ -208,7 +225,7 @@ void gift128t_encrypt * \param ks Points to the GIFT-128 key schedule. * \param output Output buffer which must be at least 16 bytes in length. * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value. + * \param tweak 4-bit tweak value expanded to 32-bit. * * The \a input and \a output buffers can be the same buffer for * in-place encryption. 
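+/* The GIFT128T_TWEAK_n constants above follow a single pattern: the 4-bit
+ * tweak value occupies the low nibble of a byte, the high nibble is the
+ * tweak XORed with 0xF when the tweak has odd parity (and the tweak itself
+ * when the parity is even), and that byte is replicated into all four bytes
+ * of the word.  A minimal sketch of that expansion, using a hypothetical
+ * helper name that is not part of this API:
+ *
+ *     uint32_t gift128t_expand_tweak(unsigned char t)
+ *     {
+ *         unsigned p = (t ^ (t >> 1) ^ (t >> 2) ^ (t >> 3)) & 1;
+ *         unsigned b = (t & 0x0F) | (((t ^ (p ? 0x0F : 0x00)) & 0x0F) << 4);
+ *         return b * 0x01010101U;
+ *     }
+ *
+ * so gift128t_expand_tweak(1) matches GIFT128T_TWEAK_1 and so on.  Callers
+ * pass one of the pre-expanded constants directly, for example:
+ *
+ *     gift128n_init(&ks, key);
+ *     gift128t_encrypt(&ks, output, input, GIFT128T_TWEAK_1);
+ */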
@@ -220,7 +237,7 @@ void gift128t_encrypt */ void gift128t_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak); + const unsigned char *input, uint32_t tweak); #ifdef __cplusplus } diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128n-avr.S b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128n-avr.S new file mode 100644 index 0000000..2aae304 --- /dev/null +++ b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128n-avr.S @@ -0,0 +1,4712 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 40 +table_0: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 11 + .byte 23 + .byte 46 + .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128n_init + .type gift128n_init, @function +gift128n_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + std Z+12,r18 + std Z+13,r19 + std Z+14,r20 + std Z+15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + std Z+4,r18 + std Z+5,r19 + std Z+6,r20 + std Z+7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + ret + .size gift128n_init, .-gift128n_init + + .text +.global gift128n_encrypt + .type gift128n_encrypt, @function +gift128n_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 36 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r22,0 + bst r18,1 + bld r4,0 + bst r18,2 + bld r8,0 + bst r18,3 + bld r12,0 + bst r18,4 + bld r22,1 + bst r18,5 + bld r4,1 + bst r18,6 + bld r8,1 + bst r18,7 + bld r12,1 + bst r19,0 + bld r22,2 + bst r19,1 + bld r4,2 + bst r19,2 + bld r8,2 + bst r19,3 + bld r12,2 + bst r19,4 + bld r22,3 + bst r19,5 + bld r4,3 + bst r19,6 + bld r8,3 + bst r19,7 + bld r12,3 + bst r20,0 + bld r22,4 + bst r20,1 + bld r4,4 + bst r20,2 + bld r8,4 + bst r20,3 + bld r12,4 + bst r20,4 + bld r22,5 + bst r20,5 + bld r4,5 + bst r20,6 + bld r8,5 + bst r20,7 + bld r12,5 + bst r21,0 + bld r22,6 + bst r21,1 + bld r4,6 + bst r21,2 + bld r8,6 + bst r21,3 + bld r12,6 + bst r21,4 + bld r22,7 + bst r21,5 + bld r4,7 + bst r21,6 + bld r8,7 + bst r21,7 + bld r12,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r23,0 + bst r18,1 + bld r5,0 + bst r18,2 + bld r9,0 + bst r18,3 + bld r13,0 + bst r18,4 + bld r23,1 + bst r18,5 + bld r5,1 + bst r18,6 + bld r9,1 + bst r18,7 + bld r13,1 + bst r19,0 + bld r23,2 + bst r19,1 + bld r5,2 + bst r19,2 + bld r9,2 + bst r19,3 + bld r13,2 + bst r19,4 + bld r23,3 + bst r19,5 + bld r5,3 + bst r19,6 + bld r9,3 + bst r19,7 + bld r13,3 + bst r20,0 + bld r23,4 + bst r20,1 + bld r5,4 + bst r20,2 + 
bld r9,4 + bst r20,3 + bld r13,4 + bst r20,4 + bld r23,5 + bst r20,5 + bld r5,5 + bst r20,6 + bld r9,5 + bst r20,7 + bld r13,5 + bst r21,0 + bld r23,6 + bst r21,1 + bld r5,6 + bst r21,2 + bld r9,6 + bst r21,3 + bld r13,6 + bst r21,4 + bld r23,7 + bst r21,5 + bld r5,7 + bst r21,6 + bld r9,7 + bst r21,7 + bld r13,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r2,0 + bst r18,1 + bld r6,0 + bst r18,2 + bld r10,0 + bst r18,3 + bld r14,0 + bst r18,4 + bld r2,1 + bst r18,5 + bld r6,1 + bst r18,6 + bld r10,1 + bst r18,7 + bld r14,1 + bst r19,0 + bld r2,2 + bst r19,1 + bld r6,2 + bst r19,2 + bld r10,2 + bst r19,3 + bld r14,2 + bst r19,4 + bld r2,3 + bst r19,5 + bld r6,3 + bst r19,6 + bld r10,3 + bst r19,7 + bld r14,3 + bst r20,0 + bld r2,4 + bst r20,1 + bld r6,4 + bst r20,2 + bld r10,4 + bst r20,3 + bld r14,4 + bst r20,4 + bld r2,5 + bst r20,5 + bld r6,5 + bst r20,6 + bld r10,5 + bst r20,7 + bld r14,5 + bst r21,0 + bld r2,6 + bst r21,1 + bld r6,6 + bst r21,2 + bld r10,6 + bst r21,3 + bld r14,6 + bst r21,4 + bld r2,7 + bst r21,5 + bld r6,7 + bst r21,6 + bld r10,7 + bst r21,7 + bld r14,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r3,0 + bst r18,1 + bld r7,0 + bst r18,2 + bld r11,0 + bst r18,3 + bld r15,0 + bst r18,4 + bld r3,1 + bst r18,5 + bld r7,1 + bst r18,6 + bld r11,1 + bst r18,7 + bld r15,1 + bst r19,0 + bld r3,2 + bst r19,1 + bld r7,2 + bst r19,2 + bld r11,2 + bst r19,3 + bld r15,2 + bst r19,4 + bld r3,3 + bst r19,5 + bld r7,3 + bst r19,6 + bld r11,3 + bst r19,7 + bld r15,3 + bst r20,0 + bld r3,4 + bst r20,1 + bld r7,4 + bst r20,2 + bld r11,4 + bst r20,3 + bld r15,4 + bst r20,4 + bld r3,5 + bst r20,5 + bld r7,5 + bst r20,6 + bld r11,5 + bst r20,7 + bld r15,5 + bst r21,0 + bld r3,6 + bst r21,1 + bld r7,6 + bst r21,2 + bld r11,6 + bst r21,3 + bld r15,6 + bst r21,4 + bld r3,7 + bst r21,5 + bld r7,7 + bst r21,6 + bld r11,7 + bst r21,7 + bld r15,7 + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r17,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + mov r16,r1 +302: + rcall 455f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + rcall 455f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + rcall 455f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + 
rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + rcall 455f + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + ldi r17,40 + cpse r16,r17 + rjmp 302b + rjmp 804f +455: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + movw r18,r22 + movw r20,r2 + mov r0,r4 + and r0,r18 + eor r8,r0 + mov r0,r5 + and r0,r19 + eor r9,r0 + mov r0,r6 + and r0,r20 + eor r10,r0 + mov r0,r7 + and r0,r21 + eor r11,r0 + movw r22,r12 + movw r2,r14 + movw r12,r18 + movw r14,r20 + bst r22,1 + bld r0,0 + bst r22,4 + bld r22,1 + bst r2,0 + bld r22,4 + bst r22,2 + bld r2,0 + bst r23,0 + bld r22,2 + bst r22,3 + bld r23,0 + bst r23,4 + bld r22,3 + bst r2,3 + bld r23,4 + bst r23,6 + bld r2,3 + bst r3,3 + bld r23,6 + bst r23,5 + bld r3,3 + bst r2,7 + bld r23,5 + bst r3,6 + bld r2,7 + bst r3,1 + bld r3,6 + bst r22,5 + bld r3,1 + bst r2,4 + bld r22,5 + bst r2,2 + bld r2,4 + bst r23,2 + bld r2,2 + bst r23,3 + bld r23,2 + bst r23,7 + bld r23,3 + bst r3,7 + bld r23,7 + bst r3,5 + bld r3,7 + bst r2,5 + bld r3,5 + bst r2,6 + bld r2,5 + bst r3,2 + bld r2,6 + bst r23,1 + bld r3,2 + bst r22,7 + bld r23,1 + bst r3,4 + bld r22,7 + bst r2,1 + bld r3,4 + bst r22,6 + bld r2,1 + bst r3,0 + bld r22,6 + bst r0,0 + bld r3,0 + bst r4,0 + bld r0,0 + bst r4,1 + bld r4,0 + bst r4,5 + bld r4,1 + bst r6,5 + bld r4,5 + bst r6,7 + bld r6,5 + bst r7,7 + bld r6,7 + bst r7,6 + bld r7,7 + bst r7,2 + bld r7,6 + bst r5,2 + bld r7,2 + bst r5,0 + bld r5,2 + bst r0,0 + bld r5,0 + bst r4,2 + bld r0,0 + bst r5,1 + bld r4,2 + bst r4,4 + bld r5,1 + bst r6,1 + bld r4,4 + bst r4,7 + bld r6,1 + bst r7,5 + bld r4,7 + bst r6,6 + bld r7,5 + bst r7,3 + bld r6,6 + bst r5,6 + bld r7,3 + bst r7,0 + bld r5,6 + bst r0,0 + bld r7,0 + bst r4,3 + bld r0,0 + bst r5,5 + bld r4,3 + bst r6,4 + bld r5,5 + bst r6,3 + bld r6,4 + bst r5,7 + bld r6,3 + bst r7,4 + bld r5,7 + bst r6,2 + bld r7,4 + bst r5,3 + bld r6,2 + bst r5,4 + bld r5,3 + bst r6,0 + bld r5,4 + bst r0,0 + bld r6,0 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r8,2 + bld r8,0 + bst r9,2 + bld r8,2 + bst r9,1 + bld r9,2 + bst r8,5 + bld r9,1 + bst r10,6 + bld r8,5 + bst r11,0 + bld r10,6 + bst r8,3 + bld r11,0 + bst r9,6 + bld r8,3 + bst r11,1 + bld r9,6 + bst r8,7 + bld r11,1 + bst r11,6 + bld r8,7 + bst r11,3 + bld r11,6 + bst r9,7 + bld r11,3 + bst r11,5 + bld r9,7 + bst r10,7 + bld r11,5 + bst r11,4 + bld r10,7 + bst r10,3 + bld r11,4 + 
bst r9,4 + bld r10,3 + bst r10,1 + bld r9,4 + bst r8,4 + bld r10,1 + bst r10,2 + bld r8,4 + bst r9,0 + bld r10,2 + bst r8,1 + bld r9,0 + bst r8,6 + bld r8,1 + bst r11,2 + bld r8,6 + bst r9,3 + bld r11,2 + bst r9,5 + bld r9,3 + bst r10,5 + bld r9,5 + bst r10,4 + bld r10,5 + bst r10,0 + bld r10,4 + bst r0,0 + bld r10,0 + bst r12,0 + bld r0,0 + bst r12,3 + bld r12,0 + bst r13,7 + bld r12,3 + bst r15,6 + bld r13,7 + bst r15,0 + bld r15,6 + bst r0,0 + bld r15,0 + bst r12,1 + bld r0,0 + bst r12,7 + bld r12,1 + bst r15,7 + bld r12,7 + bst r15,4 + bld r15,7 + bst r14,0 + bld r15,4 + bst r0,0 + bld r14,0 + bst r12,2 + bld r0,0 + bst r13,3 + bld r12,2 + bst r13,6 + bld r13,3 + bst r15,2 + bld r13,6 + bst r13,0 + bld r15,2 + bst r0,0 + bld r13,0 + bst r12,4 + bld r0,0 + bst r14,3 + bld r12,4 + bst r13,5 + bld r14,3 + bst r14,6 + bld r13,5 + bst r15,1 + bld r14,6 + bst r0,0 + bld r15,1 + bst r12,5 + bld r0,0 + bst r14,7 + bld r12,5 + bst r15,5 + bld r14,7 + bst r14,4 + bld r15,5 + bst r14,1 + bld r14,4 + bst r0,0 + bld r14,1 + bst r12,6 + bld r0,0 + bst r15,3 + bld r12,6 + bst r13,4 + bld r15,3 + bst r14,2 + bld r13,4 + bst r13,1 + bld r14,2 + bst r0,0 + bld r13,1 + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + inc r16 + ret +804: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + bst r22,0 + bld r18,0 + bst r4,0 + bld r18,1 + bst r8,0 + bld r18,2 + bst r12,0 + bld r18,3 + bst r22,1 + bld r18,4 + bst r4,1 + bld r18,5 + bst r8,1 + bld r18,6 + bst r12,1 + bld r18,7 + bst r22,2 + bld r19,0 + bst r4,2 + bld r19,1 + bst r8,2 + bld r19,2 + bst r12,2 + bld r19,3 + bst r22,3 + bld r19,4 + bst r4,3 + bld r19,5 + bst r8,3 + bld r19,6 + bst r12,3 + bld r19,7 + bst r22,4 + bld r20,0 + bst r4,4 + bld r20,1 + bst r8,4 + bld r20,2 + bst r12,4 + bld r20,3 + bst r22,5 + bld r20,4 + bst r4,5 + bld r20,5 + bst r8,5 + bld r20,6 + bst r12,5 + bld r20,7 + bst r22,6 + bld r21,0 + bst r4,6 + bld r21,1 + bst r8,6 + bld r21,2 + bst r12,6 + bld r21,3 + bst r22,7 + bld r21,4 + bst r4,7 + bld r21,5 + bst r8,7 + bld r21,6 + bst r12,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r23,0 + bld r18,0 + bst r5,0 + bld r18,1 + bst r9,0 + bld r18,2 + bst r13,0 + bld r18,3 + bst r23,1 + bld r18,4 + bst r5,1 + bld r18,5 + bst r9,1 + bld r18,6 + bst r13,1 + bld r18,7 + bst r23,2 + bld r19,0 + bst r5,2 + bld r19,1 + bst r9,2 + bld r19,2 + bst r13,2 + bld r19,3 + bst r23,3 + bld r19,4 + bst r5,3 + bld r19,5 + bst r9,3 + bld r19,6 + bst r13,3 + bld r19,7 + bst r23,4 + bld r20,0 + bst r5,4 + bld r20,1 + bst r9,4 + bld r20,2 + bst r13,4 + bld r20,3 + bst r23,5 + bld r20,4 + bst r5,5 + bld r20,5 + bst r9,5 + bld r20,6 + bst r13,5 + bld r20,7 + bst r23,6 + bld r21,0 + bst r5,6 + bld r21,1 + bst r9,6 + bld r21,2 + bst r13,6 + bld r21,3 + bst r23,7 + bld r21,4 + bst r5,7 + bld r21,5 + bst r9,7 + bld r21,6 + bst r13,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r2,0 + bld r18,0 + bst r6,0 + bld r18,1 + bst r10,0 + bld r18,2 + bst r14,0 + bld r18,3 + bst r2,1 + bld r18,4 + bst r6,1 + bld r18,5 + bst r10,1 + bld r18,6 + bst r14,1 + bld r18,7 + bst r2,2 + bld r19,0 + bst r6,2 + bld r19,1 + bst r10,2 + bld r19,2 + bst r14,2 + bld r19,3 + bst r2,3 + bld r19,4 + bst r6,3 + bld r19,5 + bst r10,3 + bld r19,6 + bst r14,3 + bld r19,7 + bst r2,4 + bld r20,0 + bst 
r6,4 + bld r20,1 + bst r10,4 + bld r20,2 + bst r14,4 + bld r20,3 + bst r2,5 + bld r20,4 + bst r6,5 + bld r20,5 + bst r10,5 + bld r20,6 + bst r14,5 + bld r20,7 + bst r2,6 + bld r21,0 + bst r6,6 + bld r21,1 + bst r10,6 + bld r21,2 + bst r14,6 + bld r21,3 + bst r2,7 + bld r21,4 + bst r6,7 + bld r21,5 + bst r10,7 + bld r21,6 + bst r14,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r3,0 + bld r18,0 + bst r7,0 + bld r18,1 + bst r11,0 + bld r18,2 + bst r15,0 + bld r18,3 + bst r3,1 + bld r18,4 + bst r7,1 + bld r18,5 + bst r11,1 + bld r18,6 + bst r15,1 + bld r18,7 + bst r3,2 + bld r19,0 + bst r7,2 + bld r19,1 + bst r11,2 + bld r19,2 + bst r15,2 + bld r19,3 + bst r3,3 + bld r19,4 + bst r7,3 + bld r19,5 + bst r11,3 + bld r19,6 + bst r15,3 + bld r19,7 + bst r3,4 + bld r20,0 + bst r7,4 + bld r20,1 + bst r11,4 + bld r20,2 + bst r15,4 + bld r20,3 + bst r3,5 + bld r20,4 + bst r7,5 + bld r20,5 + bst r11,5 + bld r20,6 + bst r15,5 + bld r20,7 + bst r3,6 + bld r21,0 + bst r7,6 + bld r21,1 + bst r11,6 + bld r21,2 + bst r15,6 + bld r21,3 + bst r3,7 + bld r21,4 + bst r7,7 + bld r21,5 + bst r11,7 + bld r21,6 + bst r15,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128n_encrypt, .-gift128n_encrypt + + .text +.global gift128n_decrypt + .type gift128n_decrypt, @function +gift128n_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r22,0 + bst r18,1 + bld r4,0 + bst r18,2 + bld r8,0 + bst r18,3 + bld r12,0 + bst r18,4 + bld r22,1 + bst r18,5 + bld r4,1 + bst r18,6 + bld r8,1 + bst r18,7 + bld r12,1 + bst r19,0 + bld r22,2 + bst r19,1 + bld r4,2 + bst r19,2 + bld r8,2 + bst r19,3 + bld r12,2 + bst r19,4 + bld r22,3 + bst r19,5 + bld r4,3 + bst r19,6 + bld r8,3 + bst r19,7 + bld r12,3 + bst r20,0 + bld r22,4 + bst r20,1 + bld r4,4 + bst r20,2 + bld r8,4 + bst r20,3 + bld r12,4 + bst r20,4 + bld r22,5 + bst r20,5 + bld r4,5 + bst r20,6 + bld r8,5 + bst r20,7 + bld r12,5 + bst r21,0 + bld r22,6 + bst r21,1 + bld r4,6 + bst r21,2 + bld r8,6 + bst r21,3 + bld r12,6 + bst r21,4 + bld r22,7 + bst r21,5 + bld r4,7 + bst r21,6 + bld r8,7 + bst r21,7 + bld r12,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r23,0 + bst r18,1 + bld r5,0 + bst r18,2 + bld r9,0 + bst r18,3 + bld r13,0 + bst r18,4 + bld r23,1 + bst r18,5 + bld r5,1 + bst r18,6 + bld r9,1 + bst r18,7 + bld r13,1 + bst r19,0 + bld r23,2 + bst r19,1 + bld r5,2 + bst r19,2 + bld r9,2 + bst r19,3 + bld r13,2 + bst r19,4 + bld r23,3 + bst r19,5 + bld r5,3 + bst r19,6 + bld r9,3 + bst r19,7 + bld r13,3 + bst r20,0 + bld r23,4 + bst r20,1 + bld r5,4 + bst r20,2 + bld r9,4 + bst r20,3 + bld r13,4 + bst r20,4 + bld r23,5 + bst r20,5 + bld r5,5 + bst r20,6 + bld r9,5 + bst r20,7 + bld r13,5 + bst r21,0 + bld r23,6 + bst r21,1 + bld r5,6 + bst r21,2 + bld r9,6 + bst r21,3 + bld r13,6 + bst r21,4 + bld r23,7 + bst r21,5 + bld r5,7 + bst r21,6 + bld r9,7 + bst r21,7 + bld r13,7 + ld 
r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r2,0 + bst r18,1 + bld r6,0 + bst r18,2 + bld r10,0 + bst r18,3 + bld r14,0 + bst r18,4 + bld r2,1 + bst r18,5 + bld r6,1 + bst r18,6 + bld r10,1 + bst r18,7 + bld r14,1 + bst r19,0 + bld r2,2 + bst r19,1 + bld r6,2 + bst r19,2 + bld r10,2 + bst r19,3 + bld r14,2 + bst r19,4 + bld r2,3 + bst r19,5 + bld r6,3 + bst r19,6 + bld r10,3 + bst r19,7 + bld r14,3 + bst r20,0 + bld r2,4 + bst r20,1 + bld r6,4 + bst r20,2 + bld r10,4 + bst r20,3 + bld r14,4 + bst r20,4 + bld r2,5 + bst r20,5 + bld r6,5 + bst r20,6 + bld r10,5 + bst r20,7 + bld r14,5 + bst r21,0 + bld r2,6 + bst r21,1 + bld r6,6 + bst r21,2 + bld r10,6 + bst r21,3 + bld r14,6 + bst r21,4 + bld r2,7 + bst r21,5 + bld r6,7 + bst r21,6 + bld r10,7 + bst r21,7 + bld r14,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r3,0 + bst r18,1 + bld r7,0 + bst r18,2 + bld r11,0 + bst r18,3 + bld r15,0 + bst r18,4 + bld r3,1 + bst r18,5 + bld r7,1 + bst r18,6 + bld r11,1 + bst r18,7 + bld r15,1 + bst r19,0 + bld r3,2 + bst r19,1 + bld r7,2 + bst r19,2 + bld r11,2 + bst r19,3 + bld r15,2 + bst r19,4 + bld r3,3 + bst r19,5 + bld r7,3 + bst r19,6 + bld r11,3 + bst r19,7 + bld r15,3 + bst r20,0 + bld r3,4 + bst r20,1 + bld r7,4 + bst r20,2 + bld r11,4 + bst r20,3 + bld r15,4 + bst r20,4 + bld r3,5 + bst r20,5 + bld r7,5 + bst r20,6 + bld r11,5 + bst r20,7 + bld r15,5 + bst r21,0 + bld r3,6 + bst r21,1 + bld r7,6 + bst r21,2 + bld r11,6 + bst r21,3 + bld r15,6 + bst r21,4 + bld r3,7 + bst r21,5 + bld r7,7 + bst r21,6 + bld r11,7 + bst r21,7 + bld r15,7 + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r17,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +370: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 522f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 
+ mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 522f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 522f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 522f + cpse r16,r1 + rjmp 370b + rjmp 867f +522: + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 + bst r7,6 + bld r7,2 + bst r7,7 + bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld 
r11,0 + bst r8,5 + bld r10,6 + bst r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +867: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + bst r22,0 + bld r18,0 + bst r4,0 + bld r18,1 + bst r8,0 + bld r18,2 + bst r12,0 + bld r18,3 + bst r22,1 + bld r18,4 + bst r4,1 + bld r18,5 + bst r8,1 + bld r18,6 + bst r12,1 + bld r18,7 + bst r22,2 + bld r19,0 + bst r4,2 + bld r19,1 + bst r8,2 + bld r19,2 + bst r12,2 + bld r19,3 + bst r22,3 + bld r19,4 + bst r4,3 + bld r19,5 + bst r8,3 + bld r19,6 + bst r12,3 + bld r19,7 + bst r22,4 + bld r20,0 + bst r4,4 + bld r20,1 + bst r8,4 + bld r20,2 + bst r12,4 + bld r20,3 + bst r22,5 + bld r20,4 + bst r4,5 + bld r20,5 + bst r8,5 + bld r20,6 + bst r12,5 + bld r20,7 + bst r22,6 + bld r21,0 + bst r4,6 + bld r21,1 + bst r8,6 + bld r21,2 + bst r12,6 + bld r21,3 + bst r22,7 + bld r21,4 + bst r4,7 + bld r21,5 + bst r8,7 + bld r21,6 + bst r12,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r23,0 + bld r18,0 + bst r5,0 + bld r18,1 + bst r9,0 + bld r18,2 + bst r13,0 + bld r18,3 + bst r23,1 + bld r18,4 + bst r5,1 + bld r18,5 + bst r9,1 + bld r18,6 + bst r13,1 + bld r18,7 + bst r23,2 + bld r19,0 + bst r5,2 + bld r19,1 + bst r9,2 + bld r19,2 + bst r13,2 + bld r19,3 + bst r23,3 + bld r19,4 + bst r5,3 + bld r19,5 + bst r9,3 + bld r19,6 + bst r13,3 + bld r19,7 + bst r23,4 + bld r20,0 + bst r5,4 + bld r20,1 + bst r9,4 + bld r20,2 + bst r13,4 + bld r20,3 + bst r23,5 + bld r20,4 + bst r5,5 + bld r20,5 + bst r9,5 + bld r20,6 + bst r13,5 + bld r20,7 + bst r23,6 + bld r21,0 + bst r5,6 + bld r21,1 + bst r9,6 + bld r21,2 + bst r13,6 + bld r21,3 + bst r23,7 + bld r21,4 + bst r5,7 + bld r21,5 + bst r9,7 + bld r21,6 + bst r13,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r2,0 + bld r18,0 + bst r6,0 + bld r18,1 + bst r10,0 + bld r18,2 + bst r14,0 + bld r18,3 + 
bst r2,1 + bld r18,4 + bst r6,1 + bld r18,5 + bst r10,1 + bld r18,6 + bst r14,1 + bld r18,7 + bst r2,2 + bld r19,0 + bst r6,2 + bld r19,1 + bst r10,2 + bld r19,2 + bst r14,2 + bld r19,3 + bst r2,3 + bld r19,4 + bst r6,3 + bld r19,5 + bst r10,3 + bld r19,6 + bst r14,3 + bld r19,7 + bst r2,4 + bld r20,0 + bst r6,4 + bld r20,1 + bst r10,4 + bld r20,2 + bst r14,4 + bld r20,3 + bst r2,5 + bld r20,4 + bst r6,5 + bld r20,5 + bst r10,5 + bld r20,6 + bst r14,5 + bld r20,7 + bst r2,6 + bld r21,0 + bst r6,6 + bld r21,1 + bst r10,6 + bld r21,2 + bst r14,6 + bld r21,3 + bst r2,7 + bld r21,4 + bst r6,7 + bld r21,5 + bst r10,7 + bld r21,6 + bst r14,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r3,0 + bld r18,0 + bst r7,0 + bld r18,1 + bst r11,0 + bld r18,2 + bst r15,0 + bld r18,3 + bst r3,1 + bld r18,4 + bst r7,1 + bld r18,5 + bst r11,1 + bld r18,6 + bst r15,1 + bld r18,7 + bst r3,2 + bld r19,0 + bst r7,2 + bld r19,1 + bst r11,2 + bld r19,2 + bst r15,2 + bld r19,3 + bst r3,3 + bld r19,4 + bst r7,3 + bld r19,5 + bst r11,3 + bld r19,6 + bst r15,3 + bld r19,7 + bst r3,4 + bld r20,0 + bst r7,4 + bld r20,1 + bst r11,4 + bld r20,2 + bst r15,4 + bld r20,3 + bst r3,5 + bld r20,4 + bst r7,5 + bld r20,5 + bst r11,5 + bld r20,6 + bst r15,5 + bld r20,7 + bst r3,6 + bld r21,0 + bst r7,6 + bld r21,1 + bst r11,6 + bld r21,2 + bst r15,6 + bld r21,3 + bst r3,7 + bld r21,4 + bst r7,7 + bld r21,5 + bst r11,7 + bld r21,6 + bst r15,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128n_decrypt, .-gift128n_decrypt + + .text +.global gift128t_encrypt + .type gift128t_encrypt, @function +gift128t_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 36 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r2,0 + bst r20,1 + bld r6,0 + bst r20,2 + bld r10,0 + bst r20,3 + bld r14,0 + bst r20,4 + bld r2,1 + bst r20,5 + bld r6,1 + bst r20,6 + bld r10,1 + bst r20,7 + bld r14,1 + bst r21,0 + bld r2,2 + bst r21,1 + bld r6,2 + bst r21,2 + bld r10,2 + bst r21,3 + bld r14,2 + bst r21,4 + bld r2,3 + bst r21,5 + bld r6,3 + bst r21,6 + bld r10,3 + bst r21,7 + bld r14,3 + bst r22,0 + bld r2,4 + bst r22,1 + bld r6,4 + bst r22,2 + bld r10,4 + bst r22,3 + bld r14,4 + bst r22,4 + bld r2,5 + bst r22,5 + bld r6,5 + bst r22,6 + bld r10,5 + bst r22,7 + bld r14,5 + bst r23,0 + bld r2,6 + bst r23,1 + bld r6,6 + bst r23,2 + bld r10,6 + bst r23,3 + bld r14,6 + bst r23,4 + bld r2,7 + bst r23,5 + bld r6,7 + bst r23,6 + bld r10,7 + bst r23,7 + bld r14,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r3,0 + bst r20,1 + bld r7,0 + bst r20,2 + bld r11,0 + bst r20,3 + bld r15,0 + bst r20,4 + bld r3,1 + bst r20,5 + bld r7,1 + bst r20,6 + bld r11,1 + bst r20,7 + bld r15,1 + bst r21,0 + bld r3,2 + bst r21,1 + bld r7,2 + bst r21,2 + bld r11,2 + bst r21,3 + bld r15,2 + bst r21,4 + bld r3,3 + bst r21,5 + bld r7,3 + bst r21,6 + bld r11,3 + bst r21,7 + bld r15,3 + bst r22,0 + bld r3,4 + bst r22,1 + bld r7,4 + bst r22,2 + bld r11,4 + bst 
r22,3 + bld r15,4 + bst r22,4 + bld r3,5 + bst r22,5 + bld r7,5 + bst r22,6 + bld r11,5 + bst r22,7 + bld r15,5 + bst r23,0 + bld r3,6 + bst r23,1 + bld r7,6 + bst r23,2 + bld r11,6 + bst r23,3 + bld r15,6 + bst r23,4 + bld r3,7 + bst r23,5 + bld r7,7 + bst r23,6 + bld r11,7 + bst r23,7 + bld r15,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r4,0 + bst r20,1 + bld r8,0 + bst r20,2 + bld r12,0 + bst r20,3 + bld r24,0 + bst r20,4 + bld r4,1 + bst r20,5 + bld r8,1 + bst r20,6 + bld r12,1 + bst r20,7 + bld r24,1 + bst r21,0 + bld r4,2 + bst r21,1 + bld r8,2 + bst r21,2 + bld r12,2 + bst r21,3 + bld r24,2 + bst r21,4 + bld r4,3 + bst r21,5 + bld r8,3 + bst r21,6 + bld r12,3 + bst r21,7 + bld r24,3 + bst r22,0 + bld r4,4 + bst r22,1 + bld r8,4 + bst r22,2 + bld r12,4 + bst r22,3 + bld r24,4 + bst r22,4 + bld r4,5 + bst r22,5 + bld r8,5 + bst r22,6 + bld r12,5 + bst r22,7 + bld r24,5 + bst r23,0 + bld r4,6 + bst r23,1 + bld r8,6 + bst r23,2 + bld r12,6 + bst r23,3 + bld r24,6 + bst r23,4 + bld r4,7 + bst r23,5 + bld r8,7 + bst r23,6 + bld r12,7 + bst r23,7 + bld r24,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r5,0 + bst r20,1 + bld r9,0 + bst r20,2 + bld r13,0 + bst r20,3 + bld r25,0 + bst r20,4 + bld r5,1 + bst r20,5 + bld r9,1 + bst r20,6 + bld r13,1 + bst r20,7 + bld r25,1 + bst r21,0 + bld r5,2 + bst r21,1 + bld r9,2 + bst r21,2 + bld r13,2 + bst r21,3 + bld r25,2 + bst r21,4 + bld r5,3 + bst r21,5 + bld r9,3 + bst r21,6 + bld r13,3 + bst r21,7 + bld r25,3 + bst r22,0 + bld r5,4 + bst r22,1 + bld r9,4 + bst r22,2 + bld r13,4 + bst r22,3 + bld r25,4 + bst r22,4 + bld r5,5 + bst r22,5 + bld r9,5 + bst r22,6 + bld r13,5 + bst r22,7 + bld r25,5 + bst r23,0 + bld r5,6 + bst r23,1 + bld r9,6 + bst r23,2 + bld r13,6 + bst r23,3 + bld r25,6 + bst r23,4 + bld r5,7 + bst r23,5 + bld r9,7 + bst r23,6 + bld r13,7 + bst r23,7 + bld r25,7 + ld r26,Z + ldd r27,Z+1 + ldd r16,Z+2 + ldd r17,Z+3 + std Y+1,r26 + std Y+2,r27 + std Y+3,r16 + std Y+4,r17 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r16,Z+6 + ldd r17,Z+7 + std Y+5,r26 + std Y+6,r27 + std Y+7,r16 + std Y+8,r17 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r16,Z+10 + ldd r17,Z+11 + std Y+9,r26 + std Y+10,r27 + std Y+11,r16 + std Y+12,r17 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r16,Z+14 + ldd r17,Z+15 + std Y+13,r26 + std Y+14,r27 + std Y+15,r16 + std Y+16,r17 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r19,r1 + mov r26,r1 +307: + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + com r14 + com r15 + com r24 + com r25 + movw r20,r2 + movw r22,r4 + mov r0,r6 + and r0,r20 + eor r10,r0 + mov r0,r7 + and r0,r21 + eor r11,r0 + mov r0,r8 + and r0,r22 + eor r12,r0 + mov r0,r9 + and r0,r23 + eor r13,r0 + movw r2,r14 + movw r4,r24 + movw r14,r20 + movw r24,r22 + bst r2,1 + bld r0,0 + bst r2,4 + bld r2,1 + bst r4,0 + bld r2,4 + bst r2,2 + bld r4,0 + bst r3,0 + bld r2,2 + bst r2,3 + bld r3,0 + bst r3,4 + bld r2,3 + bst r4,3 + bld 
r3,4 + bst r3,6 + bld r4,3 + bst r5,3 + bld r3,6 + bst r3,5 + bld r5,3 + bst r4,7 + bld r3,5 + bst r5,6 + bld r4,7 + bst r5,1 + bld r5,6 + bst r2,5 + bld r5,1 + bst r4,4 + bld r2,5 + bst r4,2 + bld r4,4 + bst r3,2 + bld r4,2 + bst r3,3 + bld r3,2 + bst r3,7 + bld r3,3 + bst r5,7 + bld r3,7 + bst r5,5 + bld r5,7 + bst r4,5 + bld r5,5 + bst r4,6 + bld r4,5 + bst r5,2 + bld r4,6 + bst r3,1 + bld r5,2 + bst r2,7 + bld r3,1 + bst r5,4 + bld r2,7 + bst r4,1 + bld r5,4 + bst r2,6 + bld r4,1 + bst r5,0 + bld r2,6 + bst r0,0 + bld r5,0 + bst r6,0 + bld r0,0 + bst r6,1 + bld r6,0 + bst r6,5 + bld r6,1 + bst r8,5 + bld r6,5 + bst r8,7 + bld r8,5 + bst r9,7 + bld r8,7 + bst r9,6 + bld r9,7 + bst r9,2 + bld r9,6 + bst r7,2 + bld r9,2 + bst r7,0 + bld r7,2 + bst r0,0 + bld r7,0 + bst r6,2 + bld r0,0 + bst r7,1 + bld r6,2 + bst r6,4 + bld r7,1 + bst r8,1 + bld r6,4 + bst r6,7 + bld r8,1 + bst r9,5 + bld r6,7 + bst r8,6 + bld r9,5 + bst r9,3 + bld r8,6 + bst r7,6 + bld r9,3 + bst r9,0 + bld r7,6 + bst r0,0 + bld r9,0 + bst r6,3 + bld r0,0 + bst r7,5 + bld r6,3 + bst r8,4 + bld r7,5 + bst r8,3 + bld r8,4 + bst r7,7 + bld r8,3 + bst r9,4 + bld r7,7 + bst r8,2 + bld r9,4 + bst r7,3 + bld r8,2 + bst r7,4 + bld r7,3 + bst r8,0 + bld r7,4 + bst r0,0 + bld r8,0 + bst r6,6 + bld r0,0 + bst r9,1 + bld r6,6 + bst r0,0 + bld r9,1 + bst r10,0 + bld r0,0 + bst r10,2 + bld r10,0 + bst r11,2 + bld r10,2 + bst r11,1 + bld r11,2 + bst r10,5 + bld r11,1 + bst r12,6 + bld r10,5 + bst r13,0 + bld r12,6 + bst r10,3 + bld r13,0 + bst r11,6 + bld r10,3 + bst r13,1 + bld r11,6 + bst r10,7 + bld r13,1 + bst r13,6 + bld r10,7 + bst r13,3 + bld r13,6 + bst r11,7 + bld r13,3 + bst r13,5 + bld r11,7 + bst r12,7 + bld r13,5 + bst r13,4 + bld r12,7 + bst r12,3 + bld r13,4 + bst r11,4 + bld r12,3 + bst r12,1 + bld r11,4 + bst r10,4 + bld r12,1 + bst r12,2 + bld r10,4 + bst r11,0 + bld r12,2 + bst r10,1 + bld r11,0 + bst r10,6 + bld r10,1 + bst r13,2 + bld r10,6 + bst r11,3 + bld r13,2 + bst r11,5 + bld r11,3 + bst r12,5 + bld r11,5 + bst r12,4 + bld r12,5 + bst r12,0 + bld r12,4 + bst r0,0 + bld r12,0 + bst r14,0 + bld r0,0 + bst r14,3 + bld r14,0 + bst r15,7 + bld r14,3 + bst r25,6 + bld r15,7 + bst r25,0 + bld r25,6 + bst r0,0 + bld r25,0 + bst r14,1 + bld r0,0 + bst r14,7 + bld r14,1 + bst r25,7 + bld r14,7 + bst r25,4 + bld r25,7 + bst r24,0 + bld r25,4 + bst r0,0 + bld r24,0 + bst r14,2 + bld r0,0 + bst r15,3 + bld r14,2 + bst r15,6 + bld r15,3 + bst r25,2 + bld r15,6 + bst r15,0 + bld r25,2 + bst r0,0 + bld r15,0 + bst r14,4 + bld r0,0 + bst r24,3 + bld r14,4 + bst r15,5 + bld r24,3 + bst r24,6 + bld r15,5 + bst r25,1 + bld r24,6 + bst r0,0 + bld r25,1 + bst r14,5 + bld r0,0 + bst r24,7 + bld r14,5 + bst r25,5 + bld r24,7 + bst r24,4 + bld r25,5 + bst r24,1 + bld r24,4 + bst r0,0 + bld r24,1 + bst r14,6 + bld r0,0 + bst r25,3 + bld r14,6 + bst r15,4 + bld r25,3 + bst r24,2 + bld r15,4 + bst r15,1 + bld r24,2 + bst r0,0 + bld r15,1 + ldd r0,Y+5 + eor r10,r0 + ldd r0,Y+6 + eor r11,r0 + ldd r0,Y+7 + eor r12,r0 + ldd r0,Y+8 + eor r13,r0 + ldd r20,Y+13 + ldd r21,Y+14 + ldd r22,Y+15 + ldd r23,Y+16 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + lsl r20 + rol r21 + adc r20,r1 + lsl r20 + rol r21 + adc r20,r1 + lsl r20 + rol r21 + adc r20,r1 + lsl r20 + rol r21 + adc r20,r1 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + ldd r0,Y+1 + std Y+1,r20 + ldd r20,Y+5 + std Y+5,r0 + ldd r0,Y+9 + std Y+9,r20 + std Y+13,r0 + ldd r0,Y+2 + std Y+2,r21 + ldd r21,Y+6 + std Y+6,r0 + ldd r0,Y+10 + std Y+10,r21 + 
std Y+14,r0 + ldd r0,Y+3 + std Y+3,r22 + ldd r22,Y+7 + std Y+7,r0 + ldd r0,Y+11 + std Y+11,r22 + std Y+15,r0 + ldd r0,Y+4 + std Y+4,r23 + ldd r23,Y+8 + std Y+8,r0 + ldd r0,Y+12 + std Y+12,r23 + std Y+16,r0 + ldi r20,128 + eor r25,r20 + mov r30,r19 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + eor r14,r20 + inc r19 + cpi r19,40 + breq 727f + inc r26 + ldi r27,5 + cpse r26,r27 + rjmp 307b + mov r26,r1 + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rjmp 307b +727: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + bst r2,0 + bld r20,0 + bst r6,0 + bld r20,1 + bst r10,0 + bld r20,2 + bst r14,0 + bld r20,3 + bst r2,1 + bld r20,4 + bst r6,1 + bld r20,5 + bst r10,1 + bld r20,6 + bst r14,1 + bld r20,7 + bst r2,2 + bld r21,0 + bst r6,2 + bld r21,1 + bst r10,2 + bld r21,2 + bst r14,2 + bld r21,3 + bst r2,3 + bld r21,4 + bst r6,3 + bld r21,5 + bst r10,3 + bld r21,6 + bst r14,3 + bld r21,7 + bst r2,4 + bld r22,0 + bst r6,4 + bld r22,1 + bst r10,4 + bld r22,2 + bst r14,4 + bld r22,3 + bst r2,5 + bld r22,4 + bst r6,5 + bld r22,5 + bst r10,5 + bld r22,6 + bst r14,5 + bld r22,7 + bst r2,6 + bld r23,0 + bst r6,6 + bld r23,1 + bst r10,6 + bld r23,2 + bst r14,6 + bld r23,3 + bst r2,7 + bld r23,4 + bst r6,7 + bld r23,5 + bst r10,7 + bld r23,6 + bst r14,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r3,0 + bld r20,0 + bst r7,0 + bld r20,1 + bst r11,0 + bld r20,2 + bst r15,0 + bld r20,3 + bst r3,1 + bld r20,4 + bst r7,1 + bld r20,5 + bst r11,1 + bld r20,6 + bst r15,1 + bld r20,7 + bst r3,2 + bld r21,0 + bst r7,2 + bld r21,1 + bst r11,2 + bld r21,2 + bst r15,2 + bld r21,3 + bst r3,3 + bld r21,4 + bst r7,3 + bld r21,5 + bst r11,3 + bld r21,6 + bst r15,3 + bld r21,7 + bst r3,4 + bld r22,0 + bst r7,4 + bld r22,1 + bst r11,4 + bld r22,2 + bst r15,4 + bld r22,3 + bst r3,5 + bld r22,4 + bst r7,5 + bld r22,5 + bst r11,5 + bld r22,6 + bst r15,5 + bld r22,7 + bst r3,6 + bld r23,0 + bst r7,6 + bld r23,1 + bst r11,6 + bld r23,2 + bst r15,6 + bld r23,3 + bst r3,7 + bld r23,4 + bst r7,7 + bld r23,5 + bst r11,7 + bld r23,6 + bst r15,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r4,0 + bld r20,0 + bst r8,0 + bld r20,1 + bst r12,0 + bld r20,2 + bst r24,0 + bld r20,3 + bst r4,1 + bld r20,4 + bst r8,1 + bld r20,5 + bst r12,1 + bld r20,6 + bst r24,1 + bld r20,7 + bst r4,2 + bld r21,0 + bst r8,2 + bld r21,1 + bst r12,2 + bld r21,2 + bst r24,2 + bld r21,3 + bst r4,3 + bld r21,4 + bst r8,3 + bld r21,5 + bst r12,3 + bld r21,6 + bst r24,3 + bld r21,7 + bst r4,4 + bld r22,0 + bst r8,4 + bld r22,1 + bst r12,4 + bld r22,2 + bst r24,4 + bld r22,3 + bst r4,5 + bld r22,4 + bst r8,5 + bld r22,5 + bst r12,5 + bld r22,6 + bst r24,5 + bld r22,7 + bst r4,6 + bld r23,0 + bst r8,6 + bld r23,1 + bst r12,6 + bld r23,2 + bst r24,6 + bld r23,3 + bst r4,7 + bld r23,4 + bst r8,7 + bld r23,5 + bst r12,7 + bld r23,6 + bst r24,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r5,0 + bld r20,0 + bst r9,0 + bld r20,1 + bst r13,0 + bld r20,2 + bst r25,0 + bld r20,3 + bst r5,1 + bld r20,4 + bst r9,1 + bld r20,5 + bst r13,1 + bld r20,6 + bst r25,1 + bld r20,7 + bst r5,2 + bld r21,0 + bst r9,2 + bld r21,1 + bst r13,2 + bld r21,2 + bst r25,2 + bld r21,3 + bst r5,3 + bld r21,4 + bst r9,3 + bld r21,5 + bst r13,3 + bld r21,6 + bst r25,3 + bld r21,7 + bst r5,4 + bld r22,0 + bst r9,4 + bld r22,1 + bst r13,4 + bld r22,2 + bst r25,4 + bld r22,3 + bst 
r5,5 + bld r22,4 + bst r9,5 + bld r22,5 + bst r13,5 + bld r22,6 + bst r25,5 + bld r22,7 + bst r5,6 + bld r23,0 + bst r9,6 + bld r23,1 + bst r13,6 + bld r23,2 + bst r25,6 + bld r23,3 + bst r5,7 + bld r23,4 + bst r9,7 + bld r23,5 + bst r13,7 + bld r23,6 + bst r25,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128t_encrypt, .-gift128t_encrypt + + .text +.global gift128t_decrypt + .type gift128t_decrypt, @function +gift128t_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 36 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r2,0 + bst r20,1 + bld r6,0 + bst r20,2 + bld r10,0 + bst r20,3 + bld r14,0 + bst r20,4 + bld r2,1 + bst r20,5 + bld r6,1 + bst r20,6 + bld r10,1 + bst r20,7 + bld r14,1 + bst r21,0 + bld r2,2 + bst r21,1 + bld r6,2 + bst r21,2 + bld r10,2 + bst r21,3 + bld r14,2 + bst r21,4 + bld r2,3 + bst r21,5 + bld r6,3 + bst r21,6 + bld r10,3 + bst r21,7 + bld r14,3 + bst r22,0 + bld r2,4 + bst r22,1 + bld r6,4 + bst r22,2 + bld r10,4 + bst r22,3 + bld r14,4 + bst r22,4 + bld r2,5 + bst r22,5 + bld r6,5 + bst r22,6 + bld r10,5 + bst r22,7 + bld r14,5 + bst r23,0 + bld r2,6 + bst r23,1 + bld r6,6 + bst r23,2 + bld r10,6 + bst r23,3 + bld r14,6 + bst r23,4 + bld r2,7 + bst r23,5 + bld r6,7 + bst r23,6 + bld r10,7 + bst r23,7 + bld r14,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r3,0 + bst r20,1 + bld r7,0 + bst r20,2 + bld r11,0 + bst r20,3 + bld r15,0 + bst r20,4 + bld r3,1 + bst r20,5 + bld r7,1 + bst r20,6 + bld r11,1 + bst r20,7 + bld r15,1 + bst r21,0 + bld r3,2 + bst r21,1 + bld r7,2 + bst r21,2 + bld r11,2 + bst r21,3 + bld r15,2 + bst r21,4 + bld r3,3 + bst r21,5 + bld r7,3 + bst r21,6 + bld r11,3 + bst r21,7 + bld r15,3 + bst r22,0 + bld r3,4 + bst r22,1 + bld r7,4 + bst r22,2 + bld r11,4 + bst r22,3 + bld r15,4 + bst r22,4 + bld r3,5 + bst r22,5 + bld r7,5 + bst r22,6 + bld r11,5 + bst r22,7 + bld r15,5 + bst r23,0 + bld r3,6 + bst r23,1 + bld r7,6 + bst r23,2 + bld r11,6 + bst r23,3 + bld r15,6 + bst r23,4 + bld r3,7 + bst r23,5 + bld r7,7 + bst r23,6 + bld r11,7 + bst r23,7 + bld r15,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r4,0 + bst r20,1 + bld r8,0 + bst r20,2 + bld r12,0 + bst r20,3 + bld r24,0 + bst r20,4 + bld r4,1 + bst r20,5 + bld r8,1 + bst r20,6 + bld r12,1 + bst r20,7 + bld r24,1 + bst r21,0 + bld r4,2 + bst r21,1 + bld r8,2 + bst r21,2 + bld r12,2 + bst r21,3 + bld r24,2 + bst r21,4 + bld r4,3 + bst r21,5 + bld r8,3 + bst r21,6 + bld r12,3 + bst r21,7 + bld r24,3 + bst r22,0 + bld r4,4 + bst r22,1 + bld r8,4 + bst r22,2 + bld r12,4 + bst r22,3 + bld r24,4 + bst r22,4 + bld r4,5 + bst r22,5 + bld r8,5 + bst r22,6 + bld r12,5 + bst r22,7 + bld r24,5 + bst r23,0 + bld r4,6 + bst r23,1 + bld r8,6 + bst r23,2 + bld r12,6 + bst r23,3 + bld r24,6 + bst r23,4 + bld r4,7 + bst r23,5 + bld r8,7 + bst r23,6 + bld r12,7 + bst r23,7 + bld r24,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r5,0 
+ bst r20,1 + bld r9,0 + bst r20,2 + bld r13,0 + bst r20,3 + bld r25,0 + bst r20,4 + bld r5,1 + bst r20,5 + bld r9,1 + bst r20,6 + bld r13,1 + bst r20,7 + bld r25,1 + bst r21,0 + bld r5,2 + bst r21,1 + bld r9,2 + bst r21,2 + bld r13,2 + bst r21,3 + bld r25,2 + bst r21,4 + bld r5,3 + bst r21,5 + bld r9,3 + bst r21,6 + bld r13,3 + bst r21,7 + bld r25,3 + bst r22,0 + bld r5,4 + bst r22,1 + bld r9,4 + bst r22,2 + bld r13,4 + bst r22,3 + bld r25,4 + bst r22,4 + bld r5,5 + bst r22,5 + bld r9,5 + bst r22,6 + bld r13,5 + bst r22,7 + bld r25,5 + bst r23,0 + bld r5,6 + bst r23,1 + bld r9,6 + bst r23,2 + bld r13,6 + bst r23,3 + bld r25,6 + bst r23,4 + bld r5,7 + bst r23,5 + bld r9,7 + bst r23,6 + bld r13,7 + bst r23,7 + bld r25,7 + ld r26,Z + ldd r27,Z+1 + ldd r16,Z+2 + ldd r17,Z+3 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r16 + std Y+4,r17 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r16,Z+6 + ldd r17,Z+7 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r16 + std Y+8,r17 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r16,Z+10 + ldd r17,Z+11 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r16 + std Y+12,r17 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r16,Z+14 + ldd r17,Z+15 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r16 + std Y+16,r17 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r19,40 + mov r26,r1 +375: + ldd r0,Y+13 + ldd r20,Y+9 + std Y+9,r0 + ldd r0,Y+5 + std Y+5,r20 + ldd r20,Y+1 + std Y+1,r0 + ldd r0,Y+14 + ldd r21,Y+10 + std Y+10,r0 + ldd r0,Y+6 + std Y+6,r21 + ldd r21,Y+2 + std Y+2,r0 + ldd r0,Y+15 + ldd r22,Y+11 + std Y+11,r0 + ldd r0,Y+7 + std Y+7,r22 + ldd r22,Y+3 + std Y+3,r0 + ldd r0,Y+16 + ldd r23,Y+12 + std Y+12,r0 + ldd r0,Y+8 + std Y+8,r23 + ldd r23,Y+4 + std Y+4,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r0 + lsr r21 + ror r20 + ror r0 + lsr r21 + ror r20 + ror r0 + lsr r21 + ror r20 + ror r0 + or r21,r0 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + std Y+13,r20 + std Y+14,r21 + std Y+15,r22 + std Y+16,r23 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ldd r0,Y+5 + eor r10,r0 + ldd r0,Y+6 + eor r11,r0 + ldd r0,Y+7 + eor r12,r0 + ldd r0,Y+8 + eor r13,r0 + ldi r20,128 + eor r25,r20 + dec r19 + mov r30,r19 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + eor r14,r20 + bst r2,1 + bld r0,0 + bst r5,0 + bld r2,1 + bst r2,6 + bld r5,0 + bst r4,1 + bld r2,6 + bst r5,4 + bld r4,1 + bst r2,7 + bld r5,4 + bst r3,1 + bld r2,7 + bst r5,2 + bld r3,1 + bst r4,6 + bld r5,2 + bst r4,5 + bld r4,6 + bst r5,5 + bld r4,5 + bst r5,7 + bld r5,5 + bst r3,7 + bld r5,7 + bst r3,3 + bld r3,7 + bst r3,2 + bld r3,3 + bst r4,2 + bld r3,2 + bst r4,4 + bld r4,2 + bst r2,5 + bld r4,4 + bst r5,1 + bld r2,5 + bst 
r5,6 + bld r5,1 + bst r4,7 + bld r5,6 + bst r3,5 + bld r4,7 + bst r5,3 + bld r3,5 + bst r3,6 + bld r5,3 + bst r4,3 + bld r3,6 + bst r3,4 + bld r4,3 + bst r2,3 + bld r3,4 + bst r3,0 + bld r2,3 + bst r2,2 + bld r3,0 + bst r4,0 + bld r2,2 + bst r2,4 + bld r4,0 + bst r0,0 + bld r2,4 + bst r6,0 + bld r0,0 + bst r7,0 + bld r6,0 + bst r7,2 + bld r7,0 + bst r9,2 + bld r7,2 + bst r9,6 + bld r9,2 + bst r9,7 + bld r9,6 + bst r8,7 + bld r9,7 + bst r8,5 + bld r8,7 + bst r6,5 + bld r8,5 + bst r6,1 + bld r6,5 + bst r0,0 + bld r6,1 + bst r6,2 + bld r0,0 + bst r9,0 + bld r6,2 + bst r7,6 + bld r9,0 + bst r9,3 + bld r7,6 + bst r8,6 + bld r9,3 + bst r9,5 + bld r8,6 + bst r6,7 + bld r9,5 + bst r8,1 + bld r6,7 + bst r6,4 + bld r8,1 + bst r7,1 + bld r6,4 + bst r0,0 + bld r7,1 + bst r6,3 + bld r0,0 + bst r8,0 + bld r6,3 + bst r7,4 + bld r8,0 + bst r7,3 + bld r7,4 + bst r8,2 + bld r7,3 + bst r9,4 + bld r8,2 + bst r7,7 + bld r9,4 + bst r8,3 + bld r7,7 + bst r8,4 + bld r8,3 + bst r7,5 + bld r8,4 + bst r0,0 + bld r7,5 + bst r6,6 + bld r0,0 + bst r9,1 + bld r6,6 + bst r0,0 + bld r9,1 + bst r10,0 + bld r0,0 + bst r12,0 + bld r10,0 + bst r12,4 + bld r12,0 + bst r12,5 + bld r12,4 + bst r11,5 + bld r12,5 + bst r11,3 + bld r11,5 + bst r13,2 + bld r11,3 + bst r10,6 + bld r13,2 + bst r10,1 + bld r10,6 + bst r11,0 + bld r10,1 + bst r12,2 + bld r11,0 + bst r10,4 + bld r12,2 + bst r12,1 + bld r10,4 + bst r11,4 + bld r12,1 + bst r12,3 + bld r11,4 + bst r13,4 + bld r12,3 + bst r12,7 + bld r13,4 + bst r13,5 + bld r12,7 + bst r11,7 + bld r13,5 + bst r13,3 + bld r11,7 + bst r13,6 + bld r13,3 + bst r10,7 + bld r13,6 + bst r13,1 + bld r10,7 + bst r11,6 + bld r13,1 + bst r10,3 + bld r11,6 + bst r13,0 + bld r10,3 + bst r12,6 + bld r13,0 + bst r10,5 + bld r12,6 + bst r11,1 + bld r10,5 + bst r11,2 + bld r11,1 + bst r10,2 + bld r11,2 + bst r0,0 + bld r10,2 + bst r14,0 + bld r0,0 + bst r25,0 + bld r14,0 + bst r25,6 + bld r25,0 + bst r15,7 + bld r25,6 + bst r14,3 + bld r15,7 + bst r0,0 + bld r14,3 + bst r14,1 + bld r0,0 + bst r24,0 + bld r14,1 + bst r25,4 + bld r24,0 + bst r25,7 + bld r25,4 + bst r14,7 + bld r25,7 + bst r0,0 + bld r14,7 + bst r14,2 + bld r0,0 + bst r15,0 + bld r14,2 + bst r25,2 + bld r15,0 + bst r15,6 + bld r25,2 + bst r15,3 + bld r15,6 + bst r0,0 + bld r15,3 + bst r14,4 + bld r0,0 + bst r25,1 + bld r14,4 + bst r24,6 + bld r25,1 + bst r15,5 + bld r24,6 + bst r24,3 + bld r15,5 + bst r0,0 + bld r24,3 + bst r14,5 + bld r0,0 + bst r24,1 + bld r14,5 + bst r24,4 + bld r24,1 + bst r25,5 + bld r24,4 + bst r24,7 + bld r25,5 + bst r0,0 + bld r24,7 + bst r14,6 + bld r0,0 + bst r15,1 + bld r14,6 + bst r24,2 + bld r15,1 + bst r15,4 + bld r24,2 + bst r25,3 + bld r15,4 + bst r0,0 + bld r25,3 + movw r20,r14 + movw r22,r24 + movw r14,r2 + movw r24,r4 + movw r2,r20 + movw r4,r22 + and r20,r6 + and r21,r7 + and r22,r8 + and r23,r9 + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + com r14 + com r15 + com r24 + com r25 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + cp r19,r1 + breq 791f + inc r26 + ldi r27,5 + cpse r26,r27 + rjmp 375b + mov 
r26,r1 + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rjmp 375b +791: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + bst r2,0 + bld r20,0 + bst r6,0 + bld r20,1 + bst r10,0 + bld r20,2 + bst r14,0 + bld r20,3 + bst r2,1 + bld r20,4 + bst r6,1 + bld r20,5 + bst r10,1 + bld r20,6 + bst r14,1 + bld r20,7 + bst r2,2 + bld r21,0 + bst r6,2 + bld r21,1 + bst r10,2 + bld r21,2 + bst r14,2 + bld r21,3 + bst r2,3 + bld r21,4 + bst r6,3 + bld r21,5 + bst r10,3 + bld r21,6 + bst r14,3 + bld r21,7 + bst r2,4 + bld r22,0 + bst r6,4 + bld r22,1 + bst r10,4 + bld r22,2 + bst r14,4 + bld r22,3 + bst r2,5 + bld r22,4 + bst r6,5 + bld r22,5 + bst r10,5 + bld r22,6 + bst r14,5 + bld r22,7 + bst r2,6 + bld r23,0 + bst r6,6 + bld r23,1 + bst r10,6 + bld r23,2 + bst r14,6 + bld r23,3 + bst r2,7 + bld r23,4 + bst r6,7 + bld r23,5 + bst r10,7 + bld r23,6 + bst r14,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r3,0 + bld r20,0 + bst r7,0 + bld r20,1 + bst r11,0 + bld r20,2 + bst r15,0 + bld r20,3 + bst r3,1 + bld r20,4 + bst r7,1 + bld r20,5 + bst r11,1 + bld r20,6 + bst r15,1 + bld r20,7 + bst r3,2 + bld r21,0 + bst r7,2 + bld r21,1 + bst r11,2 + bld r21,2 + bst r15,2 + bld r21,3 + bst r3,3 + bld r21,4 + bst r7,3 + bld r21,5 + bst r11,3 + bld r21,6 + bst r15,3 + bld r21,7 + bst r3,4 + bld r22,0 + bst r7,4 + bld r22,1 + bst r11,4 + bld r22,2 + bst r15,4 + bld r22,3 + bst r3,5 + bld r22,4 + bst r7,5 + bld r22,5 + bst r11,5 + bld r22,6 + bst r15,5 + bld r22,7 + bst r3,6 + bld r23,0 + bst r7,6 + bld r23,1 + bst r11,6 + bld r23,2 + bst r15,6 + bld r23,3 + bst r3,7 + bld r23,4 + bst r7,7 + bld r23,5 + bst r11,7 + bld r23,6 + bst r15,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r4,0 + bld r20,0 + bst r8,0 + bld r20,1 + bst r12,0 + bld r20,2 + bst r24,0 + bld r20,3 + bst r4,1 + bld r20,4 + bst r8,1 + bld r20,5 + bst r12,1 + bld r20,6 + bst r24,1 + bld r20,7 + bst r4,2 + bld r21,0 + bst r8,2 + bld r21,1 + bst r12,2 + bld r21,2 + bst r24,2 + bld r21,3 + bst r4,3 + bld r21,4 + bst r8,3 + bld r21,5 + bst r12,3 + bld r21,6 + bst r24,3 + bld r21,7 + bst r4,4 + bld r22,0 + bst r8,4 + bld r22,1 + bst r12,4 + bld r22,2 + bst r24,4 + bld r22,3 + bst r4,5 + bld r22,4 + bst r8,5 + bld r22,5 + bst r12,5 + bld r22,6 + bst r24,5 + bld r22,7 + bst r4,6 + bld r23,0 + bst r8,6 + bld r23,1 + bst r12,6 + bld r23,2 + bst r24,6 + bld r23,3 + bst r4,7 + bld r23,4 + bst r8,7 + bld r23,5 + bst r12,7 + bld r23,6 + bst r24,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r5,0 + bld r20,0 + bst r9,0 + bld r20,1 + bst r13,0 + bld r20,2 + bst r25,0 + bld r20,3 + bst r5,1 + bld r20,4 + bst r9,1 + bld r20,5 + bst r13,1 + bld r20,6 + bst r25,1 + bld r20,7 + bst r5,2 + bld r21,0 + bst r9,2 + bld r21,1 + bst r13,2 + bld r21,2 + bst r25,2 + bld r21,3 + bst r5,3 + bld r21,4 + bst r9,3 + bld r21,5 + bst r13,3 + bld r21,6 + bst r25,3 + bld r21,7 + bst r5,4 + bld r22,0 + bst r9,4 + bld r22,1 + bst r13,4 + bld r22,2 + bst r25,4 + bld r22,3 + bst r5,5 + bld r22,4 + bst r9,5 + bld r22,5 + bst r13,5 + bld r22,6 + bst r25,5 + bld r22,7 + bst r5,6 + bld r23,0 + bst r9,6 + bld r23,1 + bst r13,6 + bld r23,2 + bst r25,6 + bld r23,3 + bst r5,7 + bld r23,4 + bst r9,7 + bld r23,5 + bst r13,7 + bld r23,6 + bst r25,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop 
r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128t_decrypt, .-gift128t_decrypt + +#endif diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128n-full-avr.S b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128n-full-avr.S new file mode 100644 index 0000000..3a7e6fb --- /dev/null +++ b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128n-full-avr.S @@ -0,0 +1,8173 @@ +#if defined(__AVR__) +#include <avr/io.h> +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 + .byte 128 + + .text +.global gift128n_init + .type gift128n_init, @function +gift128n_init: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 18 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + st Z+,r22 + st Z+,r23 + st Z+,r28 + st Z+,r29 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + ldi r24,4 +33: + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r29 + ror r28 + ror r0 + lsr r29 + ror r28 + ror r0 + or r29,r0 + st Z+,r22 + st Z+,r23 + st Z+,r28 + st Z+,r29 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov
r3,r0 + mov r0,r28 + mov r28,r4 + mov r4,r0 + mov r0,r29 + mov r29,r5 + mov r5,r0 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + mov r0,r6 + mov r6,r10 + mov r10,r0 + mov r0,r7 + mov r7,r11 + mov r11,r0 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + dec r24 + breq 5115f + rjmp 33b +5115: + subi r30,80 + sbc r31,r1 + ldi r24,2 +119: + ld r22,Z + ldd r23,Z+1 + ldd r28,Z+2 + ldd r29,Z+3 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + st Z,r29 + std Z+1,r23 + std Z+2,r28 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r28,Z+6 + ldd r29,Z+7 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror 
r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+4,r29 + std Z+5,r23 + std Z+6,r28 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r28,Z+10 + ldd r29,Z+11 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+8,r29 + std Z+9,r23 + std Z+10,r28 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r28,Z+14 + ldd r29,Z+15 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror 
r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+12,r29 + std Z+13,r23 + std Z+14,r28 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r28,Z+18 + ldd r29,Z+19 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+16,r29 + std Z+17,r23 + std Z+18,r28 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r28,Z+22 + ldd r29,Z+23 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + 
rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+20,r29 + std Z+21,r23 + std Z+22,r28 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r28,Z+26 + ldd r29,Z+27 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+24,r29 + std Z+25,r23 + std Z+26,r28 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r28,Z+30 + ldd r29,Z+31 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + 
lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+28,r29 + std Z+29,r23 + std Z+30,r28 + std Z+31,r22 + dec r24 + breq 1268f + adiw r30,40 + rjmp 119b +1268: + adiw r30,40 + movw r26,r30 + subi r26,80 + sbc r27,r1 + ldi r24,6 +1274: + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r2 + eor r19,r3 + andi r18,51 + andi r19,51 + eor r2,r18 + eor r3,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + movw r18,r2 + movw r20,r4 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + st Z,r2 + std Z+1,r3 + std Z+2,r4 + std Z+3,r5 + movw r18,r22 + movw r20,r28 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + andi r28,204 + andi r29,204 + or r28,r21 + or r29,r18 + or r22,r19 + or r23,r20 + movw r18,r28 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r28 + eor r19,r29 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r28,r18 + eor r29,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r28,r18 + eor r29,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r28 + std Z+5,r29 + std Z+6,r22 + std Z+7,r23 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + swap r3 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + swap r5 + std Z+8,r2 + std Z+9,r3 + std Z+10,r4 + std Z+11,r5 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r28 + adc r28,r1 + lsl r28 + adc r28,r1 + lsl r28 + adc r28,r1 + lsl r29 + adc r29,r1 + lsl r29 + adc r29,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r28 + std Z+15,r29 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + ldi r25,85 + and r2,r25 + and r3,r25 + and r4,r25 + and r5,r25 + or r2,r19 + or r3,r20 + or r4,r21 + or r5,r18 + std Z+16,r4 + std Z+17,r5 + std Z+18,r2 + std Z+19,r3 + movw r18,r22 + movw r20,r28 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + andi r28,170 + andi r29,170 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + or r22,r18 + or r23,r19 + or r28,r20 + or r29,r21 + std Z+20,r29 + std Z+21,r22 + std Z+22,r23 + std Z+23,r28 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r4,r20 
+ eor r5,r21 + movw r18,r2 + movw r20,r4 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r14,r18 + movw r16,r20 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + eor r14,r18 + eor r15,r19 + eor r16,r20 + eor r17,r21 + ldi r25,8 + and r14,r25 + and r15,r25 + andi r16,8 + andi r17,8 + eor r18,r14 + eor r19,r15 + eor r20,r16 + eor r21,r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + eor r18,r14 + eor r19,r15 + eor r20,r16 + eor r21,r17 + ldi r17,15 + and r2,r17 + and r3,r17 + and r4,r17 + and r5,r17 + or r2,r18 + or r3,r19 + or r4,r20 + or r5,r21 + std Z+24,r2 + std Z+25,r3 + std Z+26,r4 + std Z+27,r5 + movw r18,r28 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r2,r22 + movw r4,r28 + ldi r16,1 + and r2,r16 + and r3,r16 + and r4,r16 + and r5,r16 + lsl r2 + rol r3 + rol r4 + rol r5 + lsl r2 + rol r3 + rol r4 + rol r5 + lsl r2 + rol r3 + rol r4 + rol r5 + or r2,r18 + or r3,r19 + movw r18,r28 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r2,r18 + or r3,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r4,r18 + or r5,r19 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r2,r18 + or r3,r19 + or r4,r20 + or r5,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r4,r22 + or r5,r23 + std Z+28,r2 + std Z+29,r3 + std Z+30,r4 + std Z+31,r5 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + mov r0,r1 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + or r5,r0 + std Z+32,r3 + std Z+33,r2 + std Z+34,r4 + std Z+35,r5 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r28 + mov r28,r29 + mov r29,r0 + lsl r28 + rol r29 + adc r28,r1 + lsl r28 + rol r29 + adc r28,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r28 + std Z+39,r29 + dec r24 + breq 1733f + adiw r30,40 + rjmp 1274b +1733: + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128n_init, .-gift128n_init + + .text +.global gift128n_encrypt + .type gift128n_encrypt, @function +gift128n_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r22,0 + bst r18,1 + bld r4,0 + bst r18,2 + bld r8,0 + bst r18,3 + bld r12,0 + bst r18,4 + bld r22,1 + bst r18,5 + bld r4,1 + bst r18,6 + bld r8,1 + bst r18,7 + bld r12,1 + bst r19,0 + bld r22,2 + bst r19,1 + bld r4,2 + bst r19,2 + bld r8,2 + bst r19,3 + bld r12,2 + bst r19,4 + bld r22,3 + bst r19,5 + bld r4,3 + bst r19,6 + bld r8,3 + bst r19,7 + bld r12,3 + bst r20,0 + bld r22,4 + bst r20,1 + bld r4,4 + bst r20,2 + bld r8,4 + bst r20,3 + bld r12,4 + bst r20,4 + bld r22,5 + bst r20,5 + bld r4,5 + bst r20,6 + bld 
r8,5 + bst r20,7 + bld r12,5 + bst r21,0 + bld r22,6 + bst r21,1 + bld r4,6 + bst r21,2 + bld r8,6 + bst r21,3 + bld r12,6 + bst r21,4 + bld r22,7 + bst r21,5 + bld r4,7 + bst r21,6 + bld r8,7 + bst r21,7 + bld r12,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r23,0 + bst r18,1 + bld r5,0 + bst r18,2 + bld r9,0 + bst r18,3 + bld r13,0 + bst r18,4 + bld r23,1 + bst r18,5 + bld r5,1 + bst r18,6 + bld r9,1 + bst r18,7 + bld r13,1 + bst r19,0 + bld r23,2 + bst r19,1 + bld r5,2 + bst r19,2 + bld r9,2 + bst r19,3 + bld r13,2 + bst r19,4 + bld r23,3 + bst r19,5 + bld r5,3 + bst r19,6 + bld r9,3 + bst r19,7 + bld r13,3 + bst r20,0 + bld r23,4 + bst r20,1 + bld r5,4 + bst r20,2 + bld r9,4 + bst r20,3 + bld r13,4 + bst r20,4 + bld r23,5 + bst r20,5 + bld r5,5 + bst r20,6 + bld r9,5 + bst r20,7 + bld r13,5 + bst r21,0 + bld r23,6 + bst r21,1 + bld r5,6 + bst r21,2 + bld r9,6 + bst r21,3 + bld r13,6 + bst r21,4 + bld r23,7 + bst r21,5 + bld r5,7 + bst r21,6 + bld r9,7 + bst r21,7 + bld r13,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r2,0 + bst r18,1 + bld r6,0 + bst r18,2 + bld r10,0 + bst r18,3 + bld r14,0 + bst r18,4 + bld r2,1 + bst r18,5 + bld r6,1 + bst r18,6 + bld r10,1 + bst r18,7 + bld r14,1 + bst r19,0 + bld r2,2 + bst r19,1 + bld r6,2 + bst r19,2 + bld r10,2 + bst r19,3 + bld r14,2 + bst r19,4 + bld r2,3 + bst r19,5 + bld r6,3 + bst r19,6 + bld r10,3 + bst r19,7 + bld r14,3 + bst r20,0 + bld r2,4 + bst r20,1 + bld r6,4 + bst r20,2 + bld r10,4 + bst r20,3 + bld r14,4 + bst r20,4 + bld r2,5 + bst r20,5 + bld r6,5 + bst r20,6 + bld r10,5 + bst r20,7 + bld r14,5 + bst r21,0 + bld r2,6 + bst r21,1 + bld r6,6 + bst r21,2 + bld r10,6 + bst r21,3 + bld r14,6 + bst r21,4 + bld r2,7 + bst r21,5 + bld r6,7 + bst r21,6 + bld r10,7 + bst r21,7 + bld r14,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r3,0 + bst r18,1 + bld r7,0 + bst r18,2 + bld r11,0 + bst r18,3 + bld r15,0 + bst r18,4 + bld r3,1 + bst r18,5 + bld r7,1 + bst r18,6 + bld r11,1 + bst r18,7 + bld r15,1 + bst r19,0 + bld r3,2 + bst r19,1 + bld r7,2 + bst r19,2 + bld r11,2 + bst r19,3 + bld r15,2 + bst r19,4 + bld r3,3 + bst r19,5 + bld r7,3 + bst r19,6 + bld r11,3 + bst r19,7 + bld r15,3 + bst r20,0 + bld r3,4 + bst r20,1 + bld r7,4 + bst r20,2 + bld r11,4 + bst r20,3 + bld r15,4 + bst r20,4 + bld r3,5 + bst r20,5 + bld r7,5 + bst r20,6 + bld r11,5 + bst r20,7 + bld r15,5 + bst r21,0 + bld r3,6 + bst r21,1 + bld r7,6 + bst r21,2 + bld r11,6 + bst r21,3 + bld r15,6 + bst r21,4 + bld r3,7 + bst r21,5 + bld r7,7 + bst r21,6 + bld r11,7 + bst r21,7 + bld r15,7 + movw r26,r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 283f + rcall 283f + rcall 283f + rcall 283f + rcall 283f + rcall 283f + rcall 283f + rcall 283f + rjmp 1021f +283: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov 
r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + 
ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + 
eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +1021: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + bst r22,0 + bld r18,0 + bst r4,0 + bld r18,1 + bst r8,0 + bld r18,2 + bst r12,0 + bld r18,3 + bst r22,1 + bld r18,4 + bst r4,1 + bld r18,5 + bst r8,1 + bld r18,6 + bst r12,1 + bld r18,7 + bst r22,2 + bld r19,0 + bst r4,2 + bld r19,1 + bst r8,2 + bld r19,2 + bst r12,2 + bld r19,3 + bst r22,3 + bld r19,4 + bst r4,3 + bld r19,5 + bst r8,3 + bld r19,6 + bst r12,3 + bld r19,7 + bst r22,4 + bld r20,0 + bst r4,4 + bld r20,1 + bst r8,4 + bld r20,2 + bst r12,4 + bld r20,3 + bst r22,5 + bld r20,4 + bst r4,5 + bld r20,5 + bst r8,5 + bld r20,6 + bst r12,5 + bld r20,7 + bst r22,6 + bld r21,0 + bst r4,6 + bld r21,1 + bst r8,6 + bld r21,2 + bst r12,6 + bld r21,3 + bst r22,7 + bld r21,4 + bst r4,7 + bld r21,5 + bst r8,7 + bld r21,6 + bst r12,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r23,0 + bld r18,0 + bst r5,0 + bld r18,1 + bst r9,0 + bld r18,2 + bst r13,0 + bld r18,3 + bst r23,1 + bld r18,4 + bst r5,1 + bld r18,5 + bst r9,1 + bld r18,6 + bst r13,1 + bld r18,7 + bst r23,2 + bld r19,0 + bst r5,2 + bld r19,1 + bst r9,2 + bld r19,2 + bst r13,2 + bld r19,3 + bst r23,3 + bld r19,4 + bst r5,3 + bld r19,5 + bst r9,3 + bld r19,6 + bst r13,3 + bld r19,7 + bst r23,4 + bld r20,0 + bst r5,4 + bld r20,1 + bst r9,4 + bld r20,2 + bst r13,4 + bld r20,3 + bst r23,5 + bld r20,4 + bst r5,5 + bld r20,5 + bst r9,5 + bld r20,6 + bst r13,5 + bld r20,7 + bst r23,6 + bld r21,0 + bst r5,6 + bld r21,1 + bst r9,6 + bld r21,2 + bst r13,6 + bld r21,3 + bst r23,7 + bld r21,4 + bst r5,7 + bld r21,5 + bst r9,7 + bld r21,6 + bst r13,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r2,0 + bld r18,0 + bst r6,0 + bld r18,1 + bst r10,0 + bld r18,2 + bst r14,0 + bld r18,3 + bst r2,1 + bld r18,4 + bst r6,1 + bld r18,5 + bst r10,1 + bld r18,6 + bst r14,1 + bld r18,7 + bst r2,2 + bld r19,0 + bst r6,2 + bld r19,1 + bst r10,2 + bld r19,2 + bst r14,2 + bld r19,3 + bst r2,3 + bld r19,4 + bst r6,3 + bld r19,5 + bst r10,3 + bld r19,6 + bst r14,3 + bld r19,7 + bst r2,4 + bld r20,0 + bst r6,4 + bld r20,1 + bst r10,4 + bld r20,2 + bst r14,4 + bld r20,3 + bst r2,5 + bld r20,4 + bst r6,5 + bld r20,5 + bst r10,5 + bld r20,6 + bst r14,5 + bld r20,7 + bst r2,6 + bld r21,0 + bst r6,6 + bld r21,1 + bst r10,6 + bld r21,2 + bst r14,6 + bld r21,3 + bst r2,7 + bld r21,4 + bst r6,7 + bld r21,5 + bst r10,7 + bld r21,6 + bst r14,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r3,0 + bld r18,0 + bst r7,0 + bld r18,1 + bst r11,0 + bld r18,2 + bst r15,0 + bld r18,3 + bst r3,1 + bld r18,4 + bst r7,1 + bld r18,5 + bst r11,1 + bld r18,6 + bst r15,1 + bld r18,7 + bst r3,2 + bld r19,0 + bst r7,2 + bld r19,1 + bst r11,2 + bld r19,2 + bst r15,2 + bld r19,3 + bst r3,3 + bld r19,4 + bst r7,3 + bld r19,5 + bst r11,3 + bld r19,6 + bst r15,3 + bld r19,7 + bst r3,4 + bld r20,0 + bst r7,4 + bld r20,1 + bst r11,4 + bld r20,2 + bst r15,4 + bld r20,3 + bst r3,5 + bld r20,4 + bst r7,5 + bld r20,5 + bst r11,5 + bld r20,6 + bst r15,5 + bld r20,7 + bst r3,6 + bld r21,0 + bst r7,6 + bld r21,1 + bst r11,6 + bld r21,2 + bst r15,6 + bld r21,3 + bst r3,7 + bld r21,4 + bst r7,7 + bld 
r21,5 + bst r11,7 + bld r21,6 + bst r15,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + pop r0 + pop r0 + pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128n_encrypt, .-gift128n_encrypt + + .text +.global gift128n_decrypt + .type gift128n_decrypt, @function +gift128n_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r22,0 + bst r18,1 + bld r4,0 + bst r18,2 + bld r8,0 + bst r18,3 + bld r12,0 + bst r18,4 + bld r22,1 + bst r18,5 + bld r4,1 + bst r18,6 + bld r8,1 + bst r18,7 + bld r12,1 + bst r19,0 + bld r22,2 + bst r19,1 + bld r4,2 + bst r19,2 + bld r8,2 + bst r19,3 + bld r12,2 + bst r19,4 + bld r22,3 + bst r19,5 + bld r4,3 + bst r19,6 + bld r8,3 + bst r19,7 + bld r12,3 + bst r20,0 + bld r22,4 + bst r20,1 + bld r4,4 + bst r20,2 + bld r8,4 + bst r20,3 + bld r12,4 + bst r20,4 + bld r22,5 + bst r20,5 + bld r4,5 + bst r20,6 + bld r8,5 + bst r20,7 + bld r12,5 + bst r21,0 + bld r22,6 + bst r21,1 + bld r4,6 + bst r21,2 + bld r8,6 + bst r21,3 + bld r12,6 + bst r21,4 + bld r22,7 + bst r21,5 + bld r4,7 + bst r21,6 + bld r8,7 + bst r21,7 + bld r12,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r23,0 + bst r18,1 + bld r5,0 + bst r18,2 + bld r9,0 + bst r18,3 + bld r13,0 + bst r18,4 + bld r23,1 + bst r18,5 + bld r5,1 + bst r18,6 + bld r9,1 + bst r18,7 + bld r13,1 + bst r19,0 + bld r23,2 + bst r19,1 + bld r5,2 + bst r19,2 + bld r9,2 + bst r19,3 + bld r13,2 + bst r19,4 + bld r23,3 + bst r19,5 + bld r5,3 + bst r19,6 + bld r9,3 + bst r19,7 + bld r13,3 + bst r20,0 + bld r23,4 + bst r20,1 + bld r5,4 + bst r20,2 + bld r9,4 + bst r20,3 + bld r13,4 + bst r20,4 + bld r23,5 + bst r20,5 + bld r5,5 + bst r20,6 + bld r9,5 + bst r20,7 + bld r13,5 + bst r21,0 + bld r23,6 + bst r21,1 + bld r5,6 + bst r21,2 + bld r9,6 + bst r21,3 + bld r13,6 + bst r21,4 + bld r23,7 + bst r21,5 + bld r5,7 + bst r21,6 + bld r9,7 + bst r21,7 + bld r13,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r2,0 + bst r18,1 + bld r6,0 + bst r18,2 + bld r10,0 + bst r18,3 + bld r14,0 + bst r18,4 + bld r2,1 + bst r18,5 + bld r6,1 + bst r18,6 + bld r10,1 + bst r18,7 + bld r14,1 + bst r19,0 + bld r2,2 + bst r19,1 + bld r6,2 + bst r19,2 + bld r10,2 + bst r19,3 + bld r14,2 + bst r19,4 + bld r2,3 + bst r19,5 + bld r6,3 + bst r19,6 + bld r10,3 + bst r19,7 + bld r14,3 + bst r20,0 + bld r2,4 + bst r20,1 + bld r6,4 + bst r20,2 + bld r10,4 + bst r20,3 + bld r14,4 + bst r20,4 + bld r2,5 + bst r20,5 + bld r6,5 + bst r20,6 + bld r10,5 + bst r20,7 + bld r14,5 + bst r21,0 + bld r2,6 + bst r21,1 + bld r6,6 + bst r21,2 + bld r10,6 + bst r21,3 + bld r14,6 + bst r21,4 + bld r2,7 + bst r21,5 + bld r6,7 + bst r21,6 + bld r10,7 + bst r21,7 + bld r14,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r3,0 + bst r18,1 + bld r7,0 + bst r18,2 + bld r11,0 + bst r18,3 + bld r15,0 + bst r18,4 + bld r3,1 + bst r18,5 + bld r7,1 + bst r18,6 + bld r11,1 + bst r18,7 + bld r15,1 + bst r19,0 + bld r3,2 + bst r19,1 + bld r7,2 + bst r19,2 + bld r11,2 + bst r19,3 + bld r15,2 + bst r19,4 + bld r3,3 + bst r19,5 + bld r7,3 + bst r19,6 + bld r11,3 + bst r19,7 + bld r15,3 + bst r20,0 + bld r3,4 + 
bst r20,1 + bld r7,4 + bst r20,2 + bld r11,4 + bst r20,3 + bld r15,4 + bst r20,4 + bld r3,5 + bst r20,5 + bld r7,5 + bst r20,6 + bld r11,5 + bst r20,7 + bld r15,5 + bst r21,0 + bld r3,6 + bst r21,1 + bld r7,6 + bst r21,2 + bld r11,6 + bst r21,3 + bld r15,6 + bst r21,4 + bld r3,7 + bst r21,5 + bld r7,7 + bst r21,6 + bld r11,7 + bst r21,7 + bld r15,7 + movw r26,r30 + subi r26,192 + sbci r27,254 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,160 + rcall 286f + rcall 286f + rcall 286f + rcall 286f + rcall 286f + rcall 286f + rcall 286f + rcall 286f + rjmp 1024f +286: + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X 
+ ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r1 + lsr r22 + ror r0 + lsr r22 + ror r0 + or r22,r0 + mov r0,r1 + lsr r23 + ror r0 + lsr r23 + ror r0 + or r23,r0 + mov r0,r1 + lsr r2 + ror r0 + lsr r2 + ror r0 + or r2,r0 + mov r0,r1 + lsr r3 + ror r0 + lsr r3 + ror r0 + or r3,r0 + swap r4 + swap r5 + swap r6 + swap r7 + lsl r8 + adc r8,r1 + lsl r8 + adc r8,r1 + lsl r9 + adc r9,r1 + lsl r9 + adc r9,r1 + lsl r10 + adc r10,r1 + lsl r10 + adc r10,r1 + lsl r11 + adc r11,r1 + lsl r11 + adc r11,r1 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + com r22 + com r23 + com r2 + com r3 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 
+ mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + com r22 + com r23 + com r2 + com r3 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi 
r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,119 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r15 + ror r14 + ror r13 + ror r12 + lsr r15 + ror r14 + ror r13 + ror r12 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,17 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +1024: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + bst r22,0 + bld r18,0 + bst r4,0 + bld r18,1 + bst r8,0 + bld r18,2 + bst r12,0 + bld r18,3 + bst r22,1 + bld r18,4 + bst r4,1 + bld r18,5 + bst r8,1 + bld r18,6 + bst r12,1 + bld r18,7 + bst r22,2 + bld r19,0 + bst r4,2 + bld r19,1 + bst r8,2 + bld r19,2 + bst r12,2 + bld r19,3 + bst r22,3 + bld r19,4 + bst r4,3 + bld r19,5 + bst r8,3 + bld r19,6 + bst r12,3 + bld r19,7 + bst r22,4 + bld r20,0 + bst r4,4 + bld r20,1 + bst r8,4 + bld r20,2 + bst r12,4 + bld r20,3 + bst r22,5 + bld r20,4 + bst r4,5 + bld r20,5 + bst r8,5 + bld r20,6 + bst r12,5 + bld r20,7 + bst r22,6 + bld r21,0 + bst r4,6 + bld r21,1 + bst r8,6 + bld r21,2 + bst r12,6 + bld r21,3 + bst r22,7 + bld r21,4 + bst r4,7 + bld r21,5 + bst r8,7 + bld r21,6 + bst r12,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r23,0 + bld r18,0 + bst r5,0 + bld r18,1 + bst r9,0 + bld r18,2 + bst r13,0 + bld r18,3 + bst r23,1 + bld r18,4 + bst r5,1 + bld r18,5 + bst r9,1 + bld r18,6 + bst r13,1 + bld r18,7 + bst r23,2 + bld r19,0 + bst r5,2 + bld r19,1 + bst r9,2 + bld r19,2 + bst r13,2 + bld r19,3 + bst r23,3 + bld r19,4 + bst r5,3 + bld r19,5 + bst r9,3 + bld r19,6 + bst r13,3 + bld r19,7 + bst r23,4 + bld r20,0 + bst r5,4 + bld r20,1 + bst r9,4 + bld r20,2 + bst r13,4 + bld r20,3 + bst r23,5 + bld r20,4 + bst r5,5 + bld r20,5 + bst r9,5 + bld r20,6 + bst r13,5 + bld r20,7 + bst r23,6 + bld r21,0 + bst r5,6 + bld r21,1 + bst r9,6 + bld r21,2 + bst r13,6 + bld r21,3 + bst r23,7 + bld r21,4 + bst r5,7 + bld r21,5 + bst r9,7 + bld r21,6 + bst r13,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r2,0 + bld r18,0 + bst r6,0 + bld r18,1 + bst r10,0 + bld r18,2 + bst r14,0 + bld r18,3 + bst r2,1 + bld r18,4 + bst r6,1 + bld r18,5 + bst r10,1 + bld r18,6 + 
bst r14,1 + bld r18,7 + bst r2,2 + bld r19,0 + bst r6,2 + bld r19,1 + bst r10,2 + bld r19,2 + bst r14,2 + bld r19,3 + bst r2,3 + bld r19,4 + bst r6,3 + bld r19,5 + bst r10,3 + bld r19,6 + bst r14,3 + bld r19,7 + bst r2,4 + bld r20,0 + bst r6,4 + bld r20,1 + bst r10,4 + bld r20,2 + bst r14,4 + bld r20,3 + bst r2,5 + bld r20,4 + bst r6,5 + bld r20,5 + bst r10,5 + bld r20,6 + bst r14,5 + bld r20,7 + bst r2,6 + bld r21,0 + bst r6,6 + bld r21,1 + bst r10,6 + bld r21,2 + bst r14,6 + bld r21,3 + bst r2,7 + bld r21,4 + bst r6,7 + bld r21,5 + bst r10,7 + bld r21,6 + bst r14,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r3,0 + bld r18,0 + bst r7,0 + bld r18,1 + bst r11,0 + bld r18,2 + bst r15,0 + bld r18,3 + bst r3,1 + bld r18,4 + bst r7,1 + bld r18,5 + bst r11,1 + bld r18,6 + bst r15,1 + bld r18,7 + bst r3,2 + bld r19,0 + bst r7,2 + bld r19,1 + bst r11,2 + bld r19,2 + bst r15,2 + bld r19,3 + bst r3,3 + bld r19,4 + bst r7,3 + bld r19,5 + bst r11,3 + bld r19,6 + bst r15,3 + bld r19,7 + bst r3,4 + bld r20,0 + bst r7,4 + bld r20,1 + bst r11,4 + bld r20,2 + bst r15,4 + bld r20,3 + bst r3,5 + bld r20,4 + bst r7,5 + bld r20,5 + bst r11,5 + bld r20,6 + bst r15,5 + bld r20,7 + bst r3,6 + bld r21,0 + bst r7,6 + bld r21,1 + bst r11,6 + bld r21,2 + bst r15,6 + bld r21,3 + bst r3,7 + bld r21,4 + bst r7,7 + bld r21,5 + bst r11,7 + bld r21,6 + bst r15,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + pop r0 + pop r0 + pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128n_decrypt, .-gift128n_decrypt + + .text +.global gift128t_encrypt + .type gift128t_encrypt, @function +gift128t_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 20 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r2,0 + bst r20,1 + bld r6,0 + bst r20,2 + bld r10,0 + bst r20,3 + bld r14,0 + bst r20,4 + bld r2,1 + bst r20,5 + bld r6,1 + bst r20,6 + bld r10,1 + bst r20,7 + bld r14,1 + bst r21,0 + bld r2,2 + bst r21,1 + bld r6,2 + bst r21,2 + bld r10,2 + bst r21,3 + bld r14,2 + bst r21,4 + bld r2,3 + bst r21,5 + bld r6,3 + bst r21,6 + bld r10,3 + bst r21,7 + bld r14,3 + bst r22,0 + bld r2,4 + bst r22,1 + bld r6,4 + bst r22,2 + bld r10,4 + bst r22,3 + bld r14,4 + bst r22,4 + bld r2,5 + bst r22,5 + bld r6,5 + bst r22,6 + bld r10,5 + bst r22,7 + bld r14,5 + bst r23,0 + bld r2,6 + bst r23,1 + bld r6,6 + bst r23,2 + bld r10,6 + bst r23,3 + bld r14,6 + bst r23,4 + bld r2,7 + bst r23,5 + bld r6,7 + bst r23,6 + bld r10,7 + bst r23,7 + bld r14,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r3,0 + bst r20,1 + bld r7,0 + bst r20,2 + bld r11,0 + bst r20,3 + bld r15,0 + bst r20,4 + bld r3,1 + bst r20,5 + bld r7,1 + bst r20,6 + bld r11,1 + bst r20,7 + bld r15,1 + bst r21,0 + bld r3,2 + bst r21,1 + bld r7,2 + bst r21,2 + bld r11,2 + bst r21,3 + bld r15,2 + bst r21,4 + bld r3,3 + bst r21,5 + bld r7,3 + bst r21,6 + bld r11,3 + bst r21,7 + bld r15,3 + bst r22,0 + bld r3,4 + bst r22,1 + bld r7,4 + bst r22,2 + bld r11,4 + bst r22,3 + bld r15,4 + bst r22,4 + bld r3,5 + bst r22,5 + bld r7,5 + bst r22,6 + bld r11,5 + bst r22,7 + bld r15,5 + bst r23,0 + bld r3,6 + bst r23,1 + bld r7,6 + bst r23,2 + bld r11,6 + bst r23,3 + bld r15,6 
+ bst r23,4 + bld r3,7 + bst r23,5 + bld r7,7 + bst r23,6 + bld r11,7 + bst r23,7 + bld r15,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r4,0 + bst r20,1 + bld r8,0 + bst r20,2 + bld r12,0 + bst r20,3 + bld r24,0 + bst r20,4 + bld r4,1 + bst r20,5 + bld r8,1 + bst r20,6 + bld r12,1 + bst r20,7 + bld r24,1 + bst r21,0 + bld r4,2 + bst r21,1 + bld r8,2 + bst r21,2 + bld r12,2 + bst r21,3 + bld r24,2 + bst r21,4 + bld r4,3 + bst r21,5 + bld r8,3 + bst r21,6 + bld r12,3 + bst r21,7 + bld r24,3 + bst r22,0 + bld r4,4 + bst r22,1 + bld r8,4 + bst r22,2 + bld r12,4 + bst r22,3 + bld r24,4 + bst r22,4 + bld r4,5 + bst r22,5 + bld r8,5 + bst r22,6 + bld r12,5 + bst r22,7 + bld r24,5 + bst r23,0 + bld r4,6 + bst r23,1 + bld r8,6 + bst r23,2 + bld r12,6 + bst r23,3 + bld r24,6 + bst r23,4 + bld r4,7 + bst r23,5 + bld r8,7 + bst r23,6 + bld r12,7 + bst r23,7 + bld r24,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r5,0 + bst r20,1 + bld r9,0 + bst r20,2 + bld r13,0 + bst r20,3 + bld r25,0 + bst r20,4 + bld r5,1 + bst r20,5 + bld r9,1 + bst r20,6 + bld r13,1 + bst r20,7 + bld r25,1 + bst r21,0 + bld r5,2 + bst r21,1 + bld r9,2 + bst r21,2 + bld r13,2 + bst r21,3 + bld r25,2 + bst r21,4 + bld r5,3 + bst r21,5 + bld r9,3 + bst r21,6 + bld r13,3 + bst r21,7 + bld r25,3 + bst r22,0 + bld r5,4 + bst r22,1 + bld r9,4 + bst r22,2 + bld r13,4 + bst r22,3 + bld r25,4 + bst r22,4 + bld r5,5 + bst r22,5 + bld r9,5 + bst r22,6 + bld r13,5 + bst r22,7 + bld r25,5 + bst r23,0 + bld r5,6 + bst r23,1 + bld r9,6 + bst r23,2 + bld r13,6 + bst r23,3 + bld r25,6 + bst r23,4 + bld r5,7 + bst r23,5 + bld r9,7 + bst r23,6 + bld r13,7 + bst r23,7 + bld r25,7 + movw r26,r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + rcall 311f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 311f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 311f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 311f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 311f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 311f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 311f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 311f + rjmp 1049f +311: + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + com r14 + com r15 + com r24 + com r25 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + movw r20,r6 + movw r22,r8 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,204 + andi r21,204 + andi r22,204 + andi r23,204 + lsr r9 + ror r8 + ror r7 + ror r6 + lsr r9 + ror r8 + ror r7 + ror r6 + ldi r19,51 + and r6,r19 + and r7,r19 + and r8,r19 + and r9,r19 + or r6,r20 + or r7,r21 + or r8,r22 + or r9,r23 + movw r20,r10 + movw r22,r12 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,238 + andi r21,238 + 
andi r22,238 + andi r23,238 + lsr r13 + ror r12 + ror r11 + ror r10 + lsr r13 + ror r12 + ror r11 + ror r10 + lsr r13 + ror r12 + ror r11 + ror r10 + ldi r17,17 + and r10,r17 + and r11,r17 + and r12,r17 + and r13,r17 + or r10,r20 + or r11,r21 + or r12,r22 + or r13,r23 + movw r20,r14 + movw r22,r24 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,136 + andi r21,136 + andi r22,136 + andi r23,136 + lsr r25 + ror r24 + ror r15 + ror r14 + ldi r16,119 + and r14,r16 + and r15,r16 + andi r24,119 + andi r25,119 + or r14,r20 + or r15,r21 + or r24,r22 + or r25,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r24 + and r0,r12 + eor r8,r0 + mov r0,r25 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r8 + and r0,r4 + eor r24,r0 + mov r0,r9 + and r0,r5 + eor r25,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r24 + or r0,r8 + eor r12,r0 + mov r0,r25 + or r0,r9 + eor r13,r0 + eor r2,r10 + eor r3,r11 + eor r4,r12 + eor r5,r13 + eor r6,r2 + eor r7,r3 + eor r8,r4 + eor r9,r5 + com r2 + com r3 + com r4 + com r5 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r24 + and r0,r8 + eor r12,r0 + mov r0,r25 + and r0,r9 + eor r13,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r1 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + or r5,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + mov r0,r9 + mov r9,r8 + mov r8,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r12 + rol r13 + adc r12,r1 + lsl r12 + rol r13 + adc r12,r1 + lsl r12 + rol r13 + adc r12,r1 + lsl r12 + rol r13 + adc r12,r1 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov 
r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r14,r20 + eor r15,r21 + eor r24,r22 + eor r25,r23 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + com r14 + com r15 + com r24 + com r25 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + movw r20,r6 + movw r22,r8 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + andi r20,85 + andi r21,85 + andi r22,85 + andi r23,85 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + mov r0,r12 + mov r12,r10 + mov r10,r0 + mov r0,r13 + mov r13,r11 + mov r11,r0 + movw r20,r10 + movw r22,r12 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r10 + eor r21,r11 + andi r20,85 + andi r21,85 + eor r10,r20 + eor r11,r21 + mov r22,r1 + mov r23,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + mov r0,r24 + mov r24,r14 + mov r14,r0 + mov r0,r25 + mov r25,r15 + mov r15,r0 + movw r20,r24 + lsr r21 + ror r20 + eor r20,r24 + eor r21,r25 + andi r20,85 + andi r21,85 + eor r24,r20 + eor r25,r21 + lsl r20 + rol r21 + eor r24,r20 + eor r25,r21 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r24 + and r0,r12 + eor r8,r0 + mov r0,r25 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r8 + and r0,r4 + eor r24,r0 + mov r0,r9 + and r0,r5 + eor r25,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r24 + or r0,r8 + eor r12,r0 + mov r0,r25 + or r0,r9 + eor r13,r0 + eor r2,r10 + eor r3,r11 + eor r4,r12 + eor r5,r13 + eor r6,r2 + eor r7,r3 + eor r8,r4 + eor r9,r5 + com r2 + com r3 + com r4 + com r5 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r24 + and r0,r8 + eor r12,r0 + mov r0,r25 + and r0,r9 + eor r13,r0 + lsl 
r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r5 + adc r5,r1 + lsl r5 + adc r5,r1 + swap r6 + swap r7 + swap r8 + swap r9 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + mov r0,r1 + lsr r12 + ror r0 + lsr r12 + ror r0 + or r12,r0 + mov r0,r1 + lsr r13 + ror r0 + lsr r13 + ror r0 + or r13,r0 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r14,r20 + eor r15,r21 + eor r24,r22 + eor r25,r23 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + com r14 + com r15 + com r24 + com r25 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + mov r0,r8 + mov r8,r6 + mov r6,r0 + mov r0,r9 + mov r9,r7 + mov r7,r0 + mov r0,r10 + mov r10,r11 + mov r11,r12 + mov r12,r13 + mov r13,r0 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + eor r14,r2 + eor r15,r3 + eor r24,r4 + eor r25,r5 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + ret +1049: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + bst r2,0 + bld r20,0 + bst r6,0 + bld r20,1 + bst r10,0 + bld r20,2 + bst 
r14,0 + bld r20,3 + bst r2,1 + bld r20,4 + bst r6,1 + bld r20,5 + bst r10,1 + bld r20,6 + bst r14,1 + bld r20,7 + bst r2,2 + bld r21,0 + bst r6,2 + bld r21,1 + bst r10,2 + bld r21,2 + bst r14,2 + bld r21,3 + bst r2,3 + bld r21,4 + bst r6,3 + bld r21,5 + bst r10,3 + bld r21,6 + bst r14,3 + bld r21,7 + bst r2,4 + bld r22,0 + bst r6,4 + bld r22,1 + bst r10,4 + bld r22,2 + bst r14,4 + bld r22,3 + bst r2,5 + bld r22,4 + bst r6,5 + bld r22,5 + bst r10,5 + bld r22,6 + bst r14,5 + bld r22,7 + bst r2,6 + bld r23,0 + bst r6,6 + bld r23,1 + bst r10,6 + bld r23,2 + bst r14,6 + bld r23,3 + bst r2,7 + bld r23,4 + bst r6,7 + bld r23,5 + bst r10,7 + bld r23,6 + bst r14,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r3,0 + bld r20,0 + bst r7,0 + bld r20,1 + bst r11,0 + bld r20,2 + bst r15,0 + bld r20,3 + bst r3,1 + bld r20,4 + bst r7,1 + bld r20,5 + bst r11,1 + bld r20,6 + bst r15,1 + bld r20,7 + bst r3,2 + bld r21,0 + bst r7,2 + bld r21,1 + bst r11,2 + bld r21,2 + bst r15,2 + bld r21,3 + bst r3,3 + bld r21,4 + bst r7,3 + bld r21,5 + bst r11,3 + bld r21,6 + bst r15,3 + bld r21,7 + bst r3,4 + bld r22,0 + bst r7,4 + bld r22,1 + bst r11,4 + bld r22,2 + bst r15,4 + bld r22,3 + bst r3,5 + bld r22,4 + bst r7,5 + bld r22,5 + bst r11,5 + bld r22,6 + bst r15,5 + bld r22,7 + bst r3,6 + bld r23,0 + bst r7,6 + bld r23,1 + bst r11,6 + bld r23,2 + bst r15,6 + bld r23,3 + bst r3,7 + bld r23,4 + bst r7,7 + bld r23,5 + bst r11,7 + bld r23,6 + bst r15,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r4,0 + bld r20,0 + bst r8,0 + bld r20,1 + bst r12,0 + bld r20,2 + bst r24,0 + bld r20,3 + bst r4,1 + bld r20,4 + bst r8,1 + bld r20,5 + bst r12,1 + bld r20,6 + bst r24,1 + bld r20,7 + bst r4,2 + bld r21,0 + bst r8,2 + bld r21,1 + bst r12,2 + bld r21,2 + bst r24,2 + bld r21,3 + bst r4,3 + bld r21,4 + bst r8,3 + bld r21,5 + bst r12,3 + bld r21,6 + bst r24,3 + bld r21,7 + bst r4,4 + bld r22,0 + bst r8,4 + bld r22,1 + bst r12,4 + bld r22,2 + bst r24,4 + bld r22,3 + bst r4,5 + bld r22,4 + bst r8,5 + bld r22,5 + bst r12,5 + bld r22,6 + bst r24,5 + bld r22,7 + bst r4,6 + bld r23,0 + bst r8,6 + bld r23,1 + bst r12,6 + bld r23,2 + bst r24,6 + bld r23,3 + bst r4,7 + bld r23,4 + bst r8,7 + bld r23,5 + bst r12,7 + bld r23,6 + bst r24,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r5,0 + bld r20,0 + bst r9,0 + bld r20,1 + bst r13,0 + bld r20,2 + bst r25,0 + bld r20,3 + bst r5,1 + bld r20,4 + bst r9,1 + bld r20,5 + bst r13,1 + bld r20,6 + bst r25,1 + bld r20,7 + bst r5,2 + bld r21,0 + bst r9,2 + bld r21,1 + bst r13,2 + bld r21,2 + bst r25,2 + bld r21,3 + bst r5,3 + bld r21,4 + bst r9,3 + bld r21,5 + bst r13,3 + bld r21,6 + bst r25,3 + bld r21,7 + bst r5,4 + bld r22,0 + bst r9,4 + bld r22,1 + bst r13,4 + bld r22,2 + bst r25,4 + bld r22,3 + bst r5,5 + bld r22,4 + bst r9,5 + bld r22,5 + bst r13,5 + bld r22,6 + bst r25,5 + bld r22,7 + bst r5,6 + bld r23,0 + bst r9,6 + bld r23,1 + bst r13,6 + bld r23,2 + bst r25,6 + bld r23,3 + bst r5,7 + bld r23,4 + bst r9,7 + bld r23,5 + bst r13,7 + bld r23,6 + bst r25,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + pop r0 + pop r0 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128t_encrypt, .-gift128t_encrypt + + .text +.global gift128t_decrypt + .type gift128t_decrypt, @function +gift128t_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push 
r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 20 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r2,0 + bst r20,1 + bld r6,0 + bst r20,2 + bld r10,0 + bst r20,3 + bld r14,0 + bst r20,4 + bld r2,1 + bst r20,5 + bld r6,1 + bst r20,6 + bld r10,1 + bst r20,7 + bld r14,1 + bst r21,0 + bld r2,2 + bst r21,1 + bld r6,2 + bst r21,2 + bld r10,2 + bst r21,3 + bld r14,2 + bst r21,4 + bld r2,3 + bst r21,5 + bld r6,3 + bst r21,6 + bld r10,3 + bst r21,7 + bld r14,3 + bst r22,0 + bld r2,4 + bst r22,1 + bld r6,4 + bst r22,2 + bld r10,4 + bst r22,3 + bld r14,4 + bst r22,4 + bld r2,5 + bst r22,5 + bld r6,5 + bst r22,6 + bld r10,5 + bst r22,7 + bld r14,5 + bst r23,0 + bld r2,6 + bst r23,1 + bld r6,6 + bst r23,2 + bld r10,6 + bst r23,3 + bld r14,6 + bst r23,4 + bld r2,7 + bst r23,5 + bld r6,7 + bst r23,6 + bld r10,7 + bst r23,7 + bld r14,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r3,0 + bst r20,1 + bld r7,0 + bst r20,2 + bld r11,0 + bst r20,3 + bld r15,0 + bst r20,4 + bld r3,1 + bst r20,5 + bld r7,1 + bst r20,6 + bld r11,1 + bst r20,7 + bld r15,1 + bst r21,0 + bld r3,2 + bst r21,1 + bld r7,2 + bst r21,2 + bld r11,2 + bst r21,3 + bld r15,2 + bst r21,4 + bld r3,3 + bst r21,5 + bld r7,3 + bst r21,6 + bld r11,3 + bst r21,7 + bld r15,3 + bst r22,0 + bld r3,4 + bst r22,1 + bld r7,4 + bst r22,2 + bld r11,4 + bst r22,3 + bld r15,4 + bst r22,4 + bld r3,5 + bst r22,5 + bld r7,5 + bst r22,6 + bld r11,5 + bst r22,7 + bld r15,5 + bst r23,0 + bld r3,6 + bst r23,1 + bld r7,6 + bst r23,2 + bld r11,6 + bst r23,3 + bld r15,6 + bst r23,4 + bld r3,7 + bst r23,5 + bld r7,7 + bst r23,6 + bld r11,7 + bst r23,7 + bld r15,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r4,0 + bst r20,1 + bld r8,0 + bst r20,2 + bld r12,0 + bst r20,3 + bld r24,0 + bst r20,4 + bld r4,1 + bst r20,5 + bld r8,1 + bst r20,6 + bld r12,1 + bst r20,7 + bld r24,1 + bst r21,0 + bld r4,2 + bst r21,1 + bld r8,2 + bst r21,2 + bld r12,2 + bst r21,3 + bld r24,2 + bst r21,4 + bld r4,3 + bst r21,5 + bld r8,3 + bst r21,6 + bld r12,3 + bst r21,7 + bld r24,3 + bst r22,0 + bld r4,4 + bst r22,1 + bld r8,4 + bst r22,2 + bld r12,4 + bst r22,3 + bld r24,4 + bst r22,4 + bld r4,5 + bst r22,5 + bld r8,5 + bst r22,6 + bld r12,5 + bst r22,7 + bld r24,5 + bst r23,0 + bld r4,6 + bst r23,1 + bld r8,6 + bst r23,2 + bld r12,6 + bst r23,3 + bld r24,6 + bst r23,4 + bld r4,7 + bst r23,5 + bld r8,7 + bst r23,6 + bld r12,7 + bst r23,7 + bld r24,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r5,0 + bst r20,1 + bld r9,0 + bst r20,2 + bld r13,0 + bst r20,3 + bld r25,0 + bst r20,4 + bld r5,1 + bst r20,5 + bld r9,1 + bst r20,6 + bld r13,1 + bst r20,7 + bld r25,1 + bst r21,0 + bld r5,2 + bst r21,1 + bld r9,2 + bst r21,2 + bld r13,2 + bst r21,3 + bld r25,2 + bst r21,4 + bld r5,3 + bst r21,5 + bld r9,3 + bst r21,6 + bld r13,3 + bst r21,7 + bld r25,3 + bst r22,0 + bld r5,4 + bst r22,1 + bld r9,4 + bst r22,2 + bld r13,4 + bst r22,3 + bld r25,4 + bst r22,4 + bld r5,5 + bst r22,5 + bld r9,5 + bst r22,6 + bld r13,5 + bst r22,7 + bld r25,5 + bst r23,0 + bld r5,6 + bst r23,1 + bld r9,6 + bst r23,2 + bld r13,6 + bst r23,3 + bld r25,6 + bst r23,4 + bld r5,7 + bst r23,5 + bld r9,7 + bst r23,6 + bld r13,7 + bst r23,7 + bld r25,7 + movw r26,r30 + subi r26,192 + sbci r27,254 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) 
+ push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,160 + rcall 314f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 314f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 314f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 314f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 314f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 314f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 314f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 314f + rjmp 1052f +314: + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + eor r14,r2 + eor r15,r3 + eor r24,r4 + eor r25,r5 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + dec r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + ld r23,-X + ld r22,-X + ld r21,-X + ld r20,-X + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + ld r23,-X + ld r22,-X + ld r21,-X + ld r20,-X + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + mov r0,r8 + mov r8,r6 + mov r6,r0 + mov r0,r9 + mov r9,r7 + mov r7,r0 + mov r0,r13 + mov r13,r12 + mov r12,r11 + mov r11,r10 + mov r10,r0 + mov r0,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + com r14 + com r15 + com r24 + com r25 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + dec r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + eor r14,r20 + eor r15,r21 + eor r24,r22 + eor r25,r23 + ld r23,-X + ld r22,-X + ld r21,-X + ld r20,-X + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + ld r23,-X + ld r22,-X + ld r21,-X + ld r20,-X + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + mov r0,r1 + lsr r2 + ror r0 + lsr r2 + ror r0 + or r2,r0 + mov r0,r1 + lsr r3 + ror 
r0 + lsr r3 + ror r0 + or r3,r0 + mov r0,r1 + lsr r4 + ror r0 + lsr r4 + ror r0 + or r4,r0 + mov r0,r1 + lsr r5 + ror r0 + lsr r5 + ror r0 + or r5,r0 + swap r6 + swap r7 + swap r8 + swap r9 + lsl r10 + adc r10,r1 + lsl r10 + adc r10,r1 + lsl r11 + adc r11,r1 + lsl r11 + adc r11,r1 + lsl r12 + adc r12,r1 + lsl r12 + adc r12,r1 + lsl r13 + adc r13,r1 + lsl r13 + adc r13,r1 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r24 + and r0,r8 + eor r12,r0 + mov r0,r25 + and r0,r9 + eor r13,r0 + com r2 + com r3 + com r4 + com r5 + eor r6,r2 + eor r7,r3 + eor r8,r4 + eor r9,r5 + eor r2,r10 + eor r3,r11 + eor r4,r12 + eor r5,r13 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r24 + or r0,r8 + eor r12,r0 + mov r0,r25 + or r0,r9 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r8 + and r0,r4 + eor r24,r0 + mov r0,r9 + and r0,r5 + eor r25,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r24 + and r0,r12 + eor r8,r0 + mov r0,r25 + and r0,r13 + eor r9,r0 + dec r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + ld r23,-X + ld r22,-X + ld r21,-X + ld r20,-X + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + ld r23,-X + ld r22,-X + ld r21,-X + ld r20,-X + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + movw r20,r6 + movw r22,r8 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + andi r20,85 + andi r21,85 + andi r22,85 + andi r23,85 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + movw r20,r10 + movw r22,r12 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r10 + eor r21,r11 + andi r20,85 + andi r21,85 + eor r10,r20 + eor r11,r21 + mov r22,r1 + mov r23,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + mov r0,r12 + mov r12,r10 + mov r10,r0 + mov r0,r13 + mov r13,r11 + mov r11,r0 + movw r20,r24 + lsr r21 + ror r20 + eor r20,r24 + eor r21,r25 + andi r20,85 + andi r21,85 + eor r24,r20 + eor r25,r21 + lsl r20 + rol r21 + eor r24,r20 + eor r25,r21 + mov r0,r24 + mov r24,r14 + mov r14,r0 + mov r0,r25 + mov r25,r15 + mov r15,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + com r14 + com r15 + com r24 + com r25 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + and r0,r10 
+ eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + dec r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + eor r14,r20 + eor r15,r21 + eor r24,r22 + eor r25,r23 + ld r23,-X + ld r22,-X + ld r21,-X + ld r20,-X + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + ld r23,-X + ld r22,-X + ld r21,-X + ld r20,-X + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r4 + rol r5 + adc r4,r1 + lsl r4 + rol r5 + adc r4,r1 + lsl r4 + rol r5 + adc r4,r1 + lsl r4 + rol r5 + adc r4,r1 + mov r0,r7 + mov r7,r6 + mov r6,r0 + mov r0,r9 + mov r9,r8 + mov r8,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + or r13,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r24 + and r0,r8 + eor r12,r0 + mov r0,r25 + and r0,r9 + eor r13,r0 + com r2 + com r3 + com r4 + com r5 + eor r6,r2 + eor r7,r3 + eor r8,r4 + eor r9,r5 + eor r2,r10 + eor r3,r11 + eor r4,r12 + eor r5,r13 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r24 + or r0,r8 + eor r12,r0 + mov r0,r25 + or r0,r9 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r8 + and r0,r4 + eor r24,r0 + mov r0,r9 + and r0,r5 + eor r25,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r24 + and r0,r12 + eor r8,r0 + mov r0,r25 + and r0,r13 + eor r9,r0 + dec r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + ld r23,-X + ld r22,-X + ld r21,-X + ld r20,-X + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + ld r23,-X + ld r22,-X + ld r21,-X + ld r20,-X + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + movw r20,r6 + movw r22,r8 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,204 + andi r21,204 + andi r22,204 + andi r23,204 + lsr r9 + ror r8 + ror r7 + ror r6 + lsr r9 + ror r8 + ror r7 + ror r6 + ldi r19,51 + and r6,r19 + and r7,r19 + 
and r8,r19 + and r9,r19 + or r6,r20 + or r7,r21 + or r8,r22 + or r9,r23 + movw r20,r10 + movw r22,r12 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,136 + andi r21,136 + andi r22,136 + andi r23,136 + lsr r13 + ror r12 + ror r11 + ror r10 + ldi r17,119 + and r10,r17 + and r11,r17 + and r12,r17 + and r13,r17 + or r10,r20 + or r11,r21 + or r12,r22 + or r13,r23 + movw r20,r14 + movw r22,r24 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,238 + andi r21,238 + andi r22,238 + andi r23,238 + lsr r25 + ror r24 + ror r15 + ror r14 + lsr r25 + ror r24 + ror r15 + ror r14 + lsr r25 + ror r24 + ror r15 + ror r14 + ldi r16,17 + and r14,r16 + and r15,r16 + andi r24,17 + andi r25,17 + or r14,r20 + or r15,r21 + or r24,r22 + or r25,r23 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + com r14 + com r15 + com r24 + com r25 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + ret +1052: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + bst r2,0 + bld r20,0 + bst r6,0 + bld r20,1 + bst r10,0 + bld r20,2 + bst r14,0 + bld r20,3 + bst r2,1 + bld r20,4 + bst r6,1 + bld r20,5 + bst r10,1 + bld r20,6 + bst r14,1 + bld r20,7 + bst r2,2 + bld r21,0 + bst r6,2 + bld r21,1 + bst r10,2 + bld r21,2 + bst r14,2 + bld r21,3 + bst r2,3 + bld r21,4 + bst r6,3 + bld r21,5 + bst r10,3 + bld r21,6 + bst r14,3 + bld r21,7 + bst r2,4 + bld r22,0 + bst r6,4 + bld r22,1 + bst r10,4 + bld r22,2 + bst r14,4 + bld r22,3 + bst r2,5 + bld r22,4 + bst r6,5 + bld r22,5 + bst r10,5 + bld r22,6 + bst r14,5 + bld r22,7 + bst r2,6 + bld r23,0 + bst r6,6 + bld r23,1 + bst r10,6 + bld r23,2 + bst r14,6 + bld r23,3 + bst r2,7 + bld r23,4 + bst r6,7 + bld r23,5 + bst r10,7 + bld r23,6 + bst r14,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r3,0 + bld r20,0 + bst r7,0 + bld r20,1 + bst r11,0 + bld r20,2 + bst r15,0 + bld r20,3 + bst r3,1 + bld r20,4 + bst r7,1 + bld r20,5 + bst r11,1 + bld r20,6 + bst r15,1 + bld r20,7 + bst r3,2 + bld r21,0 + bst r7,2 + bld r21,1 + bst r11,2 + bld r21,2 + bst r15,2 + bld r21,3 + bst r3,3 + bld r21,4 + bst r7,3 + bld r21,5 + bst r11,3 + bld r21,6 + bst r15,3 + bld r21,7 + bst r3,4 + bld r22,0 + bst r7,4 + bld r22,1 + bst r11,4 + bld r22,2 + bst r15,4 + bld r22,3 + bst r3,5 + bld r22,4 + bst r7,5 + bld r22,5 + bst r11,5 + bld r22,6 + bst r15,5 + bld r22,7 + bst r3,6 + bld r23,0 + bst r7,6 + bld r23,1 + bst r11,6 + bld r23,2 + bst r15,6 + bld r23,3 + bst r3,7 + bld r23,4 + bst r7,7 + bld r23,5 + bst r11,7 + bld r23,6 + bst r15,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r4,0 + bld r20,0 + bst r8,0 + bld r20,1 + bst r12,0 + bld r20,2 + bst r24,0 + bld r20,3 + bst r4,1 + bld r20,4 + bst r8,1 + bld r20,5 + bst r12,1 + bld r20,6 + bst r24,1 + bld r20,7 + bst r4,2 + bld r21,0 + bst r8,2 + bld r21,1 + bst r12,2 + bld r21,2 + bst r24,2 + bld r21,3 + bst r4,3 + 
bld r21,4 + bst r8,3 + bld r21,5 + bst r12,3 + bld r21,6 + bst r24,3 + bld r21,7 + bst r4,4 + bld r22,0 + bst r8,4 + bld r22,1 + bst r12,4 + bld r22,2 + bst r24,4 + bld r22,3 + bst r4,5 + bld r22,4 + bst r8,5 + bld r22,5 + bst r12,5 + bld r22,6 + bst r24,5 + bld r22,7 + bst r4,6 + bld r23,0 + bst r8,6 + bld r23,1 + bst r12,6 + bld r23,2 + bst r24,6 + bld r23,3 + bst r4,7 + bld r23,4 + bst r8,7 + bld r23,5 + bst r12,7 + bld r23,6 + bst r24,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r5,0 + bld r20,0 + bst r9,0 + bld r20,1 + bst r13,0 + bld r20,2 + bst r25,0 + bld r20,3 + bst r5,1 + bld r20,4 + bst r9,1 + bld r20,5 + bst r13,1 + bld r20,6 + bst r25,1 + bld r20,7 + bst r5,2 + bld r21,0 + bst r9,2 + bld r21,1 + bst r13,2 + bld r21,2 + bst r25,2 + bld r21,3 + bst r5,3 + bld r21,4 + bst r9,3 + bld r21,5 + bst r13,3 + bld r21,6 + bst r25,3 + bld r21,7 + bst r5,4 + bld r22,0 + bst r9,4 + bld r22,1 + bst r13,4 + bld r22,2 + bst r25,4 + bld r22,3 + bst r5,5 + bld r22,4 + bst r9,5 + bld r22,5 + bst r13,5 + bld r22,6 + bst r25,5 + bld r22,7 + bst r5,6 + bld r23,0 + bst r9,6 + bld r23,1 + bst r13,6 + bld r23,2 + bst r25,6 + bld r23,3 + bst r5,7 + bld r23,4 + bst r9,7 + bld r23,5 + bst r13,7 + bld r23,6 + bst r25,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + pop r0 + pop r0 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128t_decrypt, .-gift128t_decrypt + +#endif + +#endif diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128n-small-avr.S b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128n-small-avr.S new file mode 100644 index 0000000..6f2d68b --- /dev/null +++ b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128n-small-avr.S @@ -0,0 +1,9331 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + 
.byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 + .byte 128 + + .text +.global gift128n_init + .type gift128n_init, @function +gift128n_init: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 16 + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + ldi r24,4 +33: + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + mov r0,r22 + mov r22,r4 + mov r4,r0 + mov r0,r23 + mov r23,r5 + mov r5,r0 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + dec r24 + breq 5115f + rjmp 33b +5115: + subi r30,80 + sbc r31,r1 + ldi r24,2 +119: + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + 
eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + st Z,r3 + std Z+1,r23 + std Z+2,r2 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+4,r3 + std Z+5,r23 + std Z+6,r2 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std 
Z+8,r3 + std Z+9,r23 + std Z+10,r2 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+12,r3 + std Z+13,r23 + std Z+14,r2 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r3 + std Z+17,r23 + std Z+18,r2 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r18,r22 + movw r20,r2 + mov r0,r1 
+ lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+20,r3 + std Z+21,r23 + std Z+22,r2 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+24,r3 + std Z+25,r23 + std Z+26,r2 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + 
andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+28,r3 + std Z+29,r23 + std Z+30,r2 + std Z+31,r22 + dec r24 + breq 1268f + adiw r30,40 + rjmp 119b +1268: + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size gift128n_init, .-gift128n_init + + .text +.global gift128n_encrypt + .type gift128n_encrypt, @function +gift128n_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ldi r24,20 +1: + ld r22,Z+ + ld r23,Z+ + ld r2,Z+ + ld r3,Z+ + std Y+1,r22 + std Y+2,r23 + std Y+3,r2 + std Y+4,r3 + adiw r28,4 + dec r24 + brne 1b + subi r28,80 + sbc r29,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r22,0 + bst r18,1 + bld r4,0 + bst r18,2 + bld r8,0 + bst r18,3 + bld r12,0 + bst r18,4 + bld r22,1 + bst r18,5 + bld r4,1 + bst r18,6 + bld r8,1 + bst r18,7 + bld r12,1 + bst r19,0 + bld r22,2 + bst r19,1 + bld r4,2 + bst r19,2 + bld r8,2 + bst r19,3 + bld r12,2 + bst r19,4 + bld r22,3 + bst r19,5 + bld r4,3 + bst r19,6 + bld r8,3 + bst r19,7 + bld r12,3 + bst r20,0 + bld r22,4 + bst r20,1 + bld r4,4 + bst r20,2 + bld r8,4 + bst r20,3 + bld r12,4 + bst r20,4 + bld r22,5 + bst r20,5 + bld r4,5 + bst r20,6 + bld r8,5 + bst r20,7 + bld r12,5 + bst r21,0 + bld r22,6 + bst r21,1 + bld r4,6 + bst r21,2 + bld r8,6 + bst r21,3 + bld r12,6 + bst r21,4 + bld r22,7 + bst r21,5 + bld r4,7 + bst r21,6 + bld r8,7 + bst r21,7 + bld r12,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r23,0 + bst r18,1 + bld r5,0 + bst r18,2 + bld r9,0 + bst r18,3 + bld r13,0 + bst r18,4 + bld r23,1 + bst r18,5 + bld r5,1 + bst r18,6 + bld r9,1 + bst r18,7 + bld r13,1 + bst r19,0 + bld r23,2 + bst r19,1 + bld r5,2 + bst r19,2 + bld r9,2 + bst r19,3 + bld r13,2 + bst r19,4 + bld r23,3 + 
bst r19,5 + bld r5,3 + bst r19,6 + bld r9,3 + bst r19,7 + bld r13,3 + bst r20,0 + bld r23,4 + bst r20,1 + bld r5,4 + bst r20,2 + bld r9,4 + bst r20,3 + bld r13,4 + bst r20,4 + bld r23,5 + bst r20,5 + bld r5,5 + bst r20,6 + bld r9,5 + bst r20,7 + bld r13,5 + bst r21,0 + bld r23,6 + bst r21,1 + bld r5,6 + bst r21,2 + bld r9,6 + bst r21,3 + bld r13,6 + bst r21,4 + bld r23,7 + bst r21,5 + bld r5,7 + bst r21,6 + bld r9,7 + bst r21,7 + bld r13,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r2,0 + bst r18,1 + bld r6,0 + bst r18,2 + bld r10,0 + bst r18,3 + bld r14,0 + bst r18,4 + bld r2,1 + bst r18,5 + bld r6,1 + bst r18,6 + bld r10,1 + bst r18,7 + bld r14,1 + bst r19,0 + bld r2,2 + bst r19,1 + bld r6,2 + bst r19,2 + bld r10,2 + bst r19,3 + bld r14,2 + bst r19,4 + bld r2,3 + bst r19,5 + bld r6,3 + bst r19,6 + bld r10,3 + bst r19,7 + bld r14,3 + bst r20,0 + bld r2,4 + bst r20,1 + bld r6,4 + bst r20,2 + bld r10,4 + bst r20,3 + bld r14,4 + bst r20,4 + bld r2,5 + bst r20,5 + bld r6,5 + bst r20,6 + bld r10,5 + bst r20,7 + bld r14,5 + bst r21,0 + bld r2,6 + bst r21,1 + bld r6,6 + bst r21,2 + bld r10,6 + bst r21,3 + bld r14,6 + bst r21,4 + bld r2,7 + bst r21,5 + bld r6,7 + bst r21,6 + bld r10,7 + bst r21,7 + bld r14,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r3,0 + bst r18,1 + bld r7,0 + bst r18,2 + bld r11,0 + bst r18,3 + bld r15,0 + bst r18,4 + bld r3,1 + bst r18,5 + bld r7,1 + bst r18,6 + bld r11,1 + bst r18,7 + bld r15,1 + bst r19,0 + bld r3,2 + bst r19,1 + bld r7,2 + bst r19,2 + bld r11,2 + bst r19,3 + bld r15,2 + bst r19,4 + bld r3,3 + bst r19,5 + bld r7,3 + bst r19,6 + bld r11,3 + bst r19,7 + bld r15,3 + bst r20,0 + bld r3,4 + bst r20,1 + bld r7,4 + bst r20,2 + bld r11,4 + bst r20,3 + bld r15,4 + bst r20,4 + bld r3,5 + bst r20,5 + bld r7,5 + bst r20,6 + bld r11,5 + bst r20,7 + bld r15,5 + bst r21,0 + bld r3,6 + bst r21,1 + bld r7,6 + bst r21,2 + bld r11,6 + bst r21,3 + bld r15,6 + bst r21,4 + bld r3,7 + bst r21,5 + bld r7,7 + bst r21,6 + bld r11,7 + bst r21,7 + bld r15,7 + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + adiw r26,40 + rcall 329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi 
r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 329f + rcall 329f + rjmp 1541f +329: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + 
mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + 
ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + 
com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +1067: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + 
ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std 
Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +1541: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + bst r22,0 + bld r18,0 + bst r4,0 + bld r18,1 + bst r8,0 + bld r18,2 + bst r12,0 + bld r18,3 + bst r22,1 + bld r18,4 + bst r4,1 + bld r18,5 + bst r8,1 + bld r18,6 + bst r12,1 + bld r18,7 + bst r22,2 + bld r19,0 + bst r4,2 + bld r19,1 + bst r8,2 + bld r19,2 + bst r12,2 + bld r19,3 + bst r22,3 + bld r19,4 + bst r4,3 + bld r19,5 + bst r8,3 + bld r19,6 + bst r12,3 + bld r19,7 + bst r22,4 + bld r20,0 + bst r4,4 + bld r20,1 + bst r8,4 + bld r20,2 + bst r12,4 + bld r20,3 + bst r22,5 + bld r20,4 + bst r4,5 + bld r20,5 + bst r8,5 + bld r20,6 + bst r12,5 + bld r20,7 + bst r22,6 + bld r21,0 + bst r4,6 + bld r21,1 + bst r8,6 + bld r21,2 + bst r12,6 + bld r21,3 + bst r22,7 + bld r21,4 + bst r4,7 + bld r21,5 + bst r8,7 + bld r21,6 + bst r12,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r23,0 + bld r18,0 + bst r5,0 + bld r18,1 + bst r9,0 + bld r18,2 + bst r13,0 + bld r18,3 + bst r23,1 + bld r18,4 + bst r5,1 + bld r18,5 + bst r9,1 + bld r18,6 + bst r13,1 + bld r18,7 + bst r23,2 + bld r19,0 + bst r5,2 + bld r19,1 + bst r9,2 + bld r19,2 + bst r13,2 + bld r19,3 + bst r23,3 + bld r19,4 + bst r5,3 + bld r19,5 + bst r9,3 + bld r19,6 + bst r13,3 + bld r19,7 + bst r23,4 + bld r20,0 + bst r5,4 + bld r20,1 + bst r9,4 + bld r20,2 + bst r13,4 + bld r20,3 + bst r23,5 + bld r20,4 + bst r5,5 + bld r20,5 + bst r9,5 + bld r20,6 + bst r13,5 + bld r20,7 + bst r23,6 + bld r21,0 + bst r5,6 + bld r21,1 + bst r9,6 + bld r21,2 + bst r13,6 + bld r21,3 + bst r23,7 + bld r21,4 + bst r5,7 + bld r21,5 + bst r9,7 + bld r21,6 + bst r13,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r2,0 + bld r18,0 + bst r6,0 + bld r18,1 + bst r10,0 + bld r18,2 + bst r14,0 + bld r18,3 + bst r2,1 + bld r18,4 + bst r6,1 + bld r18,5 + bst r10,1 + bld r18,6 + bst r14,1 + bld r18,7 + bst r2,2 + bld r19,0 + bst r6,2 + bld r19,1 + bst r10,2 + bld r19,2 + bst r14,2 + bld r19,3 + bst r2,3 + bld r19,4 + bst r6,3 + bld r19,5 + bst r10,3 + bld r19,6 + bst r14,3 + bld r19,7 + bst r2,4 + bld r20,0 + bst r6,4 + bld r20,1 + bst r10,4 + bld r20,2 + bst r14,4 + bld r20,3 + bst r2,5 + bld r20,4 + bst r6,5 + bld r20,5 + bst r10,5 + bld r20,6 + bst r14,5 + bld r20,7 + bst r2,6 + bld r21,0 + bst r6,6 + bld r21,1 + bst r10,6 + bld r21,2 + bst r14,6 + bld r21,3 + bst r2,7 + bld r21,4 + bst r6,7 + bld r21,5 + bst r10,7 + bld r21,6 + bst r14,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r3,0 + bld r18,0 + bst r7,0 + bld r18,1 + bst r11,0 + bld r18,2 + bst r15,0 + bld r18,3 + bst r3,1 + bld r18,4 + bst r7,1 + bld r18,5 + bst r11,1 + bld r18,6 + bst r15,1 + bld r18,7 + bst r3,2 + bld r19,0 + bst r7,2 + bld r19,1 + bst r11,2 + bld r19,2 + bst r15,2 + bld r19,3 + bst r3,3 + bld r19,4 + bst r7,3 + bld r19,5 + bst r11,3 + bld r19,6 + bst r15,3 + bld r19,7 + bst r3,4 + bld r20,0 + bst r7,4 + bld r20,1 + bst r11,4 + bld r20,2 + bst r15,4 + bld r20,3 + bst r3,5 + bld r20,4 + bst r7,5 + bld r20,5 + bst r11,5 + bld r20,6 + bst r15,5 + bld r20,7 + bst r3,6 + bld r21,0 + bst r7,6 + bld r21,1 + bst r11,6 + bld r21,2 + bst r15,6 + bld r21,3 + bst r3,7 + bld r21,4 + bst r7,7 + bld r21,5 + bst r11,7 + bld r21,6 + bst r15,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 
0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128n_encrypt, .-gift128n_encrypt + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 40 +table_1: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 11 + .byte 23 + .byte 46 + .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128n_decrypt + .type gift128n_decrypt, @function +gift128n_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r22,0 + bst r18,1 + bld r4,0 + bst r18,2 + bld r8,0 + bst r18,3 + bld r12,0 + bst r18,4 + bld r22,1 + bst r18,5 + bld r4,1 + bst r18,6 + bld r8,1 + bst r18,7 + bld r12,1 + bst r19,0 + bld r22,2 + bst r19,1 + bld r4,2 + bst r19,2 + bld r8,2 + bst r19,3 + bld r12,2 + bst r19,4 + bld r22,3 + bst r19,5 + bld r4,3 + bst r19,6 + bld r8,3 + bst r19,7 + bld r12,3 + bst r20,0 + bld r22,4 + bst r20,1 + bld r4,4 + bst r20,2 + bld r8,4 + bst r20,3 + bld r12,4 + bst r20,4 + bld r22,5 + bst r20,5 + bld r4,5 + bst r20,6 + bld r8,5 + bst r20,7 + bld r12,5 + bst r21,0 + bld r22,6 + bst r21,1 + bld r4,6 + bst r21,2 + bld r8,6 + bst r21,3 + bld r12,6 + bst r21,4 + bld r22,7 + bst r21,5 + bld r4,7 + bst r21,6 + bld r8,7 + bst r21,7 + bld r12,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r23,0 + bst r18,1 + bld r5,0 + bst r18,2 + bld r9,0 + bst r18,3 + bld r13,0 + bst r18,4 + bld r23,1 + bst r18,5 + bld r5,1 + bst r18,6 + bld r9,1 + bst r18,7 + bld r13,1 + bst r19,0 + bld r23,2 + bst r19,1 + bld r5,2 + bst r19,2 + bld r9,2 + bst r19,3 + bld r13,2 + bst r19,4 + bld r23,3 + bst r19,5 + bld r5,3 + bst r19,6 + bld r9,3 + bst r19,7 + bld r13,3 + bst r20,0 + bld r23,4 + bst r20,1 + bld r5,4 + bst r20,2 + bld r9,4 + bst r20,3 + bld r13,4 + bst r20,4 + bld r23,5 + bst r20,5 + bld r5,5 + bst r20,6 + bld r9,5 + bst r20,7 + bld r13,5 + bst r21,0 + bld r23,6 + bst r21,1 + bld r5,6 + bst r21,2 + bld r9,6 + bst r21,3 + bld r13,6 + bst r21,4 + bld r23,7 + bst r21,5 + bld r5,7 + bst r21,6 + bld r9,7 + bst r21,7 + bld r13,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r2,0 + bst r18,1 + bld r6,0 + bst r18,2 + bld r10,0 + bst r18,3 + bld r14,0 + bst r18,4 + bld r2,1 + bst r18,5 + bld r6,1 + bst r18,6 + bld r10,1 + bst r18,7 + bld r14,1 + bst r19,0 + bld r2,2 + bst r19,1 + bld r6,2 + bst r19,2 + bld r10,2 + bst r19,3 + bld r14,2 + bst r19,4 + bld r2,3 + bst r19,5 + bld r6,3 + bst r19,6 + bld r10,3 + bst r19,7 + bld r14,3 + bst r20,0 + bld r2,4 + bst r20,1 + bld r6,4 + bst r20,2 + bld r10,4 + bst r20,3 + bld r14,4 + bst r20,4 + bld r2,5 + bst r20,5 + bld r6,5 + bst r20,6 + bld r10,5 + bst r20,7 + bld r14,5 + bst r21,0 + bld r2,6 + bst r21,1 + bld r6,6 + bst r21,2 + bld r10,6 + bst r21,3 + bld 
r14,6 + bst r21,4 + bld r2,7 + bst r21,5 + bld r6,7 + bst r21,6 + bld r10,7 + bst r21,7 + bld r14,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r3,0 + bst r18,1 + bld r7,0 + bst r18,2 + bld r11,0 + bst r18,3 + bld r15,0 + bst r18,4 + bld r3,1 + bst r18,5 + bld r7,1 + bst r18,6 + bld r11,1 + bst r18,7 + bld r15,1 + bst r19,0 + bld r3,2 + bst r19,1 + bld r7,2 + bst r19,2 + bld r11,2 + bst r19,3 + bld r15,2 + bst r19,4 + bld r3,3 + bst r19,5 + bld r7,3 + bst r19,6 + bld r11,3 + bst r19,7 + bld r15,3 + bst r20,0 + bld r3,4 + bst r20,1 + bld r7,4 + bst r20,2 + bld r11,4 + bst r20,3 + bld r15,4 + bst r20,4 + bld r3,5 + bst r20,5 + bld r7,5 + bst r20,6 + bld r11,5 + bst r20,7 + bld r15,5 + bst r21,0 + bld r3,6 + bst r21,1 + bld r7,6 + bst r21,2 + bld r11,6 + bst r21,3 + bld r15,6 + bst r21,4 + bld r3,7 + bst r21,5 + bld r7,7 + bst r21,6 + bld r11,7 + bst r21,7 + bld r15,7 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw 
r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 
+ movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r17,hh8(table_1) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +934: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 1086f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 1086f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 1086f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + 
ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 1086f + cpse r16,r1 + rjmp 934b + rjmp 1431f +1086: + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 + bst r7,6 + bld r7,2 + bst r7,7 + bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld r11,0 + bst r8,5 + bld r10,6 + bst r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw 
r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +1431: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + bst r22,0 + bld r18,0 + bst r4,0 + bld r18,1 + bst r8,0 + bld r18,2 + bst r12,0 + bld r18,3 + bst r22,1 + bld r18,4 + bst r4,1 + bld r18,5 + bst r8,1 + bld r18,6 + bst r12,1 + bld r18,7 + bst r22,2 + bld r19,0 + bst r4,2 + bld r19,1 + bst r8,2 + bld r19,2 + bst r12,2 + bld r19,3 + bst r22,3 + bld r19,4 + bst r4,3 + bld r19,5 + bst r8,3 + bld r19,6 + bst r12,3 + bld r19,7 + bst r22,4 + bld r20,0 + bst r4,4 + bld r20,1 + bst r8,4 + bld r20,2 + bst r12,4 + bld r20,3 + bst r22,5 + bld r20,4 + bst r4,5 + bld r20,5 + bst r8,5 + bld r20,6 + bst r12,5 + bld r20,7 + bst r22,6 + bld r21,0 + bst r4,6 + bld r21,1 + bst r8,6 + bld r21,2 + bst r12,6 + bld r21,3 + bst r22,7 + bld r21,4 + bst r4,7 + bld r21,5 + bst r8,7 + bld r21,6 + bst r12,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r23,0 + bld r18,0 + bst r5,0 + bld r18,1 + bst r9,0 + bld r18,2 + bst r13,0 + bld r18,3 + bst r23,1 + bld r18,4 + bst r5,1 + bld r18,5 + bst r9,1 + bld r18,6 + bst r13,1 + bld r18,7 + bst r23,2 + bld r19,0 + bst r5,2 + bld r19,1 + bst r9,2 + bld r19,2 + bst r13,2 + bld r19,3 + bst r23,3 + bld r19,4 + bst r5,3 + bld r19,5 + bst r9,3 + bld r19,6 + bst r13,3 + bld r19,7 + bst r23,4 + bld r20,0 + bst r5,4 + bld r20,1 + bst r9,4 + bld r20,2 + bst r13,4 + bld r20,3 + bst r23,5 + bld r20,4 + bst r5,5 + bld r20,5 + bst r9,5 + bld r20,6 + bst r13,5 + bld r20,7 + bst r23,6 + bld r21,0 + bst r5,6 + bld r21,1 + bst r9,6 + bld r21,2 + bst r13,6 + bld r21,3 + bst r23,7 + bld r21,4 + bst r5,7 + bld r21,5 + bst r9,7 + bld r21,6 + bst r13,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r2,0 + bld r18,0 + bst r6,0 + bld r18,1 + bst r10,0 + bld r18,2 + bst r14,0 + bld r18,3 + bst r2,1 + bld r18,4 + bst r6,1 + bld r18,5 + bst r10,1 + bld r18,6 + bst r14,1 + bld r18,7 + bst r2,2 + bld r19,0 + bst r6,2 + bld r19,1 + bst r10,2 + bld r19,2 + bst r14,2 + bld r19,3 + bst r2,3 + bld r19,4 + bst r6,3 + bld r19,5 + bst r10,3 + bld r19,6 + bst r14,3 + bld r19,7 + bst r2,4 + bld r20,0 + bst r6,4 + bld r20,1 + bst r10,4 + bld r20,2 + bst r14,4 + bld r20,3 + bst r2,5 + bld r20,4 + bst r6,5 + bld r20,5 + bst r10,5 + bld r20,6 + bst r14,5 + bld r20,7 + bst r2,6 + bld r21,0 + bst r6,6 + bld r21,1 + bst r10,6 + bld r21,2 + bst r14,6 + bld r21,3 + bst r2,7 + bld r21,4 + bst r6,7 + bld r21,5 + bst r10,7 + bld r21,6 + bst r14,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r3,0 + bld r18,0 + bst r7,0 + bld r18,1 + bst r11,0 + bld r18,2 + bst r15,0 + bld r18,3 + bst r3,1 + bld r18,4 + bst r7,1 + bld r18,5 + bst r11,1 + bld r18,6 + bst r15,1 + bld r18,7 + bst r3,2 + bld r19,0 + bst r7,2 + bld r19,1 + bst r11,2 + bld r19,2 + bst r15,2 + bld r19,3 + 
bst r3,3 + bld r19,4 + bst r7,3 + bld r19,5 + bst r11,3 + bld r19,6 + bst r15,3 + bld r19,7 + bst r3,4 + bld r20,0 + bst r7,4 + bld r20,1 + bst r11,4 + bld r20,2 + bst r15,4 + bld r20,3 + bst r3,5 + bld r20,4 + bst r7,5 + bld r20,5 + bst r11,5 + bld r20,6 + bst r15,5 + bld r20,7 + bst r3,6 + bld r21,0 + bst r7,6 + bld r21,1 + bst r11,6 + bld r21,2 + bst r15,6 + bld r21,3 + bst r3,7 + bld r21,4 + bst r7,7 + bld r21,5 + bst r11,7 + bld r21,6 + bst r15,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128n_decrypt, .-gift128n_decrypt + + .text +.global gift128t_encrypt + .type gift128t_encrypt, @function +gift128t_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ldi r19,20 +1: + ld r2,Z+ + ld r3,Z+ + ld r4,Z+ + ld r5,Z+ + std Y+1,r2 + std Y+2,r3 + std Y+3,r4 + std Y+4,r5 + adiw r28,4 + dec r19 + brne 1b + subi r28,80 + sbc r29,r1 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r2,0 + bst r20,1 + bld r6,0 + bst r20,2 + bld r10,0 + bst r20,3 + bld r14,0 + bst r20,4 + bld r2,1 + bst r20,5 + bld r6,1 + bst r20,6 + bld r10,1 + bst r20,7 + bld r14,1 + bst r21,0 + bld r2,2 + bst r21,1 + bld r6,2 + bst r21,2 + bld r10,2 + bst r21,3 + bld r14,2 + bst r21,4 + bld r2,3 + bst r21,5 + bld r6,3 + bst r21,6 + bld r10,3 + bst r21,7 + bld r14,3 + bst r22,0 + bld r2,4 + bst r22,1 + bld r6,4 + bst r22,2 + bld r10,4 + bst r22,3 + bld r14,4 + bst r22,4 + bld r2,5 + bst r22,5 + bld r6,5 + bst r22,6 + bld r10,5 + bst r22,7 + bld r14,5 + bst r23,0 + bld r2,6 + bst r23,1 + bld r6,6 + bst r23,2 + bld r10,6 + bst r23,3 + bld r14,6 + bst r23,4 + bld r2,7 + bst r23,5 + bld r6,7 + bst r23,6 + bld r10,7 + bst r23,7 + bld r14,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r3,0 + bst r20,1 + bld r7,0 + bst r20,2 + bld r11,0 + bst r20,3 + bld r15,0 + bst r20,4 + bld r3,1 + bst r20,5 + bld r7,1 + bst r20,6 + bld r11,1 + bst r20,7 + bld r15,1 + bst r21,0 + bld r3,2 + bst r21,1 + bld r7,2 + bst r21,2 + bld r11,2 + bst r21,3 + bld r15,2 + bst r21,4 + bld r3,3 + bst r21,5 + bld r7,3 + bst r21,6 + bld r11,3 + bst r21,7 + bld r15,3 + bst r22,0 + bld r3,4 + bst r22,1 + bld r7,4 + bst r22,2 + bld r11,4 + bst r22,3 + bld r15,4 + bst r22,4 + bld r3,5 + bst r22,5 + bld r7,5 + bst r22,6 + bld r11,5 + bst r22,7 + bld r15,5 + bst r23,0 + bld r3,6 + bst r23,1 + bld r7,6 + bst r23,2 + bld r11,6 + bst r23,3 + bld r15,6 + bst r23,4 + bld r3,7 + bst r23,5 + bld r7,7 + bst r23,6 + bld r11,7 + bst r23,7 + bld r15,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r4,0 + bst r20,1 + bld r8,0 + bst r20,2 + bld r12,0 + bst r20,3 + bld r24,0 + bst r20,4 + bld r4,1 + bst r20,5 + bld r8,1 + bst r20,6 + bld r12,1 + bst r20,7 + bld r24,1 + bst r21,0 + bld r4,2 + bst r21,1 + bld r8,2 + bst r21,2 + bld r12,2 + bst r21,3 + bld r24,2 + bst r21,4 + bld r4,3 + bst r21,5 + bld r8,3 + bst r21,6 + bld r12,3 + bst r21,7 + bld r24,3 + bst r22,0 + bld r4,4 + bst r22,1 + bld r8,4 + bst r22,2 + bld r12,4 + bst 
r22,3 + bld r24,4 + bst r22,4 + bld r4,5 + bst r22,5 + bld r8,5 + bst r22,6 + bld r12,5 + bst r22,7 + bld r24,5 + bst r23,0 + bld r4,6 + bst r23,1 + bld r8,6 + bst r23,2 + bld r12,6 + bst r23,3 + bld r24,6 + bst r23,4 + bld r4,7 + bst r23,5 + bld r8,7 + bst r23,6 + bld r12,7 + bst r23,7 + bld r24,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r5,0 + bst r20,1 + bld r9,0 + bst r20,2 + bld r13,0 + bst r20,3 + bld r25,0 + bst r20,4 + bld r5,1 + bst r20,5 + bld r9,1 + bst r20,6 + bld r13,1 + bst r20,7 + bld r25,1 + bst r21,0 + bld r5,2 + bst r21,1 + bld r9,2 + bst r21,2 + bld r13,2 + bst r21,3 + bld r25,2 + bst r21,4 + bld r5,3 + bst r21,5 + bld r9,3 + bst r21,6 + bld r13,3 + bst r21,7 + bld r25,3 + bst r22,0 + bld r5,4 + bst r22,1 + bld r9,4 + bst r22,2 + bld r13,4 + bst r22,3 + bld r25,4 + bst r22,4 + bld r5,5 + bst r22,5 + bld r9,5 + bst r22,6 + bld r13,5 + bst r22,7 + bld r25,5 + bst r23,0 + bld r5,6 + bst r23,1 + bld r9,6 + bst r23,2 + bld r13,6 + bst r23,3 + bld r25,6 + bst r23,4 + bld r5,7 + bst r23,5 + bld r9,7 + bst r23,6 + bld r13,7 + bst r23,7 + bld r25,7 + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + rcall 357f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1095f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,20 + adiw r26,40 + rcall 357f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1095f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,40 + sbiw r26,40 + rcall 357f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1095f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,60 + adiw r26,40 + rcall 357f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1095f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,80 + sbiw r26,40 + rcall 357f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1095f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,100 + adiw r26,40 + rcall 357f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1095f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,120 + sbiw r26,40 + rcall 357f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 357f + rjmp 1570f +357: + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + 
eor r9,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + com r14 + com r15 + com r24 + com r25 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + movw r20,r6 + movw r22,r8 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,204 + andi r21,204 + andi r22,204 + andi r23,204 + lsr r9 + ror r8 + ror r7 + ror r6 + lsr r9 + ror r8 + ror r7 + ror r6 + ldi r19,51 + and r6,r19 + and r7,r19 + and r8,r19 + and r9,r19 + or r6,r20 + or r7,r21 + or r8,r22 + or r9,r23 + movw r20,r10 + movw r22,r12 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,238 + andi r21,238 + andi r22,238 + andi r23,238 + lsr r13 + ror r12 + ror r11 + ror r10 + lsr r13 + ror r12 + ror r11 + ror r10 + lsr r13 + ror r12 + ror r11 + ror r10 + ldi r17,17 + and r10,r17 + and r11,r17 + and r12,r17 + and r13,r17 + or r10,r20 + or r11,r21 + or r12,r22 + or r13,r23 + movw r20,r14 + movw r22,r24 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,136 + andi r21,136 + andi r22,136 + andi r23,136 + lsr r25 + ror r24 + ror r15 + ror r14 + ldi r16,119 + and r14,r16 + and r15,r16 + andi r24,119 + andi r25,119 + or r14,r20 + or r15,r21 + or r24,r22 + or r25,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r24 + and r0,r12 + eor r8,r0 + mov r0,r25 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r8 + and r0,r4 + eor r24,r0 + mov r0,r9 + and r0,r5 + eor r25,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r24 + or r0,r8 + eor r12,r0 + mov r0,r25 + or r0,r9 + eor r13,r0 + eor r2,r10 + eor r3,r11 + eor r4,r12 + eor r5,r13 + eor r6,r2 + eor r7,r3 + eor r8,r4 + eor r9,r5 + com r2 + com r3 + com r4 + com r5 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r24 + and r0,r8 + eor r12,r0 + mov r0,r25 + and r0,r9 + eor r13,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r1 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 
+ ror r0 + lsr r5 + ror r4 + ror r0 + or r5,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + mov r0,r9 + mov r9,r8 + mov r8,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r12 + rol r13 + adc r12,r1 + lsl r12 + rol r13 + adc r12,r1 + lsl r12 + rol r13 + adc r12,r1 + lsl r12 + rol r13 + adc r12,r1 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r14,r20 + eor r15,r21 + eor r24,r22 + eor r25,r23 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + com r14 + com r15 + com r24 + com r25 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + movw r20,r6 + movw r22,r8 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + andi r20,85 + andi r21,85 + andi r22,85 + andi r23,85 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + mov r0,r12 + mov r12,r10 + mov r10,r0 + mov r0,r13 + mov r13,r11 + mov r11,r0 + movw r20,r10 + movw r22,r12 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r10 + eor r21,r11 + andi r20,85 + andi r21,85 + eor r10,r20 + eor r11,r21 + mov r22,r1 + mov r23,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + mov r0,r24 + mov r24,r14 + mov r14,r0 + mov r0,r25 + mov r25,r15 + mov r15,r0 + movw r20,r24 + lsr r21 + ror r20 + eor r20,r24 + eor r21,r25 + andi r20,85 + andi r21,85 + eor r24,r20 + eor r25,r21 + lsl r20 + rol r21 + eor r24,r20 + eor r25,r21 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + 
lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r24 + and r0,r12 + eor r8,r0 + mov r0,r25 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r8 + and r0,r4 + eor r24,r0 + mov r0,r9 + and r0,r5 + eor r25,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r24 + or r0,r8 + eor r12,r0 + mov r0,r25 + or r0,r9 + eor r13,r0 + eor r2,r10 + eor r3,r11 + eor r4,r12 + eor r5,r13 + eor r6,r2 + eor r7,r3 + eor r8,r4 + eor r9,r5 + com r2 + com r3 + com r4 + com r5 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r24 + and r0,r8 + eor r12,r0 + mov r0,r25 + and r0,r9 + eor r13,r0 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r5 + adc r5,r1 + lsl r5 + adc r5,r1 + swap r6 + swap r7 + swap r8 + swap r9 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + mov r0,r1 + lsr r12 + ror r0 + lsr r12 + ror r0 + or r12,r0 + mov r0,r1 + lsr r13 + ror r0 + lsr r13 + ror r0 + or r13,r0 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r14,r20 + eor r15,r21 + eor r24,r22 + eor r25,r23 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + com r14 + com r15 + com r24 + com r25 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + mov r0,r8 + mov r8,r6 + mov r6,r0 + mov r0,r9 + mov r9,r7 + mov r7,r0 + mov r0,r10 + mov r10,r11 + mov r11,r12 + mov r12,r13 + mov r13,r0 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor 
r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + eor r14,r2 + eor r15,r3 + eor r24,r4 + eor r25,r5 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + ret +1095: + movw r30,r26 + sbiw r30,40 + push r5 + push r4 + push r3 + push r2 + push r9 + push r8 + push r7 + push r6 + ld r2,Z + ldd r3,Z+1 + ldd r4,Z+2 + ldd r5,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r16,Z+6 + ldd r17,Z+7 + movw r20,r26 + movw r22,r16 + movw r20,r22 + mov r22,r1 + mov r23,r1 + eor r20,r26 + eor r21,r27 + andi r20,51 + andi r21,51 + eor r26,r20 + eor r27,r21 + mov r22,r1 + mov r23,r1 + movw r22,r20 + mov r20,r1 + mov r21,r1 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,68 + andi r21,68 + andi r22,85 + andi r23,85 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + st Z,r26 + std Z+1,r27 + std Z+2,r16 + std Z+3,r17 + movw r20,r2 + movw r22,r4 + andi r20,51 + andi r21,51 + andi r22,51 + andi r23,51 + ldi r19,204 + and r2,r19 + and r3,r19 + and r4,r19 + and r5,r19 + or r4,r23 + or r5,r20 + or r2,r21 + or r3,r22 + movw r20,r4 + movw r22,r2 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r4 + eor r21,r5 + eor r22,r2 + eor r23,r3 + mov r20,r1 + andi r21,17 + andi r22,85 + andi r23,85 + eor r4,r20 + eor r5,r21 + eor r2,r22 + eor r3,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r4,r20 + eor r5,r21 + eor r2,r22 + eor r3,r23 + std Z+4,r4 + std Z+5,r5 + std Z+6,r2 + std Z+7,r3 + ldd r2,Z+8 + ldd r3,Z+9 + ldd r4,Z+10 + ldd r5,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r16,Z+14 + ldd r17,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r16 + adc r16,r1 + lsl r16 + adc r16,r1 + swap r17 + std Z+8,r26 + std Z+9,r27 + std Z+10,r16 + std Z+11,r17 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r5 + adc r5,r1 + lsl r5 + adc r5,r1 + std Z+12,r2 + std Z+13,r3 + std Z+14,r4 + std Z+15,r5 + ldd r2,Z+16 + ldd r3,Z+17 + ldd r4,Z+18 + ldd r5,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r16,Z+22 + ldd r17,Z+23 + movw r20,r26 + movw r22,r16 + andi r20,170 + andi r21,170 + andi r22,170 + andi r23,170 + andi r26,85 + andi r27,85 + andi r16,85 + andi r17,85 + or r26,r21 + or r27,r22 + or r16,r23 + or r17,r20 + std Z+16,r16 + std Z+17,r17 + std Z+18,r26 + std Z+19,r27 + movw r20,r2 + movw r22,r4 + andi r20,85 + andi r21,85 + andi r22,85 + andi r23,85 + ldi r19,170 + and r2,r19 + and r3,r19 + and r4,r19 + and r5,r19 + lsl r2 + rol r3 + rol r4 + rol r5 + adc r2,r1 + lsl r2 + rol r3 + rol r4 + rol r5 + adc r2,r1 + lsl r2 + rol r3 + rol r4 + rol r5 + adc r2,r1 + lsl 
r2 + rol r3 + rol r4 + rol r5 + adc r2,r1 + or r2,r20 + or r3,r21 + or r4,r22 + or r5,r23 + std Z+20,r5 + std Z+21,r2 + std Z+22,r3 + std Z+23,r4 + ldd r2,Z+24 + ldd r3,Z+25 + ldd r4,Z+26 + ldd r5,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r16,Z+30 + ldd r17,Z+31 + movw r20,r26 + movw r22,r16 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,3 + andi r21,3 + andi r22,3 + andi r23,3 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + lsr r23 + ror r22 + ror r21 + ror r20 + andi r20,120 + andi r21,120 + andi r22,120 + andi r23,120 + movw r6,r20 + movw r8,r22 + lsr r9 + ror r8 + ror r7 + ror r6 + lsr r9 + ror r8 + ror r7 + ror r6 + lsr r9 + ror r8 + ror r7 + ror r6 + lsr r9 + ror r8 + ror r7 + ror r6 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ldi r19,8 + and r6,r19 + and r7,r19 + and r8,r19 + and r9,r19 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + lsl r6 + rol r7 + rol r8 + rol r9 + lsl r6 + rol r7 + rol r8 + rol r9 + lsl r6 + rol r7 + rol r8 + rol r9 + lsl r6 + rol r7 + rol r8 + rol r9 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + andi r26,15 + andi r27,15 + andi r16,15 + andi r17,15 + or r26,r20 + or r27,r21 + or r16,r22 + or r17,r23 + std Z+24,r26 + std Z+25,r27 + std Z+26,r16 + std Z+27,r17 + movw r20,r4 + lsr r21 + ror r20 + lsr r21 + ror r20 + andi r20,48 + andi r21,48 + movw r26,r2 + movw r16,r4 + andi r26,1 + andi r27,1 + andi r16,1 + andi r17,1 + lsl r26 + rol r27 + rol r16 + rol r17 + lsl r26 + rol r27 + rol r16 + rol r17 + lsl r26 + rol r27 + rol r16 + rol r17 + or r26,r20 + or r27,r21 + movw r20,r4 + lsl r20 + rol r21 + lsl r20 + rol r21 + andi r20,192 + andi r21,192 + or r26,r20 + or r27,r21 + movw r20,r2 + andi r20,224 + andi r21,224 + lsr r21 + ror r20 + or r16,r20 + or r17,r21 + movw r20,r2 + movw r22,r4 + lsr r23 + ror r22 + ror r21 + ror r20 + andi r20,7 + andi r21,7 + andi r22,7 + andi r23,7 + or r26,r20 + or r27,r21 + or r16,r22 + or r17,r23 + ldi r19,16 + and r2,r19 + and r3,r19 + lsl r2 + rol r3 + lsl r2 + rol r3 + lsl r2 + rol r3 + or r16,r2 + or r17,r3 + std Z+28,r26 + std Z+29,r27 + std Z+30,r16 + std Z+31,r17 + ldd r2,Z+32 + ldd r3,Z+33 + ldd r4,Z+34 + ldd r5,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r16,Z+38 + ldd r17,Z+39 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r16 + std Z+35,r17 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r4 + mov r4,r5 + mov r5,r0 + lsl r4 + rol r5 + adc r4,r1 + lsl r4 + rol r5 + adc r4,r1 + std Z+36,r2 + std Z+37,r3 + std Z+38,r4 + std Z+39,r5 + pop r6 + pop r7 + pop r8 + pop r9 + pop r2 + pop r3 + pop r4 + pop r5 + movw r26,r30 + ret +1570: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + bst r2,0 + bld r20,0 + bst r6,0 + bld r20,1 + bst r10,0 + bld r20,2 + bst r14,0 + bld r20,3 + bst r2,1 + bld r20,4 + bst r6,1 + bld r20,5 + bst r10,1 + bld r20,6 + bst r14,1 + bld r20,7 + bst r2,2 + bld r21,0 + bst r6,2 + bld r21,1 + bst r10,2 + bld r21,2 + bst r14,2 + bld r21,3 + bst r2,3 + bld r21,4 + bst r6,3 + bld r21,5 + bst r10,3 + bld 
r21,6 + bst r14,3 + bld r21,7 + bst r2,4 + bld r22,0 + bst r6,4 + bld r22,1 + bst r10,4 + bld r22,2 + bst r14,4 + bld r22,3 + bst r2,5 + bld r22,4 + bst r6,5 + bld r22,5 + bst r10,5 + bld r22,6 + bst r14,5 + bld r22,7 + bst r2,6 + bld r23,0 + bst r6,6 + bld r23,1 + bst r10,6 + bld r23,2 + bst r14,6 + bld r23,3 + bst r2,7 + bld r23,4 + bst r6,7 + bld r23,5 + bst r10,7 + bld r23,6 + bst r14,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r3,0 + bld r20,0 + bst r7,0 + bld r20,1 + bst r11,0 + bld r20,2 + bst r15,0 + bld r20,3 + bst r3,1 + bld r20,4 + bst r7,1 + bld r20,5 + bst r11,1 + bld r20,6 + bst r15,1 + bld r20,7 + bst r3,2 + bld r21,0 + bst r7,2 + bld r21,1 + bst r11,2 + bld r21,2 + bst r15,2 + bld r21,3 + bst r3,3 + bld r21,4 + bst r7,3 + bld r21,5 + bst r11,3 + bld r21,6 + bst r15,3 + bld r21,7 + bst r3,4 + bld r22,0 + bst r7,4 + bld r22,1 + bst r11,4 + bld r22,2 + bst r15,4 + bld r22,3 + bst r3,5 + bld r22,4 + bst r7,5 + bld r22,5 + bst r11,5 + bld r22,6 + bst r15,5 + bld r22,7 + bst r3,6 + bld r23,0 + bst r7,6 + bld r23,1 + bst r11,6 + bld r23,2 + bst r15,6 + bld r23,3 + bst r3,7 + bld r23,4 + bst r7,7 + bld r23,5 + bst r11,7 + bld r23,6 + bst r15,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r4,0 + bld r20,0 + bst r8,0 + bld r20,1 + bst r12,0 + bld r20,2 + bst r24,0 + bld r20,3 + bst r4,1 + bld r20,4 + bst r8,1 + bld r20,5 + bst r12,1 + bld r20,6 + bst r24,1 + bld r20,7 + bst r4,2 + bld r21,0 + bst r8,2 + bld r21,1 + bst r12,2 + bld r21,2 + bst r24,2 + bld r21,3 + bst r4,3 + bld r21,4 + bst r8,3 + bld r21,5 + bst r12,3 + bld r21,6 + bst r24,3 + bld r21,7 + bst r4,4 + bld r22,0 + bst r8,4 + bld r22,1 + bst r12,4 + bld r22,2 + bst r24,4 + bld r22,3 + bst r4,5 + bld r22,4 + bst r8,5 + bld r22,5 + bst r12,5 + bld r22,6 + bst r24,5 + bld r22,7 + bst r4,6 + bld r23,0 + bst r8,6 + bld r23,1 + bst r12,6 + bld r23,2 + bst r24,6 + bld r23,3 + bst r4,7 + bld r23,4 + bst r8,7 + bld r23,5 + bst r12,7 + bld r23,6 + bst r24,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r5,0 + bld r20,0 + bst r9,0 + bld r20,1 + bst r13,0 + bld r20,2 + bst r25,0 + bld r20,3 + bst r5,1 + bld r20,4 + bst r9,1 + bld r20,5 + bst r13,1 + bld r20,6 + bst r25,1 + bld r20,7 + bst r5,2 + bld r21,0 + bst r9,2 + bld r21,1 + bst r13,2 + bld r21,2 + bst r25,2 + bld r21,3 + bst r5,3 + bld r21,4 + bst r9,3 + bld r21,5 + bst r13,3 + bld r21,6 + bst r25,3 + bld r21,7 + bst r5,4 + bld r22,0 + bst r9,4 + bld r22,1 + bst r13,4 + bld r22,2 + bst r25,4 + bld r22,3 + bst r5,5 + bld r22,4 + bst r9,5 + bld r22,5 + bst r13,5 + bld r22,6 + bst r25,5 + bld r22,7 + bst r5,6 + bld r23,0 + bst r9,6 + bld r23,1 + bst r13,6 + bld r23,2 + bst r25,6 + bld r23,3 + bst r5,7 + bld r23,4 + bst r9,7 + bld r23,5 + bst r13,7 + bld r23,6 + bst r25,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128t_encrypt, .-gift128t_encrypt + + .text +.global gift128t_decrypt + .type gift128t_decrypt, @function +gift128t_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in 
r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 36 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r2,0 + bst r20,1 + bld r6,0 + bst r20,2 + bld r10,0 + bst r20,3 + bld r14,0 + bst r20,4 + bld r2,1 + bst r20,5 + bld r6,1 + bst r20,6 + bld r10,1 + bst r20,7 + bld r14,1 + bst r21,0 + bld r2,2 + bst r21,1 + bld r6,2 + bst r21,2 + bld r10,2 + bst r21,3 + bld r14,2 + bst r21,4 + bld r2,3 + bst r21,5 + bld r6,3 + bst r21,6 + bld r10,3 + bst r21,7 + bld r14,3 + bst r22,0 + bld r2,4 + bst r22,1 + bld r6,4 + bst r22,2 + bld r10,4 + bst r22,3 + bld r14,4 + bst r22,4 + bld r2,5 + bst r22,5 + bld r6,5 + bst r22,6 + bld r10,5 + bst r22,7 + bld r14,5 + bst r23,0 + bld r2,6 + bst r23,1 + bld r6,6 + bst r23,2 + bld r10,6 + bst r23,3 + bld r14,6 + bst r23,4 + bld r2,7 + bst r23,5 + bld r6,7 + bst r23,6 + bld r10,7 + bst r23,7 + bld r14,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r3,0 + bst r20,1 + bld r7,0 + bst r20,2 + bld r11,0 + bst r20,3 + bld r15,0 + bst r20,4 + bld r3,1 + bst r20,5 + bld r7,1 + bst r20,6 + bld r11,1 + bst r20,7 + bld r15,1 + bst r21,0 + bld r3,2 + bst r21,1 + bld r7,2 + bst r21,2 + bld r11,2 + bst r21,3 + bld r15,2 + bst r21,4 + bld r3,3 + bst r21,5 + bld r7,3 + bst r21,6 + bld r11,3 + bst r21,7 + bld r15,3 + bst r22,0 + bld r3,4 + bst r22,1 + bld r7,4 + bst r22,2 + bld r11,4 + bst r22,3 + bld r15,4 + bst r22,4 + bld r3,5 + bst r22,5 + bld r7,5 + bst r22,6 + bld r11,5 + bst r22,7 + bld r15,5 + bst r23,0 + bld r3,6 + bst r23,1 + bld r7,6 + bst r23,2 + bld r11,6 + bst r23,3 + bld r15,6 + bst r23,4 + bld r3,7 + bst r23,5 + bld r7,7 + bst r23,6 + bld r11,7 + bst r23,7 + bld r15,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r4,0 + bst r20,1 + bld r8,0 + bst r20,2 + bld r12,0 + bst r20,3 + bld r24,0 + bst r20,4 + bld r4,1 + bst r20,5 + bld r8,1 + bst r20,6 + bld r12,1 + bst r20,7 + bld r24,1 + bst r21,0 + bld r4,2 + bst r21,1 + bld r8,2 + bst r21,2 + bld r12,2 + bst r21,3 + bld r24,2 + bst r21,4 + bld r4,3 + bst r21,5 + bld r8,3 + bst r21,6 + bld r12,3 + bst r21,7 + bld r24,3 + bst r22,0 + bld r4,4 + bst r22,1 + bld r8,4 + bst r22,2 + bld r12,4 + bst r22,3 + bld r24,4 + bst r22,4 + bld r4,5 + bst r22,5 + bld r8,5 + bst r22,6 + bld r12,5 + bst r22,7 + bld r24,5 + bst r23,0 + bld r4,6 + bst r23,1 + bld r8,6 + bst r23,2 + bld r12,6 + bst r23,3 + bld r24,6 + bst r23,4 + bld r4,7 + bst r23,5 + bld r8,7 + bst r23,6 + bld r12,7 + bst r23,7 + bld r24,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r5,0 + bst r20,1 + bld r9,0 + bst r20,2 + bld r13,0 + bst r20,3 + bld r25,0 + bst r20,4 + bld r5,1 + bst r20,5 + bld r9,1 + bst r20,6 + bld r13,1 + bst r20,7 + bld r25,1 + bst r21,0 + bld r5,2 + bst r21,1 + bld r9,2 + bst r21,2 + bld r13,2 + bst r21,3 + bld r25,2 + bst r21,4 + bld r5,3 + bst r21,5 + bld r9,3 + bst r21,6 + bld r13,3 + bst r21,7 + bld r25,3 + bst r22,0 + bld r5,4 + bst r22,1 + bld r9,4 + bst r22,2 + bld r13,4 + bst r22,3 + bld r25,4 + bst r22,4 + bld r5,5 + bst r22,5 + bld r9,5 + bst r22,6 + bld r13,5 + bst r22,7 + bld r25,5 + bst r23,0 + bld r5,6 + bst r23,1 + bld r9,6 + bst r23,2 + bld r13,6 + bst r23,3 + bld r25,6 + bst r23,4 + bld r5,7 + bst r23,5 + bld r9,7 + bst r23,6 + bld r13,7 + bst r23,7 + bld r25,7 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r16,Z+14 + ldd r17,Z+15 + mov r0,r17 + mov r17,r26 + mov r26,r0 + movw r20,r26 + movw r22,r16 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 
+ ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,15 + mov r21,r1 + andi r22,15 + mov r23,r1 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + mov r0,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r0 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,3 + andi r21,3 + andi r22,3 + andi r23,3 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r0,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r0 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,17 + andi r21,17 + andi r22,17 + andi r23,17 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r16 + std Y+4,r17 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r16,Z+6 + ldd r17,Z+7 + mov r0,r17 + mov r17,r26 + mov r26,r0 + movw r20,r26 + movw r22,r16 + movw r20,r22 + mov r22,r1 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + andi r20,51 + andi r21,51 + eor r26,r20 + eor r27,r21 + mov r22,r1 + mov r23,r1 + movw r22,r20 + mov r20,r1 + mov r21,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,15 + mov r21,r1 + andi r22,15 + mov r23,r1 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,85 + mov r21,r1 + andi r22,85 + mov r23,r1 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror 
r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r16 + std Y+8,r17 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r16,Z+10 + ldd r17,Z+11 + mov r0,r17 + mov r17,r26 + mov r26,r0 + movw r20,r26 + movw r22,r16 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,15 + mov r21,r1 + andi r22,15 + mov r23,r1 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + mov r0,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r0 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,3 + andi r21,3 + andi r22,3 + andi r23,3 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r0,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r0 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,17 + andi r21,17 + andi r22,17 + andi r23,17 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r16 + std Y+12,r17 + ld r26,Z + ldd r27,Z+1 + ldd r16,Z+2 + ldd r17,Z+3 + mov r0,r17 + mov r17,r26 + mov r26,r0 + movw r20,r26 + movw r22,r16 + movw r20,r22 + mov r22,r1 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + andi r20,51 + andi r21,51 + eor r26,r20 + eor r27,r21 + mov r22,r1 + mov r23,r1 + movw r22,r20 + mov r20,r1 + mov r21,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,15 + mov r21,r1 + andi r22,15 + mov r23,r1 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr 
r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,85 + mov r21,r1 + andi r22,85 + mov r23,r1 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r16 + std Y+16,r17 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r26,hh8(table_1) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r19,40 + mov r26,r1 +939: + ldd r0,Y+13 + ldd r20,Y+9 + std Y+9,r0 + ldd r0,Y+5 + std Y+5,r20 + ldd r20,Y+1 + std Y+1,r0 + ldd r0,Y+14 + ldd r21,Y+10 + std Y+10,r0 + ldd r0,Y+6 + std Y+6,r21 + ldd r21,Y+2 + std Y+2,r0 + ldd r0,Y+15 + ldd r22,Y+11 + std Y+11,r0 + ldd r0,Y+7 + std Y+7,r22 + ldd r22,Y+3 + std Y+3,r0 + ldd r0,Y+16 + ldd r23,Y+12 + std Y+12,r0 + ldd r0,Y+8 + std Y+8,r23 + ldd r23,Y+4 + std Y+4,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r0 + lsr r21 + ror r20 + ror r0 + lsr r21 + ror r20 + ror r0 + lsr r21 + ror r20 + ror r0 + or r21,r0 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + std Y+13,r20 + std Y+14,r21 + std Y+15,r22 + std Y+16,r23 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ldd r0,Y+5 + eor r10,r0 + ldd r0,Y+6 + eor r11,r0 + ldd r0,Y+7 + eor r12,r0 + ldd r0,Y+8 + eor r13,r0 + ldi r20,128 + eor r25,r20 + dec r19 + mov r30,r19 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + eor r14,r20 + bst r2,1 + bld r0,0 + bst r5,0 + bld r2,1 + bst r2,6 + bld r5,0 + bst r4,1 + bld r2,6 + bst r5,4 + bld r4,1 + bst r2,7 + bld r5,4 + bst r3,1 + bld r2,7 + bst r5,2 + bld r3,1 + bst r4,6 + bld r5,2 + bst r4,5 + bld r4,6 + bst r5,5 + bld r4,5 + bst r5,7 + bld r5,5 + bst r3,7 + bld r5,7 + bst r3,3 + bld r3,7 + bst r3,2 + bld r3,3 + bst r4,2 + bld r3,2 + bst r4,4 + bld r4,2 + bst r2,5 + bld r4,4 + bst r5,1 + bld r2,5 + bst r5,6 + bld r5,1 + bst r4,7 + bld r5,6 + bst r3,5 + bld r4,7 + bst r5,3 + bld r3,5 + bst r3,6 + bld r5,3 + bst r4,3 + bld r3,6 + bst r3,4 + bld r4,3 + bst r2,3 + bld r3,4 + bst r3,0 + bld r2,3 + bst r2,2 + bld r3,0 + bst r4,0 + bld r2,2 + bst r2,4 + bld r4,0 + bst r0,0 + bld r2,4 + bst r6,0 + bld r0,0 + bst r7,0 + bld r6,0 + bst r7,2 + bld r7,0 + bst r9,2 + bld r7,2 + bst r9,6 + bld r9,2 + bst r9,7 + bld r9,6 + bst r8,7 + bld r9,7 + bst r8,5 + bld r8,7 + bst r6,5 + bld r8,5 + bst r6,1 + bld r6,5 + bst r0,0 + bld r6,1 + bst r6,2 + bld r0,0 + bst r9,0 + bld r6,2 + bst r7,6 + bld r9,0 + bst r9,3 + bld r7,6 + bst r8,6 + bld r9,3 + bst r9,5 + bld r8,6 + bst r6,7 + bld r9,5 + bst r8,1 + bld r6,7 + bst r6,4 + bld r8,1 + bst r7,1 + bld r6,4 + bst r0,0 + bld r7,1 + bst r6,3 + bld r0,0 + bst r8,0 + bld r6,3 + bst r7,4 + bld r8,0 + bst r7,3 + bld r7,4 + bst r8,2 + bld r7,3 + bst r9,4 + bld r8,2 + bst r7,7 + bld r9,4 + bst r8,3 + bld r7,7 + bst r8,4 + bld r8,3 + bst r7,5 + bld r8,4 + bst r0,0 + bld r7,5 + bst r6,6 + bld r0,0 + bst r9,1 + bld r6,6 + bst r0,0 + bld r9,1 + bst r10,0 + bld r0,0 + bst r12,0 + bld r10,0 + bst r12,4 + bld r12,0 + bst r12,5 + bld r12,4 + bst r11,5 + bld r12,5 + bst r11,3 + bld r11,5 + bst r13,2 + bld r11,3 + bst r10,6 + bld r13,2 + bst r10,1 + bld r10,6 + bst r11,0 + bld r10,1 + 
bst r12,2 + bld r11,0 + bst r10,4 + bld r12,2 + bst r12,1 + bld r10,4 + bst r11,4 + bld r12,1 + bst r12,3 + bld r11,4 + bst r13,4 + bld r12,3 + bst r12,7 + bld r13,4 + bst r13,5 + bld r12,7 + bst r11,7 + bld r13,5 + bst r13,3 + bld r11,7 + bst r13,6 + bld r13,3 + bst r10,7 + bld r13,6 + bst r13,1 + bld r10,7 + bst r11,6 + bld r13,1 + bst r10,3 + bld r11,6 + bst r13,0 + bld r10,3 + bst r12,6 + bld r13,0 + bst r10,5 + bld r12,6 + bst r11,1 + bld r10,5 + bst r11,2 + bld r11,1 + bst r10,2 + bld r11,2 + bst r0,0 + bld r10,2 + bst r14,0 + bld r0,0 + bst r25,0 + bld r14,0 + bst r25,6 + bld r25,0 + bst r15,7 + bld r25,6 + bst r14,3 + bld r15,7 + bst r0,0 + bld r14,3 + bst r14,1 + bld r0,0 + bst r24,0 + bld r14,1 + bst r25,4 + bld r24,0 + bst r25,7 + bld r25,4 + bst r14,7 + bld r25,7 + bst r0,0 + bld r14,7 + bst r14,2 + bld r0,0 + bst r15,0 + bld r14,2 + bst r25,2 + bld r15,0 + bst r15,6 + bld r25,2 + bst r15,3 + bld r15,6 + bst r0,0 + bld r15,3 + bst r14,4 + bld r0,0 + bst r25,1 + bld r14,4 + bst r24,6 + bld r25,1 + bst r15,5 + bld r24,6 + bst r24,3 + bld r15,5 + bst r0,0 + bld r24,3 + bst r14,5 + bld r0,0 + bst r24,1 + bld r14,5 + bst r24,4 + bld r24,1 + bst r25,5 + bld r24,4 + bst r24,7 + bld r25,5 + bst r0,0 + bld r24,7 + bst r14,6 + bld r0,0 + bst r15,1 + bld r14,6 + bst r24,2 + bld r15,1 + bst r15,4 + bld r24,2 + bst r25,3 + bld r15,4 + bst r0,0 + bld r25,3 + movw r20,r14 + movw r22,r24 + movw r14,r2 + movw r24,r4 + movw r2,r20 + movw r4,r22 + and r20,r6 + and r21,r7 + and r22,r8 + and r23,r9 + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + com r14 + com r15 + com r24 + com r25 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + cp r19,r1 + breq 1355f + inc r26 + ldi r27,5 + cpse r26,r27 + rjmp 939b + mov r26,r1 + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rjmp 939b +1355: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + bst r2,0 + bld r20,0 + bst r6,0 + bld r20,1 + bst r10,0 + bld r20,2 + bst r14,0 + bld r20,3 + bst r2,1 + bld r20,4 + bst r6,1 + bld r20,5 + bst r10,1 + bld r20,6 + bst r14,1 + bld r20,7 + bst r2,2 + bld r21,0 + bst r6,2 + bld r21,1 + bst r10,2 + bld r21,2 + bst r14,2 + bld r21,3 + bst r2,3 + bld r21,4 + bst r6,3 + bld r21,5 + bst r10,3 + bld r21,6 + bst r14,3 + bld r21,7 + bst r2,4 + bld r22,0 + bst r6,4 + bld r22,1 + bst r10,4 + bld r22,2 + bst r14,4 + bld r22,3 + bst r2,5 + bld r22,4 + bst r6,5 + bld r22,5 + bst r10,5 + bld r22,6 + bst r14,5 + bld r22,7 + bst r2,6 + bld r23,0 + bst r6,6 + bld r23,1 + bst r10,6 + bld r23,2 + bst r14,6 + bld r23,3 + bst r2,7 + bld r23,4 + bst r6,7 + bld r23,5 + bst r10,7 + bld r23,6 + bst r14,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r3,0 + bld r20,0 + bst r7,0 + bld r20,1 + bst r11,0 + bld r20,2 + bst r15,0 + bld r20,3 + bst r3,1 + bld r20,4 + bst r7,1 + bld r20,5 + bst r11,1 + bld r20,6 + bst r15,1 + bld r20,7 + bst r3,2 + bld r21,0 + bst r7,2 + bld r21,1 + bst r11,2 + bld r21,2 + bst r15,2 + bld r21,3 + bst r3,3 + bld r21,4 + bst r7,3 + bld r21,5 + bst r11,3 + 
bld r21,6 + bst r15,3 + bld r21,7 + bst r3,4 + bld r22,0 + bst r7,4 + bld r22,1 + bst r11,4 + bld r22,2 + bst r15,4 + bld r22,3 + bst r3,5 + bld r22,4 + bst r7,5 + bld r22,5 + bst r11,5 + bld r22,6 + bst r15,5 + bld r22,7 + bst r3,6 + bld r23,0 + bst r7,6 + bld r23,1 + bst r11,6 + bld r23,2 + bst r15,6 + bld r23,3 + bst r3,7 + bld r23,4 + bst r7,7 + bld r23,5 + bst r11,7 + bld r23,6 + bst r15,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r4,0 + bld r20,0 + bst r8,0 + bld r20,1 + bst r12,0 + bld r20,2 + bst r24,0 + bld r20,3 + bst r4,1 + bld r20,4 + bst r8,1 + bld r20,5 + bst r12,1 + bld r20,6 + bst r24,1 + bld r20,7 + bst r4,2 + bld r21,0 + bst r8,2 + bld r21,1 + bst r12,2 + bld r21,2 + bst r24,2 + bld r21,3 + bst r4,3 + bld r21,4 + bst r8,3 + bld r21,5 + bst r12,3 + bld r21,6 + bst r24,3 + bld r21,7 + bst r4,4 + bld r22,0 + bst r8,4 + bld r22,1 + bst r12,4 + bld r22,2 + bst r24,4 + bld r22,3 + bst r4,5 + bld r22,4 + bst r8,5 + bld r22,5 + bst r12,5 + bld r22,6 + bst r24,5 + bld r22,7 + bst r4,6 + bld r23,0 + bst r8,6 + bld r23,1 + bst r12,6 + bld r23,2 + bst r24,6 + bld r23,3 + bst r4,7 + bld r23,4 + bst r8,7 + bld r23,5 + bst r12,7 + bld r23,6 + bst r24,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r5,0 + bld r20,0 + bst r9,0 + bld r20,1 + bst r13,0 + bld r20,2 + bst r25,0 + bld r20,3 + bst r5,1 + bld r20,4 + bst r9,1 + bld r20,5 + bst r13,1 + bld r20,6 + bst r25,1 + bld r20,7 + bst r5,2 + bld r21,0 + bst r9,2 + bld r21,1 + bst r13,2 + bld r21,2 + bst r25,2 + bld r21,3 + bst r5,3 + bld r21,4 + bst r9,3 + bld r21,5 + bst r13,3 + bld r21,6 + bst r25,3 + bld r21,7 + bst r5,4 + bld r22,0 + bst r9,4 + bld r22,1 + bst r13,4 + bld r22,2 + bst r25,4 + bld r22,3 + bst r5,5 + bld r22,4 + bst r9,5 + bld r22,5 + bst r13,5 + bld r22,6 + bst r25,5 + bld r22,7 + bst r5,6 + bld r23,0 + bst r9,6 + bld r23,1 + bst r13,6 + bld r23,2 + bst r25,6 + bld r23,3 + bst r5,7 + bld r23,4 + bst r9,7 + bld r23,5 + bst r13,7 + bld r23,6 + bst r25,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128t_decrypt, .-gift128t_decrypt + +#endif + +#endif diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128n-tiny-avr.S b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128n-tiny-avr.S new file mode 100644 index 0000000..dd1f7b9 --- /dev/null +++ b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-gift128n-tiny-avr.S @@ -0,0 +1,9480 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_TINY + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + 
.byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 + .byte 128 + + .text +.global gift128n_init + .type gift128n_init, @function +gift128n_init: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 16 + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + st Z,r22 + std Z+1,r23 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size gift128n_init, .-gift128n_init + + .text +.global gift128n_encrypt + .type gift128n_encrypt, @function +gift128n_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + movw r30,r28 + adiw r30,1 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + ldi r24,4 +35: + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + mov r0,r22 + mov r22,r4 + mov r4,r0 + mov r0,r23 + mov r23,r5 + mov r5,r0 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + 
adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + dec r24 + breq 5117f + rjmp 35b +5117: + subi r30,80 + sbc r31,r1 + ldi r24,2 +121: + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + st Z,r3 + std Z+1,r23 + std Z+2,r2 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + 
rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+4,r3 + std Z+5,r23 + std Z+6,r2 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+8,r3 + std Z+9,r23 + std Z+10,r2 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + 
lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+12,r3 + std Z+13,r23 + std Z+14,r2 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r3 + std Z+17,r23 + std Z+18,r2 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+20,r3 + std Z+21,r23 + std Z+22,r2 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr 
r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+24,r3 + std Z+25,r23 + std Z+26,r2 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+28,r3 + std Z+29,r23 + std Z+30,r2 + std Z+31,r22 + dec r24 + breq 1270f + adiw r30,40 + rjmp 121b +1270: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + 
bld r22,0 + bst r18,1 + bld r4,0 + bst r18,2 + bld r8,0 + bst r18,3 + bld r12,0 + bst r18,4 + bld r22,1 + bst r18,5 + bld r4,1 + bst r18,6 + bld r8,1 + bst r18,7 + bld r12,1 + bst r19,0 + bld r22,2 + bst r19,1 + bld r4,2 + bst r19,2 + bld r8,2 + bst r19,3 + bld r12,2 + bst r19,4 + bld r22,3 + bst r19,5 + bld r4,3 + bst r19,6 + bld r8,3 + bst r19,7 + bld r12,3 + bst r20,0 + bld r22,4 + bst r20,1 + bld r4,4 + bst r20,2 + bld r8,4 + bst r20,3 + bld r12,4 + bst r20,4 + bld r22,5 + bst r20,5 + bld r4,5 + bst r20,6 + bld r8,5 + bst r20,7 + bld r12,5 + bst r21,0 + bld r22,6 + bst r21,1 + bld r4,6 + bst r21,2 + bld r8,6 + bst r21,3 + bld r12,6 + bst r21,4 + bld r22,7 + bst r21,5 + bld r4,7 + bst r21,6 + bld r8,7 + bst r21,7 + bld r12,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r23,0 + bst r18,1 + bld r5,0 + bst r18,2 + bld r9,0 + bst r18,3 + bld r13,0 + bst r18,4 + bld r23,1 + bst r18,5 + bld r5,1 + bst r18,6 + bld r9,1 + bst r18,7 + bld r13,1 + bst r19,0 + bld r23,2 + bst r19,1 + bld r5,2 + bst r19,2 + bld r9,2 + bst r19,3 + bld r13,2 + bst r19,4 + bld r23,3 + bst r19,5 + bld r5,3 + bst r19,6 + bld r9,3 + bst r19,7 + bld r13,3 + bst r20,0 + bld r23,4 + bst r20,1 + bld r5,4 + bst r20,2 + bld r9,4 + bst r20,3 + bld r13,4 + bst r20,4 + bld r23,5 + bst r20,5 + bld r5,5 + bst r20,6 + bld r9,5 + bst r20,7 + bld r13,5 + bst r21,0 + bld r23,6 + bst r21,1 + bld r5,6 + bst r21,2 + bld r9,6 + bst r21,3 + bld r13,6 + bst r21,4 + bld r23,7 + bst r21,5 + bld r5,7 + bst r21,6 + bld r9,7 + bst r21,7 + bld r13,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r2,0 + bst r18,1 + bld r6,0 + bst r18,2 + bld r10,0 + bst r18,3 + bld r14,0 + bst r18,4 + bld r2,1 + bst r18,5 + bld r6,1 + bst r18,6 + bld r10,1 + bst r18,7 + bld r14,1 + bst r19,0 + bld r2,2 + bst r19,1 + bld r6,2 + bst r19,2 + bld r10,2 + bst r19,3 + bld r14,2 + bst r19,4 + bld r2,3 + bst r19,5 + bld r6,3 + bst r19,6 + bld r10,3 + bst r19,7 + bld r14,3 + bst r20,0 + bld r2,4 + bst r20,1 + bld r6,4 + bst r20,2 + bld r10,4 + bst r20,3 + bld r14,4 + bst r20,4 + bld r2,5 + bst r20,5 + bld r6,5 + bst r20,6 + bld r10,5 + bst r20,7 + bld r14,5 + bst r21,0 + bld r2,6 + bst r21,1 + bld r6,6 + bst r21,2 + bld r10,6 + bst r21,3 + bld r14,6 + bst r21,4 + bld r2,7 + bst r21,5 + bld r6,7 + bst r21,6 + bld r10,7 + bst r21,7 + bld r14,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r3,0 + bst r18,1 + bld r7,0 + bst r18,2 + bld r11,0 + bst r18,3 + bld r15,0 + bst r18,4 + bld r3,1 + bst r18,5 + bld r7,1 + bst r18,6 + bld r11,1 + bst r18,7 + bld r15,1 + bst r19,0 + bld r3,2 + bst r19,1 + bld r7,2 + bst r19,2 + bld r11,2 + bst r19,3 + bld r15,2 + bst r19,4 + bld r3,3 + bst r19,5 + bld r7,3 + bst r19,6 + bld r11,3 + bst r19,7 + bld r15,3 + bst r20,0 + bld r3,4 + bst r20,1 + bld r7,4 + bst r20,2 + bld r11,4 + bst r20,3 + bld r15,4 + bst r20,4 + bld r3,5 + bst r20,5 + bld r7,5 + bst r20,6 + bld r11,5 + bst r20,7 + bld r15,5 + bst r21,0 + bld r3,6 + bst r21,1 + bld r7,6 + bst r21,2 + bld r11,6 + bst r21,3 + bld r15,6 + bst r21,4 + bld r3,7 + bst r21,5 + bld r7,7 + bst r21,6 + bld r11,7 + bst r21,7 + bld r15,7 + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 1585f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2323f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + 
out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 1585f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2323f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 1585f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2323f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + adiw r26,40 + rcall 1585f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2323f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 1585f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2323f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 1585f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2323f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 1585f + rcall 1585f + rjmp 2797f +1585: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + 
eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov 
r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +2323: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and 
r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 
+ rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +2797: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + bst r22,0 + bld r18,0 + bst r4,0 + bld r18,1 + bst r8,0 + bld r18,2 + bst r12,0 + bld r18,3 + bst r22,1 + bld r18,4 + bst r4,1 + bld r18,5 + bst r8,1 + bld r18,6 + bst r12,1 + bld r18,7 + bst r22,2 + bld r19,0 + bst r4,2 + bld r19,1 + bst r8,2 + bld r19,2 + bst r12,2 + bld r19,3 + bst r22,3 + bld r19,4 + bst r4,3 + bld r19,5 + bst r8,3 + bld r19,6 + bst r12,3 + bld r19,7 + bst r22,4 + bld r20,0 + bst r4,4 + bld r20,1 + bst r8,4 + bld r20,2 + bst r12,4 + bld r20,3 + bst r22,5 + bld r20,4 + bst r4,5 + bld r20,5 + bst r8,5 + bld r20,6 + bst r12,5 + bld r20,7 + bst r22,6 + bld r21,0 + bst r4,6 + bld r21,1 + bst r8,6 + bld r21,2 + bst r12,6 + bld r21,3 + bst r22,7 + bld r21,4 + bst r4,7 + bld r21,5 + bst r8,7 + bld r21,6 + bst r12,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r23,0 + bld r18,0 + bst r5,0 + bld r18,1 + bst r9,0 + bld r18,2 + bst r13,0 + bld r18,3 + bst r23,1 + bld r18,4 + bst r5,1 + bld r18,5 + bst r9,1 + bld r18,6 + bst r13,1 + bld r18,7 + bst r23,2 + bld r19,0 + bst r5,2 + bld r19,1 + bst r9,2 + bld r19,2 + bst r13,2 + bld r19,3 + bst r23,3 + bld r19,4 + bst r5,3 + bld r19,5 + bst r9,3 + bld r19,6 + bst r13,3 + bld r19,7 + bst r23,4 + bld r20,0 + bst r5,4 + bld r20,1 + bst r9,4 + bld r20,2 + bst r13,4 + bld r20,3 + bst r23,5 + bld r20,4 + bst r5,5 + bld r20,5 + bst r9,5 + bld r20,6 + bst r13,5 + bld r20,7 + bst r23,6 + bld r21,0 + bst r5,6 + bld r21,1 + bst r9,6 + bld r21,2 + bst r13,6 + bld r21,3 + bst r23,7 + bld r21,4 + bst r5,7 + bld r21,5 + bst r9,7 + bld r21,6 + bst r13,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r2,0 + bld r18,0 + bst r6,0 + bld r18,1 + bst r10,0 + bld r18,2 + bst r14,0 + bld r18,3 + bst r2,1 + bld r18,4 + bst r6,1 + bld r18,5 + bst r10,1 + bld r18,6 + bst r14,1 + bld r18,7 + bst r2,2 + bld r19,0 + bst r6,2 + bld r19,1 + bst r10,2 + bld r19,2 + bst r14,2 + bld r19,3 + bst r2,3 + bld r19,4 + bst r6,3 + bld r19,5 + bst r10,3 + bld r19,6 + bst r14,3 + bld r19,7 + bst r2,4 + bld r20,0 + bst r6,4 + bld r20,1 + bst r10,4 + bld r20,2 + bst r14,4 + bld r20,3 + bst r2,5 + bld r20,4 + bst r6,5 + bld r20,5 + bst r10,5 + bld r20,6 + bst r14,5 + bld r20,7 + bst 
r2,6 + bld r21,0 + bst r6,6 + bld r21,1 + bst r10,6 + bld r21,2 + bst r14,6 + bld r21,3 + bst r2,7 + bld r21,4 + bst r6,7 + bld r21,5 + bst r10,7 + bld r21,6 + bst r14,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r3,0 + bld r18,0 + bst r7,0 + bld r18,1 + bst r11,0 + bld r18,2 + bst r15,0 + bld r18,3 + bst r3,1 + bld r18,4 + bst r7,1 + bld r18,5 + bst r11,1 + bld r18,6 + bst r15,1 + bld r18,7 + bst r3,2 + bld r19,0 + bst r7,2 + bld r19,1 + bst r11,2 + bld r19,2 + bst r15,2 + bld r19,3 + bst r3,3 + bld r19,4 + bst r7,3 + bld r19,5 + bst r11,3 + bld r19,6 + bst r15,3 + bld r19,7 + bst r3,4 + bld r20,0 + bst r7,4 + bld r20,1 + bst r11,4 + bld r20,2 + bst r15,4 + bld r20,3 + bst r3,5 + bld r20,4 + bst r7,5 + bld r20,5 + bst r11,5 + bld r20,6 + bst r15,5 + bld r20,7 + bst r3,6 + bld r21,0 + bst r7,6 + bld r21,1 + bst r11,6 + bld r21,2 + bst r15,6 + bld r21,3 + bst r3,7 + bld r21,4 + bst r7,7 + bld r21,5 + bst r11,7 + bld r21,6 + bst r15,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128n_encrypt, .-gift128n_encrypt + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 40 +table_1: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 11 + .byte 23 + .byte 46 + .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128n_decrypt + .type gift128n_decrypt, @function +gift128n_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r22,0 + bst r18,1 + bld r4,0 + bst r18,2 + bld r8,0 + bst r18,3 + bld r12,0 + bst r18,4 + bld r22,1 + bst r18,5 + bld r4,1 + bst r18,6 + bld r8,1 + bst r18,7 + bld r12,1 + bst r19,0 + bld r22,2 + bst r19,1 + bld r4,2 + bst r19,2 + bld r8,2 + bst r19,3 + bld r12,2 + bst r19,4 + bld r22,3 + bst r19,5 + bld r4,3 + bst r19,6 + bld r8,3 + bst r19,7 + bld r12,3 + bst r20,0 + bld r22,4 + bst r20,1 + bld r4,4 + bst r20,2 + bld r8,4 + bst r20,3 + bld r12,4 + bst r20,4 + bld r22,5 + bst r20,5 + bld r4,5 + bst r20,6 + bld r8,5 + bst r20,7 + bld r12,5 + bst r21,0 + bld r22,6 + bst r21,1 + bld r4,6 + bst r21,2 + bld r8,6 + bst r21,3 + bld r12,6 + bst r21,4 + bld r22,7 + bst r21,5 + bld r4,7 + bst r21,6 + bld r8,7 + bst r21,7 + bld r12,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r23,0 + bst r18,1 + bld r5,0 + bst r18,2 + bld r9,0 + bst r18,3 + bld r13,0 + bst r18,4 + bld r23,1 + bst r18,5 + bld r5,1 + bst r18,6 + bld r9,1 + bst r18,7 + bld r13,1 + bst r19,0 + bld r23,2 + bst r19,1 + bld r5,2 + bst r19,2 + bld r9,2 + bst r19,3 + bld r13,2 + bst r19,4 + bld r23,3 + bst r19,5 + bld r5,3 + bst r19,6 + bld r9,3 + bst r19,7 + 
bld r13,3 + bst r20,0 + bld r23,4 + bst r20,1 + bld r5,4 + bst r20,2 + bld r9,4 + bst r20,3 + bld r13,4 + bst r20,4 + bld r23,5 + bst r20,5 + bld r5,5 + bst r20,6 + bld r9,5 + bst r20,7 + bld r13,5 + bst r21,0 + bld r23,6 + bst r21,1 + bld r5,6 + bst r21,2 + bld r9,6 + bst r21,3 + bld r13,6 + bst r21,4 + bld r23,7 + bst r21,5 + bld r5,7 + bst r21,6 + bld r9,7 + bst r21,7 + bld r13,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r2,0 + bst r18,1 + bld r6,0 + bst r18,2 + bld r10,0 + bst r18,3 + bld r14,0 + bst r18,4 + bld r2,1 + bst r18,5 + bld r6,1 + bst r18,6 + bld r10,1 + bst r18,7 + bld r14,1 + bst r19,0 + bld r2,2 + bst r19,1 + bld r6,2 + bst r19,2 + bld r10,2 + bst r19,3 + bld r14,2 + bst r19,4 + bld r2,3 + bst r19,5 + bld r6,3 + bst r19,6 + bld r10,3 + bst r19,7 + bld r14,3 + bst r20,0 + bld r2,4 + bst r20,1 + bld r6,4 + bst r20,2 + bld r10,4 + bst r20,3 + bld r14,4 + bst r20,4 + bld r2,5 + bst r20,5 + bld r6,5 + bst r20,6 + bld r10,5 + bst r20,7 + bld r14,5 + bst r21,0 + bld r2,6 + bst r21,1 + bld r6,6 + bst r21,2 + bld r10,6 + bst r21,3 + bld r14,6 + bst r21,4 + bld r2,7 + bst r21,5 + bld r6,7 + bst r21,6 + bld r10,7 + bst r21,7 + bld r14,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r3,0 + bst r18,1 + bld r7,0 + bst r18,2 + bld r11,0 + bst r18,3 + bld r15,0 + bst r18,4 + bld r3,1 + bst r18,5 + bld r7,1 + bst r18,6 + bld r11,1 + bst r18,7 + bld r15,1 + bst r19,0 + bld r3,2 + bst r19,1 + bld r7,2 + bst r19,2 + bld r11,2 + bst r19,3 + bld r15,2 + bst r19,4 + bld r3,3 + bst r19,5 + bld r7,3 + bst r19,6 + bld r11,3 + bst r19,7 + bld r15,3 + bst r20,0 + bld r3,4 + bst r20,1 + bld r7,4 + bst r20,2 + bld r11,4 + bst r20,3 + bld r15,4 + bst r20,4 + bld r3,5 + bst r20,5 + bld r7,5 + bst r20,6 + bld r11,5 + bst r20,7 + bld r15,5 + bst r21,0 + bld r3,6 + bst r21,1 + bld r7,6 + bst r21,2 + bld r11,6 + bst r21,3 + bld r15,6 + bst r21,4 + bld r3,7 + bst r21,5 + bld r7,7 + bst r21,6 + bld r11,7 + bst r21,7 + bld r15,7 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r17,hh8(table_1) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +370: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + 
lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 522f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 522f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 522f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 522f + cpse r16,r1 + rjmp 370b + rjmp 867f +522: + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 + bst r7,6 + bld r7,2 + bst r7,7 + bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld 
r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld r11,0 + bst r8,5 + bld r10,6 + bst r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +867: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + bst r22,0 + bld r18,0 + bst r4,0 + bld r18,1 + bst r8,0 + bld r18,2 + bst r12,0 + bld r18,3 + bst r22,1 + bld r18,4 + bst r4,1 + bld r18,5 + bst r8,1 + bld r18,6 + bst r12,1 + bld r18,7 + bst r22,2 + bld r19,0 + bst r4,2 + bld r19,1 + bst r8,2 + bld r19,2 + bst r12,2 + bld r19,3 + bst r22,3 + bld r19,4 + bst r4,3 + bld r19,5 + bst r8,3 + bld r19,6 + bst r12,3 + bld r19,7 + bst r22,4 + bld r20,0 + bst r4,4 + bld r20,1 + bst r8,4 + bld r20,2 + bst r12,4 + bld r20,3 + bst r22,5 + bld r20,4 + bst r4,5 + bld r20,5 + bst r8,5 + bld r20,6 + bst r12,5 + bld r20,7 + bst r22,6 + bld r21,0 + bst r4,6 + bld r21,1 + bst r8,6 + bld r21,2 + bst r12,6 + bld r21,3 + bst r22,7 + bld r21,4 + bst r4,7 + bld r21,5 + bst r8,7 + bld r21,6 + bst r12,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r23,0 + bld r18,0 + bst r5,0 + bld r18,1 + bst r9,0 + bld r18,2 + bst r13,0 + bld r18,3 + bst r23,1 + bld r18,4 + bst r5,1 + bld r18,5 + bst r9,1 + bld r18,6 + bst r13,1 + bld r18,7 + bst r23,2 + bld r19,0 + bst r5,2 + bld r19,1 + bst r9,2 + bld r19,2 + bst r13,2 + bld r19,3 + bst r23,3 + bld r19,4 + bst r5,3 + bld r19,5 + bst r9,3 + bld r19,6 + bst r13,3 + bld r19,7 + bst r23,4 + bld r20,0 + bst r5,4 + bld r20,1 + bst r9,4 + bld r20,2 + bst r13,4 + bld r20,3 + bst r23,5 + bld r20,4 + bst 
r5,5 + bld r20,5 + bst r9,5 + bld r20,6 + bst r13,5 + bld r20,7 + bst r23,6 + bld r21,0 + bst r5,6 + bld r21,1 + bst r9,6 + bld r21,2 + bst r13,6 + bld r21,3 + bst r23,7 + bld r21,4 + bst r5,7 + bld r21,5 + bst r9,7 + bld r21,6 + bst r13,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r2,0 + bld r18,0 + bst r6,0 + bld r18,1 + bst r10,0 + bld r18,2 + bst r14,0 + bld r18,3 + bst r2,1 + bld r18,4 + bst r6,1 + bld r18,5 + bst r10,1 + bld r18,6 + bst r14,1 + bld r18,7 + bst r2,2 + bld r19,0 + bst r6,2 + bld r19,1 + bst r10,2 + bld r19,2 + bst r14,2 + bld r19,3 + bst r2,3 + bld r19,4 + bst r6,3 + bld r19,5 + bst r10,3 + bld r19,6 + bst r14,3 + bld r19,7 + bst r2,4 + bld r20,0 + bst r6,4 + bld r20,1 + bst r10,4 + bld r20,2 + bst r14,4 + bld r20,3 + bst r2,5 + bld r20,4 + bst r6,5 + bld r20,5 + bst r10,5 + bld r20,6 + bst r14,5 + bld r20,7 + bst r2,6 + bld r21,0 + bst r6,6 + bld r21,1 + bst r10,6 + bld r21,2 + bst r14,6 + bld r21,3 + bst r2,7 + bld r21,4 + bst r6,7 + bld r21,5 + bst r10,7 + bld r21,6 + bst r14,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r3,0 + bld r18,0 + bst r7,0 + bld r18,1 + bst r11,0 + bld r18,2 + bst r15,0 + bld r18,3 + bst r3,1 + bld r18,4 + bst r7,1 + bld r18,5 + bst r11,1 + bld r18,6 + bst r15,1 + bld r18,7 + bst r3,2 + bld r19,0 + bst r7,2 + bld r19,1 + bst r11,2 + bld r19,2 + bst r15,2 + bld r19,3 + bst r3,3 + bld r19,4 + bst r7,3 + bld r19,5 + bst r11,3 + bld r19,6 + bst r15,3 + bld r19,7 + bst r3,4 + bld r20,0 + bst r7,4 + bld r20,1 + bst r11,4 + bld r20,2 + bst r15,4 + bld r20,3 + bst r3,5 + bld r20,4 + bst r7,5 + bld r20,5 + bst r11,5 + bld r20,6 + bst r15,5 + bld r20,7 + bst r3,6 + bld r21,0 + bst r7,6 + bld r21,1 + bst r11,6 + bld r21,2 + bst r15,6 + bld r21,3 + bst r3,7 + bld r21,4 + bst r7,7 + bld r21,5 + bst r11,7 + bld r21,6 + bst r15,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128n_decrypt, .-gift128n_decrypt + + .text +.global gift128t_encrypt + .type gift128t_encrypt, @function +gift128t_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ld r2,Z + ldd r3,Z+1 + ldd r4,Z+2 + ldd r5,Z+3 + ldd r6,Z+4 + ldd r7,Z+5 + ldd r8,Z+6 + ldd r9,Z+7 + ldd r10,Z+8 + ldd r11,Z+9 + ldd r12,Z+10 + ldd r13,Z+11 + ldd r14,Z+12 + ldd r15,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + movw r30,r28 + adiw r30,1 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + st Z+,r24 + st Z+,r25 + ldi r19,4 +35: + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + mov r0,r1 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + or r5,r0 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + mov r0,r4 + mov r4,r8 + mov r8,r0 + mov r0,r5 + mov r5,r9 + mov r9,r0 + st Z+,r14 + 
st Z+,r15 + st Z+,r24 + st Z+,r25 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + mov r0,r1 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + or r13,r0 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + mov r0,r12 + mov r12,r24 + mov r24,r0 + mov r0,r13 + mov r13,r25 + mov r25,r0 + dec r19 + breq 5117f + rjmp 35b +5117: + subi r30,80 + sbc r31,r1 + ldi r19,2 +121: + ld r2,Z + ldd r3,Z+1 + ldd r4,Z+2 + ldd r5,Z+3 + movw r20,r2 + movw r22,r4 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,85 + mov r21,r1 + andi r22,85 + mov r23,r1 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,15 + mov r21,r1 + andi r22,15 + mov r23,r1 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + movw r20,r22 + mov r22,r1 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + andi r20,51 + andi r21,51 + eor r2,r20 + eor r3,r21 + mov r22,r1 + mov r23,r1 + movw r22,r20 + mov r20,r1 + mov r21,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + st Z,r5 + std Z+1,r3 + std Z+2,r4 + std Z+3,r2 + ldd r2,Z+4 + ldd r3,Z+5 + ldd r4,Z+6 + ldd r5,Z+7 + movw r20,r2 + movw r22,r4 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,85 + mov r21,r1 + andi r22,85 + mov r23,r1 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,15 + mov r21,r1 + andi r22,15 + mov r23,r1 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + movw r20,r22 + mov r22,r1 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + andi r20,51 + andi r21,51 + eor r2,r20 + eor r3,r21 + mov r22,r1 
+ mov r23,r1 + movw r22,r20 + mov r20,r1 + mov r21,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + std Z+4,r5 + std Z+5,r3 + std Z+6,r4 + std Z+7,r2 + ldd r2,Z+8 + ldd r3,Z+9 + ldd r4,Z+10 + ldd r5,Z+11 + movw r20,r2 + movw r22,r4 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,17 + andi r21,17 + andi r22,17 + andi r23,17 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r0,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r0 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,3 + andi r21,3 + andi r22,3 + andi r23,3 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r0,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r0 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,15 + mov r21,r1 + andi r22,15 + mov r23,r1 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + std Z+8,r5 + std Z+9,r3 + std Z+10,r4 + std Z+11,r2 + ldd r2,Z+12 + ldd r3,Z+13 + ldd r4,Z+14 + ldd r5,Z+15 + movw r20,r2 + movw r22,r4 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,17 + andi r21,17 + andi r22,17 + andi r23,17 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r0,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r0 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,3 + andi r21,3 + andi r22,3 + andi r23,3 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r0,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r0 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,15 + mov r21,r1 + andi r22,15 + mov r23,r1 + eor r2,r20 + eor r3,r21 + eor r4,r22 + 
eor r5,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + std Z+12,r5 + std Z+13,r3 + std Z+14,r4 + std Z+15,r2 + ldd r2,Z+16 + ldd r3,Z+17 + ldd r4,Z+18 + ldd r5,Z+19 + movw r20,r2 + movw r22,r4 + mov r0,r1 + lsl r21 + rol r22 + rol r23 + rol r0 + movw r20,r22 + mov r22,r0 + mov r23,r1 + eor r20,r2 + eor r21,r3 + andi r20,170 + andi r21,170 + eor r2,r20 + eor r3,r21 + mov r22,r1 + mov r23,r1 + mov r0,r1 + lsr r22 + ror r21 + ror r20 + ror r0 + movw r22,r20 + mov r21,r0 + mov r20,r1 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + movw r20,r22 + mov r22,r1 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + andi r20,51 + andi r21,51 + eor r2,r20 + eor r3,r21 + mov r22,r1 + mov r23,r1 + movw r22,r20 + mov r20,r1 + mov r21,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + andi r20,240 + andi r21,240 + eor r2,r20 + eor r3,r21 + mov r22,r1 + mov r23,r1 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + std Z+16,r5 + std Z+17,r3 + std Z+18,r4 + std Z+19,r2 + ldd r2,Z+20 + ldd r3,Z+21 + ldd r4,Z+22 + ldd r5,Z+23 + movw r20,r2 + movw r22,r4 + mov r0,r1 + lsl r21 + rol r22 + rol r23 + rol r0 + movw r20,r22 + mov r22,r0 + mov r23,r1 + eor r20,r2 + eor r21,r3 + andi r20,170 + andi r21,170 + eor r2,r20 + eor r3,r21 + mov r22,r1 + mov r23,r1 + mov r0,r1 + lsr r22 + ror r21 + ror r20 + ror r0 + movw r22,r20 + mov r21,r0 + mov r20,r1 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + movw r20,r22 + mov r22,r1 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + andi r20,51 + andi r21,51 + eor r2,r20 + eor r3,r21 + mov r22,r1 + mov r23,r1 + movw r22,r20 + mov r20,r1 + mov r21,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + andi r20,240 + andi r21,240 + eor r2,r20 + eor r3,r21 + mov r22,r1 + mov r23,r1 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + std Z+20,r5 + std Z+21,r3 + std Z+22,r4 + std Z+23,r2 + ldd r2,Z+24 + ldd r3,Z+25 + ldd r4,Z+26 + ldd r5,Z+27 + movw r20,r2 + movw r22,r4 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror 
r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,10 + andi r21,10 + andi r22,10 + andi r23,10 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r0,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r0 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,204 + mov r21,r1 + andi r22,204 + mov r23,r1 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r0,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r0 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + andi r20,240 + andi r21,240 + eor r2,r20 + eor r3,r21 + mov r22,r1 + mov r23,r1 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + std Z+24,r5 + std Z+25,r3 + std Z+26,r4 + std Z+27,r2 + ldd r2,Z+28 + ldd r3,Z+29 + ldd r4,Z+30 + ldd r5,Z+31 + movw r20,r2 + movw r22,r4 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,10 + andi r21,10 + andi r22,10 + andi r23,10 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r0,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r0 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,204 + mov r21,r1 + andi r22,204 + mov r23,r1 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r0,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r0 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + andi r20,240 + andi r21,240 + eor r2,r20 + eor r3,r21 + mov r22,r1 + mov r23,r1 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + std Z+28,r5 + std Z+29,r3 + std Z+30,r4 + std Z+31,r2 + dec r19 + breq 1270f + adiw r30,40 + rjmp 121b +1270: + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r2,0 + bst r20,1 + bld r6,0 + 
bst r20,2 + bld r10,0 + bst r20,3 + bld r14,0 + bst r20,4 + bld r2,1 + bst r20,5 + bld r6,1 + bst r20,6 + bld r10,1 + bst r20,7 + bld r14,1 + bst r21,0 + bld r2,2 + bst r21,1 + bld r6,2 + bst r21,2 + bld r10,2 + bst r21,3 + bld r14,2 + bst r21,4 + bld r2,3 + bst r21,5 + bld r6,3 + bst r21,6 + bld r10,3 + bst r21,7 + bld r14,3 + bst r22,0 + bld r2,4 + bst r22,1 + bld r6,4 + bst r22,2 + bld r10,4 + bst r22,3 + bld r14,4 + bst r22,4 + bld r2,5 + bst r22,5 + bld r6,5 + bst r22,6 + bld r10,5 + bst r22,7 + bld r14,5 + bst r23,0 + bld r2,6 + bst r23,1 + bld r6,6 + bst r23,2 + bld r10,6 + bst r23,3 + bld r14,6 + bst r23,4 + bld r2,7 + bst r23,5 + bld r6,7 + bst r23,6 + bld r10,7 + bst r23,7 + bld r14,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r3,0 + bst r20,1 + bld r7,0 + bst r20,2 + bld r11,0 + bst r20,3 + bld r15,0 + bst r20,4 + bld r3,1 + bst r20,5 + bld r7,1 + bst r20,6 + bld r11,1 + bst r20,7 + bld r15,1 + bst r21,0 + bld r3,2 + bst r21,1 + bld r7,2 + bst r21,2 + bld r11,2 + bst r21,3 + bld r15,2 + bst r21,4 + bld r3,3 + bst r21,5 + bld r7,3 + bst r21,6 + bld r11,3 + bst r21,7 + bld r15,3 + bst r22,0 + bld r3,4 + bst r22,1 + bld r7,4 + bst r22,2 + bld r11,4 + bst r22,3 + bld r15,4 + bst r22,4 + bld r3,5 + bst r22,5 + bld r7,5 + bst r22,6 + bld r11,5 + bst r22,7 + bld r15,5 + bst r23,0 + bld r3,6 + bst r23,1 + bld r7,6 + bst r23,2 + bld r11,6 + bst r23,3 + bld r15,6 + bst r23,4 + bld r3,7 + bst r23,5 + bld r7,7 + bst r23,6 + bld r11,7 + bst r23,7 + bld r15,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r4,0 + bst r20,1 + bld r8,0 + bst r20,2 + bld r12,0 + bst r20,3 + bld r24,0 + bst r20,4 + bld r4,1 + bst r20,5 + bld r8,1 + bst r20,6 + bld r12,1 + bst r20,7 + bld r24,1 + bst r21,0 + bld r4,2 + bst r21,1 + bld r8,2 + bst r21,2 + bld r12,2 + bst r21,3 + bld r24,2 + bst r21,4 + bld r4,3 + bst r21,5 + bld r8,3 + bst r21,6 + bld r12,3 + bst r21,7 + bld r24,3 + bst r22,0 + bld r4,4 + bst r22,1 + bld r8,4 + bst r22,2 + bld r12,4 + bst r22,3 + bld r24,4 + bst r22,4 + bld r4,5 + bst r22,5 + bld r8,5 + bst r22,6 + bld r12,5 + bst r22,7 + bld r24,5 + bst r23,0 + bld r4,6 + bst r23,1 + bld r8,6 + bst r23,2 + bld r12,6 + bst r23,3 + bld r24,6 + bst r23,4 + bld r4,7 + bst r23,5 + bld r8,7 + bst r23,6 + bld r12,7 + bst r23,7 + bld r24,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r5,0 + bst r20,1 + bld r9,0 + bst r20,2 + bld r13,0 + bst r20,3 + bld r25,0 + bst r20,4 + bld r5,1 + bst r20,5 + bld r9,1 + bst r20,6 + bld r13,1 + bst r20,7 + bld r25,1 + bst r21,0 + bld r5,2 + bst r21,1 + bld r9,2 + bst r21,2 + bld r13,2 + bst r21,3 + bld r25,2 + bst r21,4 + bld r5,3 + bst r21,5 + bld r9,3 + bst r21,6 + bld r13,3 + bst r21,7 + bld r25,3 + bst r22,0 + bld r5,4 + bst r22,1 + bld r9,4 + bst r22,2 + bld r13,4 + bst r22,3 + bld r25,4 + bst r22,4 + bld r5,5 + bst r22,5 + bld r9,5 + bst r22,6 + bld r13,5 + bst r22,7 + bld r25,5 + bst r23,0 + bld r5,6 + bst r23,1 + bld r9,6 + bst r23,2 + bld r13,6 + bst r23,3 + bld r25,6 + bst r23,4 + bld r5,7 + bst r23,5 + bld r9,7 + bst r23,6 + bld r13,7 + bst r23,7 + bld r25,7 + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + rcall 1613f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2351f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in 
r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,20 + adiw r26,40 + rcall 1613f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2351f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,40 + sbiw r26,40 + rcall 1613f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2351f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,60 + adiw r26,40 + rcall 1613f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2351f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,80 + sbiw r26,40 + rcall 1613f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2351f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,100 + adiw r26,40 + rcall 1613f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2351f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,120 + sbiw r26,40 + rcall 1613f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 1613f + rjmp 2826f +1613: + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + com r14 + com r15 + com r24 + com r25 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + movw r20,r6 + movw r22,r8 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,204 + andi r21,204 + andi r22,204 + andi r23,204 + lsr r9 + ror r8 + ror r7 + ror r6 + lsr r9 + ror r8 + ror r7 + ror r6 + ldi r19,51 + and r6,r19 + and r7,r19 + and r8,r19 + and r9,r19 + or r6,r20 + or r7,r21 + or r8,r22 + or r9,r23 + movw r20,r10 + movw r22,r12 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,238 + andi r21,238 + andi r22,238 + andi r23,238 + lsr r13 + ror r12 + ror r11 + ror r10 + lsr r13 + ror r12 + ror r11 + ror r10 + lsr r13 + ror r12 + ror r11 + ror r10 + ldi r17,17 + and r10,r17 + and r11,r17 + and r12,r17 + and r13,r17 + or r10,r20 + or r11,r21 + or r12,r22 + or r13,r23 + movw r20,r14 + movw r22,r24 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,136 + andi r21,136 + andi r22,136 + andi 
r23,136 + lsr r25 + ror r24 + ror r15 + ror r14 + ldi r16,119 + and r14,r16 + and r15,r16 + andi r24,119 + andi r25,119 + or r14,r20 + or r15,r21 + or r24,r22 + or r25,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r24 + and r0,r12 + eor r8,r0 + mov r0,r25 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r8 + and r0,r4 + eor r24,r0 + mov r0,r9 + and r0,r5 + eor r25,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r24 + or r0,r8 + eor r12,r0 + mov r0,r25 + or r0,r9 + eor r13,r0 + eor r2,r10 + eor r3,r11 + eor r4,r12 + eor r5,r13 + eor r6,r2 + eor r7,r3 + eor r8,r4 + eor r9,r5 + com r2 + com r3 + com r4 + com r5 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r24 + and r0,r8 + eor r12,r0 + mov r0,r25 + and r0,r9 + eor r13,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r1 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + or r5,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + mov r0,r9 + mov r9,r8 + mov r8,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r12 + rol r13 + adc r12,r1 + lsl r12 + rol r13 + adc r12,r1 + lsl r12 + rol r13 + adc r12,r1 + lsl r12 + rol r13 + adc r12,r1 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r14,r20 + eor r15,r21 + eor r24,r22 + eor r25,r23 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov 
r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + com r14 + com r15 + com r24 + com r25 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + movw r20,r6 + movw r22,r8 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + andi r20,85 + andi r21,85 + andi r22,85 + andi r23,85 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + mov r0,r12 + mov r12,r10 + mov r10,r0 + mov r0,r13 + mov r13,r11 + mov r11,r0 + movw r20,r10 + movw r22,r12 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r10 + eor r21,r11 + andi r20,85 + andi r21,85 + eor r10,r20 + eor r11,r21 + mov r22,r1 + mov r23,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + mov r0,r24 + mov r24,r14 + mov r14,r0 + mov r0,r25 + mov r25,r15 + mov r15,r0 + movw r20,r24 + lsr r21 + ror r20 + eor r20,r24 + eor r21,r25 + andi r20,85 + andi r21,85 + eor r24,r20 + eor r25,r21 + lsl r20 + rol r21 + eor r24,r20 + eor r25,r21 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r24 + and r0,r12 + eor r8,r0 + mov r0,r25 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r8 + and r0,r4 + eor r24,r0 + mov r0,r9 + and r0,r5 + eor r25,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r24 + or r0,r8 + eor r12,r0 + mov r0,r25 + or r0,r9 + eor r13,r0 + eor r2,r10 + eor r3,r11 + eor r4,r12 + eor r5,r13 + eor r6,r2 + eor r7,r3 + eor r8,r4 + eor r9,r5 + com r2 + com r3 + com r4 + com r5 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r24 + and r0,r8 + eor r12,r0 + mov r0,r25 + and r0,r9 + eor r13,r0 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r5 + adc r5,r1 + lsl r5 + adc r5,r1 + swap r6 + swap r7 + swap r8 + swap r9 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + mov r0,r1 + lsr r12 + ror r0 + lsr r12 + ror r0 + or r12,r0 + mov r0,r1 + lsr r13 + ror r0 + lsr r13 + ror r0 + or r13,r0 + ld r20,X+ + ld r21,X+ 
+ ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r14,r20 + eor r15,r21 + eor r24,r22 + eor r25,r23 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + com r14 + com r15 + com r24 + com r25 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + mov r0,r8 + mov r8,r6 + mov r6,r0 + mov r0,r9 + mov r9,r7 + mov r7,r0 + mov r0,r10 + mov r10,r11 + mov r11,r12 + mov r12,r13 + mov r13,r0 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + eor r14,r2 + eor r15,r3 + eor r24,r4 + eor r25,r5 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + ret +2351: + movw r30,r26 + sbiw r30,40 + push r5 + push r4 + push r3 + push r2 + push r9 + push r8 + push r7 + push r6 + ld r2,Z + ldd r3,Z+1 + ldd r4,Z+2 + ldd r5,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r16,Z+6 + ldd r17,Z+7 + movw r20,r26 + movw r22,r16 + movw r20,r22 + mov r22,r1 + mov r23,r1 + eor r20,r26 + eor r21,r27 + andi r20,51 + andi r21,51 + eor r26,r20 + eor r27,r21 + mov r22,r1 + mov r23,r1 + movw r22,r20 + mov r20,r1 + mov r21,r1 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,68 + andi r21,68 
+ andi r22,85 + andi r23,85 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + st Z,r26 + std Z+1,r27 + std Z+2,r16 + std Z+3,r17 + movw r20,r2 + movw r22,r4 + andi r20,51 + andi r21,51 + andi r22,51 + andi r23,51 + ldi r19,204 + and r2,r19 + and r3,r19 + and r4,r19 + and r5,r19 + or r4,r23 + or r5,r20 + or r2,r21 + or r3,r22 + movw r20,r4 + movw r22,r2 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r4 + eor r21,r5 + eor r22,r2 + eor r23,r3 + mov r20,r1 + andi r21,17 + andi r22,85 + andi r23,85 + eor r4,r20 + eor r5,r21 + eor r2,r22 + eor r3,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r4,r20 + eor r5,r21 + eor r2,r22 + eor r3,r23 + std Z+4,r4 + std Z+5,r5 + std Z+6,r2 + std Z+7,r3 + ldd r2,Z+8 + ldd r3,Z+9 + ldd r4,Z+10 + ldd r5,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r16,Z+14 + ldd r17,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r16 + adc r16,r1 + lsl r16 + adc r16,r1 + swap r17 + std Z+8,r26 + std Z+9,r27 + std Z+10,r16 + std Z+11,r17 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r5 + adc r5,r1 + lsl r5 + adc r5,r1 + std Z+12,r2 + std Z+13,r3 + std Z+14,r4 + std Z+15,r5 + ldd r2,Z+16 + ldd r3,Z+17 + ldd r4,Z+18 + ldd r5,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r16,Z+22 + ldd r17,Z+23 + movw r20,r26 + movw r22,r16 + andi r20,170 + andi r21,170 + andi r22,170 + andi r23,170 + andi r26,85 + andi r27,85 + andi r16,85 + andi r17,85 + or r26,r21 + or r27,r22 + or r16,r23 + or r17,r20 + std Z+16,r16 + std Z+17,r17 + std Z+18,r26 + std Z+19,r27 + movw r20,r2 + movw r22,r4 + andi r20,85 + andi r21,85 + andi r22,85 + andi r23,85 + ldi r19,170 + and r2,r19 + and r3,r19 + and r4,r19 + and r5,r19 + lsl r2 + rol r3 + rol r4 + rol r5 + adc r2,r1 + lsl r2 + rol r3 + rol r4 + rol r5 + adc r2,r1 + lsl r2 + rol r3 + rol r4 + rol r5 + adc r2,r1 + lsl r2 + rol r3 + rol r4 + rol r5 + adc r2,r1 + or r2,r20 + or r3,r21 + or r4,r22 + or r5,r23 + std Z+20,r5 + std Z+21,r2 + std Z+22,r3 + std Z+23,r4 + ldd r2,Z+24 + ldd r3,Z+25 + ldd r4,Z+26 + ldd r5,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r16,Z+30 + ldd r17,Z+31 + movw r20,r26 + movw r22,r16 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,3 + andi r21,3 + andi r22,3 + andi r23,3 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + lsr r23 + ror r22 + ror r21 + ror r20 + andi r20,120 + andi r21,120 + andi r22,120 + andi r23,120 + movw r6,r20 + movw r8,r22 + lsr r9 + ror r8 + ror r7 + ror r6 + lsr r9 + ror r8 + ror r7 + ror r6 + lsr r9 + ror r8 + ror r7 + ror r6 + lsr r9 + ror r8 + ror r7 + ror r6 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ldi r19,8 + and r6,r19 + and r7,r19 + and r8,r19 + and r9,r19 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + lsl r6 + rol r7 + rol r8 + rol r9 + lsl r6 + rol r7 + rol r8 + rol r9 + lsl r6 + rol r7 + rol r8 + rol r9 + lsl r6 + rol r7 + rol r8 + rol r9 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + andi r26,15 + andi r27,15 + andi r16,15 + andi r17,15 + or r26,r20 + or r27,r21 + or r16,r22 + or r17,r23 + std Z+24,r26 + std Z+25,r27 + std Z+26,r16 + std Z+27,r17 + movw r20,r4 + lsr 
r21 + ror r20 + lsr r21 + ror r20 + andi r20,48 + andi r21,48 + movw r26,r2 + movw r16,r4 + andi r26,1 + andi r27,1 + andi r16,1 + andi r17,1 + lsl r26 + rol r27 + rol r16 + rol r17 + lsl r26 + rol r27 + rol r16 + rol r17 + lsl r26 + rol r27 + rol r16 + rol r17 + or r26,r20 + or r27,r21 + movw r20,r4 + lsl r20 + rol r21 + lsl r20 + rol r21 + andi r20,192 + andi r21,192 + or r26,r20 + or r27,r21 + movw r20,r2 + andi r20,224 + andi r21,224 + lsr r21 + ror r20 + or r16,r20 + or r17,r21 + movw r20,r2 + movw r22,r4 + lsr r23 + ror r22 + ror r21 + ror r20 + andi r20,7 + andi r21,7 + andi r22,7 + andi r23,7 + or r26,r20 + or r27,r21 + or r16,r22 + or r17,r23 + ldi r19,16 + and r2,r19 + and r3,r19 + lsl r2 + rol r3 + lsl r2 + rol r3 + lsl r2 + rol r3 + or r16,r2 + or r17,r3 + std Z+28,r26 + std Z+29,r27 + std Z+30,r16 + std Z+31,r17 + ldd r2,Z+32 + ldd r3,Z+33 + ldd r4,Z+34 + ldd r5,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r16,Z+38 + ldd r17,Z+39 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r16 + std Z+35,r17 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r4 + mov r4,r5 + mov r5,r0 + lsl r4 + rol r5 + adc r4,r1 + lsl r4 + rol r5 + adc r4,r1 + std Z+36,r2 + std Z+37,r3 + std Z+38,r4 + std Z+39,r5 + pop r6 + pop r7 + pop r8 + pop r9 + pop r2 + pop r3 + pop r4 + pop r5 + movw r26,r30 + ret +2826: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + bst r2,0 + bld r20,0 + bst r6,0 + bld r20,1 + bst r10,0 + bld r20,2 + bst r14,0 + bld r20,3 + bst r2,1 + bld r20,4 + bst r6,1 + bld r20,5 + bst r10,1 + bld r20,6 + bst r14,1 + bld r20,7 + bst r2,2 + bld r21,0 + bst r6,2 + bld r21,1 + bst r10,2 + bld r21,2 + bst r14,2 + bld r21,3 + bst r2,3 + bld r21,4 + bst r6,3 + bld r21,5 + bst r10,3 + bld r21,6 + bst r14,3 + bld r21,7 + bst r2,4 + bld r22,0 + bst r6,4 + bld r22,1 + bst r10,4 + bld r22,2 + bst r14,4 + bld r22,3 + bst r2,5 + bld r22,4 + bst r6,5 + bld r22,5 + bst r10,5 + bld r22,6 + bst r14,5 + bld r22,7 + bst r2,6 + bld r23,0 + bst r6,6 + bld r23,1 + bst r10,6 + bld r23,2 + bst r14,6 + bld r23,3 + bst r2,7 + bld r23,4 + bst r6,7 + bld r23,5 + bst r10,7 + bld r23,6 + bst r14,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r3,0 + bld r20,0 + bst r7,0 + bld r20,1 + bst r11,0 + bld r20,2 + bst r15,0 + bld r20,3 + bst r3,1 + bld r20,4 + bst r7,1 + bld r20,5 + bst r11,1 + bld r20,6 + bst r15,1 + bld r20,7 + bst r3,2 + bld r21,0 + bst r7,2 + bld r21,1 + bst r11,2 + bld r21,2 + bst r15,2 + bld r21,3 + bst r3,3 + bld r21,4 + bst r7,3 + bld r21,5 + bst r11,3 + bld r21,6 + bst r15,3 + bld r21,7 + bst r3,4 + bld r22,0 + bst r7,4 + bld r22,1 + bst r11,4 + bld r22,2 + bst r15,4 + bld r22,3 + bst r3,5 + bld r22,4 + bst r7,5 + bld r22,5 + bst r11,5 + bld r22,6 + bst r15,5 + bld r22,7 + bst r3,6 + bld r23,0 + bst r7,6 + bld r23,1 + bst r11,6 + bld r23,2 + bst r15,6 + bld r23,3 + bst r3,7 + bld r23,4 + bst r7,7 + bld r23,5 + bst r11,7 + bld r23,6 + bst r15,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r4,0 + bld r20,0 + bst r8,0 + bld r20,1 + bst r12,0 + bld r20,2 + bst r24,0 + bld r20,3 + bst r4,1 + bld r20,4 + bst r8,1 + bld r20,5 + bst r12,1 + bld r20,6 + bst r24,1 + bld r20,7 + bst r4,2 + bld r21,0 + bst r8,2 + bld r21,1 + bst r12,2 + bld r21,2 + bst r24,2 + 
bld r21,3 + bst r4,3 + bld r21,4 + bst r8,3 + bld r21,5 + bst r12,3 + bld r21,6 + bst r24,3 + bld r21,7 + bst r4,4 + bld r22,0 + bst r8,4 + bld r22,1 + bst r12,4 + bld r22,2 + bst r24,4 + bld r22,3 + bst r4,5 + bld r22,4 + bst r8,5 + bld r22,5 + bst r12,5 + bld r22,6 + bst r24,5 + bld r22,7 + bst r4,6 + bld r23,0 + bst r8,6 + bld r23,1 + bst r12,6 + bld r23,2 + bst r24,6 + bld r23,3 + bst r4,7 + bld r23,4 + bst r8,7 + bld r23,5 + bst r12,7 + bld r23,6 + bst r24,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r5,0 + bld r20,0 + bst r9,0 + bld r20,1 + bst r13,0 + bld r20,2 + bst r25,0 + bld r20,3 + bst r5,1 + bld r20,4 + bst r9,1 + bld r20,5 + bst r13,1 + bld r20,6 + bst r25,1 + bld r20,7 + bst r5,2 + bld r21,0 + bst r9,2 + bld r21,1 + bst r13,2 + bld r21,2 + bst r25,2 + bld r21,3 + bst r5,3 + bld r21,4 + bst r9,3 + bld r21,5 + bst r13,3 + bld r21,6 + bst r25,3 + bld r21,7 + bst r5,4 + bld r22,0 + bst r9,4 + bld r22,1 + bst r13,4 + bld r22,2 + bst r25,4 + bld r22,3 + bst r5,5 + bld r22,4 + bst r9,5 + bld r22,5 + bst r13,5 + bld r22,6 + bst r25,5 + bld r22,7 + bst r5,6 + bld r23,0 + bst r9,6 + bld r23,1 + bst r13,6 + bld r23,2 + bst r25,6 + bld r23,3 + bst r5,7 + bld r23,4 + bst r9,7 + bld r23,5 + bst r13,7 + bld r23,6 + bst r25,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128t_encrypt, .-gift128t_encrypt + + .text +.global gift128t_decrypt + .type gift128t_decrypt, @function +gift128t_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 36 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r2,0 + bst r20,1 + bld r6,0 + bst r20,2 + bld r10,0 + bst r20,3 + bld r14,0 + bst r20,4 + bld r2,1 + bst r20,5 + bld r6,1 + bst r20,6 + bld r10,1 + bst r20,7 + bld r14,1 + bst r21,0 + bld r2,2 + bst r21,1 + bld r6,2 + bst r21,2 + bld r10,2 + bst r21,3 + bld r14,2 + bst r21,4 + bld r2,3 + bst r21,5 + bld r6,3 + bst r21,6 + bld r10,3 + bst r21,7 + bld r14,3 + bst r22,0 + bld r2,4 + bst r22,1 + bld r6,4 + bst r22,2 + bld r10,4 + bst r22,3 + bld r14,4 + bst r22,4 + bld r2,5 + bst r22,5 + bld r6,5 + bst r22,6 + bld r10,5 + bst r22,7 + bld r14,5 + bst r23,0 + bld r2,6 + bst r23,1 + bld r6,6 + bst r23,2 + bld r10,6 + bst r23,3 + bld r14,6 + bst r23,4 + bld r2,7 + bst r23,5 + bld r6,7 + bst r23,6 + bld r10,7 + bst r23,7 + bld r14,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r3,0 + bst r20,1 + bld r7,0 + bst r20,2 + bld r11,0 + bst r20,3 + bld r15,0 + bst r20,4 + bld r3,1 + bst r20,5 + bld r7,1 + bst r20,6 + bld r11,1 + bst r20,7 + bld r15,1 + bst r21,0 + bld r3,2 + bst r21,1 + bld r7,2 + bst r21,2 + bld r11,2 + bst r21,3 + bld r15,2 + bst r21,4 + bld r3,3 + bst r21,5 + bld r7,3 + bst r21,6 + bld r11,3 + bst r21,7 + bld r15,3 + bst r22,0 + bld r3,4 + bst r22,1 + bld r7,4 + bst r22,2 + bld r11,4 + bst r22,3 + bld r15,4 + bst r22,4 + bld r3,5 + bst r22,5 + bld r7,5 + bst r22,6 + bld r11,5 + bst r22,7 + bld r15,5 + bst r23,0 + bld r3,6 + bst r23,1 + 
bld r7,6 + bst r23,2 + bld r11,6 + bst r23,3 + bld r15,6 + bst r23,4 + bld r3,7 + bst r23,5 + bld r7,7 + bst r23,6 + bld r11,7 + bst r23,7 + bld r15,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r4,0 + bst r20,1 + bld r8,0 + bst r20,2 + bld r12,0 + bst r20,3 + bld r24,0 + bst r20,4 + bld r4,1 + bst r20,5 + bld r8,1 + bst r20,6 + bld r12,1 + bst r20,7 + bld r24,1 + bst r21,0 + bld r4,2 + bst r21,1 + bld r8,2 + bst r21,2 + bld r12,2 + bst r21,3 + bld r24,2 + bst r21,4 + bld r4,3 + bst r21,5 + bld r8,3 + bst r21,6 + bld r12,3 + bst r21,7 + bld r24,3 + bst r22,0 + bld r4,4 + bst r22,1 + bld r8,4 + bst r22,2 + bld r12,4 + bst r22,3 + bld r24,4 + bst r22,4 + bld r4,5 + bst r22,5 + bld r8,5 + bst r22,6 + bld r12,5 + bst r22,7 + bld r24,5 + bst r23,0 + bld r4,6 + bst r23,1 + bld r8,6 + bst r23,2 + bld r12,6 + bst r23,3 + bld r24,6 + bst r23,4 + bld r4,7 + bst r23,5 + bld r8,7 + bst r23,6 + bld r12,7 + bst r23,7 + bld r24,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r5,0 + bst r20,1 + bld r9,0 + bst r20,2 + bld r13,0 + bst r20,3 + bld r25,0 + bst r20,4 + bld r5,1 + bst r20,5 + bld r9,1 + bst r20,6 + bld r13,1 + bst r20,7 + bld r25,1 + bst r21,0 + bld r5,2 + bst r21,1 + bld r9,2 + bst r21,2 + bld r13,2 + bst r21,3 + bld r25,2 + bst r21,4 + bld r5,3 + bst r21,5 + bld r9,3 + bst r21,6 + bld r13,3 + bst r21,7 + bld r25,3 + bst r22,0 + bld r5,4 + bst r22,1 + bld r9,4 + bst r22,2 + bld r13,4 + bst r22,3 + bld r25,4 + bst r22,4 + bld r5,5 + bst r22,5 + bld r9,5 + bst r22,6 + bld r13,5 + bst r22,7 + bld r25,5 + bst r23,0 + bld r5,6 + bst r23,1 + bld r9,6 + bst r23,2 + bld r13,6 + bst r23,3 + bld r25,6 + bst r23,4 + bld r5,7 + bst r23,5 + bld r9,7 + bst r23,6 + bld r13,7 + bst r23,7 + bld r25,7 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r16,Z+14 + ldd r17,Z+15 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r16 + std Y+4,r17 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r16,Z+6 + ldd r17,Z+7 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r16 + std Y+8,r17 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r16,Z+10 + ldd r17,Z+11 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r16 + std Y+12,r17 + ld r26,Z + ldd r27,Z+1 + ldd r16,Z+2 + ldd r17,Z+3 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r16 + std Y+16,r17 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r26,hh8(table_1) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r19,40 + mov r26,r1 +375: + ldd r0,Y+13 + ldd r20,Y+9 + std Y+9,r0 + ldd r0,Y+5 + std Y+5,r20 + ldd r20,Y+1 + std Y+1,r0 + ldd r0,Y+14 + ldd r21,Y+10 + std Y+10,r0 + ldd r0,Y+6 + std Y+6,r21 + ldd r21,Y+2 + std Y+2,r0 + ldd r0,Y+15 + ldd r22,Y+11 + std Y+11,r0 + ldd r0,Y+7 + std Y+7,r22 + ldd r22,Y+3 + std Y+3,r0 + ldd r0,Y+16 + ldd r23,Y+12 + std Y+12,r0 + ldd r0,Y+8 + std Y+8,r23 + ldd r23,Y+4 + std Y+4,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r0 + 
lsr r21 + ror r20 + ror r0 + lsr r21 + ror r20 + ror r0 + lsr r21 + ror r20 + ror r0 + or r21,r0 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + std Y+13,r20 + std Y+14,r21 + std Y+15,r22 + std Y+16,r23 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ldd r0,Y+5 + eor r10,r0 + ldd r0,Y+6 + eor r11,r0 + ldd r0,Y+7 + eor r12,r0 + ldd r0,Y+8 + eor r13,r0 + ldi r20,128 + eor r25,r20 + dec r19 + mov r30,r19 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + eor r14,r20 + bst r2,1 + bld r0,0 + bst r5,0 + bld r2,1 + bst r2,6 + bld r5,0 + bst r4,1 + bld r2,6 + bst r5,4 + bld r4,1 + bst r2,7 + bld r5,4 + bst r3,1 + bld r2,7 + bst r5,2 + bld r3,1 + bst r4,6 + bld r5,2 + bst r4,5 + bld r4,6 + bst r5,5 + bld r4,5 + bst r5,7 + bld r5,5 + bst r3,7 + bld r5,7 + bst r3,3 + bld r3,7 + bst r3,2 + bld r3,3 + bst r4,2 + bld r3,2 + bst r4,4 + bld r4,2 + bst r2,5 + bld r4,4 + bst r5,1 + bld r2,5 + bst r5,6 + bld r5,1 + bst r4,7 + bld r5,6 + bst r3,5 + bld r4,7 + bst r5,3 + bld r3,5 + bst r3,6 + bld r5,3 + bst r4,3 + bld r3,6 + bst r3,4 + bld r4,3 + bst r2,3 + bld r3,4 + bst r3,0 + bld r2,3 + bst r2,2 + bld r3,0 + bst r4,0 + bld r2,2 + bst r2,4 + bld r4,0 + bst r0,0 + bld r2,4 + bst r6,0 + bld r0,0 + bst r7,0 + bld r6,0 + bst r7,2 + bld r7,0 + bst r9,2 + bld r7,2 + bst r9,6 + bld r9,2 + bst r9,7 + bld r9,6 + bst r8,7 + bld r9,7 + bst r8,5 + bld r8,7 + bst r6,5 + bld r8,5 + bst r6,1 + bld r6,5 + bst r0,0 + bld r6,1 + bst r6,2 + bld r0,0 + bst r9,0 + bld r6,2 + bst r7,6 + bld r9,0 + bst r9,3 + bld r7,6 + bst r8,6 + bld r9,3 + bst r9,5 + bld r8,6 + bst r6,7 + bld r9,5 + bst r8,1 + bld r6,7 + bst r6,4 + bld r8,1 + bst r7,1 + bld r6,4 + bst r0,0 + bld r7,1 + bst r6,3 + bld r0,0 + bst r8,0 + bld r6,3 + bst r7,4 + bld r8,0 + bst r7,3 + bld r7,4 + bst r8,2 + bld r7,3 + bst r9,4 + bld r8,2 + bst r7,7 + bld r9,4 + bst r8,3 + bld r7,7 + bst r8,4 + bld r8,3 + bst r7,5 + bld r8,4 + bst r0,0 + bld r7,5 + bst r6,6 + bld r0,0 + bst r9,1 + bld r6,6 + bst r0,0 + bld r9,1 + bst r10,0 + bld r0,0 + bst r12,0 + bld r10,0 + bst r12,4 + bld r12,0 + bst r12,5 + bld r12,4 + bst r11,5 + bld r12,5 + bst r11,3 + bld r11,5 + bst r13,2 + bld r11,3 + bst r10,6 + bld r13,2 + bst r10,1 + bld r10,6 + bst r11,0 + bld r10,1 + bst r12,2 + bld r11,0 + bst r10,4 + bld r12,2 + bst r12,1 + bld r10,4 + bst r11,4 + bld r12,1 + bst r12,3 + bld r11,4 + bst r13,4 + bld r12,3 + bst r12,7 + bld r13,4 + bst r13,5 + bld r12,7 + bst r11,7 + bld r13,5 + bst r13,3 + bld r11,7 + bst r13,6 + bld r13,3 + bst r10,7 + bld r13,6 + bst r13,1 + bld r10,7 + bst r11,6 + bld r13,1 + bst r10,3 + bld r11,6 + bst r13,0 + bld r10,3 + bst r12,6 + bld r13,0 + bst r10,5 + bld r12,6 + bst r11,1 + bld r10,5 + bst r11,2 + bld r11,1 + bst r10,2 + bld r11,2 + bst r0,0 + bld r10,2 + bst r14,0 + bld r0,0 + bst r25,0 + bld r14,0 + bst r25,6 + bld r25,0 + bst r15,7 + bld r25,6 + bst r14,3 + bld r15,7 + bst r0,0 + bld r14,3 + bst r14,1 + bld r0,0 + bst r24,0 + bld r14,1 + bst r25,4 + bld r24,0 + bst r25,7 + bld r25,4 + bst r14,7 + bld r25,7 + bst r0,0 + bld r14,7 + bst r14,2 + bld r0,0 + bst r15,0 + bld r14,2 + bst r25,2 + bld r15,0 + bst r15,6 + bld r25,2 + bst r15,3 + bld r15,6 + bst r0,0 + bld r15,3 + bst r14,4 + bld r0,0 + bst r25,1 + bld r14,4 + bst r24,6 + bld r25,1 + bst r15,5 + bld r24,6 + bst r24,3 + bld r15,5 + bst r0,0 + bld r24,3 + bst r14,5 + bld r0,0 + bst r24,1 + bld r14,5 + bst r24,4 + bld r24,1 + bst r25,5 + bld r24,4 + bst r24,7 + bld r25,5 + bst 
r0,0 + bld r24,7 + bst r14,6 + bld r0,0 + bst r15,1 + bld r14,6 + bst r24,2 + bld r15,1 + bst r15,4 + bld r24,2 + bst r25,3 + bld r15,4 + bst r0,0 + bld r25,3 + movw r20,r14 + movw r22,r24 + movw r14,r2 + movw r24,r4 + movw r2,r20 + movw r4,r22 + and r20,r6 + and r21,r7 + and r22,r8 + and r23,r9 + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + com r14 + com r15 + com r24 + com r25 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + cp r19,r1 + breq 791f + inc r26 + ldi r27,5 + cpse r26,r27 + rjmp 375b + mov r26,r1 + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rjmp 375b +791: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + bst r2,0 + bld r20,0 + bst r6,0 + bld r20,1 + bst r10,0 + bld r20,2 + bst r14,0 + bld r20,3 + bst r2,1 + bld r20,4 + bst r6,1 + bld r20,5 + bst r10,1 + bld r20,6 + bst r14,1 + bld r20,7 + bst r2,2 + bld r21,0 + bst r6,2 + bld r21,1 + bst r10,2 + bld r21,2 + bst r14,2 + bld r21,3 + bst r2,3 + bld r21,4 + bst r6,3 + bld r21,5 + bst r10,3 + bld r21,6 + bst r14,3 + bld r21,7 + bst r2,4 + bld r22,0 + bst r6,4 + bld r22,1 + bst r10,4 + bld r22,2 + bst r14,4 + bld r22,3 + bst r2,5 + bld r22,4 + bst r6,5 + bld r22,5 + bst r10,5 + bld r22,6 + bst r14,5 + bld r22,7 + bst r2,6 + bld r23,0 + bst r6,6 + bld r23,1 + bst r10,6 + bld r23,2 + bst r14,6 + bld r23,3 + bst r2,7 + bld r23,4 + bst r6,7 + bld r23,5 + bst r10,7 + bld r23,6 + bst r14,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r3,0 + bld r20,0 + bst r7,0 + bld r20,1 + bst r11,0 + bld r20,2 + bst r15,0 + bld r20,3 + bst r3,1 + bld r20,4 + bst r7,1 + bld r20,5 + bst r11,1 + bld r20,6 + bst r15,1 + bld r20,7 + bst r3,2 + bld r21,0 + bst r7,2 + bld r21,1 + bst r11,2 + bld r21,2 + bst r15,2 + bld r21,3 + bst r3,3 + bld r21,4 + bst r7,3 + bld r21,5 + bst r11,3 + bld r21,6 + bst r15,3 + bld r21,7 + bst r3,4 + bld r22,0 + bst r7,4 + bld r22,1 + bst r11,4 + bld r22,2 + bst r15,4 + bld r22,3 + bst r3,5 + bld r22,4 + bst r7,5 + bld r22,5 + bst r11,5 + bld r22,6 + bst r15,5 + bld r22,7 + bst r3,6 + bld r23,0 + bst r7,6 + bld r23,1 + bst r11,6 + bld r23,2 + bst r15,6 + bld r23,3 + bst r3,7 + bld r23,4 + bst r7,7 + bld r23,5 + bst r11,7 + bld r23,6 + bst r15,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r4,0 + bld r20,0 + bst r8,0 + bld r20,1 + bst r12,0 + bld r20,2 + bst r24,0 + bld r20,3 + bst r4,1 + bld r20,4 + bst r8,1 + bld r20,5 + bst r12,1 + bld r20,6 + bst r24,1 + bld r20,7 + bst r4,2 + bld r21,0 + bst r8,2 + bld r21,1 + bst r12,2 + bld r21,2 + bst r24,2 + bld r21,3 + bst r4,3 + bld r21,4 + bst r8,3 + bld r21,5 + bst r12,3 + bld r21,6 + bst r24,3 + bld r21,7 + bst r4,4 + bld r22,0 + bst r8,4 + bld r22,1 + bst r12,4 + bld r22,2 + bst r24,4 + bld r22,3 + bst r4,5 + bld r22,4 + bst r8,5 + bld r22,5 + bst r12,5 + bld r22,6 + bst r24,5 + bld r22,7 + bst r4,6 + bld r23,0 + bst r8,6 + bld r23,1 + bst r12,6 + bld r23,2 + bst r24,6 + bld r23,3 + bst r4,7 + bld r23,4 + bst r8,7 + bld r23,5 + bst r12,7 + bld r23,6 + bst r24,7 + bld r23,7 + st 
X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r5,0 + bld r20,0 + bst r9,0 + bld r20,1 + bst r13,0 + bld r20,2 + bst r25,0 + bld r20,3 + bst r5,1 + bld r20,4 + bst r9,1 + bld r20,5 + bst r13,1 + bld r20,6 + bst r25,1 + bld r20,7 + bst r5,2 + bld r21,0 + bst r9,2 + bld r21,1 + bst r13,2 + bld r21,2 + bst r25,2 + bld r21,3 + bst r5,3 + bld r21,4 + bst r9,3 + bld r21,5 + bst r13,3 + bld r21,6 + bst r25,3 + bld r21,7 + bst r5,4 + bld r22,0 + bst r9,4 + bld r22,1 + bst r13,4 + bld r22,2 + bst r25,4 + bld r22,3 + bst r5,5 + bld r22,4 + bst r9,5 + bld r22,5 + bst r13,5 + bld r22,6 + bst r25,5 + bld r22,7 + bst r5,6 + bld r23,0 + bst r9,6 + bld r23,1 + bst r13,6 + bld r23,2 + bst r25,6 + bld r23,3 + bst r5,7 + bld r23,4 + bst r9,7 + bld r23,5 + bst r13,7 + bld r23,6 + bst r25,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128t_decrypt, .-gift128t_decrypt + +#endif + +#endif diff --git a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-util.h b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-util.h +++ b/estate/Implementations/crypto_aead/estatetwegift128v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/aead-common.c b/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
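The composed rotation macros added to internal-util.h above rely on simple identities: a left rotation by n is a left rotation by the nearest multiple of 8 followed by a short rotation back in the other direction. The following standalone sketch (illustrative only; the helpers rotl32/rotr32 are stand-ins, not names from this tree) checks a few of those identities on any host compiler:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Plain 32-bit rotations, valid for 1 <= b <= 31 */
static uint32_t rotl32(uint32_t x, unsigned b) { return (x << b) | (x >> (32u - b)); }
static uint32_t rotr32(uint32_t x, unsigned b) { return (x >> b) | (x << (32u - b)); }

int main(void)
{
    uint32_t x = 0x12345678;
    /* leftRotate5(a)  == rightRotate3(leftRotate8(a)) */
    assert(rotl32(x, 5) == rotr32(rotl32(x, 8), 3));
    /* leftRotate13(a) == rightRotate3(leftRotate16(a)) */
    assert(rotl32(x, 13) == rotr32(rotl32(x, 16), 3));
    /* leftRotate29(a) == rightRotate3(a) */
    assert(rotl32(x, 29) == rotr32(x, 3));
    printf("composed rotations agree with direct rotations\n");
    return 0;
}

On AVR the 1-bit and 8-bit steps map to cheap instruction sequences, which is why only those two building blocks are composed.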
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/aead-common.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. 
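The aead_check_tag and aead_check_tag_precheck implementations in the deleted aead-common.c above compare tags without branching on secret data: the byte-wise XOR differences are ORed into an accumulator, and (accum - 1) >> 8 turns "no difference" into an all-ones mask. A minimal sketch of just that mask step follows; it assumes an arithmetic right shift of negative int values, the same assumption the original code makes, and the helper name tag_mask is hypothetical.

#include <assert.h>

/* Returns -1 (all ones) if the two tags are equal, 0 otherwise */
static int tag_mask(const unsigned char *t1, const unsigned char *t2, unsigned n)
{
    int accum = 0;
    while (n > 0) {
        accum |= (*t1++ ^ *t2++);
        --n;
    }
    return (accum - 1) >> 8;
}

int main(void)
{
    unsigned char a[4] = {1, 2, 3, 4};
    unsigned char b[4] = {1, 2, 3, 4};
    unsigned char c[4] = {1, 2, 3, 5};
    assert(tag_mask(a, b, 4) == -1); /* match: plaintext is kept, ~(-1) == 0 is returned */
    assert(tag_mask(a, c, 4) == 0);  /* mismatch: plaintext is zeroed, ~0 == -1 is returned */
    return 0;
}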
- */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. 
- */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. 
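The aead_cipher_t meta-information block declared above is how the rest of the suite names, sizes and dispatches each algorithm. A minimal usage sketch, assuming a build in which the forkae_paef_128_288_cipher table (declared in the forkae.h shown later in this patch) is available to link against:

#include <stdio.h>
#include "forkae.h"

int main(void)
{
    const aead_cipher_t *cipher = &forkae_paef_128_288_cipher;
    unsigned char key[16] = {0};        /* cipher->key_len is 16 */
    unsigned char nonce[13] = {0};      /* cipher->nonce_len is 13 */
    unsigned char msg[4] = {1, 2, 3, 4};
    unsigned char ct[sizeof(msg) + 16]; /* ciphertext plus 16-byte tag */
    unsigned long long clen = 0;

    if (cipher->encrypt(ct, &clen, msg, sizeof(msg), NULL, 0, NULL, nonce, key) == 0)
        printf("%s produced %llu bytes of output\n", cipher->name, clen);
    return 0;
}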
- */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/api.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/api.h deleted file mode 100644 index 3818b25..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 6 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/encrypt.c b/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/encrypt.c deleted file mode 100644 index 3741901..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "forkae.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return forkae_paef_128_192_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return forkae_paef_128_192_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/forkae.c b/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/forkae.c deleted file mode 100644 index 4a9671a..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/forkae.c +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "forkae.h" -#include "internal-forkskinny.h" -#include "internal-util.h" -#include - -aead_cipher_t const forkae_paef_64_192_cipher = { - "PAEF-ForkSkinny-64-192", - FORKAE_PAEF_64_192_KEY_SIZE, - FORKAE_PAEF_64_192_NONCE_SIZE, - FORKAE_PAEF_64_192_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_64_192_aead_encrypt, - forkae_paef_64_192_aead_decrypt -}; - -aead_cipher_t const forkae_paef_128_192_cipher = { - "PAEF-ForkSkinny-128-192", - FORKAE_PAEF_128_192_KEY_SIZE, - FORKAE_PAEF_128_192_NONCE_SIZE, - FORKAE_PAEF_128_192_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_128_192_aead_encrypt, - forkae_paef_128_192_aead_decrypt -}; - -aead_cipher_t const forkae_paef_128_256_cipher = { - "PAEF-ForkSkinny-128-256", - FORKAE_PAEF_128_256_KEY_SIZE, - FORKAE_PAEF_128_256_NONCE_SIZE, - FORKAE_PAEF_128_256_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_128_256_aead_encrypt, - forkae_paef_128_256_aead_decrypt -}; - -aead_cipher_t const forkae_paef_128_288_cipher = { - "PAEF-ForkSkinny-128-288", - FORKAE_PAEF_128_288_KEY_SIZE, - FORKAE_PAEF_128_288_NONCE_SIZE, - FORKAE_PAEF_128_288_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_128_288_aead_encrypt, - forkae_paef_128_288_aead_decrypt -}; - -aead_cipher_t const forkae_saef_128_192_cipher = { - "SAEF-ForkSkinny-128-192", - FORKAE_SAEF_128_192_KEY_SIZE, - FORKAE_SAEF_128_192_NONCE_SIZE, - FORKAE_SAEF_128_192_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_saef_128_192_aead_encrypt, - forkae_saef_128_192_aead_decrypt -}; - -aead_cipher_t const forkae_saef_128_256_cipher = { - "SAEF-ForkSkinny-128-256", - FORKAE_SAEF_128_256_KEY_SIZE, - FORKAE_SAEF_128_256_NONCE_SIZE, - FORKAE_SAEF_128_256_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_saef_128_256_aead_encrypt, - forkae_saef_128_256_aead_decrypt -}; - -/* PAEF-ForkSkinny-64-192 */ -#define FORKAE_ALG_NAME forkae_paef_64_192 -#define FORKAE_BLOCK_SIZE 8 -#define FORKAE_NONCE_SIZE FORKAE_PAEF_64_192_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 2 -#define FORKAE_TWEAKEY_SIZE 24 -#define FORKAE_BLOCK_FUNC forkskinny_64_192 -#include "internal-forkae-paef.h" - -/* PAEF-ForkSkinny-128-192 */ -#define FORKAE_ALG_NAME forkae_paef_128_192 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_PAEF_128_192_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 2 -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-paef.h" - -/* PAEF-ForkSkinny-128-256 */ -#define FORKAE_ALG_NAME forkae_paef_128_256 -#define FORKAE_BLOCK_SIZE 16 
-#define FORKAE_NONCE_SIZE FORKAE_PAEF_128_256_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 2 -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-paef.h" - -/* PAEF-ForkSkinny-128-288 */ -#define FORKAE_ALG_NAME forkae_paef_128_288 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_PAEF_128_288_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 7 -#define FORKAE_TWEAKEY_SIZE 48 -#define FORKAE_BLOCK_FUNC forkskinny_128_384 -#include "internal-forkae-paef.h" - -/* SAEF-ForkSkinny-128-192 */ -#define FORKAE_ALG_NAME forkae_saef_128_192 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_SAEF_128_192_NONCE_SIZE -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_TWEAKEY_REDUCED_SIZE 24 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-saef.h" - -/* SAEF-ForkSkinny-128-256 */ -#define FORKAE_ALG_NAME forkae_saef_128_256 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_SAEF_128_256_NONCE_SIZE -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_TWEAKEY_REDUCED_SIZE 32 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-saef.h" diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/forkae.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/forkae.h deleted file mode 100644 index 3e27b50..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/forkae.h +++ /dev/null @@ -1,551 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_FORKAE_H -#define LWCRYPTO_FORKAE_H - -#include "aead-common.h" - -/** - * \file forkae.h - * \brief ForkAE authenticated encryption algorithm family. - * - * ForkAE is a family of authenticated encryption algorithms based on a - * modified version of the SKINNY tweakable block cipher. The modifications - * introduce "forking" where each input block produces two output blocks - * for use in encryption and authentication. There are six members in - * the ForkAE family: - * - * \li PAEF-ForkSkinny-64-192 has a 128-bit key, a 48-bit nonce, and a - * 64-bit authentication tag. The associated data and plaintext are - * limited to 2^16 bytes. - * \li PAEF-ForkSkinny-128-192 has a 128-bit key, a 48-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext are - * limited to 2^17 bytes.
- * \li PAEF-ForkSkinny-128-256 has a 128-bit key, a 112-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext are - * limited to 2^17 bytes. - * \li PAEF-ForkSkinny-128-288 has a 128-bit key, a 104-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext are - * limited to 2^57 bytes. This is the primary member of the family. - * \li SAEF-ForkSkinny-128-192 has a 128-bit key, a 56-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext may be - * unlimited in size. - * \li SAEF-ForkSkinny-128-256 has a 128-bit key, a 120-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext may be - * unlimited in size. - * - * The PAEF variants support parallel encryption and decryption for - * higher throughput. The SAEF variants encrypt or decrypt blocks - * sequentially. - * - * ForkAE is designed to be efficient on small packet sizes so most of - * the PAEF algorithms have a limit of 64k or 128k on the amount of - * payload in a single packet. Obviously the input can be split into - * separate packets for larger amounts of data. - * - * References: https://www.esat.kuleuven.be/cosic/forkae/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for PAEF-ForkSkinny-64-192. - */ -#define FORKAE_PAEF_64_192_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-64-192. - */ -#define FORKAE_PAEF_64_192_TAG_SIZE 8 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-64-192. - */ -#define FORKAE_PAEF_64_192_NONCE_SIZE 6 - -/** - * \brief Size of the key for PAEF-ForkSkinny-128-192. - */ -#define FORKAE_PAEF_128_192_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-128-192. - */ -#define FORKAE_PAEF_128_192_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-128-192. - */ -#define FORKAE_PAEF_128_192_NONCE_SIZE 6 - -/** - * \brief Size of the key for PAEF-ForkSkinny-128-256. - */ -#define FORKAE_PAEF_128_256_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-128-256. - */ -#define FORKAE_PAEF_128_256_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-128-256. - */ -#define FORKAE_PAEF_128_256_NONCE_SIZE 14 - -/** - * \brief Size of the key for PAEF-ForkSkinny-128-288. - */ -#define FORKAE_PAEF_128_288_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-128-288. - */ -#define FORKAE_PAEF_128_288_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-128-288. - */ -#define FORKAE_PAEF_128_288_NONCE_SIZE 13 - -/** - * \brief Size of the key for SAEF-ForkSkinny-128-192. - */ -#define FORKAE_SAEF_128_192_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SAEF-ForkSkinny-128-192. - */ -#define FORKAE_SAEF_128_192_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SAEF-ForkSkinny-128-192. - */ -#define FORKAE_SAEF_128_192_NONCE_SIZE 7 - -/** - * \brief Size of the key for SAEF-ForkSkinny-128-256. - */ -#define FORKAE_SAEF_128_256_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SAEF-ForkSkinny-128-256. - */ -#define FORKAE_SAEF_128_256_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SAEF-ForkSkinny-128-256. - */ -#define FORKAE_SAEF_128_256_NONCE_SIZE 15 - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-64-192 cipher. - */ -extern aead_cipher_t const forkae_paef_64_192_cipher; - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-128-192 cipher.
- */ -extern aead_cipher_t const forkae_paef_128_192_cipher; - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-128-256 cipher. - */ -extern aead_cipher_t const forkae_paef_128_256_cipher; - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-128-288 cipher. - */ -extern aead_cipher_t const forkae_paef_128_288_cipher; - -/** - * \brief Meta-information block for the SAEF-ForkSkinny-128-192 cipher. - */ -extern aead_cipher_t const forkae_saef_128_192_cipher; - -/** - * \brief Meta-information block for the SAEF-ForkSkinny-128-256 cipher. - */ -extern aead_cipher_t const forkae_saef_128_256_cipher; - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-64-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_64_192_aead_decrypt() - */ -int forkae_paef_64_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-64-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_paef_64_192_aead_encrypt() - */ -int forkae_paef_64_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-128-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. 
- * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_128_192_aead_decrypt() - */ -int forkae_paef_128_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-128-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_paef_128_192_aead_encrypt() - */ -int forkae_paef_128_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-128-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 14 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_128_256_aead_decrypt() - */ -int forkae_paef_128_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-128-256. 
- * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 14 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_paef_128_256_aead_encrypt() - */ -int forkae_paef_128_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-128-288. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 13 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_128_288_aead_decrypt() - */ -int forkae_paef_128_288_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-128-288. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 13 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- * - * \sa forkae_paef_128_288_aead_encrypt() - */ -int forkae_paef_128_288_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SAEF-ForkSkinny-128-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 7 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_saef_128_192_aead_decrypt() - */ -int forkae_saef_128_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SAEF-ForkSkinny-128-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 7 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_saef_128_192_aead_encrypt() - */ -int forkae_saef_128_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SAEF-ForkSkinny-128-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. 
- * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_saef_128_256_aead_decrypt() - */ -int forkae_saef_128_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SAEF-ForkSkinny-128-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_saef_128_256_aead_encrypt() - */ -int forkae_saef_128_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-forkae-paef.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-forkae-paef.h deleted file mode 100644 index 6f57b2b..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-forkae-paef.h +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
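The forkae.h prototypes above pair each encrypt call with a decrypt call that returns 0 only when the authentication tag verifies. A round-trip sketch for the primary family member, PAEF-ForkSkinny-128-288 (16-byte key, 13-byte nonce, 16-byte tag), again assuming the ForkAE implementation is available to compile and link against:

#include <assert.h>
#include <string.h>
#include "forkae.h"

int main(void)
{
    unsigned char key[FORKAE_PAEF_128_288_KEY_SIZE] = {0};
    unsigned char nonce[FORKAE_PAEF_128_288_NONCE_SIZE] = {0};
    unsigned char msg[5] = {1, 2, 3, 4, 5};
    unsigned char ct[sizeof(msg) + FORKAE_PAEF_128_288_TAG_SIZE];
    unsigned char out[sizeof(msg)];
    unsigned long long clen = 0, mlen = 0;

    assert(forkae_paef_128_288_aead_encrypt(ct, &clen, msg, sizeof(msg),
                                            NULL, 0, NULL, nonce, key) == 0);
    assert(clen == sizeof(msg) + FORKAE_PAEF_128_288_TAG_SIZE);

    assert(forkae_paef_128_288_aead_decrypt(out, &mlen, NULL, ct, clen,
                                            NULL, 0, nonce, key) == 0);
    assert(mlen == sizeof(msg) && memcmp(out, msg, sizeof(msg)) == 0);

    ct[0] ^= 1; /* corrupt the ciphertext: the tag check must now fail */
    assert(forkae_paef_128_288_aead_decrypt(out, &mlen, NULL, ct, clen,
                                            NULL, 0, nonce, key) != 0);
    return 0;
}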
- */ - -/* We expect a number of macros to be defined before this file - * is included to configure the underlying ForkAE PAEF variant. - * - * FORKAE_ALG_NAME Name of the FORKAE algorithm; e.g. forkae_paef_128_256 - * FORKAE_BLOCK_SIZE Size of the block for the cipher (8 or 16 bytes). - * FORKAE_NONCE_SIZE Size of the nonce for the cipher in bytes. - * FORKAE_COUNTER_SIZE Size of the counter value for the cipher in bytes. - * FORKAE_TWEAKEY_SIZE Size of the tweakey for the underlying forked cipher. - * FORKAE_BLOCK_FUNC Name of the block function; e.g. forkskinny_128_256 - */ -#if defined(FORKAE_ALG_NAME) - -#define FORKAE_CONCAT_INNER(name,suffix) name##suffix -#define FORKAE_CONCAT(name,suffix) FORKAE_CONCAT_INNER(name,suffix) - -/* Limit on the amount of data we can process based on the counter size */ -#define FORKAE_PAEF_DATA_LIMIT \ - ((unsigned long long)((1ULL << (FORKAE_COUNTER_SIZE * 8)) * \ - (FORKAE_BLOCK_SIZE / 8)) - FORKAE_BLOCK_SIZE) - -/* Processes the associated data in PAEF mode */ -STATIC_INLINE void FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter) - (unsigned char tweakey[FORKAE_TWEAKEY_SIZE], - unsigned long long counter, unsigned char domain) -{ - unsigned posn; - counter |= (((unsigned long long)domain) << (FORKAE_COUNTER_SIZE * 8 - 3)); - for (posn = 0; posn < FORKAE_COUNTER_SIZE; ++posn) { - tweakey[16 + FORKAE_NONCE_SIZE + FORKAE_COUNTER_SIZE - 1 - posn] = - (unsigned char)counter; - counter >>= 8; - } -} - -/* Check that the last block is padded correctly; -1 if ok, 0 if not */ -STATIC_INLINE int FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (const unsigned char *block, unsigned len) -{ - int check = block[0] ^ 0x80; - while (len > 1) { - --len; - check |= block[len]; - } - return (check - 1) >> 8; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_encrypt) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - unsigned long long counter; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + FORKAE_BLOCK_SIZE; - - /* Validate the size of the associated data and plaintext as there - * is a limit on the size of the PAEF counter field */ - if (adlen > FORKAE_PAEF_DATA_LIMIT || mlen > FORKAE_PAEF_DATA_LIMIT) - return -2; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - - /* Tag value starts at zero. 
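FORKAE_PAEF_DATA_LIMIT and the _set_counter helper above encode the PAEF restrictions: a FORKAE_COUNTER_SIZE-byte big-endian block counter carries a 3-bit domain separator in its top bits, which is where the roughly 2^16, 2^17 and 2^57 byte limits quoted in forkae.h come from. A standalone sketch of the same packing and limit arithmetic (the helper names here are hypothetical mirrors, not part of the library):

#include <assert.h>
#include <stdio.h>

/* Mirror of FORKAE_PAEF_DATA_LIMIT for a given counter and block size in bytes */
static unsigned long long paef_data_limit(unsigned counter_size, unsigned block_size)
{
    return ((1ULL << (counter_size * 8)) * (block_size / 8)) - block_size;
}

/* Mirror of _set_counter: big-endian counter with the domain in the top 3 bits */
static void paef_pack_counter(unsigned char *out, unsigned counter_size,
                              unsigned long long counter, unsigned char domain)
{
    unsigned posn;
    counter |= ((unsigned long long)domain) << (counter_size * 8 - 3);
    for (posn = 0; posn < counter_size; ++posn) {
        out[counter_size - 1 - posn] = (unsigned char)counter;
        counter >>= 8;
    }
}

int main(void)
{
    unsigned char ctr[2];
    paef_pack_counter(ctr, 2, 1, 1);          /* counter 1 with domain 1 */
    assert(ctr[0] == 0x20 && ctr[1] == 0x01); /* domain sits in the top 3 bits */

    /* PAEF-ForkSkinny-64-192: 2-byte counter, 8-byte blocks, about 2^16 bytes */
    printf("64-192 data limit: %llu bytes\n", paef_data_limit(2, 8));
    /* PAEF-ForkSkinny-128-192: 2-byte counter, 16-byte blocks, about 2^17 bytes */
    printf("128-192 data limit: %llu bytes\n", paef_data_limit(2, 16));
    return 0;
}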
We will XOR this with all of the - * intermediate tag values that are calculated for each block */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - counter = 1; - while (adlen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 0); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - ++counter; - } - if (adlen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 1); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } else if (adlen != 0 || mlen == 0) { - unsigned temp = (unsigned)adlen; - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, sizeof(block) - temp - 1); - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 3); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, block); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } - - /* If there is no message payload, then generate the tag and we are done */ - if (!mlen) { - memcpy(c, tag, sizeof(tag)); - return 0; - } - - /* Encrypt all plaintext blocks except the last */ - counter = 1; - while (mlen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 4); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, m); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - mlen -= FORKAE_BLOCK_SIZE; - ++counter; - } - - /* Encrypt the last block and generate the final authentication tag */ - if (mlen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 5); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, m); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, FORKAE_BLOCK_SIZE); - } else { - unsigned temp = (unsigned)mlen; - memcpy(block, m, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, sizeof(block) - temp - 1); - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 7); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, temp); - } - return 0; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_decrypt) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - unsigned char *mtemp = m; - unsigned long long counter; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < FORKAE_BLOCK_SIZE) - return -1; - clen -= FORKAE_BLOCK_SIZE; - *mlen = clen; - - /* Validate the size of the associated data and plaintext as there - * is a limit on the size of the PAEF counter field */ - if (adlen > FORKAE_PAEF_DATA_LIMIT || clen > FORKAE_PAEF_DATA_LIMIT) - return -2; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - - /* Tag value starts at zero. 
We will XOR this with all of the - * intermediate tag values that are calculated for each block */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - counter = 1; - while (adlen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 0); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - ++counter; - } - if (adlen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 1); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } else if (adlen != 0 || clen == 0) { - unsigned temp = (unsigned)adlen; - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, sizeof(block) - temp - 1); - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 3); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, block); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } - - /* If there is no message payload, then check the tag and we are done */ - if (!clen) - return aead_check_tag(m, clen, tag, c, sizeof(tag)); - - /* Decrypt all ciphertext blocks except the last */ - counter = 1; - while (clen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 4); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, c); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - clen -= FORKAE_BLOCK_SIZE; - ++counter; - } - - /* Decrypt the last block and check the final authentication tag */ - if (clen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 5); - lw_xor_block_2_src(m, c, tag, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, m); - return aead_check_tag - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, sizeof(tag)); - } else { - unsigned temp = (unsigned)clen; - unsigned char block2[FORKAE_BLOCK_SIZE]; - int check; - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 7); - lw_xor_block_2_src(block2, tag, c, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt) - (tweakey, block2, block, block2); - check = FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (block2 + temp, FORKAE_BLOCK_SIZE - temp); - memcpy(m, block2, temp); - return aead_check_tag_precheck - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, temp, check); - } -} - -#endif /* FORKAE_ALG_NAME */ - -/* Now undefine everything so that we can include this file again for - * another variant on the ForkAE PAEF algorithm */ -#undef FORKAE_ALG_NAME -#undef FORKAE_BLOCK_SIZE -#undef FORKAE_NONCE_SIZE -#undef FORKAE_COUNTER_SIZE -#undef FORKAE_TWEAKEY_SIZE -#undef FORKAE_BLOCK_FUNC -#undef FORKAE_CONCAT_INNER -#undef FORKAE_CONCAT -#undef FORKAE_PAEF_DATA_LIMIT diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-forkae-saef.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-forkae-saef.h deleted file mode 100644 index 768bba4..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-forkae-saef.h +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -/* We expect a number of macros to be defined before this file - * is included to configure the underlying ForkAE SAEF variant. - * - * FORKAE_ALG_NAME Name of the FORKAE algorithm; e.g. forkae_saef_128_256 - * FORKAE_BLOCK_SIZE Size of the block for the cipher (8 or 16 bytes). - * FORKAE_NONCE_SIZE Size of the nonce for the cipher in bytes. - * FORKAE_TWEAKEY_SIZE Size of the tweakey for the underlying forked cipher. - * FORKAE_REDUCED_TWEAKEY_SIZE Size of the reduced tweakey without padding. - * FORKAE_BLOCK_FUNC Name of the block function; e.g. forkskinny_128_256 - */ -#if defined(FORKAE_ALG_NAME) - -#define FORKAE_CONCAT_INNER(name,suffix) name##suffix -#define FORKAE_CONCAT(name,suffix) FORKAE_CONCAT_INNER(name,suffix) - -/* Check that the last block is padded correctly; -1 if ok, 0 if not */ -STATIC_INLINE int FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (const unsigned char *block, unsigned len) -{ - int check = block[0] ^ 0x80; - while (len > 1) { - --len; - check |= block[len]; - } - return (check - 1) >> 8; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_encrypt) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + FORKAE_BLOCK_SIZE; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] = 0x08; - - /* Tag value starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - if (adlen > 0 || mlen == 0) { - while (adlen > FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - } - if (mlen == 0) - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x02; - if (adlen == FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - 
FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } else if (adlen != 0 || mlen == 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(tag, ad, temp); - tag[temp] ^= 0x80; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } - } - - /* If there is no message payload, then generate the tag and we are done */ - if (!mlen) { - memcpy(c, tag, sizeof(tag)); - return 0; - } - - /* Encrypt all plaintext blocks except the last */ - while (mlen > FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, m, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(tag, block, FORKAE_BLOCK_SIZE); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - mlen -= FORKAE_BLOCK_SIZE; - } - - /* Encrypt the last block and generate the final authentication tag */ - if (mlen == FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, m, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, FORKAE_BLOCK_SIZE); - } else { - unsigned temp = (unsigned)mlen; - memcpy(block, tag, FORKAE_BLOCK_SIZE); - lw_xor_block(block, m, temp); - block[temp] ^= 0x80; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x05; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, temp); - } - return 0; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_decrypt) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < FORKAE_BLOCK_SIZE) - return -1; - clen -= FORKAE_BLOCK_SIZE; - *mlen = clen; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] = 0x08; - - /* Tag value starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - if (adlen > 0 || clen == 0) { - while (adlen > FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - } - if (clen == 0) - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x02; - if (adlen == FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } else if (adlen != 0 || clen == 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(tag, ad, temp); - 
tag[temp] ^= 0x80; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } - } - - /* If there is no message payload, then check the tag and we are done */ - if (!clen) - return aead_check_tag(m, clen, tag, c, sizeof(tag)); - - /* Decrypt all ciphertext blocks except the last */ - while (clen > FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, c, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, block); - lw_xor_block(m, tag, FORKAE_BLOCK_SIZE); - memcpy(tag, block, FORKAE_BLOCK_SIZE); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - clen -= FORKAE_BLOCK_SIZE; - } - - /* Decrypt the last block and check the final authentication tag */ - if (clen == FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, c, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, block); - lw_xor_block(m, tag, FORKAE_BLOCK_SIZE); - return aead_check_tag - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, FORKAE_BLOCK_SIZE); - } else { - unsigned temp = (unsigned)clen; - unsigned char mblock[FORKAE_BLOCK_SIZE]; - int check; - lw_xor_block_2_src(block, c, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x05; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt) - (tweakey, mblock, block, block); - lw_xor_block(mblock, tag, FORKAE_BLOCK_SIZE); - memcpy(m, mblock, temp); - check = FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (mblock + temp, FORKAE_BLOCK_SIZE - temp); - return aead_check_tag_precheck - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, temp, check); - } -} - -#endif /* FORKAE_ALG_NAME */ - -/* Now undefine everything so that we can include this file again for - * another variant on the ForkAE SAEF algorithm */ -#undef FORKAE_ALG_NAME -#undef FORKAE_BLOCK_SIZE -#undef FORKAE_NONCE_SIZE -#undef FORKAE_COUNTER_SIZE -#undef FORKAE_TWEAKEY_SIZE -#undef FORKAE_TWEAKEY_REDUCED_SIZE -#undef FORKAE_BLOCK_FUNC -#undef FORKAE_CONCAT_INNER -#undef FORKAE_CONCAT diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-forkskinny.c b/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-forkskinny.c deleted file mode 100644 index b050ff1..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-forkskinny.c +++ /dev/null @@ -1,988 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-forkskinny.h" -#include "internal-skinnyutil.h" - -/** - * \brief 7-bit round constants for all ForkSkinny block ciphers. - */ -static unsigned char const RC[87] = { - 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7e, 0x7d, - 0x7b, 0x77, 0x6f, 0x5f, 0x3e, 0x7c, 0x79, 0x73, - 0x67, 0x4f, 0x1e, 0x3d, 0x7a, 0x75, 0x6b, 0x57, - 0x2e, 0x5c, 0x38, 0x70, 0x61, 0x43, 0x06, 0x0d, - 0x1b, 0x37, 0x6e, 0x5d, 0x3a, 0x74, 0x69, 0x53, - 0x26, 0x4c, 0x18, 0x31, 0x62, 0x45, 0x0a, 0x15, - 0x2b, 0x56, 0x2c, 0x58, 0x30, 0x60, 0x41, 0x02, - 0x05, 0x0b, 0x17, 0x2f, 0x5e, 0x3c, 0x78, 0x71, - 0x63, 0x47, 0x0e, 0x1d, 0x3b, 0x76, 0x6d, 0x5b, - 0x36, 0x6c, 0x59, 0x32, 0x64, 0x49, 0x12, 0x25, - 0x4a, 0x14, 0x29, 0x52, 0x24, 0x48, 0x10 -}; - -/** - * \brief Number of rounds of ForkSkinny-128-256 before forking. - */ -#define FORKSKINNY_128_256_ROUNDS_BEFORE 21 - -/** - * \brief Number of rounds of ForkSkinny-128-256 after forking. - */ -#define FORKSKINNY_128_256_ROUNDS_AFTER 27 - -/** - * \brief State information for ForkSkinny-128-256. - */ -typedef struct -{ - uint32_t TK1[4]; /**< First part of the tweakey */ - uint32_t TK2[4]; /**< Second part of the tweakey */ - uint32_t S[4]; /**< Current block state */ - -} forkskinny_128_256_state_t; - -/** - * \brief Applies one round of ForkSkinny-128-256. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_128_256_round - (forkskinny_128_256_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Apply the S-box to all cells in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(state->TK1); - skinny128_permute_tk(state->TK2); - skinny128_LFSR2(state->TK2[0]); - skinny128_LFSR2(state->TK2[1]); -} - -void forkskinny_128_256_encrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_256_state_t state; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Run all of the rounds before the forking point */ - for (round = 0; round < FORKSKINNY_128_256_ROUNDS_BEFORE; ++round) { - forkskinny_128_256_round(&state, round); - } - - /* Determine which output blocks we need */ - if (output_left && output_right) { - /* We need both outputs so save the state at the forking point */ - uint32_t F[4]; - F[0] = state.S[0]; - F[1] = state.S[1]; - F[2] = state.S[2]; - F[3] = state.S[3]; - - /* Generate the right output block */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); ++round) { - forkskinny_128_256_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - - /* Restore the state at the forking point */ - state.S[0] = F[0]; - state.S[1] = F[1]; - state.S[2] = F[2]; - state.S[3] = F[3]; - } - if (output_left) { - /* Generate the left output block */ - state.S[0] ^= 0x08040201U; /* Branching constant */ - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - for (round = (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER * 2); ++round) { - forkskinny_128_256_round(&state, round); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - } else { - /* We only need the right output block */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); ++round) { - forkskinny_128_256_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - } -} - -/** - * \brief Applies one round of ForkSkinny-128-256 in reverse. - * - * \param state State to apply the round to. 
- * \param round Number of the round to apply. - */ -static void forkskinny_128_256_inv_round - (forkskinny_128_256_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Permute TK1 and TK2 for the next round */ - skinny128_inv_LFSR2(state->TK2[0]); - skinny128_inv_LFSR2(state->TK2[1]); - skinny128_inv_permute_tk(state->TK1); - skinny128_inv_permute_tk(state->TK2); - - /* Inverse mix of the columns */ - temp = s0; - s0 = s1; - s1 = s2; - s2 = s3; - s3 = temp ^ s2; - s2 ^= s0; - s1 ^= s2; - - /* Shift the cells in the rows left, which moves the cell - * values down closer to the LSB. That is, we do a right - * rotate on the word to rotate the cells in the word left */ - s1 = rightRotate8(s1); - s2 = rightRotate16(s2); - s3 = rightRotate24(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all cells in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; -} - -void forkskinny_128_256_decrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_256_state_t state; - forkskinny_128_256_state_t fstate; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Fast-forward the tweakey to the end of the key schedule */ - for (round = 0; round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER * 2); ++round) { - skinny128_permute_tk(state.TK1); - skinny128_permute_tk(state.TK2); - skinny128_LFSR2(state.TK2[0]); - skinny128_LFSR2(state.TK2[1]); - } - - /* Perform the "after" rounds on the input to get back - * to the forking point in the cipher */ - for (round = (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER * 2); - round > (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); --round) { - forkskinny_128_256_inv_round(&state, round - 1); - } - - /* Remove the branching constant */ - state.S[0] ^= 0x08040201U; - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - - /* Roll the tweakey back another "after" rounds */ - for (round = 0; round < FORKSKINNY_128_256_ROUNDS_AFTER; ++round) { - skinny128_inv_LFSR2(state.TK2[0]); - skinny128_inv_LFSR2(state.TK2[1]); - skinny128_inv_permute_tk(state.TK1); - skinny128_inv_permute_tk(state.TK2); - } - - /* Save the state and the tweakey at the forking point */ - fstate = state; - - /* Generate the left output block after another "before" rounds */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; round > 0; --round) { - 
forkskinny_128_256_inv_round(&state, round - 1); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - - /* Generate the right output block by going forward "after" - * rounds from the forking point */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); ++round) { - forkskinny_128_256_round(&fstate, round); - } - le_store_word32(output_right, fstate.S[0]); - le_store_word32(output_right + 4, fstate.S[1]); - le_store_word32(output_right + 8, fstate.S[2]); - le_store_word32(output_right + 12, fstate.S[3]); -} - -/** - * \brief Number of rounds of ForkSkinny-128-384 before forking. - */ -#define FORKSKINNY_128_384_ROUNDS_BEFORE 25 - -/** - * \brief Number of rounds of ForkSkinny-128-384 after forking. - */ -#define FORKSKINNY_128_384_ROUNDS_AFTER 31 - -/** - * \brief State information for ForkSkinny-128-384. - */ -typedef struct -{ - uint32_t TK1[4]; /**< First part of the tweakey */ - uint32_t TK2[4]; /**< Second part of the tweakey */ - uint32_t TK3[4]; /**< Third part of the tweakey */ - uint32_t S[4]; /**< Current block state */ - -} forkskinny_128_384_state_t; - -/** - * \brief Applies one round of ForkSkinny-128-384. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_128_384_round - (forkskinny_128_384_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Apply the S-box to all cells in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny128_permute_tk(state->TK1); - skinny128_permute_tk(state->TK2); - skinny128_permute_tk(state->TK3); - skinny128_LFSR2(state->TK2[0]); - skinny128_LFSR2(state->TK2[1]); - skinny128_LFSR3(state->TK3[0]); - skinny128_LFSR3(state->TK3[1]); -} - -void forkskinny_128_384_encrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_384_state_t state; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.TK3[0] = le_load_word32(key + 32); - state.TK3[1] = le_load_word32(key + 36); - state.TK3[2] = le_load_word32(key + 40); - state.TK3[3] = le_load_word32(key + 44); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Run all of the rounds before the forking point */ - for (round = 0; round < FORKSKINNY_128_384_ROUNDS_BEFORE; ++round) { - forkskinny_128_384_round(&state, round); - } - - /* Determine which output blocks we need */ - if (output_left && output_right) { - /* We need both outputs so save the state at the forking point */ - uint32_t F[4]; - F[0] = state.S[0]; - F[1] = state.S[1]; - F[2] = state.S[2]; - F[3] = state.S[3]; - - /* Generate the right output block */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); ++round) { - forkskinny_128_384_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - - /* Restore the state at the forking point */ - state.S[0] = F[0]; - state.S[1] = F[1]; - state.S[2] = F[2]; - state.S[3] = F[3]; - } - if (output_left) { - /* Generate the left output block */ - state.S[0] ^= 0x08040201U; /* Branching constant */ - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - for (round = (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER * 2); ++round) { - forkskinny_128_384_round(&state, round); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - } else { - /* We only need the right output block */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); ++round) { - forkskinny_128_384_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - 
le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - } -} - -/** - * \brief Applies one round of ForkSkinny-128-384 in reverse. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_128_384_inv_round - (forkskinny_128_384_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Permute TK1 and TK2 for the next round */ - skinny128_inv_LFSR2(state->TK2[0]); - skinny128_inv_LFSR2(state->TK2[1]); - skinny128_inv_LFSR3(state->TK3[0]); - skinny128_inv_LFSR3(state->TK3[1]); - skinny128_inv_permute_tk(state->TK1); - skinny128_inv_permute_tk(state->TK2); - skinny128_inv_permute_tk(state->TK3); - - /* Inverse mix of the columns */ - temp = s0; - s0 = s1; - s1 = s2; - s2 = s3; - s3 = temp ^ s2; - s2 ^= s0; - s1 ^= s2; - - /* Shift the cells in the rows left, which moves the cell - * values down closer to the LSB. That is, we do a right - * rotate on the word to rotate the cells in the word left */ - s1 = rightRotate8(s1); - s2 = rightRotate16(s2); - s3 = rightRotate24(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all cells in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; -} - -void forkskinny_128_384_decrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_384_state_t state; - forkskinny_128_384_state_t fstate; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.TK3[0] = le_load_word32(key + 32); - state.TK3[1] = le_load_word32(key + 36); - state.TK3[2] = le_load_word32(key + 40); - state.TK3[3] = le_load_word32(key + 44); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Fast-forward the tweakey to the end of the key schedule */ - for (round = 0; round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER * 2); ++round) { - skinny128_permute_tk(state.TK1); - skinny128_permute_tk(state.TK2); - skinny128_permute_tk(state.TK3); - skinny128_LFSR2(state.TK2[0]); - skinny128_LFSR2(state.TK2[1]); - skinny128_LFSR3(state.TK3[0]); - skinny128_LFSR3(state.TK3[1]); - } - - /* Perform the "after" rounds on the input to get back - * to the forking point in the cipher */ - for (round = (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER * 2); - round > (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); --round) { - 
forkskinny_128_384_inv_round(&state, round - 1); - } - - /* Remove the branching constant */ - state.S[0] ^= 0x08040201U; - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - - /* Roll the tweakey back another "after" rounds */ - for (round = 0; round < FORKSKINNY_128_384_ROUNDS_AFTER; ++round) { - skinny128_inv_LFSR2(state.TK2[0]); - skinny128_inv_LFSR2(state.TK2[1]); - skinny128_inv_LFSR3(state.TK3[0]); - skinny128_inv_LFSR3(state.TK3[1]); - skinny128_inv_permute_tk(state.TK1); - skinny128_inv_permute_tk(state.TK2); - skinny128_inv_permute_tk(state.TK3); - } - - /* Save the state and the tweakey at the forking point */ - fstate = state; - - /* Generate the left output block after another "before" rounds */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; round > 0; --round) { - forkskinny_128_384_inv_round(&state, round - 1); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - - /* Generate the right output block by going forward "after" - * rounds from the forking point */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); ++round) { - forkskinny_128_384_round(&fstate, round); - } - le_store_word32(output_right, fstate.S[0]); - le_store_word32(output_right + 4, fstate.S[1]); - le_store_word32(output_right + 8, fstate.S[2]); - le_store_word32(output_right + 12, fstate.S[3]); -} - -/** - * \brief Number of rounds of ForkSkinny-64-192 before forking. - */ -#define FORKSKINNY_64_192_ROUNDS_BEFORE 17 - -/** - * \brief Number of rounds of ForkSkinny-64-192 after forking. - */ -#define FORKSKINNY_64_192_ROUNDS_AFTER 23 - -/** - * \brief State information for ForkSkinny-64-192. - */ -typedef struct -{ - uint16_t TK1[4]; /**< First part of the tweakey */ - uint16_t TK2[4]; /**< Second part of the tweakey */ - uint16_t TK3[4]; /**< Third part of the tweakey */ - uint16_t S[4]; /**< Current block state */ - -} forkskinny_64_192_state_t; - -/** - * \brief Applies one round of ForkSkinny-64-192. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - * - * Note: The cells of each row are ordered in big-endian nibble order - * so it is easiest to manage the rows in big-endian byte order. 
- */ -static void forkskinny_64_192_round - (forkskinny_64_192_state_t *state, unsigned round) -{ - uint16_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Apply the S-box to all cells in the state */ - skinny64_sbox(s0); - skinny64_sbox(s1); - skinny64_sbox(s2); - skinny64_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - ((rc & 0x0F) << 12) ^ 0x0020; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ - ((rc & 0x70) << 8); - s2 ^= 0x2000; - - /* Shift the cells in the rows right */ - s1 = rightRotate4_16(s1); - s2 = rightRotate8_16(s2); - s3 = rightRotate12_16(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny64_permute_tk(state->TK1); - skinny64_permute_tk(state->TK2); - skinny64_permute_tk(state->TK3); - skinny64_LFSR2(state->TK2[0]); - skinny64_LFSR2(state->TK2[1]); - skinny64_LFSR3(state->TK3[0]); - skinny64_LFSR3(state->TK3[1]); -} - -void forkskinny_64_192_encrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_64_192_state_t state; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = be_load_word16(key); - state.TK1[1] = be_load_word16(key + 2); - state.TK1[2] = be_load_word16(key + 4); - state.TK1[3] = be_load_word16(key + 6); - state.TK2[0] = be_load_word16(key + 8); - state.TK2[1] = be_load_word16(key + 10); - state.TK2[2] = be_load_word16(key + 12); - state.TK2[3] = be_load_word16(key + 14); - state.TK3[0] = be_load_word16(key + 16); - state.TK3[1] = be_load_word16(key + 18); - state.TK3[2] = be_load_word16(key + 20); - state.TK3[3] = be_load_word16(key + 22); - state.S[0] = be_load_word16(input); - state.S[1] = be_load_word16(input + 2); - state.S[2] = be_load_word16(input + 4); - state.S[3] = be_load_word16(input + 6); - - /* Run all of the rounds before the forking point */ - for (round = 0; round < FORKSKINNY_64_192_ROUNDS_BEFORE; ++round) { - forkskinny_64_192_round(&state, round); - } - - /* Determine which output blocks we need */ - if (output_left && output_right) { - /* We need both outputs so save the state at the forking point */ - uint16_t F[4]; - F[0] = state.S[0]; - F[1] = state.S[1]; - F[2] = state.S[2]; - F[3] = state.S[3]; - - /* Generate the right output block */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; - round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); ++round) { - forkskinny_64_192_round(&state, round); - } - be_store_word16(output_right, state.S[0]); - be_store_word16(output_right + 2, state.S[1]); - be_store_word16(output_right + 4, state.S[2]); - be_store_word16(output_right + 6, state.S[3]); - - /* Restore the state at the forking point */ - state.S[0] = F[0]; - state.S[1] = F[1]; - state.S[2] = F[2]; - state.S[3] = F[3]; - } - if (output_left) { - /* Generate the left output block */ - state.S[0] ^= 0x1249U; /* Branching constant */ - state.S[1] ^= 0x36daU; - state.S[2] ^= 0x5b7fU; - state.S[3] ^= 0xec81U; - for (round = (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); - round < 
(FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER * 2); ++round) { - forkskinny_64_192_round(&state, round); - } - be_store_word16(output_left, state.S[0]); - be_store_word16(output_left + 2, state.S[1]); - be_store_word16(output_left + 4, state.S[2]); - be_store_word16(output_left + 6, state.S[3]); - } else { - /* We only need the right output block */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; - round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); ++round) { - forkskinny_64_192_round(&state, round); - } - be_store_word16(output_right, state.S[0]); - be_store_word16(output_right + 2, state.S[1]); - be_store_word16(output_right + 4, state.S[2]); - be_store_word16(output_right + 6, state.S[3]); - } -} - -/** - * \brief Applies one round of ForkSkinny-64-192 in reverse. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_64_192_inv_round - (forkskinny_64_192_state_t *state, unsigned round) -{ - uint16_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny64_inv_LFSR2(state->TK2[0]); - skinny64_inv_LFSR2(state->TK2[1]); - skinny64_inv_LFSR3(state->TK3[0]); - skinny64_inv_LFSR3(state->TK3[1]); - skinny64_inv_permute_tk(state->TK1); - skinny64_inv_permute_tk(state->TK2); - skinny64_inv_permute_tk(state->TK3); - - /* Inverse mix of the columns */ - temp = s0; - s0 = s1; - s1 = s2; - s2 = s3; - s3 = temp ^ s2; - s2 ^= s0; - s1 ^= s2; - - /* Shift the cells in the rows left */ - s1 = leftRotate4_16(s1); - s2 = leftRotate8_16(s2); - s3 = leftRotate12_16(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - ((rc & 0x0F) << 12) ^ 0x0020; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ - ((rc & 0x70) << 8); - s2 ^= 0x2000; - - /* Apply the inverse of the S-box to all cells in the state */ - skinny64_inv_sbox(s0); - skinny64_inv_sbox(s1); - skinny64_inv_sbox(s2); - skinny64_inv_sbox(s3); - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; -} - -void forkskinny_64_192_decrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_64_192_state_t state; - forkskinny_64_192_state_t fstate; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = be_load_word16(key); - state.TK1[1] = be_load_word16(key + 2); - state.TK1[2] = be_load_word16(key + 4); - state.TK1[3] = be_load_word16(key + 6); - state.TK2[0] = be_load_word16(key + 8); - state.TK2[1] = be_load_word16(key + 10); - state.TK2[2] = be_load_word16(key + 12); - state.TK2[3] = be_load_word16(key + 14); - state.TK3[0] = be_load_word16(key + 16); - state.TK3[1] = be_load_word16(key + 18); - state.TK3[2] = be_load_word16(key + 20); - state.TK3[3] = be_load_word16(key + 22); - state.S[0] = be_load_word16(input); - state.S[1] = be_load_word16(input + 2); - state.S[2] = be_load_word16(input + 4); - state.S[3] = be_load_word16(input + 6); - - /* Fast-forward the tweakey to the end of the key schedule */ - for (round = 0; round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER * 2); ++round) { - skinny64_permute_tk(state.TK1); - skinny64_permute_tk(state.TK2); - 
skinny64_permute_tk(state.TK3); - skinny64_LFSR2(state.TK2[0]); - skinny64_LFSR2(state.TK2[1]); - skinny64_LFSR3(state.TK3[0]); - skinny64_LFSR3(state.TK3[1]); - } - - /* Perform the "after" rounds on the input to get back - * to the forking point in the cipher */ - for (round = (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER * 2); - round > (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); --round) { - forkskinny_64_192_inv_round(&state, round - 1); - } - - /* Remove the branching constant */ - state.S[0] ^= 0x1249U; - state.S[1] ^= 0x36daU; - state.S[2] ^= 0x5b7fU; - state.S[3] ^= 0xec81U; - - /* Roll the tweakey back another "after" rounds */ - for (round = 0; round < FORKSKINNY_64_192_ROUNDS_AFTER; ++round) { - skinny64_inv_LFSR2(state.TK2[0]); - skinny64_inv_LFSR2(state.TK2[1]); - skinny64_inv_LFSR3(state.TK3[0]); - skinny64_inv_LFSR3(state.TK3[1]); - skinny64_inv_permute_tk(state.TK1); - skinny64_inv_permute_tk(state.TK2); - skinny64_inv_permute_tk(state.TK3); - } - - /* Save the state and the tweakey at the forking point */ - fstate = state; - - /* Generate the left output block after another "before" rounds */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; round > 0; --round) { - forkskinny_64_192_inv_round(&state, round - 1); - } - be_store_word16(output_left, state.S[0]); - be_store_word16(output_left + 2, state.S[1]); - be_store_word16(output_left + 4, state.S[2]); - be_store_word16(output_left + 6, state.S[3]); - - /* Generate the right output block by going forward "after" - * rounds from the forking point */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; - round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); ++round) { - forkskinny_64_192_round(&fstate, round); - } - be_store_word16(output_right, fstate.S[0]); - be_store_word16(output_right + 2, fstate.S[1]); - be_store_word16(output_right + 4, fstate.S[2]); - be_store_word16(output_right + 6, fstate.S[3]); -} diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-forkskinny.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-forkskinny.h deleted file mode 100644 index 0c1a707..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-forkskinny.h +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LW_INTERNAL_FORKSKINNY_H -#define LW_INTERNAL_FORKSKINNY_H - -/** - * \file internal-forkskinny.h - * \brief ForkSkinny block cipher family. - * - * ForkSkinny is a modified version of the SKINNY block cipher that - * supports "forking": half-way through the rounds the cipher is - * forked in two different directions to produce two different outputs. - * - * References: https://www.esat.kuleuven.be/cosic/forkae/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts a block of plaintext with ForkSkinny-128-256. - * - * \param key 256-bit tweakey for ForkSkinny-128-256. - * \param output_left Left output block for the ciphertext, or NULL if - * the left output is not required. - * \param output_right Right output block for the authentication tag, - * or NULL if the right output is not required. - * \param input 128-bit input plaintext block. - * - * ForkSkinny-128-192 also uses this function with a padded tweakey. - */ -void forkskinny_128_256_encrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Decrypts a block of ciphertext with ForkSkinny-128-256. - * - * \param key 256-bit tweakey for ForkSkinny-128-256. - * \param output_left Left output block, which is the plaintext. - * \param output_right Right output block for the authentication tag. - * \param input 128-bit input ciphertext block. - * - * Both output blocks will be populated; neither is optional. - */ -void forkskinny_128_256_decrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Encrypts a block of plaintext with ForkSkinny-128-384. - * - * \param key 384-bit tweakey for ForkSkinny-128-384. - * \param output_left Left output block for the ciphertext, or NULL if - * the left output is not required. - * \param output_right Right output block for the authentication tag, - * or NULL if the right output is not required. - * \param input 128-bit input plaintext block. - * - * ForkSkinny-128-288 also uses this function with a padded tweakey. - */ -void forkskinny_128_384_encrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Decrypts a block of ciphertext with ForkSkinny-128-384. - * - * \param key 384-bit tweakey for ForkSkinny-128-384. - * \param output_left Left output block, which is the plaintext. - * \param output_right Right output block for the authentication tag. - * \param input 128-bit input ciphertext block. - * - * Both output blocks will be populated; neither is optional. - */ -void forkskinny_128_384_decrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Encrypts a block of input with ForkSkinny-64-192. - * - * \param key 192-bit tweakey for ForkSkinny-64-192. - * \param output_left First output block, or NULL if left is not required. - * \param output_right Second output block, or NULL if right is not required. - * \param input 64-bit input block. - */ -/** - * \brief Encrypts a block of plaintext with ForkSkinny-64-192. - * - * \param key 192-bit tweakey for ForkSkinny-64-192. - * \param output_left Left output block for the ciphertext, or NULL if - * the left output is not required. - * \param output_right Right output block for the authentication tag, - * or NULL if the right output is not required. 
- * \param input 64-bit input plaintext block. - */ -void forkskinny_64_192_encrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Decrypts a block of ciphertext with ForkSkinny-64-192. - * - * \param key 192-bit tweakey for ForkSkinny-64-192. - * \param output_left Left output block, which is the plaintext. - * \param output_right Right output block for the authentication tag. - * \param input 64-bit input ciphertext block. - * - * Both output blocks will be populated; neither is optional. - */ -void forkskinny_64_192_decrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-skinnyutil.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-skinnyutil.h deleted file mode 100644 index 83136cb..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-skinnyutil.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNYUTIL_H -#define LW_INTERNAL_SKINNYUTIL_H - -/** - * \file internal-skinnyutil.h - * \brief Utilities to help implement SKINNY and its variants. 
- */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @cond skinnyutil */ - -/* Utilities for implementing SKINNY-128 */ - -#define skinny128_LFSR2(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ - (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ - } while (0) - - -#define skinny128_LFSR3(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ - (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) -#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) - -#define skinny128_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint32_t row2 = tk[2]; \ - uint32_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 16) | (row3 >> 16); \ - tk[0] = ((row2 >> 8) & 0x000000FFU) | \ - ((row2 << 16) & 0x00FF0000U) | \ - ( row3 & 0xFF00FF00U); \ - tk[1] = ((row2 >> 16) & 0x000000FFU) | \ - (row2 & 0xFF000000U) | \ - ((row3 << 8) & 0x0000FF00U) | \ - ( row3 & 0x00FF0000U); \ - } while (0) - -#define skinny128_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint32_t row0 = tk[0]; \ - uint32_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 >> 16) & 0x000000FFU) | \ - ((row0 << 8) & 0x0000FF00U) | \ - ((row1 << 16) & 0x00FF0000U) | \ - ( row1 & 0xFF000000U); \ - tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ - ((row0 << 16) & 0xFF000000U) | \ - ((row1 >> 16) & 0x000000FFU) | \ - ((row1 << 8) & 0x00FF0000U); \ - } while (0) - -/* - * Apply the SKINNY sbox. The original version from the specification is - * equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE(x) - * ((((x) & 0x01010101U) << 2) | - * (((x) & 0x06060606U) << 5) | - * (((x) & 0x20202020U) >> 5) | - * (((x) & 0xC8C8C8C8U) >> 2) | - * (((x) & 0x10101010U) >> 1)) - * - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * return SBOX_SWAP(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ - y = (((x >> 5) & (x << 1)) & 0x04040404U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ - x = ((x & 0x08080808U) << 1) | \ - ((x & 0x32323232U) << 2) | \ - ((x & 0x01010101U) << 5) | \ - ((x & 0x80808080U) >> 6) | \ - ((x & 0x40404040U) >> 4) | \ - ((x & 0x04040404U) >> 2); \ -} while (0) - -/* - * Apply the inverse of the SKINNY sbox. 
The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE_INV(x) - * ((((x) & 0x08080808U) << 1) | - * (((x) & 0x32323232U) << 2) | - * (((x) & 0x01010101U) << 5) | - * (((x) & 0xC0C0C0C0U) >> 5) | - * (((x) & 0x04040404U) >> 2)) - * - * x = SBOX_SWAP(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_inv_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ - x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ - y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ - x = ((x & 0x01010101U) << 2) | \ - ((x & 0x04040404U) << 4) | \ - ((x & 0x02020202U) << 6) | \ - ((x & 0x20202020U) >> 5) | \ - ((x & 0xC8C8C8C8U) >> 2) | \ - ((x & 0x10101010U) >> 1); \ -} while (0) - -/* Utilities for implementing SKINNY-64 */ - -#define skinny64_LFSR2(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ - } while (0) - -#define skinny64_LFSR3(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) -#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) - -#define skinny64_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint16_t row2 = tk[2]; \ - uint16_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 8) | (row3 >> 8); \ - tk[0] = ((row2 << 4) & 0xF000U) | \ - ((row2 >> 8) & 0x00F0U) | \ - ( row3 & 0x0F0FU); \ - tk[1] = ((row2 << 8) & 0xF000U) | \ - ((row3 >> 4) & 0x0F00U) | \ - ( row3 & 0x00F0U) | \ - ( row2 & 0x000FU); \ - } while (0) - -#define skinny64_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint16_t row0 = tk[0]; \ - uint16_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 << 8) & 0xF000U) | \ - ((row0 >> 4) & 0x0F00U) | \ - ((row1 >> 8) & 0x00F0U) | \ - ( row1 & 0x000FU); \ - tk[3] = ((row1 << 8) & 0xF000U) | \ - ((row0 << 8) & 0x0F00U) | \ - ((row1 >> 4) & 0x00F0U) | \ - ((row0 >> 8) & 0x000FU); \ - } while (0) - -/* - * Apply the SKINNY-64 sbox. 
The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT(x) - * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_SHIFT steps to be performed with one final rotation. - * This reduces the number of required shift operations from 14 to 10. - * - * We can further reduce the number of NOT operations from 4 to 2 - * using the technique from https://github.com/kste/skinny_avx to - * convert NOR-XOR operations into AND-XOR operations by converting - * the S-box into its NOT-inverse. - */ -#define skinny64_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ - x = ~x; \ - x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ -} while (0) - -/* - * Apply the inverse of the SKINNY-64 sbox. The original version - * from the specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT_INV(x) - * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * return SBOX_MIX(x); - */ -#define skinny64_inv_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = ~x; \ - x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ -} while (0) - -/** @endcond */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-util.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define 
le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. 
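A small host-side sketch of how these helpers compose; it assumes the header is reachable as "internal-util.h", matching the file paths in this patch, and the buffer contents are arbitrary examples:

    #include <stdint.h>
    #include <stdio.h>
    #include "internal-util.h"

    int main(void)
    {
        unsigned char buf[4];
        unsigned char a[4] = { 0x00, 0x11, 0x22, 0x33 };
        unsigned char b[4] = { 0xFF, 0x00, 0xFF, 0x00 };
        unsigned char out[4];
        uint32_t w;

        /* Store a word little-endian and read it back unchanged. */
        le_store_word32(buf, 0x12345678U);
        w = le_load_word32(buf);      /* buf = 78 56 34 12, w = 0x12345678 */
        printf("%08lx\n", (unsigned long)w);

        /* out[i] = a[i] ^ b[i] for each of the four bytes. */
        lw_xor_block_2_src(out, a, b, 4);
        printf("%02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);
        return 0;
    }

lw_xor_block(dest, src, len) is the in-place variant of the same operation, XOR-ing src directly into dest.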
*/ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
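The composition idea can be sanity-checked on a desktop build with a short sketch; rotl32 and rotr32 are illustrative stand-ins for the generic leftRotate / rightRotate macros above:

    #include <assert.h>
    #include <stdint.h>

    /* Plain 32-bit rotations (bits must be 1..31), equivalent to the
     * generic leftRotate/rightRotate macros above. */
    static uint32_t rotl32(uint32_t x, unsigned bits)
    {
        return (x << bits) | (x >> (32 - bits));
    }

    static uint32_t rotr32(uint32_t x, unsigned bits)
    {
        return (x >> bits) | (x << (32 - bits));
    }

    int main(void)
    {
        uint32_t x = 0x80C0F00FU;

        /* Left rotate by 5 == rotate left by 8, then right by 3. */
        assert(rotl32(x, 5) == rotr32(rotl32(x, 8), 3));

        /* Left rotate by 12 == rotate left by 16, then right by 4. */
        assert(rotl32(x, 12) == rotr32(rotl32(x, 16), 4));

        /* Left rotate by 31 == rotate right by 1. */
        assert(rotl32(x, 31) == rotr32(x, 1));

        return 0;
    }

This is the point of the LW_CRYPTO_ROTATE32_COMPOSED path: only 1-bit and byte-multiple rotations are cheap on AVR, so every other rotation count is built from those two.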
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys/internal-util.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys/internal-util.h +++ b/forkae/Implementations/crypto_aead/paefforkskinnyb128t192n48v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/aead-common.c b/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/aead-common.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. 
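The comparison in aead_check_tag above is constant time because it never branches on the tag bytes: every byte difference is OR-ed into accum, so accum is zero exactly when the tags match, and (accum - 1) >> 8 then turns it into -1 on a match and 0 on a mismatch (accum is at most 0xFF), exactly as the comment in the code states. The same mask scrubs the plaintext on failure and yields the 0 / -1 return value. A minimal caller sketch (finish_decrypt and its buffers are hypothetical; only the aead_check_tag call itself comes from this file):

    #include "aead-common.h"

    /* Hypothetical tail of an AEAD decrypt routine: the plaintext and the
     * recomputed tag have just been produced, received_tag came from the
     * end of the ciphertext. */
    static int finish_decrypt(unsigned char *plaintext,
                              unsigned long long plaintext_len,
                              const unsigned char *computed_tag,
                              const unsigned char *received_tag,
                              unsigned tag_len)
    {
        /* Returns 0 and leaves the plaintext alone when the tags match;
         * returns -1 and zeroes the plaintext when they do not. */
        return aead_check_tag(plaintext, plaintext_len,
                              computed_tag, received_tag, tag_len);
    }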
- */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. 
- */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. 
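Because the aead_cipher_t block bundles the key, nonce and tag lengths with the encrypt and decrypt entry points, generic driver code can work with any cipher in the suite through a single pointer. A minimal sketch (encrypt_with and its no-associated-data policy are hypothetical; the struct fields and function-pointer signatures are the ones defined above):

    #include <stdio.h>
    #include "aead-common.h"

    /* Hypothetical generic driver: encrypt one message with whichever
     * cipher's meta-information block is supplied.  The caller provides
     * key and nonce buffers of cipher->key_len and cipher->nonce_len
     * bytes, and c must have room for mlen + cipher->tag_len bytes. */
    static int encrypt_with(const aead_cipher_t *cipher,
                            unsigned char *c, unsigned long long *clen,
                            const unsigned char *m, unsigned long long mlen,
                            const unsigned char *npub,
                            const unsigned char *k)
    {
        printf("encrypting with %s (%u-byte tag)\n",
               cipher->name, cipher->tag_len);
        return cipher->encrypt(c, clen, m, mlen,
                               NULL, 0,   /* no associated data */
                               NULL,      /* nsec is unused */
                               npub, k);
    }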
- */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/api.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/api.h deleted file mode 100644 index 6c701b5..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 14 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/encrypt.c b/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/encrypt.c deleted file mode 100644 index be76f9b..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "forkae.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return forkae_paef_128_256_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return forkae_paef_128_256_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/forkae.c b/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/forkae.c deleted file mode 100644 index 4a9671a..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/forkae.c +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "forkae.h" -#include "internal-forkskinny.h" -#include "internal-util.h" -#include - -aead_cipher_t const forkae_paef_64_192_cipher = { - "PAEF-ForkSkinny-64-192", - FORKAE_PAEF_64_192_KEY_SIZE, - FORKAE_PAEF_64_192_NONCE_SIZE, - FORKAE_PAEF_64_192_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_64_192_aead_encrypt, - forkae_paef_64_192_aead_decrypt -}; - -aead_cipher_t const forkae_paef_128_192_cipher = { - "PAEF-ForkSkinny-128-192", - FORKAE_PAEF_128_192_KEY_SIZE, - FORKAE_PAEF_128_192_NONCE_SIZE, - FORKAE_PAEF_128_192_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_128_192_aead_encrypt, - forkae_paef_128_192_aead_decrypt -}; - -aead_cipher_t const forkae_paef_128_256_cipher = { - "PAEF-ForkSkinny-128-256", - FORKAE_PAEF_128_256_KEY_SIZE, - FORKAE_PAEF_128_256_NONCE_SIZE, - FORKAE_PAEF_128_256_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_128_256_aead_encrypt, - forkae_paef_128_256_aead_decrypt -}; - -aead_cipher_t const forkae_paef_128_288_cipher = { - "PAEF-ForkSkinny-128-288", - FORKAE_PAEF_128_288_KEY_SIZE, - FORKAE_PAEF_128_288_NONCE_SIZE, - FORKAE_PAEF_128_288_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_128_288_aead_encrypt, - forkae_paef_128_288_aead_decrypt -}; - -aead_cipher_t const forkae_saef_128_192_cipher = { - "SAEF-ForkSkinny-128-192", - FORKAE_SAEF_128_192_KEY_SIZE, - FORKAE_SAEF_128_192_NONCE_SIZE, - FORKAE_SAEF_128_192_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_saef_128_192_aead_encrypt, - forkae_saef_128_192_aead_decrypt -}; - -aead_cipher_t const forkae_saef_128_256_cipher = { - "SAEF-ForkSkinny-128-256", - FORKAE_SAEF_128_256_KEY_SIZE, - FORKAE_SAEF_128_256_NONCE_SIZE, - FORKAE_SAEF_128_256_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_saef_128_256_aead_encrypt, - forkae_saef_128_256_aead_decrypt -}; - -/* PAEF-ForkSkinny-64-192 */ -#define FORKAE_ALG_NAME forkae_paef_64_192 -#define FORKAE_BLOCK_SIZE 8 -#define FORKAE_NONCE_SIZE FORKAE_PAEF_64_192_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 2 -#define FORKAE_TWEAKEY_SIZE 24 -#define FORKAE_BLOCK_FUNC forkskinny_64_192 -#include "internal-forkae-paef.h" - -/* PAEF-ForkSkinny-128-192 */ -#define FORKAE_ALG_NAME forkae_paef_128_192 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_PAEF_128_192_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 2 -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-paef.h" - -/* PAEF-ForkSkinny-128-256 */ -#define FORKAE_ALG_NAME forkae_paef_128_256 -#define FORKAE_BLOCK_SIZE 16 
-#define FORKAE_NONCE_SIZE FORKAE_PAEF_128_256_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 2 -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-paef.h" - -/* PAEF-ForkSkinny-128-288 */ -#define FORKAE_ALG_NAME forkae_paef_128_288 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_PAEF_128_288_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 7 -#define FORKAE_TWEAKEY_SIZE 48 -#define FORKAE_BLOCK_FUNC forkskinny_128_384 -#include "internal-forkae-paef.h" - -/* SAEF-ForkSkinny-128-192 */ -#define FORKAE_ALG_NAME forkae_saef_128_192 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_SAEF_128_192_NONCE_SIZE -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_TWEAKEY_REDUCED_SIZE 24 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-saef.h" - -/* SAEF-ForkSkinny-128-256 */ -#define FORKAE_ALG_NAME forkae_saef_128_256 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_SAEF_128_256_NONCE_SIZE -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_TWEAKEY_REDUCED_SIZE 32 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-saef.h" diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/forkae.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/forkae.h deleted file mode 100644 index 3e27b50..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/forkae.h +++ /dev/null @@ -1,551 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_FORKAE_H -#define LWCRYPTO_FORKAE_H - -#include "aead-common.h" - -/** - * \file forkae.h - * \brief ForkAE authenticated encryption algorithm family. - * - * ForkAE is a family of authenticated encryption algorithms based on a - * modified version of the SKINNY tweakable block cipher. The modifications - * introduce "forking" where each input block produces two output blocks - * for use in encryption and authentication. There are six members in - * the ForkAE family: - * - * \li PAEF-ForkSkinny-64-192 has a 128-bit key, a 48-bit nonce, and a - * 64-bit authentication tag. The associated data and plaintext are - * limited to 216 bytes. - * \li PAEF-ForkSkinny-128-192 has a 128-bit key, a 48-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext are - * limited to 217 bytes. 
- * \li PAEF-ForkSkinny-128-256 has a 128-bit key, a 112-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext are - * limited to 217 bytes. - * \li PAEF-ForkSkinny-128-288 has a 128-bit key, a 104-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext are - * limited to 257 bytes. This is the primary member of the family. - * \li SAEF-ForkSkinny-128-192 has a 128-bit key, a 56-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext may be - * unlimited in size. - * \li SAEF-ForkSkinny-128-256 has a 128-bit key, a 120-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext may be - * unlimited in size. - * - * The PAEF variants support parallel encryption and decryption for - * higher throughput. The SAEF variants encrypt or decrypt blocks - * sequentially. - * - * ForkAE is designed to be efficient on small packet sizes so most of - * the PAEF algorithms have a limit of 64k or 128k on the amount of - * payload in a single packet. Obviously the input can be split into - * separate packets for larger amounts of data. - * - * References: https://www.esat.kuleuven.be/cosic/forkae/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for PAEF-ForkSkinny-64-192. - */ -#define FORKAE_PAEF_64_192_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-64-192. - */ -#define FORKAE_PAEF_64_192_TAG_SIZE 8 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-64-192. - */ -#define FORKAE_PAEF_64_192_NONCE_SIZE 6 - -/** - * \brief Size of the key for PAEF-ForkSkinny-128-192. - */ -#define FORKAE_PAEF_128_192_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-128-192. - */ -#define FORKAE_PAEF_128_192_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-128-192. - */ -#define FORKAE_PAEF_128_192_NONCE_SIZE 6 - -/** - * \brief Size of the key for PAEF-ForkSkinny-128-256. - */ -#define FORKAE_PAEF_128_256_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-128-256. - */ -#define FORKAE_PAEF_128_256_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-128-256. - */ -#define FORKAE_PAEF_128_256_NONCE_SIZE 14 - -/** - * \brief Size of the key for PAEF-ForkSkinny-128-288. - */ -#define FORKAE_PAEF_128_288_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-128-288. - */ -#define FORKAE_PAEF_128_288_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-128-288. - */ -#define FORKAE_PAEF_128_288_NONCE_SIZE 13 - -/** - * \brief Size of the key for SAEF-ForkSkinny-128-192. - */ -#define FORKAE_SAEF_128_192_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SAEF-ForkSkinny-128-192. - */ -#define FORKAE_SAEF_128_192_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SAEF-ForkSkinny-128-192. - */ -#define FORKAE_SAEF_128_192_NONCE_SIZE 7 - -/** - * \brief Size of the key for SAEF-ForkSkinny-128-256. - */ -#define FORKAE_SAEF_128_256_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SAEF-ForkSkinny-128-256. - */ -#define FORKAE_SAEF_128_256_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SAEF-ForkSkinny-128-256. - */ -#define FORKAE_SAEF_128_256_NONCE_SIZE 15 - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-64-192 cipher. - */ -extern aead_cipher_t const forkae_paef_64_192_cipher; - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-128-192 cipher. 
- */ -extern aead_cipher_t const forkae_paef_128_192_cipher; - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-128-256 cipher. - */ -extern aead_cipher_t const forkae_paef_128_256_cipher; - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-128-288 cipher. - */ -extern aead_cipher_t const forkae_paef_128_288_cipher; - -/** - * \brief Meta-information block for the SAEF-ForkSkinny-128-192 cipher. - */ -extern aead_cipher_t const forkae_saef_128_192_cipher; - -/** - * \brief Meta-information block for the SAEF-ForkSkinny-128-256 cipher. - */ -extern aead_cipher_t const forkae_saef_128_256_cipher; - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-64-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_64_192_aead_decrypt() - */ -int forkae_paef_64_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-64-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_paef_64_192_aead_encrypt() - */ -int forkae_paef_64_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-128-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. 
- * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_128_192_aead_decrypt() - */ -int forkae_paef_128_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-128-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_paef_128_192_aead_encrypt() - */ -int forkae_paef_128_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-128-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 14 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_128_256_aead_decrypt() - */ -int forkae_paef_128_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-128-256. 
- * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 14 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_paef_128_256_aead_encrypt() - */ -int forkae_paef_128_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-128-288. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 13 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_128_288_aead_decrypt() - */ -int forkae_paef_128_288_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-128-288. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 13 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- * - * \sa forkae_paef_128_288_aead_encrypt() - */ -int forkae_paef_128_288_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SAEF-ForkSkinny-128-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 7 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_saef_128_192_aead_decrypt() - */ -int forkae_saef_128_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SAEF-ForkSkinny-128-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 7 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_saef_128_192_aead_encrypt() - */ -int forkae_saef_128_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SAEF-ForkSkinny-128-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. 
- * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_saef_128_256_aead_decrypt() - */ -int forkae_saef_128_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SAEF-ForkSkinny-128-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_saef_128_256_aead_encrypt() - */ -int forkae_saef_128_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-forkae-paef.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-forkae-paef.h deleted file mode 100644 index 6f57b2b..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-forkae-paef.h +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
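A minimal calling sketch for the PAEF-ForkSkinny-128-288 entry points declared in forkae.h above. This is an illustration only, not part of the deleted sources: the function names, size constants, and argument order come from the header, while the main() harness, the buffer names, and the all-zero key and nonce are hypothetical placeholders.

#include <stdio.h>
#include <string.h>
#include "forkae.h"

int main(void)
{
    /* Zero key and nonce purely for illustration; real callers must use a
     * secret key and a nonce that is unique for every packet. */
    unsigned char key[FORKAE_PAEF_128_288_KEY_SIZE] = {0};
    unsigned char nonce[FORKAE_PAEF_128_288_NONCE_SIZE] = {0};
    unsigned char msg[16] = "hello, forkae!!";
    unsigned char ad[4] = {1, 2, 3, 4};
    unsigned char ct[sizeof(msg) + FORKAE_PAEF_128_288_TAG_SIZE];
    unsigned char out[sizeof(msg)];
    unsigned long long clen, mlen;

    /* Encrypt: writes the ciphertext followed by the 16-byte tag to ct */
    if (forkae_paef_128_288_aead_encrypt
            (ct, &clen, msg, sizeof(msg), ad, sizeof(ad), 0, nonce, key) != 0)
        return 1;

    /* Decrypt: returns 0 only if the authentication tag verifies */
    if (forkae_paef_128_288_aead_decrypt
            (out, &mlen, 0, ct, clen, ad, sizeof(ad), nonce, key) != 0)
        return 1;

    printf("round trip ok: %d\n", memcmp(out, msg, sizeof(msg)) == 0);
    return 0;
}

The argument order mirrors the usual SUPERCOP/NIST crypto_aead convention, with the unused secret nonce passed as a null pointer.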
- */ - -/* We expect a number of macros to be defined before this file - * is included to configure the underlying ForkAE PAEF variant. - * - * FORKAE_ALG_NAME Name of the FORKAE algorithm; e.g. forkae_paef_128_256 - * FORKAE_BLOCK_SIZE Size of the block for the cipher (8 or 16 bytes). - * FORKAE_NONCE_SIZE Size of the nonce for the cipher in bytes. - * FORKAE_COUNTER_SIZE Size of the counter value for the cipher in bytes. - * FORKAE_TWEAKEY_SIZE Size of the tweakey for the underlying forked cipher. - * FORKAE_BLOCK_FUNC Name of the block function; e.g. forkskinny_128_256 - */ -#if defined(FORKAE_ALG_NAME) - -#define FORKAE_CONCAT_INNER(name,suffix) name##suffix -#define FORKAE_CONCAT(name,suffix) FORKAE_CONCAT_INNER(name,suffix) - -/* Limit on the amount of data we can process based on the counter size */ -#define FORKAE_PAEF_DATA_LIMIT \ - ((unsigned long long)((1ULL << (FORKAE_COUNTER_SIZE * 8)) * \ - (FORKAE_BLOCK_SIZE / 8)) - FORKAE_BLOCK_SIZE) - -/* Processes the associated data in PAEF mode */ -STATIC_INLINE void FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter) - (unsigned char tweakey[FORKAE_TWEAKEY_SIZE], - unsigned long long counter, unsigned char domain) -{ - unsigned posn; - counter |= (((unsigned long long)domain) << (FORKAE_COUNTER_SIZE * 8 - 3)); - for (posn = 0; posn < FORKAE_COUNTER_SIZE; ++posn) { - tweakey[16 + FORKAE_NONCE_SIZE + FORKAE_COUNTER_SIZE - 1 - posn] = - (unsigned char)counter; - counter >>= 8; - } -} - -/* Check that the last block is padded correctly; -1 if ok, 0 if not */ -STATIC_INLINE int FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (const unsigned char *block, unsigned len) -{ - int check = block[0] ^ 0x80; - while (len > 1) { - --len; - check |= block[len]; - } - return (check - 1) >> 8; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_encrypt) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - unsigned long long counter; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + FORKAE_BLOCK_SIZE; - - /* Validate the size of the associated data and plaintext as there - * is a limit on the size of the PAEF counter field */ - if (adlen > FORKAE_PAEF_DATA_LIMIT || mlen > FORKAE_PAEF_DATA_LIMIT) - return -2; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - - /* Tag value starts at zero. 
We will XOR this with all of the - * intermediate tag values that are calculated for each block */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - counter = 1; - while (adlen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 0); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - ++counter; - } - if (adlen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 1); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } else if (adlen != 0 || mlen == 0) { - unsigned temp = (unsigned)adlen; - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, sizeof(block) - temp - 1); - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 3); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, block); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } - - /* If there is no message payload, then generate the tag and we are done */ - if (!mlen) { - memcpy(c, tag, sizeof(tag)); - return 0; - } - - /* Encrypt all plaintext blocks except the last */ - counter = 1; - while (mlen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 4); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, m); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - mlen -= FORKAE_BLOCK_SIZE; - ++counter; - } - - /* Encrypt the last block and generate the final authentication tag */ - if (mlen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 5); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, m); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, FORKAE_BLOCK_SIZE); - } else { - unsigned temp = (unsigned)mlen; - memcpy(block, m, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, sizeof(block) - temp - 1); - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 7); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, temp); - } - return 0; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_decrypt) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - unsigned char *mtemp = m; - unsigned long long counter; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < FORKAE_BLOCK_SIZE) - return -1; - clen -= FORKAE_BLOCK_SIZE; - *mlen = clen; - - /* Validate the size of the associated data and plaintext as there - * is a limit on the size of the PAEF counter field */ - if (adlen > FORKAE_PAEF_DATA_LIMIT || clen > FORKAE_PAEF_DATA_LIMIT) - return -2; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - - /* Tag value starts at zero. 
We will XOR this with all of the - * intermediate tag values that are calculated for each block */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - counter = 1; - while (adlen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 0); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - ++counter; - } - if (adlen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 1); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } else if (adlen != 0 || clen == 0) { - unsigned temp = (unsigned)adlen; - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, sizeof(block) - temp - 1); - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 3); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, block); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } - - /* If there is no message payload, then check the tag and we are done */ - if (!clen) - return aead_check_tag(m, clen, tag, c, sizeof(tag)); - - /* Decrypt all ciphertext blocks except the last */ - counter = 1; - while (clen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 4); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, c); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - clen -= FORKAE_BLOCK_SIZE; - ++counter; - } - - /* Decrypt the last block and check the final authentication tag */ - if (clen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 5); - lw_xor_block_2_src(m, c, tag, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, m); - return aead_check_tag - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, sizeof(tag)); - } else { - unsigned temp = (unsigned)clen; - unsigned char block2[FORKAE_BLOCK_SIZE]; - int check; - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 7); - lw_xor_block_2_src(block2, tag, c, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt) - (tweakey, block2, block, block2); - check = FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (block2 + temp, FORKAE_BLOCK_SIZE - temp); - memcpy(m, block2, temp); - return aead_check_tag_precheck - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, temp, check); - } -} - -#endif /* FORKAE_ALG_NAME */ - -/* Now undefine everything so that we can include this file again for - * another variant on the ForkAE PAEF algorithm */ -#undef FORKAE_ALG_NAME -#undef FORKAE_BLOCK_SIZE -#undef FORKAE_NONCE_SIZE -#undef FORKAE_COUNTER_SIZE -#undef FORKAE_TWEAKEY_SIZE -#undef FORKAE_BLOCK_FUNC -#undef FORKAE_CONCAT_INNER -#undef FORKAE_CONCAT -#undef FORKAE_PAEF_DATA_LIMIT diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-forkae-saef.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-forkae-saef.h deleted file mode 100644 index 768bba4..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-forkae-saef.h +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -/* We expect a number of macros to be defined before this file - * is included to configure the underlying ForkAE SAEF variant. - * - * FORKAE_ALG_NAME Name of the FORKAE algorithm; e.g. forkae_saef_128_256 - * FORKAE_BLOCK_SIZE Size of the block for the cipher (8 or 16 bytes). - * FORKAE_NONCE_SIZE Size of the nonce for the cipher in bytes. - * FORKAE_TWEAKEY_SIZE Size of the tweakey for the underlying forked cipher. - * FORKAE_REDUCED_TWEAKEY_SIZE Size of the reduced tweakey without padding. - * FORKAE_BLOCK_FUNC Name of the block function; e.g. forkskinny_128_256 - */ -#if defined(FORKAE_ALG_NAME) - -#define FORKAE_CONCAT_INNER(name,suffix) name##suffix -#define FORKAE_CONCAT(name,suffix) FORKAE_CONCAT_INNER(name,suffix) - -/* Check that the last block is padded correctly; -1 if ok, 0 if not */ -STATIC_INLINE int FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (const unsigned char *block, unsigned len) -{ - int check = block[0] ^ 0x80; - while (len > 1) { - --len; - check |= block[len]; - } - return (check - 1) >> 8; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_encrypt) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + FORKAE_BLOCK_SIZE; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] = 0x08; - - /* Tag value starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - if (adlen > 0 || mlen == 0) { - while (adlen > FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - } - if (mlen == 0) - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x02; - if (adlen == FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - 
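/* Note: the 0 passed as the second argument requests only the right fork
 * output from the underlying ForkSkinny block function (see the
 * forkskinny_*_encrypt definitions later in this patch), so the running
 * tag is simply replaced in place here. */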
FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } else if (adlen != 0 || mlen == 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(tag, ad, temp); - tag[temp] ^= 0x80; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } - } - - /* If there is no message payload, then generate the tag and we are done */ - if (!mlen) { - memcpy(c, tag, sizeof(tag)); - return 0; - } - - /* Encrypt all plaintext blocks except the last */ - while (mlen > FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, m, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(tag, block, FORKAE_BLOCK_SIZE); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - mlen -= FORKAE_BLOCK_SIZE; - } - - /* Encrypt the last block and generate the final authentication tag */ - if (mlen == FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, m, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, FORKAE_BLOCK_SIZE); - } else { - unsigned temp = (unsigned)mlen; - memcpy(block, tag, FORKAE_BLOCK_SIZE); - lw_xor_block(block, m, temp); - block[temp] ^= 0x80; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x05; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, temp); - } - return 0; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_decrypt) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < FORKAE_BLOCK_SIZE) - return -1; - clen -= FORKAE_BLOCK_SIZE; - *mlen = clen; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] = 0x08; - - /* Tag value starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - if (adlen > 0 || clen == 0) { - while (adlen > FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - } - if (clen == 0) - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x02; - if (adlen == FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } else if (adlen != 0 || clen == 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(tag, ad, temp); - 
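/* The 0x80 below is the usual 10* padding marker for a partial final block
 * of associated data, and the extra 0x01 flipped into the tweak byte keeps
 * this padded case in a different tweak domain from the complete-block
 * case handled above. */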
tag[temp] ^= 0x80; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } - } - - /* If there is no message payload, then check the tag and we are done */ - if (!clen) - return aead_check_tag(m, clen, tag, c, sizeof(tag)); - - /* Decrypt all ciphertext blocks except the last */ - while (clen > FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, c, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, block); - lw_xor_block(m, tag, FORKAE_BLOCK_SIZE); - memcpy(tag, block, FORKAE_BLOCK_SIZE); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - clen -= FORKAE_BLOCK_SIZE; - } - - /* Decrypt the last block and check the final authentication tag */ - if (clen == FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, c, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, block); - lw_xor_block(m, tag, FORKAE_BLOCK_SIZE); - return aead_check_tag - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, FORKAE_BLOCK_SIZE); - } else { - unsigned temp = (unsigned)clen; - unsigned char mblock[FORKAE_BLOCK_SIZE]; - int check; - lw_xor_block_2_src(block, c, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x05; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt) - (tweakey, mblock, block, block); - lw_xor_block(mblock, tag, FORKAE_BLOCK_SIZE); - memcpy(m, mblock, temp); - check = FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (mblock + temp, FORKAE_BLOCK_SIZE - temp); - return aead_check_tag_precheck - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, temp, check); - } -} - -#endif /* FORKAE_ALG_NAME */ - -/* Now undefine everything so that we can include this file again for - * another variant on the ForkAE SAEF algorithm */ -#undef FORKAE_ALG_NAME -#undef FORKAE_BLOCK_SIZE -#undef FORKAE_NONCE_SIZE -#undef FORKAE_COUNTER_SIZE -#undef FORKAE_TWEAKEY_SIZE -#undef FORKAE_TWEAKEY_REDUCED_SIZE -#undef FORKAE_BLOCK_FUNC -#undef FORKAE_CONCAT_INNER -#undef FORKAE_CONCAT diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-forkskinny.c b/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-forkskinny.c deleted file mode 100644 index b050ff1..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-forkskinny.c +++ /dev/null @@ -1,988 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-forkskinny.h" -#include "internal-skinnyutil.h" - -/** - * \brief 7-bit round constants for all ForkSkinny block ciphers. - */ -static unsigned char const RC[87] = { - 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7e, 0x7d, - 0x7b, 0x77, 0x6f, 0x5f, 0x3e, 0x7c, 0x79, 0x73, - 0x67, 0x4f, 0x1e, 0x3d, 0x7a, 0x75, 0x6b, 0x57, - 0x2e, 0x5c, 0x38, 0x70, 0x61, 0x43, 0x06, 0x0d, - 0x1b, 0x37, 0x6e, 0x5d, 0x3a, 0x74, 0x69, 0x53, - 0x26, 0x4c, 0x18, 0x31, 0x62, 0x45, 0x0a, 0x15, - 0x2b, 0x56, 0x2c, 0x58, 0x30, 0x60, 0x41, 0x02, - 0x05, 0x0b, 0x17, 0x2f, 0x5e, 0x3c, 0x78, 0x71, - 0x63, 0x47, 0x0e, 0x1d, 0x3b, 0x76, 0x6d, 0x5b, - 0x36, 0x6c, 0x59, 0x32, 0x64, 0x49, 0x12, 0x25, - 0x4a, 0x14, 0x29, 0x52, 0x24, 0x48, 0x10 -}; - -/** - * \brief Number of rounds of ForkSkinny-128-256 before forking. - */ -#define FORKSKINNY_128_256_ROUNDS_BEFORE 21 - -/** - * \brief Number of rounds of ForkSkinny-128-256 after forking. - */ -#define FORKSKINNY_128_256_ROUNDS_AFTER 27 - -/** - * \brief State information for ForkSkinny-128-256. - */ -typedef struct -{ - uint32_t TK1[4]; /**< First part of the tweakey */ - uint32_t TK2[4]; /**< Second part of the tweakey */ - uint32_t S[4]; /**< Current block state */ - -} forkskinny_128_256_state_t; - -/** - * \brief Applies one round of ForkSkinny-128-256. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_128_256_round - (forkskinny_128_256_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Apply the S-box to all cells in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
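 * (This is the SKINNY ShiftRows step: row i is rotated right by i cells,
 * which is why rows 1 to 3 get 8-, 16- and 24-bit word rotations below.)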
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(state->TK1); - skinny128_permute_tk(state->TK2); - skinny128_LFSR2(state->TK2[0]); - skinny128_LFSR2(state->TK2[1]); -} - -void forkskinny_128_256_encrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_256_state_t state; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Run all of the rounds before the forking point */ - for (round = 0; round < FORKSKINNY_128_256_ROUNDS_BEFORE; ++round) { - forkskinny_128_256_round(&state, round); - } - - /* Determine which output blocks we need */ - if (output_left && output_right) { - /* We need both outputs so save the state at the forking point */ - uint32_t F[4]; - F[0] = state.S[0]; - F[1] = state.S[1]; - F[2] = state.S[2]; - F[3] = state.S[3]; - - /* Generate the right output block */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); ++round) { - forkskinny_128_256_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - - /* Restore the state at the forking point */ - state.S[0] = F[0]; - state.S[1] = F[1]; - state.S[2] = F[2]; - state.S[3] = F[3]; - } - if (output_left) { - /* Generate the left output block */ - state.S[0] ^= 0x08040201U; /* Branching constant */ - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - for (round = (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER * 2); ++round) { - forkskinny_128_256_round(&state, round); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - } else { - /* We only need the right output block */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); ++round) { - forkskinny_128_256_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - } -} - -/** - * \brief Applies one round of ForkSkinny-128-256 in reverse. - * - * \param state State to apply the round to. 
- * \param round Number of the round to apply. - */ -static void forkskinny_128_256_inv_round - (forkskinny_128_256_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Permute TK1 and TK2 for the next round */ - skinny128_inv_LFSR2(state->TK2[0]); - skinny128_inv_LFSR2(state->TK2[1]); - skinny128_inv_permute_tk(state->TK1); - skinny128_inv_permute_tk(state->TK2); - - /* Inverse mix of the columns */ - temp = s0; - s0 = s1; - s1 = s2; - s2 = s3; - s3 = temp ^ s2; - s2 ^= s0; - s1 ^= s2; - - /* Shift the cells in the rows left, which moves the cell - * values down closer to the LSB. That is, we do a right - * rotate on the word to rotate the cells in the word left */ - s1 = rightRotate8(s1); - s2 = rightRotate16(s2); - s3 = rightRotate24(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all cells in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; -} - -void forkskinny_128_256_decrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_256_state_t state; - forkskinny_128_256_state_t fstate; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Fast-forward the tweakey to the end of the key schedule */ - for (round = 0; round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER * 2); ++round) { - skinny128_permute_tk(state.TK1); - skinny128_permute_tk(state.TK2); - skinny128_LFSR2(state.TK2[0]); - skinny128_LFSR2(state.TK2[1]); - } - - /* Perform the "after" rounds on the input to get back - * to the forking point in the cipher */ - for (round = (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER * 2); - round > (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); --round) { - forkskinny_128_256_inv_round(&state, round - 1); - } - - /* Remove the branching constant */ - state.S[0] ^= 0x08040201U; - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - - /* Roll the tweakey back another "after" rounds */ - for (round = 0; round < FORKSKINNY_128_256_ROUNDS_AFTER; ++round) { - skinny128_inv_LFSR2(state.TK2[0]); - skinny128_inv_LFSR2(state.TK2[1]); - skinny128_inv_permute_tk(state.TK1); - skinny128_inv_permute_tk(state.TK2); - } - - /* Save the state and the tweakey at the forking point */ - fstate = state; - - /* Generate the left output block after another "before" rounds */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; round > 0; --round) { - 
forkskinny_128_256_inv_round(&state, round - 1); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - - /* Generate the right output block by going forward "after" - * rounds from the forking point */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); ++round) { - forkskinny_128_256_round(&fstate, round); - } - le_store_word32(output_right, fstate.S[0]); - le_store_word32(output_right + 4, fstate.S[1]); - le_store_word32(output_right + 8, fstate.S[2]); - le_store_word32(output_right + 12, fstate.S[3]); -} - -/** - * \brief Number of rounds of ForkSkinny-128-384 before forking. - */ -#define FORKSKINNY_128_384_ROUNDS_BEFORE 25 - -/** - * \brief Number of rounds of ForkSkinny-128-384 after forking. - */ -#define FORKSKINNY_128_384_ROUNDS_AFTER 31 - -/** - * \brief State information for ForkSkinny-128-384. - */ -typedef struct -{ - uint32_t TK1[4]; /**< First part of the tweakey */ - uint32_t TK2[4]; /**< Second part of the tweakey */ - uint32_t TK3[4]; /**< Third part of the tweakey */ - uint32_t S[4]; /**< Current block state */ - -} forkskinny_128_384_state_t; - -/** - * \brief Applies one round of ForkSkinny-128-384. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_128_384_round - (forkskinny_128_384_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Apply the S-box to all cells in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny128_permute_tk(state->TK1); - skinny128_permute_tk(state->TK2); - skinny128_permute_tk(state->TK3); - skinny128_LFSR2(state->TK2[0]); - skinny128_LFSR2(state->TK2[1]); - skinny128_LFSR3(state->TK3[0]); - skinny128_LFSR3(state->TK3[1]); -} - -void forkskinny_128_384_encrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_384_state_t state; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.TK3[0] = le_load_word32(key + 32); - state.TK3[1] = le_load_word32(key + 36); - state.TK3[2] = le_load_word32(key + 40); - state.TK3[3] = le_load_word32(key + 44); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Run all of the rounds before the forking point */ - for (round = 0; round < FORKSKINNY_128_384_ROUNDS_BEFORE; ++round) { - forkskinny_128_384_round(&state, round); - } - - /* Determine which output blocks we need */ - if (output_left && output_right) { - /* We need both outputs so save the state at the forking point */ - uint32_t F[4]; - F[0] = state.S[0]; - F[1] = state.S[1]; - F[2] = state.S[2]; - F[3] = state.S[3]; - - /* Generate the right output block */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); ++round) { - forkskinny_128_384_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - - /* Restore the state at the forking point */ - state.S[0] = F[0]; - state.S[1] = F[1]; - state.S[2] = F[2]; - state.S[3] = F[3]; - } - if (output_left) { - /* Generate the left output block */ - state.S[0] ^= 0x08040201U; /* Branching constant */ - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - for (round = (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER * 2); ++round) { - forkskinny_128_384_round(&state, round); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - } else { - /* We only need the right output block */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); ++round) { - forkskinny_128_384_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - 
le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - } -} - -/** - * \brief Applies one round of ForkSkinny-128-384 in reverse. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_128_384_inv_round - (forkskinny_128_384_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Permute TK1 and TK2 for the next round */ - skinny128_inv_LFSR2(state->TK2[0]); - skinny128_inv_LFSR2(state->TK2[1]); - skinny128_inv_LFSR3(state->TK3[0]); - skinny128_inv_LFSR3(state->TK3[1]); - skinny128_inv_permute_tk(state->TK1); - skinny128_inv_permute_tk(state->TK2); - skinny128_inv_permute_tk(state->TK3); - - /* Inverse mix of the columns */ - temp = s0; - s0 = s1; - s1 = s2; - s2 = s3; - s3 = temp ^ s2; - s2 ^= s0; - s1 ^= s2; - - /* Shift the cells in the rows left, which moves the cell - * values down closer to the LSB. That is, we do a right - * rotate on the word to rotate the cells in the word left */ - s1 = rightRotate8(s1); - s2 = rightRotate16(s2); - s3 = rightRotate24(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all cells in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; -} - -void forkskinny_128_384_decrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_384_state_t state; - forkskinny_128_384_state_t fstate; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.TK3[0] = le_load_word32(key + 32); - state.TK3[1] = le_load_word32(key + 36); - state.TK3[2] = le_load_word32(key + 40); - state.TK3[3] = le_load_word32(key + 44); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Fast-forward the tweakey to the end of the key schedule */ - for (round = 0; round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER * 2); ++round) { - skinny128_permute_tk(state.TK1); - skinny128_permute_tk(state.TK2); - skinny128_permute_tk(state.TK3); - skinny128_LFSR2(state.TK2[0]); - skinny128_LFSR2(state.TK2[1]); - skinny128_LFSR3(state.TK3[0]); - skinny128_LFSR3(state.TK3[1]); - } - - /* Perform the "after" rounds on the input to get back - * to the forking point in the cipher */ - for (round = (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER * 2); - round > (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); --round) { - 
forkskinny_128_384_inv_round(&state, round - 1); - } - - /* Remove the branching constant */ - state.S[0] ^= 0x08040201U; - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - - /* Roll the tweakey back another "after" rounds */ - for (round = 0; round < FORKSKINNY_128_384_ROUNDS_AFTER; ++round) { - skinny128_inv_LFSR2(state.TK2[0]); - skinny128_inv_LFSR2(state.TK2[1]); - skinny128_inv_LFSR3(state.TK3[0]); - skinny128_inv_LFSR3(state.TK3[1]); - skinny128_inv_permute_tk(state.TK1); - skinny128_inv_permute_tk(state.TK2); - skinny128_inv_permute_tk(state.TK3); - } - - /* Save the state and the tweakey at the forking point */ - fstate = state; - - /* Generate the left output block after another "before" rounds */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; round > 0; --round) { - forkskinny_128_384_inv_round(&state, round - 1); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - - /* Generate the right output block by going forward "after" - * rounds from the forking point */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); ++round) { - forkskinny_128_384_round(&fstate, round); - } - le_store_word32(output_right, fstate.S[0]); - le_store_word32(output_right + 4, fstate.S[1]); - le_store_word32(output_right + 8, fstate.S[2]); - le_store_word32(output_right + 12, fstate.S[3]); -} - -/** - * \brief Number of rounds of ForkSkinny-64-192 before forking. - */ -#define FORKSKINNY_64_192_ROUNDS_BEFORE 17 - -/** - * \brief Number of rounds of ForkSkinny-64-192 after forking. - */ -#define FORKSKINNY_64_192_ROUNDS_AFTER 23 - -/** - * \brief State information for ForkSkinny-64-192. - */ -typedef struct -{ - uint16_t TK1[4]; /**< First part of the tweakey */ - uint16_t TK2[4]; /**< Second part of the tweakey */ - uint16_t TK3[4]; /**< Third part of the tweakey */ - uint16_t S[4]; /**< Current block state */ - -} forkskinny_64_192_state_t; - -/** - * \brief Applies one round of ForkSkinny-64-192. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - * - * Note: The cells of each row are order in big-endian nibble order - * so it is easiest to manage the rows in bit-endian byte order. 
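 * (With four 4-bit cells packed into each 16-bit row word in this layout,
 * the right shift of rows 1 to 3 is performed with 4-, 8- and 12-bit
 * right rotations of the row words in the code below.)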
- */ -static void forkskinny_64_192_round - (forkskinny_64_192_state_t *state, unsigned round) -{ - uint16_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Apply the S-box to all cells in the state */ - skinny64_sbox(s0); - skinny64_sbox(s1); - skinny64_sbox(s2); - skinny64_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - ((rc & 0x0F) << 12) ^ 0x0020; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ - ((rc & 0x70) << 8); - s2 ^= 0x2000; - - /* Shift the cells in the rows right */ - s1 = rightRotate4_16(s1); - s2 = rightRotate8_16(s2); - s3 = rightRotate12_16(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny64_permute_tk(state->TK1); - skinny64_permute_tk(state->TK2); - skinny64_permute_tk(state->TK3); - skinny64_LFSR2(state->TK2[0]); - skinny64_LFSR2(state->TK2[1]); - skinny64_LFSR3(state->TK3[0]); - skinny64_LFSR3(state->TK3[1]); -} - -void forkskinny_64_192_encrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_64_192_state_t state; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = be_load_word16(key); - state.TK1[1] = be_load_word16(key + 2); - state.TK1[2] = be_load_word16(key + 4); - state.TK1[3] = be_load_word16(key + 6); - state.TK2[0] = be_load_word16(key + 8); - state.TK2[1] = be_load_word16(key + 10); - state.TK2[2] = be_load_word16(key + 12); - state.TK2[3] = be_load_word16(key + 14); - state.TK3[0] = be_load_word16(key + 16); - state.TK3[1] = be_load_word16(key + 18); - state.TK3[2] = be_load_word16(key + 20); - state.TK3[3] = be_load_word16(key + 22); - state.S[0] = be_load_word16(input); - state.S[1] = be_load_word16(input + 2); - state.S[2] = be_load_word16(input + 4); - state.S[3] = be_load_word16(input + 6); - - /* Run all of the rounds before the forking point */ - for (round = 0; round < FORKSKINNY_64_192_ROUNDS_BEFORE; ++round) { - forkskinny_64_192_round(&state, round); - } - - /* Determine which output blocks we need */ - if (output_left && output_right) { - /* We need both outputs so save the state at the forking point */ - uint16_t F[4]; - F[0] = state.S[0]; - F[1] = state.S[1]; - F[2] = state.S[2]; - F[3] = state.S[3]; - - /* Generate the right output block */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; - round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); ++round) { - forkskinny_64_192_round(&state, round); - } - be_store_word16(output_right, state.S[0]); - be_store_word16(output_right + 2, state.S[1]); - be_store_word16(output_right + 4, state.S[2]); - be_store_word16(output_right + 6, state.S[3]); - - /* Restore the state at the forking point */ - state.S[0] = F[0]; - state.S[1] = F[1]; - state.S[2] = F[2]; - state.S[3] = F[3]; - } - if (output_left) { - /* Generate the left output block */ - state.S[0] ^= 0x1249U; /* Branching constant */ - state.S[1] ^= 0x36daU; - state.S[2] ^= 0x5b7fU; - state.S[3] ^= 0xec81U; - for (round = (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); - round < 
(FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER * 2); ++round) { - forkskinny_64_192_round(&state, round); - } - be_store_word16(output_left, state.S[0]); - be_store_word16(output_left + 2, state.S[1]); - be_store_word16(output_left + 4, state.S[2]); - be_store_word16(output_left + 6, state.S[3]); - } else { - /* We only need the right output block */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; - round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); ++round) { - forkskinny_64_192_round(&state, round); - } - be_store_word16(output_right, state.S[0]); - be_store_word16(output_right + 2, state.S[1]); - be_store_word16(output_right + 4, state.S[2]); - be_store_word16(output_right + 6, state.S[3]); - } -} - -/** - * \brief Applies one round of ForkSkinny-64-192 in reverse. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_64_192_inv_round - (forkskinny_64_192_state_t *state, unsigned round) -{ - uint16_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny64_inv_LFSR2(state->TK2[0]); - skinny64_inv_LFSR2(state->TK2[1]); - skinny64_inv_LFSR3(state->TK3[0]); - skinny64_inv_LFSR3(state->TK3[1]); - skinny64_inv_permute_tk(state->TK1); - skinny64_inv_permute_tk(state->TK2); - skinny64_inv_permute_tk(state->TK3); - - /* Inverse mix of the columns */ - temp = s0; - s0 = s1; - s1 = s2; - s2 = s3; - s3 = temp ^ s2; - s2 ^= s0; - s1 ^= s2; - - /* Shift the cells in the rows left */ - s1 = leftRotate4_16(s1); - s2 = leftRotate8_16(s2); - s3 = leftRotate12_16(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - ((rc & 0x0F) << 12) ^ 0x0020; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ - ((rc & 0x70) << 8); - s2 ^= 0x2000; - - /* Apply the inverse of the S-box to all cells in the state */ - skinny64_inv_sbox(s0); - skinny64_inv_sbox(s1); - skinny64_inv_sbox(s2); - skinny64_inv_sbox(s3); - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; -} - -void forkskinny_64_192_decrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_64_192_state_t state; - forkskinny_64_192_state_t fstate; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = be_load_word16(key); - state.TK1[1] = be_load_word16(key + 2); - state.TK1[2] = be_load_word16(key + 4); - state.TK1[3] = be_load_word16(key + 6); - state.TK2[0] = be_load_word16(key + 8); - state.TK2[1] = be_load_word16(key + 10); - state.TK2[2] = be_load_word16(key + 12); - state.TK2[3] = be_load_word16(key + 14); - state.TK3[0] = be_load_word16(key + 16); - state.TK3[1] = be_load_word16(key + 18); - state.TK3[2] = be_load_word16(key + 20); - state.TK3[3] = be_load_word16(key + 22); - state.S[0] = be_load_word16(input); - state.S[1] = be_load_word16(input + 2); - state.S[2] = be_load_word16(input + 4); - state.S[3] = be_load_word16(input + 6); - - /* Fast-forward the tweakey to the end of the key schedule */ - for (round = 0; round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER * 2); ++round) { - skinny64_permute_tk(state.TK1); - skinny64_permute_tk(state.TK2); - 
skinny64_permute_tk(state.TK3); - skinny64_LFSR2(state.TK2[0]); - skinny64_LFSR2(state.TK2[1]); - skinny64_LFSR3(state.TK3[0]); - skinny64_LFSR3(state.TK3[1]); - } - - /* Perform the "after" rounds on the input to get back - * to the forking point in the cipher */ - for (round = (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER * 2); - round > (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); --round) { - forkskinny_64_192_inv_round(&state, round - 1); - } - - /* Remove the branching constant */ - state.S[0] ^= 0x1249U; - state.S[1] ^= 0x36daU; - state.S[2] ^= 0x5b7fU; - state.S[3] ^= 0xec81U; - - /* Roll the tweakey back another "after" rounds */ - for (round = 0; round < FORKSKINNY_64_192_ROUNDS_AFTER; ++round) { - skinny64_inv_LFSR2(state.TK2[0]); - skinny64_inv_LFSR2(state.TK2[1]); - skinny64_inv_LFSR3(state.TK3[0]); - skinny64_inv_LFSR3(state.TK3[1]); - skinny64_inv_permute_tk(state.TK1); - skinny64_inv_permute_tk(state.TK2); - skinny64_inv_permute_tk(state.TK3); - } - - /* Save the state and the tweakey at the forking point */ - fstate = state; - - /* Generate the left output block after another "before" rounds */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; round > 0; --round) { - forkskinny_64_192_inv_round(&state, round - 1); - } - be_store_word16(output_left, state.S[0]); - be_store_word16(output_left + 2, state.S[1]); - be_store_word16(output_left + 4, state.S[2]); - be_store_word16(output_left + 6, state.S[3]); - - /* Generate the right output block by going forward "after" - * rounds from the forking point */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; - round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); ++round) { - forkskinny_64_192_round(&fstate, round); - } - be_store_word16(output_right, fstate.S[0]); - be_store_word16(output_right + 2, fstate.S[1]); - be_store_word16(output_right + 4, fstate.S[2]); - be_store_word16(output_right + 6, fstate.S[3]); -} diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-forkskinny.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-forkskinny.h deleted file mode 100644 index 0c1a707..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-forkskinny.h +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LW_INTERNAL_FORKSKINNY_H -#define LW_INTERNAL_FORKSKINNY_H - -/** - * \file internal-forkskinny.h - * \brief ForkSkinny block cipher family. - * - * ForkSkinny is a modified version of the SKINNY block cipher that - * supports "forking": half-way through the rounds the cipher is - * forked in two different directions to produce two different outputs. - * - * References: https://www.esat.kuleuven.be/cosic/forkae/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts a block of plaintext with ForkSkinny-128-256. - * - * \param key 256-bit tweakey for ForkSkinny-128-256. - * \param output_left Left output block for the ciphertext, or NULL if - * the left output is not required. - * \param output_right Right output block for the authentication tag, - * or NULL if the right output is not required. - * \param input 128-bit input plaintext block. - * - * ForkSkinny-128-192 also uses this function with a padded tweakey. - */ -void forkskinny_128_256_encrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Decrypts a block of ciphertext with ForkSkinny-128-256. - * - * \param key 256-bit tweakey for ForkSkinny-128-256. - * \param output_left Left output block, which is the plaintext. - * \param output_right Right output block for the authentication tag. - * \param input 128-bit input ciphertext block. - * - * Both output blocks will be populated; neither is optional. - */ -void forkskinny_128_256_decrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Encrypts a block of plaintext with ForkSkinny-128-384. - * - * \param key 384-bit tweakey for ForkSkinny-128-384. - * \param output_left Left output block for the ciphertext, or NULL if - * the left output is not required. - * \param output_right Right output block for the authentication tag, - * or NULL if the right output is not required. - * \param input 128-bit input plaintext block. - * - * ForkSkinny-128-288 also uses this function with a padded tweakey. - */ -void forkskinny_128_384_encrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Decrypts a block of ciphertext with ForkSkinny-128-384. - * - * \param key 384-bit tweakey for ForkSkinny-128-384. - * \param output_left Left output block, which is the plaintext. - * \param output_right Right output block for the authentication tag. - * \param input 128-bit input ciphertext block. - * - * Both output blocks will be populated; neither is optional. - */ -void forkskinny_128_384_decrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Encrypts a block of input with ForkSkinny-64-192. - * - * \param key 192-bit tweakey for ForkSkinny-64-192. - * \param output_left First output block, or NULL if left is not required. - * \param output_right Second output block, or NULL if right is not required. - * \param input 64-bit input block. - */ -/** - * \brief Encrypts a block of plaintext with ForkSkinny-64-192. - * - * \param key 192-bit tweakey for ForkSkinny-64-192. - * \param output_left Left output block for the ciphertext, or NULL if - * the left output is not required. - * \param output_right Right output block for the authentication tag, - * or NULL if the right output is not required. 
- * \param input 64-bit input plaintext block. - */ -void forkskinny_64_192_encrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Decrypts a block of ciphertext with ForkSkinny-64-192. - * - * \param key 192-bit tweakey for ForkSkinny-64-192. - * \param output_left Left output block, which is the plaintext. - * \param output_right Right output block for the authentication tag. - * \param input 64-bit input ciphertext block. - * - * Both output blocks will be populated; neither is optional. - */ -void forkskinny_64_192_decrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-skinnyutil.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-skinnyutil.h deleted file mode 100644 index 83136cb..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-skinnyutil.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNYUTIL_H -#define LW_INTERNAL_SKINNYUTIL_H - -/** - * \file internal-skinnyutil.h - * \brief Utilities to help implement SKINNY and its variants. 
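/* Illustrative usage sketch (not part of the patch): one encrypt/decrypt
 * round trip through the ForkSkinny-64-192 API declared above. The all-zero
 * key and input values are hypothetical placeholders. */
#include <string.h>
static int forkskinny_64_192_roundtrip_sketch(void)
{
    unsigned char key[24] = {0};      /* 192-bit tweakey */
    unsigned char input[8] = {0};     /* 64-bit plaintext block */
    unsigned char left[8], right[8];  /* ciphertext branch and tag branch */
    unsigned char plain[8], tag[8];
    forkskinny_64_192_encrypt(key, left, right, input);
    /* Decryption consumes the left (ciphertext) branch and reproduces both
     * the plaintext and the right branch. */
    forkskinny_64_192_decrypt(key, plain, tag, left);
    return memcmp(plain, input, 8) == 0 && memcmp(tag, right, 8) == 0;
}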
- */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @cond skinnyutil */ - -/* Utilities for implementing SKINNY-128 */ - -#define skinny128_LFSR2(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ - (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ - } while (0) - - -#define skinny128_LFSR3(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ - (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) -#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) - -#define skinny128_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint32_t row2 = tk[2]; \ - uint32_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 16) | (row3 >> 16); \ - tk[0] = ((row2 >> 8) & 0x000000FFU) | \ - ((row2 << 16) & 0x00FF0000U) | \ - ( row3 & 0xFF00FF00U); \ - tk[1] = ((row2 >> 16) & 0x000000FFU) | \ - (row2 & 0xFF000000U) | \ - ((row3 << 8) & 0x0000FF00U) | \ - ( row3 & 0x00FF0000U); \ - } while (0) - -#define skinny128_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint32_t row0 = tk[0]; \ - uint32_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 >> 16) & 0x000000FFU) | \ - ((row0 << 8) & 0x0000FF00U) | \ - ((row1 << 16) & 0x00FF0000U) | \ - ( row1 & 0xFF000000U); \ - tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ - ((row0 << 16) & 0xFF000000U) | \ - ((row1 >> 16) & 0x000000FFU) | \ - ((row1 << 8) & 0x00FF0000U); \ - } while (0) - -/* - * Apply the SKINNY sbox. The original version from the specification is - * equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE(x) - * ((((x) & 0x01010101U) << 2) | - * (((x) & 0x06060606U) << 5) | - * (((x) & 0x20202020U) >> 5) | - * (((x) & 0xC8C8C8C8U) >> 2) | - * (((x) & 0x10101010U) >> 1)) - * - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * return SBOX_SWAP(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ - y = (((x >> 5) & (x << 1)) & 0x04040404U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ - x = ((x & 0x08080808U) << 1) | \ - ((x & 0x32323232U) << 2) | \ - ((x & 0x01010101U) << 5) | \ - ((x & 0x80808080U) >> 6) | \ - ((x & 0x40404040U) >> 4) | \ - ((x & 0x04040404U) >> 2); \ -} while (0) - -/* - * Apply the inverse of the SKINNY sbox. 
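/* Illustrative only (not part of the patch): the word-sliced macros above
 * update four tweakey bytes at once. Per-byte reference forms of the same
 * TK2/TK3 LFSRs, given here only as a sketch for checking the packed
 * versions, would be: */
#include <stdint.h>
static uint8_t skinny128_lfsr2_byte_sketch(uint8_t x)
{
    /* TK2 LFSR: shift left one bit, feed back x7 ^ x5 into the low bit */
    return (uint8_t)(((x << 1) & 0xFE) ^ (((x >> 7) ^ (x >> 5)) & 0x01));
}
static uint8_t skinny128_lfsr3_byte_sketch(uint8_t x)
{
    /* TK3 LFSR: shift right one bit, feed back x0 ^ x6 into the high bit */
    return (uint8_t)(((x >> 1) & 0x7F) ^ (((x << 7) ^ (x << 1)) & 0x80));
}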
The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE_INV(x) - * ((((x) & 0x08080808U) << 1) | - * (((x) & 0x32323232U) << 2) | - * (((x) & 0x01010101U) << 5) | - * (((x) & 0xC0C0C0C0U) >> 5) | - * (((x) & 0x04040404U) >> 2)) - * - * x = SBOX_SWAP(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_inv_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ - x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ - y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ - x = ((x & 0x01010101U) << 2) | \ - ((x & 0x04040404U) << 4) | \ - ((x & 0x02020202U) << 6) | \ - ((x & 0x20202020U) >> 5) | \ - ((x & 0xC8C8C8C8U) >> 2) | \ - ((x & 0x10101010U) >> 1); \ -} while (0) - -/* Utilities for implementing SKINNY-64 */ - -#define skinny64_LFSR2(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ - } while (0) - -#define skinny64_LFSR3(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) -#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) - -#define skinny64_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint16_t row2 = tk[2]; \ - uint16_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 8) | (row3 >> 8); \ - tk[0] = ((row2 << 4) & 0xF000U) | \ - ((row2 >> 8) & 0x00F0U) | \ - ( row3 & 0x0F0FU); \ - tk[1] = ((row2 << 8) & 0xF000U) | \ - ((row3 >> 4) & 0x0F00U) | \ - ( row3 & 0x00F0U) | \ - ( row2 & 0x000FU); \ - } while (0) - -#define skinny64_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint16_t row0 = tk[0]; \ - uint16_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 << 8) & 0xF000U) | \ - ((row0 >> 4) & 0x0F00U) | \ - ((row1 >> 8) & 0x00F0U) | \ - ( row1 & 0x000FU); \ - tk[3] = ((row1 << 8) & 0xF000U) | \ - ((row0 << 8) & 0x0F00U) | \ - ((row1 >> 4) & 0x00F0U) | \ - ((row0 >> 8) & 0x000FU); \ - } while (0) - -/* - * Apply the SKINNY-64 sbox. 
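/* Illustrative only (not part of the patch): the packed permute_tk macros
 * above implement the SKINNY tweakey cell permutation PT on two 16-bit rows
 * at a time. A plain cell-array reference of the same permutation, assuming
 * the convention new[i] = old[PT[i]], would be: */
#include <string.h>
static void skinny_permute_tk_cells_sketch(unsigned char cells[16])
{
    static const unsigned char PT[16] =
        {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7};
    unsigned char out[16];
    int i;
    for (i = 0; i < 16; ++i)
        out[i] = cells[PT[i]];
    memcpy(cells, out, 16);
}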
The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT(x) - * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_SHIFT steps to be performed with one final rotation. - * This reduces the number of required shift operations from 14 to 10. - * - * We can further reduce the number of NOT operations from 4 to 2 - * using the technique from https://github.com/kste/skinny_avx to - * convert NOR-XOR operations into AND-XOR operations by converting - * the S-box into its NOT-inverse. - */ -#define skinny64_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ - x = ~x; \ - x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ -} while (0) - -/* - * Apply the inverse of the SKINNY-64 sbox. The original version - * from the specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT_INV(x) - * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * return SBOX_MIX(x); - */ -#define skinny64_inv_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = ~x; \ - x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ -} while (0) - -/** @endcond */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-util.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define 
le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. 
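/* Illustrative host-side check (not part of the patch): composing
 * "rotate left by 8, then right by 3" should agree with a direct rotate
 * left by 5, which is the identity the composed macros below rely on. */
#include <assert.h>
#include <stdint.h>
static uint32_t rotl32_sketch(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32u - bits));
}
static void check_left_rotate5_sketch(void)
{
    uint32_t x = 0x12345678U;
    /* rotate left by 8, then right by 3 (expressed as a further left rotate) */
    uint32_t composed = rotl32_sketch(rotl32_sketch(x, 8), 32u - 3u);
    assert(composed == rotl32_sketch(x, 5));
}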
*/ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys/internal-util.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys/internal-util.h +++ b/forkae/Implementations/crypto_aead/paefforkskinnyb128t256n112v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/aead-common.c b/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
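A quick way to sanity-check the LW_CRYPTO_ROTATE32_COMPOSED macros added to internal-util.h in the hunk above is to compare one composed rotation against the generic barrel-shift form on a host machine. The sketch below is illustrative only; rol32()/ror32() are stand-ins for the leftRotate()/rightRotate() macros and are not part of the patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Generic 32-bit rotations, equivalent to leftRotate()/rightRotate(). */
    static uint32_t rol32(uint32_t x, unsigned bits) { return (x << bits) | (x >> (32 - bits)); }
    static uint32_t ror32(uint32_t x, unsigned bits) { return (x >> bits) | (x << (32 - bits)); }

    /* Composed form of leftRotate5(): rotate left by 8, then right by 1 three
     * times, exactly as the AVR-oriented macro above composes it. */
    static uint32_t rol32_by5_composed(uint32_t x)
    {
        return ror32(ror32(ror32(rol32(x, 8), 1), 1), 1);
    }

    int main(void)
    {
        uint32_t x = 0x12345678u;
        printf("direct:   %08lx\n", (unsigned long)rol32(x, 5));          /* 468acf02 */
        printf("composed: %08lx\n", (unsigned long)rol32_by5_composed(x)); /* 468acf02 */
        return 0;
    }

Both lines print the same value, which is the point of the composed scheme: only rotations by 1 and by multiples of 8 need to be fast on the target.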
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/aead-common.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. 
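The aead_check_tag() code in the hunk above avoids data-dependent branches by XOR-accumulating the two tags and then turning the accumulator into a mask. A small standalone illustration of that mapping, assuming the usual arithmetic right shift on signed int (the same assumption the original code makes), is:

    #include <stdio.h>

    /* Mirror of the "(accum - 1) >> 8" step: an accumulator of 0 (tags equal)
     * becomes the all-ones mask -1; any value in 1..255 becomes 0. */
    static int accum_to_mask(int accum)
    {
        return (accum - 1) >> 8;
    }

    int main(void)
    {
        /* Equal tags: mask = -1, plaintext is kept, ~mask == 0 is returned. */
        printf("equal:     mask=%d  return=%d\n", accum_to_mask(0), ~accum_to_mask(0));
        /* Any difference: mask = 0, plaintext is zeroed, ~mask == -1 is returned. */
        printf("different: mask=%d  return=%d\n", accum_to_mask(0x5a), ~accum_to_mask(0x5a));
        return 0;
    }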
- */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. 
- */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. 
- */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/api.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/api.h deleted file mode 100644 index 500c2c7..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 13 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/encrypt.c b/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/encrypt.c deleted file mode 100644 index b23be7f..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "forkae.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return forkae_paef_128_288_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return forkae_paef_128_288_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/forkae.c b/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/forkae.c deleted file mode 100644 index 4a9671a..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/forkae.c +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
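The encrypt.c shown above is only a thin adapter from the NIST LWC crypto_aead_encrypt()/crypto_aead_decrypt() entry points to the ForkAE-specific functions, so a caller works against the standard API plus the constants from api.h. A hedged round-trip sketch against that API is below; the prototypes are the ones normally supplied by a SUPERCOP/LWC test harness, and the buffer sizes and function name forkae_roundtrip() are illustrative.

    #include <string.h>

    /* Constants from api.h for PAEF-ForkSkinny-128-288. */
    #define CRYPTO_KEYBYTES  16
    #define CRYPTO_NPUBBYTES 13
    #define CRYPTO_ABYTES    16

    /* Prototypes normally provided by the harness's crypto_aead.h. */
    int crypto_aead_encrypt(unsigned char *c, unsigned long long *clen,
                            const unsigned char *m, unsigned long long mlen,
                            const unsigned char *ad, unsigned long long adlen,
                            const unsigned char *nsec, const unsigned char *npub,
                            const unsigned char *k);
    int crypto_aead_decrypt(unsigned char *m, unsigned long long *mlen,
                            unsigned char *nsec,
                            const unsigned char *c, unsigned long long clen,
                            const unsigned char *ad, unsigned long long adlen,
                            const unsigned char *npub, const unsigned char *k);

    /* Encrypt a 32-byte message and decrypt it again; returns 0 on success. */
    int forkae_roundtrip(void)
    {
        unsigned char key[CRYPTO_KEYBYTES] = {0};
        unsigned char nonce[CRYPTO_NPUBBYTES] = {0};
        unsigned char msg[32] = "hello, forkae";
        unsigned char ct[32 + CRYPTO_ABYTES];
        unsigned char out[32];
        unsigned long long clen, mlen;

        if (crypto_aead_encrypt(ct, &clen, msg, sizeof(msg), 0, 0, 0, nonce, key) != 0)
            return -1;
        if (crypto_aead_decrypt(out, &mlen, 0, ct, clen, 0, 0, nonce, key) != 0)
            return -1;   /* parameter error or failed tag check */
        return memcmp(out, msg, sizeof(msg)) == 0 ? 0 : -1;
    }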
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "forkae.h" -#include "internal-forkskinny.h" -#include "internal-util.h" -#include - -aead_cipher_t const forkae_paef_64_192_cipher = { - "PAEF-ForkSkinny-64-192", - FORKAE_PAEF_64_192_KEY_SIZE, - FORKAE_PAEF_64_192_NONCE_SIZE, - FORKAE_PAEF_64_192_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_64_192_aead_encrypt, - forkae_paef_64_192_aead_decrypt -}; - -aead_cipher_t const forkae_paef_128_192_cipher = { - "PAEF-ForkSkinny-128-192", - FORKAE_PAEF_128_192_KEY_SIZE, - FORKAE_PAEF_128_192_NONCE_SIZE, - FORKAE_PAEF_128_192_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_128_192_aead_encrypt, - forkae_paef_128_192_aead_decrypt -}; - -aead_cipher_t const forkae_paef_128_256_cipher = { - "PAEF-ForkSkinny-128-256", - FORKAE_PAEF_128_256_KEY_SIZE, - FORKAE_PAEF_128_256_NONCE_SIZE, - FORKAE_PAEF_128_256_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_128_256_aead_encrypt, - forkae_paef_128_256_aead_decrypt -}; - -aead_cipher_t const forkae_paef_128_288_cipher = { - "PAEF-ForkSkinny-128-288", - FORKAE_PAEF_128_288_KEY_SIZE, - FORKAE_PAEF_128_288_NONCE_SIZE, - FORKAE_PAEF_128_288_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_128_288_aead_encrypt, - forkae_paef_128_288_aead_decrypt -}; - -aead_cipher_t const forkae_saef_128_192_cipher = { - "SAEF-ForkSkinny-128-192", - FORKAE_SAEF_128_192_KEY_SIZE, - FORKAE_SAEF_128_192_NONCE_SIZE, - FORKAE_SAEF_128_192_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_saef_128_192_aead_encrypt, - forkae_saef_128_192_aead_decrypt -}; - -aead_cipher_t const forkae_saef_128_256_cipher = { - "SAEF-ForkSkinny-128-256", - FORKAE_SAEF_128_256_KEY_SIZE, - FORKAE_SAEF_128_256_NONCE_SIZE, - FORKAE_SAEF_128_256_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_saef_128_256_aead_encrypt, - forkae_saef_128_256_aead_decrypt -}; - -/* PAEF-ForkSkinny-64-192 */ -#define FORKAE_ALG_NAME forkae_paef_64_192 -#define FORKAE_BLOCK_SIZE 8 -#define FORKAE_NONCE_SIZE FORKAE_PAEF_64_192_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 2 -#define FORKAE_TWEAKEY_SIZE 24 -#define FORKAE_BLOCK_FUNC forkskinny_64_192 -#include "internal-forkae-paef.h" - -/* PAEF-ForkSkinny-128-192 */ -#define FORKAE_ALG_NAME forkae_paef_128_192 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_PAEF_128_192_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 2 -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-paef.h" - -/* PAEF-ForkSkinny-128-256 */ -#define FORKAE_ALG_NAME forkae_paef_128_256 -#define FORKAE_BLOCK_SIZE 16 
-#define FORKAE_NONCE_SIZE FORKAE_PAEF_128_256_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 2 -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-paef.h" - -/* PAEF-ForkSkinny-128-288 */ -#define FORKAE_ALG_NAME forkae_paef_128_288 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_PAEF_128_288_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 7 -#define FORKAE_TWEAKEY_SIZE 48 -#define FORKAE_BLOCK_FUNC forkskinny_128_384 -#include "internal-forkae-paef.h" - -/* SAEF-ForkSkinny-128-192 */ -#define FORKAE_ALG_NAME forkae_saef_128_192 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_SAEF_128_192_NONCE_SIZE -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_TWEAKEY_REDUCED_SIZE 24 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-saef.h" - -/* SAEF-ForkSkinny-128-256 */ -#define FORKAE_ALG_NAME forkae_saef_128_256 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_SAEF_128_256_NONCE_SIZE -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_TWEAKEY_REDUCED_SIZE 32 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-saef.h" diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/forkae.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/forkae.h deleted file mode 100644 index 3e27b50..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/forkae.h +++ /dev/null @@ -1,551 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_FORKAE_H -#define LWCRYPTO_FORKAE_H - -#include "aead-common.h" - -/** - * \file forkae.h - * \brief ForkAE authenticated encryption algorithm family. - * - * ForkAE is a family of authenticated encryption algorithms based on a - * modified version of the SKINNY tweakable block cipher. The modifications - * introduce "forking" where each input block produces two output blocks - * for use in encryption and authentication. There are six members in - * the ForkAE family: - * - * \li PAEF-ForkSkinny-64-192 has a 128-bit key, a 48-bit nonce, and a - * 64-bit authentication tag. The associated data and plaintext are - * limited to 216 bytes. - * \li PAEF-ForkSkinny-128-192 has a 128-bit key, a 48-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext are - * limited to 217 bytes. 
- * \li PAEF-ForkSkinny-128-256 has a 128-bit key, a 112-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext are - * limited to 217 bytes. - * \li PAEF-ForkSkinny-128-288 has a 128-bit key, a 104-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext are - * limited to 257 bytes. This is the primary member of the family. - * \li SAEF-ForkSkinny-128-192 has a 128-bit key, a 56-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext may be - * unlimited in size. - * \li SAEF-ForkSkinny-128-256 has a 128-bit key, a 120-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext may be - * unlimited in size. - * - * The PAEF variants support parallel encryption and decryption for - * higher throughput. The SAEF variants encrypt or decrypt blocks - * sequentially. - * - * ForkAE is designed to be efficient on small packet sizes so most of - * the PAEF algorithms have a limit of 64k or 128k on the amount of - * payload in a single packet. Obviously the input can be split into - * separate packets for larger amounts of data. - * - * References: https://www.esat.kuleuven.be/cosic/forkae/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for PAEF-ForkSkinny-64-192. - */ -#define FORKAE_PAEF_64_192_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-64-192. - */ -#define FORKAE_PAEF_64_192_TAG_SIZE 8 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-64-192. - */ -#define FORKAE_PAEF_64_192_NONCE_SIZE 6 - -/** - * \brief Size of the key for PAEF-ForkSkinny-128-192. - */ -#define FORKAE_PAEF_128_192_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-128-192. - */ -#define FORKAE_PAEF_128_192_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-128-192. - */ -#define FORKAE_PAEF_128_192_NONCE_SIZE 6 - -/** - * \brief Size of the key for PAEF-ForkSkinny-128-256. - */ -#define FORKAE_PAEF_128_256_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-128-256. - */ -#define FORKAE_PAEF_128_256_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-128-256. - */ -#define FORKAE_PAEF_128_256_NONCE_SIZE 14 - -/** - * \brief Size of the key for PAEF-ForkSkinny-128-288. - */ -#define FORKAE_PAEF_128_288_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-128-288. - */ -#define FORKAE_PAEF_128_288_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-128-288. - */ -#define FORKAE_PAEF_128_288_NONCE_SIZE 13 - -/** - * \brief Size of the key for SAEF-ForkSkinny-128-192. - */ -#define FORKAE_SAEF_128_192_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SAEF-ForkSkinny-128-192. - */ -#define FORKAE_SAEF_128_192_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SAEF-ForkSkinny-128-192. - */ -#define FORKAE_SAEF_128_192_NONCE_SIZE 7 - -/** - * \brief Size of the key for SAEF-ForkSkinny-128-256. - */ -#define FORKAE_SAEF_128_256_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SAEF-ForkSkinny-128-256. - */ -#define FORKAE_SAEF_128_256_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SAEF-ForkSkinny-128-256. - */ -#define FORKAE_SAEF_128_256_NONCE_SIZE 15 - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-64-192 cipher. - */ -extern aead_cipher_t const forkae_paef_64_192_cipher; - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-128-192 cipher. 
- */ -extern aead_cipher_t const forkae_paef_128_192_cipher; - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-128-256 cipher. - */ -extern aead_cipher_t const forkae_paef_128_256_cipher; - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-128-288 cipher. - */ -extern aead_cipher_t const forkae_paef_128_288_cipher; - -/** - * \brief Meta-information block for the SAEF-ForkSkinny-128-192 cipher. - */ -extern aead_cipher_t const forkae_saef_128_192_cipher; - -/** - * \brief Meta-information block for the SAEF-ForkSkinny-128-256 cipher. - */ -extern aead_cipher_t const forkae_saef_128_256_cipher; - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-64-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_64_192_aead_decrypt() - */ -int forkae_paef_64_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-64-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_paef_64_192_aead_encrypt() - */ -int forkae_paef_64_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-128-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. 
- * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_128_192_aead_decrypt() - */ -int forkae_paef_128_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-128-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_paef_128_192_aead_encrypt() - */ -int forkae_paef_128_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-128-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 14 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_128_256_aead_decrypt() - */ -int forkae_paef_128_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-128-256. 
- * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 14 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_paef_128_256_aead_encrypt() - */ -int forkae_paef_128_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-128-288. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 13 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_128_288_aead_decrypt() - */ -int forkae_paef_128_288_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-128-288. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 13 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- * - * \sa forkae_paef_128_288_aead_encrypt() - */ -int forkae_paef_128_288_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SAEF-ForkSkinny-128-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 7 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_saef_128_192_aead_decrypt() - */ -int forkae_saef_128_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SAEF-ForkSkinny-128-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 7 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_saef_128_192_aead_encrypt() - */ -int forkae_saef_128_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SAEF-ForkSkinny-128-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. 
- * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_saef_128_256_aead_decrypt() - */ -int forkae_saef_128_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SAEF-ForkSkinny-128-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_saef_128_256_aead_encrypt() - */ -int forkae_saef_128_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-forkae-paef.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-forkae-paef.h deleted file mode 100644 index 6f57b2b..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-forkae-paef.h +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -/* We expect a number of macros to be defined before this file - * is included to configure the underlying ForkAE PAEF variant. - * - * FORKAE_ALG_NAME Name of the FORKAE algorithm; e.g. forkae_paef_128_256 - * FORKAE_BLOCK_SIZE Size of the block for the cipher (8 or 16 bytes). - * FORKAE_NONCE_SIZE Size of the nonce for the cipher in bytes. - * FORKAE_COUNTER_SIZE Size of the counter value for the cipher in bytes. - * FORKAE_TWEAKEY_SIZE Size of the tweakey for the underlying forked cipher. - * FORKAE_BLOCK_FUNC Name of the block function; e.g. forkskinny_128_256 - */ -#if defined(FORKAE_ALG_NAME) - -#define FORKAE_CONCAT_INNER(name,suffix) name##suffix -#define FORKAE_CONCAT(name,suffix) FORKAE_CONCAT_INNER(name,suffix) - -/* Limit on the amount of data we can process based on the counter size */ -#define FORKAE_PAEF_DATA_LIMIT \ - ((unsigned long long)((1ULL << (FORKAE_COUNTER_SIZE * 8)) * \ - (FORKAE_BLOCK_SIZE / 8)) - FORKAE_BLOCK_SIZE) - -/* Processes the associated data in PAEF mode */ -STATIC_INLINE void FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter) - (unsigned char tweakey[FORKAE_TWEAKEY_SIZE], - unsigned long long counter, unsigned char domain) -{ - unsigned posn; - counter |= (((unsigned long long)domain) << (FORKAE_COUNTER_SIZE * 8 - 3)); - for (posn = 0; posn < FORKAE_COUNTER_SIZE; ++posn) { - tweakey[16 + FORKAE_NONCE_SIZE + FORKAE_COUNTER_SIZE - 1 - posn] = - (unsigned char)counter; - counter >>= 8; - } -} - -/* Check that the last block is padded correctly; -1 if ok, 0 if not */ -STATIC_INLINE int FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (const unsigned char *block, unsigned len) -{ - int check = block[0] ^ 0x80; - while (len > 1) { - --len; - check |= block[len]; - } - return (check - 1) >> 8; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_encrypt) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - unsigned long long counter; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + FORKAE_BLOCK_SIZE; - - /* Validate the size of the associated data and plaintext as there - * is a limit on the size of the PAEF counter field */ - if (adlen > FORKAE_PAEF_DATA_LIMIT || mlen > FORKAE_PAEF_DATA_LIMIT) - return -2; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - - /* Tag value starts at zero. 
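For a concrete feel of the counter handling above: the PAEF-ForkSkinny-128-288 instantiation uses a 13-byte nonce and a 7-byte counter inside a 48-byte tweakey, and the 3-bit domain separator is folded into the top bits of the counter field. The standalone sketch below (local stand-in names, not the patch's macros) packs counter 1 with domain 4, the value used for the first message block.

    #include <stdio.h>

    #define NONCE_SIZE   13     /* FORKAE_PAEF_128_288_NONCE_SIZE */
    #define COUNTER_SIZE 7      /* FORKAE_COUNTER_SIZE for this variant */
    #define TWEAKEY_SIZE 48

    /* Same packing as the _set_counter() helper above: big-endian counter in
     * the last COUNTER_SIZE bytes of the tweakey, domain in its top 3 bits. */
    static void set_counter(unsigned char tweakey[TWEAKEY_SIZE],
                            unsigned long long counter, unsigned char domain)
    {
        unsigned posn;
        counter |= ((unsigned long long)domain) << (COUNTER_SIZE * 8 - 3);
        for (posn = 0; posn < COUNTER_SIZE; ++posn) {
            tweakey[16 + NONCE_SIZE + COUNTER_SIZE - 1 - posn] = (unsigned char)counter;
            counter >>= 8;
        }
    }

    int main(void)
    {
        unsigned char tk[TWEAKEY_SIZE] = {0};
        unsigned i;
        set_counter(tk, 1, 4);
        for (i = 16 + NONCE_SIZE; i < 16 + NONCE_SIZE + COUNTER_SIZE; ++i)
            printf("%02x ", tk[i]);     /* prints: 80 00 00 00 00 00 01 */
        printf("\n");
        return 0;
    }

With a 7-byte counter and 16-byte blocks, the FORKAE_PAEF_DATA_LIMIT formula above works out to 2^57 - 16 bytes for this variant, which is where the per-packet limit documented in forkae.h comes from.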
We will XOR this with all of the - * intermediate tag values that are calculated for each block */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - counter = 1; - while (adlen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 0); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - ++counter; - } - if (adlen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 1); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } else if (adlen != 0 || mlen == 0) { - unsigned temp = (unsigned)adlen; - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, sizeof(block) - temp - 1); - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 3); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, block); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } - - /* If there is no message payload, then generate the tag and we are done */ - if (!mlen) { - memcpy(c, tag, sizeof(tag)); - return 0; - } - - /* Encrypt all plaintext blocks except the last */ - counter = 1; - while (mlen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 4); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, m); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - mlen -= FORKAE_BLOCK_SIZE; - ++counter; - } - - /* Encrypt the last block and generate the final authentication tag */ - if (mlen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 5); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, m); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, FORKAE_BLOCK_SIZE); - } else { - unsigned temp = (unsigned)mlen; - memcpy(block, m, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, sizeof(block) - temp - 1); - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 7); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, temp); - } - return 0; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_decrypt) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - unsigned char *mtemp = m; - unsigned long long counter; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < FORKAE_BLOCK_SIZE) - return -1; - clen -= FORKAE_BLOCK_SIZE; - *mlen = clen; - - /* Validate the size of the associated data and plaintext as there - * is a limit on the size of the PAEF counter field */ - if (adlen > FORKAE_PAEF_DATA_LIMIT || clen > FORKAE_PAEF_DATA_LIMIT) - return -2; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - - /* Tag value starts at zero. 
We will XOR this with all of the - * intermediate tag values that are calculated for each block */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - counter = 1; - while (adlen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 0); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - ++counter; - } - if (adlen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 1); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } else if (adlen != 0 || clen == 0) { - unsigned temp = (unsigned)adlen; - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, sizeof(block) - temp - 1); - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 3); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, block); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } - - /* If there is no message payload, then check the tag and we are done */ - if (!clen) - return aead_check_tag(m, clen, tag, c, sizeof(tag)); - - /* Decrypt all ciphertext blocks except the last */ - counter = 1; - while (clen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 4); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, c); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - clen -= FORKAE_BLOCK_SIZE; - ++counter; - } - - /* Decrypt the last block and check the final authentication tag */ - if (clen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 5); - lw_xor_block_2_src(m, c, tag, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, m); - return aead_check_tag - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, sizeof(tag)); - } else { - unsigned temp = (unsigned)clen; - unsigned char block2[FORKAE_BLOCK_SIZE]; - int check; - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 7); - lw_xor_block_2_src(block2, tag, c, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt) - (tweakey, block2, block, block2); - check = FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (block2 + temp, FORKAE_BLOCK_SIZE - temp); - memcpy(m, block2, temp); - return aead_check_tag_precheck - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, temp, check); - } -} - -#endif /* FORKAE_ALG_NAME */ - -/* Now undefine everything so that we can include this file again for - * another variant on the ForkAE PAEF algorithm */ -#undef FORKAE_ALG_NAME -#undef FORKAE_BLOCK_SIZE -#undef FORKAE_NONCE_SIZE -#undef FORKAE_COUNTER_SIZE -#undef FORKAE_TWEAKEY_SIZE -#undef FORKAE_BLOCK_FUNC -#undef FORKAE_CONCAT_INNER -#undef FORKAE_CONCAT -#undef FORKAE_PAEF_DATA_LIMIT diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-forkae-saef.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-forkae-saef.h deleted file mode 100644 index 768bba4..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-forkae-saef.h +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -/* We expect a number of macros to be defined before this file - * is included to configure the underlying ForkAE SAEF variant. - * - * FORKAE_ALG_NAME Name of the FORKAE algorithm; e.g. forkae_saef_128_256 - * FORKAE_BLOCK_SIZE Size of the block for the cipher (8 or 16 bytes). - * FORKAE_NONCE_SIZE Size of the nonce for the cipher in bytes. - * FORKAE_TWEAKEY_SIZE Size of the tweakey for the underlying forked cipher. - * FORKAE_REDUCED_TWEAKEY_SIZE Size of the reduced tweakey without padding. - * FORKAE_BLOCK_FUNC Name of the block function; e.g. forkskinny_128_256 - */ -#if defined(FORKAE_ALG_NAME) - -#define FORKAE_CONCAT_INNER(name,suffix) name##suffix -#define FORKAE_CONCAT(name,suffix) FORKAE_CONCAT_INNER(name,suffix) - -/* Check that the last block is padded correctly; -1 if ok, 0 if not */ -STATIC_INLINE int FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (const unsigned char *block, unsigned len) -{ - int check = block[0] ^ 0x80; - while (len > 1) { - --len; - check |= block[len]; - } - return (check - 1) >> 8; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_encrypt) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + FORKAE_BLOCK_SIZE; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] = 0x08; - - /* Tag value starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - if (adlen > 0 || mlen == 0) { - while (adlen > FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - } - if (mlen == 0) - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x02; - if (adlen == FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - 
FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } else if (adlen != 0 || mlen == 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(tag, ad, temp); - tag[temp] ^= 0x80; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } - } - - /* If there is no message payload, then generate the tag and we are done */ - if (!mlen) { - memcpy(c, tag, sizeof(tag)); - return 0; - } - - /* Encrypt all plaintext blocks except the last */ - while (mlen > FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, m, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(tag, block, FORKAE_BLOCK_SIZE); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - mlen -= FORKAE_BLOCK_SIZE; - } - - /* Encrypt the last block and generate the final authentication tag */ - if (mlen == FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, m, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, FORKAE_BLOCK_SIZE); - } else { - unsigned temp = (unsigned)mlen; - memcpy(block, tag, FORKAE_BLOCK_SIZE); - lw_xor_block(block, m, temp); - block[temp] ^= 0x80; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x05; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, temp); - } - return 0; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_decrypt) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < FORKAE_BLOCK_SIZE) - return -1; - clen -= FORKAE_BLOCK_SIZE; - *mlen = clen; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] = 0x08; - - /* Tag value starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - if (adlen > 0 || clen == 0) { - while (adlen > FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - } - if (clen == 0) - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x02; - if (adlen == FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } else if (adlen != 0 || clen == 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(tag, ad, temp); - 
tag[temp] ^= 0x80; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } - } - - /* If there is no message payload, then check the tag and we are done */ - if (!clen) - return aead_check_tag(m, clen, tag, c, sizeof(tag)); - - /* Decrypt all ciphertext blocks except the last */ - while (clen > FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, c, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, block); - lw_xor_block(m, tag, FORKAE_BLOCK_SIZE); - memcpy(tag, block, FORKAE_BLOCK_SIZE); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - clen -= FORKAE_BLOCK_SIZE; - } - - /* Decrypt the last block and check the final authentication tag */ - if (clen == FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, c, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, block); - lw_xor_block(m, tag, FORKAE_BLOCK_SIZE); - return aead_check_tag - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, FORKAE_BLOCK_SIZE); - } else { - unsigned temp = (unsigned)clen; - unsigned char mblock[FORKAE_BLOCK_SIZE]; - int check; - lw_xor_block_2_src(block, c, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x05; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt) - (tweakey, mblock, block, block); - lw_xor_block(mblock, tag, FORKAE_BLOCK_SIZE); - memcpy(m, mblock, temp); - check = FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (mblock + temp, FORKAE_BLOCK_SIZE - temp); - return aead_check_tag_precheck - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, temp, check); - } -} - -#endif /* FORKAE_ALG_NAME */ - -/* Now undefine everything so that we can include this file again for - * another variant on the ForkAE SAEF algorithm */ -#undef FORKAE_ALG_NAME -#undef FORKAE_BLOCK_SIZE -#undef FORKAE_NONCE_SIZE -#undef FORKAE_COUNTER_SIZE -#undef FORKAE_TWEAKEY_SIZE -#undef FORKAE_TWEAKEY_REDUCED_SIZE -#undef FORKAE_BLOCK_FUNC -#undef FORKAE_CONCAT_INNER -#undef FORKAE_CONCAT diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-forkskinny.c b/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-forkskinny.c deleted file mode 100644 index b050ff1..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-forkskinny.c +++ /dev/null @@ -1,988 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-forkskinny.h" -#include "internal-skinnyutil.h" - -/** - * \brief 7-bit round constants for all ForkSkinny block ciphers. - */ -static unsigned char const RC[87] = { - 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7e, 0x7d, - 0x7b, 0x77, 0x6f, 0x5f, 0x3e, 0x7c, 0x79, 0x73, - 0x67, 0x4f, 0x1e, 0x3d, 0x7a, 0x75, 0x6b, 0x57, - 0x2e, 0x5c, 0x38, 0x70, 0x61, 0x43, 0x06, 0x0d, - 0x1b, 0x37, 0x6e, 0x5d, 0x3a, 0x74, 0x69, 0x53, - 0x26, 0x4c, 0x18, 0x31, 0x62, 0x45, 0x0a, 0x15, - 0x2b, 0x56, 0x2c, 0x58, 0x30, 0x60, 0x41, 0x02, - 0x05, 0x0b, 0x17, 0x2f, 0x5e, 0x3c, 0x78, 0x71, - 0x63, 0x47, 0x0e, 0x1d, 0x3b, 0x76, 0x6d, 0x5b, - 0x36, 0x6c, 0x59, 0x32, 0x64, 0x49, 0x12, 0x25, - 0x4a, 0x14, 0x29, 0x52, 0x24, 0x48, 0x10 -}; - -/** - * \brief Number of rounds of ForkSkinny-128-256 before forking. - */ -#define FORKSKINNY_128_256_ROUNDS_BEFORE 21 - -/** - * \brief Number of rounds of ForkSkinny-128-256 after forking. - */ -#define FORKSKINNY_128_256_ROUNDS_AFTER 27 - -/** - * \brief State information for ForkSkinny-128-256. - */ -typedef struct -{ - uint32_t TK1[4]; /**< First part of the tweakey */ - uint32_t TK2[4]; /**< Second part of the tweakey */ - uint32_t S[4]; /**< Current block state */ - -} forkskinny_128_256_state_t; - -/** - * \brief Applies one round of ForkSkinny-128-256. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_128_256_round - (forkskinny_128_256_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Apply the S-box to all cells in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(state->TK1); - skinny128_permute_tk(state->TK2); - skinny128_LFSR2(state->TK2[0]); - skinny128_LFSR2(state->TK2[1]); -} - -void forkskinny_128_256_encrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_256_state_t state; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Run all of the rounds before the forking point */ - for (round = 0; round < FORKSKINNY_128_256_ROUNDS_BEFORE; ++round) { - forkskinny_128_256_round(&state, round); - } - - /* Determine which output blocks we need */ - if (output_left && output_right) { - /* We need both outputs so save the state at the forking point */ - uint32_t F[4]; - F[0] = state.S[0]; - F[1] = state.S[1]; - F[2] = state.S[2]; - F[3] = state.S[3]; - - /* Generate the right output block */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); ++round) { - forkskinny_128_256_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - - /* Restore the state at the forking point */ - state.S[0] = F[0]; - state.S[1] = F[1]; - state.S[2] = F[2]; - state.S[3] = F[3]; - } - if (output_left) { - /* Generate the left output block */ - state.S[0] ^= 0x08040201U; /* Branching constant */ - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - for (round = (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER * 2); ++round) { - forkskinny_128_256_round(&state, round); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - } else { - /* We only need the right output block */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); ++round) { - forkskinny_128_256_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - } -} - -/** - * \brief Applies one round of ForkSkinny-128-256 in reverse. - * - * \param state State to apply the round to. 
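Every ForkSkinny encrypt routine in this file follows the same forking shape: run the common rounds once, snapshot the state, continue with the "after" rounds for the right branch, then restore the snapshot, XOR in the branching constant and run a second batch of "after" rounds (with fresh round indices) for the left branch. The toy sketch below mirrors only that control flow; the 32-bit state, the toy_round primitive and the round counts are placeholders, not the real cipher:

#include <stdint.h>
#include <stdio.h>

#define ROUNDS_BEFORE 4
#define ROUNDS_AFTER  6

/* Stand-in for one ForkSkinny round: any invertible mixing will do here */
static void toy_round(uint32_t *s, unsigned round)
{
    *s ^= 0x9E3779B9u + round;
    *s = (*s << 7) | (*s >> 25);   /* rotate left by 7 */
}

static void toy_fork_encrypt(uint32_t input, uint32_t *left, uint32_t *right)
{
    uint32_t s = input;
    unsigned round;

    /* Common rounds before the forking point */
    for (round = 0; round < ROUNDS_BEFORE; ++round)
        toy_round(&s, round);

    /* Right branch: continue from the forking point */
    uint32_t fork = s;   /* snapshot, as the real code saves S[] into F[] */
    for (round = ROUNDS_BEFORE; round < ROUNDS_BEFORE + ROUNDS_AFTER; ++round)
        toy_round(&s, round);
    *right = s;

    /* Left branch: restore, add a branching constant, run more rounds
     * with fresh round numbers so the round constants differ */
    s = fork ^ 0x08040201u;
    for (round = ROUNDS_BEFORE + ROUNDS_AFTER;
         round < ROUNDS_BEFORE + ROUNDS_AFTER * 2; ++round)
        toy_round(&s, round);
    *left = s;
}

int main(void)
{
    uint32_t left, right;
    toy_fork_encrypt(0x12345678u, &left, &right);
    printf("left  = %08lx\n", (unsigned long)left);
    printf("right = %08lx\n", (unsigned long)right);
    return 0;
}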
- * \param round Number of the round to apply. - */ -static void forkskinny_128_256_inv_round - (forkskinny_128_256_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Permute TK1 and TK2 for the next round */ - skinny128_inv_LFSR2(state->TK2[0]); - skinny128_inv_LFSR2(state->TK2[1]); - skinny128_inv_permute_tk(state->TK1); - skinny128_inv_permute_tk(state->TK2); - - /* Inverse mix of the columns */ - temp = s0; - s0 = s1; - s1 = s2; - s2 = s3; - s3 = temp ^ s2; - s2 ^= s0; - s1 ^= s2; - - /* Shift the cells in the rows left, which moves the cell - * values down closer to the LSB. That is, we do a right - * rotate on the word to rotate the cells in the word left */ - s1 = rightRotate8(s1); - s2 = rightRotate16(s2); - s3 = rightRotate24(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all cells in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; -} - -void forkskinny_128_256_decrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_256_state_t state; - forkskinny_128_256_state_t fstate; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Fast-forward the tweakey to the end of the key schedule */ - for (round = 0; round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER * 2); ++round) { - skinny128_permute_tk(state.TK1); - skinny128_permute_tk(state.TK2); - skinny128_LFSR2(state.TK2[0]); - skinny128_LFSR2(state.TK2[1]); - } - - /* Perform the "after" rounds on the input to get back - * to the forking point in the cipher */ - for (round = (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER * 2); - round > (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); --round) { - forkskinny_128_256_inv_round(&state, round - 1); - } - - /* Remove the branching constant */ - state.S[0] ^= 0x08040201U; - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - - /* Roll the tweakey back another "after" rounds */ - for (round = 0; round < FORKSKINNY_128_256_ROUNDS_AFTER; ++round) { - skinny128_inv_LFSR2(state.TK2[0]); - skinny128_inv_LFSR2(state.TK2[1]); - skinny128_inv_permute_tk(state.TK1); - skinny128_inv_permute_tk(state.TK2); - } - - /* Save the state and the tweakey at the forking point */ - fstate = state; - - /* Generate the left output block after another "before" rounds */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; round > 0; --round) { - 
forkskinny_128_256_inv_round(&state, round - 1); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - - /* Generate the right output block by going forward "after" - * rounds from the forking point */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); ++round) { - forkskinny_128_256_round(&fstate, round); - } - le_store_word32(output_right, fstate.S[0]); - le_store_word32(output_right + 4, fstate.S[1]); - le_store_word32(output_right + 8, fstate.S[2]); - le_store_word32(output_right + 12, fstate.S[3]); -} - -/** - * \brief Number of rounds of ForkSkinny-128-384 before forking. - */ -#define FORKSKINNY_128_384_ROUNDS_BEFORE 25 - -/** - * \brief Number of rounds of ForkSkinny-128-384 after forking. - */ -#define FORKSKINNY_128_384_ROUNDS_AFTER 31 - -/** - * \brief State information for ForkSkinny-128-384. - */ -typedef struct -{ - uint32_t TK1[4]; /**< First part of the tweakey */ - uint32_t TK2[4]; /**< Second part of the tweakey */ - uint32_t TK3[4]; /**< Third part of the tweakey */ - uint32_t S[4]; /**< Current block state */ - -} forkskinny_128_384_state_t; - -/** - * \brief Applies one round of ForkSkinny-128-384. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_128_384_round - (forkskinny_128_384_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Apply the S-box to all cells in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
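Decryption in this file cannot start from round 0: it fast-forwards the tweakey schedule to its final state, inverts the left-branch rounds back to the forking point, strips the branching constant, rewinds the schedule, and only then recovers the plaintext (by inverting the common rounds) and the right-branch output (by running forwards again from the saved forking state). The toy below mirrors that ordering and checks it against the matching toy encryption; it has no tweakey, so the schedule fast-forward and rewind are omitted, and the primitive and constants are placeholders:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BEFORE 4
#define AFTER  6
#define BRANCH 0x08040201u

/* Toy invertible round and its inverse (placeholders for the real rounds) */
static uint32_t fwd(uint32_t s, unsigned r) { s ^= 0x9E3779B9u + r; return (s << 7) | (s >> 25); }
static uint32_t inv(uint32_t s, unsigned r) { s = (s >> 7) | (s << 25); return s ^ (0x9E3779B9u + r); }

static void toy_encrypt(uint32_t in, uint32_t *left, uint32_t *right)
{
    unsigned r;
    uint32_t s = in, f;
    for (r = 0; r < BEFORE; ++r) s = fwd(s, r);
    f = s;                                                     /* forking point */
    for (r = BEFORE; r < BEFORE + AFTER; ++r) s = fwd(s, r);
    *right = s;                                                /* right branch */
    s = f ^ BRANCH;                                            /* left branch */
    for (r = BEFORE + AFTER; r < BEFORE + 2 * AFTER; ++r) s = fwd(s, r);
    *left = s;
}

static void toy_decrypt(uint32_t left, uint32_t *out, uint32_t *right)
{
    unsigned r;
    uint32_t s = left, f;
    /* Undo the left-branch rounds back to the forking point */
    for (r = BEFORE + 2 * AFTER; r > BEFORE + AFTER; --r) s = inv(s, r - 1);
    s ^= BRANCH;                                               /* remove branching constant */
    f = s;                                                     /* state at the forking point */
    /* Recover the input by undoing the common rounds */
    for (r = BEFORE; r > 0; --r) s = inv(s, r - 1);
    *out = s;
    /* Recompute the right branch forwards from the forking point */
    s = f;
    for (r = BEFORE; r < BEFORE + AFTER; ++r) s = fwd(s, r);
    *right = s;
}

int main(void)
{
    uint32_t l, rgt, back, rgt2;
    toy_encrypt(0xCAFEBABEu, &l, &rgt);
    toy_decrypt(l, &back, &rgt2);
    assert(back == 0xCAFEBABEu && rgt2 == rgt);   /* round-trips as expected */
    printf("ok\n");
    return 0;
}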
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny128_permute_tk(state->TK1); - skinny128_permute_tk(state->TK2); - skinny128_permute_tk(state->TK3); - skinny128_LFSR2(state->TK2[0]); - skinny128_LFSR2(state->TK2[1]); - skinny128_LFSR3(state->TK3[0]); - skinny128_LFSR3(state->TK3[1]); -} - -void forkskinny_128_384_encrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_384_state_t state; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.TK3[0] = le_load_word32(key + 32); - state.TK3[1] = le_load_word32(key + 36); - state.TK3[2] = le_load_word32(key + 40); - state.TK3[3] = le_load_word32(key + 44); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Run all of the rounds before the forking point */ - for (round = 0; round < FORKSKINNY_128_384_ROUNDS_BEFORE; ++round) { - forkskinny_128_384_round(&state, round); - } - - /* Determine which output blocks we need */ - if (output_left && output_right) { - /* We need both outputs so save the state at the forking point */ - uint32_t F[4]; - F[0] = state.S[0]; - F[1] = state.S[1]; - F[2] = state.S[2]; - F[3] = state.S[3]; - - /* Generate the right output block */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); ++round) { - forkskinny_128_384_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - - /* Restore the state at the forking point */ - state.S[0] = F[0]; - state.S[1] = F[1]; - state.S[2] = F[2]; - state.S[3] = F[3]; - } - if (output_left) { - /* Generate the left output block */ - state.S[0] ^= 0x08040201U; /* Branching constant */ - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - for (round = (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER * 2); ++round) { - forkskinny_128_384_round(&state, round); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - } else { - /* We only need the right output block */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); ++round) { - forkskinny_128_384_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - 
le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - } -} - -/** - * \brief Applies one round of ForkSkinny-128-384 in reverse. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_128_384_inv_round - (forkskinny_128_384_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Permute TK1 and TK2 for the next round */ - skinny128_inv_LFSR2(state->TK2[0]); - skinny128_inv_LFSR2(state->TK2[1]); - skinny128_inv_LFSR3(state->TK3[0]); - skinny128_inv_LFSR3(state->TK3[1]); - skinny128_inv_permute_tk(state->TK1); - skinny128_inv_permute_tk(state->TK2); - skinny128_inv_permute_tk(state->TK3); - - /* Inverse mix of the columns */ - temp = s0; - s0 = s1; - s1 = s2; - s2 = s3; - s3 = temp ^ s2; - s2 ^= s0; - s1 ^= s2; - - /* Shift the cells in the rows left, which moves the cell - * values down closer to the LSB. That is, we do a right - * rotate on the word to rotate the cells in the word left */ - s1 = rightRotate8(s1); - s2 = rightRotate16(s2); - s3 = rightRotate24(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all cells in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; -} - -void forkskinny_128_384_decrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_384_state_t state; - forkskinny_128_384_state_t fstate; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.TK3[0] = le_load_word32(key + 32); - state.TK3[1] = le_load_word32(key + 36); - state.TK3[2] = le_load_word32(key + 40); - state.TK3[3] = le_load_word32(key + 44); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Fast-forward the tweakey to the end of the key schedule */ - for (round = 0; round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER * 2); ++round) { - skinny128_permute_tk(state.TK1); - skinny128_permute_tk(state.TK2); - skinny128_permute_tk(state.TK3); - skinny128_LFSR2(state.TK2[0]); - skinny128_LFSR2(state.TK2[1]); - skinny128_LFSR3(state.TK3[0]); - skinny128_LFSR3(state.TK3[1]); - } - - /* Perform the "after" rounds on the input to get back - * to the forking point in the cipher */ - for (round = (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER * 2); - round > (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); --round) { - 
forkskinny_128_384_inv_round(&state, round - 1); - } - - /* Remove the branching constant */ - state.S[0] ^= 0x08040201U; - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - - /* Roll the tweakey back another "after" rounds */ - for (round = 0; round < FORKSKINNY_128_384_ROUNDS_AFTER; ++round) { - skinny128_inv_LFSR2(state.TK2[0]); - skinny128_inv_LFSR2(state.TK2[1]); - skinny128_inv_LFSR3(state.TK3[0]); - skinny128_inv_LFSR3(state.TK3[1]); - skinny128_inv_permute_tk(state.TK1); - skinny128_inv_permute_tk(state.TK2); - skinny128_inv_permute_tk(state.TK3); - } - - /* Save the state and the tweakey at the forking point */ - fstate = state; - - /* Generate the left output block after another "before" rounds */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; round > 0; --round) { - forkskinny_128_384_inv_round(&state, round - 1); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - - /* Generate the right output block by going forward "after" - * rounds from the forking point */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); ++round) { - forkskinny_128_384_round(&fstate, round); - } - le_store_word32(output_right, fstate.S[0]); - le_store_word32(output_right + 4, fstate.S[1]); - le_store_word32(output_right + 8, fstate.S[2]); - le_store_word32(output_right + 12, fstate.S[3]); -} - -/** - * \brief Number of rounds of ForkSkinny-64-192 before forking. - */ -#define FORKSKINNY_64_192_ROUNDS_BEFORE 17 - -/** - * \brief Number of rounds of ForkSkinny-64-192 after forking. - */ -#define FORKSKINNY_64_192_ROUNDS_AFTER 23 - -/** - * \brief State information for ForkSkinny-64-192. - */ -typedef struct -{ - uint16_t TK1[4]; /**< First part of the tweakey */ - uint16_t TK2[4]; /**< Second part of the tweakey */ - uint16_t TK3[4]; /**< Third part of the tweakey */ - uint16_t S[4]; /**< Current block state */ - -} forkskinny_64_192_state_t; - -/** - * \brief Applies one round of ForkSkinny-64-192. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - * - * Note: The cells of each row are order in big-endian nibble order - * so it is easiest to manage the rows in bit-endian byte order. 
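The 64-bit variant keeps its state as four 16-bit rows of 4-bit cells loaded big-endian, and it injects the round constant into the top nibble of rows 0 and 1 plus a fixed 0x2 bit group into row 2, mirroring the 0x00020000 / 0x02 constants of the 128-bit variants. A small sketch of that packing; load16_be mirrors be_load_word16 from internal-util.h and the block bytes are arbitrary:

#include <stdint.h>
#include <stdio.h>

/* Big-endian 16-bit load, same convention as be_load_word16 in internal-util.h */
static uint16_t load16_be(const unsigned char *p)
{
    return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
    /* A 64-bit block becomes four rows of four 4-bit cells each */
    const unsigned char block[8] = {0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF};
    uint16_t s0 = load16_be(block);       /* row 0 */
    uint16_t s1 = load16_be(block + 2);   /* row 1 */
    uint16_t s2 = load16_be(block + 4);   /* row 2 */
    uint16_t s3 = load16_be(block + 6);   /* row 3 */

    /* Round-constant injection as in forkskinny_64_192_round: the low nibble of
     * rc lands in the top nibble of row 0, bits 4..6 of rc in the top nibble of
     * row 1, and a constant 0x2 pattern goes into row 2 */
    uint8_t rc = 0x7B;   /* example value taken from the RC[] table */
    s0 ^= (uint16_t)((rc & 0x0F) << 12);
    s1 ^= (uint16_t)((rc & 0x70) << 8);
    s2 ^= 0x2000;

    printf("%04x %04x %04x %04x\n", s0, s1, s2, s3);
    return 0;
}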
- */ -static void forkskinny_64_192_round - (forkskinny_64_192_state_t *state, unsigned round) -{ - uint16_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Apply the S-box to all cells in the state */ - skinny64_sbox(s0); - skinny64_sbox(s1); - skinny64_sbox(s2); - skinny64_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - ((rc & 0x0F) << 12) ^ 0x0020; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ - ((rc & 0x70) << 8); - s2 ^= 0x2000; - - /* Shift the cells in the rows right */ - s1 = rightRotate4_16(s1); - s2 = rightRotate8_16(s2); - s3 = rightRotate12_16(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny64_permute_tk(state->TK1); - skinny64_permute_tk(state->TK2); - skinny64_permute_tk(state->TK3); - skinny64_LFSR2(state->TK2[0]); - skinny64_LFSR2(state->TK2[1]); - skinny64_LFSR3(state->TK3[0]); - skinny64_LFSR3(state->TK3[1]); -} - -void forkskinny_64_192_encrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_64_192_state_t state; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = be_load_word16(key); - state.TK1[1] = be_load_word16(key + 2); - state.TK1[2] = be_load_word16(key + 4); - state.TK1[3] = be_load_word16(key + 6); - state.TK2[0] = be_load_word16(key + 8); - state.TK2[1] = be_load_word16(key + 10); - state.TK2[2] = be_load_word16(key + 12); - state.TK2[3] = be_load_word16(key + 14); - state.TK3[0] = be_load_word16(key + 16); - state.TK3[1] = be_load_word16(key + 18); - state.TK3[2] = be_load_word16(key + 20); - state.TK3[3] = be_load_word16(key + 22); - state.S[0] = be_load_word16(input); - state.S[1] = be_load_word16(input + 2); - state.S[2] = be_load_word16(input + 4); - state.S[3] = be_load_word16(input + 6); - - /* Run all of the rounds before the forking point */ - for (round = 0; round < FORKSKINNY_64_192_ROUNDS_BEFORE; ++round) { - forkskinny_64_192_round(&state, round); - } - - /* Determine which output blocks we need */ - if (output_left && output_right) { - /* We need both outputs so save the state at the forking point */ - uint16_t F[4]; - F[0] = state.S[0]; - F[1] = state.S[1]; - F[2] = state.S[2]; - F[3] = state.S[3]; - - /* Generate the right output block */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; - round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); ++round) { - forkskinny_64_192_round(&state, round); - } - be_store_word16(output_right, state.S[0]); - be_store_word16(output_right + 2, state.S[1]); - be_store_word16(output_right + 4, state.S[2]); - be_store_word16(output_right + 6, state.S[3]); - - /* Restore the state at the forking point */ - state.S[0] = F[0]; - state.S[1] = F[1]; - state.S[2] = F[2]; - state.S[3] = F[3]; - } - if (output_left) { - /* Generate the left output block */ - state.S[0] ^= 0x1249U; /* Branching constant */ - state.S[1] ^= 0x36daU; - state.S[2] ^= 0x5b7fU; - state.S[3] ^= 0xec81U; - for (round = (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); - round < 
(FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER * 2); ++round) { - forkskinny_64_192_round(&state, round); - } - be_store_word16(output_left, state.S[0]); - be_store_word16(output_left + 2, state.S[1]); - be_store_word16(output_left + 4, state.S[2]); - be_store_word16(output_left + 6, state.S[3]); - } else { - /* We only need the right output block */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; - round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); ++round) { - forkskinny_64_192_round(&state, round); - } - be_store_word16(output_right, state.S[0]); - be_store_word16(output_right + 2, state.S[1]); - be_store_word16(output_right + 4, state.S[2]); - be_store_word16(output_right + 6, state.S[3]); - } -} - -/** - * \brief Applies one round of ForkSkinny-64-192 in reverse. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_64_192_inv_round - (forkskinny_64_192_state_t *state, unsigned round) -{ - uint16_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny64_inv_LFSR2(state->TK2[0]); - skinny64_inv_LFSR2(state->TK2[1]); - skinny64_inv_LFSR3(state->TK3[0]); - skinny64_inv_LFSR3(state->TK3[1]); - skinny64_inv_permute_tk(state->TK1); - skinny64_inv_permute_tk(state->TK2); - skinny64_inv_permute_tk(state->TK3); - - /* Inverse mix of the columns */ - temp = s0; - s0 = s1; - s1 = s2; - s2 = s3; - s3 = temp ^ s2; - s2 ^= s0; - s1 ^= s2; - - /* Shift the cells in the rows left */ - s1 = leftRotate4_16(s1); - s2 = leftRotate8_16(s2); - s3 = leftRotate12_16(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - ((rc & 0x0F) << 12) ^ 0x0020; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ - ((rc & 0x70) << 8); - s2 ^= 0x2000; - - /* Apply the inverse of the S-box to all cells in the state */ - skinny64_inv_sbox(s0); - skinny64_inv_sbox(s1); - skinny64_inv_sbox(s2); - skinny64_inv_sbox(s3); - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; -} - -void forkskinny_64_192_decrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_64_192_state_t state; - forkskinny_64_192_state_t fstate; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = be_load_word16(key); - state.TK1[1] = be_load_word16(key + 2); - state.TK1[2] = be_load_word16(key + 4); - state.TK1[3] = be_load_word16(key + 6); - state.TK2[0] = be_load_word16(key + 8); - state.TK2[1] = be_load_word16(key + 10); - state.TK2[2] = be_load_word16(key + 12); - state.TK2[3] = be_load_word16(key + 14); - state.TK3[0] = be_load_word16(key + 16); - state.TK3[1] = be_load_word16(key + 18); - state.TK3[2] = be_load_word16(key + 20); - state.TK3[3] = be_load_word16(key + 22); - state.S[0] = be_load_word16(input); - state.S[1] = be_load_word16(input + 2); - state.S[2] = be_load_word16(input + 4); - state.S[3] = be_load_word16(input + 6); - - /* Fast-forward the tweakey to the end of the key schedule */ - for (round = 0; round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER * 2); ++round) { - skinny64_permute_tk(state.TK1); - skinny64_permute_tk(state.TK2); - 
skinny64_permute_tk(state.TK3); - skinny64_LFSR2(state.TK2[0]); - skinny64_LFSR2(state.TK2[1]); - skinny64_LFSR3(state.TK3[0]); - skinny64_LFSR3(state.TK3[1]); - } - - /* Perform the "after" rounds on the input to get back - * to the forking point in the cipher */ - for (round = (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER * 2); - round > (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); --round) { - forkskinny_64_192_inv_round(&state, round - 1); - } - - /* Remove the branching constant */ - state.S[0] ^= 0x1249U; - state.S[1] ^= 0x36daU; - state.S[2] ^= 0x5b7fU; - state.S[3] ^= 0xec81U; - - /* Roll the tweakey back another "after" rounds */ - for (round = 0; round < FORKSKINNY_64_192_ROUNDS_AFTER; ++round) { - skinny64_inv_LFSR2(state.TK2[0]); - skinny64_inv_LFSR2(state.TK2[1]); - skinny64_inv_LFSR3(state.TK3[0]); - skinny64_inv_LFSR3(state.TK3[1]); - skinny64_inv_permute_tk(state.TK1); - skinny64_inv_permute_tk(state.TK2); - skinny64_inv_permute_tk(state.TK3); - } - - /* Save the state and the tweakey at the forking point */ - fstate = state; - - /* Generate the left output block after another "before" rounds */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; round > 0; --round) { - forkskinny_64_192_inv_round(&state, round - 1); - } - be_store_word16(output_left, state.S[0]); - be_store_word16(output_left + 2, state.S[1]); - be_store_word16(output_left + 4, state.S[2]); - be_store_word16(output_left + 6, state.S[3]); - - /* Generate the right output block by going forward "after" - * rounds from the forking point */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; - round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); ++round) { - forkskinny_64_192_round(&fstate, round); - } - be_store_word16(output_right, fstate.S[0]); - be_store_word16(output_right + 2, fstate.S[1]); - be_store_word16(output_right + 4, fstate.S[2]); - be_store_word16(output_right + 6, fstate.S[3]); -} diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-forkskinny.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-forkskinny.h deleted file mode 100644 index 0c1a707..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-forkskinny.h +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
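A quick sanity check on the round counts defined above: each variant uses ROUNDS_BEFORE + 2 * ROUNDS_AFTER rounds in total, and the shared 87-entry RC[] table is sized exactly for the largest case, ForkSkinny-128-384:

#include <stdio.h>

int main(void)
{
    /* (rounds before fork, rounds after fork) for each variant, as defined above */
    struct { const char *name; int before, after; } v[] = {
        { "ForkSkinny-128-256", 21, 27 },
        { "ForkSkinny-128-384", 25, 31 },
        { "ForkSkinny-64-192",  17, 23 },
    };
    int i;
    for (i = 0; i < 3; ++i) {
        int total = v[i].before + 2 * v[i].after;   /* rounds used by encrypt/decrypt */
        printf("%s: %d rounds, highest RC index %d\n", v[i].name, total, total - 1);
    }
    /* Prints 75/74, 87/86 and 63/62: all within the 87-entry RC[] table */
    return 0;
}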
- */ - -#ifndef LW_INTERNAL_FORKSKINNY_H -#define LW_INTERNAL_FORKSKINNY_H - -/** - * \file internal-forkskinny.h - * \brief ForkSkinny block cipher family. - * - * ForkSkinny is a modified version of the SKINNY block cipher that - * supports "forking": half-way through the rounds the cipher is - * forked in two different directions to produce two different outputs. - * - * References: https://www.esat.kuleuven.be/cosic/forkae/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts a block of plaintext with ForkSkinny-128-256. - * - * \param key 256-bit tweakey for ForkSkinny-128-256. - * \param output_left Left output block for the ciphertext, or NULL if - * the left output is not required. - * \param output_right Right output block for the authentication tag, - * or NULL if the right output is not required. - * \param input 128-bit input plaintext block. - * - * ForkSkinny-128-192 also uses this function with a padded tweakey. - */ -void forkskinny_128_256_encrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Decrypts a block of ciphertext with ForkSkinny-128-256. - * - * \param key 256-bit tweakey for ForkSkinny-128-256. - * \param output_left Left output block, which is the plaintext. - * \param output_right Right output block for the authentication tag. - * \param input 128-bit input ciphertext block. - * - * Both output blocks will be populated; neither is optional. - */ -void forkskinny_128_256_decrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Encrypts a block of plaintext with ForkSkinny-128-384. - * - * \param key 384-bit tweakey for ForkSkinny-128-384. - * \param output_left Left output block for the ciphertext, or NULL if - * the left output is not required. - * \param output_right Right output block for the authentication tag, - * or NULL if the right output is not required. - * \param input 128-bit input plaintext block. - * - * ForkSkinny-128-288 also uses this function with a padded tweakey. - */ -void forkskinny_128_384_encrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Decrypts a block of ciphertext with ForkSkinny-128-384. - * - * \param key 384-bit tweakey for ForkSkinny-128-384. - * \param output_left Left output block, which is the plaintext. - * \param output_right Right output block for the authentication tag. - * \param input 128-bit input ciphertext block. - * - * Both output blocks will be populated; neither is optional. - */ -void forkskinny_128_384_decrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Encrypts a block of input with ForkSkinny-64-192. - * - * \param key 192-bit tweakey for ForkSkinny-64-192. - * \param output_left First output block, or NULL if left is not required. - * \param output_right Second output block, or NULL if right is not required. - * \param input 64-bit input block. - */ -/** - * \brief Encrypts a block of plaintext with ForkSkinny-64-192. - * - * \param key 192-bit tweakey for ForkSkinny-64-192. - * \param output_left Left output block for the ciphertext, or NULL if - * the left output is not required. - * \param output_right Right output block for the authentication tag, - * or NULL if the right output is not required. 
- * \param input 64-bit input plaintext block. - */ -void forkskinny_64_192_encrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Decrypts a block of ciphertext with ForkSkinny-64-192. - * - * \param key 192-bit tweakey for ForkSkinny-64-192. - * \param output_left Left output block, which is the plaintext. - * \param output_right Right output block for the authentication tag. - * \param input 64-bit input ciphertext block. - * - * Both output blocks will be populated; neither is optional. - */ -void forkskinny_64_192_decrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-skinnyutil.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-skinnyutil.h deleted file mode 100644 index 83136cb..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-skinnyutil.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNYUTIL_H -#define LW_INTERNAL_SKINNYUTIL_H - -/** - * \file internal-skinnyutil.h - * \brief Utilities to help implement SKINNY and its variants. 
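As the prototypes above spell out, an encrypt caller that only needs one branch passes NULL for the other, which is how the AEAD layers skip the unused fork, while decrypt always produces both outputs. A hypothetical usage sketch, to be compiled together with internal-forkskinny.c; the tweakey contents, plaintext and buffer names are made up, only the calling convention comes from the header:

#include <stdio.h>
#include <string.h>
#include "internal-forkskinny.h"

int main(void)
{
    unsigned char tweakey[32] = {0};      /* key || tweak, padded to 256 bits */
    unsigned char pt[16] = "block of input!";
    unsigned char left[16], right[16];

    /* Both branches: 'left' carries the ciphertext, 'right' the tag material */
    forkskinny_128_256_encrypt(tweakey, left, right, pt);

    /* Only the right branch is needed, e.g. when absorbing associated data */
    forkskinny_128_256_encrypt(tweakey, NULL, right, pt);

    /* Decryption takes the left-branch ciphertext and produces both outputs */
    unsigned char recovered[16], tag[16];
    forkskinny_128_256_decrypt(tweakey, recovered, tag, left);
    printf("%s\n", memcmp(recovered, pt, 16) == 0 ? "plaintext recovered" : "mismatch");
    return 0;
}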
- */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @cond skinnyutil */ - -/* Utilities for implementing SKINNY-128 */ - -#define skinny128_LFSR2(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ - (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ - } while (0) - - -#define skinny128_LFSR3(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ - (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) -#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) - -#define skinny128_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint32_t row2 = tk[2]; \ - uint32_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 16) | (row3 >> 16); \ - tk[0] = ((row2 >> 8) & 0x000000FFU) | \ - ((row2 << 16) & 0x00FF0000U) | \ - ( row3 & 0xFF00FF00U); \ - tk[1] = ((row2 >> 16) & 0x000000FFU) | \ - (row2 & 0xFF000000U) | \ - ((row3 << 8) & 0x0000FF00U) | \ - ( row3 & 0x00FF0000U); \ - } while (0) - -#define skinny128_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint32_t row0 = tk[0]; \ - uint32_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 >> 16) & 0x000000FFU) | \ - ((row0 << 8) & 0x0000FF00U) | \ - ((row1 << 16) & 0x00FF0000U) | \ - ( row1 & 0xFF000000U); \ - tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ - ((row0 << 16) & 0xFF000000U) | \ - ((row1 >> 16) & 0x000000FFU) | \ - ((row1 << 8) & 0x00FF0000U); \ - } while (0) - -/* - * Apply the SKINNY sbox. The original version from the specification is - * equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE(x) - * ((((x) & 0x01010101U) << 2) | - * (((x) & 0x06060606U) << 5) | - * (((x) & 0x20202020U) >> 5) | - * (((x) & 0xC8C8C8C8U) >> 2) | - * (((x) & 0x10101010U) >> 1)) - * - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * return SBOX_SWAP(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ - y = (((x >> 5) & (x << 1)) & 0x04040404U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ - x = ((x & 0x08080808U) << 1) | \ - ((x & 0x32323232U) << 2) | \ - ((x & 0x01010101U) << 5) | \ - ((x & 0x80808080U) >> 6) | \ - ((x & 0x40404040U) >> 4) | \ - ((x & 0x04040404U) >> 2); \ -} while (0) - -/* - * Apply the inverse of the SKINNY sbox. 
The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE_INV(x) - * ((((x) & 0x08080808U) << 1) | - * (((x) & 0x32323232U) << 2) | - * (((x) & 0x01010101U) << 5) | - * (((x) & 0xC0C0C0C0U) >> 5) | - * (((x) & 0x04040404U) >> 2)) - * - * x = SBOX_SWAP(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_inv_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ - x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ - y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ - x = ((x & 0x01010101U) << 2) | \ - ((x & 0x04040404U) << 4) | \ - ((x & 0x02020202U) << 6) | \ - ((x & 0x20202020U) >> 5) | \ - ((x & 0xC8C8C8C8U) >> 2) | \ - ((x & 0x10101010U) >> 1); \ -} while (0) - -/* Utilities for implementing SKINNY-64 */ - -#define skinny64_LFSR2(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ - } while (0) - -#define skinny64_LFSR3(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) -#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) - -#define skinny64_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint16_t row2 = tk[2]; \ - uint16_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 8) | (row3 >> 8); \ - tk[0] = ((row2 << 4) & 0xF000U) | \ - ((row2 >> 8) & 0x00F0U) | \ - ( row3 & 0x0F0FU); \ - tk[1] = ((row2 << 8) & 0xF000U) | \ - ((row3 >> 4) & 0x0F00U) | \ - ( row3 & 0x00F0U) | \ - ( row2 & 0x000FU); \ - } while (0) - -#define skinny64_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint16_t row0 = tk[0]; \ - uint16_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 << 8) & 0xF000U) | \ - ((row0 >> 4) & 0x0F00U) | \ - ((row1 >> 8) & 0x00F0U) | \ - ( row1 & 0x000FU); \ - tk[3] = ((row1 << 8) & 0xF000U) | \ - ((row0 << 8) & 0x0F00U) | \ - ((row1 >> 4) & 0x00F0U) | \ - ((row0 >> 8) & 0x000FU); \ - } while (0) - -/* - * Apply the SKINNY-64 sbox. 
The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT(x) - * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_SHIFT steps to be performed with one final rotation. - * This reduces the number of required shift operations from 14 to 10. - * - * We can further reduce the number of NOT operations from 4 to 2 - * using the technique from https://github.com/kste/skinny_avx to - * convert NOR-XOR operations into AND-XOR operations by converting - * the S-box into its NOT-inverse. - */ -#define skinny64_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ - x = ~x; \ - x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ -} while (0) - -/* - * Apply the inverse of the SKINNY-64 sbox. The original version - * from the specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT_INV(x) - * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * return SBOX_MIX(x); - */ -#define skinny64_inv_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = ~x; \ - x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ -} while (0) - -/** @endcond */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-util.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
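The forward and inverse nibble-sliced S-boxes above are exact inverses of each other, so a quick way to exercise them outside the library is to apply one after the other and confirm every 16-bit row comes back unchanged. A standalone check, with the two macros copied verbatim from the header above:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Copied from internal-skinnyutil.h */
#define skinny64_sbox(x) \
do { \
    x = ~x; \
    x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \
    x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \
    x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \
    x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \
    x = ~x; \
    x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \
} while (0)

#define skinny64_inv_sbox(x) \
do { \
    x = ~x; \
    x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \
    x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \
    x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \
    x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \
    x = ~x; \
    x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \
} while (0)

int main(void)
{
    uint32_t v;
    /* Every 16-bit row (four nibbles in parallel) must survive sbox + inv_sbox */
    for (v = 0; v < 0x10000U; ++v) {
        uint16_t x = (uint16_t)v;
        skinny64_sbox(x);
        skinny64_inv_sbox(x);
        assert(x == (uint16_t)v);
    }
    printf("skinny64 sbox/inv_sbox round-trip ok\n");
    return 0;
}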
- */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define 
le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. 
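The composition idea described in that comment is worth seeing concretely: on AVR only rotations by 1 or by a multiple of 8 are cheap, so a rotation such as left-by-5 is built as left-by-8 followed by three right-by-1 steps, which is exactly how the LW_CRYPTO_ROTATE32_COMPOSED macros below are defined. A small sketch, using plain helper functions rotl32/rotr32 as stand-ins for the generic leftRotate/rightRotate macros (the names are illustrative, not from the library):

/* Illustrative only -- not part of the patch. */
#include <stdint.h>
#include <stdio.h>

static uint32_t rotl32(uint32_t x, unsigned bits)  /* bits in 1..31 */
{
    return (x << bits) | (x >> (32 - bits));
}

static uint32_t rotr32(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32 - bits));
}

int main(void)
{
    uint32_t x = 0x12345678;
    /* left-rotate by 5 composed as: left by 8, then right by 1 three times */
    uint32_t composed = rotr32(rotr32(rotr32(rotl32(x, 8), 1), 1), 1);
    uint32_t direct   = rotl32(x, 5);
    printf("direct %08lx composed %08lx %s\n",
           (unsigned long)direct, (unsigned long)composed,
           direct == composed ? "(match)" : "(mismatch)");
    return 0;
}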
*/ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys/internal-util.h b/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys/internal-util.h +++ b/forkae/Implementations/crypto_aead/paefforkskinnyb128t288n104v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/aead-common.c b/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/aead-common.h b/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. 
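The aead_check_tag() routine above folds the tag comparison and the plaintext wipe into one constant-time pass: the byte-wise XORs are OR-accumulated, (accum - 1) >> 8 turns "all zero" into an all-ones mask and anything else into zero, the mask is ANDed over the plaintext, and the return value is 0 on success or -1 on failure. A minimal sketch of how a decrypt routine typically finishes with it; recompute_tag() is a hypothetical helper standing in for the cipher-specific tag computation:

/* Illustrative only -- not part of the patch. */
#include "aead-common.h"

#define TAG_LEN 16  /* assumed tag length for this sketch */

extern void recompute_tag(unsigned char tag[TAG_LEN],
                          const unsigned char *plaintext,
                          unsigned long long len);   /* hypothetical */

int sketch_decrypt_finish(unsigned char *plaintext, unsigned long long len,
                          const unsigned char *received_tag)
{
    unsigned char computed[TAG_LEN];
    recompute_tag(computed, plaintext, len);
    /* Returns 0 and leaves the plaintext intact on a match;
     * returns -1 and wipes the plaintext on a mismatch. */
    return aead_check_tag(plaintext, len, computed, received_tag, TAG_LEN);
}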
- */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. 
- */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. 
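The aead_cipher_t meta-information block defined above is what lets test and benchmark drivers run every cipher in the suite through a single code path, sizing buffers from key_len/nonce_len/tag_len and dispatching through the encrypt/decrypt function pointers. A minimal sketch of such a driver, assuming the caller has already allocated an output buffer of msg_len + cipher->tag_len bytes:

/* Illustrative only -- not part of the patch. */
#include <stdio.h>
#include "aead-common.h"

static int run_one_block(const aead_cipher_t *cipher,
                         const unsigned char *key,
                         const unsigned char *nonce,
                         const unsigned char *msg, unsigned long long msg_len,
                         unsigned char *out /* msg_len + cipher->tag_len */)
{
    unsigned long long out_len = 0;
    int err = cipher->encrypt(out, &out_len, msg, msg_len,
                              NULL, 0,   /* no associated data */
                              NULL,      /* nsec unused by these schemes */
                              nonce, key);
    if (err == 0)
        printf("%s: %llu bytes of ciphertext+tag\n", cipher->name, out_len);
    return err;
}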
- */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/api.h b/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/api.h deleted file mode 100644 index f04cc58..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 6 -#define CRYPTO_ABYTES 8 -#define CRYPTO_NOOVERLAP 1 diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/encrypt.c b/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/encrypt.c deleted file mode 100644 index 275b77e..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "forkae.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return forkae_paef_64_192_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return forkae_paef_64_192_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/forkae.c b/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/forkae.c deleted file mode 100644 index 4a9671a..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/forkae.c +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
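The api.h constants and the encrypt.c wrapper above expose PAEF-ForkSkinny-64-192 through the SUPERCOP-style crypto_aead_encrypt()/crypto_aead_decrypt() entry points: a 16-byte key, a 6-byte nonce, and an 8-byte tag appended to the ciphertext. A round-trip sketch, illustrative only; the prototypes are repeated here so the snippet is self-contained, whereas in the benchmark framework they come from the harness itself:

/* Illustrative only -- not part of the patch. */
#include <stdio.h>
#include <string.h>

#define CRYPTO_KEYBYTES 16
#define CRYPTO_NPUBBYTES 6
#define CRYPTO_ABYTES 8

extern int crypto_aead_encrypt
    (unsigned char *c, unsigned long long *clen,
     const unsigned char *m, unsigned long long mlen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *nsec, const unsigned char *npub,
     const unsigned char *k);
extern int crypto_aead_decrypt
    (unsigned char *m, unsigned long long *mlen,
     unsigned char *nsec,
     const unsigned char *c, unsigned long long clen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *npub, const unsigned char *k);

int main(void)
{
    unsigned char key[CRYPTO_KEYBYTES] = {0};
    unsigned char nonce[CRYPTO_NPUBBYTES] = {0};
    unsigned char msg[12] = "hello world";
    unsigned char ct[sizeof(msg) + CRYPTO_ABYTES];
    unsigned char pt[sizeof(msg)];
    unsigned long long ct_len = 0, pt_len = 0;

    if (crypto_aead_encrypt(ct, &ct_len, msg, sizeof(msg),
                            NULL, 0, NULL, nonce, key) != 0)
        return 1;
    if (crypto_aead_decrypt(pt, &pt_len, NULL, ct, ct_len,
                            NULL, 0, nonce, key) != 0)
        return 1;
    printf("round trip %s\n",
           memcmp(msg, pt, sizeof(msg)) == 0 ? "ok" : "failed");
    return 0;
}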
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "forkae.h" -#include "internal-forkskinny.h" -#include "internal-util.h" -#include - -aead_cipher_t const forkae_paef_64_192_cipher = { - "PAEF-ForkSkinny-64-192", - FORKAE_PAEF_64_192_KEY_SIZE, - FORKAE_PAEF_64_192_NONCE_SIZE, - FORKAE_PAEF_64_192_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_64_192_aead_encrypt, - forkae_paef_64_192_aead_decrypt -}; - -aead_cipher_t const forkae_paef_128_192_cipher = { - "PAEF-ForkSkinny-128-192", - FORKAE_PAEF_128_192_KEY_SIZE, - FORKAE_PAEF_128_192_NONCE_SIZE, - FORKAE_PAEF_128_192_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_128_192_aead_encrypt, - forkae_paef_128_192_aead_decrypt -}; - -aead_cipher_t const forkae_paef_128_256_cipher = { - "PAEF-ForkSkinny-128-256", - FORKAE_PAEF_128_256_KEY_SIZE, - FORKAE_PAEF_128_256_NONCE_SIZE, - FORKAE_PAEF_128_256_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_128_256_aead_encrypt, - forkae_paef_128_256_aead_decrypt -}; - -aead_cipher_t const forkae_paef_128_288_cipher = { - "PAEF-ForkSkinny-128-288", - FORKAE_PAEF_128_288_KEY_SIZE, - FORKAE_PAEF_128_288_NONCE_SIZE, - FORKAE_PAEF_128_288_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_128_288_aead_encrypt, - forkae_paef_128_288_aead_decrypt -}; - -aead_cipher_t const forkae_saef_128_192_cipher = { - "SAEF-ForkSkinny-128-192", - FORKAE_SAEF_128_192_KEY_SIZE, - FORKAE_SAEF_128_192_NONCE_SIZE, - FORKAE_SAEF_128_192_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_saef_128_192_aead_encrypt, - forkae_saef_128_192_aead_decrypt -}; - -aead_cipher_t const forkae_saef_128_256_cipher = { - "SAEF-ForkSkinny-128-256", - FORKAE_SAEF_128_256_KEY_SIZE, - FORKAE_SAEF_128_256_NONCE_SIZE, - FORKAE_SAEF_128_256_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_saef_128_256_aead_encrypt, - forkae_saef_128_256_aead_decrypt -}; - -/* PAEF-ForkSkinny-64-192 */ -#define FORKAE_ALG_NAME forkae_paef_64_192 -#define FORKAE_BLOCK_SIZE 8 -#define FORKAE_NONCE_SIZE FORKAE_PAEF_64_192_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 2 -#define FORKAE_TWEAKEY_SIZE 24 -#define FORKAE_BLOCK_FUNC forkskinny_64_192 -#include "internal-forkae-paef.h" - -/* PAEF-ForkSkinny-128-192 */ -#define FORKAE_ALG_NAME forkae_paef_128_192 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_PAEF_128_192_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 2 -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-paef.h" - -/* PAEF-ForkSkinny-128-256 */ -#define FORKAE_ALG_NAME forkae_paef_128_256 -#define FORKAE_BLOCK_SIZE 16 
-#define FORKAE_NONCE_SIZE FORKAE_PAEF_128_256_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 2 -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-paef.h" - -/* PAEF-ForkSkinny-128-288 */ -#define FORKAE_ALG_NAME forkae_paef_128_288 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_PAEF_128_288_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 7 -#define FORKAE_TWEAKEY_SIZE 48 -#define FORKAE_BLOCK_FUNC forkskinny_128_384 -#include "internal-forkae-paef.h" - -/* SAEF-ForkSkinny-128-192 */ -#define FORKAE_ALG_NAME forkae_saef_128_192 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_SAEF_128_192_NONCE_SIZE -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_TWEAKEY_REDUCED_SIZE 24 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-saef.h" - -/* SAEF-ForkSkinny-128-256 */ -#define FORKAE_ALG_NAME forkae_saef_128_256 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_SAEF_128_256_NONCE_SIZE -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_TWEAKEY_REDUCED_SIZE 32 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-saef.h" diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/forkae.h b/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/forkae.h deleted file mode 100644 index 3e27b50..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/forkae.h +++ /dev/null @@ -1,551 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_FORKAE_H -#define LWCRYPTO_FORKAE_H - -#include "aead-common.h" - -/** - * \file forkae.h - * \brief ForkAE authenticated encryption algorithm family. - * - * ForkAE is a family of authenticated encryption algorithms based on a - * modified version of the SKINNY tweakable block cipher. The modifications - * introduce "forking" where each input block produces two output blocks - * for use in encryption and authentication. There are six members in - * the ForkAE family: - * - * \li PAEF-ForkSkinny-64-192 has a 128-bit key, a 48-bit nonce, and a - * 64-bit authentication tag. The associated data and plaintext are - * limited to 216 bytes. - * \li PAEF-ForkSkinny-128-192 has a 128-bit key, a 48-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext are - * limited to 217 bytes. 
- * \li PAEF-ForkSkinny-128-256 has a 128-bit key, a 112-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext are - * limited to 217 bytes. - * \li PAEF-ForkSkinny-128-288 has a 128-bit key, a 104-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext are - * limited to 257 bytes. This is the primary member of the family. - * \li SAEF-ForkSkinny-128-192 has a 128-bit key, a 56-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext may be - * unlimited in size. - * \li SAEF-ForkSkinny-128-256 has a 128-bit key, a 120-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext may be - * unlimited in size. - * - * The PAEF variants support parallel encryption and decryption for - * higher throughput. The SAEF variants encrypt or decrypt blocks - * sequentially. - * - * ForkAE is designed to be efficient on small packet sizes so most of - * the PAEF algorithms have a limit of 64k or 128k on the amount of - * payload in a single packet. Obviously the input can be split into - * separate packets for larger amounts of data. - * - * References: https://www.esat.kuleuven.be/cosic/forkae/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for PAEF-ForkSkinny-64-192. - */ -#define FORKAE_PAEF_64_192_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-64-192. - */ -#define FORKAE_PAEF_64_192_TAG_SIZE 8 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-64-192. - */ -#define FORKAE_PAEF_64_192_NONCE_SIZE 6 - -/** - * \brief Size of the key for PAEF-ForkSkinny-128-192. - */ -#define FORKAE_PAEF_128_192_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-128-192. - */ -#define FORKAE_PAEF_128_192_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-128-192. - */ -#define FORKAE_PAEF_128_192_NONCE_SIZE 6 - -/** - * \brief Size of the key for PAEF-ForkSkinny-128-256. - */ -#define FORKAE_PAEF_128_256_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-128-256. - */ -#define FORKAE_PAEF_128_256_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-128-256. - */ -#define FORKAE_PAEF_128_256_NONCE_SIZE 14 - -/** - * \brief Size of the key for PAEF-ForkSkinny-128-288. - */ -#define FORKAE_PAEF_128_288_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-128-288. - */ -#define FORKAE_PAEF_128_288_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-128-288. - */ -#define FORKAE_PAEF_128_288_NONCE_SIZE 13 - -/** - * \brief Size of the key for SAEF-ForkSkinny-128-192. - */ -#define FORKAE_SAEF_128_192_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SAEF-ForkSkinny-128-192. - */ -#define FORKAE_SAEF_128_192_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SAEF-ForkSkinny-128-192. - */ -#define FORKAE_SAEF_128_192_NONCE_SIZE 7 - -/** - * \brief Size of the key for SAEF-ForkSkinny-128-256. - */ -#define FORKAE_SAEF_128_256_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SAEF-ForkSkinny-128-256. - */ -#define FORKAE_SAEF_128_256_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SAEF-ForkSkinny-128-256. - */ -#define FORKAE_SAEF_128_256_NONCE_SIZE 15 - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-64-192 cipher. - */ -extern aead_cipher_t const forkae_paef_64_192_cipher; - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-128-192 cipher. 
- */ -extern aead_cipher_t const forkae_paef_128_192_cipher; - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-128-256 cipher. - */ -extern aead_cipher_t const forkae_paef_128_256_cipher; - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-128-288 cipher. - */ -extern aead_cipher_t const forkae_paef_128_288_cipher; - -/** - * \brief Meta-information block for the SAEF-ForkSkinny-128-192 cipher. - */ -extern aead_cipher_t const forkae_saef_128_192_cipher; - -/** - * \brief Meta-information block for the SAEF-ForkSkinny-128-256 cipher. - */ -extern aead_cipher_t const forkae_saef_128_256_cipher; - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-64-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_64_192_aead_decrypt() - */ -int forkae_paef_64_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-64-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_paef_64_192_aead_encrypt() - */ -int forkae_paef_64_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-128-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. 
- * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_128_192_aead_decrypt() - */ -int forkae_paef_128_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-128-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_paef_128_192_aead_encrypt() - */ -int forkae_paef_128_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-128-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 14 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_128_256_aead_decrypt() - */ -int forkae_paef_128_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-128-256. 
- * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 14 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_paef_128_256_aead_encrypt() - */ -int forkae_paef_128_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-128-288. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 13 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_128_288_aead_decrypt() - */ -int forkae_paef_128_288_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-128-288. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 13 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
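As a rough usage sketch for the prototype above: the wrapper name example_encrypt, the "header" associated data, and the caller-provided key, nonce, and message buffers are all assumptions for illustration, not part of the library.

#include "forkae.h"   /* the header whose deletion is shown in this diff */

/* Usage sketch: encrypt one packet with the primary member of the family.
 * The caller supplies ct with room for mlen + FORKAE_PAEF_128_288_TAG_SIZE
 * bytes; key, nonce and the message are also caller-provided. */
static int example_encrypt(unsigned char *ct, unsigned long long *clen,
                           const unsigned char *msg, unsigned long long mlen,
                           const unsigned char key[FORKAE_PAEF_128_288_KEY_SIZE],
                           const unsigned char nonce[FORKAE_PAEF_128_288_NONCE_SIZE])
{
    static const unsigned char ad[] = "header";      /* authenticated, not encrypted */
    return forkae_paef_128_288_aead_encrypt(
        ct, clen,            /* output: ciphertext || 16-byte tag, length in *clen */
        msg, mlen,           /* plaintext */
        ad, sizeof(ad) - 1,  /* associated data */
        NULL,                /* nsec is not used by this algorithm */
        nonce,               /* 13-byte public nonce, unique per key */
        key);                /* 16-byte key */
}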
- * - * \sa forkae_paef_128_288_aead_encrypt() - */ -int forkae_paef_128_288_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SAEF-ForkSkinny-128-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 7 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_saef_128_192_aead_decrypt() - */ -int forkae_saef_128_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SAEF-ForkSkinny-128-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 7 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_saef_128_192_aead_encrypt() - */ -int forkae_saef_128_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SAEF-ForkSkinny-128-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. 
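The matching decryption side might look like the following sketch; example_decrypt and the "header" associated data are illustrative assumptions, and the only behaviour taken from the documentation above is the return convention (0 on success, -1 on tag failure).

#include "forkae.h"   /* the header whose deletion is shown in this diff */

/* Usage sketch: decrypt and verify a packet produced by the encryption
 * sketch above.  A return value of -1 means the tag did not verify and the
 * recovered plaintext must not be used. */
static int example_decrypt(unsigned char *msg, unsigned long long *mlen,
                           const unsigned char *ct, unsigned long long clen,
                           const unsigned char key[FORKAE_PAEF_128_288_KEY_SIZE],
                           const unsigned char nonce[FORKAE_PAEF_128_288_NONCE_SIZE])
{
    static const unsigned char ad[] = "header";      /* must match the sender */
    int err = forkae_paef_128_288_aead_decrypt(
        msg, mlen, NULL, ct, clen, ad, sizeof(ad) - 1, nonce, key);
    if (err != 0)
        *mlen = 0;   /* reject: tag failure (-1) or bad parameters */
    return err;
}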
- * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_saef_128_256_aead_decrypt() - */ -int forkae_saef_128_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SAEF-ForkSkinny-128-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_saef_128_256_aead_encrypt() - */ -int forkae_saef_128_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-forkae-paef.h b/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-forkae-paef.h deleted file mode 100644 index 6f57b2b..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-forkae-paef.h +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -/* We expect a number of macros to be defined before this file - * is included to configure the underlying ForkAE PAEF variant. - * - * FORKAE_ALG_NAME Name of the FORKAE algorithm; e.g. forkae_paef_128_256 - * FORKAE_BLOCK_SIZE Size of the block for the cipher (8 or 16 bytes). - * FORKAE_NONCE_SIZE Size of the nonce for the cipher in bytes. - * FORKAE_COUNTER_SIZE Size of the counter value for the cipher in bytes. - * FORKAE_TWEAKEY_SIZE Size of the tweakey for the underlying forked cipher. - * FORKAE_BLOCK_FUNC Name of the block function; e.g. forkskinny_128_256 - */ -#if defined(FORKAE_ALG_NAME) - -#define FORKAE_CONCAT_INNER(name,suffix) name##suffix -#define FORKAE_CONCAT(name,suffix) FORKAE_CONCAT_INNER(name,suffix) - -/* Limit on the amount of data we can process based on the counter size */ -#define FORKAE_PAEF_DATA_LIMIT \ - ((unsigned long long)((1ULL << (FORKAE_COUNTER_SIZE * 8)) * \ - (FORKAE_BLOCK_SIZE / 8)) - FORKAE_BLOCK_SIZE) - -/* Processes the associated data in PAEF mode */ -STATIC_INLINE void FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter) - (unsigned char tweakey[FORKAE_TWEAKEY_SIZE], - unsigned long long counter, unsigned char domain) -{ - unsigned posn; - counter |= (((unsigned long long)domain) << (FORKAE_COUNTER_SIZE * 8 - 3)); - for (posn = 0; posn < FORKAE_COUNTER_SIZE; ++posn) { - tweakey[16 + FORKAE_NONCE_SIZE + FORKAE_COUNTER_SIZE - 1 - posn] = - (unsigned char)counter; - counter >>= 8; - } -} - -/* Check that the last block is padded correctly; -1 if ok, 0 if not */ -STATIC_INLINE int FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (const unsigned char *block, unsigned len) -{ - int check = block[0] ^ 0x80; - while (len > 1) { - --len; - check |= block[len]; - } - return (check - 1) >> 8; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_encrypt) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - unsigned long long counter; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + FORKAE_BLOCK_SIZE; - - /* Validate the size of the associated data and plaintext as there - * is a limit on the size of the PAEF counter field */ - if (adlen > FORKAE_PAEF_DATA_LIMIT || mlen > FORKAE_PAEF_DATA_LIMIT) - return -2; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - - /* Tag value starts at zero. 
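For illustration, a hypothetical instantiation of this template for PAEF-ForkSkinny-64-192, with values inferred from the sizes documented earlier (8-byte block, 6-byte nonce, 192-bit = 24-byte tweakey, leaving a 2-byte counter); the library's actual forkae.c may arrange this differently.

/* Hypothetical instantiation sketch; the values are inferred, not copied
 * from forkae.c.  Counter field: 24 - 16 (key) - 6 (nonce) = 2 bytes. */
#define FORKAE_ALG_NAME      forkae_paef_64_192
#define FORKAE_BLOCK_SIZE    8
#define FORKAE_NONCE_SIZE    6
#define FORKAE_COUNTER_SIZE  2
#define FORKAE_TWEAKEY_SIZE  24
#define FORKAE_BLOCK_FUNC    forkskinny_64_192
#include "internal-forkae-paef.h"   /* expands to forkae_paef_64_192_aead_encrypt/decrypt */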
We will XOR this with all of the - * intermediate tag values that are calculated for each block */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - counter = 1; - while (adlen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 0); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - ++counter; - } - if (adlen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 1); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } else if (adlen != 0 || mlen == 0) { - unsigned temp = (unsigned)adlen; - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, sizeof(block) - temp - 1); - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 3); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, block); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } - - /* If there is no message payload, then generate the tag and we are done */ - if (!mlen) { - memcpy(c, tag, sizeof(tag)); - return 0; - } - - /* Encrypt all plaintext blocks except the last */ - counter = 1; - while (mlen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 4); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, m); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - mlen -= FORKAE_BLOCK_SIZE; - ++counter; - } - - /* Encrypt the last block and generate the final authentication tag */ - if (mlen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 5); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, m); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, FORKAE_BLOCK_SIZE); - } else { - unsigned temp = (unsigned)mlen; - memcpy(block, m, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, sizeof(block) - temp - 1); - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 7); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, temp); - } - return 0; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_decrypt) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - unsigned char *mtemp = m; - unsigned long long counter; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < FORKAE_BLOCK_SIZE) - return -1; - clen -= FORKAE_BLOCK_SIZE; - *mlen = clen; - - /* Validate the size of the associated data and plaintext as there - * is a limit on the size of the PAEF counter field */ - if (adlen > FORKAE_PAEF_DATA_LIMIT || clen > FORKAE_PAEF_DATA_LIMIT) - return -2; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - - /* Tag value starts at zero. 
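The _set_counter helper shown earlier packs the 3-bit domain separator into the top bits of a big-endian block counter, and the encryption path above uses domains 0/1/3 for associated-data blocks and 4/5/7 for message blocks. A small worked example, assuming the 2-byte counter and 6-byte nonce of the PAEF-ForkSkinny-64-192 instantiation sketched above (the function name is illustrative only):

#include <assert.h>

/* Worked example of the counter/domain packing performed by _set_counter,
 * for domain 5 (final full message block) and block counter 3. */
static void counter_packing_example(void)
{
    unsigned char tweakey[24] = {0};
    unsigned long long counter = 3;
    unsigned char domain = 5;
    unsigned posn;

    counter |= ((unsigned long long)domain) << (2 * 8 - 3);   /* 0xA003 */
    for (posn = 0; posn < 2; ++posn) {
        tweakey[16 + 6 + 2 - 1 - posn] = (unsigned char)counter;
        counter >>= 8;
    }
    assert(tweakey[22] == 0xA0 && tweakey[23] == 0x03);   /* domain sits in the top 3 bits */
}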
We will XOR this with all of the - * intermediate tag values that are calculated for each block */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - counter = 1; - while (adlen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 0); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - ++counter; - } - if (adlen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 1); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } else if (adlen != 0 || clen == 0) { - unsigned temp = (unsigned)adlen; - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, sizeof(block) - temp - 1); - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 3); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, block); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } - - /* If there is no message payload, then check the tag and we are done */ - if (!clen) - return aead_check_tag(m, clen, tag, c, sizeof(tag)); - - /* Decrypt all ciphertext blocks except the last */ - counter = 1; - while (clen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 4); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, c); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - clen -= FORKAE_BLOCK_SIZE; - ++counter; - } - - /* Decrypt the last block and check the final authentication tag */ - if (clen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 5); - lw_xor_block_2_src(m, c, tag, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, m); - return aead_check_tag - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, sizeof(tag)); - } else { - unsigned temp = (unsigned)clen; - unsigned char block2[FORKAE_BLOCK_SIZE]; - int check; - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 7); - lw_xor_block_2_src(block2, tag, c, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt) - (tweakey, block2, block, block2); - check = FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (block2 + temp, FORKAE_BLOCK_SIZE - temp); - memcpy(m, block2, temp); - return aead_check_tag_precheck - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, temp, check); - } -} - -#endif /* FORKAE_ALG_NAME */ - -/* Now undefine everything so that we can include this file again for - * another variant on the ForkAE PAEF algorithm */ -#undef FORKAE_ALG_NAME -#undef FORKAE_BLOCK_SIZE -#undef FORKAE_NONCE_SIZE -#undef FORKAE_COUNTER_SIZE -#undef FORKAE_TWEAKEY_SIZE -#undef FORKAE_BLOCK_FUNC -#undef FORKAE_CONCAT_INNER -#undef FORKAE_CONCAT -#undef FORKAE_PAEF_DATA_LIMIT diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-forkae-saef.h b/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-forkae-saef.h deleted file mode 100644 index 768bba4..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-forkae-saef.h +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
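The final-block path above relies on _is_padding returning -1 (all bits set) when the trailing bytes are exactly 0x80 followed by zeros and 0 otherwise, so the result can be folded into the tag comparison. A small sketch of the two cases, repeating the same arithmetic (and, like the original, assuming an arithmetic right shift of the negative intermediate):

#include <assert.h>

/* Behaviour sketch of the padding check used for the last partial block. */
static void padding_check_example(void)
{
    const unsigned char good[2] = { 0x80, 0x00 };
    const unsigned char bad[2]  = { 0x80, 0x01 };
    int check_good = (good[0] ^ 0x80) | good[1];   /* 0 when padding is correct */
    int check_bad  = (bad[0] ^ 0x80) | bad[1];     /* non-zero otherwise */
    assert(((check_good - 1) >> 8) == -1);         /* correctly padded */
    assert(((check_bad - 1) >> 8) == 0);           /* padding violated */
}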
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -/* We expect a number of macros to be defined before this file - * is included to configure the underlying ForkAE SAEF variant. - * - * FORKAE_ALG_NAME Name of the FORKAE algorithm; e.g. forkae_saef_128_256 - * FORKAE_BLOCK_SIZE Size of the block for the cipher (8 or 16 bytes). - * FORKAE_NONCE_SIZE Size of the nonce for the cipher in bytes. - * FORKAE_TWEAKEY_SIZE Size of the tweakey for the underlying forked cipher. - * FORKAE_REDUCED_TWEAKEY_SIZE Size of the reduced tweakey without padding. - * FORKAE_BLOCK_FUNC Name of the block function; e.g. forkskinny_128_256 - */ -#if defined(FORKAE_ALG_NAME) - -#define FORKAE_CONCAT_INNER(name,suffix) name##suffix -#define FORKAE_CONCAT(name,suffix) FORKAE_CONCAT_INNER(name,suffix) - -/* Check that the last block is padded correctly; -1 if ok, 0 if not */ -STATIC_INLINE int FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (const unsigned char *block, unsigned len) -{ - int check = block[0] ^ 0x80; - while (len > 1) { - --len; - check |= block[len]; - } - return (check - 1) >> 8; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_encrypt) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + FORKAE_BLOCK_SIZE; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] = 0x08; - - /* Tag value starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - if (adlen > 0 || mlen == 0) { - while (adlen > FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - } - if (mlen == 0) - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x02; - if (adlen == FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - 
FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } else if (adlen != 0 || mlen == 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(tag, ad, temp); - tag[temp] ^= 0x80; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } - } - - /* If there is no message payload, then generate the tag and we are done */ - if (!mlen) { - memcpy(c, tag, sizeof(tag)); - return 0; - } - - /* Encrypt all plaintext blocks except the last */ - while (mlen > FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, m, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(tag, block, FORKAE_BLOCK_SIZE); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - mlen -= FORKAE_BLOCK_SIZE; - } - - /* Encrypt the last block and generate the final authentication tag */ - if (mlen == FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, m, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, FORKAE_BLOCK_SIZE); - } else { - unsigned temp = (unsigned)mlen; - memcpy(block, tag, FORKAE_BLOCK_SIZE); - lw_xor_block(block, m, temp); - block[temp] ^= 0x80; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x05; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, temp); - } - return 0; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_decrypt) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < FORKAE_BLOCK_SIZE) - return -1; - clen -= FORKAE_BLOCK_SIZE; - *mlen = clen; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] = 0x08; - - /* Tag value starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - if (adlen > 0 || clen == 0) { - while (adlen > FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - } - if (clen == 0) - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x02; - if (adlen == FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } else if (adlen != 0 || clen == 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(tag, ad, temp); - 
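By analogy with the PAEF template, a hypothetical instantiation for SAEF-ForkSkinny-128-192 is sketched below: the 192-bit reduced tweakey (key, 7-byte nonce, and the trailing domain byte) would be carried inside the 256-bit tweakey of the underlying forked cipher. The mapping onto forkskinny_128_256 and the exact sizes are assumptions based on the sizes documented earlier, not values taken from forkae.c.

/* Hypothetical SAEF instantiation sketch; values are inferred. */
#define FORKAE_ALG_NAME              forkae_saef_128_192
#define FORKAE_BLOCK_SIZE            16
#define FORKAE_NONCE_SIZE            7
#define FORKAE_TWEAKEY_SIZE          32   /* full tweakey of the forked cipher */
#define FORKAE_TWEAKEY_REDUCED_SIZE  24   /* key || nonce || padding, domain in the last byte */
#define FORKAE_BLOCK_FUNC            forkskinny_128_256
#include "internal-forkae-saef.h"   /* expands to forkae_saef_128_192_aead_encrypt/decrypt */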
tag[temp] ^= 0x80; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } - } - - /* If there is no message payload, then check the tag and we are done */ - if (!clen) - return aead_check_tag(m, clen, tag, c, sizeof(tag)); - - /* Decrypt all ciphertext blocks except the last */ - while (clen > FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, c, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, block); - lw_xor_block(m, tag, FORKAE_BLOCK_SIZE); - memcpy(tag, block, FORKAE_BLOCK_SIZE); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - clen -= FORKAE_BLOCK_SIZE; - } - - /* Decrypt the last block and check the final authentication tag */ - if (clen == FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, c, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, block); - lw_xor_block(m, tag, FORKAE_BLOCK_SIZE); - return aead_check_tag - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, FORKAE_BLOCK_SIZE); - } else { - unsigned temp = (unsigned)clen; - unsigned char mblock[FORKAE_BLOCK_SIZE]; - int check; - lw_xor_block_2_src(block, c, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x05; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt) - (tweakey, mblock, block, block); - lw_xor_block(mblock, tag, FORKAE_BLOCK_SIZE); - memcpy(m, mblock, temp); - check = FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (mblock + temp, FORKAE_BLOCK_SIZE - temp); - return aead_check_tag_precheck - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, temp, check); - } -} - -#endif /* FORKAE_ALG_NAME */ - -/* Now undefine everything so that we can include this file again for - * another variant on the ForkAE SAEF algorithm */ -#undef FORKAE_ALG_NAME -#undef FORKAE_BLOCK_SIZE -#undef FORKAE_NONCE_SIZE -#undef FORKAE_COUNTER_SIZE -#undef FORKAE_TWEAKEY_SIZE -#undef FORKAE_TWEAKEY_REDUCED_SIZE -#undef FORKAE_BLOCK_FUNC -#undef FORKAE_CONCAT_INNER -#undef FORKAE_CONCAT diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-forkskinny.c b/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-forkskinny.c deleted file mode 100644 index b050ff1..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-forkskinny.c +++ /dev/null @@ -1,988 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-forkskinny.h" -#include "internal-skinnyutil.h" - -/** - * \brief 7-bit round constants for all ForkSkinny block ciphers. - */ -static unsigned char const RC[87] = { - 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7e, 0x7d, - 0x7b, 0x77, 0x6f, 0x5f, 0x3e, 0x7c, 0x79, 0x73, - 0x67, 0x4f, 0x1e, 0x3d, 0x7a, 0x75, 0x6b, 0x57, - 0x2e, 0x5c, 0x38, 0x70, 0x61, 0x43, 0x06, 0x0d, - 0x1b, 0x37, 0x6e, 0x5d, 0x3a, 0x74, 0x69, 0x53, - 0x26, 0x4c, 0x18, 0x31, 0x62, 0x45, 0x0a, 0x15, - 0x2b, 0x56, 0x2c, 0x58, 0x30, 0x60, 0x41, 0x02, - 0x05, 0x0b, 0x17, 0x2f, 0x5e, 0x3c, 0x78, 0x71, - 0x63, 0x47, 0x0e, 0x1d, 0x3b, 0x76, 0x6d, 0x5b, - 0x36, 0x6c, 0x59, 0x32, 0x64, 0x49, 0x12, 0x25, - 0x4a, 0x14, 0x29, 0x52, 0x24, 0x48, 0x10 -}; - -/** - * \brief Number of rounds of ForkSkinny-128-256 before forking. - */ -#define FORKSKINNY_128_256_ROUNDS_BEFORE 21 - -/** - * \brief Number of rounds of ForkSkinny-128-256 after forking. - */ -#define FORKSKINNY_128_256_ROUNDS_AFTER 27 - -/** - * \brief State information for ForkSkinny-128-256. - */ -typedef struct -{ - uint32_t TK1[4]; /**< First part of the tweakey */ - uint32_t TK2[4]; /**< Second part of the tweakey */ - uint32_t S[4]; /**< Current block state */ - -} forkskinny_128_256_state_t; - -/** - * \brief Applies one round of ForkSkinny-128-256. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_128_256_round - (forkskinny_128_256_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Apply the S-box to all cells in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(state->TK1); - skinny128_permute_tk(state->TK2); - skinny128_LFSR2(state->TK2[0]); - skinny128_LFSR2(state->TK2[1]); -} - -void forkskinny_128_256_encrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_256_state_t state; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Run all of the rounds before the forking point */ - for (round = 0; round < FORKSKINNY_128_256_ROUNDS_BEFORE; ++round) { - forkskinny_128_256_round(&state, round); - } - - /* Determine which output blocks we need */ - if (output_left && output_right) { - /* We need both outputs so save the state at the forking point */ - uint32_t F[4]; - F[0] = state.S[0]; - F[1] = state.S[1]; - F[2] = state.S[2]; - F[3] = state.S[3]; - - /* Generate the right output block */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); ++round) { - forkskinny_128_256_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - - /* Restore the state at the forking point */ - state.S[0] = F[0]; - state.S[1] = F[1]; - state.S[2] = F[2]; - state.S[3] = F[3]; - } - if (output_left) { - /* Generate the left output block */ - state.S[0] ^= 0x08040201U; /* Branching constant */ - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - for (round = (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER * 2); ++round) { - forkskinny_128_256_round(&state, round); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - } else { - /* We only need the right output block */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); ++round) { - forkskinny_128_256_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - } -} - -/** - * \brief Applies one round of ForkSkinny-128-256 in reverse. - * - * \param state State to apply the round to. 
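The forked structure is what lets the AEAD layers above obtain a ciphertext block and a tag contribution from a single primitive call, and passing a NULL left-output pointer skips the second branch when only the right fork is needed (as in the associated-data path). A brief calling sketch; the function name and the caller-prepared buffers are assumptions for illustration:

#include <stddef.h>
#include "internal-forkskinny.h"   /* declares forkskinny_128_256_encrypt */

/* Sketch: driving ForkSkinny-128-256 the way the modes above do. */
static void fork_usage_example(const unsigned char tweakey[32],
                               const unsigned char ad_block[16],
                               const unsigned char msg_block[16])
{
    unsigned char right[16];   /* folded into the running authentication tag */
    unsigned char left[16];    /* becomes the ciphertext block */

    /* Associated data: only the right fork is required. */
    forkskinny_128_256_encrypt(tweakey, NULL, right, ad_block);

    /* Message block: both forks are required. */
    forkskinny_128_256_encrypt(tweakey, left, right, msg_block);
}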
- * \param round Number of the round to apply. - */ -static void forkskinny_128_256_inv_round - (forkskinny_128_256_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Permute TK1 and TK2 for the next round */ - skinny128_inv_LFSR2(state->TK2[0]); - skinny128_inv_LFSR2(state->TK2[1]); - skinny128_inv_permute_tk(state->TK1); - skinny128_inv_permute_tk(state->TK2); - - /* Inverse mix of the columns */ - temp = s0; - s0 = s1; - s1 = s2; - s2 = s3; - s3 = temp ^ s2; - s2 ^= s0; - s1 ^= s2; - - /* Shift the cells in the rows left, which moves the cell - * values down closer to the LSB. That is, we do a right - * rotate on the word to rotate the cells in the word left */ - s1 = rightRotate8(s1); - s2 = rightRotate16(s2); - s3 = rightRotate24(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all cells in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; -} - -void forkskinny_128_256_decrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_256_state_t state; - forkskinny_128_256_state_t fstate; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Fast-forward the tweakey to the end of the key schedule */ - for (round = 0; round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER * 2); ++round) { - skinny128_permute_tk(state.TK1); - skinny128_permute_tk(state.TK2); - skinny128_LFSR2(state.TK2[0]); - skinny128_LFSR2(state.TK2[1]); - } - - /* Perform the "after" rounds on the input to get back - * to the forking point in the cipher */ - for (round = (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER * 2); - round > (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); --round) { - forkskinny_128_256_inv_round(&state, round - 1); - } - - /* Remove the branching constant */ - state.S[0] ^= 0x08040201U; - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - - /* Roll the tweakey back another "after" rounds */ - for (round = 0; round < FORKSKINNY_128_256_ROUNDS_AFTER; ++round) { - skinny128_inv_LFSR2(state.TK2[0]); - skinny128_inv_LFSR2(state.TK2[1]); - skinny128_inv_permute_tk(state.TK1); - skinny128_inv_permute_tk(state.TK2); - } - - /* Save the state and the tweakey at the forking point */ - fstate = state; - - /* Generate the left output block after another "before" rounds */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; round > 0; --round) { - 
forkskinny_128_256_inv_round(&state, round - 1); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - - /* Generate the right output block by going forward "after" - * rounds from the forking point */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); ++round) { - forkskinny_128_256_round(&fstate, round); - } - le_store_word32(output_right, fstate.S[0]); - le_store_word32(output_right + 4, fstate.S[1]); - le_store_word32(output_right + 8, fstate.S[2]); - le_store_word32(output_right + 12, fstate.S[3]); -} - -/** - * \brief Number of rounds of ForkSkinny-128-384 before forking. - */ -#define FORKSKINNY_128_384_ROUNDS_BEFORE 25 - -/** - * \brief Number of rounds of ForkSkinny-128-384 after forking. - */ -#define FORKSKINNY_128_384_ROUNDS_AFTER 31 - -/** - * \brief State information for ForkSkinny-128-384. - */ -typedef struct -{ - uint32_t TK1[4]; /**< First part of the tweakey */ - uint32_t TK2[4]; /**< Second part of the tweakey */ - uint32_t TK3[4]; /**< Third part of the tweakey */ - uint32_t S[4]; /**< Current block state */ - -} forkskinny_128_384_state_t; - -/** - * \brief Applies one round of ForkSkinny-128-384. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_128_384_round - (forkskinny_128_384_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Apply the S-box to all cells in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny128_permute_tk(state->TK1); - skinny128_permute_tk(state->TK2); - skinny128_permute_tk(state->TK3); - skinny128_LFSR2(state->TK2[0]); - skinny128_LFSR2(state->TK2[1]); - skinny128_LFSR3(state->TK3[0]); - skinny128_LFSR3(state->TK3[1]); -} - -void forkskinny_128_384_encrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_384_state_t state; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.TK3[0] = le_load_word32(key + 32); - state.TK3[1] = le_load_word32(key + 36); - state.TK3[2] = le_load_word32(key + 40); - state.TK3[3] = le_load_word32(key + 44); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Run all of the rounds before the forking point */ - for (round = 0; round < FORKSKINNY_128_384_ROUNDS_BEFORE; ++round) { - forkskinny_128_384_round(&state, round); - } - - /* Determine which output blocks we need */ - if (output_left && output_right) { - /* We need both outputs so save the state at the forking point */ - uint32_t F[4]; - F[0] = state.S[0]; - F[1] = state.S[1]; - F[2] = state.S[2]; - F[3] = state.S[3]; - - /* Generate the right output block */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); ++round) { - forkskinny_128_384_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - - /* Restore the state at the forking point */ - state.S[0] = F[0]; - state.S[1] = F[1]; - state.S[2] = F[2]; - state.S[3] = F[3]; - } - if (output_left) { - /* Generate the left output block */ - state.S[0] ^= 0x08040201U; /* Branching constant */ - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - for (round = (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER * 2); ++round) { - forkskinny_128_384_round(&state, round); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - } else { - /* We only need the right output block */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); ++round) { - forkskinny_128_384_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - 
le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - } -} - -/** - * \brief Applies one round of ForkSkinny-128-384 in reverse. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_128_384_inv_round - (forkskinny_128_384_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Permute TK1 and TK2 for the next round */ - skinny128_inv_LFSR2(state->TK2[0]); - skinny128_inv_LFSR2(state->TK2[1]); - skinny128_inv_LFSR3(state->TK3[0]); - skinny128_inv_LFSR3(state->TK3[1]); - skinny128_inv_permute_tk(state->TK1); - skinny128_inv_permute_tk(state->TK2); - skinny128_inv_permute_tk(state->TK3); - - /* Inverse mix of the columns */ - temp = s0; - s0 = s1; - s1 = s2; - s2 = s3; - s3 = temp ^ s2; - s2 ^= s0; - s1 ^= s2; - - /* Shift the cells in the rows left, which moves the cell - * values down closer to the LSB. That is, we do a right - * rotate on the word to rotate the cells in the word left */ - s1 = rightRotate8(s1); - s2 = rightRotate16(s2); - s3 = rightRotate24(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all cells in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; -} - -void forkskinny_128_384_decrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_384_state_t state; - forkskinny_128_384_state_t fstate; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.TK3[0] = le_load_word32(key + 32); - state.TK3[1] = le_load_word32(key + 36); - state.TK3[2] = le_load_word32(key + 40); - state.TK3[3] = le_load_word32(key + 44); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Fast-forward the tweakey to the end of the key schedule */ - for (round = 0; round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER * 2); ++round) { - skinny128_permute_tk(state.TK1); - skinny128_permute_tk(state.TK2); - skinny128_permute_tk(state.TK3); - skinny128_LFSR2(state.TK2[0]); - skinny128_LFSR2(state.TK2[1]); - skinny128_LFSR3(state.TK3[0]); - skinny128_LFSR3(state.TK3[1]); - } - - /* Perform the "after" rounds on the input to get back - * to the forking point in the cipher */ - for (round = (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER * 2); - round > (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); --round) { - 
forkskinny_128_384_inv_round(&state, round - 1); - } - - /* Remove the branching constant */ - state.S[0] ^= 0x08040201U; - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - - /* Roll the tweakey back another "after" rounds */ - for (round = 0; round < FORKSKINNY_128_384_ROUNDS_AFTER; ++round) { - skinny128_inv_LFSR2(state.TK2[0]); - skinny128_inv_LFSR2(state.TK2[1]); - skinny128_inv_LFSR3(state.TK3[0]); - skinny128_inv_LFSR3(state.TK3[1]); - skinny128_inv_permute_tk(state.TK1); - skinny128_inv_permute_tk(state.TK2); - skinny128_inv_permute_tk(state.TK3); - } - - /* Save the state and the tweakey at the forking point */ - fstate = state; - - /* Generate the left output block after another "before" rounds */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; round > 0; --round) { - forkskinny_128_384_inv_round(&state, round - 1); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - - /* Generate the right output block by going forward "after" - * rounds from the forking point */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); ++round) { - forkskinny_128_384_round(&fstate, round); - } - le_store_word32(output_right, fstate.S[0]); - le_store_word32(output_right + 4, fstate.S[1]); - le_store_word32(output_right + 8, fstate.S[2]); - le_store_word32(output_right + 12, fstate.S[3]); -} - -/** - * \brief Number of rounds of ForkSkinny-64-192 before forking. - */ -#define FORKSKINNY_64_192_ROUNDS_BEFORE 17 - -/** - * \brief Number of rounds of ForkSkinny-64-192 after forking. - */ -#define FORKSKINNY_64_192_ROUNDS_AFTER 23 - -/** - * \brief State information for ForkSkinny-64-192. - */ -typedef struct -{ - uint16_t TK1[4]; /**< First part of the tweakey */ - uint16_t TK2[4]; /**< Second part of the tweakey */ - uint16_t TK3[4]; /**< Third part of the tweakey */ - uint16_t S[4]; /**< Current block state */ - -} forkskinny_64_192_state_t; - -/** - * \brief Applies one round of ForkSkinny-64-192. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - * - * Note: The cells of each row are order in big-endian nibble order - * so it is easiest to manage the rows in bit-endian byte order. 
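Each variant consumes round constants up to index BEFORE + 2*AFTER - 1, so the 87-entry RC table defined earlier must cover 21 + 2*27 = 75 rounds for ForkSkinny-128-256, 25 + 2*31 = 87 for ForkSkinny-128-384, and 17 + 2*23 = 63 for ForkSkinny-64-192. A compile-time check along these lines (a sketch assuming C11 static assertions, not something present in the library):

#include <assert.h>   /* static_assert, C11 */

/* Sketch: RC[87] must provide a constant for every round index used. */
static_assert(21 + 2 * 27 <= 87, "RC too small for ForkSkinny-128-256");
static_assert(25 + 2 * 31 <= 87, "RC too small for ForkSkinny-128-384");
static_assert(17 + 2 * 23 <= 87, "RC too small for ForkSkinny-64-192");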
- */ -static void forkskinny_64_192_round - (forkskinny_64_192_state_t *state, unsigned round) -{ - uint16_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Apply the S-box to all cells in the state */ - skinny64_sbox(s0); - skinny64_sbox(s1); - skinny64_sbox(s2); - skinny64_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - ((rc & 0x0F) << 12) ^ 0x0020; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ - ((rc & 0x70) << 8); - s2 ^= 0x2000; - - /* Shift the cells in the rows right */ - s1 = rightRotate4_16(s1); - s2 = rightRotate8_16(s2); - s3 = rightRotate12_16(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny64_permute_tk(state->TK1); - skinny64_permute_tk(state->TK2); - skinny64_permute_tk(state->TK3); - skinny64_LFSR2(state->TK2[0]); - skinny64_LFSR2(state->TK2[1]); - skinny64_LFSR3(state->TK3[0]); - skinny64_LFSR3(state->TK3[1]); -} - -void forkskinny_64_192_encrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_64_192_state_t state; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = be_load_word16(key); - state.TK1[1] = be_load_word16(key + 2); - state.TK1[2] = be_load_word16(key + 4); - state.TK1[3] = be_load_word16(key + 6); - state.TK2[0] = be_load_word16(key + 8); - state.TK2[1] = be_load_word16(key + 10); - state.TK2[2] = be_load_word16(key + 12); - state.TK2[3] = be_load_word16(key + 14); - state.TK3[0] = be_load_word16(key + 16); - state.TK3[1] = be_load_word16(key + 18); - state.TK3[2] = be_load_word16(key + 20); - state.TK3[3] = be_load_word16(key + 22); - state.S[0] = be_load_word16(input); - state.S[1] = be_load_word16(input + 2); - state.S[2] = be_load_word16(input + 4); - state.S[3] = be_load_word16(input + 6); - - /* Run all of the rounds before the forking point */ - for (round = 0; round < FORKSKINNY_64_192_ROUNDS_BEFORE; ++round) { - forkskinny_64_192_round(&state, round); - } - - /* Determine which output blocks we need */ - if (output_left && output_right) { - /* We need both outputs so save the state at the forking point */ - uint16_t F[4]; - F[0] = state.S[0]; - F[1] = state.S[1]; - F[2] = state.S[2]; - F[3] = state.S[3]; - - /* Generate the right output block */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; - round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); ++round) { - forkskinny_64_192_round(&state, round); - } - be_store_word16(output_right, state.S[0]); - be_store_word16(output_right + 2, state.S[1]); - be_store_word16(output_right + 4, state.S[2]); - be_store_word16(output_right + 6, state.S[3]); - - /* Restore the state at the forking point */ - state.S[0] = F[0]; - state.S[1] = F[1]; - state.S[2] = F[2]; - state.S[3] = F[3]; - } - if (output_left) { - /* Generate the left output block */ - state.S[0] ^= 0x1249U; /* Branching constant */ - state.S[1] ^= 0x36daU; - state.S[2] ^= 0x5b7fU; - state.S[3] ^= 0xec81U; - for (round = (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); - round < 
(FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER * 2); ++round) { - forkskinny_64_192_round(&state, round); - } - be_store_word16(output_left, state.S[0]); - be_store_word16(output_left + 2, state.S[1]); - be_store_word16(output_left + 4, state.S[2]); - be_store_word16(output_left + 6, state.S[3]); - } else { - /* We only need the right output block */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; - round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); ++round) { - forkskinny_64_192_round(&state, round); - } - be_store_word16(output_right, state.S[0]); - be_store_word16(output_right + 2, state.S[1]); - be_store_word16(output_right + 4, state.S[2]); - be_store_word16(output_right + 6, state.S[3]); - } -} - -/** - * \brief Applies one round of ForkSkinny-64-192 in reverse. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_64_192_inv_round - (forkskinny_64_192_state_t *state, unsigned round) -{ - uint16_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny64_inv_LFSR2(state->TK2[0]); - skinny64_inv_LFSR2(state->TK2[1]); - skinny64_inv_LFSR3(state->TK3[0]); - skinny64_inv_LFSR3(state->TK3[1]); - skinny64_inv_permute_tk(state->TK1); - skinny64_inv_permute_tk(state->TK2); - skinny64_inv_permute_tk(state->TK3); - - /* Inverse mix of the columns */ - temp = s0; - s0 = s1; - s1 = s2; - s2 = s3; - s3 = temp ^ s2; - s2 ^= s0; - s1 ^= s2; - - /* Shift the cells in the rows left */ - s1 = leftRotate4_16(s1); - s2 = leftRotate8_16(s2); - s3 = leftRotate12_16(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - ((rc & 0x0F) << 12) ^ 0x0020; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ - ((rc & 0x70) << 8); - s2 ^= 0x2000; - - /* Apply the inverse of the S-box to all cells in the state */ - skinny64_inv_sbox(s0); - skinny64_inv_sbox(s1); - skinny64_inv_sbox(s2); - skinny64_inv_sbox(s3); - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; -} - -void forkskinny_64_192_decrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_64_192_state_t state; - forkskinny_64_192_state_t fstate; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = be_load_word16(key); - state.TK1[1] = be_load_word16(key + 2); - state.TK1[2] = be_load_word16(key + 4); - state.TK1[3] = be_load_word16(key + 6); - state.TK2[0] = be_load_word16(key + 8); - state.TK2[1] = be_load_word16(key + 10); - state.TK2[2] = be_load_word16(key + 12); - state.TK2[3] = be_load_word16(key + 14); - state.TK3[0] = be_load_word16(key + 16); - state.TK3[1] = be_load_word16(key + 18); - state.TK3[2] = be_load_word16(key + 20); - state.TK3[3] = be_load_word16(key + 22); - state.S[0] = be_load_word16(input); - state.S[1] = be_load_word16(input + 2); - state.S[2] = be_load_word16(input + 4); - state.S[3] = be_load_word16(input + 6); - - /* Fast-forward the tweakey to the end of the key schedule */ - for (round = 0; round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER * 2); ++round) { - skinny64_permute_tk(state.TK1); - skinny64_permute_tk(state.TK2); - 
skinny64_permute_tk(state.TK3); - skinny64_LFSR2(state.TK2[0]); - skinny64_LFSR2(state.TK2[1]); - skinny64_LFSR3(state.TK3[0]); - skinny64_LFSR3(state.TK3[1]); - } - - /* Perform the "after" rounds on the input to get back - * to the forking point in the cipher */ - for (round = (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER * 2); - round > (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); --round) { - forkskinny_64_192_inv_round(&state, round - 1); - } - - /* Remove the branching constant */ - state.S[0] ^= 0x1249U; - state.S[1] ^= 0x36daU; - state.S[2] ^= 0x5b7fU; - state.S[3] ^= 0xec81U; - - /* Roll the tweakey back another "after" rounds */ - for (round = 0; round < FORKSKINNY_64_192_ROUNDS_AFTER; ++round) { - skinny64_inv_LFSR2(state.TK2[0]); - skinny64_inv_LFSR2(state.TK2[1]); - skinny64_inv_LFSR3(state.TK3[0]); - skinny64_inv_LFSR3(state.TK3[1]); - skinny64_inv_permute_tk(state.TK1); - skinny64_inv_permute_tk(state.TK2); - skinny64_inv_permute_tk(state.TK3); - } - - /* Save the state and the tweakey at the forking point */ - fstate = state; - - /* Generate the left output block after another "before" rounds */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; round > 0; --round) { - forkskinny_64_192_inv_round(&state, round - 1); - } - be_store_word16(output_left, state.S[0]); - be_store_word16(output_left + 2, state.S[1]); - be_store_word16(output_left + 4, state.S[2]); - be_store_word16(output_left + 6, state.S[3]); - - /* Generate the right output block by going forward "after" - * rounds from the forking point */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; - round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); ++round) { - forkskinny_64_192_round(&fstate, round); - } - be_store_word16(output_right, fstate.S[0]); - be_store_word16(output_right + 2, fstate.S[1]); - be_store_word16(output_right + 4, fstate.S[2]); - be_store_word16(output_right + 6, fstate.S[3]); -} diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-forkskinny.h b/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-forkskinny.h deleted file mode 100644 index 0c1a707..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-forkskinny.h +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
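The tweakey rewind in forkskinny_64_192_decrypt() above depends on skinny64_inv_LFSR2()/skinny64_inv_LFSR3() exactly undoing the forward LFSR steps. A minimal self-check of that property, assuming the macros from "internal-skinnyutil.h" (deleted further down in this patch, but still present in the surviving rhys directory):

#include <stdint.h>
#include <assert.h>
#include "internal-skinnyutil.h"

/* Every 16-bit row value must survive a forward LFSR step followed by
 * the matching inverse step; the same holds for the LFSR3 pair. */
static void skinny64_lfsr_inverse_check(void)
{
    uint32_t v;
    for (v = 0; v <= 0xFFFFU; ++v) {
        uint16_t x = (uint16_t)v;
        skinny64_LFSR2(x);
        skinny64_inv_LFSR2(x);
        assert(x == (uint16_t)v);

        x = (uint16_t)v;
        skinny64_LFSR3(x);
        skinny64_inv_LFSR3(x);
        assert(x == (uint16_t)v);
    }
}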
- */ - -#ifndef LW_INTERNAL_FORKSKINNY_H -#define LW_INTERNAL_FORKSKINNY_H - -/** - * \file internal-forkskinny.h - * \brief ForkSkinny block cipher family. - * - * ForkSkinny is a modified version of the SKINNY block cipher that - * supports "forking": half-way through the rounds the cipher is - * forked in two different directions to produce two different outputs. - * - * References: https://www.esat.kuleuven.be/cosic/forkae/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts a block of plaintext with ForkSkinny-128-256. - * - * \param key 256-bit tweakey for ForkSkinny-128-256. - * \param output_left Left output block for the ciphertext, or NULL if - * the left output is not required. - * \param output_right Right output block for the authentication tag, - * or NULL if the right output is not required. - * \param input 128-bit input plaintext block. - * - * ForkSkinny-128-192 also uses this function with a padded tweakey. - */ -void forkskinny_128_256_encrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Decrypts a block of ciphertext with ForkSkinny-128-256. - * - * \param key 256-bit tweakey for ForkSkinny-128-256. - * \param output_left Left output block, which is the plaintext. - * \param output_right Right output block for the authentication tag. - * \param input 128-bit input ciphertext block. - * - * Both output blocks will be populated; neither is optional. - */ -void forkskinny_128_256_decrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Encrypts a block of plaintext with ForkSkinny-128-384. - * - * \param key 384-bit tweakey for ForkSkinny-128-384. - * \param output_left Left output block for the ciphertext, or NULL if - * the left output is not required. - * \param output_right Right output block for the authentication tag, - * or NULL if the right output is not required. - * \param input 128-bit input plaintext block. - * - * ForkSkinny-128-288 also uses this function with a padded tweakey. - */ -void forkskinny_128_384_encrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Decrypts a block of ciphertext with ForkSkinny-128-384. - * - * \param key 384-bit tweakey for ForkSkinny-128-384. - * \param output_left Left output block, which is the plaintext. - * \param output_right Right output block for the authentication tag. - * \param input 128-bit input ciphertext block. - * - * Both output blocks will be populated; neither is optional. - */ -void forkskinny_128_384_decrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Encrypts a block of input with ForkSkinny-64-192. - * - * \param key 192-bit tweakey for ForkSkinny-64-192. - * \param output_left First output block, or NULL if left is not required. - * \param output_right Second output block, or NULL if right is not required. - * \param input 64-bit input block. - */ -/** - * \brief Encrypts a block of plaintext with ForkSkinny-64-192. - * - * \param key 192-bit tweakey for ForkSkinny-64-192. - * \param output_left Left output block for the ciphertext, or NULL if - * the left output is not required. - * \param output_right Right output block for the authentication tag, - * or NULL if the right output is not required. 
- * \param input 64-bit input plaintext block. - */ -void forkskinny_64_192_encrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Decrypts a block of ciphertext with ForkSkinny-64-192. - * - * \param key 192-bit tweakey for ForkSkinny-64-192. - * \param output_left Left output block, which is the plaintext. - * \param output_right Right output block for the authentication tag. - * \param input 64-bit input ciphertext block. - * - * Both output blocks will be populated; neither is optional. - */ -void forkskinny_64_192_decrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-skinnyutil.h b/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-skinnyutil.h deleted file mode 100644 index 83136cb..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-skinnyutil.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNYUTIL_H -#define LW_INTERNAL_SKINNYUTIL_H - -/** - * \file internal-skinnyutil.h - * \brief Utilities to help implement SKINNY and its variants. 
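These declarations pair up so that decrypting the left (ciphertext) output reproduces both the plaintext and the right (tag) output. A hedged round-trip sketch against the surviving rhys copy of "internal-forkskinny.h"; the all-zero tweakey and block are chosen purely for illustration:

#include <assert.h>
#include <string.h>
#include "internal-forkskinny.h"

static void forkskinny_64_192_roundtrip(void)
{
    unsigned char key[24] = {0};       /* all-zero 192-bit tweakey */
    unsigned char pt[8]   = {0};       /* 64-bit plaintext block */
    unsigned char left[8], right[8];   /* ciphertext and tag halves */
    unsigned char pt2[8], right2[8];

    forkskinny_64_192_encrypt(key, left, right, pt);
    forkskinny_64_192_decrypt(key, pt2, right2, left);

    assert(memcmp(pt2, pt, 8) == 0);        /* plaintext recovered */
    assert(memcmp(right2, right, 8) == 0);  /* same tag block */
}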
- */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @cond skinnyutil */ - -/* Utilities for implementing SKINNY-128 */ - -#define skinny128_LFSR2(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ - (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ - } while (0) - - -#define skinny128_LFSR3(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ - (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) -#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) - -#define skinny128_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint32_t row2 = tk[2]; \ - uint32_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 16) | (row3 >> 16); \ - tk[0] = ((row2 >> 8) & 0x000000FFU) | \ - ((row2 << 16) & 0x00FF0000U) | \ - ( row3 & 0xFF00FF00U); \ - tk[1] = ((row2 >> 16) & 0x000000FFU) | \ - (row2 & 0xFF000000U) | \ - ((row3 << 8) & 0x0000FF00U) | \ - ( row3 & 0x00FF0000U); \ - } while (0) - -#define skinny128_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint32_t row0 = tk[0]; \ - uint32_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 >> 16) & 0x000000FFU) | \ - ((row0 << 8) & 0x0000FF00U) | \ - ((row1 << 16) & 0x00FF0000U) | \ - ( row1 & 0xFF000000U); \ - tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ - ((row0 << 16) & 0xFF000000U) | \ - ((row1 >> 16) & 0x000000FFU) | \ - ((row1 << 8) & 0x00FF0000U); \ - } while (0) - -/* - * Apply the SKINNY sbox. The original version from the specification is - * equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE(x) - * ((((x) & 0x01010101U) << 2) | - * (((x) & 0x06060606U) << 5) | - * (((x) & 0x20202020U) >> 5) | - * (((x) & 0xC8C8C8C8U) >> 2) | - * (((x) & 0x10101010U) >> 1)) - * - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * return SBOX_SWAP(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ - y = (((x >> 5) & (x << 1)) & 0x04040404U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ - x = ((x & 0x08080808U) << 1) | \ - ((x & 0x32323232U) << 2) | \ - ((x & 0x01010101U) << 5) | \ - ((x & 0x80808080U) >> 6) | \ - ((x & 0x40404040U) >> 4) | \ - ((x & 0x04040404U) >> 2); \ -} while (0) - -/* - * Apply the inverse of the SKINNY sbox. 
The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE_INV(x) - * ((((x) & 0x08080808U) << 1) | - * (((x) & 0x32323232U) << 2) | - * (((x) & 0x01010101U) << 5) | - * (((x) & 0xC0C0C0C0U) >> 5) | - * (((x) & 0x04040404U) >> 2)) - * - * x = SBOX_SWAP(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_inv_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ - x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ - y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ - x = ((x & 0x01010101U) << 2) | \ - ((x & 0x04040404U) << 4) | \ - ((x & 0x02020202U) << 6) | \ - ((x & 0x20202020U) >> 5) | \ - ((x & 0xC8C8C8C8U) >> 2) | \ - ((x & 0x10101010U) >> 1); \ -} while (0) - -/* Utilities for implementing SKINNY-64 */ - -#define skinny64_LFSR2(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ - } while (0) - -#define skinny64_LFSR3(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) -#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) - -#define skinny64_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint16_t row2 = tk[2]; \ - uint16_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 8) | (row3 >> 8); \ - tk[0] = ((row2 << 4) & 0xF000U) | \ - ((row2 >> 8) & 0x00F0U) | \ - ( row3 & 0x0F0FU); \ - tk[1] = ((row2 << 8) & 0xF000U) | \ - ((row3 >> 4) & 0x0F00U) | \ - ( row3 & 0x00F0U) | \ - ( row2 & 0x000FU); \ - } while (0) - -#define skinny64_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint16_t row0 = tk[0]; \ - uint16_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 << 8) & 0xF000U) | \ - ((row0 >> 4) & 0x0F00U) | \ - ((row1 >> 8) & 0x00F0U) | \ - ( row1 & 0x000FU); \ - tk[3] = ((row1 << 8) & 0xF000U) | \ - ((row0 << 8) & 0x0F00U) | \ - ((row1 >> 4) & 0x00F0U) | \ - ((row0 >> 8) & 0x000FU); \ - } while (0) - -/* - * Apply the SKINNY-64 sbox. 
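The inverse round functions above rely on skinny64_inv_permute_tk() exactly undoing skinny64_permute_tk(). A quick check that the pair cancels (the 128-bit pair can be checked the same way), again assuming "internal-skinnyutil.h":

#include <stdint.h>
#include <string.h>
#include <assert.h>
#include "internal-skinnyutil.h"

static void skinny64_permute_tk_inverse_check(void)
{
    uint16_t tk[4] = {0x0123, 0x4567, 0x89ab, 0xcdef};
    uint16_t orig[4];
    memcpy(orig, tk, sizeof(tk));

    skinny64_permute_tk(tk);        /* one step of the tweakey permutation PT */
    skinny64_inv_permute_tk(tk);    /* and one step of its inverse */
    assert(memcmp(tk, orig, sizeof(tk)) == 0);
}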
The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT(x) - * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_SHIFT steps to be performed with one final rotation. - * This reduces the number of required shift operations from 14 to 10. - * - * We can further reduce the number of NOT operations from 4 to 2 - * using the technique from https://github.com/kste/skinny_avx to - * convert NOR-XOR operations into AND-XOR operations by converting - * the S-box into its NOT-inverse. - */ -#define skinny64_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ - x = ~x; \ - x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ -} while (0) - -/* - * Apply the inverse of the SKINNY-64 sbox. The original version - * from the specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT_INV(x) - * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * return SBOX_MIX(x); - */ -#define skinny64_inv_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = ~x; \ - x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ -} while (0) - -/** @endcond */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-util.h b/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
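The comment above claims that the bit-sliced skinny64_sbox is equivalent to the specification's SBOX_MIX/SBOX_SHIFT composition; since a row is only 16 bits, that claim is cheap to verify exhaustively. A sketch of such a check, with ref_mix/ref_shift transcribed from the comment and "internal-skinnyutil.h" supplying the macro under test:

#include <stdint.h>
#include <assert.h>
#include "internal-skinnyutil.h"

static uint16_t ref_mix(uint16_t x)
{
    return (uint16_t)(((~(((x >> 1) | x) >> 2)) & 0x1111U) ^ x);
}

static uint16_t ref_shift(uint16_t x)
{
    return (uint16_t)(((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U));
}

/* MIX, SHIFT, MIX, SHIFT, MIX, SHIFT, MIX, as in the comment above. */
static uint16_t skinny64_sbox_reference(uint16_t x)
{
    x = ref_shift(ref_mix(x));
    x = ref_shift(ref_mix(x));
    x = ref_shift(ref_mix(x));
    return ref_mix(x);
}

static void skinny64_sbox_equivalence_check(void)
{
    uint32_t v;
    for (v = 0; v <= 0xFFFFU; ++v) {
        uint16_t x = (uint16_t)v;
        skinny64_sbox(x);                               /* optimised macro */
        assert(x == skinny64_sbox_reference((uint16_t)v));
    }
}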
- */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define 
le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. 
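The load/store macros above make byte order explicit regardless of the host CPU, and the lw_xor_block family provides the byte-wise XORs used throughout the AEAD modes. A small usage sketch, assuming this "internal-util.h":

#include <stdint.h>
#include <string.h>
#include "internal-util.h"

static void util_macro_demo(void)
{
    static const unsigned char bytes[4] = {0x78, 0x56, 0x34, 0x12};
    unsigned char out[4];
    unsigned char a[8], b[8];
    uint32_t w;

    w = le_load_word32(bytes);   /* 0x12345678 on any host, big- or little-endian */
    le_store_word32(out, w);     /* writes back the original four bytes */

    memset(a, 0x0F, sizeof(a));
    memset(b, 0xF0, sizeof(b));
    lw_xor_block(a, b, 8);       /* a[i] ^= b[i]; every byte of a is now 0xFF */
    (void)out;
}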
*/ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
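The fixed-count 16-bit rotations above are what the ForkSkinny-64 round function uses for its ShiftRows step (s1 = rightRotate4_16(s1), and so on). Rotating a packed row by 4, 8, or 12 bits moves every nibble cell one, two, or three positions to the right, as a short check shows (assuming "internal-util.h"):

#include <stdint.h>
#include <assert.h>
#include "internal-util.h"

static void skinny64_shiftrows_demo(void)
{
    uint16_t row = 0x1234;                    /* cells 1,2,3,4 left to right */
    assert(rightRotate4_16(row)  == 0x4123);  /* shift right by one cell */
    assert(rightRotate8_16(row)  == 0x3412);  /* by two cells */
    assert(rightRotate12_16(row) == 0x2341);  /* by three cells */
}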
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys/internal-util.h b/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys/internal-util.h +++ b/forkae/Implementations/crypto_aead/paefforkskinnyb64t192n48v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/aead-common.c b/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
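On AVR the composed forms added above replace a single multi-bit rotate with chains of 1-bit and 8-bit steps, which are the only cheap rotations on that core; whichever branch of the #if is selected must agree with the generic barrel rotate. A small equivalence check, assuming the patched "internal-util.h" and a GCC-style compiler (these statement-expression macros require one):

#include <stdint.h>
#include <assert.h>
#include "internal-util.h"

static void composed_rotation_check(void)
{
    uint32_t x = 0x80402010U;   /* arbitrary test word */

    /* leftRotate21 is built as "left 24, then right 3" when composed. */
    assert(leftRotate21(x) == leftRotate(x, 21));

    /* rightRotate7 is defined via leftRotate25 in the composed branch. */
    assert(rightRotate7(x) == rightRotate(x, 7));
}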
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/aead-common.h b/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. 
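In aead_check_tag above, the byte-wise XOR differences between the two tags are OR-ed into accum, and (accum - 1) >> 8 then turns accum into an all-ones mask (tags equal) or an all-zeroes mask (tags differ) without branching on secret data; the mask both wipes the plaintext and produces the 0 / -1 return value. A small standalone illustration of that arithmetic, assuming the usual two's-complement arithmetic right shift the library relies on:

#include <assert.h>

int main(void)
{
    /* Tags equal: every XOR is zero, accum stays 0, and (0 - 1) >> 8
     * is -1 (all ones), so the plaintext is kept and ~accum == 0. */
    int accum = 0;
    accum = (accum - 1) >> 8;
    assert(accum == -1 && ~accum == 0);

    /* Tags differ: accum is some value in 1..255, accum - 1 is in
     * 0..254, and shifting right by 8 gives 0, so the plaintext is
     * cleared and ~accum == -1 signals failure. */
    accum = 0x5a;
    accum = (accum - 1) >> 8;
    assert(accum == 0 && ~accum == -1);
    return 0;
}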
- */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. 
- */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. 
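The aead_cipher_t descriptor above exposes each algorithm's name, parameter sizes, flags, and encrypt/decrypt entry points through a single structure, so generic benchmarking or test code can drive any cipher in the library through function pointers. A hedged sketch of such a caller (the run_one harness is hypothetical and not part of the patch; key and nonce must already have the lengths the descriptor reports):

#include <stdio.h>
#include "aead-common.h"

/* Hypothetical harness: print a descriptor's parameters and run one
 * encryption of an 8-byte all-zero message through its function pointer. */
static int run_one(const aead_cipher_t *cipher,
                   const unsigned char *key, const unsigned char *nonce)
{
    unsigned char msg[8] = {0};
    unsigned char out[8 + 32];        /* message plus room for a large tag */
    unsigned long long outlen;

    printf("%s: key=%u nonce=%u tag=%u%s\n",
           cipher->name, cipher->key_len, cipher->nonce_len, cipher->tag_len,
           (cipher->flags & AEAD_FLAG_LITTLE_ENDIAN) ? " little-endian" : "");

    /* No associated data and no secret nonce in this sketch. */
    return cipher->encrypt(out, &outlen, msg, sizeof(msg), 0, 0, 0, nonce, key);
}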
- */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/api.h b/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/api.h deleted file mode 100644 index 40ffe7c..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 7 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/encrypt.c b/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/encrypt.c deleted file mode 100644 index 5cbb412..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "forkae.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return forkae_saef_128_192_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return forkae_saef_128_192_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/forkae.c b/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/forkae.c deleted file mode 100644 index 4a9671a..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/forkae.c +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
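The api.h constants and the encrypt.c stubs above bind this directory to SAEF-ForkSkinny-128-192 through the NIST LWC crypto_aead interface: a 16-byte key, a 7-byte public nonce, no secret nonce, and a 16-byte tag appended to the ciphertext. A hedged usage sketch of the underlying functions declared in forkae.h (the demo wrapper and buffer names are illustrative, not from the patch, and it must be linked against the forkae implementation):

#include <string.h>
#include "forkae.h"

int demo(void)
{
    unsigned char key[FORKAE_SAEF_128_192_KEY_SIZE] = {0};     /* 16 bytes */
    unsigned char nonce[FORKAE_SAEF_128_192_NONCE_SIZE] = {0}; /*  7 bytes */
    unsigned char msg[12] = "hello forkae";
    unsigned char ad[3] = {'h', 'd', 'r'};
    unsigned char ct[sizeof(msg) + FORKAE_SAEF_128_192_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    /* Encrypt: the 16-byte tag is appended after the ciphertext. */
    if (forkae_saef_128_192_aead_encrypt
            (ct, &ctlen, msg, sizeof(msg), ad, sizeof(ad), 0, nonce, key) != 0)
        return -1;

    /* Decrypt: returns 0 only if the tag verifies; on failure the
     * plaintext buffer is zeroed by aead_check_tag(). */
    if (forkae_saef_128_192_aead_decrypt
            (pt, &ptlen, 0, ct, ctlen, ad, sizeof(ad), nonce, key) != 0)
        return -1;
    return (ptlen == sizeof(msg) && memcmp(pt, msg, sizeof(msg)) == 0) ? 0 : -1;
}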
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "forkae.h" -#include "internal-forkskinny.h" -#include "internal-util.h" -#include - -aead_cipher_t const forkae_paef_64_192_cipher = { - "PAEF-ForkSkinny-64-192", - FORKAE_PAEF_64_192_KEY_SIZE, - FORKAE_PAEF_64_192_NONCE_SIZE, - FORKAE_PAEF_64_192_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_64_192_aead_encrypt, - forkae_paef_64_192_aead_decrypt -}; - -aead_cipher_t const forkae_paef_128_192_cipher = { - "PAEF-ForkSkinny-128-192", - FORKAE_PAEF_128_192_KEY_SIZE, - FORKAE_PAEF_128_192_NONCE_SIZE, - FORKAE_PAEF_128_192_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_128_192_aead_encrypt, - forkae_paef_128_192_aead_decrypt -}; - -aead_cipher_t const forkae_paef_128_256_cipher = { - "PAEF-ForkSkinny-128-256", - FORKAE_PAEF_128_256_KEY_SIZE, - FORKAE_PAEF_128_256_NONCE_SIZE, - FORKAE_PAEF_128_256_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_128_256_aead_encrypt, - forkae_paef_128_256_aead_decrypt -}; - -aead_cipher_t const forkae_paef_128_288_cipher = { - "PAEF-ForkSkinny-128-288", - FORKAE_PAEF_128_288_KEY_SIZE, - FORKAE_PAEF_128_288_NONCE_SIZE, - FORKAE_PAEF_128_288_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_128_288_aead_encrypt, - forkae_paef_128_288_aead_decrypt -}; - -aead_cipher_t const forkae_saef_128_192_cipher = { - "SAEF-ForkSkinny-128-192", - FORKAE_SAEF_128_192_KEY_SIZE, - FORKAE_SAEF_128_192_NONCE_SIZE, - FORKAE_SAEF_128_192_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_saef_128_192_aead_encrypt, - forkae_saef_128_192_aead_decrypt -}; - -aead_cipher_t const forkae_saef_128_256_cipher = { - "SAEF-ForkSkinny-128-256", - FORKAE_SAEF_128_256_KEY_SIZE, - FORKAE_SAEF_128_256_NONCE_SIZE, - FORKAE_SAEF_128_256_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_saef_128_256_aead_encrypt, - forkae_saef_128_256_aead_decrypt -}; - -/* PAEF-ForkSkinny-64-192 */ -#define FORKAE_ALG_NAME forkae_paef_64_192 -#define FORKAE_BLOCK_SIZE 8 -#define FORKAE_NONCE_SIZE FORKAE_PAEF_64_192_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 2 -#define FORKAE_TWEAKEY_SIZE 24 -#define FORKAE_BLOCK_FUNC forkskinny_64_192 -#include "internal-forkae-paef.h" - -/* PAEF-ForkSkinny-128-192 */ -#define FORKAE_ALG_NAME forkae_paef_128_192 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_PAEF_128_192_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 2 -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-paef.h" - -/* PAEF-ForkSkinny-128-256 */ -#define FORKAE_ALG_NAME forkae_paef_128_256 -#define FORKAE_BLOCK_SIZE 16 
-#define FORKAE_NONCE_SIZE FORKAE_PAEF_128_256_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 2 -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-paef.h" - -/* PAEF-ForkSkinny-128-288 */ -#define FORKAE_ALG_NAME forkae_paef_128_288 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_PAEF_128_288_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 7 -#define FORKAE_TWEAKEY_SIZE 48 -#define FORKAE_BLOCK_FUNC forkskinny_128_384 -#include "internal-forkae-paef.h" - -/* SAEF-ForkSkinny-128-192 */ -#define FORKAE_ALG_NAME forkae_saef_128_192 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_SAEF_128_192_NONCE_SIZE -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_TWEAKEY_REDUCED_SIZE 24 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-saef.h" - -/* SAEF-ForkSkinny-128-256 */ -#define FORKAE_ALG_NAME forkae_saef_128_256 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_SAEF_128_256_NONCE_SIZE -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_TWEAKEY_REDUCED_SIZE 32 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-saef.h" diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/forkae.h b/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/forkae.h deleted file mode 100644 index 3e27b50..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/forkae.h +++ /dev/null @@ -1,551 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_FORKAE_H -#define LWCRYPTO_FORKAE_H - -#include "aead-common.h" - -/** - * \file forkae.h - * \brief ForkAE authenticated encryption algorithm family. - * - * ForkAE is a family of authenticated encryption algorithms based on a - * modified version of the SKINNY tweakable block cipher. The modifications - * introduce "forking" where each input block produces two output blocks - * for use in encryption and authentication. There are six members in - * the ForkAE family: - * - * \li PAEF-ForkSkinny-64-192 has a 128-bit key, a 48-bit nonce, and a - * 64-bit authentication tag. The associated data and plaintext are - * limited to 216 bytes. - * \li PAEF-ForkSkinny-128-192 has a 128-bit key, a 48-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext are - * limited to 217 bytes. 
- * \li PAEF-ForkSkinny-128-256 has a 128-bit key, a 112-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext are - * limited to 217 bytes. - * \li PAEF-ForkSkinny-128-288 has a 128-bit key, a 104-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext are - * limited to 257 bytes. This is the primary member of the family. - * \li SAEF-ForkSkinny-128-192 has a 128-bit key, a 56-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext may be - * unlimited in size. - * \li SAEF-ForkSkinny-128-256 has a 128-bit key, a 120-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext may be - * unlimited in size. - * - * The PAEF variants support parallel encryption and decryption for - * higher throughput. The SAEF variants encrypt or decrypt blocks - * sequentially. - * - * ForkAE is designed to be efficient on small packet sizes so most of - * the PAEF algorithms have a limit of 64k or 128k on the amount of - * payload in a single packet. Obviously the input can be split into - * separate packets for larger amounts of data. - * - * References: https://www.esat.kuleuven.be/cosic/forkae/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for PAEF-ForkSkinny-64-192. - */ -#define FORKAE_PAEF_64_192_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-64-192. - */ -#define FORKAE_PAEF_64_192_TAG_SIZE 8 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-64-192. - */ -#define FORKAE_PAEF_64_192_NONCE_SIZE 6 - -/** - * \brief Size of the key for PAEF-ForkSkinny-128-192. - */ -#define FORKAE_PAEF_128_192_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-128-192. - */ -#define FORKAE_PAEF_128_192_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-128-192. - */ -#define FORKAE_PAEF_128_192_NONCE_SIZE 6 - -/** - * \brief Size of the key for PAEF-ForkSkinny-128-256. - */ -#define FORKAE_PAEF_128_256_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-128-256. - */ -#define FORKAE_PAEF_128_256_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-128-256. - */ -#define FORKAE_PAEF_128_256_NONCE_SIZE 14 - -/** - * \brief Size of the key for PAEF-ForkSkinny-128-288. - */ -#define FORKAE_PAEF_128_288_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-128-288. - */ -#define FORKAE_PAEF_128_288_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-128-288. - */ -#define FORKAE_PAEF_128_288_NONCE_SIZE 13 - -/** - * \brief Size of the key for SAEF-ForkSkinny-128-192. - */ -#define FORKAE_SAEF_128_192_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SAEF-ForkSkinny-128-192. - */ -#define FORKAE_SAEF_128_192_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SAEF-ForkSkinny-128-192. - */ -#define FORKAE_SAEF_128_192_NONCE_SIZE 7 - -/** - * \brief Size of the key for SAEF-ForkSkinny-128-256. - */ -#define FORKAE_SAEF_128_256_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SAEF-ForkSkinny-128-256. - */ -#define FORKAE_SAEF_128_256_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SAEF-ForkSkinny-128-256. - */ -#define FORKAE_SAEF_128_256_NONCE_SIZE 15 - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-64-192 cipher. - */ -extern aead_cipher_t const forkae_paef_64_192_cipher; - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-128-192 cipher. 
- */ -extern aead_cipher_t const forkae_paef_128_192_cipher; - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-128-256 cipher. - */ -extern aead_cipher_t const forkae_paef_128_256_cipher; - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-128-288 cipher. - */ -extern aead_cipher_t const forkae_paef_128_288_cipher; - -/** - * \brief Meta-information block for the SAEF-ForkSkinny-128-192 cipher. - */ -extern aead_cipher_t const forkae_saef_128_192_cipher; - -/** - * \brief Meta-information block for the SAEF-ForkSkinny-128-256 cipher. - */ -extern aead_cipher_t const forkae_saef_128_256_cipher; - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-64-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_64_192_aead_decrypt() - */ -int forkae_paef_64_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-64-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_paef_64_192_aead_encrypt() - */ -int forkae_paef_64_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-128-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. 
- * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_128_192_aead_decrypt() - */ -int forkae_paef_128_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-128-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_paef_128_192_aead_encrypt() - */ -int forkae_paef_128_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-128-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 14 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_128_256_aead_decrypt() - */ -int forkae_paef_128_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-128-256. 
- * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 14 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_paef_128_256_aead_encrypt() - */ -int forkae_paef_128_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-128-288. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 13 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_128_288_aead_decrypt() - */ -int forkae_paef_128_288_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-128-288. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 13 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- * - * \sa forkae_paef_128_288_aead_encrypt() - */ -int forkae_paef_128_288_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SAEF-ForkSkinny-128-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 7 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_saef_128_192_aead_decrypt() - */ -int forkae_saef_128_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SAEF-ForkSkinny-128-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 7 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_saef_128_192_aead_encrypt() - */ -int forkae_saef_128_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SAEF-ForkSkinny-128-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. 
- * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_saef_128_256_aead_decrypt() - */ -int forkae_saef_128_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SAEF-ForkSkinny-128-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_saef_128_256_aead_encrypt() - */ -int forkae_saef_128_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-forkae-paef.h b/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-forkae-paef.h deleted file mode 100644 index 6f57b2b..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-forkae-paef.h +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
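forkae.c earlier in this patch instantiates every variant by defining the FORKAE_* configuration macros and then including internal-forkae-paef.h or internal-forkae-saef.h; the internal-forkae-paef.h template that follows generates the per-variant function names with FORKAE_CONCAT token pasting and #undef-s its parameters at the end so it can be included once per variant. A compressed sketch of that multiple-instantiation pattern with hypothetical DEMO_* names (the template body is inlined here instead of living in a separate header):

#include <stdio.h>

#define DEMO_CONCAT_INNER(name, suffix) name##suffix
#define DEMO_CONCAT(name, suffix) DEMO_CONCAT_INNER(name, suffix)

/* "Instantiate" the first variant: define its parameters, expand the
 * template, then undefine so the next variant can reuse the macros. */
#define DEMO_ALG_NAME variant_a
#define DEMO_BLOCK_SIZE 8
static int DEMO_CONCAT(DEMO_ALG_NAME, _block_size)(void) { return DEMO_BLOCK_SIZE; }
#undef DEMO_ALG_NAME
#undef DEMO_BLOCK_SIZE

/* Second variant, same template, different parameters. */
#define DEMO_ALG_NAME variant_b
#define DEMO_BLOCK_SIZE 16
static int DEMO_CONCAT(DEMO_ALG_NAME, _block_size)(void) { return DEMO_BLOCK_SIZE; }
#undef DEMO_ALG_NAME
#undef DEMO_BLOCK_SIZE

int main(void)
{
    /* Token pasting produced variant_a_block_size() and variant_b_block_size(). */
    printf("%d %d\n", variant_a_block_size(), variant_b_block_size());
    return 0;
}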
- */ - -/* We expect a number of macros to be defined before this file - * is included to configure the underlying ForkAE PAEF variant. - * - * FORKAE_ALG_NAME Name of the FORKAE algorithm; e.g. forkae_paef_128_256 - * FORKAE_BLOCK_SIZE Size of the block for the cipher (8 or 16 bytes). - * FORKAE_NONCE_SIZE Size of the nonce for the cipher in bytes. - * FORKAE_COUNTER_SIZE Size of the counter value for the cipher in bytes. - * FORKAE_TWEAKEY_SIZE Size of the tweakey for the underlying forked cipher. - * FORKAE_BLOCK_FUNC Name of the block function; e.g. forkskinny_128_256 - */ -#if defined(FORKAE_ALG_NAME) - -#define FORKAE_CONCAT_INNER(name,suffix) name##suffix -#define FORKAE_CONCAT(name,suffix) FORKAE_CONCAT_INNER(name,suffix) - -/* Limit on the amount of data we can process based on the counter size */ -#define FORKAE_PAEF_DATA_LIMIT \ - ((unsigned long long)((1ULL << (FORKAE_COUNTER_SIZE * 8)) * \ - (FORKAE_BLOCK_SIZE / 8)) - FORKAE_BLOCK_SIZE) - -/* Processes the associated data in PAEF mode */ -STATIC_INLINE void FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter) - (unsigned char tweakey[FORKAE_TWEAKEY_SIZE], - unsigned long long counter, unsigned char domain) -{ - unsigned posn; - counter |= (((unsigned long long)domain) << (FORKAE_COUNTER_SIZE * 8 - 3)); - for (posn = 0; posn < FORKAE_COUNTER_SIZE; ++posn) { - tweakey[16 + FORKAE_NONCE_SIZE + FORKAE_COUNTER_SIZE - 1 - posn] = - (unsigned char)counter; - counter >>= 8; - } -} - -/* Check that the last block is padded correctly; -1 if ok, 0 if not */ -STATIC_INLINE int FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (const unsigned char *block, unsigned len) -{ - int check = block[0] ^ 0x80; - while (len > 1) { - --len; - check |= block[len]; - } - return (check - 1) >> 8; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_encrypt) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - unsigned long long counter; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + FORKAE_BLOCK_SIZE; - - /* Validate the size of the associated data and plaintext as there - * is a limit on the size of the PAEF counter field */ - if (adlen > FORKAE_PAEF_DATA_LIMIT || mlen > FORKAE_PAEF_DATA_LIMIT) - return -2; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - - /* Tag value starts at zero. 
We will XOR this with all of the - * intermediate tag values that are calculated for each block */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - counter = 1; - while (adlen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 0); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - ++counter; - } - if (adlen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 1); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } else if (adlen != 0 || mlen == 0) { - unsigned temp = (unsigned)adlen; - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, sizeof(block) - temp - 1); - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 3); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, block); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } - - /* If there is no message payload, then generate the tag and we are done */ - if (!mlen) { - memcpy(c, tag, sizeof(tag)); - return 0; - } - - /* Encrypt all plaintext blocks except the last */ - counter = 1; - while (mlen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 4); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, m); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - mlen -= FORKAE_BLOCK_SIZE; - ++counter; - } - - /* Encrypt the last block and generate the final authentication tag */ - if (mlen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 5); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, m); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, FORKAE_BLOCK_SIZE); - } else { - unsigned temp = (unsigned)mlen; - memcpy(block, m, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, sizeof(block) - temp - 1); - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 7); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, temp); - } - return 0; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_decrypt) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - unsigned char *mtemp = m; - unsigned long long counter; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < FORKAE_BLOCK_SIZE) - return -1; - clen -= FORKAE_BLOCK_SIZE; - *mlen = clen; - - /* Validate the size of the associated data and plaintext as there - * is a limit on the size of the PAEF counter field */ - if (adlen > FORKAE_PAEF_DATA_LIMIT || clen > FORKAE_PAEF_DATA_LIMIT) - return -2; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - - /* Tag value starts at zero. 
We will XOR this with all of the - * intermediate tag values that are calculated for each block */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - counter = 1; - while (adlen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 0); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - ++counter; - } - if (adlen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 1); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } else if (adlen != 0 || clen == 0) { - unsigned temp = (unsigned)adlen; - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, sizeof(block) - temp - 1); - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 3); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, block); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } - - /* If there is no message payload, then check the tag and we are done */ - if (!clen) - return aead_check_tag(m, clen, tag, c, sizeof(tag)); - - /* Decrypt all ciphertext blocks except the last */ - counter = 1; - while (clen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 4); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, c); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - clen -= FORKAE_BLOCK_SIZE; - ++counter; - } - - /* Decrypt the last block and check the final authentication tag */ - if (clen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 5); - lw_xor_block_2_src(m, c, tag, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, m); - return aead_check_tag - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, sizeof(tag)); - } else { - unsigned temp = (unsigned)clen; - unsigned char block2[FORKAE_BLOCK_SIZE]; - int check; - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 7); - lw_xor_block_2_src(block2, tag, c, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt) - (tweakey, block2, block, block2); - check = FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (block2 + temp, FORKAE_BLOCK_SIZE - temp); - memcpy(m, block2, temp); - return aead_check_tag_precheck - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, temp, check); - } -} - -#endif /* FORKAE_ALG_NAME */ - -/* Now undefine everything so that we can include this file again for - * another variant on the ForkAE PAEF algorithm */ -#undef FORKAE_ALG_NAME -#undef FORKAE_BLOCK_SIZE -#undef FORKAE_NONCE_SIZE -#undef FORKAE_COUNTER_SIZE -#undef FORKAE_TWEAKEY_SIZE -#undef FORKAE_BLOCK_FUNC -#undef FORKAE_CONCAT_INNER -#undef FORKAE_CONCAT -#undef FORKAE_PAEF_DATA_LIMIT diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-forkae-saef.h b/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-forkae-saef.h deleted file mode 100644 index 768bba4..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-forkae-saef.h +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -/* We expect a number of macros to be defined before this file - * is included to configure the underlying ForkAE SAEF variant. - * - * FORKAE_ALG_NAME Name of the FORKAE algorithm; e.g. forkae_saef_128_256 - * FORKAE_BLOCK_SIZE Size of the block for the cipher (8 or 16 bytes). - * FORKAE_NONCE_SIZE Size of the nonce for the cipher in bytes. - * FORKAE_TWEAKEY_SIZE Size of the tweakey for the underlying forked cipher. - * FORKAE_REDUCED_TWEAKEY_SIZE Size of the reduced tweakey without padding. - * FORKAE_BLOCK_FUNC Name of the block function; e.g. forkskinny_128_256 - */ -#if defined(FORKAE_ALG_NAME) - -#define FORKAE_CONCAT_INNER(name,suffix) name##suffix -#define FORKAE_CONCAT(name,suffix) FORKAE_CONCAT_INNER(name,suffix) - -/* Check that the last block is padded correctly; -1 if ok, 0 if not */ -STATIC_INLINE int FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (const unsigned char *block, unsigned len) -{ - int check = block[0] ^ 0x80; - while (len > 1) { - --len; - check |= block[len]; - } - return (check - 1) >> 8; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_encrypt) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + FORKAE_BLOCK_SIZE; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] = 0x08; - - /* Tag value starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - if (adlen > 0 || mlen == 0) { - while (adlen > FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - } - if (mlen == 0) - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x02; - if (adlen == FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - 
FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } else if (adlen != 0 || mlen == 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(tag, ad, temp); - tag[temp] ^= 0x80; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } - } - - /* If there is no message payload, then generate the tag and we are done */ - if (!mlen) { - memcpy(c, tag, sizeof(tag)); - return 0; - } - - /* Encrypt all plaintext blocks except the last */ - while (mlen > FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, m, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(tag, block, FORKAE_BLOCK_SIZE); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - mlen -= FORKAE_BLOCK_SIZE; - } - - /* Encrypt the last block and generate the final authentication tag */ - if (mlen == FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, m, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, FORKAE_BLOCK_SIZE); - } else { - unsigned temp = (unsigned)mlen; - memcpy(block, tag, FORKAE_BLOCK_SIZE); - lw_xor_block(block, m, temp); - block[temp] ^= 0x80; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x05; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, temp); - } - return 0; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_decrypt) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < FORKAE_BLOCK_SIZE) - return -1; - clen -= FORKAE_BLOCK_SIZE; - *mlen = clen; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] = 0x08; - - /* Tag value starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - if (adlen > 0 || clen == 0) { - while (adlen > FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - } - if (clen == 0) - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x02; - if (adlen == FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } else if (adlen != 0 || clen == 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(tag, ad, temp); - 
tag[temp] ^= 0x80; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } - } - - /* If there is no message payload, then check the tag and we are done */ - if (!clen) - return aead_check_tag(m, clen, tag, c, sizeof(tag)); - - /* Decrypt all ciphertext blocks except the last */ - while (clen > FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, c, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, block); - lw_xor_block(m, tag, FORKAE_BLOCK_SIZE); - memcpy(tag, block, FORKAE_BLOCK_SIZE); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - clen -= FORKAE_BLOCK_SIZE; - } - - /* Decrypt the last block and check the final authentication tag */ - if (clen == FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, c, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, block); - lw_xor_block(m, tag, FORKAE_BLOCK_SIZE); - return aead_check_tag - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, FORKAE_BLOCK_SIZE); - } else { - unsigned temp = (unsigned)clen; - unsigned char mblock[FORKAE_BLOCK_SIZE]; - int check; - lw_xor_block_2_src(block, c, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x05; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt) - (tweakey, mblock, block, block); - lw_xor_block(mblock, tag, FORKAE_BLOCK_SIZE); - memcpy(m, mblock, temp); - check = FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (mblock + temp, FORKAE_BLOCK_SIZE - temp); - return aead_check_tag_precheck - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, temp, check); - } -} - -#endif /* FORKAE_ALG_NAME */ - -/* Now undefine everything so that we can include this file again for - * another variant on the ForkAE SAEF algorithm */ -#undef FORKAE_ALG_NAME -#undef FORKAE_BLOCK_SIZE -#undef FORKAE_NONCE_SIZE -#undef FORKAE_COUNTER_SIZE -#undef FORKAE_TWEAKEY_SIZE -#undef FORKAE_TWEAKEY_REDUCED_SIZE -#undef FORKAE_BLOCK_FUNC -#undef FORKAE_CONCAT_INNER -#undef FORKAE_CONCAT diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-forkskinny.c b/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-forkskinny.c deleted file mode 100644 index b050ff1..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-forkskinny.c +++ /dev/null @@ -1,988 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
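
For readers following the SAEF template removed above: it is written to be included once per ForkAE variant, with the FORKAE_* macros defined first and then undefined again by the block at its end so that the next variant can re-include it. The sketch below shows that pattern under stated assumptions; the variant name, the numeric values and the include path are illustrative only, not taken from this patch. Note also that the code itself tests FORKAE_TWEAKEY_REDUCED_SIZE, even though the introductory comment spells it FORKAE_REDUCED_TWEAKEY_SIZE.

    /* Hypothetical instantiation of the SAEF template above; every value
     * here is an assumption chosen for illustration only. */
    #define FORKAE_ALG_NAME             forkae_saef_128_256  /* example name from the template comment */
    #define FORKAE_BLOCK_SIZE           16                   /* 16-byte block (assumed) */
    #define FORKAE_NONCE_SIZE           15                   /* nonce length in bytes (assumed) */
    #define FORKAE_TWEAKEY_SIZE         32                   /* tweakey size of forkskinny_128_256 */
    #define FORKAE_TWEAKEY_REDUCED_SIZE 32                   /* reduced tweakey without padding (assumed) */
    #define FORKAE_BLOCK_FUNC           forkskinny_128_256   /* example block function from the comment */
    #include "internal-forkae-saef.h"                        /* assumed name of the template file */
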
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-forkskinny.h" -#include "internal-skinnyutil.h" - -/** - * \brief 7-bit round constants for all ForkSkinny block ciphers. - */ -static unsigned char const RC[87] = { - 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7e, 0x7d, - 0x7b, 0x77, 0x6f, 0x5f, 0x3e, 0x7c, 0x79, 0x73, - 0x67, 0x4f, 0x1e, 0x3d, 0x7a, 0x75, 0x6b, 0x57, - 0x2e, 0x5c, 0x38, 0x70, 0x61, 0x43, 0x06, 0x0d, - 0x1b, 0x37, 0x6e, 0x5d, 0x3a, 0x74, 0x69, 0x53, - 0x26, 0x4c, 0x18, 0x31, 0x62, 0x45, 0x0a, 0x15, - 0x2b, 0x56, 0x2c, 0x58, 0x30, 0x60, 0x41, 0x02, - 0x05, 0x0b, 0x17, 0x2f, 0x5e, 0x3c, 0x78, 0x71, - 0x63, 0x47, 0x0e, 0x1d, 0x3b, 0x76, 0x6d, 0x5b, - 0x36, 0x6c, 0x59, 0x32, 0x64, 0x49, 0x12, 0x25, - 0x4a, 0x14, 0x29, 0x52, 0x24, 0x48, 0x10 -}; - -/** - * \brief Number of rounds of ForkSkinny-128-256 before forking. - */ -#define FORKSKINNY_128_256_ROUNDS_BEFORE 21 - -/** - * \brief Number of rounds of ForkSkinny-128-256 after forking. - */ -#define FORKSKINNY_128_256_ROUNDS_AFTER 27 - -/** - * \brief State information for ForkSkinny-128-256. - */ -typedef struct -{ - uint32_t TK1[4]; /**< First part of the tweakey */ - uint32_t TK2[4]; /**< Second part of the tweakey */ - uint32_t S[4]; /**< Current block state */ - -} forkskinny_128_256_state_t; - -/** - * \brief Applies one round of ForkSkinny-128-256. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_128_256_round - (forkskinny_128_256_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Apply the S-box to all cells in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(state->TK1); - skinny128_permute_tk(state->TK2); - skinny128_LFSR2(state->TK2[0]); - skinny128_LFSR2(state->TK2[1]); -} - -void forkskinny_128_256_encrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_256_state_t state; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Run all of the rounds before the forking point */ - for (round = 0; round < FORKSKINNY_128_256_ROUNDS_BEFORE; ++round) { - forkskinny_128_256_round(&state, round); - } - - /* Determine which output blocks we need */ - if (output_left && output_right) { - /* We need both outputs so save the state at the forking point */ - uint32_t F[4]; - F[0] = state.S[0]; - F[1] = state.S[1]; - F[2] = state.S[2]; - F[3] = state.S[3]; - - /* Generate the right output block */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); ++round) { - forkskinny_128_256_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - - /* Restore the state at the forking point */ - state.S[0] = F[0]; - state.S[1] = F[1]; - state.S[2] = F[2]; - state.S[3] = F[3]; - } - if (output_left) { - /* Generate the left output block */ - state.S[0] ^= 0x08040201U; /* Branching constant */ - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - for (round = (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER * 2); ++round) { - forkskinny_128_256_round(&state, round); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - } else { - /* We only need the right output block */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); ++round) { - forkskinny_128_256_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - } -} - -/** - * \brief Applies one round of ForkSkinny-128-256 in reverse. - * - * \param state State to apply the round to. 
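
The encrypt routine above exposes the forking construction directly: the state is saved at the forking point, the right branch is run to produce the tag block, the saved state is restored and XORed with the branching constant, and the left branch is run to produce the ciphertext block; either output pointer may be NULL. A minimal caller sketch, assuming only the declaration from internal-forkskinny.h (buffer contents are placeholders):

    #include <stddef.h>
    #include "internal-forkskinny.h"   /* declares forkskinny_128_256_encrypt() */

    static void fork_example(void)
    {
        unsigned char tweakey[32] = {0};   /* TK1 || TK2, placeholder value */
        unsigned char input[16]   = {0};   /* 128-bit plaintext block, placeholder */
        unsigned char left[16];            /* ciphertext branch */
        unsigned char right[16];           /* authentication-tag branch */

        /* Request both branches: the implementation saves the state at the
         * forking point and restores it before running the left branch. */
        forkskinny_128_256_encrypt(tweakey, left, right, input);

        /* Passing NULL for the left output runs the right branch only. */
        forkskinny_128_256_encrypt(tweakey, NULL, right, input);
    }
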
- * \param round Number of the round to apply. - */ -static void forkskinny_128_256_inv_round - (forkskinny_128_256_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Permute TK1 and TK2 for the next round */ - skinny128_inv_LFSR2(state->TK2[0]); - skinny128_inv_LFSR2(state->TK2[1]); - skinny128_inv_permute_tk(state->TK1); - skinny128_inv_permute_tk(state->TK2); - - /* Inverse mix of the columns */ - temp = s0; - s0 = s1; - s1 = s2; - s2 = s3; - s3 = temp ^ s2; - s2 ^= s0; - s1 ^= s2; - - /* Shift the cells in the rows left, which moves the cell - * values down closer to the LSB. That is, we do a right - * rotate on the word to rotate the cells in the word left */ - s1 = rightRotate8(s1); - s2 = rightRotate16(s2); - s3 = rightRotate24(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all cells in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; -} - -void forkskinny_128_256_decrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_256_state_t state; - forkskinny_128_256_state_t fstate; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Fast-forward the tweakey to the end of the key schedule */ - for (round = 0; round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER * 2); ++round) { - skinny128_permute_tk(state.TK1); - skinny128_permute_tk(state.TK2); - skinny128_LFSR2(state.TK2[0]); - skinny128_LFSR2(state.TK2[1]); - } - - /* Perform the "after" rounds on the input to get back - * to the forking point in the cipher */ - for (round = (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER * 2); - round > (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); --round) { - forkskinny_128_256_inv_round(&state, round - 1); - } - - /* Remove the branching constant */ - state.S[0] ^= 0x08040201U; - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - - /* Roll the tweakey back another "after" rounds */ - for (round = 0; round < FORKSKINNY_128_256_ROUNDS_AFTER; ++round) { - skinny128_inv_LFSR2(state.TK2[0]); - skinny128_inv_LFSR2(state.TK2[1]); - skinny128_inv_permute_tk(state.TK1); - skinny128_inv_permute_tk(state.TK2); - } - - /* Save the state and the tweakey at the forking point */ - fstate = state; - - /* Generate the left output block after another "before" rounds */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; round > 0; --round) { - 
forkskinny_128_256_inv_round(&state, round - 1); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - - /* Generate the right output block by going forward "after" - * rounds from the forking point */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); ++round) { - forkskinny_128_256_round(&fstate, round); - } - le_store_word32(output_right, fstate.S[0]); - le_store_word32(output_right + 4, fstate.S[1]); - le_store_word32(output_right + 8, fstate.S[2]); - le_store_word32(output_right + 12, fstate.S[3]); -} - -/** - * \brief Number of rounds of ForkSkinny-128-384 before forking. - */ -#define FORKSKINNY_128_384_ROUNDS_BEFORE 25 - -/** - * \brief Number of rounds of ForkSkinny-128-384 after forking. - */ -#define FORKSKINNY_128_384_ROUNDS_AFTER 31 - -/** - * \brief State information for ForkSkinny-128-384. - */ -typedef struct -{ - uint32_t TK1[4]; /**< First part of the tweakey */ - uint32_t TK2[4]; /**< Second part of the tweakey */ - uint32_t TK3[4]; /**< Third part of the tweakey */ - uint32_t S[4]; /**< Current block state */ - -} forkskinny_128_384_state_t; - -/** - * \brief Applies one round of ForkSkinny-128-384. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_128_384_round - (forkskinny_128_384_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Apply the S-box to all cells in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny128_permute_tk(state->TK1); - skinny128_permute_tk(state->TK2); - skinny128_permute_tk(state->TK3); - skinny128_LFSR2(state->TK2[0]); - skinny128_LFSR2(state->TK2[1]); - skinny128_LFSR3(state->TK3[0]); - skinny128_LFSR3(state->TK3[1]); -} - -void forkskinny_128_384_encrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_384_state_t state; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.TK3[0] = le_load_word32(key + 32); - state.TK3[1] = le_load_word32(key + 36); - state.TK3[2] = le_load_word32(key + 40); - state.TK3[3] = le_load_word32(key + 44); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Run all of the rounds before the forking point */ - for (round = 0; round < FORKSKINNY_128_384_ROUNDS_BEFORE; ++round) { - forkskinny_128_384_round(&state, round); - } - - /* Determine which output blocks we need */ - if (output_left && output_right) { - /* We need both outputs so save the state at the forking point */ - uint32_t F[4]; - F[0] = state.S[0]; - F[1] = state.S[1]; - F[2] = state.S[2]; - F[3] = state.S[3]; - - /* Generate the right output block */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); ++round) { - forkskinny_128_384_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - - /* Restore the state at the forking point */ - state.S[0] = F[0]; - state.S[1] = F[1]; - state.S[2] = F[2]; - state.S[3] = F[3]; - } - if (output_left) { - /* Generate the left output block */ - state.S[0] ^= 0x08040201U; /* Branching constant */ - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - for (round = (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER * 2); ++round) { - forkskinny_128_384_round(&state, round); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - } else { - /* We only need the right output block */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); ++round) { - forkskinny_128_384_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - 
le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - } -} - -/** - * \brief Applies one round of ForkSkinny-128-384 in reverse. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_128_384_inv_round - (forkskinny_128_384_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Permute TK1 and TK2 for the next round */ - skinny128_inv_LFSR2(state->TK2[0]); - skinny128_inv_LFSR2(state->TK2[1]); - skinny128_inv_LFSR3(state->TK3[0]); - skinny128_inv_LFSR3(state->TK3[1]); - skinny128_inv_permute_tk(state->TK1); - skinny128_inv_permute_tk(state->TK2); - skinny128_inv_permute_tk(state->TK3); - - /* Inverse mix of the columns */ - temp = s0; - s0 = s1; - s1 = s2; - s2 = s3; - s3 = temp ^ s2; - s2 ^= s0; - s1 ^= s2; - - /* Shift the cells in the rows left, which moves the cell - * values down closer to the LSB. That is, we do a right - * rotate on the word to rotate the cells in the word left */ - s1 = rightRotate8(s1); - s2 = rightRotate16(s2); - s3 = rightRotate24(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all cells in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; -} - -void forkskinny_128_384_decrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_384_state_t state; - forkskinny_128_384_state_t fstate; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.TK3[0] = le_load_word32(key + 32); - state.TK3[1] = le_load_word32(key + 36); - state.TK3[2] = le_load_word32(key + 40); - state.TK3[3] = le_load_word32(key + 44); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Fast-forward the tweakey to the end of the key schedule */ - for (round = 0; round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER * 2); ++round) { - skinny128_permute_tk(state.TK1); - skinny128_permute_tk(state.TK2); - skinny128_permute_tk(state.TK3); - skinny128_LFSR2(state.TK2[0]); - skinny128_LFSR2(state.TK2[1]); - skinny128_LFSR3(state.TK3[0]); - skinny128_LFSR3(state.TK3[1]); - } - - /* Perform the "after" rounds on the input to get back - * to the forking point in the cipher */ - for (round = (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER * 2); - round > (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); --round) { - 
forkskinny_128_384_inv_round(&state, round - 1); - } - - /* Remove the branching constant */ - state.S[0] ^= 0x08040201U; - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - - /* Roll the tweakey back another "after" rounds */ - for (round = 0; round < FORKSKINNY_128_384_ROUNDS_AFTER; ++round) { - skinny128_inv_LFSR2(state.TK2[0]); - skinny128_inv_LFSR2(state.TK2[1]); - skinny128_inv_LFSR3(state.TK3[0]); - skinny128_inv_LFSR3(state.TK3[1]); - skinny128_inv_permute_tk(state.TK1); - skinny128_inv_permute_tk(state.TK2); - skinny128_inv_permute_tk(state.TK3); - } - - /* Save the state and the tweakey at the forking point */ - fstate = state; - - /* Generate the left output block after another "before" rounds */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; round > 0; --round) { - forkskinny_128_384_inv_round(&state, round - 1); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - - /* Generate the right output block by going forward "after" - * rounds from the forking point */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); ++round) { - forkskinny_128_384_round(&fstate, round); - } - le_store_word32(output_right, fstate.S[0]); - le_store_word32(output_right + 4, fstate.S[1]); - le_store_word32(output_right + 8, fstate.S[2]); - le_store_word32(output_right + 12, fstate.S[3]); -} - -/** - * \brief Number of rounds of ForkSkinny-64-192 before forking. - */ -#define FORKSKINNY_64_192_ROUNDS_BEFORE 17 - -/** - * \brief Number of rounds of ForkSkinny-64-192 after forking. - */ -#define FORKSKINNY_64_192_ROUNDS_AFTER 23 - -/** - * \brief State information for ForkSkinny-64-192. - */ -typedef struct -{ - uint16_t TK1[4]; /**< First part of the tweakey */ - uint16_t TK2[4]; /**< Second part of the tweakey */ - uint16_t TK3[4]; /**< Third part of the tweakey */ - uint16_t S[4]; /**< Current block state */ - -} forkskinny_64_192_state_t; - -/** - * \brief Applies one round of ForkSkinny-64-192. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - * - * Note: The cells of each row are order in big-endian nibble order - * so it is easiest to manage the rows in bit-endian byte order. 
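
The note above about big-endian nibble ordering is why the 64-bit variant loads and stores its four 16-bit rows with the big-endian helpers (be_load_word16/be_store_word16) rather than the little-endian ones used by the 128-bit variants. A one-line illustration of that packing, mirroring the be_load_word16() macro from internal-util.h (hypothetical helper, for exposition only):

    #include <stdint.h>

    /* Mirrors be_load_word16() from internal-util.h: the bytes { 0x01, 0x23 }
     * load as the row value 0x0123, so the first nibble of the block ends up
     * in the most significant position of the row word, consistent with the
     * big-endian nibble ordering described above. */
    static inline uint16_t load_row_be16(const unsigned char *p)
    {
        return (uint16_t)(((uint16_t)p[0] << 8) | p[1]);
    }
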
- */ -static void forkskinny_64_192_round - (forkskinny_64_192_state_t *state, unsigned round) -{ - uint16_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Apply the S-box to all cells in the state */ - skinny64_sbox(s0); - skinny64_sbox(s1); - skinny64_sbox(s2); - skinny64_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - ((rc & 0x0F) << 12) ^ 0x0020; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ - ((rc & 0x70) << 8); - s2 ^= 0x2000; - - /* Shift the cells in the rows right */ - s1 = rightRotate4_16(s1); - s2 = rightRotate8_16(s2); - s3 = rightRotate12_16(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny64_permute_tk(state->TK1); - skinny64_permute_tk(state->TK2); - skinny64_permute_tk(state->TK3); - skinny64_LFSR2(state->TK2[0]); - skinny64_LFSR2(state->TK2[1]); - skinny64_LFSR3(state->TK3[0]); - skinny64_LFSR3(state->TK3[1]); -} - -void forkskinny_64_192_encrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_64_192_state_t state; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = be_load_word16(key); - state.TK1[1] = be_load_word16(key + 2); - state.TK1[2] = be_load_word16(key + 4); - state.TK1[3] = be_load_word16(key + 6); - state.TK2[0] = be_load_word16(key + 8); - state.TK2[1] = be_load_word16(key + 10); - state.TK2[2] = be_load_word16(key + 12); - state.TK2[3] = be_load_word16(key + 14); - state.TK3[0] = be_load_word16(key + 16); - state.TK3[1] = be_load_word16(key + 18); - state.TK3[2] = be_load_word16(key + 20); - state.TK3[3] = be_load_word16(key + 22); - state.S[0] = be_load_word16(input); - state.S[1] = be_load_word16(input + 2); - state.S[2] = be_load_word16(input + 4); - state.S[3] = be_load_word16(input + 6); - - /* Run all of the rounds before the forking point */ - for (round = 0; round < FORKSKINNY_64_192_ROUNDS_BEFORE; ++round) { - forkskinny_64_192_round(&state, round); - } - - /* Determine which output blocks we need */ - if (output_left && output_right) { - /* We need both outputs so save the state at the forking point */ - uint16_t F[4]; - F[0] = state.S[0]; - F[1] = state.S[1]; - F[2] = state.S[2]; - F[3] = state.S[3]; - - /* Generate the right output block */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; - round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); ++round) { - forkskinny_64_192_round(&state, round); - } - be_store_word16(output_right, state.S[0]); - be_store_word16(output_right + 2, state.S[1]); - be_store_word16(output_right + 4, state.S[2]); - be_store_word16(output_right + 6, state.S[3]); - - /* Restore the state at the forking point */ - state.S[0] = F[0]; - state.S[1] = F[1]; - state.S[2] = F[2]; - state.S[3] = F[3]; - } - if (output_left) { - /* Generate the left output block */ - state.S[0] ^= 0x1249U; /* Branching constant */ - state.S[1] ^= 0x36daU; - state.S[2] ^= 0x5b7fU; - state.S[3] ^= 0xec81U; - for (round = (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); - round < 
(FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER * 2); ++round) { - forkskinny_64_192_round(&state, round); - } - be_store_word16(output_left, state.S[0]); - be_store_word16(output_left + 2, state.S[1]); - be_store_word16(output_left + 4, state.S[2]); - be_store_word16(output_left + 6, state.S[3]); - } else { - /* We only need the right output block */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; - round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); ++round) { - forkskinny_64_192_round(&state, round); - } - be_store_word16(output_right, state.S[0]); - be_store_word16(output_right + 2, state.S[1]); - be_store_word16(output_right + 4, state.S[2]); - be_store_word16(output_right + 6, state.S[3]); - } -} - -/** - * \brief Applies one round of ForkSkinny-64-192 in reverse. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_64_192_inv_round - (forkskinny_64_192_state_t *state, unsigned round) -{ - uint16_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny64_inv_LFSR2(state->TK2[0]); - skinny64_inv_LFSR2(state->TK2[1]); - skinny64_inv_LFSR3(state->TK3[0]); - skinny64_inv_LFSR3(state->TK3[1]); - skinny64_inv_permute_tk(state->TK1); - skinny64_inv_permute_tk(state->TK2); - skinny64_inv_permute_tk(state->TK3); - - /* Inverse mix of the columns */ - temp = s0; - s0 = s1; - s1 = s2; - s2 = s3; - s3 = temp ^ s2; - s2 ^= s0; - s1 ^= s2; - - /* Shift the cells in the rows left */ - s1 = leftRotate4_16(s1); - s2 = leftRotate8_16(s2); - s3 = leftRotate12_16(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - ((rc & 0x0F) << 12) ^ 0x0020; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ - ((rc & 0x70) << 8); - s2 ^= 0x2000; - - /* Apply the inverse of the S-box to all cells in the state */ - skinny64_inv_sbox(s0); - skinny64_inv_sbox(s1); - skinny64_inv_sbox(s2); - skinny64_inv_sbox(s3); - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; -} - -void forkskinny_64_192_decrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_64_192_state_t state; - forkskinny_64_192_state_t fstate; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = be_load_word16(key); - state.TK1[1] = be_load_word16(key + 2); - state.TK1[2] = be_load_word16(key + 4); - state.TK1[3] = be_load_word16(key + 6); - state.TK2[0] = be_load_word16(key + 8); - state.TK2[1] = be_load_word16(key + 10); - state.TK2[2] = be_load_word16(key + 12); - state.TK2[3] = be_load_word16(key + 14); - state.TK3[0] = be_load_word16(key + 16); - state.TK3[1] = be_load_word16(key + 18); - state.TK3[2] = be_load_word16(key + 20); - state.TK3[3] = be_load_word16(key + 22); - state.S[0] = be_load_word16(input); - state.S[1] = be_load_word16(input + 2); - state.S[2] = be_load_word16(input + 4); - state.S[3] = be_load_word16(input + 6); - - /* Fast-forward the tweakey to the end of the key schedule */ - for (round = 0; round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER * 2); ++round) { - skinny64_permute_tk(state.TK1); - skinny64_permute_tk(state.TK2); - 
skinny64_permute_tk(state.TK3); - skinny64_LFSR2(state.TK2[0]); - skinny64_LFSR2(state.TK2[1]); - skinny64_LFSR3(state.TK3[0]); - skinny64_LFSR3(state.TK3[1]); - } - - /* Perform the "after" rounds on the input to get back - * to the forking point in the cipher */ - for (round = (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER * 2); - round > (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); --round) { - forkskinny_64_192_inv_round(&state, round - 1); - } - - /* Remove the branching constant */ - state.S[0] ^= 0x1249U; - state.S[1] ^= 0x36daU; - state.S[2] ^= 0x5b7fU; - state.S[3] ^= 0xec81U; - - /* Roll the tweakey back another "after" rounds */ - for (round = 0; round < FORKSKINNY_64_192_ROUNDS_AFTER; ++round) { - skinny64_inv_LFSR2(state.TK2[0]); - skinny64_inv_LFSR2(state.TK2[1]); - skinny64_inv_LFSR3(state.TK3[0]); - skinny64_inv_LFSR3(state.TK3[1]); - skinny64_inv_permute_tk(state.TK1); - skinny64_inv_permute_tk(state.TK2); - skinny64_inv_permute_tk(state.TK3); - } - - /* Save the state and the tweakey at the forking point */ - fstate = state; - - /* Generate the left output block after another "before" rounds */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; round > 0; --round) { - forkskinny_64_192_inv_round(&state, round - 1); - } - be_store_word16(output_left, state.S[0]); - be_store_word16(output_left + 2, state.S[1]); - be_store_word16(output_left + 4, state.S[2]); - be_store_word16(output_left + 6, state.S[3]); - - /* Generate the right output block by going forward "after" - * rounds from the forking point */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; - round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); ++round) { - forkskinny_64_192_round(&fstate, round); - } - be_store_word16(output_right, fstate.S[0]); - be_store_word16(output_right + 2, fstate.S[1]); - be_store_word16(output_right + 4, fstate.S[2]); - be_store_word16(output_right + 6, fstate.S[3]); -} diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-forkskinny.h b/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-forkskinny.h deleted file mode 100644 index 0c1a707..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-forkskinny.h +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LW_INTERNAL_FORKSKINNY_H -#define LW_INTERNAL_FORKSKINNY_H - -/** - * \file internal-forkskinny.h - * \brief ForkSkinny block cipher family. - * - * ForkSkinny is a modified version of the SKINNY block cipher that - * supports "forking": half-way through the rounds the cipher is - * forked in two different directions to produce two different outputs. - * - * References: https://www.esat.kuleuven.be/cosic/forkae/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts a block of plaintext with ForkSkinny-128-256. - * - * \param key 256-bit tweakey for ForkSkinny-128-256. - * \param output_left Left output block for the ciphertext, or NULL if - * the left output is not required. - * \param output_right Right output block for the authentication tag, - * or NULL if the right output is not required. - * \param input 128-bit input plaintext block. - * - * ForkSkinny-128-192 also uses this function with a padded tweakey. - */ -void forkskinny_128_256_encrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Decrypts a block of ciphertext with ForkSkinny-128-256. - * - * \param key 256-bit tweakey for ForkSkinny-128-256. - * \param output_left Left output block, which is the plaintext. - * \param output_right Right output block for the authentication tag. - * \param input 128-bit input ciphertext block. - * - * Both output blocks will be populated; neither is optional. - */ -void forkskinny_128_256_decrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Encrypts a block of plaintext with ForkSkinny-128-384. - * - * \param key 384-bit tweakey for ForkSkinny-128-384. - * \param output_left Left output block for the ciphertext, or NULL if - * the left output is not required. - * \param output_right Right output block for the authentication tag, - * or NULL if the right output is not required. - * \param input 128-bit input plaintext block. - * - * ForkSkinny-128-288 also uses this function with a padded tweakey. - */ -void forkskinny_128_384_encrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Decrypts a block of ciphertext with ForkSkinny-128-384. - * - * \param key 384-bit tweakey for ForkSkinny-128-384. - * \param output_left Left output block, which is the plaintext. - * \param output_right Right output block for the authentication tag. - * \param input 128-bit input ciphertext block. - * - * Both output blocks will be populated; neither is optional. - */ -void forkskinny_128_384_decrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Encrypts a block of input with ForkSkinny-64-192. - * - * \param key 192-bit tweakey for ForkSkinny-64-192. - * \param output_left First output block, or NULL if left is not required. - * \param output_right Second output block, or NULL if right is not required. - * \param input 64-bit input block. - */ -/** - * \brief Encrypts a block of plaintext with ForkSkinny-64-192. - * - * \param key 192-bit tweakey for ForkSkinny-64-192. - * \param output_left Left output block for the ciphertext, or NULL if - * the left output is not required. - * \param output_right Right output block for the authentication tag, - * or NULL if the right output is not required. 
- * \param input 64-bit input plaintext block. - */ -void forkskinny_64_192_encrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Decrypts a block of ciphertext with ForkSkinny-64-192. - * - * \param key 192-bit tweakey for ForkSkinny-64-192. - * \param output_left Left output block, which is the plaintext. - * \param output_right Right output block for the authentication tag. - * \param input 64-bit input ciphertext block. - * - * Both output blocks will be populated; neither is optional. - */ -void forkskinny_64_192_decrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-skinnyutil.h b/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-skinnyutil.h deleted file mode 100644 index 83136cb..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-skinnyutil.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNYUTIL_H -#define LW_INTERNAL_SKINNYUTIL_H - -/** - * \file internal-skinnyutil.h - * \brief Utilities to help implement SKINNY and its variants. 
- */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @cond skinnyutil */ - -/* Utilities for implementing SKINNY-128 */ - -#define skinny128_LFSR2(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ - (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ - } while (0) - - -#define skinny128_LFSR3(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ - (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) -#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) - -#define skinny128_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint32_t row2 = tk[2]; \ - uint32_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 16) | (row3 >> 16); \ - tk[0] = ((row2 >> 8) & 0x000000FFU) | \ - ((row2 << 16) & 0x00FF0000U) | \ - ( row3 & 0xFF00FF00U); \ - tk[1] = ((row2 >> 16) & 0x000000FFU) | \ - (row2 & 0xFF000000U) | \ - ((row3 << 8) & 0x0000FF00U) | \ - ( row3 & 0x00FF0000U); \ - } while (0) - -#define skinny128_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint32_t row0 = tk[0]; \ - uint32_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 >> 16) & 0x000000FFU) | \ - ((row0 << 8) & 0x0000FF00U) | \ - ((row1 << 16) & 0x00FF0000U) | \ - ( row1 & 0xFF000000U); \ - tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ - ((row0 << 16) & 0xFF000000U) | \ - ((row1 >> 16) & 0x000000FFU) | \ - ((row1 << 8) & 0x00FF0000U); \ - } while (0) - -/* - * Apply the SKINNY sbox. The original version from the specification is - * equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE(x) - * ((((x) & 0x01010101U) << 2) | - * (((x) & 0x06060606U) << 5) | - * (((x) & 0x20202020U) >> 5) | - * (((x) & 0xC8C8C8C8U) >> 2) | - * (((x) & 0x10101010U) >> 1)) - * - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * return SBOX_SWAP(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ - y = (((x >> 5) & (x << 1)) & 0x04040404U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ - x = ((x & 0x08080808U) << 1) | \ - ((x & 0x32323232U) << 2) | \ - ((x & 0x01010101U) << 5) | \ - ((x & 0x80808080U) >> 6) | \ - ((x & 0x40404040U) >> 4) | \ - ((x & 0x04040404U) >> 2); \ -} while (0) - -/* - * Apply the inverse of the SKINNY sbox. 
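
The comment above quotes the straight-from-the-specification form of the SKINNY-128 S-box and explains that the optimised macro mixes bits in place and folds the three SBOX_PERMUTE steps and the final SBOX_SWAP into a single output permutation. Transcribing the quoted macros into a plain function gives a convenient cross-check for the bit-sliced version; the surrounding scaffolding is an assumption, but the formulas are copied verbatim from the comment:

    #include <stdint.h>

    #define SBOX_MIX(x) \
        (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x))
    #define SBOX_SWAP(x) \
        (((x) & 0xF9F9F9F9U) | \
         (((x) >> 1) & 0x02020202U) | \
         (((x) << 1) & 0x04040404U))
    #define SBOX_PERMUTE(x) \
        ((((x) & 0x01010101U) << 2) | \
         (((x) & 0x06060606U) << 5) | \
         (((x) & 0x20202020U) >> 5) | \
         (((x) & 0xC8C8C8C8U) >> 2) | \
         (((x) & 0x10101010U) >> 1))

    /* Reference S-box built exactly as the comment above describes; per that
     * comment it should agree with skinny128_sbox() for every 32-bit input. */
    static uint32_t skinny128_sbox_reference(uint32_t x)
    {
        x = SBOX_MIX(x);
        x = SBOX_PERMUTE(x);
        x = SBOX_MIX(x);
        x = SBOX_PERMUTE(x);
        x = SBOX_MIX(x);
        x = SBOX_PERMUTE(x);
        x = SBOX_MIX(x);
        return SBOX_SWAP(x);
    }
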
The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE_INV(x) - * ((((x) & 0x08080808U) << 1) | - * (((x) & 0x32323232U) << 2) | - * (((x) & 0x01010101U) << 5) | - * (((x) & 0xC0C0C0C0U) >> 5) | - * (((x) & 0x04040404U) >> 2)) - * - * x = SBOX_SWAP(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_inv_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ - x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ - y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ - x = ((x & 0x01010101U) << 2) | \ - ((x & 0x04040404U) << 4) | \ - ((x & 0x02020202U) << 6) | \ - ((x & 0x20202020U) >> 5) | \ - ((x & 0xC8C8C8C8U) >> 2) | \ - ((x & 0x10101010U) >> 1); \ -} while (0) - -/* Utilities for implementing SKINNY-64 */ - -#define skinny64_LFSR2(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ - } while (0) - -#define skinny64_LFSR3(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) -#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) - -#define skinny64_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint16_t row2 = tk[2]; \ - uint16_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 8) | (row3 >> 8); \ - tk[0] = ((row2 << 4) & 0xF000U) | \ - ((row2 >> 8) & 0x00F0U) | \ - ( row3 & 0x0F0FU); \ - tk[1] = ((row2 << 8) & 0xF000U) | \ - ((row3 >> 4) & 0x0F00U) | \ - ( row3 & 0x00F0U) | \ - ( row2 & 0x000FU); \ - } while (0) - -#define skinny64_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint16_t row0 = tk[0]; \ - uint16_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 << 8) & 0xF000U) | \ - ((row0 >> 4) & 0x0F00U) | \ - ((row1 >> 8) & 0x00F0U) | \ - ( row1 & 0x000FU); \ - tk[3] = ((row1 << 8) & 0xF000U) | \ - ((row0 << 8) & 0x0F00U) | \ - ((row1 >> 4) & 0x00F0U) | \ - ((row0 >> 8) & 0x000FU); \ - } while (0) - -/* - * Apply the SKINNY-64 sbox. 
The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT(x) - * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_SHIFT steps to be performed with one final rotation. - * This reduces the number of required shift operations from 14 to 10. - * - * We can further reduce the number of NOT operations from 4 to 2 - * using the technique from https://github.com/kste/skinny_avx to - * convert NOR-XOR operations into AND-XOR operations by converting - * the S-box into its NOT-inverse. - */ -#define skinny64_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ - x = ~x; \ - x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ -} while (0) - -/* - * Apply the inverse of the SKINNY-64 sbox. The original version - * from the specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT_INV(x) - * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * return SBOX_MIX(x); - */ -#define skinny64_inv_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = ~x; \ - x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ -} while (0) - -/** @endcond */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-util.h b/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define 
le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. 
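
Just before the rotation helpers, the header defines explicit byte-order load/store macros so the code never depends on the host representation. A small round-trip check of the 32-bit little-endian pair, written as hypothetical test code that simply inlines the same shifts as le_store_word32()/le_load_word32():

    #include <assert.h>
    #include <stdint.h>

    static void le_word32_roundtrip_check(void)
    {
        unsigned char buf[4];
        uint32_t x = 0x12345678U;

        /* Same layout as le_store_word32(): least significant byte first. */
        buf[0] = (uint8_t)x;
        buf[1] = (uint8_t)(x >> 8);
        buf[2] = (uint8_t)(x >> 16);
        buf[3] = (uint8_t)(x >> 24);

        /* Same layout as le_load_word32(). */
        uint32_t y = ((uint32_t)buf[3] << 24) | ((uint32_t)buf[2] << 16) |
                     ((uint32_t)buf[1] << 8)  |  (uint32_t)buf[0];

        assert(y == x);
        assert(buf[0] == 0x78 && buf[3] == 0x12);
    }
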
*/ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys/internal-util.h b/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys/internal-util.h +++ b/forkae/Implementations/crypto_aead/saefforkskinnyb128t192n56v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/aead-common.c b/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/aead-common.h b/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. 
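 *
 * Editorial sketch of the calling convention documented below; the buffer
 * names and sizes here are hypothetical, and the cipher pointer refers to
 * one of the aead_cipher_t meta-information blocks defined elsewhere in
 * this library (for example forkae_saef_128_256_cipher):
 *
 *     unsigned char c[MLEN + 16];        // ciphertext plus 16-byte tag
 *     unsigned long long clen;
 *     if (cipher->encrypt(c, &clen, m, MLEN, ad, ADLEN,
 *                         NULL, npub, key) == 0) {
 *         // clen == MLEN + cipher->tag_len on success
 *     }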
- */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. 
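 *
 * Editorial sketch of the incremental pattern that the init, update and
 * finalize callbacks implement together; the algorithm pointer, state and
 * message buffers are hypothetical, with the state sized according to the
 * algorithm's state_size field:
 *
 *     alg->init(state);
 *     alg->update(state, msg, msglen);
 *     alg->finalize(state, digest);      // digest holds hash_len bytes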
- */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. 
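 *
 * Editorial note on the constant-time construction used by the
 * implementation in aead-common.c: the byte differences between the two
 * tags are OR-ed into an accumulator, so the accumulator is zero exactly
 * when the tags are equal, and the derived mask selects between keeping
 * and zeroing the plaintext without a data-dependent branch:
 *
 *     accum |= (*tag1++ ^ *tag2++);   // non-zero if any byte differs
 *     accum = (accum - 1) >> 8;       // all-ones on match, zero on mismatch
 *     *plaintext++ &= accum;          // keep the plaintext, or wipe it
 *     return ~accum;                  // 0 on success, -1 on failure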
- */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/api.h b/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/api.h deleted file mode 100644 index 86e276c..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 15 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/encrypt.c b/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/encrypt.c deleted file mode 100644 index 7d59b31..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "forkae.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return forkae_saef_128_256_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return forkae_saef_128_256_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/forkae.c b/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/forkae.c deleted file mode 100644 index 4a9671a..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/forkae.c +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "forkae.h" -#include "internal-forkskinny.h" -#include "internal-util.h" -#include - -aead_cipher_t const forkae_paef_64_192_cipher = { - "PAEF-ForkSkinny-64-192", - FORKAE_PAEF_64_192_KEY_SIZE, - FORKAE_PAEF_64_192_NONCE_SIZE, - FORKAE_PAEF_64_192_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_64_192_aead_encrypt, - forkae_paef_64_192_aead_decrypt -}; - -aead_cipher_t const forkae_paef_128_192_cipher = { - "PAEF-ForkSkinny-128-192", - FORKAE_PAEF_128_192_KEY_SIZE, - FORKAE_PAEF_128_192_NONCE_SIZE, - FORKAE_PAEF_128_192_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_128_192_aead_encrypt, - forkae_paef_128_192_aead_decrypt -}; - -aead_cipher_t const forkae_paef_128_256_cipher = { - "PAEF-ForkSkinny-128-256", - FORKAE_PAEF_128_256_KEY_SIZE, - FORKAE_PAEF_128_256_NONCE_SIZE, - FORKAE_PAEF_128_256_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_128_256_aead_encrypt, - forkae_paef_128_256_aead_decrypt -}; - -aead_cipher_t const forkae_paef_128_288_cipher = { - "PAEF-ForkSkinny-128-288", - FORKAE_PAEF_128_288_KEY_SIZE, - FORKAE_PAEF_128_288_NONCE_SIZE, - FORKAE_PAEF_128_288_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_paef_128_288_aead_encrypt, - forkae_paef_128_288_aead_decrypt -}; - -aead_cipher_t const forkae_saef_128_192_cipher = { - "SAEF-ForkSkinny-128-192", - FORKAE_SAEF_128_192_KEY_SIZE, - FORKAE_SAEF_128_192_NONCE_SIZE, - FORKAE_SAEF_128_192_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_saef_128_192_aead_encrypt, - forkae_saef_128_192_aead_decrypt -}; - -aead_cipher_t const forkae_saef_128_256_cipher = { - "SAEF-ForkSkinny-128-256", - FORKAE_SAEF_128_256_KEY_SIZE, - FORKAE_SAEF_128_256_NONCE_SIZE, - FORKAE_SAEF_128_256_TAG_SIZE, - AEAD_FLAG_NONE, - forkae_saef_128_256_aead_encrypt, - forkae_saef_128_256_aead_decrypt -}; - -/* PAEF-ForkSkinny-64-192 */ -#define FORKAE_ALG_NAME forkae_paef_64_192 -#define FORKAE_BLOCK_SIZE 8 -#define FORKAE_NONCE_SIZE FORKAE_PAEF_64_192_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 2 -#define FORKAE_TWEAKEY_SIZE 24 -#define FORKAE_BLOCK_FUNC forkskinny_64_192 -#include "internal-forkae-paef.h" - -/* PAEF-ForkSkinny-128-192 */ -#define FORKAE_ALG_NAME forkae_paef_128_192 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_PAEF_128_192_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 2 -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-paef.h" - -/* PAEF-ForkSkinny-128-256 */ -#define FORKAE_ALG_NAME forkae_paef_128_256 -#define FORKAE_BLOCK_SIZE 16 
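/*
 * Editorial note, not part of the original source: each block of FORKAE_*
 * defines in this file acts as a set of template parameters, and the
 * #include of internal-forkae-paef.h or internal-forkae-saef.h that follows
 * expands into the concrete AEAD functions for that variant, presumably by
 * token-pasting FORKAE_ALG_NAME into names such as
 * forkae_paef_128_256_aead_encrypt.  A minimal sketch of the presumed
 * mechanism inside the included template header:
 *
 *     #define FORKAE_CONCAT_INNER(a, b) a##b
 *     #define FORKAE_CONCAT(a, b) FORKAE_CONCAT_INNER(a, b)
 *
 *     int FORKAE_CONCAT(FORKAE_ALG_NAME, _aead_encrypt)
 *         (unsigned char *c, unsigned long long *clen, ...);
 *
 * The defines are given new values before each further include, so the one
 * template serves every member of the family.
 */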
-#define FORKAE_NONCE_SIZE FORKAE_PAEF_128_256_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 2 -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-paef.h" - -/* PAEF-ForkSkinny-128-288 */ -#define FORKAE_ALG_NAME forkae_paef_128_288 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_PAEF_128_288_NONCE_SIZE -#define FORKAE_COUNTER_SIZE 7 -#define FORKAE_TWEAKEY_SIZE 48 -#define FORKAE_BLOCK_FUNC forkskinny_128_384 -#include "internal-forkae-paef.h" - -/* SAEF-ForkSkinny-128-192 */ -#define FORKAE_ALG_NAME forkae_saef_128_192 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_SAEF_128_192_NONCE_SIZE -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_TWEAKEY_REDUCED_SIZE 24 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-saef.h" - -/* SAEF-ForkSkinny-128-256 */ -#define FORKAE_ALG_NAME forkae_saef_128_256 -#define FORKAE_BLOCK_SIZE 16 -#define FORKAE_NONCE_SIZE FORKAE_SAEF_128_256_NONCE_SIZE -#define FORKAE_TWEAKEY_SIZE 32 -#define FORKAE_TWEAKEY_REDUCED_SIZE 32 -#define FORKAE_BLOCK_FUNC forkskinny_128_256 -#include "internal-forkae-saef.h" diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/forkae.h b/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/forkae.h deleted file mode 100644 index 3e27b50..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/forkae.h +++ /dev/null @@ -1,551 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_FORKAE_H -#define LWCRYPTO_FORKAE_H - -#include "aead-common.h" - -/** - * \file forkae.h - * \brief ForkAE authenticated encryption algorithm family. - * - * ForkAE is a family of authenticated encryption algorithms based on a - * modified version of the SKINNY tweakable block cipher. The modifications - * introduce "forking" where each input block produces two output blocks - * for use in encryption and authentication. There are six members in - * the ForkAE family: - * - * \li PAEF-ForkSkinny-64-192 has a 128-bit key, a 48-bit nonce, and a - * 64-bit authentication tag. The associated data and plaintext are - * limited to 216 bytes. - * \li PAEF-ForkSkinny-128-192 has a 128-bit key, a 48-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext are - * limited to 217 bytes. 
- * \li PAEF-ForkSkinny-128-256 has a 128-bit key, a 112-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext are - * limited to 217 bytes. - * \li PAEF-ForkSkinny-128-288 has a 128-bit key, a 104-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext are - * limited to 257 bytes. This is the primary member of the family. - * \li SAEF-ForkSkinny-128-192 has a 128-bit key, a 56-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext may be - * unlimited in size. - * \li SAEF-ForkSkinny-128-256 has a 128-bit key, a 120-bit nonce, and a - * 128-bit authentication tag. The associated data and plaintext may be - * unlimited in size. - * - * The PAEF variants support parallel encryption and decryption for - * higher throughput. The SAEF variants encrypt or decrypt blocks - * sequentially. - * - * ForkAE is designed to be efficient on small packet sizes so most of - * the PAEF algorithms have a limit of 64k or 128k on the amount of - * payload in a single packet. Obviously the input can be split into - * separate packets for larger amounts of data. - * - * References: https://www.esat.kuleuven.be/cosic/forkae/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for PAEF-ForkSkinny-64-192. - */ -#define FORKAE_PAEF_64_192_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-64-192. - */ -#define FORKAE_PAEF_64_192_TAG_SIZE 8 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-64-192. - */ -#define FORKAE_PAEF_64_192_NONCE_SIZE 6 - -/** - * \brief Size of the key for PAEF-ForkSkinny-128-192. - */ -#define FORKAE_PAEF_128_192_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-128-192. - */ -#define FORKAE_PAEF_128_192_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-128-192. - */ -#define FORKAE_PAEF_128_192_NONCE_SIZE 6 - -/** - * \brief Size of the key for PAEF-ForkSkinny-128-256. - */ -#define FORKAE_PAEF_128_256_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-128-256. - */ -#define FORKAE_PAEF_128_256_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-128-256. - */ -#define FORKAE_PAEF_128_256_NONCE_SIZE 14 - -/** - * \brief Size of the key for PAEF-ForkSkinny-128-288. - */ -#define FORKAE_PAEF_128_288_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PAEF-ForkSkinny-128-288. - */ -#define FORKAE_PAEF_128_288_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PAEF-ForkSkinny-128-288. - */ -#define FORKAE_PAEF_128_288_NONCE_SIZE 13 - -/** - * \brief Size of the key for SAEF-ForkSkinny-128-192. - */ -#define FORKAE_SAEF_128_192_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SAEF-ForkSkinny-128-192. - */ -#define FORKAE_SAEF_128_192_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SAEF-ForkSkinny-128-192. - */ -#define FORKAE_SAEF_128_192_NONCE_SIZE 7 - -/** - * \brief Size of the key for SAEF-ForkSkinny-128-256. - */ -#define FORKAE_SAEF_128_256_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SAEF-ForkSkinny-128-256. - */ -#define FORKAE_SAEF_128_256_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SAEF-ForkSkinny-128-256. - */ -#define FORKAE_SAEF_128_256_NONCE_SIZE 15 - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-64-192 cipher. - */ -extern aead_cipher_t const forkae_paef_64_192_cipher; - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-128-192 cipher. 
- */ -extern aead_cipher_t const forkae_paef_128_192_cipher; - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-128-256 cipher. - */ -extern aead_cipher_t const forkae_paef_128_256_cipher; - -/** - * \brief Meta-information block for the PAEF-ForkSkinny-128-288 cipher. - */ -extern aead_cipher_t const forkae_paef_128_288_cipher; - -/** - * \brief Meta-information block for the SAEF-ForkSkinny-128-192 cipher. - */ -extern aead_cipher_t const forkae_saef_128_192_cipher; - -/** - * \brief Meta-information block for the SAEF-ForkSkinny-128-256 cipher. - */ -extern aead_cipher_t const forkae_saef_128_256_cipher; - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-64-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_64_192_aead_decrypt() - */ -int forkae_paef_64_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-64-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_paef_64_192_aead_encrypt() - */ -int forkae_paef_64_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-128-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. 
- * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_128_192_aead_decrypt() - */ -int forkae_paef_128_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-128-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 6 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_paef_128_192_aead_encrypt() - */ -int forkae_paef_128_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-128-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 14 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_128_256_aead_decrypt() - */ -int forkae_paef_128_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-128-256. 
- * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 14 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_paef_128_256_aead_encrypt() - */ -int forkae_paef_128_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PAEF-ForkSkinny-128-288. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 13 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_paef_128_288_aead_decrypt() - */ -int forkae_paef_128_288_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PAEF-ForkSkinny-128-288. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 13 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- * - * \sa forkae_paef_128_288_aead_encrypt() - */ -int forkae_paef_128_288_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SAEF-ForkSkinny-128-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 7 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_saef_128_192_aead_decrypt() - */ -int forkae_saef_128_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SAEF-ForkSkinny-128-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 7 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_saef_128_192_aead_encrypt() - */ -int forkae_saef_128_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SAEF-ForkSkinny-128-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. 
- * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa forkae_saef_128_256_aead_decrypt() - */ -int forkae_saef_128_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SAEF-ForkSkinny-128-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 15 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa forkae_saef_128_256_aead_encrypt() - */ -int forkae_saef_128_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-forkae-paef.h b/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-forkae-paef.h deleted file mode 100644 index 6f57b2b..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-forkae-paef.h +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
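[Editor's aside, not part of the original patch: a minimal, hypothetical caller of the AEAD interface declared in the header above, following the buffer sizes given in its doc comments for PAEF-ForkSkinny-64-192 (16-byte key, 6-byte public nonce, 8-byte tag appended to the ciphertext). The function and variable names below are invented for the example, and the appropriate header include for the declarations above is assumed.]

#include <string.h>

static int paef_64_192_roundtrip_example(void)
{
    static const unsigned char key[16] = {0};     /* 16-byte key */
    static const unsigned char nonce[6] = {0};    /* 6-byte public nonce */
    static const unsigned char msg[] = "hello";   /* plaintext to protect */
    static const unsigned char ad[] = "header";   /* associated data */
    unsigned char ct[sizeof(msg) + 8];            /* ciphertext + 8-byte tag */
    unsigned char pt[sizeof(msg)];
    unsigned long long clen, mlen;

    /* Encrypt: on success clen is set to sizeof(msg) + 8 */
    if (forkae_paef_64_192_aead_encrypt(ct, &clen, msg, sizeof(msg),
                                        ad, sizeof(ad), NULL, nonce, key) != 0)
        return -1;

    /* Decrypt: returns 0 only if the 8-byte authentication tag verifies */
    if (forkae_paef_64_192_aead_decrypt(pt, &mlen, NULL, ct, clen,
                                        ad, sizeof(ad), nonce, key) != 0)
        return -1;

    return (mlen == sizeof(msg) && memcmp(pt, msg, sizeof(msg)) == 0) ? 0 : -1;
}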
- */ - -/* We expect a number of macros to be defined before this file - * is included to configure the underlying ForkAE PAEF variant. - * - * FORKAE_ALG_NAME Name of the FORKAE algorithm; e.g. forkae_paef_128_256 - * FORKAE_BLOCK_SIZE Size of the block for the cipher (8 or 16 bytes). - * FORKAE_NONCE_SIZE Size of the nonce for the cipher in bytes. - * FORKAE_COUNTER_SIZE Size of the counter value for the cipher in bytes. - * FORKAE_TWEAKEY_SIZE Size of the tweakey for the underlying forked cipher. - * FORKAE_BLOCK_FUNC Name of the block function; e.g. forkskinny_128_256 - */ -#if defined(FORKAE_ALG_NAME) - -#define FORKAE_CONCAT_INNER(name,suffix) name##suffix -#define FORKAE_CONCAT(name,suffix) FORKAE_CONCAT_INNER(name,suffix) - -/* Limit on the amount of data we can process based on the counter size */ -#define FORKAE_PAEF_DATA_LIMIT \ - ((unsigned long long)((1ULL << (FORKAE_COUNTER_SIZE * 8)) * \ - (FORKAE_BLOCK_SIZE / 8)) - FORKAE_BLOCK_SIZE) - -/* Encodes the block counter and domain separator into the PAEF tweakey */ -STATIC_INLINE void FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter) - (unsigned char tweakey[FORKAE_TWEAKEY_SIZE], - unsigned long long counter, unsigned char domain) -{ - unsigned posn; - counter |= (((unsigned long long)domain) << (FORKAE_COUNTER_SIZE * 8 - 3)); - for (posn = 0; posn < FORKAE_COUNTER_SIZE; ++posn) { - tweakey[16 + FORKAE_NONCE_SIZE + FORKAE_COUNTER_SIZE - 1 - posn] = - (unsigned char)counter; - counter >>= 8; - } -} - -/* Check that the last block is padded correctly; -1 if ok, 0 if not */ -STATIC_INLINE int FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (const unsigned char *block, unsigned len) -{ - int check = block[0] ^ 0x80; - while (len > 1) { - --len; - check |= block[len]; - } - return (check - 1) >> 8; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_encrypt) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - unsigned long long counter; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + FORKAE_BLOCK_SIZE; - - /* Validate the size of the associated data and plaintext as there - * is a limit on the size of the PAEF counter field */ - if (adlen > FORKAE_PAEF_DATA_LIMIT || mlen > FORKAE_PAEF_DATA_LIMIT) - return -2; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - - /* Tag value starts at zero.
We will XOR this with all of the - * intermediate tag values that are calculated for each block */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - counter = 1; - while (adlen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 0); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - ++counter; - } - if (adlen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 1); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } else if (adlen != 0 || mlen == 0) { - unsigned temp = (unsigned)adlen; - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, sizeof(block) - temp - 1); - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 3); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, block); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } - - /* If there is no message payload, then generate the tag and we are done */ - if (!mlen) { - memcpy(c, tag, sizeof(tag)); - return 0; - } - - /* Encrypt all plaintext blocks except the last */ - counter = 1; - while (mlen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 4); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, m); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - mlen -= FORKAE_BLOCK_SIZE; - ++counter; - } - - /* Encrypt the last block and generate the final authentication tag */ - if (mlen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 5); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, m); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, FORKAE_BLOCK_SIZE); - } else { - unsigned temp = (unsigned)mlen; - memcpy(block, m, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, sizeof(block) - temp - 1); - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 7); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, temp); - } - return 0; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_decrypt) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - unsigned char *mtemp = m; - unsigned long long counter; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < FORKAE_BLOCK_SIZE) - return -1; - clen -= FORKAE_BLOCK_SIZE; - *mlen = clen; - - /* Validate the size of the associated data and plaintext as there - * is a limit on the size of the PAEF counter field */ - if (adlen > FORKAE_PAEF_DATA_LIMIT || clen > FORKAE_PAEF_DATA_LIMIT) - return -2; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - - /* Tag value starts at zero. 
We will XOR this with all of the - * intermediate tag values that are calculated for each block */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - counter = 1; - while (adlen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 0); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - ++counter; - } - if (adlen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 1); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, ad); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } else if (adlen != 0 || clen == 0) { - unsigned temp = (unsigned)adlen; - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, sizeof(block) - temp - 1); - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 3); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, block, block); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - } - - /* If there is no message payload, then check the tag and we are done */ - if (!clen) - return aead_check_tag(m, clen, tag, c, sizeof(tag)); - - /* Decrypt all ciphertext blocks except the last */ - counter = 1; - while (clen > FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 4); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, c); - lw_xor_block(tag, block, FORKAE_BLOCK_SIZE); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - clen -= FORKAE_BLOCK_SIZE; - ++counter; - } - - /* Decrypt the last block and check the final authentication tag */ - if (clen == FORKAE_BLOCK_SIZE) { - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 5); - lw_xor_block_2_src(m, c, tag, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, m); - return aead_check_tag - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, sizeof(tag)); - } else { - unsigned temp = (unsigned)clen; - unsigned char block2[FORKAE_BLOCK_SIZE]; - int check; - FORKAE_CONCAT(FORKAE_ALG_NAME,_set_counter)(tweakey, counter, 7); - lw_xor_block_2_src(block2, tag, c, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt) - (tweakey, block2, block, block2); - check = FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (block2 + temp, FORKAE_BLOCK_SIZE - temp); - memcpy(m, block2, temp); - return aead_check_tag_precheck - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, temp, check); - } -} - -#endif /* FORKAE_ALG_NAME */ - -/* Now undefine everything so that we can include this file again for - * another variant on the ForkAE PAEF algorithm */ -#undef FORKAE_ALG_NAME -#undef FORKAE_BLOCK_SIZE -#undef FORKAE_NONCE_SIZE -#undef FORKAE_COUNTER_SIZE -#undef FORKAE_TWEAKEY_SIZE -#undef FORKAE_BLOCK_FUNC -#undef FORKAE_CONCAT_INNER -#undef FORKAE_CONCAT -#undef FORKAE_PAEF_DATA_LIMIT diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-forkae-saef.h b/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-forkae-saef.h deleted file mode 100644 index 768bba4..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-forkae-saef.h +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
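[Editor's aside, not part of the original patch: the _is_padding helper in the PAEF file above, and its identical twin in the SAEF file that follows, verify the 0x80-then-zeros padding of a short final block without branching on the data. The sketch below merely restates that trick with comments; like the original, it assumes that right-shifting the negative intermediate value is an arithmetic shift.]

/* check becomes 0 exactly when block[0] == 0x80 and every later byte is 0 */
static int padding_check_sketch(const unsigned char *block, unsigned len)
{
    int check = block[0] ^ 0x80;      /* 0 if the 0x80 marker is present */
    while (len > 1) {
        --len;
        check |= block[len];          /* accumulate any non-zero tail byte */
    }
    /* check == 0      -> (0 - 1) >> 8     == -1 (valid padding)
     * check == 1..255 -> (check - 1) >> 8 ==  0 (invalid padding) */
    return (check - 1) >> 8;
}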
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -/* We expect a number of macros to be defined before this file - * is included to configure the underlying ForkAE SAEF variant. - * - * FORKAE_ALG_NAME Name of the FORKAE algorithm; e.g. forkae_saef_128_256 - * FORKAE_BLOCK_SIZE Size of the block for the cipher (8 or 16 bytes). - * FORKAE_NONCE_SIZE Size of the nonce for the cipher in bytes. - * FORKAE_TWEAKEY_SIZE Size of the tweakey for the underlying forked cipher. - * FORKAE_TWEAKEY_REDUCED_SIZE Size of the reduced tweakey without padding. - * FORKAE_BLOCK_FUNC Name of the block function; e.g. forkskinny_128_256 - */ -#if defined(FORKAE_ALG_NAME) - -#define FORKAE_CONCAT_INNER(name,suffix) name##suffix -#define FORKAE_CONCAT(name,suffix) FORKAE_CONCAT_INNER(name,suffix) - -/* Check that the last block is padded correctly; -1 if ok, 0 if not */ -STATIC_INLINE int FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (const unsigned char *block, unsigned len) -{ - int check = block[0] ^ 0x80; - while (len > 1) { - --len; - check |= block[len]; - } - return (check - 1) >> 8; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_encrypt) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + FORKAE_BLOCK_SIZE; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] = 0x08; - - /* Tag value starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - if (adlen > 0 || mlen == 0) { - while (adlen > FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - } - if (mlen == 0) - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x02; - if (adlen == FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); -
FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } else if (adlen != 0 || mlen == 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(tag, ad, temp); - tag[temp] ^= 0x80; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } - } - - /* If there is no message payload, then generate the tag and we are done */ - if (!mlen) { - memcpy(c, tag, sizeof(tag)); - return 0; - } - - /* Encrypt all plaintext blocks except the last */ - while (mlen > FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, m, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(tag, block, FORKAE_BLOCK_SIZE); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - mlen -= FORKAE_BLOCK_SIZE; - } - - /* Encrypt the last block and generate the final authentication tag */ - if (mlen == FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, m, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, FORKAE_BLOCK_SIZE); - } else { - unsigned temp = (unsigned)mlen; - memcpy(block, tag, FORKAE_BLOCK_SIZE); - lw_xor_block(block, m, temp); - block[temp] ^= 0x80; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x05; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, c, block, block); - lw_xor_block(c, tag, FORKAE_BLOCK_SIZE); - memcpy(c + FORKAE_BLOCK_SIZE, block, temp); - } - return 0; -} - -int FORKAE_CONCAT(FORKAE_ALG_NAME,_aead_decrypt) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char tweakey[FORKAE_TWEAKEY_SIZE]; - unsigned char tag[FORKAE_BLOCK_SIZE]; - unsigned char block[FORKAE_BLOCK_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < FORKAE_BLOCK_SIZE) - return -1; - clen -= FORKAE_BLOCK_SIZE; - *mlen = clen; - - /* Format the initial tweakey with the key and nonce */ - memcpy(tweakey, k, 16); - memcpy(tweakey + 16, npub, FORKAE_NONCE_SIZE); - memset(tweakey + 16 + FORKAE_NONCE_SIZE, 0, - FORKAE_TWEAKEY_SIZE - 16 - FORKAE_NONCE_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] = 0x08; - - /* Tag value starts at zero */ - memset(tag, 0, sizeof(tag)); - - /* Process the associated data */ - if (adlen > 0 || clen == 0) { - while (adlen > FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - ad += FORKAE_BLOCK_SIZE; - adlen -= FORKAE_BLOCK_SIZE; - } - if (clen == 0) - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x02; - if (adlen == FORKAE_BLOCK_SIZE) { - lw_xor_block(tag, ad, FORKAE_BLOCK_SIZE); - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } else if (adlen != 0 || clen == 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(tag, ad, temp); - 
tag[temp] ^= 0x80; - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_encrypt)(tweakey, 0, tag, tag); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - } - } - - /* If there is no message payload, then check the tag and we are done */ - if (!clen) - return aead_check_tag(m, clen, tag, c, sizeof(tag)); - - /* Decrypt all ciphertext blocks except the last */ - while (clen > FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, c, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x01; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, block); - lw_xor_block(m, tag, FORKAE_BLOCK_SIZE); - memcpy(tag, block, FORKAE_BLOCK_SIZE); - memset(tweakey + 16, 0, FORKAE_TWEAKEY_SIZE - 16); - c += FORKAE_BLOCK_SIZE; - m += FORKAE_BLOCK_SIZE; - clen -= FORKAE_BLOCK_SIZE; - } - - /* Decrypt the last block and check the final authentication tag */ - if (clen == FORKAE_BLOCK_SIZE) { - lw_xor_block_2_src(block, c, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x04; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt)(tweakey, m, block, block); - lw_xor_block(m, tag, FORKAE_BLOCK_SIZE); - return aead_check_tag - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, FORKAE_BLOCK_SIZE); - } else { - unsigned temp = (unsigned)clen; - unsigned char mblock[FORKAE_BLOCK_SIZE]; - int check; - lw_xor_block_2_src(block, c, tag, FORKAE_BLOCK_SIZE); - tweakey[FORKAE_TWEAKEY_REDUCED_SIZE - 1] ^= 0x05; - FORKAE_CONCAT(FORKAE_BLOCK_FUNC,_decrypt) - (tweakey, mblock, block, block); - lw_xor_block(mblock, tag, FORKAE_BLOCK_SIZE); - memcpy(m, mblock, temp); - check = FORKAE_CONCAT(FORKAE_ALG_NAME,_is_padding) - (mblock + temp, FORKAE_BLOCK_SIZE - temp); - return aead_check_tag_precheck - (mtemp, *mlen, block, c + FORKAE_BLOCK_SIZE, temp, check); - } -} - -#endif /* FORKAE_ALG_NAME */ - -/* Now undefine everything so that we can include this file again for - * another variant on the ForkAE SAEF algorithm */ -#undef FORKAE_ALG_NAME -#undef FORKAE_BLOCK_SIZE -#undef FORKAE_NONCE_SIZE -#undef FORKAE_COUNTER_SIZE -#undef FORKAE_TWEAKEY_SIZE -#undef FORKAE_TWEAKEY_REDUCED_SIZE -#undef FORKAE_BLOCK_FUNC -#undef FORKAE_CONCAT_INNER -#undef FORKAE_CONCAT diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-forkskinny.c b/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-forkskinny.c deleted file mode 100644 index b050ff1..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-forkskinny.c +++ /dev/null @@ -1,988 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-forkskinny.h" -#include "internal-skinnyutil.h" - -/** - * \brief 7-bit round constants for all ForkSkinny block ciphers. - */ -static unsigned char const RC[87] = { - 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7e, 0x7d, - 0x7b, 0x77, 0x6f, 0x5f, 0x3e, 0x7c, 0x79, 0x73, - 0x67, 0x4f, 0x1e, 0x3d, 0x7a, 0x75, 0x6b, 0x57, - 0x2e, 0x5c, 0x38, 0x70, 0x61, 0x43, 0x06, 0x0d, - 0x1b, 0x37, 0x6e, 0x5d, 0x3a, 0x74, 0x69, 0x53, - 0x26, 0x4c, 0x18, 0x31, 0x62, 0x45, 0x0a, 0x15, - 0x2b, 0x56, 0x2c, 0x58, 0x30, 0x60, 0x41, 0x02, - 0x05, 0x0b, 0x17, 0x2f, 0x5e, 0x3c, 0x78, 0x71, - 0x63, 0x47, 0x0e, 0x1d, 0x3b, 0x76, 0x6d, 0x5b, - 0x36, 0x6c, 0x59, 0x32, 0x64, 0x49, 0x12, 0x25, - 0x4a, 0x14, 0x29, 0x52, 0x24, 0x48, 0x10 -}; - -/** - * \brief Number of rounds of ForkSkinny-128-256 before forking. - */ -#define FORKSKINNY_128_256_ROUNDS_BEFORE 21 - -/** - * \brief Number of rounds of ForkSkinny-128-256 after forking. - */ -#define FORKSKINNY_128_256_ROUNDS_AFTER 27 - -/** - * \brief State information for ForkSkinny-128-256. - */ -typedef struct -{ - uint32_t TK1[4]; /**< First part of the tweakey */ - uint32_t TK2[4]; /**< Second part of the tweakey */ - uint32_t S[4]; /**< Current block state */ - -} forkskinny_128_256_state_t; - -/** - * \brief Applies one round of ForkSkinny-128-256. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_128_256_round - (forkskinny_128_256_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Apply the S-box to all cells in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(state->TK1); - skinny128_permute_tk(state->TK2); - skinny128_LFSR2(state->TK2[0]); - skinny128_LFSR2(state->TK2[1]); -} - -void forkskinny_128_256_encrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_256_state_t state; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Run all of the rounds before the forking point */ - for (round = 0; round < FORKSKINNY_128_256_ROUNDS_BEFORE; ++round) { - forkskinny_128_256_round(&state, round); - } - - /* Determine which output blocks we need */ - if (output_left && output_right) { - /* We need both outputs so save the state at the forking point */ - uint32_t F[4]; - F[0] = state.S[0]; - F[1] = state.S[1]; - F[2] = state.S[2]; - F[3] = state.S[3]; - - /* Generate the right output block */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); ++round) { - forkskinny_128_256_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - - /* Restore the state at the forking point */ - state.S[0] = F[0]; - state.S[1] = F[1]; - state.S[2] = F[2]; - state.S[3] = F[3]; - } - if (output_left) { - /* Generate the left output block */ - state.S[0] ^= 0x08040201U; /* Branching constant */ - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - for (round = (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER * 2); ++round) { - forkskinny_128_256_round(&state, round); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - } else { - /* We only need the right output block */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); ++round) { - forkskinny_128_256_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - } -} - -/** - * \brief Applies one round of ForkSkinny-128-256 in reverse. - * - * \param state State to apply the round to. 
- * \param round Number of the round to apply. - */ -static void forkskinny_128_256_inv_round - (forkskinny_128_256_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Permute TK1 and TK2 for the next round */ - skinny128_inv_LFSR2(state->TK2[0]); - skinny128_inv_LFSR2(state->TK2[1]); - skinny128_inv_permute_tk(state->TK1); - skinny128_inv_permute_tk(state->TK2); - - /* Inverse mix of the columns */ - temp = s0; - s0 = s1; - s1 = s2; - s2 = s3; - s3 = temp ^ s2; - s2 ^= s0; - s1 ^= s2; - - /* Shift the cells in the rows left, which moves the cell - * values down closer to the LSB. That is, we do a right - * rotate on the word to rotate the cells in the word left */ - s1 = rightRotate8(s1); - s2 = rightRotate16(s2); - s3 = rightRotate24(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all cells in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; -} - -void forkskinny_128_256_decrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_256_state_t state; - forkskinny_128_256_state_t fstate; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Fast-forward the tweakey to the end of the key schedule */ - for (round = 0; round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER * 2); ++round) { - skinny128_permute_tk(state.TK1); - skinny128_permute_tk(state.TK2); - skinny128_LFSR2(state.TK2[0]); - skinny128_LFSR2(state.TK2[1]); - } - - /* Perform the "after" rounds on the input to get back - * to the forking point in the cipher */ - for (round = (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER * 2); - round > (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); --round) { - forkskinny_128_256_inv_round(&state, round - 1); - } - - /* Remove the branching constant */ - state.S[0] ^= 0x08040201U; - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - - /* Roll the tweakey back another "after" rounds */ - for (round = 0; round < FORKSKINNY_128_256_ROUNDS_AFTER; ++round) { - skinny128_inv_LFSR2(state.TK2[0]); - skinny128_inv_LFSR2(state.TK2[1]); - skinny128_inv_permute_tk(state.TK1); - skinny128_inv_permute_tk(state.TK2); - } - - /* Save the state and the tweakey at the forking point */ - fstate = state; - - /* Generate the left output block after another "before" rounds */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; round > 0; --round) { - 
forkskinny_128_256_inv_round(&state, round - 1); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - - /* Generate the right output block by going forward "after" - * rounds from the forking point */ - for (round = FORKSKINNY_128_256_ROUNDS_BEFORE; - round < (FORKSKINNY_128_256_ROUNDS_BEFORE + - FORKSKINNY_128_256_ROUNDS_AFTER); ++round) { - forkskinny_128_256_round(&fstate, round); - } - le_store_word32(output_right, fstate.S[0]); - le_store_word32(output_right + 4, fstate.S[1]); - le_store_word32(output_right + 8, fstate.S[2]); - le_store_word32(output_right + 12, fstate.S[3]); -} - -/** - * \brief Number of rounds of ForkSkinny-128-384 before forking. - */ -#define FORKSKINNY_128_384_ROUNDS_BEFORE 25 - -/** - * \brief Number of rounds of ForkSkinny-128-384 after forking. - */ -#define FORKSKINNY_128_384_ROUNDS_AFTER 31 - -/** - * \brief State information for ForkSkinny-128-384. - */ -typedef struct -{ - uint32_t TK1[4]; /**< First part of the tweakey */ - uint32_t TK2[4]; /**< Second part of the tweakey */ - uint32_t TK3[4]; /**< Third part of the tweakey */ - uint32_t S[4]; /**< Current block state */ - -} forkskinny_128_384_state_t; - -/** - * \brief Applies one round of ForkSkinny-128-384. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_128_384_round - (forkskinny_128_384_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Apply the S-box to all cells in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny128_permute_tk(state->TK1); - skinny128_permute_tk(state->TK2); - skinny128_permute_tk(state->TK3); - skinny128_LFSR2(state->TK2[0]); - skinny128_LFSR2(state->TK2[1]); - skinny128_LFSR3(state->TK3[0]); - skinny128_LFSR3(state->TK3[1]); -} - -void forkskinny_128_384_encrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_384_state_t state; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.TK3[0] = le_load_word32(key + 32); - state.TK3[1] = le_load_word32(key + 36); - state.TK3[2] = le_load_word32(key + 40); - state.TK3[3] = le_load_word32(key + 44); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Run all of the rounds before the forking point */ - for (round = 0; round < FORKSKINNY_128_384_ROUNDS_BEFORE; ++round) { - forkskinny_128_384_round(&state, round); - } - - /* Determine which output blocks we need */ - if (output_left && output_right) { - /* We need both outputs so save the state at the forking point */ - uint32_t F[4]; - F[0] = state.S[0]; - F[1] = state.S[1]; - F[2] = state.S[2]; - F[3] = state.S[3]; - - /* Generate the right output block */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); ++round) { - forkskinny_128_384_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - - /* Restore the state at the forking point */ - state.S[0] = F[0]; - state.S[1] = F[1]; - state.S[2] = F[2]; - state.S[3] = F[3]; - } - if (output_left) { - /* Generate the left output block */ - state.S[0] ^= 0x08040201U; /* Branching constant */ - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - for (round = (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER * 2); ++round) { - forkskinny_128_384_round(&state, round); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - } else { - /* We only need the right output block */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); ++round) { - forkskinny_128_384_round(&state, round); - } - le_store_word32(output_right, state.S[0]); - 
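/* Editorial note (not in the original source): this right-only branch is
 * the path taken when the caller passes NULL for output_left.  The ForkAE
 * modes earlier in this patch rely on it when absorbing associated data:
 * they invoke the block function with a NULL left output and XOR only the
 * right output block into the running tag. */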
le_store_word32(output_right + 4, state.S[1]); - le_store_word32(output_right + 8, state.S[2]); - le_store_word32(output_right + 12, state.S[3]); - } -} - -/** - * \brief Applies one round of ForkSkinny-128-384 in reverse. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_128_384_inv_round - (forkskinny_128_384_state_t *state, unsigned round) -{ - uint32_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Permute TK1 and TK2 for the next round */ - skinny128_inv_LFSR2(state->TK2[0]); - skinny128_inv_LFSR2(state->TK2[1]); - skinny128_inv_LFSR3(state->TK3[0]); - skinny128_inv_LFSR3(state->TK3[1]); - skinny128_inv_permute_tk(state->TK1); - skinny128_inv_permute_tk(state->TK2); - skinny128_inv_permute_tk(state->TK3); - - /* Inverse mix of the columns */ - temp = s0; - s0 = s1; - s1 = s2; - s2 = s3; - s3 = temp ^ s2; - s2 ^= s0; - s1 ^= s2; - - /* Shift the cells in the rows left, which moves the cell - * values down closer to the LSB. That is, we do a right - * rotate on the word to rotate the cells in the word left */ - s1 = rightRotate8(s1); - s2 = rightRotate16(s2); - s3 = rightRotate24(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - (rc & 0x0F) ^ 0x00020000; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all cells in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; -} - -void forkskinny_128_384_decrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_128_384_state_t state; - forkskinny_128_384_state_t fstate; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = le_load_word32(key); - state.TK1[1] = le_load_word32(key + 4); - state.TK1[2] = le_load_word32(key + 8); - state.TK1[3] = le_load_word32(key + 12); - state.TK2[0] = le_load_word32(key + 16); - state.TK2[1] = le_load_word32(key + 20); - state.TK2[2] = le_load_word32(key + 24); - state.TK2[3] = le_load_word32(key + 28); - state.TK3[0] = le_load_word32(key + 32); - state.TK3[1] = le_load_word32(key + 36); - state.TK3[2] = le_load_word32(key + 40); - state.TK3[3] = le_load_word32(key + 44); - state.S[0] = le_load_word32(input); - state.S[1] = le_load_word32(input + 4); - state.S[2] = le_load_word32(input + 8); - state.S[3] = le_load_word32(input + 12); - - /* Fast-forward the tweakey to the end of the key schedule */ - for (round = 0; round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER * 2); ++round) { - skinny128_permute_tk(state.TK1); - skinny128_permute_tk(state.TK2); - skinny128_permute_tk(state.TK3); - skinny128_LFSR2(state.TK2[0]); - skinny128_LFSR2(state.TK2[1]); - skinny128_LFSR3(state.TK3[0]); - skinny128_LFSR3(state.TK3[1]); - } - - /* Perform the "after" rounds on the input to get back - * to the forking point in the cipher */ - for (round = (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER * 2); - round > (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); --round) { - 
forkskinny_128_384_inv_round(&state, round - 1); - } - - /* Remove the branching constant */ - state.S[0] ^= 0x08040201U; - state.S[1] ^= 0x82412010U; - state.S[2] ^= 0x28140a05U; - state.S[3] ^= 0x8844a251U; - - /* Roll the tweakey back another "after" rounds */ - for (round = 0; round < FORKSKINNY_128_384_ROUNDS_AFTER; ++round) { - skinny128_inv_LFSR2(state.TK2[0]); - skinny128_inv_LFSR2(state.TK2[1]); - skinny128_inv_LFSR3(state.TK3[0]); - skinny128_inv_LFSR3(state.TK3[1]); - skinny128_inv_permute_tk(state.TK1); - skinny128_inv_permute_tk(state.TK2); - skinny128_inv_permute_tk(state.TK3); - } - - /* Save the state and the tweakey at the forking point */ - fstate = state; - - /* Generate the left output block after another "before" rounds */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; round > 0; --round) { - forkskinny_128_384_inv_round(&state, round - 1); - } - le_store_word32(output_left, state.S[0]); - le_store_word32(output_left + 4, state.S[1]); - le_store_word32(output_left + 8, state.S[2]); - le_store_word32(output_left + 12, state.S[3]); - - /* Generate the right output block by going forward "after" - * rounds from the forking point */ - for (round = FORKSKINNY_128_384_ROUNDS_BEFORE; - round < (FORKSKINNY_128_384_ROUNDS_BEFORE + - FORKSKINNY_128_384_ROUNDS_AFTER); ++round) { - forkskinny_128_384_round(&fstate, round); - } - le_store_word32(output_right, fstate.S[0]); - le_store_word32(output_right + 4, fstate.S[1]); - le_store_word32(output_right + 8, fstate.S[2]); - le_store_word32(output_right + 12, fstate.S[3]); -} - -/** - * \brief Number of rounds of ForkSkinny-64-192 before forking. - */ -#define FORKSKINNY_64_192_ROUNDS_BEFORE 17 - -/** - * \brief Number of rounds of ForkSkinny-64-192 after forking. - */ -#define FORKSKINNY_64_192_ROUNDS_AFTER 23 - -/** - * \brief State information for ForkSkinny-64-192. - */ -typedef struct -{ - uint16_t TK1[4]; /**< First part of the tweakey */ - uint16_t TK2[4]; /**< Second part of the tweakey */ - uint16_t TK3[4]; /**< Third part of the tweakey */ - uint16_t S[4]; /**< Current block state */ - -} forkskinny_64_192_state_t; - -/** - * \brief Applies one round of ForkSkinny-64-192. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - * - * Note: The cells of each row are ordered in big-endian nibble order - * so it is easiest to manage the rows in big-endian byte order.
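 *
 * (Editorial illustration, not in the original source: with this
 * convention, be_load_word16(input) places input[0], which holds the
 * two highest-order nibbles of row 0, in the high byte of S[0]; that
 * is why "shifting the cells right" in the round function below is
 * implemented as a right rotate of the 16-bit row word.)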
- */ -static void forkskinny_64_192_round - (forkskinny_64_192_state_t *state, unsigned round) -{ - uint16_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Apply the S-box to all cells in the state */ - skinny64_sbox(s0); - skinny64_sbox(s1); - skinny64_sbox(s2); - skinny64_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - ((rc & 0x0F) << 12) ^ 0x0020; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ - ((rc & 0x70) << 8); - s2 ^= 0x2000; - - /* Shift the cells in the rows right */ - s1 = rightRotate4_16(s1); - s2 = rightRotate8_16(s2); - s3 = rightRotate12_16(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny64_permute_tk(state->TK1); - skinny64_permute_tk(state->TK2); - skinny64_permute_tk(state->TK3); - skinny64_LFSR2(state->TK2[0]); - skinny64_LFSR2(state->TK2[1]); - skinny64_LFSR3(state->TK3[0]); - skinny64_LFSR3(state->TK3[1]); -} - -void forkskinny_64_192_encrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_64_192_state_t state; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = be_load_word16(key); - state.TK1[1] = be_load_word16(key + 2); - state.TK1[2] = be_load_word16(key + 4); - state.TK1[3] = be_load_word16(key + 6); - state.TK2[0] = be_load_word16(key + 8); - state.TK2[1] = be_load_word16(key + 10); - state.TK2[2] = be_load_word16(key + 12); - state.TK2[3] = be_load_word16(key + 14); - state.TK3[0] = be_load_word16(key + 16); - state.TK3[1] = be_load_word16(key + 18); - state.TK3[2] = be_load_word16(key + 20); - state.TK3[3] = be_load_word16(key + 22); - state.S[0] = be_load_word16(input); - state.S[1] = be_load_word16(input + 2); - state.S[2] = be_load_word16(input + 4); - state.S[3] = be_load_word16(input + 6); - - /* Run all of the rounds before the forking point */ - for (round = 0; round < FORKSKINNY_64_192_ROUNDS_BEFORE; ++round) { - forkskinny_64_192_round(&state, round); - } - - /* Determine which output blocks we need */ - if (output_left && output_right) { - /* We need both outputs so save the state at the forking point */ - uint16_t F[4]; - F[0] = state.S[0]; - F[1] = state.S[1]; - F[2] = state.S[2]; - F[3] = state.S[3]; - - /* Generate the right output block */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; - round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); ++round) { - forkskinny_64_192_round(&state, round); - } - be_store_word16(output_right, state.S[0]); - be_store_word16(output_right + 2, state.S[1]); - be_store_word16(output_right + 4, state.S[2]); - be_store_word16(output_right + 6, state.S[3]); - - /* Restore the state at the forking point */ - state.S[0] = F[0]; - state.S[1] = F[1]; - state.S[2] = F[2]; - state.S[3] = F[3]; - } - if (output_left) { - /* Generate the left output block */ - state.S[0] ^= 0x1249U; /* Branching constant */ - state.S[1] ^= 0x36daU; - state.S[2] ^= 0x5b7fU; - state.S[3] ^= 0xec81U; - for (round = (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); - round < 
(FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER * 2); ++round) { - forkskinny_64_192_round(&state, round); - } - be_store_word16(output_left, state.S[0]); - be_store_word16(output_left + 2, state.S[1]); - be_store_word16(output_left + 4, state.S[2]); - be_store_word16(output_left + 6, state.S[3]); - } else { - /* We only need the right output block */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; - round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); ++round) { - forkskinny_64_192_round(&state, round); - } - be_store_word16(output_right, state.S[0]); - be_store_word16(output_right + 2, state.S[1]); - be_store_word16(output_right + 4, state.S[2]); - be_store_word16(output_right + 6, state.S[3]); - } -} - -/** - * \brief Applies one round of ForkSkinny-64-192 in reverse. - * - * \param state State to apply the round to. - * \param round Number of the round to apply. - */ -static void forkskinny_64_192_inv_round - (forkskinny_64_192_state_t *state, unsigned round) -{ - uint16_t s0, s1, s2, s3, temp; - uint8_t rc; - - /* Load the state into local variables */ - s0 = state->S[0]; - s1 = state->S[1]; - s2 = state->S[2]; - s3 = state->S[3]; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny64_inv_LFSR2(state->TK2[0]); - skinny64_inv_LFSR2(state->TK2[1]); - skinny64_inv_LFSR3(state->TK3[0]); - skinny64_inv_LFSR3(state->TK3[1]); - skinny64_inv_permute_tk(state->TK1); - skinny64_inv_permute_tk(state->TK2); - skinny64_inv_permute_tk(state->TK3); - - /* Inverse mix of the columns */ - temp = s0; - s0 = s1; - s1 = s2; - s2 = s3; - s3 = temp ^ s2; - s2 ^= s0; - s1 ^= s2; - - /* Shift the cells in the rows left */ - s1 = leftRotate4_16(s1); - s2 = leftRotate8_16(s2); - s3 = leftRotate12_16(s3); - - /* XOR the round constant and the subkey for this round */ - rc = RC[round]; - s0 ^= state->TK1[0] ^ state->TK2[0] ^ state->TK3[0] ^ - ((rc & 0x0F) << 12) ^ 0x0020; - s1 ^= state->TK1[1] ^ state->TK2[1] ^ state->TK3[1] ^ - ((rc & 0x70) << 8); - s2 ^= 0x2000; - - /* Apply the inverse of the S-box to all cells in the state */ - skinny64_inv_sbox(s0); - skinny64_inv_sbox(s1); - skinny64_inv_sbox(s2); - skinny64_inv_sbox(s3); - - /* Save the local variables back to the state */ - state->S[0] = s0; - state->S[1] = s1; - state->S[2] = s2; - state->S[3] = s3; -} - -void forkskinny_64_192_decrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input) -{ - forkskinny_64_192_state_t state; - forkskinny_64_192_state_t fstate; - unsigned round; - - /* Unpack the tweakey and the input */ - state.TK1[0] = be_load_word16(key); - state.TK1[1] = be_load_word16(key + 2); - state.TK1[2] = be_load_word16(key + 4); - state.TK1[3] = be_load_word16(key + 6); - state.TK2[0] = be_load_word16(key + 8); - state.TK2[1] = be_load_word16(key + 10); - state.TK2[2] = be_load_word16(key + 12); - state.TK2[3] = be_load_word16(key + 14); - state.TK3[0] = be_load_word16(key + 16); - state.TK3[1] = be_load_word16(key + 18); - state.TK3[2] = be_load_word16(key + 20); - state.TK3[3] = be_load_word16(key + 22); - state.S[0] = be_load_word16(input); - state.S[1] = be_load_word16(input + 2); - state.S[2] = be_load_word16(input + 4); - state.S[3] = be_load_word16(input + 6); - - /* Fast-forward the tweakey to the end of the key schedule */ - for (round = 0; round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER * 2); ++round) { - skinny64_permute_tk(state.TK1); - skinny64_permute_tk(state.TK2); - 
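/* Editorial note (not in the original source): decryption receives the
 * left-branch output (the ciphertext block), so this loop first advances
 * the tweakey schedule through all "before" rounds plus two sets of
 * "after" rounds.  The inverse rounds that follow can then step the state
 * and the schedule back to the forking point, from which the plaintext is
 * recovered by undoing the "before" rounds and the right-branch block is
 * recomputed by running forward again. */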
skinny64_permute_tk(state.TK3); - skinny64_LFSR2(state.TK2[0]); - skinny64_LFSR2(state.TK2[1]); - skinny64_LFSR3(state.TK3[0]); - skinny64_LFSR3(state.TK3[1]); - } - - /* Perform the "after" rounds on the input to get back - * to the forking point in the cipher */ - for (round = (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER * 2); - round > (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); --round) { - forkskinny_64_192_inv_round(&state, round - 1); - } - - /* Remove the branching constant */ - state.S[0] ^= 0x1249U; - state.S[1] ^= 0x36daU; - state.S[2] ^= 0x5b7fU; - state.S[3] ^= 0xec81U; - - /* Roll the tweakey back another "after" rounds */ - for (round = 0; round < FORKSKINNY_64_192_ROUNDS_AFTER; ++round) { - skinny64_inv_LFSR2(state.TK2[0]); - skinny64_inv_LFSR2(state.TK2[1]); - skinny64_inv_LFSR3(state.TK3[0]); - skinny64_inv_LFSR3(state.TK3[1]); - skinny64_inv_permute_tk(state.TK1); - skinny64_inv_permute_tk(state.TK2); - skinny64_inv_permute_tk(state.TK3); - } - - /* Save the state and the tweakey at the forking point */ - fstate = state; - - /* Generate the left output block after another "before" rounds */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; round > 0; --round) { - forkskinny_64_192_inv_round(&state, round - 1); - } - be_store_word16(output_left, state.S[0]); - be_store_word16(output_left + 2, state.S[1]); - be_store_word16(output_left + 4, state.S[2]); - be_store_word16(output_left + 6, state.S[3]); - - /* Generate the right output block by going forward "after" - * rounds from the forking point */ - for (round = FORKSKINNY_64_192_ROUNDS_BEFORE; - round < (FORKSKINNY_64_192_ROUNDS_BEFORE + - FORKSKINNY_64_192_ROUNDS_AFTER); ++round) { - forkskinny_64_192_round(&fstate, round); - } - be_store_word16(output_right, fstate.S[0]); - be_store_word16(output_right + 2, fstate.S[1]); - be_store_word16(output_right + 4, fstate.S[2]); - be_store_word16(output_right + 6, fstate.S[3]); -} diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-forkskinny.h b/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-forkskinny.h deleted file mode 100644 index 0c1a707..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-forkskinny.h +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
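The forkskinny_64_192_encrypt function defined above treats both output pointers as optional and only saves and restores the fork-point state when both branches are requested. A minimal, illustrative usage sketch (placeholder key and block values, not taken from the original sources):

    unsigned char key[24] = {0};    /* 192-bit tweakey, dummy value */
    unsigned char pt[8]   = {0};    /* 64-bit input block, dummy value */
    unsigned char left[8], right[8];

    /* Both branches: the fork-point state is saved and restored internally. */
    forkskinny_64_192_encrypt(key, left, right, pt);

    /* Right branch only: passing NULL for output_left skips the extra
     * "after" rounds and the branching-constant XOR for the left branch. */
    forkskinny_64_192_encrypt(key, NULL, right, pt);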
- */ - -#ifndef LW_INTERNAL_FORKSKINNY_H -#define LW_INTERNAL_FORKSKINNY_H - -/** - * \file internal-forkskinny.h - * \brief ForkSkinny block cipher family. - * - * ForkSkinny is a modified version of the SKINNY block cipher that - * supports "forking": half-way through the rounds the cipher is - * forked in two different directions to produce two different outputs. - * - * References: https://www.esat.kuleuven.be/cosic/forkae/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts a block of plaintext with ForkSkinny-128-256. - * - * \param key 256-bit tweakey for ForkSkinny-128-256. - * \param output_left Left output block for the ciphertext, or NULL if - * the left output is not required. - * \param output_right Right output block for the authentication tag, - * or NULL if the right output is not required. - * \param input 128-bit input plaintext block. - * - * ForkSkinny-128-192 also uses this function with a padded tweakey. - */ -void forkskinny_128_256_encrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Decrypts a block of ciphertext with ForkSkinny-128-256. - * - * \param key 256-bit tweakey for ForkSkinny-128-256. - * \param output_left Left output block, which is the plaintext. - * \param output_right Right output block for the authentication tag. - * \param input 128-bit input ciphertext block. - * - * Both output blocks will be populated; neither is optional. - */ -void forkskinny_128_256_decrypt - (const unsigned char key[32], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Encrypts a block of plaintext with ForkSkinny-128-384. - * - * \param key 384-bit tweakey for ForkSkinny-128-384. - * \param output_left Left output block for the ciphertext, or NULL if - * the left output is not required. - * \param output_right Right output block for the authentication tag, - * or NULL if the right output is not required. - * \param input 128-bit input plaintext block. - * - * ForkSkinny-128-288 also uses this function with a padded tweakey. - */ -void forkskinny_128_384_encrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Decrypts a block of ciphertext with ForkSkinny-128-384. - * - * \param key 384-bit tweakey for ForkSkinny-128-384. - * \param output_left Left output block, which is the plaintext. - * \param output_right Right output block for the authentication tag. - * \param input 128-bit input ciphertext block. - * - * Both output blocks will be populated; neither is optional. - */ -void forkskinny_128_384_decrypt - (const unsigned char key[48], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Encrypts a block of input with ForkSkinny-64-192. - * - * \param key 192-bit tweakey for ForkSkinny-64-192. - * \param output_left First output block, or NULL if left is not required. - * \param output_right Second output block, or NULL if right is not required. - * \param input 64-bit input block. - */ -/** - * \brief Encrypts a block of plaintext with ForkSkinny-64-192. - * - * \param key 192-bit tweakey for ForkSkinny-64-192. - * \param output_left Left output block for the ciphertext, or NULL if - * the left output is not required. - * \param output_right Right output block for the authentication tag, - * or NULL if the right output is not required. 
- * \param input 64-bit input plaintext block. - */ -void forkskinny_64_192_encrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -/** - * \brief Decrypts a block of ciphertext with ForkSkinny-64-192. - * - * \param key 192-bit tweakey for ForkSkinny-64-192. - * \param output_left Left output block, which is the plaintext. - * \param output_right Right output block for the authentication tag. - * \param input 64-bit input ciphertext block. - * - * Both output blocks will be populated; neither is optional. - */ -void forkskinny_64_192_decrypt - (const unsigned char key[24], unsigned char *output_left, - unsigned char *output_right, const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-skinnyutil.h b/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-skinnyutil.h deleted file mode 100644 index 83136cb..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-skinnyutil.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNYUTIL_H -#define LW_INTERNAL_SKINNYUTIL_H - -/** - * \file internal-skinnyutil.h - * \brief Utilities to help implement SKINNY and its variants. 
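The header above declares the left output of forkskinny_64_192_decrypt as the recovered plaintext and the right output as the authentication tag, so a round trip through the two entry points makes a convenient sanity check. A hypothetical fragment (dummy values, memcmp from string.h assumed):

    unsigned char key[24] = {0}, pt[8] = {0};
    unsigned char ct[8], tag[8], pt2[8], tag2[8];

    forkskinny_64_192_encrypt(key, ct, tag, pt);    /* ct = left branch, tag = right branch */
    forkskinny_64_192_decrypt(key, pt2, tag2, ct);  /* rebuilds both branches from ct alone */

    /* expected: memcmp(pt2, pt, 8) == 0 and memcmp(tag2, tag, 8) == 0 */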
- */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @cond skinnyutil */ - -/* Utilities for implementing SKINNY-128 */ - -#define skinny128_LFSR2(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ - (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ - } while (0) - - -#define skinny128_LFSR3(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ - (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) -#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) - -#define skinny128_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint32_t row2 = tk[2]; \ - uint32_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 16) | (row3 >> 16); \ - tk[0] = ((row2 >> 8) & 0x000000FFU) | \ - ((row2 << 16) & 0x00FF0000U) | \ - ( row3 & 0xFF00FF00U); \ - tk[1] = ((row2 >> 16) & 0x000000FFU) | \ - (row2 & 0xFF000000U) | \ - ((row3 << 8) & 0x0000FF00U) | \ - ( row3 & 0x00FF0000U); \ - } while (0) - -#define skinny128_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint32_t row0 = tk[0]; \ - uint32_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 >> 16) & 0x000000FFU) | \ - ((row0 << 8) & 0x0000FF00U) | \ - ((row1 << 16) & 0x00FF0000U) | \ - ( row1 & 0xFF000000U); \ - tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ - ((row0 << 16) & 0xFF000000U) | \ - ((row1 >> 16) & 0x000000FFU) | \ - ((row1 << 8) & 0x00FF0000U); \ - } while (0) - -/* - * Apply the SKINNY sbox. The original version from the specification is - * equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE(x) - * ((((x) & 0x01010101U) << 2) | - * (((x) & 0x06060606U) << 5) | - * (((x) & 0x20202020U) >> 5) | - * (((x) & 0xC8C8C8C8U) >> 2) | - * (((x) & 0x10101010U) >> 1)) - * - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * return SBOX_SWAP(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ - y = (((x >> 5) & (x << 1)) & 0x04040404U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ - x = ((x & 0x08080808U) << 1) | \ - ((x & 0x32323232U) << 2) | \ - ((x & 0x01010101U) << 5) | \ - ((x & 0x80808080U) >> 6) | \ - ((x & 0x40404040U) >> 4) | \ - ((x & 0x04040404U) >> 2); \ -} while (0) - -/* - * Apply the inverse of the SKINNY sbox. 
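The comment above skinny128_sbox quotes the specification form of the S-box in terms of SBOX_MIX, SBOX_PERMUTE and SBOX_SWAP. Transcribed into a standalone helper (illustrative only, not part of the original file), the reference form that the delayed-permutation macro is meant to reproduce looks like this:

    #include <stdint.h>

    #define SBOX_MIX(x)  (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x))
    #define SBOX_SWAP(x) (((x) & 0xF9F9F9F9U) | \
                          (((x) >> 1) & 0x02020202U) | \
                          (((x) << 1) & 0x04040404U))
    #define SBOX_PERMUTE(x) \
        ((((x) & 0x01010101U) << 2) | (((x) & 0x06060606U) << 5) | \
         (((x) & 0x20202020U) >> 5) | (((x) & 0xC8C8C8C8U) >> 2) | \
         (((x) & 0x10101010U) >> 1))

    /* Reference SKINNY-128 S-box applied to four state bytes in parallel. */
    static uint32_t skinny128_sbox_reference(uint32_t x)
    {
        x = SBOX_MIX(x);  x = SBOX_PERMUTE(x);
        x = SBOX_MIX(x);  x = SBOX_PERMUTE(x);
        x = SBOX_MIX(x);  x = SBOX_PERMUTE(x);
        x = SBOX_MIX(x);
        return SBOX_SWAP(x);
    }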
The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE_INV(x) - * ((((x) & 0x08080808U) << 1) | - * (((x) & 0x32323232U) << 2) | - * (((x) & 0x01010101U) << 5) | - * (((x) & 0xC0C0C0C0U) >> 5) | - * (((x) & 0x04040404U) >> 2)) - * - * x = SBOX_SWAP(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_inv_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ - x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ - y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ - x = ((x & 0x01010101U) << 2) | \ - ((x & 0x04040404U) << 4) | \ - ((x & 0x02020202U) << 6) | \ - ((x & 0x20202020U) >> 5) | \ - ((x & 0xC8C8C8C8U) >> 2) | \ - ((x & 0x10101010U) >> 1); \ -} while (0) - -/* Utilities for implementing SKINNY-64 */ - -#define skinny64_LFSR2(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ - } while (0) - -#define skinny64_LFSR3(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) -#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) - -#define skinny64_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint16_t row2 = tk[2]; \ - uint16_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 8) | (row3 >> 8); \ - tk[0] = ((row2 << 4) & 0xF000U) | \ - ((row2 >> 8) & 0x00F0U) | \ - ( row3 & 0x0F0FU); \ - tk[1] = ((row2 << 8) & 0xF000U) | \ - ((row3 >> 4) & 0x0F00U) | \ - ( row3 & 0x00F0U) | \ - ( row2 & 0x000FU); \ - } while (0) - -#define skinny64_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint16_t row0 = tk[0]; \ - uint16_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 << 8) & 0xF000U) | \ - ((row0 >> 4) & 0x0F00U) | \ - ((row1 >> 8) & 0x00F0U) | \ - ( row1 & 0x000FU); \ - tk[3] = ((row1 << 8) & 0xF000U) | \ - ((row0 << 8) & 0x0F00U) | \ - ((row1 >> 4) & 0x00F0U) | \ - ((row0 >> 8) & 0x000FU); \ - } while (0) - -/* - * Apply the SKINNY-64 sbox. 
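skinny64_permute_tk and skinny64_inv_permute_tk above are used as a forward/inverse pair by the key schedule (the decrypt path uses the inverse to roll the tweakey back), so a round-trip check is a cheap sanity test. A hypothetical fragment (arbitrary nibble values, memcmp from string.h assumed):

    uint16_t tk[4]   = {0x0123, 0x4567, 0x89ab, 0xcdef};
    uint16_t copy[4] = {0x0123, 0x4567, 0x89ab, 0xcdef};

    skinny64_permute_tk(tk);      /* forward PT permutation of the tweakey cells */
    skinny64_inv_permute_tk(tk);  /* inverse PT' permutation */

    /* expected: memcmp(tk, copy, sizeof(tk)) == 0 */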
The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT(x) - * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_SHIFT steps to be performed with one final rotation. - * This reduces the number of required shift operations from 14 to 10. - * - * We can further reduce the number of NOT operations from 4 to 2 - * using the technique from https://github.com/kste/skinny_avx to - * convert NOR-XOR operations into AND-XOR operations by converting - * the S-box into its NOT-inverse. - */ -#define skinny64_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ - x = ~x; \ - x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ -} while (0) - -/* - * Apply the inverse of the SKINNY-64 sbox. The original version - * from the specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT_INV(x) - * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * return SBOX_MIX(x); - */ -#define skinny64_inv_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = ~x; \ - x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ -} while (0) - -/** @endcond */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-util.h b/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define 
le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. 
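 * For example, a 32-bit rotate left by 13 is composed below as a rotate left
 * by 16 (pure byte moves on AVR) followed by three rotate-right-by-1 steps,
 * rather than thirteen single-bit rotates; see the leftRotate13() macro in
 * the composed section that follows.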
*/ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys/internal-util.h b/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys/internal-util.h +++ b/forkae/Implementations/crypto_aead/saefforkskinnyb128t256n120v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/aead-common.c b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/aead-common.h b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. 
- * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. 
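The aead_cipher_t block defined below these typedefs exposes every algorithm through uniform function pointers, so a harness can drive any cipher without knowing which one it is. A hypothetical round-trip helper as a sketch, assuming gift_cofb_cipher (declared later in this same patch) is linked in; the key and nonce buffers are oversized so the same helper would work for other ciphers in the library:

#include <stdio.h>
#include <string.h>
#include "aead-common.h"
#include "gift-cofb.h"

static int aead_roundtrip(const aead_cipher_t *cipher)
{
    unsigned char key[32] = {0}, nonce[32] = {0};   /* all-zero demo key and nonce */
    unsigned char ad[4] = {1, 2, 3, 4};
    unsigned char msg[] = "generic AEAD driver";
    unsigned char ct[sizeof(msg) + 32];             /* room for any tag length     */
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    if (cipher->encrypt(ct, &ctlen, msg, sizeof(msg), ad, sizeof(ad),
                        NULL, nonce, key) != 0)
        return -1;
    if (cipher->decrypt(pt, &ptlen, NULL, ct, ctlen, ad, sizeof(ad),
                        nonce, key) != 0)
        return -1;                                  /* also covers a failed tag check */
    return memcmp(pt, msg, sizeof(msg)) == 0 ? 0 : -1;
}

int main(void)
{
    printf("%s (key %u, nonce %u, tag %u): %s\n",
           gift_cofb_cipher.name, gift_cofb_cipher.key_len,
           gift_cofb_cipher.nonce_len, gift_cofb_cipher.tag_len,
           aead_roundtrip(&gift_cofb_cipher) == 0 ? "ok" : "failed");
    return 0;
}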
- * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/api.h b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/encrypt.c b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/encrypt.c deleted file mode 100644 index 1286684..0000000 --- a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "gift-cofb.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return gift_cofb_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return gift_cofb_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/gift-cofb.c b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/gift-cofb.c deleted file mode 100644 index 6f65524..0000000 --- a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/gift-cofb.c +++ /dev/null @@ -1,405 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "gift-cofb.h" -#include "internal-gift128.h" -#include "internal-util.h" -#include - -aead_cipher_t const gift_cofb_cipher = { - "GIFT-COFB", - GIFT_COFB_KEY_SIZE, - GIFT_COFB_NONCE_SIZE, - GIFT_COFB_TAG_SIZE, - AEAD_FLAG_NONE, - gift_cofb_aead_encrypt, - gift_cofb_aead_decrypt -}; - -/** - * \brief Structure of an L value. - * - * The value is assumed to have already been converted from big-endian - * to host byte order. - */ -typedef struct -{ - uint32_t x; /**< High word of the value */ - uint32_t y; /**< Low word of the value */ - -} gift_cofb_l_t; - -/** - * \brief Structure of a 128-bit block in host byte order. - * - * The block is assumed to have already been converted from big-endian - * to host byte order. - */ -typedef union -{ - uint32_t x[4]; /**< Words of the block */ - uint8_t y[16]; /**< Bytes of the block */ - -} gift_cofb_block_t; - -/** - * \brief Doubles an L value in the F(2^64) field. - * - * \param L The value to be doubled. - * - * L = L << 1 if the top-most bit is 0, or L = (L << 1) ^ 0x1B otherwise. - */ -#define gift_cofb_double_L(L) \ - do { \ - uint32_t mask = ((int32_t)((L)->x)) >> 31; \ - (L)->x = ((L)->x << 1) | ((L)->y >> 31); \ - (L)->y = ((L)->y << 1) ^ (mask & 0x1B); \ - } while (0) - -/** - * \brief Triples an L value in the F(2^64) field. - * - * \param L The value to be tripled. - * - * L = double(L) ^ L - */ -#define gift_cofb_triple_L(L) \ - do { \ - uint32_t mask = ((int32_t)((L)->x)) >> 31; \ - uint32_t tx = ((L)->x << 1) | ((L)->y >> 31); \ - uint32_t ty = ((L)->y << 1) ^ (mask & 0x1B); \ - (L)->x ^= tx; \ - (L)->y ^= ty; \ - } while (0) - -/** - * \brief Applies the GIFT-COFB feedback function to Y. - * - * \param Y The value to be modified with the feedback function. - * - * Y is divided into L and R halves and then (R, L <<< 1) is returned. - */ -#define gift_cofb_feedback(Y) \ - do { \ - uint32_t lx = (Y)->x[0]; \ - uint32_t ly = (Y)->x[1]; \ - (Y)->x[0] = (Y)->x[2]; \ - (Y)->x[1] = (Y)->x[3]; \ - (Y)->x[2] = (lx << 1) | (ly >> 31); \ - (Y)->x[3] = (ly << 1) | (lx >> 31); \ - } while (0) - -/** - * \brief Process the associated data for GIFT-COFB encryption or decryption. - * - * \param ks The GIFT-128 key schedule to use. - * \param Y GIFT-COFB internal state. - * \param L GIFT-COFB internal state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - * \param mlen Length of the plaintext in bytes. 
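gift_cofb_double_L and gift_cofb_triple_L below implement multiplication by x (doubling) and by x + 1 (tripling) in the GF(2^64) field with reduction constant 0x1B, split across two 32-bit words to match the word-sliced state. The same operation on a single uint64_t, as a sketch for cross-checking values against the two-word macros:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Double L in GF(2^64): shift left and, if the top bit fell off,
 * fold it back in with the reduction constant 0x1B. */
static uint64_t cofb_double(uint64_t L)
{
    uint64_t mask = 0u - (L >> 63);         /* all-ones iff the top bit was set */
    return (L << 1) ^ (mask & 0x1B);
}

/* triple(L) = double(L) ^ L, exactly as the macro above computes it. */
static uint64_t cofb_triple(uint64_t L)
{
    return cofb_double(L) ^ L;
}

int main(void)
{
    printf("double(1)     = %016" PRIx64 "\n", cofb_double(1));                 /* 2  */
    printf("double(1<<63) = %016" PRIx64 "\n", cofb_double(UINT64_C(1) << 63)); /* 1b */
    printf("triple(1)     = %016" PRIx64 "\n", cofb_triple(1));                 /* 3  */
    return 0;
}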
- */ -static void gift_cofb_assoc_data - (gift128b_key_schedule_t *ks, gift_cofb_block_t *Y, gift_cofb_l_t *L, - const unsigned char *ad, unsigned long long adlen, unsigned long long mlen) -{ - /* Deal with all associated data blocks except the last */ - while (adlen > 16) { - gift_cofb_double_L(L); - gift_cofb_feedback(Y); - Y->x[0] ^= L->x ^ be_load_word32(ad); - Y->x[1] ^= L->y ^ be_load_word32(ad + 4); - Y->x[2] ^= be_load_word32(ad + 8); - Y->x[3] ^= be_load_word32(ad + 12); - gift128b_encrypt_preloaded(ks, Y->x, Y->x); - ad += 16; - adlen -= 16; - } - - /* Pad and deal with the last block */ - gift_cofb_feedback(Y); - if (adlen == 16) { - Y->x[0] ^= be_load_word32(ad); - Y->x[1] ^= be_load_word32(ad + 4); - Y->x[2] ^= be_load_word32(ad + 8); - Y->x[3] ^= be_load_word32(ad + 12); - gift_cofb_triple_L(L); - } else { - unsigned temp = (unsigned)adlen; - unsigned char padded[16]; - memcpy(padded, ad, temp); - padded[temp] = 0x80; - memset(padded + temp + 1, 0, 16 - temp - 1); - Y->x[0] ^= be_load_word32(padded); - Y->x[1] ^= be_load_word32(padded + 4); - Y->x[2] ^= be_load_word32(padded + 8); - Y->x[3] ^= be_load_word32(padded + 12); - gift_cofb_triple_L(L); - gift_cofb_triple_L(L); - } - if (mlen == 0) { - gift_cofb_triple_L(L); - gift_cofb_triple_L(L); - } - Y->x[0] ^= L->x; - Y->x[1] ^= L->y; - gift128b_encrypt_preloaded(ks, Y->x, Y->x); -} - -/** @cond cofb_byte_swap */ - -/* Byte-swap a block if the platform is little-endian */ -#if defined(LW_UTIL_LITTLE_ENDIAN) -#define gift_cofb_byte_swap_word(y) \ - (__extension__ ({ \ - uint32_t _y = (y); \ - (_y >> 24) | (_y << 24) | ((_y << 8) & 0x00FF0000U) | \ - ((_y >> 8) & 0x0000FF00U); \ - })) -#define gift_cofb_byte_swap(x) \ - do { \ - (x)[0] = gift_cofb_byte_swap_word((x)[0]); \ - (x)[1] = gift_cofb_byte_swap_word((x)[1]); \ - (x)[2] = gift_cofb_byte_swap_word((x)[2]); \ - (x)[3] = gift_cofb_byte_swap_word((x)[3]); \ - } while (0) -#else -#define gift_cofb_byte_swap(x) do { ; } while (0) -#endif - -/** @endcond */ - -int gift_cofb_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - gift128b_key_schedule_t ks; - gift_cofb_block_t Y; - gift_cofb_l_t L; - gift_cofb_block_t P; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + GIFT_COFB_TAG_SIZE; - - /* Set up the key schedule and use it to encrypt the nonce */ - gift128b_init(&ks, k); - Y.x[0] = be_load_word32(npub); - Y.x[1] = be_load_word32(npub + 4); - Y.x[2] = be_load_word32(npub + 8); - Y.x[3] = be_load_word32(npub + 12); - gift128b_encrypt_preloaded(&ks, Y.x, Y.x); - L.x = Y.x[0]; - L.y = Y.x[1]; - - /* Authenticate the associated data */ - gift_cofb_assoc_data(&ks, &Y, &L, ad, adlen, mlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - /* Deal with all plaintext blocks except the last */ - while (mlen > 16) { - P.x[0] = be_load_word32(m); - P.x[1] = be_load_word32(m + 4); - P.x[2] = be_load_word32(m + 8); - P.x[3] = be_load_word32(m + 12); - be_store_word32(c, Y.x[0] ^ P.x[0]); - be_store_word32(c + 4, Y.x[1] ^ P.x[1]); - be_store_word32(c + 8, Y.x[2] ^ P.x[2]); - be_store_word32(c + 12, Y.x[3] ^ P.x[3]); - gift_cofb_double_L(&L); - gift_cofb_feedback(&Y); - Y.x[0] ^= L.x ^ P.x[0]; - Y.x[1] ^= L.y ^ P.x[1]; - Y.x[2] ^= P.x[2]; - Y.x[3] ^= P.x[3]; - gift128b_encrypt_preloaded(&ks, Y.x, Y.x); - c += 16; - m += 16; - mlen 
-= 16; - } - - /* Pad and deal with the last plaintext block */ - if (mlen == 16) { - P.x[0] = be_load_word32(m); - P.x[1] = be_load_word32(m + 4); - P.x[2] = be_load_word32(m + 8); - P.x[3] = be_load_word32(m + 12); - be_store_word32(c, Y.x[0] ^ P.x[0]); - be_store_word32(c + 4, Y.x[1] ^ P.x[1]); - be_store_word32(c + 8, Y.x[2] ^ P.x[2]); - be_store_word32(c + 12, Y.x[3] ^ P.x[3]); - gift_cofb_feedback(&Y); - Y.x[0] ^= P.x[0]; - Y.x[1] ^= P.x[1]; - Y.x[2] ^= P.x[2]; - Y.x[3] ^= P.x[3]; - gift_cofb_triple_L(&L); - c += 16; - } else { - unsigned temp = (unsigned)mlen; - gift_cofb_block_t padded; - memcpy(padded.y, m, temp); - padded.y[temp] = 0x80; - memset(padded.y + temp + 1, 0, 16 - temp - 1); - P.x[0] = be_load_word32(padded.y); - P.x[1] = be_load_word32(padded.y + 4); - P.x[2] = be_load_word32(padded.y + 8); - P.x[3] = be_load_word32(padded.y + 12); - gift_cofb_byte_swap(padded.x); - padded.x[0] ^= Y.x[0]; - padded.x[1] ^= Y.x[1]; - padded.x[2] ^= Y.x[2]; - padded.x[3] ^= Y.x[3]; - gift_cofb_byte_swap(padded.x); - memcpy(c, padded.y, temp); - gift_cofb_feedback(&Y); - Y.x[0] ^= P.x[0]; - Y.x[1] ^= P.x[1]; - Y.x[2] ^= P.x[2]; - Y.x[3] ^= P.x[3]; - gift_cofb_triple_L(&L); - gift_cofb_triple_L(&L); - c += temp; - } - Y.x[0] ^= L.x; - Y.x[1] ^= L.y; - gift128b_encrypt_preloaded(&ks, Y.x, Y.x); - } - - /* Generate the final authentication tag */ - be_store_word32(c, Y.x[0]); - be_store_word32(c + 4, Y.x[1]); - be_store_word32(c + 8, Y.x[2]); - be_store_word32(c + 12, Y.x[3]); - return 0; -} - -int gift_cofb_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - gift128b_key_schedule_t ks; - gift_cofb_block_t Y; - gift_cofb_l_t L; - gift_cofb_block_t P; - unsigned char *mtemp; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < GIFT_COFB_TAG_SIZE) - return -1; - *mlen = clen - GIFT_COFB_TAG_SIZE; - - /* Set up the key schedule and use it to encrypt the nonce */ - gift128b_init(&ks, k); - Y.x[0] = be_load_word32(npub); - Y.x[1] = be_load_word32(npub + 4); - Y.x[2] = be_load_word32(npub + 8); - Y.x[3] = be_load_word32(npub + 12); - gift128b_encrypt_preloaded(&ks, Y.x, Y.x); - L.x = Y.x[0]; - L.y = Y.x[1]; - - /* Authenticate the associated data */ - gift_cofb_assoc_data(&ks, &Y, &L, ad, adlen, *mlen); - - /* Decrypt the ciphertext to produce the plaintext */ - mtemp = m; - clen -= GIFT_COFB_TAG_SIZE; - if (clen > 0) { - /* Deal with all ciphertext blocks except the last */ - while (clen > 16) { - P.x[0] = Y.x[0] ^ be_load_word32(c); - P.x[1] = Y.x[1] ^ be_load_word32(c + 4); - P.x[2] = Y.x[2] ^ be_load_word32(c + 8); - P.x[3] = Y.x[3] ^ be_load_word32(c + 12); - be_store_word32(m, P.x[0]); - be_store_word32(m + 4, P.x[1]); - be_store_word32(m + 8, P.x[2]); - be_store_word32(m + 12, P.x[3]); - gift_cofb_double_L(&L); - gift_cofb_feedback(&Y); - Y.x[0] ^= L.x ^ P.x[0]; - Y.x[1] ^= L.y ^ P.x[1]; - Y.x[2] ^= P.x[2]; - Y.x[3] ^= P.x[3]; - gift128b_encrypt_preloaded(&ks, Y.x, Y.x); - c += 16; - m += 16; - clen -= 16; - } - - /* Pad and deal with the last ciphertext block */ - if (clen == 16) { - P.x[0] = Y.x[0] ^ be_load_word32(c); - P.x[1] = Y.x[1] ^ be_load_word32(c + 4); - P.x[2] = Y.x[2] ^ be_load_word32(c + 8); - P.x[3] = Y.x[3] ^ be_load_word32(c + 12); - be_store_word32(m, P.x[0]); - be_store_word32(m + 4, P.x[1]); - be_store_word32(m + 8, P.x[2]); - 
be_store_word32(m + 12, P.x[3]); - gift_cofb_feedback(&Y); - Y.x[0] ^= P.x[0]; - Y.x[1] ^= P.x[1]; - Y.x[2] ^= P.x[2]; - Y.x[3] ^= P.x[3]; - gift_cofb_triple_L(&L); - c += 16; - } else { - unsigned temp = (unsigned)clen; - P.x[0] = Y.x[0]; - P.x[1] = Y.x[1]; - P.x[2] = Y.x[2]; - P.x[3] = Y.x[3]; - gift_cofb_byte_swap(P.x); - lw_xor_block_2_dest(m, P.y, c, temp); - P.y[temp] = 0x80; - memset(P.y + temp + 1, 0, 16 - temp - 1); - gift_cofb_byte_swap(P.x); - gift_cofb_feedback(&Y); - Y.x[0] ^= P.x[0]; - Y.x[1] ^= P.x[1]; - Y.x[2] ^= P.x[2]; - Y.x[3] ^= P.x[3]; - gift_cofb_triple_L(&L); - gift_cofb_triple_L(&L); - c += temp; - } - Y.x[0] ^= L.x; - Y.x[1] ^= L.y; - gift128b_encrypt_preloaded(&ks, Y.x, Y.x); - } - - /* Check the authentication tag at the end of the packet */ - gift_cofb_byte_swap(Y.x); - return aead_check_tag(mtemp, *mlen, Y.y, c, GIFT_COFB_TAG_SIZE); -} diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/gift-cofb.h b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/gift-cofb.h deleted file mode 100644 index 670d042..0000000 --- a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/gift-cofb.h +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_GIFT_COFB_H -#define LWCRYPTO_GIFT_COFB_H - -#include "aead-common.h" - -/** - * \file gift-cofb.h - * \brief GIFT-COFB authenticated encryption algorithm. - * - * GIFT-COFB is an authenticated encryption algorithm that combines - * the COFB (COmbined FeedBack) block cipher mode with the GIFT-128 - * block cipher. The algorithm has a 128-bit key, a 128-bit nonce, - * and a 128-bit authentication tag. - * - * References: https://www.isical.ac.in/~lightweight/COFB/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for GIFT-COFB. - */ -#define GIFT_COFB_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for all GIFT-COFB family members. - */ -#define GIFT_COFB_TAG_SIZE 16 - -/** - * \brief Size of the nonce for GIFT-COFB. - */ -#define GIFT_COFB_NONCE_SIZE 16 - -/** - * \brief Meta-information block for the GIFT-COFB cipher. - */ -extern aead_cipher_t const gift_cofb_cipher; - -/** - * \brief Encrypts and authenticates a packet with GIFT-COFB. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. 
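The gift-cofb.h header removed below documents the public entry points that remain available from the merged rhys directory. A hypothetical caller-side sketch of that documented API, assuming gift-cofb.h is on the include path and the library is linked:

#include <stdio.h>
#include <string.h>
#include "gift-cofb.h"

int main(void)
{
    unsigned char key[GIFT_COFB_KEY_SIZE] = {0};     /* demo key: all zeroes      */
    unsigned char nonce[GIFT_COFB_NONCE_SIZE] = {0}; /* must never repeat per key */
    unsigned char ad[] = "packet header";
    unsigned char msg[] = "attack at dawn";
    unsigned char ct[sizeof(msg) + GIFT_COFB_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    if (gift_cofb_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                               ad, sizeof(ad), NULL, nonce, key) != 0)
        return 1;
    /* ctlen is now sizeof(msg) + 16: the ciphertext with the tag appended. */

    if (gift_cofb_aead_decrypt(pt, &ptlen, NULL, ct, ctlen,
                               ad, sizeof(ad), nonce, key) != 0)
        return 1;   /* -1 means the tag check failed and pt was zeroed */

    printf("round trip %s\n",
           (ptlen == sizeof(msg) && memcmp(pt, msg, sizeof(msg)) == 0) ? "ok" : "failed");
    return 0;
}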
- * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa gift_cofb_aead_decrypt() - */ -int gift_cofb_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with GIFT-COFB-0. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa gift_cofb_aead_encrypt() - */ -int gift_cofb_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128-config.h b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128-config.h deleted file mode 100644 index 62131ba..0000000 --- a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128-config.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_GIFT128_CONFIG_H -#define LW_INTERNAL_GIFT128_CONFIG_H - -/** - * \file internal-gift128-config.h - * \brief Configures the variant of GIFT-128 to use. - */ - -/** - * \brief Select the full variant of GIFT-128. - * - * The full variant requires 320 bytes for the key schedule and uses the - * fixslicing method to implement encryption and decryption. - */ -#define GIFT128_VARIANT_FULL 0 - -/** - * \brief Select the small variant of GIFT-128. - * - * The small variant requires 80 bytes for the key schedule. The rest - * of the key schedule is expanded on the fly during encryption. - * - * The fixslicing method is used to implement encryption and the slower - * bitslicing method is used to implement decryption. The small variant - * is suitable when memory is at a premium, decryption is not needed, - * but encryption performance is still important. - */ -#define GIFT128_VARIANT_SMALL 1 - -/** - * \brief Select the tiny variant of GIFT-128. - * - * The tiny variant requires 16 bytes for the key schedule and uses the - * bitslicing method to implement encryption and decryption. It is suitable - * for use when memory is very tight and performance is not critical. - */ -#define GIFT128_VARIANT_TINY 2 - -/** - * \def GIFT128_VARIANT - * \brief Selects the default variant of GIFT-128 to use on this platform. - */ -/** - * \def GIFT128_VARIANT_ASM - * \brief Defined to 1 if the GIFT-128 implementation has been replaced - * with an assembly code version. - */ -#if defined(__AVR__) && !defined(GIFT128_VARIANT_ASM) -#define GIFT128_VARIANT_ASM 1 -#endif -#if !defined(GIFT128_VARIANT) -#define GIFT128_VARIANT GIFT128_VARIANT_FULL -#endif -#if !defined(GIFT128_VARIANT_ASM) -#define GIFT128_VARIANT_ASM 0 -#endif - -#endif diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128.c b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128.c deleted file mode 100644 index c6ac5ec..0000000 --- a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128.c +++ /dev/null @@ -1,1498 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
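internal-gift128-config.h, deleted above, keeps the same selection logic in the surviving rhys copy: GIFT128_VARIANT defaults to the full variant and GIFT128_VARIANT_ASM defaults to 1 on AVR. A sketch of overriding that default; in a real build the macro would normally be passed on the compiler command line so internal-gift128.c sees the same value, but defining it before the header has the same effect within one translation unit:

#include <stdio.h>

/* Request the small variant before the config header applies its default. */
#define GIFT128_VARIANT GIFT128_VARIANT_SMALL
#include "internal-gift128-config.h"

int main(void)
{
#if GIFT128_VARIANT == GIFT128_VARIANT_FULL
    puts("full: 320-byte key schedule, fixsliced encrypt and decrypt");
#elif GIFT128_VARIANT == GIFT128_VARIANT_SMALL
    puts("small: 80-byte key schedule, fixsliced encrypt, bitsliced decrypt");
#else
    puts("tiny: 16-byte key schedule, bitsliced encrypt and decrypt");
#endif
    return 0;
}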
- */ - -#include "internal-gift128.h" -#include "internal-util.h" - -#if !GIFT128_VARIANT_ASM - -#if GIFT128_VARIANT != GIFT128_VARIANT_TINY - -/* Round constants for GIFT-128 in the fixsliced representation */ -static uint32_t const GIFT128_RC_fixsliced[40] = { - 0x10000008, 0x80018000, 0x54000002, 0x01010181, 0x8000001f, 0x10888880, - 0x6001e000, 0x51500002, 0x03030180, 0x8000002f, 0x10088880, 0x60016000, - 0x41500002, 0x03030080, 0x80000027, 0x10008880, 0x4001e000, 0x11500002, - 0x03020180, 0x8000002b, 0x10080880, 0x60014000, 0x01400002, 0x02020080, - 0x80000021, 0x10000080, 0x0001c000, 0x51000002, 0x03010180, 0x8000002e, - 0x10088800, 0x60012000, 0x40500002, 0x01030080, 0x80000006, 0x10008808, - 0xc001a000, 0x14500002, 0x01020181, 0x8000001a -}; - -#endif - -#if GIFT128_VARIANT != GIFT128_VARIANT_FULL - -/* Round constants for GIFT-128 in the bitsliced representation */ -static uint8_t const GIFT128_RC[40] = { - 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, - 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, - 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, - 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38, - 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A -}; - -#endif - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) - -/* - * The permutation below was generated by the online permuation generator at - * "http://programming.sirrida.de/calcperm.php". - * - * All of the permutuations are essentially the same, except that each is - * rotated by 8 bits with respect to the next: - * - * P0: 0 24 16 8 1 25 17 9 2 26 18 10 3 27 19 11 4 28 20 12 5 29 21 13 6 30 22 14 7 31 23 15 - * P1: 8 0 24 16 9 1 25 17 10 2 26 18 11 3 27 19 12 4 28 20 13 5 29 21 14 6 30 22 15 7 31 23 - * P2: 16 8 0 24 17 9 1 25 18 10 2 26 19 11 3 27 20 12 4 28 21 13 5 29 22 14 6 30 23 15 7 31 - * P3: 24 16 8 0 25 17 9 1 26 18 10 2 27 19 11 3 28 20 12 4 29 21 13 5 30 22 14 6 31 23 15 7 - * - * The most efficient permutation from the online generator was P3, so we - * perform it as the core of the others, and then perform a final rotation. - * - * It is possible to do slightly better than "P3 then rotate" on desktop and - * server architectures for the other permutations. But the advantage isn't - * as evident on embedded platforms so we keep things simple. 
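bit_permute_step above is the classic delta swap: every bit selected by the mask trades places with the bit located shift positions above it, so applying the same step twice is the identity. A small self-check of that property, reusing the macro verbatim:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define bit_permute_step(_y, mask, shift) \
    do { \
        uint32_t y = (_y); \
        uint32_t t = ((y >> (shift)) ^ y) & (mask); \
        (_y) = (y ^ t) ^ (t << (shift)); \
    } while (0)

int main(void)
{
    uint32_t x = 0x12345678, saved = x;

    /* One step swaps each bit selected by the mask with the bit 3 positions above it. */
    bit_permute_step(x, 0x0a0a0a0a, 3);
    printf("after one step:  %08lx\n", (unsigned long)x);

    /* The same step again undoes the swap. */
    bit_permute_step(x, 0x0a0a0a0a, 3);
    assert(x == saved);
    printf("after two steps: %08lx\n", (unsigned long)x);
    return 0;
}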
- */ -#define PERM3_INNER(x) \ - do { \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x000000ff, 24); \ - } while (0) -#define PERM0(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate8(_x); \ - } while (0) -#define PERM1(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate16(_x); \ - } while (0) -#define PERM2(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate24(_x); \ - } while (0) -#define PERM3(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) - -#define INV_PERM3_INNER(x) \ - do { \ - bit_permute_step(x, 0x00550055, 9); \ - bit_permute_step(x, 0x00003333, 18); \ - bit_permute_step(x, 0x000f000f, 12); \ - bit_permute_step(x, 0x000000ff, 24); \ - } while (0) -#define INV_PERM0(x) \ - do { \ - uint32_t _x = rightRotate8(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM1(x) \ - do { \ - uint32_t _x = rightRotate16(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM2(x) \ - do { \ - uint32_t _x = rightRotate24(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM3(x) \ - do { \ - uint32_t _x = (x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) - -/** - * \brief Converts the GIFT-128 nibble-based representation into word-based. - * - * \param output Output buffer to write the word-based version to. - * \param input Input buffer to read the nibble-based version from. - * - * The \a input and \a output buffers can be the same buffer. - */ -static void gift128n_to_words - (unsigned char *output, const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Load the input buffer into 32-bit words. We use the nibble order - * from the HYENA submission to NIST which is byte-reversed with respect - * to the nibble order of the original GIFT-128 paper. Nibble zero is in - * the first byte instead of the last, which means little-endian order. */ - s0 = le_load_word32(input + 12); - s1 = le_load_word32(input + 8); - s2 = le_load_word32(input + 4); - s3 = le_load_word32(input); - - /* Rearrange the bits so that bits 0..3 of each nibble are - * scattered to bytes 0..3 of each word. The permutation is: - * - * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 - * - * Generated with "http://programming.sirrida.de/calcperm.php". - */ - #define PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - PERM_WORDS(s0); - PERM_WORDS(s1); - PERM_WORDS(s2); - PERM_WORDS(s3); - - /* Rearrange the bytes and write them to the output buffer */ - output[0] = (uint8_t)s0; - output[1] = (uint8_t)s1; - output[2] = (uint8_t)s2; - output[3] = (uint8_t)s3; - output[4] = (uint8_t)(s0 >> 8); - output[5] = (uint8_t)(s1 >> 8); - output[6] = (uint8_t)(s2 >> 8); - output[7] = (uint8_t)(s3 >> 8); - output[8] = (uint8_t)(s0 >> 16); - output[9] = (uint8_t)(s1 >> 16); - output[10] = (uint8_t)(s2 >> 16); - output[11] = (uint8_t)(s3 >> 16); - output[12] = (uint8_t)(s0 >> 24); - output[13] = (uint8_t)(s1 >> 24); - output[14] = (uint8_t)(s2 >> 24); - output[15] = (uint8_t)(s3 >> 24); -} - -/** - * \brief Converts the GIFT-128 word-based representation into nibble-based. 
- * - * \param output Output buffer to write the nibble-based version to. - * \param input Input buffer to read the word-based version from. - */ -static void gift128n_to_nibbles - (unsigned char *output, const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Load the input bytes and rearrange them so that s0 contains the - * most significant nibbles and s3 contains the least significant */ - s0 = (((uint32_t)(input[12])) << 24) | - (((uint32_t)(input[8])) << 16) | - (((uint32_t)(input[4])) << 8) | - ((uint32_t)(input[0])); - s1 = (((uint32_t)(input[13])) << 24) | - (((uint32_t)(input[9])) << 16) | - (((uint32_t)(input[5])) << 8) | - ((uint32_t)(input[1])); - s2 = (((uint32_t)(input[14])) << 24) | - (((uint32_t)(input[10])) << 16) | - (((uint32_t)(input[6])) << 8) | - ((uint32_t)(input[2])); - s3 = (((uint32_t)(input[15])) << 24) | - (((uint32_t)(input[11])) << 16) | - (((uint32_t)(input[7])) << 8) | - ((uint32_t)(input[3])); - - /* Apply the inverse of PERM_WORDS() from the function above */ - #define INV_PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x00aa00aa, 7); \ - bit_permute_step(x, 0x0000cccc, 14); \ - bit_permute_step(x, 0x00f000f0, 4); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - INV_PERM_WORDS(s0); - INV_PERM_WORDS(s1); - INV_PERM_WORDS(s2); - INV_PERM_WORDS(s3); - - /* Store the result into the output buffer as 32-bit words */ - le_store_word32(output + 12, s0); - le_store_word32(output + 8, s1); - le_store_word32(output + 4, s2); - le_store_word32(output, s3); -} - -void gift128n_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - gift128n_to_words(output, input); - gift128b_encrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} - -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - gift128n_to_words(output, input); - gift128b_decrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} - -#if GIFT128_VARIANT != GIFT128_VARIANT_TINY - -/** - * \brief Swaps bits within two words. - * - * \param a The first word. - * \param b The second word. - * \param mask Mask for the bits to shift. - * \param shift Shift amount in bits. - */ -#define gift128b_swap_move(a, b, mask, shift) \ - do { \ - uint32_t tmp = ((b) ^ ((a) >> (shift))) & (mask); \ - (b) ^= tmp; \ - (a) ^= tmp << (shift); \ - } while (0) - -/** - * \brief Derives the next 10 fixsliced keys in the key schedule. - * - * \param next Points to the buffer to receive the next 10 keys. - * \param prev Points to the buffer holding the previous 10 keys. - * - * The \a next and \a prev buffers are allowed to be the same. 
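gift128b_swap_move above exchanges the bits of b selected by mask with the bits of a selected by mask << shift; when it is called with a and b naming the same word, as the key-schedule code does, it degenerates to the single-word delta swap shown earlier. A small sketch of the two-word case with concrete values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define swap_move(a, b, mask, shift) \
    do { \
        uint32_t tmp = ((b) ^ ((a) >> (shift))) & (mask); \
        (b) ^= tmp; \
        (a) ^= tmp << (shift); \
    } while (0)

int main(void)
{
    /* Exchange the low nibble of b with bits 4..7 of a. */
    uint32_t a = 0xF0F0F0F0U, b = 0x00000000U;
    swap_move(a, b, 0x0000000FU, 4);
    printf("a = %08lx, b = %08lx\n", (unsigned long)a, (unsigned long)b);
    assert(a == 0xF0F0F000U && b == 0x0000000FU);
    return 0;
}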
- */ -#define gift128b_derive_keys(next, prev) \ - do { \ - /* Key 0 */ \ - uint32_t s = (prev)[0]; \ - uint32_t t = (prev)[1]; \ - gift128b_swap_move(t, t, 0x00003333U, 16); \ - gift128b_swap_move(t, t, 0x55554444U, 1); \ - (next)[0] = t; \ - /* Key 1 */ \ - s = leftRotate8(s & 0x33333333U) | leftRotate16(s & 0xCCCCCCCCU); \ - gift128b_swap_move(s, s, 0x55551100U, 1); \ - (next)[1] = s; \ - /* Key 2 */ \ - s = (prev)[2]; \ - t = (prev)[3]; \ - (next)[2] = ((t >> 4) & 0x0F000F00U) | ((t & 0x0F000F00U) << 4) | \ - ((t >> 6) & 0x00030003U) | ((t & 0x003F003FU) << 2); \ - /* Key 3 */ \ - (next)[3] = ((s >> 6) & 0x03000300U) | ((s & 0x3F003F00U) << 2) | \ - ((s >> 5) & 0x00070007U) | ((s & 0x001F001FU) << 3); \ - /* Key 4 */ \ - s = (prev)[4]; \ - t = (prev)[5]; \ - (next)[4] = leftRotate8(t & 0xAAAAAAAAU) | \ - leftRotate16(t & 0x55555555U); \ - /* Key 5 */ \ - (next)[5] = leftRotate8(s & 0x55555555U) | \ - leftRotate12(s & 0xAAAAAAAAU); \ - /* Key 6 */ \ - s = (prev)[6]; \ - t = (prev)[7]; \ - (next)[6] = ((t >> 2) & 0x03030303U) | ((t & 0x03030303U) << 2) | \ - ((t >> 1) & 0x70707070U) | ((t & 0x10101010U) << 3); \ - /* Key 7 */ \ - (next)[7] = ((s >> 18) & 0x00003030U) | ((s & 0x01010101U) << 3) | \ - ((s >> 14) & 0x0000C0C0U) | ((s & 0x0000E0E0U) << 15) | \ - ((s >> 1) & 0x07070707U) | ((s & 0x00001010U) << 19); \ - /* Key 8 */ \ - s = (prev)[8]; \ - t = (prev)[9]; \ - (next)[8] = ((t >> 4) & 0x0FFF0000U) | ((t & 0x000F0000U) << 12) | \ - ((t >> 8) & 0x000000FFU) | ((t & 0x000000FFU) << 8); \ - /* Key 9 */ \ - (next)[9] = ((s >> 6) & 0x03FF0000U) | ((s & 0x003F0000U) << 10) | \ - ((s >> 4) & 0x00000FFFU) | ((s & 0x0000000FU) << 12); \ - } while (0) - -/** - * \brief Compute the round keys for GIFT-128 in the fixsliced representation. - * - * \param ks Points to the key schedule to initialize. - * \param k0 First key word. - * \param k1 Second key word. - * \param k2 Third key word. - * \param k3 Fourth key word. 
- */ -static void gift128b_compute_round_keys - (gift128b_key_schedule_t *ks, - uint32_t k0, uint32_t k1, uint32_t k2, uint32_t k3) -{ - unsigned index; - uint32_t temp; - - /* Set the regular key with k0 and k3 pre-swapped for the round function */ - ks->k[0] = k3; - ks->k[1] = k1; - ks->k[2] = k2; - ks->k[3] = k0; - - /* Pre-compute the keys for rounds 3..10 and permute into fixsliced form */ - for (index = 4; index < 20; index += 2) { - ks->k[index] = ks->k[index - 3]; - temp = ks->k[index - 4]; - temp = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - ks->k[index + 1] = temp; - } - for (index = 0; index < 20; index += 10) { - /* Keys 0 and 10 */ - temp = ks->k[index]; - gift128b_swap_move(temp, temp, 0x00550055U, 9); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index] = temp; - - /* Keys 1 and 11 */ - temp = ks->k[index + 1]; - gift128b_swap_move(temp, temp, 0x00550055U, 9); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 1] = temp; - - /* Keys 2 and 12 */ - temp = ks->k[index + 2]; - gift128b_swap_move(temp, temp, 0x11111111U, 3); - gift128b_swap_move(temp, temp, 0x03030303U, 6); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 2] = temp; - - /* Keys 3 and 13 */ - temp = ks->k[index + 3]; - gift128b_swap_move(temp, temp, 0x11111111U, 3); - gift128b_swap_move(temp, temp, 0x03030303U, 6); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 3] = temp; - - /* Keys 4 and 14 */ - temp = ks->k[index + 4]; - gift128b_swap_move(temp, temp, 0x0000AAAAU, 15); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 4] = temp; - - /* Keys 5 and 15 */ - temp = ks->k[index + 5]; - gift128b_swap_move(temp, temp, 0x0000AAAAU, 15); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 5] = temp; - - /* Keys 6 and 16 */ - temp = ks->k[index + 6]; - gift128b_swap_move(temp, temp, 0x0A0A0A0AU, 3); - gift128b_swap_move(temp, temp, 0x00CC00CCU, 6); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 6] = temp; - - /* Keys 7 and 17 */ - temp = ks->k[index + 7]; - gift128b_swap_move(temp, temp, 0x0A0A0A0AU, 3); - gift128b_swap_move(temp, temp, 0x00CC00CCU, 6); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 7] = temp; - - /* Keys 8, 9, 18, and 19 do not need any adjustment */ - } - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - /* Derive the fixsliced keys for the remaining rounds 11..40 */ - for (index = 20; index < 80; index += 10) { - gift128b_derive_keys(ks->k + index, ks->k + index - 20); - } -#endif -} - -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) -{ - gift128b_compute_round_keys - (ks, be_load_word32(key), be_load_word32(key + 4), - be_load_word32(key + 8), be_load_word32(key + 12)); -} - -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) -{ - /* Use 
the little-endian key byte order from the HYENA submission */ - gift128b_compute_round_keys - (ks, le_load_word32(key + 12), le_load_word32(key + 8), - le_load_word32(key + 4), le_load_word32(key)); -} - -/** - * \brief Performs the GIFT-128 S-box on the bit-sliced state. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_sbox(s0, s1, s2, s3) \ - do { \ - s1 ^= s0 & s2; \ - s0 ^= s1 & s3; \ - s2 ^= s0 | s1; \ - s3 ^= s2; \ - s1 ^= s3; \ - s3 ^= 0xFFFFFFFFU; \ - s2 ^= s0 & s1; \ - } while (0) - -/** - * \brief Performs the inverse of the GIFT-128 S-box on the bit-sliced state. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_sbox(s0, s1, s2, s3) \ - do { \ - s2 ^= s3 & s1; \ - s0 ^= 0xFFFFFFFFU; \ - s1 ^= s0; \ - s0 ^= s2; \ - s2 ^= s3 | s1; \ - s3 ^= s1 & s0; \ - s1 ^= s3 & s2; \ - } while (0) - -/** - * \brief Permutes the GIFT-128 state between the 1st and 2nd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_1(s0, s1, s2, s3) \ - do { \ - s1 = ((s1 >> 2) & 0x33333333U) | ((s1 & 0x33333333U) << 2); \ - s2 = ((s2 >> 3) & 0x11111111U) | ((s2 & 0x77777777U) << 1); \ - s3 = ((s3 >> 1) & 0x77777777U) | ((s3 & 0x11111111U) << 3); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 2nd and 3rd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_2(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 4) & 0x0FFF0FFFU) | ((s0 & 0x000F000FU) << 12); \ - s1 = ((s1 >> 8) & 0x00FF00FFU) | ((s1 & 0x00FF00FFU) << 8); \ - s2 = ((s2 >> 12) & 0x000F000FU) | ((s2 & 0x0FFF0FFFU) << 4); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 3rd and 4th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_3(s0, s1, s2, s3) \ - do { \ - gift128b_swap_move(s1, s1, 0x55555555U, 1); \ - s2 = leftRotate16(s2); \ - gift128b_swap_move(s2, s2, 0x00005555U, 1); \ - s3 = leftRotate16(s3); \ - gift128b_swap_move(s3, s3, 0x55550000U, 1); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 4th and 5th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_4(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 6) & 0x03030303U) | ((s0 & 0x3F3F3F3FU) << 2); \ - s1 = ((s1 >> 4) & 0x0F0F0F0FU) | ((s1 & 0x0F0F0F0FU) << 4); \ - s2 = ((s2 >> 2) & 0x3F3F3F3FU) | ((s2 & 0x03030303U) << 6); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 5th and 1st mini-rounds. 
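gift128b_inv_sbox above reverses gift128b_sbox step for step, provided it is called with s0 and s3 exchanged, which is exactly how the decryption rounds later in this file invoke it. A self-contained check of that pairing, with both macros copied verbatim:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define sbox(s0, s1, s2, s3) \
    do { \
        s1 ^= s0 & s2; \
        s0 ^= s1 & s3; \
        s2 ^= s0 | s1; \
        s3 ^= s2; \
        s1 ^= s3; \
        s3 ^= 0xFFFFFFFFU; \
        s2 ^= s0 & s1; \
    } while (0)

#define inv_sbox(s0, s1, s2, s3) \
    do { \
        s2 ^= s3 & s1; \
        s0 ^= 0xFFFFFFFFU; \
        s1 ^= s0; \
        s0 ^= s2; \
        s2 ^= s3 | s1; \
        s3 ^= s1 & s0; \
        s1 ^= s3 & s2; \
    } while (0)

int main(void)
{
    uint32_t s0 = 0x01234567, s1 = 0x89ABCDEF, s2 = 0x0F1E2D3C, s3 = 0x456789AB;
    const uint32_t t0 = s0, t1 = s1, t2 = s2, t3 = s3;

    sbox(s0, s1, s2, s3);       /* forward S-box layer, as in the odd rounds        */
    inv_sbox(s3, s1, s2, s0);   /* inverse layer with s0/s3 swapped, as decryption  */
                                /* calls it, undoing the forward layer step by step */
    assert(s0 == t0 && s1 == t1 && s2 == t2 && s3 == t3);
    puts("inverse S-box restores the state");
    return 0;
}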
- * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_5(s0, s1, s2, s3) \ - do { \ - s1 = leftRotate16(s1); \ - s2 = rightRotate8(s2); \ - s3 = leftRotate8(s3); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 1st and 2nd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_1(s0, s1, s2, s3) \ - do { \ - s1 = ((s1 >> 2) & 0x33333333U) | ((s1 & 0x33333333U) << 2); \ - s2 = ((s2 >> 1) & 0x77777777U) | ((s2 & 0x11111111U) << 3); \ - s3 = ((s3 >> 3) & 0x11111111U) | ((s3 & 0x77777777U) << 1); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 2nd and 3rd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_2(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 12) & 0x000F000FU) | ((s0 & 0x0FFF0FFFU) << 4); \ - s1 = ((s1 >> 8) & 0x00FF00FFU) | ((s1 & 0x00FF00FFU) << 8); \ - s2 = ((s2 >> 4) & 0x0FFF0FFFU) | ((s2 & 0x000F000FU) << 12); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 3rd and 4th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_3(s0, s1, s2, s3) \ - do { \ - gift128b_swap_move(s1, s1, 0x55555555U, 1); \ - gift128b_swap_move(s2, s2, 0x00005555U, 1); \ - s2 = leftRotate16(s2); \ - gift128b_swap_move(s3, s3, 0x55550000U, 1); \ - s3 = leftRotate16(s3); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 4th and 5th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_4(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 2) & 0x3F3F3F3FU) | ((s0 & 0x03030303U) << 6); \ - s1 = ((s1 >> 4) & 0x0F0F0F0FU) | ((s1 & 0x0F0F0F0FU) << 4); \ - s2 = ((s2 >> 6) & 0x03030303U) | ((s2 & 0x3F3F3F3FU) << 2); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 5th and 1st mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_5(s0, s1, s2, s3) \ - do { \ - s1 = leftRotate16(s1); \ - s2 = leftRotate8(s2); \ - s3 = rightRotate8(s3); \ - } while (0); - -/** - * \brief Performs five fixsliced encryption rounds for GIFT-128. - * - * \param rk Points to the 10 round keys for these rounds. - * \param rc Points to the round constants for these rounds. - * - * We perform all 40 rounds of the fixsliced GIFT-128 five at a time. 
- * - * The permutation is restructured so that one of the words each round - * does not need to be permuted, with the others rotating left, up, right, - * and down to keep the bits in line with their non-moving counterparts. - * This reduces the number of shifts required significantly. - * - * At the end of five rounds, the bit ordering will return to the - * original position. We then repeat the process for the next 5 rounds. - */ -#define gift128b_encrypt_5_rounds(rk, rc) \ - do { \ - /* 1st round - S-box, rotate left, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_1(s0, s1, s2, s3); \ - s1 ^= (rk)[0]; \ - s2 ^= (rk)[1]; \ - s0 ^= (rc)[0]; \ - \ - /* 2nd round - S-box, rotate up, add round key */ \ - gift128b_sbox(s3, s1, s2, s0); \ - gift128b_permute_state_2(s0, s1, s2, s3); \ - s1 ^= (rk)[2]; \ - s2 ^= (rk)[3]; \ - s3 ^= (rc)[1]; \ - \ - /* 3rd round - S-box, swap columns, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_3(s0, s1, s2, s3); \ - s1 ^= (rk)[4]; \ - s2 ^= (rk)[5]; \ - s0 ^= (rc)[2]; \ - \ - /* 4th round - S-box, rotate left and swap rows, add round key */ \ - gift128b_sbox(s3, s1, s2, s0); \ - gift128b_permute_state_4(s0, s1, s2, s3); \ - s1 ^= (rk)[6]; \ - s2 ^= (rk)[7]; \ - s3 ^= (rc)[3]; \ - \ - /* 5th round - S-box, rotate up, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_5(s0, s1, s2, s3); \ - s1 ^= (rk)[8]; \ - s2 ^= (rk)[9]; \ - s0 ^= (rc)[4]; \ - \ - /* Swap s0 and s3 in preparation for the next 1st round */ \ - s0 ^= s3; \ - s3 ^= s0; \ - s0 ^= s3; \ - } while (0) - -/** - * \brief Performs five fixsliced decryption rounds for GIFT-128. - * - * \param rk Points to the 10 round keys for these rounds. - * \param rc Points to the round constants for these rounds. - * - * We perform all 40 rounds of the fixsliced GIFT-128 five at a time. 
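Each 5-round group above ends with three XOR statements that exchange s0 and s3 without a temporary register, putting the word ordering back in place for the next group's first round. The trick in isolation:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t s0 = 0xDEADBEEF, s3 = 0x01234567;

    /* XOR swap: after the three statements the two values have traded places. */
    s0 ^= s3;
    s3 ^= s0;
    s0 ^= s3;

    assert(s0 == 0x01234567 && s3 == 0xDEADBEEF);
    return 0;
}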
- */ -#define gift128b_decrypt_5_rounds(rk, rc) \ - do { \ - /* Swap s0 and s3 in preparation for the next 5th round */ \ - s0 ^= s3; \ - s3 ^= s0; \ - s0 ^= s3; \ - \ - /* 5th round - S-box, rotate down, add round key */ \ - s1 ^= (rk)[8]; \ - s2 ^= (rk)[9]; \ - s0 ^= (rc)[4]; \ - gift128b_inv_permute_state_5(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - \ - /* 4th round - S-box, rotate right and swap rows, add round key */ \ - s1 ^= (rk)[6]; \ - s2 ^= (rk)[7]; \ - s3 ^= (rc)[3]; \ - gift128b_inv_permute_state_4(s0, s1, s2, s3); \ - gift128b_inv_sbox(s0, s1, s2, s3); \ - \ - /* 3rd round - S-box, swap columns, add round key */ \ - s1 ^= (rk)[4]; \ - s2 ^= (rk)[5]; \ - s0 ^= (rc)[2]; \ - gift128b_inv_permute_state_3(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - \ - /* 2nd round - S-box, rotate down, add round key */ \ - s1 ^= (rk)[2]; \ - s2 ^= (rk)[3]; \ - s3 ^= (rc)[1]; \ - gift128b_inv_permute_state_2(s0, s1, s2, s3); \ - gift128b_inv_sbox(s0, s1, s2, s3); \ - \ - /* 1st round - S-box, rotate right, add round key */ \ - s1 ^= (rk)[0]; \ - s2 ^= (rk)[1]; \ - s0 ^= (rc)[0]; \ - gift128b_inv_permute_state_1(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - } while (0) - -#else /* GIFT128_VARIANT_TINY */ - -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) -{ - /* Mirror the fixslicing word order of 3, 1, 2, 0 */ - ks->k[0] = be_load_word32(key + 12); - ks->k[1] = be_load_word32(key + 4); - ks->k[2] = be_load_word32(key + 8); - ks->k[3] = be_load_word32(key); -} - -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) -{ - /* Use the little-endian key byte order from the HYENA submission - * and mirror the fixslicing word order of 3, 1, 2, 0 */ - ks->k[0] = le_load_word32(key); - ks->k[1] = le_load_word32(key + 8); - ks->k[2] = le_load_word32(key + 4); - ks->k[3] = le_load_word32(key + 12); -} - -#endif /* GIFT128_VARIANT_TINY */ - -#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext 
into local variables */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. - * Every 5 rounds except the last we add the tweak value to the state */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#elif GIFT128_VARIANT == GIFT128_VARIANT_FULL - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_encrypt_5_rounds(ks->k 
+ 60, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into local variables */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. 
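The eight unrolled calls in the two functions above simply step through the 80-entry pre-expanded key schedule ten words at a time and through the 40 fixsliced round constants five at a time. Inside gift128b_encrypt(), where s0..s3 and ks are in scope, the unrolled sequence is equivalent to the following loop; this is only a sketch of the indexing, the deleted file keeps the calls unrolled, presumably for speed:

    /* Equivalent form of the eight unrolled gift128b_encrypt_5_rounds()
     * calls in the full variant: round keys advance by 10 words and the
     * round constants by 5 words per group of five rounds. */
    unsigned i;
    for (i = 0; i < 8; ++i)
        gift128b_encrypt_5_rounds(ks->k + i * 10, GIFT128_RC_fixsliced + i * 5);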
- * Every 5 rounds except the last we add the tweak value to the state */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#else /* GIFT128_VARIANT_TINY */ - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 
0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* AddTweak - XOR in the tweak every 5 rounds except the last */ - if (((round + 1) % 5) == 0 && round < 39) - s0 ^= tweak; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#endif /* GIFT128_VARIANT_TINY */ - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the ciphertext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = 
be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. - * Every 5 rounds except the first we add the tweak value to the state */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - - /* Pack the state into the plaintext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#else /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ - -/* The small variant uses fixslicing for encryption, but we need to change - * to bitslicing for decryption because of the difficulty of fast-forwarding - * the fixsliced key schedule to the end. So the tiny variant is used for - * decryption when the small variant is selected. Since the NIST AEAD modes - * for GIFT-128 only use the block encrypt operation, the inefficiencies - * in decryption don't matter all that much */ - -/** - * \def gift128b_load_and_forward_schedule() - * \brief Generate the decryption key at the end of the last round. - * - * To do that, we run the block operation forward to determine the - * final state of the key schedule after the last round: - * - * w0 = ks->k[0]; - * w1 = ks->k[1]; - * w2 = ks->k[2]; - * w3 = ks->k[3]; - * for (round = 0; round < 40; ++round) { - * temp = w3; - * w3 = w2; - * w2 = w1; - * w1 = w0; - * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - * } - * - * We can short-cut all of the above by noticing that we don't need - * to do the word rotations. Every 4 rounds, the rotation alignment - * returns to the original position and each word has been rotated - * by applying the "2 right and 4 left" bit-rotation step to it. - * We then repeat that 10 times for the full 40 rounds. The overall - * effect is to apply a "20 right and 40 left" bit-rotation to every - * word in the key schedule. That is equivalent to "4 right and 8 left" - * on the 16-bit sub-words. 
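The shortcut can be sanity-checked in isolation: applying the per-round word rotation ten times (the number of times each key word occupies the rotating slot across the 40 rounds) must give the same result as the one-shot "4 right, 8 left" half-word rotation used below. A minimal standalone check; the helper names are illustrative and not part of the deleted file:

#include <assert.h>
#include <stdint.h>

/* Per-round key word rotation, copied from the loop shown above. */
static uint32_t rotate_one_round(uint32_t w)
{
    return ((w & 0xFFFC0000U) >> 2) | ((w & 0x00030000U) << 14) |
           ((w & 0x00000FFFU) << 4) | ((w & 0x0000F000U) >> 12);
}

/* One-shot fast-forward used by gift128b_load_and_forward_schedule(). */
static uint32_t fast_forward(uint32_t w)
{
    return ((w & 0xFFF00000U) >> 4) | ((w & 0x000F0000U) << 12) |
           ((w & 0x000000FFU) << 8) | ((w & 0x0000FF00U) >> 8);
}

int main(void)
{
    uint32_t w = 0x12345678U;   /* arbitrary test word */
    uint32_t slow = w;
    int i;

    /* Each key word passes through the rotating slot 10 times in 40 rounds. */
    for (i = 0; i < 10; ++i)
        slow = rotate_one_round(slow);

    assert(slow == fast_forward(w));
    return 0;
}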
- */ -#if GIFT128_VARIANT != GIFT128_VARIANT_SMALL -#define gift128b_load_and_forward_schedule() \ - do { \ - w0 = ks->k[3]; \ - w1 = ks->k[1]; \ - w2 = ks->k[2]; \ - w3 = ks->k[0]; \ - w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ - ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ - w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ - ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ - w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ - ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ - w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ - ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ - } while (0) -#else -/* The small variant needs to also undo some of the rotations that were - * done to generate the fixsliced version of the key schedule */ -#define gift128b_load_and_forward_schedule() \ - do { \ - w0 = ks->k[3]; \ - w1 = ks->k[1]; \ - w2 = ks->k[2]; \ - w3 = ks->k[0]; \ - gift128b_swap_move(w3, w3, 0x000000FFU, 24); \ - gift128b_swap_move(w3, w3, 0x00003333U, 18); \ - gift128b_swap_move(w3, w3, 0x000F000FU, 12); \ - gift128b_swap_move(w3, w3, 0x00550055U, 9); \ - gift128b_swap_move(w1, w1, 0x000000FFU, 24); \ - gift128b_swap_move(w1, w1, 0x00003333U, 18); \ - gift128b_swap_move(w1, w1, 0x000F000FU, 12); \ - gift128b_swap_move(w1, w1, 0x00550055U, 9); \ - gift128b_swap_move(w2, w2, 0x000000FFU, 24); \ - gift128b_swap_move(w2, w2, 0x000F000FU, 12); \ - gift128b_swap_move(w2, w2, 0x03030303U, 6); \ - gift128b_swap_move(w2, w2, 0x11111111U, 3); \ - gift128b_swap_move(w0, w0, 0x000000FFU, 24); \ - gift128b_swap_move(w0, w0, 0x000F000FU, 12); \ - gift128b_swap_move(w0, w0, 0x03030303U, 6); \ - gift128b_swap_move(w0, w0, 0x11111111U, 3); \ - w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ - ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ - w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ - ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ - w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ - ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ - w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ - ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ - } while (0) -#endif - -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the ciphertext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Generate the decryption key at the end of the last round */ - gift128b_load_and_forward_schedule(); - - /* Perform all 40 rounds */ - for (round = 40; round > 0; --round) { - /* Rotate the key schedule backwards */ - temp = w0; - w0 = w1; - w1 = w2; - w2 = w3; - w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | - ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; - - /* InvPermBits - apply the inverse of the 128-bit permutation */ - INV_PERM0(s0); - INV_PERM1(s1); - INV_PERM2(s2); - INV_PERM3(s3); - - /* InvSubCells - apply the inverse of the S-box */ - temp = s0; - s0 = s3; - s3 = temp; - s2 ^= s0 & s1; - s3 ^= 0xFFFFFFFFU; - s1 ^= s3; - s3 ^= s2; - s2 ^= s0 | s1; - s0 ^= s1 
& s3; - s1 ^= s0 & s2; - } - - /* Pack the state into the plaintext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Generate the decryption key at the end of the last round */ - gift128b_load_and_forward_schedule(); - - /* Perform all 40 rounds */ - for (round = 40; round > 0; --round) { - /* Rotate the key schedule backwards */ - temp = w0; - w0 = w1; - w1 = w2; - w2 = w3; - w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | - ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); - - /* AddTweak - XOR in the tweak every 5 rounds except the last */ - if ((round % 5) == 0 && round < 40) - s0 ^= tweak; - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; - - /* InvPermBits - apply the inverse of the 128-bit permutation */ - INV_PERM0(s0); - INV_PERM1(s1); - INV_PERM2(s2); - INV_PERM3(s3); - - /* InvSubCells - apply the inverse of the S-box */ - temp = s0; - s0 = s3; - s3 = temp; - s2 ^= s0 & s1; - s3 ^= 0xFFFFFFFFU; - s1 ^= s3; - s3 ^= s2; - s2 ^= s0 | s1; - s0 ^= s1 & s3; - s1 ^= s0 & s2; - } - - /* Pack the state into the plaintext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#endif /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ - -#endif /* !GIFT128_VARIANT_ASM */ diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128.h b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128.h deleted file mode 100644 index f57d143..0000000 --- a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128.h +++ /dev/null @@ -1,246 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
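Returning briefly to the tweaked functions above: the AddTweak conditions in gift128t_encrypt() and gift128t_decrypt() are written differently (one counts rounds up from 0, the other counts down from 40), but they select the same seven positions, after rounds 5, 10, ..., 35. A small standalone check of that equivalence, illustrative only and not part of the patch:

#include <assert.h>

int main(void)
{
    int enc[41] = {0}, dec[41] = {0};
    int round;

    /* Encryption: tweak XORed when (((round + 1) % 5) == 0 && round < 39),
     * i.e. after round+1 of the 40 rounds have been applied. */
    for (round = 0; round < 40; ++round) {
        if (((round + 1) % 5) == 0 && round < 39)
            enc[round + 1] = 1;
    }

    /* Decryption: tweak XORed when ((round % 5) == 0 && round < 40),
     * i.e. while round of the 40 rounds are still applied to the state. */
    for (round = 40; round > 0; --round) {
        if ((round % 5) == 0 && round < 40)
            dec[round] = 1;
    }

    for (round = 0; round <= 40; ++round)
        assert(enc[round] == dec[round]);
    return 0;
}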
- */ - -#ifndef LW_INTERNAL_GIFT128_H -#define LW_INTERNAL_GIFT128_H - -/** - * \file internal-gift128.h - * \brief GIFT-128 block cipher. - * - * There are three versions of GIFT-128 in use within the second round - * submissions to the NIST lightweight cryptography competition. - * - * The most efficient version for 32-bit software implementation is the - * GIFT-128-b bit-sliced version from GIFT-COFB and SUNDAE-GIFT. - * - * The second is the nibble-based version from HYENA. We implement the - * HYENA version as a wrapper around the bit-sliced version. - * - * The third version is a variant on the HYENA nibble-based version that - * includes a 4-bit tweak value for domain separation. It is used by - * the ESTATE submission to NIST. - * - * Technically there is a fourth version of GIFT-128 which is the one that - * appeared in the original GIFT-128 paper. It is almost the same as the - * HYENA version except that the byte ordering is big-endian instead of - * HYENA's little-endian. The original version of GIFT-128 doesn't appear - * in any of the NIST submissions so we don't bother with it in this library. - * - * References: https://eprint.iacr.org/2017/622.pdf, - * https://eprint.iacr.org/2020/412.pdf, - * https://giftcipher.github.io/gift/ - */ - -#include -#include -#include "internal-gift128-config.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of a GIFT-128 block in bytes. - */ -#define GIFT128_BLOCK_SIZE 16 - -/** - * \var GIFT128_ROUND_KEYS - * \brief Number of round keys for the GIFT-128 key schedule. - */ -#if GIFT128_VARIANT == GIFT128_VARIANT_TINY -#define GIFT128_ROUND_KEYS 4 -#elif GIFT128_VARIANT == GIFT128_VARIANT_SMALL -#define GIFT128_ROUND_KEYS 20 -#else -#define GIFT128_ROUND_KEYS 80 -#endif - -/** - * \brief Structure of the key schedule for GIFT-128 (bit-sliced). - */ -typedef struct -{ - /** Pre-computed round keys for bit-sliced GIFT-128 */ - uint32_t k[GIFT128_ROUND_KEYS]; - -} gift128b_key_schedule_t; - -/** - * \brief Initializes the key schedule for GIFT-128 (bit-sliced). - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the 16 bytes of the key data. - */ -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced and pre-loaded). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version assumes that the input has already been pre-loaded from - * big-endian into host byte order in the supplied word array. The output - * is delivered in the same way. - */ -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]); - -/** - * \brief Decrypts a 128-bit block with GIFT-128 (bit-sliced). 
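Taken together, the declarations above and the decryption routine documented next make up the whole bit-sliced interface of this header. A hypothetical caller, sketched here under the assumption that the header is on the include path, needs only one buffer because the documented in-place behaviour is allowed:

#include "internal-gift128.h"

/* Hypothetical helper: encrypt and then decrypt one 16-byte block in place
 * using the bit-sliced GIFT-128 interface declared in this header. */
static void example_roundtrip(unsigned char block[GIFT128_BLOCK_SIZE],
                              const unsigned char key[16])
{
    gift128b_key_schedule_t ks;

    gift128b_init(&ks, key);
    gift128b_encrypt(&ks, block, block);   /* in-place encryption */
    gift128b_decrypt(&ks, block, block);   /* in-place decryption recovers the block */
}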
- * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - */ -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Structure of the key schedule for GIFT-128 (nibble-based). - */ -typedef gift128b_key_schedule_t gift128n_key_schedule_t; - -/** - * \brief Initializes the key schedule for GIFT-128 (nibble-based). - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the 16 bytes of the key data. - */ -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (nibble-based). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void gift128n_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with GIFT-128 (nibble-based). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - */ -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/* 4-bit tweak values expanded to 32-bit for TweGIFT-128 */ -#define GIFT128T_TWEAK_0 0x00000000 /**< TweGIFT-128 tweak value 0 */ -#define GIFT128T_TWEAK_1 0xe1e1e1e1 /**< TweGIFT-128 tweak value 1 */ -#define GIFT128T_TWEAK_2 0xd2d2d2d2 /**< TweGIFT-128 tweak value 2 */ -#define GIFT128T_TWEAK_3 0x33333333 /**< TweGIFT-128 tweak value 3 */ -#define GIFT128T_TWEAK_4 0xb4b4b4b4 /**< TweGIFT-128 tweak value 4 */ -#define GIFT128T_TWEAK_5 0x55555555 /**< TweGIFT-128 tweak value 5 */ -#define GIFT128T_TWEAK_6 0x66666666 /**< TweGIFT-128 tweak value 6 */ -#define GIFT128T_TWEAK_7 0x87878787 /**< TweGIFT-128 tweak value 7 */ -#define GIFT128T_TWEAK_8 0x78787878 /**< TweGIFT-128 tweak value 8 */ -#define GIFT128T_TWEAK_9 0x99999999 /**< TweGIFT-128 tweak value 9 */ -#define GIFT128T_TWEAK_10 0xaaaaaaaa /**< TweGIFT-128 tweak value 10 */ -#define GIFT128T_TWEAK_11 0x4b4b4b4b /**< TweGIFT-128 tweak value 11 */ -#define GIFT128T_TWEAK_12 0xcccccccc /**< TweGIFT-128 tweak value 12 */ -#define GIFT128T_TWEAK_13 0x2d2d2d2d /**< TweGIFT-128 tweak value 13 */ -#define GIFT128T_TWEAK_14 0x1e1e1e1e /**< TweGIFT-128 tweak value 14 */ -#define GIFT128T_TWEAK_15 0xffffffff /**< TweGIFT-128 tweak value 15 */ - -/** - * \brief Encrypts a 128-bit block with TweGIFT-128 (tweakable variant). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value expanded to 32-bit. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. 
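The GIFT128T_TWEAK_x table above appears to follow a simple rule: every byte carries the 4-bit value in its low nibble and, in the high nibble, either the value itself (when it has even parity) or its complement (when it has odd parity). That reading is only an observation about the listed constants, not an interface of the library; a small sketch that reproduces a few table entries from it:

#include <assert.h>
#include <stdint.h>

/* Expand a 4-bit value to 32 bits following the pattern observed in the
 * GIFT128T_TWEAK_x table: low nibble = value, high nibble = value or its
 * complement depending on the parity of the value, byte repeated 4 times. */
static uint32_t expand_tweak(uint32_t x)
{
    uint32_t parity = x ^ (x >> 1);
    uint32_t nibble;
    parity = (parity ^ (parity >> 2)) & 1;           /* 1 when x has odd parity */
    nibble = parity ? ((~x) & 0x0FU) : (x & 0x0FU);
    return ((nibble << 4) | (x & 0x0FU)) * 0x01010101U;
}

int main(void)
{
    assert(expand_tweak(0)  == 0x00000000U);   /* GIFT128T_TWEAK_0  */
    assert(expand_tweak(1)  == 0xe1e1e1e1U);   /* GIFT128T_TWEAK_1  */
    assert(expand_tweak(7)  == 0x87878787U);   /* GIFT128T_TWEAK_7  */
    assert(expand_tweak(11) == 0x4b4b4b4bU);   /* GIFT128T_TWEAK_11 */
    assert(expand_tweak(15) == 0xffffffffU);   /* GIFT128T_TWEAK_15 */
    return 0;
}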
- * - * This variant of GIFT-128 is used by the ESTATE submission to the - * NIST Lightweight Cryptography Competition. A 4-bit tweak is added to - * some of the rounds to provide domain separation. If the tweak is - * zero, then this function is identical to gift128n_encrypt(). - */ -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak); - -/** - * \brief Decrypts a 128-bit block with TweGIFT-128 (tweakable variant). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value expanded to 32-bit. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This variant of GIFT-128 is used by the ESTATE submission to the - * NIST Lightweight Cryptography Competition. A 4-bit tweak is added to - * some of the rounds to provide domain separation. If the tweak is - * zero, then this function is identical to gift128n_encrypt(). - */ -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128b-avr.S b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128b-avr.S deleted file mode 100644 index 641613a..0000000 --- a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128b-avr.S +++ /dev/null @@ -1,2104 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 40 -table_0: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - - .text -.global gift128b_init - .type gift128b_init, @function -gift128b_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - std Z+4,r18 - std Z+5,r19 - std Z+6,r20 - std Z+7,r21 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - std Z+12,r18 - std Z+13,r19 - std Z+14,r20 - std Z+15,r21 - ret - .size gift128b_init, .-gift128b_init - - .text -.global gift128b_encrypt - .type gift128b_encrypt, @function -gift128b_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 36 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ld r26,Z - ldd r27,Z+1 - 
ldd r24,Z+2 - ldd r25,Z+3 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r17,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - mov r16,r1 -46: - rcall 199f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - rcall 199f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - rcall 199f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - rcall 199f - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - ldi r17,40 - cpse r16,r17 - rjmp 46b - rjmp 548f -199: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - movw r18,r22 - movw r20,r2 - mov r0,r4 - and r0,r18 - eor r8,r0 - mov r0,r5 - and r0,r19 - eor r9,r0 - mov r0,r6 - and r0,r20 - eor r10,r0 - mov r0,r7 - and r0,r21 - eor r11,r0 - movw r22,r12 - movw r2,r14 - movw r12,r18 - movw r14,r20 - bst r22,1 - bld r0,0 - bst r22,4 - bld r22,1 - bst r2,0 - bld r22,4 - bst r22,2 - bld r2,0 - bst r23,0 - bld r22,2 - bst r22,3 - bld r23,0 - bst r23,4 - bld r22,3 - bst r2,3 - bld r23,4 - bst r23,6 - bld r2,3 - bst r3,3 - bld r23,6 - bst r23,5 - bld r3,3 - bst r2,7 - bld r23,5 - bst r3,6 - bld r2,7 - bst r3,1 - bld r3,6 - bst 
r22,5 - bld r3,1 - bst r2,4 - bld r22,5 - bst r2,2 - bld r2,4 - bst r23,2 - bld r2,2 - bst r23,3 - bld r23,2 - bst r23,7 - bld r23,3 - bst r3,7 - bld r23,7 - bst r3,5 - bld r3,7 - bst r2,5 - bld r3,5 - bst r2,6 - bld r2,5 - bst r3,2 - bld r2,6 - bst r23,1 - bld r3,2 - bst r22,7 - bld r23,1 - bst r3,4 - bld r22,7 - bst r2,1 - bld r3,4 - bst r22,6 - bld r2,1 - bst r3,0 - bld r22,6 - bst r0,0 - bld r3,0 - bst r4,0 - bld r0,0 - bst r4,1 - bld r4,0 - bst r4,5 - bld r4,1 - bst r6,5 - bld r4,5 - bst r6,7 - bld r6,5 - bst r7,7 - bld r6,7 - bst r7,6 - bld r7,7 - bst r7,2 - bld r7,6 - bst r5,2 - bld r7,2 - bst r5,0 - bld r5,2 - bst r0,0 - bld r5,0 - bst r4,2 - bld r0,0 - bst r5,1 - bld r4,2 - bst r4,4 - bld r5,1 - bst r6,1 - bld r4,4 - bst r4,7 - bld r6,1 - bst r7,5 - bld r4,7 - bst r6,6 - bld r7,5 - bst r7,3 - bld r6,6 - bst r5,6 - bld r7,3 - bst r7,0 - bld r5,6 - bst r0,0 - bld r7,0 - bst r4,3 - bld r0,0 - bst r5,5 - bld r4,3 - bst r6,4 - bld r5,5 - bst r6,3 - bld r6,4 - bst r5,7 - bld r6,3 - bst r7,4 - bld r5,7 - bst r6,2 - bld r7,4 - bst r5,3 - bld r6,2 - bst r5,4 - bld r5,3 - bst r6,0 - bld r5,4 - bst r0,0 - bld r6,0 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r8,2 - bld r8,0 - bst r9,2 - bld r8,2 - bst r9,1 - bld r9,2 - bst r8,5 - bld r9,1 - bst r10,6 - bld r8,5 - bst r11,0 - bld r10,6 - bst r8,3 - bld r11,0 - bst r9,6 - bld r8,3 - bst r11,1 - bld r9,6 - bst r8,7 - bld r11,1 - bst r11,6 - bld r8,7 - bst r11,3 - bld r11,6 - bst r9,7 - bld r11,3 - bst r11,5 - bld r9,7 - bst r10,7 - bld r11,5 - bst r11,4 - bld r10,7 - bst r10,3 - bld r11,4 - bst r9,4 - bld r10,3 - bst r10,1 - bld r9,4 - bst r8,4 - bld r10,1 - bst r10,2 - bld r8,4 - bst r9,0 - bld r10,2 - bst r8,1 - bld r9,0 - bst r8,6 - bld r8,1 - bst r11,2 - bld r8,6 - bst r9,3 - bld r11,2 - bst r9,5 - bld r9,3 - bst r10,5 - bld r9,5 - bst r10,4 - bld r10,5 - bst r10,0 - bld r10,4 - bst r0,0 - bld r10,0 - bst r12,0 - bld r0,0 - bst r12,3 - bld r12,0 - bst r13,7 - bld r12,3 - bst r15,6 - bld r13,7 - bst r15,0 - bld r15,6 - bst r0,0 - bld r15,0 - bst r12,1 - bld r0,0 - bst r12,7 - bld r12,1 - bst r15,7 - bld r12,7 - bst r15,4 - bld r15,7 - bst r14,0 - bld r15,4 - bst r0,0 - bld r14,0 - bst r12,2 - bld r0,0 - bst r13,3 - bld r12,2 - bst r13,6 - bld r13,3 - bst r15,2 - bld r13,6 - bst r13,0 - bld r15,2 - bst r0,0 - bld r13,0 - bst r12,4 - bld r0,0 - bst r14,3 - bld r12,4 - bst r13,5 - bld r14,3 - bst r14,6 - bld r13,5 - bst r15,1 - bld r14,6 - bst r0,0 - bld r15,1 - bst r12,5 - bld r0,0 - bst r14,7 - bld r12,5 - bst r15,5 - bld r14,7 - bst r14,4 - bld r15,5 - bst r14,1 - bld r14,4 - bst r0,0 - bld r14,1 - bst r12,6 - bld r0,0 - bst r15,3 - bld r12,6 - bst r13,4 - bld r15,3 - bst r14,2 - bld r13,4 - bst r13,1 - bld r14,2 - bst r0,0 - bld r13,1 - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - inc r16 - ret -548: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop 
r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt, .-gift128b_encrypt - - .text -.global gift128b_encrypt_preloaded - .type gift128b_encrypt_preloaded, @function -gift128b_encrypt_preloaded: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 36 - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r17,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - mov r16,r1 -46: - rcall 199f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - rcall 199f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - rcall 199f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - rcall 199f - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - ldi r17,40 - cpse r16,r17 - rjmp 46b - rjmp 548f -199: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor 
r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - movw r18,r22 - movw r20,r2 - mov r0,r4 - and r0,r18 - eor r8,r0 - mov r0,r5 - and r0,r19 - eor r9,r0 - mov r0,r6 - and r0,r20 - eor r10,r0 - mov r0,r7 - and r0,r21 - eor r11,r0 - movw r22,r12 - movw r2,r14 - movw r12,r18 - movw r14,r20 - bst r22,1 - bld r0,0 - bst r22,4 - bld r22,1 - bst r2,0 - bld r22,4 - bst r22,2 - bld r2,0 - bst r23,0 - bld r22,2 - bst r22,3 - bld r23,0 - bst r23,4 - bld r22,3 - bst r2,3 - bld r23,4 - bst r23,6 - bld r2,3 - bst r3,3 - bld r23,6 - bst r23,5 - bld r3,3 - bst r2,7 - bld r23,5 - bst r3,6 - bld r2,7 - bst r3,1 - bld r3,6 - bst r22,5 - bld r3,1 - bst r2,4 - bld r22,5 - bst r2,2 - bld r2,4 - bst r23,2 - bld r2,2 - bst r23,3 - bld r23,2 - bst r23,7 - bld r23,3 - bst r3,7 - bld r23,7 - bst r3,5 - bld r3,7 - bst r2,5 - bld r3,5 - bst r2,6 - bld r2,5 - bst r3,2 - bld r2,6 - bst r23,1 - bld r3,2 - bst r22,7 - bld r23,1 - bst r3,4 - bld r22,7 - bst r2,1 - bld r3,4 - bst r22,6 - bld r2,1 - bst r3,0 - bld r22,6 - bst r0,0 - bld r3,0 - bst r4,0 - bld r0,0 - bst r4,1 - bld r4,0 - bst r4,5 - bld r4,1 - bst r6,5 - bld r4,5 - bst r6,7 - bld r6,5 - bst r7,7 - bld r6,7 - bst r7,6 - bld r7,7 - bst r7,2 - bld r7,6 - bst r5,2 - bld r7,2 - bst r5,0 - bld r5,2 - bst r0,0 - bld r5,0 - bst r4,2 - bld r0,0 - bst r5,1 - bld r4,2 - bst r4,4 - bld r5,1 - bst r6,1 - bld r4,4 - bst r4,7 - bld r6,1 - bst r7,5 - bld r4,7 - bst r6,6 - bld r7,5 - bst r7,3 - bld r6,6 - bst r5,6 - bld r7,3 - bst r7,0 - bld r5,6 - bst r0,0 - bld r7,0 - bst r4,3 - bld r0,0 - bst r5,5 - bld r4,3 - bst r6,4 - bld r5,5 - bst r6,3 - bld r6,4 - bst r5,7 - bld r6,3 - bst r7,4 - bld r5,7 - bst r6,2 - bld r7,4 - bst r5,3 - bld r6,2 - bst r5,4 - bld r5,3 - bst r6,0 - bld r5,4 - bst r0,0 - bld r6,0 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r8,2 - bld r8,0 - bst r9,2 - bld r8,2 - bst r9,1 - bld r9,2 - bst r8,5 - bld r9,1 - bst r10,6 - bld r8,5 - bst r11,0 - bld r10,6 - bst r8,3 - bld r11,0 - bst r9,6 - bld r8,3 - bst r11,1 - bld r9,6 - bst r8,7 - bld r11,1 - bst r11,6 - bld r8,7 - bst r11,3 - bld r11,6 - bst r9,7 - bld r11,3 - bst r11,5 - bld r9,7 - bst r10,7 - bld r11,5 - bst r11,4 - bld r10,7 - bst r10,3 - bld r11,4 - bst r9,4 - bld r10,3 - bst r10,1 - bld r9,4 - bst r8,4 - bld r10,1 - bst r10,2 - bld r8,4 - bst r9,0 - bld r10,2 - bst r8,1 - bld r9,0 - bst r8,6 - bld r8,1 - bst r11,2 - bld r8,6 - bst r9,3 - bld r11,2 - bst r9,5 - bld r9,3 - bst r10,5 - bld r9,5 - bst r10,4 - bld r10,5 - bst r10,0 - bld r10,4 - bst r0,0 - bld r10,0 - bst r12,0 - bld r0,0 - bst r12,3 - bld r12,0 - bst r13,7 - bld r12,3 - bst r15,6 - bld r13,7 - bst r15,0 - bld r15,6 - bst r0,0 - bld r15,0 - bst r12,1 - bld r0,0 - bst r12,7 - bld r12,1 - bst r15,7 - bld r12,7 - bst r15,4 - bld r15,7 - bst r14,0 - bld r15,4 - bst r0,0 - bld r14,0 - bst r12,2 - bld r0,0 - bst r13,3 - bld r12,2 - bst r13,6 - bld r13,3 - bst r15,2 - bld r13,6 - bst r13,0 - bld r15,2 - bst r0,0 - bld r13,0 - bst r12,4 - bld r0,0 - bst r14,3 - bld r12,4 - bst r13,5 - bld r14,3 - bst r14,6 - bld r13,5 - bst r15,1 - bld r14,6 - bst r0,0 - bld r15,1 - bst r12,5 - bld r0,0 - bst r14,7 - bld r12,5 - bst r15,5 - bld r14,7 - bst r14,4 - bld r15,5 - bst r14,1 - bld r14,4 - bst r0,0 - bld r14,1 - bst r12,6 - bld r0,0 - bst r15,3 - bld r12,6 - bst r13,4 - bld r15,3 - bst r14,2 - bld r13,4 - bst r13,1 - bld r14,2 - 
bst r0,0 - bld r13,1 - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - inc r16 - ret -548: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded - - .text -.global gift128b_decrypt - .type gift128b_decrypt, @function -gift128b_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r17,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -114: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror 
r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - cpse r16,r1 - rjmp 114b - rjmp 611f -266: - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - bld r11,3 - bst r8,7 - bld r11,6 - bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst r9,1 - bld r8,5 - bst 
r9,2 - bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -611: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_decrypt, .-gift128b_decrypt - -#endif diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128b-full-avr.S b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128b-full-avr.S deleted file mode 100644 index ff11875..0000000 --- a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128b-full-avr.S +++ /dev/null @@ -1,5037 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - .byte 3 - .byte 3 - .byte 39 - .byte 0 - 
.byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - .byte 2 - .byte 0 - .byte 80 - .byte 20 - .byte 129 - .byte 1 - .byte 2 - .byte 1 - .byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128b_init - .type gift128b_init, @function -gift128b_init: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 18 - ld r13,X+ - ld r12,X+ - ld r11,X+ - ld r10,X+ - ld r5,X+ - ld r4,X+ - ld r3,X+ - ld r2,X+ - ld r9,X+ - ld r8,X+ - ld r7,X+ - ld r6,X+ - ld r29,X+ - ld r28,X+ - ld r23,X+ - ld r22,X+ - st Z+,r22 - st Z+,r23 - st Z+,r28 - st Z+,r29 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - ldi r24,4 -33: - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r29 - ror r28 - ror r0 - lsr r29 - ror r28 - ror r0 - or r29,r0 - st Z+,r22 - st Z+,r23 - st Z+,r28 - st Z+,r29 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r28 - mov r28,r4 - mov r4,r0 - mov r0,r29 - mov r29,r5 - mov r5,r0 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - mov r0,r6 - mov r6,r10 - mov r10,r0 - mov r0,r7 - mov r7,r11 - mov r11,r0 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - dec r24 - breq 5115f - rjmp 33b -5115: - subi r30,80 - sbc r31,r1 - ldi r24,2 -119: - ld r22,Z - ldd r23,Z+1 - ldd r28,Z+2 - ldd r29,Z+3 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor 
r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - st Z,r29 - std Z+1,r23 - std Z+2,r28 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r28,Z+6 - ldd r29,Z+7 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+4,r29 - std Z+5,r23 - std Z+6,r28 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r28,Z+10 - ldd r29,Z+11 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - 
ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+8,r29 - std Z+9,r23 - std Z+10,r28 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r28,Z+14 - ldd r29,Z+15 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+12,r29 - std Z+13,r23 - std Z+14,r28 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r28,Z+18 - ldd r29,Z+19 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl 
r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+16,r29 - std Z+17,r23 - std Z+18,r28 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r28,Z+22 - ldd r29,Z+23 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+20,r29 - std Z+21,r23 - std Z+22,r28 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r28,Z+26 - ldd r29,Z+27 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov 
r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+24,r29 - std Z+25,r23 - std Z+26,r28 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r28,Z+30 - ldd r29,Z+31 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+28,r29 - std Z+29,r23 - std Z+30,r28 - std Z+31,r22 - dec r24 - breq 1268f - adiw r30,40 - rjmp 119b -1268: - adiw r30,40 - movw r26,r30 - subi r26,80 - sbc r27,r1 - ldi r24,6 -1274: - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r2 - eor r19,r3 - andi r18,51 - andi r19,51 - eor r2,r18 - eor r3,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - st Z,r2 - std Z+1,r3 - std Z+2,r4 - std Z+3,r5 - movw r18,r22 - movw r20,r28 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - andi r28,204 - andi r29,204 - or r28,r21 - or r29,r18 - or r22,r19 - or r23,r20 - movw r18,r28 - movw r20,r22 - lsr r21 - ror 
r20 - ror r19 - ror r18 - eor r18,r28 - eor r19,r29 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r28,r18 - eor r29,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r28,r18 - eor r29,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r28 - std Z+5,r29 - std Z+6,r22 - std Z+7,r23 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - swap r3 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - swap r5 - std Z+8,r2 - std Z+9,r3 - std Z+10,r4 - std Z+11,r5 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r28 - adc r28,r1 - lsl r28 - adc r28,r1 - lsl r28 - adc r28,r1 - lsl r29 - adc r29,r1 - lsl r29 - adc r29,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r28 - std Z+15,r29 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - ldi r25,85 - and r2,r25 - and r3,r25 - and r4,r25 - and r5,r25 - or r2,r19 - or r3,r20 - or r4,r21 - or r5,r18 - std Z+16,r4 - std Z+17,r5 - std Z+18,r2 - std Z+19,r3 - movw r18,r22 - movw r20,r28 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - andi r28,170 - andi r29,170 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - or r22,r18 - or r23,r19 - or r28,r20 - or r29,r21 - std Z+20,r29 - std Z+21,r22 - std Z+22,r23 - std Z+23,r28 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r14,r18 - movw r16,r20 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - eor r14,r18 - eor r15,r19 - eor r16,r20 - eor r17,r21 - ldi r25,8 - and r14,r25 - and r15,r25 - andi r16,8 - andi r17,8 - eor r18,r14 - eor r19,r15 - eor r20,r16 - eor r21,r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - eor r18,r14 - eor r19,r15 - eor r20,r16 - eor r21,r17 - ldi r17,15 - and r2,r17 - and r3,r17 - and r4,r17 - and r5,r17 - or r2,r18 - or r3,r19 - or r4,r20 - or r5,r21 - std Z+24,r2 - std Z+25,r3 - std Z+26,r4 - std Z+27,r5 - movw r18,r28 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r2,r22 - movw r4,r28 - ldi r16,1 - and r2,r16 - and r3,r16 - and r4,r16 - and r5,r16 - lsl r2 - rol r3 - rol r4 - rol r5 - lsl r2 - rol r3 - rol r4 - rol r5 - lsl r2 - rol r3 - rol r4 - rol r5 - or r2,r18 - or r3,r19 - movw r18,r28 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r2,r18 - or r3,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - 
or r4,r18 - or r5,r19 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r2,r18 - or r3,r19 - or r4,r20 - or r5,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r4,r22 - or r5,r23 - std Z+28,r2 - std Z+29,r3 - std Z+30,r4 - std Z+31,r5 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - mov r0,r1 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - or r5,r0 - std Z+32,r3 - std Z+33,r2 - std Z+34,r4 - std Z+35,r5 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r28 - mov r28,r29 - mov r29,r0 - lsl r28 - rol r29 - adc r28,r1 - lsl r28 - rol r29 - adc r28,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r28 - std Z+39,r29 - dec r24 - breq 1733f - adiw r30,40 - rjmp 1274b -1733: - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_init, .-gift128b_init - - .text -.global gift128b_encrypt - .type gift128b_encrypt, @function -gift128b_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 19 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - movw r26,r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rjmp 765f -27: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl 
r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor 
r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or 
r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -765: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - pop r0 - pop r0 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt, 
.-gift128b_encrypt - - .text -.global gift128b_encrypt_preloaded - .type gift128b_encrypt_preloaded, @function -gift128b_encrypt_preloaded: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 19 - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - movw r26,r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rjmp 765f -27: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 
-#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 
- andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 
- eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -765: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - pop r0 - pop r0 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded - - .text -.global gift128b_decrypt - .type gift128b_decrypt, @function -gift128b_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 19 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - movw r26,r30 - subi r26,192 - sbci r27,254 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,160 - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rjmp 768f -30: - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm 
r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r1 - lsr r22 - ror r0 - lsr r22 - ror r0 - or r22,r0 - mov r0,r1 - lsr r23 - ror r0 - lsr r23 - ror r0 - or r23,r0 - mov r0,r1 - lsr r2 - ror r0 - lsr r2 - ror r0 - or r2,r0 - mov r0,r1 - lsr r3 - ror r0 - lsr r3 - ror r0 - or r3,r0 - swap r4 - swap r5 - swap r6 - swap r7 - lsl r8 - adc r8,r1 - lsl r8 - adc r8,r1 - lsl r9 - adc r9,r1 - lsl r9 - adc r9,r1 - lsl r10 - adc r10,r1 - lsl r10 - adc r10,r1 - lsl r11 - adc r11,r1 - lsl r11 - adc r11,r1 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - com r22 - com r23 - com r2 - com r3 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or 
r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - eor r13,r19 
- eor r14,r20 - eor r15,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - com r22 - com r23 - com r2 - com r3 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,119 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r15 - ror r14 - ror r13 - ror r12 - lsr r15 - ror r14 - ror r13 - ror r12 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,17 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or 
r15,r21 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -768: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - pop r0 - pop r0 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_decrypt, .-gift128b_decrypt - -#endif - -#endif diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128b-small-avr.S b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128b-small-avr.S deleted file mode 100644 index 77ef9fd..0000000 --- a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128b-small-avr.S +++ /dev/null @@ -1,6053 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - .byte 3 - .byte 3 - .byte 39 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - .byte 2 - .byte 0 - .byte 80 - .byte 20 
- .byte 129 - .byte 1 - .byte 2 - .byte 1 - .byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128b_init - .type gift128b_init, @function -gift128b_init: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 16 - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - ldi r24,4 -33: - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - mov r0,r22 - mov r22,r4 - mov r4,r0 - mov r0,r23 - mov r23,r5 - mov r5,r0 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - dec r24 - breq 5115f - rjmp 33b -5115: - subi r30,80 - sbc r31,r1 - ldi r24,2 -119: - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - st Z,r3 - std Z+1,r23 - std Z+2,r2 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - 
ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+4,r3 - std Z+5,r23 - std Z+6,r2 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+8,r3 - std Z+9,r23 - std Z+10,r2 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - 
eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+12,r3 - std Z+13,r23 - std Z+14,r2 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r3 - std Z+17,r23 - std Z+18,r2 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - 
mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+20,r3 - std Z+21,r23 - std Z+22,r2 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+24,r3 - std Z+25,r23 - std Z+26,r2 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw 
r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+28,r3 - std Z+29,r23 - std Z+30,r2 - std Z+31,r22 - dec r24 - breq 1268f - adiw r30,40 - rjmp 119b -1268: - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size gift128b_init, .-gift128b_init - - .text -.global gift128b_encrypt - .type gift128b_encrypt, @function -gift128b_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ldi r24,20 -1: - ld r22,Z+ - ld r23,Z+ - ld r2,Z+ - ld r3,Z+ - std Y+1,r22 - std Y+2,r23 - std Y+3,r2 - std Y+4,r3 - adiw r28,4 - dec r24 - brne 1b - subi r28,80 - sbc r29,r1 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in 
r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 73f - rcall 73f - rjmp 1285f -73: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov 
r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - 
mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - 
eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -811: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl 
r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std 
Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -1285: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt, .-gift128b_encrypt - - .text -.global gift128b_encrypt_preloaded - .type gift128b_encrypt_preloaded, @function -gift128b_encrypt_preloaded: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ldi r24,20 -1: - ld r22,Z+ - ld r23,Z+ - ld r2,Z+ - ld r3,Z+ - std Y+1,r22 - std Y+2,r23 - std Y+3,r2 - std Y+4,r3 - adiw r28,4 - dec r24 - brne 1b - subi r28,80 - sbc r29,r1 - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 73f -#if 
defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 73f - rcall 73f - rjmp 1285f -73: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor 
r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor 
r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 
- eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -811: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 
- andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -1285: 
-#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 40 -table_1: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - - .text -.global gift128b_decrypt - .type gift128b_decrypt, @function -gift128b_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - 
rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - 
mov r19,r18 - mov r18,r0 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r17,hh8(table_1) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -678: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 830f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - 
ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 830f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 830f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 830f - cpse r16,r1 - rjmp 678b - rjmp 1175f -830: - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - bld r11,3 - bst r8,7 - bld r11,6 - bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst r9,1 - bld r8,5 - bst r9,2 - 
bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -1175: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_decrypt, .-gift128b_decrypt - -#endif - -#endif diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128b-tiny-avr.S b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128b-tiny-avr.S deleted file mode 100644 index e7a03f1..0000000 --- a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-gift128b-tiny-avr.S +++ /dev/null @@ -1,6766 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_TINY - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - .byte 3 - .byte 3 - .byte 39 - .byte 
0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - .byte 2 - .byte 0 - .byte 80 - .byte 20 - .byte 129 - .byte 1 - .byte 2 - .byte 1 - .byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128b_init - .type gift128b_init, @function -gift128b_init: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 16 - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - st Z,r22 - std Z+1,r23 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size gift128b_init, .-gift128b_init - - .text -.global gift128b_encrypt - .type gift128b_encrypt, @function -gift128b_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - movw r30,r28 - adiw r30,1 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - ldi r24,4 -35: - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - mov r0,r22 - mov r22,r4 - mov r4,r0 - mov r0,r23 - mov r23,r5 - mov r5,r0 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - 
or r11,r0 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - dec r24 - breq 5117f - rjmp 35b -5117: - subi r30,80 - sbc r31,r1 - ldi r24,2 -121: - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - st Z,r3 - std Z+1,r23 - std Z+2,r2 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std 
Z+4,r3 - std Z+5,r23 - std Z+6,r2 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+8,r3 - std Z+9,r23 - std Z+10,r2 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 
- lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+12,r3 - std Z+13,r23 - std Z+14,r2 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r3 - std Z+17,r23 - std Z+18,r2 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+20,r3 - std Z+21,r23 - std Z+22,r2 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - 
eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+24,r3 - std Z+25,r23 - std Z+26,r2 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+28,r3 - std Z+29,r23 - std Z+30,r2 - std Z+31,r22 - dec r24 - breq 1270f - adiw r30,40 - rjmp 121b -1270: - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - 
ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 1329f - rcall 1329f - rjmp 2541f -1329: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 
- rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and 
r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or 
r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -2067: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror 
r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi 
r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -2541: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt, .-gift128b_encrypt - - .text -.global gift128b_encrypt_preloaded - .type gift128b_encrypt_preloaded, @function -gift128b_encrypt_preloaded: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - movw r30,r28 - adiw r30,1 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - ldi r24,4 -35: - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 
- st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - mov r0,r22 - mov r22,r4 - mov r4,r0 - mov r0,r23 - mov r23,r5 - mov r5,r0 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - dec r24 - breq 5117f - rjmp 35b -5117: - subi r30,80 - sbc r31,r1 - ldi r24,2 -121: - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - st Z,r3 - std Z+1,r23 - std Z+2,r2 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw 
r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+4,r3 - std Z+5,r23 - std Z+6,r2 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+8,r3 - std Z+9,r23 - std Z+10,r2 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - 
ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+12,r3 - std Z+13,r23 - std Z+14,r2 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r3 - std Z+17,r23 - std Z+18,r2 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 
- rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+20,r3 - std Z+21,r23 - std Z+22,r2 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+24,r3 - std Z+25,r23 - std Z+26,r2 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - 
rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+28,r3 - std Z+29,r23 - std Z+30,r2 - std Z+31,r22 - dec r24 - breq 1270f - adiw r30,40 - rjmp 121b -1270: - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 1329f - rcall 1329f - rjmp 2541f -1329: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw 
r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm 
r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - 
eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -2067: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - 
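The #if RAMPZ / __AVR_HAVE_LPMX__ / __AVR_TINY__ ladders used above select how each round-constant byte is fetched from program memory: elpm on large-flash parts, lpm where available, and a plain ld on cores that map flash into the data space. A rough C rendering of the same lookup, assuming avr-libc's pgm_read_byte and an illustrative table pointer:

#include <avr/pgmspace.h>
#include <stdint.h>

/* Illustrative only: the assembly walks Z through table_0 and XORs each
 * constant byte into the state; pgm_read_byte_far() would be the far
 * (RAMPZ) variant on parts with more than 64K of flash. */
static uint8_t round_constant(const uint8_t *table, uint8_t i)
{
    return pgm_read_byte(table + i);
}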
ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror 
r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -2541: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 40 -table_1: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - - .text -.global gift128b_decrypt - .type gift128b_decrypt, @function -gift128b_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 
- push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r17,hh8(table_1) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -114: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - cpse r16,r1 - rjmp 114b - rjmp 611f -266: - eor r4,r26 - eor r5,r27 
- eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - bld r11,3 - bst r8,7 - bld r11,6 - bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst r9,1 - bld r8,5 - bst r9,2 - bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - 
com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -611: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_decrypt, .-gift128b_decrypt - -#endif - -#endif diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-util.h b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
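The generic leftRotate/rightRotate macros above are GCC statement expressions so that the rotated value is evaluated only once; a rough equivalent written as inline functions, purely for illustration:

#include <stdint.h>

/* Same behaviour as the generic macros above for rotation counts 1..31. */
static inline uint32_t left_rotate32(uint32_t a, unsigned bits)
{
    return (a << bits) | (a >> (32U - bits));
}

static inline uint32_t right_rotate32(uint32_t a, unsigned bits)
{
    return (a >> bits) | (a << (32U - bits));
}

The fixed-count wrappers for these rotations follow.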
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
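Because only 1-bit and byte-aligned rotations are cheap on AVR, each composed count above is built from a byte rotation plus a few single-bit steps; one such identity, checked with the generic macros defined earlier (illustrative sketch only):

#include <assert.h>
#include <stdint.h>

/* Left rotate by 5 == rotate left by 8, then right by 3. */
static void check_compose(uint32_t x)
{
    assert(leftRotate(x, 5) == rightRotate(leftRotate(x, 8), 3));
}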
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/gift-cofb.c b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/gift-cofb.c index ed70e07..6f65524 100644 --- a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/gift-cofb.c +++ b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/gift-cofb.c @@ -206,8 +206,7 @@ int gift_cofb_aead_encrypt *clen = mlen + GIFT_COFB_TAG_SIZE; /* Set up the key schedule and use it to encrypt the nonce */ - if (!gift128b_init(&ks, k, GIFT_COFB_KEY_SIZE)) - return -1; + gift128b_init(&ks, k); Y.x[0] = be_load_word32(npub); Y.x[1] = be_load_word32(npub + 4); Y.x[2] = be_load_word32(npub + 8); @@ -320,8 +319,7 @@ int gift_cofb_aead_decrypt *mlen = clen - GIFT_COFB_TAG_SIZE; /* Set up the key schedule and use it to encrypt the nonce */ - if (!gift128b_init(&ks, k, GIFT_COFB_KEY_SIZE)) - return -1; + gift128b_init(&ks, k); Y.x[0] = be_load_word32(npub); Y.x[1] = be_load_word32(npub + 4); Y.x[2] = be_load_word32(npub + 8); diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128-config.h b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128-config.h new file mode 100644 index 0000000..62131ba --- /dev/null +++ b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128-config.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_GIFT128_CONFIG_H +#define LW_INTERNAL_GIFT128_CONFIG_H + +/** + * \file internal-gift128-config.h + * \brief Configures the variant of GIFT-128 to use. + */ + +/** + * \brief Select the full variant of GIFT-128. + * + * The full variant requires 320 bytes for the key schedule and uses the + * fixslicing method to implement encryption and decryption. + */ +#define GIFT128_VARIANT_FULL 0 + +/** + * \brief Select the small variant of GIFT-128. + * + * The small variant requires 80 bytes for the key schedule. 
The rest + * of the key schedule is expanded on the fly during encryption. + * + * The fixslicing method is used to implement encryption and the slower + * bitslicing method is used to implement decryption. The small variant + * is suitable when memory is at a premium, decryption is not needed, + * but encryption performance is still important. + */ +#define GIFT128_VARIANT_SMALL 1 + +/** + * \brief Select the tiny variant of GIFT-128. + * + * The tiny variant requires 16 bytes for the key schedule and uses the + * bitslicing method to implement encryption and decryption. It is suitable + * for use when memory is very tight and performance is not critical. + */ +#define GIFT128_VARIANT_TINY 2 + +/** + * \def GIFT128_VARIANT + * \brief Selects the default variant of GIFT-128 to use on this platform. + */ +/** + * \def GIFT128_VARIANT_ASM + * \brief Defined to 1 if the GIFT-128 implementation has been replaced + * with an assembly code version. + */ +#if defined(__AVR__) && !defined(GIFT128_VARIANT_ASM) +#define GIFT128_VARIANT_ASM 1 +#endif +#if !defined(GIFT128_VARIANT) +#define GIFT128_VARIANT GIFT128_VARIANT_FULL +#endif +#if !defined(GIFT128_VARIANT_ASM) +#define GIFT128_VARIANT_ASM 0 +#endif + +#endif diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128.c b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128.c index 681dbc8..c6ac5ec 100644 --- a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128.c +++ b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128.c @@ -23,8 +23,12 @@ #include "internal-gift128.h" #include "internal-util.h" +#if !GIFT128_VARIANT_ASM + +#if GIFT128_VARIANT != GIFT128_VARIANT_TINY + /* Round constants for GIFT-128 in the fixsliced representation */ -static uint32_t const GIFT128_RC[40] = { +static uint32_t const GIFT128_RC_fixsliced[40] = { 0x10000008, 0x80018000, 0x54000002, 0x01010181, 0x8000001f, 0x10888880, 0x6001e000, 0x51500002, 0x03030180, 0x8000002f, 0x10088880, 0x60016000, 0x41500002, 0x03030080, 0x80000027, 0x10008880, 0x4001e000, 0x11500002, @@ -34,6 +38,246 @@ static uint32_t const GIFT128_RC[40] = { 0xc001a000, 0x14500002, 0x01020181, 0x8000001a }; +#endif + +#if GIFT128_VARIANT != GIFT128_VARIANT_FULL + +/* Round constants for GIFT-128 in the bitsliced representation */ +static uint8_t const GIFT128_RC[40] = { + 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, + 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, + 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, + 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38, + 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A +}; + +#endif + +/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ +#define bit_permute_step(_y, mask, shift) \ + do { \ + uint32_t y = (_y); \ + uint32_t t = ((y >> (shift)) ^ y) & (mask); \ + (_y) = (y ^ t) ^ (t << (shift)); \ + } while (0) + +/* + * The permutation below was generated by the online permuation generator at + * "http://programming.sirrida.de/calcperm.php". 
+ * + * All of the permutuations are essentially the same, except that each is + * rotated by 8 bits with respect to the next: + * + * P0: 0 24 16 8 1 25 17 9 2 26 18 10 3 27 19 11 4 28 20 12 5 29 21 13 6 30 22 14 7 31 23 15 + * P1: 8 0 24 16 9 1 25 17 10 2 26 18 11 3 27 19 12 4 28 20 13 5 29 21 14 6 30 22 15 7 31 23 + * P2: 16 8 0 24 17 9 1 25 18 10 2 26 19 11 3 27 20 12 4 28 21 13 5 29 22 14 6 30 23 15 7 31 + * P3: 24 16 8 0 25 17 9 1 26 18 10 2 27 19 11 3 28 20 12 4 29 21 13 5 30 22 14 6 31 23 15 7 + * + * The most efficient permutation from the online generator was P3, so we + * perform it as the core of the others, and then perform a final rotation. + * + * It is possible to do slightly better than "P3 then rotate" on desktop and + * server architectures for the other permutations. But the advantage isn't + * as evident on embedded platforms so we keep things simple. + */ +#define PERM3_INNER(x) \ + do { \ + bit_permute_step(x, 0x0a0a0a0a, 3); \ + bit_permute_step(x, 0x00cc00cc, 6); \ + bit_permute_step(x, 0x0000f0f0, 12); \ + bit_permute_step(x, 0x000000ff, 24); \ + } while (0) +#define PERM0(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate8(_x); \ + } while (0) +#define PERM1(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate16(_x); \ + } while (0) +#define PERM2(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate24(_x); \ + } while (0) +#define PERM3(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) + +#define INV_PERM3_INNER(x) \ + do { \ + bit_permute_step(x, 0x00550055, 9); \ + bit_permute_step(x, 0x00003333, 18); \ + bit_permute_step(x, 0x000f000f, 12); \ + bit_permute_step(x, 0x000000ff, 24); \ + } while (0) +#define INV_PERM0(x) \ + do { \ + uint32_t _x = rightRotate8(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM1(x) \ + do { \ + uint32_t _x = rightRotate16(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM2(x) \ + do { \ + uint32_t _x = rightRotate24(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM3(x) \ + do { \ + uint32_t _x = (x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) + +/** + * \brief Converts the GIFT-128 nibble-based representation into word-based. + * + * \param output Output buffer to write the word-based version to. + * \param input Input buffer to read the nibble-based version from. + * + * The \a input and \a output buffers can be the same buffer. + */ +static void gift128n_to_words + (unsigned char *output, const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + + /* Load the input buffer into 32-bit words. We use the nibble order + * from the HYENA submission to NIST which is byte-reversed with respect + * to the nibble order of the original GIFT-128 paper. Nibble zero is in + * the first byte instead of the last, which means little-endian order. */ + s0 = le_load_word32(input + 12); + s1 = le_load_word32(input + 8); + s2 = le_load_word32(input + 4); + s3 = le_load_word32(input); + + /* Rearrange the bits so that bits 0..3 of each nibble are + * scattered to bytes 0..3 of each word. The permutation is: + * + * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 + * + * Generated with "http://programming.sirrida.de/calcperm.php". 
+ */ + #define PERM_WORDS(_x) \ + do { \ + uint32_t x = (_x); \ + bit_permute_step(x, 0x0a0a0a0a, 3); \ + bit_permute_step(x, 0x00cc00cc, 6); \ + bit_permute_step(x, 0x0000f0f0, 12); \ + bit_permute_step(x, 0x0000ff00, 8); \ + (_x) = x; \ + } while (0) + PERM_WORDS(s0); + PERM_WORDS(s1); + PERM_WORDS(s2); + PERM_WORDS(s3); + + /* Rearrange the bytes and write them to the output buffer */ + output[0] = (uint8_t)s0; + output[1] = (uint8_t)s1; + output[2] = (uint8_t)s2; + output[3] = (uint8_t)s3; + output[4] = (uint8_t)(s0 >> 8); + output[5] = (uint8_t)(s1 >> 8); + output[6] = (uint8_t)(s2 >> 8); + output[7] = (uint8_t)(s3 >> 8); + output[8] = (uint8_t)(s0 >> 16); + output[9] = (uint8_t)(s1 >> 16); + output[10] = (uint8_t)(s2 >> 16); + output[11] = (uint8_t)(s3 >> 16); + output[12] = (uint8_t)(s0 >> 24); + output[13] = (uint8_t)(s1 >> 24); + output[14] = (uint8_t)(s2 >> 24); + output[15] = (uint8_t)(s3 >> 24); +} + +/** + * \brief Converts the GIFT-128 word-based representation into nibble-based. + * + * \param output Output buffer to write the nibble-based version to. + * \param input Input buffer to read the word-based version from. + */ +static void gift128n_to_nibbles + (unsigned char *output, const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + + /* Load the input bytes and rearrange them so that s0 contains the + * most significant nibbles and s3 contains the least significant */ + s0 = (((uint32_t)(input[12])) << 24) | + (((uint32_t)(input[8])) << 16) | + (((uint32_t)(input[4])) << 8) | + ((uint32_t)(input[0])); + s1 = (((uint32_t)(input[13])) << 24) | + (((uint32_t)(input[9])) << 16) | + (((uint32_t)(input[5])) << 8) | + ((uint32_t)(input[1])); + s2 = (((uint32_t)(input[14])) << 24) | + (((uint32_t)(input[10])) << 16) | + (((uint32_t)(input[6])) << 8) | + ((uint32_t)(input[2])); + s3 = (((uint32_t)(input[15])) << 24) | + (((uint32_t)(input[11])) << 16) | + (((uint32_t)(input[7])) << 8) | + ((uint32_t)(input[3])); + + /* Apply the inverse of PERM_WORDS() from the function above */ + #define INV_PERM_WORDS(_x) \ + do { \ + uint32_t x = (_x); \ + bit_permute_step(x, 0x00aa00aa, 7); \ + bit_permute_step(x, 0x0000cccc, 14); \ + bit_permute_step(x, 0x00f000f0, 4); \ + bit_permute_step(x, 0x0000ff00, 8); \ + (_x) = x; \ + } while (0) + INV_PERM_WORDS(s0); + INV_PERM_WORDS(s1); + INV_PERM_WORDS(s2); + INV_PERM_WORDS(s3); + + /* Store the result into the output buffer as 32-bit words */ + le_store_word32(output + 12, s0); + le_store_word32(output + 8, s1); + le_store_word32(output + 4, s2); + le_store_word32(output, s3); +} + +void gift128n_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + gift128n_to_words(output, input); + gift128b_encrypt(ks, output, output); + gift128n_to_nibbles(output, output); +} + +void gift128n_decrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + gift128n_to_words(output, input); + gift128b_decrypt(ks, output, output); + gift128n_to_nibbles(output, output); +} + +#if GIFT128_VARIANT != GIFT128_VARIANT_TINY + /** * \brief Swaps bits within two words. 
* @@ -202,21 +446,27 @@ static void gift128b_compute_round_keys /* Keys 8, 9, 18, and 19 do not need any adjustment */ } +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL /* Derive the fixsliced keys for the remaining rounds 11..40 */ for (index = 20; index < 80; index += 10) { gift128b_derive_keys(ks->k + index, ks->k + index - 20); } +#endif } -int gift128b_init - (gift128b_key_schedule_t *ks, const unsigned char *key, size_t key_len) +void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) { - if (!ks || !key || key_len != 16) - return 0; gift128b_compute_round_keys (ks, be_load_word32(key), be_load_word32(key + 4), be_load_word32(key + 8), be_load_word32(key + 12)); - return 1; +} + +void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) +{ + /* Use the little-endian key byte order from the HYENA submission */ + gift128b_compute_round_keys + (ks, le_load_word32(key + 12), le_load_word32(key + 8), + le_load_word32(key + 4), le_load_word32(key)); } /** @@ -521,11 +771,37 @@ int gift128b_init gift128b_inv_sbox(s3, s1, s2, s0); \ } while (0) +#else /* GIFT128_VARIANT_TINY */ + +void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) +{ + /* Mirror the fixslicing word order of 3, 1, 2, 0 */ + ks->k[0] = be_load_word32(key + 12); + ks->k[1] = be_load_word32(key + 4); + ks->k[2] = be_load_word32(key + 8); + ks->k[3] = be_load_word32(key); +} + +void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) +{ + /* Use the little-endian key byte order from the HYENA submission + * and mirror the fixslicing word order of 3, 1, 2, 0 */ + ks->k[0] = le_load_word32(key); + ks->k[1] = le_load_word32(key + 8); + ks->k[2] = le_load_word32(key + 4); + ks->k[3] = le_load_word32(key + 12); +} + +#endif /* GIFT128_VARIANT_TINY */ + +#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL + void gift128b_encrypt (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { uint32_t s0, s1, s2, s3; + uint32_t k[20]; /* Copy the plaintext into the state buffer and convert from big endian */ s0 = be_load_word32(input); @@ -534,14 +810,20 @@ void gift128b_encrypt s3 = be_load_word32(input + 12); /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer in big endian */ be_store_word32(output, s0); @@ -555,6 +837,7 @@ void 
gift128b_encrypt_preloaded const uint32_t input[4]) { uint32_t s0, s1, s2, s3; + uint32_t k[20]; /* Copy the plaintext into local variables */ s0 = input[0]; @@ -563,14 +846,20 @@ void gift128b_encrypt_preloaded s3 = input[3]; /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer */ output[0] = s0; @@ -579,7 +868,55 @@ void gift128b_encrypt_preloaded output[3] = s3; } -void gift128b_decrypt +void gift128t_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, uint32_t tweak) +{ + uint32_t s0, s1, s2, s3; + uint32_t k[20]; + + /* Copy the plaintext into the state buffer and convert from nibbles */ + gift128n_to_words(output, input); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* Perform all 40 rounds five at a time using the fixsliced method. 
+ * Every 5 rounds except the last we add the tweak value to the state */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + s0 ^= tweak; + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + s0 ^= tweak; + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + s0 ^= tweak; + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); + gift128n_to_nibbles(output, output); +} + +#elif GIFT128_VARIANT == GIFT128_VARIANT_FULL + +void gift128b_encrypt (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { @@ -592,14 +929,14 @@ void gift128b_decrypt s3 = be_load_word32(input + 12); /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer in big endian */ be_store_word32(output, s0); @@ -608,173 +945,308 @@ void gift128b_decrypt be_store_word32(output + 12, s3); } -int gift128n_init - (gift128n_key_schedule_t *ks, const unsigned char *key, size_t key_len) +void gift128b_encrypt_preloaded + (const gift128b_key_schedule_t *ks, uint32_t output[4], + const uint32_t input[4]) { - /* Use the little-endian key byte order from the HYENA submission */ - if (!ks || !key || key_len != 16) - return 0; - gift128b_compute_round_keys - (ks, le_load_word32(key + 12), le_load_word32(key + 8), - le_load_word32(key + 4), le_load_word32(key)); - return 1; + uint32_t s0, s1, s2, s3; + + /* Copy the plaintext into local variables */ + s0 = input[0]; + s1 = input[1]; + s2 = input[2]; + s3 = input[3]; + + /* Perform all 40 rounds five at a time using the fixsliced method */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(ks->k + 30, 
GIFT128_RC_fixsliced + 15); + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer */ + output[0] = s0; + output[1] = s1; + output[2] = s2; + output[3] = s3; } -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) +void gift128t_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, uint32_t tweak) +{ + uint32_t s0, s1, s2, s3; -/** - * \brief Converts the GIFT-128 nibble-based representation into word-based. - * - * \param output Output buffer to write the word-based version to. - * \param input Input buffer to read the nibble-based version from. - * - * The \a input and \a output buffers can be the same buffer. - */ -static void gift128n_to_words - (unsigned char *output, const unsigned char *input) + /* Copy the plaintext into the state buffer and convert from nibbles */ + gift128n_to_words(output, input); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* Perform all 40 rounds five at a time using the fixsliced method. + * Every 5 rounds except the last we add the tweak value to the state */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); + gift128n_to_nibbles(output, output); +} + +#else /* GIFT128_VARIANT_TINY */ + +void gift128b_encrypt + (const gift128b_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) { uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Load the input buffer into 32-bit words. We use the nibble order - * from the HYENA submission to NIST which is byte-reversed with respect - * to the nibble order of the original GIFT-128 paper. Nibble zero is in - * the first byte instead of the last, which means little-endian order. */ - s0 = le_load_word32(input + 12); - s1 = le_load_word32(input + 8); - s2 = le_load_word32(input + 4); - s3 = le_load_word32(input); + /* Copy the plaintext into the state buffer and convert from big endian */ + s0 = be_load_word32(input); + s1 = be_load_word32(input + 4); + s2 = be_load_word32(input + 8); + s3 = be_load_word32(input + 12); - /* Rearrange the bits so that bits 0..3 of each nibble are - * scattered to bytes 0..3 of each word. 
The permutation is: - * - * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 - * - * Generated with "http://programming.sirrida.de/calcperm.php". - */ - #define PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - PERM_WORDS(s0); - PERM_WORDS(s1); - PERM_WORDS(s2); - PERM_WORDS(s3); + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } - /* Rearrange the bytes and write them to the output buffer */ - output[0] = (uint8_t)s0; - output[1] = (uint8_t)s1; - output[2] = (uint8_t)s2; - output[3] = (uint8_t)s3; - output[4] = (uint8_t)(s0 >> 8); - output[5] = (uint8_t)(s1 >> 8); - output[6] = (uint8_t)(s2 >> 8); - output[7] = (uint8_t)(s3 >> 8); - output[8] = (uint8_t)(s0 >> 16); - output[9] = (uint8_t)(s1 >> 16); - output[10] = (uint8_t)(s2 >> 16); - output[11] = (uint8_t)(s3 >> 16); - output[12] = (uint8_t)(s0 >> 24); - output[13] = (uint8_t)(s1 >> 24); - output[14] = (uint8_t)(s2 >> 24); - output[15] = (uint8_t)(s3 >> 24); + /* Pack the state into the ciphertext buffer in big endian */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); } -/** - * \brief Converts the GIFT-128 word-based representation into nibble-based. - * - * \param output Output buffer to write the nibble-based version to. - * \param input Input buffer to read the word-based version from. 
- */ -static void gift128n_to_nibbles - (unsigned char *output, const unsigned char *input) +void gift128b_encrypt_preloaded + (const gift128b_key_schedule_t *ks, uint32_t output[4], + const uint32_t input[4]) { uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Load the input bytes and rearrange them so that s0 contains the - * most significant nibbles and s3 contains the least significant */ - s0 = (((uint32_t)(input[12])) << 24) | - (((uint32_t)(input[8])) << 16) | - (((uint32_t)(input[4])) << 8) | - ((uint32_t)(input[0])); - s1 = (((uint32_t)(input[13])) << 24) | - (((uint32_t)(input[9])) << 16) | - (((uint32_t)(input[5])) << 8) | - ((uint32_t)(input[1])); - s2 = (((uint32_t)(input[14])) << 24) | - (((uint32_t)(input[10])) << 16) | - (((uint32_t)(input[6])) << 8) | - ((uint32_t)(input[2])); - s3 = (((uint32_t)(input[15])) << 24) | - (((uint32_t)(input[11])) << 16) | - (((uint32_t)(input[7])) << 8) | - ((uint32_t)(input[3])); + /* Copy the plaintext into the state buffer */ + s0 = input[0]; + s1 = input[1]; + s2 = input[2]; + s3 = input[3]; - /* Apply the inverse of PERM_WORDS() from the function above */ - #define INV_PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x00aa00aa, 7); \ - bit_permute_step(x, 0x0000cccc, 14); \ - bit_permute_step(x, 0x00f000f0, 4); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - INV_PERM_WORDS(s0); - INV_PERM_WORDS(s1); - INV_PERM_WORDS(s2); - INV_PERM_WORDS(s3); + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } - /* Store the result into the output buffer as 32-bit words */ - le_store_word32(output + 12, s0); - le_store_word32(output + 8, s1); - le_store_word32(output + 4, s2); - le_store_word32(output, s3); + /* Pack the state into the ciphertext buffer */ + output[0] = s0; + output[1] = s1; + output[2] = s2; + output[3] = s3; } -void gift128n_encrypt +void gift128t_encrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) + const unsigned char *input, uint32_t tweak) { + uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; + + /* Copy the plaintext into the state buffer and convert from nibbles */ gift128n_to_words(output, input); - gift128b_encrypt(ks, output, output); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 
0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* AddTweak - XOR in the tweak every 5 rounds except the last */ + if (((round + 1) % 5) == 0 && round < 39) + s0 ^= tweak; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); gift128n_to_nibbles(output, output); } -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, +#endif /* GIFT128_VARIANT_TINY */ + +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL + +void gift128b_decrypt + (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { - gift128n_to_words(output, input); - gift128b_decrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} + uint32_t s0, s1, s2, s3; -/* 4-bit tweak values expanded to 32-bit */ -static uint32_t const GIFT128_tweaks[16] = { - 0x00000000, 0xe1e1e1e1, 0xd2d2d2d2, 0x33333333, - 0xb4b4b4b4, 0x55555555, 0x66666666, 0x87878787, - 0x78787878, 0x99999999, 0xaaaaaaaa, 0x4b4b4b4b, - 0xcccccccc, 0x2d2d2d2d, 0x1e1e1e1e, 0xffffffff -}; + /* Copy the plaintext into the state buffer and convert from big endian */ + s0 = be_load_word32(input); + s1 = be_load_word32(input + 4); + s2 = be_load_word32(input + 8); + s3 = be_load_word32(input + 12); -void gift128t_encrypt + /* Perform all 40 rounds five at a time using the fixsliced method */ + gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + + /* Pack the state into the ciphertext buffer in big endian */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); +} + +void gift128t_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak) + const unsigned char *input, uint32_t tweak) { - uint32_t s0, s1, s2, s3, tword; + uint32_t s0, s1, s2, s3; - /* Copy the plaintext into the state buffer and convert from nibbles */ + /* Copy the ciphertext into the state buffer and convert from nibbles */ gift128n_to_words(output, input); s0 = be_load_word32(output); s1 = be_load_word32(output + 4); @@ -782,25 +1254,24 @@ void gift128t_encrypt s3 = be_load_word32(output + 12); /* Perform all 40 rounds five at a time using the fixsliced method. 
- * Every 5 rounds except the last we add the tweak value to the state */ - tword = GIFT128_tweaks[tweak]; - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); + * Every 5 rounds except the first we add the tweak value to the state */ + gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - /* Pack the state into the ciphertext buffer in nibble form */ + /* Pack the state into the plaintext buffer in nibble form */ be_store_word32(output, s0); be_store_word32(output + 4, s1); be_store_word32(output + 8, s2); @@ -808,37 +1279,211 @@ void gift128t_encrypt gift128n_to_nibbles(output, output); } +#else /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ + +/* The small variant uses fixslicing for encryption, but we need to change + * to bitslicing for decryption because of the difficulty of fast-forwarding + * the fixsliced key schedule to the end. So the tiny variant is used for + * decryption when the small variant is selected. Since the NIST AEAD modes + * for GIFT-128 only use the block encrypt operation, the inefficiencies + * in decryption don't matter all that much */ + +/** + * \def gift128b_load_and_forward_schedule() + * \brief Generate the decryption key at the end of the last round. + * + * To do that, we run the block operation forward to determine the + * final state of the key schedule after the last round: + * + * w0 = ks->k[0]; + * w1 = ks->k[1]; + * w2 = ks->k[2]; + * w3 = ks->k[3]; + * for (round = 0; round < 40; ++round) { + * temp = w3; + * w3 = w2; + * w2 = w1; + * w1 = w0; + * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + * } + * + * We can short-cut all of the above by noticing that we don't need + * to do the word rotations. Every 4 rounds, the rotation alignment + * returns to the original position and each word has been rotated + * by applying the "2 right and 4 left" bit-rotation step to it. + * We then repeat that 10 times for the full 40 rounds. The overall + * effect is to apply a "20 right and 40 left" bit-rotation to every + * word in the key schedule. That is equivalent to "4 right and 8 left" + * on the 16-bit sub-words. 
+ */ +#if GIFT128_VARIANT != GIFT128_VARIANT_SMALL +#define gift128b_load_and_forward_schedule() \ + do { \ + w0 = ks->k[3]; \ + w1 = ks->k[1]; \ + w2 = ks->k[2]; \ + w3 = ks->k[0]; \ + w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ + ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ + w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ + ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ + w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ + ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ + w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ + ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ + } while (0) +#else +/* The small variant needs to also undo some of the rotations that were + * done to generate the fixsliced version of the key schedule */ +#define gift128b_load_and_forward_schedule() \ + do { \ + w0 = ks->k[3]; \ + w1 = ks->k[1]; \ + w2 = ks->k[2]; \ + w3 = ks->k[0]; \ + gift128b_swap_move(w3, w3, 0x000000FFU, 24); \ + gift128b_swap_move(w3, w3, 0x00003333U, 18); \ + gift128b_swap_move(w3, w3, 0x000F000FU, 12); \ + gift128b_swap_move(w3, w3, 0x00550055U, 9); \ + gift128b_swap_move(w1, w1, 0x000000FFU, 24); \ + gift128b_swap_move(w1, w1, 0x00003333U, 18); \ + gift128b_swap_move(w1, w1, 0x000F000FU, 12); \ + gift128b_swap_move(w1, w1, 0x00550055U, 9); \ + gift128b_swap_move(w2, w2, 0x000000FFU, 24); \ + gift128b_swap_move(w2, w2, 0x000F000FU, 12); \ + gift128b_swap_move(w2, w2, 0x03030303U, 6); \ + gift128b_swap_move(w2, w2, 0x11111111U, 3); \ + gift128b_swap_move(w0, w0, 0x000000FFU, 24); \ + gift128b_swap_move(w0, w0, 0x000F000FU, 12); \ + gift128b_swap_move(w0, w0, 0x03030303U, 6); \ + gift128b_swap_move(w0, w0, 0x11111111U, 3); \ + w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ + ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ + w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ + ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ + w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ + ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ + w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ + ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ + } while (0) +#endif + +void gift128b_decrypt + (const gift128b_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; + + /* Copy the ciphertext into the state buffer and convert from big endian */ + s0 = be_load_word32(input); + s1 = be_load_word32(input + 4); + s2 = be_load_word32(input + 8); + s3 = be_load_word32(input + 12); + + /* Generate the decryption key at the end of the last round */ + gift128b_load_and_forward_schedule(); + + /* Perform all 40 rounds */ + for (round = 40; round > 0; --round) { + /* Rotate the key schedule backwards */ + temp = w0; + w0 = w1; + w1 = w2; + w2 = w3; + w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | + ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; + + /* InvPermBits - apply the inverse of the 128-bit permutation */ + INV_PERM0(s0); + INV_PERM1(s1); + INV_PERM2(s2); + INV_PERM3(s3); + + /* InvSubCells - apply the inverse of the S-box */ + temp = s0; + s0 = s3; + s3 = temp; + s2 ^= s0 & s1; + s3 ^= 0xFFFFFFFFU; + s1 ^= s3; + s3 ^= s2; + s2 ^= s0 | s1; + s0 ^= s1 
& s3; + s1 ^= s0 & s2; + } + + /* Pack the state into the plaintext buffer in big endian */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); +} + void gift128t_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak) + const unsigned char *input, uint32_t tweak) { - uint32_t s0, s1, s2, s3, tword; + uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Copy the ciphertext into the state buffer and convert from nibbles */ + /* Copy the plaintext into the state buffer and convert from nibbles */ gift128n_to_words(output, input); s0 = be_load_word32(output); s1 = be_load_word32(output + 4); s2 = be_load_word32(output + 8); s3 = be_load_word32(output + 12); - /* Perform all 40 rounds five at a time using the fixsliced method. - * Every 5 rounds except the first we add the tweak value to the state */ - tword = GIFT128_tweaks[tweak]; - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC); + /* Generate the decryption key at the end of the last round */ + gift128b_load_and_forward_schedule(); + + /* Perform all 40 rounds */ + for (round = 40; round > 0; --round) { + /* Rotate the key schedule backwards */ + temp = w0; + w0 = w1; + w1 = w2; + w2 = w3; + w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | + ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); + + /* AddTweak - XOR in the tweak every 5 rounds except the last */ + if ((round % 5) == 0 && round < 40) + s0 ^= tweak; + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; + + /* InvPermBits - apply the inverse of the 128-bit permutation */ + INV_PERM0(s0); + INV_PERM1(s1); + INV_PERM2(s2); + INV_PERM3(s3); + + /* InvSubCells - apply the inverse of the S-box */ + temp = s0; + s0 = s3; + s3 = temp; + s2 ^= s0 & s1; + s3 ^= 0xFFFFFFFFU; + s1 ^= s3; + s3 ^= s2; + s2 ^= s0 | s1; + s0 ^= s1 & s3; + s1 ^= s0 & s2; + } /* Pack the state into the plaintext buffer in nibble form */ be_store_word32(output, s0); @@ -847,3 +1492,7 @@ void gift128t_decrypt be_store_word32(output + 12, s3); gift128n_to_nibbles(output, output); } + +#endif /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ + +#endif /* !GIFT128_VARIANT_ASM */ diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128.h b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128.h index 1ac40e5..f57d143 100644 --- a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128.h +++ b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128.h @@ -47,11 +47,13 @@ * in any of the NIST submissions so we don't bother with it in this library. 
* * References: https://eprint.iacr.org/2017/622.pdf, + * https://eprint.iacr.org/2020/412.pdf, * https://giftcipher.github.io/gift/ */ #include #include +#include "internal-gift128-config.h" #ifdef __cplusplus extern "C" { @@ -63,16 +65,23 @@ extern "C" { #define GIFT128_BLOCK_SIZE 16 /** - * \brief Number of round keys for the fixsliced representation of GIFT-128. + * \var GIFT128_ROUND_KEYS + * \brief Number of round keys for the GIFT-128 key schedule. */ +#if GIFT128_VARIANT == GIFT128_VARIANT_TINY +#define GIFT128_ROUND_KEYS 4 +#elif GIFT128_VARIANT == GIFT128_VARIANT_SMALL +#define GIFT128_ROUND_KEYS 20 +#else #define GIFT128_ROUND_KEYS 80 +#endif /** * \brief Structure of the key schedule for GIFT-128 (bit-sliced). */ typedef struct { - /** Pre-computed round keys in the fixsliced form */ + /** Pre-computed round keys for bit-sliced GIFT-128 */ uint32_t k[GIFT128_ROUND_KEYS]; } gift128b_key_schedule_t; @@ -81,14 +90,9 @@ typedef struct * \brief Initializes the key schedule for GIFT-128 (bit-sliced). * * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. + * \param key Points to the 16 bytes of the key data. */ -int gift128b_init - (gift128b_key_schedule_t *ks, const unsigned char *key, size_t key_len); +void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key); /** * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced). @@ -145,14 +149,9 @@ typedef gift128b_key_schedule_t gift128n_key_schedule_t; * \brief Initializes the key schedule for GIFT-128 (nibble-based). * * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. + * \param key Points to the 16 bytes of the key data. */ -int gift128n_init - (gift128n_key_schedule_t *ks, const unsigned char *key, size_t key_len); +void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key); /** * \brief Encrypts a 128-bit block with GIFT-128 (nibble-based). 
@@ -182,13 +181,31 @@ void gift128n_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, const unsigned char *input); +/* 4-bit tweak values expanded to 32-bit for TweGIFT-128 */ +#define GIFT128T_TWEAK_0 0x00000000 /**< TweGIFT-128 tweak value 0 */ +#define GIFT128T_TWEAK_1 0xe1e1e1e1 /**< TweGIFT-128 tweak value 1 */ +#define GIFT128T_TWEAK_2 0xd2d2d2d2 /**< TweGIFT-128 tweak value 2 */ +#define GIFT128T_TWEAK_3 0x33333333 /**< TweGIFT-128 tweak value 3 */ +#define GIFT128T_TWEAK_4 0xb4b4b4b4 /**< TweGIFT-128 tweak value 4 */ +#define GIFT128T_TWEAK_5 0x55555555 /**< TweGIFT-128 tweak value 5 */ +#define GIFT128T_TWEAK_6 0x66666666 /**< TweGIFT-128 tweak value 6 */ +#define GIFT128T_TWEAK_7 0x87878787 /**< TweGIFT-128 tweak value 7 */ +#define GIFT128T_TWEAK_8 0x78787878 /**< TweGIFT-128 tweak value 8 */ +#define GIFT128T_TWEAK_9 0x99999999 /**< TweGIFT-128 tweak value 9 */ +#define GIFT128T_TWEAK_10 0xaaaaaaaa /**< TweGIFT-128 tweak value 10 */ +#define GIFT128T_TWEAK_11 0x4b4b4b4b /**< TweGIFT-128 tweak value 11 */ +#define GIFT128T_TWEAK_12 0xcccccccc /**< TweGIFT-128 tweak value 12 */ +#define GIFT128T_TWEAK_13 0x2d2d2d2d /**< TweGIFT-128 tweak value 13 */ +#define GIFT128T_TWEAK_14 0x1e1e1e1e /**< TweGIFT-128 tweak value 14 */ +#define GIFT128T_TWEAK_15 0xffffffff /**< TweGIFT-128 tweak value 15 */ + /** * \brief Encrypts a 128-bit block with TweGIFT-128 (tweakable variant). * * \param ks Points to the GIFT-128 key schedule. * \param output Output buffer which must be at least 16 bytes in length. * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value. + * \param tweak 4-bit tweak value expanded to 32-bit. * * The \a input and \a output buffers can be the same buffer for * in-place encryption. @@ -200,7 +217,7 @@ void gift128n_decrypt */ void gift128t_encrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak); + const unsigned char *input, uint32_t tweak); /** * \brief Decrypts a 128-bit block with TweGIFT-128 (tweakable variant). @@ -208,7 +225,7 @@ void gift128t_encrypt * \param ks Points to the GIFT-128 key schedule. * \param output Output buffer which must be at least 16 bytes in length. * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value. + * \param tweak 4-bit tweak value expanded to 32-bit. * * The \a input and \a output buffers can be the same buffer for * in-place encryption. 
@@ -220,7 +237,7 @@ void gift128t_encrypt */ void gift128t_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak); + const unsigned char *input, uint32_t tweak); #ifdef __cplusplus } diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128b-avr.S b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128b-avr.S new file mode 100644 index 0000000..641613a --- /dev/null +++ b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128b-avr.S @@ -0,0 +1,2104 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 40 +table_0: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 11 + .byte 23 + .byte 46 + .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128b_init + .type gift128b_init, @function +gift128b_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + std Z+4,r18 + std Z+5,r19 + std Z+6,r20 + std Z+7,r21 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + std Z+12,r18 + std Z+13,r19 + std Z+14,r20 + std Z+15,r21 + ret + .size gift128b_init, .-gift128b_init + + .text +.global gift128b_encrypt + .type gift128b_encrypt, @function +gift128b_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 36 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r17,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + mov r16,r1 +46: + rcall 199f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + rcall 199f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + 
eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + rcall 199f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + rcall 199f + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + ldi r17,40 + cpse r16,r17 + rjmp 46b + rjmp 548f +199: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + movw r18,r22 + movw r20,r2 + mov r0,r4 + and r0,r18 + eor r8,r0 + mov r0,r5 + and r0,r19 + eor r9,r0 + mov r0,r6 + and r0,r20 + eor r10,r0 + mov r0,r7 + and r0,r21 + eor r11,r0 + movw r22,r12 + movw r2,r14 + movw r12,r18 + movw r14,r20 + bst r22,1 + bld r0,0 + bst r22,4 + bld r22,1 + bst r2,0 + bld r22,4 + bst r22,2 + bld r2,0 + bst r23,0 + bld r22,2 + bst r22,3 + bld r23,0 + bst r23,4 + bld r22,3 + bst r2,3 + bld r23,4 + bst r23,6 + bld r2,3 + bst r3,3 + bld r23,6 + bst r23,5 + bld r3,3 + bst r2,7 + bld r23,5 + bst r3,6 + bld r2,7 + bst r3,1 + bld r3,6 + bst r22,5 + bld r3,1 + bst r2,4 + bld r22,5 + bst r2,2 + bld r2,4 + bst r23,2 + bld r2,2 + bst r23,3 + bld r23,2 + bst r23,7 + bld r23,3 + bst r3,7 + bld r23,7 + bst r3,5 + bld r3,7 + bst r2,5 + bld r3,5 + bst r2,6 + bld r2,5 + bst r3,2 + bld r2,6 + bst r23,1 + bld r3,2 + bst r22,7 + bld r23,1 + bst r3,4 + bld r22,7 + bst r2,1 + bld r3,4 + bst r22,6 + bld r2,1 + bst r3,0 + bld r22,6 + bst r0,0 + bld r3,0 + bst r4,0 + bld r0,0 + bst r4,1 + bld r4,0 + bst r4,5 + bld r4,1 + bst r6,5 + bld r4,5 + bst r6,7 + bld r6,5 + bst r7,7 + bld r6,7 + bst r7,6 + bld r7,7 + bst r7,2 + bld r7,6 + bst r5,2 + bld r7,2 + bst r5,0 + bld r5,2 + bst r0,0 + bld r5,0 + bst r4,2 + bld r0,0 + bst r5,1 + bld r4,2 + bst r4,4 + bld r5,1 + bst r6,1 + bld r4,4 + bst r4,7 + bld r6,1 + bst r7,5 + bld r4,7 + bst r6,6 + bld r7,5 + bst r7,3 + bld r6,6 + bst r5,6 + bld r7,3 + bst r7,0 + bld r5,6 + bst r0,0 + bld r7,0 + bst r4,3 + bld r0,0 + bst r5,5 + bld r4,3 + bst r6,4 + bld r5,5 + bst r6,3 + bld r6,4 + bst r5,7 + bld r6,3 + bst r7,4 + bld r5,7 + bst r6,2 + bld r7,4 + bst r5,3 + bld r6,2 + bst r5,4 + bld 
r5,3 + bst r6,0 + bld r5,4 + bst r0,0 + bld r6,0 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r8,2 + bld r8,0 + bst r9,2 + bld r8,2 + bst r9,1 + bld r9,2 + bst r8,5 + bld r9,1 + bst r10,6 + bld r8,5 + bst r11,0 + bld r10,6 + bst r8,3 + bld r11,0 + bst r9,6 + bld r8,3 + bst r11,1 + bld r9,6 + bst r8,7 + bld r11,1 + bst r11,6 + bld r8,7 + bst r11,3 + bld r11,6 + bst r9,7 + bld r11,3 + bst r11,5 + bld r9,7 + bst r10,7 + bld r11,5 + bst r11,4 + bld r10,7 + bst r10,3 + bld r11,4 + bst r9,4 + bld r10,3 + bst r10,1 + bld r9,4 + bst r8,4 + bld r10,1 + bst r10,2 + bld r8,4 + bst r9,0 + bld r10,2 + bst r8,1 + bld r9,0 + bst r8,6 + bld r8,1 + bst r11,2 + bld r8,6 + bst r9,3 + bld r11,2 + bst r9,5 + bld r9,3 + bst r10,5 + bld r9,5 + bst r10,4 + bld r10,5 + bst r10,0 + bld r10,4 + bst r0,0 + bld r10,0 + bst r12,0 + bld r0,0 + bst r12,3 + bld r12,0 + bst r13,7 + bld r12,3 + bst r15,6 + bld r13,7 + bst r15,0 + bld r15,6 + bst r0,0 + bld r15,0 + bst r12,1 + bld r0,0 + bst r12,7 + bld r12,1 + bst r15,7 + bld r12,7 + bst r15,4 + bld r15,7 + bst r14,0 + bld r15,4 + bst r0,0 + bld r14,0 + bst r12,2 + bld r0,0 + bst r13,3 + bld r12,2 + bst r13,6 + bld r13,3 + bst r15,2 + bld r13,6 + bst r13,0 + bld r15,2 + bst r0,0 + bld r13,0 + bst r12,4 + bld r0,0 + bst r14,3 + bld r12,4 + bst r13,5 + bld r14,3 + bst r14,6 + bld r13,5 + bst r15,1 + bld r14,6 + bst r0,0 + bld r15,1 + bst r12,5 + bld r0,0 + bst r14,7 + bld r12,5 + bst r15,5 + bld r14,7 + bst r14,4 + bld r15,5 + bst r14,1 + bld r14,4 + bst r0,0 + bld r14,1 + bst r12,6 + bld r0,0 + bst r15,3 + bld r12,6 + bst r13,4 + bld r15,3 + bst r14,2 + bld r13,4 + bst r13,1 + bld r14,2 + bst r0,0 + bld r13,1 + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + inc r16 + ret +548: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt, .-gift128b_encrypt + + .text +.global gift128b_encrypt_preloaded + .type gift128b_encrypt_preloaded, @function +gift128b_encrypt_preloaded: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 36 + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + std Y+9,r26 + std Y+10,r27 + std 
Y+11,r24 + std Y+12,r25 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r17,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + mov r16,r1 +46: + rcall 199f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + rcall 199f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + rcall 199f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + rcall 199f + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + ldi r17,40 + cpse r16,r17 + rjmp 46b + rjmp 548f +199: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + movw r18,r22 + movw r20,r2 + mov r0,r4 + and r0,r18 + eor r8,r0 + mov r0,r5 + and r0,r19 + eor r9,r0 + mov r0,r6 + and r0,r20 + eor r10,r0 + mov r0,r7 + and r0,r21 + eor r11,r0 + movw r22,r12 + movw r2,r14 + movw r12,r18 + movw r14,r20 + bst r22,1 + bld r0,0 + bst r22,4 + bld r22,1 + bst r2,0 + bld r22,4 + bst r22,2 + bld r2,0 + bst r23,0 + bld r22,2 + bst r22,3 + bld r23,0 + bst r23,4 + bld r22,3 + bst r2,3 + bld r23,4 + bst r23,6 + bld r2,3 + bst r3,3 + bld r23,6 + bst r23,5 + bld r3,3 + bst r2,7 + bld r23,5 + bst r3,6 + bld r2,7 + bst r3,1 + bld r3,6 + bst r22,5 + bld r3,1 + bst r2,4 + bld r22,5 + bst r2,2 + bld r2,4 + bst r23,2 + bld r2,2 + bst r23,3 + bld r23,2 + bst r23,7 + bld r23,3 + bst r3,7 + bld r23,7 + bst r3,5 + bld r3,7 + bst r2,5 + bld r3,5 + bst r2,6 + bld r2,5 + bst r3,2 + bld r2,6 + bst r23,1 + bld r3,2 + bst r22,7 + bld 
r23,1 + bst r3,4 + bld r22,7 + bst r2,1 + bld r3,4 + bst r22,6 + bld r2,1 + bst r3,0 + bld r22,6 + bst r0,0 + bld r3,0 + bst r4,0 + bld r0,0 + bst r4,1 + bld r4,0 + bst r4,5 + bld r4,1 + bst r6,5 + bld r4,5 + bst r6,7 + bld r6,5 + bst r7,7 + bld r6,7 + bst r7,6 + bld r7,7 + bst r7,2 + bld r7,6 + bst r5,2 + bld r7,2 + bst r5,0 + bld r5,2 + bst r0,0 + bld r5,0 + bst r4,2 + bld r0,0 + bst r5,1 + bld r4,2 + bst r4,4 + bld r5,1 + bst r6,1 + bld r4,4 + bst r4,7 + bld r6,1 + bst r7,5 + bld r4,7 + bst r6,6 + bld r7,5 + bst r7,3 + bld r6,6 + bst r5,6 + bld r7,3 + bst r7,0 + bld r5,6 + bst r0,0 + bld r7,0 + bst r4,3 + bld r0,0 + bst r5,5 + bld r4,3 + bst r6,4 + bld r5,5 + bst r6,3 + bld r6,4 + bst r5,7 + bld r6,3 + bst r7,4 + bld r5,7 + bst r6,2 + bld r7,4 + bst r5,3 + bld r6,2 + bst r5,4 + bld r5,3 + bst r6,0 + bld r5,4 + bst r0,0 + bld r6,0 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r8,2 + bld r8,0 + bst r9,2 + bld r8,2 + bst r9,1 + bld r9,2 + bst r8,5 + bld r9,1 + bst r10,6 + bld r8,5 + bst r11,0 + bld r10,6 + bst r8,3 + bld r11,0 + bst r9,6 + bld r8,3 + bst r11,1 + bld r9,6 + bst r8,7 + bld r11,1 + bst r11,6 + bld r8,7 + bst r11,3 + bld r11,6 + bst r9,7 + bld r11,3 + bst r11,5 + bld r9,7 + bst r10,7 + bld r11,5 + bst r11,4 + bld r10,7 + bst r10,3 + bld r11,4 + bst r9,4 + bld r10,3 + bst r10,1 + bld r9,4 + bst r8,4 + bld r10,1 + bst r10,2 + bld r8,4 + bst r9,0 + bld r10,2 + bst r8,1 + bld r9,0 + bst r8,6 + bld r8,1 + bst r11,2 + bld r8,6 + bst r9,3 + bld r11,2 + bst r9,5 + bld r9,3 + bst r10,5 + bld r9,5 + bst r10,4 + bld r10,5 + bst r10,0 + bld r10,4 + bst r0,0 + bld r10,0 + bst r12,0 + bld r0,0 + bst r12,3 + bld r12,0 + bst r13,7 + bld r12,3 + bst r15,6 + bld r13,7 + bst r15,0 + bld r15,6 + bst r0,0 + bld r15,0 + bst r12,1 + bld r0,0 + bst r12,7 + bld r12,1 + bst r15,7 + bld r12,7 + bst r15,4 + bld r15,7 + bst r14,0 + bld r15,4 + bst r0,0 + bld r14,0 + bst r12,2 + bld r0,0 + bst r13,3 + bld r12,2 + bst r13,6 + bld r13,3 + bst r15,2 + bld r13,6 + bst r13,0 + bld r15,2 + bst r0,0 + bld r13,0 + bst r12,4 + bld r0,0 + bst r14,3 + bld r12,4 + bst r13,5 + bld r14,3 + bst r14,6 + bld r13,5 + bst r15,1 + bld r14,6 + bst r0,0 + bld r15,1 + bst r12,5 + bld r0,0 + bst r14,7 + bld r12,5 + bst r15,5 + bld r14,7 + bst r14,4 + bld r15,5 + bst r14,1 + bld r14,4 + bst r0,0 + bld r14,1 + bst r12,6 + bld r0,0 + bst r15,3 + bld r12,6 + bst r13,4 + bld r15,3 + bst r14,2 + bld r13,4 + bst r13,1 + bld r14,2 + bst r0,0 + bld r13,1 + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + inc r16 + ret +548: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded + + .text +.global gift128b_decrypt + .type gift128b_decrypt, @function +gift128b_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + 
push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r17,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +114: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + cpse r16,r1 + rjmp 114b + rjmp 611f 
+266: + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 + bst r7,6 + bld r7,2 + bst r7,7 + bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld r11,0 + bst r8,5 + bld r10,6 + bst r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + eor r8,r18 + eor r9,r19 
+ eor r10,r20 + eor r11,r21 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +611: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_decrypt, .-gift128b_decrypt + +#endif diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128b-full-avr.S b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128b-full-avr.S new file mode 100644 index 0000000..ff11875 --- /dev/null +++ b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128b-full-avr.S @@ -0,0 +1,5037 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 + .byte 128 + + .text 
+.global gift128b_init + .type gift128b_init, @function +gift128b_init: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 18 + ld r13,X+ + ld r12,X+ + ld r11,X+ + ld r10,X+ + ld r5,X+ + ld r4,X+ + ld r3,X+ + ld r2,X+ + ld r9,X+ + ld r8,X+ + ld r7,X+ + ld r6,X+ + ld r29,X+ + ld r28,X+ + ld r23,X+ + ld r22,X+ + st Z+,r22 + st Z+,r23 + st Z+,r28 + st Z+,r29 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + ldi r24,4 +33: + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r29 + ror r28 + ror r0 + lsr r29 + ror r28 + ror r0 + or r29,r0 + st Z+,r22 + st Z+,r23 + st Z+,r28 + st Z+,r29 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r28 + mov r28,r4 + mov r4,r0 + mov r0,r29 + mov r29,r5 + mov r5,r0 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + mov r0,r6 + mov r6,r10 + mov r10,r0 + mov r0,r7 + mov r7,r11 + mov r11,r0 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + dec r24 + breq 5115f + rjmp 33b +5115: + subi r30,80 + sbc r31,r1 + ldi r24,2 +119: + ld r22,Z + ldd r23,Z+1 + ldd r28,Z+2 + ldd r29,Z+3 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + st Z,r29 + std Z+1,r23 + std Z+2,r28 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r28,Z+6 + ldd r29,Z+7 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor 
r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+4,r29 + std Z+5,r23 + std Z+6,r28 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r28,Z+10 + ldd r29,Z+11 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+8,r29 + std Z+9,r23 + std Z+10,r28 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r28,Z+14 + ldd r29,Z+15 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 
+ eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+12,r29 + std Z+13,r23 + std Z+14,r28 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r28,Z+18 + ldd r29,Z+19 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+16,r29 + std Z+17,r23 + std Z+18,r28 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r28,Z+22 + ldd r29,Z+23 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + 
ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+20,r29 + std Z+21,r23 + std Z+22,r28 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r28,Z+26 + ldd r29,Z+27 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+24,r29 + std Z+25,r23 + std Z+26,r28 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r28,Z+30 + ldd r29,Z+31 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 
+ rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+28,r29 + std Z+29,r23 + std Z+30,r28 + std Z+31,r22 + dec r24 + breq 1268f + adiw r30,40 + rjmp 119b +1268: + adiw r30,40 + movw r26,r30 + subi r26,80 + sbc r27,r1 + ldi r24,6 +1274: + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r2 + eor r19,r3 + andi r18,51 + andi r19,51 + eor r2,r18 + eor r3,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + movw r18,r2 + movw r20,r4 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + st Z,r2 + std Z+1,r3 + std Z+2,r4 + std Z+3,r5 + movw r18,r22 + movw r20,r28 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + andi r28,204 + andi r29,204 + or r28,r21 + or r29,r18 + or r22,r19 + or r23,r20 + movw r18,r28 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r28 + eor r19,r29 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r28,r18 + eor r29,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r28,r18 + eor r29,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r28 + std Z+5,r29 + std Z+6,r22 + std Z+7,r23 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + swap r3 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + swap r5 + std Z+8,r2 + std Z+9,r3 + std Z+10,r4 + std Z+11,r5 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r28 + adc r28,r1 + lsl r28 + adc r28,r1 + lsl r28 + adc r28,r1 + lsl r29 + adc r29,r1 + lsl r29 + adc r29,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r28 + std Z+15,r29 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + ldi r25,85 + and r2,r25 + and r3,r25 
+ and r4,r25 + and r5,r25 + or r2,r19 + or r3,r20 + or r4,r21 + or r5,r18 + std Z+16,r4 + std Z+17,r5 + std Z+18,r2 + std Z+19,r3 + movw r18,r22 + movw r20,r28 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + andi r28,170 + andi r29,170 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + or r22,r18 + or r23,r19 + or r28,r20 + or r29,r21 + std Z+20,r29 + std Z+21,r22 + std Z+22,r23 + std Z+23,r28 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + movw r18,r2 + movw r20,r4 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r14,r18 + movw r16,r20 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + eor r14,r18 + eor r15,r19 + eor r16,r20 + eor r17,r21 + ldi r25,8 + and r14,r25 + and r15,r25 + andi r16,8 + andi r17,8 + eor r18,r14 + eor r19,r15 + eor r20,r16 + eor r21,r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + eor r18,r14 + eor r19,r15 + eor r20,r16 + eor r21,r17 + ldi r17,15 + and r2,r17 + and r3,r17 + and r4,r17 + and r5,r17 + or r2,r18 + or r3,r19 + or r4,r20 + or r5,r21 + std Z+24,r2 + std Z+25,r3 + std Z+26,r4 + std Z+27,r5 + movw r18,r28 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r2,r22 + movw r4,r28 + ldi r16,1 + and r2,r16 + and r3,r16 + and r4,r16 + and r5,r16 + lsl r2 + rol r3 + rol r4 + rol r5 + lsl r2 + rol r3 + rol r4 + rol r5 + lsl r2 + rol r3 + rol r4 + rol r5 + or r2,r18 + or r3,r19 + movw r18,r28 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r2,r18 + or r3,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r4,r18 + or r5,r19 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r2,r18 + or r3,r19 + or r4,r20 + or r5,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r4,r22 + or r5,r23 + std Z+28,r2 + std Z+29,r3 + std Z+30,r4 + std Z+31,r5 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + mov r0,r1 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + or r5,r0 + std Z+32,r3 + std Z+33,r2 + std Z+34,r4 + std Z+35,r5 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r28 + mov r28,r29 + mov r29,r0 + lsl r28 + rol r29 + adc r28,r1 + lsl r28 + rol r29 + adc r28,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r28 + std Z+39,r29 + dec r24 + breq 1733f + adiw r30,40 + rjmp 1274b +1733: + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + 
pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_init, .-gift128b_init + + .text +.global gift128b_encrypt + .type gift128b_encrypt, @function +gift128b_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 19 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + movw r26,r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rjmp 765f +27: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm 
r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr 
r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 
+ eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +765: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + pop r0 + pop r0 + pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt, .-gift128b_encrypt + + .text +.global gift128b_encrypt_preloaded + .type gift128b_encrypt_preloaded, @function +gift128b_encrypt_preloaded: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 19 + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + movw r26,r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rjmp 765f +27: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and 
r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov 
r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 
+#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif 
defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +765: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + pop r0 + pop r0 + pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded + + .text +.global gift128b_decrypt + .type gift128b_decrypt, @function +gift128b_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 19 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + movw r26,r30 + subi r26,192 + sbci r27,254 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,160 + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rjmp 768f +30: + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + 
eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r1 + lsr r22 + ror r0 + lsr r22 + ror r0 + or r22,r0 + mov r0,r1 + lsr r23 + ror r0 + lsr r23 + ror r0 + or r23,r0 + mov r0,r1 + lsr r2 + ror r0 + lsr r2 + ror r0 + or r2,r0 + mov r0,r1 + lsr r3 + ror r0 + lsr r3 + ror r0 + or r3,r0 + swap r4 + swap r5 + swap r6 + swap r7 + lsl r8 + adc r8,r1 + lsl r8 + adc r8,r1 + lsl r9 + adc r9,r1 + lsl r9 + adc r9,r1 + lsl r10 + adc r10,r1 + lsl r10 + adc r10,r1 + lsl r11 + adc r11,r1 + lsl r11 + adc r11,r1 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + com r22 + com r23 + com r2 + com r3 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor 
r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + com r22 + com r23 + com r2 + com r3 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + 
eor r2,r10 + eor r3,r11 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,119 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r15 + ror r14 + ror r13 + ror r12 + lsr r15 + ror r14 + ror r13 + ror r12 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,17 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +768: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + pop r0 + pop r0 + pop r17 + pop r15 + pop r14 + pop r13 + 
pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_decrypt, .-gift128b_decrypt + +#endif + +#endif diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128b-small-avr.S b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128b-small-avr.S new file mode 100644 index 0000000..77ef9fd --- /dev/null +++ b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128b-small-avr.S @@ -0,0 +1,6053 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 + .byte 128 + + .text +.global gift128b_init + .type gift128b_init, @function +gift128b_init: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 16 + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + ldi r24,4 +33: + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + mov r0,r22 + mov r22,r4 + mov r4,r0 + mov r0,r23 + mov 
r23,r5 + mov r5,r0 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + dec r24 + breq 5115f + rjmp 33b +5115: + subi r30,80 + sbc r31,r1 + ldi r24,2 +119: + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + st Z,r3 + std Z+1,r23 + std Z+2,r2 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + 
lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+4,r3 + std Z+5,r23 + std Z+6,r2 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+8,r3 + std Z+9,r23 + std Z+10,r2 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr 
r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+12,r3 + std Z+13,r23 + std Z+14,r2 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r3 + std Z+17,r23 + std Z+18,r2 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor 
r2,r20 + eor r3,r21 + std Z+20,r3 + std Z+21,r23 + std Z+22,r2 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+24,r3 + std Z+25,r23 + std Z+26,r2 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol 
r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+28,r3 + std Z+29,r23 + std Z+30,r2 + std Z+31,r22 + dec r24 + breq 1268f + adiw r30,40 + rjmp 119b +1268: + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size gift128b_init, .-gift128b_init + + .text +.global gift128b_encrypt + .type gift128b_encrypt, @function +gift128b_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ldi r24,20 +1: + ld r22,Z+ + ld r23,Z+ + ld r2,Z+ + ld r3,Z+ + std Y+1,r22 + std Y+2,r23 + std Y+3,r2 + std Y+4,r3 + adiw r28,4 + dec r24 + brne 1b + subi r28,80 + sbc r29,r1 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 73f + rcall 73f + rjmp 1285f +73: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov 
r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl 
r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor 
r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov 
r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +811: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd 
r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +1285: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt, .-gift128b_encrypt + + .text +.global gift128b_encrypt_preloaded + .type 
gift128b_encrypt_preloaded, @function +gift128b_encrypt_preloaded: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ldi r24,20 +1: + ld r22,Z+ + ld r23,Z+ + ld r2,Z+ + ld r3,Z+ + std Y+1,r22 + std Y+2,r23 + std Y+3,r2 + std Y+4,r3 + adiw r28,4 + dec r24 + brne 1b + subi r28,80 + sbc r29,r1 + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 73f + rcall 73f + rjmp 1285f +73: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + 
lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif 
defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + 
mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + 
eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +811: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor 
r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +1285: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 40 +table_1: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 11 + .byte 23 + .byte 46 
+ .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128b_decrypt + .type gift128b_decrypt, @function +gift128b_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + 
ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + andi r18,51 + 
andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r17,hh8(table_1) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +678: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 830f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 830f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 830f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 830f + cpse r16,r1 + rjmp 678b + rjmp 1175f +830: + eor r4,r26 + eor r5,r27 
+ eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 + bst r7,6 + bld r7,2 + bst r7,7 + bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld r11,0 + bst r8,5 + bld r10,6 + bst r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + 
com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +1175: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_decrypt, .-gift128b_decrypt + +#endif + +#endif diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128b-tiny-avr.S b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128b-tiny-avr.S new file mode 100644 index 0000000..e7a03f1 --- /dev/null +++ b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-gift128b-tiny-avr.S @@ -0,0 +1,6766 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_TINY + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 + .byte 128 + + .text +.global 
gift128b_init + .type gift128b_init, @function +gift128b_init: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 16 + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + st Z,r22 + std Z+1,r23 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size gift128b_init, .-gift128b_init + + .text +.global gift128b_encrypt + .type gift128b_encrypt, @function +gift128b_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + movw r30,r28 + adiw r30,1 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + ldi r24,4 +35: + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + mov r0,r22 + mov r22,r4 + mov r4,r0 + mov r0,r23 + mov r23,r5 + mov r5,r0 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + dec r24 + breq 5117f + rjmp 35b +5117: + subi r30,80 + sbc r31,r1 + ldi r24,2 +121: + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov 
r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + st Z,r3 + std Z+1,r23 + std Z+2,r2 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+4,r3 + std Z+5,r23 + std Z+6,r2 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov 
r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+8,r3 + std Z+9,r23 + std Z+10,r2 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+12,r3 + std Z+13,r23 + std Z+14,r2 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov 
r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r3 + std Z+17,r23 + std Z+18,r2 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+20,r3 + std Z+21,r23 + std Z+22,r2 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror 
r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+24,r3 + std Z+25,r23 + std Z+26,r2 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+28,r3 + std Z+29,r23 + std Z+30,r2 + std Z+31,r22 + dec r24 + breq 1270f + adiw r30,40 + rjmp 121b +1270: + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + adiw r26,40 + 
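The loop at label 121 above rewrites each 32-bit word of the pre-expanded key schedule with chains of shift/mask/XOR operations. Each chain is the usual swap-move (bit_permute_step) idiom spelled out on byte registers; for the first word, the masks and shifts that can be read off the andi immediates are 0x00550055/9, 0x000f000f/12 and 0x00003333/18 (later words use different constants). A minimal C sketch of the idiom, assuming a uint32_t view of the four byte registers; this is an illustrative sketch, not code taken from the patch:

#include <stdint.h>

/* One swap-move step: exchange the bits of x selected by mask with the
 * bits "shift" positions above them.  The AVR code spells this out with
 * lsr/ror, andi and eor over the four byte registers of a word. */
static uint32_t bit_permute_step(uint32_t x, uint32_t mask, unsigned shift)
{
    uint32_t t = (x ^ (x >> shift)) & mask;
    return x ^ t ^ (t << shift);
}

/* First word handled by the loop at label 121, constants read from the
 * andi immediates (85 = 0x55, 15 = 0x0f, 51 = 0x33). */
static uint32_t permute_word0(uint32_t x)
{
    x = bit_permute_step(x, 0x00550055U, 9);
    x = bit_permute_step(x, 0x000f000fU, 12);
    x = bit_permute_step(x, 0x00003333U, 18);
    return x;
}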
rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 1329f + rcall 1329f + rjmp 2541f +1329: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + 
mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + 
andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 
+ and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +2067: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r24 + 
adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r24,Z+38 + ldd 
r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +2541: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt, .-gift128b_encrypt + + .text +.global gift128b_encrypt_preloaded + .type gift128b_encrypt_preloaded, @function +gift128b_encrypt_preloaded: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + movw r30,r28 + adiw r30,1 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + ldi r24,4 +35: + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + mov r0,r22 + mov r22,r4 + mov r4,r0 + mov r0,r23 + mov r23,r5 + mov r5,r0 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + dec r24 + breq 5117f + rjmp 35b +5117: + subi r30,80 + sbc r31,r1 + ldi r24,2 +121: + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + 
mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + st Z,r3 + std Z+1,r23 + std Z+2,r2 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+4,r3 + std Z+5,r23 + std Z+6,r2 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol 
r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+8,r3 + std Z+9,r23 + std Z+10,r2 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+12,r3 + std Z+13,r23 + std Z+14,r2 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw 
r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r3 + std Z+17,r23 + std Z+18,r2 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+20,r3 + std Z+21,r23 + std Z+22,r2 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + 
lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+24,r3 + std Z+25,r23 + std Z+26,r2 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+28,r3 + std Z+29,r23 + std Z+30,r2 + std Z+31,r22 + dec r24 + breq 1270f + adiw r30,40 + rjmp 121b +1270: + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) 
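The subroutine at label 1329 (defined below, and already seen once above in gift128b_encrypt) appears to run five GIFT-128 rounds per call, so the eight rcall 1329f dispatches cover the full 40 rounds. Its opening and/eor/or/com sequence is the bit-sliced GIFT S-box applied byte by byte to the state words held in r22:r3, r4:r7, r8:r11 and r12:r15. A short C sketch over 32-bit words, with the s0..s3 mapping read from the register usage (sketch only, not part of the patch):

#include <stdint.h>

/* Bit-sliced GIFT-128 S-box as applied at label 1329, with
 * s0 = r22..r3, s1 = r4..r7, s2 = r8..r11, s3 = r12..r15. */
static void gift128b_sbox(uint32_t *s0, uint32_t *s1, uint32_t *s2, uint32_t *s3)
{
    *s1 ^= *s0 & *s2;
    *s0 ^= *s1 & *s3;
    *s2 ^= *s0 | *s1;
    *s3 ^= *s2;
    *s1 ^= *s3;
    *s3 = ~(*s3);
    *s2 ^= *s0 & *s1;
}

In the assembly the roles of s0 and s3 alternate from round to round instead of being swapped explicitly, which is why successive rounds within the subroutine complement different register groups.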
+#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 1329f + rcall 1329f + rjmp 2541f +1329: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm 
r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + 
ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm 
r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +2067: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r22 + eor r21,r23 + mov 
r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or r25,r19 + movw r18,r22 + movw 
r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +2541: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 40 +table_1: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 11 + .byte 23 + .byte 46 + .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128b_decrypt + .type gift128b_decrypt, @function +gift128b_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + mov r0,r27 + mov r27,r26 
+ mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r17,hh8(table_1) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +114: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + cpse r16,r1 + rjmp 114b + rjmp 611f +266: + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 + bst r7,6 + bld r7,2 + bst r7,7 + 
bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld r11,0 + bst r8,5 + bld r10,6 + bst r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +611: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop 
r29 + pop r28 + ret + .size gift128b_decrypt, .-gift128b_decrypt + +#endif + +#endif diff --git a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-util.h b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-util.h +++ b/gift-cofb/Implementations/crypto_aead/giftcofb128v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left 
by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/aead-common.c b/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/aead-common.c 
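For clarity, a minimal self-contained C sketch (not part of the patch) of the composition idea behind the macros in the internal-util.h hunk above: on AVR, 32-bit rotations by 1 bit or by a multiple of 8 bits are cheap, so an awkward count such as 12 is built from a byte-aligned rotation plus a few single-bit steps. The helper names rotl32/rotr32 are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Generic 32-bit rotations, equivalent to the leftRotate/rightRotate macros. */
static uint32_t rotl32(uint32_t x, unsigned bits) { return (x << bits) | (x >> (32 - bits)); }
static uint32_t rotr32(uint32_t x, unsigned bits) { return (x >> bits) | (x << (32 - bits)); }

int main(void)
{
    uint32_t x = 0x12345678;
    /* leftRotate12 is composed as "rotate left by 16, then right by 1 four
     * times"; byte-aligned and single-bit rotations are the cheap cases on AVR. */
    uint32_t composed = rotr32(rotr32(rotr32(rotr32(rotl32(x, 16), 1), 1), 1), 1);
    uint32_t direct = rotl32(x, 12);
    printf("direct=%08lx composed=%08lx\n",
           (unsigned long)direct, (unsigned long)composed);
    return direct == composed ? 0 : 1;   /* both print 45678123 */
}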
deleted file mode 100644 index 84fc53a..0000000 --- a/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/aead-common.h b/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
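The aead_check_tag() code removed above uses a branch-free masking idiom: fold all tag differences into one accumulator, derive an all-ones/all-zeros mask from it without branching on secret data, then use the mask both to wipe the plaintext and to form the return value. A compact standalone restatement of the same idiom, assuming the arithmetic right shift of negative values that the original also relies on (check_tag_demo is an illustrative name, not part of the library):

#include <stdio.h>

static int check_tag_demo(unsigned char *plaintext, unsigned long plaintext_len,
                          const unsigned char *tag1, const unsigned char *tag2,
                          unsigned tag_len)
{
    int accum = 0;
    unsigned long i;
    for (i = 0; i < tag_len; ++i)
        accum |= tag1[i] ^ tag2[i];           /* non-zero iff any byte differs */
    accum = (accum - 1) >> 8;                 /* -1 (all ones) on match, 0 on mismatch */
    for (i = 0; i < plaintext_len; ++i)
        plaintext[i] &= (unsigned char)accum; /* zero the plaintext on mismatch */
    return ~accum;                            /* 0 on match, -1 on mismatch */
}

int main(void)
{
    unsigned char pt[4] = { 'd', 'a', 't', 'a' };
    unsigned char good[4] = { 1, 2, 3, 4 }, bad[4] = { 1, 2, 3, 5 };
    printf("match: %d\n", check_tag_demo(pt, sizeof(pt), good, good, 4));
    printf("mismatch: %d (pt[0]=%d)\n",
           check_tag_demo(pt, sizeof(pt), good, bad, 4), pt[0]);
    return 0;
}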
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. 
- */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. 
- */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
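The AEAD_FLAG_LITTLE_ENDIAN note above says an application has to know which end of the nonce a packet sequence number belongs in. A hypothetical helper sketch, not part of the library: seq_to_nonce and the leading-versus-trailing convention shown here are assumptions for illustration, only the flag name and value are taken from aead-common.h.

#include <stdint.h>
#include <string.h>

#define AEAD_FLAG_LITTLE_ENDIAN 0x0001  /* mirrors the value in aead-common.h */

static void seq_to_nonce(unsigned char nonce[16], uint64_t seq, unsigned flags)
{
    int i;
    memset(nonce, 0, 16);
    if (flags & AEAD_FLAG_LITTLE_ENDIAN) {
        /* assumed convention: counter in the leading bytes, least significant first */
        for (i = 0; i < 8; ++i)
            nonce[i] = (unsigned char)(seq >> (8 * i));
    } else {
        /* assumed convention: counter in the trailing bytes, most significant first */
        for (i = 0; i < 8; ++i)
            nonce[15 - i] = (unsigned char)(seq >> (8 * i));
    }
}

int main(void)
{
    unsigned char nonce[16];
    seq_to_nonce(nonce, 42, AEAD_FLAG_LITTLE_ENDIAN);
    return nonce[0] == 42 ? 0 : 1;
}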
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/api.h b/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/api.h deleted file mode 100644 index fb1dab8..0000000 --- a/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 32 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/encrypt.c b/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/encrypt.c deleted file mode 100644 index 53f563e..0000000 --- a/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "gimli24.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return gimli24_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return gimli24_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/gimli24.c b/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/gimli24.c deleted file mode 100644 index 4bc7d9f..0000000 --- a/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/gimli24.c +++ /dev/null @@ -1,330 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
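The api.h and encrypt.c files above are the thin NIST LWC API layer: fixed-size constants plus wrappers that forward to the gimli24_aead_* functions. A usage sketch, assuming it is linked against a GIMLI-24 implementation of this API; the prototypes are copied from encrypt.c, the ciphertext buffer is sized mlen + 16 because CRYPTO_ABYTES is 16, and nsec may be NULL because the implementation ignores it.

#include <stdio.h>

/* Prototypes as declared by the NIST LWC AEAD API (see encrypt.c above). */
int crypto_aead_encrypt(unsigned char *c, unsigned long long *clen,
                        const unsigned char *m, unsigned long long mlen,
                        const unsigned char *ad, unsigned long long adlen,
                        const unsigned char *nsec, const unsigned char *npub,
                        const unsigned char *k);
int crypto_aead_decrypt(unsigned char *m, unsigned long long *mlen,
                        unsigned char *nsec,
                        const unsigned char *c, unsigned long long clen,
                        const unsigned char *ad, unsigned long long adlen,
                        const unsigned char *npub, const unsigned char *k);

/* Sizes from api.h above: 32-byte key, 16-byte nonce, 16-byte tag. */
#define KEYBYTES  32
#define NPUBBYTES 16
#define ABYTES    16

int main(void)
{
    unsigned char key[KEYBYTES] = {0}, npub[NPUBBYTES] = {0}, ad[1] = {0};
    unsigned char msg[] = "hello gimli";
    unsigned char ct[sizeof(msg) + ABYTES];   /* ciphertext plus 16-byte tag */
    unsigned char out[sizeof(msg)];
    unsigned long long ctlen = 0, outlen = 0;

    crypto_aead_encrypt(ct, &ctlen, msg, sizeof(msg), ad, 0, NULL, npub, key);
    if (crypto_aead_decrypt(out, &outlen, NULL, ct, ctlen, ad, 0, npub, key) != 0)
        printf("tag check failed\n");
    else
        printf("decrypted %llu bytes\n", outlen);
    return 0;
}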
- */ - -#include "gimli24.h" -#include "internal-gimli24.h" -#include - -aead_cipher_t const gimli24_cipher = { - "GIMLI-24", - GIMLI24_KEY_SIZE, - GIMLI24_NONCE_SIZE, - GIMLI24_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - gimli24_aead_encrypt, - gimli24_aead_decrypt -}; - -aead_hash_algorithm_t const gimli24_hash_algorithm = { - "GIMLI-24-HASH", - sizeof(gimli24_hash_state_t), - GIMLI24_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - gimli24_hash, - (aead_hash_init_t)gimli24_hash_init, - (aead_hash_update_t)gimli24_hash_absorb, - (aead_hash_finalize_t)gimli24_hash_finalize, - (aead_xof_absorb_t)gimli24_hash_absorb, - (aead_xof_squeeze_t)gimli24_hash_squeeze -}; - -/** - * \brief Number of bytes of input or output data to process per block. - */ -#define GIMLI24_BLOCK_SIZE 16 - -/** - * \brief Structure of the GIMLI-24 state as both an array of words - * and an array of bytes. - */ -typedef union -{ - uint32_t words[12]; /**< Words in the state */ - uint8_t bytes[48]; /**< Bytes in the state */ - -} gimli24_state_t; - -/** - * \brief Absorbs data into a GIMLI-24 state. - * - * \param state The state to absorb the data into. - * \param data Points to the data to be absorbed. - * \param len Length of the data to be absorbed. - */ -static void gimli24_absorb - (gimli24_state_t *state, const unsigned char *data, unsigned long long len) -{ - unsigned temp; - while (len >= GIMLI24_BLOCK_SIZE) { - lw_xor_block(state->bytes, data, GIMLI24_BLOCK_SIZE); - gimli24_permute(state->words); - data += GIMLI24_BLOCK_SIZE; - len -= GIMLI24_BLOCK_SIZE; - } - temp = (unsigned)len; - lw_xor_block(state->bytes, data, temp); - state->bytes[temp] ^= 0x01; /* Padding */ - state->bytes[47] ^= 0x01; - gimli24_permute(state->words); -} - -/** - * \brief Encrypts a block of data with a GIMLI-24 state. - * - * \param state The state to encrypt with. - * \param dest Points to the destination buffer. - * \param src Points to the source buffer. - * \param len Length of the data to encrypt from \a src into \a dest. - */ -static void gimli24_encrypt - (gimli24_state_t *state, unsigned char *dest, - const unsigned char *src, unsigned long long len) -{ - unsigned temp; - while (len >= GIMLI24_BLOCK_SIZE) { - lw_xor_block_2_dest(dest, state->bytes, src, GIMLI24_BLOCK_SIZE); - gimli24_permute(state->words); - dest += GIMLI24_BLOCK_SIZE; - src += GIMLI24_BLOCK_SIZE; - len -= GIMLI24_BLOCK_SIZE; - } - temp = (unsigned)len; - lw_xor_block_2_dest(dest, state->bytes, src, temp); - state->bytes[temp] ^= 0x01; /* Padding */ - state->bytes[47] ^= 0x01; - gimli24_permute(state->words); -} - -/** - * \brief Decrypts a block of data with a GIMLI-24 state. - * - * \param state The state to decrypt with. - * \param dest Points to the destination buffer. - * \param src Points to the source buffer. - * \param len Length of the data to decrypt from \a src into \a dest. 
- */ -static void gimli24_decrypt - (gimli24_state_t *state, unsigned char *dest, - const unsigned char *src, unsigned long long len) -{ - unsigned temp; - while (len >= GIMLI24_BLOCK_SIZE) { - lw_xor_block_swap(dest, state->bytes, src, GIMLI24_BLOCK_SIZE); - gimli24_permute(state->words); - dest += GIMLI24_BLOCK_SIZE; - src += GIMLI24_BLOCK_SIZE; - len -= GIMLI24_BLOCK_SIZE; - } - temp = (unsigned)len; - lw_xor_block_swap(dest, state->bytes, src, temp); - state->bytes[temp] ^= 0x01; /* Padding */ - state->bytes[47] ^= 0x01; - gimli24_permute(state->words); -} - -int gimli24_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - gimli24_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + GIMLI24_TAG_SIZE; - - /* Format the initial GIMLI state from the nonce and the key */ - memcpy(state.words, npub, GIMLI24_NONCE_SIZE); - memcpy(state.words + 4, k, GIMLI24_KEY_SIZE); - - /* Permute the initial state */ - gimli24_permute(state.words); - - /* Absorb the associated data */ - gimli24_absorb(&state, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - gimli24_encrypt(&state, c, m, mlen); - - /* Generate the authentication tag at the end of the ciphertext */ - memcpy(c + mlen, state.bytes, GIMLI24_TAG_SIZE); - return 0; -} - -int gimli24_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - gimli24_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < GIMLI24_TAG_SIZE) - return -1; - *mlen = clen - GIMLI24_TAG_SIZE; - - /* Format the initial GIMLI state from the nonce and the key */ - memcpy(state.words, npub, GIMLI24_NONCE_SIZE); - memcpy(state.words + 4, k, GIMLI24_KEY_SIZE); - - /* Permute the initial state */ - gimli24_permute(state.words); - - /* Absorb the associated data */ - gimli24_absorb(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - gimli24_decrypt(&state, m, c, *mlen); - - /* Check the authentication tag at the end of the packet */ - return aead_check_tag - (m, *mlen, state.bytes, c + *mlen, GIMLI24_TAG_SIZE); -} - -int gimli24_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - gimli24_state_t state; - - /* Initialize the hash state to all zeroes */ - memset(&state, 0, sizeof(state)); - - /* Absorb the input */ - gimli24_absorb(&state, in, inlen); - - /* Generate the output hash */ - memcpy(out, state.bytes, GIMLI24_HASH_SIZE / 2); - gimli24_permute(state.words); - memcpy(out + GIMLI24_HASH_SIZE / 2, state.bytes, GIMLI24_HASH_SIZE / 2); - return 0; -} - -void gimli24_hash_init(gimli24_hash_state_t *state) -{ - memset(state, 0, sizeof(gimli24_hash_state_t)); -} - -#define GIMLI24_XOF_RATE 16 -#define gimli24_xof_permute() \ - gimli24_permute((uint32_t *)(state->s.state)) - -void gimli24_hash_absorb - (gimli24_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - unsigned temp; - - if (state->s.mode) { - /* We were squeezing output - go back to the absorb phase */ - state->s.mode = 0; - state->s.count = 0; - gimli24_xof_permute(); - } - - /* Handle the partial left-over block from last 
time */ - if (state->s.count) { - temp = GIMLI24_XOF_RATE - state->s.count; - if (temp > inlen) { - temp = (unsigned)inlen; - lw_xor_block(state->s.state + state->s.count, in, temp); - state->s.count += temp; - return; - } - lw_xor_block(state->s.state + state->s.count, in, temp); - state->s.count = 0; - in += temp; - inlen -= temp; - gimli24_xof_permute(); - } - - /* Process full blocks that are aligned at state->s.count == 0 */ - while (inlen >= GIMLI24_XOF_RATE) { - lw_xor_block(state->s.state, in, GIMLI24_XOF_RATE); - in += GIMLI24_XOF_RATE; - inlen -= GIMLI24_XOF_RATE; - gimli24_xof_permute(); - } - - /* Process the left-over block at the end of the input */ - temp = (unsigned)inlen; - lw_xor_block(state->s.state, in, temp); - state->s.count = temp; -} - -void gimli24_hash_squeeze - (gimli24_hash_state_t *state, unsigned char *out, - unsigned long long outlen) -{ - unsigned temp; - - /* Pad the final input block if we were still in the absorb phase */ - if (!state->s.mode) { - state->s.state[state->s.count] ^= 0x01; - state->s.state[47] ^= 0x01; - state->s.count = 0; - state->s.mode = 1; - } - - /* Handle left-over partial blocks from last time */ - if (state->s.count) { - temp = GIMLI24_XOF_RATE - state->s.count; - if (temp > outlen) { - temp = (unsigned)outlen; - memcpy(out, state->s.state + state->s.count, temp); - state->s.count += temp; - return; - } - memcpy(out, state->s.state + state->s.count, temp); - out += temp; - outlen -= temp; - state->s.count = 0; - } - - /* Handle full blocks */ - while (outlen >= GIMLI24_XOF_RATE) { - gimli24_xof_permute(); - memcpy(out, state->s.state, GIMLI24_XOF_RATE); - out += GIMLI24_XOF_RATE; - outlen -= GIMLI24_XOF_RATE; - } - - /* Handle the left-over block */ - if (outlen > 0) { - temp = (unsigned)outlen; - gimli24_xof_permute(); - memcpy(out, state->s.state, temp); - state->s.count = temp; - } -} - -void gimli24_hash_finalize - (gimli24_hash_state_t *state, unsigned char *out) -{ - gimli24_hash_squeeze(state, out, GIMLI24_HASH_SIZE); -} diff --git a/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/gimli24.h b/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/gimli24.h deleted file mode 100644 index f72aec7..0000000 --- a/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/gimli24.h +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
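The incremental hashing functions above expose GIMLI-24-HASH both as a fixed-length hash (init, absorb, finalize) and as an XOF (init, absorb, squeeze), with s.count tracking the bytes in the partial rate block and s.mode recording whether the state is absorbing or squeezing. A short usage sketch, assuming the program is compiled and linked against the gimli24.h API declared in the following hunk:

#include <stdio.h>
#include "gimli24.h"

int main(void)
{
    gimli24_hash_state_t state;
    unsigned char part1[] = "absorb me in ";
    unsigned char part2[] = "two pieces";
    unsigned char digest[GIMLI24_HASH_SIZE];
    unsigned char stream[64];
    unsigned i;

    /* Fixed-length hash: absorb the input incrementally, then finalize. */
    gimli24_hash_init(&state);
    gimli24_hash_absorb(&state, part1, sizeof(part1) - 1);
    gimli24_hash_absorb(&state, part2, sizeof(part2) - 1);
    gimli24_hash_finalize(&state, digest);

    /* XOF use: the same absorb calls, then squeeze as much output as needed. */
    gimli24_hash_init(&state);
    gimli24_hash_absorb(&state, part1, sizeof(part1) - 1);
    gimli24_hash_absorb(&state, part2, sizeof(part2) - 1);
    gimli24_hash_squeeze(&state, stream, sizeof(stream));

    for (i = 0; i < GIMLI24_HASH_SIZE; ++i)
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}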
- */ - -#ifndef LWCRYPTO_GIMLI24_H -#define LWCRYPTO_GIMLI24_H - -#include "aead-common.h" - -/** - * \file gimli24.h - * \brief Gimli authenticated encryption algorithm. - * - * GIMLI-24-CIPHER has a 256-bit key, a 128-bit nonce, and a 128-bit tag. - * It is the spiritual successor to the widely used ChaCha20 and has a - * similar design. - * - * This library also includes an implementation of the hash algorithm - * GIMLI-24-HASH in both regular hashing and XOF modes. - * - * References: https://gimli.cr.yp.to/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for GIMLI-24. - */ -#define GIMLI24_KEY_SIZE 32 - -/** - * \brief Size of the nonce for GIMLI-24. - */ -#define GIMLI24_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for GIMLI-24. - */ -#define GIMLI24_TAG_SIZE 16 - -/** - * \brief Size of the hash output for GIMLI-24. - */ -#define GIMLI24_HASH_SIZE 32 - -/** - * \brief State information for GIMLI-24-HASH incremental modes. - */ -typedef union -{ - struct { - unsigned char state[48]; /**< Current hash state */ - unsigned char count; /**< Number of bytes in the current block */ - unsigned char mode; /**< Hash mode: 0 for absorb, 1 for squeeze */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} gimli24_hash_state_t; - -/** - * \brief Meta-information block for the GIMLI-24 cipher. - */ -extern aead_cipher_t const gimli24_cipher; - -/** - * \brief Meta-information block for the GIMLI-24-HASH algorithm. - * - * This meta-information block can also be used in XOF mode. - */ -extern aead_hash_algorithm_t const gimli24_hash_algorithm; - -/** - * \brief Encrypts and authenticates a packet with GIMLI-24 using the - * full AEAD mode. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa gimli24_aead_decrypt() - */ -int gimli24_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with GIMLI-24 using the - * full AEAD mode. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
- * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa gimli24_aead_encrypt() - */ -int gimli24_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with GIMLI-24 to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * GIMLI24_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int gimli24_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a GIMLI-24-HASH hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa gimli24_hash_absorb(), gimli24_hash_squeeze(), gimli24_hash() - */ -void gimli24_hash_init(gimli24_hash_state_t *state); - -/** - * \brief Aborbs more input data into a GIMLI-24-HASH state. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa gimli24_hash_init(), gimli24_hash_squeeze() - */ -void gimli24_hash_absorb - (gimli24_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Squeezes output data from an GIMLI-24-HASH state. - * - * \param state Hash state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - * - * \sa gimli24_hash_init(), gimli24_hash_absorb() - */ -void gimli24_hash_squeeze - (gimli24_hash_state_t *state, unsigned char *out, - unsigned long long outlen); - -/** - * \brief Returns the final hash value from a GIMLI-24-HASH hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - * - * \note This is a wrapper around gimli24_hash_squeeze() for a fixed length - * of GIMLI24_HASH_SIZE bytes. 
- * - * \sa gimli24_hash_init(), gimli24_hash_absorb() - */ -void gimli24_hash_finalize - (gimli24_hash_state_t *state, unsigned char *out); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/internal-gimli24-avr.S b/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/internal-gimli24-avr.S deleted file mode 100644 index efcd500..0000000 --- a/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/internal-gimli24-avr.S +++ /dev/null @@ -1,9419 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global gimli24_permute - .type gimli24_permute, @function -gimli24_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r28,Z+32 - ldd r29,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - ldd r12,Z+36 - ldd r13,Z+37 - ldd r14,Z+38 - ldd r15,Z+39 - push r31 - push r30 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - ldi r31,24 - eor r4,r31 - ldi r30,121 - eor r5,r30 - ldi r25,55 - eor r6,r25 
- ldi r24,158 - eor r7,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 
[... continuation of the deleted, machine-generated AVR assembly, which has been flattened onto a few very long lines in this copy of the patch. The span repeats one unrolled pattern throughout: Simeck-style box rounds (apparently the sliSCP-light permutation) built from 32-bit rotates over four-register words (bst/lsl/rol/bld and byte-move sequences), AND/OR/XOR mixing, occasional round/step-constant XORs via ldi/eor pairs (e.g. constants 20, 16, 12, 8 combined with 121, 55, 158), and loads/stores of the permutation state through the Z pointer at offsets 0..47, with intermediate words spilled to the stack via push/pop. ...]
mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - pop r30 - pop r31 - st Z,r4 - std Z+1,r5 - std Z+2,r6 - std Z+3,r7 - std Z+16,r22 - std Z+17,r23 - std Z+18,r26 - std Z+19,r27 - std Z+32,r28 - std Z+33,r29 - std Z+34,r2 - std Z+35,r3 - std Z+4,r18 - std Z+5,r19 - std Z+6,r20 - std Z+7,r21 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - std Z+36,r12 - std Z+37,r13 - std Z+38,r14 - std Z+39,r15 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r26,Z+26 - ldd r27,Z+27 - ldd r28,Z+40 - ldd r29,Z+41 - ldd r2,Z+42 - ldd r3,Z+43 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - pop r18 - pop r19 - pop r20 - pop r21 - pop r4 - pop r5 - pop r6 - pop r7 - push 
r31 - push r30 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov 
r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - 
mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - pop r30 - pop r31 - push r21 - push r20 - push r19 - push r18 - push r7 - push r6 - push r5 - push r4 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - push r31 - push r30 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - 
eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - 
eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 
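
A minimal C sketch of what the recurring instruction idioms above appear to compute; the register grouping (four 8-bit registers holding one 32-bit word, lowest byte first) is an assumption read off the instruction pattern, not something stated in the patch.

/* "bst rHI,7 ; lsl rLO ; rol ... ; rol rHI ; bld rLO,0" looks like a
 * 32-bit rotate left by one bit spread across four registers. */
#include <stdint.h>

static uint32_t rotl32_1(uint32_t x)
{
    return (x << 1) | (x >> 31);
}

/* A chain of "lsl ; rol ; rol ; rol" repeated n times looks like a plain
 * 32-bit left shift by n, matching the << 1, << 2 and << 3 terms of the
 * Gimli SP-box in the C reference further down in this patch. */
static uint32_t shl32(uint32_t x, unsigned n)
{
    return x << n;
}
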
- eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - pop r30 - pop r31 - std Z+8,r4 - std Z+9,r5 - std Z+10,r6 - std Z+11,r7 - std Z+24,r22 - std Z+25,r23 - std Z+26,r26 - std Z+27,r27 - std Z+40,r28 - std Z+41,r29 - std Z+42,r2 - std Z+43,r3 - std Z+12,r18 - std Z+13,r19 - std Z+14,r20 - std Z+15,r21 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - ld r4,Z - ldd r5,Z+1 - ldd r6,Z+2 - ldd r7,Z+3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r28,Z+32 - ldd r29,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - ldd r12,Z+36 - ldd r13,Z+37 - ldd r14,Z+38 - ldd r15,Z+39 - pop r18 - pop r19 - pop r20 - pop r21 - pop r4 - pop r5 - pop r6 - pop r7 - push r31 - push r30 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - 
eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - ldi r31,4 - eor r18,r31 - ldi r30,121 - eor r19,r30 - ldi r25,55 - eor r20,r25 - ldi r24,158 - eor r21,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or 
r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - pop r30 - pop r31 - push r21 - push r20 - push r19 - push r18 - push r7 - push r6 - push r5 - push r4 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r4,Z+12 - ldd r5,Z+13 - ldd r6,Z+14 - ldd r7,Z+15 - push r31 - push r30 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - 
movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+16,r22 - std Z+17,r23 - std Z+18,r26 - std Z+19,r27 - std Z+32,r28 - std Z+33,r29 - std Z+34,r2 - std Z+35,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - std Z+36,r12 - std Z+37,r13 - std Z+38,r14 - std Z+39,r15 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r26,Z+26 - ldd r27,Z+27 - ldd r28,Z+40 - ldd r29,Z+41 - ldd r2,Z+42 - ldd r3,Z+43 - ldd r4,Z+12 - ldd r5,Z+13 - ldd r6,Z+14 - ldd r7,Z+15 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - pop r4 - pop r5 - pop r6 - pop r7 - pop r18 - pop r19 - pop r20 - pop r21 - push r31 - push r30 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 
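
A small sketch relating the "ldi/eor" groups above (immediate bytes 12, 8, 4 combined with 121, 55, 158) to the round constant used by the C reference later in this patch, 0x9e377900 ^ round; the little-endian byte view is the assumption being illustrated here.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned round;
    for (round = 24; round > 0; round -= 4) {
        uint32_t rc = 0x9e377900U ^ round;
        /* Lowest byte first: 0x0c/0x08/0x04/..., then 0x79, 0x37, 0x9e,
         * i.e. the 12/8/4, 121, 55, 158 immediates seen in the assembly. */
        printf("round %2u: %02x %02x %02x %02x\n", round,
               (unsigned)(rc & 0xff), (unsigned)((rc >> 8) & 0xff),
               (unsigned)((rc >> 16) & 0xff), (unsigned)(rc >> 24));
    }
    return 0;
}
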
- movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - pop r30 - pop r31 - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - std Z+24,r22 - std Z+25,r23 - std Z+26,r26 - std Z+27,r27 - std Z+40,r28 - std Z+41,r29 - std Z+42,r2 - std Z+43,r3 - std Z+12,r4 - std Z+13,r5 - std Z+14,r6 - std Z+15,r7 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - eor r1,r1 - ret - .size gimli24_permute, .-gimli24_permute - -#endif diff --git a/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/internal-gimli24.c b/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/internal-gimli24.c deleted file mode 100644 index d719988..0000000 --- a/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/internal-gimli24.c +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "internal-gimli24.h" - -#if !defined(__AVR__) - -/* Apply the SP-box to a specific column in the state array */ -#define GIMLI24_SP(s0, s4, s8) \ - do { \ - x = leftRotate24(s0); \ - y = leftRotate9(s4); \ - s4 = y ^ x ^ ((x | s8) << 1); \ - s0 = s8 ^ y ^ ((x & y) << 3); \ - s8 = x ^ (s8 << 1) ^ ((y & s8) << 2); \ - } while (0) - -void gimli24_permute(uint32_t state[12]) -{ - uint32_t s0, s1, s2, s3, s4, s5; - uint32_t s6, s7, s8, s9, s10, s11; - uint32_t x, y; - unsigned round; - - /* Load the state into local variables and convert from little-endian */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s0 = state[0]; - s1 = state[1]; - s2 = state[2]; - s3 = state[3]; - s4 = state[4]; - s5 = state[5]; - s6 = state[6]; - s7 = state[7]; - s8 = state[8]; - s9 = state[9]; - s10 = state[10]; - s11 = state[11]; -#else - s0 = le_load_word32((const unsigned char *)(&(state[0]))); - s1 = le_load_word32((const unsigned char *)(&(state[1]))); - s2 = le_load_word32((const unsigned char *)(&(state[2]))); - s3 = le_load_word32((const unsigned char *)(&(state[3]))); - s4 = le_load_word32((const unsigned char *)(&(state[4]))); - s5 = le_load_word32((const unsigned char *)(&(state[5]))); - s6 = le_load_word32((const unsigned char *)(&(state[6]))); - s7 = le_load_word32((const unsigned char *)(&(state[7]))); - s8 = le_load_word32((const unsigned char *)(&(state[8]))); - s9 = le_load_word32((const unsigned char *)(&(state[9]))); - s10 = le_load_word32((const unsigned char *)(&(state[10]))); - s11 = le_load_word32((const unsigned char *)(&(state[11]))); -#endif - - /* Unroll and perform the rounds 4 at a time */ - for (round = 24; round > 0; round -= 4) { - /* Round 0: SP-box, small swap, add round constant */ - GIMLI24_SP(s0, s4, s8); - GIMLI24_SP(s1, s5, s9); - GIMLI24_SP(s2, s6, s10); - GIMLI24_SP(s3, s7, s11); - x = s0; - y = s2; - s0 = s1 ^ 0x9e377900U ^ round; - s1 = x; - s2 = s3; - s3 = y; - - /* Round 1: SP-box only */ - GIMLI24_SP(s0, s4, s8); - GIMLI24_SP(s1, s5, s9); - GIMLI24_SP(s2, s6, s10); - GIMLI24_SP(s3, s7, s11); - - /* Round 2: SP-box, big swap */ - GIMLI24_SP(s0, s4, s8); - GIMLI24_SP(s1, s5, s9); - GIMLI24_SP(s2, s6, s10); - GIMLI24_SP(s3, s7, s11); - x = s0; - y = s1; - s0 = s2; - s1 = s3; - s2 = x; - s3 = y; - - /* Round 3: SP-box only */ - GIMLI24_SP(s0, s4, s8); - GIMLI24_SP(s1, s5, s9); - GIMLI24_SP(s2, s6, s10); - GIMLI24_SP(s3, s7, s11); - } - - /* Convert state to little-endian if the platform is not little-endian */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state[0] = s0; - state[1] = s1; - state[2] = s2; - state[3] = s3; - state[4] = s4; - state[5] = s5; - state[6] = s6; - state[7] = s7; - state[8] = s8; - state[9] = s9; - state[10] = s10; - state[11] = s11; -#else - le_store_word32(((unsigned char *)(&(state[0]))), s0); - le_store_word32(((unsigned char *)(&(state[1]))), s1); - le_store_word32(((unsigned char *)(&(state[2]))), s2); - le_store_word32(((unsigned char *)(&(state[3]))), s3); - le_store_word32(((unsigned char *)(&(state[4]))), s4); - le_store_word32(((unsigned char *)(&(state[5]))), s5); - le_store_word32(((unsigned char *)(&(state[6]))), s6); - le_store_word32(((unsigned char *)(&(state[7]))), s7); - le_store_word32(((unsigned char *)(&(state[8]))), s8); - le_store_word32(((unsigned char *)(&(state[9]))), s9); - le_store_word32(((unsigned char *)(&(state[10]))), s10); - le_store_word32(((unsigned char *)(&(state[11]))), s11); -#endif -} - -#endif /* !__AVR__ */ diff --git a/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/internal-gimli24.h 
b/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/internal-gimli24.h deleted file mode 100644 index c81ead1..0000000 --- a/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/internal-gimli24.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_GIMLI24_H -#define LW_INTERNAL_GIMLI24_H - -#include "internal-util.h" - -/** - * \file internal-gimli24.h - * \brief Internal implementation of the GIMLI-24 permutation. - * - * References: https://gimli.cr.yp.to/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Permutes the GIMLI-24 state. - * - * \param state The GIMLI-24 state to be permuted. - * - * The input and output \a state will be in little-endian byte order. - */ -void gimli24_permute(uint32_t state[12]); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/internal-util.h b/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/gimli/Implementations/crypto_aead/gimli24v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
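
A minimal usage sketch for the deleted reference implementation, assuming it is linked against internal-gimli24.c shown above; the state layout (12 little-endian 32-bit words, 48 bytes) follows the prototype and comments in internal-gimli24.h, and the input values here are arbitrary, not an official test vector.

#include <stdint.h>
#include <stdio.h>

void gimli24_permute(uint32_t state[12]);   /* prototype from internal-gimli24.h */

int main(void)
{
    uint32_t state[12];
    unsigned i;

    /* Fill the 384-bit state with some arbitrary words. */
    for (i = 0; i < 12; ++i)
        state[i] = (uint32_t)i * 0x01010101U;

    gimli24_permute(state);                  /* 24 rounds, in place */

    for (i = 0; i < 12; ++i)
        printf("%08lx%c", (unsigned long)state[i], (i % 4 == 3) ? '\n' : ' ');
    return 0;
}
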
- */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define 
le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. 
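
A self-contained sketch round-tripping a word through the little-endian helpers defined above; the two macros are copied locally so the example compiles on its own, independent of the AVR build.

#include <stdint.h>
#include <assert.h>

#define le_load_word32(ptr) \
    ((((uint32_t)((ptr)[3])) << 24) | \
     (((uint32_t)((ptr)[2])) << 16) | \
     (((uint32_t)((ptr)[1])) << 8)  | \
      ((uint32_t)((ptr)[0])))
#define le_store_word32(ptr, x) \
    do { \
        uint32_t _x = (x); \
        (ptr)[0] = (uint8_t)_x; \
        (ptr)[1] = (uint8_t)(_x >> 8); \
        (ptr)[2] = (uint8_t)(_x >> 16); \
        (ptr)[3] = (uint8_t)(_x >> 24); \
    } while (0)

int main(void)
{
    uint8_t buf[4];
    le_store_word32(buf, 0x9e377900U);
    assert(buf[0] == 0x00 && buf[3] == 0x9e);   /* little-endian layout */
    assert(le_load_word32(buf) == 0x9e377900U); /* round trip */
    return 0;
}
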
-
-#if defined(__AVR__)
-#define LW_CRYPTO_ROTATE32_COMPOSED 1
-#else
-#define LW_CRYPTO_ROTATE32_COMPOSED 0
-#endif
-
-/* Rotation macros for 32-bit arguments */
-
-/* Generic left rotate */
-#define leftRotate(a, bits) \
-    (__extension__ ({ \
-        uint32_t _temp = (a); \
-        (_temp << (bits)) | (_temp >> (32 - (bits))); \
-    }))
-
-/* Generic right rotate */
-#define rightRotate(a, bits) \
-    (__extension__ ({ \
-        uint32_t _temp = (a); \
-        (_temp >> (bits)) | (_temp << (32 - (bits))); \
-    }))
-
-#if !LW_CRYPTO_ROTATE32_COMPOSED
-
-/* Left rotate by a specific number of bits. These macros may be replaced
- * with more efficient ones on platforms that lack a barrel shifter */
-#define leftRotate1(a) (leftRotate((a), 1))
-#define leftRotate2(a) (leftRotate((a), 2))
-#define leftRotate3(a) (leftRotate((a), 3))
-#define leftRotate4(a) (leftRotate((a), 4))
-#define leftRotate5(a) (leftRotate((a), 5))
-#define leftRotate6(a) (leftRotate((a), 6))
-#define leftRotate7(a) (leftRotate((a), 7))
-#define leftRotate8(a) (leftRotate((a), 8))
-#define leftRotate9(a) (leftRotate((a), 9))
-#define leftRotate10(a) (leftRotate((a), 10))
-#define leftRotate11(a) (leftRotate((a), 11))
-#define leftRotate12(a) (leftRotate((a), 12))
-#define leftRotate13(a) (leftRotate((a), 13))
-#define leftRotate14(a) (leftRotate((a), 14))
-#define leftRotate15(a) (leftRotate((a), 15))
-#define leftRotate16(a) (leftRotate((a), 16))
-#define leftRotate17(a) (leftRotate((a), 17))
-#define leftRotate18(a) (leftRotate((a), 18))
-#define leftRotate19(a) (leftRotate((a), 19))
-#define leftRotate20(a) (leftRotate((a), 20))
-#define leftRotate21(a) (leftRotate((a), 21))
-#define leftRotate22(a) (leftRotate((a), 22))
-#define leftRotate23(a) (leftRotate((a), 23))
-#define leftRotate24(a) (leftRotate((a), 24))
-#define leftRotate25(a) (leftRotate((a), 25))
-#define leftRotate26(a) (leftRotate((a), 26))
-#define leftRotate27(a) (leftRotate((a), 27))
-#define leftRotate28(a) (leftRotate((a), 28))
-#define leftRotate29(a) (leftRotate((a), 29))
-#define leftRotate30(a) (leftRotate((a), 30))
-#define leftRotate31(a) (leftRotate((a), 31))
-
-/* Right rotate by a specific number of bits. These macros may be replaced
- * with more efficient ones on platforms that lack a barrel shifter */
-#define rightRotate1(a) (rightRotate((a), 1))
-#define rightRotate2(a) (rightRotate((a), 2))
-#define rightRotate3(a) (rightRotate((a), 3))
-#define rightRotate4(a) (rightRotate((a), 4))
-#define rightRotate5(a) (rightRotate((a), 5))
-#define rightRotate6(a) (rightRotate((a), 6))
-#define rightRotate7(a) (rightRotate((a), 7))
-#define rightRotate8(a) (rightRotate((a), 8))
-#define rightRotate9(a) (rightRotate((a), 9))
-#define rightRotate10(a) (rightRotate((a), 10))
-#define rightRotate11(a) (rightRotate((a), 11))
-#define rightRotate12(a) (rightRotate((a), 12))
-#define rightRotate13(a) (rightRotate((a), 13))
-#define rightRotate14(a) (rightRotate((a), 14))
-#define rightRotate15(a) (rightRotate((a), 15))
-#define rightRotate16(a) (rightRotate((a), 16))
-#define rightRotate17(a) (rightRotate((a), 17))
-#define rightRotate18(a) (rightRotate((a), 18))
-#define rightRotate19(a) (rightRotate((a), 19))
-#define rightRotate20(a) (rightRotate((a), 20))
-#define rightRotate21(a) (rightRotate((a), 21))
-#define rightRotate22(a) (rightRotate((a), 22))
-#define rightRotate23(a) (rightRotate((a), 23))
-#define rightRotate24(a) (rightRotate((a), 24))
-#define rightRotate25(a) (rightRotate((a), 25))
-#define rightRotate26(a) (rightRotate((a), 26))
-#define rightRotate27(a) (rightRotate((a), 27))
-#define rightRotate28(a) (rightRotate((a), 28))
-#define rightRotate29(a) (rightRotate((a), 29))
-#define rightRotate30(a) (rightRotate((a), 30))
-#define rightRotate31(a) (rightRotate((a), 31))
-
-#else /* LW_CRYPTO_ROTATE32_COMPOSED */
-
-/* Composed rotation macros where 1 and 8 are fast, but others are slow */
-
-/* Left rotate by 1 */
-#define leftRotate1(a) (leftRotate((a), 1))
-
-/* Left rotate by 2 */
-#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1))
-
-/* Left rotate by 3 */
-#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1))
-
-/* Left rotate by 4 */
-#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1))
-
-/* Left rotate by 5: Rotate left by 8, then right by 3 */
-#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1))
-
-/* Left rotate by 6: Rotate left by 8, then right by 2 */
-#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1))
-
-/* Left rotate by 7: Rotate left by 8, then right by 1 */
-#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1))
-
-/* Left rotate by 8 */
-#define leftRotate8(a) (leftRotate((a), 8))
-
-/* Left rotate by 9: Rotate left by 8, then left by 1 */
-#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1))
-
-/* Left rotate by 10: Rotate left by 8, then left by 2 */
-#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1))
-
-/* Left rotate by 11: Rotate left by 8, then left by 3 */
-#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1))
-
-/* Left rotate by 12: Rotate left by 16, then right by 4 */
-#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1))
-
-/* Left rotate by 13: Rotate left by 16, then right by 3 */
-#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1))
-
-/* Left rotate by 14: Rotate left by 16, then right by 2 */
-#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1))
-
-/* Left rotate by 15: Rotate left by 16, then right by 1 */
-#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1))
-
-/* Left rotate by 16 */
-#define leftRotate16(a) (leftRotate((a), 16))
-
-/* Left rotate by 17: Rotate left by 16, then left by 1 */
-#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1))
-
-/* Left rotate by 18: Rotate left by 16, then left by 2 */
-#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1))
-
-/* Left rotate by 19: Rotate left by 16, then left by 3 */
-#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1))
-
-/* Left rotate by 20: Rotate left by 16, then left by 4 */
-#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1))
-
-/* Left rotate by 21: Rotate left by 24, then right by 3 */
-#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1))
-
-/* Left rotate by 22: Rotate left by 24, then right by 2 */
-#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1))
-
-/* Left rotate by 23: Rotate left by 24, then right by 1 */
-#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1))
-
-/* Left rotate by 24 */
-#define leftRotate24(a) (leftRotate((a), 24))
-
-/* Left rotate by 25: Rotate left by 24, then left by 1 */
-#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1))
-
-/* Left rotate by 26: Rotate left by 24, then left by 2 */
-#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1))
-
-/* Left rotate by 27: Rotate left by 24, then left by 3 */
-#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1))
-
-/* Left rotate by 28: Rotate right by 4 */
-#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1))
-
-/* Left rotate by 29: Rotate right by 3 */
-#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1))
-
-/* Left rotate by 30: Rotate right by 2 */
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1))
-
-/* Left rotate by 31: Rotate right by 1 */
-#define leftRotate31(a) (rightRotate((a), 1))
-
-/* Define the 32-bit right rotations in terms of left rotations */
-#define rightRotate1(a) (leftRotate31((a)))
-#define rightRotate2(a) (leftRotate30((a)))
-#define rightRotate3(a) (leftRotate29((a)))
-#define rightRotate4(a) (leftRotate28((a)))
-#define rightRotate5(a) (leftRotate27((a)))
-#define rightRotate6(a) (leftRotate26((a)))
-#define rightRotate7(a) (leftRotate25((a)))
-#define rightRotate8(a) (leftRotate24((a)))
-#define rightRotate9(a) (leftRotate23((a)))
-#define rightRotate10(a) (leftRotate22((a)))
-#define rightRotate11(a) (leftRotate21((a)))
-#define rightRotate12(a) (leftRotate20((a)))
-#define rightRotate13(a) (leftRotate19((a)))
-#define rightRotate14(a) (leftRotate18((a)))
-#define rightRotate15(a) (leftRotate17((a)))
-#define rightRotate16(a) (leftRotate16((a)))
-#define rightRotate17(a) (leftRotate15((a)))
-#define rightRotate18(a) (leftRotate14((a)))
-#define rightRotate19(a) (leftRotate13((a)))
-#define rightRotate20(a) (leftRotate12((a)))
-#define rightRotate21(a) (leftRotate11((a)))
-#define rightRotate22(a) (leftRotate10((a)))
-#define rightRotate23(a) (leftRotate9((a)))
-#define rightRotate24(a) (leftRotate8((a)))
-#define rightRotate25(a) (leftRotate7((a)))
-#define rightRotate26(a) (leftRotate6((a)))
-#define rightRotate27(a) (leftRotate5((a)))
-#define rightRotate28(a) (leftRotate4((a)))
-#define rightRotate29(a) (leftRotate3((a)))
-#define rightRotate30(a) (leftRotate2((a)))
-#define rightRotate31(a) (leftRotate1((a)))
-
-#endif /* LW_CRYPTO_ROTATE32_COMPOSED */
-
-/* Rotation macros for 64-bit arguments */
-
-/* Generic left rotate */
-#define leftRotate_64(a, bits) \
-    (__extension__ ({ \
-        uint64_t _temp = (a); \
-        (_temp << (bits)) | (_temp >> (64 - (bits))); \
-    }))
-
-/* Generic right rotate */
-#define rightRotate_64(a, bits) \
-    (__extension__ ({ \
-        uint64_t _temp = (a); \
-        (_temp >> (bits)) | (_temp << (64 - (bits))); \
-    }))
-
-/* Left rotate by a specific number of bits. These macros may be replaced
- * with more efficient ones on platforms that lack a barrel shifter */
-#define leftRotate1_64(a) (leftRotate_64((a), 1))
-#define leftRotate2_64(a) (leftRotate_64((a), 2))
-#define leftRotate3_64(a) (leftRotate_64((a), 3))
-#define leftRotate4_64(a) (leftRotate_64((a), 4))
-#define leftRotate5_64(a) (leftRotate_64((a), 5))
-#define leftRotate6_64(a) (leftRotate_64((a), 6))
-#define leftRotate7_64(a) (leftRotate_64((a), 7))
-#define leftRotate8_64(a) (leftRotate_64((a), 8))
-#define leftRotate9_64(a) (leftRotate_64((a), 9))
-#define leftRotate10_64(a) (leftRotate_64((a), 10))
-#define leftRotate11_64(a) (leftRotate_64((a), 11))
-#define leftRotate12_64(a) (leftRotate_64((a), 12))
-#define leftRotate13_64(a) (leftRotate_64((a), 13))
-#define leftRotate14_64(a) (leftRotate_64((a), 14))
-#define leftRotate15_64(a) (leftRotate_64((a), 15))
-#define leftRotate16_64(a) (leftRotate_64((a), 16))
-#define leftRotate17_64(a) (leftRotate_64((a), 17))
-#define leftRotate18_64(a) (leftRotate_64((a), 18))
-#define leftRotate19_64(a) (leftRotate_64((a), 19))
-#define leftRotate20_64(a) (leftRotate_64((a), 20))
-#define leftRotate21_64(a) (leftRotate_64((a), 21))
-#define leftRotate22_64(a) (leftRotate_64((a), 22))
-#define leftRotate23_64(a) (leftRotate_64((a), 23))
-#define leftRotate24_64(a) (leftRotate_64((a), 24))
-#define leftRotate25_64(a) (leftRotate_64((a), 25))
-#define leftRotate26_64(a) (leftRotate_64((a), 26))
-#define leftRotate27_64(a) (leftRotate_64((a), 27))
-#define leftRotate28_64(a) (leftRotate_64((a), 28))
-#define leftRotate29_64(a) (leftRotate_64((a), 29))
-#define leftRotate30_64(a) (leftRotate_64((a), 30))
-#define leftRotate31_64(a) (leftRotate_64((a), 31))
-#define leftRotate32_64(a) (leftRotate_64((a), 32))
-#define leftRotate33_64(a) (leftRotate_64((a), 33))
-#define leftRotate34_64(a) (leftRotate_64((a), 34))
-#define leftRotate35_64(a) (leftRotate_64((a), 35))
-#define leftRotate36_64(a) (leftRotate_64((a), 36))
-#define leftRotate37_64(a) (leftRotate_64((a), 37))
-#define leftRotate38_64(a) (leftRotate_64((a), 38))
-#define leftRotate39_64(a) (leftRotate_64((a), 39))
-#define leftRotate40_64(a) (leftRotate_64((a), 40))
-#define leftRotate41_64(a) (leftRotate_64((a), 41))
-#define leftRotate42_64(a) (leftRotate_64((a), 42))
-#define leftRotate43_64(a) (leftRotate_64((a), 43))
-#define leftRotate44_64(a) (leftRotate_64((a), 44))
-#define leftRotate45_64(a) (leftRotate_64((a), 45))
-#define leftRotate46_64(a) (leftRotate_64((a), 46))
-#define leftRotate47_64(a) (leftRotate_64((a), 47))
-#define leftRotate48_64(a) (leftRotate_64((a), 48))
-#define leftRotate49_64(a) (leftRotate_64((a), 49))
-#define leftRotate50_64(a) (leftRotate_64((a), 50))
-#define leftRotate51_64(a) (leftRotate_64((a), 51))
-#define leftRotate52_64(a) (leftRotate_64((a), 52))
-#define leftRotate53_64(a) (leftRotate_64((a), 53))
-#define leftRotate54_64(a) (leftRotate_64((a), 54))
-#define leftRotate55_64(a) (leftRotate_64((a), 55))
-#define leftRotate56_64(a) (leftRotate_64((a), 56))
-#define leftRotate57_64(a) (leftRotate_64((a), 57))
-#define leftRotate58_64(a) (leftRotate_64((a), 58))
-#define leftRotate59_64(a) (leftRotate_64((a), 59))
-#define leftRotate60_64(a) (leftRotate_64((a), 60))
-#define leftRotate61_64(a) (leftRotate_64((a), 61))
-#define leftRotate62_64(a) (leftRotate_64((a), 62))
-#define leftRotate63_64(a) (leftRotate_64((a), 63))
-
-/* Right rotate by a specific number of bits. These macros may be replaced
- * with more efficient ones on platforms that lack a barrel shifter */
-#define rightRotate1_64(a) (rightRotate_64((a), 1))
-#define rightRotate2_64(a) (rightRotate_64((a), 2))
-#define rightRotate3_64(a) (rightRotate_64((a), 3))
-#define rightRotate4_64(a) (rightRotate_64((a), 4))
-#define rightRotate5_64(a) (rightRotate_64((a), 5))
-#define rightRotate6_64(a) (rightRotate_64((a), 6))
-#define rightRotate7_64(a) (rightRotate_64((a), 7))
-#define rightRotate8_64(a) (rightRotate_64((a), 8))
-#define rightRotate9_64(a) (rightRotate_64((a), 9))
-#define rightRotate10_64(a) (rightRotate_64((a), 10))
-#define rightRotate11_64(a) (rightRotate_64((a), 11))
-#define rightRotate12_64(a) (rightRotate_64((a), 12))
-#define rightRotate13_64(a) (rightRotate_64((a), 13))
-#define rightRotate14_64(a) (rightRotate_64((a), 14))
-#define rightRotate15_64(a) (rightRotate_64((a), 15))
-#define rightRotate16_64(a) (rightRotate_64((a), 16))
-#define rightRotate17_64(a) (rightRotate_64((a), 17))
-#define rightRotate18_64(a) (rightRotate_64((a), 18))
-#define rightRotate19_64(a) (rightRotate_64((a), 19))
-#define rightRotate20_64(a) (rightRotate_64((a), 20))
-#define rightRotate21_64(a) (rightRotate_64((a), 21))
-#define rightRotate22_64(a) (rightRotate_64((a), 22))
-#define rightRotate23_64(a) (rightRotate_64((a), 23))
-#define rightRotate24_64(a) (rightRotate_64((a), 24))
-#define rightRotate25_64(a) (rightRotate_64((a), 25))
-#define rightRotate26_64(a) (rightRotate_64((a), 26))
-#define rightRotate27_64(a) (rightRotate_64((a), 27))
-#define rightRotate28_64(a) (rightRotate_64((a), 28))
-#define rightRotate29_64(a) (rightRotate_64((a), 29))
-#define rightRotate30_64(a) (rightRotate_64((a), 30))
-#define rightRotate31_64(a) (rightRotate_64((a), 31))
-#define rightRotate32_64(a) (rightRotate_64((a), 32))
-#define rightRotate33_64(a) (rightRotate_64((a), 33))
-#define rightRotate34_64(a) (rightRotate_64((a), 34))
-#define rightRotate35_64(a) (rightRotate_64((a), 35))
-#define rightRotate36_64(a) (rightRotate_64((a), 36))
-#define rightRotate37_64(a) (rightRotate_64((a), 37))
-#define rightRotate38_64(a) (rightRotate_64((a), 38))
-#define rightRotate39_64(a) (rightRotate_64((a), 39))
-#define rightRotate40_64(a) (rightRotate_64((a), 40))
-#define rightRotate41_64(a) (rightRotate_64((a), 41))
-#define rightRotate42_64(a) (rightRotate_64((a), 42))
-#define rightRotate43_64(a) (rightRotate_64((a), 43))
-#define rightRotate44_64(a) (rightRotate_64((a), 44))
-#define rightRotate45_64(a) (rightRotate_64((a), 45))
-#define rightRotate46_64(a) (rightRotate_64((a), 46))
-#define rightRotate47_64(a) (rightRotate_64((a), 47))
-#define rightRotate48_64(a) (rightRotate_64((a), 48))
-#define rightRotate49_64(a) (rightRotate_64((a), 49))
-#define rightRotate50_64(a) (rightRotate_64((a), 50))
-#define rightRotate51_64(a) (rightRotate_64((a), 51))
-#define rightRotate52_64(a) (rightRotate_64((a), 52))
-#define rightRotate53_64(a) (rightRotate_64((a), 53))
-#define rightRotate54_64(a) (rightRotate_64((a), 54))
-#define rightRotate55_64(a) (rightRotate_64((a), 55))
-#define rightRotate56_64(a) (rightRotate_64((a), 56))
-#define rightRotate57_64(a) (rightRotate_64((a), 57))
-#define rightRotate58_64(a) (rightRotate_64((a), 58))
-#define rightRotate59_64(a) (rightRotate_64((a), 59))
-#define rightRotate60_64(a) (rightRotate_64((a), 60))
-#define rightRotate61_64(a) (rightRotate_64((a), 61))
-#define rightRotate62_64(a) (rightRotate_64((a), 62))
-#define rightRotate63_64(a) (rightRotate_64((a), 63))
-
-/* Rotate a 16-bit value left by a number of bits */
-#define leftRotate_16(a, bits) \
-    (__extension__ ({ \
-        uint16_t _temp = (a); \
-        (_temp << (bits)) | (_temp >> (16 - (bits))); \
-    }))
-
-/* Rotate a 16-bit value right by a number of bits */
-#define rightRotate_16(a, bits) \
-    (__extension__ ({ \
-        uint16_t _temp = (a); \
-        (_temp >> (bits)) | (_temp << (16 - (bits))); \
-    }))
-
-/* Left rotate by a specific number of bits. These macros may be replaced
- * with more efficient ones on platforms that lack a barrel shifter */
-#define leftRotate1_16(a) (leftRotate_16((a), 1))
-#define leftRotate2_16(a) (leftRotate_16((a), 2))
-#define leftRotate3_16(a) (leftRotate_16((a), 3))
-#define leftRotate4_16(a) (leftRotate_16((a), 4))
-#define leftRotate5_16(a) (leftRotate_16((a), 5))
-#define leftRotate6_16(a) (leftRotate_16((a), 6))
-#define leftRotate7_16(a) (leftRotate_16((a), 7))
-#define leftRotate8_16(a) (leftRotate_16((a), 8))
-#define leftRotate9_16(a) (leftRotate_16((a), 9))
-#define leftRotate10_16(a) (leftRotate_16((a), 10))
-#define leftRotate11_16(a) (leftRotate_16((a), 11))
-#define leftRotate12_16(a) (leftRotate_16((a), 12))
-#define leftRotate13_16(a) (leftRotate_16((a), 13))
-#define leftRotate14_16(a) (leftRotate_16((a), 14))
-#define leftRotate15_16(a) (leftRotate_16((a), 15))
-
-/* Right rotate by a specific number of bits. These macros may be replaced
- * with more efficient ones on platforms that lack a barrel shifter */
-#define rightRotate1_16(a) (rightRotate_16((a), 1))
-#define rightRotate2_16(a) (rightRotate_16((a), 2))
-#define rightRotate3_16(a) (rightRotate_16((a), 3))
-#define rightRotate4_16(a) (rightRotate_16((a), 4))
-#define rightRotate5_16(a) (rightRotate_16((a), 5))
-#define rightRotate6_16(a) (rightRotate_16((a), 6))
-#define rightRotate7_16(a) (rightRotate_16((a), 7))
-#define rightRotate8_16(a) (rightRotate_16((a), 8))
-#define rightRotate9_16(a) (rightRotate_16((a), 9))
-#define rightRotate10_16(a) (rightRotate_16((a), 10))
-#define rightRotate11_16(a) (rightRotate_16((a), 11))
-#define rightRotate12_16(a) (rightRotate_16((a), 12))
-#define rightRotate13_16(a) (rightRotate_16((a), 13))
-#define rightRotate14_16(a) (rightRotate_16((a), 14))
-#define rightRotate15_16(a) (rightRotate_16((a), 15))
-
-/* Rotate an 8-bit value left by a number of bits */
-#define leftRotate_8(a, bits) \
-    (__extension__ ({ \
-        uint8_t _temp = (a); \
-        (_temp << (bits)) | (_temp >> (8 - (bits))); \
-    }))
-
-/* Rotate an 8-bit value right by a number of bits */
-#define rightRotate_8(a, bits) \
-    (__extension__ ({ \
-        uint8_t _temp = (a); \
-        (_temp >> (bits)) | (_temp << (8 - (bits))); \
-    }))
-
-/* Left rotate by a specific number of bits.
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/gimli/Implementations/crypto_aead/gimli24v1/rhys/internal-gimli24-avr.S b/gimli/Implementations/crypto_aead/gimli24v1/rhys/internal-gimli24-avr.S new file mode 100644 index 0000000..efcd500 --- /dev/null +++ b/gimli/Implementations/crypto_aead/gimli24v1/rhys/internal-gimli24-avr.S @@ -0,0 +1,9419 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global gimli24_permute + .type gimli24_permute, @function +gimli24_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r28,Z+32 + ldd r29,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + ldd r12,Z+36 + ldd r13,Z+37 + ldd r14,Z+38 + ldd r15,Z+39 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor 
r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + ldi r31,24 + eor r4,r31 + ldi r30,121 + eor r5,r30 + ldi r25,55 + eor r6,r25 + ldi r24,158 + eor r7,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw 
r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + pop r30 + pop r31 + st Z,r4 + std Z+1,r5 + std Z+2,r6 + std Z+3,r7 + std Z+16,r22 + std Z+17,r23 + std Z+18,r26 + std Z+19,r27 + std Z+32,r28 + std Z+33,r29 + std Z+34,r2 + std Z+35,r3 + std Z+4,r18 + std Z+5,r19 + std Z+6,r20 + std Z+7,r21 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + std Z+36,r12 + std Z+37,r13 + std Z+38,r14 + std Z+39,r15 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r26,Z+26 + ldd r27,Z+27 + ldd r28,Z+40 + ldd r29,Z+41 + ldd r2,Z+42 + ldd r3,Z+43 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov 
r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl 
r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + pop r30 + pop r31 + push r21 + push r20 + push r19 + push r18 + push r7 + push r6 + push r5 + push r4 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + 
lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw 
r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and 
r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + pop r30 + pop r31 + std Z+8,r4 + std Z+9,r5 + std Z+10,r6 + std Z+11,r7 + std Z+24,r22 + std Z+25,r23 + std Z+26,r26 + std Z+27,r27 + std Z+40,r28 + std Z+41,r29 + std Z+42,r2 + std Z+43,r3 + std Z+12,r18 + std Z+13,r19 + std Z+14,r20 + std Z+15,r21 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + ld r4,Z + ldd r5,Z+1 + ldd r6,Z+2 + ldd r7,Z+3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r28,Z+32 + ldd r29,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + ldd r12,Z+36 + ldd r13,Z+37 + ldd r14,Z+38 + ldd r15,Z+39 + pop r18 + pop r19 + pop r20 + pop r21 + pop r4 + pop r5 + pop r6 + pop r7 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and 
r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + ldi r31,20 + eor r18,r31 + ldi r30,121 + eor r19,r30 + ldi r25,55 + eor r20,r25 + ldi r24,158 + eor r21,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor 
r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + pop r30 + pop r31 + push r21 + push r20 + push r19 + push r18 + push r7 + push r6 + push r5 + push r4 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r4,Z+12 + ldd r5,Z+13 + ldd r6,Z+14 + ldd r7,Z+15 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol 
r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 
+ rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + ldi r31,16 + eor r4,r31 + ldi r30,121 + eor r5,r30 + ldi r25,55 + eor r6,r25 + ldi r24,158 + eor r7,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + 
eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + pop r30 + pop r31 + st Z,r4 + std Z+1,r5 + std Z+2,r6 + std Z+3,r7 + std Z+16,r22 + std Z+17,r23 + std Z+18,r26 + std Z+19,r27 + std Z+32,r28 + std Z+33,r29 + std Z+34,r2 + std Z+35,r3 + std Z+4,r18 + std Z+5,r19 + std Z+6,r20 + std Z+7,r21 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + std Z+36,r12 + std Z+37,r13 + std Z+38,r14 + std Z+39,r15 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r26,Z+26 + ldd r27,Z+27 + ldd r28,Z+40 + ldd r29,Z+41 + ldd r2,Z+42 + ldd r3,Z+43 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + pop r18 + pop r19 + pop r20 + pop r21 + pop r4 + pop r5 + pop r6 + pop r7 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov 
r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + 
eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor 
r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + pop r30 + pop r31 + push r21 + push r20 + push r19 + push r18 + push r7 + push r6 + push r5 + push r4 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor 
r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + 
rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + pop r30 + pop r31 + std Z+8,r4 + std Z+9,r5 + std Z+10,r6 + std Z+11,r7 + std Z+24,r22 + std Z+25,r23 + std Z+26,r26 + std Z+27,r27 + std Z+40,r28 + std Z+41,r29 + std Z+42,r2 + std Z+43,r3 + std Z+12,r18 + std Z+13,r19 + std Z+14,r20 + std Z+15,r21 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + ld r4,Z + ldd r5,Z+1 + ldd r6,Z+2 + ldd r7,Z+3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r28,Z+32 + ldd r29,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + ldd r12,Z+36 + ldd r13,Z+37 + ldd r14,Z+38 + ldd r15,Z+39 + pop r18 + pop r19 + pop r20 + pop r21 + pop r4 + pop r5 + pop r6 + pop r7 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor 
r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + 
lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + ldi r31,12 + eor r18,r31 + ldi r30,121 + eor r19,r30 + ldi r25,55 + eor r20,r25 + ldi r24,158 + eor r21,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and 
r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + pop r30 + pop r31 + push r21 + push r20 + push r19 + push r18 + push r7 + push r6 + push r5 + push r4 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r4,Z+12 + ldd r5,Z+13 + ldd r6,Z+14 + ldd r7,Z+15 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor 
r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + ldi r31,8 + eor r4,r31 + ldi r30,121 + eor r5,r30 + ldi r25,55 + eor r6,r25 + ldi r24,158 + eor r7,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol 
r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + pop r30 + pop r31 + st Z,r4 + 
std Z+1,r5 + std Z+2,r6 + std Z+3,r7 + std Z+16,r22 + std Z+17,r23 + std Z+18,r26 + std Z+19,r27 + std Z+32,r28 + std Z+33,r29 + std Z+34,r2 + std Z+35,r3 + std Z+4,r18 + std Z+5,r19 + std Z+6,r20 + std Z+7,r21 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + std Z+36,r12 + std Z+37,r13 + std Z+38,r14 + std Z+39,r15 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r26,Z+26 + ldd r27,Z+27 + ldd r28,Z+40 + ldd r29,Z+41 + ldd r2,Z+42 + ldd r3,Z+43 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + pop r18 + pop r19 + pop r20 + pop r21 + pop r4 + pop r5 + pop r6 + pop r7 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or 
r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol 
r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + pop r30 + pop r31 + push r21 + push r20 + push r19 + push r18 + push r7 + push r6 + push r5 + push r4 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and 
r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw 
r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + 
mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + pop r30 + pop r31 + std Z+8,r4 + std Z+9,r5 + std Z+10,r6 + std Z+11,r7 + std Z+24,r22 + std Z+25,r23 + std Z+26,r26 + std Z+27,r27 + std Z+40,r28 + std Z+41,r29 + std Z+42,r2 + std Z+43,r3 + std Z+12,r18 + std Z+13,r19 + std Z+14,r20 + std Z+15,r21 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + ld r4,Z + ldd r5,Z+1 + ldd r6,Z+2 + ldd r7,Z+3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r28,Z+32 + ldd r29,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + ldd r12,Z+36 + ldd r13,Z+37 + ldd r14,Z+38 + ldd r15,Z+39 + pop r18 + pop r19 + pop r20 + pop r21 + pop r4 + pop r5 + pop r6 + pop r7 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and 
r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + ldi r31,4 + eor r18,r31 + ldi r30,121 + eor r19,r30 + ldi r25,55 + eor r20,r25 + ldi r24,158 + eor r21,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + 
lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + pop r30 + pop r31 + 
push r21 + push r20 + push r19 + push r18 + push r7 + push r6 + push r5 + push r4 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r4,Z+12 + ldd r5,Z+13 + ldd r6,Z+14 + ldd r7,Z+15 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+16,r22 + std Z+17,r23 + std Z+18,r26 + std Z+19,r27 + std Z+32,r28 + std Z+33,r29 + std Z+34,r2 + std Z+35,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + std Z+36,r12 + std Z+37,r13 + std Z+38,r14 + std Z+39,r15 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r26,Z+26 + ldd r27,Z+27 + ldd r28,Z+40 + ldd r29,Z+41 + ldd r2,Z+42 + ldd r3,Z+43 + ldd r4,Z+12 + ldd r5,Z+13 + ldd r6,Z+14 + ldd r7,Z+15 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + pop r4 + pop r5 + pop r6 + pop r7 + pop r18 + pop r19 + pop r20 + pop r21 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + 
rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + pop r30 + pop r31 + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + std Z+24,r22 + std Z+25,r23 + std Z+26,r26 + std Z+27,r27 + std Z+40,r28 + std Z+41,r29 + std Z+42,r2 + std Z+43,r3 + std Z+12,r4 + std Z+13,r5 + std Z+14,r6 + std Z+15,r7 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + eor r1,r1 + ret + .size gimli24_permute, .-gimli24_permute + +#endif diff --git a/gimli/Implementations/crypto_aead/gimli24v1/rhys/internal-gimli24.c b/gimli/Implementations/crypto_aead/gimli24v1/rhys/internal-gimli24.c index ab2c830..d719988 100644 --- a/gimli/Implementations/crypto_aead/gimli24v1/rhys/internal-gimli24.c +++ b/gimli/Implementations/crypto_aead/gimli24v1/rhys/internal-gimli24.c @@ -22,6 +22,8 @@ #include "internal-gimli24.h" +#if !defined(__AVR__) + /* Apply the SP-box to a specific column in the state array */ #define GIMLI24_SP(s0, s4, s8) \ do { \ @@ -136,3 +138,5 @@ void gimli24_permute(uint32_t state[12]) le_store_word32(((unsigned char *)(&(state[11]))), s11); #endif } + +#endif /* !__AVR__ */ diff --git a/gimli/Implementations/crypto_aead/gimli24v1/rhys/internal-util.h b/gimli/Implementations/crypto_aead/gimli24v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/gimli/Implementations/crypto_aead/gimli24v1/rhys/internal-util.h +++ b/gimli/Implementations/crypto_aead/gimli24v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised 
for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) 
(rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/aead-common.c b/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
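The composed rotation macros above exist because the AVR instruction set has no barrel shifter: a 32-bit rotation by one bit is a short lsl/rol carry chain, while rotations by 8, 16 or 24 bits are pure byte moves, so every other count is built from one byte-aligned rotation plus a few single-bit steps. A minimal standalone C sketch of the idea (not part of the patch; rotl32/rotr32 stand in for the generic leftRotate/rightRotate macros):

#include <stdint.h>
#include <stdio.h>

/* Generic 32-bit rotations, equivalent to the leftRotate()/rightRotate()
 * macros above (bits must be in 1..31). */
static uint32_t rotl32(uint32_t x, unsigned bits) { return (x << bits) | (x >> (32 - bits)); }
static uint32_t rotr32(uint32_t x, unsigned bits) { return (x >> bits) | (x << (32 - bits)); }

/* Composed rotate-left-by-5, mirroring the leftRotate5() macro above:
 * one byte-aligned rotation (a cheap byte shuffle on AVR) followed by
 * three single-bit rotations in the opposite direction. */
static uint32_t rotl5_composed(uint32_t x)
{
    x = rotl32(x, 8);
    x = rotr32(x, 1);
    x = rotr32(x, 1);
    x = rotr32(x, 1);
    return x;
}

int main(void)
{
    uint32_t v = 0x12345678u;
    /* Both lines print 468acf02. */
    printf("%08lx\n", (unsigned long)rotl32(v, 5));
    printf("%08lx\n", (unsigned long)rotl5_composed(v));
    return 0;
}

The macro table above applies the same decomposition to every rotation count from 1 to 31, and defines the right rotations as left rotations by the complementary count.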
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/aead-common.h b/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. 
- */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. 
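The aead_cipher_t table defined here is what lets a benchmarking harness drive every candidate through one function-pointer interface. A hedged sketch of such a caller (not part of the patch; encrypt_with is a hypothetical helper, and gimli24_cipher later in this file is one instance of the table):

#include <stdio.h>
#include "aead-common.h"

/* Hypothetical helper: encrypt a message through whichever cipher is
 * described by "meta", using only the function-pointer table.  No
 * associated data and no secret nonce are used in this sketch. */
static int encrypt_with(const aead_cipher_t *meta,
                        unsigned char *c, unsigned long long *clen,
                        const unsigned char *m, unsigned long long mlen,
                        const unsigned char *nonce,   /* meta->nonce_len bytes */
                        const unsigned char *key)     /* meta->key_len bytes */
{
    printf("%s: key=%u nonce=%u tag=%u bytes\n",
           meta->name, meta->key_len, meta->nonce_len, meta->tag_len);
    return meta->encrypt(c, clen, m, mlen,
                         (const unsigned char *)"", 0, /* empty associated data */
                         NULL, nonce, key);
}

/* e.g. encrypt_with(&gimli24_cipher, ct, &ctlen, msg, msglen, npub, k); */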
- */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/api.h b/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/api.h deleted file mode 100644 index ae8c7f6..0000000 --- a/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/api.h +++ /dev/null @@ -1 +0,0 @@ -#define CRYPTO_BYTES 32 diff --git a/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/gimli24.c b/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/gimli24.c deleted file mode 100644 index 4bc7d9f..0000000 --- a/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/gimli24.c +++ /dev/null @@ -1,330 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
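aead_check_tag(), declared above, avoids branching on secret data: it ORs together the XOR of every pair of tag bytes, then turns "is the accumulator zero?" into a mask with (accum - 1) >> 8. A standalone C illustration of that mask (not part of the patch; like the library code, it assumes the usual arithmetic right shift of negative ints):

#include <stdio.h>

/* "accum" is zero exactly when the two tags are equal.  (accum - 1) >> 8
 * is then all-ones (-1) on a match and 0 on a mismatch, so the plaintext
 * can be masked and the return value derived without a secret-dependent
 * branch. */
static int tag_mask(int accum)
{
    return (accum - 1) >> 8;
}

int main(void)
{
    printf("match:    mask=%d\n", tag_mask(0));    /* -1: plaintext kept, ~mask == 0  */
    printf("mismatch: mask=%d\n", tag_mask(0x5a)); /*  0: plaintext zeroed, ~mask == -1 */
    return 0;
}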
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "gimli24.h" -#include "internal-gimli24.h" -#include - -aead_cipher_t const gimli24_cipher = { - "GIMLI-24", - GIMLI24_KEY_SIZE, - GIMLI24_NONCE_SIZE, - GIMLI24_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - gimli24_aead_encrypt, - gimli24_aead_decrypt -}; - -aead_hash_algorithm_t const gimli24_hash_algorithm = { - "GIMLI-24-HASH", - sizeof(gimli24_hash_state_t), - GIMLI24_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - gimli24_hash, - (aead_hash_init_t)gimli24_hash_init, - (aead_hash_update_t)gimli24_hash_absorb, - (aead_hash_finalize_t)gimli24_hash_finalize, - (aead_xof_absorb_t)gimli24_hash_absorb, - (aead_xof_squeeze_t)gimli24_hash_squeeze -}; - -/** - * \brief Number of bytes of input or output data to process per block. - */ -#define GIMLI24_BLOCK_SIZE 16 - -/** - * \brief Structure of the GIMLI-24 state as both an array of words - * and an array of bytes. - */ -typedef union -{ - uint32_t words[12]; /**< Words in the state */ - uint8_t bytes[48]; /**< Bytes in the state */ - -} gimli24_state_t; - -/** - * \brief Absorbs data into a GIMLI-24 state. - * - * \param state The state to absorb the data into. - * \param data Points to the data to be absorbed. - * \param len Length of the data to be absorbed. - */ -static void gimli24_absorb - (gimli24_state_t *state, const unsigned char *data, unsigned long long len) -{ - unsigned temp; - while (len >= GIMLI24_BLOCK_SIZE) { - lw_xor_block(state->bytes, data, GIMLI24_BLOCK_SIZE); - gimli24_permute(state->words); - data += GIMLI24_BLOCK_SIZE; - len -= GIMLI24_BLOCK_SIZE; - } - temp = (unsigned)len; - lw_xor_block(state->bytes, data, temp); - state->bytes[temp] ^= 0x01; /* Padding */ - state->bytes[47] ^= 0x01; - gimli24_permute(state->words); -} - -/** - * \brief Encrypts a block of data with a GIMLI-24 state. - * - * \param state The state to encrypt with. - * \param dest Points to the destination buffer. - * \param src Points to the source buffer. - * \param len Length of the data to encrypt from \a src into \a dest. 
- */ -static void gimli24_encrypt - (gimli24_state_t *state, unsigned char *dest, - const unsigned char *src, unsigned long long len) -{ - unsigned temp; - while (len >= GIMLI24_BLOCK_SIZE) { - lw_xor_block_2_dest(dest, state->bytes, src, GIMLI24_BLOCK_SIZE); - gimli24_permute(state->words); - dest += GIMLI24_BLOCK_SIZE; - src += GIMLI24_BLOCK_SIZE; - len -= GIMLI24_BLOCK_SIZE; - } - temp = (unsigned)len; - lw_xor_block_2_dest(dest, state->bytes, src, temp); - state->bytes[temp] ^= 0x01; /* Padding */ - state->bytes[47] ^= 0x01; - gimli24_permute(state->words); -} - -/** - * \brief Decrypts a block of data with a GIMLI-24 state. - * - * \param state The state to decrypt with. - * \param dest Points to the destination buffer. - * \param src Points to the source buffer. - * \param len Length of the data to decrypt from \a src into \a dest. - */ -static void gimli24_decrypt - (gimli24_state_t *state, unsigned char *dest, - const unsigned char *src, unsigned long long len) -{ - unsigned temp; - while (len >= GIMLI24_BLOCK_SIZE) { - lw_xor_block_swap(dest, state->bytes, src, GIMLI24_BLOCK_SIZE); - gimli24_permute(state->words); - dest += GIMLI24_BLOCK_SIZE; - src += GIMLI24_BLOCK_SIZE; - len -= GIMLI24_BLOCK_SIZE; - } - temp = (unsigned)len; - lw_xor_block_swap(dest, state->bytes, src, temp); - state->bytes[temp] ^= 0x01; /* Padding */ - state->bytes[47] ^= 0x01; - gimli24_permute(state->words); -} - -int gimli24_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - gimli24_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + GIMLI24_TAG_SIZE; - - /* Format the initial GIMLI state from the nonce and the key */ - memcpy(state.words, npub, GIMLI24_NONCE_SIZE); - memcpy(state.words + 4, k, GIMLI24_KEY_SIZE); - - /* Permute the initial state */ - gimli24_permute(state.words); - - /* Absorb the associated data */ - gimli24_absorb(&state, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - gimli24_encrypt(&state, c, m, mlen); - - /* Generate the authentication tag at the end of the ciphertext */ - memcpy(c + mlen, state.bytes, GIMLI24_TAG_SIZE); - return 0; -} - -int gimli24_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - gimli24_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < GIMLI24_TAG_SIZE) - return -1; - *mlen = clen - GIMLI24_TAG_SIZE; - - /* Format the initial GIMLI state from the nonce and the key */ - memcpy(state.words, npub, GIMLI24_NONCE_SIZE); - memcpy(state.words + 4, k, GIMLI24_KEY_SIZE); - - /* Permute the initial state */ - gimli24_permute(state.words); - - /* Absorb the associated data */ - gimli24_absorb(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - gimli24_decrypt(&state, m, c, *mlen); - - /* Check the authentication tag at the end of the packet */ - return aead_check_tag - (m, *mlen, state.bytes, c + *mlen, GIMLI24_TAG_SIZE); -} - -int gimli24_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - gimli24_state_t state; - - /* Initialize the hash state to all zeroes */ - memset(&state, 0, 
sizeof(state)); - - /* Absorb the input */ - gimli24_absorb(&state, in, inlen); - - /* Generate the output hash */ - memcpy(out, state.bytes, GIMLI24_HASH_SIZE / 2); - gimli24_permute(state.words); - memcpy(out + GIMLI24_HASH_SIZE / 2, state.bytes, GIMLI24_HASH_SIZE / 2); - return 0; -} - -void gimli24_hash_init(gimli24_hash_state_t *state) -{ - memset(state, 0, sizeof(gimli24_hash_state_t)); -} - -#define GIMLI24_XOF_RATE 16 -#define gimli24_xof_permute() \ - gimli24_permute((uint32_t *)(state->s.state)) - -void gimli24_hash_absorb - (gimli24_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - unsigned temp; - - if (state->s.mode) { - /* We were squeezing output - go back to the absorb phase */ - state->s.mode = 0; - state->s.count = 0; - gimli24_xof_permute(); - } - - /* Handle the partial left-over block from last time */ - if (state->s.count) { - temp = GIMLI24_XOF_RATE - state->s.count; - if (temp > inlen) { - temp = (unsigned)inlen; - lw_xor_block(state->s.state + state->s.count, in, temp); - state->s.count += temp; - return; - } - lw_xor_block(state->s.state + state->s.count, in, temp); - state->s.count = 0; - in += temp; - inlen -= temp; - gimli24_xof_permute(); - } - - /* Process full blocks that are aligned at state->s.count == 0 */ - while (inlen >= GIMLI24_XOF_RATE) { - lw_xor_block(state->s.state, in, GIMLI24_XOF_RATE); - in += GIMLI24_XOF_RATE; - inlen -= GIMLI24_XOF_RATE; - gimli24_xof_permute(); - } - - /* Process the left-over block at the end of the input */ - temp = (unsigned)inlen; - lw_xor_block(state->s.state, in, temp); - state->s.count = temp; -} - -void gimli24_hash_squeeze - (gimli24_hash_state_t *state, unsigned char *out, - unsigned long long outlen) -{ - unsigned temp; - - /* Pad the final input block if we were still in the absorb phase */ - if (!state->s.mode) { - state->s.state[state->s.count] ^= 0x01; - state->s.state[47] ^= 0x01; - state->s.count = 0; - state->s.mode = 1; - } - - /* Handle left-over partial blocks from last time */ - if (state->s.count) { - temp = GIMLI24_XOF_RATE - state->s.count; - if (temp > outlen) { - temp = (unsigned)outlen; - memcpy(out, state->s.state + state->s.count, temp); - state->s.count += temp; - return; - } - memcpy(out, state->s.state + state->s.count, temp); - out += temp; - outlen -= temp; - state->s.count = 0; - } - - /* Handle full blocks */ - while (outlen >= GIMLI24_XOF_RATE) { - gimli24_xof_permute(); - memcpy(out, state->s.state, GIMLI24_XOF_RATE); - out += GIMLI24_XOF_RATE; - outlen -= GIMLI24_XOF_RATE; - } - - /* Handle the left-over block */ - if (outlen > 0) { - temp = (unsigned)outlen; - gimli24_xof_permute(); - memcpy(out, state->s.state, temp); - state->s.count = temp; - } -} - -void gimli24_hash_finalize - (gimli24_hash_state_t *state, unsigned char *out) -{ - gimli24_hash_squeeze(state, out, GIMLI24_HASH_SIZE); -} diff --git a/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/gimli24.h b/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/gimli24.h deleted file mode 100644 index f72aec7..0000000 --- a/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/gimli24.h +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
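The gimli24_absorb()/gimli24_encrypt()/gimli24_decrypt() helpers above all finish a message the same way: XOR the final (possibly partial) 16-byte rate block into the state, XOR a 0x01 padding byte just past the data, XOR another 0x01 into byte 47 at the end of the 48-byte state, then permute once more. A small sketch of that convention in isolation (not part of the patch; "permute" stands in for gimli24_permute()):

#include <stdint.h>

/* Final-block padding as used by the Gimli sponge mode above. */
static void absorb_last_block(uint32_t state_words[12],
                              const uint8_t *data, unsigned len, /* len < 16 */
                              void (*permute)(uint32_t state[12]))
{
    uint8_t *bytes = (uint8_t *)state_words;  /* byte view of the 48-byte state */
    unsigned i;
    for (i = 0; i < len; ++i)
        bytes[i] ^= data[i];   /* same effect as lw_xor_block() */
    bytes[len] ^= 0x01;        /* padding marker immediately after the data */
    bytes[47] ^= 0x01;         /* domain separation at the end of the state */
    permute(state_words);
}

The real code accesses the state through a union of 12 words and 48 bytes, which is why the byte view here is safe to take.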
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_GIMLI24_H -#define LWCRYPTO_GIMLI24_H - -#include "aead-common.h" - -/** - * \file gimli24.h - * \brief Gimli authenticated encryption algorithm. - * - * GIMLI-24-CIPHER has a 256-bit key, a 128-bit nonce, and a 128-bit tag. - * It is the spiritual successor to the widely used ChaCha20 and has a - * similar design. - * - * This library also includes an implementation of the hash algorithm - * GIMLI-24-HASH in both regular hashing and XOF modes. - * - * References: https://gimli.cr.yp.to/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for GIMLI-24. - */ -#define GIMLI24_KEY_SIZE 32 - -/** - * \brief Size of the nonce for GIMLI-24. - */ -#define GIMLI24_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for GIMLI-24. - */ -#define GIMLI24_TAG_SIZE 16 - -/** - * \brief Size of the hash output for GIMLI-24. - */ -#define GIMLI24_HASH_SIZE 32 - -/** - * \brief State information for GIMLI-24-HASH incremental modes. - */ -typedef union -{ - struct { - unsigned char state[48]; /**< Current hash state */ - unsigned char count; /**< Number of bytes in the current block */ - unsigned char mode; /**< Hash mode: 0 for absorb, 1 for squeeze */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} gimli24_hash_state_t; - -/** - * \brief Meta-information block for the GIMLI-24 cipher. - */ -extern aead_cipher_t const gimli24_cipher; - -/** - * \brief Meta-information block for the GIMLI-24-HASH algorithm. - * - * This meta-information block can also be used in XOF mode. - */ -extern aead_hash_algorithm_t const gimli24_hash_algorithm; - -/** - * \brief Encrypts and authenticates a packet with GIMLI-24 using the - * full AEAD mode. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. 
- * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa gimli24_aead_decrypt() - */ -int gimli24_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with GIMLI-24 using the - * full AEAD mode. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa gimli24_aead_encrypt() - */ -int gimli24_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with GIMLI-24 to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * GIMLI24_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int gimli24_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a GIMLI-24-HASH hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa gimli24_hash_absorb(), gimli24_hash_squeeze(), gimli24_hash() - */ -void gimli24_hash_init(gimli24_hash_state_t *state); - -/** - * \brief Aborbs more input data into a GIMLI-24-HASH state. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa gimli24_hash_init(), gimli24_hash_squeeze() - */ -void gimli24_hash_absorb - (gimli24_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Squeezes output data from an GIMLI-24-HASH state. - * - * \param state Hash state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - * - * \sa gimli24_hash_init(), gimli24_hash_absorb() - */ -void gimli24_hash_squeeze - (gimli24_hash_state_t *state, unsigned char *out, - unsigned long long outlen); - -/** - * \brief Returns the final hash value from a GIMLI-24-HASH hashing operation. 
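Taken together, the declarations above give the usual AEAD round trip. A hedged usage sketch (not part of the patch; key, nonce and message values are arbitrary placeholders, not test vectors from the specification):

#include <stdio.h>
#include <string.h>
#include "gimli24.h"

int main(void)
{
    unsigned char key[GIMLI24_KEY_SIZE] = {0};      /* 32 bytes */
    unsigned char nonce[GIMLI24_NONCE_SIZE] = {0};  /* 16 bytes */
    unsigned char msg[] = "hello gimli";
    unsigned char ct[sizeof(msg) + GIMLI24_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen = 0, ptlen = 0;

    gimli24_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                         NULL, 0,      /* no associated data */
                         NULL, nonce, key);
    if (gimli24_aead_decrypt(pt, &ptlen, NULL, ct, ctlen,
                             NULL, 0, nonce, key) != 0)
        return 1;                      /* a corrupted tag or ciphertext lands here */
    printf("recovered %llu bytes\n", ptlen);
    return memcmp(pt, msg, sizeof(msg)) != 0;
}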
- * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - * - * \note This is a wrapper around gimli24_hash_squeeze() for a fixed length - * of GIMLI24_HASH_SIZE bytes. - * - * \sa gimli24_hash_init(), gimli24_hash_absorb() - */ -void gimli24_hash_finalize - (gimli24_hash_state_t *state, unsigned char *out); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/hash.c b/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/hash.c deleted file mode 100644 index 93789b1..0000000 --- a/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/hash.c +++ /dev/null @@ -1,8 +0,0 @@ - -#include "gimli24.h" - -int crypto_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - return gimli24_hash(out, in, inlen); -} diff --git a/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/internal-gimli24-avr.S b/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/internal-gimli24-avr.S deleted file mode 100644 index efcd500..0000000 --- a/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/internal-gimli24-avr.S +++ /dev/null @@ -1,9419 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global gimli24_permute - .type gimli24_permute, @function -gimli24_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r28,Z+32 - ldd r29,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - ldd r12,Z+36 - ldd r13,Z+37 - ldd r14,Z+38 - ldd r15,Z+39 - push r31 - push r30 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov 
r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - ldi r31,24 - eor r4,r31 - ldi r30,121 - eor r5,r30 - ldi r25,55 - eor r6,r25 - ldi r24,158 - eor r7,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol 
r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - pop r30 - pop r31 - st Z,r4 - std Z+1,r5 - std Z+2,r6 - std Z+3,r7 - std Z+16,r22 - std Z+17,r23 - std Z+18,r26 - std Z+19,r27 - std Z+32,r28 - std Z+33,r29 - std Z+34,r2 - std Z+35,r3 - std Z+4,r18 - std Z+5,r19 - std Z+6,r20 - std Z+7,r21 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - std Z+36,r12 - std Z+37,r13 - std Z+38,r14 - std Z+39,r15 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r26,Z+26 - ldd r27,Z+27 - ldd r28,Z+40 - ldd r29,Z+41 - ldd r2,Z+42 - ldd r3,Z+43 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - push r31 - push r30 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - 
rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 
- eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - pop r30 - pop r31 - push r21 - push r20 - push r19 - push r18 - push r7 - push r6 - push r5 - push r4 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - push r31 - push r30 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor 
r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - 
rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - 
lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - pop r30 - pop r31 - std Z+8,r4 - std Z+9,r5 - std Z+10,r6 - std Z+11,r7 - std Z+24,r22 - std Z+25,r23 - std Z+26,r26 - std Z+27,r27 - std Z+40,r28 - std Z+41,r29 - std Z+42,r2 - std Z+43,r3 - std Z+12,r18 - std Z+13,r19 - std Z+14,r20 - std Z+15,r21 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - ld r4,Z - ldd r5,Z+1 - ldd r6,Z+2 - ldd r7,Z+3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r28,Z+32 - ldd r29,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - ldd r12,Z+36 - ldd r13,Z+37 - ldd r14,Z+38 - ldd r15,Z+39 - pop r18 - pop r19 - pop r20 - pop r21 - pop r4 - pop r5 - pop r6 - pop r7 - push r31 - push r30 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol 
r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - ldi r31,20 - eor r18,r31 - ldi r30,121 - eor r19,r30 - ldi r25,55 - eor r20,r25 - ldi r24,158 - eor r21,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov 
r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - pop r30 - pop r31 - push r21 - push r20 - push r19 - push r18 - push r7 - push r6 - push r5 - push r4 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r4,Z+12 - ldd r5,Z+13 - ldd r6,Z+14 - ldd r7,Z+15 - push r31 - push r30 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor 
r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor 
r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - ldi r31,16 - eor r4,r31 - ldi r30,121 - eor r5,r30 - ldi r25,55 - eor r6,r25 - ldi r24,158 - eor r7,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - 
rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - pop r30 - pop r31 - st Z,r4 - std Z+1,r5 - std Z+2,r6 - std Z+3,r7 - std Z+16,r22 - std Z+17,r23 - std Z+18,r26 - std Z+19,r27 - std Z+32,r28 - std Z+33,r29 - std Z+34,r2 - std Z+35,r3 - std Z+4,r18 - std Z+5,r19 - std Z+6,r20 - std Z+7,r21 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - std Z+36,r12 - std Z+37,r13 - std Z+38,r14 - std Z+39,r15 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r26,Z+26 - ldd r27,Z+27 - ldd r28,Z+40 - ldd r29,Z+41 - ldd r2,Z+42 - ldd r3,Z+43 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - pop r18 - pop r19 - pop r20 - pop r21 - pop r4 - pop r5 - pop r6 - pop r7 - push r31 - push r30 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 
- rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - 
movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and 
r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - pop r30 - pop r31 - push r21 - push r20 - push r19 - push r18 - push r7 - push r6 - push r5 - push r4 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - push r31 - push r30 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 
- and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 
- eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - pop r30 - pop r31 - std Z+8,r4 - std Z+9,r5 - std Z+10,r6 - std Z+11,r7 - std Z+24,r22 - std Z+25,r23 - std Z+26,r26 - std Z+27,r27 - std Z+40,r28 - std Z+41,r29 - std Z+42,r2 - std Z+43,r3 - std Z+12,r18 - std Z+13,r19 - std Z+14,r20 - std Z+15,r21 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - ld r4,Z - ldd r5,Z+1 - ldd r6,Z+2 - ldd r7,Z+3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r28,Z+32 - ldd r29,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - ldd r12,Z+36 - ldd r13,Z+37 - ldd r14,Z+38 - ldd r15,Z+39 - pop r18 - pop r19 - pop r20 - pop r21 - pop r4 - pop r5 - pop r6 - pop r7 - push r31 - push r30 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov 
r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 
- eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - ldi r31,12 - eor r18,r31 - ldi r30,121 - eor r19,r30 - ldi r25,55 - eor r20,r25 - ldi r24,158 - eor r21,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol 
r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - pop r30 - pop r31 - push r21 - push r20 - push r19 - push r18 - push r7 - push r6 - push r5 - push r4 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r4,Z+12 - ldd r5,Z+13 - ldd r6,Z+14 - ldd r7,Z+15 - push r31 - push r30 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - 
lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - ldi r31,8 - eor r4,r31 - ldi r30,121 - eor r5,r30 - ldi r25,55 - eor r6,r25 - ldi r24,158 - eor r7,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor 
r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - pop r30 - pop r31 - st Z,r4 - std Z+1,r5 - std Z+2,r6 - std Z+3,r7 - std Z+16,r22 - std Z+17,r23 - std Z+18,r26 - std Z+19,r27 - std 
Z+32,r28 - std Z+33,r29 - std Z+34,r2 - std Z+35,r3 - std Z+4,r18 - std Z+5,r19 - std Z+6,r20 - std Z+7,r21 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - std Z+36,r12 - std Z+37,r13 - std Z+38,r14 - std Z+39,r15 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r26,Z+26 - ldd r27,Z+27 - ldd r28,Z+40 - ldd r29,Z+41 - ldd r2,Z+42 - ldd r3,Z+43 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - pop r18 - pop r19 - pop r20 - pop r21 - pop r4 - pop r5 - pop r6 - pop r7 - push r31 - push r30 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor 
r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - 
eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - pop r30 - pop r31 - push r21 - push r20 - push r19 - push r18 - push r7 - push r6 - push r5 - push r4 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - push r31 - push r30 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - 
rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov 
r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl 
r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - pop r30 - pop r31 - std Z+8,r4 - std Z+9,r5 - std Z+10,r6 - std Z+11,r7 - std Z+24,r22 - std Z+25,r23 - std Z+26,r26 - std Z+27,r27 - std Z+40,r28 - std Z+41,r29 - std Z+42,r2 - std Z+43,r3 - std Z+12,r18 - std Z+13,r19 - std Z+14,r20 - std Z+15,r21 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - ld r4,Z - ldd r5,Z+1 - ldd r6,Z+2 - ldd r7,Z+3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r28,Z+32 - ldd r29,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - ldd r12,Z+36 - ldd r13,Z+37 - ldd r14,Z+38 - ldd r15,Z+39 - pop r18 - pop r19 - pop r20 - pop r21 - pop r4 - pop r5 - pop r6 - pop r7 - push r31 - push r30 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r27 - mov r5,r22 - mov r6,r23 - mov r7,r26 - and r4,r28 - and r5,r29 - and r6,r2 - and r7,r3 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 
- rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r4 - movw r2,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r11 - mov r19,r8 - mov r20,r9 - mov r21,r10 - and r18,r12 - and r19,r13 - and r20,r14 - and r21,r15 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r18 - movw r14,r20 - movw r18,r30 - movw r20,r24 - ldi r31,4 - eor r18,r31 - ldi r30,121 - eor r19,r30 - ldi r25,55 - eor r20,r25 - ldi r24,158 - eor r21,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - 
eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - pop r30 - pop r31 - push r21 - push r20 - push r19 - push r18 - push r7 - push r6 - push r5 - push r4 - ldd r18,Z+8 - ldd 
r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r4,Z+12 - ldd r5,Z+13 - ldd r6,Z+14 - ldd r7,Z+15 - push r31 - push r30 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+16,r22 - std Z+17,r23 - std Z+18,r26 - std Z+19,r27 - std Z+32,r28 - std Z+33,r29 - std Z+34,r2 - std Z+35,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - std Z+36,r12 - std Z+37,r13 - std Z+38,r14 - std Z+39,r15 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r26,Z+26 - ldd r27,Z+27 - ldd r28,Z+40 - ldd r29,Z+41 - ldd r2,Z+42 - ldd r3,Z+43 - ldd r4,Z+12 - ldd r5,Z+13 - ldd r6,Z+14 - ldd r7,Z+15 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - pop r4 - pop r5 - pop r6 - pop r7 - pop r18 - pop r19 - pop r20 - pop r21 - push r31 - push r30 - bst r26,7 - lsl r27 - rol r22 - rol r23 - rol r26 - bld r27,0 - mov r30,r19 - mov r31,r20 - mov r24,r21 - mov r25,r18 - movw r16,r28 - mov r1,r2 - mov r0,r3 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r18,r27 - mov r19,r22 - mov r20,r23 - mov r21,r26 - and r18,r28 - and r19,r29 - and r20,r2 - and r21,r3 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r18,r16 - eor r19,r17 - eor 
r20,r1 - eor r21,r0 - eor r18,r30 - eor r19,r31 - eor r20,r24 - eor r21,r25 - mov r16,r27 - mov r17,r22 - mov r1,r23 - mov r0,r26 - movw r22,r30 - movw r26,r24 - or r22,r28 - or r23,r29 - or r26,r2 - or r27,r3 - lsl r22 - rol r23 - rol r26 - rol r27 - eor r22,r30 - eor r23,r31 - eor r26,r24 - eor r27,r25 - eor r22,r16 - eor r23,r17 - eor r26,r1 - eor r27,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r28 - eor r31,r29 - eor r24,r2 - eor r25,r3 - movw r28,r18 - movw r2,r20 - movw r18,r30 - movw r20,r24 - bst r10,7 - lsl r11 - rol r8 - rol r9 - rol r10 - bld r11,0 - mov r30,r5 - mov r31,r6 - mov r24,r7 - mov r25,r4 - movw r16,r12 - mov r1,r14 - mov r0,r15 - lsl r16 - rol r17 - rol r1 - rol r0 - mov r4,r11 - mov r5,r8 - mov r6,r9 - mov r7,r10 - and r4,r12 - and r5,r13 - and r6,r14 - and r7,r15 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r4,r16 - eor r5,r17 - eor r6,r1 - eor r7,r0 - eor r4,r30 - eor r5,r31 - eor r6,r24 - eor r7,r25 - mov r16,r11 - mov r17,r8 - mov r1,r9 - mov r0,r10 - movw r8,r30 - movw r10,r24 - or r8,r12 - or r9,r13 - or r10,r14 - or r11,r15 - lsl r8 - rol r9 - rol r10 - rol r11 - eor r8,r30 - eor r9,r31 - eor r10,r24 - eor r11,r25 - eor r8,r16 - eor r9,r17 - eor r10,r1 - eor r11,r0 - and r30,r16 - and r31,r17 - and r24,r1 - and r25,r0 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - lsl r30 - rol r31 - rol r24 - rol r25 - eor r30,r16 - eor r31,r17 - eor r24,r1 - eor r25,r0 - eor r30,r12 - eor r31,r13 - eor r24,r14 - eor r25,r15 - movw r12,r4 - movw r14,r6 - movw r4,r30 - movw r6,r24 - pop r30 - pop r31 - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - std Z+24,r22 - std Z+25,r23 - std Z+26,r26 - std Z+27,r27 - std Z+40,r28 - std Z+41,r29 - std Z+42,r2 - std Z+43,r3 - std Z+12,r4 - std Z+13,r5 - std Z+14,r6 - std Z+15,r7 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - eor r1,r1 - ret - .size gimli24_permute, .-gimli24_permute - -#endif diff --git a/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/internal-gimli24.c b/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/internal-gimli24.c deleted file mode 100644 index d719988..0000000 --- a/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/internal-gimli24.c +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-gimli24.h" - -#if !defined(__AVR__) - -/* Apply the SP-box to a specific column in the state array */ -#define GIMLI24_SP(s0, s4, s8) \ - do { \ - x = leftRotate24(s0); \ - y = leftRotate9(s4); \ - s4 = y ^ x ^ ((x | s8) << 1); \ - s0 = s8 ^ y ^ ((x & y) << 3); \ - s8 = x ^ (s8 << 1) ^ ((y & s8) << 2); \ - } while (0) - -void gimli24_permute(uint32_t state[12]) -{ - uint32_t s0, s1, s2, s3, s4, s5; - uint32_t s6, s7, s8, s9, s10, s11; - uint32_t x, y; - unsigned round; - - /* Load the state into local variables and convert from little-endian */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s0 = state[0]; - s1 = state[1]; - s2 = state[2]; - s3 = state[3]; - s4 = state[4]; - s5 = state[5]; - s6 = state[6]; - s7 = state[7]; - s8 = state[8]; - s9 = state[9]; - s10 = state[10]; - s11 = state[11]; -#else - s0 = le_load_word32((const unsigned char *)(&(state[0]))); - s1 = le_load_word32((const unsigned char *)(&(state[1]))); - s2 = le_load_word32((const unsigned char *)(&(state[2]))); - s3 = le_load_word32((const unsigned char *)(&(state[3]))); - s4 = le_load_word32((const unsigned char *)(&(state[4]))); - s5 = le_load_word32((const unsigned char *)(&(state[5]))); - s6 = le_load_word32((const unsigned char *)(&(state[6]))); - s7 = le_load_word32((const unsigned char *)(&(state[7]))); - s8 = le_load_word32((const unsigned char *)(&(state[8]))); - s9 = le_load_word32((const unsigned char *)(&(state[9]))); - s10 = le_load_word32((const unsigned char *)(&(state[10]))); - s11 = le_load_word32((const unsigned char *)(&(state[11]))); -#endif - - /* Unroll and perform the rounds 4 at a time */ - for (round = 24; round > 0; round -= 4) { - /* Round 0: SP-box, small swap, add round constant */ - GIMLI24_SP(s0, s4, s8); - GIMLI24_SP(s1, s5, s9); - GIMLI24_SP(s2, s6, s10); - GIMLI24_SP(s3, s7, s11); - x = s0; - y = s2; - s0 = s1 ^ 0x9e377900U ^ round; - s1 = x; - s2 = s3; - s3 = y; - - /* Round 1: SP-box only */ - GIMLI24_SP(s0, s4, s8); - GIMLI24_SP(s1, s5, s9); - GIMLI24_SP(s2, s6, s10); - GIMLI24_SP(s3, s7, s11); - - /* Round 2: SP-box, big swap */ - GIMLI24_SP(s0, s4, s8); - GIMLI24_SP(s1, s5, s9); - GIMLI24_SP(s2, s6, s10); - GIMLI24_SP(s3, s7, s11); - x = s0; - y = s1; - s0 = s2; - s1 = s3; - s2 = x; - s3 = y; - - /* Round 3: SP-box only */ - GIMLI24_SP(s0, s4, s8); - GIMLI24_SP(s1, s5, s9); - GIMLI24_SP(s2, s6, s10); - GIMLI24_SP(s3, s7, s11); - } - - /* Convert state to little-endian if the platform is not little-endian */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state[0] = s0; - state[1] = s1; - state[2] = s2; - state[3] = s3; - state[4] = s4; - state[5] = s5; - state[6] = s6; - state[7] = s7; - state[8] = s8; - state[9] = s9; - state[10] = s10; - state[11] = s11; -#else - le_store_word32(((unsigned char *)(&(state[0]))), s0); - le_store_word32(((unsigned char *)(&(state[1]))), s1); - le_store_word32(((unsigned char *)(&(state[2]))), s2); - le_store_word32(((unsigned char *)(&(state[3]))), s3); - le_store_word32(((unsigned char *)(&(state[4]))), s4); - le_store_word32(((unsigned char 
*)(&(state[5]))), s5); - le_store_word32(((unsigned char *)(&(state[6]))), s6); - le_store_word32(((unsigned char *)(&(state[7]))), s7); - le_store_word32(((unsigned char *)(&(state[8]))), s8); - le_store_word32(((unsigned char *)(&(state[9]))), s9); - le_store_word32(((unsigned char *)(&(state[10]))), s10); - le_store_word32(((unsigned char *)(&(state[11]))), s11); -#endif -} - -#endif /* !__AVR__ */ diff --git a/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/internal-gimli24.h b/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/internal-gimli24.h deleted file mode 100644 index c81ead1..0000000 --- a/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/internal-gimli24.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_GIMLI24_H -#define LW_INTERNAL_GIMLI24_H - -#include "internal-util.h" - -/** - * \file internal-gimli24.h - * \brief Internal implementation of the GIMLI-24 permutation. - * - * References: https://gimli.cr.yp.to/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Permutes the GIMLI-24 state. - * - * \param state The GIMLI-24 state to be permuted. - * - * The input and output \a state will be in little-endian byte order. - */ -void gimli24_permute(uint32_t state[12]); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/internal-util.h b/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/gimli/Implementations/crypto_hash/gimli24v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - 
(ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. 
This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/gimli/Implementations/crypto_hash/gimli24v1/rhys/aead-common.c b/gimli/Implementations/crypto_hash/gimli24v1/rhys/aead-common.c new file mode 100644 index 0000000..84fc53a --- /dev/null +++ b/gimli/Implementations/crypto_hash/gimli24v1/rhys/aead-common.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "aead-common.h" + +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = (accum - 1) >> 8; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} + +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size, int precheck) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = ((accum - 1) >> 8) & precheck; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} diff --git a/gimli/Implementations/crypto_hash/gimli24v1/rhys/aead-common.h b/gimli/Implementations/crypto_hash/gimli24v1/rhys/aead-common.h new file mode 100644 index 0000000..2be95eb --- /dev/null +++ b/gimli/Implementations/crypto_hash/gimli24v1/rhys/aead-common.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_AEAD_COMMON_H +#define LWCRYPTO_AEAD_COMMON_H + +#include + +/** + * \file aead-common.h + * \brief Definitions that are common across AEAD schemes. + * + * AEAD stands for "Authenticated Encryption with Associated Data". + * It is a standard API pattern for securely encrypting and + * authenticating packets of data. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts and authenticates a packet with an AEAD scheme. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. 
+ * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + */ +typedef int (*aead_cipher_encrypt_t) + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with an AEAD scheme. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + */ +typedef int (*aead_cipher_decrypt_t) + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data. + * + * \param out Buffer to receive the hash output. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +typedef int (*aead_hash_t) + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a hashing operation. + * + * \param state Hash state to be initialized. + */ +typedef void (*aead_hash_init_t)(void *state); + +/** + * \brief Updates a hash state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + */ +typedef void (*aead_hash_update_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Returns the final hash value from a hashing operation. + * + * \param Hash state to be finalized. + * \param out Points to the output buffer to receive the hash value. + */ +typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); + +/** + * \brief Aborbs more input data into an XOF state. + * + * \param state XOF state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. + * + * \sa ascon_xof_init(), ascon_xof_squeeze() + */ +typedef void (*aead_xof_absorb_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Squeezes output data from an XOF state. 
+ * + * \param state XOF state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + */ +typedef void (*aead_xof_squeeze_t) + (void *state, unsigned char *out, unsigned long long outlen); + +/** + * \brief No special AEAD features. + */ +#define AEAD_FLAG_NONE 0x0000 + +/** + * \brief The natural byte order of the AEAD cipher is little-endian. + * + * If this flag is not present, then the natural byte order of the + * AEAD cipher should be assumed to be big-endian. + * + * The natural byte order may be useful when formatting packet sequence + * numbers as nonces. The application needs to know whether the sequence + * number should be packed into the leading or trailing bytes of the nonce. + */ +#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 + +/** + * \brief Meta-information about an AEAD cipher. + */ +typedef struct +{ + const char *name; /**< Name of the cipher */ + unsigned key_len; /**< Length of the key in bytes */ + unsigned nonce_len; /**< Length of the nonce in bytes */ + unsigned tag_len; /**< Length of the tag in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ + aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ + +} aead_cipher_t; + +/** + * \brief Meta-information about a hash algorithm that is related to an AEAD. + * + * Regular hash algorithms should provide the "hash", "init", "update", + * and "finalize" functions. Extensible Output Functions (XOF's) should + * proivde the "hash", "init", "absorb", and "squeeze" functions. + */ +typedef struct +{ + const char *name; /**< Name of the hash algorithm */ + size_t state_size; /**< Size of the incremental state structure */ + unsigned hash_len; /**< Length of the hash in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_hash_t hash; /**< All in one hashing function */ + aead_hash_init_t init; /**< Incremental hash/XOF init function */ + aead_hash_update_t update; /**< Incremental hash update function */ + aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ + aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ + aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ + +} aead_hash_algorithm_t; + +/** + * \brief Check an authentication tag in constant time. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + */ +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len); + +/** + * \brief Check an authentication tag in constant time with a previous check. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
+ * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + * + * This version can be used to incorporate other information about the + * correctness of the plaintext into the final result. + */ +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len, int precheck); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/gimli/Implementations/crypto_hash/gimli24v1/rhys/api.h b/gimli/Implementations/crypto_hash/gimli24v1/rhys/api.h new file mode 100644 index 0000000..ae8c7f6 --- /dev/null +++ b/gimli/Implementations/crypto_hash/gimli24v1/rhys/api.h @@ -0,0 +1 @@ +#define CRYPTO_BYTES 32 diff --git a/gimli/Implementations/crypto_hash/gimli24v1/rhys/gimli24.c b/gimli/Implementations/crypto_hash/gimli24v1/rhys/gimli24.c new file mode 100644 index 0000000..4bc7d9f --- /dev/null +++ b/gimli/Implementations/crypto_hash/gimli24v1/rhys/gimli24.c @@ -0,0 +1,330 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gimli24.h" +#include "internal-gimli24.h" +#include + +aead_cipher_t const gimli24_cipher = { + "GIMLI-24", + GIMLI24_KEY_SIZE, + GIMLI24_NONCE_SIZE, + GIMLI24_TAG_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + gimli24_aead_encrypt, + gimli24_aead_decrypt +}; + +aead_hash_algorithm_t const gimli24_hash_algorithm = { + "GIMLI-24-HASH", + sizeof(gimli24_hash_state_t), + GIMLI24_HASH_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + gimli24_hash, + (aead_hash_init_t)gimli24_hash_init, + (aead_hash_update_t)gimli24_hash_absorb, + (aead_hash_finalize_t)gimli24_hash_finalize, + (aead_xof_absorb_t)gimli24_hash_absorb, + (aead_xof_squeeze_t)gimli24_hash_squeeze +}; + +/** + * \brief Number of bytes of input or output data to process per block. + */ +#define GIMLI24_BLOCK_SIZE 16 + +/** + * \brief Structure of the GIMLI-24 state as both an array of words + * and an array of bytes. + */ +typedef union +{ + uint32_t words[12]; /**< Words in the state */ + uint8_t bytes[48]; /**< Bytes in the state */ + +} gimli24_state_t; + +/** + * \brief Absorbs data into a GIMLI-24 state. + * + * \param state The state to absorb the data into. + * \param data Points to the data to be absorbed. 
+ * \param len Length of the data to be absorbed. + */ +static void gimli24_absorb + (gimli24_state_t *state, const unsigned char *data, unsigned long long len) +{ + unsigned temp; + while (len >= GIMLI24_BLOCK_SIZE) { + lw_xor_block(state->bytes, data, GIMLI24_BLOCK_SIZE); + gimli24_permute(state->words); + data += GIMLI24_BLOCK_SIZE; + len -= GIMLI24_BLOCK_SIZE; + } + temp = (unsigned)len; + lw_xor_block(state->bytes, data, temp); + state->bytes[temp] ^= 0x01; /* Padding */ + state->bytes[47] ^= 0x01; + gimli24_permute(state->words); +} + +/** + * \brief Encrypts a block of data with a GIMLI-24 state. + * + * \param state The state to encrypt with. + * \param dest Points to the destination buffer. + * \param src Points to the source buffer. + * \param len Length of the data to encrypt from \a src into \a dest. + */ +static void gimli24_encrypt + (gimli24_state_t *state, unsigned char *dest, + const unsigned char *src, unsigned long long len) +{ + unsigned temp; + while (len >= GIMLI24_BLOCK_SIZE) { + lw_xor_block_2_dest(dest, state->bytes, src, GIMLI24_BLOCK_SIZE); + gimli24_permute(state->words); + dest += GIMLI24_BLOCK_SIZE; + src += GIMLI24_BLOCK_SIZE; + len -= GIMLI24_BLOCK_SIZE; + } + temp = (unsigned)len; + lw_xor_block_2_dest(dest, state->bytes, src, temp); + state->bytes[temp] ^= 0x01; /* Padding */ + state->bytes[47] ^= 0x01; + gimli24_permute(state->words); +} + +/** + * \brief Decrypts a block of data with a GIMLI-24 state. + * + * \param state The state to decrypt with. + * \param dest Points to the destination buffer. + * \param src Points to the source buffer. + * \param len Length of the data to decrypt from \a src into \a dest. + */ +static void gimli24_decrypt + (gimli24_state_t *state, unsigned char *dest, + const unsigned char *src, unsigned long long len) +{ + unsigned temp; + while (len >= GIMLI24_BLOCK_SIZE) { + lw_xor_block_swap(dest, state->bytes, src, GIMLI24_BLOCK_SIZE); + gimli24_permute(state->words); + dest += GIMLI24_BLOCK_SIZE; + src += GIMLI24_BLOCK_SIZE; + len -= GIMLI24_BLOCK_SIZE; + } + temp = (unsigned)len; + lw_xor_block_swap(dest, state->bytes, src, temp); + state->bytes[temp] ^= 0x01; /* Padding */ + state->bytes[47] ^= 0x01; + gimli24_permute(state->words); +} + +int gimli24_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + gimli24_state_t state; + (void)nsec; + + /* Set the length of the returned ciphertext */ + *clen = mlen + GIMLI24_TAG_SIZE; + + /* Format the initial GIMLI state from the nonce and the key */ + memcpy(state.words, npub, GIMLI24_NONCE_SIZE); + memcpy(state.words + 4, k, GIMLI24_KEY_SIZE); + + /* Permute the initial state */ + gimli24_permute(state.words); + + /* Absorb the associated data */ + gimli24_absorb(&state, ad, adlen); + + /* Encrypt the plaintext to produce the ciphertext */ + gimli24_encrypt(&state, c, m, mlen); + + /* Generate the authentication tag at the end of the ciphertext */ + memcpy(c + mlen, state.bytes, GIMLI24_TAG_SIZE); + return 0; +} + +int gimli24_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + gimli24_state_t state; + (void)nsec; + + /* Validate the ciphertext length and set the return "mlen" value */ + if 
(clen < GIMLI24_TAG_SIZE) + return -1; + *mlen = clen - GIMLI24_TAG_SIZE; + + /* Format the initial GIMLI state from the nonce and the key */ + memcpy(state.words, npub, GIMLI24_NONCE_SIZE); + memcpy(state.words + 4, k, GIMLI24_KEY_SIZE); + + /* Permute the initial state */ + gimli24_permute(state.words); + + /* Absorb the associated data */ + gimli24_absorb(&state, ad, adlen); + + /* Decrypt the ciphertext to produce the plaintext */ + gimli24_decrypt(&state, m, c, *mlen); + + /* Check the authentication tag at the end of the packet */ + return aead_check_tag + (m, *mlen, state.bytes, c + *mlen, GIMLI24_TAG_SIZE); +} + +int gimli24_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + gimli24_state_t state; + + /* Initialize the hash state to all zeroes */ + memset(&state, 0, sizeof(state)); + + /* Absorb the input */ + gimli24_absorb(&state, in, inlen); + + /* Generate the output hash */ + memcpy(out, state.bytes, GIMLI24_HASH_SIZE / 2); + gimli24_permute(state.words); + memcpy(out + GIMLI24_HASH_SIZE / 2, state.bytes, GIMLI24_HASH_SIZE / 2); + return 0; +} + +void gimli24_hash_init(gimli24_hash_state_t *state) +{ + memset(state, 0, sizeof(gimli24_hash_state_t)); +} + +#define GIMLI24_XOF_RATE 16 +#define gimli24_xof_permute() \ + gimli24_permute((uint32_t *)(state->s.state)) + +void gimli24_hash_absorb + (gimli24_hash_state_t *state, const unsigned char *in, + unsigned long long inlen) +{ + unsigned temp; + + if (state->s.mode) { + /* We were squeezing output - go back to the absorb phase */ + state->s.mode = 0; + state->s.count = 0; + gimli24_xof_permute(); + } + + /* Handle the partial left-over block from last time */ + if (state->s.count) { + temp = GIMLI24_XOF_RATE - state->s.count; + if (temp > inlen) { + temp = (unsigned)inlen; + lw_xor_block(state->s.state + state->s.count, in, temp); + state->s.count += temp; + return; + } + lw_xor_block(state->s.state + state->s.count, in, temp); + state->s.count = 0; + in += temp; + inlen -= temp; + gimli24_xof_permute(); + } + + /* Process full blocks that are aligned at state->s.count == 0 */ + while (inlen >= GIMLI24_XOF_RATE) { + lw_xor_block(state->s.state, in, GIMLI24_XOF_RATE); + in += GIMLI24_XOF_RATE; + inlen -= GIMLI24_XOF_RATE; + gimli24_xof_permute(); + } + + /* Process the left-over block at the end of the input */ + temp = (unsigned)inlen; + lw_xor_block(state->s.state, in, temp); + state->s.count = temp; +} + +void gimli24_hash_squeeze + (gimli24_hash_state_t *state, unsigned char *out, + unsigned long long outlen) +{ + unsigned temp; + + /* Pad the final input block if we were still in the absorb phase */ + if (!state->s.mode) { + state->s.state[state->s.count] ^= 0x01; + state->s.state[47] ^= 0x01; + state->s.count = 0; + state->s.mode = 1; + } + + /* Handle left-over partial blocks from last time */ + if (state->s.count) { + temp = GIMLI24_XOF_RATE - state->s.count; + if (temp > outlen) { + temp = (unsigned)outlen; + memcpy(out, state->s.state + state->s.count, temp); + state->s.count += temp; + return; + } + memcpy(out, state->s.state + state->s.count, temp); + out += temp; + outlen -= temp; + state->s.count = 0; + } + + /* Handle full blocks */ + while (outlen >= GIMLI24_XOF_RATE) { + gimli24_xof_permute(); + memcpy(out, state->s.state, GIMLI24_XOF_RATE); + out += GIMLI24_XOF_RATE; + outlen -= GIMLI24_XOF_RATE; + } + + /* Handle the left-over block */ + if (outlen > 0) { + temp = (unsigned)outlen; + gimli24_xof_permute(); + memcpy(out, state->s.state, temp); + state->s.count = temp; + } +} + 
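For reference, gimli24_hash_finalize() below is simply a fixed-length squeeze of GIMLI24_HASH_SIZE bytes, so the incremental init/absorb/finalize path should agree with the one-shot gimli24_hash() defined above. A minimal usage sketch of the interface declared in the gimli24.h header added by this patch follows; it is illustrative only and not part of the patched sources:

#include <stdio.h>
#include <string.h>
#include "gimli24.h"

/* Illustrative usage sketch (editorial example, not part of this patch):
 * absorbing the message in two pieces and finalizing should yield the
 * same 32-byte digest as the one-shot gimli24_hash(). */
int main(void)
{
    static const unsigned char msg[] =
        "The quick brown fox jumps over the lazy dog";
    unsigned char one_shot[GIMLI24_HASH_SIZE];
    unsigned char incremental[GIMLI24_HASH_SIZE];
    gimli24_hash_state_t state;

    /* One-shot hashing of the whole message (length excludes the NUL) */
    gimli24_hash(one_shot, msg, sizeof(msg) - 1);

    /* Incremental hashing: init, absorb in two chunks, finalize */
    gimli24_hash_init(&state);
    gimli24_hash_absorb(&state, msg, 10);
    gimli24_hash_absorb(&state, msg + 10, sizeof(msg) - 1 - 10);
    gimli24_hash_finalize(&state, incremental);

    printf("digests %s\n",
           memcmp(one_shot, incremental, GIMLI24_HASH_SIZE) == 0
               ? "match" : "differ");
    return 0;
}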
+void gimli24_hash_finalize + (gimli24_hash_state_t *state, unsigned char *out) +{ + gimli24_hash_squeeze(state, out, GIMLI24_HASH_SIZE); +} diff --git a/gimli/Implementations/crypto_hash/gimli24v1/rhys/gimli24.h b/gimli/Implementations/crypto_hash/gimli24v1/rhys/gimli24.h new file mode 100644 index 0000000..f72aec7 --- /dev/null +++ b/gimli/Implementations/crypto_hash/gimli24v1/rhys/gimli24.h @@ -0,0 +1,220 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_GIMLI24_H +#define LWCRYPTO_GIMLI24_H + +#include "aead-common.h" + +/** + * \file gimli24.h + * \brief Gimli authenticated encryption algorithm. + * + * GIMLI-24-CIPHER has a 256-bit key, a 128-bit nonce, and a 128-bit tag. + * It is the spiritual successor to the widely used ChaCha20 and has a + * similar design. + * + * This library also includes an implementation of the hash algorithm + * GIMLI-24-HASH in both regular hashing and XOF modes. + * + * References: https://gimli.cr.yp.to/ + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the key for GIMLI-24. + */ +#define GIMLI24_KEY_SIZE 32 + +/** + * \brief Size of the nonce for GIMLI-24. + */ +#define GIMLI24_NONCE_SIZE 16 + +/** + * \brief Size of the authentication tag for GIMLI-24. + */ +#define GIMLI24_TAG_SIZE 16 + +/** + * \brief Size of the hash output for GIMLI-24. + */ +#define GIMLI24_HASH_SIZE 32 + +/** + * \brief State information for GIMLI-24-HASH incremental modes. + */ +typedef union +{ + struct { + unsigned char state[48]; /**< Current hash state */ + unsigned char count; /**< Number of bytes in the current block */ + unsigned char mode; /**< Hash mode: 0 for absorb, 1 for squeeze */ + } s; /**< State */ + unsigned long long align; /**< For alignment of this structure */ + +} gimli24_hash_state_t; + +/** + * \brief Meta-information block for the GIMLI-24 cipher. + */ +extern aead_cipher_t const gimli24_cipher; + +/** + * \brief Meta-information block for the GIMLI-24-HASH algorithm. + * + * This meta-information block can also be used in XOF mode. + */ +extern aead_hash_algorithm_t const gimli24_hash_algorithm; + +/** + * \brief Encrypts and authenticates a packet with GIMLI-24 using the + * full AEAD mode. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. 
+ * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 32 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa gimli24_aead_decrypt() + */ +int gimli24_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with GIMLI-24 using the + * full AEAD mode. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 32 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa gimli24_aead_encrypt() + */ +int gimli24_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data with GIMLI-24 to generate a hash value. + * + * \param out Buffer to receive the hash output which must be at least + * GIMLI24_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int gimli24_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a GIMLI-24-HASH hashing operation. + * + * \param state Hash state to be initialized. + * + * \sa gimli24_hash_absorb(), gimli24_hash_squeeze(), gimli24_hash() + */ +void gimli24_hash_init(gimli24_hash_state_t *state); + +/** + * \brief Aborbs more input data into a GIMLI-24-HASH state. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. + * + * \sa gimli24_hash_init(), gimli24_hash_squeeze() + */ +void gimli24_hash_absorb + (gimli24_hash_state_t *state, const unsigned char *in, + unsigned long long inlen); + +/** + * \brief Squeezes output data from an GIMLI-24-HASH state. 
+ * + * \param state Hash state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + * + * \sa gimli24_hash_init(), gimli24_hash_absorb() + */ +void gimli24_hash_squeeze + (gimli24_hash_state_t *state, unsigned char *out, + unsigned long long outlen); + +/** + * \brief Returns the final hash value from a GIMLI-24-HASH hashing operation. + * + * \param state Hash state to be finalized. + * \param out Points to the output buffer to receive the hash value. + * + * \note This is a wrapper around gimli24_hash_squeeze() for a fixed length + * of GIMLI24_HASH_SIZE bytes. + * + * \sa gimli24_hash_init(), gimli24_hash_absorb() + */ +void gimli24_hash_finalize + (gimli24_hash_state_t *state, unsigned char *out); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/gimli/Implementations/crypto_hash/gimli24v1/rhys/hash.c b/gimli/Implementations/crypto_hash/gimli24v1/rhys/hash.c new file mode 100644 index 0000000..93789b1 --- /dev/null +++ b/gimli/Implementations/crypto_hash/gimli24v1/rhys/hash.c @@ -0,0 +1,8 @@ + +#include "gimli24.h" + +int crypto_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + return gimli24_hash(out, in, inlen); +} diff --git a/gimli/Implementations/crypto_hash/gimli24v1/rhys/internal-gimli24-avr.S b/gimli/Implementations/crypto_hash/gimli24v1/rhys/internal-gimli24-avr.S new file mode 100644 index 0000000..efcd500 --- /dev/null +++ b/gimli/Implementations/crypto_hash/gimli24v1/rhys/internal-gimli24-avr.S @@ -0,0 +1,9419 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global gimli24_permute + .type gimli24_permute, @function +gimli24_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r28,Z+32 + ldd r29,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + ldd r12,Z+36 + ldd r13,Z+37 + ldd r14,Z+38 + ldd r15,Z+39 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld 
r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + ldi r31,24 + eor r4,r31 + ldi r30,121 + eor r5,r30 + ldi r25,55 + eor r6,r25 + ldi r24,158 + eor r7,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + 
rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + pop r30 + pop r31 + st Z,r4 + std Z+1,r5 + std Z+2,r6 + std Z+3,r7 + std Z+16,r22 + std Z+17,r23 + std Z+18,r26 + std Z+19,r27 + std Z+32,r28 + std Z+33,r29 + std Z+34,r2 + std Z+35,r3 + std Z+4,r18 + std Z+5,r19 + std Z+6,r20 + std Z+7,r21 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + std Z+36,r12 + std Z+37,r13 + std Z+38,r14 + std Z+39,r15 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r26,Z+26 + ldd r27,Z+27 + ldd r28,Z+40 + ldd r29,Z+41 + ldd r2,Z+42 + ldd r3,Z+43 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 
+ rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst 
r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + pop r30 + pop r31 + push r21 + push r20 + push r19 + push r18 + push r7 + push r6 + push r5 + push r4 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw 
r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + 
mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + 
eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + pop r30 + pop r31 + std Z+8,r4 + std Z+9,r5 + std Z+10,r6 + std Z+11,r7 + std Z+24,r22 + std Z+25,r23 + std Z+26,r26 + std Z+27,r27 + std Z+40,r28 + std Z+41,r29 + std Z+42,r2 + std Z+43,r3 + std Z+12,r18 + std Z+13,r19 + std Z+14,r20 + std Z+15,r21 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + ld r4,Z + ldd r5,Z+1 + ldd r6,Z+2 + ldd r7,Z+3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r28,Z+32 + ldd r29,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + ldd r12,Z+36 + ldd r13,Z+37 + ldd r14,Z+38 + ldd r15,Z+39 + pop r18 + pop r19 + pop r20 + pop r21 + pop r4 + pop r5 + pop r6 + pop r7 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + 
eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + ldi r31,20 + eor r18,r31 + ldi r30,121 + eor r19,r30 + ldi r25,55 + eor r20,r25 + ldi r24,158 + eor r21,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 
+ mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + pop r30 + pop r31 + push r21 + push r20 + push r19 + push r18 + push r7 + push r6 + push r5 + push r4 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r4,Z+12 + ldd r5,Z+13 + ldd r6,Z+14 + ldd r7,Z+15 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol 
r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and 
r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + ldi r31,16 + eor r4,r31 + ldi r30,121 + eor r5,r30 + ldi r25,55 + eor r6,r25 + ldi r24,158 + eor r7,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + 
eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + pop r30 + pop r31 + st Z,r4 + std Z+1,r5 + std Z+2,r6 + std Z+3,r7 + std Z+16,r22 + std Z+17,r23 + std Z+18,r26 + std Z+19,r27 + std Z+32,r28 + std Z+33,r29 + std Z+34,r2 + std Z+35,r3 + std Z+4,r18 + std Z+5,r19 + std Z+6,r20 + std Z+7,r21 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + std Z+36,r12 + std Z+37,r13 + std Z+38,r14 + std Z+39,r15 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r26,Z+26 + ldd r27,Z+27 + ldd r28,Z+40 + ldd r29,Z+41 + ldd r2,Z+42 + ldd r3,Z+43 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + pop r18 + pop r19 + pop r20 + pop r21 + pop r4 + pop r5 + pop r6 + pop r7 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + 
eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov 
r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + 
rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + pop r30 + pop r31 + push r21 + push r20 + push r19 + push r18 + push r7 + push r6 + push r5 + push r4 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + 
rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or 
r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + pop r30 + pop r31 + std Z+8,r4 + std Z+9,r5 + std Z+10,r6 + std Z+11,r7 + std Z+24,r22 + std Z+25,r23 + std Z+26,r26 + std Z+27,r27 + std Z+40,r28 + std Z+41,r29 + std Z+42,r2 + std Z+43,r3 + std Z+12,r18 + std Z+13,r19 + std Z+14,r20 + std Z+15,r21 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + ld r4,Z + ldd r5,Z+1 + ldd r6,Z+2 + ldd r7,Z+3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r28,Z+32 + ldd r29,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + ldd r12,Z+36 + ldd r13,Z+37 + ldd r14,Z+38 + ldd r15,Z+39 + pop r18 + pop r19 + pop r20 + pop r21 + pop r4 + pop r5 + pop r6 + pop r7 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + 
rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and 
r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + ldi r31,12 + eor r18,r31 + ldi r30,121 + eor r19,r30 + ldi r25,55 + eor r20,r25 + ldi r24,158 + eor r21,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + 
eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + pop r30 + pop r31 + push r21 + push r20 + push r19 + push r18 + push r7 + push r6 + push r5 + push r4 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r4,Z+12 + ldd r5,Z+13 + ldd r6,Z+14 + ldd r7,Z+15 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + 
eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + ldi r31,8 + eor r4,r31 + ldi r30,121 + eor r5,r30 + ldi r25,55 + eor r6,r25 + ldi r24,158 + eor r7,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 
+ movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + 
and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + pop r30 + pop r31 + st Z,r4 + std Z+1,r5 + std Z+2,r6 + std Z+3,r7 + std Z+16,r22 + std Z+17,r23 + std Z+18,r26 + std Z+19,r27 + std Z+32,r28 + std Z+33,r29 + std Z+34,r2 + std Z+35,r3 + std Z+4,r18 + std Z+5,r19 + std Z+6,r20 + std Z+7,r21 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + std Z+36,r12 + std Z+37,r13 + std Z+38,r14 + std Z+39,r15 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r26,Z+26 + ldd r27,Z+27 + ldd r28,Z+40 + ldd r29,Z+41 + ldd r2,Z+42 + ldd r3,Z+43 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + pop r18 + pop r19 + pop r20 + pop r21 + pop r4 + pop r5 + pop r6 + pop r7 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and 
r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + 
mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + pop r30 + pop r31 + push r21 + push r20 + push r19 + push r18 + push r7 + push r6 + push r5 + push r4 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov 
r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor 
r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor 
r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + pop r30 + pop r31 + std Z+8,r4 + std Z+9,r5 + std Z+10,r6 + std Z+11,r7 + std Z+24,r22 + std Z+25,r23 + std Z+26,r26 + std Z+27,r27 + std Z+40,r28 + std Z+41,r29 + std Z+42,r2 + std Z+43,r3 + std Z+12,r18 + std Z+13,r19 + std Z+14,r20 + std Z+15,r21 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + ld r4,Z + ldd r5,Z+1 + ldd r6,Z+2 + ldd r7,Z+3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r28,Z+32 + ldd r29,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + ldd r12,Z+36 + ldd r13,Z+37 + ldd r14,Z+38 + ldd r15,Z+39 + pop r18 + pop r19 + pop r20 + pop r21 + pop r4 + pop r5 + pop r6 + pop r7 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r27 + mov r5,r22 + mov r6,r23 + mov r7,r26 + and r4,r28 + and r5,r29 + and r6,r2 + and r7,r3 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor 
r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r4 + movw r2,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r11 + mov r19,r8 + mov r20,r9 + mov r21,r10 + and r18,r12 + and r19,r13 + and r20,r14 + and r21,r15 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r18 + movw r14,r20 + movw r18,r30 + movw r20,r24 + ldi r31,4 + eor r18,r31 + ldi r30,121 + eor r19,r30 + ldi r25,55 + eor r20,r25 + ldi r24,158 + eor r21,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or 
r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and 
r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + pop r30 + pop r31 + push r21 + push r20 + push r19 + push r18 + push r7 + push r6 + push r5 + push r4 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r4,Z+12 + ldd r5,Z+13 + ldd r6,Z+14 + ldd r7,Z+15 + push r31 + push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+16,r22 + std Z+17,r23 + std Z+18,r26 + std Z+19,r27 + std Z+32,r28 + std Z+33,r29 + std Z+34,r2 + std Z+35,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + std Z+36,r12 + std Z+37,r13 + std Z+38,r14 + std Z+39,r15 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r26,Z+26 + ldd r27,Z+27 + ldd r28,Z+40 + ldd r29,Z+41 + ldd r2,Z+42 + ldd r3,Z+43 + ldd r4,Z+12 + ldd r5,Z+13 + ldd r6,Z+14 + ldd r7,Z+15 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + pop r4 + pop r5 + pop r6 + pop r7 + pop r18 + pop r19 + pop r20 + pop r21 + push r31 + 
push r30 + bst r26,7 + lsl r27 + rol r22 + rol r23 + rol r26 + bld r27,0 + mov r30,r19 + mov r31,r20 + mov r24,r21 + mov r25,r18 + movw r16,r28 + mov r1,r2 + mov r0,r3 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r18,r27 + mov r19,r22 + mov r20,r23 + mov r21,r26 + and r18,r28 + and r19,r29 + and r20,r2 + and r21,r3 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r18,r16 + eor r19,r17 + eor r20,r1 + eor r21,r0 + eor r18,r30 + eor r19,r31 + eor r20,r24 + eor r21,r25 + mov r16,r27 + mov r17,r22 + mov r1,r23 + mov r0,r26 + movw r22,r30 + movw r26,r24 + or r22,r28 + or r23,r29 + or r26,r2 + or r27,r3 + lsl r22 + rol r23 + rol r26 + rol r27 + eor r22,r30 + eor r23,r31 + eor r26,r24 + eor r27,r25 + eor r22,r16 + eor r23,r17 + eor r26,r1 + eor r27,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r28 + eor r31,r29 + eor r24,r2 + eor r25,r3 + movw r28,r18 + movw r2,r20 + movw r18,r30 + movw r20,r24 + bst r10,7 + lsl r11 + rol r8 + rol r9 + rol r10 + bld r11,0 + mov r30,r5 + mov r31,r6 + mov r24,r7 + mov r25,r4 + movw r16,r12 + mov r1,r14 + mov r0,r15 + lsl r16 + rol r17 + rol r1 + rol r0 + mov r4,r11 + mov r5,r8 + mov r6,r9 + mov r7,r10 + and r4,r12 + and r5,r13 + and r6,r14 + and r7,r15 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r4,r16 + eor r5,r17 + eor r6,r1 + eor r7,r0 + eor r4,r30 + eor r5,r31 + eor r6,r24 + eor r7,r25 + mov r16,r11 + mov r17,r8 + mov r1,r9 + mov r0,r10 + movw r8,r30 + movw r10,r24 + or r8,r12 + or r9,r13 + or r10,r14 + or r11,r15 + lsl r8 + rol r9 + rol r10 + rol r11 + eor r8,r30 + eor r9,r31 + eor r10,r24 + eor r11,r25 + eor r8,r16 + eor r9,r17 + eor r10,r1 + eor r11,r0 + and r30,r16 + and r31,r17 + and r24,r1 + and r25,r0 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + lsl r30 + rol r31 + rol r24 + rol r25 + eor r30,r16 + eor r31,r17 + eor r24,r1 + eor r25,r0 + eor r30,r12 + eor r31,r13 + eor r24,r14 + eor r25,r15 + movw r12,r4 + movw r14,r6 + movw r4,r30 + movw r6,r24 + pop r30 + pop r31 + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + std Z+24,r22 + std Z+25,r23 + std Z+26,r26 + std Z+27,r27 + std Z+40,r28 + std Z+41,r29 + std Z+42,r2 + std Z+43,r3 + std Z+12,r4 + std Z+13,r5 + std Z+14,r6 + std Z+15,r7 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + eor r1,r1 + ret + .size gimli24_permute, .-gimli24_permute + +#endif diff --git a/gimli/Implementations/crypto_hash/gimli24v1/rhys/internal-gimli24.c b/gimli/Implementations/crypto_hash/gimli24v1/rhys/internal-gimli24.c new file mode 100644 index 0000000..d719988 --- /dev/null +++ b/gimli/Implementations/crypto_hash/gimli24v1/rhys/internal-gimli24.c @@ -0,0 +1,142 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "internal-gimli24.h"
+
+#if !defined(__AVR__)
+
+/* Apply the SP-box to a specific column in the state array */
+#define GIMLI24_SP(s0, s4, s8) \
+    do { \
+        x = leftRotate24(s0); \
+        y = leftRotate9(s4); \
+        s4 = y ^ x ^ ((x | s8) << 1); \
+        s0 = s8 ^ y ^ ((x & y) << 3); \
+        s8 = x ^ (s8 << 1) ^ ((y & s8) << 2); \
+    } while (0)
+
+void gimli24_permute(uint32_t state[12])
+{
+    uint32_t s0, s1, s2, s3, s4, s5;
+    uint32_t s6, s7, s8, s9, s10, s11;
+    uint32_t x, y;
+    unsigned round;
+
+    /* Load the state into local variables and convert from little-endian */
+#if defined(LW_UTIL_LITTLE_ENDIAN)
+    s0 = state[0];
+    s1 = state[1];
+    s2 = state[2];
+    s3 = state[3];
+    s4 = state[4];
+    s5 = state[5];
+    s6 = state[6];
+    s7 = state[7];
+    s8 = state[8];
+    s9 = state[9];
+    s10 = state[10];
+    s11 = state[11];
+#else
+    s0 = le_load_word32((const unsigned char *)(&(state[0])));
+    s1 = le_load_word32((const unsigned char *)(&(state[1])));
+    s2 = le_load_word32((const unsigned char *)(&(state[2])));
+    s3 = le_load_word32((const unsigned char *)(&(state[3])));
+    s4 = le_load_word32((const unsigned char *)(&(state[4])));
+    s5 = le_load_word32((const unsigned char *)(&(state[5])));
+    s6 = le_load_word32((const unsigned char *)(&(state[6])));
+    s7 = le_load_word32((const unsigned char *)(&(state[7])));
+    s8 = le_load_word32((const unsigned char *)(&(state[8])));
+    s9 = le_load_word32((const unsigned char *)(&(state[9])));
+    s10 = le_load_word32((const unsigned char *)(&(state[10])));
+    s11 = le_load_word32((const unsigned char *)(&(state[11])));
+#endif
+
+    /* Unroll and perform the rounds 4 at a time */
+    for (round = 24; round > 0; round -= 4) {
+        /* Round 0: SP-box, small swap, add round constant */
+        GIMLI24_SP(s0, s4, s8);
+        GIMLI24_SP(s1, s5, s9);
+        GIMLI24_SP(s2, s6, s10);
+        GIMLI24_SP(s3, s7, s11);
+        x = s0;
+        y = s2;
+        s0 = s1 ^ 0x9e377900U ^ round;
+        s1 = x;
+        s2 = s3;
+        s3 = y;
+
+        /* Round 1: SP-box only */
+        GIMLI24_SP(s0, s4, s8);
+        GIMLI24_SP(s1, s5, s9);
+        GIMLI24_SP(s2, s6, s10);
+        GIMLI24_SP(s3, s7, s11);
+
+        /* Round 2: SP-box, big swap */
+        GIMLI24_SP(s0, s4, s8);
+        GIMLI24_SP(s1, s5, s9);
+        GIMLI24_SP(s2, s6, s10);
+        GIMLI24_SP(s3, s7, s11);
+        x = s0;
+        y = s1;
+        s0 = s2;
+        s1 = s3;
+        s2 = x;
+        s3 = y;
+
+        /* Round 3: SP-box only */
+        GIMLI24_SP(s0, s4, s8);
+        GIMLI24_SP(s1, s5, s9);
+        GIMLI24_SP(s2, s6, s10);
+        GIMLI24_SP(s3, s7, s11);
+    }
+
+    /* Convert state to little-endian if the platform is not little-endian */
little-endian */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + state[0] = s0; + state[1] = s1; + state[2] = s2; + state[3] = s3; + state[4] = s4; + state[5] = s5; + state[6] = s6; + state[7] = s7; + state[8] = s8; + state[9] = s9; + state[10] = s10; + state[11] = s11; +#else + le_store_word32(((unsigned char *)(&(state[0]))), s0); + le_store_word32(((unsigned char *)(&(state[1]))), s1); + le_store_word32(((unsigned char *)(&(state[2]))), s2); + le_store_word32(((unsigned char *)(&(state[3]))), s3); + le_store_word32(((unsigned char *)(&(state[4]))), s4); + le_store_word32(((unsigned char *)(&(state[5]))), s5); + le_store_word32(((unsigned char *)(&(state[6]))), s6); + le_store_word32(((unsigned char *)(&(state[7]))), s7); + le_store_word32(((unsigned char *)(&(state[8]))), s8); + le_store_word32(((unsigned char *)(&(state[9]))), s9); + le_store_word32(((unsigned char *)(&(state[10]))), s10); + le_store_word32(((unsigned char *)(&(state[11]))), s11); +#endif +} + +#endif /* !__AVR__ */ diff --git a/gimli/Implementations/crypto_hash/gimli24v1/rhys/internal-gimli24.h b/gimli/Implementations/crypto_hash/gimli24v1/rhys/internal-gimli24.h new file mode 100644 index 0000000..c81ead1 --- /dev/null +++ b/gimli/Implementations/crypto_hash/gimli24v1/rhys/internal-gimli24.h @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_GIMLI24_H +#define LW_INTERNAL_GIMLI24_H + +#include "internal-util.h" + +/** + * \file internal-gimli24.h + * \brief Internal implementation of the GIMLI-24 permutation. + * + * References: https://gimli.cr.yp.to/ + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Permutes the GIMLI-24 state. + * + * \param state The GIMLI-24 state to be permuted. + * + * The input and output \a state will be in little-endian byte order. + */ +void gimli24_permute(uint32_t state[12]); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/gimli/Implementations/crypto_hash/gimli24v1/rhys/internal-util.h b/gimli/Implementations/crypto_hash/gimli24v1/rhys/internal-util.h new file mode 100644 index 0000000..e30166d --- /dev/null +++ b/gimli/Implementations/crypto_hash/gimli24v1/rhys/internal-util.h @@ -0,0 +1,702 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_UTIL_H +#define LW_INTERNAL_UTIL_H + +#include <stdint.h> + +/* Figure out how to inline functions using this C compiler */ +#if defined(__STDC__) && __STDC_VERSION__ >= 199901L +#define STATIC_INLINE static inline +#elif defined(__GNUC__) || defined(__clang__) +#define STATIC_INLINE static __inline__ +#else +#define STATIC_INLINE static +#endif + +/* Try to figure out whether the CPU is little-endian or big-endian. + * May need to modify this to include new compiler-specific defines. + * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your + * compiler flags when you compile this library */ +#if defined(__x86_64) || defined(__x86_64__) || \ + defined(__i386) || defined(__i386__) || \ + defined(__AVR__) || defined(__arm) || defined(__arm__) || \ + defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ + defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ + defined(__LITTLE_ENDIAN__) +#define LW_UTIL_LITTLE_ENDIAN 1 +#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ + defined(__BIG_ENDIAN__) +/* Big endian */ +#else +#error "Cannot determine the endianess of this platform" +#endif + +/* Helper macros to load and store values while converting endian-ness */ + +/* Load a big-endian 32-bit word from a byte buffer */ +#define be_load_word32(ptr) \ + ((((uint32_t)((ptr)[0])) << 24) | \ + (((uint32_t)((ptr)[1])) << 16) | \ + (((uint32_t)((ptr)[2])) << 8) | \ + ((uint32_t)((ptr)[3]))) + +/* Store a big-endian 32-bit word into a byte buffer */ +#define be_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 24); \ + (ptr)[1] = (uint8_t)(_x >> 16); \ + (ptr)[2] = (uint8_t)(_x >> 8); \ + (ptr)[3] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 32-bit word from a byte buffer */ +#define le_load_word32(ptr) \ + ((((uint32_t)((ptr)[3])) << 24) | \ + (((uint32_t)((ptr)[2])) << 16) | \ + (((uint32_t)((ptr)[1])) << 8) | \ + ((uint32_t)((ptr)[0]))) + +/* Store a little-endian 32-bit word into a byte buffer */ +#define le_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + } while (0) + +/* Load a big-endian 64-bit word from a byte buffer */ +#define be_load_word64(ptr) \ + ((((uint64_t)((ptr)[0])) << 56) | \ + (((uint64_t)((ptr)[1])) << 48)
| \ + (((uint64_t)((ptr)[2])) << 40) | \ + (((uint64_t)((ptr)[3])) << 32) | \ + (((uint64_t)((ptr)[4])) << 24) | \ + (((uint64_t)((ptr)[5])) << 16) | \ + (((uint64_t)((ptr)[6])) << 8) | \ + ((uint64_t)((ptr)[7]))) + +/* Store a big-endian 64-bit word into a byte buffer */ +#define be_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 56); \ + (ptr)[1] = (uint8_t)(_x >> 48); \ + (ptr)[2] = (uint8_t)(_x >> 40); \ + (ptr)[3] = (uint8_t)(_x >> 32); \ + (ptr)[4] = (uint8_t)(_x >> 24); \ + (ptr)[5] = (uint8_t)(_x >> 16); \ + (ptr)[6] = (uint8_t)(_x >> 8); \ + (ptr)[7] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 64-bit word from a byte buffer */ +#define le_load_word64(ptr) \ + ((((uint64_t)((ptr)[7])) << 56) | \ + (((uint64_t)((ptr)[6])) << 48) | \ + (((uint64_t)((ptr)[5])) << 40) | \ + (((uint64_t)((ptr)[4])) << 32) | \ + (((uint64_t)((ptr)[3])) << 24) | \ + (((uint64_t)((ptr)[2])) << 16) | \ + (((uint64_t)((ptr)[1])) << 8) | \ + ((uint64_t)((ptr)[0]))) + +/* Store a little-endian 64-bit word into a byte buffer */ +#define le_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + (ptr)[4] = (uint8_t)(_x >> 32); \ + (ptr)[5] = (uint8_t)(_x >> 40); \ + (ptr)[6] = (uint8_t)(_x >> 48); \ + (ptr)[7] = (uint8_t)(_x >> 56); \ + } while (0) + +/* Load a big-endian 16-bit word from a byte buffer */ +#define be_load_word16(ptr) \ + ((((uint16_t)((ptr)[0])) << 8) | \ + ((uint16_t)((ptr)[1]))) + +/* Store a big-endian 16-bit word into a byte buffer */ +#define be_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 8); \ + (ptr)[1] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 16-bit word from a byte buffer */ +#define le_load_word16(ptr) \ + ((((uint16_t)((ptr)[1])) << 8) | \ + ((uint16_t)((ptr)[0]))) + +/* Store a little-endian 16-bit word into a byte buffer */ +#define le_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + } while (0) + +/* XOR a source byte buffer against a destination */ +#define lw_xor_block(dest, src, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ ^= *_src++; \ + --_len; \ + } \ + } while (0) + +/* XOR two source byte buffers and put the result in a destination buffer */ +#define lw_xor_block_2_src(dest, src1, src2, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ = *_src1++ ^ *_src2++; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time */ +#define lw_xor_block_2_dest(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest2++ = (*_dest++ ^= *_src++); \ + --_len; \ + } \ + } while (0) + +/* XOR two byte buffers and write to a destination which at the same + * time copying the contents of src2 to dest2 */ +#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = 
(src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src2++; \ + *_dest2++ = _temp; \ + *_dest++ = *_src1++ ^ _temp; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time. This version swaps the source value + * into the "dest" buffer */ +#define lw_xor_block_swap(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src++; \ + *_dest2++ = *_dest ^ _temp; \ + *_dest++ = _temp; \ + --_len; \ + } \ + } while (0) + +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + +/* Rotation macros for 32-bit arguments */ + +/* Generic left rotate */ +#define leftRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (32 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (32 - (bits))); \ + })) + +#if !LW_CRYPTO_ROTATE32_COMPOSED + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1(a) (leftRotate((a), 1)) +#define leftRotate2(a) (leftRotate((a), 2)) +#define leftRotate3(a) (leftRotate((a), 3)) +#define leftRotate4(a) (leftRotate((a), 4)) +#define leftRotate5(a) (leftRotate((a), 5)) +#define leftRotate6(a) (leftRotate((a), 6)) +#define leftRotate7(a) (leftRotate((a), 7)) +#define leftRotate8(a) (leftRotate((a), 8)) +#define leftRotate9(a) (leftRotate((a), 9)) +#define leftRotate10(a) (leftRotate((a), 10)) +#define leftRotate11(a) (leftRotate((a), 11)) +#define leftRotate12(a) (leftRotate((a), 12)) +#define leftRotate13(a) (leftRotate((a), 13)) +#define leftRotate14(a) (leftRotate((a), 14)) +#define leftRotate15(a) (leftRotate((a), 15)) +#define leftRotate16(a) (leftRotate((a), 16)) +#define leftRotate17(a) (leftRotate((a), 17)) +#define leftRotate18(a) (leftRotate((a), 18)) +#define leftRotate19(a) (leftRotate((a), 19)) +#define leftRotate20(a) (leftRotate((a), 20)) +#define leftRotate21(a) (leftRotate((a), 21)) +#define leftRotate22(a) (leftRotate((a), 22)) +#define leftRotate23(a) (leftRotate((a), 23)) +#define leftRotate24(a) (leftRotate((a), 24)) +#define leftRotate25(a) (leftRotate((a), 25)) +#define leftRotate26(a) (leftRotate((a), 26)) +#define leftRotate27(a) (leftRotate((a), 27)) +#define leftRotate28(a) (leftRotate((a), 28)) +#define leftRotate29(a) (leftRotate((a), 29)) +#define leftRotate30(a) (leftRotate((a), 30)) +#define leftRotate31(a) (leftRotate((a), 31)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1(a) (rightRotate((a), 1)) +#define rightRotate2(a) (rightRotate((a), 2)) +#define rightRotate3(a) (rightRotate((a), 3)) +#define rightRotate4(a) (rightRotate((a), 4)) +#define rightRotate5(a) (rightRotate((a), 5)) +#define rightRotate6(a) (rightRotate((a), 6)) +#define rightRotate7(a) (rightRotate((a), 7)) +#define rightRotate8(a) (rightRotate((a), 8)) +#define rightRotate9(a) (rightRotate((a), 9)) +#define rightRotate10(a) (rightRotate((a), 10)) +#define rightRotate11(a) (rightRotate((a), 11)) +#define rightRotate12(a) (rightRotate((a), 12)) +#define rightRotate13(a) (rightRotate((a), 13)) +#define rightRotate14(a) (rightRotate((a), 14)) +#define rightRotate15(a) (rightRotate((a), 15)) +#define rightRotate16(a) (rightRotate((a), 16)) +#define rightRotate17(a) (rightRotate((a), 17)) +#define rightRotate18(a) (rightRotate((a), 18)) +#define rightRotate19(a) (rightRotate((a), 19)) +#define rightRotate20(a) (rightRotate((a), 20)) +#define rightRotate21(a) (rightRotate((a), 21)) +#define rightRotate22(a) (rightRotate((a), 22)) +#define rightRotate23(a) (rightRotate((a), 23)) +#define rightRotate24(a) (rightRotate((a), 24)) +#define rightRotate25(a) (rightRotate((a), 25)) +#define rightRotate26(a) (rightRotate((a), 26)) +#define rightRotate27(a) (rightRotate((a), 27)) +#define rightRotate28(a) (rightRotate((a), 28)) +#define rightRotate29(a) (rightRotate((a), 29)) +#define rightRotate30(a) (rightRotate((a), 30)) +#define rightRotate31(a) (rightRotate((a), 31)) + +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) 
(leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Rotation macros for 64-bit arguments */ + +/* Generic left rotate */ +#define leftRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (64 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (64 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_64(a) (leftRotate_64((a), 1)) +#define leftRotate2_64(a) (leftRotate_64((a), 2)) +#define leftRotate3_64(a) (leftRotate_64((a), 3)) +#define leftRotate4_64(a) (leftRotate_64((a), 4)) +#define leftRotate5_64(a) (leftRotate_64((a), 5)) +#define leftRotate6_64(a) (leftRotate_64((a), 6)) +#define leftRotate7_64(a) (leftRotate_64((a), 7)) +#define leftRotate8_64(a) (leftRotate_64((a), 8)) +#define leftRotate9_64(a) (leftRotate_64((a), 9)) +#define leftRotate10_64(a) (leftRotate_64((a), 10)) +#define leftRotate11_64(a) (leftRotate_64((a), 11)) +#define leftRotate12_64(a) (leftRotate_64((a), 12)) +#define leftRotate13_64(a) (leftRotate_64((a), 13)) +#define leftRotate14_64(a) (leftRotate_64((a), 14)) +#define leftRotate15_64(a) (leftRotate_64((a), 15)) +#define leftRotate16_64(a) (leftRotate_64((a), 16)) +#define leftRotate17_64(a) (leftRotate_64((a), 17)) +#define leftRotate18_64(a) (leftRotate_64((a), 18)) +#define leftRotate19_64(a) (leftRotate_64((a), 19)) +#define leftRotate20_64(a) (leftRotate_64((a), 20)) +#define leftRotate21_64(a) (leftRotate_64((a), 21)) +#define leftRotate22_64(a) (leftRotate_64((a), 22)) +#define leftRotate23_64(a) (leftRotate_64((a), 23)) +#define leftRotate24_64(a) (leftRotate_64((a), 24)) +#define leftRotate25_64(a) (leftRotate_64((a), 25)) +#define leftRotate26_64(a) (leftRotate_64((a), 26)) +#define leftRotate27_64(a) (leftRotate_64((a), 27)) +#define leftRotate28_64(a) (leftRotate_64((a), 28)) +#define leftRotate29_64(a) (leftRotate_64((a), 29)) +#define leftRotate30_64(a) (leftRotate_64((a), 30)) +#define leftRotate31_64(a) (leftRotate_64((a), 31)) +#define leftRotate32_64(a) (leftRotate_64((a), 32)) +#define leftRotate33_64(a) (leftRotate_64((a), 33)) +#define leftRotate34_64(a) (leftRotate_64((a), 34)) +#define leftRotate35_64(a) (leftRotate_64((a), 35)) +#define leftRotate36_64(a) (leftRotate_64((a), 36)) +#define leftRotate37_64(a) (leftRotate_64((a), 37)) +#define leftRotate38_64(a) (leftRotate_64((a), 38)) +#define leftRotate39_64(a) (leftRotate_64((a), 39)) +#define leftRotate40_64(a) (leftRotate_64((a), 40)) +#define leftRotate41_64(a) (leftRotate_64((a), 41)) +#define leftRotate42_64(a) (leftRotate_64((a), 42)) +#define leftRotate43_64(a) (leftRotate_64((a), 43)) +#define leftRotate44_64(a) (leftRotate_64((a), 44)) +#define leftRotate45_64(a) (leftRotate_64((a), 45)) +#define leftRotate46_64(a) (leftRotate_64((a), 46)) +#define leftRotate47_64(a) (leftRotate_64((a), 47)) +#define leftRotate48_64(a) (leftRotate_64((a), 48)) +#define leftRotate49_64(a) (leftRotate_64((a), 49)) +#define leftRotate50_64(a) (leftRotate_64((a), 50)) +#define leftRotate51_64(a) (leftRotate_64((a), 51)) +#define leftRotate52_64(a) (leftRotate_64((a), 52)) +#define leftRotate53_64(a) (leftRotate_64((a), 53)) +#define leftRotate54_64(a) (leftRotate_64((a), 54)) +#define leftRotate55_64(a) (leftRotate_64((a), 55)) +#define leftRotate56_64(a) (leftRotate_64((a), 56)) +#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) +#define leftRotate58_64(a) (leftRotate_64((a), 58)) +#define leftRotate59_64(a) (leftRotate_64((a), 59)) +#define leftRotate60_64(a) (leftRotate_64((a), 60)) +#define leftRotate61_64(a) (leftRotate_64((a), 61)) +#define leftRotate62_64(a) (leftRotate_64((a), 62)) +#define leftRotate63_64(a) (leftRotate_64((a), 63)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_64(a) (rightRotate_64((a), 1)) +#define rightRotate2_64(a) (rightRotate_64((a), 2)) +#define rightRotate3_64(a) (rightRotate_64((a), 3)) +#define rightRotate4_64(a) (rightRotate_64((a), 4)) +#define rightRotate5_64(a) (rightRotate_64((a), 5)) +#define rightRotate6_64(a) (rightRotate_64((a), 6)) +#define rightRotate7_64(a) (rightRotate_64((a), 7)) +#define rightRotate8_64(a) (rightRotate_64((a), 8)) +#define rightRotate9_64(a) (rightRotate_64((a), 9)) +#define rightRotate10_64(a) (rightRotate_64((a), 10)) +#define rightRotate11_64(a) (rightRotate_64((a), 11)) +#define rightRotate12_64(a) (rightRotate_64((a), 12)) +#define rightRotate13_64(a) (rightRotate_64((a), 13)) +#define rightRotate14_64(a) (rightRotate_64((a), 14)) +#define rightRotate15_64(a) (rightRotate_64((a), 15)) +#define rightRotate16_64(a) (rightRotate_64((a), 16)) +#define rightRotate17_64(a) (rightRotate_64((a), 17)) +#define rightRotate18_64(a) (rightRotate_64((a), 18)) +#define rightRotate19_64(a) (rightRotate_64((a), 19)) +#define rightRotate20_64(a) (rightRotate_64((a), 20)) +#define rightRotate21_64(a) (rightRotate_64((a), 21)) +#define rightRotate22_64(a) (rightRotate_64((a), 22)) +#define rightRotate23_64(a) (rightRotate_64((a), 23)) +#define rightRotate24_64(a) (rightRotate_64((a), 24)) +#define rightRotate25_64(a) (rightRotate_64((a), 25)) +#define rightRotate26_64(a) (rightRotate_64((a), 26)) +#define rightRotate27_64(a) (rightRotate_64((a), 27)) +#define rightRotate28_64(a) (rightRotate_64((a), 28)) +#define rightRotate29_64(a) (rightRotate_64((a), 29)) +#define rightRotate30_64(a) (rightRotate_64((a), 30)) +#define rightRotate31_64(a) (rightRotate_64((a), 31)) +#define rightRotate32_64(a) (rightRotate_64((a), 32)) +#define rightRotate33_64(a) (rightRotate_64((a), 33)) +#define rightRotate34_64(a) (rightRotate_64((a), 34)) +#define rightRotate35_64(a) (rightRotate_64((a), 35)) +#define rightRotate36_64(a) (rightRotate_64((a), 36)) +#define rightRotate37_64(a) (rightRotate_64((a), 37)) +#define rightRotate38_64(a) (rightRotate_64((a), 38)) +#define rightRotate39_64(a) (rightRotate_64((a), 39)) +#define rightRotate40_64(a) (rightRotate_64((a), 40)) +#define rightRotate41_64(a) (rightRotate_64((a), 41)) +#define rightRotate42_64(a) (rightRotate_64((a), 42)) +#define rightRotate43_64(a) (rightRotate_64((a), 43)) +#define rightRotate44_64(a) (rightRotate_64((a), 44)) +#define rightRotate45_64(a) (rightRotate_64((a), 45)) +#define rightRotate46_64(a) (rightRotate_64((a), 46)) +#define rightRotate47_64(a) (rightRotate_64((a), 47)) +#define rightRotate48_64(a) (rightRotate_64((a), 48)) +#define rightRotate49_64(a) (rightRotate_64((a), 49)) +#define rightRotate50_64(a) (rightRotate_64((a), 50)) +#define rightRotate51_64(a) (rightRotate_64((a), 51)) +#define rightRotate52_64(a) (rightRotate_64((a), 52)) +#define rightRotate53_64(a) (rightRotate_64((a), 53)) +#define rightRotate54_64(a) (rightRotate_64((a), 54)) +#define rightRotate55_64(a) (rightRotate_64((a), 55)) +#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) +#define rightRotate57_64(a) (rightRotate_64((a), 57)) +#define rightRotate58_64(a) (rightRotate_64((a), 58)) +#define rightRotate59_64(a) (rightRotate_64((a), 59)) +#define rightRotate60_64(a) (rightRotate_64((a), 60)) +#define rightRotate61_64(a) (rightRotate_64((a), 61)) +#define rightRotate62_64(a) (rightRotate_64((a), 62)) +#define rightRotate63_64(a) (rightRotate_64((a), 63)) + +/* Rotate a 16-bit value left by a number of bits */ +#define leftRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (16 - (bits))); \ + })) + +/* Rotate a 16-bit value right by a number of bits */ +#define rightRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (16 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_16(a) (leftRotate_16((a), 1)) +#define leftRotate2_16(a) (leftRotate_16((a), 2)) +#define leftRotate3_16(a) (leftRotate_16((a), 3)) +#define leftRotate4_16(a) (leftRotate_16((a), 4)) +#define leftRotate5_16(a) (leftRotate_16((a), 5)) +#define leftRotate6_16(a) (leftRotate_16((a), 6)) +#define leftRotate7_16(a) (leftRotate_16((a), 7)) +#define leftRotate8_16(a) (leftRotate_16((a), 8)) +#define leftRotate9_16(a) (leftRotate_16((a), 9)) +#define leftRotate10_16(a) (leftRotate_16((a), 10)) +#define leftRotate11_16(a) (leftRotate_16((a), 11)) +#define leftRotate12_16(a) (leftRotate_16((a), 12)) +#define leftRotate13_16(a) (leftRotate_16((a), 13)) +#define leftRotate14_16(a) (leftRotate_16((a), 14)) +#define leftRotate15_16(a) (leftRotate_16((a), 15)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_16(a) (rightRotate_16((a), 1)) +#define rightRotate2_16(a) (rightRotate_16((a), 2)) +#define rightRotate3_16(a) (rightRotate_16((a), 3)) +#define rightRotate4_16(a) (rightRotate_16((a), 4)) +#define rightRotate5_16(a) (rightRotate_16((a), 5)) +#define rightRotate6_16(a) (rightRotate_16((a), 6)) +#define rightRotate7_16(a) (rightRotate_16((a), 7)) +#define rightRotate8_16(a) (rightRotate_16((a), 8)) +#define rightRotate9_16(a) (rightRotate_16((a), 9)) +#define rightRotate10_16(a) (rightRotate_16((a), 10)) +#define rightRotate11_16(a) (rightRotate_16((a), 11)) +#define rightRotate12_16(a) (rightRotate_16((a), 12)) +#define rightRotate13_16(a) (rightRotate_16((a), 13)) +#define rightRotate14_16(a) (rightRotate_16((a), 14)) +#define rightRotate15_16(a) (rightRotate_16((a), 15)) + +/* Rotate an 8-bit value left by a number of bits */ +#define leftRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (8 - (bits))); \ + })) + +/* Rotate an 8-bit value right by a number of bits */ +#define rightRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (8 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_8(a) (leftRotate_8((a), 1)) +#define leftRotate2_8(a) (leftRotate_8((a), 2)) +#define leftRotate3_8(a) (leftRotate_8((a), 3)) +#define leftRotate4_8(a) (leftRotate_8((a), 4)) +#define leftRotate5_8(a) (leftRotate_8((a), 5)) +#define leftRotate6_8(a) (leftRotate_8((a), 6)) +#define leftRotate7_8(a) (leftRotate_8((a), 7)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_8(a) (rightRotate_8((a), 1)) +#define rightRotate2_8(a) (rightRotate_8((a), 2)) +#define rightRotate3_8(a) (rightRotate_8((a), 3)) +#define rightRotate4_8(a) (rightRotate_8((a), 4)) +#define rightRotate5_8(a) (rightRotate_8((a), 5)) +#define rightRotate6_8(a) (rightRotate_8((a), 6)) +#define rightRotate7_8(a) (rightRotate_8((a), 7)) + +#endif diff --git a/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/aead-common.c b/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/aead-common.h b/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/api.h b/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/api.h deleted file mode 100644 index 32c9622..0000000 --- a/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 12 -#define CRYPTO_ABYTES 8 -#define CRYPTO_NOOVERLAP 1 diff --git a/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/encrypt.c b/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/encrypt.c deleted file mode 100644 index 2724d30..0000000 --- a/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "grain128.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return grain128_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return grain128_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/grain128.c b/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/grain128.c deleted file mode 100644 index fa41b64..0000000 --- a/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/grain128.c +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "grain128.h" -#include "internal-grain128.h" -#include - -aead_cipher_t const grain128_aead_cipher = { - "Grain-128AEAD", - GRAIN128_KEY_SIZE, - GRAIN128_NONCE_SIZE, - GRAIN128_TAG_SIZE, - AEAD_FLAG_NONE, - grain128_aead_encrypt, - grain128_aead_decrypt -}; - -/** - * \brief Encodes the associated data length in DER. - * - * \param buf The buffer to encode the length into. - * \param adlen The length of the associated data in bytes, which must be - * less than 2^32 to limit the length of the DER encoding to 5 bytes. - * - * \return The length of the DER encoding that was written to \a buf. - */ -static unsigned grain128_encode_adlen - (unsigned char buf[5], unsigned long long adlen) -{ - if (adlen < 0x80U) { - buf[0] = (unsigned char)adlen; - return 1; - } else if (adlen < 0x100U) { - buf[0] = 0x81; - buf[1] = (unsigned char)adlen; - return 2; - } else if (adlen < 0x10000U) { - buf[0] = 0x82; - buf[1] = (unsigned char)(adlen >> 8); - buf[2] = (unsigned char)adlen; - return 3; - } else if (adlen < 0x1000000U) { - buf[0] = 0x83; - buf[1] = (unsigned char)(adlen >> 16); - buf[2] = (unsigned char)(adlen >> 8); - buf[3] = (unsigned char)adlen; - return 4; - } else { - buf[0] = 0x84; - buf[1] = (unsigned char)(adlen >> 24); - buf[2] = (unsigned char)(adlen >> 16); - buf[3] = (unsigned char)(adlen >> 8); - buf[4] = (unsigned char)adlen; - return 5; - } -} - -int grain128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - grain128_state_t state; - unsigned char der[5]; - unsigned derlen; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + GRAIN128_TAG_SIZE; - - /* Limit the amount of associated data to make DER encoding easier */ - if (adlen >= 0x100000000ULL) - return -2; - - /* Initialize the Grain-128 stream cipher with the key and nonce */ - grain128_setup(&state, k, npub); - - /* Authenticate the associated data, prefixed with the DER-encoded length */ - derlen = grain128_encode_adlen(der, adlen); - grain128_authenticate(&state, der, derlen); - grain128_authenticate(&state, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - grain128_encrypt(&state, c, m, mlen); - - /* Generate the authentication tag */ - grain128_compute_tag(&state); - memcpy(c + mlen, state.ks, GRAIN128_TAG_SIZE); - return 0; -} - -int grain128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - grain128_state_t state; - unsigned char der[5]; - unsigned derlen; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < GRAIN128_TAG_SIZE) - return -1; - *mlen = clen - GRAIN128_TAG_SIZE; - - /* Limit the amount of associated data to make DER encoding easier */ - if (adlen >= 0x100000000ULL) - return -2; - - /* Initialize the Grain-128 stream cipher with the key and nonce */ - grain128_setup(&state, k, npub); - - /* Authenticate the associated data, prefixed 
with the DER-encoded length */ - derlen = grain128_encode_adlen(der, adlen); - grain128_authenticate(&state, der, derlen); - grain128_authenticate(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= GRAIN128_TAG_SIZE; - grain128_decrypt(&state, m, c, clen); - - /* Check the authentication tag */ - grain128_compute_tag(&state); - return aead_check_tag(m, clen, state.ks, c + clen, GRAIN128_TAG_SIZE); -} diff --git a/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/grain128.h b/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/grain128.h deleted file mode 100644 index c8d6de9..0000000 --- a/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/grain128.h +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_GRAIN128_H -#define LWCRYPTO_GRAIN128_H - -#include "aead-common.h" - -/** - * \file grain128.h - * \brief Grain-128AEAD authenticated encryption algorithm. - * - * Grain-128AEAD is an authenticated encryption algorithm based around a - * combination of a 128-bit linear feedback shift register (LFSR) and a - * 128-bit non-linear feedback shift register (NFSR). It is a member of - * the Grain family of stream ciphers. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for Grain-128AEAD. - */ -#define GRAIN128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Grain-128AEAD. - */ -#define GRAIN128_TAG_SIZE 8 - -/** - * \brief Size of the nonce for Grain-128AEAD. - */ -#define GRAIN128_NONCE_SIZE 12 - -/** - * \brief Meta-information block for the Grain-128AEAD cipher. - */ -extern aead_cipher_t const grain128_aead_cipher; - -/** - * \brief Encrypts and authenticates a packet with Grain-128AEAD. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. 
- * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa grain128_aead_decrypt() - */ -int grain128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Grain-128AEAD. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa grain128_aead_encrypt() - */ -int grain128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/internal-grain128.c b/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/internal-grain128.c deleted file mode 100644 index d0d71ea..0000000 --- a/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/internal-grain128.c +++ /dev/null @@ -1,411 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "internal-grain128.h" - -/* Extracts 32 bits from the Grain state that are not word-aligned */ -#define GWORD(a, b, start_bit) \ - (((a) << ((start_bit) % 32)) ^ ((b) >> (32 - ((start_bit) % 32)))) - -/** - * \brief Performs 32 rounds of Grain-128 in parallel. - * - * \param state Grain-128 state. - * \param x 32 bits of input to be incorporated into the LFSR state, or zero. - * \param x2 Another 32 bits to be incorporated into the NFSR state, or zero. - */ -static void grain128_core - (grain128_state_t *state, uint32_t x, uint32_t x2) -{ - uint32_t s0, s1, s2, s3; - - /* From the Grain-128AEAD specification, the LFSR feedback algorithm is: - * - * s'[i] = s[i + 1] - * s'[127] = s[0] ^ s[7] ^ s[38] ^ s[70] ^ s[81] ^ s[96] ^ x - * - * The bits are numbered from the most significant bit in the first - * word of the LFSR state. Calculate the feedback bits 32 at a time. - */ - s0 = state->lfsr[0]; - s1 = state->lfsr[1]; - s2 = state->lfsr[2]; - s3 = state->lfsr[3]; - x ^= s0; /* s[0] */ - x ^= GWORD(s0, s1, 7); /* s[7] */ - x ^= GWORD(s1, s2, 38); /* s[38] */ - x ^= GWORD(s2, s3, 70); /* s[70] */ - x ^= GWORD(s2, s3, 81); /* s[81] */ - x ^= s3; /* s[96] */ - - /* Rotate the LFSR state left by 32 bits and feed s0 into the NFSR */ - state->lfsr[0] = s1; - state->lfsr[1] = s2; - state->lfsr[2] = s3; - state->lfsr[3] = x; - x2 ^= s0; - - /* Perform the NFSR feedback algorithm from the specification: - * - * b'[i] = b[i + 1] - * b'[127] = s'[127] ^ b[0] ^ b[26] ^ b[56] ^ b[91] ^ b[96] - * ^ (b[3] & b[67]) ^ (b[11] & b[13]) ^ (b[17] & b[18]) - * ^ (b[27] & b[59]) ^ (b[40] & b[48]) ^ (b[61] & b[65]) - * ^ (b[68] & b[84]) ^ (b[22] & b[24] & b[25]) - * ^ (b[70] & b[78] & b[82]) - * ^ (b[88] & b[92] & b[93] & b[95]) ^ x2 - * - * Once again, we calculate 32 feedback bits in parallel. - */ - s0 = state->nfsr[0]; - s1 = state->nfsr[1]; - s2 = state->nfsr[2]; - s3 = state->nfsr[3]; - x2 ^= s0; /* b[0] */ - x2 ^= GWORD(s0, s1, 26); /* b[26] */ - x2 ^= GWORD(s1, s2, 56); /* b[56] */ - x2 ^= GWORD(s2, s3, 91); /* b[91] */ - x2 ^= s3; /* b[96] */ - x2 ^= GWORD(s0, s1, 3) & GWORD(s2, s3, 67); /* b[3] & b[67] */ - x2 ^= GWORD(s0, s1, 11) & GWORD(s0, s1, 13); /* b[11] & b[13] */ - x2 ^= GWORD(s0, s1, 17) & GWORD(s0, s1, 18); /* b[17] & b[18] */ - x2 ^= GWORD(s0, s1, 27) & GWORD(s1, s2, 59); /* b[27] & b[59] */ - x2 ^= GWORD(s1, s2, 40) & GWORD(s1, s2, 48); /* b[40] & b[48] */ - x2 ^= GWORD(s1, s2, 61) & GWORD(s2, s3, 65); /* b[61] & b[65] */ - x2 ^= GWORD(s2, s3, 68) & GWORD(s2, s3, 84); /* b[68] & b[84] */ - x2 ^= GWORD(s0, s1, 22) & GWORD(s0, s1, 24) & /* b[22] & b[24] & b[25] */ - GWORD(s0, s1, 25); - x2 ^= GWORD(s2, s3, 70) & GWORD(s2, s3, 78) & /* b[70] & b[78] & b[82] */ - GWORD(s2, s3, 82); - x2 ^= GWORD(s2, s3, 88) & GWORD(s2, s3, 92) & /* b[88] & b[92] ... */ - GWORD(s2, s3, 93) & GWORD(s2, s3, 95); /* ... & b[93] & b[95] */ - - /* Rotate the NFSR state left by 32 bits */ - state->nfsr[0] = s1; - state->nfsr[1] = s2; - state->nfsr[2] = s3; - state->nfsr[3] = x2; -} - -/** - * \brief Generates 32 bits of pre-output data. - * - * \param state Grain-128 state. - * - * \return The generated 32 bits of pre-output data. 
- */ -static uint32_t grain128_preoutput(const grain128_state_t *state) -{ - uint32_t s0, s1, s2, s3; - uint32_t b0, b1, b2, b3; - uint32_t x0, x4, y; - - /* From the Grain-128AEAD specification, each pre-output bit y is given by: - * - * x[0..8] = b[12], s[8], s[13], s[20], b[95], - * s[42], s[60], s[79], s[94] - * h(x) = (x[0] & x[1]) ^ (x[2] & x[3]) ^ (x[4] & x[5]) - * ^ (x[6] & x[7]) ^ (x[0] & x[4] & x[8]) - * y = h(x) ^ s[93] ^ b[2] ^ b[15] ^ b[36] ^ b[45] - * ^ b[64] ^ b[73] ^ b[89] - * - * Calculate 32 pre-output bits in parallel. - */ - s0 = state->lfsr[0]; - s1 = state->lfsr[1]; - s2 = state->lfsr[2]; - s3 = state->lfsr[3]; - b0 = state->nfsr[0]; - b1 = state->nfsr[1]; - b2 = state->nfsr[2]; - b3 = state->nfsr[3]; - x0 = GWORD(b0, b1, 12); - x4 = GWORD(b2, b3, 95); - y = (x0 & GWORD(s0, s1, 8)); /* x[0] & x[1] */ - y ^= (GWORD(s0, s1, 13) & GWORD(s0, s1, 20)); /* x[2] & x[3] */ - y ^= (x4 & GWORD(s1, s2, 42)); /* x[4] & x[5] */ - y ^= (GWORD(s1, s2, 60) & GWORD(s2, s3, 79)); /* x[6] & x[7] */ - y ^= (x0 & x4 & GWORD(s2, s3, 94)); /* x[0] & x[4] & x[8] */ - y ^= GWORD(s2, s3, 93); /* s[93] */ - y ^= GWORD(b0, b1, 2); /* b[2] */ - y ^= GWORD(b0, b1, 15); /* b[15] */ - y ^= GWORD(b1, b2, 36); /* b[36] */ - y ^= GWORD(b1, b2, 45); /* b[45] */ - y ^= b2; /* b[64] */ - y ^= GWORD(b2, b3, 73); /* b[73] */ - y ^= GWORD(b2, b3, 89); /* b[89] */ - return y; -} - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step_simple */ -#define bit_permute_step_simple(_y, mask, shift) \ - do { \ - (_y) = (((_y) & (mask)) << (shift)) | (((_y) >> (shift)) & (mask)); \ - } while (0) - -void grain128_setup - (grain128_state_t *state, const unsigned char *key, - const unsigned char *nonce) -{ - uint32_t k[4]; - unsigned round; - - /* Internally, the Grain-128 stream cipher uses big endian bit - * order, but the Grain-128AEAD specification for NIST uses little - * endian bit order. We need to swap the bits around when we load - * the bits of the key and the nonce. - * - * Permutation generated with "http://programming.sirrida.de/calcperm.php". - * - * P = [7 6 5 4 3 2 1 0 15 14 13 12 11 10 9 8 - * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24] - */ - #define SWAP_BITS(out, in) \ - do { \ - uint32_t tmp = (in); \ - bit_permute_step_simple(tmp, 0x55555555, 1); \ - bit_permute_step_simple(tmp, 0x33333333, 2); \ - bit_permute_step_simple(tmp, 0x0f0f0f0f, 4); \ - (out) = tmp; \ - } while (0) - - /* Initialize the LFSR state with the nonce and padding */ - SWAP_BITS(state->lfsr[0], be_load_word32(nonce)); - SWAP_BITS(state->lfsr[1], be_load_word32(nonce + 4)); - SWAP_BITS(state->lfsr[2], be_load_word32(nonce + 8)); - state->lfsr[3] = 0xFFFFFFFEU; /* pad with all-1s and a terminating 0 */ - - /* Initialize the NFSR state with the key */ - SWAP_BITS(k[0], be_load_word32(key)); - SWAP_BITS(k[1], be_load_word32(key + 4)); - SWAP_BITS(k[2], be_load_word32(key + 8)); - SWAP_BITS(k[3], be_load_word32(key + 12)); - state->nfsr[0] = k[0]; - state->nfsr[1] = k[1]; - state->nfsr[2] = k[2]; - state->nfsr[3] = k[3]; - - /* Perform 256 rounds of Grain-128 to mix up the initial state. 
- * The rounds can be performed 32 at a time: 32 * 8 = 256 */ - for (round = 0; round < 8; ++round) { - uint32_t y = grain128_preoutput(state); - grain128_core(state, y, y); - } - - /* Absorb the key into the state again and generate the initial - * state of the accumulator and the shift register */ - state->accum = ((uint64_t)(grain128_preoutput(state))) << 32; - grain128_core(state, k[0], 0); - state->accum |= grain128_preoutput(state); - grain128_core(state, k[1], 0); - state->sr = ((uint64_t)(grain128_preoutput(state))) << 32; - grain128_core(state, k[2], 0); - state->sr |= grain128_preoutput(state); - grain128_core(state, k[3], 0); - - /* No keystream data has been generated yet */ - state->posn = sizeof(state->ks); -} - -/** - * \brief Generates the next 16 byte block of keystream output data. - * - * \param state Grain-128 state. - */ -static void grain128_next_keystream(grain128_state_t *state) -{ - unsigned posn; - for (posn = 0; posn < sizeof(state->ks); posn += 4) { - /* Get the next word of pre-output and run the Grain-128 core */ - uint32_t x = grain128_preoutput(state); - grain128_core(state, 0, 0); - - /* Grain-128 uses big-endian bit order, but the NIST functions - * that are built on top of this use little-endian bit order. - * Swap the bits around so that they are ready for use later. - * - * We also need to separate the bits: even bits are used to encrypt - * and odd bits are used to authenticate. Shift them to separate - * bytes to make it easier to access the even and odd bits later. - * - * P = [7 15 6 14 5 13 4 12 3 11 2 10 1 9 0 8 - * 23 31 22 30 21 29 20 28 19 27 18 26 17 25 16 24] - */ - bit_permute_step(x, 0x11111111, 3); - bit_permute_step(x, 0x03030303, 6); - bit_permute_step(x, 0x000f000f, 12); - bit_permute_step_simple(x, 0x00ff00ff, 8); - be_store_word32(state->ks + posn, x); - } -} - -void grain128_authenticate - (grain128_state_t *state, const unsigned char *data, - unsigned long long len) -{ - unsigned char abyte; - unsigned char sbyte; - unsigned char bit; - uint64_t accum = state->accum; - uint64_t sr = state->sr; - unsigned posn = state->posn; - while (len > 0) { - /* Fetch the next byte to be authenticated */ - abyte = *data++; - --len; - - /* Request more keystream data if necessary */ - if (posn >= sizeof(state->ks)) { - grain128_next_keystream(state); - posn = 0; - } - - /* Get the next byte of keystream to add to the shift register. - * We use the odd bytes from the keystream and ignore even ones */ - sbyte = state->ks[posn + 1]; - posn += 2; - - /* XOR the shift register with the accumulator for each 1 bit - * in the byte that we are authenticating. And shift in the - * keystream byte we retrieved above */ - for (bit = 0; bit < 8; ++bit) { - accum ^= sr & (-((uint64_t)(abyte & 0x01))); - sr = (sr << 1) ^ (sbyte & 0x01); - abyte >>= 1; - sbyte >>= 1; - } - } - state->accum = accum; - state->sr = sr; - state->posn = posn; -} - -void grain128_encrypt - (grain128_state_t *state, unsigned char *c, const unsigned char *m, - unsigned long long len) -{ - unsigned char mbyte; - unsigned char sbyte; - unsigned char bit; - uint64_t accum = state->accum; - uint64_t sr = state->sr; - unsigned posn = state->posn; - while (len > 0) { - /* Fetch the next byte to be encrypted and authenticated */ - mbyte = *m++; - --len; - - /* Request more keystream data if necessary */ - if (posn >= sizeof(state->ks)) { - grain128_next_keystream(state); - posn = 0; - } - - /* Get the next two bytes of keystream data. 
The even byte is - * used to encrypt the input and the odd byte is shifted into - * the shift register for authentication purposes */ - *c++ = mbyte ^ state->ks[posn]; - sbyte = state->ks[posn + 1]; - posn += 2; - - /* XOR the shift register with the accumulator for each 1 bit - * in the plaintext byte that we are authenticating. And shift - * in the keystream byte we retrieved above */ - for (bit = 0; bit < 8; ++bit) { - accum ^= sr & (-((uint64_t)(mbyte & 0x01))); - sr = (sr << 1) ^ (sbyte & 0x01); - mbyte >>= 1; - sbyte >>= 1; - } - } - state->accum = accum; - state->sr = sr; - state->posn = posn; -} - -void grain128_decrypt - (grain128_state_t *state, unsigned char *m, const unsigned char *c, - unsigned long long len) -{ - unsigned char mbyte; - unsigned char sbyte; - unsigned char bit; - uint64_t accum = state->accum; - uint64_t sr = state->sr; - unsigned posn = state->posn; - while (len > 0) { - /* Fetch the next byte to be decrypted and authenticated */ - mbyte = *c++; - --len; - - /* Request more keystream data if necessary */ - if (posn >= sizeof(state->ks)) { - grain128_next_keystream(state); - posn = 0; - } - - /* Get the next two bytes of keystream data. The even byte is - * used to decrypt the input and the odd byte is shifted into - * the shift register for authentication purposes */ - mbyte ^= state->ks[posn]; - *m++ = mbyte; - sbyte = state->ks[posn + 1]; - posn += 2; - - /* XOR the shift register with the accumulator for each 1 bit - * in the plaintext byte that we are authenticating. And shift - * in the keystream byte we retrieved above */ - for (bit = 0; bit < 8; ++bit) { - accum ^= sr & (-((uint64_t)(mbyte & 0x01))); - sr = (sr << 1) ^ (sbyte & 0x01); - mbyte >>= 1; - sbyte >>= 1; - } - } - state->accum = accum; - state->sr = sr; - state->posn = posn; -} - -void grain128_compute_tag(grain128_state_t *state) -{ - uint64_t x; - - /* Authenticate a final 1 bit as padding on the stream */ - state->accum ^= state->sr; - - /* Swap the bits of the accumulator into little endian - * order and write them to the keystream buffer */ - x = state->accum; - bit_permute_step_simple(x, 0x5555555555555555ULL, 1); - bit_permute_step_simple(x, 0x3333333333333333ULL, 2); - bit_permute_step_simple(x, 0x0f0f0f0f0f0f0f0fULL, 4); - be_store_word64(state->ks, x); -} diff --git a/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/internal-grain128.h b/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/internal-grain128.h deleted file mode 100644 index 4c3a6e4..0000000 --- a/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/internal-grain128.h +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
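As an aside on the bit_permute_step_simple() calls used by SWAP_BITS and grain128_compute_tag() above: the three masked swaps (masks 0x55..., 0x33..., 0x0f... with shifts 1, 2 and 4) simply reverse the bit order inside every byte of the word. A small standalone illustration, written independently of the sources in this patch:

#include <stdint.h>
#include <assert.h>

/* Reverses the bit order within each byte of a 32-bit word, using the
 * same three swap steps as the permutation code above. */
static uint32_t reverse_bits_in_bytes(uint32_t x)
{
    x = ((x & 0x55555555u) << 1) | ((x >> 1) & 0x55555555u);
    x = ((x & 0x33333333u) << 2) | ((x >> 2) & 0x33333333u);
    x = ((x & 0x0f0f0f0fu) << 4) | ((x >> 4) & 0x0f0f0f0fu);
    return x;
}

/* Example: 0x01 -> 0x80, 0x02 -> 0x40, 0x03 -> 0xC0, 0x04 -> 0x20 */
static void reverse_bits_demo(void)
{
    assert(reverse_bits_in_bytes(0x01020304u) == 0x8040C020u);
}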
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_GRAIN128_H -#define LW_INTERNAL_GRAIN128_H - -#include "internal-util.h" - -/** - * \file internal-grain128.h - * \brief Internal implementation of the Grain-128 stream cipher. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Representation of the state of Grain-128. - * - * Note: The specification numbers bits starting with the most significant, - * so bit 0 is in the highest bit of the first word of each field below. - */ -typedef struct -{ - uint32_t lfsr[4]; /**< 128-bit LFSR state for Grain-128 */ - uint32_t nfsr[4]; /**< 128-bit NFSR state for Grain-128 */ - uint64_t accum; /**< 64-bit accumulator for authentication */ - uint64_t sr; /**< 64-bit shift register for authentication */ - unsigned char ks[16]; /**< Keystream block for auth or encrypt mode */ - unsigned posn; /**< Current position within the keystream */ - -} grain128_state_t; - -/** - * \brief Sets up the initial Grain-128 state with the key and nonce. - * - * \param state Grain-128 state to be initialized. - * \param key Points to the 128-bit key. - * \param nonce Points to the 96-bit nonce. - */ -void grain128_setup - (grain128_state_t *state, const unsigned char *key, - const unsigned char *nonce); - -/** - * \brief Authenticates data with Grain-128. - * - * \param state Grain-128 state. - * \param data Points to the data to be authenticated. - * \param len Length of the data to be authenticated. - */ -void grain128_authenticate - (grain128_state_t *state, const unsigned char *data, - unsigned long long len); - -/** - * \brief Encrypts and authenticates data with Grain-128. - * - * \param state Grain-128 state. - * \param c Points to the ciphertext output buffer. - * \param m Points to the plaintext input buffer. - * \param len Length of the data to be encrypted. - */ -void grain128_encrypt - (grain128_state_t *state, unsigned char *c, const unsigned char *m, - unsigned long long len); - -/** - * \brief Decrypts and authenticates data with Grain-128. - * - * \param state Grain-128 state. - * \param m Points to the plaintext output buffer. - * \param c Points to the ciphertext input buffer. - * \param len Length of the data to be decrypted. - */ -void grain128_decrypt - (grain128_state_t *state, unsigned char *m, const unsigned char *c, - unsigned long long len); - -/** - * \brief Computes the final authentication tag. - * - * \param state Grain-128 state. - * - * The final authentication tag is written to the first 8 bytes of state->ks. - */ -void grain128_compute_tag(grain128_state_t *state); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/internal-util.h b/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/grain-128aead/Implementations/crypto_aead/grain128aead/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd.
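For orientation, the typical call sequence for these internal primitives (mirroring the decryption path shown earlier in grain128.c) looks roughly like the sketch below. This is an illustrative reconstruction only: the real AEAD wrapper also authenticates a DER-encoded length prefix via grain128_encode_adlen() before the associated data, which is omitted here.

#include <string.h>
#include "internal-grain128.h"

/* Hypothetical encrypt-side driver for the internal Grain-128 API. */
static void grain128_encrypt_sketch
    (unsigned char *c, const unsigned char *m, unsigned long long mlen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *npub, const unsigned char *k)
{
    grain128_state_t state;
    grain128_setup(&state, k, npub);          /* 128-bit key, 96-bit nonce */
    grain128_authenticate(&state, ad, adlen); /* absorb associated data */
    grain128_encrypt(&state, c, m, mlen);     /* encrypt and accumulate MAC */
    grain128_compute_tag(&state);             /* tag -> first 8 bytes of ks */
    memcpy(c + mlen, state.ks, 8);            /* append the 64-bit tag */
}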
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) 
| \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = 
(src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/grain-128aead/Implementations/crypto_aead/grain128aead/rhys/internal-util.h b/grain-128aead/Implementations/crypto_aead/grain128aead/rhys/internal-util.h index e79158c..e30166d 100644 --- a/grain-128aead/Implementations/crypto_aead/grain128aead/rhys/internal-util.h +++ b/grain-128aead/Implementations/crypto_aead/grain128aead/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/aead-common.c b/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/aead-common.h b/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. 
- * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
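Because aead_cipher_t gathers the key, nonce, and tag lengths together with the encrypt and decrypt entry points, callers can drive any AEAD in the suite through the same function-pointer table. A hedged sketch of such a generic wrapper (the helper name is mine, and it assumes the cipher tolerates a NULL associated-data pointer when adlen is zero):

#include <stddef.h>
#include "aead-common.h"

/* Illustrative only: encrypt a message with whichever cipher the caller
 * supplies, looking up the encrypt entry point through its metadata. */
static int generic_aead_encrypt
    (const aead_cipher_t *cipher,
     unsigned char *c, unsigned long long *clen,
     const unsigned char *m, unsigned long long mlen,
     const unsigned char *npub, const unsigned char *k)
{
    /* No associated data and no secret nonce in this sketch; the output
     * buffer must have room for mlen + cipher->tag_len bytes. */
    return cipher->encrypt(c, clen, m, mlen, NULL, 0, NULL, npub, k);
}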
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/api.h b/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/api.h deleted file mode 100644 index c3c0a27..0000000 --- a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 12 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/encrypt.c b/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/encrypt.c deleted file mode 100644 index db50784..0000000 --- a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "hyena.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return hyena_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return hyena_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/hyena.c b/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/hyena.c deleted file mode 100644 index db5ba2b..0000000 --- a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/hyena.c +++ /dev/null @@ -1,293 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "hyena.h" -#include "internal-gift128.h" -#include "internal-util.h" -#include - -aead_cipher_t const hyena_cipher = { - "HYENA", - HYENA_KEY_SIZE, - HYENA_NONCE_SIZE, - HYENA_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - hyena_aead_encrypt, - hyena_aead_decrypt -}; - -/** - * \brief Doubles a delta value in the F(2^64) field. - * - * \param D The delta value to be doubled. - * - * D = D << 1 if the top-most bit is 0, or D = (D << 1) ^ 0x1B otherwise. - */ -static void hyena_double_delta(unsigned char D[8]) -{ - unsigned index; - unsigned char mask = (unsigned char)(((signed char)(D[0])) >> 7); - for (index = 0; index < 7; ++index) - D[index] = (D[index] << 1) | (D[index + 1] >> 7); - D[7] = (D[7] << 1) ^ (mask & 0x1B); -} - -/** - * \brief Triples a delta value in the F(2^64) field. - * - * \param D The delta value to be tripled. - * - * D' = D ^ (D << 1) if the top-most bit is 0, or D' = D ^ (D << 1) ^ 0x1B - * otherwise. - */ -static void hyena_triple_delta(unsigned char D[8]) -{ - unsigned index; - unsigned char mask = (unsigned char)(((signed char)(D[0])) >> 7); - for (index = 0; index < 7; ++index) - D[index] ^= (D[index] << 1) | (D[index + 1] >> 7); - D[7] ^= (D[7] << 1) ^ (mask & 0x1B); -} - -/** - * \brief Process the associated data for HYENA. - * - * \param ks Key schedule for the GIFT-128 cipher. - * \param Y Internal hash state of HYENA. - * \param D Internal hash state of HYENA. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
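hyena_double_delta() above doubles an 8-byte delta in F(2^64), with D[0] holding the most significant byte: the value is shifted left one bit and, when the bit shifted out was set, the reduction constant 0x1B is XORed into the low byte, using a mask instead of a branch so the timing does not depend on the data. The same step written over a single uint64_t, purely as an illustrative sketch, is:

#include <stdint.h>

/* Illustrative only: the doubling step of hyena_double_delta() on a
 * uint64_t rather than an 8-byte big-endian array. */
static uint64_t double_delta64(uint64_t d)
{
    /* All-ones mask when the top bit is set, zero otherwise. */
    uint64_t mask = (uint64_t)(-(int64_t)(d >> 63));
    return (d << 1) ^ (mask & 0x1B);
}

hyena_triple_delta() follows the same pattern but XORs the shifted value into the original, computing D ^ (2 * D) in the same field.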
- */ -static void hyena_process_ad - (const gift128n_key_schedule_t *ks, unsigned char Y[16], - unsigned char D[8], const unsigned char *ad, - unsigned long long adlen) -{ - unsigned char feedback[16]; - while (adlen > 16) { - hyena_double_delta(D); - memcpy(feedback, ad, 16); - lw_xor_block(feedback + 8, Y + 8, 8); - lw_xor_block(feedback + 8, D, 8); - lw_xor_block(Y, feedback, 16); - gift128n_encrypt(ks, Y, Y); - ad += 16; - adlen -= 16; - } - if (adlen == 16) { - hyena_triple_delta(D); - memcpy(feedback, ad, 16); - lw_xor_block(feedback + 8, Y + 8, 8); - lw_xor_block(feedback + 8, D, 8); - lw_xor_block(Y, feedback, 16); - } else { - unsigned temp = (unsigned)adlen; - hyena_triple_delta(D); - hyena_triple_delta(D); - memcpy(feedback, ad, temp); - feedback[temp] = 0x01; - memset(feedback + temp + 1, 0, 15 - temp); - if (temp > 8) - lw_xor_block(feedback + 8, Y + 8, temp - 8); - lw_xor_block(feedback + 8, D, 8); - lw_xor_block(Y, feedback, 16); - } -} - -int hyena_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - gift128n_key_schedule_t ks; - unsigned char Y[16]; - unsigned char D[8]; - unsigned char feedback[16]; - unsigned index; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + HYENA_TAG_SIZE; - - /* Set up the key schedule and use it to encrypt the nonce */ - gift128n_init(&ks, k); - Y[0] = 0; - if (adlen == 0) - Y[0] |= 0x01; - if (adlen == 0 && mlen == 0) - Y[0] |= 0x02; - Y[1] = 0; - Y[2] = 0; - Y[3] = 0; - memcpy(Y + 4, npub, HYENA_NONCE_SIZE); - gift128n_encrypt(&ks, Y, Y); - memcpy(D, Y + 8, 8); - - /* Process the associated data */ - hyena_process_ad(&ks, Y, D, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > 16) { - gift128n_encrypt(&ks, Y, Y); - hyena_double_delta(D); - memcpy(feedback, m, 16); - lw_xor_block(feedback + 8, Y + 8, 8); - lw_xor_block(feedback + 8, D, 8); - lw_xor_block_2_src(c, m, Y, 16); - lw_xor_block(Y, feedback, 16); - c += 16; - m += 16; - mlen -= 16; - } - gift128n_encrypt(&ks, Y, Y); - if (mlen == 16) { - hyena_triple_delta(D); - memcpy(feedback, m, 16); - lw_xor_block(feedback + 8, Y + 8, 8); - lw_xor_block(feedback + 8, D, 8); - lw_xor_block_2_src(c, m, Y, 16); - lw_xor_block(Y, feedback, 16); - c += 16; - } else { - unsigned temp = (unsigned)mlen; - hyena_triple_delta(D); - hyena_triple_delta(D); - memcpy(feedback, m, temp); - feedback[temp] = 0x01; - memset(feedback + temp + 1, 0, 15 - temp); - if (temp > 8) - lw_xor_block(feedback + 8, Y + 8, temp - 8); - lw_xor_block(feedback + 8, D, 8); - lw_xor_block_2_src(c, m, Y, temp); - lw_xor_block(Y, feedback, 16); - c += temp; - } - } - - /* Swap the two halves of Y and generate the authentication tag */ - for (index = 0; index < 8; ++index) { - unsigned char temp1 = Y[index]; - unsigned char temp2 = Y[index + 8]; - Y[index] = temp2; - Y[index + 8] = temp1; - } - gift128n_encrypt(&ks, c, Y); - return 0; -} - -int hyena_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - gift128n_key_schedule_t ks; - unsigned char Y[16]; - unsigned char D[8]; - unsigned char feedback[16]; - unsigned char *mtemp; - unsigned index; - (void)nsec; - - 
/* Validate the ciphertext length and set the return "mlen" value */ - if (clen < HYENA_TAG_SIZE) - return -1; - *mlen = clen - HYENA_TAG_SIZE; - - /* Set up the key schedule and use it to encrypt the nonce */ - gift128n_init(&ks, k); - Y[0] = 0; - if (adlen == 0) - Y[0] |= 0x01; - if (adlen == 0 && clen == HYENA_TAG_SIZE) - Y[0] |= 0x02; - Y[1] = 0; - Y[2] = 0; - Y[3] = 0; - memcpy(Y + 4, npub, HYENA_NONCE_SIZE); - gift128n_encrypt(&ks, Y, Y); - memcpy(D, Y + 8, 8); - - /* Process the associated data */ - hyena_process_ad(&ks, Y, D, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= HYENA_TAG_SIZE; - mtemp = m; - if (clen > 0) { - while (clen > 16) { - gift128n_encrypt(&ks, Y, Y); - hyena_double_delta(D); - memcpy(feedback + 8, c + 8, 8); - lw_xor_block_2_src(m, c, Y, 16); - memcpy(feedback, m, 8); - lw_xor_block(feedback + 8, D, 8); - lw_xor_block(Y, feedback, 16); - c += 16; - m += 16; - clen -= 16; - } - gift128n_encrypt(&ks, Y, Y); - if (clen == 16) { - hyena_triple_delta(D); - memcpy(feedback + 8, c + 8, 8); - lw_xor_block_2_src(m, c, Y, 16); - memcpy(feedback, m, 8); - lw_xor_block(feedback + 8, D, 8); - lw_xor_block(Y, feedback, 16); - c += 16; - } else { - unsigned temp = (unsigned)clen; - hyena_triple_delta(D); - hyena_triple_delta(D); - if (temp > 8) { - memcpy(feedback + 8, c + 8, temp - 8); - lw_xor_block_2_src(m, c, Y, temp); - memcpy(feedback, m, 8); - } else { - lw_xor_block_2_src(m, c, Y, temp); - memcpy(feedback, m, temp); - } - feedback[temp] = 0x01; - memset(feedback + temp + 1, 0, 15 - temp); - lw_xor_block(feedback + 8, D, 8); - lw_xor_block(Y, feedback, 16); - c += temp; - } - } - - /* Swap the two halves of Y and check the authentication tag */ - for (index = 0; index < 8; ++index) { - unsigned char temp1 = Y[index]; - unsigned char temp2 = Y[index + 8]; - Y[index] = temp2; - Y[index + 8] = temp1; - } - gift128n_encrypt(&ks, Y, Y); - return aead_check_tag(mtemp, *mlen, Y, c, HYENA_TAG_SIZE); -} diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/hyena.h b/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/hyena.h deleted file mode 100644 index ee9bb9c..0000000 --- a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/hyena.h +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LWCRYPTO_HYENA_H -#define LWCRYPTO_HYENA_H - -#include "aead-common.h" - -/** - * \file hyena.h - * \brief HYENA authenticated encryption algorithm. - * - * HYENA is an authenticated encryption algorithm that is built around the - * GIFT-128 block cipher. The algorithm has a 128-bit key, a 96-bit nonce, - * and a 128-bit authentication tag. - * - * References: https://www.isical.ac.in/~lightweight/hyena/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for HYENA. - */ -#define HYENA_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for HYENA. - */ -#define HYENA_TAG_SIZE 16 - -/** - * \brief Size of the nonce for HYENA. - */ -#define HYENA_NONCE_SIZE 12 - -/** - * \brief Meta-information block for the HYENA cipher. - */ -extern aead_cipher_t const hyena_cipher; - -/** - * \brief Encrypts and authenticates a packet with HYENA. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa hyena_aead_decrypt() - */ -int hyena_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with HYENA. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
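The hyena.h interface fixes the sizes at a 16-byte key, 12-byte nonce, and 16-byte tag, and the encrypt/decrypt pair declared in this header returns 0 on success and a negative value on failure. A round-trip usage sketch (placeholder key, nonce, and message, not test vectors) could look like:

#include <string.h>
#include "hyena.h"

/* Illustrative round-trip using the documented HYENA sizes. */
int hyena_roundtrip_example(void)
{
    unsigned char key[HYENA_KEY_SIZE] = {0};
    unsigned char nonce[HYENA_NONCE_SIZE] = {0};
    unsigned char msg[4] = {'t', 'e', 's', 't'};
    unsigned char ct[sizeof(msg) + HYENA_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long clen, mlen;

    if (hyena_aead_encrypt(ct, &clen, msg, sizeof(msg),
                           NULL, 0, NULL, nonce, key) != 0)
        return -1;
    /* clen is now sizeof(msg) + HYENA_TAG_SIZE */
    if (hyena_aead_decrypt(pt, &mlen, NULL, ct, clen,
                           NULL, 0, nonce, key) != 0)
        return -1;                      /* tag check failed */
    return memcmp(pt, msg, (size_t)mlen) == 0 ? 0 : -1;
}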
- * - * \sa hyena_aead_encrypt() - */ -int hyena_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128-config.h b/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128-config.h deleted file mode 100644 index 62131ba..0000000 --- a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128-config.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_GIFT128_CONFIG_H -#define LW_INTERNAL_GIFT128_CONFIG_H - -/** - * \file internal-gift128-config.h - * \brief Configures the variant of GIFT-128 to use. - */ - -/** - * \brief Select the full variant of GIFT-128. - * - * The full variant requires 320 bytes for the key schedule and uses the - * fixslicing method to implement encryption and decryption. - */ -#define GIFT128_VARIANT_FULL 0 - -/** - * \brief Select the small variant of GIFT-128. - * - * The small variant requires 80 bytes for the key schedule. The rest - * of the key schedule is expanded on the fly during encryption. - * - * The fixslicing method is used to implement encryption and the slower - * bitslicing method is used to implement decryption. The small variant - * is suitable when memory is at a premium, decryption is not needed, - * but encryption performance is still important. - */ -#define GIFT128_VARIANT_SMALL 1 - -/** - * \brief Select the tiny variant of GIFT-128. - * - * The tiny variant requires 16 bytes for the key schedule and uses the - * bitslicing method to implement encryption and decryption. It is suitable - * for use when memory is very tight and performance is not critical. - */ -#define GIFT128_VARIANT_TINY 2 - -/** - * \def GIFT128_VARIANT - * \brief Selects the default variant of GIFT-128 to use on this platform. - */ -/** - * \def GIFT128_VARIANT_ASM - * \brief Defined to 1 if the GIFT-128 implementation has been replaced - * with an assembly code version. 
- */ -#if defined(__AVR__) && !defined(GIFT128_VARIANT_ASM) -#define GIFT128_VARIANT_ASM 1 -#endif -#if !defined(GIFT128_VARIANT) -#define GIFT128_VARIANT GIFT128_VARIANT_FULL -#endif -#if !defined(GIFT128_VARIANT_ASM) -#define GIFT128_VARIANT_ASM 0 -#endif - -#endif diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128.c b/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128.c deleted file mode 100644 index c6ac5ec..0000000 --- a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128.c +++ /dev/null @@ -1,1498 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-gift128.h" -#include "internal-util.h" - -#if !GIFT128_VARIANT_ASM - -#if GIFT128_VARIANT != GIFT128_VARIANT_TINY - -/* Round constants for GIFT-128 in the fixsliced representation */ -static uint32_t const GIFT128_RC_fixsliced[40] = { - 0x10000008, 0x80018000, 0x54000002, 0x01010181, 0x8000001f, 0x10888880, - 0x6001e000, 0x51500002, 0x03030180, 0x8000002f, 0x10088880, 0x60016000, - 0x41500002, 0x03030080, 0x80000027, 0x10008880, 0x4001e000, 0x11500002, - 0x03020180, 0x8000002b, 0x10080880, 0x60014000, 0x01400002, 0x02020080, - 0x80000021, 0x10000080, 0x0001c000, 0x51000002, 0x03010180, 0x8000002e, - 0x10088800, 0x60012000, 0x40500002, 0x01030080, 0x80000006, 0x10008808, - 0xc001a000, 0x14500002, 0x01020181, 0x8000001a -}; - -#endif - -#if GIFT128_VARIANT != GIFT128_VARIANT_FULL - -/* Round constants for GIFT-128 in the bitsliced representation */ -static uint8_t const GIFT128_RC[40] = { - 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, - 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, - 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, - 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38, - 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A -}; - -#endif - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) - -/* - * The permutation below was generated by the online permuation generator at - * "http://programming.sirrida.de/calcperm.php". 
- * - * All of the permutuations are essentially the same, except that each is - * rotated by 8 bits with respect to the next: - * - * P0: 0 24 16 8 1 25 17 9 2 26 18 10 3 27 19 11 4 28 20 12 5 29 21 13 6 30 22 14 7 31 23 15 - * P1: 8 0 24 16 9 1 25 17 10 2 26 18 11 3 27 19 12 4 28 20 13 5 29 21 14 6 30 22 15 7 31 23 - * P2: 16 8 0 24 17 9 1 25 18 10 2 26 19 11 3 27 20 12 4 28 21 13 5 29 22 14 6 30 23 15 7 31 - * P3: 24 16 8 0 25 17 9 1 26 18 10 2 27 19 11 3 28 20 12 4 29 21 13 5 30 22 14 6 31 23 15 7 - * - * The most efficient permutation from the online generator was P3, so we - * perform it as the core of the others, and then perform a final rotation. - * - * It is possible to do slightly better than "P3 then rotate" on desktop and - * server architectures for the other permutations. But the advantage isn't - * as evident on embedded platforms so we keep things simple. - */ -#define PERM3_INNER(x) \ - do { \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x000000ff, 24); \ - } while (0) -#define PERM0(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate8(_x); \ - } while (0) -#define PERM1(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate16(_x); \ - } while (0) -#define PERM2(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate24(_x); \ - } while (0) -#define PERM3(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) - -#define INV_PERM3_INNER(x) \ - do { \ - bit_permute_step(x, 0x00550055, 9); \ - bit_permute_step(x, 0x00003333, 18); \ - bit_permute_step(x, 0x000f000f, 12); \ - bit_permute_step(x, 0x000000ff, 24); \ - } while (0) -#define INV_PERM0(x) \ - do { \ - uint32_t _x = rightRotate8(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM1(x) \ - do { \ - uint32_t _x = rightRotate16(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM2(x) \ - do { \ - uint32_t _x = rightRotate24(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM3(x) \ - do { \ - uint32_t _x = (x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) - -/** - * \brief Converts the GIFT-128 nibble-based representation into word-based. - * - * \param output Output buffer to write the word-based version to. - * \param input Input buffer to read the nibble-based version from. - * - * The \a input and \a output buffers can be the same buffer. - */ -static void gift128n_to_words - (unsigned char *output, const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Load the input buffer into 32-bit words. We use the nibble order - * from the HYENA submission to NIST which is byte-reversed with respect - * to the nibble order of the original GIFT-128 paper. Nibble zero is in - * the first byte instead of the last, which means little-endian order. */ - s0 = le_load_word32(input + 12); - s1 = le_load_word32(input + 8); - s2 = le_load_word32(input + 4); - s3 = le_load_word32(input); - - /* Rearrange the bits so that bits 0..3 of each nibble are - * scattered to bytes 0..3 of each word. The permutation is: - * - * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 - * - * Generated with "http://programming.sirrida.de/calcperm.php". 
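bit_permute_step() above (from programming.sirrida.de) is a delta swap: every bit selected by the mask trades places with the bit "shift" positions above it, and both the PERM*/INV_PERM* permutations and the nibble/word conversions are built from a few such swaps. A small self-contained illustration, with a helper name and test values of my own choosing:

#include <stdint.h>
#include <assert.h>

/* Illustrative stand-alone form of the bit_permute_step() delta swap:
 * each bit selected by "mask" is exchanged with the bit "shift"
 * positions above it. */
static uint32_t delta_swap(uint32_t y, uint32_t mask, unsigned shift)
{
    uint32_t t = ((y >> shift) ^ y) & mask;
    return (y ^ t) ^ (t << shift);
}

int main(void)
{
    /* Swapping bit 0 with bit 3 of 0x8 gives 0x1. */
    assert(delta_swap(0x8u, 0x1u, 3) == 0x1u);
    /* When mask and mask << shift do not overlap, the swap is an
     * involution: applying it twice restores the original value. */
    assert(delta_swap(delta_swap(0x12345678u, 0x0a0a0a0au, 3),
                      0x0a0a0a0au, 3) == 0x12345678u);
    return 0;
}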
- */ - #define PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - PERM_WORDS(s0); - PERM_WORDS(s1); - PERM_WORDS(s2); - PERM_WORDS(s3); - - /* Rearrange the bytes and write them to the output buffer */ - output[0] = (uint8_t)s0; - output[1] = (uint8_t)s1; - output[2] = (uint8_t)s2; - output[3] = (uint8_t)s3; - output[4] = (uint8_t)(s0 >> 8); - output[5] = (uint8_t)(s1 >> 8); - output[6] = (uint8_t)(s2 >> 8); - output[7] = (uint8_t)(s3 >> 8); - output[8] = (uint8_t)(s0 >> 16); - output[9] = (uint8_t)(s1 >> 16); - output[10] = (uint8_t)(s2 >> 16); - output[11] = (uint8_t)(s3 >> 16); - output[12] = (uint8_t)(s0 >> 24); - output[13] = (uint8_t)(s1 >> 24); - output[14] = (uint8_t)(s2 >> 24); - output[15] = (uint8_t)(s3 >> 24); -} - -/** - * \brief Converts the GIFT-128 word-based representation into nibble-based. - * - * \param output Output buffer to write the nibble-based version to. - * \param input Input buffer to read the word-based version from. - */ -static void gift128n_to_nibbles - (unsigned char *output, const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Load the input bytes and rearrange them so that s0 contains the - * most significant nibbles and s3 contains the least significant */ - s0 = (((uint32_t)(input[12])) << 24) | - (((uint32_t)(input[8])) << 16) | - (((uint32_t)(input[4])) << 8) | - ((uint32_t)(input[0])); - s1 = (((uint32_t)(input[13])) << 24) | - (((uint32_t)(input[9])) << 16) | - (((uint32_t)(input[5])) << 8) | - ((uint32_t)(input[1])); - s2 = (((uint32_t)(input[14])) << 24) | - (((uint32_t)(input[10])) << 16) | - (((uint32_t)(input[6])) << 8) | - ((uint32_t)(input[2])); - s3 = (((uint32_t)(input[15])) << 24) | - (((uint32_t)(input[11])) << 16) | - (((uint32_t)(input[7])) << 8) | - ((uint32_t)(input[3])); - - /* Apply the inverse of PERM_WORDS() from the function above */ - #define INV_PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x00aa00aa, 7); \ - bit_permute_step(x, 0x0000cccc, 14); \ - bit_permute_step(x, 0x00f000f0, 4); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - INV_PERM_WORDS(s0); - INV_PERM_WORDS(s1); - INV_PERM_WORDS(s2); - INV_PERM_WORDS(s3); - - /* Store the result into the output buffer as 32-bit words */ - le_store_word32(output + 12, s0); - le_store_word32(output + 8, s1); - le_store_word32(output + 4, s2); - le_store_word32(output, s3); -} - -void gift128n_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - gift128n_to_words(output, input); - gift128b_encrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} - -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - gift128n_to_words(output, input); - gift128b_decrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} - -#if GIFT128_VARIANT != GIFT128_VARIANT_TINY - -/** - * \brief Swaps bits within two words. - * - * \param a The first word. - * \param b The second word. - * \param mask Mask for the bits to shift. - * \param shift Shift amount in bits. - */ -#define gift128b_swap_move(a, b, mask, shift) \ - do { \ - uint32_t tmp = ((b) ^ ((a) >> (shift))) & (mask); \ - (b) ^= tmp; \ - (a) ^= tmp << (shift); \ - } while (0) - -/** - * \brief Derives the next 10 fixsliced keys in the key schedule. 
- * - * \param next Points to the buffer to receive the next 10 keys. - * \param prev Points to the buffer holding the previous 10 keys. - * - * The \a next and \a prev buffers are allowed to be the same. - */ -#define gift128b_derive_keys(next, prev) \ - do { \ - /* Key 0 */ \ - uint32_t s = (prev)[0]; \ - uint32_t t = (prev)[1]; \ - gift128b_swap_move(t, t, 0x00003333U, 16); \ - gift128b_swap_move(t, t, 0x55554444U, 1); \ - (next)[0] = t; \ - /* Key 1 */ \ - s = leftRotate8(s & 0x33333333U) | leftRotate16(s & 0xCCCCCCCCU); \ - gift128b_swap_move(s, s, 0x55551100U, 1); \ - (next)[1] = s; \ - /* Key 2 */ \ - s = (prev)[2]; \ - t = (prev)[3]; \ - (next)[2] = ((t >> 4) & 0x0F000F00U) | ((t & 0x0F000F00U) << 4) | \ - ((t >> 6) & 0x00030003U) | ((t & 0x003F003FU) << 2); \ - /* Key 3 */ \ - (next)[3] = ((s >> 6) & 0x03000300U) | ((s & 0x3F003F00U) << 2) | \ - ((s >> 5) & 0x00070007U) | ((s & 0x001F001FU) << 3); \ - /* Key 4 */ \ - s = (prev)[4]; \ - t = (prev)[5]; \ - (next)[4] = leftRotate8(t & 0xAAAAAAAAU) | \ - leftRotate16(t & 0x55555555U); \ - /* Key 5 */ \ - (next)[5] = leftRotate8(s & 0x55555555U) | \ - leftRotate12(s & 0xAAAAAAAAU); \ - /* Key 6 */ \ - s = (prev)[6]; \ - t = (prev)[7]; \ - (next)[6] = ((t >> 2) & 0x03030303U) | ((t & 0x03030303U) << 2) | \ - ((t >> 1) & 0x70707070U) | ((t & 0x10101010U) << 3); \ - /* Key 7 */ \ - (next)[7] = ((s >> 18) & 0x00003030U) | ((s & 0x01010101U) << 3) | \ - ((s >> 14) & 0x0000C0C0U) | ((s & 0x0000E0E0U) << 15) | \ - ((s >> 1) & 0x07070707U) | ((s & 0x00001010U) << 19); \ - /* Key 8 */ \ - s = (prev)[8]; \ - t = (prev)[9]; \ - (next)[8] = ((t >> 4) & 0x0FFF0000U) | ((t & 0x000F0000U) << 12) | \ - ((t >> 8) & 0x000000FFU) | ((t & 0x000000FFU) << 8); \ - /* Key 9 */ \ - (next)[9] = ((s >> 6) & 0x03FF0000U) | ((s & 0x003F0000U) << 10) | \ - ((s >> 4) & 0x00000FFFU) | ((s & 0x0000000FU) << 12); \ - } while (0) - -/** - * \brief Compute the round keys for GIFT-128 in the fixsliced representation. - * - * \param ks Points to the key schedule to initialize. - * \param k0 First key word. - * \param k1 Second key word. - * \param k2 Third key word. - * \param k3 Fourth key word. 
- */ -static void gift128b_compute_round_keys - (gift128b_key_schedule_t *ks, - uint32_t k0, uint32_t k1, uint32_t k2, uint32_t k3) -{ - unsigned index; - uint32_t temp; - - /* Set the regular key with k0 and k3 pre-swapped for the round function */ - ks->k[0] = k3; - ks->k[1] = k1; - ks->k[2] = k2; - ks->k[3] = k0; - - /* Pre-compute the keys for rounds 3..10 and permute into fixsliced form */ - for (index = 4; index < 20; index += 2) { - ks->k[index] = ks->k[index - 3]; - temp = ks->k[index - 4]; - temp = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - ks->k[index + 1] = temp; - } - for (index = 0; index < 20; index += 10) { - /* Keys 0 and 10 */ - temp = ks->k[index]; - gift128b_swap_move(temp, temp, 0x00550055U, 9); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index] = temp; - - /* Keys 1 and 11 */ - temp = ks->k[index + 1]; - gift128b_swap_move(temp, temp, 0x00550055U, 9); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 1] = temp; - - /* Keys 2 and 12 */ - temp = ks->k[index + 2]; - gift128b_swap_move(temp, temp, 0x11111111U, 3); - gift128b_swap_move(temp, temp, 0x03030303U, 6); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 2] = temp; - - /* Keys 3 and 13 */ - temp = ks->k[index + 3]; - gift128b_swap_move(temp, temp, 0x11111111U, 3); - gift128b_swap_move(temp, temp, 0x03030303U, 6); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 3] = temp; - - /* Keys 4 and 14 */ - temp = ks->k[index + 4]; - gift128b_swap_move(temp, temp, 0x0000AAAAU, 15); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 4] = temp; - - /* Keys 5 and 15 */ - temp = ks->k[index + 5]; - gift128b_swap_move(temp, temp, 0x0000AAAAU, 15); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 5] = temp; - - /* Keys 6 and 16 */ - temp = ks->k[index + 6]; - gift128b_swap_move(temp, temp, 0x0A0A0A0AU, 3); - gift128b_swap_move(temp, temp, 0x00CC00CCU, 6); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 6] = temp; - - /* Keys 7 and 17 */ - temp = ks->k[index + 7]; - gift128b_swap_move(temp, temp, 0x0A0A0A0AU, 3); - gift128b_swap_move(temp, temp, 0x00CC00CCU, 6); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 7] = temp; - - /* Keys 8, 9, 18, and 19 do not need any adjustment */ - } - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - /* Derive the fixsliced keys for the remaining rounds 11..40 */ - for (index = 20; index < 80; index += 10) { - gift128b_derive_keys(ks->k + index, ks->k + index - 20); - } -#endif -} - -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) -{ - gift128b_compute_round_keys - (ks, be_load_word32(key), be_load_word32(key + 4), - be_load_word32(key + 8), be_load_word32(key + 12)); -} - -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) -{ - /* Use 
the little-endian key byte order from the HYENA submission */ - gift128b_compute_round_keys - (ks, le_load_word32(key + 12), le_load_word32(key + 8), - le_load_word32(key + 4), le_load_word32(key)); -} - -/** - * \brief Performs the GIFT-128 S-box on the bit-sliced state. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_sbox(s0, s1, s2, s3) \ - do { \ - s1 ^= s0 & s2; \ - s0 ^= s1 & s3; \ - s2 ^= s0 | s1; \ - s3 ^= s2; \ - s1 ^= s3; \ - s3 ^= 0xFFFFFFFFU; \ - s2 ^= s0 & s1; \ - } while (0) - -/** - * \brief Performs the inverse of the GIFT-128 S-box on the bit-sliced state. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_sbox(s0, s1, s2, s3) \ - do { \ - s2 ^= s3 & s1; \ - s0 ^= 0xFFFFFFFFU; \ - s1 ^= s0; \ - s0 ^= s2; \ - s2 ^= s3 | s1; \ - s3 ^= s1 & s0; \ - s1 ^= s3 & s2; \ - } while (0) - -/** - * \brief Permutes the GIFT-128 state between the 1st and 2nd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_1(s0, s1, s2, s3) \ - do { \ - s1 = ((s1 >> 2) & 0x33333333U) | ((s1 & 0x33333333U) << 2); \ - s2 = ((s2 >> 3) & 0x11111111U) | ((s2 & 0x77777777U) << 1); \ - s3 = ((s3 >> 1) & 0x77777777U) | ((s3 & 0x11111111U) << 3); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 2nd and 3rd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_2(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 4) & 0x0FFF0FFFU) | ((s0 & 0x000F000FU) << 12); \ - s1 = ((s1 >> 8) & 0x00FF00FFU) | ((s1 & 0x00FF00FFU) << 8); \ - s2 = ((s2 >> 12) & 0x000F000FU) | ((s2 & 0x0FFF0FFFU) << 4); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 3rd and 4th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_3(s0, s1, s2, s3) \ - do { \ - gift128b_swap_move(s1, s1, 0x55555555U, 1); \ - s2 = leftRotate16(s2); \ - gift128b_swap_move(s2, s2, 0x00005555U, 1); \ - s3 = leftRotate16(s3); \ - gift128b_swap_move(s3, s3, 0x55550000U, 1); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 4th and 5th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_4(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 6) & 0x03030303U) | ((s0 & 0x3F3F3F3FU) << 2); \ - s1 = ((s1 >> 4) & 0x0F0F0F0FU) | ((s1 & 0x0F0F0F0FU) << 4); \ - s2 = ((s2 >> 2) & 0x3F3F3F3FU) | ((s2 & 0x03030303U) << 6); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 5th and 1st mini-rounds. 
- * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_5(s0, s1, s2, s3) \ - do { \ - s1 = leftRotate16(s1); \ - s2 = rightRotate8(s2); \ - s3 = leftRotate8(s3); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 1st and 2nd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_1(s0, s1, s2, s3) \ - do { \ - s1 = ((s1 >> 2) & 0x33333333U) | ((s1 & 0x33333333U) << 2); \ - s2 = ((s2 >> 1) & 0x77777777U) | ((s2 & 0x11111111U) << 3); \ - s3 = ((s3 >> 3) & 0x11111111U) | ((s3 & 0x77777777U) << 1); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 2nd and 3rd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_2(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 12) & 0x000F000FU) | ((s0 & 0x0FFF0FFFU) << 4); \ - s1 = ((s1 >> 8) & 0x00FF00FFU) | ((s1 & 0x00FF00FFU) << 8); \ - s2 = ((s2 >> 4) & 0x0FFF0FFFU) | ((s2 & 0x000F000FU) << 12); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 3rd and 4th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_3(s0, s1, s2, s3) \ - do { \ - gift128b_swap_move(s1, s1, 0x55555555U, 1); \ - gift128b_swap_move(s2, s2, 0x00005555U, 1); \ - s2 = leftRotate16(s2); \ - gift128b_swap_move(s3, s3, 0x55550000U, 1); \ - s3 = leftRotate16(s3); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 4th and 5th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_4(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 2) & 0x3F3F3F3FU) | ((s0 & 0x03030303U) << 6); \ - s1 = ((s1 >> 4) & 0x0F0F0F0FU) | ((s1 & 0x0F0F0F0FU) << 4); \ - s2 = ((s2 >> 6) & 0x03030303U) | ((s2 & 0x3F3F3F3FU) << 2); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 5th and 1st mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_5(s0, s1, s2, s3) \ - do { \ - s1 = leftRotate16(s1); \ - s2 = leftRotate8(s2); \ - s3 = rightRotate8(s3); \ - } while (0); - -/** - * \brief Performs five fixsliced encryption rounds for GIFT-128. - * - * \param rk Points to the 10 round keys for these rounds. - * \param rc Points to the round constants for these rounds. - * - * We perform all 40 rounds of the fixsliced GIFT-128 five at a time. 
- * - * The permutation is restructured so that one of the words each round - * does not need to be permuted, with the others rotating left, up, right, - * and down to keep the bits in line with their non-moving counterparts. - * This reduces the number of shifts required significantly. - * - * At the end of five rounds, the bit ordering will return to the - * original position. We then repeat the process for the next 5 rounds. - */ -#define gift128b_encrypt_5_rounds(rk, rc) \ - do { \ - /* 1st round - S-box, rotate left, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_1(s0, s1, s2, s3); \ - s1 ^= (rk)[0]; \ - s2 ^= (rk)[1]; \ - s0 ^= (rc)[0]; \ - \ - /* 2nd round - S-box, rotate up, add round key */ \ - gift128b_sbox(s3, s1, s2, s0); \ - gift128b_permute_state_2(s0, s1, s2, s3); \ - s1 ^= (rk)[2]; \ - s2 ^= (rk)[3]; \ - s3 ^= (rc)[1]; \ - \ - /* 3rd round - S-box, swap columns, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_3(s0, s1, s2, s3); \ - s1 ^= (rk)[4]; \ - s2 ^= (rk)[5]; \ - s0 ^= (rc)[2]; \ - \ - /* 4th round - S-box, rotate left and swap rows, add round key */ \ - gift128b_sbox(s3, s1, s2, s0); \ - gift128b_permute_state_4(s0, s1, s2, s3); \ - s1 ^= (rk)[6]; \ - s2 ^= (rk)[7]; \ - s3 ^= (rc)[3]; \ - \ - /* 5th round - S-box, rotate up, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_5(s0, s1, s2, s3); \ - s1 ^= (rk)[8]; \ - s2 ^= (rk)[9]; \ - s0 ^= (rc)[4]; \ - \ - /* Swap s0 and s3 in preparation for the next 1st round */ \ - s0 ^= s3; \ - s3 ^= s0; \ - s0 ^= s3; \ - } while (0) - -/** - * \brief Performs five fixsliced decryption rounds for GIFT-128. - * - * \param rk Points to the 10 round keys for these rounds. - * \param rc Points to the round constants for these rounds. - * - * We perform all 40 rounds of the fixsliced GIFT-128 five at a time. 
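Each gift128b_encrypt_5_rounds() block above finishes by exchanging s0 and s3 with three XORs instead of a temporary word, and gift128b_decrypt_5_rounds() starts by undoing the same exchange. As a tiny standalone check that the three XORs really swap the two words:

#include <stdint.h>
#include <assert.h>

int main(void)
{
    /* The classic in-place XOR swap used at the seam between
     * consecutive five-round groups. */
    uint32_t s0 = 0x11111111u, s3 = 0x99999999u;
    s0 ^= s3;
    s3 ^= s0;
    s0 ^= s3;
    assert(s0 == 0x99999999u && s3 == 0x11111111u);
    return 0;
}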
- */ -#define gift128b_decrypt_5_rounds(rk, rc) \ - do { \ - /* Swap s0 and s3 in preparation for the next 5th round */ \ - s0 ^= s3; \ - s3 ^= s0; \ - s0 ^= s3; \ - \ - /* 5th round - S-box, rotate down, add round key */ \ - s1 ^= (rk)[8]; \ - s2 ^= (rk)[9]; \ - s0 ^= (rc)[4]; \ - gift128b_inv_permute_state_5(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - \ - /* 4th round - S-box, rotate right and swap rows, add round key */ \ - s1 ^= (rk)[6]; \ - s2 ^= (rk)[7]; \ - s3 ^= (rc)[3]; \ - gift128b_inv_permute_state_4(s0, s1, s2, s3); \ - gift128b_inv_sbox(s0, s1, s2, s3); \ - \ - /* 3rd round - S-box, swap columns, add round key */ \ - s1 ^= (rk)[4]; \ - s2 ^= (rk)[5]; \ - s0 ^= (rc)[2]; \ - gift128b_inv_permute_state_3(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - \ - /* 2nd round - S-box, rotate down, add round key */ \ - s1 ^= (rk)[2]; \ - s2 ^= (rk)[3]; \ - s3 ^= (rc)[1]; \ - gift128b_inv_permute_state_2(s0, s1, s2, s3); \ - gift128b_inv_sbox(s0, s1, s2, s3); \ - \ - /* 1st round - S-box, rotate right, add round key */ \ - s1 ^= (rk)[0]; \ - s2 ^= (rk)[1]; \ - s0 ^= (rc)[0]; \ - gift128b_inv_permute_state_1(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - } while (0) - -#else /* GIFT128_VARIANT_TINY */ - -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) -{ - /* Mirror the fixslicing word order of 3, 1, 2, 0 */ - ks->k[0] = be_load_word32(key + 12); - ks->k[1] = be_load_word32(key + 4); - ks->k[2] = be_load_word32(key + 8); - ks->k[3] = be_load_word32(key); -} - -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) -{ - /* Use the little-endian key byte order from the HYENA submission - * and mirror the fixslicing word order of 3, 1, 2, 0 */ - ks->k[0] = le_load_word32(key); - ks->k[1] = le_load_word32(key + 8); - ks->k[2] = le_load_word32(key + 4); - ks->k[3] = le_load_word32(key + 12); -} - -#endif /* GIFT128_VARIANT_TINY */ - -#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext 
into local variables */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. - * Every 5 rounds except the last we add the tweak value to the state */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#elif GIFT128_VARIANT == GIFT128_VARIANT_FULL - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_encrypt_5_rounds(ks->k 
+ 60, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into local variables */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. 
- * Every 5 rounds except the last we add the tweak value to the state */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#else /* GIFT128_VARIANT_TINY */ - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 
0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* AddTweak - XOR in the tweak every 5 rounds except the last */ - if (((round + 1) % 5) == 0 && round < 39) - s0 ^= tweak; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#endif /* GIFT128_VARIANT_TINY */ - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the ciphertext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = 
be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. - * Every 5 rounds except the first we add the tweak value to the state */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - - /* Pack the state into the plaintext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#else /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ - -/* The small variant uses fixslicing for encryption, but we need to change - * to bitslicing for decryption because of the difficulty of fast-forwarding - * the fixsliced key schedule to the end. So the tiny variant is used for - * decryption when the small variant is selected. Since the NIST AEAD modes - * for GIFT-128 only use the block encrypt operation, the inefficiencies - * in decryption don't matter all that much */ - -/** - * \def gift128b_load_and_forward_schedule() - * \brief Generate the decryption key at the end of the last round. - * - * To do that, we run the block operation forward to determine the - * final state of the key schedule after the last round: - * - * w0 = ks->k[0]; - * w1 = ks->k[1]; - * w2 = ks->k[2]; - * w3 = ks->k[3]; - * for (round = 0; round < 40; ++round) { - * temp = w3; - * w3 = w2; - * w2 = w1; - * w1 = w0; - * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - * } - * - * We can short-cut all of the above by noticing that we don't need - * to do the word rotations. Every 4 rounds, the rotation alignment - * returns to the original position and each word has been rotated - * by applying the "2 right and 4 left" bit-rotation step to it. - * We then repeat that 10 times for the full 40 rounds. The overall - * effect is to apply a "20 right and 40 left" bit-rotation to every - * word in the key schedule. That is equivalent to "4 right and 8 left" - * on the 16-bit sub-words. 
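 *
 * As an illustrative sketch only (the helper name below is hypothetical and
 * not part of this file), the short-cut can be checked by folding the ten
 * rotations that each word receives over the 40 rounds into a single call
 * and comparing the result against the "4 right and 8 left" halfword
 * rotation used by the macro:
 *
 *     static uint32_t gift128b_fast_forward_word(uint32_t w)
 *     {
 *         int i;
 *         for (i = 0; i < 10; ++i) {
 *             w = ((w & 0xFFFC0000U) >> 2) | ((w & 0x00030000U) << 14) |
 *                 ((w & 0x00000FFFU) << 4) | ((w & 0x0000F000U) >> 12);
 *         }
 *         return w;
 *     }
 *
 * For every 32-bit input this should agree with
 * ((w & 0xFFF00000U) >> 4) | ((w & 0x000F0000U) << 12) |
 * ((w & 0x000000FFU) << 8) | ((w & 0x0000FF00U) >> 8),
 * which is exactly the combined rotation applied below.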
- */ -#if GIFT128_VARIANT != GIFT128_VARIANT_SMALL -#define gift128b_load_and_forward_schedule() \ - do { \ - w0 = ks->k[3]; \ - w1 = ks->k[1]; \ - w2 = ks->k[2]; \ - w3 = ks->k[0]; \ - w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ - ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ - w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ - ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ - w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ - ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ - w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ - ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ - } while (0) -#else -/* The small variant needs to also undo some of the rotations that were - * done to generate the fixsliced version of the key schedule */ -#define gift128b_load_and_forward_schedule() \ - do { \ - w0 = ks->k[3]; \ - w1 = ks->k[1]; \ - w2 = ks->k[2]; \ - w3 = ks->k[0]; \ - gift128b_swap_move(w3, w3, 0x000000FFU, 24); \ - gift128b_swap_move(w3, w3, 0x00003333U, 18); \ - gift128b_swap_move(w3, w3, 0x000F000FU, 12); \ - gift128b_swap_move(w3, w3, 0x00550055U, 9); \ - gift128b_swap_move(w1, w1, 0x000000FFU, 24); \ - gift128b_swap_move(w1, w1, 0x00003333U, 18); \ - gift128b_swap_move(w1, w1, 0x000F000FU, 12); \ - gift128b_swap_move(w1, w1, 0x00550055U, 9); \ - gift128b_swap_move(w2, w2, 0x000000FFU, 24); \ - gift128b_swap_move(w2, w2, 0x000F000FU, 12); \ - gift128b_swap_move(w2, w2, 0x03030303U, 6); \ - gift128b_swap_move(w2, w2, 0x11111111U, 3); \ - gift128b_swap_move(w0, w0, 0x000000FFU, 24); \ - gift128b_swap_move(w0, w0, 0x000F000FU, 12); \ - gift128b_swap_move(w0, w0, 0x03030303U, 6); \ - gift128b_swap_move(w0, w0, 0x11111111U, 3); \ - w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ - ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ - w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ - ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ - w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ - ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ - w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ - ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ - } while (0) -#endif - -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the ciphertext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Generate the decryption key at the end of the last round */ - gift128b_load_and_forward_schedule(); - - /* Perform all 40 rounds */ - for (round = 40; round > 0; --round) { - /* Rotate the key schedule backwards */ - temp = w0; - w0 = w1; - w1 = w2; - w2 = w3; - w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | - ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; - - /* InvPermBits - apply the inverse of the 128-bit permutation */ - INV_PERM0(s0); - INV_PERM1(s1); - INV_PERM2(s2); - INV_PERM3(s3); - - /* InvSubCells - apply the inverse of the S-box */ - temp = s0; - s0 = s3; - s3 = temp; - s2 ^= s0 & s1; - s3 ^= 0xFFFFFFFFU; - s1 ^= s3; - s3 ^= s2; - s2 ^= s0 | s1; - s0 ^= s1 
& s3; - s1 ^= s0 & s2; - } - - /* Pack the state into the plaintext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Generate the decryption key at the end of the last round */ - gift128b_load_and_forward_schedule(); - - /* Perform all 40 rounds */ - for (round = 40; round > 0; --round) { - /* Rotate the key schedule backwards */ - temp = w0; - w0 = w1; - w1 = w2; - w2 = w3; - w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | - ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); - - /* AddTweak - XOR in the tweak every 5 rounds except the last */ - if ((round % 5) == 0 && round < 40) - s0 ^= tweak; - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; - - /* InvPermBits - apply the inverse of the 128-bit permutation */ - INV_PERM0(s0); - INV_PERM1(s1); - INV_PERM2(s2); - INV_PERM3(s3); - - /* InvSubCells - apply the inverse of the S-box */ - temp = s0; - s0 = s3; - s3 = temp; - s2 ^= s0 & s1; - s3 ^= 0xFFFFFFFFU; - s1 ^= s3; - s3 ^= s2; - s2 ^= s0 | s1; - s0 ^= s1 & s3; - s1 ^= s0 & s2; - } - - /* Pack the state into the plaintext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#endif /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ - -#endif /* !GIFT128_VARIANT_ASM */ diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128.h b/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128.h deleted file mode 100644 index f57d143..0000000 --- a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128.h +++ /dev/null @@ -1,246 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LW_INTERNAL_GIFT128_H -#define LW_INTERNAL_GIFT128_H - -/** - * \file internal-gift128.h - * \brief GIFT-128 block cipher. - * - * There are three versions of GIFT-128 in use within the second round - * submissions to the NIST lightweight cryptography competition. - * - * The most efficient version for 32-bit software implementation is the - * GIFT-128-b bit-sliced version from GIFT-COFB and SUNDAE-GIFT. - * - * The second is the nibble-based version from HYENA. We implement the - * HYENA version as a wrapper around the bit-sliced version. - * - * The third version is a variant on the HYENA nibble-based version that - * includes a 4-bit tweak value for domain separation. It is used by - * the ESTATE submission to NIST. - * - * Technically there is a fourth version of GIFT-128 which is the one that - * appeared in the original GIFT-128 paper. It is almost the same as the - * HYENA version except that the byte ordering is big-endian instead of - * HYENA's little-endian. The original version of GIFT-128 doesn't appear - * in any of the NIST submissions so we don't bother with it in this library. - * - * References: https://eprint.iacr.org/2017/622.pdf, - * https://eprint.iacr.org/2020/412.pdf, - * https://giftcipher.github.io/gift/ - */ - -#include -#include -#include "internal-gift128-config.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of a GIFT-128 block in bytes. - */ -#define GIFT128_BLOCK_SIZE 16 - -/** - * \var GIFT128_ROUND_KEYS - * \brief Number of round keys for the GIFT-128 key schedule. - */ -#if GIFT128_VARIANT == GIFT128_VARIANT_TINY -#define GIFT128_ROUND_KEYS 4 -#elif GIFT128_VARIANT == GIFT128_VARIANT_SMALL -#define GIFT128_ROUND_KEYS 20 -#else -#define GIFT128_ROUND_KEYS 80 -#endif - -/** - * \brief Structure of the key schedule for GIFT-128 (bit-sliced). - */ -typedef struct -{ - /** Pre-computed round keys for bit-sliced GIFT-128 */ - uint32_t k[GIFT128_ROUND_KEYS]; - -} gift128b_key_schedule_t; - -/** - * \brief Initializes the key schedule for GIFT-128 (bit-sliced). - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the 16 bytes of the key data. - */ -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced and pre-loaded). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version assumes that the input has already been pre-loaded from - * big-endian into host byte order in the supplied word array. The output - * is delivered in the same way. - */ -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]); - -/** - * \brief Decrypts a 128-bit block with GIFT-128 (bit-sliced). 
- * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - */ -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Structure of the key schedule for GIFT-128 (nibble-based). - */ -typedef gift128b_key_schedule_t gift128n_key_schedule_t; - -/** - * \brief Initializes the key schedule for GIFT-128 (nibble-based). - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the 16 bytes of the key data. - */ -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (nibble-based). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void gift128n_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with GIFT-128 (nibble-based). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - */ -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/* 4-bit tweak values expanded to 32-bit for TweGIFT-128 */ -#define GIFT128T_TWEAK_0 0x00000000 /**< TweGIFT-128 tweak value 0 */ -#define GIFT128T_TWEAK_1 0xe1e1e1e1 /**< TweGIFT-128 tweak value 1 */ -#define GIFT128T_TWEAK_2 0xd2d2d2d2 /**< TweGIFT-128 tweak value 2 */ -#define GIFT128T_TWEAK_3 0x33333333 /**< TweGIFT-128 tweak value 3 */ -#define GIFT128T_TWEAK_4 0xb4b4b4b4 /**< TweGIFT-128 tweak value 4 */ -#define GIFT128T_TWEAK_5 0x55555555 /**< TweGIFT-128 tweak value 5 */ -#define GIFT128T_TWEAK_6 0x66666666 /**< TweGIFT-128 tweak value 6 */ -#define GIFT128T_TWEAK_7 0x87878787 /**< TweGIFT-128 tweak value 7 */ -#define GIFT128T_TWEAK_8 0x78787878 /**< TweGIFT-128 tweak value 8 */ -#define GIFT128T_TWEAK_9 0x99999999 /**< TweGIFT-128 tweak value 9 */ -#define GIFT128T_TWEAK_10 0xaaaaaaaa /**< TweGIFT-128 tweak value 10 */ -#define GIFT128T_TWEAK_11 0x4b4b4b4b /**< TweGIFT-128 tweak value 11 */ -#define GIFT128T_TWEAK_12 0xcccccccc /**< TweGIFT-128 tweak value 12 */ -#define GIFT128T_TWEAK_13 0x2d2d2d2d /**< TweGIFT-128 tweak value 13 */ -#define GIFT128T_TWEAK_14 0x1e1e1e1e /**< TweGIFT-128 tweak value 14 */ -#define GIFT128T_TWEAK_15 0xffffffff /**< TweGIFT-128 tweak value 15 */ - -/** - * \brief Encrypts a 128-bit block with TweGIFT-128 (tweakable variant). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value expanded to 32-bit. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. 
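 *
 * A minimal usage sketch under stated assumptions (the 16-byte buffers
 * named key and block are hypothetical and not declared by this header):
 *
 *     gift128n_key_schedule_t ks;
 *     gift128n_init(&ks, key);
 *     gift128t_encrypt(&ks, block, block, GIFT128T_TWEAK_1);
 *
 * Choosing a different GIFT128T_TWEAK_x constant selects a
 * domain-separated instance of the cipher.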
- * - * This variant of GIFT-128 is used by the ESTATE submission to the - * NIST Lightweight Cryptography Competition. A 4-bit tweak is added to - * some of the rounds to provide domain separation. If the tweak is - * zero, then this function is identical to gift128n_encrypt(). - */ -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak); - -/** - * \brief Decrypts a 128-bit block with TweGIFT-128 (tweakable variant). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value expanded to 32-bit. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This variant of GIFT-128 is used by the ESTATE submission to the - * NIST Lightweight Cryptography Competition. A 4-bit tweak is added to - * some of the rounds to provide domain separation. If the tweak is - * zero, then this function is identical to gift128n_encrypt(). - */ -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128n-avr.S b/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128n-avr.S deleted file mode 100644 index 2aae304..0000000 --- a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128n-avr.S +++ /dev/null @@ -1,4712 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 40 -table_0: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - - .text -.global gift128n_init - .type gift128n_init, @function -gift128n_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - std Z+12,r18 - std Z+13,r19 - std Z+14,r20 - std Z+15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - std Z+4,r18 - std Z+5,r19 - std Z+6,r20 - std Z+7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - ret - .size gift128n_init, .-gift128n_init - - .text -.global gift128n_encrypt - .type gift128n_encrypt, @function -gift128n_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 36 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r22,0 - bst r18,1 - bld r4,0 - bst r18,2 - bld r8,0 - bst r18,3 - bld r12,0 - bst r18,4 - bld r22,1 - bst r18,5 - bld r4,1 - bst r18,6 - bld r8,1 - bst r18,7 - bld r12,1 - bst 
r19,0 - bld r22,2 - bst r19,1 - bld r4,2 - bst r19,2 - bld r8,2 - bst r19,3 - bld r12,2 - bst r19,4 - bld r22,3 - bst r19,5 - bld r4,3 - bst r19,6 - bld r8,3 - bst r19,7 - bld r12,3 - bst r20,0 - bld r22,4 - bst r20,1 - bld r4,4 - bst r20,2 - bld r8,4 - bst r20,3 - bld r12,4 - bst r20,4 - bld r22,5 - bst r20,5 - bld r4,5 - bst r20,6 - bld r8,5 - bst r20,7 - bld r12,5 - bst r21,0 - bld r22,6 - bst r21,1 - bld r4,6 - bst r21,2 - bld r8,6 - bst r21,3 - bld r12,6 - bst r21,4 - bld r22,7 - bst r21,5 - bld r4,7 - bst r21,6 - bld r8,7 - bst r21,7 - bld r12,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r23,0 - bst r18,1 - bld r5,0 - bst r18,2 - bld r9,0 - bst r18,3 - bld r13,0 - bst r18,4 - bld r23,1 - bst r18,5 - bld r5,1 - bst r18,6 - bld r9,1 - bst r18,7 - bld r13,1 - bst r19,0 - bld r23,2 - bst r19,1 - bld r5,2 - bst r19,2 - bld r9,2 - bst r19,3 - bld r13,2 - bst r19,4 - bld r23,3 - bst r19,5 - bld r5,3 - bst r19,6 - bld r9,3 - bst r19,7 - bld r13,3 - bst r20,0 - bld r23,4 - bst r20,1 - bld r5,4 - bst r20,2 - bld r9,4 - bst r20,3 - bld r13,4 - bst r20,4 - bld r23,5 - bst r20,5 - bld r5,5 - bst r20,6 - bld r9,5 - bst r20,7 - bld r13,5 - bst r21,0 - bld r23,6 - bst r21,1 - bld r5,6 - bst r21,2 - bld r9,6 - bst r21,3 - bld r13,6 - bst r21,4 - bld r23,7 - bst r21,5 - bld r5,7 - bst r21,6 - bld r9,7 - bst r21,7 - bld r13,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r2,0 - bst r18,1 - bld r6,0 - bst r18,2 - bld r10,0 - bst r18,3 - bld r14,0 - bst r18,4 - bld r2,1 - bst r18,5 - bld r6,1 - bst r18,6 - bld r10,1 - bst r18,7 - bld r14,1 - bst r19,0 - bld r2,2 - bst r19,1 - bld r6,2 - bst r19,2 - bld r10,2 - bst r19,3 - bld r14,2 - bst r19,4 - bld r2,3 - bst r19,5 - bld r6,3 - bst r19,6 - bld r10,3 - bst r19,7 - bld r14,3 - bst r20,0 - bld r2,4 - bst r20,1 - bld r6,4 - bst r20,2 - bld r10,4 - bst r20,3 - bld r14,4 - bst r20,4 - bld r2,5 - bst r20,5 - bld r6,5 - bst r20,6 - bld r10,5 - bst r20,7 - bld r14,5 - bst r21,0 - bld r2,6 - bst r21,1 - bld r6,6 - bst r21,2 - bld r10,6 - bst r21,3 - bld r14,6 - bst r21,4 - bld r2,7 - bst r21,5 - bld r6,7 - bst r21,6 - bld r10,7 - bst r21,7 - bld r14,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r3,0 - bst r18,1 - bld r7,0 - bst r18,2 - bld r11,0 - bst r18,3 - bld r15,0 - bst r18,4 - bld r3,1 - bst r18,5 - bld r7,1 - bst r18,6 - bld r11,1 - bst r18,7 - bld r15,1 - bst r19,0 - bld r3,2 - bst r19,1 - bld r7,2 - bst r19,2 - bld r11,2 - bst r19,3 - bld r15,2 - bst r19,4 - bld r3,3 - bst r19,5 - bld r7,3 - bst r19,6 - bld r11,3 - bst r19,7 - bld r15,3 - bst r20,0 - bld r3,4 - bst r20,1 - bld r7,4 - bst r20,2 - bld r11,4 - bst r20,3 - bld r15,4 - bst r20,4 - bld r3,5 - bst r20,5 - bld r7,5 - bst r20,6 - bld r11,5 - bst r20,7 - bld r15,5 - bst r21,0 - bld r3,6 - bst r21,1 - bld r7,6 - bst r21,2 - bld r11,6 - bst r21,3 - bld r15,6 - bst r21,4 - bld r3,7 - bst r21,5 - bld r7,7 - bst r21,6 - bld r11,7 - bst r21,7 - bld r15,7 - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r17,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - mov r16,r1 -302: - rcall 455f - 
ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - rcall 455f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - rcall 455f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - rcall 455f - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - ldi r17,40 - cpse r16,r17 - rjmp 302b - rjmp 804f -455: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - movw r18,r22 - movw r20,r2 - mov r0,r4 - and r0,r18 - eor r8,r0 - mov r0,r5 - and r0,r19 - eor r9,r0 - mov r0,r6 - and r0,r20 - eor r10,r0 - mov r0,r7 - and r0,r21 - eor r11,r0 - movw r22,r12 - movw r2,r14 - movw r12,r18 - movw r14,r20 - bst r22,1 - bld r0,0 - bst r22,4 - bld r22,1 - bst r2,0 - bld r22,4 - bst r22,2 - bld r2,0 - bst r23,0 - bld r22,2 - bst r22,3 - bld r23,0 - bst r23,4 - bld r22,3 - bst r2,3 - bld r23,4 - bst r23,6 - bld r2,3 - bst r3,3 - bld r23,6 - bst r23,5 - bld r3,3 - bst r2,7 - bld r23,5 - bst r3,6 - bld r2,7 - bst r3,1 - bld r3,6 - bst r22,5 - bld r3,1 - bst r2,4 - bld r22,5 - bst r2,2 - bld r2,4 - bst r23,2 - bld r2,2 - bst r23,3 - bld r23,2 - bst r23,7 - bld r23,3 - bst r3,7 - bld r23,7 - bst r3,5 - bld r3,7 - bst r2,5 - bld r3,5 - bst r2,6 - bld r2,5 - bst r3,2 - bld r2,6 - bst r23,1 - bld r3,2 - bst r22,7 - bld r23,1 - bst r3,4 - bld r22,7 - bst r2,1 - bld r3,4 - bst r22,6 - bld r2,1 - bst r3,0 - bld r22,6 - bst r0,0 - bld r3,0 - bst r4,0 - bld r0,0 - bst r4,1 - bld r4,0 - bst r4,5 - bld r4,1 - bst r6,5 - bld r4,5 - bst r6,7 - bld r6,5 - bst r7,7 - bld r6,7 - bst r7,6 - bld r7,7 - bst r7,2 - 
bld r7,6 - bst r5,2 - bld r7,2 - bst r5,0 - bld r5,2 - bst r0,0 - bld r5,0 - bst r4,2 - bld r0,0 - bst r5,1 - bld r4,2 - bst r4,4 - bld r5,1 - bst r6,1 - bld r4,4 - bst r4,7 - bld r6,1 - bst r7,5 - bld r4,7 - bst r6,6 - bld r7,5 - bst r7,3 - bld r6,6 - bst r5,6 - bld r7,3 - bst r7,0 - bld r5,6 - bst r0,0 - bld r7,0 - bst r4,3 - bld r0,0 - bst r5,5 - bld r4,3 - bst r6,4 - bld r5,5 - bst r6,3 - bld r6,4 - bst r5,7 - bld r6,3 - bst r7,4 - bld r5,7 - bst r6,2 - bld r7,4 - bst r5,3 - bld r6,2 - bst r5,4 - bld r5,3 - bst r6,0 - bld r5,4 - bst r0,0 - bld r6,0 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r8,2 - bld r8,0 - bst r9,2 - bld r8,2 - bst r9,1 - bld r9,2 - bst r8,5 - bld r9,1 - bst r10,6 - bld r8,5 - bst r11,0 - bld r10,6 - bst r8,3 - bld r11,0 - bst r9,6 - bld r8,3 - bst r11,1 - bld r9,6 - bst r8,7 - bld r11,1 - bst r11,6 - bld r8,7 - bst r11,3 - bld r11,6 - bst r9,7 - bld r11,3 - bst r11,5 - bld r9,7 - bst r10,7 - bld r11,5 - bst r11,4 - bld r10,7 - bst r10,3 - bld r11,4 - bst r9,4 - bld r10,3 - bst r10,1 - bld r9,4 - bst r8,4 - bld r10,1 - bst r10,2 - bld r8,4 - bst r9,0 - bld r10,2 - bst r8,1 - bld r9,0 - bst r8,6 - bld r8,1 - bst r11,2 - bld r8,6 - bst r9,3 - bld r11,2 - bst r9,5 - bld r9,3 - bst r10,5 - bld r9,5 - bst r10,4 - bld r10,5 - bst r10,0 - bld r10,4 - bst r0,0 - bld r10,0 - bst r12,0 - bld r0,0 - bst r12,3 - bld r12,0 - bst r13,7 - bld r12,3 - bst r15,6 - bld r13,7 - bst r15,0 - bld r15,6 - bst r0,0 - bld r15,0 - bst r12,1 - bld r0,0 - bst r12,7 - bld r12,1 - bst r15,7 - bld r12,7 - bst r15,4 - bld r15,7 - bst r14,0 - bld r15,4 - bst r0,0 - bld r14,0 - bst r12,2 - bld r0,0 - bst r13,3 - bld r12,2 - bst r13,6 - bld r13,3 - bst r15,2 - bld r13,6 - bst r13,0 - bld r15,2 - bst r0,0 - bld r13,0 - bst r12,4 - bld r0,0 - bst r14,3 - bld r12,4 - bst r13,5 - bld r14,3 - bst r14,6 - bld r13,5 - bst r15,1 - bld r14,6 - bst r0,0 - bld r15,1 - bst r12,5 - bld r0,0 - bst r14,7 - bld r12,5 - bst r15,5 - bld r14,7 - bst r14,4 - bld r15,5 - bst r14,1 - bld r14,4 - bst r0,0 - bld r14,1 - bst r12,6 - bld r0,0 - bst r15,3 - bld r12,6 - bst r13,4 - bld r15,3 - bst r14,2 - bld r13,4 - bst r13,1 - bld r14,2 - bst r0,0 - bld r13,1 - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - inc r16 - ret -804: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - bst r22,0 - bld r18,0 - bst r4,0 - bld r18,1 - bst r8,0 - bld r18,2 - bst r12,0 - bld r18,3 - bst r22,1 - bld r18,4 - bst r4,1 - bld r18,5 - bst r8,1 - bld r18,6 - bst r12,1 - bld r18,7 - bst r22,2 - bld r19,0 - bst r4,2 - bld r19,1 - bst r8,2 - bld r19,2 - bst r12,2 - bld r19,3 - bst r22,3 - bld r19,4 - bst r4,3 - bld r19,5 - bst r8,3 - bld r19,6 - bst r12,3 - bld r19,7 - bst r22,4 - bld r20,0 - bst r4,4 - bld r20,1 - bst r8,4 - bld r20,2 - bst r12,4 - bld r20,3 - bst r22,5 - bld r20,4 - bst r4,5 - bld r20,5 - bst r8,5 - bld r20,6 - bst r12,5 - bld r20,7 - bst r22,6 - bld r21,0 - bst r4,6 - bld r21,1 - bst r8,6 - bld r21,2 - bst r12,6 - bld r21,3 - bst r22,7 - bld r21,4 - bst r4,7 - bld r21,5 - bst r8,7 - bld r21,6 - bst r12,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r23,0 - bld r18,0 - bst r5,0 - bld r18,1 - bst r9,0 - bld r18,2 - bst r13,0 - bld r18,3 - bst r23,1 - bld r18,4 - bst r5,1 - bld r18,5 - bst r9,1 - bld r18,6 
- bst r13,1 - bld r18,7 - bst r23,2 - bld r19,0 - bst r5,2 - bld r19,1 - bst r9,2 - bld r19,2 - bst r13,2 - bld r19,3 - bst r23,3 - bld r19,4 - bst r5,3 - bld r19,5 - bst r9,3 - bld r19,6 - bst r13,3 - bld r19,7 - bst r23,4 - bld r20,0 - bst r5,4 - bld r20,1 - bst r9,4 - bld r20,2 - bst r13,4 - bld r20,3 - bst r23,5 - bld r20,4 - bst r5,5 - bld r20,5 - bst r9,5 - bld r20,6 - bst r13,5 - bld r20,7 - bst r23,6 - bld r21,0 - bst r5,6 - bld r21,1 - bst r9,6 - bld r21,2 - bst r13,6 - bld r21,3 - bst r23,7 - bld r21,4 - bst r5,7 - bld r21,5 - bst r9,7 - bld r21,6 - bst r13,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r2,0 - bld r18,0 - bst r6,0 - bld r18,1 - bst r10,0 - bld r18,2 - bst r14,0 - bld r18,3 - bst r2,1 - bld r18,4 - bst r6,1 - bld r18,5 - bst r10,1 - bld r18,6 - bst r14,1 - bld r18,7 - bst r2,2 - bld r19,0 - bst r6,2 - bld r19,1 - bst r10,2 - bld r19,2 - bst r14,2 - bld r19,3 - bst r2,3 - bld r19,4 - bst r6,3 - bld r19,5 - bst r10,3 - bld r19,6 - bst r14,3 - bld r19,7 - bst r2,4 - bld r20,0 - bst r6,4 - bld r20,1 - bst r10,4 - bld r20,2 - bst r14,4 - bld r20,3 - bst r2,5 - bld r20,4 - bst r6,5 - bld r20,5 - bst r10,5 - bld r20,6 - bst r14,5 - bld r20,7 - bst r2,6 - bld r21,0 - bst r6,6 - bld r21,1 - bst r10,6 - bld r21,2 - bst r14,6 - bld r21,3 - bst r2,7 - bld r21,4 - bst r6,7 - bld r21,5 - bst r10,7 - bld r21,6 - bst r14,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r3,0 - bld r18,0 - bst r7,0 - bld r18,1 - bst r11,0 - bld r18,2 - bst r15,0 - bld r18,3 - bst r3,1 - bld r18,4 - bst r7,1 - bld r18,5 - bst r11,1 - bld r18,6 - bst r15,1 - bld r18,7 - bst r3,2 - bld r19,0 - bst r7,2 - bld r19,1 - bst r11,2 - bld r19,2 - bst r15,2 - bld r19,3 - bst r3,3 - bld r19,4 - bst r7,3 - bld r19,5 - bst r11,3 - bld r19,6 - bst r15,3 - bld r19,7 - bst r3,4 - bld r20,0 - bst r7,4 - bld r20,1 - bst r11,4 - bld r20,2 - bst r15,4 - bld r20,3 - bst r3,5 - bld r20,4 - bst r7,5 - bld r20,5 - bst r11,5 - bld r20,6 - bst r15,5 - bld r20,7 - bst r3,6 - bld r21,0 - bst r7,6 - bld r21,1 - bst r11,6 - bld r21,2 - bst r15,6 - bld r21,3 - bst r3,7 - bld r21,4 - bst r7,7 - bld r21,5 - bst r11,7 - bld r21,6 - bst r15,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128n_encrypt, .-gift128n_encrypt - - .text -.global gift128n_decrypt - .type gift128n_decrypt, @function -gift128n_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r22,0 - bst r18,1 - bld r4,0 - bst r18,2 - bld r8,0 - bst r18,3 - bld r12,0 - bst r18,4 - bld r22,1 - bst r18,5 - bld r4,1 - bst r18,6 - bld r8,1 - bst r18,7 - bld r12,1 - bst r19,0 - bld r22,2 - bst r19,1 - bld r4,2 - bst r19,2 - bld r8,2 - bst r19,3 - bld r12,2 - bst r19,4 - bld r22,3 - bst r19,5 - bld r4,3 - bst r19,6 - bld r8,3 - bst r19,7 - bld r12,3 - bst r20,0 - bld r22,4 - bst r20,1 - bld r4,4 - bst r20,2 - bld r8,4 - bst r20,3 - bld r12,4 - bst r20,4 - bld r22,5 - bst r20,5 - bld r4,5 - 
bst r20,6 - bld r8,5 - bst r20,7 - bld r12,5 - bst r21,0 - bld r22,6 - bst r21,1 - bld r4,6 - bst r21,2 - bld r8,6 - bst r21,3 - bld r12,6 - bst r21,4 - bld r22,7 - bst r21,5 - bld r4,7 - bst r21,6 - bld r8,7 - bst r21,7 - bld r12,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r23,0 - bst r18,1 - bld r5,0 - bst r18,2 - bld r9,0 - bst r18,3 - bld r13,0 - bst r18,4 - bld r23,1 - bst r18,5 - bld r5,1 - bst r18,6 - bld r9,1 - bst r18,7 - bld r13,1 - bst r19,0 - bld r23,2 - bst r19,1 - bld r5,2 - bst r19,2 - bld r9,2 - bst r19,3 - bld r13,2 - bst r19,4 - bld r23,3 - bst r19,5 - bld r5,3 - bst r19,6 - bld r9,3 - bst r19,7 - bld r13,3 - bst r20,0 - bld r23,4 - bst r20,1 - bld r5,4 - bst r20,2 - bld r9,4 - bst r20,3 - bld r13,4 - bst r20,4 - bld r23,5 - bst r20,5 - bld r5,5 - bst r20,6 - bld r9,5 - bst r20,7 - bld r13,5 - bst r21,0 - bld r23,6 - bst r21,1 - bld r5,6 - bst r21,2 - bld r9,6 - bst r21,3 - bld r13,6 - bst r21,4 - bld r23,7 - bst r21,5 - bld r5,7 - bst r21,6 - bld r9,7 - bst r21,7 - bld r13,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r2,0 - bst r18,1 - bld r6,0 - bst r18,2 - bld r10,0 - bst r18,3 - bld r14,0 - bst r18,4 - bld r2,1 - bst r18,5 - bld r6,1 - bst r18,6 - bld r10,1 - bst r18,7 - bld r14,1 - bst r19,0 - bld r2,2 - bst r19,1 - bld r6,2 - bst r19,2 - bld r10,2 - bst r19,3 - bld r14,2 - bst r19,4 - bld r2,3 - bst r19,5 - bld r6,3 - bst r19,6 - bld r10,3 - bst r19,7 - bld r14,3 - bst r20,0 - bld r2,4 - bst r20,1 - bld r6,4 - bst r20,2 - bld r10,4 - bst r20,3 - bld r14,4 - bst r20,4 - bld r2,5 - bst r20,5 - bld r6,5 - bst r20,6 - bld r10,5 - bst r20,7 - bld r14,5 - bst r21,0 - bld r2,6 - bst r21,1 - bld r6,6 - bst r21,2 - bld r10,6 - bst r21,3 - bld r14,6 - bst r21,4 - bld r2,7 - bst r21,5 - bld r6,7 - bst r21,6 - bld r10,7 - bst r21,7 - bld r14,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r3,0 - bst r18,1 - bld r7,0 - bst r18,2 - bld r11,0 - bst r18,3 - bld r15,0 - bst r18,4 - bld r3,1 - bst r18,5 - bld r7,1 - bst r18,6 - bld r11,1 - bst r18,7 - bld r15,1 - bst r19,0 - bld r3,2 - bst r19,1 - bld r7,2 - bst r19,2 - bld r11,2 - bst r19,3 - bld r15,2 - bst r19,4 - bld r3,3 - bst r19,5 - bld r7,3 - bst r19,6 - bld r11,3 - bst r19,7 - bld r15,3 - bst r20,0 - bld r3,4 - bst r20,1 - bld r7,4 - bst r20,2 - bld r11,4 - bst r20,3 - bld r15,4 - bst r20,4 - bld r3,5 - bst r20,5 - bld r7,5 - bst r20,6 - bld r11,5 - bst r20,7 - bld r15,5 - bst r21,0 - bld r3,6 - bst r21,1 - bld r7,6 - bst r21,2 - bld r11,6 - bst r21,3 - bld r15,6 - bst r21,4 - bld r3,7 - bst r21,5 - bld r7,7 - bst r21,6 - bld r11,7 - bst r21,7 - bld r15,7 - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Z+12 - ldd r27,Z+13 - ldd 
r24,Z+14 - ldd r25,Z+15 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r17,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -370: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 522f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 522f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 522f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 522f - cpse r16,r1 - rjmp 370b - rjmp 867f -522: - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - 
bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - bld r11,3 - bst r8,7 - bld r11,6 - bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst r9,1 - bld r8,5 - bst r9,2 - bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -867: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - bst r22,0 - bld r18,0 - bst r4,0 - bld r18,1 - bst r8,0 - bld r18,2 - bst r12,0 - bld r18,3 - bst r22,1 - bld r18,4 - bst r4,1 - bld r18,5 - bst r8,1 - bld r18,6 - bst r12,1 - bld r18,7 - bst r22,2 - bld r19,0 - bst r4,2 - bld r19,1 - bst r8,2 - bld r19,2 - bst r12,2 - bld r19,3 - bst r22,3 - bld r19,4 - bst r4,3 - bld r19,5 - bst r8,3 - bld r19,6 - bst r12,3 - bld r19,7 - bst r22,4 - bld r20,0 - bst r4,4 - bld r20,1 - bst r8,4 - bld r20,2 - bst r12,4 - bld r20,3 - bst r22,5 - bld r20,4 - bst r4,5 - bld r20,5 - bst r8,5 - bld r20,6 - bst r12,5 - bld r20,7 - bst r22,6 - bld r21,0 - bst r4,6 - bld r21,1 - bst r8,6 - bld r21,2 - bst r12,6 - bld 
r21,3 - bst r22,7 - bld r21,4 - bst r4,7 - bld r21,5 - bst r8,7 - bld r21,6 - bst r12,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r23,0 - bld r18,0 - bst r5,0 - bld r18,1 - bst r9,0 - bld r18,2 - bst r13,0 - bld r18,3 - bst r23,1 - bld r18,4 - bst r5,1 - bld r18,5 - bst r9,1 - bld r18,6 - bst r13,1 - bld r18,7 - bst r23,2 - bld r19,0 - bst r5,2 - bld r19,1 - bst r9,2 - bld r19,2 - bst r13,2 - bld r19,3 - bst r23,3 - bld r19,4 - bst r5,3 - bld r19,5 - bst r9,3 - bld r19,6 - bst r13,3 - bld r19,7 - bst r23,4 - bld r20,0 - bst r5,4 - bld r20,1 - bst r9,4 - bld r20,2 - bst r13,4 - bld r20,3 - bst r23,5 - bld r20,4 - bst r5,5 - bld r20,5 - bst r9,5 - bld r20,6 - bst r13,5 - bld r20,7 - bst r23,6 - bld r21,0 - bst r5,6 - bld r21,1 - bst r9,6 - bld r21,2 - bst r13,6 - bld r21,3 - bst r23,7 - bld r21,4 - bst r5,7 - bld r21,5 - bst r9,7 - bld r21,6 - bst r13,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r2,0 - bld r18,0 - bst r6,0 - bld r18,1 - bst r10,0 - bld r18,2 - bst r14,0 - bld r18,3 - bst r2,1 - bld r18,4 - bst r6,1 - bld r18,5 - bst r10,1 - bld r18,6 - bst r14,1 - bld r18,7 - bst r2,2 - bld r19,0 - bst r6,2 - bld r19,1 - bst r10,2 - bld r19,2 - bst r14,2 - bld r19,3 - bst r2,3 - bld r19,4 - bst r6,3 - bld r19,5 - bst r10,3 - bld r19,6 - bst r14,3 - bld r19,7 - bst r2,4 - bld r20,0 - bst r6,4 - bld r20,1 - bst r10,4 - bld r20,2 - bst r14,4 - bld r20,3 - bst r2,5 - bld r20,4 - bst r6,5 - bld r20,5 - bst r10,5 - bld r20,6 - bst r14,5 - bld r20,7 - bst r2,6 - bld r21,0 - bst r6,6 - bld r21,1 - bst r10,6 - bld r21,2 - bst r14,6 - bld r21,3 - bst r2,7 - bld r21,4 - bst r6,7 - bld r21,5 - bst r10,7 - bld r21,6 - bst r14,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r3,0 - bld r18,0 - bst r7,0 - bld r18,1 - bst r11,0 - bld r18,2 - bst r15,0 - bld r18,3 - bst r3,1 - bld r18,4 - bst r7,1 - bld r18,5 - bst r11,1 - bld r18,6 - bst r15,1 - bld r18,7 - bst r3,2 - bld r19,0 - bst r7,2 - bld r19,1 - bst r11,2 - bld r19,2 - bst r15,2 - bld r19,3 - bst r3,3 - bld r19,4 - bst r7,3 - bld r19,5 - bst r11,3 - bld r19,6 - bst r15,3 - bld r19,7 - bst r3,4 - bld r20,0 - bst r7,4 - bld r20,1 - bst r11,4 - bld r20,2 - bst r15,4 - bld r20,3 - bst r3,5 - bld r20,4 - bst r7,5 - bld r20,5 - bst r11,5 - bld r20,6 - bst r15,5 - bld r20,7 - bst r3,6 - bld r21,0 - bst r7,6 - bld r21,1 - bst r11,6 - bld r21,2 - bst r15,6 - bld r21,3 - bst r3,7 - bld r21,4 - bst r7,7 - bld r21,5 - bst r11,7 - bld r21,6 - bst r15,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128n_decrypt, .-gift128n_decrypt - - .text -.global gift128t_encrypt - .type gift128t_encrypt, @function -gift128t_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 36 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r2,0 - bst r20,1 - bld r6,0 - bst r20,2 - bld r10,0 - bst r20,3 - bld r14,0 - bst r20,4 - bld r2,1 - bst r20,5 - bld r6,1 - bst r20,6 - bld r10,1 - bst r20,7 - bld r14,1 - bst r21,0 - bld 
r2,2 - bst r21,1 - bld r6,2 - bst r21,2 - bld r10,2 - bst r21,3 - bld r14,2 - bst r21,4 - bld r2,3 - bst r21,5 - bld r6,3 - bst r21,6 - bld r10,3 - bst r21,7 - bld r14,3 - bst r22,0 - bld r2,4 - bst r22,1 - bld r6,4 - bst r22,2 - bld r10,4 - bst r22,3 - bld r14,4 - bst r22,4 - bld r2,5 - bst r22,5 - bld r6,5 - bst r22,6 - bld r10,5 - bst r22,7 - bld r14,5 - bst r23,0 - bld r2,6 - bst r23,1 - bld r6,6 - bst r23,2 - bld r10,6 - bst r23,3 - bld r14,6 - bst r23,4 - bld r2,7 - bst r23,5 - bld r6,7 - bst r23,6 - bld r10,7 - bst r23,7 - bld r14,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r3,0 - bst r20,1 - bld r7,0 - bst r20,2 - bld r11,0 - bst r20,3 - bld r15,0 - bst r20,4 - bld r3,1 - bst r20,5 - bld r7,1 - bst r20,6 - bld r11,1 - bst r20,7 - bld r15,1 - bst r21,0 - bld r3,2 - bst r21,1 - bld r7,2 - bst r21,2 - bld r11,2 - bst r21,3 - bld r15,2 - bst r21,4 - bld r3,3 - bst r21,5 - bld r7,3 - bst r21,6 - bld r11,3 - bst r21,7 - bld r15,3 - bst r22,0 - bld r3,4 - bst r22,1 - bld r7,4 - bst r22,2 - bld r11,4 - bst r22,3 - bld r15,4 - bst r22,4 - bld r3,5 - bst r22,5 - bld r7,5 - bst r22,6 - bld r11,5 - bst r22,7 - bld r15,5 - bst r23,0 - bld r3,6 - bst r23,1 - bld r7,6 - bst r23,2 - bld r11,6 - bst r23,3 - bld r15,6 - bst r23,4 - bld r3,7 - bst r23,5 - bld r7,7 - bst r23,6 - bld r11,7 - bst r23,7 - bld r15,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r4,0 - bst r20,1 - bld r8,0 - bst r20,2 - bld r12,0 - bst r20,3 - bld r24,0 - bst r20,4 - bld r4,1 - bst r20,5 - bld r8,1 - bst r20,6 - bld r12,1 - bst r20,7 - bld r24,1 - bst r21,0 - bld r4,2 - bst r21,1 - bld r8,2 - bst r21,2 - bld r12,2 - bst r21,3 - bld r24,2 - bst r21,4 - bld r4,3 - bst r21,5 - bld r8,3 - bst r21,6 - bld r12,3 - bst r21,7 - bld r24,3 - bst r22,0 - bld r4,4 - bst r22,1 - bld r8,4 - bst r22,2 - bld r12,4 - bst r22,3 - bld r24,4 - bst r22,4 - bld r4,5 - bst r22,5 - bld r8,5 - bst r22,6 - bld r12,5 - bst r22,7 - bld r24,5 - bst r23,0 - bld r4,6 - bst r23,1 - bld r8,6 - bst r23,2 - bld r12,6 - bst r23,3 - bld r24,6 - bst r23,4 - bld r4,7 - bst r23,5 - bld r8,7 - bst r23,6 - bld r12,7 - bst r23,7 - bld r24,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r5,0 - bst r20,1 - bld r9,0 - bst r20,2 - bld r13,0 - bst r20,3 - bld r25,0 - bst r20,4 - bld r5,1 - bst r20,5 - bld r9,1 - bst r20,6 - bld r13,1 - bst r20,7 - bld r25,1 - bst r21,0 - bld r5,2 - bst r21,1 - bld r9,2 - bst r21,2 - bld r13,2 - bst r21,3 - bld r25,2 - bst r21,4 - bld r5,3 - bst r21,5 - bld r9,3 - bst r21,6 - bld r13,3 - bst r21,7 - bld r25,3 - bst r22,0 - bld r5,4 - bst r22,1 - bld r9,4 - bst r22,2 - bld r13,4 - bst r22,3 - bld r25,4 - bst r22,4 - bld r5,5 - bst r22,5 - bld r9,5 - bst r22,6 - bld r13,5 - bst r22,7 - bld r25,5 - bst r23,0 - bld r5,6 - bst r23,1 - bld r9,6 - bst r23,2 - bld r13,6 - bst r23,3 - bld r25,6 - bst r23,4 - bld r5,7 - bst r23,5 - bld r9,7 - bst r23,6 - bld r13,7 - bst r23,7 - bld r25,7 - ld r26,Z - ldd r27,Z+1 - ldd r16,Z+2 - ldd r17,Z+3 - std Y+1,r26 - std Y+2,r27 - std Y+3,r16 - std Y+4,r17 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r16,Z+6 - ldd r17,Z+7 - std Y+5,r26 - std Y+6,r27 - std Y+7,r16 - std Y+8,r17 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r16,Z+10 - ldd r17,Z+11 - std Y+9,r26 - std Y+10,r27 - std Y+11,r16 - std Y+12,r17 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r16,Z+14 - ldd r17,Z+15 - std Y+13,r26 - std Y+14,r27 - std Y+15,r16 - std Y+16,r17 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out 
_SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r19,r1 - mov r26,r1 -307: - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - com r14 - com r15 - com r24 - com r25 - movw r20,r2 - movw r22,r4 - mov r0,r6 - and r0,r20 - eor r10,r0 - mov r0,r7 - and r0,r21 - eor r11,r0 - mov r0,r8 - and r0,r22 - eor r12,r0 - mov r0,r9 - and r0,r23 - eor r13,r0 - movw r2,r14 - movw r4,r24 - movw r14,r20 - movw r24,r22 - bst r2,1 - bld r0,0 - bst r2,4 - bld r2,1 - bst r4,0 - bld r2,4 - bst r2,2 - bld r4,0 - bst r3,0 - bld r2,2 - bst r2,3 - bld r3,0 - bst r3,4 - bld r2,3 - bst r4,3 - bld r3,4 - bst r3,6 - bld r4,3 - bst r5,3 - bld r3,6 - bst r3,5 - bld r5,3 - bst r4,7 - bld r3,5 - bst r5,6 - bld r4,7 - bst r5,1 - bld r5,6 - bst r2,5 - bld r5,1 - bst r4,4 - bld r2,5 - bst r4,2 - bld r4,4 - bst r3,2 - bld r4,2 - bst r3,3 - bld r3,2 - bst r3,7 - bld r3,3 - bst r5,7 - bld r3,7 - bst r5,5 - bld r5,7 - bst r4,5 - bld r5,5 - bst r4,6 - bld r4,5 - bst r5,2 - bld r4,6 - bst r3,1 - bld r5,2 - bst r2,7 - bld r3,1 - bst r5,4 - bld r2,7 - bst r4,1 - bld r5,4 - bst r2,6 - bld r4,1 - bst r5,0 - bld r2,6 - bst r0,0 - bld r5,0 - bst r6,0 - bld r0,0 - bst r6,1 - bld r6,0 - bst r6,5 - bld r6,1 - bst r8,5 - bld r6,5 - bst r8,7 - bld r8,5 - bst r9,7 - bld r8,7 - bst r9,6 - bld r9,7 - bst r9,2 - bld r9,6 - bst r7,2 - bld r9,2 - bst r7,0 - bld r7,2 - bst r0,0 - bld r7,0 - bst r6,2 - bld r0,0 - bst r7,1 - bld r6,2 - bst r6,4 - bld r7,1 - bst r8,1 - bld r6,4 - bst r6,7 - bld r8,1 - bst r9,5 - bld r6,7 - bst r8,6 - bld r9,5 - bst r9,3 - bld r8,6 - bst r7,6 - bld r9,3 - bst r9,0 - bld r7,6 - bst r0,0 - bld r9,0 - bst r6,3 - bld r0,0 - bst r7,5 - bld r6,3 - bst r8,4 - bld r7,5 - bst r8,3 - bld r8,4 - bst r7,7 - bld r8,3 - bst r9,4 - bld r7,7 - bst r8,2 - bld r9,4 - bst r7,3 - bld r8,2 - bst r7,4 - bld r7,3 - bst r8,0 - bld r7,4 - bst r0,0 - bld r8,0 - bst r6,6 - bld r0,0 - bst r9,1 - bld r6,6 - bst r0,0 - bld r9,1 - bst r10,0 - bld r0,0 - bst r10,2 - bld r10,0 - bst r11,2 - bld r10,2 - bst r11,1 - bld r11,2 - bst r10,5 - bld r11,1 - bst r12,6 - bld r10,5 - bst r13,0 - bld r12,6 - bst r10,3 - bld r13,0 - bst r11,6 - bld r10,3 - bst r13,1 - bld r11,6 - bst r10,7 - bld r13,1 - bst r13,6 - bld r10,7 - bst r13,3 - bld r13,6 - bst r11,7 - bld r13,3 - bst r13,5 - bld r11,7 - bst r12,7 - bld r13,5 - bst r13,4 - bld r12,7 - bst r12,3 - bld r13,4 - bst r11,4 - bld r12,3 - bst r12,1 - bld r11,4 - bst r10,4 - bld r12,1 - bst r12,2 - bld r10,4 - bst r11,0 - bld r12,2 - bst r10,1 - bld r11,0 - bst r10,6 - bld r10,1 - bst r13,2 - bld r10,6 - bst r11,3 - bld r13,2 - bst r11,5 - bld r11,3 - bst r12,5 - bld r11,5 - bst r12,4 - bld r12,5 - bst r12,0 - bld r12,4 - bst r0,0 - bld r12,0 - bst r14,0 - bld r0,0 - bst r14,3 - bld r14,0 - bst r15,7 - bld r14,3 - bst r25,6 - bld r15,7 - bst r25,0 - bld r25,6 - bst r0,0 - bld r25,0 - bst r14,1 - bld r0,0 - bst r14,7 - bld r14,1 - bst r25,7 - bld r14,7 - bst r25,4 - bld r25,7 - bst r24,0 - bld r25,4 - bst r0,0 - bld r24,0 - bst r14,2 - bld r0,0 - bst r15,3 - bld r14,2 - bst r15,6 - bld r15,3 - bst r25,2 - bld r15,6 - bst r15,0 - bld r25,2 
- bst r0,0 - bld r15,0 - bst r14,4 - bld r0,0 - bst r24,3 - bld r14,4 - bst r15,5 - bld r24,3 - bst r24,6 - bld r15,5 - bst r25,1 - bld r24,6 - bst r0,0 - bld r25,1 - bst r14,5 - bld r0,0 - bst r24,7 - bld r14,5 - bst r25,5 - bld r24,7 - bst r24,4 - bld r25,5 - bst r24,1 - bld r24,4 - bst r0,0 - bld r24,1 - bst r14,6 - bld r0,0 - bst r25,3 - bld r14,6 - bst r15,4 - bld r25,3 - bst r24,2 - bld r15,4 - bst r15,1 - bld r24,2 - bst r0,0 - bld r15,1 - ldd r0,Y+5 - eor r10,r0 - ldd r0,Y+6 - eor r11,r0 - ldd r0,Y+7 - eor r12,r0 - ldd r0,Y+8 - eor r13,r0 - ldd r20,Y+13 - ldd r21,Y+14 - ldd r22,Y+15 - ldd r23,Y+16 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - lsl r20 - rol r21 - adc r20,r1 - lsl r20 - rol r21 - adc r20,r1 - lsl r20 - rol r21 - adc r20,r1 - lsl r20 - rol r21 - adc r20,r1 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - ldd r0,Y+1 - std Y+1,r20 - ldd r20,Y+5 - std Y+5,r0 - ldd r0,Y+9 - std Y+9,r20 - std Y+13,r0 - ldd r0,Y+2 - std Y+2,r21 - ldd r21,Y+6 - std Y+6,r0 - ldd r0,Y+10 - std Y+10,r21 - std Y+14,r0 - ldd r0,Y+3 - std Y+3,r22 - ldd r22,Y+7 - std Y+7,r0 - ldd r0,Y+11 - std Y+11,r22 - std Y+15,r0 - ldd r0,Y+4 - std Y+4,r23 - ldd r23,Y+8 - std Y+8,r0 - ldd r0,Y+12 - std Y+12,r23 - std Y+16,r0 - ldi r20,128 - eor r25,r20 - mov r30,r19 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - eor r14,r20 - inc r19 - cpi r19,40 - breq 727f - inc r26 - ldi r27,5 - cpse r26,r27 - rjmp 307b - mov r26,r1 - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rjmp 307b -727: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - bst r2,0 - bld r20,0 - bst r6,0 - bld r20,1 - bst r10,0 - bld r20,2 - bst r14,0 - bld r20,3 - bst r2,1 - bld r20,4 - bst r6,1 - bld r20,5 - bst r10,1 - bld r20,6 - bst r14,1 - bld r20,7 - bst r2,2 - bld r21,0 - bst r6,2 - bld r21,1 - bst r10,2 - bld r21,2 - bst r14,2 - bld r21,3 - bst r2,3 - bld r21,4 - bst r6,3 - bld r21,5 - bst r10,3 - bld r21,6 - bst r14,3 - bld r21,7 - bst r2,4 - bld r22,0 - bst r6,4 - bld r22,1 - bst r10,4 - bld r22,2 - bst r14,4 - bld r22,3 - bst r2,5 - bld r22,4 - bst r6,5 - bld r22,5 - bst r10,5 - bld r22,6 - bst r14,5 - bld r22,7 - bst r2,6 - bld r23,0 - bst r6,6 - bld r23,1 - bst r10,6 - bld r23,2 - bst r14,6 - bld r23,3 - bst r2,7 - bld r23,4 - bst r6,7 - bld r23,5 - bst r10,7 - bld r23,6 - bst r14,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r3,0 - bld r20,0 - bst r7,0 - bld r20,1 - bst r11,0 - bld r20,2 - bst r15,0 - bld r20,3 - bst r3,1 - bld r20,4 - bst r7,1 - bld r20,5 - bst r11,1 - bld r20,6 - bst r15,1 - bld r20,7 - bst r3,2 - bld r21,0 - bst r7,2 - bld r21,1 - bst r11,2 - bld r21,2 - bst r15,2 - bld r21,3 - bst r3,3 - bld r21,4 - bst r7,3 - bld r21,5 - bst r11,3 - bld r21,6 - bst r15,3 - bld r21,7 - bst r3,4 - bld r22,0 - bst r7,4 - bld r22,1 - bst r11,4 - bld r22,2 - bst r15,4 - bld r22,3 - bst r3,5 - bld r22,4 - bst r7,5 - bld r22,5 - bst r11,5 - bld r22,6 - bst r15,5 - bld r22,7 - bst r3,6 - bld r23,0 - bst r7,6 - bld r23,1 - bst r11,6 - bld r23,2 - bst r15,6 - bld r23,3 - bst r3,7 - bld r23,4 - bst r7,7 - bld r23,5 - bst r11,7 - bld r23,6 - bst r15,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r4,0 - bld r20,0 - bst r8,0 - bld r20,1 - bst r12,0 - bld r20,2 - bst r24,0 - bld r20,3 - bst r4,1 - bld r20,4 - bst r8,1 - bld r20,5 - bst r12,1 - bld r20,6 - bst r24,1 - bld r20,7 - bst r4,2 - bld r21,0 - bst 
r8,2 - bld r21,1 - bst r12,2 - bld r21,2 - bst r24,2 - bld r21,3 - bst r4,3 - bld r21,4 - bst r8,3 - bld r21,5 - bst r12,3 - bld r21,6 - bst r24,3 - bld r21,7 - bst r4,4 - bld r22,0 - bst r8,4 - bld r22,1 - bst r12,4 - bld r22,2 - bst r24,4 - bld r22,3 - bst r4,5 - bld r22,4 - bst r8,5 - bld r22,5 - bst r12,5 - bld r22,6 - bst r24,5 - bld r22,7 - bst r4,6 - bld r23,0 - bst r8,6 - bld r23,1 - bst r12,6 - bld r23,2 - bst r24,6 - bld r23,3 - bst r4,7 - bld r23,4 - bst r8,7 - bld r23,5 - bst r12,7 - bld r23,6 - bst r24,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r5,0 - bld r20,0 - bst r9,0 - bld r20,1 - bst r13,0 - bld r20,2 - bst r25,0 - bld r20,3 - bst r5,1 - bld r20,4 - bst r9,1 - bld r20,5 - bst r13,1 - bld r20,6 - bst r25,1 - bld r20,7 - bst r5,2 - bld r21,0 - bst r9,2 - bld r21,1 - bst r13,2 - bld r21,2 - bst r25,2 - bld r21,3 - bst r5,3 - bld r21,4 - bst r9,3 - bld r21,5 - bst r13,3 - bld r21,6 - bst r25,3 - bld r21,7 - bst r5,4 - bld r22,0 - bst r9,4 - bld r22,1 - bst r13,4 - bld r22,2 - bst r25,4 - bld r22,3 - bst r5,5 - bld r22,4 - bst r9,5 - bld r22,5 - bst r13,5 - bld r22,6 - bst r25,5 - bld r22,7 - bst r5,6 - bld r23,0 - bst r9,6 - bld r23,1 - bst r13,6 - bld r23,2 - bst r25,6 - bld r23,3 - bst r5,7 - bld r23,4 - bst r9,7 - bld r23,5 - bst r13,7 - bld r23,6 - bst r25,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128t_encrypt, .-gift128t_encrypt - - .text -.global gift128t_decrypt - .type gift128t_decrypt, @function -gift128t_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 36 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r2,0 - bst r20,1 - bld r6,0 - bst r20,2 - bld r10,0 - bst r20,3 - bld r14,0 - bst r20,4 - bld r2,1 - bst r20,5 - bld r6,1 - bst r20,6 - bld r10,1 - bst r20,7 - bld r14,1 - bst r21,0 - bld r2,2 - bst r21,1 - bld r6,2 - bst r21,2 - bld r10,2 - bst r21,3 - bld r14,2 - bst r21,4 - bld r2,3 - bst r21,5 - bld r6,3 - bst r21,6 - bld r10,3 - bst r21,7 - bld r14,3 - bst r22,0 - bld r2,4 - bst r22,1 - bld r6,4 - bst r22,2 - bld r10,4 - bst r22,3 - bld r14,4 - bst r22,4 - bld r2,5 - bst r22,5 - bld r6,5 - bst r22,6 - bld r10,5 - bst r22,7 - bld r14,5 - bst r23,0 - bld r2,6 - bst r23,1 - bld r6,6 - bst r23,2 - bld r10,6 - bst r23,3 - bld r14,6 - bst r23,4 - bld r2,7 - bst r23,5 - bld r6,7 - bst r23,6 - bld r10,7 - bst r23,7 - bld r14,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r3,0 - bst r20,1 - bld r7,0 - bst r20,2 - bld r11,0 - bst r20,3 - bld r15,0 - bst r20,4 - bld r3,1 - bst r20,5 - bld r7,1 - bst r20,6 - bld r11,1 - bst r20,7 - bld r15,1 - bst r21,0 - bld r3,2 - bst r21,1 - bld r7,2 - bst r21,2 - bld r11,2 - bst r21,3 - bld r15,2 - bst r21,4 - bld r3,3 - bst r21,5 - bld r7,3 - bst r21,6 - bld r11,3 - bst r21,7 - bld r15,3 - bst r22,0 - bld r3,4 - bst r22,1 - bld r7,4 - bst r22,2 - bld r11,4 - bst r22,3 - bld r15,4 - bst r22,4 - bld r3,5 - bst r22,5 - bld r7,5 - bst r22,6 - bld r11,5 - bst r22,7 - bld r15,5 
- bst r23,0 - bld r3,6 - bst r23,1 - bld r7,6 - bst r23,2 - bld r11,6 - bst r23,3 - bld r15,6 - bst r23,4 - bld r3,7 - bst r23,5 - bld r7,7 - bst r23,6 - bld r11,7 - bst r23,7 - bld r15,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r4,0 - bst r20,1 - bld r8,0 - bst r20,2 - bld r12,0 - bst r20,3 - bld r24,0 - bst r20,4 - bld r4,1 - bst r20,5 - bld r8,1 - bst r20,6 - bld r12,1 - bst r20,7 - bld r24,1 - bst r21,0 - bld r4,2 - bst r21,1 - bld r8,2 - bst r21,2 - bld r12,2 - bst r21,3 - bld r24,2 - bst r21,4 - bld r4,3 - bst r21,5 - bld r8,3 - bst r21,6 - bld r12,3 - bst r21,7 - bld r24,3 - bst r22,0 - bld r4,4 - bst r22,1 - bld r8,4 - bst r22,2 - bld r12,4 - bst r22,3 - bld r24,4 - bst r22,4 - bld r4,5 - bst r22,5 - bld r8,5 - bst r22,6 - bld r12,5 - bst r22,7 - bld r24,5 - bst r23,0 - bld r4,6 - bst r23,1 - bld r8,6 - bst r23,2 - bld r12,6 - bst r23,3 - bld r24,6 - bst r23,4 - bld r4,7 - bst r23,5 - bld r8,7 - bst r23,6 - bld r12,7 - bst r23,7 - bld r24,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r5,0 - bst r20,1 - bld r9,0 - bst r20,2 - bld r13,0 - bst r20,3 - bld r25,0 - bst r20,4 - bld r5,1 - bst r20,5 - bld r9,1 - bst r20,6 - bld r13,1 - bst r20,7 - bld r25,1 - bst r21,0 - bld r5,2 - bst r21,1 - bld r9,2 - bst r21,2 - bld r13,2 - bst r21,3 - bld r25,2 - bst r21,4 - bld r5,3 - bst r21,5 - bld r9,3 - bst r21,6 - bld r13,3 - bst r21,7 - bld r25,3 - bst r22,0 - bld r5,4 - bst r22,1 - bld r9,4 - bst r22,2 - bld r13,4 - bst r22,3 - bld r25,4 - bst r22,4 - bld r5,5 - bst r22,5 - bld r9,5 - bst r22,6 - bld r13,5 - bst r22,7 - bld r25,5 - bst r23,0 - bld r5,6 - bst r23,1 - bld r9,6 - bst r23,2 - bld r13,6 - bst r23,3 - bld r25,6 - bst r23,4 - bld r5,7 - bst r23,5 - bld r9,7 - bst r23,6 - bld r13,7 - bst r23,7 - bld r25,7 - ld r26,Z - ldd r27,Z+1 - ldd r16,Z+2 - ldd r17,Z+3 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r16 - std Y+4,r17 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r16,Z+6 - ldd r17,Z+7 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r16 - std Y+8,r17 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r16,Z+10 - ldd r17,Z+11 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r16 - std Y+12,r17 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r16,Z+14 - ldd r17,Z+15 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r16 - std Y+16,r17 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r19,40 - mov r26,r1 -375: - ldd r0,Y+13 - ldd r20,Y+9 - std Y+9,r0 - ldd r0,Y+5 - std Y+5,r20 - ldd r20,Y+1 - std Y+1,r0 - ldd r0,Y+14 - ldd r21,Y+10 - std Y+10,r0 - ldd r0,Y+6 - std Y+6,r21 - ldd r21,Y+2 - std Y+2,r0 - ldd r0,Y+15 - ldd r22,Y+11 - std Y+11,r0 - ldd r0,Y+7 - std Y+7,r22 - ldd r22,Y+3 - std Y+3,r0 - ldd r0,Y+16 - ldd r23,Y+12 - std Y+12,r0 - ldd r0,Y+8 - std Y+8,r23 - ldd r23,Y+4 - std Y+4,r0 - mov 
r0,r1 - lsr r21 - ror r20 - ror r0 - lsr r21 - ror r20 - ror r0 - lsr r21 - ror r20 - ror r0 - lsr r21 - ror r20 - ror r0 - or r21,r0 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - std Y+13,r20 - std Y+14,r21 - std Y+15,r22 - std Y+16,r23 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ldd r0,Y+5 - eor r10,r0 - ldd r0,Y+6 - eor r11,r0 - ldd r0,Y+7 - eor r12,r0 - ldd r0,Y+8 - eor r13,r0 - ldi r20,128 - eor r25,r20 - dec r19 - mov r30,r19 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - eor r14,r20 - bst r2,1 - bld r0,0 - bst r5,0 - bld r2,1 - bst r2,6 - bld r5,0 - bst r4,1 - bld r2,6 - bst r5,4 - bld r4,1 - bst r2,7 - bld r5,4 - bst r3,1 - bld r2,7 - bst r5,2 - bld r3,1 - bst r4,6 - bld r5,2 - bst r4,5 - bld r4,6 - bst r5,5 - bld r4,5 - bst r5,7 - bld r5,5 - bst r3,7 - bld r5,7 - bst r3,3 - bld r3,7 - bst r3,2 - bld r3,3 - bst r4,2 - bld r3,2 - bst r4,4 - bld r4,2 - bst r2,5 - bld r4,4 - bst r5,1 - bld r2,5 - bst r5,6 - bld r5,1 - bst r4,7 - bld r5,6 - bst r3,5 - bld r4,7 - bst r5,3 - bld r3,5 - bst r3,6 - bld r5,3 - bst r4,3 - bld r3,6 - bst r3,4 - bld r4,3 - bst r2,3 - bld r3,4 - bst r3,0 - bld r2,3 - bst r2,2 - bld r3,0 - bst r4,0 - bld r2,2 - bst r2,4 - bld r4,0 - bst r0,0 - bld r2,4 - bst r6,0 - bld r0,0 - bst r7,0 - bld r6,0 - bst r7,2 - bld r7,0 - bst r9,2 - bld r7,2 - bst r9,6 - bld r9,2 - bst r9,7 - bld r9,6 - bst r8,7 - bld r9,7 - bst r8,5 - bld r8,7 - bst r6,5 - bld r8,5 - bst r6,1 - bld r6,5 - bst r0,0 - bld r6,1 - bst r6,2 - bld r0,0 - bst r9,0 - bld r6,2 - bst r7,6 - bld r9,0 - bst r9,3 - bld r7,6 - bst r8,6 - bld r9,3 - bst r9,5 - bld r8,6 - bst r6,7 - bld r9,5 - bst r8,1 - bld r6,7 - bst r6,4 - bld r8,1 - bst r7,1 - bld r6,4 - bst r0,0 - bld r7,1 - bst r6,3 - bld r0,0 - bst r8,0 - bld r6,3 - bst r7,4 - bld r8,0 - bst r7,3 - bld r7,4 - bst r8,2 - bld r7,3 - bst r9,4 - bld r8,2 - bst r7,7 - bld r9,4 - bst r8,3 - bld r7,7 - bst r8,4 - bld r8,3 - bst r7,5 - bld r8,4 - bst r0,0 - bld r7,5 - bst r6,6 - bld r0,0 - bst r9,1 - bld r6,6 - bst r0,0 - bld r9,1 - bst r10,0 - bld r0,0 - bst r12,0 - bld r10,0 - bst r12,4 - bld r12,0 - bst r12,5 - bld r12,4 - bst r11,5 - bld r12,5 - bst r11,3 - bld r11,5 - bst r13,2 - bld r11,3 - bst r10,6 - bld r13,2 - bst r10,1 - bld r10,6 - bst r11,0 - bld r10,1 - bst r12,2 - bld r11,0 - bst r10,4 - bld r12,2 - bst r12,1 - bld r10,4 - bst r11,4 - bld r12,1 - bst r12,3 - bld r11,4 - bst r13,4 - bld r12,3 - bst r12,7 - bld r13,4 - bst r13,5 - bld r12,7 - bst r11,7 - bld r13,5 - bst r13,3 - bld r11,7 - bst r13,6 - bld r13,3 - bst r10,7 - bld r13,6 - bst r13,1 - bld r10,7 - bst r11,6 - bld r13,1 - bst r10,3 - bld r11,6 - bst r13,0 - bld r10,3 - bst r12,6 - bld r13,0 - bst r10,5 - bld r12,6 - bst r11,1 - bld r10,5 - bst r11,2 - bld r11,1 - bst r10,2 - bld r11,2 - bst r0,0 - bld r10,2 - bst r14,0 - bld r0,0 - bst r25,0 - bld r14,0 - bst r25,6 - bld r25,0 - bst r15,7 - bld r25,6 - bst r14,3 - bld r15,7 - bst r0,0 - bld r14,3 - bst r14,1 - bld r0,0 - bst r24,0 - bld r14,1 - bst r25,4 - bld r24,0 - bst r25,7 - bld r25,4 - bst r14,7 - bld r25,7 - bst r0,0 - bld r14,7 - bst r14,2 - bld r0,0 - bst r15,0 - bld r14,2 - bst r25,2 - bld r15,0 - bst r15,6 - bld r25,2 - bst r15,3 - bld r15,6 - bst r0,0 - bld r15,3 - bst r14,4 - bld r0,0 - bst r25,1 - bld r14,4 - bst r24,6 - bld r25,1 - bst r15,5 - bld r24,6 - bst r24,3 - bld r15,5 - bst r0,0 - bld r24,3 - bst r14,5 - bld r0,0 - bst r24,1 - bld r14,5 - bst r24,4 - bld r24,1 - bst r25,5 - bld 
r24,4 - bst r24,7 - bld r25,5 - bst r0,0 - bld r24,7 - bst r14,6 - bld r0,0 - bst r15,1 - bld r14,6 - bst r24,2 - bld r15,1 - bst r15,4 - bld r24,2 - bst r25,3 - bld r15,4 - bst r0,0 - bld r25,3 - movw r20,r14 - movw r22,r24 - movw r14,r2 - movw r24,r4 - movw r2,r20 - movw r4,r22 - and r20,r6 - and r21,r7 - and r22,r8 - and r23,r9 - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - com r14 - com r15 - com r24 - com r25 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - cp r19,r1 - breq 791f - inc r26 - ldi r27,5 - cpse r26,r27 - rjmp 375b - mov r26,r1 - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rjmp 375b -791: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - bst r2,0 - bld r20,0 - bst r6,0 - bld r20,1 - bst r10,0 - bld r20,2 - bst r14,0 - bld r20,3 - bst r2,1 - bld r20,4 - bst r6,1 - bld r20,5 - bst r10,1 - bld r20,6 - bst r14,1 - bld r20,7 - bst r2,2 - bld r21,0 - bst r6,2 - bld r21,1 - bst r10,2 - bld r21,2 - bst r14,2 - bld r21,3 - bst r2,3 - bld r21,4 - bst r6,3 - bld r21,5 - bst r10,3 - bld r21,6 - bst r14,3 - bld r21,7 - bst r2,4 - bld r22,0 - bst r6,4 - bld r22,1 - bst r10,4 - bld r22,2 - bst r14,4 - bld r22,3 - bst r2,5 - bld r22,4 - bst r6,5 - bld r22,5 - bst r10,5 - bld r22,6 - bst r14,5 - bld r22,7 - bst r2,6 - bld r23,0 - bst r6,6 - bld r23,1 - bst r10,6 - bld r23,2 - bst r14,6 - bld r23,3 - bst r2,7 - bld r23,4 - bst r6,7 - bld r23,5 - bst r10,7 - bld r23,6 - bst r14,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r3,0 - bld r20,0 - bst r7,0 - bld r20,1 - bst r11,0 - bld r20,2 - bst r15,0 - bld r20,3 - bst r3,1 - bld r20,4 - bst r7,1 - bld r20,5 - bst r11,1 - bld r20,6 - bst r15,1 - bld r20,7 - bst r3,2 - bld r21,0 - bst r7,2 - bld r21,1 - bst r11,2 - bld r21,2 - bst r15,2 - bld r21,3 - bst r3,3 - bld r21,4 - bst r7,3 - bld r21,5 - bst r11,3 - bld r21,6 - bst r15,3 - bld r21,7 - bst r3,4 - bld r22,0 - bst r7,4 - bld r22,1 - bst r11,4 - bld r22,2 - bst r15,4 - bld r22,3 - bst r3,5 - bld r22,4 - bst r7,5 - bld r22,5 - bst r11,5 - bld r22,6 - bst r15,5 - bld r22,7 - bst r3,6 - bld r23,0 - bst r7,6 - bld r23,1 - bst r11,6 - bld r23,2 - bst r15,6 - bld r23,3 - bst r3,7 - bld r23,4 - bst r7,7 - bld r23,5 - bst r11,7 - bld r23,6 - bst r15,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r4,0 - bld r20,0 - bst r8,0 - bld r20,1 - bst r12,0 - bld r20,2 - bst r24,0 - bld r20,3 - bst r4,1 - bld r20,4 - bst r8,1 - bld r20,5 - bst r12,1 - bld r20,6 - bst r24,1 - bld r20,7 - bst r4,2 - bld r21,0 - bst r8,2 - bld r21,1 - bst r12,2 - bld r21,2 - bst r24,2 - bld r21,3 - bst r4,3 - bld r21,4 - bst r8,3 - bld r21,5 - bst r12,3 - bld r21,6 - bst r24,3 - bld r21,7 - bst r4,4 - bld r22,0 - bst r8,4 - bld r22,1 - bst r12,4 - bld r22,2 - bst r24,4 - bld r22,3 - bst r4,5 - bld r22,4 - bst r8,5 - bld r22,5 - bst r12,5 - bld r22,6 - bst r24,5 - bld r22,7 - bst r4,6 - bld r23,0 - bst r8,6 - bld r23,1 - bst r12,6 - bld r23,2 - bst r24,6 - bld r23,3 - bst r4,7 - bld r23,4 - bst r8,7 - bld r23,5 - bst r12,7 - bld 
r23,6 - bst r24,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r5,0 - bld r20,0 - bst r9,0 - bld r20,1 - bst r13,0 - bld r20,2 - bst r25,0 - bld r20,3 - bst r5,1 - bld r20,4 - bst r9,1 - bld r20,5 - bst r13,1 - bld r20,6 - bst r25,1 - bld r20,7 - bst r5,2 - bld r21,0 - bst r9,2 - bld r21,1 - bst r13,2 - bld r21,2 - bst r25,2 - bld r21,3 - bst r5,3 - bld r21,4 - bst r9,3 - bld r21,5 - bst r13,3 - bld r21,6 - bst r25,3 - bld r21,7 - bst r5,4 - bld r22,0 - bst r9,4 - bld r22,1 - bst r13,4 - bld r22,2 - bst r25,4 - bld r22,3 - bst r5,5 - bld r22,4 - bst r9,5 - bld r22,5 - bst r13,5 - bld r22,6 - bst r25,5 - bld r22,7 - bst r5,6 - bld r23,0 - bst r9,6 - bld r23,1 - bst r13,6 - bld r23,2 - bst r25,6 - bld r23,3 - bst r5,7 - bld r23,4 - bst r9,7 - bld r23,5 - bst r13,7 - bld r23,6 - bst r25,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128t_decrypt, .-gift128t_decrypt - -#endif diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128n-full-avr.S b/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128n-full-avr.S deleted file mode 100644 index 3a7e6fb..0000000 --- a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128n-full-avr.S +++ /dev/null @@ -1,8173 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - .byte 3 - .byte 3 - .byte 39 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - .byte 2 - .byte 0 - .byte 80 - .byte 20 - .byte 129 - .byte 1 - .byte 2 - .byte 1 - .byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128n_init - .type 
gift128n_init, @function -gift128n_init: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 18 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - st Z+,r22 - st Z+,r23 - st Z+,r28 - st Z+,r29 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - ldi r24,4 -33: - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r29 - ror r28 - ror r0 - lsr r29 - ror r28 - ror r0 - or r29,r0 - st Z+,r22 - st Z+,r23 - st Z+,r28 - st Z+,r29 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r28 - mov r28,r4 - mov r4,r0 - mov r0,r29 - mov r29,r5 - mov r5,r0 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - mov r0,r6 - mov r6,r10 - mov r10,r0 - mov r0,r7 - mov r7,r11 - mov r11,r0 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - dec r24 - breq 5115f - rjmp 33b -5115: - subi r30,80 - sbc r31,r1 - ldi r24,2 -119: - ld r22,Z - ldd r23,Z+1 - ldd r28,Z+2 - ldd r29,Z+3 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - st Z,r29 - std Z+1,r23 - std Z+2,r28 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r28,Z+6 - ldd r29,Z+7 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor 
r21,r29 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+4,r29 - std Z+5,r23 - std Z+6,r28 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r28,Z+10 - ldd r29,Z+11 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+8,r29 - std Z+9,r23 - std Z+10,r28 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r28,Z+14 - ldd r29,Z+15 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 
- eor r20,r28 - eor r21,r29 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+12,r29 - std Z+13,r23 - std Z+14,r28 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r28,Z+18 - ldd r29,Z+19 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+16,r29 - std Z+17,r23 - std Z+18,r28 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r28,Z+22 - ldd r29,Z+23 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - 
movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+20,r29 - std Z+21,r23 - std Z+22,r28 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r28,Z+26 - ldd r29,Z+27 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+24,r29 - std Z+25,r23 - std Z+26,r28 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r28,Z+30 - ldd r29,Z+31 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor 
r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+28,r29 - std Z+29,r23 - std Z+30,r28 - std Z+31,r22 - dec r24 - breq 1268f - adiw r30,40 - rjmp 119b -1268: - adiw r30,40 - movw r26,r30 - subi r26,80 - sbc r27,r1 - ldi r24,6 -1274: - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r2 - eor r19,r3 - andi r18,51 - andi r19,51 - eor r2,r18 - eor r3,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - st Z,r2 - std Z+1,r3 - std Z+2,r4 - std Z+3,r5 - movw r18,r22 - movw r20,r28 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - andi r28,204 - andi r29,204 - or r28,r21 - or r29,r18 - or r22,r19 - or r23,r20 - movw r18,r28 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r28 - eor r19,r29 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r28,r18 - eor r29,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r28,r18 - eor r29,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r28 - std Z+5,r29 - std Z+6,r22 - std Z+7,r23 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - swap r3 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - swap r5 - std Z+8,r2 - std Z+9,r3 - std Z+10,r4 - std Z+11,r5 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r28 - adc r28,r1 - lsl r28 - adc r28,r1 - lsl r28 - adc r28,r1 - lsl r29 - adc r29,r1 - lsl r29 - adc r29,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r28 - std Z+15,r29 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - ldi r25,85 - and r2,r25 - and r3,r25 - and r4,r25 - and r5,r25 
- or r2,r19 - or r3,r20 - or r4,r21 - or r5,r18 - std Z+16,r4 - std Z+17,r5 - std Z+18,r2 - std Z+19,r3 - movw r18,r22 - movw r20,r28 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - andi r28,170 - andi r29,170 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - or r22,r18 - or r23,r19 - or r28,r20 - or r29,r21 - std Z+20,r29 - std Z+21,r22 - std Z+22,r23 - std Z+23,r28 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r14,r18 - movw r16,r20 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - eor r14,r18 - eor r15,r19 - eor r16,r20 - eor r17,r21 - ldi r25,8 - and r14,r25 - and r15,r25 - andi r16,8 - andi r17,8 - eor r18,r14 - eor r19,r15 - eor r20,r16 - eor r21,r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - eor r18,r14 - eor r19,r15 - eor r20,r16 - eor r21,r17 - ldi r17,15 - and r2,r17 - and r3,r17 - and r4,r17 - and r5,r17 - or r2,r18 - or r3,r19 - or r4,r20 - or r5,r21 - std Z+24,r2 - std Z+25,r3 - std Z+26,r4 - std Z+27,r5 - movw r18,r28 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r2,r22 - movw r4,r28 - ldi r16,1 - and r2,r16 - and r3,r16 - and r4,r16 - and r5,r16 - lsl r2 - rol r3 - rol r4 - rol r5 - lsl r2 - rol r3 - rol r4 - rol r5 - lsl r2 - rol r3 - rol r4 - rol r5 - or r2,r18 - or r3,r19 - movw r18,r28 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r2,r18 - or r3,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r4,r18 - or r5,r19 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r2,r18 - or r3,r19 - or r4,r20 - or r5,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r4,r22 - or r5,r23 - std Z+28,r2 - std Z+29,r3 - std Z+30,r4 - std Z+31,r5 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - mov r0,r1 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - or r5,r0 - std Z+32,r3 - std Z+33,r2 - std Z+34,r4 - std Z+35,r5 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r28 - mov r28,r29 - mov r29,r0 - lsl r28 - rol r29 - adc r28,r1 - lsl r28 - rol r29 - adc r28,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r28 - std Z+39,r29 - dec r24 - breq 1733f - adiw r30,40 - rjmp 1274b -1733: - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - 
pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128n_init, .-gift128n_init - - .text -.global gift128n_encrypt - .type gift128n_encrypt, @function -gift128n_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r22,0 - bst r18,1 - bld r4,0 - bst r18,2 - bld r8,0 - bst r18,3 - bld r12,0 - bst r18,4 - bld r22,1 - bst r18,5 - bld r4,1 - bst r18,6 - bld r8,1 - bst r18,7 - bld r12,1 - bst r19,0 - bld r22,2 - bst r19,1 - bld r4,2 - bst r19,2 - bld r8,2 - bst r19,3 - bld r12,2 - bst r19,4 - bld r22,3 - bst r19,5 - bld r4,3 - bst r19,6 - bld r8,3 - bst r19,7 - bld r12,3 - bst r20,0 - bld r22,4 - bst r20,1 - bld r4,4 - bst r20,2 - bld r8,4 - bst r20,3 - bld r12,4 - bst r20,4 - bld r22,5 - bst r20,5 - bld r4,5 - bst r20,6 - bld r8,5 - bst r20,7 - bld r12,5 - bst r21,0 - bld r22,6 - bst r21,1 - bld r4,6 - bst r21,2 - bld r8,6 - bst r21,3 - bld r12,6 - bst r21,4 - bld r22,7 - bst r21,5 - bld r4,7 - bst r21,6 - bld r8,7 - bst r21,7 - bld r12,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r23,0 - bst r18,1 - bld r5,0 - bst r18,2 - bld r9,0 - bst r18,3 - bld r13,0 - bst r18,4 - bld r23,1 - bst r18,5 - bld r5,1 - bst r18,6 - bld r9,1 - bst r18,7 - bld r13,1 - bst r19,0 - bld r23,2 - bst r19,1 - bld r5,2 - bst r19,2 - bld r9,2 - bst r19,3 - bld r13,2 - bst r19,4 - bld r23,3 - bst r19,5 - bld r5,3 - bst r19,6 - bld r9,3 - bst r19,7 - bld r13,3 - bst r20,0 - bld r23,4 - bst r20,1 - bld r5,4 - bst r20,2 - bld r9,4 - bst r20,3 - bld r13,4 - bst r20,4 - bld r23,5 - bst r20,5 - bld r5,5 - bst r20,6 - bld r9,5 - bst r20,7 - bld r13,5 - bst r21,0 - bld r23,6 - bst r21,1 - bld r5,6 - bst r21,2 - bld r9,6 - bst r21,3 - bld r13,6 - bst r21,4 - bld r23,7 - bst r21,5 - bld r5,7 - bst r21,6 - bld r9,7 - bst r21,7 - bld r13,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r2,0 - bst r18,1 - bld r6,0 - bst r18,2 - bld r10,0 - bst r18,3 - bld r14,0 - bst r18,4 - bld r2,1 - bst r18,5 - bld r6,1 - bst r18,6 - bld r10,1 - bst r18,7 - bld r14,1 - bst r19,0 - bld r2,2 - bst r19,1 - bld r6,2 - bst r19,2 - bld r10,2 - bst r19,3 - bld r14,2 - bst r19,4 - bld r2,3 - bst r19,5 - bld r6,3 - bst r19,6 - bld r10,3 - bst r19,7 - bld r14,3 - bst r20,0 - bld r2,4 - bst r20,1 - bld r6,4 - bst r20,2 - bld r10,4 - bst r20,3 - bld r14,4 - bst r20,4 - bld r2,5 - bst r20,5 - bld r6,5 - bst r20,6 - bld r10,5 - bst r20,7 - bld r14,5 - bst r21,0 - bld r2,6 - bst r21,1 - bld r6,6 - bst r21,2 - bld r10,6 - bst r21,3 - bld r14,6 - bst r21,4 - bld r2,7 - bst r21,5 - bld r6,7 - bst r21,6 - bld r10,7 - bst r21,7 - bld r14,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r3,0 - bst r18,1 - bld r7,0 - bst r18,2 - bld r11,0 - bst r18,3 - bld r15,0 - bst r18,4 - bld r3,1 - bst r18,5 - bld r7,1 - bst r18,6 - bld r11,1 - bst r18,7 - bld r15,1 - bst r19,0 - bld r3,2 - bst r19,1 - bld r7,2 - bst r19,2 - bld r11,2 - bst r19,3 - bld r15,2 - bst r19,4 - bld r3,3 - bst r19,5 - bld r7,3 - bst r19,6 - bld r11,3 - bst r19,7 - bld r15,3 - bst r20,0 - bld r3,4 - bst r20,1 - bld r7,4 - bst r20,2 - bld r11,4 - bst r20,3 - bld r15,4 - bst r20,4 - bld r3,5 - bst r20,5 - bld r7,5 - bst r20,6 - bld r11,5 - bst r20,7 - bld r15,5 - bst r21,0 - bld r3,6 - bst r21,1 - bld r7,6 - bst r21,2 - bld r11,6 - bst 
r21,3 - bld r15,6 - bst r21,4 - bld r3,7 - bst r21,5 - bld r7,7 - bst r21,6 - bld r11,7 - bst r21,7 - bld r15,7 - movw r26,r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 283f - rcall 283f - rcall 283f - rcall 283f - rcall 283f - rcall 283f - rcall 283f - rcall 283f - rjmp 1021f -283: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - 
mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - 
eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor 
r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -1021: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - bst r22,0 - bld r18,0 - bst r4,0 - bld r18,1 - bst r8,0 - bld r18,2 - bst r12,0 - bld r18,3 - bst r22,1 - bld r18,4 - bst r4,1 - bld r18,5 - bst r8,1 - bld r18,6 - bst r12,1 - bld r18,7 - bst r22,2 - bld r19,0 - bst r4,2 - bld r19,1 - bst r8,2 - bld r19,2 - bst r12,2 - bld r19,3 - bst r22,3 - bld r19,4 - bst r4,3 - bld r19,5 - bst r8,3 - bld r19,6 - bst r12,3 - bld r19,7 - bst r22,4 - bld r20,0 - bst r4,4 - bld r20,1 - bst r8,4 - bld r20,2 - bst r12,4 - bld r20,3 - bst r22,5 - bld r20,4 - bst r4,5 - bld r20,5 - bst r8,5 - bld r20,6 - bst r12,5 - bld r20,7 - bst r22,6 - bld r21,0 - bst r4,6 - bld r21,1 - bst r8,6 - bld r21,2 - bst r12,6 - bld r21,3 - bst r22,7 - bld r21,4 - bst r4,7 - bld r21,5 - bst r8,7 - bld r21,6 - bst r12,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r23,0 - bld r18,0 - bst r5,0 - bld r18,1 - bst r9,0 - bld r18,2 - bst r13,0 - bld r18,3 - bst r23,1 - bld r18,4 - bst r5,1 - bld r18,5 - bst r9,1 - bld r18,6 - bst r13,1 - bld r18,7 - bst r23,2 - bld r19,0 - bst r5,2 - bld r19,1 - bst r9,2 - bld r19,2 - bst r13,2 - bld r19,3 - bst r23,3 - bld r19,4 - bst r5,3 - bld r19,5 - bst r9,3 - bld r19,6 - bst r13,3 - bld r19,7 - bst r23,4 - bld r20,0 - bst r5,4 - bld r20,1 - bst r9,4 - bld r20,2 - bst r13,4 - bld r20,3 - bst r23,5 - bld r20,4 - bst r5,5 - bld r20,5 - bst r9,5 - bld r20,6 - bst r13,5 - bld r20,7 - bst r23,6 - bld r21,0 - bst r5,6 - bld r21,1 - bst r9,6 - bld r21,2 - bst r13,6 - bld r21,3 - bst r23,7 - bld r21,4 - bst r5,7 - bld r21,5 - bst r9,7 - bld r21,6 - bst r13,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r2,0 - bld r18,0 - bst r6,0 - bld r18,1 - bst r10,0 - bld r18,2 - bst r14,0 - bld r18,3 - bst r2,1 - bld r18,4 - bst r6,1 - bld r18,5 - bst r10,1 - bld r18,6 - bst r14,1 - bld r18,7 - bst r2,2 - bld r19,0 - bst r6,2 - bld r19,1 - bst r10,2 - bld r19,2 - bst r14,2 - bld r19,3 - bst r2,3 - bld r19,4 - bst r6,3 - bld r19,5 - bst r10,3 - bld r19,6 - bst r14,3 - bld r19,7 - bst r2,4 - bld r20,0 - bst r6,4 - bld r20,1 - bst r10,4 - bld r20,2 - bst 
r14,4 - bld r20,3 - bst r2,5 - bld r20,4 - bst r6,5 - bld r20,5 - bst r10,5 - bld r20,6 - bst r14,5 - bld r20,7 - bst r2,6 - bld r21,0 - bst r6,6 - bld r21,1 - bst r10,6 - bld r21,2 - bst r14,6 - bld r21,3 - bst r2,7 - bld r21,4 - bst r6,7 - bld r21,5 - bst r10,7 - bld r21,6 - bst r14,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r3,0 - bld r18,0 - bst r7,0 - bld r18,1 - bst r11,0 - bld r18,2 - bst r15,0 - bld r18,3 - bst r3,1 - bld r18,4 - bst r7,1 - bld r18,5 - bst r11,1 - bld r18,6 - bst r15,1 - bld r18,7 - bst r3,2 - bld r19,0 - bst r7,2 - bld r19,1 - bst r11,2 - bld r19,2 - bst r15,2 - bld r19,3 - bst r3,3 - bld r19,4 - bst r7,3 - bld r19,5 - bst r11,3 - bld r19,6 - bst r15,3 - bld r19,7 - bst r3,4 - bld r20,0 - bst r7,4 - bld r20,1 - bst r11,4 - bld r20,2 - bst r15,4 - bld r20,3 - bst r3,5 - bld r20,4 - bst r7,5 - bld r20,5 - bst r11,5 - bld r20,6 - bst r15,5 - bld r20,7 - bst r3,6 - bld r21,0 - bst r7,6 - bld r21,1 - bst r11,6 - bld r21,2 - bst r15,6 - bld r21,3 - bst r3,7 - bld r21,4 - bst r7,7 - bld r21,5 - bst r11,7 - bld r21,6 - bst r15,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - pop r0 - pop r0 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128n_encrypt, .-gift128n_encrypt - - .text -.global gift128n_decrypt - .type gift128n_decrypt, @function -gift128n_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r22,0 - bst r18,1 - bld r4,0 - bst r18,2 - bld r8,0 - bst r18,3 - bld r12,0 - bst r18,4 - bld r22,1 - bst r18,5 - bld r4,1 - bst r18,6 - bld r8,1 - bst r18,7 - bld r12,1 - bst r19,0 - bld r22,2 - bst r19,1 - bld r4,2 - bst r19,2 - bld r8,2 - bst r19,3 - bld r12,2 - bst r19,4 - bld r22,3 - bst r19,5 - bld r4,3 - bst r19,6 - bld r8,3 - bst r19,7 - bld r12,3 - bst r20,0 - bld r22,4 - bst r20,1 - bld r4,4 - bst r20,2 - bld r8,4 - bst r20,3 - bld r12,4 - bst r20,4 - bld r22,5 - bst r20,5 - bld r4,5 - bst r20,6 - bld r8,5 - bst r20,7 - bld r12,5 - bst r21,0 - bld r22,6 - bst r21,1 - bld r4,6 - bst r21,2 - bld r8,6 - bst r21,3 - bld r12,6 - bst r21,4 - bld r22,7 - bst r21,5 - bld r4,7 - bst r21,6 - bld r8,7 - bst r21,7 - bld r12,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r23,0 - bst r18,1 - bld r5,0 - bst r18,2 - bld r9,0 - bst r18,3 - bld r13,0 - bst r18,4 - bld r23,1 - bst r18,5 - bld r5,1 - bst r18,6 - bld r9,1 - bst r18,7 - bld r13,1 - bst r19,0 - bld r23,2 - bst r19,1 - bld r5,2 - bst r19,2 - bld r9,2 - bst r19,3 - bld r13,2 - bst r19,4 - bld r23,3 - bst r19,5 - bld r5,3 - bst r19,6 - bld r9,3 - bst r19,7 - bld r13,3 - bst r20,0 - bld r23,4 - bst r20,1 - bld r5,4 - bst r20,2 - bld r9,4 - bst r20,3 - bld r13,4 - bst r20,4 - bld r23,5 - bst r20,5 - bld r5,5 - bst r20,6 - bld r9,5 - bst r20,7 - bld r13,5 - bst r21,0 - bld r23,6 - bst r21,1 - bld r5,6 - bst r21,2 - bld r9,6 - bst r21,3 - bld r13,6 - bst r21,4 - bld r23,7 - bst r21,5 - bld r5,7 - bst r21,6 - bld r9,7 - bst r21,7 - bld r13,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r2,0 - bst r18,1 - bld r6,0 - bst r18,2 - bld r10,0 - bst r18,3 - bld r14,0 - bst r18,4 - bld r2,1 - bst r18,5 - bld r6,1 - bst r18,6 - 
bld r10,1 - bst r18,7 - bld r14,1 - bst r19,0 - bld r2,2 - bst r19,1 - bld r6,2 - bst r19,2 - bld r10,2 - bst r19,3 - bld r14,2 - bst r19,4 - bld r2,3 - bst r19,5 - bld r6,3 - bst r19,6 - bld r10,3 - bst r19,7 - bld r14,3 - bst r20,0 - bld r2,4 - bst r20,1 - bld r6,4 - bst r20,2 - bld r10,4 - bst r20,3 - bld r14,4 - bst r20,4 - bld r2,5 - bst r20,5 - bld r6,5 - bst r20,6 - bld r10,5 - bst r20,7 - bld r14,5 - bst r21,0 - bld r2,6 - bst r21,1 - bld r6,6 - bst r21,2 - bld r10,6 - bst r21,3 - bld r14,6 - bst r21,4 - bld r2,7 - bst r21,5 - bld r6,7 - bst r21,6 - bld r10,7 - bst r21,7 - bld r14,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r3,0 - bst r18,1 - bld r7,0 - bst r18,2 - bld r11,0 - bst r18,3 - bld r15,0 - bst r18,4 - bld r3,1 - bst r18,5 - bld r7,1 - bst r18,6 - bld r11,1 - bst r18,7 - bld r15,1 - bst r19,0 - bld r3,2 - bst r19,1 - bld r7,2 - bst r19,2 - bld r11,2 - bst r19,3 - bld r15,2 - bst r19,4 - bld r3,3 - bst r19,5 - bld r7,3 - bst r19,6 - bld r11,3 - bst r19,7 - bld r15,3 - bst r20,0 - bld r3,4 - bst r20,1 - bld r7,4 - bst r20,2 - bld r11,4 - bst r20,3 - bld r15,4 - bst r20,4 - bld r3,5 - bst r20,5 - bld r7,5 - bst r20,6 - bld r11,5 - bst r20,7 - bld r15,5 - bst r21,0 - bld r3,6 - bst r21,1 - bld r7,6 - bst r21,2 - bld r11,6 - bst r21,3 - bld r15,6 - bst r21,4 - bld r3,7 - bst r21,5 - bld r7,7 - bst r21,6 - bld r11,7 - bst r21,7 - bld r15,7 - movw r26,r30 - subi r26,192 - sbci r27,254 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,160 - rcall 286f - rcall 286f - rcall 286f - rcall 286f - rcall 286f - rcall 286f - rcall 286f - rcall 286f - rjmp 1024f -286: - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and 
r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r1 - lsr r22 - ror r0 - lsr r22 - ror r0 - or r22,r0 - mov r0,r1 - lsr r23 - ror r0 - lsr r23 - ror r0 - or r23,r0 - mov r0,r1 - lsr r2 - ror r0 - lsr r2 - ror r0 - or r2,r0 - mov r0,r1 - lsr r3 - ror r0 - lsr r3 - ror r0 - or r3,r0 - swap r4 - swap r5 - swap r6 - swap r7 - lsl r8 - adc r8,r1 - lsl r8 - adc r8,r1 - lsl r9 - adc r9,r1 - lsl r9 - adc r9,r1 - lsl r10 - adc r10,r1 - lsl r10 - adc r10,r1 - lsl r11 - adc r11,r1 - lsl r11 - adc r11,r1 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - com r22 - com r23 - com r2 - com r3 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r8 - 
movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - com r22 - com r23 - com r2 - com r3 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - 
dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,119 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r15 - ror r14 - ror r13 - ror r12 - lsr r15 - ror r14 - ror r13 - ror r12 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,17 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -1024: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - bst r22,0 - bld r18,0 - bst r4,0 - bld r18,1 - bst r8,0 - bld r18,2 - bst r12,0 - bld r18,3 - bst r22,1 - bld r18,4 - bst r4,1 - bld r18,5 - bst r8,1 - bld r18,6 - bst r12,1 - bld r18,7 - bst r22,2 - bld r19,0 - bst r4,2 - bld r19,1 - bst r8,2 - bld r19,2 - bst r12,2 - bld r19,3 - bst r22,3 - bld r19,4 - bst r4,3 - bld r19,5 - bst r8,3 - bld r19,6 - bst r12,3 - bld r19,7 - bst r22,4 - bld r20,0 - bst r4,4 - bld r20,1 - bst r8,4 - bld r20,2 - bst r12,4 - bld r20,3 - bst r22,5 - bld r20,4 - bst r4,5 - bld r20,5 - bst r8,5 - bld r20,6 - bst r12,5 - bld r20,7 - bst r22,6 - bld r21,0 - bst r4,6 - bld r21,1 - bst r8,6 - bld r21,2 - bst r12,6 - bld r21,3 - bst r22,7 - bld r21,4 - bst r4,7 - bld r21,5 - bst r8,7 
- bld r21,6 - bst r12,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r23,0 - bld r18,0 - bst r5,0 - bld r18,1 - bst r9,0 - bld r18,2 - bst r13,0 - bld r18,3 - bst r23,1 - bld r18,4 - bst r5,1 - bld r18,5 - bst r9,1 - bld r18,6 - bst r13,1 - bld r18,7 - bst r23,2 - bld r19,0 - bst r5,2 - bld r19,1 - bst r9,2 - bld r19,2 - bst r13,2 - bld r19,3 - bst r23,3 - bld r19,4 - bst r5,3 - bld r19,5 - bst r9,3 - bld r19,6 - bst r13,3 - bld r19,7 - bst r23,4 - bld r20,0 - bst r5,4 - bld r20,1 - bst r9,4 - bld r20,2 - bst r13,4 - bld r20,3 - bst r23,5 - bld r20,4 - bst r5,5 - bld r20,5 - bst r9,5 - bld r20,6 - bst r13,5 - bld r20,7 - bst r23,6 - bld r21,0 - bst r5,6 - bld r21,1 - bst r9,6 - bld r21,2 - bst r13,6 - bld r21,3 - bst r23,7 - bld r21,4 - bst r5,7 - bld r21,5 - bst r9,7 - bld r21,6 - bst r13,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r2,0 - bld r18,0 - bst r6,0 - bld r18,1 - bst r10,0 - bld r18,2 - bst r14,0 - bld r18,3 - bst r2,1 - bld r18,4 - bst r6,1 - bld r18,5 - bst r10,1 - bld r18,6 - bst r14,1 - bld r18,7 - bst r2,2 - bld r19,0 - bst r6,2 - bld r19,1 - bst r10,2 - bld r19,2 - bst r14,2 - bld r19,3 - bst r2,3 - bld r19,4 - bst r6,3 - bld r19,5 - bst r10,3 - bld r19,6 - bst r14,3 - bld r19,7 - bst r2,4 - bld r20,0 - bst r6,4 - bld r20,1 - bst r10,4 - bld r20,2 - bst r14,4 - bld r20,3 - bst r2,5 - bld r20,4 - bst r6,5 - bld r20,5 - bst r10,5 - bld r20,6 - bst r14,5 - bld r20,7 - bst r2,6 - bld r21,0 - bst r6,6 - bld r21,1 - bst r10,6 - bld r21,2 - bst r14,6 - bld r21,3 - bst r2,7 - bld r21,4 - bst r6,7 - bld r21,5 - bst r10,7 - bld r21,6 - bst r14,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r3,0 - bld r18,0 - bst r7,0 - bld r18,1 - bst r11,0 - bld r18,2 - bst r15,0 - bld r18,3 - bst r3,1 - bld r18,4 - bst r7,1 - bld r18,5 - bst r11,1 - bld r18,6 - bst r15,1 - bld r18,7 - bst r3,2 - bld r19,0 - bst r7,2 - bld r19,1 - bst r11,2 - bld r19,2 - bst r15,2 - bld r19,3 - bst r3,3 - bld r19,4 - bst r7,3 - bld r19,5 - bst r11,3 - bld r19,6 - bst r15,3 - bld r19,7 - bst r3,4 - bld r20,0 - bst r7,4 - bld r20,1 - bst r11,4 - bld r20,2 - bst r15,4 - bld r20,3 - bst r3,5 - bld r20,4 - bst r7,5 - bld r20,5 - bst r11,5 - bld r20,6 - bst r15,5 - bld r20,7 - bst r3,6 - bld r21,0 - bst r7,6 - bld r21,1 - bst r11,6 - bld r21,2 - bst r15,6 - bld r21,3 - bst r3,7 - bld r21,4 - bst r7,7 - bld r21,5 - bst r11,7 - bld r21,6 - bst r15,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - pop r0 - pop r0 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128n_decrypt, .-gift128n_decrypt - - .text -.global gift128t_encrypt - .type gift128t_encrypt, @function -gift128t_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 20 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r2,0 - bst r20,1 - bld r6,0 - bst r20,2 - bld r10,0 - bst r20,3 - bld r14,0 - bst r20,4 - bld r2,1 - bst r20,5 - bld r6,1 - bst r20,6 - bld r10,1 - bst r20,7 - bld r14,1 - bst r21,0 - bld r2,2 - bst r21,1 - bld r6,2 - bst r21,2 - bld r10,2 - bst r21,3 - bld r14,2 - bst r21,4 - bld r2,3 - bst r21,5 - bld r6,3 - bst r21,6 - bld r10,3 - bst r21,7 - bld r14,3 - bst r22,0 - bld r2,4 - bst 
r22,1 - bld r6,4 - bst r22,2 - bld r10,4 - bst r22,3 - bld r14,4 - bst r22,4 - bld r2,5 - bst r22,5 - bld r6,5 - bst r22,6 - bld r10,5 - bst r22,7 - bld r14,5 - bst r23,0 - bld r2,6 - bst r23,1 - bld r6,6 - bst r23,2 - bld r10,6 - bst r23,3 - bld r14,6 - bst r23,4 - bld r2,7 - bst r23,5 - bld r6,7 - bst r23,6 - bld r10,7 - bst r23,7 - bld r14,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r3,0 - bst r20,1 - bld r7,0 - bst r20,2 - bld r11,0 - bst r20,3 - bld r15,0 - bst r20,4 - bld r3,1 - bst r20,5 - bld r7,1 - bst r20,6 - bld r11,1 - bst r20,7 - bld r15,1 - bst r21,0 - bld r3,2 - bst r21,1 - bld r7,2 - bst r21,2 - bld r11,2 - bst r21,3 - bld r15,2 - bst r21,4 - bld r3,3 - bst r21,5 - bld r7,3 - bst r21,6 - bld r11,3 - bst r21,7 - bld r15,3 - bst r22,0 - bld r3,4 - bst r22,1 - bld r7,4 - bst r22,2 - bld r11,4 - bst r22,3 - bld r15,4 - bst r22,4 - bld r3,5 - bst r22,5 - bld r7,5 - bst r22,6 - bld r11,5 - bst r22,7 - bld r15,5 - bst r23,0 - bld r3,6 - bst r23,1 - bld r7,6 - bst r23,2 - bld r11,6 - bst r23,3 - bld r15,6 - bst r23,4 - bld r3,7 - bst r23,5 - bld r7,7 - bst r23,6 - bld r11,7 - bst r23,7 - bld r15,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r4,0 - bst r20,1 - bld r8,0 - bst r20,2 - bld r12,0 - bst r20,3 - bld r24,0 - bst r20,4 - bld r4,1 - bst r20,5 - bld r8,1 - bst r20,6 - bld r12,1 - bst r20,7 - bld r24,1 - bst r21,0 - bld r4,2 - bst r21,1 - bld r8,2 - bst r21,2 - bld r12,2 - bst r21,3 - bld r24,2 - bst r21,4 - bld r4,3 - bst r21,5 - bld r8,3 - bst r21,6 - bld r12,3 - bst r21,7 - bld r24,3 - bst r22,0 - bld r4,4 - bst r22,1 - bld r8,4 - bst r22,2 - bld r12,4 - bst r22,3 - bld r24,4 - bst r22,4 - bld r4,5 - bst r22,5 - bld r8,5 - bst r22,6 - bld r12,5 - bst r22,7 - bld r24,5 - bst r23,0 - bld r4,6 - bst r23,1 - bld r8,6 - bst r23,2 - bld r12,6 - bst r23,3 - bld r24,6 - bst r23,4 - bld r4,7 - bst r23,5 - bld r8,7 - bst r23,6 - bld r12,7 - bst r23,7 - bld r24,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r5,0 - bst r20,1 - bld r9,0 - bst r20,2 - bld r13,0 - bst r20,3 - bld r25,0 - bst r20,4 - bld r5,1 - bst r20,5 - bld r9,1 - bst r20,6 - bld r13,1 - bst r20,7 - bld r25,1 - bst r21,0 - bld r5,2 - bst r21,1 - bld r9,2 - bst r21,2 - bld r13,2 - bst r21,3 - bld r25,2 - bst r21,4 - bld r5,3 - bst r21,5 - bld r9,3 - bst r21,6 - bld r13,3 - bst r21,7 - bld r25,3 - bst r22,0 - bld r5,4 - bst r22,1 - bld r9,4 - bst r22,2 - bld r13,4 - bst r22,3 - bld r25,4 - bst r22,4 - bld r5,5 - bst r22,5 - bld r9,5 - bst r22,6 - bld r13,5 - bst r22,7 - bld r25,5 - bst r23,0 - bld r5,6 - bst r23,1 - bld r9,6 - bst r23,2 - bld r13,6 - bst r23,3 - bld r25,6 - bst r23,4 - bld r5,7 - bst r23,5 - bld r9,7 - bst r23,6 - bld r13,7 - bst r23,7 - bld r25,7 - movw r26,r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - rcall 311f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 311f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 311f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 311f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 311f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 311f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 311f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 311f - rjmp 1049f -311: - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - 
eor r9,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - com r14 - com r15 - com r24 - com r25 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - movw r20,r6 - movw r22,r8 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,204 - andi r21,204 - andi r22,204 - andi r23,204 - lsr r9 - ror r8 - ror r7 - ror r6 - lsr r9 - ror r8 - ror r7 - ror r6 - ldi r19,51 - and r6,r19 - and r7,r19 - and r8,r19 - and r9,r19 - or r6,r20 - or r7,r21 - or r8,r22 - or r9,r23 - movw r20,r10 - movw r22,r12 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,238 - andi r21,238 - andi r22,238 - andi r23,238 - lsr r13 - ror r12 - ror r11 - ror r10 - lsr r13 - ror r12 - ror r11 - ror r10 - lsr r13 - ror r12 - ror r11 - ror r10 - ldi r17,17 - and r10,r17 - and r11,r17 - and r12,r17 - and r13,r17 - or r10,r20 - or r11,r21 - or r12,r22 - or r13,r23 - movw r20,r14 - movw r22,r24 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,136 - andi r21,136 - andi r22,136 - andi r23,136 - lsr r25 - ror r24 - ror r15 - ror r14 - ldi r16,119 - and r14,r16 - and r15,r16 - andi r24,119 - andi r25,119 - or r14,r20 - or r15,r21 - or r24,r22 - or r25,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r24 - and r0,r12 - eor r8,r0 - mov r0,r25 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r8 - and r0,r4 - eor r24,r0 - mov r0,r9 - and r0,r5 - eor r25,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r24 - or r0,r8 - eor r12,r0 - mov r0,r25 - or r0,r9 - eor r13,r0 - eor r2,r10 - eor r3,r11 - eor r4,r12 - eor r5,r13 - eor r6,r2 - eor r7,r3 - eor r8,r4 - eor r9,r5 - com r2 - com r3 - com r4 - com r5 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r24 - and r0,r8 - eor r12,r0 - mov r0,r25 - and r0,r9 - eor r13,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r1 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 
- ror r0 - lsr r5 - ror r4 - ror r0 - or r5,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r12 - rol r13 - adc r12,r1 - lsl r12 - rol r13 - adc r12,r1 - lsl r12 - rol r13 - adc r12,r1 - lsl r12 - rol r13 - adc r12,r1 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r14,r20 - eor r15,r21 - eor r24,r22 - eor r25,r23 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - com r14 - com r15 - com r24 - com r25 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - movw r20,r6 - movw r22,r8 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - andi r20,85 - andi r21,85 - andi r22,85 - andi r23,85 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - mov r0,r12 - mov r12,r10 - mov r10,r0 - mov r0,r13 - mov r13,r11 - mov r11,r0 - movw r20,r10 - movw r22,r12 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r10 - eor r21,r11 - andi r20,85 - andi r21,85 - eor r10,r20 - eor r11,r21 - mov r22,r1 - mov r23,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - mov r0,r24 - mov r24,r14 - mov r14,r0 - mov r0,r25 - mov r25,r15 - mov r15,r0 - movw r20,r24 - lsr r21 - ror r20 - eor r20,r24 - eor r21,r25 - andi r20,85 - andi r21,85 - eor r24,r20 - eor r25,r21 - lsl r20 - rol r21 - eor r24,r20 - eor r25,r21 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - 
lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r24 - and r0,r12 - eor r8,r0 - mov r0,r25 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r8 - and r0,r4 - eor r24,r0 - mov r0,r9 - and r0,r5 - eor r25,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r24 - or r0,r8 - eor r12,r0 - mov r0,r25 - or r0,r9 - eor r13,r0 - eor r2,r10 - eor r3,r11 - eor r4,r12 - eor r5,r13 - eor r6,r2 - eor r7,r3 - eor r8,r4 - eor r9,r5 - com r2 - com r3 - com r4 - com r5 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r24 - and r0,r8 - eor r12,r0 - mov r0,r25 - and r0,r9 - eor r13,r0 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r5 - adc r5,r1 - lsl r5 - adc r5,r1 - swap r6 - swap r7 - swap r8 - swap r9 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - mov r0,r1 - lsr r12 - ror r0 - lsr r12 - ror r0 - or r12,r0 - mov r0,r1 - lsr r13 - ror r0 - lsr r13 - ror r0 - or r13,r0 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r14,r20 - eor r15,r21 - eor r24,r22 - eor r25,r23 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - com r14 - com r15 - com r24 - com r25 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - mov r0,r8 - mov r8,r6 - mov r6,r0 - mov r0,r9 - mov r9,r7 - mov r7,r0 - mov r0,r10 - mov r10,r11 - mov r11,r12 - mov r12,r13 - mov r13,r0 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor 
r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - eor r14,r2 - eor r15,r3 - eor r24,r4 - eor r25,r5 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - ret -1049: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - bst r2,0 - bld r20,0 - bst r6,0 - bld r20,1 - bst r10,0 - bld r20,2 - bst r14,0 - bld r20,3 - bst r2,1 - bld r20,4 - bst r6,1 - bld r20,5 - bst r10,1 - bld r20,6 - bst r14,1 - bld r20,7 - bst r2,2 - bld r21,0 - bst r6,2 - bld r21,1 - bst r10,2 - bld r21,2 - bst r14,2 - bld r21,3 - bst r2,3 - bld r21,4 - bst r6,3 - bld r21,5 - bst r10,3 - bld r21,6 - bst r14,3 - bld r21,7 - bst r2,4 - bld r22,0 - bst r6,4 - bld r22,1 - bst r10,4 - bld r22,2 - bst r14,4 - bld r22,3 - bst r2,5 - bld r22,4 - bst r6,5 - bld r22,5 - bst r10,5 - bld r22,6 - bst r14,5 - bld r22,7 - bst r2,6 - bld r23,0 - bst r6,6 - bld r23,1 - bst r10,6 - bld r23,2 - bst r14,6 - bld r23,3 - bst r2,7 - bld r23,4 - bst r6,7 - bld r23,5 - bst r10,7 - bld r23,6 - bst r14,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r3,0 - bld r20,0 - bst r7,0 - bld r20,1 - bst r11,0 - bld r20,2 - bst r15,0 - bld r20,3 - bst r3,1 - bld r20,4 - bst r7,1 - bld r20,5 - bst r11,1 - bld r20,6 - bst r15,1 - bld r20,7 - bst r3,2 - bld r21,0 - bst r7,2 - bld r21,1 - bst r11,2 - bld r21,2 - bst r15,2 - bld r21,3 - bst r3,3 - bld r21,4 - bst r7,3 - bld r21,5 - bst r11,3 - bld r21,6 - bst r15,3 - bld r21,7 - bst r3,4 - bld r22,0 - bst r7,4 - bld r22,1 - bst r11,4 - bld r22,2 - bst r15,4 - bld r22,3 - bst r3,5 - bld r22,4 - bst r7,5 - bld r22,5 - bst r11,5 - bld r22,6 - bst r15,5 - bld r22,7 - bst r3,6 - bld r23,0 - bst r7,6 - bld r23,1 - bst r11,6 - bld r23,2 - bst r15,6 - bld r23,3 - bst r3,7 - bld r23,4 - bst r7,7 - bld r23,5 - bst r11,7 - bld r23,6 - bst r15,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r4,0 - bld r20,0 - bst r8,0 - bld r20,1 - bst r12,0 - bld r20,2 - bst r24,0 - bld r20,3 - bst r4,1 - bld r20,4 - bst r8,1 - bld r20,5 - bst r12,1 - bld r20,6 - bst r24,1 - bld r20,7 - bst r4,2 - bld r21,0 - bst r8,2 - bld r21,1 - bst r12,2 - bld r21,2 - bst r24,2 - bld r21,3 - bst r4,3 - bld r21,4 - bst r8,3 - bld r21,5 - bst r12,3 - bld r21,6 - bst r24,3 - bld r21,7 - bst r4,4 - bld r22,0 - bst r8,4 - bld r22,1 - bst r12,4 - bld r22,2 - bst r24,4 - bld r22,3 - bst r4,5 - bld r22,4 - bst r8,5 - bld r22,5 - bst r12,5 - bld r22,6 - bst r24,5 - bld r22,7 - bst r4,6 - bld r23,0 - bst r8,6 - bld r23,1 - bst r12,6 - bld r23,2 - bst r24,6 - bld r23,3 - bst r4,7 - bld r23,4 - bst r8,7 - bld r23,5 - bst r12,7 - bld r23,6 - bst r24,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r5,0 - bld r20,0 - bst r9,0 - bld r20,1 - bst r13,0 - bld r20,2 - bst r25,0 - bld r20,3 - bst r5,1 - bld r20,4 - bst r9,1 - bld r20,5 - bst r13,1 - bld r20,6 - bst r25,1 - 
bld r20,7 - bst r5,2 - bld r21,0 - bst r9,2 - bld r21,1 - bst r13,2 - bld r21,2 - bst r25,2 - bld r21,3 - bst r5,3 - bld r21,4 - bst r9,3 - bld r21,5 - bst r13,3 - bld r21,6 - bst r25,3 - bld r21,7 - bst r5,4 - bld r22,0 - bst r9,4 - bld r22,1 - bst r13,4 - bld r22,2 - bst r25,4 - bld r22,3 - bst r5,5 - bld r22,4 - bst r9,5 - bld r22,5 - bst r13,5 - bld r22,6 - bst r25,5 - bld r22,7 - bst r5,6 - bld r23,0 - bst r9,6 - bld r23,1 - bst r13,6 - bld r23,2 - bst r25,6 - bld r23,3 - bst r5,7 - bld r23,4 - bst r9,7 - bld r23,5 - bst r13,7 - bld r23,6 - bst r25,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - pop r0 - pop r0 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128t_encrypt, .-gift128t_encrypt - - .text -.global gift128t_decrypt - .type gift128t_decrypt, @function -gift128t_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 20 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r2,0 - bst r20,1 - bld r6,0 - bst r20,2 - bld r10,0 - bst r20,3 - bld r14,0 - bst r20,4 - bld r2,1 - bst r20,5 - bld r6,1 - bst r20,6 - bld r10,1 - bst r20,7 - bld r14,1 - bst r21,0 - bld r2,2 - bst r21,1 - bld r6,2 - bst r21,2 - bld r10,2 - bst r21,3 - bld r14,2 - bst r21,4 - bld r2,3 - bst r21,5 - bld r6,3 - bst r21,6 - bld r10,3 - bst r21,7 - bld r14,3 - bst r22,0 - bld r2,4 - bst r22,1 - bld r6,4 - bst r22,2 - bld r10,4 - bst r22,3 - bld r14,4 - bst r22,4 - bld r2,5 - bst r22,5 - bld r6,5 - bst r22,6 - bld r10,5 - bst r22,7 - bld r14,5 - bst r23,0 - bld r2,6 - bst r23,1 - bld r6,6 - bst r23,2 - bld r10,6 - bst r23,3 - bld r14,6 - bst r23,4 - bld r2,7 - bst r23,5 - bld r6,7 - bst r23,6 - bld r10,7 - bst r23,7 - bld r14,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r3,0 - bst r20,1 - bld r7,0 - bst r20,2 - bld r11,0 - bst r20,3 - bld r15,0 - bst r20,4 - bld r3,1 - bst r20,5 - bld r7,1 - bst r20,6 - bld r11,1 - bst r20,7 - bld r15,1 - bst r21,0 - bld r3,2 - bst r21,1 - bld r7,2 - bst r21,2 - bld r11,2 - bst r21,3 - bld r15,2 - bst r21,4 - bld r3,3 - bst r21,5 - bld r7,3 - bst r21,6 - bld r11,3 - bst r21,7 - bld r15,3 - bst r22,0 - bld r3,4 - bst r22,1 - bld r7,4 - bst r22,2 - bld r11,4 - bst r22,3 - bld r15,4 - bst r22,4 - bld r3,5 - bst r22,5 - bld r7,5 - bst r22,6 - bld r11,5 - bst r22,7 - bld r15,5 - bst r23,0 - bld r3,6 - bst r23,1 - bld r7,6 - bst r23,2 - bld r11,6 - bst r23,3 - bld r15,6 - bst r23,4 - bld r3,7 - bst r23,5 - bld r7,7 - bst r23,6 - bld r11,7 - bst r23,7 - bld r15,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r4,0 - bst r20,1 - bld r8,0 - bst r20,2 - bld r12,0 - bst r20,3 - bld r24,0 - bst r20,4 - bld r4,1 - bst r20,5 - bld r8,1 - bst r20,6 - bld r12,1 - bst r20,7 - bld r24,1 - bst r21,0 - bld r4,2 - bst r21,1 - bld r8,2 - bst r21,2 - bld r12,2 - bst r21,3 - bld r24,2 - bst r21,4 - bld r4,3 - bst r21,5 - bld r8,3 - bst r21,6 - bld r12,3 - bst r21,7 - bld r24,3 - bst r22,0 - bld r4,4 - bst r22,1 - bld r8,4 - bst r22,2 - bld r12,4 - bst r22,3 - bld r24,4 - bst r22,4 - bld r4,5 - bst r22,5 - bld r8,5 - bst r22,6 - bld r12,5 - bst r22,7 - bld r24,5 - bst r23,0 - bld r4,6 - bst r23,1 - bld r8,6 - bst r23,2 - bld r12,6 - bst r23,3 - bld r24,6 - 
bst r23,4 - bld r4,7 - bst r23,5 - bld r8,7 - bst r23,6 - bld r12,7 - bst r23,7 - bld r24,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r5,0 - bst r20,1 - bld r9,0 - bst r20,2 - bld r13,0 - bst r20,3 - bld r25,0 - bst r20,4 - bld r5,1 - bst r20,5 - bld r9,1 - bst r20,6 - bld r13,1 - bst r20,7 - bld r25,1 - bst r21,0 - bld r5,2 - bst r21,1 - bld r9,2 - bst r21,2 - bld r13,2 - bst r21,3 - bld r25,2 - bst r21,4 - bld r5,3 - bst r21,5 - bld r9,3 - bst r21,6 - bld r13,3 - bst r21,7 - bld r25,3 - bst r22,0 - bld r5,4 - bst r22,1 - bld r9,4 - bst r22,2 - bld r13,4 - bst r22,3 - bld r25,4 - bst r22,4 - bld r5,5 - bst r22,5 - bld r9,5 - bst r22,6 - bld r13,5 - bst r22,7 - bld r25,5 - bst r23,0 - bld r5,6 - bst r23,1 - bld r9,6 - bst r23,2 - bld r13,6 - bst r23,3 - bld r25,6 - bst r23,4 - bld r5,7 - bst r23,5 - bld r9,7 - bst r23,6 - bld r13,7 - bst r23,7 - bld r25,7 - movw r26,r30 - subi r26,192 - sbci r27,254 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,160 - rcall 314f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 314f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 314f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 314f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 314f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 314f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 314f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 314f - rjmp 1052f -314: - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - eor r14,r2 - eor r15,r3 - eor r24,r4 - eor r25,r5 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - dec r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - ld r23,-X - ld r22,-X - ld r21,-X - ld r20,-X - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - ld r23,-X - ld r22,-X - ld r21,-X - ld r20,-X - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - mov r0,r8 - mov r8,r6 - mov r6,r0 - mov r0,r9 - mov r9,r7 - mov r7,r0 - mov r0,r13 - mov r13,r12 - mov r12,r11 - mov r11,r10 - mov r10,r0 - mov r0,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - com r14 - com r15 - com r24 - com r25 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and 
r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - dec r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - eor r14,r20 - eor r15,r21 - eor r24,r22 - eor r25,r23 - ld r23,-X - ld r22,-X - ld r21,-X - ld r20,-X - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - ld r23,-X - ld r22,-X - ld r21,-X - ld r20,-X - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - mov r0,r1 - lsr r2 - ror r0 - lsr r2 - ror r0 - or r2,r0 - mov r0,r1 - lsr r3 - ror r0 - lsr r3 - ror r0 - or r3,r0 - mov r0,r1 - lsr r4 - ror r0 - lsr r4 - ror r0 - or r4,r0 - mov r0,r1 - lsr r5 - ror r0 - lsr r5 - ror r0 - or r5,r0 - swap r6 - swap r7 - swap r8 - swap r9 - lsl r10 - adc r10,r1 - lsl r10 - adc r10,r1 - lsl r11 - adc r11,r1 - lsl r11 - adc r11,r1 - lsl r12 - adc r12,r1 - lsl r12 - adc r12,r1 - lsl r13 - adc r13,r1 - lsl r13 - adc r13,r1 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r24 - and r0,r8 - eor r12,r0 - mov r0,r25 - and r0,r9 - eor r13,r0 - com r2 - com r3 - com r4 - com r5 - eor r6,r2 - eor r7,r3 - eor r8,r4 - eor r9,r5 - eor r2,r10 - eor r3,r11 - eor r4,r12 - eor r5,r13 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r24 - or r0,r8 - eor r12,r0 - mov r0,r25 - or r0,r9 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r8 - and r0,r4 - eor r24,r0 - mov r0,r9 - and r0,r5 - eor r25,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r24 - and r0,r12 - eor r8,r0 - mov r0,r25 - and r0,r13 - eor r9,r0 - dec r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - ld r23,-X - ld r22,-X - ld r21,-X - ld r20,-X - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - ld r23,-X - ld r22,-X - ld r21,-X - ld r20,-X - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - movw r20,r6 - movw r22,r8 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - andi r20,85 - andi r21,85 - andi r22,85 - andi r23,85 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - movw r20,r10 - movw r22,r12 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r10 - eor r21,r11 - andi r20,85 - andi r21,85 - eor r10,r20 - eor r11,r21 - mov 
r22,r1 - mov r23,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - mov r0,r12 - mov r12,r10 - mov r10,r0 - mov r0,r13 - mov r13,r11 - mov r11,r0 - movw r20,r24 - lsr r21 - ror r20 - eor r20,r24 - eor r21,r25 - andi r20,85 - andi r21,85 - eor r24,r20 - eor r25,r21 - lsl r20 - rol r21 - eor r24,r20 - eor r25,r21 - mov r0,r24 - mov r24,r14 - mov r14,r0 - mov r0,r25 - mov r25,r15 - mov r15,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - com r14 - com r15 - com r24 - com r25 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - dec r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - eor r14,r20 - eor r15,r21 - eor r24,r22 - eor r25,r23 - ld r23,-X - ld r22,-X - ld r21,-X - ld r20,-X - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - ld r23,-X - ld r22,-X - ld r21,-X - ld r20,-X - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r4 - rol r5 - adc r4,r1 - lsl r4 - rol r5 - adc r4,r1 - lsl r4 - rol r5 - adc r4,r1 - lsl r4 - rol r5 - adc r4,r1 - mov r0,r7 - mov r7,r6 - mov r6,r0 - mov r0,r9 - mov r9,r8 - mov r8,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - or r13,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r24 - and r0,r8 - eor r12,r0 - mov r0,r25 - and r0,r9 - eor r13,r0 - com r2 - com r3 - com r4 - com r5 - eor r6,r2 - eor r7,r3 - eor r8,r4 - eor r9,r5 - eor r2,r10 - eor r3,r11 - eor r4,r12 - eor r5,r13 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r24 - or r0,r8 - eor r12,r0 - mov r0,r25 - or r0,r9 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r8 - and r0,r4 - eor r24,r0 - mov r0,r9 - and r0,r5 - eor r25,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r24 - and r0,r12 - eor r8,r0 - mov r0,r25 - and r0,r13 - eor r9,r0 - dec r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else 
- lpm - mov r23,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - ld r23,-X - ld r22,-X - ld r21,-X - ld r20,-X - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - ld r23,-X - ld r22,-X - ld r21,-X - ld r20,-X - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - movw r20,r6 - movw r22,r8 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,204 - andi r21,204 - andi r22,204 - andi r23,204 - lsr r9 - ror r8 - ror r7 - ror r6 - lsr r9 - ror r8 - ror r7 - ror r6 - ldi r19,51 - and r6,r19 - and r7,r19 - and r8,r19 - and r9,r19 - or r6,r20 - or r7,r21 - or r8,r22 - or r9,r23 - movw r20,r10 - movw r22,r12 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,136 - andi r21,136 - andi r22,136 - andi r23,136 - lsr r13 - ror r12 - ror r11 - ror r10 - ldi r17,119 - and r10,r17 - and r11,r17 - and r12,r17 - and r13,r17 - or r10,r20 - or r11,r21 - or r12,r22 - or r13,r23 - movw r20,r14 - movw r22,r24 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,238 - andi r21,238 - andi r22,238 - andi r23,238 - lsr r25 - ror r24 - ror r15 - ror r14 - lsr r25 - ror r24 - ror r15 - ror r14 - lsr r25 - ror r24 - ror r15 - ror r14 - ldi r16,17 - and r14,r16 - and r15,r16 - andi r24,17 - andi r25,17 - or r14,r20 - or r15,r21 - or r24,r22 - or r25,r23 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - com r14 - com r15 - com r24 - com r25 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - ret -1052: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - bst r2,0 - bld r20,0 - bst r6,0 - bld r20,1 - bst r10,0 - bld r20,2 - bst r14,0 - bld r20,3 - bst r2,1 - bld r20,4 - bst r6,1 - bld r20,5 - bst r10,1 - bld r20,6 - bst r14,1 - bld r20,7 - bst r2,2 - bld r21,0 - bst r6,2 - bld r21,1 - bst r10,2 - bld r21,2 - bst r14,2 - bld r21,3 - bst r2,3 - bld r21,4 - bst r6,3 - bld r21,5 - bst r10,3 - bld r21,6 - bst r14,3 - bld r21,7 - bst r2,4 - bld r22,0 - bst r6,4 - bld r22,1 - bst r10,4 - bld r22,2 - bst r14,4 - bld r22,3 - bst r2,5 - bld r22,4 - bst r6,5 - bld r22,5 - bst r10,5 - bld r22,6 - bst r14,5 - bld r22,7 - bst r2,6 - bld r23,0 - bst r6,6 - bld r23,1 - bst r10,6 - bld r23,2 - bst r14,6 - bld r23,3 - bst r2,7 - bld r23,4 - bst r6,7 - bld r23,5 - bst r10,7 - bld r23,6 - bst r14,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r3,0 - bld r20,0 - bst r7,0 - bld 
r20,1 - bst r11,0 - bld r20,2 - bst r15,0 - bld r20,3 - bst r3,1 - bld r20,4 - bst r7,1 - bld r20,5 - bst r11,1 - bld r20,6 - bst r15,1 - bld r20,7 - bst r3,2 - bld r21,0 - bst r7,2 - bld r21,1 - bst r11,2 - bld r21,2 - bst r15,2 - bld r21,3 - bst r3,3 - bld r21,4 - bst r7,3 - bld r21,5 - bst r11,3 - bld r21,6 - bst r15,3 - bld r21,7 - bst r3,4 - bld r22,0 - bst r7,4 - bld r22,1 - bst r11,4 - bld r22,2 - bst r15,4 - bld r22,3 - bst r3,5 - bld r22,4 - bst r7,5 - bld r22,5 - bst r11,5 - bld r22,6 - bst r15,5 - bld r22,7 - bst r3,6 - bld r23,0 - bst r7,6 - bld r23,1 - bst r11,6 - bld r23,2 - bst r15,6 - bld r23,3 - bst r3,7 - bld r23,4 - bst r7,7 - bld r23,5 - bst r11,7 - bld r23,6 - bst r15,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r4,0 - bld r20,0 - bst r8,0 - bld r20,1 - bst r12,0 - bld r20,2 - bst r24,0 - bld r20,3 - bst r4,1 - bld r20,4 - bst r8,1 - bld r20,5 - bst r12,1 - bld r20,6 - bst r24,1 - bld r20,7 - bst r4,2 - bld r21,0 - bst r8,2 - bld r21,1 - bst r12,2 - bld r21,2 - bst r24,2 - bld r21,3 - bst r4,3 - bld r21,4 - bst r8,3 - bld r21,5 - bst r12,3 - bld r21,6 - bst r24,3 - bld r21,7 - bst r4,4 - bld r22,0 - bst r8,4 - bld r22,1 - bst r12,4 - bld r22,2 - bst r24,4 - bld r22,3 - bst r4,5 - bld r22,4 - bst r8,5 - bld r22,5 - bst r12,5 - bld r22,6 - bst r24,5 - bld r22,7 - bst r4,6 - bld r23,0 - bst r8,6 - bld r23,1 - bst r12,6 - bld r23,2 - bst r24,6 - bld r23,3 - bst r4,7 - bld r23,4 - bst r8,7 - bld r23,5 - bst r12,7 - bld r23,6 - bst r24,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r5,0 - bld r20,0 - bst r9,0 - bld r20,1 - bst r13,0 - bld r20,2 - bst r25,0 - bld r20,3 - bst r5,1 - bld r20,4 - bst r9,1 - bld r20,5 - bst r13,1 - bld r20,6 - bst r25,1 - bld r20,7 - bst r5,2 - bld r21,0 - bst r9,2 - bld r21,1 - bst r13,2 - bld r21,2 - bst r25,2 - bld r21,3 - bst r5,3 - bld r21,4 - bst r9,3 - bld r21,5 - bst r13,3 - bld r21,6 - bst r25,3 - bld r21,7 - bst r5,4 - bld r22,0 - bst r9,4 - bld r22,1 - bst r13,4 - bld r22,2 - bst r25,4 - bld r22,3 - bst r5,5 - bld r22,4 - bst r9,5 - bld r22,5 - bst r13,5 - bld r22,6 - bst r25,5 - bld r22,7 - bst r5,6 - bld r23,0 - bst r9,6 - bld r23,1 - bst r13,6 - bld r23,2 - bst r25,6 - bld r23,3 - bst r5,7 - bld r23,4 - bst r9,7 - bld r23,5 - bst r13,7 - bld r23,6 - bst r25,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - pop r0 - pop r0 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128t_decrypt, .-gift128t_decrypt - -#endif - -#endif diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128n-small-avr.S b/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128n-small-avr.S deleted file mode 100644 index 6f2d68b..0000000 --- a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128n-small-avr.S +++ /dev/null @@ -1,9331 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - 
.byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - .byte 3 - .byte 3 - .byte 39 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - .byte 2 - .byte 0 - .byte 80 - .byte 20 - .byte 129 - .byte 1 - .byte 2 - .byte 1 - .byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128n_init - .type gift128n_init, @function -gift128n_init: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 16 - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - ldi r24,4 -33: - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - mov r0,r22 - mov r22,r4 - mov r4,r0 - mov r0,r23 - mov r23,r5 - mov r5,r0 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - dec r24 - breq 5115f - rjmp 33b -5115: - subi r30,80 - sbc r31,r1 - ldi r24,2 -119: - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw 
r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - st Z,r3 - std Z+1,r23 - std Z+2,r2 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+4,r3 - std Z+5,r23 - std Z+6,r2 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor 
r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+8,r3 - std Z+9,r23 - std Z+10,r2 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+12,r3 - std Z+13,r23 - std Z+14,r2 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor 
r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r3 - std Z+17,r23 - std Z+18,r2 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+20,r3 - std Z+21,r23 - std Z+22,r2 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov 
r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+24,r3 - std Z+25,r23 - std Z+26,r2 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+28,r3 - std Z+29,r23 - std Z+30,r2 - std Z+31,r22 - dec r24 - breq 1268f - adiw r30,40 - rjmp 119b -1268: - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size gift128n_init, .-gift128n_init - - .text -.global gift128n_encrypt - .type gift128n_encrypt, @function -gift128n_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ldi r24,20 -1: - ld r22,Z+ - ld r23,Z+ - ld r2,Z+ - ld r3,Z+ - std Y+1,r22 - std Y+2,r23 - std Y+3,r2 - std Y+4,r3 - adiw r28,4 - dec r24 - brne 1b - subi r28,80 - sbc r29,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r22,0 - bst r18,1 - bld r4,0 - bst r18,2 
- bld r8,0 - bst r18,3 - bld r12,0 - bst r18,4 - bld r22,1 - bst r18,5 - bld r4,1 - bst r18,6 - bld r8,1 - bst r18,7 - bld r12,1 - bst r19,0 - bld r22,2 - bst r19,1 - bld r4,2 - bst r19,2 - bld r8,2 - bst r19,3 - bld r12,2 - bst r19,4 - bld r22,3 - bst r19,5 - bld r4,3 - bst r19,6 - bld r8,3 - bst r19,7 - bld r12,3 - bst r20,0 - bld r22,4 - bst r20,1 - bld r4,4 - bst r20,2 - bld r8,4 - bst r20,3 - bld r12,4 - bst r20,4 - bld r22,5 - bst r20,5 - bld r4,5 - bst r20,6 - bld r8,5 - bst r20,7 - bld r12,5 - bst r21,0 - bld r22,6 - bst r21,1 - bld r4,6 - bst r21,2 - bld r8,6 - bst r21,3 - bld r12,6 - bst r21,4 - bld r22,7 - bst r21,5 - bld r4,7 - bst r21,6 - bld r8,7 - bst r21,7 - bld r12,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r23,0 - bst r18,1 - bld r5,0 - bst r18,2 - bld r9,0 - bst r18,3 - bld r13,0 - bst r18,4 - bld r23,1 - bst r18,5 - bld r5,1 - bst r18,6 - bld r9,1 - bst r18,7 - bld r13,1 - bst r19,0 - bld r23,2 - bst r19,1 - bld r5,2 - bst r19,2 - bld r9,2 - bst r19,3 - bld r13,2 - bst r19,4 - bld r23,3 - bst r19,5 - bld r5,3 - bst r19,6 - bld r9,3 - bst r19,7 - bld r13,3 - bst r20,0 - bld r23,4 - bst r20,1 - bld r5,4 - bst r20,2 - bld r9,4 - bst r20,3 - bld r13,4 - bst r20,4 - bld r23,5 - bst r20,5 - bld r5,5 - bst r20,6 - bld r9,5 - bst r20,7 - bld r13,5 - bst r21,0 - bld r23,6 - bst r21,1 - bld r5,6 - bst r21,2 - bld r9,6 - bst r21,3 - bld r13,6 - bst r21,4 - bld r23,7 - bst r21,5 - bld r5,7 - bst r21,6 - bld r9,7 - bst r21,7 - bld r13,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r2,0 - bst r18,1 - bld r6,0 - bst r18,2 - bld r10,0 - bst r18,3 - bld r14,0 - bst r18,4 - bld r2,1 - bst r18,5 - bld r6,1 - bst r18,6 - bld r10,1 - bst r18,7 - bld r14,1 - bst r19,0 - bld r2,2 - bst r19,1 - bld r6,2 - bst r19,2 - bld r10,2 - bst r19,3 - bld r14,2 - bst r19,4 - bld r2,3 - bst r19,5 - bld r6,3 - bst r19,6 - bld r10,3 - bst r19,7 - bld r14,3 - bst r20,0 - bld r2,4 - bst r20,1 - bld r6,4 - bst r20,2 - bld r10,4 - bst r20,3 - bld r14,4 - bst r20,4 - bld r2,5 - bst r20,5 - bld r6,5 - bst r20,6 - bld r10,5 - bst r20,7 - bld r14,5 - bst r21,0 - bld r2,6 - bst r21,1 - bld r6,6 - bst r21,2 - bld r10,6 - bst r21,3 - bld r14,6 - bst r21,4 - bld r2,7 - bst r21,5 - bld r6,7 - bst r21,6 - bld r10,7 - bst r21,7 - bld r14,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r3,0 - bst r18,1 - bld r7,0 - bst r18,2 - bld r11,0 - bst r18,3 - bld r15,0 - bst r18,4 - bld r3,1 - bst r18,5 - bld r7,1 - bst r18,6 - bld r11,1 - bst r18,7 - bld r15,1 - bst r19,0 - bld r3,2 - bst r19,1 - bld r7,2 - bst r19,2 - bld r11,2 - bst r19,3 - bld r15,2 - bst r19,4 - bld r3,3 - bst r19,5 - bld r7,3 - bst r19,6 - bld r11,3 - bst r19,7 - bld r15,3 - bst r20,0 - bld r3,4 - bst r20,1 - bld r7,4 - bst r20,2 - bld r11,4 - bst r20,3 - bld r15,4 - bst r20,4 - bld r3,5 - bst r20,5 - bld r7,5 - bst r20,6 - bld r11,5 - bst r20,7 - bld r15,5 - bst r21,0 - bld r3,6 - bst r21,1 - bld r7,6 - bst r21,2 - bld r11,6 - bst r21,3 - bld r15,6 - bst r21,4 - bld r3,7 - bst r21,5 - bld r7,7 - bst r21,6 - bld r11,7 - bst r21,7 - bld r15,7 - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi 
r30,20 - adiw r26,40 - rcall 329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 329f - rcall 329f - rjmp 1541f -329: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z 
-#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - 
and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z 
-#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -1067: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - 
or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - 
andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -1541: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - bst r22,0 - bld r18,0 - bst r4,0 - bld r18,1 - bst r8,0 - bld r18,2 - bst r12,0 - bld r18,3 - bst r22,1 - bld r18,4 - bst r4,1 - bld r18,5 - bst r8,1 - bld r18,6 - bst r12,1 - bld r18,7 - bst r22,2 - bld r19,0 - bst r4,2 - bld r19,1 - bst r8,2 - bld r19,2 - bst r12,2 - bld r19,3 - bst r22,3 - bld r19,4 - bst r4,3 - bld r19,5 - bst r8,3 - bld r19,6 - bst r12,3 - bld r19,7 - bst r22,4 - bld r20,0 - bst r4,4 - bld r20,1 - bst r8,4 - bld r20,2 - bst r12,4 - bld r20,3 - bst r22,5 - bld r20,4 - bst r4,5 - bld r20,5 - bst r8,5 - bld r20,6 - bst r12,5 - bld r20,7 - bst r22,6 - bld r21,0 - bst r4,6 - bld r21,1 - bst r8,6 - bld r21,2 - bst r12,6 - bld r21,3 - bst r22,7 - bld r21,4 - bst r4,7 - bld r21,5 - bst r8,7 - bld r21,6 - bst r12,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r23,0 - bld r18,0 - bst r5,0 - bld r18,1 - bst r9,0 - bld r18,2 - bst r13,0 - bld r18,3 - bst r23,1 - bld r18,4 - bst r5,1 - bld r18,5 - bst r9,1 - bld r18,6 - bst r13,1 - bld r18,7 - bst r23,2 - bld r19,0 - bst r5,2 - bld r19,1 - bst r9,2 - bld r19,2 - bst r13,2 - bld r19,3 - bst r23,3 - bld r19,4 - bst r5,3 - bld r19,5 - bst r9,3 - bld r19,6 - bst r13,3 - bld r19,7 - bst r23,4 - bld r20,0 - bst r5,4 - bld r20,1 - bst r9,4 - bld r20,2 - bst r13,4 - bld r20,3 - bst r23,5 - bld r20,4 - bst r5,5 - bld r20,5 - bst r9,5 - bld r20,6 - bst r13,5 - bld r20,7 - bst r23,6 - bld r21,0 - bst r5,6 - bld r21,1 - bst r9,6 - bld r21,2 - bst r13,6 - bld r21,3 - bst r23,7 - bld r21,4 - bst r5,7 - bld r21,5 - bst r9,7 - bld r21,6 - bst r13,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r2,0 - bld r18,0 - bst r6,0 - bld r18,1 - bst r10,0 - bld r18,2 - bst r14,0 - bld r18,3 - bst r2,1 - bld r18,4 - bst r6,1 - bld r18,5 - bst r10,1 - bld r18,6 - bst r14,1 - bld r18,7 - bst r2,2 - bld r19,0 - bst r6,2 - bld r19,1 - bst r10,2 - bld r19,2 - bst r14,2 - bld r19,3 - bst r2,3 - bld r19,4 - bst r6,3 - bld r19,5 - bst r10,3 - bld r19,6 - bst r14,3 - bld r19,7 - bst r2,4 - bld r20,0 - bst r6,4 - bld r20,1 - bst r10,4 - bld r20,2 - bst r14,4 - bld r20,3 - bst r2,5 - bld r20,4 - bst r6,5 - bld r20,5 - bst r10,5 - bld r20,6 - bst r14,5 - bld r20,7 - bst r2,6 - bld r21,0 - bst r6,6 - bld r21,1 - bst 
r10,6 - bld r21,2 - bst r14,6 - bld r21,3 - bst r2,7 - bld r21,4 - bst r6,7 - bld r21,5 - bst r10,7 - bld r21,6 - bst r14,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r3,0 - bld r18,0 - bst r7,0 - bld r18,1 - bst r11,0 - bld r18,2 - bst r15,0 - bld r18,3 - bst r3,1 - bld r18,4 - bst r7,1 - bld r18,5 - bst r11,1 - bld r18,6 - bst r15,1 - bld r18,7 - bst r3,2 - bld r19,0 - bst r7,2 - bld r19,1 - bst r11,2 - bld r19,2 - bst r15,2 - bld r19,3 - bst r3,3 - bld r19,4 - bst r7,3 - bld r19,5 - bst r11,3 - bld r19,6 - bst r15,3 - bld r19,7 - bst r3,4 - bld r20,0 - bst r7,4 - bld r20,1 - bst r11,4 - bld r20,2 - bst r15,4 - bld r20,3 - bst r3,5 - bld r20,4 - bst r7,5 - bld r20,5 - bst r11,5 - bld r20,6 - bst r15,5 - bld r20,7 - bst r3,6 - bld r21,0 - bst r7,6 - bld r21,1 - bst r11,6 - bld r21,2 - bst r15,6 - bld r21,3 - bst r3,7 - bld r21,4 - bst r7,7 - bld r21,5 - bst r11,7 - bld r21,6 - bst r15,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128n_encrypt, .-gift128n_encrypt - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 40 -table_1: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - - .text -.global gift128n_decrypt - .type gift128n_decrypt, @function -gift128n_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r22,0 - bst r18,1 - bld r4,0 - bst r18,2 - bld r8,0 - bst r18,3 - bld r12,0 - bst r18,4 - bld r22,1 - bst r18,5 - bld r4,1 - bst r18,6 - bld r8,1 - bst r18,7 - bld r12,1 - bst r19,0 - bld r22,2 - bst r19,1 - bld r4,2 - bst r19,2 - bld r8,2 - bst r19,3 - bld r12,2 - bst r19,4 - bld r22,3 - bst r19,5 - bld r4,3 - bst r19,6 - bld r8,3 - bst r19,7 - bld r12,3 - bst r20,0 - bld r22,4 - bst r20,1 - bld r4,4 - bst r20,2 - bld r8,4 - bst r20,3 - bld r12,4 - bst r20,4 - bld r22,5 - bst r20,5 - bld r4,5 - bst r20,6 - bld r8,5 - bst r20,7 - bld r12,5 - bst r21,0 - bld r22,6 - bst r21,1 - bld r4,6 - bst r21,2 - bld r8,6 - bst r21,3 - bld r12,6 - bst r21,4 - bld r22,7 - bst r21,5 - bld r4,7 - bst r21,6 - bld r8,7 - bst r21,7 - bld r12,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r23,0 - bst r18,1 - bld r5,0 - bst r18,2 - bld r9,0 - bst r18,3 - bld r13,0 - bst r18,4 - bld r23,1 - bst r18,5 - bld r5,1 - bst r18,6 - bld r9,1 - bst r18,7 - bld r13,1 - bst r19,0 - bld r23,2 - bst r19,1 - bld r5,2 - bst r19,2 - bld r9,2 - bst r19,3 - bld r13,2 - bst r19,4 - bld r23,3 - bst r19,5 - bld r5,3 - bst r19,6 - bld r9,3 - bst r19,7 - bld r13,3 - bst r20,0 - bld r23,4 - bst r20,1 - 
bld r5,4 - bst r20,2 - bld r9,4 - bst r20,3 - bld r13,4 - bst r20,4 - bld r23,5 - bst r20,5 - bld r5,5 - bst r20,6 - bld r9,5 - bst r20,7 - bld r13,5 - bst r21,0 - bld r23,6 - bst r21,1 - bld r5,6 - bst r21,2 - bld r9,6 - bst r21,3 - bld r13,6 - bst r21,4 - bld r23,7 - bst r21,5 - bld r5,7 - bst r21,6 - bld r9,7 - bst r21,7 - bld r13,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r2,0 - bst r18,1 - bld r6,0 - bst r18,2 - bld r10,0 - bst r18,3 - bld r14,0 - bst r18,4 - bld r2,1 - bst r18,5 - bld r6,1 - bst r18,6 - bld r10,1 - bst r18,7 - bld r14,1 - bst r19,0 - bld r2,2 - bst r19,1 - bld r6,2 - bst r19,2 - bld r10,2 - bst r19,3 - bld r14,2 - bst r19,4 - bld r2,3 - bst r19,5 - bld r6,3 - bst r19,6 - bld r10,3 - bst r19,7 - bld r14,3 - bst r20,0 - bld r2,4 - bst r20,1 - bld r6,4 - bst r20,2 - bld r10,4 - bst r20,3 - bld r14,4 - bst r20,4 - bld r2,5 - bst r20,5 - bld r6,5 - bst r20,6 - bld r10,5 - bst r20,7 - bld r14,5 - bst r21,0 - bld r2,6 - bst r21,1 - bld r6,6 - bst r21,2 - bld r10,6 - bst r21,3 - bld r14,6 - bst r21,4 - bld r2,7 - bst r21,5 - bld r6,7 - bst r21,6 - bld r10,7 - bst r21,7 - bld r14,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r3,0 - bst r18,1 - bld r7,0 - bst r18,2 - bld r11,0 - bst r18,3 - bld r15,0 - bst r18,4 - bld r3,1 - bst r18,5 - bld r7,1 - bst r18,6 - bld r11,1 - bst r18,7 - bld r15,1 - bst r19,0 - bld r3,2 - bst r19,1 - bld r7,2 - bst r19,2 - bld r11,2 - bst r19,3 - bld r15,2 - bst r19,4 - bld r3,3 - bst r19,5 - bld r7,3 - bst r19,6 - bld r11,3 - bst r19,7 - bld r15,3 - bst r20,0 - bld r3,4 - bst r20,1 - bld r7,4 - bst r20,2 - bld r11,4 - bst r20,3 - bld r15,4 - bst r20,4 - bld r3,5 - bst r20,5 - bld r7,5 - bst r20,6 - bld r11,5 - bst r20,7 - bld r15,5 - bst r21,0 - bld r3,6 - bst r21,1 - bld r7,6 - bst r21,2 - bld r11,6 - bst r21,3 - bld r15,6 - bst r21,4 - bld r3,7 - bst r21,5 - bld r7,7 - bst r21,6 - bld r11,7 - bst r21,7 - bld r15,7 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,17 - andi r19,17 - andi r20,17 - andi 
r21,17 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - 
ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r17,hh8(table_1) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -934: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 1086f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd 
r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 1086f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 1086f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 1086f - cpse r16,r1 - rjmp 934b - rjmp 1431f -1086: - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - bld r11,3 - bst r8,7 - bld r11,6 - bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - 
bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst r9,1 - bld r8,5 - bst r9,2 - bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -1431: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - bst r22,0 - bld r18,0 - bst r4,0 - bld r18,1 - bst r8,0 - bld r18,2 - bst r12,0 - bld r18,3 - bst r22,1 - bld r18,4 - bst r4,1 - bld r18,5 - bst r8,1 - bld r18,6 - bst r12,1 - bld r18,7 - bst r22,2 - bld r19,0 - bst r4,2 - bld r19,1 - bst r8,2 - bld r19,2 - bst r12,2 - bld r19,3 - bst r22,3 - bld r19,4 - bst r4,3 - bld r19,5 - bst r8,3 - bld r19,6 - bst r12,3 - bld r19,7 - bst r22,4 - bld r20,0 - bst r4,4 - bld r20,1 - bst r8,4 - bld r20,2 - bst r12,4 - bld r20,3 - bst r22,5 - bld r20,4 - bst r4,5 - bld r20,5 - bst r8,5 - bld r20,6 - bst r12,5 - bld r20,7 - bst r22,6 - bld r21,0 - bst r4,6 - bld r21,1 - bst r8,6 - bld r21,2 - bst r12,6 - bld r21,3 - bst r22,7 - bld r21,4 - bst r4,7 - bld r21,5 - bst r8,7 - bld r21,6 - bst r12,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r23,0 - bld r18,0 - bst r5,0 - bld r18,1 - bst r9,0 - bld r18,2 - bst r13,0 - bld r18,3 - bst r23,1 - bld r18,4 - bst r5,1 - bld r18,5 - bst r9,1 - bld r18,6 - bst r13,1 - bld r18,7 - bst r23,2 - bld r19,0 - bst r5,2 - bld r19,1 - bst r9,2 - bld r19,2 - bst r13,2 - bld r19,3 - bst r23,3 - bld r19,4 - bst r5,3 - bld r19,5 - bst r9,3 - bld r19,6 - bst r13,3 - bld r19,7 - bst r23,4 - bld r20,0 - bst r5,4 - bld r20,1 - bst r9,4 - bld r20,2 - bst r13,4 - bld r20,3 - bst r23,5 - bld r20,4 - bst r5,5 - bld r20,5 - bst r9,5 - bld r20,6 - bst r13,5 - bld r20,7 - bst r23,6 - bld r21,0 - bst r5,6 - bld r21,1 - bst r9,6 - bld r21,2 - bst r13,6 - bld r21,3 - bst r23,7 - bld r21,4 - bst r5,7 - bld r21,5 - bst r9,7 - bld r21,6 - bst r13,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r2,0 - bld r18,0 - bst r6,0 - bld r18,1 - bst 
r10,0 - bld r18,2 - bst r14,0 - bld r18,3 - bst r2,1 - bld r18,4 - bst r6,1 - bld r18,5 - bst r10,1 - bld r18,6 - bst r14,1 - bld r18,7 - bst r2,2 - bld r19,0 - bst r6,2 - bld r19,1 - bst r10,2 - bld r19,2 - bst r14,2 - bld r19,3 - bst r2,3 - bld r19,4 - bst r6,3 - bld r19,5 - bst r10,3 - bld r19,6 - bst r14,3 - bld r19,7 - bst r2,4 - bld r20,0 - bst r6,4 - bld r20,1 - bst r10,4 - bld r20,2 - bst r14,4 - bld r20,3 - bst r2,5 - bld r20,4 - bst r6,5 - bld r20,5 - bst r10,5 - bld r20,6 - bst r14,5 - bld r20,7 - bst r2,6 - bld r21,0 - bst r6,6 - bld r21,1 - bst r10,6 - bld r21,2 - bst r14,6 - bld r21,3 - bst r2,7 - bld r21,4 - bst r6,7 - bld r21,5 - bst r10,7 - bld r21,6 - bst r14,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r3,0 - bld r18,0 - bst r7,0 - bld r18,1 - bst r11,0 - bld r18,2 - bst r15,0 - bld r18,3 - bst r3,1 - bld r18,4 - bst r7,1 - bld r18,5 - bst r11,1 - bld r18,6 - bst r15,1 - bld r18,7 - bst r3,2 - bld r19,0 - bst r7,2 - bld r19,1 - bst r11,2 - bld r19,2 - bst r15,2 - bld r19,3 - bst r3,3 - bld r19,4 - bst r7,3 - bld r19,5 - bst r11,3 - bld r19,6 - bst r15,3 - bld r19,7 - bst r3,4 - bld r20,0 - bst r7,4 - bld r20,1 - bst r11,4 - bld r20,2 - bst r15,4 - bld r20,3 - bst r3,5 - bld r20,4 - bst r7,5 - bld r20,5 - bst r11,5 - bld r20,6 - bst r15,5 - bld r20,7 - bst r3,6 - bld r21,0 - bst r7,6 - bld r21,1 - bst r11,6 - bld r21,2 - bst r15,6 - bld r21,3 - bst r3,7 - bld r21,4 - bst r7,7 - bld r21,5 - bst r11,7 - bld r21,6 - bst r15,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128n_decrypt, .-gift128n_decrypt - - .text -.global gift128t_encrypt - .type gift128t_encrypt, @function -gift128t_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ldi r19,20 -1: - ld r2,Z+ - ld r3,Z+ - ld r4,Z+ - ld r5,Z+ - std Y+1,r2 - std Y+2,r3 - std Y+3,r4 - std Y+4,r5 - adiw r28,4 - dec r19 - brne 1b - subi r28,80 - sbc r29,r1 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r2,0 - bst r20,1 - bld r6,0 - bst r20,2 - bld r10,0 - bst r20,3 - bld r14,0 - bst r20,4 - bld r2,1 - bst r20,5 - bld r6,1 - bst r20,6 - bld r10,1 - bst r20,7 - bld r14,1 - bst r21,0 - bld r2,2 - bst r21,1 - bld r6,2 - bst r21,2 - bld r10,2 - bst r21,3 - bld r14,2 - bst r21,4 - bld r2,3 - bst r21,5 - bld r6,3 - bst r21,6 - bld r10,3 - bst r21,7 - bld r14,3 - bst r22,0 - bld r2,4 - bst r22,1 - bld r6,4 - bst r22,2 - bld r10,4 - bst r22,3 - bld r14,4 - bst r22,4 - bld r2,5 - bst r22,5 - bld r6,5 - bst r22,6 - bld r10,5 - bst r22,7 - bld r14,5 - bst r23,0 - bld r2,6 - bst r23,1 - bld r6,6 - bst r23,2 - bld r10,6 - bst r23,3 - bld r14,6 - bst r23,4 - bld r2,7 - bst r23,5 - bld r6,7 - bst r23,6 - bld r10,7 - bst r23,7 - bld r14,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r3,0 - bst r20,1 - bld r7,0 - bst r20,2 - bld r11,0 - bst r20,3 - bld r15,0 - bst r20,4 - bld r3,1 - bst r20,5 - bld r7,1 - bst r20,6 - bld r11,1 - bst r20,7 - bld r15,1 - bst r21,0 - bld r3,2 - bst 
r21,1 - bld r7,2 - bst r21,2 - bld r11,2 - bst r21,3 - bld r15,2 - bst r21,4 - bld r3,3 - bst r21,5 - bld r7,3 - bst r21,6 - bld r11,3 - bst r21,7 - bld r15,3 - bst r22,0 - bld r3,4 - bst r22,1 - bld r7,4 - bst r22,2 - bld r11,4 - bst r22,3 - bld r15,4 - bst r22,4 - bld r3,5 - bst r22,5 - bld r7,5 - bst r22,6 - bld r11,5 - bst r22,7 - bld r15,5 - bst r23,0 - bld r3,6 - bst r23,1 - bld r7,6 - bst r23,2 - bld r11,6 - bst r23,3 - bld r15,6 - bst r23,4 - bld r3,7 - bst r23,5 - bld r7,7 - bst r23,6 - bld r11,7 - bst r23,7 - bld r15,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r4,0 - bst r20,1 - bld r8,0 - bst r20,2 - bld r12,0 - bst r20,3 - bld r24,0 - bst r20,4 - bld r4,1 - bst r20,5 - bld r8,1 - bst r20,6 - bld r12,1 - bst r20,7 - bld r24,1 - bst r21,0 - bld r4,2 - bst r21,1 - bld r8,2 - bst r21,2 - bld r12,2 - bst r21,3 - bld r24,2 - bst r21,4 - bld r4,3 - bst r21,5 - bld r8,3 - bst r21,6 - bld r12,3 - bst r21,7 - bld r24,3 - bst r22,0 - bld r4,4 - bst r22,1 - bld r8,4 - bst r22,2 - bld r12,4 - bst r22,3 - bld r24,4 - bst r22,4 - bld r4,5 - bst r22,5 - bld r8,5 - bst r22,6 - bld r12,5 - bst r22,7 - bld r24,5 - bst r23,0 - bld r4,6 - bst r23,1 - bld r8,6 - bst r23,2 - bld r12,6 - bst r23,3 - bld r24,6 - bst r23,4 - bld r4,7 - bst r23,5 - bld r8,7 - bst r23,6 - bld r12,7 - bst r23,7 - bld r24,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r5,0 - bst r20,1 - bld r9,0 - bst r20,2 - bld r13,0 - bst r20,3 - bld r25,0 - bst r20,4 - bld r5,1 - bst r20,5 - bld r9,1 - bst r20,6 - bld r13,1 - bst r20,7 - bld r25,1 - bst r21,0 - bld r5,2 - bst r21,1 - bld r9,2 - bst r21,2 - bld r13,2 - bst r21,3 - bld r25,2 - bst r21,4 - bld r5,3 - bst r21,5 - bld r9,3 - bst r21,6 - bld r13,3 - bst r21,7 - bld r25,3 - bst r22,0 - bld r5,4 - bst r22,1 - bld r9,4 - bst r22,2 - bld r13,4 - bst r22,3 - bld r25,4 - bst r22,4 - bld r5,5 - bst r22,5 - bld r9,5 - bst r22,6 - bld r13,5 - bst r22,7 - bld r25,5 - bst r23,0 - bld r5,6 - bst r23,1 - bld r9,6 - bst r23,2 - bld r13,6 - bst r23,3 - bld r25,6 - bst r23,4 - bld r5,7 - bst r23,5 - bld r9,7 - bst r23,6 - bld r13,7 - bst r23,7 - bld r25,7 - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - rcall 357f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1095f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,20 - adiw r26,40 - rcall 357f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1095f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,40 - sbiw r26,40 - rcall 357f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1095f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,60 - adiw r26,40 - rcall 357f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1095f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - 
ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,80 - sbiw r26,40 - rcall 357f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1095f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,100 - adiw r26,40 - rcall 357f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 1095f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,120 - sbiw r26,40 - rcall 357f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 357f - rjmp 1570f -357: - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - com r14 - com r15 - com r24 - com r25 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - movw r20,r6 - movw r22,r8 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,204 - andi r21,204 - andi r22,204 - andi r23,204 - lsr r9 - ror r8 - ror r7 - ror r6 - lsr r9 - ror r8 - ror r7 - ror r6 - ldi r19,51 - and r6,r19 - and r7,r19 - and r8,r19 - and r9,r19 - or r6,r20 - or r7,r21 - or r8,r22 - or r9,r23 - movw r20,r10 - movw r22,r12 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,238 - andi r21,238 - andi r22,238 - andi r23,238 - lsr r13 - ror r12 - ror r11 - ror r10 - lsr r13 - ror r12 - ror r11 - ror r10 - lsr r13 - ror r12 - ror r11 - ror r10 - ldi r17,17 - and r10,r17 - and r11,r17 - and r12,r17 - and r13,r17 - or r10,r20 - or r11,r21 - or r12,r22 - or r13,r23 - movw r20,r14 - movw r22,r24 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,136 - andi r21,136 - andi r22,136 - andi r23,136 - lsr r25 - ror r24 - ror r15 - ror r14 - ldi r16,119 - and r14,r16 - and r15,r16 - andi r24,119 - andi r25,119 - or r14,r20 - or r15,r21 - or r24,r22 - or r25,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - 
lpm - mov r23,r0 -#endif - inc r30 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r24 - and r0,r12 - eor r8,r0 - mov r0,r25 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r8 - and r0,r4 - eor r24,r0 - mov r0,r9 - and r0,r5 - eor r25,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r24 - or r0,r8 - eor r12,r0 - mov r0,r25 - or r0,r9 - eor r13,r0 - eor r2,r10 - eor r3,r11 - eor r4,r12 - eor r5,r13 - eor r6,r2 - eor r7,r3 - eor r8,r4 - eor r9,r5 - com r2 - com r3 - com r4 - com r5 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r24 - and r0,r8 - eor r12,r0 - mov r0,r25 - and r0,r9 - eor r13,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r1 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - or r5,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r12 - rol r13 - adc r12,r1 - lsl r12 - rol r13 - adc r12,r1 - lsl r12 - rol r13 - adc r12,r1 - lsl r12 - rol r13 - adc r12,r1 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r14,r20 - eor r15,r21 - eor r24,r22 - eor r25,r23 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - com r14 - com r15 - com r24 - com r25 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - movw r20,r6 - movw r22,r8 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - andi r20,85 - andi r21,85 - andi r22,85 - andi r23,85 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - mov r0,r12 - mov r12,r10 - mov r10,r0 - mov r0,r13 - mov r13,r11 - mov r11,r0 - movw r20,r10 - movw r22,r12 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r10 - 
eor r21,r11 - andi r20,85 - andi r21,85 - eor r10,r20 - eor r11,r21 - mov r22,r1 - mov r23,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - mov r0,r24 - mov r24,r14 - mov r14,r0 - mov r0,r25 - mov r25,r15 - mov r15,r0 - movw r20,r24 - lsr r21 - ror r20 - eor r20,r24 - eor r21,r25 - andi r20,85 - andi r21,85 - eor r24,r20 - eor r25,r21 - lsl r20 - rol r21 - eor r24,r20 - eor r25,r21 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r24 - and r0,r12 - eor r8,r0 - mov r0,r25 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r8 - and r0,r4 - eor r24,r0 - mov r0,r9 - and r0,r5 - eor r25,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r24 - or r0,r8 - eor r12,r0 - mov r0,r25 - or r0,r9 - eor r13,r0 - eor r2,r10 - eor r3,r11 - eor r4,r12 - eor r5,r13 - eor r6,r2 - eor r7,r3 - eor r8,r4 - eor r9,r5 - com r2 - com r3 - com r4 - com r5 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r24 - and r0,r8 - eor r12,r0 - mov r0,r25 - and r0,r9 - eor r13,r0 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r5 - adc r5,r1 - lsl r5 - adc r5,r1 - swap r6 - swap r7 - swap r8 - swap r9 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - mov r0,r1 - lsr r12 - ror r0 - lsr r12 - ror r0 - or r12,r0 - mov r0,r1 - lsr r13 - ror r0 - lsr r13 - ror r0 - or r13,r0 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r14,r20 - eor r15,r21 - eor r24,r22 - eor r25,r23 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor 
r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - com r14 - com r15 - com r24 - com r25 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - mov r0,r8 - mov r8,r6 - mov r6,r0 - mov r0,r9 - mov r9,r7 - mov r7,r0 - mov r0,r10 - mov r10,r11 - mov r11,r12 - mov r12,r13 - mov r13,r0 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - eor r14,r2 - eor r15,r3 - eor r24,r4 - eor r25,r5 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - ret -1095: - movw r30,r26 - sbiw r30,40 - push r5 - push r4 - push r3 - push r2 - push r9 - push r8 - push r7 - push r6 - ld r2,Z - ldd r3,Z+1 - ldd r4,Z+2 - ldd r5,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r16,Z+6 - ldd r17,Z+7 - movw r20,r26 - movw r22,r16 - movw r20,r22 - mov r22,r1 - mov r23,r1 - eor r20,r26 - eor r21,r27 - andi r20,51 - andi r21,51 - eor r26,r20 - eor r27,r21 - mov r22,r1 - mov r23,r1 - movw r22,r20 - mov r20,r1 - mov r21,r1 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,68 - andi r21,68 - andi r22,85 - andi r23,85 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - st Z,r26 - std Z+1,r27 - std Z+2,r16 - std Z+3,r17 - movw r20,r2 - movw r22,r4 - andi r20,51 - andi r21,51 - andi r22,51 - andi r23,51 - ldi r19,204 - and r2,r19 - and r3,r19 - and r4,r19 - and r5,r19 - or r4,r23 - or r5,r20 - or r2,r21 - or r3,r22 - movw r20,r4 - movw r22,r2 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r4 - eor r21,r5 - eor r22,r2 - eor r23,r3 - mov r20,r1 - andi r21,17 - andi r22,85 - andi r23,85 - eor r4,r20 - eor r5,r21 - eor r2,r22 - eor r3,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r4,r20 - eor r5,r21 - eor r2,r22 - eor r3,r23 - std Z+4,r4 - std Z+5,r5 - std Z+6,r2 - std Z+7,r3 - ldd r2,Z+8 - ldd r3,Z+9 - ldd r4,Z+10 - ldd r5,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r16,Z+14 - ldd r17,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r16 - adc 
r16,r1 - lsl r16 - adc r16,r1 - swap r17 - std Z+8,r26 - std Z+9,r27 - std Z+10,r16 - std Z+11,r17 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r5 - adc r5,r1 - lsl r5 - adc r5,r1 - std Z+12,r2 - std Z+13,r3 - std Z+14,r4 - std Z+15,r5 - ldd r2,Z+16 - ldd r3,Z+17 - ldd r4,Z+18 - ldd r5,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r16,Z+22 - ldd r17,Z+23 - movw r20,r26 - movw r22,r16 - andi r20,170 - andi r21,170 - andi r22,170 - andi r23,170 - andi r26,85 - andi r27,85 - andi r16,85 - andi r17,85 - or r26,r21 - or r27,r22 - or r16,r23 - or r17,r20 - std Z+16,r16 - std Z+17,r17 - std Z+18,r26 - std Z+19,r27 - movw r20,r2 - movw r22,r4 - andi r20,85 - andi r21,85 - andi r22,85 - andi r23,85 - ldi r19,170 - and r2,r19 - and r3,r19 - and r4,r19 - and r5,r19 - lsl r2 - rol r3 - rol r4 - rol r5 - adc r2,r1 - lsl r2 - rol r3 - rol r4 - rol r5 - adc r2,r1 - lsl r2 - rol r3 - rol r4 - rol r5 - adc r2,r1 - lsl r2 - rol r3 - rol r4 - rol r5 - adc r2,r1 - or r2,r20 - or r3,r21 - or r4,r22 - or r5,r23 - std Z+20,r5 - std Z+21,r2 - std Z+22,r3 - std Z+23,r4 - ldd r2,Z+24 - ldd r3,Z+25 - ldd r4,Z+26 - ldd r5,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r16,Z+30 - ldd r17,Z+31 - movw r20,r26 - movw r22,r16 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,3 - andi r21,3 - andi r22,3 - andi r23,3 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - lsr r23 - ror r22 - ror r21 - ror r20 - andi r20,120 - andi r21,120 - andi r22,120 - andi r23,120 - movw r6,r20 - movw r8,r22 - lsr r9 - ror r8 - ror r7 - ror r6 - lsr r9 - ror r8 - ror r7 - ror r6 - lsr r9 - ror r8 - ror r7 - ror r6 - lsr r9 - ror r8 - ror r7 - ror r6 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ldi r19,8 - and r6,r19 - and r7,r19 - and r8,r19 - and r9,r19 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - lsl r6 - rol r7 - rol r8 - rol r9 - lsl r6 - rol r7 - rol r8 - rol r9 - lsl r6 - rol r7 - rol r8 - rol r9 - lsl r6 - rol r7 - rol r8 - rol r9 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - andi r26,15 - andi r27,15 - andi r16,15 - andi r17,15 - or r26,r20 - or r27,r21 - or r16,r22 - or r17,r23 - std Z+24,r26 - std Z+25,r27 - std Z+26,r16 - std Z+27,r17 - movw r20,r4 - lsr r21 - ror r20 - lsr r21 - ror r20 - andi r20,48 - andi r21,48 - movw r26,r2 - movw r16,r4 - andi r26,1 - andi r27,1 - andi r16,1 - andi r17,1 - lsl r26 - rol r27 - rol r16 - rol r17 - lsl r26 - rol r27 - rol r16 - rol r17 - lsl r26 - rol r27 - rol r16 - rol r17 - or r26,r20 - or r27,r21 - movw r20,r4 - lsl r20 - rol r21 - lsl r20 - rol r21 - andi r20,192 - andi r21,192 - or r26,r20 - or r27,r21 - movw r20,r2 - andi r20,224 - andi r21,224 - lsr r21 - ror r20 - or r16,r20 - or r17,r21 - movw r20,r2 - movw r22,r4 - lsr r23 - ror r22 - ror r21 - ror r20 - andi r20,7 - andi r21,7 - andi r22,7 - andi r23,7 - or r26,r20 - or r27,r21 - or r16,r22 - or r17,r23 - ldi r19,16 - and r2,r19 - and r3,r19 - lsl r2 - rol r3 - lsl r2 - rol r3 - lsl r2 - rol r3 - or r16,r2 - or r17,r3 - std Z+28,r26 - std Z+29,r27 - std Z+30,r16 - std Z+31,r17 - ldd r2,Z+32 - ldd r3,Z+33 - ldd r4,Z+34 - ldd r5,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r16,Z+38 - ldd r17,Z+39 - mov r0,r1 - lsr r17 - ror r16 - 
ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r16 - std Z+35,r17 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r4 - mov r4,r5 - mov r5,r0 - lsl r4 - rol r5 - adc r4,r1 - lsl r4 - rol r5 - adc r4,r1 - std Z+36,r2 - std Z+37,r3 - std Z+38,r4 - std Z+39,r5 - pop r6 - pop r7 - pop r8 - pop r9 - pop r2 - pop r3 - pop r4 - pop r5 - movw r26,r30 - ret -1570: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - bst r2,0 - bld r20,0 - bst r6,0 - bld r20,1 - bst r10,0 - bld r20,2 - bst r14,0 - bld r20,3 - bst r2,1 - bld r20,4 - bst r6,1 - bld r20,5 - bst r10,1 - bld r20,6 - bst r14,1 - bld r20,7 - bst r2,2 - bld r21,0 - bst r6,2 - bld r21,1 - bst r10,2 - bld r21,2 - bst r14,2 - bld r21,3 - bst r2,3 - bld r21,4 - bst r6,3 - bld r21,5 - bst r10,3 - bld r21,6 - bst r14,3 - bld r21,7 - bst r2,4 - bld r22,0 - bst r6,4 - bld r22,1 - bst r10,4 - bld r22,2 - bst r14,4 - bld r22,3 - bst r2,5 - bld r22,4 - bst r6,5 - bld r22,5 - bst r10,5 - bld r22,6 - bst r14,5 - bld r22,7 - bst r2,6 - bld r23,0 - bst r6,6 - bld r23,1 - bst r10,6 - bld r23,2 - bst r14,6 - bld r23,3 - bst r2,7 - bld r23,4 - bst r6,7 - bld r23,5 - bst r10,7 - bld r23,6 - bst r14,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r3,0 - bld r20,0 - bst r7,0 - bld r20,1 - bst r11,0 - bld r20,2 - bst r15,0 - bld r20,3 - bst r3,1 - bld r20,4 - bst r7,1 - bld r20,5 - bst r11,1 - bld r20,6 - bst r15,1 - bld r20,7 - bst r3,2 - bld r21,0 - bst r7,2 - bld r21,1 - bst r11,2 - bld r21,2 - bst r15,2 - bld r21,3 - bst r3,3 - bld r21,4 - bst r7,3 - bld r21,5 - bst r11,3 - bld r21,6 - bst r15,3 - bld r21,7 - bst r3,4 - bld r22,0 - bst r7,4 - bld r22,1 - bst r11,4 - bld r22,2 - bst r15,4 - bld r22,3 - bst r3,5 - bld r22,4 - bst r7,5 - bld r22,5 - bst r11,5 - bld r22,6 - bst r15,5 - bld r22,7 - bst r3,6 - bld r23,0 - bst r7,6 - bld r23,1 - bst r11,6 - bld r23,2 - bst r15,6 - bld r23,3 - bst r3,7 - bld r23,4 - bst r7,7 - bld r23,5 - bst r11,7 - bld r23,6 - bst r15,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r4,0 - bld r20,0 - bst r8,0 - bld r20,1 - bst r12,0 - bld r20,2 - bst r24,0 - bld r20,3 - bst r4,1 - bld r20,4 - bst r8,1 - bld r20,5 - bst r12,1 - bld r20,6 - bst r24,1 - bld r20,7 - bst r4,2 - bld r21,0 - bst r8,2 - bld r21,1 - bst r12,2 - bld r21,2 - bst r24,2 - bld r21,3 - bst r4,3 - bld r21,4 - bst r8,3 - bld r21,5 - bst r12,3 - bld r21,6 - bst r24,3 - bld r21,7 - bst r4,4 - bld r22,0 - bst r8,4 - bld r22,1 - bst r12,4 - bld r22,2 - bst r24,4 - bld r22,3 - bst r4,5 - bld r22,4 - bst r8,5 - bld r22,5 - bst r12,5 - bld r22,6 - bst r24,5 - bld r22,7 - bst r4,6 - bld r23,0 - bst r8,6 - bld r23,1 - bst r12,6 - bld r23,2 - bst r24,6 - bld r23,3 - bst r4,7 - bld r23,4 - bst r8,7 - bld r23,5 - bst r12,7 - bld r23,6 - bst r24,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r5,0 - bld r20,0 - bst r9,0 - bld r20,1 - bst r13,0 - bld r20,2 - bst r25,0 - bld r20,3 - bst r5,1 - bld r20,4 - bst r9,1 - bld r20,5 - bst r13,1 - bld r20,6 - bst r25,1 - bld r20,7 - bst r5,2 - bld r21,0 - bst r9,2 - bld r21,1 - bst r13,2 - bld r21,2 - bst r25,2 - bld r21,3 - bst r5,3 - bld r21,4 - bst r9,3 - bld r21,5 - bst r13,3 - bld r21,6 - bst r25,3 - bld r21,7 - bst r5,4 - bld r22,0 - bst r9,4 - bld r22,1 - bst r13,4 - bld r22,2 - bst r25,4 - 
bld r22,3 - bst r5,5 - bld r22,4 - bst r9,5 - bld r22,5 - bst r13,5 - bld r22,6 - bst r25,5 - bld r22,7 - bst r5,6 - bld r23,0 - bst r9,6 - bld r23,1 - bst r13,6 - bld r23,2 - bst r25,6 - bld r23,3 - bst r5,7 - bld r23,4 - bst r9,7 - bld r23,5 - bst r13,7 - bld r23,6 - bst r25,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128t_encrypt, .-gift128t_encrypt - - .text -.global gift128t_decrypt - .type gift128t_decrypt, @function -gift128t_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 36 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r2,0 - bst r20,1 - bld r6,0 - bst r20,2 - bld r10,0 - bst r20,3 - bld r14,0 - bst r20,4 - bld r2,1 - bst r20,5 - bld r6,1 - bst r20,6 - bld r10,1 - bst r20,7 - bld r14,1 - bst r21,0 - bld r2,2 - bst r21,1 - bld r6,2 - bst r21,2 - bld r10,2 - bst r21,3 - bld r14,2 - bst r21,4 - bld r2,3 - bst r21,5 - bld r6,3 - bst r21,6 - bld r10,3 - bst r21,7 - bld r14,3 - bst r22,0 - bld r2,4 - bst r22,1 - bld r6,4 - bst r22,2 - bld r10,4 - bst r22,3 - bld r14,4 - bst r22,4 - bld r2,5 - bst r22,5 - bld r6,5 - bst r22,6 - bld r10,5 - bst r22,7 - bld r14,5 - bst r23,0 - bld r2,6 - bst r23,1 - bld r6,6 - bst r23,2 - bld r10,6 - bst r23,3 - bld r14,6 - bst r23,4 - bld r2,7 - bst r23,5 - bld r6,7 - bst r23,6 - bld r10,7 - bst r23,7 - bld r14,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r3,0 - bst r20,1 - bld r7,0 - bst r20,2 - bld r11,0 - bst r20,3 - bld r15,0 - bst r20,4 - bld r3,1 - bst r20,5 - bld r7,1 - bst r20,6 - bld r11,1 - bst r20,7 - bld r15,1 - bst r21,0 - bld r3,2 - bst r21,1 - bld r7,2 - bst r21,2 - bld r11,2 - bst r21,3 - bld r15,2 - bst r21,4 - bld r3,3 - bst r21,5 - bld r7,3 - bst r21,6 - bld r11,3 - bst r21,7 - bld r15,3 - bst r22,0 - bld r3,4 - bst r22,1 - bld r7,4 - bst r22,2 - bld r11,4 - bst r22,3 - bld r15,4 - bst r22,4 - bld r3,5 - bst r22,5 - bld r7,5 - bst r22,6 - bld r11,5 - bst r22,7 - bld r15,5 - bst r23,0 - bld r3,6 - bst r23,1 - bld r7,6 - bst r23,2 - bld r11,6 - bst r23,3 - bld r15,6 - bst r23,4 - bld r3,7 - bst r23,5 - bld r7,7 - bst r23,6 - bld r11,7 - bst r23,7 - bld r15,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r4,0 - bst r20,1 - bld r8,0 - bst r20,2 - bld r12,0 - bst r20,3 - bld r24,0 - bst r20,4 - bld r4,1 - bst r20,5 - bld r8,1 - bst r20,6 - bld r12,1 - bst r20,7 - bld r24,1 - bst r21,0 - bld r4,2 - bst r21,1 - bld r8,2 - bst r21,2 - bld r12,2 - bst r21,3 - bld r24,2 - bst r21,4 - bld r4,3 - bst r21,5 - bld r8,3 - bst r21,6 - bld r12,3 - bst r21,7 - bld r24,3 - bst r22,0 - bld r4,4 - bst r22,1 - bld r8,4 - bst r22,2 - bld r12,4 - bst r22,3 - bld r24,4 - bst r22,4 - bld r4,5 - bst r22,5 - bld r8,5 - bst r22,6 - bld r12,5 - bst r22,7 - bld r24,5 - bst r23,0 - bld r4,6 - bst r23,1 - bld r8,6 - bst r23,2 - bld r12,6 - bst r23,3 - bld r24,6 - bst r23,4 - bld r4,7 - bst r23,5 - bld r8,7 - bst r23,6 - bld r12,7 - bst r23,7 - bld r24,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - 
ld r23,X+ - bst r20,0 - bld r5,0 - bst r20,1 - bld r9,0 - bst r20,2 - bld r13,0 - bst r20,3 - bld r25,0 - bst r20,4 - bld r5,1 - bst r20,5 - bld r9,1 - bst r20,6 - bld r13,1 - bst r20,7 - bld r25,1 - bst r21,0 - bld r5,2 - bst r21,1 - bld r9,2 - bst r21,2 - bld r13,2 - bst r21,3 - bld r25,2 - bst r21,4 - bld r5,3 - bst r21,5 - bld r9,3 - bst r21,6 - bld r13,3 - bst r21,7 - bld r25,3 - bst r22,0 - bld r5,4 - bst r22,1 - bld r9,4 - bst r22,2 - bld r13,4 - bst r22,3 - bld r25,4 - bst r22,4 - bld r5,5 - bst r22,5 - bld r9,5 - bst r22,6 - bld r13,5 - bst r22,7 - bld r25,5 - bst r23,0 - bld r5,6 - bst r23,1 - bld r9,6 - bst r23,2 - bld r13,6 - bst r23,3 - bld r25,6 - bst r23,4 - bld r5,7 - bst r23,5 - bld r9,7 - bst r23,6 - bld r13,7 - bst r23,7 - bld r25,7 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r16,Z+14 - ldd r17,Z+15 - mov r0,r17 - mov r17,r26 - mov r26,r0 - movw r20,r26 - movw r22,r16 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,15 - mov r21,r1 - andi r22,15 - mov r23,r1 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - mov r0,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r0 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,3 - andi r21,3 - andi r22,3 - andi r23,3 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r0,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r0 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,17 - andi r21,17 - andi r22,17 - andi r23,17 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r16 - std Y+4,r17 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r16,Z+6 - ldd r17,Z+7 - mov r0,r17 - mov r17,r26 - mov r26,r0 - movw r20,r26 - movw r22,r16 - movw r20,r22 - mov r22,r1 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - andi r20,51 - andi r21,51 - eor r26,r20 - eor r27,r21 - mov r22,r1 - mov r23,r1 - movw r22,r20 - mov r20,r1 - mov r21,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - 
lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,15 - mov r21,r1 - andi r22,15 - mov r23,r1 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,85 - mov r21,r1 - andi r22,85 - mov r23,r1 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r16 - std Y+8,r17 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r16,Z+10 - ldd r17,Z+11 - mov r0,r17 - mov r17,r26 - mov r26,r0 - movw r20,r26 - movw r22,r16 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,15 - mov r21,r1 - andi r22,15 - mov r23,r1 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - mov r0,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r0 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,3 - andi r21,3 - andi r22,3 - andi r23,3 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r0,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r0 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,17 - andi r21,17 - andi r22,17 - andi r23,17 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r16 - std Y+12,r17 - ld r26,Z - ldd r27,Z+1 - ldd r16,Z+2 - ldd r17,Z+3 - mov r0,r17 - mov r17,r26 - mov r26,r0 - movw r20,r26 - movw r22,r16 - movw r20,r22 - mov r22,r1 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor 
r21,r27 - andi r20,51 - andi r21,51 - eor r26,r20 - eor r27,r21 - mov r22,r1 - mov r23,r1 - movw r22,r20 - mov r20,r1 - mov r21,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,15 - mov r21,r1 - andi r22,15 - mov r23,r1 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,85 - mov r21,r1 - andi r22,85 - mov r23,r1 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r16 - std Y+16,r17 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r26,hh8(table_1) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r19,40 - mov r26,r1 -939: - ldd r0,Y+13 - ldd r20,Y+9 - std Y+9,r0 - ldd r0,Y+5 - std Y+5,r20 - ldd r20,Y+1 - std Y+1,r0 - ldd r0,Y+14 - ldd r21,Y+10 - std Y+10,r0 - ldd r0,Y+6 - std Y+6,r21 - ldd r21,Y+2 - std Y+2,r0 - ldd r0,Y+15 - ldd r22,Y+11 - std Y+11,r0 - ldd r0,Y+7 - std Y+7,r22 - ldd r22,Y+3 - std Y+3,r0 - ldd r0,Y+16 - ldd r23,Y+12 - std Y+12,r0 - ldd r0,Y+8 - std Y+8,r23 - ldd r23,Y+4 - std Y+4,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r0 - lsr r21 - ror r20 - ror r0 - lsr r21 - ror r20 - ror r0 - lsr r21 - ror r20 - ror r0 - or r21,r0 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - std Y+13,r20 - std Y+14,r21 - std Y+15,r22 - std Y+16,r23 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ldd r0,Y+5 - eor r10,r0 - ldd r0,Y+6 - eor r11,r0 - ldd r0,Y+7 - eor r12,r0 - ldd r0,Y+8 - eor r13,r0 - ldi r20,128 - eor r25,r20 - dec r19 - mov r30,r19 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - eor r14,r20 - bst r2,1 - bld r0,0 - bst r5,0 - bld r2,1 - bst r2,6 - bld r5,0 - bst r4,1 - bld r2,6 - bst r5,4 - bld r4,1 - bst r2,7 - bld r5,4 - bst r3,1 - bld r2,7 - bst r5,2 - bld r3,1 - bst r4,6 - bld r5,2 - bst r4,5 - bld r4,6 - bst r5,5 - bld r4,5 - bst r5,7 - bld r5,5 - bst r3,7 - bld r5,7 - bst r3,3 - bld r3,7 - bst r3,2 - bld r3,3 - bst r4,2 - bld r3,2 - bst r4,4 - bld r4,2 - bst r2,5 - bld r4,4 - bst r5,1 - bld r2,5 - bst r5,6 - bld r5,1 - bst r4,7 - bld r5,6 - bst r3,5 - bld r4,7 - bst r5,3 - bld r3,5 - bst r3,6 - bld r5,3 - bst r4,3 - bld r3,6 - bst r3,4 - bld r4,3 - bst r2,3 - bld r3,4 - bst r3,0 - bld r2,3 - bst r2,2 - bld r3,0 - bst r4,0 - bld r2,2 - bst r2,4 - bld r4,0 - bst r0,0 - bld 
r2,4 - bst r6,0 - bld r0,0 - bst r7,0 - bld r6,0 - bst r7,2 - bld r7,0 - bst r9,2 - bld r7,2 - bst r9,6 - bld r9,2 - bst r9,7 - bld r9,6 - bst r8,7 - bld r9,7 - bst r8,5 - bld r8,7 - bst r6,5 - bld r8,5 - bst r6,1 - bld r6,5 - bst r0,0 - bld r6,1 - bst r6,2 - bld r0,0 - bst r9,0 - bld r6,2 - bst r7,6 - bld r9,0 - bst r9,3 - bld r7,6 - bst r8,6 - bld r9,3 - bst r9,5 - bld r8,6 - bst r6,7 - bld r9,5 - bst r8,1 - bld r6,7 - bst r6,4 - bld r8,1 - bst r7,1 - bld r6,4 - bst r0,0 - bld r7,1 - bst r6,3 - bld r0,0 - bst r8,0 - bld r6,3 - bst r7,4 - bld r8,0 - bst r7,3 - bld r7,4 - bst r8,2 - bld r7,3 - bst r9,4 - bld r8,2 - bst r7,7 - bld r9,4 - bst r8,3 - bld r7,7 - bst r8,4 - bld r8,3 - bst r7,5 - bld r8,4 - bst r0,0 - bld r7,5 - bst r6,6 - bld r0,0 - bst r9,1 - bld r6,6 - bst r0,0 - bld r9,1 - bst r10,0 - bld r0,0 - bst r12,0 - bld r10,0 - bst r12,4 - bld r12,0 - bst r12,5 - bld r12,4 - bst r11,5 - bld r12,5 - bst r11,3 - bld r11,5 - bst r13,2 - bld r11,3 - bst r10,6 - bld r13,2 - bst r10,1 - bld r10,6 - bst r11,0 - bld r10,1 - bst r12,2 - bld r11,0 - bst r10,4 - bld r12,2 - bst r12,1 - bld r10,4 - bst r11,4 - bld r12,1 - bst r12,3 - bld r11,4 - bst r13,4 - bld r12,3 - bst r12,7 - bld r13,4 - bst r13,5 - bld r12,7 - bst r11,7 - bld r13,5 - bst r13,3 - bld r11,7 - bst r13,6 - bld r13,3 - bst r10,7 - bld r13,6 - bst r13,1 - bld r10,7 - bst r11,6 - bld r13,1 - bst r10,3 - bld r11,6 - bst r13,0 - bld r10,3 - bst r12,6 - bld r13,0 - bst r10,5 - bld r12,6 - bst r11,1 - bld r10,5 - bst r11,2 - bld r11,1 - bst r10,2 - bld r11,2 - bst r0,0 - bld r10,2 - bst r14,0 - bld r0,0 - bst r25,0 - bld r14,0 - bst r25,6 - bld r25,0 - bst r15,7 - bld r25,6 - bst r14,3 - bld r15,7 - bst r0,0 - bld r14,3 - bst r14,1 - bld r0,0 - bst r24,0 - bld r14,1 - bst r25,4 - bld r24,0 - bst r25,7 - bld r25,4 - bst r14,7 - bld r25,7 - bst r0,0 - bld r14,7 - bst r14,2 - bld r0,0 - bst r15,0 - bld r14,2 - bst r25,2 - bld r15,0 - bst r15,6 - bld r25,2 - bst r15,3 - bld r15,6 - bst r0,0 - bld r15,3 - bst r14,4 - bld r0,0 - bst r25,1 - bld r14,4 - bst r24,6 - bld r25,1 - bst r15,5 - bld r24,6 - bst r24,3 - bld r15,5 - bst r0,0 - bld r24,3 - bst r14,5 - bld r0,0 - bst r24,1 - bld r14,5 - bst r24,4 - bld r24,1 - bst r25,5 - bld r24,4 - bst r24,7 - bld r25,5 - bst r0,0 - bld r24,7 - bst r14,6 - bld r0,0 - bst r15,1 - bld r14,6 - bst r24,2 - bld r15,1 - bst r15,4 - bld r24,2 - bst r25,3 - bld r15,4 - bst r0,0 - bld r25,3 - movw r20,r14 - movw r22,r24 - movw r14,r2 - movw r24,r4 - movw r2,r20 - movw r4,r22 - and r20,r6 - and r21,r7 - and r22,r8 - and r23,r9 - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - com r14 - com r15 - com r24 - com r25 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - cp r19,r1 - breq 1355f - inc r26 - ldi r27,5 - cpse r26,r27 - rjmp 939b - mov r26,r1 - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rjmp 939b -1355: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - bst r2,0 - bld r20,0 - bst r6,0 - bld r20,1 - bst r10,0 - bld r20,2 - bst r14,0 - bld r20,3 - bst 
r2,1 - bld r20,4 - bst r6,1 - bld r20,5 - bst r10,1 - bld r20,6 - bst r14,1 - bld r20,7 - bst r2,2 - bld r21,0 - bst r6,2 - bld r21,1 - bst r10,2 - bld r21,2 - bst r14,2 - bld r21,3 - bst r2,3 - bld r21,4 - bst r6,3 - bld r21,5 - bst r10,3 - bld r21,6 - bst r14,3 - bld r21,7 - bst r2,4 - bld r22,0 - bst r6,4 - bld r22,1 - bst r10,4 - bld r22,2 - bst r14,4 - bld r22,3 - bst r2,5 - bld r22,4 - bst r6,5 - bld r22,5 - bst r10,5 - bld r22,6 - bst r14,5 - bld r22,7 - bst r2,6 - bld r23,0 - bst r6,6 - bld r23,1 - bst r10,6 - bld r23,2 - bst r14,6 - bld r23,3 - bst r2,7 - bld r23,4 - bst r6,7 - bld r23,5 - bst r10,7 - bld r23,6 - bst r14,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r3,0 - bld r20,0 - bst r7,0 - bld r20,1 - bst r11,0 - bld r20,2 - bst r15,0 - bld r20,3 - bst r3,1 - bld r20,4 - bst r7,1 - bld r20,5 - bst r11,1 - bld r20,6 - bst r15,1 - bld r20,7 - bst r3,2 - bld r21,0 - bst r7,2 - bld r21,1 - bst r11,2 - bld r21,2 - bst r15,2 - bld r21,3 - bst r3,3 - bld r21,4 - bst r7,3 - bld r21,5 - bst r11,3 - bld r21,6 - bst r15,3 - bld r21,7 - bst r3,4 - bld r22,0 - bst r7,4 - bld r22,1 - bst r11,4 - bld r22,2 - bst r15,4 - bld r22,3 - bst r3,5 - bld r22,4 - bst r7,5 - bld r22,5 - bst r11,5 - bld r22,6 - bst r15,5 - bld r22,7 - bst r3,6 - bld r23,0 - bst r7,6 - bld r23,1 - bst r11,6 - bld r23,2 - bst r15,6 - bld r23,3 - bst r3,7 - bld r23,4 - bst r7,7 - bld r23,5 - bst r11,7 - bld r23,6 - bst r15,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r4,0 - bld r20,0 - bst r8,0 - bld r20,1 - bst r12,0 - bld r20,2 - bst r24,0 - bld r20,3 - bst r4,1 - bld r20,4 - bst r8,1 - bld r20,5 - bst r12,1 - bld r20,6 - bst r24,1 - bld r20,7 - bst r4,2 - bld r21,0 - bst r8,2 - bld r21,1 - bst r12,2 - bld r21,2 - bst r24,2 - bld r21,3 - bst r4,3 - bld r21,4 - bst r8,3 - bld r21,5 - bst r12,3 - bld r21,6 - bst r24,3 - bld r21,7 - bst r4,4 - bld r22,0 - bst r8,4 - bld r22,1 - bst r12,4 - bld r22,2 - bst r24,4 - bld r22,3 - bst r4,5 - bld r22,4 - bst r8,5 - bld r22,5 - bst r12,5 - bld r22,6 - bst r24,5 - bld r22,7 - bst r4,6 - bld r23,0 - bst r8,6 - bld r23,1 - bst r12,6 - bld r23,2 - bst r24,6 - bld r23,3 - bst r4,7 - bld r23,4 - bst r8,7 - bld r23,5 - bst r12,7 - bld r23,6 - bst r24,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r5,0 - bld r20,0 - bst r9,0 - bld r20,1 - bst r13,0 - bld r20,2 - bst r25,0 - bld r20,3 - bst r5,1 - bld r20,4 - bst r9,1 - bld r20,5 - bst r13,1 - bld r20,6 - bst r25,1 - bld r20,7 - bst r5,2 - bld r21,0 - bst r9,2 - bld r21,1 - bst r13,2 - bld r21,2 - bst r25,2 - bld r21,3 - bst r5,3 - bld r21,4 - bst r9,3 - bld r21,5 - bst r13,3 - bld r21,6 - bst r25,3 - bld r21,7 - bst r5,4 - bld r22,0 - bst r9,4 - bld r22,1 - bst r13,4 - bld r22,2 - bst r25,4 - bld r22,3 - bst r5,5 - bld r22,4 - bst r9,5 - bld r22,5 - bst r13,5 - bld r22,6 - bst r25,5 - bld r22,7 - bst r5,6 - bld r23,0 - bst r9,6 - bld r23,1 - bst r13,6 - bld r23,2 - bst r25,6 - bld r23,3 - bst r5,7 - bld r23,4 - bst r9,7 - bld r23,5 - bst r13,7 - bld r23,6 - bst r25,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128t_decrypt, .-gift128t_decrypt - -#endif - -#endif diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128n-tiny-avr.S 
b/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128n-tiny-avr.S deleted file mode 100644 index dd1f7b9..0000000 --- a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-gift128n-tiny-avr.S +++ /dev/null @@ -1,9480 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_TINY - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - .byte 3 - .byte 3 - .byte 39 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - .byte 2 - .byte 0 - .byte 80 - .byte 20 - .byte 129 - .byte 1 - .byte 2 - .byte 1 - .byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128n_init - .type gift128n_init, @function -gift128n_init: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 16 - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - st Z,r22 - std Z+1,r23 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size gift128n_init, .-gift128n_init - - .text -.global gift128n_encrypt - .type gift128n_encrypt, @function -gift128n_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage 
= 100 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - movw r30,r28 - adiw r30,1 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - ldi r24,4 -35: - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - mov r0,r22 - mov r22,r4 - mov r4,r0 - mov r0,r23 - mov r23,r5 - mov r5,r0 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - dec r24 - breq 5117f - rjmp 35b -5117: - subi r30,80 - sbc r31,r1 - ldi r24,2 -121: - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - st Z,r3 - std Z+1,r23 - std Z+2,r2 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor 
r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+4,r3 - std Z+5,r23 - std Z+6,r2 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+8,r3 - std Z+9,r23 - std Z+10,r2 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - 
eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+12,r3 - std Z+13,r23 - std Z+14,r2 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r3 - std Z+17,r23 - std Z+18,r2 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor 
r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+20,r3 - std Z+21,r23 - std Z+22,r2 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+24,r3 - std Z+25,r23 - std Z+26,r2 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor 
r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+28,r3 - std Z+29,r23 - std Z+30,r2 - std Z+31,r22 - dec r24 - breq 1270f - adiw r30,40 - rjmp 121b -1270: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r22,0 - bst r18,1 - bld r4,0 - bst r18,2 - bld r8,0 - bst r18,3 - bld r12,0 - bst r18,4 - bld r22,1 - bst r18,5 - bld r4,1 - bst r18,6 - bld r8,1 - bst r18,7 - bld r12,1 - bst r19,0 - bld r22,2 - bst r19,1 - bld r4,2 - bst r19,2 - bld r8,2 - bst r19,3 - bld r12,2 - bst r19,4 - bld r22,3 - bst r19,5 - bld r4,3 - bst r19,6 - bld r8,3 - bst r19,7 - bld r12,3 - bst r20,0 - bld r22,4 - bst r20,1 - bld r4,4 - bst r20,2 - bld r8,4 - bst r20,3 - bld r12,4 - bst r20,4 - bld r22,5 - bst r20,5 - bld r4,5 - bst r20,6 - bld r8,5 - bst r20,7 - bld r12,5 - bst r21,0 - bld r22,6 - bst r21,1 - bld r4,6 - bst r21,2 - bld r8,6 - bst r21,3 - bld r12,6 - bst r21,4 - bld r22,7 - bst r21,5 - bld r4,7 - bst r21,6 - bld r8,7 - bst r21,7 - bld r12,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r23,0 - bst r18,1 - bld r5,0 - bst r18,2 - bld r9,0 - bst r18,3 - bld r13,0 - bst r18,4 - bld r23,1 - bst r18,5 - bld r5,1 - bst r18,6 - bld r9,1 - bst r18,7 - bld r13,1 - bst r19,0 - bld r23,2 - bst r19,1 - bld r5,2 - bst r19,2 - bld r9,2 - bst r19,3 - bld r13,2 - bst r19,4 - bld r23,3 - bst r19,5 - bld r5,3 - bst r19,6 - bld r9,3 - bst r19,7 - bld r13,3 - bst r20,0 - bld r23,4 - bst r20,1 - bld r5,4 - bst r20,2 - bld r9,4 - bst r20,3 - bld r13,4 - bst r20,4 - bld r23,5 - bst r20,5 - bld r5,5 - bst r20,6 - bld r9,5 - bst r20,7 - bld r13,5 - bst r21,0 - bld r23,6 - bst r21,1 - bld r5,6 - bst r21,2 - bld r9,6 - bst r21,3 - bld r13,6 - bst r21,4 - bld r23,7 - bst r21,5 - bld r5,7 - bst r21,6 - bld r9,7 - bst r21,7 - bld r13,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r2,0 - bst r18,1 - bld r6,0 - bst r18,2 - bld r10,0 - bst r18,3 - bld r14,0 - bst r18,4 - bld r2,1 - bst r18,5 - bld r6,1 - bst r18,6 - bld r10,1 - bst r18,7 - bld r14,1 - bst r19,0 - bld r2,2 - bst r19,1 - bld r6,2 - bst r19,2 - bld r10,2 - bst r19,3 - bld r14,2 - bst r19,4 - bld r2,3 - bst r19,5 - bld r6,3 - bst r19,6 - bld r10,3 - bst r19,7 - bld r14,3 - bst r20,0 - bld r2,4 - bst r20,1 - bld r6,4 - bst r20,2 - bld r10,4 - bst r20,3 - bld r14,4 - bst r20,4 - bld r2,5 - bst r20,5 - bld r6,5 - bst r20,6 - bld r10,5 - bst r20,7 - bld r14,5 - bst r21,0 - bld r2,6 - bst r21,1 - bld r6,6 - bst r21,2 - bld r10,6 - bst r21,3 - bld r14,6 - bst r21,4 - bld r2,7 - bst r21,5 - bld r6,7 - bst r21,6 - bld r10,7 - bst r21,7 - bld r14,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r3,0 - bst r18,1 - bld r7,0 - bst r18,2 - bld r11,0 - bst r18,3 - bld r15,0 - bst 
r18,4 - bld r3,1 - bst r18,5 - bld r7,1 - bst r18,6 - bld r11,1 - bst r18,7 - bld r15,1 - bst r19,0 - bld r3,2 - bst r19,1 - bld r7,2 - bst r19,2 - bld r11,2 - bst r19,3 - bld r15,2 - bst r19,4 - bld r3,3 - bst r19,5 - bld r7,3 - bst r19,6 - bld r11,3 - bst r19,7 - bld r15,3 - bst r20,0 - bld r3,4 - bst r20,1 - bld r7,4 - bst r20,2 - bld r11,4 - bst r20,3 - bld r15,4 - bst r20,4 - bld r3,5 - bst r20,5 - bld r7,5 - bst r20,6 - bld r11,5 - bst r20,7 - bld r15,5 - bst r21,0 - bld r3,6 - bst r21,1 - bld r7,6 - bst r21,2 - bld r11,6 - bst r21,3 - bld r15,6 - bst r21,4 - bld r3,7 - bst r21,5 - bld r7,7 - bst r21,6 - bld r11,7 - bst r21,7 - bld r15,7 - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 1585f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2323f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 1585f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2323f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 1585f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2323f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 1585f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2323f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 1585f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2323f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 1585f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2323f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 1585f - rcall 1585f - rjmp 2797f -1585: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - 
lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - 
lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor 
r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor 
r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -2323: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - 
andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -2797: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - bst r22,0 - bld r18,0 - bst r4,0 - bld r18,1 - bst r8,0 - bld r18,2 - bst r12,0 - bld r18,3 - bst r22,1 - bld r18,4 - bst r4,1 - bld r18,5 - bst r8,1 - bld r18,6 - bst r12,1 - bld r18,7 - bst r22,2 - bld r19,0 - bst r4,2 - bld r19,1 - bst r8,2 - bld r19,2 - bst r12,2 - bld r19,3 - bst r22,3 - bld r19,4 - bst r4,3 - bld r19,5 - bst r8,3 - bld r19,6 - bst r12,3 - bld r19,7 - bst r22,4 - bld r20,0 - bst r4,4 - bld r20,1 - bst r8,4 - bld r20,2 - bst r12,4 - bld r20,3 - bst r22,5 - bld r20,4 - bst r4,5 - bld r20,5 - bst r8,5 - bld r20,6 - bst r12,5 - bld r20,7 - bst r22,6 - bld r21,0 - bst r4,6 - bld r21,1 - bst r8,6 - bld r21,2 - bst r12,6 - bld r21,3 - bst r22,7 - bld r21,4 - bst r4,7 - bld r21,5 - bst r8,7 - bld r21,6 - bst r12,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r23,0 - bld r18,0 - bst r5,0 - bld r18,1 - bst r9,0 - bld r18,2 - bst r13,0 - bld r18,3 - bst r23,1 - bld r18,4 - bst r5,1 - bld r18,5 - bst r9,1 - bld r18,6 - bst r13,1 - bld r18,7 - bst r23,2 - bld r19,0 - bst r5,2 - bld r19,1 - bst r9,2 - bld r19,2 - bst r13,2 - bld r19,3 - bst 
r23,3 - bld r19,4 - bst r5,3 - bld r19,5 - bst r9,3 - bld r19,6 - bst r13,3 - bld r19,7 - bst r23,4 - bld r20,0 - bst r5,4 - bld r20,1 - bst r9,4 - bld r20,2 - bst r13,4 - bld r20,3 - bst r23,5 - bld r20,4 - bst r5,5 - bld r20,5 - bst r9,5 - bld r20,6 - bst r13,5 - bld r20,7 - bst r23,6 - bld r21,0 - bst r5,6 - bld r21,1 - bst r9,6 - bld r21,2 - bst r13,6 - bld r21,3 - bst r23,7 - bld r21,4 - bst r5,7 - bld r21,5 - bst r9,7 - bld r21,6 - bst r13,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r2,0 - bld r18,0 - bst r6,0 - bld r18,1 - bst r10,0 - bld r18,2 - bst r14,0 - bld r18,3 - bst r2,1 - bld r18,4 - bst r6,1 - bld r18,5 - bst r10,1 - bld r18,6 - bst r14,1 - bld r18,7 - bst r2,2 - bld r19,0 - bst r6,2 - bld r19,1 - bst r10,2 - bld r19,2 - bst r14,2 - bld r19,3 - bst r2,3 - bld r19,4 - bst r6,3 - bld r19,5 - bst r10,3 - bld r19,6 - bst r14,3 - bld r19,7 - bst r2,4 - bld r20,0 - bst r6,4 - bld r20,1 - bst r10,4 - bld r20,2 - bst r14,4 - bld r20,3 - bst r2,5 - bld r20,4 - bst r6,5 - bld r20,5 - bst r10,5 - bld r20,6 - bst r14,5 - bld r20,7 - bst r2,6 - bld r21,0 - bst r6,6 - bld r21,1 - bst r10,6 - bld r21,2 - bst r14,6 - bld r21,3 - bst r2,7 - bld r21,4 - bst r6,7 - bld r21,5 - bst r10,7 - bld r21,6 - bst r14,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r3,0 - bld r18,0 - bst r7,0 - bld r18,1 - bst r11,0 - bld r18,2 - bst r15,0 - bld r18,3 - bst r3,1 - bld r18,4 - bst r7,1 - bld r18,5 - bst r11,1 - bld r18,6 - bst r15,1 - bld r18,7 - bst r3,2 - bld r19,0 - bst r7,2 - bld r19,1 - bst r11,2 - bld r19,2 - bst r15,2 - bld r19,3 - bst r3,3 - bld r19,4 - bst r7,3 - bld r19,5 - bst r11,3 - bld r19,6 - bst r15,3 - bld r19,7 - bst r3,4 - bld r20,0 - bst r7,4 - bld r20,1 - bst r11,4 - bld r20,2 - bst r15,4 - bld r20,3 - bst r3,5 - bld r20,4 - bst r7,5 - bld r20,5 - bst r11,5 - bld r20,6 - bst r15,5 - bld r20,7 - bst r3,6 - bld r21,0 - bst r7,6 - bld r21,1 - bst r11,6 - bld r21,2 - bst r15,6 - bld r21,3 - bst r3,7 - bld r21,4 - bst r7,7 - bld r21,5 - bst r11,7 - bld r21,6 - bst r15,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128n_encrypt, .-gift128n_encrypt - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 40 -table_1: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - - .text -.global gift128n_decrypt - .type gift128n_decrypt, @function -gift128n_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r22,0 - bst r18,1 - bld r4,0 - bst r18,2 - bld r8,0 - bst r18,3 - 
bld r12,0 - bst r18,4 - bld r22,1 - bst r18,5 - bld r4,1 - bst r18,6 - bld r8,1 - bst r18,7 - bld r12,1 - bst r19,0 - bld r22,2 - bst r19,1 - bld r4,2 - bst r19,2 - bld r8,2 - bst r19,3 - bld r12,2 - bst r19,4 - bld r22,3 - bst r19,5 - bld r4,3 - bst r19,6 - bld r8,3 - bst r19,7 - bld r12,3 - bst r20,0 - bld r22,4 - bst r20,1 - bld r4,4 - bst r20,2 - bld r8,4 - bst r20,3 - bld r12,4 - bst r20,4 - bld r22,5 - bst r20,5 - bld r4,5 - bst r20,6 - bld r8,5 - bst r20,7 - bld r12,5 - bst r21,0 - bld r22,6 - bst r21,1 - bld r4,6 - bst r21,2 - bld r8,6 - bst r21,3 - bld r12,6 - bst r21,4 - bld r22,7 - bst r21,5 - bld r4,7 - bst r21,6 - bld r8,7 - bst r21,7 - bld r12,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r23,0 - bst r18,1 - bld r5,0 - bst r18,2 - bld r9,0 - bst r18,3 - bld r13,0 - bst r18,4 - bld r23,1 - bst r18,5 - bld r5,1 - bst r18,6 - bld r9,1 - bst r18,7 - bld r13,1 - bst r19,0 - bld r23,2 - bst r19,1 - bld r5,2 - bst r19,2 - bld r9,2 - bst r19,3 - bld r13,2 - bst r19,4 - bld r23,3 - bst r19,5 - bld r5,3 - bst r19,6 - bld r9,3 - bst r19,7 - bld r13,3 - bst r20,0 - bld r23,4 - bst r20,1 - bld r5,4 - bst r20,2 - bld r9,4 - bst r20,3 - bld r13,4 - bst r20,4 - bld r23,5 - bst r20,5 - bld r5,5 - bst r20,6 - bld r9,5 - bst r20,7 - bld r13,5 - bst r21,0 - bld r23,6 - bst r21,1 - bld r5,6 - bst r21,2 - bld r9,6 - bst r21,3 - bld r13,6 - bst r21,4 - bld r23,7 - bst r21,5 - bld r5,7 - bst r21,6 - bld r9,7 - bst r21,7 - bld r13,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r2,0 - bst r18,1 - bld r6,0 - bst r18,2 - bld r10,0 - bst r18,3 - bld r14,0 - bst r18,4 - bld r2,1 - bst r18,5 - bld r6,1 - bst r18,6 - bld r10,1 - bst r18,7 - bld r14,1 - bst r19,0 - bld r2,2 - bst r19,1 - bld r6,2 - bst r19,2 - bld r10,2 - bst r19,3 - bld r14,2 - bst r19,4 - bld r2,3 - bst r19,5 - bld r6,3 - bst r19,6 - bld r10,3 - bst r19,7 - bld r14,3 - bst r20,0 - bld r2,4 - bst r20,1 - bld r6,4 - bst r20,2 - bld r10,4 - bst r20,3 - bld r14,4 - bst r20,4 - bld r2,5 - bst r20,5 - bld r6,5 - bst r20,6 - bld r10,5 - bst r20,7 - bld r14,5 - bst r21,0 - bld r2,6 - bst r21,1 - bld r6,6 - bst r21,2 - bld r10,6 - bst r21,3 - bld r14,6 - bst r21,4 - bld r2,7 - bst r21,5 - bld r6,7 - bst r21,6 - bld r10,7 - bst r21,7 - bld r14,7 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - bst r18,0 - bld r3,0 - bst r18,1 - bld r7,0 - bst r18,2 - bld r11,0 - bst r18,3 - bld r15,0 - bst r18,4 - bld r3,1 - bst r18,5 - bld r7,1 - bst r18,6 - bld r11,1 - bst r18,7 - bld r15,1 - bst r19,0 - bld r3,2 - bst r19,1 - bld r7,2 - bst r19,2 - bld r11,2 - bst r19,3 - bld r15,2 - bst r19,4 - bld r3,3 - bst r19,5 - bld r7,3 - bst r19,6 - bld r11,3 - bst r19,7 - bld r15,3 - bst r20,0 - bld r3,4 - bst r20,1 - bld r7,4 - bst r20,2 - bld r11,4 - bst r20,3 - bld r15,4 - bst r20,4 - bld r3,5 - bst r20,5 - bld r7,5 - bst r20,6 - bld r11,5 - bst r20,7 - bld r15,5 - bst r21,0 - bld r3,6 - bst r21,1 - bld r7,6 - bst r21,2 - bld r11,6 - bst r21,3 - bld r15,6 - bst r21,4 - bld r3,7 - bst r21,5 - bld r7,7 - bst r21,6 - bld r11,7 - bst r21,7 - bld r15,7 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - 
ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r17,hh8(table_1) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -370: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 522f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 522f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 522f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 522f - cpse r16,r1 - rjmp 370b - rjmp 867f -522: - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - 
bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - bld r11,3 - bst r8,7 - bld r11,6 - bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst r9,1 - bld r8,5 - bst r9,2 - bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -867: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - bst r22,0 - bld r18,0 - bst r4,0 - bld r18,1 - bst r8,0 - bld r18,2 - bst r12,0 - bld r18,3 - bst r22,1 - bld r18,4 - bst r4,1 - bld r18,5 - bst r8,1 - bld r18,6 - bst r12,1 - bld r18,7 - bst r22,2 - bld r19,0 - bst 
r4,2 - bld r19,1 - bst r8,2 - bld r19,2 - bst r12,2 - bld r19,3 - bst r22,3 - bld r19,4 - bst r4,3 - bld r19,5 - bst r8,3 - bld r19,6 - bst r12,3 - bld r19,7 - bst r22,4 - bld r20,0 - bst r4,4 - bld r20,1 - bst r8,4 - bld r20,2 - bst r12,4 - bld r20,3 - bst r22,5 - bld r20,4 - bst r4,5 - bld r20,5 - bst r8,5 - bld r20,6 - bst r12,5 - bld r20,7 - bst r22,6 - bld r21,0 - bst r4,6 - bld r21,1 - bst r8,6 - bld r21,2 - bst r12,6 - bld r21,3 - bst r22,7 - bld r21,4 - bst r4,7 - bld r21,5 - bst r8,7 - bld r21,6 - bst r12,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r23,0 - bld r18,0 - bst r5,0 - bld r18,1 - bst r9,0 - bld r18,2 - bst r13,0 - bld r18,3 - bst r23,1 - bld r18,4 - bst r5,1 - bld r18,5 - bst r9,1 - bld r18,6 - bst r13,1 - bld r18,7 - bst r23,2 - bld r19,0 - bst r5,2 - bld r19,1 - bst r9,2 - bld r19,2 - bst r13,2 - bld r19,3 - bst r23,3 - bld r19,4 - bst r5,3 - bld r19,5 - bst r9,3 - bld r19,6 - bst r13,3 - bld r19,7 - bst r23,4 - bld r20,0 - bst r5,4 - bld r20,1 - bst r9,4 - bld r20,2 - bst r13,4 - bld r20,3 - bst r23,5 - bld r20,4 - bst r5,5 - bld r20,5 - bst r9,5 - bld r20,6 - bst r13,5 - bld r20,7 - bst r23,6 - bld r21,0 - bst r5,6 - bld r21,1 - bst r9,6 - bld r21,2 - bst r13,6 - bld r21,3 - bst r23,7 - bld r21,4 - bst r5,7 - bld r21,5 - bst r9,7 - bld r21,6 - bst r13,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r2,0 - bld r18,0 - bst r6,0 - bld r18,1 - bst r10,0 - bld r18,2 - bst r14,0 - bld r18,3 - bst r2,1 - bld r18,4 - bst r6,1 - bld r18,5 - bst r10,1 - bld r18,6 - bst r14,1 - bld r18,7 - bst r2,2 - bld r19,0 - bst r6,2 - bld r19,1 - bst r10,2 - bld r19,2 - bst r14,2 - bld r19,3 - bst r2,3 - bld r19,4 - bst r6,3 - bld r19,5 - bst r10,3 - bld r19,6 - bst r14,3 - bld r19,7 - bst r2,4 - bld r20,0 - bst r6,4 - bld r20,1 - bst r10,4 - bld r20,2 - bst r14,4 - bld r20,3 - bst r2,5 - bld r20,4 - bst r6,5 - bld r20,5 - bst r10,5 - bld r20,6 - bst r14,5 - bld r20,7 - bst r2,6 - bld r21,0 - bst r6,6 - bld r21,1 - bst r10,6 - bld r21,2 - bst r14,6 - bld r21,3 - bst r2,7 - bld r21,4 - bst r6,7 - bld r21,5 - bst r10,7 - bld r21,6 - bst r14,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - bst r3,0 - bld r18,0 - bst r7,0 - bld r18,1 - bst r11,0 - bld r18,2 - bst r15,0 - bld r18,3 - bst r3,1 - bld r18,4 - bst r7,1 - bld r18,5 - bst r11,1 - bld r18,6 - bst r15,1 - bld r18,7 - bst r3,2 - bld r19,0 - bst r7,2 - bld r19,1 - bst r11,2 - bld r19,2 - bst r15,2 - bld r19,3 - bst r3,3 - bld r19,4 - bst r7,3 - bld r19,5 - bst r11,3 - bld r19,6 - bst r15,3 - bld r19,7 - bst r3,4 - bld r20,0 - bst r7,4 - bld r20,1 - bst r11,4 - bld r20,2 - bst r15,4 - bld r20,3 - bst r3,5 - bld r20,4 - bst r7,5 - bld r20,5 - bst r11,5 - bld r20,6 - bst r15,5 - bld r20,7 - bst r3,6 - bld r21,0 - bst r7,6 - bld r21,1 - bst r11,6 - bld r21,2 - bst r15,6 - bld r21,3 - bst r3,7 - bld r21,4 - bst r7,7 - bld r21,5 - bst r11,7 - bld r21,6 - bst r15,7 - bld r21,7 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128n_decrypt, .-gift128n_decrypt - - .text -.global gift128t_encrypt - .type gift128t_encrypt, @function -gift128t_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - 
push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ld r2,Z - ldd r3,Z+1 - ldd r4,Z+2 - ldd r5,Z+3 - ldd r6,Z+4 - ldd r7,Z+5 - ldd r8,Z+6 - ldd r9,Z+7 - ldd r10,Z+8 - ldd r11,Z+9 - ldd r12,Z+10 - ldd r13,Z+11 - ldd r14,Z+12 - ldd r15,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - movw r30,r28 - adiw r30,1 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - st Z+,r24 - st Z+,r25 - ldi r19,4 -35: - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - mov r0,r1 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - or r5,r0 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - mov r0,r4 - mov r4,r8 - mov r8,r0 - mov r0,r5 - mov r5,r9 - mov r9,r0 - st Z+,r14 - st Z+,r15 - st Z+,r24 - st Z+,r25 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - mov r0,r1 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - or r13,r0 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - mov r0,r12 - mov r12,r24 - mov r24,r0 - mov r0,r13 - mov r13,r25 - mov r25,r0 - dec r19 - breq 5117f - rjmp 35b -5117: - subi r30,80 - sbc r31,r1 - ldi r19,2 -121: - ld r2,Z - ldd r3,Z+1 - ldd r4,Z+2 - ldd r5,Z+3 - movw r20,r2 - movw r22,r4 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,85 - mov r21,r1 - andi r22,85 - mov r23,r1 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,15 - mov r21,r1 - andi r22,15 - mov r23,r1 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - movw r20,r22 - mov r22,r1 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - andi r20,51 - andi r21,51 - eor r2,r20 - eor r3,r21 - mov r22,r1 - mov r23,r1 - movw r22,r20 - mov r20,r1 - mov r21,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - st Z,r5 - std Z+1,r3 - std Z+2,r4 - std Z+3,r2 - ldd r2,Z+4 - ldd r3,Z+5 - ldd r4,Z+6 - ldd r5,Z+7 - movw r20,r2 - movw r22,r4 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,85 - mov r21,r1 - andi r22,85 - mov r23,r1 - eor r2,r20 - eor r3,r21 - 
eor r4,r22 - eor r5,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,15 - mov r21,r1 - andi r22,15 - mov r23,r1 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - movw r20,r22 - mov r22,r1 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - andi r20,51 - andi r21,51 - eor r2,r20 - eor r3,r21 - mov r22,r1 - mov r23,r1 - movw r22,r20 - mov r20,r1 - mov r21,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - std Z+4,r5 - std Z+5,r3 - std Z+6,r4 - std Z+7,r2 - ldd r2,Z+8 - ldd r3,Z+9 - ldd r4,Z+10 - ldd r5,Z+11 - movw r20,r2 - movw r22,r4 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,17 - andi r21,17 - andi r22,17 - andi r23,17 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r0,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r0 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,3 - andi r21,3 - andi r22,3 - andi r23,3 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r0,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r0 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,15 - mov r21,r1 - andi r22,15 - mov r23,r1 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - std Z+8,r5 - std Z+9,r3 - std Z+10,r4 - std Z+11,r2 - ldd r2,Z+12 - ldd r3,Z+13 - ldd r4,Z+14 - ldd r5,Z+15 - movw r20,r2 - movw r22,r4 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,17 - andi r21,17 - andi r22,17 - andi r23,17 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 
- rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r0,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r0 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,3 - andi r21,3 - andi r22,3 - andi r23,3 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r0,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r0 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,15 - mov r21,r1 - andi r22,15 - mov r23,r1 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - std Z+12,r5 - std Z+13,r3 - std Z+14,r4 - std Z+15,r2 - ldd r2,Z+16 - ldd r3,Z+17 - ldd r4,Z+18 - ldd r5,Z+19 - movw r20,r2 - movw r22,r4 - mov r0,r1 - lsl r21 - rol r22 - rol r23 - rol r0 - movw r20,r22 - mov r22,r0 - mov r23,r1 - eor r20,r2 - eor r21,r3 - andi r20,170 - andi r21,170 - eor r2,r20 - eor r3,r21 - mov r22,r1 - mov r23,r1 - mov r0,r1 - lsr r22 - ror r21 - ror r20 - ror r0 - movw r22,r20 - mov r21,r0 - mov r20,r1 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - movw r20,r22 - mov r22,r1 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - andi r20,51 - andi r21,51 - eor r2,r20 - eor r3,r21 - mov r22,r1 - mov r23,r1 - movw r22,r20 - mov r20,r1 - mov r21,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - andi r20,240 - andi r21,240 - eor r2,r20 - eor r3,r21 - mov r22,r1 - mov r23,r1 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - std Z+16,r5 - std Z+17,r3 - std Z+18,r4 - std Z+19,r2 - ldd r2,Z+20 - ldd r3,Z+21 - ldd r4,Z+22 - ldd r5,Z+23 - movw r20,r2 - movw r22,r4 - mov r0,r1 - lsl r21 - rol r22 - rol r23 - rol r0 - movw r20,r22 - mov r22,r0 - mov r23,r1 - eor r20,r2 - eor r21,r3 - andi r20,170 - andi r21,170 - eor r2,r20 - eor r3,r21 - mov r22,r1 - mov r23,r1 - mov r0,r1 - lsr r22 - ror r21 - ror r20 - ror r0 - movw r22,r20 - mov r21,r0 - mov r20,r1 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - movw r20,r22 - mov r22,r1 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - andi 
r20,51 - andi r21,51 - eor r2,r20 - eor r3,r21 - mov r22,r1 - mov r23,r1 - movw r22,r20 - mov r20,r1 - mov r21,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - andi r20,240 - andi r21,240 - eor r2,r20 - eor r3,r21 - mov r22,r1 - mov r23,r1 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - std Z+20,r5 - std Z+21,r3 - std Z+22,r4 - std Z+23,r2 - ldd r2,Z+24 - ldd r3,Z+25 - ldd r4,Z+26 - ldd r5,Z+27 - movw r20,r2 - movw r22,r4 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,10 - andi r21,10 - andi r22,10 - andi r23,10 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r0,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r0 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,204 - mov r21,r1 - andi r22,204 - mov r23,r1 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r0,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r0 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - andi r20,240 - andi r21,240 - eor r2,r20 - eor r3,r21 - mov r22,r1 - mov r23,r1 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - std Z+24,r5 - std Z+25,r3 - std Z+26,r4 - std Z+27,r2 - ldd r2,Z+28 - ldd r3,Z+29 - ldd r4,Z+30 - ldd r5,Z+31 - movw r20,r2 - movw r22,r4 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,10 - andi r21,10 - andi r22,10 - andi r23,10 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r0,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - lsl r20 - rol r21 - rol r22 - rol r23 - rol r0 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r0 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - andi r20,204 - mov r21,r1 - andi r22,204 - mov r23,r1 - eor r2,r20 - 
eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r0,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - lsr r23 - ror r22 - ror r21 - ror r20 - ror r0 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r0 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - movw r20,r2 - movw r22,r4 - mov r20,r21 - mov r21,r22 - mov r22,r23 - mov r23,r1 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r2 - eor r21,r3 - andi r20,240 - andi r21,240 - eor r2,r20 - eor r3,r21 - mov r22,r1 - mov r23,r1 - mov r23,r22 - mov r22,r21 - mov r21,r20 - mov r20,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - std Z+28,r5 - std Z+29,r3 - std Z+30,r4 - std Z+31,r2 - dec r19 - breq 1270f - adiw r30,40 - rjmp 121b -1270: - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r2,0 - bst r20,1 - bld r6,0 - bst r20,2 - bld r10,0 - bst r20,3 - bld r14,0 - bst r20,4 - bld r2,1 - bst r20,5 - bld r6,1 - bst r20,6 - bld r10,1 - bst r20,7 - bld r14,1 - bst r21,0 - bld r2,2 - bst r21,1 - bld r6,2 - bst r21,2 - bld r10,2 - bst r21,3 - bld r14,2 - bst r21,4 - bld r2,3 - bst r21,5 - bld r6,3 - bst r21,6 - bld r10,3 - bst r21,7 - bld r14,3 - bst r22,0 - bld r2,4 - bst r22,1 - bld r6,4 - bst r22,2 - bld r10,4 - bst r22,3 - bld r14,4 - bst r22,4 - bld r2,5 - bst r22,5 - bld r6,5 - bst r22,6 - bld r10,5 - bst r22,7 - bld r14,5 - bst r23,0 - bld r2,6 - bst r23,1 - bld r6,6 - bst r23,2 - bld r10,6 - bst r23,3 - bld r14,6 - bst r23,4 - bld r2,7 - bst r23,5 - bld r6,7 - bst r23,6 - bld r10,7 - bst r23,7 - bld r14,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r3,0 - bst r20,1 - bld r7,0 - bst r20,2 - bld r11,0 - bst r20,3 - bld r15,0 - bst r20,4 - bld r3,1 - bst r20,5 - bld r7,1 - bst r20,6 - bld r11,1 - bst r20,7 - bld r15,1 - bst r21,0 - bld r3,2 - bst r21,1 - bld r7,2 - bst r21,2 - bld r11,2 - bst r21,3 - bld r15,2 - bst r21,4 - bld r3,3 - bst r21,5 - bld r7,3 - bst r21,6 - bld r11,3 - bst r21,7 - bld r15,3 - bst r22,0 - bld r3,4 - bst r22,1 - bld r7,4 - bst r22,2 - bld r11,4 - bst r22,3 - bld r15,4 - bst r22,4 - bld r3,5 - bst r22,5 - bld r7,5 - bst r22,6 - bld r11,5 - bst r22,7 - bld r15,5 - bst r23,0 - bld r3,6 - bst r23,1 - bld r7,6 - bst r23,2 - bld r11,6 - bst r23,3 - bld r15,6 - bst r23,4 - bld r3,7 - bst r23,5 - bld r7,7 - bst r23,6 - bld r11,7 - bst r23,7 - bld r15,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r4,0 - bst r20,1 - bld r8,0 - bst r20,2 - bld r12,0 - bst r20,3 - bld r24,0 - bst r20,4 - bld r4,1 - bst r20,5 - bld r8,1 - bst r20,6 - bld r12,1 - bst r20,7 - bld r24,1 - bst r21,0 - bld r4,2 - bst r21,1 - bld r8,2 - bst r21,2 - bld r12,2 - bst r21,3 - bld r24,2 - bst r21,4 - bld r4,3 - bst r21,5 - bld r8,3 - bst r21,6 - bld r12,3 - bst r21,7 - bld r24,3 - bst r22,0 - bld r4,4 - bst r22,1 - bld r8,4 - bst r22,2 - bld r12,4 - bst r22,3 - bld r24,4 - bst r22,4 - bld r4,5 - bst r22,5 - bld r8,5 - bst r22,6 - bld r12,5 - bst r22,7 - bld r24,5 - bst r23,0 - bld r4,6 - bst r23,1 - bld r8,6 - bst r23,2 - bld r12,6 - bst r23,3 - bld r24,6 - bst r23,4 - bld r4,7 - bst r23,5 - bld r8,7 - bst r23,6 - bld r12,7 - bst r23,7 - bld r24,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r5,0 - bst r20,1 - bld r9,0 - bst r20,2 - bld r13,0 - bst r20,3 - bld r25,0 - bst r20,4 - bld r5,1 - bst 
r20,5 - bld r9,1 - bst r20,6 - bld r13,1 - bst r20,7 - bld r25,1 - bst r21,0 - bld r5,2 - bst r21,1 - bld r9,2 - bst r21,2 - bld r13,2 - bst r21,3 - bld r25,2 - bst r21,4 - bld r5,3 - bst r21,5 - bld r9,3 - bst r21,6 - bld r13,3 - bst r21,7 - bld r25,3 - bst r22,0 - bld r5,4 - bst r22,1 - bld r9,4 - bst r22,2 - bld r13,4 - bst r22,3 - bld r25,4 - bst r22,4 - bld r5,5 - bst r22,5 - bld r9,5 - bst r22,6 - bld r13,5 - bst r22,7 - bld r25,5 - bst r23,0 - bld r5,6 - bst r23,1 - bld r9,6 - bst r23,2 - bld r13,6 - bst r23,3 - bld r25,6 - bst r23,4 - bld r5,7 - bst r23,5 - bld r9,7 - bst r23,6 - bld r13,7 - bst r23,7 - bld r25,7 - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - rcall 1613f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2351f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,20 - adiw r26,40 - rcall 1613f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2351f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,40 - sbiw r26,40 - rcall 1613f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2351f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,60 - adiw r26,40 - rcall 1613f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2351f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,80 - sbiw r26,40 - rcall 1613f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2351f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,100 - adiw r26,40 - rcall 1613f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2351f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r19,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r19 -#endif - ldi r30,120 - sbiw r26,40 - rcall 1613f - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rcall 1613f - rjmp 2826f -1613: - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - com r14 - com r15 - com r24 - com r25 - mov r0,r2 - 
and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - movw r20,r6 - movw r22,r8 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,204 - andi r21,204 - andi r22,204 - andi r23,204 - lsr r9 - ror r8 - ror r7 - ror r6 - lsr r9 - ror r8 - ror r7 - ror r6 - ldi r19,51 - and r6,r19 - and r7,r19 - and r8,r19 - and r9,r19 - or r6,r20 - or r7,r21 - or r8,r22 - or r9,r23 - movw r20,r10 - movw r22,r12 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,238 - andi r21,238 - andi r22,238 - andi r23,238 - lsr r13 - ror r12 - ror r11 - ror r10 - lsr r13 - ror r12 - ror r11 - ror r10 - lsr r13 - ror r12 - ror r11 - ror r10 - ldi r17,17 - and r10,r17 - and r11,r17 - and r12,r17 - and r13,r17 - or r10,r20 - or r11,r21 - or r12,r22 - or r13,r23 - movw r20,r14 - movw r22,r24 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - andi r20,136 - andi r21,136 - andi r22,136 - andi r23,136 - lsr r25 - ror r24 - ror r15 - ror r14 - ldi r16,119 - and r14,r16 - and r15,r16 - andi r24,119 - andi r25,119 - or r14,r20 - or r15,r21 - or r24,r22 - or r25,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r24 - and r0,r12 - eor r8,r0 - mov r0,r25 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r8 - and r0,r4 - eor r24,r0 - mov r0,r9 - and r0,r5 - eor r25,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r24 - or r0,r8 - eor r12,r0 - mov r0,r25 - or r0,r9 - eor r13,r0 - eor r2,r10 - eor r3,r11 - eor r4,r12 - eor r5,r13 - eor r6,r2 - eor r7,r3 - eor r8,r4 - eor r9,r5 - com r2 - com r3 - com r4 - com r5 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r24 - and r0,r8 - eor r12,r0 - mov r0,r25 - and r0,r9 - eor r13,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r1 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - or r5,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r12 - rol r13 - adc r12,r1 - lsl r12 - rol r13 - adc r12,r1 - lsl r12 - rol r13 - adc r12,r1 - lsl r12 - rol r13 - adc r12,r1 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor 
r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r14,r20 - eor r15,r21 - eor r24,r22 - eor r25,r23 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - com r14 - com r15 - com r24 - com r25 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - movw r20,r6 - movw r22,r8 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - andi r20,85 - andi r21,85 - andi r22,85 - andi r23,85 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - mov r0,r12 - mov r12,r10 - mov r10,r0 - mov r0,r13 - mov r13,r11 - mov r11,r0 - movw r20,r10 - movw r22,r12 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r10 - eor r21,r11 - andi r20,85 - andi r21,85 - eor r10,r20 - eor r11,r21 - mov r22,r1 - mov r23,r1 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - mov r0,r24 - mov r24,r14 - mov r14,r0 - mov r0,r25 - mov r25,r15 - mov r15,r0 - movw r20,r24 - lsr r21 - ror r20 - eor r20,r24 - eor r21,r25 - andi r20,85 - andi r21,85 - eor r24,r20 - eor r25,r21 - lsl r20 - rol r21 - eor r24,r20 - eor r25,r21 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r24 - and r0,r12 - eor r8,r0 - mov r0,r25 - and r0,r13 - eor r9,r0 - 
mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r8 - and r0,r4 - eor r24,r0 - mov r0,r9 - and r0,r5 - eor r25,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r24 - or r0,r8 - eor r12,r0 - mov r0,r25 - or r0,r9 - eor r13,r0 - eor r2,r10 - eor r3,r11 - eor r4,r12 - eor r5,r13 - eor r6,r2 - eor r7,r3 - eor r8,r4 - eor r9,r5 - com r2 - com r3 - com r4 - com r5 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r24 - and r0,r8 - eor r12,r0 - mov r0,r25 - and r0,r9 - eor r13,r0 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r5 - adc r5,r1 - lsl r5 - adc r5,r1 - swap r6 - swap r7 - swap r8 - swap r9 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - mov r0,r1 - lsr r12 - ror r0 - lsr r12 - ror r0 - or r12,r0 - mov r0,r1 - lsr r13 - ror r0 - lsr r13 - ror r0 - or r13,r0 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r14,r20 - eor r15,r21 - eor r24,r22 - eor r25,r23 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - com r14 - com r15 - com r24 - com r25 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r8 - eor r12,r0 - mov r0,r5 - and r0,r9 - eor r13,r0 - mov r0,r8 - mov r8,r6 - mov r6,r0 - mov r0,r9 - mov r9,r7 - mov r7,r0 - mov r0,r10 - mov r10,r11 - mov r11,r12 - mov r12,r13 - mov r13,r0 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - 
lpm - mov r22,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - eor r14,r2 - eor r15,r3 - eor r24,r4 - eor r25,r5 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - ret -2351: - movw r30,r26 - sbiw r30,40 - push r5 - push r4 - push r3 - push r2 - push r9 - push r8 - push r7 - push r6 - ld r2,Z - ldd r3,Z+1 - ldd r4,Z+2 - ldd r5,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r16,Z+6 - ldd r17,Z+7 - movw r20,r26 - movw r22,r16 - movw r20,r22 - mov r22,r1 - mov r23,r1 - eor r20,r26 - eor r21,r27 - andi r20,51 - andi r21,51 - eor r26,r20 - eor r27,r21 - mov r22,r1 - mov r23,r1 - movw r22,r20 - mov r20,r1 - mov r21,r1 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,68 - andi r21,68 - andi r22,85 - andi r23,85 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - st Z,r26 - std Z+1,r27 - std Z+2,r16 - std Z+3,r17 - movw r20,r2 - movw r22,r4 - andi r20,51 - andi r21,51 - andi r22,51 - andi r23,51 - ldi r19,204 - and r2,r19 - and r3,r19 - and r4,r19 - and r5,r19 - or r4,r23 - or r5,r20 - or r2,r21 - or r3,r22 - movw r20,r4 - movw r22,r2 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r4 - eor r21,r5 - eor r22,r2 - eor r23,r3 - mov r20,r1 - andi r21,17 - andi r22,85 - andi r23,85 - eor r4,r20 - eor r5,r21 - eor r2,r22 - eor r3,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r4,r20 - eor r5,r21 - eor r2,r22 - eor r3,r23 - std Z+4,r4 - std Z+5,r5 - std Z+6,r2 - std Z+7,r3 - ldd r2,Z+8 - ldd r3,Z+9 - ldd r4,Z+10 - ldd r5,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r16,Z+14 - ldd r17,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r16 - adc r16,r1 - lsl r16 - adc r16,r1 - swap r17 - std Z+8,r26 - std Z+9,r27 - std Z+10,r16 - std Z+11,r17 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r5 - adc r5,r1 - lsl r5 - adc r5,r1 - std Z+12,r2 - std Z+13,r3 - std Z+14,r4 - std Z+15,r5 - ldd r2,Z+16 - ldd r3,Z+17 - ldd r4,Z+18 - ldd r5,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r16,Z+22 - ldd r17,Z+23 - movw r20,r26 - movw r22,r16 - andi r20,170 - andi r21,170 - andi r22,170 - andi r23,170 - andi r26,85 - andi r27,85 - andi r16,85 - andi r17,85 - or r26,r21 - or r27,r22 - or r16,r23 - or r17,r20 - std Z+16,r16 - std Z+17,r17 - std Z+18,r26 - std Z+19,r27 - movw r20,r2 - movw r22,r4 - andi r20,85 - andi r21,85 - andi r22,85 - andi r23,85 - ldi r19,170 - and r2,r19 - and r3,r19 - and r4,r19 - and r5,r19 - lsl r2 - rol r3 - rol r4 - rol r5 - adc r2,r1 - lsl r2 - rol r3 - rol r4 - rol r5 - adc r2,r1 - lsl r2 - rol r3 - rol r4 - rol r5 - adc r2,r1 - lsl r2 - rol r3 - rol r4 - rol r5 - adc r2,r1 - or r2,r20 - or r3,r21 - or r4,r22 - or r5,r23 - std Z+20,r5 - std Z+21,r2 - std Z+22,r3 - std Z+23,r4 - ldd r2,Z+24 - ldd r3,Z+25 - ldd r4,Z+26 - ldd r5,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r16,Z+30 - ldd r17,Z+31 - movw r20,r26 - movw r22,r16 - lsr r23 - ror r22 - ror r21 - ror r20 - lsr r23 - ror r22 - ror r21 - ror r20 - eor r20,r26 - eor r21,r27 - eor r22,r16 - eor r23,r17 - andi r20,3 - andi r21,3 - andi 
r22,3 - andi r23,3 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - lsl r20 - rol r21 - rol r22 - rol r23 - lsl r20 - rol r21 - rol r22 - rol r23 - eor r26,r20 - eor r27,r21 - eor r16,r22 - eor r17,r23 - movw r20,r26 - movw r22,r16 - lsr r23 - ror r22 - ror r21 - ror r20 - andi r20,120 - andi r21,120 - andi r22,120 - andi r23,120 - movw r6,r20 - movw r8,r22 - lsr r9 - ror r8 - ror r7 - ror r6 - lsr r9 - ror r8 - ror r7 - ror r6 - lsr r9 - ror r8 - ror r7 - ror r6 - lsr r9 - ror r8 - ror r7 - ror r6 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ldi r19,8 - and r6,r19 - and r7,r19 - and r8,r19 - and r9,r19 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - lsl r6 - rol r7 - rol r8 - rol r9 - lsl r6 - rol r7 - rol r8 - rol r9 - lsl r6 - rol r7 - rol r8 - rol r9 - lsl r6 - rol r7 - rol r8 - rol r9 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - andi r26,15 - andi r27,15 - andi r16,15 - andi r17,15 - or r26,r20 - or r27,r21 - or r16,r22 - or r17,r23 - std Z+24,r26 - std Z+25,r27 - std Z+26,r16 - std Z+27,r17 - movw r20,r4 - lsr r21 - ror r20 - lsr r21 - ror r20 - andi r20,48 - andi r21,48 - movw r26,r2 - movw r16,r4 - andi r26,1 - andi r27,1 - andi r16,1 - andi r17,1 - lsl r26 - rol r27 - rol r16 - rol r17 - lsl r26 - rol r27 - rol r16 - rol r17 - lsl r26 - rol r27 - rol r16 - rol r17 - or r26,r20 - or r27,r21 - movw r20,r4 - lsl r20 - rol r21 - lsl r20 - rol r21 - andi r20,192 - andi r21,192 - or r26,r20 - or r27,r21 - movw r20,r2 - andi r20,224 - andi r21,224 - lsr r21 - ror r20 - or r16,r20 - or r17,r21 - movw r20,r2 - movw r22,r4 - lsr r23 - ror r22 - ror r21 - ror r20 - andi r20,7 - andi r21,7 - andi r22,7 - andi r23,7 - or r26,r20 - or r27,r21 - or r16,r22 - or r17,r23 - ldi r19,16 - and r2,r19 - and r3,r19 - lsl r2 - rol r3 - lsl r2 - rol r3 - lsl r2 - rol r3 - or r16,r2 - or r17,r3 - std Z+28,r26 - std Z+29,r27 - std Z+30,r16 - std Z+31,r17 - ldd r2,Z+32 - ldd r3,Z+33 - ldd r4,Z+34 - ldd r5,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r16,Z+38 - ldd r17,Z+39 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r16 - std Z+35,r17 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r4 - mov r4,r5 - mov r5,r0 - lsl r4 - rol r5 - adc r4,r1 - lsl r4 - rol r5 - adc r4,r1 - std Z+36,r2 - std Z+37,r3 - std Z+38,r4 - std Z+39,r5 - pop r6 - pop r7 - pop r8 - pop r9 - pop r2 - pop r3 - pop r4 - pop r5 - movw r26,r30 - ret -2826: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - bst r2,0 - bld r20,0 - bst r6,0 - bld r20,1 - bst r10,0 - bld r20,2 - bst r14,0 - bld r20,3 - bst r2,1 - bld r20,4 - bst r6,1 - bld r20,5 - bst r10,1 - bld r20,6 - bst r14,1 - bld r20,7 - bst r2,2 - bld r21,0 - bst r6,2 - bld r21,1 - bst r10,2 - bld r21,2 - bst r14,2 - bld r21,3 - bst r2,3 - bld r21,4 - bst r6,3 - bld r21,5 - bst r10,3 - bld r21,6 - bst r14,3 - bld r21,7 - bst r2,4 - bld r22,0 - bst r6,4 - bld r22,1 - bst r10,4 - bld r22,2 - bst r14,4 - bld r22,3 - bst r2,5 - bld r22,4 - bst r6,5 - bld r22,5 - bst r10,5 - bld r22,6 - bst r14,5 - bld r22,7 - bst r2,6 - bld r23,0 - bst r6,6 - bld r23,1 - bst r10,6 - bld r23,2 - bst r14,6 - bld r23,3 - bst r2,7 - bld r23,4 - bst r6,7 - bld r23,5 - bst r10,7 - bld r23,6 - bst r14,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst 
r3,0 - bld r20,0 - bst r7,0 - bld r20,1 - bst r11,0 - bld r20,2 - bst r15,0 - bld r20,3 - bst r3,1 - bld r20,4 - bst r7,1 - bld r20,5 - bst r11,1 - bld r20,6 - bst r15,1 - bld r20,7 - bst r3,2 - bld r21,0 - bst r7,2 - bld r21,1 - bst r11,2 - bld r21,2 - bst r15,2 - bld r21,3 - bst r3,3 - bld r21,4 - bst r7,3 - bld r21,5 - bst r11,3 - bld r21,6 - bst r15,3 - bld r21,7 - bst r3,4 - bld r22,0 - bst r7,4 - bld r22,1 - bst r11,4 - bld r22,2 - bst r15,4 - bld r22,3 - bst r3,5 - bld r22,4 - bst r7,5 - bld r22,5 - bst r11,5 - bld r22,6 - bst r15,5 - bld r22,7 - bst r3,6 - bld r23,0 - bst r7,6 - bld r23,1 - bst r11,6 - bld r23,2 - bst r15,6 - bld r23,3 - bst r3,7 - bld r23,4 - bst r7,7 - bld r23,5 - bst r11,7 - bld r23,6 - bst r15,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r4,0 - bld r20,0 - bst r8,0 - bld r20,1 - bst r12,0 - bld r20,2 - bst r24,0 - bld r20,3 - bst r4,1 - bld r20,4 - bst r8,1 - bld r20,5 - bst r12,1 - bld r20,6 - bst r24,1 - bld r20,7 - bst r4,2 - bld r21,0 - bst r8,2 - bld r21,1 - bst r12,2 - bld r21,2 - bst r24,2 - bld r21,3 - bst r4,3 - bld r21,4 - bst r8,3 - bld r21,5 - bst r12,3 - bld r21,6 - bst r24,3 - bld r21,7 - bst r4,4 - bld r22,0 - bst r8,4 - bld r22,1 - bst r12,4 - bld r22,2 - bst r24,4 - bld r22,3 - bst r4,5 - bld r22,4 - bst r8,5 - bld r22,5 - bst r12,5 - bld r22,6 - bst r24,5 - bld r22,7 - bst r4,6 - bld r23,0 - bst r8,6 - bld r23,1 - bst r12,6 - bld r23,2 - bst r24,6 - bld r23,3 - bst r4,7 - bld r23,4 - bst r8,7 - bld r23,5 - bst r12,7 - bld r23,6 - bst r24,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r5,0 - bld r20,0 - bst r9,0 - bld r20,1 - bst r13,0 - bld r20,2 - bst r25,0 - bld r20,3 - bst r5,1 - bld r20,4 - bst r9,1 - bld r20,5 - bst r13,1 - bld r20,6 - bst r25,1 - bld r20,7 - bst r5,2 - bld r21,0 - bst r9,2 - bld r21,1 - bst r13,2 - bld r21,2 - bst r25,2 - bld r21,3 - bst r5,3 - bld r21,4 - bst r9,3 - bld r21,5 - bst r13,3 - bld r21,6 - bst r25,3 - bld r21,7 - bst r5,4 - bld r22,0 - bst r9,4 - bld r22,1 - bst r13,4 - bld r22,2 - bst r25,4 - bld r22,3 - bst r5,5 - bld r22,4 - bst r9,5 - bld r22,5 - bst r13,5 - bld r22,6 - bst r25,5 - bld r22,7 - bst r5,6 - bld r23,0 - bst r9,6 - bld r23,1 - bst r13,6 - bld r23,2 - bst r25,6 - bld r23,3 - bst r5,7 - bld r23,4 - bst r9,7 - bld r23,5 - bst r13,7 - bld r23,6 - bst r25,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128t_encrypt, .-gift128t_encrypt - - .text -.global gift128t_decrypt - .type gift128t_decrypt, @function -gift128t_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 36 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r2,0 - bst r20,1 - bld r6,0 - bst r20,2 - bld r10,0 - bst r20,3 - bld r14,0 - bst r20,4 - bld r2,1 - bst r20,5 - bld r6,1 - bst r20,6 - bld r10,1 - bst r20,7 - bld r14,1 - bst r21,0 - bld r2,2 - bst r21,1 - bld r6,2 - bst r21,2 - bld r10,2 - bst r21,3 - bld r14,2 - bst r21,4 - bld r2,3 - bst r21,5 - bld r6,3 - bst 
r21,6 - bld r10,3 - bst r21,7 - bld r14,3 - bst r22,0 - bld r2,4 - bst r22,1 - bld r6,4 - bst r22,2 - bld r10,4 - bst r22,3 - bld r14,4 - bst r22,4 - bld r2,5 - bst r22,5 - bld r6,5 - bst r22,6 - bld r10,5 - bst r22,7 - bld r14,5 - bst r23,0 - bld r2,6 - bst r23,1 - bld r6,6 - bst r23,2 - bld r10,6 - bst r23,3 - bld r14,6 - bst r23,4 - bld r2,7 - bst r23,5 - bld r6,7 - bst r23,6 - bld r10,7 - bst r23,7 - bld r14,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r3,0 - bst r20,1 - bld r7,0 - bst r20,2 - bld r11,0 - bst r20,3 - bld r15,0 - bst r20,4 - bld r3,1 - bst r20,5 - bld r7,1 - bst r20,6 - bld r11,1 - bst r20,7 - bld r15,1 - bst r21,0 - bld r3,2 - bst r21,1 - bld r7,2 - bst r21,2 - bld r11,2 - bst r21,3 - bld r15,2 - bst r21,4 - bld r3,3 - bst r21,5 - bld r7,3 - bst r21,6 - bld r11,3 - bst r21,7 - bld r15,3 - bst r22,0 - bld r3,4 - bst r22,1 - bld r7,4 - bst r22,2 - bld r11,4 - bst r22,3 - bld r15,4 - bst r22,4 - bld r3,5 - bst r22,5 - bld r7,5 - bst r22,6 - bld r11,5 - bst r22,7 - bld r15,5 - bst r23,0 - bld r3,6 - bst r23,1 - bld r7,6 - bst r23,2 - bld r11,6 - bst r23,3 - bld r15,6 - bst r23,4 - bld r3,7 - bst r23,5 - bld r7,7 - bst r23,6 - bld r11,7 - bst r23,7 - bld r15,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r4,0 - bst r20,1 - bld r8,0 - bst r20,2 - bld r12,0 - bst r20,3 - bld r24,0 - bst r20,4 - bld r4,1 - bst r20,5 - bld r8,1 - bst r20,6 - bld r12,1 - bst r20,7 - bld r24,1 - bst r21,0 - bld r4,2 - bst r21,1 - bld r8,2 - bst r21,2 - bld r12,2 - bst r21,3 - bld r24,2 - bst r21,4 - bld r4,3 - bst r21,5 - bld r8,3 - bst r21,6 - bld r12,3 - bst r21,7 - bld r24,3 - bst r22,0 - bld r4,4 - bst r22,1 - bld r8,4 - bst r22,2 - bld r12,4 - bst r22,3 - bld r24,4 - bst r22,4 - bld r4,5 - bst r22,5 - bld r8,5 - bst r22,6 - bld r12,5 - bst r22,7 - bld r24,5 - bst r23,0 - bld r4,6 - bst r23,1 - bld r8,6 - bst r23,2 - bld r12,6 - bst r23,3 - bld r24,6 - bst r23,4 - bld r4,7 - bst r23,5 - bld r8,7 - bst r23,6 - bld r12,7 - bst r23,7 - bld r24,7 - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - bst r20,0 - bld r5,0 - bst r20,1 - bld r9,0 - bst r20,2 - bld r13,0 - bst r20,3 - bld r25,0 - bst r20,4 - bld r5,1 - bst r20,5 - bld r9,1 - bst r20,6 - bld r13,1 - bst r20,7 - bld r25,1 - bst r21,0 - bld r5,2 - bst r21,1 - bld r9,2 - bst r21,2 - bld r13,2 - bst r21,3 - bld r25,2 - bst r21,4 - bld r5,3 - bst r21,5 - bld r9,3 - bst r21,6 - bld r13,3 - bst r21,7 - bld r25,3 - bst r22,0 - bld r5,4 - bst r22,1 - bld r9,4 - bst r22,2 - bld r13,4 - bst r22,3 - bld r25,4 - bst r22,4 - bld r5,5 - bst r22,5 - bld r9,5 - bst r22,6 - bld r13,5 - bst r22,7 - bld r25,5 - bst r23,0 - bld r5,6 - bst r23,1 - bld r9,6 - bst r23,2 - bld r13,6 - bst r23,3 - bld r25,6 - bst r23,4 - bld r5,7 - bst r23,5 - bld r9,7 - bst r23,6 - bld r13,7 - bst r23,7 - bld r25,7 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r16,Z+14 - ldd r17,Z+15 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r16 - std Y+4,r17 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r16,Z+6 - ldd r17,Z+7 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r16 - std Y+8,r17 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r16,Z+10 - ldd r17,Z+11 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 
- ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r16 - std Y+12,r17 - ld r26,Z - ldd r27,Z+1 - ldd r16,Z+2 - ldd r17,Z+3 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - lsr r17 - ror r16 - ror r0 - or r17,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r16 - std Y+16,r17 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r26,hh8(table_1) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r19,40 - mov r26,r1 -375: - ldd r0,Y+13 - ldd r20,Y+9 - std Y+9,r0 - ldd r0,Y+5 - std Y+5,r20 - ldd r20,Y+1 - std Y+1,r0 - ldd r0,Y+14 - ldd r21,Y+10 - std Y+10,r0 - ldd r0,Y+6 - std Y+6,r21 - ldd r21,Y+2 - std Y+2,r0 - ldd r0,Y+15 - ldd r22,Y+11 - std Y+11,r0 - ldd r0,Y+7 - std Y+7,r22 - ldd r22,Y+3 - std Y+3,r0 - ldd r0,Y+16 - ldd r23,Y+12 - std Y+12,r0 - ldd r0,Y+8 - std Y+8,r23 - ldd r23,Y+4 - std Y+4,r0 - mov r0,r1 - lsr r21 - ror r20 - ror r0 - lsr r21 - ror r20 - ror r0 - lsr r21 - ror r20 - ror r0 - lsr r21 - ror r20 - ror r0 - or r21,r0 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - std Y+13,r20 - std Y+14,r21 - std Y+15,r22 - std Y+16,r23 - eor r6,r20 - eor r7,r21 - eor r8,r22 - eor r9,r23 - ldd r0,Y+5 - eor r10,r0 - ldd r0,Y+6 - eor r11,r0 - ldd r0,Y+7 - eor r12,r0 - ldd r0,Y+8 - eor r13,r0 - ldi r20,128 - eor r25,r20 - dec r19 - mov r30,r19 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - eor r14,r20 - bst r2,1 - bld r0,0 - bst r5,0 - bld r2,1 - bst r2,6 - bld r5,0 - bst r4,1 - bld r2,6 - bst r5,4 - bld r4,1 - bst r2,7 - bld r5,4 - bst r3,1 - bld r2,7 - bst r5,2 - bld r3,1 - bst r4,6 - bld r5,2 - bst r4,5 - bld r4,6 - bst r5,5 - bld r4,5 - bst r5,7 - bld r5,5 - bst r3,7 - bld r5,7 - bst r3,3 - bld r3,7 - bst r3,2 - bld r3,3 - bst r4,2 - bld r3,2 - bst r4,4 - bld r4,2 - bst r2,5 - bld r4,4 - bst r5,1 - bld r2,5 - bst r5,6 - bld r5,1 - bst r4,7 - bld r5,6 - bst r3,5 - bld r4,7 - bst r5,3 - bld r3,5 - bst r3,6 - bld r5,3 - bst r4,3 - bld r3,6 - bst r3,4 - bld r4,3 - bst r2,3 - bld r3,4 - bst r3,0 - bld r2,3 - bst r2,2 - bld r3,0 - bst r4,0 - bld r2,2 - bst r2,4 - bld r4,0 - bst r0,0 - bld r2,4 - bst r6,0 - bld r0,0 - bst r7,0 - bld r6,0 - bst r7,2 - bld r7,0 - bst r9,2 - bld r7,2 - bst r9,6 - bld r9,2 - bst r9,7 - bld r9,6 - bst r8,7 - bld r9,7 - bst r8,5 - bld r8,7 - bst r6,5 - bld r8,5 - bst r6,1 - bld r6,5 - bst r0,0 - bld r6,1 - bst r6,2 - bld r0,0 - bst r9,0 - bld r6,2 - bst r7,6 - bld r9,0 - bst r9,3 - bld r7,6 - bst r8,6 - bld r9,3 - bst r9,5 - bld r8,6 - bst r6,7 - bld r9,5 - bst r8,1 - bld r6,7 - bst r6,4 - bld r8,1 - bst r7,1 - bld r6,4 - bst r0,0 - bld r7,1 - bst r6,3 - bld r0,0 - bst r8,0 - bld r6,3 - bst r7,4 - bld r8,0 - bst r7,3 - bld r7,4 - bst r8,2 - bld r7,3 - bst r9,4 - bld r8,2 - bst r7,7 - bld r9,4 - bst r8,3 - bld r7,7 - bst r8,4 - bld r8,3 - bst r7,5 - bld r8,4 - bst r0,0 - bld r7,5 - bst r6,6 - bld r0,0 - bst r9,1 - bld r6,6 - bst r0,0 - bld r9,1 - bst r10,0 - bld r0,0 - bst r12,0 - bld r10,0 - bst r12,4 - bld r12,0 - bst r12,5 - bld r12,4 - bst r11,5 - bld r12,5 - bst r11,3 - bld r11,5 - bst r13,2 - bld r11,3 - bst r10,6 - bld r13,2 - bst r10,1 - bld r10,6 - bst r11,0 - bld r10,1 - bst r12,2 - bld r11,0 - bst r10,4 - bld r12,2 - bst r12,1 - bld r10,4 - bst r11,4 - bld r12,1 - bst r12,3 - bld r11,4 - bst r13,4 - bld r12,3 - bst 
r12,7 - bld r13,4 - bst r13,5 - bld r12,7 - bst r11,7 - bld r13,5 - bst r13,3 - bld r11,7 - bst r13,6 - bld r13,3 - bst r10,7 - bld r13,6 - bst r13,1 - bld r10,7 - bst r11,6 - bld r13,1 - bst r10,3 - bld r11,6 - bst r13,0 - bld r10,3 - bst r12,6 - bld r13,0 - bst r10,5 - bld r12,6 - bst r11,1 - bld r10,5 - bst r11,2 - bld r11,1 - bst r10,2 - bld r11,2 - bst r0,0 - bld r10,2 - bst r14,0 - bld r0,0 - bst r25,0 - bld r14,0 - bst r25,6 - bld r25,0 - bst r15,7 - bld r25,6 - bst r14,3 - bld r15,7 - bst r0,0 - bld r14,3 - bst r14,1 - bld r0,0 - bst r24,0 - bld r14,1 - bst r25,4 - bld r24,0 - bst r25,7 - bld r25,4 - bst r14,7 - bld r25,7 - bst r0,0 - bld r14,7 - bst r14,2 - bld r0,0 - bst r15,0 - bld r14,2 - bst r25,2 - bld r15,0 - bst r15,6 - bld r25,2 - bst r15,3 - bld r15,6 - bst r0,0 - bld r15,3 - bst r14,4 - bld r0,0 - bst r25,1 - bld r14,4 - bst r24,6 - bld r25,1 - bst r15,5 - bld r24,6 - bst r24,3 - bld r15,5 - bst r0,0 - bld r24,3 - bst r14,5 - bld r0,0 - bst r24,1 - bld r14,5 - bst r24,4 - bld r24,1 - bst r25,5 - bld r24,4 - bst r24,7 - bld r25,5 - bst r0,0 - bld r24,7 - bst r14,6 - bld r0,0 - bst r15,1 - bld r14,6 - bst r24,2 - bld r15,1 - bst r15,4 - bld r24,2 - bst r25,3 - bld r15,4 - bst r0,0 - bld r25,3 - movw r20,r14 - movw r22,r24 - movw r14,r2 - movw r24,r4 - movw r2,r20 - movw r4,r22 - and r20,r6 - and r21,r7 - and r22,r8 - and r23,r9 - eor r10,r20 - eor r11,r21 - eor r12,r22 - eor r13,r23 - com r14 - com r15 - com r24 - com r25 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r14,r10 - eor r15,r11 - eor r24,r12 - eor r25,r13 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - or r0,r8 - eor r12,r0 - mov r0,r5 - or r0,r9 - eor r13,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r8 - and r0,r24 - eor r4,r0 - mov r0,r9 - and r0,r25 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r8,r0 - mov r0,r5 - and r0,r13 - eor r9,r0 - cp r19,r1 - breq 791f - inc r26 - ldi r27,5 - cpse r26,r27 - rjmp 375b - mov r26,r1 - eor r2,r18 - eor r3,r18 - eor r4,r18 - eor r5,r18 - rjmp 375b -791: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - bst r2,0 - bld r20,0 - bst r6,0 - bld r20,1 - bst r10,0 - bld r20,2 - bst r14,0 - bld r20,3 - bst r2,1 - bld r20,4 - bst r6,1 - bld r20,5 - bst r10,1 - bld r20,6 - bst r14,1 - bld r20,7 - bst r2,2 - bld r21,0 - bst r6,2 - bld r21,1 - bst r10,2 - bld r21,2 - bst r14,2 - bld r21,3 - bst r2,3 - bld r21,4 - bst r6,3 - bld r21,5 - bst r10,3 - bld r21,6 - bst r14,3 - bld r21,7 - bst r2,4 - bld r22,0 - bst r6,4 - bld r22,1 - bst r10,4 - bld r22,2 - bst r14,4 - bld r22,3 - bst r2,5 - bld r22,4 - bst r6,5 - bld r22,5 - bst r10,5 - bld r22,6 - bst r14,5 - bld r22,7 - bst r2,6 - bld r23,0 - bst r6,6 - bld r23,1 - bst r10,6 - bld r23,2 - bst r14,6 - bld r23,3 - bst r2,7 - bld r23,4 - bst r6,7 - bld r23,5 - bst r10,7 - bld r23,6 - bst r14,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r3,0 - bld r20,0 - bst r7,0 - bld r20,1 - bst r11,0 - bld r20,2 - bst r15,0 - bld r20,3 - bst r3,1 - bld r20,4 - bst r7,1 - bld r20,5 - bst r11,1 - bld r20,6 - bst r15,1 - bld r20,7 - bst r3,2 - bld r21,0 - bst r7,2 - bld r21,1 - bst r11,2 - bld r21,2 - bst r15,2 - bld r21,3 - bst r3,3 - bld r21,4 - bst r7,3 - bld r21,5 - bst r11,3 - bld r21,6 - bst r15,3 - bld r21,7 - bst r3,4 - bld r22,0 - bst r7,4 - bld r22,1 - bst r11,4 - bld r22,2 - bst r15,4 - bld r22,3 - bst r3,5 - bld 
r22,4 - bst r7,5 - bld r22,5 - bst r11,5 - bld r22,6 - bst r15,5 - bld r22,7 - bst r3,6 - bld r23,0 - bst r7,6 - bld r23,1 - bst r11,6 - bld r23,2 - bst r15,6 - bld r23,3 - bst r3,7 - bld r23,4 - bst r7,7 - bld r23,5 - bst r11,7 - bld r23,6 - bst r15,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r4,0 - bld r20,0 - bst r8,0 - bld r20,1 - bst r12,0 - bld r20,2 - bst r24,0 - bld r20,3 - bst r4,1 - bld r20,4 - bst r8,1 - bld r20,5 - bst r12,1 - bld r20,6 - bst r24,1 - bld r20,7 - bst r4,2 - bld r21,0 - bst r8,2 - bld r21,1 - bst r12,2 - bld r21,2 - bst r24,2 - bld r21,3 - bst r4,3 - bld r21,4 - bst r8,3 - bld r21,5 - bst r12,3 - bld r21,6 - bst r24,3 - bld r21,7 - bst r4,4 - bld r22,0 - bst r8,4 - bld r22,1 - bst r12,4 - bld r22,2 - bst r24,4 - bld r22,3 - bst r4,5 - bld r22,4 - bst r8,5 - bld r22,5 - bst r12,5 - bld r22,6 - bst r24,5 - bld r22,7 - bst r4,6 - bld r23,0 - bst r8,6 - bld r23,1 - bst r12,6 - bld r23,2 - bst r24,6 - bld r23,3 - bst r4,7 - bld r23,4 - bst r8,7 - bld r23,5 - bst r12,7 - bld r23,6 - bst r24,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - bst r5,0 - bld r20,0 - bst r9,0 - bld r20,1 - bst r13,0 - bld r20,2 - bst r25,0 - bld r20,3 - bst r5,1 - bld r20,4 - bst r9,1 - bld r20,5 - bst r13,1 - bld r20,6 - bst r25,1 - bld r20,7 - bst r5,2 - bld r21,0 - bst r9,2 - bld r21,1 - bst r13,2 - bld r21,2 - bst r25,2 - bld r21,3 - bst r5,3 - bld r21,4 - bst r9,3 - bld r21,5 - bst r13,3 - bld r21,6 - bst r25,3 - bld r21,7 - bst r5,4 - bld r22,0 - bst r9,4 - bld r22,1 - bst r13,4 - bld r22,2 - bst r25,4 - bld r22,3 - bst r5,5 - bld r22,4 - bst r9,5 - bld r22,5 - bst r13,5 - bld r22,6 - bst r25,5 - bld r22,7 - bst r5,6 - bld r23,0 - bst r9,6 - bld r23,1 - bst r13,6 - bld r23,2 - bst r25,6 - bld r23,3 - bst r5,7 - bld r23,4 - bst r9,7 - bld r23,5 - bst r13,7 - bld r23,6 - bst r25,7 - bld r23,7 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128t_decrypt, .-gift128t_decrypt - -#endif - -#endif diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-util.h b/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/hyena/Implementations/crypto_aead/hyenav1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include <stdint.h> - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ -
(((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. 
- * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys/hyena.c b/hyena/Implementations/crypto_aead/hyenav1/rhys/hyena.c index 3af79fa..db5ba2b 100644 --- a/hyena/Implementations/crypto_aead/hyenav1/rhys/hyena.c +++ b/hyena/Implementations/crypto_aead/hyenav1/rhys/hyena.c @@ -52,6 +52,23 @@ static void hyena_double_delta(unsigned char D[8]) } /** + * \brief Triples a delta value in the F(2^64) field. + * + * \param D The delta value to be tripled. + * + * D' = D ^ (D << 1) if the top-most bit is 0, or D' = D ^ (D << 1) ^ 0x1B + * otherwise. + */ +static void hyena_triple_delta(unsigned char D[8]) +{ + unsigned index; + unsigned char mask = (unsigned char)(((signed char)(D[0])) >> 7); + for (index = 0; index < 7; ++index) + D[index] ^= (D[index] << 1) | (D[index + 1] >> 7); + D[7] ^= (D[7] << 1) ^ (mask & 0x1B); +} + +/** * \brief Process the associated data for HYENA. * * \param ks Key schedule for the GIFT-128 cipher. 
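The hunk above introduces hyena_triple_delta(), and the hunks that follow replace pairs of hyena_double_delta() calls with triplings. As a quick sanity check of the relationship the new helper relies on, the following standalone sketch verifies that tripling a delta is the same as doubling it and XORing in the original value. double_delta() below is only a reconstruction of hyena_double_delta() (its body is not shown in this patch) using the usual shift-left-and-conditionally-XOR-0x1B doubling, and triple_delta() mirrors the hyena_triple_delta() added above; both names are illustrative, not part of the library.

/*
 * Sketch: in the GF(2^64) representation used by HYENA (reduction byte 0x1B),
 * triple(D) == D ^ double(D).
 */
#include <stdio.h>
#include <string.h>

static void double_delta(unsigned char D[8])
{
    unsigned index;
    unsigned char mask = (unsigned char)(((signed char)(D[0])) >> 7);
    for (index = 0; index < 7; ++index)
        D[index] = (unsigned char)((D[index] << 1) | (D[index + 1] >> 7));
    D[7] = (unsigned char)((D[7] << 1) ^ (mask & 0x1B));
}

static void triple_delta(unsigned char D[8])
{
    unsigned index;
    unsigned char mask = (unsigned char)(((signed char)(D[0])) >> 7);
    for (index = 0; index < 7; ++index)
        D[index] ^= (unsigned char)((D[index] << 1) | (D[index + 1] >> 7));
    D[7] ^= (unsigned char)((D[7] << 1) ^ (mask & 0x1B));
}

int main(void)
{
    unsigned char d[8] = {0x80, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77};
    unsigned char a[8], b[8];
    int i;

    memcpy(a, d, 8);
    double_delta(a);            /* a = 2 * D */
    for (i = 0; i < 8; ++i)
        a[i] ^= d[i];           /* a = 3 * D = 2 * D ^ D */

    memcpy(b, d, 8);
    triple_delta(b);            /* b = 3 * D */

    printf("%s\n", memcmp(a, b, 8) == 0 ? "triple == double ^ identity" : "mismatch");
    return 0;
}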
@@ -66,27 +83,26 @@ static void hyena_process_ad unsigned long long adlen) { unsigned char feedback[16]; - hyena_double_delta(D); while (adlen > 16) { + hyena_double_delta(D); memcpy(feedback, ad, 16); lw_xor_block(feedback + 8, Y + 8, 8); lw_xor_block(feedback + 8, D, 8); lw_xor_block(Y, feedback, 16); gift128n_encrypt(ks, Y, Y); - hyena_double_delta(D); ad += 16; adlen -= 16; } if (adlen == 16) { - hyena_double_delta(D); + hyena_triple_delta(D); memcpy(feedback, ad, 16); lw_xor_block(feedback + 8, Y + 8, 8); lw_xor_block(feedback + 8, D, 8); lw_xor_block(Y, feedback, 16); } else { unsigned temp = (unsigned)adlen; - hyena_double_delta(D); - hyena_double_delta(D); + hyena_triple_delta(D); + hyena_triple_delta(D); memcpy(feedback, ad, temp); feedback[temp] = 0x01; memset(feedback + temp + 1, 0, 15 - temp); @@ -116,8 +132,7 @@ int hyena_aead_encrypt *clen = mlen + HYENA_TAG_SIZE; /* Set up the key schedule and use it to encrypt the nonce */ - if (!gift128n_init(&ks, k, HYENA_KEY_SIZE)) - return -1; + gift128n_init(&ks, k); Y[0] = 0; if (adlen == 0) Y[0] |= 0x01; @@ -149,8 +164,7 @@ int hyena_aead_encrypt } gift128n_encrypt(&ks, Y, Y); if (mlen == 16) { - hyena_double_delta(D); - hyena_double_delta(D); + hyena_triple_delta(D); memcpy(feedback, m, 16); lw_xor_block(feedback + 8, Y + 8, 8); lw_xor_block(feedback + 8, D, 8); @@ -159,9 +173,8 @@ int hyena_aead_encrypt c += 16; } else { unsigned temp = (unsigned)mlen; - hyena_double_delta(D); - hyena_double_delta(D); - hyena_double_delta(D); + hyena_triple_delta(D); + hyena_triple_delta(D); memcpy(feedback, m, temp); feedback[temp] = 0x01; memset(feedback + temp + 1, 0, 15 - temp); @@ -207,8 +220,7 @@ int hyena_aead_decrypt *mlen = clen - HYENA_TAG_SIZE; /* Set up the key schedule and use it to encrypt the nonce */ - if (!gift128n_init(&ks, k, HYENA_KEY_SIZE)) - return -1; + gift128n_init(&ks, k); Y[0] = 0; if (adlen == 0) Y[0] |= 0x01; @@ -242,8 +254,7 @@ int hyena_aead_decrypt } gift128n_encrypt(&ks, Y, Y); if (clen == 16) { - hyena_double_delta(D); - hyena_double_delta(D); + hyena_triple_delta(D); memcpy(feedback + 8, c + 8, 8); lw_xor_block_2_src(m, c, Y, 16); memcpy(feedback, m, 8); @@ -252,9 +263,8 @@ int hyena_aead_decrypt c += 16; } else { unsigned temp = (unsigned)clen; - hyena_double_delta(D); - hyena_double_delta(D); - hyena_double_delta(D); + hyena_triple_delta(D); + hyena_triple_delta(D); if (temp > 8) { memcpy(feedback + 8, c + 8, temp - 8); lw_xor_block_2_src(m, c, Y, temp); diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128-config.h b/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128-config.h new file mode 100644 index 0000000..62131ba --- /dev/null +++ b/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128-config.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_GIFT128_CONFIG_H +#define LW_INTERNAL_GIFT128_CONFIG_H + +/** + * \file internal-gift128-config.h + * \brief Configures the variant of GIFT-128 to use. + */ + +/** + * \brief Select the full variant of GIFT-128. + * + * The full variant requires 320 bytes for the key schedule and uses the + * fixslicing method to implement encryption and decryption. + */ +#define GIFT128_VARIANT_FULL 0 + +/** + * \brief Select the small variant of GIFT-128. + * + * The small variant requires 80 bytes for the key schedule. The rest + * of the key schedule is expanded on the fly during encryption. + * + * The fixslicing method is used to implement encryption and the slower + * bitslicing method is used to implement decryption. The small variant + * is suitable when memory is at a premium, decryption is not needed, + * but encryption performance is still important. + */ +#define GIFT128_VARIANT_SMALL 1 + +/** + * \brief Select the tiny variant of GIFT-128. + * + * The tiny variant requires 16 bytes for the key schedule and uses the + * bitslicing method to implement encryption and decryption. It is suitable + * for use when memory is very tight and performance is not critical. + */ +#define GIFT128_VARIANT_TINY 2 + +/** + * \def GIFT128_VARIANT + * \brief Selects the default variant of GIFT-128 to use on this platform. + */ +/** + * \def GIFT128_VARIANT_ASM + * \brief Defined to 1 if the GIFT-128 implementation has been replaced + * with an assembly code version. 
+ */ +#if defined(__AVR__) && !defined(GIFT128_VARIANT_ASM) +#define GIFT128_VARIANT_ASM 1 +#endif +#if !defined(GIFT128_VARIANT) +#define GIFT128_VARIANT GIFT128_VARIANT_FULL +#endif +#if !defined(GIFT128_VARIANT_ASM) +#define GIFT128_VARIANT_ASM 0 +#endif + +#endif diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128.c b/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128.c index 681dbc8..c6ac5ec 100644 --- a/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128.c +++ b/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128.c @@ -23,8 +23,12 @@ #include "internal-gift128.h" #include "internal-util.h" +#if !GIFT128_VARIANT_ASM + +#if GIFT128_VARIANT != GIFT128_VARIANT_TINY + /* Round constants for GIFT-128 in the fixsliced representation */ -static uint32_t const GIFT128_RC[40] = { +static uint32_t const GIFT128_RC_fixsliced[40] = { 0x10000008, 0x80018000, 0x54000002, 0x01010181, 0x8000001f, 0x10888880, 0x6001e000, 0x51500002, 0x03030180, 0x8000002f, 0x10088880, 0x60016000, 0x41500002, 0x03030080, 0x80000027, 0x10008880, 0x4001e000, 0x11500002, @@ -34,6 +38,246 @@ static uint32_t const GIFT128_RC[40] = { 0xc001a000, 0x14500002, 0x01020181, 0x8000001a }; +#endif + +#if GIFT128_VARIANT != GIFT128_VARIANT_FULL + +/* Round constants for GIFT-128 in the bitsliced representation */ +static uint8_t const GIFT128_RC[40] = { + 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, + 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, + 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, + 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38, + 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A +}; + +#endif + +/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ +#define bit_permute_step(_y, mask, shift) \ + do { \ + uint32_t y = (_y); \ + uint32_t t = ((y >> (shift)) ^ y) & (mask); \ + (_y) = (y ^ t) ^ (t << (shift)); \ + } while (0) + +/* + * The permutation below was generated by the online permuation generator at + * "http://programming.sirrida.de/calcperm.php". + * + * All of the permutuations are essentially the same, except that each is + * rotated by 8 bits with respect to the next: + * + * P0: 0 24 16 8 1 25 17 9 2 26 18 10 3 27 19 11 4 28 20 12 5 29 21 13 6 30 22 14 7 31 23 15 + * P1: 8 0 24 16 9 1 25 17 10 2 26 18 11 3 27 19 12 4 28 20 13 5 29 21 14 6 30 22 15 7 31 23 + * P2: 16 8 0 24 17 9 1 25 18 10 2 26 19 11 3 27 20 12 4 28 21 13 5 29 22 14 6 30 23 15 7 31 + * P3: 24 16 8 0 25 17 9 1 26 18 10 2 27 19 11 3 28 20 12 4 29 21 13 5 30 22 14 6 31 23 15 7 + * + * The most efficient permutation from the online generator was P3, so we + * perform it as the core of the others, and then perform a final rotation. + * + * It is possible to do slightly better than "P3 then rotate" on desktop and + * server architectures for the other permutations. But the advantage isn't + * as evident on embedded platforms so we keep things simple. 
+ */ +#define PERM3_INNER(x) \ + do { \ + bit_permute_step(x, 0x0a0a0a0a, 3); \ + bit_permute_step(x, 0x00cc00cc, 6); \ + bit_permute_step(x, 0x0000f0f0, 12); \ + bit_permute_step(x, 0x000000ff, 24); \ + } while (0) +#define PERM0(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate8(_x); \ + } while (0) +#define PERM1(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate16(_x); \ + } while (0) +#define PERM2(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate24(_x); \ + } while (0) +#define PERM3(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) + +#define INV_PERM3_INNER(x) \ + do { \ + bit_permute_step(x, 0x00550055, 9); \ + bit_permute_step(x, 0x00003333, 18); \ + bit_permute_step(x, 0x000f000f, 12); \ + bit_permute_step(x, 0x000000ff, 24); \ + } while (0) +#define INV_PERM0(x) \ + do { \ + uint32_t _x = rightRotate8(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM1(x) \ + do { \ + uint32_t _x = rightRotate16(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM2(x) \ + do { \ + uint32_t _x = rightRotate24(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM3(x) \ + do { \ + uint32_t _x = (x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) + +/** + * \brief Converts the GIFT-128 nibble-based representation into word-based. + * + * \param output Output buffer to write the word-based version to. + * \param input Input buffer to read the nibble-based version from. + * + * The \a input and \a output buffers can be the same buffer. + */ +static void gift128n_to_words + (unsigned char *output, const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + + /* Load the input buffer into 32-bit words. We use the nibble order + * from the HYENA submission to NIST which is byte-reversed with respect + * to the nibble order of the original GIFT-128 paper. Nibble zero is in + * the first byte instead of the last, which means little-endian order. */ + s0 = le_load_word32(input + 12); + s1 = le_load_word32(input + 8); + s2 = le_load_word32(input + 4); + s3 = le_load_word32(input); + + /* Rearrange the bits so that bits 0..3 of each nibble are + * scattered to bytes 0..3 of each word. The permutation is: + * + * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 + * + * Generated with "http://programming.sirrida.de/calcperm.php". + */ + #define PERM_WORDS(_x) \ + do { \ + uint32_t x = (_x); \ + bit_permute_step(x, 0x0a0a0a0a, 3); \ + bit_permute_step(x, 0x00cc00cc, 6); \ + bit_permute_step(x, 0x0000f0f0, 12); \ + bit_permute_step(x, 0x0000ff00, 8); \ + (_x) = x; \ + } while (0) + PERM_WORDS(s0); + PERM_WORDS(s1); + PERM_WORDS(s2); + PERM_WORDS(s3); + + /* Rearrange the bytes and write them to the output buffer */ + output[0] = (uint8_t)s0; + output[1] = (uint8_t)s1; + output[2] = (uint8_t)s2; + output[3] = (uint8_t)s3; + output[4] = (uint8_t)(s0 >> 8); + output[5] = (uint8_t)(s1 >> 8); + output[6] = (uint8_t)(s2 >> 8); + output[7] = (uint8_t)(s3 >> 8); + output[8] = (uint8_t)(s0 >> 16); + output[9] = (uint8_t)(s1 >> 16); + output[10] = (uint8_t)(s2 >> 16); + output[11] = (uint8_t)(s3 >> 16); + output[12] = (uint8_t)(s0 >> 24); + output[13] = (uint8_t)(s1 >> 24); + output[14] = (uint8_t)(s2 >> 24); + output[15] = (uint8_t)(s3 >> 24); +} + +/** + * \brief Converts the GIFT-128 word-based representation into nibble-based. 
+ * + * \param output Output buffer to write the nibble-based version to. + * \param input Input buffer to read the word-based version from. + */ +static void gift128n_to_nibbles + (unsigned char *output, const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + + /* Load the input bytes and rearrange them so that s0 contains the + * most significant nibbles and s3 contains the least significant */ + s0 = (((uint32_t)(input[12])) << 24) | + (((uint32_t)(input[8])) << 16) | + (((uint32_t)(input[4])) << 8) | + ((uint32_t)(input[0])); + s1 = (((uint32_t)(input[13])) << 24) | + (((uint32_t)(input[9])) << 16) | + (((uint32_t)(input[5])) << 8) | + ((uint32_t)(input[1])); + s2 = (((uint32_t)(input[14])) << 24) | + (((uint32_t)(input[10])) << 16) | + (((uint32_t)(input[6])) << 8) | + ((uint32_t)(input[2])); + s3 = (((uint32_t)(input[15])) << 24) | + (((uint32_t)(input[11])) << 16) | + (((uint32_t)(input[7])) << 8) | + ((uint32_t)(input[3])); + + /* Apply the inverse of PERM_WORDS() from the function above */ + #define INV_PERM_WORDS(_x) \ + do { \ + uint32_t x = (_x); \ + bit_permute_step(x, 0x00aa00aa, 7); \ + bit_permute_step(x, 0x0000cccc, 14); \ + bit_permute_step(x, 0x00f000f0, 4); \ + bit_permute_step(x, 0x0000ff00, 8); \ + (_x) = x; \ + } while (0) + INV_PERM_WORDS(s0); + INV_PERM_WORDS(s1); + INV_PERM_WORDS(s2); + INV_PERM_WORDS(s3); + + /* Store the result into the output buffer as 32-bit words */ + le_store_word32(output + 12, s0); + le_store_word32(output + 8, s1); + le_store_word32(output + 4, s2); + le_store_word32(output, s3); +} + +void gift128n_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + gift128n_to_words(output, input); + gift128b_encrypt(ks, output, output); + gift128n_to_nibbles(output, output); +} + +void gift128n_decrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + gift128n_to_words(output, input); + gift128b_decrypt(ks, output, output); + gift128n_to_nibbles(output, output); +} + +#if GIFT128_VARIANT != GIFT128_VARIANT_TINY + /** * \brief Swaps bits within two words. 
* @@ -202,21 +446,27 @@ static void gift128b_compute_round_keys /* Keys 8, 9, 18, and 19 do not need any adjustment */ } +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL /* Derive the fixsliced keys for the remaining rounds 11..40 */ for (index = 20; index < 80; index += 10) { gift128b_derive_keys(ks->k + index, ks->k + index - 20); } +#endif } -int gift128b_init - (gift128b_key_schedule_t *ks, const unsigned char *key, size_t key_len) +void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) { - if (!ks || !key || key_len != 16) - return 0; gift128b_compute_round_keys (ks, be_load_word32(key), be_load_word32(key + 4), be_load_word32(key + 8), be_load_word32(key + 12)); - return 1; +} + +void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) +{ + /* Use the little-endian key byte order from the HYENA submission */ + gift128b_compute_round_keys + (ks, le_load_word32(key + 12), le_load_word32(key + 8), + le_load_word32(key + 4), le_load_word32(key)); } /** @@ -521,11 +771,37 @@ int gift128b_init gift128b_inv_sbox(s3, s1, s2, s0); \ } while (0) +#else /* GIFT128_VARIANT_TINY */ + +void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) +{ + /* Mirror the fixslicing word order of 3, 1, 2, 0 */ + ks->k[0] = be_load_word32(key + 12); + ks->k[1] = be_load_word32(key + 4); + ks->k[2] = be_load_word32(key + 8); + ks->k[3] = be_load_word32(key); +} + +void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) +{ + /* Use the little-endian key byte order from the HYENA submission + * and mirror the fixslicing word order of 3, 1, 2, 0 */ + ks->k[0] = le_load_word32(key); + ks->k[1] = le_load_word32(key + 8); + ks->k[2] = le_load_word32(key + 4); + ks->k[3] = le_load_word32(key + 12); +} + +#endif /* GIFT128_VARIANT_TINY */ + +#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL + void gift128b_encrypt (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { uint32_t s0, s1, s2, s3; + uint32_t k[20]; /* Copy the plaintext into the state buffer and convert from big endian */ s0 = be_load_word32(input); @@ -534,14 +810,20 @@ void gift128b_encrypt s3 = be_load_word32(input + 12); /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer in big endian */ be_store_word32(output, s0); @@ -555,6 +837,7 @@ void 
gift128b_encrypt_preloaded const uint32_t input[4]) { uint32_t s0, s1, s2, s3; + uint32_t k[20]; /* Copy the plaintext into local variables */ s0 = input[0]; @@ -563,14 +846,20 @@ void gift128b_encrypt_preloaded s3 = input[3]; /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer */ output[0] = s0; @@ -579,7 +868,55 @@ void gift128b_encrypt_preloaded output[3] = s3; } -void gift128b_decrypt +void gift128t_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, uint32_t tweak) +{ + uint32_t s0, s1, s2, s3; + uint32_t k[20]; + + /* Copy the plaintext into the state buffer and convert from nibbles */ + gift128n_to_words(output, input); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* Perform all 40 rounds five at a time using the fixsliced method. 
+ * Every 5 rounds except the last we add the tweak value to the state */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + s0 ^= tweak; + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + s0 ^= tweak; + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + s0 ^= tweak; + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); + gift128n_to_nibbles(output, output); +} + +#elif GIFT128_VARIANT == GIFT128_VARIANT_FULL + +void gift128b_encrypt (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { @@ -592,14 +929,14 @@ void gift128b_decrypt s3 = be_load_word32(input + 12); /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer in big endian */ be_store_word32(output, s0); @@ -608,173 +945,308 @@ void gift128b_decrypt be_store_word32(output + 12, s3); } -int gift128n_init - (gift128n_key_schedule_t *ks, const unsigned char *key, size_t key_len) +void gift128b_encrypt_preloaded + (const gift128b_key_schedule_t *ks, uint32_t output[4], + const uint32_t input[4]) { - /* Use the little-endian key byte order from the HYENA submission */ - if (!ks || !key || key_len != 16) - return 0; - gift128b_compute_round_keys - (ks, le_load_word32(key + 12), le_load_word32(key + 8), - le_load_word32(key + 4), le_load_word32(key)); - return 1; + uint32_t s0, s1, s2, s3; + + /* Copy the plaintext into local variables */ + s0 = input[0]; + s1 = input[1]; + s2 = input[2]; + s3 = input[3]; + + /* Perform all 40 rounds five at a time using the fixsliced method */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(ks->k + 30, 
GIFT128_RC_fixsliced + 15); + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer */ + output[0] = s0; + output[1] = s1; + output[2] = s2; + output[3] = s3; } -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) +void gift128t_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, uint32_t tweak) +{ + uint32_t s0, s1, s2, s3; -/** - * \brief Converts the GIFT-128 nibble-based representation into word-based. - * - * \param output Output buffer to write the word-based version to. - * \param input Input buffer to read the nibble-based version from. - * - * The \a input and \a output buffers can be the same buffer. - */ -static void gift128n_to_words - (unsigned char *output, const unsigned char *input) + /* Copy the plaintext into the state buffer and convert from nibbles */ + gift128n_to_words(output, input); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* Perform all 40 rounds five at a time using the fixsliced method. + * Every 5 rounds except the last we add the tweak value to the state */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); + gift128n_to_nibbles(output, output); +} + +#else /* GIFT128_VARIANT_TINY */ + +void gift128b_encrypt + (const gift128b_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) { uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Load the input buffer into 32-bit words. We use the nibble order - * from the HYENA submission to NIST which is byte-reversed with respect - * to the nibble order of the original GIFT-128 paper. Nibble zero is in - * the first byte instead of the last, which means little-endian order. */ - s0 = le_load_word32(input + 12); - s1 = le_load_word32(input + 8); - s2 = le_load_word32(input + 4); - s3 = le_load_word32(input); + /* Copy the plaintext into the state buffer and convert from big endian */ + s0 = be_load_word32(input); + s1 = be_load_word32(input + 4); + s2 = be_load_word32(input + 8); + s3 = be_load_word32(input + 12); - /* Rearrange the bits so that bits 0..3 of each nibble are - * scattered to bytes 0..3 of each word. 
The permutation is: - * - * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 - * - * Generated with "http://programming.sirrida.de/calcperm.php". - */ - #define PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - PERM_WORDS(s0); - PERM_WORDS(s1); - PERM_WORDS(s2); - PERM_WORDS(s3); + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } - /* Rearrange the bytes and write them to the output buffer */ - output[0] = (uint8_t)s0; - output[1] = (uint8_t)s1; - output[2] = (uint8_t)s2; - output[3] = (uint8_t)s3; - output[4] = (uint8_t)(s0 >> 8); - output[5] = (uint8_t)(s1 >> 8); - output[6] = (uint8_t)(s2 >> 8); - output[7] = (uint8_t)(s3 >> 8); - output[8] = (uint8_t)(s0 >> 16); - output[9] = (uint8_t)(s1 >> 16); - output[10] = (uint8_t)(s2 >> 16); - output[11] = (uint8_t)(s3 >> 16); - output[12] = (uint8_t)(s0 >> 24); - output[13] = (uint8_t)(s1 >> 24); - output[14] = (uint8_t)(s2 >> 24); - output[15] = (uint8_t)(s3 >> 24); + /* Pack the state into the ciphertext buffer in big endian */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); } -/** - * \brief Converts the GIFT-128 word-based representation into nibble-based. - * - * \param output Output buffer to write the nibble-based version to. - * \param input Input buffer to read the word-based version from. 
- */ -static void gift128n_to_nibbles - (unsigned char *output, const unsigned char *input) +void gift128b_encrypt_preloaded + (const gift128b_key_schedule_t *ks, uint32_t output[4], + const uint32_t input[4]) { uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Load the input bytes and rearrange them so that s0 contains the - * most significant nibbles and s3 contains the least significant */ - s0 = (((uint32_t)(input[12])) << 24) | - (((uint32_t)(input[8])) << 16) | - (((uint32_t)(input[4])) << 8) | - ((uint32_t)(input[0])); - s1 = (((uint32_t)(input[13])) << 24) | - (((uint32_t)(input[9])) << 16) | - (((uint32_t)(input[5])) << 8) | - ((uint32_t)(input[1])); - s2 = (((uint32_t)(input[14])) << 24) | - (((uint32_t)(input[10])) << 16) | - (((uint32_t)(input[6])) << 8) | - ((uint32_t)(input[2])); - s3 = (((uint32_t)(input[15])) << 24) | - (((uint32_t)(input[11])) << 16) | - (((uint32_t)(input[7])) << 8) | - ((uint32_t)(input[3])); + /* Copy the plaintext into the state buffer */ + s0 = input[0]; + s1 = input[1]; + s2 = input[2]; + s3 = input[3]; - /* Apply the inverse of PERM_WORDS() from the function above */ - #define INV_PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x00aa00aa, 7); \ - bit_permute_step(x, 0x0000cccc, 14); \ - bit_permute_step(x, 0x00f000f0, 4); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - INV_PERM_WORDS(s0); - INV_PERM_WORDS(s1); - INV_PERM_WORDS(s2); - INV_PERM_WORDS(s3); + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } - /* Store the result into the output buffer as 32-bit words */ - le_store_word32(output + 12, s0); - le_store_word32(output + 8, s1); - le_store_word32(output + 4, s2); - le_store_word32(output, s3); + /* Pack the state into the ciphertext buffer */ + output[0] = s0; + output[1] = s1; + output[2] = s2; + output[3] = s3; } -void gift128n_encrypt +void gift128t_encrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) + const unsigned char *input, uint32_t tweak) { + uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; + + /* Copy the plaintext into the state buffer and convert from nibbles */ gift128n_to_words(output, input); - gift128b_encrypt(ks, output, output); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 
0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* AddTweak - XOR in the tweak every 5 rounds except the last */ + if (((round + 1) % 5) == 0 && round < 39) + s0 ^= tweak; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); gift128n_to_nibbles(output, output); } -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, +#endif /* GIFT128_VARIANT_TINY */ + +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL + +void gift128b_decrypt + (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { - gift128n_to_words(output, input); - gift128b_decrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} + uint32_t s0, s1, s2, s3; -/* 4-bit tweak values expanded to 32-bit */ -static uint32_t const GIFT128_tweaks[16] = { - 0x00000000, 0xe1e1e1e1, 0xd2d2d2d2, 0x33333333, - 0xb4b4b4b4, 0x55555555, 0x66666666, 0x87878787, - 0x78787878, 0x99999999, 0xaaaaaaaa, 0x4b4b4b4b, - 0xcccccccc, 0x2d2d2d2d, 0x1e1e1e1e, 0xffffffff -}; + /* Copy the plaintext into the state buffer and convert from big endian */ + s0 = be_load_word32(input); + s1 = be_load_word32(input + 4); + s2 = be_load_word32(input + 8); + s3 = be_load_word32(input + 12); -void gift128t_encrypt + /* Perform all 40 rounds five at a time using the fixsliced method */ + gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + + /* Pack the state into the ciphertext buffer in big endian */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); +} + +void gift128t_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak) + const unsigned char *input, uint32_t tweak) { - uint32_t s0, s1, s2, s3, tword; + uint32_t s0, s1, s2, s3; - /* Copy the plaintext into the state buffer and convert from nibbles */ + /* Copy the ciphertext into the state buffer and convert from nibbles */ gift128n_to_words(output, input); s0 = be_load_word32(output); s1 = be_load_word32(output + 4); @@ -782,25 +1254,24 @@ void gift128t_encrypt s3 = be_load_word32(output + 12); /* Perform all 40 rounds five at a time using the fixsliced method. 
-     * Every 5 rounds except the last we add the tweak value to the state */
-    tword = GIFT128_tweaks[tweak];
-    gift128b_encrypt_5_rounds(ks->k, GIFT128_RC);
-    s0 ^= tword;
-    gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5);
-    s0 ^= tword;
-    gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10);
-    s0 ^= tword;
-    gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15);
-    s0 ^= tword;
-    gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20);
-    s0 ^= tword;
-    gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25);
-    s0 ^= tword;
-    gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30);
-    s0 ^= tword;
-    gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35);
+     * Every 5 rounds except the first we add the tweak value to the state */
+    gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35);
+    s0 ^= tweak;
+    gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30);
+    s0 ^= tweak;
+    gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25);
+    s0 ^= tweak;
+    gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20);
+    s0 ^= tweak;
+    gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15);
+    s0 ^= tweak;
+    gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10);
+    s0 ^= tweak;
+    gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5);
+    s0 ^= tweak;
+    gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced);
-    /* Pack the state into the ciphertext buffer in nibble form */
+    /* Pack the state into the plaintext buffer in nibble form */
     be_store_word32(output, s0);
     be_store_word32(output + 4, s1);
     be_store_word32(output + 8, s2);
@@ -808,37 +1279,211 @@ void gift128t_encrypt
     gift128n_to_nibbles(output, output);
 }
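The fixsliced gift128t_encrypt/gift128t_decrypt pair above now takes the tweak as a pre-expanded 32-bit word. As a quick orientation aid (not part of the patch), the sketch below shows how a caller would round-trip one block through the revised API, using one of the GIFT128T_TWEAK_* constants declared further down in internal-gift128.h; the key and block values are arbitrary test data.

    #include <stdio.h>
    #include <string.h>
    #include "internal-gift128.h"

    int main(void)
    {
        gift128n_key_schedule_t ks;
        unsigned char key[16], block[16], copy[16];
        int i;

        /* Arbitrary test key and plaintext block */
        for (i = 0; i < 16; ++i) {
            key[i] = (unsigned char)(0xA5 ^ i);
            block[i] = (unsigned char)i;
        }
        memcpy(copy, block, sizeof(copy));

        /* Revised API: the init call takes exactly 16 key bytes and cannot fail */
        gift128n_init(&ks, key);

        /* Encrypt and then decrypt in place with the same expanded tweak value */
        gift128t_encrypt(&ks, block, block, GIFT128T_TWEAK_11);
        gift128t_decrypt(&ks, block, block, GIFT128T_TWEAK_11);

        printf("TweGIFT-128 round trip %s\n",
               memcmp(block, copy, sizeof(copy)) == 0 ? "ok" : "FAILED");
        return 0;
    }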
+#else /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */
+
+/* The small variant uses fixslicing for encryption, but we need to change
+ * to bitslicing for decryption because of the difficulty of fast-forwarding
+ * the fixsliced key schedule to the end. So the tiny variant is used for
+ * decryption when the small variant is selected. Since the NIST AEAD modes
+ * for GIFT-128 only use the block encrypt operation, the inefficiencies
+ * in decryption don't matter all that much */
+
+/**
+ * \def gift128b_load_and_forward_schedule()
+ * \brief Generate the decryption key at the end of the last round.
+ *
+ * To do that, we run the block operation forward to determine the
+ * final state of the key schedule after the last round:
+ *
+ *      w0 = ks->k[0];
+ *      w1 = ks->k[1];
+ *      w2 = ks->k[2];
+ *      w3 = ks->k[3];
+ *      for (round = 0; round < 40; ++round) {
+ *          temp = w3;
+ *          w3 = w2;
+ *          w2 = w1;
+ *          w1 = w0;
+ *          w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) |
+ *               ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12);
+ *      }
+ *
+ * We can short-cut all of the above by noticing that we don't need
+ * to do the word rotations. Every 4 rounds, the rotation alignment
+ * returns to the original position and each word has been rotated
+ * by applying the "2 right and 4 left" bit-rotation step to it.
+ * We then repeat that 10 times for the full 40 rounds. The overall
+ * effect is to apply a "20 right and 40 left" bit-rotation to every
+ * word in the key schedule. That is equivalent to "4 right and 8 left"
+ * on the 16-bit sub-words.
+ */
+#if GIFT128_VARIANT != GIFT128_VARIANT_SMALL
+#define gift128b_load_and_forward_schedule() \
+    do { \
+        w0 = ks->k[3]; \
+        w1 = ks->k[1]; \
+        w2 = ks->k[2]; \
+        w3 = ks->k[0]; \
+        w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \
+             ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \
+        w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \
+             ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \
+        w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \
+             ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \
+        w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \
+             ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \
+    } while (0)
+#else
+/* The small variant needs to also undo some of the rotations that were
+ * done to generate the fixsliced version of the key schedule */
+#define gift128b_load_and_forward_schedule() \
+    do { \
+        w0 = ks->k[3]; \
+        w1 = ks->k[1]; \
+        w2 = ks->k[2]; \
+        w3 = ks->k[0]; \
+        gift128b_swap_move(w3, w3, 0x000000FFU, 24); \
+        gift128b_swap_move(w3, w3, 0x00003333U, 18); \
+        gift128b_swap_move(w3, w3, 0x000F000FU, 12); \
+        gift128b_swap_move(w3, w3, 0x00550055U, 9); \
+        gift128b_swap_move(w1, w1, 0x000000FFU, 24); \
+        gift128b_swap_move(w1, w1, 0x00003333U, 18); \
+        gift128b_swap_move(w1, w1, 0x000F000FU, 12); \
+        gift128b_swap_move(w1, w1, 0x00550055U, 9); \
+        gift128b_swap_move(w2, w2, 0x000000FFU, 24); \
+        gift128b_swap_move(w2, w2, 0x000F000FU, 12); \
+        gift128b_swap_move(w2, w2, 0x03030303U, 6); \
+        gift128b_swap_move(w2, w2, 0x11111111U, 3); \
+        gift128b_swap_move(w0, w0, 0x000000FFU, 24); \
+        gift128b_swap_move(w0, w0, 0x000F000FU, 12); \
+        gift128b_swap_move(w0, w0, 0x03030303U, 6); \
+        gift128b_swap_move(w0, w0, 0x11111111U, 3); \
+        w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \
+             ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \
+        w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \
+             ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \
+        w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \
+             ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \
+        w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \
+             ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \
+    } while (0)
+#endif
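The comment above argues that running the key schedule forward for 40 rounds collapses to a single "4 right and 8 left" rotation of the 16-bit sub-words, which is exactly what the macro applies. The standalone sketch below (not part of the patch; the helper names are mine) checks that claim by iterating the per-round bit-rotation ten times and comparing the result with the combined rotation.

    #include <stdint.h>
    #include <stdio.h>

    /* One application of the per-round bit-rotation step quoted above */
    static uint32_t round_step(uint32_t w)
    {
        return ((w & 0xFFFC0000U) >> 2) | ((w & 0x00030000U) << 14) |
               ((w & 0x00000FFFU) << 4) | ((w & 0x0000F000U) >> 12);
    }

    /* The combined "4 right and 8 left" rotation applied by the macro */
    static uint32_t fast_forward(uint32_t w)
    {
        return ((w & 0xFFF00000U) >> 4) | ((w & 0x000F0000U) << 12) |
               ((w & 0x000000FFU) << 8) | ((w & 0x0000FF00U) >> 8);
    }

    int main(void)
    {
        uint32_t w = 0x12345678U;   /* arbitrary test word */
        uint32_t slow = w;
        int i;

        /* Every 4 rounds a word returns to its slot with one round_step()
         * applied, so 40 rounds apply round_step() exactly 10 times per word */
        for (i = 0; i < 10; ++i)
            slow = round_step(slow);

        printf("slow=%08lx fast=%08lx %s\n",
               (unsigned long)slow, (unsigned long)fast_forward(w),
               slow == fast_forward(w) ? "match" : "MISMATCH");
        return 0;
    }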
+
+void gift128b_decrypt
+    (const gift128b_key_schedule_t *ks, unsigned char *output,
+     const unsigned char *input)
+{
+    uint32_t s0, s1, s2, s3;
+    uint32_t w0, w1, w2, w3;
+    uint32_t temp;
+    uint8_t round;
+
+    /* Copy the ciphertext into the state buffer and convert from big endian */
+    s0 = be_load_word32(input);
+    s1 = be_load_word32(input + 4);
+    s2 = be_load_word32(input + 8);
+    s3 = be_load_word32(input + 12);
+
+    /* Generate the decryption key at the end of the last round */
+    gift128b_load_and_forward_schedule();
+
+    /* Perform all 40 rounds */
+    for (round = 40; round > 0; --round) {
+        /* Rotate the key schedule backwards */
+        temp = w0;
+        w0 = w1;
+        w1 = w2;
+        w2 = w3;
+        w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) |
+             ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12);
+
+        /* AddRoundKey - XOR in the key schedule and the round constant */
+        s2 ^= w1;
+        s1 ^= w3;
+        s3 ^= 0x80000000U ^ GIFT128_RC[round - 1];
+
+        /* InvPermBits - apply the inverse of the 128-bit permutation */
+        INV_PERM0(s0);
+        INV_PERM1(s1);
+        INV_PERM2(s2);
+        INV_PERM3(s3);
+
+        /* InvSubCells - apply the inverse of the S-box */
+        temp = s0;
+        s0 = s3;
+        s3 = temp;
+        s2 ^= s0 & s1;
+        s3 ^= 0xFFFFFFFFU;
+        s1 ^= s3;
+        s3 ^= s2;
+        s2 ^= s0 | s1;
+        s0 ^= s1 & s3;
+        s1 ^= s0 & s2;
+    }
+
+    /* Pack the state into the plaintext buffer in big endian */
+    be_store_word32(output, s0);
+    be_store_word32(output + 4, s1);
+    be_store_word32(output + 8, s2);
+    be_store_word32(output + 12, s3);
+}
+
 void gift128t_decrypt
     (const gift128n_key_schedule_t *ks, unsigned char *output,
-     const unsigned char *input, unsigned char tweak)
+     const unsigned char *input, uint32_t tweak)
 {
-    uint32_t s0, s1, s2, s3, tword;
+    uint32_t s0, s1, s2, s3;
+    uint32_t w0, w1, w2, w3;
+    uint32_t temp;
+    uint8_t round;
     /* Copy the ciphertext into the state buffer and convert from nibbles */
     gift128n_to_words(output, input);
     s0 = be_load_word32(output);
     s1 = be_load_word32(output + 4);
     s2 = be_load_word32(output + 8);
     s3 = be_load_word32(output + 12);
-    /* Perform all 40 rounds five at a time using the fixsliced method.
-     * Every 5 rounds except the first we add the tweak value to the state */
-    tword = GIFT128_tweaks[tweak];
-    gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC + 35);
-    s0 ^= tword;
-    gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC + 30);
-    s0 ^= tword;
-    gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC + 25);
-    s0 ^= tword;
-    gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC + 20);
-    s0 ^= tword;
-    gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC + 15);
-    s0 ^= tword;
-    gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC + 10);
-    s0 ^= tword;
-    gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC + 5);
-    s0 ^= tword;
-    gift128b_decrypt_5_rounds(ks->k, GIFT128_RC);
+    /* Generate the decryption key at the end of the last round */
+    gift128b_load_and_forward_schedule();
+
+    /* Perform all 40 rounds */
+    for (round = 40; round > 0; --round) {
+        /* Rotate the key schedule backwards */
+        temp = w0;
+        w0 = w1;
+        w1 = w2;
+        w2 = w3;
+        w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) |
+             ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12);
+
+        /* AddTweak - XOR in the tweak every 5 rounds except the last */
+        if ((round % 5) == 0 && round < 40)
+            s0 ^= tweak;
+
+        /* AddRoundKey - XOR in the key schedule and the round constant */
+        s2 ^= w1;
+        s1 ^= w3;
+        s3 ^= 0x80000000U ^ GIFT128_RC[round - 1];
+
+        /* InvPermBits - apply the inverse of the 128-bit permutation */
+        INV_PERM0(s0);
+        INV_PERM1(s1);
+        INV_PERM2(s2);
+        INV_PERM3(s3);
+
+        /* InvSubCells - apply the inverse of the S-box */
+        temp = s0;
+        s0 = s3;
+        s3 = temp;
+        s2 ^= s0 & s1;
+        s3 ^= 0xFFFFFFFFU;
+        s1 ^= s3;
+        s3 ^= s2;
+        s2 ^= s0 | s1;
+        s0 ^= s1 & s3;
+        s1 ^= s0 & s2;
+    }
     /* Pack the state into the plaintext buffer in nibble form */
     be_store_word32(output, s0);
@@ -847,3 +1492,7 @@ void gift128t_decrypt
     be_store_word32(output + 12, s3);
     gift128n_to_nibbles(output, output);
 }
+
+#endif /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */
+
+#endif /* !GIFT128_VARIANT_ASM */
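The bit-sliced decryption loops above step the key schedule backwards with a rotation that must be the exact inverse of the forward step used during encryption. The following sketch (not part of the patch; the helper names are hypothetical) verifies that the two expressions undo each other on an arbitrary word.

    #include <stdint.h>
    #include <stdio.h>

    /* Forward key-schedule rotation used by the encryption rounds */
    static uint32_t key_rot_forward(uint32_t w)
    {
        return ((w & 0xFFFC0000U) >> 2) | ((w & 0x00030000U) << 14) |
               ((w & 0x00000FFFU) << 4) | ((w & 0x0000F000U) >> 12);
    }

    /* Backward key-schedule rotation used by the decryption loops above */
    static uint32_t key_rot_backward(uint32_t w)
    {
        return ((w & 0x3FFF0000U) << 2) | ((w & 0xC0000000U) >> 14) |
               ((w & 0x0000FFF0U) >> 4) | ((w & 0x0000000FU) << 12);
    }

    int main(void)
    {
        uint32_t w = 0xCAFEBABEU;   /* arbitrary test word */
        uint32_t back = key_rot_backward(w);

        /* Applying the forward step to the backward result must restore w */
        printf("restored=%08lx original=%08lx %s\n",
               (unsigned long)key_rot_forward(back), (unsigned long)w,
               key_rot_forward(back) == w ? "match" : "MISMATCH");
        return 0;
    }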
diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128.h b/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128.h
index 1ac40e5..f57d143 100644
--- a/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128.h
+++ b/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128.h
@@ -47,11 +47,13 @@
  * in any of the NIST submissions so we don't bother with it in this library.
  *
  * References: https://eprint.iacr.org/2017/622.pdf,
+ *             https://eprint.iacr.org/2020/412.pdf,
  *             https://giftcipher.github.io/gift/
  */
 #include <stddef.h>
 #include <stdint.h>
+#include "internal-gift128-config.h"
 #ifdef __cplusplus
 extern "C" {
@@ -63,16 +65,23 @@ extern "C" {
 #define GIFT128_BLOCK_SIZE 16
 /**
- * \brief Number of round keys for the fixsliced representation of GIFT-128.
+ * \var GIFT128_ROUND_KEYS
+ * \brief Number of round keys for the GIFT-128 key schedule.
  */
+#if GIFT128_VARIANT == GIFT128_VARIANT_TINY
+#define GIFT128_ROUND_KEYS 4
+#elif GIFT128_VARIANT == GIFT128_VARIANT_SMALL
+#define GIFT128_ROUND_KEYS 20
+#else
 #define GIFT128_ROUND_KEYS 80
+#endif
 /**
  * \brief Structure of the key schedule for GIFT-128 (bit-sliced).
  */
 typedef struct
 {
-    /** Pre-computed round keys in the fixsliced form */
+    /** Pre-computed round keys for bit-sliced GIFT-128 */
     uint32_t k[GIFT128_ROUND_KEYS];
 } gift128b_key_schedule_t;
@@ -81,14 +90,9 @@ typedef struct
  * \brief Initializes the key schedule for GIFT-128 (bit-sliced).
  *
  * \param ks Points to the key schedule to initialize.
- * \param key Points to the key data.
- * \param key_len Length of the key data, which must be 16.
- *
- * \return Non-zero on success or zero if there is something wrong
- * with the parameters.
+ * \param key Points to the 16 bytes of the key data.
  */
-int gift128b_init
-    (gift128b_key_schedule_t *ks, const unsigned char *key, size_t key_len);
+void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key);
 /**
  * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced).
@@ -145,14 +149,9 @@ typedef gift128b_key_schedule_t gift128n_key_schedule_t;
  * \brief Initializes the key schedule for GIFT-128 (nibble-based).
  *
  * \param ks Points to the key schedule to initialize.
- * \param key Points to the key data.
- * \param key_len Length of the key data, which must be 16.
- *
- * \return Non-zero on success or zero if there is something wrong
- * with the parameters.
+ * \param key Points to the 16 bytes of the key data.
  */
-int gift128n_init
-    (gift128n_key_schedule_t *ks, const unsigned char *key, size_t key_len);
+void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key);
 /**
  * \brief Encrypts a 128-bit block with GIFT-128 (nibble-based).
@@ -182,13 +181,31 @@ void gift128n_decrypt
     (const gift128n_key_schedule_t *ks, unsigned char *output,
      const unsigned char *input);
+/* 4-bit tweak values expanded to 32-bit for TweGIFT-128 */
+#define GIFT128T_TWEAK_0    0x00000000  /**< TweGIFT-128 tweak value 0 */
+#define GIFT128T_TWEAK_1    0xe1e1e1e1  /**< TweGIFT-128 tweak value 1 */
+#define GIFT128T_TWEAK_2    0xd2d2d2d2  /**< TweGIFT-128 tweak value 2 */
+#define GIFT128T_TWEAK_3    0x33333333  /**< TweGIFT-128 tweak value 3 */
+#define GIFT128T_TWEAK_4    0xb4b4b4b4  /**< TweGIFT-128 tweak value 4 */
+#define GIFT128T_TWEAK_5    0x55555555  /**< TweGIFT-128 tweak value 5 */
+#define GIFT128T_TWEAK_6    0x66666666  /**< TweGIFT-128 tweak value 6 */
+#define GIFT128T_TWEAK_7    0x87878787  /**< TweGIFT-128 tweak value 7 */
+#define GIFT128T_TWEAK_8    0x78787878  /**< TweGIFT-128 tweak value 8 */
+#define GIFT128T_TWEAK_9    0x99999999  /**< TweGIFT-128 tweak value 9 */
+#define GIFT128T_TWEAK_10   0xaaaaaaaa  /**< TweGIFT-128 tweak value 10 */
+#define GIFT128T_TWEAK_11   0x4b4b4b4b  /**< TweGIFT-128 tweak value 11 */
+#define GIFT128T_TWEAK_12   0xcccccccc  /**< TweGIFT-128 tweak value 12 */
+#define GIFT128T_TWEAK_13   0x2d2d2d2d  /**< TweGIFT-128 tweak value 13 */
+#define GIFT128T_TWEAK_14   0x1e1e1e1e  /**< TweGIFT-128 tweak value 14 */
+#define GIFT128T_TWEAK_15   0xffffffff  /**< TweGIFT-128 tweak value 15 */
+
 /**
  * \brief Encrypts a 128-bit block with TweGIFT-128 (tweakable variant).
  *
  * \param ks Points to the GIFT-128 key schedule.
  * \param output Output buffer which must be at least 16 bytes in length.
  * \param input Input buffer which must be at least 16 bytes in length.
- * \param tweak 4-bit tweak value.
+ * \param tweak 4-bit tweak value expanded to 32-bit.
  *
  * The \a input and \a output buffers can be the same buffer for
  * in-place encryption.
@@ -200,7 +217,7 @@
  */
 void gift128t_encrypt
     (const gift128n_key_schedule_t *ks, unsigned char *output,
-     const unsigned char *input, unsigned char tweak);
+     const unsigned char *input, uint32_t tweak);
 /**
  * \brief Decrypts a 128-bit block with TweGIFT-128 (tweakable variant).
  *
@@ -208,7 +225,7 @@ void gift128t_encrypt
  * \param ks Points to the GIFT-128 key schedule.
  * \param output Output buffer which must be at least 16 bytes in length.
  * \param input Input buffer which must be at least 16 bytes in length.
- * \param tweak 4-bit tweak value.
+ * \param tweak 4-bit tweak value expanded to 32-bit.
  *
  * The \a input and \a output buffers can be the same buffer for
  * in-place encryption.
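The sixteen GIFT128T_TWEAK_* constants above appear to follow a simple rule: the low nibble of each byte is the 4-bit tweak, the high nibble is the tweak with each bit XORed against the parity of the tweak, and that byte is repeated four times. This reading is my own inference from the listed values rather than something stated in the patch; the sketch below (with a hypothetical helper name) reproduces all sixteen constants from it.

    #include <stdint.h>
    #include <stdio.h>

    /* Expand a 4-bit tweak t to the 32-bit form used by the constants above:
     * low nibble = t, high nibble = t XORed in every bit with the parity of t,
     * and the resulting byte repeated four times. */
    static uint32_t twegift128_expand_tweak(unsigned t)
    {
        unsigned parity = (t ^ (t >> 1) ^ (t >> 2) ^ (t >> 3)) & 1;
        unsigned high = (t ^ (parity ? 0xF : 0)) & 0xF;
        uint32_t byte = (uint32_t)((high << 4) | (t & 0xF));
        return byte * 0x01010101U;   /* replicate the byte into all four lanes */
    }

    int main(void)
    {
        unsigned t;
        for (t = 0; t < 16; ++t)
            printf("GIFT128T_TWEAK_%-2u = 0x%08lx\n",
                   t, (unsigned long)twegift128_expand_tweak(t));
        return 0;
    }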
@@ -220,7 +237,7 @@ void gift128t_encrypt */ void gift128t_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak); + const unsigned char *input, uint32_t tweak); #ifdef __cplusplus } diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128n-avr.S b/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128n-avr.S new file mode 100644 index 0000000..2aae304 --- /dev/null +++ b/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128n-avr.S @@ -0,0 +1,4712 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 40 +table_0: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 11 + .byte 23 + .byte 46 + .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128n_init + .type gift128n_init, @function +gift128n_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + std Z+12,r18 + std Z+13,r19 + std Z+14,r20 + std Z+15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + std Z+4,r18 + std Z+5,r19 + std Z+6,r20 + std Z+7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + ret + .size gift128n_init, .-gift128n_init + + .text +.global gift128n_encrypt + .type gift128n_encrypt, @function +gift128n_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 36 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r22,0 + bst r18,1 + bld r4,0 + bst r18,2 + bld r8,0 + bst r18,3 + bld r12,0 + bst r18,4 + bld r22,1 + bst r18,5 + bld r4,1 + bst r18,6 + bld r8,1 + bst r18,7 + bld r12,1 + bst r19,0 + bld r22,2 + bst r19,1 + bld r4,2 + bst r19,2 + bld r8,2 + bst r19,3 + bld r12,2 + bst r19,4 + bld r22,3 + bst r19,5 + bld r4,3 + bst r19,6 + bld r8,3 + bst r19,7 + bld r12,3 + bst r20,0 + bld r22,4 + bst r20,1 + bld r4,4 + bst r20,2 + bld r8,4 + bst r20,3 + bld r12,4 + bst r20,4 + bld r22,5 + bst r20,5 + bld r4,5 + bst r20,6 + bld r8,5 + bst r20,7 + bld r12,5 + bst r21,0 + bld r22,6 + bst r21,1 + bld r4,6 + bst r21,2 + bld r8,6 + bst r21,3 + bld r12,6 + bst r21,4 + bld r22,7 + bst r21,5 + bld r4,7 + bst r21,6 + bld r8,7 + bst r21,7 + bld r12,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r23,0 + bst r18,1 + bld r5,0 + bst r18,2 + bld r9,0 + bst r18,3 + bld r13,0 + bst r18,4 + bld r23,1 + bst r18,5 + bld r5,1 + bst r18,6 + bld r9,1 + bst r18,7 + bld r13,1 + bst r19,0 + bld r23,2 + bst r19,1 + bld r5,2 + bst r19,2 + bld r9,2 + bst r19,3 + bld r13,2 + bst r19,4 + bld r23,3 + bst r19,5 + bld r5,3 + bst r19,6 + bld r9,3 + bst r19,7 + bld r13,3 + bst r20,0 + bld r23,4 + bst r20,1 + bld r5,4 + bst r20,2 + bld r9,4 + bst r20,3 + bld r13,4 + 
bst r20,4 + bld r23,5 + bst r20,5 + bld r5,5 + bst r20,6 + bld r9,5 + bst r20,7 + bld r13,5 + bst r21,0 + bld r23,6 + bst r21,1 + bld r5,6 + bst r21,2 + bld r9,6 + bst r21,3 + bld r13,6 + bst r21,4 + bld r23,7 + bst r21,5 + bld r5,7 + bst r21,6 + bld r9,7 + bst r21,7 + bld r13,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r2,0 + bst r18,1 + bld r6,0 + bst r18,2 + bld r10,0 + bst r18,3 + bld r14,0 + bst r18,4 + bld r2,1 + bst r18,5 + bld r6,1 + bst r18,6 + bld r10,1 + bst r18,7 + bld r14,1 + bst r19,0 + bld r2,2 + bst r19,1 + bld r6,2 + bst r19,2 + bld r10,2 + bst r19,3 + bld r14,2 + bst r19,4 + bld r2,3 + bst r19,5 + bld r6,3 + bst r19,6 + bld r10,3 + bst r19,7 + bld r14,3 + bst r20,0 + bld r2,4 + bst r20,1 + bld r6,4 + bst r20,2 + bld r10,4 + bst r20,3 + bld r14,4 + bst r20,4 + bld r2,5 + bst r20,5 + bld r6,5 + bst r20,6 + bld r10,5 + bst r20,7 + bld r14,5 + bst r21,0 + bld r2,6 + bst r21,1 + bld r6,6 + bst r21,2 + bld r10,6 + bst r21,3 + bld r14,6 + bst r21,4 + bld r2,7 + bst r21,5 + bld r6,7 + bst r21,6 + bld r10,7 + bst r21,7 + bld r14,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r3,0 + bst r18,1 + bld r7,0 + bst r18,2 + bld r11,0 + bst r18,3 + bld r15,0 + bst r18,4 + bld r3,1 + bst r18,5 + bld r7,1 + bst r18,6 + bld r11,1 + bst r18,7 + bld r15,1 + bst r19,0 + bld r3,2 + bst r19,1 + bld r7,2 + bst r19,2 + bld r11,2 + bst r19,3 + bld r15,2 + bst r19,4 + bld r3,3 + bst r19,5 + bld r7,3 + bst r19,6 + bld r11,3 + bst r19,7 + bld r15,3 + bst r20,0 + bld r3,4 + bst r20,1 + bld r7,4 + bst r20,2 + bld r11,4 + bst r20,3 + bld r15,4 + bst r20,4 + bld r3,5 + bst r20,5 + bld r7,5 + bst r20,6 + bld r11,5 + bst r20,7 + bld r15,5 + bst r21,0 + bld r3,6 + bst r21,1 + bld r7,6 + bst r21,2 + bld r11,6 + bst r21,3 + bld r15,6 + bst r21,4 + bld r3,7 + bst r21,5 + bld r7,7 + bst r21,6 + bld r11,7 + bst r21,7 + bld r15,7 + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r17,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + mov r16,r1 +302: + rcall 455f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + rcall 455f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + rcall 455f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol 
r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + rcall 455f + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + ldi r17,40 + cpse r16,r17 + rjmp 302b + rjmp 804f +455: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + movw r18,r22 + movw r20,r2 + mov r0,r4 + and r0,r18 + eor r8,r0 + mov r0,r5 + and r0,r19 + eor r9,r0 + mov r0,r6 + and r0,r20 + eor r10,r0 + mov r0,r7 + and r0,r21 + eor r11,r0 + movw r22,r12 + movw r2,r14 + movw r12,r18 + movw r14,r20 + bst r22,1 + bld r0,0 + bst r22,4 + bld r22,1 + bst r2,0 + bld r22,4 + bst r22,2 + bld r2,0 + bst r23,0 + bld r22,2 + bst r22,3 + bld r23,0 + bst r23,4 + bld r22,3 + bst r2,3 + bld r23,4 + bst r23,6 + bld r2,3 + bst r3,3 + bld r23,6 + bst r23,5 + bld r3,3 + bst r2,7 + bld r23,5 + bst r3,6 + bld r2,7 + bst r3,1 + bld r3,6 + bst r22,5 + bld r3,1 + bst r2,4 + bld r22,5 + bst r2,2 + bld r2,4 + bst r23,2 + bld r2,2 + bst r23,3 + bld r23,2 + bst r23,7 + bld r23,3 + bst r3,7 + bld r23,7 + bst r3,5 + bld r3,7 + bst r2,5 + bld r3,5 + bst r2,6 + bld r2,5 + bst r3,2 + bld r2,6 + bst r23,1 + bld r3,2 + bst r22,7 + bld r23,1 + bst r3,4 + bld r22,7 + bst r2,1 + bld r3,4 + bst r22,6 + bld r2,1 + bst r3,0 + bld r22,6 + bst r0,0 + bld r3,0 + bst r4,0 + bld r0,0 + bst r4,1 + bld r4,0 + bst r4,5 + bld r4,1 + bst r6,5 + bld r4,5 + bst r6,7 + bld r6,5 + bst r7,7 + bld r6,7 + bst r7,6 + bld r7,7 + bst r7,2 + bld r7,6 + bst r5,2 + bld r7,2 + bst r5,0 + bld r5,2 + bst r0,0 + bld r5,0 + bst r4,2 + bld r0,0 + bst r5,1 + bld r4,2 + bst r4,4 + bld r5,1 + bst r6,1 + bld r4,4 + bst r4,7 + bld r6,1 + bst r7,5 + bld r4,7 + bst r6,6 + bld r7,5 + bst r7,3 + bld r6,6 + bst r5,6 + bld r7,3 + bst r7,0 + bld r5,6 + bst r0,0 + bld r7,0 + bst r4,3 + bld r0,0 + bst r5,5 + bld r4,3 + bst r6,4 + bld r5,5 + bst r6,3 + bld r6,4 + bst r5,7 + bld r6,3 + bst r7,4 + bld r5,7 + bst r6,2 + bld r7,4 + bst r5,3 + bld r6,2 + bst r5,4 + bld r5,3 + bst r6,0 + bld r5,4 + bst r0,0 + bld r6,0 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r8,2 + bld r8,0 + bst r9,2 + bld r8,2 + bst r9,1 + bld r9,2 + bst r8,5 + bld r9,1 + bst r10,6 + bld r8,5 + bst r11,0 + bld r10,6 + bst r8,3 + bld r11,0 + bst r9,6 + bld r8,3 + bst r11,1 + bld r9,6 + bst r8,7 + bld r11,1 + bst r11,6 + bld r8,7 + bst r11,3 + bld r11,6 + bst r9,7 + bld r11,3 + bst r11,5 + bld r9,7 + bst r10,7 + bld r11,5 + bst r11,4 + bld r10,7 + bst r10,3 + bld r11,4 + bst r9,4 + bld r10,3 + bst r10,1 + 
bld r9,4 + bst r8,4 + bld r10,1 + bst r10,2 + bld r8,4 + bst r9,0 + bld r10,2 + bst r8,1 + bld r9,0 + bst r8,6 + bld r8,1 + bst r11,2 + bld r8,6 + bst r9,3 + bld r11,2 + bst r9,5 + bld r9,3 + bst r10,5 + bld r9,5 + bst r10,4 + bld r10,5 + bst r10,0 + bld r10,4 + bst r0,0 + bld r10,0 + bst r12,0 + bld r0,0 + bst r12,3 + bld r12,0 + bst r13,7 + bld r12,3 + bst r15,6 + bld r13,7 + bst r15,0 + bld r15,6 + bst r0,0 + bld r15,0 + bst r12,1 + bld r0,0 + bst r12,7 + bld r12,1 + bst r15,7 + bld r12,7 + bst r15,4 + bld r15,7 + bst r14,0 + bld r15,4 + bst r0,0 + bld r14,0 + bst r12,2 + bld r0,0 + bst r13,3 + bld r12,2 + bst r13,6 + bld r13,3 + bst r15,2 + bld r13,6 + bst r13,0 + bld r15,2 + bst r0,0 + bld r13,0 + bst r12,4 + bld r0,0 + bst r14,3 + bld r12,4 + bst r13,5 + bld r14,3 + bst r14,6 + bld r13,5 + bst r15,1 + bld r14,6 + bst r0,0 + bld r15,1 + bst r12,5 + bld r0,0 + bst r14,7 + bld r12,5 + bst r15,5 + bld r14,7 + bst r14,4 + bld r15,5 + bst r14,1 + bld r14,4 + bst r0,0 + bld r14,1 + bst r12,6 + bld r0,0 + bst r15,3 + bld r12,6 + bst r13,4 + bld r15,3 + bst r14,2 + bld r13,4 + bst r13,1 + bld r14,2 + bst r0,0 + bld r13,1 + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + inc r16 + ret +804: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + bst r22,0 + bld r18,0 + bst r4,0 + bld r18,1 + bst r8,0 + bld r18,2 + bst r12,0 + bld r18,3 + bst r22,1 + bld r18,4 + bst r4,1 + bld r18,5 + bst r8,1 + bld r18,6 + bst r12,1 + bld r18,7 + bst r22,2 + bld r19,0 + bst r4,2 + bld r19,1 + bst r8,2 + bld r19,2 + bst r12,2 + bld r19,3 + bst r22,3 + bld r19,4 + bst r4,3 + bld r19,5 + bst r8,3 + bld r19,6 + bst r12,3 + bld r19,7 + bst r22,4 + bld r20,0 + bst r4,4 + bld r20,1 + bst r8,4 + bld r20,2 + bst r12,4 + bld r20,3 + bst r22,5 + bld r20,4 + bst r4,5 + bld r20,5 + bst r8,5 + bld r20,6 + bst r12,5 + bld r20,7 + bst r22,6 + bld r21,0 + bst r4,6 + bld r21,1 + bst r8,6 + bld r21,2 + bst r12,6 + bld r21,3 + bst r22,7 + bld r21,4 + bst r4,7 + bld r21,5 + bst r8,7 + bld r21,6 + bst r12,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r23,0 + bld r18,0 + bst r5,0 + bld r18,1 + bst r9,0 + bld r18,2 + bst r13,0 + bld r18,3 + bst r23,1 + bld r18,4 + bst r5,1 + bld r18,5 + bst r9,1 + bld r18,6 + bst r13,1 + bld r18,7 + bst r23,2 + bld r19,0 + bst r5,2 + bld r19,1 + bst r9,2 + bld r19,2 + bst r13,2 + bld r19,3 + bst r23,3 + bld r19,4 + bst r5,3 + bld r19,5 + bst r9,3 + bld r19,6 + bst r13,3 + bld r19,7 + bst r23,4 + bld r20,0 + bst r5,4 + bld r20,1 + bst r9,4 + bld r20,2 + bst r13,4 + bld r20,3 + bst r23,5 + bld r20,4 + bst r5,5 + bld r20,5 + bst r9,5 + bld r20,6 + bst r13,5 + bld r20,7 + bst r23,6 + bld r21,0 + bst r5,6 + bld r21,1 + bst r9,6 + bld r21,2 + bst r13,6 + bld r21,3 + bst r23,7 + bld r21,4 + bst r5,7 + bld r21,5 + bst r9,7 + bld r21,6 + bst r13,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r2,0 + bld r18,0 + bst r6,0 + bld r18,1 + bst r10,0 + bld r18,2 + bst r14,0 + bld r18,3 + bst r2,1 + bld r18,4 + bst r6,1 + bld r18,5 + bst r10,1 + bld r18,6 + bst r14,1 + bld r18,7 + bst r2,2 + bld r19,0 + bst r6,2 + bld r19,1 + bst r10,2 + bld r19,2 + bst r14,2 + bld r19,3 + bst r2,3 + bld r19,4 + bst r6,3 + bld r19,5 + bst r10,3 + bld r19,6 + bst r14,3 + bld r19,7 + bst r2,4 + bld r20,0 + bst r6,4 + bld r20,1 + bst r10,4 + bld 
r20,2 + bst r14,4 + bld r20,3 + bst r2,5 + bld r20,4 + bst r6,5 + bld r20,5 + bst r10,5 + bld r20,6 + bst r14,5 + bld r20,7 + bst r2,6 + bld r21,0 + bst r6,6 + bld r21,1 + bst r10,6 + bld r21,2 + bst r14,6 + bld r21,3 + bst r2,7 + bld r21,4 + bst r6,7 + bld r21,5 + bst r10,7 + bld r21,6 + bst r14,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r3,0 + bld r18,0 + bst r7,0 + bld r18,1 + bst r11,0 + bld r18,2 + bst r15,0 + bld r18,3 + bst r3,1 + bld r18,4 + bst r7,1 + bld r18,5 + bst r11,1 + bld r18,6 + bst r15,1 + bld r18,7 + bst r3,2 + bld r19,0 + bst r7,2 + bld r19,1 + bst r11,2 + bld r19,2 + bst r15,2 + bld r19,3 + bst r3,3 + bld r19,4 + bst r7,3 + bld r19,5 + bst r11,3 + bld r19,6 + bst r15,3 + bld r19,7 + bst r3,4 + bld r20,0 + bst r7,4 + bld r20,1 + bst r11,4 + bld r20,2 + bst r15,4 + bld r20,3 + bst r3,5 + bld r20,4 + bst r7,5 + bld r20,5 + bst r11,5 + bld r20,6 + bst r15,5 + bld r20,7 + bst r3,6 + bld r21,0 + bst r7,6 + bld r21,1 + bst r11,6 + bld r21,2 + bst r15,6 + bld r21,3 + bst r3,7 + bld r21,4 + bst r7,7 + bld r21,5 + bst r11,7 + bld r21,6 + bst r15,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128n_encrypt, .-gift128n_encrypt + + .text +.global gift128n_decrypt + .type gift128n_decrypt, @function +gift128n_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r22,0 + bst r18,1 + bld r4,0 + bst r18,2 + bld r8,0 + bst r18,3 + bld r12,0 + bst r18,4 + bld r22,1 + bst r18,5 + bld r4,1 + bst r18,6 + bld r8,1 + bst r18,7 + bld r12,1 + bst r19,0 + bld r22,2 + bst r19,1 + bld r4,2 + bst r19,2 + bld r8,2 + bst r19,3 + bld r12,2 + bst r19,4 + bld r22,3 + bst r19,5 + bld r4,3 + bst r19,6 + bld r8,3 + bst r19,7 + bld r12,3 + bst r20,0 + bld r22,4 + bst r20,1 + bld r4,4 + bst r20,2 + bld r8,4 + bst r20,3 + bld r12,4 + bst r20,4 + bld r22,5 + bst r20,5 + bld r4,5 + bst r20,6 + bld r8,5 + bst r20,7 + bld r12,5 + bst r21,0 + bld r22,6 + bst r21,1 + bld r4,6 + bst r21,2 + bld r8,6 + bst r21,3 + bld r12,6 + bst r21,4 + bld r22,7 + bst r21,5 + bld r4,7 + bst r21,6 + bld r8,7 + bst r21,7 + bld r12,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r23,0 + bst r18,1 + bld r5,0 + bst r18,2 + bld r9,0 + bst r18,3 + bld r13,0 + bst r18,4 + bld r23,1 + bst r18,5 + bld r5,1 + bst r18,6 + bld r9,1 + bst r18,7 + bld r13,1 + bst r19,0 + bld r23,2 + bst r19,1 + bld r5,2 + bst r19,2 + bld r9,2 + bst r19,3 + bld r13,2 + bst r19,4 + bld r23,3 + bst r19,5 + bld r5,3 + bst r19,6 + bld r9,3 + bst r19,7 + bld r13,3 + bst r20,0 + bld r23,4 + bst r20,1 + bld r5,4 + bst r20,2 + bld r9,4 + bst r20,3 + bld r13,4 + bst r20,4 + bld r23,5 + bst r20,5 + bld r5,5 + bst r20,6 + bld r9,5 + bst r20,7 + bld r13,5 + bst r21,0 + bld r23,6 + bst r21,1 + bld r5,6 + bst r21,2 + bld r9,6 + bst r21,3 + bld r13,6 + bst r21,4 + bld r23,7 + bst r21,5 + bld r5,7 + bst r21,6 + bld r9,7 + bst r21,7 + bld r13,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld 
r21,X+ + bst r18,0 + bld r2,0 + bst r18,1 + bld r6,0 + bst r18,2 + bld r10,0 + bst r18,3 + bld r14,0 + bst r18,4 + bld r2,1 + bst r18,5 + bld r6,1 + bst r18,6 + bld r10,1 + bst r18,7 + bld r14,1 + bst r19,0 + bld r2,2 + bst r19,1 + bld r6,2 + bst r19,2 + bld r10,2 + bst r19,3 + bld r14,2 + bst r19,4 + bld r2,3 + bst r19,5 + bld r6,3 + bst r19,6 + bld r10,3 + bst r19,7 + bld r14,3 + bst r20,0 + bld r2,4 + bst r20,1 + bld r6,4 + bst r20,2 + bld r10,4 + bst r20,3 + bld r14,4 + bst r20,4 + bld r2,5 + bst r20,5 + bld r6,5 + bst r20,6 + bld r10,5 + bst r20,7 + bld r14,5 + bst r21,0 + bld r2,6 + bst r21,1 + bld r6,6 + bst r21,2 + bld r10,6 + bst r21,3 + bld r14,6 + bst r21,4 + bld r2,7 + bst r21,5 + bld r6,7 + bst r21,6 + bld r10,7 + bst r21,7 + bld r14,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r3,0 + bst r18,1 + bld r7,0 + bst r18,2 + bld r11,0 + bst r18,3 + bld r15,0 + bst r18,4 + bld r3,1 + bst r18,5 + bld r7,1 + bst r18,6 + bld r11,1 + bst r18,7 + bld r15,1 + bst r19,0 + bld r3,2 + bst r19,1 + bld r7,2 + bst r19,2 + bld r11,2 + bst r19,3 + bld r15,2 + bst r19,4 + bld r3,3 + bst r19,5 + bld r7,3 + bst r19,6 + bld r11,3 + bst r19,7 + bld r15,3 + bst r20,0 + bld r3,4 + bst r20,1 + bld r7,4 + bst r20,2 + bld r11,4 + bst r20,3 + bld r15,4 + bst r20,4 + bld r3,5 + bst r20,5 + bld r7,5 + bst r20,6 + bld r11,5 + bst r20,7 + bld r15,5 + bst r21,0 + bld r3,6 + bst r21,1 + bld r7,6 + bst r21,2 + bld r11,6 + bst r21,3 + bld r15,6 + bst r21,4 + bld r3,7 + bst r21,5 + bld r7,7 + bst r21,6 + bld r11,7 + bst r21,7 + bld r15,7 + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r17,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +370: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 522f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + mov r0,r1 + lsr r27 + ror r26 + 
ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 522f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 522f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 522f + cpse r16,r1 + rjmp 370b + rjmp 867f +522: + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 + bst r7,6 + bld r7,2 + bst r7,7 + bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld r11,0 + bst r8,5 + bld r10,6 + bst 
r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +867: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + bst r22,0 + bld r18,0 + bst r4,0 + bld r18,1 + bst r8,0 + bld r18,2 + bst r12,0 + bld r18,3 + bst r22,1 + bld r18,4 + bst r4,1 + bld r18,5 + bst r8,1 + bld r18,6 + bst r12,1 + bld r18,7 + bst r22,2 + bld r19,0 + bst r4,2 + bld r19,1 + bst r8,2 + bld r19,2 + bst r12,2 + bld r19,3 + bst r22,3 + bld r19,4 + bst r4,3 + bld r19,5 + bst r8,3 + bld r19,6 + bst r12,3 + bld r19,7 + bst r22,4 + bld r20,0 + bst r4,4 + bld r20,1 + bst r8,4 + bld r20,2 + bst r12,4 + bld r20,3 + bst r22,5 + bld r20,4 + bst r4,5 + bld r20,5 + bst r8,5 + bld r20,6 + bst r12,5 + bld r20,7 + bst r22,6 + bld r21,0 + bst r4,6 + bld r21,1 + bst r8,6 + bld r21,2 + bst r12,6 + bld r21,3 + bst r22,7 + bld r21,4 + bst r4,7 + bld r21,5 + bst r8,7 + bld r21,6 + bst r12,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r23,0 + bld r18,0 + bst r5,0 + bld r18,1 + bst r9,0 + bld r18,2 + bst r13,0 + bld r18,3 + bst r23,1 + bld r18,4 + bst r5,1 + bld r18,5 + bst r9,1 + bld r18,6 + bst r13,1 + bld r18,7 + bst r23,2 + bld r19,0 + bst r5,2 + bld r19,1 + bst r9,2 + bld r19,2 + bst r13,2 + bld r19,3 + bst r23,3 + bld r19,4 + bst r5,3 + bld r19,5 + bst r9,3 + bld r19,6 + bst r13,3 + bld r19,7 + bst r23,4 + bld r20,0 + bst r5,4 + bld r20,1 + bst r9,4 + bld r20,2 + bst r13,4 + bld r20,3 + bst r23,5 + bld r20,4 + bst r5,5 + bld r20,5 + bst r9,5 + bld r20,6 + bst r13,5 + bld r20,7 + bst r23,6 + bld r21,0 + bst r5,6 + bld r21,1 + bst r9,6 + bld r21,2 + bst r13,6 + bld r21,3 + bst r23,7 + bld r21,4 + bst r5,7 + bld r21,5 + bst r9,7 + bld r21,6 + bst r13,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r2,0 + bld r18,0 + bst r6,0 + bld r18,1 + bst r10,0 + bld r18,2 + bst r14,0 + bld r18,3 + bst r2,1 + bld r18,4 + bst r6,1 + 
bld r18,5 + bst r10,1 + bld r18,6 + bst r14,1 + bld r18,7 + bst r2,2 + bld r19,0 + bst r6,2 + bld r19,1 + bst r10,2 + bld r19,2 + bst r14,2 + bld r19,3 + bst r2,3 + bld r19,4 + bst r6,3 + bld r19,5 + bst r10,3 + bld r19,6 + bst r14,3 + bld r19,7 + bst r2,4 + bld r20,0 + bst r6,4 + bld r20,1 + bst r10,4 + bld r20,2 + bst r14,4 + bld r20,3 + bst r2,5 + bld r20,4 + bst r6,5 + bld r20,5 + bst r10,5 + bld r20,6 + bst r14,5 + bld r20,7 + bst r2,6 + bld r21,0 + bst r6,6 + bld r21,1 + bst r10,6 + bld r21,2 + bst r14,6 + bld r21,3 + bst r2,7 + bld r21,4 + bst r6,7 + bld r21,5 + bst r10,7 + bld r21,6 + bst r14,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r3,0 + bld r18,0 + bst r7,0 + bld r18,1 + bst r11,0 + bld r18,2 + bst r15,0 + bld r18,3 + bst r3,1 + bld r18,4 + bst r7,1 + bld r18,5 + bst r11,1 + bld r18,6 + bst r15,1 + bld r18,7 + bst r3,2 + bld r19,0 + bst r7,2 + bld r19,1 + bst r11,2 + bld r19,2 + bst r15,2 + bld r19,3 + bst r3,3 + bld r19,4 + bst r7,3 + bld r19,5 + bst r11,3 + bld r19,6 + bst r15,3 + bld r19,7 + bst r3,4 + bld r20,0 + bst r7,4 + bld r20,1 + bst r11,4 + bld r20,2 + bst r15,4 + bld r20,3 + bst r3,5 + bld r20,4 + bst r7,5 + bld r20,5 + bst r11,5 + bld r20,6 + bst r15,5 + bld r20,7 + bst r3,6 + bld r21,0 + bst r7,6 + bld r21,1 + bst r11,6 + bld r21,2 + bst r15,6 + bld r21,3 + bst r3,7 + bld r21,4 + bst r7,7 + bld r21,5 + bst r11,7 + bld r21,6 + bst r15,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128n_decrypt, .-gift128n_decrypt + + .text +.global gift128t_encrypt + .type gift128t_encrypt, @function +gift128t_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 36 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r2,0 + bst r20,1 + bld r6,0 + bst r20,2 + bld r10,0 + bst r20,3 + bld r14,0 + bst r20,4 + bld r2,1 + bst r20,5 + bld r6,1 + bst r20,6 + bld r10,1 + bst r20,7 + bld r14,1 + bst r21,0 + bld r2,2 + bst r21,1 + bld r6,2 + bst r21,2 + bld r10,2 + bst r21,3 + bld r14,2 + bst r21,4 + bld r2,3 + bst r21,5 + bld r6,3 + bst r21,6 + bld r10,3 + bst r21,7 + bld r14,3 + bst r22,0 + bld r2,4 + bst r22,1 + bld r6,4 + bst r22,2 + bld r10,4 + bst r22,3 + bld r14,4 + bst r22,4 + bld r2,5 + bst r22,5 + bld r6,5 + bst r22,6 + bld r10,5 + bst r22,7 + bld r14,5 + bst r23,0 + bld r2,6 + bst r23,1 + bld r6,6 + bst r23,2 + bld r10,6 + bst r23,3 + bld r14,6 + bst r23,4 + bld r2,7 + bst r23,5 + bld r6,7 + bst r23,6 + bld r10,7 + bst r23,7 + bld r14,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r3,0 + bst r20,1 + bld r7,0 + bst r20,2 + bld r11,0 + bst r20,3 + bld r15,0 + bst r20,4 + bld r3,1 + bst r20,5 + bld r7,1 + bst r20,6 + bld r11,1 + bst r20,7 + bld r15,1 + bst r21,0 + bld r3,2 + bst r21,1 + bld r7,2 + bst r21,2 + bld r11,2 + bst r21,3 + bld r15,2 + bst r21,4 + bld r3,3 + bst r21,5 + bld r7,3 + bst r21,6 + bld r11,3 + bst r21,7 + bld r15,3 + bst r22,0 + bld r3,4 + bst r22,1 + bld r7,4 + bst r22,2 + bld r11,4 + bst r22,3 + bld r15,4 + bst r22,4 + 
bld r3,5 + bst r22,5 + bld r7,5 + bst r22,6 + bld r11,5 + bst r22,7 + bld r15,5 + bst r23,0 + bld r3,6 + bst r23,1 + bld r7,6 + bst r23,2 + bld r11,6 + bst r23,3 + bld r15,6 + bst r23,4 + bld r3,7 + bst r23,5 + bld r7,7 + bst r23,6 + bld r11,7 + bst r23,7 + bld r15,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r4,0 + bst r20,1 + bld r8,0 + bst r20,2 + bld r12,0 + bst r20,3 + bld r24,0 + bst r20,4 + bld r4,1 + bst r20,5 + bld r8,1 + bst r20,6 + bld r12,1 + bst r20,7 + bld r24,1 + bst r21,0 + bld r4,2 + bst r21,1 + bld r8,2 + bst r21,2 + bld r12,2 + bst r21,3 + bld r24,2 + bst r21,4 + bld r4,3 + bst r21,5 + bld r8,3 + bst r21,6 + bld r12,3 + bst r21,7 + bld r24,3 + bst r22,0 + bld r4,4 + bst r22,1 + bld r8,4 + bst r22,2 + bld r12,4 + bst r22,3 + bld r24,4 + bst r22,4 + bld r4,5 + bst r22,5 + bld r8,5 + bst r22,6 + bld r12,5 + bst r22,7 + bld r24,5 + bst r23,0 + bld r4,6 + bst r23,1 + bld r8,6 + bst r23,2 + bld r12,6 + bst r23,3 + bld r24,6 + bst r23,4 + bld r4,7 + bst r23,5 + bld r8,7 + bst r23,6 + bld r12,7 + bst r23,7 + bld r24,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r5,0 + bst r20,1 + bld r9,0 + bst r20,2 + bld r13,0 + bst r20,3 + bld r25,0 + bst r20,4 + bld r5,1 + bst r20,5 + bld r9,1 + bst r20,6 + bld r13,1 + bst r20,7 + bld r25,1 + bst r21,0 + bld r5,2 + bst r21,1 + bld r9,2 + bst r21,2 + bld r13,2 + bst r21,3 + bld r25,2 + bst r21,4 + bld r5,3 + bst r21,5 + bld r9,3 + bst r21,6 + bld r13,3 + bst r21,7 + bld r25,3 + bst r22,0 + bld r5,4 + bst r22,1 + bld r9,4 + bst r22,2 + bld r13,4 + bst r22,3 + bld r25,4 + bst r22,4 + bld r5,5 + bst r22,5 + bld r9,5 + bst r22,6 + bld r13,5 + bst r22,7 + bld r25,5 + bst r23,0 + bld r5,6 + bst r23,1 + bld r9,6 + bst r23,2 + bld r13,6 + bst r23,3 + bld r25,6 + bst r23,4 + bld r5,7 + bst r23,5 + bld r9,7 + bst r23,6 + bld r13,7 + bst r23,7 + bld r25,7 + ld r26,Z + ldd r27,Z+1 + ldd r16,Z+2 + ldd r17,Z+3 + std Y+1,r26 + std Y+2,r27 + std Y+3,r16 + std Y+4,r17 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r16,Z+6 + ldd r17,Z+7 + std Y+5,r26 + std Y+6,r27 + std Y+7,r16 + std Y+8,r17 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r16,Z+10 + ldd r17,Z+11 + std Y+9,r26 + std Y+10,r27 + std Y+11,r16 + std Y+12,r17 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r16,Z+14 + ldd r17,Z+15 + std Y+13,r26 + std Y+14,r27 + std Y+15,r16 + std Y+16,r17 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r19,r1 + mov r26,r1 +307: + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + com r14 + com r15 + com r24 + com r25 + movw r20,r2 + movw r22,r4 + mov r0,r6 + and r0,r20 + eor r10,r0 + mov r0,r7 + and r0,r21 + eor r11,r0 + mov r0,r8 + and r0,r22 + eor r12,r0 + mov r0,r9 + and r0,r23 + eor r13,r0 + movw r2,r14 + movw r4,r24 + movw r14,r20 + movw r24,r22 + bst r2,1 + bld r0,0 + bst r2,4 + bld r2,1 + bst r4,0 + bld r2,4 + bst r2,2 + bld r4,0 + bst r3,0 + bld r2,2 + bst r2,3 + bld r3,0 + bst r3,4 + bld r2,3 + bst r4,3 + bld r3,4 + bst r3,6 + bld r4,3 + bst 
r5,3 + bld r3,6 + bst r3,5 + bld r5,3 + bst r4,7 + bld r3,5 + bst r5,6 + bld r4,7 + bst r5,1 + bld r5,6 + bst r2,5 + bld r5,1 + bst r4,4 + bld r2,5 + bst r4,2 + bld r4,4 + bst r3,2 + bld r4,2 + bst r3,3 + bld r3,2 + bst r3,7 + bld r3,3 + bst r5,7 + bld r3,7 + bst r5,5 + bld r5,7 + bst r4,5 + bld r5,5 + bst r4,6 + bld r4,5 + bst r5,2 + bld r4,6 + bst r3,1 + bld r5,2 + bst r2,7 + bld r3,1 + bst r5,4 + bld r2,7 + bst r4,1 + bld r5,4 + bst r2,6 + bld r4,1 + bst r5,0 + bld r2,6 + bst r0,0 + bld r5,0 + bst r6,0 + bld r0,0 + bst r6,1 + bld r6,0 + bst r6,5 + bld r6,1 + bst r8,5 + bld r6,5 + bst r8,7 + bld r8,5 + bst r9,7 + bld r8,7 + bst r9,6 + bld r9,7 + bst r9,2 + bld r9,6 + bst r7,2 + bld r9,2 + bst r7,0 + bld r7,2 + bst r0,0 + bld r7,0 + bst r6,2 + bld r0,0 + bst r7,1 + bld r6,2 + bst r6,4 + bld r7,1 + bst r8,1 + bld r6,4 + bst r6,7 + bld r8,1 + bst r9,5 + bld r6,7 + bst r8,6 + bld r9,5 + bst r9,3 + bld r8,6 + bst r7,6 + bld r9,3 + bst r9,0 + bld r7,6 + bst r0,0 + bld r9,0 + bst r6,3 + bld r0,0 + bst r7,5 + bld r6,3 + bst r8,4 + bld r7,5 + bst r8,3 + bld r8,4 + bst r7,7 + bld r8,3 + bst r9,4 + bld r7,7 + bst r8,2 + bld r9,4 + bst r7,3 + bld r8,2 + bst r7,4 + bld r7,3 + bst r8,0 + bld r7,4 + bst r0,0 + bld r8,0 + bst r6,6 + bld r0,0 + bst r9,1 + bld r6,6 + bst r0,0 + bld r9,1 + bst r10,0 + bld r0,0 + bst r10,2 + bld r10,0 + bst r11,2 + bld r10,2 + bst r11,1 + bld r11,2 + bst r10,5 + bld r11,1 + bst r12,6 + bld r10,5 + bst r13,0 + bld r12,6 + bst r10,3 + bld r13,0 + bst r11,6 + bld r10,3 + bst r13,1 + bld r11,6 + bst r10,7 + bld r13,1 + bst r13,6 + bld r10,7 + bst r13,3 + bld r13,6 + bst r11,7 + bld r13,3 + bst r13,5 + bld r11,7 + bst r12,7 + bld r13,5 + bst r13,4 + bld r12,7 + bst r12,3 + bld r13,4 + bst r11,4 + bld r12,3 + bst r12,1 + bld r11,4 + bst r10,4 + bld r12,1 + bst r12,2 + bld r10,4 + bst r11,0 + bld r12,2 + bst r10,1 + bld r11,0 + bst r10,6 + bld r10,1 + bst r13,2 + bld r10,6 + bst r11,3 + bld r13,2 + bst r11,5 + bld r11,3 + bst r12,5 + bld r11,5 + bst r12,4 + bld r12,5 + bst r12,0 + bld r12,4 + bst r0,0 + bld r12,0 + bst r14,0 + bld r0,0 + bst r14,3 + bld r14,0 + bst r15,7 + bld r14,3 + bst r25,6 + bld r15,7 + bst r25,0 + bld r25,6 + bst r0,0 + bld r25,0 + bst r14,1 + bld r0,0 + bst r14,7 + bld r14,1 + bst r25,7 + bld r14,7 + bst r25,4 + bld r25,7 + bst r24,0 + bld r25,4 + bst r0,0 + bld r24,0 + bst r14,2 + bld r0,0 + bst r15,3 + bld r14,2 + bst r15,6 + bld r15,3 + bst r25,2 + bld r15,6 + bst r15,0 + bld r25,2 + bst r0,0 + bld r15,0 + bst r14,4 + bld r0,0 + bst r24,3 + bld r14,4 + bst r15,5 + bld r24,3 + bst r24,6 + bld r15,5 + bst r25,1 + bld r24,6 + bst r0,0 + bld r25,1 + bst r14,5 + bld r0,0 + bst r24,7 + bld r14,5 + bst r25,5 + bld r24,7 + bst r24,4 + bld r25,5 + bst r24,1 + bld r24,4 + bst r0,0 + bld r24,1 + bst r14,6 + bld r0,0 + bst r25,3 + bld r14,6 + bst r15,4 + bld r25,3 + bst r24,2 + bld r15,4 + bst r15,1 + bld r24,2 + bst r0,0 + bld r15,1 + ldd r0,Y+5 + eor r10,r0 + ldd r0,Y+6 + eor r11,r0 + ldd r0,Y+7 + eor r12,r0 + ldd r0,Y+8 + eor r13,r0 + ldd r20,Y+13 + ldd r21,Y+14 + ldd r22,Y+15 + ldd r23,Y+16 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + lsl r20 + rol r21 + adc r20,r1 + lsl r20 + rol r21 + adc r20,r1 + lsl r20 + rol r21 + adc r20,r1 + lsl r20 + rol r21 + adc r20,r1 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + ldd r0,Y+1 + std Y+1,r20 + ldd r20,Y+5 + std Y+5,r0 + ldd r0,Y+9 + std Y+9,r20 + std Y+13,r0 + ldd r0,Y+2 + std Y+2,r21 + ldd r21,Y+6 + std Y+6,r0 + ldd r0,Y+10 + std Y+10,r21 + std Y+14,r0 + ldd r0,Y+3 + std 
Y+3,r22 + ldd r22,Y+7 + std Y+7,r0 + ldd r0,Y+11 + std Y+11,r22 + std Y+15,r0 + ldd r0,Y+4 + std Y+4,r23 + ldd r23,Y+8 + std Y+8,r0 + ldd r0,Y+12 + std Y+12,r23 + std Y+16,r0 + ldi r20,128 + eor r25,r20 + mov r30,r19 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + eor r14,r20 + inc r19 + cpi r19,40 + breq 727f + inc r26 + ldi r27,5 + cpse r26,r27 + rjmp 307b + mov r26,r1 + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rjmp 307b +727: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + bst r2,0 + bld r20,0 + bst r6,0 + bld r20,1 + bst r10,0 + bld r20,2 + bst r14,0 + bld r20,3 + bst r2,1 + bld r20,4 + bst r6,1 + bld r20,5 + bst r10,1 + bld r20,6 + bst r14,1 + bld r20,7 + bst r2,2 + bld r21,0 + bst r6,2 + bld r21,1 + bst r10,2 + bld r21,2 + bst r14,2 + bld r21,3 + bst r2,3 + bld r21,4 + bst r6,3 + bld r21,5 + bst r10,3 + bld r21,6 + bst r14,3 + bld r21,7 + bst r2,4 + bld r22,0 + bst r6,4 + bld r22,1 + bst r10,4 + bld r22,2 + bst r14,4 + bld r22,3 + bst r2,5 + bld r22,4 + bst r6,5 + bld r22,5 + bst r10,5 + bld r22,6 + bst r14,5 + bld r22,7 + bst r2,6 + bld r23,0 + bst r6,6 + bld r23,1 + bst r10,6 + bld r23,2 + bst r14,6 + bld r23,3 + bst r2,7 + bld r23,4 + bst r6,7 + bld r23,5 + bst r10,7 + bld r23,6 + bst r14,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r3,0 + bld r20,0 + bst r7,0 + bld r20,1 + bst r11,0 + bld r20,2 + bst r15,0 + bld r20,3 + bst r3,1 + bld r20,4 + bst r7,1 + bld r20,5 + bst r11,1 + bld r20,6 + bst r15,1 + bld r20,7 + bst r3,2 + bld r21,0 + bst r7,2 + bld r21,1 + bst r11,2 + bld r21,2 + bst r15,2 + bld r21,3 + bst r3,3 + bld r21,4 + bst r7,3 + bld r21,5 + bst r11,3 + bld r21,6 + bst r15,3 + bld r21,7 + bst r3,4 + bld r22,0 + bst r7,4 + bld r22,1 + bst r11,4 + bld r22,2 + bst r15,4 + bld r22,3 + bst r3,5 + bld r22,4 + bst r7,5 + bld r22,5 + bst r11,5 + bld r22,6 + bst r15,5 + bld r22,7 + bst r3,6 + bld r23,0 + bst r7,6 + bld r23,1 + bst r11,6 + bld r23,2 + bst r15,6 + bld r23,3 + bst r3,7 + bld r23,4 + bst r7,7 + bld r23,5 + bst r11,7 + bld r23,6 + bst r15,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r4,0 + bld r20,0 + bst r8,0 + bld r20,1 + bst r12,0 + bld r20,2 + bst r24,0 + bld r20,3 + bst r4,1 + bld r20,4 + bst r8,1 + bld r20,5 + bst r12,1 + bld r20,6 + bst r24,1 + bld r20,7 + bst r4,2 + bld r21,0 + bst r8,2 + bld r21,1 + bst r12,2 + bld r21,2 + bst r24,2 + bld r21,3 + bst r4,3 + bld r21,4 + bst r8,3 + bld r21,5 + bst r12,3 + bld r21,6 + bst r24,3 + bld r21,7 + bst r4,4 + bld r22,0 + bst r8,4 + bld r22,1 + bst r12,4 + bld r22,2 + bst r24,4 + bld r22,3 + bst r4,5 + bld r22,4 + bst r8,5 + bld r22,5 + bst r12,5 + bld r22,6 + bst r24,5 + bld r22,7 + bst r4,6 + bld r23,0 + bst r8,6 + bld r23,1 + bst r12,6 + bld r23,2 + bst r24,6 + bld r23,3 + bst r4,7 + bld r23,4 + bst r8,7 + bld r23,5 + bst r12,7 + bld r23,6 + bst r24,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r5,0 + bld r20,0 + bst r9,0 + bld r20,1 + bst r13,0 + bld r20,2 + bst r25,0 + bld r20,3 + bst r5,1 + bld r20,4 + bst r9,1 + bld r20,5 + bst r13,1 + bld r20,6 + bst r25,1 + bld r20,7 + bst r5,2 + bld r21,0 + bst r9,2 + bld r21,1 + bst r13,2 + bld r21,2 + bst r25,2 + bld r21,3 + bst r5,3 + bld r21,4 + bst r9,3 + bld r21,5 + bst r13,3 + bld r21,6 + bst r25,3 + bld r21,7 + bst r5,4 + bld r22,0 + bst r9,4 + bld r22,1 + bst r13,4 + bld r22,2 + bst r25,4 + bld r22,3 + bst r5,5 + bld r22,4 + bst r9,5 + 
bld r22,5 + bst r13,5 + bld r22,6 + bst r25,5 + bld r22,7 + bst r5,6 + bld r23,0 + bst r9,6 + bld r23,1 + bst r13,6 + bld r23,2 + bst r25,6 + bld r23,3 + bst r5,7 + bld r23,4 + bst r9,7 + bld r23,5 + bst r13,7 + bld r23,6 + bst r25,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128t_encrypt, .-gift128t_encrypt + + .text +.global gift128t_decrypt + .type gift128t_decrypt, @function +gift128t_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 36 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r2,0 + bst r20,1 + bld r6,0 + bst r20,2 + bld r10,0 + bst r20,3 + bld r14,0 + bst r20,4 + bld r2,1 + bst r20,5 + bld r6,1 + bst r20,6 + bld r10,1 + bst r20,7 + bld r14,1 + bst r21,0 + bld r2,2 + bst r21,1 + bld r6,2 + bst r21,2 + bld r10,2 + bst r21,3 + bld r14,2 + bst r21,4 + bld r2,3 + bst r21,5 + bld r6,3 + bst r21,6 + bld r10,3 + bst r21,7 + bld r14,3 + bst r22,0 + bld r2,4 + bst r22,1 + bld r6,4 + bst r22,2 + bld r10,4 + bst r22,3 + bld r14,4 + bst r22,4 + bld r2,5 + bst r22,5 + bld r6,5 + bst r22,6 + bld r10,5 + bst r22,7 + bld r14,5 + bst r23,0 + bld r2,6 + bst r23,1 + bld r6,6 + bst r23,2 + bld r10,6 + bst r23,3 + bld r14,6 + bst r23,4 + bld r2,7 + bst r23,5 + bld r6,7 + bst r23,6 + bld r10,7 + bst r23,7 + bld r14,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r3,0 + bst r20,1 + bld r7,0 + bst r20,2 + bld r11,0 + bst r20,3 + bld r15,0 + bst r20,4 + bld r3,1 + bst r20,5 + bld r7,1 + bst r20,6 + bld r11,1 + bst r20,7 + bld r15,1 + bst r21,0 + bld r3,2 + bst r21,1 + bld r7,2 + bst r21,2 + bld r11,2 + bst r21,3 + bld r15,2 + bst r21,4 + bld r3,3 + bst r21,5 + bld r7,3 + bst r21,6 + bld r11,3 + bst r21,7 + bld r15,3 + bst r22,0 + bld r3,4 + bst r22,1 + bld r7,4 + bst r22,2 + bld r11,4 + bst r22,3 + bld r15,4 + bst r22,4 + bld r3,5 + bst r22,5 + bld r7,5 + bst r22,6 + bld r11,5 + bst r22,7 + bld r15,5 + bst r23,0 + bld r3,6 + bst r23,1 + bld r7,6 + bst r23,2 + bld r11,6 + bst r23,3 + bld r15,6 + bst r23,4 + bld r3,7 + bst r23,5 + bld r7,7 + bst r23,6 + bld r11,7 + bst r23,7 + bld r15,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r4,0 + bst r20,1 + bld r8,0 + bst r20,2 + bld r12,0 + bst r20,3 + bld r24,0 + bst r20,4 + bld r4,1 + bst r20,5 + bld r8,1 + bst r20,6 + bld r12,1 + bst r20,7 + bld r24,1 + bst r21,0 + bld r4,2 + bst r21,1 + bld r8,2 + bst r21,2 + bld r12,2 + bst r21,3 + bld r24,2 + bst r21,4 + bld r4,3 + bst r21,5 + bld r8,3 + bst r21,6 + bld r12,3 + bst r21,7 + bld r24,3 + bst r22,0 + bld r4,4 + bst r22,1 + bld r8,4 + bst r22,2 + bld r12,4 + bst r22,3 + bld r24,4 + bst r22,4 + bld r4,5 + bst r22,5 + bld r8,5 + bst r22,6 + bld r12,5 + bst r22,7 + bld r24,5 + bst r23,0 + bld r4,6 + bst r23,1 + bld r8,6 + bst r23,2 + bld r12,6 + bst r23,3 + bld r24,6 + bst r23,4 + bld r4,7 + bst r23,5 + bld r8,7 + bst r23,6 + bld r12,7 + bst r23,7 + bld r24,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r5,0 + bst r20,1 + bld r9,0 + bst 
r20,2 + bld r13,0 + bst r20,3 + bld r25,0 + bst r20,4 + bld r5,1 + bst r20,5 + bld r9,1 + bst r20,6 + bld r13,1 + bst r20,7 + bld r25,1 + bst r21,0 + bld r5,2 + bst r21,1 + bld r9,2 + bst r21,2 + bld r13,2 + bst r21,3 + bld r25,2 + bst r21,4 + bld r5,3 + bst r21,5 + bld r9,3 + bst r21,6 + bld r13,3 + bst r21,7 + bld r25,3 + bst r22,0 + bld r5,4 + bst r22,1 + bld r9,4 + bst r22,2 + bld r13,4 + bst r22,3 + bld r25,4 + bst r22,4 + bld r5,5 + bst r22,5 + bld r9,5 + bst r22,6 + bld r13,5 + bst r22,7 + bld r25,5 + bst r23,0 + bld r5,6 + bst r23,1 + bld r9,6 + bst r23,2 + bld r13,6 + bst r23,3 + bld r25,6 + bst r23,4 + bld r5,7 + bst r23,5 + bld r9,7 + bst r23,6 + bld r13,7 + bst r23,7 + bld r25,7 + ld r26,Z + ldd r27,Z+1 + ldd r16,Z+2 + ldd r17,Z+3 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r16 + std Y+4,r17 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r16,Z+6 + ldd r17,Z+7 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r16 + std Y+8,r17 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r16,Z+10 + ldd r17,Z+11 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r16 + std Y+12,r17 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r16,Z+14 + ldd r17,Z+15 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r16 + std Y+16,r17 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r19,40 + mov r26,r1 +375: + ldd r0,Y+13 + ldd r20,Y+9 + std Y+9,r0 + ldd r0,Y+5 + std Y+5,r20 + ldd r20,Y+1 + std Y+1,r0 + ldd r0,Y+14 + ldd r21,Y+10 + std Y+10,r0 + ldd r0,Y+6 + std Y+6,r21 + ldd r21,Y+2 + std Y+2,r0 + ldd r0,Y+15 + ldd r22,Y+11 + std Y+11,r0 + ldd r0,Y+7 + std Y+7,r22 + ldd r22,Y+3 + std Y+3,r0 + ldd r0,Y+16 + ldd r23,Y+12 + std Y+12,r0 + ldd r0,Y+8 + std Y+8,r23 + ldd r23,Y+4 + std Y+4,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r0 + lsr r21 + ror r20 + ror r0 + lsr r21 + ror r20 + ror r0 + lsr r21 + ror r20 + ror r0 + or r21,r0 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + std Y+13,r20 + std Y+14,r21 + std Y+15,r22 + std Y+16,r23 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ldd r0,Y+5 + eor r10,r0 + ldd r0,Y+6 + eor r11,r0 + ldd r0,Y+7 + eor r12,r0 + ldd r0,Y+8 + eor r13,r0 + ldi r20,128 + eor r25,r20 + dec r19 + mov r30,r19 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + eor r14,r20 + bst r2,1 + bld r0,0 + bst r5,0 + bld r2,1 + bst r2,6 + bld r5,0 + bst r4,1 + bld r2,6 + bst r5,4 + bld r4,1 + bst r2,7 + bld r5,4 + bst r3,1 + bld r2,7 + bst r5,2 + bld r3,1 + bst r4,6 + bld r5,2 + bst r4,5 + bld r4,6 + bst r5,5 + bld r4,5 + bst r5,7 + bld r5,5 + bst r3,7 + bld r5,7 + bst r3,3 + bld r3,7 + bst r3,2 + bld r3,3 + bst r4,2 + bld r3,2 + bst r4,4 + bld r4,2 + bst r2,5 + bld r4,4 + bst r5,1 + bld r2,5 + bst r5,6 + bld r5,1 + bst r4,7 + 
bld r5,6 + bst r3,5 + bld r4,7 + bst r5,3 + bld r3,5 + bst r3,6 + bld r5,3 + bst r4,3 + bld r3,6 + bst r3,4 + bld r4,3 + bst r2,3 + bld r3,4 + bst r3,0 + bld r2,3 + bst r2,2 + bld r3,0 + bst r4,0 + bld r2,2 + bst r2,4 + bld r4,0 + bst r0,0 + bld r2,4 + bst r6,0 + bld r0,0 + bst r7,0 + bld r6,0 + bst r7,2 + bld r7,0 + bst r9,2 + bld r7,2 + bst r9,6 + bld r9,2 + bst r9,7 + bld r9,6 + bst r8,7 + bld r9,7 + bst r8,5 + bld r8,7 + bst r6,5 + bld r8,5 + bst r6,1 + bld r6,5 + bst r0,0 + bld r6,1 + bst r6,2 + bld r0,0 + bst r9,0 + bld r6,2 + bst r7,6 + bld r9,0 + bst r9,3 + bld r7,6 + bst r8,6 + bld r9,3 + bst r9,5 + bld r8,6 + bst r6,7 + bld r9,5 + bst r8,1 + bld r6,7 + bst r6,4 + bld r8,1 + bst r7,1 + bld r6,4 + bst r0,0 + bld r7,1 + bst r6,3 + bld r0,0 + bst r8,0 + bld r6,3 + bst r7,4 + bld r8,0 + bst r7,3 + bld r7,4 + bst r8,2 + bld r7,3 + bst r9,4 + bld r8,2 + bst r7,7 + bld r9,4 + bst r8,3 + bld r7,7 + bst r8,4 + bld r8,3 + bst r7,5 + bld r8,4 + bst r0,0 + bld r7,5 + bst r6,6 + bld r0,0 + bst r9,1 + bld r6,6 + bst r0,0 + bld r9,1 + bst r10,0 + bld r0,0 + bst r12,0 + bld r10,0 + bst r12,4 + bld r12,0 + bst r12,5 + bld r12,4 + bst r11,5 + bld r12,5 + bst r11,3 + bld r11,5 + bst r13,2 + bld r11,3 + bst r10,6 + bld r13,2 + bst r10,1 + bld r10,6 + bst r11,0 + bld r10,1 + bst r12,2 + bld r11,0 + bst r10,4 + bld r12,2 + bst r12,1 + bld r10,4 + bst r11,4 + bld r12,1 + bst r12,3 + bld r11,4 + bst r13,4 + bld r12,3 + bst r12,7 + bld r13,4 + bst r13,5 + bld r12,7 + bst r11,7 + bld r13,5 + bst r13,3 + bld r11,7 + bst r13,6 + bld r13,3 + bst r10,7 + bld r13,6 + bst r13,1 + bld r10,7 + bst r11,6 + bld r13,1 + bst r10,3 + bld r11,6 + bst r13,0 + bld r10,3 + bst r12,6 + bld r13,0 + bst r10,5 + bld r12,6 + bst r11,1 + bld r10,5 + bst r11,2 + bld r11,1 + bst r10,2 + bld r11,2 + bst r0,0 + bld r10,2 + bst r14,0 + bld r0,0 + bst r25,0 + bld r14,0 + bst r25,6 + bld r25,0 + bst r15,7 + bld r25,6 + bst r14,3 + bld r15,7 + bst r0,0 + bld r14,3 + bst r14,1 + bld r0,0 + bst r24,0 + bld r14,1 + bst r25,4 + bld r24,0 + bst r25,7 + bld r25,4 + bst r14,7 + bld r25,7 + bst r0,0 + bld r14,7 + bst r14,2 + bld r0,0 + bst r15,0 + bld r14,2 + bst r25,2 + bld r15,0 + bst r15,6 + bld r25,2 + bst r15,3 + bld r15,6 + bst r0,0 + bld r15,3 + bst r14,4 + bld r0,0 + bst r25,1 + bld r14,4 + bst r24,6 + bld r25,1 + bst r15,5 + bld r24,6 + bst r24,3 + bld r15,5 + bst r0,0 + bld r24,3 + bst r14,5 + bld r0,0 + bst r24,1 + bld r14,5 + bst r24,4 + bld r24,1 + bst r25,5 + bld r24,4 + bst r24,7 + bld r25,5 + bst r0,0 + bld r24,7 + bst r14,6 + bld r0,0 + bst r15,1 + bld r14,6 + bst r24,2 + bld r15,1 + bst r15,4 + bld r24,2 + bst r25,3 + bld r15,4 + bst r0,0 + bld r25,3 + movw r20,r14 + movw r22,r24 + movw r14,r2 + movw r24,r4 + movw r2,r20 + movw r4,r22 + and r20,r6 + and r21,r7 + and r22,r8 + and r23,r9 + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + com r14 + com r15 + com r24 + com r25 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + cp r19,r1 + breq 791f + inc r26 + ldi r27,5 + cpse r26,r27 + rjmp 375b + mov r26,r1 + eor r2,r18 + eor r3,r18 
+ eor r4,r18 + eor r5,r18 + rjmp 375b +791: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + bst r2,0 + bld r20,0 + bst r6,0 + bld r20,1 + bst r10,0 + bld r20,2 + bst r14,0 + bld r20,3 + bst r2,1 + bld r20,4 + bst r6,1 + bld r20,5 + bst r10,1 + bld r20,6 + bst r14,1 + bld r20,7 + bst r2,2 + bld r21,0 + bst r6,2 + bld r21,1 + bst r10,2 + bld r21,2 + bst r14,2 + bld r21,3 + bst r2,3 + bld r21,4 + bst r6,3 + bld r21,5 + bst r10,3 + bld r21,6 + bst r14,3 + bld r21,7 + bst r2,4 + bld r22,0 + bst r6,4 + bld r22,1 + bst r10,4 + bld r22,2 + bst r14,4 + bld r22,3 + bst r2,5 + bld r22,4 + bst r6,5 + bld r22,5 + bst r10,5 + bld r22,6 + bst r14,5 + bld r22,7 + bst r2,6 + bld r23,0 + bst r6,6 + bld r23,1 + bst r10,6 + bld r23,2 + bst r14,6 + bld r23,3 + bst r2,7 + bld r23,4 + bst r6,7 + bld r23,5 + bst r10,7 + bld r23,6 + bst r14,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r3,0 + bld r20,0 + bst r7,0 + bld r20,1 + bst r11,0 + bld r20,2 + bst r15,0 + bld r20,3 + bst r3,1 + bld r20,4 + bst r7,1 + bld r20,5 + bst r11,1 + bld r20,6 + bst r15,1 + bld r20,7 + bst r3,2 + bld r21,0 + bst r7,2 + bld r21,1 + bst r11,2 + bld r21,2 + bst r15,2 + bld r21,3 + bst r3,3 + bld r21,4 + bst r7,3 + bld r21,5 + bst r11,3 + bld r21,6 + bst r15,3 + bld r21,7 + bst r3,4 + bld r22,0 + bst r7,4 + bld r22,1 + bst r11,4 + bld r22,2 + bst r15,4 + bld r22,3 + bst r3,5 + bld r22,4 + bst r7,5 + bld r22,5 + bst r11,5 + bld r22,6 + bst r15,5 + bld r22,7 + bst r3,6 + bld r23,0 + bst r7,6 + bld r23,1 + bst r11,6 + bld r23,2 + bst r15,6 + bld r23,3 + bst r3,7 + bld r23,4 + bst r7,7 + bld r23,5 + bst r11,7 + bld r23,6 + bst r15,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r4,0 + bld r20,0 + bst r8,0 + bld r20,1 + bst r12,0 + bld r20,2 + bst r24,0 + bld r20,3 + bst r4,1 + bld r20,4 + bst r8,1 + bld r20,5 + bst r12,1 + bld r20,6 + bst r24,1 + bld r20,7 + bst r4,2 + bld r21,0 + bst r8,2 + bld r21,1 + bst r12,2 + bld r21,2 + bst r24,2 + bld r21,3 + bst r4,3 + bld r21,4 + bst r8,3 + bld r21,5 + bst r12,3 + bld r21,6 + bst r24,3 + bld r21,7 + bst r4,4 + bld r22,0 + bst r8,4 + bld r22,1 + bst r12,4 + bld r22,2 + bst r24,4 + bld r22,3 + bst r4,5 + bld r22,4 + bst r8,5 + bld r22,5 + bst r12,5 + bld r22,6 + bst r24,5 + bld r22,7 + bst r4,6 + bld r23,0 + bst r8,6 + bld r23,1 + bst r12,6 + bld r23,2 + bst r24,6 + bld r23,3 + bst r4,7 + bld r23,4 + bst r8,7 + bld r23,5 + bst r12,7 + bld r23,6 + bst r24,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r5,0 + bld r20,0 + bst r9,0 + bld r20,1 + bst r13,0 + bld r20,2 + bst r25,0 + bld r20,3 + bst r5,1 + bld r20,4 + bst r9,1 + bld r20,5 + bst r13,1 + bld r20,6 + bst r25,1 + bld r20,7 + bst r5,2 + bld r21,0 + bst r9,2 + bld r21,1 + bst r13,2 + bld r21,2 + bst r25,2 + bld r21,3 + bst r5,3 + bld r21,4 + bst r9,3 + bld r21,5 + bst r13,3 + bld r21,6 + bst r25,3 + bld r21,7 + bst r5,4 + bld r22,0 + bst r9,4 + bld r22,1 + bst r13,4 + bld r22,2 + bst r25,4 + bld r22,3 + bst r5,5 + bld r22,4 + bst r9,5 + bld r22,5 + bst r13,5 + bld r22,6 + bst r25,5 + bld r22,7 + bst r5,6 + bld r23,0 + bst r9,6 + bld r23,1 + bst r13,6 + bld r23,2 + bst r25,6 + bld r23,3 + bst r5,7 + bld r23,4 + bst r9,7 + bld r23,5 + bst r13,7 + bld r23,6 + bst r25,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + 
pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128t_decrypt, .-gift128t_decrypt + +#endif diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128n-full-avr.S b/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128n-full-avr.S new file mode 100644 index 0000000..3a7e6fb --- /dev/null +++ b/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128n-full-avr.S @@ -0,0 +1,8173 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 + .byte 128 + + .text +.global gift128n_init + .type gift128n_init, @function +gift128n_init: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 18 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + st Z+,r22 + st Z+,r23 + st Z+,r28 + st Z+,r29 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + ldi r24,4 +33: + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r29 + ror r28 + ror r0 + lsr r29 + ror r28 + ror r0 + or r29,r0 + st Z+,r22 + st Z+,r23 + st Z+,r28 + st Z+,r29 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r28 + mov r28,r4 + mov r4,r0 + mov r0,r29 + mov 
r29,r5 + mov r5,r0 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + mov r0,r6 + mov r6,r10 + mov r10,r0 + mov r0,r7 + mov r7,r11 + mov r11,r0 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + dec r24 + breq 5115f + rjmp 33b +5115: + subi r30,80 + sbc r31,r1 + ldi r24,2 +119: + ld r22,Z + ldd r23,Z+1 + ldd r28,Z+2 + ldd r29,Z+3 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + st Z,r29 + std Z+1,r23 + std Z+2,r28 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r28,Z+6 + ldd r29,Z+7 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor 
r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+4,r29 + std Z+5,r23 + std Z+6,r28 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r28,Z+10 + ldd r29,Z+11 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+8,r29 + std Z+9,r23 + std Z+10,r28 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r28,Z+14 + ldd r29,Z+15 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr 
r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+12,r29 + std Z+13,r23 + std Z+14,r28 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r28,Z+18 + ldd r29,Z+19 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+16,r29 + std Z+17,r23 + std Z+18,r28 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r28,Z+22 + ldd r29,Z+23 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + 
eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+20,r29 + std Z+21,r23 + std Z+22,r28 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r28,Z+26 + ldd r29,Z+27 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+24,r29 + std Z+25,r23 + std Z+26,r28 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r28,Z+30 + ldd r29,Z+31 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + 
rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+28,r29 + std Z+29,r23 + std Z+30,r28 + std Z+31,r22 + dec r24 + breq 1268f + adiw r30,40 + rjmp 119b +1268: + adiw r30,40 + movw r26,r30 + subi r26,80 + sbc r27,r1 + ldi r24,6 +1274: + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r2 + eor r19,r3 + andi r18,51 + andi r19,51 + eor r2,r18 + eor r3,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + movw r18,r2 + movw r20,r4 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + st Z,r2 + std Z+1,r3 + std Z+2,r4 + std Z+3,r5 + movw r18,r22 + movw r20,r28 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + andi r28,204 + andi r29,204 + or r28,r21 + or r29,r18 + or r22,r19 + or r23,r20 + movw r18,r28 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r28 + eor r19,r29 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r28,r18 + eor r29,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r28,r18 + eor r29,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r28 + std Z+5,r29 + std Z+6,r22 + std Z+7,r23 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + swap r3 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + swap r5 + std Z+8,r2 + std Z+9,r3 + std Z+10,r4 + std Z+11,r5 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r28 + adc r28,r1 + lsl r28 + adc r28,r1 + lsl r28 + adc r28,r1 + lsl r29 + adc r29,r1 + lsl r29 + adc r29,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r28 + std Z+15,r29 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + ldi r25,85 + and r2,r25 + and r3,r25 + and r4,r25 + and r5,r25 + or r2,r19 + or r3,r20 + or r4,r21 + or r5,r18 + std Z+16,r4 + std Z+17,r5 + std Z+18,r2 + std Z+19,r3 + movw r18,r22 + movw r20,r28 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + andi r28,170 + andi r29,170 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + or r22,r18 + or r23,r19 + or r28,r20 + or r29,r21 + std Z+20,r29 + std Z+21,r22 + std Z+22,r23 + std Z+23,r28 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + movw r18,r2 + movw r20,r4 + lsr r21 + ror 
r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r14,r18 + movw r16,r20 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + eor r14,r18 + eor r15,r19 + eor r16,r20 + eor r17,r21 + ldi r25,8 + and r14,r25 + and r15,r25 + andi r16,8 + andi r17,8 + eor r18,r14 + eor r19,r15 + eor r20,r16 + eor r21,r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + eor r18,r14 + eor r19,r15 + eor r20,r16 + eor r21,r17 + ldi r17,15 + and r2,r17 + and r3,r17 + and r4,r17 + and r5,r17 + or r2,r18 + or r3,r19 + or r4,r20 + or r5,r21 + std Z+24,r2 + std Z+25,r3 + std Z+26,r4 + std Z+27,r5 + movw r18,r28 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r2,r22 + movw r4,r28 + ldi r16,1 + and r2,r16 + and r3,r16 + and r4,r16 + and r5,r16 + lsl r2 + rol r3 + rol r4 + rol r5 + lsl r2 + rol r3 + rol r4 + rol r5 + lsl r2 + rol r3 + rol r4 + rol r5 + or r2,r18 + or r3,r19 + movw r18,r28 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r2,r18 + or r3,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r4,r18 + or r5,r19 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r2,r18 + or r3,r19 + or r4,r20 + or r5,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r4,r22 + or r5,r23 + std Z+28,r2 + std Z+29,r3 + std Z+30,r4 + std Z+31,r5 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + mov r0,r1 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + or r5,r0 + std Z+32,r3 + std Z+33,r2 + std Z+34,r4 + std Z+35,r5 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r28 + mov r28,r29 + mov r29,r0 + lsl r28 + rol r29 + adc r28,r1 + lsl r28 + rol r29 + adc r28,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r28 + std Z+39,r29 + dec r24 + breq 1733f + adiw r30,40 + rjmp 1274b +1733: + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128n_init, .-gift128n_init + + .text +.global gift128n_encrypt + .type gift128n_encrypt, @function +gift128n_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r22,0 + bst r18,1 + bld r4,0 + bst r18,2 + bld r8,0 + bst r18,3 + bld r12,0 + bst r18,4 + bld r22,1 + bst r18,5 + bld r4,1 + bst r18,6 + bld r8,1 + bst r18,7 + bld r12,1 + bst r19,0 + bld r22,2 + bst r19,1 + bld r4,2 + bst r19,2 + bld r8,2 + bst r19,3 + bld r12,2 + bst r19,4 + bld r22,3 + bst r19,5 + bld r4,3 + bst r19,6 + bld r8,3 + bst r19,7 + bld r12,3 + bst r20,0 + bld r22,4 + bst r20,1 + bld r4,4 + bst r20,2 + bld r8,4 + bst r20,3 + bld r12,4 + bst r20,4 + bld r22,5 + bst r20,5 + bld r4,5 + bst r20,6 + bld r8,5 + bst r20,7 + bld r12,5 + bst r21,0 + bld r22,6 + bst 
r21,1 + bld r4,6 + bst r21,2 + bld r8,6 + bst r21,3 + bld r12,6 + bst r21,4 + bld r22,7 + bst r21,5 + bld r4,7 + bst r21,6 + bld r8,7 + bst r21,7 + bld r12,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r23,0 + bst r18,1 + bld r5,0 + bst r18,2 + bld r9,0 + bst r18,3 + bld r13,0 + bst r18,4 + bld r23,1 + bst r18,5 + bld r5,1 + bst r18,6 + bld r9,1 + bst r18,7 + bld r13,1 + bst r19,0 + bld r23,2 + bst r19,1 + bld r5,2 + bst r19,2 + bld r9,2 + bst r19,3 + bld r13,2 + bst r19,4 + bld r23,3 + bst r19,5 + bld r5,3 + bst r19,6 + bld r9,3 + bst r19,7 + bld r13,3 + bst r20,0 + bld r23,4 + bst r20,1 + bld r5,4 + bst r20,2 + bld r9,4 + bst r20,3 + bld r13,4 + bst r20,4 + bld r23,5 + bst r20,5 + bld r5,5 + bst r20,6 + bld r9,5 + bst r20,7 + bld r13,5 + bst r21,0 + bld r23,6 + bst r21,1 + bld r5,6 + bst r21,2 + bld r9,6 + bst r21,3 + bld r13,6 + bst r21,4 + bld r23,7 + bst r21,5 + bld r5,7 + bst r21,6 + bld r9,7 + bst r21,7 + bld r13,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r2,0 + bst r18,1 + bld r6,0 + bst r18,2 + bld r10,0 + bst r18,3 + bld r14,0 + bst r18,4 + bld r2,1 + bst r18,5 + bld r6,1 + bst r18,6 + bld r10,1 + bst r18,7 + bld r14,1 + bst r19,0 + bld r2,2 + bst r19,1 + bld r6,2 + bst r19,2 + bld r10,2 + bst r19,3 + bld r14,2 + bst r19,4 + bld r2,3 + bst r19,5 + bld r6,3 + bst r19,6 + bld r10,3 + bst r19,7 + bld r14,3 + bst r20,0 + bld r2,4 + bst r20,1 + bld r6,4 + bst r20,2 + bld r10,4 + bst r20,3 + bld r14,4 + bst r20,4 + bld r2,5 + bst r20,5 + bld r6,5 + bst r20,6 + bld r10,5 + bst r20,7 + bld r14,5 + bst r21,0 + bld r2,6 + bst r21,1 + bld r6,6 + bst r21,2 + bld r10,6 + bst r21,3 + bld r14,6 + bst r21,4 + bld r2,7 + bst r21,5 + bld r6,7 + bst r21,6 + bld r10,7 + bst r21,7 + bld r14,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r3,0 + bst r18,1 + bld r7,0 + bst r18,2 + bld r11,0 + bst r18,3 + bld r15,0 + bst r18,4 + bld r3,1 + bst r18,5 + bld r7,1 + bst r18,6 + bld r11,1 + bst r18,7 + bld r15,1 + bst r19,0 + bld r3,2 + bst r19,1 + bld r7,2 + bst r19,2 + bld r11,2 + bst r19,3 + bld r15,2 + bst r19,4 + bld r3,3 + bst r19,5 + bld r7,3 + bst r19,6 + bld r11,3 + bst r19,7 + bld r15,3 + bst r20,0 + bld r3,4 + bst r20,1 + bld r7,4 + bst r20,2 + bld r11,4 + bst r20,3 + bld r15,4 + bst r20,4 + bld r3,5 + bst r20,5 + bld r7,5 + bst r20,6 + bld r11,5 + bst r20,7 + bld r15,5 + bst r21,0 + bld r3,6 + bst r21,1 + bld r7,6 + bst r21,2 + bld r11,6 + bst r21,3 + bld r15,6 + bst r21,4 + bld r3,7 + bst r21,5 + bld r7,7 + bst r21,6 + bld r11,7 + bst r21,7 + bld r15,7 + movw r26,r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 283f + rcall 283f + rcall 283f + rcall 283f + rcall 283f + rcall 283f + rcall 283f + rcall 283f + rjmp 1021f +283: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor 
r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor 
r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov 
r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif 
defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +1021: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + bst r22,0 + bld r18,0 + bst r4,0 + bld r18,1 + bst r8,0 + bld r18,2 + bst r12,0 + bld r18,3 + bst r22,1 + bld r18,4 + bst r4,1 + bld r18,5 + bst r8,1 + bld r18,6 + bst r12,1 + bld r18,7 + bst r22,2 + bld r19,0 + bst r4,2 + bld r19,1 + bst r8,2 + bld r19,2 + bst r12,2 + bld r19,3 + bst r22,3 + bld r19,4 + bst r4,3 + bld r19,5 + bst r8,3 + bld r19,6 + bst r12,3 + bld r19,7 + bst r22,4 + bld r20,0 + bst r4,4 + bld r20,1 + bst r8,4 + bld r20,2 + bst r12,4 + bld r20,3 + bst r22,5 + bld r20,4 + bst r4,5 + bld r20,5 + bst r8,5 + bld r20,6 + bst r12,5 + bld r20,7 + bst r22,6 + bld r21,0 + bst r4,6 + bld r21,1 + bst r8,6 + bld r21,2 + bst r12,6 + bld r21,3 + bst r22,7 + bld r21,4 + bst r4,7 + bld r21,5 + bst r8,7 + bld r21,6 + bst r12,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r23,0 + bld r18,0 + bst r5,0 + bld r18,1 + bst r9,0 + bld r18,2 + bst r13,0 + bld r18,3 + bst r23,1 + bld r18,4 + bst r5,1 + bld r18,5 + bst r9,1 + bld r18,6 + bst r13,1 + bld r18,7 + bst r23,2 + bld r19,0 + bst r5,2 + bld r19,1 + bst r9,2 + bld r19,2 + bst r13,2 + bld r19,3 + bst r23,3 + bld r19,4 + bst r5,3 + bld r19,5 + bst r9,3 + bld r19,6 + bst r13,3 + bld r19,7 + bst r23,4 + bld r20,0 + bst r5,4 + bld r20,1 + bst r9,4 + bld r20,2 + bst r13,4 + bld r20,3 + bst r23,5 + bld r20,4 + bst r5,5 + bld r20,5 + bst r9,5 + bld r20,6 + bst r13,5 + bld r20,7 + bst r23,6 + bld r21,0 + bst r5,6 + bld r21,1 + bst r9,6 + bld r21,2 + bst r13,6 + bld r21,3 + bst r23,7 + bld r21,4 + bst r5,7 + bld r21,5 + bst r9,7 + bld r21,6 + bst r13,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r2,0 + bld r18,0 + bst r6,0 + bld r18,1 + bst r10,0 + bld r18,2 + bst r14,0 + bld r18,3 + bst r2,1 + bld r18,4 + bst r6,1 + bld r18,5 + bst r10,1 + bld r18,6 + bst r14,1 + bld r18,7 + bst r2,2 + bld r19,0 + bst r6,2 + bld r19,1 + bst r10,2 + bld r19,2 + bst r14,2 + bld r19,3 + bst r2,3 + bld r19,4 + bst r6,3 + bld r19,5 + bst r10,3 + bld r19,6 + bst r14,3 + bld r19,7 + bst r2,4 + bld r20,0 + bst r6,4 + bld r20,1 + bst r10,4 + bld r20,2 + bst r14,4 + bld r20,3 + bst r2,5 + bld r20,4 + bst r6,5 + bld r20,5 + bst r10,5 + bld r20,6 + bst r14,5 + bld r20,7 + bst r2,6 + bld r21,0 + bst r6,6 + bld r21,1 + bst r10,6 + bld r21,2 + bst r14,6 + bld r21,3 + bst r2,7 + bld r21,4 + bst r6,7 + bld r21,5 + bst r10,7 + bld r21,6 + bst r14,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r3,0 + bld r18,0 + bst r7,0 + bld r18,1 + bst r11,0 + bld r18,2 + bst r15,0 + bld r18,3 + bst r3,1 + bld r18,4 + bst r7,1 + bld r18,5 + bst r11,1 + bld r18,6 + bst r15,1 + bld r18,7 + bst r3,2 + bld r19,0 + bst r7,2 + bld r19,1 + bst r11,2 + bld r19,2 + bst r15,2 + bld r19,3 + bst r3,3 + bld r19,4 + bst r7,3 + bld r19,5 + bst r11,3 + bld r19,6 + bst r15,3 + bld r19,7 + bst r3,4 + bld r20,0 + bst r7,4 + bld r20,1 + bst r11,4 + bld r20,2 + bst r15,4 + bld r20,3 + bst r3,5 + bld r20,4 + bst r7,5 + bld r20,5 + bst r11,5 + bld r20,6 + bst r15,5 + bld r20,7 + bst r3,6 + bld r21,0 + bst r7,6 + bld r21,1 + bst r11,6 + bld r21,2 + bst r15,6 + bld r21,3 + bst r3,7 + bld r21,4 + bst r7,7 + bld r21,5 + bst r11,7 + bld r21,6 + bst r15,7 + bld r21,7 + 
st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + pop r0 + pop r0 + pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128n_encrypt, .-gift128n_encrypt + + .text +.global gift128n_decrypt + .type gift128n_decrypt, @function +gift128n_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r22,0 + bst r18,1 + bld r4,0 + bst r18,2 + bld r8,0 + bst r18,3 + bld r12,0 + bst r18,4 + bld r22,1 + bst r18,5 + bld r4,1 + bst r18,6 + bld r8,1 + bst r18,7 + bld r12,1 + bst r19,0 + bld r22,2 + bst r19,1 + bld r4,2 + bst r19,2 + bld r8,2 + bst r19,3 + bld r12,2 + bst r19,4 + bld r22,3 + bst r19,5 + bld r4,3 + bst r19,6 + bld r8,3 + bst r19,7 + bld r12,3 + bst r20,0 + bld r22,4 + bst r20,1 + bld r4,4 + bst r20,2 + bld r8,4 + bst r20,3 + bld r12,4 + bst r20,4 + bld r22,5 + bst r20,5 + bld r4,5 + bst r20,6 + bld r8,5 + bst r20,7 + bld r12,5 + bst r21,0 + bld r22,6 + bst r21,1 + bld r4,6 + bst r21,2 + bld r8,6 + bst r21,3 + bld r12,6 + bst r21,4 + bld r22,7 + bst r21,5 + bld r4,7 + bst r21,6 + bld r8,7 + bst r21,7 + bld r12,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r23,0 + bst r18,1 + bld r5,0 + bst r18,2 + bld r9,0 + bst r18,3 + bld r13,0 + bst r18,4 + bld r23,1 + bst r18,5 + bld r5,1 + bst r18,6 + bld r9,1 + bst r18,7 + bld r13,1 + bst r19,0 + bld r23,2 + bst r19,1 + bld r5,2 + bst r19,2 + bld r9,2 + bst r19,3 + bld r13,2 + bst r19,4 + bld r23,3 + bst r19,5 + bld r5,3 + bst r19,6 + bld r9,3 + bst r19,7 + bld r13,3 + bst r20,0 + bld r23,4 + bst r20,1 + bld r5,4 + bst r20,2 + bld r9,4 + bst r20,3 + bld r13,4 + bst r20,4 + bld r23,5 + bst r20,5 + bld r5,5 + bst r20,6 + bld r9,5 + bst r20,7 + bld r13,5 + bst r21,0 + bld r23,6 + bst r21,1 + bld r5,6 + bst r21,2 + bld r9,6 + bst r21,3 + bld r13,6 + bst r21,4 + bld r23,7 + bst r21,5 + bld r5,7 + bst r21,6 + bld r9,7 + bst r21,7 + bld r13,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r2,0 + bst r18,1 + bld r6,0 + bst r18,2 + bld r10,0 + bst r18,3 + bld r14,0 + bst r18,4 + bld r2,1 + bst r18,5 + bld r6,1 + bst r18,6 + bld r10,1 + bst r18,7 + bld r14,1 + bst r19,0 + bld r2,2 + bst r19,1 + bld r6,2 + bst r19,2 + bld r10,2 + bst r19,3 + bld r14,2 + bst r19,4 + bld r2,3 + bst r19,5 + bld r6,3 + bst r19,6 + bld r10,3 + bst r19,7 + bld r14,3 + bst r20,0 + bld r2,4 + bst r20,1 + bld r6,4 + bst r20,2 + bld r10,4 + bst r20,3 + bld r14,4 + bst r20,4 + bld r2,5 + bst r20,5 + bld r6,5 + bst r20,6 + bld r10,5 + bst r20,7 + bld r14,5 + bst r21,0 + bld r2,6 + bst r21,1 + bld r6,6 + bst r21,2 + bld r10,6 + bst r21,3 + bld r14,6 + bst r21,4 + bld r2,7 + bst r21,5 + bld r6,7 + bst r21,6 + bld r10,7 + bst r21,7 + bld r14,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r3,0 + bst r18,1 + bld r7,0 + bst r18,2 + bld r11,0 + bst r18,3 + bld r15,0 + bst r18,4 + bld r3,1 + bst r18,5 + bld r7,1 + bst r18,6 + bld r11,1 + bst r18,7 + bld r15,1 + bst r19,0 + bld r3,2 + bst r19,1 + bld r7,2 + bst r19,2 + bld r11,2 + bst r19,3 + bld r15,2 + bst r19,4 + bld r3,3 + bst r19,5 + bld r7,3 + bst r19,6 + bld r11,3 + bst r19,7 + bld r15,3 + bst r20,0 + bld r3,4 + bst r20,1 + bld r7,4 + bst r20,2 + bld r11,4 + bst r20,3 
+ bld r15,4 + bst r20,4 + bld r3,5 + bst r20,5 + bld r7,5 + bst r20,6 + bld r11,5 + bst r20,7 + bld r15,5 + bst r21,0 + bld r3,6 + bst r21,1 + bld r7,6 + bst r21,2 + bld r11,6 + bst r21,3 + bld r15,6 + bst r21,4 + bld r3,7 + bst r21,5 + bld r7,7 + bst r21,6 + bld r11,7 + bst r21,7 + bld r15,7 + movw r26,r30 + subi r26,192 + sbci r27,254 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,160 + rcall 286f + rcall 286f + rcall 286f + rcall 286f + rcall 286f + rcall 286f + rcall 286f + rcall 286f + rjmp 1024f +286: + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor 
r7,r21 + mov r0,r1 + lsr r22 + ror r0 + lsr r22 + ror r0 + or r22,r0 + mov r0,r1 + lsr r23 + ror r0 + lsr r23 + ror r0 + or r23,r0 + mov r0,r1 + lsr r2 + ror r0 + lsr r2 + ror r0 + or r2,r0 + mov r0,r1 + lsr r3 + ror r0 + lsr r3 + ror r0 + or r3,r0 + swap r4 + swap r5 + swap r6 + swap r7 + lsl r8 + adc r8,r1 + lsl r8 + adc r8,r1 + lsl r9 + adc r9,r1 + lsl r9 + adc r9,r1 + lsl r10 + adc r10,r1 + lsl r10 + adc r10,r1 + lsl r11 + adc r11,r1 + lsl r11 + adc r11,r1 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + com r22 + com r23 + com r2 + com r3 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and 
r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + com r22 + com r23 + com r2 + com r3 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror 
r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,119 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r15 + ror r14 + ror r13 + ror r12 + lsr r15 + ror r14 + ror r13 + ror r12 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,17 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +1024: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + bst r22,0 + bld r18,0 + bst r4,0 + bld r18,1 + bst r8,0 + bld r18,2 + bst r12,0 + bld r18,3 + bst r22,1 + bld r18,4 + bst r4,1 + bld r18,5 + bst r8,1 + bld r18,6 + bst r12,1 + bld r18,7 + bst r22,2 + bld r19,0 + bst r4,2 + bld r19,1 + bst r8,2 + bld r19,2 + bst r12,2 + bld r19,3 + bst r22,3 + bld r19,4 + bst r4,3 + bld r19,5 + bst r8,3 + bld r19,6 + bst r12,3 + bld r19,7 + bst r22,4 + bld r20,0 + bst r4,4 + bld r20,1 + bst r8,4 + bld r20,2 + bst r12,4 + bld r20,3 + bst r22,5 + bld r20,4 + bst r4,5 + bld r20,5 + bst r8,5 + bld r20,6 + bst r12,5 + bld r20,7 + bst r22,6 + bld r21,0 + bst r4,6 + bld r21,1 + bst r8,6 + bld r21,2 + bst r12,6 + bld r21,3 + bst r22,7 + bld r21,4 + bst r4,7 + bld r21,5 + bst r8,7 + bld r21,6 + bst r12,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r23,0 + bld r18,0 + bst r5,0 + bld r18,1 + bst r9,0 + bld r18,2 + bst r13,0 + bld r18,3 + bst r23,1 + bld r18,4 + bst r5,1 + bld r18,5 + bst r9,1 + bld r18,6 + bst r13,1 + bld r18,7 + bst r23,2 + bld r19,0 + bst r5,2 + bld r19,1 + bst r9,2 + bld r19,2 + bst r13,2 + bld r19,3 + bst r23,3 + bld r19,4 + bst r5,3 + bld r19,5 + bst r9,3 + bld r19,6 + bst r13,3 + bld r19,7 + bst r23,4 + bld r20,0 + bst r5,4 + bld r20,1 + bst r9,4 + bld r20,2 + bst r13,4 + bld r20,3 + bst r23,5 + bld r20,4 + bst r5,5 + bld r20,5 + bst r9,5 + bld r20,6 + bst r13,5 + bld r20,7 + bst r23,6 + bld r21,0 + bst r5,6 + bld r21,1 + bst r9,6 + bld r21,2 + bst r13,6 + bld r21,3 + bst r23,7 + bld r21,4 + bst r5,7 + bld r21,5 + bst r9,7 + bld r21,6 + bst r13,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r2,0 + bld r18,0 + bst r6,0 + bld r18,1 + bst r10,0 + bld r18,2 + bst r14,0 + bld r18,3 + bst r2,1 + bld r18,4 + bst r6,1 + bld r18,5 + bst r10,1 + bld r18,6 + bst r14,1 + bld r18,7 + bst r2,2 + bld r19,0 + bst r6,2 + 
bld r19,1 + bst r10,2 + bld r19,2 + bst r14,2 + bld r19,3 + bst r2,3 + bld r19,4 + bst r6,3 + bld r19,5 + bst r10,3 + bld r19,6 + bst r14,3 + bld r19,7 + bst r2,4 + bld r20,0 + bst r6,4 + bld r20,1 + bst r10,4 + bld r20,2 + bst r14,4 + bld r20,3 + bst r2,5 + bld r20,4 + bst r6,5 + bld r20,5 + bst r10,5 + bld r20,6 + bst r14,5 + bld r20,7 + bst r2,6 + bld r21,0 + bst r6,6 + bld r21,1 + bst r10,6 + bld r21,2 + bst r14,6 + bld r21,3 + bst r2,7 + bld r21,4 + bst r6,7 + bld r21,5 + bst r10,7 + bld r21,6 + bst r14,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r3,0 + bld r18,0 + bst r7,0 + bld r18,1 + bst r11,0 + bld r18,2 + bst r15,0 + bld r18,3 + bst r3,1 + bld r18,4 + bst r7,1 + bld r18,5 + bst r11,1 + bld r18,6 + bst r15,1 + bld r18,7 + bst r3,2 + bld r19,0 + bst r7,2 + bld r19,1 + bst r11,2 + bld r19,2 + bst r15,2 + bld r19,3 + bst r3,3 + bld r19,4 + bst r7,3 + bld r19,5 + bst r11,3 + bld r19,6 + bst r15,3 + bld r19,7 + bst r3,4 + bld r20,0 + bst r7,4 + bld r20,1 + bst r11,4 + bld r20,2 + bst r15,4 + bld r20,3 + bst r3,5 + bld r20,4 + bst r7,5 + bld r20,5 + bst r11,5 + bld r20,6 + bst r15,5 + bld r20,7 + bst r3,6 + bld r21,0 + bst r7,6 + bld r21,1 + bst r11,6 + bld r21,2 + bst r15,6 + bld r21,3 + bst r3,7 + bld r21,4 + bst r7,7 + bld r21,5 + bst r11,7 + bld r21,6 + bst r15,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + pop r0 + pop r0 + pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128n_decrypt, .-gift128n_decrypt + + .text +.global gift128t_encrypt + .type gift128t_encrypt, @function +gift128t_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 20 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r2,0 + bst r20,1 + bld r6,0 + bst r20,2 + bld r10,0 + bst r20,3 + bld r14,0 + bst r20,4 + bld r2,1 + bst r20,5 + bld r6,1 + bst r20,6 + bld r10,1 + bst r20,7 + bld r14,1 + bst r21,0 + bld r2,2 + bst r21,1 + bld r6,2 + bst r21,2 + bld r10,2 + bst r21,3 + bld r14,2 + bst r21,4 + bld r2,3 + bst r21,5 + bld r6,3 + bst r21,6 + bld r10,3 + bst r21,7 + bld r14,3 + bst r22,0 + bld r2,4 + bst r22,1 + bld r6,4 + bst r22,2 + bld r10,4 + bst r22,3 + bld r14,4 + bst r22,4 + bld r2,5 + bst r22,5 + bld r6,5 + bst r22,6 + bld r10,5 + bst r22,7 + bld r14,5 + bst r23,0 + bld r2,6 + bst r23,1 + bld r6,6 + bst r23,2 + bld r10,6 + bst r23,3 + bld r14,6 + bst r23,4 + bld r2,7 + bst r23,5 + bld r6,7 + bst r23,6 + bld r10,7 + bst r23,7 + bld r14,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r3,0 + bst r20,1 + bld r7,0 + bst r20,2 + bld r11,0 + bst r20,3 + bld r15,0 + bst r20,4 + bld r3,1 + bst r20,5 + bld r7,1 + bst r20,6 + bld r11,1 + bst r20,7 + bld r15,1 + bst r21,0 + bld r3,2 + bst r21,1 + bld r7,2 + bst r21,2 + bld r11,2 + bst r21,3 + bld r15,2 + bst r21,4 + bld r3,3 + bst r21,5 + bld r7,3 + bst r21,6 + bld r11,3 + bst r21,7 + bld r15,3 + bst r22,0 + bld r3,4 + bst r22,1 + bld r7,4 + bst r22,2 + bld r11,4 + bst r22,3 + bld r15,4 + bst r22,4 + bld r3,5 + bst r22,5 + bld r7,5 + bst r22,6 + bld r11,5 + bst r22,7 + bld r15,5 + bst r23,0 + bld r3,6 + bst r23,1 + bld r7,6 + bst r23,2 + bld r11,6 + bst r23,3 + bld r15,6 + bst r23,4 + bld r3,7 + bst r23,5 + bld r7,7 + bst r23,6 
+ bld r11,7 + bst r23,7 + bld r15,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r4,0 + bst r20,1 + bld r8,0 + bst r20,2 + bld r12,0 + bst r20,3 + bld r24,0 + bst r20,4 + bld r4,1 + bst r20,5 + bld r8,1 + bst r20,6 + bld r12,1 + bst r20,7 + bld r24,1 + bst r21,0 + bld r4,2 + bst r21,1 + bld r8,2 + bst r21,2 + bld r12,2 + bst r21,3 + bld r24,2 + bst r21,4 + bld r4,3 + bst r21,5 + bld r8,3 + bst r21,6 + bld r12,3 + bst r21,7 + bld r24,3 + bst r22,0 + bld r4,4 + bst r22,1 + bld r8,4 + bst r22,2 + bld r12,4 + bst r22,3 + bld r24,4 + bst r22,4 + bld r4,5 + bst r22,5 + bld r8,5 + bst r22,6 + bld r12,5 + bst r22,7 + bld r24,5 + bst r23,0 + bld r4,6 + bst r23,1 + bld r8,6 + bst r23,2 + bld r12,6 + bst r23,3 + bld r24,6 + bst r23,4 + bld r4,7 + bst r23,5 + bld r8,7 + bst r23,6 + bld r12,7 + bst r23,7 + bld r24,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r5,0 + bst r20,1 + bld r9,0 + bst r20,2 + bld r13,0 + bst r20,3 + bld r25,0 + bst r20,4 + bld r5,1 + bst r20,5 + bld r9,1 + bst r20,6 + bld r13,1 + bst r20,7 + bld r25,1 + bst r21,0 + bld r5,2 + bst r21,1 + bld r9,2 + bst r21,2 + bld r13,2 + bst r21,3 + bld r25,2 + bst r21,4 + bld r5,3 + bst r21,5 + bld r9,3 + bst r21,6 + bld r13,3 + bst r21,7 + bld r25,3 + bst r22,0 + bld r5,4 + bst r22,1 + bld r9,4 + bst r22,2 + bld r13,4 + bst r22,3 + bld r25,4 + bst r22,4 + bld r5,5 + bst r22,5 + bld r9,5 + bst r22,6 + bld r13,5 + bst r22,7 + bld r25,5 + bst r23,0 + bld r5,6 + bst r23,1 + bld r9,6 + bst r23,2 + bld r13,6 + bst r23,3 + bld r25,6 + bst r23,4 + bld r5,7 + bst r23,5 + bld r9,7 + bst r23,6 + bld r13,7 + bst r23,7 + bld r25,7 + movw r26,r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + rcall 311f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 311f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 311f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 311f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 311f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 311f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 311f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 311f + rjmp 1049f +311: + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + com r14 + com r15 + com r24 + com r25 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + movw r20,r6 + movw r22,r8 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,204 + andi r21,204 + andi r22,204 + andi r23,204 + lsr r9 + ror r8 + ror r7 + ror r6 + lsr r9 + ror r8 + ror r7 + ror r6 + ldi r19,51 + and r6,r19 + and r7,r19 + and r8,r19 + and r9,r19 + or r6,r20 + or r7,r21 + or r8,r22 + or r9,r23 + movw r20,r10 + movw r22,r12 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,238 + andi r21,238 + andi r22,238 + andi r23,238 + lsr r13 + ror r12 + ror r11 + 
ror r10 + lsr r13 + ror r12 + ror r11 + ror r10 + lsr r13 + ror r12 + ror r11 + ror r10 + ldi r17,17 + and r10,r17 + and r11,r17 + and r12,r17 + and r13,r17 + or r10,r20 + or r11,r21 + or r12,r22 + or r13,r23 + movw r20,r14 + movw r22,r24 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,136 + andi r21,136 + andi r22,136 + andi r23,136 + lsr r25 + ror r24 + ror r15 + ror r14 + ldi r16,119 + and r14,r16 + and r15,r16 + andi r24,119 + andi r25,119 + or r14,r20 + or r15,r21 + or r24,r22 + or r25,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r24 + and r0,r12 + eor r8,r0 + mov r0,r25 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r8 + and r0,r4 + eor r24,r0 + mov r0,r9 + and r0,r5 + eor r25,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r24 + or r0,r8 + eor r12,r0 + mov r0,r25 + or r0,r9 + eor r13,r0 + eor r2,r10 + eor r3,r11 + eor r4,r12 + eor r5,r13 + eor r6,r2 + eor r7,r3 + eor r8,r4 + eor r9,r5 + com r2 + com r3 + com r4 + com r5 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r24 + and r0,r8 + eor r12,r0 + mov r0,r25 + and r0,r9 + eor r13,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r1 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + or r5,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + mov r0,r9 + mov r9,r8 + mov r8,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r12 + rol r13 + adc r12,r1 + lsl r12 + rol r13 + adc r12,r1 + lsl r12 + rol r13 + adc r12,r1 + lsl r12 + rol r13 + adc r12,r1 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r14,r20 + eor r15,r21 + eor r24,r22 + eor r25,r23 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + com r14 + com r15 + com r24 + com r25 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + movw r20,r6 + movw r22,r8 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + andi r20,85 + andi r21,85 + andi r22,85 + andi r23,85 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + mov r0,r12 + mov r12,r10 + mov r10,r0 + mov r0,r13 + mov r13,r11 + mov r11,r0 + movw r20,r10 + movw r22,r12 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r10 + eor r21,r11 + andi r20,85 + andi r21,85 + eor r10,r20 + eor r11,r21 + mov r22,r1 + mov r23,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + mov r0,r24 + mov r24,r14 + mov r14,r0 + mov r0,r25 + mov r25,r15 + mov r15,r0 + movw r20,r24 + lsr r21 + ror r20 + eor r20,r24 + eor r21,r25 + andi r20,85 + andi r21,85 + eor r24,r20 + eor r25,r21 + lsl r20 + rol r21 + eor r24,r20 + eor r25,r21 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r24 + and r0,r12 + eor r8,r0 + mov r0,r25 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r8 + and r0,r4 + eor r24,r0 + mov r0,r9 + and r0,r5 + eor r25,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r24 + or r0,r8 + eor r12,r0 + mov r0,r25 + or r0,r9 + eor r13,r0 + eor r2,r10 + eor r3,r11 + eor r4,r12 + eor r5,r13 + eor r6,r2 + eor r7,r3 + eor r8,r4 + eor r9,r5 + com r2 + com r3 + com r4 + com r5 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r24 + and r0,r8 + eor r12,r0 + mov r0,r25 + and r0,r9 + eor r13,r0 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + 
lsl r3 + adc r3,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r5 + adc r5,r1 + lsl r5 + adc r5,r1 + swap r6 + swap r7 + swap r8 + swap r9 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + mov r0,r1 + lsr r12 + ror r0 + lsr r12 + ror r0 + or r12,r0 + mov r0,r1 + lsr r13 + ror r0 + lsr r13 + ror r0 + or r13,r0 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r14,r20 + eor r15,r21 + eor r24,r22 + eor r25,r23 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + com r14 + com r15 + com r24 + com r25 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + mov r0,r8 + mov r8,r6 + mov r6,r0 + mov r0,r9 + mov r9,r7 + mov r7,r0 + mov r0,r10 + mov r10,r11 + mov r11,r12 + mov r12,r13 + mov r13,r0 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + eor r14,r2 + eor r15,r3 + eor r24,r4 + eor r25,r5 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + ret +1049: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + bst r2,0 + bld r20,0 + bst r6,0 + bld r20,1 + bst r10,0 + bld r20,2 + bst r14,0 + bld r20,3 + bst r2,1 + bld r20,4 + bst r6,1 + bld 
r20,5 + bst r10,1 + bld r20,6 + bst r14,1 + bld r20,7 + bst r2,2 + bld r21,0 + bst r6,2 + bld r21,1 + bst r10,2 + bld r21,2 + bst r14,2 + bld r21,3 + bst r2,3 + bld r21,4 + bst r6,3 + bld r21,5 + bst r10,3 + bld r21,6 + bst r14,3 + bld r21,7 + bst r2,4 + bld r22,0 + bst r6,4 + bld r22,1 + bst r10,4 + bld r22,2 + bst r14,4 + bld r22,3 + bst r2,5 + bld r22,4 + bst r6,5 + bld r22,5 + bst r10,5 + bld r22,6 + bst r14,5 + bld r22,7 + bst r2,6 + bld r23,0 + bst r6,6 + bld r23,1 + bst r10,6 + bld r23,2 + bst r14,6 + bld r23,3 + bst r2,7 + bld r23,4 + bst r6,7 + bld r23,5 + bst r10,7 + bld r23,6 + bst r14,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r3,0 + bld r20,0 + bst r7,0 + bld r20,1 + bst r11,0 + bld r20,2 + bst r15,0 + bld r20,3 + bst r3,1 + bld r20,4 + bst r7,1 + bld r20,5 + bst r11,1 + bld r20,6 + bst r15,1 + bld r20,7 + bst r3,2 + bld r21,0 + bst r7,2 + bld r21,1 + bst r11,2 + bld r21,2 + bst r15,2 + bld r21,3 + bst r3,3 + bld r21,4 + bst r7,3 + bld r21,5 + bst r11,3 + bld r21,6 + bst r15,3 + bld r21,7 + bst r3,4 + bld r22,0 + bst r7,4 + bld r22,1 + bst r11,4 + bld r22,2 + bst r15,4 + bld r22,3 + bst r3,5 + bld r22,4 + bst r7,5 + bld r22,5 + bst r11,5 + bld r22,6 + bst r15,5 + bld r22,7 + bst r3,6 + bld r23,0 + bst r7,6 + bld r23,1 + bst r11,6 + bld r23,2 + bst r15,6 + bld r23,3 + bst r3,7 + bld r23,4 + bst r7,7 + bld r23,5 + bst r11,7 + bld r23,6 + bst r15,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r4,0 + bld r20,0 + bst r8,0 + bld r20,1 + bst r12,0 + bld r20,2 + bst r24,0 + bld r20,3 + bst r4,1 + bld r20,4 + bst r8,1 + bld r20,5 + bst r12,1 + bld r20,6 + bst r24,1 + bld r20,7 + bst r4,2 + bld r21,0 + bst r8,2 + bld r21,1 + bst r12,2 + bld r21,2 + bst r24,2 + bld r21,3 + bst r4,3 + bld r21,4 + bst r8,3 + bld r21,5 + bst r12,3 + bld r21,6 + bst r24,3 + bld r21,7 + bst r4,4 + bld r22,0 + bst r8,4 + bld r22,1 + bst r12,4 + bld r22,2 + bst r24,4 + bld r22,3 + bst r4,5 + bld r22,4 + bst r8,5 + bld r22,5 + bst r12,5 + bld r22,6 + bst r24,5 + bld r22,7 + bst r4,6 + bld r23,0 + bst r8,6 + bld r23,1 + bst r12,6 + bld r23,2 + bst r24,6 + bld r23,3 + bst r4,7 + bld r23,4 + bst r8,7 + bld r23,5 + bst r12,7 + bld r23,6 + bst r24,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r5,0 + bld r20,0 + bst r9,0 + bld r20,1 + bst r13,0 + bld r20,2 + bst r25,0 + bld r20,3 + bst r5,1 + bld r20,4 + bst r9,1 + bld r20,5 + bst r13,1 + bld r20,6 + bst r25,1 + bld r20,7 + bst r5,2 + bld r21,0 + bst r9,2 + bld r21,1 + bst r13,2 + bld r21,2 + bst r25,2 + bld r21,3 + bst r5,3 + bld r21,4 + bst r9,3 + bld r21,5 + bst r13,3 + bld r21,6 + bst r25,3 + bld r21,7 + bst r5,4 + bld r22,0 + bst r9,4 + bld r22,1 + bst r13,4 + bld r22,2 + bst r25,4 + bld r22,3 + bst r5,5 + bld r22,4 + bst r9,5 + bld r22,5 + bst r13,5 + bld r22,6 + bst r25,5 + bld r22,7 + bst r5,6 + bld r23,0 + bst r9,6 + bld r23,1 + bst r13,6 + bld r23,2 + bst r25,6 + bld r23,3 + bst r5,7 + bld r23,4 + bst r9,7 + bld r23,5 + bst r13,7 + bld r23,6 + bst r25,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + pop r0 + pop r0 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128t_encrypt, .-gift128t_encrypt + + .text +.global gift128t_decrypt + .type gift128t_decrypt, @function +gift128t_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 
+ push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 20 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r2,0 + bst r20,1 + bld r6,0 + bst r20,2 + bld r10,0 + bst r20,3 + bld r14,0 + bst r20,4 + bld r2,1 + bst r20,5 + bld r6,1 + bst r20,6 + bld r10,1 + bst r20,7 + bld r14,1 + bst r21,0 + bld r2,2 + bst r21,1 + bld r6,2 + bst r21,2 + bld r10,2 + bst r21,3 + bld r14,2 + bst r21,4 + bld r2,3 + bst r21,5 + bld r6,3 + bst r21,6 + bld r10,3 + bst r21,7 + bld r14,3 + bst r22,0 + bld r2,4 + bst r22,1 + bld r6,4 + bst r22,2 + bld r10,4 + bst r22,3 + bld r14,4 + bst r22,4 + bld r2,5 + bst r22,5 + bld r6,5 + bst r22,6 + bld r10,5 + bst r22,7 + bld r14,5 + bst r23,0 + bld r2,6 + bst r23,1 + bld r6,6 + bst r23,2 + bld r10,6 + bst r23,3 + bld r14,6 + bst r23,4 + bld r2,7 + bst r23,5 + bld r6,7 + bst r23,6 + bld r10,7 + bst r23,7 + bld r14,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r3,0 + bst r20,1 + bld r7,0 + bst r20,2 + bld r11,0 + bst r20,3 + bld r15,0 + bst r20,4 + bld r3,1 + bst r20,5 + bld r7,1 + bst r20,6 + bld r11,1 + bst r20,7 + bld r15,1 + bst r21,0 + bld r3,2 + bst r21,1 + bld r7,2 + bst r21,2 + bld r11,2 + bst r21,3 + bld r15,2 + bst r21,4 + bld r3,3 + bst r21,5 + bld r7,3 + bst r21,6 + bld r11,3 + bst r21,7 + bld r15,3 + bst r22,0 + bld r3,4 + bst r22,1 + bld r7,4 + bst r22,2 + bld r11,4 + bst r22,3 + bld r15,4 + bst r22,4 + bld r3,5 + bst r22,5 + bld r7,5 + bst r22,6 + bld r11,5 + bst r22,7 + bld r15,5 + bst r23,0 + bld r3,6 + bst r23,1 + bld r7,6 + bst r23,2 + bld r11,6 + bst r23,3 + bld r15,6 + bst r23,4 + bld r3,7 + bst r23,5 + bld r7,7 + bst r23,6 + bld r11,7 + bst r23,7 + bld r15,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r4,0 + bst r20,1 + bld r8,0 + bst r20,2 + bld r12,0 + bst r20,3 + bld r24,0 + bst r20,4 + bld r4,1 + bst r20,5 + bld r8,1 + bst r20,6 + bld r12,1 + bst r20,7 + bld r24,1 + bst r21,0 + bld r4,2 + bst r21,1 + bld r8,2 + bst r21,2 + bld r12,2 + bst r21,3 + bld r24,2 + bst r21,4 + bld r4,3 + bst r21,5 + bld r8,3 + bst r21,6 + bld r12,3 + bst r21,7 + bld r24,3 + bst r22,0 + bld r4,4 + bst r22,1 + bld r8,4 + bst r22,2 + bld r12,4 + bst r22,3 + bld r24,4 + bst r22,4 + bld r4,5 + bst r22,5 + bld r8,5 + bst r22,6 + bld r12,5 + bst r22,7 + bld r24,5 + bst r23,0 + bld r4,6 + bst r23,1 + bld r8,6 + bst r23,2 + bld r12,6 + bst r23,3 + bld r24,6 + bst r23,4 + bld r4,7 + bst r23,5 + bld r8,7 + bst r23,6 + bld r12,7 + bst r23,7 + bld r24,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r5,0 + bst r20,1 + bld r9,0 + bst r20,2 + bld r13,0 + bst r20,3 + bld r25,0 + bst r20,4 + bld r5,1 + bst r20,5 + bld r9,1 + bst r20,6 + bld r13,1 + bst r20,7 + bld r25,1 + bst r21,0 + bld r5,2 + bst r21,1 + bld r9,2 + bst r21,2 + bld r13,2 + bst r21,3 + bld r25,2 + bst r21,4 + bld r5,3 + bst r21,5 + bld r9,3 + bst r21,6 + bld r13,3 + bst r21,7 + bld r25,3 + bst r22,0 + bld r5,4 + bst r22,1 + bld r9,4 + bst r22,2 + bld r13,4 + bst r22,3 + bld r25,4 + bst r22,4 + bld r5,5 + bst r22,5 + bld r9,5 + bst r22,6 + bld r13,5 + bst r22,7 + bld r25,5 + bst r23,0 + bld r5,6 + bst r23,1 + bld r9,6 + bst r23,2 + bld r13,6 + bst r23,3 + bld r25,6 + bst r23,4 + bld r5,7 + bst r23,5 + bld r9,7 + bst r23,6 + bld r13,7 + bst r23,7 + bld r25,7 + movw r26,r30 + subi r26,192 + sbci r27,254 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi 
r30,160 + rcall 314f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 314f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 314f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 314f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 314f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 314f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 314f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 314f + rjmp 1052f +314: + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + eor r14,r2 + eor r15,r3 + eor r24,r4 + eor r25,r5 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + dec r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + ld r23,-X + ld r22,-X + ld r21,-X + ld r20,-X + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + ld r23,-X + ld r22,-X + ld r21,-X + ld r20,-X + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + mov r0,r8 + mov r8,r6 + mov r6,r0 + mov r0,r9 + mov r9,r7 + mov r7,r0 + mov r0,r13 + mov r13,r12 + mov r12,r11 + mov r11,r10 + mov r10,r0 + mov r0,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + com r14 + com r15 + com r24 + com r25 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + dec r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + eor r14,r20 + eor r15,r21 + eor r24,r22 + eor r25,r23 + ld r23,-X + ld r22,-X + ld r21,-X + ld r20,-X + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + ld r23,-X + ld r22,-X + ld r21,-X + ld r20,-X + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + mov r0,r1 + lsr r2 + ror r0 + lsr r2 + ror r0 + or r2,r0 + mov r0,r1 + lsr r3 + ror r0 + lsr r3 + ror r0 + or r3,r0 + mov r0,r1 + lsr r4 
+ ror r0 + lsr r4 + ror r0 + or r4,r0 + mov r0,r1 + lsr r5 + ror r0 + lsr r5 + ror r0 + or r5,r0 + swap r6 + swap r7 + swap r8 + swap r9 + lsl r10 + adc r10,r1 + lsl r10 + adc r10,r1 + lsl r11 + adc r11,r1 + lsl r11 + adc r11,r1 + lsl r12 + adc r12,r1 + lsl r12 + adc r12,r1 + lsl r13 + adc r13,r1 + lsl r13 + adc r13,r1 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r24 + and r0,r8 + eor r12,r0 + mov r0,r25 + and r0,r9 + eor r13,r0 + com r2 + com r3 + com r4 + com r5 + eor r6,r2 + eor r7,r3 + eor r8,r4 + eor r9,r5 + eor r2,r10 + eor r3,r11 + eor r4,r12 + eor r5,r13 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r24 + or r0,r8 + eor r12,r0 + mov r0,r25 + or r0,r9 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r8 + and r0,r4 + eor r24,r0 + mov r0,r9 + and r0,r5 + eor r25,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r24 + and r0,r12 + eor r8,r0 + mov r0,r25 + and r0,r13 + eor r9,r0 + dec r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + ld r23,-X + ld r22,-X + ld r21,-X + ld r20,-X + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + ld r23,-X + ld r22,-X + ld r21,-X + ld r20,-X + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + movw r20,r6 + movw r22,r8 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + andi r20,85 + andi r21,85 + andi r22,85 + andi r23,85 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + movw r20,r10 + movw r22,r12 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r10 + eor r21,r11 + andi r20,85 + andi r21,85 + eor r10,r20 + eor r11,r21 + mov r22,r1 + mov r23,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + mov r0,r12 + mov r12,r10 + mov r10,r0 + mov r0,r13 + mov r13,r11 + mov r11,r0 + movw r20,r24 + lsr r21 + ror r20 + eor r20,r24 + eor r21,r25 + andi r20,85 + andi r21,85 + eor r24,r20 + eor r25,r21 + lsl r20 + rol r21 + eor r24,r20 + eor r25,r21 + mov r0,r24 + mov r24,r14 + mov r14,r0 + mov r0,r25 + mov r25,r15 + mov r15,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + com r14 + com r15 + com r24 + com r25 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + 
mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + dec r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + eor r14,r20 + eor r15,r21 + eor r24,r22 + eor r25,r23 + ld r23,-X + ld r22,-X + ld r21,-X + ld r20,-X + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + ld r23,-X + ld r22,-X + ld r21,-X + ld r20,-X + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r4 + rol r5 + adc r4,r1 + lsl r4 + rol r5 + adc r4,r1 + lsl r4 + rol r5 + adc r4,r1 + lsl r4 + rol r5 + adc r4,r1 + mov r0,r7 + mov r7,r6 + mov r6,r0 + mov r0,r9 + mov r9,r8 + mov r8,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + or r13,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r24 + and r0,r8 + eor r12,r0 + mov r0,r25 + and r0,r9 + eor r13,r0 + com r2 + com r3 + com r4 + com r5 + eor r6,r2 + eor r7,r3 + eor r8,r4 + eor r9,r5 + eor r2,r10 + eor r3,r11 + eor r4,r12 + eor r5,r13 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r24 + or r0,r8 + eor r12,r0 + mov r0,r25 + or r0,r9 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r8 + and r0,r4 + eor r24,r0 + mov r0,r9 + and r0,r5 + eor r25,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r24 + and r0,r12 + eor r8,r0 + mov r0,r25 + and r0,r13 + eor r9,r0 + dec r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + ld r23,-X + ld r22,-X + ld r21,-X + ld r20,-X + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + ld r23,-X + ld r22,-X + ld r21,-X + ld r20,-X + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + movw r20,r6 + movw r22,r8 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,204 + andi r21,204 + andi r22,204 + andi r23,204 + lsr r9 + ror r8 + ror r7 + ror r6 + lsr r9 + ror r8 + ror r7 + ror r6 + ldi r19,51 + and r6,r19 + and r7,r19 + and r8,r19 + and r9,r19 + or r6,r20 + or r7,r21 + or 
r8,r22 + or r9,r23 + movw r20,r10 + movw r22,r12 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,136 + andi r21,136 + andi r22,136 + andi r23,136 + lsr r13 + ror r12 + ror r11 + ror r10 + ldi r17,119 + and r10,r17 + and r11,r17 + and r12,r17 + and r13,r17 + or r10,r20 + or r11,r21 + or r12,r22 + or r13,r23 + movw r20,r14 + movw r22,r24 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,238 + andi r21,238 + andi r22,238 + andi r23,238 + lsr r25 + ror r24 + ror r15 + ror r14 + lsr r25 + ror r24 + ror r15 + ror r14 + lsr r25 + ror r24 + ror r15 + ror r14 + ldi r16,17 + and r14,r16 + and r15,r16 + andi r24,17 + andi r25,17 + or r14,r20 + or r15,r21 + or r24,r22 + or r25,r23 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + com r14 + com r15 + com r24 + com r25 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + ret +1052: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + bst r2,0 + bld r20,0 + bst r6,0 + bld r20,1 + bst r10,0 + bld r20,2 + bst r14,0 + bld r20,3 + bst r2,1 + bld r20,4 + bst r6,1 + bld r20,5 + bst r10,1 + bld r20,6 + bst r14,1 + bld r20,7 + bst r2,2 + bld r21,0 + bst r6,2 + bld r21,1 + bst r10,2 + bld r21,2 + bst r14,2 + bld r21,3 + bst r2,3 + bld r21,4 + bst r6,3 + bld r21,5 + bst r10,3 + bld r21,6 + bst r14,3 + bld r21,7 + bst r2,4 + bld r22,0 + bst r6,4 + bld r22,1 + bst r10,4 + bld r22,2 + bst r14,4 + bld r22,3 + bst r2,5 + bld r22,4 + bst r6,5 + bld r22,5 + bst r10,5 + bld r22,6 + bst r14,5 + bld r22,7 + bst r2,6 + bld r23,0 + bst r6,6 + bld r23,1 + bst r10,6 + bld r23,2 + bst r14,6 + bld r23,3 + bst r2,7 + bld r23,4 + bst r6,7 + bld r23,5 + bst r10,7 + bld r23,6 + bst r14,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r3,0 + bld r20,0 + bst r7,0 + bld r20,1 + bst r11,0 + bld r20,2 + bst r15,0 + bld r20,3 + bst r3,1 + bld r20,4 + bst r7,1 + bld r20,5 + bst r11,1 + bld r20,6 + bst r15,1 + bld r20,7 + bst r3,2 + bld r21,0 + bst r7,2 + bld r21,1 + bst r11,2 + bld r21,2 + bst r15,2 + bld r21,3 + bst r3,3 + bld r21,4 + bst r7,3 + bld r21,5 + bst r11,3 + bld r21,6 + bst r15,3 + bld r21,7 + bst r3,4 + bld r22,0 + bst r7,4 + bld r22,1 + bst r11,4 + bld r22,2 + bst r15,4 + bld r22,3 + bst r3,5 + bld r22,4 + bst r7,5 + bld r22,5 + bst r11,5 + bld r22,6 + bst r15,5 + bld r22,7 + bst r3,6 + bld r23,0 + bst r7,6 + bld r23,1 + bst r11,6 + bld r23,2 + bst r15,6 + bld r23,3 + bst r3,7 + bld r23,4 + bst r7,7 + bld r23,5 + bst r11,7 + bld r23,6 + bst r15,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r4,0 + bld r20,0 + bst r8,0 + bld r20,1 + bst r12,0 + bld r20,2 + bst r24,0 + bld r20,3 + bst r4,1 + bld r20,4 + bst r8,1 + bld r20,5 + bst r12,1 + bld r20,6 + bst r24,1 + bld r20,7 + bst r4,2 + bld r21,0 + bst r8,2 + bld r21,1 + bst r12,2 + bld r21,2 + bst r24,2 + bld r21,3 + bst r4,3 + bld r21,4 + bst r8,3 + bld r21,5 + bst r12,3 + bld 
r21,6 + bst r24,3 + bld r21,7 + bst r4,4 + bld r22,0 + bst r8,4 + bld r22,1 + bst r12,4 + bld r22,2 + bst r24,4 + bld r22,3 + bst r4,5 + bld r22,4 + bst r8,5 + bld r22,5 + bst r12,5 + bld r22,6 + bst r24,5 + bld r22,7 + bst r4,6 + bld r23,0 + bst r8,6 + bld r23,1 + bst r12,6 + bld r23,2 + bst r24,6 + bld r23,3 + bst r4,7 + bld r23,4 + bst r8,7 + bld r23,5 + bst r12,7 + bld r23,6 + bst r24,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r5,0 + bld r20,0 + bst r9,0 + bld r20,1 + bst r13,0 + bld r20,2 + bst r25,0 + bld r20,3 + bst r5,1 + bld r20,4 + bst r9,1 + bld r20,5 + bst r13,1 + bld r20,6 + bst r25,1 + bld r20,7 + bst r5,2 + bld r21,0 + bst r9,2 + bld r21,1 + bst r13,2 + bld r21,2 + bst r25,2 + bld r21,3 + bst r5,3 + bld r21,4 + bst r9,3 + bld r21,5 + bst r13,3 + bld r21,6 + bst r25,3 + bld r21,7 + bst r5,4 + bld r22,0 + bst r9,4 + bld r22,1 + bst r13,4 + bld r22,2 + bst r25,4 + bld r22,3 + bst r5,5 + bld r22,4 + bst r9,5 + bld r22,5 + bst r13,5 + bld r22,6 + bst r25,5 + bld r22,7 + bst r5,6 + bld r23,0 + bst r9,6 + bld r23,1 + bst r13,6 + bld r23,2 + bst r25,6 + bld r23,3 + bst r5,7 + bld r23,4 + bst r9,7 + bld r23,5 + bst r13,7 + bld r23,6 + bst r25,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + pop r0 + pop r0 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128t_decrypt, .-gift128t_decrypt + +#endif + +#endif diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128n-small-avr.S b/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128n-small-avr.S new file mode 100644 index 0000000..6f2d68b --- /dev/null +++ b/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128n-small-avr.S @@ -0,0 +1,9331 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 
3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 + .byte 128 + + .text +.global gift128n_init + .type gift128n_init, @function +gift128n_init: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 16 + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + ldi r24,4 +33: + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + mov r0,r22 + mov r22,r4 + mov r4,r0 + mov r0,r23 + mov r23,r5 + mov r5,r0 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + dec r24 + breq 5115f + rjmp 33b +5115: + subi r30,80 + sbc r31,r1 + ldi r24,2 +119: + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + st Z,r3 + std Z+1,r23 + std Z+2,r2 + 
std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+4,r3 + std Z+5,r23 + std Z+6,r2 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+8,r3 + std Z+9,r23 + std Z+10,r2 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd 
r2,Z+14 + ldd r3,Z+15 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+12,r3 + std Z+13,r23 + std Z+14,r2 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r3 + std Z+17,r23 + std Z+18,r2 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor 
r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+20,r3 + std Z+21,r23 + std Z+22,r2 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+24,r3 + std Z+25,r23 + std Z+26,r2 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl 
r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+28,r3 + std Z+29,r23 + std Z+30,r2 + std Z+31,r22 + dec r24 + breq 1268f + adiw r30,40 + rjmp 119b +1268: + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size gift128n_init, .-gift128n_init + + .text +.global gift128n_encrypt + .type gift128n_encrypt, @function +gift128n_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ldi r24,20 +1: + ld r22,Z+ + ld r23,Z+ + ld r2,Z+ + ld r3,Z+ + std Y+1,r22 + std Y+2,r23 + std Y+3,r2 + std Y+4,r3 + adiw r28,4 + dec r24 + brne 1b + subi r28,80 + sbc r29,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r22,0 + bst r18,1 + bld r4,0 + bst r18,2 + bld r8,0 + bst r18,3 + bld r12,0 + bst r18,4 + bld r22,1 + bst r18,5 + bld r4,1 + bst r18,6 + bld r8,1 + bst r18,7 + bld r12,1 + bst r19,0 + bld r22,2 + bst r19,1 + bld r4,2 + bst r19,2 + bld r8,2 + bst r19,3 + bld r12,2 + bst r19,4 + bld r22,3 + bst r19,5 + bld r4,3 + bst r19,6 + bld r8,3 + bst r19,7 + bld r12,3 + bst r20,0 + bld r22,4 + bst r20,1 + bld r4,4 + bst r20,2 + bld r8,4 + bst r20,3 + bld r12,4 + bst r20,4 + bld r22,5 + bst r20,5 + bld r4,5 + bst r20,6 + bld r8,5 + bst r20,7 + bld r12,5 + bst r21,0 + bld r22,6 + bst r21,1 + bld r4,6 + bst r21,2 + bld r8,6 + bst r21,3 + bld r12,6 + bst r21,4 + bld r22,7 + bst r21,5 + bld r4,7 + bst r21,6 + bld r8,7 + bst r21,7 + bld r12,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r23,0 + bst r18,1 + bld r5,0 + bst r18,2 + bld r9,0 + bst r18,3 + bld r13,0 + bst r18,4 + bld r23,1 + bst r18,5 + bld r5,1 + bst r18,6 + bld r9,1 + bst r18,7 + bld r13,1 + bst r19,0 + bld r23,2 + bst r19,1 + bld r5,2 + bst r19,2 + bld r9,2 + bst r19,3 + bld r13,2 + bst r19,4 + bld r23,3 + bst r19,5 + bld r5,3 + bst r19,6 + bld r9,3 + bst r19,7 + bld r13,3 + bst r20,0 + bld 
r23,4 + bst r20,1 + bld r5,4 + bst r20,2 + bld r9,4 + bst r20,3 + bld r13,4 + bst r20,4 + bld r23,5 + bst r20,5 + bld r5,5 + bst r20,6 + bld r9,5 + bst r20,7 + bld r13,5 + bst r21,0 + bld r23,6 + bst r21,1 + bld r5,6 + bst r21,2 + bld r9,6 + bst r21,3 + bld r13,6 + bst r21,4 + bld r23,7 + bst r21,5 + bld r5,7 + bst r21,6 + bld r9,7 + bst r21,7 + bld r13,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r2,0 + bst r18,1 + bld r6,0 + bst r18,2 + bld r10,0 + bst r18,3 + bld r14,0 + bst r18,4 + bld r2,1 + bst r18,5 + bld r6,1 + bst r18,6 + bld r10,1 + bst r18,7 + bld r14,1 + bst r19,0 + bld r2,2 + bst r19,1 + bld r6,2 + bst r19,2 + bld r10,2 + bst r19,3 + bld r14,2 + bst r19,4 + bld r2,3 + bst r19,5 + bld r6,3 + bst r19,6 + bld r10,3 + bst r19,7 + bld r14,3 + bst r20,0 + bld r2,4 + bst r20,1 + bld r6,4 + bst r20,2 + bld r10,4 + bst r20,3 + bld r14,4 + bst r20,4 + bld r2,5 + bst r20,5 + bld r6,5 + bst r20,6 + bld r10,5 + bst r20,7 + bld r14,5 + bst r21,0 + bld r2,6 + bst r21,1 + bld r6,6 + bst r21,2 + bld r10,6 + bst r21,3 + bld r14,6 + bst r21,4 + bld r2,7 + bst r21,5 + bld r6,7 + bst r21,6 + bld r10,7 + bst r21,7 + bld r14,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r3,0 + bst r18,1 + bld r7,0 + bst r18,2 + bld r11,0 + bst r18,3 + bld r15,0 + bst r18,4 + bld r3,1 + bst r18,5 + bld r7,1 + bst r18,6 + bld r11,1 + bst r18,7 + bld r15,1 + bst r19,0 + bld r3,2 + bst r19,1 + bld r7,2 + bst r19,2 + bld r11,2 + bst r19,3 + bld r15,2 + bst r19,4 + bld r3,3 + bst r19,5 + bld r7,3 + bst r19,6 + bld r11,3 + bst r19,7 + bld r15,3 + bst r20,0 + bld r3,4 + bst r20,1 + bld r7,4 + bst r20,2 + bld r11,4 + bst r20,3 + bld r15,4 + bst r20,4 + bld r3,5 + bst r20,5 + bld r7,5 + bst r20,6 + bld r11,5 + bst r20,7 + bld r15,5 + bst r21,0 + bld r3,6 + bst r21,1 + bld r7,6 + bst r21,2 + bld r11,6 + bst r21,3 + bld r15,6 + bst r21,4 + bld r3,7 + bst r21,5 + bld r7,7 + bst r21,6 + bld r11,7 + bst r21,7 + bld r15,7 + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + adiw r26,40 + rcall 329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi 
r30,100 + adiw r26,40 + rcall 329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 329f + rcall 329f + rjmp 1541f +329: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 
+ eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + 
ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and 
r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +1067: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 
+ andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + 
pop r2 + pop r3 + movw r26,r30 + ret +1541: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + bst r22,0 + bld r18,0 + bst r4,0 + bld r18,1 + bst r8,0 + bld r18,2 + bst r12,0 + bld r18,3 + bst r22,1 + bld r18,4 + bst r4,1 + bld r18,5 + bst r8,1 + bld r18,6 + bst r12,1 + bld r18,7 + bst r22,2 + bld r19,0 + bst r4,2 + bld r19,1 + bst r8,2 + bld r19,2 + bst r12,2 + bld r19,3 + bst r22,3 + bld r19,4 + bst r4,3 + bld r19,5 + bst r8,3 + bld r19,6 + bst r12,3 + bld r19,7 + bst r22,4 + bld r20,0 + bst r4,4 + bld r20,1 + bst r8,4 + bld r20,2 + bst r12,4 + bld r20,3 + bst r22,5 + bld r20,4 + bst r4,5 + bld r20,5 + bst r8,5 + bld r20,6 + bst r12,5 + bld r20,7 + bst r22,6 + bld r21,0 + bst r4,6 + bld r21,1 + bst r8,6 + bld r21,2 + bst r12,6 + bld r21,3 + bst r22,7 + bld r21,4 + bst r4,7 + bld r21,5 + bst r8,7 + bld r21,6 + bst r12,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r23,0 + bld r18,0 + bst r5,0 + bld r18,1 + bst r9,0 + bld r18,2 + bst r13,0 + bld r18,3 + bst r23,1 + bld r18,4 + bst r5,1 + bld r18,5 + bst r9,1 + bld r18,6 + bst r13,1 + bld r18,7 + bst r23,2 + bld r19,0 + bst r5,2 + bld r19,1 + bst r9,2 + bld r19,2 + bst r13,2 + bld r19,3 + bst r23,3 + bld r19,4 + bst r5,3 + bld r19,5 + bst r9,3 + bld r19,6 + bst r13,3 + bld r19,7 + bst r23,4 + bld r20,0 + bst r5,4 + bld r20,1 + bst r9,4 + bld r20,2 + bst r13,4 + bld r20,3 + bst r23,5 + bld r20,4 + bst r5,5 + bld r20,5 + bst r9,5 + bld r20,6 + bst r13,5 + bld r20,7 + bst r23,6 + bld r21,0 + bst r5,6 + bld r21,1 + bst r9,6 + bld r21,2 + bst r13,6 + bld r21,3 + bst r23,7 + bld r21,4 + bst r5,7 + bld r21,5 + bst r9,7 + bld r21,6 + bst r13,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r2,0 + bld r18,0 + bst r6,0 + bld r18,1 + bst r10,0 + bld r18,2 + bst r14,0 + bld r18,3 + bst r2,1 + bld r18,4 + bst r6,1 + bld r18,5 + bst r10,1 + bld r18,6 + bst r14,1 + bld r18,7 + bst r2,2 + bld r19,0 + bst r6,2 + bld r19,1 + bst r10,2 + bld r19,2 + bst r14,2 + bld r19,3 + bst r2,3 + bld r19,4 + bst r6,3 + bld r19,5 + bst r10,3 + bld r19,6 + bst r14,3 + bld r19,7 + bst r2,4 + bld r20,0 + bst r6,4 + bld r20,1 + bst r10,4 + bld r20,2 + bst r14,4 + bld r20,3 + bst r2,5 + bld r20,4 + bst r6,5 + bld r20,5 + bst r10,5 + bld r20,6 + bst r14,5 + bld r20,7 + bst r2,6 + bld r21,0 + bst r6,6 + bld r21,1 + bst r10,6 + bld r21,2 + bst r14,6 + bld r21,3 + bst r2,7 + bld r21,4 + bst r6,7 + bld r21,5 + bst r10,7 + bld r21,6 + bst r14,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r3,0 + bld r18,0 + bst r7,0 + bld r18,1 + bst r11,0 + bld r18,2 + bst r15,0 + bld r18,3 + bst r3,1 + bld r18,4 + bst r7,1 + bld r18,5 + bst r11,1 + bld r18,6 + bst r15,1 + bld r18,7 + bst r3,2 + bld r19,0 + bst r7,2 + bld r19,1 + bst r11,2 + bld r19,2 + bst r15,2 + bld r19,3 + bst r3,3 + bld r19,4 + bst r7,3 + bld r19,5 + bst r11,3 + bld r19,6 + bst r15,3 + bld r19,7 + bst r3,4 + bld r20,0 + bst r7,4 + bld r20,1 + bst r11,4 + bld r20,2 + bst r15,4 + bld r20,3 + bst r3,5 + bld r20,4 + bst r7,5 + bld r20,5 + bst r11,5 + bld r20,6 + bst r15,5 + bld r20,7 + bst r3,6 + bld r21,0 + bst r7,6 + bld r21,1 + bst r11,6 + bld r21,2 + bst r15,6 + bld r21,3 + bst r3,7 + bld r21,4 + bst r7,7 + bld r21,5 + bst r11,7 + bld r21,6 + bst r15,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + 
pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128n_encrypt, .-gift128n_encrypt + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 40 +table_1: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 11 + .byte 23 + .byte 46 + .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128n_decrypt + .type gift128n_decrypt, @function +gift128n_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r22,0 + bst r18,1 + bld r4,0 + bst r18,2 + bld r8,0 + bst r18,3 + bld r12,0 + bst r18,4 + bld r22,1 + bst r18,5 + bld r4,1 + bst r18,6 + bld r8,1 + bst r18,7 + bld r12,1 + bst r19,0 + bld r22,2 + bst r19,1 + bld r4,2 + bst r19,2 + bld r8,2 + bst r19,3 + bld r12,2 + bst r19,4 + bld r22,3 + bst r19,5 + bld r4,3 + bst r19,6 + bld r8,3 + bst r19,7 + bld r12,3 + bst r20,0 + bld r22,4 + bst r20,1 + bld r4,4 + bst r20,2 + bld r8,4 + bst r20,3 + bld r12,4 + bst r20,4 + bld r22,5 + bst r20,5 + bld r4,5 + bst r20,6 + bld r8,5 + bst r20,7 + bld r12,5 + bst r21,0 + bld r22,6 + bst r21,1 + bld r4,6 + bst r21,2 + bld r8,6 + bst r21,3 + bld r12,6 + bst r21,4 + bld r22,7 + bst r21,5 + bld r4,7 + bst r21,6 + bld r8,7 + bst r21,7 + bld r12,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r23,0 + bst r18,1 + bld r5,0 + bst r18,2 + bld r9,0 + bst r18,3 + bld r13,0 + bst r18,4 + bld r23,1 + bst r18,5 + bld r5,1 + bst r18,6 + bld r9,1 + bst r18,7 + bld r13,1 + bst r19,0 + bld r23,2 + bst r19,1 + bld r5,2 + bst r19,2 + bld r9,2 + bst r19,3 + bld r13,2 + bst r19,4 + bld r23,3 + bst r19,5 + bld r5,3 + bst r19,6 + bld r9,3 + bst r19,7 + bld r13,3 + bst r20,0 + bld r23,4 + bst r20,1 + bld r5,4 + bst r20,2 + bld r9,4 + bst r20,3 + bld r13,4 + bst r20,4 + bld r23,5 + bst r20,5 + bld r5,5 + bst r20,6 + bld r9,5 + bst r20,7 + bld r13,5 + bst r21,0 + bld r23,6 + bst r21,1 + bld r5,6 + bst r21,2 + bld r9,6 + bst r21,3 + bld r13,6 + bst r21,4 + bld r23,7 + bst r21,5 + bld r5,7 + bst r21,6 + bld r9,7 + bst r21,7 + bld r13,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r2,0 + bst r18,1 + bld r6,0 + bst r18,2 + bld r10,0 + bst r18,3 + bld r14,0 + bst r18,4 + bld r2,1 + bst r18,5 + bld r6,1 + bst r18,6 + bld r10,1 + bst r18,7 + bld r14,1 + bst r19,0 + bld r2,2 + bst r19,1 + bld r6,2 + bst r19,2 + bld r10,2 + bst r19,3 + bld r14,2 + bst r19,4 + bld r2,3 + bst r19,5 + bld r6,3 + bst r19,6 + bld r10,3 + bst r19,7 + bld r14,3 + bst r20,0 + bld r2,4 + bst r20,1 + bld r6,4 + bst r20,2 + bld r10,4 + bst r20,3 + bld r14,4 + bst r20,4 + bld r2,5 + bst r20,5 + bld r6,5 + bst r20,6 + bld r10,5 + bst r20,7 + bld r14,5 + bst r21,0 + bld r2,6 + bst r21,1 + bld r6,6 + bst r21,2 + bld r10,6 + bst r21,3 + bld r14,6 + bst r21,4 + bld r2,7 + bst r21,5 + bld r6,7 + bst r21,6 + bld r10,7 + bst r21,7 + bld r14,7 + ld r18,X+ 
+ ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r3,0 + bst r18,1 + bld r7,0 + bst r18,2 + bld r11,0 + bst r18,3 + bld r15,0 + bst r18,4 + bld r3,1 + bst r18,5 + bld r7,1 + bst r18,6 + bld r11,1 + bst r18,7 + bld r15,1 + bst r19,0 + bld r3,2 + bst r19,1 + bld r7,2 + bst r19,2 + bld r11,2 + bst r19,3 + bld r15,2 + bst r19,4 + bld r3,3 + bst r19,5 + bld r7,3 + bst r19,6 + bld r11,3 + bst r19,7 + bld r15,3 + bst r20,0 + bld r3,4 + bst r20,1 + bld r7,4 + bst r20,2 + bld r11,4 + bst r20,3 + bld r15,4 + bst r20,4 + bld r3,5 + bst r20,5 + bld r7,5 + bst r20,6 + bld r11,5 + bst r20,7 + bld r15,5 + bst r21,0 + bld r3,6 + bst r21,1 + bld r7,6 + bst r21,2 + bld r11,6 + bst r21,3 + bld r15,6 + bst r21,4 + bld r3,7 + bst r21,5 + bld r7,7 + bst r21,6 + bld r11,7 + bst r21,7 + bld r15,7 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror 
r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror 
r18 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r17,hh8(table_1) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +934: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 1086f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 1086f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 1086f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 1086f + cpse r16,r1 + 
rjmp 934b + rjmp 1431f +1086: + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 + bst r7,6 + bld r7,2 + bst r7,7 + bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld r11,0 + bst r8,5 + bld r10,6 + bst r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + 
eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +1431: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + bst r22,0 + bld r18,0 + bst r4,0 + bld r18,1 + bst r8,0 + bld r18,2 + bst r12,0 + bld r18,3 + bst r22,1 + bld r18,4 + bst r4,1 + bld r18,5 + bst r8,1 + bld r18,6 + bst r12,1 + bld r18,7 + bst r22,2 + bld r19,0 + bst r4,2 + bld r19,1 + bst r8,2 + bld r19,2 + bst r12,2 + bld r19,3 + bst r22,3 + bld r19,4 + bst r4,3 + bld r19,5 + bst r8,3 + bld r19,6 + bst r12,3 + bld r19,7 + bst r22,4 + bld r20,0 + bst r4,4 + bld r20,1 + bst r8,4 + bld r20,2 + bst r12,4 + bld r20,3 + bst r22,5 + bld r20,4 + bst r4,5 + bld r20,5 + bst r8,5 + bld r20,6 + bst r12,5 + bld r20,7 + bst r22,6 + bld r21,0 + bst r4,6 + bld r21,1 + bst r8,6 + bld r21,2 + bst r12,6 + bld r21,3 + bst r22,7 + bld r21,4 + bst r4,7 + bld r21,5 + bst r8,7 + bld r21,6 + bst r12,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r23,0 + bld r18,0 + bst r5,0 + bld r18,1 + bst r9,0 + bld r18,2 + bst r13,0 + bld r18,3 + bst r23,1 + bld r18,4 + bst r5,1 + bld r18,5 + bst r9,1 + bld r18,6 + bst r13,1 + bld r18,7 + bst r23,2 + bld r19,0 + bst r5,2 + bld r19,1 + bst r9,2 + bld r19,2 + bst r13,2 + bld r19,3 + bst r23,3 + bld r19,4 + bst r5,3 + bld r19,5 + bst r9,3 + bld r19,6 + bst r13,3 + bld r19,7 + bst r23,4 + bld r20,0 + bst r5,4 + bld r20,1 + bst r9,4 + bld r20,2 + bst r13,4 + bld r20,3 + bst r23,5 + bld r20,4 + bst r5,5 + bld r20,5 + bst r9,5 + bld r20,6 + bst r13,5 + bld r20,7 + bst r23,6 + bld r21,0 + bst r5,6 + bld r21,1 + bst r9,6 + bld r21,2 + bst r13,6 + bld r21,3 + bst r23,7 + bld r21,4 + bst r5,7 + bld r21,5 + bst r9,7 + bld r21,6 + bst r13,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r2,0 + bld r18,0 + bst r6,0 + bld r18,1 + bst r10,0 + bld r18,2 + bst r14,0 + bld r18,3 + bst r2,1 + bld r18,4 + bst r6,1 + bld r18,5 + bst r10,1 + bld r18,6 + bst r14,1 + bld r18,7 + bst r2,2 + bld r19,0 + bst r6,2 + bld r19,1 + bst r10,2 + bld r19,2 + bst r14,2 + bld r19,3 + bst r2,3 + bld r19,4 + bst r6,3 + bld r19,5 + bst r10,3 + bld r19,6 + bst r14,3 + bld r19,7 + bst r2,4 + bld r20,0 + bst r6,4 + bld r20,1 + bst r10,4 + bld r20,2 + bst r14,4 + bld r20,3 + bst r2,5 + bld r20,4 + bst r6,5 + bld r20,5 + bst r10,5 + bld r20,6 + bst r14,5 + bld r20,7 + bst r2,6 + bld r21,0 + bst r6,6 + bld r21,1 + bst r10,6 + bld r21,2 + bst r14,6 + bld r21,3 + bst r2,7 + bld r21,4 + bst r6,7 + bld r21,5 + bst r10,7 + bld r21,6 + bst r14,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r3,0 + bld r18,0 + bst r7,0 + bld r18,1 + bst r11,0 + bld r18,2 + bst r15,0 + bld r18,3 + bst r3,1 + bld r18,4 + bst r7,1 + bld r18,5 + bst r11,1 + bld r18,6 + bst r15,1 + bld r18,7 + bst r3,2 + bld r19,0 + bst r7,2 + bld r19,1 + bst r11,2 + bld r19,2 + bst r15,2 + bld r19,3 + bst r3,3 + bld r19,4 + bst r7,3 + bld r19,5 + bst r11,3 + bld r19,6 + bst r15,3 + bld r19,7 + bst r3,4 + bld r20,0 + bst 
r7,4 + bld r20,1 + bst r11,4 + bld r20,2 + bst r15,4 + bld r20,3 + bst r3,5 + bld r20,4 + bst r7,5 + bld r20,5 + bst r11,5 + bld r20,6 + bst r15,5 + bld r20,7 + bst r3,6 + bld r21,0 + bst r7,6 + bld r21,1 + bst r11,6 + bld r21,2 + bst r15,6 + bld r21,3 + bst r3,7 + bld r21,4 + bst r7,7 + bld r21,5 + bst r11,7 + bld r21,6 + bst r15,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128n_decrypt, .-gift128n_decrypt + + .text +.global gift128t_encrypt + .type gift128t_encrypt, @function +gift128t_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ldi r19,20 +1: + ld r2,Z+ + ld r3,Z+ + ld r4,Z+ + ld r5,Z+ + std Y+1,r2 + std Y+2,r3 + std Y+3,r4 + std Y+4,r5 + adiw r28,4 + dec r19 + brne 1b + subi r28,80 + sbc r29,r1 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r2,0 + bst r20,1 + bld r6,0 + bst r20,2 + bld r10,0 + bst r20,3 + bld r14,0 + bst r20,4 + bld r2,1 + bst r20,5 + bld r6,1 + bst r20,6 + bld r10,1 + bst r20,7 + bld r14,1 + bst r21,0 + bld r2,2 + bst r21,1 + bld r6,2 + bst r21,2 + bld r10,2 + bst r21,3 + bld r14,2 + bst r21,4 + bld r2,3 + bst r21,5 + bld r6,3 + bst r21,6 + bld r10,3 + bst r21,7 + bld r14,3 + bst r22,0 + bld r2,4 + bst r22,1 + bld r6,4 + bst r22,2 + bld r10,4 + bst r22,3 + bld r14,4 + bst r22,4 + bld r2,5 + bst r22,5 + bld r6,5 + bst r22,6 + bld r10,5 + bst r22,7 + bld r14,5 + bst r23,0 + bld r2,6 + bst r23,1 + bld r6,6 + bst r23,2 + bld r10,6 + bst r23,3 + bld r14,6 + bst r23,4 + bld r2,7 + bst r23,5 + bld r6,7 + bst r23,6 + bld r10,7 + bst r23,7 + bld r14,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r3,0 + bst r20,1 + bld r7,0 + bst r20,2 + bld r11,0 + bst r20,3 + bld r15,0 + bst r20,4 + bld r3,1 + bst r20,5 + bld r7,1 + bst r20,6 + bld r11,1 + bst r20,7 + bld r15,1 + bst r21,0 + bld r3,2 + bst r21,1 + bld r7,2 + bst r21,2 + bld r11,2 + bst r21,3 + bld r15,2 + bst r21,4 + bld r3,3 + bst r21,5 + bld r7,3 + bst r21,6 + bld r11,3 + bst r21,7 + bld r15,3 + bst r22,0 + bld r3,4 + bst r22,1 + bld r7,4 + bst r22,2 + bld r11,4 + bst r22,3 + bld r15,4 + bst r22,4 + bld r3,5 + bst r22,5 + bld r7,5 + bst r22,6 + bld r11,5 + bst r22,7 + bld r15,5 + bst r23,0 + bld r3,6 + bst r23,1 + bld r7,6 + bst r23,2 + bld r11,6 + bst r23,3 + bld r15,6 + bst r23,4 + bld r3,7 + bst r23,5 + bld r7,7 + bst r23,6 + bld r11,7 + bst r23,7 + bld r15,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r4,0 + bst r20,1 + bld r8,0 + bst r20,2 + bld r12,0 + bst r20,3 + bld r24,0 + bst r20,4 + bld r4,1 + bst r20,5 + bld r8,1 + bst r20,6 + bld r12,1 + bst r20,7 + bld r24,1 + bst r21,0 + bld r4,2 + bst r21,1 + bld r8,2 + bst r21,2 + bld r12,2 + bst r21,3 + bld r24,2 + bst r21,4 + bld r4,3 + bst r21,5 + bld r8,3 + bst r21,6 + bld r12,3 + bst r21,7 + bld r24,3 + bst r22,0 + bld r4,4 + bst r22,1 + bld r8,4 + bst r22,2 + bld r12,4 + bst r22,3 + bld r24,4 + bst r22,4 + bld r4,5 + bst r22,5 + bld r8,5 + bst r22,6 + bld r12,5 + bst r22,7 + bld r24,5 + bst 
r23,0 + bld r4,6 + bst r23,1 + bld r8,6 + bst r23,2 + bld r12,6 + bst r23,3 + bld r24,6 + bst r23,4 + bld r4,7 + bst r23,5 + bld r8,7 + bst r23,6 + bld r12,7 + bst r23,7 + bld r24,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r5,0 + bst r20,1 + bld r9,0 + bst r20,2 + bld r13,0 + bst r20,3 + bld r25,0 + bst r20,4 + bld r5,1 + bst r20,5 + bld r9,1 + bst r20,6 + bld r13,1 + bst r20,7 + bld r25,1 + bst r21,0 + bld r5,2 + bst r21,1 + bld r9,2 + bst r21,2 + bld r13,2 + bst r21,3 + bld r25,2 + bst r21,4 + bld r5,3 + bst r21,5 + bld r9,3 + bst r21,6 + bld r13,3 + bst r21,7 + bld r25,3 + bst r22,0 + bld r5,4 + bst r22,1 + bld r9,4 + bst r22,2 + bld r13,4 + bst r22,3 + bld r25,4 + bst r22,4 + bld r5,5 + bst r22,5 + bld r9,5 + bst r22,6 + bld r13,5 + bst r22,7 + bld r25,5 + bst r23,0 + bld r5,6 + bst r23,1 + bld r9,6 + bst r23,2 + bld r13,6 + bst r23,3 + bld r25,6 + bst r23,4 + bld r5,7 + bst r23,5 + bld r9,7 + bst r23,6 + bld r13,7 + bst r23,7 + bld r25,7 + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + rcall 357f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1095f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,20 + adiw r26,40 + rcall 357f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1095f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,40 + sbiw r26,40 + rcall 357f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1095f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,60 + adiw r26,40 + rcall 357f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1095f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,80 + sbiw r26,40 + rcall 357f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1095f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,100 + adiw r26,40 + rcall 357f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 1095f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,120 + sbiw r26,40 + rcall 357f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 357f + rjmp 1570f +357: + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 
+ mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + com r14 + com r15 + com r24 + com r25 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + movw r20,r6 + movw r22,r8 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,204 + andi r21,204 + andi r22,204 + andi r23,204 + lsr r9 + ror r8 + ror r7 + ror r6 + lsr r9 + ror r8 + ror r7 + ror r6 + ldi r19,51 + and r6,r19 + and r7,r19 + and r8,r19 + and r9,r19 + or r6,r20 + or r7,r21 + or r8,r22 + or r9,r23 + movw r20,r10 + movw r22,r12 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,238 + andi r21,238 + andi r22,238 + andi r23,238 + lsr r13 + ror r12 + ror r11 + ror r10 + lsr r13 + ror r12 + ror r11 + ror r10 + lsr r13 + ror r12 + ror r11 + ror r10 + ldi r17,17 + and r10,r17 + and r11,r17 + and r12,r17 + and r13,r17 + or r10,r20 + or r11,r21 + or r12,r22 + or r13,r23 + movw r20,r14 + movw r22,r24 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,136 + andi r21,136 + andi r22,136 + andi r23,136 + lsr r25 + ror r24 + ror r15 + ror r14 + ldi r16,119 + and r14,r16 + and r15,r16 + andi r24,119 + andi r25,119 + or r14,r20 + or r15,r21 + or r24,r22 + or r25,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r24 + and r0,r12 + eor r8,r0 + mov r0,r25 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r8 + and r0,r4 + eor r24,r0 + mov r0,r9 + and r0,r5 + eor r25,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r24 + or r0,r8 + eor r12,r0 + mov r0,r25 + or r0,r9 + eor r13,r0 + eor r2,r10 + eor r3,r11 + eor r4,r12 + eor r5,r13 + eor r6,r2 + eor r7,r3 + eor r8,r4 + eor r9,r5 + com r2 + com r3 + com r4 + com r5 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r24 + and r0,r8 + eor r12,r0 + mov r0,r25 + and r0,r9 + eor r13,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r1 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + or r5,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + mov r0,r9 + mov r9,r8 + mov r8,r0 + 
lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r12 + rol r13 + adc r12,r1 + lsl r12 + rol r13 + adc r12,r1 + lsl r12 + rol r13 + adc r12,r1 + lsl r12 + rol r13 + adc r12,r1 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r14,r20 + eor r15,r21 + eor r24,r22 + eor r25,r23 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + com r14 + com r15 + com r24 + com r25 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + movw r20,r6 + movw r22,r8 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + andi r20,85 + andi r21,85 + andi r22,85 + andi r23,85 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + mov r0,r12 + mov r12,r10 + mov r10,r0 + mov r0,r13 + mov r13,r11 + mov r11,r0 + movw r20,r10 + movw r22,r12 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r10 + eor r21,r11 + andi r20,85 + andi r21,85 + eor r10,r20 + eor r11,r21 + mov r22,r1 + mov r23,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + mov r0,r24 + mov r24,r14 + mov r14,r0 + mov r0,r25 + mov r25,r15 + mov r15,r0 + movw r20,r24 + lsr r21 + ror r20 + eor r20,r24 + eor r21,r25 + andi r20,85 + andi r21,85 + eor r24,r20 + eor r25,r21 + lsl r20 + rol r21 + eor r24,r20 + eor r25,r21 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm 
r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r24 + and r0,r12 + eor r8,r0 + mov r0,r25 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r8 + and r0,r4 + eor r24,r0 + mov r0,r9 + and r0,r5 + eor r25,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r24 + or r0,r8 + eor r12,r0 + mov r0,r25 + or r0,r9 + eor r13,r0 + eor r2,r10 + eor r3,r11 + eor r4,r12 + eor r5,r13 + eor r6,r2 + eor r7,r3 + eor r8,r4 + eor r9,r5 + com r2 + com r3 + com r4 + com r5 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r24 + and r0,r8 + eor r12,r0 + mov r0,r25 + and r0,r9 + eor r13,r0 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r5 + adc r5,r1 + lsl r5 + adc r5,r1 + swap r6 + swap r7 + swap r8 + swap r9 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + mov r0,r1 + lsr r12 + ror r0 + lsr r12 + ror r0 + or r12,r0 + mov r0,r1 + lsr r13 + ror r0 + lsr r13 + ror r0 + or r13,r0 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r14,r20 + eor r15,r21 + eor r24,r22 + eor r25,r23 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + com r14 + com r15 + com r24 + com r25 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + mov r0,r8 + mov r8,r6 + mov r6,r0 + mov r0,r9 + mov r9,r7 + mov r7,r0 + mov r0,r10 + mov r10,r11 + mov r11,r12 + mov r12,r13 + mov r13,r0 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld 
r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + eor r14,r2 + eor r15,r3 + eor r24,r4 + eor r25,r5 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + ret +1095: + movw r30,r26 + sbiw r30,40 + push r5 + push r4 + push r3 + push r2 + push r9 + push r8 + push r7 + push r6 + ld r2,Z + ldd r3,Z+1 + ldd r4,Z+2 + ldd r5,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r16,Z+6 + ldd r17,Z+7 + movw r20,r26 + movw r22,r16 + movw r20,r22 + mov r22,r1 + mov r23,r1 + eor r20,r26 + eor r21,r27 + andi r20,51 + andi r21,51 + eor r26,r20 + eor r27,r21 + mov r22,r1 + mov r23,r1 + movw r22,r20 + mov r20,r1 + mov r21,r1 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,68 + andi r21,68 + andi r22,85 + andi r23,85 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + st Z,r26 + std Z+1,r27 + std Z+2,r16 + std Z+3,r17 + movw r20,r2 + movw r22,r4 + andi r20,51 + andi r21,51 + andi r22,51 + andi r23,51 + ldi r19,204 + and r2,r19 + and r3,r19 + and r4,r19 + and r5,r19 + or r4,r23 + or r5,r20 + or r2,r21 + or r3,r22 + movw r20,r4 + movw r22,r2 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r4 + eor r21,r5 + eor r22,r2 + eor r23,r3 + mov r20,r1 + andi r21,17 + andi r22,85 + andi r23,85 + eor r4,r20 + eor r5,r21 + eor r2,r22 + eor r3,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r4,r20 + eor r5,r21 + eor r2,r22 + eor r3,r23 + std Z+4,r4 + std Z+5,r5 + std Z+6,r2 + std Z+7,r3 + ldd r2,Z+8 + ldd r3,Z+9 + ldd r4,Z+10 + ldd r5,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r16,Z+14 + ldd r17,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r16 + adc r16,r1 + lsl r16 + adc r16,r1 + swap r17 + std Z+8,r26 + std Z+9,r27 + std Z+10,r16 + std Z+11,r17 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r5 + adc r5,r1 + lsl r5 + adc r5,r1 + std Z+12,r2 + std Z+13,r3 + std Z+14,r4 + std Z+15,r5 + ldd r2,Z+16 + ldd r3,Z+17 + ldd r4,Z+18 + ldd r5,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r16,Z+22 + ldd r17,Z+23 + movw r20,r26 + movw r22,r16 + andi r20,170 + andi r21,170 + andi r22,170 + andi r23,170 + andi r26,85 + andi r27,85 + andi r16,85 + andi r17,85 + or r26,r21 + or r27,r22 + or r16,r23 + or r17,r20 + std Z+16,r16 + std Z+17,r17 + std Z+18,r26 + std Z+19,r27 + movw r20,r2 + movw r22,r4 + andi r20,85 + andi r21,85 + andi r22,85 + andi r23,85 + ldi r19,170 + and r2,r19 + and r3,r19 + and r4,r19 + and r5,r19 + lsl r2 + rol r3 + rol r4 + rol r5 + adc r2,r1 + lsl r2 + rol r3 + rol r4 + rol r5 + adc r2,r1 + lsl r2 + rol r3 + rol r4 + rol r5 + adc r2,r1 + lsl r2 + rol r3 + rol r4 + rol r5 + adc r2,r1 + or r2,r20 + or r3,r21 + or r4,r22 + or r5,r23 + std Z+20,r5 + std Z+21,r2 + 
std Z+22,r3 + std Z+23,r4 + ldd r2,Z+24 + ldd r3,Z+25 + ldd r4,Z+26 + ldd r5,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r16,Z+30 + ldd r17,Z+31 + movw r20,r26 + movw r22,r16 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,3 + andi r21,3 + andi r22,3 + andi r23,3 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + lsr r23 + ror r22 + ror r21 + ror r20 + andi r20,120 + andi r21,120 + andi r22,120 + andi r23,120 + movw r6,r20 + movw r8,r22 + lsr r9 + ror r8 + ror r7 + ror r6 + lsr r9 + ror r8 + ror r7 + ror r6 + lsr r9 + ror r8 + ror r7 + ror r6 + lsr r9 + ror r8 + ror r7 + ror r6 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ldi r19,8 + and r6,r19 + and r7,r19 + and r8,r19 + and r9,r19 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + lsl r6 + rol r7 + rol r8 + rol r9 + lsl r6 + rol r7 + rol r8 + rol r9 + lsl r6 + rol r7 + rol r8 + rol r9 + lsl r6 + rol r7 + rol r8 + rol r9 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + andi r26,15 + andi r27,15 + andi r16,15 + andi r17,15 + or r26,r20 + or r27,r21 + or r16,r22 + or r17,r23 + std Z+24,r26 + std Z+25,r27 + std Z+26,r16 + std Z+27,r17 + movw r20,r4 + lsr r21 + ror r20 + lsr r21 + ror r20 + andi r20,48 + andi r21,48 + movw r26,r2 + movw r16,r4 + andi r26,1 + andi r27,1 + andi r16,1 + andi r17,1 + lsl r26 + rol r27 + rol r16 + rol r17 + lsl r26 + rol r27 + rol r16 + rol r17 + lsl r26 + rol r27 + rol r16 + rol r17 + or r26,r20 + or r27,r21 + movw r20,r4 + lsl r20 + rol r21 + lsl r20 + rol r21 + andi r20,192 + andi r21,192 + or r26,r20 + or r27,r21 + movw r20,r2 + andi r20,224 + andi r21,224 + lsr r21 + ror r20 + or r16,r20 + or r17,r21 + movw r20,r2 + movw r22,r4 + lsr r23 + ror r22 + ror r21 + ror r20 + andi r20,7 + andi r21,7 + andi r22,7 + andi r23,7 + or r26,r20 + or r27,r21 + or r16,r22 + or r17,r23 + ldi r19,16 + and r2,r19 + and r3,r19 + lsl r2 + rol r3 + lsl r2 + rol r3 + lsl r2 + rol r3 + or r16,r2 + or r17,r3 + std Z+28,r26 + std Z+29,r27 + std Z+30,r16 + std Z+31,r17 + ldd r2,Z+32 + ldd r3,Z+33 + ldd r4,Z+34 + ldd r5,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r16,Z+38 + ldd r17,Z+39 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r16 + std Z+35,r17 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r4 + mov r4,r5 + mov r5,r0 + lsl r4 + rol r5 + adc r4,r1 + lsl r4 + rol r5 + adc r4,r1 + std Z+36,r2 + std Z+37,r3 + std Z+38,r4 + std Z+39,r5 + pop r6 + pop r7 + pop r8 + pop r9 + pop r2 + pop r3 + pop r4 + pop r5 + movw r26,r30 + ret +1570: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + bst r2,0 + bld r20,0 + bst r6,0 + bld r20,1 + bst r10,0 + bld r20,2 + bst r14,0 + bld r20,3 + bst r2,1 + bld r20,4 + bst r6,1 + bld r20,5 + bst r10,1 + bld r20,6 + bst r14,1 + bld r20,7 + bst r2,2 + bld r21,0 + bst r6,2 + bld r21,1 + bst r10,2 + bld r21,2 + bst r14,2 + bld r21,3 + bst r2,3 + bld r21,4 + bst r6,3 + bld r21,5 + bst r10,3 + bld r21,6 + bst r14,3 + bld r21,7 + bst r2,4 + bld r22,0 + bst r6,4 + bld r22,1 + bst r10,4 + bld r22,2 + bst r14,4 + bld 
r22,3 + bst r2,5 + bld r22,4 + bst r6,5 + bld r22,5 + bst r10,5 + bld r22,6 + bst r14,5 + bld r22,7 + bst r2,6 + bld r23,0 + bst r6,6 + bld r23,1 + bst r10,6 + bld r23,2 + bst r14,6 + bld r23,3 + bst r2,7 + bld r23,4 + bst r6,7 + bld r23,5 + bst r10,7 + bld r23,6 + bst r14,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r3,0 + bld r20,0 + bst r7,0 + bld r20,1 + bst r11,0 + bld r20,2 + bst r15,0 + bld r20,3 + bst r3,1 + bld r20,4 + bst r7,1 + bld r20,5 + bst r11,1 + bld r20,6 + bst r15,1 + bld r20,7 + bst r3,2 + bld r21,0 + bst r7,2 + bld r21,1 + bst r11,2 + bld r21,2 + bst r15,2 + bld r21,3 + bst r3,3 + bld r21,4 + bst r7,3 + bld r21,5 + bst r11,3 + bld r21,6 + bst r15,3 + bld r21,7 + bst r3,4 + bld r22,0 + bst r7,4 + bld r22,1 + bst r11,4 + bld r22,2 + bst r15,4 + bld r22,3 + bst r3,5 + bld r22,4 + bst r7,5 + bld r22,5 + bst r11,5 + bld r22,6 + bst r15,5 + bld r22,7 + bst r3,6 + bld r23,0 + bst r7,6 + bld r23,1 + bst r11,6 + bld r23,2 + bst r15,6 + bld r23,3 + bst r3,7 + bld r23,4 + bst r7,7 + bld r23,5 + bst r11,7 + bld r23,6 + bst r15,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r4,0 + bld r20,0 + bst r8,0 + bld r20,1 + bst r12,0 + bld r20,2 + bst r24,0 + bld r20,3 + bst r4,1 + bld r20,4 + bst r8,1 + bld r20,5 + bst r12,1 + bld r20,6 + bst r24,1 + bld r20,7 + bst r4,2 + bld r21,0 + bst r8,2 + bld r21,1 + bst r12,2 + bld r21,2 + bst r24,2 + bld r21,3 + bst r4,3 + bld r21,4 + bst r8,3 + bld r21,5 + bst r12,3 + bld r21,6 + bst r24,3 + bld r21,7 + bst r4,4 + bld r22,0 + bst r8,4 + bld r22,1 + bst r12,4 + bld r22,2 + bst r24,4 + bld r22,3 + bst r4,5 + bld r22,4 + bst r8,5 + bld r22,5 + bst r12,5 + bld r22,6 + bst r24,5 + bld r22,7 + bst r4,6 + bld r23,0 + bst r8,6 + bld r23,1 + bst r12,6 + bld r23,2 + bst r24,6 + bld r23,3 + bst r4,7 + bld r23,4 + bst r8,7 + bld r23,5 + bst r12,7 + bld r23,6 + bst r24,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r5,0 + bld r20,0 + bst r9,0 + bld r20,1 + bst r13,0 + bld r20,2 + bst r25,0 + bld r20,3 + bst r5,1 + bld r20,4 + bst r9,1 + bld r20,5 + bst r13,1 + bld r20,6 + bst r25,1 + bld r20,7 + bst r5,2 + bld r21,0 + bst r9,2 + bld r21,1 + bst r13,2 + bld r21,2 + bst r25,2 + bld r21,3 + bst r5,3 + bld r21,4 + bst r9,3 + bld r21,5 + bst r13,3 + bld r21,6 + bst r25,3 + bld r21,7 + bst r5,4 + bld r22,0 + bst r9,4 + bld r22,1 + bst r13,4 + bld r22,2 + bst r25,4 + bld r22,3 + bst r5,5 + bld r22,4 + bst r9,5 + bld r22,5 + bst r13,5 + bld r22,6 + bst r25,5 + bld r22,7 + bst r5,6 + bld r23,0 + bst r9,6 + bld r23,1 + bst r13,6 + bld r23,2 + bst r25,6 + bld r23,3 + bst r5,7 + bld r23,4 + bst r9,7 + bld r23,5 + bst r13,7 + bld r23,6 + bst r25,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128t_encrypt, .-gift128t_encrypt + + .text +.global gift128t_decrypt + .type gift128t_decrypt, @function +gift128t_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 36 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld 
r23,X+ + bst r20,0 + bld r2,0 + bst r20,1 + bld r6,0 + bst r20,2 + bld r10,0 + bst r20,3 + bld r14,0 + bst r20,4 + bld r2,1 + bst r20,5 + bld r6,1 + bst r20,6 + bld r10,1 + bst r20,7 + bld r14,1 + bst r21,0 + bld r2,2 + bst r21,1 + bld r6,2 + bst r21,2 + bld r10,2 + bst r21,3 + bld r14,2 + bst r21,4 + bld r2,3 + bst r21,5 + bld r6,3 + bst r21,6 + bld r10,3 + bst r21,7 + bld r14,3 + bst r22,0 + bld r2,4 + bst r22,1 + bld r6,4 + bst r22,2 + bld r10,4 + bst r22,3 + bld r14,4 + bst r22,4 + bld r2,5 + bst r22,5 + bld r6,5 + bst r22,6 + bld r10,5 + bst r22,7 + bld r14,5 + bst r23,0 + bld r2,6 + bst r23,1 + bld r6,6 + bst r23,2 + bld r10,6 + bst r23,3 + bld r14,6 + bst r23,4 + bld r2,7 + bst r23,5 + bld r6,7 + bst r23,6 + bld r10,7 + bst r23,7 + bld r14,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r3,0 + bst r20,1 + bld r7,0 + bst r20,2 + bld r11,0 + bst r20,3 + bld r15,0 + bst r20,4 + bld r3,1 + bst r20,5 + bld r7,1 + bst r20,6 + bld r11,1 + bst r20,7 + bld r15,1 + bst r21,0 + bld r3,2 + bst r21,1 + bld r7,2 + bst r21,2 + bld r11,2 + bst r21,3 + bld r15,2 + bst r21,4 + bld r3,3 + bst r21,5 + bld r7,3 + bst r21,6 + bld r11,3 + bst r21,7 + bld r15,3 + bst r22,0 + bld r3,4 + bst r22,1 + bld r7,4 + bst r22,2 + bld r11,4 + bst r22,3 + bld r15,4 + bst r22,4 + bld r3,5 + bst r22,5 + bld r7,5 + bst r22,6 + bld r11,5 + bst r22,7 + bld r15,5 + bst r23,0 + bld r3,6 + bst r23,1 + bld r7,6 + bst r23,2 + bld r11,6 + bst r23,3 + bld r15,6 + bst r23,4 + bld r3,7 + bst r23,5 + bld r7,7 + bst r23,6 + bld r11,7 + bst r23,7 + bld r15,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r4,0 + bst r20,1 + bld r8,0 + bst r20,2 + bld r12,0 + bst r20,3 + bld r24,0 + bst r20,4 + bld r4,1 + bst r20,5 + bld r8,1 + bst r20,6 + bld r12,1 + bst r20,7 + bld r24,1 + bst r21,0 + bld r4,2 + bst r21,1 + bld r8,2 + bst r21,2 + bld r12,2 + bst r21,3 + bld r24,2 + bst r21,4 + bld r4,3 + bst r21,5 + bld r8,3 + bst r21,6 + bld r12,3 + bst r21,7 + bld r24,3 + bst r22,0 + bld r4,4 + bst r22,1 + bld r8,4 + bst r22,2 + bld r12,4 + bst r22,3 + bld r24,4 + bst r22,4 + bld r4,5 + bst r22,5 + bld r8,5 + bst r22,6 + bld r12,5 + bst r22,7 + bld r24,5 + bst r23,0 + bld r4,6 + bst r23,1 + bld r8,6 + bst r23,2 + bld r12,6 + bst r23,3 + bld r24,6 + bst r23,4 + bld r4,7 + bst r23,5 + bld r8,7 + bst r23,6 + bld r12,7 + bst r23,7 + bld r24,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r5,0 + bst r20,1 + bld r9,0 + bst r20,2 + bld r13,0 + bst r20,3 + bld r25,0 + bst r20,4 + bld r5,1 + bst r20,5 + bld r9,1 + bst r20,6 + bld r13,1 + bst r20,7 + bld r25,1 + bst r21,0 + bld r5,2 + bst r21,1 + bld r9,2 + bst r21,2 + bld r13,2 + bst r21,3 + bld r25,2 + bst r21,4 + bld r5,3 + bst r21,5 + bld r9,3 + bst r21,6 + bld r13,3 + bst r21,7 + bld r25,3 + bst r22,0 + bld r5,4 + bst r22,1 + bld r9,4 + bst r22,2 + bld r13,4 + bst r22,3 + bld r25,4 + bst r22,4 + bld r5,5 + bst r22,5 + bld r9,5 + bst r22,6 + bld r13,5 + bst r22,7 + bld r25,5 + bst r23,0 + bld r5,6 + bst r23,1 + bld r9,6 + bst r23,2 + bld r13,6 + bst r23,3 + bld r25,6 + bst r23,4 + bld r5,7 + bst r23,5 + bld r9,7 + bst r23,6 + bld r13,7 + bst r23,7 + bld r25,7 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r16,Z+14 + ldd r17,Z+15 + mov r0,r17 + mov r17,r26 + mov r26,r0 + movw r20,r26 + movw r22,r16 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor 
r23,r17 + andi r20,15 + mov r21,r1 + andi r22,15 + mov r23,r1 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + mov r0,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r0 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,3 + andi r21,3 + andi r22,3 + andi r23,3 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r0,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r0 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,17 + andi r21,17 + andi r22,17 + andi r23,17 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r16 + std Y+4,r17 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r16,Z+6 + ldd r17,Z+7 + mov r0,r17 + mov r17,r26 + mov r26,r0 + movw r20,r26 + movw r22,r16 + movw r20,r22 + mov r22,r1 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + andi r20,51 + andi r21,51 + eor r26,r20 + eor r27,r21 + mov r22,r1 + mov r23,r1 + movw r22,r20 + mov r20,r1 + mov r21,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,15 + mov r21,r1 + andi r22,15 + mov r23,r1 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,85 + mov r21,r1 + andi r22,85 + mov r23,r1 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+5,r26 + 
std Y+6,r27 + std Y+7,r16 + std Y+8,r17 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r16,Z+10 + ldd r17,Z+11 + mov r0,r17 + mov r17,r26 + mov r26,r0 + movw r20,r26 + movw r22,r16 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,15 + mov r21,r1 + andi r22,15 + mov r23,r1 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + mov r0,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r0 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,3 + andi r21,3 + andi r22,3 + andi r23,3 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r0,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r0 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,17 + andi r21,17 + andi r22,17 + andi r23,17 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r16 + std Y+12,r17 + ld r26,Z + ldd r27,Z+1 + ldd r16,Z+2 + ldd r17,Z+3 + mov r0,r17 + mov r17,r26 + mov r26,r0 + movw r20,r26 + movw r22,r16 + movw r20,r22 + mov r22,r1 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + andi r20,51 + andi r21,51 + eor r26,r20 + eor r27,r21 + mov r22,r1 + mov r23,r1 + movw r22,r20 + mov r20,r1 + mov r21,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,15 + mov r21,r1 + andi r22,15 + mov r23,r1 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,85 + mov r21,r1 
+ andi r22,85 + mov r23,r1 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r16 + std Y+16,r17 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r26,hh8(table_1) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r19,40 + mov r26,r1 +939: + ldd r0,Y+13 + ldd r20,Y+9 + std Y+9,r0 + ldd r0,Y+5 + std Y+5,r20 + ldd r20,Y+1 + std Y+1,r0 + ldd r0,Y+14 + ldd r21,Y+10 + std Y+10,r0 + ldd r0,Y+6 + std Y+6,r21 + ldd r21,Y+2 + std Y+2,r0 + ldd r0,Y+15 + ldd r22,Y+11 + std Y+11,r0 + ldd r0,Y+7 + std Y+7,r22 + ldd r22,Y+3 + std Y+3,r0 + ldd r0,Y+16 + ldd r23,Y+12 + std Y+12,r0 + ldd r0,Y+8 + std Y+8,r23 + ldd r23,Y+4 + std Y+4,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r0 + lsr r21 + ror r20 + ror r0 + lsr r21 + ror r20 + ror r0 + lsr r21 + ror r20 + ror r0 + or r21,r0 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + std Y+13,r20 + std Y+14,r21 + std Y+15,r22 + std Y+16,r23 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ldd r0,Y+5 + eor r10,r0 + ldd r0,Y+6 + eor r11,r0 + ldd r0,Y+7 + eor r12,r0 + ldd r0,Y+8 + eor r13,r0 + ldi r20,128 + eor r25,r20 + dec r19 + mov r30,r19 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + eor r14,r20 + bst r2,1 + bld r0,0 + bst r5,0 + bld r2,1 + bst r2,6 + bld r5,0 + bst r4,1 + bld r2,6 + bst r5,4 + bld r4,1 + bst r2,7 + bld r5,4 + bst r3,1 + bld r2,7 + bst r5,2 + bld r3,1 + bst r4,6 + bld r5,2 + bst r4,5 + bld r4,6 + bst r5,5 + bld r4,5 + bst r5,7 + bld r5,5 + bst r3,7 + bld r5,7 + bst r3,3 + bld r3,7 + bst r3,2 + bld r3,3 + bst r4,2 + bld r3,2 + bst r4,4 + bld r4,2 + bst r2,5 + bld r4,4 + bst r5,1 + bld r2,5 + bst r5,6 + bld r5,1 + bst r4,7 + bld r5,6 + bst r3,5 + bld r4,7 + bst r5,3 + bld r3,5 + bst r3,6 + bld r5,3 + bst r4,3 + bld r3,6 + bst r3,4 + bld r4,3 + bst r2,3 + bld r3,4 + bst r3,0 + bld r2,3 + bst r2,2 + bld r3,0 + bst r4,0 + bld r2,2 + bst r2,4 + bld r4,0 + bst r0,0 + bld r2,4 + bst r6,0 + bld r0,0 + bst r7,0 + bld r6,0 + bst r7,2 + bld r7,0 + bst r9,2 + bld r7,2 + bst r9,6 + bld r9,2 + bst r9,7 + bld r9,6 + bst r8,7 + bld r9,7 + bst r8,5 + bld r8,7 + bst r6,5 + bld r8,5 + bst r6,1 + bld r6,5 + bst r0,0 + bld r6,1 + bst r6,2 + bld r0,0 + bst r9,0 + bld r6,2 + bst r7,6 + bld r9,0 + bst r9,3 + bld r7,6 + bst r8,6 + bld r9,3 + bst r9,5 + bld r8,6 + bst r6,7 + bld r9,5 + bst r8,1 + bld r6,7 + bst r6,4 + bld r8,1 + bst r7,1 + bld r6,4 + bst r0,0 + bld r7,1 + bst r6,3 + bld r0,0 + bst r8,0 + bld r6,3 + bst r7,4 + bld r8,0 + bst r7,3 + bld r7,4 + bst r8,2 + bld r7,3 + bst r9,4 + bld r8,2 + bst r7,7 + bld r9,4 + bst r8,3 + bld r7,7 + bst r8,4 + bld r8,3 + bst r7,5 + bld r8,4 + bst r0,0 + bld r7,5 + bst r6,6 + bld r0,0 + bst r9,1 + bld r6,6 + bst r0,0 + bld r9,1 + bst r10,0 + bld r0,0 + bst r12,0 + bld r10,0 + bst r12,4 + bld r12,0 + bst r12,5 + bld r12,4 + bst r11,5 + bld r12,5 + bst r11,3 + bld r11,5 + bst r13,2 + bld r11,3 + bst r10,6 + bld r13,2 + bst r10,1 + bld r10,6 + bst r11,0 + bld r10,1 + bst r12,2 + bld r11,0 + bst r10,4 + bld r12,2 + bst r12,1 + bld r10,4 + bst r11,4 + bld r12,1 + bst r12,3 + bld r11,4 + 
bst r13,4 + bld r12,3 + bst r12,7 + bld r13,4 + bst r13,5 + bld r12,7 + bst r11,7 + bld r13,5 + bst r13,3 + bld r11,7 + bst r13,6 + bld r13,3 + bst r10,7 + bld r13,6 + bst r13,1 + bld r10,7 + bst r11,6 + bld r13,1 + bst r10,3 + bld r11,6 + bst r13,0 + bld r10,3 + bst r12,6 + bld r13,0 + bst r10,5 + bld r12,6 + bst r11,1 + bld r10,5 + bst r11,2 + bld r11,1 + bst r10,2 + bld r11,2 + bst r0,0 + bld r10,2 + bst r14,0 + bld r0,0 + bst r25,0 + bld r14,0 + bst r25,6 + bld r25,0 + bst r15,7 + bld r25,6 + bst r14,3 + bld r15,7 + bst r0,0 + bld r14,3 + bst r14,1 + bld r0,0 + bst r24,0 + bld r14,1 + bst r25,4 + bld r24,0 + bst r25,7 + bld r25,4 + bst r14,7 + bld r25,7 + bst r0,0 + bld r14,7 + bst r14,2 + bld r0,0 + bst r15,0 + bld r14,2 + bst r25,2 + bld r15,0 + bst r15,6 + bld r25,2 + bst r15,3 + bld r15,6 + bst r0,0 + bld r15,3 + bst r14,4 + bld r0,0 + bst r25,1 + bld r14,4 + bst r24,6 + bld r25,1 + bst r15,5 + bld r24,6 + bst r24,3 + bld r15,5 + bst r0,0 + bld r24,3 + bst r14,5 + bld r0,0 + bst r24,1 + bld r14,5 + bst r24,4 + bld r24,1 + bst r25,5 + bld r24,4 + bst r24,7 + bld r25,5 + bst r0,0 + bld r24,7 + bst r14,6 + bld r0,0 + bst r15,1 + bld r14,6 + bst r24,2 + bld r15,1 + bst r15,4 + bld r24,2 + bst r25,3 + bld r15,4 + bst r0,0 + bld r25,3 + movw r20,r14 + movw r22,r24 + movw r14,r2 + movw r24,r4 + movw r2,r20 + movw r4,r22 + and r20,r6 + and r21,r7 + and r22,r8 + and r23,r9 + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + com r14 + com r15 + com r24 + com r25 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + cp r19,r1 + breq 1355f + inc r26 + ldi r27,5 + cpse r26,r27 + rjmp 939b + mov r26,r1 + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rjmp 939b +1355: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + bst r2,0 + bld r20,0 + bst r6,0 + bld r20,1 + bst r10,0 + bld r20,2 + bst r14,0 + bld r20,3 + bst r2,1 + bld r20,4 + bst r6,1 + bld r20,5 + bst r10,1 + bld r20,6 + bst r14,1 + bld r20,7 + bst r2,2 + bld r21,0 + bst r6,2 + bld r21,1 + bst r10,2 + bld r21,2 + bst r14,2 + bld r21,3 + bst r2,3 + bld r21,4 + bst r6,3 + bld r21,5 + bst r10,3 + bld r21,6 + bst r14,3 + bld r21,7 + bst r2,4 + bld r22,0 + bst r6,4 + bld r22,1 + bst r10,4 + bld r22,2 + bst r14,4 + bld r22,3 + bst r2,5 + bld r22,4 + bst r6,5 + bld r22,5 + bst r10,5 + bld r22,6 + bst r14,5 + bld r22,7 + bst r2,6 + bld r23,0 + bst r6,6 + bld r23,1 + bst r10,6 + bld r23,2 + bst r14,6 + bld r23,3 + bst r2,7 + bld r23,4 + bst r6,7 + bld r23,5 + bst r10,7 + bld r23,6 + bst r14,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r3,0 + bld r20,0 + bst r7,0 + bld r20,1 + bst r11,0 + bld r20,2 + bst r15,0 + bld r20,3 + bst r3,1 + bld r20,4 + bst r7,1 + bld r20,5 + bst r11,1 + bld r20,6 + bst r15,1 + bld r20,7 + bst r3,2 + bld r21,0 + bst r7,2 + bld r21,1 + bst r11,2 + bld r21,2 + bst r15,2 + bld r21,3 + bst r3,3 + bld r21,4 + bst r7,3 + bld r21,5 + bst r11,3 + bld r21,6 + bst r15,3 + bld r21,7 + bst r3,4 + bld r22,0 + bst r7,4 + bld r22,1 + bst r11,4 + bld r22,2 + bst r15,4 + 
bld r22,3 + bst r3,5 + bld r22,4 + bst r7,5 + bld r22,5 + bst r11,5 + bld r22,6 + bst r15,5 + bld r22,7 + bst r3,6 + bld r23,0 + bst r7,6 + bld r23,1 + bst r11,6 + bld r23,2 + bst r15,6 + bld r23,3 + bst r3,7 + bld r23,4 + bst r7,7 + bld r23,5 + bst r11,7 + bld r23,6 + bst r15,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r4,0 + bld r20,0 + bst r8,0 + bld r20,1 + bst r12,0 + bld r20,2 + bst r24,0 + bld r20,3 + bst r4,1 + bld r20,4 + bst r8,1 + bld r20,5 + bst r12,1 + bld r20,6 + bst r24,1 + bld r20,7 + bst r4,2 + bld r21,0 + bst r8,2 + bld r21,1 + bst r12,2 + bld r21,2 + bst r24,2 + bld r21,3 + bst r4,3 + bld r21,4 + bst r8,3 + bld r21,5 + bst r12,3 + bld r21,6 + bst r24,3 + bld r21,7 + bst r4,4 + bld r22,0 + bst r8,4 + bld r22,1 + bst r12,4 + bld r22,2 + bst r24,4 + bld r22,3 + bst r4,5 + bld r22,4 + bst r8,5 + bld r22,5 + bst r12,5 + bld r22,6 + bst r24,5 + bld r22,7 + bst r4,6 + bld r23,0 + bst r8,6 + bld r23,1 + bst r12,6 + bld r23,2 + bst r24,6 + bld r23,3 + bst r4,7 + bld r23,4 + bst r8,7 + bld r23,5 + bst r12,7 + bld r23,6 + bst r24,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r5,0 + bld r20,0 + bst r9,0 + bld r20,1 + bst r13,0 + bld r20,2 + bst r25,0 + bld r20,3 + bst r5,1 + bld r20,4 + bst r9,1 + bld r20,5 + bst r13,1 + bld r20,6 + bst r25,1 + bld r20,7 + bst r5,2 + bld r21,0 + bst r9,2 + bld r21,1 + bst r13,2 + bld r21,2 + bst r25,2 + bld r21,3 + bst r5,3 + bld r21,4 + bst r9,3 + bld r21,5 + bst r13,3 + bld r21,6 + bst r25,3 + bld r21,7 + bst r5,4 + bld r22,0 + bst r9,4 + bld r22,1 + bst r13,4 + bld r22,2 + bst r25,4 + bld r22,3 + bst r5,5 + bld r22,4 + bst r9,5 + bld r22,5 + bst r13,5 + bld r22,6 + bst r25,5 + bld r22,7 + bst r5,6 + bld r23,0 + bst r9,6 + bld r23,1 + bst r13,6 + bld r23,2 + bst r25,6 + bld r23,3 + bst r5,7 + bld r23,4 + bst r9,7 + bld r23,5 + bst r13,7 + bld r23,6 + bst r25,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128t_decrypt, .-gift128t_decrypt + +#endif + +#endif diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128n-tiny-avr.S b/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128n-tiny-avr.S new file mode 100644 index 0000000..dd1f7b9 --- /dev/null +++ b/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-gift128n-tiny-avr.S @@ -0,0 +1,9480 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_TINY + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 
0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 + .byte 128 + + .text +.global gift128n_init + .type gift128n_init, @function +gift128n_init: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 16 + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + st Z,r22 + std Z+1,r23 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size gift128n_init, .-gift128n_init + + .text +.global gift128n_encrypt + .type gift128n_encrypt, @function +gift128n_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + movw r30,r28 + adiw r30,1 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + ldi r24,4 +35: + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + mov r0,r22 + mov r22,r4 + mov r4,r0 + mov r0,r23 + mov r23,r5 + mov r5,r0 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r0,r8 + mov 
r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + dec r24 + breq 5117f + rjmp 35b +5117: + subi r30,80 + sbc r31,r1 + ldi r24,2 +121: + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + st Z,r3 + std Z+1,r23 + std Z+2,r2 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+4,r3 + std Z+5,r23 + std Z+6,r2 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + 
ldd r2,Z+10 + ldd r3,Z+11 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+8,r3 + std Z+9,r23 + std Z+10,r2 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol 
r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+12,r3 + std Z+13,r23 + std Z+14,r2 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r3 + std Z+17,r23 + std Z+18,r2 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+20,r3 + std Z+21,r23 + std Z+22,r2 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor 
r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+24,r3 + std Z+25,r23 + std Z+26,r2 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+28,r3 + std Z+29,r23 + std Z+30,r2 + std Z+31,r22 + dec r24 + breq 1270f + adiw r30,40 + rjmp 121b +1270: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r22,0 + bst r18,1 + bld r4,0 + bst r18,2 + bld r8,0 + bst r18,3 + bld r12,0 + bst r18,4 + bld r22,1 + bst r18,5 + bld r4,1 + bst r18,6 + bld r8,1 
+ bst r18,7 + bld r12,1 + bst r19,0 + bld r22,2 + bst r19,1 + bld r4,2 + bst r19,2 + bld r8,2 + bst r19,3 + bld r12,2 + bst r19,4 + bld r22,3 + bst r19,5 + bld r4,3 + bst r19,6 + bld r8,3 + bst r19,7 + bld r12,3 + bst r20,0 + bld r22,4 + bst r20,1 + bld r4,4 + bst r20,2 + bld r8,4 + bst r20,3 + bld r12,4 + bst r20,4 + bld r22,5 + bst r20,5 + bld r4,5 + bst r20,6 + bld r8,5 + bst r20,7 + bld r12,5 + bst r21,0 + bld r22,6 + bst r21,1 + bld r4,6 + bst r21,2 + bld r8,6 + bst r21,3 + bld r12,6 + bst r21,4 + bld r22,7 + bst r21,5 + bld r4,7 + bst r21,6 + bld r8,7 + bst r21,7 + bld r12,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r23,0 + bst r18,1 + bld r5,0 + bst r18,2 + bld r9,0 + bst r18,3 + bld r13,0 + bst r18,4 + bld r23,1 + bst r18,5 + bld r5,1 + bst r18,6 + bld r9,1 + bst r18,7 + bld r13,1 + bst r19,0 + bld r23,2 + bst r19,1 + bld r5,2 + bst r19,2 + bld r9,2 + bst r19,3 + bld r13,2 + bst r19,4 + bld r23,3 + bst r19,5 + bld r5,3 + bst r19,6 + bld r9,3 + bst r19,7 + bld r13,3 + bst r20,0 + bld r23,4 + bst r20,1 + bld r5,4 + bst r20,2 + bld r9,4 + bst r20,3 + bld r13,4 + bst r20,4 + bld r23,5 + bst r20,5 + bld r5,5 + bst r20,6 + bld r9,5 + bst r20,7 + bld r13,5 + bst r21,0 + bld r23,6 + bst r21,1 + bld r5,6 + bst r21,2 + bld r9,6 + bst r21,3 + bld r13,6 + bst r21,4 + bld r23,7 + bst r21,5 + bld r5,7 + bst r21,6 + bld r9,7 + bst r21,7 + bld r13,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r2,0 + bst r18,1 + bld r6,0 + bst r18,2 + bld r10,0 + bst r18,3 + bld r14,0 + bst r18,4 + bld r2,1 + bst r18,5 + bld r6,1 + bst r18,6 + bld r10,1 + bst r18,7 + bld r14,1 + bst r19,0 + bld r2,2 + bst r19,1 + bld r6,2 + bst r19,2 + bld r10,2 + bst r19,3 + bld r14,2 + bst r19,4 + bld r2,3 + bst r19,5 + bld r6,3 + bst r19,6 + bld r10,3 + bst r19,7 + bld r14,3 + bst r20,0 + bld r2,4 + bst r20,1 + bld r6,4 + bst r20,2 + bld r10,4 + bst r20,3 + bld r14,4 + bst r20,4 + bld r2,5 + bst r20,5 + bld r6,5 + bst r20,6 + bld r10,5 + bst r20,7 + bld r14,5 + bst r21,0 + bld r2,6 + bst r21,1 + bld r6,6 + bst r21,2 + bld r10,6 + bst r21,3 + bld r14,6 + bst r21,4 + bld r2,7 + bst r21,5 + bld r6,7 + bst r21,6 + bld r10,7 + bst r21,7 + bld r14,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r3,0 + bst r18,1 + bld r7,0 + bst r18,2 + bld r11,0 + bst r18,3 + bld r15,0 + bst r18,4 + bld r3,1 + bst r18,5 + bld r7,1 + bst r18,6 + bld r11,1 + bst r18,7 + bld r15,1 + bst r19,0 + bld r3,2 + bst r19,1 + bld r7,2 + bst r19,2 + bld r11,2 + bst r19,3 + bld r15,2 + bst r19,4 + bld r3,3 + bst r19,5 + bld r7,3 + bst r19,6 + bld r11,3 + bst r19,7 + bld r15,3 + bst r20,0 + bld r3,4 + bst r20,1 + bld r7,4 + bst r20,2 + bld r11,4 + bst r20,3 + bld r15,4 + bst r20,4 + bld r3,5 + bst r20,5 + bld r7,5 + bst r20,6 + bld r11,5 + bst r20,7 + bld r15,5 + bst r21,0 + bld r3,6 + bst r21,1 + bld r7,6 + bst r21,2 + bld r11,6 + bst r21,3 + bld r15,6 + bst r21,4 + bld r3,7 + bst r21,5 + bld r7,7 + bst r21,6 + bld r11,7 + bst r21,7 + bld r15,7 + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 1585f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2323f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 1585f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 
2323f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 1585f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2323f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + adiw r26,40 + rcall 1585f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2323f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 1585f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2323f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 1585f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2323f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 1585f + rcall 1585f + rjmp 2797f +1585: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov 
r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw 
r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) 
+ elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +2323: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 
+ eor r19,r3 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + 
or r24,r18 + or r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +2797: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + bst r22,0 + bld r18,0 + bst r4,0 + bld r18,1 + bst r8,0 + bld r18,2 + bst r12,0 + bld r18,3 + bst r22,1 + bld r18,4 + bst r4,1 + bld r18,5 + bst r8,1 + bld r18,6 + bst r12,1 + bld r18,7 + bst r22,2 + bld r19,0 + bst r4,2 + bld r19,1 + bst r8,2 + bld r19,2 + bst r12,2 + bld r19,3 + bst r22,3 + bld r19,4 + bst r4,3 + bld r19,5 + bst r8,3 + bld r19,6 + bst r12,3 + bld r19,7 + bst r22,4 + bld r20,0 + bst r4,4 + bld r20,1 + bst r8,4 + bld r20,2 + bst r12,4 + bld r20,3 + bst r22,5 + bld r20,4 + bst r4,5 + bld r20,5 + bst r8,5 + bld r20,6 + bst r12,5 + bld r20,7 + bst r22,6 + bld r21,0 + bst r4,6 + bld r21,1 + bst r8,6 + bld r21,2 + bst r12,6 + bld r21,3 + bst r22,7 + bld r21,4 + bst r4,7 + bld r21,5 + bst r8,7 + bld r21,6 + bst r12,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r23,0 + bld r18,0 + bst r5,0 + bld r18,1 + bst r9,0 + bld r18,2 + bst r13,0 + bld r18,3 + bst r23,1 + bld r18,4 + bst r5,1 + bld r18,5 + bst r9,1 + bld r18,6 + bst r13,1 + bld r18,7 + bst r23,2 + bld r19,0 + bst r5,2 + bld r19,1 + bst r9,2 + bld r19,2 + bst r13,2 + bld r19,3 + bst r23,3 + bld r19,4 + bst r5,3 + bld r19,5 + bst r9,3 + bld r19,6 + bst r13,3 + bld r19,7 + bst r23,4 + bld r20,0 + bst r5,4 + bld r20,1 + bst r9,4 + bld r20,2 + bst r13,4 + bld r20,3 + bst r23,5 + bld r20,4 + bst r5,5 + bld r20,5 + bst r9,5 + bld r20,6 + bst r13,5 + bld r20,7 + bst r23,6 + bld r21,0 + bst r5,6 + bld r21,1 + bst r9,6 + bld r21,2 + bst r13,6 + bld r21,3 + bst r23,7 + bld r21,4 + bst r5,7 + bld r21,5 + bst r9,7 + bld r21,6 + bst r13,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r2,0 + bld r18,0 + bst r6,0 + bld r18,1 + bst r10,0 + bld r18,2 + bst r14,0 + bld r18,3 + bst r2,1 + bld r18,4 + bst r6,1 + bld r18,5 + bst r10,1 + bld r18,6 + bst r14,1 + bld r18,7 + bst r2,2 + bld r19,0 + bst r6,2 + bld r19,1 + bst r10,2 + bld r19,2 + bst r14,2 + bld r19,3 + bst r2,3 + bld r19,4 + bst r6,3 + bld r19,5 + bst r10,3 + bld r19,6 + bst r14,3 + bld r19,7 + bst r2,4 + bld r20,0 + bst r6,4 + bld r20,1 + bst r10,4 + bld r20,2 + bst r14,4 + bld r20,3 + bst r2,5 + bld r20,4 + bst r6,5 + bld r20,5 + bst r10,5 + bld r20,6 + bst r14,5 + bld r20,7 + bst r2,6 + bld r21,0 + bst r6,6 + bld r21,1 + bst r10,6 + bld r21,2 + bst r14,6 + bld r21,3 + bst r2,7 + bld r21,4 + bst r6,7 + bld r21,5 + bst r10,7 + bld 
r21,6 + bst r14,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r3,0 + bld r18,0 + bst r7,0 + bld r18,1 + bst r11,0 + bld r18,2 + bst r15,0 + bld r18,3 + bst r3,1 + bld r18,4 + bst r7,1 + bld r18,5 + bst r11,1 + bld r18,6 + bst r15,1 + bld r18,7 + bst r3,2 + bld r19,0 + bst r7,2 + bld r19,1 + bst r11,2 + bld r19,2 + bst r15,2 + bld r19,3 + bst r3,3 + bld r19,4 + bst r7,3 + bld r19,5 + bst r11,3 + bld r19,6 + bst r15,3 + bld r19,7 + bst r3,4 + bld r20,0 + bst r7,4 + bld r20,1 + bst r11,4 + bld r20,2 + bst r15,4 + bld r20,3 + bst r3,5 + bld r20,4 + bst r7,5 + bld r20,5 + bst r11,5 + bld r20,6 + bst r15,5 + bld r20,7 + bst r3,6 + bld r21,0 + bst r7,6 + bld r21,1 + bst r11,6 + bld r21,2 + bst r15,6 + bld r21,3 + bst r3,7 + bld r21,4 + bst r7,7 + bld r21,5 + bst r11,7 + bld r21,6 + bst r15,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128n_encrypt, .-gift128n_encrypt + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 40 +table_1: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 11 + .byte 23 + .byte 46 + .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128n_decrypt + .type gift128n_decrypt, @function +gift128n_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r22,0 + bst r18,1 + bld r4,0 + bst r18,2 + bld r8,0 + bst r18,3 + bld r12,0 + bst r18,4 + bld r22,1 + bst r18,5 + bld r4,1 + bst r18,6 + bld r8,1 + bst r18,7 + bld r12,1 + bst r19,0 + bld r22,2 + bst r19,1 + bld r4,2 + bst r19,2 + bld r8,2 + bst r19,3 + bld r12,2 + bst r19,4 + bld r22,3 + bst r19,5 + bld r4,3 + bst r19,6 + bld r8,3 + bst r19,7 + bld r12,3 + bst r20,0 + bld r22,4 + bst r20,1 + bld r4,4 + bst r20,2 + bld r8,4 + bst r20,3 + bld r12,4 + bst r20,4 + bld r22,5 + bst r20,5 + bld r4,5 + bst r20,6 + bld r8,5 + bst r20,7 + bld r12,5 + bst r21,0 + bld r22,6 + bst r21,1 + bld r4,6 + bst r21,2 + bld r8,6 + bst r21,3 + bld r12,6 + bst r21,4 + bld r22,7 + bst r21,5 + bld r4,7 + bst r21,6 + bld r8,7 + bst r21,7 + bld r12,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r23,0 + bst r18,1 + bld r5,0 + bst r18,2 + bld r9,0 + bst r18,3 + bld r13,0 + bst r18,4 + bld r23,1 + bst r18,5 + bld r5,1 + bst r18,6 + bld r9,1 + bst r18,7 + bld r13,1 + bst r19,0 + bld r23,2 + bst r19,1 + bld r5,2 + bst r19,2 + bld r9,2 + bst r19,3 + bld r13,2 + bst r19,4 + bld r23,3 + bst r19,5 + bld r5,3 + bst r19,6 + bld r9,3 + bst r19,7 + bld r13,3 + bst r20,0 + bld r23,4 + bst r20,1 + bld r5,4 + bst r20,2 + bld r9,4 + bst r20,3 + bld r13,4 + bst r20,4 + bld r23,5 + bst r20,5 + bld r5,5 + 
bst r20,6 + bld r9,5 + bst r20,7 + bld r13,5 + bst r21,0 + bld r23,6 + bst r21,1 + bld r5,6 + bst r21,2 + bld r9,6 + bst r21,3 + bld r13,6 + bst r21,4 + bld r23,7 + bst r21,5 + bld r5,7 + bst r21,6 + bld r9,7 + bst r21,7 + bld r13,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r2,0 + bst r18,1 + bld r6,0 + bst r18,2 + bld r10,0 + bst r18,3 + bld r14,0 + bst r18,4 + bld r2,1 + bst r18,5 + bld r6,1 + bst r18,6 + bld r10,1 + bst r18,7 + bld r14,1 + bst r19,0 + bld r2,2 + bst r19,1 + bld r6,2 + bst r19,2 + bld r10,2 + bst r19,3 + bld r14,2 + bst r19,4 + bld r2,3 + bst r19,5 + bld r6,3 + bst r19,6 + bld r10,3 + bst r19,7 + bld r14,3 + bst r20,0 + bld r2,4 + bst r20,1 + bld r6,4 + bst r20,2 + bld r10,4 + bst r20,3 + bld r14,4 + bst r20,4 + bld r2,5 + bst r20,5 + bld r6,5 + bst r20,6 + bld r10,5 + bst r20,7 + bld r14,5 + bst r21,0 + bld r2,6 + bst r21,1 + bld r6,6 + bst r21,2 + bld r10,6 + bst r21,3 + bld r14,6 + bst r21,4 + bld r2,7 + bst r21,5 + bld r6,7 + bst r21,6 + bld r10,7 + bst r21,7 + bld r14,7 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + bst r18,0 + bld r3,0 + bst r18,1 + bld r7,0 + bst r18,2 + bld r11,0 + bst r18,3 + bld r15,0 + bst r18,4 + bld r3,1 + bst r18,5 + bld r7,1 + bst r18,6 + bld r11,1 + bst r18,7 + bld r15,1 + bst r19,0 + bld r3,2 + bst r19,1 + bld r7,2 + bst r19,2 + bld r11,2 + bst r19,3 + bld r15,2 + bst r19,4 + bld r3,3 + bst r19,5 + bld r7,3 + bst r19,6 + bld r11,3 + bst r19,7 + bld r15,3 + bst r20,0 + bld r3,4 + bst r20,1 + bld r7,4 + bst r20,2 + bld r11,4 + bst r20,3 + bld r15,4 + bst r20,4 + bld r3,5 + bst r20,5 + bld r7,5 + bst r20,6 + bld r11,5 + bst r20,7 + bld r15,5 + bst r21,0 + bld r3,6 + bst r21,1 + bld r7,6 + bst r21,2 + bld r11,6 + bst r21,3 + bld r15,6 + bst r21,4 + bld r3,7 + bst r21,5 + bld r7,7 + bst r21,6 + bld r11,7 + bst r21,7 + bld r15,7 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r17,hh8(table_1) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +370: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc 
r24,r1 + rcall 522f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 522f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 522f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 522f + cpse r16,r1 + rjmp 370b + rjmp 867f +522: + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 + bst r7,6 + bld r7,2 + bst r7,7 + bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + 
bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld r11,0 + bst r8,5 + bld r10,6 + bst r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +867: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + bst r22,0 + bld r18,0 + bst r4,0 + bld r18,1 + bst r8,0 + bld r18,2 + bst r12,0 + bld r18,3 + bst r22,1 + bld r18,4 + bst r4,1 + bld r18,5 + bst r8,1 + bld r18,6 + bst r12,1 + bld r18,7 + bst r22,2 + bld r19,0 + bst r4,2 + bld r19,1 + bst r8,2 + bld r19,2 + bst r12,2 + bld r19,3 + bst r22,3 + bld r19,4 + bst r4,3 + bld r19,5 + bst r8,3 + bld r19,6 + bst r12,3 + bld r19,7 + bst r22,4 + bld r20,0 + bst r4,4 + bld r20,1 + bst r8,4 + bld r20,2 + bst r12,4 + bld r20,3 + bst r22,5 + bld r20,4 + bst r4,5 + bld r20,5 + bst r8,5 + bld r20,6 + bst r12,5 + bld r20,7 + bst r22,6 + bld r21,0 + bst r4,6 + bld r21,1 + bst r8,6 + bld r21,2 + bst r12,6 + bld r21,3 + bst r22,7 + bld r21,4 + bst r4,7 + bld r21,5 + bst r8,7 + bld r21,6 + bst r12,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r23,0 + bld r18,0 + bst r5,0 + bld r18,1 + bst r9,0 + bld r18,2 + bst r13,0 + bld r18,3 + bst r23,1 + bld r18,4 + bst r5,1 + bld r18,5 + bst r9,1 + bld r18,6 + bst r13,1 + bld r18,7 + bst r23,2 + bld r19,0 + bst r5,2 + bld r19,1 + bst r9,2 + bld r19,2 + bst r13,2 + bld r19,3 + bst r23,3 + bld r19,4 + bst r5,3 + bld r19,5 + bst r9,3 + bld r19,6 + bst r13,3 + bld r19,7 + bst r23,4 + bld r20,0 + bst r5,4 + bld r20,1 + bst r9,4 + bld r20,2 + bst r13,4 + bld r20,3 + bst r23,5 + bld r20,4 + bst r5,5 + bld r20,5 + bst r9,5 + bld r20,6 + bst r13,5 + bld r20,7 + bst r23,6 + bld r21,0 + bst r5,6 + bld r21,1 + bst r9,6 + bld r21,2 + bst r13,6 + bld r21,3 + 
bst r23,7 + bld r21,4 + bst r5,7 + bld r21,5 + bst r9,7 + bld r21,6 + bst r13,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r2,0 + bld r18,0 + bst r6,0 + bld r18,1 + bst r10,0 + bld r18,2 + bst r14,0 + bld r18,3 + bst r2,1 + bld r18,4 + bst r6,1 + bld r18,5 + bst r10,1 + bld r18,6 + bst r14,1 + bld r18,7 + bst r2,2 + bld r19,0 + bst r6,2 + bld r19,1 + bst r10,2 + bld r19,2 + bst r14,2 + bld r19,3 + bst r2,3 + bld r19,4 + bst r6,3 + bld r19,5 + bst r10,3 + bld r19,6 + bst r14,3 + bld r19,7 + bst r2,4 + bld r20,0 + bst r6,4 + bld r20,1 + bst r10,4 + bld r20,2 + bst r14,4 + bld r20,3 + bst r2,5 + bld r20,4 + bst r6,5 + bld r20,5 + bst r10,5 + bld r20,6 + bst r14,5 + bld r20,7 + bst r2,6 + bld r21,0 + bst r6,6 + bld r21,1 + bst r10,6 + bld r21,2 + bst r14,6 + bld r21,3 + bst r2,7 + bld r21,4 + bst r6,7 + bld r21,5 + bst r10,7 + bld r21,6 + bst r14,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + bst r3,0 + bld r18,0 + bst r7,0 + bld r18,1 + bst r11,0 + bld r18,2 + bst r15,0 + bld r18,3 + bst r3,1 + bld r18,4 + bst r7,1 + bld r18,5 + bst r11,1 + bld r18,6 + bst r15,1 + bld r18,7 + bst r3,2 + bld r19,0 + bst r7,2 + bld r19,1 + bst r11,2 + bld r19,2 + bst r15,2 + bld r19,3 + bst r3,3 + bld r19,4 + bst r7,3 + bld r19,5 + bst r11,3 + bld r19,6 + bst r15,3 + bld r19,7 + bst r3,4 + bld r20,0 + bst r7,4 + bld r20,1 + bst r11,4 + bld r20,2 + bst r15,4 + bld r20,3 + bst r3,5 + bld r20,4 + bst r7,5 + bld r20,5 + bst r11,5 + bld r20,6 + bst r15,5 + bld r20,7 + bst r3,6 + bld r21,0 + bst r7,6 + bld r21,1 + bst r11,6 + bld r21,2 + bst r15,6 + bld r21,3 + bst r3,7 + bld r21,4 + bst r7,7 + bld r21,5 + bst r11,7 + bld r21,6 + bst r15,7 + bld r21,7 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128n_decrypt, .-gift128n_decrypt + + .text +.global gift128t_encrypt + .type gift128t_encrypt, @function +gift128t_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ld r2,Z + ldd r3,Z+1 + ldd r4,Z+2 + ldd r5,Z+3 + ldd r6,Z+4 + ldd r7,Z+5 + ldd r8,Z+6 + ldd r9,Z+7 + ldd r10,Z+8 + ldd r11,Z+9 + ldd r12,Z+10 + ldd r13,Z+11 + ldd r14,Z+12 + ldd r15,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + movw r30,r28 + adiw r30,1 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + st Z+,r24 + st Z+,r25 + ldi r19,4 +35: + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + mov r0,r1 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + or r5,r0 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + mov r0,r4 + mov r4,r8 + mov r8,r0 + mov r0,r5 + mov r5,r9 + mov r9,r0 + st Z+,r14 + st Z+,r15 + st Z+,r24 + st Z+,r25 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc 
r10,r1 + mov r0,r1 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + or r13,r0 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + mov r0,r12 + mov r12,r24 + mov r24,r0 + mov r0,r13 + mov r13,r25 + mov r25,r0 + dec r19 + breq 5117f + rjmp 35b +5117: + subi r30,80 + sbc r31,r1 + ldi r19,2 +121: + ld r2,Z + ldd r3,Z+1 + ldd r4,Z+2 + ldd r5,Z+3 + movw r20,r2 + movw r22,r4 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,85 + mov r21,r1 + andi r22,85 + mov r23,r1 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,15 + mov r21,r1 + andi r22,15 + mov r23,r1 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + movw r20,r22 + mov r22,r1 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + andi r20,51 + andi r21,51 + eor r2,r20 + eor r3,r21 + mov r22,r1 + mov r23,r1 + movw r22,r20 + mov r20,r1 + mov r21,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + st Z,r5 + std Z+1,r3 + std Z+2,r4 + std Z+3,r2 + ldd r2,Z+4 + ldd r3,Z+5 + ldd r4,Z+6 + ldd r5,Z+7 + movw r20,r2 + movw r22,r4 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,85 + mov r21,r1 + andi r22,85 + mov r23,r1 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,15 + mov r21,r1 + andi r22,15 + mov r23,r1 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + movw r20,r22 + mov r22,r1 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + andi r20,51 + andi r21,51 + eor r2,r20 + eor r3,r21 + mov r22,r1 + mov r23,r1 + movw r22,r20 + mov r20,r1 + mov r21,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 
+ eor r4,r22 + eor r5,r23 + std Z+4,r5 + std Z+5,r3 + std Z+6,r4 + std Z+7,r2 + ldd r2,Z+8 + ldd r3,Z+9 + ldd r4,Z+10 + ldd r5,Z+11 + movw r20,r2 + movw r22,r4 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,17 + andi r21,17 + andi r22,17 + andi r23,17 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r0,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r0 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,3 + andi r21,3 + andi r22,3 + andi r23,3 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r0,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r0 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,15 + mov r21,r1 + andi r22,15 + mov r23,r1 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + std Z+8,r5 + std Z+9,r3 + std Z+10,r4 + std Z+11,r2 + ldd r2,Z+12 + ldd r3,Z+13 + ldd r4,Z+14 + ldd r5,Z+15 + movw r20,r2 + movw r22,r4 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,17 + andi r21,17 + andi r22,17 + andi r23,17 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r0,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r0 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,3 + andi r21,3 + andi r22,3 + andi r23,3 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r0,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r0 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,15 + mov r21,r1 + andi r22,15 + mov r23,r1 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol 
r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + std Z+12,r5 + std Z+13,r3 + std Z+14,r4 + std Z+15,r2 + ldd r2,Z+16 + ldd r3,Z+17 + ldd r4,Z+18 + ldd r5,Z+19 + movw r20,r2 + movw r22,r4 + mov r0,r1 + lsl r21 + rol r22 + rol r23 + rol r0 + movw r20,r22 + mov r22,r0 + mov r23,r1 + eor r20,r2 + eor r21,r3 + andi r20,170 + andi r21,170 + eor r2,r20 + eor r3,r21 + mov r22,r1 + mov r23,r1 + mov r0,r1 + lsr r22 + ror r21 + ror r20 + ror r0 + movw r22,r20 + mov r21,r0 + mov r20,r1 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + movw r20,r22 + mov r22,r1 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + andi r20,51 + andi r21,51 + eor r2,r20 + eor r3,r21 + mov r22,r1 + mov r23,r1 + movw r22,r20 + mov r20,r1 + mov r21,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + andi r20,240 + andi r21,240 + eor r2,r20 + eor r3,r21 + mov r22,r1 + mov r23,r1 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + std Z+16,r5 + std Z+17,r3 + std Z+18,r4 + std Z+19,r2 + ldd r2,Z+20 + ldd r3,Z+21 + ldd r4,Z+22 + ldd r5,Z+23 + movw r20,r2 + movw r22,r4 + mov r0,r1 + lsl r21 + rol r22 + rol r23 + rol r0 + movw r20,r22 + mov r22,r0 + mov r23,r1 + eor r20,r2 + eor r21,r3 + andi r20,170 + andi r21,170 + eor r2,r20 + eor r3,r21 + mov r22,r1 + mov r23,r1 + mov r0,r1 + lsr r22 + ror r21 + ror r20 + ror r0 + movw r22,r20 + mov r21,r0 + mov r20,r1 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + movw r20,r22 + mov r22,r1 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + andi r20,51 + andi r21,51 + eor r2,r20 + eor r3,r21 + mov r22,r1 + mov r23,r1 + movw r22,r20 + mov r20,r1 + mov r21,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + andi r20,240 + andi r21,240 + eor r2,r20 + eor r3,r21 + mov r22,r1 + mov r23,r1 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + std Z+20,r5 + std Z+21,r3 + std Z+22,r4 + std Z+23,r2 + ldd r2,Z+24 + ldd r3,Z+25 + ldd r4,Z+26 + ldd r5,Z+27 + movw r20,r2 + movw r22,r4 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,10 + andi r21,10 + andi r22,10 + andi r23,10 + eor 
r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r0,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r0 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,204 + mov r21,r1 + andi r22,204 + mov r23,r1 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r0,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r0 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + andi r20,240 + andi r21,240 + eor r2,r20 + eor r3,r21 + mov r22,r1 + mov r23,r1 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + std Z+24,r5 + std Z+25,r3 + std Z+26,r4 + std Z+27,r2 + ldd r2,Z+28 + ldd r3,Z+29 + ldd r4,Z+30 + ldd r5,Z+31 + movw r20,r2 + movw r22,r4 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,10 + andi r21,10 + andi r22,10 + andi r23,10 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r0,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + lsl r20 + rol r21 + rol r22 + rol r23 + rol r0 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r0 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + andi r20,204 + mov r21,r1 + andi r22,204 + mov r23,r1 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r0,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + lsr r23 + ror r22 + ror r21 + ror r20 + ror r0 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r0 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + movw r20,r2 + movw r22,r4 + mov r20,r21 + mov r21,r22 + mov r22,r23 + mov r23,r1 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r2 + eor r21,r3 + andi r20,240 + andi r21,240 + eor r2,r20 + eor r3,r21 + mov r22,r1 + mov r23,r1 + mov r23,r22 + mov r22,r21 + mov r21,r20 + mov r20,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + std Z+28,r5 + std Z+29,r3 + std Z+30,r4 + std Z+31,r2 + dec r19 + breq 1270f + adiw r30,40 + rjmp 121b +1270: + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r2,0 + bst r20,1 + bld r6,0 + bst r20,2 + bld r10,0 + bst r20,3 + bld r14,0 + bst r20,4 + bld r2,1 + bst r20,5 + bld r6,1 + bst r20,6 + bld r10,1 + bst r20,7 + bld r14,1 + bst r21,0 + bld 
r2,2 + bst r21,1 + bld r6,2 + bst r21,2 + bld r10,2 + bst r21,3 + bld r14,2 + bst r21,4 + bld r2,3 + bst r21,5 + bld r6,3 + bst r21,6 + bld r10,3 + bst r21,7 + bld r14,3 + bst r22,0 + bld r2,4 + bst r22,1 + bld r6,4 + bst r22,2 + bld r10,4 + bst r22,3 + bld r14,4 + bst r22,4 + bld r2,5 + bst r22,5 + bld r6,5 + bst r22,6 + bld r10,5 + bst r22,7 + bld r14,5 + bst r23,0 + bld r2,6 + bst r23,1 + bld r6,6 + bst r23,2 + bld r10,6 + bst r23,3 + bld r14,6 + bst r23,4 + bld r2,7 + bst r23,5 + bld r6,7 + bst r23,6 + bld r10,7 + bst r23,7 + bld r14,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r3,0 + bst r20,1 + bld r7,0 + bst r20,2 + bld r11,0 + bst r20,3 + bld r15,0 + bst r20,4 + bld r3,1 + bst r20,5 + bld r7,1 + bst r20,6 + bld r11,1 + bst r20,7 + bld r15,1 + bst r21,0 + bld r3,2 + bst r21,1 + bld r7,2 + bst r21,2 + bld r11,2 + bst r21,3 + bld r15,2 + bst r21,4 + bld r3,3 + bst r21,5 + bld r7,3 + bst r21,6 + bld r11,3 + bst r21,7 + bld r15,3 + bst r22,0 + bld r3,4 + bst r22,1 + bld r7,4 + bst r22,2 + bld r11,4 + bst r22,3 + bld r15,4 + bst r22,4 + bld r3,5 + bst r22,5 + bld r7,5 + bst r22,6 + bld r11,5 + bst r22,7 + bld r15,5 + bst r23,0 + bld r3,6 + bst r23,1 + bld r7,6 + bst r23,2 + bld r11,6 + bst r23,3 + bld r15,6 + bst r23,4 + bld r3,7 + bst r23,5 + bld r7,7 + bst r23,6 + bld r11,7 + bst r23,7 + bld r15,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r4,0 + bst r20,1 + bld r8,0 + bst r20,2 + bld r12,0 + bst r20,3 + bld r24,0 + bst r20,4 + bld r4,1 + bst r20,5 + bld r8,1 + bst r20,6 + bld r12,1 + bst r20,7 + bld r24,1 + bst r21,0 + bld r4,2 + bst r21,1 + bld r8,2 + bst r21,2 + bld r12,2 + bst r21,3 + bld r24,2 + bst r21,4 + bld r4,3 + bst r21,5 + bld r8,3 + bst r21,6 + bld r12,3 + bst r21,7 + bld r24,3 + bst r22,0 + bld r4,4 + bst r22,1 + bld r8,4 + bst r22,2 + bld r12,4 + bst r22,3 + bld r24,4 + bst r22,4 + bld r4,5 + bst r22,5 + bld r8,5 + bst r22,6 + bld r12,5 + bst r22,7 + bld r24,5 + bst r23,0 + bld r4,6 + bst r23,1 + bld r8,6 + bst r23,2 + bld r12,6 + bst r23,3 + bld r24,6 + bst r23,4 + bld r4,7 + bst r23,5 + bld r8,7 + bst r23,6 + bld r12,7 + bst r23,7 + bld r24,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r5,0 + bst r20,1 + bld r9,0 + bst r20,2 + bld r13,0 + bst r20,3 + bld r25,0 + bst r20,4 + bld r5,1 + bst r20,5 + bld r9,1 + bst r20,6 + bld r13,1 + bst r20,7 + bld r25,1 + bst r21,0 + bld r5,2 + bst r21,1 + bld r9,2 + bst r21,2 + bld r13,2 + bst r21,3 + bld r25,2 + bst r21,4 + bld r5,3 + bst r21,5 + bld r9,3 + bst r21,6 + bld r13,3 + bst r21,7 + bld r25,3 + bst r22,0 + bld r5,4 + bst r22,1 + bld r9,4 + bst r22,2 + bld r13,4 + bst r22,3 + bld r25,4 + bst r22,4 + bld r5,5 + bst r22,5 + bld r9,5 + bst r22,6 + bld r13,5 + bst r22,7 + bld r25,5 + bst r23,0 + bld r5,6 + bst r23,1 + bld r9,6 + bst r23,2 + bld r13,6 + bst r23,3 + bld r25,6 + bst r23,4 + bld r5,7 + bst r23,5 + bld r9,7 + bst r23,6 + bld r13,7 + bst r23,7 + bld r25,7 + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + rcall 1613f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2351f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,20 + adiw r26,40 + rcall 1613f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if 
defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2351f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,40 + sbiw r26,40 + rcall 1613f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2351f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,60 + adiw r26,40 + rcall 1613f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2351f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,80 + sbiw r26,40 + rcall 1613f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2351f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,100 + adiw r26,40 + rcall 1613f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2351f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r19,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r19 +#endif + ldi r30,120 + sbiw r26,40 + rcall 1613f + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rcall 1613f + rjmp 2826f +1613: + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + com r14 + com r15 + com r24 + com r25 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + movw r20,r6 + movw r22,r8 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,204 + andi r21,204 + andi r22,204 + andi r23,204 + lsr r9 + ror r8 + ror r7 + ror r6 + lsr r9 + ror r8 + ror r7 + ror r6 + ldi r19,51 + and r6,r19 + and r7,r19 + and r8,r19 + and r9,r19 + or r6,r20 + or r7,r21 + or r8,r22 + or r9,r23 + movw r20,r10 + movw r22,r12 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,238 + andi r21,238 + andi r22,238 + andi r23,238 + lsr r13 + ror r12 + ror r11 + ror r10 + lsr r13 + ror r12 + ror r11 + ror r10 + lsr r13 + ror r12 + ror r11 + ror r10 + ldi r17,17 + and r10,r17 + and r11,r17 + and r12,r17 + and r13,r17 + or r10,r20 + or r11,r21 + or r12,r22 + or r13,r23 + movw r20,r14 + movw r22,r24 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + andi r20,136 + andi r21,136 + andi r22,136 + andi r23,136 + lsr r25 + ror r24 + ror r15 + ror r14 + ldi r16,119 + and r14,r16 + and r15,r16 + andi r24,119 + andi r25,119 + or r14,r20 + or r15,r21 + or r24,r22 + or r25,r23 
+ ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r24 + and r0,r12 + eor r8,r0 + mov r0,r25 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r8 + and r0,r4 + eor r24,r0 + mov r0,r9 + and r0,r5 + eor r25,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r24 + or r0,r8 + eor r12,r0 + mov r0,r25 + or r0,r9 + eor r13,r0 + eor r2,r10 + eor r3,r11 + eor r4,r12 + eor r5,r13 + eor r6,r2 + eor r7,r3 + eor r8,r4 + eor r9,r5 + com r2 + com r3 + com r4 + com r5 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r24 + and r0,r8 + eor r12,r0 + mov r0,r25 + and r0,r9 + eor r13,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r1 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + or r5,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + mov r0,r9 + mov r9,r8 + mov r8,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r12 + rol r13 + adc r12,r1 + lsl r12 + rol r13 + adc r12,r1 + lsl r12 + rol r13 + adc r12,r1 + lsl r12 + rol r13 + adc r12,r1 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r14,r20 + eor r15,r21 + eor r24,r22 + eor r25,r23 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor 
r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + com r14 + com r15 + com r24 + com r25 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + movw r20,r6 + movw r22,r8 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + andi r20,85 + andi r21,85 + andi r22,85 + andi r23,85 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + mov r0,r12 + mov r12,r10 + mov r10,r0 + mov r0,r13 + mov r13,r11 + mov r11,r0 + movw r20,r10 + movw r22,r12 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r10 + eor r21,r11 + andi r20,85 + andi r21,85 + eor r10,r20 + eor r11,r21 + mov r22,r1 + mov r23,r1 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + mov r0,r24 + mov r24,r14 + mov r14,r0 + mov r0,r25 + mov r25,r15 + mov r15,r0 + movw r20,r24 + lsr r21 + ror r20 + eor r20,r24 + eor r21,r25 + andi r20,85 + andi r21,85 + eor r24,r20 + eor r25,r21 + lsl r20 + rol r21 + eor r24,r20 + eor r25,r21 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r24 + and r0,r12 + eor r8,r0 + mov r0,r25 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r8 + and r0,r4 + eor r24,r0 + mov r0,r9 + and r0,r5 + eor r25,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r24 + or r0,r8 + eor r12,r0 + mov r0,r25 + or r0,r9 + eor r13,r0 + eor r2,r10 + eor r3,r11 + eor r4,r12 + eor r5,r13 + eor r6,r2 + eor r7,r3 + eor r8,r4 + eor r9,r5 + com r2 + com r3 + com r4 + com r5 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r24 + and r0,r8 + eor r12,r0 + mov r0,r25 + and r0,r9 + eor r13,r0 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r5 + adc r5,r1 + lsl r5 + adc r5,r1 + swap r6 + swap r7 + swap r8 + swap r9 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + mov r0,r1 + lsr r12 + ror r0 + lsr r12 + ror r0 + or r12,r0 + mov r0,r1 + lsr r13 + ror r0 + lsr r13 + ror r0 + or r13,r0 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + 
eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r14,r20 + eor r15,r21 + eor r24,r22 + eor r25,r23 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + com r14 + com r15 + com r24 + com r25 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r8 + eor r12,r0 + mov r0,r5 + and r0,r9 + eor r13,r0 + mov r0,r8 + mov r8,r6 + mov r6,r0 + mov r0,r9 + mov r9,r7 + mov r7,r0 + mov r0,r10 + mov r10,r11 + mov r11,r12 + mov r12,r13 + mov r13,r0 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + eor r14,r2 + eor r15,r3 + eor r24,r4 + eor r25,r5 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + ret +2351: + movw r30,r26 + sbiw r30,40 + push r5 + push r4 + push r3 + push r2 + push r9 + push r8 + push r7 + push r6 + ld r2,Z + ldd r3,Z+1 + ldd r4,Z+2 + ldd r5,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r16,Z+6 + ldd r17,Z+7 + movw r20,r26 + movw r22,r16 + movw r20,r22 + mov r22,r1 + mov r23,r1 + eor r20,r26 + eor r21,r27 + andi r20,51 + andi r21,51 + eor r26,r20 + eor r27,r21 + mov r22,r1 + mov r23,r1 + movw r22,r20 + mov r20,r1 + mov r21,r1 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,68 + andi r21,68 + andi r22,85 + andi r23,85 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + 
eor r17,r23 + st Z,r26 + std Z+1,r27 + std Z+2,r16 + std Z+3,r17 + movw r20,r2 + movw r22,r4 + andi r20,51 + andi r21,51 + andi r22,51 + andi r23,51 + ldi r19,204 + and r2,r19 + and r3,r19 + and r4,r19 + and r5,r19 + or r4,r23 + or r5,r20 + or r2,r21 + or r3,r22 + movw r20,r4 + movw r22,r2 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r4 + eor r21,r5 + eor r22,r2 + eor r23,r3 + mov r20,r1 + andi r21,17 + andi r22,85 + andi r23,85 + eor r4,r20 + eor r5,r21 + eor r2,r22 + eor r3,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r4,r20 + eor r5,r21 + eor r2,r22 + eor r3,r23 + std Z+4,r4 + std Z+5,r5 + std Z+6,r2 + std Z+7,r3 + ldd r2,Z+8 + ldd r3,Z+9 + ldd r4,Z+10 + ldd r5,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r16,Z+14 + ldd r17,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r16 + adc r16,r1 + lsl r16 + adc r16,r1 + swap r17 + std Z+8,r26 + std Z+9,r27 + std Z+10,r16 + std Z+11,r17 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r5 + adc r5,r1 + lsl r5 + adc r5,r1 + std Z+12,r2 + std Z+13,r3 + std Z+14,r4 + std Z+15,r5 + ldd r2,Z+16 + ldd r3,Z+17 + ldd r4,Z+18 + ldd r5,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r16,Z+22 + ldd r17,Z+23 + movw r20,r26 + movw r22,r16 + andi r20,170 + andi r21,170 + andi r22,170 + andi r23,170 + andi r26,85 + andi r27,85 + andi r16,85 + andi r17,85 + or r26,r21 + or r27,r22 + or r16,r23 + or r17,r20 + std Z+16,r16 + std Z+17,r17 + std Z+18,r26 + std Z+19,r27 + movw r20,r2 + movw r22,r4 + andi r20,85 + andi r21,85 + andi r22,85 + andi r23,85 + ldi r19,170 + and r2,r19 + and r3,r19 + and r4,r19 + and r5,r19 + lsl r2 + rol r3 + rol r4 + rol r5 + adc r2,r1 + lsl r2 + rol r3 + rol r4 + rol r5 + adc r2,r1 + lsl r2 + rol r3 + rol r4 + rol r5 + adc r2,r1 + lsl r2 + rol r3 + rol r4 + rol r5 + adc r2,r1 + or r2,r20 + or r3,r21 + or r4,r22 + or r5,r23 + std Z+20,r5 + std Z+21,r2 + std Z+22,r3 + std Z+23,r4 + ldd r2,Z+24 + ldd r3,Z+25 + ldd r4,Z+26 + ldd r5,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r16,Z+30 + ldd r17,Z+31 + movw r20,r26 + movw r22,r16 + lsr r23 + ror r22 + ror r21 + ror r20 + lsr r23 + ror r22 + ror r21 + ror r20 + eor r20,r26 + eor r21,r27 + eor r22,r16 + eor r23,r17 + andi r20,3 + andi r21,3 + andi r22,3 + andi r23,3 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + lsl r20 + rol r21 + rol r22 + rol r23 + lsl r20 + rol r21 + rol r22 + rol r23 + eor r26,r20 + eor r27,r21 + eor r16,r22 + eor r17,r23 + movw r20,r26 + movw r22,r16 + lsr r23 + ror r22 + ror r21 + ror r20 + andi r20,120 + andi r21,120 + andi r22,120 + andi r23,120 + movw r6,r20 + movw r8,r22 + lsr r9 + ror r8 + ror r7 + ror r6 + lsr r9 + ror r8 + ror r7 + ror r6 + lsr r9 + ror r8 + ror r7 + ror r6 + lsr r9 + ror r8 + ror r7 + ror r6 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ldi r19,8 + and r6,r19 + and r7,r19 + and r8,r19 + and r9,r19 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + lsl r6 + rol r7 + rol r8 + rol r9 + lsl r6 + rol r7 + rol r8 + rol r9 + lsl r6 + rol r7 + rol r8 + rol r9 + lsl r6 + rol r7 + rol r8 + rol r9 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + andi r26,15 + andi r27,15 + andi r16,15 + andi r17,15 + or r26,r20 + or r27,r21 + or r16,r22 + or r17,r23 + std Z+24,r26 + std Z+25,r27 + std Z+26,r16 + std Z+27,r17 + movw r20,r4 + lsr r21 + ror r20 + lsr r21 + ror r20 + andi r20,48 + andi r21,48 + movw r26,r2 + movw r16,r4 + andi r26,1 + andi r27,1 + andi r16,1 + andi r17,1 + lsl r26 + rol r27 + rol 
r16 + rol r17 + lsl r26 + rol r27 + rol r16 + rol r17 + lsl r26 + rol r27 + rol r16 + rol r17 + or r26,r20 + or r27,r21 + movw r20,r4 + lsl r20 + rol r21 + lsl r20 + rol r21 + andi r20,192 + andi r21,192 + or r26,r20 + or r27,r21 + movw r20,r2 + andi r20,224 + andi r21,224 + lsr r21 + ror r20 + or r16,r20 + or r17,r21 + movw r20,r2 + movw r22,r4 + lsr r23 + ror r22 + ror r21 + ror r20 + andi r20,7 + andi r21,7 + andi r22,7 + andi r23,7 + or r26,r20 + or r27,r21 + or r16,r22 + or r17,r23 + ldi r19,16 + and r2,r19 + and r3,r19 + lsl r2 + rol r3 + lsl r2 + rol r3 + lsl r2 + rol r3 + or r16,r2 + or r17,r3 + std Z+28,r26 + std Z+29,r27 + std Z+30,r16 + std Z+31,r17 + ldd r2,Z+32 + ldd r3,Z+33 + ldd r4,Z+34 + ldd r5,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r16,Z+38 + ldd r17,Z+39 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r16 + std Z+35,r17 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r4 + mov r4,r5 + mov r5,r0 + lsl r4 + rol r5 + adc r4,r1 + lsl r4 + rol r5 + adc r4,r1 + std Z+36,r2 + std Z+37,r3 + std Z+38,r4 + std Z+39,r5 + pop r6 + pop r7 + pop r8 + pop r9 + pop r2 + pop r3 + pop r4 + pop r5 + movw r26,r30 + ret +2826: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + bst r2,0 + bld r20,0 + bst r6,0 + bld r20,1 + bst r10,0 + bld r20,2 + bst r14,0 + bld r20,3 + bst r2,1 + bld r20,4 + bst r6,1 + bld r20,5 + bst r10,1 + bld r20,6 + bst r14,1 + bld r20,7 + bst r2,2 + bld r21,0 + bst r6,2 + bld r21,1 + bst r10,2 + bld r21,2 + bst r14,2 + bld r21,3 + bst r2,3 + bld r21,4 + bst r6,3 + bld r21,5 + bst r10,3 + bld r21,6 + bst r14,3 + bld r21,7 + bst r2,4 + bld r22,0 + bst r6,4 + bld r22,1 + bst r10,4 + bld r22,2 + bst r14,4 + bld r22,3 + bst r2,5 + bld r22,4 + bst r6,5 + bld r22,5 + bst r10,5 + bld r22,6 + bst r14,5 + bld r22,7 + bst r2,6 + bld r23,0 + bst r6,6 + bld r23,1 + bst r10,6 + bld r23,2 + bst r14,6 + bld r23,3 + bst r2,7 + bld r23,4 + bst r6,7 + bld r23,5 + bst r10,7 + bld r23,6 + bst r14,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r3,0 + bld r20,0 + bst r7,0 + bld r20,1 + bst r11,0 + bld r20,2 + bst r15,0 + bld r20,3 + bst r3,1 + bld r20,4 + bst r7,1 + bld r20,5 + bst r11,1 + bld r20,6 + bst r15,1 + bld r20,7 + bst r3,2 + bld r21,0 + bst r7,2 + bld r21,1 + bst r11,2 + bld r21,2 + bst r15,2 + bld r21,3 + bst r3,3 + bld r21,4 + bst r7,3 + bld r21,5 + bst r11,3 + bld r21,6 + bst r15,3 + bld r21,7 + bst r3,4 + bld r22,0 + bst r7,4 + bld r22,1 + bst r11,4 + bld r22,2 + bst r15,4 + bld r22,3 + bst r3,5 + bld r22,4 + bst r7,5 + bld r22,5 + bst r11,5 + bld r22,6 + bst r15,5 + bld r22,7 + bst r3,6 + bld r23,0 + bst r7,6 + bld r23,1 + bst r11,6 + bld r23,2 + bst r15,6 + bld r23,3 + bst r3,7 + bld r23,4 + bst r7,7 + bld r23,5 + bst r11,7 + bld r23,6 + bst r15,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r4,0 + bld r20,0 + bst r8,0 + bld r20,1 + bst r12,0 + bld r20,2 + bst r24,0 + bld r20,3 + bst r4,1 + bld r20,4 + bst r8,1 + bld r20,5 + bst r12,1 + bld r20,6 + bst r24,1 + bld r20,7 + bst r4,2 + bld r21,0 + bst r8,2 + bld r21,1 + bst r12,2 + bld r21,2 + bst r24,2 + bld r21,3 + bst r4,3 + bld r21,4 + bst r8,3 + bld r21,5 + bst r12,3 + bld r21,6 + bst r24,3 + bld r21,7 + bst r4,4 + bld r22,0 + bst r8,4 + bld r22,1 + bst r12,4 + bld 
r22,2 + bst r24,4 + bld r22,3 + bst r4,5 + bld r22,4 + bst r8,5 + bld r22,5 + bst r12,5 + bld r22,6 + bst r24,5 + bld r22,7 + bst r4,6 + bld r23,0 + bst r8,6 + bld r23,1 + bst r12,6 + bld r23,2 + bst r24,6 + bld r23,3 + bst r4,7 + bld r23,4 + bst r8,7 + bld r23,5 + bst r12,7 + bld r23,6 + bst r24,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r5,0 + bld r20,0 + bst r9,0 + bld r20,1 + bst r13,0 + bld r20,2 + bst r25,0 + bld r20,3 + bst r5,1 + bld r20,4 + bst r9,1 + bld r20,5 + bst r13,1 + bld r20,6 + bst r25,1 + bld r20,7 + bst r5,2 + bld r21,0 + bst r9,2 + bld r21,1 + bst r13,2 + bld r21,2 + bst r25,2 + bld r21,3 + bst r5,3 + bld r21,4 + bst r9,3 + bld r21,5 + bst r13,3 + bld r21,6 + bst r25,3 + bld r21,7 + bst r5,4 + bld r22,0 + bst r9,4 + bld r22,1 + bst r13,4 + bld r22,2 + bst r25,4 + bld r22,3 + bst r5,5 + bld r22,4 + bst r9,5 + bld r22,5 + bst r13,5 + bld r22,6 + bst r25,5 + bld r22,7 + bst r5,6 + bld r23,0 + bst r9,6 + bld r23,1 + bst r13,6 + bld r23,2 + bst r25,6 + bld r23,3 + bst r5,7 + bld r23,4 + bst r9,7 + bld r23,5 + bst r13,7 + bld r23,6 + bst r25,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128t_encrypt, .-gift128t_encrypt + + .text +.global gift128t_decrypt + .type gift128t_decrypt, @function +gift128t_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 36 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r2,0 + bst r20,1 + bld r6,0 + bst r20,2 + bld r10,0 + bst r20,3 + bld r14,0 + bst r20,4 + bld r2,1 + bst r20,5 + bld r6,1 + bst r20,6 + bld r10,1 + bst r20,7 + bld r14,1 + bst r21,0 + bld r2,2 + bst r21,1 + bld r6,2 + bst r21,2 + bld r10,2 + bst r21,3 + bld r14,2 + bst r21,4 + bld r2,3 + bst r21,5 + bld r6,3 + bst r21,6 + bld r10,3 + bst r21,7 + bld r14,3 + bst r22,0 + bld r2,4 + bst r22,1 + bld r6,4 + bst r22,2 + bld r10,4 + bst r22,3 + bld r14,4 + bst r22,4 + bld r2,5 + bst r22,5 + bld r6,5 + bst r22,6 + bld r10,5 + bst r22,7 + bld r14,5 + bst r23,0 + bld r2,6 + bst r23,1 + bld r6,6 + bst r23,2 + bld r10,6 + bst r23,3 + bld r14,6 + bst r23,4 + bld r2,7 + bst r23,5 + bld r6,7 + bst r23,6 + bld r10,7 + bst r23,7 + bld r14,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r3,0 + bst r20,1 + bld r7,0 + bst r20,2 + bld r11,0 + bst r20,3 + bld r15,0 + bst r20,4 + bld r3,1 + bst r20,5 + bld r7,1 + bst r20,6 + bld r11,1 + bst r20,7 + bld r15,1 + bst r21,0 + bld r3,2 + bst r21,1 + bld r7,2 + bst r21,2 + bld r11,2 + bst r21,3 + bld r15,2 + bst r21,4 + bld r3,3 + bst r21,5 + bld r7,3 + bst r21,6 + bld r11,3 + bst r21,7 + bld r15,3 + bst r22,0 + bld r3,4 + bst r22,1 + bld r7,4 + bst r22,2 + bld r11,4 + bst r22,3 + bld r15,4 + bst r22,4 + bld r3,5 + bst r22,5 + bld r7,5 + bst r22,6 + bld r11,5 + bst r22,7 + bld r15,5 + bst r23,0 + bld r3,6 + bst r23,1 + bld r7,6 + bst r23,2 + bld r11,6 + bst r23,3 + bld r15,6 + bst r23,4 + bld r3,7 + bst r23,5 + bld r7,7 + bst r23,6 + bld r11,7 + bst r23,7 + bld r15,7 + ld r20,X+ + ld 
r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r4,0 + bst r20,1 + bld r8,0 + bst r20,2 + bld r12,0 + bst r20,3 + bld r24,0 + bst r20,4 + bld r4,1 + bst r20,5 + bld r8,1 + bst r20,6 + bld r12,1 + bst r20,7 + bld r24,1 + bst r21,0 + bld r4,2 + bst r21,1 + bld r8,2 + bst r21,2 + bld r12,2 + bst r21,3 + bld r24,2 + bst r21,4 + bld r4,3 + bst r21,5 + bld r8,3 + bst r21,6 + bld r12,3 + bst r21,7 + bld r24,3 + bst r22,0 + bld r4,4 + bst r22,1 + bld r8,4 + bst r22,2 + bld r12,4 + bst r22,3 + bld r24,4 + bst r22,4 + bld r4,5 + bst r22,5 + bld r8,5 + bst r22,6 + bld r12,5 + bst r22,7 + bld r24,5 + bst r23,0 + bld r4,6 + bst r23,1 + bld r8,6 + bst r23,2 + bld r12,6 + bst r23,3 + bld r24,6 + bst r23,4 + bld r4,7 + bst r23,5 + bld r8,7 + bst r23,6 + bld r12,7 + bst r23,7 + bld r24,7 + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + bst r20,0 + bld r5,0 + bst r20,1 + bld r9,0 + bst r20,2 + bld r13,0 + bst r20,3 + bld r25,0 + bst r20,4 + bld r5,1 + bst r20,5 + bld r9,1 + bst r20,6 + bld r13,1 + bst r20,7 + bld r25,1 + bst r21,0 + bld r5,2 + bst r21,1 + bld r9,2 + bst r21,2 + bld r13,2 + bst r21,3 + bld r25,2 + bst r21,4 + bld r5,3 + bst r21,5 + bld r9,3 + bst r21,6 + bld r13,3 + bst r21,7 + bld r25,3 + bst r22,0 + bld r5,4 + bst r22,1 + bld r9,4 + bst r22,2 + bld r13,4 + bst r22,3 + bld r25,4 + bst r22,4 + bld r5,5 + bst r22,5 + bld r9,5 + bst r22,6 + bld r13,5 + bst r22,7 + bld r25,5 + bst r23,0 + bld r5,6 + bst r23,1 + bld r9,6 + bst r23,2 + bld r13,6 + bst r23,3 + bld r25,6 + bst r23,4 + bld r5,7 + bst r23,5 + bld r9,7 + bst r23,6 + bld r13,7 + bst r23,7 + bld r25,7 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r16,Z+14 + ldd r17,Z+15 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r16 + std Y+4,r17 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r16,Z+6 + ldd r17,Z+7 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r16 + std Y+8,r17 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r16,Z+10 + ldd r17,Z+11 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r16 + std Y+12,r17 + ld r26,Z + ldd r27,Z+1 + ldd r16,Z+2 + ldd r17,Z+3 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + lsr r17 + ror r16 + ror r0 + or r17,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r16 + std Y+16,r17 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r26,hh8(table_1) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r19,40 + mov r26,r1 +375: + ldd r0,Y+13 + ldd r20,Y+9 + std Y+9,r0 + ldd r0,Y+5 + std Y+5,r20 + ldd r20,Y+1 + std Y+1,r0 + ldd r0,Y+14 + ldd r21,Y+10 + std Y+10,r0 + ldd r0,Y+6 + std Y+6,r21 + ldd r21,Y+2 + std Y+2,r0 + ldd r0,Y+15 + ldd r22,Y+11 + std Y+11,r0 + ldd r0,Y+7 + std Y+7,r22 + ldd r22,Y+3 + std Y+3,r0 + ldd r0,Y+16 + ldd r23,Y+12 + std Y+12,r0 + ldd r0,Y+8 + std Y+8,r23 + ldd r23,Y+4 + std Y+4,r0 + mov r0,r1 + lsr r21 + ror r20 + ror r0 + lsr r21 + ror r20 + ror r0 + lsr r21 + ror r20 + ror r0 + lsr r21 + ror r20 + ror r0 + or r21,r0 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + 
std Y+13,r20 + std Y+14,r21 + std Y+15,r22 + std Y+16,r23 + eor r6,r20 + eor r7,r21 + eor r8,r22 + eor r9,r23 + ldd r0,Y+5 + eor r10,r0 + ldd r0,Y+6 + eor r11,r0 + ldd r0,Y+7 + eor r12,r0 + ldd r0,Y+8 + eor r13,r0 + ldi r20,128 + eor r25,r20 + dec r19 + mov r30,r19 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + eor r14,r20 + bst r2,1 + bld r0,0 + bst r5,0 + bld r2,1 + bst r2,6 + bld r5,0 + bst r4,1 + bld r2,6 + bst r5,4 + bld r4,1 + bst r2,7 + bld r5,4 + bst r3,1 + bld r2,7 + bst r5,2 + bld r3,1 + bst r4,6 + bld r5,2 + bst r4,5 + bld r4,6 + bst r5,5 + bld r4,5 + bst r5,7 + bld r5,5 + bst r3,7 + bld r5,7 + bst r3,3 + bld r3,7 + bst r3,2 + bld r3,3 + bst r4,2 + bld r3,2 + bst r4,4 + bld r4,2 + bst r2,5 + bld r4,4 + bst r5,1 + bld r2,5 + bst r5,6 + bld r5,1 + bst r4,7 + bld r5,6 + bst r3,5 + bld r4,7 + bst r5,3 + bld r3,5 + bst r3,6 + bld r5,3 + bst r4,3 + bld r3,6 + bst r3,4 + bld r4,3 + bst r2,3 + bld r3,4 + bst r3,0 + bld r2,3 + bst r2,2 + bld r3,0 + bst r4,0 + bld r2,2 + bst r2,4 + bld r4,0 + bst r0,0 + bld r2,4 + bst r6,0 + bld r0,0 + bst r7,0 + bld r6,0 + bst r7,2 + bld r7,0 + bst r9,2 + bld r7,2 + bst r9,6 + bld r9,2 + bst r9,7 + bld r9,6 + bst r8,7 + bld r9,7 + bst r8,5 + bld r8,7 + bst r6,5 + bld r8,5 + bst r6,1 + bld r6,5 + bst r0,0 + bld r6,1 + bst r6,2 + bld r0,0 + bst r9,0 + bld r6,2 + bst r7,6 + bld r9,0 + bst r9,3 + bld r7,6 + bst r8,6 + bld r9,3 + bst r9,5 + bld r8,6 + bst r6,7 + bld r9,5 + bst r8,1 + bld r6,7 + bst r6,4 + bld r8,1 + bst r7,1 + bld r6,4 + bst r0,0 + bld r7,1 + bst r6,3 + bld r0,0 + bst r8,0 + bld r6,3 + bst r7,4 + bld r8,0 + bst r7,3 + bld r7,4 + bst r8,2 + bld r7,3 + bst r9,4 + bld r8,2 + bst r7,7 + bld r9,4 + bst r8,3 + bld r7,7 + bst r8,4 + bld r8,3 + bst r7,5 + bld r8,4 + bst r0,0 + bld r7,5 + bst r6,6 + bld r0,0 + bst r9,1 + bld r6,6 + bst r0,0 + bld r9,1 + bst r10,0 + bld r0,0 + bst r12,0 + bld r10,0 + bst r12,4 + bld r12,0 + bst r12,5 + bld r12,4 + bst r11,5 + bld r12,5 + bst r11,3 + bld r11,5 + bst r13,2 + bld r11,3 + bst r10,6 + bld r13,2 + bst r10,1 + bld r10,6 + bst r11,0 + bld r10,1 + bst r12,2 + bld r11,0 + bst r10,4 + bld r12,2 + bst r12,1 + bld r10,4 + bst r11,4 + bld r12,1 + bst r12,3 + bld r11,4 + bst r13,4 + bld r12,3 + bst r12,7 + bld r13,4 + bst r13,5 + bld r12,7 + bst r11,7 + bld r13,5 + bst r13,3 + bld r11,7 + bst r13,6 + bld r13,3 + bst r10,7 + bld r13,6 + bst r13,1 + bld r10,7 + bst r11,6 + bld r13,1 + bst r10,3 + bld r11,6 + bst r13,0 + bld r10,3 + bst r12,6 + bld r13,0 + bst r10,5 + bld r12,6 + bst r11,1 + bld r10,5 + bst r11,2 + bld r11,1 + bst r10,2 + bld r11,2 + bst r0,0 + bld r10,2 + bst r14,0 + bld r0,0 + bst r25,0 + bld r14,0 + bst r25,6 + bld r25,0 + bst r15,7 + bld r25,6 + bst r14,3 + bld r15,7 + bst r0,0 + bld r14,3 + bst r14,1 + bld r0,0 + bst r24,0 + bld r14,1 + bst r25,4 + bld r24,0 + bst r25,7 + bld r25,4 + bst r14,7 + bld r25,7 + bst r0,0 + bld r14,7 + bst r14,2 + bld r0,0 + bst r15,0 + bld r14,2 + bst r25,2 + bld r15,0 + bst r15,6 + bld r25,2 + bst r15,3 + bld r15,6 + bst r0,0 + bld r15,3 + bst r14,4 + bld r0,0 + bst r25,1 + bld r14,4 + bst r24,6 + bld r25,1 + bst r15,5 + bld r24,6 + bst r24,3 + bld r15,5 + bst r0,0 + bld r24,3 + bst r14,5 + bld r0,0 + bst r24,1 + bld r14,5 + bst r24,4 + bld r24,1 + bst r25,5 + bld r24,4 + bst r24,7 + bld r25,5 + bst r0,0 + bld r24,7 + bst r14,6 + bld r0,0 + bst r15,1 + bld r14,6 + bst r24,2 + bld r15,1 + bst r15,4 + bld r24,2 + bst r25,3 + bld r15,4 + bst r0,0 + bld r25,3 + movw 
r20,r14 + movw r22,r24 + movw r14,r2 + movw r24,r4 + movw r2,r20 + movw r4,r22 + and r20,r6 + and r21,r7 + and r22,r8 + and r23,r9 + eor r10,r20 + eor r11,r21 + eor r12,r22 + eor r13,r23 + com r14 + com r15 + com r24 + com r25 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r14,r10 + eor r15,r11 + eor r24,r12 + eor r25,r13 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + or r0,r8 + eor r12,r0 + mov r0,r5 + or r0,r9 + eor r13,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r8 + and r0,r24 + eor r4,r0 + mov r0,r9 + and r0,r25 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r8,r0 + mov r0,r5 + and r0,r13 + eor r9,r0 + cp r19,r1 + breq 791f + inc r26 + ldi r27,5 + cpse r26,r27 + rjmp 375b + mov r26,r1 + eor r2,r18 + eor r3,r18 + eor r4,r18 + eor r5,r18 + rjmp 375b +791: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + bst r2,0 + bld r20,0 + bst r6,0 + bld r20,1 + bst r10,0 + bld r20,2 + bst r14,0 + bld r20,3 + bst r2,1 + bld r20,4 + bst r6,1 + bld r20,5 + bst r10,1 + bld r20,6 + bst r14,1 + bld r20,7 + bst r2,2 + bld r21,0 + bst r6,2 + bld r21,1 + bst r10,2 + bld r21,2 + bst r14,2 + bld r21,3 + bst r2,3 + bld r21,4 + bst r6,3 + bld r21,5 + bst r10,3 + bld r21,6 + bst r14,3 + bld r21,7 + bst r2,4 + bld r22,0 + bst r6,4 + bld r22,1 + bst r10,4 + bld r22,2 + bst r14,4 + bld r22,3 + bst r2,5 + bld r22,4 + bst r6,5 + bld r22,5 + bst r10,5 + bld r22,6 + bst r14,5 + bld r22,7 + bst r2,6 + bld r23,0 + bst r6,6 + bld r23,1 + bst r10,6 + bld r23,2 + bst r14,6 + bld r23,3 + bst r2,7 + bld r23,4 + bst r6,7 + bld r23,5 + bst r10,7 + bld r23,6 + bst r14,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r3,0 + bld r20,0 + bst r7,0 + bld r20,1 + bst r11,0 + bld r20,2 + bst r15,0 + bld r20,3 + bst r3,1 + bld r20,4 + bst r7,1 + bld r20,5 + bst r11,1 + bld r20,6 + bst r15,1 + bld r20,7 + bst r3,2 + bld r21,0 + bst r7,2 + bld r21,1 + bst r11,2 + bld r21,2 + bst r15,2 + bld r21,3 + bst r3,3 + bld r21,4 + bst r7,3 + bld r21,5 + bst r11,3 + bld r21,6 + bst r15,3 + bld r21,7 + bst r3,4 + bld r22,0 + bst r7,4 + bld r22,1 + bst r11,4 + bld r22,2 + bst r15,4 + bld r22,3 + bst r3,5 + bld r22,4 + bst r7,5 + bld r22,5 + bst r11,5 + bld r22,6 + bst r15,5 + bld r22,7 + bst r3,6 + bld r23,0 + bst r7,6 + bld r23,1 + bst r11,6 + bld r23,2 + bst r15,6 + bld r23,3 + bst r3,7 + bld r23,4 + bst r7,7 + bld r23,5 + bst r11,7 + bld r23,6 + bst r15,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r4,0 + bld r20,0 + bst r8,0 + bld r20,1 + bst r12,0 + bld r20,2 + bst r24,0 + bld r20,3 + bst r4,1 + bld r20,4 + bst r8,1 + bld r20,5 + bst r12,1 + bld r20,6 + bst r24,1 + bld r20,7 + bst r4,2 + bld r21,0 + bst r8,2 + bld r21,1 + bst r12,2 + bld r21,2 + bst r24,2 + bld r21,3 + bst r4,3 + bld r21,4 + bst r8,3 + bld r21,5 + bst r12,3 + bld r21,6 + bst r24,3 + bld r21,7 + bst r4,4 + bld r22,0 + bst r8,4 + bld r22,1 + bst r12,4 + bld r22,2 + bst r24,4 + bld r22,3 + bst r4,5 + bld r22,4 + bst r8,5 + bld r22,5 + bst r12,5 + bld r22,6 + bst r24,5 + bld r22,7 + bst r4,6 + bld r23,0 + bst r8,6 + bld r23,1 + bst r12,6 + bld r23,2 + bst r24,6 + bld r23,3 + bst r4,7 + bld r23,4 + bst r8,7 + bld r23,5 + bst r12,7 + bld r23,6 + bst r24,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + bst r5,0 + bld r20,0 + bst r9,0 + bld r20,1 + bst r13,0 + bld r20,2 + bst r25,0 + bld r20,3 + bst r5,1 + bld r20,4 + bst r9,1 
+ bld r20,5 + bst r13,1 + bld r20,6 + bst r25,1 + bld r20,7 + bst r5,2 + bld r21,0 + bst r9,2 + bld r21,1 + bst r13,2 + bld r21,2 + bst r25,2 + bld r21,3 + bst r5,3 + bld r21,4 + bst r9,3 + bld r21,5 + bst r13,3 + bld r21,6 + bst r25,3 + bld r21,7 + bst r5,4 + bld r22,0 + bst r9,4 + bld r22,1 + bst r13,4 + bld r22,2 + bst r25,4 + bld r22,3 + bst r5,5 + bld r22,4 + bst r9,5 + bld r22,5 + bst r13,5 + bld r22,6 + bst r25,5 + bld r22,7 + bst r5,6 + bld r23,0 + bst r9,6 + bld r23,1 + bst r13,6 + bld r23,2 + bst r25,6 + bld r23,3 + bst r5,7 + bld r23,4 + bst r9,7 + bld r23,5 + bst r13,7 + bld r23,6 + bst r25,7 + bld r23,7 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128t_decrypt, .-gift128t_decrypt + +#endif + +#endif diff --git a/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-util.h b/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-util.h +++ b/hyena/Implementations/crypto_aead/hyenav1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/aead-common.c b/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
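The LW_CRYPTO_ROTATE32_COMPOSED block above builds every 32-bit rotation out of the only rotations that are cheap on AVR: by 1 bit, or by a whole number of bytes. For instance, leftRotate5() is defined as a byte rotation left by 8 followed by three single-bit rotations right. A minimal C sketch of that composition idea follows; the helper names rol32, ror32 and rol32_by5_composed are illustrative and are not part of internal-util.h.

#include <stdint.h>
#include <stdio.h>

/* Generic rotations, matching the behaviour of the leftRotate()/rightRotate()
 * macros in internal-util.h for 0 < bits < 32. */
static uint32_t rol32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32 - bits));
}

static uint32_t ror32(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32 - bits));
}

/* Composed left rotate by 5, mirroring the leftRotate5() macro above:
 * rotate left by a whole byte (cheap on AVR, essentially a register
 * shuffle), then rotate right by 1 bit three times. */
static uint32_t rol32_by5_composed(uint32_t x)
{
    x = rol32(x, 8);
    x = ror32(x, 1);
    x = ror32(x, 1);
    x = ror32(x, 1);
    return x;
}

int main(void)
{
    uint32_t v = 0x12345678u;
    /* Both lines should print the same value. */
    printf("%08lx\n", (unsigned long)rol32(v, 5));
    printf("%08lx\n", (unsigned long)rol32_by5_composed(v));
    return 0;
}

The same scheme covers every count from 1 to 31: rotate by the nearest multiple of 8 first, then correct with at most four single-bit rotations in whichever direction is shorter.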
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/aead-common.h b/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. 
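The aead_check_tag() code above compares the two tags through a single accumulator and then converts the result into an all-ones or all-zeros mask with (accum - 1) >> 8, so the comparison takes the same time whether or not the tags match, and the plaintext is wiped on a mismatch. A small self-contained sketch of that masking pattern, assuming the usual arithmetic right shift for signed int (as the library code itself does); the function name check_tag_ct is illustrative, not part of the library.

#include <stddef.h>
#include <stdio.h>

/* Constant-time tag comparison in the style of aead_check_tag():
 * accumulate the XOR of all tag bytes, turn the result into an
 * all-ones (match) or all-zeros (mismatch) mask, and use the mask to
 * destroy the plaintext when the tags differ. */
static int check_tag_ct(unsigned char *plaintext, size_t plaintext_len,
                        const unsigned char *tag1, const unsigned char *tag2,
                        size_t tag_len)
{
    int accum = 0;
    size_t i;
    for (i = 0; i < tag_len; ++i)
        accum |= tag1[i] ^ tag2[i];        /* non-zero iff the tags differ */
    accum = (accum - 1) >> 8;              /* -1 if equal, 0 if different */
    for (i = 0; i < plaintext_len; ++i)
        plaintext[i] &= (unsigned char)accum;
    return ~accum;                         /* 0 on success, -1 on failure */
}

int main(void)
{
    unsigned char pt[4]   = {1, 2, 3, 4};
    unsigned char good[4] = {9, 9, 9, 9};
    unsigned char bad[4]  = {9, 9, 9, 8};
    printf("%d\n", check_tag_ct(pt, sizeof(pt), good, good, 4));  /* 0, pt kept */
    printf("%d\n", check_tag_ct(pt, sizeof(pt), good, bad, 4));   /* -1, pt zeroed */
    return 0;
}

aead_check_tag_precheck() follows the same pattern but additionally ANDs the mask with a caller-supplied precheck value, so an earlier failed check forces the final result to fail as well.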
- * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
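
As a small illustrative sketch (the wrapper name and the convention of passing NULL for the unused arguments are assumptions, not taken from this code), the function-pointer typedefs above let a caller drive any AEAD scheme generically:

    /* Encrypt with no associated data and no secret nonce through the
     * generic aead_cipher_encrypt_t pointer declared above. */
    static int encrypt_no_ad(aead_cipher_encrypt_t encrypt,
                             unsigned char *c, unsigned long long *clen,
                             const unsigned char *m, unsigned long long mlen,
                             const unsigned char *npub, const unsigned char *k)
    {
        return encrypt(c, clen, m, mlen, 0, 0, 0, npub, k);
    }
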
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/api.h b/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/encrypt.c b/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/encrypt.c deleted file mode 100644 index 18697ad..0000000 --- a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "isap.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return isap_ascon_128a_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return isap_ascon_128a_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-ascon-avr.S b/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-ascon-avr.S deleted file mode 100644 index e8a4fb4..0000000 --- a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-ascon-avr.S +++ /dev/null @@ -1,778 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global ascon_permute - .type ascon_permute, @function -ascon_permute: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ldi r18,15 - sub r18,r22 - swap r18 - or r22,r18 - ldd r3,Z+16 - ldd r2,Z+17 - ldd r27,Z+18 - ldd r26,Z+19 - ldd r21,Z+20 - ldd r20,Z+21 - ldd r19,Z+22 - ldd r18,Z+23 - ldd r11,Z+32 - ldd r10,Z+33 - ldd r9,Z+34 - ldd r8,Z+35 - ldd r7,Z+36 - ldd r6,Z+37 - ldd r5,Z+38 - ldd r4,Z+39 -20: - eor r18,r22 - ldd r23,Z+7 - ldd r12,Z+15 - ldd r13,Z+31 - eor r23,r4 - eor r4,r13 - eor r18,r12 - mov r14,r23 - mov r15,r12 - mov r24,r18 - mov r25,r13 - mov r16,r4 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r18 - and r24,r13 - and r25,r4 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r18,r25 - eor r13,r16 - eor 
r4,r14 - eor r12,r23 - eor r23,r4 - eor r13,r18 - com r18 - std Z+7,r23 - std Z+15,r12 - std Z+31,r13 - std Z+39,r4 - ldd r23,Z+6 - ldd r12,Z+14 - ldd r13,Z+30 - eor r23,r5 - eor r5,r13 - eor r19,r12 - mov r14,r23 - mov r15,r12 - mov r24,r19 - mov r25,r13 - mov r16,r5 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r19 - and r24,r13 - and r25,r5 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r19,r25 - eor r13,r16 - eor r5,r14 - eor r12,r23 - eor r23,r5 - eor r13,r19 - com r19 - std Z+6,r23 - std Z+14,r12 - std Z+30,r13 - std Z+38,r5 - ldd r23,Z+5 - ldd r12,Z+13 - ldd r13,Z+29 - eor r23,r6 - eor r6,r13 - eor r20,r12 - mov r14,r23 - mov r15,r12 - mov r24,r20 - mov r25,r13 - mov r16,r6 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r20 - and r24,r13 - and r25,r6 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r20,r25 - eor r13,r16 - eor r6,r14 - eor r12,r23 - eor r23,r6 - eor r13,r20 - com r20 - std Z+5,r23 - std Z+13,r12 - std Z+29,r13 - std Z+37,r6 - ldd r23,Z+4 - ldd r12,Z+12 - ldd r13,Z+28 - eor r23,r7 - eor r7,r13 - eor r21,r12 - mov r14,r23 - mov r15,r12 - mov r24,r21 - mov r25,r13 - mov r16,r7 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r21 - and r24,r13 - and r25,r7 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r21,r25 - eor r13,r16 - eor r7,r14 - eor r12,r23 - eor r23,r7 - eor r13,r21 - com r21 - std Z+4,r23 - std Z+12,r12 - std Z+28,r13 - std Z+36,r7 - ldd r23,Z+3 - ldd r12,Z+11 - ldd r13,Z+27 - eor r23,r8 - eor r8,r13 - eor r26,r12 - mov r14,r23 - mov r15,r12 - mov r24,r26 - mov r25,r13 - mov r16,r8 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r26 - and r24,r13 - and r25,r8 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r26,r25 - eor r13,r16 - eor r8,r14 - eor r12,r23 - eor r23,r8 - eor r13,r26 - com r26 - std Z+3,r23 - std Z+11,r12 - std Z+27,r13 - std Z+35,r8 - ldd r23,Z+2 - ldd r12,Z+10 - ldd r13,Z+26 - eor r23,r9 - eor r9,r13 - eor r27,r12 - mov r14,r23 - mov r15,r12 - mov r24,r27 - mov r25,r13 - mov r16,r9 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r27 - and r24,r13 - and r25,r9 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r27,r25 - eor r13,r16 - eor r9,r14 - eor r12,r23 - eor r23,r9 - eor r13,r27 - com r27 - std Z+2,r23 - std Z+10,r12 - std Z+26,r13 - std Z+34,r9 - ldd r23,Z+1 - ldd r12,Z+9 - ldd r13,Z+25 - eor r23,r10 - eor r10,r13 - eor r2,r12 - mov r14,r23 - mov r15,r12 - mov r24,r2 - mov r25,r13 - mov r16,r10 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r2 - and r24,r13 - and r25,r10 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r2,r25 - eor r13,r16 - eor r10,r14 - eor r12,r23 - eor r23,r10 - eor r13,r2 - com r2 - std Z+1,r23 - std Z+9,r12 - std Z+25,r13 - std Z+33,r10 - ld r23,Z - ldd r12,Z+8 - ldd r13,Z+24 - eor r23,r11 - eor r11,r13 - eor r3,r12 - mov r14,r23 - mov r15,r12 - mov r24,r3 - mov r25,r13 - mov r16,r11 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r3 - and r24,r13 - and r25,r11 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r3,r25 - eor r13,r16 - eor r11,r14 - eor r12,r23 - eor r23,r11 - eor r13,r3 - com r3 - st Z,r23 - std Z+8,r12 - std Z+24,r13 - std Z+32,r11 - ld r11,Z - ldd r10,Z+1 - ldd r9,Z+2 - ldd r8,Z+3 - ldd r7,Z+4 - ldd r6,Z+5 - ldd r5,Z+6 - ldd r4,Z+7 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r14 - mov r14,r24 - mov r24,r16 - mov r16,r0 - mov r0,r13 - mov r13,r15 - mov r15,r25 - mov r25,r17 - mov r17,r0 
- mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r17,r0 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r4 - mov r0,r5 - push r6 - mov r4,r7 - mov r5,r8 - mov r6,r9 - mov r7,r10 - mov r8,r11 - pop r11 - mov r10,r0 - mov r9,r23 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - st Z,r11 - std Z+1,r10 - std Z+2,r9 - std Z+3,r8 - std Z+4,r7 - std Z+5,r6 - std Z+6,r5 - std Z+7,r4 - ldd r11,Z+8 - ldd r10,Z+9 - ldd r9,Z+10 - ldd r8,Z+11 - ldd r7,Z+12 - ldd r6,Z+13 - ldd r5,Z+14 - ldd r4,Z+15 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r9 - mov r0,r10 - push r11 - mov r11,r8 - mov r10,r7 - mov r9,r6 - mov r8,r5 - mov r7,r4 - pop r6 - mov r5,r0 - mov r4,r23 - lsl r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - adc r4,r1 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - std Z+8,r11 - std Z+9,r10 - std Z+10,r9 - std Z+11,r8 - std Z+12,r7 - std Z+13,r6 - std Z+14,r5 - std Z+15,r4 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - bst r12,0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - bld r17,7 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - eor r24,r26 - eor r25,r27 - eor r16,r2 - eor r17,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r2 - rol r3 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r2 - rol r3 - adc r18,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - eor r26,r24 - eor r27,r25 - eor r2,r16 - eor r3,r17 - ldd r11,Z+24 - ldd r10,Z+25 - ldd r9,Z+26 - ldd r8,Z+27 - ldd r7,Z+28 - ldd r6,Z+29 - ldd r5,Z+30 - ldd r4,Z+31 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r17,r0 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r0,r4 - mov r4,r6 - mov r6,r8 - mov r8,r10 - mov r10,r0 - mov r0,r5 - mov r5,r7 - mov r7,r9 - mov r9,r11 - mov r11,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror 
r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - std Z+24,r11 - std Z+25,r10 - std Z+26,r9 - std Z+27,r8 - std Z+28,r7 - std Z+29,r6 - std Z+30,r5 - std Z+31,r4 - ldd r11,Z+32 - ldd r10,Z+33 - ldd r9,Z+34 - ldd r8,Z+35 - ldd r7,Z+36 - ldd r6,Z+37 - ldd r5,Z+38 - ldd r4,Z+39 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r9 - mov r0,r10 - push r11 - mov r11,r8 - mov r10,r7 - mov r9,r6 - mov r8,r5 - mov r7,r4 - pop r6 - mov r5,r0 - mov r4,r23 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - subi r22,15 - ldi r25,60 - cpse r22,r25 - rjmp 20b - std Z+16,r3 - std Z+17,r2 - std Z+18,r27 - std Z+19,r26 - std Z+20,r21 - std Z+21,r20 - std Z+22,r19 - std Z+23,r18 - std Z+32,r11 - std Z+33,r10 - std Z+34,r9 - std Z+35,r8 - std Z+36,r7 - std Z+37,r6 - std Z+38,r5 - std Z+39,r4 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size ascon_permute, .-ascon_permute - -#endif diff --git a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-ascon.c b/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-ascon.c deleted file mode 100644 index 657aabe..0000000 --- a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-ascon.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "internal-ascon.h" - -#if !defined(__AVR__) - -void ascon_permute(ascon_state_t *state, uint8_t first_round) -{ - uint64_t t0, t1, t2, t3, t4; -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = be_load_word64(state->B); - uint64_t x1 = be_load_word64(state->B + 8); - uint64_t x2 = be_load_word64(state->B + 16); - uint64_t x3 = be_load_word64(state->B + 24); - uint64_t x4 = be_load_word64(state->B + 32); -#else - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; -#endif - while (first_round < 12) { - /* Add the round constant to the state */ - x2 ^= ((0x0F - first_round) << 4) | first_round; - - /* Substitution layer - apply the s-box using bit-slicing - * according to the algorithm recommended in the specification */ - x0 ^= x4; x4 ^= x3; x2 ^= x1; - t0 = ~x0; t1 = ~x1; t2 = ~x2; t3 = ~x3; t4 = ~x4; - t0 &= x1; t1 &= x2; t2 &= x3; t3 &= x4; t4 &= x0; - x0 ^= t1; x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t0; - x1 ^= x0; x0 ^= x4; x3 ^= x2; x2 = ~x2; - - /* Linear diffusion layer */ - x0 ^= rightRotate19_64(x0) ^ rightRotate28_64(x0); - x1 ^= rightRotate61_64(x1) ^ rightRotate39_64(x1); - x2 ^= rightRotate1_64(x2) ^ rightRotate6_64(x2); - x3 ^= rightRotate10_64(x3) ^ rightRotate17_64(x3); - x4 ^= rightRotate7_64(x4) ^ rightRotate41_64(x4); - - /* Move onto the next round */ - ++first_round; - } -#if defined(LW_UTIL_LITTLE_ENDIAN) - be_store_word64(state->B, x0); - be_store_word64(state->B + 8, x1); - be_store_word64(state->B + 16, x2); - be_store_word64(state->B + 24, x3); - be_store_word64(state->B + 32, x4); -#else - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; -#endif -} - -#endif /* !__AVR__ */ diff --git a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-ascon.h b/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-ascon.h deleted file mode 100644 index d3fa3ca..0000000 --- a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-ascon.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_ASCON_H -#define LW_INTERNAL_ASCON_H - -#include "internal-util.h" - -/** - * \file internal-ascon.h - * \brief Internal implementation of the ASCON permutation. 
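
Restated as a minimal standalone sketch (the rotation helper and function name are assumptions made for illustration), one round of the permutation implemented above looks like this:

    #include <stdint.h>

    /* Rotation helper assumed for this sketch; n must be 1..63 */
    #define ror64(x, n) (((x) >> (n)) | ((x) << (64 - (n))))

    /* One ASCON round, operating on five 64-bit words */
    static void ascon_round_sketch(uint64_t x[5], uint8_t round)
    {
        uint64_t t0, t1, t2, t3, t4;

        /* Add the round constant to x[2] */
        x[2] ^= (uint64_t)(((0x0F - round) << 4) | round);

        /* Substitution layer: bit-sliced 5-bit s-box */
        x[0] ^= x[4]; x[4] ^= x[3]; x[2] ^= x[1];
        t0 = (~x[0]) & x[1]; t1 = (~x[1]) & x[2]; t2 = (~x[2]) & x[3];
        t3 = (~x[3]) & x[4]; t4 = (~x[4]) & x[0];
        x[0] ^= t1; x[1] ^= t2; x[2] ^= t3; x[3] ^= t4; x[4] ^= t0;
        x[1] ^= x[0]; x[0] ^= x[4]; x[3] ^= x[2]; x[2] = ~x[2];

        /* Linear diffusion layer */
        x[0] ^= ror64(x[0], 19) ^ ror64(x[0], 28);
        x[1] ^= ror64(x[1], 61) ^ ror64(x[1], 39);
        x[2] ^= ror64(x[2], 1)  ^ ror64(x[2], 6);
        x[3] ^= ror64(x[3], 10) ^ ror64(x[3], 17);
        x[4] ^= ror64(x[4], 7)  ^ ror64(x[4], 41);
    }
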
- * - * References: http://competitions.cr.yp.to/round3/asconv12.pdf, - * http://ascon.iaik.tugraz.at/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Structure of the internal state of the ASCON permutation. - */ -typedef union -{ - uint64_t S[5]; /**< Words of the state */ - uint8_t B[40]; /**< Bytes of the state */ - -} ascon_state_t; - -/** - * \brief Permutes the ASCON state. - * - * \param state The ASCON state to be permuted. - * \param first_round The first round (of 12) to be performed; 0, 4, or 6. - * - * The input and output \a state will be in big-endian byte order. - */ -void ascon_permute(ascon_state_t *state, uint8_t first_round); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-isap.h b/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-isap.h deleted file mode 100644 index ba99f2a..0000000 --- a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-isap.h +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -/* We expect a number of macros to be defined before this file - * is included to configure the underlying ISAP variant. - * - * ISAP_ALG_NAME Name of the ISAP algorithm; e.g. isap_keccak_128 - * ISAP_RATE Number of bytes in the rate for hashing and encryption. - * ISAP_sH Number of rounds for hashing. - * ISAP_sE Number of rounds for encryption. - * ISAP_sB Number of rounds for key bit absorption. - * ISAP_sK Number of rounds for keying. - * ISAP_STATE Type for the permuation state; e.g. ascon_state_t - * ISAP_PERMUTE(s,r) Permutes the state "s" with number of rounds "r". 
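
For illustration only, a variant source file defines these macros and then includes this header to stamp out the concrete functions; the rate, round counts, and permutation mapping below are assumed values for an ASCON-based variant, not taken from this patch:

    #define ISAP_ALG_NAME isap_ascon_128a
    #define ISAP_STATE    ascon_state_t
    #define ISAP_RATE     8               /* assumed: 64-bit rate */
    #define ISAP_sH       12              /* assumed round counts */
    #define ISAP_sB       1
    #define ISAP_sE       6
    #define ISAP_sK       12
    #define ISAP_PERMUTE(s, r) ascon_permute((s), 12 - (r)) /* assumed mapping */
    #include "internal-isap.h"
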
- */ -#if defined(ISAP_ALG_NAME) - -#define ISAP_CONCAT_INNER(name,suffix) name##suffix -#define ISAP_CONCAT(name,suffix) ISAP_CONCAT_INNER(name,suffix) - -/* IV string for initialising the associated data */ -static unsigned char const ISAP_CONCAT(ISAP_ALG_NAME,_IV_A) - [sizeof(ISAP_STATE) - ISAP_NONCE_SIZE] = { - 0x01, ISAP_KEY_SIZE * 8, ISAP_RATE * 8, 1, - ISAP_sH, ISAP_sB, ISAP_sE, ISAP_sK -}; - -/* IV string for authenticating associated data */ -static unsigned char const ISAP_CONCAT(ISAP_ALG_NAME,_IV_KA) - [sizeof(ISAP_STATE) - ISAP_KEY_SIZE] = { - 0x02, ISAP_KEY_SIZE * 8, ISAP_RATE * 8, 1, - ISAP_sH, ISAP_sB, ISAP_sE, ISAP_sK -}; - -/* IV string for encrypting payload data */ -static unsigned char const ISAP_CONCAT(ISAP_ALG_NAME,_IV_KE) - [sizeof(ISAP_STATE) - ISAP_KEY_SIZE] = { - 0x03, ISAP_KEY_SIZE * 8, ISAP_RATE * 8, 1, - ISAP_sH, ISAP_sB, ISAP_sE, ISAP_sK -}; - -/** - * \brief Re-keys the ISAP permutation state. - * - * \param state The permutation state to be re-keyed. - * \param k Points to the 128-bit key for the ISAP cipher. - * \param iv Points to the initialization vector for this re-keying operation. - * \param data Points to the data to be absorbed to perform the re-keying. - * \param data_len Length of the data to be absorbed. - * - * The output key will be left in the leading bytes of \a state. - */ -static void ISAP_CONCAT(ISAP_ALG_NAME,_rekey) - (ISAP_STATE *state, const unsigned char *k, const unsigned char *iv, - const unsigned char *data, unsigned data_len) -{ - unsigned bit, num_bits; - - /* Initialize the state with the key and IV */ - memcpy(state->B, k, ISAP_KEY_SIZE); - memcpy(state->B + ISAP_KEY_SIZE, iv, sizeof(state->B) - ISAP_KEY_SIZE); - ISAP_PERMUTE(state, ISAP_sK); - - /* Absorb all of the bits of the data buffer one by one */ - num_bits = data_len * 8 - 1; - for (bit = 0; bit < num_bits; ++bit) { - state->B[0] ^= (data[bit / 8] << (bit % 8)) & 0x80; - ISAP_PERMUTE(state, ISAP_sB); - } - state->B[0] ^= (data[bit / 8] << (bit % 8)) & 0x80; - ISAP_PERMUTE(state, ISAP_sK); -} - -/** - * \brief Encrypts (or decrypts) a message payload with ISAP. - * - * \param state ISAP permutation state. - * \param k Points to the 128-bit key for the ISAP cipher. - * \param npub Points to the 128-bit nonce for the ISAP cipher. - * \param c Buffer to receive the output ciphertext. - * \param m Buffer to receive the input plaintext. - * \param mlen Length of the input plaintext. - */ -static void ISAP_CONCAT(ISAP_ALG_NAME,_encrypt) - (ISAP_STATE *state, const unsigned char *k, const unsigned char *npub, - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Set up the re-keyed encryption key and nonce in the state */ - ISAP_CONCAT(ISAP_ALG_NAME,_rekey) - (state, k, ISAP_CONCAT(ISAP_ALG_NAME,_IV_KE), npub, ISAP_NONCE_SIZE); - memcpy(state->B + sizeof(ISAP_STATE) - ISAP_NONCE_SIZE, - npub, ISAP_NONCE_SIZE); - - /* Encrypt the plaintext to produce the ciphertext */ - while (mlen >= ISAP_RATE) { - ISAP_PERMUTE(state, ISAP_sE); - lw_xor_block_2_src(c, state->B, m, ISAP_RATE); - c += ISAP_RATE; - m += ISAP_RATE; - mlen -= ISAP_RATE; - } - if (mlen > 0) { - ISAP_PERMUTE(state, ISAP_sE); - lw_xor_block_2_src(c, state->B, m, (unsigned)mlen); - } -} - -/** - * \brief Authenticates the associated data and ciphertext using ISAP. - * - * \param state ISAP permutation state. - * \param k Points to the 128-bit key for the ISAP cipher. - * \param npub Points to the 128-bit nonce for the ISAP cipher. - * \param ad Buffer containing the associated data. 
- * \param adlen Length of the associated data. - * \param c Buffer containing the ciphertext. - * \param clen Length of the ciphertext. - */ -static void ISAP_CONCAT(ISAP_ALG_NAME,_mac) - (ISAP_STATE *state, const unsigned char *k, const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *c, unsigned long long clen, - unsigned char *tag) -{ - unsigned char preserve[sizeof(ISAP_STATE) - ISAP_TAG_SIZE]; - unsigned temp; - - /* Absorb the associated data */ - memcpy(state->B, npub, ISAP_NONCE_SIZE); - memcpy(state->B + ISAP_NONCE_SIZE, ISAP_CONCAT(ISAP_ALG_NAME,_IV_A), - sizeof(state->B) - ISAP_NONCE_SIZE); - ISAP_PERMUTE(state, ISAP_sH); - while (adlen >= ISAP_RATE) { - lw_xor_block(state->B, ad, ISAP_RATE); - ISAP_PERMUTE(state, ISAP_sH); - ad += ISAP_RATE; - adlen -= ISAP_RATE; - } - temp = (unsigned)adlen; - lw_xor_block(state->B, ad, temp); - state->B[temp] ^= 0x80; /* padding */ - ISAP_PERMUTE(state, ISAP_sH); - state->B[sizeof(state->B) - 1] ^= 0x01; /* domain separation */ - - /* Absorb the ciphertext */ - while (clen >= ISAP_RATE) { - lw_xor_block(state->B, c, ISAP_RATE); - ISAP_PERMUTE(state, ISAP_sH); - c += ISAP_RATE; - clen -= ISAP_RATE; - } - temp = (unsigned)clen; - lw_xor_block(state->B, c, temp); - state->B[temp] ^= 0x80; /* padding */ - ISAP_PERMUTE(state, ISAP_sH); - - /* Re-key the state and generate the authentication tag */ - memcpy(tag, state->B, ISAP_TAG_SIZE); - memcpy(preserve, state->B + ISAP_TAG_SIZE, sizeof(preserve)); - ISAP_CONCAT(ISAP_ALG_NAME,_rekey) - (state, k, ISAP_CONCAT(ISAP_ALG_NAME,_IV_KA), tag, ISAP_TAG_SIZE); - memcpy(state->B + ISAP_TAG_SIZE, preserve, sizeof(preserve)); - ISAP_PERMUTE(state, ISAP_sH); - memcpy(tag, state->B, ISAP_TAG_SIZE); -} - -int ISAP_CONCAT(ISAP_ALG_NAME,_aead_encrypt) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - ISAP_STATE state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ISAP_TAG_SIZE; - - /* Encrypt the plaintext to produce the ciphertext */ - ISAP_CONCAT(ISAP_ALG_NAME,_encrypt)(&state, k, npub, c, m, mlen); - - /* Authenticate the associated data and ciphertext to generate the tag */ - ISAP_CONCAT(ISAP_ALG_NAME,_mac) - (&state, k, npub, ad, adlen, c, mlen, c + mlen); - return 0; -} - -int ISAP_CONCAT(ISAP_ALG_NAME,_aead_decrypt) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - ISAP_STATE state; - unsigned char tag[ISAP_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ISAP_TAG_SIZE) - return -1; - *mlen = clen - ISAP_TAG_SIZE; - - /* Authenticate the associated data and ciphertext to generate the tag */ - ISAP_CONCAT(ISAP_ALG_NAME,_mac)(&state, k, npub, ad, adlen, c, *mlen, tag); - - /* Decrypt the ciphertext to produce the plaintext */ - ISAP_CONCAT(ISAP_ALG_NAME,_encrypt)(&state, k, npub, m, c, *mlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, tag, c + *mlen, ISAP_TAG_SIZE); -} - -#endif /* ISAP_ALG_NAME */ - -/* Now undefine everything so that we can include this file again for - * another variant on the ISAP algorithm */ -#undef ISAP_ALG_NAME -#undef ISAP_RATE -#undef 
ISAP_sH -#undef ISAP_sE -#undef ISAP_sB -#undef ISAP_sK -#undef ISAP_STATE -#undef ISAP_PERMUTE -#undef ISAP_CONCAT_INNER -#undef ISAP_CONCAT diff --git a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-keccak-avr.S b/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-keccak-avr.S deleted file mode 100644 index e50ccaf..0000000 --- a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-keccak-avr.S +++ /dev/null @@ -1,1552 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global keccakp_200_permute - .type keccakp_200_permute, @function -keccakp_200_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r26,Z+6 - ldd r27,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r4,Z+12 - ldd r5,Z+13 - ldd r6,Z+14 - ldd r7,Z+15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - ldd r24,Z+24 - push r31 - push r30 - rcall 82f - ldi r30,1 - eor r18,r30 - rcall 82f - ldi r30,130 - eor r18,r30 - rcall 82f - ldi r30,138 - eor r18,r30 - rcall 82f - mov r30,r1 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,1 - eor r18,r30 - rcall 82f - ldi r30,129 - eor r18,r30 - rcall 82f - ldi r30,9 - eor r18,r30 - rcall 82f - ldi r30,138 - eor r18,r30 - rcall 82f - ldi r30,136 - eor r18,r30 - rcall 82f - ldi r30,9 - eor r18,r30 - rcall 82f - ldi r30,10 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,137 - eor r18,r30 - rcall 82f - ldi r30,3 - eor r18,r30 - rcall 82f - ldi r30,2 - eor r18,r30 - rcall 82f - ldi r30,128 - eor r18,r30 - rjmp 420f -82: - mov r30,r18 - eor r30,r23 - eor r30,r2 - eor r30,r7 - eor r30,r12 - mov r31,r19 - eor r31,r26 - eor r31,r3 - eor r31,r8 - eor r31,r13 - mov r25,r20 - eor r25,r27 - eor r25,r4 - eor r25,r9 - eor r25,r14 - mov r16,r21 - eor r16,r28 - eor r16,r5 - eor r16,r10 - eor r16,r15 - mov r17,r22 - eor r17,r29 - eor r17,r6 - eor r17,r11 - eor r17,r24 - mov r0,r31 - lsl r0 - adc r0,r1 - eor r0,r17 - eor r18,r0 - eor r23,r0 - eor r2,r0 - eor r7,r0 - eor r12,r0 - mov r0,r25 - lsl r0 - adc r0,r1 - eor r0,r30 - eor r19,r0 - eor r26,r0 - eor r3,r0 - eor r8,r0 - eor r13,r0 - mov r0,r16 - lsl r0 - adc r0,r1 - eor r0,r31 - eor r20,r0 - eor r27,r0 - eor r4,r0 - eor r9,r0 - eor r14,r0 - mov r0,r17 - lsl r0 - adc r0,r1 - eor r0,r25 - eor r21,r0 - eor r28,r0 - eor r5,r0 - eor r10,r0 - eor r15,r0 - mov r0,r30 - lsl r0 - adc r0,r1 - eor r0,r16 - eor r22,r0 - eor r29,r0 - eor r6,r0 - eor r11,r0 - eor r24,r0 - mov r30,r19 - swap r26 - mov r19,r26 - swap r29 - mov r26,r29 - mov r0,r1 - lsr r14 - ror r0 - lsr r14 - ror r0 - lsr r14 - ror r0 - or r14,r0 - mov r29,r14 - bst r6,0 - lsr r6 - bld r6,7 - mov r14,r6 - lsl r12 - adc r12,r1 - lsl r12 - adc r12,r1 - mov r6,r12 - mov r0,r1 - lsr r20 - ror r0 - lsr r20 - ror r0 - or r20,r0 - mov r12,r20 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - mov r20,r4 - lsl r5 - adc r5,r1 - mov r4,r5 - mov r5,r11 - mov r11,r15 - lsl r7 - adc r7,r1 - mov r15,r7 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - mov r7,r22 - mov r0,r1 - lsr r24 - ror r0 - lsr r24 - ror r0 - or r24,r0 - mov r22,r24 - lsl r13 - adc 
r13,r1 - lsl r13 - adc r13,r1 - mov r24,r13 - bst r28,0 - lsr r28 - bld r28,7 - mov r13,r28 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r28,r8 - swap r23 - mov r8,r23 - swap r21 - mov r23,r21 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r21,r10 - bst r9,0 - lsr r9 - bld r9,7 - mov r10,r9 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - mov r9,r3 - mov r0,r1 - lsr r27 - ror r0 - lsr r27 - ror r0 - or r27,r0 - mov r3,r27 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - mov r27,r2 - lsl r30 - adc r30,r1 - mov r2,r30 - mov r30,r18 - mov r31,r19 - mov r25,r20 - mov r16,r21 - mov r17,r22 - mov r18,r25 - mov r0,r31 - com r0 - and r18,r0 - eor r18,r30 - mov r19,r16 - mov r0,r25 - com r0 - and r19,r0 - eor r19,r31 - mov r20,r17 - mov r0,r16 - com r0 - and r20,r0 - eor r20,r25 - mov r21,r30 - mov r0,r17 - com r0 - and r21,r0 - eor r21,r16 - mov r22,r31 - mov r0,r30 - com r0 - and r22,r0 - eor r22,r17 - mov r30,r23 - mov r31,r26 - mov r25,r27 - mov r16,r28 - mov r17,r29 - mov r23,r25 - mov r0,r31 - com r0 - and r23,r0 - eor r23,r30 - mov r26,r16 - mov r0,r25 - com r0 - and r26,r0 - eor r26,r31 - mov r27,r17 - mov r0,r16 - com r0 - and r27,r0 - eor r27,r25 - mov r28,r30 - mov r0,r17 - com r0 - and r28,r0 - eor r28,r16 - mov r29,r31 - mov r0,r30 - com r0 - and r29,r0 - eor r29,r17 - mov r30,r2 - mov r31,r3 - mov r25,r4 - mov r16,r5 - mov r17,r6 - mov r2,r25 - mov r0,r31 - com r0 - and r2,r0 - eor r2,r30 - mov r3,r16 - mov r0,r25 - com r0 - and r3,r0 - eor r3,r31 - mov r4,r17 - mov r0,r16 - com r0 - and r4,r0 - eor r4,r25 - mov r5,r30 - mov r0,r17 - com r0 - and r5,r0 - eor r5,r16 - mov r6,r31 - mov r0,r30 - com r0 - and r6,r0 - eor r6,r17 - mov r30,r7 - mov r31,r8 - mov r25,r9 - mov r16,r10 - mov r17,r11 - mov r7,r25 - mov r0,r31 - com r0 - and r7,r0 - eor r7,r30 - mov r8,r16 - mov r0,r25 - com r0 - and r8,r0 - eor r8,r31 - mov r9,r17 - mov r0,r16 - com r0 - and r9,r0 - eor r9,r25 - mov r10,r30 - mov r0,r17 - com r0 - and r10,r0 - eor r10,r16 - mov r11,r31 - mov r0,r30 - com r0 - and r11,r0 - eor r11,r17 - mov r30,r12 - mov r31,r13 - mov r25,r14 - mov r16,r15 - mov r17,r24 - mov r12,r25 - mov r0,r31 - com r0 - and r12,r0 - eor r12,r30 - mov r13,r16 - mov r0,r25 - com r0 - and r13,r0 - eor r13,r31 - mov r14,r17 - mov r0,r16 - com r0 - and r14,r0 - eor r14,r25 - mov r15,r30 - mov r0,r17 - com r0 - and r15,r0 - eor r15,r16 - mov r24,r31 - mov r0,r30 - com r0 - and r24,r0 - eor r24,r17 - ret -420: - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r22 - std Z+5,r23 - std Z+6,r26 - std Z+7,r27 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r4 - std Z+13,r5 - std Z+14,r6 - std Z+15,r7 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - std Z+24,r24 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size keccakp_200_permute, .-keccakp_200_permute - - .text -.global keccakp_400_permute - .type keccakp_400_permute, @function -keccakp_400_permute: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - movw r30,r24 -.L__stack_usage = 17 - ld r6,Z - ldd r7,Z+1 - ldd r8,Z+2 - ldd r9,Z+3 - ldd r10,Z+4 - ldd r11,Z+5 - ldd r12,Z+6 - ldd r13,Z+7 - ldd r14,Z+8 - ldd r15,Z+9 - cpi 
r22,20 - brcs 15f - rcall 153f - ldi r23,1 - eor r6,r23 -15: - cpi r22,19 - brcs 23f - rcall 153f - ldi r23,130 - eor r6,r23 - ldi r17,128 - eor r7,r17 -23: - cpi r22,18 - brcs 31f - rcall 153f - ldi r23,138 - eor r6,r23 - ldi r17,128 - eor r7,r17 -31: - cpi r22,17 - brcs 37f - rcall 153f - ldi r23,128 - eor r7,r23 -37: - cpi r22,16 - brcs 45f - rcall 153f - ldi r23,139 - eor r6,r23 - ldi r17,128 - eor r7,r17 -45: - cpi r22,15 - brcs 51f - rcall 153f - ldi r23,1 - eor r6,r23 -51: - cpi r22,14 - brcs 59f - rcall 153f - ldi r23,129 - eor r6,r23 - ldi r17,128 - eor r7,r17 -59: - cpi r22,13 - brcs 67f - rcall 153f - ldi r23,9 - eor r6,r23 - ldi r17,128 - eor r7,r17 -67: - cpi r22,12 - brcs 73f - rcall 153f - ldi r23,138 - eor r6,r23 -73: - cpi r22,11 - brcs 79f - rcall 153f - ldi r23,136 - eor r6,r23 -79: - cpi r22,10 - brcs 87f - rcall 153f - ldi r23,9 - eor r6,r23 - ldi r17,128 - eor r7,r17 -87: - cpi r22,9 - brcs 93f - rcall 153f - ldi r23,10 - eor r6,r23 -93: - cpi r22,8 - brcs 101f - rcall 153f - ldi r23,139 - eor r6,r23 - ldi r17,128 - eor r7,r17 -101: - cpi r22,7 - brcs 107f - rcall 153f - ldi r23,139 - eor r6,r23 -107: - cpi r22,6 - brcs 115f - rcall 153f - ldi r23,137 - eor r6,r23 - ldi r17,128 - eor r7,r17 -115: - cpi r22,5 - brcs 123f - rcall 153f - ldi r23,3 - eor r6,r23 - ldi r17,128 - eor r7,r17 -123: - cpi r22,4 - brcs 131f - rcall 153f - ldi r23,2 - eor r6,r23 - ldi r17,128 - eor r7,r17 -131: - cpi r22,3 - brcs 137f - rcall 153f - ldi r23,128 - eor r6,r23 -137: - cpi r22,2 - brcs 145f - rcall 153f - ldi r23,10 - eor r6,r23 - ldi r17,128 - eor r7,r17 -145: - cpi r22,1 - brcs 151f - rcall 153f - ldi r23,10 - eor r6,r23 -151: - rjmp 1004f -153: - movw r18,r6 - ldd r0,Z+10 - eor r18,r0 - ldd r0,Z+11 - eor r19,r0 - ldd r0,Z+20 - eor r18,r0 - ldd r0,Z+21 - eor r19,r0 - ldd r0,Z+30 - eor r18,r0 - ldd r0,Z+31 - eor r19,r0 - ldd r0,Z+40 - eor r18,r0 - ldd r0,Z+41 - eor r19,r0 - movw r20,r8 - ldd r0,Z+12 - eor r20,r0 - ldd r0,Z+13 - eor r21,r0 - ldd r0,Z+22 - eor r20,r0 - ldd r0,Z+23 - eor r21,r0 - ldd r0,Z+32 - eor r20,r0 - ldd r0,Z+33 - eor r21,r0 - ldd r0,Z+42 - eor r20,r0 - ldd r0,Z+43 - eor r21,r0 - movw r26,r10 - ldd r0,Z+14 - eor r26,r0 - ldd r0,Z+15 - eor r27,r0 - ldd r0,Z+24 - eor r26,r0 - ldd r0,Z+25 - eor r27,r0 - ldd r0,Z+34 - eor r26,r0 - ldd r0,Z+35 - eor r27,r0 - ldd r0,Z+44 - eor r26,r0 - ldd r0,Z+45 - eor r27,r0 - movw r2,r12 - ldd r0,Z+16 - eor r2,r0 - ldd r0,Z+17 - eor r3,r0 - ldd r0,Z+26 - eor r2,r0 - ldd r0,Z+27 - eor r3,r0 - ldd r0,Z+36 - eor r2,r0 - ldd r0,Z+37 - eor r3,r0 - ldd r0,Z+46 - eor r2,r0 - ldd r0,Z+47 - eor r3,r0 - movw r4,r14 - ldd r0,Z+18 - eor r4,r0 - ldd r0,Z+19 - eor r5,r0 - ldd r0,Z+28 - eor r4,r0 - ldd r0,Z+29 - eor r5,r0 - ldd r0,Z+38 - eor r4,r0 - ldd r0,Z+39 - eor r5,r0 - ldd r0,Z+48 - eor r4,r0 - ldd r0,Z+49 - eor r5,r0 - movw r24,r20 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r4 - eor r25,r5 - eor r6,r24 - eor r7,r25 - ldd r0,Z+10 - eor r0,r24 - std Z+10,r0 - ldd r0,Z+11 - eor r0,r25 - std Z+11,r0 - ldd r0,Z+20 - eor r0,r24 - std Z+20,r0 - ldd r0,Z+21 - eor r0,r25 - std Z+21,r0 - ldd r0,Z+30 - eor r0,r24 - std Z+30,r0 - ldd r0,Z+31 - eor r0,r25 - std Z+31,r0 - ldd r0,Z+40 - eor r0,r24 - std Z+40,r0 - ldd r0,Z+41 - eor r0,r25 - std Z+41,r0 - movw r24,r26 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r18 - eor r25,r19 - eor r8,r24 - eor r9,r25 - ldd r0,Z+12 - eor r0,r24 - std Z+12,r0 - ldd r0,Z+13 - eor r0,r25 - std Z+13,r0 - ldd r0,Z+22 - eor r0,r24 - std Z+22,r0 - ldd r0,Z+23 - eor r0,r25 - std Z+23,r0 - ldd r0,Z+32 - eor r0,r24 - std Z+32,r0 - 
ldd r0,Z+33 - eor r0,r25 - std Z+33,r0 - ldd r0,Z+42 - eor r0,r24 - std Z+42,r0 - ldd r0,Z+43 - eor r0,r25 - std Z+43,r0 - movw r24,r2 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r20 - eor r25,r21 - eor r10,r24 - eor r11,r25 - ldd r0,Z+14 - eor r0,r24 - std Z+14,r0 - ldd r0,Z+15 - eor r0,r25 - std Z+15,r0 - ldd r0,Z+24 - eor r0,r24 - std Z+24,r0 - ldd r0,Z+25 - eor r0,r25 - std Z+25,r0 - ldd r0,Z+34 - eor r0,r24 - std Z+34,r0 - ldd r0,Z+35 - eor r0,r25 - std Z+35,r0 - ldd r0,Z+44 - eor r0,r24 - std Z+44,r0 - ldd r0,Z+45 - eor r0,r25 - std Z+45,r0 - movw r24,r4 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r26 - eor r25,r27 - eor r12,r24 - eor r13,r25 - ldd r0,Z+16 - eor r0,r24 - std Z+16,r0 - ldd r0,Z+17 - eor r0,r25 - std Z+17,r0 - ldd r0,Z+26 - eor r0,r24 - std Z+26,r0 - ldd r0,Z+27 - eor r0,r25 - std Z+27,r0 - ldd r0,Z+36 - eor r0,r24 - std Z+36,r0 - ldd r0,Z+37 - eor r0,r25 - std Z+37,r0 - ldd r0,Z+46 - eor r0,r24 - std Z+46,r0 - ldd r0,Z+47 - eor r0,r25 - std Z+47,r0 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r2 - eor r25,r3 - eor r14,r24 - eor r15,r25 - ldd r0,Z+18 - eor r0,r24 - std Z+18,r0 - ldd r0,Z+19 - eor r0,r25 - std Z+19,r0 - ldd r0,Z+28 - eor r0,r24 - std Z+28,r0 - ldd r0,Z+29 - eor r0,r25 - std Z+29,r0 - ldd r0,Z+38 - eor r0,r24 - std Z+38,r0 - ldd r0,Z+39 - eor r0,r25 - std Z+39,r0 - ldd r0,Z+48 - eor r0,r24 - std Z+48,r0 - ldd r0,Z+49 - eor r0,r25 - std Z+49,r0 - movw r24,r8 - ldd r8,Z+12 - ldd r9,Z+13 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldd r18,Z+18 - ldd r19,Z+19 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+12,r18 - std Z+13,r19 - ldd r18,Z+44 - ldd r19,Z+45 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+18,r18 - std Z+19,r19 - ldd r18,Z+28 - ldd r19,Z+29 - mov r0,r19 - mov r19,r18 - mov r18,r0 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std Z+44,r18 - std Z+45,r19 - ldd r18,Z+40 - ldd r19,Z+41 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+28,r18 - std Z+29,r19 - movw r18,r10 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+40,r18 - std Z+41,r19 - ldd r10,Z+24 - ldd r11,Z+25 - mov r0,r11 - mov r11,r10 - mov r10,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldd r18,Z+26 - ldd r19,Z+27 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - std Z+24,r18 - std Z+25,r19 - ldd r18,Z+38 - ldd r19,Z+39 - mov r0,r19 - mov r19,r18 - mov r18,r0 - std Z+26,r18 - std Z+27,r19 - ldd r18,Z+46 - ldd r19,Z+47 - mov r0,r19 - mov r19,r18 - mov r18,r0 - std Z+38,r18 - std Z+39,r19 - ldd r18,Z+30 - ldd r19,Z+31 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - std Z+46,r18 - std Z+47,r19 - movw r18,r14 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+30,r18 - std Z+31,r19 - ldd r14,Z+48 - ldd r15,Z+49 - mov r0,r1 - lsr r15 - ror r14 - ror r0 - lsr r15 - ror r14 - ror r0 - or r15,r0 - ldd r18,Z+42 - ldd r19,Z+43 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+48,r18 - std Z+49,r19 - ldd r18,Z+16 - ldd r19,Z+17 - mov r0,r19 - mov r19,r18 - mov r18,r0 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std 
Z+42,r18 - std Z+43,r19 - ldd r18,Z+32 - ldd r19,Z+33 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+16,r18 - std Z+17,r19 - ldd r18,Z+10 - ldd r19,Z+11 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+32,r18 - std Z+33,r19 - movw r18,r12 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+10,r18 - std Z+11,r19 - ldd r12,Z+36 - ldd r13,Z+37 - mov r0,r13 - mov r13,r12 - mov r12,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - or r13,r0 - ldd r18,Z+34 - ldd r19,Z+35 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std Z+36,r18 - std Z+37,r19 - ldd r18,Z+22 - ldd r19,Z+23 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+34,r18 - std Z+35,r19 - ldd r18,Z+14 - ldd r19,Z+15 - mov r0,r19 - mov r19,r18 - mov r18,r0 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+22,r18 - std Z+23,r19 - ldd r18,Z+20 - ldd r19,Z+21 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+14,r18 - std Z+15,r19 - lsl r24 - rol r25 - adc r24,r1 - std Z+20,r24 - std Z+21,r25 - movw r18,r6 - movw r20,r8 - movw r26,r10 - movw r2,r12 - movw r4,r14 - movw r6,r26 - mov r0,r20 - com r0 - and r6,r0 - mov r0,r21 - com r0 - and r7,r0 - eor r6,r18 - eor r7,r19 - movw r8,r2 - mov r0,r26 - com r0 - and r8,r0 - mov r0,r27 - com r0 - and r9,r0 - eor r8,r20 - eor r9,r21 - movw r10,r4 - mov r0,r2 - com r0 - and r10,r0 - mov r0,r3 - com r0 - and r11,r0 - eor r10,r26 - eor r11,r27 - movw r12,r18 - mov r0,r4 - com r0 - and r12,r0 - mov r0,r5 - com r0 - and r13,r0 - eor r12,r2 - eor r13,r3 - movw r14,r20 - mov r0,r18 - com r0 - and r14,r0 - mov r0,r19 - com r0 - and r15,r0 - eor r14,r4 - eor r15,r5 - ldd r18,Z+10 - ldd r19,Z+11 - ldd r20,Z+12 - ldd r21,Z+13 - ldd r26,Z+14 - ldd r27,Z+15 - ldd r2,Z+16 - ldd r3,Z+17 - ldd r4,Z+18 - ldd r5,Z+19 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+10,r24 - std Z+11,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+12,r24 - std Z+13,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+14,r24 - std Z+15,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+16,r24 - std Z+17,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+18,r24 - std Z+19,r25 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - ldd r26,Z+24 - ldd r27,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r4,Z+28 - ldd r5,Z+29 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+20,r24 - std Z+21,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+22,r24 - std Z+23,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+24,r24 - std Z+25,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov 
r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+26,r24 - std Z+27,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+28,r24 - std Z+29,r25 - ldd r18,Z+30 - ldd r19,Z+31 - ldd r20,Z+32 - ldd r21,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - ldd r2,Z+36 - ldd r3,Z+37 - ldd r4,Z+38 - ldd r5,Z+39 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+30,r24 - std Z+31,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+32,r24 - std Z+33,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+34,r24 - std Z+35,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+36,r24 - std Z+37,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+38,r24 - std Z+39,r25 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - ldd r26,Z+44 - ldd r27,Z+45 - ldd r2,Z+46 - ldd r3,Z+47 - ldd r4,Z+48 - ldd r5,Z+49 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+40,r24 - std Z+41,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+42,r24 - std Z+43,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+44,r24 - std Z+45,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+46,r24 - std Z+47,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+48,r24 - std Z+49,r25 - ret -1004: - st Z,r6 - std Z+1,r7 - std Z+2,r8 - std Z+3,r9 - std Z+4,r10 - std Z+5,r11 - std Z+6,r12 - std Z+7,r13 - std Z+8,r14 - std Z+9,r15 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size keccakp_400_permute, .-keccakp_400_permute - -#endif diff --git a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-keccak.c b/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-keccak.c deleted file mode 100644 index 60539df..0000000 --- a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-keccak.c +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-keccak.h" - -#if !defined(__AVR__) - -/* Faster method to compute ((x + y) % 5) that avoids the division */ -static unsigned char const addMod5Table[9] = { - 0, 1, 2, 3, 4, 0, 1, 2, 3 -}; -#define addMod5(x, y) (addMod5Table[(x) + (y)]) - -void keccakp_200_permute(keccakp_200_state_t *state) -{ - static uint8_t const RC[18] = { - 0x01, 0x82, 0x8A, 0x00, 0x8B, 0x01, 0x81, 0x09, - 0x8A, 0x88, 0x09, 0x0A, 0x8B, 0x8B, 0x89, 0x03, - 0x02, 0x80 - }; - uint8_t C[5]; - uint8_t D; - unsigned round; - unsigned index, index2; - for (round = 0; round < 18; ++round) { - /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. Compute D on the fly */ - for (index = 0; index < 5; ++index) { - C[index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; - } - for (index = 0; index < 5; ++index) { - D = C[addMod5(index, 4)] ^ - leftRotate1_8(C[addMod5(index, 1)]); - for (index2 = 0; index2 < 5; ++index2) - state->A[index2][index] ^= D; - } - - /* Step mapping rho and pi combined into a single step. - * Rotate all lanes by a specific offset and rearrange */ - D = state->A[0][1]; - state->A[0][1] = leftRotate4_8(state->A[1][1]); - state->A[1][1] = leftRotate4_8(state->A[1][4]); - state->A[1][4] = leftRotate5_8(state->A[4][2]); - state->A[4][2] = leftRotate7_8(state->A[2][4]); - state->A[2][4] = leftRotate2_8(state->A[4][0]); - state->A[4][0] = leftRotate6_8(state->A[0][2]); - state->A[0][2] = leftRotate3_8(state->A[2][2]); - state->A[2][2] = leftRotate1_8(state->A[2][3]); - state->A[2][3] = state->A[3][4]; - state->A[3][4] = state->A[4][3]; - state->A[4][3] = leftRotate1_8(state->A[3][0]); - state->A[3][0] = leftRotate3_8(state->A[0][4]); - state->A[0][4] = leftRotate6_8(state->A[4][4]); - state->A[4][4] = leftRotate2_8(state->A[4][1]); - state->A[4][1] = leftRotate7_8(state->A[1][3]); - state->A[1][3] = leftRotate5_8(state->A[3][1]); - state->A[3][1] = leftRotate4_8(state->A[1][0]); - state->A[1][0] = leftRotate4_8(state->A[0][3]); - state->A[0][3] = leftRotate5_8(state->A[3][3]); - state->A[3][3] = leftRotate7_8(state->A[3][2]); - state->A[3][2] = leftRotate2_8(state->A[2][1]); - state->A[2][1] = leftRotate6_8(state->A[1][2]); - state->A[1][2] = leftRotate3_8(state->A[2][0]); - state->A[2][0] = leftRotate1_8(D); - - /* Step mapping chi. Combine each lane with two others in its row */ - for (index = 0; index < 5; ++index) { - C[0] = state->A[index][0]; - C[1] = state->A[index][1]; - C[2] = state->A[index][2]; - C[3] = state->A[index][3]; - C[4] = state->A[index][4]; - for (index2 = 0; index2 < 5; ++index2) { - state->A[index][index2] = - C[index2] ^ - ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); - } - } - - /* Step mapping iota. 
XOR A[0][0] with the round constant */ - state->A[0][0] ^= RC[round]; - } -} - -#if defined(LW_UTIL_LITTLE_ENDIAN) -#define keccakp_400_permute_host keccakp_400_permute -#endif - -/* Keccak-p[400] that assumes that the input is already in host byte order */ -void keccakp_400_permute_host(keccakp_400_state_t *state, unsigned rounds) -{ - static uint16_t const RC[20] = { - 0x0001, 0x8082, 0x808A, 0x8000, 0x808B, 0x0001, 0x8081, 0x8009, - 0x008A, 0x0088, 0x8009, 0x000A, 0x808B, 0x008B, 0x8089, 0x8003, - 0x8002, 0x0080, 0x800A, 0x000A - }; - uint16_t C[5]; - uint16_t D; - unsigned round; - unsigned index, index2; - for (round = 20 - rounds; round < 20; ++round) { - /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. Compute D on the fly */ - for (index = 0; index < 5; ++index) { - C[index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; - } - for (index = 0; index < 5; ++index) { - D = C[addMod5(index, 4)] ^ - leftRotate1_16(C[addMod5(index, 1)]); - for (index2 = 0; index2 < 5; ++index2) - state->A[index2][index] ^= D; - } - - /* Step mapping rho and pi combined into a single step. - * Rotate all lanes by a specific offset and rearrange */ - D = state->A[0][1]; - state->A[0][1] = leftRotate12_16(state->A[1][1]); - state->A[1][1] = leftRotate4_16 (state->A[1][4]); - state->A[1][4] = leftRotate13_16(state->A[4][2]); - state->A[4][2] = leftRotate7_16 (state->A[2][4]); - state->A[2][4] = leftRotate2_16 (state->A[4][0]); - state->A[4][0] = leftRotate14_16(state->A[0][2]); - state->A[0][2] = leftRotate11_16(state->A[2][2]); - state->A[2][2] = leftRotate9_16 (state->A[2][3]); - state->A[2][3] = leftRotate8_16 (state->A[3][4]); - state->A[3][4] = leftRotate8_16 (state->A[4][3]); - state->A[4][3] = leftRotate9_16 (state->A[3][0]); - state->A[3][0] = leftRotate11_16(state->A[0][4]); - state->A[0][4] = leftRotate14_16(state->A[4][4]); - state->A[4][4] = leftRotate2_16 (state->A[4][1]); - state->A[4][1] = leftRotate7_16 (state->A[1][3]); - state->A[1][3] = leftRotate13_16(state->A[3][1]); - state->A[3][1] = leftRotate4_16 (state->A[1][0]); - state->A[1][0] = leftRotate12_16(state->A[0][3]); - state->A[0][3] = leftRotate5_16 (state->A[3][3]); - state->A[3][3] = leftRotate15_16(state->A[3][2]); - state->A[3][2] = leftRotate10_16(state->A[2][1]); - state->A[2][1] = leftRotate6_16 (state->A[1][2]); - state->A[1][2] = leftRotate3_16 (state->A[2][0]); - state->A[2][0] = leftRotate1_16(D); - - /* Step mapping chi. Combine each lane with two others in its row */ - for (index = 0; index < 5; ++index) { - C[0] = state->A[index][0]; - C[1] = state->A[index][1]; - C[2] = state->A[index][2]; - C[3] = state->A[index][3]; - C[4] = state->A[index][4]; - for (index2 = 0; index2 < 5; ++index2) { - state->A[index][index2] = - C[index2] ^ - ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); - } - } - - /* Step mapping iota. XOR A[0][0] with the round constant */ - state->A[0][0] ^= RC[round]; - } -} - -#if !defined(LW_UTIL_LITTLE_ENDIAN) - -/** - * \brief Reverses the bytes in a Keccak-p[400] state. - * - * \param state The Keccak-p[400] state to apply byte-reversal to. 
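The reference C code above avoids a division in the theta and chi steps by looking up (x + y) mod 5 in a 9-entry table. A minimal, self-contained check of that trick, not part of the patch; main() and the printed text are illustrative only, the table and macro are copied from the deleted internal-keccak.c:

    #include <stdio.h>

    /* Table and macro copied from the deleted internal-keccak.c */
    static unsigned char const addMod5Table[9] = {
        0, 1, 2, 3, 4, 0, 1, 2, 3
    };
    #define addMod5(x, y) (addMod5Table[(x) + (y)])

    int main(void)
    {
        unsigned x, y, ok = 1;
        for (x = 0; x < 5; ++x)
            for (y = 0; y < 5; ++y)
                ok &= (addMod5(x, y) == (x + y) % 5);
        printf("addMod5 table %s\n", ok ? "matches (x + y) mod 5" : "MISMATCH");
        return ok ? 0 : 1;
    }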
- */ -static void keccakp_400_reverse_bytes(keccakp_400_state_t *state) -{ - unsigned index; - unsigned char temp1; - unsigned char temp2; - for (index = 0; index < 50; index += 2) { - temp1 = state->B[index]; - temp2 = state->B[index + 1]; - state->B[index] = temp2; - state->B[index + 1] = temp1; - } -} - -/* Keccak-p[400] that requires byte reversal on input and output */ -void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds) -{ - keccakp_400_reverse_bytes(state); - keccakp_400_permute_host(state, rounds); - keccakp_400_reverse_bytes(state); -} - -#endif - -#endif /* !__AVR__ */ diff --git a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-keccak.h b/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-keccak.h deleted file mode 100644 index 2ffef42..0000000 --- a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-keccak.h +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_KECCAK_H -#define LW_INTERNAL_KECCAK_H - -#include "internal-util.h" - -/** - * \file internal-keccak.h - * \brief Internal implementation of the Keccak-p permutation. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the state for the Keccak-p[200] permutation. - */ -#define KECCAKP_200_STATE_SIZE 25 - -/** - * \brief Size of the state for the Keccak-p[400] permutation. - */ -#define KECCAKP_400_STATE_SIZE 50 - -/** - * \brief Structure of the internal state of the Keccak-p[200] permutation. - */ -typedef union -{ - uint8_t A[5][5]; /**< Keccak-p[200] state as a 5x5 array of lanes */ - uint8_t B[25]; /**< Keccak-p[200] state as a byte array */ - -} keccakp_200_state_t; - -/** - * \brief Structure of the internal state of the Keccak-p[400] permutation. - */ -typedef union -{ - uint16_t A[5][5]; /**< Keccak-p[400] state as a 5x5 array of lanes */ - uint8_t B[50]; /**< Keccak-p[400] state as a byte array */ - -} keccakp_400_state_t; - -/** - * \brief Permutes the Keccak-p[200] state. - * - * \param state The Keccak-p[200] state to be permuted. - */ -void keccakp_200_permute(keccakp_200_state_t *state); - -/** - * \brief Permutes the Keccak-p[400] state, which is assumed to be in - * little-endian byte order. - * - * \param state The Keccak-p[400] state to be permuted. - * \param rounds The number of rounds to perform (up to 20). 
- */ -void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-util.h b/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
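The load/store helpers above exist so that state words are read and written in a fixed byte order regardless of the host. A round-trip sketch for the 32-bit little-endian pair, with the macros copied verbatim from the deleted header; main() and the sample bytes are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    #define le_load_word32(ptr) \
        ((((uint32_t)((ptr)[3])) << 24) | \
         (((uint32_t)((ptr)[2])) << 16) | \
         (((uint32_t)((ptr)[1])) << 8) | \
         ((uint32_t)((ptr)[0])))

    #define le_store_word32(ptr, x) \
        do { \
            uint32_t _x = (x); \
            (ptr)[0] = (uint8_t)_x; \
            (ptr)[1] = (uint8_t)(_x >> 8); \
            (ptr)[2] = (uint8_t)(_x >> 16); \
            (ptr)[3] = (uint8_t)(_x >> 24); \
        } while (0)

    int main(void)
    {
        uint8_t buf[4] = { 0x78, 0x56, 0x34, 0x12 };
        uint32_t w = le_load_word32(buf);   /* 0x12345678 on any host byte order */
        le_store_word32(buf, w);            /* writes the same four bytes back */
        printf("0x%08lX\n", (unsigned long)w);
        return 0;
    }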
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
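The lw_xor_block family above is how the AEAD modes combine keystream, plaintext and state bytes in place. A short usage sketch of the two-source variant, with the macro copied verbatim from the deleted header; the toy buffers and main() are illustrative only:

    #include <stdio.h>

    #define lw_xor_block_2_src(dest, src1, src2, len) \
        do { \
            unsigned char *_dest = (dest); \
            const unsigned char *_src1 = (src1); \
            const unsigned char *_src2 = (src2); \
            unsigned _len = (len); \
            while (_len > 0) { \
                *_dest++ = *_src1++ ^ *_src2++; \
                --_len; \
            } \
        } while (0)

    int main(void)
    {
        const unsigned char pt[4] = { 'd', 'a', 't', 'a' };
        const unsigned char ks[4] = { 0x01, 0x02, 0x03, 0x04 };
        unsigned char ct[4];
        lw_xor_block_2_src(ct, pt, ks, 4);   /* ct[i] = pt[i] ^ ks[i] */
        printf("%02x %02x %02x %02x\n", ct[0], ct[1], ct[2], ct[3]);
        return 0;
    }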
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
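On AVR the composed macros above build every 32-bit rotation out of rotations by 1 and by multiples of 8, the only counts that are cheap without a barrel shifter. A self-contained check of one such composition, "left by 13" expressed as "left by 16, then right by 3"; rol32, ror32 and main() are illustrative stand-ins for the generic macros:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t rol32(uint32_t a, unsigned bits)
    {
        return (a << bits) | (a >> (32 - bits));
    }

    static uint32_t ror32(uint32_t a, unsigned bits)
    {
        return (a >> bits) | (a << (32 - bits));
    }

    int main(void)
    {
        uint32_t x = 0x12345678;
        uint32_t direct   = rol32(x, 13);
        uint32_t composed = ror32(ror32(ror32(rol32(x, 16), 1), 1), 1);
        printf("%s\n", direct == composed ? "composed rotation matches" : "MISMATCH");
        return direct == composed ? 0 : 1;
    }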
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
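The 8-bit rotations defined above are what the Keccak-p[200] rho step uses for its lane offsets. A quick self-check with the macros copied verbatim from the deleted header; it needs GCC or Clang for the statement-expression extension, and main() plus the test value are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    #define leftRotate_8(a, bits) \
        (__extension__ ({ \
            uint8_t _temp = (a); \
            (_temp << (bits)) | (_temp >> (8 - (bits))); \
        }))
    #define leftRotate3_8(a) (leftRotate_8((a), 3))

    int main(void)
    {
        uint8_t r = (uint8_t)leftRotate3_8(0x81);  /* 1000 0001 -> 0000 1100 */
        printf("0x%02X\n", r);                     /* expected: 0x0C */
        return r == 0x0C ? 0 : 1;
    }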
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/isap.c b/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/isap.c deleted file mode 100644 index 26d50a3..0000000 --- a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/isap.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "isap.h" -#include "internal-keccak.h" -#include "internal-ascon.h" -#include - -aead_cipher_t const isap_keccak_128a_cipher = { - "ISAP-K-128A", - ISAP_KEY_SIZE, - ISAP_NONCE_SIZE, - ISAP_TAG_SIZE, - AEAD_FLAG_NONE, - isap_keccak_128a_aead_encrypt, - isap_keccak_128a_aead_decrypt -}; - -aead_cipher_t const isap_ascon_128a_cipher = { - "ISAP-A-128A", - ISAP_KEY_SIZE, - ISAP_NONCE_SIZE, - ISAP_TAG_SIZE, - AEAD_FLAG_NONE, - isap_ascon_128a_aead_encrypt, - isap_ascon_128a_aead_decrypt -}; - -aead_cipher_t const isap_keccak_128_cipher = { - "ISAP-K-128", - ISAP_KEY_SIZE, - ISAP_NONCE_SIZE, - ISAP_TAG_SIZE, - AEAD_FLAG_NONE, - isap_keccak_128_aead_encrypt, - isap_keccak_128_aead_decrypt -}; - -aead_cipher_t const isap_ascon_128_cipher = { - "ISAP-A-128", - ISAP_KEY_SIZE, - ISAP_NONCE_SIZE, - ISAP_TAG_SIZE, - AEAD_FLAG_NONE, - isap_ascon_128_aead_encrypt, - isap_ascon_128_aead_decrypt -}; - -/* ISAP-K-128A */ -#define ISAP_ALG_NAME isap_keccak_128a -#define ISAP_RATE (144 / 8) -#define ISAP_sH 16 -#define ISAP_sE 8 -#define ISAP_sB 1 -#define ISAP_sK 8 -#define ISAP_STATE keccakp_400_state_t -#define ISAP_PERMUTE(s,r) keccakp_400_permute((s), (r)) -#include "internal-isap.h" - -/* ISAP-A-128A */ -#define ISAP_ALG_NAME isap_ascon_128a -#define ISAP_RATE (64 / 8) -#define ISAP_sH 12 -#define ISAP_sE 6 -#define ISAP_sB 1 -#define ISAP_sK 12 -#define ISAP_STATE ascon_state_t -#define ISAP_PERMUTE(s,r) ascon_permute((s), 12 - (r)) -#include "internal-isap.h" - -/* ISAP-K-128 */ -#define ISAP_ALG_NAME isap_keccak_128 -#define ISAP_RATE (144 / 8) -#define ISAP_sH 20 -#define ISAP_sE 12 -#define ISAP_sB 12 -#define ISAP_sK 12 -#define ISAP_STATE keccakp_400_state_t -#define ISAP_PERMUTE(s,r) keccakp_400_permute((s), (r)) -#include "internal-isap.h" - -/* ISAP-A-128 */ -#define ISAP_ALG_NAME isap_ascon_128 -#define ISAP_RATE (64 / 8) -#define ISAP_sH 12 -#define ISAP_sE 12 -#define ISAP_sB 12 -#define ISAP_sK 12 -#define ISAP_STATE ascon_state_t -#define ISAP_PERMUTE(s,r) ascon_permute((s), 12 - (r)) -#include "internal-isap.h" diff --git a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/isap.h b/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/isap.h deleted file mode 100644 index ddf8203..0000000 --- a/isap/Implementations/crypto_aead/isapa128av20/rhys-avr/isap.h +++ /dev/null @@ -1,330 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
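The parameter blocks above show the two calling conventions side by side: the Keccak variants pass the round count straight through, while the ASCON variants pass 12 - r because ascon_permute() takes the index of the first round to execute. A small wrapper sketch that makes the convention explicit, assuming the internal-ascon.h header from the remaining rhys/ implementation is available; isap_style_ascon_rounds is a hypothetical name:

    #include <stdint.h>
    #include "internal-ascon.h"   /* ascon_state_t, ascon_permute() */

    /* Run the last `rounds` rounds of the 12-round ASCON permutation,
     * mirroring ISAP_PERMUTE(s,r) == ascon_permute((s), 12 - (r)). */
    void isap_style_ascon_rounds(ascon_state_t *state, unsigned rounds)
    {
        ascon_permute(state, (uint8_t)(12 - rounds));
    }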
- */ - -#ifndef LWCRYPTO_ISAP_H -#define LWCRYPTO_ISAP_H - -#include "aead-common.h" - -/** - * \file isap.h - * \brief ISAP authenticated encryption algorithm. - * - * ISAP is a family of authenticated encryption algorithms that are built - * around the Keccak-p[400] or ASCON permutations. There are four algorithms - * in the family, each of which have a 128-bit key, a 128-bit nonce, and a - * 128-bit tag: - * - * \li ISAP-K-128A based around the Keccak-p[400] permutation with a - * reduced number of rounds. This is the primary member in the family. - * \li ISAP-A-128A based around the ASCON permutation with a reduced - * number of rounds. - * \li ISAP-K-128 based around the Keccak-p[400] permutation. - * \li ISAP-A-128 based around the ASCON permutation. - * - * ISAP is designed to provide some protection against adversaries - * using differential power analysis to determine the key. The - * downside is that key setup is very slow. - * - * References: https://isap.iaik.tugraz.at/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all ISAP family members. - */ -#define ISAP_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for all ISAP family members. - */ -#define ISAP_TAG_SIZE 16 - -/** - * \brief Size of the nonce for all ISAP family members. - */ -#define ISAP_NONCE_SIZE 16 - -/** - * \brief Meta-information block for the ISAP-K-128A cipher. - */ -extern aead_cipher_t const isap_keccak_128a_cipher; - -/** - * \brief Meta-information block for the ISAP-A-128A cipher. - */ -extern aead_cipher_t const isap_ascon_128a_cipher; - -/** - * \brief Meta-information block for the ISAP-K-128 cipher. - */ -extern aead_cipher_t const isap_keccak_128_cipher; - -/** - * \brief Meta-information block for the ISAP-A-128 cipher. - */ -extern aead_cipher_t const isap_ascon_128_cipher; - -/** - * \brief Encrypts and authenticates a packet with ISAP-K-128A. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa isap_keccak_128a_aead_decrypt() - */ -int isap_keccak_128a_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ISAP-K-128A. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa isap_keccak_128a_aead_encrypt() - */ -int isap_keccak_128a_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ISAP-A-128A. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa isap_ascon_128a_aead_decrypt() - */ -int isap_ascon_128a_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ISAP-A-128A. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa isap_ascon_128a_aead_encrypt() - */ -int isap_ascon_128a_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ISAP-K-128. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa isap_keccak_128_aead_decrypt() - */ -int isap_keccak_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ISAP-K-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa isap_keccak_128_aead_encrypt() - */ -int isap_keccak_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ISAP-A-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa isap_ascon_128_aead_decrypt() - */ -int isap_ascon_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ISAP-A-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa isap_ascon_128_aead_encrypt() - */ -int isap_ascon_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-ascon-avr.S b/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-ascon-avr.S new file mode 100644 index 0000000..e8a4fb4 --- /dev/null +++ b/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-ascon-avr.S @@ -0,0 +1,778 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global ascon_permute + .type ascon_permute, @function +ascon_permute: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ldi r18,15 + sub r18,r22 + swap r18 + or r22,r18 + ldd r3,Z+16 + ldd r2,Z+17 + ldd r27,Z+18 + ldd r26,Z+19 + ldd r21,Z+20 + ldd r20,Z+21 + ldd r19,Z+22 + ldd r18,Z+23 + ldd r11,Z+32 + ldd r10,Z+33 + ldd r9,Z+34 + ldd r8,Z+35 + ldd r7,Z+36 + ldd r6,Z+37 + ldd r5,Z+38 + ldd r4,Z+39 +20: + eor r18,r22 + ldd r23,Z+7 + ldd r12,Z+15 + ldd r13,Z+31 + eor r23,r4 + eor r4,r13 + eor r18,r12 + mov r14,r23 + mov r15,r12 + mov r24,r18 + mov r25,r13 + mov r16,r4 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r18 + and r24,r13 + and r25,r4 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r18,r25 + eor r13,r16 + eor r4,r14 + eor r12,r23 + eor r23,r4 + eor r13,r18 + com r18 + std Z+7,r23 + std Z+15,r12 + std Z+31,r13 + std Z+39,r4 + ldd r23,Z+6 + ldd r12,Z+14 + ldd r13,Z+30 + eor r23,r5 + eor r5,r13 + eor r19,r12 + mov r14,r23 + mov r15,r12 + mov r24,r19 + mov r25,r13 + mov r16,r5 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r19 + and r24,r13 + and r25,r5 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r19,r25 + eor r13,r16 + eor r5,r14 + eor r12,r23 + eor r23,r5 + eor r13,r19 + com r19 + std Z+6,r23 + std Z+14,r12 + std Z+30,r13 + std Z+38,r5 + ldd r23,Z+5 
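For reference, the AEAD entry points declared in the header removed above follow the usual NIST LWC argument order. A round-trip usage sketch for the ASCON-based ISAP-A-128A variant, assuming the equivalent isap.h and its implementation from the remaining rhys/ directory are linked in; the empty associated data and all-zero key and nonce are illustrative only:

    #include <stdio.h>
    #include "isap.h"

    int main(void)
    {
        unsigned char key[ISAP_KEY_SIZE] = { 0 };
        unsigned char npub[ISAP_NONCE_SIZE] = { 0 };
        unsigned char msg[4] = { 'd', 'a', 't', 'a' };
        unsigned char ct[sizeof(msg) + ISAP_TAG_SIZE];
        unsigned char out[sizeof(msg)];
        unsigned long long clen, mlen;

        if (isap_ascon_128a_aead_encrypt(ct, &clen, msg, sizeof(msg),
                                         NULL, 0, NULL, npub, key) != 0)
            return 1;
        if (isap_ascon_128a_aead_decrypt(out, &mlen, NULL, ct, clen,
                                         NULL, 0, npub, key) != 0)
            return 1;   /* authentication tag did not verify */
        printf("round trip ok: %llu plaintext bytes\n", mlen);
        return 0;
    }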
+ ldd r12,Z+13 + ldd r13,Z+29 + eor r23,r6 + eor r6,r13 + eor r20,r12 + mov r14,r23 + mov r15,r12 + mov r24,r20 + mov r25,r13 + mov r16,r6 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r20 + and r24,r13 + and r25,r6 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r20,r25 + eor r13,r16 + eor r6,r14 + eor r12,r23 + eor r23,r6 + eor r13,r20 + com r20 + std Z+5,r23 + std Z+13,r12 + std Z+29,r13 + std Z+37,r6 + ldd r23,Z+4 + ldd r12,Z+12 + ldd r13,Z+28 + eor r23,r7 + eor r7,r13 + eor r21,r12 + mov r14,r23 + mov r15,r12 + mov r24,r21 + mov r25,r13 + mov r16,r7 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r21 + and r24,r13 + and r25,r7 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r21,r25 + eor r13,r16 + eor r7,r14 + eor r12,r23 + eor r23,r7 + eor r13,r21 + com r21 + std Z+4,r23 + std Z+12,r12 + std Z+28,r13 + std Z+36,r7 + ldd r23,Z+3 + ldd r12,Z+11 + ldd r13,Z+27 + eor r23,r8 + eor r8,r13 + eor r26,r12 + mov r14,r23 + mov r15,r12 + mov r24,r26 + mov r25,r13 + mov r16,r8 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r26 + and r24,r13 + and r25,r8 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r26,r25 + eor r13,r16 + eor r8,r14 + eor r12,r23 + eor r23,r8 + eor r13,r26 + com r26 + std Z+3,r23 + std Z+11,r12 + std Z+27,r13 + std Z+35,r8 + ldd r23,Z+2 + ldd r12,Z+10 + ldd r13,Z+26 + eor r23,r9 + eor r9,r13 + eor r27,r12 + mov r14,r23 + mov r15,r12 + mov r24,r27 + mov r25,r13 + mov r16,r9 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r27 + and r24,r13 + and r25,r9 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r27,r25 + eor r13,r16 + eor r9,r14 + eor r12,r23 + eor r23,r9 + eor r13,r27 + com r27 + std Z+2,r23 + std Z+10,r12 + std Z+26,r13 + std Z+34,r9 + ldd r23,Z+1 + ldd r12,Z+9 + ldd r13,Z+25 + eor r23,r10 + eor r10,r13 + eor r2,r12 + mov r14,r23 + mov r15,r12 + mov r24,r2 + mov r25,r13 + mov r16,r10 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r2 + and r24,r13 + and r25,r10 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r2,r25 + eor r13,r16 + eor r10,r14 + eor r12,r23 + eor r23,r10 + eor r13,r2 + com r2 + std Z+1,r23 + std Z+9,r12 + std Z+25,r13 + std Z+33,r10 + ld r23,Z + ldd r12,Z+8 + ldd r13,Z+24 + eor r23,r11 + eor r11,r13 + eor r3,r12 + mov r14,r23 + mov r15,r12 + mov r24,r3 + mov r25,r13 + mov r16,r11 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r3 + and r24,r13 + and r25,r11 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r3,r25 + eor r13,r16 + eor r11,r14 + eor r12,r23 + eor r23,r11 + eor r13,r3 + com r3 + st Z,r23 + std Z+8,r12 + std Z+24,r13 + std Z+32,r11 + ld r11,Z + ldd r10,Z+1 + ldd r9,Z+2 + ldd r8,Z+3 + ldd r7,Z+4 + ldd r6,Z+5 + ldd r5,Z+6 + ldd r4,Z+7 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r14 + mov r14,r24 + mov r24,r16 + mov r16,r0 + mov r0,r13 + mov r13,r15 + mov r15,r25 + mov r25,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r17,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r4 + mov r0,r5 + push r6 + mov r4,r7 + mov r5,r8 + mov r6,r9 + mov r7,r10 + mov r8,r11 + pop r11 + mov r10,r0 + mov r9,r23 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 
+ ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + st Z,r11 + std Z+1,r10 + std Z+2,r9 + std Z+3,r8 + std Z+4,r7 + std Z+5,r6 + std Z+6,r5 + std Z+7,r4 + ldd r11,Z+8 + ldd r10,Z+9 + ldd r9,Z+10 + ldd r8,Z+11 + ldd r7,Z+12 + ldd r6,Z+13 + ldd r5,Z+14 + ldd r4,Z+15 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r9 + mov r0,r10 + push r11 + mov r11,r8 + mov r10,r7 + mov r9,r6 + mov r8,r5 + mov r7,r4 + pop r6 + mov r5,r0 + mov r4,r23 + lsl r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + adc r4,r1 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + std Z+8,r11 + std Z+9,r10 + std Z+10,r9 + std Z+11,r8 + std Z+12,r7 + std Z+13,r6 + std Z+14,r5 + std Z+15,r4 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + bst r12,0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + bld r17,7 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + eor r24,r26 + eor r25,r27 + eor r16,r2 + eor r17,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r2 + rol r3 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r2 + rol r3 + adc r18,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + eor r26,r24 + eor r27,r25 + eor r2,r16 + eor r3,r17 + ldd r11,Z+24 + ldd r10,Z+25 + ldd r9,Z+26 + ldd r8,Z+27 + ldd r7,Z+28 + ldd r6,Z+29 + ldd r5,Z+30 + ldd r4,Z+31 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r17,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r0,r4 + mov r4,r6 + mov r6,r8 + mov r8,r10 + mov r10,r0 + mov r0,r5 + mov r5,r7 + mov r7,r9 + mov r9,r11 + mov r11,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + std Z+24,r11 + std Z+25,r10 + std Z+26,r9 + std Z+27,r8 + std Z+28,r7 + std Z+29,r6 + std Z+30,r5 + std Z+31,r4 + ldd r11,Z+32 + ldd r10,Z+33 + ldd r9,Z+34 + ldd r8,Z+35 + ldd r7,Z+36 + ldd r6,Z+37 + ldd r5,Z+38 + ldd r4,Z+39 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + lsl r12 + 
rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r9 + mov r0,r10 + push r11 + mov r11,r8 + mov r10,r7 + mov r9,r6 + mov r8,r5 + mov r7,r4 + pop r6 + mov r5,r0 + mov r4,r23 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + subi r22,15 + ldi r25,60 + cpse r22,r25 + rjmp 20b + std Z+16,r3 + std Z+17,r2 + std Z+18,r27 + std Z+19,r26 + std Z+20,r21 + std Z+21,r20 + std Z+22,r19 + std Z+23,r18 + std Z+32,r11 + std Z+33,r10 + std Z+34,r9 + std Z+35,r8 + std Z+36,r7 + std Z+37,r6 + std Z+38,r5 + std Z+39,r4 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size ascon_permute, .-ascon_permute + +#endif diff --git a/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-ascon.c b/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-ascon.c index 12a8ec6..657aabe 100644 --- a/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-ascon.c +++ b/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-ascon.c @@ -22,6 +22,8 @@ #include "internal-ascon.h" +#if !defined(__AVR__) + void ascon_permute(ascon_state_t *state, uint8_t first_round) { uint64_t t0, t1, t2, t3, t4; @@ -74,3 +76,5 @@ void ascon_permute(ascon_state_t *state, uint8_t first_round) state->S[4] = x4; #endif } + +#endif /* !__AVR__ */ diff --git a/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-keccak-avr.S b/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-keccak-avr.S new file mode 100644 index 0000000..e50ccaf --- /dev/null +++ b/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-keccak-avr.S @@ -0,0 +1,1552 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global keccakp_200_permute + .type keccakp_200_permute, @function +keccakp_200_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r26,Z+6 + ldd r27,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r4,Z+12 + ldd r5,Z+13 + ldd r6,Z+14 + ldd r7,Z+15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + ldd r24,Z+24 + push r31 + push r30 + rcall 82f + ldi r30,1 + eor r18,r30 + rcall 82f + ldi r30,130 + eor r18,r30 + rcall 82f + ldi r30,138 + eor r18,r30 + rcall 82f + mov r30,r1 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,1 + eor r18,r30 + rcall 82f + ldi r30,129 + eor r18,r30 + rcall 82f + ldi r30,9 + eor r18,r30 + rcall 82f + ldi r30,138 + eor r18,r30 + rcall 82f + ldi r30,136 + eor r18,r30 + rcall 82f + ldi r30,9 + eor r18,r30 + rcall 82f + ldi r30,10 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,137 + eor r18,r30 + rcall 82f + ldi r30,3 + eor r18,r30 + rcall 82f + ldi r30,2 + eor r18,r30 + rcall 82f + ldi r30,128 + eor r18,r30 + rjmp 420f +82: + mov r30,r18 + eor r30,r23 + eor r30,r2 + eor r30,r7 + eor r30,r12 + mov r31,r19 + 
eor r31,r26 + eor r31,r3 + eor r31,r8 + eor r31,r13 + mov r25,r20 + eor r25,r27 + eor r25,r4 + eor r25,r9 + eor r25,r14 + mov r16,r21 + eor r16,r28 + eor r16,r5 + eor r16,r10 + eor r16,r15 + mov r17,r22 + eor r17,r29 + eor r17,r6 + eor r17,r11 + eor r17,r24 + mov r0,r31 + lsl r0 + adc r0,r1 + eor r0,r17 + eor r18,r0 + eor r23,r0 + eor r2,r0 + eor r7,r0 + eor r12,r0 + mov r0,r25 + lsl r0 + adc r0,r1 + eor r0,r30 + eor r19,r0 + eor r26,r0 + eor r3,r0 + eor r8,r0 + eor r13,r0 + mov r0,r16 + lsl r0 + adc r0,r1 + eor r0,r31 + eor r20,r0 + eor r27,r0 + eor r4,r0 + eor r9,r0 + eor r14,r0 + mov r0,r17 + lsl r0 + adc r0,r1 + eor r0,r25 + eor r21,r0 + eor r28,r0 + eor r5,r0 + eor r10,r0 + eor r15,r0 + mov r0,r30 + lsl r0 + adc r0,r1 + eor r0,r16 + eor r22,r0 + eor r29,r0 + eor r6,r0 + eor r11,r0 + eor r24,r0 + mov r30,r19 + swap r26 + mov r19,r26 + swap r29 + mov r26,r29 + mov r0,r1 + lsr r14 + ror r0 + lsr r14 + ror r0 + lsr r14 + ror r0 + or r14,r0 + mov r29,r14 + bst r6,0 + lsr r6 + bld r6,7 + mov r14,r6 + lsl r12 + adc r12,r1 + lsl r12 + adc r12,r1 + mov r6,r12 + mov r0,r1 + lsr r20 + ror r0 + lsr r20 + ror r0 + or r20,r0 + mov r12,r20 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + mov r20,r4 + lsl r5 + adc r5,r1 + mov r4,r5 + mov r5,r11 + mov r11,r15 + lsl r7 + adc r7,r1 + mov r15,r7 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + mov r7,r22 + mov r0,r1 + lsr r24 + ror r0 + lsr r24 + ror r0 + or r24,r0 + mov r22,r24 + lsl r13 + adc r13,r1 + lsl r13 + adc r13,r1 + mov r24,r13 + bst r28,0 + lsr r28 + bld r28,7 + mov r13,r28 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r28,r8 + swap r23 + mov r8,r23 + swap r21 + mov r23,r21 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r21,r10 + bst r9,0 + lsr r9 + bld r9,7 + mov r10,r9 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + mov r9,r3 + mov r0,r1 + lsr r27 + ror r0 + lsr r27 + ror r0 + or r27,r0 + mov r3,r27 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + mov r27,r2 + lsl r30 + adc r30,r1 + mov r2,r30 + mov r30,r18 + mov r31,r19 + mov r25,r20 + mov r16,r21 + mov r17,r22 + mov r18,r25 + mov r0,r31 + com r0 + and r18,r0 + eor r18,r30 + mov r19,r16 + mov r0,r25 + com r0 + and r19,r0 + eor r19,r31 + mov r20,r17 + mov r0,r16 + com r0 + and r20,r0 + eor r20,r25 + mov r21,r30 + mov r0,r17 + com r0 + and r21,r0 + eor r21,r16 + mov r22,r31 + mov r0,r30 + com r0 + and r22,r0 + eor r22,r17 + mov r30,r23 + mov r31,r26 + mov r25,r27 + mov r16,r28 + mov r17,r29 + mov r23,r25 + mov r0,r31 + com r0 + and r23,r0 + eor r23,r30 + mov r26,r16 + mov r0,r25 + com r0 + and r26,r0 + eor r26,r31 + mov r27,r17 + mov r0,r16 + com r0 + and r27,r0 + eor r27,r25 + mov r28,r30 + mov r0,r17 + com r0 + and r28,r0 + eor r28,r16 + mov r29,r31 + mov r0,r30 + com r0 + and r29,r0 + eor r29,r17 + mov r30,r2 + mov r31,r3 + mov r25,r4 + mov r16,r5 + mov r17,r6 + mov r2,r25 + mov r0,r31 + com r0 + and r2,r0 + eor r2,r30 + mov r3,r16 + mov r0,r25 + com r0 + and r3,r0 + eor r3,r31 + mov r4,r17 + mov r0,r16 + com r0 + and r4,r0 + eor r4,r25 + mov r5,r30 + mov r0,r17 + com r0 + and r5,r0 + eor r5,r16 + mov r6,r31 + mov r0,r30 + com r0 + and r6,r0 + eor r6,r17 + mov r30,r7 + mov r31,r8 + mov r25,r9 + mov r16,r10 + mov r17,r11 + mov r7,r25 + mov r0,r31 + com r0 + and r7,r0 + eor r7,r30 + mov r8,r16 + mov r0,r25 + com r0 + and r8,r0 + eor r8,r31 + mov r9,r17 + mov r0,r16 + com r0 + and r9,r0 + eor r9,r25 + mov r10,r30 + mov r0,r17 + com r0 + and r10,r0 + eor r10,r16 + mov 
r11,r31 + mov r0,r30 + com r0 + and r11,r0 + eor r11,r17 + mov r30,r12 + mov r31,r13 + mov r25,r14 + mov r16,r15 + mov r17,r24 + mov r12,r25 + mov r0,r31 + com r0 + and r12,r0 + eor r12,r30 + mov r13,r16 + mov r0,r25 + com r0 + and r13,r0 + eor r13,r31 + mov r14,r17 + mov r0,r16 + com r0 + and r14,r0 + eor r14,r25 + mov r15,r30 + mov r0,r17 + com r0 + and r15,r0 + eor r15,r16 + mov r24,r31 + mov r0,r30 + com r0 + and r24,r0 + eor r24,r17 + ret +420: + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r22 + std Z+5,r23 + std Z+6,r26 + std Z+7,r27 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r4 + std Z+13,r5 + std Z+14,r6 + std Z+15,r7 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + std Z+24,r24 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size keccakp_200_permute, .-keccakp_200_permute + + .text +.global keccakp_400_permute + .type keccakp_400_permute, @function +keccakp_400_permute: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + movw r30,r24 +.L__stack_usage = 17 + ld r6,Z + ldd r7,Z+1 + ldd r8,Z+2 + ldd r9,Z+3 + ldd r10,Z+4 + ldd r11,Z+5 + ldd r12,Z+6 + ldd r13,Z+7 + ldd r14,Z+8 + ldd r15,Z+9 + cpi r22,20 + brcs 15f + rcall 153f + ldi r23,1 + eor r6,r23 +15: + cpi r22,19 + brcs 23f + rcall 153f + ldi r23,130 + eor r6,r23 + ldi r17,128 + eor r7,r17 +23: + cpi r22,18 + brcs 31f + rcall 153f + ldi r23,138 + eor r6,r23 + ldi r17,128 + eor r7,r17 +31: + cpi r22,17 + brcs 37f + rcall 153f + ldi r23,128 + eor r7,r23 +37: + cpi r22,16 + brcs 45f + rcall 153f + ldi r23,139 + eor r6,r23 + ldi r17,128 + eor r7,r17 +45: + cpi r22,15 + brcs 51f + rcall 153f + ldi r23,1 + eor r6,r23 +51: + cpi r22,14 + brcs 59f + rcall 153f + ldi r23,129 + eor r6,r23 + ldi r17,128 + eor r7,r17 +59: + cpi r22,13 + brcs 67f + rcall 153f + ldi r23,9 + eor r6,r23 + ldi r17,128 + eor r7,r17 +67: + cpi r22,12 + brcs 73f + rcall 153f + ldi r23,138 + eor r6,r23 +73: + cpi r22,11 + brcs 79f + rcall 153f + ldi r23,136 + eor r6,r23 +79: + cpi r22,10 + brcs 87f + rcall 153f + ldi r23,9 + eor r6,r23 + ldi r17,128 + eor r7,r17 +87: + cpi r22,9 + brcs 93f + rcall 153f + ldi r23,10 + eor r6,r23 +93: + cpi r22,8 + brcs 101f + rcall 153f + ldi r23,139 + eor r6,r23 + ldi r17,128 + eor r7,r17 +101: + cpi r22,7 + brcs 107f + rcall 153f + ldi r23,139 + eor r6,r23 +107: + cpi r22,6 + brcs 115f + rcall 153f + ldi r23,137 + eor r6,r23 + ldi r17,128 + eor r7,r17 +115: + cpi r22,5 + brcs 123f + rcall 153f + ldi r23,3 + eor r6,r23 + ldi r17,128 + eor r7,r17 +123: + cpi r22,4 + brcs 131f + rcall 153f + ldi r23,2 + eor r6,r23 + ldi r17,128 + eor r7,r17 +131: + cpi r22,3 + brcs 137f + rcall 153f + ldi r23,128 + eor r6,r23 +137: + cpi r22,2 + brcs 145f + rcall 153f + ldi r23,10 + eor r6,r23 + ldi r17,128 + eor r7,r17 +145: + cpi r22,1 + brcs 151f + rcall 153f + ldi r23,10 + eor r6,r23 +151: + rjmp 1004f +153: + movw r18,r6 + ldd r0,Z+10 + eor r18,r0 + ldd r0,Z+11 + eor r19,r0 + ldd r0,Z+20 + eor r18,r0 + ldd r0,Z+21 + eor r19,r0 + ldd r0,Z+30 + eor r18,r0 + ldd r0,Z+31 + eor r19,r0 + ldd r0,Z+40 + eor r18,r0 + ldd r0,Z+41 + eor r19,r0 + movw r20,r8 + ldd r0,Z+12 + eor r20,r0 + ldd r0,Z+13 + eor r21,r0 + ldd r0,Z+22 + eor r20,r0 + ldd r0,Z+23 + eor r21,r0 + ldd r0,Z+32 + eor r20,r0 + 
ldd r0,Z+33 + eor r21,r0 + ldd r0,Z+42 + eor r20,r0 + ldd r0,Z+43 + eor r21,r0 + movw r26,r10 + ldd r0,Z+14 + eor r26,r0 + ldd r0,Z+15 + eor r27,r0 + ldd r0,Z+24 + eor r26,r0 + ldd r0,Z+25 + eor r27,r0 + ldd r0,Z+34 + eor r26,r0 + ldd r0,Z+35 + eor r27,r0 + ldd r0,Z+44 + eor r26,r0 + ldd r0,Z+45 + eor r27,r0 + movw r2,r12 + ldd r0,Z+16 + eor r2,r0 + ldd r0,Z+17 + eor r3,r0 + ldd r0,Z+26 + eor r2,r0 + ldd r0,Z+27 + eor r3,r0 + ldd r0,Z+36 + eor r2,r0 + ldd r0,Z+37 + eor r3,r0 + ldd r0,Z+46 + eor r2,r0 + ldd r0,Z+47 + eor r3,r0 + movw r4,r14 + ldd r0,Z+18 + eor r4,r0 + ldd r0,Z+19 + eor r5,r0 + ldd r0,Z+28 + eor r4,r0 + ldd r0,Z+29 + eor r5,r0 + ldd r0,Z+38 + eor r4,r0 + ldd r0,Z+39 + eor r5,r0 + ldd r0,Z+48 + eor r4,r0 + ldd r0,Z+49 + eor r5,r0 + movw r24,r20 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r4 + eor r25,r5 + eor r6,r24 + eor r7,r25 + ldd r0,Z+10 + eor r0,r24 + std Z+10,r0 + ldd r0,Z+11 + eor r0,r25 + std Z+11,r0 + ldd r0,Z+20 + eor r0,r24 + std Z+20,r0 + ldd r0,Z+21 + eor r0,r25 + std Z+21,r0 + ldd r0,Z+30 + eor r0,r24 + std Z+30,r0 + ldd r0,Z+31 + eor r0,r25 + std Z+31,r0 + ldd r0,Z+40 + eor r0,r24 + std Z+40,r0 + ldd r0,Z+41 + eor r0,r25 + std Z+41,r0 + movw r24,r26 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r18 + eor r25,r19 + eor r8,r24 + eor r9,r25 + ldd r0,Z+12 + eor r0,r24 + std Z+12,r0 + ldd r0,Z+13 + eor r0,r25 + std Z+13,r0 + ldd r0,Z+22 + eor r0,r24 + std Z+22,r0 + ldd r0,Z+23 + eor r0,r25 + std Z+23,r0 + ldd r0,Z+32 + eor r0,r24 + std Z+32,r0 + ldd r0,Z+33 + eor r0,r25 + std Z+33,r0 + ldd r0,Z+42 + eor r0,r24 + std Z+42,r0 + ldd r0,Z+43 + eor r0,r25 + std Z+43,r0 + movw r24,r2 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r20 + eor r25,r21 + eor r10,r24 + eor r11,r25 + ldd r0,Z+14 + eor r0,r24 + std Z+14,r0 + ldd r0,Z+15 + eor r0,r25 + std Z+15,r0 + ldd r0,Z+24 + eor r0,r24 + std Z+24,r0 + ldd r0,Z+25 + eor r0,r25 + std Z+25,r0 + ldd r0,Z+34 + eor r0,r24 + std Z+34,r0 + ldd r0,Z+35 + eor r0,r25 + std Z+35,r0 + ldd r0,Z+44 + eor r0,r24 + std Z+44,r0 + ldd r0,Z+45 + eor r0,r25 + std Z+45,r0 + movw r24,r4 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r26 + eor r25,r27 + eor r12,r24 + eor r13,r25 + ldd r0,Z+16 + eor r0,r24 + std Z+16,r0 + ldd r0,Z+17 + eor r0,r25 + std Z+17,r0 + ldd r0,Z+26 + eor r0,r24 + std Z+26,r0 + ldd r0,Z+27 + eor r0,r25 + std Z+27,r0 + ldd r0,Z+36 + eor r0,r24 + std Z+36,r0 + ldd r0,Z+37 + eor r0,r25 + std Z+37,r0 + ldd r0,Z+46 + eor r0,r24 + std Z+46,r0 + ldd r0,Z+47 + eor r0,r25 + std Z+47,r0 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r2 + eor r25,r3 + eor r14,r24 + eor r15,r25 + ldd r0,Z+18 + eor r0,r24 + std Z+18,r0 + ldd r0,Z+19 + eor r0,r25 + std Z+19,r0 + ldd r0,Z+28 + eor r0,r24 + std Z+28,r0 + ldd r0,Z+29 + eor r0,r25 + std Z+29,r0 + ldd r0,Z+38 + eor r0,r24 + std Z+38,r0 + ldd r0,Z+39 + eor r0,r25 + std Z+39,r0 + ldd r0,Z+48 + eor r0,r24 + std Z+48,r0 + ldd r0,Z+49 + eor r0,r25 + std Z+49,r0 + movw r24,r8 + ldd r8,Z+12 + ldd r9,Z+13 + mov r0,r9 + mov r9,r8 + mov r8,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldd r18,Z+18 + ldd r19,Z+19 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+12,r18 + std Z+13,r19 + ldd r18,Z+44 + ldd r19,Z+45 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+18,r18 + std Z+19,r19 + ldd r18,Z+28 + ldd r19,Z+29 + mov r0,r19 + mov r19,r18 + mov r18,r0 + bst r18,0 + lsr 
r19 + ror r18 + bld r19,7 + std Z+44,r18 + std Z+45,r19 + ldd r18,Z+40 + ldd r19,Z+41 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+28,r18 + std Z+29,r19 + movw r18,r10 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+40,r18 + std Z+41,r19 + ldd r10,Z+24 + ldd r11,Z+25 + mov r0,r11 + mov r11,r10 + mov r10,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldd r18,Z+26 + ldd r19,Z+27 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + std Z+24,r18 + std Z+25,r19 + ldd r18,Z+38 + ldd r19,Z+39 + mov r0,r19 + mov r19,r18 + mov r18,r0 + std Z+26,r18 + std Z+27,r19 + ldd r18,Z+46 + ldd r19,Z+47 + mov r0,r19 + mov r19,r18 + mov r18,r0 + std Z+38,r18 + std Z+39,r19 + ldd r18,Z+30 + ldd r19,Z+31 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + std Z+46,r18 + std Z+47,r19 + movw r18,r14 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+30,r18 + std Z+31,r19 + ldd r14,Z+48 + ldd r15,Z+49 + mov r0,r1 + lsr r15 + ror r14 + ror r0 + lsr r15 + ror r14 + ror r0 + or r15,r0 + ldd r18,Z+42 + ldd r19,Z+43 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+48,r18 + std Z+49,r19 + ldd r18,Z+16 + ldd r19,Z+17 + mov r0,r19 + mov r19,r18 + mov r18,r0 + bst r18,0 + lsr r19 + ror r18 + bld r19,7 + std Z+42,r18 + std Z+43,r19 + ldd r18,Z+32 + ldd r19,Z+33 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+16,r18 + std Z+17,r19 + ldd r18,Z+10 + ldd r19,Z+11 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+32,r18 + std Z+33,r19 + movw r18,r12 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+10,r18 + std Z+11,r19 + ldd r12,Z+36 + ldd r13,Z+37 + mov r0,r13 + mov r13,r12 + mov r12,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + or r13,r0 + ldd r18,Z+34 + ldd r19,Z+35 + bst r18,0 + lsr r19 + ror r18 + bld r19,7 + std Z+36,r18 + std Z+37,r19 + ldd r18,Z+22 + ldd r19,Z+23 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+34,r18 + std Z+35,r19 + ldd r18,Z+14 + ldd r19,Z+15 + mov r0,r19 + mov r19,r18 + mov r18,r0 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+22,r18 + std Z+23,r19 + ldd r18,Z+20 + ldd r19,Z+21 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+14,r18 + std Z+15,r19 + lsl r24 + rol r25 + adc r24,r1 + std Z+20,r24 + std Z+21,r25 + movw r18,r6 + movw r20,r8 + movw r26,r10 + movw r2,r12 + movw r4,r14 + movw r6,r26 + mov r0,r20 + com r0 + and r6,r0 + mov r0,r21 + com r0 + and r7,r0 + eor r6,r18 + eor r7,r19 + movw r8,r2 + mov r0,r26 + com r0 + and r8,r0 + mov r0,r27 + com r0 + and r9,r0 + eor r8,r20 + eor r9,r21 + movw r10,r4 + mov r0,r2 + com r0 + and r10,r0 + mov r0,r3 + com r0 + and r11,r0 + eor r10,r26 + eor r11,r27 + movw r12,r18 + mov r0,r4 + com r0 + and r12,r0 + mov r0,r5 + com r0 + and r13,r0 + eor r12,r2 + eor r13,r3 + movw r14,r20 + mov r0,r18 + com r0 + and r14,r0 + mov r0,r19 + com r0 + and r15,r0 + eor r14,r4 + eor 
r15,r5 + ldd r18,Z+10 + ldd r19,Z+11 + ldd r20,Z+12 + ldd r21,Z+13 + ldd r26,Z+14 + ldd r27,Z+15 + ldd r2,Z+16 + ldd r3,Z+17 + ldd r4,Z+18 + ldd r5,Z+19 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+10,r24 + std Z+11,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+12,r24 + std Z+13,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+14,r24 + std Z+15,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+16,r24 + std Z+17,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+18,r24 + std Z+19,r25 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + ldd r26,Z+24 + ldd r27,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r4,Z+28 + ldd r5,Z+29 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+20,r24 + std Z+21,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+22,r24 + std Z+23,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+24,r24 + std Z+25,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+26,r24 + std Z+27,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+28,r24 + std Z+29,r25 + ldd r18,Z+30 + ldd r19,Z+31 + ldd r20,Z+32 + ldd r21,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + ldd r2,Z+36 + ldd r3,Z+37 + ldd r4,Z+38 + ldd r5,Z+39 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+30,r24 + std Z+31,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+32,r24 + std Z+33,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+34,r24 + std Z+35,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+36,r24 + std Z+37,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+38,r24 + std Z+39,r25 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + ldd r26,Z+44 + ldd r27,Z+45 + ldd r2,Z+46 + ldd r3,Z+47 + ldd r4,Z+48 + ldd r5,Z+49 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+40,r24 + std Z+41,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+42,r24 + std Z+43,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+44,r24 + std Z+45,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+46,r24 + std Z+47,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+48,r24 + std Z+49,r25 + ret +1004: + st Z,r6 + std Z+1,r7 + std Z+2,r8 + std Z+3,r9 + std Z+4,r10 + std Z+5,r11 + std Z+6,r12 + std Z+7,r13 + std Z+8,r14 + std 
Z+9,r15 + pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size keccakp_400_permute, .-keccakp_400_permute + +#endif diff --git a/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-keccak.c b/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-keccak.c index c3c4011..60539df 100644 --- a/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-keccak.c +++ b/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-keccak.c @@ -22,74 +22,79 @@ #include "internal-keccak.h" +#if !defined(__AVR__) + /* Faster method to compute ((x + y) % 5) that avoids the division */ static unsigned char const addMod5Table[9] = { 0, 1, 2, 3, 4, 0, 1, 2, 3 }; #define addMod5(x, y) (addMod5Table[(x) + (y)]) -void keccakp_200_permute(keccakp_200_state_t *state, unsigned rounds) +void keccakp_200_permute(keccakp_200_state_t *state) { static uint8_t const RC[18] = { 0x01, 0x82, 0x8A, 0x00, 0x8B, 0x01, 0x81, 0x09, 0x8A, 0x88, 0x09, 0x0A, 0x8B, 0x8B, 0x89, 0x03, 0x02, 0x80 }; - uint8_t B[5][5]; + uint8_t C[5]; uint8_t D; unsigned round; unsigned index, index2; - for (round = 18 - rounds; round < 18; ++round) { + for (round = 0; round < 18; ++round) { /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. To save a bit of memory, - * we use the first row of B to store C and compute D on the fly */ + * arrays of size 5 called C and D. Compute D on the fly */ for (index = 0; index < 5; ++index) { - B[0][index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; + C[index] = state->A[0][index] ^ state->A[1][index] ^ + state->A[2][index] ^ state->A[3][index] ^ + state->A[4][index]; } for (index = 0; index < 5; ++index) { - D = B[0][addMod5(index, 4)] ^ - leftRotate1_8(B[0][addMod5(index, 1)]); + D = C[addMod5(index, 4)] ^ + leftRotate1_8(C[addMod5(index, 1)]); for (index2 = 0; index2 < 5; ++index2) state->A[index2][index] ^= D; } /* Step mapping rho and pi combined into a single step. 
* Rotate all lanes by a specific offset and rearrange */ - B[0][0] = state->A[0][0]; - B[1][0] = leftRotate4_8(state->A[0][3]); - B[2][0] = leftRotate1_8(state->A[0][1]); - B[3][0] = leftRotate3_8(state->A[0][4]); - B[4][0] = leftRotate6_8(state->A[0][2]); - B[0][1] = leftRotate4_8(state->A[1][1]); - B[1][1] = leftRotate4_8(state->A[1][4]); - B[2][1] = leftRotate6_8(state->A[1][2]); - B[3][1] = leftRotate4_8(state->A[1][0]); - B[4][1] = leftRotate7_8(state->A[1][3]); - B[0][2] = leftRotate3_8(state->A[2][2]); - B[1][2] = leftRotate3_8(state->A[2][0]); - B[2][2] = leftRotate1_8(state->A[2][3]); - B[3][2] = leftRotate2_8(state->A[2][1]); - B[4][2] = leftRotate7_8(state->A[2][4]); - B[0][3] = leftRotate5_8(state->A[3][3]); - B[1][3] = leftRotate5_8(state->A[3][1]); - B[2][3] = state->A[3][4]; - B[3][3] = leftRotate7_8(state->A[3][2]); - B[4][3] = leftRotate1_8(state->A[3][0]); - B[0][4] = leftRotate6_8(state->A[4][4]); - B[1][4] = leftRotate5_8(state->A[4][2]); - B[2][4] = leftRotate2_8(state->A[4][0]); - B[3][4] = state->A[4][3]; - B[4][4] = leftRotate2_8(state->A[4][1]); + D = state->A[0][1]; + state->A[0][1] = leftRotate4_8(state->A[1][1]); + state->A[1][1] = leftRotate4_8(state->A[1][4]); + state->A[1][4] = leftRotate5_8(state->A[4][2]); + state->A[4][2] = leftRotate7_8(state->A[2][4]); + state->A[2][4] = leftRotate2_8(state->A[4][0]); + state->A[4][0] = leftRotate6_8(state->A[0][2]); + state->A[0][2] = leftRotate3_8(state->A[2][2]); + state->A[2][2] = leftRotate1_8(state->A[2][3]); + state->A[2][3] = state->A[3][4]; + state->A[3][4] = state->A[4][3]; + state->A[4][3] = leftRotate1_8(state->A[3][0]); + state->A[3][0] = leftRotate3_8(state->A[0][4]); + state->A[0][4] = leftRotate6_8(state->A[4][4]); + state->A[4][4] = leftRotate2_8(state->A[4][1]); + state->A[4][1] = leftRotate7_8(state->A[1][3]); + state->A[1][3] = leftRotate5_8(state->A[3][1]); + state->A[3][1] = leftRotate4_8(state->A[1][0]); + state->A[1][0] = leftRotate4_8(state->A[0][3]); + state->A[0][3] = leftRotate5_8(state->A[3][3]); + state->A[3][3] = leftRotate7_8(state->A[3][2]); + state->A[3][2] = leftRotate2_8(state->A[2][1]); + state->A[2][1] = leftRotate6_8(state->A[1][2]); + state->A[1][2] = leftRotate3_8(state->A[2][0]); + state->A[2][0] = leftRotate1_8(D); /* Step mapping chi. Combine each lane with two others in its row */ for (index = 0; index < 5; ++index) { + C[0] = state->A[index][0]; + C[1] = state->A[index][1]; + C[2] = state->A[index][2]; + C[3] = state->A[index][3]; + C[4] = state->A[index][4]; for (index2 = 0; index2 < 5; ++index2) { - state->A[index2][index] = - B[index2][index] ^ - ((~B[index2][addMod5(index, 1)]) & - B[index2][addMod5(index, 2)]); + state->A[index][index2] = + C[index2] ^ + ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); } } @@ -110,61 +115,64 @@ void keccakp_400_permute_host(keccakp_400_state_t *state, unsigned rounds) 0x008A, 0x0088, 0x8009, 0x000A, 0x808B, 0x008B, 0x8089, 0x8003, 0x8002, 0x0080, 0x800A, 0x000A }; - uint16_t B[5][5]; + uint16_t C[5]; uint16_t D; unsigned round; unsigned index, index2; for (round = 20 - rounds; round < 20; ++round) { /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. To save a bit of memory, - * we use the first row of B to store C and compute D on the fly */ + * arrays of size 5 called C and D. 
Compute D on the fly */ for (index = 0; index < 5; ++index) { - B[0][index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; + C[index] = state->A[0][index] ^ state->A[1][index] ^ + state->A[2][index] ^ state->A[3][index] ^ + state->A[4][index]; } for (index = 0; index < 5; ++index) { - D = B[0][addMod5(index, 4)] ^ - leftRotate1_16(B[0][addMod5(index, 1)]); + D = C[addMod5(index, 4)] ^ + leftRotate1_16(C[addMod5(index, 1)]); for (index2 = 0; index2 < 5; ++index2) state->A[index2][index] ^= D; } /* Step mapping rho and pi combined into a single step. * Rotate all lanes by a specific offset and rearrange */ - B[0][0] = state->A[0][0]; - B[1][0] = leftRotate12_16(state->A[0][3]); - B[2][0] = leftRotate1_16 (state->A[0][1]); - B[3][0] = leftRotate11_16(state->A[0][4]); - B[4][0] = leftRotate14_16(state->A[0][2]); - B[0][1] = leftRotate12_16(state->A[1][1]); - B[1][1] = leftRotate4_16 (state->A[1][4]); - B[2][1] = leftRotate6_16 (state->A[1][2]); - B[3][1] = leftRotate4_16 (state->A[1][0]); - B[4][1] = leftRotate7_16 (state->A[1][3]); - B[0][2] = leftRotate11_16(state->A[2][2]); - B[1][2] = leftRotate3_16 (state->A[2][0]); - B[2][2] = leftRotate9_16 (state->A[2][3]); - B[3][2] = leftRotate10_16(state->A[2][1]); - B[4][2] = leftRotate7_16 (state->A[2][4]); - B[0][3] = leftRotate5_16 (state->A[3][3]); - B[1][3] = leftRotate13_16(state->A[3][1]); - B[2][3] = leftRotate8_16 (state->A[3][4]); - B[3][3] = leftRotate15_16(state->A[3][2]); - B[4][3] = leftRotate9_16 (state->A[3][0]); - B[0][4] = leftRotate14_16(state->A[4][4]); - B[1][4] = leftRotate13_16(state->A[4][2]); - B[2][4] = leftRotate2_16 (state->A[4][0]); - B[3][4] = leftRotate8_16 (state->A[4][3]); - B[4][4] = leftRotate2_16 (state->A[4][1]); + D = state->A[0][1]; + state->A[0][1] = leftRotate12_16(state->A[1][1]); + state->A[1][1] = leftRotate4_16 (state->A[1][4]); + state->A[1][4] = leftRotate13_16(state->A[4][2]); + state->A[4][2] = leftRotate7_16 (state->A[2][4]); + state->A[2][4] = leftRotate2_16 (state->A[4][0]); + state->A[4][0] = leftRotate14_16(state->A[0][2]); + state->A[0][2] = leftRotate11_16(state->A[2][2]); + state->A[2][2] = leftRotate9_16 (state->A[2][3]); + state->A[2][3] = leftRotate8_16 (state->A[3][4]); + state->A[3][4] = leftRotate8_16 (state->A[4][3]); + state->A[4][3] = leftRotate9_16 (state->A[3][0]); + state->A[3][0] = leftRotate11_16(state->A[0][4]); + state->A[0][4] = leftRotate14_16(state->A[4][4]); + state->A[4][4] = leftRotate2_16 (state->A[4][1]); + state->A[4][1] = leftRotate7_16 (state->A[1][3]); + state->A[1][3] = leftRotate13_16(state->A[3][1]); + state->A[3][1] = leftRotate4_16 (state->A[1][0]); + state->A[1][0] = leftRotate12_16(state->A[0][3]); + state->A[0][3] = leftRotate5_16 (state->A[3][3]); + state->A[3][3] = leftRotate15_16(state->A[3][2]); + state->A[3][2] = leftRotate10_16(state->A[2][1]); + state->A[2][1] = leftRotate6_16 (state->A[1][2]); + state->A[1][2] = leftRotate3_16 (state->A[2][0]); + state->A[2][0] = leftRotate1_16(D); /* Step mapping chi. 
Combine each lane with two others in its row */ for (index = 0; index < 5; ++index) { + C[0] = state->A[index][0]; + C[1] = state->A[index][1]; + C[2] = state->A[index][2]; + C[3] = state->A[index][3]; + C[4] = state->A[index][4]; for (index2 = 0; index2 < 5; ++index2) { - state->A[index2][index] = - B[index2][index] ^ - ((~B[index2][addMod5(index, 1)]) & - B[index2][addMod5(index, 2)]); + state->A[index][index2] = + C[index2] ^ + ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); } } @@ -202,3 +210,5 @@ void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds) } #endif + +#endif /* !__AVR__ */ diff --git a/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-keccak.h b/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-keccak.h index 026da50..2ffef42 100644 --- a/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-keccak.h +++ b/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-keccak.h @@ -68,9 +68,8 @@ typedef union * \brief Permutes the Keccak-p[200] state. * * \param state The Keccak-p[200] state to be permuted. - * \param rounds The number of rounds to perform (up to 18). */ -void keccakp_200_permute(keccakp_200_state_t *state, unsigned rounds); +void keccakp_200_permute(keccakp_200_state_t *state); /** * \brief Permutes the Keccak-p[400] state, which is assumed to be in diff --git a/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-util.h b/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-util.h index e79158c..e30166d 100644 --- a/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-util.h +++ b/isap/Implementations/crypto_aead/isapa128av20/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/aead-common.c b/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/aead-common.h b/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. 
- * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/api.h b/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/encrypt.c b/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/encrypt.c deleted file mode 100644 index 7b2bc3a..0000000 --- a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "isap.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return isap_ascon_128_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return isap_ascon_128_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-ascon-avr.S b/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-ascon-avr.S deleted file mode 100644 index e8a4fb4..0000000 --- a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-ascon-avr.S +++ /dev/null @@ -1,778 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global ascon_permute - .type ascon_permute, @function -ascon_permute: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ldi r18,15 - sub r18,r22 - swap r18 - or r22,r18 - ldd r3,Z+16 - ldd r2,Z+17 - ldd r27,Z+18 - ldd r26,Z+19 - ldd r21,Z+20 - ldd r20,Z+21 - ldd r19,Z+22 - ldd r18,Z+23 - ldd r11,Z+32 - ldd r10,Z+33 - ldd r9,Z+34 - ldd r8,Z+35 - ldd r7,Z+36 - ldd r6,Z+37 - ldd r5,Z+38 - ldd r4,Z+39 -20: - eor r18,r22 - ldd r23,Z+7 - ldd r12,Z+15 - ldd r13,Z+31 - eor r23,r4 - eor r4,r13 - eor r18,r12 - mov r14,r23 - mov r15,r12 - mov r24,r18 - mov r25,r13 - mov r16,r4 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r18 - and r24,r13 - and r25,r4 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r18,r25 - eor r13,r16 - eor r4,r14 - eor 
r12,r23 - eor r23,r4 - eor r13,r18 - com r18 - std Z+7,r23 - std Z+15,r12 - std Z+31,r13 - std Z+39,r4 - ldd r23,Z+6 - ldd r12,Z+14 - ldd r13,Z+30 - eor r23,r5 - eor r5,r13 - eor r19,r12 - mov r14,r23 - mov r15,r12 - mov r24,r19 - mov r25,r13 - mov r16,r5 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r19 - and r24,r13 - and r25,r5 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r19,r25 - eor r13,r16 - eor r5,r14 - eor r12,r23 - eor r23,r5 - eor r13,r19 - com r19 - std Z+6,r23 - std Z+14,r12 - std Z+30,r13 - std Z+38,r5 - ldd r23,Z+5 - ldd r12,Z+13 - ldd r13,Z+29 - eor r23,r6 - eor r6,r13 - eor r20,r12 - mov r14,r23 - mov r15,r12 - mov r24,r20 - mov r25,r13 - mov r16,r6 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r20 - and r24,r13 - and r25,r6 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r20,r25 - eor r13,r16 - eor r6,r14 - eor r12,r23 - eor r23,r6 - eor r13,r20 - com r20 - std Z+5,r23 - std Z+13,r12 - std Z+29,r13 - std Z+37,r6 - ldd r23,Z+4 - ldd r12,Z+12 - ldd r13,Z+28 - eor r23,r7 - eor r7,r13 - eor r21,r12 - mov r14,r23 - mov r15,r12 - mov r24,r21 - mov r25,r13 - mov r16,r7 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r21 - and r24,r13 - and r25,r7 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r21,r25 - eor r13,r16 - eor r7,r14 - eor r12,r23 - eor r23,r7 - eor r13,r21 - com r21 - std Z+4,r23 - std Z+12,r12 - std Z+28,r13 - std Z+36,r7 - ldd r23,Z+3 - ldd r12,Z+11 - ldd r13,Z+27 - eor r23,r8 - eor r8,r13 - eor r26,r12 - mov r14,r23 - mov r15,r12 - mov r24,r26 - mov r25,r13 - mov r16,r8 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r26 - and r24,r13 - and r25,r8 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r26,r25 - eor r13,r16 - eor r8,r14 - eor r12,r23 - eor r23,r8 - eor r13,r26 - com r26 - std Z+3,r23 - std Z+11,r12 - std Z+27,r13 - std Z+35,r8 - ldd r23,Z+2 - ldd r12,Z+10 - ldd r13,Z+26 - eor r23,r9 - eor r9,r13 - eor r27,r12 - mov r14,r23 - mov r15,r12 - mov r24,r27 - mov r25,r13 - mov r16,r9 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r27 - and r24,r13 - and r25,r9 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r27,r25 - eor r13,r16 - eor r9,r14 - eor r12,r23 - eor r23,r9 - eor r13,r27 - com r27 - std Z+2,r23 - std Z+10,r12 - std Z+26,r13 - std Z+34,r9 - ldd r23,Z+1 - ldd r12,Z+9 - ldd r13,Z+25 - eor r23,r10 - eor r10,r13 - eor r2,r12 - mov r14,r23 - mov r15,r12 - mov r24,r2 - mov r25,r13 - mov r16,r10 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r2 - and r24,r13 - and r25,r10 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r2,r25 - eor r13,r16 - eor r10,r14 - eor r12,r23 - eor r23,r10 - eor r13,r2 - com r2 - std Z+1,r23 - std Z+9,r12 - std Z+25,r13 - std Z+33,r10 - ld r23,Z - ldd r12,Z+8 - ldd r13,Z+24 - eor r23,r11 - eor r11,r13 - eor r3,r12 - mov r14,r23 - mov r15,r12 - mov r24,r3 - mov r25,r13 - mov r16,r11 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r3 - and r24,r13 - and r25,r11 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r3,r25 - eor r13,r16 - eor r11,r14 - eor r12,r23 - eor r23,r11 - eor r13,r3 - com r3 - st Z,r23 - std Z+8,r12 - std Z+24,r13 - std Z+32,r11 - ld r11,Z - ldd r10,Z+1 - ldd r9,Z+2 - ldd r8,Z+3 - ldd r7,Z+4 - ldd r6,Z+5 - ldd r5,Z+6 - ldd r4,Z+7 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r14 - mov r14,r24 - mov r24,r16 - mov r16,r0 - mov r0,r13 - mov r13,r15 - mov r15,r25 - mov r25,r17 - mov r17,r0 - mov r0,r1 - 
lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r17,r0 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r4 - mov r0,r5 - push r6 - mov r4,r7 - mov r5,r8 - mov r6,r9 - mov r7,r10 - mov r8,r11 - pop r11 - mov r10,r0 - mov r9,r23 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - st Z,r11 - std Z+1,r10 - std Z+2,r9 - std Z+3,r8 - std Z+4,r7 - std Z+5,r6 - std Z+6,r5 - std Z+7,r4 - ldd r11,Z+8 - ldd r10,Z+9 - ldd r9,Z+10 - ldd r8,Z+11 - ldd r7,Z+12 - ldd r6,Z+13 - ldd r5,Z+14 - ldd r4,Z+15 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r9 - mov r0,r10 - push r11 - mov r11,r8 - mov r10,r7 - mov r9,r6 - mov r8,r5 - mov r7,r4 - pop r6 - mov r5,r0 - mov r4,r23 - lsl r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - adc r4,r1 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - std Z+8,r11 - std Z+9,r10 - std Z+10,r9 - std Z+11,r8 - std Z+12,r7 - std Z+13,r6 - std Z+14,r5 - std Z+15,r4 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - bst r12,0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - bld r17,7 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - eor r24,r26 - eor r25,r27 - eor r16,r2 - eor r17,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r2 - rol r3 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r2 - rol r3 - adc r18,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - eor r26,r24 - eor r27,r25 - eor r2,r16 - eor r3,r17 - ldd r11,Z+24 - ldd r10,Z+25 - ldd r9,Z+26 - ldd r8,Z+27 - ldd r7,Z+28 - ldd r6,Z+29 - ldd r5,Z+30 - ldd r4,Z+31 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r17,r0 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r0,r4 - mov r4,r6 - mov r6,r8 - mov r8,r10 - mov r10,r0 - mov r0,r5 - mov r5,r7 - mov r7,r9 - mov r9,r11 - mov r11,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - 
ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - std Z+24,r11 - std Z+25,r10 - std Z+26,r9 - std Z+27,r8 - std Z+28,r7 - std Z+29,r6 - std Z+30,r5 - std Z+31,r4 - ldd r11,Z+32 - ldd r10,Z+33 - ldd r9,Z+34 - ldd r8,Z+35 - ldd r7,Z+36 - ldd r6,Z+37 - ldd r5,Z+38 - ldd r4,Z+39 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r9 - mov r0,r10 - push r11 - mov r11,r8 - mov r10,r7 - mov r9,r6 - mov r8,r5 - mov r7,r4 - pop r6 - mov r5,r0 - mov r4,r23 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - subi r22,15 - ldi r25,60 - cpse r22,r25 - rjmp 20b - std Z+16,r3 - std Z+17,r2 - std Z+18,r27 - std Z+19,r26 - std Z+20,r21 - std Z+21,r20 - std Z+22,r19 - std Z+23,r18 - std Z+32,r11 - std Z+33,r10 - std Z+34,r9 - std Z+35,r8 - std Z+36,r7 - std Z+37,r6 - std Z+38,r5 - std Z+39,r4 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size ascon_permute, .-ascon_permute - -#endif diff --git a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-ascon.c b/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-ascon.c deleted file mode 100644 index 657aabe..0000000 --- a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-ascon.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "internal-ascon.h" - -#if !defined(__AVR__) - -void ascon_permute(ascon_state_t *state, uint8_t first_round) -{ - uint64_t t0, t1, t2, t3, t4; -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = be_load_word64(state->B); - uint64_t x1 = be_load_word64(state->B + 8); - uint64_t x2 = be_load_word64(state->B + 16); - uint64_t x3 = be_load_word64(state->B + 24); - uint64_t x4 = be_load_word64(state->B + 32); -#else - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; -#endif - while (first_round < 12) { - /* Add the round constant to the state */ - x2 ^= ((0x0F - first_round) << 4) | first_round; - - /* Substitution layer - apply the s-box using bit-slicing - * according to the algorithm recommended in the specification */ - x0 ^= x4; x4 ^= x3; x2 ^= x1; - t0 = ~x0; t1 = ~x1; t2 = ~x2; t3 = ~x3; t4 = ~x4; - t0 &= x1; t1 &= x2; t2 &= x3; t3 &= x4; t4 &= x0; - x0 ^= t1; x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t0; - x1 ^= x0; x0 ^= x4; x3 ^= x2; x2 = ~x2; - - /* Linear diffusion layer */ - x0 ^= rightRotate19_64(x0) ^ rightRotate28_64(x0); - x1 ^= rightRotate61_64(x1) ^ rightRotate39_64(x1); - x2 ^= rightRotate1_64(x2) ^ rightRotate6_64(x2); - x3 ^= rightRotate10_64(x3) ^ rightRotate17_64(x3); - x4 ^= rightRotate7_64(x4) ^ rightRotate41_64(x4); - - /* Move onto the next round */ - ++first_round; - } -#if defined(LW_UTIL_LITTLE_ENDIAN) - be_store_word64(state->B, x0); - be_store_word64(state->B + 8, x1); - be_store_word64(state->B + 16, x2); - be_store_word64(state->B + 24, x3); - be_store_word64(state->B + 32, x4); -#else - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; -#endif -} - -#endif /* !__AVR__ */ diff --git a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-ascon.h b/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-ascon.h deleted file mode 100644 index d3fa3ca..0000000 --- a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-ascon.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_ASCON_H -#define LW_INTERNAL_ASCON_H - -#include "internal-util.h" - -/** - * \file internal-ascon.h - * \brief Internal implementation of the ASCON permutation. 
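The portable ascon_permute above derives each round constant directly from the round index instead of using a table. As a minimal standalone sketch (plain C, not taken from the Rhys sources; main and the loop are illustrative only), the same expression evaluated for all twelve rounds looks like this; calling ascon_permute with first_round set to 0, 4 or 6 simply starts partway into the sequence (p12, p8 and p6 respectively):

    #include <stdio.h>

    int main(void)
    {
        unsigned r;
        /* Same expression as "x2 ^= ((0x0F - first_round) << 4) | first_round"
         * in the C implementation above: 0xF0, 0xE1, ..., 0x4B. */
        for (r = 0; r < 12; ++r)
            printf("round %2u: x2 ^= 0x%02X\n", r, ((0x0F - r) << 4) | r);
        return 0;
    }
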
- * - * References: http://competitions.cr.yp.to/round3/asconv12.pdf, - * http://ascon.iaik.tugraz.at/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Structure of the internal state of the ASCON permutation. - */ -typedef union -{ - uint64_t S[5]; /**< Words of the state */ - uint8_t B[40]; /**< Bytes of the state */ - -} ascon_state_t; - -/** - * \brief Permutes the ASCON state. - * - * \param state The ASCON state to be permuted. - * \param first_round The first round (of 12) to be performed; 0, 4, or 6. - * - * The input and output \a state will be in big-endian byte order. - */ -void ascon_permute(ascon_state_t *state, uint8_t first_round); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-isap.h b/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-isap.h deleted file mode 100644 index ba99f2a..0000000 --- a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-isap.h +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -/* We expect a number of macros to be defined before this file - * is included to configure the underlying ISAP variant. - * - * ISAP_ALG_NAME Name of the ISAP algorithm; e.g. isap_keccak_128 - * ISAP_RATE Number of bytes in the rate for hashing and encryption. - * ISAP_sH Number of rounds for hashing. - * ISAP_sE Number of rounds for encryption. - * ISAP_sB Number of rounds for key bit absorption. - * ISAP_sK Number of rounds for keying. - * ISAP_STATE Type for the permuation state; e.g. ascon_state_t - * ISAP_PERMUTE(s,r) Permutes the state "s" with number of rounds "r". 
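internal-isap.h is a template header: it is meant to be included once per ISAP variant after the macros listed above have been defined (plus ISAP_KEY_SIZE, ISAP_NONCE_SIZE and ISAP_TAG_SIZE, which the body also uses). The following sketch shows the intended pattern; the variant name, the round counts and the ISAP_PERMUTE mapping are assumptions for illustration, not a copy of the original isap.c:

    /* Illustrative instantiation of the ISAP template (assumed values). */
    #include "internal-ascon.h"

    #define ISAP_KEY_SIZE   16
    #define ISAP_NONCE_SIZE 16
    #define ISAP_TAG_SIZE   16
    #define ISAP_ALG_NAME   isap_ascon_128
    #define ISAP_RATE       8    /* 64-bit rate for hashing and encryption */
    #define ISAP_sH         12
    #define ISAP_sE         12
    #define ISAP_sB         12
    #define ISAP_sK         12
    #define ISAP_STATE      ascon_state_t
    /* ascon_permute() takes a first-round index, so "perform r rounds" is
     * expressed as starting at round 12 - r (assumed mapping). */
    #define ISAP_PERMUTE(s, r) ascon_permute((s), (uint8_t)(12 - (r)))
    #include "internal-isap.h"
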
- */ -#if defined(ISAP_ALG_NAME) - -#define ISAP_CONCAT_INNER(name,suffix) name##suffix -#define ISAP_CONCAT(name,suffix) ISAP_CONCAT_INNER(name,suffix) - -/* IV string for initialising the associated data */ -static unsigned char const ISAP_CONCAT(ISAP_ALG_NAME,_IV_A) - [sizeof(ISAP_STATE) - ISAP_NONCE_SIZE] = { - 0x01, ISAP_KEY_SIZE * 8, ISAP_RATE * 8, 1, - ISAP_sH, ISAP_sB, ISAP_sE, ISAP_sK -}; - -/* IV string for authenticating associated data */ -static unsigned char const ISAP_CONCAT(ISAP_ALG_NAME,_IV_KA) - [sizeof(ISAP_STATE) - ISAP_KEY_SIZE] = { - 0x02, ISAP_KEY_SIZE * 8, ISAP_RATE * 8, 1, - ISAP_sH, ISAP_sB, ISAP_sE, ISAP_sK -}; - -/* IV string for encrypting payload data */ -static unsigned char const ISAP_CONCAT(ISAP_ALG_NAME,_IV_KE) - [sizeof(ISAP_STATE) - ISAP_KEY_SIZE] = { - 0x03, ISAP_KEY_SIZE * 8, ISAP_RATE * 8, 1, - ISAP_sH, ISAP_sB, ISAP_sE, ISAP_sK -}; - -/** - * \brief Re-keys the ISAP permutation state. - * - * \param state The permutation state to be re-keyed. - * \param k Points to the 128-bit key for the ISAP cipher. - * \param iv Points to the initialization vector for this re-keying operation. - * \param data Points to the data to be absorbed to perform the re-keying. - * \param data_len Length of the data to be absorbed. - * - * The output key will be left in the leading bytes of \a state. - */ -static void ISAP_CONCAT(ISAP_ALG_NAME,_rekey) - (ISAP_STATE *state, const unsigned char *k, const unsigned char *iv, - const unsigned char *data, unsigned data_len) -{ - unsigned bit, num_bits; - - /* Initialize the state with the key and IV */ - memcpy(state->B, k, ISAP_KEY_SIZE); - memcpy(state->B + ISAP_KEY_SIZE, iv, sizeof(state->B) - ISAP_KEY_SIZE); - ISAP_PERMUTE(state, ISAP_sK); - - /* Absorb all of the bits of the data buffer one by one */ - num_bits = data_len * 8 - 1; - for (bit = 0; bit < num_bits; ++bit) { - state->B[0] ^= (data[bit / 8] << (bit % 8)) & 0x80; - ISAP_PERMUTE(state, ISAP_sB); - } - state->B[0] ^= (data[bit / 8] << (bit % 8)) & 0x80; - ISAP_PERMUTE(state, ISAP_sK); -} - -/** - * \brief Encrypts (or decrypts) a message payload with ISAP. - * - * \param state ISAP permutation state. - * \param k Points to the 128-bit key for the ISAP cipher. - * \param npub Points to the 128-bit nonce for the ISAP cipher. - * \param c Buffer to receive the output ciphertext. - * \param m Buffer to receive the input plaintext. - * \param mlen Length of the input plaintext. - */ -static void ISAP_CONCAT(ISAP_ALG_NAME,_encrypt) - (ISAP_STATE *state, const unsigned char *k, const unsigned char *npub, - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Set up the re-keyed encryption key and nonce in the state */ - ISAP_CONCAT(ISAP_ALG_NAME,_rekey) - (state, k, ISAP_CONCAT(ISAP_ALG_NAME,_IV_KE), npub, ISAP_NONCE_SIZE); - memcpy(state->B + sizeof(ISAP_STATE) - ISAP_NONCE_SIZE, - npub, ISAP_NONCE_SIZE); - - /* Encrypt the plaintext to produce the ciphertext */ - while (mlen >= ISAP_RATE) { - ISAP_PERMUTE(state, ISAP_sE); - lw_xor_block_2_src(c, state->B, m, ISAP_RATE); - c += ISAP_RATE; - m += ISAP_RATE; - mlen -= ISAP_RATE; - } - if (mlen > 0) { - ISAP_PERMUTE(state, ISAP_sE); - lw_xor_block_2_src(c, state->B, m, (unsigned)mlen); - } -} - -/** - * \brief Authenticates the associated data and ciphertext using ISAP. - * - * \param state ISAP permutation state. - * \param k Points to the 128-bit key for the ISAP cipher. - * \param npub Points to the 128-bit nonce for the ISAP cipher. - * \param ad Buffer containing the associated data. 
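The _rekey helper above is the core of ISAP's leakage resilience: the re-keying data is absorbed one bit at a time, most significant bit first, with a permutation call between consecutive bits and a stronger one after the last bit. A self-contained sketch of just that bit-extraction loop follows; absorb_bits and toy_permute are illustrative names, and the one-byte toy permutation merely stands in for ISAP_PERMUTE so the snippet compiles on its own:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-in for the real permutation; only here to keep the
     * example self-contained. */
    static void toy_permute(uint8_t *b)
    {
        *b = (uint8_t)(((*b << 1) | (*b >> 7)) ^ 0x5A);
    }

    /* Mirrors the absorption loop in _rekey: bit i of the data (MSB first)
     * is XORed into the top bit of the rate byte. */
    static uint8_t absorb_bits(const uint8_t *data, unsigned len)
    {
        uint8_t rate = 0;
        unsigned bit, num_bits = len * 8 - 1;
        for (bit = 0; bit < num_bits; ++bit) {
            rate ^= (uint8_t)((data[bit / 8] << (bit % 8)) & 0x80);
            toy_permute(&rate);          /* ISAP_sB rounds in the real code */
        }
        rate ^= (uint8_t)((data[bit / 8] << (bit % 8)) & 0x80);
        return rate;                     /* followed by ISAP_sK rounds */
    }

    int main(void)
    {
        uint8_t nonce[2] = { 0xA5, 0x0F };
        printf("absorbed: 0x%02X\n", (unsigned)absorb_bits(nonce, 2));
        return 0;
    }
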
- * \param adlen Length of the associated data. - * \param c Buffer containing the ciphertext. - * \param clen Length of the ciphertext. - */ -static void ISAP_CONCAT(ISAP_ALG_NAME,_mac) - (ISAP_STATE *state, const unsigned char *k, const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *c, unsigned long long clen, - unsigned char *tag) -{ - unsigned char preserve[sizeof(ISAP_STATE) - ISAP_TAG_SIZE]; - unsigned temp; - - /* Absorb the associated data */ - memcpy(state->B, npub, ISAP_NONCE_SIZE); - memcpy(state->B + ISAP_NONCE_SIZE, ISAP_CONCAT(ISAP_ALG_NAME,_IV_A), - sizeof(state->B) - ISAP_NONCE_SIZE); - ISAP_PERMUTE(state, ISAP_sH); - while (adlen >= ISAP_RATE) { - lw_xor_block(state->B, ad, ISAP_RATE); - ISAP_PERMUTE(state, ISAP_sH); - ad += ISAP_RATE; - adlen -= ISAP_RATE; - } - temp = (unsigned)adlen; - lw_xor_block(state->B, ad, temp); - state->B[temp] ^= 0x80; /* padding */ - ISAP_PERMUTE(state, ISAP_sH); - state->B[sizeof(state->B) - 1] ^= 0x01; /* domain separation */ - - /* Absorb the ciphertext */ - while (clen >= ISAP_RATE) { - lw_xor_block(state->B, c, ISAP_RATE); - ISAP_PERMUTE(state, ISAP_sH); - c += ISAP_RATE; - clen -= ISAP_RATE; - } - temp = (unsigned)clen; - lw_xor_block(state->B, c, temp); - state->B[temp] ^= 0x80; /* padding */ - ISAP_PERMUTE(state, ISAP_sH); - - /* Re-key the state and generate the authentication tag */ - memcpy(tag, state->B, ISAP_TAG_SIZE); - memcpy(preserve, state->B + ISAP_TAG_SIZE, sizeof(preserve)); - ISAP_CONCAT(ISAP_ALG_NAME,_rekey) - (state, k, ISAP_CONCAT(ISAP_ALG_NAME,_IV_KA), tag, ISAP_TAG_SIZE); - memcpy(state->B + ISAP_TAG_SIZE, preserve, sizeof(preserve)); - ISAP_PERMUTE(state, ISAP_sH); - memcpy(tag, state->B, ISAP_TAG_SIZE); -} - -int ISAP_CONCAT(ISAP_ALG_NAME,_aead_encrypt) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - ISAP_STATE state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ISAP_TAG_SIZE; - - /* Encrypt the plaintext to produce the ciphertext */ - ISAP_CONCAT(ISAP_ALG_NAME,_encrypt)(&state, k, npub, c, m, mlen); - - /* Authenticate the associated data and ciphertext to generate the tag */ - ISAP_CONCAT(ISAP_ALG_NAME,_mac) - (&state, k, npub, ad, adlen, c, mlen, c + mlen); - return 0; -} - -int ISAP_CONCAT(ISAP_ALG_NAME,_aead_decrypt) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - ISAP_STATE state; - unsigned char tag[ISAP_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ISAP_TAG_SIZE) - return -1; - *mlen = clen - ISAP_TAG_SIZE; - - /* Authenticate the associated data and ciphertext to generate the tag */ - ISAP_CONCAT(ISAP_ALG_NAME,_mac)(&state, k, npub, ad, adlen, c, *mlen, tag); - - /* Decrypt the ciphertext to produce the plaintext */ - ISAP_CONCAT(ISAP_ALG_NAME,_encrypt)(&state, k, npub, m, c, *mlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, tag, c + *mlen, ISAP_TAG_SIZE); -} - -#endif /* ISAP_ALG_NAME */ - -/* Now undefine everything so that we can include this file again for - * another variant on the ISAP algorithm */ -#undef ISAP_ALG_NAME -#undef ISAP_RATE -#undef 
ISAP_sH -#undef ISAP_sE -#undef ISAP_sB -#undef ISAP_sK -#undef ISAP_STATE -#undef ISAP_PERMUTE -#undef ISAP_CONCAT_INNER -#undef ISAP_CONCAT diff --git a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-keccak-avr.S b/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-keccak-avr.S deleted file mode 100644 index e50ccaf..0000000 --- a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-keccak-avr.S +++ /dev/null @@ -1,1552 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global keccakp_200_permute - .type keccakp_200_permute, @function -keccakp_200_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r26,Z+6 - ldd r27,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r4,Z+12 - ldd r5,Z+13 - ldd r6,Z+14 - ldd r7,Z+15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - ldd r24,Z+24 - push r31 - push r30 - rcall 82f - ldi r30,1 - eor r18,r30 - rcall 82f - ldi r30,130 - eor r18,r30 - rcall 82f - ldi r30,138 - eor r18,r30 - rcall 82f - mov r30,r1 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,1 - eor r18,r30 - rcall 82f - ldi r30,129 - eor r18,r30 - rcall 82f - ldi r30,9 - eor r18,r30 - rcall 82f - ldi r30,138 - eor r18,r30 - rcall 82f - ldi r30,136 - eor r18,r30 - rcall 82f - ldi r30,9 - eor r18,r30 - rcall 82f - ldi r30,10 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,137 - eor r18,r30 - rcall 82f - ldi r30,3 - eor r18,r30 - rcall 82f - ldi r30,2 - eor r18,r30 - rcall 82f - ldi r30,128 - eor r18,r30 - rjmp 420f -82: - mov r30,r18 - eor r30,r23 - eor r30,r2 - eor r30,r7 - eor r30,r12 - mov r31,r19 - eor r31,r26 - eor r31,r3 - eor r31,r8 - eor r31,r13 - mov r25,r20 - eor r25,r27 - eor r25,r4 - eor r25,r9 - eor r25,r14 - mov r16,r21 - eor r16,r28 - eor r16,r5 - eor r16,r10 - eor r16,r15 - mov r17,r22 - eor r17,r29 - eor r17,r6 - eor r17,r11 - eor r17,r24 - mov r0,r31 - lsl r0 - adc r0,r1 - eor r0,r17 - eor r18,r0 - eor r23,r0 - eor r2,r0 - eor r7,r0 - eor r12,r0 - mov r0,r25 - lsl r0 - adc r0,r1 - eor r0,r30 - eor r19,r0 - eor r26,r0 - eor r3,r0 - eor r8,r0 - eor r13,r0 - mov r0,r16 - lsl r0 - adc r0,r1 - eor r0,r31 - eor r20,r0 - eor r27,r0 - eor r4,r0 - eor r9,r0 - eor r14,r0 - mov r0,r17 - lsl r0 - adc r0,r1 - eor r0,r25 - eor r21,r0 - eor r28,r0 - eor r5,r0 - eor r10,r0 - eor r15,r0 - mov r0,r30 - lsl r0 - adc r0,r1 - eor r0,r16 - eor r22,r0 - eor r29,r0 - eor r6,r0 - eor r11,r0 - eor r24,r0 - mov r30,r19 - swap r26 - mov r19,r26 - swap r29 - mov r26,r29 - mov r0,r1 - lsr r14 - ror r0 - lsr r14 - ror r0 - lsr r14 - ror r0 - or r14,r0 - mov r29,r14 - bst r6,0 - lsr r6 - bld r6,7 - mov r14,r6 - lsl r12 - adc r12,r1 - lsl r12 - adc r12,r1 - mov r6,r12 - mov r0,r1 - lsr r20 - ror r0 - lsr r20 - ror r0 - or r20,r0 - mov r12,r20 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - mov r20,r4 - lsl r5 - adc r5,r1 - mov r4,r5 - mov r5,r11 - mov r11,r15 - lsl r7 - adc r7,r1 - mov r15,r7 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - mov r7,r22 - mov r0,r1 - lsr r24 - ror r0 - lsr r24 - ror r0 - or r24,r0 - mov r22,r24 - lsl r13 - adc 
r13,r1 - lsl r13 - adc r13,r1 - mov r24,r13 - bst r28,0 - lsr r28 - bld r28,7 - mov r13,r28 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r28,r8 - swap r23 - mov r8,r23 - swap r21 - mov r23,r21 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r21,r10 - bst r9,0 - lsr r9 - bld r9,7 - mov r10,r9 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - mov r9,r3 - mov r0,r1 - lsr r27 - ror r0 - lsr r27 - ror r0 - or r27,r0 - mov r3,r27 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - mov r27,r2 - lsl r30 - adc r30,r1 - mov r2,r30 - mov r30,r18 - mov r31,r19 - mov r25,r20 - mov r16,r21 - mov r17,r22 - mov r18,r25 - mov r0,r31 - com r0 - and r18,r0 - eor r18,r30 - mov r19,r16 - mov r0,r25 - com r0 - and r19,r0 - eor r19,r31 - mov r20,r17 - mov r0,r16 - com r0 - and r20,r0 - eor r20,r25 - mov r21,r30 - mov r0,r17 - com r0 - and r21,r0 - eor r21,r16 - mov r22,r31 - mov r0,r30 - com r0 - and r22,r0 - eor r22,r17 - mov r30,r23 - mov r31,r26 - mov r25,r27 - mov r16,r28 - mov r17,r29 - mov r23,r25 - mov r0,r31 - com r0 - and r23,r0 - eor r23,r30 - mov r26,r16 - mov r0,r25 - com r0 - and r26,r0 - eor r26,r31 - mov r27,r17 - mov r0,r16 - com r0 - and r27,r0 - eor r27,r25 - mov r28,r30 - mov r0,r17 - com r0 - and r28,r0 - eor r28,r16 - mov r29,r31 - mov r0,r30 - com r0 - and r29,r0 - eor r29,r17 - mov r30,r2 - mov r31,r3 - mov r25,r4 - mov r16,r5 - mov r17,r6 - mov r2,r25 - mov r0,r31 - com r0 - and r2,r0 - eor r2,r30 - mov r3,r16 - mov r0,r25 - com r0 - and r3,r0 - eor r3,r31 - mov r4,r17 - mov r0,r16 - com r0 - and r4,r0 - eor r4,r25 - mov r5,r30 - mov r0,r17 - com r0 - and r5,r0 - eor r5,r16 - mov r6,r31 - mov r0,r30 - com r0 - and r6,r0 - eor r6,r17 - mov r30,r7 - mov r31,r8 - mov r25,r9 - mov r16,r10 - mov r17,r11 - mov r7,r25 - mov r0,r31 - com r0 - and r7,r0 - eor r7,r30 - mov r8,r16 - mov r0,r25 - com r0 - and r8,r0 - eor r8,r31 - mov r9,r17 - mov r0,r16 - com r0 - and r9,r0 - eor r9,r25 - mov r10,r30 - mov r0,r17 - com r0 - and r10,r0 - eor r10,r16 - mov r11,r31 - mov r0,r30 - com r0 - and r11,r0 - eor r11,r17 - mov r30,r12 - mov r31,r13 - mov r25,r14 - mov r16,r15 - mov r17,r24 - mov r12,r25 - mov r0,r31 - com r0 - and r12,r0 - eor r12,r30 - mov r13,r16 - mov r0,r25 - com r0 - and r13,r0 - eor r13,r31 - mov r14,r17 - mov r0,r16 - com r0 - and r14,r0 - eor r14,r25 - mov r15,r30 - mov r0,r17 - com r0 - and r15,r0 - eor r15,r16 - mov r24,r31 - mov r0,r30 - com r0 - and r24,r0 - eor r24,r17 - ret -420: - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r22 - std Z+5,r23 - std Z+6,r26 - std Z+7,r27 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r4 - std Z+13,r5 - std Z+14,r6 - std Z+15,r7 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - std Z+24,r24 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size keccakp_200_permute, .-keccakp_200_permute - - .text -.global keccakp_400_permute - .type keccakp_400_permute, @function -keccakp_400_permute: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - movw r30,r24 -.L__stack_usage = 17 - ld r6,Z - ldd r7,Z+1 - ldd r8,Z+2 - ldd r9,Z+3 - ldd r10,Z+4 - ldd r11,Z+5 - ldd r12,Z+6 - ldd r13,Z+7 - ldd r14,Z+8 - ldd r15,Z+9 - cpi 
r22,20 - brcs 15f - rcall 153f - ldi r23,1 - eor r6,r23 -15: - cpi r22,19 - brcs 23f - rcall 153f - ldi r23,130 - eor r6,r23 - ldi r17,128 - eor r7,r17 -23: - cpi r22,18 - brcs 31f - rcall 153f - ldi r23,138 - eor r6,r23 - ldi r17,128 - eor r7,r17 -31: - cpi r22,17 - brcs 37f - rcall 153f - ldi r23,128 - eor r7,r23 -37: - cpi r22,16 - brcs 45f - rcall 153f - ldi r23,139 - eor r6,r23 - ldi r17,128 - eor r7,r17 -45: - cpi r22,15 - brcs 51f - rcall 153f - ldi r23,1 - eor r6,r23 -51: - cpi r22,14 - brcs 59f - rcall 153f - ldi r23,129 - eor r6,r23 - ldi r17,128 - eor r7,r17 -59: - cpi r22,13 - brcs 67f - rcall 153f - ldi r23,9 - eor r6,r23 - ldi r17,128 - eor r7,r17 -67: - cpi r22,12 - brcs 73f - rcall 153f - ldi r23,138 - eor r6,r23 -73: - cpi r22,11 - brcs 79f - rcall 153f - ldi r23,136 - eor r6,r23 -79: - cpi r22,10 - brcs 87f - rcall 153f - ldi r23,9 - eor r6,r23 - ldi r17,128 - eor r7,r17 -87: - cpi r22,9 - brcs 93f - rcall 153f - ldi r23,10 - eor r6,r23 -93: - cpi r22,8 - brcs 101f - rcall 153f - ldi r23,139 - eor r6,r23 - ldi r17,128 - eor r7,r17 -101: - cpi r22,7 - brcs 107f - rcall 153f - ldi r23,139 - eor r6,r23 -107: - cpi r22,6 - brcs 115f - rcall 153f - ldi r23,137 - eor r6,r23 - ldi r17,128 - eor r7,r17 -115: - cpi r22,5 - brcs 123f - rcall 153f - ldi r23,3 - eor r6,r23 - ldi r17,128 - eor r7,r17 -123: - cpi r22,4 - brcs 131f - rcall 153f - ldi r23,2 - eor r6,r23 - ldi r17,128 - eor r7,r17 -131: - cpi r22,3 - brcs 137f - rcall 153f - ldi r23,128 - eor r6,r23 -137: - cpi r22,2 - brcs 145f - rcall 153f - ldi r23,10 - eor r6,r23 - ldi r17,128 - eor r7,r17 -145: - cpi r22,1 - brcs 151f - rcall 153f - ldi r23,10 - eor r6,r23 -151: - rjmp 1004f -153: - movw r18,r6 - ldd r0,Z+10 - eor r18,r0 - ldd r0,Z+11 - eor r19,r0 - ldd r0,Z+20 - eor r18,r0 - ldd r0,Z+21 - eor r19,r0 - ldd r0,Z+30 - eor r18,r0 - ldd r0,Z+31 - eor r19,r0 - ldd r0,Z+40 - eor r18,r0 - ldd r0,Z+41 - eor r19,r0 - movw r20,r8 - ldd r0,Z+12 - eor r20,r0 - ldd r0,Z+13 - eor r21,r0 - ldd r0,Z+22 - eor r20,r0 - ldd r0,Z+23 - eor r21,r0 - ldd r0,Z+32 - eor r20,r0 - ldd r0,Z+33 - eor r21,r0 - ldd r0,Z+42 - eor r20,r0 - ldd r0,Z+43 - eor r21,r0 - movw r26,r10 - ldd r0,Z+14 - eor r26,r0 - ldd r0,Z+15 - eor r27,r0 - ldd r0,Z+24 - eor r26,r0 - ldd r0,Z+25 - eor r27,r0 - ldd r0,Z+34 - eor r26,r0 - ldd r0,Z+35 - eor r27,r0 - ldd r0,Z+44 - eor r26,r0 - ldd r0,Z+45 - eor r27,r0 - movw r2,r12 - ldd r0,Z+16 - eor r2,r0 - ldd r0,Z+17 - eor r3,r0 - ldd r0,Z+26 - eor r2,r0 - ldd r0,Z+27 - eor r3,r0 - ldd r0,Z+36 - eor r2,r0 - ldd r0,Z+37 - eor r3,r0 - ldd r0,Z+46 - eor r2,r0 - ldd r0,Z+47 - eor r3,r0 - movw r4,r14 - ldd r0,Z+18 - eor r4,r0 - ldd r0,Z+19 - eor r5,r0 - ldd r0,Z+28 - eor r4,r0 - ldd r0,Z+29 - eor r5,r0 - ldd r0,Z+38 - eor r4,r0 - ldd r0,Z+39 - eor r5,r0 - ldd r0,Z+48 - eor r4,r0 - ldd r0,Z+49 - eor r5,r0 - movw r24,r20 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r4 - eor r25,r5 - eor r6,r24 - eor r7,r25 - ldd r0,Z+10 - eor r0,r24 - std Z+10,r0 - ldd r0,Z+11 - eor r0,r25 - std Z+11,r0 - ldd r0,Z+20 - eor r0,r24 - std Z+20,r0 - ldd r0,Z+21 - eor r0,r25 - std Z+21,r0 - ldd r0,Z+30 - eor r0,r24 - std Z+30,r0 - ldd r0,Z+31 - eor r0,r25 - std Z+31,r0 - ldd r0,Z+40 - eor r0,r24 - std Z+40,r0 - ldd r0,Z+41 - eor r0,r25 - std Z+41,r0 - movw r24,r26 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r18 - eor r25,r19 - eor r8,r24 - eor r9,r25 - ldd r0,Z+12 - eor r0,r24 - std Z+12,r0 - ldd r0,Z+13 - eor r0,r25 - std Z+13,r0 - ldd r0,Z+22 - eor r0,r24 - std Z+22,r0 - ldd r0,Z+23 - eor r0,r25 - std Z+23,r0 - ldd r0,Z+32 - eor r0,r24 - std Z+32,r0 - 
ldd r0,Z+33 - eor r0,r25 - std Z+33,r0 - ldd r0,Z+42 - eor r0,r24 - std Z+42,r0 - ldd r0,Z+43 - eor r0,r25 - std Z+43,r0 - movw r24,r2 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r20 - eor r25,r21 - eor r10,r24 - eor r11,r25 - ldd r0,Z+14 - eor r0,r24 - std Z+14,r0 - ldd r0,Z+15 - eor r0,r25 - std Z+15,r0 - ldd r0,Z+24 - eor r0,r24 - std Z+24,r0 - ldd r0,Z+25 - eor r0,r25 - std Z+25,r0 - ldd r0,Z+34 - eor r0,r24 - std Z+34,r0 - ldd r0,Z+35 - eor r0,r25 - std Z+35,r0 - ldd r0,Z+44 - eor r0,r24 - std Z+44,r0 - ldd r0,Z+45 - eor r0,r25 - std Z+45,r0 - movw r24,r4 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r26 - eor r25,r27 - eor r12,r24 - eor r13,r25 - ldd r0,Z+16 - eor r0,r24 - std Z+16,r0 - ldd r0,Z+17 - eor r0,r25 - std Z+17,r0 - ldd r0,Z+26 - eor r0,r24 - std Z+26,r0 - ldd r0,Z+27 - eor r0,r25 - std Z+27,r0 - ldd r0,Z+36 - eor r0,r24 - std Z+36,r0 - ldd r0,Z+37 - eor r0,r25 - std Z+37,r0 - ldd r0,Z+46 - eor r0,r24 - std Z+46,r0 - ldd r0,Z+47 - eor r0,r25 - std Z+47,r0 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r2 - eor r25,r3 - eor r14,r24 - eor r15,r25 - ldd r0,Z+18 - eor r0,r24 - std Z+18,r0 - ldd r0,Z+19 - eor r0,r25 - std Z+19,r0 - ldd r0,Z+28 - eor r0,r24 - std Z+28,r0 - ldd r0,Z+29 - eor r0,r25 - std Z+29,r0 - ldd r0,Z+38 - eor r0,r24 - std Z+38,r0 - ldd r0,Z+39 - eor r0,r25 - std Z+39,r0 - ldd r0,Z+48 - eor r0,r24 - std Z+48,r0 - ldd r0,Z+49 - eor r0,r25 - std Z+49,r0 - movw r24,r8 - ldd r8,Z+12 - ldd r9,Z+13 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldd r18,Z+18 - ldd r19,Z+19 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+12,r18 - std Z+13,r19 - ldd r18,Z+44 - ldd r19,Z+45 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+18,r18 - std Z+19,r19 - ldd r18,Z+28 - ldd r19,Z+29 - mov r0,r19 - mov r19,r18 - mov r18,r0 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std Z+44,r18 - std Z+45,r19 - ldd r18,Z+40 - ldd r19,Z+41 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+28,r18 - std Z+29,r19 - movw r18,r10 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+40,r18 - std Z+41,r19 - ldd r10,Z+24 - ldd r11,Z+25 - mov r0,r11 - mov r11,r10 - mov r10,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldd r18,Z+26 - ldd r19,Z+27 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - std Z+24,r18 - std Z+25,r19 - ldd r18,Z+38 - ldd r19,Z+39 - mov r0,r19 - mov r19,r18 - mov r18,r0 - std Z+26,r18 - std Z+27,r19 - ldd r18,Z+46 - ldd r19,Z+47 - mov r0,r19 - mov r19,r18 - mov r18,r0 - std Z+38,r18 - std Z+39,r19 - ldd r18,Z+30 - ldd r19,Z+31 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - std Z+46,r18 - std Z+47,r19 - movw r18,r14 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+30,r18 - std Z+31,r19 - ldd r14,Z+48 - ldd r15,Z+49 - mov r0,r1 - lsr r15 - ror r14 - ror r0 - lsr r15 - ror r14 - ror r0 - or r15,r0 - ldd r18,Z+42 - ldd r19,Z+43 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+48,r18 - std Z+49,r19 - ldd r18,Z+16 - ldd r19,Z+17 - mov r0,r19 - mov r19,r18 - mov r18,r0 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std 
Z+42,r18 - std Z+43,r19 - ldd r18,Z+32 - ldd r19,Z+33 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+16,r18 - std Z+17,r19 - ldd r18,Z+10 - ldd r19,Z+11 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+32,r18 - std Z+33,r19 - movw r18,r12 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+10,r18 - std Z+11,r19 - ldd r12,Z+36 - ldd r13,Z+37 - mov r0,r13 - mov r13,r12 - mov r12,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - or r13,r0 - ldd r18,Z+34 - ldd r19,Z+35 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std Z+36,r18 - std Z+37,r19 - ldd r18,Z+22 - ldd r19,Z+23 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+34,r18 - std Z+35,r19 - ldd r18,Z+14 - ldd r19,Z+15 - mov r0,r19 - mov r19,r18 - mov r18,r0 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+22,r18 - std Z+23,r19 - ldd r18,Z+20 - ldd r19,Z+21 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+14,r18 - std Z+15,r19 - lsl r24 - rol r25 - adc r24,r1 - std Z+20,r24 - std Z+21,r25 - movw r18,r6 - movw r20,r8 - movw r26,r10 - movw r2,r12 - movw r4,r14 - movw r6,r26 - mov r0,r20 - com r0 - and r6,r0 - mov r0,r21 - com r0 - and r7,r0 - eor r6,r18 - eor r7,r19 - movw r8,r2 - mov r0,r26 - com r0 - and r8,r0 - mov r0,r27 - com r0 - and r9,r0 - eor r8,r20 - eor r9,r21 - movw r10,r4 - mov r0,r2 - com r0 - and r10,r0 - mov r0,r3 - com r0 - and r11,r0 - eor r10,r26 - eor r11,r27 - movw r12,r18 - mov r0,r4 - com r0 - and r12,r0 - mov r0,r5 - com r0 - and r13,r0 - eor r12,r2 - eor r13,r3 - movw r14,r20 - mov r0,r18 - com r0 - and r14,r0 - mov r0,r19 - com r0 - and r15,r0 - eor r14,r4 - eor r15,r5 - ldd r18,Z+10 - ldd r19,Z+11 - ldd r20,Z+12 - ldd r21,Z+13 - ldd r26,Z+14 - ldd r27,Z+15 - ldd r2,Z+16 - ldd r3,Z+17 - ldd r4,Z+18 - ldd r5,Z+19 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+10,r24 - std Z+11,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+12,r24 - std Z+13,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+14,r24 - std Z+15,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+16,r24 - std Z+17,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+18,r24 - std Z+19,r25 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - ldd r26,Z+24 - ldd r27,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r4,Z+28 - ldd r5,Z+29 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+20,r24 - std Z+21,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+22,r24 - std Z+23,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+24,r24 - std Z+25,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov 
r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+26,r24 - std Z+27,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+28,r24 - std Z+29,r25 - ldd r18,Z+30 - ldd r19,Z+31 - ldd r20,Z+32 - ldd r21,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - ldd r2,Z+36 - ldd r3,Z+37 - ldd r4,Z+38 - ldd r5,Z+39 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+30,r24 - std Z+31,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+32,r24 - std Z+33,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+34,r24 - std Z+35,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+36,r24 - std Z+37,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+38,r24 - std Z+39,r25 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - ldd r26,Z+44 - ldd r27,Z+45 - ldd r2,Z+46 - ldd r3,Z+47 - ldd r4,Z+48 - ldd r5,Z+49 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+40,r24 - std Z+41,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+42,r24 - std Z+43,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+44,r24 - std Z+45,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+46,r24 - std Z+47,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+48,r24 - std Z+49,r25 - ret -1004: - st Z,r6 - std Z+1,r7 - std Z+2,r8 - std Z+3,r9 - std Z+4,r10 - std Z+5,r11 - std Z+6,r12 - std Z+7,r13 - std Z+8,r14 - std Z+9,r15 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size keccakp_400_permute, .-keccakp_400_permute - -#endif diff --git a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-keccak.c b/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-keccak.c deleted file mode 100644 index 60539df..0000000 --- a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-keccak.c +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-keccak.h" - -#if !defined(__AVR__) - -/* Faster method to compute ((x + y) % 5) that avoids the division */ -static unsigned char const addMod5Table[9] = { - 0, 1, 2, 3, 4, 0, 1, 2, 3 -}; -#define addMod5(x, y) (addMod5Table[(x) + (y)]) - -void keccakp_200_permute(keccakp_200_state_t *state) -{ - static uint8_t const RC[18] = { - 0x01, 0x82, 0x8A, 0x00, 0x8B, 0x01, 0x81, 0x09, - 0x8A, 0x88, 0x09, 0x0A, 0x8B, 0x8B, 0x89, 0x03, - 0x02, 0x80 - }; - uint8_t C[5]; - uint8_t D; - unsigned round; - unsigned index, index2; - for (round = 0; round < 18; ++round) { - /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. Compute D on the fly */ - for (index = 0; index < 5; ++index) { - C[index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; - } - for (index = 0; index < 5; ++index) { - D = C[addMod5(index, 4)] ^ - leftRotate1_8(C[addMod5(index, 1)]); - for (index2 = 0; index2 < 5; ++index2) - state->A[index2][index] ^= D; - } - - /* Step mapping rho and pi combined into a single step. - * Rotate all lanes by a specific offset and rearrange */ - D = state->A[0][1]; - state->A[0][1] = leftRotate4_8(state->A[1][1]); - state->A[1][1] = leftRotate4_8(state->A[1][4]); - state->A[1][4] = leftRotate5_8(state->A[4][2]); - state->A[4][2] = leftRotate7_8(state->A[2][4]); - state->A[2][4] = leftRotate2_8(state->A[4][0]); - state->A[4][0] = leftRotate6_8(state->A[0][2]); - state->A[0][2] = leftRotate3_8(state->A[2][2]); - state->A[2][2] = leftRotate1_8(state->A[2][3]); - state->A[2][3] = state->A[3][4]; - state->A[3][4] = state->A[4][3]; - state->A[4][3] = leftRotate1_8(state->A[3][0]); - state->A[3][0] = leftRotate3_8(state->A[0][4]); - state->A[0][4] = leftRotate6_8(state->A[4][4]); - state->A[4][4] = leftRotate2_8(state->A[4][1]); - state->A[4][1] = leftRotate7_8(state->A[1][3]); - state->A[1][3] = leftRotate5_8(state->A[3][1]); - state->A[3][1] = leftRotate4_8(state->A[1][0]); - state->A[1][0] = leftRotate4_8(state->A[0][3]); - state->A[0][3] = leftRotate5_8(state->A[3][3]); - state->A[3][3] = leftRotate7_8(state->A[3][2]); - state->A[3][2] = leftRotate2_8(state->A[2][1]); - state->A[2][1] = leftRotate6_8(state->A[1][2]); - state->A[1][2] = leftRotate3_8(state->A[2][0]); - state->A[2][0] = leftRotate1_8(D); - - /* Step mapping chi. Combine each lane with two others in its row */ - for (index = 0; index < 5; ++index) { - C[0] = state->A[index][0]; - C[1] = state->A[index][1]; - C[2] = state->A[index][2]; - C[3] = state->A[index][3]; - C[4] = state->A[index][4]; - for (index2 = 0; index2 < 5; ++index2) { - state->A[index][index2] = - C[index2] ^ - ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); - } - } - - /* Step mapping iota. 
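The addMod5 table above removes a division from the permutation's inner loops: both operands are always in the range 0..4, so their sum never exceeds 8 and a nine-entry table is enough. A quick standalone check of the equivalence (the table and macro are repeated here only so the snippet compiles on its own):

    #include <assert.h>

    static const unsigned char addMod5Table[9] = { 0, 1, 2, 3, 4, 0, 1, 2, 3 };
    #define addMod5(x, y) (addMod5Table[(x) + (y)])

    int main(void)
    {
        unsigned x, y;
        for (x = 0; x < 5; ++x)
            for (y = 0; y < 5; ++y)
                assert(addMod5(x, y) == (x + y) % 5);
        return 0;
    }
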
XOR A[0][0] with the round constant */ - state->A[0][0] ^= RC[round]; - } -} - -#if defined(LW_UTIL_LITTLE_ENDIAN) -#define keccakp_400_permute_host keccakp_400_permute -#endif - -/* Keccak-p[400] that assumes that the input is already in host byte order */ -void keccakp_400_permute_host(keccakp_400_state_t *state, unsigned rounds) -{ - static uint16_t const RC[20] = { - 0x0001, 0x8082, 0x808A, 0x8000, 0x808B, 0x0001, 0x8081, 0x8009, - 0x008A, 0x0088, 0x8009, 0x000A, 0x808B, 0x008B, 0x8089, 0x8003, - 0x8002, 0x0080, 0x800A, 0x000A - }; - uint16_t C[5]; - uint16_t D; - unsigned round; - unsigned index, index2; - for (round = 20 - rounds; round < 20; ++round) { - /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. Compute D on the fly */ - for (index = 0; index < 5; ++index) { - C[index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; - } - for (index = 0; index < 5; ++index) { - D = C[addMod5(index, 4)] ^ - leftRotate1_16(C[addMod5(index, 1)]); - for (index2 = 0; index2 < 5; ++index2) - state->A[index2][index] ^= D; - } - - /* Step mapping rho and pi combined into a single step. - * Rotate all lanes by a specific offset and rearrange */ - D = state->A[0][1]; - state->A[0][1] = leftRotate12_16(state->A[1][1]); - state->A[1][1] = leftRotate4_16 (state->A[1][4]); - state->A[1][4] = leftRotate13_16(state->A[4][2]); - state->A[4][2] = leftRotate7_16 (state->A[2][4]); - state->A[2][4] = leftRotate2_16 (state->A[4][0]); - state->A[4][0] = leftRotate14_16(state->A[0][2]); - state->A[0][2] = leftRotate11_16(state->A[2][2]); - state->A[2][2] = leftRotate9_16 (state->A[2][3]); - state->A[2][3] = leftRotate8_16 (state->A[3][4]); - state->A[3][4] = leftRotate8_16 (state->A[4][3]); - state->A[4][3] = leftRotate9_16 (state->A[3][0]); - state->A[3][0] = leftRotate11_16(state->A[0][4]); - state->A[0][4] = leftRotate14_16(state->A[4][4]); - state->A[4][4] = leftRotate2_16 (state->A[4][1]); - state->A[4][1] = leftRotate7_16 (state->A[1][3]); - state->A[1][3] = leftRotate13_16(state->A[3][1]); - state->A[3][1] = leftRotate4_16 (state->A[1][0]); - state->A[1][0] = leftRotate12_16(state->A[0][3]); - state->A[0][3] = leftRotate5_16 (state->A[3][3]); - state->A[3][3] = leftRotate15_16(state->A[3][2]); - state->A[3][2] = leftRotate10_16(state->A[2][1]); - state->A[2][1] = leftRotate6_16 (state->A[1][2]); - state->A[1][2] = leftRotate3_16 (state->A[2][0]); - state->A[2][0] = leftRotate1_16(D); - - /* Step mapping chi. Combine each lane with two others in its row */ - for (index = 0; index < 5; ++index) { - C[0] = state->A[index][0]; - C[1] = state->A[index][1]; - C[2] = state->A[index][2]; - C[3] = state->A[index][3]; - C[4] = state->A[index][4]; - for (index2 = 0; index2 < 5; ++index2) { - state->A[index][index2] = - C[index2] ^ - ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); - } - } - - /* Step mapping iota. XOR A[0][0] with the round constant */ - state->A[0][0] ^= RC[round]; - } -} - -#if !defined(LW_UTIL_LITTLE_ENDIAN) - -/** - * \brief Reverses the bytes in a Keccak-p[400] state. - * - * \param state The Keccak-p[400] state to apply byte-reversal to. 
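Note that keccakp_400_permute_host starts its round loop at 20 - rounds, so a reduced-round call uses the last `rounds` entries of the RC table rather than the first ones, i.e. the constants of the final rounds of Keccak-p[400]. A small standalone illustration; the choice of 16 rounds and the main function are only for demonstration:

    #include <stdio.h>

    int main(void)
    {
        static const unsigned short RC[20] = {
            0x0001, 0x8082, 0x808A, 0x8000, 0x808B, 0x0001, 0x8081, 0x8009,
            0x008A, 0x0088, 0x8009, 0x000A, 0x808B, 0x008B, 0x8089, 0x8003,
            0x8002, 0x0080, 0x800A, 0x000A
        };
        unsigned rounds = 16;   /* reduced-round call, e.g. for a keyed mode */
        unsigned round;
        for (round = 20 - rounds; round < 20; ++round)
            printf("round %2u uses RC = 0x%04X\n", round, (unsigned)RC[round]);
        return 0;
    }
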
- */ -static void keccakp_400_reverse_bytes(keccakp_400_state_t *state) -{ - unsigned index; - unsigned char temp1; - unsigned char temp2; - for (index = 0; index < 50; index += 2) { - temp1 = state->B[index]; - temp2 = state->B[index + 1]; - state->B[index] = temp2; - state->B[index + 1] = temp1; - } -} - -/* Keccak-p[400] that requires byte reversal on input and output */ -void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds) -{ - keccakp_400_reverse_bytes(state); - keccakp_400_permute_host(state, rounds); - keccakp_400_reverse_bytes(state); -} - -#endif - -#endif /* !__AVR__ */ diff --git a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-keccak.h b/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-keccak.h deleted file mode 100644 index 2ffef42..0000000 --- a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-keccak.h +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_KECCAK_H -#define LW_INTERNAL_KECCAK_H - -#include "internal-util.h" - -/** - * \file internal-keccak.h - * \brief Internal implementation of the Keccak-p permutation. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the state for the Keccak-p[200] permutation. - */ -#define KECCAKP_200_STATE_SIZE 25 - -/** - * \brief Size of the state for the Keccak-p[400] permutation. - */ -#define KECCAKP_400_STATE_SIZE 50 - -/** - * \brief Structure of the internal state of the Keccak-p[200] permutation. - */ -typedef union -{ - uint8_t A[5][5]; /**< Keccak-p[200] state as a 5x5 array of lanes */ - uint8_t B[25]; /**< Keccak-p[200] state as a byte array */ - -} keccakp_200_state_t; - -/** - * \brief Structure of the internal state of the Keccak-p[400] permutation. - */ -typedef union -{ - uint16_t A[5][5]; /**< Keccak-p[400] state as a 5x5 array of lanes */ - uint8_t B[50]; /**< Keccak-p[400] state as a byte array */ - -} keccakp_400_state_t; - -/** - * \brief Permutes the Keccak-p[200] state. - * - * \param state The Keccak-p[200] state to be permuted. - */ -void keccakp_200_permute(keccakp_200_state_t *state); - -/** - * \brief Permutes the Keccak-p[400] state, which is assumed to be in - * little-endian byte order. - * - * \param state The Keccak-p[400] state to be permuted. - * \param rounds The number of rounds to perform (up to 20). 
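The keccakp_*_state_t unions above let byte-oriented sponge code and lane-oriented permutation code share one buffer: absorb/squeeze works on the B view while the round function works on the A view. A minimal standalone sketch of that dual view; k400_state_t re-declares the same shape locally (illustrative only) so the snippet compiles without the header:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Same shape as keccakp_400_state_t above, re-declared for the example. */
    typedef union {
        uint16_t A[5][5];   /* 25 lanes of 16 bits */
        uint8_t  B[50];     /* the same 50 bytes */
    } k400_state_t;

    int main(void)
    {
        k400_state_t st;
        memset(st.B, 0, sizeof(st.B));
        st.B[0] ^= 0x1F;    /* byte-oriented absorb touches the B view... */
        /* ...and the lane-oriented permutation reads the A view.  On a
         * little-endian host both views agree; on big-endian hosts the
         * keccakp_400_permute wrapper above byte-swaps around the lanes. */
        printf("lane A[0][0] = 0x%04X\n", (unsigned)st.A[0][0]);
        return 0;
    }
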
- */ -void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-util.h b/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
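For readers skimming the deleted rhys-avr copy of internal-util.h above (the surviving rhys copy keeps the same helpers), here is a minimal standalone sketch of how the endian load/store and XOR-block macros are meant to be used. The test program itself is illustrative and not part of this patch.

#include <stdint.h>
#include <stdio.h>
#include "internal-util.h"

int main(void)
{
    unsigned char buf[4];
    uint32_t x = 0x11223344;

    be_store_word32(buf, x);               /* buf = 11 22 33 44 */
    uint32_t back_be = be_load_word32(buf); /* round-trips to x */

    le_store_word32(buf, x);               /* buf = 44 33 22 11 */
    uint32_t back_le = le_load_word32(buf); /* also round-trips to x */

    unsigned char a[4] = {1, 2, 3, 4};
    unsigned char b[4] = {4, 3, 2, 1};
    lw_xor_block(a, b, 4);                 /* a[i] ^= b[i], so a[0] becomes 5 */

    printf("%08lx %08lx %u\n",
           (unsigned long)back_be, (unsigned long)back_le, (unsigned)a[0]);
    return 0;
}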
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
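The composed rotations above only matter on AVR, where rotations by 1 bit or by a multiple of 8 bits are cheap. The identity they rely on (for example, rotate-left-by-5 equals rotate-left-by-8 followed by rotate-right-by-3) can be sanity-checked on a host machine with a small program like the following sketch; the helper names are hypothetical and not part of the library.

#include <assert.h>
#include <stdint.h>

/* Generic barrel-shift rotate, matching the leftRotate macro above. */
static uint32_t rol32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32 - bits));
}

/* Composed form of leftRotate5: left by 8, then right by 1 three times,
 * mirroring the LW_CRYPTO_ROTATE32_COMPOSED definitions. */
static uint32_t rol32_5_composed(uint32_t x)
{
    uint32_t t = rol32(x, 8);
    t = (t >> 1) | (t << 31);
    t = (t >> 1) | (t << 31);
    t = (t >> 1) | (t << 31);
    return t;
}

int main(void)
{
    uint32_t x = 0x12345678;
    assert(rol32_5_composed(x) == rol32(x, 5));  /* composed == generic */
    return 0;
}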
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/isap.c b/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/isap.c deleted file mode 100644 index 26d50a3..0000000 --- a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/isap.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "isap.h" -#include "internal-keccak.h" -#include "internal-ascon.h" -#include - -aead_cipher_t const isap_keccak_128a_cipher = { - "ISAP-K-128A", - ISAP_KEY_SIZE, - ISAP_NONCE_SIZE, - ISAP_TAG_SIZE, - AEAD_FLAG_NONE, - isap_keccak_128a_aead_encrypt, - isap_keccak_128a_aead_decrypt -}; - -aead_cipher_t const isap_ascon_128a_cipher = { - "ISAP-A-128A", - ISAP_KEY_SIZE, - ISAP_NONCE_SIZE, - ISAP_TAG_SIZE, - AEAD_FLAG_NONE, - isap_ascon_128a_aead_encrypt, - isap_ascon_128a_aead_decrypt -}; - -aead_cipher_t const isap_keccak_128_cipher = { - "ISAP-K-128", - ISAP_KEY_SIZE, - ISAP_NONCE_SIZE, - ISAP_TAG_SIZE, - AEAD_FLAG_NONE, - isap_keccak_128_aead_encrypt, - isap_keccak_128_aead_decrypt -}; - -aead_cipher_t const isap_ascon_128_cipher = { - "ISAP-A-128", - ISAP_KEY_SIZE, - ISAP_NONCE_SIZE, - ISAP_TAG_SIZE, - AEAD_FLAG_NONE, - isap_ascon_128_aead_encrypt, - isap_ascon_128_aead_decrypt -}; - -/* ISAP-K-128A */ -#define ISAP_ALG_NAME isap_keccak_128a -#define ISAP_RATE (144 / 8) -#define ISAP_sH 16 -#define ISAP_sE 8 -#define ISAP_sB 1 -#define ISAP_sK 8 -#define ISAP_STATE keccakp_400_state_t -#define ISAP_PERMUTE(s,r) keccakp_400_permute((s), (r)) -#include "internal-isap.h" - -/* ISAP-A-128A */ -#define ISAP_ALG_NAME isap_ascon_128a -#define ISAP_RATE (64 / 8) -#define ISAP_sH 12 -#define ISAP_sE 6 -#define ISAP_sB 1 -#define ISAP_sK 12 -#define ISAP_STATE ascon_state_t -#define ISAP_PERMUTE(s,r) ascon_permute((s), 12 - (r)) -#include "internal-isap.h" - -/* ISAP-K-128 */ -#define ISAP_ALG_NAME isap_keccak_128 -#define ISAP_RATE (144 / 8) -#define ISAP_sH 20 -#define ISAP_sE 12 -#define ISAP_sB 12 -#define ISAP_sK 12 -#define ISAP_STATE keccakp_400_state_t -#define ISAP_PERMUTE(s,r) keccakp_400_permute((s), (r)) -#include "internal-isap.h" - -/* ISAP-A-128 */ -#define ISAP_ALG_NAME isap_ascon_128 -#define ISAP_RATE (64 / 8) -#define ISAP_sH 12 -#define ISAP_sE 12 -#define ISAP_sB 12 -#define ISAP_sK 12 -#define ISAP_STATE ascon_state_t -#define ISAP_PERMUTE(s,r) ascon_permute((s), 12 - (r)) -#include "internal-isap.h" diff --git a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/isap.h b/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/isap.h deleted file mode 100644 index ddf8203..0000000 --- a/isap/Implementations/crypto_aead/isapa128v20/rhys-avr/isap.h +++ /dev/null @@ -1,330 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
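The deleted isap.c above instantiates the four family members by defining the ISAP_ALG_NAME / ISAP_RATE / ISAP_sH / ISAP_sE / ISAP_sB / ISAP_sK parameter macros and then including internal-isap.h once per variant. The sketch below illustrates the same define-then-expand technique with hypothetical names; it is not the real internal-isap.h, which expands into full AEAD routines rather than a one-line accessor.

#include <stdio.h>

#define CONCAT_(a, b) a##b
#define CONCAT(a, b) CONCAT_(a, b)

/* "Template body" that a real project would keep in a shared header. */
#define DEFINE_RATE_FN(name, rate) \
    static int CONCAT(name, _rate)(void) { return (rate); }

/* Instantiate two variants, the way isap.c instantiates ISAP-K-128A,
 * ISAP-A-128A, ISAP-K-128 and ISAP-A-128. */
DEFINE_RATE_FN(keccak_like, 144 / 8)
DEFINE_RATE_FN(ascon_like, 64 / 8)

int main(void)
{
    printf("%d %d\n", keccak_like_rate(), ascon_like_rate());
    return 0;
}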
- */ - -#ifndef LWCRYPTO_ISAP_H -#define LWCRYPTO_ISAP_H - -#include "aead-common.h" - -/** - * \file isap.h - * \brief ISAP authenticated encryption algorithm. - * - * ISAP is a family of authenticated encryption algorithms that are built - * around the Keccak-p[400] or ASCON permutations. There are four algorithms - * in the family, each of which have a 128-bit key, a 128-bit nonce, and a - * 128-bit tag: - * - * \li ISAP-K-128A based around the Keccak-p[400] permutation with a - * reduced number of rounds. This is the primary member in the family. - * \li ISAP-A-128A based around the ASCON permutation with a reduced - * number of rounds. - * \li ISAP-K-128 based around the Keccak-p[400] permutation. - * \li ISAP-A-128 based around the ASCON permutation. - * - * ISAP is designed to provide some protection against adversaries - * using differential power analysis to determine the key. The - * downside is that key setup is very slow. - * - * References: https://isap.iaik.tugraz.at/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all ISAP family members. - */ -#define ISAP_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for all ISAP family members. - */ -#define ISAP_TAG_SIZE 16 - -/** - * \brief Size of the nonce for all ISAP family members. - */ -#define ISAP_NONCE_SIZE 16 - -/** - * \brief Meta-information block for the ISAP-K-128A cipher. - */ -extern aead_cipher_t const isap_keccak_128a_cipher; - -/** - * \brief Meta-information block for the ISAP-A-128A cipher. - */ -extern aead_cipher_t const isap_ascon_128a_cipher; - -/** - * \brief Meta-information block for the ISAP-K-128 cipher. - */ -extern aead_cipher_t const isap_keccak_128_cipher; - -/** - * \brief Meta-information block for the ISAP-A-128 cipher. - */ -extern aead_cipher_t const isap_ascon_128_cipher; - -/** - * \brief Encrypts and authenticates a packet with ISAP-K-128A. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa isap_keccak_128a_aead_decrypt() - */ -int isap_keccak_128a_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ISAP-K-128A. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa isap_keccak_128a_aead_encrypt() - */ -int isap_keccak_128a_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ISAP-A-128A. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa isap_ascon_128a_aead_decrypt() - */ -int isap_ascon_128a_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ISAP-A-128A. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa isap_ascon_128a_aead_encrypt() - */ -int isap_ascon_128a_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ISAP-K-128. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa isap_keccak_128_aead_decrypt() - */ -int isap_keccak_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ISAP-K-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa isap_keccak_128_aead_encrypt() - */ -int isap_keccak_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ISAP-A-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa isap_ascon_128_aead_decrypt() - */ -int isap_ascon_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ISAP-A-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa isap_ascon_128_aead_encrypt() - */ -int isap_ascon_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-ascon-avr.S b/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-ascon-avr.S new file mode 100644 index 0000000..e8a4fb4 --- /dev/null +++ b/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-ascon-avr.S @@ -0,0 +1,778 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global ascon_permute + .type ascon_permute, @function +ascon_permute: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ldi r18,15 + sub r18,r22 + swap r18 + or r22,r18 + ldd r3,Z+16 + ldd r2,Z+17 + ldd r27,Z+18 + ldd r26,Z+19 + ldd r21,Z+20 + ldd r20,Z+21 + ldd r19,Z+22 + ldd r18,Z+23 + ldd r11,Z+32 + ldd r10,Z+33 + ldd r9,Z+34 + ldd r8,Z+35 + ldd r7,Z+36 + ldd r6,Z+37 + ldd r5,Z+38 + ldd r4,Z+39 +20: + eor r18,r22 + ldd r23,Z+7 + ldd r12,Z+15 + ldd r13,Z+31 + eor r23,r4 + eor r4,r13 + eor r18,r12 + mov r14,r23 + mov r15,r12 + mov r24,r18 + mov r25,r13 + mov r16,r4 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r18 + and r24,r13 + and r25,r4 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r18,r25 + eor r13,r16 + eor r4,r14 + eor r12,r23 + eor r23,r4 + eor r13,r18 + com r18 + std Z+7,r23 + std Z+15,r12 + std Z+31,r13 + std Z+39,r4 + ldd r23,Z+6 + ldd r12,Z+14 + ldd r13,Z+30 + eor r23,r5 + eor r5,r13 + eor r19,r12 + mov r14,r23 + mov r15,r12 + mov r24,r19 + mov r25,r13 + mov r16,r5 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r19 + and r24,r13 + and r25,r5 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r19,r25 + eor r13,r16 + eor r5,r14 + eor r12,r23 + eor r23,r5 + eor r13,r19 + com r19 + std Z+6,r23 + std Z+14,r12 + std Z+30,r13 + std Z+38,r5 + ldd r23,Z+5 + 
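The deleted isap.h above documents the AEAD entry points that the surviving rhys copy continues to provide. A hypothetical caller, using the declared signatures and the documented 16-byte key, nonce and tag sizes, would look roughly like the following sketch; error handling is trimmed down to the tag check on decryption.

#include <stdio.h>
#include "isap.h"

int main(void)
{
    unsigned char key[ISAP_KEY_SIZE] = {0};
    unsigned char nonce[ISAP_NONCE_SIZE] = {0};
    unsigned char msg[] = "hello";
    unsigned char ad[] = "header";
    unsigned char ct[sizeof(msg) + ISAP_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    /* Encrypt: ciphertext plus 16-byte tag is written to ct. */
    isap_ascon_128_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                                ad, sizeof(ad), NULL, nonce, key);

    /* Decrypt: a non-zero return means the tag (or parameters) were bad. */
    if (isap_ascon_128_aead_decrypt(pt, &ptlen, NULL, ct, ctlen,
                                    ad, sizeof(ad), nonce, key) != 0) {
        puts("tag check failed");
        return 1;
    }
    printf("recovered %llu bytes\n", ptlen);
    return 0;
}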
ldd r12,Z+13 + ldd r13,Z+29 + eor r23,r6 + eor r6,r13 + eor r20,r12 + mov r14,r23 + mov r15,r12 + mov r24,r20 + mov r25,r13 + mov r16,r6 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r20 + and r24,r13 + and r25,r6 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r20,r25 + eor r13,r16 + eor r6,r14 + eor r12,r23 + eor r23,r6 + eor r13,r20 + com r20 + std Z+5,r23 + std Z+13,r12 + std Z+29,r13 + std Z+37,r6 + ldd r23,Z+4 + ldd r12,Z+12 + ldd r13,Z+28 + eor r23,r7 + eor r7,r13 + eor r21,r12 + mov r14,r23 + mov r15,r12 + mov r24,r21 + mov r25,r13 + mov r16,r7 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r21 + and r24,r13 + and r25,r7 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r21,r25 + eor r13,r16 + eor r7,r14 + eor r12,r23 + eor r23,r7 + eor r13,r21 + com r21 + std Z+4,r23 + std Z+12,r12 + std Z+28,r13 + std Z+36,r7 + ldd r23,Z+3 + ldd r12,Z+11 + ldd r13,Z+27 + eor r23,r8 + eor r8,r13 + eor r26,r12 + mov r14,r23 + mov r15,r12 + mov r24,r26 + mov r25,r13 + mov r16,r8 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r26 + and r24,r13 + and r25,r8 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r26,r25 + eor r13,r16 + eor r8,r14 + eor r12,r23 + eor r23,r8 + eor r13,r26 + com r26 + std Z+3,r23 + std Z+11,r12 + std Z+27,r13 + std Z+35,r8 + ldd r23,Z+2 + ldd r12,Z+10 + ldd r13,Z+26 + eor r23,r9 + eor r9,r13 + eor r27,r12 + mov r14,r23 + mov r15,r12 + mov r24,r27 + mov r25,r13 + mov r16,r9 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r27 + and r24,r13 + and r25,r9 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r27,r25 + eor r13,r16 + eor r9,r14 + eor r12,r23 + eor r23,r9 + eor r13,r27 + com r27 + std Z+2,r23 + std Z+10,r12 + std Z+26,r13 + std Z+34,r9 + ldd r23,Z+1 + ldd r12,Z+9 + ldd r13,Z+25 + eor r23,r10 + eor r10,r13 + eor r2,r12 + mov r14,r23 + mov r15,r12 + mov r24,r2 + mov r25,r13 + mov r16,r10 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r2 + and r24,r13 + and r25,r10 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r2,r25 + eor r13,r16 + eor r10,r14 + eor r12,r23 + eor r23,r10 + eor r13,r2 + com r2 + std Z+1,r23 + std Z+9,r12 + std Z+25,r13 + std Z+33,r10 + ld r23,Z + ldd r12,Z+8 + ldd r13,Z+24 + eor r23,r11 + eor r11,r13 + eor r3,r12 + mov r14,r23 + mov r15,r12 + mov r24,r3 + mov r25,r13 + mov r16,r11 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r3 + and r24,r13 + and r25,r11 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r3,r25 + eor r13,r16 + eor r11,r14 + eor r12,r23 + eor r23,r11 + eor r13,r3 + com r3 + st Z,r23 + std Z+8,r12 + std Z+24,r13 + std Z+32,r11 + ld r11,Z + ldd r10,Z+1 + ldd r9,Z+2 + ldd r8,Z+3 + ldd r7,Z+4 + ldd r6,Z+5 + ldd r5,Z+6 + ldd r4,Z+7 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r14 + mov r14,r24 + mov r24,r16 + mov r16,r0 + mov r0,r13 + mov r13,r15 + mov r15,r25 + mov r25,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r17,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r4 + mov r0,r5 + push r6 + mov r4,r7 + mov r5,r8 + mov r6,r9 + mov r7,r10 + mov r8,r11 + pop r11 + mov r10,r0 + mov r9,r23 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + 
ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + st Z,r11 + std Z+1,r10 + std Z+2,r9 + std Z+3,r8 + std Z+4,r7 + std Z+5,r6 + std Z+6,r5 + std Z+7,r4 + ldd r11,Z+8 + ldd r10,Z+9 + ldd r9,Z+10 + ldd r8,Z+11 + ldd r7,Z+12 + ldd r6,Z+13 + ldd r5,Z+14 + ldd r4,Z+15 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r9 + mov r0,r10 + push r11 + mov r11,r8 + mov r10,r7 + mov r9,r6 + mov r8,r5 + mov r7,r4 + pop r6 + mov r5,r0 + mov r4,r23 + lsl r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + adc r4,r1 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + std Z+8,r11 + std Z+9,r10 + std Z+10,r9 + std Z+11,r8 + std Z+12,r7 + std Z+13,r6 + std Z+14,r5 + std Z+15,r4 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + bst r12,0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + bld r17,7 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + eor r24,r26 + eor r25,r27 + eor r16,r2 + eor r17,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r2 + rol r3 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r2 + rol r3 + adc r18,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + eor r26,r24 + eor r27,r25 + eor r2,r16 + eor r3,r17 + ldd r11,Z+24 + ldd r10,Z+25 + ldd r9,Z+26 + ldd r8,Z+27 + ldd r7,Z+28 + ldd r6,Z+29 + ldd r5,Z+30 + ldd r4,Z+31 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r17,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r0,r4 + mov r4,r6 + mov r6,r8 + mov r8,r10 + mov r10,r0 + mov r0,r5 + mov r5,r7 + mov r7,r9 + mov r9,r11 + mov r11,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + std Z+24,r11 + std Z+25,r10 + std Z+26,r9 + std Z+27,r8 + std Z+28,r7 + std Z+29,r6 + std Z+30,r5 + std Z+31,r4 + ldd r11,Z+32 + ldd r10,Z+33 + ldd r9,Z+34 + ldd r8,Z+35 + ldd r7,Z+36 + ldd r6,Z+37 + ldd r5,Z+38 + ldd r4,Z+39 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + lsl r12 + rol 
r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r9 + mov r0,r10 + push r11 + mov r11,r8 + mov r10,r7 + mov r9,r6 + mov r8,r5 + mov r7,r4 + pop r6 + mov r5,r0 + mov r4,r23 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + subi r22,15 + ldi r25,60 + cpse r22,r25 + rjmp 20b + std Z+16,r3 + std Z+17,r2 + std Z+18,r27 + std Z+19,r26 + std Z+20,r21 + std Z+21,r20 + std Z+22,r19 + std Z+23,r18 + std Z+32,r11 + std Z+33,r10 + std Z+34,r9 + std Z+35,r8 + std Z+36,r7 + std Z+37,r6 + std Z+38,r5 + std Z+39,r4 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size ascon_permute, .-ascon_permute + +#endif diff --git a/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-ascon.c b/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-ascon.c index 12a8ec6..657aabe 100644 --- a/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-ascon.c +++ b/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-ascon.c @@ -22,6 +22,8 @@ #include "internal-ascon.h" +#if !defined(__AVR__) + void ascon_permute(ascon_state_t *state, uint8_t first_round) { uint64_t t0, t1, t2, t3, t4; @@ -74,3 +76,5 @@ void ascon_permute(ascon_state_t *state, uint8_t first_round) state->S[4] = x4; #endif } + +#endif /* !__AVR__ */ diff --git a/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-keccak-avr.S b/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-keccak-avr.S new file mode 100644 index 0000000..e50ccaf --- /dev/null +++ b/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-keccak-avr.S @@ -0,0 +1,1552 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global keccakp_200_permute + .type keccakp_200_permute, @function +keccakp_200_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r26,Z+6 + ldd r27,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r4,Z+12 + ldd r5,Z+13 + ldd r6,Z+14 + ldd r7,Z+15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + ldd r24,Z+24 + push r31 + push r30 + rcall 82f + ldi r30,1 + eor r18,r30 + rcall 82f + ldi r30,130 + eor r18,r30 + rcall 82f + ldi r30,138 + eor r18,r30 + rcall 82f + mov r30,r1 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,1 + eor r18,r30 + rcall 82f + ldi r30,129 + eor r18,r30 + rcall 82f + ldi r30,9 + eor r18,r30 + rcall 82f + ldi r30,138 + eor r18,r30 + rcall 82f + ldi r30,136 + eor r18,r30 + rcall 82f + ldi r30,9 + eor r18,r30 + rcall 82f + ldi r30,10 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,137 + eor r18,r30 + rcall 82f + ldi r30,3 + eor r18,r30 + rcall 82f + ldi r30,2 + eor r18,r30 + rcall 82f + ldi r30,128 + eor r18,r30 + rjmp 420f +82: + mov r30,r18 + eor r30,r23 + eor r30,r2 + eor r30,r7 + eor r30,r12 + mov r31,r19 + eor r31,r26 
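The internal-ascon.c hunk above wraps the portable C permutation in #if !defined(__AVR__), while the new internal-ascon-avr.S is wrapped in #if defined(__AVR__), so each target links exactly one definition of ascon_permute. A minimal sketch of that dispatch pattern, with placeholder names:

/* permute.c: portable implementation, compiled out on AVR targets. */
#if !defined(__AVR__)
void permute(unsigned char *state)
{
    (void)state;   /* portable C body would go here */
}
#endif /* !__AVR__ */

/* permute-avr.S would carry the matching assembly:
 *
 *   #if defined(__AVR__)
 *   .global permute
 *       ; AVR body here
 *   #endif
 *
 * so the symbol "permute" is defined exactly once per build. */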
+ eor r31,r3 + eor r31,r8 + eor r31,r13 + mov r25,r20 + eor r25,r27 + eor r25,r4 + eor r25,r9 + eor r25,r14 + mov r16,r21 + eor r16,r28 + eor r16,r5 + eor r16,r10 + eor r16,r15 + mov r17,r22 + eor r17,r29 + eor r17,r6 + eor r17,r11 + eor r17,r24 + mov r0,r31 + lsl r0 + adc r0,r1 + eor r0,r17 + eor r18,r0 + eor r23,r0 + eor r2,r0 + eor r7,r0 + eor r12,r0 + mov r0,r25 + lsl r0 + adc r0,r1 + eor r0,r30 + eor r19,r0 + eor r26,r0 + eor r3,r0 + eor r8,r0 + eor r13,r0 + mov r0,r16 + lsl r0 + adc r0,r1 + eor r0,r31 + eor r20,r0 + eor r27,r0 + eor r4,r0 + eor r9,r0 + eor r14,r0 + mov r0,r17 + lsl r0 + adc r0,r1 + eor r0,r25 + eor r21,r0 + eor r28,r0 + eor r5,r0 + eor r10,r0 + eor r15,r0 + mov r0,r30 + lsl r0 + adc r0,r1 + eor r0,r16 + eor r22,r0 + eor r29,r0 + eor r6,r0 + eor r11,r0 + eor r24,r0 + mov r30,r19 + swap r26 + mov r19,r26 + swap r29 + mov r26,r29 + mov r0,r1 + lsr r14 + ror r0 + lsr r14 + ror r0 + lsr r14 + ror r0 + or r14,r0 + mov r29,r14 + bst r6,0 + lsr r6 + bld r6,7 + mov r14,r6 + lsl r12 + adc r12,r1 + lsl r12 + adc r12,r1 + mov r6,r12 + mov r0,r1 + lsr r20 + ror r0 + lsr r20 + ror r0 + or r20,r0 + mov r12,r20 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + mov r20,r4 + lsl r5 + adc r5,r1 + mov r4,r5 + mov r5,r11 + mov r11,r15 + lsl r7 + adc r7,r1 + mov r15,r7 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + mov r7,r22 + mov r0,r1 + lsr r24 + ror r0 + lsr r24 + ror r0 + or r24,r0 + mov r22,r24 + lsl r13 + adc r13,r1 + lsl r13 + adc r13,r1 + mov r24,r13 + bst r28,0 + lsr r28 + bld r28,7 + mov r13,r28 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r28,r8 + swap r23 + mov r8,r23 + swap r21 + mov r23,r21 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r21,r10 + bst r9,0 + lsr r9 + bld r9,7 + mov r10,r9 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + mov r9,r3 + mov r0,r1 + lsr r27 + ror r0 + lsr r27 + ror r0 + or r27,r0 + mov r3,r27 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + mov r27,r2 + lsl r30 + adc r30,r1 + mov r2,r30 + mov r30,r18 + mov r31,r19 + mov r25,r20 + mov r16,r21 + mov r17,r22 + mov r18,r25 + mov r0,r31 + com r0 + and r18,r0 + eor r18,r30 + mov r19,r16 + mov r0,r25 + com r0 + and r19,r0 + eor r19,r31 + mov r20,r17 + mov r0,r16 + com r0 + and r20,r0 + eor r20,r25 + mov r21,r30 + mov r0,r17 + com r0 + and r21,r0 + eor r21,r16 + mov r22,r31 + mov r0,r30 + com r0 + and r22,r0 + eor r22,r17 + mov r30,r23 + mov r31,r26 + mov r25,r27 + mov r16,r28 + mov r17,r29 + mov r23,r25 + mov r0,r31 + com r0 + and r23,r0 + eor r23,r30 + mov r26,r16 + mov r0,r25 + com r0 + and r26,r0 + eor r26,r31 + mov r27,r17 + mov r0,r16 + com r0 + and r27,r0 + eor r27,r25 + mov r28,r30 + mov r0,r17 + com r0 + and r28,r0 + eor r28,r16 + mov r29,r31 + mov r0,r30 + com r0 + and r29,r0 + eor r29,r17 + mov r30,r2 + mov r31,r3 + mov r25,r4 + mov r16,r5 + mov r17,r6 + mov r2,r25 + mov r0,r31 + com r0 + and r2,r0 + eor r2,r30 + mov r3,r16 + mov r0,r25 + com r0 + and r3,r0 + eor r3,r31 + mov r4,r17 + mov r0,r16 + com r0 + and r4,r0 + eor r4,r25 + mov r5,r30 + mov r0,r17 + com r0 + and r5,r0 + eor r5,r16 + mov r6,r31 + mov r0,r30 + com r0 + and r6,r0 + eor r6,r17 + mov r30,r7 + mov r31,r8 + mov r25,r9 + mov r16,r10 + mov r17,r11 + mov r7,r25 + mov r0,r31 + com r0 + and r7,r0 + eor r7,r30 + mov r8,r16 + mov r0,r25 + com r0 + and r8,r0 + eor r8,r31 + mov r9,r17 + mov r0,r16 + com r0 + and r9,r0 + eor r9,r25 + mov r10,r30 + mov r0,r17 + com r0 + and r10,r0 + eor r10,r16 + mov r11,r31 + 
mov r0,r30 + com r0 + and r11,r0 + eor r11,r17 + mov r30,r12 + mov r31,r13 + mov r25,r14 + mov r16,r15 + mov r17,r24 + mov r12,r25 + mov r0,r31 + com r0 + and r12,r0 + eor r12,r30 + mov r13,r16 + mov r0,r25 + com r0 + and r13,r0 + eor r13,r31 + mov r14,r17 + mov r0,r16 + com r0 + and r14,r0 + eor r14,r25 + mov r15,r30 + mov r0,r17 + com r0 + and r15,r0 + eor r15,r16 + mov r24,r31 + mov r0,r30 + com r0 + and r24,r0 + eor r24,r17 + ret +420: + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r22 + std Z+5,r23 + std Z+6,r26 + std Z+7,r27 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r4 + std Z+13,r5 + std Z+14,r6 + std Z+15,r7 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + std Z+24,r24 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size keccakp_200_permute, .-keccakp_200_permute + + .text +.global keccakp_400_permute + .type keccakp_400_permute, @function +keccakp_400_permute: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + movw r30,r24 +.L__stack_usage = 17 + ld r6,Z + ldd r7,Z+1 + ldd r8,Z+2 + ldd r9,Z+3 + ldd r10,Z+4 + ldd r11,Z+5 + ldd r12,Z+6 + ldd r13,Z+7 + ldd r14,Z+8 + ldd r15,Z+9 + cpi r22,20 + brcs 15f + rcall 153f + ldi r23,1 + eor r6,r23 +15: + cpi r22,19 + brcs 23f + rcall 153f + ldi r23,130 + eor r6,r23 + ldi r17,128 + eor r7,r17 +23: + cpi r22,18 + brcs 31f + rcall 153f + ldi r23,138 + eor r6,r23 + ldi r17,128 + eor r7,r17 +31: + cpi r22,17 + brcs 37f + rcall 153f + ldi r23,128 + eor r7,r23 +37: + cpi r22,16 + brcs 45f + rcall 153f + ldi r23,139 + eor r6,r23 + ldi r17,128 + eor r7,r17 +45: + cpi r22,15 + brcs 51f + rcall 153f + ldi r23,1 + eor r6,r23 +51: + cpi r22,14 + brcs 59f + rcall 153f + ldi r23,129 + eor r6,r23 + ldi r17,128 + eor r7,r17 +59: + cpi r22,13 + brcs 67f + rcall 153f + ldi r23,9 + eor r6,r23 + ldi r17,128 + eor r7,r17 +67: + cpi r22,12 + brcs 73f + rcall 153f + ldi r23,138 + eor r6,r23 +73: + cpi r22,11 + brcs 79f + rcall 153f + ldi r23,136 + eor r6,r23 +79: + cpi r22,10 + brcs 87f + rcall 153f + ldi r23,9 + eor r6,r23 + ldi r17,128 + eor r7,r17 +87: + cpi r22,9 + brcs 93f + rcall 153f + ldi r23,10 + eor r6,r23 +93: + cpi r22,8 + brcs 101f + rcall 153f + ldi r23,139 + eor r6,r23 + ldi r17,128 + eor r7,r17 +101: + cpi r22,7 + brcs 107f + rcall 153f + ldi r23,139 + eor r6,r23 +107: + cpi r22,6 + brcs 115f + rcall 153f + ldi r23,137 + eor r6,r23 + ldi r17,128 + eor r7,r17 +115: + cpi r22,5 + brcs 123f + rcall 153f + ldi r23,3 + eor r6,r23 + ldi r17,128 + eor r7,r17 +123: + cpi r22,4 + brcs 131f + rcall 153f + ldi r23,2 + eor r6,r23 + ldi r17,128 + eor r7,r17 +131: + cpi r22,3 + brcs 137f + rcall 153f + ldi r23,128 + eor r6,r23 +137: + cpi r22,2 + brcs 145f + rcall 153f + ldi r23,10 + eor r6,r23 + ldi r17,128 + eor r7,r17 +145: + cpi r22,1 + brcs 151f + rcall 153f + ldi r23,10 + eor r6,r23 +151: + rjmp 1004f +153: + movw r18,r6 + ldd r0,Z+10 + eor r18,r0 + ldd r0,Z+11 + eor r19,r0 + ldd r0,Z+20 + eor r18,r0 + ldd r0,Z+21 + eor r19,r0 + ldd r0,Z+30 + eor r18,r0 + ldd r0,Z+31 + eor r19,r0 + ldd r0,Z+40 + eor r18,r0 + ldd r0,Z+41 + eor r19,r0 + movw r20,r8 + ldd r0,Z+12 + eor r20,r0 + ldd r0,Z+13 + eor r21,r0 + ldd r0,Z+22 + eor r20,r0 + ldd r0,Z+23 + eor r21,r0 + ldd r0,Z+32 + eor r20,r0 + ldd r0,Z+33 
+ eor r21,r0 + ldd r0,Z+42 + eor r20,r0 + ldd r0,Z+43 + eor r21,r0 + movw r26,r10 + ldd r0,Z+14 + eor r26,r0 + ldd r0,Z+15 + eor r27,r0 + ldd r0,Z+24 + eor r26,r0 + ldd r0,Z+25 + eor r27,r0 + ldd r0,Z+34 + eor r26,r0 + ldd r0,Z+35 + eor r27,r0 + ldd r0,Z+44 + eor r26,r0 + ldd r0,Z+45 + eor r27,r0 + movw r2,r12 + ldd r0,Z+16 + eor r2,r0 + ldd r0,Z+17 + eor r3,r0 + ldd r0,Z+26 + eor r2,r0 + ldd r0,Z+27 + eor r3,r0 + ldd r0,Z+36 + eor r2,r0 + ldd r0,Z+37 + eor r3,r0 + ldd r0,Z+46 + eor r2,r0 + ldd r0,Z+47 + eor r3,r0 + movw r4,r14 + ldd r0,Z+18 + eor r4,r0 + ldd r0,Z+19 + eor r5,r0 + ldd r0,Z+28 + eor r4,r0 + ldd r0,Z+29 + eor r5,r0 + ldd r0,Z+38 + eor r4,r0 + ldd r0,Z+39 + eor r5,r0 + ldd r0,Z+48 + eor r4,r0 + ldd r0,Z+49 + eor r5,r0 + movw r24,r20 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r4 + eor r25,r5 + eor r6,r24 + eor r7,r25 + ldd r0,Z+10 + eor r0,r24 + std Z+10,r0 + ldd r0,Z+11 + eor r0,r25 + std Z+11,r0 + ldd r0,Z+20 + eor r0,r24 + std Z+20,r0 + ldd r0,Z+21 + eor r0,r25 + std Z+21,r0 + ldd r0,Z+30 + eor r0,r24 + std Z+30,r0 + ldd r0,Z+31 + eor r0,r25 + std Z+31,r0 + ldd r0,Z+40 + eor r0,r24 + std Z+40,r0 + ldd r0,Z+41 + eor r0,r25 + std Z+41,r0 + movw r24,r26 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r18 + eor r25,r19 + eor r8,r24 + eor r9,r25 + ldd r0,Z+12 + eor r0,r24 + std Z+12,r0 + ldd r0,Z+13 + eor r0,r25 + std Z+13,r0 + ldd r0,Z+22 + eor r0,r24 + std Z+22,r0 + ldd r0,Z+23 + eor r0,r25 + std Z+23,r0 + ldd r0,Z+32 + eor r0,r24 + std Z+32,r0 + ldd r0,Z+33 + eor r0,r25 + std Z+33,r0 + ldd r0,Z+42 + eor r0,r24 + std Z+42,r0 + ldd r0,Z+43 + eor r0,r25 + std Z+43,r0 + movw r24,r2 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r20 + eor r25,r21 + eor r10,r24 + eor r11,r25 + ldd r0,Z+14 + eor r0,r24 + std Z+14,r0 + ldd r0,Z+15 + eor r0,r25 + std Z+15,r0 + ldd r0,Z+24 + eor r0,r24 + std Z+24,r0 + ldd r0,Z+25 + eor r0,r25 + std Z+25,r0 + ldd r0,Z+34 + eor r0,r24 + std Z+34,r0 + ldd r0,Z+35 + eor r0,r25 + std Z+35,r0 + ldd r0,Z+44 + eor r0,r24 + std Z+44,r0 + ldd r0,Z+45 + eor r0,r25 + std Z+45,r0 + movw r24,r4 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r26 + eor r25,r27 + eor r12,r24 + eor r13,r25 + ldd r0,Z+16 + eor r0,r24 + std Z+16,r0 + ldd r0,Z+17 + eor r0,r25 + std Z+17,r0 + ldd r0,Z+26 + eor r0,r24 + std Z+26,r0 + ldd r0,Z+27 + eor r0,r25 + std Z+27,r0 + ldd r0,Z+36 + eor r0,r24 + std Z+36,r0 + ldd r0,Z+37 + eor r0,r25 + std Z+37,r0 + ldd r0,Z+46 + eor r0,r24 + std Z+46,r0 + ldd r0,Z+47 + eor r0,r25 + std Z+47,r0 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r2 + eor r25,r3 + eor r14,r24 + eor r15,r25 + ldd r0,Z+18 + eor r0,r24 + std Z+18,r0 + ldd r0,Z+19 + eor r0,r25 + std Z+19,r0 + ldd r0,Z+28 + eor r0,r24 + std Z+28,r0 + ldd r0,Z+29 + eor r0,r25 + std Z+29,r0 + ldd r0,Z+38 + eor r0,r24 + std Z+38,r0 + ldd r0,Z+39 + eor r0,r25 + std Z+39,r0 + ldd r0,Z+48 + eor r0,r24 + std Z+48,r0 + ldd r0,Z+49 + eor r0,r25 + std Z+49,r0 + movw r24,r8 + ldd r8,Z+12 + ldd r9,Z+13 + mov r0,r9 + mov r9,r8 + mov r8,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldd r18,Z+18 + ldd r19,Z+19 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+12,r18 + std Z+13,r19 + ldd r18,Z+44 + ldd r19,Z+45 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+18,r18 + std Z+19,r19 + ldd r18,Z+28 + ldd r19,Z+29 + mov r0,r19 + mov r19,r18 + mov r18,r0 + bst r18,0 + lsr r19 + ror r18 
+ bld r19,7 + std Z+44,r18 + std Z+45,r19 + ldd r18,Z+40 + ldd r19,Z+41 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+28,r18 + std Z+29,r19 + movw r18,r10 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+40,r18 + std Z+41,r19 + ldd r10,Z+24 + ldd r11,Z+25 + mov r0,r11 + mov r11,r10 + mov r10,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldd r18,Z+26 + ldd r19,Z+27 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + std Z+24,r18 + std Z+25,r19 + ldd r18,Z+38 + ldd r19,Z+39 + mov r0,r19 + mov r19,r18 + mov r18,r0 + std Z+26,r18 + std Z+27,r19 + ldd r18,Z+46 + ldd r19,Z+47 + mov r0,r19 + mov r19,r18 + mov r18,r0 + std Z+38,r18 + std Z+39,r19 + ldd r18,Z+30 + ldd r19,Z+31 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + std Z+46,r18 + std Z+47,r19 + movw r18,r14 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+30,r18 + std Z+31,r19 + ldd r14,Z+48 + ldd r15,Z+49 + mov r0,r1 + lsr r15 + ror r14 + ror r0 + lsr r15 + ror r14 + ror r0 + or r15,r0 + ldd r18,Z+42 + ldd r19,Z+43 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+48,r18 + std Z+49,r19 + ldd r18,Z+16 + ldd r19,Z+17 + mov r0,r19 + mov r19,r18 + mov r18,r0 + bst r18,0 + lsr r19 + ror r18 + bld r19,7 + std Z+42,r18 + std Z+43,r19 + ldd r18,Z+32 + ldd r19,Z+33 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+16,r18 + std Z+17,r19 + ldd r18,Z+10 + ldd r19,Z+11 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+32,r18 + std Z+33,r19 + movw r18,r12 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+10,r18 + std Z+11,r19 + ldd r12,Z+36 + ldd r13,Z+37 + mov r0,r13 + mov r13,r12 + mov r12,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + or r13,r0 + ldd r18,Z+34 + ldd r19,Z+35 + bst r18,0 + lsr r19 + ror r18 + bld r19,7 + std Z+36,r18 + std Z+37,r19 + ldd r18,Z+22 + ldd r19,Z+23 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+34,r18 + std Z+35,r19 + ldd r18,Z+14 + ldd r19,Z+15 + mov r0,r19 + mov r19,r18 + mov r18,r0 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+22,r18 + std Z+23,r19 + ldd r18,Z+20 + ldd r19,Z+21 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+14,r18 + std Z+15,r19 + lsl r24 + rol r25 + adc r24,r1 + std Z+20,r24 + std Z+21,r25 + movw r18,r6 + movw r20,r8 + movw r26,r10 + movw r2,r12 + movw r4,r14 + movw r6,r26 + mov r0,r20 + com r0 + and r6,r0 + mov r0,r21 + com r0 + and r7,r0 + eor r6,r18 + eor r7,r19 + movw r8,r2 + mov r0,r26 + com r0 + and r8,r0 + mov r0,r27 + com r0 + and r9,r0 + eor r8,r20 + eor r9,r21 + movw r10,r4 + mov r0,r2 + com r0 + and r10,r0 + mov r0,r3 + com r0 + and r11,r0 + eor r10,r26 + eor r11,r27 + movw r12,r18 + mov r0,r4 + com r0 + and r12,r0 + mov r0,r5 + com r0 + and r13,r0 + eor r12,r2 + eor r13,r3 + movw r14,r20 + mov r0,r18 + com r0 + and r14,r0 + mov r0,r19 + com r0 + and r15,r0 + eor r14,r4 + eor r15,r5 + ldd 
r18,Z+10 + ldd r19,Z+11 + ldd r20,Z+12 + ldd r21,Z+13 + ldd r26,Z+14 + ldd r27,Z+15 + ldd r2,Z+16 + ldd r3,Z+17 + ldd r4,Z+18 + ldd r5,Z+19 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+10,r24 + std Z+11,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+12,r24 + std Z+13,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+14,r24 + std Z+15,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+16,r24 + std Z+17,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+18,r24 + std Z+19,r25 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + ldd r26,Z+24 + ldd r27,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r4,Z+28 + ldd r5,Z+29 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+20,r24 + std Z+21,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+22,r24 + std Z+23,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+24,r24 + std Z+25,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+26,r24 + std Z+27,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+28,r24 + std Z+29,r25 + ldd r18,Z+30 + ldd r19,Z+31 + ldd r20,Z+32 + ldd r21,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + ldd r2,Z+36 + ldd r3,Z+37 + ldd r4,Z+38 + ldd r5,Z+39 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+30,r24 + std Z+31,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+32,r24 + std Z+33,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+34,r24 + std Z+35,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+36,r24 + std Z+37,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+38,r24 + std Z+39,r25 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + ldd r26,Z+44 + ldd r27,Z+45 + ldd r2,Z+46 + ldd r3,Z+47 + ldd r4,Z+48 + ldd r5,Z+49 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+40,r24 + std Z+41,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+42,r24 + std Z+43,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+44,r24 + std Z+45,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+46,r24 + std Z+47,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+48,r24 + std Z+49,r25 + ret +1004: + st Z,r6 + std Z+1,r7 + std Z+2,r8 + std Z+3,r9 + std Z+4,r10 + std Z+5,r11 + std Z+6,r12 + std Z+7,r13 + std Z+8,r14 + std Z+9,r15 + pop r17 
+ pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size keccakp_400_permute, .-keccakp_400_permute + +#endif diff --git a/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-keccak.c b/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-keccak.c index c3c4011..60539df 100644 --- a/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-keccak.c +++ b/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-keccak.c @@ -22,74 +22,79 @@ #include "internal-keccak.h" +#if !defined(__AVR__) + /* Faster method to compute ((x + y) % 5) that avoids the division */ static unsigned char const addMod5Table[9] = { 0, 1, 2, 3, 4, 0, 1, 2, 3 }; #define addMod5(x, y) (addMod5Table[(x) + (y)]) -void keccakp_200_permute(keccakp_200_state_t *state, unsigned rounds) +void keccakp_200_permute(keccakp_200_state_t *state) { static uint8_t const RC[18] = { 0x01, 0x82, 0x8A, 0x00, 0x8B, 0x01, 0x81, 0x09, 0x8A, 0x88, 0x09, 0x0A, 0x8B, 0x8B, 0x89, 0x03, 0x02, 0x80 }; - uint8_t B[5][5]; + uint8_t C[5]; uint8_t D; unsigned round; unsigned index, index2; - for (round = 18 - rounds; round < 18; ++round) { + for (round = 0; round < 18; ++round) { /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. To save a bit of memory, - * we use the first row of B to store C and compute D on the fly */ + * arrays of size 5 called C and D. Compute D on the fly */ for (index = 0; index < 5; ++index) { - B[0][index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; + C[index] = state->A[0][index] ^ state->A[1][index] ^ + state->A[2][index] ^ state->A[3][index] ^ + state->A[4][index]; } for (index = 0; index < 5; ++index) { - D = B[0][addMod5(index, 4)] ^ - leftRotate1_8(B[0][addMod5(index, 1)]); + D = C[addMod5(index, 4)] ^ + leftRotate1_8(C[addMod5(index, 1)]); for (index2 = 0; index2 < 5; ++index2) state->A[index2][index] ^= D; } /* Step mapping rho and pi combined into a single step. 
* Rotate all lanes by a specific offset and rearrange */ - B[0][0] = state->A[0][0]; - B[1][0] = leftRotate4_8(state->A[0][3]); - B[2][0] = leftRotate1_8(state->A[0][1]); - B[3][0] = leftRotate3_8(state->A[0][4]); - B[4][0] = leftRotate6_8(state->A[0][2]); - B[0][1] = leftRotate4_8(state->A[1][1]); - B[1][1] = leftRotate4_8(state->A[1][4]); - B[2][1] = leftRotate6_8(state->A[1][2]); - B[3][1] = leftRotate4_8(state->A[1][0]); - B[4][1] = leftRotate7_8(state->A[1][3]); - B[0][2] = leftRotate3_8(state->A[2][2]); - B[1][2] = leftRotate3_8(state->A[2][0]); - B[2][2] = leftRotate1_8(state->A[2][3]); - B[3][2] = leftRotate2_8(state->A[2][1]); - B[4][2] = leftRotate7_8(state->A[2][4]); - B[0][3] = leftRotate5_8(state->A[3][3]); - B[1][3] = leftRotate5_8(state->A[3][1]); - B[2][3] = state->A[3][4]; - B[3][3] = leftRotate7_8(state->A[3][2]); - B[4][3] = leftRotate1_8(state->A[3][0]); - B[0][4] = leftRotate6_8(state->A[4][4]); - B[1][4] = leftRotate5_8(state->A[4][2]); - B[2][4] = leftRotate2_8(state->A[4][0]); - B[3][4] = state->A[4][3]; - B[4][4] = leftRotate2_8(state->A[4][1]); + D = state->A[0][1]; + state->A[0][1] = leftRotate4_8(state->A[1][1]); + state->A[1][1] = leftRotate4_8(state->A[1][4]); + state->A[1][4] = leftRotate5_8(state->A[4][2]); + state->A[4][2] = leftRotate7_8(state->A[2][4]); + state->A[2][4] = leftRotate2_8(state->A[4][0]); + state->A[4][0] = leftRotate6_8(state->A[0][2]); + state->A[0][2] = leftRotate3_8(state->A[2][2]); + state->A[2][2] = leftRotate1_8(state->A[2][3]); + state->A[2][3] = state->A[3][4]; + state->A[3][4] = state->A[4][3]; + state->A[4][3] = leftRotate1_8(state->A[3][0]); + state->A[3][0] = leftRotate3_8(state->A[0][4]); + state->A[0][4] = leftRotate6_8(state->A[4][4]); + state->A[4][4] = leftRotate2_8(state->A[4][1]); + state->A[4][1] = leftRotate7_8(state->A[1][3]); + state->A[1][3] = leftRotate5_8(state->A[3][1]); + state->A[3][1] = leftRotate4_8(state->A[1][0]); + state->A[1][0] = leftRotate4_8(state->A[0][3]); + state->A[0][3] = leftRotate5_8(state->A[3][3]); + state->A[3][3] = leftRotate7_8(state->A[3][2]); + state->A[3][2] = leftRotate2_8(state->A[2][1]); + state->A[2][1] = leftRotate6_8(state->A[1][2]); + state->A[1][2] = leftRotate3_8(state->A[2][0]); + state->A[2][0] = leftRotate1_8(D); /* Step mapping chi. Combine each lane with two others in its row */ for (index = 0; index < 5; ++index) { + C[0] = state->A[index][0]; + C[1] = state->A[index][1]; + C[2] = state->A[index][2]; + C[3] = state->A[index][3]; + C[4] = state->A[index][4]; for (index2 = 0; index2 < 5; ++index2) { - state->A[index2][index] = - B[index2][index] ^ - ((~B[index2][addMod5(index, 1)]) & - B[index2][addMod5(index, 2)]); + state->A[index][index2] = + C[index2] ^ + ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); } } @@ -110,61 +115,64 @@ void keccakp_400_permute_host(keccakp_400_state_t *state, unsigned rounds) 0x008A, 0x0088, 0x8009, 0x000A, 0x808B, 0x008B, 0x8089, 0x8003, 0x8002, 0x0080, 0x800A, 0x000A }; - uint16_t B[5][5]; + uint16_t C[5]; uint16_t D; unsigned round; unsigned index, index2; for (round = 20 - rounds; round < 20; ++round) { /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. To save a bit of memory, - * we use the first row of B to store C and compute D on the fly */ + * arrays of size 5 called C and D. 
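/*
 * Editorial sketch (not part of the patch): the hunks above replace the
 * 25-byte B[5][5] temporary with a single 5-byte buffer.  Below is a
 * minimal, standalone rendering of the same idea for the theta and chi
 * steps of Keccak-p[200]; all names are local to this example, and a
 * table-free (x + y) % 5 is used in place of the lookup table.
 */
#include <stdint.h>

#define ADD_MOD5(x, y) (((x) + (y)) % 5)
#define ROTL1_8(x) ((uint8_t)(((x) << 1) | ((x) >> 7)))

static void keccak200_theta_chi_sketch(uint8_t A[5][5])
{
    uint8_t C[5], D;
    unsigned x, y;

    /* theta: column parities in C[], D computed on the fly */
    for (x = 0; x < 5; ++x)
        C[x] = A[0][x] ^ A[1][x] ^ A[2][x] ^ A[3][x] ^ A[4][x];
    for (x = 0; x < 5; ++x) {
        D = C[ADD_MOD5(x, 4)] ^ ROTL1_8(C[ADD_MOD5(x, 1)]);
        for (y = 0; y < 5; ++y)
            A[y][x] ^= D;
    }

    /* chi: snapshot one row into C[] so the row can be rewritten in place */
    for (y = 0; y < 5; ++y) {
        for (x = 0; x < 5; ++x)
            C[x] = A[y][x];
        for (x = 0; x < 5; ++x)
            A[y][x] = C[x] ^ ((uint8_t)~C[ADD_MOD5(x, 1)] & C[ADD_MOD5(x, 2)]);
    }
}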
Compute D on the fly */ for (index = 0; index < 5; ++index) { - B[0][index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; + C[index] = state->A[0][index] ^ state->A[1][index] ^ + state->A[2][index] ^ state->A[3][index] ^ + state->A[4][index]; } for (index = 0; index < 5; ++index) { - D = B[0][addMod5(index, 4)] ^ - leftRotate1_16(B[0][addMod5(index, 1)]); + D = C[addMod5(index, 4)] ^ + leftRotate1_16(C[addMod5(index, 1)]); for (index2 = 0; index2 < 5; ++index2) state->A[index2][index] ^= D; } /* Step mapping rho and pi combined into a single step. * Rotate all lanes by a specific offset and rearrange */ - B[0][0] = state->A[0][0]; - B[1][0] = leftRotate12_16(state->A[0][3]); - B[2][0] = leftRotate1_16 (state->A[0][1]); - B[3][0] = leftRotate11_16(state->A[0][4]); - B[4][0] = leftRotate14_16(state->A[0][2]); - B[0][1] = leftRotate12_16(state->A[1][1]); - B[1][1] = leftRotate4_16 (state->A[1][4]); - B[2][1] = leftRotate6_16 (state->A[1][2]); - B[3][1] = leftRotate4_16 (state->A[1][0]); - B[4][1] = leftRotate7_16 (state->A[1][3]); - B[0][2] = leftRotate11_16(state->A[2][2]); - B[1][2] = leftRotate3_16 (state->A[2][0]); - B[2][2] = leftRotate9_16 (state->A[2][3]); - B[3][2] = leftRotate10_16(state->A[2][1]); - B[4][2] = leftRotate7_16 (state->A[2][4]); - B[0][3] = leftRotate5_16 (state->A[3][3]); - B[1][3] = leftRotate13_16(state->A[3][1]); - B[2][3] = leftRotate8_16 (state->A[3][4]); - B[3][3] = leftRotate15_16(state->A[3][2]); - B[4][3] = leftRotate9_16 (state->A[3][0]); - B[0][4] = leftRotate14_16(state->A[4][4]); - B[1][4] = leftRotate13_16(state->A[4][2]); - B[2][4] = leftRotate2_16 (state->A[4][0]); - B[3][4] = leftRotate8_16 (state->A[4][3]); - B[4][4] = leftRotate2_16 (state->A[4][1]); + D = state->A[0][1]; + state->A[0][1] = leftRotate12_16(state->A[1][1]); + state->A[1][1] = leftRotate4_16 (state->A[1][4]); + state->A[1][4] = leftRotate13_16(state->A[4][2]); + state->A[4][2] = leftRotate7_16 (state->A[2][4]); + state->A[2][4] = leftRotate2_16 (state->A[4][0]); + state->A[4][0] = leftRotate14_16(state->A[0][2]); + state->A[0][2] = leftRotate11_16(state->A[2][2]); + state->A[2][2] = leftRotate9_16 (state->A[2][3]); + state->A[2][3] = leftRotate8_16 (state->A[3][4]); + state->A[3][4] = leftRotate8_16 (state->A[4][3]); + state->A[4][3] = leftRotate9_16 (state->A[3][0]); + state->A[3][0] = leftRotate11_16(state->A[0][4]); + state->A[0][4] = leftRotate14_16(state->A[4][4]); + state->A[4][4] = leftRotate2_16 (state->A[4][1]); + state->A[4][1] = leftRotate7_16 (state->A[1][3]); + state->A[1][3] = leftRotate13_16(state->A[3][1]); + state->A[3][1] = leftRotate4_16 (state->A[1][0]); + state->A[1][0] = leftRotate12_16(state->A[0][3]); + state->A[0][3] = leftRotate5_16 (state->A[3][3]); + state->A[3][3] = leftRotate15_16(state->A[3][2]); + state->A[3][2] = leftRotate10_16(state->A[2][1]); + state->A[2][1] = leftRotate6_16 (state->A[1][2]); + state->A[1][2] = leftRotate3_16 (state->A[2][0]); + state->A[2][0] = leftRotate1_16(D); /* Step mapping chi. 
Combine each lane with two others in its row */ for (index = 0; index < 5; ++index) { + C[0] = state->A[index][0]; + C[1] = state->A[index][1]; + C[2] = state->A[index][2]; + C[3] = state->A[index][3]; + C[4] = state->A[index][4]; for (index2 = 0; index2 < 5; ++index2) { - state->A[index2][index] = - B[index2][index] ^ - ((~B[index2][addMod5(index, 1)]) & - B[index2][addMod5(index, 2)]); + state->A[index][index2] = + C[index2] ^ + ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); } } @@ -202,3 +210,5 @@ void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds) } #endif + +#endif /* !__AVR__ */ diff --git a/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-keccak.h b/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-keccak.h index 026da50..2ffef42 100644 --- a/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-keccak.h +++ b/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-keccak.h @@ -68,9 +68,8 @@ typedef union * \brief Permutes the Keccak-p[200] state. * * \param state The Keccak-p[200] state to be permuted. - * \param rounds The number of rounds to perform (up to 18). */ -void keccakp_200_permute(keccakp_200_state_t *state, unsigned rounds); +void keccakp_200_permute(keccakp_200_state_t *state); /** * \brief Permutes the Keccak-p[400] state, which is assumed to be in diff --git a/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-util.h b/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-util.h index e79158c..e30166d 100644 --- a/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-util.h +++ b/isap/Implementations/crypto_aead/isapa128v20/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
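/*
 * Editorial sketch (not part of the patch): the LW_CRYPTO_ROTATE32_COMPOSED
 * comment above notes that on AVR only rotations by 1 bit or by whole bytes
 * are cheap, so other counts are composed from them.  The check below shows
 * the composition used for leftRotate5 (rotate left by 8, then right by 1
 * three times); the helper names are local to this example.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t rotl32(uint32_t x, unsigned n) { return (x << n) | (x >> (32u - n)); }
static uint32_t rotr32(uint32_t x, unsigned n) { return (x >> n) | (x << (32u - n)); }

int main(void)
{
    uint32_t x = 0x12345678u;
    uint32_t direct   = rotl32(x, 5);
    uint32_t composed = rotr32(rotr32(rotr32(rotl32(x, 8), 1), 1), 1);
    printf("direct   0x%08lX\ncomposed 0x%08lX\n",
           (unsigned long)direct, (unsigned long)composed);
    return 0;
}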
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/aead-common.c b/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/aead-common.h b/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. 
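/*
 * Editorial sketch (not part of the patch): aead_check_tag() above ORs the
 * XOR of every tag byte pair into "accum", so accum is zero exactly when
 * the tags match.  (accum - 1) >> 8 then yields -1 (all ones) on a match
 * and 0 on a mismatch, which is used both to keep or wipe the plaintext
 * and to form the 0 / -1 return value without data-dependent branches.
 * The demonstration assumes an arithmetic right shift on negative ints,
 * as the library code itself does.
 */
#include <stdio.h>

static int tag_mask(int accum)
{
    return (accum - 1) >> 8;   /* -1 if accum == 0, else 0 for 1..255 */
}

int main(void)
{
    printf("match:    mask=%d return=%d\n", tag_mask(0x00), ~tag_mask(0x00));
    printf("mismatch: mask=%d return=%d\n", tag_mask(0x5A), ~tag_mask(0x5A));
    return 0;
}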
- * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
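/*
 * Editorial sketch (not part of the patch): a minimal example of how a test
 * driver might consume the aead_cipher_t meta-information declared above.
 * "print_cipher_info" is a hypothetical helper, not a name from this
 * library; a real driver would go on to call cipher->encrypt()/decrypt()
 * with buffers sized from these fields.
 */
#include <stdio.h>
#include "aead-common.h"

static void print_cipher_info(const aead_cipher_t *cipher)
{
    printf("%s: key=%u nonce=%u tag=%u little-endian=%d\n",
           cipher->name, cipher->key_len, cipher->nonce_len, cipher->tag_len,
           (cipher->flags & AEAD_FLAG_LITTLE_ENDIAN) != 0);
}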
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/api.h b/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/encrypt.c b/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/encrypt.c deleted file mode 100644 index c54de88..0000000 --- a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "isap.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return isap_keccak_128a_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return isap_keccak_128a_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-ascon-avr.S b/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-ascon-avr.S deleted file mode 100644 index e8a4fb4..0000000 --- a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-ascon-avr.S +++ /dev/null @@ -1,778 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global ascon_permute - .type ascon_permute, @function -ascon_permute: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ldi r18,15 - sub r18,r22 - swap r18 - or r22,r18 - ldd r3,Z+16 - ldd r2,Z+17 - ldd r27,Z+18 - ldd r26,Z+19 - ldd r21,Z+20 - ldd r20,Z+21 - ldd r19,Z+22 - ldd r18,Z+23 - ldd r11,Z+32 - ldd r10,Z+33 - ldd r9,Z+34 - ldd r8,Z+35 - ldd r7,Z+36 - ldd r6,Z+37 - ldd r5,Z+38 - ldd r4,Z+39 -20: - eor r18,r22 - ldd r23,Z+7 - ldd r12,Z+15 - ldd r13,Z+31 - eor r23,r4 - eor r4,r13 - eor r18,r12 - mov r14,r23 - mov r15,r12 - mov r24,r18 - mov r25,r13 - mov r16,r4 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r18 - and r24,r13 - and r25,r4 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r18,r25 - eor r13,r16 - eor 
r4,r14 - eor r12,r23 - eor r23,r4 - eor r13,r18 - com r18 - std Z+7,r23 - std Z+15,r12 - std Z+31,r13 - std Z+39,r4 - ldd r23,Z+6 - ldd r12,Z+14 - ldd r13,Z+30 - eor r23,r5 - eor r5,r13 - eor r19,r12 - mov r14,r23 - mov r15,r12 - mov r24,r19 - mov r25,r13 - mov r16,r5 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r19 - and r24,r13 - and r25,r5 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r19,r25 - eor r13,r16 - eor r5,r14 - eor r12,r23 - eor r23,r5 - eor r13,r19 - com r19 - std Z+6,r23 - std Z+14,r12 - std Z+30,r13 - std Z+38,r5 - ldd r23,Z+5 - ldd r12,Z+13 - ldd r13,Z+29 - eor r23,r6 - eor r6,r13 - eor r20,r12 - mov r14,r23 - mov r15,r12 - mov r24,r20 - mov r25,r13 - mov r16,r6 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r20 - and r24,r13 - and r25,r6 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r20,r25 - eor r13,r16 - eor r6,r14 - eor r12,r23 - eor r23,r6 - eor r13,r20 - com r20 - std Z+5,r23 - std Z+13,r12 - std Z+29,r13 - std Z+37,r6 - ldd r23,Z+4 - ldd r12,Z+12 - ldd r13,Z+28 - eor r23,r7 - eor r7,r13 - eor r21,r12 - mov r14,r23 - mov r15,r12 - mov r24,r21 - mov r25,r13 - mov r16,r7 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r21 - and r24,r13 - and r25,r7 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r21,r25 - eor r13,r16 - eor r7,r14 - eor r12,r23 - eor r23,r7 - eor r13,r21 - com r21 - std Z+4,r23 - std Z+12,r12 - std Z+28,r13 - std Z+36,r7 - ldd r23,Z+3 - ldd r12,Z+11 - ldd r13,Z+27 - eor r23,r8 - eor r8,r13 - eor r26,r12 - mov r14,r23 - mov r15,r12 - mov r24,r26 - mov r25,r13 - mov r16,r8 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r26 - and r24,r13 - and r25,r8 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r26,r25 - eor r13,r16 - eor r8,r14 - eor r12,r23 - eor r23,r8 - eor r13,r26 - com r26 - std Z+3,r23 - std Z+11,r12 - std Z+27,r13 - std Z+35,r8 - ldd r23,Z+2 - ldd r12,Z+10 - ldd r13,Z+26 - eor r23,r9 - eor r9,r13 - eor r27,r12 - mov r14,r23 - mov r15,r12 - mov r24,r27 - mov r25,r13 - mov r16,r9 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r27 - and r24,r13 - and r25,r9 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r27,r25 - eor r13,r16 - eor r9,r14 - eor r12,r23 - eor r23,r9 - eor r13,r27 - com r27 - std Z+2,r23 - std Z+10,r12 - std Z+26,r13 - std Z+34,r9 - ldd r23,Z+1 - ldd r12,Z+9 - ldd r13,Z+25 - eor r23,r10 - eor r10,r13 - eor r2,r12 - mov r14,r23 - mov r15,r12 - mov r24,r2 - mov r25,r13 - mov r16,r10 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r2 - and r24,r13 - and r25,r10 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r2,r25 - eor r13,r16 - eor r10,r14 - eor r12,r23 - eor r23,r10 - eor r13,r2 - com r2 - std Z+1,r23 - std Z+9,r12 - std Z+25,r13 - std Z+33,r10 - ld r23,Z - ldd r12,Z+8 - ldd r13,Z+24 - eor r23,r11 - eor r11,r13 - eor r3,r12 - mov r14,r23 - mov r15,r12 - mov r24,r3 - mov r25,r13 - mov r16,r11 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r3 - and r24,r13 - and r25,r11 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r3,r25 - eor r13,r16 - eor r11,r14 - eor r12,r23 - eor r23,r11 - eor r13,r3 - com r3 - st Z,r23 - std Z+8,r12 - std Z+24,r13 - std Z+32,r11 - ld r11,Z - ldd r10,Z+1 - ldd r9,Z+2 - ldd r8,Z+3 - ldd r7,Z+4 - ldd r6,Z+5 - ldd r5,Z+6 - ldd r4,Z+7 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r14 - mov r14,r24 - mov r24,r16 - mov r16,r0 - mov r0,r13 - mov r13,r15 - mov r15,r25 - mov r25,r17 - mov r17,r0 
- mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r17,r0 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r4 - mov r0,r5 - push r6 - mov r4,r7 - mov r5,r8 - mov r6,r9 - mov r7,r10 - mov r8,r11 - pop r11 - mov r10,r0 - mov r9,r23 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - st Z,r11 - std Z+1,r10 - std Z+2,r9 - std Z+3,r8 - std Z+4,r7 - std Z+5,r6 - std Z+6,r5 - std Z+7,r4 - ldd r11,Z+8 - ldd r10,Z+9 - ldd r9,Z+10 - ldd r8,Z+11 - ldd r7,Z+12 - ldd r6,Z+13 - ldd r5,Z+14 - ldd r4,Z+15 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r9 - mov r0,r10 - push r11 - mov r11,r8 - mov r10,r7 - mov r9,r6 - mov r8,r5 - mov r7,r4 - pop r6 - mov r5,r0 - mov r4,r23 - lsl r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - adc r4,r1 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - std Z+8,r11 - std Z+9,r10 - std Z+10,r9 - std Z+11,r8 - std Z+12,r7 - std Z+13,r6 - std Z+14,r5 - std Z+15,r4 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - bst r12,0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - bld r17,7 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - eor r24,r26 - eor r25,r27 - eor r16,r2 - eor r17,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r2 - rol r3 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r2 - rol r3 - adc r18,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - eor r26,r24 - eor r27,r25 - eor r2,r16 - eor r3,r17 - ldd r11,Z+24 - ldd r10,Z+25 - ldd r9,Z+26 - ldd r8,Z+27 - ldd r7,Z+28 - ldd r6,Z+29 - ldd r5,Z+30 - ldd r4,Z+31 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r17,r0 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r0,r4 - mov r4,r6 - mov r6,r8 - mov r8,r10 - mov r10,r0 - mov r0,r5 - mov r5,r7 - mov r7,r9 - mov r9,r11 - mov r11,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror 
r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - std Z+24,r11 - std Z+25,r10 - std Z+26,r9 - std Z+27,r8 - std Z+28,r7 - std Z+29,r6 - std Z+30,r5 - std Z+31,r4 - ldd r11,Z+32 - ldd r10,Z+33 - ldd r9,Z+34 - ldd r8,Z+35 - ldd r7,Z+36 - ldd r6,Z+37 - ldd r5,Z+38 - ldd r4,Z+39 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r9 - mov r0,r10 - push r11 - mov r11,r8 - mov r10,r7 - mov r9,r6 - mov r8,r5 - mov r7,r4 - pop r6 - mov r5,r0 - mov r4,r23 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - subi r22,15 - ldi r25,60 - cpse r22,r25 - rjmp 20b - std Z+16,r3 - std Z+17,r2 - std Z+18,r27 - std Z+19,r26 - std Z+20,r21 - std Z+21,r20 - std Z+22,r19 - std Z+23,r18 - std Z+32,r11 - std Z+33,r10 - std Z+34,r9 - std Z+35,r8 - std Z+36,r7 - std Z+37,r6 - std Z+38,r5 - std Z+39,r4 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size ascon_permute, .-ascon_permute - -#endif diff --git a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-ascon.c b/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-ascon.c deleted file mode 100644 index 657aabe..0000000 --- a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-ascon.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "internal-ascon.h" - -#if !defined(__AVR__) - -void ascon_permute(ascon_state_t *state, uint8_t first_round) -{ - uint64_t t0, t1, t2, t3, t4; -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = be_load_word64(state->B); - uint64_t x1 = be_load_word64(state->B + 8); - uint64_t x2 = be_load_word64(state->B + 16); - uint64_t x3 = be_load_word64(state->B + 24); - uint64_t x4 = be_load_word64(state->B + 32); -#else - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; -#endif - while (first_round < 12) { - /* Add the round constant to the state */ - x2 ^= ((0x0F - first_round) << 4) | first_round; - - /* Substitution layer - apply the s-box using bit-slicing - * according to the algorithm recommended in the specification */ - x0 ^= x4; x4 ^= x3; x2 ^= x1; - t0 = ~x0; t1 = ~x1; t2 = ~x2; t3 = ~x3; t4 = ~x4; - t0 &= x1; t1 &= x2; t2 &= x3; t3 &= x4; t4 &= x0; - x0 ^= t1; x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t0; - x1 ^= x0; x0 ^= x4; x3 ^= x2; x2 = ~x2; - - /* Linear diffusion layer */ - x0 ^= rightRotate19_64(x0) ^ rightRotate28_64(x0); - x1 ^= rightRotate61_64(x1) ^ rightRotate39_64(x1); - x2 ^= rightRotate1_64(x2) ^ rightRotate6_64(x2); - x3 ^= rightRotate10_64(x3) ^ rightRotate17_64(x3); - x4 ^= rightRotate7_64(x4) ^ rightRotate41_64(x4); - - /* Move onto the next round */ - ++first_round; - } -#if defined(LW_UTIL_LITTLE_ENDIAN) - be_store_word64(state->B, x0); - be_store_word64(state->B + 8, x1); - be_store_word64(state->B + 16, x2); - be_store_word64(state->B + 24, x3); - be_store_word64(state->B + 32, x4); -#else - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; -#endif -} - -#endif /* !__AVR__ */ diff --git a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-ascon.h b/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-ascon.h deleted file mode 100644 index d3fa3ca..0000000 --- a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-ascon.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_ASCON_H -#define LW_INTERNAL_ASCON_H - -#include "internal-util.h" - -/** - * \file internal-ascon.h - * \brief Internal implementation of the ASCON permutation. 
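/*
 * Editorial sketch (not part of the patch): the round-constant schedule in
 * ascon_permute() above injects ((0x0F - round) << 4) | round into x2, so a
 * full 12-round call (first_round = 0) walks the constants from 0xF0 down
 * to 0x4B.  The loop below just prints that progression.
 */
#include <stdio.h>

int main(void)
{
    for (unsigned r = 0; r < 12; ++r)
        printf("round %2u: RC = 0x%02X\n", r, ((0x0F - r) << 4) | r);
    return 0;
}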
- * - * References: http://competitions.cr.yp.to/round3/asconv12.pdf, - * http://ascon.iaik.tugraz.at/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Structure of the internal state of the ASCON permutation. - */ -typedef union -{ - uint64_t S[5]; /**< Words of the state */ - uint8_t B[40]; /**< Bytes of the state */ - -} ascon_state_t; - -/** - * \brief Permutes the ASCON state. - * - * \param state The ASCON state to be permuted. - * \param first_round The first round (of 12) to be performed; 0, 4, or 6. - * - * The input and output \a state will be in big-endian byte order. - */ -void ascon_permute(ascon_state_t *state, uint8_t first_round); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-isap.h b/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-isap.h deleted file mode 100644 index ba99f2a..0000000 --- a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-isap.h +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -/* We expect a number of macros to be defined before this file - * is included to configure the underlying ISAP variant. - * - * ISAP_ALG_NAME Name of the ISAP algorithm; e.g. isap_keccak_128 - * ISAP_RATE Number of bytes in the rate for hashing and encryption. - * ISAP_sH Number of rounds for hashing. - * ISAP_sE Number of rounds for encryption. - * ISAP_sB Number of rounds for key bit absorption. - * ISAP_sK Number of rounds for keying. - * ISAP_STATE Type for the permuation state; e.g. ascon_state_t - * ISAP_PERMUTE(s,r) Permutes the state "s" with number of rounds "r". 
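/*
 * Editorial sketch (not part of the patch): internal-isap.h is included
 * once per variant after the macros listed above have been defined.  The
 * values below are illustrative placeholders only, not the parameters of
 * any real ISAP instance; the ascon_state_t/ascon_permute pairing and the
 * "12 - rounds" first-round mapping are assumptions made for this example.
 * The surrounding code also expects ISAP_KEY_SIZE, ISAP_NONCE_SIZE and
 * ISAP_TAG_SIZE to be defined elsewhere.
 */
#define ISAP_ALG_NAME      isap_example
#define ISAP_STATE         ascon_state_t
#define ISAP_PERMUTE(s, r) ascon_permute((s), (uint8_t)(12 - (r)))
#define ISAP_RATE          8   /* bytes per permutation call (placeholder) */
#define ISAP_sH            12  /* hashing rounds (placeholder) */
#define ISAP_sE            6   /* encryption rounds (placeholder) */
#define ISAP_sB            1   /* key-bit absorption rounds (placeholder) */
#define ISAP_sK            12  /* keying rounds (placeholder) */
#include "internal-isap.h"     /* expands isap_example_aead_encrypt()/_decrypt() */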
- */ -#if defined(ISAP_ALG_NAME) - -#define ISAP_CONCAT_INNER(name,suffix) name##suffix -#define ISAP_CONCAT(name,suffix) ISAP_CONCAT_INNER(name,suffix) - -/* IV string for initialising the associated data */ -static unsigned char const ISAP_CONCAT(ISAP_ALG_NAME,_IV_A) - [sizeof(ISAP_STATE) - ISAP_NONCE_SIZE] = { - 0x01, ISAP_KEY_SIZE * 8, ISAP_RATE * 8, 1, - ISAP_sH, ISAP_sB, ISAP_sE, ISAP_sK -}; - -/* IV string for authenticating associated data */ -static unsigned char const ISAP_CONCAT(ISAP_ALG_NAME,_IV_KA) - [sizeof(ISAP_STATE) - ISAP_KEY_SIZE] = { - 0x02, ISAP_KEY_SIZE * 8, ISAP_RATE * 8, 1, - ISAP_sH, ISAP_sB, ISAP_sE, ISAP_sK -}; - -/* IV string for encrypting payload data */ -static unsigned char const ISAP_CONCAT(ISAP_ALG_NAME,_IV_KE) - [sizeof(ISAP_STATE) - ISAP_KEY_SIZE] = { - 0x03, ISAP_KEY_SIZE * 8, ISAP_RATE * 8, 1, - ISAP_sH, ISAP_sB, ISAP_sE, ISAP_sK -}; - -/** - * \brief Re-keys the ISAP permutation state. - * - * \param state The permutation state to be re-keyed. - * \param k Points to the 128-bit key for the ISAP cipher. - * \param iv Points to the initialization vector for this re-keying operation. - * \param data Points to the data to be absorbed to perform the re-keying. - * \param data_len Length of the data to be absorbed. - * - * The output key will be left in the leading bytes of \a state. - */ -static void ISAP_CONCAT(ISAP_ALG_NAME,_rekey) - (ISAP_STATE *state, const unsigned char *k, const unsigned char *iv, - const unsigned char *data, unsigned data_len) -{ - unsigned bit, num_bits; - - /* Initialize the state with the key and IV */ - memcpy(state->B, k, ISAP_KEY_SIZE); - memcpy(state->B + ISAP_KEY_SIZE, iv, sizeof(state->B) - ISAP_KEY_SIZE); - ISAP_PERMUTE(state, ISAP_sK); - - /* Absorb all of the bits of the data buffer one by one */ - num_bits = data_len * 8 - 1; - for (bit = 0; bit < num_bits; ++bit) { - state->B[0] ^= (data[bit / 8] << (bit % 8)) & 0x80; - ISAP_PERMUTE(state, ISAP_sB); - } - state->B[0] ^= (data[bit / 8] << (bit % 8)) & 0x80; - ISAP_PERMUTE(state, ISAP_sK); -} - -/** - * \brief Encrypts (or decrypts) a message payload with ISAP. - * - * \param state ISAP permutation state. - * \param k Points to the 128-bit key for the ISAP cipher. - * \param npub Points to the 128-bit nonce for the ISAP cipher. - * \param c Buffer to receive the output ciphertext. - * \param m Buffer to receive the input plaintext. - * \param mlen Length of the input plaintext. - */ -static void ISAP_CONCAT(ISAP_ALG_NAME,_encrypt) - (ISAP_STATE *state, const unsigned char *k, const unsigned char *npub, - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Set up the re-keyed encryption key and nonce in the state */ - ISAP_CONCAT(ISAP_ALG_NAME,_rekey) - (state, k, ISAP_CONCAT(ISAP_ALG_NAME,_IV_KE), npub, ISAP_NONCE_SIZE); - memcpy(state->B + sizeof(ISAP_STATE) - ISAP_NONCE_SIZE, - npub, ISAP_NONCE_SIZE); - - /* Encrypt the plaintext to produce the ciphertext */ - while (mlen >= ISAP_RATE) { - ISAP_PERMUTE(state, ISAP_sE); - lw_xor_block_2_src(c, state->B, m, ISAP_RATE); - c += ISAP_RATE; - m += ISAP_RATE; - mlen -= ISAP_RATE; - } - if (mlen > 0) { - ISAP_PERMUTE(state, ISAP_sE); - lw_xor_block_2_src(c, state->B, m, (unsigned)mlen); - } -} - -/** - * \brief Authenticates the associated data and ciphertext using ISAP. - * - * \param state ISAP permutation state. - * \param k Points to the 128-bit key for the ISAP cipher. - * \param npub Points to the 128-bit nonce for the ISAP cipher. - * \param ad Buffer containing the associated data. 
- * \param adlen Length of the associated data. - * \param c Buffer containing the ciphertext. - * \param clen Length of the ciphertext. - */ -static void ISAP_CONCAT(ISAP_ALG_NAME,_mac) - (ISAP_STATE *state, const unsigned char *k, const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *c, unsigned long long clen, - unsigned char *tag) -{ - unsigned char preserve[sizeof(ISAP_STATE) - ISAP_TAG_SIZE]; - unsigned temp; - - /* Absorb the associated data */ - memcpy(state->B, npub, ISAP_NONCE_SIZE); - memcpy(state->B + ISAP_NONCE_SIZE, ISAP_CONCAT(ISAP_ALG_NAME,_IV_A), - sizeof(state->B) - ISAP_NONCE_SIZE); - ISAP_PERMUTE(state, ISAP_sH); - while (adlen >= ISAP_RATE) { - lw_xor_block(state->B, ad, ISAP_RATE); - ISAP_PERMUTE(state, ISAP_sH); - ad += ISAP_RATE; - adlen -= ISAP_RATE; - } - temp = (unsigned)adlen; - lw_xor_block(state->B, ad, temp); - state->B[temp] ^= 0x80; /* padding */ - ISAP_PERMUTE(state, ISAP_sH); - state->B[sizeof(state->B) - 1] ^= 0x01; /* domain separation */ - - /* Absorb the ciphertext */ - while (clen >= ISAP_RATE) { - lw_xor_block(state->B, c, ISAP_RATE); - ISAP_PERMUTE(state, ISAP_sH); - c += ISAP_RATE; - clen -= ISAP_RATE; - } - temp = (unsigned)clen; - lw_xor_block(state->B, c, temp); - state->B[temp] ^= 0x80; /* padding */ - ISAP_PERMUTE(state, ISAP_sH); - - /* Re-key the state and generate the authentication tag */ - memcpy(tag, state->B, ISAP_TAG_SIZE); - memcpy(preserve, state->B + ISAP_TAG_SIZE, sizeof(preserve)); - ISAP_CONCAT(ISAP_ALG_NAME,_rekey) - (state, k, ISAP_CONCAT(ISAP_ALG_NAME,_IV_KA), tag, ISAP_TAG_SIZE); - memcpy(state->B + ISAP_TAG_SIZE, preserve, sizeof(preserve)); - ISAP_PERMUTE(state, ISAP_sH); - memcpy(tag, state->B, ISAP_TAG_SIZE); -} - -int ISAP_CONCAT(ISAP_ALG_NAME,_aead_encrypt) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - ISAP_STATE state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ISAP_TAG_SIZE; - - /* Encrypt the plaintext to produce the ciphertext */ - ISAP_CONCAT(ISAP_ALG_NAME,_encrypt)(&state, k, npub, c, m, mlen); - - /* Authenticate the associated data and ciphertext to generate the tag */ - ISAP_CONCAT(ISAP_ALG_NAME,_mac) - (&state, k, npub, ad, adlen, c, mlen, c + mlen); - return 0; -} - -int ISAP_CONCAT(ISAP_ALG_NAME,_aead_decrypt) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - ISAP_STATE state; - unsigned char tag[ISAP_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ISAP_TAG_SIZE) - return -1; - *mlen = clen - ISAP_TAG_SIZE; - - /* Authenticate the associated data and ciphertext to generate the tag */ - ISAP_CONCAT(ISAP_ALG_NAME,_mac)(&state, k, npub, ad, adlen, c, *mlen, tag); - - /* Decrypt the ciphertext to produce the plaintext */ - ISAP_CONCAT(ISAP_ALG_NAME,_encrypt)(&state, k, npub, m, c, *mlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, tag, c + *mlen, ISAP_TAG_SIZE); -} - -#endif /* ISAP_ALG_NAME */ - -/* Now undefine everything so that we can include this file again for - * another variant on the ISAP algorithm */ -#undef ISAP_ALG_NAME -#undef ISAP_RATE -#undef 
ISAP_sH -#undef ISAP_sE -#undef ISAP_sB -#undef ISAP_sK -#undef ISAP_STATE -#undef ISAP_PERMUTE -#undef ISAP_CONCAT_INNER -#undef ISAP_CONCAT diff --git a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-keccak-avr.S b/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-keccak-avr.S deleted file mode 100644 index e50ccaf..0000000 --- a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-keccak-avr.S +++ /dev/null @@ -1,1552 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global keccakp_200_permute - .type keccakp_200_permute, @function -keccakp_200_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r26,Z+6 - ldd r27,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r4,Z+12 - ldd r5,Z+13 - ldd r6,Z+14 - ldd r7,Z+15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - ldd r24,Z+24 - push r31 - push r30 - rcall 82f - ldi r30,1 - eor r18,r30 - rcall 82f - ldi r30,130 - eor r18,r30 - rcall 82f - ldi r30,138 - eor r18,r30 - rcall 82f - mov r30,r1 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,1 - eor r18,r30 - rcall 82f - ldi r30,129 - eor r18,r30 - rcall 82f - ldi r30,9 - eor r18,r30 - rcall 82f - ldi r30,138 - eor r18,r30 - rcall 82f - ldi r30,136 - eor r18,r30 - rcall 82f - ldi r30,9 - eor r18,r30 - rcall 82f - ldi r30,10 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,137 - eor r18,r30 - rcall 82f - ldi r30,3 - eor r18,r30 - rcall 82f - ldi r30,2 - eor r18,r30 - rcall 82f - ldi r30,128 - eor r18,r30 - rjmp 420f -82: - mov r30,r18 - eor r30,r23 - eor r30,r2 - eor r30,r7 - eor r30,r12 - mov r31,r19 - eor r31,r26 - eor r31,r3 - eor r31,r8 - eor r31,r13 - mov r25,r20 - eor r25,r27 - eor r25,r4 - eor r25,r9 - eor r25,r14 - mov r16,r21 - eor r16,r28 - eor r16,r5 - eor r16,r10 - eor r16,r15 - mov r17,r22 - eor r17,r29 - eor r17,r6 - eor r17,r11 - eor r17,r24 - mov r0,r31 - lsl r0 - adc r0,r1 - eor r0,r17 - eor r18,r0 - eor r23,r0 - eor r2,r0 - eor r7,r0 - eor r12,r0 - mov r0,r25 - lsl r0 - adc r0,r1 - eor r0,r30 - eor r19,r0 - eor r26,r0 - eor r3,r0 - eor r8,r0 - eor r13,r0 - mov r0,r16 - lsl r0 - adc r0,r1 - eor r0,r31 - eor r20,r0 - eor r27,r0 - eor r4,r0 - eor r9,r0 - eor r14,r0 - mov r0,r17 - lsl r0 - adc r0,r1 - eor r0,r25 - eor r21,r0 - eor r28,r0 - eor r5,r0 - eor r10,r0 - eor r15,r0 - mov r0,r30 - lsl r0 - adc r0,r1 - eor r0,r16 - eor r22,r0 - eor r29,r0 - eor r6,r0 - eor r11,r0 - eor r24,r0 - mov r30,r19 - swap r26 - mov r19,r26 - swap r29 - mov r26,r29 - mov r0,r1 - lsr r14 - ror r0 - lsr r14 - ror r0 - lsr r14 - ror r0 - or r14,r0 - mov r29,r14 - bst r6,0 - lsr r6 - bld r6,7 - mov r14,r6 - lsl r12 - adc r12,r1 - lsl r12 - adc r12,r1 - mov r6,r12 - mov r0,r1 - lsr r20 - ror r0 - lsr r20 - ror r0 - or r20,r0 - mov r12,r20 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - mov r20,r4 - lsl r5 - adc r5,r1 - mov r4,r5 - mov r5,r11 - mov r11,r15 - lsl r7 - adc r7,r1 - mov r15,r7 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - mov r7,r22 - mov r0,r1 - lsr r24 - ror r0 - lsr r24 - ror r0 - or r24,r0 - mov r22,r24 - lsl r13 - adc 
r13,r1 - lsl r13 - adc r13,r1 - mov r24,r13 - bst r28,0 - lsr r28 - bld r28,7 - mov r13,r28 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r28,r8 - swap r23 - mov r8,r23 - swap r21 - mov r23,r21 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r21,r10 - bst r9,0 - lsr r9 - bld r9,7 - mov r10,r9 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - mov r9,r3 - mov r0,r1 - lsr r27 - ror r0 - lsr r27 - ror r0 - or r27,r0 - mov r3,r27 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - mov r27,r2 - lsl r30 - adc r30,r1 - mov r2,r30 - mov r30,r18 - mov r31,r19 - mov r25,r20 - mov r16,r21 - mov r17,r22 - mov r18,r25 - mov r0,r31 - com r0 - and r18,r0 - eor r18,r30 - mov r19,r16 - mov r0,r25 - com r0 - and r19,r0 - eor r19,r31 - mov r20,r17 - mov r0,r16 - com r0 - and r20,r0 - eor r20,r25 - mov r21,r30 - mov r0,r17 - com r0 - and r21,r0 - eor r21,r16 - mov r22,r31 - mov r0,r30 - com r0 - and r22,r0 - eor r22,r17 - mov r30,r23 - mov r31,r26 - mov r25,r27 - mov r16,r28 - mov r17,r29 - mov r23,r25 - mov r0,r31 - com r0 - and r23,r0 - eor r23,r30 - mov r26,r16 - mov r0,r25 - com r0 - and r26,r0 - eor r26,r31 - mov r27,r17 - mov r0,r16 - com r0 - and r27,r0 - eor r27,r25 - mov r28,r30 - mov r0,r17 - com r0 - and r28,r0 - eor r28,r16 - mov r29,r31 - mov r0,r30 - com r0 - and r29,r0 - eor r29,r17 - mov r30,r2 - mov r31,r3 - mov r25,r4 - mov r16,r5 - mov r17,r6 - mov r2,r25 - mov r0,r31 - com r0 - and r2,r0 - eor r2,r30 - mov r3,r16 - mov r0,r25 - com r0 - and r3,r0 - eor r3,r31 - mov r4,r17 - mov r0,r16 - com r0 - and r4,r0 - eor r4,r25 - mov r5,r30 - mov r0,r17 - com r0 - and r5,r0 - eor r5,r16 - mov r6,r31 - mov r0,r30 - com r0 - and r6,r0 - eor r6,r17 - mov r30,r7 - mov r31,r8 - mov r25,r9 - mov r16,r10 - mov r17,r11 - mov r7,r25 - mov r0,r31 - com r0 - and r7,r0 - eor r7,r30 - mov r8,r16 - mov r0,r25 - com r0 - and r8,r0 - eor r8,r31 - mov r9,r17 - mov r0,r16 - com r0 - and r9,r0 - eor r9,r25 - mov r10,r30 - mov r0,r17 - com r0 - and r10,r0 - eor r10,r16 - mov r11,r31 - mov r0,r30 - com r0 - and r11,r0 - eor r11,r17 - mov r30,r12 - mov r31,r13 - mov r25,r14 - mov r16,r15 - mov r17,r24 - mov r12,r25 - mov r0,r31 - com r0 - and r12,r0 - eor r12,r30 - mov r13,r16 - mov r0,r25 - com r0 - and r13,r0 - eor r13,r31 - mov r14,r17 - mov r0,r16 - com r0 - and r14,r0 - eor r14,r25 - mov r15,r30 - mov r0,r17 - com r0 - and r15,r0 - eor r15,r16 - mov r24,r31 - mov r0,r30 - com r0 - and r24,r0 - eor r24,r17 - ret -420: - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r22 - std Z+5,r23 - std Z+6,r26 - std Z+7,r27 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r4 - std Z+13,r5 - std Z+14,r6 - std Z+15,r7 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - std Z+24,r24 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size keccakp_200_permute, .-keccakp_200_permute - - .text -.global keccakp_400_permute - .type keccakp_400_permute, @function -keccakp_400_permute: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - movw r30,r24 -.L__stack_usage = 17 - ld r6,Z - ldd r7,Z+1 - ldd r8,Z+2 - ldd r9,Z+3 - ldd r10,Z+4 - ldd r11,Z+5 - ldd r12,Z+6 - ldd r13,Z+7 - ldd r14,Z+8 - ldd r15,Z+9 - cpi 
r22,20 - brcs 15f - rcall 153f - ldi r23,1 - eor r6,r23 -15: - cpi r22,19 - brcs 23f - rcall 153f - ldi r23,130 - eor r6,r23 - ldi r17,128 - eor r7,r17 -23: - cpi r22,18 - brcs 31f - rcall 153f - ldi r23,138 - eor r6,r23 - ldi r17,128 - eor r7,r17 -31: - cpi r22,17 - brcs 37f - rcall 153f - ldi r23,128 - eor r7,r23 -37: - cpi r22,16 - brcs 45f - rcall 153f - ldi r23,139 - eor r6,r23 - ldi r17,128 - eor r7,r17 -45: - cpi r22,15 - brcs 51f - rcall 153f - ldi r23,1 - eor r6,r23 -51: - cpi r22,14 - brcs 59f - rcall 153f - ldi r23,129 - eor r6,r23 - ldi r17,128 - eor r7,r17 -59: - cpi r22,13 - brcs 67f - rcall 153f - ldi r23,9 - eor r6,r23 - ldi r17,128 - eor r7,r17 -67: - cpi r22,12 - brcs 73f - rcall 153f - ldi r23,138 - eor r6,r23 -73: - cpi r22,11 - brcs 79f - rcall 153f - ldi r23,136 - eor r6,r23 -79: - cpi r22,10 - brcs 87f - rcall 153f - ldi r23,9 - eor r6,r23 - ldi r17,128 - eor r7,r17 -87: - cpi r22,9 - brcs 93f - rcall 153f - ldi r23,10 - eor r6,r23 -93: - cpi r22,8 - brcs 101f - rcall 153f - ldi r23,139 - eor r6,r23 - ldi r17,128 - eor r7,r17 -101: - cpi r22,7 - brcs 107f - rcall 153f - ldi r23,139 - eor r6,r23 -107: - cpi r22,6 - brcs 115f - rcall 153f - ldi r23,137 - eor r6,r23 - ldi r17,128 - eor r7,r17 -115: - cpi r22,5 - brcs 123f - rcall 153f - ldi r23,3 - eor r6,r23 - ldi r17,128 - eor r7,r17 -123: - cpi r22,4 - brcs 131f - rcall 153f - ldi r23,2 - eor r6,r23 - ldi r17,128 - eor r7,r17 -131: - cpi r22,3 - brcs 137f - rcall 153f - ldi r23,128 - eor r6,r23 -137: - cpi r22,2 - brcs 145f - rcall 153f - ldi r23,10 - eor r6,r23 - ldi r17,128 - eor r7,r17 -145: - cpi r22,1 - brcs 151f - rcall 153f - ldi r23,10 - eor r6,r23 -151: - rjmp 1004f -153: - movw r18,r6 - ldd r0,Z+10 - eor r18,r0 - ldd r0,Z+11 - eor r19,r0 - ldd r0,Z+20 - eor r18,r0 - ldd r0,Z+21 - eor r19,r0 - ldd r0,Z+30 - eor r18,r0 - ldd r0,Z+31 - eor r19,r0 - ldd r0,Z+40 - eor r18,r0 - ldd r0,Z+41 - eor r19,r0 - movw r20,r8 - ldd r0,Z+12 - eor r20,r0 - ldd r0,Z+13 - eor r21,r0 - ldd r0,Z+22 - eor r20,r0 - ldd r0,Z+23 - eor r21,r0 - ldd r0,Z+32 - eor r20,r0 - ldd r0,Z+33 - eor r21,r0 - ldd r0,Z+42 - eor r20,r0 - ldd r0,Z+43 - eor r21,r0 - movw r26,r10 - ldd r0,Z+14 - eor r26,r0 - ldd r0,Z+15 - eor r27,r0 - ldd r0,Z+24 - eor r26,r0 - ldd r0,Z+25 - eor r27,r0 - ldd r0,Z+34 - eor r26,r0 - ldd r0,Z+35 - eor r27,r0 - ldd r0,Z+44 - eor r26,r0 - ldd r0,Z+45 - eor r27,r0 - movw r2,r12 - ldd r0,Z+16 - eor r2,r0 - ldd r0,Z+17 - eor r3,r0 - ldd r0,Z+26 - eor r2,r0 - ldd r0,Z+27 - eor r3,r0 - ldd r0,Z+36 - eor r2,r0 - ldd r0,Z+37 - eor r3,r0 - ldd r0,Z+46 - eor r2,r0 - ldd r0,Z+47 - eor r3,r0 - movw r4,r14 - ldd r0,Z+18 - eor r4,r0 - ldd r0,Z+19 - eor r5,r0 - ldd r0,Z+28 - eor r4,r0 - ldd r0,Z+29 - eor r5,r0 - ldd r0,Z+38 - eor r4,r0 - ldd r0,Z+39 - eor r5,r0 - ldd r0,Z+48 - eor r4,r0 - ldd r0,Z+49 - eor r5,r0 - movw r24,r20 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r4 - eor r25,r5 - eor r6,r24 - eor r7,r25 - ldd r0,Z+10 - eor r0,r24 - std Z+10,r0 - ldd r0,Z+11 - eor r0,r25 - std Z+11,r0 - ldd r0,Z+20 - eor r0,r24 - std Z+20,r0 - ldd r0,Z+21 - eor r0,r25 - std Z+21,r0 - ldd r0,Z+30 - eor r0,r24 - std Z+30,r0 - ldd r0,Z+31 - eor r0,r25 - std Z+31,r0 - ldd r0,Z+40 - eor r0,r24 - std Z+40,r0 - ldd r0,Z+41 - eor r0,r25 - std Z+41,r0 - movw r24,r26 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r18 - eor r25,r19 - eor r8,r24 - eor r9,r25 - ldd r0,Z+12 - eor r0,r24 - std Z+12,r0 - ldd r0,Z+13 - eor r0,r25 - std Z+13,r0 - ldd r0,Z+22 - eor r0,r24 - std Z+22,r0 - ldd r0,Z+23 - eor r0,r25 - std Z+23,r0 - ldd r0,Z+32 - eor r0,r24 - std Z+32,r0 - 
ldd r0,Z+33 - eor r0,r25 - std Z+33,r0 - ldd r0,Z+42 - eor r0,r24 - std Z+42,r0 - ldd r0,Z+43 - eor r0,r25 - std Z+43,r0 - movw r24,r2 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r20 - eor r25,r21 - eor r10,r24 - eor r11,r25 - ldd r0,Z+14 - eor r0,r24 - std Z+14,r0 - ldd r0,Z+15 - eor r0,r25 - std Z+15,r0 - ldd r0,Z+24 - eor r0,r24 - std Z+24,r0 - ldd r0,Z+25 - eor r0,r25 - std Z+25,r0 - ldd r0,Z+34 - eor r0,r24 - std Z+34,r0 - ldd r0,Z+35 - eor r0,r25 - std Z+35,r0 - ldd r0,Z+44 - eor r0,r24 - std Z+44,r0 - ldd r0,Z+45 - eor r0,r25 - std Z+45,r0 - movw r24,r4 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r26 - eor r25,r27 - eor r12,r24 - eor r13,r25 - ldd r0,Z+16 - eor r0,r24 - std Z+16,r0 - ldd r0,Z+17 - eor r0,r25 - std Z+17,r0 - ldd r0,Z+26 - eor r0,r24 - std Z+26,r0 - ldd r0,Z+27 - eor r0,r25 - std Z+27,r0 - ldd r0,Z+36 - eor r0,r24 - std Z+36,r0 - ldd r0,Z+37 - eor r0,r25 - std Z+37,r0 - ldd r0,Z+46 - eor r0,r24 - std Z+46,r0 - ldd r0,Z+47 - eor r0,r25 - std Z+47,r0 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r2 - eor r25,r3 - eor r14,r24 - eor r15,r25 - ldd r0,Z+18 - eor r0,r24 - std Z+18,r0 - ldd r0,Z+19 - eor r0,r25 - std Z+19,r0 - ldd r0,Z+28 - eor r0,r24 - std Z+28,r0 - ldd r0,Z+29 - eor r0,r25 - std Z+29,r0 - ldd r0,Z+38 - eor r0,r24 - std Z+38,r0 - ldd r0,Z+39 - eor r0,r25 - std Z+39,r0 - ldd r0,Z+48 - eor r0,r24 - std Z+48,r0 - ldd r0,Z+49 - eor r0,r25 - std Z+49,r0 - movw r24,r8 - ldd r8,Z+12 - ldd r9,Z+13 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldd r18,Z+18 - ldd r19,Z+19 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+12,r18 - std Z+13,r19 - ldd r18,Z+44 - ldd r19,Z+45 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+18,r18 - std Z+19,r19 - ldd r18,Z+28 - ldd r19,Z+29 - mov r0,r19 - mov r19,r18 - mov r18,r0 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std Z+44,r18 - std Z+45,r19 - ldd r18,Z+40 - ldd r19,Z+41 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+28,r18 - std Z+29,r19 - movw r18,r10 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+40,r18 - std Z+41,r19 - ldd r10,Z+24 - ldd r11,Z+25 - mov r0,r11 - mov r11,r10 - mov r10,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldd r18,Z+26 - ldd r19,Z+27 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - std Z+24,r18 - std Z+25,r19 - ldd r18,Z+38 - ldd r19,Z+39 - mov r0,r19 - mov r19,r18 - mov r18,r0 - std Z+26,r18 - std Z+27,r19 - ldd r18,Z+46 - ldd r19,Z+47 - mov r0,r19 - mov r19,r18 - mov r18,r0 - std Z+38,r18 - std Z+39,r19 - ldd r18,Z+30 - ldd r19,Z+31 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - std Z+46,r18 - std Z+47,r19 - movw r18,r14 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+30,r18 - std Z+31,r19 - ldd r14,Z+48 - ldd r15,Z+49 - mov r0,r1 - lsr r15 - ror r14 - ror r0 - lsr r15 - ror r14 - ror r0 - or r15,r0 - ldd r18,Z+42 - ldd r19,Z+43 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+48,r18 - std Z+49,r19 - ldd r18,Z+16 - ldd r19,Z+17 - mov r0,r19 - mov r19,r18 - mov r18,r0 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std 
Z+42,r18 - std Z+43,r19 - ldd r18,Z+32 - ldd r19,Z+33 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+16,r18 - std Z+17,r19 - ldd r18,Z+10 - ldd r19,Z+11 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+32,r18 - std Z+33,r19 - movw r18,r12 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+10,r18 - std Z+11,r19 - ldd r12,Z+36 - ldd r13,Z+37 - mov r0,r13 - mov r13,r12 - mov r12,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - or r13,r0 - ldd r18,Z+34 - ldd r19,Z+35 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std Z+36,r18 - std Z+37,r19 - ldd r18,Z+22 - ldd r19,Z+23 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+34,r18 - std Z+35,r19 - ldd r18,Z+14 - ldd r19,Z+15 - mov r0,r19 - mov r19,r18 - mov r18,r0 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+22,r18 - std Z+23,r19 - ldd r18,Z+20 - ldd r19,Z+21 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+14,r18 - std Z+15,r19 - lsl r24 - rol r25 - adc r24,r1 - std Z+20,r24 - std Z+21,r25 - movw r18,r6 - movw r20,r8 - movw r26,r10 - movw r2,r12 - movw r4,r14 - movw r6,r26 - mov r0,r20 - com r0 - and r6,r0 - mov r0,r21 - com r0 - and r7,r0 - eor r6,r18 - eor r7,r19 - movw r8,r2 - mov r0,r26 - com r0 - and r8,r0 - mov r0,r27 - com r0 - and r9,r0 - eor r8,r20 - eor r9,r21 - movw r10,r4 - mov r0,r2 - com r0 - and r10,r0 - mov r0,r3 - com r0 - and r11,r0 - eor r10,r26 - eor r11,r27 - movw r12,r18 - mov r0,r4 - com r0 - and r12,r0 - mov r0,r5 - com r0 - and r13,r0 - eor r12,r2 - eor r13,r3 - movw r14,r20 - mov r0,r18 - com r0 - and r14,r0 - mov r0,r19 - com r0 - and r15,r0 - eor r14,r4 - eor r15,r5 - ldd r18,Z+10 - ldd r19,Z+11 - ldd r20,Z+12 - ldd r21,Z+13 - ldd r26,Z+14 - ldd r27,Z+15 - ldd r2,Z+16 - ldd r3,Z+17 - ldd r4,Z+18 - ldd r5,Z+19 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+10,r24 - std Z+11,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+12,r24 - std Z+13,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+14,r24 - std Z+15,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+16,r24 - std Z+17,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+18,r24 - std Z+19,r25 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - ldd r26,Z+24 - ldd r27,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r4,Z+28 - ldd r5,Z+29 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+20,r24 - std Z+21,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+22,r24 - std Z+23,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+24,r24 - std Z+25,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov 
r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+26,r24 - std Z+27,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+28,r24 - std Z+29,r25 - ldd r18,Z+30 - ldd r19,Z+31 - ldd r20,Z+32 - ldd r21,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - ldd r2,Z+36 - ldd r3,Z+37 - ldd r4,Z+38 - ldd r5,Z+39 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+30,r24 - std Z+31,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+32,r24 - std Z+33,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+34,r24 - std Z+35,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+36,r24 - std Z+37,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+38,r24 - std Z+39,r25 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - ldd r26,Z+44 - ldd r27,Z+45 - ldd r2,Z+46 - ldd r3,Z+47 - ldd r4,Z+48 - ldd r5,Z+49 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+40,r24 - std Z+41,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+42,r24 - std Z+43,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+44,r24 - std Z+45,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+46,r24 - std Z+47,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+48,r24 - std Z+49,r25 - ret -1004: - st Z,r6 - std Z+1,r7 - std Z+2,r8 - std Z+3,r9 - std Z+4,r10 - std Z+5,r11 - std Z+6,r12 - std Z+7,r13 - std Z+8,r14 - std Z+9,r15 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size keccakp_400_permute, .-keccakp_400_permute - -#endif diff --git a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-keccak.c b/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-keccak.c deleted file mode 100644 index 60539df..0000000 --- a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-keccak.c +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-keccak.h" - -#if !defined(__AVR__) - -/* Faster method to compute ((x + y) % 5) that avoids the division */ -static unsigned char const addMod5Table[9] = { - 0, 1, 2, 3, 4, 0, 1, 2, 3 -}; -#define addMod5(x, y) (addMod5Table[(x) + (y)]) - -void keccakp_200_permute(keccakp_200_state_t *state) -{ - static uint8_t const RC[18] = { - 0x01, 0x82, 0x8A, 0x00, 0x8B, 0x01, 0x81, 0x09, - 0x8A, 0x88, 0x09, 0x0A, 0x8B, 0x8B, 0x89, 0x03, - 0x02, 0x80 - }; - uint8_t C[5]; - uint8_t D; - unsigned round; - unsigned index, index2; - for (round = 0; round < 18; ++round) { - /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. Compute D on the fly */ - for (index = 0; index < 5; ++index) { - C[index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; - } - for (index = 0; index < 5; ++index) { - D = C[addMod5(index, 4)] ^ - leftRotate1_8(C[addMod5(index, 1)]); - for (index2 = 0; index2 < 5; ++index2) - state->A[index2][index] ^= D; - } - - /* Step mapping rho and pi combined into a single step. - * Rotate all lanes by a specific offset and rearrange */ - D = state->A[0][1]; - state->A[0][1] = leftRotate4_8(state->A[1][1]); - state->A[1][1] = leftRotate4_8(state->A[1][4]); - state->A[1][4] = leftRotate5_8(state->A[4][2]); - state->A[4][2] = leftRotate7_8(state->A[2][4]); - state->A[2][4] = leftRotate2_8(state->A[4][0]); - state->A[4][0] = leftRotate6_8(state->A[0][2]); - state->A[0][2] = leftRotate3_8(state->A[2][2]); - state->A[2][2] = leftRotate1_8(state->A[2][3]); - state->A[2][3] = state->A[3][4]; - state->A[3][4] = state->A[4][3]; - state->A[4][3] = leftRotate1_8(state->A[3][0]); - state->A[3][0] = leftRotate3_8(state->A[0][4]); - state->A[0][4] = leftRotate6_8(state->A[4][4]); - state->A[4][4] = leftRotate2_8(state->A[4][1]); - state->A[4][1] = leftRotate7_8(state->A[1][3]); - state->A[1][3] = leftRotate5_8(state->A[3][1]); - state->A[3][1] = leftRotate4_8(state->A[1][0]); - state->A[1][0] = leftRotate4_8(state->A[0][3]); - state->A[0][3] = leftRotate5_8(state->A[3][3]); - state->A[3][3] = leftRotate7_8(state->A[3][2]); - state->A[3][2] = leftRotate2_8(state->A[2][1]); - state->A[2][1] = leftRotate6_8(state->A[1][2]); - state->A[1][2] = leftRotate3_8(state->A[2][0]); - state->A[2][0] = leftRotate1_8(D); - - /* Step mapping chi. Combine each lane with two others in its row */ - for (index = 0; index < 5; ++index) { - C[0] = state->A[index][0]; - C[1] = state->A[index][1]; - C[2] = state->A[index][2]; - C[3] = state->A[index][3]; - C[4] = state->A[index][4]; - for (index2 = 0; index2 < 5; ++index2) { - state->A[index][index2] = - C[index2] ^ - ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); - } - } - - /* Step mapping iota. 
XOR A[0][0] with the round constant */ - state->A[0][0] ^= RC[round]; - } -} - -#if defined(LW_UTIL_LITTLE_ENDIAN) -#define keccakp_400_permute_host keccakp_400_permute -#endif - -/* Keccak-p[400] that assumes that the input is already in host byte order */ -void keccakp_400_permute_host(keccakp_400_state_t *state, unsigned rounds) -{ - static uint16_t const RC[20] = { - 0x0001, 0x8082, 0x808A, 0x8000, 0x808B, 0x0001, 0x8081, 0x8009, - 0x008A, 0x0088, 0x8009, 0x000A, 0x808B, 0x008B, 0x8089, 0x8003, - 0x8002, 0x0080, 0x800A, 0x000A - }; - uint16_t C[5]; - uint16_t D; - unsigned round; - unsigned index, index2; - for (round = 20 - rounds; round < 20; ++round) { - /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. Compute D on the fly */ - for (index = 0; index < 5; ++index) { - C[index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; - } - for (index = 0; index < 5; ++index) { - D = C[addMod5(index, 4)] ^ - leftRotate1_16(C[addMod5(index, 1)]); - for (index2 = 0; index2 < 5; ++index2) - state->A[index2][index] ^= D; - } - - /* Step mapping rho and pi combined into a single step. - * Rotate all lanes by a specific offset and rearrange */ - D = state->A[0][1]; - state->A[0][1] = leftRotate12_16(state->A[1][1]); - state->A[1][1] = leftRotate4_16 (state->A[1][4]); - state->A[1][4] = leftRotate13_16(state->A[4][2]); - state->A[4][2] = leftRotate7_16 (state->A[2][4]); - state->A[2][4] = leftRotate2_16 (state->A[4][0]); - state->A[4][0] = leftRotate14_16(state->A[0][2]); - state->A[0][2] = leftRotate11_16(state->A[2][2]); - state->A[2][2] = leftRotate9_16 (state->A[2][3]); - state->A[2][3] = leftRotate8_16 (state->A[3][4]); - state->A[3][4] = leftRotate8_16 (state->A[4][3]); - state->A[4][3] = leftRotate9_16 (state->A[3][0]); - state->A[3][0] = leftRotate11_16(state->A[0][4]); - state->A[0][4] = leftRotate14_16(state->A[4][4]); - state->A[4][4] = leftRotate2_16 (state->A[4][1]); - state->A[4][1] = leftRotate7_16 (state->A[1][3]); - state->A[1][3] = leftRotate13_16(state->A[3][1]); - state->A[3][1] = leftRotate4_16 (state->A[1][0]); - state->A[1][0] = leftRotate12_16(state->A[0][3]); - state->A[0][3] = leftRotate5_16 (state->A[3][3]); - state->A[3][3] = leftRotate15_16(state->A[3][2]); - state->A[3][2] = leftRotate10_16(state->A[2][1]); - state->A[2][1] = leftRotate6_16 (state->A[1][2]); - state->A[1][2] = leftRotate3_16 (state->A[2][0]); - state->A[2][0] = leftRotate1_16(D); - - /* Step mapping chi. Combine each lane with two others in its row */ - for (index = 0; index < 5; ++index) { - C[0] = state->A[index][0]; - C[1] = state->A[index][1]; - C[2] = state->A[index][2]; - C[3] = state->A[index][3]; - C[4] = state->A[index][4]; - for (index2 = 0; index2 < 5; ++index2) { - state->A[index][index2] = - C[index2] ^ - ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); - } - } - - /* Step mapping iota. XOR A[0][0] with the round constant */ - state->A[0][0] ^= RC[round]; - } -} - -#if !defined(LW_UTIL_LITTLE_ENDIAN) - -/** - * \brief Reverses the bytes in a Keccak-p[400] state. - * - * \param state The Keccak-p[400] state to apply byte-reversal to. 
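The addMod5() lookup used by both permutations above replaces an index reduction modulo 5 with a nine-entry table. A small standalone check (illustrative only) that the table matches (x + y) % 5 for row and column indices 0..4:

#include <assert.h>

/* Same table as above: valid because x + y never exceeds 8 for x, y in 0..4 */
static unsigned char const addMod5Table[9] = {0, 1, 2, 3, 4, 0, 1, 2, 3};

int main(void)
{
    unsigned x, y;
    for (x = 0; x < 5; ++x)
        for (y = 0; y < 5; ++y)
            assert(addMod5Table[x + y] == (x + y) % 5);
    return 0;
}
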
- */ -static void keccakp_400_reverse_bytes(keccakp_400_state_t *state) -{ - unsigned index; - unsigned char temp1; - unsigned char temp2; - for (index = 0; index < 50; index += 2) { - temp1 = state->B[index]; - temp2 = state->B[index + 1]; - state->B[index] = temp2; - state->B[index + 1] = temp1; - } -} - -/* Keccak-p[400] that requires byte reversal on input and output */ -void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds) -{ - keccakp_400_reverse_bytes(state); - keccakp_400_permute_host(state, rounds); - keccakp_400_reverse_bytes(state); -} - -#endif - -#endif /* !__AVR__ */ diff --git a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-keccak.h b/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-keccak.h deleted file mode 100644 index 2ffef42..0000000 --- a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-keccak.h +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_KECCAK_H -#define LW_INTERNAL_KECCAK_H - -#include "internal-util.h" - -/** - * \file internal-keccak.h - * \brief Internal implementation of the Keccak-p permutation. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the state for the Keccak-p[200] permutation. - */ -#define KECCAKP_200_STATE_SIZE 25 - -/** - * \brief Size of the state for the Keccak-p[400] permutation. - */ -#define KECCAKP_400_STATE_SIZE 50 - -/** - * \brief Structure of the internal state of the Keccak-p[200] permutation. - */ -typedef union -{ - uint8_t A[5][5]; /**< Keccak-p[200] state as a 5x5 array of lanes */ - uint8_t B[25]; /**< Keccak-p[200] state as a byte array */ - -} keccakp_200_state_t; - -/** - * \brief Structure of the internal state of the Keccak-p[400] permutation. - */ -typedef union -{ - uint16_t A[5][5]; /**< Keccak-p[400] state as a 5x5 array of lanes */ - uint8_t B[50]; /**< Keccak-p[400] state as a byte array */ - -} keccakp_400_state_t; - -/** - * \brief Permutes the Keccak-p[200] state. - * - * \param state The Keccak-p[200] state to be permuted. - */ -void keccakp_200_permute(keccakp_200_state_t *state); - -/** - * \brief Permutes the Keccak-p[400] state, which is assumed to be in - * little-endian byte order. - * - * \param state The Keccak-p[400] state to be permuted. - * \param rounds The number of rounds to perform (up to 20). 
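A minimal call sketch for keccakp_400_permute(), whose declaration follows; it assumes this header is available as internal-keccak.h, and the full permutation uses all 20 rounds:

#include <string.h>
#include "internal-keccak.h"

void example_keccakp_400(void)
{
    keccakp_400_state_t st;
    memset(st.B, 0, sizeof(st.B));     /* start from the all-zero state */
    keccakp_400_permute(&st, 20);      /* run the full 20-round permutation */
    (void)st.B[0];                     /* state is permuted in place */
}
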
- */ -void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-util.h b/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
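A short usage sketch for the byte-order and rotation helpers above (illustrative; assumes the header is included as internal-util.h and a GNU-style compiler for the statement-expression rotate macros):

#include <assert.h>
#include <stdint.h>
#include "internal-util.h"

void example_util_macros(void)
{
    unsigned char buf[4];
    be_store_word32(buf, 0x01020304);              /* buf = 01 02 03 04 */
    assert(be_load_word32(buf) == 0x01020304);     /* big-endian round trip */
    assert(le_load_word32(buf) == 0x04030201);     /* same bytes, reversed */
    assert(leftRotate(0x80000001u, 1) == 0x00000003u);
    assert(rightRotate(0x11223344u, 8) == 0x44112233u);
}
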
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
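The composed forms above build an arbitrary rotation out of 1-bit and byte-multiple steps, the rotations AVR can perform cheaply. A quick equivalence check for two of the compositions (illustrative, written for a host build):

#include <assert.h>
#include <stdint.h>

static uint32_t rotl32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32u - bits));
}

static uint32_t rotr32(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32u - bits));
}

int main(void)
{
    uint32_t x = 0x12345678u;
    assert(rotr32(rotl32(x, 8), 3) == rotl32(x, 5));    /* leftRotate5 shape */
    assert(rotl32(rotl32(x, 24), 2) == rotl32(x, 26));  /* leftRotate26 shape */
    return 0;
}
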
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/isap.c b/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/isap.c deleted file mode 100644 index 26d50a3..0000000 --- a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/isap.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "isap.h" -#include "internal-keccak.h" -#include "internal-ascon.h" -#include - -aead_cipher_t const isap_keccak_128a_cipher = { - "ISAP-K-128A", - ISAP_KEY_SIZE, - ISAP_NONCE_SIZE, - ISAP_TAG_SIZE, - AEAD_FLAG_NONE, - isap_keccak_128a_aead_encrypt, - isap_keccak_128a_aead_decrypt -}; - -aead_cipher_t const isap_ascon_128a_cipher = { - "ISAP-A-128A", - ISAP_KEY_SIZE, - ISAP_NONCE_SIZE, - ISAP_TAG_SIZE, - AEAD_FLAG_NONE, - isap_ascon_128a_aead_encrypt, - isap_ascon_128a_aead_decrypt -}; - -aead_cipher_t const isap_keccak_128_cipher = { - "ISAP-K-128", - ISAP_KEY_SIZE, - ISAP_NONCE_SIZE, - ISAP_TAG_SIZE, - AEAD_FLAG_NONE, - isap_keccak_128_aead_encrypt, - isap_keccak_128_aead_decrypt -}; - -aead_cipher_t const isap_ascon_128_cipher = { - "ISAP-A-128", - ISAP_KEY_SIZE, - ISAP_NONCE_SIZE, - ISAP_TAG_SIZE, - AEAD_FLAG_NONE, - isap_ascon_128_aead_encrypt, - isap_ascon_128_aead_decrypt -}; - -/* ISAP-K-128A */ -#define ISAP_ALG_NAME isap_keccak_128a -#define ISAP_RATE (144 / 8) -#define ISAP_sH 16 -#define ISAP_sE 8 -#define ISAP_sB 1 -#define ISAP_sK 8 -#define ISAP_STATE keccakp_400_state_t -#define ISAP_PERMUTE(s,r) keccakp_400_permute((s), (r)) -#include "internal-isap.h" - -/* ISAP-A-128A */ -#define ISAP_ALG_NAME isap_ascon_128a -#define ISAP_RATE (64 / 8) -#define ISAP_sH 12 -#define ISAP_sE 6 -#define ISAP_sB 1 -#define ISAP_sK 12 -#define ISAP_STATE ascon_state_t -#define ISAP_PERMUTE(s,r) ascon_permute((s), 12 - (r)) -#include "internal-isap.h" - -/* ISAP-K-128 */ -#define ISAP_ALG_NAME isap_keccak_128 -#define ISAP_RATE (144 / 8) -#define ISAP_sH 20 -#define ISAP_sE 12 -#define ISAP_sB 12 -#define ISAP_sK 12 -#define ISAP_STATE keccakp_400_state_t -#define ISAP_PERMUTE(s,r) keccakp_400_permute((s), (r)) -#include "internal-isap.h" - -/* ISAP-A-128 */ -#define ISAP_ALG_NAME isap_ascon_128 -#define ISAP_RATE (64 / 8) -#define ISAP_sH 12 -#define ISAP_sE 12 -#define ISAP_sB 12 -#define ISAP_sK 12 -#define ISAP_STATE ascon_state_t -#define ISAP_PERMUTE(s,r) ascon_permute((s), 12 - (r)) -#include "internal-isap.h" diff --git a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/isap.h b/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/isap.h deleted file mode 100644 index ddf8203..0000000 --- a/isap/Implementations/crypto_aead/isapk128av20/rhys-avr/isap.h +++ /dev/null @@ -1,330 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LWCRYPTO_ISAP_H -#define LWCRYPTO_ISAP_H - -#include "aead-common.h" - -/** - * \file isap.h - * \brief ISAP authenticated encryption algorithm. - * - * ISAP is a family of authenticated encryption algorithms that are built - * around the Keccak-p[400] or ASCON permutations. There are four algorithms - * in the family, each of which have a 128-bit key, a 128-bit nonce, and a - * 128-bit tag: - * - * \li ISAP-K-128A based around the Keccak-p[400] permutation with a - * reduced number of rounds. This is the primary member in the family. - * \li ISAP-A-128A based around the ASCON permutation with a reduced - * number of rounds. - * \li ISAP-K-128 based around the Keccak-p[400] permutation. - * \li ISAP-A-128 based around the ASCON permutation. - * - * ISAP is designed to provide some protection against adversaries - * using differential power analysis to determine the key. The - * downside is that key setup is very slow. - * - * References: https://isap.iaik.tugraz.at/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all ISAP family members. - */ -#define ISAP_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for all ISAP family members. - */ -#define ISAP_TAG_SIZE 16 - -/** - * \brief Size of the nonce for all ISAP family members. - */ -#define ISAP_NONCE_SIZE 16 - -/** - * \brief Meta-information block for the ISAP-K-128A cipher. - */ -extern aead_cipher_t const isap_keccak_128a_cipher; - -/** - * \brief Meta-information block for the ISAP-A-128A cipher. - */ -extern aead_cipher_t const isap_ascon_128a_cipher; - -/** - * \brief Meta-information block for the ISAP-K-128 cipher. - */ -extern aead_cipher_t const isap_keccak_128_cipher; - -/** - * \brief Meta-information block for the ISAP-A-128 cipher. - */ -extern aead_cipher_t const isap_ascon_128_cipher; - -/** - * \brief Encrypts and authenticates a packet with ISAP-K-128A. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa isap_keccak_128a_aead_decrypt() - */ -int isap_keccak_128a_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ISAP-K-128A. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa isap_keccak_128a_aead_encrypt() - */ -int isap_keccak_128a_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ISAP-A-128A. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa isap_ascon_128a_aead_decrypt() - */ -int isap_ascon_128a_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ISAP-A-128A. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa isap_ascon_128a_aead_encrypt() - */ -int isap_ascon_128a_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ISAP-K-128. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa isap_keccak_128_aead_decrypt() - */ -int isap_keccak_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ISAP-K-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa isap_keccak_128_aead_encrypt() - */ -int isap_keccak_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ISAP-A-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
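
The parameter blocks above fix the calling convention for every family member. A minimal usage sketch for ISAP-K-128A follows; it assumes that a NULL/0 associated-data pair is accepted (the documentation above does not say so explicitly) and that the output buffer leaves room for the 16-byte tag:

/* Usage sketch: encrypt then decrypt a short message with ISAP-K-128A.
 * Passing NULL/0 for the associated data is an assumption, not documented. */
#include "isap.h"
#include <string.h>

static int isap_k128a_demo(void)
{
    unsigned char key[ISAP_KEY_SIZE] = {0};
    unsigned char nonce[ISAP_NONCE_SIZE] = {0};
    unsigned char msg[4] = {'t', 'e', 's', 't'};
    unsigned char ct[sizeof(msg) + ISAP_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long clen, mlen;

    /* On success, clen is set to mlen + 16 (ciphertext plus tag) */
    if (isap_keccak_128a_aead_encrypt(ct, &clen, msg, sizeof(msg),
                                      NULL, 0, NULL, nonce, key) != 0)
        return -1;

    /* Returns -1 if the authentication tag does not verify */
    if (isap_keccak_128a_aead_decrypt(pt, &mlen, NULL, ct, clen,
                                      NULL, 0, nonce, key) != 0)
        return -1;

    return memcmp(pt, msg, sizeof(msg)) == 0 ? 0 : -1;
}
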
- * - * \sa isap_ascon_128_aead_decrypt() - */ -int isap_ascon_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ISAP-A-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa isap_ascon_128_aead_encrypt() - */ -int isap_ascon_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-ascon-avr.S b/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-ascon-avr.S new file mode 100644 index 0000000..e8a4fb4 --- /dev/null +++ b/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-ascon-avr.S @@ -0,0 +1,778 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global ascon_permute + .type ascon_permute, @function +ascon_permute: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ldi r18,15 + sub r18,r22 + swap r18 + or r22,r18 + ldd r3,Z+16 + ldd r2,Z+17 + ldd r27,Z+18 + ldd r26,Z+19 + ldd r21,Z+20 + ldd r20,Z+21 + ldd r19,Z+22 + ldd r18,Z+23 + ldd r11,Z+32 + ldd r10,Z+33 + ldd r9,Z+34 + ldd r8,Z+35 + ldd r7,Z+36 + ldd r6,Z+37 + ldd r5,Z+38 + ldd r4,Z+39 +20: + eor r18,r22 + ldd r23,Z+7 + ldd r12,Z+15 + ldd r13,Z+31 + eor r23,r4 + eor r4,r13 + eor r18,r12 + mov r14,r23 + mov r15,r12 + mov r24,r18 + mov r25,r13 + mov r16,r4 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r18 + and r24,r13 + and r25,r4 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r18,r25 + eor r13,r16 + eor r4,r14 + eor r12,r23 + eor r23,r4 + eor r13,r18 + com r18 + std Z+7,r23 + std Z+15,r12 + std Z+31,r13 + std Z+39,r4 + ldd r23,Z+6 + ldd r12,Z+14 + ldd r13,Z+30 + eor r23,r5 + eor r5,r13 + eor r19,r12 + mov r14,r23 + mov r15,r12 + mov r24,r19 + mov r25,r13 + mov r16,r5 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r19 + and r24,r13 + and r25,r5 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r19,r25 + eor r13,r16 + eor r5,r14 + eor r12,r23 + eor r23,r5 + eor r13,r19 + com r19 + std Z+6,r23 + std Z+14,r12 + std Z+30,r13 + std Z+38,r5 + ldd r23,Z+5 
+ ldd r12,Z+13 + ldd r13,Z+29 + eor r23,r6 + eor r6,r13 + eor r20,r12 + mov r14,r23 + mov r15,r12 + mov r24,r20 + mov r25,r13 + mov r16,r6 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r20 + and r24,r13 + and r25,r6 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r20,r25 + eor r13,r16 + eor r6,r14 + eor r12,r23 + eor r23,r6 + eor r13,r20 + com r20 + std Z+5,r23 + std Z+13,r12 + std Z+29,r13 + std Z+37,r6 + ldd r23,Z+4 + ldd r12,Z+12 + ldd r13,Z+28 + eor r23,r7 + eor r7,r13 + eor r21,r12 + mov r14,r23 + mov r15,r12 + mov r24,r21 + mov r25,r13 + mov r16,r7 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r21 + and r24,r13 + and r25,r7 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r21,r25 + eor r13,r16 + eor r7,r14 + eor r12,r23 + eor r23,r7 + eor r13,r21 + com r21 + std Z+4,r23 + std Z+12,r12 + std Z+28,r13 + std Z+36,r7 + ldd r23,Z+3 + ldd r12,Z+11 + ldd r13,Z+27 + eor r23,r8 + eor r8,r13 + eor r26,r12 + mov r14,r23 + mov r15,r12 + mov r24,r26 + mov r25,r13 + mov r16,r8 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r26 + and r24,r13 + and r25,r8 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r26,r25 + eor r13,r16 + eor r8,r14 + eor r12,r23 + eor r23,r8 + eor r13,r26 + com r26 + std Z+3,r23 + std Z+11,r12 + std Z+27,r13 + std Z+35,r8 + ldd r23,Z+2 + ldd r12,Z+10 + ldd r13,Z+26 + eor r23,r9 + eor r9,r13 + eor r27,r12 + mov r14,r23 + mov r15,r12 + mov r24,r27 + mov r25,r13 + mov r16,r9 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r27 + and r24,r13 + and r25,r9 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r27,r25 + eor r13,r16 + eor r9,r14 + eor r12,r23 + eor r23,r9 + eor r13,r27 + com r27 + std Z+2,r23 + std Z+10,r12 + std Z+26,r13 + std Z+34,r9 + ldd r23,Z+1 + ldd r12,Z+9 + ldd r13,Z+25 + eor r23,r10 + eor r10,r13 + eor r2,r12 + mov r14,r23 + mov r15,r12 + mov r24,r2 + mov r25,r13 + mov r16,r10 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r2 + and r24,r13 + and r25,r10 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r2,r25 + eor r13,r16 + eor r10,r14 + eor r12,r23 + eor r23,r10 + eor r13,r2 + com r2 + std Z+1,r23 + std Z+9,r12 + std Z+25,r13 + std Z+33,r10 + ld r23,Z + ldd r12,Z+8 + ldd r13,Z+24 + eor r23,r11 + eor r11,r13 + eor r3,r12 + mov r14,r23 + mov r15,r12 + mov r24,r3 + mov r25,r13 + mov r16,r11 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r3 + and r24,r13 + and r25,r11 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r3,r25 + eor r13,r16 + eor r11,r14 + eor r12,r23 + eor r23,r11 + eor r13,r3 + com r3 + st Z,r23 + std Z+8,r12 + std Z+24,r13 + std Z+32,r11 + ld r11,Z + ldd r10,Z+1 + ldd r9,Z+2 + ldd r8,Z+3 + ldd r7,Z+4 + ldd r6,Z+5 + ldd r5,Z+6 + ldd r4,Z+7 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r14 + mov r14,r24 + mov r24,r16 + mov r16,r0 + mov r0,r13 + mov r13,r15 + mov r15,r25 + mov r25,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r17,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r4 + mov r0,r5 + push r6 + mov r4,r7 + mov r5,r8 + mov r6,r9 + mov r7,r10 + mov r8,r11 + pop r11 + mov r10,r0 + mov r9,r23 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 
+ ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + st Z,r11 + std Z+1,r10 + std Z+2,r9 + std Z+3,r8 + std Z+4,r7 + std Z+5,r6 + std Z+6,r5 + std Z+7,r4 + ldd r11,Z+8 + ldd r10,Z+9 + ldd r9,Z+10 + ldd r8,Z+11 + ldd r7,Z+12 + ldd r6,Z+13 + ldd r5,Z+14 + ldd r4,Z+15 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r9 + mov r0,r10 + push r11 + mov r11,r8 + mov r10,r7 + mov r9,r6 + mov r8,r5 + mov r7,r4 + pop r6 + mov r5,r0 + mov r4,r23 + lsl r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + adc r4,r1 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + std Z+8,r11 + std Z+9,r10 + std Z+10,r9 + std Z+11,r8 + std Z+12,r7 + std Z+13,r6 + std Z+14,r5 + std Z+15,r4 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + bst r12,0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + bld r17,7 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + eor r24,r26 + eor r25,r27 + eor r16,r2 + eor r17,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r2 + rol r3 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r2 + rol r3 + adc r18,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + eor r26,r24 + eor r27,r25 + eor r2,r16 + eor r3,r17 + ldd r11,Z+24 + ldd r10,Z+25 + ldd r9,Z+26 + ldd r8,Z+27 + ldd r7,Z+28 + ldd r6,Z+29 + ldd r5,Z+30 + ldd r4,Z+31 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r17,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r0,r4 + mov r4,r6 + mov r6,r8 + mov r8,r10 + mov r10,r0 + mov r0,r5 + mov r5,r7 + mov r7,r9 + mov r9,r11 + mov r11,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + std Z+24,r11 + std Z+25,r10 + std Z+26,r9 + std Z+27,r8 + std Z+28,r7 + std Z+29,r6 + std Z+30,r5 + std Z+31,r4 + ldd r11,Z+32 + ldd r10,Z+33 + ldd r9,Z+34 + ldd r8,Z+35 + ldd r7,Z+36 + ldd r6,Z+37 + ldd r5,Z+38 + ldd r4,Z+39 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + lsl r12 + 
rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r9 + mov r0,r10 + push r11 + mov r11,r8 + mov r10,r7 + mov r9,r6 + mov r8,r5 + mov r7,r4 + pop r6 + mov r5,r0 + mov r4,r23 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + subi r22,15 + ldi r25,60 + cpse r22,r25 + rjmp 20b + std Z+16,r3 + std Z+17,r2 + std Z+18,r27 + std Z+19,r26 + std Z+20,r21 + std Z+21,r20 + std Z+22,r19 + std Z+23,r18 + std Z+32,r11 + std Z+33,r10 + std Z+34,r9 + std Z+35,r8 + std Z+36,r7 + std Z+37,r6 + std Z+38,r5 + std Z+39,r4 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size ascon_permute, .-ascon_permute + +#endif diff --git a/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-ascon.c b/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-ascon.c index 12a8ec6..657aabe 100644 --- a/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-ascon.c +++ b/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-ascon.c @@ -22,6 +22,8 @@ #include "internal-ascon.h" +#if !defined(__AVR__) + void ascon_permute(ascon_state_t *state, uint8_t first_round) { uint64_t t0, t1, t2, t3, t4; @@ -74,3 +76,5 @@ void ascon_permute(ascon_state_t *state, uint8_t first_round) state->S[4] = x4; #endif } + +#endif /* !__AVR__ */ diff --git a/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-keccak-avr.S b/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-keccak-avr.S new file mode 100644 index 0000000..e50ccaf --- /dev/null +++ b/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-keccak-avr.S @@ -0,0 +1,1552 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global keccakp_200_permute + .type keccakp_200_permute, @function +keccakp_200_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r26,Z+6 + ldd r27,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r4,Z+12 + ldd r5,Z+13 + ldd r6,Z+14 + ldd r7,Z+15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + ldd r24,Z+24 + push r31 + push r30 + rcall 82f + ldi r30,1 + eor r18,r30 + rcall 82f + ldi r30,130 + eor r18,r30 + rcall 82f + ldi r30,138 + eor r18,r30 + rcall 82f + mov r30,r1 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,1 + eor r18,r30 + rcall 82f + ldi r30,129 + eor r18,r30 + rcall 82f + ldi r30,9 + eor r18,r30 + rcall 82f + ldi r30,138 + eor r18,r30 + rcall 82f + ldi r30,136 + eor r18,r30 + rcall 82f + ldi r30,9 + eor r18,r30 + rcall 82f + ldi r30,10 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,137 + eor r18,r30 + rcall 82f + ldi r30,3 + eor r18,r30 + rcall 82f + ldi r30,2 + eor r18,r30 + rcall 82f + ldi r30,128 + eor r18,r30 + rjmp 420f +82: + mov r30,r18 + eor r30,r23 + eor r30,r2 + eor r30,r7 + eor r30,r12 + mov r31,r19 + 
eor r31,r26 + eor r31,r3 + eor r31,r8 + eor r31,r13 + mov r25,r20 + eor r25,r27 + eor r25,r4 + eor r25,r9 + eor r25,r14 + mov r16,r21 + eor r16,r28 + eor r16,r5 + eor r16,r10 + eor r16,r15 + mov r17,r22 + eor r17,r29 + eor r17,r6 + eor r17,r11 + eor r17,r24 + mov r0,r31 + lsl r0 + adc r0,r1 + eor r0,r17 + eor r18,r0 + eor r23,r0 + eor r2,r0 + eor r7,r0 + eor r12,r0 + mov r0,r25 + lsl r0 + adc r0,r1 + eor r0,r30 + eor r19,r0 + eor r26,r0 + eor r3,r0 + eor r8,r0 + eor r13,r0 + mov r0,r16 + lsl r0 + adc r0,r1 + eor r0,r31 + eor r20,r0 + eor r27,r0 + eor r4,r0 + eor r9,r0 + eor r14,r0 + mov r0,r17 + lsl r0 + adc r0,r1 + eor r0,r25 + eor r21,r0 + eor r28,r0 + eor r5,r0 + eor r10,r0 + eor r15,r0 + mov r0,r30 + lsl r0 + adc r0,r1 + eor r0,r16 + eor r22,r0 + eor r29,r0 + eor r6,r0 + eor r11,r0 + eor r24,r0 + mov r30,r19 + swap r26 + mov r19,r26 + swap r29 + mov r26,r29 + mov r0,r1 + lsr r14 + ror r0 + lsr r14 + ror r0 + lsr r14 + ror r0 + or r14,r0 + mov r29,r14 + bst r6,0 + lsr r6 + bld r6,7 + mov r14,r6 + lsl r12 + adc r12,r1 + lsl r12 + adc r12,r1 + mov r6,r12 + mov r0,r1 + lsr r20 + ror r0 + lsr r20 + ror r0 + or r20,r0 + mov r12,r20 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + mov r20,r4 + lsl r5 + adc r5,r1 + mov r4,r5 + mov r5,r11 + mov r11,r15 + lsl r7 + adc r7,r1 + mov r15,r7 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + mov r7,r22 + mov r0,r1 + lsr r24 + ror r0 + lsr r24 + ror r0 + or r24,r0 + mov r22,r24 + lsl r13 + adc r13,r1 + lsl r13 + adc r13,r1 + mov r24,r13 + bst r28,0 + lsr r28 + bld r28,7 + mov r13,r28 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r28,r8 + swap r23 + mov r8,r23 + swap r21 + mov r23,r21 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r21,r10 + bst r9,0 + lsr r9 + bld r9,7 + mov r10,r9 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + mov r9,r3 + mov r0,r1 + lsr r27 + ror r0 + lsr r27 + ror r0 + or r27,r0 + mov r3,r27 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + mov r27,r2 + lsl r30 + adc r30,r1 + mov r2,r30 + mov r30,r18 + mov r31,r19 + mov r25,r20 + mov r16,r21 + mov r17,r22 + mov r18,r25 + mov r0,r31 + com r0 + and r18,r0 + eor r18,r30 + mov r19,r16 + mov r0,r25 + com r0 + and r19,r0 + eor r19,r31 + mov r20,r17 + mov r0,r16 + com r0 + and r20,r0 + eor r20,r25 + mov r21,r30 + mov r0,r17 + com r0 + and r21,r0 + eor r21,r16 + mov r22,r31 + mov r0,r30 + com r0 + and r22,r0 + eor r22,r17 + mov r30,r23 + mov r31,r26 + mov r25,r27 + mov r16,r28 + mov r17,r29 + mov r23,r25 + mov r0,r31 + com r0 + and r23,r0 + eor r23,r30 + mov r26,r16 + mov r0,r25 + com r0 + and r26,r0 + eor r26,r31 + mov r27,r17 + mov r0,r16 + com r0 + and r27,r0 + eor r27,r25 + mov r28,r30 + mov r0,r17 + com r0 + and r28,r0 + eor r28,r16 + mov r29,r31 + mov r0,r30 + com r0 + and r29,r0 + eor r29,r17 + mov r30,r2 + mov r31,r3 + mov r25,r4 + mov r16,r5 + mov r17,r6 + mov r2,r25 + mov r0,r31 + com r0 + and r2,r0 + eor r2,r30 + mov r3,r16 + mov r0,r25 + com r0 + and r3,r0 + eor r3,r31 + mov r4,r17 + mov r0,r16 + com r0 + and r4,r0 + eor r4,r25 + mov r5,r30 + mov r0,r17 + com r0 + and r5,r0 + eor r5,r16 + mov r6,r31 + mov r0,r30 + com r0 + and r6,r0 + eor r6,r17 + mov r30,r7 + mov r31,r8 + mov r25,r9 + mov r16,r10 + mov r17,r11 + mov r7,r25 + mov r0,r31 + com r0 + and r7,r0 + eor r7,r30 + mov r8,r16 + mov r0,r25 + com r0 + and r8,r0 + eor r8,r31 + mov r9,r17 + mov r0,r16 + com r0 + and r9,r0 + eor r9,r25 + mov r10,r30 + mov r0,r17 + com r0 + and r10,r0 + eor r10,r16 + mov 
r11,r31 + mov r0,r30 + com r0 + and r11,r0 + eor r11,r17 + mov r30,r12 + mov r31,r13 + mov r25,r14 + mov r16,r15 + mov r17,r24 + mov r12,r25 + mov r0,r31 + com r0 + and r12,r0 + eor r12,r30 + mov r13,r16 + mov r0,r25 + com r0 + and r13,r0 + eor r13,r31 + mov r14,r17 + mov r0,r16 + com r0 + and r14,r0 + eor r14,r25 + mov r15,r30 + mov r0,r17 + com r0 + and r15,r0 + eor r15,r16 + mov r24,r31 + mov r0,r30 + com r0 + and r24,r0 + eor r24,r17 + ret +420: + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r22 + std Z+5,r23 + std Z+6,r26 + std Z+7,r27 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r4 + std Z+13,r5 + std Z+14,r6 + std Z+15,r7 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + std Z+24,r24 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size keccakp_200_permute, .-keccakp_200_permute + + .text +.global keccakp_400_permute + .type keccakp_400_permute, @function +keccakp_400_permute: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + movw r30,r24 +.L__stack_usage = 17 + ld r6,Z + ldd r7,Z+1 + ldd r8,Z+2 + ldd r9,Z+3 + ldd r10,Z+4 + ldd r11,Z+5 + ldd r12,Z+6 + ldd r13,Z+7 + ldd r14,Z+8 + ldd r15,Z+9 + cpi r22,20 + brcs 15f + rcall 153f + ldi r23,1 + eor r6,r23 +15: + cpi r22,19 + brcs 23f + rcall 153f + ldi r23,130 + eor r6,r23 + ldi r17,128 + eor r7,r17 +23: + cpi r22,18 + brcs 31f + rcall 153f + ldi r23,138 + eor r6,r23 + ldi r17,128 + eor r7,r17 +31: + cpi r22,17 + brcs 37f + rcall 153f + ldi r23,128 + eor r7,r23 +37: + cpi r22,16 + brcs 45f + rcall 153f + ldi r23,139 + eor r6,r23 + ldi r17,128 + eor r7,r17 +45: + cpi r22,15 + brcs 51f + rcall 153f + ldi r23,1 + eor r6,r23 +51: + cpi r22,14 + brcs 59f + rcall 153f + ldi r23,129 + eor r6,r23 + ldi r17,128 + eor r7,r17 +59: + cpi r22,13 + brcs 67f + rcall 153f + ldi r23,9 + eor r6,r23 + ldi r17,128 + eor r7,r17 +67: + cpi r22,12 + brcs 73f + rcall 153f + ldi r23,138 + eor r6,r23 +73: + cpi r22,11 + brcs 79f + rcall 153f + ldi r23,136 + eor r6,r23 +79: + cpi r22,10 + brcs 87f + rcall 153f + ldi r23,9 + eor r6,r23 + ldi r17,128 + eor r7,r17 +87: + cpi r22,9 + brcs 93f + rcall 153f + ldi r23,10 + eor r6,r23 +93: + cpi r22,8 + brcs 101f + rcall 153f + ldi r23,139 + eor r6,r23 + ldi r17,128 + eor r7,r17 +101: + cpi r22,7 + brcs 107f + rcall 153f + ldi r23,139 + eor r6,r23 +107: + cpi r22,6 + brcs 115f + rcall 153f + ldi r23,137 + eor r6,r23 + ldi r17,128 + eor r7,r17 +115: + cpi r22,5 + brcs 123f + rcall 153f + ldi r23,3 + eor r6,r23 + ldi r17,128 + eor r7,r17 +123: + cpi r22,4 + brcs 131f + rcall 153f + ldi r23,2 + eor r6,r23 + ldi r17,128 + eor r7,r17 +131: + cpi r22,3 + brcs 137f + rcall 153f + ldi r23,128 + eor r6,r23 +137: + cpi r22,2 + brcs 145f + rcall 153f + ldi r23,10 + eor r6,r23 + ldi r17,128 + eor r7,r17 +145: + cpi r22,1 + brcs 151f + rcall 153f + ldi r23,10 + eor r6,r23 +151: + rjmp 1004f +153: + movw r18,r6 + ldd r0,Z+10 + eor r18,r0 + ldd r0,Z+11 + eor r19,r0 + ldd r0,Z+20 + eor r18,r0 + ldd r0,Z+21 + eor r19,r0 + ldd r0,Z+30 + eor r18,r0 + ldd r0,Z+31 + eor r19,r0 + ldd r0,Z+40 + eor r18,r0 + ldd r0,Z+41 + eor r19,r0 + movw r20,r8 + ldd r0,Z+12 + eor r20,r0 + ldd r0,Z+13 + eor r21,r0 + ldd r0,Z+22 + eor r20,r0 + ldd r0,Z+23 + eor r21,r0 + ldd r0,Z+32 + eor r20,r0 + 
ldd r0,Z+33 + eor r21,r0 + ldd r0,Z+42 + eor r20,r0 + ldd r0,Z+43 + eor r21,r0 + movw r26,r10 + ldd r0,Z+14 + eor r26,r0 + ldd r0,Z+15 + eor r27,r0 + ldd r0,Z+24 + eor r26,r0 + ldd r0,Z+25 + eor r27,r0 + ldd r0,Z+34 + eor r26,r0 + ldd r0,Z+35 + eor r27,r0 + ldd r0,Z+44 + eor r26,r0 + ldd r0,Z+45 + eor r27,r0 + movw r2,r12 + ldd r0,Z+16 + eor r2,r0 + ldd r0,Z+17 + eor r3,r0 + ldd r0,Z+26 + eor r2,r0 + ldd r0,Z+27 + eor r3,r0 + ldd r0,Z+36 + eor r2,r0 + ldd r0,Z+37 + eor r3,r0 + ldd r0,Z+46 + eor r2,r0 + ldd r0,Z+47 + eor r3,r0 + movw r4,r14 + ldd r0,Z+18 + eor r4,r0 + ldd r0,Z+19 + eor r5,r0 + ldd r0,Z+28 + eor r4,r0 + ldd r0,Z+29 + eor r5,r0 + ldd r0,Z+38 + eor r4,r0 + ldd r0,Z+39 + eor r5,r0 + ldd r0,Z+48 + eor r4,r0 + ldd r0,Z+49 + eor r5,r0 + movw r24,r20 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r4 + eor r25,r5 + eor r6,r24 + eor r7,r25 + ldd r0,Z+10 + eor r0,r24 + std Z+10,r0 + ldd r0,Z+11 + eor r0,r25 + std Z+11,r0 + ldd r0,Z+20 + eor r0,r24 + std Z+20,r0 + ldd r0,Z+21 + eor r0,r25 + std Z+21,r0 + ldd r0,Z+30 + eor r0,r24 + std Z+30,r0 + ldd r0,Z+31 + eor r0,r25 + std Z+31,r0 + ldd r0,Z+40 + eor r0,r24 + std Z+40,r0 + ldd r0,Z+41 + eor r0,r25 + std Z+41,r0 + movw r24,r26 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r18 + eor r25,r19 + eor r8,r24 + eor r9,r25 + ldd r0,Z+12 + eor r0,r24 + std Z+12,r0 + ldd r0,Z+13 + eor r0,r25 + std Z+13,r0 + ldd r0,Z+22 + eor r0,r24 + std Z+22,r0 + ldd r0,Z+23 + eor r0,r25 + std Z+23,r0 + ldd r0,Z+32 + eor r0,r24 + std Z+32,r0 + ldd r0,Z+33 + eor r0,r25 + std Z+33,r0 + ldd r0,Z+42 + eor r0,r24 + std Z+42,r0 + ldd r0,Z+43 + eor r0,r25 + std Z+43,r0 + movw r24,r2 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r20 + eor r25,r21 + eor r10,r24 + eor r11,r25 + ldd r0,Z+14 + eor r0,r24 + std Z+14,r0 + ldd r0,Z+15 + eor r0,r25 + std Z+15,r0 + ldd r0,Z+24 + eor r0,r24 + std Z+24,r0 + ldd r0,Z+25 + eor r0,r25 + std Z+25,r0 + ldd r0,Z+34 + eor r0,r24 + std Z+34,r0 + ldd r0,Z+35 + eor r0,r25 + std Z+35,r0 + ldd r0,Z+44 + eor r0,r24 + std Z+44,r0 + ldd r0,Z+45 + eor r0,r25 + std Z+45,r0 + movw r24,r4 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r26 + eor r25,r27 + eor r12,r24 + eor r13,r25 + ldd r0,Z+16 + eor r0,r24 + std Z+16,r0 + ldd r0,Z+17 + eor r0,r25 + std Z+17,r0 + ldd r0,Z+26 + eor r0,r24 + std Z+26,r0 + ldd r0,Z+27 + eor r0,r25 + std Z+27,r0 + ldd r0,Z+36 + eor r0,r24 + std Z+36,r0 + ldd r0,Z+37 + eor r0,r25 + std Z+37,r0 + ldd r0,Z+46 + eor r0,r24 + std Z+46,r0 + ldd r0,Z+47 + eor r0,r25 + std Z+47,r0 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r2 + eor r25,r3 + eor r14,r24 + eor r15,r25 + ldd r0,Z+18 + eor r0,r24 + std Z+18,r0 + ldd r0,Z+19 + eor r0,r25 + std Z+19,r0 + ldd r0,Z+28 + eor r0,r24 + std Z+28,r0 + ldd r0,Z+29 + eor r0,r25 + std Z+29,r0 + ldd r0,Z+38 + eor r0,r24 + std Z+38,r0 + ldd r0,Z+39 + eor r0,r25 + std Z+39,r0 + ldd r0,Z+48 + eor r0,r24 + std Z+48,r0 + ldd r0,Z+49 + eor r0,r25 + std Z+49,r0 + movw r24,r8 + ldd r8,Z+12 + ldd r9,Z+13 + mov r0,r9 + mov r9,r8 + mov r8,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldd r18,Z+18 + ldd r19,Z+19 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+12,r18 + std Z+13,r19 + ldd r18,Z+44 + ldd r19,Z+45 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+18,r18 + std Z+19,r19 + ldd r18,Z+28 + ldd r19,Z+29 + mov r0,r19 + mov r19,r18 + mov r18,r0 + bst r18,0 + lsr 
r19 + ror r18 + bld r19,7 + std Z+44,r18 + std Z+45,r19 + ldd r18,Z+40 + ldd r19,Z+41 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+28,r18 + std Z+29,r19 + movw r18,r10 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+40,r18 + std Z+41,r19 + ldd r10,Z+24 + ldd r11,Z+25 + mov r0,r11 + mov r11,r10 + mov r10,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldd r18,Z+26 + ldd r19,Z+27 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + std Z+24,r18 + std Z+25,r19 + ldd r18,Z+38 + ldd r19,Z+39 + mov r0,r19 + mov r19,r18 + mov r18,r0 + std Z+26,r18 + std Z+27,r19 + ldd r18,Z+46 + ldd r19,Z+47 + mov r0,r19 + mov r19,r18 + mov r18,r0 + std Z+38,r18 + std Z+39,r19 + ldd r18,Z+30 + ldd r19,Z+31 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + std Z+46,r18 + std Z+47,r19 + movw r18,r14 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+30,r18 + std Z+31,r19 + ldd r14,Z+48 + ldd r15,Z+49 + mov r0,r1 + lsr r15 + ror r14 + ror r0 + lsr r15 + ror r14 + ror r0 + or r15,r0 + ldd r18,Z+42 + ldd r19,Z+43 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+48,r18 + std Z+49,r19 + ldd r18,Z+16 + ldd r19,Z+17 + mov r0,r19 + mov r19,r18 + mov r18,r0 + bst r18,0 + lsr r19 + ror r18 + bld r19,7 + std Z+42,r18 + std Z+43,r19 + ldd r18,Z+32 + ldd r19,Z+33 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+16,r18 + std Z+17,r19 + ldd r18,Z+10 + ldd r19,Z+11 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+32,r18 + std Z+33,r19 + movw r18,r12 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+10,r18 + std Z+11,r19 + ldd r12,Z+36 + ldd r13,Z+37 + mov r0,r13 + mov r13,r12 + mov r12,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + or r13,r0 + ldd r18,Z+34 + ldd r19,Z+35 + bst r18,0 + lsr r19 + ror r18 + bld r19,7 + std Z+36,r18 + std Z+37,r19 + ldd r18,Z+22 + ldd r19,Z+23 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+34,r18 + std Z+35,r19 + ldd r18,Z+14 + ldd r19,Z+15 + mov r0,r19 + mov r19,r18 + mov r18,r0 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+22,r18 + std Z+23,r19 + ldd r18,Z+20 + ldd r19,Z+21 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+14,r18 + std Z+15,r19 + lsl r24 + rol r25 + adc r24,r1 + std Z+20,r24 + std Z+21,r25 + movw r18,r6 + movw r20,r8 + movw r26,r10 + movw r2,r12 + movw r4,r14 + movw r6,r26 + mov r0,r20 + com r0 + and r6,r0 + mov r0,r21 + com r0 + and r7,r0 + eor r6,r18 + eor r7,r19 + movw r8,r2 + mov r0,r26 + com r0 + and r8,r0 + mov r0,r27 + com r0 + and r9,r0 + eor r8,r20 + eor r9,r21 + movw r10,r4 + mov r0,r2 + com r0 + and r10,r0 + mov r0,r3 + com r0 + and r11,r0 + eor r10,r26 + eor r11,r27 + movw r12,r18 + mov r0,r4 + com r0 + and r12,r0 + mov r0,r5 + com r0 + and r13,r0 + eor r12,r2 + eor r13,r3 + movw r14,r20 + mov r0,r18 + com r0 + and r14,r0 + mov r0,r19 + com r0 + and r15,r0 + eor r14,r4 + eor 
r15,r5 + ldd r18,Z+10 + ldd r19,Z+11 + ldd r20,Z+12 + ldd r21,Z+13 + ldd r26,Z+14 + ldd r27,Z+15 + ldd r2,Z+16 + ldd r3,Z+17 + ldd r4,Z+18 + ldd r5,Z+19 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+10,r24 + std Z+11,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+12,r24 + std Z+13,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+14,r24 + std Z+15,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+16,r24 + std Z+17,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+18,r24 + std Z+19,r25 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + ldd r26,Z+24 + ldd r27,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r4,Z+28 + ldd r5,Z+29 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+20,r24 + std Z+21,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+22,r24 + std Z+23,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+24,r24 + std Z+25,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+26,r24 + std Z+27,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+28,r24 + std Z+29,r25 + ldd r18,Z+30 + ldd r19,Z+31 + ldd r20,Z+32 + ldd r21,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + ldd r2,Z+36 + ldd r3,Z+37 + ldd r4,Z+38 + ldd r5,Z+39 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+30,r24 + std Z+31,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+32,r24 + std Z+33,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+34,r24 + std Z+35,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+36,r24 + std Z+37,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+38,r24 + std Z+39,r25 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + ldd r26,Z+44 + ldd r27,Z+45 + ldd r2,Z+46 + ldd r3,Z+47 + ldd r4,Z+48 + ldd r5,Z+49 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+40,r24 + std Z+41,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+42,r24 + std Z+43,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+44,r24 + std Z+45,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+46,r24 + std Z+47,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+48,r24 + std Z+49,r25 + ret +1004: + st Z,r6 + std Z+1,r7 + std Z+2,r8 + std Z+3,r9 + std Z+4,r10 + std Z+5,r11 + std Z+6,r12 + std Z+7,r13 + std Z+8,r14 + std 
Z+9,r15 + pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size keccakp_400_permute, .-keccakp_400_permute + +#endif diff --git a/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-keccak.c b/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-keccak.c index c3c4011..60539df 100644 --- a/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-keccak.c +++ b/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-keccak.c @@ -22,74 +22,79 @@ #include "internal-keccak.h" +#if !defined(__AVR__) + /* Faster method to compute ((x + y) % 5) that avoids the division */ static unsigned char const addMod5Table[9] = { 0, 1, 2, 3, 4, 0, 1, 2, 3 }; #define addMod5(x, y) (addMod5Table[(x) + (y)]) -void keccakp_200_permute(keccakp_200_state_t *state, unsigned rounds) +void keccakp_200_permute(keccakp_200_state_t *state) { static uint8_t const RC[18] = { 0x01, 0x82, 0x8A, 0x00, 0x8B, 0x01, 0x81, 0x09, 0x8A, 0x88, 0x09, 0x0A, 0x8B, 0x8B, 0x89, 0x03, 0x02, 0x80 }; - uint8_t B[5][5]; + uint8_t C[5]; uint8_t D; unsigned round; unsigned index, index2; - for (round = 18 - rounds; round < 18; ++round) { + for (round = 0; round < 18; ++round) { /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. To save a bit of memory, - * we use the first row of B to store C and compute D on the fly */ + * arrays of size 5 called C and D. Compute D on the fly */ for (index = 0; index < 5; ++index) { - B[0][index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; + C[index] = state->A[0][index] ^ state->A[1][index] ^ + state->A[2][index] ^ state->A[3][index] ^ + state->A[4][index]; } for (index = 0; index < 5; ++index) { - D = B[0][addMod5(index, 4)] ^ - leftRotate1_8(B[0][addMod5(index, 1)]); + D = C[addMod5(index, 4)] ^ + leftRotate1_8(C[addMod5(index, 1)]); for (index2 = 0; index2 < 5; ++index2) state->A[index2][index] ^= D; } /* Step mapping rho and pi combined into a single step. 
* Rotate all lanes by a specific offset and rearrange */ - B[0][0] = state->A[0][0]; - B[1][0] = leftRotate4_8(state->A[0][3]); - B[2][0] = leftRotate1_8(state->A[0][1]); - B[3][0] = leftRotate3_8(state->A[0][4]); - B[4][0] = leftRotate6_8(state->A[0][2]); - B[0][1] = leftRotate4_8(state->A[1][1]); - B[1][1] = leftRotate4_8(state->A[1][4]); - B[2][1] = leftRotate6_8(state->A[1][2]); - B[3][1] = leftRotate4_8(state->A[1][0]); - B[4][1] = leftRotate7_8(state->A[1][3]); - B[0][2] = leftRotate3_8(state->A[2][2]); - B[1][2] = leftRotate3_8(state->A[2][0]); - B[2][2] = leftRotate1_8(state->A[2][3]); - B[3][2] = leftRotate2_8(state->A[2][1]); - B[4][2] = leftRotate7_8(state->A[2][4]); - B[0][3] = leftRotate5_8(state->A[3][3]); - B[1][3] = leftRotate5_8(state->A[3][1]); - B[2][3] = state->A[3][4]; - B[3][3] = leftRotate7_8(state->A[3][2]); - B[4][3] = leftRotate1_8(state->A[3][0]); - B[0][4] = leftRotate6_8(state->A[4][4]); - B[1][4] = leftRotate5_8(state->A[4][2]); - B[2][4] = leftRotate2_8(state->A[4][0]); - B[3][4] = state->A[4][3]; - B[4][4] = leftRotate2_8(state->A[4][1]); + D = state->A[0][1]; + state->A[0][1] = leftRotate4_8(state->A[1][1]); + state->A[1][1] = leftRotate4_8(state->A[1][4]); + state->A[1][4] = leftRotate5_8(state->A[4][2]); + state->A[4][2] = leftRotate7_8(state->A[2][4]); + state->A[2][4] = leftRotate2_8(state->A[4][0]); + state->A[4][0] = leftRotate6_8(state->A[0][2]); + state->A[0][2] = leftRotate3_8(state->A[2][2]); + state->A[2][2] = leftRotate1_8(state->A[2][3]); + state->A[2][3] = state->A[3][4]; + state->A[3][4] = state->A[4][3]; + state->A[4][3] = leftRotate1_8(state->A[3][0]); + state->A[3][0] = leftRotate3_8(state->A[0][4]); + state->A[0][4] = leftRotate6_8(state->A[4][4]); + state->A[4][4] = leftRotate2_8(state->A[4][1]); + state->A[4][1] = leftRotate7_8(state->A[1][3]); + state->A[1][3] = leftRotate5_8(state->A[3][1]); + state->A[3][1] = leftRotate4_8(state->A[1][0]); + state->A[1][0] = leftRotate4_8(state->A[0][3]); + state->A[0][3] = leftRotate5_8(state->A[3][3]); + state->A[3][3] = leftRotate7_8(state->A[3][2]); + state->A[3][2] = leftRotate2_8(state->A[2][1]); + state->A[2][1] = leftRotate6_8(state->A[1][2]); + state->A[1][2] = leftRotate3_8(state->A[2][0]); + state->A[2][0] = leftRotate1_8(D); /* Step mapping chi. Combine each lane with two others in its row */ for (index = 0; index < 5; ++index) { + C[0] = state->A[index][0]; + C[1] = state->A[index][1]; + C[2] = state->A[index][2]; + C[3] = state->A[index][3]; + C[4] = state->A[index][4]; for (index2 = 0; index2 < 5; ++index2) { - state->A[index2][index] = - B[index2][index] ^ - ((~B[index2][addMod5(index, 1)]) & - B[index2][addMod5(index, 2)]); + state->A[index][index2] = + C[index2] ^ + ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); } } @@ -110,61 +115,64 @@ void keccakp_400_permute_host(keccakp_400_state_t *state, unsigned rounds) 0x008A, 0x0088, 0x8009, 0x000A, 0x808B, 0x008B, 0x8089, 0x8003, 0x8002, 0x0080, 0x800A, 0x000A }; - uint16_t B[5][5]; + uint16_t C[5]; uint16_t D; unsigned round; unsigned index, index2; for (round = 20 - rounds; round < 20; ++round) { /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. To save a bit of memory, - * we use the first row of B to store C and compute D on the fly */ + * arrays of size 5 called C and D. 
Compute D on the fly */ for (index = 0; index < 5; ++index) { - B[0][index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; + C[index] = state->A[0][index] ^ state->A[1][index] ^ + state->A[2][index] ^ state->A[3][index] ^ + state->A[4][index]; } for (index = 0; index < 5; ++index) { - D = B[0][addMod5(index, 4)] ^ - leftRotate1_16(B[0][addMod5(index, 1)]); + D = C[addMod5(index, 4)] ^ + leftRotate1_16(C[addMod5(index, 1)]); for (index2 = 0; index2 < 5; ++index2) state->A[index2][index] ^= D; } /* Step mapping rho and pi combined into a single step. * Rotate all lanes by a specific offset and rearrange */ - B[0][0] = state->A[0][0]; - B[1][0] = leftRotate12_16(state->A[0][3]); - B[2][0] = leftRotate1_16 (state->A[0][1]); - B[3][0] = leftRotate11_16(state->A[0][4]); - B[4][0] = leftRotate14_16(state->A[0][2]); - B[0][1] = leftRotate12_16(state->A[1][1]); - B[1][1] = leftRotate4_16 (state->A[1][4]); - B[2][1] = leftRotate6_16 (state->A[1][2]); - B[3][1] = leftRotate4_16 (state->A[1][0]); - B[4][1] = leftRotate7_16 (state->A[1][3]); - B[0][2] = leftRotate11_16(state->A[2][2]); - B[1][2] = leftRotate3_16 (state->A[2][0]); - B[2][2] = leftRotate9_16 (state->A[2][3]); - B[3][2] = leftRotate10_16(state->A[2][1]); - B[4][2] = leftRotate7_16 (state->A[2][4]); - B[0][3] = leftRotate5_16 (state->A[3][3]); - B[1][3] = leftRotate13_16(state->A[3][1]); - B[2][3] = leftRotate8_16 (state->A[3][4]); - B[3][3] = leftRotate15_16(state->A[3][2]); - B[4][3] = leftRotate9_16 (state->A[3][0]); - B[0][4] = leftRotate14_16(state->A[4][4]); - B[1][4] = leftRotate13_16(state->A[4][2]); - B[2][4] = leftRotate2_16 (state->A[4][0]); - B[3][4] = leftRotate8_16 (state->A[4][3]); - B[4][4] = leftRotate2_16 (state->A[4][1]); + D = state->A[0][1]; + state->A[0][1] = leftRotate12_16(state->A[1][1]); + state->A[1][1] = leftRotate4_16 (state->A[1][4]); + state->A[1][4] = leftRotate13_16(state->A[4][2]); + state->A[4][2] = leftRotate7_16 (state->A[2][4]); + state->A[2][4] = leftRotate2_16 (state->A[4][0]); + state->A[4][0] = leftRotate14_16(state->A[0][2]); + state->A[0][2] = leftRotate11_16(state->A[2][2]); + state->A[2][2] = leftRotate9_16 (state->A[2][3]); + state->A[2][3] = leftRotate8_16 (state->A[3][4]); + state->A[3][4] = leftRotate8_16 (state->A[4][3]); + state->A[4][3] = leftRotate9_16 (state->A[3][0]); + state->A[3][0] = leftRotate11_16(state->A[0][4]); + state->A[0][4] = leftRotate14_16(state->A[4][4]); + state->A[4][4] = leftRotate2_16 (state->A[4][1]); + state->A[4][1] = leftRotate7_16 (state->A[1][3]); + state->A[1][3] = leftRotate13_16(state->A[3][1]); + state->A[3][1] = leftRotate4_16 (state->A[1][0]); + state->A[1][0] = leftRotate12_16(state->A[0][3]); + state->A[0][3] = leftRotate5_16 (state->A[3][3]); + state->A[3][3] = leftRotate15_16(state->A[3][2]); + state->A[3][2] = leftRotate10_16(state->A[2][1]); + state->A[2][1] = leftRotate6_16 (state->A[1][2]); + state->A[1][2] = leftRotate3_16 (state->A[2][0]); + state->A[2][0] = leftRotate1_16(D); /* Step mapping chi. 
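
In both permutations the chi step now caches the current row in the five-byte temporary C[] and recombines the row in place instead of reading from the old B[][] scratch array. A minimal sketch of that row operation, using a hypothetical helper name:

#include <stdint.h>
#include <stdio.h>

/* chi on a single 5-lane row: each lane is XORed with the AND of the
 * complement of the next lane and the lane after that, all within the row. */
static void chi_row(uint8_t row[5])
{
    uint8_t t[5];
    unsigned i;
    for (i = 0; i < 5; ++i)
        t[i] = row[i];
    for (i = 0; i < 5; ++i)
        row[i] = t[i] ^ ((uint8_t)(~t[(i + 1) % 5]) & t[(i + 2) % 5]);
}

int main(void)
{
    uint8_t row[5] = { 0x01, 0x02, 0x04, 0x08, 0x10 };
    chi_row(row);
    printf("%02X %02X %02X %02X %02X\n", row[0], row[1], row[2], row[3], row[4]);
    return 0;
}
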
Combine each lane with two others in its row */ for (index = 0; index < 5; ++index) { + C[0] = state->A[index][0]; + C[1] = state->A[index][1]; + C[2] = state->A[index][2]; + C[3] = state->A[index][3]; + C[4] = state->A[index][4]; for (index2 = 0; index2 < 5; ++index2) { - state->A[index2][index] = - B[index2][index] ^ - ((~B[index2][addMod5(index, 1)]) & - B[index2][addMod5(index, 2)]); + state->A[index][index2] = + C[index2] ^ + ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); } } @@ -202,3 +210,5 @@ void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds) } #endif + +#endif /* !__AVR__ */ diff --git a/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-keccak.h b/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-keccak.h index 026da50..2ffef42 100644 --- a/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-keccak.h +++ b/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-keccak.h @@ -68,9 +68,8 @@ typedef union * \brief Permutes the Keccak-p[200] state. * * \param state The Keccak-p[200] state to be permuted. - * \param rounds The number of rounds to perform (up to 18). */ -void keccakp_200_permute(keccakp_200_state_t *state, unsigned rounds); +void keccakp_200_permute(keccakp_200_state_t *state); /** * \brief Permutes the Keccak-p[400] state, which is assumed to be in diff --git a/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-util.h b/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-util.h index e79158c..e30166d 100644 --- a/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-util.h +++ b/isap/Implementations/crypto_aead/isapk128av20/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
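
The composed-rotation strategy described in the new comment above builds every 32-bit rotation out of rotations by 1 bit and by multiples of 8, which are the cheap cases on the AVR. A standalone sanity check for one case (left rotate by 5 expressed as rotate-left-8 followed by rotate-right-3), using local helper functions rather than the library macros:

#include <stdint.h>
#include <stdio.h>

static uint32_t rotl32(uint32_t x, unsigned n) { return (x << n) | (x >> (32u - n)); }
static uint32_t rotr32(uint32_t x, unsigned n) { return (x >> n) | (x << (32u - n)); }

int main(void)
{
    uint32_t x = 0x12345678;
    uint32_t direct   = rotl32(x, 5);
    uint32_t composed = rotr32(rotl32(x, 8), 3);   /* left 8, then right 3 == left 5 */
    printf("%08lX %08lX %s\n", (unsigned long)direct, (unsigned long)composed,
           direct == composed ? "match" : "MISMATCH");
    return 0;
}
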
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/aead-common.c b/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/aead-common.h b/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. 
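
The aead_check_tag() shown above stays constant time with a small masking trick: the XOR of the two tags is ORed into accum, which is 0 only when every byte matched, and (accum - 1) >> 8 then becomes an all-ones mask on a match and 0 on a mismatch (assuming the usual arithmetic right shift of a negative int, which the library code also relies on). A minimal illustration of the two cases:

#include <stdio.h>

int main(void)
{
    int equal_accum = 0x00;   /* every tag byte matched */
    int diff_accum  = 0x5A;   /* some XOR difference survived */

    /* mask is -1 (all ones) on match, 0 on mismatch; ~mask is the return value */
    printf("match:    mask=%d return=%d\n",
           (equal_accum - 1) >> 8, ~((equal_accum - 1) >> 8));
    printf("mismatch: mask=%d return=%d\n",
           (diff_accum - 1) >> 8, ~((diff_accum - 1) >> 8));
    return 0;
}
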
- * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/api.h b/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/encrypt.c b/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/encrypt.c deleted file mode 100644 index 72d2d68..0000000 --- a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "isap.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return isap_keccak_128_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return isap_keccak_128_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-ascon-avr.S b/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-ascon-avr.S deleted file mode 100644 index e8a4fb4..0000000 --- a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-ascon-avr.S +++ /dev/null @@ -1,778 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global ascon_permute - .type ascon_permute, @function -ascon_permute: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ldi r18,15 - sub r18,r22 - swap r18 - or r22,r18 - ldd r3,Z+16 - ldd r2,Z+17 - ldd r27,Z+18 - ldd r26,Z+19 - ldd r21,Z+20 - ldd r20,Z+21 - ldd r19,Z+22 - ldd r18,Z+23 - ldd r11,Z+32 - ldd r10,Z+33 - ldd r9,Z+34 - ldd r8,Z+35 - ldd r7,Z+36 - ldd r6,Z+37 - ldd r5,Z+38 - ldd r4,Z+39 -20: - eor r18,r22 - ldd r23,Z+7 - ldd r12,Z+15 - ldd r13,Z+31 - eor r23,r4 - eor r4,r13 - eor r18,r12 - mov r14,r23 - mov r15,r12 - mov r24,r18 - mov r25,r13 - mov r16,r4 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r18 - and r24,r13 - and r25,r4 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r18,r25 - eor r13,r16 - eor r4,r14 - 
eor r12,r23 - eor r23,r4 - eor r13,r18 - com r18 - std Z+7,r23 - std Z+15,r12 - std Z+31,r13 - std Z+39,r4 - ldd r23,Z+6 - ldd r12,Z+14 - ldd r13,Z+30 - eor r23,r5 - eor r5,r13 - eor r19,r12 - mov r14,r23 - mov r15,r12 - mov r24,r19 - mov r25,r13 - mov r16,r5 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r19 - and r24,r13 - and r25,r5 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r19,r25 - eor r13,r16 - eor r5,r14 - eor r12,r23 - eor r23,r5 - eor r13,r19 - com r19 - std Z+6,r23 - std Z+14,r12 - std Z+30,r13 - std Z+38,r5 - ldd r23,Z+5 - ldd r12,Z+13 - ldd r13,Z+29 - eor r23,r6 - eor r6,r13 - eor r20,r12 - mov r14,r23 - mov r15,r12 - mov r24,r20 - mov r25,r13 - mov r16,r6 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r20 - and r24,r13 - and r25,r6 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r20,r25 - eor r13,r16 - eor r6,r14 - eor r12,r23 - eor r23,r6 - eor r13,r20 - com r20 - std Z+5,r23 - std Z+13,r12 - std Z+29,r13 - std Z+37,r6 - ldd r23,Z+4 - ldd r12,Z+12 - ldd r13,Z+28 - eor r23,r7 - eor r7,r13 - eor r21,r12 - mov r14,r23 - mov r15,r12 - mov r24,r21 - mov r25,r13 - mov r16,r7 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r21 - and r24,r13 - and r25,r7 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r21,r25 - eor r13,r16 - eor r7,r14 - eor r12,r23 - eor r23,r7 - eor r13,r21 - com r21 - std Z+4,r23 - std Z+12,r12 - std Z+28,r13 - std Z+36,r7 - ldd r23,Z+3 - ldd r12,Z+11 - ldd r13,Z+27 - eor r23,r8 - eor r8,r13 - eor r26,r12 - mov r14,r23 - mov r15,r12 - mov r24,r26 - mov r25,r13 - mov r16,r8 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r26 - and r24,r13 - and r25,r8 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r26,r25 - eor r13,r16 - eor r8,r14 - eor r12,r23 - eor r23,r8 - eor r13,r26 - com r26 - std Z+3,r23 - std Z+11,r12 - std Z+27,r13 - std Z+35,r8 - ldd r23,Z+2 - ldd r12,Z+10 - ldd r13,Z+26 - eor r23,r9 - eor r9,r13 - eor r27,r12 - mov r14,r23 - mov r15,r12 - mov r24,r27 - mov r25,r13 - mov r16,r9 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r27 - and r24,r13 - and r25,r9 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r27,r25 - eor r13,r16 - eor r9,r14 - eor r12,r23 - eor r23,r9 - eor r13,r27 - com r27 - std Z+2,r23 - std Z+10,r12 - std Z+26,r13 - std Z+34,r9 - ldd r23,Z+1 - ldd r12,Z+9 - ldd r13,Z+25 - eor r23,r10 - eor r10,r13 - eor r2,r12 - mov r14,r23 - mov r15,r12 - mov r24,r2 - mov r25,r13 - mov r16,r10 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r2 - and r24,r13 - and r25,r10 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r2,r25 - eor r13,r16 - eor r10,r14 - eor r12,r23 - eor r23,r10 - eor r13,r2 - com r2 - std Z+1,r23 - std Z+9,r12 - std Z+25,r13 - std Z+33,r10 - ld r23,Z - ldd r12,Z+8 - ldd r13,Z+24 - eor r23,r11 - eor r11,r13 - eor r3,r12 - mov r14,r23 - mov r15,r12 - mov r24,r3 - mov r25,r13 - mov r16,r11 - com r14 - com r15 - com r24 - com r25 - com r16 - and r14,r12 - and r15,r3 - and r24,r13 - and r25,r11 - and r16,r23 - eor r23,r15 - eor r12,r24 - eor r3,r25 - eor r13,r16 - eor r11,r14 - eor r12,r23 - eor r23,r11 - eor r13,r3 - com r3 - st Z,r23 - std Z+8,r12 - std Z+24,r13 - std Z+32,r11 - ld r11,Z - ldd r10,Z+1 - ldd r9,Z+2 - ldd r8,Z+3 - ldd r7,Z+4 - ldd r6,Z+5 - ldd r5,Z+6 - ldd r4,Z+7 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r14 - mov r14,r24 - mov r24,r16 - mov r16,r0 - mov r0,r13 - mov r13,r15 - mov r15,r25 - mov r25,r17 - mov r17,r0 - mov 
r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r17,r0 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r4 - mov r0,r5 - push r6 - mov r4,r7 - mov r5,r8 - mov r6,r9 - mov r7,r10 - mov r8,r11 - pop r11 - mov r10,r0 - mov r9,r23 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - st Z,r11 - std Z+1,r10 - std Z+2,r9 - std Z+3,r8 - std Z+4,r7 - std Z+5,r6 - std Z+6,r5 - std Z+7,r4 - ldd r11,Z+8 - ldd r10,Z+9 - ldd r9,Z+10 - ldd r8,Z+11 - ldd r7,Z+12 - ldd r6,Z+13 - ldd r5,Z+14 - ldd r4,Z+15 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r9 - mov r0,r10 - push r11 - mov r11,r8 - mov r10,r7 - mov r9,r6 - mov r8,r5 - mov r7,r4 - pop r6 - mov r5,r0 - mov r4,r23 - lsl r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - adc r4,r1 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - std Z+8,r11 - std Z+9,r10 - std Z+10,r9 - std Z+11,r8 - std Z+12,r7 - std Z+13,r6 - std Z+14,r5 - std Z+15,r4 - movw r12,r18 - movw r14,r20 - movw r24,r26 - movw r16,r2 - bst r12,0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - bld r17,7 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - eor r24,r26 - eor r25,r27 - eor r16,r2 - eor r17,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r26 - mov r26,r27 - mov r27,r2 - mov r2,r3 - mov r3,r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r2 - rol r3 - adc r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r2 - rol r3 - adc r18,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - eor r26,r24 - eor r27,r25 - eor r2,r16 - eor r3,r17 - ldd r11,Z+24 - ldd r10,Z+25 - ldd r9,Z+26 - ldd r8,Z+27 - ldd r7,Z+28 - ldd r6,Z+29 - ldd r5,Z+30 - ldd r4,Z+31 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - mov r0,r1 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r0 - or r17,r0 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r0,r4 - mov r4,r6 - mov r6,r8 - mov r8,r10 - mov r10,r0 - mov r0,r5 - mov r5,r7 - mov r7,r9 - mov r9,r11 - mov r11,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - 
ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - std Z+24,r11 - std Z+25,r10 - std Z+26,r9 - std Z+27,r8 - std Z+28,r7 - std Z+29,r6 - std Z+30,r5 - std Z+31,r4 - ldd r11,Z+32 - ldd r10,Z+33 - ldd r9,Z+34 - ldd r8,Z+35 - ldd r7,Z+36 - ldd r6,Z+37 - ldd r5,Z+38 - ldd r4,Z+39 - movw r12,r4 - movw r14,r6 - movw r24,r8 - movw r16,r10 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r24 - mov r24,r25 - mov r25,r16 - mov r16,r17 - mov r17,r0 - lsl r12 - rol r13 - rol r14 - rol r15 - rol r24 - rol r25 - rol r16 - rol r17 - adc r12,r1 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - mov r23,r9 - mov r0,r10 - push r11 - mov r11,r8 - mov r10,r7 - mov r9,r6 - mov r8,r5 - mov r7,r4 - pop r6 - mov r5,r0 - mov r4,r23 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r0 - or r11,r0 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - subi r22,15 - ldi r25,60 - cpse r22,r25 - rjmp 20b - std Z+16,r3 - std Z+17,r2 - std Z+18,r27 - std Z+19,r26 - std Z+20,r21 - std Z+21,r20 - std Z+22,r19 - std Z+23,r18 - std Z+32,r11 - std Z+33,r10 - std Z+34,r9 - std Z+35,r8 - std Z+36,r7 - std Z+37,r6 - std Z+38,r5 - std Z+39,r4 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size ascon_permute, .-ascon_permute - -#endif diff --git a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-ascon.c b/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-ascon.c deleted file mode 100644 index 657aabe..0000000 --- a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-ascon.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "internal-ascon.h" - -#if !defined(__AVR__) - -void ascon_permute(ascon_state_t *state, uint8_t first_round) -{ - uint64_t t0, t1, t2, t3, t4; -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = be_load_word64(state->B); - uint64_t x1 = be_load_word64(state->B + 8); - uint64_t x2 = be_load_word64(state->B + 16); - uint64_t x3 = be_load_word64(state->B + 24); - uint64_t x4 = be_load_word64(state->B + 32); -#else - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; -#endif - while (first_round < 12) { - /* Add the round constant to the state */ - x2 ^= ((0x0F - first_round) << 4) | first_round; - - /* Substitution layer - apply the s-box using bit-slicing - * according to the algorithm recommended in the specification */ - x0 ^= x4; x4 ^= x3; x2 ^= x1; - t0 = ~x0; t1 = ~x1; t2 = ~x2; t3 = ~x3; t4 = ~x4; - t0 &= x1; t1 &= x2; t2 &= x3; t3 &= x4; t4 &= x0; - x0 ^= t1; x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t0; - x1 ^= x0; x0 ^= x4; x3 ^= x2; x2 = ~x2; - - /* Linear diffusion layer */ - x0 ^= rightRotate19_64(x0) ^ rightRotate28_64(x0); - x1 ^= rightRotate61_64(x1) ^ rightRotate39_64(x1); - x2 ^= rightRotate1_64(x2) ^ rightRotate6_64(x2); - x3 ^= rightRotate10_64(x3) ^ rightRotate17_64(x3); - x4 ^= rightRotate7_64(x4) ^ rightRotate41_64(x4); - - /* Move onto the next round */ - ++first_round; - } -#if defined(LW_UTIL_LITTLE_ENDIAN) - be_store_word64(state->B, x0); - be_store_word64(state->B + 8, x1); - be_store_word64(state->B + 16, x2); - be_store_word64(state->B + 24, x3); - be_store_word64(state->B + 32, x4); -#else - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; -#endif -} - -#endif /* !__AVR__ */ diff --git a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-ascon.h b/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-ascon.h deleted file mode 100644 index d3fa3ca..0000000 --- a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-ascon.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_ASCON_H -#define LW_INTERNAL_ASCON_H - -#include "internal-util.h" - -/** - * \file internal-ascon.h - * \brief Internal implementation of the ASCON permutation. 
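
The removed C implementation of ascon_permute() (internal-ascon.c above) derives its round constants on the fly as ((0x0F - first_round) << 4) | first_round instead of storing a table; for rounds 0..11 this reproduces the familiar ASCON constants 0xF0, 0xE1, ..., 0x4B. A one-loop check, illustrative only:

#include <stdio.h>

int main(void)
{
    unsigned round;
    for (round = 0; round < 12; ++round)
        printf("%02X ", ((0x0F - round) << 4) | round);   /* F0 E1 D2 ... 5A 4B */
    printf("\n");
    return 0;
}
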
- * - * References: http://competitions.cr.yp.to/round3/asconv12.pdf, - * http://ascon.iaik.tugraz.at/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Structure of the internal state of the ASCON permutation. - */ -typedef union -{ - uint64_t S[5]; /**< Words of the state */ - uint8_t B[40]; /**< Bytes of the state */ - -} ascon_state_t; - -/** - * \brief Permutes the ASCON state. - * - * \param state The ASCON state to be permuted. - * \param first_round The first round (of 12) to be performed; 0, 4, or 6. - * - * The input and output \a state will be in big-endian byte order. - */ -void ascon_permute(ascon_state_t *state, uint8_t first_round); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-isap.h b/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-isap.h deleted file mode 100644 index ba99f2a..0000000 --- a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-isap.h +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -/* We expect a number of macros to be defined before this file - * is included to configure the underlying ISAP variant. - * - * ISAP_ALG_NAME Name of the ISAP algorithm; e.g. isap_keccak_128 - * ISAP_RATE Number of bytes in the rate for hashing and encryption. - * ISAP_sH Number of rounds for hashing. - * ISAP_sE Number of rounds for encryption. - * ISAP_sB Number of rounds for key bit absorption. - * ISAP_sK Number of rounds for keying. - * ISAP_STATE Type for the permuation state; e.g. ascon_state_t - * ISAP_PERMUTE(s,r) Permutes the state "s" with number of rounds "r". 
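
The removed internal-isap.h is a template that is included once per ISAP variant after the macros listed above have been defined. A hypothetical sketch of how an including source file would instantiate it; the algorithm name and parameter values below are illustrative placeholders, not values taken from the patch:

/* Hypothetical instantiation of the template; the concrete round counts and
 * rate would normally come from the variant's own .c file. */
#define ISAP_ALG_NAME      isap_keccak_128a
#define ISAP_RATE          18                  /* rate in bytes */
#define ISAP_sH            16
#define ISAP_sE            8
#define ISAP_sB            1
#define ISAP_sK            8
#define ISAP_STATE         keccakp_400_state_t
#define ISAP_PERMUTE(s, r) keccakp_400_permute((s), (r))
#include "internal-isap.h"                     /* expands this variant's functions */
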
- */ -#if defined(ISAP_ALG_NAME) - -#define ISAP_CONCAT_INNER(name,suffix) name##suffix -#define ISAP_CONCAT(name,suffix) ISAP_CONCAT_INNER(name,suffix) - -/* IV string for initialising the associated data */ -static unsigned char const ISAP_CONCAT(ISAP_ALG_NAME,_IV_A) - [sizeof(ISAP_STATE) - ISAP_NONCE_SIZE] = { - 0x01, ISAP_KEY_SIZE * 8, ISAP_RATE * 8, 1, - ISAP_sH, ISAP_sB, ISAP_sE, ISAP_sK -}; - -/* IV string for authenticating associated data */ -static unsigned char const ISAP_CONCAT(ISAP_ALG_NAME,_IV_KA) - [sizeof(ISAP_STATE) - ISAP_KEY_SIZE] = { - 0x02, ISAP_KEY_SIZE * 8, ISAP_RATE * 8, 1, - ISAP_sH, ISAP_sB, ISAP_sE, ISAP_sK -}; - -/* IV string for encrypting payload data */ -static unsigned char const ISAP_CONCAT(ISAP_ALG_NAME,_IV_KE) - [sizeof(ISAP_STATE) - ISAP_KEY_SIZE] = { - 0x03, ISAP_KEY_SIZE * 8, ISAP_RATE * 8, 1, - ISAP_sH, ISAP_sB, ISAP_sE, ISAP_sK -}; - -/** - * \brief Re-keys the ISAP permutation state. - * - * \param state The permutation state to be re-keyed. - * \param k Points to the 128-bit key for the ISAP cipher. - * \param iv Points to the initialization vector for this re-keying operation. - * \param data Points to the data to be absorbed to perform the re-keying. - * \param data_len Length of the data to be absorbed. - * - * The output key will be left in the leading bytes of \a state. - */ -static void ISAP_CONCAT(ISAP_ALG_NAME,_rekey) - (ISAP_STATE *state, const unsigned char *k, const unsigned char *iv, - const unsigned char *data, unsigned data_len) -{ - unsigned bit, num_bits; - - /* Initialize the state with the key and IV */ - memcpy(state->B, k, ISAP_KEY_SIZE); - memcpy(state->B + ISAP_KEY_SIZE, iv, sizeof(state->B) - ISAP_KEY_SIZE); - ISAP_PERMUTE(state, ISAP_sK); - - /* Absorb all of the bits of the data buffer one by one */ - num_bits = data_len * 8 - 1; - for (bit = 0; bit < num_bits; ++bit) { - state->B[0] ^= (data[bit / 8] << (bit % 8)) & 0x80; - ISAP_PERMUTE(state, ISAP_sB); - } - state->B[0] ^= (data[bit / 8] << (bit % 8)) & 0x80; - ISAP_PERMUTE(state, ISAP_sK); -} - -/** - * \brief Encrypts (or decrypts) a message payload with ISAP. - * - * \param state ISAP permutation state. - * \param k Points to the 128-bit key for the ISAP cipher. - * \param npub Points to the 128-bit nonce for the ISAP cipher. - * \param c Buffer to receive the output ciphertext. - * \param m Buffer to receive the input plaintext. - * \param mlen Length of the input plaintext. - */ -static void ISAP_CONCAT(ISAP_ALG_NAME,_encrypt) - (ISAP_STATE *state, const unsigned char *k, const unsigned char *npub, - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Set up the re-keyed encryption key and nonce in the state */ - ISAP_CONCAT(ISAP_ALG_NAME,_rekey) - (state, k, ISAP_CONCAT(ISAP_ALG_NAME,_IV_KE), npub, ISAP_NONCE_SIZE); - memcpy(state->B + sizeof(ISAP_STATE) - ISAP_NONCE_SIZE, - npub, ISAP_NONCE_SIZE); - - /* Encrypt the plaintext to produce the ciphertext */ - while (mlen >= ISAP_RATE) { - ISAP_PERMUTE(state, ISAP_sE); - lw_xor_block_2_src(c, state->B, m, ISAP_RATE); - c += ISAP_RATE; - m += ISAP_RATE; - mlen -= ISAP_RATE; - } - if (mlen > 0) { - ISAP_PERMUTE(state, ISAP_sE); - lw_xor_block_2_src(c, state->B, m, (unsigned)mlen); - } -} - -/** - * \brief Authenticates the associated data and ciphertext using ISAP. - * - * \param state ISAP permutation state. - * \param k Points to the 128-bit key for the ISAP cipher. - * \param npub Points to the 128-bit nonce for the ISAP cipher. - * \param ad Buffer containing the associated data. 
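
The re-keying routine above absorbs its input one bit at a time, most significant bit first, by shifting the current byte so that the wanted bit lands in the top bit position before it is XORed into the first state byte. A small standalone illustration of that bit-extraction expression:

#include <stdio.h>

int main(void)
{
    const unsigned char data[2] = { 0xA5, 0x3C };
    unsigned bit, num_bits = sizeof(data) * 8;

    for (bit = 0; bit < num_bits; ++bit) {
        /* same expression as the absorption loop: isolate bit `bit`,
         * MSB first, in the top bit of a byte */
        unsigned char b = (unsigned char)(data[bit / 8] << (bit % 8)) & 0x80;
        printf("%u", b ? 1u : 0u);
    }
    printf("\n");   /* prints 1010010100111100 for 0xA5 0x3C */
    return 0;
}
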
- * \param adlen Length of the associated data. - * \param c Buffer containing the ciphertext. - * \param clen Length of the ciphertext. - */ -static void ISAP_CONCAT(ISAP_ALG_NAME,_mac) - (ISAP_STATE *state, const unsigned char *k, const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *c, unsigned long long clen, - unsigned char *tag) -{ - unsigned char preserve[sizeof(ISAP_STATE) - ISAP_TAG_SIZE]; - unsigned temp; - - /* Absorb the associated data */ - memcpy(state->B, npub, ISAP_NONCE_SIZE); - memcpy(state->B + ISAP_NONCE_SIZE, ISAP_CONCAT(ISAP_ALG_NAME,_IV_A), - sizeof(state->B) - ISAP_NONCE_SIZE); - ISAP_PERMUTE(state, ISAP_sH); - while (adlen >= ISAP_RATE) { - lw_xor_block(state->B, ad, ISAP_RATE); - ISAP_PERMUTE(state, ISAP_sH); - ad += ISAP_RATE; - adlen -= ISAP_RATE; - } - temp = (unsigned)adlen; - lw_xor_block(state->B, ad, temp); - state->B[temp] ^= 0x80; /* padding */ - ISAP_PERMUTE(state, ISAP_sH); - state->B[sizeof(state->B) - 1] ^= 0x01; /* domain separation */ - - /* Absorb the ciphertext */ - while (clen >= ISAP_RATE) { - lw_xor_block(state->B, c, ISAP_RATE); - ISAP_PERMUTE(state, ISAP_sH); - c += ISAP_RATE; - clen -= ISAP_RATE; - } - temp = (unsigned)clen; - lw_xor_block(state->B, c, temp); - state->B[temp] ^= 0x80; /* padding */ - ISAP_PERMUTE(state, ISAP_sH); - - /* Re-key the state and generate the authentication tag */ - memcpy(tag, state->B, ISAP_TAG_SIZE); - memcpy(preserve, state->B + ISAP_TAG_SIZE, sizeof(preserve)); - ISAP_CONCAT(ISAP_ALG_NAME,_rekey) - (state, k, ISAP_CONCAT(ISAP_ALG_NAME,_IV_KA), tag, ISAP_TAG_SIZE); - memcpy(state->B + ISAP_TAG_SIZE, preserve, sizeof(preserve)); - ISAP_PERMUTE(state, ISAP_sH); - memcpy(tag, state->B, ISAP_TAG_SIZE); -} - -int ISAP_CONCAT(ISAP_ALG_NAME,_aead_encrypt) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - ISAP_STATE state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ISAP_TAG_SIZE; - - /* Encrypt the plaintext to produce the ciphertext */ - ISAP_CONCAT(ISAP_ALG_NAME,_encrypt)(&state, k, npub, c, m, mlen); - - /* Authenticate the associated data and ciphertext to generate the tag */ - ISAP_CONCAT(ISAP_ALG_NAME,_mac) - (&state, k, npub, ad, adlen, c, mlen, c + mlen); - return 0; -} - -int ISAP_CONCAT(ISAP_ALG_NAME,_aead_decrypt) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - ISAP_STATE state; - unsigned char tag[ISAP_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ISAP_TAG_SIZE) - return -1; - *mlen = clen - ISAP_TAG_SIZE; - - /* Authenticate the associated data and ciphertext to generate the tag */ - ISAP_CONCAT(ISAP_ALG_NAME,_mac)(&state, k, npub, ad, adlen, c, *mlen, tag); - - /* Decrypt the ciphertext to produce the plaintext */ - ISAP_CONCAT(ISAP_ALG_NAME,_encrypt)(&state, k, npub, m, c, *mlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, tag, c + *mlen, ISAP_TAG_SIZE); -} - -#endif /* ISAP_ALG_NAME */ - -/* Now undefine everything so that we can include this file again for - * another variant on the ISAP algorithm */ -#undef ISAP_ALG_NAME -#undef ISAP_RATE -#undef 
ISAP_sH -#undef ISAP_sE -#undef ISAP_sB -#undef ISAP_sK -#undef ISAP_STATE -#undef ISAP_PERMUTE -#undef ISAP_CONCAT_INNER -#undef ISAP_CONCAT diff --git a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-keccak-avr.S b/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-keccak-avr.S deleted file mode 100644 index e50ccaf..0000000 --- a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-keccak-avr.S +++ /dev/null @@ -1,1552 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global keccakp_200_permute - .type keccakp_200_permute, @function -keccakp_200_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r26,Z+6 - ldd r27,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r4,Z+12 - ldd r5,Z+13 - ldd r6,Z+14 - ldd r7,Z+15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - ldd r24,Z+24 - push r31 - push r30 - rcall 82f - ldi r30,1 - eor r18,r30 - rcall 82f - ldi r30,130 - eor r18,r30 - rcall 82f - ldi r30,138 - eor r18,r30 - rcall 82f - mov r30,r1 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,1 - eor r18,r30 - rcall 82f - ldi r30,129 - eor r18,r30 - rcall 82f - ldi r30,9 - eor r18,r30 - rcall 82f - ldi r30,138 - eor r18,r30 - rcall 82f - ldi r30,136 - eor r18,r30 - rcall 82f - ldi r30,9 - eor r18,r30 - rcall 82f - ldi r30,10 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,139 - eor r18,r30 - rcall 82f - ldi r30,137 - eor r18,r30 - rcall 82f - ldi r30,3 - eor r18,r30 - rcall 82f - ldi r30,2 - eor r18,r30 - rcall 82f - ldi r30,128 - eor r18,r30 - rjmp 420f -82: - mov r30,r18 - eor r30,r23 - eor r30,r2 - eor r30,r7 - eor r30,r12 - mov r31,r19 - eor r31,r26 - eor r31,r3 - eor r31,r8 - eor r31,r13 - mov r25,r20 - eor r25,r27 - eor r25,r4 - eor r25,r9 - eor r25,r14 - mov r16,r21 - eor r16,r28 - eor r16,r5 - eor r16,r10 - eor r16,r15 - mov r17,r22 - eor r17,r29 - eor r17,r6 - eor r17,r11 - eor r17,r24 - mov r0,r31 - lsl r0 - adc r0,r1 - eor r0,r17 - eor r18,r0 - eor r23,r0 - eor r2,r0 - eor r7,r0 - eor r12,r0 - mov r0,r25 - lsl r0 - adc r0,r1 - eor r0,r30 - eor r19,r0 - eor r26,r0 - eor r3,r0 - eor r8,r0 - eor r13,r0 - mov r0,r16 - lsl r0 - adc r0,r1 - eor r0,r31 - eor r20,r0 - eor r27,r0 - eor r4,r0 - eor r9,r0 - eor r14,r0 - mov r0,r17 - lsl r0 - adc r0,r1 - eor r0,r25 - eor r21,r0 - eor r28,r0 - eor r5,r0 - eor r10,r0 - eor r15,r0 - mov r0,r30 - lsl r0 - adc r0,r1 - eor r0,r16 - eor r22,r0 - eor r29,r0 - eor r6,r0 - eor r11,r0 - eor r24,r0 - mov r30,r19 - swap r26 - mov r19,r26 - swap r29 - mov r26,r29 - mov r0,r1 - lsr r14 - ror r0 - lsr r14 - ror r0 - lsr r14 - ror r0 - or r14,r0 - mov r29,r14 - bst r6,0 - lsr r6 - bld r6,7 - mov r14,r6 - lsl r12 - adc r12,r1 - lsl r12 - adc r12,r1 - mov r6,r12 - mov r0,r1 - lsr r20 - ror r0 - lsr r20 - ror r0 - or r20,r0 - mov r12,r20 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - mov r20,r4 - lsl r5 - adc r5,r1 - mov r4,r5 - mov r5,r11 - mov r11,r15 - lsl r7 - adc r7,r1 - mov r15,r7 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - mov r7,r22 - mov r0,r1 - lsr r24 - ror r0 - lsr r24 - ror r0 - or r24,r0 - mov r22,r24 - lsl r13 - adc 
r13,r1 - lsl r13 - adc r13,r1 - mov r24,r13 - bst r28,0 - lsr r28 - bld r28,7 - mov r13,r28 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r28,r8 - swap r23 - mov r8,r23 - swap r21 - mov r23,r21 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r21,r10 - bst r9,0 - lsr r9 - bld r9,7 - mov r10,r9 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - mov r9,r3 - mov r0,r1 - lsr r27 - ror r0 - lsr r27 - ror r0 - or r27,r0 - mov r3,r27 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - mov r27,r2 - lsl r30 - adc r30,r1 - mov r2,r30 - mov r30,r18 - mov r31,r19 - mov r25,r20 - mov r16,r21 - mov r17,r22 - mov r18,r25 - mov r0,r31 - com r0 - and r18,r0 - eor r18,r30 - mov r19,r16 - mov r0,r25 - com r0 - and r19,r0 - eor r19,r31 - mov r20,r17 - mov r0,r16 - com r0 - and r20,r0 - eor r20,r25 - mov r21,r30 - mov r0,r17 - com r0 - and r21,r0 - eor r21,r16 - mov r22,r31 - mov r0,r30 - com r0 - and r22,r0 - eor r22,r17 - mov r30,r23 - mov r31,r26 - mov r25,r27 - mov r16,r28 - mov r17,r29 - mov r23,r25 - mov r0,r31 - com r0 - and r23,r0 - eor r23,r30 - mov r26,r16 - mov r0,r25 - com r0 - and r26,r0 - eor r26,r31 - mov r27,r17 - mov r0,r16 - com r0 - and r27,r0 - eor r27,r25 - mov r28,r30 - mov r0,r17 - com r0 - and r28,r0 - eor r28,r16 - mov r29,r31 - mov r0,r30 - com r0 - and r29,r0 - eor r29,r17 - mov r30,r2 - mov r31,r3 - mov r25,r4 - mov r16,r5 - mov r17,r6 - mov r2,r25 - mov r0,r31 - com r0 - and r2,r0 - eor r2,r30 - mov r3,r16 - mov r0,r25 - com r0 - and r3,r0 - eor r3,r31 - mov r4,r17 - mov r0,r16 - com r0 - and r4,r0 - eor r4,r25 - mov r5,r30 - mov r0,r17 - com r0 - and r5,r0 - eor r5,r16 - mov r6,r31 - mov r0,r30 - com r0 - and r6,r0 - eor r6,r17 - mov r30,r7 - mov r31,r8 - mov r25,r9 - mov r16,r10 - mov r17,r11 - mov r7,r25 - mov r0,r31 - com r0 - and r7,r0 - eor r7,r30 - mov r8,r16 - mov r0,r25 - com r0 - and r8,r0 - eor r8,r31 - mov r9,r17 - mov r0,r16 - com r0 - and r9,r0 - eor r9,r25 - mov r10,r30 - mov r0,r17 - com r0 - and r10,r0 - eor r10,r16 - mov r11,r31 - mov r0,r30 - com r0 - and r11,r0 - eor r11,r17 - mov r30,r12 - mov r31,r13 - mov r25,r14 - mov r16,r15 - mov r17,r24 - mov r12,r25 - mov r0,r31 - com r0 - and r12,r0 - eor r12,r30 - mov r13,r16 - mov r0,r25 - com r0 - and r13,r0 - eor r13,r31 - mov r14,r17 - mov r0,r16 - com r0 - and r14,r0 - eor r14,r25 - mov r15,r30 - mov r0,r17 - com r0 - and r15,r0 - eor r15,r16 - mov r24,r31 - mov r0,r30 - com r0 - and r24,r0 - eor r24,r17 - ret -420: - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r22 - std Z+5,r23 - std Z+6,r26 - std Z+7,r27 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r4 - std Z+13,r5 - std Z+14,r6 - std Z+15,r7 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - std Z+24,r24 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size keccakp_200_permute, .-keccakp_200_permute - - .text -.global keccakp_400_permute - .type keccakp_400_permute, @function -keccakp_400_permute: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - movw r30,r24 -.L__stack_usage = 17 - ld r6,Z - ldd r7,Z+1 - ldd r8,Z+2 - ldd r9,Z+3 - ldd r10,Z+4 - ldd r11,Z+5 - ldd r12,Z+6 - ldd r13,Z+7 - ldd r14,Z+8 - ldd r15,Z+9 - cpi 
r22,20 - brcs 15f - rcall 153f - ldi r23,1 - eor r6,r23 -15: - cpi r22,19 - brcs 23f - rcall 153f - ldi r23,130 - eor r6,r23 - ldi r17,128 - eor r7,r17 -23: - cpi r22,18 - brcs 31f - rcall 153f - ldi r23,138 - eor r6,r23 - ldi r17,128 - eor r7,r17 -31: - cpi r22,17 - brcs 37f - rcall 153f - ldi r23,128 - eor r7,r23 -37: - cpi r22,16 - brcs 45f - rcall 153f - ldi r23,139 - eor r6,r23 - ldi r17,128 - eor r7,r17 -45: - cpi r22,15 - brcs 51f - rcall 153f - ldi r23,1 - eor r6,r23 -51: - cpi r22,14 - brcs 59f - rcall 153f - ldi r23,129 - eor r6,r23 - ldi r17,128 - eor r7,r17 -59: - cpi r22,13 - brcs 67f - rcall 153f - ldi r23,9 - eor r6,r23 - ldi r17,128 - eor r7,r17 -67: - cpi r22,12 - brcs 73f - rcall 153f - ldi r23,138 - eor r6,r23 -73: - cpi r22,11 - brcs 79f - rcall 153f - ldi r23,136 - eor r6,r23 -79: - cpi r22,10 - brcs 87f - rcall 153f - ldi r23,9 - eor r6,r23 - ldi r17,128 - eor r7,r17 -87: - cpi r22,9 - brcs 93f - rcall 153f - ldi r23,10 - eor r6,r23 -93: - cpi r22,8 - brcs 101f - rcall 153f - ldi r23,139 - eor r6,r23 - ldi r17,128 - eor r7,r17 -101: - cpi r22,7 - brcs 107f - rcall 153f - ldi r23,139 - eor r6,r23 -107: - cpi r22,6 - brcs 115f - rcall 153f - ldi r23,137 - eor r6,r23 - ldi r17,128 - eor r7,r17 -115: - cpi r22,5 - brcs 123f - rcall 153f - ldi r23,3 - eor r6,r23 - ldi r17,128 - eor r7,r17 -123: - cpi r22,4 - brcs 131f - rcall 153f - ldi r23,2 - eor r6,r23 - ldi r17,128 - eor r7,r17 -131: - cpi r22,3 - brcs 137f - rcall 153f - ldi r23,128 - eor r6,r23 -137: - cpi r22,2 - brcs 145f - rcall 153f - ldi r23,10 - eor r6,r23 - ldi r17,128 - eor r7,r17 -145: - cpi r22,1 - brcs 151f - rcall 153f - ldi r23,10 - eor r6,r23 -151: - rjmp 1004f -153: - movw r18,r6 - ldd r0,Z+10 - eor r18,r0 - ldd r0,Z+11 - eor r19,r0 - ldd r0,Z+20 - eor r18,r0 - ldd r0,Z+21 - eor r19,r0 - ldd r0,Z+30 - eor r18,r0 - ldd r0,Z+31 - eor r19,r0 - ldd r0,Z+40 - eor r18,r0 - ldd r0,Z+41 - eor r19,r0 - movw r20,r8 - ldd r0,Z+12 - eor r20,r0 - ldd r0,Z+13 - eor r21,r0 - ldd r0,Z+22 - eor r20,r0 - ldd r0,Z+23 - eor r21,r0 - ldd r0,Z+32 - eor r20,r0 - ldd r0,Z+33 - eor r21,r0 - ldd r0,Z+42 - eor r20,r0 - ldd r0,Z+43 - eor r21,r0 - movw r26,r10 - ldd r0,Z+14 - eor r26,r0 - ldd r0,Z+15 - eor r27,r0 - ldd r0,Z+24 - eor r26,r0 - ldd r0,Z+25 - eor r27,r0 - ldd r0,Z+34 - eor r26,r0 - ldd r0,Z+35 - eor r27,r0 - ldd r0,Z+44 - eor r26,r0 - ldd r0,Z+45 - eor r27,r0 - movw r2,r12 - ldd r0,Z+16 - eor r2,r0 - ldd r0,Z+17 - eor r3,r0 - ldd r0,Z+26 - eor r2,r0 - ldd r0,Z+27 - eor r3,r0 - ldd r0,Z+36 - eor r2,r0 - ldd r0,Z+37 - eor r3,r0 - ldd r0,Z+46 - eor r2,r0 - ldd r0,Z+47 - eor r3,r0 - movw r4,r14 - ldd r0,Z+18 - eor r4,r0 - ldd r0,Z+19 - eor r5,r0 - ldd r0,Z+28 - eor r4,r0 - ldd r0,Z+29 - eor r5,r0 - ldd r0,Z+38 - eor r4,r0 - ldd r0,Z+39 - eor r5,r0 - ldd r0,Z+48 - eor r4,r0 - ldd r0,Z+49 - eor r5,r0 - movw r24,r20 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r4 - eor r25,r5 - eor r6,r24 - eor r7,r25 - ldd r0,Z+10 - eor r0,r24 - std Z+10,r0 - ldd r0,Z+11 - eor r0,r25 - std Z+11,r0 - ldd r0,Z+20 - eor r0,r24 - std Z+20,r0 - ldd r0,Z+21 - eor r0,r25 - std Z+21,r0 - ldd r0,Z+30 - eor r0,r24 - std Z+30,r0 - ldd r0,Z+31 - eor r0,r25 - std Z+31,r0 - ldd r0,Z+40 - eor r0,r24 - std Z+40,r0 - ldd r0,Z+41 - eor r0,r25 - std Z+41,r0 - movw r24,r26 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r18 - eor r25,r19 - eor r8,r24 - eor r9,r25 - ldd r0,Z+12 - eor r0,r24 - std Z+12,r0 - ldd r0,Z+13 - eor r0,r25 - std Z+13,r0 - ldd r0,Z+22 - eor r0,r24 - std Z+22,r0 - ldd r0,Z+23 - eor r0,r25 - std Z+23,r0 - ldd r0,Z+32 - eor r0,r24 - std Z+32,r0 - 
ldd r0,Z+33 - eor r0,r25 - std Z+33,r0 - ldd r0,Z+42 - eor r0,r24 - std Z+42,r0 - ldd r0,Z+43 - eor r0,r25 - std Z+43,r0 - movw r24,r2 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r20 - eor r25,r21 - eor r10,r24 - eor r11,r25 - ldd r0,Z+14 - eor r0,r24 - std Z+14,r0 - ldd r0,Z+15 - eor r0,r25 - std Z+15,r0 - ldd r0,Z+24 - eor r0,r24 - std Z+24,r0 - ldd r0,Z+25 - eor r0,r25 - std Z+25,r0 - ldd r0,Z+34 - eor r0,r24 - std Z+34,r0 - ldd r0,Z+35 - eor r0,r25 - std Z+35,r0 - ldd r0,Z+44 - eor r0,r24 - std Z+44,r0 - ldd r0,Z+45 - eor r0,r25 - std Z+45,r0 - movw r24,r4 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r26 - eor r25,r27 - eor r12,r24 - eor r13,r25 - ldd r0,Z+16 - eor r0,r24 - std Z+16,r0 - ldd r0,Z+17 - eor r0,r25 - std Z+17,r0 - ldd r0,Z+26 - eor r0,r24 - std Z+26,r0 - ldd r0,Z+27 - eor r0,r25 - std Z+27,r0 - ldd r0,Z+36 - eor r0,r24 - std Z+36,r0 - ldd r0,Z+37 - eor r0,r25 - std Z+37,r0 - ldd r0,Z+46 - eor r0,r24 - std Z+46,r0 - ldd r0,Z+47 - eor r0,r25 - std Z+47,r0 - movw r24,r18 - lsl r24 - rol r25 - adc r24,r1 - eor r24,r2 - eor r25,r3 - eor r14,r24 - eor r15,r25 - ldd r0,Z+18 - eor r0,r24 - std Z+18,r0 - ldd r0,Z+19 - eor r0,r25 - std Z+19,r0 - ldd r0,Z+28 - eor r0,r24 - std Z+28,r0 - ldd r0,Z+29 - eor r0,r25 - std Z+29,r0 - ldd r0,Z+38 - eor r0,r24 - std Z+38,r0 - ldd r0,Z+39 - eor r0,r25 - std Z+39,r0 - ldd r0,Z+48 - eor r0,r24 - std Z+48,r0 - ldd r0,Z+49 - eor r0,r25 - std Z+49,r0 - movw r24,r8 - ldd r8,Z+12 - ldd r9,Z+13 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldd r18,Z+18 - ldd r19,Z+19 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+12,r18 - std Z+13,r19 - ldd r18,Z+44 - ldd r19,Z+45 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+18,r18 - std Z+19,r19 - ldd r18,Z+28 - ldd r19,Z+29 - mov r0,r19 - mov r19,r18 - mov r18,r0 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std Z+44,r18 - std Z+45,r19 - ldd r18,Z+40 - ldd r19,Z+41 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+28,r18 - std Z+29,r19 - movw r18,r10 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+40,r18 - std Z+41,r19 - ldd r10,Z+24 - ldd r11,Z+25 - mov r0,r11 - mov r11,r10 - mov r10,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldd r18,Z+26 - ldd r19,Z+27 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - std Z+24,r18 - std Z+25,r19 - ldd r18,Z+38 - ldd r19,Z+39 - mov r0,r19 - mov r19,r18 - mov r18,r0 - std Z+26,r18 - std Z+27,r19 - ldd r18,Z+46 - ldd r19,Z+47 - mov r0,r19 - mov r19,r18 - mov r18,r0 - std Z+38,r18 - std Z+39,r19 - ldd r18,Z+30 - ldd r19,Z+31 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - std Z+46,r18 - std Z+47,r19 - movw r18,r14 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+30,r18 - std Z+31,r19 - ldd r14,Z+48 - ldd r15,Z+49 - mov r0,r1 - lsr r15 - ror r14 - ror r0 - lsr r15 - ror r14 - ror r0 - or r15,r0 - ldd r18,Z+42 - ldd r19,Z+43 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+48,r18 - std Z+49,r19 - ldd r18,Z+16 - ldd r19,Z+17 - mov r0,r19 - mov r19,r18 - mov r18,r0 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std 
Z+42,r18 - std Z+43,r19 - ldd r18,Z+32 - ldd r19,Z+33 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+16,r18 - std Z+17,r19 - ldd r18,Z+10 - ldd r19,Z+11 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+32,r18 - std Z+33,r19 - movw r18,r12 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+10,r18 - std Z+11,r19 - ldd r12,Z+36 - ldd r13,Z+37 - mov r0,r13 - mov r13,r12 - mov r12,r0 - mov r0,r1 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - lsr r13 - ror r12 - ror r0 - or r13,r0 - ldd r18,Z+34 - ldd r19,Z+35 - bst r18,0 - lsr r19 - ror r18 - bld r19,7 - std Z+36,r18 - std Z+37,r19 - ldd r18,Z+22 - ldd r19,Z+23 - mov r0,r19 - mov r19,r18 - mov r18,r0 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+34,r18 - std Z+35,r19 - ldd r18,Z+14 - ldd r19,Z+15 - mov r0,r19 - mov r19,r18 - mov r18,r0 - mov r0,r1 - lsr r19 - ror r18 - ror r0 - lsr r19 - ror r18 - ror r0 - or r19,r0 - std Z+22,r18 - std Z+23,r19 - ldd r18,Z+20 - ldd r19,Z+21 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - lsl r18 - rol r19 - adc r18,r1 - std Z+14,r18 - std Z+15,r19 - lsl r24 - rol r25 - adc r24,r1 - std Z+20,r24 - std Z+21,r25 - movw r18,r6 - movw r20,r8 - movw r26,r10 - movw r2,r12 - movw r4,r14 - movw r6,r26 - mov r0,r20 - com r0 - and r6,r0 - mov r0,r21 - com r0 - and r7,r0 - eor r6,r18 - eor r7,r19 - movw r8,r2 - mov r0,r26 - com r0 - and r8,r0 - mov r0,r27 - com r0 - and r9,r0 - eor r8,r20 - eor r9,r21 - movw r10,r4 - mov r0,r2 - com r0 - and r10,r0 - mov r0,r3 - com r0 - and r11,r0 - eor r10,r26 - eor r11,r27 - movw r12,r18 - mov r0,r4 - com r0 - and r12,r0 - mov r0,r5 - com r0 - and r13,r0 - eor r12,r2 - eor r13,r3 - movw r14,r20 - mov r0,r18 - com r0 - and r14,r0 - mov r0,r19 - com r0 - and r15,r0 - eor r14,r4 - eor r15,r5 - ldd r18,Z+10 - ldd r19,Z+11 - ldd r20,Z+12 - ldd r21,Z+13 - ldd r26,Z+14 - ldd r27,Z+15 - ldd r2,Z+16 - ldd r3,Z+17 - ldd r4,Z+18 - ldd r5,Z+19 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+10,r24 - std Z+11,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+12,r24 - std Z+13,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+14,r24 - std Z+15,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+16,r24 - std Z+17,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+18,r24 - std Z+19,r25 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - ldd r26,Z+24 - ldd r27,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r4,Z+28 - ldd r5,Z+29 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+20,r24 - std Z+21,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+22,r24 - std Z+23,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+24,r24 - std Z+25,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov 
r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+26,r24 - std Z+27,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+28,r24 - std Z+29,r25 - ldd r18,Z+30 - ldd r19,Z+31 - ldd r20,Z+32 - ldd r21,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - ldd r2,Z+36 - ldd r3,Z+37 - ldd r4,Z+38 - ldd r5,Z+39 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+30,r24 - std Z+31,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+32,r24 - std Z+33,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+34,r24 - std Z+35,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+36,r24 - std Z+37,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+38,r24 - std Z+39,r25 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - ldd r26,Z+44 - ldd r27,Z+45 - ldd r2,Z+46 - ldd r3,Z+47 - ldd r4,Z+48 - ldd r5,Z+49 - movw r24,r26 - mov r0,r20 - com r0 - and r24,r0 - mov r0,r21 - com r0 - and r25,r0 - eor r24,r18 - eor r25,r19 - std Z+40,r24 - std Z+41,r25 - movw r24,r2 - mov r0,r26 - com r0 - and r24,r0 - mov r0,r27 - com r0 - and r25,r0 - eor r24,r20 - eor r25,r21 - std Z+42,r24 - std Z+43,r25 - movw r24,r4 - mov r0,r2 - com r0 - and r24,r0 - mov r0,r3 - com r0 - and r25,r0 - eor r24,r26 - eor r25,r27 - std Z+44,r24 - std Z+45,r25 - movw r24,r18 - mov r0,r4 - com r0 - and r24,r0 - mov r0,r5 - com r0 - and r25,r0 - eor r24,r2 - eor r25,r3 - std Z+46,r24 - std Z+47,r25 - movw r24,r20 - mov r0,r18 - com r0 - and r24,r0 - mov r0,r19 - com r0 - and r25,r0 - eor r24,r4 - eor r25,r5 - std Z+48,r24 - std Z+49,r25 - ret -1004: - st Z,r6 - std Z+1,r7 - std Z+2,r8 - std Z+3,r9 - std Z+4,r10 - std Z+5,r11 - std Z+6,r12 - std Z+7,r13 - std Z+8,r14 - std Z+9,r15 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size keccakp_400_permute, .-keccakp_400_permute - -#endif diff --git a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-keccak.c b/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-keccak.c deleted file mode 100644 index 60539df..0000000 --- a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-keccak.c +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
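The assembly above supplies AVR back-ends for the two permutation entry points whose C reference code and shared header follow later in this patch. Reading the prologue of keccakp_400_permute, the "cpi r22,20" / "brcs" ladder skips the leading rounds when fewer than 20 are requested; expressed as plain C (an inference from the code, matching the reference loop deleted further below), the dispatch amounts to:

    /* Sketch of the round dispatch performed by the keccakp_400_permute
     * assembly above: only the last `rounds` of the 20 round constants
     * are applied (rounds is assumed to be between 1 and 20). */
    void keccakp_400_dispatch_sketch(unsigned rounds)
    {
        for (unsigned round = 20 - rounds; round < 20; ++round) {
            /* theta, rho, pi, chi for this round (elided), then iota:
             * XOR the round constant RC[round] into the first lane. */
        }
    }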
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-keccak.h" - -#if !defined(__AVR__) - -/* Faster method to compute ((x + y) % 5) that avoids the division */ -static unsigned char const addMod5Table[9] = { - 0, 1, 2, 3, 4, 0, 1, 2, 3 -}; -#define addMod5(x, y) (addMod5Table[(x) + (y)]) - -void keccakp_200_permute(keccakp_200_state_t *state) -{ - static uint8_t const RC[18] = { - 0x01, 0x82, 0x8A, 0x00, 0x8B, 0x01, 0x81, 0x09, - 0x8A, 0x88, 0x09, 0x0A, 0x8B, 0x8B, 0x89, 0x03, - 0x02, 0x80 - }; - uint8_t C[5]; - uint8_t D; - unsigned round; - unsigned index, index2; - for (round = 0; round < 18; ++round) { - /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. Compute D on the fly */ - for (index = 0; index < 5; ++index) { - C[index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; - } - for (index = 0; index < 5; ++index) { - D = C[addMod5(index, 4)] ^ - leftRotate1_8(C[addMod5(index, 1)]); - for (index2 = 0; index2 < 5; ++index2) - state->A[index2][index] ^= D; - } - - /* Step mapping rho and pi combined into a single step. - * Rotate all lanes by a specific offset and rearrange */ - D = state->A[0][1]; - state->A[0][1] = leftRotate4_8(state->A[1][1]); - state->A[1][1] = leftRotate4_8(state->A[1][4]); - state->A[1][4] = leftRotate5_8(state->A[4][2]); - state->A[4][2] = leftRotate7_8(state->A[2][4]); - state->A[2][4] = leftRotate2_8(state->A[4][0]); - state->A[4][0] = leftRotate6_8(state->A[0][2]); - state->A[0][2] = leftRotate3_8(state->A[2][2]); - state->A[2][2] = leftRotate1_8(state->A[2][3]); - state->A[2][3] = state->A[3][4]; - state->A[3][4] = state->A[4][3]; - state->A[4][3] = leftRotate1_8(state->A[3][0]); - state->A[3][0] = leftRotate3_8(state->A[0][4]); - state->A[0][4] = leftRotate6_8(state->A[4][4]); - state->A[4][4] = leftRotate2_8(state->A[4][1]); - state->A[4][1] = leftRotate7_8(state->A[1][3]); - state->A[1][3] = leftRotate5_8(state->A[3][1]); - state->A[3][1] = leftRotate4_8(state->A[1][0]); - state->A[1][0] = leftRotate4_8(state->A[0][3]); - state->A[0][3] = leftRotate5_8(state->A[3][3]); - state->A[3][3] = leftRotate7_8(state->A[3][2]); - state->A[3][2] = leftRotate2_8(state->A[2][1]); - state->A[2][1] = leftRotate6_8(state->A[1][2]); - state->A[1][2] = leftRotate3_8(state->A[2][0]); - state->A[2][0] = leftRotate1_8(D); - - /* Step mapping chi. Combine each lane with two others in its row */ - for (index = 0; index < 5; ++index) { - C[0] = state->A[index][0]; - C[1] = state->A[index][1]; - C[2] = state->A[index][2]; - C[3] = state->A[index][3]; - C[4] = state->A[index][4]; - for (index2 = 0; index2 < 5; ++index2) { - state->A[index][index2] = - C[index2] ^ - ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); - } - } - - /* Step mapping iota. 
XOR A[0][0] with the round constant */ - state->A[0][0] ^= RC[round]; - } -} - -#if defined(LW_UTIL_LITTLE_ENDIAN) -#define keccakp_400_permute_host keccakp_400_permute -#endif - -/* Keccak-p[400] that assumes that the input is already in host byte order */ -void keccakp_400_permute_host(keccakp_400_state_t *state, unsigned rounds) -{ - static uint16_t const RC[20] = { - 0x0001, 0x8082, 0x808A, 0x8000, 0x808B, 0x0001, 0x8081, 0x8009, - 0x008A, 0x0088, 0x8009, 0x000A, 0x808B, 0x008B, 0x8089, 0x8003, - 0x8002, 0x0080, 0x800A, 0x000A - }; - uint16_t C[5]; - uint16_t D; - unsigned round; - unsigned index, index2; - for (round = 20 - rounds; round < 20; ++round) { - /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. Compute D on the fly */ - for (index = 0; index < 5; ++index) { - C[index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; - } - for (index = 0; index < 5; ++index) { - D = C[addMod5(index, 4)] ^ - leftRotate1_16(C[addMod5(index, 1)]); - for (index2 = 0; index2 < 5; ++index2) - state->A[index2][index] ^= D; - } - - /* Step mapping rho and pi combined into a single step. - * Rotate all lanes by a specific offset and rearrange */ - D = state->A[0][1]; - state->A[0][1] = leftRotate12_16(state->A[1][1]); - state->A[1][1] = leftRotate4_16 (state->A[1][4]); - state->A[1][4] = leftRotate13_16(state->A[4][2]); - state->A[4][2] = leftRotate7_16 (state->A[2][4]); - state->A[2][4] = leftRotate2_16 (state->A[4][0]); - state->A[4][0] = leftRotate14_16(state->A[0][2]); - state->A[0][2] = leftRotate11_16(state->A[2][2]); - state->A[2][2] = leftRotate9_16 (state->A[2][3]); - state->A[2][3] = leftRotate8_16 (state->A[3][4]); - state->A[3][4] = leftRotate8_16 (state->A[4][3]); - state->A[4][3] = leftRotate9_16 (state->A[3][0]); - state->A[3][0] = leftRotate11_16(state->A[0][4]); - state->A[0][4] = leftRotate14_16(state->A[4][4]); - state->A[4][4] = leftRotate2_16 (state->A[4][1]); - state->A[4][1] = leftRotate7_16 (state->A[1][3]); - state->A[1][3] = leftRotate13_16(state->A[3][1]); - state->A[3][1] = leftRotate4_16 (state->A[1][0]); - state->A[1][0] = leftRotate12_16(state->A[0][3]); - state->A[0][3] = leftRotate5_16 (state->A[3][3]); - state->A[3][3] = leftRotate15_16(state->A[3][2]); - state->A[3][2] = leftRotate10_16(state->A[2][1]); - state->A[2][1] = leftRotate6_16 (state->A[1][2]); - state->A[1][2] = leftRotate3_16 (state->A[2][0]); - state->A[2][0] = leftRotate1_16(D); - - /* Step mapping chi. Combine each lane with two others in its row */ - for (index = 0; index < 5; ++index) { - C[0] = state->A[index][0]; - C[1] = state->A[index][1]; - C[2] = state->A[index][2]; - C[3] = state->A[index][3]; - C[4] = state->A[index][4]; - for (index2 = 0; index2 < 5; ++index2) { - state->A[index][index2] = - C[index2] ^ - ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); - } - } - - /* Step mapping iota. XOR A[0][0] with the round constant */ - state->A[0][0] ^= RC[round]; - } -} - -#if !defined(LW_UTIL_LITTLE_ENDIAN) - -/** - * \brief Reverses the bytes in a Keccak-p[400] state. - * - * \param state The Keccak-p[400] state to apply byte-reversal to. 
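One detail of the C implementation above is worth spelling out: addMod5Table has nine entries because its two arguments are always row or column indices in 0..4, so their sum never exceeds 8 and a lookup replaces the division that a plain "% 5" would otherwise cost on small targets. A small self-contained check, using only the table as defined above:

    /* Verify the addMod5 lookup against the plain modulo operation. */
    #include <assert.h>

    static unsigned char const addMod5Table[9] = {
        0, 1, 2, 3, 4, 0, 1, 2, 3
    };
    #define addMod5(x, y) (addMod5Table[(x) + (y)])

    int main(void)
    {
        for (unsigned x = 0; x < 5; ++x)
            for (unsigned y = 0; y < 5; ++y)
                assert(addMod5(x, y) == (x + y) % 5);
        return 0;
    }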
- */ -static void keccakp_400_reverse_bytes(keccakp_400_state_t *state) -{ - unsigned index; - unsigned char temp1; - unsigned char temp2; - for (index = 0; index < 50; index += 2) { - temp1 = state->B[index]; - temp2 = state->B[index + 1]; - state->B[index] = temp2; - state->B[index + 1] = temp1; - } -} - -/* Keccak-p[400] that requires byte reversal on input and output */ -void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds) -{ - keccakp_400_reverse_bytes(state); - keccakp_400_permute_host(state, rounds); - keccakp_400_reverse_bytes(state); -} - -#endif - -#endif /* !__AVR__ */ diff --git a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-keccak.h b/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-keccak.h deleted file mode 100644 index 2ffef42..0000000 --- a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-keccak.h +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_KECCAK_H -#define LW_INTERNAL_KECCAK_H - -#include "internal-util.h" - -/** - * \file internal-keccak.h - * \brief Internal implementation of the Keccak-p permutation. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the state for the Keccak-p[200] permutation. - */ -#define KECCAKP_200_STATE_SIZE 25 - -/** - * \brief Size of the state for the Keccak-p[400] permutation. - */ -#define KECCAKP_400_STATE_SIZE 50 - -/** - * \brief Structure of the internal state of the Keccak-p[200] permutation. - */ -typedef union -{ - uint8_t A[5][5]; /**< Keccak-p[200] state as a 5x5 array of lanes */ - uint8_t B[25]; /**< Keccak-p[200] state as a byte array */ - -} keccakp_200_state_t; - -/** - * \brief Structure of the internal state of the Keccak-p[400] permutation. - */ -typedef union -{ - uint16_t A[5][5]; /**< Keccak-p[400] state as a 5x5 array of lanes */ - uint8_t B[50]; /**< Keccak-p[400] state as a byte array */ - -} keccakp_400_state_t; - -/** - * \brief Permutes the Keccak-p[200] state. - * - * \param state The Keccak-p[200] state to be permuted. - */ -void keccakp_200_permute(keccakp_200_state_t *state); - -/** - * \brief Permutes the Keccak-p[400] state, which is assumed to be in - * little-endian byte order. - * - * \param state The Keccak-p[400] state to be permuted. - * \param rounds The number of rounds to perform (up to 20). 
- */ -void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-util.h b/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
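To make the role of these entry points concrete, the following is a minimal sponge-style absorb loop built on keccakp_400_permute, assuming only the keccakp_400_state_t union and the prototype declared in internal-keccak.h above. The 18-byte rate matches the ISAP-K parameters defined later in this patch (ISAP_RATE = 144 / 8); the helper name, the zero state and the omitted padding are illustrative assumptions rather than library code:

    /* Illustrative absorb over Keccak-p[400]: 18-byte rate (144 bits),
     * leaving a 256-bit capacity. Final-block padding is scheme-specific
     * (ISAP has its own re-keying structure) and is deliberately omitted. */
    #include <stddef.h>
    #include "internal-keccak.h"

    #define EXAMPLE_RATE 18

    void example_absorb
        (keccakp_400_state_t *state, const unsigned char *data, size_t len)
    {
        while (len >= EXAMPLE_RATE) {
            for (unsigned i = 0; i < EXAMPLE_RATE; ++i)
                state->B[i] ^= data[i];         /* XOR the block into the rate */
            keccakp_400_permute(state, 20);     /* full 20 rounds */
            data += EXAMPLE_RATE;
            len -= EXAMPLE_RATE;
        }
    }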
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
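The load/store and XOR helpers above are the workhorses of every cipher built on this header, so a short self-contained illustration may help; the byte values are arbitrary and the program only exercises macros defined in internal-util.h:

    /* Round-trip the little-endian 32-bit helpers and lw_xor_block. */
    #include <assert.h>
    #include <stdint.h>
    #include "internal-util.h"

    int main(void)
    {
        unsigned char buf[4];
        unsigned char key[4] = { 0xAA, 0xBB, 0xCC, 0xDD };

        le_store_word32(buf, 0x11223344);   /* buf = 44 33 22 11 */
        assert(le_load_word32(buf) == 0x11223344);
        assert(buf[0] == 0x44 && buf[3] == 0x11);

        lw_xor_block(buf, key, 4);          /* buf[i] ^= key[i] */
        assert(buf[0] == (0x44 ^ 0xAA));
        return 0;
    }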
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
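The composition rules above are easier to see with one concrete count: on AVR only single-bit and whole-byte rotations are cheap, so a rotation left by 13 is built as a byte-wise rotate left by 16 followed by three single-bit rotates right, since 16 - 3 = 13. A small check, assuming the 32-bit macros from internal-util.h; it holds whichever of the two branches above is selected:

    /* Compare the composed form of a left-rotate-by-13 with the generic one. */
    #include <assert.h>
    #include <stdint.h>
    #include "internal-util.h"

    int main(void)
    {
        uint32_t x = 0x80000001u;   /* arbitrary test value */
        uint32_t direct = leftRotate(x, 13);
        uint32_t composed = rightRotate(rightRotate(rightRotate(
                                leftRotate(x, 16), 1), 1), 1);
        assert(direct == composed);
        assert(leftRotate13(x) == direct);
        return 0;
    }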
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/isap.c b/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/isap.c deleted file mode 100644 index 26d50a3..0000000 --- a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/isap.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
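The 8-bit and 16-bit rotation variants that close internal-util.h above exist mainly to express the per-lane rho rotations of Keccak-p[200] and Keccak-p[400] seen earlier in this patch. One byte-sized example, assuming internal-util.h; note that the macro expands to an int-valued expression, so the result is truncated back to a byte before comparing:

    /* 8-bit rotation example of the kind used by the Keccak-p[200] rho step. */
    #include <assert.h>
    #include <stdint.h>
    #include "internal-util.h"

    int main(void)
    {
        uint8_t r = (uint8_t)leftRotate3_8((uint8_t)0x81);
        assert(r == 0x0C);                        /* 1000 0001 -> 0000 1100 */
        assert((uint8_t)rightRotate3_8(r) == 0x81);
        return 0;
    }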
- */ - -#include "isap.h" -#include "internal-keccak.h" -#include "internal-ascon.h" -#include - -aead_cipher_t const isap_keccak_128a_cipher = { - "ISAP-K-128A", - ISAP_KEY_SIZE, - ISAP_NONCE_SIZE, - ISAP_TAG_SIZE, - AEAD_FLAG_NONE, - isap_keccak_128a_aead_encrypt, - isap_keccak_128a_aead_decrypt -}; - -aead_cipher_t const isap_ascon_128a_cipher = { - "ISAP-A-128A", - ISAP_KEY_SIZE, - ISAP_NONCE_SIZE, - ISAP_TAG_SIZE, - AEAD_FLAG_NONE, - isap_ascon_128a_aead_encrypt, - isap_ascon_128a_aead_decrypt -}; - -aead_cipher_t const isap_keccak_128_cipher = { - "ISAP-K-128", - ISAP_KEY_SIZE, - ISAP_NONCE_SIZE, - ISAP_TAG_SIZE, - AEAD_FLAG_NONE, - isap_keccak_128_aead_encrypt, - isap_keccak_128_aead_decrypt -}; - -aead_cipher_t const isap_ascon_128_cipher = { - "ISAP-A-128", - ISAP_KEY_SIZE, - ISAP_NONCE_SIZE, - ISAP_TAG_SIZE, - AEAD_FLAG_NONE, - isap_ascon_128_aead_encrypt, - isap_ascon_128_aead_decrypt -}; - -/* ISAP-K-128A */ -#define ISAP_ALG_NAME isap_keccak_128a -#define ISAP_RATE (144 / 8) -#define ISAP_sH 16 -#define ISAP_sE 8 -#define ISAP_sB 1 -#define ISAP_sK 8 -#define ISAP_STATE keccakp_400_state_t -#define ISAP_PERMUTE(s,r) keccakp_400_permute((s), (r)) -#include "internal-isap.h" - -/* ISAP-A-128A */ -#define ISAP_ALG_NAME isap_ascon_128a -#define ISAP_RATE (64 / 8) -#define ISAP_sH 12 -#define ISAP_sE 6 -#define ISAP_sB 1 -#define ISAP_sK 12 -#define ISAP_STATE ascon_state_t -#define ISAP_PERMUTE(s,r) ascon_permute((s), 12 - (r)) -#include "internal-isap.h" - -/* ISAP-K-128 */ -#define ISAP_ALG_NAME isap_keccak_128 -#define ISAP_RATE (144 / 8) -#define ISAP_sH 20 -#define ISAP_sE 12 -#define ISAP_sB 12 -#define ISAP_sK 12 -#define ISAP_STATE keccakp_400_state_t -#define ISAP_PERMUTE(s,r) keccakp_400_permute((s), (r)) -#include "internal-isap.h" - -/* ISAP-A-128 */ -#define ISAP_ALG_NAME isap_ascon_128 -#define ISAP_RATE (64 / 8) -#define ISAP_sH 12 -#define ISAP_sE 12 -#define ISAP_sB 12 -#define ISAP_sK 12 -#define ISAP_STATE ascon_state_t -#define ISAP_PERMUTE(s,r) ascon_permute((s), 12 - (r)) -#include "internal-isap.h" diff --git a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/isap.h b/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/isap.h deleted file mode 100644 index ddf8203..0000000 --- a/isap/Implementations/crypto_aead/isapk128v20/rhys-avr/isap.h +++ /dev/null @@ -1,330 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
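Reading the four parameter blocks above together: ISAP_RATE is the sponge rate in bytes, and in every case the rate plus a 256-bit capacity gives the width of the underlying permutation,

    Keccak-p[400] members: 144 + 256 = 400 bits   (ISAP_RATE = 144 / 8 = 18 bytes)
    ASCON members:          64 + 256 = 320 bits   (ISAP_RATE =  64 / 8 =  8 bytes)

while ISAP_sH, ISAP_sE, ISAP_sB and ISAP_sK correspond to the round counts the ISAP specification calls s_H, s_E, s_B and s_K, used for hashing, encryption, the bit-by-bit re-keying absorption and the re-keying finalisation respectively. The "128A" members use fewer rounds in most of these steps than the corresponding "128" members, which is what the description of them in isap.h below as reduced-round variants refers to.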
- */ - -#ifndef LWCRYPTO_ISAP_H -#define LWCRYPTO_ISAP_H - -#include "aead-common.h" - -/** - * \file isap.h - * \brief ISAP authenticated encryption algorithm. - * - * ISAP is a family of authenticated encryption algorithms that are built - * around the Keccak-p[400] or ASCON permutations. There are four algorithms - * in the family, each of which have a 128-bit key, a 128-bit nonce, and a - * 128-bit tag: - * - * \li ISAP-K-128A based around the Keccak-p[400] permutation with a - * reduced number of rounds. This is the primary member in the family. - * \li ISAP-A-128A based around the ASCON permutation with a reduced - * number of rounds. - * \li ISAP-K-128 based around the Keccak-p[400] permutation. - * \li ISAP-A-128 based around the ASCON permutation. - * - * ISAP is designed to provide some protection against adversaries - * using differential power analysis to determine the key. The - * downside is that key setup is very slow. - * - * References: https://isap.iaik.tugraz.at/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all ISAP family members. - */ -#define ISAP_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for all ISAP family members. - */ -#define ISAP_TAG_SIZE 16 - -/** - * \brief Size of the nonce for all ISAP family members. - */ -#define ISAP_NONCE_SIZE 16 - -/** - * \brief Meta-information block for the ISAP-K-128A cipher. - */ -extern aead_cipher_t const isap_keccak_128a_cipher; - -/** - * \brief Meta-information block for the ISAP-A-128A cipher. - */ -extern aead_cipher_t const isap_ascon_128a_cipher; - -/** - * \brief Meta-information block for the ISAP-K-128 cipher. - */ -extern aead_cipher_t const isap_keccak_128_cipher; - -/** - * \brief Meta-information block for the ISAP-A-128 cipher. - */ -extern aead_cipher_t const isap_ascon_128_cipher; - -/** - * \brief Encrypts and authenticates a packet with ISAP-K-128A. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa isap_keccak_128a_aead_decrypt() - */ -int isap_keccak_128a_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ISAP-K-128A. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa isap_keccak_128a_aead_encrypt() - */ -int isap_keccak_128a_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ISAP-A-128A. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa isap_ascon_128a_aead_decrypt() - */ -int isap_ascon_128a_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ISAP-A-128A. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa isap_ascon_128a_aead_encrypt() - */ -int isap_ascon_128a_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ISAP-K-128. - * - * \param c Buffer to receive the output. 
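The prototypes above follow the usual AEAD calling convention used throughout this library: encryption writes mlen + ISAP_TAG_SIZE bytes of ciphertext plus tag, and decryption reports success only when the tag verifies. A brief usage sketch for ISAP-K-128A; the buffer sizes and the demo() wrapper are illustrative, not part of the API:

#include <string.h>
#include "isap.h"

static int demo(void)
{
    unsigned char key[ISAP_KEY_SIZE] = {0};
    unsigned char nonce[ISAP_NONCE_SIZE] = {0};
    unsigned char msg[4] = {'t', 'e', 's', 't'};
    unsigned char ad[2] = {0x01, 0x02};
    unsigned char ct[sizeof(msg) + ISAP_TAG_SIZE];
    unsigned char out[sizeof(msg)];
    unsigned long long clen, mlen;

    /* Encrypt: ct receives the ciphertext followed by the 16-byte tag */
    if (isap_keccak_128a_aead_encrypt(ct, &clen, msg, sizeof(msg),
                                      ad, sizeof(ad), 0, nonce, key) != 0)
        return -1;

    /* Decrypt: returns 0 only if the recomputed tag matches */
    if (isap_keccak_128a_aead_decrypt(out, &mlen, 0, ct, clen,
                                      ad, sizeof(ad), nonce, key) != 0)
        return -1;

    return memcmp(out, msg, (size_t)mlen) == 0 ? 0 : -1;
}

The other three variants take exactly the same arguments; only the function prefix changes.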
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa isap_keccak_128_aead_decrypt() - */ -int isap_keccak_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ISAP-K-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa isap_keccak_128_aead_encrypt() - */ -int isap_keccak_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with ISAP-A-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa isap_ascon_128_aead_decrypt() - */ -int isap_ascon_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ISAP-A-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa isap_ascon_128_aead_encrypt() - */ -int isap_ascon_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-ascon-avr.S b/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-ascon-avr.S new file mode 100644 index 0000000..e8a4fb4 --- /dev/null +++ b/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-ascon-avr.S @@ -0,0 +1,778 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global ascon_permute + .type ascon_permute, @function +ascon_permute: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ldi r18,15 + sub r18,r22 + swap r18 + or r22,r18 + ldd r3,Z+16 + ldd r2,Z+17 + ldd r27,Z+18 + ldd r26,Z+19 + ldd r21,Z+20 + ldd r20,Z+21 + ldd r19,Z+22 + ldd r18,Z+23 + ldd r11,Z+32 + ldd r10,Z+33 + ldd r9,Z+34 + ldd r8,Z+35 + ldd r7,Z+36 + ldd r6,Z+37 + ldd r5,Z+38 + ldd r4,Z+39 +20: + eor r18,r22 + ldd r23,Z+7 + ldd r12,Z+15 + ldd r13,Z+31 + eor r23,r4 + eor r4,r13 + eor r18,r12 + mov r14,r23 + mov r15,r12 + mov r24,r18 + mov r25,r13 + mov r16,r4 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r18 + and r24,r13 + and r25,r4 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r18,r25 + eor r13,r16 + eor r4,r14 + eor r12,r23 + eor r23,r4 + eor r13,r18 + com r18 + std Z+7,r23 + std Z+15,r12 + std Z+31,r13 + std Z+39,r4 + ldd r23,Z+6 + ldd r12,Z+14 + ldd r13,Z+30 + eor r23,r5 + eor r5,r13 + eor r19,r12 + mov r14,r23 + mov r15,r12 + mov r24,r19 + mov r25,r13 + mov r16,r5 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r19 + and r24,r13 + and r25,r5 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r19,r25 + eor r13,r16 + eor r5,r14 + eor r12,r23 + eor r23,r5 + eor r13,r19 + com r19 + std Z+6,r23 + std Z+14,r12 + std Z+30,r13 + std Z+38,r5 + ldd r23,Z+5 + 
ldd r12,Z+13 + ldd r13,Z+29 + eor r23,r6 + eor r6,r13 + eor r20,r12 + mov r14,r23 + mov r15,r12 + mov r24,r20 + mov r25,r13 + mov r16,r6 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r20 + and r24,r13 + and r25,r6 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r20,r25 + eor r13,r16 + eor r6,r14 + eor r12,r23 + eor r23,r6 + eor r13,r20 + com r20 + std Z+5,r23 + std Z+13,r12 + std Z+29,r13 + std Z+37,r6 + ldd r23,Z+4 + ldd r12,Z+12 + ldd r13,Z+28 + eor r23,r7 + eor r7,r13 + eor r21,r12 + mov r14,r23 + mov r15,r12 + mov r24,r21 + mov r25,r13 + mov r16,r7 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r21 + and r24,r13 + and r25,r7 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r21,r25 + eor r13,r16 + eor r7,r14 + eor r12,r23 + eor r23,r7 + eor r13,r21 + com r21 + std Z+4,r23 + std Z+12,r12 + std Z+28,r13 + std Z+36,r7 + ldd r23,Z+3 + ldd r12,Z+11 + ldd r13,Z+27 + eor r23,r8 + eor r8,r13 + eor r26,r12 + mov r14,r23 + mov r15,r12 + mov r24,r26 + mov r25,r13 + mov r16,r8 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r26 + and r24,r13 + and r25,r8 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r26,r25 + eor r13,r16 + eor r8,r14 + eor r12,r23 + eor r23,r8 + eor r13,r26 + com r26 + std Z+3,r23 + std Z+11,r12 + std Z+27,r13 + std Z+35,r8 + ldd r23,Z+2 + ldd r12,Z+10 + ldd r13,Z+26 + eor r23,r9 + eor r9,r13 + eor r27,r12 + mov r14,r23 + mov r15,r12 + mov r24,r27 + mov r25,r13 + mov r16,r9 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r27 + and r24,r13 + and r25,r9 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r27,r25 + eor r13,r16 + eor r9,r14 + eor r12,r23 + eor r23,r9 + eor r13,r27 + com r27 + std Z+2,r23 + std Z+10,r12 + std Z+26,r13 + std Z+34,r9 + ldd r23,Z+1 + ldd r12,Z+9 + ldd r13,Z+25 + eor r23,r10 + eor r10,r13 + eor r2,r12 + mov r14,r23 + mov r15,r12 + mov r24,r2 + mov r25,r13 + mov r16,r10 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r2 + and r24,r13 + and r25,r10 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r2,r25 + eor r13,r16 + eor r10,r14 + eor r12,r23 + eor r23,r10 + eor r13,r2 + com r2 + std Z+1,r23 + std Z+9,r12 + std Z+25,r13 + std Z+33,r10 + ld r23,Z + ldd r12,Z+8 + ldd r13,Z+24 + eor r23,r11 + eor r11,r13 + eor r3,r12 + mov r14,r23 + mov r15,r12 + mov r24,r3 + mov r25,r13 + mov r16,r11 + com r14 + com r15 + com r24 + com r25 + com r16 + and r14,r12 + and r15,r3 + and r24,r13 + and r25,r11 + and r16,r23 + eor r23,r15 + eor r12,r24 + eor r3,r25 + eor r13,r16 + eor r11,r14 + eor r12,r23 + eor r23,r11 + eor r13,r3 + com r3 + st Z,r23 + std Z+8,r12 + std Z+24,r13 + std Z+32,r11 + ld r11,Z + ldd r10,Z+1 + ldd r9,Z+2 + ldd r8,Z+3 + ldd r7,Z+4 + ldd r6,Z+5 + ldd r5,Z+6 + ldd r4,Z+7 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r14 + mov r14,r24 + mov r24,r16 + mov r16,r0 + mov r0,r13 + mov r13,r15 + mov r15,r25 + mov r25,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r17,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r4 + mov r0,r5 + push r6 + mov r4,r7 + mov r5,r8 + mov r6,r9 + mov r7,r10 + mov r8,r11 + pop r11 + mov r10,r0 + mov r9,r23 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + 
ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + st Z,r11 + std Z+1,r10 + std Z+2,r9 + std Z+3,r8 + std Z+4,r7 + std Z+5,r6 + std Z+6,r5 + std Z+7,r4 + ldd r11,Z+8 + ldd r10,Z+9 + ldd r9,Z+10 + ldd r8,Z+11 + ldd r7,Z+12 + ldd r6,Z+13 + ldd r5,Z+14 + ldd r4,Z+15 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r9 + mov r0,r10 + push r11 + mov r11,r8 + mov r10,r7 + mov r9,r6 + mov r8,r5 + mov r7,r4 + pop r6 + mov r5,r0 + mov r4,r23 + lsl r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + adc r4,r1 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + std Z+8,r11 + std Z+9,r10 + std Z+10,r9 + std Z+11,r8 + std Z+12,r7 + std Z+13,r6 + std Z+14,r5 + std Z+15,r4 + movw r12,r18 + movw r14,r20 + movw r24,r26 + movw r16,r2 + bst r12,0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + bld r17,7 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + eor r24,r26 + eor r25,r27 + eor r16,r2 + eor r17,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r26 + mov r26,r27 + mov r27,r2 + mov r2,r3 + mov r3,r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r2 + rol r3 + adc r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r2 + rol r3 + adc r18,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + eor r26,r24 + eor r27,r25 + eor r2,r16 + eor r3,r17 + ldd r11,Z+24 + ldd r10,Z+25 + ldd r9,Z+26 + ldd r8,Z+27 + ldd r7,Z+28 + ldd r6,Z+29 + ldd r5,Z+30 + ldd r4,Z+31 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + mov r0,r1 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r0 + or r17,r0 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r0,r4 + mov r4,r6 + mov r6,r8 + mov r8,r10 + mov r10,r0 + mov r0,r5 + mov r5,r7 + mov r7,r9 + mov r9,r11 + mov r11,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + std Z+24,r11 + std Z+25,r10 + std Z+26,r9 + std Z+27,r8 + std Z+28,r7 + std Z+29,r6 + std Z+30,r5 + std Z+31,r4 + ldd r11,Z+32 + ldd r10,Z+33 + ldd r9,Z+34 + ldd r8,Z+35 + ldd r7,Z+36 + ldd r6,Z+37 + ldd r5,Z+38 + ldd r4,Z+39 + movw r12,r4 + movw r14,r6 + movw r24,r8 + movw r16,r10 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r24 + mov r24,r25 + mov r25,r16 + mov r16,r17 + mov r17,r0 + lsl r12 + rol 
r13 + rol r14 + rol r15 + rol r24 + rol r25 + rol r16 + rol r17 + adc r12,r1 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + mov r23,r9 + mov r0,r10 + push r11 + mov r11,r8 + mov r10,r7 + mov r9,r6 + mov r8,r5 + mov r7,r4 + pop r6 + mov r5,r0 + mov r4,r23 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r0 + or r11,r0 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + subi r22,15 + ldi r25,60 + cpse r22,r25 + rjmp 20b + std Z+16,r3 + std Z+17,r2 + std Z+18,r27 + std Z+19,r26 + std Z+20,r21 + std Z+21,r20 + std Z+22,r19 + std Z+23,r18 + std Z+32,r11 + std Z+33,r10 + std Z+34,r9 + std Z+35,r8 + std Z+36,r7 + std Z+37,r6 + std Z+38,r5 + std Z+39,r4 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size ascon_permute, .-ascon_permute + +#endif diff --git a/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-ascon.c b/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-ascon.c index 12a8ec6..657aabe 100644 --- a/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-ascon.c +++ b/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-ascon.c @@ -22,6 +22,8 @@ #include "internal-ascon.h" +#if !defined(__AVR__) + void ascon_permute(ascon_state_t *state, uint8_t first_round) { uint64_t t0, t1, t2, t3, t4; @@ -74,3 +76,5 @@ void ascon_permute(ascon_state_t *state, uint8_t first_round) state->S[4] = x4; #endif } + +#endif /* !__AVR__ */ diff --git a/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-keccak-avr.S b/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-keccak-avr.S new file mode 100644 index 0000000..e50ccaf --- /dev/null +++ b/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-keccak-avr.S @@ -0,0 +1,1552 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global keccakp_200_permute + .type keccakp_200_permute, @function +keccakp_200_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r26,Z+6 + ldd r27,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r4,Z+12 + ldd r5,Z+13 + ldd r6,Z+14 + ldd r7,Z+15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + ldd r24,Z+24 + push r31 + push r30 + rcall 82f + ldi r30,1 + eor r18,r30 + rcall 82f + ldi r30,130 + eor r18,r30 + rcall 82f + ldi r30,138 + eor r18,r30 + rcall 82f + mov r30,r1 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,1 + eor r18,r30 + rcall 82f + ldi r30,129 + eor r18,r30 + rcall 82f + ldi r30,9 + eor r18,r30 + rcall 82f + ldi r30,138 + eor r18,r30 + rcall 82f + ldi r30,136 + eor r18,r30 + rcall 82f + ldi r30,9 + eor r18,r30 + rcall 82f + ldi r30,10 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,139 + eor r18,r30 + rcall 82f + ldi r30,137 + eor r18,r30 + rcall 82f + ldi r30,3 + eor r18,r30 + rcall 82f + ldi r30,2 + eor r18,r30 + rcall 82f + ldi r30,128 + eor r18,r30 + rjmp 420f +82: + mov r30,r18 + eor r30,r23 + eor r30,r2 + eor r30,r7 + eor r30,r12 + mov r31,r19 + eor r31,r26 
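The internal-ascon.c hunk above shows how this patch arranges for exactly one definition of each permutation to be compiled: the portable C body is wrapped in #if !defined(__AVR__), while the newly added internal-ascon-avr.S and internal-keccak-avr.S files are wrapped in #if defined(__AVR__), so AVR builds assemble the generated code and every other target keeps the C fallback. A minimal sketch of the pattern, using a hypothetical permutation rather than the real ascon_permute:

/* thing.c -- portable fallback, compiled out on AVR where a thing-avr.S
 * file would provide the same symbol instead. Names are illustrative only. */
typedef struct { unsigned char S[40]; } thing_state_t;

#if !defined(__AVR__)
void thing_permute(thing_state_t *state)
{
    (void)state;    /* portable C rounds would go here */
}
#endif /* !__AVR__ */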
+ eor r31,r3 + eor r31,r8 + eor r31,r13 + mov r25,r20 + eor r25,r27 + eor r25,r4 + eor r25,r9 + eor r25,r14 + mov r16,r21 + eor r16,r28 + eor r16,r5 + eor r16,r10 + eor r16,r15 + mov r17,r22 + eor r17,r29 + eor r17,r6 + eor r17,r11 + eor r17,r24 + mov r0,r31 + lsl r0 + adc r0,r1 + eor r0,r17 + eor r18,r0 + eor r23,r0 + eor r2,r0 + eor r7,r0 + eor r12,r0 + mov r0,r25 + lsl r0 + adc r0,r1 + eor r0,r30 + eor r19,r0 + eor r26,r0 + eor r3,r0 + eor r8,r0 + eor r13,r0 + mov r0,r16 + lsl r0 + adc r0,r1 + eor r0,r31 + eor r20,r0 + eor r27,r0 + eor r4,r0 + eor r9,r0 + eor r14,r0 + mov r0,r17 + lsl r0 + adc r0,r1 + eor r0,r25 + eor r21,r0 + eor r28,r0 + eor r5,r0 + eor r10,r0 + eor r15,r0 + mov r0,r30 + lsl r0 + adc r0,r1 + eor r0,r16 + eor r22,r0 + eor r29,r0 + eor r6,r0 + eor r11,r0 + eor r24,r0 + mov r30,r19 + swap r26 + mov r19,r26 + swap r29 + mov r26,r29 + mov r0,r1 + lsr r14 + ror r0 + lsr r14 + ror r0 + lsr r14 + ror r0 + or r14,r0 + mov r29,r14 + bst r6,0 + lsr r6 + bld r6,7 + mov r14,r6 + lsl r12 + adc r12,r1 + lsl r12 + adc r12,r1 + mov r6,r12 + mov r0,r1 + lsr r20 + ror r0 + lsr r20 + ror r0 + or r20,r0 + mov r12,r20 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + mov r20,r4 + lsl r5 + adc r5,r1 + mov r4,r5 + mov r5,r11 + mov r11,r15 + lsl r7 + adc r7,r1 + mov r15,r7 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + mov r7,r22 + mov r0,r1 + lsr r24 + ror r0 + lsr r24 + ror r0 + or r24,r0 + mov r22,r24 + lsl r13 + adc r13,r1 + lsl r13 + adc r13,r1 + mov r24,r13 + bst r28,0 + lsr r28 + bld r28,7 + mov r13,r28 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r28,r8 + swap r23 + mov r8,r23 + swap r21 + mov r23,r21 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r21,r10 + bst r9,0 + lsr r9 + bld r9,7 + mov r10,r9 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + mov r9,r3 + mov r0,r1 + lsr r27 + ror r0 + lsr r27 + ror r0 + or r27,r0 + mov r3,r27 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + mov r27,r2 + lsl r30 + adc r30,r1 + mov r2,r30 + mov r30,r18 + mov r31,r19 + mov r25,r20 + mov r16,r21 + mov r17,r22 + mov r18,r25 + mov r0,r31 + com r0 + and r18,r0 + eor r18,r30 + mov r19,r16 + mov r0,r25 + com r0 + and r19,r0 + eor r19,r31 + mov r20,r17 + mov r0,r16 + com r0 + and r20,r0 + eor r20,r25 + mov r21,r30 + mov r0,r17 + com r0 + and r21,r0 + eor r21,r16 + mov r22,r31 + mov r0,r30 + com r0 + and r22,r0 + eor r22,r17 + mov r30,r23 + mov r31,r26 + mov r25,r27 + mov r16,r28 + mov r17,r29 + mov r23,r25 + mov r0,r31 + com r0 + and r23,r0 + eor r23,r30 + mov r26,r16 + mov r0,r25 + com r0 + and r26,r0 + eor r26,r31 + mov r27,r17 + mov r0,r16 + com r0 + and r27,r0 + eor r27,r25 + mov r28,r30 + mov r0,r17 + com r0 + and r28,r0 + eor r28,r16 + mov r29,r31 + mov r0,r30 + com r0 + and r29,r0 + eor r29,r17 + mov r30,r2 + mov r31,r3 + mov r25,r4 + mov r16,r5 + mov r17,r6 + mov r2,r25 + mov r0,r31 + com r0 + and r2,r0 + eor r2,r30 + mov r3,r16 + mov r0,r25 + com r0 + and r3,r0 + eor r3,r31 + mov r4,r17 + mov r0,r16 + com r0 + and r4,r0 + eor r4,r25 + mov r5,r30 + mov r0,r17 + com r0 + and r5,r0 + eor r5,r16 + mov r6,r31 + mov r0,r30 + com r0 + and r6,r0 + eor r6,r17 + mov r30,r7 + mov r31,r8 + mov r25,r9 + mov r16,r10 + mov r17,r11 + mov r7,r25 + mov r0,r31 + com r0 + and r7,r0 + eor r7,r30 + mov r8,r16 + mov r0,r25 + com r0 + and r8,r0 + eor r8,r31 + mov r9,r17 + mov r0,r16 + com r0 + and r9,r0 + eor r9,r25 + mov r10,r30 + mov r0,r17 + com r0 + and r10,r0 + eor r10,r16 + mov r11,r31 + 
mov r0,r30 + com r0 + and r11,r0 + eor r11,r17 + mov r30,r12 + mov r31,r13 + mov r25,r14 + mov r16,r15 + mov r17,r24 + mov r12,r25 + mov r0,r31 + com r0 + and r12,r0 + eor r12,r30 + mov r13,r16 + mov r0,r25 + com r0 + and r13,r0 + eor r13,r31 + mov r14,r17 + mov r0,r16 + com r0 + and r14,r0 + eor r14,r25 + mov r15,r30 + mov r0,r17 + com r0 + and r15,r0 + eor r15,r16 + mov r24,r31 + mov r0,r30 + com r0 + and r24,r0 + eor r24,r17 + ret +420: + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r22 + std Z+5,r23 + std Z+6,r26 + std Z+7,r27 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r4 + std Z+13,r5 + std Z+14,r6 + std Z+15,r7 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + std Z+24,r24 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size keccakp_200_permute, .-keccakp_200_permute + + .text +.global keccakp_400_permute + .type keccakp_400_permute, @function +keccakp_400_permute: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + movw r30,r24 +.L__stack_usage = 17 + ld r6,Z + ldd r7,Z+1 + ldd r8,Z+2 + ldd r9,Z+3 + ldd r10,Z+4 + ldd r11,Z+5 + ldd r12,Z+6 + ldd r13,Z+7 + ldd r14,Z+8 + ldd r15,Z+9 + cpi r22,20 + brcs 15f + rcall 153f + ldi r23,1 + eor r6,r23 +15: + cpi r22,19 + brcs 23f + rcall 153f + ldi r23,130 + eor r6,r23 + ldi r17,128 + eor r7,r17 +23: + cpi r22,18 + brcs 31f + rcall 153f + ldi r23,138 + eor r6,r23 + ldi r17,128 + eor r7,r17 +31: + cpi r22,17 + brcs 37f + rcall 153f + ldi r23,128 + eor r7,r23 +37: + cpi r22,16 + brcs 45f + rcall 153f + ldi r23,139 + eor r6,r23 + ldi r17,128 + eor r7,r17 +45: + cpi r22,15 + brcs 51f + rcall 153f + ldi r23,1 + eor r6,r23 +51: + cpi r22,14 + brcs 59f + rcall 153f + ldi r23,129 + eor r6,r23 + ldi r17,128 + eor r7,r17 +59: + cpi r22,13 + brcs 67f + rcall 153f + ldi r23,9 + eor r6,r23 + ldi r17,128 + eor r7,r17 +67: + cpi r22,12 + brcs 73f + rcall 153f + ldi r23,138 + eor r6,r23 +73: + cpi r22,11 + brcs 79f + rcall 153f + ldi r23,136 + eor r6,r23 +79: + cpi r22,10 + brcs 87f + rcall 153f + ldi r23,9 + eor r6,r23 + ldi r17,128 + eor r7,r17 +87: + cpi r22,9 + brcs 93f + rcall 153f + ldi r23,10 + eor r6,r23 +93: + cpi r22,8 + brcs 101f + rcall 153f + ldi r23,139 + eor r6,r23 + ldi r17,128 + eor r7,r17 +101: + cpi r22,7 + brcs 107f + rcall 153f + ldi r23,139 + eor r6,r23 +107: + cpi r22,6 + brcs 115f + rcall 153f + ldi r23,137 + eor r6,r23 + ldi r17,128 + eor r7,r17 +115: + cpi r22,5 + brcs 123f + rcall 153f + ldi r23,3 + eor r6,r23 + ldi r17,128 + eor r7,r17 +123: + cpi r22,4 + brcs 131f + rcall 153f + ldi r23,2 + eor r6,r23 + ldi r17,128 + eor r7,r17 +131: + cpi r22,3 + brcs 137f + rcall 153f + ldi r23,128 + eor r6,r23 +137: + cpi r22,2 + brcs 145f + rcall 153f + ldi r23,10 + eor r6,r23 + ldi r17,128 + eor r7,r17 +145: + cpi r22,1 + brcs 151f + rcall 153f + ldi r23,10 + eor r6,r23 +151: + rjmp 1004f +153: + movw r18,r6 + ldd r0,Z+10 + eor r18,r0 + ldd r0,Z+11 + eor r19,r0 + ldd r0,Z+20 + eor r18,r0 + ldd r0,Z+21 + eor r19,r0 + ldd r0,Z+30 + eor r18,r0 + ldd r0,Z+31 + eor r19,r0 + ldd r0,Z+40 + eor r18,r0 + ldd r0,Z+41 + eor r19,r0 + movw r20,r8 + ldd r0,Z+12 + eor r20,r0 + ldd r0,Z+13 + eor r21,r0 + ldd r0,Z+22 + eor r20,r0 + ldd r0,Z+23 + eor r21,r0 + ldd r0,Z+32 + eor r20,r0 + ldd r0,Z+33 
+ eor r21,r0 + ldd r0,Z+42 + eor r20,r0 + ldd r0,Z+43 + eor r21,r0 + movw r26,r10 + ldd r0,Z+14 + eor r26,r0 + ldd r0,Z+15 + eor r27,r0 + ldd r0,Z+24 + eor r26,r0 + ldd r0,Z+25 + eor r27,r0 + ldd r0,Z+34 + eor r26,r0 + ldd r0,Z+35 + eor r27,r0 + ldd r0,Z+44 + eor r26,r0 + ldd r0,Z+45 + eor r27,r0 + movw r2,r12 + ldd r0,Z+16 + eor r2,r0 + ldd r0,Z+17 + eor r3,r0 + ldd r0,Z+26 + eor r2,r0 + ldd r0,Z+27 + eor r3,r0 + ldd r0,Z+36 + eor r2,r0 + ldd r0,Z+37 + eor r3,r0 + ldd r0,Z+46 + eor r2,r0 + ldd r0,Z+47 + eor r3,r0 + movw r4,r14 + ldd r0,Z+18 + eor r4,r0 + ldd r0,Z+19 + eor r5,r0 + ldd r0,Z+28 + eor r4,r0 + ldd r0,Z+29 + eor r5,r0 + ldd r0,Z+38 + eor r4,r0 + ldd r0,Z+39 + eor r5,r0 + ldd r0,Z+48 + eor r4,r0 + ldd r0,Z+49 + eor r5,r0 + movw r24,r20 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r4 + eor r25,r5 + eor r6,r24 + eor r7,r25 + ldd r0,Z+10 + eor r0,r24 + std Z+10,r0 + ldd r0,Z+11 + eor r0,r25 + std Z+11,r0 + ldd r0,Z+20 + eor r0,r24 + std Z+20,r0 + ldd r0,Z+21 + eor r0,r25 + std Z+21,r0 + ldd r0,Z+30 + eor r0,r24 + std Z+30,r0 + ldd r0,Z+31 + eor r0,r25 + std Z+31,r0 + ldd r0,Z+40 + eor r0,r24 + std Z+40,r0 + ldd r0,Z+41 + eor r0,r25 + std Z+41,r0 + movw r24,r26 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r18 + eor r25,r19 + eor r8,r24 + eor r9,r25 + ldd r0,Z+12 + eor r0,r24 + std Z+12,r0 + ldd r0,Z+13 + eor r0,r25 + std Z+13,r0 + ldd r0,Z+22 + eor r0,r24 + std Z+22,r0 + ldd r0,Z+23 + eor r0,r25 + std Z+23,r0 + ldd r0,Z+32 + eor r0,r24 + std Z+32,r0 + ldd r0,Z+33 + eor r0,r25 + std Z+33,r0 + ldd r0,Z+42 + eor r0,r24 + std Z+42,r0 + ldd r0,Z+43 + eor r0,r25 + std Z+43,r0 + movw r24,r2 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r20 + eor r25,r21 + eor r10,r24 + eor r11,r25 + ldd r0,Z+14 + eor r0,r24 + std Z+14,r0 + ldd r0,Z+15 + eor r0,r25 + std Z+15,r0 + ldd r0,Z+24 + eor r0,r24 + std Z+24,r0 + ldd r0,Z+25 + eor r0,r25 + std Z+25,r0 + ldd r0,Z+34 + eor r0,r24 + std Z+34,r0 + ldd r0,Z+35 + eor r0,r25 + std Z+35,r0 + ldd r0,Z+44 + eor r0,r24 + std Z+44,r0 + ldd r0,Z+45 + eor r0,r25 + std Z+45,r0 + movw r24,r4 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r26 + eor r25,r27 + eor r12,r24 + eor r13,r25 + ldd r0,Z+16 + eor r0,r24 + std Z+16,r0 + ldd r0,Z+17 + eor r0,r25 + std Z+17,r0 + ldd r0,Z+26 + eor r0,r24 + std Z+26,r0 + ldd r0,Z+27 + eor r0,r25 + std Z+27,r0 + ldd r0,Z+36 + eor r0,r24 + std Z+36,r0 + ldd r0,Z+37 + eor r0,r25 + std Z+37,r0 + ldd r0,Z+46 + eor r0,r24 + std Z+46,r0 + ldd r0,Z+47 + eor r0,r25 + std Z+47,r0 + movw r24,r18 + lsl r24 + rol r25 + adc r24,r1 + eor r24,r2 + eor r25,r3 + eor r14,r24 + eor r15,r25 + ldd r0,Z+18 + eor r0,r24 + std Z+18,r0 + ldd r0,Z+19 + eor r0,r25 + std Z+19,r0 + ldd r0,Z+28 + eor r0,r24 + std Z+28,r0 + ldd r0,Z+29 + eor r0,r25 + std Z+29,r0 + ldd r0,Z+38 + eor r0,r24 + std Z+38,r0 + ldd r0,Z+39 + eor r0,r25 + std Z+39,r0 + ldd r0,Z+48 + eor r0,r24 + std Z+48,r0 + ldd r0,Z+49 + eor r0,r25 + std Z+49,r0 + movw r24,r8 + ldd r8,Z+12 + ldd r9,Z+13 + mov r0,r9 + mov r9,r8 + mov r8,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldd r18,Z+18 + ldd r19,Z+19 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+12,r18 + std Z+13,r19 + ldd r18,Z+44 + ldd r19,Z+45 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+18,r18 + std Z+19,r19 + ldd r18,Z+28 + ldd r19,Z+29 + mov r0,r19 + mov r19,r18 + mov r18,r0 + bst r18,0 + lsr r19 + ror r18 
+ bld r19,7 + std Z+44,r18 + std Z+45,r19 + ldd r18,Z+40 + ldd r19,Z+41 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+28,r18 + std Z+29,r19 + movw r18,r10 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+40,r18 + std Z+41,r19 + ldd r10,Z+24 + ldd r11,Z+25 + mov r0,r11 + mov r11,r10 + mov r10,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldd r18,Z+26 + ldd r19,Z+27 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + std Z+24,r18 + std Z+25,r19 + ldd r18,Z+38 + ldd r19,Z+39 + mov r0,r19 + mov r19,r18 + mov r18,r0 + std Z+26,r18 + std Z+27,r19 + ldd r18,Z+46 + ldd r19,Z+47 + mov r0,r19 + mov r19,r18 + mov r18,r0 + std Z+38,r18 + std Z+39,r19 + ldd r18,Z+30 + ldd r19,Z+31 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + std Z+46,r18 + std Z+47,r19 + movw r18,r14 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+30,r18 + std Z+31,r19 + ldd r14,Z+48 + ldd r15,Z+49 + mov r0,r1 + lsr r15 + ror r14 + ror r0 + lsr r15 + ror r14 + ror r0 + or r15,r0 + ldd r18,Z+42 + ldd r19,Z+43 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+48,r18 + std Z+49,r19 + ldd r18,Z+16 + ldd r19,Z+17 + mov r0,r19 + mov r19,r18 + mov r18,r0 + bst r18,0 + lsr r19 + ror r18 + bld r19,7 + std Z+42,r18 + std Z+43,r19 + ldd r18,Z+32 + ldd r19,Z+33 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+16,r18 + std Z+17,r19 + ldd r18,Z+10 + ldd r19,Z+11 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+32,r18 + std Z+33,r19 + movw r18,r12 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+10,r18 + std Z+11,r19 + ldd r12,Z+36 + ldd r13,Z+37 + mov r0,r13 + mov r13,r12 + mov r12,r0 + mov r0,r1 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + lsr r13 + ror r12 + ror r0 + or r13,r0 + ldd r18,Z+34 + ldd r19,Z+35 + bst r18,0 + lsr r19 + ror r18 + bld r19,7 + std Z+36,r18 + std Z+37,r19 + ldd r18,Z+22 + ldd r19,Z+23 + mov r0,r19 + mov r19,r18 + mov r18,r0 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+34,r18 + std Z+35,r19 + ldd r18,Z+14 + ldd r19,Z+15 + mov r0,r19 + mov r19,r18 + mov r18,r0 + mov r0,r1 + lsr r19 + ror r18 + ror r0 + lsr r19 + ror r18 + ror r0 + or r19,r0 + std Z+22,r18 + std Z+23,r19 + ldd r18,Z+20 + ldd r19,Z+21 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + lsl r18 + rol r19 + adc r18,r1 + std Z+14,r18 + std Z+15,r19 + lsl r24 + rol r25 + adc r24,r1 + std Z+20,r24 + std Z+21,r25 + movw r18,r6 + movw r20,r8 + movw r26,r10 + movw r2,r12 + movw r4,r14 + movw r6,r26 + mov r0,r20 + com r0 + and r6,r0 + mov r0,r21 + com r0 + and r7,r0 + eor r6,r18 + eor r7,r19 + movw r8,r2 + mov r0,r26 + com r0 + and r8,r0 + mov r0,r27 + com r0 + and r9,r0 + eor r8,r20 + eor r9,r21 + movw r10,r4 + mov r0,r2 + com r0 + and r10,r0 + mov r0,r3 + com r0 + and r11,r0 + eor r10,r26 + eor r11,r27 + movw r12,r18 + mov r0,r4 + com r0 + and r12,r0 + mov r0,r5 + com r0 + and r13,r0 + eor r12,r2 + eor r13,r3 + movw r14,r20 + mov r0,r18 + com r0 + and r14,r0 + mov r0,r19 + com r0 + and r15,r0 + eor r14,r4 + eor r15,r5 + ldd 
r18,Z+10 + ldd r19,Z+11 + ldd r20,Z+12 + ldd r21,Z+13 + ldd r26,Z+14 + ldd r27,Z+15 + ldd r2,Z+16 + ldd r3,Z+17 + ldd r4,Z+18 + ldd r5,Z+19 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+10,r24 + std Z+11,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+12,r24 + std Z+13,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+14,r24 + std Z+15,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+16,r24 + std Z+17,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+18,r24 + std Z+19,r25 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + ldd r26,Z+24 + ldd r27,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r4,Z+28 + ldd r5,Z+29 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+20,r24 + std Z+21,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+22,r24 + std Z+23,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+24,r24 + std Z+25,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+26,r24 + std Z+27,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+28,r24 + std Z+29,r25 + ldd r18,Z+30 + ldd r19,Z+31 + ldd r20,Z+32 + ldd r21,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + ldd r2,Z+36 + ldd r3,Z+37 + ldd r4,Z+38 + ldd r5,Z+39 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+30,r24 + std Z+31,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+32,r24 + std Z+33,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+34,r24 + std Z+35,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+36,r24 + std Z+37,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+38,r24 + std Z+39,r25 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + ldd r26,Z+44 + ldd r27,Z+45 + ldd r2,Z+46 + ldd r3,Z+47 + ldd r4,Z+48 + ldd r5,Z+49 + movw r24,r26 + mov r0,r20 + com r0 + and r24,r0 + mov r0,r21 + com r0 + and r25,r0 + eor r24,r18 + eor r25,r19 + std Z+40,r24 + std Z+41,r25 + movw r24,r2 + mov r0,r26 + com r0 + and r24,r0 + mov r0,r27 + com r0 + and r25,r0 + eor r24,r20 + eor r25,r21 + std Z+42,r24 + std Z+43,r25 + movw r24,r4 + mov r0,r2 + com r0 + and r24,r0 + mov r0,r3 + com r0 + and r25,r0 + eor r24,r26 + eor r25,r27 + std Z+44,r24 + std Z+45,r25 + movw r24,r18 + mov r0,r4 + com r0 + and r24,r0 + mov r0,r5 + com r0 + and r25,r0 + eor r24,r2 + eor r25,r3 + std Z+46,r24 + std Z+47,r25 + movw r24,r20 + mov r0,r18 + com r0 + and r24,r0 + mov r0,r19 + com r0 + and r25,r0 + eor r24,r4 + eor r25,r5 + std Z+48,r24 + std Z+49,r25 + ret +1004: + st Z,r6 + std Z+1,r7 + std Z+2,r8 + std Z+3,r9 + std Z+4,r10 + std Z+5,r11 + std Z+6,r12 + std Z+7,r13 + std Z+8,r14 + std Z+9,r15 + pop r17 
+ pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size keccakp_400_permute, .-keccakp_400_permute + +#endif diff --git a/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-keccak.c b/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-keccak.c index c3c4011..60539df 100644 --- a/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-keccak.c +++ b/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-keccak.c @@ -22,74 +22,79 @@ #include "internal-keccak.h" +#if !defined(__AVR__) + /* Faster method to compute ((x + y) % 5) that avoids the division */ static unsigned char const addMod5Table[9] = { 0, 1, 2, 3, 4, 0, 1, 2, 3 }; #define addMod5(x, y) (addMod5Table[(x) + (y)]) -void keccakp_200_permute(keccakp_200_state_t *state, unsigned rounds) +void keccakp_200_permute(keccakp_200_state_t *state) { static uint8_t const RC[18] = { 0x01, 0x82, 0x8A, 0x00, 0x8B, 0x01, 0x81, 0x09, 0x8A, 0x88, 0x09, 0x0A, 0x8B, 0x8B, 0x89, 0x03, 0x02, 0x80 }; - uint8_t B[5][5]; + uint8_t C[5]; uint8_t D; unsigned round; unsigned index, index2; - for (round = 18 - rounds; round < 18; ++round) { + for (round = 0; round < 18; ++round) { /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. To save a bit of memory, - * we use the first row of B to store C and compute D on the fly */ + * arrays of size 5 called C and D. Compute D on the fly */ for (index = 0; index < 5; ++index) { - B[0][index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; + C[index] = state->A[0][index] ^ state->A[1][index] ^ + state->A[2][index] ^ state->A[3][index] ^ + state->A[4][index]; } for (index = 0; index < 5; ++index) { - D = B[0][addMod5(index, 4)] ^ - leftRotate1_8(B[0][addMod5(index, 1)]); + D = C[addMod5(index, 4)] ^ + leftRotate1_8(C[addMod5(index, 1)]); for (index2 = 0; index2 < 5; ++index2) state->A[index2][index] ^= D; } /* Step mapping rho and pi combined into a single step. 
* Rotate all lanes by a specific offset and rearrange */ - B[0][0] = state->A[0][0]; - B[1][0] = leftRotate4_8(state->A[0][3]); - B[2][0] = leftRotate1_8(state->A[0][1]); - B[3][0] = leftRotate3_8(state->A[0][4]); - B[4][0] = leftRotate6_8(state->A[0][2]); - B[0][1] = leftRotate4_8(state->A[1][1]); - B[1][1] = leftRotate4_8(state->A[1][4]); - B[2][1] = leftRotate6_8(state->A[1][2]); - B[3][1] = leftRotate4_8(state->A[1][0]); - B[4][1] = leftRotate7_8(state->A[1][3]); - B[0][2] = leftRotate3_8(state->A[2][2]); - B[1][2] = leftRotate3_8(state->A[2][0]); - B[2][2] = leftRotate1_8(state->A[2][3]); - B[3][2] = leftRotate2_8(state->A[2][1]); - B[4][2] = leftRotate7_8(state->A[2][4]); - B[0][3] = leftRotate5_8(state->A[3][3]); - B[1][3] = leftRotate5_8(state->A[3][1]); - B[2][3] = state->A[3][4]; - B[3][3] = leftRotate7_8(state->A[3][2]); - B[4][3] = leftRotate1_8(state->A[3][0]); - B[0][4] = leftRotate6_8(state->A[4][4]); - B[1][4] = leftRotate5_8(state->A[4][2]); - B[2][4] = leftRotate2_8(state->A[4][0]); - B[3][4] = state->A[4][3]; - B[4][4] = leftRotate2_8(state->A[4][1]); + D = state->A[0][1]; + state->A[0][1] = leftRotate4_8(state->A[1][1]); + state->A[1][1] = leftRotate4_8(state->A[1][4]); + state->A[1][4] = leftRotate5_8(state->A[4][2]); + state->A[4][2] = leftRotate7_8(state->A[2][4]); + state->A[2][4] = leftRotate2_8(state->A[4][0]); + state->A[4][0] = leftRotate6_8(state->A[0][2]); + state->A[0][2] = leftRotate3_8(state->A[2][2]); + state->A[2][2] = leftRotate1_8(state->A[2][3]); + state->A[2][3] = state->A[3][4]; + state->A[3][4] = state->A[4][3]; + state->A[4][3] = leftRotate1_8(state->A[3][0]); + state->A[3][0] = leftRotate3_8(state->A[0][4]); + state->A[0][4] = leftRotate6_8(state->A[4][4]); + state->A[4][4] = leftRotate2_8(state->A[4][1]); + state->A[4][1] = leftRotate7_8(state->A[1][3]); + state->A[1][3] = leftRotate5_8(state->A[3][1]); + state->A[3][1] = leftRotate4_8(state->A[1][0]); + state->A[1][0] = leftRotate4_8(state->A[0][3]); + state->A[0][3] = leftRotate5_8(state->A[3][3]); + state->A[3][3] = leftRotate7_8(state->A[3][2]); + state->A[3][2] = leftRotate2_8(state->A[2][1]); + state->A[2][1] = leftRotate6_8(state->A[1][2]); + state->A[1][2] = leftRotate3_8(state->A[2][0]); + state->A[2][0] = leftRotate1_8(D); /* Step mapping chi. Combine each lane with two others in its row */ for (index = 0; index < 5; ++index) { + C[0] = state->A[index][0]; + C[1] = state->A[index][1]; + C[2] = state->A[index][2]; + C[3] = state->A[index][3]; + C[4] = state->A[index][4]; for (index2 = 0; index2 < 5; ++index2) { - state->A[index2][index] = - B[index2][index] ^ - ((~B[index2][addMod5(index, 1)]) & - B[index2][addMod5(index, 2)]); + state->A[index][index2] = + C[index2] ^ + ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); } } @@ -110,61 +115,64 @@ void keccakp_400_permute_host(keccakp_400_state_t *state, unsigned rounds) 0x008A, 0x0088, 0x8009, 0x000A, 0x808B, 0x008B, 0x8089, 0x8003, 0x8002, 0x0080, 0x800A, 0x000A }; - uint16_t B[5][5]; + uint16_t C[5]; uint16_t D; unsigned round; unsigned index, index2; for (round = 20 - rounds; round < 20; ++round) { /* Step mapping theta. The specification mentions two temporary - * arrays of size 5 called C and D. To save a bit of memory, - * we use the first row of B to store C and compute D on the fly */ + * arrays of size 5 called C and D. 
Compute D on the fly */ for (index = 0; index < 5; ++index) { - B[0][index] = state->A[0][index] ^ state->A[1][index] ^ - state->A[2][index] ^ state->A[3][index] ^ - state->A[4][index]; + C[index] = state->A[0][index] ^ state->A[1][index] ^ + state->A[2][index] ^ state->A[3][index] ^ + state->A[4][index]; } for (index = 0; index < 5; ++index) { - D = B[0][addMod5(index, 4)] ^ - leftRotate1_16(B[0][addMod5(index, 1)]); + D = C[addMod5(index, 4)] ^ + leftRotate1_16(C[addMod5(index, 1)]); for (index2 = 0; index2 < 5; ++index2) state->A[index2][index] ^= D; } /* Step mapping rho and pi combined into a single step. * Rotate all lanes by a specific offset and rearrange */ - B[0][0] = state->A[0][0]; - B[1][0] = leftRotate12_16(state->A[0][3]); - B[2][0] = leftRotate1_16 (state->A[0][1]); - B[3][0] = leftRotate11_16(state->A[0][4]); - B[4][0] = leftRotate14_16(state->A[0][2]); - B[0][1] = leftRotate12_16(state->A[1][1]); - B[1][1] = leftRotate4_16 (state->A[1][4]); - B[2][1] = leftRotate6_16 (state->A[1][2]); - B[3][1] = leftRotate4_16 (state->A[1][0]); - B[4][1] = leftRotate7_16 (state->A[1][3]); - B[0][2] = leftRotate11_16(state->A[2][2]); - B[1][2] = leftRotate3_16 (state->A[2][0]); - B[2][2] = leftRotate9_16 (state->A[2][3]); - B[3][2] = leftRotate10_16(state->A[2][1]); - B[4][2] = leftRotate7_16 (state->A[2][4]); - B[0][3] = leftRotate5_16 (state->A[3][3]); - B[1][3] = leftRotate13_16(state->A[3][1]); - B[2][3] = leftRotate8_16 (state->A[3][4]); - B[3][3] = leftRotate15_16(state->A[3][2]); - B[4][3] = leftRotate9_16 (state->A[3][0]); - B[0][4] = leftRotate14_16(state->A[4][4]); - B[1][4] = leftRotate13_16(state->A[4][2]); - B[2][4] = leftRotate2_16 (state->A[4][0]); - B[3][4] = leftRotate8_16 (state->A[4][3]); - B[4][4] = leftRotate2_16 (state->A[4][1]); + D = state->A[0][1]; + state->A[0][1] = leftRotate12_16(state->A[1][1]); + state->A[1][1] = leftRotate4_16 (state->A[1][4]); + state->A[1][4] = leftRotate13_16(state->A[4][2]); + state->A[4][2] = leftRotate7_16 (state->A[2][4]); + state->A[2][4] = leftRotate2_16 (state->A[4][0]); + state->A[4][0] = leftRotate14_16(state->A[0][2]); + state->A[0][2] = leftRotate11_16(state->A[2][2]); + state->A[2][2] = leftRotate9_16 (state->A[2][3]); + state->A[2][3] = leftRotate8_16 (state->A[3][4]); + state->A[3][4] = leftRotate8_16 (state->A[4][3]); + state->A[4][3] = leftRotate9_16 (state->A[3][0]); + state->A[3][0] = leftRotate11_16(state->A[0][4]); + state->A[0][4] = leftRotate14_16(state->A[4][4]); + state->A[4][4] = leftRotate2_16 (state->A[4][1]); + state->A[4][1] = leftRotate7_16 (state->A[1][3]); + state->A[1][3] = leftRotate13_16(state->A[3][1]); + state->A[3][1] = leftRotate4_16 (state->A[1][0]); + state->A[1][0] = leftRotate12_16(state->A[0][3]); + state->A[0][3] = leftRotate5_16 (state->A[3][3]); + state->A[3][3] = leftRotate15_16(state->A[3][2]); + state->A[3][2] = leftRotate10_16(state->A[2][1]); + state->A[2][1] = leftRotate6_16 (state->A[1][2]); + state->A[1][2] = leftRotate3_16 (state->A[2][0]); + state->A[2][0] = leftRotate1_16(D); /* Step mapping chi. 
Combine each lane with two others in its row */ for (index = 0; index < 5; ++index) { + C[0] = state->A[index][0]; + C[1] = state->A[index][1]; + C[2] = state->A[index][2]; + C[3] = state->A[index][3]; + C[4] = state->A[index][4]; for (index2 = 0; index2 < 5; ++index2) { - state->A[index2][index] = - B[index2][index] ^ - ((~B[index2][addMod5(index, 1)]) & - B[index2][addMod5(index, 2)]); + state->A[index][index2] = + C[index2] ^ + ((~C[addMod5(index2, 1)]) & C[addMod5(index2, 2)]); } } @@ -202,3 +210,5 @@ void keccakp_400_permute(keccakp_400_state_t *state, unsigned rounds) } #endif + +#endif /* !__AVR__ */ diff --git a/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-keccak.h b/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-keccak.h index 026da50..2ffef42 100644 --- a/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-keccak.h +++ b/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-keccak.h @@ -68,9 +68,8 @@ typedef union * \brief Permutes the Keccak-p[200] state. * * \param state The Keccak-p[200] state to be permuted. - * \param rounds The number of rounds to perform (up to 18). */ -void keccakp_200_permute(keccakp_200_state_t *state, unsigned rounds); +void keccakp_200_permute(keccakp_200_state_t *state); /** * \brief Permutes the Keccak-p[400] state, which is assumed to be in diff --git a/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-util.h b/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-util.h index e79158c..e30166d 100644 --- a/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-util.h +++ b/isap/Implementations/crypto_aead/isapk128v20/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/aead-common.c b/knot/Implementations/crypto_aead/knot128v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
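The composed rotation macros above exist because the AVR has no barrel shifter: a 32-bit rotate by one bit costs a handful of single-bit shift/rotate instructions, while rotates by 8, 16 or 24 are pure byte moves that the compiler can often fold into register renaming. Every other rotation count is therefore built from one byte-aligned rotate plus at most four single-bit steps; for example, a left rotate by 5 is a left rotate by 8 followed by three single-bit right rotates. A small self-check of that identity (the rotl/rotr helpers are illustrative, not the library macros):

#include <assert.h>
#include <stdint.h>

static uint32_t rotl(uint32_t x, unsigned n) { return (x << n) | (x >> (32u - n)); }
static uint32_t rotr(uint32_t x, unsigned n) { return (x >> n) | (x << (32u - n)); }

int main(void)
{
    uint32_t x = 0x12345678u;
    /* leftRotate5 is composed as leftRotate8 followed by three single-bit right rotates */
    assert(rotr(rotr(rotr(rotl(x, 8), 1), 1), 1) == rotl(x, 5));
    /* rightRotate12 is defined in terms of leftRotate20, since 12 + 20 = 32 */
    assert(rotr(x, 12) == rotl(x, 20));
    return 0;
}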
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/aead-common.h b/knot/Implementations/crypto_aead/knot128v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. 
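aead_check_tag and aead_check_tag_precheck above fold the tag comparison into a single accumulator so that neither the comparison nor the plaintext wipe branches on secret data: (accum - 1) >> 8 becomes an all-ones mask only when every tag byte matched. A hedged sketch of the calling pattern at the end of a decrypt routine (buffer names and the 16-byte tag length are illustrative; it links against aead-common.c):

/* Prototype as in aead-common.h above. */
int aead_check_tag
    (unsigned char *plaintext, unsigned long long plaintext_len,
     const unsigned char *tag1, const unsigned char *tag2,
     unsigned size);

/* Hypothetical tail of an AEAD decrypt: 'm' holds the decrypted message of
 * 'mlen' bytes, 'c + mlen' points at the 16-byte tag that followed the
 * ciphertext, and 'computed' is the tag recomputed over the received data. */
static int finish_decrypt(unsigned char *m, unsigned long long mlen,
                          const unsigned char *c,
                          const unsigned char computed[16])
{
    /* Returns 0 and leaves 'm' intact if the tags agree;
     * returns -1 and zeroes 'm' otherwise. */
    return aead_check_tag(m, mlen, computed, c + mlen, 16);
}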
- * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
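The aead_cipher_encrypt_t and aead_cipher_decrypt_t signatures above are the uniform shape every cipher in this tree exposes, which is what lets a test harness drive any implementation through plain function pointers. A small hedged sketch of a round-trip self-test written only against those two types; the 16-byte key/nonce, the 16-byte tag bound, and the helper name are assumptions, not part of the patch:

#include <string.h>

typedef int (*aead_cipher_encrypt_t)
    (unsigned char *c, unsigned long long *clen,
     const unsigned char *m, unsigned long long mlen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *nsec,
     const unsigned char *npub,
     const unsigned char *k);

typedef int (*aead_cipher_decrypt_t)
    (unsigned char *m, unsigned long long *mlen,
     unsigned char *nsec,
     const unsigned char *c, unsigned long long clen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *npub,
     const unsigned char *k);

/* Encrypts and immediately decrypts a short message with the supplied
 * function pointers; returns nonzero if the plaintext survives the trip. */
static int aead_round_trip(aead_cipher_encrypt_t enc, aead_cipher_decrypt_t dec,
                           const unsigned char key[16],
                           const unsigned char nonce[16])
{
    unsigned char msg[8] = "example";
    unsigned char ct[8 + 16], pt[8 + 16];
    unsigned long long clen = 0, mlen = 0;
    if (enc(ct, &clen, msg, sizeof(msg), 0, 0, 0, nonce, key) != 0)
        return 0;
    if (dec(pt, &mlen, 0, ct, clen, 0, 0, nonce, key) != 0)
        return 0;
    return mlen == sizeof(msg) && memcmp(pt, msg, sizeof(msg)) == 0;
}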
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
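The AEAD_FLAG_LITTLE_ENDIAN note above is easiest to see with a counter-based nonce: the flag tells the caller which end of the nonce a packet sequence number belongs in. A hedged sketch of one way to act on it; the trimmed descriptor struct and helper are hypothetical stand-ins, not the real aead_cipher_t:

#include <stdint.h>
#include <string.h>

#define AEAD_FLAG_LITTLE_ENDIAN 0x0001  /* as in aead-common.h above */

typedef struct {
    const char *name;
    unsigned key_len, nonce_len, tag_len, flags;
    /* encrypt/decrypt function pointers omitted for brevity */
} aead_cipher_info;  /* trimmed stand-in for aead_cipher_t */

/* Packs a 64-bit sequence number into the cipher's nonce, placing the counter
 * bytes at the end the cipher treats as least significant: leading bytes for
 * a little-endian cipher, trailing bytes otherwise. */
static void format_nonce(unsigned char *nonce, const aead_cipher_info *cipher,
                         uint64_t seq)
{
    memset(nonce, 0, cipher->nonce_len);
    for (unsigned i = 0; i < 8 && i < cipher->nonce_len; ++i) {
        if (cipher->flags & AEAD_FLAG_LITTLE_ENDIAN)
            nonce[i] = (unsigned char)(seq >> (8 * i));
        else
            nonce[cipher->nonce_len - 1 - i] = (unsigned char)(seq >> (8 * i));
    }
}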
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/api.h b/knot/Implementations/crypto_aead/knot128v1/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/encrypt.c b/knot/Implementations/crypto_aead/knot128v1/rhys-avr/encrypt.c deleted file mode 100644 index 0d644de..0000000 --- a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "knot.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return knot_aead_128_256_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return knot_aead_128_256_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot-256-avr.S b/knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot-256-avr.S deleted file mode 100644 index 15e6389..0000000 --- a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot-256-avr.S +++ /dev/null @@ -1,1093 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_6, @object - .size table_6, 52 -table_6: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 33 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 49 - .byte 34 - .byte 5 - .byte 10 - .byte 20 - .byte 41 - .byte 19 - .byte 39 - .byte 15 - .byte 30 - .byte 61 - .byte 58 - .byte 52 - .byte 40 - .byte 17 - .byte 35 - .byte 7 - .byte 14 - .byte 28 - .byte 57 - .byte 50 - .byte 36 - .byte 9 - .byte 18 - .byte 37 - .byte 11 - .byte 22 - .byte 45 - .byte 27 - .byte 55 - .byte 46 - .byte 29 - .byte 59 - .byte 54 - .byte 44 - .byte 25 - .byte 51 - .byte 38 - .byte 13 - .byte 26 - .byte 53 - .byte 42 - - .text -.global knot256_permute_6 - .type knot256_permute_6, @function -knot256_permute_6: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - 
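The api.h constants and the crypto_aead_encrypt()/crypto_aead_decrypt() wrappers removed above are the standard NIST LWC entry points; identical copies remain in the rhys directory. A short usage sketch sized by those constants, assuming it is linked against the retained KNOT implementation:

/* Values from the api.h shown above. */
#define CRYPTO_KEYBYTES  16
#define CRYPTO_NPUBBYTES 16
#define CRYPTO_ABYTES    16

int crypto_aead_encrypt(unsigned char *c, unsigned long long *clen,
                        const unsigned char *m, unsigned long long mlen,
                        const unsigned char *ad, unsigned long long adlen,
                        const unsigned char *nsec,
                        const unsigned char *npub,
                        const unsigned char *k);

int main(void)
{
    unsigned char key[CRYPTO_KEYBYTES] = {0}, nonce[CRYPTO_NPUBBYTES] = {0};
    unsigned char msg[4] = "abc";
    /* Output is mlen + CRYPTO_ABYTES bytes: payload followed by the tag. */
    unsigned char ct[sizeof(msg) + CRYPTO_ABYTES];
    unsigned long long clen = 0;
    return crypto_aead_encrypt(ct, &clen, msg, sizeof(msg), 0, 0, 0, nonce, key);
}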
movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 57 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r8 - std Y+18,r9 - std Y+19,r10 - std Y+20,r11 - std Y+21,r12 - std Y+22,r13 - std Y+23,r14 - std Y+24,r15 - push r31 - push r30 - ldi r30,lo8(table_6) - ldi r31,hi8(table_6) -#if defined(RAMPZ) - ldi r17,hh8(table_6) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -59: -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - eor r18,r23 - inc r30 - ldd r23,Y+1 - ldd r4,Y+9 - ldd r5,Y+17 - mov r24,r18 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+33,r7 - mov r16,r5 - eor r16,r24 - mov r8,r23 - or r8,r4 - eor r8,r16 - mov r24,r23 - eor r24,r5 - mov r18,r25 - and r18,r16 - eor r18,r24 - mov r6,r8 - and r6,r24 - eor r6,r25 - std Y+25,r6 - ldd r23,Y+2 - ldd r4,Y+10 - ldd r5,Y+18 - mov r24,r19 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+34,r7 - mov r16,r5 - eor r16,r24 - mov r9,r23 - or r9,r4 - eor r9,r16 - mov r24,r23 - eor r24,r5 - mov r19,r25 - and r19,r16 - eor r19,r24 - mov r6,r9 - and r6,r24 - eor r6,r25 - std Y+26,r6 - ldd r23,Y+3 - ldd r4,Y+11 - ldd r5,Y+19 - mov r24,r20 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+35,r7 - mov r16,r5 - eor r16,r24 - mov r10,r23 - or r10,r4 - eor r10,r16 - mov r24,r23 - eor r24,r5 - mov r20,r25 - and r20,r16 - eor r20,r24 - mov r6,r10 - and r6,r24 - eor r6,r25 - std Y+27,r6 - ldd r23,Y+4 - ldd r4,Y+12 - ldd r5,Y+20 - mov r24,r21 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+36,r7 - mov r16,r5 - eor r16,r24 - mov r11,r23 - or r11,r4 - eor r11,r16 - mov r24,r23 - eor r24,r5 - mov r21,r25 - and r21,r16 - eor r21,r24 - mov r6,r11 - and r6,r24 - eor r6,r25 - std Y+28,r6 - ldd r23,Y+5 - ldd r4,Y+13 - ldd r5,Y+21 - mov r24,r26 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+37,r7 - mov r16,r5 - eor r16,r24 - mov r12,r23 - or r12,r4 - eor r12,r16 - mov r24,r23 - eor r24,r5 - mov r26,r25 - and r26,r16 - eor r26,r24 - mov r6,r12 - and r6,r24 - eor r6,r25 - std Y+29,r6 - ldd r23,Y+6 - ldd r4,Y+14 - ldd r5,Y+22 - mov r24,r27 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+38,r7 - mov r16,r5 - eor r16,r24 - mov r13,r23 - or r13,r4 - eor r13,r16 - mov r24,r23 - eor r24,r5 - mov r27,r25 - and r27,r16 - eor r27,r24 - mov r6,r13 - and r6,r24 - eor r6,r25 - std Y+30,r6 - ldd r23,Y+7 - ldd r4,Y+15 - ldd r5,Y+23 - mov r24,r2 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+39,r7 - mov r16,r5 - eor r16,r24 - mov r14,r23 - or r14,r4 - eor r14,r16 - mov r24,r23 - eor r24,r5 - 
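The repeated com/and/or/eor block above is the KNOT S-box applied bit-sliced across the four state rows, one byte column at a time, before the per-row rotations. A hedged C rendering of that dataflow, reconstructed from the register usage in the assembly; the names are mine, and the 64-bit row width corresponds to the 256-bit state:

#include <stdint.h>

/* One S-box application across the four KNOT-256 state rows a0..a3
 * (bit-sliced: bit i of each row forms one 4-bit S-box column).  Mirrors the
 * com/and/eor sequence above: a0 is updated in place and b1..b3 become the
 * new rows 1..3, which the assembly then rotates left by 1, 8 and 25. */
static void knot_sbox64(uint64_t *a0, uint64_t a1, uint64_t a2, uint64_t a3,
                        uint64_t *b1, uint64_t *b2, uint64_t *b3)
{
    uint64_t t1 = ~(*a0);            /* com */
    uint64_t t3 = a2 ^ (a1 & t1);
    *b3 = a3 ^ t3;
    uint64_t t6 = a3 ^ t1;
    *b2 = (a1 | a2) ^ t6;
    uint64_t t7 = a1 ^ a3;
    *a0 = t7 ^ (t3 & t6);
    *b1 = t3 ^ (*b2 & t7);
}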
mov r2,r25 - and r2,r16 - eor r2,r24 - mov r6,r14 - and r6,r24 - eor r6,r25 - std Y+31,r6 - ldd r23,Y+8 - ldd r4,Y+16 - ldd r5,Y+24 - mov r24,r3 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+40,r7 - mov r16,r5 - eor r16,r24 - mov r15,r23 - or r15,r4 - eor r15,r16 - mov r24,r23 - eor r24,r5 - mov r3,r25 - and r3,r16 - eor r3,r24 - mov r6,r15 - and r6,r24 - eor r6,r25 - std Y+32,r6 - std Y+9,r15 - std Y+10,r8 - std Y+11,r9 - std Y+12,r10 - std Y+13,r11 - std Y+14,r12 - std Y+15,r13 - std Y+16,r14 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - ldd r12,Y+37 - ldd r13,Y+38 - ldd r14,Y+39 - ldd r15,Y+40 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+17,r13 - std Y+18,r14 - std Y+19,r15 - std Y+20,r8 - std Y+21,r9 - std Y+22,r10 - std Y+23,r11 - std Y+24,r12 - dec r22 - breq 5322f - rjmp 59b -5322: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - ldd r12,Y+5 - ldd r13,Y+6 - ldd r14,Y+7 - ldd r15,Y+8 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - ldd r8,Y+17 - ldd r9,Y+18 - ldd r10,Y+19 - ldd r11,Y+20 - ldd r12,Y+21 - ldd r13,Y+22 - ldd r14,Y+23 - ldd r15,Y+24 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - adiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot256_permute_6, .-knot256_permute_6 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 
105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot256_permute_7 - .type knot256_permute_7, @function -knot256_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 57 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r8 - std Y+18,r9 - std Y+19,r10 - std Y+20,r11 - std Y+21,r12 - std Y+22,r13 - std Y+23,r14 - std Y+24,r15 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r17,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -59: -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - eor r18,r23 - inc r30 - ldd r23,Y+1 - ldd r4,Y+9 - ldd r5,Y+17 - mov r24,r18 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+33,r7 - mov r16,r5 - eor r16,r24 - mov r8,r23 - or r8,r4 - eor r8,r16 - mov r24,r23 - eor r24,r5 - mov r18,r25 - and r18,r16 - eor r18,r24 - mov r6,r8 - and r6,r24 - eor r6,r25 - std Y+25,r6 - ldd r23,Y+2 - ldd r4,Y+10 - ldd r5,Y+18 - mov r24,r19 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+34,r7 - mov r16,r5 - eor r16,r24 - mov r9,r23 - or r9,r4 - eor r9,r16 - mov r24,r23 - eor r24,r5 - mov r19,r25 - and r19,r16 - eor r19,r24 - mov r6,r9 - and r6,r24 - eor r6,r25 - std Y+26,r6 - ldd r23,Y+3 - ldd r4,Y+11 - ldd r5,Y+19 - mov r24,r20 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+35,r7 - mov r16,r5 - eor r16,r24 - mov r10,r23 - or r10,r4 - eor r10,r16 - mov r24,r23 - eor r24,r5 - mov r20,r25 - and r20,r16 - eor r20,r24 - mov r6,r10 - and r6,r24 - eor r6,r25 - std Y+27,r6 - ldd r23,Y+4 - ldd r4,Y+12 - ldd r5,Y+20 - mov r24,r21 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+36,r7 - mov r16,r5 - eor r16,r24 - mov r11,r23 - or r11,r4 - eor r11,r16 - mov r24,r23 - eor r24,r5 - mov r21,r25 - and r21,r16 - eor r21,r24 - mov r6,r11 - and r6,r24 - eor r6,r25 - std Y+28,r6 - ldd r23,Y+5 - ldd r4,Y+13 - ldd r5,Y+21 - mov r24,r26 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+37,r7 - mov r16,r5 - eor r16,r24 - mov r12,r23 - or r12,r4 - eor r12,r16 - mov r24,r23 - eor r24,r5 - mov r26,r25 - and r26,r16 - eor r26,r24 - mov r6,r12 - and r6,r24 - eor r6,r25 - std Y+29,r6 - ldd r23,Y+6 - ldd r4,Y+14 - ldd r5,Y+22 - mov r24,r27 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+38,r7 - mov r16,r5 - eor r16,r24 - mov 
r13,r23 - or r13,r4 - eor r13,r16 - mov r24,r23 - eor r24,r5 - mov r27,r25 - and r27,r16 - eor r27,r24 - mov r6,r13 - and r6,r24 - eor r6,r25 - std Y+30,r6 - ldd r23,Y+7 - ldd r4,Y+15 - ldd r5,Y+23 - mov r24,r2 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+39,r7 - mov r16,r5 - eor r16,r24 - mov r14,r23 - or r14,r4 - eor r14,r16 - mov r24,r23 - eor r24,r5 - mov r2,r25 - and r2,r16 - eor r2,r24 - mov r6,r14 - and r6,r24 - eor r6,r25 - std Y+31,r6 - ldd r23,Y+8 - ldd r4,Y+16 - ldd r5,Y+24 - mov r24,r3 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+40,r7 - mov r16,r5 - eor r16,r24 - mov r15,r23 - or r15,r4 - eor r15,r16 - mov r24,r23 - eor r24,r5 - mov r3,r25 - and r3,r16 - eor r3,r24 - mov r6,r15 - and r6,r24 - eor r6,r25 - std Y+32,r6 - std Y+9,r15 - std Y+10,r8 - std Y+11,r9 - std Y+12,r10 - std Y+13,r11 - std Y+14,r12 - std Y+15,r13 - std Y+16,r14 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - ldd r12,Y+37 - ldd r13,Y+38 - ldd r14,Y+39 - ldd r15,Y+40 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+17,r13 - std Y+18,r14 - std Y+19,r15 - std Y+20,r8 - std Y+21,r9 - std Y+22,r10 - std Y+23,r11 - std Y+24,r12 - dec r22 - breq 5322f - rjmp 59b -5322: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - ldd r12,Y+5 - ldd r13,Y+6 - ldd r14,Y+7 - ldd r15,Y+8 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - ldd r8,Y+17 - ldd r9,Y+18 - ldd r10,Y+19 - ldd r11,Y+20 - ldd r12,Y+21 - ldd r13,Y+22 - ldd r14,Y+23 - ldd r15,Y+24 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - adiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot256_permute_7, .-knot256_permute_7 - -#endif diff --git a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot-384-avr.S b/knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot-384-avr.S deleted file mode 100644 index 4d15898..0000000 --- a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot-384-avr.S +++ /dev/null @@ -1,833 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - 
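table_6 and table_7 above hold the per-round constants that the permutation XORs into the low byte of state row 0, and their values follow a short LFSR. A hedged sketch that appears to reproduce table_7; the x^7 + x^6 + 1 feedback (and the analogous x^6 + x^5 + 1 recurrence for table_6) is inferred from the listed values, not taken from the patch:

#include <stdio.h>

int main(void)
{
    /* 7-bit LFSR seeded with 1, feedback from bits 6 and 5, clocked once per
     * round; table_7 above holds 104 such constants.  table_6 seems to use
     * the same construction on 6 bits with feedback from bits 5 and 4. */
    unsigned s = 1;
    for (int i = 0; i < 104; ++i) {
        printf("%u\n", s);
        s = ((s << 1) & 0x7F) | (((s >> 6) ^ (s >> 5)) & 1);
    }
    return 0;
}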
.byte 15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot384_permute_7 - .type knot384_permute_7, @function -knot384_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,72 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 87 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - ldd r4,Z+16 - ldd r5,Z+17 - ldd r6,Z+18 - ldd r7,Z+19 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - std Y+13,r26 - std Y+14,r27 - std Y+15,r2 - std Y+16,r3 - std Y+17,r4 - std Y+18,r5 - std Y+19,r6 - std Y+20,r7 - std Y+21,r8 - std Y+22,r9 - std Y+23,r10 - std Y+24,r11 - ldd r26,Z+24 - ldd r27,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r4,Z+28 - ldd r5,Z+29 - ldd r6,Z+30 - ldd r7,Z+31 - ldd r8,Z+32 - ldd r9,Z+33 - ldd r10,Z+34 - ldd r11,Z+35 - std Y+25,r26 - std Y+26,r27 - std Y+27,r2 - std Y+28,r3 - std Y+29,r4 - std Y+30,r5 - std Y+31,r6 - std Y+32,r7 - std Y+33,r8 - std Y+34,r9 - std Y+35,r10 - std Y+36,r11 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+37,r26 - std Y+38,r27 - std Y+39,r2 - std Y+40,r3 - std Y+41,r4 - std Y+42,r5 - std Y+43,r6 - std Y+44,r7 - std Y+45,r8 - std Y+46,r9 - std Y+47,r10 - std Y+48,r11 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r24,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif -99: - ldd r12,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - inc r30 - ldd r18,Y+13 - ldd r19,Y+25 - ldd r20,Y+37 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+61,r23 - mov r14,r20 - eor r14,r12 - mov r26,r18 - or r26,r19 - eor r26,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+1,r21 - mov r21,r26 - and r21,r12 - eor r21,r13 - std Y+49,r21 - ldd r12,Y+2 - ldd r18,Y+14 - ldd r19,Y+26 - ldd r20,Y+38 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor 
r23,r13 - std Y+62,r23 - mov r14,r20 - eor r14,r12 - mov r27,r18 - or r27,r19 - eor r27,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+2,r21 - mov r21,r27 - and r21,r12 - eor r21,r13 - std Y+50,r21 - ldd r12,Y+3 - ldd r18,Y+15 - ldd r19,Y+27 - ldd r20,Y+39 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+63,r23 - mov r14,r20 - eor r14,r12 - mov r2,r18 - or r2,r19 - eor r2,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+3,r21 - mov r21,r2 - and r21,r12 - eor r21,r13 - std Y+51,r21 - ldd r12,Y+4 - ldd r18,Y+16 - ldd r19,Y+28 - ldd r20,Y+40 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,192 - sbci r29,255 - st Y,r23 - subi r28,64 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r3,r18 - or r3,r19 - eor r3,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+4,r21 - mov r21,r3 - and r21,r12 - eor r21,r13 - std Y+52,r21 - ldd r12,Y+5 - ldd r18,Y+17 - ldd r19,Y+29 - ldd r20,Y+41 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,191 - sbci r29,255 - st Y,r23 - subi r28,65 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r4,r18 - or r4,r19 - eor r4,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+5,r21 - mov r21,r4 - and r21,r12 - eor r21,r13 - std Y+53,r21 - ldd r12,Y+6 - ldd r18,Y+18 - ldd r19,Y+30 - ldd r20,Y+42 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,190 - sbci r29,255 - st Y,r23 - subi r28,66 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r5,r18 - or r5,r19 - eor r5,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+6,r21 - mov r21,r5 - and r21,r12 - eor r21,r13 - std Y+54,r21 - ldd r12,Y+7 - ldd r18,Y+19 - ldd r19,Y+31 - ldd r20,Y+43 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,189 - sbci r29,255 - st Y,r23 - subi r28,67 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r6,r18 - or r6,r19 - eor r6,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+7,r21 - mov r21,r6 - and r21,r12 - eor r21,r13 - std Y+55,r21 - ldd r12,Y+8 - ldd r18,Y+20 - ldd r19,Y+32 - ldd r20,Y+44 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,188 - sbci r29,255 - st Y,r23 - subi r28,68 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r7,r18 - or r7,r19 - eor r7,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+8,r21 - mov r21,r7 - and r21,r12 - eor r21,r13 - std Y+56,r21 - ldd r12,Y+9 - ldd r18,Y+21 - ldd r19,Y+33 - ldd r20,Y+45 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,187 - sbci r29,255 - st Y,r23 - subi r28,69 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r8,r18 - or r8,r19 - eor r8,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+9,r21 - mov r21,r8 - and r21,r12 - eor r21,r13 - std Y+57,r21 - ldd r12,Y+10 - ldd r18,Y+22 - ldd r19,Y+34 - ldd r20,Y+46 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,186 - sbci r29,255 - st Y,r23 - subi r28,70 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r9,r18 - or r9,r19 - eor r9,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+10,r21 - mov r21,r9 - and r21,r12 - eor r21,r13 - std Y+58,r21 - ldd r12,Y+11 - ldd r18,Y+23 - ldd r19,Y+35 - ldd r20,Y+47 - com 
r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,185 - sbci r29,255 - st Y,r23 - subi r28,71 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r10,r18 - or r10,r19 - eor r10,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+11,r21 - mov r21,r10 - and r21,r12 - eor r21,r13 - std Y+59,r21 - ldd r12,Y+12 - ldd r18,Y+24 - ldd r19,Y+36 - ldd r20,Y+48 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,184 - sbci r29,255 - st Y,r23 - subi r28,72 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r11,r18 - or r11,r19 - eor r11,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+12,r21 - mov r21,r11 - and r21,r12 - eor r21,r13 - std Y+60,r21 - std Y+25,r11 - std Y+26,r26 - std Y+27,r27 - std Y+28,r2 - std Y+29,r3 - std Y+30,r4 - std Y+31,r5 - std Y+32,r6 - std Y+33,r7 - std Y+34,r8 - std Y+35,r9 - std Y+36,r10 - ldd r26,Y+49 - ldd r27,Y+50 - ldd r2,Y+51 - ldd r3,Y+52 - ldd r4,Y+53 - ldd r5,Y+54 - ldd r6,Y+55 - ldd r7,Y+56 - ldd r8,Y+57 - ldd r9,Y+58 - ldd r10,Y+59 - ldd r11,Y+60 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - adc r26,r1 - std Y+13,r26 - std Y+14,r27 - std Y+15,r2 - std Y+16,r3 - std Y+17,r4 - std Y+18,r5 - std Y+19,r6 - std Y+20,r7 - std Y+21,r8 - std Y+22,r9 - std Y+23,r10 - std Y+24,r11 - adiw r28,61 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y - subi r28,72 - sbc r29,r1 - bst r26,0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r3 - ror r2 - ror r27 - ror r26 - bld r11,7 - std Y+37,r5 - std Y+38,r6 - std Y+39,r7 - std Y+40,r8 - std Y+41,r9 - std Y+42,r10 - std Y+43,r11 - std Y+44,r26 - std Y+45,r27 - std Y+46,r2 - std Y+47,r3 - std Y+48,r4 - dec r22 - breq 5542f - rjmp 99b -5542: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r2,Y+15 - ldd r3,Y+16 - ldd r4,Y+17 - ldd r5,Y+18 - ldd r6,Y+19 - ldd r7,Y+20 - ldd r8,Y+21 - ldd r9,Y+22 - ldd r10,Y+23 - ldd r11,Y+24 - std Z+12,r26 - std Z+13,r27 - std Z+14,r2 - std Z+15,r3 - std Z+16,r4 - std Z+17,r5 - std Z+18,r6 - std Z+19,r7 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - ldd r26,Y+25 - ldd r27,Y+26 - ldd r2,Y+27 - ldd r3,Y+28 - ldd r4,Y+29 - ldd r5,Y+30 - ldd r6,Y+31 - ldd r7,Y+32 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - std Z+24,r26 - std Z+25,r27 - std Z+26,r2 - std Z+27,r3 - std Z+28,r4 - std Z+29,r5 - std Z+30,r6 - std Z+31,r7 - std Z+32,r8 - std Z+33,r9 - std Z+34,r10 - std Z+35,r11 - ldd r26,Y+37 - ldd r27,Y+38 - ldd r2,Y+39 - ldd r3,Y+40 - ldd r4,Y+41 - ldd r5,Y+42 - ldd r6,Y+43 - ldd r7,Y+44 - ldd r8,Y+45 - ldd r9,Y+46 - ldd r10,Y+47 - ldd r11,Y+48 - std Z+36,r26 - std Z+37,r27 - std Z+38,r2 - std Z+39,r3 - std Z+40,r4 - std Z+41,r5 - std Z+42,r6 - std Z+43,r7 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - subi r28,184 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop 
r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot384_permute_7, .-knot384_permute_7 - -#endif diff --git a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot-512-avr.S b/knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot-512-avr.S deleted file mode 100644 index 6f92ac3..0000000 --- a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot-512-avr.S +++ /dev/null @@ -1,2315 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot512_permute_7 - .type knot512_permute_7, @function -knot512_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,96 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 113 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r26,Z+16 - ldd r27,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - ldd r26,Z+32 - ldd r27,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r8,Z+40 - ldd r9,Z+41 - ldd r10,Z+42 - ldd r11,Z+43 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - std Y+33,r26 - std Y+34,r27 - std Y+35,r2 - std Y+36,r3 - std Y+37,r4 - std Y+38,r5 - std Y+39,r6 - std Y+40,r7 - std Y+41,r8 - std Y+42,r9 - std Y+43,r10 - std Y+44,r11 - std Y+45,r12 - std 
Y+46,r13 - std Y+47,r14 - std Y+48,r15 - ldd r26,Z+48 - ldd r27,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r8,Z+56 - ldd r9,Z+57 - ldd r10,Z+58 - ldd r11,Z+59 - ldd r12,Z+60 - ldd r13,Z+61 - ldd r14,Z+62 - ldd r15,Z+63 - adiw r28,49 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y+,r12 - st Y+,r13 - st Y+,r14 - st Y,r15 - subi r28,64 - sbc r29,r1 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r17,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -134: - ldd r24,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r24,r18 - inc r30 - ldd r18,Y+17 - ldd r19,Y+33 - ldd r20,Y+49 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,175 - sbci r29,255 - st Y,r23 - subi r28,81 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r26,r18 - or r26,r19 - eor r26,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+1,r21 - mov r21,r26 - and r21,r24 - eor r21,r25 - subi r28,191 - sbci r29,255 - st Y,r21 - subi r28,65 - sbc r29,r1 - ldd r24,Y+2 - ldd r18,Y+18 - ldd r19,Y+34 - ldd r20,Y+50 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,174 - sbci r29,255 - st Y,r23 - subi r28,82 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r27,r18 - or r27,r19 - eor r27,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+2,r21 - mov r21,r27 - and r21,r24 - eor r21,r25 - subi r28,190 - sbci r29,255 - st Y,r21 - subi r28,66 - sbc r29,r1 - ldd r24,Y+3 - ldd r18,Y+19 - ldd r19,Y+35 - ldd r20,Y+51 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,173 - sbci r29,255 - st Y,r23 - subi r28,83 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r2,r18 - or r2,r19 - eor r2,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+3,r21 - mov r21,r2 - and r21,r24 - eor r21,r25 - subi r28,189 - sbci r29,255 - st Y,r21 - subi r28,67 - sbc r29,r1 - ldd r24,Y+4 - ldd r18,Y+20 - ldd r19,Y+36 - ldd r20,Y+52 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,172 - sbci r29,255 - st Y,r23 - subi r28,84 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r3,r18 - or r3,r19 - eor r3,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+4,r21 - mov r21,r3 - and r21,r24 - eor r21,r25 - subi r28,188 - sbci r29,255 - st Y,r21 - subi r28,68 - sbc r29,r1 - ldd r24,Y+5 - ldd r18,Y+21 - ldd r19,Y+37 - ldd r20,Y+53 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,171 - sbci r29,255 - st Y,r23 - subi r28,85 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r4,r18 - or r4,r19 - eor r4,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+5,r21 - mov r21,r4 - and r21,r24 - eor r21,r25 - subi r28,187 - sbci r29,255 - st Y,r21 - subi r28,69 - sbc r29,r1 - ldd r24,Y+6 - ldd r18,Y+22 - ldd r19,Y+38 - ldd r20,Y+54 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,170 - sbci r29,255 - st Y,r23 - subi r28,86 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r5,r18 - or r5,r19 - eor r5,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std 
Y+6,r21 - mov r21,r5 - and r21,r24 - eor r21,r25 - subi r28,186 - sbci r29,255 - st Y,r21 - subi r28,70 - sbc r29,r1 - ldd r24,Y+7 - ldd r18,Y+23 - ldd r19,Y+39 - ldd r20,Y+55 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,169 - sbci r29,255 - st Y,r23 - subi r28,87 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r6,r18 - or r6,r19 - eor r6,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+7,r21 - mov r21,r6 - and r21,r24 - eor r21,r25 - subi r28,185 - sbci r29,255 - st Y,r21 - subi r28,71 - sbc r29,r1 - ldd r24,Y+8 - ldd r18,Y+24 - ldd r19,Y+40 - ldd r20,Y+56 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,168 - sbci r29,255 - st Y,r23 - subi r28,88 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r7,r18 - or r7,r19 - eor r7,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+8,r21 - mov r21,r7 - and r21,r24 - eor r21,r25 - subi r28,184 - sbci r29,255 - st Y,r21 - subi r28,72 - sbc r29,r1 - ldd r24,Y+9 - ldd r18,Y+25 - ldd r19,Y+41 - ldd r20,Y+57 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,167 - sbci r29,255 - st Y,r23 - subi r28,89 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r8,r18 - or r8,r19 - eor r8,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+9,r21 - mov r21,r8 - and r21,r24 - eor r21,r25 - subi r28,183 - sbci r29,255 - st Y,r21 - subi r28,73 - sbc r29,r1 - ldd r24,Y+10 - ldd r18,Y+26 - ldd r19,Y+42 - ldd r20,Y+58 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,166 - sbci r29,255 - st Y,r23 - subi r28,90 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r9,r18 - or r9,r19 - eor r9,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+10,r21 - mov r21,r9 - and r21,r24 - eor r21,r25 - subi r28,182 - sbci r29,255 - st Y,r21 - subi r28,74 - sbc r29,r1 - ldd r24,Y+11 - ldd r18,Y+27 - ldd r19,Y+43 - ldd r20,Y+59 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,165 - sbci r29,255 - st Y,r23 - subi r28,91 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r10,r18 - or r10,r19 - eor r10,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+11,r21 - mov r21,r10 - and r21,r24 - eor r21,r25 - subi r28,181 - sbci r29,255 - st Y,r21 - subi r28,75 - sbc r29,r1 - ldd r24,Y+12 - ldd r18,Y+28 - ldd r19,Y+44 - ldd r20,Y+60 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,164 - sbci r29,255 - st Y,r23 - subi r28,92 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r11,r18 - or r11,r19 - eor r11,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+12,r21 - mov r21,r11 - and r21,r24 - eor r21,r25 - subi r28,180 - sbci r29,255 - st Y,r21 - subi r28,76 - sbc r29,r1 - ldd r24,Y+13 - ldd r18,Y+29 - ldd r19,Y+45 - ldd r20,Y+61 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,163 - sbci r29,255 - st Y,r23 - subi r28,93 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r12,r18 - or r12,r19 - eor r12,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+13,r21 - mov r21,r12 - and r21,r24 - eor r21,r25 - subi r28,179 - sbci r29,255 - st Y,r21 - subi r28,77 - sbc r29,r1 - ldd r24,Y+14 - ldd r18,Y+30 - ldd r19,Y+46 - ldd r20,Y+62 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,162 
- sbci r29,255 - st Y,r23 - subi r28,94 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r13,r18 - or r13,r19 - eor r13,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+14,r21 - mov r21,r13 - and r21,r24 - eor r21,r25 - subi r28,178 - sbci r29,255 - st Y,r21 - subi r28,78 - sbc r29,r1 - ldd r24,Y+15 - ldd r18,Y+31 - ldd r19,Y+47 - ldd r20,Y+63 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,161 - sbci r29,255 - st Y,r23 - subi r28,95 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r14,r18 - or r14,r19 - eor r14,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+15,r21 - mov r21,r14 - and r21,r24 - eor r21,r25 - subi r28,177 - sbci r29,255 - st Y,r21 - subi r28,79 - sbc r29,r1 - ldd r24,Y+16 - ldd r18,Y+32 - ldd r19,Y+48 - subi r28,192 - sbci r29,255 - ld r20,Y - subi r28,64 - sbc r29,r1 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,160 - sbci r29,255 - st Y,r23 - subi r28,96 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r15,r18 - or r15,r19 - eor r15,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+16,r21 - mov r21,r15 - and r21,r24 - eor r21,r25 - subi r28,176 - sbci r29,255 - st Y,r21 - subi r28,80 - sbc r29,r1 - std Y+33,r14 - std Y+34,r15 - std Y+35,r26 - std Y+36,r27 - std Y+37,r2 - std Y+38,r3 - std Y+39,r4 - std Y+40,r5 - std Y+41,r6 - std Y+42,r7 - std Y+43,r8 - std Y+44,r9 - std Y+45,r10 - std Y+46,r11 - std Y+47,r12 - std Y+48,r13 - subi r28,191 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,80 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,96 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - adiw r28,49 - st Y+,r13 - st Y+,r14 - st Y+,r15 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y,r12 - subi r28,64 - sbc r29,r1 - dec r22 - breq 5812f - rjmp 134b -5812: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r26,Y+17 - ldd r27,Y+18 - ldd r2,Y+19 - ldd r3,Y+20 - ldd r4,Y+21 - ldd r5,Y+22 - ldd r6,Y+23 - ldd r7,Y+24 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 
- ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - std Z+16,r26 - std Z+17,r27 - std Z+18,r2 - std Z+19,r3 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - ldd r26,Y+33 - ldd r27,Y+34 - ldd r2,Y+35 - ldd r3,Y+36 - ldd r4,Y+37 - ldd r5,Y+38 - ldd r6,Y+39 - ldd r7,Y+40 - ldd r8,Y+41 - ldd r9,Y+42 - ldd r10,Y+43 - ldd r11,Y+44 - ldd r12,Y+45 - ldd r13,Y+46 - ldd r14,Y+47 - ldd r15,Y+48 - std Z+32,r26 - std Z+33,r27 - std Z+34,r2 - std Z+35,r3 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r8 - std Z+41,r9 - std Z+42,r10 - std Z+43,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - adiw r28,49 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,64 - sbc r29,r1 - std Z+48,r26 - std Z+49,r27 - std Z+50,r2 - std Z+51,r3 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - std Z+56,r8 - std Z+57,r9 - std Z+58,r10 - std Z+59,r11 - std Z+60,r12 - std Z+61,r13 - std Z+62,r14 - std Z+63,r15 - subi r28,160 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot512_permute_7, .-knot512_permute_7 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_8, @object - .size table_8, 140 -table_8: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 17 - .byte 35 - .byte 71 - .byte 142 - .byte 28 - .byte 56 - .byte 113 - .byte 226 - .byte 196 - .byte 137 - .byte 18 - .byte 37 - .byte 75 - .byte 151 - .byte 46 - .byte 92 - .byte 184 - .byte 112 - .byte 224 - .byte 192 - .byte 129 - .byte 3 - .byte 6 - .byte 12 - .byte 25 - .byte 50 - .byte 100 - .byte 201 - .byte 146 - .byte 36 - .byte 73 - .byte 147 - .byte 38 - .byte 77 - .byte 155 - .byte 55 - .byte 110 - .byte 220 - .byte 185 - .byte 114 - .byte 228 - .byte 200 - .byte 144 - .byte 32 - .byte 65 - .byte 130 - .byte 5 - .byte 10 - .byte 21 - .byte 43 - .byte 86 - .byte 173 - .byte 91 - .byte 182 - .byte 109 - .byte 218 - .byte 181 - .byte 107 - .byte 214 - .byte 172 - .byte 89 - .byte 178 - .byte 101 - .byte 203 - .byte 150 - .byte 44 - .byte 88 - .byte 176 - .byte 97 - .byte 195 - .byte 135 - .byte 15 - .byte 31 - .byte 62 - .byte 125 - .byte 251 - .byte 246 - .byte 237 - .byte 219 - .byte 183 - .byte 111 - .byte 222 - .byte 189 - .byte 122 - .byte 245 - .byte 235 - .byte 215 - .byte 174 - .byte 93 - .byte 186 - .byte 116 - .byte 232 - .byte 209 - .byte 162 - .byte 68 - .byte 136 - .byte 16 - .byte 33 - .byte 67 - .byte 134 - .byte 13 - .byte 27 - .byte 54 - .byte 108 - .byte 216 - .byte 177 - .byte 99 - .byte 199 - .byte 143 - .byte 30 - .byte 60 - .byte 121 - .byte 243 - .byte 231 - .byte 206 - .byte 156 - .byte 57 - .byte 115 - .byte 230 - .byte 204 - .byte 152 - .byte 49 - .byte 98 - .byte 197 - .byte 139 - .byte 22 - .byte 45 - .byte 90 - .byte 180 - .byte 105 - .byte 210 - .byte 164 - .byte 72 - .byte 145 - .byte 34 - .byte 69 - - .text -.global knot512_permute_8 - .type knot512_permute_8, @function -knot512_permute_8: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in 
r29,0x3e - subi r28,96 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 113 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r26,Z+16 - ldd r27,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - ldd r26,Z+32 - ldd r27,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r8,Z+40 - ldd r9,Z+41 - ldd r10,Z+42 - ldd r11,Z+43 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - std Y+33,r26 - std Y+34,r27 - std Y+35,r2 - std Y+36,r3 - std Y+37,r4 - std Y+38,r5 - std Y+39,r6 - std Y+40,r7 - std Y+41,r8 - std Y+42,r9 - std Y+43,r10 - std Y+44,r11 - std Y+45,r12 - std Y+46,r13 - std Y+47,r14 - std Y+48,r15 - ldd r26,Z+48 - ldd r27,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r8,Z+56 - ldd r9,Z+57 - ldd r10,Z+58 - ldd r11,Z+59 - ldd r12,Z+60 - ldd r13,Z+61 - ldd r14,Z+62 - ldd r15,Z+63 - adiw r28,49 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y+,r12 - st Y+,r13 - st Y+,r14 - st Y,r15 - subi r28,64 - sbc r29,r1 - push r31 - push r30 - ldi r30,lo8(table_8) - ldi r31,hi8(table_8) -#if defined(RAMPZ) - ldi r17,hh8(table_8) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -134: - ldd r24,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r24,r18 - inc r30 - ldd r18,Y+17 - ldd r19,Y+33 - ldd r20,Y+49 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,175 - sbci r29,255 - st Y,r23 - subi r28,81 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r26,r18 - or r26,r19 - eor r26,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+1,r21 - mov r21,r26 - and r21,r24 - eor r21,r25 - subi r28,191 - sbci r29,255 - st Y,r21 - subi r28,65 - sbc r29,r1 - ldd r24,Y+2 - ldd r18,Y+18 - ldd r19,Y+34 - ldd r20,Y+50 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,174 - sbci r29,255 - st Y,r23 - subi r28,82 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r27,r18 - or r27,r19 - eor r27,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+2,r21 - mov r21,r27 - and r21,r24 - eor r21,r25 - subi r28,190 - sbci r29,255 - st Y,r21 - subi r28,66 - sbc r29,r1 - ldd r24,Y+3 - ldd r18,Y+19 - ldd r19,Y+35 - ldd r20,Y+51 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,173 - sbci r29,255 - st Y,r23 - subi r28,83 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r2,r18 - or r2,r19 - eor r2,r16 - mov r24,r18 - eor 
r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+3,r21 - mov r21,r2 - and r21,r24 - eor r21,r25 - subi r28,189 - sbci r29,255 - st Y,r21 - subi r28,67 - sbc r29,r1 - ldd r24,Y+4 - ldd r18,Y+20 - ldd r19,Y+36 - ldd r20,Y+52 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,172 - sbci r29,255 - st Y,r23 - subi r28,84 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r3,r18 - or r3,r19 - eor r3,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+4,r21 - mov r21,r3 - and r21,r24 - eor r21,r25 - subi r28,188 - sbci r29,255 - st Y,r21 - subi r28,68 - sbc r29,r1 - ldd r24,Y+5 - ldd r18,Y+21 - ldd r19,Y+37 - ldd r20,Y+53 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,171 - sbci r29,255 - st Y,r23 - subi r28,85 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r4,r18 - or r4,r19 - eor r4,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+5,r21 - mov r21,r4 - and r21,r24 - eor r21,r25 - subi r28,187 - sbci r29,255 - st Y,r21 - subi r28,69 - sbc r29,r1 - ldd r24,Y+6 - ldd r18,Y+22 - ldd r19,Y+38 - ldd r20,Y+54 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,170 - sbci r29,255 - st Y,r23 - subi r28,86 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r5,r18 - or r5,r19 - eor r5,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+6,r21 - mov r21,r5 - and r21,r24 - eor r21,r25 - subi r28,186 - sbci r29,255 - st Y,r21 - subi r28,70 - sbc r29,r1 - ldd r24,Y+7 - ldd r18,Y+23 - ldd r19,Y+39 - ldd r20,Y+55 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,169 - sbci r29,255 - st Y,r23 - subi r28,87 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r6,r18 - or r6,r19 - eor r6,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+7,r21 - mov r21,r6 - and r21,r24 - eor r21,r25 - subi r28,185 - sbci r29,255 - st Y,r21 - subi r28,71 - sbc r29,r1 - ldd r24,Y+8 - ldd r18,Y+24 - ldd r19,Y+40 - ldd r20,Y+56 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,168 - sbci r29,255 - st Y,r23 - subi r28,88 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r7,r18 - or r7,r19 - eor r7,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+8,r21 - mov r21,r7 - and r21,r24 - eor r21,r25 - subi r28,184 - sbci r29,255 - st Y,r21 - subi r28,72 - sbc r29,r1 - ldd r24,Y+9 - ldd r18,Y+25 - ldd r19,Y+41 - ldd r20,Y+57 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,167 - sbci r29,255 - st Y,r23 - subi r28,89 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r8,r18 - or r8,r19 - eor r8,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+9,r21 - mov r21,r8 - and r21,r24 - eor r21,r25 - subi r28,183 - sbci r29,255 - st Y,r21 - subi r28,73 - sbc r29,r1 - ldd r24,Y+10 - ldd r18,Y+26 - ldd r19,Y+42 - ldd r20,Y+58 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,166 - sbci r29,255 - st Y,r23 - subi r28,90 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r9,r18 - or r9,r19 - eor r9,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+10,r21 - mov r21,r9 - and r21,r24 - eor r21,r25 - subi r28,182 - sbci r29,255 - st Y,r21 - subi r28,74 - sbc r29,r1 - ldd r24,Y+11 - ldd r18,Y+27 - ldd r19,Y+43 - ldd r20,Y+59 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov 
r23,r20 - eor r23,r25 - subi r28,165 - sbci r29,255 - st Y,r23 - subi r28,91 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r10,r18 - or r10,r19 - eor r10,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+11,r21 - mov r21,r10 - and r21,r24 - eor r21,r25 - subi r28,181 - sbci r29,255 - st Y,r21 - subi r28,75 - sbc r29,r1 - ldd r24,Y+12 - ldd r18,Y+28 - ldd r19,Y+44 - ldd r20,Y+60 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,164 - sbci r29,255 - st Y,r23 - subi r28,92 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r11,r18 - or r11,r19 - eor r11,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+12,r21 - mov r21,r11 - and r21,r24 - eor r21,r25 - subi r28,180 - sbci r29,255 - st Y,r21 - subi r28,76 - sbc r29,r1 - ldd r24,Y+13 - ldd r18,Y+29 - ldd r19,Y+45 - ldd r20,Y+61 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,163 - sbci r29,255 - st Y,r23 - subi r28,93 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r12,r18 - or r12,r19 - eor r12,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+13,r21 - mov r21,r12 - and r21,r24 - eor r21,r25 - subi r28,179 - sbci r29,255 - st Y,r21 - subi r28,77 - sbc r29,r1 - ldd r24,Y+14 - ldd r18,Y+30 - ldd r19,Y+46 - ldd r20,Y+62 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,162 - sbci r29,255 - st Y,r23 - subi r28,94 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r13,r18 - or r13,r19 - eor r13,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+14,r21 - mov r21,r13 - and r21,r24 - eor r21,r25 - subi r28,178 - sbci r29,255 - st Y,r21 - subi r28,78 - sbc r29,r1 - ldd r24,Y+15 - ldd r18,Y+31 - ldd r19,Y+47 - ldd r20,Y+63 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,161 - sbci r29,255 - st Y,r23 - subi r28,95 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r14,r18 - or r14,r19 - eor r14,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+15,r21 - mov r21,r14 - and r21,r24 - eor r21,r25 - subi r28,177 - sbci r29,255 - st Y,r21 - subi r28,79 - sbc r29,r1 - ldd r24,Y+16 - ldd r18,Y+32 - ldd r19,Y+48 - subi r28,192 - sbci r29,255 - ld r20,Y - subi r28,64 - sbc r29,r1 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,160 - sbci r29,255 - st Y,r23 - subi r28,96 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r15,r18 - or r15,r19 - eor r15,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+16,r21 - mov r21,r15 - and r21,r24 - eor r21,r25 - subi r28,176 - sbci r29,255 - st Y,r21 - subi r28,80 - sbc r29,r1 - std Y+33,r14 - std Y+34,r15 - std Y+35,r26 - std Y+36,r27 - std Y+37,r2 - std Y+38,r3 - std Y+39,r4 - std Y+40,r5 - std Y+41,r6 - std Y+42,r7 - std Y+43,r8 - std Y+44,r9 - std Y+45,r10 - std Y+46,r11 - std Y+47,r12 - std Y+48,r13 - subi r28,191 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,80 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std 
Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,96 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - adiw r28,49 - st Y+,r13 - st Y+,r14 - st Y+,r15 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y,r12 - subi r28,64 - sbc r29,r1 - dec r22 - breq 5812f - rjmp 134b -5812: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r26,Y+17 - ldd r27,Y+18 - ldd r2,Y+19 - ldd r3,Y+20 - ldd r4,Y+21 - ldd r5,Y+22 - ldd r6,Y+23 - ldd r7,Y+24 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - std Z+16,r26 - std Z+17,r27 - std Z+18,r2 - std Z+19,r3 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - ldd r26,Y+33 - ldd r27,Y+34 - ldd r2,Y+35 - ldd r3,Y+36 - ldd r4,Y+37 - ldd r5,Y+38 - ldd r6,Y+39 - ldd r7,Y+40 - ldd r8,Y+41 - ldd r9,Y+42 - ldd r10,Y+43 - ldd r11,Y+44 - ldd r12,Y+45 - ldd r13,Y+46 - ldd r14,Y+47 - ldd r15,Y+48 - std Z+32,r26 - std Z+33,r27 - std Z+34,r2 - std Z+35,r3 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r8 - std Z+41,r9 - std Z+42,r10 - std Z+43,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - adiw r28,49 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,64 - sbc r29,r1 - std Z+48,r26 - std Z+49,r27 - std Z+50,r2 - std Z+51,r3 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - std Z+56,r8 - std Z+57,r9 - std Z+58,r10 - std Z+59,r11 - std Z+60,r12 - std Z+61,r13 - std Z+62,r14 - std Z+63,r15 - subi r28,160 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot512_permute_8, .-knot512_permute_8 - -#endif diff --git a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot.c b/knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot.c deleted file mode 100644 index f8b378e..0000000 --- a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot.c +++ /dev/null @@ -1,301 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-knot.h" - -#if !defined(__AVR__) - -/* Round constants for the KNOT-256, KNOT-384, and KNOT-512 permutations */ -static uint8_t const rc6[52] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06, 0x0c, 0x18, 0x31, 0x22, - 0x05, 0x0a, 0x14, 0x29, 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28, - 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24, 0x09, 0x12, 0x25, 0x0b, - 0x16, 0x2d, 0x1b, 0x37, 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26, - 0x0d, 0x1a, 0x35, 0x2a -}; -static uint8_t const rc7[104] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03, 0x06, 0x0c, 0x18, 0x30, - 0x61, 0x42, 0x05, 0x0a, 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c, - 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b, 0x16, 0x2c, 0x59, 0x33, - 0x67, 0x4e, 0x1d, 0x3a, 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f, - 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43, 0x07, 0x0e, 0x1c, 0x38, - 0x71, 0x62, 0x44, 0x09, 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36, - 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37, 0x6f, 0x5e, 0x3d, 0x7b, - 0x76, 0x6c, 0x58, 0x31, 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25, - 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c -}; -static uint8_t const rc8[140] = { - 0x01, 0x02, 0x04, 0x08, 0x11, 0x23, 0x47, 0x8e, 0x1c, 0x38, 0x71, 0xe2, - 0xc4, 0x89, 0x12, 0x25, 0x4b, 0x97, 0x2e, 0x5c, 0xb8, 0x70, 0xe0, 0xc0, - 0x81, 0x03, 0x06, 0x0c, 0x19, 0x32, 0x64, 0xc9, 0x92, 0x24, 0x49, 0x93, - 0x26, 0x4d, 0x9b, 0x37, 0x6e, 0xdc, 0xb9, 0x72, 0xe4, 0xc8, 0x90, 0x20, - 0x41, 0x82, 0x05, 0x0a, 0x15, 0x2b, 0x56, 0xad, 0x5b, 0xb6, 0x6d, 0xda, - 0xb5, 0x6b, 0xd6, 0xac, 0x59, 0xb2, 0x65, 0xcb, 0x96, 0x2c, 0x58, 0xb0, - 0x61, 0xc3, 0x87, 0x0f, 0x1f, 0x3e, 0x7d, 0xfb, 0xf6, 0xed, 0xdb, 0xb7, - 0x6f, 0xde, 0xbd, 0x7a, 0xf5, 0xeb, 0xd7, 0xae, 0x5d, 0xba, 0x74, 0xe8, - 0xd1, 0xa2, 0x44, 0x88, 0x10, 0x21, 0x43, 0x86, 0x0d, 0x1b, 0x36, 0x6c, - 0xd8, 0xb1, 0x63, 0xc7, 0x8f, 0x1e, 0x3c, 0x79, 0xf3, 0xe7, 0xce, 0x9c, - 0x39, 0x73, 0xe6, 0xcc, 0x98, 0x31, 0x62, 0xc5, 0x8b, 0x16, 0x2d, 0x5a, - 0xb4, 0x69, 0xd2, 0xa4, 0x48, 0x91, 0x22, 0x45 -}; - -/* Applies the KNOT S-box to four 64-bit words in bit-sliced mode */ -#define knot_sbox64(a0, a1, a2, a3, b1, b2, b3) \ - do { \ - uint64_t t1, t3, t6; \ - t1 = ~(a0); \ - t3 = (a2) ^ ((a1) & t1); \ - (b3) = (a3) ^ t3; \ - t6 = (a3) ^ t1; \ - (b2) = ((a1) | (a2)) ^ t6; \ - t1 = (a1) ^ (a3); \ - (a0) = t1 ^ (t3 & t6); \ - (b1) = t3 ^ ((b2) & t1); \ - } while (0) - -/* Applies 
the KNOT S-box to four 32-bit words in bit-sliced mode */ -#define knot_sbox32(a0, a1, a2, a3, b1, b2, b3) \ - do { \ - uint32_t t1, t3, t6; \ - t1 = ~(a0); \ - t3 = (a2) ^ ((a1) & t1); \ - (b3) = (a3) ^ t3; \ - t6 = (a3) ^ t1; \ - (b2) = ((a1) | (a2)) ^ t6; \ - t1 = (a1) ^ (a3); \ - (a0) = t1 ^ (t3 & t6); \ - (b1) = t3 ^ ((b2) & t1); \ - } while (0) - -static void knot256_permute - (knot256_state_t *state, const uint8_t *rc, uint8_t rounds) -{ - uint64_t b1, b2, b3; - - /* Load the input state into local variables; each row is 64 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x1, x2, x3, b1, b2, b3); - - /* Linear diffusion layer */ - x1 = leftRotate1_64(b1); - x2 = leftRotate8_64(b2); - x3 = leftRotate25_64(b3); - } - - /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); -#endif -} - -void knot256_permute_6(knot256_state_t *state, uint8_t rounds) -{ - knot256_permute(state, rc6, rounds); -} - -void knot256_permute_7(knot256_state_t *state, uint8_t rounds) -{ - knot256_permute(state, rc7, rounds); -} - -void knot384_permute_7(knot384_state_t *state, uint8_t rounds) -{ - const uint8_t *rc = rc7; - uint64_t b2, b4, b6; - uint32_t b3, b5, b7; - - /* Load the input state into local variables; each row is 96 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint32_t x1 = state->W[2]; - uint64_t x2 = state->W[3] | (((uint64_t)(state->W[4])) << 32); - uint32_t x3 = state->W[5]; - uint64_t x4 = state->S[3]; - uint32_t x5 = state->W[8]; - uint64_t x6 = state->W[9] | (((uint64_t)(state->W[10])) << 32); - uint32_t x7 = state->W[11]; -#else - uint64_t x0 = le_load_word64(state->B); - uint32_t x1 = le_load_word32(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 12); - uint32_t x3 = le_load_word32(state->B + 20); - uint64_t x4 = le_load_word64(state->B + 24); - uint32_t x5 = le_load_word32(state->B + 32); - uint64_t x6 = le_load_word64(state->B + 36); - uint32_t x7 = le_load_word32(state->B + 44); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x2, x4, x6, b2, b4, b6); - knot_sbox32(x1, x3, x5, x7, b3, b5, b7); - - /* Linear diffusion layer */ - #define leftRotateShort_96(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | ((b1) >> (32 - (bits))); \ - (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ - } while (0) - #define leftRotateLong_96(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | \ - (((uint64_t)(b1)) << ((bits) - 32)) | \ - ((b0) >> (96 - (bits))); \ - (a1) = (uint32_t)(((b0) << ((bits) - 32)) >> 32); \ - } while (0) - leftRotateShort_96(x2, x3, b2, b3, 1); - leftRotateShort_96(x4, x5, b4, b5, 8); - leftRotateLong_96(x6, x7, b6, b7, 55); - } - 
- /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->W[2] = x1; - state->W[3] = (uint32_t)x2; - state->W[4] = (uint32_t)(x2 >> 32); - state->W[5] = x3; - state->S[3] = x4; - state->W[8] = x5; - state->W[9] = (uint32_t)x6; - state->W[10] = (uint32_t)(x6 >> 32); - state->W[11] = x7; -#else - le_store_word64(state->B, x0); - le_store_word32(state->B + 8, x1); - le_store_word64(state->B + 12, x2); - le_store_word32(state->B + 20, x3); - le_store_word64(state->B + 24, x4); - le_store_word32(state->B + 32, x5); - le_store_word64(state->B + 36, x6); - le_store_word32(state->B + 44, x7); -#endif -} - -static void knot512_permute - (knot512_state_t *state, const uint8_t *rc, uint8_t rounds) -{ - uint64_t b2, b3, b4, b5, b6, b7; - - /* Load the input state into local variables; each row is 128 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; - uint64_t x5 = state->S[5]; - uint64_t x6 = state->S[6]; - uint64_t x7 = state->S[7]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); - uint64_t x4 = le_load_word64(state->B + 32); - uint64_t x5 = le_load_word64(state->B + 40); - uint64_t x6 = le_load_word64(state->B + 48); - uint64_t x7 = le_load_word64(state->B + 56); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x2, x4, x6, b2, b4, b6); - knot_sbox64(x1, x3, x5, x7, b3, b5, b7); - - /* Linear diffusion layer */ - #define leftRotate_128(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | ((b1) >> (64 - (bits))); \ - (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ - } while (0) - leftRotate_128(x2, x3, b2, b3, 1); - leftRotate_128(x4, x5, b4, b5, 16); - leftRotate_128(x6, x7, b6, b7, 25); - } - - /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; - state->S[5] = x5; - state->S[6] = x6; - state->S[7] = x7; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); - le_store_word64(state->B + 32, x4); - le_store_word64(state->B + 40, x5); - le_store_word64(state->B + 48, x6); - le_store_word64(state->B + 56, x7); -#endif -} - -void knot512_permute_7(knot512_state_t *state, uint8_t rounds) -{ - knot512_permute(state, rc7, rounds); -} - -void knot512_permute_8(knot512_state_t *state, uint8_t rounds) -{ - knot512_permute(state, rc8, rounds); -} - -#endif /* !__AVR__ */ diff --git a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot.h b/knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot.h deleted file mode 100644 index 88a782c..0000000 --- a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-knot.h +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
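Editor's aside: the rc6, rc7 and rc8 tables listed earlier in this file look like maximal-length LFSR sequences seeded with 1. The standalone sketch below regenerates values matching those tables; the register widths and feedback taps were inferred by the editor from the table entries themselves and are not stated anywhere in the patch, so treat this purely as an illustration.

#include <stdint.h>
#include <stdio.h>

/* Step a width-bit LFSR once: shift left by one and feed back the parity of
 * the tapped bits.  The tap masks passed below are the editor's inference. */
static uint8_t lfsr_step(uint8_t s, unsigned width, uint8_t taps)
{
    uint8_t fb = (uint8_t)(s & taps);
    fb ^= fb >> 4; fb ^= fb >> 2; fb ^= fb >> 1;      /* parity of tapped bits */
    return (uint8_t)(((s << 1) | (fb & 1)) & ((1u << width) - 1u));
}

int main(void)
{
    uint8_t s6 = 0x01, s7 = 0x01, s8 = 0x01;
    for (int i = 0; i < 12; ++i) {
        printf("%02x %02x %02x\n", s6, s7, s8);
        s6 = lfsr_step(s6, 6, 0x30);  /* matches rc6: 01 02 04 08 10 21 ... */
        s7 = lfsr_step(s7, 7, 0x60);  /* matches rc7: 01 02 04 08 10 20 41 ... */
        s8 = lfsr_step(s8, 8, 0xb8);  /* matches rc8: 01 02 04 08 11 23 47 ... */
    }
    return 0;
}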
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_KNOT_H -#define LW_INTERNAL_KNOT_H - -#include "internal-util.h" - -/** - * \file internal-knot.h - * \brief Permutations that are used by the KNOT AEAD and hash algorithms. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Internal state of the KNOT-256 permutation. - */ -typedef union -{ - uint64_t S[4]; /**< Words of the state */ - uint8_t B[32]; /**< Bytes of the state */ - -} knot256_state_t; - -/** - * \brief Internal state of the KNOT-384 permutation. - */ -typedef union -{ - uint64_t S[6]; /**< 64-bit words of the state */ - uint32_t W[12]; /**< 32-bit words of the state */ - uint8_t B[48]; /**< Bytes of the state */ - -} knot384_state_t; - -/** - * \brief Internal state of the KNOT-512 permutation. - */ -typedef union -{ - uint64_t S[8]; /**< Words of the state */ - uint8_t B[64]; /**< Bytes of the state */ - -} knot512_state_t; - -/** - * \brief Permutes the KNOT-256 state, using 6-bit round constants. - * - * \param state The KNOT-256 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 52. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot256_permute_6(knot256_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-256 state, using 7-bit round constants. - * - * \param state The KNOT-256 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot256_permute_7(knot256_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-384 state, using 7-bit round constants. - * - * \param state The KNOT-384 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot384_permute_7(knot384_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-512 state, using 7-bit round constants. - * - * \param state The KNOT-512 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot512_permute_7(knot512_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-512 state, using 8-bit round constants. - * - * \param state The KNOT-512 state to be permuted. 
- * \param rounds The number of rounds to be performed, 1 to 140. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot512_permute_8(knot512_state_t *state, uint8_t rounds); - -/** - * \brief Generic pointer to a function that performs a KNOT permutation. - * - * \param state Points to the permutation state. - * \param round Number of rounds to perform. - */ -typedef void (*knot_permute_t)(void *state, uint8_t rounds); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-util.h b/knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
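For orientation, a caller of the permutation API declared in internal-knot.h above might look like the following. This is the editor's sketch, not code from the patch; it only uses the documented knot256_state_t union and knot256_permute_6() prototype, and assumes the library's header pair is on the include path of a hosted C99 toolchain.

#include <stdio.h>
#include <string.h>
#include "internal-knot.h"   /* knot256_state_t and knot256_permute_6() */

int main(void)
{
    knot256_state_t state;

    /* The header documents the state as 32 bytes in little-endian order;
     * start from all zeroes purely for illustration. */
    memset(state.B, 0, sizeof(state.B));

    /* Apply the full 52-round KNOT-256 permutation with 6-bit constants. */
    knot256_permute_6(&state, 52);

    for (unsigned i = 0; i < sizeof(state.B); ++i)
        printf("%02x", state.B[i]);
    putchar('\n');
    return 0;
}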
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
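The lw_xor_block* helpers defined above implement the byte-wise XOR patterns used throughout the AEAD code. A minimal demonstration of lw_xor_block_2_dest (the state is XORed with a message block while the result is simultaneously written out, as in sponge-style encryption) could look like the sketch below; the buffer names and sizes are the editor's own.

#include <stdio.h>
#include "internal-util.h"   /* lw_xor_block_2_dest() and the other XOR helpers */

int main(void)
{
    /* Hypothetical 4-byte "rate" portion of a sponge state plus one block. */
    unsigned char state[4]   = { 0x10, 0x20, 0x30, 0x40 };
    const unsigned char m[4] = { 0x01, 0x02, 0x03, 0x04 };
    unsigned char c[4];

    /* state ^= m, and the updated state bytes are also written to c --
     * exactly the per-block step that encryption performs. */
    lw_xor_block_2_dest(c, state, m, 4);

    for (int i = 0; i < 4; ++i)
        printf("%02x ", c[i]);            /* prints: 11 22 33 44 */
    putchar('\n');
    return 0;
}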
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
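The composed 32-bit rotations above (the LW_CRYPTO_ROTATE32_COMPOSED branch) rely on identities such as rotating left by 5 being the same as rotating left by 8 and then right by 3, trading one arbitrary rotation for a cheap byte rotation plus a short correction on AVR. A standalone sanity check of that identity, with helper names chosen by the editor, is sketched below.

#include <stdint.h>
#include <stdio.h>

/* Editor-named helpers; the rotation amounts used here are always in 1..31. */
static uint32_t rotl32(uint32_t x, unsigned n) { return (x << n) | (x >> (32u - n)); }
static uint32_t rotr32(uint32_t x, unsigned n) { return (x >> n) | (x << (32u - n)); }

int main(void)
{
    uint32_t x = 0x12345678;

    /* On AVR a rotate-by-8 is just a byte move, so rotate-by-5 is cheaper as
     * "rotate left 8, then right 3" -- the composition used above. */
    printf("%08lx\n", (unsigned long)rotl32(x, 5));
    printf("%08lx\n", (unsigned long)rotr32(rotl32(x, 8), 3));  /* same value */
    return 0;
}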
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/knot-aead.c b/knot/Implementations/crypto_aead/knot128v1/rhys-avr/knot-aead.c deleted file mode 100644 index 5825f01..0000000 --- a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/knot-aead.c +++ /dev/null @@ -1,503 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "knot.h" -#include "internal-knot.h" -#include - -aead_cipher_t const knot_aead_128_256_cipher = { - "KNOT-AEAD-128-256", - KNOT_AEAD_128_KEY_SIZE, - KNOT_AEAD_128_NONCE_SIZE, - KNOT_AEAD_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_aead_128_256_encrypt, - knot_aead_128_256_decrypt -}; - -aead_cipher_t const knot_aead_128_384_cipher = { - "KNOT-AEAD-128-384", - KNOT_AEAD_128_KEY_SIZE, - KNOT_AEAD_128_NONCE_SIZE, - KNOT_AEAD_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_aead_128_384_encrypt, - knot_aead_128_384_decrypt -}; - -aead_cipher_t const knot_aead_192_384_cipher = { - "KNOT-AEAD-192-384", - KNOT_AEAD_192_KEY_SIZE, - KNOT_AEAD_192_NONCE_SIZE, - KNOT_AEAD_192_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_aead_192_384_encrypt, - knot_aead_192_384_decrypt -}; - -aead_cipher_t const knot_aead_256_512_cipher = { - "KNOT-AEAD-256-512", - KNOT_AEAD_256_KEY_SIZE, - KNOT_AEAD_256_NONCE_SIZE, - KNOT_AEAD_256_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_aead_256_512_encrypt, - knot_aead_256_512_decrypt -}; - -/** - * \brief Rate for KNOT-AEAD-128-256. - */ -#define KNOT_AEAD_128_256_RATE 8 - -/** - * \brief Rate for KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_384_RATE 24 - -/** - * \brief Rate for KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_384_RATE 12 - -/** - * \brief Rate for KNOT-AEAD-256-512. - */ -#define KNOT_AEAD_256_512_RATE 16 - -/** - * \brief Absorbs the associated data into a KNOT permutation state. - * - * \param state Points to the KNOT permutation state. - * \param permute Points to the function to perform the KNOT permutation. - * \param rounds Number of rounds to perform. 
- * \param rate Rate of absorption to use with the permutation. - * \param ad Points to the associated data. - * \param adlen Length of the associated data, must be at least 1. - */ -static void knot_aead_absorb_ad - (void *state, knot_permute_t permute, uint8_t rounds, unsigned rate, - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen >= rate) { - lw_xor_block((unsigned char *)state, ad, rate); - permute(state, rounds); - ad += rate; - adlen -= rate; - } - rate = (unsigned)adlen; - lw_xor_block((unsigned char *)state, ad, rate); - ((unsigned char *)state)[rate] ^= 0x01; - permute(state, rounds); -} - -/** - * \brief Encrypts plaintext data with a KNOT permutation state. - * - * \param state Points to the KNOT permutation state. - * \param permute Points to the function to perform the KNOT permutation. - * \param rounds Number of rounds to perform. - * \param rate Rate of absorption to use with the permutation. - * \param c Buffer to receive the ciphertext. - * \param m Buffer containing the plaintext. - * \param len Length of the plaintext data, must be at least 1. - */ -static void knot_aead_encrypt - (void *state, knot_permute_t permute, uint8_t rounds, unsigned rate, - unsigned char *c, const unsigned char *m, unsigned long long len) -{ - while (len >= rate) { - lw_xor_block_2_dest(c, (unsigned char *)state, m, rate); - permute(state, rounds); - c += rate; - m += rate; - len -= rate; - } - rate = (unsigned)len; - lw_xor_block_2_dest(c, (unsigned char *)state, m, rate); - ((unsigned char *)state)[rate] ^= 0x01; -} - -/** - * \brief Decrypts ciphertext data with a KNOT permutation state. - * - * \param state Points to the KNOT permutation state. - * \param permute Points to the function to perform the KNOT permutation. - * \param rounds Number of rounds to perform. - * \param rate Rate of absorption to use with the permutation. - * \param m Buffer to receive the plaintext. - * \param c Buffer containing the ciphertext. - * \param len Length of the plaintext data, must be at least 1. 
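The cipher-level entry points knot_aead_128_256_encrypt() and knot_aead_128_256_decrypt(), whose definitions follow below, can be exercised as in this hedged sketch. It assumes the KNOT_AEAD_128_* size constants from knot.h, which is outside this excerpt, and passes NULL for the unused nsec parameter; the zero key and nonce are purely for illustration.

#include <stdio.h>
#include <string.h>
#include "knot.h"   /* cipher entry points and KNOT_AEAD_128_* size constants */

int main(void)
{
    unsigned char key[KNOT_AEAD_128_KEY_SIZE] = { 0 };
    unsigned char npub[KNOT_AEAD_128_NONCE_SIZE] = { 0 };   /* use a fresh nonce per message */
    static const unsigned char msg[] = "hello";
    static const unsigned char ad[]  = "header";
    unsigned char c[sizeof(msg) + KNOT_AEAD_128_TAG_SIZE];
    unsigned char m[sizeof(msg)];
    unsigned long long clen = 0, mlen = 0;

    /* Encrypt: ciphertext plus tag are written to c, clen receives the length. */
    knot_aead_128_256_encrypt(c, &clen, msg, sizeof(msg), ad, sizeof(ad),
                              NULL, npub, key);

    /* Decrypt and verify the tag; a zero return means authentication passed. */
    if (knot_aead_128_256_decrypt(m, &mlen, NULL, c, clen,
                                  ad, sizeof(ad), npub, key) == 0)
        printf("tag verified, %llu plaintext bytes recovered\n", mlen);
    return 0;
}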
- */ -static void knot_aead_decrypt - (void *state, knot_permute_t permute, uint8_t rounds, unsigned rate, - unsigned char *m, const unsigned char *c, unsigned long long len) -{ - while (len >= rate) { - lw_xor_block_swap(m, (unsigned char *)state, c, rate); - permute(state, rounds); - c += rate; - m += rate; - len -= rate; - } - rate = (unsigned)len; - lw_xor_block_swap(m, (unsigned char *)state, c, rate); - ((unsigned char *)state)[rate] ^= 0x01; -} - -int knot_aead_128_256_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - knot256_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + KNOT_AEAD_128_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_128_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_128_NONCE_SIZE, k, KNOT_AEAD_128_KEY_SIZE); - knot256_permute_6(&state, 52); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot256_permute_6, - 28, KNOT_AEAD_128_256_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Encrypts the plaintext to produce the ciphertext */ - if (mlen > 0) { - knot_aead_encrypt - (&state, (knot_permute_t)knot256_permute_6, - 28, KNOT_AEAD_128_256_RATE, c, m, mlen); - } - - /* Compute the authentication tag */ - knot256_permute_6(&state, 32); - memcpy(c + mlen, state.B, KNOT_AEAD_128_TAG_SIZE); - return 0; -} - -int knot_aead_128_256_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - knot256_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < KNOT_AEAD_128_TAG_SIZE) - return -1; - *mlen = clen - KNOT_AEAD_128_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_128_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_128_NONCE_SIZE, k, KNOT_AEAD_128_KEY_SIZE); - knot256_permute_6(&state, 52); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot256_permute_6, - 28, KNOT_AEAD_128_256_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Decrypts the ciphertext to produce the plaintext */ - clen -= KNOT_AEAD_128_TAG_SIZE; - if (clen > 0) { - knot_aead_decrypt - (&state, (knot_permute_t)knot256_permute_6, - 28, KNOT_AEAD_128_256_RATE, m, c, clen); - } - - /* Check the authentication tag */ - knot256_permute_6(&state, 32); - return aead_check_tag - (m, clen, state.B, c + clen, KNOT_AEAD_128_TAG_SIZE); -} - -int knot_aead_128_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - knot384_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + KNOT_AEAD_128_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_128_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_128_NONCE_SIZE, k, KNOT_AEAD_128_KEY_SIZE); - 
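/* Note: for the 384-bit permutation state, the 16-byte nonce and 16-byte key
 * fill only 32 of the 48 state bytes; the memset and the 0x80 assignment
 * below zero-fill the remainder and set the final byte. */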
memset(state.B + KNOT_AEAD_128_NONCE_SIZE + KNOT_AEAD_128_KEY_SIZE, - 0, 47 - (KNOT_AEAD_128_NONCE_SIZE + KNOT_AEAD_128_KEY_SIZE)); - state.B[47] = 0x80; - knot384_permute_7(&state, 76); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot384_permute_7, - 28, KNOT_AEAD_128_384_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Encrypts the plaintext to produce the ciphertext */ - if (mlen > 0) { - knot_aead_encrypt - (&state, (knot_permute_t)knot384_permute_7, - 28, KNOT_AEAD_128_384_RATE, c, m, mlen); - } - - /* Compute the authentication tag */ - knot384_permute_7(&state, 32); - memcpy(c + mlen, state.B, KNOT_AEAD_128_TAG_SIZE); - return 0; -} - -int knot_aead_128_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - knot384_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < KNOT_AEAD_128_TAG_SIZE) - return -1; - *mlen = clen - KNOT_AEAD_128_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_128_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_128_NONCE_SIZE, k, KNOT_AEAD_128_KEY_SIZE); - memset(state.B + KNOT_AEAD_128_NONCE_SIZE + KNOT_AEAD_128_KEY_SIZE, - 0, 47 - (KNOT_AEAD_128_NONCE_SIZE + KNOT_AEAD_128_KEY_SIZE)); - state.B[47] = 0x80; - knot384_permute_7(&state, 76); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot384_permute_7, - 28, KNOT_AEAD_128_384_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Decrypts the ciphertext to produce the plaintext */ - clen -= KNOT_AEAD_128_TAG_SIZE; - if (clen > 0) { - knot_aead_decrypt - (&state, (knot_permute_t)knot384_permute_7, - 28, KNOT_AEAD_128_384_RATE, m, c, clen); - } - - /* Check the authentication tag */ - knot384_permute_7(&state, 32); - return aead_check_tag - (m, clen, state.B, c + clen, KNOT_AEAD_128_TAG_SIZE); -} - -int knot_aead_192_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - knot384_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + KNOT_AEAD_192_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_192_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_192_NONCE_SIZE, k, KNOT_AEAD_192_KEY_SIZE); - knot384_permute_7(&state, 76); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot384_permute_7, - 40, KNOT_AEAD_192_384_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Encrypts the plaintext to produce the ciphertext */ - if (mlen > 0) { - knot_aead_encrypt - (&state, (knot_permute_t)knot384_permute_7, - 40, KNOT_AEAD_192_384_RATE, c, m, mlen); - } - - /* Compute the authentication tag */ - knot384_permute_7(&state, 44); - memcpy(c + mlen, state.B, KNOT_AEAD_192_TAG_SIZE); - return 0; -} - -int knot_aead_192_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long 
long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - knot384_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < KNOT_AEAD_192_TAG_SIZE) - return -1; - *mlen = clen - KNOT_AEAD_192_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_192_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_192_NONCE_SIZE, k, KNOT_AEAD_192_KEY_SIZE); - knot384_permute_7(&state, 76); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot384_permute_7, - 40, KNOT_AEAD_192_384_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Decrypts the ciphertext to produce the plaintext */ - clen -= KNOT_AEAD_192_TAG_SIZE; - if (clen > 0) { - knot_aead_decrypt - (&state, (knot_permute_t)knot384_permute_7, - 40, KNOT_AEAD_192_384_RATE, m, c, clen); - } - - /* Check the authentication tag */ - knot384_permute_7(&state, 44); - return aead_check_tag - (m, clen, state.B, c + clen, KNOT_AEAD_192_TAG_SIZE); -} - -int knot_aead_256_512_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - knot512_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + KNOT_AEAD_256_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_256_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_256_NONCE_SIZE, k, KNOT_AEAD_256_KEY_SIZE); - knot512_permute_7(&state, 100); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot512_permute_7, - 52, KNOT_AEAD_256_512_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Encrypts the plaintext to produce the ciphertext */ - if (mlen > 0) { - knot_aead_encrypt - (&state, (knot_permute_t)knot512_permute_7, - 52, KNOT_AEAD_256_512_RATE, c, m, mlen); - } - - /* Compute the authentication tag */ - knot512_permute_7(&state, 56); - memcpy(c + mlen, state.B, KNOT_AEAD_256_TAG_SIZE); - return 0; -} - -int knot_aead_256_512_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - knot512_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < KNOT_AEAD_256_TAG_SIZE) - return -1; - *mlen = clen - KNOT_AEAD_256_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_256_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_256_NONCE_SIZE, k, KNOT_AEAD_256_KEY_SIZE); - knot512_permute_7(&state, 100); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot512_permute_7, - 52, KNOT_AEAD_256_512_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Decrypts the ciphertext to produce the plaintext */ - clen -= KNOT_AEAD_256_TAG_SIZE; - if (clen > 0) { - knot_aead_decrypt - (&state, (knot_permute_t)knot512_permute_7, - 52, KNOT_AEAD_256_512_RATE, m, c, clen); - } - - /* Check the authentication tag */ - 
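/* The closing permutation squeezes the 32-byte tag from the front of the
 * state; aead_check_tag compares it with the tag appended to the ciphertext
 * and returns a negative value on mismatch, in which case the recovered
 * plaintext should not be used. */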
knot512_permute_7(&state, 56); - return aead_check_tag - (m, clen, state.B, c + clen, KNOT_AEAD_256_TAG_SIZE); -} diff --git a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/knot.h b/knot/Implementations/crypto_aead/knot128v1/rhys-avr/knot.h deleted file mode 100644 index e2c5198..0000000 --- a/knot/Implementations/crypto_aead/knot128v1/rhys-avr/knot.h +++ /dev/null @@ -1,459 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_KNOT_H -#define LWCRYPTO_KNOT_H - -#include "aead-common.h" - -/** - * \file knot.h - * \brief KNOT authenticated encryption and hash algorithms. - * - * KNOT is a family of authenticated encryption and hash algorithms built - * around a permutation and the MonkeyDuplex sponge construction. The - * family members are: - * - * \li KNOT-AEAD-128-256 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag, built around a 256-bit permutation. This is the primary - * encryption member of the family. - * \li KNOT-AEAD-128-384 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag, built around a 384-bit permutation. - * \li KNOT-AEAD-192-384 with a 192-bit key, a 192-bit nonce, and a - * 192-bit tag, built around a 384-bit permutation. - * \li KNOT-AEAD-256-512 with a 256-bit key, a 256-bit nonce, and a - * 256-bit tag, built around a 512-bit permutation. - * \li KNOT-HASH-256-256 with a 256-bit hash output, built around a - * 256-bit permutation. This is the primary hashing member of the family. - * \li KNOT-HASH-256-384 with a 256-bit hash output, built around a - * 384-bit permutation. - * \li KNOT-HASH-384-384 with a 384-bit hash output, built around a - * 384-bit permutation. - * \li KNOT-HASH-512-512 with a 512-bit hash output, built around a - * 512-bit permutation. - * - * References: https://csrc.nist.gov/CSRC/media/Projects/lightweight-cryptography/documents/round-2/spec-doc-rnd2/knot-spec-round.pdf - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-128-256 and - * KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_NONCE_SIZE 16 - -/** - * \brief Size of the key for KNOT-AEAD-192-384. 
- */ -#define KNOT_AEAD_192_KEY_SIZE 24 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_TAG_SIZE 24 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_NONCE_SIZE 24 - -/** - * \brief Size of the key for KNOT-AEAD-256-512. - */ -#define KNOT_AEAD_256_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-256-512. - */ -#define KNOT_AEAD_256_TAG_SIZE 32 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_256_NONCE_SIZE 32 - -/** - * \brief Size of the hash for KNOT-HASH-256-256 and KNOT-HASH-256-384. - */ -#define KNOT_HASH_256_SIZE 32 - -/** - * \brief Size of the hash for KNOT-HASH-384-384. - */ -#define KNOT_HASH_384_SIZE 48 - -/** - * \brief Size of the hash for KNOT-HASH-512-512. - */ -#define KNOT_HASH_512_SIZE 64 - -/** - * \brief Meta-information block for the KNOT-AEAD-128-256 cipher. - */ -extern aead_cipher_t const knot_aead_128_256_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-128-384 cipher. - */ -extern aead_cipher_t const knot_aead_128_384_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-192-384 cipher. - */ -extern aead_cipher_t const knot_aead_192_384_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-256-512 cipher. - */ -extern aead_cipher_t const knot_aead_256_512_cipher; - -/** - * \brief Meta-information block for the KNOT-HASH-256-256 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_256_256_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-256-384 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_256_384_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-384-384 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_384_384_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-512-512 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_512_512_algorithm; - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_128_256_decrypt() - */ -int knot_aead_128_256_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. 
- * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_128_256_encrypt() - */ -int knot_aead_128_256_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-384. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_128_384_decrypt() - */ -int knot_aead_128_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-384. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
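A minimal usage sketch for the AEAD functions declared in this header, assuming only the declarations above; the function name knot_roundtrip_example and the all-zero key/nonce are placeholders and must be replaced by proper key material and a unique nonce in real use.

    #include <string.h>
    #include "knot.h"

    /* Hypothetical round trip with KNOT-AEAD-128-256. */
    int knot_roundtrip_example(void)
    {
        static const unsigned char key[KNOT_AEAD_128_KEY_SIZE] = {0};
        static const unsigned char npub[KNOT_AEAD_128_NONCE_SIZE] = {0};
        const unsigned char msg[] = "hello";
        const unsigned char ad[] = "header";
        unsigned char c[sizeof(msg) + KNOT_AEAD_128_TAG_SIZE];
        unsigned char p[sizeof(msg)];
        unsigned long long clen, plen;

        if (knot_aead_128_256_encrypt(c, &clen, msg, sizeof(msg),
                                      ad, sizeof(ad), NULL, npub, key) != 0)
            return -1;
        if (knot_aead_128_256_decrypt(p, &plen, NULL, c, clen,
                                      ad, sizeof(ad), npub, key) != 0)
            return -1; /* tag mismatch or parameter error */
        return memcmp(p, msg, (size_t)plen) == 0 ? 0 : -1;
    }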
- * - * \sa knot_aead_128_384_encrypt() - */ -int knot_aead_128_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-192-384. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_192_384_decrypt() - */ -int knot_aead_192_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-192-384. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_192_384_encrypt() - */ -int knot_aead_192_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-256-512. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. 
- * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_256_512_decrypt() - */ -int knot_aead_256_512_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-256-512. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_256_512_encrypt() - */ -int knot_aead_256_512_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with KNOT-HASH-256-256. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_256_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_256_256 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-256-384. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_256_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_256_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-384-384. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_384_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_384_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-512-512. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_512_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. 
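A correspondingly minimal sketch for the one-shot hashing functions declared above, again assuming only these declarations; knot_hash_example is a placeholder name.

    #include "knot.h"

    /* Hashes a short example message with KNOT-HASH-256-256; out must
     * provide at least KNOT_HASH_256_SIZE (32) bytes. */
    int knot_hash_example(unsigned char out[KNOT_HASH_256_SIZE])
    {
        const unsigned char msg[] = "abc";
        return knot_hash_256_256(out, msg, sizeof(msg) - 1);
    }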
- * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_512_512 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_aead/knot128v1/rhys/internal-knot-256-avr.S b/knot/Implementations/crypto_aead/knot128v1/rhys/internal-knot-256-avr.S new file mode 100644 index 0000000..15e6389 --- /dev/null +++ b/knot/Implementations/crypto_aead/knot128v1/rhys/internal-knot-256-avr.S @@ -0,0 +1,1093 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_6, @object + .size table_6, 52 +table_6: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 33 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 49 + .byte 34 + .byte 5 + .byte 10 + .byte 20 + .byte 41 + .byte 19 + .byte 39 + .byte 15 + .byte 30 + .byte 61 + .byte 58 + .byte 52 + .byte 40 + .byte 17 + .byte 35 + .byte 7 + .byte 14 + .byte 28 + .byte 57 + .byte 50 + .byte 36 + .byte 9 + .byte 18 + .byte 37 + .byte 11 + .byte 22 + .byte 45 + .byte 27 + .byte 55 + .byte 46 + .byte 29 + .byte 59 + .byte 54 + .byte 44 + .byte 25 + .byte 51 + .byte 38 + .byte 13 + .byte 26 + .byte 53 + .byte 42 + + .text +.global knot256_permute_6 + .type knot256_permute_6, @function +knot256_permute_6: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 57 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r8 + std Y+18,r9 + std Y+19,r10 + std Y+20,r11 + std Y+21,r12 + std Y+22,r13 + std Y+23,r14 + std Y+24,r15 + push r31 + push r30 + ldi r30,lo8(table_6) + ldi r31,hi8(table_6) +#if defined(RAMPZ) + ldi r17,hh8(table_6) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +59: +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + eor r18,r23 + inc r30 + ldd r23,Y+1 + ldd r4,Y+9 + ldd r5,Y+17 + mov r24,r18 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+33,r7 + mov r16,r5 + eor r16,r24 + mov r8,r23 + or r8,r4 + eor r8,r16 + mov r24,r23 + eor r24,r5 + mov r18,r25 + and r18,r16 + eor r18,r24 + mov r6,r8 + and r6,r24 + eor r6,r25 + std Y+25,r6 + ldd r23,Y+2 + ldd r4,Y+10 + ldd r5,Y+18 + mov r24,r19 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+34,r7 + mov r16,r5 + eor r16,r24 + mov r9,r23 + or r9,r4 + eor r9,r16 + mov r24,r23 + eor r24,r5 + mov r19,r25 + and r19,r16 + eor r19,r24 + mov r6,r9 + and r6,r24 + eor r6,r25 + std Y+26,r6 + ldd 
r23,Y+3 + ldd r4,Y+11 + ldd r5,Y+19 + mov r24,r20 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+35,r7 + mov r16,r5 + eor r16,r24 + mov r10,r23 + or r10,r4 + eor r10,r16 + mov r24,r23 + eor r24,r5 + mov r20,r25 + and r20,r16 + eor r20,r24 + mov r6,r10 + and r6,r24 + eor r6,r25 + std Y+27,r6 + ldd r23,Y+4 + ldd r4,Y+12 + ldd r5,Y+20 + mov r24,r21 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+36,r7 + mov r16,r5 + eor r16,r24 + mov r11,r23 + or r11,r4 + eor r11,r16 + mov r24,r23 + eor r24,r5 + mov r21,r25 + and r21,r16 + eor r21,r24 + mov r6,r11 + and r6,r24 + eor r6,r25 + std Y+28,r6 + ldd r23,Y+5 + ldd r4,Y+13 + ldd r5,Y+21 + mov r24,r26 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+37,r7 + mov r16,r5 + eor r16,r24 + mov r12,r23 + or r12,r4 + eor r12,r16 + mov r24,r23 + eor r24,r5 + mov r26,r25 + and r26,r16 + eor r26,r24 + mov r6,r12 + and r6,r24 + eor r6,r25 + std Y+29,r6 + ldd r23,Y+6 + ldd r4,Y+14 + ldd r5,Y+22 + mov r24,r27 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+38,r7 + mov r16,r5 + eor r16,r24 + mov r13,r23 + or r13,r4 + eor r13,r16 + mov r24,r23 + eor r24,r5 + mov r27,r25 + and r27,r16 + eor r27,r24 + mov r6,r13 + and r6,r24 + eor r6,r25 + std Y+30,r6 + ldd r23,Y+7 + ldd r4,Y+15 + ldd r5,Y+23 + mov r24,r2 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+39,r7 + mov r16,r5 + eor r16,r24 + mov r14,r23 + or r14,r4 + eor r14,r16 + mov r24,r23 + eor r24,r5 + mov r2,r25 + and r2,r16 + eor r2,r24 + mov r6,r14 + and r6,r24 + eor r6,r25 + std Y+31,r6 + ldd r23,Y+8 + ldd r4,Y+16 + ldd r5,Y+24 + mov r24,r3 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+40,r7 + mov r16,r5 + eor r16,r24 + mov r15,r23 + or r15,r4 + eor r15,r16 + mov r24,r23 + eor r24,r5 + mov r3,r25 + and r3,r16 + eor r3,r24 + mov r6,r15 + and r6,r24 + eor r6,r25 + std Y+32,r6 + std Y+9,r15 + std Y+10,r8 + std Y+11,r9 + std Y+12,r10 + std Y+13,r11 + std Y+14,r12 + std Y+15,r13 + std Y+16,r14 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd r11,Y+36 + ldd r12,Y+37 + ldd r13,Y+38 + ldd r14,Y+39 + ldd r15,Y+40 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+17,r13 + std Y+18,r14 + std Y+19,r15 + std Y+20,r8 + std Y+21,r9 + std Y+22,r10 + std Y+23,r11 + std Y+24,r12 + dec r22 + breq 5322f + rjmp 59b +5322: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + ldd r12,Y+5 + ldd r13,Y+6 + ldd r14,Y+7 + ldd r15,Y+8 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + ldd r8,Y+17 + ldd r9,Y+18 + ldd r10,Y+19 + ldd r11,Y+20 + ldd r12,Y+21 + ldd r13,Y+22 + ldd r14,Y+23 
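For readers of the generated assembly: each iteration of the loop above XORs a round constant into the first 64-bit row, applies the KNOT 4-bit S-box column-wise across the four rows, and then rotates the other rows left by fixed amounts (1, 8 and 25 bits, read off the bit shifts and byte re-ordering in the stores). The following is a sketch in portable C of what one such round computes, assuming the rows are loaded as little-endian 64-bit words; it is an illustration, not the implementation behind internal-knot.h.

    #include <stdint.h>

    #define rotl64(x, n) (((x) << (n)) | ((x) >> (64 - (n))))

    /* Model of one KNOT-256 round as performed by the register-level code
     * above; x[0..3] are the four 64-bit rows of the 256-bit state. */
    static void knot256_round_model(uint64_t x[4], uint8_t rc)
    {
        uint64_t t1, t3, t6, b1, b2, b3;
        x[0] ^= rc;                    /* add round constant to row 0 */
        t1 = ~x[0];
        t3 = x[2] ^ (x[1] & t1);
        b3 = x[3] ^ t3;
        t6 = x[3] ^ t1;
        b2 = (x[1] | x[2]) ^ t6;
        t1 = x[1] ^ x[3];
        x[0] = t1 ^ (t3 & t6);
        b1 = t3 ^ (b2 & t1);
        x[1] = rotl64(b1, 1);          /* row rotations */
        x[2] = rotl64(b2, 8);
        x[3] = rotl64(b3, 25);
    }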
+ ldd r15,Y+24 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + adiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot256_permute_6, .-knot256_permute_6 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 + .byte 92 + + .text +.global knot256_permute_7 + .type knot256_permute_7, @function +knot256_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 57 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r8 + std Y+18,r9 + std Y+19,r10 + std Y+20,r11 + std Y+21,r12 + std Y+22,r13 + std Y+23,r14 + std Y+24,r15 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r17,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +59: +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + eor r18,r23 + inc r30 + ldd r23,Y+1 + ldd r4,Y+9 + ldd r5,Y+17 + mov r24,r18 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+33,r7 + mov r16,r5 + eor r16,r24 + mov r8,r23 + or r8,r4 + eor r8,r16 + mov r24,r23 + eor r24,r5 + mov r18,r25 + and r18,r16 + eor 
r18,r24 + mov r6,r8 + and r6,r24 + eor r6,r25 + std Y+25,r6 + ldd r23,Y+2 + ldd r4,Y+10 + ldd r5,Y+18 + mov r24,r19 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+34,r7 + mov r16,r5 + eor r16,r24 + mov r9,r23 + or r9,r4 + eor r9,r16 + mov r24,r23 + eor r24,r5 + mov r19,r25 + and r19,r16 + eor r19,r24 + mov r6,r9 + and r6,r24 + eor r6,r25 + std Y+26,r6 + ldd r23,Y+3 + ldd r4,Y+11 + ldd r5,Y+19 + mov r24,r20 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+35,r7 + mov r16,r5 + eor r16,r24 + mov r10,r23 + or r10,r4 + eor r10,r16 + mov r24,r23 + eor r24,r5 + mov r20,r25 + and r20,r16 + eor r20,r24 + mov r6,r10 + and r6,r24 + eor r6,r25 + std Y+27,r6 + ldd r23,Y+4 + ldd r4,Y+12 + ldd r5,Y+20 + mov r24,r21 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+36,r7 + mov r16,r5 + eor r16,r24 + mov r11,r23 + or r11,r4 + eor r11,r16 + mov r24,r23 + eor r24,r5 + mov r21,r25 + and r21,r16 + eor r21,r24 + mov r6,r11 + and r6,r24 + eor r6,r25 + std Y+28,r6 + ldd r23,Y+5 + ldd r4,Y+13 + ldd r5,Y+21 + mov r24,r26 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+37,r7 + mov r16,r5 + eor r16,r24 + mov r12,r23 + or r12,r4 + eor r12,r16 + mov r24,r23 + eor r24,r5 + mov r26,r25 + and r26,r16 + eor r26,r24 + mov r6,r12 + and r6,r24 + eor r6,r25 + std Y+29,r6 + ldd r23,Y+6 + ldd r4,Y+14 + ldd r5,Y+22 + mov r24,r27 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+38,r7 + mov r16,r5 + eor r16,r24 + mov r13,r23 + or r13,r4 + eor r13,r16 + mov r24,r23 + eor r24,r5 + mov r27,r25 + and r27,r16 + eor r27,r24 + mov r6,r13 + and r6,r24 + eor r6,r25 + std Y+30,r6 + ldd r23,Y+7 + ldd r4,Y+15 + ldd r5,Y+23 + mov r24,r2 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+39,r7 + mov r16,r5 + eor r16,r24 + mov r14,r23 + or r14,r4 + eor r14,r16 + mov r24,r23 + eor r24,r5 + mov r2,r25 + and r2,r16 + eor r2,r24 + mov r6,r14 + and r6,r24 + eor r6,r25 + std Y+31,r6 + ldd r23,Y+8 + ldd r4,Y+16 + ldd r5,Y+24 + mov r24,r3 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+40,r7 + mov r16,r5 + eor r16,r24 + mov r15,r23 + or r15,r4 + eor r15,r16 + mov r24,r23 + eor r24,r5 + mov r3,r25 + and r3,r16 + eor r3,r24 + mov r6,r15 + and r6,r24 + eor r6,r25 + std Y+32,r6 + std Y+9,r15 + std Y+10,r8 + std Y+11,r9 + std Y+12,r10 + std Y+13,r11 + std Y+14,r12 + std Y+15,r13 + std Y+16,r14 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd r11,Y+36 + ldd r12,Y+37 + ldd r13,Y+38 + ldd r14,Y+39 + ldd r15,Y+40 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+17,r13 + std Y+18,r14 + std Y+19,r15 + std Y+20,r8 + std Y+21,r9 + std Y+22,r10 + std Y+23,r11 + std Y+24,r12 + dec r22 + breq 5322f + rjmp 59b +5322: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + ldd r12,Y+5 + ldd r13,Y+6 + ldd r14,Y+7 + ldd r15,Y+8 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 
+ std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + ldd r8,Y+17 + ldd r9,Y+18 + ldd r10,Y+19 + ldd r11,Y+20 + ldd r12,Y+21 + ldd r13,Y+22 + ldd r14,Y+23 + ldd r15,Y+24 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + adiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot256_permute_7, .-knot256_permute_7 + +#endif diff --git a/knot/Implementations/crypto_aead/knot128v1/rhys/internal-knot-384-avr.S b/knot/Implementations/crypto_aead/knot128v1/rhys/internal-knot-384-avr.S new file mode 100644 index 0000000..4d15898 --- /dev/null +++ b/knot/Implementations/crypto_aead/knot128v1/rhys/internal-knot-384-avr.S @@ -0,0 +1,833 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 + .byte 92 + + .text +.global knot384_permute_7 + .type knot384_permute_7, @function +knot384_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,72 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 87 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + ldd r4,Z+16 + ldd r5,Z+17 + ldd r6,Z+18 + ldd r7,Z+19 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + std Y+13,r26 + std Y+14,r27 + std Y+15,r2 + std Y+16,r3 + std Y+17,r4 + std Y+18,r5 + std Y+19,r6 + std Y+20,r7 + std Y+21,r8 + std Y+22,r9 + std Y+23,r10 + std 
Y+24,r11 + ldd r26,Z+24 + ldd r27,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r4,Z+28 + ldd r5,Z+29 + ldd r6,Z+30 + ldd r7,Z+31 + ldd r8,Z+32 + ldd r9,Z+33 + ldd r10,Z+34 + ldd r11,Z+35 + std Y+25,r26 + std Y+26,r27 + std Y+27,r2 + std Y+28,r3 + std Y+29,r4 + std Y+30,r5 + std Y+31,r6 + std Y+32,r7 + std Y+33,r8 + std Y+34,r9 + std Y+35,r10 + std Y+36,r11 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+37,r26 + std Y+38,r27 + std Y+39,r2 + std Y+40,r3 + std Y+41,r4 + std Y+42,r5 + std Y+43,r6 + std Y+44,r7 + std Y+45,r8 + std Y+46,r9 + std Y+47,r10 + std Y+48,r11 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r24,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif +99: + ldd r12,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + inc r30 + ldd r18,Y+13 + ldd r19,Y+25 + ldd r20,Y+37 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+61,r23 + mov r14,r20 + eor r14,r12 + mov r26,r18 + or r26,r19 + eor r26,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+1,r21 + mov r21,r26 + and r21,r12 + eor r21,r13 + std Y+49,r21 + ldd r12,Y+2 + ldd r18,Y+14 + ldd r19,Y+26 + ldd r20,Y+38 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+62,r23 + mov r14,r20 + eor r14,r12 + mov r27,r18 + or r27,r19 + eor r27,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+2,r21 + mov r21,r27 + and r21,r12 + eor r21,r13 + std Y+50,r21 + ldd r12,Y+3 + ldd r18,Y+15 + ldd r19,Y+27 + ldd r20,Y+39 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+63,r23 + mov r14,r20 + eor r14,r12 + mov r2,r18 + or r2,r19 + eor r2,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+3,r21 + mov r21,r2 + and r21,r12 + eor r21,r13 + std Y+51,r21 + ldd r12,Y+4 + ldd r18,Y+16 + ldd r19,Y+28 + ldd r20,Y+40 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,192 + sbci r29,255 + st Y,r23 + subi r28,64 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r3,r18 + or r3,r19 + eor r3,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+4,r21 + mov r21,r3 + and r21,r12 + eor r21,r13 + std Y+52,r21 + ldd r12,Y+5 + ldd r18,Y+17 + ldd r19,Y+29 + ldd r20,Y+41 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,191 + sbci r29,255 + st Y,r23 + subi r28,65 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r4,r18 + or r4,r19 + eor r4,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+5,r21 + mov r21,r4 + and r21,r12 + eor r21,r13 + std Y+53,r21 + ldd r12,Y+6 + ldd r18,Y+18 + ldd r19,Y+30 + ldd r20,Y+42 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,190 + sbci r29,255 + st Y,r23 + subi r28,66 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r5,r18 + or r5,r19 + eor r5,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+6,r21 + mov r21,r5 + and r21,r12 + eor r21,r13 + std Y+54,r21 + ldd r12,Y+7 + ldd r18,Y+19 + ldd r19,Y+31 + ldd r20,Y+43 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,189 + sbci 
r29,255 + st Y,r23 + subi r28,67 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r6,r18 + or r6,r19 + eor r6,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+7,r21 + mov r21,r6 + and r21,r12 + eor r21,r13 + std Y+55,r21 + ldd r12,Y+8 + ldd r18,Y+20 + ldd r19,Y+32 + ldd r20,Y+44 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,188 + sbci r29,255 + st Y,r23 + subi r28,68 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r7,r18 + or r7,r19 + eor r7,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+8,r21 + mov r21,r7 + and r21,r12 + eor r21,r13 + std Y+56,r21 + ldd r12,Y+9 + ldd r18,Y+21 + ldd r19,Y+33 + ldd r20,Y+45 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,187 + sbci r29,255 + st Y,r23 + subi r28,69 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r8,r18 + or r8,r19 + eor r8,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+9,r21 + mov r21,r8 + and r21,r12 + eor r21,r13 + std Y+57,r21 + ldd r12,Y+10 + ldd r18,Y+22 + ldd r19,Y+34 + ldd r20,Y+46 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,186 + sbci r29,255 + st Y,r23 + subi r28,70 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r9,r18 + or r9,r19 + eor r9,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+10,r21 + mov r21,r9 + and r21,r12 + eor r21,r13 + std Y+58,r21 + ldd r12,Y+11 + ldd r18,Y+23 + ldd r19,Y+35 + ldd r20,Y+47 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,185 + sbci r29,255 + st Y,r23 + subi r28,71 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r10,r18 + or r10,r19 + eor r10,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+11,r21 + mov r21,r10 + and r21,r12 + eor r21,r13 + std Y+59,r21 + ldd r12,Y+12 + ldd r18,Y+24 + ldd r19,Y+36 + ldd r20,Y+48 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,184 + sbci r29,255 + st Y,r23 + subi r28,72 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r11,r18 + or r11,r19 + eor r11,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+12,r21 + mov r21,r11 + and r21,r12 + eor r21,r13 + std Y+60,r21 + std Y+25,r11 + std Y+26,r26 + std Y+27,r27 + std Y+28,r2 + std Y+29,r3 + std Y+30,r4 + std Y+31,r5 + std Y+32,r6 + std Y+33,r7 + std Y+34,r8 + std Y+35,r9 + std Y+36,r10 + ldd r26,Y+49 + ldd r27,Y+50 + ldd r2,Y+51 + ldd r3,Y+52 + ldd r4,Y+53 + ldd r5,Y+54 + ldd r6,Y+55 + ldd r7,Y+56 + ldd r8,Y+57 + ldd r9,Y+58 + ldd r10,Y+59 + ldd r11,Y+60 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + adc r26,r1 + std Y+13,r26 + std Y+14,r27 + std Y+15,r2 + std Y+16,r3 + std Y+17,r4 + std Y+18,r5 + std Y+19,r6 + std Y+20,r7 + std Y+21,r8 + std Y+22,r9 + std Y+23,r10 + std Y+24,r11 + adiw r28,61 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y + subi r28,72 + sbc r29,r1 + bst r26,0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r3 + ror r2 + ror r27 + ror r26 + bld r11,7 + std Y+37,r5 + std Y+38,r6 + std Y+39,r7 + std Y+40,r8 + std Y+41,r9 + std Y+42,r10 + std Y+43,r11 + std Y+44,r26 + std Y+45,r27 + std Y+46,r2 + std Y+47,r3 + std Y+48,r4 + dec r22 + breq 5542f + rjmp 99b +5542: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 
+#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r2,Y+15 + ldd r3,Y+16 + ldd r4,Y+17 + ldd r5,Y+18 + ldd r6,Y+19 + ldd r7,Y+20 + ldd r8,Y+21 + ldd r9,Y+22 + ldd r10,Y+23 + ldd r11,Y+24 + std Z+12,r26 + std Z+13,r27 + std Z+14,r2 + std Z+15,r3 + std Z+16,r4 + std Z+17,r5 + std Z+18,r6 + std Z+19,r7 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + ldd r26,Y+25 + ldd r27,Y+26 + ldd r2,Y+27 + ldd r3,Y+28 + ldd r4,Y+29 + ldd r5,Y+30 + ldd r6,Y+31 + ldd r7,Y+32 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd r11,Y+36 + std Z+24,r26 + std Z+25,r27 + std Z+26,r2 + std Z+27,r3 + std Z+28,r4 + std Z+29,r5 + std Z+30,r6 + std Z+31,r7 + std Z+32,r8 + std Z+33,r9 + std Z+34,r10 + std Z+35,r11 + ldd r26,Y+37 + ldd r27,Y+38 + ldd r2,Y+39 + ldd r3,Y+40 + ldd r4,Y+41 + ldd r5,Y+42 + ldd r6,Y+43 + ldd r7,Y+44 + ldd r8,Y+45 + ldd r9,Y+46 + ldd r10,Y+47 + ldd r11,Y+48 + std Z+36,r26 + std Z+37,r27 + std Z+38,r2 + std Z+39,r3 + std Z+40,r4 + std Z+41,r5 + std Z+42,r6 + std Z+43,r7 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + subi r28,184 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot384_permute_7, .-knot384_permute_7 + +#endif diff --git a/knot/Implementations/crypto_aead/knot128v1/rhys/internal-knot-512-avr.S b/knot/Implementations/crypto_aead/knot128v1/rhys/internal-knot-512-avr.S new file mode 100644 index 0000000..6f92ac3 --- /dev/null +++ b/knot/Implementations/crypto_aead/knot128v1/rhys/internal-knot-512-avr.S @@ -0,0 +1,2315 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 + .byte 92 + + .text +.global knot512_permute_7 + .type knot512_permute_7, @function +knot512_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 
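The table_6/table_7 byte tables embedded in these generated files hold the per-round constants. They appear to be successive states of small LFSRs (a 6-bit register with feedback from bits 5 and 4, and a 7-bit register with feedback from bits 6 and 5); the sketch below is a reconstruction under that assumption and reproduces the listed values, with lfsr_step as an illustrative name.

    #include <stdint.h>
    #include <stdio.h>

    /* Step an n-bit LFSR whose feedback bit is the XOR of its two top bits. */
    static uint8_t lfsr_step(uint8_t rc, unsigned n)
    {
        uint8_t fb = ((rc >> (n - 1)) ^ (rc >> (n - 2))) & 1;
        return (uint8_t)(((rc << 1) | fb) & ((1u << n) - 1));
    }

    int main(void)
    {
        uint8_t rc6 = 1, rc7 = 1;
        for (int i = 0; i < 52; ++i) {   /* table_6 has 52 entries */
            printf("%d ", rc6);
            rc6 = lfsr_step(rc6, 6);
        }
        printf("\n");
        for (int i = 0; i < 104; ++i) {  /* table_7 has 104 entries */
            printf("%d ", rc7);
            rc7 = lfsr_step(rc7, 7);
        }
        printf("\n");
        return 0;
    }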
+ push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,96 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 113 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r26,Z+16 + ldd r27,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + ldd r26,Z+32 + ldd r27,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r8,Z+40 + ldd r9,Z+41 + ldd r10,Z+42 + ldd r11,Z+43 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + std Y+33,r26 + std Y+34,r27 + std Y+35,r2 + std Y+36,r3 + std Y+37,r4 + std Y+38,r5 + std Y+39,r6 + std Y+40,r7 + std Y+41,r8 + std Y+42,r9 + std Y+43,r10 + std Y+44,r11 + std Y+45,r12 + std Y+46,r13 + std Y+47,r14 + std Y+48,r15 + ldd r26,Z+48 + ldd r27,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r8,Z+56 + ldd r9,Z+57 + ldd r10,Z+58 + ldd r11,Z+59 + ldd r12,Z+60 + ldd r13,Z+61 + ldd r14,Z+62 + ldd r15,Z+63 + adiw r28,49 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y+,r12 + st Y+,r13 + st Y+,r14 + st Y,r15 + subi r28,64 + sbc r29,r1 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r17,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +134: + ldd r24,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r24,r18 + inc r30 + ldd r18,Y+17 + ldd r19,Y+33 + ldd r20,Y+49 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,175 + sbci r29,255 + st Y,r23 + subi r28,81 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r26,r18 + or r26,r19 + eor r26,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+1,r21 + mov r21,r26 + and r21,r24 + eor r21,r25 + subi r28,191 + sbci r29,255 + st Y,r21 + subi r28,65 + sbc r29,r1 + ldd r24,Y+2 + ldd r18,Y+18 + ldd r19,Y+34 + ldd r20,Y+50 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,174 + sbci r29,255 + st Y,r23 + subi r28,82 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r27,r18 + or r27,r19 + eor r27,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+2,r21 + mov r21,r27 + and r21,r24 + eor r21,r25 + subi r28,190 + sbci r29,255 + st Y,r21 + subi r28,66 + sbc r29,r1 + ldd r24,Y+3 + ldd r18,Y+19 + ldd r19,Y+35 + ldd r20,Y+51 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,173 + sbci r29,255 + st Y,r23 + subi r28,83 + sbc r29,r1 
+ mov r16,r20 + eor r16,r24 + mov r2,r18 + or r2,r19 + eor r2,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+3,r21 + mov r21,r2 + and r21,r24 + eor r21,r25 + subi r28,189 + sbci r29,255 + st Y,r21 + subi r28,67 + sbc r29,r1 + ldd r24,Y+4 + ldd r18,Y+20 + ldd r19,Y+36 + ldd r20,Y+52 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,172 + sbci r29,255 + st Y,r23 + subi r28,84 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r3,r18 + or r3,r19 + eor r3,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+4,r21 + mov r21,r3 + and r21,r24 + eor r21,r25 + subi r28,188 + sbci r29,255 + st Y,r21 + subi r28,68 + sbc r29,r1 + ldd r24,Y+5 + ldd r18,Y+21 + ldd r19,Y+37 + ldd r20,Y+53 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,171 + sbci r29,255 + st Y,r23 + subi r28,85 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r4,r18 + or r4,r19 + eor r4,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+5,r21 + mov r21,r4 + and r21,r24 + eor r21,r25 + subi r28,187 + sbci r29,255 + st Y,r21 + subi r28,69 + sbc r29,r1 + ldd r24,Y+6 + ldd r18,Y+22 + ldd r19,Y+38 + ldd r20,Y+54 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,170 + sbci r29,255 + st Y,r23 + subi r28,86 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r5,r18 + or r5,r19 + eor r5,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+6,r21 + mov r21,r5 + and r21,r24 + eor r21,r25 + subi r28,186 + sbci r29,255 + st Y,r21 + subi r28,70 + sbc r29,r1 + ldd r24,Y+7 + ldd r18,Y+23 + ldd r19,Y+39 + ldd r20,Y+55 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,169 + sbci r29,255 + st Y,r23 + subi r28,87 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r6,r18 + or r6,r19 + eor r6,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+7,r21 + mov r21,r6 + and r21,r24 + eor r21,r25 + subi r28,185 + sbci r29,255 + st Y,r21 + subi r28,71 + sbc r29,r1 + ldd r24,Y+8 + ldd r18,Y+24 + ldd r19,Y+40 + ldd r20,Y+56 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,168 + sbci r29,255 + st Y,r23 + subi r28,88 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r7,r18 + or r7,r19 + eor r7,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+8,r21 + mov r21,r7 + and r21,r24 + eor r21,r25 + subi r28,184 + sbci r29,255 + st Y,r21 + subi r28,72 + sbc r29,r1 + ldd r24,Y+9 + ldd r18,Y+25 + ldd r19,Y+41 + ldd r20,Y+57 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,167 + sbci r29,255 + st Y,r23 + subi r28,89 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r8,r18 + or r8,r19 + eor r8,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+9,r21 + mov r21,r8 + and r21,r24 + eor r21,r25 + subi r28,183 + sbci r29,255 + st Y,r21 + subi r28,73 + sbc r29,r1 + ldd r24,Y+10 + ldd r18,Y+26 + ldd r19,Y+42 + ldd r20,Y+58 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,166 + sbci r29,255 + st Y,r23 + subi r28,90 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r9,r18 + or r9,r19 + eor r9,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+10,r21 + mov r21,r9 + and r21,r24 + eor r21,r25 + subi r28,182 + sbci r29,255 + st Y,r21 + subi r28,74 + sbc r29,r1 + ldd r24,Y+11 + ldd r18,Y+27 + 
ldd r19,Y+43 + ldd r20,Y+59 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,165 + sbci r29,255 + st Y,r23 + subi r28,91 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r10,r18 + or r10,r19 + eor r10,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+11,r21 + mov r21,r10 + and r21,r24 + eor r21,r25 + subi r28,181 + sbci r29,255 + st Y,r21 + subi r28,75 + sbc r29,r1 + ldd r24,Y+12 + ldd r18,Y+28 + ldd r19,Y+44 + ldd r20,Y+60 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,164 + sbci r29,255 + st Y,r23 + subi r28,92 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r11,r18 + or r11,r19 + eor r11,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+12,r21 + mov r21,r11 + and r21,r24 + eor r21,r25 + subi r28,180 + sbci r29,255 + st Y,r21 + subi r28,76 + sbc r29,r1 + ldd r24,Y+13 + ldd r18,Y+29 + ldd r19,Y+45 + ldd r20,Y+61 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,163 + sbci r29,255 + st Y,r23 + subi r28,93 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r12,r18 + or r12,r19 + eor r12,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+13,r21 + mov r21,r12 + and r21,r24 + eor r21,r25 + subi r28,179 + sbci r29,255 + st Y,r21 + subi r28,77 + sbc r29,r1 + ldd r24,Y+14 + ldd r18,Y+30 + ldd r19,Y+46 + ldd r20,Y+62 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,162 + sbci r29,255 + st Y,r23 + subi r28,94 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r13,r18 + or r13,r19 + eor r13,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+14,r21 + mov r21,r13 + and r21,r24 + eor r21,r25 + subi r28,178 + sbci r29,255 + st Y,r21 + subi r28,78 + sbc r29,r1 + ldd r24,Y+15 + ldd r18,Y+31 + ldd r19,Y+47 + ldd r20,Y+63 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,161 + sbci r29,255 + st Y,r23 + subi r28,95 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r14,r18 + or r14,r19 + eor r14,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+15,r21 + mov r21,r14 + and r21,r24 + eor r21,r25 + subi r28,177 + sbci r29,255 + st Y,r21 + subi r28,79 + sbc r29,r1 + ldd r24,Y+16 + ldd r18,Y+32 + ldd r19,Y+48 + subi r28,192 + sbci r29,255 + ld r20,Y + subi r28,64 + sbc r29,r1 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,160 + sbci r29,255 + st Y,r23 + subi r28,96 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r15,r18 + or r15,r19 + eor r15,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+16,r21 + mov r21,r15 + and r21,r24 + eor r21,r25 + subi r28,176 + sbci r29,255 + st Y,r21 + subi r28,80 + sbc r29,r1 + std Y+33,r14 + std Y+34,r15 + std Y+35,r26 + std Y+36,r27 + std Y+37,r2 + std Y+38,r3 + std Y+39,r4 + std Y+40,r5 + std Y+41,r6 + std Y+42,r7 + std Y+43,r8 + std Y+44,r9 + std Y+45,r10 + std Y+46,r11 + std Y+47,r12 + std Y+48,r13 + subi r28,191 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,80 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std 
Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,96 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + adiw r28,49 + st Y+,r13 + st Y+,r14 + st Y+,r15 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y,r12 + subi r28,64 + sbc r29,r1 + dec r22 + breq 5812f + rjmp 134b +5812: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r26,Y+17 + ldd r27,Y+18 + ldd r2,Y+19 + ldd r3,Y+20 + ldd r4,Y+21 + ldd r5,Y+22 + ldd r6,Y+23 + ldd r7,Y+24 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + std Z+16,r26 + std Z+17,r27 + std Z+18,r2 + std Z+19,r3 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + ldd r26,Y+33 + ldd r27,Y+34 + ldd r2,Y+35 + ldd r3,Y+36 + ldd r4,Y+37 + ldd r5,Y+38 + ldd r6,Y+39 + ldd r7,Y+40 + ldd r8,Y+41 + ldd r9,Y+42 + ldd r10,Y+43 + ldd r11,Y+44 + ldd r12,Y+45 + ldd r13,Y+46 + ldd r14,Y+47 + ldd r15,Y+48 + std Z+32,r26 + std Z+33,r27 + std Z+34,r2 + std Z+35,r3 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r8 + std Z+41,r9 + std Z+42,r10 + std Z+43,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + adiw r28,49 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,64 + sbc r29,r1 + std Z+48,r26 + std Z+49,r27 + std Z+50,r2 + std Z+51,r3 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + std Z+56,r8 + std Z+57,r9 + std Z+58,r10 + std Z+59,r11 + std Z+60,r12 + std Z+61,r13 + std Z+62,r14 + std Z+63,r15 + subi r28,160 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot512_permute_7, .-knot512_permute_7 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_8, @object + .size table_8, 140 +table_8: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 17 + .byte 35 + .byte 71 + .byte 142 + .byte 28 + .byte 56 + .byte 113 + .byte 226 + .byte 196 + .byte 137 + .byte 18 + .byte 37 + .byte 75 + .byte 151 + .byte 46 + .byte 92 + .byte 184 + .byte 112 + .byte 224 + .byte 192 + .byte 129 + .byte 3 + .byte 6 + .byte 12 + .byte 25 + .byte 50 + .byte 100 + .byte 201 + .byte 146 + .byte 36 + 
.byte 73 + .byte 147 + .byte 38 + .byte 77 + .byte 155 + .byte 55 + .byte 110 + .byte 220 + .byte 185 + .byte 114 + .byte 228 + .byte 200 + .byte 144 + .byte 32 + .byte 65 + .byte 130 + .byte 5 + .byte 10 + .byte 21 + .byte 43 + .byte 86 + .byte 173 + .byte 91 + .byte 182 + .byte 109 + .byte 218 + .byte 181 + .byte 107 + .byte 214 + .byte 172 + .byte 89 + .byte 178 + .byte 101 + .byte 203 + .byte 150 + .byte 44 + .byte 88 + .byte 176 + .byte 97 + .byte 195 + .byte 135 + .byte 15 + .byte 31 + .byte 62 + .byte 125 + .byte 251 + .byte 246 + .byte 237 + .byte 219 + .byte 183 + .byte 111 + .byte 222 + .byte 189 + .byte 122 + .byte 245 + .byte 235 + .byte 215 + .byte 174 + .byte 93 + .byte 186 + .byte 116 + .byte 232 + .byte 209 + .byte 162 + .byte 68 + .byte 136 + .byte 16 + .byte 33 + .byte 67 + .byte 134 + .byte 13 + .byte 27 + .byte 54 + .byte 108 + .byte 216 + .byte 177 + .byte 99 + .byte 199 + .byte 143 + .byte 30 + .byte 60 + .byte 121 + .byte 243 + .byte 231 + .byte 206 + .byte 156 + .byte 57 + .byte 115 + .byte 230 + .byte 204 + .byte 152 + .byte 49 + .byte 98 + .byte 197 + .byte 139 + .byte 22 + .byte 45 + .byte 90 + .byte 180 + .byte 105 + .byte 210 + .byte 164 + .byte 72 + .byte 145 + .byte 34 + .byte 69 + + .text +.global knot512_permute_8 + .type knot512_permute_8, @function +knot512_permute_8: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,96 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 113 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r26,Z+16 + ldd r27,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + ldd r26,Z+32 + ldd r27,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r8,Z+40 + ldd r9,Z+41 + ldd r10,Z+42 + ldd r11,Z+43 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + std Y+33,r26 + std Y+34,r27 + std Y+35,r2 + std Y+36,r3 + std Y+37,r4 + std Y+38,r5 + std Y+39,r6 + std Y+40,r7 + std Y+41,r8 + std Y+42,r9 + std Y+43,r10 + std Y+44,r11 + std Y+45,r12 + std Y+46,r13 + std Y+47,r14 + std Y+48,r15 + ldd r26,Z+48 + ldd r27,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r8,Z+56 + ldd r9,Z+57 + ldd r10,Z+58 + ldd r11,Z+59 + ldd r12,Z+60 + ldd r13,Z+61 + ldd r14,Z+62 + ldd r15,Z+63 + adiw r28,49 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y+,r12 + st Y+,r13 + st Y+,r14 + st Y,r15 + subi r28,64 + sbc r29,r1 + push r31 + push r30 + ldi r30,lo8(table_8) + ldi r31,hi8(table_8) 
+#if defined(RAMPZ) + ldi r17,hh8(table_8) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +134: + ldd r24,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r24,r18 + inc r30 + ldd r18,Y+17 + ldd r19,Y+33 + ldd r20,Y+49 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,175 + sbci r29,255 + st Y,r23 + subi r28,81 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r26,r18 + or r26,r19 + eor r26,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+1,r21 + mov r21,r26 + and r21,r24 + eor r21,r25 + subi r28,191 + sbci r29,255 + st Y,r21 + subi r28,65 + sbc r29,r1 + ldd r24,Y+2 + ldd r18,Y+18 + ldd r19,Y+34 + ldd r20,Y+50 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,174 + sbci r29,255 + st Y,r23 + subi r28,82 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r27,r18 + or r27,r19 + eor r27,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+2,r21 + mov r21,r27 + and r21,r24 + eor r21,r25 + subi r28,190 + sbci r29,255 + st Y,r21 + subi r28,66 + sbc r29,r1 + ldd r24,Y+3 + ldd r18,Y+19 + ldd r19,Y+35 + ldd r20,Y+51 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,173 + sbci r29,255 + st Y,r23 + subi r28,83 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r2,r18 + or r2,r19 + eor r2,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+3,r21 + mov r21,r2 + and r21,r24 + eor r21,r25 + subi r28,189 + sbci r29,255 + st Y,r21 + subi r28,67 + sbc r29,r1 + ldd r24,Y+4 + ldd r18,Y+20 + ldd r19,Y+36 + ldd r20,Y+52 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,172 + sbci r29,255 + st Y,r23 + subi r28,84 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r3,r18 + or r3,r19 + eor r3,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+4,r21 + mov r21,r3 + and r21,r24 + eor r21,r25 + subi r28,188 + sbci r29,255 + st Y,r21 + subi r28,68 + sbc r29,r1 + ldd r24,Y+5 + ldd r18,Y+21 + ldd r19,Y+37 + ldd r20,Y+53 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,171 + sbci r29,255 + st Y,r23 + subi r28,85 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r4,r18 + or r4,r19 + eor r4,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+5,r21 + mov r21,r4 + and r21,r24 + eor r21,r25 + subi r28,187 + sbci r29,255 + st Y,r21 + subi r28,69 + sbc r29,r1 + ldd r24,Y+6 + ldd r18,Y+22 + ldd r19,Y+38 + ldd r20,Y+54 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,170 + sbci r29,255 + st Y,r23 + subi r28,86 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r5,r18 + or r5,r19 + eor r5,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+6,r21 + mov r21,r5 + and r21,r24 + eor r21,r25 + subi r28,186 + sbci r29,255 + st Y,r21 + subi r28,70 + sbc r29,r1 + ldd r24,Y+7 + ldd r18,Y+23 + ldd r19,Y+39 + ldd r20,Y+55 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,169 + sbci r29,255 + st Y,r23 + subi r28,87 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r6,r18 + or r6,r19 + eor r6,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+7,r21 + mov r21,r6 + and r21,r24 + eor r21,r25 + subi r28,185 + sbci r29,255 + st Y,r21 + subi 
r28,71 + sbc r29,r1 + ldd r24,Y+8 + ldd r18,Y+24 + ldd r19,Y+40 + ldd r20,Y+56 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,168 + sbci r29,255 + st Y,r23 + subi r28,88 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r7,r18 + or r7,r19 + eor r7,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+8,r21 + mov r21,r7 + and r21,r24 + eor r21,r25 + subi r28,184 + sbci r29,255 + st Y,r21 + subi r28,72 + sbc r29,r1 + ldd r24,Y+9 + ldd r18,Y+25 + ldd r19,Y+41 + ldd r20,Y+57 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,167 + sbci r29,255 + st Y,r23 + subi r28,89 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r8,r18 + or r8,r19 + eor r8,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+9,r21 + mov r21,r8 + and r21,r24 + eor r21,r25 + subi r28,183 + sbci r29,255 + st Y,r21 + subi r28,73 + sbc r29,r1 + ldd r24,Y+10 + ldd r18,Y+26 + ldd r19,Y+42 + ldd r20,Y+58 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,166 + sbci r29,255 + st Y,r23 + subi r28,90 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r9,r18 + or r9,r19 + eor r9,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+10,r21 + mov r21,r9 + and r21,r24 + eor r21,r25 + subi r28,182 + sbci r29,255 + st Y,r21 + subi r28,74 + sbc r29,r1 + ldd r24,Y+11 + ldd r18,Y+27 + ldd r19,Y+43 + ldd r20,Y+59 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,165 + sbci r29,255 + st Y,r23 + subi r28,91 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r10,r18 + or r10,r19 + eor r10,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+11,r21 + mov r21,r10 + and r21,r24 + eor r21,r25 + subi r28,181 + sbci r29,255 + st Y,r21 + subi r28,75 + sbc r29,r1 + ldd r24,Y+12 + ldd r18,Y+28 + ldd r19,Y+44 + ldd r20,Y+60 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,164 + sbci r29,255 + st Y,r23 + subi r28,92 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r11,r18 + or r11,r19 + eor r11,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+12,r21 + mov r21,r11 + and r21,r24 + eor r21,r25 + subi r28,180 + sbci r29,255 + st Y,r21 + subi r28,76 + sbc r29,r1 + ldd r24,Y+13 + ldd r18,Y+29 + ldd r19,Y+45 + ldd r20,Y+61 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,163 + sbci r29,255 + st Y,r23 + subi r28,93 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r12,r18 + or r12,r19 + eor r12,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+13,r21 + mov r21,r12 + and r21,r24 + eor r21,r25 + subi r28,179 + sbci r29,255 + st Y,r21 + subi r28,77 + sbc r29,r1 + ldd r24,Y+14 + ldd r18,Y+30 + ldd r19,Y+46 + ldd r20,Y+62 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,162 + sbci r29,255 + st Y,r23 + subi r28,94 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r13,r18 + or r13,r19 + eor r13,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+14,r21 + mov r21,r13 + and r21,r24 + eor r21,r25 + subi r28,178 + sbci r29,255 + st Y,r21 + subi r28,78 + sbc r29,r1 + ldd r24,Y+15 + ldd r18,Y+31 + ldd r19,Y+47 + ldd r20,Y+63 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,161 + sbci r29,255 + st Y,r23 + subi r28,95 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov 
r14,r18 + or r14,r19 + eor r14,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+15,r21 + mov r21,r14 + and r21,r24 + eor r21,r25 + subi r28,177 + sbci r29,255 + st Y,r21 + subi r28,79 + sbc r29,r1 + ldd r24,Y+16 + ldd r18,Y+32 + ldd r19,Y+48 + subi r28,192 + sbci r29,255 + ld r20,Y + subi r28,64 + sbc r29,r1 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,160 + sbci r29,255 + st Y,r23 + subi r28,96 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r15,r18 + or r15,r19 + eor r15,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+16,r21 + mov r21,r15 + and r21,r24 + eor r21,r25 + subi r28,176 + sbci r29,255 + st Y,r21 + subi r28,80 + sbc r29,r1 + std Y+33,r14 + std Y+34,r15 + std Y+35,r26 + std Y+36,r27 + std Y+37,r2 + std Y+38,r3 + std Y+39,r4 + std Y+40,r5 + std Y+41,r6 + std Y+42,r7 + std Y+43,r8 + std Y+44,r9 + std Y+45,r10 + std Y+46,r11 + std Y+47,r12 + std Y+48,r13 + subi r28,191 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,80 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,96 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + adiw r28,49 + st Y+,r13 + st Y+,r14 + st Y+,r15 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y,r12 + subi r28,64 + sbc r29,r1 + dec r22 + breq 5812f + rjmp 134b +5812: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r26,Y+17 + ldd r27,Y+18 + ldd r2,Y+19 + ldd r3,Y+20 + ldd r4,Y+21 + ldd r5,Y+22 + ldd r6,Y+23 + ldd r7,Y+24 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + std Z+16,r26 + std Z+17,r27 + std Z+18,r2 + std Z+19,r3 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + ldd r26,Y+33 + ldd r27,Y+34 + ldd r2,Y+35 + ldd r3,Y+36 + ldd r4,Y+37 + ldd r5,Y+38 + ldd r6,Y+39 + ldd r7,Y+40 + ldd r8,Y+41 + ldd r9,Y+42 + ldd r10,Y+43 + ldd r11,Y+44 + ldd r12,Y+45 + ldd r13,Y+46 + ldd r14,Y+47 + ldd r15,Y+48 + std Z+32,r26 + std Z+33,r27 + std 
Z+34,r2 + std Z+35,r3 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r8 + std Z+41,r9 + std Z+42,r10 + std Z+43,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + adiw r28,49 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,64 + sbc r29,r1 + std Z+48,r26 + std Z+49,r27 + std Z+50,r2 + std Z+51,r3 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + std Z+56,r8 + std Z+57,r9 + std Z+58,r10 + std Z+59,r11 + std Z+60,r12 + std Z+61,r13 + std Z+62,r14 + std Z+63,r15 + subi r28,160 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot512_permute_8, .-knot512_permute_8 + +#endif diff --git a/knot/Implementations/crypto_aead/knot128v1/rhys/internal-knot.c b/knot/Implementations/crypto_aead/knot128v1/rhys/internal-knot.c index 3486e6e..f8b378e 100644 --- a/knot/Implementations/crypto_aead/knot128v1/rhys/internal-knot.c +++ b/knot/Implementations/crypto_aead/knot128v1/rhys/internal-knot.c @@ -22,6 +22,8 @@ #include "internal-knot.h" +#if !defined(__AVR__) + /* Round constants for the KNOT-256, KNOT-384, and KNOT-512 permutations */ static uint8_t const rc6[52] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06, 0x0c, 0x18, 0x31, 0x22, @@ -295,3 +297,5 @@ void knot512_permute_8(knot512_state_t *state, uint8_t rounds) { knot512_permute(state, rc8, rounds); } + +#endif /* !__AVR__ */ diff --git a/knot/Implementations/crypto_aead/knot128v1/rhys/internal-util.h b/knot/Implementations/crypto_aead/knot128v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/knot/Implementations/crypto_aead/knot128v1/rhys/internal-util.h +++ b/knot/Implementations/crypto_aead/knot128v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/aead-common.c b/knot/Implementations/crypto_aead/knot128v2/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/aead-common.h b/knot/Implementations/crypto_aead/knot128v2/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. 
- * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/api.h b/knot/Implementations/crypto_aead/knot128v2/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/encrypt.c b/knot/Implementations/crypto_aead/knot128v2/rhys-avr/encrypt.c deleted file mode 100644 index e80d720..0000000 --- a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "knot.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return knot_aead_128_384_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return knot_aead_128_384_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot-256-avr.S b/knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot-256-avr.S deleted file mode 100644 index 15e6389..0000000 --- a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot-256-avr.S +++ /dev/null @@ -1,1093 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_6, @object - .size table_6, 52 -table_6: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 33 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 49 - .byte 34 - .byte 5 - .byte 10 - .byte 20 - .byte 41 - .byte 19 - .byte 39 - .byte 15 - .byte 30 - .byte 61 - .byte 58 - .byte 52 - .byte 40 - .byte 17 - .byte 35 - .byte 7 - .byte 14 - .byte 28 - .byte 57 - .byte 50 - .byte 36 - .byte 9 - .byte 18 - .byte 37 - .byte 11 - .byte 22 - .byte 45 - .byte 27 - .byte 55 - .byte 46 - .byte 29 - .byte 59 - .byte 54 - .byte 44 - .byte 25 - .byte 51 - .byte 38 - .byte 13 - .byte 26 - .byte 53 - .byte 42 - - .text -.global knot256_permute_6 - .type knot256_permute_6, @function -knot256_permute_6: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - 
movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 57 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r8 - std Y+18,r9 - std Y+19,r10 - std Y+20,r11 - std Y+21,r12 - std Y+22,r13 - std Y+23,r14 - std Y+24,r15 - push r31 - push r30 - ldi r30,lo8(table_6) - ldi r31,hi8(table_6) -#if defined(RAMPZ) - ldi r17,hh8(table_6) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -59: -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - eor r18,r23 - inc r30 - ldd r23,Y+1 - ldd r4,Y+9 - ldd r5,Y+17 - mov r24,r18 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+33,r7 - mov r16,r5 - eor r16,r24 - mov r8,r23 - or r8,r4 - eor r8,r16 - mov r24,r23 - eor r24,r5 - mov r18,r25 - and r18,r16 - eor r18,r24 - mov r6,r8 - and r6,r24 - eor r6,r25 - std Y+25,r6 - ldd r23,Y+2 - ldd r4,Y+10 - ldd r5,Y+18 - mov r24,r19 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+34,r7 - mov r16,r5 - eor r16,r24 - mov r9,r23 - or r9,r4 - eor r9,r16 - mov r24,r23 - eor r24,r5 - mov r19,r25 - and r19,r16 - eor r19,r24 - mov r6,r9 - and r6,r24 - eor r6,r25 - std Y+26,r6 - ldd r23,Y+3 - ldd r4,Y+11 - ldd r5,Y+19 - mov r24,r20 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+35,r7 - mov r16,r5 - eor r16,r24 - mov r10,r23 - or r10,r4 - eor r10,r16 - mov r24,r23 - eor r24,r5 - mov r20,r25 - and r20,r16 - eor r20,r24 - mov r6,r10 - and r6,r24 - eor r6,r25 - std Y+27,r6 - ldd r23,Y+4 - ldd r4,Y+12 - ldd r5,Y+20 - mov r24,r21 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+36,r7 - mov r16,r5 - eor r16,r24 - mov r11,r23 - or r11,r4 - eor r11,r16 - mov r24,r23 - eor r24,r5 - mov r21,r25 - and r21,r16 - eor r21,r24 - mov r6,r11 - and r6,r24 - eor r6,r25 - std Y+28,r6 - ldd r23,Y+5 - ldd r4,Y+13 - ldd r5,Y+21 - mov r24,r26 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+37,r7 - mov r16,r5 - eor r16,r24 - mov r12,r23 - or r12,r4 - eor r12,r16 - mov r24,r23 - eor r24,r5 - mov r26,r25 - and r26,r16 - eor r26,r24 - mov r6,r12 - and r6,r24 - eor r6,r25 - std Y+29,r6 - ldd r23,Y+6 - ldd r4,Y+14 - ldd r5,Y+22 - mov r24,r27 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+38,r7 - mov r16,r5 - eor r16,r24 - mov r13,r23 - or r13,r4 - eor r13,r16 - mov r24,r23 - eor r24,r5 - mov r27,r25 - and r27,r16 - eor r27,r24 - mov r6,r13 - and r6,r24 - eor r6,r25 - std Y+30,r6 - ldd r23,Y+7 - ldd r4,Y+15 - ldd r5,Y+23 - mov r24,r2 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+39,r7 - mov r16,r5 - eor r16,r24 - mov r14,r23 - or r14,r4 - eor r14,r16 - mov r24,r23 - eor r24,r5 - 
mov r2,r25 - and r2,r16 - eor r2,r24 - mov r6,r14 - and r6,r24 - eor r6,r25 - std Y+31,r6 - ldd r23,Y+8 - ldd r4,Y+16 - ldd r5,Y+24 - mov r24,r3 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+40,r7 - mov r16,r5 - eor r16,r24 - mov r15,r23 - or r15,r4 - eor r15,r16 - mov r24,r23 - eor r24,r5 - mov r3,r25 - and r3,r16 - eor r3,r24 - mov r6,r15 - and r6,r24 - eor r6,r25 - std Y+32,r6 - std Y+9,r15 - std Y+10,r8 - std Y+11,r9 - std Y+12,r10 - std Y+13,r11 - std Y+14,r12 - std Y+15,r13 - std Y+16,r14 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - ldd r12,Y+37 - ldd r13,Y+38 - ldd r14,Y+39 - ldd r15,Y+40 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+17,r13 - std Y+18,r14 - std Y+19,r15 - std Y+20,r8 - std Y+21,r9 - std Y+22,r10 - std Y+23,r11 - std Y+24,r12 - dec r22 - breq 5322f - rjmp 59b -5322: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - ldd r12,Y+5 - ldd r13,Y+6 - ldd r14,Y+7 - ldd r15,Y+8 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - ldd r8,Y+17 - ldd r9,Y+18 - ldd r10,Y+19 - ldd r11,Y+20 - ldd r12,Y+21 - ldd r13,Y+22 - ldd r14,Y+23 - ldd r15,Y+24 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - adiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot256_permute_6, .-knot256_permute_6 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 
105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot256_permute_7 - .type knot256_permute_7, @function -knot256_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 57 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r8 - std Y+18,r9 - std Y+19,r10 - std Y+20,r11 - std Y+21,r12 - std Y+22,r13 - std Y+23,r14 - std Y+24,r15 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r17,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -59: -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - eor r18,r23 - inc r30 - ldd r23,Y+1 - ldd r4,Y+9 - ldd r5,Y+17 - mov r24,r18 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+33,r7 - mov r16,r5 - eor r16,r24 - mov r8,r23 - or r8,r4 - eor r8,r16 - mov r24,r23 - eor r24,r5 - mov r18,r25 - and r18,r16 - eor r18,r24 - mov r6,r8 - and r6,r24 - eor r6,r25 - std Y+25,r6 - ldd r23,Y+2 - ldd r4,Y+10 - ldd r5,Y+18 - mov r24,r19 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+34,r7 - mov r16,r5 - eor r16,r24 - mov r9,r23 - or r9,r4 - eor r9,r16 - mov r24,r23 - eor r24,r5 - mov r19,r25 - and r19,r16 - eor r19,r24 - mov r6,r9 - and r6,r24 - eor r6,r25 - std Y+26,r6 - ldd r23,Y+3 - ldd r4,Y+11 - ldd r5,Y+19 - mov r24,r20 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+35,r7 - mov r16,r5 - eor r16,r24 - mov r10,r23 - or r10,r4 - eor r10,r16 - mov r24,r23 - eor r24,r5 - mov r20,r25 - and r20,r16 - eor r20,r24 - mov r6,r10 - and r6,r24 - eor r6,r25 - std Y+27,r6 - ldd r23,Y+4 - ldd r4,Y+12 - ldd r5,Y+20 - mov r24,r21 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+36,r7 - mov r16,r5 - eor r16,r24 - mov r11,r23 - or r11,r4 - eor r11,r16 - mov r24,r23 - eor r24,r5 - mov r21,r25 - and r21,r16 - eor r21,r24 - mov r6,r11 - and r6,r24 - eor r6,r25 - std Y+28,r6 - ldd r23,Y+5 - ldd r4,Y+13 - ldd r5,Y+21 - mov r24,r26 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+37,r7 - mov r16,r5 - eor r16,r24 - mov r12,r23 - or r12,r4 - eor r12,r16 - mov r24,r23 - eor r24,r5 - mov r26,r25 - and r26,r16 - eor r26,r24 - mov r6,r12 - and r6,r24 - eor r6,r25 - std Y+29,r6 - ldd r23,Y+6 - ldd r4,Y+14 - ldd r5,Y+22 - mov r24,r27 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+38,r7 - mov r16,r5 - eor r16,r24 - mov 
r13,r23 - or r13,r4 - eor r13,r16 - mov r24,r23 - eor r24,r5 - mov r27,r25 - and r27,r16 - eor r27,r24 - mov r6,r13 - and r6,r24 - eor r6,r25 - std Y+30,r6 - ldd r23,Y+7 - ldd r4,Y+15 - ldd r5,Y+23 - mov r24,r2 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+39,r7 - mov r16,r5 - eor r16,r24 - mov r14,r23 - or r14,r4 - eor r14,r16 - mov r24,r23 - eor r24,r5 - mov r2,r25 - and r2,r16 - eor r2,r24 - mov r6,r14 - and r6,r24 - eor r6,r25 - std Y+31,r6 - ldd r23,Y+8 - ldd r4,Y+16 - ldd r5,Y+24 - mov r24,r3 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+40,r7 - mov r16,r5 - eor r16,r24 - mov r15,r23 - or r15,r4 - eor r15,r16 - mov r24,r23 - eor r24,r5 - mov r3,r25 - and r3,r16 - eor r3,r24 - mov r6,r15 - and r6,r24 - eor r6,r25 - std Y+32,r6 - std Y+9,r15 - std Y+10,r8 - std Y+11,r9 - std Y+12,r10 - std Y+13,r11 - std Y+14,r12 - std Y+15,r13 - std Y+16,r14 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - ldd r12,Y+37 - ldd r13,Y+38 - ldd r14,Y+39 - ldd r15,Y+40 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+17,r13 - std Y+18,r14 - std Y+19,r15 - std Y+20,r8 - std Y+21,r9 - std Y+22,r10 - std Y+23,r11 - std Y+24,r12 - dec r22 - breq 5322f - rjmp 59b -5322: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - ldd r12,Y+5 - ldd r13,Y+6 - ldd r14,Y+7 - ldd r15,Y+8 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - ldd r8,Y+17 - ldd r9,Y+18 - ldd r10,Y+19 - ldd r11,Y+20 - ldd r12,Y+21 - ldd r13,Y+22 - ldd r14,Y+23 - ldd r15,Y+24 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - adiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot256_permute_7, .-knot256_permute_7 - -#endif diff --git a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot-384-avr.S b/knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot-384-avr.S deleted file mode 100644 index 4d15898..0000000 --- a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot-384-avr.S +++ /dev/null @@ -1,833 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - 
.byte 15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot384_permute_7 - .type knot384_permute_7, @function -knot384_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,72 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 87 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - ldd r4,Z+16 - ldd r5,Z+17 - ldd r6,Z+18 - ldd r7,Z+19 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - std Y+13,r26 - std Y+14,r27 - std Y+15,r2 - std Y+16,r3 - std Y+17,r4 - std Y+18,r5 - std Y+19,r6 - std Y+20,r7 - std Y+21,r8 - std Y+22,r9 - std Y+23,r10 - std Y+24,r11 - ldd r26,Z+24 - ldd r27,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r4,Z+28 - ldd r5,Z+29 - ldd r6,Z+30 - ldd r7,Z+31 - ldd r8,Z+32 - ldd r9,Z+33 - ldd r10,Z+34 - ldd r11,Z+35 - std Y+25,r26 - std Y+26,r27 - std Y+27,r2 - std Y+28,r3 - std Y+29,r4 - std Y+30,r5 - std Y+31,r6 - std Y+32,r7 - std Y+33,r8 - std Y+34,r9 - std Y+35,r10 - std Y+36,r11 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+37,r26 - std Y+38,r27 - std Y+39,r2 - std Y+40,r3 - std Y+41,r4 - std Y+42,r5 - std Y+43,r6 - std Y+44,r7 - std Y+45,r8 - std Y+46,r9 - std Y+47,r10 - std Y+48,r11 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r24,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif -99: - ldd r12,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - inc r30 - ldd r18,Y+13 - ldd r19,Y+25 - ldd r20,Y+37 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+61,r23 - mov r14,r20 - eor r14,r12 - mov r26,r18 - or r26,r19 - eor r26,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+1,r21 - mov r21,r26 - and r21,r12 - eor r21,r13 - std Y+49,r21 - ldd r12,Y+2 - ldd r18,Y+14 - ldd r19,Y+26 - ldd r20,Y+38 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor 
r23,r13 - std Y+62,r23 - mov r14,r20 - eor r14,r12 - mov r27,r18 - or r27,r19 - eor r27,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+2,r21 - mov r21,r27 - and r21,r12 - eor r21,r13 - std Y+50,r21 - ldd r12,Y+3 - ldd r18,Y+15 - ldd r19,Y+27 - ldd r20,Y+39 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+63,r23 - mov r14,r20 - eor r14,r12 - mov r2,r18 - or r2,r19 - eor r2,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+3,r21 - mov r21,r2 - and r21,r12 - eor r21,r13 - std Y+51,r21 - ldd r12,Y+4 - ldd r18,Y+16 - ldd r19,Y+28 - ldd r20,Y+40 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,192 - sbci r29,255 - st Y,r23 - subi r28,64 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r3,r18 - or r3,r19 - eor r3,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+4,r21 - mov r21,r3 - and r21,r12 - eor r21,r13 - std Y+52,r21 - ldd r12,Y+5 - ldd r18,Y+17 - ldd r19,Y+29 - ldd r20,Y+41 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,191 - sbci r29,255 - st Y,r23 - subi r28,65 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r4,r18 - or r4,r19 - eor r4,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+5,r21 - mov r21,r4 - and r21,r12 - eor r21,r13 - std Y+53,r21 - ldd r12,Y+6 - ldd r18,Y+18 - ldd r19,Y+30 - ldd r20,Y+42 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,190 - sbci r29,255 - st Y,r23 - subi r28,66 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r5,r18 - or r5,r19 - eor r5,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+6,r21 - mov r21,r5 - and r21,r12 - eor r21,r13 - std Y+54,r21 - ldd r12,Y+7 - ldd r18,Y+19 - ldd r19,Y+31 - ldd r20,Y+43 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,189 - sbci r29,255 - st Y,r23 - subi r28,67 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r6,r18 - or r6,r19 - eor r6,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+7,r21 - mov r21,r6 - and r21,r12 - eor r21,r13 - std Y+55,r21 - ldd r12,Y+8 - ldd r18,Y+20 - ldd r19,Y+32 - ldd r20,Y+44 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,188 - sbci r29,255 - st Y,r23 - subi r28,68 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r7,r18 - or r7,r19 - eor r7,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+8,r21 - mov r21,r7 - and r21,r12 - eor r21,r13 - std Y+56,r21 - ldd r12,Y+9 - ldd r18,Y+21 - ldd r19,Y+33 - ldd r20,Y+45 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,187 - sbci r29,255 - st Y,r23 - subi r28,69 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r8,r18 - or r8,r19 - eor r8,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+9,r21 - mov r21,r8 - and r21,r12 - eor r21,r13 - std Y+57,r21 - ldd r12,Y+10 - ldd r18,Y+22 - ldd r19,Y+34 - ldd r20,Y+46 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,186 - sbci r29,255 - st Y,r23 - subi r28,70 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r9,r18 - or r9,r19 - eor r9,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+10,r21 - mov r21,r9 - and r21,r12 - eor r21,r13 - std Y+58,r21 - ldd r12,Y+11 - ldd r18,Y+23 - ldd r19,Y+35 - ldd r20,Y+47 - com 
r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,185 - sbci r29,255 - st Y,r23 - subi r28,71 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r10,r18 - or r10,r19 - eor r10,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+11,r21 - mov r21,r10 - and r21,r12 - eor r21,r13 - std Y+59,r21 - ldd r12,Y+12 - ldd r18,Y+24 - ldd r19,Y+36 - ldd r20,Y+48 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,184 - sbci r29,255 - st Y,r23 - subi r28,72 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r11,r18 - or r11,r19 - eor r11,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+12,r21 - mov r21,r11 - and r21,r12 - eor r21,r13 - std Y+60,r21 - std Y+25,r11 - std Y+26,r26 - std Y+27,r27 - std Y+28,r2 - std Y+29,r3 - std Y+30,r4 - std Y+31,r5 - std Y+32,r6 - std Y+33,r7 - std Y+34,r8 - std Y+35,r9 - std Y+36,r10 - ldd r26,Y+49 - ldd r27,Y+50 - ldd r2,Y+51 - ldd r3,Y+52 - ldd r4,Y+53 - ldd r5,Y+54 - ldd r6,Y+55 - ldd r7,Y+56 - ldd r8,Y+57 - ldd r9,Y+58 - ldd r10,Y+59 - ldd r11,Y+60 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - adc r26,r1 - std Y+13,r26 - std Y+14,r27 - std Y+15,r2 - std Y+16,r3 - std Y+17,r4 - std Y+18,r5 - std Y+19,r6 - std Y+20,r7 - std Y+21,r8 - std Y+22,r9 - std Y+23,r10 - std Y+24,r11 - adiw r28,61 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y - subi r28,72 - sbc r29,r1 - bst r26,0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r3 - ror r2 - ror r27 - ror r26 - bld r11,7 - std Y+37,r5 - std Y+38,r6 - std Y+39,r7 - std Y+40,r8 - std Y+41,r9 - std Y+42,r10 - std Y+43,r11 - std Y+44,r26 - std Y+45,r27 - std Y+46,r2 - std Y+47,r3 - std Y+48,r4 - dec r22 - breq 5542f - rjmp 99b -5542: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r2,Y+15 - ldd r3,Y+16 - ldd r4,Y+17 - ldd r5,Y+18 - ldd r6,Y+19 - ldd r7,Y+20 - ldd r8,Y+21 - ldd r9,Y+22 - ldd r10,Y+23 - ldd r11,Y+24 - std Z+12,r26 - std Z+13,r27 - std Z+14,r2 - std Z+15,r3 - std Z+16,r4 - std Z+17,r5 - std Z+18,r6 - std Z+19,r7 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - ldd r26,Y+25 - ldd r27,Y+26 - ldd r2,Y+27 - ldd r3,Y+28 - ldd r4,Y+29 - ldd r5,Y+30 - ldd r6,Y+31 - ldd r7,Y+32 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - std Z+24,r26 - std Z+25,r27 - std Z+26,r2 - std Z+27,r3 - std Z+28,r4 - std Z+29,r5 - std Z+30,r6 - std Z+31,r7 - std Z+32,r8 - std Z+33,r9 - std Z+34,r10 - std Z+35,r11 - ldd r26,Y+37 - ldd r27,Y+38 - ldd r2,Y+39 - ldd r3,Y+40 - ldd r4,Y+41 - ldd r5,Y+42 - ldd r6,Y+43 - ldd r7,Y+44 - ldd r8,Y+45 - ldd r9,Y+46 - ldd r10,Y+47 - ldd r11,Y+48 - std Z+36,r26 - std Z+37,r27 - std Z+38,r2 - std Z+39,r3 - std Z+40,r4 - std Z+41,r5 - std Z+42,r6 - std Z+43,r7 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - subi r28,184 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop 
r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot384_permute_7, .-knot384_permute_7 - -#endif diff --git a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot-512-avr.S b/knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot-512-avr.S deleted file mode 100644 index 6f92ac3..0000000 --- a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot-512-avr.S +++ /dev/null @@ -1,2315 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot512_permute_7 - .type knot512_permute_7, @function -knot512_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,96 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 113 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r26,Z+16 - ldd r27,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - ldd r26,Z+32 - ldd r27,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r8,Z+40 - ldd r9,Z+41 - ldd r10,Z+42 - ldd r11,Z+43 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - std Y+33,r26 - std Y+34,r27 - std Y+35,r2 - std Y+36,r3 - std Y+37,r4 - std Y+38,r5 - std Y+39,r6 - std Y+40,r7 - std Y+41,r8 - std Y+42,r9 - std Y+43,r10 - std Y+44,r11 - std Y+45,r12 - std 
Y+46,r13 - std Y+47,r14 - std Y+48,r15 - ldd r26,Z+48 - ldd r27,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r8,Z+56 - ldd r9,Z+57 - ldd r10,Z+58 - ldd r11,Z+59 - ldd r12,Z+60 - ldd r13,Z+61 - ldd r14,Z+62 - ldd r15,Z+63 - adiw r28,49 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y+,r12 - st Y+,r13 - st Y+,r14 - st Y,r15 - subi r28,64 - sbc r29,r1 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r17,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -134: - ldd r24,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r24,r18 - inc r30 - ldd r18,Y+17 - ldd r19,Y+33 - ldd r20,Y+49 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,175 - sbci r29,255 - st Y,r23 - subi r28,81 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r26,r18 - or r26,r19 - eor r26,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+1,r21 - mov r21,r26 - and r21,r24 - eor r21,r25 - subi r28,191 - sbci r29,255 - st Y,r21 - subi r28,65 - sbc r29,r1 - ldd r24,Y+2 - ldd r18,Y+18 - ldd r19,Y+34 - ldd r20,Y+50 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,174 - sbci r29,255 - st Y,r23 - subi r28,82 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r27,r18 - or r27,r19 - eor r27,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+2,r21 - mov r21,r27 - and r21,r24 - eor r21,r25 - subi r28,190 - sbci r29,255 - st Y,r21 - subi r28,66 - sbc r29,r1 - ldd r24,Y+3 - ldd r18,Y+19 - ldd r19,Y+35 - ldd r20,Y+51 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,173 - sbci r29,255 - st Y,r23 - subi r28,83 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r2,r18 - or r2,r19 - eor r2,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+3,r21 - mov r21,r2 - and r21,r24 - eor r21,r25 - subi r28,189 - sbci r29,255 - st Y,r21 - subi r28,67 - sbc r29,r1 - ldd r24,Y+4 - ldd r18,Y+20 - ldd r19,Y+36 - ldd r20,Y+52 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,172 - sbci r29,255 - st Y,r23 - subi r28,84 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r3,r18 - or r3,r19 - eor r3,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+4,r21 - mov r21,r3 - and r21,r24 - eor r21,r25 - subi r28,188 - sbci r29,255 - st Y,r21 - subi r28,68 - sbc r29,r1 - ldd r24,Y+5 - ldd r18,Y+21 - ldd r19,Y+37 - ldd r20,Y+53 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,171 - sbci r29,255 - st Y,r23 - subi r28,85 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r4,r18 - or r4,r19 - eor r4,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+5,r21 - mov r21,r4 - and r21,r24 - eor r21,r25 - subi r28,187 - sbci r29,255 - st Y,r21 - subi r28,69 - sbc r29,r1 - ldd r24,Y+6 - ldd r18,Y+22 - ldd r19,Y+38 - ldd r20,Y+54 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,170 - sbci r29,255 - st Y,r23 - subi r28,86 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r5,r18 - or r5,r19 - eor r5,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std 
Y+6,r21 - mov r21,r5 - and r21,r24 - eor r21,r25 - subi r28,186 - sbci r29,255 - st Y,r21 - subi r28,70 - sbc r29,r1 - ldd r24,Y+7 - ldd r18,Y+23 - ldd r19,Y+39 - ldd r20,Y+55 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,169 - sbci r29,255 - st Y,r23 - subi r28,87 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r6,r18 - or r6,r19 - eor r6,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+7,r21 - mov r21,r6 - and r21,r24 - eor r21,r25 - subi r28,185 - sbci r29,255 - st Y,r21 - subi r28,71 - sbc r29,r1 - ldd r24,Y+8 - ldd r18,Y+24 - ldd r19,Y+40 - ldd r20,Y+56 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,168 - sbci r29,255 - st Y,r23 - subi r28,88 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r7,r18 - or r7,r19 - eor r7,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+8,r21 - mov r21,r7 - and r21,r24 - eor r21,r25 - subi r28,184 - sbci r29,255 - st Y,r21 - subi r28,72 - sbc r29,r1 - ldd r24,Y+9 - ldd r18,Y+25 - ldd r19,Y+41 - ldd r20,Y+57 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,167 - sbci r29,255 - st Y,r23 - subi r28,89 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r8,r18 - or r8,r19 - eor r8,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+9,r21 - mov r21,r8 - and r21,r24 - eor r21,r25 - subi r28,183 - sbci r29,255 - st Y,r21 - subi r28,73 - sbc r29,r1 - ldd r24,Y+10 - ldd r18,Y+26 - ldd r19,Y+42 - ldd r20,Y+58 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,166 - sbci r29,255 - st Y,r23 - subi r28,90 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r9,r18 - or r9,r19 - eor r9,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+10,r21 - mov r21,r9 - and r21,r24 - eor r21,r25 - subi r28,182 - sbci r29,255 - st Y,r21 - subi r28,74 - sbc r29,r1 - ldd r24,Y+11 - ldd r18,Y+27 - ldd r19,Y+43 - ldd r20,Y+59 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,165 - sbci r29,255 - st Y,r23 - subi r28,91 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r10,r18 - or r10,r19 - eor r10,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+11,r21 - mov r21,r10 - and r21,r24 - eor r21,r25 - subi r28,181 - sbci r29,255 - st Y,r21 - subi r28,75 - sbc r29,r1 - ldd r24,Y+12 - ldd r18,Y+28 - ldd r19,Y+44 - ldd r20,Y+60 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,164 - sbci r29,255 - st Y,r23 - subi r28,92 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r11,r18 - or r11,r19 - eor r11,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+12,r21 - mov r21,r11 - and r21,r24 - eor r21,r25 - subi r28,180 - sbci r29,255 - st Y,r21 - subi r28,76 - sbc r29,r1 - ldd r24,Y+13 - ldd r18,Y+29 - ldd r19,Y+45 - ldd r20,Y+61 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,163 - sbci r29,255 - st Y,r23 - subi r28,93 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r12,r18 - or r12,r19 - eor r12,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+13,r21 - mov r21,r12 - and r21,r24 - eor r21,r25 - subi r28,179 - sbci r29,255 - st Y,r21 - subi r28,77 - sbc r29,r1 - ldd r24,Y+14 - ldd r18,Y+30 - ldd r19,Y+46 - ldd r20,Y+62 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,162 
- sbci r29,255 - st Y,r23 - subi r28,94 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r13,r18 - or r13,r19 - eor r13,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+14,r21 - mov r21,r13 - and r21,r24 - eor r21,r25 - subi r28,178 - sbci r29,255 - st Y,r21 - subi r28,78 - sbc r29,r1 - ldd r24,Y+15 - ldd r18,Y+31 - ldd r19,Y+47 - ldd r20,Y+63 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,161 - sbci r29,255 - st Y,r23 - subi r28,95 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r14,r18 - or r14,r19 - eor r14,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+15,r21 - mov r21,r14 - and r21,r24 - eor r21,r25 - subi r28,177 - sbci r29,255 - st Y,r21 - subi r28,79 - sbc r29,r1 - ldd r24,Y+16 - ldd r18,Y+32 - ldd r19,Y+48 - subi r28,192 - sbci r29,255 - ld r20,Y - subi r28,64 - sbc r29,r1 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,160 - sbci r29,255 - st Y,r23 - subi r28,96 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r15,r18 - or r15,r19 - eor r15,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+16,r21 - mov r21,r15 - and r21,r24 - eor r21,r25 - subi r28,176 - sbci r29,255 - st Y,r21 - subi r28,80 - sbc r29,r1 - std Y+33,r14 - std Y+34,r15 - std Y+35,r26 - std Y+36,r27 - std Y+37,r2 - std Y+38,r3 - std Y+39,r4 - std Y+40,r5 - std Y+41,r6 - std Y+42,r7 - std Y+43,r8 - std Y+44,r9 - std Y+45,r10 - std Y+46,r11 - std Y+47,r12 - std Y+48,r13 - subi r28,191 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,80 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,96 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - adiw r28,49 - st Y+,r13 - st Y+,r14 - st Y+,r15 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y,r12 - subi r28,64 - sbc r29,r1 - dec r22 - breq 5812f - rjmp 134b -5812: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r26,Y+17 - ldd r27,Y+18 - ldd r2,Y+19 - ldd r3,Y+20 - ldd r4,Y+21 - ldd r5,Y+22 - ldd r6,Y+23 - ldd r7,Y+24 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 
- ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - std Z+16,r26 - std Z+17,r27 - std Z+18,r2 - std Z+19,r3 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - ldd r26,Y+33 - ldd r27,Y+34 - ldd r2,Y+35 - ldd r3,Y+36 - ldd r4,Y+37 - ldd r5,Y+38 - ldd r6,Y+39 - ldd r7,Y+40 - ldd r8,Y+41 - ldd r9,Y+42 - ldd r10,Y+43 - ldd r11,Y+44 - ldd r12,Y+45 - ldd r13,Y+46 - ldd r14,Y+47 - ldd r15,Y+48 - std Z+32,r26 - std Z+33,r27 - std Z+34,r2 - std Z+35,r3 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r8 - std Z+41,r9 - std Z+42,r10 - std Z+43,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - adiw r28,49 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,64 - sbc r29,r1 - std Z+48,r26 - std Z+49,r27 - std Z+50,r2 - std Z+51,r3 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - std Z+56,r8 - std Z+57,r9 - std Z+58,r10 - std Z+59,r11 - std Z+60,r12 - std Z+61,r13 - std Z+62,r14 - std Z+63,r15 - subi r28,160 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot512_permute_7, .-knot512_permute_7 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_8, @object - .size table_8, 140 -table_8: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 17 - .byte 35 - .byte 71 - .byte 142 - .byte 28 - .byte 56 - .byte 113 - .byte 226 - .byte 196 - .byte 137 - .byte 18 - .byte 37 - .byte 75 - .byte 151 - .byte 46 - .byte 92 - .byte 184 - .byte 112 - .byte 224 - .byte 192 - .byte 129 - .byte 3 - .byte 6 - .byte 12 - .byte 25 - .byte 50 - .byte 100 - .byte 201 - .byte 146 - .byte 36 - .byte 73 - .byte 147 - .byte 38 - .byte 77 - .byte 155 - .byte 55 - .byte 110 - .byte 220 - .byte 185 - .byte 114 - .byte 228 - .byte 200 - .byte 144 - .byte 32 - .byte 65 - .byte 130 - .byte 5 - .byte 10 - .byte 21 - .byte 43 - .byte 86 - .byte 173 - .byte 91 - .byte 182 - .byte 109 - .byte 218 - .byte 181 - .byte 107 - .byte 214 - .byte 172 - .byte 89 - .byte 178 - .byte 101 - .byte 203 - .byte 150 - .byte 44 - .byte 88 - .byte 176 - .byte 97 - .byte 195 - .byte 135 - .byte 15 - .byte 31 - .byte 62 - .byte 125 - .byte 251 - .byte 246 - .byte 237 - .byte 219 - .byte 183 - .byte 111 - .byte 222 - .byte 189 - .byte 122 - .byte 245 - .byte 235 - .byte 215 - .byte 174 - .byte 93 - .byte 186 - .byte 116 - .byte 232 - .byte 209 - .byte 162 - .byte 68 - .byte 136 - .byte 16 - .byte 33 - .byte 67 - .byte 134 - .byte 13 - .byte 27 - .byte 54 - .byte 108 - .byte 216 - .byte 177 - .byte 99 - .byte 199 - .byte 143 - .byte 30 - .byte 60 - .byte 121 - .byte 243 - .byte 231 - .byte 206 - .byte 156 - .byte 57 - .byte 115 - .byte 230 - .byte 204 - .byte 152 - .byte 49 - .byte 98 - .byte 197 - .byte 139 - .byte 22 - .byte 45 - .byte 90 - .byte 180 - .byte 105 - .byte 210 - .byte 164 - .byte 72 - .byte 145 - .byte 34 - .byte 69 - - .text -.global knot512_permute_8 - .type knot512_permute_8, @function -knot512_permute_8: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in 
r29,0x3e - subi r28,96 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 113 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r26,Z+16 - ldd r27,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - ldd r26,Z+32 - ldd r27,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r8,Z+40 - ldd r9,Z+41 - ldd r10,Z+42 - ldd r11,Z+43 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - std Y+33,r26 - std Y+34,r27 - std Y+35,r2 - std Y+36,r3 - std Y+37,r4 - std Y+38,r5 - std Y+39,r6 - std Y+40,r7 - std Y+41,r8 - std Y+42,r9 - std Y+43,r10 - std Y+44,r11 - std Y+45,r12 - std Y+46,r13 - std Y+47,r14 - std Y+48,r15 - ldd r26,Z+48 - ldd r27,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r8,Z+56 - ldd r9,Z+57 - ldd r10,Z+58 - ldd r11,Z+59 - ldd r12,Z+60 - ldd r13,Z+61 - ldd r14,Z+62 - ldd r15,Z+63 - adiw r28,49 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y+,r12 - st Y+,r13 - st Y+,r14 - st Y,r15 - subi r28,64 - sbc r29,r1 - push r31 - push r30 - ldi r30,lo8(table_8) - ldi r31,hi8(table_8) -#if defined(RAMPZ) - ldi r17,hh8(table_8) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -134: - ldd r24,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r24,r18 - inc r30 - ldd r18,Y+17 - ldd r19,Y+33 - ldd r20,Y+49 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,175 - sbci r29,255 - st Y,r23 - subi r28,81 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r26,r18 - or r26,r19 - eor r26,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+1,r21 - mov r21,r26 - and r21,r24 - eor r21,r25 - subi r28,191 - sbci r29,255 - st Y,r21 - subi r28,65 - sbc r29,r1 - ldd r24,Y+2 - ldd r18,Y+18 - ldd r19,Y+34 - ldd r20,Y+50 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,174 - sbci r29,255 - st Y,r23 - subi r28,82 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r27,r18 - or r27,r19 - eor r27,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+2,r21 - mov r21,r27 - and r21,r24 - eor r21,r25 - subi r28,190 - sbci r29,255 - st Y,r21 - subi r28,66 - sbc r29,r1 - ldd r24,Y+3 - ldd r18,Y+19 - ldd r19,Y+35 - ldd r20,Y+51 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,173 - sbci r29,255 - st Y,r23 - subi r28,83 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r2,r18 - or r2,r19 - eor r2,r16 - mov r24,r18 - eor 
r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+3,r21 - mov r21,r2 - and r21,r24 - eor r21,r25 - subi r28,189 - sbci r29,255 - st Y,r21 - subi r28,67 - sbc r29,r1 - ldd r24,Y+4 - ldd r18,Y+20 - ldd r19,Y+36 - ldd r20,Y+52 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,172 - sbci r29,255 - st Y,r23 - subi r28,84 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r3,r18 - or r3,r19 - eor r3,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+4,r21 - mov r21,r3 - and r21,r24 - eor r21,r25 - subi r28,188 - sbci r29,255 - st Y,r21 - subi r28,68 - sbc r29,r1 - ldd r24,Y+5 - ldd r18,Y+21 - ldd r19,Y+37 - ldd r20,Y+53 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,171 - sbci r29,255 - st Y,r23 - subi r28,85 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r4,r18 - or r4,r19 - eor r4,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+5,r21 - mov r21,r4 - and r21,r24 - eor r21,r25 - subi r28,187 - sbci r29,255 - st Y,r21 - subi r28,69 - sbc r29,r1 - ldd r24,Y+6 - ldd r18,Y+22 - ldd r19,Y+38 - ldd r20,Y+54 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,170 - sbci r29,255 - st Y,r23 - subi r28,86 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r5,r18 - or r5,r19 - eor r5,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+6,r21 - mov r21,r5 - and r21,r24 - eor r21,r25 - subi r28,186 - sbci r29,255 - st Y,r21 - subi r28,70 - sbc r29,r1 - ldd r24,Y+7 - ldd r18,Y+23 - ldd r19,Y+39 - ldd r20,Y+55 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,169 - sbci r29,255 - st Y,r23 - subi r28,87 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r6,r18 - or r6,r19 - eor r6,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+7,r21 - mov r21,r6 - and r21,r24 - eor r21,r25 - subi r28,185 - sbci r29,255 - st Y,r21 - subi r28,71 - sbc r29,r1 - ldd r24,Y+8 - ldd r18,Y+24 - ldd r19,Y+40 - ldd r20,Y+56 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,168 - sbci r29,255 - st Y,r23 - subi r28,88 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r7,r18 - or r7,r19 - eor r7,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+8,r21 - mov r21,r7 - and r21,r24 - eor r21,r25 - subi r28,184 - sbci r29,255 - st Y,r21 - subi r28,72 - sbc r29,r1 - ldd r24,Y+9 - ldd r18,Y+25 - ldd r19,Y+41 - ldd r20,Y+57 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,167 - sbci r29,255 - st Y,r23 - subi r28,89 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r8,r18 - or r8,r19 - eor r8,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+9,r21 - mov r21,r8 - and r21,r24 - eor r21,r25 - subi r28,183 - sbci r29,255 - st Y,r21 - subi r28,73 - sbc r29,r1 - ldd r24,Y+10 - ldd r18,Y+26 - ldd r19,Y+42 - ldd r20,Y+58 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,166 - sbci r29,255 - st Y,r23 - subi r28,90 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r9,r18 - or r9,r19 - eor r9,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+10,r21 - mov r21,r9 - and r21,r24 - eor r21,r25 - subi r28,182 - sbci r29,255 - st Y,r21 - subi r28,74 - sbc r29,r1 - ldd r24,Y+11 - ldd r18,Y+27 - ldd r19,Y+43 - ldd r20,Y+59 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov 
r23,r20 - eor r23,r25 - subi r28,165 - sbci r29,255 - st Y,r23 - subi r28,91 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r10,r18 - or r10,r19 - eor r10,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+11,r21 - mov r21,r10 - and r21,r24 - eor r21,r25 - subi r28,181 - sbci r29,255 - st Y,r21 - subi r28,75 - sbc r29,r1 - ldd r24,Y+12 - ldd r18,Y+28 - ldd r19,Y+44 - ldd r20,Y+60 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,164 - sbci r29,255 - st Y,r23 - subi r28,92 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r11,r18 - or r11,r19 - eor r11,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+12,r21 - mov r21,r11 - and r21,r24 - eor r21,r25 - subi r28,180 - sbci r29,255 - st Y,r21 - subi r28,76 - sbc r29,r1 - ldd r24,Y+13 - ldd r18,Y+29 - ldd r19,Y+45 - ldd r20,Y+61 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,163 - sbci r29,255 - st Y,r23 - subi r28,93 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r12,r18 - or r12,r19 - eor r12,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+13,r21 - mov r21,r12 - and r21,r24 - eor r21,r25 - subi r28,179 - sbci r29,255 - st Y,r21 - subi r28,77 - sbc r29,r1 - ldd r24,Y+14 - ldd r18,Y+30 - ldd r19,Y+46 - ldd r20,Y+62 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,162 - sbci r29,255 - st Y,r23 - subi r28,94 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r13,r18 - or r13,r19 - eor r13,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+14,r21 - mov r21,r13 - and r21,r24 - eor r21,r25 - subi r28,178 - sbci r29,255 - st Y,r21 - subi r28,78 - sbc r29,r1 - ldd r24,Y+15 - ldd r18,Y+31 - ldd r19,Y+47 - ldd r20,Y+63 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,161 - sbci r29,255 - st Y,r23 - subi r28,95 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r14,r18 - or r14,r19 - eor r14,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+15,r21 - mov r21,r14 - and r21,r24 - eor r21,r25 - subi r28,177 - sbci r29,255 - st Y,r21 - subi r28,79 - sbc r29,r1 - ldd r24,Y+16 - ldd r18,Y+32 - ldd r19,Y+48 - subi r28,192 - sbci r29,255 - ld r20,Y - subi r28,64 - sbc r29,r1 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,160 - sbci r29,255 - st Y,r23 - subi r28,96 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r15,r18 - or r15,r19 - eor r15,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+16,r21 - mov r21,r15 - and r21,r24 - eor r21,r25 - subi r28,176 - sbci r29,255 - st Y,r21 - subi r28,80 - sbc r29,r1 - std Y+33,r14 - std Y+34,r15 - std Y+35,r26 - std Y+36,r27 - std Y+37,r2 - std Y+38,r3 - std Y+39,r4 - std Y+40,r5 - std Y+41,r6 - std Y+42,r7 - std Y+43,r8 - std Y+44,r9 - std Y+45,r10 - std Y+46,r11 - std Y+47,r12 - std Y+48,r13 - subi r28,191 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,80 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std 
Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,96 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - adiw r28,49 - st Y+,r13 - st Y+,r14 - st Y+,r15 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y,r12 - subi r28,64 - sbc r29,r1 - dec r22 - breq 5812f - rjmp 134b -5812: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r26,Y+17 - ldd r27,Y+18 - ldd r2,Y+19 - ldd r3,Y+20 - ldd r4,Y+21 - ldd r5,Y+22 - ldd r6,Y+23 - ldd r7,Y+24 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - std Z+16,r26 - std Z+17,r27 - std Z+18,r2 - std Z+19,r3 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - ldd r26,Y+33 - ldd r27,Y+34 - ldd r2,Y+35 - ldd r3,Y+36 - ldd r4,Y+37 - ldd r5,Y+38 - ldd r6,Y+39 - ldd r7,Y+40 - ldd r8,Y+41 - ldd r9,Y+42 - ldd r10,Y+43 - ldd r11,Y+44 - ldd r12,Y+45 - ldd r13,Y+46 - ldd r14,Y+47 - ldd r15,Y+48 - std Z+32,r26 - std Z+33,r27 - std Z+34,r2 - std Z+35,r3 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r8 - std Z+41,r9 - std Z+42,r10 - std Z+43,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - adiw r28,49 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,64 - sbc r29,r1 - std Z+48,r26 - std Z+49,r27 - std Z+50,r2 - std Z+51,r3 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - std Z+56,r8 - std Z+57,r9 - std Z+58,r10 - std Z+59,r11 - std Z+60,r12 - std Z+61,r13 - std Z+62,r14 - std Z+63,r15 - subi r28,160 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot512_permute_8, .-knot512_permute_8 - -#endif diff --git a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot.c b/knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot.c deleted file mode 100644 index f8b378e..0000000 --- a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot.c +++ /dev/null @@ -1,301 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-knot.h" - -#if !defined(__AVR__) - -/* Round constants for the KNOT-256, KNOT-384, and KNOT-512 permutations */ -static uint8_t const rc6[52] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06, 0x0c, 0x18, 0x31, 0x22, - 0x05, 0x0a, 0x14, 0x29, 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28, - 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24, 0x09, 0x12, 0x25, 0x0b, - 0x16, 0x2d, 0x1b, 0x37, 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26, - 0x0d, 0x1a, 0x35, 0x2a -}; -static uint8_t const rc7[104] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03, 0x06, 0x0c, 0x18, 0x30, - 0x61, 0x42, 0x05, 0x0a, 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c, - 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b, 0x16, 0x2c, 0x59, 0x33, - 0x67, 0x4e, 0x1d, 0x3a, 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f, - 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43, 0x07, 0x0e, 0x1c, 0x38, - 0x71, 0x62, 0x44, 0x09, 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36, - 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37, 0x6f, 0x5e, 0x3d, 0x7b, - 0x76, 0x6c, 0x58, 0x31, 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25, - 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c -}; -static uint8_t const rc8[140] = { - 0x01, 0x02, 0x04, 0x08, 0x11, 0x23, 0x47, 0x8e, 0x1c, 0x38, 0x71, 0xe2, - 0xc4, 0x89, 0x12, 0x25, 0x4b, 0x97, 0x2e, 0x5c, 0xb8, 0x70, 0xe0, 0xc0, - 0x81, 0x03, 0x06, 0x0c, 0x19, 0x32, 0x64, 0xc9, 0x92, 0x24, 0x49, 0x93, - 0x26, 0x4d, 0x9b, 0x37, 0x6e, 0xdc, 0xb9, 0x72, 0xe4, 0xc8, 0x90, 0x20, - 0x41, 0x82, 0x05, 0x0a, 0x15, 0x2b, 0x56, 0xad, 0x5b, 0xb6, 0x6d, 0xda, - 0xb5, 0x6b, 0xd6, 0xac, 0x59, 0xb2, 0x65, 0xcb, 0x96, 0x2c, 0x58, 0xb0, - 0x61, 0xc3, 0x87, 0x0f, 0x1f, 0x3e, 0x7d, 0xfb, 0xf6, 0xed, 0xdb, 0xb7, - 0x6f, 0xde, 0xbd, 0x7a, 0xf5, 0xeb, 0xd7, 0xae, 0x5d, 0xba, 0x74, 0xe8, - 0xd1, 0xa2, 0x44, 0x88, 0x10, 0x21, 0x43, 0x86, 0x0d, 0x1b, 0x36, 0x6c, - 0xd8, 0xb1, 0x63, 0xc7, 0x8f, 0x1e, 0x3c, 0x79, 0xf3, 0xe7, 0xce, 0x9c, - 0x39, 0x73, 0xe6, 0xcc, 0x98, 0x31, 0x62, 0xc5, 0x8b, 0x16, 0x2d, 0x5a, - 0xb4, 0x69, 0xd2, 0xa4, 0x48, 0x91, 0x22, 0x45 -}; - -/* Applies the KNOT S-box to four 64-bit words in bit-sliced mode */ -#define knot_sbox64(a0, a1, a2, a3, b1, b2, b3) \ - do { \ - uint64_t t1, t3, t6; \ - t1 = ~(a0); \ - t3 = (a2) ^ ((a1) & t1); \ - (b3) = (a3) ^ t3; \ - t6 = (a3) ^ t1; \ - (b2) = ((a1) | (a2)) ^ t6; \ - t1 = (a1) ^ (a3); \ - (a0) = t1 ^ (t3 & t6); \ - (b1) = t3 ^ ((b2) & t1); \ - } while (0) - -/* Applies 
the KNOT S-box to four 32-bit words in bit-sliced mode */ -#define knot_sbox32(a0, a1, a2, a3, b1, b2, b3) \ - do { \ - uint32_t t1, t3, t6; \ - t1 = ~(a0); \ - t3 = (a2) ^ ((a1) & t1); \ - (b3) = (a3) ^ t3; \ - t6 = (a3) ^ t1; \ - (b2) = ((a1) | (a2)) ^ t6; \ - t1 = (a1) ^ (a3); \ - (a0) = t1 ^ (t3 & t6); \ - (b1) = t3 ^ ((b2) & t1); \ - } while (0) - -static void knot256_permute - (knot256_state_t *state, const uint8_t *rc, uint8_t rounds) -{ - uint64_t b1, b2, b3; - - /* Load the input state into local variables; each row is 64 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x1, x2, x3, b1, b2, b3); - - /* Linear diffusion layer */ - x1 = leftRotate1_64(b1); - x2 = leftRotate8_64(b2); - x3 = leftRotate25_64(b3); - } - - /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); -#endif -} - -void knot256_permute_6(knot256_state_t *state, uint8_t rounds) -{ - knot256_permute(state, rc6, rounds); -} - -void knot256_permute_7(knot256_state_t *state, uint8_t rounds) -{ - knot256_permute(state, rc7, rounds); -} - -void knot384_permute_7(knot384_state_t *state, uint8_t rounds) -{ - const uint8_t *rc = rc7; - uint64_t b2, b4, b6; - uint32_t b3, b5, b7; - - /* Load the input state into local variables; each row is 96 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint32_t x1 = state->W[2]; - uint64_t x2 = state->W[3] | (((uint64_t)(state->W[4])) << 32); - uint32_t x3 = state->W[5]; - uint64_t x4 = state->S[3]; - uint32_t x5 = state->W[8]; - uint64_t x6 = state->W[9] | (((uint64_t)(state->W[10])) << 32); - uint32_t x7 = state->W[11]; -#else - uint64_t x0 = le_load_word64(state->B); - uint32_t x1 = le_load_word32(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 12); - uint32_t x3 = le_load_word32(state->B + 20); - uint64_t x4 = le_load_word64(state->B + 24); - uint32_t x5 = le_load_word32(state->B + 32); - uint64_t x6 = le_load_word64(state->B + 36); - uint32_t x7 = le_load_word32(state->B + 44); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x2, x4, x6, b2, b4, b6); - knot_sbox32(x1, x3, x5, x7, b3, b5, b7); - - /* Linear diffusion layer */ - #define leftRotateShort_96(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | ((b1) >> (32 - (bits))); \ - (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ - } while (0) - #define leftRotateLong_96(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | \ - (((uint64_t)(b1)) << ((bits) - 32)) | \ - ((b0) >> (96 - (bits))); \ - (a1) = (uint32_t)(((b0) << ((bits) - 32)) >> 32); \ - } while (0) - leftRotateShort_96(x2, x3, b2, b3, 1); - leftRotateShort_96(x4, x5, b4, b5, 8); - leftRotateLong_96(x6, x7, b6, b7, 55); - } - 
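The reference C code removed above evaluates the KNOT S-box in bit-sliced form: each 64-bit (or 32-bit) word holds one row of the state, so a short sequence of NOT/AND/OR/XOR operations applies the 4-bit S-box to every column at once, and the linear layer reduces to per-row rotations. A minimal standalone sketch of one KNOT-256 round with that structure follows; the rotl64 helper and the knot256_round name are illustrative stand-ins for the library's leftRotateN_64 macros and are not taken from the patched sources.

    /* Illustrative sketch -- not part of the patch.  One round of KNOT-256
       using the same bit-sliced S-box algebra as the deleted knot_sbox64
       macro; rotl64 stands in for the library's leftRotateN_64 helpers. */
    #include <stdint.h>

    static inline uint64_t rotl64(uint64_t x, unsigned bits)
    {
        return (x << bits) | (x >> (64U - bits));
    }

    static void knot256_round(uint64_t x[4], uint8_t rc)
    {
        uint64_t t1, t3, t6, b1, b2, b3;

        x[0] ^= rc;                   /* add the next round constant */

        t1 = ~x[0];                   /* bit-sliced 4-bit S-box */
        t3 = x[2] ^ (x[1] & t1);
        b3 = x[3] ^ t3;
        t6 = x[3] ^ t1;
        b2 = (x[1] | x[2]) ^ t6;
        t1 = x[1] ^ x[3];
        x[0] = t1 ^ (t3 & t6);
        b1 = t3 ^ (b2 & t1);

        x[1] = rotl64(b1, 1);         /* linear diffusion: rotate rows */
        x[2] = rotl64(b2, 8);
        x[3] = rotl64(b3, 25);
    }

Because the S-box is computed purely with word-level logic operations, the round needs no data-dependent table lookups, which keeps both this C form and the generated AVR assembly above it constant-time with respect to the state.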
- /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->W[2] = x1; - state->W[3] = (uint32_t)x2; - state->W[4] = (uint32_t)(x2 >> 32); - state->W[5] = x3; - state->S[3] = x4; - state->W[8] = x5; - state->W[9] = (uint32_t)x6; - state->W[10] = (uint32_t)(x6 >> 32); - state->W[11] = x7; -#else - le_store_word64(state->B, x0); - le_store_word32(state->B + 8, x1); - le_store_word64(state->B + 12, x2); - le_store_word32(state->B + 20, x3); - le_store_word64(state->B + 24, x4); - le_store_word32(state->B + 32, x5); - le_store_word64(state->B + 36, x6); - le_store_word32(state->B + 44, x7); -#endif -} - -static void knot512_permute - (knot512_state_t *state, const uint8_t *rc, uint8_t rounds) -{ - uint64_t b2, b3, b4, b5, b6, b7; - - /* Load the input state into local variables; each row is 128 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; - uint64_t x5 = state->S[5]; - uint64_t x6 = state->S[6]; - uint64_t x7 = state->S[7]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); - uint64_t x4 = le_load_word64(state->B + 32); - uint64_t x5 = le_load_word64(state->B + 40); - uint64_t x6 = le_load_word64(state->B + 48); - uint64_t x7 = le_load_word64(state->B + 56); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x2, x4, x6, b2, b4, b6); - knot_sbox64(x1, x3, x5, x7, b3, b5, b7); - - /* Linear diffusion layer */ - #define leftRotate_128(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | ((b1) >> (64 - (bits))); \ - (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ - } while (0) - leftRotate_128(x2, x3, b2, b3, 1); - leftRotate_128(x4, x5, b4, b5, 16); - leftRotate_128(x6, x7, b6, b7, 25); - } - - /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; - state->S[5] = x5; - state->S[6] = x6; - state->S[7] = x7; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); - le_store_word64(state->B + 32, x4); - le_store_word64(state->B + 40, x5); - le_store_word64(state->B + 48, x6); - le_store_word64(state->B + 56, x7); -#endif -} - -void knot512_permute_7(knot512_state_t *state, uint8_t rounds) -{ - knot512_permute(state, rc7, rounds); -} - -void knot512_permute_8(knot512_state_t *state, uint8_t rounds) -{ - knot512_permute(state, rc8, rounds); -} - -#endif /* !__AVR__ */ diff --git a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot.h b/knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot.h deleted file mode 100644 index 88a782c..0000000 --- a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-knot.h +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_KNOT_H -#define LW_INTERNAL_KNOT_H - -#include "internal-util.h" - -/** - * \file internal-knot.h - * \brief Permutations that are used by the KNOT AEAD and hash algorithms. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Internal state of the KNOT-256 permutation. - */ -typedef union -{ - uint64_t S[4]; /**< Words of the state */ - uint8_t B[32]; /**< Bytes of the state */ - -} knot256_state_t; - -/** - * \brief Internal state of the KNOT-384 permutation. - */ -typedef union -{ - uint64_t S[6]; /**< 64-bit words of the state */ - uint32_t W[12]; /**< 32-bit words of the state */ - uint8_t B[48]; /**< Bytes of the state */ - -} knot384_state_t; - -/** - * \brief Internal state of the KNOT-512 permutation. - */ -typedef union -{ - uint64_t S[8]; /**< Words of the state */ - uint8_t B[64]; /**< Bytes of the state */ - -} knot512_state_t; - -/** - * \brief Permutes the KNOT-256 state, using 6-bit round constants. - * - * \param state The KNOT-256 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 52. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot256_permute_6(knot256_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-256 state, using 7-bit round constants. - * - * \param state The KNOT-256 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot256_permute_7(knot256_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-384 state, using 7-bit round constants. - * - * \param state The KNOT-384 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot384_permute_7(knot384_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-512 state, using 7-bit round constants. - * - * \param state The KNOT-512 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot512_permute_7(knot512_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-512 state, using 8-bit round constants. - * - * \param state The KNOT-512 state to be permuted. 
- * \param rounds The number of rounds to be performed, 1 to 140. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot512_permute_8(knot512_state_t *state, uint8_t rounds); - -/** - * \brief Generic pointer to a function that performs a KNOT permutation. - * - * \param state Points to the permutation state. - * \param round Number of rounds to perform. - */ -typedef void (*knot_permute_t)(void *state, uint8_t rounds); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-util.h b/knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
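The le_/be_ load and store macros above assemble words one byte at a time, so they are safe for unaligned buffers and independent of host endianness. A quick round-trip check, assuming a stand-alone build with the two 32-bit little-endian macros copied from internal-util.h; main() and the test value are illustrative only:

/* Sketch: storing and reloading a 32-bit word in little-endian order. */
#include <stdint.h>
#include <assert.h>

#define le_load_word32(ptr) \
    ((((uint32_t)((ptr)[3])) << 24) | \
     (((uint32_t)((ptr)[2])) << 16) | \
     (((uint32_t)((ptr)[1])) << 8) | \
     ((uint32_t)((ptr)[0])))
#define le_store_word32(ptr, x) \
    do { \
        uint32_t _x = (x); \
        (ptr)[0] = (uint8_t)_x; \
        (ptr)[1] = (uint8_t)(_x >> 8); \
        (ptr)[2] = (uint8_t)(_x >> 16); \
        (ptr)[3] = (uint8_t)(_x >> 24); \
    } while (0)

int main(void)
{
    uint8_t buf[4];
    le_store_word32(buf, 0x11223344U);
    assert(buf[0] == 0x44 && buf[3] == 0x11);   /* least significant byte first */
    assert(le_load_word32(buf) == 0x11223344U); /* load undoes the store */
    return 0;
}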
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
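Among the lw_xor_block helpers above, lw_xor_block_2_dest is the core duplex step used later by knot_aead_encrypt: it XORs the message into the state and writes the new state bytes out as ciphertext in one pass. A plain-loop sketch of the same operation; the function and buffer names and main() are illustrative only:

/* Sketch: one pass both absorbs the plaintext and emits the ciphertext. */
#include <stdint.h>
#include <assert.h>

static void xor_block_2_dest(uint8_t *dest2, uint8_t *dest,
                             const uint8_t *src, unsigned len)
{
    while (len > 0) {
        *dest2++ = (*dest++ ^= *src++); /* state ^= m; c = new state byte */
        --len;
    }
}

int main(void)
{
    uint8_t state[4] = {0xAA, 0xBB, 0xCC, 0xDD};
    uint8_t m[4]     = {0x01, 0x02, 0x03, 0x04};
    uint8_t c[4];
    xor_block_2_dest(c, state, m, 4);
    assert(c[0] == (0xAA ^ 0x01) && state[0] == c[0]);
    return 0;
}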
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
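The LW_CRYPTO_ROTATE32_COMPOSED branch above rebuilds every 32-bit rotation from the rotations that are cheap on AVR (by 1 bit or by whole bytes): for example, rotate-left-by-5 becomes rotate-left-by-8 followed by three rotate-right-by-1 steps. A stand-alone equivalence check for that one case; the helper names, test value and main() are illustrative only:

/* Sketch: a composed rotation must equal the generic one. */
#include <stdint.h>
#include <assert.h>

static uint32_t rotl(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32 - bits));
}
static uint32_t rotr(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32 - bits));
}

int main(void)
{
    uint32_t x = 0x12345678U;
    /* leftRotate5 composed as in internal-util.h: left 8, then right 1 three times */
    uint32_t composed = rotr(rotr(rotr(rotl(x, 8), 1), 1), 1);
    assert(composed == rotl(x, 5));
    return 0;
}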
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/knot-aead.c b/knot/Implementations/crypto_aead/knot128v2/rhys-avr/knot-aead.c deleted file mode 100644 index 5825f01..0000000 --- a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/knot-aead.c +++ /dev/null @@ -1,503 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "knot.h" -#include "internal-knot.h" -#include - -aead_cipher_t const knot_aead_128_256_cipher = { - "KNOT-AEAD-128-256", - KNOT_AEAD_128_KEY_SIZE, - KNOT_AEAD_128_NONCE_SIZE, - KNOT_AEAD_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_aead_128_256_encrypt, - knot_aead_128_256_decrypt -}; - -aead_cipher_t const knot_aead_128_384_cipher = { - "KNOT-AEAD-128-384", - KNOT_AEAD_128_KEY_SIZE, - KNOT_AEAD_128_NONCE_SIZE, - KNOT_AEAD_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_aead_128_384_encrypt, - knot_aead_128_384_decrypt -}; - -aead_cipher_t const knot_aead_192_384_cipher = { - "KNOT-AEAD-192-384", - KNOT_AEAD_192_KEY_SIZE, - KNOT_AEAD_192_NONCE_SIZE, - KNOT_AEAD_192_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_aead_192_384_encrypt, - knot_aead_192_384_decrypt -}; - -aead_cipher_t const knot_aead_256_512_cipher = { - "KNOT-AEAD-256-512", - KNOT_AEAD_256_KEY_SIZE, - KNOT_AEAD_256_NONCE_SIZE, - KNOT_AEAD_256_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_aead_256_512_encrypt, - knot_aead_256_512_decrypt -}; - -/** - * \brief Rate for KNOT-AEAD-128-256. - */ -#define KNOT_AEAD_128_256_RATE 8 - -/** - * \brief Rate for KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_384_RATE 24 - -/** - * \brief Rate for KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_384_RATE 12 - -/** - * \brief Rate for KNOT-AEAD-256-512. - */ -#define KNOT_AEAD_256_512_RATE 16 - -/** - * \brief Absorbs the associated data into a KNOT permutation state. - * - * \param state Points to the KNOT permutation state. - * \param permute Points to the function to perform the KNOT permutation. - * \param rounds Number of rounds to perform. 
- * \param rate Rate of absorption to use with the permutation. - * \param ad Points to the associated data. - * \param adlen Length of the associated data, must be at least 1. - */ -static void knot_aead_absorb_ad - (void *state, knot_permute_t permute, uint8_t rounds, unsigned rate, - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen >= rate) { - lw_xor_block((unsigned char *)state, ad, rate); - permute(state, rounds); - ad += rate; - adlen -= rate; - } - rate = (unsigned)adlen; - lw_xor_block((unsigned char *)state, ad, rate); - ((unsigned char *)state)[rate] ^= 0x01; - permute(state, rounds); -} - -/** - * \brief Encrypts plaintext data with a KNOT permutation state. - * - * \param state Points to the KNOT permutation state. - * \param permute Points to the function to perform the KNOT permutation. - * \param rounds Number of rounds to perform. - * \param rate Rate of absorption to use with the permutation. - * \param c Buffer to receive the ciphertext. - * \param m Buffer containing the plaintext. - * \param len Length of the plaintext data, must be at least 1. - */ -static void knot_aead_encrypt - (void *state, knot_permute_t permute, uint8_t rounds, unsigned rate, - unsigned char *c, const unsigned char *m, unsigned long long len) -{ - while (len >= rate) { - lw_xor_block_2_dest(c, (unsigned char *)state, m, rate); - permute(state, rounds); - c += rate; - m += rate; - len -= rate; - } - rate = (unsigned)len; - lw_xor_block_2_dest(c, (unsigned char *)state, m, rate); - ((unsigned char *)state)[rate] ^= 0x01; -} - -/** - * \brief Decrypts ciphertext data with a KNOT permutation state. - * - * \param state Points to the KNOT permutation state. - * \param permute Points to the function to perform the KNOT permutation. - * \param rounds Number of rounds to perform. - * \param rate Rate of absorption to use with the permutation. - * \param m Buffer to receive the plaintext. - * \param c Buffer containing the ciphertext. - * \param len Length of the plaintext data, must be at least 1. 
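All three helpers here (absorb, encrypt, decrypt) share the same tail handling: after whole rate-sized blocks have been processed, the leftover bytes are XORed into the state and a single 0x01 padding byte is XORed at the offset equal to the leftover length. A small sketch of just that step; the names and main() are illustrative only:

/* Sketch: padding of the final partial block, as in knot_aead_absorb_ad. */
#include <stdint.h>
#include <assert.h>

static void absorb_tail(uint8_t *state, const uint8_t *data, unsigned leftover)
{
    unsigned i;
    for (i = 0; i < leftover; ++i)
        state[i] ^= data[i];
    state[leftover] ^= 0x01; /* padding bit goes right after the data */
}

int main(void)
{
    uint8_t state[8] = {0};
    const uint8_t ad[3] = {0x10, 0x20, 0x30};
    absorb_tail(state, ad, 3);
    assert(state[2] == 0x30 && state[3] == 0x01);
    return 0;
}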
- */ -static void knot_aead_decrypt - (void *state, knot_permute_t permute, uint8_t rounds, unsigned rate, - unsigned char *m, const unsigned char *c, unsigned long long len) -{ - while (len >= rate) { - lw_xor_block_swap(m, (unsigned char *)state, c, rate); - permute(state, rounds); - c += rate; - m += rate; - len -= rate; - } - rate = (unsigned)len; - lw_xor_block_swap(m, (unsigned char *)state, c, rate); - ((unsigned char *)state)[rate] ^= 0x01; -} - -int knot_aead_128_256_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - knot256_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + KNOT_AEAD_128_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_128_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_128_NONCE_SIZE, k, KNOT_AEAD_128_KEY_SIZE); - knot256_permute_6(&state, 52); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot256_permute_6, - 28, KNOT_AEAD_128_256_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Encrypts the plaintext to produce the ciphertext */ - if (mlen > 0) { - knot_aead_encrypt - (&state, (knot_permute_t)knot256_permute_6, - 28, KNOT_AEAD_128_256_RATE, c, m, mlen); - } - - /* Compute the authentication tag */ - knot256_permute_6(&state, 32); - memcpy(c + mlen, state.B, KNOT_AEAD_128_TAG_SIZE); - return 0; -} - -int knot_aead_128_256_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - knot256_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < KNOT_AEAD_128_TAG_SIZE) - return -1; - *mlen = clen - KNOT_AEAD_128_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_128_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_128_NONCE_SIZE, k, KNOT_AEAD_128_KEY_SIZE); - knot256_permute_6(&state, 52); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot256_permute_6, - 28, KNOT_AEAD_128_256_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Decrypts the ciphertext to produce the plaintext */ - clen -= KNOT_AEAD_128_TAG_SIZE; - if (clen > 0) { - knot_aead_decrypt - (&state, (knot_permute_t)knot256_permute_6, - 28, KNOT_AEAD_128_256_RATE, m, c, clen); - } - - /* Check the authentication tag */ - knot256_permute_6(&state, 32); - return aead_check_tag - (m, clen, state.B, c + clen, KNOT_AEAD_128_TAG_SIZE); -} - -int knot_aead_128_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - knot384_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + KNOT_AEAD_128_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_128_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_128_NONCE_SIZE, k, KNOT_AEAD_128_KEY_SIZE); - 
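    /* KNOT-AEAD-128-384 uses a 48-byte state but only a 16-byte nonce and a
     * 16-byte key, so the memset below clears bytes 32..46 and the final
     * byte (47) is set to 0x80 before the 76-round initialization
     * permutation. */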
memset(state.B + KNOT_AEAD_128_NONCE_SIZE + KNOT_AEAD_128_KEY_SIZE, - 0, 47 - (KNOT_AEAD_128_NONCE_SIZE + KNOT_AEAD_128_KEY_SIZE)); - state.B[47] = 0x80; - knot384_permute_7(&state, 76); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot384_permute_7, - 28, KNOT_AEAD_128_384_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Encrypts the plaintext to produce the ciphertext */ - if (mlen > 0) { - knot_aead_encrypt - (&state, (knot_permute_t)knot384_permute_7, - 28, KNOT_AEAD_128_384_RATE, c, m, mlen); - } - - /* Compute the authentication tag */ - knot384_permute_7(&state, 32); - memcpy(c + mlen, state.B, KNOT_AEAD_128_TAG_SIZE); - return 0; -} - -int knot_aead_128_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - knot384_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < KNOT_AEAD_128_TAG_SIZE) - return -1; - *mlen = clen - KNOT_AEAD_128_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_128_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_128_NONCE_SIZE, k, KNOT_AEAD_128_KEY_SIZE); - memset(state.B + KNOT_AEAD_128_NONCE_SIZE + KNOT_AEAD_128_KEY_SIZE, - 0, 47 - (KNOT_AEAD_128_NONCE_SIZE + KNOT_AEAD_128_KEY_SIZE)); - state.B[47] = 0x80; - knot384_permute_7(&state, 76); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot384_permute_7, - 28, KNOT_AEAD_128_384_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Decrypts the ciphertext to produce the plaintext */ - clen -= KNOT_AEAD_128_TAG_SIZE; - if (clen > 0) { - knot_aead_decrypt - (&state, (knot_permute_t)knot384_permute_7, - 28, KNOT_AEAD_128_384_RATE, m, c, clen); - } - - /* Check the authentication tag */ - knot384_permute_7(&state, 32); - return aead_check_tag - (m, clen, state.B, c + clen, KNOT_AEAD_128_TAG_SIZE); -} - -int knot_aead_192_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - knot384_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + KNOT_AEAD_192_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_192_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_192_NONCE_SIZE, k, KNOT_AEAD_192_KEY_SIZE); - knot384_permute_7(&state, 76); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot384_permute_7, - 40, KNOT_AEAD_192_384_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Encrypts the plaintext to produce the ciphertext */ - if (mlen > 0) { - knot_aead_encrypt - (&state, (knot_permute_t)knot384_permute_7, - 40, KNOT_AEAD_192_384_RATE, c, m, mlen); - } - - /* Compute the authentication tag */ - knot384_permute_7(&state, 44); - memcpy(c + mlen, state.B, KNOT_AEAD_192_TAG_SIZE); - return 0; -} - -int knot_aead_192_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long 
long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - knot384_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < KNOT_AEAD_192_TAG_SIZE) - return -1; - *mlen = clen - KNOT_AEAD_192_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_192_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_192_NONCE_SIZE, k, KNOT_AEAD_192_KEY_SIZE); - knot384_permute_7(&state, 76); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot384_permute_7, - 40, KNOT_AEAD_192_384_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Decrypts the ciphertext to produce the plaintext */ - clen -= KNOT_AEAD_192_TAG_SIZE; - if (clen > 0) { - knot_aead_decrypt - (&state, (knot_permute_t)knot384_permute_7, - 40, KNOT_AEAD_192_384_RATE, m, c, clen); - } - - /* Check the authentication tag */ - knot384_permute_7(&state, 44); - return aead_check_tag - (m, clen, state.B, c + clen, KNOT_AEAD_192_TAG_SIZE); -} - -int knot_aead_256_512_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - knot512_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + KNOT_AEAD_256_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_256_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_256_NONCE_SIZE, k, KNOT_AEAD_256_KEY_SIZE); - knot512_permute_7(&state, 100); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot512_permute_7, - 52, KNOT_AEAD_256_512_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Encrypts the plaintext to produce the ciphertext */ - if (mlen > 0) { - knot_aead_encrypt - (&state, (knot_permute_t)knot512_permute_7, - 52, KNOT_AEAD_256_512_RATE, c, m, mlen); - } - - /* Compute the authentication tag */ - knot512_permute_7(&state, 56); - memcpy(c + mlen, state.B, KNOT_AEAD_256_TAG_SIZE); - return 0; -} - -int knot_aead_256_512_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - knot512_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < KNOT_AEAD_256_TAG_SIZE) - return -1; - *mlen = clen - KNOT_AEAD_256_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_256_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_256_NONCE_SIZE, k, KNOT_AEAD_256_KEY_SIZE); - knot512_permute_7(&state, 100); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot512_permute_7, - 52, KNOT_AEAD_256_512_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Decrypts the ciphertext to produce the plaintext */ - clen -= KNOT_AEAD_256_TAG_SIZE; - if (clen > 0) { - knot_aead_decrypt - (&state, (knot_permute_t)knot512_permute_7, - 52, KNOT_AEAD_256_512_RATE, m, c, clen); - } - - /* Check the authentication tag */ - 
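    /* The computed tag is the first KNOT_AEAD_256_TAG_SIZE bytes of the
     * state after this final 56-round permutation.  aead_check_tag() is
     * defined in aead-common.c (not shown in this hunk); it is expected to
     * compare the computed and received tags in constant time, and it also
     * receives the decrypted plaintext and its length, presumably so the
     * plaintext can be invalidated when verification fails. */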
knot512_permute_7(&state, 56); - return aead_check_tag - (m, clen, state.B, c + clen, KNOT_AEAD_256_TAG_SIZE); -} diff --git a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/knot.h b/knot/Implementations/crypto_aead/knot128v2/rhys-avr/knot.h deleted file mode 100644 index e2c5198..0000000 --- a/knot/Implementations/crypto_aead/knot128v2/rhys-avr/knot.h +++ /dev/null @@ -1,459 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_KNOT_H -#define LWCRYPTO_KNOT_H - -#include "aead-common.h" - -/** - * \file knot.h - * \brief KNOT authenticated encryption and hash algorithms. - * - * KNOT is a family of authenticated encryption and hash algorithms built - * around a permutation and the MonkeyDuplex sponge construction. The - * family members are: - * - * \li KNOT-AEAD-128-256 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag, built around a 256-bit permutation. This is the primary - * encryption member of the family. - * \li KNOT-AEAD-128-384 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag, built around a 384-bit permutation. - * \li KNOT-AEAD-192-384 with a 192-bit key, a 192-bit nonce, and a - * 192-bit tag, built around a 384-bit permutation. - * \li KNOT-AEAD-256-512 with a 256-bit key, a 256-bit nonce, and a - * 256-bit tag, built around a 512-bit permutation. - * \li KNOT-HASH-256-256 with a 256-bit hash output, built around a - * 256-bit permutation. This is the primary hashing member of the family. - * \li KNOT-HASH-256-384 with a 256-bit hash output, built around a - * 384-bit permutation. - * \li KNOT-HASH-384-384 with a 384-bit hash output, built around a - * 384-bit permutation. - * \li KNOT-HASH-512-512 with a 512-bit hash output, built around a - * 512-bit permutation. - * - * References: https://csrc.nist.gov/CSRC/media/Projects/lightweight-cryptography/documents/round-2/spec-doc-rnd2/knot-spec-round.pdf - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-128-256 and - * KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_NONCE_SIZE 16 - -/** - * \brief Size of the key for KNOT-AEAD-192-384. 
- */ -#define KNOT_AEAD_192_KEY_SIZE 24 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_TAG_SIZE 24 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_NONCE_SIZE 24 - -/** - * \brief Size of the key for KNOT-AEAD-256-512. - */ -#define KNOT_AEAD_256_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-256-512. - */ -#define KNOT_AEAD_256_TAG_SIZE 32 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_256_NONCE_SIZE 32 - -/** - * \brief Size of the hash for KNOT-HASH-256-256 and KNOT-HASH-256-384. - */ -#define KNOT_HASH_256_SIZE 32 - -/** - * \brief Size of the hash for KNOT-HASH-384-384. - */ -#define KNOT_HASH_384_SIZE 48 - -/** - * \brief Size of the hash for KNOT-HASH-512-512. - */ -#define KNOT_HASH_512_SIZE 64 - -/** - * \brief Meta-information block for the KNOT-AEAD-128-256 cipher. - */ -extern aead_cipher_t const knot_aead_128_256_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-128-384 cipher. - */ -extern aead_cipher_t const knot_aead_128_384_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-192-384 cipher. - */ -extern aead_cipher_t const knot_aead_192_384_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-256-512 cipher. - */ -extern aead_cipher_t const knot_aead_256_512_cipher; - -/** - * \brief Meta-information block for the KNOT-HASH-256-256 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_256_256_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-256-384 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_256_384_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-384-384 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_384_384_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-512-512 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_512_512_algorithm; - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_128_256_decrypt() - */ -int knot_aead_128_256_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. 
- * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_128_256_encrypt() - */ -int knot_aead_128_256_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-384. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_128_384_decrypt() - */ -int knot_aead_128_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-384. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
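Putting the declarations above together, a minimal round trip through KNOT-AEAD-128-256 might look like the following sketch. It is an untested illustration: the buffer names and the all-zero key and nonce are mine, and it assumes linking against knot-aead.c and the KNOT permutation sources from this implementation:

/* Sketch: encrypt a short message, then decrypt and verify it. */
#include <string.h>
#include <stdio.h>
#include "knot.h"

int main(void)
{
    unsigned char key[KNOT_AEAD_128_KEY_SIZE] = {0};
    unsigned char npub[KNOT_AEAD_128_NONCE_SIZE] = {0};
    unsigned char msg[4] = {'t', 'e', 's', 't'};
    unsigned char c[sizeof(msg) + KNOT_AEAD_128_TAG_SIZE];
    unsigned char out[sizeof(msg)];
    unsigned long long clen, mlen;

    knot_aead_128_256_encrypt(c, &clen, msg, sizeof(msg),
                              NULL, 0,      /* no associated data */
                              NULL, npub, key);
    if (knot_aead_128_256_decrypt(out, &mlen, NULL, c, clen,
                                  NULL, 0, npub, key) != 0) {
        printf("tag check failed\n");
        return 1;
    }
    printf("recovered %llu bytes\n", mlen);
    return memcmp(out, msg, sizeof(msg)) != 0;
}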
- * - * \sa knot_aead_128_384_encrypt() - */ -int knot_aead_128_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-192-384. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_192_384_decrypt() - */ -int knot_aead_192_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-192-384. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_192_384_encrypt() - */ -int knot_aead_192_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-256-512. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. 
- * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_256_512_decrypt() - */ -int knot_aead_256_512_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-256-512. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_256_512_encrypt() - */ -int knot_aead_256_512_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with KNOT-HASH-256-256. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_256_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_256_256 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-256-384. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_256_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_256_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-384-384. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_384_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_384_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-512-512. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_512_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. 
- * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_512_512 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_aead/knot128v2/rhys/internal-knot-256-avr.S b/knot/Implementations/crypto_aead/knot128v2/rhys/internal-knot-256-avr.S new file mode 100644 index 0000000..15e6389 --- /dev/null +++ b/knot/Implementations/crypto_aead/knot128v2/rhys/internal-knot-256-avr.S @@ -0,0 +1,1093 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_6, @object + .size table_6, 52 +table_6: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 33 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 49 + .byte 34 + .byte 5 + .byte 10 + .byte 20 + .byte 41 + .byte 19 + .byte 39 + .byte 15 + .byte 30 + .byte 61 + .byte 58 + .byte 52 + .byte 40 + .byte 17 + .byte 35 + .byte 7 + .byte 14 + .byte 28 + .byte 57 + .byte 50 + .byte 36 + .byte 9 + .byte 18 + .byte 37 + .byte 11 + .byte 22 + .byte 45 + .byte 27 + .byte 55 + .byte 46 + .byte 29 + .byte 59 + .byte 54 + .byte 44 + .byte 25 + .byte 51 + .byte 38 + .byte 13 + .byte 26 + .byte 53 + .byte 42 + + .text +.global knot256_permute_6 + .type knot256_permute_6, @function +knot256_permute_6: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 57 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r8 + std Y+18,r9 + std Y+19,r10 + std Y+20,r11 + std Y+21,r12 + std Y+22,r13 + std Y+23,r14 + std Y+24,r15 + push r31 + push r30 + ldi r30,lo8(table_6) + ldi r31,hi8(table_6) +#if defined(RAMPZ) + ldi r17,hh8(table_6) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +59: +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + eor r18,r23 + inc r30 + ldd r23,Y+1 + ldd r4,Y+9 + ldd r5,Y+17 + mov r24,r18 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+33,r7 + mov r16,r5 + eor r16,r24 + mov r8,r23 + or r8,r4 + eor r8,r16 + mov r24,r23 + eor r24,r5 + mov r18,r25 + and r18,r16 + eor r18,r24 + mov r6,r8 + and r6,r24 + eor r6,r25 + std Y+25,r6 + ldd r23,Y+2 + ldd r4,Y+10 + ldd r5,Y+18 + mov r24,r19 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+34,r7 + mov r16,r5 + eor r16,r24 + mov r9,r23 + or r9,r4 + eor r9,r16 + mov r24,r23 + eor r24,r5 + mov r19,r25 + and r19,r16 + eor r19,r24 + mov r6,r9 + and r6,r24 + eor r6,r25 + std Y+26,r6 + ldd 
r23,Y+3 + ldd r4,Y+11 + ldd r5,Y+19 + mov r24,r20 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+35,r7 + mov r16,r5 + eor r16,r24 + mov r10,r23 + or r10,r4 + eor r10,r16 + mov r24,r23 + eor r24,r5 + mov r20,r25 + and r20,r16 + eor r20,r24 + mov r6,r10 + and r6,r24 + eor r6,r25 + std Y+27,r6 + ldd r23,Y+4 + ldd r4,Y+12 + ldd r5,Y+20 + mov r24,r21 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+36,r7 + mov r16,r5 + eor r16,r24 + mov r11,r23 + or r11,r4 + eor r11,r16 + mov r24,r23 + eor r24,r5 + mov r21,r25 + and r21,r16 + eor r21,r24 + mov r6,r11 + and r6,r24 + eor r6,r25 + std Y+28,r6 + ldd r23,Y+5 + ldd r4,Y+13 + ldd r5,Y+21 + mov r24,r26 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+37,r7 + mov r16,r5 + eor r16,r24 + mov r12,r23 + or r12,r4 + eor r12,r16 + mov r24,r23 + eor r24,r5 + mov r26,r25 + and r26,r16 + eor r26,r24 + mov r6,r12 + and r6,r24 + eor r6,r25 + std Y+29,r6 + ldd r23,Y+6 + ldd r4,Y+14 + ldd r5,Y+22 + mov r24,r27 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+38,r7 + mov r16,r5 + eor r16,r24 + mov r13,r23 + or r13,r4 + eor r13,r16 + mov r24,r23 + eor r24,r5 + mov r27,r25 + and r27,r16 + eor r27,r24 + mov r6,r13 + and r6,r24 + eor r6,r25 + std Y+30,r6 + ldd r23,Y+7 + ldd r4,Y+15 + ldd r5,Y+23 + mov r24,r2 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+39,r7 + mov r16,r5 + eor r16,r24 + mov r14,r23 + or r14,r4 + eor r14,r16 + mov r24,r23 + eor r24,r5 + mov r2,r25 + and r2,r16 + eor r2,r24 + mov r6,r14 + and r6,r24 + eor r6,r25 + std Y+31,r6 + ldd r23,Y+8 + ldd r4,Y+16 + ldd r5,Y+24 + mov r24,r3 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+40,r7 + mov r16,r5 + eor r16,r24 + mov r15,r23 + or r15,r4 + eor r15,r16 + mov r24,r23 + eor r24,r5 + mov r3,r25 + and r3,r16 + eor r3,r24 + mov r6,r15 + and r6,r24 + eor r6,r25 + std Y+32,r6 + std Y+9,r15 + std Y+10,r8 + std Y+11,r9 + std Y+12,r10 + std Y+13,r11 + std Y+14,r12 + std Y+15,r13 + std Y+16,r14 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd r11,Y+36 + ldd r12,Y+37 + ldd r13,Y+38 + ldd r14,Y+39 + ldd r15,Y+40 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+17,r13 + std Y+18,r14 + std Y+19,r15 + std Y+20,r8 + std Y+21,r9 + std Y+22,r10 + std Y+23,r11 + std Y+24,r12 + dec r22 + breq 5322f + rjmp 59b +5322: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + ldd r12,Y+5 + ldd r13,Y+6 + ldd r14,Y+7 + ldd r15,Y+8 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + ldd r8,Y+17 + ldd r9,Y+18 + ldd r10,Y+19 + ldd r11,Y+20 + ldd r12,Y+21 + ldd r13,Y+22 + ldd r14,Y+23 
+ ldd r15,Y+24 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + adiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot256_permute_6, .-knot256_permute_6 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 + .byte 92 + + .text +.global knot256_permute_7 + .type knot256_permute_7, @function +knot256_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 57 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r8 + std Y+18,r9 + std Y+19,r10 + std Y+20,r11 + std Y+21,r12 + std Y+22,r13 + std Y+23,r14 + std Y+24,r15 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r17,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +59: +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + eor r18,r23 + inc r30 + ldd r23,Y+1 + ldd r4,Y+9 + ldd r5,Y+17 + mov r24,r18 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+33,r7 + mov r16,r5 + eor r16,r24 + mov r8,r23 + or r8,r4 + eor r8,r16 + mov r24,r23 + eor r24,r5 + mov r18,r25 + and r18,r16 + eor 
r18,r24 + mov r6,r8 + and r6,r24 + eor r6,r25 + std Y+25,r6 + ldd r23,Y+2 + ldd r4,Y+10 + ldd r5,Y+18 + mov r24,r19 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+34,r7 + mov r16,r5 + eor r16,r24 + mov r9,r23 + or r9,r4 + eor r9,r16 + mov r24,r23 + eor r24,r5 + mov r19,r25 + and r19,r16 + eor r19,r24 + mov r6,r9 + and r6,r24 + eor r6,r25 + std Y+26,r6 + ldd r23,Y+3 + ldd r4,Y+11 + ldd r5,Y+19 + mov r24,r20 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+35,r7 + mov r16,r5 + eor r16,r24 + mov r10,r23 + or r10,r4 + eor r10,r16 + mov r24,r23 + eor r24,r5 + mov r20,r25 + and r20,r16 + eor r20,r24 + mov r6,r10 + and r6,r24 + eor r6,r25 + std Y+27,r6 + ldd r23,Y+4 + ldd r4,Y+12 + ldd r5,Y+20 + mov r24,r21 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+36,r7 + mov r16,r5 + eor r16,r24 + mov r11,r23 + or r11,r4 + eor r11,r16 + mov r24,r23 + eor r24,r5 + mov r21,r25 + and r21,r16 + eor r21,r24 + mov r6,r11 + and r6,r24 + eor r6,r25 + std Y+28,r6 + ldd r23,Y+5 + ldd r4,Y+13 + ldd r5,Y+21 + mov r24,r26 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+37,r7 + mov r16,r5 + eor r16,r24 + mov r12,r23 + or r12,r4 + eor r12,r16 + mov r24,r23 + eor r24,r5 + mov r26,r25 + and r26,r16 + eor r26,r24 + mov r6,r12 + and r6,r24 + eor r6,r25 + std Y+29,r6 + ldd r23,Y+6 + ldd r4,Y+14 + ldd r5,Y+22 + mov r24,r27 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+38,r7 + mov r16,r5 + eor r16,r24 + mov r13,r23 + or r13,r4 + eor r13,r16 + mov r24,r23 + eor r24,r5 + mov r27,r25 + and r27,r16 + eor r27,r24 + mov r6,r13 + and r6,r24 + eor r6,r25 + std Y+30,r6 + ldd r23,Y+7 + ldd r4,Y+15 + ldd r5,Y+23 + mov r24,r2 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+39,r7 + mov r16,r5 + eor r16,r24 + mov r14,r23 + or r14,r4 + eor r14,r16 + mov r24,r23 + eor r24,r5 + mov r2,r25 + and r2,r16 + eor r2,r24 + mov r6,r14 + and r6,r24 + eor r6,r25 + std Y+31,r6 + ldd r23,Y+8 + ldd r4,Y+16 + ldd r5,Y+24 + mov r24,r3 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+40,r7 + mov r16,r5 + eor r16,r24 + mov r15,r23 + or r15,r4 + eor r15,r16 + mov r24,r23 + eor r24,r5 + mov r3,r25 + and r3,r16 + eor r3,r24 + mov r6,r15 + and r6,r24 + eor r6,r25 + std Y+32,r6 + std Y+9,r15 + std Y+10,r8 + std Y+11,r9 + std Y+12,r10 + std Y+13,r11 + std Y+14,r12 + std Y+15,r13 + std Y+16,r14 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd r11,Y+36 + ldd r12,Y+37 + ldd r13,Y+38 + ldd r14,Y+39 + ldd r15,Y+40 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+17,r13 + std Y+18,r14 + std Y+19,r15 + std Y+20,r8 + std Y+21,r9 + std Y+22,r10 + std Y+23,r11 + std Y+24,r12 + dec r22 + breq 5322f + rjmp 59b +5322: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + ldd r12,Y+5 + ldd r13,Y+6 + ldd r14,Y+7 + ldd r15,Y+8 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 
+ std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + ldd r8,Y+17 + ldd r9,Y+18 + ldd r10,Y+19 + ldd r11,Y+20 + ldd r12,Y+21 + ldd r13,Y+22 + ldd r14,Y+23 + ldd r15,Y+24 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + adiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot256_permute_7, .-knot256_permute_7 + +#endif diff --git a/knot/Implementations/crypto_aead/knot128v2/rhys/internal-knot-384-avr.S b/knot/Implementations/crypto_aead/knot128v2/rhys/internal-knot-384-avr.S new file mode 100644 index 0000000..4d15898 --- /dev/null +++ b/knot/Implementations/crypto_aead/knot128v2/rhys/internal-knot-384-avr.S @@ -0,0 +1,833 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 + .byte 92 + + .text +.global knot384_permute_7 + .type knot384_permute_7, @function +knot384_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,72 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 87 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + ldd r4,Z+16 + ldd r5,Z+17 + ldd r6,Z+18 + ldd r7,Z+19 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + std Y+13,r26 + std Y+14,r27 + std Y+15,r2 + std Y+16,r3 + std Y+17,r4 + std Y+18,r5 + std Y+19,r6 + std Y+20,r7 + std Y+21,r8 + std Y+22,r9 + std Y+23,r10 + std 
Y+24,r11 + ldd r26,Z+24 + ldd r27,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r4,Z+28 + ldd r5,Z+29 + ldd r6,Z+30 + ldd r7,Z+31 + ldd r8,Z+32 + ldd r9,Z+33 + ldd r10,Z+34 + ldd r11,Z+35 + std Y+25,r26 + std Y+26,r27 + std Y+27,r2 + std Y+28,r3 + std Y+29,r4 + std Y+30,r5 + std Y+31,r6 + std Y+32,r7 + std Y+33,r8 + std Y+34,r9 + std Y+35,r10 + std Y+36,r11 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+37,r26 + std Y+38,r27 + std Y+39,r2 + std Y+40,r3 + std Y+41,r4 + std Y+42,r5 + std Y+43,r6 + std Y+44,r7 + std Y+45,r8 + std Y+46,r9 + std Y+47,r10 + std Y+48,r11 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r24,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif +99: + ldd r12,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + inc r30 + ldd r18,Y+13 + ldd r19,Y+25 + ldd r20,Y+37 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+61,r23 + mov r14,r20 + eor r14,r12 + mov r26,r18 + or r26,r19 + eor r26,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+1,r21 + mov r21,r26 + and r21,r12 + eor r21,r13 + std Y+49,r21 + ldd r12,Y+2 + ldd r18,Y+14 + ldd r19,Y+26 + ldd r20,Y+38 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+62,r23 + mov r14,r20 + eor r14,r12 + mov r27,r18 + or r27,r19 + eor r27,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+2,r21 + mov r21,r27 + and r21,r12 + eor r21,r13 + std Y+50,r21 + ldd r12,Y+3 + ldd r18,Y+15 + ldd r19,Y+27 + ldd r20,Y+39 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+63,r23 + mov r14,r20 + eor r14,r12 + mov r2,r18 + or r2,r19 + eor r2,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+3,r21 + mov r21,r2 + and r21,r12 + eor r21,r13 + std Y+51,r21 + ldd r12,Y+4 + ldd r18,Y+16 + ldd r19,Y+28 + ldd r20,Y+40 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,192 + sbci r29,255 + st Y,r23 + subi r28,64 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r3,r18 + or r3,r19 + eor r3,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+4,r21 + mov r21,r3 + and r21,r12 + eor r21,r13 + std Y+52,r21 + ldd r12,Y+5 + ldd r18,Y+17 + ldd r19,Y+29 + ldd r20,Y+41 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,191 + sbci r29,255 + st Y,r23 + subi r28,65 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r4,r18 + or r4,r19 + eor r4,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+5,r21 + mov r21,r4 + and r21,r12 + eor r21,r13 + std Y+53,r21 + ldd r12,Y+6 + ldd r18,Y+18 + ldd r19,Y+30 + ldd r20,Y+42 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,190 + sbci r29,255 + st Y,r23 + subi r28,66 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r5,r18 + or r5,r19 + eor r5,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+6,r21 + mov r21,r5 + and r21,r12 + eor r21,r13 + std Y+54,r21 + ldd r12,Y+7 + ldd r18,Y+19 + ldd r19,Y+31 + ldd r20,Y+43 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,189 + sbci 
r29,255 + st Y,r23 + subi r28,67 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r6,r18 + or r6,r19 + eor r6,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+7,r21 + mov r21,r6 + and r21,r12 + eor r21,r13 + std Y+55,r21 + ldd r12,Y+8 + ldd r18,Y+20 + ldd r19,Y+32 + ldd r20,Y+44 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,188 + sbci r29,255 + st Y,r23 + subi r28,68 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r7,r18 + or r7,r19 + eor r7,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+8,r21 + mov r21,r7 + and r21,r12 + eor r21,r13 + std Y+56,r21 + ldd r12,Y+9 + ldd r18,Y+21 + ldd r19,Y+33 + ldd r20,Y+45 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,187 + sbci r29,255 + st Y,r23 + subi r28,69 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r8,r18 + or r8,r19 + eor r8,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+9,r21 + mov r21,r8 + and r21,r12 + eor r21,r13 + std Y+57,r21 + ldd r12,Y+10 + ldd r18,Y+22 + ldd r19,Y+34 + ldd r20,Y+46 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,186 + sbci r29,255 + st Y,r23 + subi r28,70 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r9,r18 + or r9,r19 + eor r9,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+10,r21 + mov r21,r9 + and r21,r12 + eor r21,r13 + std Y+58,r21 + ldd r12,Y+11 + ldd r18,Y+23 + ldd r19,Y+35 + ldd r20,Y+47 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,185 + sbci r29,255 + st Y,r23 + subi r28,71 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r10,r18 + or r10,r19 + eor r10,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+11,r21 + mov r21,r10 + and r21,r12 + eor r21,r13 + std Y+59,r21 + ldd r12,Y+12 + ldd r18,Y+24 + ldd r19,Y+36 + ldd r20,Y+48 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,184 + sbci r29,255 + st Y,r23 + subi r28,72 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r11,r18 + or r11,r19 + eor r11,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+12,r21 + mov r21,r11 + and r21,r12 + eor r21,r13 + std Y+60,r21 + std Y+25,r11 + std Y+26,r26 + std Y+27,r27 + std Y+28,r2 + std Y+29,r3 + std Y+30,r4 + std Y+31,r5 + std Y+32,r6 + std Y+33,r7 + std Y+34,r8 + std Y+35,r9 + std Y+36,r10 + ldd r26,Y+49 + ldd r27,Y+50 + ldd r2,Y+51 + ldd r3,Y+52 + ldd r4,Y+53 + ldd r5,Y+54 + ldd r6,Y+55 + ldd r7,Y+56 + ldd r8,Y+57 + ldd r9,Y+58 + ldd r10,Y+59 + ldd r11,Y+60 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + adc r26,r1 + std Y+13,r26 + std Y+14,r27 + std Y+15,r2 + std Y+16,r3 + std Y+17,r4 + std Y+18,r5 + std Y+19,r6 + std Y+20,r7 + std Y+21,r8 + std Y+22,r9 + std Y+23,r10 + std Y+24,r11 + adiw r28,61 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y + subi r28,72 + sbc r29,r1 + bst r26,0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r3 + ror r2 + ror r27 + ror r26 + bld r11,7 + std Y+37,r5 + std Y+38,r6 + std Y+39,r7 + std Y+40,r8 + std Y+41,r9 + std Y+42,r10 + std Y+43,r11 + std Y+44,r26 + std Y+45,r27 + std Y+46,r2 + std Y+47,r3 + std Y+48,r4 + dec r22 + breq 5542f + rjmp 99b +5542: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 
+#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r2,Y+15 + ldd r3,Y+16 + ldd r4,Y+17 + ldd r5,Y+18 + ldd r6,Y+19 + ldd r7,Y+20 + ldd r8,Y+21 + ldd r9,Y+22 + ldd r10,Y+23 + ldd r11,Y+24 + std Z+12,r26 + std Z+13,r27 + std Z+14,r2 + std Z+15,r3 + std Z+16,r4 + std Z+17,r5 + std Z+18,r6 + std Z+19,r7 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + ldd r26,Y+25 + ldd r27,Y+26 + ldd r2,Y+27 + ldd r3,Y+28 + ldd r4,Y+29 + ldd r5,Y+30 + ldd r6,Y+31 + ldd r7,Y+32 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd r11,Y+36 + std Z+24,r26 + std Z+25,r27 + std Z+26,r2 + std Z+27,r3 + std Z+28,r4 + std Z+29,r5 + std Z+30,r6 + std Z+31,r7 + std Z+32,r8 + std Z+33,r9 + std Z+34,r10 + std Z+35,r11 + ldd r26,Y+37 + ldd r27,Y+38 + ldd r2,Y+39 + ldd r3,Y+40 + ldd r4,Y+41 + ldd r5,Y+42 + ldd r6,Y+43 + ldd r7,Y+44 + ldd r8,Y+45 + ldd r9,Y+46 + ldd r10,Y+47 + ldd r11,Y+48 + std Z+36,r26 + std Z+37,r27 + std Z+38,r2 + std Z+39,r3 + std Z+40,r4 + std Z+41,r5 + std Z+42,r6 + std Z+43,r7 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + subi r28,184 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot384_permute_7, .-knot384_permute_7 + +#endif diff --git a/knot/Implementations/crypto_aead/knot128v2/rhys/internal-knot-512-avr.S b/knot/Implementations/crypto_aead/knot128v2/rhys/internal-knot-512-avr.S new file mode 100644 index 0000000..6f92ac3 --- /dev/null +++ b/knot/Implementations/crypto_aead/knot128v2/rhys/internal-knot-512-avr.S @@ -0,0 +1,2315 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 + .byte 92 + + .text +.global knot512_permute_7 + .type knot512_permute_7, @function +knot512_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 
+ push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,96 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 113 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r26,Z+16 + ldd r27,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + ldd r26,Z+32 + ldd r27,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r8,Z+40 + ldd r9,Z+41 + ldd r10,Z+42 + ldd r11,Z+43 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + std Y+33,r26 + std Y+34,r27 + std Y+35,r2 + std Y+36,r3 + std Y+37,r4 + std Y+38,r5 + std Y+39,r6 + std Y+40,r7 + std Y+41,r8 + std Y+42,r9 + std Y+43,r10 + std Y+44,r11 + std Y+45,r12 + std Y+46,r13 + std Y+47,r14 + std Y+48,r15 + ldd r26,Z+48 + ldd r27,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r8,Z+56 + ldd r9,Z+57 + ldd r10,Z+58 + ldd r11,Z+59 + ldd r12,Z+60 + ldd r13,Z+61 + ldd r14,Z+62 + ldd r15,Z+63 + adiw r28,49 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y+,r12 + st Y+,r13 + st Y+,r14 + st Y,r15 + subi r28,64 + sbc r29,r1 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r17,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +134: + ldd r24,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r24,r18 + inc r30 + ldd r18,Y+17 + ldd r19,Y+33 + ldd r20,Y+49 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,175 + sbci r29,255 + st Y,r23 + subi r28,81 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r26,r18 + or r26,r19 + eor r26,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+1,r21 + mov r21,r26 + and r21,r24 + eor r21,r25 + subi r28,191 + sbci r29,255 + st Y,r21 + subi r28,65 + sbc r29,r1 + ldd r24,Y+2 + ldd r18,Y+18 + ldd r19,Y+34 + ldd r20,Y+50 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,174 + sbci r29,255 + st Y,r23 + subi r28,82 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r27,r18 + or r27,r19 + eor r27,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+2,r21 + mov r21,r27 + and r21,r24 + eor r21,r25 + subi r28,190 + sbci r29,255 + st Y,r21 + subi r28,66 + sbc r29,r1 + ldd r24,Y+3 + ldd r18,Y+19 + ldd r19,Y+35 + ldd r20,Y+51 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,173 + sbci r29,255 + st Y,r23 + subi r28,83 + sbc r29,r1 
+ mov r16,r20 + eor r16,r24 + mov r2,r18 + or r2,r19 + eor r2,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+3,r21 + mov r21,r2 + and r21,r24 + eor r21,r25 + subi r28,189 + sbci r29,255 + st Y,r21 + subi r28,67 + sbc r29,r1 + ldd r24,Y+4 + ldd r18,Y+20 + ldd r19,Y+36 + ldd r20,Y+52 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,172 + sbci r29,255 + st Y,r23 + subi r28,84 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r3,r18 + or r3,r19 + eor r3,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+4,r21 + mov r21,r3 + and r21,r24 + eor r21,r25 + subi r28,188 + sbci r29,255 + st Y,r21 + subi r28,68 + sbc r29,r1 + ldd r24,Y+5 + ldd r18,Y+21 + ldd r19,Y+37 + ldd r20,Y+53 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,171 + sbci r29,255 + st Y,r23 + subi r28,85 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r4,r18 + or r4,r19 + eor r4,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+5,r21 + mov r21,r4 + and r21,r24 + eor r21,r25 + subi r28,187 + sbci r29,255 + st Y,r21 + subi r28,69 + sbc r29,r1 + ldd r24,Y+6 + ldd r18,Y+22 + ldd r19,Y+38 + ldd r20,Y+54 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,170 + sbci r29,255 + st Y,r23 + subi r28,86 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r5,r18 + or r5,r19 + eor r5,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+6,r21 + mov r21,r5 + and r21,r24 + eor r21,r25 + subi r28,186 + sbci r29,255 + st Y,r21 + subi r28,70 + sbc r29,r1 + ldd r24,Y+7 + ldd r18,Y+23 + ldd r19,Y+39 + ldd r20,Y+55 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,169 + sbci r29,255 + st Y,r23 + subi r28,87 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r6,r18 + or r6,r19 + eor r6,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+7,r21 + mov r21,r6 + and r21,r24 + eor r21,r25 + subi r28,185 + sbci r29,255 + st Y,r21 + subi r28,71 + sbc r29,r1 + ldd r24,Y+8 + ldd r18,Y+24 + ldd r19,Y+40 + ldd r20,Y+56 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,168 + sbci r29,255 + st Y,r23 + subi r28,88 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r7,r18 + or r7,r19 + eor r7,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+8,r21 + mov r21,r7 + and r21,r24 + eor r21,r25 + subi r28,184 + sbci r29,255 + st Y,r21 + subi r28,72 + sbc r29,r1 + ldd r24,Y+9 + ldd r18,Y+25 + ldd r19,Y+41 + ldd r20,Y+57 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,167 + sbci r29,255 + st Y,r23 + subi r28,89 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r8,r18 + or r8,r19 + eor r8,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+9,r21 + mov r21,r8 + and r21,r24 + eor r21,r25 + subi r28,183 + sbci r29,255 + st Y,r21 + subi r28,73 + sbc r29,r1 + ldd r24,Y+10 + ldd r18,Y+26 + ldd r19,Y+42 + ldd r20,Y+58 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,166 + sbci r29,255 + st Y,r23 + subi r28,90 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r9,r18 + or r9,r19 + eor r9,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+10,r21 + mov r21,r9 + and r21,r24 + eor r21,r25 + subi r28,182 + sbci r29,255 + st Y,r21 + subi r28,74 + sbc r29,r1 + ldd r24,Y+11 + ldd r18,Y+27 + 
ldd r19,Y+43 + ldd r20,Y+59 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,165 + sbci r29,255 + st Y,r23 + subi r28,91 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r10,r18 + or r10,r19 + eor r10,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+11,r21 + mov r21,r10 + and r21,r24 + eor r21,r25 + subi r28,181 + sbci r29,255 + st Y,r21 + subi r28,75 + sbc r29,r1 + ldd r24,Y+12 + ldd r18,Y+28 + ldd r19,Y+44 + ldd r20,Y+60 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,164 + sbci r29,255 + st Y,r23 + subi r28,92 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r11,r18 + or r11,r19 + eor r11,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+12,r21 + mov r21,r11 + and r21,r24 + eor r21,r25 + subi r28,180 + sbci r29,255 + st Y,r21 + subi r28,76 + sbc r29,r1 + ldd r24,Y+13 + ldd r18,Y+29 + ldd r19,Y+45 + ldd r20,Y+61 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,163 + sbci r29,255 + st Y,r23 + subi r28,93 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r12,r18 + or r12,r19 + eor r12,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+13,r21 + mov r21,r12 + and r21,r24 + eor r21,r25 + subi r28,179 + sbci r29,255 + st Y,r21 + subi r28,77 + sbc r29,r1 + ldd r24,Y+14 + ldd r18,Y+30 + ldd r19,Y+46 + ldd r20,Y+62 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,162 + sbci r29,255 + st Y,r23 + subi r28,94 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r13,r18 + or r13,r19 + eor r13,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+14,r21 + mov r21,r13 + and r21,r24 + eor r21,r25 + subi r28,178 + sbci r29,255 + st Y,r21 + subi r28,78 + sbc r29,r1 + ldd r24,Y+15 + ldd r18,Y+31 + ldd r19,Y+47 + ldd r20,Y+63 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,161 + sbci r29,255 + st Y,r23 + subi r28,95 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r14,r18 + or r14,r19 + eor r14,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+15,r21 + mov r21,r14 + and r21,r24 + eor r21,r25 + subi r28,177 + sbci r29,255 + st Y,r21 + subi r28,79 + sbc r29,r1 + ldd r24,Y+16 + ldd r18,Y+32 + ldd r19,Y+48 + subi r28,192 + sbci r29,255 + ld r20,Y + subi r28,64 + sbc r29,r1 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,160 + sbci r29,255 + st Y,r23 + subi r28,96 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r15,r18 + or r15,r19 + eor r15,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+16,r21 + mov r21,r15 + and r21,r24 + eor r21,r25 + subi r28,176 + sbci r29,255 + st Y,r21 + subi r28,80 + sbc r29,r1 + std Y+33,r14 + std Y+34,r15 + std Y+35,r26 + std Y+36,r27 + std Y+37,r2 + std Y+38,r3 + std Y+39,r4 + std Y+40,r5 + std Y+41,r6 + std Y+42,r7 + std Y+43,r8 + std Y+44,r9 + std Y+45,r10 + std Y+46,r11 + std Y+47,r12 + std Y+48,r13 + subi r28,191 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,80 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std 
Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,96 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + adiw r28,49 + st Y+,r13 + st Y+,r14 + st Y+,r15 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y,r12 + subi r28,64 + sbc r29,r1 + dec r22 + breq 5812f + rjmp 134b +5812: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r26,Y+17 + ldd r27,Y+18 + ldd r2,Y+19 + ldd r3,Y+20 + ldd r4,Y+21 + ldd r5,Y+22 + ldd r6,Y+23 + ldd r7,Y+24 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + std Z+16,r26 + std Z+17,r27 + std Z+18,r2 + std Z+19,r3 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + ldd r26,Y+33 + ldd r27,Y+34 + ldd r2,Y+35 + ldd r3,Y+36 + ldd r4,Y+37 + ldd r5,Y+38 + ldd r6,Y+39 + ldd r7,Y+40 + ldd r8,Y+41 + ldd r9,Y+42 + ldd r10,Y+43 + ldd r11,Y+44 + ldd r12,Y+45 + ldd r13,Y+46 + ldd r14,Y+47 + ldd r15,Y+48 + std Z+32,r26 + std Z+33,r27 + std Z+34,r2 + std Z+35,r3 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r8 + std Z+41,r9 + std Z+42,r10 + std Z+43,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + adiw r28,49 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,64 + sbc r29,r1 + std Z+48,r26 + std Z+49,r27 + std Z+50,r2 + std Z+51,r3 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + std Z+56,r8 + std Z+57,r9 + std Z+58,r10 + std Z+59,r11 + std Z+60,r12 + std Z+61,r13 + std Z+62,r14 + std Z+63,r15 + subi r28,160 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot512_permute_7, .-knot512_permute_7 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_8, @object + .size table_8, 140 +table_8: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 17 + .byte 35 + .byte 71 + .byte 142 + .byte 28 + .byte 56 + .byte 113 + .byte 226 + .byte 196 + .byte 137 + .byte 18 + .byte 37 + .byte 75 + .byte 151 + .byte 46 + .byte 92 + .byte 184 + .byte 112 + .byte 224 + .byte 192 + .byte 129 + .byte 3 + .byte 6 + .byte 12 + .byte 25 + .byte 50 + .byte 100 + .byte 201 + .byte 146 + .byte 36 + 
.byte 73 + .byte 147 + .byte 38 + .byte 77 + .byte 155 + .byte 55 + .byte 110 + .byte 220 + .byte 185 + .byte 114 + .byte 228 + .byte 200 + .byte 144 + .byte 32 + .byte 65 + .byte 130 + .byte 5 + .byte 10 + .byte 21 + .byte 43 + .byte 86 + .byte 173 + .byte 91 + .byte 182 + .byte 109 + .byte 218 + .byte 181 + .byte 107 + .byte 214 + .byte 172 + .byte 89 + .byte 178 + .byte 101 + .byte 203 + .byte 150 + .byte 44 + .byte 88 + .byte 176 + .byte 97 + .byte 195 + .byte 135 + .byte 15 + .byte 31 + .byte 62 + .byte 125 + .byte 251 + .byte 246 + .byte 237 + .byte 219 + .byte 183 + .byte 111 + .byte 222 + .byte 189 + .byte 122 + .byte 245 + .byte 235 + .byte 215 + .byte 174 + .byte 93 + .byte 186 + .byte 116 + .byte 232 + .byte 209 + .byte 162 + .byte 68 + .byte 136 + .byte 16 + .byte 33 + .byte 67 + .byte 134 + .byte 13 + .byte 27 + .byte 54 + .byte 108 + .byte 216 + .byte 177 + .byte 99 + .byte 199 + .byte 143 + .byte 30 + .byte 60 + .byte 121 + .byte 243 + .byte 231 + .byte 206 + .byte 156 + .byte 57 + .byte 115 + .byte 230 + .byte 204 + .byte 152 + .byte 49 + .byte 98 + .byte 197 + .byte 139 + .byte 22 + .byte 45 + .byte 90 + .byte 180 + .byte 105 + .byte 210 + .byte 164 + .byte 72 + .byte 145 + .byte 34 + .byte 69 + + .text +.global knot512_permute_8 + .type knot512_permute_8, @function +knot512_permute_8: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,96 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 113 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r26,Z+16 + ldd r27,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + ldd r26,Z+32 + ldd r27,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r8,Z+40 + ldd r9,Z+41 + ldd r10,Z+42 + ldd r11,Z+43 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + std Y+33,r26 + std Y+34,r27 + std Y+35,r2 + std Y+36,r3 + std Y+37,r4 + std Y+38,r5 + std Y+39,r6 + std Y+40,r7 + std Y+41,r8 + std Y+42,r9 + std Y+43,r10 + std Y+44,r11 + std Y+45,r12 + std Y+46,r13 + std Y+47,r14 + std Y+48,r15 + ldd r26,Z+48 + ldd r27,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r8,Z+56 + ldd r9,Z+57 + ldd r10,Z+58 + ldd r11,Z+59 + ldd r12,Z+60 + ldd r13,Z+61 + ldd r14,Z+62 + ldd r15,Z+63 + adiw r28,49 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y+,r12 + st Y+,r13 + st Y+,r14 + st Y,r15 + subi r28,64 + sbc r29,r1 + push r31 + push r30 + ldi r30,lo8(table_8) + ldi r31,hi8(table_8) 
+#if defined(RAMPZ) + ldi r17,hh8(table_8) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +134: + ldd r24,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r24,r18 + inc r30 + ldd r18,Y+17 + ldd r19,Y+33 + ldd r20,Y+49 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,175 + sbci r29,255 + st Y,r23 + subi r28,81 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r26,r18 + or r26,r19 + eor r26,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+1,r21 + mov r21,r26 + and r21,r24 + eor r21,r25 + subi r28,191 + sbci r29,255 + st Y,r21 + subi r28,65 + sbc r29,r1 + ldd r24,Y+2 + ldd r18,Y+18 + ldd r19,Y+34 + ldd r20,Y+50 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,174 + sbci r29,255 + st Y,r23 + subi r28,82 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r27,r18 + or r27,r19 + eor r27,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+2,r21 + mov r21,r27 + and r21,r24 + eor r21,r25 + subi r28,190 + sbci r29,255 + st Y,r21 + subi r28,66 + sbc r29,r1 + ldd r24,Y+3 + ldd r18,Y+19 + ldd r19,Y+35 + ldd r20,Y+51 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,173 + sbci r29,255 + st Y,r23 + subi r28,83 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r2,r18 + or r2,r19 + eor r2,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+3,r21 + mov r21,r2 + and r21,r24 + eor r21,r25 + subi r28,189 + sbci r29,255 + st Y,r21 + subi r28,67 + sbc r29,r1 + ldd r24,Y+4 + ldd r18,Y+20 + ldd r19,Y+36 + ldd r20,Y+52 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,172 + sbci r29,255 + st Y,r23 + subi r28,84 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r3,r18 + or r3,r19 + eor r3,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+4,r21 + mov r21,r3 + and r21,r24 + eor r21,r25 + subi r28,188 + sbci r29,255 + st Y,r21 + subi r28,68 + sbc r29,r1 + ldd r24,Y+5 + ldd r18,Y+21 + ldd r19,Y+37 + ldd r20,Y+53 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,171 + sbci r29,255 + st Y,r23 + subi r28,85 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r4,r18 + or r4,r19 + eor r4,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+5,r21 + mov r21,r4 + and r21,r24 + eor r21,r25 + subi r28,187 + sbci r29,255 + st Y,r21 + subi r28,69 + sbc r29,r1 + ldd r24,Y+6 + ldd r18,Y+22 + ldd r19,Y+38 + ldd r20,Y+54 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,170 + sbci r29,255 + st Y,r23 + subi r28,86 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r5,r18 + or r5,r19 + eor r5,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+6,r21 + mov r21,r5 + and r21,r24 + eor r21,r25 + subi r28,186 + sbci r29,255 + st Y,r21 + subi r28,70 + sbc r29,r1 + ldd r24,Y+7 + ldd r18,Y+23 + ldd r19,Y+39 + ldd r20,Y+55 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,169 + sbci r29,255 + st Y,r23 + subi r28,87 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r6,r18 + or r6,r19 + eor r6,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+7,r21 + mov r21,r6 + and r21,r24 + eor r21,r25 + subi r28,185 + sbci r29,255 + st Y,r21 + subi 
r28,71 + sbc r29,r1 + ldd r24,Y+8 + ldd r18,Y+24 + ldd r19,Y+40 + ldd r20,Y+56 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,168 + sbci r29,255 + st Y,r23 + subi r28,88 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r7,r18 + or r7,r19 + eor r7,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+8,r21 + mov r21,r7 + and r21,r24 + eor r21,r25 + subi r28,184 + sbci r29,255 + st Y,r21 + subi r28,72 + sbc r29,r1 + ldd r24,Y+9 + ldd r18,Y+25 + ldd r19,Y+41 + ldd r20,Y+57 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,167 + sbci r29,255 + st Y,r23 + subi r28,89 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r8,r18 + or r8,r19 + eor r8,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+9,r21 + mov r21,r8 + and r21,r24 + eor r21,r25 + subi r28,183 + sbci r29,255 + st Y,r21 + subi r28,73 + sbc r29,r1 + ldd r24,Y+10 + ldd r18,Y+26 + ldd r19,Y+42 + ldd r20,Y+58 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,166 + sbci r29,255 + st Y,r23 + subi r28,90 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r9,r18 + or r9,r19 + eor r9,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+10,r21 + mov r21,r9 + and r21,r24 + eor r21,r25 + subi r28,182 + sbci r29,255 + st Y,r21 + subi r28,74 + sbc r29,r1 + ldd r24,Y+11 + ldd r18,Y+27 + ldd r19,Y+43 + ldd r20,Y+59 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,165 + sbci r29,255 + st Y,r23 + subi r28,91 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r10,r18 + or r10,r19 + eor r10,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+11,r21 + mov r21,r10 + and r21,r24 + eor r21,r25 + subi r28,181 + sbci r29,255 + st Y,r21 + subi r28,75 + sbc r29,r1 + ldd r24,Y+12 + ldd r18,Y+28 + ldd r19,Y+44 + ldd r20,Y+60 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,164 + sbci r29,255 + st Y,r23 + subi r28,92 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r11,r18 + or r11,r19 + eor r11,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+12,r21 + mov r21,r11 + and r21,r24 + eor r21,r25 + subi r28,180 + sbci r29,255 + st Y,r21 + subi r28,76 + sbc r29,r1 + ldd r24,Y+13 + ldd r18,Y+29 + ldd r19,Y+45 + ldd r20,Y+61 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,163 + sbci r29,255 + st Y,r23 + subi r28,93 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r12,r18 + or r12,r19 + eor r12,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+13,r21 + mov r21,r12 + and r21,r24 + eor r21,r25 + subi r28,179 + sbci r29,255 + st Y,r21 + subi r28,77 + sbc r29,r1 + ldd r24,Y+14 + ldd r18,Y+30 + ldd r19,Y+46 + ldd r20,Y+62 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,162 + sbci r29,255 + st Y,r23 + subi r28,94 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r13,r18 + or r13,r19 + eor r13,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+14,r21 + mov r21,r13 + and r21,r24 + eor r21,r25 + subi r28,178 + sbci r29,255 + st Y,r21 + subi r28,78 + sbc r29,r1 + ldd r24,Y+15 + ldd r18,Y+31 + ldd r19,Y+47 + ldd r20,Y+63 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,161 + sbci r29,255 + st Y,r23 + subi r28,95 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov 
r14,r18 + or r14,r19 + eor r14,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+15,r21 + mov r21,r14 + and r21,r24 + eor r21,r25 + subi r28,177 + sbci r29,255 + st Y,r21 + subi r28,79 + sbc r29,r1 + ldd r24,Y+16 + ldd r18,Y+32 + ldd r19,Y+48 + subi r28,192 + sbci r29,255 + ld r20,Y + subi r28,64 + sbc r29,r1 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,160 + sbci r29,255 + st Y,r23 + subi r28,96 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r15,r18 + or r15,r19 + eor r15,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+16,r21 + mov r21,r15 + and r21,r24 + eor r21,r25 + subi r28,176 + sbci r29,255 + st Y,r21 + subi r28,80 + sbc r29,r1 + std Y+33,r14 + std Y+34,r15 + std Y+35,r26 + std Y+36,r27 + std Y+37,r2 + std Y+38,r3 + std Y+39,r4 + std Y+40,r5 + std Y+41,r6 + std Y+42,r7 + std Y+43,r8 + std Y+44,r9 + std Y+45,r10 + std Y+46,r11 + std Y+47,r12 + std Y+48,r13 + subi r28,191 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,80 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,96 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + adiw r28,49 + st Y+,r13 + st Y+,r14 + st Y+,r15 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y,r12 + subi r28,64 + sbc r29,r1 + dec r22 + breq 5812f + rjmp 134b +5812: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r26,Y+17 + ldd r27,Y+18 + ldd r2,Y+19 + ldd r3,Y+20 + ldd r4,Y+21 + ldd r5,Y+22 + ldd r6,Y+23 + ldd r7,Y+24 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + std Z+16,r26 + std Z+17,r27 + std Z+18,r2 + std Z+19,r3 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + ldd r26,Y+33 + ldd r27,Y+34 + ldd r2,Y+35 + ldd r3,Y+36 + ldd r4,Y+37 + ldd r5,Y+38 + ldd r6,Y+39 + ldd r7,Y+40 + ldd r8,Y+41 + ldd r9,Y+42 + ldd r10,Y+43 + ldd r11,Y+44 + ldd r12,Y+45 + ldd r13,Y+46 + ldd r14,Y+47 + ldd r15,Y+48 + std Z+32,r26 + std Z+33,r27 + std 
Z+34,r2 + std Z+35,r3 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r8 + std Z+41,r9 + std Z+42,r10 + std Z+43,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + adiw r28,49 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,64 + sbc r29,r1 + std Z+48,r26 + std Z+49,r27 + std Z+50,r2 + std Z+51,r3 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + std Z+56,r8 + std Z+57,r9 + std Z+58,r10 + std Z+59,r11 + std Z+60,r12 + std Z+61,r13 + std Z+62,r14 + std Z+63,r15 + subi r28,160 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot512_permute_8, .-knot512_permute_8 + +#endif diff --git a/knot/Implementations/crypto_aead/knot128v2/rhys/internal-knot.c b/knot/Implementations/crypto_aead/knot128v2/rhys/internal-knot.c index 3486e6e..f8b378e 100644 --- a/knot/Implementations/crypto_aead/knot128v2/rhys/internal-knot.c +++ b/knot/Implementations/crypto_aead/knot128v2/rhys/internal-knot.c @@ -22,6 +22,8 @@ #include "internal-knot.h" +#if !defined(__AVR__) + /* Round constants for the KNOT-256, KNOT-384, and KNOT-512 permutations */ static uint8_t const rc6[52] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06, 0x0c, 0x18, 0x31, 0x22, @@ -295,3 +297,5 @@ void knot512_permute_8(knot512_state_t *state, uint8_t rounds) { knot512_permute(state, rc8, rounds); } + +#endif /* !__AVR__ */ diff --git a/knot/Implementations/crypto_aead/knot128v2/rhys/internal-util.h b/knot/Implementations/crypto_aead/knot128v2/rhys/internal-util.h index e79158c..e30166d 100644 --- a/knot/Implementations/crypto_aead/knot128v2/rhys/internal-util.h +++ b/knot/Implementations/crypto_aead/knot128v2/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/knot/Implementations/crypto_aead/knot192/rhys-avr/aead-common.c b/knot/Implementations/crypto_aead/knot192/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/knot/Implementations/crypto_aead/knot192/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
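As a quick illustration of the composed-rotation idea behind the macros added to internal-util.h above (a left rotation by n built from a cheap rotation by a multiple of 8 plus single-bit rotations in the opposite direction), the following standalone C sketch checks one case. The helper names rotl32 and rotr32 are hypothetical and exist only for this sketch; they are not part of the patch or of the library's API.

#include <assert.h>
#include <stdint.h>

/* Generic 32-bit rotations, written out for the host compiler. */
static uint32_t rotl32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32 - bits));
}

static uint32_t rotr32(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32 - bits));
}

int main(void)
{
    uint32_t x = 0x12345678;

    /* Left rotate by 5 composed as "left by 8, then right by 1 three
     * times", mirroring the shape of the leftRotate5() macro above. */
    uint32_t composed = rotr32(rotr32(rotr32(rotl32(x, 8), 1), 1), 1);

    assert(composed == rotl32(x, 5));
    return 0;
}

The composition is worthwhile on AVR because the device has no barrel shifter: a rotation by a multiple of 8 is just a byte permutation of the registers, and a rotation by 1 is a short lsl/rol carry chain, so chaining a few of them is cheaper than a generic variable-distance shift.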
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/knot/Implementations/crypto_aead/knot192/rhys-avr/aead-common.h b/knot/Implementations/crypto_aead/knot192/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/knot/Implementations/crypto_aead/knot192/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. 
- * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_aead/knot192/rhys-avr/api.h b/knot/Implementations/crypto_aead/knot192/rhys-avr/api.h deleted file mode 100644 index c340ebc..0000000 --- a/knot/Implementations/crypto_aead/knot192/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 24 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 24 -#define CRYPTO_ABYTES 24 -#define CRYPTO_NOOVERLAP 1 diff --git a/knot/Implementations/crypto_aead/knot192/rhys-avr/encrypt.c b/knot/Implementations/crypto_aead/knot192/rhys-avr/encrypt.c deleted file mode 100644 index 7d9ae8b..0000000 --- a/knot/Implementations/crypto_aead/knot192/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "knot.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return knot_aead_192_384_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return knot_aead_192_384_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot-256-avr.S b/knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot-256-avr.S deleted file mode 100644 index 15e6389..0000000 --- a/knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot-256-avr.S +++ /dev/null @@ -1,1093 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_6, @object - .size table_6, 52 -table_6: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 33 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 49 - .byte 34 - .byte 5 - .byte 10 - .byte 20 - .byte 41 - .byte 19 - .byte 39 - .byte 15 - .byte 30 - .byte 61 - .byte 58 - .byte 52 - .byte 40 - .byte 17 - .byte 35 - .byte 7 - .byte 14 - .byte 28 - .byte 57 - .byte 50 - .byte 36 - .byte 9 - .byte 18 - .byte 37 - .byte 11 - .byte 22 - .byte 45 - .byte 27 - .byte 55 - .byte 46 - .byte 29 - .byte 59 - .byte 54 - .byte 44 - .byte 25 - .byte 51 - .byte 38 - .byte 13 - .byte 26 - .byte 53 - .byte 42 - - .text -.global knot256_permute_6 - .type knot256_permute_6, @function -knot256_permute_6: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in 
r28,0x3d - in r29,0x3e - sbiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 57 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r8 - std Y+18,r9 - std Y+19,r10 - std Y+20,r11 - std Y+21,r12 - std Y+22,r13 - std Y+23,r14 - std Y+24,r15 - push r31 - push r30 - ldi r30,lo8(table_6) - ldi r31,hi8(table_6) -#if defined(RAMPZ) - ldi r17,hh8(table_6) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -59: -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - eor r18,r23 - inc r30 - ldd r23,Y+1 - ldd r4,Y+9 - ldd r5,Y+17 - mov r24,r18 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+33,r7 - mov r16,r5 - eor r16,r24 - mov r8,r23 - or r8,r4 - eor r8,r16 - mov r24,r23 - eor r24,r5 - mov r18,r25 - and r18,r16 - eor r18,r24 - mov r6,r8 - and r6,r24 - eor r6,r25 - std Y+25,r6 - ldd r23,Y+2 - ldd r4,Y+10 - ldd r5,Y+18 - mov r24,r19 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+34,r7 - mov r16,r5 - eor r16,r24 - mov r9,r23 - or r9,r4 - eor r9,r16 - mov r24,r23 - eor r24,r5 - mov r19,r25 - and r19,r16 - eor r19,r24 - mov r6,r9 - and r6,r24 - eor r6,r25 - std Y+26,r6 - ldd r23,Y+3 - ldd r4,Y+11 - ldd r5,Y+19 - mov r24,r20 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+35,r7 - mov r16,r5 - eor r16,r24 - mov r10,r23 - or r10,r4 - eor r10,r16 - mov r24,r23 - eor r24,r5 - mov r20,r25 - and r20,r16 - eor r20,r24 - mov r6,r10 - and r6,r24 - eor r6,r25 - std Y+27,r6 - ldd r23,Y+4 - ldd r4,Y+12 - ldd r5,Y+20 - mov r24,r21 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+36,r7 - mov r16,r5 - eor r16,r24 - mov r11,r23 - or r11,r4 - eor r11,r16 - mov r24,r23 - eor r24,r5 - mov r21,r25 - and r21,r16 - eor r21,r24 - mov r6,r11 - and r6,r24 - eor r6,r25 - std Y+28,r6 - ldd r23,Y+5 - ldd r4,Y+13 - ldd r5,Y+21 - mov r24,r26 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+37,r7 - mov r16,r5 - eor r16,r24 - mov r12,r23 - or r12,r4 - eor r12,r16 - mov r24,r23 - eor r24,r5 - mov r26,r25 - and r26,r16 - eor r26,r24 - mov r6,r12 - and r6,r24 - eor r6,r25 - std Y+29,r6 - ldd r23,Y+6 - ldd r4,Y+14 - ldd r5,Y+22 - mov r24,r27 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+38,r7 - mov r16,r5 - eor r16,r24 - mov r13,r23 - or r13,r4 - eor r13,r16 - mov r24,r23 - eor r24,r5 - mov r27,r25 - and r27,r16 - eor r27,r24 - mov r6,r13 - and r6,r24 - eor r6,r25 - std Y+30,r6 - ldd r23,Y+7 - ldd r4,Y+15 - ldd r5,Y+23 - mov r24,r2 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+39,r7 - mov r16,r5 - eor r16,r24 - mov r14,r23 - or r14,r4 - eor r14,r16 - mov r24,r23 - eor r24,r5 - mov r2,r25 - and 
r2,r16 - eor r2,r24 - mov r6,r14 - and r6,r24 - eor r6,r25 - std Y+31,r6 - ldd r23,Y+8 - ldd r4,Y+16 - ldd r5,Y+24 - mov r24,r3 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+40,r7 - mov r16,r5 - eor r16,r24 - mov r15,r23 - or r15,r4 - eor r15,r16 - mov r24,r23 - eor r24,r5 - mov r3,r25 - and r3,r16 - eor r3,r24 - mov r6,r15 - and r6,r24 - eor r6,r25 - std Y+32,r6 - std Y+9,r15 - std Y+10,r8 - std Y+11,r9 - std Y+12,r10 - std Y+13,r11 - std Y+14,r12 - std Y+15,r13 - std Y+16,r14 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - ldd r12,Y+37 - ldd r13,Y+38 - ldd r14,Y+39 - ldd r15,Y+40 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+17,r13 - std Y+18,r14 - std Y+19,r15 - std Y+20,r8 - std Y+21,r9 - std Y+22,r10 - std Y+23,r11 - std Y+24,r12 - dec r22 - breq 5322f - rjmp 59b -5322: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - ldd r12,Y+5 - ldd r13,Y+6 - ldd r14,Y+7 - ldd r15,Y+8 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - ldd r8,Y+17 - ldd r9,Y+18 - ldd r10,Y+19 - ldd r11,Y+20 - ldd r12,Y+21 - ldd r13,Y+22 - ldd r14,Y+23 - ldd r15,Y+24 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - adiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot256_permute_6, .-knot256_permute_6 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - 
.byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot256_permute_7 - .type knot256_permute_7, @function -knot256_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 57 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r8 - std Y+18,r9 - std Y+19,r10 - std Y+20,r11 - std Y+21,r12 - std Y+22,r13 - std Y+23,r14 - std Y+24,r15 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r17,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -59: -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - eor r18,r23 - inc r30 - ldd r23,Y+1 - ldd r4,Y+9 - ldd r5,Y+17 - mov r24,r18 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+33,r7 - mov r16,r5 - eor r16,r24 - mov r8,r23 - or r8,r4 - eor r8,r16 - mov r24,r23 - eor r24,r5 - mov r18,r25 - and r18,r16 - eor r18,r24 - mov r6,r8 - and r6,r24 - eor r6,r25 - std Y+25,r6 - ldd r23,Y+2 - ldd r4,Y+10 - ldd r5,Y+18 - mov r24,r19 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+34,r7 - mov r16,r5 - eor r16,r24 - mov r9,r23 - or r9,r4 - eor r9,r16 - mov r24,r23 - eor r24,r5 - mov r19,r25 - and r19,r16 - eor r19,r24 - mov r6,r9 - and r6,r24 - eor r6,r25 - std Y+26,r6 - ldd r23,Y+3 - ldd r4,Y+11 - ldd r5,Y+19 - mov r24,r20 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+35,r7 - mov r16,r5 - eor r16,r24 - mov r10,r23 - or r10,r4 - eor r10,r16 - mov r24,r23 - eor r24,r5 - mov r20,r25 - and r20,r16 - eor r20,r24 - mov r6,r10 - and r6,r24 - eor r6,r25 - std Y+27,r6 - ldd r23,Y+4 - ldd r4,Y+12 - ldd r5,Y+20 - mov r24,r21 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+36,r7 - mov r16,r5 - eor r16,r24 - mov r11,r23 - or r11,r4 - eor r11,r16 - mov r24,r23 - eor r24,r5 - mov r21,r25 - and r21,r16 - eor r21,r24 - mov r6,r11 - and r6,r24 - eor r6,r25 - std Y+28,r6 - ldd r23,Y+5 - ldd r4,Y+13 - ldd r5,Y+21 - mov r24,r26 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+37,r7 - mov r16,r5 - eor r16,r24 - mov r12,r23 - or r12,r4 - eor r12,r16 - mov r24,r23 - eor r24,r5 - mov r26,r25 - and r26,r16 - eor r26,r24 - mov r6,r12 - and r6,r24 - eor r6,r25 - std Y+29,r6 - ldd r23,Y+6 - ldd r4,Y+14 - ldd r5,Y+22 - mov r24,r27 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+38,r7 - mov r16,r5 - eor r16,r24 - mov r13,r23 - or r13,r4 - 
eor r13,r16 - mov r24,r23 - eor r24,r5 - mov r27,r25 - and r27,r16 - eor r27,r24 - mov r6,r13 - and r6,r24 - eor r6,r25 - std Y+30,r6 - ldd r23,Y+7 - ldd r4,Y+15 - ldd r5,Y+23 - mov r24,r2 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+39,r7 - mov r16,r5 - eor r16,r24 - mov r14,r23 - or r14,r4 - eor r14,r16 - mov r24,r23 - eor r24,r5 - mov r2,r25 - and r2,r16 - eor r2,r24 - mov r6,r14 - and r6,r24 - eor r6,r25 - std Y+31,r6 - ldd r23,Y+8 - ldd r4,Y+16 - ldd r5,Y+24 - mov r24,r3 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+40,r7 - mov r16,r5 - eor r16,r24 - mov r15,r23 - or r15,r4 - eor r15,r16 - mov r24,r23 - eor r24,r5 - mov r3,r25 - and r3,r16 - eor r3,r24 - mov r6,r15 - and r6,r24 - eor r6,r25 - std Y+32,r6 - std Y+9,r15 - std Y+10,r8 - std Y+11,r9 - std Y+12,r10 - std Y+13,r11 - std Y+14,r12 - std Y+15,r13 - std Y+16,r14 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - ldd r12,Y+37 - ldd r13,Y+38 - ldd r14,Y+39 - ldd r15,Y+40 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+17,r13 - std Y+18,r14 - std Y+19,r15 - std Y+20,r8 - std Y+21,r9 - std Y+22,r10 - std Y+23,r11 - std Y+24,r12 - dec r22 - breq 5322f - rjmp 59b -5322: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - ldd r12,Y+5 - ldd r13,Y+6 - ldd r14,Y+7 - ldd r15,Y+8 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - ldd r8,Y+17 - ldd r9,Y+18 - ldd r10,Y+19 - ldd r11,Y+20 - ldd r12,Y+21 - ldd r13,Y+22 - ldd r14,Y+23 - ldd r15,Y+24 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - adiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot256_permute_7, .-knot256_permute_7 - -#endif diff --git a/knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot-384-avr.S b/knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot-384-avr.S deleted file mode 100644 index 4d15898..0000000 --- a/knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot-384-avr.S +++ /dev/null @@ -1,833 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 15 - .byte 30 - .byte 
60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot384_permute_7 - .type knot384_permute_7, @function -knot384_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,72 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 87 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - ldd r4,Z+16 - ldd r5,Z+17 - ldd r6,Z+18 - ldd r7,Z+19 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - std Y+13,r26 - std Y+14,r27 - std Y+15,r2 - std Y+16,r3 - std Y+17,r4 - std Y+18,r5 - std Y+19,r6 - std Y+20,r7 - std Y+21,r8 - std Y+22,r9 - std Y+23,r10 - std Y+24,r11 - ldd r26,Z+24 - ldd r27,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r4,Z+28 - ldd r5,Z+29 - ldd r6,Z+30 - ldd r7,Z+31 - ldd r8,Z+32 - ldd r9,Z+33 - ldd r10,Z+34 - ldd r11,Z+35 - std Y+25,r26 - std Y+26,r27 - std Y+27,r2 - std Y+28,r3 - std Y+29,r4 - std Y+30,r5 - std Y+31,r6 - std Y+32,r7 - std Y+33,r8 - std Y+34,r9 - std Y+35,r10 - std Y+36,r11 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+37,r26 - std Y+38,r27 - std Y+39,r2 - std Y+40,r3 - std Y+41,r4 - std Y+42,r5 - std Y+43,r6 - std Y+44,r7 - std Y+45,r8 - std Y+46,r9 - std Y+47,r10 - std Y+48,r11 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r24,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif -99: - ldd r12,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - inc r30 - ldd r18,Y+13 - ldd r19,Y+25 - ldd r20,Y+37 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+61,r23 - mov r14,r20 - eor r14,r12 - mov r26,r18 - or r26,r19 - eor r26,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+1,r21 - mov r21,r26 - and r21,r12 - eor r21,r13 - std Y+49,r21 - ldd r12,Y+2 - ldd r18,Y+14 - ldd r19,Y+26 - ldd r20,Y+38 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+62,r23 - mov 
r14,r20 - eor r14,r12 - mov r27,r18 - or r27,r19 - eor r27,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+2,r21 - mov r21,r27 - and r21,r12 - eor r21,r13 - std Y+50,r21 - ldd r12,Y+3 - ldd r18,Y+15 - ldd r19,Y+27 - ldd r20,Y+39 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+63,r23 - mov r14,r20 - eor r14,r12 - mov r2,r18 - or r2,r19 - eor r2,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+3,r21 - mov r21,r2 - and r21,r12 - eor r21,r13 - std Y+51,r21 - ldd r12,Y+4 - ldd r18,Y+16 - ldd r19,Y+28 - ldd r20,Y+40 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,192 - sbci r29,255 - st Y,r23 - subi r28,64 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r3,r18 - or r3,r19 - eor r3,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+4,r21 - mov r21,r3 - and r21,r12 - eor r21,r13 - std Y+52,r21 - ldd r12,Y+5 - ldd r18,Y+17 - ldd r19,Y+29 - ldd r20,Y+41 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,191 - sbci r29,255 - st Y,r23 - subi r28,65 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r4,r18 - or r4,r19 - eor r4,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+5,r21 - mov r21,r4 - and r21,r12 - eor r21,r13 - std Y+53,r21 - ldd r12,Y+6 - ldd r18,Y+18 - ldd r19,Y+30 - ldd r20,Y+42 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,190 - sbci r29,255 - st Y,r23 - subi r28,66 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r5,r18 - or r5,r19 - eor r5,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+6,r21 - mov r21,r5 - and r21,r12 - eor r21,r13 - std Y+54,r21 - ldd r12,Y+7 - ldd r18,Y+19 - ldd r19,Y+31 - ldd r20,Y+43 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,189 - sbci r29,255 - st Y,r23 - subi r28,67 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r6,r18 - or r6,r19 - eor r6,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+7,r21 - mov r21,r6 - and r21,r12 - eor r21,r13 - std Y+55,r21 - ldd r12,Y+8 - ldd r18,Y+20 - ldd r19,Y+32 - ldd r20,Y+44 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,188 - sbci r29,255 - st Y,r23 - subi r28,68 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r7,r18 - or r7,r19 - eor r7,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+8,r21 - mov r21,r7 - and r21,r12 - eor r21,r13 - std Y+56,r21 - ldd r12,Y+9 - ldd r18,Y+21 - ldd r19,Y+33 - ldd r20,Y+45 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,187 - sbci r29,255 - st Y,r23 - subi r28,69 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r8,r18 - or r8,r19 - eor r8,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+9,r21 - mov r21,r8 - and r21,r12 - eor r21,r13 - std Y+57,r21 - ldd r12,Y+10 - ldd r18,Y+22 - ldd r19,Y+34 - ldd r20,Y+46 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,186 - sbci r29,255 - st Y,r23 - subi r28,70 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r9,r18 - or r9,r19 - eor r9,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+10,r21 - mov r21,r9 - and r21,r12 - eor r21,r13 - std Y+58,r21 - ldd r12,Y+11 - ldd r18,Y+23 - ldd r19,Y+35 - ldd r20,Y+47 - com r12 - mov r13,r18 - and r13,r12 
- eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,185 - sbci r29,255 - st Y,r23 - subi r28,71 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r10,r18 - or r10,r19 - eor r10,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+11,r21 - mov r21,r10 - and r21,r12 - eor r21,r13 - std Y+59,r21 - ldd r12,Y+12 - ldd r18,Y+24 - ldd r19,Y+36 - ldd r20,Y+48 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,184 - sbci r29,255 - st Y,r23 - subi r28,72 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r11,r18 - or r11,r19 - eor r11,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+12,r21 - mov r21,r11 - and r21,r12 - eor r21,r13 - std Y+60,r21 - std Y+25,r11 - std Y+26,r26 - std Y+27,r27 - std Y+28,r2 - std Y+29,r3 - std Y+30,r4 - std Y+31,r5 - std Y+32,r6 - std Y+33,r7 - std Y+34,r8 - std Y+35,r9 - std Y+36,r10 - ldd r26,Y+49 - ldd r27,Y+50 - ldd r2,Y+51 - ldd r3,Y+52 - ldd r4,Y+53 - ldd r5,Y+54 - ldd r6,Y+55 - ldd r7,Y+56 - ldd r8,Y+57 - ldd r9,Y+58 - ldd r10,Y+59 - ldd r11,Y+60 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - adc r26,r1 - std Y+13,r26 - std Y+14,r27 - std Y+15,r2 - std Y+16,r3 - std Y+17,r4 - std Y+18,r5 - std Y+19,r6 - std Y+20,r7 - std Y+21,r8 - std Y+22,r9 - std Y+23,r10 - std Y+24,r11 - adiw r28,61 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y - subi r28,72 - sbc r29,r1 - bst r26,0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r3 - ror r2 - ror r27 - ror r26 - bld r11,7 - std Y+37,r5 - std Y+38,r6 - std Y+39,r7 - std Y+40,r8 - std Y+41,r9 - std Y+42,r10 - std Y+43,r11 - std Y+44,r26 - std Y+45,r27 - std Y+46,r2 - std Y+47,r3 - std Y+48,r4 - dec r22 - breq 5542f - rjmp 99b -5542: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r2,Y+15 - ldd r3,Y+16 - ldd r4,Y+17 - ldd r5,Y+18 - ldd r6,Y+19 - ldd r7,Y+20 - ldd r8,Y+21 - ldd r9,Y+22 - ldd r10,Y+23 - ldd r11,Y+24 - std Z+12,r26 - std Z+13,r27 - std Z+14,r2 - std Z+15,r3 - std Z+16,r4 - std Z+17,r5 - std Z+18,r6 - std Z+19,r7 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - ldd r26,Y+25 - ldd r27,Y+26 - ldd r2,Y+27 - ldd r3,Y+28 - ldd r4,Y+29 - ldd r5,Y+30 - ldd r6,Y+31 - ldd r7,Y+32 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - std Z+24,r26 - std Z+25,r27 - std Z+26,r2 - std Z+27,r3 - std Z+28,r4 - std Z+29,r5 - std Z+30,r6 - std Z+31,r7 - std Z+32,r8 - std Z+33,r9 - std Z+34,r10 - std Z+35,r11 - ldd r26,Y+37 - ldd r27,Y+38 - ldd r2,Y+39 - ldd r3,Y+40 - ldd r4,Y+41 - ldd r5,Y+42 - ldd r6,Y+43 - ldd r7,Y+44 - ldd r8,Y+45 - ldd r9,Y+46 - ldd r10,Y+47 - ldd r11,Y+48 - std Z+36,r26 - std Z+37,r27 - std Z+38,r2 - std Z+39,r3 - std Z+40,r4 - std Z+41,r5 - std Z+42,r6 - std Z+43,r7 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - subi r28,184 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - 
pop r2 - pop r29 - pop r28 - ret - .size knot384_permute_7, .-knot384_permute_7 - -#endif diff --git a/knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot-512-avr.S b/knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot-512-avr.S deleted file mode 100644 index 6f92ac3..0000000 --- a/knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot-512-avr.S +++ /dev/null @@ -1,2315 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot512_permute_7 - .type knot512_permute_7, @function -knot512_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,96 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 113 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r26,Z+16 - ldd r27,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - ldd r26,Z+32 - ldd r27,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r8,Z+40 - ldd r9,Z+41 - ldd r10,Z+42 - ldd r11,Z+43 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - std Y+33,r26 - std Y+34,r27 - std Y+35,r2 - std Y+36,r3 - std Y+37,r4 - std Y+38,r5 - std Y+39,r6 - std Y+40,r7 - std Y+41,r8 - std Y+42,r9 - std Y+43,r10 - std Y+44,r11 - std Y+45,r12 - std Y+46,r13 - std Y+47,r14 - std Y+48,r15 
- ldd r26,Z+48 - ldd r27,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r8,Z+56 - ldd r9,Z+57 - ldd r10,Z+58 - ldd r11,Z+59 - ldd r12,Z+60 - ldd r13,Z+61 - ldd r14,Z+62 - ldd r15,Z+63 - adiw r28,49 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y+,r12 - st Y+,r13 - st Y+,r14 - st Y,r15 - subi r28,64 - sbc r29,r1 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r17,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -134: - ldd r24,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r24,r18 - inc r30 - ldd r18,Y+17 - ldd r19,Y+33 - ldd r20,Y+49 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,175 - sbci r29,255 - st Y,r23 - subi r28,81 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r26,r18 - or r26,r19 - eor r26,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+1,r21 - mov r21,r26 - and r21,r24 - eor r21,r25 - subi r28,191 - sbci r29,255 - st Y,r21 - subi r28,65 - sbc r29,r1 - ldd r24,Y+2 - ldd r18,Y+18 - ldd r19,Y+34 - ldd r20,Y+50 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,174 - sbci r29,255 - st Y,r23 - subi r28,82 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r27,r18 - or r27,r19 - eor r27,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+2,r21 - mov r21,r27 - and r21,r24 - eor r21,r25 - subi r28,190 - sbci r29,255 - st Y,r21 - subi r28,66 - sbc r29,r1 - ldd r24,Y+3 - ldd r18,Y+19 - ldd r19,Y+35 - ldd r20,Y+51 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,173 - sbci r29,255 - st Y,r23 - subi r28,83 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r2,r18 - or r2,r19 - eor r2,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+3,r21 - mov r21,r2 - and r21,r24 - eor r21,r25 - subi r28,189 - sbci r29,255 - st Y,r21 - subi r28,67 - sbc r29,r1 - ldd r24,Y+4 - ldd r18,Y+20 - ldd r19,Y+36 - ldd r20,Y+52 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,172 - sbci r29,255 - st Y,r23 - subi r28,84 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r3,r18 - or r3,r19 - eor r3,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+4,r21 - mov r21,r3 - and r21,r24 - eor r21,r25 - subi r28,188 - sbci r29,255 - st Y,r21 - subi r28,68 - sbc r29,r1 - ldd r24,Y+5 - ldd r18,Y+21 - ldd r19,Y+37 - ldd r20,Y+53 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,171 - sbci r29,255 - st Y,r23 - subi r28,85 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r4,r18 - or r4,r19 - eor r4,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+5,r21 - mov r21,r4 - and r21,r24 - eor r21,r25 - subi r28,187 - sbci r29,255 - st Y,r21 - subi r28,69 - sbc r29,r1 - ldd r24,Y+6 - ldd r18,Y+22 - ldd r19,Y+38 - ldd r20,Y+54 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,170 - sbci r29,255 - st Y,r23 - subi r28,86 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r5,r18 - or r5,r19 - eor r5,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+6,r21 - mov r21,r5 - and r21,r24 - eor 
r21,r25 - subi r28,186 - sbci r29,255 - st Y,r21 - subi r28,70 - sbc r29,r1 - ldd r24,Y+7 - ldd r18,Y+23 - ldd r19,Y+39 - ldd r20,Y+55 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,169 - sbci r29,255 - st Y,r23 - subi r28,87 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r6,r18 - or r6,r19 - eor r6,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+7,r21 - mov r21,r6 - and r21,r24 - eor r21,r25 - subi r28,185 - sbci r29,255 - st Y,r21 - subi r28,71 - sbc r29,r1 - ldd r24,Y+8 - ldd r18,Y+24 - ldd r19,Y+40 - ldd r20,Y+56 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,168 - sbci r29,255 - st Y,r23 - subi r28,88 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r7,r18 - or r7,r19 - eor r7,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+8,r21 - mov r21,r7 - and r21,r24 - eor r21,r25 - subi r28,184 - sbci r29,255 - st Y,r21 - subi r28,72 - sbc r29,r1 - ldd r24,Y+9 - ldd r18,Y+25 - ldd r19,Y+41 - ldd r20,Y+57 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,167 - sbci r29,255 - st Y,r23 - subi r28,89 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r8,r18 - or r8,r19 - eor r8,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+9,r21 - mov r21,r8 - and r21,r24 - eor r21,r25 - subi r28,183 - sbci r29,255 - st Y,r21 - subi r28,73 - sbc r29,r1 - ldd r24,Y+10 - ldd r18,Y+26 - ldd r19,Y+42 - ldd r20,Y+58 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,166 - sbci r29,255 - st Y,r23 - subi r28,90 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r9,r18 - or r9,r19 - eor r9,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+10,r21 - mov r21,r9 - and r21,r24 - eor r21,r25 - subi r28,182 - sbci r29,255 - st Y,r21 - subi r28,74 - sbc r29,r1 - ldd r24,Y+11 - ldd r18,Y+27 - ldd r19,Y+43 - ldd r20,Y+59 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,165 - sbci r29,255 - st Y,r23 - subi r28,91 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r10,r18 - or r10,r19 - eor r10,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+11,r21 - mov r21,r10 - and r21,r24 - eor r21,r25 - subi r28,181 - sbci r29,255 - st Y,r21 - subi r28,75 - sbc r29,r1 - ldd r24,Y+12 - ldd r18,Y+28 - ldd r19,Y+44 - ldd r20,Y+60 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,164 - sbci r29,255 - st Y,r23 - subi r28,92 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r11,r18 - or r11,r19 - eor r11,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+12,r21 - mov r21,r11 - and r21,r24 - eor r21,r25 - subi r28,180 - sbci r29,255 - st Y,r21 - subi r28,76 - sbc r29,r1 - ldd r24,Y+13 - ldd r18,Y+29 - ldd r19,Y+45 - ldd r20,Y+61 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,163 - sbci r29,255 - st Y,r23 - subi r28,93 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r12,r18 - or r12,r19 - eor r12,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+13,r21 - mov r21,r12 - and r21,r24 - eor r21,r25 - subi r28,179 - sbci r29,255 - st Y,r21 - subi r28,77 - sbc r29,r1 - ldd r24,Y+14 - ldd r18,Y+30 - ldd r19,Y+46 - ldd r20,Y+62 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,162 - sbci r29,255 - st Y,r23 - subi r28,94 - 
sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r13,r18 - or r13,r19 - eor r13,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+14,r21 - mov r21,r13 - and r21,r24 - eor r21,r25 - subi r28,178 - sbci r29,255 - st Y,r21 - subi r28,78 - sbc r29,r1 - ldd r24,Y+15 - ldd r18,Y+31 - ldd r19,Y+47 - ldd r20,Y+63 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,161 - sbci r29,255 - st Y,r23 - subi r28,95 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r14,r18 - or r14,r19 - eor r14,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+15,r21 - mov r21,r14 - and r21,r24 - eor r21,r25 - subi r28,177 - sbci r29,255 - st Y,r21 - subi r28,79 - sbc r29,r1 - ldd r24,Y+16 - ldd r18,Y+32 - ldd r19,Y+48 - subi r28,192 - sbci r29,255 - ld r20,Y - subi r28,64 - sbc r29,r1 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,160 - sbci r29,255 - st Y,r23 - subi r28,96 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r15,r18 - or r15,r19 - eor r15,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+16,r21 - mov r21,r15 - and r21,r24 - eor r21,r25 - subi r28,176 - sbci r29,255 - st Y,r21 - subi r28,80 - sbc r29,r1 - std Y+33,r14 - std Y+34,r15 - std Y+35,r26 - std Y+36,r27 - std Y+37,r2 - std Y+38,r3 - std Y+39,r4 - std Y+40,r5 - std Y+41,r6 - std Y+42,r7 - std Y+43,r8 - std Y+44,r9 - std Y+45,r10 - std Y+46,r11 - std Y+47,r12 - std Y+48,r13 - subi r28,191 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,80 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,96 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - adiw r28,49 - st Y+,r13 - st Y+,r14 - st Y+,r15 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y,r12 - subi r28,64 - sbc r29,r1 - dec r22 - breq 5812f - rjmp 134b -5812: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r26,Y+17 - ldd r27,Y+18 - ldd r2,Y+19 - ldd r3,Y+20 - ldd r4,Y+21 - ldd r5,Y+22 - ldd r6,Y+23 - ldd r7,Y+24 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd 
r14,Y+31 - ldd r15,Y+32 - std Z+16,r26 - std Z+17,r27 - std Z+18,r2 - std Z+19,r3 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - ldd r26,Y+33 - ldd r27,Y+34 - ldd r2,Y+35 - ldd r3,Y+36 - ldd r4,Y+37 - ldd r5,Y+38 - ldd r6,Y+39 - ldd r7,Y+40 - ldd r8,Y+41 - ldd r9,Y+42 - ldd r10,Y+43 - ldd r11,Y+44 - ldd r12,Y+45 - ldd r13,Y+46 - ldd r14,Y+47 - ldd r15,Y+48 - std Z+32,r26 - std Z+33,r27 - std Z+34,r2 - std Z+35,r3 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r8 - std Z+41,r9 - std Z+42,r10 - std Z+43,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - adiw r28,49 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,64 - sbc r29,r1 - std Z+48,r26 - std Z+49,r27 - std Z+50,r2 - std Z+51,r3 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - std Z+56,r8 - std Z+57,r9 - std Z+58,r10 - std Z+59,r11 - std Z+60,r12 - std Z+61,r13 - std Z+62,r14 - std Z+63,r15 - subi r28,160 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot512_permute_7, .-knot512_permute_7 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_8, @object - .size table_8, 140 -table_8: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 17 - .byte 35 - .byte 71 - .byte 142 - .byte 28 - .byte 56 - .byte 113 - .byte 226 - .byte 196 - .byte 137 - .byte 18 - .byte 37 - .byte 75 - .byte 151 - .byte 46 - .byte 92 - .byte 184 - .byte 112 - .byte 224 - .byte 192 - .byte 129 - .byte 3 - .byte 6 - .byte 12 - .byte 25 - .byte 50 - .byte 100 - .byte 201 - .byte 146 - .byte 36 - .byte 73 - .byte 147 - .byte 38 - .byte 77 - .byte 155 - .byte 55 - .byte 110 - .byte 220 - .byte 185 - .byte 114 - .byte 228 - .byte 200 - .byte 144 - .byte 32 - .byte 65 - .byte 130 - .byte 5 - .byte 10 - .byte 21 - .byte 43 - .byte 86 - .byte 173 - .byte 91 - .byte 182 - .byte 109 - .byte 218 - .byte 181 - .byte 107 - .byte 214 - .byte 172 - .byte 89 - .byte 178 - .byte 101 - .byte 203 - .byte 150 - .byte 44 - .byte 88 - .byte 176 - .byte 97 - .byte 195 - .byte 135 - .byte 15 - .byte 31 - .byte 62 - .byte 125 - .byte 251 - .byte 246 - .byte 237 - .byte 219 - .byte 183 - .byte 111 - .byte 222 - .byte 189 - .byte 122 - .byte 245 - .byte 235 - .byte 215 - .byte 174 - .byte 93 - .byte 186 - .byte 116 - .byte 232 - .byte 209 - .byte 162 - .byte 68 - .byte 136 - .byte 16 - .byte 33 - .byte 67 - .byte 134 - .byte 13 - .byte 27 - .byte 54 - .byte 108 - .byte 216 - .byte 177 - .byte 99 - .byte 199 - .byte 143 - .byte 30 - .byte 60 - .byte 121 - .byte 243 - .byte 231 - .byte 206 - .byte 156 - .byte 57 - .byte 115 - .byte 230 - .byte 204 - .byte 152 - .byte 49 - .byte 98 - .byte 197 - .byte 139 - .byte 22 - .byte 45 - .byte 90 - .byte 180 - .byte 105 - .byte 210 - .byte 164 - .byte 72 - .byte 145 - .byte 34 - .byte 69 - - .text -.global knot512_permute_8 - .type knot512_permute_8, @function -knot512_permute_8: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,96 - sbci r29,0 - 
in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 113 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r26,Z+16 - ldd r27,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - ldd r26,Z+32 - ldd r27,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r8,Z+40 - ldd r9,Z+41 - ldd r10,Z+42 - ldd r11,Z+43 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - std Y+33,r26 - std Y+34,r27 - std Y+35,r2 - std Y+36,r3 - std Y+37,r4 - std Y+38,r5 - std Y+39,r6 - std Y+40,r7 - std Y+41,r8 - std Y+42,r9 - std Y+43,r10 - std Y+44,r11 - std Y+45,r12 - std Y+46,r13 - std Y+47,r14 - std Y+48,r15 - ldd r26,Z+48 - ldd r27,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r8,Z+56 - ldd r9,Z+57 - ldd r10,Z+58 - ldd r11,Z+59 - ldd r12,Z+60 - ldd r13,Z+61 - ldd r14,Z+62 - ldd r15,Z+63 - adiw r28,49 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y+,r12 - st Y+,r13 - st Y+,r14 - st Y,r15 - subi r28,64 - sbc r29,r1 - push r31 - push r30 - ldi r30,lo8(table_8) - ldi r31,hi8(table_8) -#if defined(RAMPZ) - ldi r17,hh8(table_8) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -134: - ldd r24,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r24,r18 - inc r30 - ldd r18,Y+17 - ldd r19,Y+33 - ldd r20,Y+49 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,175 - sbci r29,255 - st Y,r23 - subi r28,81 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r26,r18 - or r26,r19 - eor r26,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+1,r21 - mov r21,r26 - and r21,r24 - eor r21,r25 - subi r28,191 - sbci r29,255 - st Y,r21 - subi r28,65 - sbc r29,r1 - ldd r24,Y+2 - ldd r18,Y+18 - ldd r19,Y+34 - ldd r20,Y+50 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,174 - sbci r29,255 - st Y,r23 - subi r28,82 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r27,r18 - or r27,r19 - eor r27,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+2,r21 - mov r21,r27 - and r21,r24 - eor r21,r25 - subi r28,190 - sbci r29,255 - st Y,r21 - subi r28,66 - sbc r29,r1 - ldd r24,Y+3 - ldd r18,Y+19 - ldd r19,Y+35 - ldd r20,Y+51 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,173 - sbci r29,255 - st Y,r23 - subi r28,83 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r2,r18 - or r2,r19 - eor r2,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor 
r21,r24 - std Y+3,r21 - mov r21,r2 - and r21,r24 - eor r21,r25 - subi r28,189 - sbci r29,255 - st Y,r21 - subi r28,67 - sbc r29,r1 - ldd r24,Y+4 - ldd r18,Y+20 - ldd r19,Y+36 - ldd r20,Y+52 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,172 - sbci r29,255 - st Y,r23 - subi r28,84 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r3,r18 - or r3,r19 - eor r3,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+4,r21 - mov r21,r3 - and r21,r24 - eor r21,r25 - subi r28,188 - sbci r29,255 - st Y,r21 - subi r28,68 - sbc r29,r1 - ldd r24,Y+5 - ldd r18,Y+21 - ldd r19,Y+37 - ldd r20,Y+53 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,171 - sbci r29,255 - st Y,r23 - subi r28,85 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r4,r18 - or r4,r19 - eor r4,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+5,r21 - mov r21,r4 - and r21,r24 - eor r21,r25 - subi r28,187 - sbci r29,255 - st Y,r21 - subi r28,69 - sbc r29,r1 - ldd r24,Y+6 - ldd r18,Y+22 - ldd r19,Y+38 - ldd r20,Y+54 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,170 - sbci r29,255 - st Y,r23 - subi r28,86 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r5,r18 - or r5,r19 - eor r5,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+6,r21 - mov r21,r5 - and r21,r24 - eor r21,r25 - subi r28,186 - sbci r29,255 - st Y,r21 - subi r28,70 - sbc r29,r1 - ldd r24,Y+7 - ldd r18,Y+23 - ldd r19,Y+39 - ldd r20,Y+55 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,169 - sbci r29,255 - st Y,r23 - subi r28,87 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r6,r18 - or r6,r19 - eor r6,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+7,r21 - mov r21,r6 - and r21,r24 - eor r21,r25 - subi r28,185 - sbci r29,255 - st Y,r21 - subi r28,71 - sbc r29,r1 - ldd r24,Y+8 - ldd r18,Y+24 - ldd r19,Y+40 - ldd r20,Y+56 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,168 - sbci r29,255 - st Y,r23 - subi r28,88 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r7,r18 - or r7,r19 - eor r7,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+8,r21 - mov r21,r7 - and r21,r24 - eor r21,r25 - subi r28,184 - sbci r29,255 - st Y,r21 - subi r28,72 - sbc r29,r1 - ldd r24,Y+9 - ldd r18,Y+25 - ldd r19,Y+41 - ldd r20,Y+57 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,167 - sbci r29,255 - st Y,r23 - subi r28,89 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r8,r18 - or r8,r19 - eor r8,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+9,r21 - mov r21,r8 - and r21,r24 - eor r21,r25 - subi r28,183 - sbci r29,255 - st Y,r21 - subi r28,73 - sbc r29,r1 - ldd r24,Y+10 - ldd r18,Y+26 - ldd r19,Y+42 - ldd r20,Y+58 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,166 - sbci r29,255 - st Y,r23 - subi r28,90 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r9,r18 - or r9,r19 - eor r9,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+10,r21 - mov r21,r9 - and r21,r24 - eor r21,r25 - subi r28,182 - sbci r29,255 - st Y,r21 - subi r28,74 - sbc r29,r1 - ldd r24,Y+11 - ldd r18,Y+27 - ldd r19,Y+43 - ldd r20,Y+59 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,165 - 
sbci r29,255 - st Y,r23 - subi r28,91 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r10,r18 - or r10,r19 - eor r10,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+11,r21 - mov r21,r10 - and r21,r24 - eor r21,r25 - subi r28,181 - sbci r29,255 - st Y,r21 - subi r28,75 - sbc r29,r1 - ldd r24,Y+12 - ldd r18,Y+28 - ldd r19,Y+44 - ldd r20,Y+60 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,164 - sbci r29,255 - st Y,r23 - subi r28,92 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r11,r18 - or r11,r19 - eor r11,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+12,r21 - mov r21,r11 - and r21,r24 - eor r21,r25 - subi r28,180 - sbci r29,255 - st Y,r21 - subi r28,76 - sbc r29,r1 - ldd r24,Y+13 - ldd r18,Y+29 - ldd r19,Y+45 - ldd r20,Y+61 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,163 - sbci r29,255 - st Y,r23 - subi r28,93 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r12,r18 - or r12,r19 - eor r12,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+13,r21 - mov r21,r12 - and r21,r24 - eor r21,r25 - subi r28,179 - sbci r29,255 - st Y,r21 - subi r28,77 - sbc r29,r1 - ldd r24,Y+14 - ldd r18,Y+30 - ldd r19,Y+46 - ldd r20,Y+62 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,162 - sbci r29,255 - st Y,r23 - subi r28,94 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r13,r18 - or r13,r19 - eor r13,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+14,r21 - mov r21,r13 - and r21,r24 - eor r21,r25 - subi r28,178 - sbci r29,255 - st Y,r21 - subi r28,78 - sbc r29,r1 - ldd r24,Y+15 - ldd r18,Y+31 - ldd r19,Y+47 - ldd r20,Y+63 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,161 - sbci r29,255 - st Y,r23 - subi r28,95 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r14,r18 - or r14,r19 - eor r14,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+15,r21 - mov r21,r14 - and r21,r24 - eor r21,r25 - subi r28,177 - sbci r29,255 - st Y,r21 - subi r28,79 - sbc r29,r1 - ldd r24,Y+16 - ldd r18,Y+32 - ldd r19,Y+48 - subi r28,192 - sbci r29,255 - ld r20,Y - subi r28,64 - sbc r29,r1 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,160 - sbci r29,255 - st Y,r23 - subi r28,96 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r15,r18 - or r15,r19 - eor r15,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+16,r21 - mov r21,r15 - and r21,r24 - eor r21,r25 - subi r28,176 - sbci r29,255 - st Y,r21 - subi r28,80 - sbc r29,r1 - std Y+33,r14 - std Y+34,r15 - std Y+35,r26 - std Y+36,r27 - std Y+37,r2 - std Y+38,r3 - std Y+39,r4 - std Y+40,r5 - std Y+41,r6 - std Y+42,r7 - std Y+43,r8 - std Y+44,r9 - std Y+45,r10 - std Y+46,r11 - std Y+47,r12 - std Y+48,r13 - subi r28,191 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,80 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - 
std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,96 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - adiw r28,49 - st Y+,r13 - st Y+,r14 - st Y+,r15 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y,r12 - subi r28,64 - sbc r29,r1 - dec r22 - breq 5812f - rjmp 134b -5812: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r26,Y+17 - ldd r27,Y+18 - ldd r2,Y+19 - ldd r3,Y+20 - ldd r4,Y+21 - ldd r5,Y+22 - ldd r6,Y+23 - ldd r7,Y+24 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - std Z+16,r26 - std Z+17,r27 - std Z+18,r2 - std Z+19,r3 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - ldd r26,Y+33 - ldd r27,Y+34 - ldd r2,Y+35 - ldd r3,Y+36 - ldd r4,Y+37 - ldd r5,Y+38 - ldd r6,Y+39 - ldd r7,Y+40 - ldd r8,Y+41 - ldd r9,Y+42 - ldd r10,Y+43 - ldd r11,Y+44 - ldd r12,Y+45 - ldd r13,Y+46 - ldd r14,Y+47 - ldd r15,Y+48 - std Z+32,r26 - std Z+33,r27 - std Z+34,r2 - std Z+35,r3 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r8 - std Z+41,r9 - std Z+42,r10 - std Z+43,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - adiw r28,49 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,64 - sbc r29,r1 - std Z+48,r26 - std Z+49,r27 - std Z+50,r2 - std Z+51,r3 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - std Z+56,r8 - std Z+57,r9 - std Z+58,r10 - std Z+59,r11 - std Z+60,r12 - std Z+61,r13 - std Z+62,r14 - std Z+63,r15 - subi r28,160 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot512_permute_8, .-knot512_permute_8 - -#endif diff --git a/knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot.c b/knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot.c deleted file mode 100644 index f8b378e..0000000 --- a/knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot.c +++ /dev/null @@ -1,301 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-knot.h" - -#if !defined(__AVR__) - -/* Round constants for the KNOT-256, KNOT-384, and KNOT-512 permutations */ -static uint8_t const rc6[52] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06, 0x0c, 0x18, 0x31, 0x22, - 0x05, 0x0a, 0x14, 0x29, 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28, - 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24, 0x09, 0x12, 0x25, 0x0b, - 0x16, 0x2d, 0x1b, 0x37, 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26, - 0x0d, 0x1a, 0x35, 0x2a -}; -static uint8_t const rc7[104] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03, 0x06, 0x0c, 0x18, 0x30, - 0x61, 0x42, 0x05, 0x0a, 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c, - 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b, 0x16, 0x2c, 0x59, 0x33, - 0x67, 0x4e, 0x1d, 0x3a, 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f, - 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43, 0x07, 0x0e, 0x1c, 0x38, - 0x71, 0x62, 0x44, 0x09, 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36, - 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37, 0x6f, 0x5e, 0x3d, 0x7b, - 0x76, 0x6c, 0x58, 0x31, 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25, - 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c -}; -static uint8_t const rc8[140] = { - 0x01, 0x02, 0x04, 0x08, 0x11, 0x23, 0x47, 0x8e, 0x1c, 0x38, 0x71, 0xe2, - 0xc4, 0x89, 0x12, 0x25, 0x4b, 0x97, 0x2e, 0x5c, 0xb8, 0x70, 0xe0, 0xc0, - 0x81, 0x03, 0x06, 0x0c, 0x19, 0x32, 0x64, 0xc9, 0x92, 0x24, 0x49, 0x93, - 0x26, 0x4d, 0x9b, 0x37, 0x6e, 0xdc, 0xb9, 0x72, 0xe4, 0xc8, 0x90, 0x20, - 0x41, 0x82, 0x05, 0x0a, 0x15, 0x2b, 0x56, 0xad, 0x5b, 0xb6, 0x6d, 0xda, - 0xb5, 0x6b, 0xd6, 0xac, 0x59, 0xb2, 0x65, 0xcb, 0x96, 0x2c, 0x58, 0xb0, - 0x61, 0xc3, 0x87, 0x0f, 0x1f, 0x3e, 0x7d, 0xfb, 0xf6, 0xed, 0xdb, 0xb7, - 0x6f, 0xde, 0xbd, 0x7a, 0xf5, 0xeb, 0xd7, 0xae, 0x5d, 0xba, 0x74, 0xe8, - 0xd1, 0xa2, 0x44, 0x88, 0x10, 0x21, 0x43, 0x86, 0x0d, 0x1b, 0x36, 0x6c, - 0xd8, 0xb1, 0x63, 0xc7, 0x8f, 0x1e, 0x3c, 0x79, 0xf3, 0xe7, 0xce, 0x9c, - 0x39, 0x73, 0xe6, 0xcc, 0x98, 0x31, 0x62, 0xc5, 0x8b, 0x16, 0x2d, 0x5a, - 0xb4, 0x69, 0xd2, 0xa4, 0x48, 0x91, 0x22, 0x45 -}; - -/* Applies the KNOT S-box to four 64-bit words in bit-sliced mode */ -#define knot_sbox64(a0, a1, a2, a3, b1, b2, b3) \ - do { \ - uint64_t t1, t3, t6; \ - t1 = ~(a0); \ - t3 = (a2) ^ ((a1) & t1); \ - (b3) = (a3) ^ t3; \ - t6 = (a3) ^ t1; \ - (b2) = ((a1) | (a2)) ^ t6; \ - t1 = (a1) ^ (a3); \ - (a0) = t1 ^ (t3 & t6); \ - (b1) = t3 ^ ((b2) & t1); \ - } while (0) - -/* Applies 
the KNOT S-box to four 32-bit words in bit-sliced mode */ -#define knot_sbox32(a0, a1, a2, a3, b1, b2, b3) \ - do { \ - uint32_t t1, t3, t6; \ - t1 = ~(a0); \ - t3 = (a2) ^ ((a1) & t1); \ - (b3) = (a3) ^ t3; \ - t6 = (a3) ^ t1; \ - (b2) = ((a1) | (a2)) ^ t6; \ - t1 = (a1) ^ (a3); \ - (a0) = t1 ^ (t3 & t6); \ - (b1) = t3 ^ ((b2) & t1); \ - } while (0) - -static void knot256_permute - (knot256_state_t *state, const uint8_t *rc, uint8_t rounds) -{ - uint64_t b1, b2, b3; - - /* Load the input state into local variables; each row is 64 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x1, x2, x3, b1, b2, b3); - - /* Linear diffusion layer */ - x1 = leftRotate1_64(b1); - x2 = leftRotate8_64(b2); - x3 = leftRotate25_64(b3); - } - - /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); -#endif -} - -void knot256_permute_6(knot256_state_t *state, uint8_t rounds) -{ - knot256_permute(state, rc6, rounds); -} - -void knot256_permute_7(knot256_state_t *state, uint8_t rounds) -{ - knot256_permute(state, rc7, rounds); -} - -void knot384_permute_7(knot384_state_t *state, uint8_t rounds) -{ - const uint8_t *rc = rc7; - uint64_t b2, b4, b6; - uint32_t b3, b5, b7; - - /* Load the input state into local variables; each row is 96 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint32_t x1 = state->W[2]; - uint64_t x2 = state->W[3] | (((uint64_t)(state->W[4])) << 32); - uint32_t x3 = state->W[5]; - uint64_t x4 = state->S[3]; - uint32_t x5 = state->W[8]; - uint64_t x6 = state->W[9] | (((uint64_t)(state->W[10])) << 32); - uint32_t x7 = state->W[11]; -#else - uint64_t x0 = le_load_word64(state->B); - uint32_t x1 = le_load_word32(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 12); - uint32_t x3 = le_load_word32(state->B + 20); - uint64_t x4 = le_load_word64(state->B + 24); - uint32_t x5 = le_load_word32(state->B + 32); - uint64_t x6 = le_load_word64(state->B + 36); - uint32_t x7 = le_load_word32(state->B + 44); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x2, x4, x6, b2, b4, b6); - knot_sbox32(x1, x3, x5, x7, b3, b5, b7); - - /* Linear diffusion layer */ - #define leftRotateShort_96(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | ((b1) >> (32 - (bits))); \ - (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ - } while (0) - #define leftRotateLong_96(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | \ - (((uint64_t)(b1)) << ((bits) - 32)) | \ - ((b0) >> (96 - (bits))); \ - (a1) = (uint32_t)(((b0) << ((bits) - 32)) >> 32); \ - } while (0) - leftRotateShort_96(x2, x3, b2, b3, 1); - leftRotateShort_96(x4, x5, b4, b5, 8); - leftRotateLong_96(x6, x7, b6, b7, 55); - } - 
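As a standalone illustration (separate from the patch content), the bit-sliced S-box above can be exercised one bit position at a time: because knot_sbox32 uses only bitwise operations, each bit index of the four words is an independent 4-bit S-box evaluation. The sketch below copies the knot_sbox32 macro verbatim and enumerates the 16 possible inputs at bit 0; the driver code and the choice of which row supplies which bit are illustrative assumptions, not part of the original sources.

#include <stdint.h>
#include <stdio.h>

/* Copied verbatim from the deleted internal-knot.c above. */
#define knot_sbox32(a0, a1, a2, a3, b1, b2, b3) \
    do { \
        uint32_t t1, t3, t6; \
        t1 = ~(a0); \
        t3 = (a2) ^ ((a1) & t1); \
        (b3) = (a3) ^ t3; \
        t6 = (a3) ^ t1; \
        (b2) = ((a1) | (a2)) ^ t6; \
        t1 = (a1) ^ (a3); \
        (a0) = t1 ^ (t3 & t6); \
        (b1) = t3 ^ ((b2) & t1); \
    } while (0)

int main(void)
{
    unsigned in;
    for (in = 0; in < 16; ++in) {
        /* Place one input bit of each row at bit position 0. */
        uint32_t a0 = (in >> 0) & 1, a1 = (in >> 1) & 1;
        uint32_t a2 = (in >> 2) & 1, a3 = (in >> 3) & 1;
        uint32_t b1, b2, b3;
        knot_sbox32(a0, a1, a2, a3, b1, b2, b3);
        /* Bit 0 of the outputs gives the 4-bit substitution result. */
        unsigned out = (unsigned)((a0 & 1) | ((b1 & 1) << 1) |
                                  ((b2 & 1) << 2) | ((b3 & 1) << 3));
        printf("bit-0 input %2u -> output %2u\n", in, out);
    }
    return 0;
}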
- /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->W[2] = x1; - state->W[3] = (uint32_t)x2; - state->W[4] = (uint32_t)(x2 >> 32); - state->W[5] = x3; - state->S[3] = x4; - state->W[8] = x5; - state->W[9] = (uint32_t)x6; - state->W[10] = (uint32_t)(x6 >> 32); - state->W[11] = x7; -#else - le_store_word64(state->B, x0); - le_store_word32(state->B + 8, x1); - le_store_word64(state->B + 12, x2); - le_store_word32(state->B + 20, x3); - le_store_word64(state->B + 24, x4); - le_store_word32(state->B + 32, x5); - le_store_word64(state->B + 36, x6); - le_store_word32(state->B + 44, x7); -#endif -} - -static void knot512_permute - (knot512_state_t *state, const uint8_t *rc, uint8_t rounds) -{ - uint64_t b2, b3, b4, b5, b6, b7; - - /* Load the input state into local variables; each row is 128 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; - uint64_t x5 = state->S[5]; - uint64_t x6 = state->S[6]; - uint64_t x7 = state->S[7]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); - uint64_t x4 = le_load_word64(state->B + 32); - uint64_t x5 = le_load_word64(state->B + 40); - uint64_t x6 = le_load_word64(state->B + 48); - uint64_t x7 = le_load_word64(state->B + 56); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x2, x4, x6, b2, b4, b6); - knot_sbox64(x1, x3, x5, x7, b3, b5, b7); - - /* Linear diffusion layer */ - #define leftRotate_128(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | ((b1) >> (64 - (bits))); \ - (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ - } while (0) - leftRotate_128(x2, x3, b2, b3, 1); - leftRotate_128(x4, x5, b4, b5, 16); - leftRotate_128(x6, x7, b6, b7, 25); - } - - /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; - state->S[5] = x5; - state->S[6] = x6; - state->S[7] = x7; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); - le_store_word64(state->B + 32, x4); - le_store_word64(state->B + 40, x5); - le_store_word64(state->B + 48, x6); - le_store_word64(state->B + 56, x7); -#endif -} - -void knot512_permute_7(knot512_state_t *state, uint8_t rounds) -{ - knot512_permute(state, rc7, rounds); -} - -void knot512_permute_8(knot512_state_t *state, uint8_t rounds) -{ - knot512_permute(state, rc8, rounds); -} - -#endif /* !__AVR__ */ diff --git a/knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot.h b/knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot.h deleted file mode 100644 index 88a782c..0000000 --- a/knot/Implementations/crypto_aead/knot192/rhys-avr/internal-knot.h +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
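In knot384_permute_7 above, each 96-bit row lives in a 64-bit word plus a 32-bit word, and leftRotateShort_96 rebuilds the rotated row from both halves. The standalone check below copies that macro and compares it against the same rotation done in a single 128-bit integer (a GCC/Clang extension used here purely as a reference); the row values are arbitrary test data, not taken from the cipher.

#include <stdint.h>
#include <stdio.h>

/* Copied verbatim from the deleted internal-knot.c above (valid for bits < 32). */
#define leftRotateShort_96(a0, a1, b0, b1, bits) \
    do { \
        (a0) = ((b0) << (bits)) | ((b1) >> (32 - (bits))); \
        (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \
    } while (0)

int main(void)
{
    uint64_t low  = 0x0123456789abcdefULL;  /* bits 0..63 of the 96-bit row */
    uint32_t high = 0xfedcba98u;            /* bits 64..95 of the 96-bit row */
    uint64_t rlow;
    uint32_t rhigh;
    leftRotateShort_96(rlow, rhigh, low, high, 8);

    /* Reference: the same rotation in one 128-bit value, masked to 96 bits. */
    unsigned __int128 x   = ((unsigned __int128)high << 64) | low;
    unsigned __int128 ref = ((x << 8) | (x >> (96 - 8))) &
                            (((unsigned __int128)1 << 96) - 1);
    printf("macro:     %08x%016llx\n", rhigh, (unsigned long long)rlow);
    printf("reference: %08x%016llx\n", (uint32_t)(ref >> 64),
           (unsigned long long)(uint64_t)ref);
    return 0;
}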
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_KNOT_H -#define LW_INTERNAL_KNOT_H - -#include "internal-util.h" - -/** - * \file internal-knot.h - * \brief Permutations that are used by the KNOT AEAD and hash algorithms. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Internal state of the KNOT-256 permutation. - */ -typedef union -{ - uint64_t S[4]; /**< Words of the state */ - uint8_t B[32]; /**< Bytes of the state */ - -} knot256_state_t; - -/** - * \brief Internal state of the KNOT-384 permutation. - */ -typedef union -{ - uint64_t S[6]; /**< 64-bit words of the state */ - uint32_t W[12]; /**< 32-bit words of the state */ - uint8_t B[48]; /**< Bytes of the state */ - -} knot384_state_t; - -/** - * \brief Internal state of the KNOT-512 permutation. - */ -typedef union -{ - uint64_t S[8]; /**< Words of the state */ - uint8_t B[64]; /**< Bytes of the state */ - -} knot512_state_t; - -/** - * \brief Permutes the KNOT-256 state, using 6-bit round constants. - * - * \param state The KNOT-256 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 52. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot256_permute_6(knot256_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-256 state, using 7-bit round constants. - * - * \param state The KNOT-256 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot256_permute_7(knot256_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-384 state, using 7-bit round constants. - * - * \param state The KNOT-384 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot384_permute_7(knot384_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-512 state, using 7-bit round constants. - * - * \param state The KNOT-512 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot512_permute_7(knot512_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-512 state, using 8-bit round constants. - * - * \param state The KNOT-512 state to be permuted. 
- * \param rounds The number of rounds to be performed, 1 to 140. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot512_permute_8(knot512_state_t *state, uint8_t rounds); - -/** - * \brief Generic pointer to a function that performs a KNOT permutation. - * - * \param state Points to the permutation state. - * \param round Number of rounds to perform. - */ -typedef void (*knot_permute_t)(void *state, uint8_t rounds); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_aead/knot192/rhys-avr/internal-util.h b/knot/Implementations/crypto_aead/knot192/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/knot/Implementations/crypto_aead/knot192/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
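For orientation, a minimal usage sketch of the permutation API declared in the internal-knot.h shown above: initialise the 32-byte KNOT-256 state, run the full 52 rounds with 6-bit round constants, and dump the result. It assumes a copy of that header is on the include path; the input byte is an arbitrary test value, not a published test vector.

#include <stdio.h>
#include <string.h>
#include "internal-knot.h"   /* the header shown in the deleted file above */

int main(void)
{
    knot256_state_t state;
    memset(state.B, 0, sizeof(state.B));   /* 32-byte (256-bit) state */
    state.B[0] = 0x01;                     /* arbitrary non-zero input byte */

    knot256_permute_6(&state, 52);         /* all 52 rounds, 6-bit round constants */

    unsigned i;
    for (i = 0; i < sizeof(state.B); ++i)
        printf("%02x", state.B[i]);
    printf("\n");
    return 0;
}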
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
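A short round-trip sketch for the little-endian word helpers defined above; the two macros are copied verbatim from the deleted internal-util.h and the word value is arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Copied verbatim from the deleted internal-util.h above. */
#define le_load_word32(ptr) \
    ((((uint32_t)((ptr)[3])) << 24) | \
     (((uint32_t)((ptr)[2])) << 16) | \
     (((uint32_t)((ptr)[1])) << 8) | \
     ((uint32_t)((ptr)[0])))

#define le_store_word32(ptr, x) \
    do { \
        uint32_t _x = (x); \
        (ptr)[0] = (uint8_t)_x; \
        (ptr)[1] = (uint8_t)(_x >> 8); \
        (ptr)[2] = (uint8_t)(_x >> 16); \
        (ptr)[3] = (uint8_t)(_x >> 24); \
    } while (0)

int main(void)
{
    uint8_t buf[4];
    le_store_word32(buf, 0x11223344u);
    /* Least significant byte is stored first: 44 33 22 11. */
    printf("bytes:  %02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
    printf("reload: %08x\n", (unsigned)le_load_word32(buf));
    return 0;
}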
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
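A small standalone example of the two-source XOR helper defined above; the macro is copied verbatim and the buffer contents are arbitrary test data.

#include <stdio.h>

/* Copied verbatim from the deleted internal-util.h above. */
#define lw_xor_block_2_src(dest, src1, src2, len) \
    do { \
        unsigned char *_dest = (dest); \
        const unsigned char *_src1 = (src1); \
        const unsigned char *_src2 = (src2); \
        unsigned _len = (len); \
        while (_len > 0) { \
            *_dest++ = *_src1++ ^ *_src2++; \
            --_len; \
        } \
    } while (0)

int main(void)
{
    unsigned char a[4] = { 0x00, 0x11, 0x22, 0x33 };
    unsigned char b[4] = { 0xff, 0xff, 0x00, 0x00 };
    unsigned char out[4];
    int i;
    lw_xor_block_2_src(out, a, b, 4);
    for (i = 0; i < 4; ++i)
        printf("%02x", out[i]);
    printf("\n");   /* prints ffee2233 */
    return 0;
}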
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
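The composed forms above lean on the identity that any 32-bit rotation can be built from a rotation by a multiple of 8 plus a few single-bit steps, which is what AVR can do cheaply. A quick standalone check for the 5-bit case follows; the generic macros are copied from the deleted header, the composed macro is reproduced under an illustrative name, and the statement expressions need GCC or Clang, as in the original.

#include <stdint.h>
#include <stdio.h>

/* Generic rotations, copied verbatim from the deleted internal-util.h. */
#define leftRotate(a, bits) \
    (__extension__ ({ \
        uint32_t _temp = (a); \
        (_temp << (bits)) | (_temp >> (32 - (bits))); \
    }))

#define rightRotate(a, bits) \
    (__extension__ ({ \
        uint32_t _temp = (a); \
        (_temp >> (bits)) | (_temp << (32 - (bits))); \
    }))

/* Composed form of leftRotate5 used when LW_CRYPTO_ROTATE32_COMPOSED is set:
 * rotate left by 8, then right by three single-bit steps. */
#define leftRotate5_composed(a) \
    (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1))

int main(void)
{
    uint32_t x = 0x12345678u;   /* arbitrary test value */
    printf("direct:   %08x\n", (unsigned)leftRotate(x, 5));
    printf("composed: %08x\n", (unsigned)leftRotate5_composed(x));
    /* Both lines print 468acf02. */
    return 0;
}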
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/knot/Implementations/crypto_aead/knot192/rhys-avr/knot-aead.c b/knot/Implementations/crypto_aead/knot192/rhys-avr/knot-aead.c deleted file mode 100644 index 5825f01..0000000 --- a/knot/Implementations/crypto_aead/knot192/rhys-avr/knot-aead.c +++ /dev/null @@ -1,503 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "knot.h" -#include "internal-knot.h" -#include - -aead_cipher_t const knot_aead_128_256_cipher = { - "KNOT-AEAD-128-256", - KNOT_AEAD_128_KEY_SIZE, - KNOT_AEAD_128_NONCE_SIZE, - KNOT_AEAD_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_aead_128_256_encrypt, - knot_aead_128_256_decrypt -}; - -aead_cipher_t const knot_aead_128_384_cipher = { - "KNOT-AEAD-128-384", - KNOT_AEAD_128_KEY_SIZE, - KNOT_AEAD_128_NONCE_SIZE, - KNOT_AEAD_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_aead_128_384_encrypt, - knot_aead_128_384_decrypt -}; - -aead_cipher_t const knot_aead_192_384_cipher = { - "KNOT-AEAD-192-384", - KNOT_AEAD_192_KEY_SIZE, - KNOT_AEAD_192_NONCE_SIZE, - KNOT_AEAD_192_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_aead_192_384_encrypt, - knot_aead_192_384_decrypt -}; - -aead_cipher_t const knot_aead_256_512_cipher = { - "KNOT-AEAD-256-512", - KNOT_AEAD_256_KEY_SIZE, - KNOT_AEAD_256_NONCE_SIZE, - KNOT_AEAD_256_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_aead_256_512_encrypt, - knot_aead_256_512_decrypt -}; - -/** - * \brief Rate for KNOT-AEAD-128-256. - */ -#define KNOT_AEAD_128_256_RATE 8 - -/** - * \brief Rate for KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_384_RATE 24 - -/** - * \brief Rate for KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_384_RATE 12 - -/** - * \brief Rate for KNOT-AEAD-256-512. - */ -#define KNOT_AEAD_256_512_RATE 16 - -/** - * \brief Absorbs the associated data into a KNOT permutation state. - * - * \param state Points to the KNOT permutation state. - * \param permute Points to the function to perform the KNOT permutation. - * \param rounds Number of rounds to perform. 
- * \param rate Rate of absorption to use with the permutation. - * \param ad Points to the associated data. - * \param adlen Length of the associated data, must be at least 1. - */ -static void knot_aead_absorb_ad - (void *state, knot_permute_t permute, uint8_t rounds, unsigned rate, - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen >= rate) { - lw_xor_block((unsigned char *)state, ad, rate); - permute(state, rounds); - ad += rate; - adlen -= rate; - } - rate = (unsigned)adlen; - lw_xor_block((unsigned char *)state, ad, rate); - ((unsigned char *)state)[rate] ^= 0x01; - permute(state, rounds); -} - -/** - * \brief Encrypts plaintext data with a KNOT permutation state. - * - * \param state Points to the KNOT permutation state. - * \param permute Points to the function to perform the KNOT permutation. - * \param rounds Number of rounds to perform. - * \param rate Rate of absorption to use with the permutation. - * \param c Buffer to receive the ciphertext. - * \param m Buffer containing the plaintext. - * \param len Length of the plaintext data, must be at least 1. - */ -static void knot_aead_encrypt - (void *state, knot_permute_t permute, uint8_t rounds, unsigned rate, - unsigned char *c, const unsigned char *m, unsigned long long len) -{ - while (len >= rate) { - lw_xor_block_2_dest(c, (unsigned char *)state, m, rate); - permute(state, rounds); - c += rate; - m += rate; - len -= rate; - } - rate = (unsigned)len; - lw_xor_block_2_dest(c, (unsigned char *)state, m, rate); - ((unsigned char *)state)[rate] ^= 0x01; -} - -/** - * \brief Decrypts ciphertext data with a KNOT permutation state. - * - * \param state Points to the KNOT permutation state. - * \param permute Points to the function to perform the KNOT permutation. - * \param rounds Number of rounds to perform. - * \param rate Rate of absorption to use with the permutation. - * \param m Buffer to receive the plaintext. - * \param c Buffer containing the ciphertext. - * \param len Length of the plaintext data, must be at least 1. 
- */ -static void knot_aead_decrypt - (void *state, knot_permute_t permute, uint8_t rounds, unsigned rate, - unsigned char *m, const unsigned char *c, unsigned long long len) -{ - while (len >= rate) { - lw_xor_block_swap(m, (unsigned char *)state, c, rate); - permute(state, rounds); - c += rate; - m += rate; - len -= rate; - } - rate = (unsigned)len; - lw_xor_block_swap(m, (unsigned char *)state, c, rate); - ((unsigned char *)state)[rate] ^= 0x01; -} - -int knot_aead_128_256_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - knot256_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + KNOT_AEAD_128_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_128_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_128_NONCE_SIZE, k, KNOT_AEAD_128_KEY_SIZE); - knot256_permute_6(&state, 52); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot256_permute_6, - 28, KNOT_AEAD_128_256_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Encrypts the plaintext to produce the ciphertext */ - if (mlen > 0) { - knot_aead_encrypt - (&state, (knot_permute_t)knot256_permute_6, - 28, KNOT_AEAD_128_256_RATE, c, m, mlen); - } - - /* Compute the authentication tag */ - knot256_permute_6(&state, 32); - memcpy(c + mlen, state.B, KNOT_AEAD_128_TAG_SIZE); - return 0; -} - -int knot_aead_128_256_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - knot256_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < KNOT_AEAD_128_TAG_SIZE) - return -1; - *mlen = clen - KNOT_AEAD_128_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_128_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_128_NONCE_SIZE, k, KNOT_AEAD_128_KEY_SIZE); - knot256_permute_6(&state, 52); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot256_permute_6, - 28, KNOT_AEAD_128_256_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Decrypts the ciphertext to produce the plaintext */ - clen -= KNOT_AEAD_128_TAG_SIZE; - if (clen > 0) { - knot_aead_decrypt - (&state, (knot_permute_t)knot256_permute_6, - 28, KNOT_AEAD_128_256_RATE, m, c, clen); - } - - /* Check the authentication tag */ - knot256_permute_6(&state, 32); - return aead_check_tag - (m, clen, state.B, c + clen, KNOT_AEAD_128_TAG_SIZE); -} - -int knot_aead_128_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - knot384_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + KNOT_AEAD_128_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_128_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_128_NONCE_SIZE, k, KNOT_AEAD_128_KEY_SIZE); - 
memset(state.B + KNOT_AEAD_128_NONCE_SIZE + KNOT_AEAD_128_KEY_SIZE, - 0, 47 - (KNOT_AEAD_128_NONCE_SIZE + KNOT_AEAD_128_KEY_SIZE)); - state.B[47] = 0x80; - knot384_permute_7(&state, 76); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot384_permute_7, - 28, KNOT_AEAD_128_384_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Encrypts the plaintext to produce the ciphertext */ - if (mlen > 0) { - knot_aead_encrypt - (&state, (knot_permute_t)knot384_permute_7, - 28, KNOT_AEAD_128_384_RATE, c, m, mlen); - } - - /* Compute the authentication tag */ - knot384_permute_7(&state, 32); - memcpy(c + mlen, state.B, KNOT_AEAD_128_TAG_SIZE); - return 0; -} - -int knot_aead_128_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - knot384_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < KNOT_AEAD_128_TAG_SIZE) - return -1; - *mlen = clen - KNOT_AEAD_128_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_128_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_128_NONCE_SIZE, k, KNOT_AEAD_128_KEY_SIZE); - memset(state.B + KNOT_AEAD_128_NONCE_SIZE + KNOT_AEAD_128_KEY_SIZE, - 0, 47 - (KNOT_AEAD_128_NONCE_SIZE + KNOT_AEAD_128_KEY_SIZE)); - state.B[47] = 0x80; - knot384_permute_7(&state, 76); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot384_permute_7, - 28, KNOT_AEAD_128_384_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Decrypts the ciphertext to produce the plaintext */ - clen -= KNOT_AEAD_128_TAG_SIZE; - if (clen > 0) { - knot_aead_decrypt - (&state, (knot_permute_t)knot384_permute_7, - 28, KNOT_AEAD_128_384_RATE, m, c, clen); - } - - /* Check the authentication tag */ - knot384_permute_7(&state, 32); - return aead_check_tag - (m, clen, state.B, c + clen, KNOT_AEAD_128_TAG_SIZE); -} - -int knot_aead_192_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - knot384_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + KNOT_AEAD_192_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_192_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_192_NONCE_SIZE, k, KNOT_AEAD_192_KEY_SIZE); - knot384_permute_7(&state, 76); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot384_permute_7, - 40, KNOT_AEAD_192_384_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Encrypts the plaintext to produce the ciphertext */ - if (mlen > 0) { - knot_aead_encrypt - (&state, (knot_permute_t)knot384_permute_7, - 40, KNOT_AEAD_192_384_RATE, c, m, mlen); - } - - /* Compute the authentication tag */ - knot384_permute_7(&state, 44); - memcpy(c + mlen, state.B, KNOT_AEAD_192_TAG_SIZE); - return 0; -} - -int knot_aead_192_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long 
long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - knot384_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < KNOT_AEAD_192_TAG_SIZE) - return -1; - *mlen = clen - KNOT_AEAD_192_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_192_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_192_NONCE_SIZE, k, KNOT_AEAD_192_KEY_SIZE); - knot384_permute_7(&state, 76); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot384_permute_7, - 40, KNOT_AEAD_192_384_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Decrypts the ciphertext to produce the plaintext */ - clen -= KNOT_AEAD_192_TAG_SIZE; - if (clen > 0) { - knot_aead_decrypt - (&state, (knot_permute_t)knot384_permute_7, - 40, KNOT_AEAD_192_384_RATE, m, c, clen); - } - - /* Check the authentication tag */ - knot384_permute_7(&state, 44); - return aead_check_tag - (m, clen, state.B, c + clen, KNOT_AEAD_192_TAG_SIZE); -} - -int knot_aead_256_512_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - knot512_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + KNOT_AEAD_256_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_256_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_256_NONCE_SIZE, k, KNOT_AEAD_256_KEY_SIZE); - knot512_permute_7(&state, 100); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot512_permute_7, - 52, KNOT_AEAD_256_512_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Encrypts the plaintext to produce the ciphertext */ - if (mlen > 0) { - knot_aead_encrypt - (&state, (knot_permute_t)knot512_permute_7, - 52, KNOT_AEAD_256_512_RATE, c, m, mlen); - } - - /* Compute the authentication tag */ - knot512_permute_7(&state, 56); - memcpy(c + mlen, state.B, KNOT_AEAD_256_TAG_SIZE); - return 0; -} - -int knot_aead_256_512_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - knot512_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < KNOT_AEAD_256_TAG_SIZE) - return -1; - *mlen = clen - KNOT_AEAD_256_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_256_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_256_NONCE_SIZE, k, KNOT_AEAD_256_KEY_SIZE); - knot512_permute_7(&state, 100); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot512_permute_7, - 52, KNOT_AEAD_256_512_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Decrypts the ciphertext to produce the plaintext */ - clen -= KNOT_AEAD_256_TAG_SIZE; - if (clen > 0) { - knot_aead_decrypt - (&state, (knot_permute_t)knot512_permute_7, - 52, KNOT_AEAD_256_512_RATE, m, c, clen); - } - - /* Check the authentication tag */ - 
knot512_permute_7(&state, 56); - return aead_check_tag - (m, clen, state.B, c + clen, KNOT_AEAD_256_TAG_SIZE); -} diff --git a/knot/Implementations/crypto_aead/knot192/rhys-avr/knot.h b/knot/Implementations/crypto_aead/knot192/rhys-avr/knot.h deleted file mode 100644 index e2c5198..0000000 --- a/knot/Implementations/crypto_aead/knot192/rhys-avr/knot.h +++ /dev/null @@ -1,459 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_KNOT_H -#define LWCRYPTO_KNOT_H - -#include "aead-common.h" - -/** - * \file knot.h - * \brief KNOT authenticated encryption and hash algorithms. - * - * KNOT is a family of authenticated encryption and hash algorithms built - * around a permutation and the MonkeyDuplex sponge construction. The - * family members are: - * - * \li KNOT-AEAD-128-256 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag, built around a 256-bit permutation. This is the primary - * encryption member of the family. - * \li KNOT-AEAD-128-384 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag, built around a 384-bit permutation. - * \li KNOT-AEAD-192-384 with a 192-bit key, a 192-bit nonce, and a - * 192-bit tag, built around a 384-bit permutation. - * \li KNOT-AEAD-256-512 with a 256-bit key, a 256-bit nonce, and a - * 256-bit tag, built around a 512-bit permutation. - * \li KNOT-HASH-256-256 with a 256-bit hash output, built around a - * 256-bit permutation. This is the primary hashing member of the family. - * \li KNOT-HASH-256-384 with a 256-bit hash output, built around a - * 384-bit permutation. - * \li KNOT-HASH-384-384 with a 384-bit hash output, built around a - * 384-bit permutation. - * \li KNOT-HASH-512-512 with a 512-bit hash output, built around a - * 512-bit permutation. - * - * References: https://csrc.nist.gov/CSRC/media/Projects/lightweight-cryptography/documents/round-2/spec-doc-rnd2/knot-spec-round.pdf - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-128-256 and - * KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_NONCE_SIZE 16 - -/** - * \brief Size of the key for KNOT-AEAD-192-384. 
- */ -#define KNOT_AEAD_192_KEY_SIZE 24 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_TAG_SIZE 24 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_NONCE_SIZE 24 - -/** - * \brief Size of the key for KNOT-AEAD-256-512. - */ -#define KNOT_AEAD_256_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-256-512. - */ -#define KNOT_AEAD_256_TAG_SIZE 32 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_256_NONCE_SIZE 32 - -/** - * \brief Size of the hash for KNOT-HASH-256-256 and KNOT-HASH-256-384. - */ -#define KNOT_HASH_256_SIZE 32 - -/** - * \brief Size of the hash for KNOT-HASH-384-384. - */ -#define KNOT_HASH_384_SIZE 48 - -/** - * \brief Size of the hash for KNOT-HASH-512-512. - */ -#define KNOT_HASH_512_SIZE 64 - -/** - * \brief Meta-information block for the KNOT-AEAD-128-256 cipher. - */ -extern aead_cipher_t const knot_aead_128_256_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-128-384 cipher. - */ -extern aead_cipher_t const knot_aead_128_384_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-192-384 cipher. - */ -extern aead_cipher_t const knot_aead_192_384_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-256-512 cipher. - */ -extern aead_cipher_t const knot_aead_256_512_cipher; - -/** - * \brief Meta-information block for the KNOT-HASH-256-256 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_256_256_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-256-384 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_256_384_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-384-384 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_384_384_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-512-512 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_512_512_algorithm; - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_128_256_decrypt() - */ -int knot_aead_128_256_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. 
- * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_128_256_encrypt() - */ -int knot_aead_128_256_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-384. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_128_384_decrypt() - */ -int knot_aead_128_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-384. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- * - * \sa knot_aead_128_384_encrypt() - */ -int knot_aead_128_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-192-384. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_192_384_decrypt() - */ -int knot_aead_192_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-192-384. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_192_384_encrypt() - */ -int knot_aead_192_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-256-512. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. 
- * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_256_512_decrypt() - */ -int knot_aead_256_512_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-256-512. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_256_512_encrypt() - */ -int knot_aead_256_512_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with KNOT-HASH-256-256. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_256_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_256_256 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-256-384. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_256_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_256_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-384-384. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_384_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_384_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-512-512. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_512_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. 
- * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_512_512 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_aead/knot192/rhys/internal-knot-256-avr.S b/knot/Implementations/crypto_aead/knot192/rhys/internal-knot-256-avr.S new file mode 100644 index 0000000..15e6389 --- /dev/null +++ b/knot/Implementations/crypto_aead/knot192/rhys/internal-knot-256-avr.S @@ -0,0 +1,1093 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_6, @object + .size table_6, 52 +table_6: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 33 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 49 + .byte 34 + .byte 5 + .byte 10 + .byte 20 + .byte 41 + .byte 19 + .byte 39 + .byte 15 + .byte 30 + .byte 61 + .byte 58 + .byte 52 + .byte 40 + .byte 17 + .byte 35 + .byte 7 + .byte 14 + .byte 28 + .byte 57 + .byte 50 + .byte 36 + .byte 9 + .byte 18 + .byte 37 + .byte 11 + .byte 22 + .byte 45 + .byte 27 + .byte 55 + .byte 46 + .byte 29 + .byte 59 + .byte 54 + .byte 44 + .byte 25 + .byte 51 + .byte 38 + .byte 13 + .byte 26 + .byte 53 + .byte 42 + + .text +.global knot256_permute_6 + .type knot256_permute_6, @function +knot256_permute_6: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 57 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r8 + std Y+18,r9 + std Y+19,r10 + std Y+20,r11 + std Y+21,r12 + std Y+22,r13 + std Y+23,r14 + std Y+24,r15 + push r31 + push r30 + ldi r30,lo8(table_6) + ldi r31,hi8(table_6) +#if defined(RAMPZ) + ldi r17,hh8(table_6) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +59: +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + eor r18,r23 + inc r30 + ldd r23,Y+1 + ldd r4,Y+9 + ldd r5,Y+17 + mov r24,r18 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+33,r7 + mov r16,r5 + eor r16,r24 + mov r8,r23 + or r8,r4 + eor r8,r16 + mov r24,r23 + eor r24,r5 + mov r18,r25 + and r18,r16 + eor r18,r24 + mov r6,r8 + and r6,r24 + eor r6,r25 + std Y+25,r6 + ldd r23,Y+2 + ldd r4,Y+10 + ldd r5,Y+18 + mov r24,r19 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+34,r7 + mov r16,r5 + eor r16,r24 + mov r9,r23 + or r9,r4 + eor r9,r16 + mov r24,r23 + eor r24,r5 + mov r19,r25 + and r19,r16 + eor r19,r24 + mov r6,r9 + and r6,r24 + eor r6,r25 + std Y+26,r6 + ldd 
r23,Y+3 + ldd r4,Y+11 + ldd r5,Y+19 + mov r24,r20 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+35,r7 + mov r16,r5 + eor r16,r24 + mov r10,r23 + or r10,r4 + eor r10,r16 + mov r24,r23 + eor r24,r5 + mov r20,r25 + and r20,r16 + eor r20,r24 + mov r6,r10 + and r6,r24 + eor r6,r25 + std Y+27,r6 + ldd r23,Y+4 + ldd r4,Y+12 + ldd r5,Y+20 + mov r24,r21 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+36,r7 + mov r16,r5 + eor r16,r24 + mov r11,r23 + or r11,r4 + eor r11,r16 + mov r24,r23 + eor r24,r5 + mov r21,r25 + and r21,r16 + eor r21,r24 + mov r6,r11 + and r6,r24 + eor r6,r25 + std Y+28,r6 + ldd r23,Y+5 + ldd r4,Y+13 + ldd r5,Y+21 + mov r24,r26 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+37,r7 + mov r16,r5 + eor r16,r24 + mov r12,r23 + or r12,r4 + eor r12,r16 + mov r24,r23 + eor r24,r5 + mov r26,r25 + and r26,r16 + eor r26,r24 + mov r6,r12 + and r6,r24 + eor r6,r25 + std Y+29,r6 + ldd r23,Y+6 + ldd r4,Y+14 + ldd r5,Y+22 + mov r24,r27 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+38,r7 + mov r16,r5 + eor r16,r24 + mov r13,r23 + or r13,r4 + eor r13,r16 + mov r24,r23 + eor r24,r5 + mov r27,r25 + and r27,r16 + eor r27,r24 + mov r6,r13 + and r6,r24 + eor r6,r25 + std Y+30,r6 + ldd r23,Y+7 + ldd r4,Y+15 + ldd r5,Y+23 + mov r24,r2 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+39,r7 + mov r16,r5 + eor r16,r24 + mov r14,r23 + or r14,r4 + eor r14,r16 + mov r24,r23 + eor r24,r5 + mov r2,r25 + and r2,r16 + eor r2,r24 + mov r6,r14 + and r6,r24 + eor r6,r25 + std Y+31,r6 + ldd r23,Y+8 + ldd r4,Y+16 + ldd r5,Y+24 + mov r24,r3 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+40,r7 + mov r16,r5 + eor r16,r24 + mov r15,r23 + or r15,r4 + eor r15,r16 + mov r24,r23 + eor r24,r5 + mov r3,r25 + and r3,r16 + eor r3,r24 + mov r6,r15 + and r6,r24 + eor r6,r25 + std Y+32,r6 + std Y+9,r15 + std Y+10,r8 + std Y+11,r9 + std Y+12,r10 + std Y+13,r11 + std Y+14,r12 + std Y+15,r13 + std Y+16,r14 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd r11,Y+36 + ldd r12,Y+37 + ldd r13,Y+38 + ldd r14,Y+39 + ldd r15,Y+40 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+17,r13 + std Y+18,r14 + std Y+19,r15 + std Y+20,r8 + std Y+21,r9 + std Y+22,r10 + std Y+23,r11 + std Y+24,r12 + dec r22 + breq 5322f + rjmp 59b +5322: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + ldd r12,Y+5 + ldd r13,Y+6 + ldd r14,Y+7 + ldd r15,Y+8 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + ldd r8,Y+17 + ldd r9,Y+18 + ldd r10,Y+19 + ldd r11,Y+20 + ldd r12,Y+21 + ldd r13,Y+22 + ldd r14,Y+23 
+ ldd r15,Y+24 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + adiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot256_permute_6, .-knot256_permute_6 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 + .byte 92 + + .text +.global knot256_permute_7 + .type knot256_permute_7, @function +knot256_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 57 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r8 + std Y+18,r9 + std Y+19,r10 + std Y+20,r11 + std Y+21,r12 + std Y+22,r13 + std Y+23,r14 + std Y+24,r15 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r17,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +59: +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + eor r18,r23 + inc r30 + ldd r23,Y+1 + ldd r4,Y+9 + ldd r5,Y+17 + mov r24,r18 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+33,r7 + mov r16,r5 + eor r16,r24 + mov r8,r23 + or r8,r4 + eor r8,r16 + mov r24,r23 + eor r24,r5 + mov r18,r25 + and r18,r16 + eor 
r18,r24 + mov r6,r8 + and r6,r24 + eor r6,r25 + std Y+25,r6 + ldd r23,Y+2 + ldd r4,Y+10 + ldd r5,Y+18 + mov r24,r19 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+34,r7 + mov r16,r5 + eor r16,r24 + mov r9,r23 + or r9,r4 + eor r9,r16 + mov r24,r23 + eor r24,r5 + mov r19,r25 + and r19,r16 + eor r19,r24 + mov r6,r9 + and r6,r24 + eor r6,r25 + std Y+26,r6 + ldd r23,Y+3 + ldd r4,Y+11 + ldd r5,Y+19 + mov r24,r20 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+35,r7 + mov r16,r5 + eor r16,r24 + mov r10,r23 + or r10,r4 + eor r10,r16 + mov r24,r23 + eor r24,r5 + mov r20,r25 + and r20,r16 + eor r20,r24 + mov r6,r10 + and r6,r24 + eor r6,r25 + std Y+27,r6 + ldd r23,Y+4 + ldd r4,Y+12 + ldd r5,Y+20 + mov r24,r21 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+36,r7 + mov r16,r5 + eor r16,r24 + mov r11,r23 + or r11,r4 + eor r11,r16 + mov r24,r23 + eor r24,r5 + mov r21,r25 + and r21,r16 + eor r21,r24 + mov r6,r11 + and r6,r24 + eor r6,r25 + std Y+28,r6 + ldd r23,Y+5 + ldd r4,Y+13 + ldd r5,Y+21 + mov r24,r26 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+37,r7 + mov r16,r5 + eor r16,r24 + mov r12,r23 + or r12,r4 + eor r12,r16 + mov r24,r23 + eor r24,r5 + mov r26,r25 + and r26,r16 + eor r26,r24 + mov r6,r12 + and r6,r24 + eor r6,r25 + std Y+29,r6 + ldd r23,Y+6 + ldd r4,Y+14 + ldd r5,Y+22 + mov r24,r27 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+38,r7 + mov r16,r5 + eor r16,r24 + mov r13,r23 + or r13,r4 + eor r13,r16 + mov r24,r23 + eor r24,r5 + mov r27,r25 + and r27,r16 + eor r27,r24 + mov r6,r13 + and r6,r24 + eor r6,r25 + std Y+30,r6 + ldd r23,Y+7 + ldd r4,Y+15 + ldd r5,Y+23 + mov r24,r2 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+39,r7 + mov r16,r5 + eor r16,r24 + mov r14,r23 + or r14,r4 + eor r14,r16 + mov r24,r23 + eor r24,r5 + mov r2,r25 + and r2,r16 + eor r2,r24 + mov r6,r14 + and r6,r24 + eor r6,r25 + std Y+31,r6 + ldd r23,Y+8 + ldd r4,Y+16 + ldd r5,Y+24 + mov r24,r3 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+40,r7 + mov r16,r5 + eor r16,r24 + mov r15,r23 + or r15,r4 + eor r15,r16 + mov r24,r23 + eor r24,r5 + mov r3,r25 + and r3,r16 + eor r3,r24 + mov r6,r15 + and r6,r24 + eor r6,r25 + std Y+32,r6 + std Y+9,r15 + std Y+10,r8 + std Y+11,r9 + std Y+12,r10 + std Y+13,r11 + std Y+14,r12 + std Y+15,r13 + std Y+16,r14 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd r11,Y+36 + ldd r12,Y+37 + ldd r13,Y+38 + ldd r14,Y+39 + ldd r15,Y+40 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+17,r13 + std Y+18,r14 + std Y+19,r15 + std Y+20,r8 + std Y+21,r9 + std Y+22,r10 + std Y+23,r11 + std Y+24,r12 + dec r22 + breq 5322f + rjmp 59b +5322: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + ldd r12,Y+5 + ldd r13,Y+6 + ldd r14,Y+7 + ldd r15,Y+8 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 
+ std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + ldd r8,Y+17 + ldd r9,Y+18 + ldd r10,Y+19 + ldd r11,Y+20 + ldd r12,Y+21 + ldd r13,Y+22 + ldd r14,Y+23 + ldd r15,Y+24 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + adiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot256_permute_7, .-knot256_permute_7 + +#endif diff --git a/knot/Implementations/crypto_aead/knot192/rhys/internal-knot-384-avr.S b/knot/Implementations/crypto_aead/knot192/rhys/internal-knot-384-avr.S new file mode 100644 index 0000000..4d15898 --- /dev/null +++ b/knot/Implementations/crypto_aead/knot192/rhys/internal-knot-384-avr.S @@ -0,0 +1,833 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 + .byte 92 + + .text +.global knot384_permute_7 + .type knot384_permute_7, @function +knot384_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,72 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 87 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + ldd r4,Z+16 + ldd r5,Z+17 + ldd r6,Z+18 + ldd r7,Z+19 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + std Y+13,r26 + std Y+14,r27 + std Y+15,r2 + std Y+16,r3 + std Y+17,r4 + std Y+18,r5 + std Y+19,r6 + std Y+20,r7 + std Y+21,r8 + std Y+22,r9 + std Y+23,r10 + std Y+24,r11 
+ ldd r26,Z+24 + ldd r27,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r4,Z+28 + ldd r5,Z+29 + ldd r6,Z+30 + ldd r7,Z+31 + ldd r8,Z+32 + ldd r9,Z+33 + ldd r10,Z+34 + ldd r11,Z+35 + std Y+25,r26 + std Y+26,r27 + std Y+27,r2 + std Y+28,r3 + std Y+29,r4 + std Y+30,r5 + std Y+31,r6 + std Y+32,r7 + std Y+33,r8 + std Y+34,r9 + std Y+35,r10 + std Y+36,r11 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+37,r26 + std Y+38,r27 + std Y+39,r2 + std Y+40,r3 + std Y+41,r4 + std Y+42,r5 + std Y+43,r6 + std Y+44,r7 + std Y+45,r8 + std Y+46,r9 + std Y+47,r10 + std Y+48,r11 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r24,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif +99: + ldd r12,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + inc r30 + ldd r18,Y+13 + ldd r19,Y+25 + ldd r20,Y+37 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+61,r23 + mov r14,r20 + eor r14,r12 + mov r26,r18 + or r26,r19 + eor r26,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+1,r21 + mov r21,r26 + and r21,r12 + eor r21,r13 + std Y+49,r21 + ldd r12,Y+2 + ldd r18,Y+14 + ldd r19,Y+26 + ldd r20,Y+38 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+62,r23 + mov r14,r20 + eor r14,r12 + mov r27,r18 + or r27,r19 + eor r27,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+2,r21 + mov r21,r27 + and r21,r12 + eor r21,r13 + std Y+50,r21 + ldd r12,Y+3 + ldd r18,Y+15 + ldd r19,Y+27 + ldd r20,Y+39 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+63,r23 + mov r14,r20 + eor r14,r12 + mov r2,r18 + or r2,r19 + eor r2,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+3,r21 + mov r21,r2 + and r21,r12 + eor r21,r13 + std Y+51,r21 + ldd r12,Y+4 + ldd r18,Y+16 + ldd r19,Y+28 + ldd r20,Y+40 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,192 + sbci r29,255 + st Y,r23 + subi r28,64 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r3,r18 + or r3,r19 + eor r3,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+4,r21 + mov r21,r3 + and r21,r12 + eor r21,r13 + std Y+52,r21 + ldd r12,Y+5 + ldd r18,Y+17 + ldd r19,Y+29 + ldd r20,Y+41 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,191 + sbci r29,255 + st Y,r23 + subi r28,65 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r4,r18 + or r4,r19 + eor r4,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+5,r21 + mov r21,r4 + and r21,r12 + eor r21,r13 + std Y+53,r21 + ldd r12,Y+6 + ldd r18,Y+18 + ldd r19,Y+30 + ldd r20,Y+42 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,190 + sbci r29,255 + st Y,r23 + subi r28,66 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r5,r18 + or r5,r19 + eor r5,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+6,r21 + mov r21,r5 + and r21,r12 + eor r21,r13 + std Y+54,r21 + ldd r12,Y+7 + ldd r18,Y+19 + ldd r19,Y+31 + ldd r20,Y+43 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,189 + sbci r29,255 + 
st Y,r23 + subi r28,67 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r6,r18 + or r6,r19 + eor r6,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+7,r21 + mov r21,r6 + and r21,r12 + eor r21,r13 + std Y+55,r21 + ldd r12,Y+8 + ldd r18,Y+20 + ldd r19,Y+32 + ldd r20,Y+44 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,188 + sbci r29,255 + st Y,r23 + subi r28,68 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r7,r18 + or r7,r19 + eor r7,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+8,r21 + mov r21,r7 + and r21,r12 + eor r21,r13 + std Y+56,r21 + ldd r12,Y+9 + ldd r18,Y+21 + ldd r19,Y+33 + ldd r20,Y+45 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,187 + sbci r29,255 + st Y,r23 + subi r28,69 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r8,r18 + or r8,r19 + eor r8,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+9,r21 + mov r21,r8 + and r21,r12 + eor r21,r13 + std Y+57,r21 + ldd r12,Y+10 + ldd r18,Y+22 + ldd r19,Y+34 + ldd r20,Y+46 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,186 + sbci r29,255 + st Y,r23 + subi r28,70 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r9,r18 + or r9,r19 + eor r9,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+10,r21 + mov r21,r9 + and r21,r12 + eor r21,r13 + std Y+58,r21 + ldd r12,Y+11 + ldd r18,Y+23 + ldd r19,Y+35 + ldd r20,Y+47 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,185 + sbci r29,255 + st Y,r23 + subi r28,71 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r10,r18 + or r10,r19 + eor r10,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+11,r21 + mov r21,r10 + and r21,r12 + eor r21,r13 + std Y+59,r21 + ldd r12,Y+12 + ldd r18,Y+24 + ldd r19,Y+36 + ldd r20,Y+48 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,184 + sbci r29,255 + st Y,r23 + subi r28,72 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r11,r18 + or r11,r19 + eor r11,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+12,r21 + mov r21,r11 + and r21,r12 + eor r21,r13 + std Y+60,r21 + std Y+25,r11 + std Y+26,r26 + std Y+27,r27 + std Y+28,r2 + std Y+29,r3 + std Y+30,r4 + std Y+31,r5 + std Y+32,r6 + std Y+33,r7 + std Y+34,r8 + std Y+35,r9 + std Y+36,r10 + ldd r26,Y+49 + ldd r27,Y+50 + ldd r2,Y+51 + ldd r3,Y+52 + ldd r4,Y+53 + ldd r5,Y+54 + ldd r6,Y+55 + ldd r7,Y+56 + ldd r8,Y+57 + ldd r9,Y+58 + ldd r10,Y+59 + ldd r11,Y+60 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + adc r26,r1 + std Y+13,r26 + std Y+14,r27 + std Y+15,r2 + std Y+16,r3 + std Y+17,r4 + std Y+18,r5 + std Y+19,r6 + std Y+20,r7 + std Y+21,r8 + std Y+22,r9 + std Y+23,r10 + std Y+24,r11 + adiw r28,61 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y + subi r28,72 + sbc r29,r1 + bst r26,0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r3 + ror r2 + ror r27 + ror r26 + bld r11,7 + std Y+37,r5 + std Y+38,r6 + std Y+39,r7 + std Y+40,r8 + std Y+41,r9 + std Y+42,r10 + std Y+43,r11 + std Y+44,r26 + std Y+45,r27 + std Y+46,r2 + std Y+47,r3 + std Y+48,r4 + dec r22 + breq 5542f + rjmp 99b +5542: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + 
pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r2,Y+15 + ldd r3,Y+16 + ldd r4,Y+17 + ldd r5,Y+18 + ldd r6,Y+19 + ldd r7,Y+20 + ldd r8,Y+21 + ldd r9,Y+22 + ldd r10,Y+23 + ldd r11,Y+24 + std Z+12,r26 + std Z+13,r27 + std Z+14,r2 + std Z+15,r3 + std Z+16,r4 + std Z+17,r5 + std Z+18,r6 + std Z+19,r7 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + ldd r26,Y+25 + ldd r27,Y+26 + ldd r2,Y+27 + ldd r3,Y+28 + ldd r4,Y+29 + ldd r5,Y+30 + ldd r6,Y+31 + ldd r7,Y+32 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd r11,Y+36 + std Z+24,r26 + std Z+25,r27 + std Z+26,r2 + std Z+27,r3 + std Z+28,r4 + std Z+29,r5 + std Z+30,r6 + std Z+31,r7 + std Z+32,r8 + std Z+33,r9 + std Z+34,r10 + std Z+35,r11 + ldd r26,Y+37 + ldd r27,Y+38 + ldd r2,Y+39 + ldd r3,Y+40 + ldd r4,Y+41 + ldd r5,Y+42 + ldd r6,Y+43 + ldd r7,Y+44 + ldd r8,Y+45 + ldd r9,Y+46 + ldd r10,Y+47 + ldd r11,Y+48 + std Z+36,r26 + std Z+37,r27 + std Z+38,r2 + std Z+39,r3 + std Z+40,r4 + std Z+41,r5 + std Z+42,r6 + std Z+43,r7 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + subi r28,184 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot384_permute_7, .-knot384_permute_7 + +#endif diff --git a/knot/Implementations/crypto_aead/knot192/rhys/internal-knot-512-avr.S b/knot/Implementations/crypto_aead/knot192/rhys/internal-knot-512-avr.S new file mode 100644 index 0000000..6f92ac3 --- /dev/null +++ b/knot/Implementations/crypto_aead/knot192/rhys/internal-knot-512-avr.S @@ -0,0 +1,2315 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 + .byte 92 + + .text +.global knot512_permute_7 + .type knot512_permute_7, @function +knot512_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + 
push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,96 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 113 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r26,Z+16 + ldd r27,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + ldd r26,Z+32 + ldd r27,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r8,Z+40 + ldd r9,Z+41 + ldd r10,Z+42 + ldd r11,Z+43 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + std Y+33,r26 + std Y+34,r27 + std Y+35,r2 + std Y+36,r3 + std Y+37,r4 + std Y+38,r5 + std Y+39,r6 + std Y+40,r7 + std Y+41,r8 + std Y+42,r9 + std Y+43,r10 + std Y+44,r11 + std Y+45,r12 + std Y+46,r13 + std Y+47,r14 + std Y+48,r15 + ldd r26,Z+48 + ldd r27,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r8,Z+56 + ldd r9,Z+57 + ldd r10,Z+58 + ldd r11,Z+59 + ldd r12,Z+60 + ldd r13,Z+61 + ldd r14,Z+62 + ldd r15,Z+63 + adiw r28,49 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y+,r12 + st Y+,r13 + st Y+,r14 + st Y,r15 + subi r28,64 + sbc r29,r1 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r17,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +134: + ldd r24,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r24,r18 + inc r30 + ldd r18,Y+17 + ldd r19,Y+33 + ldd r20,Y+49 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,175 + sbci r29,255 + st Y,r23 + subi r28,81 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r26,r18 + or r26,r19 + eor r26,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+1,r21 + mov r21,r26 + and r21,r24 + eor r21,r25 + subi r28,191 + sbci r29,255 + st Y,r21 + subi r28,65 + sbc r29,r1 + ldd r24,Y+2 + ldd r18,Y+18 + ldd r19,Y+34 + ldd r20,Y+50 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,174 + sbci r29,255 + st Y,r23 + subi r28,82 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r27,r18 + or r27,r19 + eor r27,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+2,r21 + mov r21,r27 + and r21,r24 + eor r21,r25 + subi r28,190 + sbci r29,255 + st Y,r21 + subi r28,66 + sbc r29,r1 + ldd r24,Y+3 + ldd r18,Y+19 + ldd r19,Y+35 + ldd r20,Y+51 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,173 + sbci r29,255 + st Y,r23 + subi r28,83 + sbc r29,r1 + mov r16,r20 
+ eor r16,r24 + mov r2,r18 + or r2,r19 + eor r2,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+3,r21 + mov r21,r2 + and r21,r24 + eor r21,r25 + subi r28,189 + sbci r29,255 + st Y,r21 + subi r28,67 + sbc r29,r1 + ldd r24,Y+4 + ldd r18,Y+20 + ldd r19,Y+36 + ldd r20,Y+52 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,172 + sbci r29,255 + st Y,r23 + subi r28,84 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r3,r18 + or r3,r19 + eor r3,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+4,r21 + mov r21,r3 + and r21,r24 + eor r21,r25 + subi r28,188 + sbci r29,255 + st Y,r21 + subi r28,68 + sbc r29,r1 + ldd r24,Y+5 + ldd r18,Y+21 + ldd r19,Y+37 + ldd r20,Y+53 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,171 + sbci r29,255 + st Y,r23 + subi r28,85 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r4,r18 + or r4,r19 + eor r4,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+5,r21 + mov r21,r4 + and r21,r24 + eor r21,r25 + subi r28,187 + sbci r29,255 + st Y,r21 + subi r28,69 + sbc r29,r1 + ldd r24,Y+6 + ldd r18,Y+22 + ldd r19,Y+38 + ldd r20,Y+54 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,170 + sbci r29,255 + st Y,r23 + subi r28,86 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r5,r18 + or r5,r19 + eor r5,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+6,r21 + mov r21,r5 + and r21,r24 + eor r21,r25 + subi r28,186 + sbci r29,255 + st Y,r21 + subi r28,70 + sbc r29,r1 + ldd r24,Y+7 + ldd r18,Y+23 + ldd r19,Y+39 + ldd r20,Y+55 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,169 + sbci r29,255 + st Y,r23 + subi r28,87 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r6,r18 + or r6,r19 + eor r6,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+7,r21 + mov r21,r6 + and r21,r24 + eor r21,r25 + subi r28,185 + sbci r29,255 + st Y,r21 + subi r28,71 + sbc r29,r1 + ldd r24,Y+8 + ldd r18,Y+24 + ldd r19,Y+40 + ldd r20,Y+56 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,168 + sbci r29,255 + st Y,r23 + subi r28,88 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r7,r18 + or r7,r19 + eor r7,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+8,r21 + mov r21,r7 + and r21,r24 + eor r21,r25 + subi r28,184 + sbci r29,255 + st Y,r21 + subi r28,72 + sbc r29,r1 + ldd r24,Y+9 + ldd r18,Y+25 + ldd r19,Y+41 + ldd r20,Y+57 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,167 + sbci r29,255 + st Y,r23 + subi r28,89 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r8,r18 + or r8,r19 + eor r8,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+9,r21 + mov r21,r8 + and r21,r24 + eor r21,r25 + subi r28,183 + sbci r29,255 + st Y,r21 + subi r28,73 + sbc r29,r1 + ldd r24,Y+10 + ldd r18,Y+26 + ldd r19,Y+42 + ldd r20,Y+58 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,166 + sbci r29,255 + st Y,r23 + subi r28,90 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r9,r18 + or r9,r19 + eor r9,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+10,r21 + mov r21,r9 + and r21,r24 + eor r21,r25 + subi r28,182 + sbci r29,255 + st Y,r21 + subi r28,74 + sbc r29,r1 + ldd r24,Y+11 + ldd r18,Y+27 + ldd r19,Y+43 
+ ldd r20,Y+59 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,165 + sbci r29,255 + st Y,r23 + subi r28,91 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r10,r18 + or r10,r19 + eor r10,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+11,r21 + mov r21,r10 + and r21,r24 + eor r21,r25 + subi r28,181 + sbci r29,255 + st Y,r21 + subi r28,75 + sbc r29,r1 + ldd r24,Y+12 + ldd r18,Y+28 + ldd r19,Y+44 + ldd r20,Y+60 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,164 + sbci r29,255 + st Y,r23 + subi r28,92 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r11,r18 + or r11,r19 + eor r11,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+12,r21 + mov r21,r11 + and r21,r24 + eor r21,r25 + subi r28,180 + sbci r29,255 + st Y,r21 + subi r28,76 + sbc r29,r1 + ldd r24,Y+13 + ldd r18,Y+29 + ldd r19,Y+45 + ldd r20,Y+61 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,163 + sbci r29,255 + st Y,r23 + subi r28,93 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r12,r18 + or r12,r19 + eor r12,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+13,r21 + mov r21,r12 + and r21,r24 + eor r21,r25 + subi r28,179 + sbci r29,255 + st Y,r21 + subi r28,77 + sbc r29,r1 + ldd r24,Y+14 + ldd r18,Y+30 + ldd r19,Y+46 + ldd r20,Y+62 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,162 + sbci r29,255 + st Y,r23 + subi r28,94 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r13,r18 + or r13,r19 + eor r13,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+14,r21 + mov r21,r13 + and r21,r24 + eor r21,r25 + subi r28,178 + sbci r29,255 + st Y,r21 + subi r28,78 + sbc r29,r1 + ldd r24,Y+15 + ldd r18,Y+31 + ldd r19,Y+47 + ldd r20,Y+63 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,161 + sbci r29,255 + st Y,r23 + subi r28,95 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r14,r18 + or r14,r19 + eor r14,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+15,r21 + mov r21,r14 + and r21,r24 + eor r21,r25 + subi r28,177 + sbci r29,255 + st Y,r21 + subi r28,79 + sbc r29,r1 + ldd r24,Y+16 + ldd r18,Y+32 + ldd r19,Y+48 + subi r28,192 + sbci r29,255 + ld r20,Y + subi r28,64 + sbc r29,r1 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,160 + sbci r29,255 + st Y,r23 + subi r28,96 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r15,r18 + or r15,r19 + eor r15,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+16,r21 + mov r21,r15 + and r21,r24 + eor r21,r25 + subi r28,176 + sbci r29,255 + st Y,r21 + subi r28,80 + sbc r29,r1 + std Y+33,r14 + std Y+34,r15 + std Y+35,r26 + std Y+36,r27 + std Y+37,r2 + std Y+38,r3 + std Y+39,r4 + std Y+40,r5 + std Y+41,r6 + std Y+42,r7 + std Y+43,r8 + std Y+44,r9 + std Y+45,r10 + std Y+46,r11 + std Y+47,r12 + std Y+48,r13 + subi r28,191 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,80 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std 
Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,96 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + adiw r28,49 + st Y+,r13 + st Y+,r14 + st Y+,r15 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y,r12 + subi r28,64 + sbc r29,r1 + dec r22 + breq 5812f + rjmp 134b +5812: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r26,Y+17 + ldd r27,Y+18 + ldd r2,Y+19 + ldd r3,Y+20 + ldd r4,Y+21 + ldd r5,Y+22 + ldd r6,Y+23 + ldd r7,Y+24 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + std Z+16,r26 + std Z+17,r27 + std Z+18,r2 + std Z+19,r3 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + ldd r26,Y+33 + ldd r27,Y+34 + ldd r2,Y+35 + ldd r3,Y+36 + ldd r4,Y+37 + ldd r5,Y+38 + ldd r6,Y+39 + ldd r7,Y+40 + ldd r8,Y+41 + ldd r9,Y+42 + ldd r10,Y+43 + ldd r11,Y+44 + ldd r12,Y+45 + ldd r13,Y+46 + ldd r14,Y+47 + ldd r15,Y+48 + std Z+32,r26 + std Z+33,r27 + std Z+34,r2 + std Z+35,r3 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r8 + std Z+41,r9 + std Z+42,r10 + std Z+43,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + adiw r28,49 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,64 + sbc r29,r1 + std Z+48,r26 + std Z+49,r27 + std Z+50,r2 + std Z+51,r3 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + std Z+56,r8 + std Z+57,r9 + std Z+58,r10 + std Z+59,r11 + std Z+60,r12 + std Z+61,r13 + std Z+62,r14 + std Z+63,r15 + subi r28,160 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot512_permute_7, .-knot512_permute_7 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_8, @object + .size table_8, 140 +table_8: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 17 + .byte 35 + .byte 71 + .byte 142 + .byte 28 + .byte 56 + .byte 113 + .byte 226 + .byte 196 + .byte 137 + .byte 18 + .byte 37 + .byte 75 + .byte 151 + .byte 46 + .byte 92 + .byte 184 + .byte 112 + .byte 224 + .byte 192 + .byte 129 + .byte 3 + .byte 6 + .byte 12 + .byte 25 + .byte 50 + .byte 100 + .byte 201 + .byte 146 + .byte 36 + .byte 73 + 
.byte 147 + .byte 38 + .byte 77 + .byte 155 + .byte 55 + .byte 110 + .byte 220 + .byte 185 + .byte 114 + .byte 228 + .byte 200 + .byte 144 + .byte 32 + .byte 65 + .byte 130 + .byte 5 + .byte 10 + .byte 21 + .byte 43 + .byte 86 + .byte 173 + .byte 91 + .byte 182 + .byte 109 + .byte 218 + .byte 181 + .byte 107 + .byte 214 + .byte 172 + .byte 89 + .byte 178 + .byte 101 + .byte 203 + .byte 150 + .byte 44 + .byte 88 + .byte 176 + .byte 97 + .byte 195 + .byte 135 + .byte 15 + .byte 31 + .byte 62 + .byte 125 + .byte 251 + .byte 246 + .byte 237 + .byte 219 + .byte 183 + .byte 111 + .byte 222 + .byte 189 + .byte 122 + .byte 245 + .byte 235 + .byte 215 + .byte 174 + .byte 93 + .byte 186 + .byte 116 + .byte 232 + .byte 209 + .byte 162 + .byte 68 + .byte 136 + .byte 16 + .byte 33 + .byte 67 + .byte 134 + .byte 13 + .byte 27 + .byte 54 + .byte 108 + .byte 216 + .byte 177 + .byte 99 + .byte 199 + .byte 143 + .byte 30 + .byte 60 + .byte 121 + .byte 243 + .byte 231 + .byte 206 + .byte 156 + .byte 57 + .byte 115 + .byte 230 + .byte 204 + .byte 152 + .byte 49 + .byte 98 + .byte 197 + .byte 139 + .byte 22 + .byte 45 + .byte 90 + .byte 180 + .byte 105 + .byte 210 + .byte 164 + .byte 72 + .byte 145 + .byte 34 + .byte 69 + + .text +.global knot512_permute_8 + .type knot512_permute_8, @function +knot512_permute_8: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,96 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 113 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r26,Z+16 + ldd r27,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + ldd r26,Z+32 + ldd r27,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r8,Z+40 + ldd r9,Z+41 + ldd r10,Z+42 + ldd r11,Z+43 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + std Y+33,r26 + std Y+34,r27 + std Y+35,r2 + std Y+36,r3 + std Y+37,r4 + std Y+38,r5 + std Y+39,r6 + std Y+40,r7 + std Y+41,r8 + std Y+42,r9 + std Y+43,r10 + std Y+44,r11 + std Y+45,r12 + std Y+46,r13 + std Y+47,r14 + std Y+48,r15 + ldd r26,Z+48 + ldd r27,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r8,Z+56 + ldd r9,Z+57 + ldd r10,Z+58 + ldd r11,Z+59 + ldd r12,Z+60 + ldd r13,Z+61 + ldd r14,Z+62 + ldd r15,Z+63 + adiw r28,49 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y+,r12 + st Y+,r13 + st Y+,r14 + st Y,r15 + subi r28,64 + sbc r29,r1 + push r31 + push r30 + ldi r30,lo8(table_8) + ldi r31,hi8(table_8) +#if 
defined(RAMPZ) + ldi r17,hh8(table_8) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +134: + ldd r24,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r24,r18 + inc r30 + ldd r18,Y+17 + ldd r19,Y+33 + ldd r20,Y+49 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,175 + sbci r29,255 + st Y,r23 + subi r28,81 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r26,r18 + or r26,r19 + eor r26,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+1,r21 + mov r21,r26 + and r21,r24 + eor r21,r25 + subi r28,191 + sbci r29,255 + st Y,r21 + subi r28,65 + sbc r29,r1 + ldd r24,Y+2 + ldd r18,Y+18 + ldd r19,Y+34 + ldd r20,Y+50 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,174 + sbci r29,255 + st Y,r23 + subi r28,82 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r27,r18 + or r27,r19 + eor r27,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+2,r21 + mov r21,r27 + and r21,r24 + eor r21,r25 + subi r28,190 + sbci r29,255 + st Y,r21 + subi r28,66 + sbc r29,r1 + ldd r24,Y+3 + ldd r18,Y+19 + ldd r19,Y+35 + ldd r20,Y+51 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,173 + sbci r29,255 + st Y,r23 + subi r28,83 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r2,r18 + or r2,r19 + eor r2,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+3,r21 + mov r21,r2 + and r21,r24 + eor r21,r25 + subi r28,189 + sbci r29,255 + st Y,r21 + subi r28,67 + sbc r29,r1 + ldd r24,Y+4 + ldd r18,Y+20 + ldd r19,Y+36 + ldd r20,Y+52 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,172 + sbci r29,255 + st Y,r23 + subi r28,84 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r3,r18 + or r3,r19 + eor r3,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+4,r21 + mov r21,r3 + and r21,r24 + eor r21,r25 + subi r28,188 + sbci r29,255 + st Y,r21 + subi r28,68 + sbc r29,r1 + ldd r24,Y+5 + ldd r18,Y+21 + ldd r19,Y+37 + ldd r20,Y+53 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,171 + sbci r29,255 + st Y,r23 + subi r28,85 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r4,r18 + or r4,r19 + eor r4,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+5,r21 + mov r21,r4 + and r21,r24 + eor r21,r25 + subi r28,187 + sbci r29,255 + st Y,r21 + subi r28,69 + sbc r29,r1 + ldd r24,Y+6 + ldd r18,Y+22 + ldd r19,Y+38 + ldd r20,Y+54 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,170 + sbci r29,255 + st Y,r23 + subi r28,86 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r5,r18 + or r5,r19 + eor r5,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+6,r21 + mov r21,r5 + and r21,r24 + eor r21,r25 + subi r28,186 + sbci r29,255 + st Y,r21 + subi r28,70 + sbc r29,r1 + ldd r24,Y+7 + ldd r18,Y+23 + ldd r19,Y+39 + ldd r20,Y+55 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,169 + sbci r29,255 + st Y,r23 + subi r28,87 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r6,r18 + or r6,r19 + eor r6,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+7,r21 + mov r21,r6 + and r21,r24 + eor r21,r25 + subi r28,185 + sbci r29,255 + st Y,r21 + subi r28,71 
+ sbc r29,r1 + ldd r24,Y+8 + ldd r18,Y+24 + ldd r19,Y+40 + ldd r20,Y+56 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,168 + sbci r29,255 + st Y,r23 + subi r28,88 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r7,r18 + or r7,r19 + eor r7,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+8,r21 + mov r21,r7 + and r21,r24 + eor r21,r25 + subi r28,184 + sbci r29,255 + st Y,r21 + subi r28,72 + sbc r29,r1 + ldd r24,Y+9 + ldd r18,Y+25 + ldd r19,Y+41 + ldd r20,Y+57 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,167 + sbci r29,255 + st Y,r23 + subi r28,89 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r8,r18 + or r8,r19 + eor r8,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+9,r21 + mov r21,r8 + and r21,r24 + eor r21,r25 + subi r28,183 + sbci r29,255 + st Y,r21 + subi r28,73 + sbc r29,r1 + ldd r24,Y+10 + ldd r18,Y+26 + ldd r19,Y+42 + ldd r20,Y+58 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,166 + sbci r29,255 + st Y,r23 + subi r28,90 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r9,r18 + or r9,r19 + eor r9,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+10,r21 + mov r21,r9 + and r21,r24 + eor r21,r25 + subi r28,182 + sbci r29,255 + st Y,r21 + subi r28,74 + sbc r29,r1 + ldd r24,Y+11 + ldd r18,Y+27 + ldd r19,Y+43 + ldd r20,Y+59 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,165 + sbci r29,255 + st Y,r23 + subi r28,91 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r10,r18 + or r10,r19 + eor r10,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+11,r21 + mov r21,r10 + and r21,r24 + eor r21,r25 + subi r28,181 + sbci r29,255 + st Y,r21 + subi r28,75 + sbc r29,r1 + ldd r24,Y+12 + ldd r18,Y+28 + ldd r19,Y+44 + ldd r20,Y+60 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,164 + sbci r29,255 + st Y,r23 + subi r28,92 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r11,r18 + or r11,r19 + eor r11,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+12,r21 + mov r21,r11 + and r21,r24 + eor r21,r25 + subi r28,180 + sbci r29,255 + st Y,r21 + subi r28,76 + sbc r29,r1 + ldd r24,Y+13 + ldd r18,Y+29 + ldd r19,Y+45 + ldd r20,Y+61 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,163 + sbci r29,255 + st Y,r23 + subi r28,93 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r12,r18 + or r12,r19 + eor r12,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+13,r21 + mov r21,r12 + and r21,r24 + eor r21,r25 + subi r28,179 + sbci r29,255 + st Y,r21 + subi r28,77 + sbc r29,r1 + ldd r24,Y+14 + ldd r18,Y+30 + ldd r19,Y+46 + ldd r20,Y+62 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,162 + sbci r29,255 + st Y,r23 + subi r28,94 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r13,r18 + or r13,r19 + eor r13,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+14,r21 + mov r21,r13 + and r21,r24 + eor r21,r25 + subi r28,178 + sbci r29,255 + st Y,r21 + subi r28,78 + sbc r29,r1 + ldd r24,Y+15 + ldd r18,Y+31 + ldd r19,Y+47 + ldd r20,Y+63 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,161 + sbci r29,255 + st Y,r23 + subi r28,95 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r14,r18 + 
or r14,r19 + eor r14,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+15,r21 + mov r21,r14 + and r21,r24 + eor r21,r25 + subi r28,177 + sbci r29,255 + st Y,r21 + subi r28,79 + sbc r29,r1 + ldd r24,Y+16 + ldd r18,Y+32 + ldd r19,Y+48 + subi r28,192 + sbci r29,255 + ld r20,Y + subi r28,64 + sbc r29,r1 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,160 + sbci r29,255 + st Y,r23 + subi r28,96 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r15,r18 + or r15,r19 + eor r15,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+16,r21 + mov r21,r15 + and r21,r24 + eor r21,r25 + subi r28,176 + sbci r29,255 + st Y,r21 + subi r28,80 + sbc r29,r1 + std Y+33,r14 + std Y+34,r15 + std Y+35,r26 + std Y+36,r27 + std Y+37,r2 + std Y+38,r3 + std Y+39,r4 + std Y+40,r5 + std Y+41,r6 + std Y+42,r7 + std Y+43,r8 + std Y+44,r9 + std Y+45,r10 + std Y+46,r11 + std Y+47,r12 + std Y+48,r13 + subi r28,191 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,80 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,96 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + adiw r28,49 + st Y+,r13 + st Y+,r14 + st Y+,r15 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y,r12 + subi r28,64 + sbc r29,r1 + dec r22 + breq 5812f + rjmp 134b +5812: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r26,Y+17 + ldd r27,Y+18 + ldd r2,Y+19 + ldd r3,Y+20 + ldd r4,Y+21 + ldd r5,Y+22 + ldd r6,Y+23 + ldd r7,Y+24 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + std Z+16,r26 + std Z+17,r27 + std Z+18,r2 + std Z+19,r3 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + ldd r26,Y+33 + ldd r27,Y+34 + ldd r2,Y+35 + ldd r3,Y+36 + ldd r4,Y+37 + ldd r5,Y+38 + ldd r6,Y+39 + ldd r7,Y+40 + ldd r8,Y+41 + ldd r9,Y+42 + ldd r10,Y+43 + ldd r11,Y+44 + ldd r12,Y+45 + ldd r13,Y+46 + ldd r14,Y+47 + ldd r15,Y+48 + std Z+32,r26 + std Z+33,r27 + std Z+34,r2 + 
std Z+35,r3 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r8 + std Z+41,r9 + std Z+42,r10 + std Z+43,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + adiw r28,49 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,64 + sbc r29,r1 + std Z+48,r26 + std Z+49,r27 + std Z+50,r2 + std Z+51,r3 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + std Z+56,r8 + std Z+57,r9 + std Z+58,r10 + std Z+59,r11 + std Z+60,r12 + std Z+61,r13 + std Z+62,r14 + std Z+63,r15 + subi r28,160 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot512_permute_8, .-knot512_permute_8 + +#endif diff --git a/knot/Implementations/crypto_aead/knot192/rhys/internal-knot.c b/knot/Implementations/crypto_aead/knot192/rhys/internal-knot.c index 3486e6e..f8b378e 100644 --- a/knot/Implementations/crypto_aead/knot192/rhys/internal-knot.c +++ b/knot/Implementations/crypto_aead/knot192/rhys/internal-knot.c @@ -22,6 +22,8 @@ #include "internal-knot.h" +#if !defined(__AVR__) + /* Round constants for the KNOT-256, KNOT-384, and KNOT-512 permutations */ static uint8_t const rc6[52] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06, 0x0c, 0x18, 0x31, 0x22, @@ -295,3 +297,5 @@ void knot512_permute_8(knot512_state_t *state, uint8_t rounds) { knot512_permute(state, rc8, rounds); } + +#endif /* !__AVR__ */ diff --git a/knot/Implementations/crypto_aead/knot192/rhys/internal-util.h b/knot/Implementations/crypto_aead/knot192/rhys/internal-util.h index e79158c..e30166d 100644 --- a/knot/Implementations/crypto_aead/knot192/rhys/internal-util.h +++ b/knot/Implementations/crypto_aead/knot192/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/knot/Implementations/crypto_aead/knot256/rhys-avr/aead-common.c b/knot/Implementations/crypto_aead/knot256/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/knot/Implementations/crypto_aead/knot256/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/knot/Implementations/crypto_aead/knot256/rhys-avr/aead-common.h b/knot/Implementations/crypto_aead/knot256/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/knot/Implementations/crypto_aead/knot256/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. 
- * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_aead/knot256/rhys-avr/api.h b/knot/Implementations/crypto_aead/knot256/rhys-avr/api.h deleted file mode 100644 index c11fc10..0000000 --- a/knot/Implementations/crypto_aead/knot256/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 32 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 32 -#define CRYPTO_ABYTES 32 -#define CRYPTO_NOOVERLAP 1 diff --git a/knot/Implementations/crypto_aead/knot256/rhys-avr/encrypt.c b/knot/Implementations/crypto_aead/knot256/rhys-avr/encrypt.c deleted file mode 100644 index 8f6225a..0000000 --- a/knot/Implementations/crypto_aead/knot256/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "knot.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return knot_aead_256_512_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return knot_aead_256_512_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot-256-avr.S b/knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot-256-avr.S deleted file mode 100644 index 15e6389..0000000 --- a/knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot-256-avr.S +++ /dev/null @@ -1,1093 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_6, @object - .size table_6, 52 -table_6: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 33 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 49 - .byte 34 - .byte 5 - .byte 10 - .byte 20 - .byte 41 - .byte 19 - .byte 39 - .byte 15 - .byte 30 - .byte 61 - .byte 58 - .byte 52 - .byte 40 - .byte 17 - .byte 35 - .byte 7 - .byte 14 - .byte 28 - .byte 57 - .byte 50 - .byte 36 - .byte 9 - .byte 18 - .byte 37 - .byte 11 - .byte 22 - .byte 45 - .byte 27 - .byte 55 - .byte 46 - .byte 29 - .byte 59 - .byte 54 - .byte 44 - .byte 25 - .byte 51 - .byte 38 - .byte 13 - .byte 26 - .byte 53 - .byte 42 - - .text -.global knot256_permute_6 - .type knot256_permute_6, @function -knot256_permute_6: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in 
r28,0x3d - in r29,0x3e - sbiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 57 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r8 - std Y+18,r9 - std Y+19,r10 - std Y+20,r11 - std Y+21,r12 - std Y+22,r13 - std Y+23,r14 - std Y+24,r15 - push r31 - push r30 - ldi r30,lo8(table_6) - ldi r31,hi8(table_6) -#if defined(RAMPZ) - ldi r17,hh8(table_6) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -59: -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - eor r18,r23 - inc r30 - ldd r23,Y+1 - ldd r4,Y+9 - ldd r5,Y+17 - mov r24,r18 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+33,r7 - mov r16,r5 - eor r16,r24 - mov r8,r23 - or r8,r4 - eor r8,r16 - mov r24,r23 - eor r24,r5 - mov r18,r25 - and r18,r16 - eor r18,r24 - mov r6,r8 - and r6,r24 - eor r6,r25 - std Y+25,r6 - ldd r23,Y+2 - ldd r4,Y+10 - ldd r5,Y+18 - mov r24,r19 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+34,r7 - mov r16,r5 - eor r16,r24 - mov r9,r23 - or r9,r4 - eor r9,r16 - mov r24,r23 - eor r24,r5 - mov r19,r25 - and r19,r16 - eor r19,r24 - mov r6,r9 - and r6,r24 - eor r6,r25 - std Y+26,r6 - ldd r23,Y+3 - ldd r4,Y+11 - ldd r5,Y+19 - mov r24,r20 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+35,r7 - mov r16,r5 - eor r16,r24 - mov r10,r23 - or r10,r4 - eor r10,r16 - mov r24,r23 - eor r24,r5 - mov r20,r25 - and r20,r16 - eor r20,r24 - mov r6,r10 - and r6,r24 - eor r6,r25 - std Y+27,r6 - ldd r23,Y+4 - ldd r4,Y+12 - ldd r5,Y+20 - mov r24,r21 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+36,r7 - mov r16,r5 - eor r16,r24 - mov r11,r23 - or r11,r4 - eor r11,r16 - mov r24,r23 - eor r24,r5 - mov r21,r25 - and r21,r16 - eor r21,r24 - mov r6,r11 - and r6,r24 - eor r6,r25 - std Y+28,r6 - ldd r23,Y+5 - ldd r4,Y+13 - ldd r5,Y+21 - mov r24,r26 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+37,r7 - mov r16,r5 - eor r16,r24 - mov r12,r23 - or r12,r4 - eor r12,r16 - mov r24,r23 - eor r24,r5 - mov r26,r25 - and r26,r16 - eor r26,r24 - mov r6,r12 - and r6,r24 - eor r6,r25 - std Y+29,r6 - ldd r23,Y+6 - ldd r4,Y+14 - ldd r5,Y+22 - mov r24,r27 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+38,r7 - mov r16,r5 - eor r16,r24 - mov r13,r23 - or r13,r4 - eor r13,r16 - mov r24,r23 - eor r24,r5 - mov r27,r25 - and r27,r16 - eor r27,r24 - mov r6,r13 - and r6,r24 - eor r6,r25 - std Y+30,r6 - ldd r23,Y+7 - ldd r4,Y+15 - ldd r5,Y+23 - mov r24,r2 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+39,r7 - mov r16,r5 - eor r16,r24 - mov r14,r23 - or r14,r4 - eor r14,r16 - mov r24,r23 - eor r24,r5 - mov r2,r25 - and 
r2,r16 - eor r2,r24 - mov r6,r14 - and r6,r24 - eor r6,r25 - std Y+31,r6 - ldd r23,Y+8 - ldd r4,Y+16 - ldd r5,Y+24 - mov r24,r3 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+40,r7 - mov r16,r5 - eor r16,r24 - mov r15,r23 - or r15,r4 - eor r15,r16 - mov r24,r23 - eor r24,r5 - mov r3,r25 - and r3,r16 - eor r3,r24 - mov r6,r15 - and r6,r24 - eor r6,r25 - std Y+32,r6 - std Y+9,r15 - std Y+10,r8 - std Y+11,r9 - std Y+12,r10 - std Y+13,r11 - std Y+14,r12 - std Y+15,r13 - std Y+16,r14 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - ldd r12,Y+37 - ldd r13,Y+38 - ldd r14,Y+39 - ldd r15,Y+40 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+17,r13 - std Y+18,r14 - std Y+19,r15 - std Y+20,r8 - std Y+21,r9 - std Y+22,r10 - std Y+23,r11 - std Y+24,r12 - dec r22 - breq 5322f - rjmp 59b -5322: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - ldd r12,Y+5 - ldd r13,Y+6 - ldd r14,Y+7 - ldd r15,Y+8 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - ldd r8,Y+17 - ldd r9,Y+18 - ldd r10,Y+19 - ldd r11,Y+20 - ldd r12,Y+21 - ldd r13,Y+22 - ldd r14,Y+23 - ldd r15,Y+24 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - adiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot256_permute_6, .-knot256_permute_6 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - 
.byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot256_permute_7 - .type knot256_permute_7, @function -knot256_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 57 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r8 - std Y+18,r9 - std Y+19,r10 - std Y+20,r11 - std Y+21,r12 - std Y+22,r13 - std Y+23,r14 - std Y+24,r15 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r17,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -59: -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - eor r18,r23 - inc r30 - ldd r23,Y+1 - ldd r4,Y+9 - ldd r5,Y+17 - mov r24,r18 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+33,r7 - mov r16,r5 - eor r16,r24 - mov r8,r23 - or r8,r4 - eor r8,r16 - mov r24,r23 - eor r24,r5 - mov r18,r25 - and r18,r16 - eor r18,r24 - mov r6,r8 - and r6,r24 - eor r6,r25 - std Y+25,r6 - ldd r23,Y+2 - ldd r4,Y+10 - ldd r5,Y+18 - mov r24,r19 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+34,r7 - mov r16,r5 - eor r16,r24 - mov r9,r23 - or r9,r4 - eor r9,r16 - mov r24,r23 - eor r24,r5 - mov r19,r25 - and r19,r16 - eor r19,r24 - mov r6,r9 - and r6,r24 - eor r6,r25 - std Y+26,r6 - ldd r23,Y+3 - ldd r4,Y+11 - ldd r5,Y+19 - mov r24,r20 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+35,r7 - mov r16,r5 - eor r16,r24 - mov r10,r23 - or r10,r4 - eor r10,r16 - mov r24,r23 - eor r24,r5 - mov r20,r25 - and r20,r16 - eor r20,r24 - mov r6,r10 - and r6,r24 - eor r6,r25 - std Y+27,r6 - ldd r23,Y+4 - ldd r4,Y+12 - ldd r5,Y+20 - mov r24,r21 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+36,r7 - mov r16,r5 - eor r16,r24 - mov r11,r23 - or r11,r4 - eor r11,r16 - mov r24,r23 - eor r24,r5 - mov r21,r25 - and r21,r16 - eor r21,r24 - mov r6,r11 - and r6,r24 - eor r6,r25 - std Y+28,r6 - ldd r23,Y+5 - ldd r4,Y+13 - ldd r5,Y+21 - mov r24,r26 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+37,r7 - mov r16,r5 - eor r16,r24 - mov r12,r23 - or r12,r4 - eor r12,r16 - mov r24,r23 - eor r24,r5 - mov r26,r25 - and r26,r16 - eor r26,r24 - mov r6,r12 - and r6,r24 - eor r6,r25 - std Y+29,r6 - ldd r23,Y+6 - ldd r4,Y+14 - ldd r5,Y+22 - mov r24,r27 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+38,r7 - mov r16,r5 - eor r16,r24 - mov r13,r23 - or r13,r4 - 
eor r13,r16 - mov r24,r23 - eor r24,r5 - mov r27,r25 - and r27,r16 - eor r27,r24 - mov r6,r13 - and r6,r24 - eor r6,r25 - std Y+30,r6 - ldd r23,Y+7 - ldd r4,Y+15 - ldd r5,Y+23 - mov r24,r2 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+39,r7 - mov r16,r5 - eor r16,r24 - mov r14,r23 - or r14,r4 - eor r14,r16 - mov r24,r23 - eor r24,r5 - mov r2,r25 - and r2,r16 - eor r2,r24 - mov r6,r14 - and r6,r24 - eor r6,r25 - std Y+31,r6 - ldd r23,Y+8 - ldd r4,Y+16 - ldd r5,Y+24 - mov r24,r3 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+40,r7 - mov r16,r5 - eor r16,r24 - mov r15,r23 - or r15,r4 - eor r15,r16 - mov r24,r23 - eor r24,r5 - mov r3,r25 - and r3,r16 - eor r3,r24 - mov r6,r15 - and r6,r24 - eor r6,r25 - std Y+32,r6 - std Y+9,r15 - std Y+10,r8 - std Y+11,r9 - std Y+12,r10 - std Y+13,r11 - std Y+14,r12 - std Y+15,r13 - std Y+16,r14 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - ldd r12,Y+37 - ldd r13,Y+38 - ldd r14,Y+39 - ldd r15,Y+40 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+17,r13 - std Y+18,r14 - std Y+19,r15 - std Y+20,r8 - std Y+21,r9 - std Y+22,r10 - std Y+23,r11 - std Y+24,r12 - dec r22 - breq 5322f - rjmp 59b -5322: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - ldd r12,Y+5 - ldd r13,Y+6 - ldd r14,Y+7 - ldd r15,Y+8 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - ldd r8,Y+17 - ldd r9,Y+18 - ldd r10,Y+19 - ldd r11,Y+20 - ldd r12,Y+21 - ldd r13,Y+22 - ldd r14,Y+23 - ldd r15,Y+24 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - adiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot256_permute_7, .-knot256_permute_7 - -#endif diff --git a/knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot-384-avr.S b/knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot-384-avr.S deleted file mode 100644 index 4d15898..0000000 --- a/knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot-384-avr.S +++ /dev/null @@ -1,833 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 15 - .byte 30 - .byte 
60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot384_permute_7 - .type knot384_permute_7, @function -knot384_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,72 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 87 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - ldd r4,Z+16 - ldd r5,Z+17 - ldd r6,Z+18 - ldd r7,Z+19 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - std Y+13,r26 - std Y+14,r27 - std Y+15,r2 - std Y+16,r3 - std Y+17,r4 - std Y+18,r5 - std Y+19,r6 - std Y+20,r7 - std Y+21,r8 - std Y+22,r9 - std Y+23,r10 - std Y+24,r11 - ldd r26,Z+24 - ldd r27,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r4,Z+28 - ldd r5,Z+29 - ldd r6,Z+30 - ldd r7,Z+31 - ldd r8,Z+32 - ldd r9,Z+33 - ldd r10,Z+34 - ldd r11,Z+35 - std Y+25,r26 - std Y+26,r27 - std Y+27,r2 - std Y+28,r3 - std Y+29,r4 - std Y+30,r5 - std Y+31,r6 - std Y+32,r7 - std Y+33,r8 - std Y+34,r9 - std Y+35,r10 - std Y+36,r11 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+37,r26 - std Y+38,r27 - std Y+39,r2 - std Y+40,r3 - std Y+41,r4 - std Y+42,r5 - std Y+43,r6 - std Y+44,r7 - std Y+45,r8 - std Y+46,r9 - std Y+47,r10 - std Y+48,r11 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r24,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif -99: - ldd r12,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - inc r30 - ldd r18,Y+13 - ldd r19,Y+25 - ldd r20,Y+37 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+61,r23 - mov r14,r20 - eor r14,r12 - mov r26,r18 - or r26,r19 - eor r26,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+1,r21 - mov r21,r26 - and r21,r12 - eor r21,r13 - std Y+49,r21 - ldd r12,Y+2 - ldd r18,Y+14 - ldd r19,Y+26 - ldd r20,Y+38 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+62,r23 - mov 
r14,r20 - eor r14,r12 - mov r27,r18 - or r27,r19 - eor r27,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+2,r21 - mov r21,r27 - and r21,r12 - eor r21,r13 - std Y+50,r21 - ldd r12,Y+3 - ldd r18,Y+15 - ldd r19,Y+27 - ldd r20,Y+39 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+63,r23 - mov r14,r20 - eor r14,r12 - mov r2,r18 - or r2,r19 - eor r2,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+3,r21 - mov r21,r2 - and r21,r12 - eor r21,r13 - std Y+51,r21 - ldd r12,Y+4 - ldd r18,Y+16 - ldd r19,Y+28 - ldd r20,Y+40 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,192 - sbci r29,255 - st Y,r23 - subi r28,64 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r3,r18 - or r3,r19 - eor r3,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+4,r21 - mov r21,r3 - and r21,r12 - eor r21,r13 - std Y+52,r21 - ldd r12,Y+5 - ldd r18,Y+17 - ldd r19,Y+29 - ldd r20,Y+41 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,191 - sbci r29,255 - st Y,r23 - subi r28,65 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r4,r18 - or r4,r19 - eor r4,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+5,r21 - mov r21,r4 - and r21,r12 - eor r21,r13 - std Y+53,r21 - ldd r12,Y+6 - ldd r18,Y+18 - ldd r19,Y+30 - ldd r20,Y+42 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,190 - sbci r29,255 - st Y,r23 - subi r28,66 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r5,r18 - or r5,r19 - eor r5,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+6,r21 - mov r21,r5 - and r21,r12 - eor r21,r13 - std Y+54,r21 - ldd r12,Y+7 - ldd r18,Y+19 - ldd r19,Y+31 - ldd r20,Y+43 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,189 - sbci r29,255 - st Y,r23 - subi r28,67 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r6,r18 - or r6,r19 - eor r6,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+7,r21 - mov r21,r6 - and r21,r12 - eor r21,r13 - std Y+55,r21 - ldd r12,Y+8 - ldd r18,Y+20 - ldd r19,Y+32 - ldd r20,Y+44 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,188 - sbci r29,255 - st Y,r23 - subi r28,68 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r7,r18 - or r7,r19 - eor r7,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+8,r21 - mov r21,r7 - and r21,r12 - eor r21,r13 - std Y+56,r21 - ldd r12,Y+9 - ldd r18,Y+21 - ldd r19,Y+33 - ldd r20,Y+45 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,187 - sbci r29,255 - st Y,r23 - subi r28,69 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r8,r18 - or r8,r19 - eor r8,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+9,r21 - mov r21,r8 - and r21,r12 - eor r21,r13 - std Y+57,r21 - ldd r12,Y+10 - ldd r18,Y+22 - ldd r19,Y+34 - ldd r20,Y+46 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,186 - sbci r29,255 - st Y,r23 - subi r28,70 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r9,r18 - or r9,r19 - eor r9,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+10,r21 - mov r21,r9 - and r21,r12 - eor r21,r13 - std Y+58,r21 - ldd r12,Y+11 - ldd r18,Y+23 - ldd r19,Y+35 - ldd r20,Y+47 - com r12 - mov r13,r18 - and r13,r12 
- eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,185 - sbci r29,255 - st Y,r23 - subi r28,71 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r10,r18 - or r10,r19 - eor r10,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+11,r21 - mov r21,r10 - and r21,r12 - eor r21,r13 - std Y+59,r21 - ldd r12,Y+12 - ldd r18,Y+24 - ldd r19,Y+36 - ldd r20,Y+48 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,184 - sbci r29,255 - st Y,r23 - subi r28,72 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r11,r18 - or r11,r19 - eor r11,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+12,r21 - mov r21,r11 - and r21,r12 - eor r21,r13 - std Y+60,r21 - std Y+25,r11 - std Y+26,r26 - std Y+27,r27 - std Y+28,r2 - std Y+29,r3 - std Y+30,r4 - std Y+31,r5 - std Y+32,r6 - std Y+33,r7 - std Y+34,r8 - std Y+35,r9 - std Y+36,r10 - ldd r26,Y+49 - ldd r27,Y+50 - ldd r2,Y+51 - ldd r3,Y+52 - ldd r4,Y+53 - ldd r5,Y+54 - ldd r6,Y+55 - ldd r7,Y+56 - ldd r8,Y+57 - ldd r9,Y+58 - ldd r10,Y+59 - ldd r11,Y+60 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - adc r26,r1 - std Y+13,r26 - std Y+14,r27 - std Y+15,r2 - std Y+16,r3 - std Y+17,r4 - std Y+18,r5 - std Y+19,r6 - std Y+20,r7 - std Y+21,r8 - std Y+22,r9 - std Y+23,r10 - std Y+24,r11 - adiw r28,61 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y - subi r28,72 - sbc r29,r1 - bst r26,0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r3 - ror r2 - ror r27 - ror r26 - bld r11,7 - std Y+37,r5 - std Y+38,r6 - std Y+39,r7 - std Y+40,r8 - std Y+41,r9 - std Y+42,r10 - std Y+43,r11 - std Y+44,r26 - std Y+45,r27 - std Y+46,r2 - std Y+47,r3 - std Y+48,r4 - dec r22 - breq 5542f - rjmp 99b -5542: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r2,Y+15 - ldd r3,Y+16 - ldd r4,Y+17 - ldd r5,Y+18 - ldd r6,Y+19 - ldd r7,Y+20 - ldd r8,Y+21 - ldd r9,Y+22 - ldd r10,Y+23 - ldd r11,Y+24 - std Z+12,r26 - std Z+13,r27 - std Z+14,r2 - std Z+15,r3 - std Z+16,r4 - std Z+17,r5 - std Z+18,r6 - std Z+19,r7 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - ldd r26,Y+25 - ldd r27,Y+26 - ldd r2,Y+27 - ldd r3,Y+28 - ldd r4,Y+29 - ldd r5,Y+30 - ldd r6,Y+31 - ldd r7,Y+32 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - std Z+24,r26 - std Z+25,r27 - std Z+26,r2 - std Z+27,r3 - std Z+28,r4 - std Z+29,r5 - std Z+30,r6 - std Z+31,r7 - std Z+32,r8 - std Z+33,r9 - std Z+34,r10 - std Z+35,r11 - ldd r26,Y+37 - ldd r27,Y+38 - ldd r2,Y+39 - ldd r3,Y+40 - ldd r4,Y+41 - ldd r5,Y+42 - ldd r6,Y+43 - ldd r7,Y+44 - ldd r8,Y+45 - ldd r9,Y+46 - ldd r10,Y+47 - ldd r11,Y+48 - std Z+36,r26 - std Z+37,r27 - std Z+38,r2 - std Z+39,r3 - std Z+40,r4 - std Z+41,r5 - std Z+42,r6 - std Z+43,r7 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - subi r28,184 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - 
pop r2 - pop r29 - pop r28 - ret - .size knot384_permute_7, .-knot384_permute_7 - -#endif diff --git a/knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot-512-avr.S b/knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot-512-avr.S deleted file mode 100644 index 6f92ac3..0000000 --- a/knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot-512-avr.S +++ /dev/null @@ -1,2315 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot512_permute_7 - .type knot512_permute_7, @function -knot512_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,96 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 113 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r26,Z+16 - ldd r27,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - ldd r26,Z+32 - ldd r27,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r8,Z+40 - ldd r9,Z+41 - ldd r10,Z+42 - ldd r11,Z+43 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - std Y+33,r26 - std Y+34,r27 - std Y+35,r2 - std Y+36,r3 - std Y+37,r4 - std Y+38,r5 - std Y+39,r6 - std Y+40,r7 - std Y+41,r8 - std Y+42,r9 - std Y+43,r10 - std Y+44,r11 - std Y+45,r12 - std Y+46,r13 - std Y+47,r14 - std Y+48,r15 
- ldd r26,Z+48 - ldd r27,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r8,Z+56 - ldd r9,Z+57 - ldd r10,Z+58 - ldd r11,Z+59 - ldd r12,Z+60 - ldd r13,Z+61 - ldd r14,Z+62 - ldd r15,Z+63 - adiw r28,49 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y+,r12 - st Y+,r13 - st Y+,r14 - st Y,r15 - subi r28,64 - sbc r29,r1 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r17,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -134: - ldd r24,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r24,r18 - inc r30 - ldd r18,Y+17 - ldd r19,Y+33 - ldd r20,Y+49 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,175 - sbci r29,255 - st Y,r23 - subi r28,81 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r26,r18 - or r26,r19 - eor r26,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+1,r21 - mov r21,r26 - and r21,r24 - eor r21,r25 - subi r28,191 - sbci r29,255 - st Y,r21 - subi r28,65 - sbc r29,r1 - ldd r24,Y+2 - ldd r18,Y+18 - ldd r19,Y+34 - ldd r20,Y+50 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,174 - sbci r29,255 - st Y,r23 - subi r28,82 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r27,r18 - or r27,r19 - eor r27,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+2,r21 - mov r21,r27 - and r21,r24 - eor r21,r25 - subi r28,190 - sbci r29,255 - st Y,r21 - subi r28,66 - sbc r29,r1 - ldd r24,Y+3 - ldd r18,Y+19 - ldd r19,Y+35 - ldd r20,Y+51 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,173 - sbci r29,255 - st Y,r23 - subi r28,83 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r2,r18 - or r2,r19 - eor r2,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+3,r21 - mov r21,r2 - and r21,r24 - eor r21,r25 - subi r28,189 - sbci r29,255 - st Y,r21 - subi r28,67 - sbc r29,r1 - ldd r24,Y+4 - ldd r18,Y+20 - ldd r19,Y+36 - ldd r20,Y+52 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,172 - sbci r29,255 - st Y,r23 - subi r28,84 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r3,r18 - or r3,r19 - eor r3,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+4,r21 - mov r21,r3 - and r21,r24 - eor r21,r25 - subi r28,188 - sbci r29,255 - st Y,r21 - subi r28,68 - sbc r29,r1 - ldd r24,Y+5 - ldd r18,Y+21 - ldd r19,Y+37 - ldd r20,Y+53 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,171 - sbci r29,255 - st Y,r23 - subi r28,85 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r4,r18 - or r4,r19 - eor r4,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+5,r21 - mov r21,r4 - and r21,r24 - eor r21,r25 - subi r28,187 - sbci r29,255 - st Y,r21 - subi r28,69 - sbc r29,r1 - ldd r24,Y+6 - ldd r18,Y+22 - ldd r19,Y+38 - ldd r20,Y+54 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,170 - sbci r29,255 - st Y,r23 - subi r28,86 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r5,r18 - or r5,r19 - eor r5,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+6,r21 - mov r21,r5 - and r21,r24 - eor 
r21,r25 - subi r28,186 - sbci r29,255 - st Y,r21 - subi r28,70 - sbc r29,r1 - ldd r24,Y+7 - ldd r18,Y+23 - ldd r19,Y+39 - ldd r20,Y+55 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,169 - sbci r29,255 - st Y,r23 - subi r28,87 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r6,r18 - or r6,r19 - eor r6,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+7,r21 - mov r21,r6 - and r21,r24 - eor r21,r25 - subi r28,185 - sbci r29,255 - st Y,r21 - subi r28,71 - sbc r29,r1 - ldd r24,Y+8 - ldd r18,Y+24 - ldd r19,Y+40 - ldd r20,Y+56 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,168 - sbci r29,255 - st Y,r23 - subi r28,88 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r7,r18 - or r7,r19 - eor r7,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+8,r21 - mov r21,r7 - and r21,r24 - eor r21,r25 - subi r28,184 - sbci r29,255 - st Y,r21 - subi r28,72 - sbc r29,r1 - ldd r24,Y+9 - ldd r18,Y+25 - ldd r19,Y+41 - ldd r20,Y+57 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,167 - sbci r29,255 - st Y,r23 - subi r28,89 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r8,r18 - or r8,r19 - eor r8,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+9,r21 - mov r21,r8 - and r21,r24 - eor r21,r25 - subi r28,183 - sbci r29,255 - st Y,r21 - subi r28,73 - sbc r29,r1 - ldd r24,Y+10 - ldd r18,Y+26 - ldd r19,Y+42 - ldd r20,Y+58 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,166 - sbci r29,255 - st Y,r23 - subi r28,90 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r9,r18 - or r9,r19 - eor r9,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+10,r21 - mov r21,r9 - and r21,r24 - eor r21,r25 - subi r28,182 - sbci r29,255 - st Y,r21 - subi r28,74 - sbc r29,r1 - ldd r24,Y+11 - ldd r18,Y+27 - ldd r19,Y+43 - ldd r20,Y+59 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,165 - sbci r29,255 - st Y,r23 - subi r28,91 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r10,r18 - or r10,r19 - eor r10,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+11,r21 - mov r21,r10 - and r21,r24 - eor r21,r25 - subi r28,181 - sbci r29,255 - st Y,r21 - subi r28,75 - sbc r29,r1 - ldd r24,Y+12 - ldd r18,Y+28 - ldd r19,Y+44 - ldd r20,Y+60 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,164 - sbci r29,255 - st Y,r23 - subi r28,92 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r11,r18 - or r11,r19 - eor r11,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+12,r21 - mov r21,r11 - and r21,r24 - eor r21,r25 - subi r28,180 - sbci r29,255 - st Y,r21 - subi r28,76 - sbc r29,r1 - ldd r24,Y+13 - ldd r18,Y+29 - ldd r19,Y+45 - ldd r20,Y+61 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,163 - sbci r29,255 - st Y,r23 - subi r28,93 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r12,r18 - or r12,r19 - eor r12,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+13,r21 - mov r21,r12 - and r21,r24 - eor r21,r25 - subi r28,179 - sbci r29,255 - st Y,r21 - subi r28,77 - sbc r29,r1 - ldd r24,Y+14 - ldd r18,Y+30 - ldd r19,Y+46 - ldd r20,Y+62 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,162 - sbci r29,255 - st Y,r23 - subi r28,94 - 
sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r13,r18 - or r13,r19 - eor r13,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+14,r21 - mov r21,r13 - and r21,r24 - eor r21,r25 - subi r28,178 - sbci r29,255 - st Y,r21 - subi r28,78 - sbc r29,r1 - ldd r24,Y+15 - ldd r18,Y+31 - ldd r19,Y+47 - ldd r20,Y+63 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,161 - sbci r29,255 - st Y,r23 - subi r28,95 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r14,r18 - or r14,r19 - eor r14,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+15,r21 - mov r21,r14 - and r21,r24 - eor r21,r25 - subi r28,177 - sbci r29,255 - st Y,r21 - subi r28,79 - sbc r29,r1 - ldd r24,Y+16 - ldd r18,Y+32 - ldd r19,Y+48 - subi r28,192 - sbci r29,255 - ld r20,Y - subi r28,64 - sbc r29,r1 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,160 - sbci r29,255 - st Y,r23 - subi r28,96 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r15,r18 - or r15,r19 - eor r15,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+16,r21 - mov r21,r15 - and r21,r24 - eor r21,r25 - subi r28,176 - sbci r29,255 - st Y,r21 - subi r28,80 - sbc r29,r1 - std Y+33,r14 - std Y+34,r15 - std Y+35,r26 - std Y+36,r27 - std Y+37,r2 - std Y+38,r3 - std Y+39,r4 - std Y+40,r5 - std Y+41,r6 - std Y+42,r7 - std Y+43,r8 - std Y+44,r9 - std Y+45,r10 - std Y+46,r11 - std Y+47,r12 - std Y+48,r13 - subi r28,191 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,80 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,96 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - adiw r28,49 - st Y+,r13 - st Y+,r14 - st Y+,r15 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y,r12 - subi r28,64 - sbc r29,r1 - dec r22 - breq 5812f - rjmp 134b -5812: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r26,Y+17 - ldd r27,Y+18 - ldd r2,Y+19 - ldd r3,Y+20 - ldd r4,Y+21 - ldd r5,Y+22 - ldd r6,Y+23 - ldd r7,Y+24 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd 
r14,Y+31 - ldd r15,Y+32 - std Z+16,r26 - std Z+17,r27 - std Z+18,r2 - std Z+19,r3 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - ldd r26,Y+33 - ldd r27,Y+34 - ldd r2,Y+35 - ldd r3,Y+36 - ldd r4,Y+37 - ldd r5,Y+38 - ldd r6,Y+39 - ldd r7,Y+40 - ldd r8,Y+41 - ldd r9,Y+42 - ldd r10,Y+43 - ldd r11,Y+44 - ldd r12,Y+45 - ldd r13,Y+46 - ldd r14,Y+47 - ldd r15,Y+48 - std Z+32,r26 - std Z+33,r27 - std Z+34,r2 - std Z+35,r3 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r8 - std Z+41,r9 - std Z+42,r10 - std Z+43,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - adiw r28,49 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,64 - sbc r29,r1 - std Z+48,r26 - std Z+49,r27 - std Z+50,r2 - std Z+51,r3 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - std Z+56,r8 - std Z+57,r9 - std Z+58,r10 - std Z+59,r11 - std Z+60,r12 - std Z+61,r13 - std Z+62,r14 - std Z+63,r15 - subi r28,160 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot512_permute_7, .-knot512_permute_7 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_8, @object - .size table_8, 140 -table_8: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 17 - .byte 35 - .byte 71 - .byte 142 - .byte 28 - .byte 56 - .byte 113 - .byte 226 - .byte 196 - .byte 137 - .byte 18 - .byte 37 - .byte 75 - .byte 151 - .byte 46 - .byte 92 - .byte 184 - .byte 112 - .byte 224 - .byte 192 - .byte 129 - .byte 3 - .byte 6 - .byte 12 - .byte 25 - .byte 50 - .byte 100 - .byte 201 - .byte 146 - .byte 36 - .byte 73 - .byte 147 - .byte 38 - .byte 77 - .byte 155 - .byte 55 - .byte 110 - .byte 220 - .byte 185 - .byte 114 - .byte 228 - .byte 200 - .byte 144 - .byte 32 - .byte 65 - .byte 130 - .byte 5 - .byte 10 - .byte 21 - .byte 43 - .byte 86 - .byte 173 - .byte 91 - .byte 182 - .byte 109 - .byte 218 - .byte 181 - .byte 107 - .byte 214 - .byte 172 - .byte 89 - .byte 178 - .byte 101 - .byte 203 - .byte 150 - .byte 44 - .byte 88 - .byte 176 - .byte 97 - .byte 195 - .byte 135 - .byte 15 - .byte 31 - .byte 62 - .byte 125 - .byte 251 - .byte 246 - .byte 237 - .byte 219 - .byte 183 - .byte 111 - .byte 222 - .byte 189 - .byte 122 - .byte 245 - .byte 235 - .byte 215 - .byte 174 - .byte 93 - .byte 186 - .byte 116 - .byte 232 - .byte 209 - .byte 162 - .byte 68 - .byte 136 - .byte 16 - .byte 33 - .byte 67 - .byte 134 - .byte 13 - .byte 27 - .byte 54 - .byte 108 - .byte 216 - .byte 177 - .byte 99 - .byte 199 - .byte 143 - .byte 30 - .byte 60 - .byte 121 - .byte 243 - .byte 231 - .byte 206 - .byte 156 - .byte 57 - .byte 115 - .byte 230 - .byte 204 - .byte 152 - .byte 49 - .byte 98 - .byte 197 - .byte 139 - .byte 22 - .byte 45 - .byte 90 - .byte 180 - .byte 105 - .byte 210 - .byte 164 - .byte 72 - .byte 145 - .byte 34 - .byte 69 - - .text -.global knot512_permute_8 - .type knot512_permute_8, @function -knot512_permute_8: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,96 - sbci r29,0 - 
in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 113 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r26,Z+16 - ldd r27,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - ldd r26,Z+32 - ldd r27,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r8,Z+40 - ldd r9,Z+41 - ldd r10,Z+42 - ldd r11,Z+43 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - std Y+33,r26 - std Y+34,r27 - std Y+35,r2 - std Y+36,r3 - std Y+37,r4 - std Y+38,r5 - std Y+39,r6 - std Y+40,r7 - std Y+41,r8 - std Y+42,r9 - std Y+43,r10 - std Y+44,r11 - std Y+45,r12 - std Y+46,r13 - std Y+47,r14 - std Y+48,r15 - ldd r26,Z+48 - ldd r27,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r8,Z+56 - ldd r9,Z+57 - ldd r10,Z+58 - ldd r11,Z+59 - ldd r12,Z+60 - ldd r13,Z+61 - ldd r14,Z+62 - ldd r15,Z+63 - adiw r28,49 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y+,r12 - st Y+,r13 - st Y+,r14 - st Y,r15 - subi r28,64 - sbc r29,r1 - push r31 - push r30 - ldi r30,lo8(table_8) - ldi r31,hi8(table_8) -#if defined(RAMPZ) - ldi r17,hh8(table_8) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -134: - ldd r24,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r24,r18 - inc r30 - ldd r18,Y+17 - ldd r19,Y+33 - ldd r20,Y+49 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,175 - sbci r29,255 - st Y,r23 - subi r28,81 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r26,r18 - or r26,r19 - eor r26,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+1,r21 - mov r21,r26 - and r21,r24 - eor r21,r25 - subi r28,191 - sbci r29,255 - st Y,r21 - subi r28,65 - sbc r29,r1 - ldd r24,Y+2 - ldd r18,Y+18 - ldd r19,Y+34 - ldd r20,Y+50 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,174 - sbci r29,255 - st Y,r23 - subi r28,82 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r27,r18 - or r27,r19 - eor r27,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+2,r21 - mov r21,r27 - and r21,r24 - eor r21,r25 - subi r28,190 - sbci r29,255 - st Y,r21 - subi r28,66 - sbc r29,r1 - ldd r24,Y+3 - ldd r18,Y+19 - ldd r19,Y+35 - ldd r20,Y+51 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,173 - sbci r29,255 - st Y,r23 - subi r28,83 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r2,r18 - or r2,r19 - eor r2,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor 
r21,r24 - std Y+3,r21 - mov r21,r2 - and r21,r24 - eor r21,r25 - subi r28,189 - sbci r29,255 - st Y,r21 - subi r28,67 - sbc r29,r1 - ldd r24,Y+4 - ldd r18,Y+20 - ldd r19,Y+36 - ldd r20,Y+52 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,172 - sbci r29,255 - st Y,r23 - subi r28,84 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r3,r18 - or r3,r19 - eor r3,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+4,r21 - mov r21,r3 - and r21,r24 - eor r21,r25 - subi r28,188 - sbci r29,255 - st Y,r21 - subi r28,68 - sbc r29,r1 - ldd r24,Y+5 - ldd r18,Y+21 - ldd r19,Y+37 - ldd r20,Y+53 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,171 - sbci r29,255 - st Y,r23 - subi r28,85 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r4,r18 - or r4,r19 - eor r4,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+5,r21 - mov r21,r4 - and r21,r24 - eor r21,r25 - subi r28,187 - sbci r29,255 - st Y,r21 - subi r28,69 - sbc r29,r1 - ldd r24,Y+6 - ldd r18,Y+22 - ldd r19,Y+38 - ldd r20,Y+54 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,170 - sbci r29,255 - st Y,r23 - subi r28,86 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r5,r18 - or r5,r19 - eor r5,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+6,r21 - mov r21,r5 - and r21,r24 - eor r21,r25 - subi r28,186 - sbci r29,255 - st Y,r21 - subi r28,70 - sbc r29,r1 - ldd r24,Y+7 - ldd r18,Y+23 - ldd r19,Y+39 - ldd r20,Y+55 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,169 - sbci r29,255 - st Y,r23 - subi r28,87 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r6,r18 - or r6,r19 - eor r6,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+7,r21 - mov r21,r6 - and r21,r24 - eor r21,r25 - subi r28,185 - sbci r29,255 - st Y,r21 - subi r28,71 - sbc r29,r1 - ldd r24,Y+8 - ldd r18,Y+24 - ldd r19,Y+40 - ldd r20,Y+56 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,168 - sbci r29,255 - st Y,r23 - subi r28,88 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r7,r18 - or r7,r19 - eor r7,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+8,r21 - mov r21,r7 - and r21,r24 - eor r21,r25 - subi r28,184 - sbci r29,255 - st Y,r21 - subi r28,72 - sbc r29,r1 - ldd r24,Y+9 - ldd r18,Y+25 - ldd r19,Y+41 - ldd r20,Y+57 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,167 - sbci r29,255 - st Y,r23 - subi r28,89 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r8,r18 - or r8,r19 - eor r8,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+9,r21 - mov r21,r8 - and r21,r24 - eor r21,r25 - subi r28,183 - sbci r29,255 - st Y,r21 - subi r28,73 - sbc r29,r1 - ldd r24,Y+10 - ldd r18,Y+26 - ldd r19,Y+42 - ldd r20,Y+58 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,166 - sbci r29,255 - st Y,r23 - subi r28,90 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r9,r18 - or r9,r19 - eor r9,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+10,r21 - mov r21,r9 - and r21,r24 - eor r21,r25 - subi r28,182 - sbci r29,255 - st Y,r21 - subi r28,74 - sbc r29,r1 - ldd r24,Y+11 - ldd r18,Y+27 - ldd r19,Y+43 - ldd r20,Y+59 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,165 - 
sbci r29,255 - st Y,r23 - subi r28,91 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r10,r18 - or r10,r19 - eor r10,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+11,r21 - mov r21,r10 - and r21,r24 - eor r21,r25 - subi r28,181 - sbci r29,255 - st Y,r21 - subi r28,75 - sbc r29,r1 - ldd r24,Y+12 - ldd r18,Y+28 - ldd r19,Y+44 - ldd r20,Y+60 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,164 - sbci r29,255 - st Y,r23 - subi r28,92 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r11,r18 - or r11,r19 - eor r11,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+12,r21 - mov r21,r11 - and r21,r24 - eor r21,r25 - subi r28,180 - sbci r29,255 - st Y,r21 - subi r28,76 - sbc r29,r1 - ldd r24,Y+13 - ldd r18,Y+29 - ldd r19,Y+45 - ldd r20,Y+61 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,163 - sbci r29,255 - st Y,r23 - subi r28,93 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r12,r18 - or r12,r19 - eor r12,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+13,r21 - mov r21,r12 - and r21,r24 - eor r21,r25 - subi r28,179 - sbci r29,255 - st Y,r21 - subi r28,77 - sbc r29,r1 - ldd r24,Y+14 - ldd r18,Y+30 - ldd r19,Y+46 - ldd r20,Y+62 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,162 - sbci r29,255 - st Y,r23 - subi r28,94 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r13,r18 - or r13,r19 - eor r13,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+14,r21 - mov r21,r13 - and r21,r24 - eor r21,r25 - subi r28,178 - sbci r29,255 - st Y,r21 - subi r28,78 - sbc r29,r1 - ldd r24,Y+15 - ldd r18,Y+31 - ldd r19,Y+47 - ldd r20,Y+63 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,161 - sbci r29,255 - st Y,r23 - subi r28,95 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r14,r18 - or r14,r19 - eor r14,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+15,r21 - mov r21,r14 - and r21,r24 - eor r21,r25 - subi r28,177 - sbci r29,255 - st Y,r21 - subi r28,79 - sbc r29,r1 - ldd r24,Y+16 - ldd r18,Y+32 - ldd r19,Y+48 - subi r28,192 - sbci r29,255 - ld r20,Y - subi r28,64 - sbc r29,r1 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,160 - sbci r29,255 - st Y,r23 - subi r28,96 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r15,r18 - or r15,r19 - eor r15,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+16,r21 - mov r21,r15 - and r21,r24 - eor r21,r25 - subi r28,176 - sbci r29,255 - st Y,r21 - subi r28,80 - sbc r29,r1 - std Y+33,r14 - std Y+34,r15 - std Y+35,r26 - std Y+36,r27 - std Y+37,r2 - std Y+38,r3 - std Y+39,r4 - std Y+40,r5 - std Y+41,r6 - std Y+42,r7 - std Y+43,r8 - std Y+44,r9 - std Y+45,r10 - std Y+46,r11 - std Y+47,r12 - std Y+48,r13 - subi r28,191 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,80 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - 
std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,96 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - adiw r28,49 - st Y+,r13 - st Y+,r14 - st Y+,r15 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y,r12 - subi r28,64 - sbc r29,r1 - dec r22 - breq 5812f - rjmp 134b -5812: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r26,Y+17 - ldd r27,Y+18 - ldd r2,Y+19 - ldd r3,Y+20 - ldd r4,Y+21 - ldd r5,Y+22 - ldd r6,Y+23 - ldd r7,Y+24 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - std Z+16,r26 - std Z+17,r27 - std Z+18,r2 - std Z+19,r3 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - ldd r26,Y+33 - ldd r27,Y+34 - ldd r2,Y+35 - ldd r3,Y+36 - ldd r4,Y+37 - ldd r5,Y+38 - ldd r6,Y+39 - ldd r7,Y+40 - ldd r8,Y+41 - ldd r9,Y+42 - ldd r10,Y+43 - ldd r11,Y+44 - ldd r12,Y+45 - ldd r13,Y+46 - ldd r14,Y+47 - ldd r15,Y+48 - std Z+32,r26 - std Z+33,r27 - std Z+34,r2 - std Z+35,r3 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r8 - std Z+41,r9 - std Z+42,r10 - std Z+43,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - adiw r28,49 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,64 - sbc r29,r1 - std Z+48,r26 - std Z+49,r27 - std Z+50,r2 - std Z+51,r3 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - std Z+56,r8 - std Z+57,r9 - std Z+58,r10 - std Z+59,r11 - std Z+60,r12 - std Z+61,r13 - std Z+62,r14 - std Z+63,r15 - subi r28,160 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot512_permute_8, .-knot512_permute_8 - -#endif diff --git a/knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot.c b/knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot.c deleted file mode 100644 index f8b378e..0000000 --- a/knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot.c +++ /dev/null @@ -1,301 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-knot.h" - -#if !defined(__AVR__) - -/* Round constants for the KNOT-256, KNOT-384, and KNOT-512 permutations */ -static uint8_t const rc6[52] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06, 0x0c, 0x18, 0x31, 0x22, - 0x05, 0x0a, 0x14, 0x29, 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28, - 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24, 0x09, 0x12, 0x25, 0x0b, - 0x16, 0x2d, 0x1b, 0x37, 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26, - 0x0d, 0x1a, 0x35, 0x2a -}; -static uint8_t const rc7[104] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03, 0x06, 0x0c, 0x18, 0x30, - 0x61, 0x42, 0x05, 0x0a, 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c, - 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b, 0x16, 0x2c, 0x59, 0x33, - 0x67, 0x4e, 0x1d, 0x3a, 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f, - 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43, 0x07, 0x0e, 0x1c, 0x38, - 0x71, 0x62, 0x44, 0x09, 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36, - 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37, 0x6f, 0x5e, 0x3d, 0x7b, - 0x76, 0x6c, 0x58, 0x31, 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25, - 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c -}; -static uint8_t const rc8[140] = { - 0x01, 0x02, 0x04, 0x08, 0x11, 0x23, 0x47, 0x8e, 0x1c, 0x38, 0x71, 0xe2, - 0xc4, 0x89, 0x12, 0x25, 0x4b, 0x97, 0x2e, 0x5c, 0xb8, 0x70, 0xe0, 0xc0, - 0x81, 0x03, 0x06, 0x0c, 0x19, 0x32, 0x64, 0xc9, 0x92, 0x24, 0x49, 0x93, - 0x26, 0x4d, 0x9b, 0x37, 0x6e, 0xdc, 0xb9, 0x72, 0xe4, 0xc8, 0x90, 0x20, - 0x41, 0x82, 0x05, 0x0a, 0x15, 0x2b, 0x56, 0xad, 0x5b, 0xb6, 0x6d, 0xda, - 0xb5, 0x6b, 0xd6, 0xac, 0x59, 0xb2, 0x65, 0xcb, 0x96, 0x2c, 0x58, 0xb0, - 0x61, 0xc3, 0x87, 0x0f, 0x1f, 0x3e, 0x7d, 0xfb, 0xf6, 0xed, 0xdb, 0xb7, - 0x6f, 0xde, 0xbd, 0x7a, 0xf5, 0xeb, 0xd7, 0xae, 0x5d, 0xba, 0x74, 0xe8, - 0xd1, 0xa2, 0x44, 0x88, 0x10, 0x21, 0x43, 0x86, 0x0d, 0x1b, 0x36, 0x6c, - 0xd8, 0xb1, 0x63, 0xc7, 0x8f, 0x1e, 0x3c, 0x79, 0xf3, 0xe7, 0xce, 0x9c, - 0x39, 0x73, 0xe6, 0xcc, 0x98, 0x31, 0x62, 0xc5, 0x8b, 0x16, 0x2d, 0x5a, - 0xb4, 0x69, 0xd2, 0xa4, 0x48, 0x91, 0x22, 0x45 -}; - -/* Applies the KNOT S-box to four 64-bit words in bit-sliced mode */ -#define knot_sbox64(a0, a1, a2, a3, b1, b2, b3) \ - do { \ - uint64_t t1, t3, t6; \ - t1 = ~(a0); \ - t3 = (a2) ^ ((a1) & t1); \ - (b3) = (a3) ^ t3; \ - t6 = (a3) ^ t1; \ - (b2) = ((a1) | (a2)) ^ t6; \ - t1 = (a1) ^ (a3); \ - (a0) = t1 ^ (t3 & t6); \ - (b1) = t3 ^ ((b2) & t1); \ - } while (0) - -/* Applies 
the KNOT S-box to four 32-bit words in bit-sliced mode */ -#define knot_sbox32(a0, a1, a2, a3, b1, b2, b3) \ - do { \ - uint32_t t1, t3, t6; \ - t1 = ~(a0); \ - t3 = (a2) ^ ((a1) & t1); \ - (b3) = (a3) ^ t3; \ - t6 = (a3) ^ t1; \ - (b2) = ((a1) | (a2)) ^ t6; \ - t1 = (a1) ^ (a3); \ - (a0) = t1 ^ (t3 & t6); \ - (b1) = t3 ^ ((b2) & t1); \ - } while (0) - -static void knot256_permute - (knot256_state_t *state, const uint8_t *rc, uint8_t rounds) -{ - uint64_t b1, b2, b3; - - /* Load the input state into local variables; each row is 64 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x1, x2, x3, b1, b2, b3); - - /* Linear diffusion layer */ - x1 = leftRotate1_64(b1); - x2 = leftRotate8_64(b2); - x3 = leftRotate25_64(b3); - } - - /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); -#endif -} - -void knot256_permute_6(knot256_state_t *state, uint8_t rounds) -{ - knot256_permute(state, rc6, rounds); -} - -void knot256_permute_7(knot256_state_t *state, uint8_t rounds) -{ - knot256_permute(state, rc7, rounds); -} - -void knot384_permute_7(knot384_state_t *state, uint8_t rounds) -{ - const uint8_t *rc = rc7; - uint64_t b2, b4, b6; - uint32_t b3, b5, b7; - - /* Load the input state into local variables; each row is 96 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint32_t x1 = state->W[2]; - uint64_t x2 = state->W[3] | (((uint64_t)(state->W[4])) << 32); - uint32_t x3 = state->W[5]; - uint64_t x4 = state->S[3]; - uint32_t x5 = state->W[8]; - uint64_t x6 = state->W[9] | (((uint64_t)(state->W[10])) << 32); - uint32_t x7 = state->W[11]; -#else - uint64_t x0 = le_load_word64(state->B); - uint32_t x1 = le_load_word32(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 12); - uint32_t x3 = le_load_word32(state->B + 20); - uint64_t x4 = le_load_word64(state->B + 24); - uint32_t x5 = le_load_word32(state->B + 32); - uint64_t x6 = le_load_word64(state->B + 36); - uint32_t x7 = le_load_word32(state->B + 44); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x2, x4, x6, b2, b4, b6); - knot_sbox32(x1, x3, x5, x7, b3, b5, b7); - - /* Linear diffusion layer */ - #define leftRotateShort_96(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | ((b1) >> (32 - (bits))); \ - (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ - } while (0) - #define leftRotateLong_96(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | \ - (((uint64_t)(b1)) << ((bits) - 32)) | \ - ((b0) >> (96 - (bits))); \ - (a1) = (uint32_t)(((b0) << ((bits) - 32)) >> 32); \ - } while (0) - leftRotateShort_96(x2, x3, b2, b3, 1); - leftRotateShort_96(x4, x5, b4, b5, 8); - leftRotateLong_96(x6, x7, b6, b7, 55); - } - 
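Because each KNOT-384 row is 96 bits wide, it is carried as a 64-bit word plus a 32-bit word, and the two macros above stitch the halves back together when rotating. A standalone sketch of the small-rotation case (0 < bits < 32), mirroring leftRotateShort_96 with illustrative names; leftRotateLong_96 plays the same game for rotations of 32 bits or more:

#include <stdint.h>
#include <stdio.h>

/* Rotate a 96-bit value left by 'bits' (0 < bits < 32).  The value is held as
 * lo (bits 0..63) and hi (bits 64..95), matching the row layout used above. */
static void rot96_left_short(uint64_t *lo, uint32_t *hi, unsigned bits)
{
    uint64_t l = *lo;
    uint32_t h = *hi;
    *lo = (l << bits) | ((uint64_t)h >> (32 - bits));
    *hi = (uint32_t)((((uint64_t)h) << bits) | (l >> (64 - bits)));
}

int main(void)
{
    /* Rotating hi:lo = 0x80000000:0x0000000000000001 left by 1 wraps the
     * top bit of the 96-bit value around into bit 0. */
    uint64_t lo = 1;
    uint32_t hi = 0x80000000u;
    rot96_left_short(&lo, &hi, 1);
    printf("hi=%08lx lo=%016llx\n", (unsigned long)hi, (unsigned long long)lo);
    /* Expected output: hi=00000000 lo=0000000000000003 */
    return 0;
}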
- /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->W[2] = x1; - state->W[3] = (uint32_t)x2; - state->W[4] = (uint32_t)(x2 >> 32); - state->W[5] = x3; - state->S[3] = x4; - state->W[8] = x5; - state->W[9] = (uint32_t)x6; - state->W[10] = (uint32_t)(x6 >> 32); - state->W[11] = x7; -#else - le_store_word64(state->B, x0); - le_store_word32(state->B + 8, x1); - le_store_word64(state->B + 12, x2); - le_store_word32(state->B + 20, x3); - le_store_word64(state->B + 24, x4); - le_store_word32(state->B + 32, x5); - le_store_word64(state->B + 36, x6); - le_store_word32(state->B + 44, x7); -#endif -} - -static void knot512_permute - (knot512_state_t *state, const uint8_t *rc, uint8_t rounds) -{ - uint64_t b2, b3, b4, b5, b6, b7; - - /* Load the input state into local variables; each row is 128 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; - uint64_t x5 = state->S[5]; - uint64_t x6 = state->S[6]; - uint64_t x7 = state->S[7]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); - uint64_t x4 = le_load_word64(state->B + 32); - uint64_t x5 = le_load_word64(state->B + 40); - uint64_t x6 = le_load_word64(state->B + 48); - uint64_t x7 = le_load_word64(state->B + 56); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x2, x4, x6, b2, b4, b6); - knot_sbox64(x1, x3, x5, x7, b3, b5, b7); - - /* Linear diffusion layer */ - #define leftRotate_128(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | ((b1) >> (64 - (bits))); \ - (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ - } while (0) - leftRotate_128(x2, x3, b2, b3, 1); - leftRotate_128(x4, x5, b4, b5, 16); - leftRotate_128(x6, x7, b6, b7, 25); - } - - /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; - state->S[5] = x5; - state->S[6] = x6; - state->S[7] = x7; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); - le_store_word64(state->B + 32, x4); - le_store_word64(state->B + 40, x5); - le_store_word64(state->B + 48, x6); - le_store_word64(state->B + 56, x7); -#endif -} - -void knot512_permute_7(knot512_state_t *state, uint8_t rounds) -{ - knot512_permute(state, rc7, rounds); -} - -void knot512_permute_8(knot512_state_t *state, uint8_t rounds) -{ - knot512_permute(state, rc8, rounds); -} - -#endif /* !__AVR__ */ diff --git a/knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot.h b/knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot.h deleted file mode 100644 index 88a782c..0000000 --- a/knot/Implementations/crypto_aead/knot256/rhys-avr/internal-knot.h +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
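As a usage sketch, assuming internal-knot.h, internal-knot.c and internal-util.h are built together, the permutation operates in place on the union state; the byte view B holds the canonical little-endian representation that the word view S aliases:

#include <stdio.h>
#include <string.h>
#include "internal-knot.h"

int main(void)
{
    knot512_state_t state;

    /* Start from an all-zero 512-bit state and run the full 140 rounds of
     * the 8-bit round-constant schedule. */
    memset(state.B, 0, sizeof(state.B));
    knot512_permute_8(&state, 140);

    /* Dump the permuted state through its byte view. */
    for (unsigned i = 0; i < sizeof(state.B); ++i)
        printf("%02x%s", state.B[i], (i % 16 == 15) ? "\n" : " ");
    return 0;
}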
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_KNOT_H -#define LW_INTERNAL_KNOT_H - -#include "internal-util.h" - -/** - * \file internal-knot.h - * \brief Permutations that are used by the KNOT AEAD and hash algorithms. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Internal state of the KNOT-256 permutation. - */ -typedef union -{ - uint64_t S[4]; /**< Words of the state */ - uint8_t B[32]; /**< Bytes of the state */ - -} knot256_state_t; - -/** - * \brief Internal state of the KNOT-384 permutation. - */ -typedef union -{ - uint64_t S[6]; /**< 64-bit words of the state */ - uint32_t W[12]; /**< 32-bit words of the state */ - uint8_t B[48]; /**< Bytes of the state */ - -} knot384_state_t; - -/** - * \brief Internal state of the KNOT-512 permutation. - */ -typedef union -{ - uint64_t S[8]; /**< Words of the state */ - uint8_t B[64]; /**< Bytes of the state */ - -} knot512_state_t; - -/** - * \brief Permutes the KNOT-256 state, using 6-bit round constants. - * - * \param state The KNOT-256 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 52. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot256_permute_6(knot256_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-256 state, using 7-bit round constants. - * - * \param state The KNOT-256 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot256_permute_7(knot256_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-384 state, using 7-bit round constants. - * - * \param state The KNOT-384 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot384_permute_7(knot384_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-512 state, using 7-bit round constants. - * - * \param state The KNOT-512 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot512_permute_7(knot512_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-512 state, using 8-bit round constants. - * - * \param state The KNOT-512 state to be permuted. 
- * \param rounds The number of rounds to be performed, 1 to 140. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot512_permute_8(knot512_state_t *state, uint8_t rounds); - -/** - * \brief Generic pointer to a function that performs a KNOT permutation. - * - * \param state Points to the permutation state. - * \param round Number of rounds to perform. - */ -typedef void (*knot_permute_t)(void *state, uint8_t rounds); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_aead/knot256/rhys-avr/internal-util.h b/knot/Implementations/crypto_aead/knot256/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/knot/Implementations/crypto_aead/knot256/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
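Independent of the compile-time detection, the byte-order assumption can be sanity-checked at run time in a few lines (standalone and illustrative, not part of this header):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t probe = 0x01020304u;
    const uint8_t *bytes = (const uint8_t *)&probe;
    /* A little-endian CPU stores the least significant byte first. */
    printf("%s-endian\n", bytes[0] == 0x04 ? "little" : "big");
    return 0;
}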
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
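Each composed form trades a single variable-distance rotation, which is costly without a barrel shifter, for one byte-aligned rotation plus at most four single-bit steps; leftRotate12, for example, is a 16-bit rotation followed by four single-bit right rotations. A standalone check of one instance of the identity (the helpers below are illustrative, not the macros from this header):

#include <stdint.h>
#include <stdio.h>

static uint32_t rotl32(uint32_t x, unsigned n) { return (x << n) | (x >> (32u - n)); }
static uint32_t rotr32(uint32_t x, unsigned n) { return (x >> n) | (x << (32u - n)); }

int main(void)
{
    uint32_t x = 0x12345678u;
    /* leftRotate5 in the composed scheme: rotate left by 8 (a cheap byte
     * move on AVR), then right by 3 one bit at a time. */
    uint32_t composed = rotr32(rotr32(rotr32(rotl32(x, 8), 1), 1), 1);
    uint32_t direct = rotl32(x, 5);
    printf("%s\n", composed == direct ? "composed == direct" : "mismatch");
    return 0;
}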
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/knot/Implementations/crypto_aead/knot256/rhys-avr/knot-aead.c b/knot/Implementations/crypto_aead/knot256/rhys-avr/knot-aead.c deleted file mode 100644 index 5825f01..0000000 --- a/knot/Implementations/crypto_aead/knot256/rhys-avr/knot-aead.c +++ /dev/null @@ -1,503 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "knot.h" -#include "internal-knot.h" -#include - -aead_cipher_t const knot_aead_128_256_cipher = { - "KNOT-AEAD-128-256", - KNOT_AEAD_128_KEY_SIZE, - KNOT_AEAD_128_NONCE_SIZE, - KNOT_AEAD_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_aead_128_256_encrypt, - knot_aead_128_256_decrypt -}; - -aead_cipher_t const knot_aead_128_384_cipher = { - "KNOT-AEAD-128-384", - KNOT_AEAD_128_KEY_SIZE, - KNOT_AEAD_128_NONCE_SIZE, - KNOT_AEAD_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_aead_128_384_encrypt, - knot_aead_128_384_decrypt -}; - -aead_cipher_t const knot_aead_192_384_cipher = { - "KNOT-AEAD-192-384", - KNOT_AEAD_192_KEY_SIZE, - KNOT_AEAD_192_NONCE_SIZE, - KNOT_AEAD_192_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_aead_192_384_encrypt, - knot_aead_192_384_decrypt -}; - -aead_cipher_t const knot_aead_256_512_cipher = { - "KNOT-AEAD-256-512", - KNOT_AEAD_256_KEY_SIZE, - KNOT_AEAD_256_NONCE_SIZE, - KNOT_AEAD_256_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_aead_256_512_encrypt, - knot_aead_256_512_decrypt -}; - -/** - * \brief Rate for KNOT-AEAD-128-256. - */ -#define KNOT_AEAD_128_256_RATE 8 - -/** - * \brief Rate for KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_384_RATE 24 - -/** - * \brief Rate for KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_384_RATE 12 - -/** - * \brief Rate for KNOT-AEAD-256-512. - */ -#define KNOT_AEAD_256_512_RATE 16 - -/** - * \brief Absorbs the associated data into a KNOT permutation state. - * - * \param state Points to the KNOT permutation state. - * \param permute Points to the function to perform the KNOT permutation. - * \param rounds Number of rounds to perform. 
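A minimal round-trip sketch for the primary AEAD member, assuming knot.h and the rest of the library (knot-aead.c, internal-knot.c, aead-common.c) are compiled alongside it; the all-zero key and nonce are placeholders only:

#include <stdio.h>
#include <string.h>
#include "knot.h"

int main(void)
{
    unsigned char key[KNOT_AEAD_128_KEY_SIZE] = { 0 };
    unsigned char nonce[KNOT_AEAD_128_NONCE_SIZE] = { 0 };
    unsigned char msg[] = "hello, KNOT";
    unsigned char ad[] = "header";
    unsigned char ct[sizeof(msg) + KNOT_AEAD_128_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    /* Encrypt: the output is the ciphertext followed by a 16-byte tag. */
    knot_aead_128_256_encrypt(ct, &ctlen, msg, sizeof(msg),
                              ad, sizeof(ad) - 1, NULL, nonce, key);

    /* Decrypt: returns 0 only when the authentication tag verifies. */
    if (knot_aead_128_256_decrypt(pt, &ptlen, NULL, ct, ctlen,
                                  ad, sizeof(ad) - 1, nonce, key) == 0)
        printf("round trip ok (%llu bytes)\n", ptlen);
    else
        printf("tag check failed\n");
    return 0;
}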
- * \param rate Rate of absorption to use with the permutation. - * \param ad Points to the associated data. - * \param adlen Length of the associated data, must be at least 1. - */ -static void knot_aead_absorb_ad - (void *state, knot_permute_t permute, uint8_t rounds, unsigned rate, - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen >= rate) { - lw_xor_block((unsigned char *)state, ad, rate); - permute(state, rounds); - ad += rate; - adlen -= rate; - } - rate = (unsigned)adlen; - lw_xor_block((unsigned char *)state, ad, rate); - ((unsigned char *)state)[rate] ^= 0x01; - permute(state, rounds); -} - -/** - * \brief Encrypts plaintext data with a KNOT permutation state. - * - * \param state Points to the KNOT permutation state. - * \param permute Points to the function to perform the KNOT permutation. - * \param rounds Number of rounds to perform. - * \param rate Rate of absorption to use with the permutation. - * \param c Buffer to receive the ciphertext. - * \param m Buffer containing the plaintext. - * \param len Length of the plaintext data, must be at least 1. - */ -static void knot_aead_encrypt - (void *state, knot_permute_t permute, uint8_t rounds, unsigned rate, - unsigned char *c, const unsigned char *m, unsigned long long len) -{ - while (len >= rate) { - lw_xor_block_2_dest(c, (unsigned char *)state, m, rate); - permute(state, rounds); - c += rate; - m += rate; - len -= rate; - } - rate = (unsigned)len; - lw_xor_block_2_dest(c, (unsigned char *)state, m, rate); - ((unsigned char *)state)[rate] ^= 0x01; -} - -/** - * \brief Decrypts ciphertext data with a KNOT permutation state. - * - * \param state Points to the KNOT permutation state. - * \param permute Points to the function to perform the KNOT permutation. - * \param rounds Number of rounds to perform. - * \param rate Rate of absorption to use with the permutation. - * \param m Buffer to receive the plaintext. - * \param c Buffer containing the ciphertext. - * \param len Length of the plaintext data, must be at least 1. 
- */ -static void knot_aead_decrypt - (void *state, knot_permute_t permute, uint8_t rounds, unsigned rate, - unsigned char *m, const unsigned char *c, unsigned long long len) -{ - while (len >= rate) { - lw_xor_block_swap(m, (unsigned char *)state, c, rate); - permute(state, rounds); - c += rate; - m += rate; - len -= rate; - } - rate = (unsigned)len; - lw_xor_block_swap(m, (unsigned char *)state, c, rate); - ((unsigned char *)state)[rate] ^= 0x01; -} - -int knot_aead_128_256_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - knot256_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + KNOT_AEAD_128_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_128_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_128_NONCE_SIZE, k, KNOT_AEAD_128_KEY_SIZE); - knot256_permute_6(&state, 52); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot256_permute_6, - 28, KNOT_AEAD_128_256_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Encrypts the plaintext to produce the ciphertext */ - if (mlen > 0) { - knot_aead_encrypt - (&state, (knot_permute_t)knot256_permute_6, - 28, KNOT_AEAD_128_256_RATE, c, m, mlen); - } - - /* Compute the authentication tag */ - knot256_permute_6(&state, 32); - memcpy(c + mlen, state.B, KNOT_AEAD_128_TAG_SIZE); - return 0; -} - -int knot_aead_128_256_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - knot256_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < KNOT_AEAD_128_TAG_SIZE) - return -1; - *mlen = clen - KNOT_AEAD_128_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_128_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_128_NONCE_SIZE, k, KNOT_AEAD_128_KEY_SIZE); - knot256_permute_6(&state, 52); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot256_permute_6, - 28, KNOT_AEAD_128_256_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Decrypts the ciphertext to produce the plaintext */ - clen -= KNOT_AEAD_128_TAG_SIZE; - if (clen > 0) { - knot_aead_decrypt - (&state, (knot_permute_t)knot256_permute_6, - 28, KNOT_AEAD_128_256_RATE, m, c, clen); - } - - /* Check the authentication tag */ - knot256_permute_6(&state, 32); - return aead_check_tag - (m, clen, state.B, c + clen, KNOT_AEAD_128_TAG_SIZE); -} - -int knot_aead_128_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - knot384_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + KNOT_AEAD_128_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_128_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_128_NONCE_SIZE, k, KNOT_AEAD_128_KEY_SIZE); - 
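    /*
     * Note on this initialization: for KNOT-AEAD-128-384 the nonce (16 bytes)
     * and key (16 bytes) fill only 32 of the 48 state bytes, so the gap is
     * zero-filled and the last state byte is set to 0x80 before the 76-round
     * initial permutation below.  The 256-bit variant above needs no such
     * padding because its 32-byte state is exactly nonce || key.
     */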
memset(state.B + KNOT_AEAD_128_NONCE_SIZE + KNOT_AEAD_128_KEY_SIZE, - 0, 47 - (KNOT_AEAD_128_NONCE_SIZE + KNOT_AEAD_128_KEY_SIZE)); - state.B[47] = 0x80; - knot384_permute_7(&state, 76); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot384_permute_7, - 28, KNOT_AEAD_128_384_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Encrypts the plaintext to produce the ciphertext */ - if (mlen > 0) { - knot_aead_encrypt - (&state, (knot_permute_t)knot384_permute_7, - 28, KNOT_AEAD_128_384_RATE, c, m, mlen); - } - - /* Compute the authentication tag */ - knot384_permute_7(&state, 32); - memcpy(c + mlen, state.B, KNOT_AEAD_128_TAG_SIZE); - return 0; -} - -int knot_aead_128_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - knot384_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < KNOT_AEAD_128_TAG_SIZE) - return -1; - *mlen = clen - KNOT_AEAD_128_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_128_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_128_NONCE_SIZE, k, KNOT_AEAD_128_KEY_SIZE); - memset(state.B + KNOT_AEAD_128_NONCE_SIZE + KNOT_AEAD_128_KEY_SIZE, - 0, 47 - (KNOT_AEAD_128_NONCE_SIZE + KNOT_AEAD_128_KEY_SIZE)); - state.B[47] = 0x80; - knot384_permute_7(&state, 76); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot384_permute_7, - 28, KNOT_AEAD_128_384_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Decrypts the ciphertext to produce the plaintext */ - clen -= KNOT_AEAD_128_TAG_SIZE; - if (clen > 0) { - knot_aead_decrypt - (&state, (knot_permute_t)knot384_permute_7, - 28, KNOT_AEAD_128_384_RATE, m, c, clen); - } - - /* Check the authentication tag */ - knot384_permute_7(&state, 32); - return aead_check_tag - (m, clen, state.B, c + clen, KNOT_AEAD_128_TAG_SIZE); -} - -int knot_aead_192_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - knot384_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + KNOT_AEAD_192_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_192_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_192_NONCE_SIZE, k, KNOT_AEAD_192_KEY_SIZE); - knot384_permute_7(&state, 76); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot384_permute_7, - 40, KNOT_AEAD_192_384_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Encrypts the plaintext to produce the ciphertext */ - if (mlen > 0) { - knot_aead_encrypt - (&state, (knot_permute_t)knot384_permute_7, - 40, KNOT_AEAD_192_384_RATE, c, m, mlen); - } - - /* Compute the authentication tag */ - knot384_permute_7(&state, 44); - memcpy(c + mlen, state.B, KNOT_AEAD_192_TAG_SIZE); - return 0; -} - -int knot_aead_192_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long 
long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - knot384_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < KNOT_AEAD_192_TAG_SIZE) - return -1; - *mlen = clen - KNOT_AEAD_192_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_192_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_192_NONCE_SIZE, k, KNOT_AEAD_192_KEY_SIZE); - knot384_permute_7(&state, 76); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot384_permute_7, - 40, KNOT_AEAD_192_384_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Decrypts the ciphertext to produce the plaintext */ - clen -= KNOT_AEAD_192_TAG_SIZE; - if (clen > 0) { - knot_aead_decrypt - (&state, (knot_permute_t)knot384_permute_7, - 40, KNOT_AEAD_192_384_RATE, m, c, clen); - } - - /* Check the authentication tag */ - knot384_permute_7(&state, 44); - return aead_check_tag - (m, clen, state.B, c + clen, KNOT_AEAD_192_TAG_SIZE); -} - -int knot_aead_256_512_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - knot512_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + KNOT_AEAD_256_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_256_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_256_NONCE_SIZE, k, KNOT_AEAD_256_KEY_SIZE); - knot512_permute_7(&state, 100); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot512_permute_7, - 52, KNOT_AEAD_256_512_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Encrypts the plaintext to produce the ciphertext */ - if (mlen > 0) { - knot_aead_encrypt - (&state, (knot_permute_t)knot512_permute_7, - 52, KNOT_AEAD_256_512_RATE, c, m, mlen); - } - - /* Compute the authentication tag */ - knot512_permute_7(&state, 56); - memcpy(c + mlen, state.B, KNOT_AEAD_256_TAG_SIZE); - return 0; -} - -int knot_aead_256_512_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - knot512_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < KNOT_AEAD_256_TAG_SIZE) - return -1; - *mlen = clen - KNOT_AEAD_256_TAG_SIZE; - - /* Initialize the permutation state to the nonce and the key */ - memcpy(state.B, npub, KNOT_AEAD_256_NONCE_SIZE); - memcpy(state.B + KNOT_AEAD_256_NONCE_SIZE, k, KNOT_AEAD_256_KEY_SIZE); - knot512_permute_7(&state, 100); - - /* Absorb the associated data */ - if (adlen > 0) { - knot_aead_absorb_ad - (&state, (knot_permute_t)knot512_permute_7, - 52, KNOT_AEAD_256_512_RATE, ad, adlen); - } - state.B[sizeof(state.B) - 1] ^= 0x80; /* Domain separation */ - - /* Decrypts the ciphertext to produce the plaintext */ - clen -= KNOT_AEAD_256_TAG_SIZE; - if (clen > 0) { - knot_aead_decrypt - (&state, (knot_permute_t)knot512_permute_7, - 52, KNOT_AEAD_256_512_RATE, m, c, clen); - } - - /* Check the authentication tag */ - 
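    /*
     * Finalization mirrors encryption: one more 56-round permutation, after
     * which the first 32 bytes of the state form the expected tag.
     * aead_check_tag (from aead-common) also receives the plaintext buffer
     * and its length, presumably so the plaintext can be invalidated when
     * the tag comparison fails; the comparison itself is expected to run in
     * constant time.
     */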
knot512_permute_7(&state, 56); - return aead_check_tag - (m, clen, state.B, c + clen, KNOT_AEAD_256_TAG_SIZE); -} diff --git a/knot/Implementations/crypto_aead/knot256/rhys-avr/knot.h b/knot/Implementations/crypto_aead/knot256/rhys-avr/knot.h deleted file mode 100644 index e2c5198..0000000 --- a/knot/Implementations/crypto_aead/knot256/rhys-avr/knot.h +++ /dev/null @@ -1,459 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_KNOT_H -#define LWCRYPTO_KNOT_H - -#include "aead-common.h" - -/** - * \file knot.h - * \brief KNOT authenticated encryption and hash algorithms. - * - * KNOT is a family of authenticated encryption and hash algorithms built - * around a permutation and the MonkeyDuplex sponge construction. The - * family members are: - * - * \li KNOT-AEAD-128-256 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag, built around a 256-bit permutation. This is the primary - * encryption member of the family. - * \li KNOT-AEAD-128-384 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag, built around a 384-bit permutation. - * \li KNOT-AEAD-192-384 with a 192-bit key, a 192-bit nonce, and a - * 192-bit tag, built around a 384-bit permutation. - * \li KNOT-AEAD-256-512 with a 256-bit key, a 256-bit nonce, and a - * 256-bit tag, built around a 512-bit permutation. - * \li KNOT-HASH-256-256 with a 256-bit hash output, built around a - * 256-bit permutation. This is the primary hashing member of the family. - * \li KNOT-HASH-256-384 with a 256-bit hash output, built around a - * 384-bit permutation. - * \li KNOT-HASH-384-384 with a 384-bit hash output, built around a - * 384-bit permutation. - * \li KNOT-HASH-512-512 with a 512-bit hash output, built around a - * 512-bit permutation. - * - * References: https://csrc.nist.gov/CSRC/media/Projects/lightweight-cryptography/documents/round-2/spec-doc-rnd2/knot-spec-round.pdf - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-128-256 and - * KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_NONCE_SIZE 16 - -/** - * \brief Size of the key for KNOT-AEAD-192-384. 
- */ -#define KNOT_AEAD_192_KEY_SIZE 24 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_TAG_SIZE 24 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_NONCE_SIZE 24 - -/** - * \brief Size of the key for KNOT-AEAD-256-512. - */ -#define KNOT_AEAD_256_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-256-512. - */ -#define KNOT_AEAD_256_TAG_SIZE 32 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_256_NONCE_SIZE 32 - -/** - * \brief Size of the hash for KNOT-HASH-256-256 and KNOT-HASH-256-384. - */ -#define KNOT_HASH_256_SIZE 32 - -/** - * \brief Size of the hash for KNOT-HASH-384-384. - */ -#define KNOT_HASH_384_SIZE 48 - -/** - * \brief Size of the hash for KNOT-HASH-512-512. - */ -#define KNOT_HASH_512_SIZE 64 - -/** - * \brief Meta-information block for the KNOT-AEAD-128-256 cipher. - */ -extern aead_cipher_t const knot_aead_128_256_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-128-384 cipher. - */ -extern aead_cipher_t const knot_aead_128_384_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-192-384 cipher. - */ -extern aead_cipher_t const knot_aead_192_384_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-256-512 cipher. - */ -extern aead_cipher_t const knot_aead_256_512_cipher; - -/** - * \brief Meta-information block for the KNOT-HASH-256-256 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_256_256_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-256-384 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_256_384_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-384-384 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_384_384_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-512-512 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_512_512_algorithm; - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_128_256_decrypt() - */ -int knot_aead_128_256_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. 
- * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_128_256_encrypt() - */ -int knot_aead_128_256_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-384. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_128_384_decrypt() - */ -int knot_aead_128_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-384. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- * - * \sa knot_aead_128_384_encrypt() - */ -int knot_aead_128_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-192-384. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_192_384_decrypt() - */ -int knot_aead_192_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-192-384. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_192_384_encrypt() - */ -int knot_aead_192_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-256-512. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. 
- * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_256_512_decrypt() - */ -int knot_aead_256_512_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-256-512. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_256_512_encrypt() - */ -int knot_aead_256_512_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with KNOT-HASH-256-256. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_256_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_256_256 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-256-384. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_256_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_256_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-384-384. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_384_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_384_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-512-512. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_512_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. 
- * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_512_512 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_aead/knot256/rhys/internal-knot-256-avr.S b/knot/Implementations/crypto_aead/knot256/rhys/internal-knot-256-avr.S new file mode 100644 index 0000000..15e6389 --- /dev/null +++ b/knot/Implementations/crypto_aead/knot256/rhys/internal-knot-256-avr.S @@ -0,0 +1,1093 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_6, @object + .size table_6, 52 +table_6: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 33 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 49 + .byte 34 + .byte 5 + .byte 10 + .byte 20 + .byte 41 + .byte 19 + .byte 39 + .byte 15 + .byte 30 + .byte 61 + .byte 58 + .byte 52 + .byte 40 + .byte 17 + .byte 35 + .byte 7 + .byte 14 + .byte 28 + .byte 57 + .byte 50 + .byte 36 + .byte 9 + .byte 18 + .byte 37 + .byte 11 + .byte 22 + .byte 45 + .byte 27 + .byte 55 + .byte 46 + .byte 29 + .byte 59 + .byte 54 + .byte 44 + .byte 25 + .byte 51 + .byte 38 + .byte 13 + .byte 26 + .byte 53 + .byte 42 + + .text +.global knot256_permute_6 + .type knot256_permute_6, @function +knot256_permute_6: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 57 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r8 + std Y+18,r9 + std Y+19,r10 + std Y+20,r11 + std Y+21,r12 + std Y+22,r13 + std Y+23,r14 + std Y+24,r15 + push r31 + push r30 + ldi r30,lo8(table_6) + ldi r31,hi8(table_6) +#if defined(RAMPZ) + ldi r17,hh8(table_6) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +59: +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + eor r18,r23 + inc r30 + ldd r23,Y+1 + ldd r4,Y+9 + ldd r5,Y+17 + mov r24,r18 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+33,r7 + mov r16,r5 + eor r16,r24 + mov r8,r23 + or r8,r4 + eor r8,r16 + mov r24,r23 + eor r24,r5 + mov r18,r25 + and r18,r16 + eor r18,r24 + mov r6,r8 + and r6,r24 + eor r6,r25 + std Y+25,r6 + ldd r23,Y+2 + ldd r4,Y+10 + ldd r5,Y+18 + mov r24,r19 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+34,r7 + mov r16,r5 + eor r16,r24 + mov r9,r23 + or r9,r4 + eor r9,r16 + mov r24,r23 + eor r24,r5 + mov r19,r25 + and r19,r16 + eor r19,r24 + mov r6,r9 + and r6,r24 + eor r6,r25 + std Y+26,r6 + ldd 
r23,Y+3 + ldd r4,Y+11 + ldd r5,Y+19 + mov r24,r20 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+35,r7 + mov r16,r5 + eor r16,r24 + mov r10,r23 + or r10,r4 + eor r10,r16 + mov r24,r23 + eor r24,r5 + mov r20,r25 + and r20,r16 + eor r20,r24 + mov r6,r10 + and r6,r24 + eor r6,r25 + std Y+27,r6 + ldd r23,Y+4 + ldd r4,Y+12 + ldd r5,Y+20 + mov r24,r21 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+36,r7 + mov r16,r5 + eor r16,r24 + mov r11,r23 + or r11,r4 + eor r11,r16 + mov r24,r23 + eor r24,r5 + mov r21,r25 + and r21,r16 + eor r21,r24 + mov r6,r11 + and r6,r24 + eor r6,r25 + std Y+28,r6 + ldd r23,Y+5 + ldd r4,Y+13 + ldd r5,Y+21 + mov r24,r26 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+37,r7 + mov r16,r5 + eor r16,r24 + mov r12,r23 + or r12,r4 + eor r12,r16 + mov r24,r23 + eor r24,r5 + mov r26,r25 + and r26,r16 + eor r26,r24 + mov r6,r12 + and r6,r24 + eor r6,r25 + std Y+29,r6 + ldd r23,Y+6 + ldd r4,Y+14 + ldd r5,Y+22 + mov r24,r27 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+38,r7 + mov r16,r5 + eor r16,r24 + mov r13,r23 + or r13,r4 + eor r13,r16 + mov r24,r23 + eor r24,r5 + mov r27,r25 + and r27,r16 + eor r27,r24 + mov r6,r13 + and r6,r24 + eor r6,r25 + std Y+30,r6 + ldd r23,Y+7 + ldd r4,Y+15 + ldd r5,Y+23 + mov r24,r2 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+39,r7 + mov r16,r5 + eor r16,r24 + mov r14,r23 + or r14,r4 + eor r14,r16 + mov r24,r23 + eor r24,r5 + mov r2,r25 + and r2,r16 + eor r2,r24 + mov r6,r14 + and r6,r24 + eor r6,r25 + std Y+31,r6 + ldd r23,Y+8 + ldd r4,Y+16 + ldd r5,Y+24 + mov r24,r3 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+40,r7 + mov r16,r5 + eor r16,r24 + mov r15,r23 + or r15,r4 + eor r15,r16 + mov r24,r23 + eor r24,r5 + mov r3,r25 + and r3,r16 + eor r3,r24 + mov r6,r15 + and r6,r24 + eor r6,r25 + std Y+32,r6 + std Y+9,r15 + std Y+10,r8 + std Y+11,r9 + std Y+12,r10 + std Y+13,r11 + std Y+14,r12 + std Y+15,r13 + std Y+16,r14 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd r11,Y+36 + ldd r12,Y+37 + ldd r13,Y+38 + ldd r14,Y+39 + ldd r15,Y+40 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+17,r13 + std Y+18,r14 + std Y+19,r15 + std Y+20,r8 + std Y+21,r9 + std Y+22,r10 + std Y+23,r11 + std Y+24,r12 + dec r22 + breq 5322f + rjmp 59b +5322: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + ldd r12,Y+5 + ldd r13,Y+6 + ldd r14,Y+7 + ldd r15,Y+8 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + ldd r8,Y+17 + ldd r9,Y+18 + ldd r10,Y+19 + ldd r11,Y+20 + ldd r12,Y+21 + ldd r13,Y+22 + ldd r14,Y+23 
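The generated knot256 code above unrolls the bitsliced KNOT S-box one byte column at a time: each com/and/eor/or sequence between an ldd group and the following std instructions is one application of the S-box to a single byte of the four state rows, with the intermediate rows parked in the Y-indexed stack frame because AVR has no 64-bit registers. The same column transform can be written on whole 64-bit rows in portable C roughly as follows; this is an illustrative sketch, and the function and variable names are not taken from this patch.

#include <stdint.h>

/* Bitsliced KNOT S-box applied to the four rows of a 256-bit state.
 * x0..x3 are the input rows (row 0 first); y0..y3 receive the outputs.
 * The boolean sequence mirrors the com/and/eor/or pattern in the
 * generated AVR code above.  Sketch only - names are illustrative. */
static void knot_sbox64(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3,
                        uint64_t *y0, uint64_t *y1, uint64_t *y2, uint64_t *y3)
{
    uint64_t t1 = ~x0;             /* com                     */
    uint64_t t3 = x2 ^ (x1 & t1);  /* and + eor               */
    uint64_t t6 = x3 ^ t1;
    uint64_t t4 = x1 ^ x3;
    *y3 = x3 ^ t3;                 /* new row 3 (pre-rotation) */
    *y2 = (x1 | x2) ^ t6;          /* new row 2 (pre-rotation) */
    *y0 = (t3 & t6) ^ t4;          /* new row 0                */
    *y1 = (*y2 & t4) ^ t3;         /* new row 1 (pre-rotation) */
}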
+ ldd r15,Y+24 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + adiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot256_permute_6, .-knot256_permute_6 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 + .byte 92 + + .text +.global knot256_permute_7 + .type knot256_permute_7, @function +knot256_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 57 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r8 + std Y+18,r9 + std Y+19,r10 + std Y+20,r11 + std Y+21,r12 + std Y+22,r13 + std Y+23,r14 + std Y+24,r15 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r17,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +59: +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + eor r18,r23 + inc r30 + ldd r23,Y+1 + ldd r4,Y+9 + ldd r5,Y+17 + mov r24,r18 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+33,r7 + mov r16,r5 + eor r16,r24 + mov r8,r23 + or r8,r4 + eor r8,r16 + mov r24,r23 + eor r24,r5 + mov r18,r25 + and r18,r16 + eor 
r18,r24 + mov r6,r8 + and r6,r24 + eor r6,r25 + std Y+25,r6 + ldd r23,Y+2 + ldd r4,Y+10 + ldd r5,Y+18 + mov r24,r19 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+34,r7 + mov r16,r5 + eor r16,r24 + mov r9,r23 + or r9,r4 + eor r9,r16 + mov r24,r23 + eor r24,r5 + mov r19,r25 + and r19,r16 + eor r19,r24 + mov r6,r9 + and r6,r24 + eor r6,r25 + std Y+26,r6 + ldd r23,Y+3 + ldd r4,Y+11 + ldd r5,Y+19 + mov r24,r20 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+35,r7 + mov r16,r5 + eor r16,r24 + mov r10,r23 + or r10,r4 + eor r10,r16 + mov r24,r23 + eor r24,r5 + mov r20,r25 + and r20,r16 + eor r20,r24 + mov r6,r10 + and r6,r24 + eor r6,r25 + std Y+27,r6 + ldd r23,Y+4 + ldd r4,Y+12 + ldd r5,Y+20 + mov r24,r21 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+36,r7 + mov r16,r5 + eor r16,r24 + mov r11,r23 + or r11,r4 + eor r11,r16 + mov r24,r23 + eor r24,r5 + mov r21,r25 + and r21,r16 + eor r21,r24 + mov r6,r11 + and r6,r24 + eor r6,r25 + std Y+28,r6 + ldd r23,Y+5 + ldd r4,Y+13 + ldd r5,Y+21 + mov r24,r26 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+37,r7 + mov r16,r5 + eor r16,r24 + mov r12,r23 + or r12,r4 + eor r12,r16 + mov r24,r23 + eor r24,r5 + mov r26,r25 + and r26,r16 + eor r26,r24 + mov r6,r12 + and r6,r24 + eor r6,r25 + std Y+29,r6 + ldd r23,Y+6 + ldd r4,Y+14 + ldd r5,Y+22 + mov r24,r27 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+38,r7 + mov r16,r5 + eor r16,r24 + mov r13,r23 + or r13,r4 + eor r13,r16 + mov r24,r23 + eor r24,r5 + mov r27,r25 + and r27,r16 + eor r27,r24 + mov r6,r13 + and r6,r24 + eor r6,r25 + std Y+30,r6 + ldd r23,Y+7 + ldd r4,Y+15 + ldd r5,Y+23 + mov r24,r2 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+39,r7 + mov r16,r5 + eor r16,r24 + mov r14,r23 + or r14,r4 + eor r14,r16 + mov r24,r23 + eor r24,r5 + mov r2,r25 + and r2,r16 + eor r2,r24 + mov r6,r14 + and r6,r24 + eor r6,r25 + std Y+31,r6 + ldd r23,Y+8 + ldd r4,Y+16 + ldd r5,Y+24 + mov r24,r3 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+40,r7 + mov r16,r5 + eor r16,r24 + mov r15,r23 + or r15,r4 + eor r15,r16 + mov r24,r23 + eor r24,r5 + mov r3,r25 + and r3,r16 + eor r3,r24 + mov r6,r15 + and r6,r24 + eor r6,r25 + std Y+32,r6 + std Y+9,r15 + std Y+10,r8 + std Y+11,r9 + std Y+12,r10 + std Y+13,r11 + std Y+14,r12 + std Y+15,r13 + std Y+16,r14 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd r11,Y+36 + ldd r12,Y+37 + ldd r13,Y+38 + ldd r14,Y+39 + ldd r15,Y+40 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+17,r13 + std Y+18,r14 + std Y+19,r15 + std Y+20,r8 + std Y+21,r9 + std Y+22,r10 + std Y+23,r11 + std Y+24,r12 + dec r22 + breq 5322f + rjmp 59b +5322: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + ldd r12,Y+5 + ldd r13,Y+6 + ldd r14,Y+7 + ldd r15,Y+8 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 
+ std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + ldd r8,Y+17 + ldd r9,Y+18 + ldd r10,Y+19 + ldd r11,Y+20 + ldd r12,Y+21 + ldd r13,Y+22 + ldd r14,Y+23 + ldd r15,Y+24 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + adiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot256_permute_7, .-knot256_permute_7 + +#endif diff --git a/knot/Implementations/crypto_aead/knot256/rhys/internal-knot-384-avr.S b/knot/Implementations/crypto_aead/knot256/rhys/internal-knot-384-avr.S new file mode 100644 index 0000000..4d15898 --- /dev/null +++ b/knot/Implementations/crypto_aead/knot256/rhys/internal-knot-384-avr.S @@ -0,0 +1,833 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 + .byte 92 + + .text +.global knot384_permute_7 + .type knot384_permute_7, @function +knot384_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,72 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 87 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + ldd r4,Z+16 + ldd r5,Z+17 + ldd r6,Z+18 + ldd r7,Z+19 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + std Y+13,r26 + std Y+14,r27 + std Y+15,r2 + std Y+16,r3 + std Y+17,r4 + std Y+18,r5 + std Y+19,r6 + std Y+20,r7 + std Y+21,r8 + std Y+22,r9 + std Y+23,r10 + std Y+24,r11 
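Putting the pieces together, each pass through the loop in knot256_permute_6 and knot256_permute_7 above is one KNOT round: XOR the next round constant from table_6/table_7 into the low byte of row 0, apply the S-box to every byte column, then rotate rows 1, 2 and 3 left by 1, 8 and 25 bits (the lsl/rol chains and the byte-offset stores in the generated code). A portable-C sketch of one such round follows, reusing the knot_sbox64 helper sketched earlier; the state type and function names are illustrative, not from the patch.

#include <stdint.h>

typedef struct { uint64_t W[4]; } knot256_state_t;   /* illustrative type */

static inline uint64_t rol64(uint64_t x, unsigned n)
{
    return (x << n) | (x >> (64u - n));
}

/* One round of the 256-bit KNOT permutation; rc is the next byte of
 * table_6 (6-bit constants) or table_7 (7-bit constants).  Uses the
 * knot_sbox64 column transform sketched above. */
static void knot256_round(knot256_state_t *s, uint8_t rc)
{
    uint64_t b1, b2, b3;
    s->W[0] ^= rc;                             /* add round constant        */
    knot_sbox64(s->W[0], s->W[1], s->W[2], s->W[3],
                &s->W[0], &b1, &b2, &b3);      /* SubColumn                 */
    s->W[1] = rol64(b1, 1);                    /* ShiftRow: 1-bit lsl/rol   */
    s->W[2] = rol64(b2, 8);                    /* ShiftRow: one-byte offset */
    s->W[3] = rol64(b3, 25);                   /* ShiftRow: 1 bit + 3 bytes */
}

The 384- and 512-bit files that follow use the same round structure with 96-bit and 128-bit rows; their rotation distances can be read off the byte-offset stores in the generated code (1, 8, 55 and 1, 16, 25 respectively), which is why those routines shuffle 12- and 16-byte lanes through the stack frame instead of 8-byte ones.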
+ ldd r26,Z+24 + ldd r27,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r4,Z+28 + ldd r5,Z+29 + ldd r6,Z+30 + ldd r7,Z+31 + ldd r8,Z+32 + ldd r9,Z+33 + ldd r10,Z+34 + ldd r11,Z+35 + std Y+25,r26 + std Y+26,r27 + std Y+27,r2 + std Y+28,r3 + std Y+29,r4 + std Y+30,r5 + std Y+31,r6 + std Y+32,r7 + std Y+33,r8 + std Y+34,r9 + std Y+35,r10 + std Y+36,r11 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+37,r26 + std Y+38,r27 + std Y+39,r2 + std Y+40,r3 + std Y+41,r4 + std Y+42,r5 + std Y+43,r6 + std Y+44,r7 + std Y+45,r8 + std Y+46,r9 + std Y+47,r10 + std Y+48,r11 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r24,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif +99: + ldd r12,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + inc r30 + ldd r18,Y+13 + ldd r19,Y+25 + ldd r20,Y+37 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+61,r23 + mov r14,r20 + eor r14,r12 + mov r26,r18 + or r26,r19 + eor r26,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+1,r21 + mov r21,r26 + and r21,r12 + eor r21,r13 + std Y+49,r21 + ldd r12,Y+2 + ldd r18,Y+14 + ldd r19,Y+26 + ldd r20,Y+38 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+62,r23 + mov r14,r20 + eor r14,r12 + mov r27,r18 + or r27,r19 + eor r27,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+2,r21 + mov r21,r27 + and r21,r12 + eor r21,r13 + std Y+50,r21 + ldd r12,Y+3 + ldd r18,Y+15 + ldd r19,Y+27 + ldd r20,Y+39 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+63,r23 + mov r14,r20 + eor r14,r12 + mov r2,r18 + or r2,r19 + eor r2,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+3,r21 + mov r21,r2 + and r21,r12 + eor r21,r13 + std Y+51,r21 + ldd r12,Y+4 + ldd r18,Y+16 + ldd r19,Y+28 + ldd r20,Y+40 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,192 + sbci r29,255 + st Y,r23 + subi r28,64 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r3,r18 + or r3,r19 + eor r3,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+4,r21 + mov r21,r3 + and r21,r12 + eor r21,r13 + std Y+52,r21 + ldd r12,Y+5 + ldd r18,Y+17 + ldd r19,Y+29 + ldd r20,Y+41 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,191 + sbci r29,255 + st Y,r23 + subi r28,65 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r4,r18 + or r4,r19 + eor r4,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+5,r21 + mov r21,r4 + and r21,r12 + eor r21,r13 + std Y+53,r21 + ldd r12,Y+6 + ldd r18,Y+18 + ldd r19,Y+30 + ldd r20,Y+42 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,190 + sbci r29,255 + st Y,r23 + subi r28,66 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r5,r18 + or r5,r19 + eor r5,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+6,r21 + mov r21,r5 + and r21,r12 + eor r21,r13 + std Y+54,r21 + ldd r12,Y+7 + ldd r18,Y+19 + ldd r19,Y+31 + ldd r20,Y+43 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,189 + sbci r29,255 + 
st Y,r23 + subi r28,67 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r6,r18 + or r6,r19 + eor r6,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+7,r21 + mov r21,r6 + and r21,r12 + eor r21,r13 + std Y+55,r21 + ldd r12,Y+8 + ldd r18,Y+20 + ldd r19,Y+32 + ldd r20,Y+44 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,188 + sbci r29,255 + st Y,r23 + subi r28,68 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r7,r18 + or r7,r19 + eor r7,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+8,r21 + mov r21,r7 + and r21,r12 + eor r21,r13 + std Y+56,r21 + ldd r12,Y+9 + ldd r18,Y+21 + ldd r19,Y+33 + ldd r20,Y+45 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,187 + sbci r29,255 + st Y,r23 + subi r28,69 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r8,r18 + or r8,r19 + eor r8,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+9,r21 + mov r21,r8 + and r21,r12 + eor r21,r13 + std Y+57,r21 + ldd r12,Y+10 + ldd r18,Y+22 + ldd r19,Y+34 + ldd r20,Y+46 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,186 + sbci r29,255 + st Y,r23 + subi r28,70 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r9,r18 + or r9,r19 + eor r9,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+10,r21 + mov r21,r9 + and r21,r12 + eor r21,r13 + std Y+58,r21 + ldd r12,Y+11 + ldd r18,Y+23 + ldd r19,Y+35 + ldd r20,Y+47 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,185 + sbci r29,255 + st Y,r23 + subi r28,71 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r10,r18 + or r10,r19 + eor r10,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+11,r21 + mov r21,r10 + and r21,r12 + eor r21,r13 + std Y+59,r21 + ldd r12,Y+12 + ldd r18,Y+24 + ldd r19,Y+36 + ldd r20,Y+48 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,184 + sbci r29,255 + st Y,r23 + subi r28,72 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r11,r18 + or r11,r19 + eor r11,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+12,r21 + mov r21,r11 + and r21,r12 + eor r21,r13 + std Y+60,r21 + std Y+25,r11 + std Y+26,r26 + std Y+27,r27 + std Y+28,r2 + std Y+29,r3 + std Y+30,r4 + std Y+31,r5 + std Y+32,r6 + std Y+33,r7 + std Y+34,r8 + std Y+35,r9 + std Y+36,r10 + ldd r26,Y+49 + ldd r27,Y+50 + ldd r2,Y+51 + ldd r3,Y+52 + ldd r4,Y+53 + ldd r5,Y+54 + ldd r6,Y+55 + ldd r7,Y+56 + ldd r8,Y+57 + ldd r9,Y+58 + ldd r10,Y+59 + ldd r11,Y+60 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + adc r26,r1 + std Y+13,r26 + std Y+14,r27 + std Y+15,r2 + std Y+16,r3 + std Y+17,r4 + std Y+18,r5 + std Y+19,r6 + std Y+20,r7 + std Y+21,r8 + std Y+22,r9 + std Y+23,r10 + std Y+24,r11 + adiw r28,61 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y + subi r28,72 + sbc r29,r1 + bst r26,0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r3 + ror r2 + ror r27 + ror r26 + bld r11,7 + std Y+37,r5 + std Y+38,r6 + std Y+39,r7 + std Y+40,r8 + std Y+41,r9 + std Y+42,r10 + std Y+43,r11 + std Y+44,r26 + std Y+45,r27 + std Y+46,r2 + std Y+47,r3 + std Y+48,r4 + dec r22 + breq 5542f + rjmp 99b +5542: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + 
pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r2,Y+15 + ldd r3,Y+16 + ldd r4,Y+17 + ldd r5,Y+18 + ldd r6,Y+19 + ldd r7,Y+20 + ldd r8,Y+21 + ldd r9,Y+22 + ldd r10,Y+23 + ldd r11,Y+24 + std Z+12,r26 + std Z+13,r27 + std Z+14,r2 + std Z+15,r3 + std Z+16,r4 + std Z+17,r5 + std Z+18,r6 + std Z+19,r7 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + ldd r26,Y+25 + ldd r27,Y+26 + ldd r2,Y+27 + ldd r3,Y+28 + ldd r4,Y+29 + ldd r5,Y+30 + ldd r6,Y+31 + ldd r7,Y+32 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd r11,Y+36 + std Z+24,r26 + std Z+25,r27 + std Z+26,r2 + std Z+27,r3 + std Z+28,r4 + std Z+29,r5 + std Z+30,r6 + std Z+31,r7 + std Z+32,r8 + std Z+33,r9 + std Z+34,r10 + std Z+35,r11 + ldd r26,Y+37 + ldd r27,Y+38 + ldd r2,Y+39 + ldd r3,Y+40 + ldd r4,Y+41 + ldd r5,Y+42 + ldd r6,Y+43 + ldd r7,Y+44 + ldd r8,Y+45 + ldd r9,Y+46 + ldd r10,Y+47 + ldd r11,Y+48 + std Z+36,r26 + std Z+37,r27 + std Z+38,r2 + std Z+39,r3 + std Z+40,r4 + std Z+41,r5 + std Z+42,r6 + std Z+43,r7 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + subi r28,184 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot384_permute_7, .-knot384_permute_7 + +#endif diff --git a/knot/Implementations/crypto_aead/knot256/rhys/internal-knot-512-avr.S b/knot/Implementations/crypto_aead/knot256/rhys/internal-knot-512-avr.S new file mode 100644 index 0000000..6f92ac3 --- /dev/null +++ b/knot/Implementations/crypto_aead/knot256/rhys/internal-knot-512-avr.S @@ -0,0 +1,2315 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 + .byte 92 + + .text +.global knot512_permute_7 + .type knot512_permute_7, @function +knot512_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + 
push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,96 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 113 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r26,Z+16 + ldd r27,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + ldd r26,Z+32 + ldd r27,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r8,Z+40 + ldd r9,Z+41 + ldd r10,Z+42 + ldd r11,Z+43 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + std Y+33,r26 + std Y+34,r27 + std Y+35,r2 + std Y+36,r3 + std Y+37,r4 + std Y+38,r5 + std Y+39,r6 + std Y+40,r7 + std Y+41,r8 + std Y+42,r9 + std Y+43,r10 + std Y+44,r11 + std Y+45,r12 + std Y+46,r13 + std Y+47,r14 + std Y+48,r15 + ldd r26,Z+48 + ldd r27,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r8,Z+56 + ldd r9,Z+57 + ldd r10,Z+58 + ldd r11,Z+59 + ldd r12,Z+60 + ldd r13,Z+61 + ldd r14,Z+62 + ldd r15,Z+63 + adiw r28,49 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y+,r12 + st Y+,r13 + st Y+,r14 + st Y,r15 + subi r28,64 + sbc r29,r1 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r17,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +134: + ldd r24,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r24,r18 + inc r30 + ldd r18,Y+17 + ldd r19,Y+33 + ldd r20,Y+49 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,175 + sbci r29,255 + st Y,r23 + subi r28,81 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r26,r18 + or r26,r19 + eor r26,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+1,r21 + mov r21,r26 + and r21,r24 + eor r21,r25 + subi r28,191 + sbci r29,255 + st Y,r21 + subi r28,65 + sbc r29,r1 + ldd r24,Y+2 + ldd r18,Y+18 + ldd r19,Y+34 + ldd r20,Y+50 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,174 + sbci r29,255 + st Y,r23 + subi r28,82 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r27,r18 + or r27,r19 + eor r27,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+2,r21 + mov r21,r27 + and r21,r24 + eor r21,r25 + subi r28,190 + sbci r29,255 + st Y,r21 + subi r28,66 + sbc r29,r1 + ldd r24,Y+3 + ldd r18,Y+19 + ldd r19,Y+35 + ldd r20,Y+51 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,173 + sbci r29,255 + st Y,r23 + subi r28,83 + sbc r29,r1 + mov r16,r20 
+ eor r16,r24 + mov r2,r18 + or r2,r19 + eor r2,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+3,r21 + mov r21,r2 + and r21,r24 + eor r21,r25 + subi r28,189 + sbci r29,255 + st Y,r21 + subi r28,67 + sbc r29,r1 + ldd r24,Y+4 + ldd r18,Y+20 + ldd r19,Y+36 + ldd r20,Y+52 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,172 + sbci r29,255 + st Y,r23 + subi r28,84 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r3,r18 + or r3,r19 + eor r3,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+4,r21 + mov r21,r3 + and r21,r24 + eor r21,r25 + subi r28,188 + sbci r29,255 + st Y,r21 + subi r28,68 + sbc r29,r1 + ldd r24,Y+5 + ldd r18,Y+21 + ldd r19,Y+37 + ldd r20,Y+53 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,171 + sbci r29,255 + st Y,r23 + subi r28,85 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r4,r18 + or r4,r19 + eor r4,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+5,r21 + mov r21,r4 + and r21,r24 + eor r21,r25 + subi r28,187 + sbci r29,255 + st Y,r21 + subi r28,69 + sbc r29,r1 + ldd r24,Y+6 + ldd r18,Y+22 + ldd r19,Y+38 + ldd r20,Y+54 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,170 + sbci r29,255 + st Y,r23 + subi r28,86 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r5,r18 + or r5,r19 + eor r5,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+6,r21 + mov r21,r5 + and r21,r24 + eor r21,r25 + subi r28,186 + sbci r29,255 + st Y,r21 + subi r28,70 + sbc r29,r1 + ldd r24,Y+7 + ldd r18,Y+23 + ldd r19,Y+39 + ldd r20,Y+55 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,169 + sbci r29,255 + st Y,r23 + subi r28,87 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r6,r18 + or r6,r19 + eor r6,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+7,r21 + mov r21,r6 + and r21,r24 + eor r21,r25 + subi r28,185 + sbci r29,255 + st Y,r21 + subi r28,71 + sbc r29,r1 + ldd r24,Y+8 + ldd r18,Y+24 + ldd r19,Y+40 + ldd r20,Y+56 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,168 + sbci r29,255 + st Y,r23 + subi r28,88 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r7,r18 + or r7,r19 + eor r7,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+8,r21 + mov r21,r7 + and r21,r24 + eor r21,r25 + subi r28,184 + sbci r29,255 + st Y,r21 + subi r28,72 + sbc r29,r1 + ldd r24,Y+9 + ldd r18,Y+25 + ldd r19,Y+41 + ldd r20,Y+57 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,167 + sbci r29,255 + st Y,r23 + subi r28,89 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r8,r18 + or r8,r19 + eor r8,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+9,r21 + mov r21,r8 + and r21,r24 + eor r21,r25 + subi r28,183 + sbci r29,255 + st Y,r21 + subi r28,73 + sbc r29,r1 + ldd r24,Y+10 + ldd r18,Y+26 + ldd r19,Y+42 + ldd r20,Y+58 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,166 + sbci r29,255 + st Y,r23 + subi r28,90 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r9,r18 + or r9,r19 + eor r9,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+10,r21 + mov r21,r9 + and r21,r24 + eor r21,r25 + subi r28,182 + sbci r29,255 + st Y,r21 + subi r28,74 + sbc r29,r1 + ldd r24,Y+11 + ldd r18,Y+27 + ldd r19,Y+43 
+ ldd r20,Y+59 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,165 + sbci r29,255 + st Y,r23 + subi r28,91 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r10,r18 + or r10,r19 + eor r10,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+11,r21 + mov r21,r10 + and r21,r24 + eor r21,r25 + subi r28,181 + sbci r29,255 + st Y,r21 + subi r28,75 + sbc r29,r1 + ldd r24,Y+12 + ldd r18,Y+28 + ldd r19,Y+44 + ldd r20,Y+60 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,164 + sbci r29,255 + st Y,r23 + subi r28,92 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r11,r18 + or r11,r19 + eor r11,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+12,r21 + mov r21,r11 + and r21,r24 + eor r21,r25 + subi r28,180 + sbci r29,255 + st Y,r21 + subi r28,76 + sbc r29,r1 + ldd r24,Y+13 + ldd r18,Y+29 + ldd r19,Y+45 + ldd r20,Y+61 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,163 + sbci r29,255 + st Y,r23 + subi r28,93 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r12,r18 + or r12,r19 + eor r12,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+13,r21 + mov r21,r12 + and r21,r24 + eor r21,r25 + subi r28,179 + sbci r29,255 + st Y,r21 + subi r28,77 + sbc r29,r1 + ldd r24,Y+14 + ldd r18,Y+30 + ldd r19,Y+46 + ldd r20,Y+62 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,162 + sbci r29,255 + st Y,r23 + subi r28,94 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r13,r18 + or r13,r19 + eor r13,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+14,r21 + mov r21,r13 + and r21,r24 + eor r21,r25 + subi r28,178 + sbci r29,255 + st Y,r21 + subi r28,78 + sbc r29,r1 + ldd r24,Y+15 + ldd r18,Y+31 + ldd r19,Y+47 + ldd r20,Y+63 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,161 + sbci r29,255 + st Y,r23 + subi r28,95 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r14,r18 + or r14,r19 + eor r14,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+15,r21 + mov r21,r14 + and r21,r24 + eor r21,r25 + subi r28,177 + sbci r29,255 + st Y,r21 + subi r28,79 + sbc r29,r1 + ldd r24,Y+16 + ldd r18,Y+32 + ldd r19,Y+48 + subi r28,192 + sbci r29,255 + ld r20,Y + subi r28,64 + sbc r29,r1 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,160 + sbci r29,255 + st Y,r23 + subi r28,96 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r15,r18 + or r15,r19 + eor r15,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+16,r21 + mov r21,r15 + and r21,r24 + eor r21,r25 + subi r28,176 + sbci r29,255 + st Y,r21 + subi r28,80 + sbc r29,r1 + std Y+33,r14 + std Y+34,r15 + std Y+35,r26 + std Y+36,r27 + std Y+37,r2 + std Y+38,r3 + std Y+39,r4 + std Y+40,r5 + std Y+41,r6 + std Y+42,r7 + std Y+43,r8 + std Y+44,r9 + std Y+45,r10 + std Y+46,r11 + std Y+47,r12 + std Y+48,r13 + subi r28,191 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,80 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std 
Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,96 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + adiw r28,49 + st Y+,r13 + st Y+,r14 + st Y+,r15 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y,r12 + subi r28,64 + sbc r29,r1 + dec r22 + breq 5812f + rjmp 134b +5812: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r26,Y+17 + ldd r27,Y+18 + ldd r2,Y+19 + ldd r3,Y+20 + ldd r4,Y+21 + ldd r5,Y+22 + ldd r6,Y+23 + ldd r7,Y+24 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + std Z+16,r26 + std Z+17,r27 + std Z+18,r2 + std Z+19,r3 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + ldd r26,Y+33 + ldd r27,Y+34 + ldd r2,Y+35 + ldd r3,Y+36 + ldd r4,Y+37 + ldd r5,Y+38 + ldd r6,Y+39 + ldd r7,Y+40 + ldd r8,Y+41 + ldd r9,Y+42 + ldd r10,Y+43 + ldd r11,Y+44 + ldd r12,Y+45 + ldd r13,Y+46 + ldd r14,Y+47 + ldd r15,Y+48 + std Z+32,r26 + std Z+33,r27 + std Z+34,r2 + std Z+35,r3 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r8 + std Z+41,r9 + std Z+42,r10 + std Z+43,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + adiw r28,49 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,64 + sbc r29,r1 + std Z+48,r26 + std Z+49,r27 + std Z+50,r2 + std Z+51,r3 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + std Z+56,r8 + std Z+57,r9 + std Z+58,r10 + std Z+59,r11 + std Z+60,r12 + std Z+61,r13 + std Z+62,r14 + std Z+63,r15 + subi r28,160 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot512_permute_7, .-knot512_permute_7 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_8, @object + .size table_8, 140 +table_8: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 17 + .byte 35 + .byte 71 + .byte 142 + .byte 28 + .byte 56 + .byte 113 + .byte 226 + .byte 196 + .byte 137 + .byte 18 + .byte 37 + .byte 75 + .byte 151 + .byte 46 + .byte 92 + .byte 184 + .byte 112 + .byte 224 + .byte 192 + .byte 129 + .byte 3 + .byte 6 + .byte 12 + .byte 25 + .byte 50 + .byte 100 + .byte 201 + .byte 146 + .byte 36 + .byte 73 + 
.byte 147 + .byte 38 + .byte 77 + .byte 155 + .byte 55 + .byte 110 + .byte 220 + .byte 185 + .byte 114 + .byte 228 + .byte 200 + .byte 144 + .byte 32 + .byte 65 + .byte 130 + .byte 5 + .byte 10 + .byte 21 + .byte 43 + .byte 86 + .byte 173 + .byte 91 + .byte 182 + .byte 109 + .byte 218 + .byte 181 + .byte 107 + .byte 214 + .byte 172 + .byte 89 + .byte 178 + .byte 101 + .byte 203 + .byte 150 + .byte 44 + .byte 88 + .byte 176 + .byte 97 + .byte 195 + .byte 135 + .byte 15 + .byte 31 + .byte 62 + .byte 125 + .byte 251 + .byte 246 + .byte 237 + .byte 219 + .byte 183 + .byte 111 + .byte 222 + .byte 189 + .byte 122 + .byte 245 + .byte 235 + .byte 215 + .byte 174 + .byte 93 + .byte 186 + .byte 116 + .byte 232 + .byte 209 + .byte 162 + .byte 68 + .byte 136 + .byte 16 + .byte 33 + .byte 67 + .byte 134 + .byte 13 + .byte 27 + .byte 54 + .byte 108 + .byte 216 + .byte 177 + .byte 99 + .byte 199 + .byte 143 + .byte 30 + .byte 60 + .byte 121 + .byte 243 + .byte 231 + .byte 206 + .byte 156 + .byte 57 + .byte 115 + .byte 230 + .byte 204 + .byte 152 + .byte 49 + .byte 98 + .byte 197 + .byte 139 + .byte 22 + .byte 45 + .byte 90 + .byte 180 + .byte 105 + .byte 210 + .byte 164 + .byte 72 + .byte 145 + .byte 34 + .byte 69 + + .text +.global knot512_permute_8 + .type knot512_permute_8, @function +knot512_permute_8: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,96 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 113 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r26,Z+16 + ldd r27,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + ldd r26,Z+32 + ldd r27,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r8,Z+40 + ldd r9,Z+41 + ldd r10,Z+42 + ldd r11,Z+43 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + std Y+33,r26 + std Y+34,r27 + std Y+35,r2 + std Y+36,r3 + std Y+37,r4 + std Y+38,r5 + std Y+39,r6 + std Y+40,r7 + std Y+41,r8 + std Y+42,r9 + std Y+43,r10 + std Y+44,r11 + std Y+45,r12 + std Y+46,r13 + std Y+47,r14 + std Y+48,r15 + ldd r26,Z+48 + ldd r27,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r8,Z+56 + ldd r9,Z+57 + ldd r10,Z+58 + ldd r11,Z+59 + ldd r12,Z+60 + ldd r13,Z+61 + ldd r14,Z+62 + ldd r15,Z+63 + adiw r28,49 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y+,r12 + st Y+,r13 + st Y+,r14 + st Y,r15 + subi r28,64 + sbc r29,r1 + push r31 + push r30 + ldi r30,lo8(table_8) + ldi r31,hi8(table_8) +#if 
defined(RAMPZ) + ldi r17,hh8(table_8) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +134: + ldd r24,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r24,r18 + inc r30 + ldd r18,Y+17 + ldd r19,Y+33 + ldd r20,Y+49 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,175 + sbci r29,255 + st Y,r23 + subi r28,81 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r26,r18 + or r26,r19 + eor r26,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+1,r21 + mov r21,r26 + and r21,r24 + eor r21,r25 + subi r28,191 + sbci r29,255 + st Y,r21 + subi r28,65 + sbc r29,r1 + ldd r24,Y+2 + ldd r18,Y+18 + ldd r19,Y+34 + ldd r20,Y+50 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,174 + sbci r29,255 + st Y,r23 + subi r28,82 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r27,r18 + or r27,r19 + eor r27,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+2,r21 + mov r21,r27 + and r21,r24 + eor r21,r25 + subi r28,190 + sbci r29,255 + st Y,r21 + subi r28,66 + sbc r29,r1 + ldd r24,Y+3 + ldd r18,Y+19 + ldd r19,Y+35 + ldd r20,Y+51 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,173 + sbci r29,255 + st Y,r23 + subi r28,83 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r2,r18 + or r2,r19 + eor r2,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+3,r21 + mov r21,r2 + and r21,r24 + eor r21,r25 + subi r28,189 + sbci r29,255 + st Y,r21 + subi r28,67 + sbc r29,r1 + ldd r24,Y+4 + ldd r18,Y+20 + ldd r19,Y+36 + ldd r20,Y+52 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,172 + sbci r29,255 + st Y,r23 + subi r28,84 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r3,r18 + or r3,r19 + eor r3,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+4,r21 + mov r21,r3 + and r21,r24 + eor r21,r25 + subi r28,188 + sbci r29,255 + st Y,r21 + subi r28,68 + sbc r29,r1 + ldd r24,Y+5 + ldd r18,Y+21 + ldd r19,Y+37 + ldd r20,Y+53 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,171 + sbci r29,255 + st Y,r23 + subi r28,85 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r4,r18 + or r4,r19 + eor r4,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+5,r21 + mov r21,r4 + and r21,r24 + eor r21,r25 + subi r28,187 + sbci r29,255 + st Y,r21 + subi r28,69 + sbc r29,r1 + ldd r24,Y+6 + ldd r18,Y+22 + ldd r19,Y+38 + ldd r20,Y+54 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,170 + sbci r29,255 + st Y,r23 + subi r28,86 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r5,r18 + or r5,r19 + eor r5,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+6,r21 + mov r21,r5 + and r21,r24 + eor r21,r25 + subi r28,186 + sbci r29,255 + st Y,r21 + subi r28,70 + sbc r29,r1 + ldd r24,Y+7 + ldd r18,Y+23 + ldd r19,Y+39 + ldd r20,Y+55 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,169 + sbci r29,255 + st Y,r23 + subi r28,87 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r6,r18 + or r6,r19 + eor r6,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+7,r21 + mov r21,r6 + and r21,r24 + eor r21,r25 + subi r28,185 + sbci r29,255 + st Y,r21 + subi r28,71 
+ sbc r29,r1 + ldd r24,Y+8 + ldd r18,Y+24 + ldd r19,Y+40 + ldd r20,Y+56 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,168 + sbci r29,255 + st Y,r23 + subi r28,88 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r7,r18 + or r7,r19 + eor r7,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+8,r21 + mov r21,r7 + and r21,r24 + eor r21,r25 + subi r28,184 + sbci r29,255 + st Y,r21 + subi r28,72 + sbc r29,r1 + ldd r24,Y+9 + ldd r18,Y+25 + ldd r19,Y+41 + ldd r20,Y+57 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,167 + sbci r29,255 + st Y,r23 + subi r28,89 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r8,r18 + or r8,r19 + eor r8,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+9,r21 + mov r21,r8 + and r21,r24 + eor r21,r25 + subi r28,183 + sbci r29,255 + st Y,r21 + subi r28,73 + sbc r29,r1 + ldd r24,Y+10 + ldd r18,Y+26 + ldd r19,Y+42 + ldd r20,Y+58 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,166 + sbci r29,255 + st Y,r23 + subi r28,90 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r9,r18 + or r9,r19 + eor r9,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+10,r21 + mov r21,r9 + and r21,r24 + eor r21,r25 + subi r28,182 + sbci r29,255 + st Y,r21 + subi r28,74 + sbc r29,r1 + ldd r24,Y+11 + ldd r18,Y+27 + ldd r19,Y+43 + ldd r20,Y+59 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,165 + sbci r29,255 + st Y,r23 + subi r28,91 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r10,r18 + or r10,r19 + eor r10,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+11,r21 + mov r21,r10 + and r21,r24 + eor r21,r25 + subi r28,181 + sbci r29,255 + st Y,r21 + subi r28,75 + sbc r29,r1 + ldd r24,Y+12 + ldd r18,Y+28 + ldd r19,Y+44 + ldd r20,Y+60 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,164 + sbci r29,255 + st Y,r23 + subi r28,92 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r11,r18 + or r11,r19 + eor r11,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+12,r21 + mov r21,r11 + and r21,r24 + eor r21,r25 + subi r28,180 + sbci r29,255 + st Y,r21 + subi r28,76 + sbc r29,r1 + ldd r24,Y+13 + ldd r18,Y+29 + ldd r19,Y+45 + ldd r20,Y+61 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,163 + sbci r29,255 + st Y,r23 + subi r28,93 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r12,r18 + or r12,r19 + eor r12,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+13,r21 + mov r21,r12 + and r21,r24 + eor r21,r25 + subi r28,179 + sbci r29,255 + st Y,r21 + subi r28,77 + sbc r29,r1 + ldd r24,Y+14 + ldd r18,Y+30 + ldd r19,Y+46 + ldd r20,Y+62 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,162 + sbci r29,255 + st Y,r23 + subi r28,94 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r13,r18 + or r13,r19 + eor r13,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+14,r21 + mov r21,r13 + and r21,r24 + eor r21,r25 + subi r28,178 + sbci r29,255 + st Y,r21 + subi r28,78 + sbc r29,r1 + ldd r24,Y+15 + ldd r18,Y+31 + ldd r19,Y+47 + ldd r20,Y+63 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,161 + sbci r29,255 + st Y,r23 + subi r28,95 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r14,r18 + 
or r14,r19 + eor r14,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+15,r21 + mov r21,r14 + and r21,r24 + eor r21,r25 + subi r28,177 + sbci r29,255 + st Y,r21 + subi r28,79 + sbc r29,r1 + ldd r24,Y+16 + ldd r18,Y+32 + ldd r19,Y+48 + subi r28,192 + sbci r29,255 + ld r20,Y + subi r28,64 + sbc r29,r1 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,160 + sbci r29,255 + st Y,r23 + subi r28,96 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r15,r18 + or r15,r19 + eor r15,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+16,r21 + mov r21,r15 + and r21,r24 + eor r21,r25 + subi r28,176 + sbci r29,255 + st Y,r21 + subi r28,80 + sbc r29,r1 + std Y+33,r14 + std Y+34,r15 + std Y+35,r26 + std Y+36,r27 + std Y+37,r2 + std Y+38,r3 + std Y+39,r4 + std Y+40,r5 + std Y+41,r6 + std Y+42,r7 + std Y+43,r8 + std Y+44,r9 + std Y+45,r10 + std Y+46,r11 + std Y+47,r12 + std Y+48,r13 + subi r28,191 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,80 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,96 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + adiw r28,49 + st Y+,r13 + st Y+,r14 + st Y+,r15 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y,r12 + subi r28,64 + sbc r29,r1 + dec r22 + breq 5812f + rjmp 134b +5812: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r26,Y+17 + ldd r27,Y+18 + ldd r2,Y+19 + ldd r3,Y+20 + ldd r4,Y+21 + ldd r5,Y+22 + ldd r6,Y+23 + ldd r7,Y+24 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + std Z+16,r26 + std Z+17,r27 + std Z+18,r2 + std Z+19,r3 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + ldd r26,Y+33 + ldd r27,Y+34 + ldd r2,Y+35 + ldd r3,Y+36 + ldd r4,Y+37 + ldd r5,Y+38 + ldd r6,Y+39 + ldd r7,Y+40 + ldd r8,Y+41 + ldd r9,Y+42 + ldd r10,Y+43 + ldd r11,Y+44 + ldd r12,Y+45 + ldd r13,Y+46 + ldd r14,Y+47 + ldd r15,Y+48 + std Z+32,r26 + std Z+33,r27 + std Z+34,r2 + 
std Z+35,r3 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r8 + std Z+41,r9 + std Z+42,r10 + std Z+43,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + adiw r28,49 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,64 + sbc r29,r1 + std Z+48,r26 + std Z+49,r27 + std Z+50,r2 + std Z+51,r3 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + std Z+56,r8 + std Z+57,r9 + std Z+58,r10 + std Z+59,r11 + std Z+60,r12 + std Z+61,r13 + std Z+62,r14 + std Z+63,r15 + subi r28,160 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot512_permute_8, .-knot512_permute_8 + +#endif diff --git a/knot/Implementations/crypto_aead/knot256/rhys/internal-knot.c b/knot/Implementations/crypto_aead/knot256/rhys/internal-knot.c index 3486e6e..f8b378e 100644 --- a/knot/Implementations/crypto_aead/knot256/rhys/internal-knot.c +++ b/knot/Implementations/crypto_aead/knot256/rhys/internal-knot.c @@ -22,6 +22,8 @@ #include "internal-knot.h" +#if !defined(__AVR__) + /* Round constants for the KNOT-256, KNOT-384, and KNOT-512 permutations */ static uint8_t const rc6[52] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06, 0x0c, 0x18, 0x31, 0x22, @@ -295,3 +297,5 @@ void knot512_permute_8(knot512_state_t *state, uint8_t rounds) { knot512_permute(state, rc8, rounds); } + +#endif /* !__AVR__ */ diff --git a/knot/Implementations/crypto_aead/knot256/rhys/internal-util.h b/knot/Implementations/crypto_aead/knot256/rhys/internal-util.h index e79158c..e30166d 100644 --- a/knot/Implementations/crypto_aead/knot256/rhys/internal-util.h +++ b/knot/Implementations/crypto_aead/knot256/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/aead-common.c b/knot/Implementations/crypto_hash/knot256v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
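
The LW_CRYPTO_ROTATE32_COMPOSED block added to internal-util.h above expresses every 32-bit rotation as a rotation by a multiple of 8 bits (cheap on the AVR, which has no barrel shifter, because it amounts to moving bytes between registers) followed by at most four single-bit rotations. The following standalone C sketch is not part of the patch; it only demonstrates the identity the composed macros rely on and checks every rotation count against the generic rotate. The names rol32 and rol32_composed are local to the sketch, and the macros in the hunk sometimes compose from the other direction (for example leftRotate4 uses four single-bit lefts), but the resulting value is the same.

/* Standalone sketch: any 32-bit rotation equals a byte-aligned rotation
 * (0, 8, 16, 24 or 32 bits) plus at most four single-bit rotations. */
#include <stdint.h>
#include <stdio.h>

static uint32_t rol32(uint32_t x, unsigned bits)   /* generic left rotate */
{
    bits &= 31;
    return bits ? (x << bits) | (x >> (32 - bits)) : x;
}

static uint32_t rol32_composed(uint32_t x, unsigned bits)
{
    unsigned base = ((bits + 4) / 8) * 8;   /* nearby multiple of 8: 0,8,16,24,32 */
    int delta = (int)bits - (int)base;      /* correction, between -4 and +3 */
    x = rol32(x, base & 31);                /* a rotation by 32 is the identity */
    while (delta > 0) { x = rol32(x, 1);  --delta; }   /* single-bit left steps  */
    while (delta < 0) { x = rol32(x, 31); ++delta; }   /* single-bit right steps */
    return x;
}

int main(void)
{
    uint32_t v = 0x12345678;
    unsigned bits;
    for (bits = 0; bits < 32; ++bits) {
        if (rol32_composed(v, bits) != rol32(v, bits)) {
            printf("mismatch at rotation count %u\n", bits);
            return 1;
        }
    }
    printf("composed rotations agree with the generic rotation for all counts\n");
    return 0;
}

The worst case is therefore four lsl/rol (or lsr/ror) passes over the word, which is exactly the pattern visible in the generated assembly earlier in this patch.
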
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/aead-common.h b/knot/Implementations/crypto_hash/knot256v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. 
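
The aead_check_tag() and aead_check_tag_precheck() bodies removed above (identical copies remain under the rhys/ directories) compare the tags without branching on secret data: the byte differences of the two tags are OR-ed into accum, (accum - 1) >> 8 then becomes an all-ones mask when the tags matched and zero otherwise, the mask gates the plaintext so a forged packet is wiped, and ~accum yields the 0 / -1 return value. The minimal sketch below simply mirrors that logic and traces the mask through a matching and a non-matching case; like the original it assumes an arithmetic right shift of a negative int, which holds on the compilers this code targets.

/* Standalone sketch of the constant-time tag check; check_tag() mirrors the
 * removed aead_check_tag() and is not a symbol from the library itself. */
#include <stdio.h>

static int check_tag(unsigned char *plaintext, unsigned long long plaintext_len,
                     const unsigned char *tag1, const unsigned char *tag2,
                     unsigned size)
{
    int accum = 0;
    while (size > 0) {              /* OR together every byte difference */
        accum |= (*tag1++ ^ *tag2++);
        --size;
    }
    accum = (accum - 1) >> 8;       /* all-ones mask on a match, zero on a mismatch */
    while (plaintext_len > 0) {     /* wipes the plaintext when the mask is zero */
        *plaintext++ &= accum;
        --plaintext_len;
    }
    return ~accum;                  /* 0 on success, -1 on failure */
}

int main(void)
{
    unsigned char good[4] = {1, 2, 3, 4};
    unsigned char bad[4]  = {1, 2, 3, 5};
    unsigned char msg1[3] = {'a', 'b', 'c'};
    unsigned char msg2[3] = {'a', 'b', 'c'};
    int rc;

    rc = check_tag(msg1, 3, good, good, 4);
    printf("matching tags:   rc=%d, message kept: \"%.3s\"\n", rc, (const char *)msg1);

    rc = check_tag(msg2, 3, good, bad, 4);
    printf("mismatched tags: rc=%d, message wiped: %s\n", rc,
           (msg2[0] | msg2[1] | msg2[2]) == 0 ? "yes" : "no");
    return 0;
}

aead_check_tag_precheck() differs only in AND-ing the caller-supplied precheck value into the mask, so that an earlier failure also forces the wipe and the -1 result.
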
- * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
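
The aead_cipher_t and aead_hash_algorithm_t structures declared in this header (the kept copies under rhys/ are identical) act as small vtables: each algorithm publishes its parameter sizes and function pointers once, and generic test or benchmark drivers then reach every cipher through the same interface. The sketch below is purely hypothetical usage; demo_encrypt, demo_decrypt and demo_cipher are invented for illustration and are not symbols from the library.

/* Hypothetical driver sketch showing how an aead_cipher_t record is consumed. */
#include <stdio.h>
#include <string.h>
#include "aead-common.h"

/* Placeholder "cipher": copies the message and appends a constant 16-byte tag.
 * Invented only to populate the vtable below; not a real algorithm. */
static int demo_encrypt(unsigned char *c, unsigned long long *clen,
                        const unsigned char *m, unsigned long long mlen,
                        const unsigned char *ad, unsigned long long adlen,
                        const unsigned char *nsec,
                        const unsigned char *npub, const unsigned char *k)
{
    (void)ad; (void)adlen; (void)nsec; (void)npub; (void)k;
    memcpy(c, m, (size_t)mlen);
    memset(c + mlen, 0xAA, 16);
    *clen = mlen + 16;
    return 0;
}

static int demo_decrypt(unsigned char *m, unsigned long long *mlen,
                        unsigned char *nsec,
                        const unsigned char *c, unsigned long long clen,
                        const unsigned char *ad, unsigned long long adlen,
                        const unsigned char *npub, const unsigned char *k)
{
    (void)nsec; (void)ad; (void)adlen; (void)npub; (void)k;
    if (clen < 16)
        return -2;                  /* malformed packet */
    memcpy(m, c, (size_t)(clen - 16));
    *mlen = clen - 16;
    return 0;                       /* a real cipher would verify the tag here */
}

/* One meta-information record per algorithm; field order follows the struct. */
static const aead_cipher_t demo_cipher = {
    "Demo-AEAD",                    /* name */
    16, 16, 16,                     /* key, nonce and tag lengths in bytes */
    AEAD_FLAG_LITTLE_ENDIAN,        /* flags */
    demo_encrypt,
    demo_decrypt
};

/* Generic driver code only ever sees the vtable, never the concrete cipher. */
static void describe(const aead_cipher_t *cipher)
{
    printf("%s: key=%u nonce=%u tag=%u\n",
           cipher->name, cipher->key_len, cipher->nonce_len, cipher->tag_len);
}

int main(void)
{
    describe(&demo_cipher);
    return 0;
}

A real entry would point encrypt and decrypt at the actual AEAD routines and would normally be defined next to them, so drivers only ever need the aead_cipher_t pointer.
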
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/api.h b/knot/Implementations/crypto_hash/knot256v1/rhys-avr/api.h deleted file mode 100644 index ae8c7f6..0000000 --- a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/api.h +++ /dev/null @@ -1 +0,0 @@ -#define CRYPTO_BYTES 32 diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/hash.c b/knot/Implementations/crypto_hash/knot256v1/rhys-avr/hash.c deleted file mode 100644 index 16409ba..0000000 --- a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/hash.c +++ /dev/null @@ -1,8 +0,0 @@ - -#include "knot.h" - -int crypto_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - return knot_hash_256_256(out, in, inlen); -} diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot-256-avr.S b/knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot-256-avr.S deleted file mode 100644 index 15e6389..0000000 --- a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot-256-avr.S +++ /dev/null @@ -1,1093 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_6, @object - .size table_6, 52 -table_6: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 33 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 49 - .byte 34 - .byte 5 - .byte 10 - .byte 20 - .byte 41 - .byte 19 - .byte 39 - .byte 15 - .byte 30 - .byte 61 - .byte 58 - .byte 52 - .byte 40 - .byte 17 - .byte 35 - .byte 7 - .byte 14 - .byte 28 - .byte 57 - .byte 50 - .byte 36 - .byte 9 - .byte 18 - .byte 37 - .byte 11 - .byte 22 - .byte 45 - .byte 27 - .byte 55 - .byte 46 - .byte 29 - .byte 59 - .byte 54 - .byte 44 - .byte 25 - .byte 51 - .byte 38 - .byte 13 - .byte 26 - .byte 53 - .byte 42 - - .text -.global knot256_permute_6 - .type knot256_permute_6, @function -knot256_permute_6: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 57 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - 
std Y+15,r14 - std Y+16,r15 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r8 - std Y+18,r9 - std Y+19,r10 - std Y+20,r11 - std Y+21,r12 - std Y+22,r13 - std Y+23,r14 - std Y+24,r15 - push r31 - push r30 - ldi r30,lo8(table_6) - ldi r31,hi8(table_6) -#if defined(RAMPZ) - ldi r17,hh8(table_6) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -59: -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - eor r18,r23 - inc r30 - ldd r23,Y+1 - ldd r4,Y+9 - ldd r5,Y+17 - mov r24,r18 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+33,r7 - mov r16,r5 - eor r16,r24 - mov r8,r23 - or r8,r4 - eor r8,r16 - mov r24,r23 - eor r24,r5 - mov r18,r25 - and r18,r16 - eor r18,r24 - mov r6,r8 - and r6,r24 - eor r6,r25 - std Y+25,r6 - ldd r23,Y+2 - ldd r4,Y+10 - ldd r5,Y+18 - mov r24,r19 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+34,r7 - mov r16,r5 - eor r16,r24 - mov r9,r23 - or r9,r4 - eor r9,r16 - mov r24,r23 - eor r24,r5 - mov r19,r25 - and r19,r16 - eor r19,r24 - mov r6,r9 - and r6,r24 - eor r6,r25 - std Y+26,r6 - ldd r23,Y+3 - ldd r4,Y+11 - ldd r5,Y+19 - mov r24,r20 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+35,r7 - mov r16,r5 - eor r16,r24 - mov r10,r23 - or r10,r4 - eor r10,r16 - mov r24,r23 - eor r24,r5 - mov r20,r25 - and r20,r16 - eor r20,r24 - mov r6,r10 - and r6,r24 - eor r6,r25 - std Y+27,r6 - ldd r23,Y+4 - ldd r4,Y+12 - ldd r5,Y+20 - mov r24,r21 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+36,r7 - mov r16,r5 - eor r16,r24 - mov r11,r23 - or r11,r4 - eor r11,r16 - mov r24,r23 - eor r24,r5 - mov r21,r25 - and r21,r16 - eor r21,r24 - mov r6,r11 - and r6,r24 - eor r6,r25 - std Y+28,r6 - ldd r23,Y+5 - ldd r4,Y+13 - ldd r5,Y+21 - mov r24,r26 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+37,r7 - mov r16,r5 - eor r16,r24 - mov r12,r23 - or r12,r4 - eor r12,r16 - mov r24,r23 - eor r24,r5 - mov r26,r25 - and r26,r16 - eor r26,r24 - mov r6,r12 - and r6,r24 - eor r6,r25 - std Y+29,r6 - ldd r23,Y+6 - ldd r4,Y+14 - ldd r5,Y+22 - mov r24,r27 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+38,r7 - mov r16,r5 - eor r16,r24 - mov r13,r23 - or r13,r4 - eor r13,r16 - mov r24,r23 - eor r24,r5 - mov r27,r25 - and r27,r16 - eor r27,r24 - mov r6,r13 - and r6,r24 - eor r6,r25 - std Y+30,r6 - ldd r23,Y+7 - ldd r4,Y+15 - ldd r5,Y+23 - mov r24,r2 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+39,r7 - mov r16,r5 - eor r16,r24 - mov r14,r23 - or r14,r4 - eor r14,r16 - mov r24,r23 - eor r24,r5 - mov r2,r25 - and r2,r16 - eor r2,r24 - mov r6,r14 - and r6,r24 - eor r6,r25 - std Y+31,r6 - ldd r23,Y+8 - ldd r4,Y+16 - ldd r5,Y+24 - mov r24,r3 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+40,r7 - mov r16,r5 - eor r16,r24 - mov r15,r23 - or r15,r4 - eor r15,r16 - mov r24,r23 - eor r24,r5 - mov r3,r25 - and r3,r16 - eor r3,r24 - mov r6,r15 - and r6,r24 - eor r6,r25 - std Y+32,r6 - std Y+9,r15 - std Y+10,r8 - std Y+11,r9 - std Y+12,r10 - std Y+13,r11 - std Y+14,r12 - std Y+15,r13 - std Y+16,r14 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - lsl r8 - rol r9 - rol 
r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - ldd r12,Y+37 - ldd r13,Y+38 - ldd r14,Y+39 - ldd r15,Y+40 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+17,r13 - std Y+18,r14 - std Y+19,r15 - std Y+20,r8 - std Y+21,r9 - std Y+22,r10 - std Y+23,r11 - std Y+24,r12 - dec r22 - breq 5322f - rjmp 59b -5322: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - ldd r12,Y+5 - ldd r13,Y+6 - ldd r14,Y+7 - ldd r15,Y+8 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - ldd r8,Y+17 - ldd r9,Y+18 - ldd r10,Y+19 - ldd r11,Y+20 - ldd r12,Y+21 - ldd r13,Y+22 - ldd r14,Y+23 - ldd r15,Y+24 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - adiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot256_permute_6, .-knot256_permute_6 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot256_permute_7 - .type knot256_permute_7, @function -knot256_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 57 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - 
ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r8 - std Y+18,r9 - std Y+19,r10 - std Y+20,r11 - std Y+21,r12 - std Y+22,r13 - std Y+23,r14 - std Y+24,r15 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r17,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -59: -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - eor r18,r23 - inc r30 - ldd r23,Y+1 - ldd r4,Y+9 - ldd r5,Y+17 - mov r24,r18 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+33,r7 - mov r16,r5 - eor r16,r24 - mov r8,r23 - or r8,r4 - eor r8,r16 - mov r24,r23 - eor r24,r5 - mov r18,r25 - and r18,r16 - eor r18,r24 - mov r6,r8 - and r6,r24 - eor r6,r25 - std Y+25,r6 - ldd r23,Y+2 - ldd r4,Y+10 - ldd r5,Y+18 - mov r24,r19 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+34,r7 - mov r16,r5 - eor r16,r24 - mov r9,r23 - or r9,r4 - eor r9,r16 - mov r24,r23 - eor r24,r5 - mov r19,r25 - and r19,r16 - eor r19,r24 - mov r6,r9 - and r6,r24 - eor r6,r25 - std Y+26,r6 - ldd r23,Y+3 - ldd r4,Y+11 - ldd r5,Y+19 - mov r24,r20 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+35,r7 - mov r16,r5 - eor r16,r24 - mov r10,r23 - or r10,r4 - eor r10,r16 - mov r24,r23 - eor r24,r5 - mov r20,r25 - and r20,r16 - eor r20,r24 - mov r6,r10 - and r6,r24 - eor r6,r25 - std Y+27,r6 - ldd r23,Y+4 - ldd r4,Y+12 - ldd r5,Y+20 - mov r24,r21 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+36,r7 - mov r16,r5 - eor r16,r24 - mov r11,r23 - or r11,r4 - eor r11,r16 - mov r24,r23 - eor r24,r5 - mov r21,r25 - and r21,r16 - eor r21,r24 - mov r6,r11 - and r6,r24 - eor r6,r25 - std Y+28,r6 - ldd r23,Y+5 - ldd r4,Y+13 - ldd r5,Y+21 - mov r24,r26 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+37,r7 - mov r16,r5 - eor r16,r24 - mov r12,r23 - or r12,r4 - eor r12,r16 - mov r24,r23 - eor r24,r5 - mov r26,r25 - and r26,r16 - eor r26,r24 - mov r6,r12 - and r6,r24 - eor r6,r25 - std Y+29,r6 - ldd r23,Y+6 - ldd r4,Y+14 - ldd r5,Y+22 - mov r24,r27 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+38,r7 - mov r16,r5 - eor r16,r24 - mov r13,r23 - or r13,r4 - eor r13,r16 - mov r24,r23 - eor r24,r5 - mov r27,r25 - and r27,r16 - eor r27,r24 - mov r6,r13 - and r6,r24 - eor r6,r25 - std Y+30,r6 - ldd r23,Y+7 - ldd r4,Y+15 - ldd r5,Y+23 - mov r24,r2 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+39,r7 - mov r16,r5 - eor r16,r24 - mov r14,r23 - or r14,r4 - eor r14,r16 - mov r24,r23 - eor r24,r5 - mov r2,r25 - and r2,r16 - eor r2,r24 - mov r6,r14 - and r6,r24 - eor r6,r25 - std Y+31,r6 - ldd r23,Y+8 - ldd r4,Y+16 - ldd r5,Y+24 - mov r24,r3 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+40,r7 - mov r16,r5 - eor r16,r24 - mov r15,r23 - or r15,r4 - 
eor r15,r16 - mov r24,r23 - eor r24,r5 - mov r3,r25 - and r3,r16 - eor r3,r24 - mov r6,r15 - and r6,r24 - eor r6,r25 - std Y+32,r6 - std Y+9,r15 - std Y+10,r8 - std Y+11,r9 - std Y+12,r10 - std Y+13,r11 - std Y+14,r12 - std Y+15,r13 - std Y+16,r14 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - ldd r12,Y+37 - ldd r13,Y+38 - ldd r14,Y+39 - ldd r15,Y+40 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+17,r13 - std Y+18,r14 - std Y+19,r15 - std Y+20,r8 - std Y+21,r9 - std Y+22,r10 - std Y+23,r11 - std Y+24,r12 - dec r22 - breq 5322f - rjmp 59b -5322: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - ldd r12,Y+5 - ldd r13,Y+6 - ldd r14,Y+7 - ldd r15,Y+8 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - ldd r8,Y+17 - ldd r9,Y+18 - ldd r10,Y+19 - ldd r11,Y+20 - ldd r12,Y+21 - ldd r13,Y+22 - ldd r14,Y+23 - ldd r15,Y+24 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - adiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot256_permute_7, .-knot256_permute_7 - -#endif diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot-384-avr.S b/knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot-384-avr.S deleted file mode 100644 index 4d15898..0000000 --- a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot-384-avr.S +++ /dev/null @@ -1,833 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - 
.byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot384_permute_7 - .type knot384_permute_7, @function -knot384_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,72 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 87 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - ldd r4,Z+16 - ldd r5,Z+17 - ldd r6,Z+18 - ldd r7,Z+19 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - std Y+13,r26 - std Y+14,r27 - std Y+15,r2 - std Y+16,r3 - std Y+17,r4 - std Y+18,r5 - std Y+19,r6 - std Y+20,r7 - std Y+21,r8 - std Y+22,r9 - std Y+23,r10 - std Y+24,r11 - ldd r26,Z+24 - ldd r27,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r4,Z+28 - ldd r5,Z+29 - ldd r6,Z+30 - ldd r7,Z+31 - ldd r8,Z+32 - ldd r9,Z+33 - ldd r10,Z+34 - ldd r11,Z+35 - std Y+25,r26 - std Y+26,r27 - std Y+27,r2 - std Y+28,r3 - std Y+29,r4 - std Y+30,r5 - std Y+31,r6 - std Y+32,r7 - std Y+33,r8 - std Y+34,r9 - std Y+35,r10 - std Y+36,r11 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+37,r26 - std Y+38,r27 - std Y+39,r2 - std Y+40,r3 - std Y+41,r4 - std Y+42,r5 - std Y+43,r6 - std Y+44,r7 - std Y+45,r8 - std Y+46,r9 - std Y+47,r10 - std Y+48,r11 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r24,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif -99: - ldd r12,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - inc r30 - ldd r18,Y+13 - ldd r19,Y+25 - ldd r20,Y+37 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+61,r23 - mov r14,r20 - eor r14,r12 - mov r26,r18 - or r26,r19 - eor r26,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+1,r21 - mov r21,r26 - and r21,r12 - eor r21,r13 - std Y+49,r21 - ldd r12,Y+2 - ldd r18,Y+14 - ldd r19,Y+26 - ldd r20,Y+38 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+62,r23 - mov r14,r20 - eor r14,r12 - mov r27,r18 - or r27,r19 - eor r27,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+2,r21 - mov r21,r27 - and r21,r12 - eor r21,r13 - std Y+50,r21 - ldd r12,Y+3 - ldd r18,Y+15 - ldd r19,Y+27 - ldd r20,Y+39 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+63,r23 - mov r14,r20 - eor r14,r12 - mov r2,r18 - or r2,r19 - eor r2,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+3,r21 - mov r21,r2 - and r21,r12 - eor r21,r13 - std Y+51,r21 - ldd r12,Y+4 - ldd r18,Y+16 - ldd r19,Y+28 - ldd r20,Y+40 - com r12 - mov r13,r18 - and 
r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,192 - sbci r29,255 - st Y,r23 - subi r28,64 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r3,r18 - or r3,r19 - eor r3,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+4,r21 - mov r21,r3 - and r21,r12 - eor r21,r13 - std Y+52,r21 - ldd r12,Y+5 - ldd r18,Y+17 - ldd r19,Y+29 - ldd r20,Y+41 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,191 - sbci r29,255 - st Y,r23 - subi r28,65 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r4,r18 - or r4,r19 - eor r4,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+5,r21 - mov r21,r4 - and r21,r12 - eor r21,r13 - std Y+53,r21 - ldd r12,Y+6 - ldd r18,Y+18 - ldd r19,Y+30 - ldd r20,Y+42 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,190 - sbci r29,255 - st Y,r23 - subi r28,66 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r5,r18 - or r5,r19 - eor r5,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+6,r21 - mov r21,r5 - and r21,r12 - eor r21,r13 - std Y+54,r21 - ldd r12,Y+7 - ldd r18,Y+19 - ldd r19,Y+31 - ldd r20,Y+43 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,189 - sbci r29,255 - st Y,r23 - subi r28,67 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r6,r18 - or r6,r19 - eor r6,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+7,r21 - mov r21,r6 - and r21,r12 - eor r21,r13 - std Y+55,r21 - ldd r12,Y+8 - ldd r18,Y+20 - ldd r19,Y+32 - ldd r20,Y+44 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,188 - sbci r29,255 - st Y,r23 - subi r28,68 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r7,r18 - or r7,r19 - eor r7,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+8,r21 - mov r21,r7 - and r21,r12 - eor r21,r13 - std Y+56,r21 - ldd r12,Y+9 - ldd r18,Y+21 - ldd r19,Y+33 - ldd r20,Y+45 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,187 - sbci r29,255 - st Y,r23 - subi r28,69 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r8,r18 - or r8,r19 - eor r8,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+9,r21 - mov r21,r8 - and r21,r12 - eor r21,r13 - std Y+57,r21 - ldd r12,Y+10 - ldd r18,Y+22 - ldd r19,Y+34 - ldd r20,Y+46 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,186 - sbci r29,255 - st Y,r23 - subi r28,70 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r9,r18 - or r9,r19 - eor r9,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+10,r21 - mov r21,r9 - and r21,r12 - eor r21,r13 - std Y+58,r21 - ldd r12,Y+11 - ldd r18,Y+23 - ldd r19,Y+35 - ldd r20,Y+47 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,185 - sbci r29,255 - st Y,r23 - subi r28,71 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r10,r18 - or r10,r19 - eor r10,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+11,r21 - mov r21,r10 - and r21,r12 - eor r21,r13 - std Y+59,r21 - ldd r12,Y+12 - ldd r18,Y+24 - ldd r19,Y+36 - ldd r20,Y+48 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,184 - sbci r29,255 - st Y,r23 - subi r28,72 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r11,r18 - or r11,r19 - eor r11,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and 
r21,r14 - eor r21,r12 - std Y+12,r21 - mov r21,r11 - and r21,r12 - eor r21,r13 - std Y+60,r21 - std Y+25,r11 - std Y+26,r26 - std Y+27,r27 - std Y+28,r2 - std Y+29,r3 - std Y+30,r4 - std Y+31,r5 - std Y+32,r6 - std Y+33,r7 - std Y+34,r8 - std Y+35,r9 - std Y+36,r10 - ldd r26,Y+49 - ldd r27,Y+50 - ldd r2,Y+51 - ldd r3,Y+52 - ldd r4,Y+53 - ldd r5,Y+54 - ldd r6,Y+55 - ldd r7,Y+56 - ldd r8,Y+57 - ldd r9,Y+58 - ldd r10,Y+59 - ldd r11,Y+60 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - adc r26,r1 - std Y+13,r26 - std Y+14,r27 - std Y+15,r2 - std Y+16,r3 - std Y+17,r4 - std Y+18,r5 - std Y+19,r6 - std Y+20,r7 - std Y+21,r8 - std Y+22,r9 - std Y+23,r10 - std Y+24,r11 - adiw r28,61 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y - subi r28,72 - sbc r29,r1 - bst r26,0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r3 - ror r2 - ror r27 - ror r26 - bld r11,7 - std Y+37,r5 - std Y+38,r6 - std Y+39,r7 - std Y+40,r8 - std Y+41,r9 - std Y+42,r10 - std Y+43,r11 - std Y+44,r26 - std Y+45,r27 - std Y+46,r2 - std Y+47,r3 - std Y+48,r4 - dec r22 - breq 5542f - rjmp 99b -5542: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r2,Y+15 - ldd r3,Y+16 - ldd r4,Y+17 - ldd r5,Y+18 - ldd r6,Y+19 - ldd r7,Y+20 - ldd r8,Y+21 - ldd r9,Y+22 - ldd r10,Y+23 - ldd r11,Y+24 - std Z+12,r26 - std Z+13,r27 - std Z+14,r2 - std Z+15,r3 - std Z+16,r4 - std Z+17,r5 - std Z+18,r6 - std Z+19,r7 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - ldd r26,Y+25 - ldd r27,Y+26 - ldd r2,Y+27 - ldd r3,Y+28 - ldd r4,Y+29 - ldd r5,Y+30 - ldd r6,Y+31 - ldd r7,Y+32 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - std Z+24,r26 - std Z+25,r27 - std Z+26,r2 - std Z+27,r3 - std Z+28,r4 - std Z+29,r5 - std Z+30,r6 - std Z+31,r7 - std Z+32,r8 - std Z+33,r9 - std Z+34,r10 - std Z+35,r11 - ldd r26,Y+37 - ldd r27,Y+38 - ldd r2,Y+39 - ldd r3,Y+40 - ldd r4,Y+41 - ldd r5,Y+42 - ldd r6,Y+43 - ldd r7,Y+44 - ldd r8,Y+45 - ldd r9,Y+46 - ldd r10,Y+47 - ldd r11,Y+48 - std Z+36,r26 - std Z+37,r27 - std Z+38,r2 - std Z+39,r3 - std Z+40,r4 - std Z+41,r5 - std Z+42,r6 - std Z+43,r7 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - subi r28,184 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot384_permute_7, .-knot384_permute_7 - -#endif diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot-512-avr.S b/knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot-512-avr.S deleted file mode 100644 index 6f92ac3..0000000 --- a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot-512-avr.S +++ /dev/null @@ -1,2315 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 
- .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot512_permute_7 - .type knot512_permute_7, @function -knot512_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,96 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 113 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r26,Z+16 - ldd r27,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - ldd r26,Z+32 - ldd r27,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r8,Z+40 - ldd r9,Z+41 - ldd r10,Z+42 - ldd r11,Z+43 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - std Y+33,r26 - std Y+34,r27 - std Y+35,r2 - std Y+36,r3 - std Y+37,r4 - std Y+38,r5 - std Y+39,r6 - std Y+40,r7 - std Y+41,r8 - std Y+42,r9 - std Y+43,r10 - std Y+44,r11 - std Y+45,r12 - std Y+46,r13 - std Y+47,r14 - std Y+48,r15 - ldd r26,Z+48 - ldd r27,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r8,Z+56 - ldd r9,Z+57 - ldd r10,Z+58 - ldd r11,Z+59 - ldd r12,Z+60 - ldd r13,Z+61 - ldd r14,Z+62 - ldd r15,Z+63 - adiw r28,49 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y+,r12 - st Y+,r13 - st Y+,r14 - st Y,r15 - subi r28,64 - sbc r29,r1 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r17,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 
-#endif -134: - ldd r24,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r24,r18 - inc r30 - ldd r18,Y+17 - ldd r19,Y+33 - ldd r20,Y+49 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,175 - sbci r29,255 - st Y,r23 - subi r28,81 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r26,r18 - or r26,r19 - eor r26,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+1,r21 - mov r21,r26 - and r21,r24 - eor r21,r25 - subi r28,191 - sbci r29,255 - st Y,r21 - subi r28,65 - sbc r29,r1 - ldd r24,Y+2 - ldd r18,Y+18 - ldd r19,Y+34 - ldd r20,Y+50 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,174 - sbci r29,255 - st Y,r23 - subi r28,82 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r27,r18 - or r27,r19 - eor r27,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+2,r21 - mov r21,r27 - and r21,r24 - eor r21,r25 - subi r28,190 - sbci r29,255 - st Y,r21 - subi r28,66 - sbc r29,r1 - ldd r24,Y+3 - ldd r18,Y+19 - ldd r19,Y+35 - ldd r20,Y+51 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,173 - sbci r29,255 - st Y,r23 - subi r28,83 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r2,r18 - or r2,r19 - eor r2,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+3,r21 - mov r21,r2 - and r21,r24 - eor r21,r25 - subi r28,189 - sbci r29,255 - st Y,r21 - subi r28,67 - sbc r29,r1 - ldd r24,Y+4 - ldd r18,Y+20 - ldd r19,Y+36 - ldd r20,Y+52 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,172 - sbci r29,255 - st Y,r23 - subi r28,84 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r3,r18 - or r3,r19 - eor r3,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+4,r21 - mov r21,r3 - and r21,r24 - eor r21,r25 - subi r28,188 - sbci r29,255 - st Y,r21 - subi r28,68 - sbc r29,r1 - ldd r24,Y+5 - ldd r18,Y+21 - ldd r19,Y+37 - ldd r20,Y+53 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,171 - sbci r29,255 - st Y,r23 - subi r28,85 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r4,r18 - or r4,r19 - eor r4,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+5,r21 - mov r21,r4 - and r21,r24 - eor r21,r25 - subi r28,187 - sbci r29,255 - st Y,r21 - subi r28,69 - sbc r29,r1 - ldd r24,Y+6 - ldd r18,Y+22 - ldd r19,Y+38 - ldd r20,Y+54 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,170 - sbci r29,255 - st Y,r23 - subi r28,86 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r5,r18 - or r5,r19 - eor r5,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+6,r21 - mov r21,r5 - and r21,r24 - eor r21,r25 - subi r28,186 - sbci r29,255 - st Y,r21 - subi r28,70 - sbc r29,r1 - ldd r24,Y+7 - ldd r18,Y+23 - ldd r19,Y+39 - ldd r20,Y+55 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,169 - sbci r29,255 - st Y,r23 - subi r28,87 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r6,r18 - or r6,r19 - eor r6,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+7,r21 - mov r21,r6 - and r21,r24 - eor r21,r25 - subi r28,185 - sbci r29,255 - st Y,r21 - subi r28,71 - sbc r29,r1 - ldd r24,Y+8 - ldd r18,Y+24 - ldd r19,Y+40 - ldd r20,Y+56 - com r24 - mov r25,r18 - and 
r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,168 - sbci r29,255 - st Y,r23 - subi r28,88 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r7,r18 - or r7,r19 - eor r7,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+8,r21 - mov r21,r7 - and r21,r24 - eor r21,r25 - subi r28,184 - sbci r29,255 - st Y,r21 - subi r28,72 - sbc r29,r1 - ldd r24,Y+9 - ldd r18,Y+25 - ldd r19,Y+41 - ldd r20,Y+57 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,167 - sbci r29,255 - st Y,r23 - subi r28,89 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r8,r18 - or r8,r19 - eor r8,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+9,r21 - mov r21,r8 - and r21,r24 - eor r21,r25 - subi r28,183 - sbci r29,255 - st Y,r21 - subi r28,73 - sbc r29,r1 - ldd r24,Y+10 - ldd r18,Y+26 - ldd r19,Y+42 - ldd r20,Y+58 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,166 - sbci r29,255 - st Y,r23 - subi r28,90 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r9,r18 - or r9,r19 - eor r9,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+10,r21 - mov r21,r9 - and r21,r24 - eor r21,r25 - subi r28,182 - sbci r29,255 - st Y,r21 - subi r28,74 - sbc r29,r1 - ldd r24,Y+11 - ldd r18,Y+27 - ldd r19,Y+43 - ldd r20,Y+59 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,165 - sbci r29,255 - st Y,r23 - subi r28,91 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r10,r18 - or r10,r19 - eor r10,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+11,r21 - mov r21,r10 - and r21,r24 - eor r21,r25 - subi r28,181 - sbci r29,255 - st Y,r21 - subi r28,75 - sbc r29,r1 - ldd r24,Y+12 - ldd r18,Y+28 - ldd r19,Y+44 - ldd r20,Y+60 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,164 - sbci r29,255 - st Y,r23 - subi r28,92 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r11,r18 - or r11,r19 - eor r11,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+12,r21 - mov r21,r11 - and r21,r24 - eor r21,r25 - subi r28,180 - sbci r29,255 - st Y,r21 - subi r28,76 - sbc r29,r1 - ldd r24,Y+13 - ldd r18,Y+29 - ldd r19,Y+45 - ldd r20,Y+61 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,163 - sbci r29,255 - st Y,r23 - subi r28,93 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r12,r18 - or r12,r19 - eor r12,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+13,r21 - mov r21,r12 - and r21,r24 - eor r21,r25 - subi r28,179 - sbci r29,255 - st Y,r21 - subi r28,77 - sbc r29,r1 - ldd r24,Y+14 - ldd r18,Y+30 - ldd r19,Y+46 - ldd r20,Y+62 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,162 - sbci r29,255 - st Y,r23 - subi r28,94 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r13,r18 - or r13,r19 - eor r13,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+14,r21 - mov r21,r13 - and r21,r24 - eor r21,r25 - subi r28,178 - sbci r29,255 - st Y,r21 - subi r28,78 - sbc r29,r1 - ldd r24,Y+15 - ldd r18,Y+31 - ldd r19,Y+47 - ldd r20,Y+63 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,161 - sbci r29,255 - st Y,r23 - subi r28,95 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r14,r18 - or r14,r19 - eor r14,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std 
Y+15,r21 - mov r21,r14 - and r21,r24 - eor r21,r25 - subi r28,177 - sbci r29,255 - st Y,r21 - subi r28,79 - sbc r29,r1 - ldd r24,Y+16 - ldd r18,Y+32 - ldd r19,Y+48 - subi r28,192 - sbci r29,255 - ld r20,Y - subi r28,64 - sbc r29,r1 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,160 - sbci r29,255 - st Y,r23 - subi r28,96 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r15,r18 - or r15,r19 - eor r15,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+16,r21 - mov r21,r15 - and r21,r24 - eor r21,r25 - subi r28,176 - sbci r29,255 - st Y,r21 - subi r28,80 - sbc r29,r1 - std Y+33,r14 - std Y+34,r15 - std Y+35,r26 - std Y+36,r27 - std Y+37,r2 - std Y+38,r3 - std Y+39,r4 - std Y+40,r5 - std Y+41,r6 - std Y+42,r7 - std Y+43,r8 - std Y+44,r9 - std Y+45,r10 - std Y+46,r11 - std Y+47,r12 - std Y+48,r13 - subi r28,191 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,80 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,96 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - adiw r28,49 - st Y+,r13 - st Y+,r14 - st Y+,r15 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y,r12 - subi r28,64 - sbc r29,r1 - dec r22 - breq 5812f - rjmp 134b -5812: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r26,Y+17 - ldd r27,Y+18 - ldd r2,Y+19 - ldd r3,Y+20 - ldd r4,Y+21 - ldd r5,Y+22 - ldd r6,Y+23 - ldd r7,Y+24 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - std Z+16,r26 - std Z+17,r27 - std Z+18,r2 - std Z+19,r3 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - ldd r26,Y+33 - ldd r27,Y+34 - ldd r2,Y+35 - ldd r3,Y+36 - ldd r4,Y+37 - ldd r5,Y+38 - ldd r6,Y+39 - ldd r7,Y+40 - ldd r8,Y+41 - ldd r9,Y+42 - ldd r10,Y+43 - ldd r11,Y+44 - ldd r12,Y+45 - ldd r13,Y+46 - ldd r14,Y+47 - ldd r15,Y+48 - std Z+32,r26 - std Z+33,r27 - std Z+34,r2 - std Z+35,r3 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r8 - std Z+41,r9 - 
std Z+42,r10 - std Z+43,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - adiw r28,49 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,64 - sbc r29,r1 - std Z+48,r26 - std Z+49,r27 - std Z+50,r2 - std Z+51,r3 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - std Z+56,r8 - std Z+57,r9 - std Z+58,r10 - std Z+59,r11 - std Z+60,r12 - std Z+61,r13 - std Z+62,r14 - std Z+63,r15 - subi r28,160 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot512_permute_7, .-knot512_permute_7 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_8, @object - .size table_8, 140 -table_8: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 17 - .byte 35 - .byte 71 - .byte 142 - .byte 28 - .byte 56 - .byte 113 - .byte 226 - .byte 196 - .byte 137 - .byte 18 - .byte 37 - .byte 75 - .byte 151 - .byte 46 - .byte 92 - .byte 184 - .byte 112 - .byte 224 - .byte 192 - .byte 129 - .byte 3 - .byte 6 - .byte 12 - .byte 25 - .byte 50 - .byte 100 - .byte 201 - .byte 146 - .byte 36 - .byte 73 - .byte 147 - .byte 38 - .byte 77 - .byte 155 - .byte 55 - .byte 110 - .byte 220 - .byte 185 - .byte 114 - .byte 228 - .byte 200 - .byte 144 - .byte 32 - .byte 65 - .byte 130 - .byte 5 - .byte 10 - .byte 21 - .byte 43 - .byte 86 - .byte 173 - .byte 91 - .byte 182 - .byte 109 - .byte 218 - .byte 181 - .byte 107 - .byte 214 - .byte 172 - .byte 89 - .byte 178 - .byte 101 - .byte 203 - .byte 150 - .byte 44 - .byte 88 - .byte 176 - .byte 97 - .byte 195 - .byte 135 - .byte 15 - .byte 31 - .byte 62 - .byte 125 - .byte 251 - .byte 246 - .byte 237 - .byte 219 - .byte 183 - .byte 111 - .byte 222 - .byte 189 - .byte 122 - .byte 245 - .byte 235 - .byte 215 - .byte 174 - .byte 93 - .byte 186 - .byte 116 - .byte 232 - .byte 209 - .byte 162 - .byte 68 - .byte 136 - .byte 16 - .byte 33 - .byte 67 - .byte 134 - .byte 13 - .byte 27 - .byte 54 - .byte 108 - .byte 216 - .byte 177 - .byte 99 - .byte 199 - .byte 143 - .byte 30 - .byte 60 - .byte 121 - .byte 243 - .byte 231 - .byte 206 - .byte 156 - .byte 57 - .byte 115 - .byte 230 - .byte 204 - .byte 152 - .byte 49 - .byte 98 - .byte 197 - .byte 139 - .byte 22 - .byte 45 - .byte 90 - .byte 180 - .byte 105 - .byte 210 - .byte 164 - .byte 72 - .byte 145 - .byte 34 - .byte 69 - - .text -.global knot512_permute_8 - .type knot512_permute_8, @function -knot512_permute_8: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,96 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 113 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r26,Z+16 - ldd r27,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd 
r7,Z+23 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - ldd r26,Z+32 - ldd r27,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r8,Z+40 - ldd r9,Z+41 - ldd r10,Z+42 - ldd r11,Z+43 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - std Y+33,r26 - std Y+34,r27 - std Y+35,r2 - std Y+36,r3 - std Y+37,r4 - std Y+38,r5 - std Y+39,r6 - std Y+40,r7 - std Y+41,r8 - std Y+42,r9 - std Y+43,r10 - std Y+44,r11 - std Y+45,r12 - std Y+46,r13 - std Y+47,r14 - std Y+48,r15 - ldd r26,Z+48 - ldd r27,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r8,Z+56 - ldd r9,Z+57 - ldd r10,Z+58 - ldd r11,Z+59 - ldd r12,Z+60 - ldd r13,Z+61 - ldd r14,Z+62 - ldd r15,Z+63 - adiw r28,49 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y+,r12 - st Y+,r13 - st Y+,r14 - st Y,r15 - subi r28,64 - sbc r29,r1 - push r31 - push r30 - ldi r30,lo8(table_8) - ldi r31,hi8(table_8) -#if defined(RAMPZ) - ldi r17,hh8(table_8) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -134: - ldd r24,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r24,r18 - inc r30 - ldd r18,Y+17 - ldd r19,Y+33 - ldd r20,Y+49 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,175 - sbci r29,255 - st Y,r23 - subi r28,81 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r26,r18 - or r26,r19 - eor r26,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+1,r21 - mov r21,r26 - and r21,r24 - eor r21,r25 - subi r28,191 - sbci r29,255 - st Y,r21 - subi r28,65 - sbc r29,r1 - ldd r24,Y+2 - ldd r18,Y+18 - ldd r19,Y+34 - ldd r20,Y+50 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,174 - sbci r29,255 - st Y,r23 - subi r28,82 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r27,r18 - or r27,r19 - eor r27,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+2,r21 - mov r21,r27 - and r21,r24 - eor r21,r25 - subi r28,190 - sbci r29,255 - st Y,r21 - subi r28,66 - sbc r29,r1 - ldd r24,Y+3 - ldd r18,Y+19 - ldd r19,Y+35 - ldd r20,Y+51 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,173 - sbci r29,255 - st Y,r23 - subi r28,83 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r2,r18 - or r2,r19 - eor r2,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+3,r21 - mov r21,r2 - and r21,r24 - eor r21,r25 - subi r28,189 - sbci r29,255 - st Y,r21 - subi r28,67 - sbc r29,r1 - ldd r24,Y+4 - ldd r18,Y+20 - ldd r19,Y+36 - ldd r20,Y+52 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,172 - sbci r29,255 - st Y,r23 - subi r28,84 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r3,r18 - or r3,r19 - eor r3,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+4,r21 - mov r21,r3 - and r21,r24 - eor r21,r25 - subi r28,188 - sbci r29,255 - st Y,r21 - subi r28,68 - sbc r29,r1 - ldd r24,Y+5 - ldd r18,Y+21 - ldd 
r19,Y+37 - ldd r20,Y+53 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,171 - sbci r29,255 - st Y,r23 - subi r28,85 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r4,r18 - or r4,r19 - eor r4,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+5,r21 - mov r21,r4 - and r21,r24 - eor r21,r25 - subi r28,187 - sbci r29,255 - st Y,r21 - subi r28,69 - sbc r29,r1 - ldd r24,Y+6 - ldd r18,Y+22 - ldd r19,Y+38 - ldd r20,Y+54 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,170 - sbci r29,255 - st Y,r23 - subi r28,86 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r5,r18 - or r5,r19 - eor r5,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+6,r21 - mov r21,r5 - and r21,r24 - eor r21,r25 - subi r28,186 - sbci r29,255 - st Y,r21 - subi r28,70 - sbc r29,r1 - ldd r24,Y+7 - ldd r18,Y+23 - ldd r19,Y+39 - ldd r20,Y+55 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,169 - sbci r29,255 - st Y,r23 - subi r28,87 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r6,r18 - or r6,r19 - eor r6,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+7,r21 - mov r21,r6 - and r21,r24 - eor r21,r25 - subi r28,185 - sbci r29,255 - st Y,r21 - subi r28,71 - sbc r29,r1 - ldd r24,Y+8 - ldd r18,Y+24 - ldd r19,Y+40 - ldd r20,Y+56 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,168 - sbci r29,255 - st Y,r23 - subi r28,88 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r7,r18 - or r7,r19 - eor r7,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+8,r21 - mov r21,r7 - and r21,r24 - eor r21,r25 - subi r28,184 - sbci r29,255 - st Y,r21 - subi r28,72 - sbc r29,r1 - ldd r24,Y+9 - ldd r18,Y+25 - ldd r19,Y+41 - ldd r20,Y+57 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,167 - sbci r29,255 - st Y,r23 - subi r28,89 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r8,r18 - or r8,r19 - eor r8,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+9,r21 - mov r21,r8 - and r21,r24 - eor r21,r25 - subi r28,183 - sbci r29,255 - st Y,r21 - subi r28,73 - sbc r29,r1 - ldd r24,Y+10 - ldd r18,Y+26 - ldd r19,Y+42 - ldd r20,Y+58 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,166 - sbci r29,255 - st Y,r23 - subi r28,90 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r9,r18 - or r9,r19 - eor r9,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+10,r21 - mov r21,r9 - and r21,r24 - eor r21,r25 - subi r28,182 - sbci r29,255 - st Y,r21 - subi r28,74 - sbc r29,r1 - ldd r24,Y+11 - ldd r18,Y+27 - ldd r19,Y+43 - ldd r20,Y+59 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,165 - sbci r29,255 - st Y,r23 - subi r28,91 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r10,r18 - or r10,r19 - eor r10,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+11,r21 - mov r21,r10 - and r21,r24 - eor r21,r25 - subi r28,181 - sbci r29,255 - st Y,r21 - subi r28,75 - sbc r29,r1 - ldd r24,Y+12 - ldd r18,Y+28 - ldd r19,Y+44 - ldd r20,Y+60 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,164 - sbci r29,255 - st Y,r23 - subi r28,92 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r11,r18 - or r11,r19 - eor r11,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 
- and r21,r16 - eor r21,r24 - std Y+12,r21 - mov r21,r11 - and r21,r24 - eor r21,r25 - subi r28,180 - sbci r29,255 - st Y,r21 - subi r28,76 - sbc r29,r1 - ldd r24,Y+13 - ldd r18,Y+29 - ldd r19,Y+45 - ldd r20,Y+61 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,163 - sbci r29,255 - st Y,r23 - subi r28,93 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r12,r18 - or r12,r19 - eor r12,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+13,r21 - mov r21,r12 - and r21,r24 - eor r21,r25 - subi r28,179 - sbci r29,255 - st Y,r21 - subi r28,77 - sbc r29,r1 - ldd r24,Y+14 - ldd r18,Y+30 - ldd r19,Y+46 - ldd r20,Y+62 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,162 - sbci r29,255 - st Y,r23 - subi r28,94 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r13,r18 - or r13,r19 - eor r13,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+14,r21 - mov r21,r13 - and r21,r24 - eor r21,r25 - subi r28,178 - sbci r29,255 - st Y,r21 - subi r28,78 - sbc r29,r1 - ldd r24,Y+15 - ldd r18,Y+31 - ldd r19,Y+47 - ldd r20,Y+63 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,161 - sbci r29,255 - st Y,r23 - subi r28,95 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r14,r18 - or r14,r19 - eor r14,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+15,r21 - mov r21,r14 - and r21,r24 - eor r21,r25 - subi r28,177 - sbci r29,255 - st Y,r21 - subi r28,79 - sbc r29,r1 - ldd r24,Y+16 - ldd r18,Y+32 - ldd r19,Y+48 - subi r28,192 - sbci r29,255 - ld r20,Y - subi r28,64 - sbc r29,r1 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,160 - sbci r29,255 - st Y,r23 - subi r28,96 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r15,r18 - or r15,r19 - eor r15,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+16,r21 - mov r21,r15 - and r21,r24 - eor r21,r25 - subi r28,176 - sbci r29,255 - st Y,r21 - subi r28,80 - sbc r29,r1 - std Y+33,r14 - std Y+34,r15 - std Y+35,r26 - std Y+36,r27 - std Y+37,r2 - std Y+38,r3 - std Y+39,r4 - std Y+40,r5 - std Y+41,r6 - std Y+42,r7 - std Y+43,r8 - std Y+44,r9 - std Y+45,r10 - std Y+46,r11 - std Y+47,r12 - std Y+48,r13 - subi r28,191 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,80 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,96 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - adiw r28,49 - st Y+,r13 - st Y+,r14 - st Y+,r15 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 
- st Y,r12 - subi r28,64 - sbc r29,r1 - dec r22 - breq 5812f - rjmp 134b -5812: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r26,Y+17 - ldd r27,Y+18 - ldd r2,Y+19 - ldd r3,Y+20 - ldd r4,Y+21 - ldd r5,Y+22 - ldd r6,Y+23 - ldd r7,Y+24 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - std Z+16,r26 - std Z+17,r27 - std Z+18,r2 - std Z+19,r3 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - ldd r26,Y+33 - ldd r27,Y+34 - ldd r2,Y+35 - ldd r3,Y+36 - ldd r4,Y+37 - ldd r5,Y+38 - ldd r6,Y+39 - ldd r7,Y+40 - ldd r8,Y+41 - ldd r9,Y+42 - ldd r10,Y+43 - ldd r11,Y+44 - ldd r12,Y+45 - ldd r13,Y+46 - ldd r14,Y+47 - ldd r15,Y+48 - std Z+32,r26 - std Z+33,r27 - std Z+34,r2 - std Z+35,r3 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r8 - std Z+41,r9 - std Z+42,r10 - std Z+43,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - adiw r28,49 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,64 - sbc r29,r1 - std Z+48,r26 - std Z+49,r27 - std Z+50,r2 - std Z+51,r3 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - std Z+56,r8 - std Z+57,r9 - std Z+58,r10 - std Z+59,r11 - std Z+60,r12 - std Z+61,r13 - std Z+62,r14 - std Z+63,r15 - subi r28,160 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot512_permute_8, .-knot512_permute_8 - -#endif diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot.c b/knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot.c deleted file mode 100644 index f8b378e..0000000 --- a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot.c +++ /dev/null @@ -1,301 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-knot.h" - -#if !defined(__AVR__) - -/* Round constants for the KNOT-256, KNOT-384, and KNOT-512 permutations */ -static uint8_t const rc6[52] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06, 0x0c, 0x18, 0x31, 0x22, - 0x05, 0x0a, 0x14, 0x29, 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28, - 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24, 0x09, 0x12, 0x25, 0x0b, - 0x16, 0x2d, 0x1b, 0x37, 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26, - 0x0d, 0x1a, 0x35, 0x2a -}; -static uint8_t const rc7[104] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03, 0x06, 0x0c, 0x18, 0x30, - 0x61, 0x42, 0x05, 0x0a, 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c, - 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b, 0x16, 0x2c, 0x59, 0x33, - 0x67, 0x4e, 0x1d, 0x3a, 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f, - 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43, 0x07, 0x0e, 0x1c, 0x38, - 0x71, 0x62, 0x44, 0x09, 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36, - 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37, 0x6f, 0x5e, 0x3d, 0x7b, - 0x76, 0x6c, 0x58, 0x31, 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25, - 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c -}; -static uint8_t const rc8[140] = { - 0x01, 0x02, 0x04, 0x08, 0x11, 0x23, 0x47, 0x8e, 0x1c, 0x38, 0x71, 0xe2, - 0xc4, 0x89, 0x12, 0x25, 0x4b, 0x97, 0x2e, 0x5c, 0xb8, 0x70, 0xe0, 0xc0, - 0x81, 0x03, 0x06, 0x0c, 0x19, 0x32, 0x64, 0xc9, 0x92, 0x24, 0x49, 0x93, - 0x26, 0x4d, 0x9b, 0x37, 0x6e, 0xdc, 0xb9, 0x72, 0xe4, 0xc8, 0x90, 0x20, - 0x41, 0x82, 0x05, 0x0a, 0x15, 0x2b, 0x56, 0xad, 0x5b, 0xb6, 0x6d, 0xda, - 0xb5, 0x6b, 0xd6, 0xac, 0x59, 0xb2, 0x65, 0xcb, 0x96, 0x2c, 0x58, 0xb0, - 0x61, 0xc3, 0x87, 0x0f, 0x1f, 0x3e, 0x7d, 0xfb, 0xf6, 0xed, 0xdb, 0xb7, - 0x6f, 0xde, 0xbd, 0x7a, 0xf5, 0xeb, 0xd7, 0xae, 0x5d, 0xba, 0x74, 0xe8, - 0xd1, 0xa2, 0x44, 0x88, 0x10, 0x21, 0x43, 0x86, 0x0d, 0x1b, 0x36, 0x6c, - 0xd8, 0xb1, 0x63, 0xc7, 0x8f, 0x1e, 0x3c, 0x79, 0xf3, 0xe7, 0xce, 0x9c, - 0x39, 0x73, 0xe6, 0xcc, 0x98, 0x31, 0x62, 0xc5, 0x8b, 0x16, 0x2d, 0x5a, - 0xb4, 0x69, 0xd2, 0xa4, 0x48, 0x91, 0x22, 0x45 -}; - -/* Applies the KNOT S-box to four 64-bit words in bit-sliced mode */ -#define knot_sbox64(a0, a1, a2, a3, b1, b2, b3) \ - do { \ - uint64_t t1, t3, t6; \ - t1 = ~(a0); \ - t3 = (a2) ^ ((a1) & t1); \ - (b3) = (a3) ^ t3; \ - t6 = (a3) ^ t1; \ - (b2) = ((a1) | (a2)) ^ t6; \ - t1 = (a1) ^ (a3); \ - (a0) = t1 ^ (t3 & t6); \ - (b1) = t3 ^ ((b2) & t1); \ - } while (0) - -/* Applies the KNOT S-box to four 32-bit words in bit-sliced mode */ -#define knot_sbox32(a0, a1, a2, a3, b1, b2, b3) \ - do { \ - uint32_t t1, t3, t6; \ - t1 = ~(a0); \ - t3 = (a2) ^ ((a1) & t1); \ - (b3) = (a3) ^ t3; \ - t6 = (a3) ^ t1; \ - (b2) = ((a1) | (a2)) ^ t6; \ - t1 = (a1) ^ (a3); \ - (a0) = t1 ^ (t3 & t6); \ - (b1) = t3 ^ ((b2) & t1); \ - } while (0) - -static void knot256_permute - (knot256_state_t *state, const uint8_t *rc, uint8_t rounds) -{ - uint64_t b1, b2, b3; - - /* Load the input state into local variables; each row is 64 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = 
le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x1, x2, x3, b1, b2, b3); - - /* Linear diffusion layer */ - x1 = leftRotate1_64(b1); - x2 = leftRotate8_64(b2); - x3 = leftRotate25_64(b3); - } - - /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); -#endif -} - -void knot256_permute_6(knot256_state_t *state, uint8_t rounds) -{ - knot256_permute(state, rc6, rounds); -} - -void knot256_permute_7(knot256_state_t *state, uint8_t rounds) -{ - knot256_permute(state, rc7, rounds); -} - -void knot384_permute_7(knot384_state_t *state, uint8_t rounds) -{ - const uint8_t *rc = rc7; - uint64_t b2, b4, b6; - uint32_t b3, b5, b7; - - /* Load the input state into local variables; each row is 96 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint32_t x1 = state->W[2]; - uint64_t x2 = state->W[3] | (((uint64_t)(state->W[4])) << 32); - uint32_t x3 = state->W[5]; - uint64_t x4 = state->S[3]; - uint32_t x5 = state->W[8]; - uint64_t x6 = state->W[9] | (((uint64_t)(state->W[10])) << 32); - uint32_t x7 = state->W[11]; -#else - uint64_t x0 = le_load_word64(state->B); - uint32_t x1 = le_load_word32(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 12); - uint32_t x3 = le_load_word32(state->B + 20); - uint64_t x4 = le_load_word64(state->B + 24); - uint32_t x5 = le_load_word32(state->B + 32); - uint64_t x6 = le_load_word64(state->B + 36); - uint32_t x7 = le_load_word32(state->B + 44); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x2, x4, x6, b2, b4, b6); - knot_sbox32(x1, x3, x5, x7, b3, b5, b7); - - /* Linear diffusion layer */ - #define leftRotateShort_96(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | ((b1) >> (32 - (bits))); \ - (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ - } while (0) - #define leftRotateLong_96(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | \ - (((uint64_t)(b1)) << ((bits) - 32)) | \ - ((b0) >> (96 - (bits))); \ - (a1) = (uint32_t)(((b0) << ((bits) - 32)) >> 32); \ - } while (0) - leftRotateShort_96(x2, x3, b2, b3, 1); - leftRotateShort_96(x4, x5, b4, b5, 8); - leftRotateLong_96(x6, x7, b6, b7, 55); - } - - /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->W[2] = x1; - state->W[3] = (uint32_t)x2; - state->W[4] = (uint32_t)(x2 >> 32); - state->W[5] = x3; - state->S[3] = x4; - state->W[8] = x5; - state->W[9] = (uint32_t)x6; - state->W[10] = (uint32_t)(x6 >> 32); - state->W[11] = x7; -#else - le_store_word64(state->B, x0); - le_store_word32(state->B + 8, x1); - le_store_word64(state->B + 12, x2); - le_store_word32(state->B + 20, x3); - le_store_word64(state->B + 24, x4); - le_store_word32(state->B + 32, x5); - le_store_word64(state->B + 36, x6); - le_store_word32(state->B + 44, x7); -#endif -} - -static void knot512_permute - (knot512_state_t *state, const uint8_t *rc, uint8_t rounds) -{ - uint64_t b2, b3, b4, b5, b6, b7; - - /* Load 
the input state into local variables; each row is 128 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; - uint64_t x5 = state->S[5]; - uint64_t x6 = state->S[6]; - uint64_t x7 = state->S[7]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); - uint64_t x4 = le_load_word64(state->B + 32); - uint64_t x5 = le_load_word64(state->B + 40); - uint64_t x6 = le_load_word64(state->B + 48); - uint64_t x7 = le_load_word64(state->B + 56); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x2, x4, x6, b2, b4, b6); - knot_sbox64(x1, x3, x5, x7, b3, b5, b7); - - /* Linear diffusion layer */ - #define leftRotate_128(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | ((b1) >> (64 - (bits))); \ - (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ - } while (0) - leftRotate_128(x2, x3, b2, b3, 1); - leftRotate_128(x4, x5, b4, b5, 16); - leftRotate_128(x6, x7, b6, b7, 25); - } - - /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; - state->S[5] = x5; - state->S[6] = x6; - state->S[7] = x7; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); - le_store_word64(state->B + 32, x4); - le_store_word64(state->B + 40, x5); - le_store_word64(state->B + 48, x6); - le_store_word64(state->B + 56, x7); -#endif -} - -void knot512_permute_7(knot512_state_t *state, uint8_t rounds) -{ - knot512_permute(state, rc7, rounds); -} - -void knot512_permute_8(knot512_state_t *state, uint8_t rounds) -{ - knot512_permute(state, rc8, rounds); -} - -#endif /* !__AVR__ */ diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot.h b/knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot.h deleted file mode 100644 index 88a782c..0000000 --- a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-knot.h +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
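For reference, the round structure that the deleted knot256_permute() above implements is compact enough to restate in portable C: XOR the next round constant into row 0, apply the bit-sliced KNOT S-box across the four 64-bit rows, then rotate rows 1, 2 and 3 left by 1, 8 and 25 bits. The sketch below mirrors that logic; the helper names rotl64 and knot256_round_sketch are illustrative and do not exist in the removed files.

/* One KNOT-256 round, restated from the deleted knot256_permute().
 * Only the S-box equations and the rotation amounts (1, 8, 25) are
 * taken from the reference code; the helper names are hypothetical. */
#include <stdint.h>

static uint64_t rotl64(uint64_t x, unsigned bits)
{
    return (x << bits) | (x >> (64U - bits));
}

static void knot256_round_sketch(uint64_t x[4], uint8_t rc)
{
    uint64_t b1, b2, b3, t1, t3, t6;

    x[0] ^= rc;                      /* add the round constant to row 0 */

    /* Bit-sliced KNOT S-box applied to all 64 columns at once */
    t1 = ~x[0];
    t3 = x[2] ^ (x[1] & t1);
    b3 = x[3] ^ t3;
    t6 = x[3] ^ t1;
    b2 = (x[1] | x[2]) ^ t6;
    t1 = x[1] ^ x[3];
    x[0] = t1 ^ (t3 & t6);
    b1 = t3 ^ (b2 & t1);

    /* Linear diffusion layer: rotate rows 1..3 left by 1, 8 and 25 */
    x[1] = rotl64(b1, 1);
    x[2] = rotl64(b2, 8);
    x[3] = rotl64(b3, 25);
}

Iterating this 52 times over the rc6 constants listed above reproduces knot256_permute_6(); the 384- and 512-bit variants follow the same pattern with wider rows and different rotation amounts.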
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_KNOT_H -#define LW_INTERNAL_KNOT_H - -#include "internal-util.h" - -/** - * \file internal-knot.h - * \brief Permutations that are used by the KNOT AEAD and hash algorithms. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Internal state of the KNOT-256 permutation. - */ -typedef union -{ - uint64_t S[4]; /**< Words of the state */ - uint8_t B[32]; /**< Bytes of the state */ - -} knot256_state_t; - -/** - * \brief Internal state of the KNOT-384 permutation. - */ -typedef union -{ - uint64_t S[6]; /**< 64-bit words of the state */ - uint32_t W[12]; /**< 32-bit words of the state */ - uint8_t B[48]; /**< Bytes of the state */ - -} knot384_state_t; - -/** - * \brief Internal state of the KNOT-512 permutation. - */ -typedef union -{ - uint64_t S[8]; /**< Words of the state */ - uint8_t B[64]; /**< Bytes of the state */ - -} knot512_state_t; - -/** - * \brief Permutes the KNOT-256 state, using 6-bit round constants. - * - * \param state The KNOT-256 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 52. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot256_permute_6(knot256_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-256 state, using 7-bit round constants. - * - * \param state The KNOT-256 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot256_permute_7(knot256_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-384 state, using 7-bit round constants. - * - * \param state The KNOT-384 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot384_permute_7(knot384_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-512 state, using 7-bit round constants. - * - * \param state The KNOT-512 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot512_permute_7(knot512_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-512 state, using 8-bit round constants. - * - * \param state The KNOT-512 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 140. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot512_permute_8(knot512_state_t *state, uint8_t rounds); - -/** - * \brief Generic pointer to a function that performs a KNOT permutation. - * - * \param state Points to the permutation state. - * \param round Number of rounds to perform. - */ -typedef void (*knot_permute_t)(void *state, uint8_t rounds); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-util.h b/knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
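Because the header above documents the full calling convention (caller-owned union state, little-endian byte order, round counts of 1 to 52, 104 or 140 depending on the constant schedule), driving one of these permutations takes only a few lines. A minimal sketch, assuming the deleted internal-knot.h and internal-knot.c are still available on the include and link path:

/* Hedged usage sketch for the removed KNOT permutation API; it adds
 * no new interfaces, only exercises the ones declared above. */
#include <stdio.h>
#include <string.h>
#include "internal-knot.h"

int main(void)
{
    knot256_state_t state;

    /* The state bytes are interpreted in little-endian order. */
    memset(state.B, 0, sizeof(state.B));

    /* 52 is the maximum round count for the 6-bit constant schedule. */
    knot256_permute_6(&state, 52);

    for (unsigned i = 0; i < sizeof(state.B); ++i)
        printf("%02x", (unsigned)state.B[i]);
    printf("\n");
    return 0;
}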
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) 
| \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = 
(src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/knot-hash.c b/knot/Implementations/crypto_hash/knot256v1/rhys-avr/knot-hash.c deleted file mode 100644 index a4edecd..0000000 --- a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/knot-hash.c +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "knot.h" -#include "internal-knot.h" -#include - -aead_hash_algorithm_t const knot_hash_256_256_algorithm = { - "KNOT-HASH-256-256", - sizeof(int), - KNOT_HASH_256_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_hash_256_256, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const knot_hash_256_384_algorithm = { - "KNOT-HASH-256-384", - sizeof(int), - KNOT_HASH_256_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_hash_256_384, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const knot_hash_384_384_algorithm = { - "KNOT-HASH-384-384", - sizeof(int), - KNOT_HASH_384_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_hash_384_384, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const knot_hash_512_512_algorithm = { - "KNOT-HASH-512-512", - sizeof(int), - KNOT_HASH_512_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_hash_512_512, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \brief Input rate for KNOT-HASH-256-256. - */ -#define KNOT_HASH_256_256_RATE 4 - -/** - * \brief Input rate for KNOT-HASH-256-384. - */ -#define KNOT_HASH_256_384_RATE 16 - -/** - * \brief Input rate for KNOT-HASH-384-384. - */ -#define KNOT_HASH_384_384_RATE 6 - -/** - * \brief Input rate for KNOT-HASH-512-512. - */ -#define KNOT_HASH_512_512_RATE 8 - -int knot_hash_256_256 - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - knot256_state_t state; - unsigned temp; - memset(state.B, 0, sizeof(state.B)); - while (inlen >= KNOT_HASH_256_256_RATE) { - lw_xor_block(state.B, in, KNOT_HASH_256_256_RATE); - knot256_permute_7(&state, 68); - in += KNOT_HASH_256_256_RATE; - inlen -= KNOT_HASH_256_256_RATE; - } - temp = (unsigned)inlen; - lw_xor_block(state.B, in, temp); - state.B[temp] ^= 0x01; - knot256_permute_7(&state, 68); - memcpy(out, state.B, KNOT_HASH_256_SIZE / 2); - knot256_permute_7(&state, 68); - memcpy(out + KNOT_HASH_256_SIZE / 2, state.B, KNOT_HASH_256_SIZE / 2); - return 0; -} - -int knot_hash_256_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - knot384_state_t state; - unsigned temp; - memset(state.B, 0, sizeof(state.B)); - state.B[sizeof(state.B) - 1] ^= 0x80; - while (inlen >= KNOT_HASH_256_384_RATE) { - lw_xor_block(state.B, in, KNOT_HASH_256_384_RATE); - knot384_permute_7(&state, 80); - in += KNOT_HASH_256_384_RATE; - inlen -= KNOT_HASH_256_384_RATE; - } - temp = (unsigned)inlen; - lw_xor_block(state.B, in, temp); - state.B[temp] ^= 0x01; - knot384_permute_7(&state, 80); - memcpy(out, state.B, KNOT_HASH_256_SIZE / 2); - knot384_permute_7(&state, 80); - memcpy(out + KNOT_HASH_256_SIZE / 2, state.B, KNOT_HASH_256_SIZE / 2); - return 0; -} - -int knot_hash_384_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - knot384_state_t state; - unsigned temp; - memset(state.B, 0, sizeof(state.B)); - while (inlen >= KNOT_HASH_384_384_RATE) { - lw_xor_block(state.B, in, KNOT_HASH_384_384_RATE); - knot384_permute_7(&state, 104); - in += KNOT_HASH_384_384_RATE; - inlen -= KNOT_HASH_384_384_RATE; - } - temp = (unsigned)inlen; - lw_xor_block(state.B, in, temp); - state.B[temp] ^= 0x01; - knot384_permute_7(&state, 104); - memcpy(out, state.B, KNOT_HASH_384_SIZE / 
2); - knot384_permute_7(&state, 104); - memcpy(out + KNOT_HASH_384_SIZE / 2, state.B, KNOT_HASH_384_SIZE / 2); - return 0; -} - -int knot_hash_512_512 - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - knot512_state_t state; - unsigned temp; - memset(state.B, 0, sizeof(state.B)); - while (inlen >= KNOT_HASH_512_512_RATE) { - lw_xor_block(state.B, in, KNOT_HASH_512_512_RATE); - knot512_permute_8(&state, 140); - in += KNOT_HASH_512_512_RATE; - inlen -= KNOT_HASH_512_512_RATE; - } - temp = (unsigned)inlen; - lw_xor_block(state.B, in, temp); - state.B[temp] ^= 0x01; - knot512_permute_8(&state, 140); - memcpy(out, state.B, KNOT_HASH_512_SIZE / 2); - knot512_permute_8(&state, 140); - memcpy(out + KNOT_HASH_512_SIZE / 2, state.B, KNOT_HASH_512_SIZE / 2); - return 0; -} diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/knot.h b/knot/Implementations/crypto_hash/knot256v1/rhys-avr/knot.h deleted file mode 100644 index e2c5198..0000000 --- a/knot/Implementations/crypto_hash/knot256v1/rhys-avr/knot.h +++ /dev/null @@ -1,459 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_KNOT_H -#define LWCRYPTO_KNOT_H - -#include "aead-common.h" - -/** - * \file knot.h - * \brief KNOT authenticated encryption and hash algorithms. - * - * KNOT is a family of authenticated encryption and hash algorithms built - * around a permutation and the MonkeyDuplex sponge construction. The - * family members are: - * - * \li KNOT-AEAD-128-256 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag, built around a 256-bit permutation. This is the primary - * encryption member of the family. - * \li KNOT-AEAD-128-384 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag, built around a 384-bit permutation. - * \li KNOT-AEAD-192-384 with a 192-bit key, a 192-bit nonce, and a - * 192-bit tag, built around a 384-bit permutation. - * \li KNOT-AEAD-256-512 with a 256-bit key, a 256-bit nonce, and a - * 256-bit tag, built around a 512-bit permutation. - * \li KNOT-HASH-256-256 with a 256-bit hash output, built around a - * 256-bit permutation. This is the primary hashing member of the family. - * \li KNOT-HASH-256-384 with a 256-bit hash output, built around a - * 384-bit permutation. - * \li KNOT-HASH-384-384 with a 384-bit hash output, built around a - * 384-bit permutation. - * \li KNOT-HASH-512-512 with a 512-bit hash output, built around a - * 512-bit permutation. 
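The four one-shot hash functions in knot-hash.c share the same calling convention: output buffer, input buffer, input length, returning zero on success. A minimal host-side usage sketch, assuming knot.h and the corresponding KNOT sources are compiled and linked in:

/* Usage sketch for the one-shot KNOT hash API declared in knot.h.
 * Illustrative only; the digest size comes from knot.h. */
#include <stdio.h>
#include "knot.h"

int main(void)
{
    const unsigned char msg[3] = { 'a', 'b', 'c' };
    unsigned char digest[KNOT_HASH_256_SIZE];
    unsigned i;
    if (knot_hash_256_256(digest, msg, sizeof(msg)) != 0)
        return 1;
    for (i = 0; i < KNOT_HASH_256_SIZE; ++i)
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}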
- * - * References: https://csrc.nist.gov/CSRC/media/Projects/lightweight-cryptography/documents/round-2/spec-doc-rnd2/knot-spec-round.pdf - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-128-256 and - * KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_NONCE_SIZE 16 - -/** - * \brief Size of the key for KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_KEY_SIZE 24 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_TAG_SIZE 24 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_NONCE_SIZE 24 - -/** - * \brief Size of the key for KNOT-AEAD-256-512. - */ -#define KNOT_AEAD_256_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-256-512. - */ -#define KNOT_AEAD_256_TAG_SIZE 32 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_256_NONCE_SIZE 32 - -/** - * \brief Size of the hash for KNOT-HASH-256-256 and KNOT-HASH-256-384. - */ -#define KNOT_HASH_256_SIZE 32 - -/** - * \brief Size of the hash for KNOT-HASH-384-384. - */ -#define KNOT_HASH_384_SIZE 48 - -/** - * \brief Size of the hash for KNOT-HASH-512-512. - */ -#define KNOT_HASH_512_SIZE 64 - -/** - * \brief Meta-information block for the KNOT-AEAD-128-256 cipher. - */ -extern aead_cipher_t const knot_aead_128_256_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-128-384 cipher. - */ -extern aead_cipher_t const knot_aead_128_384_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-192-384 cipher. - */ -extern aead_cipher_t const knot_aead_192_384_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-256-512 cipher. - */ -extern aead_cipher_t const knot_aead_256_512_cipher; - -/** - * \brief Meta-information block for the KNOT-HASH-256-256 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_256_256_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-256-384 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_256_384_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-384-384 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_384_384_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-512-512 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_512_512_algorithm; - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. 
- * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_128_256_decrypt() - */ -int knot_aead_128_256_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_128_256_encrypt() - */ -int knot_aead_128_256_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-384. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_128_384_decrypt() - */ -int knot_aead_128_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-384. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
- * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_128_384_encrypt() - */ -int knot_aead_128_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-192-384. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_192_384_decrypt() - */ -int knot_aead_192_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-192-384. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_192_384_encrypt() - */ -int knot_aead_192_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-256-512. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. 
- * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_256_512_decrypt() - */ -int knot_aead_256_512_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-256-512. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_256_512_encrypt() - */ -int knot_aead_256_512_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with KNOT-HASH-256-256. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_256_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_256_256 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-256-384. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_256_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_256_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-384-384. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_384_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. 
- */ -int knot_hash_384_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-512-512. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_512_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_512_512 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys/aead-common.c b/knot/Implementations/crypto_hash/knot256v1/rhys/aead-common.c new file mode 100644 index 0000000..84fc53a --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v1/rhys/aead-common.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
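The AEAD prototypes in knot.h follow the usual NIST lightweight-crypto calling convention: the ciphertext buffer receives message plus tag, the output lengths are written through clen/mlen, and nsec is unused by these algorithms. A usage sketch for the primary family member, assuming the rhys KNOT sources are linked in; the all-zero key and nonce are for illustration only, real callers must use a unique nonce per message.

/* Encrypt a short message with KNOT-AEAD-128-256 and check that
 * decryption recovers it.  Illustrative values only. */
#include <string.h>
#include "knot.h"

int main(void)
{
    unsigned char key[KNOT_AEAD_128_KEY_SIZE] = { 0 };
    unsigned char nonce[KNOT_AEAD_128_NONCE_SIZE] = { 0 };
    const unsigned char msg[4] = { 'K', 'N', 'O', 'T' };
    const unsigned char ad[2] = { 'v', '1' };
    unsigned char ct[sizeof(msg) + KNOT_AEAD_128_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    if (knot_aead_128_256_encrypt(ct, &ctlen, msg, sizeof(msg),
                                  ad, sizeof(ad), 0, nonce, key) != 0)
        return 1;
    if (knot_aead_128_256_decrypt(pt, &ptlen, 0, ct, ctlen,
                                  ad, sizeof(ad), nonce, key) != 0)
        return 1;               /* tag mismatch or parameter error */
    return (ptlen == sizeof(msg) && memcmp(pt, msg, (size_t)ptlen) == 0) ? 0 : 1;
}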
+ */ + +#include "aead-common.h" + +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = (accum - 1) >> 8; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} + +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size, int precheck) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = ((accum - 1) >> 8) & precheck; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys/aead-common.h b/knot/Implementations/crypto_hash/knot256v1/rhys/aead-common.h new file mode 100644 index 0000000..2be95eb --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v1/rhys/aead-common.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_AEAD_COMMON_H +#define LWCRYPTO_AEAD_COMMON_H + +#include + +/** + * \file aead-common.h + * \brief Definitions that are common across AEAD schemes. + * + * AEAD stands for "Authenticated Encryption with Associated Data". + * It is a standard API pattern for securely encrypting and + * authenticating packets of data. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts and authenticates a packet with an AEAD scheme. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. 
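The masking in aead_check_tag() is worth spelling out: after the loop, accum is the OR of all byte differences, so it is zero exactly when the tags match and lies in 1..255 otherwise. (accum - 1) >> 8 then becomes an all-ones mask (-1) on a match and 0 on a mismatch (relying, as the code does, on arithmetic right shift of negative signed values), which both decides whether the plaintext is preserved or zeroed and whether ~accum returns 0 or -1. A tiny standalone demonstration of just the mask:

/* Worked demonstration of the (accum - 1) >> 8 trick from aead_check_tag().
 * For accum in 0..255: accum == 0 gives mask -1 (match), anything else 0. */
#include <stdio.h>

static int mask_from_accum(int accum)
{
    return (accum - 1) >> 8;
}

int main(void)
{
    printf("match:    mask=%d, return=%d\n", mask_from_accum(0),    ~mask_from_accum(0));
    printf("mismatch: mask=%d, return=%d\n", mask_from_accum(0x5a), ~mask_from_accum(0x5a));
    return 0;
}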
+ * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + */ +typedef int (*aead_cipher_encrypt_t) + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with an AEAD scheme. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + */ +typedef int (*aead_cipher_decrypt_t) + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data. + * + * \param out Buffer to receive the hash output. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +typedef int (*aead_hash_t) + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a hashing operation. + * + * \param state Hash state to be initialized. + */ +typedef void (*aead_hash_init_t)(void *state); + +/** + * \brief Updates a hash state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + */ +typedef void (*aead_hash_update_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Returns the final hash value from a hashing operation. + * + * \param Hash state to be finalized. + * \param out Points to the output buffer to receive the hash value. + */ +typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); + +/** + * \brief Aborbs more input data into an XOF state. + * + * \param state XOF state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. + * + * \sa ascon_xof_init(), ascon_xof_squeeze() + */ +typedef void (*aead_xof_absorb_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Squeezes output data from an XOF state. 
+ * + * \param state XOF state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + */ +typedef void (*aead_xof_squeeze_t) + (void *state, unsigned char *out, unsigned long long outlen); + +/** + * \brief No special AEAD features. + */ +#define AEAD_FLAG_NONE 0x0000 + +/** + * \brief The natural byte order of the AEAD cipher is little-endian. + * + * If this flag is not present, then the natural byte order of the + * AEAD cipher should be assumed to be big-endian. + * + * The natural byte order may be useful when formatting packet sequence + * numbers as nonces. The application needs to know whether the sequence + * number should be packed into the leading or trailing bytes of the nonce. + */ +#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 + +/** + * \brief Meta-information about an AEAD cipher. + */ +typedef struct +{ + const char *name; /**< Name of the cipher */ + unsigned key_len; /**< Length of the key in bytes */ + unsigned nonce_len; /**< Length of the nonce in bytes */ + unsigned tag_len; /**< Length of the tag in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ + aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ + +} aead_cipher_t; + +/** + * \brief Meta-information about a hash algorithm that is related to an AEAD. + * + * Regular hash algorithms should provide the "hash", "init", "update", + * and "finalize" functions. Extensible Output Functions (XOF's) should + * proivde the "hash", "init", "absorb", and "squeeze" functions. + */ +typedef struct +{ + const char *name; /**< Name of the hash algorithm */ + size_t state_size; /**< Size of the incremental state structure */ + unsigned hash_len; /**< Length of the hash in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_hash_t hash; /**< All in one hashing function */ + aead_hash_init_t init; /**< Incremental hash/XOF init function */ + aead_hash_update_t update; /**< Incremental hash update function */ + aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ + aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ + aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ + +} aead_hash_algorithm_t; + +/** + * \brief Check an authentication tag in constant time. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + */ +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len); + +/** + * \brief Check an authentication tag in constant time with a previous check. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
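The aead_cipher_t and aead_hash_algorithm_t blocks exist so that generic harness code can drive any algorithm through function pointers rather than hard-coded calls. A minimal sketch of that pattern, assuming knot.h and aead-common.h are on the include path and the KNOT sources are linked in; note that the KNOT hash blocks only populate the all-in-one hash entry, so the incremental init/update/finalize pointers must be checked for NULL before use.

/* Drive a hash algorithm through its meta-information block. */
#include <stdio.h>
#include "aead-common.h"
#include "knot.h"

static int hash_with(const aead_hash_algorithm_t *alg, unsigned char *out,
                     const unsigned char *in, unsigned long long inlen)
{
    printf("hashing with %s (%u-byte digest)\n", alg->name, alg->hash_len);
    if (!alg->hash)
        return -1;              /* algorithm offers no all-in-one entry */
    return alg->hash(out, in, inlen);
}

int main(void)
{
    const unsigned char data[4] = { 1, 2, 3, 4 };
    unsigned char digest[64];   /* large enough for any KNOT digest */
    return hash_with(&knot_hash_256_256_algorithm, digest, data, sizeof(data));
}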
+ * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + * + * This version can be used to incorporate other information about the + * correctness of the plaintext into the final result. + */ +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len, int precheck); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys/api.h b/knot/Implementations/crypto_hash/knot256v1/rhys/api.h new file mode 100644 index 0000000..ae8c7f6 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v1/rhys/api.h @@ -0,0 +1 @@ +#define CRYPTO_BYTES 32 diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys/hash.c b/knot/Implementations/crypto_hash/knot256v1/rhys/hash.c new file mode 100644 index 0000000..16409ba --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v1/rhys/hash.c @@ -0,0 +1,8 @@ + +#include "knot.h" + +int crypto_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + return knot_hash_256_256(out, in, inlen); +} diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot-256-avr.S b/knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot-256-avr.S new file mode 100644 index 0000000..15e6389 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot-256-avr.S @@ -0,0 +1,1093 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_6, @object + .size table_6, 52 +table_6: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 33 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 49 + .byte 34 + .byte 5 + .byte 10 + .byte 20 + .byte 41 + .byte 19 + .byte 39 + .byte 15 + .byte 30 + .byte 61 + .byte 58 + .byte 52 + .byte 40 + .byte 17 + .byte 35 + .byte 7 + .byte 14 + .byte 28 + .byte 57 + .byte 50 + .byte 36 + .byte 9 + .byte 18 + .byte 37 + .byte 11 + .byte 22 + .byte 45 + .byte 27 + .byte 55 + .byte 46 + .byte 29 + .byte 59 + .byte 54 + .byte 44 + .byte 25 + .byte 51 + .byte 38 + .byte 13 + .byte 26 + .byte 53 + .byte 42 + + .text +.global knot256_permute_6 + .type knot256_permute_6, @function +knot256_permute_6: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 57 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r8 + std Y+18,r9 + std Y+19,r10 + std Y+20,r11 + std 
Y+21,r12 + std Y+22,r13 + std Y+23,r14 + std Y+24,r15 + push r31 + push r30 + ldi r30,lo8(table_6) + ldi r31,hi8(table_6) +#if defined(RAMPZ) + ldi r17,hh8(table_6) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +59: +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + eor r18,r23 + inc r30 + ldd r23,Y+1 + ldd r4,Y+9 + ldd r5,Y+17 + mov r24,r18 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+33,r7 + mov r16,r5 + eor r16,r24 + mov r8,r23 + or r8,r4 + eor r8,r16 + mov r24,r23 + eor r24,r5 + mov r18,r25 + and r18,r16 + eor r18,r24 + mov r6,r8 + and r6,r24 + eor r6,r25 + std Y+25,r6 + ldd r23,Y+2 + ldd r4,Y+10 + ldd r5,Y+18 + mov r24,r19 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+34,r7 + mov r16,r5 + eor r16,r24 + mov r9,r23 + or r9,r4 + eor r9,r16 + mov r24,r23 + eor r24,r5 + mov r19,r25 + and r19,r16 + eor r19,r24 + mov r6,r9 + and r6,r24 + eor r6,r25 + std Y+26,r6 + ldd r23,Y+3 + ldd r4,Y+11 + ldd r5,Y+19 + mov r24,r20 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+35,r7 + mov r16,r5 + eor r16,r24 + mov r10,r23 + or r10,r4 + eor r10,r16 + mov r24,r23 + eor r24,r5 + mov r20,r25 + and r20,r16 + eor r20,r24 + mov r6,r10 + and r6,r24 + eor r6,r25 + std Y+27,r6 + ldd r23,Y+4 + ldd r4,Y+12 + ldd r5,Y+20 + mov r24,r21 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+36,r7 + mov r16,r5 + eor r16,r24 + mov r11,r23 + or r11,r4 + eor r11,r16 + mov r24,r23 + eor r24,r5 + mov r21,r25 + and r21,r16 + eor r21,r24 + mov r6,r11 + and r6,r24 + eor r6,r25 + std Y+28,r6 + ldd r23,Y+5 + ldd r4,Y+13 + ldd r5,Y+21 + mov r24,r26 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+37,r7 + mov r16,r5 + eor r16,r24 + mov r12,r23 + or r12,r4 + eor r12,r16 + mov r24,r23 + eor r24,r5 + mov r26,r25 + and r26,r16 + eor r26,r24 + mov r6,r12 + and r6,r24 + eor r6,r25 + std Y+29,r6 + ldd r23,Y+6 + ldd r4,Y+14 + ldd r5,Y+22 + mov r24,r27 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+38,r7 + mov r16,r5 + eor r16,r24 + mov r13,r23 + or r13,r4 + eor r13,r16 + mov r24,r23 + eor r24,r5 + mov r27,r25 + and r27,r16 + eor r27,r24 + mov r6,r13 + and r6,r24 + eor r6,r25 + std Y+30,r6 + ldd r23,Y+7 + ldd r4,Y+15 + ldd r5,Y+23 + mov r24,r2 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+39,r7 + mov r16,r5 + eor r16,r24 + mov r14,r23 + or r14,r4 + eor r14,r16 + mov r24,r23 + eor r24,r5 + mov r2,r25 + and r2,r16 + eor r2,r24 + mov r6,r14 + and r6,r24 + eor r6,r25 + std Y+31,r6 + ldd r23,Y+8 + ldd r4,Y+16 + ldd r5,Y+24 + mov r24,r3 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+40,r7 + mov r16,r5 + eor r16,r24 + mov r15,r23 + or r15,r4 + eor r15,r16 + mov r24,r23 + eor r24,r5 + mov r3,r25 + and r3,r16 + eor r3,r24 + mov r6,r15 + and r6,r24 + eor r6,r25 + std Y+32,r6 + std Y+9,r15 + std Y+10,r8 + std Y+11,r9 + std Y+12,r10 + std Y+13,r11 + std Y+14,r12 + std Y+15,r13 + std Y+16,r14 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Y+33 + ldd r9,Y+34 + ldd 
r10,Y+35 + ldd r11,Y+36 + ldd r12,Y+37 + ldd r13,Y+38 + ldd r14,Y+39 + ldd r15,Y+40 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+17,r13 + std Y+18,r14 + std Y+19,r15 + std Y+20,r8 + std Y+21,r9 + std Y+22,r10 + std Y+23,r11 + std Y+24,r12 + dec r22 + breq 5322f + rjmp 59b +5322: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + ldd r12,Y+5 + ldd r13,Y+6 + ldd r14,Y+7 + ldd r15,Y+8 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + ldd r8,Y+17 + ldd r9,Y+18 + ldd r10,Y+19 + ldd r11,Y+20 + ldd r12,Y+21 + ldd r13,Y+22 + ldd r14,Y+23 + ldd r15,Y+24 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + adiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot256_permute_6, .-knot256_permute_6 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 + .byte 92 + + .text +.global knot256_permute_7 + .type knot256_permute_7, @function +knot256_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 57 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Z+16 + ldd 
r9,Z+17 + ldd r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r8 + std Y+18,r9 + std Y+19,r10 + std Y+20,r11 + std Y+21,r12 + std Y+22,r13 + std Y+23,r14 + std Y+24,r15 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r17,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +59: +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + eor r18,r23 + inc r30 + ldd r23,Y+1 + ldd r4,Y+9 + ldd r5,Y+17 + mov r24,r18 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+33,r7 + mov r16,r5 + eor r16,r24 + mov r8,r23 + or r8,r4 + eor r8,r16 + mov r24,r23 + eor r24,r5 + mov r18,r25 + and r18,r16 + eor r18,r24 + mov r6,r8 + and r6,r24 + eor r6,r25 + std Y+25,r6 + ldd r23,Y+2 + ldd r4,Y+10 + ldd r5,Y+18 + mov r24,r19 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+34,r7 + mov r16,r5 + eor r16,r24 + mov r9,r23 + or r9,r4 + eor r9,r16 + mov r24,r23 + eor r24,r5 + mov r19,r25 + and r19,r16 + eor r19,r24 + mov r6,r9 + and r6,r24 + eor r6,r25 + std Y+26,r6 + ldd r23,Y+3 + ldd r4,Y+11 + ldd r5,Y+19 + mov r24,r20 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+35,r7 + mov r16,r5 + eor r16,r24 + mov r10,r23 + or r10,r4 + eor r10,r16 + mov r24,r23 + eor r24,r5 + mov r20,r25 + and r20,r16 + eor r20,r24 + mov r6,r10 + and r6,r24 + eor r6,r25 + std Y+27,r6 + ldd r23,Y+4 + ldd r4,Y+12 + ldd r5,Y+20 + mov r24,r21 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+36,r7 + mov r16,r5 + eor r16,r24 + mov r11,r23 + or r11,r4 + eor r11,r16 + mov r24,r23 + eor r24,r5 + mov r21,r25 + and r21,r16 + eor r21,r24 + mov r6,r11 + and r6,r24 + eor r6,r25 + std Y+28,r6 + ldd r23,Y+5 + ldd r4,Y+13 + ldd r5,Y+21 + mov r24,r26 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+37,r7 + mov r16,r5 + eor r16,r24 + mov r12,r23 + or r12,r4 + eor r12,r16 + mov r24,r23 + eor r24,r5 + mov r26,r25 + and r26,r16 + eor r26,r24 + mov r6,r12 + and r6,r24 + eor r6,r25 + std Y+29,r6 + ldd r23,Y+6 + ldd r4,Y+14 + ldd r5,Y+22 + mov r24,r27 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+38,r7 + mov r16,r5 + eor r16,r24 + mov r13,r23 + or r13,r4 + eor r13,r16 + mov r24,r23 + eor r24,r5 + mov r27,r25 + and r27,r16 + eor r27,r24 + mov r6,r13 + and r6,r24 + eor r6,r25 + std Y+30,r6 + ldd r23,Y+7 + ldd r4,Y+15 + ldd r5,Y+23 + mov r24,r2 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+39,r7 + mov r16,r5 + eor r16,r24 + mov r14,r23 + or r14,r4 + eor r14,r16 + mov r24,r23 + eor r24,r5 + mov r2,r25 + and r2,r16 + eor r2,r24 + mov r6,r14 + and r6,r24 + eor r6,r25 + std Y+31,r6 + ldd r23,Y+8 + ldd r4,Y+16 + ldd r5,Y+24 + mov r24,r3 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+40,r7 + mov r16,r5 + eor r16,r24 + mov r15,r23 + or r15,r4 + eor r15,r16 + mov r24,r23 + eor r24,r5 + mov r3,r25 + and r3,r16 + eor r3,r24 + mov r6,r15 + and r6,r24 + eor r6,r25 + std Y+32,r6 + std Y+9,r15 + std Y+10,r8 + std Y+11,r9 + std Y+12,r10 + std Y+13,r11 
+ std Y+14,r12 + std Y+15,r13 + std Y+16,r14 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd r11,Y+36 + ldd r12,Y+37 + ldd r13,Y+38 + ldd r14,Y+39 + ldd r15,Y+40 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+17,r13 + std Y+18,r14 + std Y+19,r15 + std Y+20,r8 + std Y+21,r9 + std Y+22,r10 + std Y+23,r11 + std Y+24,r12 + dec r22 + breq 5322f + rjmp 59b +5322: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + ldd r12,Y+5 + ldd r13,Y+6 + ldd r14,Y+7 + ldd r15,Y+8 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + ldd r8,Y+17 + ldd r9,Y+18 + ldd r10,Y+19 + ldd r11,Y+20 + ldd r12,Y+21 + ldd r13,Y+22 + ldd r14,Y+23 + ldd r15,Y+24 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + adiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot256_permute_7, .-knot256_permute_7 + +#endif diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot-384-avr.S b/knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot-384-avr.S new file mode 100644 index 0000000..4d15898 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot-384-avr.S @@ -0,0 +1,833 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 
+ .byte 110 + .byte 92 + + .text +.global knot384_permute_7 + .type knot384_permute_7, @function +knot384_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,72 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 87 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + ldd r4,Z+16 + ldd r5,Z+17 + ldd r6,Z+18 + ldd r7,Z+19 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + std Y+13,r26 + std Y+14,r27 + std Y+15,r2 + std Y+16,r3 + std Y+17,r4 + std Y+18,r5 + std Y+19,r6 + std Y+20,r7 + std Y+21,r8 + std Y+22,r9 + std Y+23,r10 + std Y+24,r11 + ldd r26,Z+24 + ldd r27,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r4,Z+28 + ldd r5,Z+29 + ldd r6,Z+30 + ldd r7,Z+31 + ldd r8,Z+32 + ldd r9,Z+33 + ldd r10,Z+34 + ldd r11,Z+35 + std Y+25,r26 + std Y+26,r27 + std Y+27,r2 + std Y+28,r3 + std Y+29,r4 + std Y+30,r5 + std Y+31,r6 + std Y+32,r7 + std Y+33,r8 + std Y+34,r9 + std Y+35,r10 + std Y+36,r11 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+37,r26 + std Y+38,r27 + std Y+39,r2 + std Y+40,r3 + std Y+41,r4 + std Y+42,r5 + std Y+43,r6 + std Y+44,r7 + std Y+45,r8 + std Y+46,r9 + std Y+47,r10 + std Y+48,r11 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r24,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif +99: + ldd r12,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + inc r30 + ldd r18,Y+13 + ldd r19,Y+25 + ldd r20,Y+37 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+61,r23 + mov r14,r20 + eor r14,r12 + mov r26,r18 + or r26,r19 + eor r26,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+1,r21 + mov r21,r26 + and r21,r12 + eor r21,r13 + std Y+49,r21 + ldd r12,Y+2 + ldd r18,Y+14 + ldd r19,Y+26 + ldd r20,Y+38 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+62,r23 + mov r14,r20 + eor r14,r12 + mov r27,r18 + or r27,r19 + eor r27,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+2,r21 + mov r21,r27 + and r21,r12 + eor r21,r13 + std Y+50,r21 + ldd r12,Y+3 + ldd r18,Y+15 + ldd r19,Y+27 + ldd r20,Y+39 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+63,r23 + mov r14,r20 + eor r14,r12 + mov r2,r18 + or r2,r19 + eor r2,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+3,r21 + mov r21,r2 + and r21,r12 + eor r21,r13 + std Y+51,r21 + ldd r12,Y+4 + ldd r18,Y+16 + ldd r19,Y+28 + ldd r20,Y+40 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,192 + sbci r29,255 + st Y,r23 + subi r28,64 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r3,r18 + or r3,r19 + eor r3,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 
+ and r21,r14 + eor r21,r12 + std Y+4,r21 + mov r21,r3 + and r21,r12 + eor r21,r13 + std Y+52,r21 + ldd r12,Y+5 + ldd r18,Y+17 + ldd r19,Y+29 + ldd r20,Y+41 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,191 + sbci r29,255 + st Y,r23 + subi r28,65 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r4,r18 + or r4,r19 + eor r4,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+5,r21 + mov r21,r4 + and r21,r12 + eor r21,r13 + std Y+53,r21 + ldd r12,Y+6 + ldd r18,Y+18 + ldd r19,Y+30 + ldd r20,Y+42 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,190 + sbci r29,255 + st Y,r23 + subi r28,66 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r5,r18 + or r5,r19 + eor r5,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+6,r21 + mov r21,r5 + and r21,r12 + eor r21,r13 + std Y+54,r21 + ldd r12,Y+7 + ldd r18,Y+19 + ldd r19,Y+31 + ldd r20,Y+43 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,189 + sbci r29,255 + st Y,r23 + subi r28,67 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r6,r18 + or r6,r19 + eor r6,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+7,r21 + mov r21,r6 + and r21,r12 + eor r21,r13 + std Y+55,r21 + ldd r12,Y+8 + ldd r18,Y+20 + ldd r19,Y+32 + ldd r20,Y+44 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,188 + sbci r29,255 + st Y,r23 + subi r28,68 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r7,r18 + or r7,r19 + eor r7,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+8,r21 + mov r21,r7 + and r21,r12 + eor r21,r13 + std Y+56,r21 + ldd r12,Y+9 + ldd r18,Y+21 + ldd r19,Y+33 + ldd r20,Y+45 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,187 + sbci r29,255 + st Y,r23 + subi r28,69 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r8,r18 + or r8,r19 + eor r8,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+9,r21 + mov r21,r8 + and r21,r12 + eor r21,r13 + std Y+57,r21 + ldd r12,Y+10 + ldd r18,Y+22 + ldd r19,Y+34 + ldd r20,Y+46 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,186 + sbci r29,255 + st Y,r23 + subi r28,70 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r9,r18 + or r9,r19 + eor r9,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+10,r21 + mov r21,r9 + and r21,r12 + eor r21,r13 + std Y+58,r21 + ldd r12,Y+11 + ldd r18,Y+23 + ldd r19,Y+35 + ldd r20,Y+47 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,185 + sbci r29,255 + st Y,r23 + subi r28,71 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r10,r18 + or r10,r19 + eor r10,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+11,r21 + mov r21,r10 + and r21,r12 + eor r21,r13 + std Y+59,r21 + ldd r12,Y+12 + ldd r18,Y+24 + ldd r19,Y+36 + ldd r20,Y+48 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,184 + sbci r29,255 + st Y,r23 + subi r28,72 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r11,r18 + or r11,r19 + eor r11,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+12,r21 + mov r21,r11 + and r21,r12 + eor r21,r13 + std Y+60,r21 + std Y+25,r11 + std Y+26,r26 + std Y+27,r27 + std Y+28,r2 + std Y+29,r3 + std Y+30,r4 + std Y+31,r5 + std Y+32,r6 + std Y+33,r7 + std 
Y+34,r8 + std Y+35,r9 + std Y+36,r10 + ldd r26,Y+49 + ldd r27,Y+50 + ldd r2,Y+51 + ldd r3,Y+52 + ldd r4,Y+53 + ldd r5,Y+54 + ldd r6,Y+55 + ldd r7,Y+56 + ldd r8,Y+57 + ldd r9,Y+58 + ldd r10,Y+59 + ldd r11,Y+60 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + adc r26,r1 + std Y+13,r26 + std Y+14,r27 + std Y+15,r2 + std Y+16,r3 + std Y+17,r4 + std Y+18,r5 + std Y+19,r6 + std Y+20,r7 + std Y+21,r8 + std Y+22,r9 + std Y+23,r10 + std Y+24,r11 + adiw r28,61 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y + subi r28,72 + sbc r29,r1 + bst r26,0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r3 + ror r2 + ror r27 + ror r26 + bld r11,7 + std Y+37,r5 + std Y+38,r6 + std Y+39,r7 + std Y+40,r8 + std Y+41,r9 + std Y+42,r10 + std Y+43,r11 + std Y+44,r26 + std Y+45,r27 + std Y+46,r2 + std Y+47,r3 + std Y+48,r4 + dec r22 + breq 5542f + rjmp 99b +5542: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r2,Y+15 + ldd r3,Y+16 + ldd r4,Y+17 + ldd r5,Y+18 + ldd r6,Y+19 + ldd r7,Y+20 + ldd r8,Y+21 + ldd r9,Y+22 + ldd r10,Y+23 + ldd r11,Y+24 + std Z+12,r26 + std Z+13,r27 + std Z+14,r2 + std Z+15,r3 + std Z+16,r4 + std Z+17,r5 + std Z+18,r6 + std Z+19,r7 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + ldd r26,Y+25 + ldd r27,Y+26 + ldd r2,Y+27 + ldd r3,Y+28 + ldd r4,Y+29 + ldd r5,Y+30 + ldd r6,Y+31 + ldd r7,Y+32 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd r11,Y+36 + std Z+24,r26 + std Z+25,r27 + std Z+26,r2 + std Z+27,r3 + std Z+28,r4 + std Z+29,r5 + std Z+30,r6 + std Z+31,r7 + std Z+32,r8 + std Z+33,r9 + std Z+34,r10 + std Z+35,r11 + ldd r26,Y+37 + ldd r27,Y+38 + ldd r2,Y+39 + ldd r3,Y+40 + ldd r4,Y+41 + ldd r5,Y+42 + ldd r6,Y+43 + ldd r7,Y+44 + ldd r8,Y+45 + ldd r9,Y+46 + ldd r10,Y+47 + ldd r11,Y+48 + std Z+36,r26 + std Z+37,r27 + std Z+38,r2 + std Z+39,r3 + std Z+40,r4 + std Z+41,r5 + std Z+42,r6 + std Z+43,r7 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + subi r28,184 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot384_permute_7, .-knot384_permute_7 + +#endif diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot-512-avr.S b/knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot-512-avr.S new file mode 100644 index 0000000..6f92ac3 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot-512-avr.S @@ -0,0 +1,2315 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + 
.byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 + .byte 92 + + .text +.global knot512_permute_7 + .type knot512_permute_7, @function +knot512_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,96 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 113 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r26,Z+16 + ldd r27,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + ldd r26,Z+32 + ldd r27,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r8,Z+40 + ldd r9,Z+41 + ldd r10,Z+42 + ldd r11,Z+43 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + std Y+33,r26 + std Y+34,r27 + std Y+35,r2 + std Y+36,r3 + std Y+37,r4 + std Y+38,r5 + std Y+39,r6 + std Y+40,r7 + std Y+41,r8 + std Y+42,r9 + std Y+43,r10 + std Y+44,r11 + std Y+45,r12 + std Y+46,r13 + std Y+47,r14 + std Y+48,r15 + ldd r26,Z+48 + ldd r27,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r8,Z+56 + ldd r9,Z+57 + ldd r10,Z+58 + ldd r11,Z+59 + ldd r12,Z+60 + ldd r13,Z+61 + ldd r14,Z+62 + ldd r15,Z+63 + adiw r28,49 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y+,r12 + st Y+,r13 + st Y+,r14 + st Y,r15 + subi r28,64 + sbc r29,r1 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r17,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +134: + ldd r24,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r24,r18 + inc r30 + ldd r18,Y+17 + ldd r19,Y+33 + ldd 
r20,Y+49 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,175 + sbci r29,255 + st Y,r23 + subi r28,81 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r26,r18 + or r26,r19 + eor r26,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+1,r21 + mov r21,r26 + and r21,r24 + eor r21,r25 + subi r28,191 + sbci r29,255 + st Y,r21 + subi r28,65 + sbc r29,r1 + ldd r24,Y+2 + ldd r18,Y+18 + ldd r19,Y+34 + ldd r20,Y+50 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,174 + sbci r29,255 + st Y,r23 + subi r28,82 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r27,r18 + or r27,r19 + eor r27,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+2,r21 + mov r21,r27 + and r21,r24 + eor r21,r25 + subi r28,190 + sbci r29,255 + st Y,r21 + subi r28,66 + sbc r29,r1 + ldd r24,Y+3 + ldd r18,Y+19 + ldd r19,Y+35 + ldd r20,Y+51 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,173 + sbci r29,255 + st Y,r23 + subi r28,83 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r2,r18 + or r2,r19 + eor r2,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+3,r21 + mov r21,r2 + and r21,r24 + eor r21,r25 + subi r28,189 + sbci r29,255 + st Y,r21 + subi r28,67 + sbc r29,r1 + ldd r24,Y+4 + ldd r18,Y+20 + ldd r19,Y+36 + ldd r20,Y+52 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,172 + sbci r29,255 + st Y,r23 + subi r28,84 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r3,r18 + or r3,r19 + eor r3,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+4,r21 + mov r21,r3 + and r21,r24 + eor r21,r25 + subi r28,188 + sbci r29,255 + st Y,r21 + subi r28,68 + sbc r29,r1 + ldd r24,Y+5 + ldd r18,Y+21 + ldd r19,Y+37 + ldd r20,Y+53 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,171 + sbci r29,255 + st Y,r23 + subi r28,85 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r4,r18 + or r4,r19 + eor r4,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+5,r21 + mov r21,r4 + and r21,r24 + eor r21,r25 + subi r28,187 + sbci r29,255 + st Y,r21 + subi r28,69 + sbc r29,r1 + ldd r24,Y+6 + ldd r18,Y+22 + ldd r19,Y+38 + ldd r20,Y+54 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,170 + sbci r29,255 + st Y,r23 + subi r28,86 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r5,r18 + or r5,r19 + eor r5,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+6,r21 + mov r21,r5 + and r21,r24 + eor r21,r25 + subi r28,186 + sbci r29,255 + st Y,r21 + subi r28,70 + sbc r29,r1 + ldd r24,Y+7 + ldd r18,Y+23 + ldd r19,Y+39 + ldd r20,Y+55 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,169 + sbci r29,255 + st Y,r23 + subi r28,87 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r6,r18 + or r6,r19 + eor r6,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+7,r21 + mov r21,r6 + and r21,r24 + eor r21,r25 + subi r28,185 + sbci r29,255 + st Y,r21 + subi r28,71 + sbc r29,r1 + ldd r24,Y+8 + ldd r18,Y+24 + ldd r19,Y+40 + ldd r20,Y+56 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,168 + sbci r29,255 + st Y,r23 + subi r28,88 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r7,r18 + or r7,r19 + eor r7,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor 
r21,r24 + std Y+8,r21 + mov r21,r7 + and r21,r24 + eor r21,r25 + subi r28,184 + sbci r29,255 + st Y,r21 + subi r28,72 + sbc r29,r1 + ldd r24,Y+9 + ldd r18,Y+25 + ldd r19,Y+41 + ldd r20,Y+57 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,167 + sbci r29,255 + st Y,r23 + subi r28,89 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r8,r18 + or r8,r19 + eor r8,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+9,r21 + mov r21,r8 + and r21,r24 + eor r21,r25 + subi r28,183 + sbci r29,255 + st Y,r21 + subi r28,73 + sbc r29,r1 + ldd r24,Y+10 + ldd r18,Y+26 + ldd r19,Y+42 + ldd r20,Y+58 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,166 + sbci r29,255 + st Y,r23 + subi r28,90 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r9,r18 + or r9,r19 + eor r9,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+10,r21 + mov r21,r9 + and r21,r24 + eor r21,r25 + subi r28,182 + sbci r29,255 + st Y,r21 + subi r28,74 + sbc r29,r1 + ldd r24,Y+11 + ldd r18,Y+27 + ldd r19,Y+43 + ldd r20,Y+59 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,165 + sbci r29,255 + st Y,r23 + subi r28,91 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r10,r18 + or r10,r19 + eor r10,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+11,r21 + mov r21,r10 + and r21,r24 + eor r21,r25 + subi r28,181 + sbci r29,255 + st Y,r21 + subi r28,75 + sbc r29,r1 + ldd r24,Y+12 + ldd r18,Y+28 + ldd r19,Y+44 + ldd r20,Y+60 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,164 + sbci r29,255 + st Y,r23 + subi r28,92 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r11,r18 + or r11,r19 + eor r11,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+12,r21 + mov r21,r11 + and r21,r24 + eor r21,r25 + subi r28,180 + sbci r29,255 + st Y,r21 + subi r28,76 + sbc r29,r1 + ldd r24,Y+13 + ldd r18,Y+29 + ldd r19,Y+45 + ldd r20,Y+61 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,163 + sbci r29,255 + st Y,r23 + subi r28,93 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r12,r18 + or r12,r19 + eor r12,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+13,r21 + mov r21,r12 + and r21,r24 + eor r21,r25 + subi r28,179 + sbci r29,255 + st Y,r21 + subi r28,77 + sbc r29,r1 + ldd r24,Y+14 + ldd r18,Y+30 + ldd r19,Y+46 + ldd r20,Y+62 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,162 + sbci r29,255 + st Y,r23 + subi r28,94 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r13,r18 + or r13,r19 + eor r13,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+14,r21 + mov r21,r13 + and r21,r24 + eor r21,r25 + subi r28,178 + sbci r29,255 + st Y,r21 + subi r28,78 + sbc r29,r1 + ldd r24,Y+15 + ldd r18,Y+31 + ldd r19,Y+47 + ldd r20,Y+63 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,161 + sbci r29,255 + st Y,r23 + subi r28,95 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r14,r18 + or r14,r19 + eor r14,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+15,r21 + mov r21,r14 + and r21,r24 + eor r21,r25 + subi r28,177 + sbci r29,255 + st Y,r21 + subi r28,79 + sbc r29,r1 + ldd r24,Y+16 + ldd r18,Y+32 + ldd r19,Y+48 + subi r28,192 + sbci r29,255 + ld r20,Y + subi r28,64 + sbc r29,r1 + com r24 + mov 
r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,160 + sbci r29,255 + st Y,r23 + subi r28,96 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r15,r18 + or r15,r19 + eor r15,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+16,r21 + mov r21,r15 + and r21,r24 + eor r21,r25 + subi r28,176 + sbci r29,255 + st Y,r21 + subi r28,80 + sbc r29,r1 + std Y+33,r14 + std Y+34,r15 + std Y+35,r26 + std Y+36,r27 + std Y+37,r2 + std Y+38,r3 + std Y+39,r4 + std Y+40,r5 + std Y+41,r6 + std Y+42,r7 + std Y+43,r8 + std Y+44,r9 + std Y+45,r10 + std Y+46,r11 + std Y+47,r12 + std Y+48,r13 + subi r28,191 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,80 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,96 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + adiw r28,49 + st Y+,r13 + st Y+,r14 + st Y+,r15 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y,r12 + subi r28,64 + sbc r29,r1 + dec r22 + breq 5812f + rjmp 134b +5812: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r26,Y+17 + ldd r27,Y+18 + ldd r2,Y+19 + ldd r3,Y+20 + ldd r4,Y+21 + ldd r5,Y+22 + ldd r6,Y+23 + ldd r7,Y+24 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + std Z+16,r26 + std Z+17,r27 + std Z+18,r2 + std Z+19,r3 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + ldd r26,Y+33 + ldd r27,Y+34 + ldd r2,Y+35 + ldd r3,Y+36 + ldd r4,Y+37 + ldd r5,Y+38 + ldd r6,Y+39 + ldd r7,Y+40 + ldd r8,Y+41 + ldd r9,Y+42 + ldd r10,Y+43 + ldd r11,Y+44 + ldd r12,Y+45 + ldd r13,Y+46 + ldd r14,Y+47 + ldd r15,Y+48 + std Z+32,r26 + std Z+33,r27 + std Z+34,r2 + std Z+35,r3 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r8 + std Z+41,r9 + std Z+42,r10 + std Z+43,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + adiw r28,49 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ 
+ ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,64 + sbc r29,r1 + std Z+48,r26 + std Z+49,r27 + std Z+50,r2 + std Z+51,r3 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + std Z+56,r8 + std Z+57,r9 + std Z+58,r10 + std Z+59,r11 + std Z+60,r12 + std Z+61,r13 + std Z+62,r14 + std Z+63,r15 + subi r28,160 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot512_permute_7, .-knot512_permute_7 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_8, @object + .size table_8, 140 +table_8: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 17 + .byte 35 + .byte 71 + .byte 142 + .byte 28 + .byte 56 + .byte 113 + .byte 226 + .byte 196 + .byte 137 + .byte 18 + .byte 37 + .byte 75 + .byte 151 + .byte 46 + .byte 92 + .byte 184 + .byte 112 + .byte 224 + .byte 192 + .byte 129 + .byte 3 + .byte 6 + .byte 12 + .byte 25 + .byte 50 + .byte 100 + .byte 201 + .byte 146 + .byte 36 + .byte 73 + .byte 147 + .byte 38 + .byte 77 + .byte 155 + .byte 55 + .byte 110 + .byte 220 + .byte 185 + .byte 114 + .byte 228 + .byte 200 + .byte 144 + .byte 32 + .byte 65 + .byte 130 + .byte 5 + .byte 10 + .byte 21 + .byte 43 + .byte 86 + .byte 173 + .byte 91 + .byte 182 + .byte 109 + .byte 218 + .byte 181 + .byte 107 + .byte 214 + .byte 172 + .byte 89 + .byte 178 + .byte 101 + .byte 203 + .byte 150 + .byte 44 + .byte 88 + .byte 176 + .byte 97 + .byte 195 + .byte 135 + .byte 15 + .byte 31 + .byte 62 + .byte 125 + .byte 251 + .byte 246 + .byte 237 + .byte 219 + .byte 183 + .byte 111 + .byte 222 + .byte 189 + .byte 122 + .byte 245 + .byte 235 + .byte 215 + .byte 174 + .byte 93 + .byte 186 + .byte 116 + .byte 232 + .byte 209 + .byte 162 + .byte 68 + .byte 136 + .byte 16 + .byte 33 + .byte 67 + .byte 134 + .byte 13 + .byte 27 + .byte 54 + .byte 108 + .byte 216 + .byte 177 + .byte 99 + .byte 199 + .byte 143 + .byte 30 + .byte 60 + .byte 121 + .byte 243 + .byte 231 + .byte 206 + .byte 156 + .byte 57 + .byte 115 + .byte 230 + .byte 204 + .byte 152 + .byte 49 + .byte 98 + .byte 197 + .byte 139 + .byte 22 + .byte 45 + .byte 90 + .byte 180 + .byte 105 + .byte 210 + .byte 164 + .byte 72 + .byte 145 + .byte 34 + .byte 69 + + .text +.global knot512_permute_8 + .type knot512_permute_8, @function +knot512_permute_8: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,96 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 113 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r26,Z+16 + ldd r27,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std 
Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + ldd r26,Z+32 + ldd r27,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r8,Z+40 + ldd r9,Z+41 + ldd r10,Z+42 + ldd r11,Z+43 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + std Y+33,r26 + std Y+34,r27 + std Y+35,r2 + std Y+36,r3 + std Y+37,r4 + std Y+38,r5 + std Y+39,r6 + std Y+40,r7 + std Y+41,r8 + std Y+42,r9 + std Y+43,r10 + std Y+44,r11 + std Y+45,r12 + std Y+46,r13 + std Y+47,r14 + std Y+48,r15 + ldd r26,Z+48 + ldd r27,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r8,Z+56 + ldd r9,Z+57 + ldd r10,Z+58 + ldd r11,Z+59 + ldd r12,Z+60 + ldd r13,Z+61 + ldd r14,Z+62 + ldd r15,Z+63 + adiw r28,49 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y+,r12 + st Y+,r13 + st Y+,r14 + st Y,r15 + subi r28,64 + sbc r29,r1 + push r31 + push r30 + ldi r30,lo8(table_8) + ldi r31,hi8(table_8) +#if defined(RAMPZ) + ldi r17,hh8(table_8) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +134: + ldd r24,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r24,r18 + inc r30 + ldd r18,Y+17 + ldd r19,Y+33 + ldd r20,Y+49 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,175 + sbci r29,255 + st Y,r23 + subi r28,81 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r26,r18 + or r26,r19 + eor r26,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+1,r21 + mov r21,r26 + and r21,r24 + eor r21,r25 + subi r28,191 + sbci r29,255 + st Y,r21 + subi r28,65 + sbc r29,r1 + ldd r24,Y+2 + ldd r18,Y+18 + ldd r19,Y+34 + ldd r20,Y+50 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,174 + sbci r29,255 + st Y,r23 + subi r28,82 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r27,r18 + or r27,r19 + eor r27,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+2,r21 + mov r21,r27 + and r21,r24 + eor r21,r25 + subi r28,190 + sbci r29,255 + st Y,r21 + subi r28,66 + sbc r29,r1 + ldd r24,Y+3 + ldd r18,Y+19 + ldd r19,Y+35 + ldd r20,Y+51 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,173 + sbci r29,255 + st Y,r23 + subi r28,83 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r2,r18 + or r2,r19 + eor r2,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+3,r21 + mov r21,r2 + and r21,r24 + eor r21,r25 + subi r28,189 + sbci r29,255 + st Y,r21 + subi r28,67 + sbc r29,r1 + ldd r24,Y+4 + ldd r18,Y+20 + ldd r19,Y+36 + ldd r20,Y+52 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,172 + sbci r29,255 + st Y,r23 + subi r28,84 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r3,r18 + or r3,r19 + eor r3,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+4,r21 + mov r21,r3 + and r21,r24 + eor r21,r25 + subi r28,188 + sbci r29,255 + st Y,r21 + subi r28,68 + sbc r29,r1 + ldd r24,Y+5 + ldd r18,Y+21 + ldd r19,Y+37 + ldd r20,Y+53 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,171 + sbci r29,255 + st Y,r23 + subi r28,85 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r4,r18 + or r4,r19 + eor r4,r16 + mov 
r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+5,r21 + mov r21,r4 + and r21,r24 + eor r21,r25 + subi r28,187 + sbci r29,255 + st Y,r21 + subi r28,69 + sbc r29,r1 + ldd r24,Y+6 + ldd r18,Y+22 + ldd r19,Y+38 + ldd r20,Y+54 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,170 + sbci r29,255 + st Y,r23 + subi r28,86 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r5,r18 + or r5,r19 + eor r5,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+6,r21 + mov r21,r5 + and r21,r24 + eor r21,r25 + subi r28,186 + sbci r29,255 + st Y,r21 + subi r28,70 + sbc r29,r1 + ldd r24,Y+7 + ldd r18,Y+23 + ldd r19,Y+39 + ldd r20,Y+55 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,169 + sbci r29,255 + st Y,r23 + subi r28,87 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r6,r18 + or r6,r19 + eor r6,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+7,r21 + mov r21,r6 + and r21,r24 + eor r21,r25 + subi r28,185 + sbci r29,255 + st Y,r21 + subi r28,71 + sbc r29,r1 + ldd r24,Y+8 + ldd r18,Y+24 + ldd r19,Y+40 + ldd r20,Y+56 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,168 + sbci r29,255 + st Y,r23 + subi r28,88 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r7,r18 + or r7,r19 + eor r7,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+8,r21 + mov r21,r7 + and r21,r24 + eor r21,r25 + subi r28,184 + sbci r29,255 + st Y,r21 + subi r28,72 + sbc r29,r1 + ldd r24,Y+9 + ldd r18,Y+25 + ldd r19,Y+41 + ldd r20,Y+57 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,167 + sbci r29,255 + st Y,r23 + subi r28,89 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r8,r18 + or r8,r19 + eor r8,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+9,r21 + mov r21,r8 + and r21,r24 + eor r21,r25 + subi r28,183 + sbci r29,255 + st Y,r21 + subi r28,73 + sbc r29,r1 + ldd r24,Y+10 + ldd r18,Y+26 + ldd r19,Y+42 + ldd r20,Y+58 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,166 + sbci r29,255 + st Y,r23 + subi r28,90 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r9,r18 + or r9,r19 + eor r9,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+10,r21 + mov r21,r9 + and r21,r24 + eor r21,r25 + subi r28,182 + sbci r29,255 + st Y,r21 + subi r28,74 + sbc r29,r1 + ldd r24,Y+11 + ldd r18,Y+27 + ldd r19,Y+43 + ldd r20,Y+59 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,165 + sbci r29,255 + st Y,r23 + subi r28,91 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r10,r18 + or r10,r19 + eor r10,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+11,r21 + mov r21,r10 + and r21,r24 + eor r21,r25 + subi r28,181 + sbci r29,255 + st Y,r21 + subi r28,75 + sbc r29,r1 + ldd r24,Y+12 + ldd r18,Y+28 + ldd r19,Y+44 + ldd r20,Y+60 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,164 + sbci r29,255 + st Y,r23 + subi r28,92 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r11,r18 + or r11,r19 + eor r11,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+12,r21 + mov r21,r11 + and r21,r24 + eor r21,r25 + subi r28,180 + sbci r29,255 + st Y,r21 + subi r28,76 + sbc r29,r1 + ldd r24,Y+13 + ldd r18,Y+29 + ldd r19,Y+45 + ldd r20,Y+61 + com r24 + mov r25,r18 + and 
r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,163 + sbci r29,255 + st Y,r23 + subi r28,93 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r12,r18 + or r12,r19 + eor r12,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+13,r21 + mov r21,r12 + and r21,r24 + eor r21,r25 + subi r28,179 + sbci r29,255 + st Y,r21 + subi r28,77 + sbc r29,r1 + ldd r24,Y+14 + ldd r18,Y+30 + ldd r19,Y+46 + ldd r20,Y+62 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,162 + sbci r29,255 + st Y,r23 + subi r28,94 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r13,r18 + or r13,r19 + eor r13,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+14,r21 + mov r21,r13 + and r21,r24 + eor r21,r25 + subi r28,178 + sbci r29,255 + st Y,r21 + subi r28,78 + sbc r29,r1 + ldd r24,Y+15 + ldd r18,Y+31 + ldd r19,Y+47 + ldd r20,Y+63 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,161 + sbci r29,255 + st Y,r23 + subi r28,95 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r14,r18 + or r14,r19 + eor r14,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+15,r21 + mov r21,r14 + and r21,r24 + eor r21,r25 + subi r28,177 + sbci r29,255 + st Y,r21 + subi r28,79 + sbc r29,r1 + ldd r24,Y+16 + ldd r18,Y+32 + ldd r19,Y+48 + subi r28,192 + sbci r29,255 + ld r20,Y + subi r28,64 + sbc r29,r1 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,160 + sbci r29,255 + st Y,r23 + subi r28,96 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r15,r18 + or r15,r19 + eor r15,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+16,r21 + mov r21,r15 + and r21,r24 + eor r21,r25 + subi r28,176 + sbci r29,255 + st Y,r21 + subi r28,80 + sbc r29,r1 + std Y+33,r14 + std Y+34,r15 + std Y+35,r26 + std Y+36,r27 + std Y+37,r2 + std Y+38,r3 + std Y+39,r4 + std Y+40,r5 + std Y+41,r6 + std Y+42,r7 + std Y+43,r8 + std Y+44,r9 + std Y+45,r10 + std Y+46,r11 + std Y+47,r12 + std Y+48,r13 + subi r28,191 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,80 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,96 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + adiw r28,49 + st Y+,r13 + st Y+,r14 + st Y+,r15 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y,r12 + subi r28,64 + sbc r29,r1 + dec r22 + breq 5812f + rjmp 134b +5812: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd 
r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r26,Y+17 + ldd r27,Y+18 + ldd r2,Y+19 + ldd r3,Y+20 + ldd r4,Y+21 + ldd r5,Y+22 + ldd r6,Y+23 + ldd r7,Y+24 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + std Z+16,r26 + std Z+17,r27 + std Z+18,r2 + std Z+19,r3 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + ldd r26,Y+33 + ldd r27,Y+34 + ldd r2,Y+35 + ldd r3,Y+36 + ldd r4,Y+37 + ldd r5,Y+38 + ldd r6,Y+39 + ldd r7,Y+40 + ldd r8,Y+41 + ldd r9,Y+42 + ldd r10,Y+43 + ldd r11,Y+44 + ldd r12,Y+45 + ldd r13,Y+46 + ldd r14,Y+47 + ldd r15,Y+48 + std Z+32,r26 + std Z+33,r27 + std Z+34,r2 + std Z+35,r3 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r8 + std Z+41,r9 + std Z+42,r10 + std Z+43,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + adiw r28,49 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,64 + sbc r29,r1 + std Z+48,r26 + std Z+49,r27 + std Z+50,r2 + std Z+51,r3 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + std Z+56,r8 + std Z+57,r9 + std Z+58,r10 + std Z+59,r11 + std Z+60,r12 + std Z+61,r13 + std Z+62,r14 + std Z+63,r15 + subi r28,160 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot512_permute_8, .-knot512_permute_8 + +#endif diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot.c b/knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot.c new file mode 100644 index 0000000..f8b378e --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot.c @@ -0,0 +1,301 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "internal-knot.h" + +#if !defined(__AVR__) + +/* Round constants for the KNOT-256, KNOT-384, and KNOT-512 permutations */ +static uint8_t const rc6[52] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06, 0x0c, 0x18, 0x31, 0x22, + 0x05, 0x0a, 0x14, 0x29, 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28, + 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24, 0x09, 0x12, 0x25, 0x0b, + 0x16, 0x2d, 0x1b, 0x37, 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26, + 0x0d, 0x1a, 0x35, 0x2a +}; +static uint8_t const rc7[104] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03, 0x06, 0x0c, 0x18, 0x30, + 0x61, 0x42, 0x05, 0x0a, 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c, + 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b, 0x16, 0x2c, 0x59, 0x33, + 0x67, 0x4e, 0x1d, 0x3a, 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f, + 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43, 0x07, 0x0e, 0x1c, 0x38, + 0x71, 0x62, 0x44, 0x09, 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36, + 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37, 0x6f, 0x5e, 0x3d, 0x7b, + 0x76, 0x6c, 0x58, 0x31, 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25, + 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c +}; +static uint8_t const rc8[140] = { + 0x01, 0x02, 0x04, 0x08, 0x11, 0x23, 0x47, 0x8e, 0x1c, 0x38, 0x71, 0xe2, + 0xc4, 0x89, 0x12, 0x25, 0x4b, 0x97, 0x2e, 0x5c, 0xb8, 0x70, 0xe0, 0xc0, + 0x81, 0x03, 0x06, 0x0c, 0x19, 0x32, 0x64, 0xc9, 0x92, 0x24, 0x49, 0x93, + 0x26, 0x4d, 0x9b, 0x37, 0x6e, 0xdc, 0xb9, 0x72, 0xe4, 0xc8, 0x90, 0x20, + 0x41, 0x82, 0x05, 0x0a, 0x15, 0x2b, 0x56, 0xad, 0x5b, 0xb6, 0x6d, 0xda, + 0xb5, 0x6b, 0xd6, 0xac, 0x59, 0xb2, 0x65, 0xcb, 0x96, 0x2c, 0x58, 0xb0, + 0x61, 0xc3, 0x87, 0x0f, 0x1f, 0x3e, 0x7d, 0xfb, 0xf6, 0xed, 0xdb, 0xb7, + 0x6f, 0xde, 0xbd, 0x7a, 0xf5, 0xeb, 0xd7, 0xae, 0x5d, 0xba, 0x74, 0xe8, + 0xd1, 0xa2, 0x44, 0x88, 0x10, 0x21, 0x43, 0x86, 0x0d, 0x1b, 0x36, 0x6c, + 0xd8, 0xb1, 0x63, 0xc7, 0x8f, 0x1e, 0x3c, 0x79, 0xf3, 0xe7, 0xce, 0x9c, + 0x39, 0x73, 0xe6, 0xcc, 0x98, 0x31, 0x62, 0xc5, 0x8b, 0x16, 0x2d, 0x5a, + 0xb4, 0x69, 0xd2, 0xa4, 0x48, 0x91, 0x22, 0x45 +}; + +/* Applies the KNOT S-box to four 64-bit words in bit-sliced mode */ +#define knot_sbox64(a0, a1, a2, a3, b1, b2, b3) \ + do { \ + uint64_t t1, t3, t6; \ + t1 = ~(a0); \ + t3 = (a2) ^ ((a1) & t1); \ + (b3) = (a3) ^ t3; \ + t6 = (a3) ^ t1; \ + (b2) = ((a1) | (a2)) ^ t6; \ + t1 = (a1) ^ (a3); \ + (a0) = t1 ^ (t3 & t6); \ + (b1) = t3 ^ ((b2) & t1); \ + } while (0) + +/* Applies the KNOT S-box to four 32-bit words in bit-sliced mode */ +#define knot_sbox32(a0, a1, a2, a3, b1, b2, b3) \ + do { \ + uint32_t t1, t3, t6; \ + t1 = ~(a0); \ + t3 = (a2) ^ ((a1) & t1); \ + (b3) = (a3) ^ t3; \ + t6 = (a3) ^ t1; \ + (b2) = ((a1) | (a2)) ^ t6; \ + t1 = (a1) ^ (a3); \ + (a0) = t1 ^ (t3 & t6); \ + (b1) = t3 ^ ((b2) & t1); \ + } while (0) + +static void knot256_permute + (knot256_state_t *state, const uint8_t *rc, uint8_t rounds) +{ + uint64_t b1, b2, b3; + + /* Load the input state into local variables; each row is 64 bits */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + uint64_t x0 = state->S[0]; + uint64_t x1 = state->S[1]; + uint64_t x2 = state->S[2]; + uint64_t x3 = state->S[3]; +#else + uint64_t x0 = le_load_word64(state->B); + uint64_t x1 = le_load_word64(state->B + 8); + uint64_t x2 = le_load_word64(state->B + 16); + uint64_t x3 = le_load_word64(state->B + 24); +#endif + + /* Perform all permutation rounds */ + for (; rounds > 0; --rounds) { + /* Add the next round constant to the state */ + x0 ^= *rc++; + + /* Substitution layer */ + knot_sbox64(x0, x1, x2, x3, 
b1, b2, b3); + + /* Linear diffusion layer */ + x1 = leftRotate1_64(b1); + x2 = leftRotate8_64(b2); + x3 = leftRotate25_64(b3); + } + + /* Store the local variables to the output state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + state->S[0] = x0; + state->S[1] = x1; + state->S[2] = x2; + state->S[3] = x3; +#else + le_store_word64(state->B, x0); + le_store_word64(state->B + 8, x1); + le_store_word64(state->B + 16, x2); + le_store_word64(state->B + 24, x3); +#endif +} + +void knot256_permute_6(knot256_state_t *state, uint8_t rounds) +{ + knot256_permute(state, rc6, rounds); +} + +void knot256_permute_7(knot256_state_t *state, uint8_t rounds) +{ + knot256_permute(state, rc7, rounds); +} + +void knot384_permute_7(knot384_state_t *state, uint8_t rounds) +{ + const uint8_t *rc = rc7; + uint64_t b2, b4, b6; + uint32_t b3, b5, b7; + + /* Load the input state into local variables; each row is 96 bits */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + uint64_t x0 = state->S[0]; + uint32_t x1 = state->W[2]; + uint64_t x2 = state->W[3] | (((uint64_t)(state->W[4])) << 32); + uint32_t x3 = state->W[5]; + uint64_t x4 = state->S[3]; + uint32_t x5 = state->W[8]; + uint64_t x6 = state->W[9] | (((uint64_t)(state->W[10])) << 32); + uint32_t x7 = state->W[11]; +#else + uint64_t x0 = le_load_word64(state->B); + uint32_t x1 = le_load_word32(state->B + 8); + uint64_t x2 = le_load_word64(state->B + 12); + uint32_t x3 = le_load_word32(state->B + 20); + uint64_t x4 = le_load_word64(state->B + 24); + uint32_t x5 = le_load_word32(state->B + 32); + uint64_t x6 = le_load_word64(state->B + 36); + uint32_t x7 = le_load_word32(state->B + 44); +#endif + + /* Perform all permutation rounds */ + for (; rounds > 0; --rounds) { + /* Add the next round constant to the state */ + x0 ^= *rc++; + + /* Substitution layer */ + knot_sbox64(x0, x2, x4, x6, b2, b4, b6); + knot_sbox32(x1, x3, x5, x7, b3, b5, b7); + + /* Linear diffusion layer */ + #define leftRotateShort_96(a0, a1, b0, b1, bits) \ + do { \ + (a0) = ((b0) << (bits)) | ((b1) >> (32 - (bits))); \ + (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ + } while (0) + #define leftRotateLong_96(a0, a1, b0, b1, bits) \ + do { \ + (a0) = ((b0) << (bits)) | \ + (((uint64_t)(b1)) << ((bits) - 32)) | \ + ((b0) >> (96 - (bits))); \ + (a1) = (uint32_t)(((b0) << ((bits) - 32)) >> 32); \ + } while (0) + leftRotateShort_96(x2, x3, b2, b3, 1); + leftRotateShort_96(x4, x5, b4, b5, 8); + leftRotateLong_96(x6, x7, b6, b7, 55); + } + + /* Store the local variables to the output state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + state->S[0] = x0; + state->W[2] = x1; + state->W[3] = (uint32_t)x2; + state->W[4] = (uint32_t)(x2 >> 32); + state->W[5] = x3; + state->S[3] = x4; + state->W[8] = x5; + state->W[9] = (uint32_t)x6; + state->W[10] = (uint32_t)(x6 >> 32); + state->W[11] = x7; +#else + le_store_word64(state->B, x0); + le_store_word32(state->B + 8, x1); + le_store_word64(state->B + 12, x2); + le_store_word32(state->B + 20, x3); + le_store_word64(state->B + 24, x4); + le_store_word32(state->B + 32, x5); + le_store_word64(state->B + 36, x6); + le_store_word32(state->B + 44, x7); +#endif +} + +static void knot512_permute + (knot512_state_t *state, const uint8_t *rc, uint8_t rounds) +{ + uint64_t b2, b3, b4, b5, b6, b7; + + /* Load the input state into local variables; each row is 128 bits */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + uint64_t x0 = state->S[0]; + uint64_t x1 = state->S[1]; + uint64_t x2 = state->S[2]; + uint64_t x3 = state->S[3]; + uint64_t x4 = state->S[4]; + uint64_t x5 = state->S[5]; + uint64_t 
x6 = state->S[6]; + uint64_t x7 = state->S[7]; +#else + uint64_t x0 = le_load_word64(state->B); + uint64_t x1 = le_load_word64(state->B + 8); + uint64_t x2 = le_load_word64(state->B + 16); + uint64_t x3 = le_load_word64(state->B + 24); + uint64_t x4 = le_load_word64(state->B + 32); + uint64_t x5 = le_load_word64(state->B + 40); + uint64_t x6 = le_load_word64(state->B + 48); + uint64_t x7 = le_load_word64(state->B + 56); +#endif + + /* Perform all permutation rounds */ + for (; rounds > 0; --rounds) { + /* Add the next round constant to the state */ + x0 ^= *rc++; + + /* Substitution layer */ + knot_sbox64(x0, x2, x4, x6, b2, b4, b6); + knot_sbox64(x1, x3, x5, x7, b3, b5, b7); + + /* Linear diffusion layer */ + #define leftRotate_128(a0, a1, b0, b1, bits) \ + do { \ + (a0) = ((b0) << (bits)) | ((b1) >> (64 - (bits))); \ + (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ + } while (0) + leftRotate_128(x2, x3, b2, b3, 1); + leftRotate_128(x4, x5, b4, b5, 16); + leftRotate_128(x6, x7, b6, b7, 25); + } + + /* Store the local variables to the output state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + state->S[0] = x0; + state->S[1] = x1; + state->S[2] = x2; + state->S[3] = x3; + state->S[4] = x4; + state->S[5] = x5; + state->S[6] = x6; + state->S[7] = x7; +#else + le_store_word64(state->B, x0); + le_store_word64(state->B + 8, x1); + le_store_word64(state->B + 16, x2); + le_store_word64(state->B + 24, x3); + le_store_word64(state->B + 32, x4); + le_store_word64(state->B + 40, x5); + le_store_word64(state->B + 48, x6); + le_store_word64(state->B + 56, x7); +#endif +} + +void knot512_permute_7(knot512_state_t *state, uint8_t rounds) +{ + knot512_permute(state, rc7, rounds); +} + +void knot512_permute_8(knot512_state_t *state, uint8_t rounds) +{ + knot512_permute(state, rc8, rounds); +} + +#endif /* !__AVR__ */ diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot.h b/knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot.h new file mode 100644 index 0000000..88a782c --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v1/rhys/internal-knot.h @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_KNOT_H +#define LW_INTERNAL_KNOT_H + +#include "internal-util.h" + +/** + * \file internal-knot.h + * \brief Permutations that are used by the KNOT AEAD and hash algorithms. 
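+ *
+ * A rough usage sketch, not taken from the original header: a caller
+ * typically clears one of the state unions declared below, mixes input
+ * into its byte view, and then invokes a permutation function. The local
+ * name st is an illustrative assumption, and memset() requires <string.h>.
+ *
+ * \code
+ * knot256_state_t st;
+ * memset(st.B, 0, sizeof(st.B));   // start from the all-zero state
+ * st.B[0] ^= 0x01;                 // absorb one byte of data, for example
+ * knot256_permute_7(&st, 68);      // e.g. the round count used by KNOT-HASH-256-256
+ * \endcode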
+ */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Internal state of the KNOT-256 permutation. + */ +typedef union +{ + uint64_t S[4]; /**< Words of the state */ + uint8_t B[32]; /**< Bytes of the state */ + +} knot256_state_t; + +/** + * \brief Internal state of the KNOT-384 permutation. + */ +typedef union +{ + uint64_t S[6]; /**< 64-bit words of the state */ + uint32_t W[12]; /**< 32-bit words of the state */ + uint8_t B[48]; /**< Bytes of the state */ + +} knot384_state_t; + +/** + * \brief Internal state of the KNOT-512 permutation. + */ +typedef union +{ + uint64_t S[8]; /**< Words of the state */ + uint8_t B[64]; /**< Bytes of the state */ + +} knot512_state_t; + +/** + * \brief Permutes the KNOT-256 state, using 6-bit round constants. + * + * \param state The KNOT-256 state to be permuted. + * \param rounds The number of rounds to be performed, 1 to 52. + * + * The input and output \a state will be in little-endian byte order. + */ +void knot256_permute_6(knot256_state_t *state, uint8_t rounds); + +/** + * \brief Permutes the KNOT-256 state, using 7-bit round constants. + * + * \param state The KNOT-256 state to be permuted. + * \param rounds The number of rounds to be performed, 1 to 104. + * + * The input and output \a state will be in little-endian byte order. + */ +void knot256_permute_7(knot256_state_t *state, uint8_t rounds); + +/** + * \brief Permutes the KNOT-384 state, using 7-bit round constants. + * + * \param state The KNOT-384 state to be permuted. + * \param rounds The number of rounds to be performed, 1 to 104. + * + * The input and output \a state will be in little-endian byte order. + */ +void knot384_permute_7(knot384_state_t *state, uint8_t rounds); + +/** + * \brief Permutes the KNOT-512 state, using 7-bit round constants. + * + * \param state The KNOT-512 state to be permuted. + * \param rounds The number of rounds to be performed, 1 to 104. + * + * The input and output \a state will be in little-endian byte order. + */ +void knot512_permute_7(knot512_state_t *state, uint8_t rounds); + +/** + * \brief Permutes the KNOT-512 state, using 8-bit round constants. + * + * \param state The KNOT-512 state to be permuted. + * \param rounds The number of rounds to be performed, 1 to 140. + * + * The input and output \a state will be in little-endian byte order. + */ +void knot512_permute_8(knot512_state_t *state, uint8_t rounds); + +/** + * \brief Generic pointer to a function that performs a KNOT permutation. + * + * \param state Points to the permutation state. + * \param round Number of rounds to perform. + */ +typedef void (*knot_permute_t)(void *state, uint8_t rounds); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys/internal-util.h b/knot/Implementations/crypto_hash/knot256v1/rhys/internal-util.h new file mode 100644 index 0000000..e30166d --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v1/rhys/internal-util.h @@ -0,0 +1,702 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_UTIL_H +#define LW_INTERNAL_UTIL_H + +#include <stdint.h> + +/* Figure out how to inline functions using this C compiler */ +#if defined(__STDC__) && __STDC_VERSION__ >= 199901L +#define STATIC_INLINE static inline +#elif defined(__GNUC__) || defined(__clang__) +#define STATIC_INLINE static __inline__ +#else +#define STATIC_INLINE static +#endif + +/* Try to figure out whether the CPU is little-endian or big-endian. + * May need to modify this to include new compiler-specific defines. + * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your + * compiler flags when you compile this library */ +#if defined(__x86_64) || defined(__x86_64__) || \ + defined(__i386) || defined(__i386__) || \ + defined(__AVR__) || defined(__arm) || defined(__arm__) || \ + defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ + defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ + defined(__LITTLE_ENDIAN__) +#define LW_UTIL_LITTLE_ENDIAN 1 +#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ + defined(__BIG_ENDIAN__) +/* Big endian */ +#else +#error "Cannot determine the endianness of this platform" +#endif + +/* Helper macros to load and store values while converting endian-ness */ + +/* Load a big-endian 32-bit word from a byte buffer */ +#define be_load_word32(ptr) \ + ((((uint32_t)((ptr)[0])) << 24) | \ + (((uint32_t)((ptr)[1])) << 16) | \ + (((uint32_t)((ptr)[2])) << 8) | \ + ((uint32_t)((ptr)[3]))) + +/* Store a big-endian 32-bit word into a byte buffer */ +#define be_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 24); \ + (ptr)[1] = (uint8_t)(_x >> 16); \ + (ptr)[2] = (uint8_t)(_x >> 8); \ + (ptr)[3] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 32-bit word from a byte buffer */ +#define le_load_word32(ptr) \ + ((((uint32_t)((ptr)[3])) << 24) | \ + (((uint32_t)((ptr)[2])) << 16) | \ + (((uint32_t)((ptr)[1])) << 8) | \ + ((uint32_t)((ptr)[0]))) + +/* Store a little-endian 32-bit word into a byte buffer */ +#define le_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + } while (0) + +/* Load a big-endian 64-bit word from a byte buffer */ +#define be_load_word64(ptr) \ + ((((uint64_t)((ptr)[0])) << 56) | \ + (((uint64_t)((ptr)[1])) << 48)
| \ + (((uint64_t)((ptr)[2])) << 40) | \ + (((uint64_t)((ptr)[3])) << 32) | \ + (((uint64_t)((ptr)[4])) << 24) | \ + (((uint64_t)((ptr)[5])) << 16) | \ + (((uint64_t)((ptr)[6])) << 8) | \ + ((uint64_t)((ptr)[7]))) + +/* Store a big-endian 64-bit word into a byte buffer */ +#define be_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 56); \ + (ptr)[1] = (uint8_t)(_x >> 48); \ + (ptr)[2] = (uint8_t)(_x >> 40); \ + (ptr)[3] = (uint8_t)(_x >> 32); \ + (ptr)[4] = (uint8_t)(_x >> 24); \ + (ptr)[5] = (uint8_t)(_x >> 16); \ + (ptr)[6] = (uint8_t)(_x >> 8); \ + (ptr)[7] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 64-bit word from a byte buffer */ +#define le_load_word64(ptr) \ + ((((uint64_t)((ptr)[7])) << 56) | \ + (((uint64_t)((ptr)[6])) << 48) | \ + (((uint64_t)((ptr)[5])) << 40) | \ + (((uint64_t)((ptr)[4])) << 32) | \ + (((uint64_t)((ptr)[3])) << 24) | \ + (((uint64_t)((ptr)[2])) << 16) | \ + (((uint64_t)((ptr)[1])) << 8) | \ + ((uint64_t)((ptr)[0]))) + +/* Store a little-endian 64-bit word into a byte buffer */ +#define le_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + (ptr)[4] = (uint8_t)(_x >> 32); \ + (ptr)[5] = (uint8_t)(_x >> 40); \ + (ptr)[6] = (uint8_t)(_x >> 48); \ + (ptr)[7] = (uint8_t)(_x >> 56); \ + } while (0) + +/* Load a big-endian 16-bit word from a byte buffer */ +#define be_load_word16(ptr) \ + ((((uint16_t)((ptr)[0])) << 8) | \ + ((uint16_t)((ptr)[1]))) + +/* Store a big-endian 16-bit word into a byte buffer */ +#define be_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 8); \ + (ptr)[1] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 16-bit word from a byte buffer */ +#define le_load_word16(ptr) \ + ((((uint16_t)((ptr)[1])) << 8) | \ + ((uint16_t)((ptr)[0]))) + +/* Store a little-endian 16-bit word into a byte buffer */ +#define le_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + } while (0) + +/* XOR a source byte buffer against a destination */ +#define lw_xor_block(dest, src, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ ^= *_src++; \ + --_len; \ + } \ + } while (0) + +/* XOR two source byte buffers and put the result in a destination buffer */ +#define lw_xor_block_2_src(dest, src1, src2, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ = *_src1++ ^ *_src2++; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time */ +#define lw_xor_block_2_dest(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest2++ = (*_dest++ ^= *_src++); \ + --_len; \ + } \ + } while (0) + +/* XOR two byte buffers and write to a destination which at the same + * time copying the contents of src2 to dest2 */ +#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = 
(src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src2++; \ + *_dest2++ = _temp; \ + *_dest++ = *_src1++ ^ _temp; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time. This version swaps the source value + * into the "dest" buffer */ +#define lw_xor_block_swap(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src++; \ + *_dest2++ = *_dest ^ _temp; \ + *_dest++ = _temp; \ + --_len; \ + } \ + } while (0) + +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + +/* Rotation macros for 32-bit arguments */ + +/* Generic left rotate */ +#define leftRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (32 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (32 - (bits))); \ + })) + +#if !LW_CRYPTO_ROTATE32_COMPOSED + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1(a) (leftRotate((a), 1)) +#define leftRotate2(a) (leftRotate((a), 2)) +#define leftRotate3(a) (leftRotate((a), 3)) +#define leftRotate4(a) (leftRotate((a), 4)) +#define leftRotate5(a) (leftRotate((a), 5)) +#define leftRotate6(a) (leftRotate((a), 6)) +#define leftRotate7(a) (leftRotate((a), 7)) +#define leftRotate8(a) (leftRotate((a), 8)) +#define leftRotate9(a) (leftRotate((a), 9)) +#define leftRotate10(a) (leftRotate((a), 10)) +#define leftRotate11(a) (leftRotate((a), 11)) +#define leftRotate12(a) (leftRotate((a), 12)) +#define leftRotate13(a) (leftRotate((a), 13)) +#define leftRotate14(a) (leftRotate((a), 14)) +#define leftRotate15(a) (leftRotate((a), 15)) +#define leftRotate16(a) (leftRotate((a), 16)) +#define leftRotate17(a) (leftRotate((a), 17)) +#define leftRotate18(a) (leftRotate((a), 18)) +#define leftRotate19(a) (leftRotate((a), 19)) +#define leftRotate20(a) (leftRotate((a), 20)) +#define leftRotate21(a) (leftRotate((a), 21)) +#define leftRotate22(a) (leftRotate((a), 22)) +#define leftRotate23(a) (leftRotate((a), 23)) +#define leftRotate24(a) (leftRotate((a), 24)) +#define leftRotate25(a) (leftRotate((a), 25)) +#define leftRotate26(a) (leftRotate((a), 26)) +#define leftRotate27(a) (leftRotate((a), 27)) +#define leftRotate28(a) (leftRotate((a), 28)) +#define leftRotate29(a) (leftRotate((a), 29)) +#define leftRotate30(a) (leftRotate((a), 30)) +#define leftRotate31(a) (leftRotate((a), 31)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1(a) (rightRotate((a), 1)) +#define rightRotate2(a) (rightRotate((a), 2)) +#define rightRotate3(a) (rightRotate((a), 3)) +#define rightRotate4(a) (rightRotate((a), 4)) +#define rightRotate5(a) (rightRotate((a), 5)) +#define rightRotate6(a) (rightRotate((a), 6)) +#define rightRotate7(a) (rightRotate((a), 7)) +#define rightRotate8(a) (rightRotate((a), 8)) +#define rightRotate9(a) (rightRotate((a), 9)) +#define rightRotate10(a) (rightRotate((a), 10)) +#define rightRotate11(a) (rightRotate((a), 11)) +#define rightRotate12(a) (rightRotate((a), 12)) +#define rightRotate13(a) (rightRotate((a), 13)) +#define rightRotate14(a) (rightRotate((a), 14)) +#define rightRotate15(a) (rightRotate((a), 15)) +#define rightRotate16(a) (rightRotate((a), 16)) +#define rightRotate17(a) (rightRotate((a), 17)) +#define rightRotate18(a) (rightRotate((a), 18)) +#define rightRotate19(a) (rightRotate((a), 19)) +#define rightRotate20(a) (rightRotate((a), 20)) +#define rightRotate21(a) (rightRotate((a), 21)) +#define rightRotate22(a) (rightRotate((a), 22)) +#define rightRotate23(a) (rightRotate((a), 23)) +#define rightRotate24(a) (rightRotate((a), 24)) +#define rightRotate25(a) (rightRotate((a), 25)) +#define rightRotate26(a) (rightRotate((a), 26)) +#define rightRotate27(a) (rightRotate((a), 27)) +#define rightRotate28(a) (rightRotate((a), 28)) +#define rightRotate29(a) (rightRotate((a), 29)) +#define rightRotate30(a) (rightRotate((a), 30)) +#define rightRotate31(a) (rightRotate((a), 31)) + +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) 
(leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Rotation macros for 64-bit arguments */ + +/* Generic left rotate */ +#define leftRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (64 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (64 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_64(a) (leftRotate_64((a), 1)) +#define leftRotate2_64(a) (leftRotate_64((a), 2)) +#define leftRotate3_64(a) (leftRotate_64((a), 3)) +#define leftRotate4_64(a) (leftRotate_64((a), 4)) +#define leftRotate5_64(a) (leftRotate_64((a), 5)) +#define leftRotate6_64(a) (leftRotate_64((a), 6)) +#define leftRotate7_64(a) (leftRotate_64((a), 7)) +#define leftRotate8_64(a) (leftRotate_64((a), 8)) +#define leftRotate9_64(a) (leftRotate_64((a), 9)) +#define leftRotate10_64(a) (leftRotate_64((a), 10)) +#define leftRotate11_64(a) (leftRotate_64((a), 11)) +#define leftRotate12_64(a) (leftRotate_64((a), 12)) +#define leftRotate13_64(a) (leftRotate_64((a), 13)) +#define leftRotate14_64(a) (leftRotate_64((a), 14)) +#define leftRotate15_64(a) (leftRotate_64((a), 15)) +#define leftRotate16_64(a) (leftRotate_64((a), 16)) +#define leftRotate17_64(a) (leftRotate_64((a), 17)) +#define leftRotate18_64(a) (leftRotate_64((a), 18)) +#define leftRotate19_64(a) (leftRotate_64((a), 19)) +#define leftRotate20_64(a) (leftRotate_64((a), 20)) +#define leftRotate21_64(a) (leftRotate_64((a), 21)) +#define leftRotate22_64(a) (leftRotate_64((a), 22)) +#define leftRotate23_64(a) (leftRotate_64((a), 23)) +#define leftRotate24_64(a) (leftRotate_64((a), 24)) +#define leftRotate25_64(a) (leftRotate_64((a), 25)) +#define leftRotate26_64(a) (leftRotate_64((a), 26)) +#define leftRotate27_64(a) (leftRotate_64((a), 27)) +#define leftRotate28_64(a) (leftRotate_64((a), 28)) +#define leftRotate29_64(a) (leftRotate_64((a), 29)) +#define leftRotate30_64(a) (leftRotate_64((a), 30)) +#define leftRotate31_64(a) (leftRotate_64((a), 31)) +#define leftRotate32_64(a) (leftRotate_64((a), 32)) +#define leftRotate33_64(a) (leftRotate_64((a), 33)) +#define leftRotate34_64(a) (leftRotate_64((a), 34)) +#define leftRotate35_64(a) (leftRotate_64((a), 35)) +#define leftRotate36_64(a) (leftRotate_64((a), 36)) +#define leftRotate37_64(a) (leftRotate_64((a), 37)) +#define leftRotate38_64(a) (leftRotate_64((a), 38)) +#define leftRotate39_64(a) (leftRotate_64((a), 39)) +#define leftRotate40_64(a) (leftRotate_64((a), 40)) +#define leftRotate41_64(a) (leftRotate_64((a), 41)) +#define leftRotate42_64(a) (leftRotate_64((a), 42)) +#define leftRotate43_64(a) (leftRotate_64((a), 43)) +#define leftRotate44_64(a) (leftRotate_64((a), 44)) +#define leftRotate45_64(a) (leftRotate_64((a), 45)) +#define leftRotate46_64(a) (leftRotate_64((a), 46)) +#define leftRotate47_64(a) (leftRotate_64((a), 47)) +#define leftRotate48_64(a) (leftRotate_64((a), 48)) +#define leftRotate49_64(a) (leftRotate_64((a), 49)) +#define leftRotate50_64(a) (leftRotate_64((a), 50)) +#define leftRotate51_64(a) (leftRotate_64((a), 51)) +#define leftRotate52_64(a) (leftRotate_64((a), 52)) +#define leftRotate53_64(a) (leftRotate_64((a), 53)) +#define leftRotate54_64(a) (leftRotate_64((a), 54)) +#define leftRotate55_64(a) (leftRotate_64((a), 55)) +#define leftRotate56_64(a) (leftRotate_64((a), 56)) +#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) +#define leftRotate58_64(a) (leftRotate_64((a), 58)) +#define leftRotate59_64(a) (leftRotate_64((a), 59)) +#define leftRotate60_64(a) (leftRotate_64((a), 60)) +#define leftRotate61_64(a) (leftRotate_64((a), 61)) +#define leftRotate62_64(a) (leftRotate_64((a), 62)) +#define leftRotate63_64(a) (leftRotate_64((a), 63)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_64(a) (rightRotate_64((a), 1)) +#define rightRotate2_64(a) (rightRotate_64((a), 2)) +#define rightRotate3_64(a) (rightRotate_64((a), 3)) +#define rightRotate4_64(a) (rightRotate_64((a), 4)) +#define rightRotate5_64(a) (rightRotate_64((a), 5)) +#define rightRotate6_64(a) (rightRotate_64((a), 6)) +#define rightRotate7_64(a) (rightRotate_64((a), 7)) +#define rightRotate8_64(a) (rightRotate_64((a), 8)) +#define rightRotate9_64(a) (rightRotate_64((a), 9)) +#define rightRotate10_64(a) (rightRotate_64((a), 10)) +#define rightRotate11_64(a) (rightRotate_64((a), 11)) +#define rightRotate12_64(a) (rightRotate_64((a), 12)) +#define rightRotate13_64(a) (rightRotate_64((a), 13)) +#define rightRotate14_64(a) (rightRotate_64((a), 14)) +#define rightRotate15_64(a) (rightRotate_64((a), 15)) +#define rightRotate16_64(a) (rightRotate_64((a), 16)) +#define rightRotate17_64(a) (rightRotate_64((a), 17)) +#define rightRotate18_64(a) (rightRotate_64((a), 18)) +#define rightRotate19_64(a) (rightRotate_64((a), 19)) +#define rightRotate20_64(a) (rightRotate_64((a), 20)) +#define rightRotate21_64(a) (rightRotate_64((a), 21)) +#define rightRotate22_64(a) (rightRotate_64((a), 22)) +#define rightRotate23_64(a) (rightRotate_64((a), 23)) +#define rightRotate24_64(a) (rightRotate_64((a), 24)) +#define rightRotate25_64(a) (rightRotate_64((a), 25)) +#define rightRotate26_64(a) (rightRotate_64((a), 26)) +#define rightRotate27_64(a) (rightRotate_64((a), 27)) +#define rightRotate28_64(a) (rightRotate_64((a), 28)) +#define rightRotate29_64(a) (rightRotate_64((a), 29)) +#define rightRotate30_64(a) (rightRotate_64((a), 30)) +#define rightRotate31_64(a) (rightRotate_64((a), 31)) +#define rightRotate32_64(a) (rightRotate_64((a), 32)) +#define rightRotate33_64(a) (rightRotate_64((a), 33)) +#define rightRotate34_64(a) (rightRotate_64((a), 34)) +#define rightRotate35_64(a) (rightRotate_64((a), 35)) +#define rightRotate36_64(a) (rightRotate_64((a), 36)) +#define rightRotate37_64(a) (rightRotate_64((a), 37)) +#define rightRotate38_64(a) (rightRotate_64((a), 38)) +#define rightRotate39_64(a) (rightRotate_64((a), 39)) +#define rightRotate40_64(a) (rightRotate_64((a), 40)) +#define rightRotate41_64(a) (rightRotate_64((a), 41)) +#define rightRotate42_64(a) (rightRotate_64((a), 42)) +#define rightRotate43_64(a) (rightRotate_64((a), 43)) +#define rightRotate44_64(a) (rightRotate_64((a), 44)) +#define rightRotate45_64(a) (rightRotate_64((a), 45)) +#define rightRotate46_64(a) (rightRotate_64((a), 46)) +#define rightRotate47_64(a) (rightRotate_64((a), 47)) +#define rightRotate48_64(a) (rightRotate_64((a), 48)) +#define rightRotate49_64(a) (rightRotate_64((a), 49)) +#define rightRotate50_64(a) (rightRotate_64((a), 50)) +#define rightRotate51_64(a) (rightRotate_64((a), 51)) +#define rightRotate52_64(a) (rightRotate_64((a), 52)) +#define rightRotate53_64(a) (rightRotate_64((a), 53)) +#define rightRotate54_64(a) (rightRotate_64((a), 54)) +#define rightRotate55_64(a) (rightRotate_64((a), 55)) +#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) +#define rightRotate57_64(a) (rightRotate_64((a), 57)) +#define rightRotate58_64(a) (rightRotate_64((a), 58)) +#define rightRotate59_64(a) (rightRotate_64((a), 59)) +#define rightRotate60_64(a) (rightRotate_64((a), 60)) +#define rightRotate61_64(a) (rightRotate_64((a), 61)) +#define rightRotate62_64(a) (rightRotate_64((a), 62)) +#define rightRotate63_64(a) (rightRotate_64((a), 63)) + +/* Rotate a 16-bit value left by a number of bits */ +#define leftRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (16 - (bits))); \ + })) + +/* Rotate a 16-bit value right by a number of bits */ +#define rightRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (16 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_16(a) (leftRotate_16((a), 1)) +#define leftRotate2_16(a) (leftRotate_16((a), 2)) +#define leftRotate3_16(a) (leftRotate_16((a), 3)) +#define leftRotate4_16(a) (leftRotate_16((a), 4)) +#define leftRotate5_16(a) (leftRotate_16((a), 5)) +#define leftRotate6_16(a) (leftRotate_16((a), 6)) +#define leftRotate7_16(a) (leftRotate_16((a), 7)) +#define leftRotate8_16(a) (leftRotate_16((a), 8)) +#define leftRotate9_16(a) (leftRotate_16((a), 9)) +#define leftRotate10_16(a) (leftRotate_16((a), 10)) +#define leftRotate11_16(a) (leftRotate_16((a), 11)) +#define leftRotate12_16(a) (leftRotate_16((a), 12)) +#define leftRotate13_16(a) (leftRotate_16((a), 13)) +#define leftRotate14_16(a) (leftRotate_16((a), 14)) +#define leftRotate15_16(a) (leftRotate_16((a), 15)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_16(a) (rightRotate_16((a), 1)) +#define rightRotate2_16(a) (rightRotate_16((a), 2)) +#define rightRotate3_16(a) (rightRotate_16((a), 3)) +#define rightRotate4_16(a) (rightRotate_16((a), 4)) +#define rightRotate5_16(a) (rightRotate_16((a), 5)) +#define rightRotate6_16(a) (rightRotate_16((a), 6)) +#define rightRotate7_16(a) (rightRotate_16((a), 7)) +#define rightRotate8_16(a) (rightRotate_16((a), 8)) +#define rightRotate9_16(a) (rightRotate_16((a), 9)) +#define rightRotate10_16(a) (rightRotate_16((a), 10)) +#define rightRotate11_16(a) (rightRotate_16((a), 11)) +#define rightRotate12_16(a) (rightRotate_16((a), 12)) +#define rightRotate13_16(a) (rightRotate_16((a), 13)) +#define rightRotate14_16(a) (rightRotate_16((a), 14)) +#define rightRotate15_16(a) (rightRotate_16((a), 15)) + +/* Rotate an 8-bit value left by a number of bits */ +#define leftRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (8 - (bits))); \ + })) + +/* Rotate an 8-bit value right by a number of bits */ +#define rightRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (8 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_8(a) (leftRotate_8((a), 1)) +#define leftRotate2_8(a) (leftRotate_8((a), 2)) +#define leftRotate3_8(a) (leftRotate_8((a), 3)) +#define leftRotate4_8(a) (leftRotate_8((a), 4)) +#define leftRotate5_8(a) (leftRotate_8((a), 5)) +#define leftRotate6_8(a) (leftRotate_8((a), 6)) +#define leftRotate7_8(a) (leftRotate_8((a), 7)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_8(a) (rightRotate_8((a), 1)) +#define rightRotate2_8(a) (rightRotate_8((a), 2)) +#define rightRotate3_8(a) (rightRotate_8((a), 3)) +#define rightRotate4_8(a) (rightRotate_8((a), 4)) +#define rightRotate5_8(a) (rightRotate_8((a), 5)) +#define rightRotate6_8(a) (rightRotate_8((a), 6)) +#define rightRotate7_8(a) (rightRotate_8((a), 7)) + +#endif diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys/knot-hash.c b/knot/Implementations/crypto_hash/knot256v1/rhys/knot-hash.c new file mode 100644 index 0000000..a4edecd --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v1/rhys/knot-hash.c @@ -0,0 +1,186 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "knot.h" +#include "internal-knot.h" +#include <string.h> + +aead_hash_algorithm_t const knot_hash_256_256_algorithm = { + "KNOT-HASH-256-256", + sizeof(int), + KNOT_HASH_256_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + knot_hash_256_256, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +aead_hash_algorithm_t const knot_hash_256_384_algorithm = { + "KNOT-HASH-256-384", + sizeof(int), + KNOT_HASH_256_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + knot_hash_256_384, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +aead_hash_algorithm_t const knot_hash_384_384_algorithm = { + "KNOT-HASH-384-384", + sizeof(int), + KNOT_HASH_384_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + knot_hash_384_384, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +aead_hash_algorithm_t const knot_hash_512_512_algorithm = { + "KNOT-HASH-512-512", + sizeof(int), + KNOT_HASH_512_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + knot_hash_512_512, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +/** + * \brief Input rate for KNOT-HASH-256-256. + */ +#define KNOT_HASH_256_256_RATE 4 + +/** + * \brief Input rate for KNOT-HASH-256-384. + */ +#define KNOT_HASH_256_384_RATE 16 + +/** + * \brief Input rate for KNOT-HASH-384-384. + */ +#define KNOT_HASH_384_384_RATE 6 + +/** + * \brief Input rate for KNOT-HASH-512-512. + */ +#define KNOT_HASH_512_512_RATE 8 + +int knot_hash_256_256 + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + knot256_state_t state; + unsigned temp; + memset(state.B, 0, sizeof(state.B)); + while (inlen >= KNOT_HASH_256_256_RATE) { + lw_xor_block(state.B, in, KNOT_HASH_256_256_RATE); + knot256_permute_7(&state, 68); + in += KNOT_HASH_256_256_RATE; + inlen -= KNOT_HASH_256_256_RATE; + } + temp = (unsigned)inlen; + lw_xor_block(state.B, in, temp); + state.B[temp] ^= 0x01; + knot256_permute_7(&state, 68); + memcpy(out, state.B, KNOT_HASH_256_SIZE / 2); + knot256_permute_7(&state, 68); + memcpy(out + KNOT_HASH_256_SIZE / 2, state.B, KNOT_HASH_256_SIZE / 2); + return 0; +} + +int knot_hash_256_384 + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + knot384_state_t state; + unsigned temp; + memset(state.B, 0, sizeof(state.B)); + state.B[sizeof(state.B) - 1] ^= 0x80; + while (inlen >= KNOT_HASH_256_384_RATE) { + lw_xor_block(state.B, in, KNOT_HASH_256_384_RATE); + knot384_permute_7(&state, 80); + in += KNOT_HASH_256_384_RATE; + inlen -= KNOT_HASH_256_384_RATE; + } + temp = (unsigned)inlen; + lw_xor_block(state.B, in, temp); + state.B[temp] ^= 0x01; + knot384_permute_7(&state, 80); + memcpy(out, state.B, KNOT_HASH_256_SIZE / 2); + knot384_permute_7(&state, 80); + memcpy(out + KNOT_HASH_256_SIZE / 2, state.B, KNOT_HASH_256_SIZE / 2); + return 0; +} + +int knot_hash_384_384 + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + knot384_state_t state; + unsigned temp; + memset(state.B, 0, sizeof(state.B)); + while (inlen >= KNOT_HASH_384_384_RATE) { + lw_xor_block(state.B, in, KNOT_HASH_384_384_RATE); + knot384_permute_7(&state, 104); + in += KNOT_HASH_384_384_RATE; + inlen -= KNOT_HASH_384_384_RATE; + } + temp = (unsigned)inlen; + lw_xor_block(state.B, in, temp); + state.B[temp] ^= 0x01; + knot384_permute_7(&state, 104); + memcpy(out, state.B, KNOT_HASH_384_SIZE /
2); + knot384_permute_7(&state, 104); + memcpy(out + KNOT_HASH_384_SIZE / 2, state.B, KNOT_HASH_384_SIZE / 2); + return 0; +} + +int knot_hash_512_512 + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + knot512_state_t state; + unsigned temp; + memset(state.B, 0, sizeof(state.B)); + while (inlen >= KNOT_HASH_512_512_RATE) { + lw_xor_block(state.B, in, KNOT_HASH_512_512_RATE); + knot512_permute_8(&state, 140); + in += KNOT_HASH_512_512_RATE; + inlen -= KNOT_HASH_512_512_RATE; + } + temp = (unsigned)inlen; + lw_xor_block(state.B, in, temp); + state.B[temp] ^= 0x01; + knot512_permute_8(&state, 140); + memcpy(out, state.B, KNOT_HASH_512_SIZE / 2); + knot512_permute_8(&state, 140); + memcpy(out + KNOT_HASH_512_SIZE / 2, state.B, KNOT_HASH_512_SIZE / 2); + return 0; +} diff --git a/knot/Implementations/crypto_hash/knot256v1/rhys/knot.h b/knot/Implementations/crypto_hash/knot256v1/rhys/knot.h new file mode 100644 index 0000000..e2c5198 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v1/rhys/knot.h @@ -0,0 +1,459 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_KNOT_H +#define LWCRYPTO_KNOT_H + +#include "aead-common.h" + +/** + * \file knot.h + * \brief KNOT authenticated encryption and hash algorithms. + * + * KNOT is a family of authenticated encryption and hash algorithms built + * around a permutation and the MonkeyDuplex sponge construction. The + * family members are: + * + * \li KNOT-AEAD-128-256 with a 128-bit key, a 128-bit nonce, and a + * 128-bit tag, built around a 256-bit permutation. This is the primary + * encryption member of the family. + * \li KNOT-AEAD-128-384 with a 128-bit key, a 128-bit nonce, and a + * 128-bit tag, built around a 384-bit permutation. + * \li KNOT-AEAD-192-384 with a 192-bit key, a 192-bit nonce, and a + * 192-bit tag, built around a 384-bit permutation. + * \li KNOT-AEAD-256-512 with a 256-bit key, a 256-bit nonce, and a + * 256-bit tag, built around a 512-bit permutation. + * \li KNOT-HASH-256-256 with a 256-bit hash output, built around a + * 256-bit permutation. This is the primary hashing member of the family. + * \li KNOT-HASH-256-384 with a 256-bit hash output, built around a + * 384-bit permutation. + * \li KNOT-HASH-384-384 with a 384-bit hash output, built around a + * 384-bit permutation. + * \li KNOT-HASH-512-512 with a 512-bit hash output, built around a + * 512-bit permutation. 
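+ *
+ * As a minimal hashing sketch (illustrative only; msg and msg_len are
+ * assumed caller-side names, not part of this header):
+ *
+ * \code
+ * unsigned char digest[KNOT_HASH_256_SIZE];
+ * knot_hash_256_256(digest, msg, msg_len);
+ * \endcode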
+ * + * References: https://csrc.nist.gov/CSRC/media/Projects/lightweight-cryptography/documents/round-2/spec-doc-rnd2/knot-spec-round.pdf + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the key for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. + */ +#define KNOT_AEAD_128_KEY_SIZE 16 + +/** + * \brief Size of the authentication tag for KNOT-AEAD-128-256 and + * KNOT-AEAD-128-384. + */ +#define KNOT_AEAD_128_TAG_SIZE 16 + +/** + * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. + */ +#define KNOT_AEAD_128_NONCE_SIZE 16 + +/** + * \brief Size of the key for KNOT-AEAD-192-384. + */ +#define KNOT_AEAD_192_KEY_SIZE 24 + +/** + * \brief Size of the authentication tag for KNOT-AEAD-192-384. + */ +#define KNOT_AEAD_192_TAG_SIZE 24 + +/** + * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-192-384. + */ +#define KNOT_AEAD_192_NONCE_SIZE 24 + +/** + * \brief Size of the key for KNOT-AEAD-256-512. + */ +#define KNOT_AEAD_256_KEY_SIZE 32 + +/** + * \brief Size of the authentication tag for KNOT-AEAD-256-512. + */ +#define KNOT_AEAD_256_TAG_SIZE 32 + +/** + * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. + */ +#define KNOT_AEAD_256_NONCE_SIZE 32 + +/** + * \brief Size of the hash for KNOT-HASH-256-256 and KNOT-HASH-256-384. + */ +#define KNOT_HASH_256_SIZE 32 + +/** + * \brief Size of the hash for KNOT-HASH-384-384. + */ +#define KNOT_HASH_384_SIZE 48 + +/** + * \brief Size of the hash for KNOT-HASH-512-512. + */ +#define KNOT_HASH_512_SIZE 64 + +/** + * \brief Meta-information block for the KNOT-AEAD-128-256 cipher. + */ +extern aead_cipher_t const knot_aead_128_256_cipher; + +/** + * \brief Meta-information block for the KNOT-AEAD-128-384 cipher. + */ +extern aead_cipher_t const knot_aead_128_384_cipher; + +/** + * \brief Meta-information block for the KNOT-AEAD-192-384 cipher. + */ +extern aead_cipher_t const knot_aead_192_384_cipher; + +/** + * \brief Meta-information block for the KNOT-AEAD-256-512 cipher. + */ +extern aead_cipher_t const knot_aead_256_512_cipher; + +/** + * \brief Meta-information block for the KNOT-HASH-256-256 algorithm. + */ +extern aead_hash_algorithm_t const knot_hash_256_256_algorithm; + +/** + * \brief Meta-information block for the KNOT-HASH-256-384 algorithm. + */ +extern aead_hash_algorithm_t const knot_hash_256_384_algorithm; + +/** + * \brief Meta-information block for the KNOT-HASH-384-384 algorithm. + */ +extern aead_hash_algorithm_t const knot_hash_384_384_algorithm; + +/** + * \brief Meta-information block for the KNOT-HASH-512-512 algorithm. + */ +extern aead_hash_algorithm_t const knot_hash_512_512_algorithm; + +/** + * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-256. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. 
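+ *
+ * Call sketch under assumed caller-side names (c, m, mlen, ad, adlen,
+ * npub, and k are illustrative, not defined by this header); nsec may be
+ * passed as NULL since it is unused:
+ *
+ * \code
+ * unsigned long long clen;
+ * knot_aead_128_256_encrypt(c, &clen, m, mlen, ad, adlen, 0, npub, k);
+ * \endcode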
+ * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa knot_aead_128_256_decrypt() + */ +int knot_aead_128_256_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-256. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa knot_aead_128_256_encrypt() + */ +int knot_aead_128_256_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-384. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa knot_aead_128_384_decrypt() + */ +int knot_aead_128_384_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-384. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. 
+ * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa knot_aead_128_384_encrypt() + */ +int knot_aead_128_384_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + + +/** + * \brief Encrypts and authenticates a packet with KNOT-AEAD-192-384. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa knot_aead_192_384_decrypt() + */ +int knot_aead_192_384_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with KNOT-AEAD-192-384. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa knot_aead_192_384_encrypt() + */ +int knot_aead_192_384_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with KNOT-AEAD-256-512. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. 
+ * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa knot_aead_256_512_decrypt() + */ +int knot_aead_256_512_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with KNOT-AEAD-256-512. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa knot_aead_256_512_encrypt() + */ +int knot_aead_256_512_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data with KNOT-HASH-256-256. + * + * \param out Buffer to receive the hash output which must be at least + * KNOT_HASH_256_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int knot_hash_256_256 + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Hashes a block of input data with KNOT-HASH-256-384. + * + * \param out Buffer to receive the hash output which must be at least + * KNOT_HASH_256_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int knot_hash_256_384 + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Hashes a block of input data with KNOT-HASH-384-384. + * + * \param out Buffer to receive the hash output which must be at least + * KNOT_HASH_384_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. 
+ */ +int knot_hash_384_384 + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Hashes a block of input data with KNOT-HASH-512-512. + * + * \param out Buffer to receive the hash output which must be at least + * KNOT_HASH_512_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int knot_hash_512_512 + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/aead-common.c b/knot/Implementations/crypto_hash/knot256v2/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
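(Editor's aside, not part of the patch: the knot.h header added above only declares the KNOT AEAD and hash entry points, so a minimal round-trip sketch may help readers new to the API. It is a hypothetical test program, not code from this repository; it assumes the 16-byte key/nonce and 16-byte tag documented for KNOT-AEAD-128-256 and the 32-byte digest implied by the KNOT-HASH-256-256 name, and the message, associated data and zero-filled key/nonce are made-up values.)

    #include <stdio.h>
    #include <string.h>
    #include "knot.h"

    int main(void)
    {
        unsigned char key[16]  = {0};        /* 16-byte key, all zero for the sketch */
        unsigned char npub[16] = {0};        /* 16-byte public nonce */
        unsigned char msg[] = "hello";       /* hypothetical plaintext */
        unsigned char ad[]  = "hdr";         /* hypothetical associated data */
        unsigned char ct[sizeof(msg) + 16];  /* ciphertext plus 16-byte tag */
        unsigned char pt[sizeof(msg)];
        unsigned char digest[32];            /* KNOT-HASH-256-256 gives a 256-bit digest */
        unsigned long long ctlen = 0, ptlen = 0;

        /* Encrypt; nsec is unused by this algorithm, so NULL is passed. */
        if (knot_aead_128_256_encrypt(ct, &ctlen, msg, sizeof(msg),
                                      ad, sizeof(ad), NULL, npub, key) != 0)
            return 1;

        /* Decrypt and verify the tag; -1 means the tag did not match. */
        if (knot_aead_128_256_decrypt(pt, &ptlen, NULL, ct, ctlen,
                                      ad, sizeof(ad), npub, key) != 0)
            return 1;

        /* One-shot hash of the same message. */
        if (knot_hash_256_256(digest, msg, sizeof(msg)) != 0)
            return 1;

        printf("recovered %llu bytes\n", ptlen);
        return memcmp(pt, msg, sizeof(msg)) != 0;
    }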
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/aead-common.h b/knot/Implementations/crypto_hash/knot256v2/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. 
- * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
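(Editor's aside, not part of the patch: the two tag-check helpers documented here are the parts of aead-common.h that decryption code calls directly; the rest of the header is metadata (aead_cipher_t, aead_hash_algorithm_t) describing each algorithm. A tiny, hypothetical demonstration of the behaviour implemented in the aead-common.c deleted further up is sketched below; the stub tag values are made up.)

    #include <stdio.h>
    #include "aead-common.h"

    int main(void)
    {
        unsigned char pt[4]    = {1, 2, 3, 4}; /* made-up plaintext */
        unsigned char tag[16]  = {0xAA};       /* locally computed tag (stub value) */
        unsigned char good[16] = {0xAA};       /* matching received tag */
        unsigned char bad[16]  = {0xBB};       /* mismatching received tag */

        /* Matching tags: returns 0 and leaves the plaintext untouched. */
        int rc1 = aead_check_tag(pt, sizeof(pt), tag, good, 16);
        printf("match    -> %d, pt[0]=%d\n", rc1, pt[0]);

        /* Mismatching tags: returns -1 and zeroes the plaintext so the
         * caller cannot accidentally use unauthenticated data. */
        int rc2 = aead_check_tag(pt, sizeof(pt), tag, bad, 16);
        printf("mismatch -> %d, pt[0]=%d\n", rc2, pt[0]);
        return 0;
    }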
- * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/api.h b/knot/Implementations/crypto_hash/knot256v2/rhys-avr/api.h deleted file mode 100644 index ae8c7f6..0000000 --- a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/api.h +++ /dev/null @@ -1 +0,0 @@ -#define CRYPTO_BYTES 32 diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/hash.c b/knot/Implementations/crypto_hash/knot256v2/rhys-avr/hash.c deleted file mode 100644 index 43a2745..0000000 --- a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/hash.c +++ /dev/null @@ -1,8 +0,0 @@ - -#include "knot.h" - -int crypto_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - return knot_hash_256_384(out, in, inlen); -} diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot-256-avr.S b/knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot-256-avr.S deleted file mode 100644 index 15e6389..0000000 --- a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot-256-avr.S +++ /dev/null @@ -1,1093 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_6, @object - .size table_6, 52 -table_6: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 33 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 49 - .byte 34 - .byte 5 - .byte 10 - .byte 20 - .byte 41 - .byte 19 - .byte 39 - .byte 15 - .byte 30 - .byte 61 - .byte 58 - .byte 52 - .byte 40 - .byte 17 - .byte 35 - .byte 7 - .byte 14 - .byte 28 - .byte 57 - .byte 50 - .byte 36 - .byte 9 - .byte 18 - .byte 37 - .byte 11 - .byte 22 - .byte 45 - .byte 27 - .byte 55 - .byte 46 - .byte 29 - .byte 59 - .byte 54 - .byte 44 - .byte 25 - .byte 51 - .byte 38 - .byte 13 - .byte 26 - .byte 53 - .byte 42 - - .text -.global knot256_permute_6 - .type knot256_permute_6, @function -knot256_permute_6: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 57 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r8 - 
std Y+18,r9 - std Y+19,r10 - std Y+20,r11 - std Y+21,r12 - std Y+22,r13 - std Y+23,r14 - std Y+24,r15 - push r31 - push r30 - ldi r30,lo8(table_6) - ldi r31,hi8(table_6) -#if defined(RAMPZ) - ldi r17,hh8(table_6) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -59: -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - eor r18,r23 - inc r30 - ldd r23,Y+1 - ldd r4,Y+9 - ldd r5,Y+17 - mov r24,r18 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+33,r7 - mov r16,r5 - eor r16,r24 - mov r8,r23 - or r8,r4 - eor r8,r16 - mov r24,r23 - eor r24,r5 - mov r18,r25 - and r18,r16 - eor r18,r24 - mov r6,r8 - and r6,r24 - eor r6,r25 - std Y+25,r6 - ldd r23,Y+2 - ldd r4,Y+10 - ldd r5,Y+18 - mov r24,r19 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+34,r7 - mov r16,r5 - eor r16,r24 - mov r9,r23 - or r9,r4 - eor r9,r16 - mov r24,r23 - eor r24,r5 - mov r19,r25 - and r19,r16 - eor r19,r24 - mov r6,r9 - and r6,r24 - eor r6,r25 - std Y+26,r6 - ldd r23,Y+3 - ldd r4,Y+11 - ldd r5,Y+19 - mov r24,r20 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+35,r7 - mov r16,r5 - eor r16,r24 - mov r10,r23 - or r10,r4 - eor r10,r16 - mov r24,r23 - eor r24,r5 - mov r20,r25 - and r20,r16 - eor r20,r24 - mov r6,r10 - and r6,r24 - eor r6,r25 - std Y+27,r6 - ldd r23,Y+4 - ldd r4,Y+12 - ldd r5,Y+20 - mov r24,r21 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+36,r7 - mov r16,r5 - eor r16,r24 - mov r11,r23 - or r11,r4 - eor r11,r16 - mov r24,r23 - eor r24,r5 - mov r21,r25 - and r21,r16 - eor r21,r24 - mov r6,r11 - and r6,r24 - eor r6,r25 - std Y+28,r6 - ldd r23,Y+5 - ldd r4,Y+13 - ldd r5,Y+21 - mov r24,r26 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+37,r7 - mov r16,r5 - eor r16,r24 - mov r12,r23 - or r12,r4 - eor r12,r16 - mov r24,r23 - eor r24,r5 - mov r26,r25 - and r26,r16 - eor r26,r24 - mov r6,r12 - and r6,r24 - eor r6,r25 - std Y+29,r6 - ldd r23,Y+6 - ldd r4,Y+14 - ldd r5,Y+22 - mov r24,r27 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+38,r7 - mov r16,r5 - eor r16,r24 - mov r13,r23 - or r13,r4 - eor r13,r16 - mov r24,r23 - eor r24,r5 - mov r27,r25 - and r27,r16 - eor r27,r24 - mov r6,r13 - and r6,r24 - eor r6,r25 - std Y+30,r6 - ldd r23,Y+7 - ldd r4,Y+15 - ldd r5,Y+23 - mov r24,r2 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+39,r7 - mov r16,r5 - eor r16,r24 - mov r14,r23 - or r14,r4 - eor r14,r16 - mov r24,r23 - eor r24,r5 - mov r2,r25 - and r2,r16 - eor r2,r24 - mov r6,r14 - and r6,r24 - eor r6,r25 - std Y+31,r6 - ldd r23,Y+8 - ldd r4,Y+16 - ldd r5,Y+24 - mov r24,r3 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+40,r7 - mov r16,r5 - eor r16,r24 - mov r15,r23 - or r15,r4 - eor r15,r16 - mov r24,r23 - eor r24,r5 - mov r3,r25 - and r3,r16 - eor r3,r24 - mov r6,r15 - and r6,r24 - eor r6,r25 - std Y+32,r6 - std Y+9,r15 - std Y+10,r8 - std Y+11,r9 - std Y+12,r10 - std Y+13,r11 - std Y+14,r12 - std Y+15,r13 - std Y+16,r14 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 
- std Y+8,r15 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - ldd r12,Y+37 - ldd r13,Y+38 - ldd r14,Y+39 - ldd r15,Y+40 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+17,r13 - std Y+18,r14 - std Y+19,r15 - std Y+20,r8 - std Y+21,r9 - std Y+22,r10 - std Y+23,r11 - std Y+24,r12 - dec r22 - breq 5322f - rjmp 59b -5322: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - ldd r12,Y+5 - ldd r13,Y+6 - ldd r14,Y+7 - ldd r15,Y+8 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - ldd r8,Y+17 - ldd r9,Y+18 - ldd r10,Y+19 - ldd r11,Y+20 - ldd r12,Y+21 - ldd r13,Y+22 - ldd r14,Y+23 - ldd r15,Y+24 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - adiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot256_permute_6, .-knot256_permute_6 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot256_permute_7 - .type knot256_permute_7, @function -knot256_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 57 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std 
Y+7,r14 - std Y+8,r15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r8 - std Y+18,r9 - std Y+19,r10 - std Y+20,r11 - std Y+21,r12 - std Y+22,r13 - std Y+23,r14 - std Y+24,r15 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r17,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -59: -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - eor r18,r23 - inc r30 - ldd r23,Y+1 - ldd r4,Y+9 - ldd r5,Y+17 - mov r24,r18 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+33,r7 - mov r16,r5 - eor r16,r24 - mov r8,r23 - or r8,r4 - eor r8,r16 - mov r24,r23 - eor r24,r5 - mov r18,r25 - and r18,r16 - eor r18,r24 - mov r6,r8 - and r6,r24 - eor r6,r25 - std Y+25,r6 - ldd r23,Y+2 - ldd r4,Y+10 - ldd r5,Y+18 - mov r24,r19 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+34,r7 - mov r16,r5 - eor r16,r24 - mov r9,r23 - or r9,r4 - eor r9,r16 - mov r24,r23 - eor r24,r5 - mov r19,r25 - and r19,r16 - eor r19,r24 - mov r6,r9 - and r6,r24 - eor r6,r25 - std Y+26,r6 - ldd r23,Y+3 - ldd r4,Y+11 - ldd r5,Y+19 - mov r24,r20 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+35,r7 - mov r16,r5 - eor r16,r24 - mov r10,r23 - or r10,r4 - eor r10,r16 - mov r24,r23 - eor r24,r5 - mov r20,r25 - and r20,r16 - eor r20,r24 - mov r6,r10 - and r6,r24 - eor r6,r25 - std Y+27,r6 - ldd r23,Y+4 - ldd r4,Y+12 - ldd r5,Y+20 - mov r24,r21 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+36,r7 - mov r16,r5 - eor r16,r24 - mov r11,r23 - or r11,r4 - eor r11,r16 - mov r24,r23 - eor r24,r5 - mov r21,r25 - and r21,r16 - eor r21,r24 - mov r6,r11 - and r6,r24 - eor r6,r25 - std Y+28,r6 - ldd r23,Y+5 - ldd r4,Y+13 - ldd r5,Y+21 - mov r24,r26 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+37,r7 - mov r16,r5 - eor r16,r24 - mov r12,r23 - or r12,r4 - eor r12,r16 - mov r24,r23 - eor r24,r5 - mov r26,r25 - and r26,r16 - eor r26,r24 - mov r6,r12 - and r6,r24 - eor r6,r25 - std Y+29,r6 - ldd r23,Y+6 - ldd r4,Y+14 - ldd r5,Y+22 - mov r24,r27 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+38,r7 - mov r16,r5 - eor r16,r24 - mov r13,r23 - or r13,r4 - eor r13,r16 - mov r24,r23 - eor r24,r5 - mov r27,r25 - and r27,r16 - eor r27,r24 - mov r6,r13 - and r6,r24 - eor r6,r25 - std Y+30,r6 - ldd r23,Y+7 - ldd r4,Y+15 - ldd r5,Y+23 - mov r24,r2 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+39,r7 - mov r16,r5 - eor r16,r24 - mov r14,r23 - or r14,r4 - eor r14,r16 - mov r24,r23 - eor r24,r5 - mov r2,r25 - and r2,r16 - eor r2,r24 - mov r6,r14 - and r6,r24 - eor r6,r25 - std Y+31,r6 - ldd r23,Y+8 - ldd r4,Y+16 - ldd r5,Y+24 - mov r24,r3 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+40,r7 - mov r16,r5 - eor r16,r24 - mov r15,r23 - or r15,r4 - eor r15,r16 - mov r24,r23 - eor r24,r5 - mov r3,r25 - and r3,r16 - eor r3,r24 - mov r6,r15 - and r6,r24 - eor r6,r25 - std Y+32,r6 - std Y+9,r15 - std Y+10,r8 - 
std Y+11,r9 - std Y+12,r10 - std Y+13,r11 - std Y+14,r12 - std Y+15,r13 - std Y+16,r14 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - ldd r12,Y+37 - ldd r13,Y+38 - ldd r14,Y+39 - ldd r15,Y+40 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+17,r13 - std Y+18,r14 - std Y+19,r15 - std Y+20,r8 - std Y+21,r9 - std Y+22,r10 - std Y+23,r11 - std Y+24,r12 - dec r22 - breq 5322f - rjmp 59b -5322: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - ldd r12,Y+5 - ldd r13,Y+6 - ldd r14,Y+7 - ldd r15,Y+8 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - ldd r8,Y+17 - ldd r9,Y+18 - ldd r10,Y+19 - ldd r11,Y+20 - ldd r12,Y+21 - ldd r13,Y+22 - ldd r14,Y+23 - ldd r15,Y+24 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - adiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot256_permute_7, .-knot256_permute_7 - -#endif diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot-384-avr.S b/knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot-384-avr.S deleted file mode 100644 index 4d15898..0000000 --- a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot-384-avr.S +++ /dev/null @@ -1,833 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - .byte 37 - .byte 
75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot384_permute_7 - .type knot384_permute_7, @function -knot384_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,72 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 87 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - ldd r4,Z+16 - ldd r5,Z+17 - ldd r6,Z+18 - ldd r7,Z+19 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - std Y+13,r26 - std Y+14,r27 - std Y+15,r2 - std Y+16,r3 - std Y+17,r4 - std Y+18,r5 - std Y+19,r6 - std Y+20,r7 - std Y+21,r8 - std Y+22,r9 - std Y+23,r10 - std Y+24,r11 - ldd r26,Z+24 - ldd r27,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r4,Z+28 - ldd r5,Z+29 - ldd r6,Z+30 - ldd r7,Z+31 - ldd r8,Z+32 - ldd r9,Z+33 - ldd r10,Z+34 - ldd r11,Z+35 - std Y+25,r26 - std Y+26,r27 - std Y+27,r2 - std Y+28,r3 - std Y+29,r4 - std Y+30,r5 - std Y+31,r6 - std Y+32,r7 - std Y+33,r8 - std Y+34,r9 - std Y+35,r10 - std Y+36,r11 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+37,r26 - std Y+38,r27 - std Y+39,r2 - std Y+40,r3 - std Y+41,r4 - std Y+42,r5 - std Y+43,r6 - std Y+44,r7 - std Y+45,r8 - std Y+46,r9 - std Y+47,r10 - std Y+48,r11 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r24,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif -99: - ldd r12,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - inc r30 - ldd r18,Y+13 - ldd r19,Y+25 - ldd r20,Y+37 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+61,r23 - mov r14,r20 - eor r14,r12 - mov r26,r18 - or r26,r19 - eor r26,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+1,r21 - mov r21,r26 - and r21,r12 - eor r21,r13 - std Y+49,r21 - ldd r12,Y+2 - ldd r18,Y+14 - ldd r19,Y+26 - ldd r20,Y+38 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+62,r23 - mov r14,r20 - eor r14,r12 - mov r27,r18 - or r27,r19 - eor r27,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+2,r21 - mov r21,r27 - and r21,r12 - eor r21,r13 - std Y+50,r21 - ldd r12,Y+3 - ldd r18,Y+15 - ldd r19,Y+27 - ldd r20,Y+39 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+63,r23 - mov r14,r20 - eor r14,r12 - mov r2,r18 - or r2,r19 - eor r2,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+3,r21 - mov r21,r2 - and r21,r12 - eor r21,r13 - std Y+51,r21 - ldd r12,Y+4 - ldd r18,Y+16 - ldd r19,Y+28 - ldd r20,Y+40 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,192 - sbci r29,255 - st Y,r23 - subi r28,64 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r3,r18 - or 
r3,r19 - eor r3,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+4,r21 - mov r21,r3 - and r21,r12 - eor r21,r13 - std Y+52,r21 - ldd r12,Y+5 - ldd r18,Y+17 - ldd r19,Y+29 - ldd r20,Y+41 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,191 - sbci r29,255 - st Y,r23 - subi r28,65 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r4,r18 - or r4,r19 - eor r4,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+5,r21 - mov r21,r4 - and r21,r12 - eor r21,r13 - std Y+53,r21 - ldd r12,Y+6 - ldd r18,Y+18 - ldd r19,Y+30 - ldd r20,Y+42 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,190 - sbci r29,255 - st Y,r23 - subi r28,66 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r5,r18 - or r5,r19 - eor r5,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+6,r21 - mov r21,r5 - and r21,r12 - eor r21,r13 - std Y+54,r21 - ldd r12,Y+7 - ldd r18,Y+19 - ldd r19,Y+31 - ldd r20,Y+43 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,189 - sbci r29,255 - st Y,r23 - subi r28,67 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r6,r18 - or r6,r19 - eor r6,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+7,r21 - mov r21,r6 - and r21,r12 - eor r21,r13 - std Y+55,r21 - ldd r12,Y+8 - ldd r18,Y+20 - ldd r19,Y+32 - ldd r20,Y+44 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,188 - sbci r29,255 - st Y,r23 - subi r28,68 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r7,r18 - or r7,r19 - eor r7,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+8,r21 - mov r21,r7 - and r21,r12 - eor r21,r13 - std Y+56,r21 - ldd r12,Y+9 - ldd r18,Y+21 - ldd r19,Y+33 - ldd r20,Y+45 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,187 - sbci r29,255 - st Y,r23 - subi r28,69 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r8,r18 - or r8,r19 - eor r8,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+9,r21 - mov r21,r8 - and r21,r12 - eor r21,r13 - std Y+57,r21 - ldd r12,Y+10 - ldd r18,Y+22 - ldd r19,Y+34 - ldd r20,Y+46 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,186 - sbci r29,255 - st Y,r23 - subi r28,70 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r9,r18 - or r9,r19 - eor r9,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+10,r21 - mov r21,r9 - and r21,r12 - eor r21,r13 - std Y+58,r21 - ldd r12,Y+11 - ldd r18,Y+23 - ldd r19,Y+35 - ldd r20,Y+47 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,185 - sbci r29,255 - st Y,r23 - subi r28,71 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r10,r18 - or r10,r19 - eor r10,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+11,r21 - mov r21,r10 - and r21,r12 - eor r21,r13 - std Y+59,r21 - ldd r12,Y+12 - ldd r18,Y+24 - ldd r19,Y+36 - ldd r20,Y+48 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,184 - sbci r29,255 - st Y,r23 - subi r28,72 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r11,r18 - or r11,r19 - eor r11,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+12,r21 - mov r21,r11 - and r21,r12 - eor r21,r13 - std Y+60,r21 - std Y+25,r11 - std Y+26,r26 - std Y+27,r27 - std Y+28,r2 - std Y+29,r3 - 
std Y+30,r4 - std Y+31,r5 - std Y+32,r6 - std Y+33,r7 - std Y+34,r8 - std Y+35,r9 - std Y+36,r10 - ldd r26,Y+49 - ldd r27,Y+50 - ldd r2,Y+51 - ldd r3,Y+52 - ldd r4,Y+53 - ldd r5,Y+54 - ldd r6,Y+55 - ldd r7,Y+56 - ldd r8,Y+57 - ldd r9,Y+58 - ldd r10,Y+59 - ldd r11,Y+60 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - adc r26,r1 - std Y+13,r26 - std Y+14,r27 - std Y+15,r2 - std Y+16,r3 - std Y+17,r4 - std Y+18,r5 - std Y+19,r6 - std Y+20,r7 - std Y+21,r8 - std Y+22,r9 - std Y+23,r10 - std Y+24,r11 - adiw r28,61 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y - subi r28,72 - sbc r29,r1 - bst r26,0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r3 - ror r2 - ror r27 - ror r26 - bld r11,7 - std Y+37,r5 - std Y+38,r6 - std Y+39,r7 - std Y+40,r8 - std Y+41,r9 - std Y+42,r10 - std Y+43,r11 - std Y+44,r26 - std Y+45,r27 - std Y+46,r2 - std Y+47,r3 - std Y+48,r4 - dec r22 - breq 5542f - rjmp 99b -5542: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r2,Y+15 - ldd r3,Y+16 - ldd r4,Y+17 - ldd r5,Y+18 - ldd r6,Y+19 - ldd r7,Y+20 - ldd r8,Y+21 - ldd r9,Y+22 - ldd r10,Y+23 - ldd r11,Y+24 - std Z+12,r26 - std Z+13,r27 - std Z+14,r2 - std Z+15,r3 - std Z+16,r4 - std Z+17,r5 - std Z+18,r6 - std Z+19,r7 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - ldd r26,Y+25 - ldd r27,Y+26 - ldd r2,Y+27 - ldd r3,Y+28 - ldd r4,Y+29 - ldd r5,Y+30 - ldd r6,Y+31 - ldd r7,Y+32 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - std Z+24,r26 - std Z+25,r27 - std Z+26,r2 - std Z+27,r3 - std Z+28,r4 - std Z+29,r5 - std Z+30,r6 - std Z+31,r7 - std Z+32,r8 - std Z+33,r9 - std Z+34,r10 - std Z+35,r11 - ldd r26,Y+37 - ldd r27,Y+38 - ldd r2,Y+39 - ldd r3,Y+40 - ldd r4,Y+41 - ldd r5,Y+42 - ldd r6,Y+43 - ldd r7,Y+44 - ldd r8,Y+45 - ldd r9,Y+46 - ldd r10,Y+47 - ldd r11,Y+48 - std Z+36,r26 - std Z+37,r27 - std Z+38,r2 - std Z+39,r3 - std Z+40,r4 - std Z+41,r5 - std Z+42,r6 - std Z+43,r7 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - subi r28,184 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot384_permute_7, .-knot384_permute_7 - -#endif diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot-512-avr.S b/knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot-512-avr.S deleted file mode 100644 index 6f92ac3..0000000 --- a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot-512-avr.S +++ /dev/null @@ -1,2315 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - 
.byte 35 - .byte 71 - .byte 15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot512_permute_7 - .type knot512_permute_7, @function -knot512_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,96 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 113 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r26,Z+16 - ldd r27,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - ldd r26,Z+32 - ldd r27,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r8,Z+40 - ldd r9,Z+41 - ldd r10,Z+42 - ldd r11,Z+43 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - std Y+33,r26 - std Y+34,r27 - std Y+35,r2 - std Y+36,r3 - std Y+37,r4 - std Y+38,r5 - std Y+39,r6 - std Y+40,r7 - std Y+41,r8 - std Y+42,r9 - std Y+43,r10 - std Y+44,r11 - std Y+45,r12 - std Y+46,r13 - std Y+47,r14 - std Y+48,r15 - ldd r26,Z+48 - ldd r27,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r8,Z+56 - ldd r9,Z+57 - ldd r10,Z+58 - ldd r11,Z+59 - ldd r12,Z+60 - ldd r13,Z+61 - ldd r14,Z+62 - ldd r15,Z+63 - adiw r28,49 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y+,r12 - st Y+,r13 - st Y+,r14 - st Y,r15 - subi r28,64 - sbc r29,r1 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r17,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -134: - ldd r24,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov 
r18,r0 -#endif - eor r24,r18 - inc r30 - ldd r18,Y+17 - ldd r19,Y+33 - ldd r20,Y+49 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,175 - sbci r29,255 - st Y,r23 - subi r28,81 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r26,r18 - or r26,r19 - eor r26,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+1,r21 - mov r21,r26 - and r21,r24 - eor r21,r25 - subi r28,191 - sbci r29,255 - st Y,r21 - subi r28,65 - sbc r29,r1 - ldd r24,Y+2 - ldd r18,Y+18 - ldd r19,Y+34 - ldd r20,Y+50 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,174 - sbci r29,255 - st Y,r23 - subi r28,82 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r27,r18 - or r27,r19 - eor r27,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+2,r21 - mov r21,r27 - and r21,r24 - eor r21,r25 - subi r28,190 - sbci r29,255 - st Y,r21 - subi r28,66 - sbc r29,r1 - ldd r24,Y+3 - ldd r18,Y+19 - ldd r19,Y+35 - ldd r20,Y+51 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,173 - sbci r29,255 - st Y,r23 - subi r28,83 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r2,r18 - or r2,r19 - eor r2,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+3,r21 - mov r21,r2 - and r21,r24 - eor r21,r25 - subi r28,189 - sbci r29,255 - st Y,r21 - subi r28,67 - sbc r29,r1 - ldd r24,Y+4 - ldd r18,Y+20 - ldd r19,Y+36 - ldd r20,Y+52 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,172 - sbci r29,255 - st Y,r23 - subi r28,84 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r3,r18 - or r3,r19 - eor r3,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+4,r21 - mov r21,r3 - and r21,r24 - eor r21,r25 - subi r28,188 - sbci r29,255 - st Y,r21 - subi r28,68 - sbc r29,r1 - ldd r24,Y+5 - ldd r18,Y+21 - ldd r19,Y+37 - ldd r20,Y+53 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,171 - sbci r29,255 - st Y,r23 - subi r28,85 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r4,r18 - or r4,r19 - eor r4,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+5,r21 - mov r21,r4 - and r21,r24 - eor r21,r25 - subi r28,187 - sbci r29,255 - st Y,r21 - subi r28,69 - sbc r29,r1 - ldd r24,Y+6 - ldd r18,Y+22 - ldd r19,Y+38 - ldd r20,Y+54 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,170 - sbci r29,255 - st Y,r23 - subi r28,86 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r5,r18 - or r5,r19 - eor r5,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+6,r21 - mov r21,r5 - and r21,r24 - eor r21,r25 - subi r28,186 - sbci r29,255 - st Y,r21 - subi r28,70 - sbc r29,r1 - ldd r24,Y+7 - ldd r18,Y+23 - ldd r19,Y+39 - ldd r20,Y+55 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,169 - sbci r29,255 - st Y,r23 - subi r28,87 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r6,r18 - or r6,r19 - eor r6,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+7,r21 - mov r21,r6 - and r21,r24 - eor r21,r25 - subi r28,185 - sbci r29,255 - st Y,r21 - subi r28,71 - sbc r29,r1 - ldd r24,Y+8 - ldd r18,Y+24 - ldd r19,Y+40 - ldd r20,Y+56 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,168 - sbci r29,255 - st Y,r23 - subi r28,88 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r7,r18 - or r7,r19 
- eor r7,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+8,r21 - mov r21,r7 - and r21,r24 - eor r21,r25 - subi r28,184 - sbci r29,255 - st Y,r21 - subi r28,72 - sbc r29,r1 - ldd r24,Y+9 - ldd r18,Y+25 - ldd r19,Y+41 - ldd r20,Y+57 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,167 - sbci r29,255 - st Y,r23 - subi r28,89 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r8,r18 - or r8,r19 - eor r8,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+9,r21 - mov r21,r8 - and r21,r24 - eor r21,r25 - subi r28,183 - sbci r29,255 - st Y,r21 - subi r28,73 - sbc r29,r1 - ldd r24,Y+10 - ldd r18,Y+26 - ldd r19,Y+42 - ldd r20,Y+58 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,166 - sbci r29,255 - st Y,r23 - subi r28,90 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r9,r18 - or r9,r19 - eor r9,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+10,r21 - mov r21,r9 - and r21,r24 - eor r21,r25 - subi r28,182 - sbci r29,255 - st Y,r21 - subi r28,74 - sbc r29,r1 - ldd r24,Y+11 - ldd r18,Y+27 - ldd r19,Y+43 - ldd r20,Y+59 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,165 - sbci r29,255 - st Y,r23 - subi r28,91 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r10,r18 - or r10,r19 - eor r10,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+11,r21 - mov r21,r10 - and r21,r24 - eor r21,r25 - subi r28,181 - sbci r29,255 - st Y,r21 - subi r28,75 - sbc r29,r1 - ldd r24,Y+12 - ldd r18,Y+28 - ldd r19,Y+44 - ldd r20,Y+60 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,164 - sbci r29,255 - st Y,r23 - subi r28,92 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r11,r18 - or r11,r19 - eor r11,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+12,r21 - mov r21,r11 - and r21,r24 - eor r21,r25 - subi r28,180 - sbci r29,255 - st Y,r21 - subi r28,76 - sbc r29,r1 - ldd r24,Y+13 - ldd r18,Y+29 - ldd r19,Y+45 - ldd r20,Y+61 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,163 - sbci r29,255 - st Y,r23 - subi r28,93 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r12,r18 - or r12,r19 - eor r12,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+13,r21 - mov r21,r12 - and r21,r24 - eor r21,r25 - subi r28,179 - sbci r29,255 - st Y,r21 - subi r28,77 - sbc r29,r1 - ldd r24,Y+14 - ldd r18,Y+30 - ldd r19,Y+46 - ldd r20,Y+62 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,162 - sbci r29,255 - st Y,r23 - subi r28,94 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r13,r18 - or r13,r19 - eor r13,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+14,r21 - mov r21,r13 - and r21,r24 - eor r21,r25 - subi r28,178 - sbci r29,255 - st Y,r21 - subi r28,78 - sbc r29,r1 - ldd r24,Y+15 - ldd r18,Y+31 - ldd r19,Y+47 - ldd r20,Y+63 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,161 - sbci r29,255 - st Y,r23 - subi r28,95 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r14,r18 - or r14,r19 - eor r14,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+15,r21 - mov r21,r14 - and r21,r24 - eor r21,r25 - subi r28,177 - sbci r29,255 - st Y,r21 - subi r28,79 - sbc r29,r1 - ldd r24,Y+16 - ldd r18,Y+32 - ldd r19,Y+48 - subi 
r28,192 - sbci r29,255 - ld r20,Y - subi r28,64 - sbc r29,r1 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,160 - sbci r29,255 - st Y,r23 - subi r28,96 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r15,r18 - or r15,r19 - eor r15,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+16,r21 - mov r21,r15 - and r21,r24 - eor r21,r25 - subi r28,176 - sbci r29,255 - st Y,r21 - subi r28,80 - sbc r29,r1 - std Y+33,r14 - std Y+34,r15 - std Y+35,r26 - std Y+36,r27 - std Y+37,r2 - std Y+38,r3 - std Y+39,r4 - std Y+40,r5 - std Y+41,r6 - std Y+42,r7 - std Y+43,r8 - std Y+44,r9 - std Y+45,r10 - std Y+46,r11 - std Y+47,r12 - std Y+48,r13 - subi r28,191 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,80 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,96 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - adiw r28,49 - st Y+,r13 - st Y+,r14 - st Y+,r15 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y,r12 - subi r28,64 - sbc r29,r1 - dec r22 - breq 5812f - rjmp 134b -5812: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r26,Y+17 - ldd r27,Y+18 - ldd r2,Y+19 - ldd r3,Y+20 - ldd r4,Y+21 - ldd r5,Y+22 - ldd r6,Y+23 - ldd r7,Y+24 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - std Z+16,r26 - std Z+17,r27 - std Z+18,r2 - std Z+19,r3 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - ldd r26,Y+33 - ldd r27,Y+34 - ldd r2,Y+35 - ldd r3,Y+36 - ldd r4,Y+37 - ldd r5,Y+38 - ldd r6,Y+39 - ldd r7,Y+40 - ldd r8,Y+41 - ldd r9,Y+42 - ldd r10,Y+43 - ldd r11,Y+44 - ldd r12,Y+45 - ldd r13,Y+46 - ldd r14,Y+47 - ldd r15,Y+48 - std Z+32,r26 - std Z+33,r27 - std Z+34,r2 - std Z+35,r3 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r8 - std Z+41,r9 - std Z+42,r10 - std Z+43,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - adiw r28,49 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - 
ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,64 - sbc r29,r1 - std Z+48,r26 - std Z+49,r27 - std Z+50,r2 - std Z+51,r3 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - std Z+56,r8 - std Z+57,r9 - std Z+58,r10 - std Z+59,r11 - std Z+60,r12 - std Z+61,r13 - std Z+62,r14 - std Z+63,r15 - subi r28,160 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot512_permute_7, .-knot512_permute_7 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_8, @object - .size table_8, 140 -table_8: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 17 - .byte 35 - .byte 71 - .byte 142 - .byte 28 - .byte 56 - .byte 113 - .byte 226 - .byte 196 - .byte 137 - .byte 18 - .byte 37 - .byte 75 - .byte 151 - .byte 46 - .byte 92 - .byte 184 - .byte 112 - .byte 224 - .byte 192 - .byte 129 - .byte 3 - .byte 6 - .byte 12 - .byte 25 - .byte 50 - .byte 100 - .byte 201 - .byte 146 - .byte 36 - .byte 73 - .byte 147 - .byte 38 - .byte 77 - .byte 155 - .byte 55 - .byte 110 - .byte 220 - .byte 185 - .byte 114 - .byte 228 - .byte 200 - .byte 144 - .byte 32 - .byte 65 - .byte 130 - .byte 5 - .byte 10 - .byte 21 - .byte 43 - .byte 86 - .byte 173 - .byte 91 - .byte 182 - .byte 109 - .byte 218 - .byte 181 - .byte 107 - .byte 214 - .byte 172 - .byte 89 - .byte 178 - .byte 101 - .byte 203 - .byte 150 - .byte 44 - .byte 88 - .byte 176 - .byte 97 - .byte 195 - .byte 135 - .byte 15 - .byte 31 - .byte 62 - .byte 125 - .byte 251 - .byte 246 - .byte 237 - .byte 219 - .byte 183 - .byte 111 - .byte 222 - .byte 189 - .byte 122 - .byte 245 - .byte 235 - .byte 215 - .byte 174 - .byte 93 - .byte 186 - .byte 116 - .byte 232 - .byte 209 - .byte 162 - .byte 68 - .byte 136 - .byte 16 - .byte 33 - .byte 67 - .byte 134 - .byte 13 - .byte 27 - .byte 54 - .byte 108 - .byte 216 - .byte 177 - .byte 99 - .byte 199 - .byte 143 - .byte 30 - .byte 60 - .byte 121 - .byte 243 - .byte 231 - .byte 206 - .byte 156 - .byte 57 - .byte 115 - .byte 230 - .byte 204 - .byte 152 - .byte 49 - .byte 98 - .byte 197 - .byte 139 - .byte 22 - .byte 45 - .byte 90 - .byte 180 - .byte 105 - .byte 210 - .byte 164 - .byte 72 - .byte 145 - .byte 34 - .byte 69 - - .text -.global knot512_permute_8 - .type knot512_permute_8, @function -knot512_permute_8: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,96 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 113 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r26,Z+16 - ldd r27,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - 
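The table_8 values deleted above are the 8-bit KNOT round constants held in program memory; they carry the same 140 values as the rc8[] array in the portable C file further down in this patch (decimal here, hexadecimal there). Each permutation round XORs the next constant into the low byte of row 0, as in this sketch modelled on the structure of the portable implementation (x0 is the first 64-bit row, rc points into the constant table):

    for (; rounds > 0; --rounds) {
        x0 ^= *rc++;   /* inject the next 8-bit round constant into row 0 */
        /* substitution layer and linear diffusion layer follow here */
    }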
std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - ldd r26,Z+32 - ldd r27,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r8,Z+40 - ldd r9,Z+41 - ldd r10,Z+42 - ldd r11,Z+43 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - std Y+33,r26 - std Y+34,r27 - std Y+35,r2 - std Y+36,r3 - std Y+37,r4 - std Y+38,r5 - std Y+39,r6 - std Y+40,r7 - std Y+41,r8 - std Y+42,r9 - std Y+43,r10 - std Y+44,r11 - std Y+45,r12 - std Y+46,r13 - std Y+47,r14 - std Y+48,r15 - ldd r26,Z+48 - ldd r27,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r8,Z+56 - ldd r9,Z+57 - ldd r10,Z+58 - ldd r11,Z+59 - ldd r12,Z+60 - ldd r13,Z+61 - ldd r14,Z+62 - ldd r15,Z+63 - adiw r28,49 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y+,r12 - st Y+,r13 - st Y+,r14 - st Y,r15 - subi r28,64 - sbc r29,r1 - push r31 - push r30 - ldi r30,lo8(table_8) - ldi r31,hi8(table_8) -#if defined(RAMPZ) - ldi r17,hh8(table_8) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -134: - ldd r24,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r24,r18 - inc r30 - ldd r18,Y+17 - ldd r19,Y+33 - ldd r20,Y+49 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,175 - sbci r29,255 - st Y,r23 - subi r28,81 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r26,r18 - or r26,r19 - eor r26,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+1,r21 - mov r21,r26 - and r21,r24 - eor r21,r25 - subi r28,191 - sbci r29,255 - st Y,r21 - subi r28,65 - sbc r29,r1 - ldd r24,Y+2 - ldd r18,Y+18 - ldd r19,Y+34 - ldd r20,Y+50 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,174 - sbci r29,255 - st Y,r23 - subi r28,82 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r27,r18 - or r27,r19 - eor r27,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+2,r21 - mov r21,r27 - and r21,r24 - eor r21,r25 - subi r28,190 - sbci r29,255 - st Y,r21 - subi r28,66 - sbc r29,r1 - ldd r24,Y+3 - ldd r18,Y+19 - ldd r19,Y+35 - ldd r20,Y+51 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,173 - sbci r29,255 - st Y,r23 - subi r28,83 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r2,r18 - or r2,r19 - eor r2,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+3,r21 - mov r21,r2 - and r21,r24 - eor r21,r25 - subi r28,189 - sbci r29,255 - st Y,r21 - subi r28,67 - sbc r29,r1 - ldd r24,Y+4 - ldd r18,Y+20 - ldd r19,Y+36 - ldd r20,Y+52 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,172 - sbci r29,255 - st Y,r23 - subi r28,84 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r3,r18 - or r3,r19 - eor r3,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+4,r21 - mov r21,r3 - and r21,r24 - eor r21,r25 - subi r28,188 - sbci r29,255 - st Y,r21 - subi r28,68 - sbc r29,r1 - ldd r24,Y+5 - ldd r18,Y+21 - ldd r19,Y+37 - ldd r20,Y+53 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,171 - sbci r29,255 - st Y,r23 - subi r28,85 - sbc r29,r1 
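Each com/and/or/eor group above evaluates one byte column of the bit-sliced KNOT S-box, the same transformation the portable code later in this patch expresses as the knot_sbox64/knot_sbox32 macros. A byte-wide C sketch of that step, with illustrative names (knot_sbox8 is not part of the original source):

    static void knot_sbox8(uint8_t a0, uint8_t a1, uint8_t a2, uint8_t a3,
                           uint8_t out[4])
    {
        uint8_t t1 = (uint8_t)~a0;              /* com on the row-0 byte */
        uint8_t t3 = a2 ^ (a1 & t1);
        uint8_t b3 = a3 ^ t3;                   /* written back first in the AVR code */
        uint8_t t6 = a3 ^ t1;
        uint8_t b2 = (a1 | a2) ^ t6;
        t1 = a1 ^ a3;
        out[0] = t1 ^ (t3 & t6);                /* new row-0 byte */
        out[1] = t3 ^ (b2 & t1);                /* new row-1 byte */
        out[2] = b2;
        out[3] = b3;
    }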
- mov r16,r20 - eor r16,r24 - mov r4,r18 - or r4,r19 - eor r4,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+5,r21 - mov r21,r4 - and r21,r24 - eor r21,r25 - subi r28,187 - sbci r29,255 - st Y,r21 - subi r28,69 - sbc r29,r1 - ldd r24,Y+6 - ldd r18,Y+22 - ldd r19,Y+38 - ldd r20,Y+54 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,170 - sbci r29,255 - st Y,r23 - subi r28,86 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r5,r18 - or r5,r19 - eor r5,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+6,r21 - mov r21,r5 - and r21,r24 - eor r21,r25 - subi r28,186 - sbci r29,255 - st Y,r21 - subi r28,70 - sbc r29,r1 - ldd r24,Y+7 - ldd r18,Y+23 - ldd r19,Y+39 - ldd r20,Y+55 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,169 - sbci r29,255 - st Y,r23 - subi r28,87 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r6,r18 - or r6,r19 - eor r6,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+7,r21 - mov r21,r6 - and r21,r24 - eor r21,r25 - subi r28,185 - sbci r29,255 - st Y,r21 - subi r28,71 - sbc r29,r1 - ldd r24,Y+8 - ldd r18,Y+24 - ldd r19,Y+40 - ldd r20,Y+56 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,168 - sbci r29,255 - st Y,r23 - subi r28,88 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r7,r18 - or r7,r19 - eor r7,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+8,r21 - mov r21,r7 - and r21,r24 - eor r21,r25 - subi r28,184 - sbci r29,255 - st Y,r21 - subi r28,72 - sbc r29,r1 - ldd r24,Y+9 - ldd r18,Y+25 - ldd r19,Y+41 - ldd r20,Y+57 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,167 - sbci r29,255 - st Y,r23 - subi r28,89 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r8,r18 - or r8,r19 - eor r8,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+9,r21 - mov r21,r8 - and r21,r24 - eor r21,r25 - subi r28,183 - sbci r29,255 - st Y,r21 - subi r28,73 - sbc r29,r1 - ldd r24,Y+10 - ldd r18,Y+26 - ldd r19,Y+42 - ldd r20,Y+58 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,166 - sbci r29,255 - st Y,r23 - subi r28,90 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r9,r18 - or r9,r19 - eor r9,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+10,r21 - mov r21,r9 - and r21,r24 - eor r21,r25 - subi r28,182 - sbci r29,255 - st Y,r21 - subi r28,74 - sbc r29,r1 - ldd r24,Y+11 - ldd r18,Y+27 - ldd r19,Y+43 - ldd r20,Y+59 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,165 - sbci r29,255 - st Y,r23 - subi r28,91 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r10,r18 - or r10,r19 - eor r10,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+11,r21 - mov r21,r10 - and r21,r24 - eor r21,r25 - subi r28,181 - sbci r29,255 - st Y,r21 - subi r28,75 - sbc r29,r1 - ldd r24,Y+12 - ldd r18,Y+28 - ldd r19,Y+44 - ldd r20,Y+60 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,164 - sbci r29,255 - st Y,r23 - subi r28,92 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r11,r18 - or r11,r19 - eor r11,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+12,r21 - mov r21,r11 - and r21,r24 - eor r21,r25 - subi r28,180 - sbci r29,255 - st Y,r21 - subi r28,76 - sbc r29,r1 - ldd r24,Y+13 - 
ldd r18,Y+29 - ldd r19,Y+45 - ldd r20,Y+61 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,163 - sbci r29,255 - st Y,r23 - subi r28,93 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r12,r18 - or r12,r19 - eor r12,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+13,r21 - mov r21,r12 - and r21,r24 - eor r21,r25 - subi r28,179 - sbci r29,255 - st Y,r21 - subi r28,77 - sbc r29,r1 - ldd r24,Y+14 - ldd r18,Y+30 - ldd r19,Y+46 - ldd r20,Y+62 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,162 - sbci r29,255 - st Y,r23 - subi r28,94 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r13,r18 - or r13,r19 - eor r13,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+14,r21 - mov r21,r13 - and r21,r24 - eor r21,r25 - subi r28,178 - sbci r29,255 - st Y,r21 - subi r28,78 - sbc r29,r1 - ldd r24,Y+15 - ldd r18,Y+31 - ldd r19,Y+47 - ldd r20,Y+63 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,161 - sbci r29,255 - st Y,r23 - subi r28,95 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r14,r18 - or r14,r19 - eor r14,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+15,r21 - mov r21,r14 - and r21,r24 - eor r21,r25 - subi r28,177 - sbci r29,255 - st Y,r21 - subi r28,79 - sbc r29,r1 - ldd r24,Y+16 - ldd r18,Y+32 - ldd r19,Y+48 - subi r28,192 - sbci r29,255 - ld r20,Y - subi r28,64 - sbc r29,r1 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,160 - sbci r29,255 - st Y,r23 - subi r28,96 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r15,r18 - or r15,r19 - eor r15,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+16,r21 - mov r21,r15 - and r21,r24 - eor r21,r25 - subi r28,176 - sbci r29,255 - st Y,r21 - subi r28,80 - sbc r29,r1 - std Y+33,r14 - std Y+34,r15 - std Y+35,r26 - std Y+36,r27 - std Y+37,r2 - std Y+38,r3 - std Y+39,r4 - std Y+40,r5 - std Y+41,r6 - std Y+42,r7 - std Y+43,r8 - std Y+44,r9 - std Y+45,r10 - std Y+46,r11 - std Y+47,r12 - std Y+48,r13 - subi r28,191 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,80 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,96 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - adiw r28,49 - st Y+,r13 - st Y+,r14 - st Y+,r15 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y,r12 - subi r28,64 - sbc r29,r1 - dec r22 - breq 5812f - rjmp 134b -5812: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - 
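The lsl/rol chains above rotate a 16-byte row left by one bit, with the trailing adc wrapping the shifted-out carry back into the low byte, and the byte-offset store orders contribute the whole-byte parts of the 16- and 25-bit rotations in KNOT-512's diffusion layer. On a platform with 64-bit words the same row rotation can be written directly, as the portable leftRotate_128 macro later in this patch does; a sketch with the 128-bit row split into two uint64_t halves (knot_rotl128 is an illustrative name):

    /* Rotate a 128-bit row (hi:lo) left by bits, for 0 < bits < 64. */
    static void knot_rotl128(uint64_t *lo, uint64_t *hi, unsigned bits)
    {
        uint64_t l = *lo, h = *hi;
        *lo = (l << bits) | (h >> (64 - bits));
        *hi = (h << bits) | (l >> (64 - bits));
    }

The AVR code instead sticks to 1-bit and whole-byte moves, which matches the note in internal-util.h that only those rotation amounts are cheap without a barrel shifter.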
ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r26,Y+17 - ldd r27,Y+18 - ldd r2,Y+19 - ldd r3,Y+20 - ldd r4,Y+21 - ldd r5,Y+22 - ldd r6,Y+23 - ldd r7,Y+24 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - std Z+16,r26 - std Z+17,r27 - std Z+18,r2 - std Z+19,r3 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - ldd r26,Y+33 - ldd r27,Y+34 - ldd r2,Y+35 - ldd r3,Y+36 - ldd r4,Y+37 - ldd r5,Y+38 - ldd r6,Y+39 - ldd r7,Y+40 - ldd r8,Y+41 - ldd r9,Y+42 - ldd r10,Y+43 - ldd r11,Y+44 - ldd r12,Y+45 - ldd r13,Y+46 - ldd r14,Y+47 - ldd r15,Y+48 - std Z+32,r26 - std Z+33,r27 - std Z+34,r2 - std Z+35,r3 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r8 - std Z+41,r9 - std Z+42,r10 - std Z+43,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - adiw r28,49 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,64 - sbc r29,r1 - std Z+48,r26 - std Z+49,r27 - std Z+50,r2 - std Z+51,r3 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - std Z+56,r8 - std Z+57,r9 - std Z+58,r10 - std Z+59,r11 - std Z+60,r12 - std Z+61,r13 - std Z+62,r14 - std Z+63,r15 - subi r28,160 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot512_permute_8, .-knot512_permute_8 - -#endif diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot.c b/knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot.c deleted file mode 100644 index f8b378e..0000000 --- a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot.c +++ /dev/null @@ -1,301 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-knot.h" - -#if !defined(__AVR__) - -/* Round constants for the KNOT-256, KNOT-384, and KNOT-512 permutations */ -static uint8_t const rc6[52] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06, 0x0c, 0x18, 0x31, 0x22, - 0x05, 0x0a, 0x14, 0x29, 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28, - 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24, 0x09, 0x12, 0x25, 0x0b, - 0x16, 0x2d, 0x1b, 0x37, 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26, - 0x0d, 0x1a, 0x35, 0x2a -}; -static uint8_t const rc7[104] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03, 0x06, 0x0c, 0x18, 0x30, - 0x61, 0x42, 0x05, 0x0a, 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c, - 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b, 0x16, 0x2c, 0x59, 0x33, - 0x67, 0x4e, 0x1d, 0x3a, 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f, - 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43, 0x07, 0x0e, 0x1c, 0x38, - 0x71, 0x62, 0x44, 0x09, 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36, - 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37, 0x6f, 0x5e, 0x3d, 0x7b, - 0x76, 0x6c, 0x58, 0x31, 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25, - 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c -}; -static uint8_t const rc8[140] = { - 0x01, 0x02, 0x04, 0x08, 0x11, 0x23, 0x47, 0x8e, 0x1c, 0x38, 0x71, 0xe2, - 0xc4, 0x89, 0x12, 0x25, 0x4b, 0x97, 0x2e, 0x5c, 0xb8, 0x70, 0xe0, 0xc0, - 0x81, 0x03, 0x06, 0x0c, 0x19, 0x32, 0x64, 0xc9, 0x92, 0x24, 0x49, 0x93, - 0x26, 0x4d, 0x9b, 0x37, 0x6e, 0xdc, 0xb9, 0x72, 0xe4, 0xc8, 0x90, 0x20, - 0x41, 0x82, 0x05, 0x0a, 0x15, 0x2b, 0x56, 0xad, 0x5b, 0xb6, 0x6d, 0xda, - 0xb5, 0x6b, 0xd6, 0xac, 0x59, 0xb2, 0x65, 0xcb, 0x96, 0x2c, 0x58, 0xb0, - 0x61, 0xc3, 0x87, 0x0f, 0x1f, 0x3e, 0x7d, 0xfb, 0xf6, 0xed, 0xdb, 0xb7, - 0x6f, 0xde, 0xbd, 0x7a, 0xf5, 0xeb, 0xd7, 0xae, 0x5d, 0xba, 0x74, 0xe8, - 0xd1, 0xa2, 0x44, 0x88, 0x10, 0x21, 0x43, 0x86, 0x0d, 0x1b, 0x36, 0x6c, - 0xd8, 0xb1, 0x63, 0xc7, 0x8f, 0x1e, 0x3c, 0x79, 0xf3, 0xe7, 0xce, 0x9c, - 0x39, 0x73, 0xe6, 0xcc, 0x98, 0x31, 0x62, 0xc5, 0x8b, 0x16, 0x2d, 0x5a, - 0xb4, 0x69, 0xd2, 0xa4, 0x48, 0x91, 0x22, 0x45 -}; - -/* Applies the KNOT S-box to four 64-bit words in bit-sliced mode */ -#define knot_sbox64(a0, a1, a2, a3, b1, b2, b3) \ - do { \ - uint64_t t1, t3, t6; \ - t1 = ~(a0); \ - t3 = (a2) ^ ((a1) & t1); \ - (b3) = (a3) ^ t3; \ - t6 = (a3) ^ t1; \ - (b2) = ((a1) | (a2)) ^ t6; \ - t1 = (a1) ^ (a3); \ - (a0) = t1 ^ (t3 & t6); \ - (b1) = t3 ^ ((b2) & t1); \ - } while (0) - -/* Applies the KNOT S-box to four 32-bit words in bit-sliced mode */ -#define knot_sbox32(a0, a1, a2, a3, b1, b2, b3) \ - do { \ - uint32_t t1, t3, t6; \ - t1 = ~(a0); \ - t3 = (a2) ^ ((a1) & t1); \ - (b3) = (a3) ^ t3; \ - t6 = (a3) ^ t1; \ - (b2) = ((a1) | (a2)) ^ t6; \ - t1 = (a1) ^ (a3); \ - (a0) = t1 ^ (t3 & t6); \ - (b1) = t3 ^ ((b2) & t1); \ - } while (0) - -static void knot256_permute - (knot256_state_t *state, const uint8_t *rc, uint8_t rounds) -{ - uint64_t b1, b2, b3; - - /* Load the input state into local variables; each row is 64 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = 
le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x1, x2, x3, b1, b2, b3); - - /* Linear diffusion layer */ - x1 = leftRotate1_64(b1); - x2 = leftRotate8_64(b2); - x3 = leftRotate25_64(b3); - } - - /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); -#endif -} - -void knot256_permute_6(knot256_state_t *state, uint8_t rounds) -{ - knot256_permute(state, rc6, rounds); -} - -void knot256_permute_7(knot256_state_t *state, uint8_t rounds) -{ - knot256_permute(state, rc7, rounds); -} - -void knot384_permute_7(knot384_state_t *state, uint8_t rounds) -{ - const uint8_t *rc = rc7; - uint64_t b2, b4, b6; - uint32_t b3, b5, b7; - - /* Load the input state into local variables; each row is 96 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint32_t x1 = state->W[2]; - uint64_t x2 = state->W[3] | (((uint64_t)(state->W[4])) << 32); - uint32_t x3 = state->W[5]; - uint64_t x4 = state->S[3]; - uint32_t x5 = state->W[8]; - uint64_t x6 = state->W[9] | (((uint64_t)(state->W[10])) << 32); - uint32_t x7 = state->W[11]; -#else - uint64_t x0 = le_load_word64(state->B); - uint32_t x1 = le_load_word32(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 12); - uint32_t x3 = le_load_word32(state->B + 20); - uint64_t x4 = le_load_word64(state->B + 24); - uint32_t x5 = le_load_word32(state->B + 32); - uint64_t x6 = le_load_word64(state->B + 36); - uint32_t x7 = le_load_word32(state->B + 44); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x2, x4, x6, b2, b4, b6); - knot_sbox32(x1, x3, x5, x7, b3, b5, b7); - - /* Linear diffusion layer */ - #define leftRotateShort_96(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | ((b1) >> (32 - (bits))); \ - (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ - } while (0) - #define leftRotateLong_96(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | \ - (((uint64_t)(b1)) << ((bits) - 32)) | \ - ((b0) >> (96 - (bits))); \ - (a1) = (uint32_t)(((b0) << ((bits) - 32)) >> 32); \ - } while (0) - leftRotateShort_96(x2, x3, b2, b3, 1); - leftRotateShort_96(x4, x5, b4, b5, 8); - leftRotateLong_96(x6, x7, b6, b7, 55); - } - - /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->W[2] = x1; - state->W[3] = (uint32_t)x2; - state->W[4] = (uint32_t)(x2 >> 32); - state->W[5] = x3; - state->S[3] = x4; - state->W[8] = x5; - state->W[9] = (uint32_t)x6; - state->W[10] = (uint32_t)(x6 >> 32); - state->W[11] = x7; -#else - le_store_word64(state->B, x0); - le_store_word32(state->B + 8, x1); - le_store_word64(state->B + 12, x2); - le_store_word32(state->B + 20, x3); - le_store_word64(state->B + 24, x4); - le_store_word32(state->B + 32, x5); - le_store_word64(state->B + 36, x6); - le_store_word32(state->B + 44, x7); -#endif -} - -static void knot512_permute - (knot512_state_t *state, const uint8_t *rc, uint8_t rounds) -{ - uint64_t b2, b3, b4, b5, b6, b7; - - /* Load 
the input state into local variables; each row is 128 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; - uint64_t x5 = state->S[5]; - uint64_t x6 = state->S[6]; - uint64_t x7 = state->S[7]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); - uint64_t x4 = le_load_word64(state->B + 32); - uint64_t x5 = le_load_word64(state->B + 40); - uint64_t x6 = le_load_word64(state->B + 48); - uint64_t x7 = le_load_word64(state->B + 56); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x2, x4, x6, b2, b4, b6); - knot_sbox64(x1, x3, x5, x7, b3, b5, b7); - - /* Linear diffusion layer */ - #define leftRotate_128(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | ((b1) >> (64 - (bits))); \ - (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ - } while (0) - leftRotate_128(x2, x3, b2, b3, 1); - leftRotate_128(x4, x5, b4, b5, 16); - leftRotate_128(x6, x7, b6, b7, 25); - } - - /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; - state->S[5] = x5; - state->S[6] = x6; - state->S[7] = x7; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); - le_store_word64(state->B + 32, x4); - le_store_word64(state->B + 40, x5); - le_store_word64(state->B + 48, x6); - le_store_word64(state->B + 56, x7); -#endif -} - -void knot512_permute_7(knot512_state_t *state, uint8_t rounds) -{ - knot512_permute(state, rc7, rounds); -} - -void knot512_permute_8(knot512_state_t *state, uint8_t rounds) -{ - knot512_permute(state, rc8, rounds); -} - -#endif /* !__AVR__ */ diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot.h b/knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot.h deleted file mode 100644 index 88a782c..0000000 --- a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-knot.h +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_KNOT_H -#define LW_INTERNAL_KNOT_H - -#include "internal-util.h" - -/** - * \file internal-knot.h - * \brief Permutations that are used by the KNOT AEAD and hash algorithms. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Internal state of the KNOT-256 permutation. - */ -typedef union -{ - uint64_t S[4]; /**< Words of the state */ - uint8_t B[32]; /**< Bytes of the state */ - -} knot256_state_t; - -/** - * \brief Internal state of the KNOT-384 permutation. - */ -typedef union -{ - uint64_t S[6]; /**< 64-bit words of the state */ - uint32_t W[12]; /**< 32-bit words of the state */ - uint8_t B[48]; /**< Bytes of the state */ - -} knot384_state_t; - -/** - * \brief Internal state of the KNOT-512 permutation. - */ -typedef union -{ - uint64_t S[8]; /**< Words of the state */ - uint8_t B[64]; /**< Bytes of the state */ - -} knot512_state_t; - -/** - * \brief Permutes the KNOT-256 state, using 6-bit round constants. - * - * \param state The KNOT-256 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 52. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot256_permute_6(knot256_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-256 state, using 7-bit round constants. - * - * \param state The KNOT-256 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot256_permute_7(knot256_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-384 state, using 7-bit round constants. - * - * \param state The KNOT-384 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot384_permute_7(knot384_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-512 state, using 7-bit round constants. - * - * \param state The KNOT-512 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot512_permute_7(knot512_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-512 state, using 8-bit round constants. - * - * \param state The KNOT-512 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 140. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot512_permute_8(knot512_state_t *state, uint8_t rounds); - -/** - * \brief Generic pointer to a function that performs a KNOT permutation. - * - * \param state Points to the permutation state. - * \param round Number of rounds to perform. - */ -typedef void (*knot_permute_t)(void *state, uint8_t rounds); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-util.h b/knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
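The internal-knot.h header removed above exposes each permutation as a small state union plus a round count. A minimal, self-contained usage sketch against that interface (the absorbed byte and the choice of 52 rounds are illustrative; 52 is the documented maximum for the 6-bit-constant variant):

    #include <string.h>
    #include "internal-knot.h"

    void knot256_example(void)
    {
        knot256_state_t state;
        memset(state.B, 0, sizeof(state.B));  /* 32-byte state, little-endian rows */
        state.B[0] ^= 0x01;                   /* absorb one illustrative byte */
        knot256_permute_6(&state, 52);        /* run the full 52-round permutation */
    }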
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) 
| \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = 
(src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/knot-hash.c b/knot/Implementations/crypto_hash/knot256v2/rhys-avr/knot-hash.c deleted file mode 100644 index a4edecd..0000000 --- a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/knot-hash.c +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "knot.h" -#include "internal-knot.h" -#include - -aead_hash_algorithm_t const knot_hash_256_256_algorithm = { - "KNOT-HASH-256-256", - sizeof(int), - KNOT_HASH_256_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_hash_256_256, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const knot_hash_256_384_algorithm = { - "KNOT-HASH-256-384", - sizeof(int), - KNOT_HASH_256_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_hash_256_384, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const knot_hash_384_384_algorithm = { - "KNOT-HASH-384-384", - sizeof(int), - KNOT_HASH_384_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_hash_384_384, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const knot_hash_512_512_algorithm = { - "KNOT-HASH-512-512", - sizeof(int), - KNOT_HASH_512_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_hash_512_512, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \brief Input rate for KNOT-HASH-256-256. - */ -#define KNOT_HASH_256_256_RATE 4 - -/** - * \brief Input rate for KNOT-HASH-256-384. - */ -#define KNOT_HASH_256_384_RATE 16 - -/** - * \brief Input rate for KNOT-HASH-384-384. - */ -#define KNOT_HASH_384_384_RATE 6 - -/** - * \brief Input rate for KNOT-HASH-512-512. - */ -#define KNOT_HASH_512_512_RATE 8 - -int knot_hash_256_256 - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - knot256_state_t state; - unsigned temp; - memset(state.B, 0, sizeof(state.B)); - while (inlen >= KNOT_HASH_256_256_RATE) { - lw_xor_block(state.B, in, KNOT_HASH_256_256_RATE); - knot256_permute_7(&state, 68); - in += KNOT_HASH_256_256_RATE; - inlen -= KNOT_HASH_256_256_RATE; - } - temp = (unsigned)inlen; - lw_xor_block(state.B, in, temp); - state.B[temp] ^= 0x01; - knot256_permute_7(&state, 68); - memcpy(out, state.B, KNOT_HASH_256_SIZE / 2); - knot256_permute_7(&state, 68); - memcpy(out + KNOT_HASH_256_SIZE / 2, state.B, KNOT_HASH_256_SIZE / 2); - return 0; -} - -int knot_hash_256_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - knot384_state_t state; - unsigned temp; - memset(state.B, 0, sizeof(state.B)); - state.B[sizeof(state.B) - 1] ^= 0x80; - while (inlen >= KNOT_HASH_256_384_RATE) { - lw_xor_block(state.B, in, KNOT_HASH_256_384_RATE); - knot384_permute_7(&state, 80); - in += KNOT_HASH_256_384_RATE; - inlen -= KNOT_HASH_256_384_RATE; - } - temp = (unsigned)inlen; - lw_xor_block(state.B, in, temp); - state.B[temp] ^= 0x01; - knot384_permute_7(&state, 80); - memcpy(out, state.B, KNOT_HASH_256_SIZE / 2); - knot384_permute_7(&state, 80); - memcpy(out + KNOT_HASH_256_SIZE / 2, state.B, KNOT_HASH_256_SIZE / 2); - return 0; -} - -int knot_hash_384_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - knot384_state_t state; - unsigned temp; - memset(state.B, 0, sizeof(state.B)); - while (inlen >= KNOT_HASH_384_384_RATE) { - lw_xor_block(state.B, in, KNOT_HASH_384_384_RATE); - knot384_permute_7(&state, 104); - in += KNOT_HASH_384_384_RATE; - inlen -= KNOT_HASH_384_384_RATE; - } - temp = (unsigned)inlen; - lw_xor_block(state.B, in, temp); - state.B[temp] ^= 0x01; - knot384_permute_7(&state, 104); - memcpy(out, state.B, KNOT_HASH_384_SIZE / 
2); - knot384_permute_7(&state, 104); - memcpy(out + KNOT_HASH_384_SIZE / 2, state.B, KNOT_HASH_384_SIZE / 2); - return 0; -} - -int knot_hash_512_512 - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - knot512_state_t state; - unsigned temp; - memset(state.B, 0, sizeof(state.B)); - while (inlen >= KNOT_HASH_512_512_RATE) { - lw_xor_block(state.B, in, KNOT_HASH_512_512_RATE); - knot512_permute_8(&state, 140); - in += KNOT_HASH_512_512_RATE; - inlen -= KNOT_HASH_512_512_RATE; - } - temp = (unsigned)inlen; - lw_xor_block(state.B, in, temp); - state.B[temp] ^= 0x01; - knot512_permute_8(&state, 140); - memcpy(out, state.B, KNOT_HASH_512_SIZE / 2); - knot512_permute_8(&state, 140); - memcpy(out + KNOT_HASH_512_SIZE / 2, state.B, KNOT_HASH_512_SIZE / 2); - return 0; -} diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/knot.h b/knot/Implementations/crypto_hash/knot256v2/rhys-avr/knot.h deleted file mode 100644 index e2c5198..0000000 --- a/knot/Implementations/crypto_hash/knot256v2/rhys-avr/knot.h +++ /dev/null @@ -1,459 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_KNOT_H -#define LWCRYPTO_KNOT_H - -#include "aead-common.h" - -/** - * \file knot.h - * \brief KNOT authenticated encryption and hash algorithms. - * - * KNOT is a family of authenticated encryption and hash algorithms built - * around a permutation and the MonkeyDuplex sponge construction. The - * family members are: - * - * \li KNOT-AEAD-128-256 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag, built around a 256-bit permutation. This is the primary - * encryption member of the family. - * \li KNOT-AEAD-128-384 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag, built around a 384-bit permutation. - * \li KNOT-AEAD-192-384 with a 192-bit key, a 192-bit nonce, and a - * 192-bit tag, built around a 384-bit permutation. - * \li KNOT-AEAD-256-512 with a 256-bit key, a 256-bit nonce, and a - * 256-bit tag, built around a 512-bit permutation. - * \li KNOT-HASH-256-256 with a 256-bit hash output, built around a - * 256-bit permutation. This is the primary hashing member of the family. - * \li KNOT-HASH-256-384 with a 256-bit hash output, built around a - * 384-bit permutation. - * \li KNOT-HASH-384-384 with a 384-bit hash output, built around a - * 384-bit permutation. - * \li KNOT-HASH-512-512 with a 512-bit hash output, built around a - * 512-bit permutation. 
- * - * References: https://csrc.nist.gov/CSRC/media/Projects/lightweight-cryptography/documents/round-2/spec-doc-rnd2/knot-spec-round.pdf - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-128-256 and - * KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_NONCE_SIZE 16 - -/** - * \brief Size of the key for KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_KEY_SIZE 24 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_TAG_SIZE 24 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_NONCE_SIZE 24 - -/** - * \brief Size of the key for KNOT-AEAD-256-512. - */ -#define KNOT_AEAD_256_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-256-512. - */ -#define KNOT_AEAD_256_TAG_SIZE 32 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_256_NONCE_SIZE 32 - -/** - * \brief Size of the hash for KNOT-HASH-256-256 and KNOT-HASH-256-384. - */ -#define KNOT_HASH_256_SIZE 32 - -/** - * \brief Size of the hash for KNOT-HASH-384-384. - */ -#define KNOT_HASH_384_SIZE 48 - -/** - * \brief Size of the hash for KNOT-HASH-512-512. - */ -#define KNOT_HASH_512_SIZE 64 - -/** - * \brief Meta-information block for the KNOT-AEAD-128-256 cipher. - */ -extern aead_cipher_t const knot_aead_128_256_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-128-384 cipher. - */ -extern aead_cipher_t const knot_aead_128_384_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-192-384 cipher. - */ -extern aead_cipher_t const knot_aead_192_384_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-256-512 cipher. - */ -extern aead_cipher_t const knot_aead_256_512_cipher; - -/** - * \brief Meta-information block for the KNOT-HASH-256-256 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_256_256_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-256-384 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_256_384_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-384-384 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_384_384_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-512-512 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_512_512_algorithm; - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. 
- * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_128_256_decrypt() - */ -int knot_aead_128_256_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_128_256_encrypt() - */ -int knot_aead_128_256_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-384. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_128_384_decrypt() - */ -int knot_aead_128_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-384. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
- * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_128_384_encrypt() - */ -int knot_aead_128_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-192-384. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_192_384_decrypt() - */ -int knot_aead_192_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-192-384. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_192_384_encrypt() - */ -int knot_aead_192_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-256-512. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. 
- * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_256_512_decrypt() - */ -int knot_aead_256_512_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-256-512. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_256_512_encrypt() - */ -int knot_aead_256_512_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with KNOT-HASH-256-256. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_256_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_256_256 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-256-384. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_256_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_256_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-384-384. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_384_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. 
- */ -int knot_hash_384_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-512-512. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_512_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_512_512 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys/aead-common.c b/knot/Implementations/crypto_hash/knot256v2/rhys/aead-common.c new file mode 100644 index 0000000..84fc53a --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v2/rhys/aead-common.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "aead-common.h" + +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = (accum - 1) >> 8; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} + +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size, int precheck) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = ((accum - 1) >> 8) & precheck; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys/aead-common.h b/knot/Implementations/crypto_hash/knot256v2/rhys/aead-common.h new file mode 100644 index 0000000..2be95eb --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v2/rhys/aead-common.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_AEAD_COMMON_H +#define LWCRYPTO_AEAD_COMMON_H + +#include + +/** + * \file aead-common.h + * \brief Definitions that are common across AEAD schemes. + * + * AEAD stands for "Authenticated Encryption with Associated Data". + * It is a standard API pattern for securely encrypting and + * authenticating packets of data. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts and authenticates a packet with an AEAD scheme. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. 
+ * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + */ +typedef int (*aead_cipher_encrypt_t) + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with an AEAD scheme. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + */ +typedef int (*aead_cipher_decrypt_t) + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data. + * + * \param out Buffer to receive the hash output. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +typedef int (*aead_hash_t) + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a hashing operation. + * + * \param state Hash state to be initialized. + */ +typedef void (*aead_hash_init_t)(void *state); + +/** + * \brief Updates a hash state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + */ +typedef void (*aead_hash_update_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Returns the final hash value from a hashing operation. + * + * \param Hash state to be finalized. + * \param out Points to the output buffer to receive the hash value. + */ +typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); + +/** + * \brief Aborbs more input data into an XOF state. + * + * \param state XOF state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. + * + * \sa ascon_xof_init(), ascon_xof_squeeze() + */ +typedef void (*aead_xof_absorb_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Squeezes output data from an XOF state. 
+ * + * \param state XOF state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + */ +typedef void (*aead_xof_squeeze_t) + (void *state, unsigned char *out, unsigned long long outlen); + +/** + * \brief No special AEAD features. + */ +#define AEAD_FLAG_NONE 0x0000 + +/** + * \brief The natural byte order of the AEAD cipher is little-endian. + * + * If this flag is not present, then the natural byte order of the + * AEAD cipher should be assumed to be big-endian. + * + * The natural byte order may be useful when formatting packet sequence + * numbers as nonces. The application needs to know whether the sequence + * number should be packed into the leading or trailing bytes of the nonce. + */ +#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 + +/** + * \brief Meta-information about an AEAD cipher. + */ +typedef struct +{ + const char *name; /**< Name of the cipher */ + unsigned key_len; /**< Length of the key in bytes */ + unsigned nonce_len; /**< Length of the nonce in bytes */ + unsigned tag_len; /**< Length of the tag in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ + aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ + +} aead_cipher_t; + +/** + * \brief Meta-information about a hash algorithm that is related to an AEAD. + * + * Regular hash algorithms should provide the "hash", "init", "update", + * and "finalize" functions. Extensible Output Functions (XOF's) should + * proivde the "hash", "init", "absorb", and "squeeze" functions. + */ +typedef struct +{ + const char *name; /**< Name of the hash algorithm */ + size_t state_size; /**< Size of the incremental state structure */ + unsigned hash_len; /**< Length of the hash in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_hash_t hash; /**< All in one hashing function */ + aead_hash_init_t init; /**< Incremental hash/XOF init function */ + aead_hash_update_t update; /**< Incremental hash update function */ + aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ + aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ + aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ + +} aead_hash_algorithm_t; + +/** + * \brief Check an authentication tag in constant time. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + */ +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len); + +/** + * \brief Check an authentication tag in constant time with a previous check. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
+ * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + * + * This version can be used to incorporate other information about the + * correctness of the plaintext into the final result. + */ +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len, int precheck); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys/api.h b/knot/Implementations/crypto_hash/knot256v2/rhys/api.h new file mode 100644 index 0000000..ae8c7f6 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v2/rhys/api.h @@ -0,0 +1 @@ +#define CRYPTO_BYTES 32 diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys/hash.c b/knot/Implementations/crypto_hash/knot256v2/rhys/hash.c new file mode 100644 index 0000000..43a2745 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v2/rhys/hash.c @@ -0,0 +1,8 @@ + +#include "knot.h" + +int crypto_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + return knot_hash_256_384(out, in, inlen); +} diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot-256-avr.S b/knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot-256-avr.S new file mode 100644 index 0000000..15e6389 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot-256-avr.S @@ -0,0 +1,1093 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_6, @object + .size table_6, 52 +table_6: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 33 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 49 + .byte 34 + .byte 5 + .byte 10 + .byte 20 + .byte 41 + .byte 19 + .byte 39 + .byte 15 + .byte 30 + .byte 61 + .byte 58 + .byte 52 + .byte 40 + .byte 17 + .byte 35 + .byte 7 + .byte 14 + .byte 28 + .byte 57 + .byte 50 + .byte 36 + .byte 9 + .byte 18 + .byte 37 + .byte 11 + .byte 22 + .byte 45 + .byte 27 + .byte 55 + .byte 46 + .byte 29 + .byte 59 + .byte 54 + .byte 44 + .byte 25 + .byte 51 + .byte 38 + .byte 13 + .byte 26 + .byte 53 + .byte 42 + + .text +.global knot256_permute_6 + .type knot256_permute_6, @function +knot256_permute_6: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 57 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r8 + std Y+18,r9 + std Y+19,r10 + std Y+20,r11 + std 
Y+21,r12 + std Y+22,r13 + std Y+23,r14 + std Y+24,r15 + push r31 + push r30 + ldi r30,lo8(table_6) + ldi r31,hi8(table_6) +#if defined(RAMPZ) + ldi r17,hh8(table_6) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +59: +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + eor r18,r23 + inc r30 + ldd r23,Y+1 + ldd r4,Y+9 + ldd r5,Y+17 + mov r24,r18 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+33,r7 + mov r16,r5 + eor r16,r24 + mov r8,r23 + or r8,r4 + eor r8,r16 + mov r24,r23 + eor r24,r5 + mov r18,r25 + and r18,r16 + eor r18,r24 + mov r6,r8 + and r6,r24 + eor r6,r25 + std Y+25,r6 + ldd r23,Y+2 + ldd r4,Y+10 + ldd r5,Y+18 + mov r24,r19 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+34,r7 + mov r16,r5 + eor r16,r24 + mov r9,r23 + or r9,r4 + eor r9,r16 + mov r24,r23 + eor r24,r5 + mov r19,r25 + and r19,r16 + eor r19,r24 + mov r6,r9 + and r6,r24 + eor r6,r25 + std Y+26,r6 + ldd r23,Y+3 + ldd r4,Y+11 + ldd r5,Y+19 + mov r24,r20 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+35,r7 + mov r16,r5 + eor r16,r24 + mov r10,r23 + or r10,r4 + eor r10,r16 + mov r24,r23 + eor r24,r5 + mov r20,r25 + and r20,r16 + eor r20,r24 + mov r6,r10 + and r6,r24 + eor r6,r25 + std Y+27,r6 + ldd r23,Y+4 + ldd r4,Y+12 + ldd r5,Y+20 + mov r24,r21 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+36,r7 + mov r16,r5 + eor r16,r24 + mov r11,r23 + or r11,r4 + eor r11,r16 + mov r24,r23 + eor r24,r5 + mov r21,r25 + and r21,r16 + eor r21,r24 + mov r6,r11 + and r6,r24 + eor r6,r25 + std Y+28,r6 + ldd r23,Y+5 + ldd r4,Y+13 + ldd r5,Y+21 + mov r24,r26 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+37,r7 + mov r16,r5 + eor r16,r24 + mov r12,r23 + or r12,r4 + eor r12,r16 + mov r24,r23 + eor r24,r5 + mov r26,r25 + and r26,r16 + eor r26,r24 + mov r6,r12 + and r6,r24 + eor r6,r25 + std Y+29,r6 + ldd r23,Y+6 + ldd r4,Y+14 + ldd r5,Y+22 + mov r24,r27 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+38,r7 + mov r16,r5 + eor r16,r24 + mov r13,r23 + or r13,r4 + eor r13,r16 + mov r24,r23 + eor r24,r5 + mov r27,r25 + and r27,r16 + eor r27,r24 + mov r6,r13 + and r6,r24 + eor r6,r25 + std Y+30,r6 + ldd r23,Y+7 + ldd r4,Y+15 + ldd r5,Y+23 + mov r24,r2 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+39,r7 + mov r16,r5 + eor r16,r24 + mov r14,r23 + or r14,r4 + eor r14,r16 + mov r24,r23 + eor r24,r5 + mov r2,r25 + and r2,r16 + eor r2,r24 + mov r6,r14 + and r6,r24 + eor r6,r25 + std Y+31,r6 + ldd r23,Y+8 + ldd r4,Y+16 + ldd r5,Y+24 + mov r24,r3 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+40,r7 + mov r16,r5 + eor r16,r24 + mov r15,r23 + or r15,r4 + eor r15,r16 + mov r24,r23 + eor r24,r5 + mov r3,r25 + and r3,r16 + eor r3,r24 + mov r6,r15 + and r6,r24 + eor r6,r25 + std Y+32,r6 + std Y+9,r15 + std Y+10,r8 + std Y+11,r9 + std Y+12,r10 + std Y+13,r11 + std Y+14,r12 + std Y+15,r13 + std Y+16,r14 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Y+33 + ldd r9,Y+34 + ldd 
r10,Y+35 + ldd r11,Y+36 + ldd r12,Y+37 + ldd r13,Y+38 + ldd r14,Y+39 + ldd r15,Y+40 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+17,r13 + std Y+18,r14 + std Y+19,r15 + std Y+20,r8 + std Y+21,r9 + std Y+22,r10 + std Y+23,r11 + std Y+24,r12 + dec r22 + breq 5322f + rjmp 59b +5322: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + ldd r12,Y+5 + ldd r13,Y+6 + ldd r14,Y+7 + ldd r15,Y+8 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + ldd r8,Y+17 + ldd r9,Y+18 + ldd r10,Y+19 + ldd r11,Y+20 + ldd r12,Y+21 + ldd r13,Y+22 + ldd r14,Y+23 + ldd r15,Y+24 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + adiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot256_permute_6, .-knot256_permute_6 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 + .byte 92 + + .text +.global knot256_permute_7 + .type knot256_permute_7, @function +knot256_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 57 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Z+16 + ldd 
r9,Z+17 + ldd r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r8 + std Y+18,r9 + std Y+19,r10 + std Y+20,r11 + std Y+21,r12 + std Y+22,r13 + std Y+23,r14 + std Y+24,r15 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r17,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +59: +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + eor r18,r23 + inc r30 + ldd r23,Y+1 + ldd r4,Y+9 + ldd r5,Y+17 + mov r24,r18 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+33,r7 + mov r16,r5 + eor r16,r24 + mov r8,r23 + or r8,r4 + eor r8,r16 + mov r24,r23 + eor r24,r5 + mov r18,r25 + and r18,r16 + eor r18,r24 + mov r6,r8 + and r6,r24 + eor r6,r25 + std Y+25,r6 + ldd r23,Y+2 + ldd r4,Y+10 + ldd r5,Y+18 + mov r24,r19 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+34,r7 + mov r16,r5 + eor r16,r24 + mov r9,r23 + or r9,r4 + eor r9,r16 + mov r24,r23 + eor r24,r5 + mov r19,r25 + and r19,r16 + eor r19,r24 + mov r6,r9 + and r6,r24 + eor r6,r25 + std Y+26,r6 + ldd r23,Y+3 + ldd r4,Y+11 + ldd r5,Y+19 + mov r24,r20 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+35,r7 + mov r16,r5 + eor r16,r24 + mov r10,r23 + or r10,r4 + eor r10,r16 + mov r24,r23 + eor r24,r5 + mov r20,r25 + and r20,r16 + eor r20,r24 + mov r6,r10 + and r6,r24 + eor r6,r25 + std Y+27,r6 + ldd r23,Y+4 + ldd r4,Y+12 + ldd r5,Y+20 + mov r24,r21 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+36,r7 + mov r16,r5 + eor r16,r24 + mov r11,r23 + or r11,r4 + eor r11,r16 + mov r24,r23 + eor r24,r5 + mov r21,r25 + and r21,r16 + eor r21,r24 + mov r6,r11 + and r6,r24 + eor r6,r25 + std Y+28,r6 + ldd r23,Y+5 + ldd r4,Y+13 + ldd r5,Y+21 + mov r24,r26 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+37,r7 + mov r16,r5 + eor r16,r24 + mov r12,r23 + or r12,r4 + eor r12,r16 + mov r24,r23 + eor r24,r5 + mov r26,r25 + and r26,r16 + eor r26,r24 + mov r6,r12 + and r6,r24 + eor r6,r25 + std Y+29,r6 + ldd r23,Y+6 + ldd r4,Y+14 + ldd r5,Y+22 + mov r24,r27 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+38,r7 + mov r16,r5 + eor r16,r24 + mov r13,r23 + or r13,r4 + eor r13,r16 + mov r24,r23 + eor r24,r5 + mov r27,r25 + and r27,r16 + eor r27,r24 + mov r6,r13 + and r6,r24 + eor r6,r25 + std Y+30,r6 + ldd r23,Y+7 + ldd r4,Y+15 + ldd r5,Y+23 + mov r24,r2 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+39,r7 + mov r16,r5 + eor r16,r24 + mov r14,r23 + or r14,r4 + eor r14,r16 + mov r24,r23 + eor r24,r5 + mov r2,r25 + and r2,r16 + eor r2,r24 + mov r6,r14 + and r6,r24 + eor r6,r25 + std Y+31,r6 + ldd r23,Y+8 + ldd r4,Y+16 + ldd r5,Y+24 + mov r24,r3 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+40,r7 + mov r16,r5 + eor r16,r24 + mov r15,r23 + or r15,r4 + eor r15,r16 + mov r24,r23 + eor r24,r5 + mov r3,r25 + and r3,r16 + eor r3,r24 + mov r6,r15 + and r6,r24 + eor r6,r25 + std Y+32,r6 + std Y+9,r15 + std Y+10,r8 + std Y+11,r9 + std Y+12,r10 + std Y+13,r11 
+ std Y+14,r12 + std Y+15,r13 + std Y+16,r14 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd r11,Y+36 + ldd r12,Y+37 + ldd r13,Y+38 + ldd r14,Y+39 + ldd r15,Y+40 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+17,r13 + std Y+18,r14 + std Y+19,r15 + std Y+20,r8 + std Y+21,r9 + std Y+22,r10 + std Y+23,r11 + std Y+24,r12 + dec r22 + breq 5322f + rjmp 59b +5322: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + ldd r12,Y+5 + ldd r13,Y+6 + ldd r14,Y+7 + ldd r15,Y+8 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + ldd r8,Y+17 + ldd r9,Y+18 + ldd r10,Y+19 + ldd r11,Y+20 + ldd r12,Y+21 + ldd r13,Y+22 + ldd r14,Y+23 + ldd r15,Y+24 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + adiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot256_permute_7, .-knot256_permute_7 + +#endif diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot-384-avr.S b/knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot-384-avr.S new file mode 100644 index 0000000..4d15898 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot-384-avr.S @@ -0,0 +1,833 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 
+ .byte 110 + .byte 92 + + .text +.global knot384_permute_7 + .type knot384_permute_7, @function +knot384_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,72 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 87 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + ldd r4,Z+16 + ldd r5,Z+17 + ldd r6,Z+18 + ldd r7,Z+19 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + std Y+13,r26 + std Y+14,r27 + std Y+15,r2 + std Y+16,r3 + std Y+17,r4 + std Y+18,r5 + std Y+19,r6 + std Y+20,r7 + std Y+21,r8 + std Y+22,r9 + std Y+23,r10 + std Y+24,r11 + ldd r26,Z+24 + ldd r27,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r4,Z+28 + ldd r5,Z+29 + ldd r6,Z+30 + ldd r7,Z+31 + ldd r8,Z+32 + ldd r9,Z+33 + ldd r10,Z+34 + ldd r11,Z+35 + std Y+25,r26 + std Y+26,r27 + std Y+27,r2 + std Y+28,r3 + std Y+29,r4 + std Y+30,r5 + std Y+31,r6 + std Y+32,r7 + std Y+33,r8 + std Y+34,r9 + std Y+35,r10 + std Y+36,r11 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+37,r26 + std Y+38,r27 + std Y+39,r2 + std Y+40,r3 + std Y+41,r4 + std Y+42,r5 + std Y+43,r6 + std Y+44,r7 + std Y+45,r8 + std Y+46,r9 + std Y+47,r10 + std Y+48,r11 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r24,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif +99: + ldd r12,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + inc r30 + ldd r18,Y+13 + ldd r19,Y+25 + ldd r20,Y+37 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+61,r23 + mov r14,r20 + eor r14,r12 + mov r26,r18 + or r26,r19 + eor r26,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+1,r21 + mov r21,r26 + and r21,r12 + eor r21,r13 + std Y+49,r21 + ldd r12,Y+2 + ldd r18,Y+14 + ldd r19,Y+26 + ldd r20,Y+38 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+62,r23 + mov r14,r20 + eor r14,r12 + mov r27,r18 + or r27,r19 + eor r27,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+2,r21 + mov r21,r27 + and r21,r12 + eor r21,r13 + std Y+50,r21 + ldd r12,Y+3 + ldd r18,Y+15 + ldd r19,Y+27 + ldd r20,Y+39 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+63,r23 + mov r14,r20 + eor r14,r12 + mov r2,r18 + or r2,r19 + eor r2,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+3,r21 + mov r21,r2 + and r21,r12 + eor r21,r13 + std Y+51,r21 + ldd r12,Y+4 + ldd r18,Y+16 + ldd r19,Y+28 + ldd r20,Y+40 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,192 + sbci r29,255 + st Y,r23 + subi r28,64 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r3,r18 + or r3,r19 + eor r3,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 
+ and r21,r14 + eor r21,r12 + std Y+4,r21 + mov r21,r3 + and r21,r12 + eor r21,r13 + std Y+52,r21 + ldd r12,Y+5 + ldd r18,Y+17 + ldd r19,Y+29 + ldd r20,Y+41 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,191 + sbci r29,255 + st Y,r23 + subi r28,65 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r4,r18 + or r4,r19 + eor r4,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+5,r21 + mov r21,r4 + and r21,r12 + eor r21,r13 + std Y+53,r21 + ldd r12,Y+6 + ldd r18,Y+18 + ldd r19,Y+30 + ldd r20,Y+42 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,190 + sbci r29,255 + st Y,r23 + subi r28,66 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r5,r18 + or r5,r19 + eor r5,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+6,r21 + mov r21,r5 + and r21,r12 + eor r21,r13 + std Y+54,r21 + ldd r12,Y+7 + ldd r18,Y+19 + ldd r19,Y+31 + ldd r20,Y+43 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,189 + sbci r29,255 + st Y,r23 + subi r28,67 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r6,r18 + or r6,r19 + eor r6,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+7,r21 + mov r21,r6 + and r21,r12 + eor r21,r13 + std Y+55,r21 + ldd r12,Y+8 + ldd r18,Y+20 + ldd r19,Y+32 + ldd r20,Y+44 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,188 + sbci r29,255 + st Y,r23 + subi r28,68 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r7,r18 + or r7,r19 + eor r7,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+8,r21 + mov r21,r7 + and r21,r12 + eor r21,r13 + std Y+56,r21 + ldd r12,Y+9 + ldd r18,Y+21 + ldd r19,Y+33 + ldd r20,Y+45 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,187 + sbci r29,255 + st Y,r23 + subi r28,69 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r8,r18 + or r8,r19 + eor r8,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+9,r21 + mov r21,r8 + and r21,r12 + eor r21,r13 + std Y+57,r21 + ldd r12,Y+10 + ldd r18,Y+22 + ldd r19,Y+34 + ldd r20,Y+46 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,186 + sbci r29,255 + st Y,r23 + subi r28,70 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r9,r18 + or r9,r19 + eor r9,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+10,r21 + mov r21,r9 + and r21,r12 + eor r21,r13 + std Y+58,r21 + ldd r12,Y+11 + ldd r18,Y+23 + ldd r19,Y+35 + ldd r20,Y+47 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,185 + sbci r29,255 + st Y,r23 + subi r28,71 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r10,r18 + or r10,r19 + eor r10,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+11,r21 + mov r21,r10 + and r21,r12 + eor r21,r13 + std Y+59,r21 + ldd r12,Y+12 + ldd r18,Y+24 + ldd r19,Y+36 + ldd r20,Y+48 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,184 + sbci r29,255 + st Y,r23 + subi r28,72 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r11,r18 + or r11,r19 + eor r11,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+12,r21 + mov r21,r11 + and r21,r12 + eor r21,r13 + std Y+60,r21 + std Y+25,r11 + std Y+26,r26 + std Y+27,r27 + std Y+28,r2 + std Y+29,r3 + std Y+30,r4 + std Y+31,r5 + std Y+32,r6 + std Y+33,r7 + std 
Y+34,r8 + std Y+35,r9 + std Y+36,r10 + ldd r26,Y+49 + ldd r27,Y+50 + ldd r2,Y+51 + ldd r3,Y+52 + ldd r4,Y+53 + ldd r5,Y+54 + ldd r6,Y+55 + ldd r7,Y+56 + ldd r8,Y+57 + ldd r9,Y+58 + ldd r10,Y+59 + ldd r11,Y+60 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + adc r26,r1 + std Y+13,r26 + std Y+14,r27 + std Y+15,r2 + std Y+16,r3 + std Y+17,r4 + std Y+18,r5 + std Y+19,r6 + std Y+20,r7 + std Y+21,r8 + std Y+22,r9 + std Y+23,r10 + std Y+24,r11 + adiw r28,61 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y + subi r28,72 + sbc r29,r1 + bst r26,0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r3 + ror r2 + ror r27 + ror r26 + bld r11,7 + std Y+37,r5 + std Y+38,r6 + std Y+39,r7 + std Y+40,r8 + std Y+41,r9 + std Y+42,r10 + std Y+43,r11 + std Y+44,r26 + std Y+45,r27 + std Y+46,r2 + std Y+47,r3 + std Y+48,r4 + dec r22 + breq 5542f + rjmp 99b +5542: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r2,Y+15 + ldd r3,Y+16 + ldd r4,Y+17 + ldd r5,Y+18 + ldd r6,Y+19 + ldd r7,Y+20 + ldd r8,Y+21 + ldd r9,Y+22 + ldd r10,Y+23 + ldd r11,Y+24 + std Z+12,r26 + std Z+13,r27 + std Z+14,r2 + std Z+15,r3 + std Z+16,r4 + std Z+17,r5 + std Z+18,r6 + std Z+19,r7 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + ldd r26,Y+25 + ldd r27,Y+26 + ldd r2,Y+27 + ldd r3,Y+28 + ldd r4,Y+29 + ldd r5,Y+30 + ldd r6,Y+31 + ldd r7,Y+32 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd r11,Y+36 + std Z+24,r26 + std Z+25,r27 + std Z+26,r2 + std Z+27,r3 + std Z+28,r4 + std Z+29,r5 + std Z+30,r6 + std Z+31,r7 + std Z+32,r8 + std Z+33,r9 + std Z+34,r10 + std Z+35,r11 + ldd r26,Y+37 + ldd r27,Y+38 + ldd r2,Y+39 + ldd r3,Y+40 + ldd r4,Y+41 + ldd r5,Y+42 + ldd r6,Y+43 + ldd r7,Y+44 + ldd r8,Y+45 + ldd r9,Y+46 + ldd r10,Y+47 + ldd r11,Y+48 + std Z+36,r26 + std Z+37,r27 + std Z+38,r2 + std Z+39,r3 + std Z+40,r4 + std Z+41,r5 + std Z+42,r6 + std Z+43,r7 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + subi r28,184 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot384_permute_7, .-knot384_permute_7 + +#endif diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot-512-avr.S b/knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot-512-avr.S new file mode 100644 index 0000000..6f92ac3 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot-512-avr.S @@ -0,0 +1,2315 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + 
.byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 + .byte 92 + + .text +.global knot512_permute_7 + .type knot512_permute_7, @function +knot512_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,96 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 113 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r26,Z+16 + ldd r27,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + ldd r26,Z+32 + ldd r27,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r8,Z+40 + ldd r9,Z+41 + ldd r10,Z+42 + ldd r11,Z+43 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + std Y+33,r26 + std Y+34,r27 + std Y+35,r2 + std Y+36,r3 + std Y+37,r4 + std Y+38,r5 + std Y+39,r6 + std Y+40,r7 + std Y+41,r8 + std Y+42,r9 + std Y+43,r10 + std Y+44,r11 + std Y+45,r12 + std Y+46,r13 + std Y+47,r14 + std Y+48,r15 + ldd r26,Z+48 + ldd r27,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r8,Z+56 + ldd r9,Z+57 + ldd r10,Z+58 + ldd r11,Z+59 + ldd r12,Z+60 + ldd r13,Z+61 + ldd r14,Z+62 + ldd r15,Z+63 + adiw r28,49 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y+,r12 + st Y+,r13 + st Y+,r14 + st Y,r15 + subi r28,64 + sbc r29,r1 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r17,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +134: + ldd r24,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r24,r18 + inc r30 + ldd r18,Y+17 + ldd r19,Y+33 + ldd 
r20,Y+49 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,175 + sbci r29,255 + st Y,r23 + subi r28,81 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r26,r18 + or r26,r19 + eor r26,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+1,r21 + mov r21,r26 + and r21,r24 + eor r21,r25 + subi r28,191 + sbci r29,255 + st Y,r21 + subi r28,65 + sbc r29,r1 + ldd r24,Y+2 + ldd r18,Y+18 + ldd r19,Y+34 + ldd r20,Y+50 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,174 + sbci r29,255 + st Y,r23 + subi r28,82 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r27,r18 + or r27,r19 + eor r27,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+2,r21 + mov r21,r27 + and r21,r24 + eor r21,r25 + subi r28,190 + sbci r29,255 + st Y,r21 + subi r28,66 + sbc r29,r1 + ldd r24,Y+3 + ldd r18,Y+19 + ldd r19,Y+35 + ldd r20,Y+51 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,173 + sbci r29,255 + st Y,r23 + subi r28,83 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r2,r18 + or r2,r19 + eor r2,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+3,r21 + mov r21,r2 + and r21,r24 + eor r21,r25 + subi r28,189 + sbci r29,255 + st Y,r21 + subi r28,67 + sbc r29,r1 + ldd r24,Y+4 + ldd r18,Y+20 + ldd r19,Y+36 + ldd r20,Y+52 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,172 + sbci r29,255 + st Y,r23 + subi r28,84 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r3,r18 + or r3,r19 + eor r3,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+4,r21 + mov r21,r3 + and r21,r24 + eor r21,r25 + subi r28,188 + sbci r29,255 + st Y,r21 + subi r28,68 + sbc r29,r1 + ldd r24,Y+5 + ldd r18,Y+21 + ldd r19,Y+37 + ldd r20,Y+53 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,171 + sbci r29,255 + st Y,r23 + subi r28,85 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r4,r18 + or r4,r19 + eor r4,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+5,r21 + mov r21,r4 + and r21,r24 + eor r21,r25 + subi r28,187 + sbci r29,255 + st Y,r21 + subi r28,69 + sbc r29,r1 + ldd r24,Y+6 + ldd r18,Y+22 + ldd r19,Y+38 + ldd r20,Y+54 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,170 + sbci r29,255 + st Y,r23 + subi r28,86 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r5,r18 + or r5,r19 + eor r5,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+6,r21 + mov r21,r5 + and r21,r24 + eor r21,r25 + subi r28,186 + sbci r29,255 + st Y,r21 + subi r28,70 + sbc r29,r1 + ldd r24,Y+7 + ldd r18,Y+23 + ldd r19,Y+39 + ldd r20,Y+55 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,169 + sbci r29,255 + st Y,r23 + subi r28,87 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r6,r18 + or r6,r19 + eor r6,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+7,r21 + mov r21,r6 + and r21,r24 + eor r21,r25 + subi r28,185 + sbci r29,255 + st Y,r21 + subi r28,71 + sbc r29,r1 + ldd r24,Y+8 + ldd r18,Y+24 + ldd r19,Y+40 + ldd r20,Y+56 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,168 + sbci r29,255 + st Y,r23 + subi r28,88 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r7,r18 + or r7,r19 + eor r7,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor 
r21,r24 + std Y+8,r21 + mov r21,r7 + and r21,r24 + eor r21,r25 + subi r28,184 + sbci r29,255 + st Y,r21 + subi r28,72 + sbc r29,r1 + ldd r24,Y+9 + ldd r18,Y+25 + ldd r19,Y+41 + ldd r20,Y+57 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,167 + sbci r29,255 + st Y,r23 + subi r28,89 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r8,r18 + or r8,r19 + eor r8,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+9,r21 + mov r21,r8 + and r21,r24 + eor r21,r25 + subi r28,183 + sbci r29,255 + st Y,r21 + subi r28,73 + sbc r29,r1 + ldd r24,Y+10 + ldd r18,Y+26 + ldd r19,Y+42 + ldd r20,Y+58 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,166 + sbci r29,255 + st Y,r23 + subi r28,90 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r9,r18 + or r9,r19 + eor r9,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+10,r21 + mov r21,r9 + and r21,r24 + eor r21,r25 + subi r28,182 + sbci r29,255 + st Y,r21 + subi r28,74 + sbc r29,r1 + ldd r24,Y+11 + ldd r18,Y+27 + ldd r19,Y+43 + ldd r20,Y+59 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,165 + sbci r29,255 + st Y,r23 + subi r28,91 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r10,r18 + or r10,r19 + eor r10,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+11,r21 + mov r21,r10 + and r21,r24 + eor r21,r25 + subi r28,181 + sbci r29,255 + st Y,r21 + subi r28,75 + sbc r29,r1 + ldd r24,Y+12 + ldd r18,Y+28 + ldd r19,Y+44 + ldd r20,Y+60 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,164 + sbci r29,255 + st Y,r23 + subi r28,92 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r11,r18 + or r11,r19 + eor r11,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+12,r21 + mov r21,r11 + and r21,r24 + eor r21,r25 + subi r28,180 + sbci r29,255 + st Y,r21 + subi r28,76 + sbc r29,r1 + ldd r24,Y+13 + ldd r18,Y+29 + ldd r19,Y+45 + ldd r20,Y+61 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,163 + sbci r29,255 + st Y,r23 + subi r28,93 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r12,r18 + or r12,r19 + eor r12,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+13,r21 + mov r21,r12 + and r21,r24 + eor r21,r25 + subi r28,179 + sbci r29,255 + st Y,r21 + subi r28,77 + sbc r29,r1 + ldd r24,Y+14 + ldd r18,Y+30 + ldd r19,Y+46 + ldd r20,Y+62 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,162 + sbci r29,255 + st Y,r23 + subi r28,94 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r13,r18 + or r13,r19 + eor r13,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+14,r21 + mov r21,r13 + and r21,r24 + eor r21,r25 + subi r28,178 + sbci r29,255 + st Y,r21 + subi r28,78 + sbc r29,r1 + ldd r24,Y+15 + ldd r18,Y+31 + ldd r19,Y+47 + ldd r20,Y+63 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,161 + sbci r29,255 + st Y,r23 + subi r28,95 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r14,r18 + or r14,r19 + eor r14,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+15,r21 + mov r21,r14 + and r21,r24 + eor r21,r25 + subi r28,177 + sbci r29,255 + st Y,r21 + subi r28,79 + sbc r29,r1 + ldd r24,Y+16 + ldd r18,Y+32 + ldd r19,Y+48 + subi r28,192 + sbci r29,255 + ld r20,Y + subi r28,64 + sbc r29,r1 + com r24 + mov 
r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,160 + sbci r29,255 + st Y,r23 + subi r28,96 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r15,r18 + or r15,r19 + eor r15,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+16,r21 + mov r21,r15 + and r21,r24 + eor r21,r25 + subi r28,176 + sbci r29,255 + st Y,r21 + subi r28,80 + sbc r29,r1 + std Y+33,r14 + std Y+34,r15 + std Y+35,r26 + std Y+36,r27 + std Y+37,r2 + std Y+38,r3 + std Y+39,r4 + std Y+40,r5 + std Y+41,r6 + std Y+42,r7 + std Y+43,r8 + std Y+44,r9 + std Y+45,r10 + std Y+46,r11 + std Y+47,r12 + std Y+48,r13 + subi r28,191 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,80 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,96 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + adiw r28,49 + st Y+,r13 + st Y+,r14 + st Y+,r15 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y,r12 + subi r28,64 + sbc r29,r1 + dec r22 + breq 5812f + rjmp 134b +5812: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r26,Y+17 + ldd r27,Y+18 + ldd r2,Y+19 + ldd r3,Y+20 + ldd r4,Y+21 + ldd r5,Y+22 + ldd r6,Y+23 + ldd r7,Y+24 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + std Z+16,r26 + std Z+17,r27 + std Z+18,r2 + std Z+19,r3 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + ldd r26,Y+33 + ldd r27,Y+34 + ldd r2,Y+35 + ldd r3,Y+36 + ldd r4,Y+37 + ldd r5,Y+38 + ldd r6,Y+39 + ldd r7,Y+40 + ldd r8,Y+41 + ldd r9,Y+42 + ldd r10,Y+43 + ldd r11,Y+44 + ldd r12,Y+45 + ldd r13,Y+46 + ldd r14,Y+47 + ldd r15,Y+48 + std Z+32,r26 + std Z+33,r27 + std Z+34,r2 + std Z+35,r3 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r8 + std Z+41,r9 + std Z+42,r10 + std Z+43,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + adiw r28,49 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ 
+ ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,64 + sbc r29,r1 + std Z+48,r26 + std Z+49,r27 + std Z+50,r2 + std Z+51,r3 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + std Z+56,r8 + std Z+57,r9 + std Z+58,r10 + std Z+59,r11 + std Z+60,r12 + std Z+61,r13 + std Z+62,r14 + std Z+63,r15 + subi r28,160 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot512_permute_7, .-knot512_permute_7 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_8, @object + .size table_8, 140 +table_8: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 17 + .byte 35 + .byte 71 + .byte 142 + .byte 28 + .byte 56 + .byte 113 + .byte 226 + .byte 196 + .byte 137 + .byte 18 + .byte 37 + .byte 75 + .byte 151 + .byte 46 + .byte 92 + .byte 184 + .byte 112 + .byte 224 + .byte 192 + .byte 129 + .byte 3 + .byte 6 + .byte 12 + .byte 25 + .byte 50 + .byte 100 + .byte 201 + .byte 146 + .byte 36 + .byte 73 + .byte 147 + .byte 38 + .byte 77 + .byte 155 + .byte 55 + .byte 110 + .byte 220 + .byte 185 + .byte 114 + .byte 228 + .byte 200 + .byte 144 + .byte 32 + .byte 65 + .byte 130 + .byte 5 + .byte 10 + .byte 21 + .byte 43 + .byte 86 + .byte 173 + .byte 91 + .byte 182 + .byte 109 + .byte 218 + .byte 181 + .byte 107 + .byte 214 + .byte 172 + .byte 89 + .byte 178 + .byte 101 + .byte 203 + .byte 150 + .byte 44 + .byte 88 + .byte 176 + .byte 97 + .byte 195 + .byte 135 + .byte 15 + .byte 31 + .byte 62 + .byte 125 + .byte 251 + .byte 246 + .byte 237 + .byte 219 + .byte 183 + .byte 111 + .byte 222 + .byte 189 + .byte 122 + .byte 245 + .byte 235 + .byte 215 + .byte 174 + .byte 93 + .byte 186 + .byte 116 + .byte 232 + .byte 209 + .byte 162 + .byte 68 + .byte 136 + .byte 16 + .byte 33 + .byte 67 + .byte 134 + .byte 13 + .byte 27 + .byte 54 + .byte 108 + .byte 216 + .byte 177 + .byte 99 + .byte 199 + .byte 143 + .byte 30 + .byte 60 + .byte 121 + .byte 243 + .byte 231 + .byte 206 + .byte 156 + .byte 57 + .byte 115 + .byte 230 + .byte 204 + .byte 152 + .byte 49 + .byte 98 + .byte 197 + .byte 139 + .byte 22 + .byte 45 + .byte 90 + .byte 180 + .byte 105 + .byte 210 + .byte 164 + .byte 72 + .byte 145 + .byte 34 + .byte 69 + + .text +.global knot512_permute_8 + .type knot512_permute_8, @function +knot512_permute_8: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,96 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 113 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r26,Z+16 + ldd r27,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std 
Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + ldd r26,Z+32 + ldd r27,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r8,Z+40 + ldd r9,Z+41 + ldd r10,Z+42 + ldd r11,Z+43 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + std Y+33,r26 + std Y+34,r27 + std Y+35,r2 + std Y+36,r3 + std Y+37,r4 + std Y+38,r5 + std Y+39,r6 + std Y+40,r7 + std Y+41,r8 + std Y+42,r9 + std Y+43,r10 + std Y+44,r11 + std Y+45,r12 + std Y+46,r13 + std Y+47,r14 + std Y+48,r15 + ldd r26,Z+48 + ldd r27,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r8,Z+56 + ldd r9,Z+57 + ldd r10,Z+58 + ldd r11,Z+59 + ldd r12,Z+60 + ldd r13,Z+61 + ldd r14,Z+62 + ldd r15,Z+63 + adiw r28,49 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y+,r12 + st Y+,r13 + st Y+,r14 + st Y,r15 + subi r28,64 + sbc r29,r1 + push r31 + push r30 + ldi r30,lo8(table_8) + ldi r31,hi8(table_8) +#if defined(RAMPZ) + ldi r17,hh8(table_8) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +134: + ldd r24,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r24,r18 + inc r30 + ldd r18,Y+17 + ldd r19,Y+33 + ldd r20,Y+49 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,175 + sbci r29,255 + st Y,r23 + subi r28,81 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r26,r18 + or r26,r19 + eor r26,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+1,r21 + mov r21,r26 + and r21,r24 + eor r21,r25 + subi r28,191 + sbci r29,255 + st Y,r21 + subi r28,65 + sbc r29,r1 + ldd r24,Y+2 + ldd r18,Y+18 + ldd r19,Y+34 + ldd r20,Y+50 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,174 + sbci r29,255 + st Y,r23 + subi r28,82 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r27,r18 + or r27,r19 + eor r27,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+2,r21 + mov r21,r27 + and r21,r24 + eor r21,r25 + subi r28,190 + sbci r29,255 + st Y,r21 + subi r28,66 + sbc r29,r1 + ldd r24,Y+3 + ldd r18,Y+19 + ldd r19,Y+35 + ldd r20,Y+51 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,173 + sbci r29,255 + st Y,r23 + subi r28,83 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r2,r18 + or r2,r19 + eor r2,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+3,r21 + mov r21,r2 + and r21,r24 + eor r21,r25 + subi r28,189 + sbci r29,255 + st Y,r21 + subi r28,67 + sbc r29,r1 + ldd r24,Y+4 + ldd r18,Y+20 + ldd r19,Y+36 + ldd r20,Y+52 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,172 + sbci r29,255 + st Y,r23 + subi r28,84 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r3,r18 + or r3,r19 + eor r3,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+4,r21 + mov r21,r3 + and r21,r24 + eor r21,r25 + subi r28,188 + sbci r29,255 + st Y,r21 + subi r28,68 + sbc r29,r1 + ldd r24,Y+5 + ldd r18,Y+21 + ldd r19,Y+37 + ldd r20,Y+53 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,171 + sbci r29,255 + st Y,r23 + subi r28,85 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r4,r18 + or r4,r19 + eor r4,r16 + mov 
r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+5,r21 + mov r21,r4 + and r21,r24 + eor r21,r25 + subi r28,187 + sbci r29,255 + st Y,r21 + subi r28,69 + sbc r29,r1 + ldd r24,Y+6 + ldd r18,Y+22 + ldd r19,Y+38 + ldd r20,Y+54 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,170 + sbci r29,255 + st Y,r23 + subi r28,86 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r5,r18 + or r5,r19 + eor r5,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+6,r21 + mov r21,r5 + and r21,r24 + eor r21,r25 + subi r28,186 + sbci r29,255 + st Y,r21 + subi r28,70 + sbc r29,r1 + ldd r24,Y+7 + ldd r18,Y+23 + ldd r19,Y+39 + ldd r20,Y+55 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,169 + sbci r29,255 + st Y,r23 + subi r28,87 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r6,r18 + or r6,r19 + eor r6,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+7,r21 + mov r21,r6 + and r21,r24 + eor r21,r25 + subi r28,185 + sbci r29,255 + st Y,r21 + subi r28,71 + sbc r29,r1 + ldd r24,Y+8 + ldd r18,Y+24 + ldd r19,Y+40 + ldd r20,Y+56 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,168 + sbci r29,255 + st Y,r23 + subi r28,88 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r7,r18 + or r7,r19 + eor r7,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+8,r21 + mov r21,r7 + and r21,r24 + eor r21,r25 + subi r28,184 + sbci r29,255 + st Y,r21 + subi r28,72 + sbc r29,r1 + ldd r24,Y+9 + ldd r18,Y+25 + ldd r19,Y+41 + ldd r20,Y+57 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,167 + sbci r29,255 + st Y,r23 + subi r28,89 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r8,r18 + or r8,r19 + eor r8,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+9,r21 + mov r21,r8 + and r21,r24 + eor r21,r25 + subi r28,183 + sbci r29,255 + st Y,r21 + subi r28,73 + sbc r29,r1 + ldd r24,Y+10 + ldd r18,Y+26 + ldd r19,Y+42 + ldd r20,Y+58 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,166 + sbci r29,255 + st Y,r23 + subi r28,90 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r9,r18 + or r9,r19 + eor r9,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+10,r21 + mov r21,r9 + and r21,r24 + eor r21,r25 + subi r28,182 + sbci r29,255 + st Y,r21 + subi r28,74 + sbc r29,r1 + ldd r24,Y+11 + ldd r18,Y+27 + ldd r19,Y+43 + ldd r20,Y+59 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,165 + sbci r29,255 + st Y,r23 + subi r28,91 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r10,r18 + or r10,r19 + eor r10,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+11,r21 + mov r21,r10 + and r21,r24 + eor r21,r25 + subi r28,181 + sbci r29,255 + st Y,r21 + subi r28,75 + sbc r29,r1 + ldd r24,Y+12 + ldd r18,Y+28 + ldd r19,Y+44 + ldd r20,Y+60 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,164 + sbci r29,255 + st Y,r23 + subi r28,92 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r11,r18 + or r11,r19 + eor r11,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+12,r21 + mov r21,r11 + and r21,r24 + eor r21,r25 + subi r28,180 + sbci r29,255 + st Y,r21 + subi r28,76 + sbc r29,r1 + ldd r24,Y+13 + ldd r18,Y+29 + ldd r19,Y+45 + ldd r20,Y+61 + com r24 + mov r25,r18 + and 
r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,163 + sbci r29,255 + st Y,r23 + subi r28,93 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r12,r18 + or r12,r19 + eor r12,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+13,r21 + mov r21,r12 + and r21,r24 + eor r21,r25 + subi r28,179 + sbci r29,255 + st Y,r21 + subi r28,77 + sbc r29,r1 + ldd r24,Y+14 + ldd r18,Y+30 + ldd r19,Y+46 + ldd r20,Y+62 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,162 + sbci r29,255 + st Y,r23 + subi r28,94 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r13,r18 + or r13,r19 + eor r13,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+14,r21 + mov r21,r13 + and r21,r24 + eor r21,r25 + subi r28,178 + sbci r29,255 + st Y,r21 + subi r28,78 + sbc r29,r1 + ldd r24,Y+15 + ldd r18,Y+31 + ldd r19,Y+47 + ldd r20,Y+63 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,161 + sbci r29,255 + st Y,r23 + subi r28,95 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r14,r18 + or r14,r19 + eor r14,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+15,r21 + mov r21,r14 + and r21,r24 + eor r21,r25 + subi r28,177 + sbci r29,255 + st Y,r21 + subi r28,79 + sbc r29,r1 + ldd r24,Y+16 + ldd r18,Y+32 + ldd r19,Y+48 + subi r28,192 + sbci r29,255 + ld r20,Y + subi r28,64 + sbc r29,r1 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,160 + sbci r29,255 + st Y,r23 + subi r28,96 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r15,r18 + or r15,r19 + eor r15,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+16,r21 + mov r21,r15 + and r21,r24 + eor r21,r25 + subi r28,176 + sbci r29,255 + st Y,r21 + subi r28,80 + sbc r29,r1 + std Y+33,r14 + std Y+34,r15 + std Y+35,r26 + std Y+36,r27 + std Y+37,r2 + std Y+38,r3 + std Y+39,r4 + std Y+40,r5 + std Y+41,r6 + std Y+42,r7 + std Y+43,r8 + std Y+44,r9 + std Y+45,r10 + std Y+46,r11 + std Y+47,r12 + std Y+48,r13 + subi r28,191 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,80 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,96 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + adiw r28,49 + st Y+,r13 + st Y+,r14 + st Y+,r15 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y,r12 + subi r28,64 + sbc r29,r1 + dec r22 + breq 5812f + rjmp 134b +5812: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd 
r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r26,Y+17 + ldd r27,Y+18 + ldd r2,Y+19 + ldd r3,Y+20 + ldd r4,Y+21 + ldd r5,Y+22 + ldd r6,Y+23 + ldd r7,Y+24 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + std Z+16,r26 + std Z+17,r27 + std Z+18,r2 + std Z+19,r3 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + ldd r26,Y+33 + ldd r27,Y+34 + ldd r2,Y+35 + ldd r3,Y+36 + ldd r4,Y+37 + ldd r5,Y+38 + ldd r6,Y+39 + ldd r7,Y+40 + ldd r8,Y+41 + ldd r9,Y+42 + ldd r10,Y+43 + ldd r11,Y+44 + ldd r12,Y+45 + ldd r13,Y+46 + ldd r14,Y+47 + ldd r15,Y+48 + std Z+32,r26 + std Z+33,r27 + std Z+34,r2 + std Z+35,r3 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r8 + std Z+41,r9 + std Z+42,r10 + std Z+43,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + adiw r28,49 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,64 + sbc r29,r1 + std Z+48,r26 + std Z+49,r27 + std Z+50,r2 + std Z+51,r3 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + std Z+56,r8 + std Z+57,r9 + std Z+58,r10 + std Z+59,r11 + std Z+60,r12 + std Z+61,r13 + std Z+62,r14 + std Z+63,r15 + subi r28,160 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot512_permute_8, .-knot512_permute_8 + +#endif diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot.c b/knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot.c new file mode 100644 index 0000000..f8b378e --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot.c @@ -0,0 +1,301 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "internal-knot.h" + +#if !defined(__AVR__) + +/* Round constants for the KNOT-256, KNOT-384, and KNOT-512 permutations */ +static uint8_t const rc6[52] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06, 0x0c, 0x18, 0x31, 0x22, + 0x05, 0x0a, 0x14, 0x29, 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28, + 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24, 0x09, 0x12, 0x25, 0x0b, + 0x16, 0x2d, 0x1b, 0x37, 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26, + 0x0d, 0x1a, 0x35, 0x2a +}; +static uint8_t const rc7[104] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03, 0x06, 0x0c, 0x18, 0x30, + 0x61, 0x42, 0x05, 0x0a, 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c, + 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b, 0x16, 0x2c, 0x59, 0x33, + 0x67, 0x4e, 0x1d, 0x3a, 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f, + 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43, 0x07, 0x0e, 0x1c, 0x38, + 0x71, 0x62, 0x44, 0x09, 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36, + 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37, 0x6f, 0x5e, 0x3d, 0x7b, + 0x76, 0x6c, 0x58, 0x31, 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25, + 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c +}; +static uint8_t const rc8[140] = { + 0x01, 0x02, 0x04, 0x08, 0x11, 0x23, 0x47, 0x8e, 0x1c, 0x38, 0x71, 0xe2, + 0xc4, 0x89, 0x12, 0x25, 0x4b, 0x97, 0x2e, 0x5c, 0xb8, 0x70, 0xe0, 0xc0, + 0x81, 0x03, 0x06, 0x0c, 0x19, 0x32, 0x64, 0xc9, 0x92, 0x24, 0x49, 0x93, + 0x26, 0x4d, 0x9b, 0x37, 0x6e, 0xdc, 0xb9, 0x72, 0xe4, 0xc8, 0x90, 0x20, + 0x41, 0x82, 0x05, 0x0a, 0x15, 0x2b, 0x56, 0xad, 0x5b, 0xb6, 0x6d, 0xda, + 0xb5, 0x6b, 0xd6, 0xac, 0x59, 0xb2, 0x65, 0xcb, 0x96, 0x2c, 0x58, 0xb0, + 0x61, 0xc3, 0x87, 0x0f, 0x1f, 0x3e, 0x7d, 0xfb, 0xf6, 0xed, 0xdb, 0xb7, + 0x6f, 0xde, 0xbd, 0x7a, 0xf5, 0xeb, 0xd7, 0xae, 0x5d, 0xba, 0x74, 0xe8, + 0xd1, 0xa2, 0x44, 0x88, 0x10, 0x21, 0x43, 0x86, 0x0d, 0x1b, 0x36, 0x6c, + 0xd8, 0xb1, 0x63, 0xc7, 0x8f, 0x1e, 0x3c, 0x79, 0xf3, 0xe7, 0xce, 0x9c, + 0x39, 0x73, 0xe6, 0xcc, 0x98, 0x31, 0x62, 0xc5, 0x8b, 0x16, 0x2d, 0x5a, + 0xb4, 0x69, 0xd2, 0xa4, 0x48, 0x91, 0x22, 0x45 +}; + +/* Applies the KNOT S-box to four 64-bit words in bit-sliced mode */ +#define knot_sbox64(a0, a1, a2, a3, b1, b2, b3) \ + do { \ + uint64_t t1, t3, t6; \ + t1 = ~(a0); \ + t3 = (a2) ^ ((a1) & t1); \ + (b3) = (a3) ^ t3; \ + t6 = (a3) ^ t1; \ + (b2) = ((a1) | (a2)) ^ t6; \ + t1 = (a1) ^ (a3); \ + (a0) = t1 ^ (t3 & t6); \ + (b1) = t3 ^ ((b2) & t1); \ + } while (0) + +/* Applies the KNOT S-box to four 32-bit words in bit-sliced mode */ +#define knot_sbox32(a0, a1, a2, a3, b1, b2, b3) \ + do { \ + uint32_t t1, t3, t6; \ + t1 = ~(a0); \ + t3 = (a2) ^ ((a1) & t1); \ + (b3) = (a3) ^ t3; \ + t6 = (a3) ^ t1; \ + (b2) = ((a1) | (a2)) ^ t6; \ + t1 = (a1) ^ (a3); \ + (a0) = t1 ^ (t3 & t6); \ + (b1) = t3 ^ ((b2) & t1); \ + } while (0) + +static void knot256_permute + (knot256_state_t *state, const uint8_t *rc, uint8_t rounds) +{ + uint64_t b1, b2, b3; + + /* Load the input state into local variables; each row is 64 bits */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + uint64_t x0 = state->S[0]; + uint64_t x1 = state->S[1]; + uint64_t x2 = state->S[2]; + uint64_t x3 = state->S[3]; +#else + uint64_t x0 = le_load_word64(state->B); + uint64_t x1 = le_load_word64(state->B + 8); + uint64_t x2 = le_load_word64(state->B + 16); + uint64_t x3 = le_load_word64(state->B + 24); +#endif + + /* Perform all permutation rounds */ + for (; rounds > 0; --rounds) { + /* Add the next round constant to the state */ + x0 ^= *rc++; + + /* Substitution layer */ + knot_sbox64(x0, x1, x2, x3, 
b1, b2, b3); + + /* Linear diffusion layer */ + x1 = leftRotate1_64(b1); + x2 = leftRotate8_64(b2); + x3 = leftRotate25_64(b3); + } + + /* Store the local variables to the output state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + state->S[0] = x0; + state->S[1] = x1; + state->S[2] = x2; + state->S[3] = x3; +#else + le_store_word64(state->B, x0); + le_store_word64(state->B + 8, x1); + le_store_word64(state->B + 16, x2); + le_store_word64(state->B + 24, x3); +#endif +} + +void knot256_permute_6(knot256_state_t *state, uint8_t rounds) +{ + knot256_permute(state, rc6, rounds); +} + +void knot256_permute_7(knot256_state_t *state, uint8_t rounds) +{ + knot256_permute(state, rc7, rounds); +} + +void knot384_permute_7(knot384_state_t *state, uint8_t rounds) +{ + const uint8_t *rc = rc7; + uint64_t b2, b4, b6; + uint32_t b3, b5, b7; + + /* Load the input state into local variables; each row is 96 bits */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + uint64_t x0 = state->S[0]; + uint32_t x1 = state->W[2]; + uint64_t x2 = state->W[3] | (((uint64_t)(state->W[4])) << 32); + uint32_t x3 = state->W[5]; + uint64_t x4 = state->S[3]; + uint32_t x5 = state->W[8]; + uint64_t x6 = state->W[9] | (((uint64_t)(state->W[10])) << 32); + uint32_t x7 = state->W[11]; +#else + uint64_t x0 = le_load_word64(state->B); + uint32_t x1 = le_load_word32(state->B + 8); + uint64_t x2 = le_load_word64(state->B + 12); + uint32_t x3 = le_load_word32(state->B + 20); + uint64_t x4 = le_load_word64(state->B + 24); + uint32_t x5 = le_load_word32(state->B + 32); + uint64_t x6 = le_load_word64(state->B + 36); + uint32_t x7 = le_load_word32(state->B + 44); +#endif + + /* Perform all permutation rounds */ + for (; rounds > 0; --rounds) { + /* Add the next round constant to the state */ + x0 ^= *rc++; + + /* Substitution layer */ + knot_sbox64(x0, x2, x4, x6, b2, b4, b6); + knot_sbox32(x1, x3, x5, x7, b3, b5, b7); + + /* Linear diffusion layer */ + #define leftRotateShort_96(a0, a1, b0, b1, bits) \ + do { \ + (a0) = ((b0) << (bits)) | ((b1) >> (32 - (bits))); \ + (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ + } while (0) + #define leftRotateLong_96(a0, a1, b0, b1, bits) \ + do { \ + (a0) = ((b0) << (bits)) | \ + (((uint64_t)(b1)) << ((bits) - 32)) | \ + ((b0) >> (96 - (bits))); \ + (a1) = (uint32_t)(((b0) << ((bits) - 32)) >> 32); \ + } while (0) + leftRotateShort_96(x2, x3, b2, b3, 1); + leftRotateShort_96(x4, x5, b4, b5, 8); + leftRotateLong_96(x6, x7, b6, b7, 55); + } + + /* Store the local variables to the output state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + state->S[0] = x0; + state->W[2] = x1; + state->W[3] = (uint32_t)x2; + state->W[4] = (uint32_t)(x2 >> 32); + state->W[5] = x3; + state->S[3] = x4; + state->W[8] = x5; + state->W[9] = (uint32_t)x6; + state->W[10] = (uint32_t)(x6 >> 32); + state->W[11] = x7; +#else + le_store_word64(state->B, x0); + le_store_word32(state->B + 8, x1); + le_store_word64(state->B + 12, x2); + le_store_word32(state->B + 20, x3); + le_store_word64(state->B + 24, x4); + le_store_word32(state->B + 32, x5); + le_store_word64(state->B + 36, x6); + le_store_word32(state->B + 44, x7); +#endif +} + +static void knot512_permute + (knot512_state_t *state, const uint8_t *rc, uint8_t rounds) +{ + uint64_t b2, b3, b4, b5, b6, b7; + + /* Load the input state into local variables; each row is 128 bits */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + uint64_t x0 = state->S[0]; + uint64_t x1 = state->S[1]; + uint64_t x2 = state->S[2]; + uint64_t x3 = state->S[3]; + uint64_t x4 = state->S[4]; + uint64_t x5 = state->S[5]; + uint64_t 
x6 = state->S[6]; + uint64_t x7 = state->S[7]; +#else + uint64_t x0 = le_load_word64(state->B); + uint64_t x1 = le_load_word64(state->B + 8); + uint64_t x2 = le_load_word64(state->B + 16); + uint64_t x3 = le_load_word64(state->B + 24); + uint64_t x4 = le_load_word64(state->B + 32); + uint64_t x5 = le_load_word64(state->B + 40); + uint64_t x6 = le_load_word64(state->B + 48); + uint64_t x7 = le_load_word64(state->B + 56); +#endif + + /* Perform all permutation rounds */ + for (; rounds > 0; --rounds) { + /* Add the next round constant to the state */ + x0 ^= *rc++; + + /* Substitution layer */ + knot_sbox64(x0, x2, x4, x6, b2, b4, b6); + knot_sbox64(x1, x3, x5, x7, b3, b5, b7); + + /* Linear diffusion layer */ + #define leftRotate_128(a0, a1, b0, b1, bits) \ + do { \ + (a0) = ((b0) << (bits)) | ((b1) >> (64 - (bits))); \ + (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ + } while (0) + leftRotate_128(x2, x3, b2, b3, 1); + leftRotate_128(x4, x5, b4, b5, 16); + leftRotate_128(x6, x7, b6, b7, 25); + } + + /* Store the local variables to the output state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + state->S[0] = x0; + state->S[1] = x1; + state->S[2] = x2; + state->S[3] = x3; + state->S[4] = x4; + state->S[5] = x5; + state->S[6] = x6; + state->S[7] = x7; +#else + le_store_word64(state->B, x0); + le_store_word64(state->B + 8, x1); + le_store_word64(state->B + 16, x2); + le_store_word64(state->B + 24, x3); + le_store_word64(state->B + 32, x4); + le_store_word64(state->B + 40, x5); + le_store_word64(state->B + 48, x6); + le_store_word64(state->B + 56, x7); +#endif +} + +void knot512_permute_7(knot512_state_t *state, uint8_t rounds) +{ + knot512_permute(state, rc7, rounds); +} + +void knot512_permute_8(knot512_state_t *state, uint8_t rounds) +{ + knot512_permute(state, rc8, rounds); +} + +#endif /* !__AVR__ */ diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot.h b/knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot.h new file mode 100644 index 0000000..88a782c --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v2/rhys/internal-knot.h @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_KNOT_H +#define LW_INTERNAL_KNOT_H + +#include "internal-util.h" + +/** + * \file internal-knot.h + * \brief Permutations that are used by the KNOT AEAD and hash algorithms. 
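The bit-sliced knot_sbox64/knot_sbox32 macros used by the reference permutation above apply the KNOT 4-bit S-box to 64 (or 32) nibble columns at once: bit i of each input word carries bit i of every column. The short self-check below is not part of the patch; it is a minimal sketch that restates the same Boolean circuit, evaluates it one column at a time, and compares the result against the 4-bit lookup table obtained by unrolling the circuit by hand (which should coincide with the S-box given in the KNOT specification).

#include <stdint.h>
#include <stdio.h>

/* Same Boolean circuit as knot_sbox32 in internal-knot.c, restated so the
 * sketch is self-contained. */
#define sbox_bitsliced(a0, a1, a2, a3, b1, b2, b3) \
    do { \
        uint32_t t1, t3, t6; \
        t1 = ~(a0); \
        t3 = (a2) ^ ((a1) & t1); \
        (b3) = (a3) ^ t3; \
        t6 = (a3) ^ t1; \
        (b2) = ((a1) | (a2)) ^ t6; \
        t1 = (a1) ^ (a3); \
        (a0) = t1 ^ (t3 & t6); \
        (b1) = t3 ^ ((b2) & t1); \
    } while (0)

int main(void)
{
    /* 4-bit table obtained by evaluating the circuit for inputs 0..15 */
    static const uint8_t table[16] = {
        4, 0, 10, 7, 11, 14, 1, 13, 9, 15, 6, 8, 5, 2, 12, 3
    };
    for (unsigned x = 0; x < 16; ++x) {
        uint32_t a0 = x & 1, a1 = (x >> 1) & 1, a2 = (x >> 2) & 1, a3 = (x >> 3) & 1;
        uint32_t b1, b2, b3;
        sbox_bitsliced(a0, a1, a2, a3, b1, b2, b3);
        unsigned y = (a0 & 1) | ((b1 & 1) << 1) | ((b2 & 1) << 2) | ((b3 & 1) << 3);
        if (y != table[x]) {
            printf("mismatch at %u: got %u, expected %u\n", x, y, table[x]);
            return 1;
        }
    }
    printf("bit-sliced S-box matches the 4-bit table\n");
    return 0;
}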
+ */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Internal state of the KNOT-256 permutation. + */ +typedef union +{ + uint64_t S[4]; /**< Words of the state */ + uint8_t B[32]; /**< Bytes of the state */ + +} knot256_state_t; + +/** + * \brief Internal state of the KNOT-384 permutation. + */ +typedef union +{ + uint64_t S[6]; /**< 64-bit words of the state */ + uint32_t W[12]; /**< 32-bit words of the state */ + uint8_t B[48]; /**< Bytes of the state */ + +} knot384_state_t; + +/** + * \brief Internal state of the KNOT-512 permutation. + */ +typedef union +{ + uint64_t S[8]; /**< Words of the state */ + uint8_t B[64]; /**< Bytes of the state */ + +} knot512_state_t; + +/** + * \brief Permutes the KNOT-256 state, using 6-bit round constants. + * + * \param state The KNOT-256 state to be permuted. + * \param rounds The number of rounds to be performed, 1 to 52. + * + * The input and output \a state will be in little-endian byte order. + */ +void knot256_permute_6(knot256_state_t *state, uint8_t rounds); + +/** + * \brief Permutes the KNOT-256 state, using 7-bit round constants. + * + * \param state The KNOT-256 state to be permuted. + * \param rounds The number of rounds to be performed, 1 to 104. + * + * The input and output \a state will be in little-endian byte order. + */ +void knot256_permute_7(knot256_state_t *state, uint8_t rounds); + +/** + * \brief Permutes the KNOT-384 state, using 7-bit round constants. + * + * \param state The KNOT-384 state to be permuted. + * \param rounds The number of rounds to be performed, 1 to 104. + * + * The input and output \a state will be in little-endian byte order. + */ +void knot384_permute_7(knot384_state_t *state, uint8_t rounds); + +/** + * \brief Permutes the KNOT-512 state, using 7-bit round constants. + * + * \param state The KNOT-512 state to be permuted. + * \param rounds The number of rounds to be performed, 1 to 104. + * + * The input and output \a state will be in little-endian byte order. + */ +void knot512_permute_7(knot512_state_t *state, uint8_t rounds); + +/** + * \brief Permutes the KNOT-512 state, using 8-bit round constants. + * + * \param state The KNOT-512 state to be permuted. + * \param rounds The number of rounds to be performed, 1 to 140. + * + * The input and output \a state will be in little-endian byte order. + */ +void knot512_permute_8(knot512_state_t *state, uint8_t rounds); + +/** + * \brief Generic pointer to a function that performs a KNOT permutation. + * + * \param state Points to the permutation state. + * \param round Number of rounds to perform. + */ +typedef void (*knot_permute_t)(void *state, uint8_t rounds); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys/internal-util.h b/knot/Implementations/crypto_hash/knot256v2/rhys/internal-util.h new file mode 100644 index 0000000..e30166d --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v2/rhys/internal-util.h @@ -0,0 +1,702 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
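The knot256_state_t, knot384_state_t and knot512_state_t unions declared in internal-knot.h expose the same 32, 48 or 64 bytes of state as bytes (B), 64-bit words (S) and, for KNOT-384, 32-bit words (W); the permutations work on the state in place and treat B as little-endian. The fragment below is a usage sketch only: the single absorbed block and the 12-round count are illustrative placeholders chosen from the documented 1..52 range for the 6-bit round-constant variant, not the sponge parameters of the actual KNOT AEAD or hash modes.

#include <string.h>
#include <stdio.h>
#include "internal-knot.h"

int main(void)
{
    knot256_state_t state;
    const uint8_t block[8] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };

    /* Start from the all-zero state and absorb one block into the
     * low-order bytes, as a sponge-style mode would. */
    memset(state.B, 0, sizeof(state.B));
    lw_xor_block(state.B, block, sizeof(block));

    /* Run the permutation in place; 12 rounds is purely illustrative. */
    knot256_permute_6(&state, 12);

    /* The same storage can now be read back as bytes or 64-bit words. */
    printf("S[0] = %016llx\n", (unsigned long long)state.S[0]);
    return 0;
}

Compiling the sketch requires linking internal-knot.c (or the AVR assembly backend) alongside it.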
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_UTIL_H +#define LW_INTERNAL_UTIL_H + +#include + +/* Figure out how to inline functions using this C compiler */ +#if defined(__STDC__) && __STDC_VERSION__ >= 199901L +#define STATIC_INLINE static inline +#elif defined(__GNUC__) || defined(__clang__) +#define STATIC_INLINE static __inline__ +#else +#define STATIC_INLINE static +#endif + +/* Try to figure out whether the CPU is little-endian or big-endian. + * May need to modify this to include new compiler-specific defines. + * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your + * compiler flags when you compile this library */ +#if defined(__x86_64) || defined(__x86_64__) || \ + defined(__i386) || defined(__i386__) || \ + defined(__AVR__) || defined(__arm) || defined(__arm__) || \ + defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ + defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ + defined(__LITTLE_ENDIAN__) +#define LW_UTIL_LITTLE_ENDIAN 1 +#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ + defined(__BIG_ENDIAN__) +/* Big endian */ +#else +#error "Cannot determine the endianess of this platform" +#endif + +/* Helper macros to load and store values while converting endian-ness */ + +/* Load a big-endian 32-bit word from a byte buffer */ +#define be_load_word32(ptr) \ + ((((uint32_t)((ptr)[0])) << 24) | \ + (((uint32_t)((ptr)[1])) << 16) | \ + (((uint32_t)((ptr)[2])) << 8) | \ + ((uint32_t)((ptr)[3]))) + +/* Store a big-endian 32-bit word into a byte buffer */ +#define be_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 24); \ + (ptr)[1] = (uint8_t)(_x >> 16); \ + (ptr)[2] = (uint8_t)(_x >> 8); \ + (ptr)[3] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 32-bit word from a byte buffer */ +#define le_load_word32(ptr) \ + ((((uint32_t)((ptr)[3])) << 24) | \ + (((uint32_t)((ptr)[2])) << 16) | \ + (((uint32_t)((ptr)[1])) << 8) | \ + ((uint32_t)((ptr)[0]))) + +/* Store a little-endian 32-bit word into a byte buffer */ +#define le_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + } while (0) + +/* Load a big-endian 64-bit word from a byte buffer */ +#define be_load_word64(ptr) \ + ((((uint64_t)((ptr)[0])) << 56) | \ + (((uint64_t)((ptr)[1])) << 48) 
| \ + (((uint64_t)((ptr)[2])) << 40) | \ + (((uint64_t)((ptr)[3])) << 32) | \ + (((uint64_t)((ptr)[4])) << 24) | \ + (((uint64_t)((ptr)[5])) << 16) | \ + (((uint64_t)((ptr)[6])) << 8) | \ + ((uint64_t)((ptr)[7]))) + +/* Store a big-endian 64-bit word into a byte buffer */ +#define be_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 56); \ + (ptr)[1] = (uint8_t)(_x >> 48); \ + (ptr)[2] = (uint8_t)(_x >> 40); \ + (ptr)[3] = (uint8_t)(_x >> 32); \ + (ptr)[4] = (uint8_t)(_x >> 24); \ + (ptr)[5] = (uint8_t)(_x >> 16); \ + (ptr)[6] = (uint8_t)(_x >> 8); \ + (ptr)[7] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 64-bit word from a byte buffer */ +#define le_load_word64(ptr) \ + ((((uint64_t)((ptr)[7])) << 56) | \ + (((uint64_t)((ptr)[6])) << 48) | \ + (((uint64_t)((ptr)[5])) << 40) | \ + (((uint64_t)((ptr)[4])) << 32) | \ + (((uint64_t)((ptr)[3])) << 24) | \ + (((uint64_t)((ptr)[2])) << 16) | \ + (((uint64_t)((ptr)[1])) << 8) | \ + ((uint64_t)((ptr)[0]))) + +/* Store a little-endian 64-bit word into a byte buffer */ +#define le_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + (ptr)[4] = (uint8_t)(_x >> 32); \ + (ptr)[5] = (uint8_t)(_x >> 40); \ + (ptr)[6] = (uint8_t)(_x >> 48); \ + (ptr)[7] = (uint8_t)(_x >> 56); \ + } while (0) + +/* Load a big-endian 16-bit word from a byte buffer */ +#define be_load_word16(ptr) \ + ((((uint16_t)((ptr)[0])) << 8) | \ + ((uint16_t)((ptr)[1]))) + +/* Store a big-endian 16-bit word into a byte buffer */ +#define be_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 8); \ + (ptr)[1] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 16-bit word from a byte buffer */ +#define le_load_word16(ptr) \ + ((((uint16_t)((ptr)[1])) << 8) | \ + ((uint16_t)((ptr)[0]))) + +/* Store a little-endian 16-bit word into a byte buffer */ +#define le_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + } while (0) + +/* XOR a source byte buffer against a destination */ +#define lw_xor_block(dest, src, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ ^= *_src++; \ + --_len; \ + } \ + } while (0) + +/* XOR two source byte buffers and put the result in a destination buffer */ +#define lw_xor_block_2_src(dest, src1, src2, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ = *_src1++ ^ *_src2++; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time */ +#define lw_xor_block_2_dest(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest2++ = (*_dest++ ^= *_src++); \ + --_len; \ + } \ + } while (0) + +/* XOR two byte buffers and write to a destination which at the same + * time copying the contents of src2 to dest2 */ +#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = 
(src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src2++; \ + *_dest2++ = _temp; \ + *_dest++ = *_src1++ ^ _temp; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time. This version swaps the source value + * into the "dest" buffer */ +#define lw_xor_block_swap(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src++; \ + *_dest2++ = *_dest ^ _temp; \ + *_dest++ = _temp; \ + --_len; \ + } \ + } while (0) + +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + +/* Rotation macros for 32-bit arguments */ + +/* Generic left rotate */ +#define leftRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (32 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (32 - (bits))); \ + })) + +#if !LW_CRYPTO_ROTATE32_COMPOSED + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1(a) (leftRotate((a), 1)) +#define leftRotate2(a) (leftRotate((a), 2)) +#define leftRotate3(a) (leftRotate((a), 3)) +#define leftRotate4(a) (leftRotate((a), 4)) +#define leftRotate5(a) (leftRotate((a), 5)) +#define leftRotate6(a) (leftRotate((a), 6)) +#define leftRotate7(a) (leftRotate((a), 7)) +#define leftRotate8(a) (leftRotate((a), 8)) +#define leftRotate9(a) (leftRotate((a), 9)) +#define leftRotate10(a) (leftRotate((a), 10)) +#define leftRotate11(a) (leftRotate((a), 11)) +#define leftRotate12(a) (leftRotate((a), 12)) +#define leftRotate13(a) (leftRotate((a), 13)) +#define leftRotate14(a) (leftRotate((a), 14)) +#define leftRotate15(a) (leftRotate((a), 15)) +#define leftRotate16(a) (leftRotate((a), 16)) +#define leftRotate17(a) (leftRotate((a), 17)) +#define leftRotate18(a) (leftRotate((a), 18)) +#define leftRotate19(a) (leftRotate((a), 19)) +#define leftRotate20(a) (leftRotate((a), 20)) +#define leftRotate21(a) (leftRotate((a), 21)) +#define leftRotate22(a) (leftRotate((a), 22)) +#define leftRotate23(a) (leftRotate((a), 23)) +#define leftRotate24(a) (leftRotate((a), 24)) +#define leftRotate25(a) (leftRotate((a), 25)) +#define leftRotate26(a) (leftRotate((a), 26)) +#define leftRotate27(a) (leftRotate((a), 27)) +#define leftRotate28(a) (leftRotate((a), 28)) +#define leftRotate29(a) (leftRotate((a), 29)) +#define leftRotate30(a) (leftRotate((a), 30)) +#define leftRotate31(a) (leftRotate((a), 31)) + +/* Right rotate by a specific number of bits. 
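The composed rotation macros defined below lean on identities like rotl(x, 5) == rotr(rotl(x, 8), 3), trading an awkward shift count for a byte-aligned rotate plus a short run of single-bit rotates. A minimal standalone check of that identity is sketched here; rotl32 and rotr32 are hypothetical helper names for this sketch only, not macros from this header, and the program is not part of the patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Portable 32-bit rotations used only for this check */
    static uint32_t rotl32(uint32_t x, unsigned bits)
    {
        return (x << bits) | (x >> (32 - bits));
    }

    static uint32_t rotr32(uint32_t x, unsigned bits)
    {
        return (x >> bits) | (x << (32 - bits));
    }

    int main(void)
    {
        uint32_t x = 0x12345678;
        /* Left rotate by 5 expressed as "left by 8, then right by 3" */
        uint32_t direct   = rotl32(x, 5);
        uint32_t composed = rotr32(rotl32(x, 8), 3);
        printf("%s\n", direct == composed ? "match" : "mismatch");
        return 0;
    }

On AVR a rotate by a multiple of 8 is essentially a byte move and a rotate by 1 is a short carry-chain shift, which is why the composed forms below favour an 8-, 16-, or 24-bit rotate followed by at most four single-bit rotates.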
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1(a) (rightRotate((a), 1)) +#define rightRotate2(a) (rightRotate((a), 2)) +#define rightRotate3(a) (rightRotate((a), 3)) +#define rightRotate4(a) (rightRotate((a), 4)) +#define rightRotate5(a) (rightRotate((a), 5)) +#define rightRotate6(a) (rightRotate((a), 6)) +#define rightRotate7(a) (rightRotate((a), 7)) +#define rightRotate8(a) (rightRotate((a), 8)) +#define rightRotate9(a) (rightRotate((a), 9)) +#define rightRotate10(a) (rightRotate((a), 10)) +#define rightRotate11(a) (rightRotate((a), 11)) +#define rightRotate12(a) (rightRotate((a), 12)) +#define rightRotate13(a) (rightRotate((a), 13)) +#define rightRotate14(a) (rightRotate((a), 14)) +#define rightRotate15(a) (rightRotate((a), 15)) +#define rightRotate16(a) (rightRotate((a), 16)) +#define rightRotate17(a) (rightRotate((a), 17)) +#define rightRotate18(a) (rightRotate((a), 18)) +#define rightRotate19(a) (rightRotate((a), 19)) +#define rightRotate20(a) (rightRotate((a), 20)) +#define rightRotate21(a) (rightRotate((a), 21)) +#define rightRotate22(a) (rightRotate((a), 22)) +#define rightRotate23(a) (rightRotate((a), 23)) +#define rightRotate24(a) (rightRotate((a), 24)) +#define rightRotate25(a) (rightRotate((a), 25)) +#define rightRotate26(a) (rightRotate((a), 26)) +#define rightRotate27(a) (rightRotate((a), 27)) +#define rightRotate28(a) (rightRotate((a), 28)) +#define rightRotate29(a) (rightRotate((a), 29)) +#define rightRotate30(a) (rightRotate((a), 30)) +#define rightRotate31(a) (rightRotate((a), 31)) + +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) 
(leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Rotation macros for 64-bit arguments */ + +/* Generic left rotate */ +#define leftRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (64 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (64 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_64(a) (leftRotate_64((a), 1)) +#define leftRotate2_64(a) (leftRotate_64((a), 2)) +#define leftRotate3_64(a) (leftRotate_64((a), 3)) +#define leftRotate4_64(a) (leftRotate_64((a), 4)) +#define leftRotate5_64(a) (leftRotate_64((a), 5)) +#define leftRotate6_64(a) (leftRotate_64((a), 6)) +#define leftRotate7_64(a) (leftRotate_64((a), 7)) +#define leftRotate8_64(a) (leftRotate_64((a), 8)) +#define leftRotate9_64(a) (leftRotate_64((a), 9)) +#define leftRotate10_64(a) (leftRotate_64((a), 10)) +#define leftRotate11_64(a) (leftRotate_64((a), 11)) +#define leftRotate12_64(a) (leftRotate_64((a), 12)) +#define leftRotate13_64(a) (leftRotate_64((a), 13)) +#define leftRotate14_64(a) (leftRotate_64((a), 14)) +#define leftRotate15_64(a) (leftRotate_64((a), 15)) +#define leftRotate16_64(a) (leftRotate_64((a), 16)) +#define leftRotate17_64(a) (leftRotate_64((a), 17)) +#define leftRotate18_64(a) (leftRotate_64((a), 18)) +#define leftRotate19_64(a) (leftRotate_64((a), 19)) +#define leftRotate20_64(a) (leftRotate_64((a), 20)) +#define leftRotate21_64(a) (leftRotate_64((a), 21)) +#define leftRotate22_64(a) (leftRotate_64((a), 22)) +#define leftRotate23_64(a) (leftRotate_64((a), 23)) +#define leftRotate24_64(a) (leftRotate_64((a), 24)) +#define leftRotate25_64(a) (leftRotate_64((a), 25)) +#define leftRotate26_64(a) (leftRotate_64((a), 26)) +#define leftRotate27_64(a) (leftRotate_64((a), 27)) +#define leftRotate28_64(a) (leftRotate_64((a), 28)) +#define leftRotate29_64(a) (leftRotate_64((a), 29)) +#define leftRotate30_64(a) (leftRotate_64((a), 30)) +#define leftRotate31_64(a) (leftRotate_64((a), 31)) +#define leftRotate32_64(a) (leftRotate_64((a), 32)) +#define leftRotate33_64(a) (leftRotate_64((a), 33)) +#define leftRotate34_64(a) (leftRotate_64((a), 34)) +#define leftRotate35_64(a) (leftRotate_64((a), 35)) +#define leftRotate36_64(a) (leftRotate_64((a), 36)) +#define leftRotate37_64(a) (leftRotate_64((a), 37)) +#define leftRotate38_64(a) (leftRotate_64((a), 38)) +#define leftRotate39_64(a) (leftRotate_64((a), 39)) +#define leftRotate40_64(a) (leftRotate_64((a), 40)) +#define leftRotate41_64(a) (leftRotate_64((a), 41)) +#define leftRotate42_64(a) (leftRotate_64((a), 42)) +#define leftRotate43_64(a) (leftRotate_64((a), 43)) +#define leftRotate44_64(a) (leftRotate_64((a), 44)) +#define leftRotate45_64(a) (leftRotate_64((a), 45)) +#define leftRotate46_64(a) (leftRotate_64((a), 46)) +#define leftRotate47_64(a) (leftRotate_64((a), 47)) +#define leftRotate48_64(a) (leftRotate_64((a), 48)) +#define leftRotate49_64(a) (leftRotate_64((a), 49)) +#define leftRotate50_64(a) (leftRotate_64((a), 50)) +#define leftRotate51_64(a) (leftRotate_64((a), 51)) +#define leftRotate52_64(a) (leftRotate_64((a), 52)) +#define leftRotate53_64(a) (leftRotate_64((a), 53)) +#define leftRotate54_64(a) (leftRotate_64((a), 54)) +#define leftRotate55_64(a) (leftRotate_64((a), 55)) +#define leftRotate56_64(a) (leftRotate_64((a), 56)) +#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) +#define leftRotate58_64(a) (leftRotate_64((a), 58)) +#define leftRotate59_64(a) (leftRotate_64((a), 59)) +#define leftRotate60_64(a) (leftRotate_64((a), 60)) +#define leftRotate61_64(a) (leftRotate_64((a), 61)) +#define leftRotate62_64(a) (leftRotate_64((a), 62)) +#define leftRotate63_64(a) (leftRotate_64((a), 63)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_64(a) (rightRotate_64((a), 1)) +#define rightRotate2_64(a) (rightRotate_64((a), 2)) +#define rightRotate3_64(a) (rightRotate_64((a), 3)) +#define rightRotate4_64(a) (rightRotate_64((a), 4)) +#define rightRotate5_64(a) (rightRotate_64((a), 5)) +#define rightRotate6_64(a) (rightRotate_64((a), 6)) +#define rightRotate7_64(a) (rightRotate_64((a), 7)) +#define rightRotate8_64(a) (rightRotate_64((a), 8)) +#define rightRotate9_64(a) (rightRotate_64((a), 9)) +#define rightRotate10_64(a) (rightRotate_64((a), 10)) +#define rightRotate11_64(a) (rightRotate_64((a), 11)) +#define rightRotate12_64(a) (rightRotate_64((a), 12)) +#define rightRotate13_64(a) (rightRotate_64((a), 13)) +#define rightRotate14_64(a) (rightRotate_64((a), 14)) +#define rightRotate15_64(a) (rightRotate_64((a), 15)) +#define rightRotate16_64(a) (rightRotate_64((a), 16)) +#define rightRotate17_64(a) (rightRotate_64((a), 17)) +#define rightRotate18_64(a) (rightRotate_64((a), 18)) +#define rightRotate19_64(a) (rightRotate_64((a), 19)) +#define rightRotate20_64(a) (rightRotate_64((a), 20)) +#define rightRotate21_64(a) (rightRotate_64((a), 21)) +#define rightRotate22_64(a) (rightRotate_64((a), 22)) +#define rightRotate23_64(a) (rightRotate_64((a), 23)) +#define rightRotate24_64(a) (rightRotate_64((a), 24)) +#define rightRotate25_64(a) (rightRotate_64((a), 25)) +#define rightRotate26_64(a) (rightRotate_64((a), 26)) +#define rightRotate27_64(a) (rightRotate_64((a), 27)) +#define rightRotate28_64(a) (rightRotate_64((a), 28)) +#define rightRotate29_64(a) (rightRotate_64((a), 29)) +#define rightRotate30_64(a) (rightRotate_64((a), 30)) +#define rightRotate31_64(a) (rightRotate_64((a), 31)) +#define rightRotate32_64(a) (rightRotate_64((a), 32)) +#define rightRotate33_64(a) (rightRotate_64((a), 33)) +#define rightRotate34_64(a) (rightRotate_64((a), 34)) +#define rightRotate35_64(a) (rightRotate_64((a), 35)) +#define rightRotate36_64(a) (rightRotate_64((a), 36)) +#define rightRotate37_64(a) (rightRotate_64((a), 37)) +#define rightRotate38_64(a) (rightRotate_64((a), 38)) +#define rightRotate39_64(a) (rightRotate_64((a), 39)) +#define rightRotate40_64(a) (rightRotate_64((a), 40)) +#define rightRotate41_64(a) (rightRotate_64((a), 41)) +#define rightRotate42_64(a) (rightRotate_64((a), 42)) +#define rightRotate43_64(a) (rightRotate_64((a), 43)) +#define rightRotate44_64(a) (rightRotate_64((a), 44)) +#define rightRotate45_64(a) (rightRotate_64((a), 45)) +#define rightRotate46_64(a) (rightRotate_64((a), 46)) +#define rightRotate47_64(a) (rightRotate_64((a), 47)) +#define rightRotate48_64(a) (rightRotate_64((a), 48)) +#define rightRotate49_64(a) (rightRotate_64((a), 49)) +#define rightRotate50_64(a) (rightRotate_64((a), 50)) +#define rightRotate51_64(a) (rightRotate_64((a), 51)) +#define rightRotate52_64(a) (rightRotate_64((a), 52)) +#define rightRotate53_64(a) (rightRotate_64((a), 53)) +#define rightRotate54_64(a) (rightRotate_64((a), 54)) +#define rightRotate55_64(a) (rightRotate_64((a), 55)) +#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) +#define rightRotate57_64(a) (rightRotate_64((a), 57)) +#define rightRotate58_64(a) (rightRotate_64((a), 58)) +#define rightRotate59_64(a) (rightRotate_64((a), 59)) +#define rightRotate60_64(a) (rightRotate_64((a), 60)) +#define rightRotate61_64(a) (rightRotate_64((a), 61)) +#define rightRotate62_64(a) (rightRotate_64((a), 62)) +#define rightRotate63_64(a) (rightRotate_64((a), 63)) + +/* Rotate a 16-bit value left by a number of bits */ +#define leftRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (16 - (bits))); \ + })) + +/* Rotate a 16-bit value right by a number of bits */ +#define rightRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (16 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_16(a) (leftRotate_16((a), 1)) +#define leftRotate2_16(a) (leftRotate_16((a), 2)) +#define leftRotate3_16(a) (leftRotate_16((a), 3)) +#define leftRotate4_16(a) (leftRotate_16((a), 4)) +#define leftRotate5_16(a) (leftRotate_16((a), 5)) +#define leftRotate6_16(a) (leftRotate_16((a), 6)) +#define leftRotate7_16(a) (leftRotate_16((a), 7)) +#define leftRotate8_16(a) (leftRotate_16((a), 8)) +#define leftRotate9_16(a) (leftRotate_16((a), 9)) +#define leftRotate10_16(a) (leftRotate_16((a), 10)) +#define leftRotate11_16(a) (leftRotate_16((a), 11)) +#define leftRotate12_16(a) (leftRotate_16((a), 12)) +#define leftRotate13_16(a) (leftRotate_16((a), 13)) +#define leftRotate14_16(a) (leftRotate_16((a), 14)) +#define leftRotate15_16(a) (leftRotate_16((a), 15)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_16(a) (rightRotate_16((a), 1)) +#define rightRotate2_16(a) (rightRotate_16((a), 2)) +#define rightRotate3_16(a) (rightRotate_16((a), 3)) +#define rightRotate4_16(a) (rightRotate_16((a), 4)) +#define rightRotate5_16(a) (rightRotate_16((a), 5)) +#define rightRotate6_16(a) (rightRotate_16((a), 6)) +#define rightRotate7_16(a) (rightRotate_16((a), 7)) +#define rightRotate8_16(a) (rightRotate_16((a), 8)) +#define rightRotate9_16(a) (rightRotate_16((a), 9)) +#define rightRotate10_16(a) (rightRotate_16((a), 10)) +#define rightRotate11_16(a) (rightRotate_16((a), 11)) +#define rightRotate12_16(a) (rightRotate_16((a), 12)) +#define rightRotate13_16(a) (rightRotate_16((a), 13)) +#define rightRotate14_16(a) (rightRotate_16((a), 14)) +#define rightRotate15_16(a) (rightRotate_16((a), 15)) + +/* Rotate an 8-bit value left by a number of bits */ +#define leftRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (8 - (bits))); \ + })) + +/* Rotate an 8-bit value right by a number of bits */ +#define rightRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (8 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. 
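Looking back at the byte-order and block-XOR helpers defined earlier in this header, a short usage sketch may help; it is not part of the patch and assumes internal-util.h is on the include path.

    #include <stdint.h>
    #include <stdio.h>
    #include "internal-util.h"

    int main(void)
    {
        unsigned char buf[8];
        uint64_t x = 0x0102030405060708ULL;

        /* Serialise as big-endian bytes, then parse them back */
        be_store_word64(buf, x);
        printf("%d\n", be_load_word64(buf) == x);   /* prints 1 */

        /* XOR a pad into the buffer in place, as the sponge absorb loops do */
        unsigned char pad[8] = {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00};
        lw_xor_block(buf, pad, 8);
        printf("%02x\n", buf[0]);                   /* 0x01 ^ 0xFF = fe */
        return 0;
    }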
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_8(a) (leftRotate_8((a), 1)) +#define leftRotate2_8(a) (leftRotate_8((a), 2)) +#define leftRotate3_8(a) (leftRotate_8((a), 3)) +#define leftRotate4_8(a) (leftRotate_8((a), 4)) +#define leftRotate5_8(a) (leftRotate_8((a), 5)) +#define leftRotate6_8(a) (leftRotate_8((a), 6)) +#define leftRotate7_8(a) (leftRotate_8((a), 7)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_8(a) (rightRotate_8((a), 1)) +#define rightRotate2_8(a) (rightRotate_8((a), 2)) +#define rightRotate3_8(a) (rightRotate_8((a), 3)) +#define rightRotate4_8(a) (rightRotate_8((a), 4)) +#define rightRotate5_8(a) (rightRotate_8((a), 5)) +#define rightRotate6_8(a) (rightRotate_8((a), 6)) +#define rightRotate7_8(a) (rightRotate_8((a), 7)) + +#endif diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys/knot-hash.c b/knot/Implementations/crypto_hash/knot256v2/rhys/knot-hash.c new file mode 100644 index 0000000..a4edecd --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v2/rhys/knot-hash.c @@ -0,0 +1,186 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "knot.h" +#include "internal-knot.h" +#include + +aead_hash_algorithm_t const knot_hash_256_256_algorithm = { + "KNOT-HASH-256-256", + sizeof(int), + KNOT_HASH_256_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + knot_hash_256_256, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +aead_hash_algorithm_t const knot_hash_256_384_algorithm = { + "KNOT-HASH-256-384", + sizeof(int), + KNOT_HASH_256_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + knot_hash_256_384, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +aead_hash_algorithm_t const knot_hash_384_384_algorithm = { + "KNOT-HASH-384-384", + sizeof(int), + KNOT_HASH_384_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + knot_hash_384_384, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +aead_hash_algorithm_t const knot_hash_512_512_algorithm = { + "KNOT-HASH-512-512", + sizeof(int), + KNOT_HASH_512_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + knot_hash_512_512, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +/** + * \brief Input rate for KNOT-HASH-256-256. + */ +#define KNOT_HASH_256_256_RATE 4 + +/** + * \brief Input rate for KNOT-HASH-256-384. + */ +#define KNOT_HASH_256_384_RATE 16 + +/** + * \brief Input rate for KNOT-HASH-384-384. + */ +#define KNOT_HASH_384_384_RATE 6 + +/** + * \brief Input rate for KNOT-HASH-512-512. + */ +#define KNOT_HASH_512_512_RATE 8 + +int knot_hash_256_256 + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + knot256_state_t state; + unsigned temp; + memset(state.B, 0, sizeof(state.B)); + while (inlen >= KNOT_HASH_256_256_RATE) { + lw_xor_block(state.B, in, KNOT_HASH_256_256_RATE); + knot256_permute_7(&state, 68); + in += KNOT_HASH_256_256_RATE; + inlen -= KNOT_HASH_256_256_RATE; + } + temp = (unsigned)inlen; + lw_xor_block(state.B, in, temp); + state.B[temp] ^= 0x01; + knot256_permute_7(&state, 68); + memcpy(out, state.B, KNOT_HASH_256_SIZE / 2); + knot256_permute_7(&state, 68); + memcpy(out + KNOT_HASH_256_SIZE / 2, state.B, KNOT_HASH_256_SIZE / 2); + return 0; +} + +int knot_hash_256_384 + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + knot384_state_t state; + unsigned temp; + memset(state.B, 0, sizeof(state.B)); + state.B[sizeof(state.B) - 1] ^= 0x80; + while (inlen >= KNOT_HASH_256_384_RATE) { + lw_xor_block(state.B, in, KNOT_HASH_256_384_RATE); + knot384_permute_7(&state, 80); + in += KNOT_HASH_256_384_RATE; + inlen -= KNOT_HASH_256_384_RATE; + } + temp = (unsigned)inlen; + lw_xor_block(state.B, in, temp); + state.B[temp] ^= 0x01; + knot384_permute_7(&state, 80); + memcpy(out, state.B, KNOT_HASH_256_SIZE / 2); + knot384_permute_7(&state, 80); + memcpy(out + KNOT_HASH_256_SIZE / 2, state.B, KNOT_HASH_256_SIZE / 2); + return 0; +} + +int knot_hash_384_384 + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + knot384_state_t state; + unsigned temp; + memset(state.B, 0, sizeof(state.B)); + while (inlen >= KNOT_HASH_384_384_RATE) { + lw_xor_block(state.B, in, KNOT_HASH_384_384_RATE); + knot384_permute_7(&state, 104); + in += KNOT_HASH_384_384_RATE; + inlen -= KNOT_HASH_384_384_RATE; + } + temp = (unsigned)inlen; + lw_xor_block(state.B, in, temp); + state.B[temp] ^= 0x01; + knot384_permute_7(&state, 104); + memcpy(out, state.B, KNOT_HASH_384_SIZE / 
2); + knot384_permute_7(&state, 104); + memcpy(out + KNOT_HASH_384_SIZE / 2, state.B, KNOT_HASH_384_SIZE / 2); + return 0; +} + +int knot_hash_512_512 + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + knot512_state_t state; + unsigned temp; + memset(state.B, 0, sizeof(state.B)); + while (inlen >= KNOT_HASH_512_512_RATE) { + lw_xor_block(state.B, in, KNOT_HASH_512_512_RATE); + knot512_permute_8(&state, 140); + in += KNOT_HASH_512_512_RATE; + inlen -= KNOT_HASH_512_512_RATE; + } + temp = (unsigned)inlen; + lw_xor_block(state.B, in, temp); + state.B[temp] ^= 0x01; + knot512_permute_8(&state, 140); + memcpy(out, state.B, KNOT_HASH_512_SIZE / 2); + knot512_permute_8(&state, 140); + memcpy(out + KNOT_HASH_512_SIZE / 2, state.B, KNOT_HASH_512_SIZE / 2); + return 0; +} diff --git a/knot/Implementations/crypto_hash/knot256v2/rhys/knot.h b/knot/Implementations/crypto_hash/knot256v2/rhys/knot.h new file mode 100644 index 0000000..e2c5198 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot256v2/rhys/knot.h @@ -0,0 +1,459 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_KNOT_H +#define LWCRYPTO_KNOT_H + +#include "aead-common.h" + +/** + * \file knot.h + * \brief KNOT authenticated encryption and hash algorithms. + * + * KNOT is a family of authenticated encryption and hash algorithms built + * around a permutation and the MonkeyDuplex sponge construction. The + * family members are: + * + * \li KNOT-AEAD-128-256 with a 128-bit key, a 128-bit nonce, and a + * 128-bit tag, built around a 256-bit permutation. This is the primary + * encryption member of the family. + * \li KNOT-AEAD-128-384 with a 128-bit key, a 128-bit nonce, and a + * 128-bit tag, built around a 384-bit permutation. + * \li KNOT-AEAD-192-384 with a 192-bit key, a 192-bit nonce, and a + * 192-bit tag, built around a 384-bit permutation. + * \li KNOT-AEAD-256-512 with a 256-bit key, a 256-bit nonce, and a + * 256-bit tag, built around a 512-bit permutation. + * \li KNOT-HASH-256-256 with a 256-bit hash output, built around a + * 256-bit permutation. This is the primary hashing member of the family. + * \li KNOT-HASH-256-384 with a 256-bit hash output, built around a + * 384-bit permutation. + * \li KNOT-HASH-384-384 with a 384-bit hash output, built around a + * 384-bit permutation. + * \li KNOT-HASH-512-512 with a 512-bit hash output, built around a + * 512-bit permutation. 
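As a usage sketch for the all-in-one hashing entry points declared later in this header (not part of the patch; it assumes knot.h and the rest of the rhys sources are compiled into the program):

    #include <stdio.h>
    #include "knot.h"

    int main(void)
    {
        static const unsigned char msg[5] = {'h', 'e', 'l', 'l', 'o'};
        unsigned char digest[KNOT_HASH_256_SIZE];

        /* KNOT-HASH-256-256 writes a 32-byte digest and returns 0 on success */
        if (knot_hash_256_256(digest, msg, sizeof(msg)) == 0) {
            for (unsigned i = 0; i < KNOT_HASH_256_SIZE; ++i)
                printf("%02x", digest[i]);
            printf("\n");
        }
        return 0;
    }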
+ * + * References: https://csrc.nist.gov/CSRC/media/Projects/lightweight-cryptography/documents/round-2/spec-doc-rnd2/knot-spec-round.pdf + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the key for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. + */ +#define KNOT_AEAD_128_KEY_SIZE 16 + +/** + * \brief Size of the authentication tag for KNOT-AEAD-128-256 and + * KNOT-AEAD-128-384. + */ +#define KNOT_AEAD_128_TAG_SIZE 16 + +/** + * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. + */ +#define KNOT_AEAD_128_NONCE_SIZE 16 + +/** + * \brief Size of the key for KNOT-AEAD-192-384. + */ +#define KNOT_AEAD_192_KEY_SIZE 24 + +/** + * \brief Size of the authentication tag for KNOT-AEAD-192-384. + */ +#define KNOT_AEAD_192_TAG_SIZE 24 + +/** + * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-192-384. + */ +#define KNOT_AEAD_192_NONCE_SIZE 24 + +/** + * \brief Size of the key for KNOT-AEAD-256-512. + */ +#define KNOT_AEAD_256_KEY_SIZE 32 + +/** + * \brief Size of the authentication tag for KNOT-AEAD-256-512. + */ +#define KNOT_AEAD_256_TAG_SIZE 32 + +/** + * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. + */ +#define KNOT_AEAD_256_NONCE_SIZE 32 + +/** + * \brief Size of the hash for KNOT-HASH-256-256 and KNOT-HASH-256-384. + */ +#define KNOT_HASH_256_SIZE 32 + +/** + * \brief Size of the hash for KNOT-HASH-384-384. + */ +#define KNOT_HASH_384_SIZE 48 + +/** + * \brief Size of the hash for KNOT-HASH-512-512. + */ +#define KNOT_HASH_512_SIZE 64 + +/** + * \brief Meta-information block for the KNOT-AEAD-128-256 cipher. + */ +extern aead_cipher_t const knot_aead_128_256_cipher; + +/** + * \brief Meta-information block for the KNOT-AEAD-128-384 cipher. + */ +extern aead_cipher_t const knot_aead_128_384_cipher; + +/** + * \brief Meta-information block for the KNOT-AEAD-192-384 cipher. + */ +extern aead_cipher_t const knot_aead_192_384_cipher; + +/** + * \brief Meta-information block for the KNOT-AEAD-256-512 cipher. + */ +extern aead_cipher_t const knot_aead_256_512_cipher; + +/** + * \brief Meta-information block for the KNOT-HASH-256-256 algorithm. + */ +extern aead_hash_algorithm_t const knot_hash_256_256_algorithm; + +/** + * \brief Meta-information block for the KNOT-HASH-256-384 algorithm. + */ +extern aead_hash_algorithm_t const knot_hash_256_384_algorithm; + +/** + * \brief Meta-information block for the KNOT-HASH-384-384 algorithm. + */ +extern aead_hash_algorithm_t const knot_hash_384_384_algorithm; + +/** + * \brief Meta-information block for the KNOT-HASH-512-512 algorithm. + */ +extern aead_hash_algorithm_t const knot_hash_512_512_algorithm; + +/** + * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-256. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. 
+ * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa knot_aead_128_256_decrypt() + */ +int knot_aead_128_256_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-256. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa knot_aead_128_256_encrypt() + */ +int knot_aead_128_256_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-384. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa knot_aead_128_384_decrypt() + */ +int knot_aead_128_384_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-384. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. 
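To make the calling convention concrete, here is a round-trip sketch with KNOT-AEAD-128-256; it is not part of the patch, and the all-zero key and nonce are illustrative only.

    #include <stdio.h>
    #include "knot.h"

    int main(void)
    {
        unsigned char key[KNOT_AEAD_128_KEY_SIZE] = {0};
        unsigned char nonce[KNOT_AEAD_128_NONCE_SIZE] = {0};
        unsigned char msg[] = "hello world!";
        unsigned char ct[sizeof(msg) + KNOT_AEAD_128_TAG_SIZE];
        unsigned char pt[sizeof(msg)];
        unsigned long long clen, mlen;

        /* Ciphertext is the encrypted message followed by the 16-byte tag */
        knot_aead_128_256_encrypt(ct, &clen, msg, sizeof(msg),
                                  0, 0, 0, nonce, key);

        /* Returns 0 only when the authentication tag verifies */
        if (knot_aead_128_256_decrypt(pt, &mlen, 0, ct, clen,
                                      0, 0, nonce, key) == 0)
            printf("decrypted %llu bytes\n", mlen);
        return 0;
    }

In a real protocol the nonce must be unique per key, and the associated-data pointer and length (passed as 0 here) carry any header bytes that should be authenticated but not encrypted.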
+ * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa knot_aead_128_384_encrypt() + */ +int knot_aead_128_384_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + + +/** + * \brief Encrypts and authenticates a packet with KNOT-AEAD-192-384. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa knot_aead_192_384_decrypt() + */ +int knot_aead_192_384_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with KNOT-AEAD-192-384. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa knot_aead_192_384_encrypt() + */ +int knot_aead_192_384_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with KNOT-AEAD-256-512. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. 
+ * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa knot_aead_256_512_decrypt() + */ +int knot_aead_256_512_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with KNOT-AEAD-256-512. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa knot_aead_256_512_encrypt() + */ +int knot_aead_256_512_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data with KNOT-HASH-256-256. + * + * \param out Buffer to receive the hash output which must be at least + * KNOT_HASH_256_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int knot_hash_256_256 + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Hashes a block of input data with KNOT-HASH-256-384. + * + * \param out Buffer to receive the hash output which must be at least + * KNOT_HASH_256_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int knot_hash_256_384 + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Hashes a block of input data with KNOT-HASH-384-384. + * + * \param out Buffer to receive the hash output which must be at least + * KNOT_HASH_384_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. 
+ */ +int knot_hash_384_384 + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Hashes a block of input data with KNOT-HASH-512-512. + * + * \param out Buffer to receive the hash output which must be at least + * KNOT_HASH_512_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int knot_hash_512_512 + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/knot/Implementations/crypto_hash/knot384/rhys-avr/aead-common.c b/knot/Implementations/crypto_hash/knot384/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/knot/Implementations/crypto_hash/knot384/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/knot/Implementations/crypto_hash/knot384/rhys-avr/aead-common.h b/knot/Implementations/crypto_hash/knot384/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/knot/Implementations/crypto_hash/knot384/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
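The constant-time tag check shown in the removed duplicate above ORs together the XOR of every tag byte and then converts "all bytes equal" into an all-ones mask without branching on secret data. A standalone sketch of the same idea follows; ct_compare is a hypothetical name for this sketch, and like the library code it assumes an arithmetic right shift of a negative int.

    #include <stdio.h>

    /* Returns 0 if the two buffers are equal, -1 otherwise, with no
     * data-dependent branches on the buffer contents */
    static int ct_compare(const unsigned char *a, const unsigned char *b, unsigned len)
    {
        int accum = 0;
        while (len > 0) {
            accum |= (*a++ ^ *b++);   /* becomes nonzero if any byte differs */
            --len;
        }
        /* accum is in 0..255: (accum - 1) >> 8 is -1 when accum == 0, else 0 */
        accum = (accum - 1) >> 8;
        return ~accum;                /* ~(-1) == 0 on match, ~0 == -1 on mismatch */
    }

    int main(void)
    {
        unsigned char t1[4] = {1, 2, 3, 4};
        unsigned char t2[4] = {1, 2, 3, 5};
        printf("%d %d\n", ct_compare(t1, t1, 4), ct_compare(t1, t2, 4));
        return 0;
    }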
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. 
- * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
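The aead_hash_algorithm_t block described above lets a harness drive any hash in the suite generically. A small sketch of that pattern is given here; it is not part of the patch, hash_with is a hypothetical helper, and aead-common.h and knot.h from this repository are assumed to be on the include path.

    #include <stdio.h>
    #include "aead-common.h"
    #include "knot.h"

    /* Hash a message through whichever algorithm the meta-information describes */
    static void hash_with(const aead_hash_algorithm_t *alg,
                          const unsigned char *in, unsigned long long inlen)
    {
        unsigned char out[64];              /* KNOT_HASH_512_SIZE is the largest */
        if (alg->hash != 0 && alg->hash(out, in, inlen) == 0) {
            printf("%s: ", alg->name);
            for (unsigned i = 0; i < alg->hash_len; ++i)
                printf("%02x", out[i]);
            printf("\n");
        }
    }

    int main(void)
    {
        static const unsigned char msg[3] = {'a', 'b', 'c'};
        hash_with(&knot_hash_256_256_algorithm, msg, sizeof(msg));
        return 0;
    }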
- * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_hash/knot384/rhys-avr/api.h b/knot/Implementations/crypto_hash/knot384/rhys-avr/api.h deleted file mode 100644 index d507385..0000000 --- a/knot/Implementations/crypto_hash/knot384/rhys-avr/api.h +++ /dev/null @@ -1 +0,0 @@ -#define CRYPTO_BYTES 48 diff --git a/knot/Implementations/crypto_hash/knot384/rhys-avr/hash.c b/knot/Implementations/crypto_hash/knot384/rhys-avr/hash.c deleted file mode 100644 index 2f63a7a..0000000 --- a/knot/Implementations/crypto_hash/knot384/rhys-avr/hash.c +++ /dev/null @@ -1,8 +0,0 @@ - -#include "knot.h" - -int crypto_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - return knot_hash_384_384(out, in, inlen); -} diff --git a/knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot-256-avr.S b/knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot-256-avr.S deleted file mode 100644 index 15e6389..0000000 --- a/knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot-256-avr.S +++ /dev/null @@ -1,1093 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_6, @object - .size table_6, 52 -table_6: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 33 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 49 - .byte 34 - .byte 5 - .byte 10 - .byte 20 - .byte 41 - .byte 19 - .byte 39 - .byte 15 - .byte 30 - .byte 61 - .byte 58 - .byte 52 - .byte 40 - .byte 17 - .byte 35 - .byte 7 - .byte 14 - .byte 28 - .byte 57 - .byte 50 - .byte 36 - .byte 9 - .byte 18 - .byte 37 - .byte 11 - .byte 22 - .byte 45 - .byte 27 - .byte 55 - .byte 46 - .byte 29 - .byte 59 - .byte 54 - .byte 44 - .byte 25 - .byte 51 - .byte 38 - .byte 13 - .byte 26 - .byte 53 - .byte 42 - - .text -.global knot256_permute_6 - .type knot256_permute_6, @function -knot256_permute_6: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 57 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r8 - std Y+18,r9 - std 
Y+19,r10 - std Y+20,r11 - std Y+21,r12 - std Y+22,r13 - std Y+23,r14 - std Y+24,r15 - push r31 - push r30 - ldi r30,lo8(table_6) - ldi r31,hi8(table_6) -#if defined(RAMPZ) - ldi r17,hh8(table_6) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -59: -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - eor r18,r23 - inc r30 - ldd r23,Y+1 - ldd r4,Y+9 - ldd r5,Y+17 - mov r24,r18 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+33,r7 - mov r16,r5 - eor r16,r24 - mov r8,r23 - or r8,r4 - eor r8,r16 - mov r24,r23 - eor r24,r5 - mov r18,r25 - and r18,r16 - eor r18,r24 - mov r6,r8 - and r6,r24 - eor r6,r25 - std Y+25,r6 - ldd r23,Y+2 - ldd r4,Y+10 - ldd r5,Y+18 - mov r24,r19 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+34,r7 - mov r16,r5 - eor r16,r24 - mov r9,r23 - or r9,r4 - eor r9,r16 - mov r24,r23 - eor r24,r5 - mov r19,r25 - and r19,r16 - eor r19,r24 - mov r6,r9 - and r6,r24 - eor r6,r25 - std Y+26,r6 - ldd r23,Y+3 - ldd r4,Y+11 - ldd r5,Y+19 - mov r24,r20 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+35,r7 - mov r16,r5 - eor r16,r24 - mov r10,r23 - or r10,r4 - eor r10,r16 - mov r24,r23 - eor r24,r5 - mov r20,r25 - and r20,r16 - eor r20,r24 - mov r6,r10 - and r6,r24 - eor r6,r25 - std Y+27,r6 - ldd r23,Y+4 - ldd r4,Y+12 - ldd r5,Y+20 - mov r24,r21 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+36,r7 - mov r16,r5 - eor r16,r24 - mov r11,r23 - or r11,r4 - eor r11,r16 - mov r24,r23 - eor r24,r5 - mov r21,r25 - and r21,r16 - eor r21,r24 - mov r6,r11 - and r6,r24 - eor r6,r25 - std Y+28,r6 - ldd r23,Y+5 - ldd r4,Y+13 - ldd r5,Y+21 - mov r24,r26 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+37,r7 - mov r16,r5 - eor r16,r24 - mov r12,r23 - or r12,r4 - eor r12,r16 - mov r24,r23 - eor r24,r5 - mov r26,r25 - and r26,r16 - eor r26,r24 - mov r6,r12 - and r6,r24 - eor r6,r25 - std Y+29,r6 - ldd r23,Y+6 - ldd r4,Y+14 - ldd r5,Y+22 - mov r24,r27 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+38,r7 - mov r16,r5 - eor r16,r24 - mov r13,r23 - or r13,r4 - eor r13,r16 - mov r24,r23 - eor r24,r5 - mov r27,r25 - and r27,r16 - eor r27,r24 - mov r6,r13 - and r6,r24 - eor r6,r25 - std Y+30,r6 - ldd r23,Y+7 - ldd r4,Y+15 - ldd r5,Y+23 - mov r24,r2 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+39,r7 - mov r16,r5 - eor r16,r24 - mov r14,r23 - or r14,r4 - eor r14,r16 - mov r24,r23 - eor r24,r5 - mov r2,r25 - and r2,r16 - eor r2,r24 - mov r6,r14 - and r6,r24 - eor r6,r25 - std Y+31,r6 - ldd r23,Y+8 - ldd r4,Y+16 - ldd r5,Y+24 - mov r24,r3 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+40,r7 - mov r16,r5 - eor r16,r24 - mov r15,r23 - or r15,r4 - eor r15,r16 - mov r24,r23 - eor r24,r5 - mov r3,r25 - and r3,r16 - eor r3,r24 - mov r6,r15 - and r6,r24 - eor r6,r25 - std Y+32,r6 - std Y+9,r15 - std Y+10,r8 - std Y+11,r9 - std Y+12,r10 - std Y+13,r11 - std Y+14,r12 - std Y+15,r13 - std Y+16,r14 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - 
ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - ldd r12,Y+37 - ldd r13,Y+38 - ldd r14,Y+39 - ldd r15,Y+40 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+17,r13 - std Y+18,r14 - std Y+19,r15 - std Y+20,r8 - std Y+21,r9 - std Y+22,r10 - std Y+23,r11 - std Y+24,r12 - dec r22 - breq 5322f - rjmp 59b -5322: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - ldd r12,Y+5 - ldd r13,Y+6 - ldd r14,Y+7 - ldd r15,Y+8 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - ldd r8,Y+17 - ldd r9,Y+18 - ldd r10,Y+19 - ldd r11,Y+20 - ldd r12,Y+21 - ldd r13,Y+22 - ldd r14,Y+23 - ldd r15,Y+24 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - adiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot256_permute_6, .-knot256_permute_6 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot256_permute_7 - .type knot256_permute_7, @function -knot256_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 57 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std 
Y+8,r15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r8 - std Y+18,r9 - std Y+19,r10 - std Y+20,r11 - std Y+21,r12 - std Y+22,r13 - std Y+23,r14 - std Y+24,r15 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r17,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -59: -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - eor r18,r23 - inc r30 - ldd r23,Y+1 - ldd r4,Y+9 - ldd r5,Y+17 - mov r24,r18 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+33,r7 - mov r16,r5 - eor r16,r24 - mov r8,r23 - or r8,r4 - eor r8,r16 - mov r24,r23 - eor r24,r5 - mov r18,r25 - and r18,r16 - eor r18,r24 - mov r6,r8 - and r6,r24 - eor r6,r25 - std Y+25,r6 - ldd r23,Y+2 - ldd r4,Y+10 - ldd r5,Y+18 - mov r24,r19 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+34,r7 - mov r16,r5 - eor r16,r24 - mov r9,r23 - or r9,r4 - eor r9,r16 - mov r24,r23 - eor r24,r5 - mov r19,r25 - and r19,r16 - eor r19,r24 - mov r6,r9 - and r6,r24 - eor r6,r25 - std Y+26,r6 - ldd r23,Y+3 - ldd r4,Y+11 - ldd r5,Y+19 - mov r24,r20 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+35,r7 - mov r16,r5 - eor r16,r24 - mov r10,r23 - or r10,r4 - eor r10,r16 - mov r24,r23 - eor r24,r5 - mov r20,r25 - and r20,r16 - eor r20,r24 - mov r6,r10 - and r6,r24 - eor r6,r25 - std Y+27,r6 - ldd r23,Y+4 - ldd r4,Y+12 - ldd r5,Y+20 - mov r24,r21 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+36,r7 - mov r16,r5 - eor r16,r24 - mov r11,r23 - or r11,r4 - eor r11,r16 - mov r24,r23 - eor r24,r5 - mov r21,r25 - and r21,r16 - eor r21,r24 - mov r6,r11 - and r6,r24 - eor r6,r25 - std Y+28,r6 - ldd r23,Y+5 - ldd r4,Y+13 - ldd r5,Y+21 - mov r24,r26 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+37,r7 - mov r16,r5 - eor r16,r24 - mov r12,r23 - or r12,r4 - eor r12,r16 - mov r24,r23 - eor r24,r5 - mov r26,r25 - and r26,r16 - eor r26,r24 - mov r6,r12 - and r6,r24 - eor r6,r25 - std Y+29,r6 - ldd r23,Y+6 - ldd r4,Y+14 - ldd r5,Y+22 - mov r24,r27 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+38,r7 - mov r16,r5 - eor r16,r24 - mov r13,r23 - or r13,r4 - eor r13,r16 - mov r24,r23 - eor r24,r5 - mov r27,r25 - and r27,r16 - eor r27,r24 - mov r6,r13 - and r6,r24 - eor r6,r25 - std Y+30,r6 - ldd r23,Y+7 - ldd r4,Y+15 - ldd r5,Y+23 - mov r24,r2 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+39,r7 - mov r16,r5 - eor r16,r24 - mov r14,r23 - or r14,r4 - eor r14,r16 - mov r24,r23 - eor r24,r5 - mov r2,r25 - and r2,r16 - eor r2,r24 - mov r6,r14 - and r6,r24 - eor r6,r25 - std Y+31,r6 - ldd r23,Y+8 - ldd r4,Y+16 - ldd r5,Y+24 - mov r24,r3 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+40,r7 - mov r16,r5 - eor r16,r24 - mov r15,r23 - or r15,r4 - eor r15,r16 - mov r24,r23 - eor r24,r5 - mov r3,r25 - and r3,r16 - eor r3,r24 - mov r6,r15 - and r6,r24 - eor r6,r25 - std Y+32,r6 - std Y+9,r15 - std Y+10,r8 - std Y+11,r9 - 
std Y+12,r10 - std Y+13,r11 - std Y+14,r12 - std Y+15,r13 - std Y+16,r14 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - ldd r12,Y+37 - ldd r13,Y+38 - ldd r14,Y+39 - ldd r15,Y+40 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+17,r13 - std Y+18,r14 - std Y+19,r15 - std Y+20,r8 - std Y+21,r9 - std Y+22,r10 - std Y+23,r11 - std Y+24,r12 - dec r22 - breq 5322f - rjmp 59b -5322: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - ldd r12,Y+5 - ldd r13,Y+6 - ldd r14,Y+7 - ldd r15,Y+8 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - ldd r8,Y+17 - ldd r9,Y+18 - ldd r10,Y+19 - ldd r11,Y+20 - ldd r12,Y+21 - ldd r13,Y+22 - ldd r14,Y+23 - ldd r15,Y+24 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - adiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot256_permute_7, .-knot256_permute_7 - -#endif diff --git a/knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot-384-avr.S b/knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot-384-avr.S deleted file mode 100644 index 4d15898..0000000 --- a/knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot-384-avr.S +++ /dev/null @@ -1,833 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 
46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot384_permute_7 - .type knot384_permute_7, @function -knot384_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,72 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 87 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - ldd r4,Z+16 - ldd r5,Z+17 - ldd r6,Z+18 - ldd r7,Z+19 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - std Y+13,r26 - std Y+14,r27 - std Y+15,r2 - std Y+16,r3 - std Y+17,r4 - std Y+18,r5 - std Y+19,r6 - std Y+20,r7 - std Y+21,r8 - std Y+22,r9 - std Y+23,r10 - std Y+24,r11 - ldd r26,Z+24 - ldd r27,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r4,Z+28 - ldd r5,Z+29 - ldd r6,Z+30 - ldd r7,Z+31 - ldd r8,Z+32 - ldd r9,Z+33 - ldd r10,Z+34 - ldd r11,Z+35 - std Y+25,r26 - std Y+26,r27 - std Y+27,r2 - std Y+28,r3 - std Y+29,r4 - std Y+30,r5 - std Y+31,r6 - std Y+32,r7 - std Y+33,r8 - std Y+34,r9 - std Y+35,r10 - std Y+36,r11 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+37,r26 - std Y+38,r27 - std Y+39,r2 - std Y+40,r3 - std Y+41,r4 - std Y+42,r5 - std Y+43,r6 - std Y+44,r7 - std Y+45,r8 - std Y+46,r9 - std Y+47,r10 - std Y+48,r11 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r24,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif -99: - ldd r12,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - inc r30 - ldd r18,Y+13 - ldd r19,Y+25 - ldd r20,Y+37 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+61,r23 - mov r14,r20 - eor r14,r12 - mov r26,r18 - or r26,r19 - eor r26,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+1,r21 - mov r21,r26 - and r21,r12 - eor r21,r13 - std Y+49,r21 - ldd r12,Y+2 - ldd r18,Y+14 - ldd r19,Y+26 - ldd r20,Y+38 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+62,r23 - mov r14,r20 - eor r14,r12 - mov r27,r18 - or r27,r19 - eor r27,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+2,r21 - mov r21,r27 - and r21,r12 - eor r21,r13 - std Y+50,r21 - ldd r12,Y+3 - ldd r18,Y+15 - ldd r19,Y+27 - ldd r20,Y+39 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+63,r23 - mov r14,r20 - eor r14,r12 - mov r2,r18 - or r2,r19 - eor r2,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+3,r21 - mov r21,r2 - and r21,r12 - eor r21,r13 - std Y+51,r21 - ldd r12,Y+4 - ldd r18,Y+16 - ldd r19,Y+28 - ldd r20,Y+40 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,192 - sbci r29,255 - st Y,r23 - subi r28,64 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r3,r18 - or r3,r19 - eor r3,r14 - 
mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+4,r21 - mov r21,r3 - and r21,r12 - eor r21,r13 - std Y+52,r21 - ldd r12,Y+5 - ldd r18,Y+17 - ldd r19,Y+29 - ldd r20,Y+41 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,191 - sbci r29,255 - st Y,r23 - subi r28,65 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r4,r18 - or r4,r19 - eor r4,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+5,r21 - mov r21,r4 - and r21,r12 - eor r21,r13 - std Y+53,r21 - ldd r12,Y+6 - ldd r18,Y+18 - ldd r19,Y+30 - ldd r20,Y+42 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,190 - sbci r29,255 - st Y,r23 - subi r28,66 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r5,r18 - or r5,r19 - eor r5,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+6,r21 - mov r21,r5 - and r21,r12 - eor r21,r13 - std Y+54,r21 - ldd r12,Y+7 - ldd r18,Y+19 - ldd r19,Y+31 - ldd r20,Y+43 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,189 - sbci r29,255 - st Y,r23 - subi r28,67 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r6,r18 - or r6,r19 - eor r6,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+7,r21 - mov r21,r6 - and r21,r12 - eor r21,r13 - std Y+55,r21 - ldd r12,Y+8 - ldd r18,Y+20 - ldd r19,Y+32 - ldd r20,Y+44 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,188 - sbci r29,255 - st Y,r23 - subi r28,68 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r7,r18 - or r7,r19 - eor r7,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+8,r21 - mov r21,r7 - and r21,r12 - eor r21,r13 - std Y+56,r21 - ldd r12,Y+9 - ldd r18,Y+21 - ldd r19,Y+33 - ldd r20,Y+45 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,187 - sbci r29,255 - st Y,r23 - subi r28,69 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r8,r18 - or r8,r19 - eor r8,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+9,r21 - mov r21,r8 - and r21,r12 - eor r21,r13 - std Y+57,r21 - ldd r12,Y+10 - ldd r18,Y+22 - ldd r19,Y+34 - ldd r20,Y+46 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,186 - sbci r29,255 - st Y,r23 - subi r28,70 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r9,r18 - or r9,r19 - eor r9,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+10,r21 - mov r21,r9 - and r21,r12 - eor r21,r13 - std Y+58,r21 - ldd r12,Y+11 - ldd r18,Y+23 - ldd r19,Y+35 - ldd r20,Y+47 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,185 - sbci r29,255 - st Y,r23 - subi r28,71 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r10,r18 - or r10,r19 - eor r10,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+11,r21 - mov r21,r10 - and r21,r12 - eor r21,r13 - std Y+59,r21 - ldd r12,Y+12 - ldd r18,Y+24 - ldd r19,Y+36 - ldd r20,Y+48 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,184 - sbci r29,255 - st Y,r23 - subi r28,72 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r11,r18 - or r11,r19 - eor r11,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+12,r21 - mov r21,r11 - and r21,r12 - eor r21,r13 - std Y+60,r21 - std Y+25,r11 - std Y+26,r26 - std Y+27,r27 - std Y+28,r2 - std Y+29,r3 - std Y+30,r4 - std 
Y+31,r5 - std Y+32,r6 - std Y+33,r7 - std Y+34,r8 - std Y+35,r9 - std Y+36,r10 - ldd r26,Y+49 - ldd r27,Y+50 - ldd r2,Y+51 - ldd r3,Y+52 - ldd r4,Y+53 - ldd r5,Y+54 - ldd r6,Y+55 - ldd r7,Y+56 - ldd r8,Y+57 - ldd r9,Y+58 - ldd r10,Y+59 - ldd r11,Y+60 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - adc r26,r1 - std Y+13,r26 - std Y+14,r27 - std Y+15,r2 - std Y+16,r3 - std Y+17,r4 - std Y+18,r5 - std Y+19,r6 - std Y+20,r7 - std Y+21,r8 - std Y+22,r9 - std Y+23,r10 - std Y+24,r11 - adiw r28,61 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y - subi r28,72 - sbc r29,r1 - bst r26,0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r3 - ror r2 - ror r27 - ror r26 - bld r11,7 - std Y+37,r5 - std Y+38,r6 - std Y+39,r7 - std Y+40,r8 - std Y+41,r9 - std Y+42,r10 - std Y+43,r11 - std Y+44,r26 - std Y+45,r27 - std Y+46,r2 - std Y+47,r3 - std Y+48,r4 - dec r22 - breq 5542f - rjmp 99b -5542: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r2,Y+15 - ldd r3,Y+16 - ldd r4,Y+17 - ldd r5,Y+18 - ldd r6,Y+19 - ldd r7,Y+20 - ldd r8,Y+21 - ldd r9,Y+22 - ldd r10,Y+23 - ldd r11,Y+24 - std Z+12,r26 - std Z+13,r27 - std Z+14,r2 - std Z+15,r3 - std Z+16,r4 - std Z+17,r5 - std Z+18,r6 - std Z+19,r7 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - ldd r26,Y+25 - ldd r27,Y+26 - ldd r2,Y+27 - ldd r3,Y+28 - ldd r4,Y+29 - ldd r5,Y+30 - ldd r6,Y+31 - ldd r7,Y+32 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - std Z+24,r26 - std Z+25,r27 - std Z+26,r2 - std Z+27,r3 - std Z+28,r4 - std Z+29,r5 - std Z+30,r6 - std Z+31,r7 - std Z+32,r8 - std Z+33,r9 - std Z+34,r10 - std Z+35,r11 - ldd r26,Y+37 - ldd r27,Y+38 - ldd r2,Y+39 - ldd r3,Y+40 - ldd r4,Y+41 - ldd r5,Y+42 - ldd r6,Y+43 - ldd r7,Y+44 - ldd r8,Y+45 - ldd r9,Y+46 - ldd r10,Y+47 - ldd r11,Y+48 - std Z+36,r26 - std Z+37,r27 - std Z+38,r2 - std Z+39,r3 - std Z+40,r4 - std Z+41,r5 - std Z+42,r6 - std Z+43,r7 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - subi r28,184 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot384_permute_7, .-knot384_permute_7 - -#endif diff --git a/knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot-512-avr.S b/knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot-512-avr.S deleted file mode 100644 index 6f92ac3..0000000 --- a/knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot-512-avr.S +++ /dev/null @@ -1,2315 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 
15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot512_permute_7 - .type knot512_permute_7, @function -knot512_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,96 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 113 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r26,Z+16 - ldd r27,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - ldd r26,Z+32 - ldd r27,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r8,Z+40 - ldd r9,Z+41 - ldd r10,Z+42 - ldd r11,Z+43 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - std Y+33,r26 - std Y+34,r27 - std Y+35,r2 - std Y+36,r3 - std Y+37,r4 - std Y+38,r5 - std Y+39,r6 - std Y+40,r7 - std Y+41,r8 - std Y+42,r9 - std Y+43,r10 - std Y+44,r11 - std Y+45,r12 - std Y+46,r13 - std Y+47,r14 - std Y+48,r15 - ldd r26,Z+48 - ldd r27,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r8,Z+56 - ldd r9,Z+57 - ldd r10,Z+58 - ldd r11,Z+59 - ldd r12,Z+60 - ldd r13,Z+61 - ldd r14,Z+62 - ldd r15,Z+63 - adiw r28,49 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y+,r12 - st Y+,r13 - st Y+,r14 - st Y,r15 - subi r28,64 - sbc r29,r1 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r17,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -134: - ldd r24,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r24,r18 - 
inc r30 - ldd r18,Y+17 - ldd r19,Y+33 - ldd r20,Y+49 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,175 - sbci r29,255 - st Y,r23 - subi r28,81 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r26,r18 - or r26,r19 - eor r26,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+1,r21 - mov r21,r26 - and r21,r24 - eor r21,r25 - subi r28,191 - sbci r29,255 - st Y,r21 - subi r28,65 - sbc r29,r1 - ldd r24,Y+2 - ldd r18,Y+18 - ldd r19,Y+34 - ldd r20,Y+50 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,174 - sbci r29,255 - st Y,r23 - subi r28,82 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r27,r18 - or r27,r19 - eor r27,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+2,r21 - mov r21,r27 - and r21,r24 - eor r21,r25 - subi r28,190 - sbci r29,255 - st Y,r21 - subi r28,66 - sbc r29,r1 - ldd r24,Y+3 - ldd r18,Y+19 - ldd r19,Y+35 - ldd r20,Y+51 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,173 - sbci r29,255 - st Y,r23 - subi r28,83 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r2,r18 - or r2,r19 - eor r2,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+3,r21 - mov r21,r2 - and r21,r24 - eor r21,r25 - subi r28,189 - sbci r29,255 - st Y,r21 - subi r28,67 - sbc r29,r1 - ldd r24,Y+4 - ldd r18,Y+20 - ldd r19,Y+36 - ldd r20,Y+52 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,172 - sbci r29,255 - st Y,r23 - subi r28,84 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r3,r18 - or r3,r19 - eor r3,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+4,r21 - mov r21,r3 - and r21,r24 - eor r21,r25 - subi r28,188 - sbci r29,255 - st Y,r21 - subi r28,68 - sbc r29,r1 - ldd r24,Y+5 - ldd r18,Y+21 - ldd r19,Y+37 - ldd r20,Y+53 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,171 - sbci r29,255 - st Y,r23 - subi r28,85 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r4,r18 - or r4,r19 - eor r4,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+5,r21 - mov r21,r4 - and r21,r24 - eor r21,r25 - subi r28,187 - sbci r29,255 - st Y,r21 - subi r28,69 - sbc r29,r1 - ldd r24,Y+6 - ldd r18,Y+22 - ldd r19,Y+38 - ldd r20,Y+54 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,170 - sbci r29,255 - st Y,r23 - subi r28,86 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r5,r18 - or r5,r19 - eor r5,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+6,r21 - mov r21,r5 - and r21,r24 - eor r21,r25 - subi r28,186 - sbci r29,255 - st Y,r21 - subi r28,70 - sbc r29,r1 - ldd r24,Y+7 - ldd r18,Y+23 - ldd r19,Y+39 - ldd r20,Y+55 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,169 - sbci r29,255 - st Y,r23 - subi r28,87 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r6,r18 - or r6,r19 - eor r6,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+7,r21 - mov r21,r6 - and r21,r24 - eor r21,r25 - subi r28,185 - sbci r29,255 - st Y,r21 - subi r28,71 - sbc r29,r1 - ldd r24,Y+8 - ldd r18,Y+24 - ldd r19,Y+40 - ldd r20,Y+56 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,168 - sbci r29,255 - st Y,r23 - subi r28,88 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r7,r18 - or r7,r19 - eor r7,r16 - mov r24,r18 - 
eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+8,r21 - mov r21,r7 - and r21,r24 - eor r21,r25 - subi r28,184 - sbci r29,255 - st Y,r21 - subi r28,72 - sbc r29,r1 - ldd r24,Y+9 - ldd r18,Y+25 - ldd r19,Y+41 - ldd r20,Y+57 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,167 - sbci r29,255 - st Y,r23 - subi r28,89 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r8,r18 - or r8,r19 - eor r8,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+9,r21 - mov r21,r8 - and r21,r24 - eor r21,r25 - subi r28,183 - sbci r29,255 - st Y,r21 - subi r28,73 - sbc r29,r1 - ldd r24,Y+10 - ldd r18,Y+26 - ldd r19,Y+42 - ldd r20,Y+58 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,166 - sbci r29,255 - st Y,r23 - subi r28,90 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r9,r18 - or r9,r19 - eor r9,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+10,r21 - mov r21,r9 - and r21,r24 - eor r21,r25 - subi r28,182 - sbci r29,255 - st Y,r21 - subi r28,74 - sbc r29,r1 - ldd r24,Y+11 - ldd r18,Y+27 - ldd r19,Y+43 - ldd r20,Y+59 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,165 - sbci r29,255 - st Y,r23 - subi r28,91 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r10,r18 - or r10,r19 - eor r10,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+11,r21 - mov r21,r10 - and r21,r24 - eor r21,r25 - subi r28,181 - sbci r29,255 - st Y,r21 - subi r28,75 - sbc r29,r1 - ldd r24,Y+12 - ldd r18,Y+28 - ldd r19,Y+44 - ldd r20,Y+60 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,164 - sbci r29,255 - st Y,r23 - subi r28,92 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r11,r18 - or r11,r19 - eor r11,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+12,r21 - mov r21,r11 - and r21,r24 - eor r21,r25 - subi r28,180 - sbci r29,255 - st Y,r21 - subi r28,76 - sbc r29,r1 - ldd r24,Y+13 - ldd r18,Y+29 - ldd r19,Y+45 - ldd r20,Y+61 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,163 - sbci r29,255 - st Y,r23 - subi r28,93 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r12,r18 - or r12,r19 - eor r12,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+13,r21 - mov r21,r12 - and r21,r24 - eor r21,r25 - subi r28,179 - sbci r29,255 - st Y,r21 - subi r28,77 - sbc r29,r1 - ldd r24,Y+14 - ldd r18,Y+30 - ldd r19,Y+46 - ldd r20,Y+62 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,162 - sbci r29,255 - st Y,r23 - subi r28,94 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r13,r18 - or r13,r19 - eor r13,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+14,r21 - mov r21,r13 - and r21,r24 - eor r21,r25 - subi r28,178 - sbci r29,255 - st Y,r21 - subi r28,78 - sbc r29,r1 - ldd r24,Y+15 - ldd r18,Y+31 - ldd r19,Y+47 - ldd r20,Y+63 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,161 - sbci r29,255 - st Y,r23 - subi r28,95 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r14,r18 - or r14,r19 - eor r14,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+15,r21 - mov r21,r14 - and r21,r24 - eor r21,r25 - subi r28,177 - sbci r29,255 - st Y,r21 - subi r28,79 - sbc r29,r1 - ldd r24,Y+16 - ldd r18,Y+32 - ldd r19,Y+48 - subi r28,192 - sbci r29,255 - ld 
r20,Y - subi r28,64 - sbc r29,r1 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,160 - sbci r29,255 - st Y,r23 - subi r28,96 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r15,r18 - or r15,r19 - eor r15,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+16,r21 - mov r21,r15 - and r21,r24 - eor r21,r25 - subi r28,176 - sbci r29,255 - st Y,r21 - subi r28,80 - sbc r29,r1 - std Y+33,r14 - std Y+34,r15 - std Y+35,r26 - std Y+36,r27 - std Y+37,r2 - std Y+38,r3 - std Y+39,r4 - std Y+40,r5 - std Y+41,r6 - std Y+42,r7 - std Y+43,r8 - std Y+44,r9 - std Y+45,r10 - std Y+46,r11 - std Y+47,r12 - std Y+48,r13 - subi r28,191 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,80 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,96 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - adiw r28,49 - st Y+,r13 - st Y+,r14 - st Y+,r15 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y,r12 - subi r28,64 - sbc r29,r1 - dec r22 - breq 5812f - rjmp 134b -5812: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r26,Y+17 - ldd r27,Y+18 - ldd r2,Y+19 - ldd r3,Y+20 - ldd r4,Y+21 - ldd r5,Y+22 - ldd r6,Y+23 - ldd r7,Y+24 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - std Z+16,r26 - std Z+17,r27 - std Z+18,r2 - std Z+19,r3 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - ldd r26,Y+33 - ldd r27,Y+34 - ldd r2,Y+35 - ldd r3,Y+36 - ldd r4,Y+37 - ldd r5,Y+38 - ldd r6,Y+39 - ldd r7,Y+40 - ldd r8,Y+41 - ldd r9,Y+42 - ldd r10,Y+43 - ldd r11,Y+44 - ldd r12,Y+45 - ldd r13,Y+46 - ldd r14,Y+47 - ldd r15,Y+48 - std Z+32,r26 - std Z+33,r27 - std Z+34,r2 - std Z+35,r3 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r8 - std Z+41,r9 - std Z+42,r10 - std Z+43,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - adiw r28,49 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld 
r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,64 - sbc r29,r1 - std Z+48,r26 - std Z+49,r27 - std Z+50,r2 - std Z+51,r3 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - std Z+56,r8 - std Z+57,r9 - std Z+58,r10 - std Z+59,r11 - std Z+60,r12 - std Z+61,r13 - std Z+62,r14 - std Z+63,r15 - subi r28,160 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot512_permute_7, .-knot512_permute_7 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_8, @object - .size table_8, 140 -table_8: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 17 - .byte 35 - .byte 71 - .byte 142 - .byte 28 - .byte 56 - .byte 113 - .byte 226 - .byte 196 - .byte 137 - .byte 18 - .byte 37 - .byte 75 - .byte 151 - .byte 46 - .byte 92 - .byte 184 - .byte 112 - .byte 224 - .byte 192 - .byte 129 - .byte 3 - .byte 6 - .byte 12 - .byte 25 - .byte 50 - .byte 100 - .byte 201 - .byte 146 - .byte 36 - .byte 73 - .byte 147 - .byte 38 - .byte 77 - .byte 155 - .byte 55 - .byte 110 - .byte 220 - .byte 185 - .byte 114 - .byte 228 - .byte 200 - .byte 144 - .byte 32 - .byte 65 - .byte 130 - .byte 5 - .byte 10 - .byte 21 - .byte 43 - .byte 86 - .byte 173 - .byte 91 - .byte 182 - .byte 109 - .byte 218 - .byte 181 - .byte 107 - .byte 214 - .byte 172 - .byte 89 - .byte 178 - .byte 101 - .byte 203 - .byte 150 - .byte 44 - .byte 88 - .byte 176 - .byte 97 - .byte 195 - .byte 135 - .byte 15 - .byte 31 - .byte 62 - .byte 125 - .byte 251 - .byte 246 - .byte 237 - .byte 219 - .byte 183 - .byte 111 - .byte 222 - .byte 189 - .byte 122 - .byte 245 - .byte 235 - .byte 215 - .byte 174 - .byte 93 - .byte 186 - .byte 116 - .byte 232 - .byte 209 - .byte 162 - .byte 68 - .byte 136 - .byte 16 - .byte 33 - .byte 67 - .byte 134 - .byte 13 - .byte 27 - .byte 54 - .byte 108 - .byte 216 - .byte 177 - .byte 99 - .byte 199 - .byte 143 - .byte 30 - .byte 60 - .byte 121 - .byte 243 - .byte 231 - .byte 206 - .byte 156 - .byte 57 - .byte 115 - .byte 230 - .byte 204 - .byte 152 - .byte 49 - .byte 98 - .byte 197 - .byte 139 - .byte 22 - .byte 45 - .byte 90 - .byte 180 - .byte 105 - .byte 210 - .byte 164 - .byte 72 - .byte 145 - .byte 34 - .byte 69 - - .text -.global knot512_permute_8 - .type knot512_permute_8, @function -knot512_permute_8: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,96 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 113 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r26,Z+16 - ldd r27,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 
- std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - ldd r26,Z+32 - ldd r27,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r8,Z+40 - ldd r9,Z+41 - ldd r10,Z+42 - ldd r11,Z+43 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - std Y+33,r26 - std Y+34,r27 - std Y+35,r2 - std Y+36,r3 - std Y+37,r4 - std Y+38,r5 - std Y+39,r6 - std Y+40,r7 - std Y+41,r8 - std Y+42,r9 - std Y+43,r10 - std Y+44,r11 - std Y+45,r12 - std Y+46,r13 - std Y+47,r14 - std Y+48,r15 - ldd r26,Z+48 - ldd r27,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r8,Z+56 - ldd r9,Z+57 - ldd r10,Z+58 - ldd r11,Z+59 - ldd r12,Z+60 - ldd r13,Z+61 - ldd r14,Z+62 - ldd r15,Z+63 - adiw r28,49 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y+,r12 - st Y+,r13 - st Y+,r14 - st Y,r15 - subi r28,64 - sbc r29,r1 - push r31 - push r30 - ldi r30,lo8(table_8) - ldi r31,hi8(table_8) -#if defined(RAMPZ) - ldi r17,hh8(table_8) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -134: - ldd r24,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r24,r18 - inc r30 - ldd r18,Y+17 - ldd r19,Y+33 - ldd r20,Y+49 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,175 - sbci r29,255 - st Y,r23 - subi r28,81 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r26,r18 - or r26,r19 - eor r26,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+1,r21 - mov r21,r26 - and r21,r24 - eor r21,r25 - subi r28,191 - sbci r29,255 - st Y,r21 - subi r28,65 - sbc r29,r1 - ldd r24,Y+2 - ldd r18,Y+18 - ldd r19,Y+34 - ldd r20,Y+50 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,174 - sbci r29,255 - st Y,r23 - subi r28,82 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r27,r18 - or r27,r19 - eor r27,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+2,r21 - mov r21,r27 - and r21,r24 - eor r21,r25 - subi r28,190 - sbci r29,255 - st Y,r21 - subi r28,66 - sbc r29,r1 - ldd r24,Y+3 - ldd r18,Y+19 - ldd r19,Y+35 - ldd r20,Y+51 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,173 - sbci r29,255 - st Y,r23 - subi r28,83 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r2,r18 - or r2,r19 - eor r2,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+3,r21 - mov r21,r2 - and r21,r24 - eor r21,r25 - subi r28,189 - sbci r29,255 - st Y,r21 - subi r28,67 - sbc r29,r1 - ldd r24,Y+4 - ldd r18,Y+20 - ldd r19,Y+36 - ldd r20,Y+52 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,172 - sbci r29,255 - st Y,r23 - subi r28,84 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r3,r18 - or r3,r19 - eor r3,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+4,r21 - mov r21,r3 - and r21,r24 - eor r21,r25 - subi r28,188 - sbci r29,255 - st Y,r21 - subi r28,68 - sbc r29,r1 - ldd r24,Y+5 - ldd r18,Y+21 - ldd r19,Y+37 - ldd r20,Y+53 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,171 - sbci r29,255 - st Y,r23 - subi r28,85 - sbc r29,r1 - mov r16,r20 - eor 
r16,r24 - mov r4,r18 - or r4,r19 - eor r4,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+5,r21 - mov r21,r4 - and r21,r24 - eor r21,r25 - subi r28,187 - sbci r29,255 - st Y,r21 - subi r28,69 - sbc r29,r1 - ldd r24,Y+6 - ldd r18,Y+22 - ldd r19,Y+38 - ldd r20,Y+54 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,170 - sbci r29,255 - st Y,r23 - subi r28,86 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r5,r18 - or r5,r19 - eor r5,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+6,r21 - mov r21,r5 - and r21,r24 - eor r21,r25 - subi r28,186 - sbci r29,255 - st Y,r21 - subi r28,70 - sbc r29,r1 - ldd r24,Y+7 - ldd r18,Y+23 - ldd r19,Y+39 - ldd r20,Y+55 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,169 - sbci r29,255 - st Y,r23 - subi r28,87 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r6,r18 - or r6,r19 - eor r6,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+7,r21 - mov r21,r6 - and r21,r24 - eor r21,r25 - subi r28,185 - sbci r29,255 - st Y,r21 - subi r28,71 - sbc r29,r1 - ldd r24,Y+8 - ldd r18,Y+24 - ldd r19,Y+40 - ldd r20,Y+56 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,168 - sbci r29,255 - st Y,r23 - subi r28,88 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r7,r18 - or r7,r19 - eor r7,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+8,r21 - mov r21,r7 - and r21,r24 - eor r21,r25 - subi r28,184 - sbci r29,255 - st Y,r21 - subi r28,72 - sbc r29,r1 - ldd r24,Y+9 - ldd r18,Y+25 - ldd r19,Y+41 - ldd r20,Y+57 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,167 - sbci r29,255 - st Y,r23 - subi r28,89 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r8,r18 - or r8,r19 - eor r8,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+9,r21 - mov r21,r8 - and r21,r24 - eor r21,r25 - subi r28,183 - sbci r29,255 - st Y,r21 - subi r28,73 - sbc r29,r1 - ldd r24,Y+10 - ldd r18,Y+26 - ldd r19,Y+42 - ldd r20,Y+58 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,166 - sbci r29,255 - st Y,r23 - subi r28,90 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r9,r18 - or r9,r19 - eor r9,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+10,r21 - mov r21,r9 - and r21,r24 - eor r21,r25 - subi r28,182 - sbci r29,255 - st Y,r21 - subi r28,74 - sbc r29,r1 - ldd r24,Y+11 - ldd r18,Y+27 - ldd r19,Y+43 - ldd r20,Y+59 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,165 - sbci r29,255 - st Y,r23 - subi r28,91 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r10,r18 - or r10,r19 - eor r10,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+11,r21 - mov r21,r10 - and r21,r24 - eor r21,r25 - subi r28,181 - sbci r29,255 - st Y,r21 - subi r28,75 - sbc r29,r1 - ldd r24,Y+12 - ldd r18,Y+28 - ldd r19,Y+44 - ldd r20,Y+60 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,164 - sbci r29,255 - st Y,r23 - subi r28,92 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r11,r18 - or r11,r19 - eor r11,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+12,r21 - mov r21,r11 - and r21,r24 - eor r21,r25 - subi r28,180 - sbci r29,255 - st Y,r21 - subi r28,76 - sbc r29,r1 - ldd r24,Y+13 - ldd r18,Y+29 - ldd 
r19,Y+45 - ldd r20,Y+61 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,163 - sbci r29,255 - st Y,r23 - subi r28,93 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r12,r18 - or r12,r19 - eor r12,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+13,r21 - mov r21,r12 - and r21,r24 - eor r21,r25 - subi r28,179 - sbci r29,255 - st Y,r21 - subi r28,77 - sbc r29,r1 - ldd r24,Y+14 - ldd r18,Y+30 - ldd r19,Y+46 - ldd r20,Y+62 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,162 - sbci r29,255 - st Y,r23 - subi r28,94 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r13,r18 - or r13,r19 - eor r13,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+14,r21 - mov r21,r13 - and r21,r24 - eor r21,r25 - subi r28,178 - sbci r29,255 - st Y,r21 - subi r28,78 - sbc r29,r1 - ldd r24,Y+15 - ldd r18,Y+31 - ldd r19,Y+47 - ldd r20,Y+63 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,161 - sbci r29,255 - st Y,r23 - subi r28,95 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r14,r18 - or r14,r19 - eor r14,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+15,r21 - mov r21,r14 - and r21,r24 - eor r21,r25 - subi r28,177 - sbci r29,255 - st Y,r21 - subi r28,79 - sbc r29,r1 - ldd r24,Y+16 - ldd r18,Y+32 - ldd r19,Y+48 - subi r28,192 - sbci r29,255 - ld r20,Y - subi r28,64 - sbc r29,r1 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,160 - sbci r29,255 - st Y,r23 - subi r28,96 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r15,r18 - or r15,r19 - eor r15,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+16,r21 - mov r21,r15 - and r21,r24 - eor r21,r25 - subi r28,176 - sbci r29,255 - st Y,r21 - subi r28,80 - sbc r29,r1 - std Y+33,r14 - std Y+34,r15 - std Y+35,r26 - std Y+36,r27 - std Y+37,r2 - std Y+38,r3 - std Y+39,r4 - std Y+40,r5 - std Y+41,r6 - std Y+42,r7 - std Y+43,r8 - std Y+44,r9 - std Y+45,r10 - std Y+46,r11 - std Y+47,r12 - std Y+48,r13 - subi r28,191 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,80 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,96 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - adiw r28,49 - st Y+,r13 - st Y+,r14 - st Y+,r15 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y,r12 - subi r28,64 - sbc r29,r1 - dec r22 - breq 5812f - rjmp 134b -5812: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd 
r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r26,Y+17 - ldd r27,Y+18 - ldd r2,Y+19 - ldd r3,Y+20 - ldd r4,Y+21 - ldd r5,Y+22 - ldd r6,Y+23 - ldd r7,Y+24 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - std Z+16,r26 - std Z+17,r27 - std Z+18,r2 - std Z+19,r3 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - ldd r26,Y+33 - ldd r27,Y+34 - ldd r2,Y+35 - ldd r3,Y+36 - ldd r4,Y+37 - ldd r5,Y+38 - ldd r6,Y+39 - ldd r7,Y+40 - ldd r8,Y+41 - ldd r9,Y+42 - ldd r10,Y+43 - ldd r11,Y+44 - ldd r12,Y+45 - ldd r13,Y+46 - ldd r14,Y+47 - ldd r15,Y+48 - std Z+32,r26 - std Z+33,r27 - std Z+34,r2 - std Z+35,r3 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r8 - std Z+41,r9 - std Z+42,r10 - std Z+43,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - adiw r28,49 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,64 - sbc r29,r1 - std Z+48,r26 - std Z+49,r27 - std Z+50,r2 - std Z+51,r3 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - std Z+56,r8 - std Z+57,r9 - std Z+58,r10 - std Z+59,r11 - std Z+60,r12 - std Z+61,r13 - std Z+62,r14 - std Z+63,r15 - subi r28,160 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot512_permute_8, .-knot512_permute_8 - -#endif diff --git a/knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot.c b/knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot.c deleted file mode 100644 index f8b378e..0000000 --- a/knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot.c +++ /dev/null @@ -1,301 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-knot.h" - -#if !defined(__AVR__) - -/* Round constants for the KNOT-256, KNOT-384, and KNOT-512 permutations */ -static uint8_t const rc6[52] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06, 0x0c, 0x18, 0x31, 0x22, - 0x05, 0x0a, 0x14, 0x29, 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28, - 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24, 0x09, 0x12, 0x25, 0x0b, - 0x16, 0x2d, 0x1b, 0x37, 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26, - 0x0d, 0x1a, 0x35, 0x2a -}; -static uint8_t const rc7[104] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03, 0x06, 0x0c, 0x18, 0x30, - 0x61, 0x42, 0x05, 0x0a, 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c, - 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b, 0x16, 0x2c, 0x59, 0x33, - 0x67, 0x4e, 0x1d, 0x3a, 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f, - 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43, 0x07, 0x0e, 0x1c, 0x38, - 0x71, 0x62, 0x44, 0x09, 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36, - 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37, 0x6f, 0x5e, 0x3d, 0x7b, - 0x76, 0x6c, 0x58, 0x31, 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25, - 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c -}; -static uint8_t const rc8[140] = { - 0x01, 0x02, 0x04, 0x08, 0x11, 0x23, 0x47, 0x8e, 0x1c, 0x38, 0x71, 0xe2, - 0xc4, 0x89, 0x12, 0x25, 0x4b, 0x97, 0x2e, 0x5c, 0xb8, 0x70, 0xe0, 0xc0, - 0x81, 0x03, 0x06, 0x0c, 0x19, 0x32, 0x64, 0xc9, 0x92, 0x24, 0x49, 0x93, - 0x26, 0x4d, 0x9b, 0x37, 0x6e, 0xdc, 0xb9, 0x72, 0xe4, 0xc8, 0x90, 0x20, - 0x41, 0x82, 0x05, 0x0a, 0x15, 0x2b, 0x56, 0xad, 0x5b, 0xb6, 0x6d, 0xda, - 0xb5, 0x6b, 0xd6, 0xac, 0x59, 0xb2, 0x65, 0xcb, 0x96, 0x2c, 0x58, 0xb0, - 0x61, 0xc3, 0x87, 0x0f, 0x1f, 0x3e, 0x7d, 0xfb, 0xf6, 0xed, 0xdb, 0xb7, - 0x6f, 0xde, 0xbd, 0x7a, 0xf5, 0xeb, 0xd7, 0xae, 0x5d, 0xba, 0x74, 0xe8, - 0xd1, 0xa2, 0x44, 0x88, 0x10, 0x21, 0x43, 0x86, 0x0d, 0x1b, 0x36, 0x6c, - 0xd8, 0xb1, 0x63, 0xc7, 0x8f, 0x1e, 0x3c, 0x79, 0xf3, 0xe7, 0xce, 0x9c, - 0x39, 0x73, 0xe6, 0xcc, 0x98, 0x31, 0x62, 0xc5, 0x8b, 0x16, 0x2d, 0x5a, - 0xb4, 0x69, 0xd2, 0xa4, 0x48, 0x91, 0x22, 0x45 -}; - -/* Applies the KNOT S-box to four 64-bit words in bit-sliced mode */ -#define knot_sbox64(a0, a1, a2, a3, b1, b2, b3) \ - do { \ - uint64_t t1, t3, t6; \ - t1 = ~(a0); \ - t3 = (a2) ^ ((a1) & t1); \ - (b3) = (a3) ^ t3; \ - t6 = (a3) ^ t1; \ - (b2) = ((a1) | (a2)) ^ t6; \ - t1 = (a1) ^ (a3); \ - (a0) = t1 ^ (t3 & t6); \ - (b1) = t3 ^ ((b2) & t1); \ - } while (0) - -/* Applies the KNOT S-box to four 32-bit words in bit-sliced mode */ -#define knot_sbox32(a0, a1, a2, a3, b1, b2, b3) \ - do { \ - uint32_t t1, t3, t6; \ - t1 = ~(a0); \ - t3 = (a2) ^ ((a1) & t1); \ - (b3) = (a3) ^ t3; \ - t6 = (a3) ^ t1; \ - (b2) = ((a1) | (a2)) ^ t6; \ - t1 = (a1) ^ (a3); \ - (a0) = t1 ^ (t3 & t6); \ - (b1) = t3 ^ ((b2) & t1); \ - } while (0) - -static void knot256_permute - (knot256_state_t *state, const uint8_t *rc, uint8_t rounds) -{ - uint64_t b1, b2, b3; - - /* Load the input state into local variables; each row is 64 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = 
le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x1, x2, x3, b1, b2, b3); - - /* Linear diffusion layer */ - x1 = leftRotate1_64(b1); - x2 = leftRotate8_64(b2); - x3 = leftRotate25_64(b3); - } - - /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); -#endif -} - -void knot256_permute_6(knot256_state_t *state, uint8_t rounds) -{ - knot256_permute(state, rc6, rounds); -} - -void knot256_permute_7(knot256_state_t *state, uint8_t rounds) -{ - knot256_permute(state, rc7, rounds); -} - -void knot384_permute_7(knot384_state_t *state, uint8_t rounds) -{ - const uint8_t *rc = rc7; - uint64_t b2, b4, b6; - uint32_t b3, b5, b7; - - /* Load the input state into local variables; each row is 96 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint32_t x1 = state->W[2]; - uint64_t x2 = state->W[3] | (((uint64_t)(state->W[4])) << 32); - uint32_t x3 = state->W[5]; - uint64_t x4 = state->S[3]; - uint32_t x5 = state->W[8]; - uint64_t x6 = state->W[9] | (((uint64_t)(state->W[10])) << 32); - uint32_t x7 = state->W[11]; -#else - uint64_t x0 = le_load_word64(state->B); - uint32_t x1 = le_load_word32(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 12); - uint32_t x3 = le_load_word32(state->B + 20); - uint64_t x4 = le_load_word64(state->B + 24); - uint32_t x5 = le_load_word32(state->B + 32); - uint64_t x6 = le_load_word64(state->B + 36); - uint32_t x7 = le_load_word32(state->B + 44); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x2, x4, x6, b2, b4, b6); - knot_sbox32(x1, x3, x5, x7, b3, b5, b7); - - /* Linear diffusion layer */ - #define leftRotateShort_96(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | ((b1) >> (32 - (bits))); \ - (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ - } while (0) - #define leftRotateLong_96(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | \ - (((uint64_t)(b1)) << ((bits) - 32)) | \ - ((b0) >> (96 - (bits))); \ - (a1) = (uint32_t)(((b0) << ((bits) - 32)) >> 32); \ - } while (0) - leftRotateShort_96(x2, x3, b2, b3, 1); - leftRotateShort_96(x4, x5, b4, b5, 8); - leftRotateLong_96(x6, x7, b6, b7, 55); - } - - /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->W[2] = x1; - state->W[3] = (uint32_t)x2; - state->W[4] = (uint32_t)(x2 >> 32); - state->W[5] = x3; - state->S[3] = x4; - state->W[8] = x5; - state->W[9] = (uint32_t)x6; - state->W[10] = (uint32_t)(x6 >> 32); - state->W[11] = x7; -#else - le_store_word64(state->B, x0); - le_store_word32(state->B + 8, x1); - le_store_word64(state->B + 12, x2); - le_store_word32(state->B + 20, x3); - le_store_word64(state->B + 24, x4); - le_store_word32(state->B + 32, x5); - le_store_word64(state->B + 36, x6); - le_store_word32(state->B + 44, x7); -#endif -} - -static void knot512_permute - (knot512_state_t *state, const uint8_t *rc, uint8_t rounds) -{ - uint64_t b2, b3, b4, b5, b6, b7; - - /* Load 
the input state into local variables; each row is 128 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; - uint64_t x5 = state->S[5]; - uint64_t x6 = state->S[6]; - uint64_t x7 = state->S[7]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); - uint64_t x4 = le_load_word64(state->B + 32); - uint64_t x5 = le_load_word64(state->B + 40); - uint64_t x6 = le_load_word64(state->B + 48); - uint64_t x7 = le_load_word64(state->B + 56); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x2, x4, x6, b2, b4, b6); - knot_sbox64(x1, x3, x5, x7, b3, b5, b7); - - /* Linear diffusion layer */ - #define leftRotate_128(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | ((b1) >> (64 - (bits))); \ - (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ - } while (0) - leftRotate_128(x2, x3, b2, b3, 1); - leftRotate_128(x4, x5, b4, b5, 16); - leftRotate_128(x6, x7, b6, b7, 25); - } - - /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; - state->S[5] = x5; - state->S[6] = x6; - state->S[7] = x7; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); - le_store_word64(state->B + 32, x4); - le_store_word64(state->B + 40, x5); - le_store_word64(state->B + 48, x6); - le_store_word64(state->B + 56, x7); -#endif -} - -void knot512_permute_7(knot512_state_t *state, uint8_t rounds) -{ - knot512_permute(state, rc7, rounds); -} - -void knot512_permute_8(knot512_state_t *state, uint8_t rounds) -{ - knot512_permute(state, rc8, rounds); -} - -#endif /* !__AVR__ */ diff --git a/knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot.h b/knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot.h deleted file mode 100644 index 88a782c..0000000 --- a/knot/Implementations/crypto_hash/knot384/rhys-avr/internal-knot.h +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
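For reference, the C path of internal-knot.c above builds each round from the bit-sliced knot_sbox64 macro applied to whole 64-bit rows, followed by fixed left rotations of rows 1 to 3. The short standalone sketch below is not taken from the patch: it copies the knot_sbox64 macro shown above and applies one KNOT-256-style round to arbitrary test values, with ROTL64 and the main() harness standing in for the library's leftRotateN_64 helpers. The 0x01 constant is simply the first entry of the rc6[] table.

#include <stdint.h>
#include <stdio.h>

#define ROTL64(x, b) (((x) << (b)) | ((x) >> (64 - (b))))

/* Same S-box macro as in the internal-knot.c shown above */
#define knot_sbox64(a0, a1, a2, a3, b1, b2, b3) \
    do { \
        uint64_t t1, t3, t6; \
        t1 = ~(a0); \
        t3 = (a2) ^ ((a1) & t1); \
        (b3) = (a3) ^ t3; \
        t6 = (a3) ^ t1; \
        (b2) = ((a1) | (a2)) ^ t6; \
        t1 = (a1) ^ (a3); \
        (a0) = t1 ^ (t3 & t6); \
        (b1) = t3 ^ ((b2) & t1); \
    } while (0)

int main(void)
{
    /* Four 64-bit rows of a 256-bit state, arbitrary test values */
    uint64_t x0 = 0x0123456789abcdefULL, x1 = 0xfedcba9876543210ULL;
    uint64_t x2 = 0x0f1e2d3c4b5a6978ULL, x3 = 0x8796a5b4c3d2e1f0ULL;
    uint64_t b1, b2, b3;

    x0 ^= 0x01;                                 /* add round constant to row 0 */
    knot_sbox64(x0, x1, x2, x3, b1, b2, b3);    /* substitution layer */
    x1 = ROTL64(b1, 1);                         /* linear diffusion layer */
    x2 = ROTL64(b2, 8);
    x3 = ROTL64(b3, 25);

    printf("%016llx %016llx %016llx %016llx\n",
           (unsigned long long)x0, (unsigned long long)x1,
           (unsigned long long)x2, (unsigned long long)x3);
    return 0;
}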
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_KNOT_H -#define LW_INTERNAL_KNOT_H - -#include "internal-util.h" - -/** - * \file internal-knot.h - * \brief Permutations that are used by the KNOT AEAD and hash algorithms. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Internal state of the KNOT-256 permutation. - */ -typedef union -{ - uint64_t S[4]; /**< Words of the state */ - uint8_t B[32]; /**< Bytes of the state */ - -} knot256_state_t; - -/** - * \brief Internal state of the KNOT-384 permutation. - */ -typedef union -{ - uint64_t S[6]; /**< 64-bit words of the state */ - uint32_t W[12]; /**< 32-bit words of the state */ - uint8_t B[48]; /**< Bytes of the state */ - -} knot384_state_t; - -/** - * \brief Internal state of the KNOT-512 permutation. - */ -typedef union -{ - uint64_t S[8]; /**< Words of the state */ - uint8_t B[64]; /**< Bytes of the state */ - -} knot512_state_t; - -/** - * \brief Permutes the KNOT-256 state, using 6-bit round constants. - * - * \param state The KNOT-256 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 52. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot256_permute_6(knot256_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-256 state, using 7-bit round constants. - * - * \param state The KNOT-256 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot256_permute_7(knot256_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-384 state, using 7-bit round constants. - * - * \param state The KNOT-384 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot384_permute_7(knot384_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-512 state, using 7-bit round constants. - * - * \param state The KNOT-512 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot512_permute_7(knot512_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-512 state, using 8-bit round constants. - * - * \param state The KNOT-512 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 140. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot512_permute_8(knot512_state_t *state, uint8_t rounds); - -/** - * \brief Generic pointer to a function that performs a KNOT permutation. - * - * \param state Points to the permutation state. - * \param round Number of rounds to perform. - */ -typedef void (*knot_permute_t)(void *state, uint8_t rounds); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_hash/knot384/rhys-avr/internal-util.h b/knot/Implementations/crypto_hash/knot384/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/knot/Implementations/crypto_hash/knot384/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
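The internal-knot.h header above deliberately exposes each permutation state as a union, so callers can absorb data through the byte view B and still operate on 64-bit rows through S. The following hypothetical caller is not part of the patch; it assumes the deleted internal-knot.c and internal-util.h are compiled and linked alongside it.

#include <stdio.h>
#include <string.h>
#include "internal-knot.h"   /* header shown above */

int main(void)
{
    knot256_state_t state;

    /* Absorb a padded one-byte message through the byte view */
    memset(state.B, 0, sizeof(state.B));
    state.B[0] ^= 0x01;

    /* Run 68 rounds with 7-bit round constants (the count used by KNOT-HASH-256-256) */
    knot256_permute_7(&state, 68);

    /* Inspect the same storage through the 64-bit row view */
    printf("row0 = %016llx\n", (unsigned long long)state.S[0]);
    return 0;
}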
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) 
| \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = 
(src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/knot/Implementations/crypto_hash/knot384/rhys-avr/knot-hash.c b/knot/Implementations/crypto_hash/knot384/rhys-avr/knot-hash.c deleted file mode 100644 index a4edecd..0000000 --- a/knot/Implementations/crypto_hash/knot384/rhys-avr/knot-hash.c +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
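The composed rotation macros in internal-util.h above exist because the AVR has no barrel shifter: only rotations by one bit or by whole bytes are cheap, so every other rotation count is assembled from those. The identity they rely on is easy to verify in isolation. The check below uses plain local macros rather than the header; it confirms that the header's leftRotate5 composition (rotate left by 8, then right by 1 three times) matches a direct rotate by 5.

#include <stdint.h>
#include <stdio.h>

#define ROTL32(x, b) (((uint32_t)(x) << (b)) | ((uint32_t)(x) >> (32 - (b))))
#define ROTR32(x, b) (((uint32_t)(x) >> (b)) | ((uint32_t)(x) << (32 - (b))))

int main(void)
{
    uint32_t x = 0x12345678;

    uint32_t direct   = ROTL32(x, 5);
    /* Same composition as the header's leftRotate5(): left 8, then right 1 three times */
    uint32_t composed = ROTR32(ROTR32(ROTR32(ROTL32(x, 8), 1), 1), 1);

    printf("%08x %08x %s\n", direct, composed,
           direct == composed ? "match" : "MISMATCH");
    return 0;
}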
- */ - -#include "knot.h" -#include "internal-knot.h" -#include - -aead_hash_algorithm_t const knot_hash_256_256_algorithm = { - "KNOT-HASH-256-256", - sizeof(int), - KNOT_HASH_256_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_hash_256_256, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const knot_hash_256_384_algorithm = { - "KNOT-HASH-256-384", - sizeof(int), - KNOT_HASH_256_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_hash_256_384, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const knot_hash_384_384_algorithm = { - "KNOT-HASH-384-384", - sizeof(int), - KNOT_HASH_384_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_hash_384_384, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const knot_hash_512_512_algorithm = { - "KNOT-HASH-512-512", - sizeof(int), - KNOT_HASH_512_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_hash_512_512, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \brief Input rate for KNOT-HASH-256-256. - */ -#define KNOT_HASH_256_256_RATE 4 - -/** - * \brief Input rate for KNOT-HASH-256-384. - */ -#define KNOT_HASH_256_384_RATE 16 - -/** - * \brief Input rate for KNOT-HASH-384-384. - */ -#define KNOT_HASH_384_384_RATE 6 - -/** - * \brief Input rate for KNOT-HASH-512-512. - */ -#define KNOT_HASH_512_512_RATE 8 - -int knot_hash_256_256 - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - knot256_state_t state; - unsigned temp; - memset(state.B, 0, sizeof(state.B)); - while (inlen >= KNOT_HASH_256_256_RATE) { - lw_xor_block(state.B, in, KNOT_HASH_256_256_RATE); - knot256_permute_7(&state, 68); - in += KNOT_HASH_256_256_RATE; - inlen -= KNOT_HASH_256_256_RATE; - } - temp = (unsigned)inlen; - lw_xor_block(state.B, in, temp); - state.B[temp] ^= 0x01; - knot256_permute_7(&state, 68); - memcpy(out, state.B, KNOT_HASH_256_SIZE / 2); - knot256_permute_7(&state, 68); - memcpy(out + KNOT_HASH_256_SIZE / 2, state.B, KNOT_HASH_256_SIZE / 2); - return 0; -} - -int knot_hash_256_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - knot384_state_t state; - unsigned temp; - memset(state.B, 0, sizeof(state.B)); - state.B[sizeof(state.B) - 1] ^= 0x80; - while (inlen >= KNOT_HASH_256_384_RATE) { - lw_xor_block(state.B, in, KNOT_HASH_256_384_RATE); - knot384_permute_7(&state, 80); - in += KNOT_HASH_256_384_RATE; - inlen -= KNOT_HASH_256_384_RATE; - } - temp = (unsigned)inlen; - lw_xor_block(state.B, in, temp); - state.B[temp] ^= 0x01; - knot384_permute_7(&state, 80); - memcpy(out, state.B, KNOT_HASH_256_SIZE / 2); - knot384_permute_7(&state, 80); - memcpy(out + KNOT_HASH_256_SIZE / 2, state.B, KNOT_HASH_256_SIZE / 2); - return 0; -} - -int knot_hash_384_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - knot384_state_t state; - unsigned temp; - memset(state.B, 0, sizeof(state.B)); - while (inlen >= KNOT_HASH_384_384_RATE) { - lw_xor_block(state.B, in, KNOT_HASH_384_384_RATE); - knot384_permute_7(&state, 104); - in += KNOT_HASH_384_384_RATE; - inlen -= KNOT_HASH_384_384_RATE; - } - temp = (unsigned)inlen; - lw_xor_block(state.B, in, temp); - state.B[temp] ^= 0x01; - knot384_permute_7(&state, 104); - memcpy(out, state.B, KNOT_HASH_384_SIZE / 
2); - knot384_permute_7(&state, 104); - memcpy(out + KNOT_HASH_384_SIZE / 2, state.B, KNOT_HASH_384_SIZE / 2); - return 0; -} - -int knot_hash_512_512 - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - knot512_state_t state; - unsigned temp; - memset(state.B, 0, sizeof(state.B)); - while (inlen >= KNOT_HASH_512_512_RATE) { - lw_xor_block(state.B, in, KNOT_HASH_512_512_RATE); - knot512_permute_8(&state, 140); - in += KNOT_HASH_512_512_RATE; - inlen -= KNOT_HASH_512_512_RATE; - } - temp = (unsigned)inlen; - lw_xor_block(state.B, in, temp); - state.B[temp] ^= 0x01; - knot512_permute_8(&state, 140); - memcpy(out, state.B, KNOT_HASH_512_SIZE / 2); - knot512_permute_8(&state, 140); - memcpy(out + KNOT_HASH_512_SIZE / 2, state.B, KNOT_HASH_512_SIZE / 2); - return 0; -} diff --git a/knot/Implementations/crypto_hash/knot384/rhys-avr/knot.h b/knot/Implementations/crypto_hash/knot384/rhys-avr/knot.h deleted file mode 100644 index e2c5198..0000000 --- a/knot/Implementations/crypto_hash/knot384/rhys-avr/knot.h +++ /dev/null @@ -1,459 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_KNOT_H -#define LWCRYPTO_KNOT_H - -#include "aead-common.h" - -/** - * \file knot.h - * \brief KNOT authenticated encryption and hash algorithms. - * - * KNOT is a family of authenticated encryption and hash algorithms built - * around a permutation and the MonkeyDuplex sponge construction. The - * family members are: - * - * \li KNOT-AEAD-128-256 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag, built around a 256-bit permutation. This is the primary - * encryption member of the family. - * \li KNOT-AEAD-128-384 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag, built around a 384-bit permutation. - * \li KNOT-AEAD-192-384 with a 192-bit key, a 192-bit nonce, and a - * 192-bit tag, built around a 384-bit permutation. - * \li KNOT-AEAD-256-512 with a 256-bit key, a 256-bit nonce, and a - * 256-bit tag, built around a 512-bit permutation. - * \li KNOT-HASH-256-256 with a 256-bit hash output, built around a - * 256-bit permutation. This is the primary hashing member of the family. - * \li KNOT-HASH-256-384 with a 256-bit hash output, built around a - * 384-bit permutation. - * \li KNOT-HASH-384-384 with a 384-bit hash output, built around a - * 384-bit permutation. - * \li KNOT-HASH-512-512 with a 512-bit hash output, built around a - * 512-bit permutation. 
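The hashing code in knot-hash.c above is a plain sponge: it XORs input into the state at the per-variant rate (4 bytes per permutation call for KNOT-HASH-256-256), permutes, pads the final partial block with a single 0x01 byte, and squeezes the 32-byte digest in two halves. A hypothetical use of knot_hash_256_256(), assuming the rhys sources are linked in, could look like this; the message is a placeholder.

#include <stdio.h>
#include <string.h>
#include "knot.h"   /* public header from the rhys implementation */

int main(void)
{
    const unsigned char msg[] = "hello world";
    unsigned char digest[KNOT_HASH_256_SIZE];

    /* Hash the 11 message bytes (excluding the trailing NUL) */
    if (knot_hash_256_256(digest, msg, strlen((const char *)msg)) != 0)
        return 1;

    for (size_t i = 0; i < sizeof(digest); ++i)
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}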
- * - * References: https://csrc.nist.gov/CSRC/media/Projects/lightweight-cryptography/documents/round-2/spec-doc-rnd2/knot-spec-round.pdf - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-128-256 and - * KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_NONCE_SIZE 16 - -/** - * \brief Size of the key for KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_KEY_SIZE 24 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_TAG_SIZE 24 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_NONCE_SIZE 24 - -/** - * \brief Size of the key for KNOT-AEAD-256-512. - */ -#define KNOT_AEAD_256_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-256-512. - */ -#define KNOT_AEAD_256_TAG_SIZE 32 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_256_NONCE_SIZE 32 - -/** - * \brief Size of the hash for KNOT-HASH-256-256 and KNOT-HASH-256-384. - */ -#define KNOT_HASH_256_SIZE 32 - -/** - * \brief Size of the hash for KNOT-HASH-384-384. - */ -#define KNOT_HASH_384_SIZE 48 - -/** - * \brief Size of the hash for KNOT-HASH-512-512. - */ -#define KNOT_HASH_512_SIZE 64 - -/** - * \brief Meta-information block for the KNOT-AEAD-128-256 cipher. - */ -extern aead_cipher_t const knot_aead_128_256_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-128-384 cipher. - */ -extern aead_cipher_t const knot_aead_128_384_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-192-384 cipher. - */ -extern aead_cipher_t const knot_aead_192_384_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-256-512 cipher. - */ -extern aead_cipher_t const knot_aead_256_512_cipher; - -/** - * \brief Meta-information block for the KNOT-HASH-256-256 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_256_256_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-256-384 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_256_384_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-384-384 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_384_384_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-512-512 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_512_512_algorithm; - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. 
- * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_128_256_decrypt() - */ -int knot_aead_128_256_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_128_256_encrypt() - */ -int knot_aead_128_256_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-384. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_128_384_decrypt() - */ -int knot_aead_128_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-384. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
- * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_128_384_encrypt() - */ -int knot_aead_128_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-192-384. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_192_384_decrypt() - */ -int knot_aead_192_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-192-384. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_192_384_encrypt() - */ -int knot_aead_192_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-256-512. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. 
- * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_256_512_decrypt() - */ -int knot_aead_256_512_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-256-512. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_256_512_encrypt() - */ -int knot_aead_256_512_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with KNOT-HASH-256-256. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_256_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_256_256 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-256-384. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_256_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_256_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-384-384. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_384_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. 
- */ -int knot_hash_384_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-512-512. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_512_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_512_512 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_hash/knot384/rhys/aead-common.c b/knot/Implementations/crypto_hash/knot384/rhys/aead-common.c new file mode 100644 index 0000000..84fc53a --- /dev/null +++ b/knot/Implementations/crypto_hash/knot384/rhys/aead-common.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
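To make the calling convention documented above concrete, here is a minimal round-trip sketch for KNOT-AEAD-128-256. It is illustrative only, not part of the patch: it assumes the declarations are available through the library's "knot.h" header (the same header included by hash.c later in this patch), the NULL nsec arguments follow the "not used by this algorithm" notes, and the buffer and return conventions (16-byte key, nonce and tag, clen = mlen + 16, 0 on success, -1 on a bad tag) are taken directly from the documentation.

#include <string.h>
#include "knot.h"   /* assumed header providing the knot_aead_* declarations */

int knot_roundtrip_example(void)
{
    unsigned char key[16] = {0};             /* 16-byte key */
    unsigned char nonce[16] = {0};           /* 16-byte public nonce */
    unsigned char msg[32] = "hello, KNOT";   /* plaintext */
    unsigned char ad[4]  = "hdr";            /* associated data */
    unsigned char ct[sizeof(msg) + 16];      /* ciphertext plus 16-byte tag */
    unsigned char out[sizeof(msg)];
    unsigned long long clen, mlen;

    if (knot_aead_128_256_encrypt(ct, &clen, msg, sizeof(msg),
                                  ad, sizeof(ad), 0, nonce, key) != 0)
        return -1;

    /* clen is now sizeof(msg) + 16; decrypt returns -1 on a bad tag */
    if (knot_aead_128_256_decrypt(out, &mlen, 0, ct, clen,
                                  ad, sizeof(ad), nonce, key) != 0)
        return -1;

    return memcmp(out, msg, (size_t)mlen) == 0 ? 0 : -1;
}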
+ */ + +#include "aead-common.h" + +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = (accum - 1) >> 8; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} + +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size, int precheck) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = ((accum - 1) >> 8) & precheck; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} diff --git a/knot/Implementations/crypto_hash/knot384/rhys/aead-common.h b/knot/Implementations/crypto_hash/knot384/rhys/aead-common.h new file mode 100644 index 0000000..2be95eb --- /dev/null +++ b/knot/Implementations/crypto_hash/knot384/rhys/aead-common.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_AEAD_COMMON_H +#define LWCRYPTO_AEAD_COMMON_H + +#include + +/** + * \file aead-common.h + * \brief Definitions that are common across AEAD schemes. + * + * AEAD stands for "Authenticated Encryption with Associated Data". + * It is a standard API pattern for securely encrypting and + * authenticating packets of data. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts and authenticates a packet with an AEAD scheme. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. 
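The two helpers above rely on a branch-free masking idiom: the XOR differences between the tag bytes are OR'ed into accum, so accum is zero only when every byte matched, and (accum - 1) >> 8 turns that into an all-ones mask on success or zero on failure, with no data-dependent branch. The mask then scrubs the plaintext and selects the return value. A standalone sketch of the same idiom, assuming an arithmetic right shift on signed int (as the library code itself does):

#include <stdio.h>

int mask_demo(void)
{
    unsigned char a[4] = {1, 2, 3, 4};
    unsigned char b[4] = {1, 2, 3, 4};
    int accum = 0;
    unsigned i;

    /* Accumulate byte differences; accum stays 0 only on a full match */
    for (i = 0; i < sizeof(a); ++i)
        accum |= a[i] ^ b[i];

    /* -1 (all ones) when the tags matched, 0 when they did not */
    int mask = (accum - 1) >> 8;

    printf("matched=%d return=%d\n", mask == -1, ~mask); /* return 0 on match */
    return ~mask;
}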
+ * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + */ +typedef int (*aead_cipher_encrypt_t) + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with an AEAD scheme. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + */ +typedef int (*aead_cipher_decrypt_t) + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data. + * + * \param out Buffer to receive the hash output. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +typedef int (*aead_hash_t) + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a hashing operation. + * + * \param state Hash state to be initialized. + */ +typedef void (*aead_hash_init_t)(void *state); + +/** + * \brief Updates a hash state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + */ +typedef void (*aead_hash_update_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Returns the final hash value from a hashing operation. + * + * \param state Hash state to be finalized. + * \param out Points to the output buffer to receive the hash value. + */ +typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); + +/** + * \brief Absorbs more input data into an XOF state. + * + * \param state XOF state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. + * + * \sa ascon_xof_init(), ascon_xof_squeeze() + */ +typedef void (*aead_xof_absorb_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Squeezes output data from an XOF state.
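These function-pointer typedefs let generic code drive any hash or XOF in the suite without naming a concrete algorithm. A hypothetical helper, written against the aead_hash_algorithm_t descriptor defined further below (its state_size, init, update and finalize members), might look like this sketch; the function name and the two-part split are illustrative only.

#include <stdlib.h>
#include "aead-common.h"

int hash_in_two_parts(const aead_hash_algorithm_t *alg, unsigned char *out,
                      const unsigned char *part1, unsigned long long len1,
                      const unsigned char *part2, unsigned long long len2)
{
    /* Allocate the algorithm's incremental state using its advertised size */
    void *state = malloc(alg->state_size);
    if (!state)
        return -1;

    /* Incremental hashing through the generic callbacks */
    alg->init(state);
    alg->update(state, part1, len1);
    alg->update(state, part2, len2);
    alg->finalize(state, out);   /* out must hold alg->hash_len bytes */

    free(state);
    return 0;
}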
+ * + * \param state XOF state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + */ +typedef void (*aead_xof_squeeze_t) + (void *state, unsigned char *out, unsigned long long outlen); + +/** + * \brief No special AEAD features. + */ +#define AEAD_FLAG_NONE 0x0000 + +/** + * \brief The natural byte order of the AEAD cipher is little-endian. + * + * If this flag is not present, then the natural byte order of the + * AEAD cipher should be assumed to be big-endian. + * + * The natural byte order may be useful when formatting packet sequence + * numbers as nonces. The application needs to know whether the sequence + * number should be packed into the leading or trailing bytes of the nonce. + */ +#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 + +/** + * \brief Meta-information about an AEAD cipher. + */ +typedef struct +{ + const char *name; /**< Name of the cipher */ + unsigned key_len; /**< Length of the key in bytes */ + unsigned nonce_len; /**< Length of the nonce in bytes */ + unsigned tag_len; /**< Length of the tag in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ + aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ + +} aead_cipher_t; + +/** + * \brief Meta-information about a hash algorithm that is related to an AEAD. + * + * Regular hash algorithms should provide the "hash", "init", "update", + * and "finalize" functions. Extensible Output Functions (XOFs) should + * provide the "hash", "init", "absorb", and "squeeze" functions. + */ +typedef struct +{ + const char *name; /**< Name of the hash algorithm */ + size_t state_size; /**< Size of the incremental state structure */ + unsigned hash_len; /**< Length of the hash in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_hash_t hash; /**< All in one hashing function */ + aead_hash_init_t init; /**< Incremental hash/XOF init function */ + aead_hash_update_t update; /**< Incremental hash update function */ + aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ + aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ + aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ + +} aead_hash_algorithm_t; + +/** + * \brief Check an authentication tag in constant time. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + */ +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len); + +/** + * \brief Check an authentication tag in constant time with a previous check. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * \param precheck Set to -1 if previous check succeeded or 0 if it failed.
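The descriptor structs make it possible to write algorithm-agnostic glue: key_len, nonce_len and tag_len tell the caller how to size its buffers, and the encrypt/decrypt members are invoked through the typedefs above. A hypothetical wrapper is sketched below; the function name is illustrative, and passing a NULL associated-data pointer with adlen = 0 and a NULL nsec is assumed to be acceptable for the ciphers in this suite.

#include "aead-common.h"

int aead_seal(const aead_cipher_t *cipher,
              unsigned char *c, unsigned long long *clen,
              const unsigned char *m, unsigned long long mlen,
              const unsigned char *nonce, const unsigned char *key)
{
    /* Caller must supply at least mlen + cipher->tag_len bytes in c,
     * a nonce of cipher->nonce_len bytes and a key of cipher->key_len bytes. */
    return cipher->encrypt(c, clen, m, mlen, 0, 0, 0, nonce, key);
}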
+ * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + * + * This version can be used to incorporate other information about the + * correctness of the plaintext into the final result. + */ +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len, int precheck); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/knot/Implementations/crypto_hash/knot384/rhys/api.h b/knot/Implementations/crypto_hash/knot384/rhys/api.h new file mode 100644 index 0000000..d507385 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot384/rhys/api.h @@ -0,0 +1 @@ +#define CRYPTO_BYTES 48 diff --git a/knot/Implementations/crypto_hash/knot384/rhys/hash.c b/knot/Implementations/crypto_hash/knot384/rhys/hash.c new file mode 100644 index 0000000..2f63a7a --- /dev/null +++ b/knot/Implementations/crypto_hash/knot384/rhys/hash.c @@ -0,0 +1,8 @@ + +#include "knot.h" + +int crypto_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + return knot_hash_384_384(out, in, inlen); +} diff --git a/knot/Implementations/crypto_hash/knot384/rhys/internal-knot-256-avr.S b/knot/Implementations/crypto_hash/knot384/rhys/internal-knot-256-avr.S new file mode 100644 index 0000000..15e6389 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot384/rhys/internal-knot-256-avr.S @@ -0,0 +1,1093 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_6, @object + .size table_6, 52 +table_6: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 33 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 49 + .byte 34 + .byte 5 + .byte 10 + .byte 20 + .byte 41 + .byte 19 + .byte 39 + .byte 15 + .byte 30 + .byte 61 + .byte 58 + .byte 52 + .byte 40 + .byte 17 + .byte 35 + .byte 7 + .byte 14 + .byte 28 + .byte 57 + .byte 50 + .byte 36 + .byte 9 + .byte 18 + .byte 37 + .byte 11 + .byte 22 + .byte 45 + .byte 27 + .byte 55 + .byte 46 + .byte 29 + .byte 59 + .byte 54 + .byte 44 + .byte 25 + .byte 51 + .byte 38 + .byte 13 + .byte 26 + .byte 53 + .byte 42 + + .text +.global knot256_permute_6 + .type knot256_permute_6, @function +knot256_permute_6: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 57 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r8 + std Y+18,r9 + std Y+19,r10 + std Y+20,r11 + std Y+21,r12 + std 
Y+22,r13 + std Y+23,r14 + std Y+24,r15 + push r31 + push r30 + ldi r30,lo8(table_6) + ldi r31,hi8(table_6) +#if defined(RAMPZ) + ldi r17,hh8(table_6) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +59: +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + eor r18,r23 + inc r30 + ldd r23,Y+1 + ldd r4,Y+9 + ldd r5,Y+17 + mov r24,r18 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+33,r7 + mov r16,r5 + eor r16,r24 + mov r8,r23 + or r8,r4 + eor r8,r16 + mov r24,r23 + eor r24,r5 + mov r18,r25 + and r18,r16 + eor r18,r24 + mov r6,r8 + and r6,r24 + eor r6,r25 + std Y+25,r6 + ldd r23,Y+2 + ldd r4,Y+10 + ldd r5,Y+18 + mov r24,r19 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+34,r7 + mov r16,r5 + eor r16,r24 + mov r9,r23 + or r9,r4 + eor r9,r16 + mov r24,r23 + eor r24,r5 + mov r19,r25 + and r19,r16 + eor r19,r24 + mov r6,r9 + and r6,r24 + eor r6,r25 + std Y+26,r6 + ldd r23,Y+3 + ldd r4,Y+11 + ldd r5,Y+19 + mov r24,r20 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+35,r7 + mov r16,r5 + eor r16,r24 + mov r10,r23 + or r10,r4 + eor r10,r16 + mov r24,r23 + eor r24,r5 + mov r20,r25 + and r20,r16 + eor r20,r24 + mov r6,r10 + and r6,r24 + eor r6,r25 + std Y+27,r6 + ldd r23,Y+4 + ldd r4,Y+12 + ldd r5,Y+20 + mov r24,r21 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+36,r7 + mov r16,r5 + eor r16,r24 + mov r11,r23 + or r11,r4 + eor r11,r16 + mov r24,r23 + eor r24,r5 + mov r21,r25 + and r21,r16 + eor r21,r24 + mov r6,r11 + and r6,r24 + eor r6,r25 + std Y+28,r6 + ldd r23,Y+5 + ldd r4,Y+13 + ldd r5,Y+21 + mov r24,r26 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+37,r7 + mov r16,r5 + eor r16,r24 + mov r12,r23 + or r12,r4 + eor r12,r16 + mov r24,r23 + eor r24,r5 + mov r26,r25 + and r26,r16 + eor r26,r24 + mov r6,r12 + and r6,r24 + eor r6,r25 + std Y+29,r6 + ldd r23,Y+6 + ldd r4,Y+14 + ldd r5,Y+22 + mov r24,r27 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+38,r7 + mov r16,r5 + eor r16,r24 + mov r13,r23 + or r13,r4 + eor r13,r16 + mov r24,r23 + eor r24,r5 + mov r27,r25 + and r27,r16 + eor r27,r24 + mov r6,r13 + and r6,r24 + eor r6,r25 + std Y+30,r6 + ldd r23,Y+7 + ldd r4,Y+15 + ldd r5,Y+23 + mov r24,r2 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+39,r7 + mov r16,r5 + eor r16,r24 + mov r14,r23 + or r14,r4 + eor r14,r16 + mov r24,r23 + eor r24,r5 + mov r2,r25 + and r2,r16 + eor r2,r24 + mov r6,r14 + and r6,r24 + eor r6,r25 + std Y+31,r6 + ldd r23,Y+8 + ldd r4,Y+16 + ldd r5,Y+24 + mov r24,r3 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+40,r7 + mov r16,r5 + eor r16,r24 + mov r15,r23 + or r15,r4 + eor r15,r16 + mov r24,r23 + eor r24,r5 + mov r3,r25 + and r3,r16 + eor r3,r24 + mov r6,r15 + and r6,r24 + eor r6,r25 + std Y+32,r6 + std Y+9,r15 + std Y+10,r8 + std Y+11,r9 + std Y+12,r10 + std Y+13,r11 + std Y+14,r12 + std Y+15,r13 + std Y+16,r14 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd 
r11,Y+36 + ldd r12,Y+37 + ldd r13,Y+38 + ldd r14,Y+39 + ldd r15,Y+40 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+17,r13 + std Y+18,r14 + std Y+19,r15 + std Y+20,r8 + std Y+21,r9 + std Y+22,r10 + std Y+23,r11 + std Y+24,r12 + dec r22 + breq 5322f + rjmp 59b +5322: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + ldd r12,Y+5 + ldd r13,Y+6 + ldd r14,Y+7 + ldd r15,Y+8 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + ldd r8,Y+17 + ldd r9,Y+18 + ldd r10,Y+19 + ldd r11,Y+20 + ldd r12,Y+21 + ldd r13,Y+22 + ldd r14,Y+23 + ldd r15,Y+24 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + adiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot256_permute_6, .-knot256_permute_6 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 + .byte 92 + + .text +.global knot256_permute_7 + .type knot256_permute_7, @function +knot256_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 57 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd 
r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r8 + std Y+18,r9 + std Y+19,r10 + std Y+20,r11 + std Y+21,r12 + std Y+22,r13 + std Y+23,r14 + std Y+24,r15 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r17,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +59: +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + eor r18,r23 + inc r30 + ldd r23,Y+1 + ldd r4,Y+9 + ldd r5,Y+17 + mov r24,r18 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+33,r7 + mov r16,r5 + eor r16,r24 + mov r8,r23 + or r8,r4 + eor r8,r16 + mov r24,r23 + eor r24,r5 + mov r18,r25 + and r18,r16 + eor r18,r24 + mov r6,r8 + and r6,r24 + eor r6,r25 + std Y+25,r6 + ldd r23,Y+2 + ldd r4,Y+10 + ldd r5,Y+18 + mov r24,r19 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+34,r7 + mov r16,r5 + eor r16,r24 + mov r9,r23 + or r9,r4 + eor r9,r16 + mov r24,r23 + eor r24,r5 + mov r19,r25 + and r19,r16 + eor r19,r24 + mov r6,r9 + and r6,r24 + eor r6,r25 + std Y+26,r6 + ldd r23,Y+3 + ldd r4,Y+11 + ldd r5,Y+19 + mov r24,r20 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+35,r7 + mov r16,r5 + eor r16,r24 + mov r10,r23 + or r10,r4 + eor r10,r16 + mov r24,r23 + eor r24,r5 + mov r20,r25 + and r20,r16 + eor r20,r24 + mov r6,r10 + and r6,r24 + eor r6,r25 + std Y+27,r6 + ldd r23,Y+4 + ldd r4,Y+12 + ldd r5,Y+20 + mov r24,r21 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+36,r7 + mov r16,r5 + eor r16,r24 + mov r11,r23 + or r11,r4 + eor r11,r16 + mov r24,r23 + eor r24,r5 + mov r21,r25 + and r21,r16 + eor r21,r24 + mov r6,r11 + and r6,r24 + eor r6,r25 + std Y+28,r6 + ldd r23,Y+5 + ldd r4,Y+13 + ldd r5,Y+21 + mov r24,r26 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+37,r7 + mov r16,r5 + eor r16,r24 + mov r12,r23 + or r12,r4 + eor r12,r16 + mov r24,r23 + eor r24,r5 + mov r26,r25 + and r26,r16 + eor r26,r24 + mov r6,r12 + and r6,r24 + eor r6,r25 + std Y+29,r6 + ldd r23,Y+6 + ldd r4,Y+14 + ldd r5,Y+22 + mov r24,r27 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+38,r7 + mov r16,r5 + eor r16,r24 + mov r13,r23 + or r13,r4 + eor r13,r16 + mov r24,r23 + eor r24,r5 + mov r27,r25 + and r27,r16 + eor r27,r24 + mov r6,r13 + and r6,r24 + eor r6,r25 + std Y+30,r6 + ldd r23,Y+7 + ldd r4,Y+15 + ldd r5,Y+23 + mov r24,r2 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+39,r7 + mov r16,r5 + eor r16,r24 + mov r14,r23 + or r14,r4 + eor r14,r16 + mov r24,r23 + eor r24,r5 + mov r2,r25 + and r2,r16 + eor r2,r24 + mov r6,r14 + and r6,r24 + eor r6,r25 + std Y+31,r6 + ldd r23,Y+8 + ldd r4,Y+16 + ldd r5,Y+24 + mov r24,r3 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+40,r7 + mov r16,r5 + eor r16,r24 + mov r15,r23 + or r15,r4 + eor r15,r16 + mov r24,r23 + eor r24,r5 + mov r3,r25 + and r3,r16 + eor r3,r24 + mov r6,r15 + and r6,r24 + eor r6,r25 + std Y+32,r6 + std Y+9,r15 + std Y+10,r8 + std Y+11,r9 + std Y+12,r10 + std Y+13,r11 + std 
Y+14,r12 + std Y+15,r13 + std Y+16,r14 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd r11,Y+36 + ldd r12,Y+37 + ldd r13,Y+38 + ldd r14,Y+39 + ldd r15,Y+40 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+17,r13 + std Y+18,r14 + std Y+19,r15 + std Y+20,r8 + std Y+21,r9 + std Y+22,r10 + std Y+23,r11 + std Y+24,r12 + dec r22 + breq 5322f + rjmp 59b +5322: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + ldd r12,Y+5 + ldd r13,Y+6 + ldd r14,Y+7 + ldd r15,Y+8 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + ldd r8,Y+17 + ldd r9,Y+18 + ldd r10,Y+19 + ldd r11,Y+20 + ldd r12,Y+21 + ldd r13,Y+22 + ldd r14,Y+23 + ldd r15,Y+24 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + adiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot256_permute_7, .-knot256_permute_7 + +#endif diff --git a/knot/Implementations/crypto_hash/knot384/rhys/internal-knot-384-avr.S b/knot/Implementations/crypto_hash/knot384/rhys/internal-knot-384-avr.S new file mode 100644 index 0000000..4d15898 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot384/rhys/internal-knot-384-avr.S @@ -0,0 +1,833 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 
+ .byte 92 + + .text +.global knot384_permute_7 + .type knot384_permute_7, @function +knot384_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,72 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 87 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + ldd r4,Z+16 + ldd r5,Z+17 + ldd r6,Z+18 + ldd r7,Z+19 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + std Y+13,r26 + std Y+14,r27 + std Y+15,r2 + std Y+16,r3 + std Y+17,r4 + std Y+18,r5 + std Y+19,r6 + std Y+20,r7 + std Y+21,r8 + std Y+22,r9 + std Y+23,r10 + std Y+24,r11 + ldd r26,Z+24 + ldd r27,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r4,Z+28 + ldd r5,Z+29 + ldd r6,Z+30 + ldd r7,Z+31 + ldd r8,Z+32 + ldd r9,Z+33 + ldd r10,Z+34 + ldd r11,Z+35 + std Y+25,r26 + std Y+26,r27 + std Y+27,r2 + std Y+28,r3 + std Y+29,r4 + std Y+30,r5 + std Y+31,r6 + std Y+32,r7 + std Y+33,r8 + std Y+34,r9 + std Y+35,r10 + std Y+36,r11 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+37,r26 + std Y+38,r27 + std Y+39,r2 + std Y+40,r3 + std Y+41,r4 + std Y+42,r5 + std Y+43,r6 + std Y+44,r7 + std Y+45,r8 + std Y+46,r9 + std Y+47,r10 + std Y+48,r11 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r24,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif +99: + ldd r12,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + inc r30 + ldd r18,Y+13 + ldd r19,Y+25 + ldd r20,Y+37 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+61,r23 + mov r14,r20 + eor r14,r12 + mov r26,r18 + or r26,r19 + eor r26,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+1,r21 + mov r21,r26 + and r21,r12 + eor r21,r13 + std Y+49,r21 + ldd r12,Y+2 + ldd r18,Y+14 + ldd r19,Y+26 + ldd r20,Y+38 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+62,r23 + mov r14,r20 + eor r14,r12 + mov r27,r18 + or r27,r19 + eor r27,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+2,r21 + mov r21,r27 + and r21,r12 + eor r21,r13 + std Y+50,r21 + ldd r12,Y+3 + ldd r18,Y+15 + ldd r19,Y+27 + ldd r20,Y+39 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+63,r23 + mov r14,r20 + eor r14,r12 + mov r2,r18 + or r2,r19 + eor r2,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+3,r21 + mov r21,r2 + and r21,r12 + eor r21,r13 + std Y+51,r21 + ldd r12,Y+4 + ldd r18,Y+16 + ldd r19,Y+28 + ldd r20,Y+40 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,192 + sbci r29,255 + st Y,r23 + subi r28,64 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r3,r18 + or r3,r19 + eor r3,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and 
r21,r14 + eor r21,r12 + std Y+4,r21 + mov r21,r3 + and r21,r12 + eor r21,r13 + std Y+52,r21 + ldd r12,Y+5 + ldd r18,Y+17 + ldd r19,Y+29 + ldd r20,Y+41 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,191 + sbci r29,255 + st Y,r23 + subi r28,65 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r4,r18 + or r4,r19 + eor r4,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+5,r21 + mov r21,r4 + and r21,r12 + eor r21,r13 + std Y+53,r21 + ldd r12,Y+6 + ldd r18,Y+18 + ldd r19,Y+30 + ldd r20,Y+42 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,190 + sbci r29,255 + st Y,r23 + subi r28,66 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r5,r18 + or r5,r19 + eor r5,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+6,r21 + mov r21,r5 + and r21,r12 + eor r21,r13 + std Y+54,r21 + ldd r12,Y+7 + ldd r18,Y+19 + ldd r19,Y+31 + ldd r20,Y+43 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,189 + sbci r29,255 + st Y,r23 + subi r28,67 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r6,r18 + or r6,r19 + eor r6,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+7,r21 + mov r21,r6 + and r21,r12 + eor r21,r13 + std Y+55,r21 + ldd r12,Y+8 + ldd r18,Y+20 + ldd r19,Y+32 + ldd r20,Y+44 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,188 + sbci r29,255 + st Y,r23 + subi r28,68 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r7,r18 + or r7,r19 + eor r7,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+8,r21 + mov r21,r7 + and r21,r12 + eor r21,r13 + std Y+56,r21 + ldd r12,Y+9 + ldd r18,Y+21 + ldd r19,Y+33 + ldd r20,Y+45 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,187 + sbci r29,255 + st Y,r23 + subi r28,69 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r8,r18 + or r8,r19 + eor r8,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+9,r21 + mov r21,r8 + and r21,r12 + eor r21,r13 + std Y+57,r21 + ldd r12,Y+10 + ldd r18,Y+22 + ldd r19,Y+34 + ldd r20,Y+46 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,186 + sbci r29,255 + st Y,r23 + subi r28,70 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r9,r18 + or r9,r19 + eor r9,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+10,r21 + mov r21,r9 + and r21,r12 + eor r21,r13 + std Y+58,r21 + ldd r12,Y+11 + ldd r18,Y+23 + ldd r19,Y+35 + ldd r20,Y+47 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,185 + sbci r29,255 + st Y,r23 + subi r28,71 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r10,r18 + or r10,r19 + eor r10,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+11,r21 + mov r21,r10 + and r21,r12 + eor r21,r13 + std Y+59,r21 + ldd r12,Y+12 + ldd r18,Y+24 + ldd r19,Y+36 + ldd r20,Y+48 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,184 + sbci r29,255 + st Y,r23 + subi r28,72 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r11,r18 + or r11,r19 + eor r11,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+12,r21 + mov r21,r11 + and r21,r12 + eor r21,r13 + std Y+60,r21 + std Y+25,r11 + std Y+26,r26 + std Y+27,r27 + std Y+28,r2 + std Y+29,r3 + std Y+30,r4 + std Y+31,r5 + std Y+32,r6 + std Y+33,r7 + std Y+34,r8 + 
std Y+35,r9 + std Y+36,r10 + ldd r26,Y+49 + ldd r27,Y+50 + ldd r2,Y+51 + ldd r3,Y+52 + ldd r4,Y+53 + ldd r5,Y+54 + ldd r6,Y+55 + ldd r7,Y+56 + ldd r8,Y+57 + ldd r9,Y+58 + ldd r10,Y+59 + ldd r11,Y+60 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + adc r26,r1 + std Y+13,r26 + std Y+14,r27 + std Y+15,r2 + std Y+16,r3 + std Y+17,r4 + std Y+18,r5 + std Y+19,r6 + std Y+20,r7 + std Y+21,r8 + std Y+22,r9 + std Y+23,r10 + std Y+24,r11 + adiw r28,61 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y + subi r28,72 + sbc r29,r1 + bst r26,0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r3 + ror r2 + ror r27 + ror r26 + bld r11,7 + std Y+37,r5 + std Y+38,r6 + std Y+39,r7 + std Y+40,r8 + std Y+41,r9 + std Y+42,r10 + std Y+43,r11 + std Y+44,r26 + std Y+45,r27 + std Y+46,r2 + std Y+47,r3 + std Y+48,r4 + dec r22 + breq 5542f + rjmp 99b +5542: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r2,Y+15 + ldd r3,Y+16 + ldd r4,Y+17 + ldd r5,Y+18 + ldd r6,Y+19 + ldd r7,Y+20 + ldd r8,Y+21 + ldd r9,Y+22 + ldd r10,Y+23 + ldd r11,Y+24 + std Z+12,r26 + std Z+13,r27 + std Z+14,r2 + std Z+15,r3 + std Z+16,r4 + std Z+17,r5 + std Z+18,r6 + std Z+19,r7 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + ldd r26,Y+25 + ldd r27,Y+26 + ldd r2,Y+27 + ldd r3,Y+28 + ldd r4,Y+29 + ldd r5,Y+30 + ldd r6,Y+31 + ldd r7,Y+32 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd r11,Y+36 + std Z+24,r26 + std Z+25,r27 + std Z+26,r2 + std Z+27,r3 + std Z+28,r4 + std Z+29,r5 + std Z+30,r6 + std Z+31,r7 + std Z+32,r8 + std Z+33,r9 + std Z+34,r10 + std Z+35,r11 + ldd r26,Y+37 + ldd r27,Y+38 + ldd r2,Y+39 + ldd r3,Y+40 + ldd r4,Y+41 + ldd r5,Y+42 + ldd r6,Y+43 + ldd r7,Y+44 + ldd r8,Y+45 + ldd r9,Y+46 + ldd r10,Y+47 + ldd r11,Y+48 + std Z+36,r26 + std Z+37,r27 + std Z+38,r2 + std Z+39,r3 + std Z+40,r4 + std Z+41,r5 + std Z+42,r6 + std Z+43,r7 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + subi r28,184 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot384_permute_7, .-knot384_permute_7 + +#endif diff --git a/knot/Implementations/crypto_hash/knot384/rhys/internal-knot-512-avr.S b/knot/Implementations/crypto_hash/knot384/rhys/internal-knot-512-avr.S new file mode 100644 index 0000000..6f92ac3 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot384/rhys/internal-knot-512-avr.S @@ -0,0 +1,2315 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 
72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 + .byte 92 + + .text +.global knot512_permute_7 + .type knot512_permute_7, @function +knot512_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,96 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 113 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r26,Z+16 + ldd r27,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + ldd r26,Z+32 + ldd r27,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r8,Z+40 + ldd r9,Z+41 + ldd r10,Z+42 + ldd r11,Z+43 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + std Y+33,r26 + std Y+34,r27 + std Y+35,r2 + std Y+36,r3 + std Y+37,r4 + std Y+38,r5 + std Y+39,r6 + std Y+40,r7 + std Y+41,r8 + std Y+42,r9 + std Y+43,r10 + std Y+44,r11 + std Y+45,r12 + std Y+46,r13 + std Y+47,r14 + std Y+48,r15 + ldd r26,Z+48 + ldd r27,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r8,Z+56 + ldd r9,Z+57 + ldd r10,Z+58 + ldd r11,Z+59 + ldd r12,Z+60 + ldd r13,Z+61 + ldd r14,Z+62 + ldd r15,Z+63 + adiw r28,49 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y+,r12 + st Y+,r13 + st Y+,r14 + st Y,r15 + subi r28,64 + sbc r29,r1 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r17,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +134: + ldd r24,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r24,r18 + inc r30 + ldd r18,Y+17 + ldd r19,Y+33 + ldd r20,Y+49 + com r24 + mov 
r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,175 + sbci r29,255 + st Y,r23 + subi r28,81 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r26,r18 + or r26,r19 + eor r26,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+1,r21 + mov r21,r26 + and r21,r24 + eor r21,r25 + subi r28,191 + sbci r29,255 + st Y,r21 + subi r28,65 + sbc r29,r1 + ldd r24,Y+2 + ldd r18,Y+18 + ldd r19,Y+34 + ldd r20,Y+50 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,174 + sbci r29,255 + st Y,r23 + subi r28,82 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r27,r18 + or r27,r19 + eor r27,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+2,r21 + mov r21,r27 + and r21,r24 + eor r21,r25 + subi r28,190 + sbci r29,255 + st Y,r21 + subi r28,66 + sbc r29,r1 + ldd r24,Y+3 + ldd r18,Y+19 + ldd r19,Y+35 + ldd r20,Y+51 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,173 + sbci r29,255 + st Y,r23 + subi r28,83 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r2,r18 + or r2,r19 + eor r2,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+3,r21 + mov r21,r2 + and r21,r24 + eor r21,r25 + subi r28,189 + sbci r29,255 + st Y,r21 + subi r28,67 + sbc r29,r1 + ldd r24,Y+4 + ldd r18,Y+20 + ldd r19,Y+36 + ldd r20,Y+52 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,172 + sbci r29,255 + st Y,r23 + subi r28,84 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r3,r18 + or r3,r19 + eor r3,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+4,r21 + mov r21,r3 + and r21,r24 + eor r21,r25 + subi r28,188 + sbci r29,255 + st Y,r21 + subi r28,68 + sbc r29,r1 + ldd r24,Y+5 + ldd r18,Y+21 + ldd r19,Y+37 + ldd r20,Y+53 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,171 + sbci r29,255 + st Y,r23 + subi r28,85 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r4,r18 + or r4,r19 + eor r4,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+5,r21 + mov r21,r4 + and r21,r24 + eor r21,r25 + subi r28,187 + sbci r29,255 + st Y,r21 + subi r28,69 + sbc r29,r1 + ldd r24,Y+6 + ldd r18,Y+22 + ldd r19,Y+38 + ldd r20,Y+54 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,170 + sbci r29,255 + st Y,r23 + subi r28,86 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r5,r18 + or r5,r19 + eor r5,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+6,r21 + mov r21,r5 + and r21,r24 + eor r21,r25 + subi r28,186 + sbci r29,255 + st Y,r21 + subi r28,70 + sbc r29,r1 + ldd r24,Y+7 + ldd r18,Y+23 + ldd r19,Y+39 + ldd r20,Y+55 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,169 + sbci r29,255 + st Y,r23 + subi r28,87 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r6,r18 + or r6,r19 + eor r6,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+7,r21 + mov r21,r6 + and r21,r24 + eor r21,r25 + subi r28,185 + sbci r29,255 + st Y,r21 + subi r28,71 + sbc r29,r1 + ldd r24,Y+8 + ldd r18,Y+24 + ldd r19,Y+40 + ldd r20,Y+56 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,168 + sbci r29,255 + st Y,r23 + subi r28,88 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r7,r18 + or r7,r19 + eor r7,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+8,r21 + 
mov r21,r7 + and r21,r24 + eor r21,r25 + subi r28,184 + sbci r29,255 + st Y,r21 + subi r28,72 + sbc r29,r1 + ldd r24,Y+9 + ldd r18,Y+25 + ldd r19,Y+41 + ldd r20,Y+57 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,167 + sbci r29,255 + st Y,r23 + subi r28,89 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r8,r18 + or r8,r19 + eor r8,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+9,r21 + mov r21,r8 + and r21,r24 + eor r21,r25 + subi r28,183 + sbci r29,255 + st Y,r21 + subi r28,73 + sbc r29,r1 + ldd r24,Y+10 + ldd r18,Y+26 + ldd r19,Y+42 + ldd r20,Y+58 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,166 + sbci r29,255 + st Y,r23 + subi r28,90 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r9,r18 + or r9,r19 + eor r9,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+10,r21 + mov r21,r9 + and r21,r24 + eor r21,r25 + subi r28,182 + sbci r29,255 + st Y,r21 + subi r28,74 + sbc r29,r1 + ldd r24,Y+11 + ldd r18,Y+27 + ldd r19,Y+43 + ldd r20,Y+59 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,165 + sbci r29,255 + st Y,r23 + subi r28,91 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r10,r18 + or r10,r19 + eor r10,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+11,r21 + mov r21,r10 + and r21,r24 + eor r21,r25 + subi r28,181 + sbci r29,255 + st Y,r21 + subi r28,75 + sbc r29,r1 + ldd r24,Y+12 + ldd r18,Y+28 + ldd r19,Y+44 + ldd r20,Y+60 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,164 + sbci r29,255 + st Y,r23 + subi r28,92 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r11,r18 + or r11,r19 + eor r11,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+12,r21 + mov r21,r11 + and r21,r24 + eor r21,r25 + subi r28,180 + sbci r29,255 + st Y,r21 + subi r28,76 + sbc r29,r1 + ldd r24,Y+13 + ldd r18,Y+29 + ldd r19,Y+45 + ldd r20,Y+61 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,163 + sbci r29,255 + st Y,r23 + subi r28,93 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r12,r18 + or r12,r19 + eor r12,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+13,r21 + mov r21,r12 + and r21,r24 + eor r21,r25 + subi r28,179 + sbci r29,255 + st Y,r21 + subi r28,77 + sbc r29,r1 + ldd r24,Y+14 + ldd r18,Y+30 + ldd r19,Y+46 + ldd r20,Y+62 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,162 + sbci r29,255 + st Y,r23 + subi r28,94 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r13,r18 + or r13,r19 + eor r13,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+14,r21 + mov r21,r13 + and r21,r24 + eor r21,r25 + subi r28,178 + sbci r29,255 + st Y,r21 + subi r28,78 + sbc r29,r1 + ldd r24,Y+15 + ldd r18,Y+31 + ldd r19,Y+47 + ldd r20,Y+63 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,161 + sbci r29,255 + st Y,r23 + subi r28,95 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r14,r18 + or r14,r19 + eor r14,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+15,r21 + mov r21,r14 + and r21,r24 + eor r21,r25 + subi r28,177 + sbci r29,255 + st Y,r21 + subi r28,79 + sbc r29,r1 + ldd r24,Y+16 + ldd r18,Y+32 + ldd r19,Y+48 + subi r28,192 + sbci r29,255 + ld r20,Y + subi r28,64 + sbc r29,r1 + com r24 + mov r25,r18 + and r25,r24 + 
eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,160 + sbci r29,255 + st Y,r23 + subi r28,96 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r15,r18 + or r15,r19 + eor r15,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+16,r21 + mov r21,r15 + and r21,r24 + eor r21,r25 + subi r28,176 + sbci r29,255 + st Y,r21 + subi r28,80 + sbc r29,r1 + std Y+33,r14 + std Y+34,r15 + std Y+35,r26 + std Y+36,r27 + std Y+37,r2 + std Y+38,r3 + std Y+39,r4 + std Y+40,r5 + std Y+41,r6 + std Y+42,r7 + std Y+43,r8 + std Y+44,r9 + std Y+45,r10 + std Y+46,r11 + std Y+47,r12 + std Y+48,r13 + subi r28,191 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,80 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,96 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + adiw r28,49 + st Y+,r13 + st Y+,r14 + st Y+,r15 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y,r12 + subi r28,64 + sbc r29,r1 + dec r22 + breq 5812f + rjmp 134b +5812: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r26,Y+17 + ldd r27,Y+18 + ldd r2,Y+19 + ldd r3,Y+20 + ldd r4,Y+21 + ldd r5,Y+22 + ldd r6,Y+23 + ldd r7,Y+24 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + std Z+16,r26 + std Z+17,r27 + std Z+18,r2 + std Z+19,r3 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + ldd r26,Y+33 + ldd r27,Y+34 + ldd r2,Y+35 + ldd r3,Y+36 + ldd r4,Y+37 + ldd r5,Y+38 + ldd r6,Y+39 + ldd r7,Y+40 + ldd r8,Y+41 + ldd r9,Y+42 + ldd r10,Y+43 + ldd r11,Y+44 + ldd r12,Y+45 + ldd r13,Y+46 + ldd r14,Y+47 + ldd r15,Y+48 + std Z+32,r26 + std Z+33,r27 + std Z+34,r2 + std Z+35,r3 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r8 + std Z+41,r9 + std Z+42,r10 + std Z+43,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + adiw r28,49 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ 
+ ld r15,Y + subi r28,64 + sbc r29,r1 + std Z+48,r26 + std Z+49,r27 + std Z+50,r2 + std Z+51,r3 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + std Z+56,r8 + std Z+57,r9 + std Z+58,r10 + std Z+59,r11 + std Z+60,r12 + std Z+61,r13 + std Z+62,r14 + std Z+63,r15 + subi r28,160 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot512_permute_7, .-knot512_permute_7 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_8, @object + .size table_8, 140 +table_8: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 17 + .byte 35 + .byte 71 + .byte 142 + .byte 28 + .byte 56 + .byte 113 + .byte 226 + .byte 196 + .byte 137 + .byte 18 + .byte 37 + .byte 75 + .byte 151 + .byte 46 + .byte 92 + .byte 184 + .byte 112 + .byte 224 + .byte 192 + .byte 129 + .byte 3 + .byte 6 + .byte 12 + .byte 25 + .byte 50 + .byte 100 + .byte 201 + .byte 146 + .byte 36 + .byte 73 + .byte 147 + .byte 38 + .byte 77 + .byte 155 + .byte 55 + .byte 110 + .byte 220 + .byte 185 + .byte 114 + .byte 228 + .byte 200 + .byte 144 + .byte 32 + .byte 65 + .byte 130 + .byte 5 + .byte 10 + .byte 21 + .byte 43 + .byte 86 + .byte 173 + .byte 91 + .byte 182 + .byte 109 + .byte 218 + .byte 181 + .byte 107 + .byte 214 + .byte 172 + .byte 89 + .byte 178 + .byte 101 + .byte 203 + .byte 150 + .byte 44 + .byte 88 + .byte 176 + .byte 97 + .byte 195 + .byte 135 + .byte 15 + .byte 31 + .byte 62 + .byte 125 + .byte 251 + .byte 246 + .byte 237 + .byte 219 + .byte 183 + .byte 111 + .byte 222 + .byte 189 + .byte 122 + .byte 245 + .byte 235 + .byte 215 + .byte 174 + .byte 93 + .byte 186 + .byte 116 + .byte 232 + .byte 209 + .byte 162 + .byte 68 + .byte 136 + .byte 16 + .byte 33 + .byte 67 + .byte 134 + .byte 13 + .byte 27 + .byte 54 + .byte 108 + .byte 216 + .byte 177 + .byte 99 + .byte 199 + .byte 143 + .byte 30 + .byte 60 + .byte 121 + .byte 243 + .byte 231 + .byte 206 + .byte 156 + .byte 57 + .byte 115 + .byte 230 + .byte 204 + .byte 152 + .byte 49 + .byte 98 + .byte 197 + .byte 139 + .byte 22 + .byte 45 + .byte 90 + .byte 180 + .byte 105 + .byte 210 + .byte 164 + .byte 72 + .byte 145 + .byte 34 + .byte 69 + + .text +.global knot512_permute_8 + .type knot512_permute_8, @function +knot512_permute_8: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,96 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 113 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r26,Z+16 + ldd r27,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std 
Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + ldd r26,Z+32 + ldd r27,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r8,Z+40 + ldd r9,Z+41 + ldd r10,Z+42 + ldd r11,Z+43 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + std Y+33,r26 + std Y+34,r27 + std Y+35,r2 + std Y+36,r3 + std Y+37,r4 + std Y+38,r5 + std Y+39,r6 + std Y+40,r7 + std Y+41,r8 + std Y+42,r9 + std Y+43,r10 + std Y+44,r11 + std Y+45,r12 + std Y+46,r13 + std Y+47,r14 + std Y+48,r15 + ldd r26,Z+48 + ldd r27,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r8,Z+56 + ldd r9,Z+57 + ldd r10,Z+58 + ldd r11,Z+59 + ldd r12,Z+60 + ldd r13,Z+61 + ldd r14,Z+62 + ldd r15,Z+63 + adiw r28,49 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y+,r12 + st Y+,r13 + st Y+,r14 + st Y,r15 + subi r28,64 + sbc r29,r1 + push r31 + push r30 + ldi r30,lo8(table_8) + ldi r31,hi8(table_8) +#if defined(RAMPZ) + ldi r17,hh8(table_8) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +134: + ldd r24,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r24,r18 + inc r30 + ldd r18,Y+17 + ldd r19,Y+33 + ldd r20,Y+49 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,175 + sbci r29,255 + st Y,r23 + subi r28,81 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r26,r18 + or r26,r19 + eor r26,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+1,r21 + mov r21,r26 + and r21,r24 + eor r21,r25 + subi r28,191 + sbci r29,255 + st Y,r21 + subi r28,65 + sbc r29,r1 + ldd r24,Y+2 + ldd r18,Y+18 + ldd r19,Y+34 + ldd r20,Y+50 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,174 + sbci r29,255 + st Y,r23 + subi r28,82 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r27,r18 + or r27,r19 + eor r27,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+2,r21 + mov r21,r27 + and r21,r24 + eor r21,r25 + subi r28,190 + sbci r29,255 + st Y,r21 + subi r28,66 + sbc r29,r1 + ldd r24,Y+3 + ldd r18,Y+19 + ldd r19,Y+35 + ldd r20,Y+51 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,173 + sbci r29,255 + st Y,r23 + subi r28,83 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r2,r18 + or r2,r19 + eor r2,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+3,r21 + mov r21,r2 + and r21,r24 + eor r21,r25 + subi r28,189 + sbci r29,255 + st Y,r21 + subi r28,67 + sbc r29,r1 + ldd r24,Y+4 + ldd r18,Y+20 + ldd r19,Y+36 + ldd r20,Y+52 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,172 + sbci r29,255 + st Y,r23 + subi r28,84 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r3,r18 + or r3,r19 + eor r3,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+4,r21 + mov r21,r3 + and r21,r24 + eor r21,r25 + subi r28,188 + sbci r29,255 + st Y,r21 + subi r28,68 + sbc r29,r1 + ldd r24,Y+5 + ldd r18,Y+21 + ldd r19,Y+37 + ldd r20,Y+53 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,171 + sbci r29,255 + st Y,r23 + subi r28,85 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r4,r18 + or r4,r19 + eor r4,r16 + mov r24,r18 + eor r24,r20 + mov 
r21,r25 + and r21,r16 + eor r21,r24 + std Y+5,r21 + mov r21,r4 + and r21,r24 + eor r21,r25 + subi r28,187 + sbci r29,255 + st Y,r21 + subi r28,69 + sbc r29,r1 + ldd r24,Y+6 + ldd r18,Y+22 + ldd r19,Y+38 + ldd r20,Y+54 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,170 + sbci r29,255 + st Y,r23 + subi r28,86 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r5,r18 + or r5,r19 + eor r5,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+6,r21 + mov r21,r5 + and r21,r24 + eor r21,r25 + subi r28,186 + sbci r29,255 + st Y,r21 + subi r28,70 + sbc r29,r1 + ldd r24,Y+7 + ldd r18,Y+23 + ldd r19,Y+39 + ldd r20,Y+55 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,169 + sbci r29,255 + st Y,r23 + subi r28,87 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r6,r18 + or r6,r19 + eor r6,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+7,r21 + mov r21,r6 + and r21,r24 + eor r21,r25 + subi r28,185 + sbci r29,255 + st Y,r21 + subi r28,71 + sbc r29,r1 + ldd r24,Y+8 + ldd r18,Y+24 + ldd r19,Y+40 + ldd r20,Y+56 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,168 + sbci r29,255 + st Y,r23 + subi r28,88 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r7,r18 + or r7,r19 + eor r7,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+8,r21 + mov r21,r7 + and r21,r24 + eor r21,r25 + subi r28,184 + sbci r29,255 + st Y,r21 + subi r28,72 + sbc r29,r1 + ldd r24,Y+9 + ldd r18,Y+25 + ldd r19,Y+41 + ldd r20,Y+57 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,167 + sbci r29,255 + st Y,r23 + subi r28,89 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r8,r18 + or r8,r19 + eor r8,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+9,r21 + mov r21,r8 + and r21,r24 + eor r21,r25 + subi r28,183 + sbci r29,255 + st Y,r21 + subi r28,73 + sbc r29,r1 + ldd r24,Y+10 + ldd r18,Y+26 + ldd r19,Y+42 + ldd r20,Y+58 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,166 + sbci r29,255 + st Y,r23 + subi r28,90 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r9,r18 + or r9,r19 + eor r9,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+10,r21 + mov r21,r9 + and r21,r24 + eor r21,r25 + subi r28,182 + sbci r29,255 + st Y,r21 + subi r28,74 + sbc r29,r1 + ldd r24,Y+11 + ldd r18,Y+27 + ldd r19,Y+43 + ldd r20,Y+59 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,165 + sbci r29,255 + st Y,r23 + subi r28,91 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r10,r18 + or r10,r19 + eor r10,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+11,r21 + mov r21,r10 + and r21,r24 + eor r21,r25 + subi r28,181 + sbci r29,255 + st Y,r21 + subi r28,75 + sbc r29,r1 + ldd r24,Y+12 + ldd r18,Y+28 + ldd r19,Y+44 + ldd r20,Y+60 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,164 + sbci r29,255 + st Y,r23 + subi r28,92 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r11,r18 + or r11,r19 + eor r11,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+12,r21 + mov r21,r11 + and r21,r24 + eor r21,r25 + subi r28,180 + sbci r29,255 + st Y,r21 + subi r28,76 + sbc r29,r1 + ldd r24,Y+13 + ldd r18,Y+29 + ldd r19,Y+45 + ldd r20,Y+61 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov 
r23,r20 + eor r23,r25 + subi r28,163 + sbci r29,255 + st Y,r23 + subi r28,93 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r12,r18 + or r12,r19 + eor r12,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+13,r21 + mov r21,r12 + and r21,r24 + eor r21,r25 + subi r28,179 + sbci r29,255 + st Y,r21 + subi r28,77 + sbc r29,r1 + ldd r24,Y+14 + ldd r18,Y+30 + ldd r19,Y+46 + ldd r20,Y+62 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,162 + sbci r29,255 + st Y,r23 + subi r28,94 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r13,r18 + or r13,r19 + eor r13,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+14,r21 + mov r21,r13 + and r21,r24 + eor r21,r25 + subi r28,178 + sbci r29,255 + st Y,r21 + subi r28,78 + sbc r29,r1 + ldd r24,Y+15 + ldd r18,Y+31 + ldd r19,Y+47 + ldd r20,Y+63 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,161 + sbci r29,255 + st Y,r23 + subi r28,95 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r14,r18 + or r14,r19 + eor r14,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+15,r21 + mov r21,r14 + and r21,r24 + eor r21,r25 + subi r28,177 + sbci r29,255 + st Y,r21 + subi r28,79 + sbc r29,r1 + ldd r24,Y+16 + ldd r18,Y+32 + ldd r19,Y+48 + subi r28,192 + sbci r29,255 + ld r20,Y + subi r28,64 + sbc r29,r1 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,160 + sbci r29,255 + st Y,r23 + subi r28,96 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r15,r18 + or r15,r19 + eor r15,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+16,r21 + mov r21,r15 + and r21,r24 + eor r21,r25 + subi r28,176 + sbci r29,255 + st Y,r21 + subi r28,80 + sbc r29,r1 + std Y+33,r14 + std Y+34,r15 + std Y+35,r26 + std Y+36,r27 + std Y+37,r2 + std Y+38,r3 + std Y+39,r4 + std Y+40,r5 + std Y+41,r6 + std Y+42,r7 + std Y+43,r8 + std Y+44,r9 + std Y+45,r10 + std Y+46,r11 + std Y+47,r12 + std Y+48,r13 + subi r28,191 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,80 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,96 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + adiw r28,49 + st Y+,r13 + st Y+,r14 + st Y+,r15 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y,r12 + subi r28,64 + sbc r29,r1 + dec r22 + breq 5812f + rjmp 134b +5812: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 
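+	/* epilogue: the 64-byte permuted state is copied from the local stack frame back to the caller's state buffer via Z */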
+ ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r26,Y+17 + ldd r27,Y+18 + ldd r2,Y+19 + ldd r3,Y+20 + ldd r4,Y+21 + ldd r5,Y+22 + ldd r6,Y+23 + ldd r7,Y+24 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + std Z+16,r26 + std Z+17,r27 + std Z+18,r2 + std Z+19,r3 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + ldd r26,Y+33 + ldd r27,Y+34 + ldd r2,Y+35 + ldd r3,Y+36 + ldd r4,Y+37 + ldd r5,Y+38 + ldd r6,Y+39 + ldd r7,Y+40 + ldd r8,Y+41 + ldd r9,Y+42 + ldd r10,Y+43 + ldd r11,Y+44 + ldd r12,Y+45 + ldd r13,Y+46 + ldd r14,Y+47 + ldd r15,Y+48 + std Z+32,r26 + std Z+33,r27 + std Z+34,r2 + std Z+35,r3 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r8 + std Z+41,r9 + std Z+42,r10 + std Z+43,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + adiw r28,49 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,64 + sbc r29,r1 + std Z+48,r26 + std Z+49,r27 + std Z+50,r2 + std Z+51,r3 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + std Z+56,r8 + std Z+57,r9 + std Z+58,r10 + std Z+59,r11 + std Z+60,r12 + std Z+61,r13 + std Z+62,r14 + std Z+63,r15 + subi r28,160 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot512_permute_8, .-knot512_permute_8 + +#endif diff --git a/knot/Implementations/crypto_hash/knot384/rhys/internal-knot.c b/knot/Implementations/crypto_hash/knot384/rhys/internal-knot.c new file mode 100644 index 0000000..f8b378e --- /dev/null +++ b/knot/Implementations/crypto_hash/knot384/rhys/internal-knot.c @@ -0,0 +1,301 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "internal-knot.h" + +#if !defined(__AVR__) + +/* Round constants for the KNOT-256, KNOT-384, and KNOT-512 permutations */ +static uint8_t const rc6[52] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06, 0x0c, 0x18, 0x31, 0x22, + 0x05, 0x0a, 0x14, 0x29, 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28, + 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24, 0x09, 0x12, 0x25, 0x0b, + 0x16, 0x2d, 0x1b, 0x37, 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26, + 0x0d, 0x1a, 0x35, 0x2a +}; +static uint8_t const rc7[104] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03, 0x06, 0x0c, 0x18, 0x30, + 0x61, 0x42, 0x05, 0x0a, 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c, + 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b, 0x16, 0x2c, 0x59, 0x33, + 0x67, 0x4e, 0x1d, 0x3a, 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f, + 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43, 0x07, 0x0e, 0x1c, 0x38, + 0x71, 0x62, 0x44, 0x09, 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36, + 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37, 0x6f, 0x5e, 0x3d, 0x7b, + 0x76, 0x6c, 0x58, 0x31, 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25, + 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c +}; +static uint8_t const rc8[140] = { + 0x01, 0x02, 0x04, 0x08, 0x11, 0x23, 0x47, 0x8e, 0x1c, 0x38, 0x71, 0xe2, + 0xc4, 0x89, 0x12, 0x25, 0x4b, 0x97, 0x2e, 0x5c, 0xb8, 0x70, 0xe0, 0xc0, + 0x81, 0x03, 0x06, 0x0c, 0x19, 0x32, 0x64, 0xc9, 0x92, 0x24, 0x49, 0x93, + 0x26, 0x4d, 0x9b, 0x37, 0x6e, 0xdc, 0xb9, 0x72, 0xe4, 0xc8, 0x90, 0x20, + 0x41, 0x82, 0x05, 0x0a, 0x15, 0x2b, 0x56, 0xad, 0x5b, 0xb6, 0x6d, 0xda, + 0xb5, 0x6b, 0xd6, 0xac, 0x59, 0xb2, 0x65, 0xcb, 0x96, 0x2c, 0x58, 0xb0, + 0x61, 0xc3, 0x87, 0x0f, 0x1f, 0x3e, 0x7d, 0xfb, 0xf6, 0xed, 0xdb, 0xb7, + 0x6f, 0xde, 0xbd, 0x7a, 0xf5, 0xeb, 0xd7, 0xae, 0x5d, 0xba, 0x74, 0xe8, + 0xd1, 0xa2, 0x44, 0x88, 0x10, 0x21, 0x43, 0x86, 0x0d, 0x1b, 0x36, 0x6c, + 0xd8, 0xb1, 0x63, 0xc7, 0x8f, 0x1e, 0x3c, 0x79, 0xf3, 0xe7, 0xce, 0x9c, + 0x39, 0x73, 0xe6, 0xcc, 0x98, 0x31, 0x62, 0xc5, 0x8b, 0x16, 0x2d, 0x5a, + 0xb4, 0x69, 0xd2, 0xa4, 0x48, 0x91, 0x22, 0x45 +}; + +/* Applies the KNOT S-box to four 64-bit words in bit-sliced mode */ +#define knot_sbox64(a0, a1, a2, a3, b1, b2, b3) \ + do { \ + uint64_t t1, t3, t6; \ + t1 = ~(a0); \ + t3 = (a2) ^ ((a1) & t1); \ + (b3) = (a3) ^ t3; \ + t6 = (a3) ^ t1; \ + (b2) = ((a1) | (a2)) ^ t6; \ + t1 = (a1) ^ (a3); \ + (a0) = t1 ^ (t3 & t6); \ + (b1) = t3 ^ ((b2) & t1); \ + } while (0) + +/* Applies the KNOT S-box to four 32-bit words in bit-sliced mode */ +#define knot_sbox32(a0, a1, a2, a3, b1, b2, b3) \ + do { \ + uint32_t t1, t3, t6; \ + t1 = ~(a0); \ + t3 = (a2) ^ ((a1) & t1); \ + (b3) = (a3) ^ t3; \ + t6 = (a3) ^ t1; \ + (b2) = ((a1) | (a2)) ^ t6; \ + t1 = (a1) ^ (a3); \ + (a0) = t1 ^ (t3 & t6); \ + (b1) = t3 ^ ((b2) & t1); \ + } while (0) + +static void knot256_permute + (knot256_state_t *state, const uint8_t *rc, uint8_t rounds) +{ + uint64_t b1, b2, b3; + + /* Load the input state into local variables; each row is 64 bits */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + uint64_t x0 = state->S[0]; + uint64_t x1 = state->S[1]; + uint64_t x2 = state->S[2]; + uint64_t x3 = state->S[3]; +#else + uint64_t x0 = le_load_word64(state->B); + uint64_t x1 = le_load_word64(state->B + 8); + uint64_t x2 = le_load_word64(state->B + 16); + uint64_t x3 = le_load_word64(state->B + 24); +#endif + + /* Perform all permutation rounds */ + for (; rounds > 0; --rounds) { + /* Add the next round constant to the state */ + x0 ^= *rc++; + + /* Substitution layer */ + knot_sbox64(x0, x1, x2, x3, 
b1, b2, b3); + + /* Linear diffusion layer */ + x1 = leftRotate1_64(b1); + x2 = leftRotate8_64(b2); + x3 = leftRotate25_64(b3); + } + + /* Store the local variables to the output state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + state->S[0] = x0; + state->S[1] = x1; + state->S[2] = x2; + state->S[3] = x3; +#else + le_store_word64(state->B, x0); + le_store_word64(state->B + 8, x1); + le_store_word64(state->B + 16, x2); + le_store_word64(state->B + 24, x3); +#endif +} + +void knot256_permute_6(knot256_state_t *state, uint8_t rounds) +{ + knot256_permute(state, rc6, rounds); +} + +void knot256_permute_7(knot256_state_t *state, uint8_t rounds) +{ + knot256_permute(state, rc7, rounds); +} + +void knot384_permute_7(knot384_state_t *state, uint8_t rounds) +{ + const uint8_t *rc = rc7; + uint64_t b2, b4, b6; + uint32_t b3, b5, b7; + + /* Load the input state into local variables; each row is 96 bits */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + uint64_t x0 = state->S[0]; + uint32_t x1 = state->W[2]; + uint64_t x2 = state->W[3] | (((uint64_t)(state->W[4])) << 32); + uint32_t x3 = state->W[5]; + uint64_t x4 = state->S[3]; + uint32_t x5 = state->W[8]; + uint64_t x6 = state->W[9] | (((uint64_t)(state->W[10])) << 32); + uint32_t x7 = state->W[11]; +#else + uint64_t x0 = le_load_word64(state->B); + uint32_t x1 = le_load_word32(state->B + 8); + uint64_t x2 = le_load_word64(state->B + 12); + uint32_t x3 = le_load_word32(state->B + 20); + uint64_t x4 = le_load_word64(state->B + 24); + uint32_t x5 = le_load_word32(state->B + 32); + uint64_t x6 = le_load_word64(state->B + 36); + uint32_t x7 = le_load_word32(state->B + 44); +#endif + + /* Perform all permutation rounds */ + for (; rounds > 0; --rounds) { + /* Add the next round constant to the state */ + x0 ^= *rc++; + + /* Substitution layer */ + knot_sbox64(x0, x2, x4, x6, b2, b4, b6); + knot_sbox32(x1, x3, x5, x7, b3, b5, b7); + + /* Linear diffusion layer */ + #define leftRotateShort_96(a0, a1, b0, b1, bits) \ + do { \ + (a0) = ((b0) << (bits)) | ((b1) >> (32 - (bits))); \ + (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ + } while (0) + #define leftRotateLong_96(a0, a1, b0, b1, bits) \ + do { \ + (a0) = ((b0) << (bits)) | \ + (((uint64_t)(b1)) << ((bits) - 32)) | \ + ((b0) >> (96 - (bits))); \ + (a1) = (uint32_t)(((b0) << ((bits) - 32)) >> 32); \ + } while (0) + leftRotateShort_96(x2, x3, b2, b3, 1); + leftRotateShort_96(x4, x5, b4, b5, 8); + leftRotateLong_96(x6, x7, b6, b7, 55); + } + + /* Store the local variables to the output state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + state->S[0] = x0; + state->W[2] = x1; + state->W[3] = (uint32_t)x2; + state->W[4] = (uint32_t)(x2 >> 32); + state->W[5] = x3; + state->S[3] = x4; + state->W[8] = x5; + state->W[9] = (uint32_t)x6; + state->W[10] = (uint32_t)(x6 >> 32); + state->W[11] = x7; +#else + le_store_word64(state->B, x0); + le_store_word32(state->B + 8, x1); + le_store_word64(state->B + 12, x2); + le_store_word32(state->B + 20, x3); + le_store_word64(state->B + 24, x4); + le_store_word32(state->B + 32, x5); + le_store_word64(state->B + 36, x6); + le_store_word32(state->B + 44, x7); +#endif +} + +static void knot512_permute + (knot512_state_t *state, const uint8_t *rc, uint8_t rounds) +{ + uint64_t b2, b3, b4, b5, b6, b7; + + /* Load the input state into local variables; each row is 128 bits */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + uint64_t x0 = state->S[0]; + uint64_t x1 = state->S[1]; + uint64_t x2 = state->S[2]; + uint64_t x3 = state->S[3]; + uint64_t x4 = state->S[4]; + uint64_t x5 = state->S[5]; + uint64_t 
x6 = state->S[6]; + uint64_t x7 = state->S[7]; +#else + uint64_t x0 = le_load_word64(state->B); + uint64_t x1 = le_load_word64(state->B + 8); + uint64_t x2 = le_load_word64(state->B + 16); + uint64_t x3 = le_load_word64(state->B + 24); + uint64_t x4 = le_load_word64(state->B + 32); + uint64_t x5 = le_load_word64(state->B + 40); + uint64_t x6 = le_load_word64(state->B + 48); + uint64_t x7 = le_load_word64(state->B + 56); +#endif + + /* Perform all permutation rounds */ + for (; rounds > 0; --rounds) { + /* Add the next round constant to the state */ + x0 ^= *rc++; + + /* Substitution layer */ + knot_sbox64(x0, x2, x4, x6, b2, b4, b6); + knot_sbox64(x1, x3, x5, x7, b3, b5, b7); + + /* Linear diffusion layer */ + #define leftRotate_128(a0, a1, b0, b1, bits) \ + do { \ + (a0) = ((b0) << (bits)) | ((b1) >> (64 - (bits))); \ + (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ + } while (0) + leftRotate_128(x2, x3, b2, b3, 1); + leftRotate_128(x4, x5, b4, b5, 16); + leftRotate_128(x6, x7, b6, b7, 25); + } + + /* Store the local variables to the output state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + state->S[0] = x0; + state->S[1] = x1; + state->S[2] = x2; + state->S[3] = x3; + state->S[4] = x4; + state->S[5] = x5; + state->S[6] = x6; + state->S[7] = x7; +#else + le_store_word64(state->B, x0); + le_store_word64(state->B + 8, x1); + le_store_word64(state->B + 16, x2); + le_store_word64(state->B + 24, x3); + le_store_word64(state->B + 32, x4); + le_store_word64(state->B + 40, x5); + le_store_word64(state->B + 48, x6); + le_store_word64(state->B + 56, x7); +#endif +} + +void knot512_permute_7(knot512_state_t *state, uint8_t rounds) +{ + knot512_permute(state, rc7, rounds); +} + +void knot512_permute_8(knot512_state_t *state, uint8_t rounds) +{ + knot512_permute(state, rc8, rounds); +} + +#endif /* !__AVR__ */ diff --git a/knot/Implementations/crypto_hash/knot384/rhys/internal-knot.h b/knot/Implementations/crypto_hash/knot384/rhys/internal-knot.h new file mode 100644 index 0000000..88a782c --- /dev/null +++ b/knot/Implementations/crypto_hash/knot384/rhys/internal-knot.h @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_KNOT_H +#define LW_INTERNAL_KNOT_H + +#include "internal-util.h" + +/** + * \file internal-knot.h + * \brief Permutations that are used by the KNOT AEAD and hash algorithms. 
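+ *
+ * A minimal absorb sketch, mirroring the loop of knot_hash_256_256() in
+ * knot-hash.c from this patch; the 4-byte rate and the 68-round count are
+ * taken from that file, and absorb_all_example is an illustrative name
+ * rather than an API provided by this header:
+ *
+ * \code
+ * #include <string.h>
+ *
+ * void absorb_all_example(const unsigned char *in, unsigned long long inlen)
+ * {
+ *     knot256_state_t state;
+ *     memset(state.B, 0, sizeof(state.B));  // all-zero initial state
+ *     while (inlen >= 4) {                  // 4-byte rate of KNOT-HASH-256-256
+ *         lw_xor_block(state.B, in, 4);     // XOR the next block into the rate bytes
+ *         knot256_permute_7(&state, 68);    // 68 rounds with 7-bit round constants
+ *         in += 4;
+ *         inlen -= 4;
+ *     }
+ *     // padding and squeezing are omitted; see knot_hash_256_256()
+ * }
+ * \endcode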
+ */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Internal state of the KNOT-256 permutation. + */ +typedef union +{ + uint64_t S[4]; /**< Words of the state */ + uint8_t B[32]; /**< Bytes of the state */ + +} knot256_state_t; + +/** + * \brief Internal state of the KNOT-384 permutation. + */ +typedef union +{ + uint64_t S[6]; /**< 64-bit words of the state */ + uint32_t W[12]; /**< 32-bit words of the state */ + uint8_t B[48]; /**< Bytes of the state */ + +} knot384_state_t; + +/** + * \brief Internal state of the KNOT-512 permutation. + */ +typedef union +{ + uint64_t S[8]; /**< Words of the state */ + uint8_t B[64]; /**< Bytes of the state */ + +} knot512_state_t; + +/** + * \brief Permutes the KNOT-256 state, using 6-bit round constants. + * + * \param state The KNOT-256 state to be permuted. + * \param rounds The number of rounds to be performed, 1 to 52. + * + * The input and output \a state will be in little-endian byte order. + */ +void knot256_permute_6(knot256_state_t *state, uint8_t rounds); + +/** + * \brief Permutes the KNOT-256 state, using 7-bit round constants. + * + * \param state The KNOT-256 state to be permuted. + * \param rounds The number of rounds to be performed, 1 to 104. + * + * The input and output \a state will be in little-endian byte order. + */ +void knot256_permute_7(knot256_state_t *state, uint8_t rounds); + +/** + * \brief Permutes the KNOT-384 state, using 7-bit round constants. + * + * \param state The KNOT-384 state to be permuted. + * \param rounds The number of rounds to be performed, 1 to 104. + * + * The input and output \a state will be in little-endian byte order. + */ +void knot384_permute_7(knot384_state_t *state, uint8_t rounds); + +/** + * \brief Permutes the KNOT-512 state, using 7-bit round constants. + * + * \param state The KNOT-512 state to be permuted. + * \param rounds The number of rounds to be performed, 1 to 104. + * + * The input and output \a state will be in little-endian byte order. + */ +void knot512_permute_7(knot512_state_t *state, uint8_t rounds); + +/** + * \brief Permutes the KNOT-512 state, using 8-bit round constants. + * + * \param state The KNOT-512 state to be permuted. + * \param rounds The number of rounds to be performed, 1 to 140. + * + * The input and output \a state will be in little-endian byte order. + */ +void knot512_permute_8(knot512_state_t *state, uint8_t rounds); + +/** + * \brief Generic pointer to a function that performs a KNOT permutation. + * + * \param state Points to the permutation state. + * \param round Number of rounds to perform. + */ +typedef void (*knot_permute_t)(void *state, uint8_t rounds); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/knot/Implementations/crypto_hash/knot384/rhys/internal-util.h b/knot/Implementations/crypto_hash/knot384/rhys/internal-util.h new file mode 100644 index 0000000..e30166d --- /dev/null +++ b/knot/Implementations/crypto_hash/knot384/rhys/internal-util.h @@ -0,0 +1,702 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_UTIL_H +#define LW_INTERNAL_UTIL_H + +#include + +/* Figure out how to inline functions using this C compiler */ +#if defined(__STDC__) && __STDC_VERSION__ >= 199901L +#define STATIC_INLINE static inline +#elif defined(__GNUC__) || defined(__clang__) +#define STATIC_INLINE static __inline__ +#else +#define STATIC_INLINE static +#endif + +/* Try to figure out whether the CPU is little-endian or big-endian. + * May need to modify this to include new compiler-specific defines. + * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your + * compiler flags when you compile this library */ +#if defined(__x86_64) || defined(__x86_64__) || \ + defined(__i386) || defined(__i386__) || \ + defined(__AVR__) || defined(__arm) || defined(__arm__) || \ + defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ + defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ + defined(__LITTLE_ENDIAN__) +#define LW_UTIL_LITTLE_ENDIAN 1 +#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ + defined(__BIG_ENDIAN__) +/* Big endian */ +#else +#error "Cannot determine the endianess of this platform" +#endif + +/* Helper macros to load and store values while converting endian-ness */ + +/* Load a big-endian 32-bit word from a byte buffer */ +#define be_load_word32(ptr) \ + ((((uint32_t)((ptr)[0])) << 24) | \ + (((uint32_t)((ptr)[1])) << 16) | \ + (((uint32_t)((ptr)[2])) << 8) | \ + ((uint32_t)((ptr)[3]))) + +/* Store a big-endian 32-bit word into a byte buffer */ +#define be_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 24); \ + (ptr)[1] = (uint8_t)(_x >> 16); \ + (ptr)[2] = (uint8_t)(_x >> 8); \ + (ptr)[3] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 32-bit word from a byte buffer */ +#define le_load_word32(ptr) \ + ((((uint32_t)((ptr)[3])) << 24) | \ + (((uint32_t)((ptr)[2])) << 16) | \ + (((uint32_t)((ptr)[1])) << 8) | \ + ((uint32_t)((ptr)[0]))) + +/* Store a little-endian 32-bit word into a byte buffer */ +#define le_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + } while (0) + +/* Load a big-endian 64-bit word from a byte buffer */ +#define be_load_word64(ptr) \ + ((((uint64_t)((ptr)[0])) << 56) | \ + (((uint64_t)((ptr)[1])) << 48) 
| \ + (((uint64_t)((ptr)[2])) << 40) | \ + (((uint64_t)((ptr)[3])) << 32) | \ + (((uint64_t)((ptr)[4])) << 24) | \ + (((uint64_t)((ptr)[5])) << 16) | \ + (((uint64_t)((ptr)[6])) << 8) | \ + ((uint64_t)((ptr)[7]))) + +/* Store a big-endian 64-bit word into a byte buffer */ +#define be_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 56); \ + (ptr)[1] = (uint8_t)(_x >> 48); \ + (ptr)[2] = (uint8_t)(_x >> 40); \ + (ptr)[3] = (uint8_t)(_x >> 32); \ + (ptr)[4] = (uint8_t)(_x >> 24); \ + (ptr)[5] = (uint8_t)(_x >> 16); \ + (ptr)[6] = (uint8_t)(_x >> 8); \ + (ptr)[7] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 64-bit word from a byte buffer */ +#define le_load_word64(ptr) \ + ((((uint64_t)((ptr)[7])) << 56) | \ + (((uint64_t)((ptr)[6])) << 48) | \ + (((uint64_t)((ptr)[5])) << 40) | \ + (((uint64_t)((ptr)[4])) << 32) | \ + (((uint64_t)((ptr)[3])) << 24) | \ + (((uint64_t)((ptr)[2])) << 16) | \ + (((uint64_t)((ptr)[1])) << 8) | \ + ((uint64_t)((ptr)[0]))) + +/* Store a little-endian 64-bit word into a byte buffer */ +#define le_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + (ptr)[4] = (uint8_t)(_x >> 32); \ + (ptr)[5] = (uint8_t)(_x >> 40); \ + (ptr)[6] = (uint8_t)(_x >> 48); \ + (ptr)[7] = (uint8_t)(_x >> 56); \ + } while (0) + +/* Load a big-endian 16-bit word from a byte buffer */ +#define be_load_word16(ptr) \ + ((((uint16_t)((ptr)[0])) << 8) | \ + ((uint16_t)((ptr)[1]))) + +/* Store a big-endian 16-bit word into a byte buffer */ +#define be_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 8); \ + (ptr)[1] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 16-bit word from a byte buffer */ +#define le_load_word16(ptr) \ + ((((uint16_t)((ptr)[1])) << 8) | \ + ((uint16_t)((ptr)[0]))) + +/* Store a little-endian 16-bit word into a byte buffer */ +#define le_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + } while (0) + +/* XOR a source byte buffer against a destination */ +#define lw_xor_block(dest, src, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ ^= *_src++; \ + --_len; \ + } \ + } while (0) + +/* XOR two source byte buffers and put the result in a destination buffer */ +#define lw_xor_block_2_src(dest, src1, src2, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ = *_src1++ ^ *_src2++; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time */ +#define lw_xor_block_2_dest(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest2++ = (*_dest++ ^= *_src++); \ + --_len; \ + } \ + } while (0) + +/* XOR two byte buffers and write to a destination which at the same + * time copying the contents of src2 to dest2 */ +#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = 
(src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src2++; \ + *_dest2++ = _temp; \ + *_dest++ = *_src1++ ^ _temp; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time. This version swaps the source value + * into the "dest" buffer */ +#define lw_xor_block_swap(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src++; \ + *_dest2++ = *_dest ^ _temp; \ + *_dest++ = _temp; \ + --_len; \ + } \ + } while (0) + +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + +/* Rotation macros for 32-bit arguments */ + +/* Generic left rotate */ +#define leftRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (32 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (32 - (bits))); \ + })) + +#if !LW_CRYPTO_ROTATE32_COMPOSED + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1(a) (leftRotate((a), 1)) +#define leftRotate2(a) (leftRotate((a), 2)) +#define leftRotate3(a) (leftRotate((a), 3)) +#define leftRotate4(a) (leftRotate((a), 4)) +#define leftRotate5(a) (leftRotate((a), 5)) +#define leftRotate6(a) (leftRotate((a), 6)) +#define leftRotate7(a) (leftRotate((a), 7)) +#define leftRotate8(a) (leftRotate((a), 8)) +#define leftRotate9(a) (leftRotate((a), 9)) +#define leftRotate10(a) (leftRotate((a), 10)) +#define leftRotate11(a) (leftRotate((a), 11)) +#define leftRotate12(a) (leftRotate((a), 12)) +#define leftRotate13(a) (leftRotate((a), 13)) +#define leftRotate14(a) (leftRotate((a), 14)) +#define leftRotate15(a) (leftRotate((a), 15)) +#define leftRotate16(a) (leftRotate((a), 16)) +#define leftRotate17(a) (leftRotate((a), 17)) +#define leftRotate18(a) (leftRotate((a), 18)) +#define leftRotate19(a) (leftRotate((a), 19)) +#define leftRotate20(a) (leftRotate((a), 20)) +#define leftRotate21(a) (leftRotate((a), 21)) +#define leftRotate22(a) (leftRotate((a), 22)) +#define leftRotate23(a) (leftRotate((a), 23)) +#define leftRotate24(a) (leftRotate((a), 24)) +#define leftRotate25(a) (leftRotate((a), 25)) +#define leftRotate26(a) (leftRotate((a), 26)) +#define leftRotate27(a) (leftRotate((a), 27)) +#define leftRotate28(a) (leftRotate((a), 28)) +#define leftRotate29(a) (leftRotate((a), 29)) +#define leftRotate30(a) (leftRotate((a), 30)) +#define leftRotate31(a) (leftRotate((a), 31)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1(a) (rightRotate((a), 1)) +#define rightRotate2(a) (rightRotate((a), 2)) +#define rightRotate3(a) (rightRotate((a), 3)) +#define rightRotate4(a) (rightRotate((a), 4)) +#define rightRotate5(a) (rightRotate((a), 5)) +#define rightRotate6(a) (rightRotate((a), 6)) +#define rightRotate7(a) (rightRotate((a), 7)) +#define rightRotate8(a) (rightRotate((a), 8)) +#define rightRotate9(a) (rightRotate((a), 9)) +#define rightRotate10(a) (rightRotate((a), 10)) +#define rightRotate11(a) (rightRotate((a), 11)) +#define rightRotate12(a) (rightRotate((a), 12)) +#define rightRotate13(a) (rightRotate((a), 13)) +#define rightRotate14(a) (rightRotate((a), 14)) +#define rightRotate15(a) (rightRotate((a), 15)) +#define rightRotate16(a) (rightRotate((a), 16)) +#define rightRotate17(a) (rightRotate((a), 17)) +#define rightRotate18(a) (rightRotate((a), 18)) +#define rightRotate19(a) (rightRotate((a), 19)) +#define rightRotate20(a) (rightRotate((a), 20)) +#define rightRotate21(a) (rightRotate((a), 21)) +#define rightRotate22(a) (rightRotate((a), 22)) +#define rightRotate23(a) (rightRotate((a), 23)) +#define rightRotate24(a) (rightRotate((a), 24)) +#define rightRotate25(a) (rightRotate((a), 25)) +#define rightRotate26(a) (rightRotate((a), 26)) +#define rightRotate27(a) (rightRotate((a), 27)) +#define rightRotate28(a) (rightRotate((a), 28)) +#define rightRotate29(a) (rightRotate((a), 29)) +#define rightRotate30(a) (rightRotate((a), 30)) +#define rightRotate31(a) (rightRotate((a), 31)) + +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) 
(leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Rotation macros for 64-bit arguments */ + +/* Generic left rotate */ +#define leftRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (64 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (64 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_64(a) (leftRotate_64((a), 1)) +#define leftRotate2_64(a) (leftRotate_64((a), 2)) +#define leftRotate3_64(a) (leftRotate_64((a), 3)) +#define leftRotate4_64(a) (leftRotate_64((a), 4)) +#define leftRotate5_64(a) (leftRotate_64((a), 5)) +#define leftRotate6_64(a) (leftRotate_64((a), 6)) +#define leftRotate7_64(a) (leftRotate_64((a), 7)) +#define leftRotate8_64(a) (leftRotate_64((a), 8)) +#define leftRotate9_64(a) (leftRotate_64((a), 9)) +#define leftRotate10_64(a) (leftRotate_64((a), 10)) +#define leftRotate11_64(a) (leftRotate_64((a), 11)) +#define leftRotate12_64(a) (leftRotate_64((a), 12)) +#define leftRotate13_64(a) (leftRotate_64((a), 13)) +#define leftRotate14_64(a) (leftRotate_64((a), 14)) +#define leftRotate15_64(a) (leftRotate_64((a), 15)) +#define leftRotate16_64(a) (leftRotate_64((a), 16)) +#define leftRotate17_64(a) (leftRotate_64((a), 17)) +#define leftRotate18_64(a) (leftRotate_64((a), 18)) +#define leftRotate19_64(a) (leftRotate_64((a), 19)) +#define leftRotate20_64(a) (leftRotate_64((a), 20)) +#define leftRotate21_64(a) (leftRotate_64((a), 21)) +#define leftRotate22_64(a) (leftRotate_64((a), 22)) +#define leftRotate23_64(a) (leftRotate_64((a), 23)) +#define leftRotate24_64(a) (leftRotate_64((a), 24)) +#define leftRotate25_64(a) (leftRotate_64((a), 25)) +#define leftRotate26_64(a) (leftRotate_64((a), 26)) +#define leftRotate27_64(a) (leftRotate_64((a), 27)) +#define leftRotate28_64(a) (leftRotate_64((a), 28)) +#define leftRotate29_64(a) (leftRotate_64((a), 29)) +#define leftRotate30_64(a) (leftRotate_64((a), 30)) +#define leftRotate31_64(a) (leftRotate_64((a), 31)) +#define leftRotate32_64(a) (leftRotate_64((a), 32)) +#define leftRotate33_64(a) (leftRotate_64((a), 33)) +#define leftRotate34_64(a) (leftRotate_64((a), 34)) +#define leftRotate35_64(a) (leftRotate_64((a), 35)) +#define leftRotate36_64(a) (leftRotate_64((a), 36)) +#define leftRotate37_64(a) (leftRotate_64((a), 37)) +#define leftRotate38_64(a) (leftRotate_64((a), 38)) +#define leftRotate39_64(a) (leftRotate_64((a), 39)) +#define leftRotate40_64(a) (leftRotate_64((a), 40)) +#define leftRotate41_64(a) (leftRotate_64((a), 41)) +#define leftRotate42_64(a) (leftRotate_64((a), 42)) +#define leftRotate43_64(a) (leftRotate_64((a), 43)) +#define leftRotate44_64(a) (leftRotate_64((a), 44)) +#define leftRotate45_64(a) (leftRotate_64((a), 45)) +#define leftRotate46_64(a) (leftRotate_64((a), 46)) +#define leftRotate47_64(a) (leftRotate_64((a), 47)) +#define leftRotate48_64(a) (leftRotate_64((a), 48)) +#define leftRotate49_64(a) (leftRotate_64((a), 49)) +#define leftRotate50_64(a) (leftRotate_64((a), 50)) +#define leftRotate51_64(a) (leftRotate_64((a), 51)) +#define leftRotate52_64(a) (leftRotate_64((a), 52)) +#define leftRotate53_64(a) (leftRotate_64((a), 53)) +#define leftRotate54_64(a) (leftRotate_64((a), 54)) +#define leftRotate55_64(a) (leftRotate_64((a), 55)) +#define leftRotate56_64(a) (leftRotate_64((a), 56)) +#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) +#define leftRotate58_64(a) (leftRotate_64((a), 58)) +#define leftRotate59_64(a) (leftRotate_64((a), 59)) +#define leftRotate60_64(a) (leftRotate_64((a), 60)) +#define leftRotate61_64(a) (leftRotate_64((a), 61)) +#define leftRotate62_64(a) (leftRotate_64((a), 62)) +#define leftRotate63_64(a) (leftRotate_64((a), 63)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_64(a) (rightRotate_64((a), 1)) +#define rightRotate2_64(a) (rightRotate_64((a), 2)) +#define rightRotate3_64(a) (rightRotate_64((a), 3)) +#define rightRotate4_64(a) (rightRotate_64((a), 4)) +#define rightRotate5_64(a) (rightRotate_64((a), 5)) +#define rightRotate6_64(a) (rightRotate_64((a), 6)) +#define rightRotate7_64(a) (rightRotate_64((a), 7)) +#define rightRotate8_64(a) (rightRotate_64((a), 8)) +#define rightRotate9_64(a) (rightRotate_64((a), 9)) +#define rightRotate10_64(a) (rightRotate_64((a), 10)) +#define rightRotate11_64(a) (rightRotate_64((a), 11)) +#define rightRotate12_64(a) (rightRotate_64((a), 12)) +#define rightRotate13_64(a) (rightRotate_64((a), 13)) +#define rightRotate14_64(a) (rightRotate_64((a), 14)) +#define rightRotate15_64(a) (rightRotate_64((a), 15)) +#define rightRotate16_64(a) (rightRotate_64((a), 16)) +#define rightRotate17_64(a) (rightRotate_64((a), 17)) +#define rightRotate18_64(a) (rightRotate_64((a), 18)) +#define rightRotate19_64(a) (rightRotate_64((a), 19)) +#define rightRotate20_64(a) (rightRotate_64((a), 20)) +#define rightRotate21_64(a) (rightRotate_64((a), 21)) +#define rightRotate22_64(a) (rightRotate_64((a), 22)) +#define rightRotate23_64(a) (rightRotate_64((a), 23)) +#define rightRotate24_64(a) (rightRotate_64((a), 24)) +#define rightRotate25_64(a) (rightRotate_64((a), 25)) +#define rightRotate26_64(a) (rightRotate_64((a), 26)) +#define rightRotate27_64(a) (rightRotate_64((a), 27)) +#define rightRotate28_64(a) (rightRotate_64((a), 28)) +#define rightRotate29_64(a) (rightRotate_64((a), 29)) +#define rightRotate30_64(a) (rightRotate_64((a), 30)) +#define rightRotate31_64(a) (rightRotate_64((a), 31)) +#define rightRotate32_64(a) (rightRotate_64((a), 32)) +#define rightRotate33_64(a) (rightRotate_64((a), 33)) +#define rightRotate34_64(a) (rightRotate_64((a), 34)) +#define rightRotate35_64(a) (rightRotate_64((a), 35)) +#define rightRotate36_64(a) (rightRotate_64((a), 36)) +#define rightRotate37_64(a) (rightRotate_64((a), 37)) +#define rightRotate38_64(a) (rightRotate_64((a), 38)) +#define rightRotate39_64(a) (rightRotate_64((a), 39)) +#define rightRotate40_64(a) (rightRotate_64((a), 40)) +#define rightRotate41_64(a) (rightRotate_64((a), 41)) +#define rightRotate42_64(a) (rightRotate_64((a), 42)) +#define rightRotate43_64(a) (rightRotate_64((a), 43)) +#define rightRotate44_64(a) (rightRotate_64((a), 44)) +#define rightRotate45_64(a) (rightRotate_64((a), 45)) +#define rightRotate46_64(a) (rightRotate_64((a), 46)) +#define rightRotate47_64(a) (rightRotate_64((a), 47)) +#define rightRotate48_64(a) (rightRotate_64((a), 48)) +#define rightRotate49_64(a) (rightRotate_64((a), 49)) +#define rightRotate50_64(a) (rightRotate_64((a), 50)) +#define rightRotate51_64(a) (rightRotate_64((a), 51)) +#define rightRotate52_64(a) (rightRotate_64((a), 52)) +#define rightRotate53_64(a) (rightRotate_64((a), 53)) +#define rightRotate54_64(a) (rightRotate_64((a), 54)) +#define rightRotate55_64(a) (rightRotate_64((a), 55)) +#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) +#define rightRotate57_64(a) (rightRotate_64((a), 57)) +#define rightRotate58_64(a) (rightRotate_64((a), 58)) +#define rightRotate59_64(a) (rightRotate_64((a), 59)) +#define rightRotate60_64(a) (rightRotate_64((a), 60)) +#define rightRotate61_64(a) (rightRotate_64((a), 61)) +#define rightRotate62_64(a) (rightRotate_64((a), 62)) +#define rightRotate63_64(a) (rightRotate_64((a), 63)) + +/* Rotate a 16-bit value left by a number of bits */ +#define leftRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (16 - (bits))); \ + })) + +/* Rotate a 16-bit value right by a number of bits */ +#define rightRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (16 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_16(a) (leftRotate_16((a), 1)) +#define leftRotate2_16(a) (leftRotate_16((a), 2)) +#define leftRotate3_16(a) (leftRotate_16((a), 3)) +#define leftRotate4_16(a) (leftRotate_16((a), 4)) +#define leftRotate5_16(a) (leftRotate_16((a), 5)) +#define leftRotate6_16(a) (leftRotate_16((a), 6)) +#define leftRotate7_16(a) (leftRotate_16((a), 7)) +#define leftRotate8_16(a) (leftRotate_16((a), 8)) +#define leftRotate9_16(a) (leftRotate_16((a), 9)) +#define leftRotate10_16(a) (leftRotate_16((a), 10)) +#define leftRotate11_16(a) (leftRotate_16((a), 11)) +#define leftRotate12_16(a) (leftRotate_16((a), 12)) +#define leftRotate13_16(a) (leftRotate_16((a), 13)) +#define leftRotate14_16(a) (leftRotate_16((a), 14)) +#define leftRotate15_16(a) (leftRotate_16((a), 15)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_16(a) (rightRotate_16((a), 1)) +#define rightRotate2_16(a) (rightRotate_16((a), 2)) +#define rightRotate3_16(a) (rightRotate_16((a), 3)) +#define rightRotate4_16(a) (rightRotate_16((a), 4)) +#define rightRotate5_16(a) (rightRotate_16((a), 5)) +#define rightRotate6_16(a) (rightRotate_16((a), 6)) +#define rightRotate7_16(a) (rightRotate_16((a), 7)) +#define rightRotate8_16(a) (rightRotate_16((a), 8)) +#define rightRotate9_16(a) (rightRotate_16((a), 9)) +#define rightRotate10_16(a) (rightRotate_16((a), 10)) +#define rightRotate11_16(a) (rightRotate_16((a), 11)) +#define rightRotate12_16(a) (rightRotate_16((a), 12)) +#define rightRotate13_16(a) (rightRotate_16((a), 13)) +#define rightRotate14_16(a) (rightRotate_16((a), 14)) +#define rightRotate15_16(a) (rightRotate_16((a), 15)) + +/* Rotate an 8-bit value left by a number of bits */ +#define leftRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (8 - (bits))); \ + })) + +/* Rotate an 8-bit value right by a number of bits */ +#define rightRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (8 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_8(a) (leftRotate_8((a), 1)) +#define leftRotate2_8(a) (leftRotate_8((a), 2)) +#define leftRotate3_8(a) (leftRotate_8((a), 3)) +#define leftRotate4_8(a) (leftRotate_8((a), 4)) +#define leftRotate5_8(a) (leftRotate_8((a), 5)) +#define leftRotate6_8(a) (leftRotate_8((a), 6)) +#define leftRotate7_8(a) (leftRotate_8((a), 7)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_8(a) (rightRotate_8((a), 1)) +#define rightRotate2_8(a) (rightRotate_8((a), 2)) +#define rightRotate3_8(a) (rightRotate_8((a), 3)) +#define rightRotate4_8(a) (rightRotate_8((a), 4)) +#define rightRotate5_8(a) (rightRotate_8((a), 5)) +#define rightRotate6_8(a) (rightRotate_8((a), 6)) +#define rightRotate7_8(a) (rightRotate_8((a), 7)) + +#endif diff --git a/knot/Implementations/crypto_hash/knot384/rhys/knot-hash.c b/knot/Implementations/crypto_hash/knot384/rhys/knot-hash.c new file mode 100644 index 0000000..a4edecd --- /dev/null +++ b/knot/Implementations/crypto_hash/knot384/rhys/knot-hash.c @@ -0,0 +1,186 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
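As a side note on the rotation macros above: the per-count forms (leftRotate1_64 through rightRotate7_8) all expand to the generic statement-expression versions, and exist only so that a port for a CPU without a barrel shifter can redefine individual counts with something cheaper. A minimal sketch of that idea, assuming a 16-bit word on an 8-bit target; the specialised macro below is illustrative and not part of this patch.

#include <stdint.h>

/* Generic form, identical in spirit to the macros above. */
#define rightRotate_16_generic(a, bits) \
    (__extension__ ({ \
        uint16_t _temp = (a); \
        (uint16_t)((_temp >> (bits)) | (_temp << (16 - (bits)))); \
    }))

/* Hypothetical specialisation: rotating a 16-bit word by 8 is a byte swap,
 * which an 8-bit port could provide instead of eight single-bit shifts. */
#define rightRotate8_16_fast(a) \
    (__extension__ ({ \
        uint16_t _v = (a); \
        (uint16_t)((_v >> 8) | (_v << 8)); \
    }))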
+ */ + +#include "knot.h" +#include "internal-knot.h" +#include + +aead_hash_algorithm_t const knot_hash_256_256_algorithm = { + "KNOT-HASH-256-256", + sizeof(int), + KNOT_HASH_256_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + knot_hash_256_256, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +aead_hash_algorithm_t const knot_hash_256_384_algorithm = { + "KNOT-HASH-256-384", + sizeof(int), + KNOT_HASH_256_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + knot_hash_256_384, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +aead_hash_algorithm_t const knot_hash_384_384_algorithm = { + "KNOT-HASH-384-384", + sizeof(int), + KNOT_HASH_384_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + knot_hash_384_384, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +aead_hash_algorithm_t const knot_hash_512_512_algorithm = { + "KNOT-HASH-512-512", + sizeof(int), + KNOT_HASH_512_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + knot_hash_512_512, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +/** + * \brief Input rate for KNOT-HASH-256-256. + */ +#define KNOT_HASH_256_256_RATE 4 + +/** + * \brief Input rate for KNOT-HASH-256-384. + */ +#define KNOT_HASH_256_384_RATE 16 + +/** + * \brief Input rate for KNOT-HASH-384-384. + */ +#define KNOT_HASH_384_384_RATE 6 + +/** + * \brief Input rate for KNOT-HASH-512-512. + */ +#define KNOT_HASH_512_512_RATE 8 + +int knot_hash_256_256 + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + knot256_state_t state; + unsigned temp; + memset(state.B, 0, sizeof(state.B)); + while (inlen >= KNOT_HASH_256_256_RATE) { + lw_xor_block(state.B, in, KNOT_HASH_256_256_RATE); + knot256_permute_7(&state, 68); + in += KNOT_HASH_256_256_RATE; + inlen -= KNOT_HASH_256_256_RATE; + } + temp = (unsigned)inlen; + lw_xor_block(state.B, in, temp); + state.B[temp] ^= 0x01; + knot256_permute_7(&state, 68); + memcpy(out, state.B, KNOT_HASH_256_SIZE / 2); + knot256_permute_7(&state, 68); + memcpy(out + KNOT_HASH_256_SIZE / 2, state.B, KNOT_HASH_256_SIZE / 2); + return 0; +} + +int knot_hash_256_384 + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + knot384_state_t state; + unsigned temp; + memset(state.B, 0, sizeof(state.B)); + state.B[sizeof(state.B) - 1] ^= 0x80; + while (inlen >= KNOT_HASH_256_384_RATE) { + lw_xor_block(state.B, in, KNOT_HASH_256_384_RATE); + knot384_permute_7(&state, 80); + in += KNOT_HASH_256_384_RATE; + inlen -= KNOT_HASH_256_384_RATE; + } + temp = (unsigned)inlen; + lw_xor_block(state.B, in, temp); + state.B[temp] ^= 0x01; + knot384_permute_7(&state, 80); + memcpy(out, state.B, KNOT_HASH_256_SIZE / 2); + knot384_permute_7(&state, 80); + memcpy(out + KNOT_HASH_256_SIZE / 2, state.B, KNOT_HASH_256_SIZE / 2); + return 0; +} + +int knot_hash_384_384 + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + knot384_state_t state; + unsigned temp; + memset(state.B, 0, sizeof(state.B)); + while (inlen >= KNOT_HASH_384_384_RATE) { + lw_xor_block(state.B, in, KNOT_HASH_384_384_RATE); + knot384_permute_7(&state, 104); + in += KNOT_HASH_384_384_RATE; + inlen -= KNOT_HASH_384_384_RATE; + } + temp = (unsigned)inlen; + lw_xor_block(state.B, in, temp); + state.B[temp] ^= 0x01; + knot384_permute_7(&state, 104); + memcpy(out, state.B, KNOT_HASH_384_SIZE / 
2); + knot384_permute_7(&state, 104); + memcpy(out + KNOT_HASH_384_SIZE / 2, state.B, KNOT_HASH_384_SIZE / 2); + return 0; +} + +int knot_hash_512_512 + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + knot512_state_t state; + unsigned temp; + memset(state.B, 0, sizeof(state.B)); + while (inlen >= KNOT_HASH_512_512_RATE) { + lw_xor_block(state.B, in, KNOT_HASH_512_512_RATE); + knot512_permute_8(&state, 140); + in += KNOT_HASH_512_512_RATE; + inlen -= KNOT_HASH_512_512_RATE; + } + temp = (unsigned)inlen; + lw_xor_block(state.B, in, temp); + state.B[temp] ^= 0x01; + knot512_permute_8(&state, 140); + memcpy(out, state.B, KNOT_HASH_512_SIZE / 2); + knot512_permute_8(&state, 140); + memcpy(out + KNOT_HASH_512_SIZE / 2, state.B, KNOT_HASH_512_SIZE / 2); + return 0; +} diff --git a/knot/Implementations/crypto_hash/knot384/rhys/knot.h b/knot/Implementations/crypto_hash/knot384/rhys/knot.h new file mode 100644 index 0000000..e2c5198 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot384/rhys/knot.h @@ -0,0 +1,459 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_KNOT_H +#define LWCRYPTO_KNOT_H + +#include "aead-common.h" + +/** + * \file knot.h + * \brief KNOT authenticated encryption and hash algorithms. + * + * KNOT is a family of authenticated encryption and hash algorithms built + * around a permutation and the MonkeyDuplex sponge construction. The + * family members are: + * + * \li KNOT-AEAD-128-256 with a 128-bit key, a 128-bit nonce, and a + * 128-bit tag, built around a 256-bit permutation. This is the primary + * encryption member of the family. + * \li KNOT-AEAD-128-384 with a 128-bit key, a 128-bit nonce, and a + * 128-bit tag, built around a 384-bit permutation. + * \li KNOT-AEAD-192-384 with a 192-bit key, a 192-bit nonce, and a + * 192-bit tag, built around a 384-bit permutation. + * \li KNOT-AEAD-256-512 with a 256-bit key, a 256-bit nonce, and a + * 256-bit tag, built around a 512-bit permutation. + * \li KNOT-HASH-256-256 with a 256-bit hash output, built around a + * 256-bit permutation. This is the primary hashing member of the family. + * \li KNOT-HASH-256-384 with a 256-bit hash output, built around a + * 384-bit permutation. + * \li KNOT-HASH-384-384 with a 384-bit hash output, built around a + * 384-bit permutation. + * \li KNOT-HASH-512-512 with a 512-bit hash output, built around a + * 512-bit permutation. 
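All four knot_hash_*_* functions above share one absorb/pad/squeeze skeleton: XOR full rate-sized blocks into the front of the state with a permutation call after each, XOR in the final partial block, add the 0x01 padding byte, then read the digest out in two halves with one extra permutation in between (KNOT-HASH-256-384 additionally sets the top bit of the initial state, as its code shows). A condensed sketch of that skeleton, with perm, RATE and HASH_SIZE standing in for the per-variant permutation, rate and output size; it is a paraphrase of the functions above, not code from the patch.

#include <stdint.h>
#include <string.h>

/* Hypothetical condensed form of the knot_hash_*_* control flow. */
static void sponge_hash(uint8_t *out, const uint8_t *in, size_t inlen,
                        uint8_t *state, size_t state_len,
                        size_t RATE, size_t HASH_SIZE,
                        void (*perm)(uint8_t *state))
{
    memset(state, 0, state_len);              /* all-zero initial state     */
    while (inlen >= RATE) {                   /* absorb full rate blocks    */
        for (size_t i = 0; i < RATE; ++i)
            state[i] ^= in[i];
        perm(state);
        in += RATE;
        inlen -= RATE;
    }
    for (size_t i = 0; i < inlen; ++i)        /* absorb the final partial   */
        state[i] ^= in[i];
    state[inlen] ^= 0x01;                     /* padding byte               */
    perm(state);
    memcpy(out, state, HASH_SIZE / 2);        /* squeeze first half         */
    perm(state);
    memcpy(out + HASH_SIZE / 2, state, HASH_SIZE / 2);  /* and second half  */
}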
+ * + * References: https://csrc.nist.gov/CSRC/media/Projects/lightweight-cryptography/documents/round-2/spec-doc-rnd2/knot-spec-round.pdf + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the key for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. + */ +#define KNOT_AEAD_128_KEY_SIZE 16 + +/** + * \brief Size of the authentication tag for KNOT-AEAD-128-256 and + * KNOT-AEAD-128-384. + */ +#define KNOT_AEAD_128_TAG_SIZE 16 + +/** + * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. + */ +#define KNOT_AEAD_128_NONCE_SIZE 16 + +/** + * \brief Size of the key for KNOT-AEAD-192-384. + */ +#define KNOT_AEAD_192_KEY_SIZE 24 + +/** + * \brief Size of the authentication tag for KNOT-AEAD-192-384. + */ +#define KNOT_AEAD_192_TAG_SIZE 24 + +/** + * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-192-384. + */ +#define KNOT_AEAD_192_NONCE_SIZE 24 + +/** + * \brief Size of the key for KNOT-AEAD-256-512. + */ +#define KNOT_AEAD_256_KEY_SIZE 32 + +/** + * \brief Size of the authentication tag for KNOT-AEAD-256-512. + */ +#define KNOT_AEAD_256_TAG_SIZE 32 + +/** + * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. + */ +#define KNOT_AEAD_256_NONCE_SIZE 32 + +/** + * \brief Size of the hash for KNOT-HASH-256-256 and KNOT-HASH-256-384. + */ +#define KNOT_HASH_256_SIZE 32 + +/** + * \brief Size of the hash for KNOT-HASH-384-384. + */ +#define KNOT_HASH_384_SIZE 48 + +/** + * \brief Size of the hash for KNOT-HASH-512-512. + */ +#define KNOT_HASH_512_SIZE 64 + +/** + * \brief Meta-information block for the KNOT-AEAD-128-256 cipher. + */ +extern aead_cipher_t const knot_aead_128_256_cipher; + +/** + * \brief Meta-information block for the KNOT-AEAD-128-384 cipher. + */ +extern aead_cipher_t const knot_aead_128_384_cipher; + +/** + * \brief Meta-information block for the KNOT-AEAD-192-384 cipher. + */ +extern aead_cipher_t const knot_aead_192_384_cipher; + +/** + * \brief Meta-information block for the KNOT-AEAD-256-512 cipher. + */ +extern aead_cipher_t const knot_aead_256_512_cipher; + +/** + * \brief Meta-information block for the KNOT-HASH-256-256 algorithm. + */ +extern aead_hash_algorithm_t const knot_hash_256_256_algorithm; + +/** + * \brief Meta-information block for the KNOT-HASH-256-384 algorithm. + */ +extern aead_hash_algorithm_t const knot_hash_256_384_algorithm; + +/** + * \brief Meta-information block for the KNOT-HASH-384-384 algorithm. + */ +extern aead_hash_algorithm_t const knot_hash_384_384_algorithm; + +/** + * \brief Meta-information block for the KNOT-HASH-512-512 algorithm. + */ +extern aead_hash_algorithm_t const knot_hash_512_512_algorithm; + +/** + * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-256. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. 
+ * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa knot_aead_128_256_decrypt() + */ +int knot_aead_128_256_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-256. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa knot_aead_128_256_encrypt() + */ +int knot_aead_128_256_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-384. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa knot_aead_128_384_decrypt() + */ +int knot_aead_128_384_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-384. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. 
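The encrypt/decrypt pairs declared here all use the same NIST/SUPERCOP-style calling convention, so buffer sizes follow directly from the KNOT_AEAD_*_KEY_SIZE, _NONCE_SIZE and _TAG_SIZE constants above. A brief usage sketch for the 128-256 pair; the message, associated data and zero key/nonce are placeholders, and a real caller must use a fresh nonce for every message under the same key.

#include <string.h>
#include "knot.h"

int knot_aead_roundtrip_example(void)
{
    unsigned char key[KNOT_AEAD_128_KEY_SIZE] = {0};    /* 16 bytes         */
    unsigned char npub[KNOT_AEAD_128_NONCE_SIZE] = {0}; /* 16 bytes         */
    unsigned char ad[4] = "hdr";                        /* associated data  */
    unsigned char msg[32] = "example plaintext";        /* zero-padded      */
    unsigned char ct[sizeof(msg) + KNOT_AEAD_128_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    if (knot_aead_128_256_encrypt(ct, &ctlen, msg, sizeof(msg),
                                  ad, sizeof(ad), 0, npub, key) != 0)
        return -1;
    /* On tag failure the plaintext buffer is zeroed and -1 is returned. */
    if (knot_aead_128_256_decrypt(pt, &ptlen, 0, ct, ctlen,
                                  ad, sizeof(ad), npub, key) != 0)
        return -1;
    return memcmp(pt, msg, sizeof(msg)) == 0 ? 0 : -1;
}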
+ * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa knot_aead_128_384_encrypt() + */ +int knot_aead_128_384_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + + +/** + * \brief Encrypts and authenticates a packet with KNOT-AEAD-192-384. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa knot_aead_192_384_decrypt() + */ +int knot_aead_192_384_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with KNOT-AEAD-192-384. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa knot_aead_192_384_encrypt() + */ +int knot_aead_192_384_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with KNOT-AEAD-256-512. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. 
+ * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa knot_aead_256_512_decrypt() + */ +int knot_aead_256_512_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with KNOT-AEAD-256-512. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa knot_aead_256_512_encrypt() + */ +int knot_aead_256_512_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data with KNOT-HASH-256-256. + * + * \param out Buffer to receive the hash output which must be at least + * KNOT_HASH_256_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int knot_hash_256_256 + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Hashes a block of input data with KNOT-HASH-256-384. + * + * \param out Buffer to receive the hash output which must be at least + * KNOT_HASH_256_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int knot_hash_256_384 + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Hashes a block of input data with KNOT-HASH-384-384. + * + * \param out Buffer to receive the hash output which must be at least + * KNOT_HASH_384_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. 
+ */ +int knot_hash_384_384 + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Hashes a block of input data with KNOT-HASH-512-512. + * + * \param out Buffer to receive the hash output which must be at least + * KNOT_HASH_512_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int knot_hash_512_512 + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/knot/Implementations/crypto_hash/knot512/rhys-avr/aead-common.c b/knot/Implementations/crypto_hash/knot512/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/knot/Implementations/crypto_hash/knot512/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/knot/Implementations/crypto_hash/knot512/rhys-avr/aead-common.h b/knot/Implementations/crypto_hash/knot512/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/knot/Implementations/crypto_hash/knot512/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
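The (accum - 1) >> 8 step in aead_check_tag above is what makes the comparison branch-free: accum is the OR of every byte difference and therefore lies in 0..255, so accum - 1 is negative exactly when all tag bytes matched, and the arithmetic right shift smears that sign bit into an all-ones mask. The small check below merely restates those two cases; it is not part of the patch.

#include <assert.h>

/* 0 -> -1 (all tag bytes matched), anything in 1..255 -> 0 (mismatch).
 * Relies on arithmetic right shift of a negative int, as the code above does. */
static int mask_from_accum(int accum)
{
    return (accum - 1) >> 8;
}

int main(void)
{
    assert(mask_from_accum(0)    == -1); /* keep plaintext; ~(-1) == 0 is returned   */
    assert(mask_from_accum(0x5a) == 0);  /* zero the plaintext; ~0 == -1 is returned */
    return 0;
}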
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. 
- * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
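The aead_hash_algorithm_t block defined above (and its aead_cipher_t counterpart) lets a test harness drive any algorithm through a single table; the KNOT hash descriptors earlier in this patch, for example, fill only the all-in-one hash slot and leave the incremental and XOF pointers null. A short sketch of table-driven use, assuming callers check for the null slots; the helper name is illustrative.

#include <stdio.h>
#include "knot.h"

/* Hash through whatever a meta-information block describes. */
static int hash_with_descriptor(const aead_hash_algorithm_t *alg,
                                unsigned char *out,
                                const unsigned char *in,
                                unsigned long long inlen)
{
    if (!alg->hash)
        return -1;  /* descriptor provides no all-in-one hash function */
    printf("hashing with %s (%u-byte digest)\n", alg->name, alg->hash_len);
    return alg->hash(out, in, inlen);
}

/* e.g. hash_with_descriptor(&knot_hash_256_256_algorithm, out, in, inlen); */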
- * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_hash/knot512/rhys-avr/api.h b/knot/Implementations/crypto_hash/knot512/rhys-avr/api.h deleted file mode 100644 index de9380d..0000000 --- a/knot/Implementations/crypto_hash/knot512/rhys-avr/api.h +++ /dev/null @@ -1 +0,0 @@ -#define CRYPTO_BYTES 64 diff --git a/knot/Implementations/crypto_hash/knot512/rhys-avr/hash.c b/knot/Implementations/crypto_hash/knot512/rhys-avr/hash.c deleted file mode 100644 index 7c0a3b3..0000000 --- a/knot/Implementations/crypto_hash/knot512/rhys-avr/hash.c +++ /dev/null @@ -1,8 +0,0 @@ - -#include "knot.h" - -int crypto_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - return knot_hash_512_512(out, in, inlen); -} diff --git a/knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot-256-avr.S b/knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot-256-avr.S deleted file mode 100644 index 15e6389..0000000 --- a/knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot-256-avr.S +++ /dev/null @@ -1,1093 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_6, @object - .size table_6, 52 -table_6: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 33 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 49 - .byte 34 - .byte 5 - .byte 10 - .byte 20 - .byte 41 - .byte 19 - .byte 39 - .byte 15 - .byte 30 - .byte 61 - .byte 58 - .byte 52 - .byte 40 - .byte 17 - .byte 35 - .byte 7 - .byte 14 - .byte 28 - .byte 57 - .byte 50 - .byte 36 - .byte 9 - .byte 18 - .byte 37 - .byte 11 - .byte 22 - .byte 45 - .byte 27 - .byte 55 - .byte 46 - .byte 29 - .byte 59 - .byte 54 - .byte 44 - .byte 25 - .byte 51 - .byte 38 - .byte 13 - .byte 26 - .byte 53 - .byte 42 - - .text -.global knot256_permute_6 - .type knot256_permute_6, @function -knot256_permute_6: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 57 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r8 - std Y+18,r9 - std 
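table_6 above holds the 52 round constants of knot256_permute_6, one byte per round, XORed into the first byte of row 0 by the eor r18,r23 inside the round loop. The listed values are consistent with a 6-bit LFSR stepped from 1 with feedback bit5 XOR bit4 (x^6 + x^5 + 1); the sketch below reproduces that sequence and was reconstructed by inspecting the table, not taken from this patch.

#include <stdio.h>

/* Regenerate the table_6 sequence: 1 2 4 8 16 33 3 6 12 24 49 34 ... */
int main(void)
{
    unsigned rc = 1;
    for (int round = 0; round < 52; ++round) {
        printf("%u ", rc);
        unsigned fb = ((rc >> 5) ^ (rc >> 4)) & 1; /* taps at bits 5 and 4 */
        rc = ((rc << 1) | fb) & 0x3F;              /* keep six bits        */
    }
    putchar('\n');
    return 0;
}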
Y+19,r10 - std Y+20,r11 - std Y+21,r12 - std Y+22,r13 - std Y+23,r14 - std Y+24,r15 - push r31 - push r30 - ldi r30,lo8(table_6) - ldi r31,hi8(table_6) -#if defined(RAMPZ) - ldi r17,hh8(table_6) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -59: -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - eor r18,r23 - inc r30 - ldd r23,Y+1 - ldd r4,Y+9 - ldd r5,Y+17 - mov r24,r18 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+33,r7 - mov r16,r5 - eor r16,r24 - mov r8,r23 - or r8,r4 - eor r8,r16 - mov r24,r23 - eor r24,r5 - mov r18,r25 - and r18,r16 - eor r18,r24 - mov r6,r8 - and r6,r24 - eor r6,r25 - std Y+25,r6 - ldd r23,Y+2 - ldd r4,Y+10 - ldd r5,Y+18 - mov r24,r19 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+34,r7 - mov r16,r5 - eor r16,r24 - mov r9,r23 - or r9,r4 - eor r9,r16 - mov r24,r23 - eor r24,r5 - mov r19,r25 - and r19,r16 - eor r19,r24 - mov r6,r9 - and r6,r24 - eor r6,r25 - std Y+26,r6 - ldd r23,Y+3 - ldd r4,Y+11 - ldd r5,Y+19 - mov r24,r20 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+35,r7 - mov r16,r5 - eor r16,r24 - mov r10,r23 - or r10,r4 - eor r10,r16 - mov r24,r23 - eor r24,r5 - mov r20,r25 - and r20,r16 - eor r20,r24 - mov r6,r10 - and r6,r24 - eor r6,r25 - std Y+27,r6 - ldd r23,Y+4 - ldd r4,Y+12 - ldd r5,Y+20 - mov r24,r21 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+36,r7 - mov r16,r5 - eor r16,r24 - mov r11,r23 - or r11,r4 - eor r11,r16 - mov r24,r23 - eor r24,r5 - mov r21,r25 - and r21,r16 - eor r21,r24 - mov r6,r11 - and r6,r24 - eor r6,r25 - std Y+28,r6 - ldd r23,Y+5 - ldd r4,Y+13 - ldd r5,Y+21 - mov r24,r26 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+37,r7 - mov r16,r5 - eor r16,r24 - mov r12,r23 - or r12,r4 - eor r12,r16 - mov r24,r23 - eor r24,r5 - mov r26,r25 - and r26,r16 - eor r26,r24 - mov r6,r12 - and r6,r24 - eor r6,r25 - std Y+29,r6 - ldd r23,Y+6 - ldd r4,Y+14 - ldd r5,Y+22 - mov r24,r27 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+38,r7 - mov r16,r5 - eor r16,r24 - mov r13,r23 - or r13,r4 - eor r13,r16 - mov r24,r23 - eor r24,r5 - mov r27,r25 - and r27,r16 - eor r27,r24 - mov r6,r13 - and r6,r24 - eor r6,r25 - std Y+30,r6 - ldd r23,Y+7 - ldd r4,Y+15 - ldd r5,Y+23 - mov r24,r2 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+39,r7 - mov r16,r5 - eor r16,r24 - mov r14,r23 - or r14,r4 - eor r14,r16 - mov r24,r23 - eor r24,r5 - mov r2,r25 - and r2,r16 - eor r2,r24 - mov r6,r14 - and r6,r24 - eor r6,r25 - std Y+31,r6 - ldd r23,Y+8 - ldd r4,Y+16 - ldd r5,Y+24 - mov r24,r3 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+40,r7 - mov r16,r5 - eor r16,r24 - mov r15,r23 - or r15,r4 - eor r15,r16 - mov r24,r23 - eor r24,r5 - mov r3,r25 - and r3,r16 - eor r3,r24 - mov r6,r15 - and r6,r24 - eor r6,r25 - std Y+32,r6 - std Y+9,r15 - std Y+10,r8 - std Y+11,r9 - std Y+12,r10 - std Y+13,r11 - std Y+14,r12 - std Y+15,r13 - std Y+16,r14 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - 
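The com/and/or/eor register block repeated eight times above is one byte column of KNOT's bit-sliced S-box, applied across the four state rows. A transcription of one column into C makes the data flow easier to follow; the variable names are illustrative and the expressions are read off the register operations above rather than copied from the reference C sources.

#include <stdint.h>

/* One column of the bit-sliced S-box as computed by the assembly above:
 * a0..a3 are the same-numbered bytes of rows 0..3; out1..out3 are written
 * to the scratch rows before the row rotations, while out0 stays in
 * registers as the new row-0 byte. */
static void knot_sbox_column(uint8_t a0, uint8_t a1, uint8_t a2, uint8_t a3,
                             uint8_t *out0, uint8_t *out1,
                             uint8_t *out2, uint8_t *out3)
{
    uint8_t t1 = (uint8_t)~a0;                 /* com                */
    uint8_t t3 = (uint8_t)((a1 & t1) ^ a2);    /* and/eor into r25   */
    uint8_t b3 = (uint8_t)(a3 ^ t3);           /* stored at Y+33..40 */
    uint8_t t6 = (uint8_t)(a3 ^ t1);
    uint8_t b2 = (uint8_t)((a1 | a2) ^ t6);
    uint8_t t5 = (uint8_t)(a1 ^ a3);
    *out0 = (uint8_t)((t3 & t6) ^ t5);         /* new row-0 byte     */
    *out1 = (uint8_t)((b2 & t5) ^ t3);         /* stored at Y+25..32 */
    *out2 = b2;
    *out3 = b3;
}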
ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - ldd r12,Y+37 - ldd r13,Y+38 - ldd r14,Y+39 - ldd r15,Y+40 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+17,r13 - std Y+18,r14 - std Y+19,r15 - std Y+20,r8 - std Y+21,r9 - std Y+22,r10 - std Y+23,r11 - std Y+24,r12 - dec r22 - breq 5322f - rjmp 59b -5322: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - ldd r12,Y+5 - ldd r13,Y+6 - ldd r14,Y+7 - ldd r15,Y+8 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - ldd r8,Y+17 - ldd r9,Y+18 - ldd r10,Y+19 - ldd r11,Y+20 - ldd r12,Y+21 - ldd r13,Y+22 - ldd r14,Y+23 - ldd r15,Y+24 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - adiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot256_permute_6, .-knot256_permute_6 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot256_permute_7 - .type knot256_permute_7, @function -knot256_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 57 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std 
Y+8,r15 - ldd r8,Z+16 - ldd r9,Z+17 - ldd r10,Z+18 - ldd r11,Z+19 - ldd r12,Z+20 - ldd r13,Z+21 - ldd r14,Z+22 - ldd r15,Z+23 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r8 - std Y+18,r9 - std Y+19,r10 - std Y+20,r11 - std Y+21,r12 - std Y+22,r13 - std Y+23,r14 - std Y+24,r15 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r17,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -59: -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - eor r18,r23 - inc r30 - ldd r23,Y+1 - ldd r4,Y+9 - ldd r5,Y+17 - mov r24,r18 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+33,r7 - mov r16,r5 - eor r16,r24 - mov r8,r23 - or r8,r4 - eor r8,r16 - mov r24,r23 - eor r24,r5 - mov r18,r25 - and r18,r16 - eor r18,r24 - mov r6,r8 - and r6,r24 - eor r6,r25 - std Y+25,r6 - ldd r23,Y+2 - ldd r4,Y+10 - ldd r5,Y+18 - mov r24,r19 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+34,r7 - mov r16,r5 - eor r16,r24 - mov r9,r23 - or r9,r4 - eor r9,r16 - mov r24,r23 - eor r24,r5 - mov r19,r25 - and r19,r16 - eor r19,r24 - mov r6,r9 - and r6,r24 - eor r6,r25 - std Y+26,r6 - ldd r23,Y+3 - ldd r4,Y+11 - ldd r5,Y+19 - mov r24,r20 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+35,r7 - mov r16,r5 - eor r16,r24 - mov r10,r23 - or r10,r4 - eor r10,r16 - mov r24,r23 - eor r24,r5 - mov r20,r25 - and r20,r16 - eor r20,r24 - mov r6,r10 - and r6,r24 - eor r6,r25 - std Y+27,r6 - ldd r23,Y+4 - ldd r4,Y+12 - ldd r5,Y+20 - mov r24,r21 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+36,r7 - mov r16,r5 - eor r16,r24 - mov r11,r23 - or r11,r4 - eor r11,r16 - mov r24,r23 - eor r24,r5 - mov r21,r25 - and r21,r16 - eor r21,r24 - mov r6,r11 - and r6,r24 - eor r6,r25 - std Y+28,r6 - ldd r23,Y+5 - ldd r4,Y+13 - ldd r5,Y+21 - mov r24,r26 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+37,r7 - mov r16,r5 - eor r16,r24 - mov r12,r23 - or r12,r4 - eor r12,r16 - mov r24,r23 - eor r24,r5 - mov r26,r25 - and r26,r16 - eor r26,r24 - mov r6,r12 - and r6,r24 - eor r6,r25 - std Y+29,r6 - ldd r23,Y+6 - ldd r4,Y+14 - ldd r5,Y+22 - mov r24,r27 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+38,r7 - mov r16,r5 - eor r16,r24 - mov r13,r23 - or r13,r4 - eor r13,r16 - mov r24,r23 - eor r24,r5 - mov r27,r25 - and r27,r16 - eor r27,r24 - mov r6,r13 - and r6,r24 - eor r6,r25 - std Y+30,r6 - ldd r23,Y+7 - ldd r4,Y+15 - ldd r5,Y+23 - mov r24,r2 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+39,r7 - mov r16,r5 - eor r16,r24 - mov r14,r23 - or r14,r4 - eor r14,r16 - mov r24,r23 - eor r24,r5 - mov r2,r25 - and r2,r16 - eor r2,r24 - mov r6,r14 - and r6,r24 - eor r6,r25 - std Y+31,r6 - ldd r23,Y+8 - ldd r4,Y+16 - ldd r5,Y+24 - mov r24,r3 - com r24 - mov r25,r23 - and r25,r24 - eor r25,r4 - mov r7,r5 - eor r7,r25 - std Y+40,r7 - mov r16,r5 - eor r16,r24 - mov r15,r23 - or r15,r4 - eor r15,r16 - mov r24,r23 - eor r24,r5 - mov r3,r25 - and r3,r16 - eor r3,r24 - mov r6,r15 - and r6,r24 - eor r6,r25 - std Y+32,r6 - std Y+9,r15 - std Y+10,r8 - std Y+11,r9 - 
std Y+12,r10 - std Y+13,r11 - std Y+14,r12 - std Y+15,r13 - std Y+16,r14 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - std Y+5,r12 - std Y+6,r13 - std Y+7,r14 - std Y+8,r15 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - ldd r12,Y+37 - ldd r13,Y+38 - ldd r14,Y+39 - ldd r15,Y+40 - lsl r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r8,r1 - std Y+17,r13 - std Y+18,r14 - std Y+19,r15 - std Y+20,r8 - std Y+21,r9 - std Y+22,r10 - std Y+23,r11 - std Y+24,r12 - dec r22 - breq 5322f - rjmp 59b -5322: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - std Z+4,r26 - std Z+5,r27 - std Z+6,r2 - std Z+7,r3 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - ldd r12,Y+5 - ldd r13,Y+6 - ldd r14,Y+7 - ldd r15,Y+8 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - std Z+16,r8 - std Z+17,r9 - std Z+18,r10 - std Z+19,r11 - std Z+20,r12 - std Z+21,r13 - std Z+22,r14 - std Z+23,r15 - ldd r8,Y+17 - ldd r9,Y+18 - ldd r10,Y+19 - ldd r11,Y+20 - ldd r12,Y+21 - ldd r13,Y+22 - ldd r14,Y+23 - ldd r15,Y+24 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - adiw r28,40 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot256_permute_7, .-knot256_permute_7 - -#endif diff --git a/knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot-384-avr.S b/knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot-384-avr.S deleted file mode 100644 index 4d15898..0000000 --- a/knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot-384-avr.S +++ /dev/null @@ -1,833 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 
46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot384_permute_7 - .type knot384_permute_7, @function -knot384_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,72 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 87 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - ldd r4,Z+16 - ldd r5,Z+17 - ldd r6,Z+18 - ldd r7,Z+19 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - std Y+13,r26 - std Y+14,r27 - std Y+15,r2 - std Y+16,r3 - std Y+17,r4 - std Y+18,r5 - std Y+19,r6 - std Y+20,r7 - std Y+21,r8 - std Y+22,r9 - std Y+23,r10 - std Y+24,r11 - ldd r26,Z+24 - ldd r27,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r4,Z+28 - ldd r5,Z+29 - ldd r6,Z+30 - ldd r7,Z+31 - ldd r8,Z+32 - ldd r9,Z+33 - ldd r10,Z+34 - ldd r11,Z+35 - std Y+25,r26 - std Y+26,r27 - std Y+27,r2 - std Y+28,r3 - std Y+29,r4 - std Y+30,r5 - std Y+31,r6 - std Y+32,r7 - std Y+33,r8 - std Y+34,r9 - std Y+35,r10 - std Y+36,r11 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+37,r26 - std Y+38,r27 - std Y+39,r2 - std Y+40,r3 - std Y+41,r4 - std Y+42,r5 - std Y+43,r6 - std Y+44,r7 - std Y+45,r8 - std Y+46,r9 - std Y+47,r10 - std Y+48,r11 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r24,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif -99: - ldd r12,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - inc r30 - ldd r18,Y+13 - ldd r19,Y+25 - ldd r20,Y+37 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+61,r23 - mov r14,r20 - eor r14,r12 - mov r26,r18 - or r26,r19 - eor r26,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+1,r21 - mov r21,r26 - and r21,r12 - eor r21,r13 - std Y+49,r21 - ldd r12,Y+2 - ldd r18,Y+14 - ldd r19,Y+26 - ldd r20,Y+38 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+62,r23 - mov r14,r20 - eor r14,r12 - mov r27,r18 - or r27,r19 - eor r27,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+2,r21 - mov r21,r27 - and r21,r12 - eor r21,r13 - std Y+50,r21 - ldd r12,Y+3 - ldd r18,Y+15 - ldd r19,Y+27 - ldd r20,Y+39 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - std Y+63,r23 - mov r14,r20 - eor r14,r12 - mov r2,r18 - or r2,r19 - eor r2,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+3,r21 - mov r21,r2 - and r21,r12 - eor r21,r13 - std Y+51,r21 - ldd r12,Y+4 - ldd r18,Y+16 - ldd r19,Y+28 - ldd r20,Y+40 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,192 - sbci r29,255 - st Y,r23 - subi r28,64 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r3,r18 - or r3,r19 - eor r3,r14 - 
mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+4,r21 - mov r21,r3 - and r21,r12 - eor r21,r13 - std Y+52,r21 - ldd r12,Y+5 - ldd r18,Y+17 - ldd r19,Y+29 - ldd r20,Y+41 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,191 - sbci r29,255 - st Y,r23 - subi r28,65 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r4,r18 - or r4,r19 - eor r4,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+5,r21 - mov r21,r4 - and r21,r12 - eor r21,r13 - std Y+53,r21 - ldd r12,Y+6 - ldd r18,Y+18 - ldd r19,Y+30 - ldd r20,Y+42 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,190 - sbci r29,255 - st Y,r23 - subi r28,66 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r5,r18 - or r5,r19 - eor r5,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+6,r21 - mov r21,r5 - and r21,r12 - eor r21,r13 - std Y+54,r21 - ldd r12,Y+7 - ldd r18,Y+19 - ldd r19,Y+31 - ldd r20,Y+43 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,189 - sbci r29,255 - st Y,r23 - subi r28,67 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r6,r18 - or r6,r19 - eor r6,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+7,r21 - mov r21,r6 - and r21,r12 - eor r21,r13 - std Y+55,r21 - ldd r12,Y+8 - ldd r18,Y+20 - ldd r19,Y+32 - ldd r20,Y+44 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,188 - sbci r29,255 - st Y,r23 - subi r28,68 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r7,r18 - or r7,r19 - eor r7,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+8,r21 - mov r21,r7 - and r21,r12 - eor r21,r13 - std Y+56,r21 - ldd r12,Y+9 - ldd r18,Y+21 - ldd r19,Y+33 - ldd r20,Y+45 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,187 - sbci r29,255 - st Y,r23 - subi r28,69 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r8,r18 - or r8,r19 - eor r8,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+9,r21 - mov r21,r8 - and r21,r12 - eor r21,r13 - std Y+57,r21 - ldd r12,Y+10 - ldd r18,Y+22 - ldd r19,Y+34 - ldd r20,Y+46 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,186 - sbci r29,255 - st Y,r23 - subi r28,70 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r9,r18 - or r9,r19 - eor r9,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+10,r21 - mov r21,r9 - and r21,r12 - eor r21,r13 - std Y+58,r21 - ldd r12,Y+11 - ldd r18,Y+23 - ldd r19,Y+35 - ldd r20,Y+47 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,185 - sbci r29,255 - st Y,r23 - subi r28,71 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r10,r18 - or r10,r19 - eor r10,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+11,r21 - mov r21,r10 - and r21,r12 - eor r21,r13 - std Y+59,r21 - ldd r12,Y+12 - ldd r18,Y+24 - ldd r19,Y+36 - ldd r20,Y+48 - com r12 - mov r13,r18 - and r13,r12 - eor r13,r19 - mov r23,r20 - eor r23,r13 - subi r28,184 - sbci r29,255 - st Y,r23 - subi r28,72 - sbc r29,r1 - mov r14,r20 - eor r14,r12 - mov r11,r18 - or r11,r19 - eor r11,r14 - mov r12,r18 - eor r12,r20 - mov r21,r13 - and r21,r14 - eor r21,r12 - std Y+12,r21 - mov r21,r11 - and r21,r12 - eor r21,r13 - std Y+60,r21 - std Y+25,r11 - std Y+26,r26 - std Y+27,r27 - std Y+28,r2 - std Y+29,r3 - std Y+30,r4 - std 
Y+31,r5 - std Y+32,r6 - std Y+33,r7 - std Y+34,r8 - std Y+35,r9 - std Y+36,r10 - ldd r26,Y+49 - ldd r27,Y+50 - ldd r2,Y+51 - ldd r3,Y+52 - ldd r4,Y+53 - ldd r5,Y+54 - ldd r6,Y+55 - ldd r7,Y+56 - ldd r8,Y+57 - ldd r9,Y+58 - ldd r10,Y+59 - ldd r11,Y+60 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - adc r26,r1 - std Y+13,r26 - std Y+14,r27 - std Y+15,r2 - std Y+16,r3 - std Y+17,r4 - std Y+18,r5 - std Y+19,r6 - std Y+20,r7 - std Y+21,r8 - std Y+22,r9 - std Y+23,r10 - std Y+24,r11 - adiw r28,61 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y - subi r28,72 - sbc r29,r1 - bst r26,0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r3 - ror r2 - ror r27 - ror r26 - bld r11,7 - std Y+37,r5 - std Y+38,r6 - std Y+39,r7 - std Y+40,r8 - std Y+41,r9 - std Y+42,r10 - std Y+43,r11 - std Y+44,r26 - std Y+45,r27 - std Y+46,r2 - std Y+47,r3 - std Y+48,r4 - dec r22 - breq 5542f - rjmp 99b -5542: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r2,Y+15 - ldd r3,Y+16 - ldd r4,Y+17 - ldd r5,Y+18 - ldd r6,Y+19 - ldd r7,Y+20 - ldd r8,Y+21 - ldd r9,Y+22 - ldd r10,Y+23 - ldd r11,Y+24 - std Z+12,r26 - std Z+13,r27 - std Z+14,r2 - std Z+15,r3 - std Z+16,r4 - std Z+17,r5 - std Z+18,r6 - std Z+19,r7 - std Z+20,r8 - std Z+21,r9 - std Z+22,r10 - std Z+23,r11 - ldd r26,Y+25 - ldd r27,Y+26 - ldd r2,Y+27 - ldd r3,Y+28 - ldd r4,Y+29 - ldd r5,Y+30 - ldd r6,Y+31 - ldd r7,Y+32 - ldd r8,Y+33 - ldd r9,Y+34 - ldd r10,Y+35 - ldd r11,Y+36 - std Z+24,r26 - std Z+25,r27 - std Z+26,r2 - std Z+27,r3 - std Z+28,r4 - std Z+29,r5 - std Z+30,r6 - std Z+31,r7 - std Z+32,r8 - std Z+33,r9 - std Z+34,r10 - std Z+35,r11 - ldd r26,Y+37 - ldd r27,Y+38 - ldd r2,Y+39 - ldd r3,Y+40 - ldd r4,Y+41 - ldd r5,Y+42 - ldd r6,Y+43 - ldd r7,Y+44 - ldd r8,Y+45 - ldd r9,Y+46 - ldd r10,Y+47 - ldd r11,Y+48 - std Z+36,r26 - std Z+37,r27 - std Z+38,r2 - std Z+39,r3 - std Z+40,r4 - std Z+41,r5 - std Z+42,r6 - std Z+43,r7 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - subi r28,184 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot384_permute_7, .-knot384_permute_7 - -#endif diff --git a/knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot-512-avr.S b/knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot-512-avr.S deleted file mode 100644 index 6f92ac3..0000000 --- a/knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot-512-avr.S +++ /dev/null @@ -1,2315 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_7, @object - .size table_7, 104 -table_7: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 16 - .byte 32 - .byte 65 - .byte 3 - .byte 6 - .byte 12 - .byte 24 - .byte 48 - .byte 97 - .byte 66 - .byte 5 - .byte 10 - .byte 20 - .byte 40 - .byte 81 - .byte 35 - .byte 71 - .byte 
15 - .byte 30 - .byte 60 - .byte 121 - .byte 114 - .byte 100 - .byte 72 - .byte 17 - .byte 34 - .byte 69 - .byte 11 - .byte 22 - .byte 44 - .byte 89 - .byte 51 - .byte 103 - .byte 78 - .byte 29 - .byte 58 - .byte 117 - .byte 106 - .byte 84 - .byte 41 - .byte 83 - .byte 39 - .byte 79 - .byte 31 - .byte 62 - .byte 125 - .byte 122 - .byte 116 - .byte 104 - .byte 80 - .byte 33 - .byte 67 - .byte 7 - .byte 14 - .byte 28 - .byte 56 - .byte 113 - .byte 98 - .byte 68 - .byte 9 - .byte 18 - .byte 36 - .byte 73 - .byte 19 - .byte 38 - .byte 77 - .byte 27 - .byte 54 - .byte 109 - .byte 90 - .byte 53 - .byte 107 - .byte 86 - .byte 45 - .byte 91 - .byte 55 - .byte 111 - .byte 94 - .byte 61 - .byte 123 - .byte 118 - .byte 108 - .byte 88 - .byte 49 - .byte 99 - .byte 70 - .byte 13 - .byte 26 - .byte 52 - .byte 105 - .byte 82 - .byte 37 - .byte 75 - .byte 23 - .byte 46 - .byte 93 - .byte 59 - .byte 119 - .byte 110 - .byte 92 - - .text -.global knot512_permute_7 - .type knot512_permute_7, @function -knot512_permute_7: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,96 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 113 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r26,Z+16 - ldd r27,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - ldd r26,Z+32 - ldd r27,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r8,Z+40 - ldd r9,Z+41 - ldd r10,Z+42 - ldd r11,Z+43 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - std Y+33,r26 - std Y+34,r27 - std Y+35,r2 - std Y+36,r3 - std Y+37,r4 - std Y+38,r5 - std Y+39,r6 - std Y+40,r7 - std Y+41,r8 - std Y+42,r9 - std Y+43,r10 - std Y+44,r11 - std Y+45,r12 - std Y+46,r13 - std Y+47,r14 - std Y+48,r15 - ldd r26,Z+48 - ldd r27,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r8,Z+56 - ldd r9,Z+57 - ldd r10,Z+58 - ldd r11,Z+59 - ldd r12,Z+60 - ldd r13,Z+61 - ldd r14,Z+62 - ldd r15,Z+63 - adiw r28,49 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y+,r12 - st Y+,r13 - st Y+,r14 - st Y,r15 - subi r28,64 - sbc r29,r1 - push r31 - push r30 - ldi r30,lo8(table_7) - ldi r31,hi8(table_7) -#if defined(RAMPZ) - ldi r17,hh8(table_7) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -134: - ldd r24,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r24,r18 - 
inc r30 - ldd r18,Y+17 - ldd r19,Y+33 - ldd r20,Y+49 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,175 - sbci r29,255 - st Y,r23 - subi r28,81 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r26,r18 - or r26,r19 - eor r26,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+1,r21 - mov r21,r26 - and r21,r24 - eor r21,r25 - subi r28,191 - sbci r29,255 - st Y,r21 - subi r28,65 - sbc r29,r1 - ldd r24,Y+2 - ldd r18,Y+18 - ldd r19,Y+34 - ldd r20,Y+50 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,174 - sbci r29,255 - st Y,r23 - subi r28,82 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r27,r18 - or r27,r19 - eor r27,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+2,r21 - mov r21,r27 - and r21,r24 - eor r21,r25 - subi r28,190 - sbci r29,255 - st Y,r21 - subi r28,66 - sbc r29,r1 - ldd r24,Y+3 - ldd r18,Y+19 - ldd r19,Y+35 - ldd r20,Y+51 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,173 - sbci r29,255 - st Y,r23 - subi r28,83 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r2,r18 - or r2,r19 - eor r2,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+3,r21 - mov r21,r2 - and r21,r24 - eor r21,r25 - subi r28,189 - sbci r29,255 - st Y,r21 - subi r28,67 - sbc r29,r1 - ldd r24,Y+4 - ldd r18,Y+20 - ldd r19,Y+36 - ldd r20,Y+52 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,172 - sbci r29,255 - st Y,r23 - subi r28,84 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r3,r18 - or r3,r19 - eor r3,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+4,r21 - mov r21,r3 - and r21,r24 - eor r21,r25 - subi r28,188 - sbci r29,255 - st Y,r21 - subi r28,68 - sbc r29,r1 - ldd r24,Y+5 - ldd r18,Y+21 - ldd r19,Y+37 - ldd r20,Y+53 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,171 - sbci r29,255 - st Y,r23 - subi r28,85 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r4,r18 - or r4,r19 - eor r4,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+5,r21 - mov r21,r4 - and r21,r24 - eor r21,r25 - subi r28,187 - sbci r29,255 - st Y,r21 - subi r28,69 - sbc r29,r1 - ldd r24,Y+6 - ldd r18,Y+22 - ldd r19,Y+38 - ldd r20,Y+54 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,170 - sbci r29,255 - st Y,r23 - subi r28,86 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r5,r18 - or r5,r19 - eor r5,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+6,r21 - mov r21,r5 - and r21,r24 - eor r21,r25 - subi r28,186 - sbci r29,255 - st Y,r21 - subi r28,70 - sbc r29,r1 - ldd r24,Y+7 - ldd r18,Y+23 - ldd r19,Y+39 - ldd r20,Y+55 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,169 - sbci r29,255 - st Y,r23 - subi r28,87 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r6,r18 - or r6,r19 - eor r6,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+7,r21 - mov r21,r6 - and r21,r24 - eor r21,r25 - subi r28,185 - sbci r29,255 - st Y,r21 - subi r28,71 - sbc r29,r1 - ldd r24,Y+8 - ldd r18,Y+24 - ldd r19,Y+40 - ldd r20,Y+56 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,168 - sbci r29,255 - st Y,r23 - subi r28,88 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r7,r18 - or r7,r19 - eor r7,r16 - mov r24,r18 - 
eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+8,r21 - mov r21,r7 - and r21,r24 - eor r21,r25 - subi r28,184 - sbci r29,255 - st Y,r21 - subi r28,72 - sbc r29,r1 - ldd r24,Y+9 - ldd r18,Y+25 - ldd r19,Y+41 - ldd r20,Y+57 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,167 - sbci r29,255 - st Y,r23 - subi r28,89 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r8,r18 - or r8,r19 - eor r8,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+9,r21 - mov r21,r8 - and r21,r24 - eor r21,r25 - subi r28,183 - sbci r29,255 - st Y,r21 - subi r28,73 - sbc r29,r1 - ldd r24,Y+10 - ldd r18,Y+26 - ldd r19,Y+42 - ldd r20,Y+58 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,166 - sbci r29,255 - st Y,r23 - subi r28,90 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r9,r18 - or r9,r19 - eor r9,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+10,r21 - mov r21,r9 - and r21,r24 - eor r21,r25 - subi r28,182 - sbci r29,255 - st Y,r21 - subi r28,74 - sbc r29,r1 - ldd r24,Y+11 - ldd r18,Y+27 - ldd r19,Y+43 - ldd r20,Y+59 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,165 - sbci r29,255 - st Y,r23 - subi r28,91 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r10,r18 - or r10,r19 - eor r10,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+11,r21 - mov r21,r10 - and r21,r24 - eor r21,r25 - subi r28,181 - sbci r29,255 - st Y,r21 - subi r28,75 - sbc r29,r1 - ldd r24,Y+12 - ldd r18,Y+28 - ldd r19,Y+44 - ldd r20,Y+60 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,164 - sbci r29,255 - st Y,r23 - subi r28,92 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r11,r18 - or r11,r19 - eor r11,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+12,r21 - mov r21,r11 - and r21,r24 - eor r21,r25 - subi r28,180 - sbci r29,255 - st Y,r21 - subi r28,76 - sbc r29,r1 - ldd r24,Y+13 - ldd r18,Y+29 - ldd r19,Y+45 - ldd r20,Y+61 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,163 - sbci r29,255 - st Y,r23 - subi r28,93 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r12,r18 - or r12,r19 - eor r12,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+13,r21 - mov r21,r12 - and r21,r24 - eor r21,r25 - subi r28,179 - sbci r29,255 - st Y,r21 - subi r28,77 - sbc r29,r1 - ldd r24,Y+14 - ldd r18,Y+30 - ldd r19,Y+46 - ldd r20,Y+62 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,162 - sbci r29,255 - st Y,r23 - subi r28,94 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r13,r18 - or r13,r19 - eor r13,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+14,r21 - mov r21,r13 - and r21,r24 - eor r21,r25 - subi r28,178 - sbci r29,255 - st Y,r21 - subi r28,78 - sbc r29,r1 - ldd r24,Y+15 - ldd r18,Y+31 - ldd r19,Y+47 - ldd r20,Y+63 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,161 - sbci r29,255 - st Y,r23 - subi r28,95 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r14,r18 - or r14,r19 - eor r14,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+15,r21 - mov r21,r14 - and r21,r24 - eor r21,r25 - subi r28,177 - sbci r29,255 - st Y,r21 - subi r28,79 - sbc r29,r1 - ldd r24,Y+16 - ldd r18,Y+32 - ldd r19,Y+48 - subi r28,192 - sbci r29,255 - ld 
r20,Y - subi r28,64 - sbc r29,r1 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,160 - sbci r29,255 - st Y,r23 - subi r28,96 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r15,r18 - or r15,r19 - eor r15,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+16,r21 - mov r21,r15 - and r21,r24 - eor r21,r25 - subi r28,176 - sbci r29,255 - st Y,r21 - subi r28,80 - sbc r29,r1 - std Y+33,r14 - std Y+34,r15 - std Y+35,r26 - std Y+36,r27 - std Y+37,r2 - std Y+38,r3 - std Y+39,r4 - std Y+40,r5 - std Y+41,r6 - std Y+42,r7 - std Y+43,r8 - std Y+44,r9 - std Y+45,r10 - std Y+46,r11 - std Y+47,r12 - std Y+48,r13 - subi r28,191 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,80 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,96 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - adiw r28,49 - st Y+,r13 - st Y+,r14 - st Y+,r15 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y,r12 - subi r28,64 - sbc r29,r1 - dec r22 - breq 5812f - rjmp 134b -5812: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r26,Y+17 - ldd r27,Y+18 - ldd r2,Y+19 - ldd r3,Y+20 - ldd r4,Y+21 - ldd r5,Y+22 - ldd r6,Y+23 - ldd r7,Y+24 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - std Z+16,r26 - std Z+17,r27 - std Z+18,r2 - std Z+19,r3 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - ldd r26,Y+33 - ldd r27,Y+34 - ldd r2,Y+35 - ldd r3,Y+36 - ldd r4,Y+37 - ldd r5,Y+38 - ldd r6,Y+39 - ldd r7,Y+40 - ldd r8,Y+41 - ldd r9,Y+42 - ldd r10,Y+43 - ldd r11,Y+44 - ldd r12,Y+45 - ldd r13,Y+46 - ldd r14,Y+47 - ldd r15,Y+48 - std Z+32,r26 - std Z+33,r27 - std Z+34,r2 - std Z+35,r3 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r8 - std Z+41,r9 - std Z+42,r10 - std Z+43,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - adiw r28,49 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld 
r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,64 - sbc r29,r1 - std Z+48,r26 - std Z+49,r27 - std Z+50,r2 - std Z+51,r3 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - std Z+56,r8 - std Z+57,r9 - std Z+58,r10 - std Z+59,r11 - std Z+60,r12 - std Z+61,r13 - std Z+62,r14 - std Z+63,r15 - subi r28,160 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot512_permute_7, .-knot512_permute_7 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_8, @object - .size table_8, 140 -table_8: - .byte 1 - .byte 2 - .byte 4 - .byte 8 - .byte 17 - .byte 35 - .byte 71 - .byte 142 - .byte 28 - .byte 56 - .byte 113 - .byte 226 - .byte 196 - .byte 137 - .byte 18 - .byte 37 - .byte 75 - .byte 151 - .byte 46 - .byte 92 - .byte 184 - .byte 112 - .byte 224 - .byte 192 - .byte 129 - .byte 3 - .byte 6 - .byte 12 - .byte 25 - .byte 50 - .byte 100 - .byte 201 - .byte 146 - .byte 36 - .byte 73 - .byte 147 - .byte 38 - .byte 77 - .byte 155 - .byte 55 - .byte 110 - .byte 220 - .byte 185 - .byte 114 - .byte 228 - .byte 200 - .byte 144 - .byte 32 - .byte 65 - .byte 130 - .byte 5 - .byte 10 - .byte 21 - .byte 43 - .byte 86 - .byte 173 - .byte 91 - .byte 182 - .byte 109 - .byte 218 - .byte 181 - .byte 107 - .byte 214 - .byte 172 - .byte 89 - .byte 178 - .byte 101 - .byte 203 - .byte 150 - .byte 44 - .byte 88 - .byte 176 - .byte 97 - .byte 195 - .byte 135 - .byte 15 - .byte 31 - .byte 62 - .byte 125 - .byte 251 - .byte 246 - .byte 237 - .byte 219 - .byte 183 - .byte 111 - .byte 222 - .byte 189 - .byte 122 - .byte 245 - .byte 235 - .byte 215 - .byte 174 - .byte 93 - .byte 186 - .byte 116 - .byte 232 - .byte 209 - .byte 162 - .byte 68 - .byte 136 - .byte 16 - .byte 33 - .byte 67 - .byte 134 - .byte 13 - .byte 27 - .byte 54 - .byte 108 - .byte 216 - .byte 177 - .byte 99 - .byte 199 - .byte 143 - .byte 30 - .byte 60 - .byte 121 - .byte 243 - .byte 231 - .byte 206 - .byte 156 - .byte 57 - .byte 115 - .byte 230 - .byte 204 - .byte 152 - .byte 49 - .byte 98 - .byte 197 - .byte 139 - .byte 22 - .byte 45 - .byte 90 - .byte 180 - .byte 105 - .byte 210 - .byte 164 - .byte 72 - .byte 145 - .byte 34 - .byte 69 - - .text -.global knot512_permute_8 - .type knot512_permute_8, @function -knot512_permute_8: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - movw r30,r24 - in r28,0x3d - in r29,0x3e - subi r28,96 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 113 - ld r26,Z - ldd r27,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - std Y+1,r26 - std Y+2,r27 - std Y+3,r2 - std Y+4,r3 - std Y+5,r4 - std Y+6,r5 - std Y+7,r6 - std Y+8,r7 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - std Y+13,r12 - std Y+14,r13 - std Y+15,r14 - std Y+16,r15 - ldd r26,Z+16 - ldd r27,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r8,Z+24 - ldd r9,Z+25 - ldd r10,Z+26 - ldd r11,Z+27 - ldd r12,Z+28 - ldd r13,Z+29 - ldd r14,Z+30 - ldd r15,Z+31 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 
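[Editorial note, not part of the patch: throughout these permutation routines a rotate-left-by-one of a whole row is realised as an lsl on the lowest byte, a chain of rol instructions up the row, and a final adc that wraps the carry back into bit 0. A C sketch of that idiom follows; the helper name is hypothetical.]

#include <stdint.h>
#include <stddef.h>

/* Rotate a little-endian row of 'len' bytes left by one bit, the same way
 * the generated assembly does it with lsl/rol.../adc. */
static void rotl1_row(uint8_t *b, size_t len)
{
    uint8_t carry = (uint8_t)(b[len - 1] >> 7);   /* bit shifted out of the top byte */
    for (size_t i = 0; i < len; ++i) {
        uint8_t next = (uint8_t)(b[i] >> 7);      /* carries into the next byte */
        b[i] = (uint8_t)((b[i] << 1) | carry);
        carry = next;
    }
}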
- std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - ldd r26,Z+32 - ldd r27,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r8,Z+40 - ldd r9,Z+41 - ldd r10,Z+42 - ldd r11,Z+43 - ldd r12,Z+44 - ldd r13,Z+45 - ldd r14,Z+46 - ldd r15,Z+47 - std Y+33,r26 - std Y+34,r27 - std Y+35,r2 - std Y+36,r3 - std Y+37,r4 - std Y+38,r5 - std Y+39,r6 - std Y+40,r7 - std Y+41,r8 - std Y+42,r9 - std Y+43,r10 - std Y+44,r11 - std Y+45,r12 - std Y+46,r13 - std Y+47,r14 - std Y+48,r15 - ldd r26,Z+48 - ldd r27,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r8,Z+56 - ldd r9,Z+57 - ldd r10,Z+58 - ldd r11,Z+59 - ldd r12,Z+60 - ldd r13,Z+61 - ldd r14,Z+62 - ldd r15,Z+63 - adiw r28,49 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y+,r12 - st Y+,r13 - st Y+,r14 - st Y,r15 - subi r28,64 - sbc r29,r1 - push r31 - push r30 - ldi r30,lo8(table_8) - ldi r31,hi8(table_8) -#if defined(RAMPZ) - ldi r17,hh8(table_8) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif -134: - ldd r24,Y+1 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r24,r18 - inc r30 - ldd r18,Y+17 - ldd r19,Y+33 - ldd r20,Y+49 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,175 - sbci r29,255 - st Y,r23 - subi r28,81 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r26,r18 - or r26,r19 - eor r26,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+1,r21 - mov r21,r26 - and r21,r24 - eor r21,r25 - subi r28,191 - sbci r29,255 - st Y,r21 - subi r28,65 - sbc r29,r1 - ldd r24,Y+2 - ldd r18,Y+18 - ldd r19,Y+34 - ldd r20,Y+50 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,174 - sbci r29,255 - st Y,r23 - subi r28,82 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r27,r18 - or r27,r19 - eor r27,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+2,r21 - mov r21,r27 - and r21,r24 - eor r21,r25 - subi r28,190 - sbci r29,255 - st Y,r21 - subi r28,66 - sbc r29,r1 - ldd r24,Y+3 - ldd r18,Y+19 - ldd r19,Y+35 - ldd r20,Y+51 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,173 - sbci r29,255 - st Y,r23 - subi r28,83 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r2,r18 - or r2,r19 - eor r2,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+3,r21 - mov r21,r2 - and r21,r24 - eor r21,r25 - subi r28,189 - sbci r29,255 - st Y,r21 - subi r28,67 - sbc r29,r1 - ldd r24,Y+4 - ldd r18,Y+20 - ldd r19,Y+36 - ldd r20,Y+52 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,172 - sbci r29,255 - st Y,r23 - subi r28,84 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r3,r18 - or r3,r19 - eor r3,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+4,r21 - mov r21,r3 - and r21,r24 - eor r21,r25 - subi r28,188 - sbci r29,255 - st Y,r21 - subi r28,68 - sbc r29,r1 - ldd r24,Y+5 - ldd r18,Y+21 - ldd r19,Y+37 - ldd r20,Y+53 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,171 - sbci r29,255 - st Y,r23 - subi r28,85 - sbc r29,r1 - mov r16,r20 - eor 
r16,r24 - mov r4,r18 - or r4,r19 - eor r4,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+5,r21 - mov r21,r4 - and r21,r24 - eor r21,r25 - subi r28,187 - sbci r29,255 - st Y,r21 - subi r28,69 - sbc r29,r1 - ldd r24,Y+6 - ldd r18,Y+22 - ldd r19,Y+38 - ldd r20,Y+54 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,170 - sbci r29,255 - st Y,r23 - subi r28,86 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r5,r18 - or r5,r19 - eor r5,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+6,r21 - mov r21,r5 - and r21,r24 - eor r21,r25 - subi r28,186 - sbci r29,255 - st Y,r21 - subi r28,70 - sbc r29,r1 - ldd r24,Y+7 - ldd r18,Y+23 - ldd r19,Y+39 - ldd r20,Y+55 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,169 - sbci r29,255 - st Y,r23 - subi r28,87 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r6,r18 - or r6,r19 - eor r6,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+7,r21 - mov r21,r6 - and r21,r24 - eor r21,r25 - subi r28,185 - sbci r29,255 - st Y,r21 - subi r28,71 - sbc r29,r1 - ldd r24,Y+8 - ldd r18,Y+24 - ldd r19,Y+40 - ldd r20,Y+56 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,168 - sbci r29,255 - st Y,r23 - subi r28,88 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r7,r18 - or r7,r19 - eor r7,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+8,r21 - mov r21,r7 - and r21,r24 - eor r21,r25 - subi r28,184 - sbci r29,255 - st Y,r21 - subi r28,72 - sbc r29,r1 - ldd r24,Y+9 - ldd r18,Y+25 - ldd r19,Y+41 - ldd r20,Y+57 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,167 - sbci r29,255 - st Y,r23 - subi r28,89 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r8,r18 - or r8,r19 - eor r8,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+9,r21 - mov r21,r8 - and r21,r24 - eor r21,r25 - subi r28,183 - sbci r29,255 - st Y,r21 - subi r28,73 - sbc r29,r1 - ldd r24,Y+10 - ldd r18,Y+26 - ldd r19,Y+42 - ldd r20,Y+58 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,166 - sbci r29,255 - st Y,r23 - subi r28,90 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r9,r18 - or r9,r19 - eor r9,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+10,r21 - mov r21,r9 - and r21,r24 - eor r21,r25 - subi r28,182 - sbci r29,255 - st Y,r21 - subi r28,74 - sbc r29,r1 - ldd r24,Y+11 - ldd r18,Y+27 - ldd r19,Y+43 - ldd r20,Y+59 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,165 - sbci r29,255 - st Y,r23 - subi r28,91 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r10,r18 - or r10,r19 - eor r10,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+11,r21 - mov r21,r10 - and r21,r24 - eor r21,r25 - subi r28,181 - sbci r29,255 - st Y,r21 - subi r28,75 - sbc r29,r1 - ldd r24,Y+12 - ldd r18,Y+28 - ldd r19,Y+44 - ldd r20,Y+60 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,164 - sbci r29,255 - st Y,r23 - subi r28,92 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r11,r18 - or r11,r19 - eor r11,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+12,r21 - mov r21,r11 - and r21,r24 - eor r21,r25 - subi r28,180 - sbci r29,255 - st Y,r21 - subi r28,76 - sbc r29,r1 - ldd r24,Y+13 - ldd r18,Y+29 - ldd 
r19,Y+45 - ldd r20,Y+61 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,163 - sbci r29,255 - st Y,r23 - subi r28,93 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r12,r18 - or r12,r19 - eor r12,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+13,r21 - mov r21,r12 - and r21,r24 - eor r21,r25 - subi r28,179 - sbci r29,255 - st Y,r21 - subi r28,77 - sbc r29,r1 - ldd r24,Y+14 - ldd r18,Y+30 - ldd r19,Y+46 - ldd r20,Y+62 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,162 - sbci r29,255 - st Y,r23 - subi r28,94 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r13,r18 - or r13,r19 - eor r13,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+14,r21 - mov r21,r13 - and r21,r24 - eor r21,r25 - subi r28,178 - sbci r29,255 - st Y,r21 - subi r28,78 - sbc r29,r1 - ldd r24,Y+15 - ldd r18,Y+31 - ldd r19,Y+47 - ldd r20,Y+63 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,161 - sbci r29,255 - st Y,r23 - subi r28,95 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r14,r18 - or r14,r19 - eor r14,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+15,r21 - mov r21,r14 - and r21,r24 - eor r21,r25 - subi r28,177 - sbci r29,255 - st Y,r21 - subi r28,79 - sbc r29,r1 - ldd r24,Y+16 - ldd r18,Y+32 - ldd r19,Y+48 - subi r28,192 - sbci r29,255 - ld r20,Y - subi r28,64 - sbc r29,r1 - com r24 - mov r25,r18 - and r25,r24 - eor r25,r19 - mov r23,r20 - eor r23,r25 - subi r28,160 - sbci r29,255 - st Y,r23 - subi r28,96 - sbc r29,r1 - mov r16,r20 - eor r16,r24 - mov r15,r18 - or r15,r19 - eor r15,r16 - mov r24,r18 - eor r24,r20 - mov r21,r25 - and r21,r16 - eor r21,r24 - std Y+16,r21 - mov r21,r15 - and r21,r24 - eor r21,r25 - subi r28,176 - sbci r29,255 - st Y,r21 - subi r28,80 - sbc r29,r1 - std Y+33,r14 - std Y+34,r15 - std Y+35,r26 - std Y+36,r27 - std Y+37,r2 - std Y+38,r3 - std Y+39,r4 - std Y+40,r5 - std Y+41,r6 - std Y+42,r7 - std Y+43,r8 - std Y+44,r9 - std Y+45,r10 - std Y+46,r11 - std Y+47,r12 - std Y+48,r13 - subi r28,191 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,80 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - std Y+17,r26 - std Y+18,r27 - std Y+19,r2 - std Y+20,r3 - std Y+21,r4 - std Y+22,r5 - std Y+23,r6 - std Y+24,r7 - std Y+25,r8 - std Y+26,r9 - std Y+27,r10 - std Y+28,r11 - std Y+29,r12 - std Y+30,r13 - std Y+31,r14 - std Y+32,r15 - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,96 - sbc r29,r1 - lsl r26 - rol r27 - rol r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - rol r10 - rol r11 - rol r12 - rol r13 - rol r14 - rol r15 - adc r26,r1 - adiw r28,49 - st Y+,r13 - st Y+,r14 - st Y+,r15 - st Y+,r26 - st Y+,r27 - st Y+,r2 - st Y+,r3 - st Y+,r4 - st Y+,r5 - st Y+,r6 - st Y+,r7 - st Y+,r8 - st Y+,r9 - st Y+,r10 - st Y+,r11 - st Y,r12 - subi r28,64 - sbc r29,r1 - dec r22 - breq 5812f - rjmp 134b -5812: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - ldd r26,Y+1 - ldd 
r27,Y+2 - ldd r2,Y+3 - ldd r3,Y+4 - ldd r4,Y+5 - ldd r5,Y+6 - ldd r6,Y+7 - ldd r7,Y+8 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - ldd r12,Y+13 - ldd r13,Y+14 - ldd r14,Y+15 - ldd r15,Y+16 - st Z,r26 - std Z+1,r27 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - ldd r26,Y+17 - ldd r27,Y+18 - ldd r2,Y+19 - ldd r3,Y+20 - ldd r4,Y+21 - ldd r5,Y+22 - ldd r6,Y+23 - ldd r7,Y+24 - ldd r8,Y+25 - ldd r9,Y+26 - ldd r10,Y+27 - ldd r11,Y+28 - ldd r12,Y+29 - ldd r13,Y+30 - ldd r14,Y+31 - ldd r15,Y+32 - std Z+16,r26 - std Z+17,r27 - std Z+18,r2 - std Z+19,r3 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r8 - std Z+25,r9 - std Z+26,r10 - std Z+27,r11 - std Z+28,r12 - std Z+29,r13 - std Z+30,r14 - std Z+31,r15 - ldd r26,Y+33 - ldd r27,Y+34 - ldd r2,Y+35 - ldd r3,Y+36 - ldd r4,Y+37 - ldd r5,Y+38 - ldd r6,Y+39 - ldd r7,Y+40 - ldd r8,Y+41 - ldd r9,Y+42 - ldd r10,Y+43 - ldd r11,Y+44 - ldd r12,Y+45 - ldd r13,Y+46 - ldd r14,Y+47 - ldd r15,Y+48 - std Z+32,r26 - std Z+33,r27 - std Z+34,r2 - std Z+35,r3 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r8 - std Z+41,r9 - std Z+42,r10 - std Z+43,r11 - std Z+44,r12 - std Z+45,r13 - std Z+46,r14 - std Z+47,r15 - adiw r28,49 - ld r26,Y+ - ld r27,Y+ - ld r2,Y+ - ld r3,Y+ - ld r4,Y+ - ld r5,Y+ - ld r6,Y+ - ld r7,Y+ - ld r8,Y+ - ld r9,Y+ - ld r10,Y+ - ld r11,Y+ - ld r12,Y+ - ld r13,Y+ - ld r14,Y+ - ld r15,Y - subi r28,64 - sbc r29,r1 - std Z+48,r26 - std Z+49,r27 - std Z+50,r2 - std Z+51,r3 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - std Z+56,r8 - std Z+57,r9 - std Z+58,r10 - std Z+59,r11 - std Z+60,r12 - std Z+61,r13 - std Z+62,r14 - std Z+63,r15 - subi r28,160 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size knot512_permute_8, .-knot512_permute_8 - -#endif diff --git a/knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot.c b/knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot.c deleted file mode 100644 index f8b378e..0000000 --- a/knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot.c +++ /dev/null @@ -1,301 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-knot.h" - -#if !defined(__AVR__) - -/* Round constants for the KNOT-256, KNOT-384, and KNOT-512 permutations */ -static uint8_t const rc6[52] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06, 0x0c, 0x18, 0x31, 0x22, - 0x05, 0x0a, 0x14, 0x29, 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28, - 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24, 0x09, 0x12, 0x25, 0x0b, - 0x16, 0x2d, 0x1b, 0x37, 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26, - 0x0d, 0x1a, 0x35, 0x2a -}; -static uint8_t const rc7[104] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03, 0x06, 0x0c, 0x18, 0x30, - 0x61, 0x42, 0x05, 0x0a, 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c, - 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b, 0x16, 0x2c, 0x59, 0x33, - 0x67, 0x4e, 0x1d, 0x3a, 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f, - 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43, 0x07, 0x0e, 0x1c, 0x38, - 0x71, 0x62, 0x44, 0x09, 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36, - 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37, 0x6f, 0x5e, 0x3d, 0x7b, - 0x76, 0x6c, 0x58, 0x31, 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25, - 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c -}; -static uint8_t const rc8[140] = { - 0x01, 0x02, 0x04, 0x08, 0x11, 0x23, 0x47, 0x8e, 0x1c, 0x38, 0x71, 0xe2, - 0xc4, 0x89, 0x12, 0x25, 0x4b, 0x97, 0x2e, 0x5c, 0xb8, 0x70, 0xe0, 0xc0, - 0x81, 0x03, 0x06, 0x0c, 0x19, 0x32, 0x64, 0xc9, 0x92, 0x24, 0x49, 0x93, - 0x26, 0x4d, 0x9b, 0x37, 0x6e, 0xdc, 0xb9, 0x72, 0xe4, 0xc8, 0x90, 0x20, - 0x41, 0x82, 0x05, 0x0a, 0x15, 0x2b, 0x56, 0xad, 0x5b, 0xb6, 0x6d, 0xda, - 0xb5, 0x6b, 0xd6, 0xac, 0x59, 0xb2, 0x65, 0xcb, 0x96, 0x2c, 0x58, 0xb0, - 0x61, 0xc3, 0x87, 0x0f, 0x1f, 0x3e, 0x7d, 0xfb, 0xf6, 0xed, 0xdb, 0xb7, - 0x6f, 0xde, 0xbd, 0x7a, 0xf5, 0xeb, 0xd7, 0xae, 0x5d, 0xba, 0x74, 0xe8, - 0xd1, 0xa2, 0x44, 0x88, 0x10, 0x21, 0x43, 0x86, 0x0d, 0x1b, 0x36, 0x6c, - 0xd8, 0xb1, 0x63, 0xc7, 0x8f, 0x1e, 0x3c, 0x79, 0xf3, 0xe7, 0xce, 0x9c, - 0x39, 0x73, 0xe6, 0xcc, 0x98, 0x31, 0x62, 0xc5, 0x8b, 0x16, 0x2d, 0x5a, - 0xb4, 0x69, 0xd2, 0xa4, 0x48, 0x91, 0x22, 0x45 -}; - -/* Applies the KNOT S-box to four 64-bit words in bit-sliced mode */ -#define knot_sbox64(a0, a1, a2, a3, b1, b2, b3) \ - do { \ - uint64_t t1, t3, t6; \ - t1 = ~(a0); \ - t3 = (a2) ^ ((a1) & t1); \ - (b3) = (a3) ^ t3; \ - t6 = (a3) ^ t1; \ - (b2) = ((a1) | (a2)) ^ t6; \ - t1 = (a1) ^ (a3); \ - (a0) = t1 ^ (t3 & t6); \ - (b1) = t3 ^ ((b2) & t1); \ - } while (0) - -/* Applies the KNOT S-box to four 32-bit words in bit-sliced mode */ -#define knot_sbox32(a0, a1, a2, a3, b1, b2, b3) \ - do { \ - uint32_t t1, t3, t6; \ - t1 = ~(a0); \ - t3 = (a2) ^ ((a1) & t1); \ - (b3) = (a3) ^ t3; \ - t6 = (a3) ^ t1; \ - (b2) = ((a1) | (a2)) ^ t6; \ - t1 = (a1) ^ (a3); \ - (a0) = t1 ^ (t3 & t6); \ - (b1) = t3 ^ ((b2) & t1); \ - } while (0) - -static void knot256_permute - (knot256_state_t *state, const uint8_t *rc, uint8_t rounds) -{ - uint64_t b1, b2, b3; - - /* Load the input state into local variables; each row is 64 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = 
le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x1, x2, x3, b1, b2, b3); - - /* Linear diffusion layer */ - x1 = leftRotate1_64(b1); - x2 = leftRotate8_64(b2); - x3 = leftRotate25_64(b3); - } - - /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); -#endif -} - -void knot256_permute_6(knot256_state_t *state, uint8_t rounds) -{ - knot256_permute(state, rc6, rounds); -} - -void knot256_permute_7(knot256_state_t *state, uint8_t rounds) -{ - knot256_permute(state, rc7, rounds); -} - -void knot384_permute_7(knot384_state_t *state, uint8_t rounds) -{ - const uint8_t *rc = rc7; - uint64_t b2, b4, b6; - uint32_t b3, b5, b7; - - /* Load the input state into local variables; each row is 96 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint32_t x1 = state->W[2]; - uint64_t x2 = state->W[3] | (((uint64_t)(state->W[4])) << 32); - uint32_t x3 = state->W[5]; - uint64_t x4 = state->S[3]; - uint32_t x5 = state->W[8]; - uint64_t x6 = state->W[9] | (((uint64_t)(state->W[10])) << 32); - uint32_t x7 = state->W[11]; -#else - uint64_t x0 = le_load_word64(state->B); - uint32_t x1 = le_load_word32(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 12); - uint32_t x3 = le_load_word32(state->B + 20); - uint64_t x4 = le_load_word64(state->B + 24); - uint32_t x5 = le_load_word32(state->B + 32); - uint64_t x6 = le_load_word64(state->B + 36); - uint32_t x7 = le_load_word32(state->B + 44); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x2, x4, x6, b2, b4, b6); - knot_sbox32(x1, x3, x5, x7, b3, b5, b7); - - /* Linear diffusion layer */ - #define leftRotateShort_96(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | ((b1) >> (32 - (bits))); \ - (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ - } while (0) - #define leftRotateLong_96(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | \ - (((uint64_t)(b1)) << ((bits) - 32)) | \ - ((b0) >> (96 - (bits))); \ - (a1) = (uint32_t)(((b0) << ((bits) - 32)) >> 32); \ - } while (0) - leftRotateShort_96(x2, x3, b2, b3, 1); - leftRotateShort_96(x4, x5, b4, b5, 8); - leftRotateLong_96(x6, x7, b6, b7, 55); - } - - /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->W[2] = x1; - state->W[3] = (uint32_t)x2; - state->W[4] = (uint32_t)(x2 >> 32); - state->W[5] = x3; - state->S[3] = x4; - state->W[8] = x5; - state->W[9] = (uint32_t)x6; - state->W[10] = (uint32_t)(x6 >> 32); - state->W[11] = x7; -#else - le_store_word64(state->B, x0); - le_store_word32(state->B + 8, x1); - le_store_word64(state->B + 12, x2); - le_store_word32(state->B + 20, x3); - le_store_word64(state->B + 24, x4); - le_store_word32(state->B + 32, x5); - le_store_word64(state->B + 36, x6); - le_store_word32(state->B + 44, x7); -#endif -} - -static void knot512_permute - (knot512_state_t *state, const uint8_t *rc, uint8_t rounds) -{ - uint64_t b2, b3, b4, b5, b6, b7; - - /* Load 
the input state into local variables; each row is 128 bits */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - uint64_t x0 = state->S[0]; - uint64_t x1 = state->S[1]; - uint64_t x2 = state->S[2]; - uint64_t x3 = state->S[3]; - uint64_t x4 = state->S[4]; - uint64_t x5 = state->S[5]; - uint64_t x6 = state->S[6]; - uint64_t x7 = state->S[7]; -#else - uint64_t x0 = le_load_word64(state->B); - uint64_t x1 = le_load_word64(state->B + 8); - uint64_t x2 = le_load_word64(state->B + 16); - uint64_t x3 = le_load_word64(state->B + 24); - uint64_t x4 = le_load_word64(state->B + 32); - uint64_t x5 = le_load_word64(state->B + 40); - uint64_t x6 = le_load_word64(state->B + 48); - uint64_t x7 = le_load_word64(state->B + 56); -#endif - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds) { - /* Add the next round constant to the state */ - x0 ^= *rc++; - - /* Substitution layer */ - knot_sbox64(x0, x2, x4, x6, b2, b4, b6); - knot_sbox64(x1, x3, x5, x7, b3, b5, b7); - - /* Linear diffusion layer */ - #define leftRotate_128(a0, a1, b0, b1, bits) \ - do { \ - (a0) = ((b0) << (bits)) | ((b1) >> (64 - (bits))); \ - (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ - } while (0) - leftRotate_128(x2, x3, b2, b3, 1); - leftRotate_128(x4, x5, b4, b5, 16); - leftRotate_128(x6, x7, b6, b7, 25); - } - - /* Store the local variables to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0] = x0; - state->S[1] = x1; - state->S[2] = x2; - state->S[3] = x3; - state->S[4] = x4; - state->S[5] = x5; - state->S[6] = x6; - state->S[7] = x7; -#else - le_store_word64(state->B, x0); - le_store_word64(state->B + 8, x1); - le_store_word64(state->B + 16, x2); - le_store_word64(state->B + 24, x3); - le_store_word64(state->B + 32, x4); - le_store_word64(state->B + 40, x5); - le_store_word64(state->B + 48, x6); - le_store_word64(state->B + 56, x7); -#endif -} - -void knot512_permute_7(knot512_state_t *state, uint8_t rounds) -{ - knot512_permute(state, rc7, rounds); -} - -void knot512_permute_8(knot512_state_t *state, uint8_t rounds) -{ - knot512_permute(state, rc8, rounds); -} - -#endif /* !__AVR__ */ diff --git a/knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot.h b/knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot.h deleted file mode 100644 index 88a782c..0000000 --- a/knot/Implementations/crypto_hash/knot512/rhys-avr/internal-knot.h +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
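[Editorial note, not part of the patch: the round structure implemented by the deleted knot256_permute() above can be summarised in a few lines of portable C. knot_sbox64 is copied verbatim from the deleted file; rotl64 and knot256_round are local stand-ins for the library's leftRotateN_64 helpers and round loop body.]

#include <stdint.h>

#define knot_sbox64(a0, a1, a2, a3, b1, b2, b3) \
    do { \
        uint64_t t1, t3, t6; \
        t1 = ~(a0); \
        t3 = (a2) ^ ((a1) & t1); \
        (b3) = (a3) ^ t3; \
        t6 = (a3) ^ t1; \
        (b2) = ((a1) | (a2)) ^ t6; \
        t1 = (a1) ^ (a3); \
        (a0) = t1 ^ (t3 & t6); \
        (b1) = t3 ^ ((b2) & t1); \
    } while (0)

/* Local stand-in for the library's leftRotateN_64 helpers. */
static uint64_t rotl64(uint64_t x, unsigned bits)
{
    return (x << bits) | (x >> (64 - bits));
}

/* One KNOT-256 round: constant addition into row 0, bit-sliced S-box,
 * then row rotations by 1, 8 and 25 bits. */
static void knot256_round(uint64_t x[4], uint8_t rc)
{
    uint64_t b1, b2, b3;
    x[0] ^= rc;
    knot_sbox64(x[0], x[1], x[2], x[3], b1, b2, b3);
    x[1] = rotl64(b1, 1);
    x[2] = rotl64(b2, 8);
    x[3] = rotl64(b3, 25);
}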
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_KNOT_H -#define LW_INTERNAL_KNOT_H - -#include "internal-util.h" - -/** - * \file internal-knot.h - * \brief Permutations that are used by the KNOT AEAD and hash algorithms. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Internal state of the KNOT-256 permutation. - */ -typedef union -{ - uint64_t S[4]; /**< Words of the state */ - uint8_t B[32]; /**< Bytes of the state */ - -} knot256_state_t; - -/** - * \brief Internal state of the KNOT-384 permutation. - */ -typedef union -{ - uint64_t S[6]; /**< 64-bit words of the state */ - uint32_t W[12]; /**< 32-bit words of the state */ - uint8_t B[48]; /**< Bytes of the state */ - -} knot384_state_t; - -/** - * \brief Internal state of the KNOT-512 permutation. - */ -typedef union -{ - uint64_t S[8]; /**< Words of the state */ - uint8_t B[64]; /**< Bytes of the state */ - -} knot512_state_t; - -/** - * \brief Permutes the KNOT-256 state, using 6-bit round constants. - * - * \param state The KNOT-256 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 52. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot256_permute_6(knot256_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-256 state, using 7-bit round constants. - * - * \param state The KNOT-256 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot256_permute_7(knot256_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-384 state, using 7-bit round constants. - * - * \param state The KNOT-384 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot384_permute_7(knot384_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-512 state, using 7-bit round constants. - * - * \param state The KNOT-512 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 104. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot512_permute_7(knot512_state_t *state, uint8_t rounds); - -/** - * \brief Permutes the KNOT-512 state, using 8-bit round constants. - * - * \param state The KNOT-512 state to be permuted. - * \param rounds The number of rounds to be performed, 1 to 140. - * - * The input and output \a state will be in little-endian byte order. - */ -void knot512_permute_8(knot512_state_t *state, uint8_t rounds); - -/** - * \brief Generic pointer to a function that performs a KNOT permutation. - * - * \param state Points to the permutation state. - * \param round Number of rounds to perform. - */ -typedef void (*knot_permute_t)(void *state, uint8_t rounds); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_hash/knot512/rhys-avr/internal-util.h b/knot/Implementations/crypto_hash/knot512/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/knot/Implementations/crypto_hash/knot512/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
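[Editorial note, not part of the patch: a minimal usage sketch for the API declared in the deleted internal-knot.h, assuming that header and a matching implementation are available on the include path.]

#include <stdio.h>
#include <string.h>
#include "internal-knot.h"

int main(void)
{
    knot256_state_t state;
    memset(state.B, 0, sizeof(state.B));
    state.B[0] = 0x01;                      /* arbitrary test input */

    knot256_permute_6(&state, 52);          /* full 52-round variant */

    for (unsigned i = 0; i < sizeof(state.B); ++i)
        printf("%02x", state.B[i]);
    printf("\n");
    return 0;
}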
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) 
| \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = 
(src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
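As the comment above explains, AVR only has cheap single-bit rotate instructions, so the composed variants build every other rotation count from rotates by 8 (byte moves) plus a few 1-bit steps. A standalone, purely illustrative sketch checking that the composition used for a left rotation by 5 (left by 8, then right by three 1-bit steps) matches a direct 5-bit rotation:

    #include <assert.h>
    #include <stdint.h>

    /* Direct left rotation of a 32-bit word by 1..31 bits. */
    static uint32_t rotl32(uint32_t x, unsigned bits)
    {
        return (x << bits) | (x >> (32u - bits));
    }

    /* Single-bit right rotation, the cheap primitive on AVR. */
    static uint32_t rotr1(uint32_t x)
    {
        return (x >> 1) | (x << 31);
    }

    int main(void)
    {
        uint32_t x = 0x12345678;
        /* leftRotate5 in the composed scheme: left by 8, then right by 3. */
        uint32_t composed = rotr1(rotr1(rotr1(rotl32(x, 8))));
        assert(composed == rotl32(x, 5));
        return 0;
    }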
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/knot/Implementations/crypto_hash/knot512/rhys-avr/knot-hash.c b/knot/Implementations/crypto_hash/knot512/rhys-avr/knot-hash.c deleted file mode 100644 index a4edecd..0000000 --- a/knot/Implementations/crypto_hash/knot512/rhys-avr/knot-hash.c +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
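The internal-util.h helpers deleted above (endian load/store, block XOR, rotations) are the primitives every cipher in this library is written against. A self-contained sketch of the usual pattern, using standalone equivalents of the macros purely for illustration:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Standalone equivalents of le_load_word64 / le_store_word64. */
    static uint64_t load_le64(const unsigned char *p)
    {
        uint64_t x = 0;
        for (int i = 7; i >= 0; --i)
            x = (x << 8) | p[i];
        return x;
    }

    static void store_le64(unsigned char *p, uint64_t x)
    {
        for (int i = 0; i < 8; ++i)
            p[i] = (unsigned char)(x >> (8 * i));
    }

    int main(void)
    {
        unsigned char block[8], key[8] = {1, 2, 3, 4, 5, 6, 7, 8};
        memset(block, 0xAA, sizeof(block));

        /* lw_xor_block pattern: XOR the key material into the block in place. */
        for (unsigned i = 0; i < sizeof(block); ++i)
            block[i] ^= key[i];

        /* Load as a little-endian word, rotate it, and store it back. */
        uint64_t w = load_le64(block);
        w = (w << 13) | (w >> (64 - 13));   /* leftRotate13_64 */
        store_le64(block, w);

        for (unsigned i = 0; i < sizeof(block); ++i)
            printf("%02x", block[i]);
        printf("\n");
        return 0;
    }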
- */ - -#include "knot.h" -#include "internal-knot.h" -#include - -aead_hash_algorithm_t const knot_hash_256_256_algorithm = { - "KNOT-HASH-256-256", - sizeof(int), - KNOT_HASH_256_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_hash_256_256, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const knot_hash_256_384_algorithm = { - "KNOT-HASH-256-384", - sizeof(int), - KNOT_HASH_256_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_hash_256_384, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const knot_hash_384_384_algorithm = { - "KNOT-HASH-384-384", - sizeof(int), - KNOT_HASH_384_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_hash_384_384, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const knot_hash_512_512_algorithm = { - "KNOT-HASH-512-512", - sizeof(int), - KNOT_HASH_512_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - knot_hash_512_512, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \brief Input rate for KNOT-HASH-256-256. - */ -#define KNOT_HASH_256_256_RATE 4 - -/** - * \brief Input rate for KNOT-HASH-256-384. - */ -#define KNOT_HASH_256_384_RATE 16 - -/** - * \brief Input rate for KNOT-HASH-384-384. - */ -#define KNOT_HASH_384_384_RATE 6 - -/** - * \brief Input rate for KNOT-HASH-512-512. - */ -#define KNOT_HASH_512_512_RATE 8 - -int knot_hash_256_256 - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - knot256_state_t state; - unsigned temp; - memset(state.B, 0, sizeof(state.B)); - while (inlen >= KNOT_HASH_256_256_RATE) { - lw_xor_block(state.B, in, KNOT_HASH_256_256_RATE); - knot256_permute_7(&state, 68); - in += KNOT_HASH_256_256_RATE; - inlen -= KNOT_HASH_256_256_RATE; - } - temp = (unsigned)inlen; - lw_xor_block(state.B, in, temp); - state.B[temp] ^= 0x01; - knot256_permute_7(&state, 68); - memcpy(out, state.B, KNOT_HASH_256_SIZE / 2); - knot256_permute_7(&state, 68); - memcpy(out + KNOT_HASH_256_SIZE / 2, state.B, KNOT_HASH_256_SIZE / 2); - return 0; -} - -int knot_hash_256_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - knot384_state_t state; - unsigned temp; - memset(state.B, 0, sizeof(state.B)); - state.B[sizeof(state.B) - 1] ^= 0x80; - while (inlen >= KNOT_HASH_256_384_RATE) { - lw_xor_block(state.B, in, KNOT_HASH_256_384_RATE); - knot384_permute_7(&state, 80); - in += KNOT_HASH_256_384_RATE; - inlen -= KNOT_HASH_256_384_RATE; - } - temp = (unsigned)inlen; - lw_xor_block(state.B, in, temp); - state.B[temp] ^= 0x01; - knot384_permute_7(&state, 80); - memcpy(out, state.B, KNOT_HASH_256_SIZE / 2); - knot384_permute_7(&state, 80); - memcpy(out + KNOT_HASH_256_SIZE / 2, state.B, KNOT_HASH_256_SIZE / 2); - return 0; -} - -int knot_hash_384_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - knot384_state_t state; - unsigned temp; - memset(state.B, 0, sizeof(state.B)); - while (inlen >= KNOT_HASH_384_384_RATE) { - lw_xor_block(state.B, in, KNOT_HASH_384_384_RATE); - knot384_permute_7(&state, 104); - in += KNOT_HASH_384_384_RATE; - inlen -= KNOT_HASH_384_384_RATE; - } - temp = (unsigned)inlen; - lw_xor_block(state.B, in, temp); - state.B[temp] ^= 0x01; - knot384_permute_7(&state, 104); - memcpy(out, state.B, KNOT_HASH_384_SIZE / 
2); - knot384_permute_7(&state, 104); - memcpy(out + KNOT_HASH_384_SIZE / 2, state.B, KNOT_HASH_384_SIZE / 2); - return 0; -} - -int knot_hash_512_512 - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - knot512_state_t state; - unsigned temp; - memset(state.B, 0, sizeof(state.B)); - while (inlen >= KNOT_HASH_512_512_RATE) { - lw_xor_block(state.B, in, KNOT_HASH_512_512_RATE); - knot512_permute_8(&state, 140); - in += KNOT_HASH_512_512_RATE; - inlen -= KNOT_HASH_512_512_RATE; - } - temp = (unsigned)inlen; - lw_xor_block(state.B, in, temp); - state.B[temp] ^= 0x01; - knot512_permute_8(&state, 140); - memcpy(out, state.B, KNOT_HASH_512_SIZE / 2); - knot512_permute_8(&state, 140); - memcpy(out + KNOT_HASH_512_SIZE / 2, state.B, KNOT_HASH_512_SIZE / 2); - return 0; -} diff --git a/knot/Implementations/crypto_hash/knot512/rhys-avr/knot.h b/knot/Implementations/crypto_hash/knot512/rhys-avr/knot.h deleted file mode 100644 index e2c5198..0000000 --- a/knot/Implementations/crypto_hash/knot512/rhys-avr/knot.h +++ /dev/null @@ -1,459 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_KNOT_H -#define LWCRYPTO_KNOT_H - -#include "aead-common.h" - -/** - * \file knot.h - * \brief KNOT authenticated encryption and hash algorithms. - * - * KNOT is a family of authenticated encryption and hash algorithms built - * around a permutation and the MonkeyDuplex sponge construction. The - * family members are: - * - * \li KNOT-AEAD-128-256 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag, built around a 256-bit permutation. This is the primary - * encryption member of the family. - * \li KNOT-AEAD-128-384 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag, built around a 384-bit permutation. - * \li KNOT-AEAD-192-384 with a 192-bit key, a 192-bit nonce, and a - * 192-bit tag, built around a 384-bit permutation. - * \li KNOT-AEAD-256-512 with a 256-bit key, a 256-bit nonce, and a - * 256-bit tag, built around a 512-bit permutation. - * \li KNOT-HASH-256-256 with a 256-bit hash output, built around a - * 256-bit permutation. This is the primary hashing member of the family. - * \li KNOT-HASH-256-384 with a 256-bit hash output, built around a - * 384-bit permutation. - * \li KNOT-HASH-384-384 with a 384-bit hash output, built around a - * 384-bit permutation. - * \li KNOT-HASH-512-512 with a 512-bit hash output, built around a - * 512-bit permutation. 
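Each of the four hash functions above is a one-shot sponge: the input is absorbed in rate-sized blocks with a permutation call between blocks, the final partial block is padded with a 0x01 byte, and the digest is squeezed out in two halves. Typical usage of the 512-bit variant looks like this (a minimal sketch; the constant and prototype come from knot.h below):

    #include <stdio.h>
    #include <string.h>
    #include "knot.h"

    int main(void)
    {
        unsigned char digest[KNOT_HASH_512_SIZE];
        const char *msg = "hello";

        if (knot_hash_512_512(digest, (const unsigned char *)msg,
                              strlen(msg)) != 0)
            return 1;

        for (unsigned i = 0; i < sizeof(digest); ++i)
            printf("%02x", digest[i]);
        printf("\n");
        return 0;
    }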
- * - * References: https://csrc.nist.gov/CSRC/media/Projects/lightweight-cryptography/documents/round-2/spec-doc-rnd2/knot-spec-round.pdf - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-128-256 and - * KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_128_NONCE_SIZE 16 - -/** - * \brief Size of the key for KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_KEY_SIZE 24 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_TAG_SIZE 24 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-192-384. - */ -#define KNOT_AEAD_192_NONCE_SIZE 24 - -/** - * \brief Size of the key for KNOT-AEAD-256-512. - */ -#define KNOT_AEAD_256_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for KNOT-AEAD-256-512. - */ -#define KNOT_AEAD_256_TAG_SIZE 32 - -/** - * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. - */ -#define KNOT_AEAD_256_NONCE_SIZE 32 - -/** - * \brief Size of the hash for KNOT-HASH-256-256 and KNOT-HASH-256-384. - */ -#define KNOT_HASH_256_SIZE 32 - -/** - * \brief Size of the hash for KNOT-HASH-384-384. - */ -#define KNOT_HASH_384_SIZE 48 - -/** - * \brief Size of the hash for KNOT-HASH-512-512. - */ -#define KNOT_HASH_512_SIZE 64 - -/** - * \brief Meta-information block for the KNOT-AEAD-128-256 cipher. - */ -extern aead_cipher_t const knot_aead_128_256_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-128-384 cipher. - */ -extern aead_cipher_t const knot_aead_128_384_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-192-384 cipher. - */ -extern aead_cipher_t const knot_aead_192_384_cipher; - -/** - * \brief Meta-information block for the KNOT-AEAD-256-512 cipher. - */ -extern aead_cipher_t const knot_aead_256_512_cipher; - -/** - * \brief Meta-information block for the KNOT-HASH-256-256 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_256_256_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-256-384 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_256_384_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-384-384 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_384_384_algorithm; - -/** - * \brief Meta-information block for the KNOT-HASH-512-512 algorithm. - */ -extern aead_hash_algorithm_t const knot_hash_512_512_algorithm; - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. 
- * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_128_256_decrypt() - */ -int knot_aead_128_256_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_128_256_encrypt() - */ -int knot_aead_128_256_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-384. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_128_384_decrypt() - */ -int knot_aead_128_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-384. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
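The encrypt/decrypt pairs declared here all follow the same calling convention, so one round trip shows the shape of the whole API. A hedged sketch for KNOT-AEAD-128-256 (the all-zero key and nonce are for illustration only; real callers must use a secret key and a fresh nonce per packet):

    #include <stdio.h>
    #include <string.h>
    #include "knot.h"

    int main(void)
    {
        unsigned char key[KNOT_AEAD_128_KEY_SIZE] = {0};
        unsigned char nonce[KNOT_AEAD_128_NONCE_SIZE] = {0};
        const unsigned char msg[] = "attack at dawn";
        const unsigned char ad[] = "header";
        unsigned char ct[sizeof(msg) + KNOT_AEAD_128_TAG_SIZE];
        unsigned char pt[sizeof(msg)];
        unsigned long long ctlen, ptlen;

        if (knot_aead_128_256_encrypt(ct, &ctlen, msg, sizeof(msg),
                                      ad, sizeof(ad), 0, nonce, key) != 0)
            return 1;

        /* Decryption returns -1 if the tag, nonce, or associated data is wrong. */
        if (knot_aead_128_256_decrypt(pt, &ptlen, 0, ct, ctlen,
                                      ad, sizeof(ad), nonce, key) != 0)
            return 1;

        printf("round trip ok: %s\n",
               memcmp(pt, msg, sizeof(msg)) == 0 ? "yes" : "no");
        return 0;
    }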
- * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_128_384_encrypt() - */ -int knot_aead_128_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-192-384. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_192_384_decrypt() - */ -int knot_aead_192_384_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-192-384. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_192_384_encrypt() - */ -int knot_aead_192_384_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with KNOT-AEAD-256-512. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. 
- * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa knot_aead_256_512_decrypt() - */ -int knot_aead_256_512_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with KNOT-AEAD-256-512. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa knot_aead_256_512_encrypt() - */ -int knot_aead_256_512_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with KNOT-HASH-256-256. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_256_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_256_256 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-256-384. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_256_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_256_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-384-384. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_384_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. 
- */ -int knot_hash_384_384 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with KNOT-HASH-512-512. - * - * \param out Buffer to receive the hash output which must be at least - * KNOT_HASH_512_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int knot_hash_512_512 - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/knot/Implementations/crypto_hash/knot512/rhys/aead-common.c b/knot/Implementations/crypto_hash/knot512/rhys/aead-common.c new file mode 100644 index 0000000..84fc53a --- /dev/null +++ b/knot/Implementations/crypto_hash/knot512/rhys/aead-common.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "aead-common.h" + +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = (accum - 1) >> 8; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} + +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size, int precheck) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = ((accum - 1) >> 8) & precheck; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} diff --git a/knot/Implementations/crypto_hash/knot512/rhys/aead-common.h b/knot/Implementations/crypto_hash/knot512/rhys/aead-common.h new file mode 100644 index 0000000..2be95eb --- /dev/null +++ b/knot/Implementations/crypto_hash/knot512/rhys/aead-common.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_AEAD_COMMON_H +#define LWCRYPTO_AEAD_COMMON_H + +#include + +/** + * \file aead-common.h + * \brief Definitions that are common across AEAD schemes. + * + * AEAD stands for "Authenticated Encryption with Associated Data". + * It is a standard API pattern for securely encrypting and + * authenticating packets of data. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts and authenticates a packet with an AEAD scheme. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. 
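aead_check_tag() above compares the tags without any secret-dependent branching: the XOR of every byte pair is OR-ed into accum, so accum is zero only if all bytes matched, and (accum - 1) >> 8 then becomes an all-ones mask on a match and zero on a mismatch (assuming an arithmetic right shift, as the library does). That mask keeps the plaintext intact on success, zeroes it on failure, and its complement is the return value. A small standalone sketch of the same trick:

    #include <stdio.h>

    /* Returns 0 if the two tags match, -1 otherwise, without branching on data. */
    static int ct_compare(const unsigned char *a, const unsigned char *b, unsigned len)
    {
        int accum = 0;
        while (len > 0) {
            accum |= (*a++ ^ *b++);
            --len;
        }
        /* accum is 0 on a match and 1..255 otherwise, so (accum - 1) >> 8 is
         * -1 (all ones) on a match and 0 on a mismatch, assuming an
         * arithmetic right shift. */
        accum = (accum - 1) >> 8;
        return ~accum;
    }

    int main(void)
    {
        unsigned char t1[4] = {1, 2, 3, 4};
        unsigned char t2[4] = {1, 2, 3, 4};
        unsigned char t3[4] = {1, 2, 3, 5};
        printf("%d %d\n", ct_compare(t1, t2, 4), ct_compare(t1, t3, 4)); /* 0 -1 */
        return 0;
    }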
+ * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + */ +typedef int (*aead_cipher_encrypt_t) + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with an AEAD scheme. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + */ +typedef int (*aead_cipher_decrypt_t) + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data. + * + * \param out Buffer to receive the hash output. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +typedef int (*aead_hash_t) + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a hashing operation. + * + * \param state Hash state to be initialized. + */ +typedef void (*aead_hash_init_t)(void *state); + +/** + * \brief Updates a hash state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + */ +typedef void (*aead_hash_update_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Returns the final hash value from a hashing operation. + * + * \param Hash state to be finalized. + * \param out Points to the output buffer to receive the hash value. + */ +typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); + +/** + * \brief Aborbs more input data into an XOF state. + * + * \param state XOF state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. + * + * \sa ascon_xof_init(), ascon_xof_squeeze() + */ +typedef void (*aead_xof_absorb_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Squeezes output data from an XOF state. 
+ * + * \param state XOF state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + */ +typedef void (*aead_xof_squeeze_t) + (void *state, unsigned char *out, unsigned long long outlen); + +/** + * \brief No special AEAD features. + */ +#define AEAD_FLAG_NONE 0x0000 + +/** + * \brief The natural byte order of the AEAD cipher is little-endian. + * + * If this flag is not present, then the natural byte order of the + * AEAD cipher should be assumed to be big-endian. + * + * The natural byte order may be useful when formatting packet sequence + * numbers as nonces. The application needs to know whether the sequence + * number should be packed into the leading or trailing bytes of the nonce. + */ +#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 + +/** + * \brief Meta-information about an AEAD cipher. + */ +typedef struct +{ + const char *name; /**< Name of the cipher */ + unsigned key_len; /**< Length of the key in bytes */ + unsigned nonce_len; /**< Length of the nonce in bytes */ + unsigned tag_len; /**< Length of the tag in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ + aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ + +} aead_cipher_t; + +/** + * \brief Meta-information about a hash algorithm that is related to an AEAD. + * + * Regular hash algorithms should provide the "hash", "init", "update", + * and "finalize" functions. Extensible Output Functions (XOF's) should + * proivde the "hash", "init", "absorb", and "squeeze" functions. + */ +typedef struct +{ + const char *name; /**< Name of the hash algorithm */ + size_t state_size; /**< Size of the incremental state structure */ + unsigned hash_len; /**< Length of the hash in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_hash_t hash; /**< All in one hashing function */ + aead_hash_init_t init; /**< Incremental hash/XOF init function */ + aead_hash_update_t update; /**< Incremental hash update function */ + aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ + aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ + aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ + +} aead_hash_algorithm_t; + +/** + * \brief Check an authentication tag in constant time. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + */ +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len); + +/** + * \brief Check an authentication tag in constant time with a previous check. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
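The aead_cipher_t and aead_hash_algorithm_t descriptors above exist so that test harnesses and benchmarks can drive every algorithm in the library through one table of function pointers. A hedged sketch of such a generic driver (it assumes knot_aead_128_256_cipher from knot.h is linked in):

    #include <stdio.h>
    #include <string.h>
    #include "aead-common.h"
    #include "knot.h"

    int main(void)
    {
        const aead_cipher_t *cipher = &knot_aead_128_256_cipher;
        unsigned char key[32] = {0}, nonce[32] = {0}; /* large enough for any member */
        unsigned char msg[16] = "generic driver";
        unsigned char ct[16 + 32], pt[16];
        unsigned long long ctlen, ptlen;

        printf("%s: key=%u nonce=%u tag=%u\n", cipher->name,
               cipher->key_len, cipher->nonce_len, cipher->tag_len);

        /* The descriptor's function pointers have the same signature as the
         * direct knot_aead_128_256_encrypt()/decrypt() calls. */
        if (cipher->encrypt(ct, &ctlen, msg, sizeof(msg), 0, 0, 0, nonce, key) != 0)
            return 1;
        if (cipher->decrypt(pt, &ptlen, 0, ct, ctlen, 0, 0, nonce, key) != 0)
            return 1;
        return memcmp(pt, msg, sizeof(msg)) != 0;
    }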
+ * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + * + * This version can be used to incorporate other information about the + * correctness of the plaintext into the final result. + */ +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len, int precheck); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/knot/Implementations/crypto_hash/knot512/rhys/api.h b/knot/Implementations/crypto_hash/knot512/rhys/api.h new file mode 100644 index 0000000..de9380d --- /dev/null +++ b/knot/Implementations/crypto_hash/knot512/rhys/api.h @@ -0,0 +1 @@ +#define CRYPTO_BYTES 64 diff --git a/knot/Implementations/crypto_hash/knot512/rhys/hash.c b/knot/Implementations/crypto_hash/knot512/rhys/hash.c new file mode 100644 index 0000000..7c0a3b3 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot512/rhys/hash.c @@ -0,0 +1,8 @@ + +#include "knot.h" + +int crypto_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + return knot_hash_512_512(out, in, inlen); +} diff --git a/knot/Implementations/crypto_hash/knot512/rhys/internal-knot-256-avr.S b/knot/Implementations/crypto_hash/knot512/rhys/internal-knot-256-avr.S new file mode 100644 index 0000000..15e6389 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot512/rhys/internal-knot-256-avr.S @@ -0,0 +1,1093 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_6, @object + .size table_6, 52 +table_6: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 33 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 49 + .byte 34 + .byte 5 + .byte 10 + .byte 20 + .byte 41 + .byte 19 + .byte 39 + .byte 15 + .byte 30 + .byte 61 + .byte 58 + .byte 52 + .byte 40 + .byte 17 + .byte 35 + .byte 7 + .byte 14 + .byte 28 + .byte 57 + .byte 50 + .byte 36 + .byte 9 + .byte 18 + .byte 37 + .byte 11 + .byte 22 + .byte 45 + .byte 27 + .byte 55 + .byte 46 + .byte 29 + .byte 59 + .byte 54 + .byte 44 + .byte 25 + .byte 51 + .byte 38 + .byte 13 + .byte 26 + .byte 53 + .byte 42 + + .text +.global knot256_permute_6 + .type knot256_permute_6, @function +knot256_permute_6: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 57 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r8 + std Y+18,r9 + std Y+19,r10 + std Y+20,r11 + std Y+21,r12 + std 
Y+22,r13 + std Y+23,r14 + std Y+24,r15 + push r31 + push r30 + ldi r30,lo8(table_6) + ldi r31,hi8(table_6) +#if defined(RAMPZ) + ldi r17,hh8(table_6) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +59: +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + eor r18,r23 + inc r30 + ldd r23,Y+1 + ldd r4,Y+9 + ldd r5,Y+17 + mov r24,r18 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+33,r7 + mov r16,r5 + eor r16,r24 + mov r8,r23 + or r8,r4 + eor r8,r16 + mov r24,r23 + eor r24,r5 + mov r18,r25 + and r18,r16 + eor r18,r24 + mov r6,r8 + and r6,r24 + eor r6,r25 + std Y+25,r6 + ldd r23,Y+2 + ldd r4,Y+10 + ldd r5,Y+18 + mov r24,r19 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+34,r7 + mov r16,r5 + eor r16,r24 + mov r9,r23 + or r9,r4 + eor r9,r16 + mov r24,r23 + eor r24,r5 + mov r19,r25 + and r19,r16 + eor r19,r24 + mov r6,r9 + and r6,r24 + eor r6,r25 + std Y+26,r6 + ldd r23,Y+3 + ldd r4,Y+11 + ldd r5,Y+19 + mov r24,r20 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+35,r7 + mov r16,r5 + eor r16,r24 + mov r10,r23 + or r10,r4 + eor r10,r16 + mov r24,r23 + eor r24,r5 + mov r20,r25 + and r20,r16 + eor r20,r24 + mov r6,r10 + and r6,r24 + eor r6,r25 + std Y+27,r6 + ldd r23,Y+4 + ldd r4,Y+12 + ldd r5,Y+20 + mov r24,r21 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+36,r7 + mov r16,r5 + eor r16,r24 + mov r11,r23 + or r11,r4 + eor r11,r16 + mov r24,r23 + eor r24,r5 + mov r21,r25 + and r21,r16 + eor r21,r24 + mov r6,r11 + and r6,r24 + eor r6,r25 + std Y+28,r6 + ldd r23,Y+5 + ldd r4,Y+13 + ldd r5,Y+21 + mov r24,r26 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+37,r7 + mov r16,r5 + eor r16,r24 + mov r12,r23 + or r12,r4 + eor r12,r16 + mov r24,r23 + eor r24,r5 + mov r26,r25 + and r26,r16 + eor r26,r24 + mov r6,r12 + and r6,r24 + eor r6,r25 + std Y+29,r6 + ldd r23,Y+6 + ldd r4,Y+14 + ldd r5,Y+22 + mov r24,r27 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+38,r7 + mov r16,r5 + eor r16,r24 + mov r13,r23 + or r13,r4 + eor r13,r16 + mov r24,r23 + eor r24,r5 + mov r27,r25 + and r27,r16 + eor r27,r24 + mov r6,r13 + and r6,r24 + eor r6,r25 + std Y+30,r6 + ldd r23,Y+7 + ldd r4,Y+15 + ldd r5,Y+23 + mov r24,r2 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+39,r7 + mov r16,r5 + eor r16,r24 + mov r14,r23 + or r14,r4 + eor r14,r16 + mov r24,r23 + eor r24,r5 + mov r2,r25 + and r2,r16 + eor r2,r24 + mov r6,r14 + and r6,r24 + eor r6,r25 + std Y+31,r6 + ldd r23,Y+8 + ldd r4,Y+16 + ldd r5,Y+24 + mov r24,r3 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+40,r7 + mov r16,r5 + eor r16,r24 + mov r15,r23 + or r15,r4 + eor r15,r16 + mov r24,r23 + eor r24,r5 + mov r3,r25 + and r3,r16 + eor r3,r24 + mov r6,r15 + and r6,r24 + eor r6,r25 + std Y+32,r6 + std Y+9,r15 + std Y+10,r8 + std Y+11,r9 + std Y+12,r10 + std Y+13,r11 + std Y+14,r12 + std Y+15,r13 + std Y+16,r14 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd 
r11,Y+36 + ldd r12,Y+37 + ldd r13,Y+38 + ldd r14,Y+39 + ldd r15,Y+40 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+17,r13 + std Y+18,r14 + std Y+19,r15 + std Y+20,r8 + std Y+21,r9 + std Y+22,r10 + std Y+23,r11 + std Y+24,r12 + dec r22 + breq 5322f + rjmp 59b +5322: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + ldd r12,Y+5 + ldd r13,Y+6 + ldd r14,Y+7 + ldd r15,Y+8 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + ldd r8,Y+17 + ldd r9,Y+18 + ldd r10,Y+19 + ldd r11,Y+20 + ldd r12,Y+21 + ldd r13,Y+22 + ldd r14,Y+23 + ldd r15,Y+24 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + adiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot256_permute_6, .-knot256_permute_6 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 + .byte 92 + + .text +.global knot256_permute_7 + .type knot256_permute_7, @function +knot256_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 57 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Z+16 + ldd r9,Z+17 + ldd 
r10,Z+18 + ldd r11,Z+19 + ldd r12,Z+20 + ldd r13,Z+21 + ldd r14,Z+22 + ldd r15,Z+23 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r8 + std Y+18,r9 + std Y+19,r10 + std Y+20,r11 + std Y+21,r12 + std Y+22,r13 + std Y+23,r14 + std Y+24,r15 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r17,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +59: +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + eor r18,r23 + inc r30 + ldd r23,Y+1 + ldd r4,Y+9 + ldd r5,Y+17 + mov r24,r18 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+33,r7 + mov r16,r5 + eor r16,r24 + mov r8,r23 + or r8,r4 + eor r8,r16 + mov r24,r23 + eor r24,r5 + mov r18,r25 + and r18,r16 + eor r18,r24 + mov r6,r8 + and r6,r24 + eor r6,r25 + std Y+25,r6 + ldd r23,Y+2 + ldd r4,Y+10 + ldd r5,Y+18 + mov r24,r19 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+34,r7 + mov r16,r5 + eor r16,r24 + mov r9,r23 + or r9,r4 + eor r9,r16 + mov r24,r23 + eor r24,r5 + mov r19,r25 + and r19,r16 + eor r19,r24 + mov r6,r9 + and r6,r24 + eor r6,r25 + std Y+26,r6 + ldd r23,Y+3 + ldd r4,Y+11 + ldd r5,Y+19 + mov r24,r20 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+35,r7 + mov r16,r5 + eor r16,r24 + mov r10,r23 + or r10,r4 + eor r10,r16 + mov r24,r23 + eor r24,r5 + mov r20,r25 + and r20,r16 + eor r20,r24 + mov r6,r10 + and r6,r24 + eor r6,r25 + std Y+27,r6 + ldd r23,Y+4 + ldd r4,Y+12 + ldd r5,Y+20 + mov r24,r21 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+36,r7 + mov r16,r5 + eor r16,r24 + mov r11,r23 + or r11,r4 + eor r11,r16 + mov r24,r23 + eor r24,r5 + mov r21,r25 + and r21,r16 + eor r21,r24 + mov r6,r11 + and r6,r24 + eor r6,r25 + std Y+28,r6 + ldd r23,Y+5 + ldd r4,Y+13 + ldd r5,Y+21 + mov r24,r26 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+37,r7 + mov r16,r5 + eor r16,r24 + mov r12,r23 + or r12,r4 + eor r12,r16 + mov r24,r23 + eor r24,r5 + mov r26,r25 + and r26,r16 + eor r26,r24 + mov r6,r12 + and r6,r24 + eor r6,r25 + std Y+29,r6 + ldd r23,Y+6 + ldd r4,Y+14 + ldd r5,Y+22 + mov r24,r27 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+38,r7 + mov r16,r5 + eor r16,r24 + mov r13,r23 + or r13,r4 + eor r13,r16 + mov r24,r23 + eor r24,r5 + mov r27,r25 + and r27,r16 + eor r27,r24 + mov r6,r13 + and r6,r24 + eor r6,r25 + std Y+30,r6 + ldd r23,Y+7 + ldd r4,Y+15 + ldd r5,Y+23 + mov r24,r2 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+39,r7 + mov r16,r5 + eor r16,r24 + mov r14,r23 + or r14,r4 + eor r14,r16 + mov r24,r23 + eor r24,r5 + mov r2,r25 + and r2,r16 + eor r2,r24 + mov r6,r14 + and r6,r24 + eor r6,r25 + std Y+31,r6 + ldd r23,Y+8 + ldd r4,Y+16 + ldd r5,Y+24 + mov r24,r3 + com r24 + mov r25,r23 + and r25,r24 + eor r25,r4 + mov r7,r5 + eor r7,r25 + std Y+40,r7 + mov r16,r5 + eor r16,r24 + mov r15,r23 + or r15,r4 + eor r15,r16 + mov r24,r23 + eor r24,r5 + mov r3,r25 + and r3,r16 + eor r3,r24 + mov r6,r15 + and r6,r24 + eor r6,r25 + std Y+32,r6 + std Y+9,r15 + std Y+10,r8 + std Y+11,r9 + std Y+12,r10 + std Y+13,r11 + std 
Y+14,r12 + std Y+15,r13 + std Y+16,r14 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + std Y+5,r12 + std Y+6,r13 + std Y+7,r14 + std Y+8,r15 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd r11,Y+36 + ldd r12,Y+37 + ldd r13,Y+38 + ldd r14,Y+39 + ldd r15,Y+40 + lsl r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r8,r1 + std Y+17,r13 + std Y+18,r14 + std Y+19,r15 + std Y+20,r8 + std Y+21,r9 + std Y+22,r10 + std Y+23,r11 + std Y+24,r12 + dec r22 + breq 5322f + rjmp 59b +5322: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + std Z+4,r26 + std Z+5,r27 + std Z+6,r2 + std Z+7,r3 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + ldd r12,Y+5 + ldd r13,Y+6 + ldd r14,Y+7 + ldd r15,Y+8 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + std Z+16,r8 + std Z+17,r9 + std Z+18,r10 + std Z+19,r11 + std Z+20,r12 + std Z+21,r13 + std Z+22,r14 + std Z+23,r15 + ldd r8,Y+17 + ldd r9,Y+18 + ldd r10,Y+19 + ldd r11,Y+20 + ldd r12,Y+21 + ldd r13,Y+22 + ldd r14,Y+23 + ldd r15,Y+24 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + adiw r28,40 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot256_permute_7, .-knot256_permute_7 + +#endif diff --git a/knot/Implementations/crypto_hash/knot512/rhys/internal-knot-384-avr.S b/knot/Implementations/crypto_hash/knot512/rhys/internal-knot-384-avr.S new file mode 100644 index 0000000..4d15898 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot512/rhys/internal-knot-384-avr.S @@ -0,0 +1,833 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 
+ .byte 92 + + .text +.global knot384_permute_7 + .type knot384_permute_7, @function +knot384_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,72 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 87 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + ldd r4,Z+16 + ldd r5,Z+17 + ldd r6,Z+18 + ldd r7,Z+19 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + std Y+13,r26 + std Y+14,r27 + std Y+15,r2 + std Y+16,r3 + std Y+17,r4 + std Y+18,r5 + std Y+19,r6 + std Y+20,r7 + std Y+21,r8 + std Y+22,r9 + std Y+23,r10 + std Y+24,r11 + ldd r26,Z+24 + ldd r27,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r4,Z+28 + ldd r5,Z+29 + ldd r6,Z+30 + ldd r7,Z+31 + ldd r8,Z+32 + ldd r9,Z+33 + ldd r10,Z+34 + ldd r11,Z+35 + std Y+25,r26 + std Y+26,r27 + std Y+27,r2 + std Y+28,r3 + std Y+29,r4 + std Y+30,r5 + std Y+31,r6 + std Y+32,r7 + std Y+33,r8 + std Y+34,r9 + std Y+35,r10 + std Y+36,r11 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+37,r26 + std Y+38,r27 + std Y+39,r2 + std Y+40,r3 + std Y+41,r4 + std Y+42,r5 + std Y+43,r6 + std Y+44,r7 + std Y+45,r8 + std Y+46,r9 + std Y+47,r10 + std Y+48,r11 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r24,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif +99: + ldd r12,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + inc r30 + ldd r18,Y+13 + ldd r19,Y+25 + ldd r20,Y+37 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+61,r23 + mov r14,r20 + eor r14,r12 + mov r26,r18 + or r26,r19 + eor r26,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+1,r21 + mov r21,r26 + and r21,r12 + eor r21,r13 + std Y+49,r21 + ldd r12,Y+2 + ldd r18,Y+14 + ldd r19,Y+26 + ldd r20,Y+38 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+62,r23 + mov r14,r20 + eor r14,r12 + mov r27,r18 + or r27,r19 + eor r27,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+2,r21 + mov r21,r27 + and r21,r12 + eor r21,r13 + std Y+50,r21 + ldd r12,Y+3 + ldd r18,Y+15 + ldd r19,Y+27 + ldd r20,Y+39 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + std Y+63,r23 + mov r14,r20 + eor r14,r12 + mov r2,r18 + or r2,r19 + eor r2,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+3,r21 + mov r21,r2 + and r21,r12 + eor r21,r13 + std Y+51,r21 + ldd r12,Y+4 + ldd r18,Y+16 + ldd r19,Y+28 + ldd r20,Y+40 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,192 + sbci r29,255 + st Y,r23 + subi r28,64 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r3,r18 + or r3,r19 + eor r3,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and 
r21,r14 + eor r21,r12 + std Y+4,r21 + mov r21,r3 + and r21,r12 + eor r21,r13 + std Y+52,r21 + ldd r12,Y+5 + ldd r18,Y+17 + ldd r19,Y+29 + ldd r20,Y+41 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,191 + sbci r29,255 + st Y,r23 + subi r28,65 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r4,r18 + or r4,r19 + eor r4,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+5,r21 + mov r21,r4 + and r21,r12 + eor r21,r13 + std Y+53,r21 + ldd r12,Y+6 + ldd r18,Y+18 + ldd r19,Y+30 + ldd r20,Y+42 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,190 + sbci r29,255 + st Y,r23 + subi r28,66 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r5,r18 + or r5,r19 + eor r5,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+6,r21 + mov r21,r5 + and r21,r12 + eor r21,r13 + std Y+54,r21 + ldd r12,Y+7 + ldd r18,Y+19 + ldd r19,Y+31 + ldd r20,Y+43 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,189 + sbci r29,255 + st Y,r23 + subi r28,67 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r6,r18 + or r6,r19 + eor r6,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+7,r21 + mov r21,r6 + and r21,r12 + eor r21,r13 + std Y+55,r21 + ldd r12,Y+8 + ldd r18,Y+20 + ldd r19,Y+32 + ldd r20,Y+44 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,188 + sbci r29,255 + st Y,r23 + subi r28,68 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r7,r18 + or r7,r19 + eor r7,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+8,r21 + mov r21,r7 + and r21,r12 + eor r21,r13 + std Y+56,r21 + ldd r12,Y+9 + ldd r18,Y+21 + ldd r19,Y+33 + ldd r20,Y+45 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,187 + sbci r29,255 + st Y,r23 + subi r28,69 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r8,r18 + or r8,r19 + eor r8,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+9,r21 + mov r21,r8 + and r21,r12 + eor r21,r13 + std Y+57,r21 + ldd r12,Y+10 + ldd r18,Y+22 + ldd r19,Y+34 + ldd r20,Y+46 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,186 + sbci r29,255 + st Y,r23 + subi r28,70 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r9,r18 + or r9,r19 + eor r9,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+10,r21 + mov r21,r9 + and r21,r12 + eor r21,r13 + std Y+58,r21 + ldd r12,Y+11 + ldd r18,Y+23 + ldd r19,Y+35 + ldd r20,Y+47 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,185 + sbci r29,255 + st Y,r23 + subi r28,71 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r10,r18 + or r10,r19 + eor r10,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+11,r21 + mov r21,r10 + and r21,r12 + eor r21,r13 + std Y+59,r21 + ldd r12,Y+12 + ldd r18,Y+24 + ldd r19,Y+36 + ldd r20,Y+48 + com r12 + mov r13,r18 + and r13,r12 + eor r13,r19 + mov r23,r20 + eor r23,r13 + subi r28,184 + sbci r29,255 + st Y,r23 + subi r28,72 + sbc r29,r1 + mov r14,r20 + eor r14,r12 + mov r11,r18 + or r11,r19 + eor r11,r14 + mov r12,r18 + eor r12,r20 + mov r21,r13 + and r21,r14 + eor r21,r12 + std Y+12,r21 + mov r21,r11 + and r21,r12 + eor r21,r13 + std Y+60,r21 + std Y+25,r11 + std Y+26,r26 + std Y+27,r27 + std Y+28,r2 + std Y+29,r3 + std Y+30,r4 + std Y+31,r5 + std Y+32,r6 + std Y+33,r7 + std Y+34,r8 + 
std Y+35,r9 + std Y+36,r10 + ldd r26,Y+49 + ldd r27,Y+50 + ldd r2,Y+51 + ldd r3,Y+52 + ldd r4,Y+53 + ldd r5,Y+54 + ldd r6,Y+55 + ldd r7,Y+56 + ldd r8,Y+57 + ldd r9,Y+58 + ldd r10,Y+59 + ldd r11,Y+60 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + adc r26,r1 + std Y+13,r26 + std Y+14,r27 + std Y+15,r2 + std Y+16,r3 + std Y+17,r4 + std Y+18,r5 + std Y+19,r6 + std Y+20,r7 + std Y+21,r8 + std Y+22,r9 + std Y+23,r10 + std Y+24,r11 + adiw r28,61 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y + subi r28,72 + sbc r29,r1 + bst r26,0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r3 + ror r2 + ror r27 + ror r26 + bld r11,7 + std Y+37,r5 + std Y+38,r6 + std Y+39,r7 + std Y+40,r8 + std Y+41,r9 + std Y+42,r10 + std Y+43,r11 + std Y+44,r26 + std Y+45,r27 + std Y+46,r2 + std Y+47,r3 + std Y+48,r4 + dec r22 + breq 5542f + rjmp 99b +5542: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r2,Y+15 + ldd r3,Y+16 + ldd r4,Y+17 + ldd r5,Y+18 + ldd r6,Y+19 + ldd r7,Y+20 + ldd r8,Y+21 + ldd r9,Y+22 + ldd r10,Y+23 + ldd r11,Y+24 + std Z+12,r26 + std Z+13,r27 + std Z+14,r2 + std Z+15,r3 + std Z+16,r4 + std Z+17,r5 + std Z+18,r6 + std Z+19,r7 + std Z+20,r8 + std Z+21,r9 + std Z+22,r10 + std Z+23,r11 + ldd r26,Y+25 + ldd r27,Y+26 + ldd r2,Y+27 + ldd r3,Y+28 + ldd r4,Y+29 + ldd r5,Y+30 + ldd r6,Y+31 + ldd r7,Y+32 + ldd r8,Y+33 + ldd r9,Y+34 + ldd r10,Y+35 + ldd r11,Y+36 + std Z+24,r26 + std Z+25,r27 + std Z+26,r2 + std Z+27,r3 + std Z+28,r4 + std Z+29,r5 + std Z+30,r6 + std Z+31,r7 + std Z+32,r8 + std Z+33,r9 + std Z+34,r10 + std Z+35,r11 + ldd r26,Y+37 + ldd r27,Y+38 + ldd r2,Y+39 + ldd r3,Y+40 + ldd r4,Y+41 + ldd r5,Y+42 + ldd r6,Y+43 + ldd r7,Y+44 + ldd r8,Y+45 + ldd r9,Y+46 + ldd r10,Y+47 + ldd r11,Y+48 + std Z+36,r26 + std Z+37,r27 + std Z+38,r2 + std Z+39,r3 + std Z+40,r4 + std Z+41,r5 + std Z+42,r6 + std Z+43,r7 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + subi r28,184 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot384_permute_7, .-knot384_permute_7 + +#endif diff --git a/knot/Implementations/crypto_hash/knot512/rhys/internal-knot-512-avr.S b/knot/Implementations/crypto_hash/knot512/rhys/internal-knot-512-avr.S new file mode 100644 index 0000000..6f92ac3 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot512/rhys/internal-knot-512-avr.S @@ -0,0 +1,2315 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_7, @object + .size table_7, 104 +table_7: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 16 + .byte 32 + .byte 65 + .byte 3 + .byte 6 + .byte 12 + .byte 24 + .byte 48 + .byte 97 + .byte 66 + .byte 5 + .byte 10 + .byte 20 + .byte 40 + .byte 81 + .byte 35 + .byte 71 + .byte 15 + .byte 30 + .byte 60 + .byte 121 + .byte 114 + .byte 100 + .byte 
72 + .byte 17 + .byte 34 + .byte 69 + .byte 11 + .byte 22 + .byte 44 + .byte 89 + .byte 51 + .byte 103 + .byte 78 + .byte 29 + .byte 58 + .byte 117 + .byte 106 + .byte 84 + .byte 41 + .byte 83 + .byte 39 + .byte 79 + .byte 31 + .byte 62 + .byte 125 + .byte 122 + .byte 116 + .byte 104 + .byte 80 + .byte 33 + .byte 67 + .byte 7 + .byte 14 + .byte 28 + .byte 56 + .byte 113 + .byte 98 + .byte 68 + .byte 9 + .byte 18 + .byte 36 + .byte 73 + .byte 19 + .byte 38 + .byte 77 + .byte 27 + .byte 54 + .byte 109 + .byte 90 + .byte 53 + .byte 107 + .byte 86 + .byte 45 + .byte 91 + .byte 55 + .byte 111 + .byte 94 + .byte 61 + .byte 123 + .byte 118 + .byte 108 + .byte 88 + .byte 49 + .byte 99 + .byte 70 + .byte 13 + .byte 26 + .byte 52 + .byte 105 + .byte 82 + .byte 37 + .byte 75 + .byte 23 + .byte 46 + .byte 93 + .byte 59 + .byte 119 + .byte 110 + .byte 92 + + .text +.global knot512_permute_7 + .type knot512_permute_7, @function +knot512_permute_7: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,96 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 113 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r26,Z+16 + ldd r27,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + ldd r26,Z+32 + ldd r27,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r8,Z+40 + ldd r9,Z+41 + ldd r10,Z+42 + ldd r11,Z+43 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + std Y+33,r26 + std Y+34,r27 + std Y+35,r2 + std Y+36,r3 + std Y+37,r4 + std Y+38,r5 + std Y+39,r6 + std Y+40,r7 + std Y+41,r8 + std Y+42,r9 + std Y+43,r10 + std Y+44,r11 + std Y+45,r12 + std Y+46,r13 + std Y+47,r14 + std Y+48,r15 + ldd r26,Z+48 + ldd r27,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r8,Z+56 + ldd r9,Z+57 + ldd r10,Z+58 + ldd r11,Z+59 + ldd r12,Z+60 + ldd r13,Z+61 + ldd r14,Z+62 + ldd r15,Z+63 + adiw r28,49 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y+,r12 + st Y+,r13 + st Y+,r14 + st Y,r15 + subi r28,64 + sbc r29,r1 + push r31 + push r30 + ldi r30,lo8(table_7) + ldi r31,hi8(table_7) +#if defined(RAMPZ) + ldi r17,hh8(table_7) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +134: + ldd r24,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r24,r18 + inc r30 + ldd r18,Y+17 + ldd r19,Y+33 + ldd r20,Y+49 + com r24 + mov 
r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,175 + sbci r29,255 + st Y,r23 + subi r28,81 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r26,r18 + or r26,r19 + eor r26,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+1,r21 + mov r21,r26 + and r21,r24 + eor r21,r25 + subi r28,191 + sbci r29,255 + st Y,r21 + subi r28,65 + sbc r29,r1 + ldd r24,Y+2 + ldd r18,Y+18 + ldd r19,Y+34 + ldd r20,Y+50 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,174 + sbci r29,255 + st Y,r23 + subi r28,82 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r27,r18 + or r27,r19 + eor r27,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+2,r21 + mov r21,r27 + and r21,r24 + eor r21,r25 + subi r28,190 + sbci r29,255 + st Y,r21 + subi r28,66 + sbc r29,r1 + ldd r24,Y+3 + ldd r18,Y+19 + ldd r19,Y+35 + ldd r20,Y+51 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,173 + sbci r29,255 + st Y,r23 + subi r28,83 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r2,r18 + or r2,r19 + eor r2,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+3,r21 + mov r21,r2 + and r21,r24 + eor r21,r25 + subi r28,189 + sbci r29,255 + st Y,r21 + subi r28,67 + sbc r29,r1 + ldd r24,Y+4 + ldd r18,Y+20 + ldd r19,Y+36 + ldd r20,Y+52 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,172 + sbci r29,255 + st Y,r23 + subi r28,84 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r3,r18 + or r3,r19 + eor r3,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+4,r21 + mov r21,r3 + and r21,r24 + eor r21,r25 + subi r28,188 + sbci r29,255 + st Y,r21 + subi r28,68 + sbc r29,r1 + ldd r24,Y+5 + ldd r18,Y+21 + ldd r19,Y+37 + ldd r20,Y+53 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,171 + sbci r29,255 + st Y,r23 + subi r28,85 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r4,r18 + or r4,r19 + eor r4,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+5,r21 + mov r21,r4 + and r21,r24 + eor r21,r25 + subi r28,187 + sbci r29,255 + st Y,r21 + subi r28,69 + sbc r29,r1 + ldd r24,Y+6 + ldd r18,Y+22 + ldd r19,Y+38 + ldd r20,Y+54 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,170 + sbci r29,255 + st Y,r23 + subi r28,86 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r5,r18 + or r5,r19 + eor r5,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+6,r21 + mov r21,r5 + and r21,r24 + eor r21,r25 + subi r28,186 + sbci r29,255 + st Y,r21 + subi r28,70 + sbc r29,r1 + ldd r24,Y+7 + ldd r18,Y+23 + ldd r19,Y+39 + ldd r20,Y+55 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,169 + sbci r29,255 + st Y,r23 + subi r28,87 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r6,r18 + or r6,r19 + eor r6,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+7,r21 + mov r21,r6 + and r21,r24 + eor r21,r25 + subi r28,185 + sbci r29,255 + st Y,r21 + subi r28,71 + sbc r29,r1 + ldd r24,Y+8 + ldd r18,Y+24 + ldd r19,Y+40 + ldd r20,Y+56 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,168 + sbci r29,255 + st Y,r23 + subi r28,88 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r7,r18 + or r7,r19 + eor r7,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+8,r21 + 
mov r21,r7 + and r21,r24 + eor r21,r25 + subi r28,184 + sbci r29,255 + st Y,r21 + subi r28,72 + sbc r29,r1 + ldd r24,Y+9 + ldd r18,Y+25 + ldd r19,Y+41 + ldd r20,Y+57 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,167 + sbci r29,255 + st Y,r23 + subi r28,89 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r8,r18 + or r8,r19 + eor r8,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+9,r21 + mov r21,r8 + and r21,r24 + eor r21,r25 + subi r28,183 + sbci r29,255 + st Y,r21 + subi r28,73 + sbc r29,r1 + ldd r24,Y+10 + ldd r18,Y+26 + ldd r19,Y+42 + ldd r20,Y+58 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,166 + sbci r29,255 + st Y,r23 + subi r28,90 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r9,r18 + or r9,r19 + eor r9,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+10,r21 + mov r21,r9 + and r21,r24 + eor r21,r25 + subi r28,182 + sbci r29,255 + st Y,r21 + subi r28,74 + sbc r29,r1 + ldd r24,Y+11 + ldd r18,Y+27 + ldd r19,Y+43 + ldd r20,Y+59 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,165 + sbci r29,255 + st Y,r23 + subi r28,91 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r10,r18 + or r10,r19 + eor r10,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+11,r21 + mov r21,r10 + and r21,r24 + eor r21,r25 + subi r28,181 + sbci r29,255 + st Y,r21 + subi r28,75 + sbc r29,r1 + ldd r24,Y+12 + ldd r18,Y+28 + ldd r19,Y+44 + ldd r20,Y+60 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,164 + sbci r29,255 + st Y,r23 + subi r28,92 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r11,r18 + or r11,r19 + eor r11,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+12,r21 + mov r21,r11 + and r21,r24 + eor r21,r25 + subi r28,180 + sbci r29,255 + st Y,r21 + subi r28,76 + sbc r29,r1 + ldd r24,Y+13 + ldd r18,Y+29 + ldd r19,Y+45 + ldd r20,Y+61 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,163 + sbci r29,255 + st Y,r23 + subi r28,93 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r12,r18 + or r12,r19 + eor r12,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+13,r21 + mov r21,r12 + and r21,r24 + eor r21,r25 + subi r28,179 + sbci r29,255 + st Y,r21 + subi r28,77 + sbc r29,r1 + ldd r24,Y+14 + ldd r18,Y+30 + ldd r19,Y+46 + ldd r20,Y+62 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,162 + sbci r29,255 + st Y,r23 + subi r28,94 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r13,r18 + or r13,r19 + eor r13,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+14,r21 + mov r21,r13 + and r21,r24 + eor r21,r25 + subi r28,178 + sbci r29,255 + st Y,r21 + subi r28,78 + sbc r29,r1 + ldd r24,Y+15 + ldd r18,Y+31 + ldd r19,Y+47 + ldd r20,Y+63 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,161 + sbci r29,255 + st Y,r23 + subi r28,95 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r14,r18 + or r14,r19 + eor r14,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+15,r21 + mov r21,r14 + and r21,r24 + eor r21,r25 + subi r28,177 + sbci r29,255 + st Y,r21 + subi r28,79 + sbc r29,r1 + ldd r24,Y+16 + ldd r18,Y+32 + ldd r19,Y+48 + subi r28,192 + sbci r29,255 + ld r20,Y + subi r28,64 + sbc r29,r1 + com r24 + mov r25,r18 + and r25,r24 + 
eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,160 + sbci r29,255 + st Y,r23 + subi r28,96 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r15,r18 + or r15,r19 + eor r15,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+16,r21 + mov r21,r15 + and r21,r24 + eor r21,r25 + subi r28,176 + sbci r29,255 + st Y,r21 + subi r28,80 + sbc r29,r1 + std Y+33,r14 + std Y+34,r15 + std Y+35,r26 + std Y+36,r27 + std Y+37,r2 + std Y+38,r3 + std Y+39,r4 + std Y+40,r5 + std Y+41,r6 + std Y+42,r7 + std Y+43,r8 + std Y+44,r9 + std Y+45,r10 + std Y+46,r11 + std Y+47,r12 + std Y+48,r13 + subi r28,191 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,80 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,96 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + adiw r28,49 + st Y+,r13 + st Y+,r14 + st Y+,r15 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y,r12 + subi r28,64 + sbc r29,r1 + dec r22 + breq 5812f + rjmp 134b +5812: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r26,Y+17 + ldd r27,Y+18 + ldd r2,Y+19 + ldd r3,Y+20 + ldd r4,Y+21 + ldd r5,Y+22 + ldd r6,Y+23 + ldd r7,Y+24 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + std Z+16,r26 + std Z+17,r27 + std Z+18,r2 + std Z+19,r3 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + ldd r26,Y+33 + ldd r27,Y+34 + ldd r2,Y+35 + ldd r3,Y+36 + ldd r4,Y+37 + ldd r5,Y+38 + ldd r6,Y+39 + ldd r7,Y+40 + ldd r8,Y+41 + ldd r9,Y+42 + ldd r10,Y+43 + ldd r11,Y+44 + ldd r12,Y+45 + ldd r13,Y+46 + ldd r14,Y+47 + ldd r15,Y+48 + std Z+32,r26 + std Z+33,r27 + std Z+34,r2 + std Z+35,r3 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r8 + std Z+41,r9 + std Z+42,r10 + std Z+43,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + adiw r28,49 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ 
+ ld r15,Y + subi r28,64 + sbc r29,r1 + std Z+48,r26 + std Z+49,r27 + std Z+50,r2 + std Z+51,r3 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + std Z+56,r8 + std Z+57,r9 + std Z+58,r10 + std Z+59,r11 + std Z+60,r12 + std Z+61,r13 + std Z+62,r14 + std Z+63,r15 + subi r28,160 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot512_permute_7, .-knot512_permute_7 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_8, @object + .size table_8, 140 +table_8: + .byte 1 + .byte 2 + .byte 4 + .byte 8 + .byte 17 + .byte 35 + .byte 71 + .byte 142 + .byte 28 + .byte 56 + .byte 113 + .byte 226 + .byte 196 + .byte 137 + .byte 18 + .byte 37 + .byte 75 + .byte 151 + .byte 46 + .byte 92 + .byte 184 + .byte 112 + .byte 224 + .byte 192 + .byte 129 + .byte 3 + .byte 6 + .byte 12 + .byte 25 + .byte 50 + .byte 100 + .byte 201 + .byte 146 + .byte 36 + .byte 73 + .byte 147 + .byte 38 + .byte 77 + .byte 155 + .byte 55 + .byte 110 + .byte 220 + .byte 185 + .byte 114 + .byte 228 + .byte 200 + .byte 144 + .byte 32 + .byte 65 + .byte 130 + .byte 5 + .byte 10 + .byte 21 + .byte 43 + .byte 86 + .byte 173 + .byte 91 + .byte 182 + .byte 109 + .byte 218 + .byte 181 + .byte 107 + .byte 214 + .byte 172 + .byte 89 + .byte 178 + .byte 101 + .byte 203 + .byte 150 + .byte 44 + .byte 88 + .byte 176 + .byte 97 + .byte 195 + .byte 135 + .byte 15 + .byte 31 + .byte 62 + .byte 125 + .byte 251 + .byte 246 + .byte 237 + .byte 219 + .byte 183 + .byte 111 + .byte 222 + .byte 189 + .byte 122 + .byte 245 + .byte 235 + .byte 215 + .byte 174 + .byte 93 + .byte 186 + .byte 116 + .byte 232 + .byte 209 + .byte 162 + .byte 68 + .byte 136 + .byte 16 + .byte 33 + .byte 67 + .byte 134 + .byte 13 + .byte 27 + .byte 54 + .byte 108 + .byte 216 + .byte 177 + .byte 99 + .byte 199 + .byte 143 + .byte 30 + .byte 60 + .byte 121 + .byte 243 + .byte 231 + .byte 206 + .byte 156 + .byte 57 + .byte 115 + .byte 230 + .byte 204 + .byte 152 + .byte 49 + .byte 98 + .byte 197 + .byte 139 + .byte 22 + .byte 45 + .byte 90 + .byte 180 + .byte 105 + .byte 210 + .byte 164 + .byte 72 + .byte 145 + .byte 34 + .byte 69 + + .text +.global knot512_permute_8 + .type knot512_permute_8, @function +knot512_permute_8: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + movw r30,r24 + in r28,0x3d + in r29,0x3e + subi r28,96 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 113 + ld r26,Z + ldd r27,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + std Y+1,r26 + std Y+2,r27 + std Y+3,r2 + std Y+4,r3 + std Y+5,r4 + std Y+6,r5 + std Y+7,r6 + std Y+8,r7 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + std Y+13,r12 + std Y+14,r13 + std Y+15,r14 + std Y+16,r15 + ldd r26,Z+16 + ldd r27,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r8,Z+24 + ldd r9,Z+25 + ldd r10,Z+26 + ldd r11,Z+27 + ldd r12,Z+28 + ldd r13,Z+29 + ldd r14,Z+30 + ldd r15,Z+31 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std 
Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + ldd r26,Z+32 + ldd r27,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r8,Z+40 + ldd r9,Z+41 + ldd r10,Z+42 + ldd r11,Z+43 + ldd r12,Z+44 + ldd r13,Z+45 + ldd r14,Z+46 + ldd r15,Z+47 + std Y+33,r26 + std Y+34,r27 + std Y+35,r2 + std Y+36,r3 + std Y+37,r4 + std Y+38,r5 + std Y+39,r6 + std Y+40,r7 + std Y+41,r8 + std Y+42,r9 + std Y+43,r10 + std Y+44,r11 + std Y+45,r12 + std Y+46,r13 + std Y+47,r14 + std Y+48,r15 + ldd r26,Z+48 + ldd r27,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r8,Z+56 + ldd r9,Z+57 + ldd r10,Z+58 + ldd r11,Z+59 + ldd r12,Z+60 + ldd r13,Z+61 + ldd r14,Z+62 + ldd r15,Z+63 + adiw r28,49 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y+,r12 + st Y+,r13 + st Y+,r14 + st Y,r15 + subi r28,64 + sbc r29,r1 + push r31 + push r30 + ldi r30,lo8(table_8) + ldi r31,hi8(table_8) +#if defined(RAMPZ) + ldi r17,hh8(table_8) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif +134: + ldd r24,Y+1 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r24,r18 + inc r30 + ldd r18,Y+17 + ldd r19,Y+33 + ldd r20,Y+49 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,175 + sbci r29,255 + st Y,r23 + subi r28,81 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r26,r18 + or r26,r19 + eor r26,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+1,r21 + mov r21,r26 + and r21,r24 + eor r21,r25 + subi r28,191 + sbci r29,255 + st Y,r21 + subi r28,65 + sbc r29,r1 + ldd r24,Y+2 + ldd r18,Y+18 + ldd r19,Y+34 + ldd r20,Y+50 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,174 + sbci r29,255 + st Y,r23 + subi r28,82 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r27,r18 + or r27,r19 + eor r27,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+2,r21 + mov r21,r27 + and r21,r24 + eor r21,r25 + subi r28,190 + sbci r29,255 + st Y,r21 + subi r28,66 + sbc r29,r1 + ldd r24,Y+3 + ldd r18,Y+19 + ldd r19,Y+35 + ldd r20,Y+51 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,173 + sbci r29,255 + st Y,r23 + subi r28,83 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r2,r18 + or r2,r19 + eor r2,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+3,r21 + mov r21,r2 + and r21,r24 + eor r21,r25 + subi r28,189 + sbci r29,255 + st Y,r21 + subi r28,67 + sbc r29,r1 + ldd r24,Y+4 + ldd r18,Y+20 + ldd r19,Y+36 + ldd r20,Y+52 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,172 + sbci r29,255 + st Y,r23 + subi r28,84 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r3,r18 + or r3,r19 + eor r3,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+4,r21 + mov r21,r3 + and r21,r24 + eor r21,r25 + subi r28,188 + sbci r29,255 + st Y,r21 + subi r28,68 + sbc r29,r1 + ldd r24,Y+5 + ldd r18,Y+21 + ldd r19,Y+37 + ldd r20,Y+53 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,171 + sbci r29,255 + st Y,r23 + subi r28,85 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r4,r18 + or r4,r19 + eor r4,r16 + mov r24,r18 + eor r24,r20 + mov 
r21,r25 + and r21,r16 + eor r21,r24 + std Y+5,r21 + mov r21,r4 + and r21,r24 + eor r21,r25 + subi r28,187 + sbci r29,255 + st Y,r21 + subi r28,69 + sbc r29,r1 + ldd r24,Y+6 + ldd r18,Y+22 + ldd r19,Y+38 + ldd r20,Y+54 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,170 + sbci r29,255 + st Y,r23 + subi r28,86 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r5,r18 + or r5,r19 + eor r5,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+6,r21 + mov r21,r5 + and r21,r24 + eor r21,r25 + subi r28,186 + sbci r29,255 + st Y,r21 + subi r28,70 + sbc r29,r1 + ldd r24,Y+7 + ldd r18,Y+23 + ldd r19,Y+39 + ldd r20,Y+55 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,169 + sbci r29,255 + st Y,r23 + subi r28,87 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r6,r18 + or r6,r19 + eor r6,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+7,r21 + mov r21,r6 + and r21,r24 + eor r21,r25 + subi r28,185 + sbci r29,255 + st Y,r21 + subi r28,71 + sbc r29,r1 + ldd r24,Y+8 + ldd r18,Y+24 + ldd r19,Y+40 + ldd r20,Y+56 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,168 + sbci r29,255 + st Y,r23 + subi r28,88 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r7,r18 + or r7,r19 + eor r7,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+8,r21 + mov r21,r7 + and r21,r24 + eor r21,r25 + subi r28,184 + sbci r29,255 + st Y,r21 + subi r28,72 + sbc r29,r1 + ldd r24,Y+9 + ldd r18,Y+25 + ldd r19,Y+41 + ldd r20,Y+57 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,167 + sbci r29,255 + st Y,r23 + subi r28,89 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r8,r18 + or r8,r19 + eor r8,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+9,r21 + mov r21,r8 + and r21,r24 + eor r21,r25 + subi r28,183 + sbci r29,255 + st Y,r21 + subi r28,73 + sbc r29,r1 + ldd r24,Y+10 + ldd r18,Y+26 + ldd r19,Y+42 + ldd r20,Y+58 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,166 + sbci r29,255 + st Y,r23 + subi r28,90 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r9,r18 + or r9,r19 + eor r9,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+10,r21 + mov r21,r9 + and r21,r24 + eor r21,r25 + subi r28,182 + sbci r29,255 + st Y,r21 + subi r28,74 + sbc r29,r1 + ldd r24,Y+11 + ldd r18,Y+27 + ldd r19,Y+43 + ldd r20,Y+59 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,165 + sbci r29,255 + st Y,r23 + subi r28,91 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r10,r18 + or r10,r19 + eor r10,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+11,r21 + mov r21,r10 + and r21,r24 + eor r21,r25 + subi r28,181 + sbci r29,255 + st Y,r21 + subi r28,75 + sbc r29,r1 + ldd r24,Y+12 + ldd r18,Y+28 + ldd r19,Y+44 + ldd r20,Y+60 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,164 + sbci r29,255 + st Y,r23 + subi r28,92 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r11,r18 + or r11,r19 + eor r11,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+12,r21 + mov r21,r11 + and r21,r24 + eor r21,r25 + subi r28,180 + sbci r29,255 + st Y,r21 + subi r28,76 + sbc r29,r1 + ldd r24,Y+13 + ldd r18,Y+29 + ldd r19,Y+45 + ldd r20,Y+61 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov 
r23,r20 + eor r23,r25 + subi r28,163 + sbci r29,255 + st Y,r23 + subi r28,93 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r12,r18 + or r12,r19 + eor r12,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+13,r21 + mov r21,r12 + and r21,r24 + eor r21,r25 + subi r28,179 + sbci r29,255 + st Y,r21 + subi r28,77 + sbc r29,r1 + ldd r24,Y+14 + ldd r18,Y+30 + ldd r19,Y+46 + ldd r20,Y+62 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,162 + sbci r29,255 + st Y,r23 + subi r28,94 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r13,r18 + or r13,r19 + eor r13,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+14,r21 + mov r21,r13 + and r21,r24 + eor r21,r25 + subi r28,178 + sbci r29,255 + st Y,r21 + subi r28,78 + sbc r29,r1 + ldd r24,Y+15 + ldd r18,Y+31 + ldd r19,Y+47 + ldd r20,Y+63 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,161 + sbci r29,255 + st Y,r23 + subi r28,95 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r14,r18 + or r14,r19 + eor r14,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+15,r21 + mov r21,r14 + and r21,r24 + eor r21,r25 + subi r28,177 + sbci r29,255 + st Y,r21 + subi r28,79 + sbc r29,r1 + ldd r24,Y+16 + ldd r18,Y+32 + ldd r19,Y+48 + subi r28,192 + sbci r29,255 + ld r20,Y + subi r28,64 + sbc r29,r1 + com r24 + mov r25,r18 + and r25,r24 + eor r25,r19 + mov r23,r20 + eor r23,r25 + subi r28,160 + sbci r29,255 + st Y,r23 + subi r28,96 + sbc r29,r1 + mov r16,r20 + eor r16,r24 + mov r15,r18 + or r15,r19 + eor r15,r16 + mov r24,r18 + eor r24,r20 + mov r21,r25 + and r21,r16 + eor r21,r24 + std Y+16,r21 + mov r21,r15 + and r21,r24 + eor r21,r25 + subi r28,176 + sbci r29,255 + st Y,r21 + subi r28,80 + sbc r29,r1 + std Y+33,r14 + std Y+34,r15 + std Y+35,r26 + std Y+36,r27 + std Y+37,r2 + std Y+38,r3 + std Y+39,r4 + std Y+40,r5 + std Y+41,r6 + std Y+42,r7 + std Y+43,r8 + std Y+44,r9 + std Y+45,r10 + std Y+46,r11 + std Y+47,r12 + std Y+48,r13 + subi r28,191 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,80 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + std Y+17,r26 + std Y+18,r27 + std Y+19,r2 + std Y+20,r3 + std Y+21,r4 + std Y+22,r5 + std Y+23,r6 + std Y+24,r7 + std Y+25,r8 + std Y+26,r9 + std Y+27,r10 + std Y+28,r11 + std Y+29,r12 + std Y+30,r13 + std Y+31,r14 + std Y+32,r15 + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,96 + sbc r29,r1 + lsl r26 + rol r27 + rol r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + rol r10 + rol r11 + rol r12 + rol r13 + rol r14 + rol r15 + adc r26,r1 + adiw r28,49 + st Y+,r13 + st Y+,r14 + st Y+,r15 + st Y+,r26 + st Y+,r27 + st Y+,r2 + st Y+,r3 + st Y+,r4 + st Y+,r5 + st Y+,r6 + st Y+,r7 + st Y+,r8 + st Y+,r9 + st Y+,r10 + st Y+,r11 + st Y,r12 + subi r28,64 + sbc r29,r1 + dec r22 + breq 5812f + rjmp 134b +5812: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r2,Y+3 + ldd r3,Y+4 + ldd r4,Y+5 + ldd r5,Y+6 + ldd r6,Y+7 + ldd r7,Y+8 
+ ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + ldd r12,Y+13 + ldd r13,Y+14 + ldd r14,Y+15 + ldd r15,Y+16 + st Z,r26 + std Z+1,r27 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + ldd r26,Y+17 + ldd r27,Y+18 + ldd r2,Y+19 + ldd r3,Y+20 + ldd r4,Y+21 + ldd r5,Y+22 + ldd r6,Y+23 + ldd r7,Y+24 + ldd r8,Y+25 + ldd r9,Y+26 + ldd r10,Y+27 + ldd r11,Y+28 + ldd r12,Y+29 + ldd r13,Y+30 + ldd r14,Y+31 + ldd r15,Y+32 + std Z+16,r26 + std Z+17,r27 + std Z+18,r2 + std Z+19,r3 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r8 + std Z+25,r9 + std Z+26,r10 + std Z+27,r11 + std Z+28,r12 + std Z+29,r13 + std Z+30,r14 + std Z+31,r15 + ldd r26,Y+33 + ldd r27,Y+34 + ldd r2,Y+35 + ldd r3,Y+36 + ldd r4,Y+37 + ldd r5,Y+38 + ldd r6,Y+39 + ldd r7,Y+40 + ldd r8,Y+41 + ldd r9,Y+42 + ldd r10,Y+43 + ldd r11,Y+44 + ldd r12,Y+45 + ldd r13,Y+46 + ldd r14,Y+47 + ldd r15,Y+48 + std Z+32,r26 + std Z+33,r27 + std Z+34,r2 + std Z+35,r3 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r8 + std Z+41,r9 + std Z+42,r10 + std Z+43,r11 + std Z+44,r12 + std Z+45,r13 + std Z+46,r14 + std Z+47,r15 + adiw r28,49 + ld r26,Y+ + ld r27,Y+ + ld r2,Y+ + ld r3,Y+ + ld r4,Y+ + ld r5,Y+ + ld r6,Y+ + ld r7,Y+ + ld r8,Y+ + ld r9,Y+ + ld r10,Y+ + ld r11,Y+ + ld r12,Y+ + ld r13,Y+ + ld r14,Y+ + ld r15,Y + subi r28,64 + sbc r29,r1 + std Z+48,r26 + std Z+49,r27 + std Z+50,r2 + std Z+51,r3 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + std Z+56,r8 + std Z+57,r9 + std Z+58,r10 + std Z+59,r11 + std Z+60,r12 + std Z+61,r13 + std Z+62,r14 + std Z+63,r15 + subi r28,160 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size knot512_permute_8, .-knot512_permute_8 + +#endif diff --git a/knot/Implementations/crypto_hash/knot512/rhys/internal-knot.c b/knot/Implementations/crypto_hash/knot512/rhys/internal-knot.c new file mode 100644 index 0000000..f8b378e --- /dev/null +++ b/knot/Implementations/crypto_hash/knot512/rhys/internal-knot.c @@ -0,0 +1,301 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "internal-knot.h" + +#if !defined(__AVR__) + +/* Round constants for the KNOT-256, KNOT-384, and KNOT-512 permutations */ +static uint8_t const rc6[52] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06, 0x0c, 0x18, 0x31, 0x22, + 0x05, 0x0a, 0x14, 0x29, 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28, + 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24, 0x09, 0x12, 0x25, 0x0b, + 0x16, 0x2d, 0x1b, 0x37, 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26, + 0x0d, 0x1a, 0x35, 0x2a +}; +static uint8_t const rc7[104] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03, 0x06, 0x0c, 0x18, 0x30, + 0x61, 0x42, 0x05, 0x0a, 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c, + 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b, 0x16, 0x2c, 0x59, 0x33, + 0x67, 0x4e, 0x1d, 0x3a, 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f, + 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43, 0x07, 0x0e, 0x1c, 0x38, + 0x71, 0x62, 0x44, 0x09, 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36, + 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37, 0x6f, 0x5e, 0x3d, 0x7b, + 0x76, 0x6c, 0x58, 0x31, 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25, + 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c +}; +static uint8_t const rc8[140] = { + 0x01, 0x02, 0x04, 0x08, 0x11, 0x23, 0x47, 0x8e, 0x1c, 0x38, 0x71, 0xe2, + 0xc4, 0x89, 0x12, 0x25, 0x4b, 0x97, 0x2e, 0x5c, 0xb8, 0x70, 0xe0, 0xc0, + 0x81, 0x03, 0x06, 0x0c, 0x19, 0x32, 0x64, 0xc9, 0x92, 0x24, 0x49, 0x93, + 0x26, 0x4d, 0x9b, 0x37, 0x6e, 0xdc, 0xb9, 0x72, 0xe4, 0xc8, 0x90, 0x20, + 0x41, 0x82, 0x05, 0x0a, 0x15, 0x2b, 0x56, 0xad, 0x5b, 0xb6, 0x6d, 0xda, + 0xb5, 0x6b, 0xd6, 0xac, 0x59, 0xb2, 0x65, 0xcb, 0x96, 0x2c, 0x58, 0xb0, + 0x61, 0xc3, 0x87, 0x0f, 0x1f, 0x3e, 0x7d, 0xfb, 0xf6, 0xed, 0xdb, 0xb7, + 0x6f, 0xde, 0xbd, 0x7a, 0xf5, 0xeb, 0xd7, 0xae, 0x5d, 0xba, 0x74, 0xe8, + 0xd1, 0xa2, 0x44, 0x88, 0x10, 0x21, 0x43, 0x86, 0x0d, 0x1b, 0x36, 0x6c, + 0xd8, 0xb1, 0x63, 0xc7, 0x8f, 0x1e, 0x3c, 0x79, 0xf3, 0xe7, 0xce, 0x9c, + 0x39, 0x73, 0xe6, 0xcc, 0x98, 0x31, 0x62, 0xc5, 0x8b, 0x16, 0x2d, 0x5a, + 0xb4, 0x69, 0xd2, 0xa4, 0x48, 0x91, 0x22, 0x45 +}; + +/* Applies the KNOT S-box to four 64-bit words in bit-sliced mode */ +#define knot_sbox64(a0, a1, a2, a3, b1, b2, b3) \ + do { \ + uint64_t t1, t3, t6; \ + t1 = ~(a0); \ + t3 = (a2) ^ ((a1) & t1); \ + (b3) = (a3) ^ t3; \ + t6 = (a3) ^ t1; \ + (b2) = ((a1) | (a2)) ^ t6; \ + t1 = (a1) ^ (a3); \ + (a0) = t1 ^ (t3 & t6); \ + (b1) = t3 ^ ((b2) & t1); \ + } while (0) + +/* Applies the KNOT S-box to four 32-bit words in bit-sliced mode */ +#define knot_sbox32(a0, a1, a2, a3, b1, b2, b3) \ + do { \ + uint32_t t1, t3, t6; \ + t1 = ~(a0); \ + t3 = (a2) ^ ((a1) & t1); \ + (b3) = (a3) ^ t3; \ + t6 = (a3) ^ t1; \ + (b2) = ((a1) | (a2)) ^ t6; \ + t1 = (a1) ^ (a3); \ + (a0) = t1 ^ (t3 & t6); \ + (b1) = t3 ^ ((b2) & t1); \ + } while (0) + +static void knot256_permute + (knot256_state_t *state, const uint8_t *rc, uint8_t rounds) +{ + uint64_t b1, b2, b3; + + /* Load the input state into local variables; each row is 64 bits */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + uint64_t x0 = state->S[0]; + uint64_t x1 = state->S[1]; + uint64_t x2 = state->S[2]; + uint64_t x3 = state->S[3]; +#else + uint64_t x0 = le_load_word64(state->B); + uint64_t x1 = le_load_word64(state->B + 8); + uint64_t x2 = le_load_word64(state->B + 16); + uint64_t x3 = le_load_word64(state->B + 24); +#endif + + /* Perform all permutation rounds */ + for (; rounds > 0; --rounds) { + /* Add the next round constant to the state */ + x0 ^= *rc++; + + /* Substitution layer */ + knot_sbox64(x0, x1, x2, x3, 
b1, b2, b3); + + /* Linear diffusion layer */ + x1 = leftRotate1_64(b1); + x2 = leftRotate8_64(b2); + x3 = leftRotate25_64(b3); + } + + /* Store the local variables to the output state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + state->S[0] = x0; + state->S[1] = x1; + state->S[2] = x2; + state->S[3] = x3; +#else + le_store_word64(state->B, x0); + le_store_word64(state->B + 8, x1); + le_store_word64(state->B + 16, x2); + le_store_word64(state->B + 24, x3); +#endif +} + +void knot256_permute_6(knot256_state_t *state, uint8_t rounds) +{ + knot256_permute(state, rc6, rounds); +} + +void knot256_permute_7(knot256_state_t *state, uint8_t rounds) +{ + knot256_permute(state, rc7, rounds); +} + +void knot384_permute_7(knot384_state_t *state, uint8_t rounds) +{ + const uint8_t *rc = rc7; + uint64_t b2, b4, b6; + uint32_t b3, b5, b7; + + /* Load the input state into local variables; each row is 96 bits */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + uint64_t x0 = state->S[0]; + uint32_t x1 = state->W[2]; + uint64_t x2 = state->W[3] | (((uint64_t)(state->W[4])) << 32); + uint32_t x3 = state->W[5]; + uint64_t x4 = state->S[3]; + uint32_t x5 = state->W[8]; + uint64_t x6 = state->W[9] | (((uint64_t)(state->W[10])) << 32); + uint32_t x7 = state->W[11]; +#else + uint64_t x0 = le_load_word64(state->B); + uint32_t x1 = le_load_word32(state->B + 8); + uint64_t x2 = le_load_word64(state->B + 12); + uint32_t x3 = le_load_word32(state->B + 20); + uint64_t x4 = le_load_word64(state->B + 24); + uint32_t x5 = le_load_word32(state->B + 32); + uint64_t x6 = le_load_word64(state->B + 36); + uint32_t x7 = le_load_word32(state->B + 44); +#endif + + /* Perform all permutation rounds */ + for (; rounds > 0; --rounds) { + /* Add the next round constant to the state */ + x0 ^= *rc++; + + /* Substitution layer */ + knot_sbox64(x0, x2, x4, x6, b2, b4, b6); + knot_sbox32(x1, x3, x5, x7, b3, b5, b7); + + /* Linear diffusion layer */ + #define leftRotateShort_96(a0, a1, b0, b1, bits) \ + do { \ + (a0) = ((b0) << (bits)) | ((b1) >> (32 - (bits))); \ + (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ + } while (0) + #define leftRotateLong_96(a0, a1, b0, b1, bits) \ + do { \ + (a0) = ((b0) << (bits)) | \ + (((uint64_t)(b1)) << ((bits) - 32)) | \ + ((b0) >> (96 - (bits))); \ + (a1) = (uint32_t)(((b0) << ((bits) - 32)) >> 32); \ + } while (0) + leftRotateShort_96(x2, x3, b2, b3, 1); + leftRotateShort_96(x4, x5, b4, b5, 8); + leftRotateLong_96(x6, x7, b6, b7, 55); + } + + /* Store the local variables to the output state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + state->S[0] = x0; + state->W[2] = x1; + state->W[3] = (uint32_t)x2; + state->W[4] = (uint32_t)(x2 >> 32); + state->W[5] = x3; + state->S[3] = x4; + state->W[8] = x5; + state->W[9] = (uint32_t)x6; + state->W[10] = (uint32_t)(x6 >> 32); + state->W[11] = x7; +#else + le_store_word64(state->B, x0); + le_store_word32(state->B + 8, x1); + le_store_word64(state->B + 12, x2); + le_store_word32(state->B + 20, x3); + le_store_word64(state->B + 24, x4); + le_store_word32(state->B + 32, x5); + le_store_word64(state->B + 36, x6); + le_store_word32(state->B + 44, x7); +#endif +} + +static void knot512_permute + (knot512_state_t *state, const uint8_t *rc, uint8_t rounds) +{ + uint64_t b2, b3, b4, b5, b6, b7; + + /* Load the input state into local variables; each row is 128 bits */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + uint64_t x0 = state->S[0]; + uint64_t x1 = state->S[1]; + uint64_t x2 = state->S[2]; + uint64_t x3 = state->S[3]; + uint64_t x4 = state->S[4]; + uint64_t x5 = state->S[5]; + uint64_t 
x6 = state->S[6]; + uint64_t x7 = state->S[7]; +#else + uint64_t x0 = le_load_word64(state->B); + uint64_t x1 = le_load_word64(state->B + 8); + uint64_t x2 = le_load_word64(state->B + 16); + uint64_t x3 = le_load_word64(state->B + 24); + uint64_t x4 = le_load_word64(state->B + 32); + uint64_t x5 = le_load_word64(state->B + 40); + uint64_t x6 = le_load_word64(state->B + 48); + uint64_t x7 = le_load_word64(state->B + 56); +#endif + + /* Perform all permutation rounds */ + for (; rounds > 0; --rounds) { + /* Add the next round constant to the state */ + x0 ^= *rc++; + + /* Substitution layer */ + knot_sbox64(x0, x2, x4, x6, b2, b4, b6); + knot_sbox64(x1, x3, x5, x7, b3, b5, b7); + + /* Linear diffusion layer */ + #define leftRotate_128(a0, a1, b0, b1, bits) \ + do { \ + (a0) = ((b0) << (bits)) | ((b1) >> (64 - (bits))); \ + (a1) = ((b1) << (bits)) | ((b0) >> (64 - (bits))); \ + } while (0) + leftRotate_128(x2, x3, b2, b3, 1); + leftRotate_128(x4, x5, b4, b5, 16); + leftRotate_128(x6, x7, b6, b7, 25); + } + + /* Store the local variables to the output state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + state->S[0] = x0; + state->S[1] = x1; + state->S[2] = x2; + state->S[3] = x3; + state->S[4] = x4; + state->S[5] = x5; + state->S[6] = x6; + state->S[7] = x7; +#else + le_store_word64(state->B, x0); + le_store_word64(state->B + 8, x1); + le_store_word64(state->B + 16, x2); + le_store_word64(state->B + 24, x3); + le_store_word64(state->B + 32, x4); + le_store_word64(state->B + 40, x5); + le_store_word64(state->B + 48, x6); + le_store_word64(state->B + 56, x7); +#endif +} + +void knot512_permute_7(knot512_state_t *state, uint8_t rounds) +{ + knot512_permute(state, rc7, rounds); +} + +void knot512_permute_8(knot512_state_t *state, uint8_t rounds) +{ + knot512_permute(state, rc8, rounds); +} + +#endif /* !__AVR__ */ diff --git a/knot/Implementations/crypto_hash/knot512/rhys/internal-knot.h b/knot/Implementations/crypto_hash/knot512/rhys/internal-knot.h new file mode 100644 index 0000000..88a782c --- /dev/null +++ b/knot/Implementations/crypto_hash/knot512/rhys/internal-knot.h @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_KNOT_H +#define LW_INTERNAL_KNOT_H + +#include "internal-util.h" + +/** + * \file internal-knot.h + * \brief Permutations that are used by the KNOT AEAD and hash algorithms. 
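+ *
+ * A brief usage sketch of the permutation API declared below; the
+ * example_permute() wrapper is a hypothetical name used only for
+ * illustration:
+ * \code
+ * #include <string.h>
+ * #include "internal-knot.h"
+ *
+ * static void example_permute(void)
+ * {
+ *     knot256_state_t st;
+ *     memset(st.B, 0, sizeof(st.B));  // caller supplies the initial state
+ *     knot256_permute_6(&st, 52);     // all 52 rounds, 6-bit round constants
+ * }
+ * \endcode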
+ */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Internal state of the KNOT-256 permutation. + */ +typedef union +{ + uint64_t S[4]; /**< Words of the state */ + uint8_t B[32]; /**< Bytes of the state */ + +} knot256_state_t; + +/** + * \brief Internal state of the KNOT-384 permutation. + */ +typedef union +{ + uint64_t S[6]; /**< 64-bit words of the state */ + uint32_t W[12]; /**< 32-bit words of the state */ + uint8_t B[48]; /**< Bytes of the state */ + +} knot384_state_t; + +/** + * \brief Internal state of the KNOT-512 permutation. + */ +typedef union +{ + uint64_t S[8]; /**< Words of the state */ + uint8_t B[64]; /**< Bytes of the state */ + +} knot512_state_t; + +/** + * \brief Permutes the KNOT-256 state, using 6-bit round constants. + * + * \param state The KNOT-256 state to be permuted. + * \param rounds The number of rounds to be performed, 1 to 52. + * + * The input and output \a state will be in little-endian byte order. + */ +void knot256_permute_6(knot256_state_t *state, uint8_t rounds); + +/** + * \brief Permutes the KNOT-256 state, using 7-bit round constants. + * + * \param state The KNOT-256 state to be permuted. + * \param rounds The number of rounds to be performed, 1 to 104. + * + * The input and output \a state will be in little-endian byte order. + */ +void knot256_permute_7(knot256_state_t *state, uint8_t rounds); + +/** + * \brief Permutes the KNOT-384 state, using 7-bit round constants. + * + * \param state The KNOT-384 state to be permuted. + * \param rounds The number of rounds to be performed, 1 to 104. + * + * The input and output \a state will be in little-endian byte order. + */ +void knot384_permute_7(knot384_state_t *state, uint8_t rounds); + +/** + * \brief Permutes the KNOT-512 state, using 7-bit round constants. + * + * \param state The KNOT-512 state to be permuted. + * \param rounds The number of rounds to be performed, 1 to 104. + * + * The input and output \a state will be in little-endian byte order. + */ +void knot512_permute_7(knot512_state_t *state, uint8_t rounds); + +/** + * \brief Permutes the KNOT-512 state, using 8-bit round constants. + * + * \param state The KNOT-512 state to be permuted. + * \param rounds The number of rounds to be performed, 1 to 140. + * + * The input and output \a state will be in little-endian byte order. + */ +void knot512_permute_8(knot512_state_t *state, uint8_t rounds); + +/** + * \brief Generic pointer to a function that performs a KNOT permutation. + * + * \param state Points to the permutation state. + * \param round Number of rounds to perform. + */ +typedef void (*knot_permute_t)(void *state, uint8_t rounds); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/knot/Implementations/crypto_hash/knot512/rhys/internal-util.h b/knot/Implementations/crypto_hash/knot512/rhys/internal-util.h new file mode 100644 index 0000000..e30166d --- /dev/null +++ b/knot/Implementations/crypto_hash/knot512/rhys/internal-util.h @@ -0,0 +1,702 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_UTIL_H +#define LW_INTERNAL_UTIL_H + +#include + +/* Figure out how to inline functions using this C compiler */ +#if defined(__STDC__) && __STDC_VERSION__ >= 199901L +#define STATIC_INLINE static inline +#elif defined(__GNUC__) || defined(__clang__) +#define STATIC_INLINE static __inline__ +#else +#define STATIC_INLINE static +#endif + +/* Try to figure out whether the CPU is little-endian or big-endian. + * May need to modify this to include new compiler-specific defines. + * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your + * compiler flags when you compile this library */ +#if defined(__x86_64) || defined(__x86_64__) || \ + defined(__i386) || defined(__i386__) || \ + defined(__AVR__) || defined(__arm) || defined(__arm__) || \ + defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ + defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ + defined(__LITTLE_ENDIAN__) +#define LW_UTIL_LITTLE_ENDIAN 1 +#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ + defined(__BIG_ENDIAN__) +/* Big endian */ +#else +#error "Cannot determine the endianess of this platform" +#endif + +/* Helper macros to load and store values while converting endian-ness */ + +/* Load a big-endian 32-bit word from a byte buffer */ +#define be_load_word32(ptr) \ + ((((uint32_t)((ptr)[0])) << 24) | \ + (((uint32_t)((ptr)[1])) << 16) | \ + (((uint32_t)((ptr)[2])) << 8) | \ + ((uint32_t)((ptr)[3]))) + +/* Store a big-endian 32-bit word into a byte buffer */ +#define be_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 24); \ + (ptr)[1] = (uint8_t)(_x >> 16); \ + (ptr)[2] = (uint8_t)(_x >> 8); \ + (ptr)[3] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 32-bit word from a byte buffer */ +#define le_load_word32(ptr) \ + ((((uint32_t)((ptr)[3])) << 24) | \ + (((uint32_t)((ptr)[2])) << 16) | \ + (((uint32_t)((ptr)[1])) << 8) | \ + ((uint32_t)((ptr)[0]))) + +/* Store a little-endian 32-bit word into a byte buffer */ +#define le_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + } while (0) + +/* Load a big-endian 64-bit word from a byte buffer */ +#define be_load_word64(ptr) \ + ((((uint64_t)((ptr)[0])) << 56) | \ + (((uint64_t)((ptr)[1])) << 48) 
| \ + (((uint64_t)((ptr)[2])) << 40) | \ + (((uint64_t)((ptr)[3])) << 32) | \ + (((uint64_t)((ptr)[4])) << 24) | \ + (((uint64_t)((ptr)[5])) << 16) | \ + (((uint64_t)((ptr)[6])) << 8) | \ + ((uint64_t)((ptr)[7]))) + +/* Store a big-endian 64-bit word into a byte buffer */ +#define be_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 56); \ + (ptr)[1] = (uint8_t)(_x >> 48); \ + (ptr)[2] = (uint8_t)(_x >> 40); \ + (ptr)[3] = (uint8_t)(_x >> 32); \ + (ptr)[4] = (uint8_t)(_x >> 24); \ + (ptr)[5] = (uint8_t)(_x >> 16); \ + (ptr)[6] = (uint8_t)(_x >> 8); \ + (ptr)[7] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 64-bit word from a byte buffer */ +#define le_load_word64(ptr) \ + ((((uint64_t)((ptr)[7])) << 56) | \ + (((uint64_t)((ptr)[6])) << 48) | \ + (((uint64_t)((ptr)[5])) << 40) | \ + (((uint64_t)((ptr)[4])) << 32) | \ + (((uint64_t)((ptr)[3])) << 24) | \ + (((uint64_t)((ptr)[2])) << 16) | \ + (((uint64_t)((ptr)[1])) << 8) | \ + ((uint64_t)((ptr)[0]))) + +/* Store a little-endian 64-bit word into a byte buffer */ +#define le_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + (ptr)[4] = (uint8_t)(_x >> 32); \ + (ptr)[5] = (uint8_t)(_x >> 40); \ + (ptr)[6] = (uint8_t)(_x >> 48); \ + (ptr)[7] = (uint8_t)(_x >> 56); \ + } while (0) + +/* Load a big-endian 16-bit word from a byte buffer */ +#define be_load_word16(ptr) \ + ((((uint16_t)((ptr)[0])) << 8) | \ + ((uint16_t)((ptr)[1]))) + +/* Store a big-endian 16-bit word into a byte buffer */ +#define be_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 8); \ + (ptr)[1] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 16-bit word from a byte buffer */ +#define le_load_word16(ptr) \ + ((((uint16_t)((ptr)[1])) << 8) | \ + ((uint16_t)((ptr)[0]))) + +/* Store a little-endian 16-bit word into a byte buffer */ +#define le_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + } while (0) + +/* XOR a source byte buffer against a destination */ +#define lw_xor_block(dest, src, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ ^= *_src++; \ + --_len; \ + } \ + } while (0) + +/* XOR two source byte buffers and put the result in a destination buffer */ +#define lw_xor_block_2_src(dest, src1, src2, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ = *_src1++ ^ *_src2++; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time */ +#define lw_xor_block_2_dest(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest2++ = (*_dest++ ^= *_src++); \ + --_len; \ + } \ + } while (0) + +/* XOR two byte buffers and write to a destination which at the same + * time copying the contents of src2 to dest2 */ +#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = 
(src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src2++; \ + *_dest2++ = _temp; \ + *_dest++ = *_src1++ ^ _temp; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time. This version swaps the source value + * into the "dest" buffer */ +#define lw_xor_block_swap(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src++; \ + *_dest2++ = *_dest ^ _temp; \ + *_dest++ = _temp; \ + --_len; \ + } \ + } while (0) + +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + +/* Rotation macros for 32-bit arguments */ + +/* Generic left rotate */ +#define leftRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (32 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (32 - (bits))); \ + })) + +#if !LW_CRYPTO_ROTATE32_COMPOSED + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1(a) (leftRotate((a), 1)) +#define leftRotate2(a) (leftRotate((a), 2)) +#define leftRotate3(a) (leftRotate((a), 3)) +#define leftRotate4(a) (leftRotate((a), 4)) +#define leftRotate5(a) (leftRotate((a), 5)) +#define leftRotate6(a) (leftRotate((a), 6)) +#define leftRotate7(a) (leftRotate((a), 7)) +#define leftRotate8(a) (leftRotate((a), 8)) +#define leftRotate9(a) (leftRotate((a), 9)) +#define leftRotate10(a) (leftRotate((a), 10)) +#define leftRotate11(a) (leftRotate((a), 11)) +#define leftRotate12(a) (leftRotate((a), 12)) +#define leftRotate13(a) (leftRotate((a), 13)) +#define leftRotate14(a) (leftRotate((a), 14)) +#define leftRotate15(a) (leftRotate((a), 15)) +#define leftRotate16(a) (leftRotate((a), 16)) +#define leftRotate17(a) (leftRotate((a), 17)) +#define leftRotate18(a) (leftRotate((a), 18)) +#define leftRotate19(a) (leftRotate((a), 19)) +#define leftRotate20(a) (leftRotate((a), 20)) +#define leftRotate21(a) (leftRotate((a), 21)) +#define leftRotate22(a) (leftRotate((a), 22)) +#define leftRotate23(a) (leftRotate((a), 23)) +#define leftRotate24(a) (leftRotate((a), 24)) +#define leftRotate25(a) (leftRotate((a), 25)) +#define leftRotate26(a) (leftRotate((a), 26)) +#define leftRotate27(a) (leftRotate((a), 27)) +#define leftRotate28(a) (leftRotate((a), 28)) +#define leftRotate29(a) (leftRotate((a), 29)) +#define leftRotate30(a) (leftRotate((a), 30)) +#define leftRotate31(a) (leftRotate((a), 31)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1(a) (rightRotate((a), 1)) +#define rightRotate2(a) (rightRotate((a), 2)) +#define rightRotate3(a) (rightRotate((a), 3)) +#define rightRotate4(a) (rightRotate((a), 4)) +#define rightRotate5(a) (rightRotate((a), 5)) +#define rightRotate6(a) (rightRotate((a), 6)) +#define rightRotate7(a) (rightRotate((a), 7)) +#define rightRotate8(a) (rightRotate((a), 8)) +#define rightRotate9(a) (rightRotate((a), 9)) +#define rightRotate10(a) (rightRotate((a), 10)) +#define rightRotate11(a) (rightRotate((a), 11)) +#define rightRotate12(a) (rightRotate((a), 12)) +#define rightRotate13(a) (rightRotate((a), 13)) +#define rightRotate14(a) (rightRotate((a), 14)) +#define rightRotate15(a) (rightRotate((a), 15)) +#define rightRotate16(a) (rightRotate((a), 16)) +#define rightRotate17(a) (rightRotate((a), 17)) +#define rightRotate18(a) (rightRotate((a), 18)) +#define rightRotate19(a) (rightRotate((a), 19)) +#define rightRotate20(a) (rightRotate((a), 20)) +#define rightRotate21(a) (rightRotate((a), 21)) +#define rightRotate22(a) (rightRotate((a), 22)) +#define rightRotate23(a) (rightRotate((a), 23)) +#define rightRotate24(a) (rightRotate((a), 24)) +#define rightRotate25(a) (rightRotate((a), 25)) +#define rightRotate26(a) (rightRotate((a), 26)) +#define rightRotate27(a) (rightRotate((a), 27)) +#define rightRotate28(a) (rightRotate((a), 28)) +#define rightRotate29(a) (rightRotate((a), 29)) +#define rightRotate30(a) (rightRotate((a), 30)) +#define rightRotate31(a) (rightRotate((a), 31)) + +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) 
(leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Rotation macros for 64-bit arguments */ + +/* Generic left rotate */ +#define leftRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (64 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (64 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_64(a) (leftRotate_64((a), 1)) +#define leftRotate2_64(a) (leftRotate_64((a), 2)) +#define leftRotate3_64(a) (leftRotate_64((a), 3)) +#define leftRotate4_64(a) (leftRotate_64((a), 4)) +#define leftRotate5_64(a) (leftRotate_64((a), 5)) +#define leftRotate6_64(a) (leftRotate_64((a), 6)) +#define leftRotate7_64(a) (leftRotate_64((a), 7)) +#define leftRotate8_64(a) (leftRotate_64((a), 8)) +#define leftRotate9_64(a) (leftRotate_64((a), 9)) +#define leftRotate10_64(a) (leftRotate_64((a), 10)) +#define leftRotate11_64(a) (leftRotate_64((a), 11)) +#define leftRotate12_64(a) (leftRotate_64((a), 12)) +#define leftRotate13_64(a) (leftRotate_64((a), 13)) +#define leftRotate14_64(a) (leftRotate_64((a), 14)) +#define leftRotate15_64(a) (leftRotate_64((a), 15)) +#define leftRotate16_64(a) (leftRotate_64((a), 16)) +#define leftRotate17_64(a) (leftRotate_64((a), 17)) +#define leftRotate18_64(a) (leftRotate_64((a), 18)) +#define leftRotate19_64(a) (leftRotate_64((a), 19)) +#define leftRotate20_64(a) (leftRotate_64((a), 20)) +#define leftRotate21_64(a) (leftRotate_64((a), 21)) +#define leftRotate22_64(a) (leftRotate_64((a), 22)) +#define leftRotate23_64(a) (leftRotate_64((a), 23)) +#define leftRotate24_64(a) (leftRotate_64((a), 24)) +#define leftRotate25_64(a) (leftRotate_64((a), 25)) +#define leftRotate26_64(a) (leftRotate_64((a), 26)) +#define leftRotate27_64(a) (leftRotate_64((a), 27)) +#define leftRotate28_64(a) (leftRotate_64((a), 28)) +#define leftRotate29_64(a) (leftRotate_64((a), 29)) +#define leftRotate30_64(a) (leftRotate_64((a), 30)) +#define leftRotate31_64(a) (leftRotate_64((a), 31)) +#define leftRotate32_64(a) (leftRotate_64((a), 32)) +#define leftRotate33_64(a) (leftRotate_64((a), 33)) +#define leftRotate34_64(a) (leftRotate_64((a), 34)) +#define leftRotate35_64(a) (leftRotate_64((a), 35)) +#define leftRotate36_64(a) (leftRotate_64((a), 36)) +#define leftRotate37_64(a) (leftRotate_64((a), 37)) +#define leftRotate38_64(a) (leftRotate_64((a), 38)) +#define leftRotate39_64(a) (leftRotate_64((a), 39)) +#define leftRotate40_64(a) (leftRotate_64((a), 40)) +#define leftRotate41_64(a) (leftRotate_64((a), 41)) +#define leftRotate42_64(a) (leftRotate_64((a), 42)) +#define leftRotate43_64(a) (leftRotate_64((a), 43)) +#define leftRotate44_64(a) (leftRotate_64((a), 44)) +#define leftRotate45_64(a) (leftRotate_64((a), 45)) +#define leftRotate46_64(a) (leftRotate_64((a), 46)) +#define leftRotate47_64(a) (leftRotate_64((a), 47)) +#define leftRotate48_64(a) (leftRotate_64((a), 48)) +#define leftRotate49_64(a) (leftRotate_64((a), 49)) +#define leftRotate50_64(a) (leftRotate_64((a), 50)) +#define leftRotate51_64(a) (leftRotate_64((a), 51)) +#define leftRotate52_64(a) (leftRotate_64((a), 52)) +#define leftRotate53_64(a) (leftRotate_64((a), 53)) +#define leftRotate54_64(a) (leftRotate_64((a), 54)) +#define leftRotate55_64(a) (leftRotate_64((a), 55)) +#define leftRotate56_64(a) (leftRotate_64((a), 56)) +#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) +#define leftRotate58_64(a) (leftRotate_64((a), 58)) +#define leftRotate59_64(a) (leftRotate_64((a), 59)) +#define leftRotate60_64(a) (leftRotate_64((a), 60)) +#define leftRotate61_64(a) (leftRotate_64((a), 61)) +#define leftRotate62_64(a) (leftRotate_64((a), 62)) +#define leftRotate63_64(a) (leftRotate_64((a), 63)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_64(a) (rightRotate_64((a), 1)) +#define rightRotate2_64(a) (rightRotate_64((a), 2)) +#define rightRotate3_64(a) (rightRotate_64((a), 3)) +#define rightRotate4_64(a) (rightRotate_64((a), 4)) +#define rightRotate5_64(a) (rightRotate_64((a), 5)) +#define rightRotate6_64(a) (rightRotate_64((a), 6)) +#define rightRotate7_64(a) (rightRotate_64((a), 7)) +#define rightRotate8_64(a) (rightRotate_64((a), 8)) +#define rightRotate9_64(a) (rightRotate_64((a), 9)) +#define rightRotate10_64(a) (rightRotate_64((a), 10)) +#define rightRotate11_64(a) (rightRotate_64((a), 11)) +#define rightRotate12_64(a) (rightRotate_64((a), 12)) +#define rightRotate13_64(a) (rightRotate_64((a), 13)) +#define rightRotate14_64(a) (rightRotate_64((a), 14)) +#define rightRotate15_64(a) (rightRotate_64((a), 15)) +#define rightRotate16_64(a) (rightRotate_64((a), 16)) +#define rightRotate17_64(a) (rightRotate_64((a), 17)) +#define rightRotate18_64(a) (rightRotate_64((a), 18)) +#define rightRotate19_64(a) (rightRotate_64((a), 19)) +#define rightRotate20_64(a) (rightRotate_64((a), 20)) +#define rightRotate21_64(a) (rightRotate_64((a), 21)) +#define rightRotate22_64(a) (rightRotate_64((a), 22)) +#define rightRotate23_64(a) (rightRotate_64((a), 23)) +#define rightRotate24_64(a) (rightRotate_64((a), 24)) +#define rightRotate25_64(a) (rightRotate_64((a), 25)) +#define rightRotate26_64(a) (rightRotate_64((a), 26)) +#define rightRotate27_64(a) (rightRotate_64((a), 27)) +#define rightRotate28_64(a) (rightRotate_64((a), 28)) +#define rightRotate29_64(a) (rightRotate_64((a), 29)) +#define rightRotate30_64(a) (rightRotate_64((a), 30)) +#define rightRotate31_64(a) (rightRotate_64((a), 31)) +#define rightRotate32_64(a) (rightRotate_64((a), 32)) +#define rightRotate33_64(a) (rightRotate_64((a), 33)) +#define rightRotate34_64(a) (rightRotate_64((a), 34)) +#define rightRotate35_64(a) (rightRotate_64((a), 35)) +#define rightRotate36_64(a) (rightRotate_64((a), 36)) +#define rightRotate37_64(a) (rightRotate_64((a), 37)) +#define rightRotate38_64(a) (rightRotate_64((a), 38)) +#define rightRotate39_64(a) (rightRotate_64((a), 39)) +#define rightRotate40_64(a) (rightRotate_64((a), 40)) +#define rightRotate41_64(a) (rightRotate_64((a), 41)) +#define rightRotate42_64(a) (rightRotate_64((a), 42)) +#define rightRotate43_64(a) (rightRotate_64((a), 43)) +#define rightRotate44_64(a) (rightRotate_64((a), 44)) +#define rightRotate45_64(a) (rightRotate_64((a), 45)) +#define rightRotate46_64(a) (rightRotate_64((a), 46)) +#define rightRotate47_64(a) (rightRotate_64((a), 47)) +#define rightRotate48_64(a) (rightRotate_64((a), 48)) +#define rightRotate49_64(a) (rightRotate_64((a), 49)) +#define rightRotate50_64(a) (rightRotate_64((a), 50)) +#define rightRotate51_64(a) (rightRotate_64((a), 51)) +#define rightRotate52_64(a) (rightRotate_64((a), 52)) +#define rightRotate53_64(a) (rightRotate_64((a), 53)) +#define rightRotate54_64(a) (rightRotate_64((a), 54)) +#define rightRotate55_64(a) (rightRotate_64((a), 55)) +#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) +#define rightRotate57_64(a) (rightRotate_64((a), 57)) +#define rightRotate58_64(a) (rightRotate_64((a), 58)) +#define rightRotate59_64(a) (rightRotate_64((a), 59)) +#define rightRotate60_64(a) (rightRotate_64((a), 60)) +#define rightRotate61_64(a) (rightRotate_64((a), 61)) +#define rightRotate62_64(a) (rightRotate_64((a), 62)) +#define rightRotate63_64(a) (rightRotate_64((a), 63)) + +/* Rotate a 16-bit value left by a number of bits */ +#define leftRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (16 - (bits))); \ + })) + +/* Rotate a 16-bit value right by a number of bits */ +#define rightRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (16 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_16(a) (leftRotate_16((a), 1)) +#define leftRotate2_16(a) (leftRotate_16((a), 2)) +#define leftRotate3_16(a) (leftRotate_16((a), 3)) +#define leftRotate4_16(a) (leftRotate_16((a), 4)) +#define leftRotate5_16(a) (leftRotate_16((a), 5)) +#define leftRotate6_16(a) (leftRotate_16((a), 6)) +#define leftRotate7_16(a) (leftRotate_16((a), 7)) +#define leftRotate8_16(a) (leftRotate_16((a), 8)) +#define leftRotate9_16(a) (leftRotate_16((a), 9)) +#define leftRotate10_16(a) (leftRotate_16((a), 10)) +#define leftRotate11_16(a) (leftRotate_16((a), 11)) +#define leftRotate12_16(a) (leftRotate_16((a), 12)) +#define leftRotate13_16(a) (leftRotate_16((a), 13)) +#define leftRotate14_16(a) (leftRotate_16((a), 14)) +#define leftRotate15_16(a) (leftRotate_16((a), 15)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_16(a) (rightRotate_16((a), 1)) +#define rightRotate2_16(a) (rightRotate_16((a), 2)) +#define rightRotate3_16(a) (rightRotate_16((a), 3)) +#define rightRotate4_16(a) (rightRotate_16((a), 4)) +#define rightRotate5_16(a) (rightRotate_16((a), 5)) +#define rightRotate6_16(a) (rightRotate_16((a), 6)) +#define rightRotate7_16(a) (rightRotate_16((a), 7)) +#define rightRotate8_16(a) (rightRotate_16((a), 8)) +#define rightRotate9_16(a) (rightRotate_16((a), 9)) +#define rightRotate10_16(a) (rightRotate_16((a), 10)) +#define rightRotate11_16(a) (rightRotate_16((a), 11)) +#define rightRotate12_16(a) (rightRotate_16((a), 12)) +#define rightRotate13_16(a) (rightRotate_16((a), 13)) +#define rightRotate14_16(a) (rightRotate_16((a), 14)) +#define rightRotate15_16(a) (rightRotate_16((a), 15)) + +/* Rotate an 8-bit value left by a number of bits */ +#define leftRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (8 - (bits))); \ + })) + +/* Rotate an 8-bit value right by a number of bits */ +#define rightRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (8 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_8(a) (leftRotate_8((a), 1)) +#define leftRotate2_8(a) (leftRotate_8((a), 2)) +#define leftRotate3_8(a) (leftRotate_8((a), 3)) +#define leftRotate4_8(a) (leftRotate_8((a), 4)) +#define leftRotate5_8(a) (leftRotate_8((a), 5)) +#define leftRotate6_8(a) (leftRotate_8((a), 6)) +#define leftRotate7_8(a) (leftRotate_8((a), 7)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_8(a) (rightRotate_8((a), 1)) +#define rightRotate2_8(a) (rightRotate_8((a), 2)) +#define rightRotate3_8(a) (rightRotate_8((a), 3)) +#define rightRotate4_8(a) (rightRotate_8((a), 4)) +#define rightRotate5_8(a) (rightRotate_8((a), 5)) +#define rightRotate6_8(a) (rightRotate_8((a), 6)) +#define rightRotate7_8(a) (rightRotate_8((a), 7)) + +#endif diff --git a/knot/Implementations/crypto_hash/knot512/rhys/knot-hash.c b/knot/Implementations/crypto_hash/knot512/rhys/knot-hash.c new file mode 100644 index 0000000..a4edecd --- /dev/null +++ b/knot/Implementations/crypto_hash/knot512/rhys/knot-hash.c @@ -0,0 +1,186 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "knot.h" +#include "internal-knot.h" +#include + +aead_hash_algorithm_t const knot_hash_256_256_algorithm = { + "KNOT-HASH-256-256", + sizeof(int), + KNOT_HASH_256_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + knot_hash_256_256, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +aead_hash_algorithm_t const knot_hash_256_384_algorithm = { + "KNOT-HASH-256-384", + sizeof(int), + KNOT_HASH_256_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + knot_hash_256_384, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +aead_hash_algorithm_t const knot_hash_384_384_algorithm = { + "KNOT-HASH-384-384", + sizeof(int), + KNOT_HASH_384_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + knot_hash_384_384, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +aead_hash_algorithm_t const knot_hash_512_512_algorithm = { + "KNOT-HASH-512-512", + sizeof(int), + KNOT_HASH_512_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + knot_hash_512_512, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +/** + * \brief Input rate for KNOT-HASH-256-256. + */ +#define KNOT_HASH_256_256_RATE 4 + +/** + * \brief Input rate for KNOT-HASH-256-384. + */ +#define KNOT_HASH_256_384_RATE 16 + +/** + * \brief Input rate for KNOT-HASH-384-384. + */ +#define KNOT_HASH_384_384_RATE 6 + +/** + * \brief Input rate for KNOT-HASH-512-512. + */ +#define KNOT_HASH_512_512_RATE 8 + +int knot_hash_256_256 + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + knot256_state_t state; + unsigned temp; + memset(state.B, 0, sizeof(state.B)); + while (inlen >= KNOT_HASH_256_256_RATE) { + lw_xor_block(state.B, in, KNOT_HASH_256_256_RATE); + knot256_permute_7(&state, 68); + in += KNOT_HASH_256_256_RATE; + inlen -= KNOT_HASH_256_256_RATE; + } + temp = (unsigned)inlen; + lw_xor_block(state.B, in, temp); + state.B[temp] ^= 0x01; + knot256_permute_7(&state, 68); + memcpy(out, state.B, KNOT_HASH_256_SIZE / 2); + knot256_permute_7(&state, 68); + memcpy(out + KNOT_HASH_256_SIZE / 2, state.B, KNOT_HASH_256_SIZE / 2); + return 0; +} + +int knot_hash_256_384 + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + knot384_state_t state; + unsigned temp; + memset(state.B, 0, sizeof(state.B)); + state.B[sizeof(state.B) - 1] ^= 0x80; + while (inlen >= KNOT_HASH_256_384_RATE) { + lw_xor_block(state.B, in, KNOT_HASH_256_384_RATE); + knot384_permute_7(&state, 80); + in += KNOT_HASH_256_384_RATE; + inlen -= KNOT_HASH_256_384_RATE; + } + temp = (unsigned)inlen; + lw_xor_block(state.B, in, temp); + state.B[temp] ^= 0x01; + knot384_permute_7(&state, 80); + memcpy(out, state.B, KNOT_HASH_256_SIZE / 2); + knot384_permute_7(&state, 80); + memcpy(out + KNOT_HASH_256_SIZE / 2, state.B, KNOT_HASH_256_SIZE / 2); + return 0; +} + +int knot_hash_384_384 + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + knot384_state_t state; + unsigned temp; + memset(state.B, 0, sizeof(state.B)); + while (inlen >= KNOT_HASH_384_384_RATE) { + lw_xor_block(state.B, in, KNOT_HASH_384_384_RATE); + knot384_permute_7(&state, 104); + in += KNOT_HASH_384_384_RATE; + inlen -= KNOT_HASH_384_384_RATE; + } + temp = (unsigned)inlen; + lw_xor_block(state.B, in, temp); + state.B[temp] ^= 0x01; + knot384_permute_7(&state, 104); + memcpy(out, state.B, KNOT_HASH_384_SIZE / 
2); + knot384_permute_7(&state, 104); + memcpy(out + KNOT_HASH_384_SIZE / 2, state.B, KNOT_HASH_384_SIZE / 2); + return 0; +} + +int knot_hash_512_512 + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + knot512_state_t state; + unsigned temp; + memset(state.B, 0, sizeof(state.B)); + while (inlen >= KNOT_HASH_512_512_RATE) { + lw_xor_block(state.B, in, KNOT_HASH_512_512_RATE); + knot512_permute_8(&state, 140); + in += KNOT_HASH_512_512_RATE; + inlen -= KNOT_HASH_512_512_RATE; + } + temp = (unsigned)inlen; + lw_xor_block(state.B, in, temp); + state.B[temp] ^= 0x01; + knot512_permute_8(&state, 140); + memcpy(out, state.B, KNOT_HASH_512_SIZE / 2); + knot512_permute_8(&state, 140); + memcpy(out + KNOT_HASH_512_SIZE / 2, state.B, KNOT_HASH_512_SIZE / 2); + return 0; +} diff --git a/knot/Implementations/crypto_hash/knot512/rhys/knot.h b/knot/Implementations/crypto_hash/knot512/rhys/knot.h new file mode 100644 index 0000000..e2c5198 --- /dev/null +++ b/knot/Implementations/crypto_hash/knot512/rhys/knot.h @@ -0,0 +1,459 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_KNOT_H +#define LWCRYPTO_KNOT_H + +#include "aead-common.h" + +/** + * \file knot.h + * \brief KNOT authenticated encryption and hash algorithms. + * + * KNOT is a family of authenticated encryption and hash algorithms built + * around a permutation and the MonkeyDuplex sponge construction. The + * family members are: + * + * \li KNOT-AEAD-128-256 with a 128-bit key, a 128-bit nonce, and a + * 128-bit tag, built around a 256-bit permutation. This is the primary + * encryption member of the family. + * \li KNOT-AEAD-128-384 with a 128-bit key, a 128-bit nonce, and a + * 128-bit tag, built around a 384-bit permutation. + * \li KNOT-AEAD-192-384 with a 192-bit key, a 192-bit nonce, and a + * 192-bit tag, built around a 384-bit permutation. + * \li KNOT-AEAD-256-512 with a 256-bit key, a 256-bit nonce, and a + * 256-bit tag, built around a 512-bit permutation. + * \li KNOT-HASH-256-256 with a 256-bit hash output, built around a + * 256-bit permutation. This is the primary hashing member of the family. + * \li KNOT-HASH-256-384 with a 256-bit hash output, built around a + * 384-bit permutation. + * \li KNOT-HASH-384-384 with a 384-bit hash output, built around a + * 384-bit permutation. + * \li KNOT-HASH-512-512 with a 512-bit hash output, built around a + * 512-bit permutation. 
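+ *
+ * All members share one sponge skeleton: each rate-sized block of input
+ * is XORed into the leading bytes of the state and the permutation is
+ * run between blocks, with a 0x01 padding bit absorbed into the final
+ * partial block.  A rough sketch of the absorb loop, where RATE and
+ * ROUNDS stand in for the per-member constants and lw_xor_block() /
+ * knot256_permute_7() are the internal helpers used by this
+ * implementation:
+ * \code
+ * while (inlen >= RATE) {
+ *     lw_xor_block(state.B, in, RATE);    // absorb one rate-sized block
+ *     knot256_permute_7(&state, ROUNDS);  // mix the entire state
+ *     in += RATE;
+ *     inlen -= RATE;
+ * }
+ * \endcode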
+ * + * References: https://csrc.nist.gov/CSRC/media/Projects/lightweight-cryptography/documents/round-2/spec-doc-rnd2/knot-spec-round.pdf + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the key for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. + */ +#define KNOT_AEAD_128_KEY_SIZE 16 + +/** + * \brief Size of the authentication tag for KNOT-AEAD-128-256 and + * KNOT-AEAD-128-384. + */ +#define KNOT_AEAD_128_TAG_SIZE 16 + +/** + * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. + */ +#define KNOT_AEAD_128_NONCE_SIZE 16 + +/** + * \brief Size of the key for KNOT-AEAD-192-384. + */ +#define KNOT_AEAD_192_KEY_SIZE 24 + +/** + * \brief Size of the authentication tag for KNOT-AEAD-192-384. + */ +#define KNOT_AEAD_192_TAG_SIZE 24 + +/** + * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-192-384. + */ +#define KNOT_AEAD_192_NONCE_SIZE 24 + +/** + * \brief Size of the key for KNOT-AEAD-256-512. + */ +#define KNOT_AEAD_256_KEY_SIZE 32 + +/** + * \brief Size of the authentication tag for KNOT-AEAD-256-512. + */ +#define KNOT_AEAD_256_TAG_SIZE 32 + +/** + * \brief Size of the nonce for KNOT-AEAD-128-256 and KNOT-AEAD-128-384. + */ +#define KNOT_AEAD_256_NONCE_SIZE 32 + +/** + * \brief Size of the hash for KNOT-HASH-256-256 and KNOT-HASH-256-384. + */ +#define KNOT_HASH_256_SIZE 32 + +/** + * \brief Size of the hash for KNOT-HASH-384-384. + */ +#define KNOT_HASH_384_SIZE 48 + +/** + * \brief Size of the hash for KNOT-HASH-512-512. + */ +#define KNOT_HASH_512_SIZE 64 + +/** + * \brief Meta-information block for the KNOT-AEAD-128-256 cipher. + */ +extern aead_cipher_t const knot_aead_128_256_cipher; + +/** + * \brief Meta-information block for the KNOT-AEAD-128-384 cipher. + */ +extern aead_cipher_t const knot_aead_128_384_cipher; + +/** + * \brief Meta-information block for the KNOT-AEAD-192-384 cipher. + */ +extern aead_cipher_t const knot_aead_192_384_cipher; + +/** + * \brief Meta-information block for the KNOT-AEAD-256-512 cipher. + */ +extern aead_cipher_t const knot_aead_256_512_cipher; + +/** + * \brief Meta-information block for the KNOT-HASH-256-256 algorithm. + */ +extern aead_hash_algorithm_t const knot_hash_256_256_algorithm; + +/** + * \brief Meta-information block for the KNOT-HASH-256-384 algorithm. + */ +extern aead_hash_algorithm_t const knot_hash_256_384_algorithm; + +/** + * \brief Meta-information block for the KNOT-HASH-384-384 algorithm. + */ +extern aead_hash_algorithm_t const knot_hash_384_384_algorithm; + +/** + * \brief Meta-information block for the KNOT-HASH-512-512 algorithm. + */ +extern aead_hash_algorithm_t const knot_hash_512_512_algorithm; + +/** + * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-256. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. 
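+ *
+ * A minimal call sketch with an empty associated-data stream; the
+ * all-zero key, nonce, and message are for illustration only:
+ * \code
+ * unsigned char key[KNOT_AEAD_128_KEY_SIZE] = {0};
+ * unsigned char nonce[KNOT_AEAD_128_NONCE_SIZE] = {0};
+ * unsigned char msg[16] = {0};
+ * unsigned char out[sizeof(msg) + KNOT_AEAD_128_TAG_SIZE];
+ * unsigned long long outlen;
+ * knot_aead_128_256_encrypt(out, &outlen, msg, sizeof(msg),
+ *                           0, 0, 0, nonce, key);  // outlen becomes mlen + 16
+ * \endcode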
+ * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa knot_aead_128_256_decrypt() + */ +int knot_aead_128_256_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-256. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa knot_aead_128_256_encrypt() + */ +int knot_aead_128_256_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with KNOT-AEAD-128-384. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa knot_aead_128_384_decrypt() + */ +int knot_aead_128_384_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with KNOT-AEAD-128-384. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. 
+ * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa knot_aead_128_384_encrypt() + */ +int knot_aead_128_384_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + + +/** + * \brief Encrypts and authenticates a packet with KNOT-AEAD-192-384. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa knot_aead_192_384_decrypt() + */ +int knot_aead_192_384_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with KNOT-AEAD-192-384. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa knot_aead_192_384_encrypt() + */ +int knot_aead_192_384_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with KNOT-AEAD-256-512. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. 
+ * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa knot_aead_256_512_decrypt() + */ +int knot_aead_256_512_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with KNOT-AEAD-256-512. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa knot_aead_256_512_encrypt() + */ +int knot_aead_256_512_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data with KNOT-HASH-256-256. + * + * \param out Buffer to receive the hash output which must be at least + * KNOT_HASH_256_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int knot_hash_256_256 + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Hashes a block of input data with KNOT-HASH-256-384. + * + * \param out Buffer to receive the hash output which must be at least + * KNOT_HASH_256_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int knot_hash_256_384 + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Hashes a block of input data with KNOT-HASH-384-384. + * + * \param out Buffer to receive the hash output which must be at least + * KNOT_HASH_384_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. 
+ */ +int knot_hash_384_384 + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Hashes a block of input data with KNOT-HASH-512-512. + * + * \param out Buffer to receive the hash output which must be at least + * KNOT_HASH_512_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int knot_hash_512_512 + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/aead-common.c b/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
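The hash entry points declared just above are one-shot functions; a short sketch (again assuming the "knot.h" header name, and relying on the KNOT_HASH_256_SIZE constant that the doc comments reference) shows the intended call pattern:

#include <stdio.h>
#include "knot.h"   /* assumed header name */

int main(void)
{
    static const unsigned char data[3] = {'a', 'b', 'c'};
    unsigned char digest[KNOT_HASH_256_SIZE];   /* size constant referenced above */
    unsigned i;

    if (knot_hash_256_256(digest, data, sizeof(data)) != 0)
        return 1;
    for (i = 0; i < sizeof(digest); ++i)
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}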
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/aead-common.h b/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. 
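The masking trick in aead_check_tag and aead_check_tag_precheck above deserves a note: accum accumulates the XOR of every tag byte, so it is zero exactly when the two tags are identical, and (accum - 1) >> 8 turns that into an all-ones or all-zero mask without a data-dependent branch (relying, as the original code does, on arithmetic right shift of a negative int). A stand-alone sketch of the same comparison idea:

#include <stdio.h>

/* Returns 0 if the two buffers are equal and -1 otherwise,
 * examining every byte regardless of where a mismatch occurs. */
static int ct_equal(const unsigned char *a, const unsigned char *b, unsigned len)
{
    int accum = 0;
    while (len > 0) {
        accum |= (*a++ ^ *b++);   /* becomes non-zero on any differing byte */
        --len;
    }
    accum = (accum - 1) >> 8;     /* -1 (all ones) if equal, 0 otherwise */
    return ~accum;                /* 0 on match, -1 on mismatch */
}

int main(void)
{
    unsigned char t1[4] = {1, 2, 3, 4};
    unsigned char t2[4] = {1, 2, 3, 4};
    unsigned char t3[4] = {1, 2, 3, 5};
    printf("%d %d\n", ct_equal(t1, t2, 4), ct_equal(t1, t3, 4));  /* prints: 0 -1 */
    return 0;
}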
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
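The aead_cipher_t structure defined above carries the meta-information about an AEAD cipher alongside its encrypt/decrypt entry points. As an illustration only (the variable name and flag value below are not taken from this patch; the lengths follow the KNOT doc comments earlier), a descriptor for KNOT-AEAD-128-256 could be filled in like this:

#include "aead-common.h"
#include "knot.h"   /* assumed header for the KNOT declarations */

/* Hypothetical descriptor, shown only to illustrate the struct layout. */
static const aead_cipher_t example_knot_128_256_cipher = {
    "KNOT-AEAD-128-256",
    16,                          /* key_len: 16-byte key */
    16,                          /* nonce_len: 16-byte public nonce */
    16,                          /* tag_len: 16-byte authentication tag */
    AEAD_FLAG_NONE,              /* flag choice is an assumption */
    knot_aead_128_256_encrypt,   /* aead_cipher_encrypt_t */
    knot_aead_128_256_decrypt    /* aead_cipher_decrypt_t */
};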
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/api.h b/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/api.h deleted file mode 100644 index 4bf8f5c..0000000 --- a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 8 -#define CRYPTO_NOOVERLAP 1 diff --git a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/encrypt.c b/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/encrypt.c deleted file mode 100644 index 1573370..0000000 --- a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "lotus-locus.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return locus_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return locus_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/internal-gift64-avr.S b/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/internal-gift64-avr.S deleted file mode 100644 index fdb668d..0000000 --- a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/internal-gift64-avr.S +++ /dev/null @@ -1,6047 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global gift64n_init - .type gift64n_init, @function -gift64n_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - std Z+12,r18 - std Z+13,r19 - std Z+14,r20 - std Z+15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - std Z+4,r18 - std Z+5,r19 - std Z+6,r20 - std Z+7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - ret - .size gift64n_init, .-gift64n_init - - .text -.global gift64n_encrypt - .type gift64n_encrypt, @function -gift64n_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r23 - push 
r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 28 - ld r6,Z - ldd r7,Z+1 - ldd r8,Z+2 - ldd r9,Z+3 - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Z+4 - ldd r7,Z+5 - ldd r8,Z+6 - ldd r9,Z+7 - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Z+8 - ldd r7,Z+9 - ldd r8,Z+10 - ldd r9,Z+11 - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Z+12 - ldd r7,Z+13 - ldd r8,Z+14 - ldd r9,Z+15 - ld r18,X+ - ld r19,X+ - bst r18,0 - bld r20,0 - bst r18,1 - bld r22,0 - bst r18,2 - bld r2,0 - bst r18,3 - bld r4,0 - bst r18,4 - bld r20,1 - bst r18,5 - bld r22,1 - bst r18,6 - bld r2,1 - bst r18,7 - bld r4,1 - bst r19,0 - bld r20,2 - bst r19,1 - bld r22,2 - bst r19,2 - bld r2,2 - bst r19,3 - bld r4,2 - bst r19,4 - bld r20,3 - bst r19,5 - bld r22,3 - bst r19,6 - bld r2,3 - bst r19,7 - bld r4,3 - ld r18,X+ - ld r19,X+ - bst r18,0 - bld r20,4 - bst r18,1 - bld r22,4 - bst r18,2 - bld r2,4 - bst r18,3 - bld r4,4 - bst r18,4 - bld r20,5 - bst r18,5 - bld r22,5 - bst r18,6 - bld r2,5 - bst r18,7 - bld r4,5 - bst r19,0 - bld r20,6 - bst r19,1 - bld r22,6 - bst r19,2 - bld r2,6 - bst r19,3 - bld r4,6 - bst r19,4 - bld r20,7 - bst r19,5 - bld r22,7 - bst r19,6 - bld r2,7 - bst r19,7 - bld r4,7 - ld r18,X+ - ld r19,X+ - bst r18,0 - bld r21,0 - bst r18,1 - bld r23,0 - bst r18,2 - bld r3,0 - bst r18,3 - bld r5,0 - bst r18,4 - bld r21,1 - bst r18,5 - bld r23,1 - bst r18,6 - bld r3,1 - bst r18,7 - bld r5,1 - bst r19,0 - bld r21,2 - bst r19,1 - bld r23,2 - bst r19,2 - bld r3,2 - bst r19,3 - bld r5,2 - bst r19,4 - bld r21,3 - bst r19,5 - bld r23,3 - bst r19,6 - bld r3,3 - bst r19,7 - bld r5,3 - ld r18,X+ - ld r19,X+ - bst r18,0 - bld r21,4 - bst r18,1 - bld r23,4 - bst r18,2 - bld r3,4 - bst r18,3 - bld r5,4 - bst r18,4 - bld r21,5 - bst r18,5 - bld r23,5 - bst r18,6 - bld r3,5 - bst r18,7 - bld r5,5 - bst r19,0 - bld r21,6 - bst r19,1 - bld r23,6 - bst r19,2 - bld r3,6 - bst r19,3 - bld r5,6 - bst r19,4 - bld r21,7 - bst r19,5 - bld r23,7 - bst r19,6 - bld r3,7 - bst r19,7 - bld r5,7 - rcall 1061f - ldi r18,1 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - rcall 1061f - ldi r18,3 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - rcall 1061f - ldi r18,7 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - rcall 1061f - ldi r18,15 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std 
Y+4,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - rcall 1061f - ldi r18,31 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - rcall 1061f - ldi r18,62 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - rcall 1061f - ldi r18,61 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - rcall 1061f - ldi r18,59 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - rcall 1061f - ldi r18,55 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - rcall 1061f - ldi r18,47 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - rcall 1061f - ldi r18,30 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - rcall 1061f - ldi r18,60 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - rcall 1061f - ldi r18,57 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - rcall 1061f - ldi r18,51 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 
- adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - rcall 1061f - ldi r18,39 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - rcall 1061f - ldi r18,14 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - rcall 1061f - ldi r18,29 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - rcall 1061f - ldi r18,58 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - rcall 1061f - ldi r18,53 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - rcall 1061f - ldi r18,43 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - rcall 1061f - ldi r18,22 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - rcall 1061f - ldi r18,44 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - rcall 1061f - ldi r18,24 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - 
ldd r9,Y+4 - rcall 1061f - ldi r18,48 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - rcall 1061f - ldi r18,33 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - rcall 1061f - ldi r18,2 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - rcall 1061f - ldi r18,5 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - rcall 1061f - ldi r18,11 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rjmp 1252f -1061: - mov r0,r20 - and r0,r2 - eor r22,r0 - mov r0,r21 - and r0,r3 - eor r23,r0 - mov r0,r22 - and r0,r4 - eor r20,r0 - mov r0,r23 - and r0,r5 - eor r21,r0 - mov r0,r20 - or r0,r22 - eor r2,r0 - mov r0,r21 - or r0,r23 - eor r3,r0 - eor r4,r2 - eor r5,r3 - eor r22,r4 - eor r23,r5 - com r4 - com r5 - movw r18,r20 - mov r0,r22 - and r0,r18 - eor r2,r0 - mov r0,r23 - and r0,r19 - eor r3,r0 - movw r20,r4 - movw r4,r18 - bst r20,1 - bld r0,0 - bst r20,4 - bld r20,1 - bst r20,3 - bld r20,4 - bst r21,4 - bld r20,3 - bst r0,0 - bld r21,4 - bst r20,2 - bld r0,0 - bst r21,0 - bld r20,2 - bst r0,0 - bld r21,0 - bst r20,5 - bld r0,0 - bst r20,7 - bld r20,5 - bst r21,7 - bld r20,7 - bst r21,5 - bld r21,7 - bst r0,0 - bld r21,5 - bst r20,6 - bld r0,0 - bst r21,3 - bld r20,6 - bst r21,6 - bld r21,3 - bst r21,1 - bld r21,6 - bst r0,0 - bld r21,1 - bst r22,0 - bld r0,0 - bst r22,1 - bld r22,0 - bst r22,5 - bld r22,1 - bst r22,4 - bld r22,5 - bst r0,0 - bld r22,4 - bst r22,2 - bld r0,0 - bst r23,1 - bld r22,2 - bst r22,7 - bld r23,1 - bst r23,4 - bld r22,7 - bst r0,0 - bld r23,4 - bst r22,3 - bld r0,0 - bst r23,5 - bld r22,3 - bst r22,6 - bld r23,5 - bst r23,0 - bld r22,6 - bst r0,0 - bld r23,0 - bst r23,2 - bld r0,0 - bst r23,3 - bld r23,2 - bst r23,7 - bld r23,3 - bst r23,6 - bld r23,7 - bst r0,0 - bld r23,6 - bst r2,0 - bld r0,0 - bst r2,2 - bld r2,0 - bst r3,2 - bld r2,2 - bst r3,0 - bld r3,2 - bst r0,0 - bld r3,0 - bst r2,1 - bld r0,0 - bst r2,6 - bld r2,1 - bst r3,1 - bld r2,6 - bst r2,4 - bld r3,1 - bst r0,0 - bld r2,4 - bst r2,3 - bld r0,0 - bst r3,6 - bld r2,3 - bst r3,3 - bld r3,6 - bst r3,4 - bld r3,3 - bst r0,0 - bld r3,4 - bst r2,7 - bld r0,0 - bst r3,5 - bld r2,7 - bst r0,0 - bld r3,5 - bst r4,0 - bld r0,0 - bst r4,3 - bld r4,0 - bst r5,7 - bld r4,3 - bst r5,4 - bld r5,7 - bst r0,0 - bld r5,4 - bst r4,1 - bld r0,0 - bst r4,7 - bld r4,1 - bst r5,6 - bld r4,7 - bst r5,0 - bld r5,6 - bst r0,0 - bld r5,0 - bst r4,2 - bld r0,0 - bst r5,3 - bld r4,2 - 
bst r5,5 - bld r5,3 - bst r4,4 - bld r5,5 - bst r0,0 - bld r4,4 - bst r4,5 - bld r0,0 - bst r4,6 - bld r4,5 - bst r5,2 - bld r4,6 - bst r5,1 - bld r5,2 - bst r0,0 - bld r5,1 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - ret -1252: - ldd r26,Y+17 - ldd r27,Y+18 - bst r20,0 - bld r18,0 - bst r22,0 - bld r18,1 - bst r2,0 - bld r18,2 - bst r4,0 - bld r18,3 - bst r20,1 - bld r18,4 - bst r22,1 - bld r18,5 - bst r2,1 - bld r18,6 - bst r4,1 - bld r18,7 - bst r20,2 - bld r19,0 - bst r22,2 - bld r19,1 - bst r2,2 - bld r19,2 - bst r4,2 - bld r19,3 - bst r20,3 - bld r19,4 - bst r22,3 - bld r19,5 - bst r2,3 - bld r19,6 - bst r4,3 - bld r19,7 - st X+,r18 - st X+,r19 - bst r20,4 - bld r18,0 - bst r22,4 - bld r18,1 - bst r2,4 - bld r18,2 - bst r4,4 - bld r18,3 - bst r20,5 - bld r18,4 - bst r22,5 - bld r18,5 - bst r2,5 - bld r18,6 - bst r4,5 - bld r18,7 - bst r20,6 - bld r19,0 - bst r22,6 - bld r19,1 - bst r2,6 - bld r19,2 - bst r4,6 - bld r19,3 - bst r20,7 - bld r19,4 - bst r22,7 - bld r19,5 - bst r2,7 - bld r19,6 - bst r4,7 - bld r19,7 - st X+,r18 - st X+,r19 - bst r21,0 - bld r18,0 - bst r23,0 - bld r18,1 - bst r3,0 - bld r18,2 - bst r5,0 - bld r18,3 - bst r21,1 - bld r18,4 - bst r23,1 - bld r18,5 - bst r3,1 - bld r18,6 - bst r5,1 - bld r18,7 - bst r21,2 - bld r19,0 - bst r23,2 - bld r19,1 - bst r3,2 - bld r19,2 - bst r5,2 - bld r19,3 - bst r21,3 - bld r19,4 - bst r23,3 - bld r19,5 - bst r3,3 - bld r19,6 - bst r5,3 - bld r19,7 - st X+,r18 - st X+,r19 - bst r21,4 - bld r18,0 - bst r23,4 - bld r18,1 - bst r3,4 - bld r18,2 - bst r5,4 - bld r18,3 - bst r21,5 - bld r18,4 - bst r23,5 - bld r18,5 - bst r3,5 - bld r18,6 - bst r5,5 - bld r18,7 - bst r21,6 - bld r19,0 - bst r23,6 - bld r19,1 - bst r3,6 - bld r19,2 - bst r5,6 - bld r19,3 - bst r21,7 - bld r19,4 - bst r23,7 - bld r19,5 - bst r3,7 - bld r19,6 - bst r5,7 - bld r19,7 - st X+,r18 - st X+,r19 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift64n_encrypt, .-gift64n_encrypt - - .text -.global gift64n_decrypt - .type gift64n_decrypt, @function -gift64n_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 28 - ld r6,Z - ldd r7,Z+1 - ldd r8,Z+2 - ldd r9,Z+3 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Z+4 - ldd r7,Z+5 - ldd r8,Z+6 - ldd r9,Z+7 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Z+8 - ldd r7,Z+9 - ldd r8,Z+10 - ldd r9,Z+11 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Z+12 - ldd r7,Z+13 - ldd r8,Z+14 - ldd r9,Z+15 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol 
r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ld r18,X+ - ld r19,X+ - bst r18,0 - bld r20,0 - bst r18,1 - bld r22,0 - bst r18,2 - bld r2,0 - bst r18,3 - bld r4,0 - bst r18,4 - bld r20,1 - bst r18,5 - bld r22,1 - bst r18,6 - bld r2,1 - bst r18,7 - bld r4,1 - bst r19,0 - bld r20,2 - bst r19,1 - bld r22,2 - bst r19,2 - bld r2,2 - bst r19,3 - bld r4,2 - bst r19,4 - bld r20,3 - bst r19,5 - bld r22,3 - bst r19,6 - bld r2,3 - bst r19,7 - bld r4,3 - ld r18,X+ - ld r19,X+ - bst r18,0 - bld r20,4 - bst r18,1 - bld r22,4 - bst r18,2 - bld r2,4 - bst r18,3 - bld r4,4 - bst r18,4 - bld r20,5 - bst r18,5 - bld r22,5 - bst r18,6 - bld r2,5 - bst r18,7 - bld r4,5 - bst r19,0 - bld r20,6 - bst r19,1 - bld r22,6 - bst r19,2 - bld r2,6 - bst r19,3 - bld r4,6 - bst r19,4 - bld r20,7 - bst r19,5 - bld r22,7 - bst r19,6 - bld r2,7 - bst r19,7 - bld r4,7 - ld r18,X+ - ld r19,X+ - bst r18,0 - bld r21,0 - bst r18,1 - bld r23,0 - bst r18,2 - bld r3,0 - bst r18,3 - bld r5,0 - bst r18,4 - bld r21,1 - bst r18,5 - bld r23,1 - bst r18,6 - bld r3,1 - bst r18,7 - bld r5,1 - bst r19,0 - bld r21,2 - bst r19,1 - bld r23,2 - bst r19,2 - bld r3,2 - bst r19,3 - bld r5,2 - bst r19,4 - bld r21,3 - bst r19,5 - bld r23,3 - bst r19,6 - bld r3,3 - bst r19,7 - bld r5,3 - ld r18,X+ - ld r19,X+ - bst r18,0 - bld r21,4 - bst r18,1 - bld r23,4 - bst r18,2 - bld r3,4 - bst r18,3 - bld r5,4 - bst r18,4 - bld r21,5 - bst r18,5 - bld r23,5 - bst r18,6 - bld r3,5 - bst r18,7 - bld r5,5 - bst r19,0 - bld r21,6 - bst r19,1 - bld r23,6 - bst r19,2 - bld r3,6 - bst r19,3 - bld r5,6 - bst r19,4 - bld r21,7 - bst r19,5 - bld r23,7 - bst r19,6 - bld r3,7 - bst r19,7 - bld r5,7 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,11 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,5 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,2 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,33 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,48 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 
1173f - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,24 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,44 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,22 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,43 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,53 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,58 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,29 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,14 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,39 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc 
r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,51 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,57 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,60 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,30 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,47 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,55 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,59 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,61 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,62 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,31 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - 
ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,15 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,7 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,3 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,1 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - rjmp 1362f -1173: - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - bst r20,1 - bld r0,0 - bst r21,4 - bld r20,1 - bst r20,3 - bld r21,4 - bst r20,4 - bld r20,3 - bst r0,0 - bld r20,4 - bst r20,2 - bld r0,0 - bst r21,0 - bld r20,2 - bst r0,0 - bld r21,0 - bst r20,5 - bld r0,0 - bst r21,5 - bld r20,5 - bst r21,7 - bld r21,5 - bst r20,7 - bld r21,7 - bst r0,0 - bld r20,7 - bst r20,6 - bld r0,0 - bst r21,1 - bld r20,6 - bst r21,6 - bld r21,1 - bst r21,3 - bld r21,6 - bst r0,0 - bld r21,3 - bst r22,0 - bld r0,0 - bst r22,4 - bld r22,0 - bst r22,5 - bld r22,4 - bst r22,1 - bld r22,5 - bst r0,0 - bld r22,1 - bst r22,2 - bld r0,0 - bst r23,4 - bld r22,2 - bst r22,7 - bld r23,4 - bst r23,1 - bld r22,7 - bst r0,0 - bld r23,1 - bst r22,3 - bld r0,0 - bst r23,0 - bld r22,3 - bst r22,6 - bld r23,0 - bst r23,5 - bld r22,6 - bst r0,0 - bld r23,5 - bst r23,2 - bld r0,0 - bst r23,6 - bld r23,2 - bst r23,7 - bld r23,6 - bst r23,3 - bld r23,7 - bst r0,0 - bld r23,3 - bst r2,0 - bld r0,0 - bst r3,0 - bld r2,0 - bst r3,2 - bld r3,0 - bst r2,2 - bld r3,2 - bst r0,0 - bld r2,2 - bst r2,1 - bld r0,0 - bst r2,4 - bld r2,1 - bst r3,1 - bld r2,4 - bst r2,6 - bld r3,1 - bst r0,0 - bld r2,6 - bst r2,3 - bld r0,0 - bst r3,4 - bld r2,3 - bst r3,3 - bld r3,4 - bst r3,6 - bld r3,3 - bst r0,0 - bld r3,6 - bst r2,7 - bld r0,0 - bst r3,5 - bld r2,7 - bst r0,0 - bld r3,5 - bst r4,0 - bld r0,0 - bst r5,4 - bld r4,0 - bst r5,7 - bld r5,4 - bst r4,3 - bld r5,7 - bst r0,0 - bld r4,3 - bst r4,1 - bld r0,0 - bst r5,0 - bld r4,1 - bst r5,6 - bld r5,0 - bst r4,7 - bld r5,6 - bst r0,0 - bld r4,7 - bst r4,2 - bld r0,0 - bst r4,4 - bld r4,2 - bst r5,5 - bld r4,4 - bst r5,3 - bld r5,5 - bst r0,0 - bld r5,3 - bst r4,5 - bld r0,0 - bst r5,1 - bld r4,5 - bst r5,2 - bld r5,1 - bst r4,6 - bld r5,2 - bst r0,0 - bld r4,6 - movw r18,r4 - movw r4,r20 - movw r20,r18 - and r18,r22 - and r19,r23 - eor r2,r18 - eor r3,r19 - com r4 - com r5 - eor r22,r4 - eor r23,r5 - eor r4,r2 - eor r5,r3 - mov r0,r20 - or r0,r22 - eor r2,r0 - mov r0,r21 - or r0,r23 - eor r3,r0 - mov r0,r22 - and r0,r4 - eor r20,r0 - mov r0,r23 - and r0,r5 - eor r21,r0 - mov r0,r20 - and r0,r2 - eor r22,r0 - mov r0,r21 - and r0,r3 - eor r23,r0 - ret -1362: - ldd r26,Y+17 - ldd r27,Y+18 - bst r20,0 - 
bld r18,0 - bst r22,0 - bld r18,1 - bst r2,0 - bld r18,2 - bst r4,0 - bld r18,3 - bst r20,1 - bld r18,4 - bst r22,1 - bld r18,5 - bst r2,1 - bld r18,6 - bst r4,1 - bld r18,7 - bst r20,2 - bld r19,0 - bst r22,2 - bld r19,1 - bst r2,2 - bld r19,2 - bst r4,2 - bld r19,3 - bst r20,3 - bld r19,4 - bst r22,3 - bld r19,5 - bst r2,3 - bld r19,6 - bst r4,3 - bld r19,7 - st X+,r18 - st X+,r19 - bst r20,4 - bld r18,0 - bst r22,4 - bld r18,1 - bst r2,4 - bld r18,2 - bst r4,4 - bld r18,3 - bst r20,5 - bld r18,4 - bst r22,5 - bld r18,5 - bst r2,5 - bld r18,6 - bst r4,5 - bld r18,7 - bst r20,6 - bld r19,0 - bst r22,6 - bld r19,1 - bst r2,6 - bld r19,2 - bst r4,6 - bld r19,3 - bst r20,7 - bld r19,4 - bst r22,7 - bld r19,5 - bst r2,7 - bld r19,6 - bst r4,7 - bld r19,7 - st X+,r18 - st X+,r19 - bst r21,0 - bld r18,0 - bst r23,0 - bld r18,1 - bst r3,0 - bld r18,2 - bst r5,0 - bld r18,3 - bst r21,1 - bld r18,4 - bst r23,1 - bld r18,5 - bst r3,1 - bld r18,6 - bst r5,1 - bld r18,7 - bst r21,2 - bld r19,0 - bst r23,2 - bld r19,1 - bst r3,2 - bld r19,2 - bst r5,2 - bld r19,3 - bst r21,3 - bld r19,4 - bst r23,3 - bld r19,5 - bst r3,3 - bld r19,6 - bst r5,3 - bld r19,7 - st X+,r18 - st X+,r19 - bst r21,4 - bld r18,0 - bst r23,4 - bld r18,1 - bst r3,4 - bld r18,2 - bst r5,4 - bld r18,3 - bst r21,5 - bld r18,4 - bst r23,5 - bld r18,5 - bst r3,5 - bld r18,6 - bst r5,5 - bld r18,7 - bst r21,6 - bld r19,0 - bst r23,6 - bld r19,1 - bst r3,6 - bld r19,2 - bst r5,6 - bld r19,3 - bst r21,7 - bld r19,4 - bst r23,7 - bld r19,5 - bst r3,7 - bld r19,6 - bst r5,7 - bld r19,7 - st X+,r18 - st X+,r19 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift64n_decrypt, .-gift64n_decrypt - - .text -.global gift64t_encrypt - .type gift64t_encrypt, @function -gift64t_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 30 - ld r8,Z - ldd r9,Z+1 - ldd r10,Z+2 - ldd r11,Z+3 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Z+4 - ldd r9,Z+5 - ldd r10,Z+6 - ldd r11,Z+7 - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ld r20,X+ - ld r21,X+ - bst r20,0 - bld r22,0 - bst r20,1 - bld r2,0 - bst r20,2 - bld r4,0 - bst r20,3 - bld r6,0 - bst r20,4 - bld r22,1 - bst r20,5 - bld r2,1 - bst r20,6 - bld r4,1 - bst r20,7 - bld r6,1 - bst r21,0 - bld r22,2 - bst r21,1 - bld r2,2 - bst r21,2 - bld r4,2 - bst r21,3 - bld r6,2 - bst r21,4 - bld r22,3 - bst r21,5 - bld r2,3 - bst r21,6 - bld r4,3 - bst r21,7 - bld r6,3 - ld r20,X+ - ld r21,X+ - bst r20,0 - bld r22,4 - bst r20,1 - bld r2,4 - bst r20,2 - bld r4,4 - bst r20,3 - bld r6,4 - bst r20,4 - bld r22,5 - bst r20,5 - bld r2,5 - bst r20,6 - bld r4,5 - bst r20,7 - bld r6,5 - bst r21,0 - bld r22,6 - bst r21,1 - bld r2,6 - bst r21,2 - bld r4,6 - bst r21,3 - bld r6,6 - bst r21,4 - bld r22,7 - bst r21,5 - bld r2,7 - bst r21,6 - bld r4,7 - bst r21,7 - bld r6,7 - ld r20,X+ - ld r21,X+ - bst r20,0 - bld r23,0 - bst r20,1 - bld r3,0 - bst r20,2 - bld r5,0 - bst r20,3 - bld r7,0 - bst r20,4 - bld r23,1 - bst r20,5 - bld r3,1 - bst r20,6 - bld 
r5,1 - bst r20,7 - bld r7,1 - bst r21,0 - bld r23,2 - bst r21,1 - bld r3,2 - bst r21,2 - bld r5,2 - bst r21,3 - bld r7,2 - bst r21,4 - bld r23,3 - bst r21,5 - bld r3,3 - bst r21,6 - bld r5,3 - bst r21,7 - bld r7,3 - ld r20,X+ - ld r21,X+ - bst r20,0 - bld r23,4 - bst r20,1 - bld r3,4 - bst r20,2 - bld r5,4 - bst r20,3 - bld r7,4 - bst r20,4 - bld r23,5 - bst r20,5 - bld r3,5 - bst r20,6 - bld r5,5 - bst r20,7 - bld r7,5 - bst r21,0 - bld r23,6 - bst r21,1 - bld r3,6 - bst r21,2 - bld r5,6 - bst r21,3 - bld r7,6 - bst r21,4 - bld r23,7 - bst r21,5 - bld r3,7 - bst r21,6 - bld r5,7 - bst r21,7 - bld r7,7 - rcall 1073f - ldi r20,1 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - rcall 1073f - ldi r20,3 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - rcall 1073f - ldi r20,7 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - rcall 1073f - ldi r20,15 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - rcall 1073f - ldi r20,31 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - rcall 1073f - ldi r20,62 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - rcall 1073f - ldi r20,61 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - rcall 1073f - ldi r20,59 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - 
lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - rcall 1073f - ldi r20,55 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - rcall 1073f - ldi r20,47 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - rcall 1073f - ldi r20,30 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - rcall 1073f - ldi r20,60 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - rcall 1073f - ldi r20,57 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - rcall 1073f - ldi r20,51 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - rcall 1073f - ldi r20,39 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - rcall 1073f - ldi r20,14 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - rcall 1073f - ldi r20,29 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+13,r8 - std Y+14,r9 - std 
Y+15,r10 - std Y+16,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - rcall 1073f - ldi r20,58 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - rcall 1073f - ldi r20,53 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - rcall 1073f - ldi r20,43 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - rcall 1073f - ldi r20,22 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - rcall 1073f - ldi r20,44 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - rcall 1073f - ldi r20,24 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - rcall 1073f - ldi r20,48 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - rcall 1073f - ldi r20,33 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - rcall 1073f - ldi r20,2 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd 
r11,Y+8 - rcall 1073f - ldi r20,5 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - rcall 1073f - ldi r20,11 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rjmp 1264f -1073: - mov r0,r22 - and r0,r4 - eor r2,r0 - mov r0,r23 - and r0,r5 - eor r3,r0 - mov r0,r2 - and r0,r6 - eor r22,r0 - mov r0,r3 - and r0,r7 - eor r23,r0 - mov r0,r22 - or r0,r2 - eor r4,r0 - mov r0,r23 - or r0,r3 - eor r5,r0 - eor r6,r4 - eor r7,r5 - eor r2,r6 - eor r3,r7 - com r6 - com r7 - movw r20,r22 - mov r0,r2 - and r0,r20 - eor r4,r0 - mov r0,r3 - and r0,r21 - eor r5,r0 - movw r22,r6 - movw r6,r20 - bst r22,1 - bld r0,0 - bst r22,4 - bld r22,1 - bst r22,3 - bld r22,4 - bst r23,4 - bld r22,3 - bst r0,0 - bld r23,4 - bst r22,2 - bld r0,0 - bst r23,0 - bld r22,2 - bst r0,0 - bld r23,0 - bst r22,5 - bld r0,0 - bst r22,7 - bld r22,5 - bst r23,7 - bld r22,7 - bst r23,5 - bld r23,7 - bst r0,0 - bld r23,5 - bst r22,6 - bld r0,0 - bst r23,3 - bld r22,6 - bst r23,6 - bld r23,3 - bst r23,1 - bld r23,6 - bst r0,0 - bld r23,1 - bst r2,0 - bld r0,0 - bst r2,1 - bld r2,0 - bst r2,5 - bld r2,1 - bst r2,4 - bld r2,5 - bst r0,0 - bld r2,4 - bst r2,2 - bld r0,0 - bst r3,1 - bld r2,2 - bst r2,7 - bld r3,1 - bst r3,4 - bld r2,7 - bst r0,0 - bld r3,4 - bst r2,3 - bld r0,0 - bst r3,5 - bld r2,3 - bst r2,6 - bld r3,5 - bst r3,0 - bld r2,6 - bst r0,0 - bld r3,0 - bst r3,2 - bld r0,0 - bst r3,3 - bld r3,2 - bst r3,7 - bld r3,3 - bst r3,6 - bld r3,7 - bst r0,0 - bld r3,6 - bst r4,0 - bld r0,0 - bst r4,2 - bld r4,0 - bst r5,2 - bld r4,2 - bst r5,0 - bld r5,2 - bst r0,0 - bld r5,0 - bst r4,1 - bld r0,0 - bst r4,6 - bld r4,1 - bst r5,1 - bld r4,6 - bst r4,4 - bld r5,1 - bst r0,0 - bld r4,4 - bst r4,3 - bld r0,0 - bst r5,6 - bld r4,3 - bst r5,3 - bld r5,6 - bst r5,4 - bld r5,3 - bst r0,0 - bld r5,4 - bst r4,7 - bld r0,0 - bst r5,5 - bld r4,7 - bst r0,0 - bld r5,5 - bst r6,0 - bld r0,0 - bst r6,3 - bld r6,0 - bst r7,7 - bld r6,3 - bst r7,4 - bld r7,7 - bst r0,0 - bld r7,4 - bst r6,1 - bld r0,0 - bst r6,7 - bld r6,1 - bst r7,6 - bld r6,7 - bst r7,0 - bld r7,6 - bst r0,0 - bld r7,0 - bst r6,2 - bld r0,0 - bst r7,3 - bld r6,2 - bst r7,5 - bld r7,3 - bst r6,4 - bld r7,5 - bst r0,0 - bld r6,4 - bst r6,5 - bld r0,0 - bst r6,6 - bld r6,5 - bst r7,2 - bld r6,6 - bst r7,1 - bld r7,2 - bst r0,0 - bld r7,1 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - ret -1264: - ldd r26,Y+17 - ldd r27,Y+18 - bst r22,0 - bld r20,0 - bst r2,0 - bld r20,1 - bst r4,0 - bld r20,2 - bst r6,0 - bld r20,3 - bst r22,1 - bld r20,4 - bst r2,1 - bld r20,5 - bst r4,1 - bld r20,6 - bst r6,1 - bld r20,7 - bst r22,2 - bld r21,0 - bst r2,2 - bld r21,1 - bst r4,2 - bld r21,2 - bst r6,2 - bld r21,3 - bst r22,3 - bld r21,4 - bst r2,3 - bld r21,5 - bst r4,3 - bld r21,6 - bst r6,3 - bld r21,7 - st X+,r20 - st X+,r21 - bst r22,4 - bld r20,0 - bst r2,4 - bld r20,1 - bst r4,4 - bld r20,2 - bst r6,4 - bld r20,3 - bst r22,5 - bld r20,4 - bst r2,5 - bld r20,5 - bst r4,5 - bld r20,6 - bst r6,5 - bld r20,7 - bst r22,6 - bld r21,0 - bst r2,6 - bld r21,1 - bst r4,6 - bld r21,2 - bst r6,6 - bld r21,3 - bst r22,7 - bld r21,4 - bst r2,7 - bld r21,5 - bst r4,7 - bld r21,6 - bst r6,7 - bld r21,7 - st X+,r20 - st X+,r21 - bst r23,0 - bld r20,0 - bst r3,0 - bld r20,1 - bst r5,0 - bld r20,2 - bst r7,0 - bld r20,3 
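; The bst/bld ladder around this point (label 1264) reloads the output
; pointer saved at function entry (Y+17/Y+18) into X and transposes the
; bit-sliced registers r22/r23, r2/r3, r4/r5, r6/r7 back into nibble-packed
; bytes as they are stored, playing the same role as gift64n_to_nibbles()
; in the portable C implementation further down in this patch.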
- bst r23,1 - bld r20,4 - bst r3,1 - bld r20,5 - bst r5,1 - bld r20,6 - bst r7,1 - bld r20,7 - bst r23,2 - bld r21,0 - bst r3,2 - bld r21,1 - bst r5,2 - bld r21,2 - bst r7,2 - bld r21,3 - bst r23,3 - bld r21,4 - bst r3,3 - bld r21,5 - bst r5,3 - bld r21,6 - bst r7,3 - bld r21,7 - st X+,r20 - st X+,r21 - bst r23,4 - bld r20,0 - bst r3,4 - bld r20,1 - bst r5,4 - bld r20,2 - bst r7,4 - bld r20,3 - bst r23,5 - bld r20,4 - bst r3,5 - bld r20,5 - bst r5,5 - bld r20,6 - bst r7,5 - bld r20,7 - bst r23,6 - bld r21,0 - bst r3,6 - bld r21,1 - bst r5,6 - bld r21,2 - bst r7,6 - bld r21,3 - bst r23,7 - bld r21,4 - bst r3,7 - bld r21,5 - bst r5,7 - bld r21,6 - bst r7,7 - bld r21,7 - st X+,r20 - st X+,r21 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift64t_encrypt, .-gift64t_encrypt - - .text -.global gift64t_decrypt - .type gift64t_decrypt, @function -gift64t_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 30 - ld r8,Z - ldd r9,Z+1 - ldd r10,Z+2 - ldd r11,Z+3 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Z+4 - ldd r9,Z+5 - ldd r10,Z+6 - ldd r11,Z+7 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r20,X+ - ld r21,X+ - bst r20,0 - bld r22,0 - bst r20,1 - bld r2,0 - bst r20,2 - bld r4,0 - bst r20,3 - bld r6,0 - bst r20,4 - bld r22,1 - bst r20,5 - bld r2,1 - bst r20,6 - bld r4,1 - bst r20,7 - bld r6,1 - bst r21,0 - bld r22,2 - bst r21,1 - bld r2,2 - bst r21,2 - bld r4,2 - bst r21,3 - bld r6,2 - bst r21,4 - bld r22,3 - bst r21,5 - bld r2,3 - bst r21,6 - bld r4,3 - bst r21,7 - bld r6,3 - ld r20,X+ - ld r21,X+ - bst r20,0 - bld r22,4 - bst r20,1 - bld r2,4 - bst r20,2 - bld r4,4 - bst r20,3 - bld r6,4 - bst r20,4 - bld r22,5 - bst r20,5 - bld r2,5 - bst r20,6 - bld r4,5 - bst r20,7 - bld r6,5 - bst r21,0 - bld r22,6 - bst r21,1 - bld r2,6 - bst r21,2 - bld r4,6 - bst r21,3 - bld r6,6 - bst r21,4 - bld r22,7 - bst r21,5 - bld r2,7 - bst r21,6 - bld r4,7 - bst r21,7 - bld r6,7 - ld r20,X+ - ld r21,X+ - bst r20,0 - bld r23,0 - bst r20,1 - bld r3,0 - bst r20,2 - bld r5,0 - bst r20,3 - bld r7,0 - bst r20,4 - bld r23,1 - bst r20,5 - bld 
r3,1 - bst r20,6 - bld r5,1 - bst r20,7 - bld r7,1 - bst r21,0 - bld r23,2 - bst r21,1 - bld r3,2 - bst r21,2 - bld r5,2 - bst r21,3 - bld r7,2 - bst r21,4 - bld r23,3 - bst r21,5 - bld r3,3 - bst r21,6 - bld r5,3 - bst r21,7 - bld r7,3 - ld r20,X+ - ld r21,X+ - bst r20,0 - bld r23,4 - bst r20,1 - bld r3,4 - bst r20,2 - bld r5,4 - bst r20,3 - bld r7,4 - bst r20,4 - bld r23,5 - bst r20,5 - bld r3,5 - bst r20,6 - bld r5,5 - bst r20,7 - bld r7,5 - bst r21,0 - bld r23,6 - bst r21,1 - bld r3,6 - bst r21,2 - bld r5,6 - bst r21,3 - bld r7,6 - bst r21,4 - bld r23,7 - bst r21,5 - bld r3,7 - bst r21,6 - bld r5,7 - bst r21,7 - bld r7,7 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,11 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,5 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,2 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,33 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,48 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - rcall 1185f - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,24 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,44 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc 
r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,22 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,43 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - rcall 1185f - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,53 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,58 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,29 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,14 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - rcall 1185f - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,39 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,51 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,57 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,60 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - 
rcall 1185f - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,30 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,47 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,55 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,59 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - rcall 1185f - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,61 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,62 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,31 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,15 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - rcall 1185f - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,7 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - 
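; The decryption rounds mirror the encryption path: each block reloads one
; cached key word from Y+1..Y+16, un-rotates its halves (right by 4 and left
; by 2 instead of left by 4 and right by 2), applies the round constants in
; reverse order through "ldi r20,<rc> / ldi r21,128 / eor r6,r20 / eor r7,r21",
; XORs the tweak byte in r18 into r4/r5 on every fourth round, and then
; "rcall 1185f" undoes the round itself: round-key XOR, inverse bit
; permutation, inverse S-box.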
mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,3 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,1 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - rjmp 1374f -1185: - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - bst r22,1 - bld r0,0 - bst r23,4 - bld r22,1 - bst r22,3 - bld r23,4 - bst r22,4 - bld r22,3 - bst r0,0 - bld r22,4 - bst r22,2 - bld r0,0 - bst r23,0 - bld r22,2 - bst r0,0 - bld r23,0 - bst r22,5 - bld r0,0 - bst r23,5 - bld r22,5 - bst r23,7 - bld r23,5 - bst r22,7 - bld r23,7 - bst r0,0 - bld r22,7 - bst r22,6 - bld r0,0 - bst r23,1 - bld r22,6 - bst r23,6 - bld r23,1 - bst r23,3 - bld r23,6 - bst r0,0 - bld r23,3 - bst r2,0 - bld r0,0 - bst r2,4 - bld r2,0 - bst r2,5 - bld r2,4 - bst r2,1 - bld r2,5 - bst r0,0 - bld r2,1 - bst r2,2 - bld r0,0 - bst r3,4 - bld r2,2 - bst r2,7 - bld r3,4 - bst r3,1 - bld r2,7 - bst r0,0 - bld r3,1 - bst r2,3 - bld r0,0 - bst r3,0 - bld r2,3 - bst r2,6 - bld r3,0 - bst r3,5 - bld r2,6 - bst r0,0 - bld r3,5 - bst r3,2 - bld r0,0 - bst r3,6 - bld r3,2 - bst r3,7 - bld r3,6 - bst r3,3 - bld r3,7 - bst r0,0 - bld r3,3 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r4,2 - bld r5,2 - bst r0,0 - bld r4,2 - bst r4,1 - bld r0,0 - bst r4,4 - bld r4,1 - bst r5,1 - bld r4,4 - bst r4,6 - bld r5,1 - bst r0,0 - bld r4,6 - bst r4,3 - bld r0,0 - bst r5,4 - bld r4,3 - bst r5,3 - bld r5,4 - bst r5,6 - bld r5,3 - bst r0,0 - bld r5,6 - bst r4,7 - bld r0,0 - bst r5,5 - bld r4,7 - bst r0,0 - bld r5,5 - bst r6,0 - bld r0,0 - bst r7,4 - bld r6,0 - bst r7,7 - bld r7,4 - bst r6,3 - bld r7,7 - bst r0,0 - bld r6,3 - bst r6,1 - bld r0,0 - bst r7,0 - bld r6,1 - bst r7,6 - bld r7,0 - bst r6,7 - bld r7,6 - bst r0,0 - bld r6,7 - bst r6,2 - bld r0,0 - bst r6,4 - bld r6,2 - bst r7,5 - bld r6,4 - bst r7,3 - bld r7,5 - bst r0,0 - bld r7,3 - bst r6,5 - bld r0,0 - bst r7,1 - bld r6,5 - bst r7,2 - bld r7,1 - bst r6,6 - bld r7,2 - bst r0,0 - bld r6,6 - movw r20,r6 - movw r6,r22 - movw r22,r20 - and r20,r2 - and r21,r3 - eor r4,r20 - eor r5,r21 - com r6 - com r7 - eor r2,r6 - eor r3,r7 - eor r6,r4 - eor r7,r5 - mov r0,r22 - or r0,r2 - eor r4,r0 - mov r0,r23 - or r0,r3 - eor r5,r0 - mov r0,r2 - and r0,r6 - eor r22,r0 - mov r0,r3 - and r0,r7 - eor r23,r0 - mov r0,r22 - and r0,r4 - eor r2,r0 - mov r0,r23 - and r0,r5 - eor r3,r0 - ret -1374: - ldd r26,Y+17 - ldd r27,Y+18 - bst r22,0 - bld r20,0 - bst r2,0 - bld r20,1 - bst r4,0 - bld r20,2 - bst r6,0 - bld r20,3 - bst r22,1 - bld r20,4 - bst r2,1 - bld r20,5 - bst r4,1 - bld r20,6 - bst r6,1 - bld r20,7 - bst r22,2 - bld r21,0 - bst r2,2 - bld r21,1 - bst r4,2 - bld r21,2 - bst r6,2 - bld r21,3 - bst r22,3 - bld r21,4 - bst r2,3 - bld r21,5 - bst r4,3 - bld r21,6 - bst r6,3 - bld r21,7 - st X+,r20 - st X+,r21 - bst r22,4 - bld r20,0 - bst r2,4 - bld r20,1 - bst r4,4 - bld r20,2 - bst r6,4 - bld r20,3 - bst r22,5 - bld r20,4 - bst r2,5 - bld r20,5 - bst r4,5 - bld r20,6 - bst r6,5 - bld r20,7 - bst r22,6 - bld r21,0 - bst r2,6 - bld r21,1 - bst r4,6 - bld r21,2 - bst r6,6 - bld r21,3 - bst r22,7 - bld r21,4 - bst r2,7 - bld 
r21,5 - bst r4,7 - bld r21,6 - bst r6,7 - bld r21,7 - st X+,r20 - st X+,r21 - bst r23,0 - bld r20,0 - bst r3,0 - bld r20,1 - bst r5,0 - bld r20,2 - bst r7,0 - bld r20,3 - bst r23,1 - bld r20,4 - bst r3,1 - bld r20,5 - bst r5,1 - bld r20,6 - bst r7,1 - bld r20,7 - bst r23,2 - bld r21,0 - bst r3,2 - bld r21,1 - bst r5,2 - bld r21,2 - bst r7,2 - bld r21,3 - bst r23,3 - bld r21,4 - bst r3,3 - bld r21,5 - bst r5,3 - bld r21,6 - bst r7,3 - bld r21,7 - st X+,r20 - st X+,r21 - bst r23,4 - bld r20,0 - bst r3,4 - bld r20,1 - bst r5,4 - bld r20,2 - bst r7,4 - bld r20,3 - bst r23,5 - bld r20,4 - bst r3,5 - bld r20,5 - bst r5,5 - bld r20,6 - bst r7,5 - bld r20,7 - bst r23,6 - bld r21,0 - bst r3,6 - bld r21,1 - bst r5,6 - bld r21,2 - bst r7,6 - bld r21,3 - bst r23,7 - bld r21,4 - bst r3,7 - bld r21,5 - bst r5,7 - bld r21,6 - bst r7,7 - bld r21,7 - st X+,r20 - st X+,r21 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift64t_decrypt, .-gift64t_decrypt - -#endif diff --git a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/internal-gift64.c b/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/internal-gift64.c deleted file mode 100644 index 81bc8a3..0000000 --- a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/internal-gift64.c +++ /dev/null @@ -1,1205 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-gift64.h" -#include "internal-util.h" -#include - -#if !GIFT64_LOW_MEMORY - -/* Round constants for GIFT-64 in the fixsliced representation */ -static uint32_t const GIFT64_RC[28] = { - 0x22000011, 0x00002299, 0x11118811, 0x880000ff, 0x33111199, 0x990022ee, - 0x22119933, 0x880033bb, 0x22119999, 0x880022ff, 0x11119922, 0x880033cc, - 0x33008899, 0x99002299, 0x33118811, 0x880000ee, 0x33110099, 0x990022aa, - 0x22118833, 0x880022bb, 0x22111188, 0x88002266, 0x00009922, 0x88003300, - 0x22008811, 0x00002288, 0x00118811, 0x880000bb -}; - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) - -/** - * \brief Swaps bits within two words. - * - * \param a The first word. - * \param b The second word. 
- * \param mask Mask for the bits to shift. - * \param shift Shift amount in bits. - */ -#define gift64b_swap_move(a, b, mask, shift) \ - do { \ - uint32_t t = ((b) ^ ((a) >> (shift))) & (mask); \ - (b) ^= t; \ - (a) ^= t << (shift); \ - } while (0) - -/** - * \brief Performs the GIFT-64 S-box on the bit-sliced state. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift64b_sbox(s0, s1, s2, s3) \ - do { \ - s1 ^= s0 & s2; \ - s0 ^= s1 & s3; \ - s2 ^= s0 | s1; \ - s3 ^= s2; \ - s1 ^= s3; \ - s2 ^= s0 & s1; \ - } while (0) - -/** - * \brief Performs the inverse of the GIFT-64 S-box on the bit-sliced state. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift64b_inv_sbox(s0, s1, s2, s3) \ - do { \ - s2 ^= s3 & s1; \ - s1 ^= s0; \ - s0 ^= s2; \ - s2 ^= s3 | s1; \ - s3 ^= s1 & s0; \ - s1 ^= s3 & s2; \ - } while (0) - -/* Rotates a state word left by 1 position in the fixsliced representation: - * - * 0 1 2 3 1 2 3 0 - * 4 5 6 7 ==> 5 6 7 4 - * 8 9 10 11 9 10 11 8 - * 12 13 14 15 13 14 14 12 - */ -#define gift64b_rotate_left_1(x) \ - ((((x) >> 1) & 0x77777777U) | (((x) & 0x11111111U) << 3)) - -/* Rotates a state word left by 2 positions in the fixsliced representation: - * - * 0 1 2 3 2 3 0 1 - * 4 5 6 7 ==> 6 7 4 5 - * 8 9 10 11 10 11 8 9 - * 12 13 14 15 14 15 12 13 - */ -#define gift64b_rotate_left_2(x) \ - ((((x) >> 2) & 0x33333333U) | (((x) & 0x33333333U) << 2)) - -/* Rotates a state word left by 3 positions in the fixsliced representation: - * - * 0 1 2 3 3 0 1 2 - * 4 5 6 7 ==> 7 4 5 6 - * 8 9 10 11 11 8 9 10 - * 12 13 14 15 15 12 13 14 - */ -#define gift64b_rotate_left_3(x) \ - ((((x) >> 3) & 0x11111111U) | (((x) & 0x77777777U) << 1)) - -/* Rotates a state word right by 1 position in the fixsliced representation */ -#define gift64b_rotate_right_1(x) gift64b_rotate_left_3(x) - -/* Rotates a state word right by 2 positions in the fixsliced representation */ -#define gift64b_rotate_right_2(x) gift64b_rotate_left_2(x) - -/* Rotates a state word right by 3 positions in the fixsliced representation */ -#define gift64b_rotate_right_3(x) gift64b_rotate_left_1(x) - -/* Rotates a state word up by 1 position in the fixsliced representation: - * - * 0 1 2 3 4 5 6 7 - * 4 5 6 7 ==> 8 9 10 11 - * 8 9 10 11 12 13 14 15 - * 12 13 14 15 0 1 2 3 - */ -#define gift64b_rotate_up_1(x) (rightRotate8((x))) - -/* Rotates a state word up by 2 positions in the fixsliced representation: - * - * 0 1 2 3 8 9 10 11 - * 4 5 6 7 ==> 12 13 14 15 - * 8 9 10 11 0 1 2 3 - * 12 13 14 15 4 5 6 7 - */ -#define gift64b_rotate_up_2(x) (rightRotate16((x))) - -/* Rotates a state word up by 3 positions in the fixsliced representation: - * - * 0 1 2 3 12 13 14 15 - * 4 5 6 7 ==> 0 1 2 3 - * 8 9 10 11 4 5 6 7 - * 12 13 14 15 8 9 10 11 - */ -#define gift64b_rotate_up_3(x) (rightRotate24((x))) - -/* Rotates a state word down by 1 position in the fixsliced representation */ -#define gift64b_rotate_down_1(x) gift64b_rotate_up_3(x) - -/* Rotates a state word down by 2 positions in the fixsliced representation */ -#define gift64b_rotate_down_2(x) gift64b_rotate_up_2(x) - -/* Rotates a state word down by 3 positions in the fixsliced representation */ -#define gift64b_rotate_down_3(x) 
gift64b_rotate_up_1(x) - -/* Permutation code to rearrange key bits into fixsliced form. Permutations - * generated wth "http://programming.sirrida.de/calcperm.php" */ -#define gift64b_rearrange1_transpose_low(out, in) \ - do { \ - out = (in) & 0x0000FFFFU; \ - /* 0 8 16 24 3 11 19 27 2 10 18 26 1 9 17 25 * */ \ - bit_permute_step(out, 0x0000CCCCU, 16); \ - bit_permute_step(out, 0x30030330U, 2); \ - bit_permute_step(out, 0x00960096U, 8); \ - bit_permute_step(out, 0x05500550U, 1); \ - bit_permute_step(out, 0x0A0A0A0AU, 4); \ - } while (0) -#define gift64b_rearrange1_transpose_high(out, in) \ - do { \ - out = (in) >> 16; \ - /* 0 8 16 24 3 11 19 27 2 10 18 26 1 9 17 25 * */ \ - bit_permute_step(out, 0x0000CCCCU, 16); \ - bit_permute_step(out, 0x30030330U, 2); \ - bit_permute_step(out, 0x00960096U, 8); \ - bit_permute_step(out, 0x05500550U, 1); \ - bit_permute_step(out, 0x0A0A0A0AU, 4); \ - } while (0) -#define gift64b_rearrange1_low(out, in) \ - do { \ - out = (in) & 0x0000FFFFU; \ - /* 0 1 2 3 24 25 26 27 16 17 18 19 8 9 10 11 * */ \ - out = (out & 0x0000000FU) | ((out & 0x00000F00U) << 8) | \ - ((out & 0x000000F0U) << 20) | ((out & 0x0000F000U) >> 4); \ - } while (0) -#define gift64b_rearrange1_high(out, in) \ - do { \ - out = (in) >> 16; \ - /* 0 1 2 3 24 25 26 27 16 17 18 19 8 9 10 11 * */ \ - out = (out & 0x0000000FU) | ((out & 0x00000F00U) << 8) | \ - ((out & 0x000000F0U) << 20) | ((out & 0x0000F000U) >> 4); \ - } while (0) -#define gift64b_rearrange2_transpose_low(out, in) \ - do { \ - out = (in) & 0x0000FFFFU; \ - /* 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 * */ \ - bit_permute_step(out, 0x0A0A0A0AU, 3); \ - bit_permute_step(out, 0x00CC00CCU, 6); \ - bit_permute_step(out, 0x0000F0F0U, 12); \ - bit_permute_step(out, 0x0000FF00U, 8); \ - } while (0) -#define gift64b_rearrange2_transpose_high(out, in) \ - do { \ - out = (in) >> 16; \ - /* 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 * */ \ - bit_permute_step(out, 0x0A0A0A0AU, 3); \ - bit_permute_step(out, 0x00CC00CCU, 6); \ - bit_permute_step(out, 0x0000F0F0U, 12); \ - bit_permute_step(out, 0x0000FF00U, 8); \ - } while (0) -#define gift64b_rearrange2_low(out, in) \ - do { \ - out = (in) & 0x0000FFFFU; \ - /* 0 1 2 3 8 9 10 11 16 17 18 19 24 25 26 27 * */ \ - out = (out & 0x0000000FU) | ((out & 0x000000F0U) << 4) | \ - ((out & 0x00000F00U) << 8) | ((out & 0x0000F000U) << 12); \ - } while (0) -#define gift64b_rearrange2_high(out, in) \ - do { \ - out = (in) >> 16; \ - /* 0 1 2 3 8 9 10 11 16 17 18 19 24 25 26 27 * */ \ - out = (out & 0x0000000FU) | ((out & 0x000000F0U) << 4) | \ - ((out & 0x00000F00U) << 8) | ((out & 0x0000F000U) << 12); \ - } while (0) - -void gift64n_update_round_keys(gift64n_key_schedule_t *ks) -{ - uint32_t x; - - /* First round */ - gift64b_rearrange1_transpose_low(x, ks->k[3]); - ks->rk[0] = ~(x | (x << 4)); - gift64b_rearrange1_transpose_high(x, ks->k[3]); - ks->rk[1] = x | (x << 4); - - /* Second round */ - gift64b_rearrange1_low(x, ks->k[2]); - x = x | (x << 4); - gift64b_swap_move(x, x, 0x22222222U, 2); - ks->rk[2] = ~x; - gift64b_rearrange1_high(x, ks->k[2]); - x = x | (x << 4); - gift64b_swap_move(x, x, 0x22222222U, 2); - ks->rk[3] = x; - - /* Third round */ - gift64b_rearrange2_transpose_low(x, ks->k[1]); - gift64b_swap_move(x, x, 0x00000F00U, 16); - ks->rk[4] = ~(x | (x << 4)); - gift64b_rearrange2_transpose_high(x, ks->k[1]); - gift64b_swap_move(x, x, 0x00000F00U, 16); - ks->rk[5] = x | (x << 4); - - /* Fourth round */ - gift64b_rearrange2_low(x, ks->k[0]); - ks->rk[6] = ~(x | (x << 4)); - 
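    /* Note: ks->rk[0..7] cover only the first four fixsliced rounds;
     * gift64b_encrypt_core() and gift64b_decrypt_core() below derive the
     * keys for the remaining rounds on the fly, so this function needs to
     * be re-run only when ks->k[] changes. */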
gift64b_rearrange2_high(x, ks->k[0]); - ks->rk[7] = x | (x << 4); -} - -/** - * \brief Perform the core of GIFT-64 encryption on two blocks in parallel. - * - * \param ks Points to the key schedule to use to encrypt the blocks. - * \param state Buffer containing the two blocks in bit-sliced form, - * on input and output. - * \param Tweak value or zero if there is no tweak. - */ -static void gift64b_encrypt_core - (const gift64n_key_schedule_t *ks, uint32_t state[4], uint32_t tweak) -{ - const uint32_t *rc = GIFT64_RC; - uint32_t s0, s1, s2, s3, temp; - uint32_t rk[8]; - uint8_t round; - - /* Start with the pre-computed round keys for the first four rounds */ - memcpy(rk, ks->rk, sizeof(ks->rk)); - - /* Load the state into local variables */ - s0 = state[0]; - s1 = state[1]; - s2 = state[2]; - s3 = state[3]; - - /* Perform all 28 rounds four at a time. We use the "fixslicing" method. - * - * The permutation is restructured so that one of the words each round - * does not need to be permuted, with the others rotating left, up, right, - * and down to keep the bits in line with their non-moving counterparts. - * This reduces the number of shifts required significantly. - * - * At the end of four rounds, the bit ordering will return to the - * original position. We then repeat the process for the next 4 rounds. - */ - for (round = 0; round < 28; round += 4, rc += 4) { - /* 1st round - S-box, rotate left, add round key */ - gift64b_sbox(s0, s1, s2, s3); - s1 = gift64b_rotate_left_1(s1); - s2 = gift64b_rotate_left_2(s2); - s0 = gift64b_rotate_left_3(s0); - s3 ^= rk[0]; - s1 ^= rk[1]; - s0 ^= rc[0]; - - /* 2nd round - S-box, rotate up, add round key (s0 and s3 swapped) */ - gift64b_sbox(s3, s1, s2, s0); - s1 = gift64b_rotate_up_1(s1); - s2 = gift64b_rotate_up_2(s2); - s3 = gift64b_rotate_up_3(s3); - s0 ^= rk[2]; - s1 ^= rk[3]; - s3 ^= rc[1]; - - /* 3rd round - S-box, rotate right, add round key */ - gift64b_sbox(s0, s1, s2, s3); - s1 = gift64b_rotate_right_1(s1); - s2 = gift64b_rotate_right_2(s2); - s0 = gift64b_rotate_right_3(s0); - s3 ^= rk[4]; - s1 ^= rk[5]; - s0 ^= rc[2]; - - /* 4th round - S-box, rotate down, add round key (s0 and s3 swapped) */ - gift64b_sbox(s3, s1, s2, s0); - s1 = gift64b_rotate_down_1(s1); - s2 = gift64b_rotate_down_2(s2); - s3 = gift64b_rotate_down_3(s3); - s0 ^= rk[6]; - s1 ^= rk[7]; - s3 ^= rc[3]; - - /* Add the tweak every four encryption rounds except the last */ - if (round < 24) - s2 ^= tweak; - - /* Derive the round keys for the next 4 rounds */ - rk[0] = gift64b_rotate_left_1(rk[0]); - rk[1] = (gift64b_rotate_left_3(rk[1]) << 16) | (rk[1] >> 16); - rk[2] = rightRotate8(rk[2]); - temp = gift64b_rotate_left_2(rk[3]); - rk[3] = (temp & 0x99999999U) | leftRotate8(temp & 0x66666666U); - rk[4] = gift64b_rotate_left_3(rk[4]); - temp = rightRotate16(rk[5]); - rk[5] = (gift64b_rotate_left_1(temp) & 0x00FFFF00U) | - (temp & 0xFF0000FFU); - rk[6] = leftRotate8(rk[6]); - temp = gift64b_rotate_left_2(rk[7]); - rk[7] = (temp & 0x33333333U) | rightRotate8(temp & 0xCCCCCCCCU); - } - - /* Copy the local variables to the output state */ - state[0] = s0; - state[1] = s1; - state[2] = s2; - state[3] = s3; -} - -/** - * \brief Perform the core of GIFT-64 decryption on two blocks in parallel. - * - * \param ks Points to the key schedule to use to encrypt the blocks. - * \param state Buffer containing the two blocks in bit-sliced form, - * on input and output. - * \param Tweak value or zero if there is no tweak. 
- */ -static void gift64b_decrypt_core - (const gift64n_key_schedule_t *ks, uint32_t state[4], uint32_t tweak) -{ - const uint32_t *rc = GIFT64_RC + 28 - 4; - uint32_t s0, s1, s2, s3, temp; - uint32_t rk[8]; - uint8_t round; - - /* Start with the pre-computed round keys for the first four rounds */ - memcpy(rk, ks->rk, sizeof(ks->rk)); - - /* Fast forward the key schedule to the end by permuting each round - * key by the amount it would see under the full set of rounds. - * Generated with "http://programming.sirrida.de/calcperm.php" */ - /* P0: 1 2 3 0 5 6 7 4 9 10 11 8 13 14 15 12 17 18 - * 19 16 21 22 23 20 25 26 27 24 29 30 31 28 */ - rk[0] = ((rk[0] & 0x77777777U) << 1) | ((rk[0] & 0x88888888U) >> 3); - /* P1: 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 - * 31 3 0 1 2 7 4 5 6 11 8 9 10 15 12 13 14 */ - rk[1] = ((rk[1] & 0xEEEE0000U) >> 17) | ((rk[1] & 0x0000FFFFU) << 16) | - ((rk[1] & 0x11110000U) >> 13); - /* P2: 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 - * 24 25 26 27 28 29 30 31 0 1 2 3 4 5 6 7 */ - rk[2] = leftRotate8(rk[2]); - /* P3: 2 27 24 1 6 31 28 5 10 3 0 9 14 7 4 13 18 11 - * 8 17 22 15 12 21 26 19 16 25 30 23 20 29 */ - rk[3] = ((rk[3] & 0x11111111U) << 2) | leftRotate22(rk[3] & 0x44444444U) | - leftRotate26(rk[3] & 0x22222222U) | ((rk[3] & 0x88888888U) >> 2); - /* P4: 3 0 1 2 7 4 5 6 11 8 9 10 15 12 13 14 19 16 - * 17 18 23 20 21 22 27 24 25 26 31 28 29 30 */ - rk[4] = ((rk[4] & 0x11111111U) << 3) | ((rk[4] & 0xEEEEEEEEU) >> 1); - /* P5: 16 17 18 19 20 21 22 23 25 26 27 24 29 30 31 - * 28 1 2 3 0 5 6 7 4 8 9 10 11 12 13 14 15 */ - rk[5] = leftRotate13(rk[5] & 0x00888800U) | - leftRotate16(rk[5] & 0xFF0000FFU) | - leftRotate17(rk[5] & 0x00777700U); - /* P6: 24 25 26 27 28 29 30 31 0 1 2 3 4 5 6 7 8 9 10 - * 11 12 13 14 15 16 17 18 19 20 21 22 23 */ - rk[6] = leftRotate24(rk[6]); - /* P7: 2 3 8 9 6 7 12 13 10 11 16 17 14 15 20 21 18 19 - * 24 25 22 23 28 29 26 27 0 1 30 31 4 5 */ - rk[7] = ((rk[7] & 0x33333333U) << 2) | leftRotate6(rk[7] & 0xCCCCCCCCU); - - /* Load the state into local variables */ - s0 = state[0]; - s1 = state[1]; - s2 = state[2]; - s3 = state[3]; - - /* Perform all 28 rounds four at a time. We use the "fixslicing" method. - * - * The permutation is restructured so that one of the words each round - * does not need to be permuted, with the others rotating left, up, right, - * and down to keep the bits in line with their non-moving counterparts. - * This reduces the number of shifts required significantly. - * - * At the end of four rounds, the bit ordering will return to the - * original position. We then repeat the process for the next 4 rounds. 
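 * For decryption the same schedule is walked backwards: each iteration
 * first un-rotates the eight cached round keys and then undoes rounds 4,
 * 3, 2 and 1 of the group with the inverse rotations and inverse S-box.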
- */ - for (round = 0; round < 28; round += 4, rc -= 4) { - /* Derive the round keys for the previous 4 rounds */ - rk[0] = gift64b_rotate_right_1(rk[0]); - temp = rk[1] >> 16; - rk[1] = gift64b_rotate_right_3(temp) | (rk[1] << 16); - rk[2] = leftRotate8(rk[2]); - temp = (rk[3] & 0x99999999U) | rightRotate8(rk[3] & 0x66666666U); - rk[3] = gift64b_rotate_right_2(temp); - rk[4] = gift64b_rotate_right_3(rk[4]); - temp = (gift64b_rotate_right_1(rk[5]) & 0x00FFFF00U) | - (rk[5] & 0xFF0000FFU); - rk[5] = leftRotate16(temp); - rk[6] = rightRotate8(rk[6]); - temp = (rk[7] & 0x33333333U) | leftRotate8(rk[7] & 0xCCCCCCCCU); - rk[7] = gift64b_rotate_right_2(temp); - - /* Add the tweak every four decryption rounds except the first */ - if (round != 0) - s2 ^= tweak; - - /* 4th round - S-box, rotate down, add round key (s0 and s3 swapped) */ - s0 ^= rk[6]; - s1 ^= rk[7]; - s3 ^= rc[3]; - s1 = gift64b_rotate_up_1(s1); - s2 = gift64b_rotate_up_2(s2); - s3 = gift64b_rotate_up_3(s3); - gift64b_inv_sbox(s0, s1, s2, s3); - - /* 3rd round - S-box, rotate right, add round key */ - s3 ^= rk[4]; - s1 ^= rk[5]; - s0 ^= rc[2]; - s1 = gift64b_rotate_left_1(s1); - s2 = gift64b_rotate_left_2(s2); - s0 = gift64b_rotate_left_3(s0); - gift64b_inv_sbox(s3, s1, s2, s0); - - /* 2nd round - S-box, rotate up, add round key (s0 and s3 swapped) */ - s0 ^= rk[2]; - s1 ^= rk[3]; - s3 ^= rc[1]; - s1 = gift64b_rotate_down_1(s1); - s2 = gift64b_rotate_down_2(s2); - s3 = gift64b_rotate_down_3(s3); - gift64b_inv_sbox(s0, s1, s2, s3); - - /* 1st round - S-box, rotate left, add round key */ - s3 ^= rk[0]; - s1 ^= rk[1]; - s0 ^= rc[0]; - s1 = gift64b_rotate_right_1(s1); - s2 = gift64b_rotate_right_2(s2); - s0 = gift64b_rotate_right_3(s0); - gift64b_inv_sbox(s3, s1, s2, s0); - } - - /* Copy the local variables to the output state */ - state[0] = s0; - state[1] = s1; - state[2] = s2; - state[3] = s3; -} - -void gift64n_init(gift64n_key_schedule_t *ks, const unsigned char *key) -{ - /* Use the little-endian byte order from the LOTUS-AEAD submission */ - ks->k[0] = le_load_word32(key + 12); - ks->k[1] = le_load_word32(key + 8); - ks->k[2] = le_load_word32(key + 4); - ks->k[3] = le_load_word32(key); - gift64n_update_round_keys(ks); -} - -/** - * \brief Converts the GIFT-64 nibble-based representation into word-based - * (littlen-endian version). - * - * \param output Output buffer to write the word-based version to. - * \param input Input buffer to read the nibble-based version from. - * - * The output words will be in fixsliced form. Technically the output will - * contain two blocks for gift64b_encrypt_core() to process in parallel but - * both blocks will have the same value. 
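 * Duplicating the block wastes half of the two-block datapath for a single
 * encryption, but it keeps one code path through the parallel fixsliced
 * core.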
- */ -static void gift64n_to_words(uint32_t output[4], const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Load the input block into 32-bit words */ - s0 = le_load_word32(input); - s2 = le_load_word32(input + 4); - - /* Rearrange the bits in the block */ - gift64b_swap_move(s0, s0, 0x0A0A0A0AU, 3); - gift64b_swap_move(s0, s0, 0x00CC00CCU, 6); - gift64b_swap_move(s0, s0, 0x0000FF00U, 8); - gift64b_swap_move(s2, s2, 0x0A0A0A0AU, 3); - gift64b_swap_move(s2, s2, 0x00CC00CCU, 6); - gift64b_swap_move(s2, s2, 0x0000FF00U, 8); - - /* Split into two identical blocks in fixsliced form */ - s1 = s0; - s3 = s2; - gift64b_swap_move(s0, s1, 0x0F0F0F0FU, 4); - gift64b_swap_move(s2, s3, 0x0F0F0F0FU, 4); - gift64b_swap_move(s0, s2, 0x0000FFFFU, 16); - gift64b_swap_move(s1, s3, 0x0000FFFFU, 16); - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -/** - * \brief Converts the GIFT-64 word-based representation into nibble-based - * (little-endian version). - * - * \param output Output buffer to write the nibble-based version to. - * \param input Input buffer to read the word-based version from. - * - * The input words are in fixsliced form. Technically there are two - * identical blocks in the input. We drop one when we write to the output. - */ -static void gift64n_to_nibbles(unsigned char *output, const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - - /* Load the state and split the two blocks into separate words */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - gift64b_swap_move(s0, s2, 0x0000FFFFU, 16); - gift64b_swap_move(s1, s3, 0x0000FFFFU, 16); - gift64b_swap_move(s0, s1, 0x0F0F0F0FU, 4); - gift64b_swap_move(s2, s3, 0x0F0F0F0FU, 4); - - /* Rearrange the bits in the first block back into nibble form */ - gift64b_swap_move(s0, s0, 0x0000FF00U, 8); - gift64b_swap_move(s0, s0, 0x00CC00CCU, 6); - gift64b_swap_move(s0, s0, 0x0A0A0A0AU, 3); - gift64b_swap_move(s2, s2, 0x0000FF00U, 8); - gift64b_swap_move(s2, s2, 0x00CC00CCU, 6); - gift64b_swap_move(s2, s2, 0x0A0A0A0AU, 3); - le_store_word32(output, s0); - le_store_word32(output + 4, s2); -} - -void gift64n_encrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t state[4]; - gift64n_to_words(state, input); - gift64b_encrypt_core(ks, state, 0); - gift64n_to_nibbles(output, state); -} - -void gift64n_decrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t state[4]; - gift64n_to_words(state, input); - gift64b_decrypt_core(ks, state, 0); - gift64n_to_nibbles(output, state); -} - -/* 4-bit tweak values expanded to 32-bit in fixsliced form */ -static uint32_t const GIFT64_tweaks[16] = { - 0x00000000, 0xee11ee11, 0xdd22dd22, 0x33333333, 0xbb44bb44, 0x55555555, - 0x66666666, 0x88778877, 0x77887788, 0x99999999, 0xaaaaaaaa, 0x44bb44bb, - 0xcccccccc, 0x22dd22dd, 0x11ee11ee, 0xffffffff -}; - -void gift64t_encrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint16_t tweak) -{ - uint32_t state[4]; - gift64n_to_words(state, input); - gift64b_encrypt_core(ks, state, GIFT64_tweaks[tweak & 0x0F]); - gift64n_to_nibbles(output, state); -} - -void gift64t_decrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint16_t tweak) -{ - uint32_t state[4]; - gift64n_to_words(state, input); - gift64b_decrypt_core(ks, state, GIFT64_tweaks[tweak & 0x0F]); - gift64n_to_nibbles(output, state); -} - -#elif !defined(__AVR__) /* 
GIFT64_LOW_MEMORY */ - -/* Round constants for GIFT-64 */ -static uint8_t const GIFT64_RC[28] = { - 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, - 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, - 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, - 0x21, 0x02, 0x05, 0x0B -}; - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint16_t y = (_y); \ - uint16_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step_simple */ -#define bit_permute_step_simple(_y, mask, shift) \ - do { \ - (_y) = (((_y) & (mask)) << (shift)) | (((_y) >> (shift)) & (mask)); \ - } while (0) - -/* - * The permutation below was generated by the online permuation generator at - * "http://programming.sirrida.de/calcperm.php". - * - * All of the permutuations are essentially the same, except that each is - * rotated by 4 bits with respect to the next: - * - * P0: 0 12 8 4 1 13 9 5 2 14 10 6 3 15 11 7 - * P1: 4 0 12 8 5 1 13 9 6 2 14 10 7 3 15 11 - * P2: 8 4 0 12 9 5 1 13 10 6 2 14 11 7 3 15 - * P3: 12 8 4 0 13 9 5 1 14 10 6 2 15 11 7 3 - * - * The most efficient permutation from the online generator was P1, so we - * perform it as the core of the others, and then perform a final rotation. - * - * It is possible to do slightly better than "P1 then rotate" on desktop and - * server architectures for the other permutations. But the advantage isn't - * as evident on embedded platforms so we keep things simple. - */ -#define PERM1_INNER(x) \ - do { \ - bit_permute_step(x, 0x0a0a, 3); \ - bit_permute_step(x, 0x00cc, 6); \ - bit_permute_step_simple(x, 0x0f0f, 4); \ - } while (0) -#define PERM0(x) \ - do { \ - uint32_t _x = (x); \ - PERM1_INNER(_x); \ - (x) = leftRotate12_16(_x); \ - } while (0) -#define PERM1(x) PERM1_INNER(x) -#define PERM2(x) \ - do { \ - uint32_t _x = (x); \ - PERM1_INNER(_x); \ - (x) = leftRotate4_16(_x); \ - } while (0) -#define PERM3(x) \ - do { \ - uint32_t _x = (x); \ - PERM1_INNER(_x); \ - (x) = leftRotate8_16(_x); \ - } while (0) - -#define INV_PERM1_INNER(x) \ - do { \ - bit_permute_step(x, 0x0505, 5); \ - bit_permute_step(x, 0x00cc, 6); \ - bit_permute_step_simple(x, 0x0f0f, 4); \ - } while (0) -#define INV_PERM0(x) \ - do { \ - uint32_t _x = rightRotate12_16(x); \ - INV_PERM1_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM1(x) INV_PERM1_INNER(x) -#define INV_PERM2(x) \ - do { \ - uint32_t _x = rightRotate4_16(x); \ - INV_PERM1_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM3(x) \ - do { \ - uint32_t _x = rightRotate8_16(x); \ - INV_PERM1_INNER(_x); \ - (x) = _x; \ - } while (0) - -/** - * \brief Encrypts a 64-bit block with GIFT-64 (bit-sliced). - * - * \param ks Points to the GIFT-64 key schedule. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. 
- */ -static void gift64b_encrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint16_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word16(input); - s1 = be_load_word16(input + 2); - s2 = be_load_word16(input + 4); - s3 = be_load_word16(input + 6); - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[0]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[3]; - - /* Perform all 28 rounds */ - for (round = 0; round < 28; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 64-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s0 ^= (uint16_t)w3; - s1 ^= (uint16_t)(w3 >> 16); - s3 ^= 0x8000U ^ GIFT64_RC[round]; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word16(output, s0); - be_store_word16(output + 2, s1); - be_store_word16(output + 4, s2); - be_store_word16(output + 6, s3); -} - -/** - * \brief Decrypts a 64-bit block with GIFT-64 (bit-sliced). - * - * \param ks Points to the GIFT-64 key schedule. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - */ -static void gift64b_decrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint16_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the ciphertext into the state buffer and convert from big endian */ - s0 = be_load_word16(input); - s1 = be_load_word16(input + 2); - s2 = be_load_word16(input + 4); - s3 = be_load_word16(input + 6); - - /* Generate the decryption key at the end of the last round. - * - * To do that, we run the block operation forward to determine the - * final state of the key schedule after the last round: - * - * w0 = ks->k[0]; - * w1 = ks->k[1]; - * w2 = ks->k[2]; - * w3 = ks->k[3]; - * for (round = 0; round < 28; ++round) { - * temp = w3; - * w3 = w2; - * w2 = w1; - * w1 = w0; - * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - * } - * - * We can short-cut all of the above by noticing that we don't need - * to do the word rotations. Every 4 rounds, the rotation alignment - * returns to the original position and each word has been rotated - * by applying the "2 right and 4 left" bit-rotation step to it. - * We then repeat that 7 times for the full 28 rounds. The overall - * effect is to apply a "14 right and 28 left" bit-rotation to every word - * in the key schedule. That is equivalent to "14 right and 12 left" - * on the 16-bit sub-words. 
- */ - w0 = ks->k[0]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[3]; - w0 = ((w0 & 0xC0000000U) >> 14) | ((w0 & 0x3FFF0000U) << 2) | - ((w0 & 0x0000000FU) << 12) | ((w0 & 0x0000FFF0U) >> 4); - w1 = ((w1 & 0xC0000000U) >> 14) | ((w1 & 0x3FFF0000U) << 2) | - ((w1 & 0x0000000FU) << 12) | ((w1 & 0x0000FFF0U) >> 4); - w2 = ((w2 & 0xC0000000U) >> 14) | ((w2 & 0x3FFF0000U) << 2) | - ((w2 & 0x0000000FU) << 12) | ((w2 & 0x0000FFF0U) >> 4); - w3 = ((w3 & 0xC0000000U) >> 14) | ((w3 & 0x3FFF0000U) << 2) | - ((w3 & 0x0000000FU) << 12) | ((w3 & 0x0000FFF0U) >> 4); - - /* Perform all 28 rounds */ - for (round = 28; round > 0; --round) { - /* Rotate the key schedule backwards */ - temp = w0; - w0 = w1; - w1 = w2; - w2 = w3; - w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | - ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s0 ^= (uint16_t)w3; - s1 ^= (uint16_t)(w3 >> 16); - s3 ^= 0x8000U ^ GIFT64_RC[round - 1]; - - /* InvPermBits - apply the inverse of the 128-bit permutation */ - INV_PERM0(s0); - INV_PERM1(s1); - INV_PERM2(s2); - INV_PERM3(s3); - - /* InvSubCells - apply the inverse of the S-box */ - temp = s0; - s0 = s3; - s3 = temp; - s2 ^= s0 & s1; - s3 ^= 0xFFFFU; - s1 ^= s3; - s3 ^= s2; - s2 ^= s0 | s1; - s0 ^= s1 & s3; - s1 ^= s0 & s2; - } - - /* Pack the state into the plaintext buffer in big endian */ - be_store_word16(output, s0); - be_store_word16(output + 2, s1); - be_store_word16(output + 4, s2); - be_store_word16(output + 6, s3); -} - -void gift64n_init(gift64n_key_schedule_t *ks, const unsigned char *key) -{ - /* Use the little-endian byte order from the LOTUS-AEAD submission */ - ks->k[0] = le_load_word32(key + 12); - ks->k[1] = le_load_word32(key + 8); - ks->k[2] = le_load_word32(key + 4); - ks->k[3] = le_load_word32(key); -} - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step_32(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) - -/** - * \brief Converts the GIFT-64 nibble-based representation into word-based. - * - * \param output Output buffer to write the word-based version to. - * \param input Input buffer to read the nibble-based version from. - * - * The \a input and \a output buffers can be the same buffer. - */ -static void gift64n_to_words - (unsigned char *output, const unsigned char *input) -{ - uint32_t s0, s1; - - /* Load the input buffer into 32-bit words. We use the nibble order from - * the LOTUS-AEAD submission to NIST which is byte-reversed with respect - * to the nibble order of the original GIFT-64 paper. Nibble zero is in - * the first byte instead of the last, which means little-endian order. */ - s0 = le_load_word32(input + 4); - s1 = le_load_word32(input); - - /* Rearrange the bits so that bits 0..3 of each nibble are - * scattered to bytes 0..3 of each word. The permutation is: - * - * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 - * - * Generated with "http://programming.sirrida.de/calcperm.php". 
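As a quick, standalone sanity check that the INV_PERM_WORDS() steps used by gift64n_to_nibbles() further below really undo the PERM_WORDS() steps defined next, the sketch below round-trips a stream of test words through illustrative copies of both macro bodies. It is not part of the library.

#include <stdint.h>
#include <stdio.h>

#define bit_permute_step_32(_y, mask, shift) \
    do { \
        uint32_t y = (_y); \
        uint32_t t = ((y >> (shift)) ^ y) & (mask); \
        (_y) = (y ^ t) ^ (t << (shift)); \
    } while (0)

static uint32_t perm_words(uint32_t x)
{
    bit_permute_step_32(x, 0x0a0a0a0a, 3);
    bit_permute_step_32(x, 0x00cc00cc, 6);
    bit_permute_step_32(x, 0x0000f0f0, 12);
    bit_permute_step_32(x, 0x0000ff00, 8);
    return x;
}

static uint32_t inv_perm_words(uint32_t x)
{
    bit_permute_step_32(x, 0x00aa00aa, 7);
    bit_permute_step_32(x, 0x0000cccc, 14);
    bit_permute_step_32(x, 0x00f000f0, 4);
    bit_permute_step_32(x, 0x0000ff00, 8);
    return x;
}

int main(void)
{
    uint32_t x = 1;
    int i;
    for (i = 0; i < 1000000; ++i) {
        /* xorshift32 test pattern */
        x ^= x << 13; x ^= x >> 17; x ^= x << 5;
        if (inv_perm_words(perm_words(x)) != x) {
            printf("round trip failed at 0x%08x\n", (unsigned)x);
            return 1;
        }
    }
    printf("INV_PERM_WORDS inverts PERM_WORDS on all test words\n");
    return 0;
}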
- */ - #define PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step_32(x, 0x0a0a0a0a, 3); \ - bit_permute_step_32(x, 0x00cc00cc, 6); \ - bit_permute_step_32(x, 0x0000f0f0, 12); \ - bit_permute_step_32(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - PERM_WORDS(s0); - PERM_WORDS(s1); - - /* Rearrange the bytes and write them to the output buffer */ - output[0] = (uint8_t)s0; - output[1] = (uint8_t)s1; - output[2] = (uint8_t)(s0 >> 8); - output[3] = (uint8_t)(s1 >> 8); - output[4] = (uint8_t)(s0 >> 16); - output[5] = (uint8_t)(s1 >> 16); - output[6] = (uint8_t)(s0 >> 24); - output[7] = (uint8_t)(s1 >> 24); -} - -/** - * \brief Converts the GIFT-64 word-based representation into nibble-based. - * - * \param output Output buffer to write the nibble-based version to. - * \param input Input buffer to read the word-based version from. - */ -static void gift64n_to_nibbles - (unsigned char *output, const unsigned char *input) -{ - uint32_t s0, s1; - - /* Load the input bytes and rearrange them so that s0 contains the - * most significant nibbles and s1 contains the least significant */ - s0 = (((uint32_t)(input[6])) << 24) | - (((uint32_t)(input[4])) << 16) | - (((uint32_t)(input[2])) << 8) | - ((uint32_t)(input[0])); - s1 = (((uint32_t)(input[7])) << 24) | - (((uint32_t)(input[5])) << 16) | - (((uint32_t)(input[3])) << 8) | - ((uint32_t)(input[1])); - - /* Apply the inverse of PERM_WORDS() from the function above */ - #define INV_PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step_32(x, 0x00aa00aa, 7); \ - bit_permute_step_32(x, 0x0000cccc, 14); \ - bit_permute_step_32(x, 0x00f000f0, 4); \ - bit_permute_step_32(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - INV_PERM_WORDS(s0); - INV_PERM_WORDS(s1); - - /* Store the result into the output buffer as 32-bit words */ - le_store_word32(output + 4, s0); - le_store_word32(output, s1); -} - -void gift64n_encrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - gift64n_to_words(output, input); - gift64b_encrypt(ks, output, output); - gift64n_to_nibbles(output, output); -} - -void gift64n_decrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - gift64n_to_words(output, input); - gift64b_decrypt(ks, output, output); - gift64n_to_nibbles(output, output); -} - -void gift64t_encrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint16_t tweak) -{ - uint16_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift64n_to_words(output, input); - s0 = be_load_word16(output); - s1 = be_load_word16(output + 2); - s2 = be_load_word16(output + 4); - s3 = be_load_word16(output + 6); - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[0]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[3]; - - /* Perform all 28 rounds */ - for (round = 0; round < 28; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 64-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s0 ^= (uint16_t)w3; - s1 ^= (uint16_t)(w3 >> 16); - s3 ^= 0x8000U ^ GIFT64_RC[round]; - - /* AddTweak - XOR in the tweak every 4 rounds except the last */ - if (((round + 
1) % 4) == 0 && round < 27) - s2 ^= tweak; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word16(output, s0); - be_store_word16(output + 2, s1); - be_store_word16(output + 4, s2); - be_store_word16(output + 6, s3); - gift64n_to_nibbles(output, output); -} - -void gift64t_decrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint16_t tweak) -{ - uint16_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the ciphertext into the state buffer and convert from nibbles */ - gift64n_to_words(output, input); - s0 = be_load_word16(output); - s1 = be_load_word16(output + 2); - s2 = be_load_word16(output + 4); - s3 = be_load_word16(output + 6); - - /* Generate the decryption key at the end of the last round. - * - * To do that, we run the block operation forward to determine the - * final state of the key schedule after the last round: - * - * w0 = ks->k[0]; - * w1 = ks->k[1]; - * w2 = ks->k[2]; - * w3 = ks->k[3]; - * for (round = 0; round < 28; ++round) { - * temp = w3; - * w3 = w2; - * w2 = w1; - * w1 = w0; - * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - * } - * - * We can short-cut all of the above by noticing that we don't need - * to do the word rotations. Every 4 rounds, the rotation alignment - * returns to the original position and each word has been rotated - * by applying the "2 right and 4 left" bit-rotation step to it. - * We then repeat that 7 times for the full 28 rounds. The overall - * effect is to apply a "14 right and 28 left" bit-rotation to every word - * in the key schedule. That is equivalent to "14 right and 12 left" - * on the 16-bit sub-words. 
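The tweak-injection conditions use different round counters in gift64t_encrypt() above and in the decryption loop below, so it is worth convincing yourself that they select the same rounds. The tiny standalone program here prints both sets (the decryption loop visits them in reverse order); each should be rounds 3, 7, 11, 15, 19 and 23 of the 0-based encryption round count.

#include <stdio.h>

int main(void)
{
    int r, k;
    printf("encrypt rounds with tweak:");
    for (r = 0; r < 28; ++r) {
        if (((r + 1) % 4) == 0 && r < 27)
            printf(" %d", r);
    }
    printf("\ndecrypt rounds with tweak:");
    for (k = 28; k > 0; --k) {
        if ((k % 4) == 0 && k != 28)
            printf(" %d", k - 1);   /* encryption round being undone */
    }
    printf("\n");
    return 0;
}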
- */ - w0 = ks->k[0]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[3]; - w0 = ((w0 & 0xC0000000U) >> 14) | ((w0 & 0x3FFF0000U) << 2) | - ((w0 & 0x0000000FU) << 12) | ((w0 & 0x0000FFF0U) >> 4); - w1 = ((w1 & 0xC0000000U) >> 14) | ((w1 & 0x3FFF0000U) << 2) | - ((w1 & 0x0000000FU) << 12) | ((w1 & 0x0000FFF0U) >> 4); - w2 = ((w2 & 0xC0000000U) >> 14) | ((w2 & 0x3FFF0000U) << 2) | - ((w2 & 0x0000000FU) << 12) | ((w2 & 0x0000FFF0U) >> 4); - w3 = ((w3 & 0xC0000000U) >> 14) | ((w3 & 0x3FFF0000U) << 2) | - ((w3 & 0x0000000FU) << 12) | ((w3 & 0x0000FFF0U) >> 4); - - /* Perform all 28 rounds */ - for (round = 28; round > 0; --round) { - /* Rotate the key schedule backwards */ - temp = w0; - w0 = w1; - w1 = w2; - w2 = w3; - w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | - ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); - - /* AddTweak - XOR in the tweak every 4 rounds except the last */ - if ((round % 4) == 0 && round != 28) - s2 ^= tweak; - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s0 ^= (uint16_t)w3; - s1 ^= (uint16_t)(w3 >> 16); - s3 ^= 0x8000U ^ GIFT64_RC[round - 1]; - - /* InvPermBits - apply the inverse of the 128-bit permutation */ - INV_PERM0(s0); - INV_PERM1(s1); - INV_PERM2(s2); - INV_PERM3(s3); - - /* InvSubCells - apply the inverse of the S-box */ - temp = s0; - s0 = s3; - s3 = temp; - s2 ^= s0 & s1; - s3 ^= 0xFFFFU; - s1 ^= s3; - s3 ^= s2; - s2 ^= s0 | s1; - s0 ^= s1 & s3; - s1 ^= s0 & s2; - } - - /* Pack the state into the plaintext buffer in nibble form */ - be_store_word16(output, s0); - be_store_word16(output + 2, s1); - be_store_word16(output + 4, s2); - be_store_word16(output + 6, s3); - gift64n_to_nibbles(output, output); -} - -#endif /* GIFT64_LOW_MEMORY */ diff --git a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/internal-gift64.h b/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/internal-gift64.h deleted file mode 100644 index 010359b..0000000 --- a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/internal-gift64.h +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_GIFT64_H -#define LW_INTERNAL_GIFT64_H - -/** - * \file internal-gift64.h - * \brief GIFT-64 block cipher. 
- * - * References: https://eprint.iacr.org/2017/622.pdf, - * https://eprint.iacr.org/2020/412.pdf, - * https://giftcipher.github.io/gift/ - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \var GIFT64_LOW_MEMORY - * \brief Define this to 1 to use a low memory version of the key schedule. - * - * The default is to use the fix-sliced version of GIFT-64 which is very - * fast on 32-bit platforms but requires 48 bytes to store the key schedule. - * The large key schedule may be a problem on 8-bit and 16-bit platforms. - * The fix-sliced version also encrypts two blocks at a time in 32-bit - * words which is an unnecessary optimization for 8-bit platforms. - * - * GIFT64_LOW_MEMORY can be defined to 1 to select the original non - * fix-sliced version which only requires 16 bytes to store the key, - * with the rest of the key schedule expanded on the fly. - */ -#if !defined(GIFT64_LOW_MEMORY) -#if defined(__AVR__) -#define GIFT64_LOW_MEMORY 1 -#else -#define GIFT64_LOW_MEMORY 0 -#endif -#endif - -/** - * \brief Size of a GIFT-64 block in bytes. - */ -#define GIFT64_BLOCK_SIZE 8 - -/** - * \brief Structure of the key schedule for GIFT-64. - */ -typedef struct -{ - uint32_t k[4]; /**< Words of the key schedule */ -#if !GIFT64_LOW_MEMORY - uint32_t rk[8]; /**< Pre-computed round keys for fixsliced form */ -#endif - -} gift64n_key_schedule_t; - -/** - * \fn void gift64n_update_round_keys(gift64n_key_schedule_t *ks); - * \brief Updates the round keys after a change in the base key. - * - * \param ks Points to the key schedule to update. - */ -#if GIFT64_LOW_MEMORY -#define gift64n_update_round_keys(ks) do { ; } while (0) /* Not needed */ -#else -void gift64n_update_round_keys(gift64n_key_schedule_t *ks); -#endif - -/** - * \brief Initializes the key schedule for GIFT-64 (nibble-based). - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the 16 bytes of the key data. - */ -void gift64n_init(gift64n_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Encrypts a 64-bit block with GIFT-64 (nibble-based). - * - * \param ks Points to the GIFT-64 key schedule. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void gift64n_encrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 64-bit block with GIFT-64 (nibble-based). - * - * \param ks Points to the GIFT-64 key schedule. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. 
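The expanded tweak constants declared a little further below look arbitrary, but they follow a simple pattern: the 4-bit tweak t forms the low nibble, the high nibble is t with every bit XORed against the parity of t, and the resulting byte is repeated twice to fill 16 bits. This rule is inferred here from the table itself rather than quoted from the specification, and the standalone sketch below regenerates the constants from it as a check.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* The GIFT64T_TWEAK_0..15 values, copied for the check */
    static const uint16_t table[16] = {
        0x0000, 0xe1e1, 0xd2d2, 0x3333, 0xb4b4, 0x5555, 0x6666, 0x8787,
        0x7878, 0x9999, 0xaaaa, 0x4b4b, 0xcccc, 0x2d2d, 0x1e1e, 0xffff
    };
    unsigned t;
    for (t = 0; t < 16; ++t) {
        unsigned parity = ((t >> 3) ^ (t >> 2) ^ (t >> 1) ^ t) & 1;
        unsigned high = t ^ (parity ? 0xF : 0x0); /* each bit XORed with the parity */
        uint16_t byte = (uint16_t)((high << 4) | t);
        uint16_t expanded = (uint16_t)((byte << 8) | byte);
        if (expanded != table[t]) {
            printf("tweak %u: got %04x, expected %04x\n",
                   t, (unsigned)expanded, (unsigned)table[t]);
            return 1;
        }
    }
    printf("all 16 tweak constants match the parity-expansion rule\n");
    return 0;
}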
- */ -void gift64n_decrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/* 4-bit tweak values expanded to 16-bit for TweGIFT-64 */ -#define GIFT64T_TWEAK_0 0x0000 /**< TweGIFT-64 tweak value 0 */ -#define GIFT64T_TWEAK_1 0xe1e1 /**< TweGIFT-64 tweak value 1 */ -#define GIFT64T_TWEAK_2 0xd2d2 /**< TweGIFT-64 tweak value 2 */ -#define GIFT64T_TWEAK_3 0x3333 /**< TweGIFT-64 tweak value 3 */ -#define GIFT64T_TWEAK_4 0xb4b4 /**< TweGIFT-64 tweak value 4 */ -#define GIFT64T_TWEAK_5 0x5555 /**< TweGIFT-64 tweak value 5 */ -#define GIFT64T_TWEAK_6 0x6666 /**< TweGIFT-64 tweak value 6 */ -#define GIFT64T_TWEAK_7 0x8787 /**< TweGIFT-64 tweak value 7 */ -#define GIFT64T_TWEAK_8 0x7878 /**< TweGIFT-64 tweak value 8 */ -#define GIFT64T_TWEAK_9 0x9999 /**< TweGIFT-64 tweak value 9 */ -#define GIFT64T_TWEAK_10 0xaaaa /**< TweGIFT-64 tweak value 10 */ -#define GIFT64T_TWEAK_11 0x4b4b /**< TweGIFT-64 tweak value 11 */ -#define GIFT64T_TWEAK_12 0xcccc /**< TweGIFT-64 tweak value 12 */ -#define GIFT64T_TWEAK_13 0x2d2d /**< TweGIFT-64 tweak value 13 */ -#define GIFT64T_TWEAK_14 0x1e1e /**< TweGIFT-64 tweak value 14 */ -#define GIFT64T_TWEAK_15 0xffff /**< TweGIFT-64 tweak value 15 */ - -/** - * \brief Encrypts a 64-bit block with TweGIFT-64 (tweakable variant). - * - * \param ks Points to the GIFT-64 key schedule. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * \param tweak 4-bit tweak value expanded to 16-bit. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This variant of GIFT-64 is used by the LOTUS/LOCUS submission to the - * NIST Lightweight Cryptography Competition. A 4-bit tweak is added to - * some of the rounds to provide domain separation. If the tweak is - * zero, then this function is identical to gift64n_encrypt(). - */ -void gift64t_encrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint16_t tweak); - -/** - * \brief Decrypts a 64-bit block with TweGIFT-64 (tweakable variant). - * - * \param ks Points to the GIFT-64 key schedule. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * \param tweak 4-bit tweak value expanded to 16-bit. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This variant of GIFT-64 is used by the LOTUS/LOCUS submission to the - * NIST Lightweight Cryptography Competition. A 4-bit tweak is added to - * some of the rounds to provide domain separation. If the tweak is - * zero, then this function is identical to gift64n_decrypt(). - */ -void gift64t_decrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint16_t tweak); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/internal-util.h b/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) 
| \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = 
(src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/lotus-locus.c b/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/lotus-locus.c deleted file mode 100644 index 4a1efd0..0000000 --- a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/lotus-locus.c +++ /dev/null @@ -1,436 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "lotus-locus.h" -#include "internal-gift64.h" -#include "internal-util.h" -#include - -aead_cipher_t const lotus_aead_cipher = { - "LOTUS-AEAD", - LOTUS_AEAD_KEY_SIZE, - LOTUS_AEAD_NONCE_SIZE, - LOTUS_AEAD_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - lotus_aead_encrypt, - lotus_aead_decrypt -}; - -aead_cipher_t const locus_aead_cipher = { - "LOCUS-AEAD", - LOCUS_AEAD_KEY_SIZE, - LOCUS_AEAD_NONCE_SIZE, - LOCUS_AEAD_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - locus_aead_encrypt, - locus_aead_decrypt -}; - -/** - * \brief Multiplies a key by 2 in the GF(128) field. - * - * \param ks The key schedule structure containing the key in host byte order. 
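The doubling performed by lotus_or_locus_mul_2() just below is the usual GF(2^128) "multiply by x" with reduction polynomial x^128 + x^7 + x^2 + x + 1 (hence the 0x87), applied across the four key-schedule words with k[0] as the most significant word. The standalone sketch below cross-checks an illustrative copy of that word-wise doubling against a byte-wise reference; both helpers are examples only and omit the gift64n_update_round_keys() refresh that the real function performs.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void mul2_words(uint32_t k[4])
{
    uint32_t mask = (uint32_t)(((int32_t)k[0]) >> 31);
    k[0] = (k[0] << 1) | (k[1] >> 31);
    k[1] = (k[1] << 1) | (k[2] >> 31);
    k[2] = (k[2] << 1) | (k[3] >> 31);
    k[3] = (k[3] << 1) ^ (mask & 0x87);
}

static void mul2_bytes(uint8_t b[16])   /* b[0] is the most significant byte */
{
    uint8_t carry = (uint8_t)(b[0] >> 7);
    int i;
    for (i = 0; i < 15; ++i)
        b[i] = (uint8_t)((b[i] << 1) | (b[i + 1] >> 7));
    b[15] = (uint8_t)((b[15] << 1) ^ (carry ? 0x87 : 0x00));
}

int main(void)
{
    uint32_t k[4] = {0xDEADBEEF, 0x01234567, 0x89ABCDEF, 0xC001D00D};
    uint8_t b[16], check[16];
    int i, step;
    /* Serialize k[] into bytes, most significant first */
    for (i = 0; i < 4; ++i) {
        b[i * 4]     = (uint8_t)(k[i] >> 24);
        b[i * 4 + 1] = (uint8_t)(k[i] >> 16);
        b[i * 4 + 2] = (uint8_t)(k[i] >> 8);
        b[i * 4 + 3] = (uint8_t)(k[i]);
    }
    for (step = 0; step < 1000; ++step) {
        mul2_words(k);
        mul2_bytes(b);
        for (i = 0; i < 4; ++i) {
            check[i * 4]     = (uint8_t)(k[i] >> 24);
            check[i * 4 + 1] = (uint8_t)(k[i] >> 16);
            check[i * 4 + 2] = (uint8_t)(k[i] >> 8);
            check[i * 4 + 3] = (uint8_t)(k[i]);
        }
        if (memcmp(check, b, 16) != 0) {
            printf("mismatch at step %d\n", step);
            return 1;
        }
    }
    printf("word-wise and byte-wise doublings agree\n");
    return 0;
}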
- */ -STATIC_INLINE void lotus_or_locus_mul_2(gift64n_key_schedule_t *ks) -{ - uint32_t mask = (uint32_t)(((int32_t)(ks->k[0])) >> 31); - ks->k[0] = (ks->k[0] << 1) | (ks->k[1] >> 31); - ks->k[1] = (ks->k[1] << 1) | (ks->k[2] >> 31); - ks->k[2] = (ks->k[2] << 1) | (ks->k[3] >> 31); - ks->k[3] = (ks->k[3] << 1) ^ (mask & 0x87); - gift64n_update_round_keys(ks); -} - -/** - * \brief Initializes a LOTUS-AEAD or LOCUS-AEAD cipher instance. - * - * \param ks Key schedule to initialize. - * \param deltaN Delta-N value for the cipher state. - * \param key Points to the 16-byte key for the cipher instance. - * \param nonce Points to the 16-byte key for the cipher instance. - * \param T Points to a temporary buffer of LOTUS_AEAD_KEY_SIZE bytes - * that will be destroyed during this function. - */ -static void lotus_or_locus_init - (gift64n_key_schedule_t *ks, - unsigned char deltaN[GIFT64_BLOCK_SIZE], - const unsigned char *key, - const unsigned char *nonce, - unsigned char *T) -{ - gift64n_init(ks, key); - memset(deltaN, 0, GIFT64_BLOCK_SIZE); - gift64t_encrypt(ks, deltaN, deltaN, GIFT64T_TWEAK_0); - lw_xor_block_2_src(T, key, nonce, LOTUS_AEAD_KEY_SIZE); - gift64n_init(ks, T); - gift64t_encrypt(ks, deltaN, deltaN, GIFT64T_TWEAK_1); -} - -/** - * \brief Processes associated data for LOTUS-AEAD or LOCUS-AEAD. - * - * \param ks Points to the key schedule. - * \param deltaN Points to the Delta-N value from the state. - * \param V Points to the V value from the state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes, must be non-zero. - */ -static void lotus_or_locus_process_ad - (gift64n_key_schedule_t *ks, - const unsigned char deltaN[GIFT64_BLOCK_SIZE], - unsigned char V[GIFT64_BLOCK_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char X[GIFT64_BLOCK_SIZE]; - unsigned char temp; - while (adlen > GIFT64_BLOCK_SIZE) { - lotus_or_locus_mul_2(ks); - lw_xor_block_2_src(X, ad, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(ks, X, X, GIFT64T_TWEAK_2); - lw_xor_block(V, X, GIFT64_BLOCK_SIZE); - ad += GIFT64_BLOCK_SIZE; - adlen -= GIFT64_BLOCK_SIZE; - } - lotus_or_locus_mul_2(ks); - temp = (unsigned)adlen; - if (temp < GIFT64_BLOCK_SIZE) { - memcpy(X, deltaN, GIFT64_BLOCK_SIZE); - lw_xor_block(X, ad, temp); - X[temp] ^= 0x01; - gift64t_encrypt(ks, X, X, GIFT64T_TWEAK_3); - } else { - lw_xor_block_2_src(X, ad, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(ks, X, X, GIFT64T_TWEAK_2); - } - lw_xor_block(V, X, GIFT64_BLOCK_SIZE); -} - -/** - * \brief Generates the authentication tag for LOTUS-AEAD or LOCUS-AEAD. - * - * \param ks Points to the key schedule. - * \param tag Points to the buffer to receive the authentication tag. - * \param deltaN Points to the Delta-N value from the state. - * \param W Points to the W value from the state. - * \param V Points to the V value from the state. 
- */ -static void lotus_or_locus_gen_tag - (gift64n_key_schedule_t *ks, unsigned char *tag, - unsigned char deltaN[GIFT64_BLOCK_SIZE], - unsigned char W[GIFT64_BLOCK_SIZE], - unsigned char V[GIFT64_BLOCK_SIZE]) -{ - lotus_or_locus_mul_2(ks); - lw_xor_block(W, deltaN, GIFT64_BLOCK_SIZE); - lw_xor_block(W, V, GIFT64_BLOCK_SIZE); - gift64t_encrypt(ks, W, W, GIFT64T_TWEAK_6); - lw_xor_block_2_src(tag, W, deltaN, GIFT64_BLOCK_SIZE); -} - -int lotus_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - gift64n_key_schedule_t ks; - unsigned char WV[GIFT64_BLOCK_SIZE * 2]; - unsigned char deltaN[GIFT64_BLOCK_SIZE]; - unsigned char X1[GIFT64_BLOCK_SIZE]; - unsigned char X2[GIFT64_BLOCK_SIZE]; - unsigned temp; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + LOTUS_AEAD_TAG_SIZE; - - /* Initialize the state with the key and the nonce */ - lotus_or_locus_init(&ks, deltaN, k, npub, WV); - memset(WV, 0, sizeof(WV)); - - /* Process the associated data */ - if (adlen > 0) { - lotus_or_locus_process_ad - (&ks, deltaN, WV + GIFT64_BLOCK_SIZE, ad, adlen); - } - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > (GIFT64_BLOCK_SIZE * 2)) { - lotus_or_locus_mul_2(&ks); - lw_xor_block_2_src(X1, m, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X1, GIFT64T_TWEAK_4); - lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_4); - lw_xor_block_2_src - (X2, m + GIFT64_BLOCK_SIZE, X2, GIFT64_BLOCK_SIZE); - lw_xor_block_2_src(c, X2, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_5); - lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_5); - lw_xor_block_2_src - (c + GIFT64_BLOCK_SIZE, X1, X2, GIFT64_BLOCK_SIZE); - c += GIFT64_BLOCK_SIZE * 2; - m += GIFT64_BLOCK_SIZE * 2; - mlen -= GIFT64_BLOCK_SIZE * 2; - } - temp = (unsigned)mlen; - lotus_or_locus_mul_2(&ks); - memcpy(X1, deltaN, GIFT64_BLOCK_SIZE); - X1[0] ^= (unsigned char)temp; - gift64t_encrypt(&ks, X2, X1, GIFT64T_TWEAK_12); - lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_12); - if (temp <= GIFT64_BLOCK_SIZE) { - lw_xor_block(WV, m, temp); - lw_xor_block(X2, m, temp); - lw_xor_block_2_src(c, X2, deltaN, temp); - } else { - lw_xor_block(X2, m, GIFT64_BLOCK_SIZE); - lw_xor_block_2_src(c, X2, deltaN, GIFT64_BLOCK_SIZE); - c += GIFT64_BLOCK_SIZE; - m += GIFT64_BLOCK_SIZE; - temp -= GIFT64_BLOCK_SIZE; - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_13); - lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_13); - lw_xor_block(WV, m, temp); - lw_xor_block(X1, X2, temp); - lw_xor_block_2_src(c, X1, m, temp); - } - c += temp; - } - - /* Generate the authentication tag */ - lotus_or_locus_gen_tag(&ks, c, deltaN, WV, WV + GIFT64_BLOCK_SIZE); - return 0; -} - -int lotus_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - gift64n_key_schedule_t ks; - unsigned char WV[GIFT64_BLOCK_SIZE * 2]; - unsigned char deltaN[GIFT64_BLOCK_SIZE]; - unsigned char X1[GIFT64_BLOCK_SIZE]; - unsigned char X2[GIFT64_BLOCK_SIZE]; - unsigned char *mtemp = m; - unsigned 
temp; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < LOTUS_AEAD_TAG_SIZE) - return -1; - *mlen = clen - LOTUS_AEAD_TAG_SIZE; - - /* Initialize the state with the key and the nonce */ - lotus_or_locus_init(&ks, deltaN, k, npub, WV); - memset(WV, 0, sizeof(WV)); - - /* Process the associated data */ - if (adlen > 0) { - lotus_or_locus_process_ad - (&ks, deltaN, WV + GIFT64_BLOCK_SIZE, ad, adlen); - } - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= LOTUS_AEAD_TAG_SIZE; - if (clen > 0) { - while (clen > (GIFT64_BLOCK_SIZE * 2)) { - lotus_or_locus_mul_2(&ks); - lw_xor_block_2_src(X1, c, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X1, GIFT64T_TWEAK_5); - lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_5); - lw_xor_block(X2, c + GIFT64_BLOCK_SIZE, GIFT64_BLOCK_SIZE); - lw_xor_block_2_src(m, X2, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_4); - lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_4); - lw_xor_block_2_src - (m + GIFT64_BLOCK_SIZE, X1, X2, GIFT64_BLOCK_SIZE); - c += GIFT64_BLOCK_SIZE * 2; - m += GIFT64_BLOCK_SIZE * 2; - clen -= GIFT64_BLOCK_SIZE * 2; - } - temp = (unsigned)clen; - lotus_or_locus_mul_2(&ks); - memcpy(X1, deltaN, GIFT64_BLOCK_SIZE); - X1[0] ^= (unsigned char)temp; - gift64t_encrypt(&ks, X2, X1, GIFT64T_TWEAK_12); - lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_12); - if (temp <= GIFT64_BLOCK_SIZE) { - lw_xor_block_2_src(m, X2, c, temp); - lw_xor_block(m, deltaN, temp); - lw_xor_block(X2, m, temp); - lw_xor_block(WV, m, temp); - } else { - lw_xor_block_2_src(m, X2, c, GIFT64_BLOCK_SIZE); - lw_xor_block(m, deltaN, GIFT64_BLOCK_SIZE); - lw_xor_block(X2, m, GIFT64_BLOCK_SIZE); - c += GIFT64_BLOCK_SIZE; - m += GIFT64_BLOCK_SIZE; - temp -= GIFT64_BLOCK_SIZE; - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_13); - lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_13); - lw_xor_block(X1, X2, temp); - lw_xor_block_2_src(m, X1, c, temp); - lw_xor_block(WV, m, temp); - } - c += temp; - } - - /* Check the authentication tag */ - lotus_or_locus_gen_tag(&ks, WV, deltaN, WV, WV + GIFT64_BLOCK_SIZE); - return aead_check_tag(mtemp, *mlen, WV, c, LOTUS_AEAD_TAG_SIZE); -} - -int locus_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - gift64n_key_schedule_t ks; - unsigned char WV[GIFT64_BLOCK_SIZE * 2]; - unsigned char deltaN[GIFT64_BLOCK_SIZE]; - unsigned char X[GIFT64_BLOCK_SIZE]; - unsigned temp; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + LOCUS_AEAD_TAG_SIZE; - - /* Initialize the state with the key and the nonce */ - lotus_or_locus_init(&ks, deltaN, k, npub, WV); - memset(WV, 0, sizeof(WV)); - - /* Process the associated data */ - if (adlen > 0) { - lotus_or_locus_process_ad - (&ks, deltaN, WV + GIFT64_BLOCK_SIZE, ad, adlen); - } - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > GIFT64_BLOCK_SIZE) { - lotus_or_locus_mul_2(&ks); - lw_xor_block_2_src(X, m, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_4); - lw_xor_block(WV, X, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_4); - lw_xor_block_2_src(c, X, deltaN, 
GIFT64_BLOCK_SIZE); - c += GIFT64_BLOCK_SIZE; - m += GIFT64_BLOCK_SIZE; - mlen -= GIFT64_BLOCK_SIZE; - } - temp = (unsigned)mlen; - lotus_or_locus_mul_2(&ks); - memcpy(X, deltaN, GIFT64_BLOCK_SIZE); - X[0] ^= (unsigned char)temp; - gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_5); - lw_xor_block(WV, X, GIFT64_BLOCK_SIZE); - lw_xor_block(WV, m, temp); - gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_5); - lw_xor_block(X, deltaN, temp); - lw_xor_block_2_src(c, m, X, temp); - c += temp; - } - - /* Generate the authentication tag */ - lotus_or_locus_gen_tag(&ks, c, deltaN, WV, WV + GIFT64_BLOCK_SIZE); - return 0; -} - -int locus_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - gift64n_key_schedule_t ks; - unsigned char WV[GIFT64_BLOCK_SIZE * 2]; - unsigned char deltaN[GIFT64_BLOCK_SIZE]; - unsigned char X[GIFT64_BLOCK_SIZE]; - unsigned char *mtemp = m; - unsigned temp; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < LOCUS_AEAD_TAG_SIZE) - return -1; - *mlen = clen - LOCUS_AEAD_TAG_SIZE; - - /* Initialize the state with the key and the nonce */ - lotus_or_locus_init(&ks, deltaN, k, npub, WV); - memset(WV, 0, sizeof(WV)); - - /* Process the associated data */ - if (adlen > 0) { - lotus_or_locus_process_ad - (&ks, deltaN, WV + GIFT64_BLOCK_SIZE, ad, adlen); - } - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= LOCUS_AEAD_TAG_SIZE; - if (clen > 0) { - while (clen > GIFT64_BLOCK_SIZE) { - lotus_or_locus_mul_2(&ks); - lw_xor_block_2_src(X, c, deltaN, GIFT64_BLOCK_SIZE); - gift64t_decrypt(&ks, X, X, GIFT64T_TWEAK_4); - lw_xor_block(WV, X, GIFT64_BLOCK_SIZE); - gift64t_decrypt(&ks, X, X, GIFT64T_TWEAK_4); - lw_xor_block_2_src(m, X, deltaN, GIFT64_BLOCK_SIZE); - c += GIFT64_BLOCK_SIZE; - m += GIFT64_BLOCK_SIZE; - clen -= GIFT64_BLOCK_SIZE; - } - temp = (unsigned)clen; - lotus_or_locus_mul_2(&ks); - memcpy(X, deltaN, GIFT64_BLOCK_SIZE); - X[0] ^= (unsigned char)temp; - gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_5); - lw_xor_block(WV, X, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_5); - lw_xor_block(X, deltaN, temp); - lw_xor_block_2_src(m, c, X, temp); - lw_xor_block(WV, m, temp); - c += temp; - } - - /* Check the authentication tag */ - lotus_or_locus_gen_tag(&ks, WV, deltaN, WV, WV + GIFT64_BLOCK_SIZE); - return aead_check_tag(mtemp, *mlen, WV, c, LOCUS_AEAD_TAG_SIZE); -} diff --git a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/lotus-locus.h b/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/lotus-locus.h deleted file mode 100644 index 85434a8..0000000 --- a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys-avr/lotus-locus.h +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
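[Editor's note, not part of the patch: the removed lotus-locus.c above ends with the four public AEAD entry points (lotus_aead_encrypt/decrypt and locus_aead_encrypt/decrypt), whose signatures and the 16-byte key/nonce and 8-byte tag sizes are declared in the lotus-locus.h header deleted just below. The following minimal sketch only illustrates the calling convention of that API as documented there; the message, associated-data contents and buffer names are hypothetical, and nsec is passed as NULL because the implementation ignores it.]

/* Illustrative sketch only -- not part of this patch.
 * Assumes lotus-locus.h as shown below (LOTUS_AEAD_KEY_SIZE = 16,
 * LOTUS_AEAD_NONCE_SIZE = 16, LOTUS_AEAD_TAG_SIZE = 8). */
#include <stdio.h>
#include <string.h>
#include "lotus-locus.h"

int main(void)
{
    unsigned char key[LOTUS_AEAD_KEY_SIZE] = {0};    /* 16-byte key (example value) */
    unsigned char npub[LOTUS_AEAD_NONCE_SIZE] = {0}; /* 16-byte public nonce        */
    unsigned char msg[32] = {0};                     /* hypothetical plaintext      */
    unsigned char ad[8] = "assocAD";                 /* hypothetical associated data */
    unsigned char ct[sizeof(msg) + LOTUS_AEAD_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    /* Encrypt: the output length is mlen plus the 8-byte tag. */
    lotus_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                       ad, sizeof(ad), NULL, npub, key);

    /* Decrypt: returns 0 only if the authentication tag verifies. */
    if (lotus_aead_decrypt(pt, &ptlen, NULL, ct, ctlen,
                           ad, sizeof(ad), npub, key) != 0) {
        fprintf(stderr, "tag check failed\n");
        return 1;
    }
    return memcmp(pt, msg, (size_t)ptlen) != 0;
}

[The locus_aead_encrypt/locus_aead_decrypt pair follows exactly the same calling convention; per the header below, LOCUS additionally requires the TweGIFT-64 block decrypt operation, which is why the new internal-gift64-avr.S provides gift64n_decrypt and gift64t_decrypt alongside the encrypt routines.]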
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_LOTUS_LOCUS_H -#define LWCRYPTO_LOTUS_LOCUS_H - -#include "aead-common.h" - -/** - * \file lotus-locus.h - * \brief LOTUS-AEAD and LOCUS-AEAD authenticated encryption algorithms. - * - * LOTUS-AEAD and LOCUS-AEAD are authenticated encryption algorithms - * that are based around a tweakable variant of the GIFT-64 block cipher - * called TweGIFT-64. Both AEAD algorithms have a 128-bit key, a 128-bit - * nonce, and a 64-bit tag. - * - * The two algorithms have the same key initialization, associated data - * processing, and tag generation mechanisms. They differ in how the - * input is encrypted with TweGIFT-64. - * - * LOTUS-AEAD uses a method similar to the block cipher mode OTR. - * TweGIFT-64 is essentially converted into a 128-bit block cipher - * using a Feistel construction and four TweGIFT-64 block operations - * every 16 bytes of input. - * - * LOCUS-AEAD uses a method similar to the block cipher mode OCB - * with two TweGIFT-64 block operations for every 8 bytes of input. - * LOCUS-AEAD requires both the block encrypt and block decrypt - * operations of TweGIFT-64, which increases the overall code size. - * LOTUS-AEAD only needs the block encrypt operation. - * - * LOTUS-AEAD is the primary member of the family. - * - * References: https://www.isical.ac.in/~lightweight/lotus/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for LOTUS-AEAD. - */ -#define LOTUS_AEAD_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for LOTUS-AEAD. - */ -#define LOTUS_AEAD_TAG_SIZE 8 - -/** - * \brief Size of the nonce for LOTUS-AEAD. - */ -#define LOTUS_AEAD_NONCE_SIZE 16 - -/** - * \brief Size of the key for LOCUS-AEAD. - */ -#define LOCUS_AEAD_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for LOCUS-AEAD. - */ -#define LOCUS_AEAD_TAG_SIZE 8 - -/** - * \brief Size of the nonce for LOCUS-AEAD. - */ -#define LOCUS_AEAD_NONCE_SIZE 16 - -/** - * \brief Meta-information block for the LOTUS-AEAD cipher. - */ -extern aead_cipher_t const lotus_aead_cipher; - -/** - * \brief Meta-information block for the LOCUS-AEAD cipher. - */ -extern aead_cipher_t const locus_aead_cipher; - -/** - * \brief Encrypts and authenticates a packet with LOTUS-AEAD. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. 
- * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa lotus_aead_decrypt() - */ -int lotus_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with LOTUS-AEAD. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 9 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa lotus_aead_encrypt() - */ -int lotus_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with LOCUS-AEAD. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa locus_aead_decrypt() - */ -int locus_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with LOCUS-AEAD. - * - * \param m Buffer to receive the plaintext message on output. 
- * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 9 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa locus_aead_encrypt() - */ -int locus_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/internal-gift64-avr.S b/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/internal-gift64-avr.S new file mode 100644 index 0000000..fdb668d --- /dev/null +++ b/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/internal-gift64-avr.S @@ -0,0 +1,6047 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global gift64n_init + .type gift64n_init, @function +gift64n_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + std Z+12,r18 + std Z+13,r19 + std Z+14,r20 + std Z+15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + std Z+4,r18 + std Z+5,r19 + std Z+6,r20 + std Z+7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + ret + .size gift64n_init, .-gift64n_init + + .text +.global gift64n_encrypt + .type gift64n_encrypt, @function +gift64n_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 28 + ld r6,Z + ldd r7,Z+1 + ldd r8,Z+2 + ldd r9,Z+3 + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Z+4 + ldd r7,Z+5 + ldd r8,Z+6 + ldd r9,Z+7 + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Z+8 + ldd r7,Z+9 + ldd r8,Z+10 + ldd r9,Z+11 + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Z+12 + ldd r7,Z+13 + ldd r8,Z+14 + ldd r9,Z+15 + ld r18,X+ + ld r19,X+ + bst r18,0 + bld r20,0 + bst r18,1 + bld r22,0 + bst r18,2 + bld r2,0 + bst r18,3 + bld r4,0 + bst r18,4 + bld r20,1 + bst r18,5 + bld r22,1 + bst r18,6 + bld r2,1 + bst r18,7 + bld r4,1 + bst r19,0 + bld r20,2 + bst r19,1 + bld r22,2 + bst r19,2 + bld r2,2 + bst r19,3 + bld r4,2 + bst r19,4 + bld r20,3 + bst r19,5 + bld r22,3 + bst r19,6 + bld r2,3 + bst r19,7 + bld r4,3 + ld r18,X+ + ld r19,X+ + bst r18,0 + bld r20,4 + bst r18,1 + bld r22,4 + bst r18,2 + bld r2,4 + bst r18,3 + bld r4,4 + bst r18,4 + bld r20,5 + bst r18,5 + bld r22,5 + bst r18,6 + bld r2,5 + bst 
r18,7 + bld r4,5 + bst r19,0 + bld r20,6 + bst r19,1 + bld r22,6 + bst r19,2 + bld r2,6 + bst r19,3 + bld r4,6 + bst r19,4 + bld r20,7 + bst r19,5 + bld r22,7 + bst r19,6 + bld r2,7 + bst r19,7 + bld r4,7 + ld r18,X+ + ld r19,X+ + bst r18,0 + bld r21,0 + bst r18,1 + bld r23,0 + bst r18,2 + bld r3,0 + bst r18,3 + bld r5,0 + bst r18,4 + bld r21,1 + bst r18,5 + bld r23,1 + bst r18,6 + bld r3,1 + bst r18,7 + bld r5,1 + bst r19,0 + bld r21,2 + bst r19,1 + bld r23,2 + bst r19,2 + bld r3,2 + bst r19,3 + bld r5,2 + bst r19,4 + bld r21,3 + bst r19,5 + bld r23,3 + bst r19,6 + bld r3,3 + bst r19,7 + bld r5,3 + ld r18,X+ + ld r19,X+ + bst r18,0 + bld r21,4 + bst r18,1 + bld r23,4 + bst r18,2 + bld r3,4 + bst r18,3 + bld r5,4 + bst r18,4 + bld r21,5 + bst r18,5 + bld r23,5 + bst r18,6 + bld r3,5 + bst r18,7 + bld r5,5 + bst r19,0 + bld r21,6 + bst r19,1 + bld r23,6 + bst r19,2 + bld r3,6 + bst r19,3 + bld r5,6 + bst r19,4 + bld r21,7 + bst r19,5 + bld r23,7 + bst r19,6 + bld r3,7 + bst r19,7 + bld r5,7 + rcall 1061f + ldi r18,1 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + rcall 1061f + ldi r18,3 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + rcall 1061f + ldi r18,7 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + rcall 1061f + ldi r18,15 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + rcall 1061f + ldi r18,31 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + rcall 1061f + ldi r18,62 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + rcall 1061f + ldi r18,61 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + 
ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + rcall 1061f + ldi r18,59 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + rcall 1061f + ldi r18,55 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + rcall 1061f + ldi r18,47 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + rcall 1061f + ldi r18,30 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + rcall 1061f + ldi r18,60 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + rcall 1061f + ldi r18,57 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + rcall 1061f + ldi r18,51 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + rcall 1061f + ldi r18,39 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + rcall 1061f + ldi r18,14 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + rcall 1061f + ldi r18,29 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + 
lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + rcall 1061f + ldi r18,58 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + rcall 1061f + ldi r18,53 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + rcall 1061f + ldi r18,43 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + rcall 1061f + ldi r18,22 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + rcall 1061f + ldi r18,44 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + rcall 1061f + ldi r18,24 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + rcall 1061f + ldi r18,48 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + rcall 1061f + ldi r18,33 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + rcall 1061f + ldi r18,2 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd 
r9,Y+8 + rcall 1061f + ldi r18,5 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + rcall 1061f + ldi r18,11 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rjmp 1252f +1061: + mov r0,r20 + and r0,r2 + eor r22,r0 + mov r0,r21 + and r0,r3 + eor r23,r0 + mov r0,r22 + and r0,r4 + eor r20,r0 + mov r0,r23 + and r0,r5 + eor r21,r0 + mov r0,r20 + or r0,r22 + eor r2,r0 + mov r0,r21 + or r0,r23 + eor r3,r0 + eor r4,r2 + eor r5,r3 + eor r22,r4 + eor r23,r5 + com r4 + com r5 + movw r18,r20 + mov r0,r22 + and r0,r18 + eor r2,r0 + mov r0,r23 + and r0,r19 + eor r3,r0 + movw r20,r4 + movw r4,r18 + bst r20,1 + bld r0,0 + bst r20,4 + bld r20,1 + bst r20,3 + bld r20,4 + bst r21,4 + bld r20,3 + bst r0,0 + bld r21,4 + bst r20,2 + bld r0,0 + bst r21,0 + bld r20,2 + bst r0,0 + bld r21,0 + bst r20,5 + bld r0,0 + bst r20,7 + bld r20,5 + bst r21,7 + bld r20,7 + bst r21,5 + bld r21,7 + bst r0,0 + bld r21,5 + bst r20,6 + bld r0,0 + bst r21,3 + bld r20,6 + bst r21,6 + bld r21,3 + bst r21,1 + bld r21,6 + bst r0,0 + bld r21,1 + bst r22,0 + bld r0,0 + bst r22,1 + bld r22,0 + bst r22,5 + bld r22,1 + bst r22,4 + bld r22,5 + bst r0,0 + bld r22,4 + bst r22,2 + bld r0,0 + bst r23,1 + bld r22,2 + bst r22,7 + bld r23,1 + bst r23,4 + bld r22,7 + bst r0,0 + bld r23,4 + bst r22,3 + bld r0,0 + bst r23,5 + bld r22,3 + bst r22,6 + bld r23,5 + bst r23,0 + bld r22,6 + bst r0,0 + bld r23,0 + bst r23,2 + bld r0,0 + bst r23,3 + bld r23,2 + bst r23,7 + bld r23,3 + bst r23,6 + bld r23,7 + bst r0,0 + bld r23,6 + bst r2,0 + bld r0,0 + bst r2,2 + bld r2,0 + bst r3,2 + bld r2,2 + bst r3,0 + bld r3,2 + bst r0,0 + bld r3,0 + bst r2,1 + bld r0,0 + bst r2,6 + bld r2,1 + bst r3,1 + bld r2,6 + bst r2,4 + bld r3,1 + bst r0,0 + bld r2,4 + bst r2,3 + bld r0,0 + bst r3,6 + bld r2,3 + bst r3,3 + bld r3,6 + bst r3,4 + bld r3,3 + bst r0,0 + bld r3,4 + bst r2,7 + bld r0,0 + bst r3,5 + bld r2,7 + bst r0,0 + bld r3,5 + bst r4,0 + bld r0,0 + bst r4,3 + bld r4,0 + bst r5,7 + bld r4,3 + bst r5,4 + bld r5,7 + bst r0,0 + bld r5,4 + bst r4,1 + bld r0,0 + bst r4,7 + bld r4,1 + bst r5,6 + bld r4,7 + bst r5,0 + bld r5,6 + bst r0,0 + bld r5,0 + bst r4,2 + bld r0,0 + bst r5,3 + bld r4,2 + bst r5,5 + bld r5,3 + bst r4,4 + bld r5,5 + bst r0,0 + bld r4,4 + bst r4,5 + bld r0,0 + bst r4,6 + bld r4,5 + bst r5,2 + bld r4,6 + bst r5,1 + bld r5,2 + bst r0,0 + bld r5,1 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + ret +1252: + ldd r26,Y+17 + ldd r27,Y+18 + bst r20,0 + bld r18,0 + bst r22,0 + bld r18,1 + bst r2,0 + bld r18,2 + bst r4,0 + bld r18,3 + bst r20,1 + bld r18,4 + bst r22,1 + bld r18,5 + bst r2,1 + bld r18,6 + bst r4,1 + bld r18,7 + bst r20,2 + bld r19,0 + bst r22,2 + bld r19,1 + bst r2,2 + bld r19,2 + bst r4,2 + bld r19,3 + bst r20,3 + bld r19,4 + bst r22,3 + bld r19,5 + bst r2,3 + bld r19,6 + bst r4,3 + bld r19,7 + st X+,r18 + st X+,r19 + bst r20,4 + bld r18,0 + bst r22,4 + bld r18,1 + bst r2,4 + bld r18,2 + bst r4,4 + bld r18,3 + bst r20,5 + bld r18,4 + bst r22,5 + bld r18,5 + bst r2,5 + bld r18,6 + bst r4,5 + bld r18,7 + bst r20,6 + bld r19,0 + bst r22,6 + bld r19,1 + bst r2,6 + bld r19,2 + bst r4,6 + bld r19,3 + bst r20,7 + bld r19,4 + bst r22,7 + bld r19,5 + bst r2,7 + bld r19,6 + bst r4,7 + bld r19,7 + st X+,r18 + st X+,r19 + bst r21,0 + bld r18,0 + bst r23,0 + bld r18,1 + bst 
r3,0 + bld r18,2 + bst r5,0 + bld r18,3 + bst r21,1 + bld r18,4 + bst r23,1 + bld r18,5 + bst r3,1 + bld r18,6 + bst r5,1 + bld r18,7 + bst r21,2 + bld r19,0 + bst r23,2 + bld r19,1 + bst r3,2 + bld r19,2 + bst r5,2 + bld r19,3 + bst r21,3 + bld r19,4 + bst r23,3 + bld r19,5 + bst r3,3 + bld r19,6 + bst r5,3 + bld r19,7 + st X+,r18 + st X+,r19 + bst r21,4 + bld r18,0 + bst r23,4 + bld r18,1 + bst r3,4 + bld r18,2 + bst r5,4 + bld r18,3 + bst r21,5 + bld r18,4 + bst r23,5 + bld r18,5 + bst r3,5 + bld r18,6 + bst r5,5 + bld r18,7 + bst r21,6 + bld r19,0 + bst r23,6 + bld r19,1 + bst r3,6 + bld r19,2 + bst r5,6 + bld r19,3 + bst r21,7 + bld r19,4 + bst r23,7 + bld r19,5 + bst r3,7 + bld r19,6 + bst r5,7 + bld r19,7 + st X+,r18 + st X+,r19 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift64n_encrypt, .-gift64n_encrypt + + .text +.global gift64n_decrypt + .type gift64n_decrypt, @function +gift64n_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 28 + ld r6,Z + ldd r7,Z+1 + ldd r8,Z+2 + ldd r9,Z+3 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Z+4 + ldd r7,Z+5 + ldd r8,Z+6 + ldd r9,Z+7 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Z+8 + ldd r7,Z+9 + ldd r8,Z+10 + ldd r9,Z+11 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Z+12 + ldd r7,Z+13 + ldd r8,Z+14 + ldd r9,Z+15 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ld r18,X+ + ld r19,X+ + bst r18,0 + bld r20,0 + bst r18,1 + bld r22,0 + bst r18,2 + bld r2,0 + bst r18,3 + bld r4,0 + bst r18,4 + bld r20,1 + bst r18,5 + bld r22,1 + bst r18,6 + bld r2,1 + bst r18,7 + bld r4,1 + bst r19,0 + bld r20,2 + bst r19,1 + bld r22,2 + bst r19,2 + bld r2,2 + bst r19,3 + bld r4,2 + bst r19,4 + bld r20,3 + bst r19,5 + bld r22,3 + bst r19,6 + bld r2,3 + bst r19,7 + bld r4,3 + ld r18,X+ + ld r19,X+ + bst r18,0 + bld r20,4 + bst r18,1 + bld r22,4 + bst r18,2 + bld r2,4 + bst r18,3 + bld r4,4 + bst r18,4 + bld r20,5 + bst r18,5 + bld r22,5 + bst r18,6 + bld r2,5 + bst r18,7 + bld r4,5 + bst r19,0 + bld r20,6 + bst r19,1 + bld r22,6 + bst r19,2 + bld r2,6 + bst r19,3 + bld r4,6 + bst r19,4 + bld r20,7 + bst r19,5 + bld r22,7 + bst r19,6 + bld r2,7 + bst r19,7 + bld r4,7 + ld r18,X+ + ld r19,X+ + bst r18,0 + bld r21,0 + bst r18,1 + bld r23,0 + bst r18,2 + bld r3,0 + bst r18,3 + bld r5,0 + bst r18,4 + bld r21,1 + bst r18,5 + bld r23,1 + bst r18,6 + bld 
r3,1 + bst r18,7 + bld r5,1 + bst r19,0 + bld r21,2 + bst r19,1 + bld r23,2 + bst r19,2 + bld r3,2 + bst r19,3 + bld r5,2 + bst r19,4 + bld r21,3 + bst r19,5 + bld r23,3 + bst r19,6 + bld r3,3 + bst r19,7 + bld r5,3 + ld r18,X+ + ld r19,X+ + bst r18,0 + bld r21,4 + bst r18,1 + bld r23,4 + bst r18,2 + bld r3,4 + bst r18,3 + bld r5,4 + bst r18,4 + bld r21,5 + bst r18,5 + bld r23,5 + bst r18,6 + bld r3,5 + bst r18,7 + bld r5,5 + bst r19,0 + bld r21,6 + bst r19,1 + bld r23,6 + bst r19,2 + bld r3,6 + bst r19,3 + bld r5,6 + bst r19,4 + bld r21,7 + bst r19,5 + bld r23,7 + bst r19,6 + bld r3,7 + bst r19,7 + bld r5,7 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,11 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,5 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,2 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,33 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,48 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,24 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,44 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,22 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+13,r6 + 
std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,43 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,53 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,58 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,29 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,14 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,39 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,51 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,57 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,60 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol 
r9 + adc r8,r1 + ldi r18,30 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,47 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,55 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,59 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,61 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,62 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,31 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,15 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,7 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,3 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 
+ ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,1 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + rjmp 1362f +1173: + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + bst r20,1 + bld r0,0 + bst r21,4 + bld r20,1 + bst r20,3 + bld r21,4 + bst r20,4 + bld r20,3 + bst r0,0 + bld r20,4 + bst r20,2 + bld r0,0 + bst r21,0 + bld r20,2 + bst r0,0 + bld r21,0 + bst r20,5 + bld r0,0 + bst r21,5 + bld r20,5 + bst r21,7 + bld r21,5 + bst r20,7 + bld r21,7 + bst r0,0 + bld r20,7 + bst r20,6 + bld r0,0 + bst r21,1 + bld r20,6 + bst r21,6 + bld r21,1 + bst r21,3 + bld r21,6 + bst r0,0 + bld r21,3 + bst r22,0 + bld r0,0 + bst r22,4 + bld r22,0 + bst r22,5 + bld r22,4 + bst r22,1 + bld r22,5 + bst r0,0 + bld r22,1 + bst r22,2 + bld r0,0 + bst r23,4 + bld r22,2 + bst r22,7 + bld r23,4 + bst r23,1 + bld r22,7 + bst r0,0 + bld r23,1 + bst r22,3 + bld r0,0 + bst r23,0 + bld r22,3 + bst r22,6 + bld r23,0 + bst r23,5 + bld r22,6 + bst r0,0 + bld r23,5 + bst r23,2 + bld r0,0 + bst r23,6 + bld r23,2 + bst r23,7 + bld r23,6 + bst r23,3 + bld r23,7 + bst r0,0 + bld r23,3 + bst r2,0 + bld r0,0 + bst r3,0 + bld r2,0 + bst r3,2 + bld r3,0 + bst r2,2 + bld r3,2 + bst r0,0 + bld r2,2 + bst r2,1 + bld r0,0 + bst r2,4 + bld r2,1 + bst r3,1 + bld r2,4 + bst r2,6 + bld r3,1 + bst r0,0 + bld r2,6 + bst r2,3 + bld r0,0 + bst r3,4 + bld r2,3 + bst r3,3 + bld r3,4 + bst r3,6 + bld r3,3 + bst r0,0 + bld r3,6 + bst r2,7 + bld r0,0 + bst r3,5 + bld r2,7 + bst r0,0 + bld r3,5 + bst r4,0 + bld r0,0 + bst r5,4 + bld r4,0 + bst r5,7 + bld r5,4 + bst r4,3 + bld r5,7 + bst r0,0 + bld r4,3 + bst r4,1 + bld r0,0 + bst r5,0 + bld r4,1 + bst r5,6 + bld r5,0 + bst r4,7 + bld r5,6 + bst r0,0 + bld r4,7 + bst r4,2 + bld r0,0 + bst r4,4 + bld r4,2 + bst r5,5 + bld r4,4 + bst r5,3 + bld r5,5 + bst r0,0 + bld r5,3 + bst r4,5 + bld r0,0 + bst r5,1 + bld r4,5 + bst r5,2 + bld r5,1 + bst r4,6 + bld r5,2 + bst r0,0 + bld r4,6 + movw r18,r4 + movw r4,r20 + movw r20,r18 + and r18,r22 + and r19,r23 + eor r2,r18 + eor r3,r19 + com r4 + com r5 + eor r22,r4 + eor r23,r5 + eor r4,r2 + eor r5,r3 + mov r0,r20 + or r0,r22 + eor r2,r0 + mov r0,r21 + or r0,r23 + eor r3,r0 + mov r0,r22 + and r0,r4 + eor r20,r0 + mov r0,r23 + and r0,r5 + eor r21,r0 + mov r0,r20 + and r0,r2 + eor r22,r0 + mov r0,r21 + and r0,r3 + eor r23,r0 + ret +1362: + ldd r26,Y+17 + ldd r27,Y+18 + bst r20,0 + bld r18,0 + bst r22,0 + bld r18,1 + bst r2,0 + bld r18,2 + bst r4,0 + bld r18,3 + bst r20,1 + bld r18,4 + bst r22,1 + bld r18,5 + bst r2,1 + bld r18,6 + bst r4,1 + bld r18,7 + bst r20,2 + bld r19,0 + bst r22,2 + bld r19,1 + bst r2,2 + bld r19,2 + bst r4,2 + bld r19,3 + bst r20,3 + bld r19,4 + bst r22,3 + bld r19,5 + bst r2,3 + bld r19,6 + bst r4,3 + bld r19,7 + st X+,r18 + st X+,r19 + bst r20,4 + bld r18,0 + bst r22,4 + bld r18,1 + bst r2,4 + bld r18,2 + bst r4,4 + bld r18,3 + bst r20,5 + bld r18,4 + bst r22,5 + bld r18,5 + bst r2,5 + bld r18,6 + bst r4,5 + bld r18,7 + bst r20,6 + bld r19,0 + bst r22,6 + bld r19,1 + bst r2,6 + bld r19,2 + bst r4,6 + bld r19,3 + bst r20,7 + bld r19,4 + bst r22,7 + bld r19,5 + bst r2,7 + bld r19,6 + bst r4,7 + bld r19,7 + st X+,r18 + st X+,r19 + bst r21,0 + bld r18,0 + bst r23,0 + bld r18,1 + bst r3,0 + bld r18,2 + bst r5,0 + bld r18,3 + bst r21,1 + bld r18,4 + bst r23,1 + bld r18,5 + bst r3,1 + bld r18,6 + bst r5,1 + bld r18,7 + bst r21,2 + bld r19,0 + bst r23,2 + bld r19,1 + bst r3,2 + bld r19,2 + bst r5,2 + bld r19,3 + bst r21,3 + bld r19,4 + bst r23,3 + bld r19,5 + 
bst r3,3 + bld r19,6 + bst r5,3 + bld r19,7 + st X+,r18 + st X+,r19 + bst r21,4 + bld r18,0 + bst r23,4 + bld r18,1 + bst r3,4 + bld r18,2 + bst r5,4 + bld r18,3 + bst r21,5 + bld r18,4 + bst r23,5 + bld r18,5 + bst r3,5 + bld r18,6 + bst r5,5 + bld r18,7 + bst r21,6 + bld r19,0 + bst r23,6 + bld r19,1 + bst r3,6 + bld r19,2 + bst r5,6 + bld r19,3 + bst r21,7 + bld r19,4 + bst r23,7 + bld r19,5 + bst r3,7 + bld r19,6 + bst r5,7 + bld r19,7 + st X+,r18 + st X+,r19 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift64n_decrypt, .-gift64n_decrypt + + .text +.global gift64t_encrypt + .type gift64t_encrypt, @function +gift64t_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 30 + ld r8,Z + ldd r9,Z+1 + ldd r10,Z+2 + ldd r11,Z+3 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Z+4 + ldd r9,Z+5 + ldd r10,Z+6 + ldd r11,Z+7 + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ld r20,X+ + ld r21,X+ + bst r20,0 + bld r22,0 + bst r20,1 + bld r2,0 + bst r20,2 + bld r4,0 + bst r20,3 + bld r6,0 + bst r20,4 + bld r22,1 + bst r20,5 + bld r2,1 + bst r20,6 + bld r4,1 + bst r20,7 + bld r6,1 + bst r21,0 + bld r22,2 + bst r21,1 + bld r2,2 + bst r21,2 + bld r4,2 + bst r21,3 + bld r6,2 + bst r21,4 + bld r22,3 + bst r21,5 + bld r2,3 + bst r21,6 + bld r4,3 + bst r21,7 + bld r6,3 + ld r20,X+ + ld r21,X+ + bst r20,0 + bld r22,4 + bst r20,1 + bld r2,4 + bst r20,2 + bld r4,4 + bst r20,3 + bld r6,4 + bst r20,4 + bld r22,5 + bst r20,5 + bld r2,5 + bst r20,6 + bld r4,5 + bst r20,7 + bld r6,5 + bst r21,0 + bld r22,6 + bst r21,1 + bld r2,6 + bst r21,2 + bld r4,6 + bst r21,3 + bld r6,6 + bst r21,4 + bld r22,7 + bst r21,5 + bld r2,7 + bst r21,6 + bld r4,7 + bst r21,7 + bld r6,7 + ld r20,X+ + ld r21,X+ + bst r20,0 + bld r23,0 + bst r20,1 + bld r3,0 + bst r20,2 + bld r5,0 + bst r20,3 + bld r7,0 + bst r20,4 + bld r23,1 + bst r20,5 + bld r3,1 + bst r20,6 + bld r5,1 + bst r20,7 + bld r7,1 + bst r21,0 + bld r23,2 + bst r21,1 + bld r3,2 + bst r21,2 + bld r5,2 + bst r21,3 + bld r7,2 + bst r21,4 + bld r23,3 + bst r21,5 + bld r3,3 + bst r21,6 + bld r5,3 + bst r21,7 + bld r7,3 + ld r20,X+ + ld r21,X+ + bst r20,0 + bld r23,4 + bst r20,1 + bld r3,4 + bst r20,2 + bld r5,4 + bst r20,3 + bld r7,4 + bst r20,4 + bld r23,5 + bst r20,5 + bld r3,5 + bst r20,6 + bld r5,5 + bst r20,7 + bld r7,5 + bst r21,0 + bld r23,6 + bst r21,1 + bld r3,6 + bst r21,2 + bld r5,6 + bst r21,3 + bld r7,6 + bst r21,4 + bld r23,7 + bst r21,5 + bld r3,7 + bst r21,6 + bld r5,7 + bst r21,7 + bld r7,7 + rcall 1073f + ldi r20,1 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + rcall 1073f + ldi r20,3 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 
+ lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + rcall 1073f + ldi r20,7 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + rcall 1073f + ldi r20,15 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + rcall 1073f + ldi r20,31 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + rcall 1073f + ldi r20,62 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + rcall 1073f + ldi r20,61 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + rcall 1073f + ldi r20,59 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + rcall 1073f + ldi r20,55 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + rcall 1073f + ldi r20,47 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + rcall 1073f + ldi r20,30 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr 
r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + rcall 1073f + ldi r20,60 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + rcall 1073f + ldi r20,57 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + rcall 1073f + ldi r20,51 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + rcall 1073f + ldi r20,39 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + rcall 1073f + ldi r20,14 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + rcall 1073f + ldi r20,29 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + rcall 1073f + ldi r20,58 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + rcall 1073f + ldi r20,53 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + rcall 1073f + ldi r20,43 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + 
or r11,r0 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + rcall 1073f + ldi r20,22 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + rcall 1073f + ldi r20,44 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + rcall 1073f + ldi r20,24 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + rcall 1073f + ldi r20,48 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + rcall 1073f + ldi r20,33 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + rcall 1073f + ldi r20,2 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + rcall 1073f + ldi r20,5 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + rcall 1073f + ldi r20,11 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rjmp 1264f +1073: + mov r0,r22 + and r0,r4 + eor r2,r0 + mov r0,r23 + and r0,r5 + eor r3,r0 + mov r0,r2 + and r0,r6 + eor r22,r0 + mov r0,r3 + and r0,r7 + eor r23,r0 + mov r0,r22 + or r0,r2 + eor r4,r0 + mov r0,r23 + or r0,r3 + eor r5,r0 + eor r6,r4 + eor r7,r5 + eor r2,r6 + eor r3,r7 + com r6 + com r7 + movw r20,r22 + mov r0,r2 + and r0,r20 + eor r4,r0 + mov r0,r3 + and r0,r21 + eor r5,r0 + movw r22,r6 + movw r6,r20 + bst r22,1 + bld r0,0 + bst r22,4 + bld r22,1 + bst r22,3 + bld r22,4 + bst r23,4 + bld r22,3 + bst r0,0 + bld r23,4 + bst r22,2 + bld r0,0 + bst r23,0 + bld r22,2 + bst r0,0 + bld r23,0 + bst r22,5 + bld r0,0 + bst r22,7 + bld r22,5 + bst r23,7 + bld 
r22,7 + bst r23,5 + bld r23,7 + bst r0,0 + bld r23,5 + bst r22,6 + bld r0,0 + bst r23,3 + bld r22,6 + bst r23,6 + bld r23,3 + bst r23,1 + bld r23,6 + bst r0,0 + bld r23,1 + bst r2,0 + bld r0,0 + bst r2,1 + bld r2,0 + bst r2,5 + bld r2,1 + bst r2,4 + bld r2,5 + bst r0,0 + bld r2,4 + bst r2,2 + bld r0,0 + bst r3,1 + bld r2,2 + bst r2,7 + bld r3,1 + bst r3,4 + bld r2,7 + bst r0,0 + bld r3,4 + bst r2,3 + bld r0,0 + bst r3,5 + bld r2,3 + bst r2,6 + bld r3,5 + bst r3,0 + bld r2,6 + bst r0,0 + bld r3,0 + bst r3,2 + bld r0,0 + bst r3,3 + bld r3,2 + bst r3,7 + bld r3,3 + bst r3,6 + bld r3,7 + bst r0,0 + bld r3,6 + bst r4,0 + bld r0,0 + bst r4,2 + bld r4,0 + bst r5,2 + bld r4,2 + bst r5,0 + bld r5,2 + bst r0,0 + bld r5,0 + bst r4,1 + bld r0,0 + bst r4,6 + bld r4,1 + bst r5,1 + bld r4,6 + bst r4,4 + bld r5,1 + bst r0,0 + bld r4,4 + bst r4,3 + bld r0,0 + bst r5,6 + bld r4,3 + bst r5,3 + bld r5,6 + bst r5,4 + bld r5,3 + bst r0,0 + bld r5,4 + bst r4,7 + bld r0,0 + bst r5,5 + bld r4,7 + bst r0,0 + bld r5,5 + bst r6,0 + bld r0,0 + bst r6,3 + bld r6,0 + bst r7,7 + bld r6,3 + bst r7,4 + bld r7,7 + bst r0,0 + bld r7,4 + bst r6,1 + bld r0,0 + bst r6,7 + bld r6,1 + bst r7,6 + bld r6,7 + bst r7,0 + bld r7,6 + bst r0,0 + bld r7,0 + bst r6,2 + bld r0,0 + bst r7,3 + bld r6,2 + bst r7,5 + bld r7,3 + bst r6,4 + bld r7,5 + bst r0,0 + bld r6,4 + bst r6,5 + bld r0,0 + bst r6,6 + bld r6,5 + bst r7,2 + bld r6,6 + bst r7,1 + bld r7,2 + bst r0,0 + bld r7,1 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + ret +1264: + ldd r26,Y+17 + ldd r27,Y+18 + bst r22,0 + bld r20,0 + bst r2,0 + bld r20,1 + bst r4,0 + bld r20,2 + bst r6,0 + bld r20,3 + bst r22,1 + bld r20,4 + bst r2,1 + bld r20,5 + bst r4,1 + bld r20,6 + bst r6,1 + bld r20,7 + bst r22,2 + bld r21,0 + bst r2,2 + bld r21,1 + bst r4,2 + bld r21,2 + bst r6,2 + bld r21,3 + bst r22,3 + bld r21,4 + bst r2,3 + bld r21,5 + bst r4,3 + bld r21,6 + bst r6,3 + bld r21,7 + st X+,r20 + st X+,r21 + bst r22,4 + bld r20,0 + bst r2,4 + bld r20,1 + bst r4,4 + bld r20,2 + bst r6,4 + bld r20,3 + bst r22,5 + bld r20,4 + bst r2,5 + bld r20,5 + bst r4,5 + bld r20,6 + bst r6,5 + bld r20,7 + bst r22,6 + bld r21,0 + bst r2,6 + bld r21,1 + bst r4,6 + bld r21,2 + bst r6,6 + bld r21,3 + bst r22,7 + bld r21,4 + bst r2,7 + bld r21,5 + bst r4,7 + bld r21,6 + bst r6,7 + bld r21,7 + st X+,r20 + st X+,r21 + bst r23,0 + bld r20,0 + bst r3,0 + bld r20,1 + bst r5,0 + bld r20,2 + bst r7,0 + bld r20,3 + bst r23,1 + bld r20,4 + bst r3,1 + bld r20,5 + bst r5,1 + bld r20,6 + bst r7,1 + bld r20,7 + bst r23,2 + bld r21,0 + bst r3,2 + bld r21,1 + bst r5,2 + bld r21,2 + bst r7,2 + bld r21,3 + bst r23,3 + bld r21,4 + bst r3,3 + bld r21,5 + bst r5,3 + bld r21,6 + bst r7,3 + bld r21,7 + st X+,r20 + st X+,r21 + bst r23,4 + bld r20,0 + bst r3,4 + bld r20,1 + bst r5,4 + bld r20,2 + bst r7,4 + bld r20,3 + bst r23,5 + bld r20,4 + bst r3,5 + bld r20,5 + bst r5,5 + bld r20,6 + bst r7,5 + bld r20,7 + bst r23,6 + bld r21,0 + bst r3,6 + bld r21,1 + bst r5,6 + bld r21,2 + bst r7,6 + bld r21,3 + bst r23,7 + bld r21,4 + bst r3,7 + bld r21,5 + bst r5,7 + bld r21,6 + bst r7,7 + bld r21,7 + st X+,r20 + st X+,r21 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift64t_encrypt, .-gift64t_encrypt + + .text +.global gift64t_decrypt + .type gift64t_decrypt, @function +gift64t_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 
+ push r10 + push r11 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 30 + ld r8,Z + ldd r9,Z+1 + ldd r10,Z+2 + ldd r11,Z+3 + mov r0,r9 + mov r9,r8 + mov r8,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Z+4 + ldd r9,Z+5 + ldd r10,Z+6 + ldd r11,Z+7 + mov r0,r9 + mov r9,r8 + mov r8,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + mov r0,r9 + mov r9,r8 + mov r8,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + mov r0,r9 + mov r9,r8 + mov r8,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r20,X+ + ld r21,X+ + bst r20,0 + bld r22,0 + bst r20,1 + bld r2,0 + bst r20,2 + bld r4,0 + bst r20,3 + bld r6,0 + bst r20,4 + bld r22,1 + bst r20,5 + bld r2,1 + bst r20,6 + bld r4,1 + bst r20,7 + bld r6,1 + bst r21,0 + bld r22,2 + bst r21,1 + bld r2,2 + bst r21,2 + bld r4,2 + bst r21,3 + bld r6,2 + bst r21,4 + bld r22,3 + bst r21,5 + bld r2,3 + bst r21,6 + bld r4,3 + bst r21,7 + bld r6,3 + ld r20,X+ + ld r21,X+ + bst r20,0 + bld r22,4 + bst r20,1 + bld r2,4 + bst r20,2 + bld r4,4 + bst r20,3 + bld r6,4 + bst r20,4 + bld r22,5 + bst r20,5 + bld r2,5 + bst r20,6 + bld r4,5 + bst r20,7 + bld r6,5 + bst r21,0 + bld r22,6 + bst r21,1 + bld r2,6 + bst r21,2 + bld r4,6 + bst r21,3 + bld r6,6 + bst r21,4 + bld r22,7 + bst r21,5 + bld r2,7 + bst r21,6 + bld r4,7 + bst r21,7 + bld r6,7 + ld r20,X+ + ld r21,X+ + bst r20,0 + bld r23,0 + bst r20,1 + bld r3,0 + bst r20,2 + bld r5,0 + bst r20,3 + bld r7,0 + bst r20,4 + bld r23,1 + bst r20,5 + bld r3,1 + bst r20,6 + bld r5,1 + bst r20,7 + bld r7,1 + bst r21,0 + bld r23,2 + bst r21,1 + bld r3,2 + bst r21,2 + bld r5,2 + bst r21,3 + bld r7,2 + bst r21,4 + bld r23,3 + bst r21,5 + bld r3,3 + bst r21,6 + bld r5,3 + bst r21,7 + bld r7,3 + ld r20,X+ + ld r21,X+ + bst r20,0 + bld r23,4 + bst r20,1 + bld r3,4 + bst r20,2 + bld r5,4 + bst r20,3 + bld r7,4 + bst r20,4 + bld r23,5 + bst r20,5 + bld r3,5 + bst r20,6 + bld r5,5 + bst r20,7 + bld r7,5 + bst r21,0 + bld r23,6 + bst r21,1 + bld r3,6 + bst r21,2 + bld r5,6 + bst r21,3 + bld r7,6 + bst r21,4 + bld r23,7 + bst r21,5 + bld r3,7 + bst r21,6 + bld r5,7 + bst r21,7 + bld r7,7 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,11 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + mov r0,r1 + 
lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,5 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,2 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,33 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,48 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + rcall 1185f + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,24 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,44 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,22 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,43 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + rcall 1185f + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,53 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl 
r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,58 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,29 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,14 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + rcall 1185f + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,39 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,51 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,57 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,60 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + rcall 1185f + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,30 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,47 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,55 + ldi r21,128 + eor r6,r20 + eor r7,r21 + 
rcall 1185f + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,59 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + rcall 1185f + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,61 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,62 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,31 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,15 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + rcall 1185f + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,7 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,3 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,1 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + rjmp 1374f +1185: + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + bst r22,1 + bld r0,0 + bst r23,4 + bld r22,1 + bst r22,3 + bld r23,4 + bst r22,4 + bld r22,3 + bst r0,0 + bld r22,4 + bst r22,2 + bld r0,0 + bst r23,0 + bld r22,2 + bst r0,0 + bld r23,0 + bst r22,5 + bld r0,0 + bst r23,5 + bld r22,5 + bst r23,7 + bld r23,5 + bst r22,7 + bld r23,7 + bst r0,0 + bld r22,7 + bst r22,6 + bld r0,0 + bst r23,1 + bld r22,6 + bst r23,6 + bld r23,1 + bst r23,3 + bld r23,6 + bst r0,0 + bld 
r23,3 + bst r2,0 + bld r0,0 + bst r2,4 + bld r2,0 + bst r2,5 + bld r2,4 + bst r2,1 + bld r2,5 + bst r0,0 + bld r2,1 + bst r2,2 + bld r0,0 + bst r3,4 + bld r2,2 + bst r2,7 + bld r3,4 + bst r3,1 + bld r2,7 + bst r0,0 + bld r3,1 + bst r2,3 + bld r0,0 + bst r3,0 + bld r2,3 + bst r2,6 + bld r3,0 + bst r3,5 + bld r2,6 + bst r0,0 + bld r3,5 + bst r3,2 + bld r0,0 + bst r3,6 + bld r3,2 + bst r3,7 + bld r3,6 + bst r3,3 + bld r3,7 + bst r0,0 + bld r3,3 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r4,2 + bld r5,2 + bst r0,0 + bld r4,2 + bst r4,1 + bld r0,0 + bst r4,4 + bld r4,1 + bst r5,1 + bld r4,4 + bst r4,6 + bld r5,1 + bst r0,0 + bld r4,6 + bst r4,3 + bld r0,0 + bst r5,4 + bld r4,3 + bst r5,3 + bld r5,4 + bst r5,6 + bld r5,3 + bst r0,0 + bld r5,6 + bst r4,7 + bld r0,0 + bst r5,5 + bld r4,7 + bst r0,0 + bld r5,5 + bst r6,0 + bld r0,0 + bst r7,4 + bld r6,0 + bst r7,7 + bld r7,4 + bst r6,3 + bld r7,7 + bst r0,0 + bld r6,3 + bst r6,1 + bld r0,0 + bst r7,0 + bld r6,1 + bst r7,6 + bld r7,0 + bst r6,7 + bld r7,6 + bst r0,0 + bld r6,7 + bst r6,2 + bld r0,0 + bst r6,4 + bld r6,2 + bst r7,5 + bld r6,4 + bst r7,3 + bld r7,5 + bst r0,0 + bld r7,3 + bst r6,5 + bld r0,0 + bst r7,1 + bld r6,5 + bst r7,2 + bld r7,1 + bst r6,6 + bld r7,2 + bst r0,0 + bld r6,6 + movw r20,r6 + movw r6,r22 + movw r22,r20 + and r20,r2 + and r21,r3 + eor r4,r20 + eor r5,r21 + com r6 + com r7 + eor r2,r6 + eor r3,r7 + eor r6,r4 + eor r7,r5 + mov r0,r22 + or r0,r2 + eor r4,r0 + mov r0,r23 + or r0,r3 + eor r5,r0 + mov r0,r2 + and r0,r6 + eor r22,r0 + mov r0,r3 + and r0,r7 + eor r23,r0 + mov r0,r22 + and r0,r4 + eor r2,r0 + mov r0,r23 + and r0,r5 + eor r3,r0 + ret +1374: + ldd r26,Y+17 + ldd r27,Y+18 + bst r22,0 + bld r20,0 + bst r2,0 + bld r20,1 + bst r4,0 + bld r20,2 + bst r6,0 + bld r20,3 + bst r22,1 + bld r20,4 + bst r2,1 + bld r20,5 + bst r4,1 + bld r20,6 + bst r6,1 + bld r20,7 + bst r22,2 + bld r21,0 + bst r2,2 + bld r21,1 + bst r4,2 + bld r21,2 + bst r6,2 + bld r21,3 + bst r22,3 + bld r21,4 + bst r2,3 + bld r21,5 + bst r4,3 + bld r21,6 + bst r6,3 + bld r21,7 + st X+,r20 + st X+,r21 + bst r22,4 + bld r20,0 + bst r2,4 + bld r20,1 + bst r4,4 + bld r20,2 + bst r6,4 + bld r20,3 + bst r22,5 + bld r20,4 + bst r2,5 + bld r20,5 + bst r4,5 + bld r20,6 + bst r6,5 + bld r20,7 + bst r22,6 + bld r21,0 + bst r2,6 + bld r21,1 + bst r4,6 + bld r21,2 + bst r6,6 + bld r21,3 + bst r22,7 + bld r21,4 + bst r2,7 + bld r21,5 + bst r4,7 + bld r21,6 + bst r6,7 + bld r21,7 + st X+,r20 + st X+,r21 + bst r23,0 + bld r20,0 + bst r3,0 + bld r20,1 + bst r5,0 + bld r20,2 + bst r7,0 + bld r20,3 + bst r23,1 + bld r20,4 + bst r3,1 + bld r20,5 + bst r5,1 + bld r20,6 + bst r7,1 + bld r20,7 + bst r23,2 + bld r21,0 + bst r3,2 + bld r21,1 + bst r5,2 + bld r21,2 + bst r7,2 + bld r21,3 + bst r23,3 + bld r21,4 + bst r3,3 + bld r21,5 + bst r5,3 + bld r21,6 + bst r7,3 + bld r21,7 + st X+,r20 + st X+,r21 + bst r23,4 + bld r20,0 + bst r3,4 + bld r20,1 + bst r5,4 + bld r20,2 + bst r7,4 + bld r20,3 + bst r23,5 + bld r20,4 + bst r3,5 + bld r20,5 + bst r5,5 + bld r20,6 + bst r7,5 + bld r20,7 + bst r23,6 + bld r21,0 + bst r3,6 + bld r21,1 + bst r5,6 + bld r21,2 + bst r7,6 + bld r21,3 + bst r23,7 + bld r21,4 + bst r3,7 + bld r21,5 + bst r5,7 + bld r21,6 + bst r7,7 + bld r21,7 + st X+,r20 + st X+,r21 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift64t_decrypt, .-gift64t_decrypt + +#endif diff --git 
a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/internal-gift64.c b/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/internal-gift64.c index 321d079..81bc8a3 100644 --- a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/internal-gift64.c +++ b/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/internal-gift64.c @@ -24,6 +24,8 @@ #include "internal-util.h" #include +#if !GIFT64_LOW_MEMORY + /* Round constants for GIFT-64 in the fixsliced representation */ static uint32_t const GIFT64_RC[28] = { 0x22000011, 0x00002299, 0x11118811, 0x880000ff, 0x33111199, 0x990022ee, @@ -33,19 +35,6 @@ static uint32_t const GIFT64_RC[28] = { 0x22008811, 0x00002288, 0x00118811, 0x880000bb }; -int gift64b_init - (gift64b_key_schedule_t *ks, const unsigned char *key, size_t key_len) -{ - if (!ks || !key || key_len != 16) - return 0; - ks->k[0] = be_load_word32(key); - ks->k[1] = be_load_word32(key + 4); - ks->k[2] = be_load_word32(key + 8); - ks->k[3] = be_load_word32(key + 12); - gift64b_update_round_keys(ks); - return 1; -} - /* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ #define bit_permute_step(_y, mask, shift) \ do { \ @@ -249,7 +238,7 @@ int gift64b_init ((out & 0x00000F00U) << 8) | ((out & 0x0000F000U) << 12); \ } while (0) -void gift64b_update_round_keys(gift64b_key_schedule_t *ks) +void gift64n_update_round_keys(gift64n_key_schedule_t *ks) { uint32_t x; @@ -293,7 +282,7 @@ void gift64b_update_round_keys(gift64b_key_schedule_t *ks) * \param Tweak value or zero if there is no tweak. */ static void gift64b_encrypt_core - (const gift64b_key_schedule_t *ks, uint32_t state[4], uint32_t tweak) + (const gift64n_key_schedule_t *ks, uint32_t state[4], uint32_t tweak) { const uint32_t *rc = GIFT64_RC; uint32_t s0, s1, s2, s3, temp; @@ -391,7 +380,7 @@ static void gift64b_encrypt_core * \param Tweak value or zero if there is no tweak. 
*/ static void gift64b_decrypt_core - (const gift64b_key_schedule_t *ks, uint32_t state[4], uint32_t tweak) + (const gift64n_key_schedule_t *ks, uint32_t state[4], uint32_t tweak) { const uint32_t *rc = GIFT64_RC + 28 - 4; uint32_t s0, s1, s2, s3, temp; @@ -513,18 +502,14 @@ static void gift64b_decrypt_core state[3] = s3; } -int gift64n_init - (gift64n_key_schedule_t *ks, const unsigned char *key, size_t key_len) +void gift64n_init(gift64n_key_schedule_t *ks, const unsigned char *key) { /* Use the little-endian byte order from the LOTUS-AEAD submission */ - if (!ks || !key || key_len != 16) - return 0; ks->k[0] = le_load_word32(key + 12); ks->k[1] = le_load_word32(key + 8); ks->k[2] = le_load_word32(key + 4); ks->k[3] = le_load_word32(key); - gift64b_update_round_keys(ks); - return 1; + gift64n_update_round_keys(ks); } /** @@ -622,124 +607,599 @@ void gift64n_decrypt gift64n_to_nibbles(output, state); } +/* 4-bit tweak values expanded to 32-bit in fixsliced form */ +static uint32_t const GIFT64_tweaks[16] = { + 0x00000000, 0xee11ee11, 0xdd22dd22, 0x33333333, 0xbb44bb44, 0x55555555, + 0x66666666, 0x88778877, 0x77887788, 0x99999999, 0xaaaaaaaa, 0x44bb44bb, + 0xcccccccc, 0x22dd22dd, 0x11ee11ee, 0xffffffff +}; + +void gift64t_encrypt + (const gift64n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, uint16_t tweak) +{ + uint32_t state[4]; + gift64n_to_words(state, input); + gift64b_encrypt_core(ks, state, GIFT64_tweaks[tweak & 0x0F]); + gift64n_to_nibbles(output, state); +} + +void gift64t_decrypt + (const gift64n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, uint16_t tweak) +{ + uint32_t state[4]; + gift64n_to_words(state, input); + gift64b_decrypt_core(ks, state, GIFT64_tweaks[tweak & 0x0F]); + gift64n_to_nibbles(output, state); +} + +#elif !defined(__AVR__) /* GIFT64_LOW_MEMORY */ + +/* Round constants for GIFT-64 */ +static uint8_t const GIFT64_RC[28] = { + 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, + 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, + 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, + 0x21, 0x02, 0x05, 0x0B +}; + +/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ +#define bit_permute_step(_y, mask, shift) \ + do { \ + uint16_t y = (_y); \ + uint16_t t = ((y >> (shift)) ^ y) & (mask); \ + (_y) = (y ^ t) ^ (t << (shift)); \ + } while (0) + +/* http://programming.sirrida.de/perm_fn.html#bit_permute_step_simple */ +#define bit_permute_step_simple(_y, mask, shift) \ + do { \ + (_y) = (((_y) & (mask)) << (shift)) | (((_y) >> (shift)) & (mask)); \ + } while (0) + +/* + * The permutation below was generated by the online permuation generator at + * "http://programming.sirrida.de/calcperm.php". + * + * All of the permutuations are essentially the same, except that each is + * rotated by 4 bits with respect to the next: + * + * P0: 0 12 8 4 1 13 9 5 2 14 10 6 3 15 11 7 + * P1: 4 0 12 8 5 1 13 9 6 2 14 10 7 3 15 11 + * P2: 8 4 0 12 9 5 1 13 10 6 2 14 11 7 3 15 + * P3: 12 8 4 0 13 9 5 1 14 10 6 2 15 11 7 3 + * + * The most efficient permutation from the online generator was P1, so we + * perform it as the core of the others, and then perform a final rotation. + * + * It is possible to do slightly better than "P1 then rotate" on desktop and + * server architectures for the other permutations. But the advantage isn't + * as evident on embedded platforms so we keep things simple. 
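As a quick sanity check of the 16-bit permutation macros in this hunk against the P1 table quoted above, the following minimal standalone sketch (not part of the patch; it assumes a hosted C99 toolchain and simply re-declares the two helper macros from just above plus the PERM1_INNER() macro defined a few lines further down) feeds each single-bit input through PERM1_INNER() and prints where the bit lands, which can be compared against the table by hand.

#include <stdint.h>
#include <stdio.h>

/* Helper macros re-declared from internal-gift64.c (16-bit variants). */
#define bit_permute_step(_y, mask, shift) \
    do { \
        uint16_t y = (_y); \
        uint16_t t = ((y >> (shift)) ^ y) & (mask); \
        (_y) = (y ^ t) ^ (t << (shift)); \
    } while (0)
#define bit_permute_step_simple(_y, mask, shift) \
    do { \
        (_y) = (((_y) & (mask)) << (shift)) | (((_y) >> (shift)) & (mask)); \
    } while (0)
#define PERM1_INNER(x) \
    do { \
        bit_permute_step(x, 0x0a0a, 3); \
        bit_permute_step(x, 0x00cc, 6); \
        bit_permute_step_simple(x, 0x0f0f, 4); \
    } while (0)

int main(void)
{
    /* Feed each single-bit value through the permutation and report
     * where the bit ends up; compare against the P1 table by hand. */
    for (int in = 0; in < 16; ++in) {
        uint16_t x = (uint16_t)(1U << in);
        PERM1_INNER(x);
        for (int out = 0; out < 16; ++out) {
            if (x & (1U << out))
                printf("bit %2d -> bit %2d\n", in, out);
        }
    }
    return 0;
}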
+ */ +#define PERM1_INNER(x) \ + do { \ + bit_permute_step(x, 0x0a0a, 3); \ + bit_permute_step(x, 0x00cc, 6); \ + bit_permute_step_simple(x, 0x0f0f, 4); \ + } while (0) +#define PERM0(x) \ + do { \ + uint32_t _x = (x); \ + PERM1_INNER(_x); \ + (x) = leftRotate12_16(_x); \ + } while (0) +#define PERM1(x) PERM1_INNER(x) +#define PERM2(x) \ + do { \ + uint32_t _x = (x); \ + PERM1_INNER(_x); \ + (x) = leftRotate4_16(_x); \ + } while (0) +#define PERM3(x) \ + do { \ + uint32_t _x = (x); \ + PERM1_INNER(_x); \ + (x) = leftRotate8_16(_x); \ + } while (0) + +#define INV_PERM1_INNER(x) \ + do { \ + bit_permute_step(x, 0x0505, 5); \ + bit_permute_step(x, 0x00cc, 6); \ + bit_permute_step_simple(x, 0x0f0f, 4); \ + } while (0) +#define INV_PERM0(x) \ + do { \ + uint32_t _x = rightRotate12_16(x); \ + INV_PERM1_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM1(x) INV_PERM1_INNER(x) +#define INV_PERM2(x) \ + do { \ + uint32_t _x = rightRotate4_16(x); \ + INV_PERM1_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM3(x) \ + do { \ + uint32_t _x = rightRotate8_16(x); \ + INV_PERM1_INNER(_x); \ + (x) = _x; \ + } while (0) + /** - * \brief Converts the GIFT-64 nibble-based representation into word-based - * (big-endian version). + * \brief Encrypts a 64-bit block with GIFT-64 (bit-sliced). * - * \param output Output buffer to write the word-based version to. - * \param input Input buffer to read the nibble-based version from. + * \param ks Points to the GIFT-64 key schedule. + * \param output Output buffer which must be at least 8 bytes in length. + * \param input Input buffer which must be at least 8 bytes in length. * - * The output words will be in fixsliced form. Technically the output will - * contain two blocks for gift64b_encrypt_core() to process in parallel but - * both blocks will have the same value. + * The \a input and \a output buffers can be the same buffer for + * in-place encryption. 
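The encryption routine that follows implements the SubCells step as a short sequence of AND/OR/XOR operations on the four bit-slices. The minimal standalone sketch below (not part of the patch) recovers the equivalent 4-bit S-box table from that exact sequence; it assumes the usual GIFT bit-slicing convention that slice s_i carries bit i of every nibble.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Run the SubCells sequence from gift64b_encrypt() on one nibble,
     * with bit i of the nibble placed in slice s_i (assumed convention). */
    for (unsigned x = 0; x < 16; ++x) {
        uint16_t s0 = (x >> 0) & 1, s1 = (x >> 1) & 1;
        uint16_t s2 = (x >> 2) & 1, s3 = (x >> 3) & 1, t;
        s1 ^= s0 & s2;
        s0 ^= s1 & s3;
        s2 ^= s0 | s1;
        s3 ^= s2;
        s1 ^= s3;
        s3 ^= 1;                     /* 0xFFFFU in the sliced code */
        s2 ^= s0 & s1;
        t = s0; s0 = s3; s3 = t;
        printf("S[%2u] = %u\n", x,
               (unsigned)(s0 | (s1 << 1) | (s2 << 2) | (s3 << 3)));
    }
    return 0;
}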
*/ -static void gift64nb_to_words(uint32_t output[4], const unsigned char *input) +static void gift64b_encrypt + (const gift64n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) { - uint32_t s0, s1, s2, s3; + uint16_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Load the input block into 32-bit words */ - s0 = be_load_word32(input + 4); - s2 = be_load_word32(input); + /* Copy the plaintext into the state buffer and convert from big endian */ + s0 = be_load_word16(input); + s1 = be_load_word16(input + 2); + s2 = be_load_word16(input + 4); + s3 = be_load_word16(input + 6); + + /* The key schedule is initialized with the key itself */ + w0 = ks->k[0]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[3]; + + /* Perform all 28 rounds */ + for (round = 0; round < 28; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 0xFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 64-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s0 ^= (uint16_t)w3; + s1 ^= (uint16_t)(w3 >> 16); + s3 ^= 0x8000U ^ GIFT64_RC[round]; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } - /* Rearrange the bits in the block */ - gift64b_swap_move(s0, s0, 0x0A0A0A0AU, 3); - gift64b_swap_move(s0, s0, 0x00CC00CCU, 6); - gift64b_swap_move(s0, s0, 0x0000FF00U, 8); - gift64b_swap_move(s2, s2, 0x0A0A0A0AU, 3); - gift64b_swap_move(s2, s2, 0x00CC00CCU, 6); - gift64b_swap_move(s2, s2, 0x0000FF00U, 8); + /* Pack the state into the ciphertext buffer in big endian */ + be_store_word16(output, s0); + be_store_word16(output + 2, s1); + be_store_word16(output + 4, s2); + be_store_word16(output + 6, s3); +} - /* Split into two identical blocks in fixsliced form */ - s1 = s0; - s3 = s2; - gift64b_swap_move(s0, s1, 0x0F0F0F0FU, 4); - gift64b_swap_move(s2, s3, 0x0F0F0F0FU, 4); - gift64b_swap_move(s0, s2, 0x0000FFFFU, 16); - gift64b_swap_move(s1, s3, 0x0000FFFFU, 16); - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; +/** + * \brief Decrypts a 64-bit block with GIFT-64 (bit-sliced). + * + * \param ks Points to the GIFT-64 key schedule. + * \param output Output buffer which must be at least 8 bytes in length. + * \param input Input buffer which must be at least 8 bytes in length. + * + * The \a input and \a output buffers can be the same buffer for + * in-place decryption. + */ +static void gift64b_decrypt + (const gift64n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + uint16_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; + + /* Copy the ciphertext into the state buffer and convert from big endian */ + s0 = be_load_word16(input); + s1 = be_load_word16(input + 2); + s2 = be_load_word16(input + 4); + s3 = be_load_word16(input + 6); + + /* Generate the decryption key at the end of the last round. 
+ * + * To do that, we run the block operation forward to determine the + * final state of the key schedule after the last round: + * + * w0 = ks->k[0]; + * w1 = ks->k[1]; + * w2 = ks->k[2]; + * w3 = ks->k[3]; + * for (round = 0; round < 28; ++round) { + * temp = w3; + * w3 = w2; + * w2 = w1; + * w1 = w0; + * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + * } + * + * We can short-cut all of the above by noticing that we don't need + * to do the word rotations. Every 4 rounds, the rotation alignment + * returns to the original position and each word has been rotated + * by applying the "2 right and 4 left" bit-rotation step to it. + * We then repeat that 7 times for the full 28 rounds. The overall + * effect is to apply a "14 right and 28 left" bit-rotation to every word + * in the key schedule. That is equivalent to "14 right and 12 left" + * on the 16-bit sub-words. + */ + w0 = ks->k[0]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[3]; + w0 = ((w0 & 0xC0000000U) >> 14) | ((w0 & 0x3FFF0000U) << 2) | + ((w0 & 0x0000000FU) << 12) | ((w0 & 0x0000FFF0U) >> 4); + w1 = ((w1 & 0xC0000000U) >> 14) | ((w1 & 0x3FFF0000U) << 2) | + ((w1 & 0x0000000FU) << 12) | ((w1 & 0x0000FFF0U) >> 4); + w2 = ((w2 & 0xC0000000U) >> 14) | ((w2 & 0x3FFF0000U) << 2) | + ((w2 & 0x0000000FU) << 12) | ((w2 & 0x0000FFF0U) >> 4); + w3 = ((w3 & 0xC0000000U) >> 14) | ((w3 & 0x3FFF0000U) << 2) | + ((w3 & 0x0000000FU) << 12) | ((w3 & 0x0000FFF0U) >> 4); + + /* Perform all 28 rounds */ + for (round = 28; round > 0; --round) { + /* Rotate the key schedule backwards */ + temp = w0; + w0 = w1; + w1 = w2; + w2 = w3; + w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | + ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s0 ^= (uint16_t)w3; + s1 ^= (uint16_t)(w3 >> 16); + s3 ^= 0x8000U ^ GIFT64_RC[round - 1]; + + /* InvPermBits - apply the inverse of the 128-bit permutation */ + INV_PERM0(s0); + INV_PERM1(s1); + INV_PERM2(s2); + INV_PERM3(s3); + + /* InvSubCells - apply the inverse of the S-box */ + temp = s0; + s0 = s3; + s3 = temp; + s2 ^= s0 & s1; + s3 ^= 0xFFFFU; + s1 ^= s3; + s3 ^= s2; + s2 ^= s0 | s1; + s0 ^= s1 & s3; + s1 ^= s0 & s2; + } + + /* Pack the state into the plaintext buffer in big endian */ + be_store_word16(output, s0); + be_store_word16(output + 2, s1); + be_store_word16(output + 4, s2); + be_store_word16(output + 6, s3); } +void gift64n_init(gift64n_key_schedule_t *ks, const unsigned char *key) +{ + /* Use the little-endian byte order from the LOTUS-AEAD submission */ + ks->k[0] = le_load_word32(key + 12); + ks->k[1] = le_load_word32(key + 8); + ks->k[2] = le_load_word32(key + 4); + ks->k[3] = le_load_word32(key); +} + +/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ +#define bit_permute_step_32(_y, mask, shift) \ + do { \ + uint32_t y = (_y); \ + uint32_t t = ((y >> (shift)) ^ y) & (mask); \ + (_y) = (y ^ t) ^ (t << (shift)); \ + } while (0) + /** - * \brief Converts the GIFT-64 word-based representation into nibble-based - * (big-endian version). + * \brief Converts the GIFT-64 nibble-based representation into word-based. * - * \param output Output buffer to write the nibble-based version to. - * \param input Input buffer to read the word-based version from. + * \param output Output buffer to write the word-based version to. + * \param input Input buffer to read the nibble-based version from. 
* - * The input words are in fixsliced form. Technically there are two - * identical blocks in the input. We drop one when we write to the output. + * The \a input and \a output buffers can be the same buffer. */ -static void gift64nb_to_nibbles(unsigned char *output, const uint32_t input[4]) +static void gift64n_to_words + (unsigned char *output, const unsigned char *input) { - uint32_t s0, s1, s2, s3; + uint32_t s0, s1; - /* Load the state and split the two blocks into separate words */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - gift64b_swap_move(s0, s2, 0x0000FFFFU, 16); - gift64b_swap_move(s1, s3, 0x0000FFFFU, 16); - gift64b_swap_move(s0, s1, 0x0F0F0F0FU, 4); - gift64b_swap_move(s2, s3, 0x0F0F0F0FU, 4); + /* Load the input buffer into 32-bit words. We use the nibble order from + * the LOTUS-AEAD submission to NIST which is byte-reversed with respect + * to the nibble order of the original GIFT-64 paper. Nibble zero is in + * the first byte instead of the last, which means little-endian order. */ + s0 = le_load_word32(input + 4); + s1 = le_load_word32(input); - /* Rearrange the bits in the first block back into nibble form */ - gift64b_swap_move(s0, s0, 0x0000FF00U, 8); - gift64b_swap_move(s0, s0, 0x00CC00CCU, 6); - gift64b_swap_move(s0, s0, 0x0A0A0A0AU, 3); - gift64b_swap_move(s2, s2, 0x0000FF00U, 8); - gift64b_swap_move(s2, s2, 0x00CC00CCU, 6); - gift64b_swap_move(s2, s2, 0x0A0A0A0AU, 3); - be_store_word32(output, s2); - be_store_word32(output + 4, s0); + /* Rearrange the bits so that bits 0..3 of each nibble are + * scattered to bytes 0..3 of each word. The permutation is: + * + * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 + * + * Generated with "http://programming.sirrida.de/calcperm.php". + */ + #define PERM_WORDS(_x) \ + do { \ + uint32_t x = (_x); \ + bit_permute_step_32(x, 0x0a0a0a0a, 3); \ + bit_permute_step_32(x, 0x00cc00cc, 6); \ + bit_permute_step_32(x, 0x0000f0f0, 12); \ + bit_permute_step_32(x, 0x0000ff00, 8); \ + (_x) = x; \ + } while (0) + PERM_WORDS(s0); + PERM_WORDS(s1); + + /* Rearrange the bytes and write them to the output buffer */ + output[0] = (uint8_t)s0; + output[1] = (uint8_t)s1; + output[2] = (uint8_t)(s0 >> 8); + output[3] = (uint8_t)(s1 >> 8); + output[4] = (uint8_t)(s0 >> 16); + output[5] = (uint8_t)(s1 >> 16); + output[6] = (uint8_t)(s0 >> 24); + output[7] = (uint8_t)(s1 >> 24); } -void gift64nb_encrypt +/** + * \brief Converts the GIFT-64 word-based representation into nibble-based. + * + * \param output Output buffer to write the nibble-based version to. + * \param input Input buffer to read the word-based version from. 
+ */ +static void gift64n_to_nibbles + (unsigned char *output, const unsigned char *input) +{ + uint32_t s0, s1; + + /* Load the input bytes and rearrange them so that s0 contains the + * most significant nibbles and s1 contains the least significant */ + s0 = (((uint32_t)(input[6])) << 24) | + (((uint32_t)(input[4])) << 16) | + (((uint32_t)(input[2])) << 8) | + ((uint32_t)(input[0])); + s1 = (((uint32_t)(input[7])) << 24) | + (((uint32_t)(input[5])) << 16) | + (((uint32_t)(input[3])) << 8) | + ((uint32_t)(input[1])); + + /* Apply the inverse of PERM_WORDS() from the function above */ + #define INV_PERM_WORDS(_x) \ + do { \ + uint32_t x = (_x); \ + bit_permute_step_32(x, 0x00aa00aa, 7); \ + bit_permute_step_32(x, 0x0000cccc, 14); \ + bit_permute_step_32(x, 0x00f000f0, 4); \ + bit_permute_step_32(x, 0x0000ff00, 8); \ + (_x) = x; \ + } while (0) + INV_PERM_WORDS(s0); + INV_PERM_WORDS(s1); + + /* Store the result into the output buffer as 32-bit words */ + le_store_word32(output + 4, s0); + le_store_word32(output, s1); +} + +void gift64n_encrypt (const gift64n_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { - uint32_t state[4]; - gift64nb_to_words(state, input); - gift64b_encrypt_core(ks, state, 0); - gift64nb_to_nibbles(output, state); + gift64n_to_words(output, input); + gift64b_encrypt(ks, output, output); + gift64n_to_nibbles(output, output); } -void gift64nb_decrypt +void gift64n_decrypt (const gift64n_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { - uint32_t state[4]; - gift64nb_to_words(state, input); - gift64b_decrypt_core(ks, state, 0); - gift64nb_to_nibbles(output, state); + gift64n_to_words(output, input); + gift64b_decrypt(ks, output, output); + gift64n_to_nibbles(output, output); } -/* 4-bit tweak values expanded to 32-bit in fixsliced form */ -static uint32_t const GIFT64_tweaks[16] = { - 0x00000000, 0xee11ee11, 0xdd22dd22, 0x33333333, 0xbb44bb44, 0x55555555, - 0x66666666, 0x88778877, 0x77887788, 0x99999999, 0xaaaaaaaa, 0x44bb44bb, - 0xcccccccc, 0x22dd22dd, 0x11ee11ee, 0xffffffff -}; - void gift64t_encrypt (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak) + const unsigned char *input, uint16_t tweak) { - uint32_t state[4]; - gift64n_to_words(state, input); - gift64b_encrypt_core(ks, state, GIFT64_tweaks[tweak]); - gift64n_to_nibbles(output, state); + uint16_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; + + /* Copy the plaintext into the state buffer and convert from nibbles */ + gift64n_to_words(output, input); + s0 = be_load_word16(output); + s1 = be_load_word16(output + 2); + s2 = be_load_word16(output + 4); + s3 = be_load_word16(output + 6); + + /* The key schedule is initialized with the key itself */ + w0 = ks->k[0]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[3]; + + /* Perform all 28 rounds */ + for (round = 0; round < 28; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 0xFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 64-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s0 ^= (uint16_t)w3; + s1 ^= (uint16_t)(w3 >> 16); + s3 ^= 0x8000U ^ GIFT64_RC[round]; + + /* AddTweak - XOR in the tweak every 4 rounds except the last */ + if (((round + 1) % 4) == 0 && round < 27) + s2 ^= tweak; + + /* Rotate the key schedule */ + 
temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word16(output, s0); + be_store_word16(output + 2, s1); + be_store_word16(output + 4, s2); + be_store_word16(output + 6, s3); + gift64n_to_nibbles(output, output); } void gift64t_decrypt (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak) + const unsigned char *input, uint16_t tweak) { - uint32_t state[4]; - gift64n_to_words(state, input); - gift64b_decrypt_core(ks, state, GIFT64_tweaks[tweak]); - gift64n_to_nibbles(output, state); + uint16_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; + + /* Copy the ciphertext into the state buffer and convert from nibbles */ + gift64n_to_words(output, input); + s0 = be_load_word16(output); + s1 = be_load_word16(output + 2); + s2 = be_load_word16(output + 4); + s3 = be_load_word16(output + 6); + + /* Generate the decryption key at the end of the last round. + * + * To do that, we run the block operation forward to determine the + * final state of the key schedule after the last round: + * + * w0 = ks->k[0]; + * w1 = ks->k[1]; + * w2 = ks->k[2]; + * w3 = ks->k[3]; + * for (round = 0; round < 28; ++round) { + * temp = w3; + * w3 = w2; + * w2 = w1; + * w1 = w0; + * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + * } + * + * We can short-cut all of the above by noticing that we don't need + * to do the word rotations. Every 4 rounds, the rotation alignment + * returns to the original position and each word has been rotated + * by applying the "2 right and 4 left" bit-rotation step to it. + * We then repeat that 7 times for the full 28 rounds. The overall + * effect is to apply a "14 right and 28 left" bit-rotation to every word + * in the key schedule. That is equivalent to "14 right and 12 left" + * on the 16-bit sub-words. 
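The short-cut described in the comment above can be checked mechanically: applying the per-round "2 right / 4 left" half-word rotation seven times (the word rotation realigns every 4 rounds, and 28 / 4 = 7) must give the same result as the single combined rotation used in the code. A minimal standalone sketch of that check follows (not part of the patch; C99, arbitrary test word).

#include <stdint.h>
#include <assert.h>

static uint32_t ks_rotate_once(uint32_t w)
{
    /* Per-round step: upper 16 bits rotated right by 2,
     * lower 16 bits rotated left by 4. */
    return ((w & 0xFFFC0000U) >> 2)  | ((w & 0x00030000U) << 14) |
           ((w & 0x00000FFFU) << 4)  | ((w & 0x0000F000U) >> 12);
}

static uint32_t ks_rotate_combined(uint32_t w)
{
    /* Combined step from the code above: upper 16 bits rotated right
     * by 14, lower 16 bits rotated left by 12. */
    return ((w & 0xC0000000U) >> 14) | ((w & 0x3FFF0000U) << 2) |
           ((w & 0x0000000FU) << 12) | ((w & 0x0000FFF0U) >> 4);
}

int main(void)
{
    uint32_t w = 0x12345678U;          /* arbitrary test word */
    uint32_t expect = ks_rotate_combined(w);
    for (int i = 0; i < 7; ++i)        /* 28 rounds / 4 rounds per cycle */
        w = ks_rotate_once(w);
    assert(w == expect);
    return 0;
}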
+ */ + w0 = ks->k[0]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[3]; + w0 = ((w0 & 0xC0000000U) >> 14) | ((w0 & 0x3FFF0000U) << 2) | + ((w0 & 0x0000000FU) << 12) | ((w0 & 0x0000FFF0U) >> 4); + w1 = ((w1 & 0xC0000000U) >> 14) | ((w1 & 0x3FFF0000U) << 2) | + ((w1 & 0x0000000FU) << 12) | ((w1 & 0x0000FFF0U) >> 4); + w2 = ((w2 & 0xC0000000U) >> 14) | ((w2 & 0x3FFF0000U) << 2) | + ((w2 & 0x0000000FU) << 12) | ((w2 & 0x0000FFF0U) >> 4); + w3 = ((w3 & 0xC0000000U) >> 14) | ((w3 & 0x3FFF0000U) << 2) | + ((w3 & 0x0000000FU) << 12) | ((w3 & 0x0000FFF0U) >> 4); + + /* Perform all 28 rounds */ + for (round = 28; round > 0; --round) { + /* Rotate the key schedule backwards */ + temp = w0; + w0 = w1; + w1 = w2; + w2 = w3; + w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | + ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); + + /* AddTweak - XOR in the tweak every 4 rounds except the last */ + if ((round % 4) == 0 && round != 28) + s2 ^= tweak; + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s0 ^= (uint16_t)w3; + s1 ^= (uint16_t)(w3 >> 16); + s3 ^= 0x8000U ^ GIFT64_RC[round - 1]; + + /* InvPermBits - apply the inverse of the 128-bit permutation */ + INV_PERM0(s0); + INV_PERM1(s1); + INV_PERM2(s2); + INV_PERM3(s3); + + /* InvSubCells - apply the inverse of the S-box */ + temp = s0; + s0 = s3; + s3 = temp; + s2 ^= s0 & s1; + s3 ^= 0xFFFFU; + s1 ^= s3; + s3 ^= s2; + s2 ^= s0 | s1; + s0 ^= s1 & s3; + s1 ^= s0 & s2; + } + + /* Pack the state into the plaintext buffer in nibble form */ + be_store_word16(output, s0); + be_store_word16(output + 2, s1); + be_store_word16(output + 4, s2); + be_store_word16(output + 6, s3); + gift64n_to_nibbles(output, output); } + +#endif /* GIFT64_LOW_MEMORY */ diff --git a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/internal-gift64.h b/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/internal-gift64.h index 40479c7..010359b 100644 --- a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/internal-gift64.h +++ b/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/internal-gift64.h @@ -28,6 +28,7 @@ * \brief GIFT-64 block cipher. * * References: https://eprint.iacr.org/2017/622.pdf, + * https://eprint.iacr.org/2020/412.pdf, * https://giftcipher.github.io/gift/ */ @@ -39,57 +40,63 @@ extern "C" { #endif /** + * \var GIFT64_LOW_MEMORY + * \brief Define this to 1 to use a low memory version of the key schedule. + * + * The default is to use the fix-sliced version of GIFT-64 which is very + * fast on 32-bit platforms but requires 48 bytes to store the key schedule. + * The large key schedule may be a problem on 8-bit and 16-bit platforms. + * The fix-sliced version also encrypts two blocks at a time in 32-bit + * words which is an unnecessary optimization for 8-bit platforms. + * + * GIFT64_LOW_MEMORY can be defined to 1 to select the original non + * fix-sliced version which only requires 16 bytes to store the key, + * with the rest of the key schedule expanded on the fly. + */ +#if !defined(GIFT64_LOW_MEMORY) +#if defined(__AVR__) +#define GIFT64_LOW_MEMORY 1 +#else +#define GIFT64_LOW_MEMORY 0 +#endif +#endif + +/** * \brief Size of a GIFT-64 block in bytes. */ #define GIFT64_BLOCK_SIZE 8 /** - * \brief Structure of the key schedule for GIFT-64 (bit-sliced). + * \brief Structure of the key schedule for GIFT-64. 
*/ typedef struct { uint32_t k[4]; /**< Words of the key schedule */ +#if !GIFT64_LOW_MEMORY uint32_t rk[8]; /**< Pre-computed round keys for fixsliced form */ +#endif -} gift64b_key_schedule_t; - -/** - * \brief Initializes the key schedule for GIFT-64 (bit-sliced). - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int gift64b_init - (gift64b_key_schedule_t *ks, const unsigned char *key, size_t key_len); +} gift64n_key_schedule_t; /** + * \fn void gift64n_update_round_keys(gift64n_key_schedule_t *ks); * \brief Updates the round keys after a change in the base key. * * \param ks Points to the key schedule to update. */ -void gift64b_update_round_keys(gift64b_key_schedule_t *ks); - -/** - * \brief Structure of the key schedule for GIFT-64 (nibble-based). - */ -typedef gift64b_key_schedule_t gift64n_key_schedule_t; +#if GIFT64_LOW_MEMORY +#define gift64n_update_round_keys(ks) do { ; } while (0) /* Not needed */ +#else +void gift64n_update_round_keys(gift64n_key_schedule_t *ks); +#endif /** * \brief Initializes the key schedule for GIFT-64 (nibble-based). * * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. + * \param key Points to the 16 bytes of the key data. */ -int gift64n_init - (gift64n_key_schedule_t *ks, const unsigned char *key, size_t key_len); +void gift64n_init(gift64n_key_schedule_t *ks, const unsigned char *key); /** * \brief Encrypts a 64-bit block with GIFT-64 (nibble-based). @@ -119,33 +126,23 @@ void gift64n_decrypt (const gift64n_key_schedule_t *ks, unsigned char *output, const unsigned char *input); -/** - * \brief Encrypts a 64-bit block with GIFT-64 (nibble-based big-endian). - * - * \param ks Points to the GIFT-64 key schedule. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void gift64nb_encrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 64-bit block with GIFT-64 (nibble-based big-endian). - * - * \param ks Points to the GIFT-64 key schedule. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. 
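For callers, the net effect of the header changes is that gift64n_init() no longer takes a length or returns a status, and the gift64t_* tweak parameter is now one of the pre-expanded 16-bit GIFT64T_TWEAK_* constants defined just below. A minimal usage sketch follows (not part of the patch; the all-zero key and block are placeholders only). The same calls work whether or not GIFT64_LOW_MEMORY is forced to 1 at build time (for example with -DGIFT64_LOW_MEMORY=1), since the header keeps the API identical in both configurations.

#include "internal-gift64.h"

int main(void)
{
    unsigned char key[16] = {0};   /* placeholder all-zero key */
    unsigned char block[8] = {0};  /* placeholder all-zero block */
    gift64n_key_schedule_t ks;

    gift64n_init(&ks, key);        /* fixed 16-byte key, cannot fail */
    gift64t_encrypt(&ks, block, block, GIFT64T_TWEAK_2);   /* in-place */
    gift64t_decrypt(&ks, block, block, GIFT64T_TWEAK_2);
    return 0;
}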
- */ -void gift64nb_decrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); +/* 4-bit tweak values expanded to 16-bit for TweGIFT-64 */ +#define GIFT64T_TWEAK_0 0x0000 /**< TweGIFT-64 tweak value 0 */ +#define GIFT64T_TWEAK_1 0xe1e1 /**< TweGIFT-64 tweak value 1 */ +#define GIFT64T_TWEAK_2 0xd2d2 /**< TweGIFT-64 tweak value 2 */ +#define GIFT64T_TWEAK_3 0x3333 /**< TweGIFT-64 tweak value 3 */ +#define GIFT64T_TWEAK_4 0xb4b4 /**< TweGIFT-64 tweak value 4 */ +#define GIFT64T_TWEAK_5 0x5555 /**< TweGIFT-64 tweak value 5 */ +#define GIFT64T_TWEAK_6 0x6666 /**< TweGIFT-64 tweak value 6 */ +#define GIFT64T_TWEAK_7 0x8787 /**< TweGIFT-64 tweak value 7 */ +#define GIFT64T_TWEAK_8 0x7878 /**< TweGIFT-64 tweak value 8 */ +#define GIFT64T_TWEAK_9 0x9999 /**< TweGIFT-64 tweak value 9 */ +#define GIFT64T_TWEAK_10 0xaaaa /**< TweGIFT-64 tweak value 10 */ +#define GIFT64T_TWEAK_11 0x4b4b /**< TweGIFT-64 tweak value 11 */ +#define GIFT64T_TWEAK_12 0xcccc /**< TweGIFT-64 tweak value 12 */ +#define GIFT64T_TWEAK_13 0x2d2d /**< TweGIFT-64 tweak value 13 */ +#define GIFT64T_TWEAK_14 0x1e1e /**< TweGIFT-64 tweak value 14 */ +#define GIFT64T_TWEAK_15 0xffff /**< TweGIFT-64 tweak value 15 */ /** * \brief Encrypts a 64-bit block with TweGIFT-64 (tweakable variant). @@ -153,7 +150,7 @@ void gift64nb_decrypt * \param ks Points to the GIFT-64 key schedule. * \param output Output buffer which must be at least 8 bytes in length. * \param input Input buffer which must be at least 8 bytes in length. - * \param tweak 4-bit tweak value. + * \param tweak 4-bit tweak value expanded to 16-bit. * * The \a input and \a output buffers can be the same buffer for * in-place encryption. @@ -165,7 +162,7 @@ void gift64nb_decrypt */ void gift64t_encrypt (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak); + const unsigned char *input, uint16_t tweak); /** * \brief Decrypts a 64-bit block with TweGIFT-64 (tweakable variant). @@ -173,7 +170,7 @@ void gift64t_encrypt * \param ks Points to the GIFT-64 key schedule. * \param output Output buffer which must be at least 8 bytes in length. * \param input Input buffer which must be at least 8 bytes in length. - * \param tweak 4-bit tweak value. + * \param tweak 4-bit tweak value expanded to 16-bit. * * The \a input and \a output buffers can be the same buffer for * in-place encryption. @@ -185,7 +182,7 @@ void gift64t_encrypt */ void gift64t_decrypt (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak); + const unsigned char *input, uint16_t tweak); #ifdef __cplusplus } diff --git a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/internal-util.h b/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/internal-util.h +++ b/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. 
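The composed rotation macros that follow build every 32-bit rotation out of rotations by 1 and by multiples of 8, since those are the cheap cases on AVR. As one concrete check, the composition used for leftRotate5 ("rotate left by 8, then right by 1 three times") should equal a plain rotate-left-by-5; a minimal standalone sketch of that check is shown below (not part of the patch; C99, arbitrary test value).

#include <stdint.h>
#include <assert.h>

static uint32_t rotl32(uint32_t x, unsigned n) { return (x << n) | (x >> (32u - n)); }
static uint32_t rotr32(uint32_t x, unsigned n) { return (x >> n) | (x << (32u - n)); }

int main(void)
{
    uint32_t x = 0xDEADBEEFu;   /* arbitrary test value */
    /* leftRotate5 in the composed scheme: left by 8, then right by 1, three times. */
    uint32_t composed = rotr32(rotr32(rotr32(rotl32(x, 8u), 1u), 1u), 1u);
    assert(composed == rotl32(x, 5u));
    (void)composed;
    return 0;
}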
*/ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + 
+/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/lotus-locus.c b/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/lotus-locus.c index e60b084..4a1efd0 100644 --- a/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/lotus-locus.c +++ b/lotus-locus/Implementations/crypto_aead/twegift64locusaeadv1/rhys/lotus-locus.c @@ -57,7 +57,7 @@ STATIC_INLINE void lotus_or_locus_mul_2(gift64n_key_schedule_t *ks) ks->k[1] = (ks->k[1] << 1) | (ks->k[2] >> 31); ks->k[2] = (ks->k[2] << 1) | (ks->k[3] >> 31); ks->k[3] = (ks->k[3] << 1) ^ (mask & 0x87); - gift64b_update_round_keys(ks); + gift64n_update_round_keys(ks); } /** @@ -77,12 +77,12 @@ static void lotus_or_locus_init const unsigned char *nonce, unsigned char *T) { - gift64n_init(ks, key, LOTUS_AEAD_KEY_SIZE); + gift64n_init(ks, key); memset(deltaN, 0, 
GIFT64_BLOCK_SIZE); - gift64t_encrypt(ks, deltaN, deltaN, 0); + gift64t_encrypt(ks, deltaN, deltaN, GIFT64T_TWEAK_0); lw_xor_block_2_src(T, key, nonce, LOTUS_AEAD_KEY_SIZE); - gift64n_init(ks, T, LOTUS_AEAD_KEY_SIZE); - gift64t_encrypt(ks, deltaN, deltaN, 1); + gift64n_init(ks, T); + gift64t_encrypt(ks, deltaN, deltaN, GIFT64T_TWEAK_1); } /** @@ -105,7 +105,7 @@ static void lotus_or_locus_process_ad while (adlen > GIFT64_BLOCK_SIZE) { lotus_or_locus_mul_2(ks); lw_xor_block_2_src(X, ad, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(ks, X, X, 2); + gift64t_encrypt(ks, X, X, GIFT64T_TWEAK_2); lw_xor_block(V, X, GIFT64_BLOCK_SIZE); ad += GIFT64_BLOCK_SIZE; adlen -= GIFT64_BLOCK_SIZE; @@ -116,10 +116,10 @@ static void lotus_or_locus_process_ad memcpy(X, deltaN, GIFT64_BLOCK_SIZE); lw_xor_block(X, ad, temp); X[temp] ^= 0x01; - gift64t_encrypt(ks, X, X, 3); + gift64t_encrypt(ks, X, X, GIFT64T_TWEAK_3); } else { lw_xor_block_2_src(X, ad, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(ks, X, X, 2); + gift64t_encrypt(ks, X, X, GIFT64T_TWEAK_2); } lw_xor_block(V, X, GIFT64_BLOCK_SIZE); } @@ -142,7 +142,7 @@ static void lotus_or_locus_gen_tag lotus_or_locus_mul_2(ks); lw_xor_block(W, deltaN, GIFT64_BLOCK_SIZE); lw_xor_block(W, V, GIFT64_BLOCK_SIZE); - gift64t_encrypt(ks, W, W, 6); + gift64t_encrypt(ks, W, W, GIFT64T_TWEAK_6); lw_xor_block_2_src(tag, W, deltaN, GIFT64_BLOCK_SIZE); } @@ -180,15 +180,15 @@ int lotus_aead_encrypt while (mlen > (GIFT64_BLOCK_SIZE * 2)) { lotus_or_locus_mul_2(&ks); lw_xor_block_2_src(X1, m, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X1, 4); + gift64t_encrypt(&ks, X2, X1, GIFT64T_TWEAK_4); lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, 4); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_4); lw_xor_block_2_src (X2, m + GIFT64_BLOCK_SIZE, X2, GIFT64_BLOCK_SIZE); lw_xor_block_2_src(c, X2, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, 5); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_5); lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, 5); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_5); lw_xor_block_2_src (c + GIFT64_BLOCK_SIZE, X1, X2, GIFT64_BLOCK_SIZE); c += GIFT64_BLOCK_SIZE * 2; @@ -199,9 +199,9 @@ int lotus_aead_encrypt lotus_or_locus_mul_2(&ks); memcpy(X1, deltaN, GIFT64_BLOCK_SIZE); X1[0] ^= (unsigned char)temp; - gift64t_encrypt(&ks, X2, X1, 12); + gift64t_encrypt(&ks, X2, X1, GIFT64T_TWEAK_12); lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, 12); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_12); if (temp <= GIFT64_BLOCK_SIZE) { lw_xor_block(WV, m, temp); lw_xor_block(X2, m, temp); @@ -212,9 +212,9 @@ int lotus_aead_encrypt c += GIFT64_BLOCK_SIZE; m += GIFT64_BLOCK_SIZE; temp -= GIFT64_BLOCK_SIZE; - gift64t_encrypt(&ks, X2, X2, 13); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_13); lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, 13); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_13); lw_xor_block(WV, m, temp); lw_xor_block(X1, X2, temp); lw_xor_block_2_src(c, X1, m, temp); @@ -265,14 +265,14 @@ int lotus_aead_decrypt while (clen > (GIFT64_BLOCK_SIZE * 2)) { lotus_or_locus_mul_2(&ks); lw_xor_block_2_src(X1, c, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X1, 5); + gift64t_encrypt(&ks, X2, X1, GIFT64T_TWEAK_5); lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, 5); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_5); lw_xor_block(X2, c + GIFT64_BLOCK_SIZE, GIFT64_BLOCK_SIZE); lw_xor_block_2_src(m, X2, deltaN, GIFT64_BLOCK_SIZE); - 
gift64t_encrypt(&ks, X2, X2, 4); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_4); lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, 4); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_4); lw_xor_block_2_src (m + GIFT64_BLOCK_SIZE, X1, X2, GIFT64_BLOCK_SIZE); c += GIFT64_BLOCK_SIZE * 2; @@ -283,9 +283,9 @@ int lotus_aead_decrypt lotus_or_locus_mul_2(&ks); memcpy(X1, deltaN, GIFT64_BLOCK_SIZE); X1[0] ^= (unsigned char)temp; - gift64t_encrypt(&ks, X2, X1, 12); + gift64t_encrypt(&ks, X2, X1, GIFT64T_TWEAK_12); lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, 12); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_12); if (temp <= GIFT64_BLOCK_SIZE) { lw_xor_block_2_src(m, X2, c, temp); lw_xor_block(m, deltaN, temp); @@ -298,9 +298,9 @@ int lotus_aead_decrypt c += GIFT64_BLOCK_SIZE; m += GIFT64_BLOCK_SIZE; temp -= GIFT64_BLOCK_SIZE; - gift64t_encrypt(&ks, X2, X2, 13); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_13); lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, 13); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_13); lw_xor_block(X1, X2, temp); lw_xor_block_2_src(m, X1, c, temp); lw_xor_block(WV, m, temp); @@ -346,9 +346,9 @@ int locus_aead_encrypt while (mlen > GIFT64_BLOCK_SIZE) { lotus_or_locus_mul_2(&ks); lw_xor_block_2_src(X, m, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X, X, 4); + gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_4); lw_xor_block(WV, X, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X, X, 4); + gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_4); lw_xor_block_2_src(c, X, deltaN, GIFT64_BLOCK_SIZE); c += GIFT64_BLOCK_SIZE; m += GIFT64_BLOCK_SIZE; @@ -358,10 +358,10 @@ int locus_aead_encrypt lotus_or_locus_mul_2(&ks); memcpy(X, deltaN, GIFT64_BLOCK_SIZE); X[0] ^= (unsigned char)temp; - gift64t_encrypt(&ks, X, X, 5); + gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_5); lw_xor_block(WV, X, GIFT64_BLOCK_SIZE); lw_xor_block(WV, m, temp); - gift64t_encrypt(&ks, X, X, 5); + gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_5); lw_xor_block(X, deltaN, temp); lw_xor_block_2_src(c, m, X, temp); c += temp; @@ -409,9 +409,9 @@ int locus_aead_decrypt while (clen > GIFT64_BLOCK_SIZE) { lotus_or_locus_mul_2(&ks); lw_xor_block_2_src(X, c, deltaN, GIFT64_BLOCK_SIZE); - gift64t_decrypt(&ks, X, X, 4); + gift64t_decrypt(&ks, X, X, GIFT64T_TWEAK_4); lw_xor_block(WV, X, GIFT64_BLOCK_SIZE); - gift64t_decrypt(&ks, X, X, 4); + gift64t_decrypt(&ks, X, X, GIFT64T_TWEAK_4); lw_xor_block_2_src(m, X, deltaN, GIFT64_BLOCK_SIZE); c += GIFT64_BLOCK_SIZE; m += GIFT64_BLOCK_SIZE; @@ -421,9 +421,9 @@ int locus_aead_decrypt lotus_or_locus_mul_2(&ks); memcpy(X, deltaN, GIFT64_BLOCK_SIZE); X[0] ^= (unsigned char)temp; - gift64t_encrypt(&ks, X, X, 5); + gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_5); lw_xor_block(WV, X, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X, X, 5); + gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_5); lw_xor_block(X, deltaN, temp); lw_xor_block_2_src(m, c, X, temp); lw_xor_block(WV, m, temp); diff --git a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/aead-common.c b/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/aead-common.h b/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include <stddef.h> - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized.
- */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Absorbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * provide the "hash", "init", "absorb", and "squeeze" functions.
- */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/api.h b/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/api.h deleted file mode 100644 index 4bf8f5c..0000000 --- a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 8 -#define CRYPTO_NOOVERLAP 1 diff --git a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/encrypt.c b/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/encrypt.c deleted file mode 100644 index e089543..0000000 --- a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "lotus-locus.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return lotus_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return lotus_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/internal-gift64-avr.S b/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/internal-gift64-avr.S deleted file mode 100644 index fdb668d..0000000 --- a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/internal-gift64-avr.S +++ /dev/null @@ -1,6047 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global gift64n_init - .type gift64n_init, @function -gift64n_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - std Z+12,r18 - std Z+13,r19 - std Z+14,r20 - std Z+15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - std Z+4,r18 - std Z+5,r19 - std Z+6,r20 - std Z+7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - ret - .size gift64n_init, .-gift64n_init - - .text -.global gift64n_encrypt - .type gift64n_encrypt, @function -gift64n_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 28 - ld r6,Z - ldd r7,Z+1 - ldd r8,Z+2 - ldd r9,Z+3 - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Z+4 - ldd r7,Z+5 - ldd r8,Z+6 - ldd r9,Z+7 - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Z+8 - ldd r7,Z+9 - ldd r8,Z+10 - ldd r9,Z+11 - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Z+12 - ldd r7,Z+13 - ldd r8,Z+14 - ldd r9,Z+15 - ld r18,X+ - ld 
r19,X+ - bst r18,0 - bld r20,0 - bst r18,1 - bld r22,0 - bst r18,2 - bld r2,0 - bst r18,3 - bld r4,0 - bst r18,4 - bld r20,1 - bst r18,5 - bld r22,1 - bst r18,6 - bld r2,1 - bst r18,7 - bld r4,1 - bst r19,0 - bld r20,2 - bst r19,1 - bld r22,2 - bst r19,2 - bld r2,2 - bst r19,3 - bld r4,2 - bst r19,4 - bld r20,3 - bst r19,5 - bld r22,3 - bst r19,6 - bld r2,3 - bst r19,7 - bld r4,3 - ld r18,X+ - ld r19,X+ - bst r18,0 - bld r20,4 - bst r18,1 - bld r22,4 - bst r18,2 - bld r2,4 - bst r18,3 - bld r4,4 - bst r18,4 - bld r20,5 - bst r18,5 - bld r22,5 - bst r18,6 - bld r2,5 - bst r18,7 - bld r4,5 - bst r19,0 - bld r20,6 - bst r19,1 - bld r22,6 - bst r19,2 - bld r2,6 - bst r19,3 - bld r4,6 - bst r19,4 - bld r20,7 - bst r19,5 - bld r22,7 - bst r19,6 - bld r2,7 - bst r19,7 - bld r4,7 - ld r18,X+ - ld r19,X+ - bst r18,0 - bld r21,0 - bst r18,1 - bld r23,0 - bst r18,2 - bld r3,0 - bst r18,3 - bld r5,0 - bst r18,4 - bld r21,1 - bst r18,5 - bld r23,1 - bst r18,6 - bld r3,1 - bst r18,7 - bld r5,1 - bst r19,0 - bld r21,2 - bst r19,1 - bld r23,2 - bst r19,2 - bld r3,2 - bst r19,3 - bld r5,2 - bst r19,4 - bld r21,3 - bst r19,5 - bld r23,3 - bst r19,6 - bld r3,3 - bst r19,7 - bld r5,3 - ld r18,X+ - ld r19,X+ - bst r18,0 - bld r21,4 - bst r18,1 - bld r23,4 - bst r18,2 - bld r3,4 - bst r18,3 - bld r5,4 - bst r18,4 - bld r21,5 - bst r18,5 - bld r23,5 - bst r18,6 - bld r3,5 - bst r18,7 - bld r5,5 - bst r19,0 - bld r21,6 - bst r19,1 - bld r23,6 - bst r19,2 - bld r3,6 - bst r19,3 - bld r5,6 - bst r19,4 - bld r21,7 - bst r19,5 - bld r23,7 - bst r19,6 - bld r3,7 - bst r19,7 - bld r5,7 - rcall 1061f - ldi r18,1 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - rcall 1061f - ldi r18,3 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - rcall 1061f - ldi r18,7 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - rcall 1061f - ldi r18,15 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - rcall 1061f - ldi r18,31 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - rcall 1061f - ldi r18,62 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol 
r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - rcall 1061f - ldi r18,61 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - rcall 1061f - ldi r18,59 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - rcall 1061f - ldi r18,55 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - rcall 1061f - ldi r18,47 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - rcall 1061f - ldi r18,30 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - rcall 1061f - ldi r18,60 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - rcall 1061f - ldi r18,57 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - rcall 1061f - ldi r18,51 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - rcall 1061f - ldi r18,39 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd 
r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - rcall 1061f - ldi r18,14 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - rcall 1061f - ldi r18,29 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - rcall 1061f - ldi r18,58 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - rcall 1061f - ldi r18,53 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - rcall 1061f - ldi r18,43 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - rcall 1061f - ldi r18,22 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - rcall 1061f - ldi r18,44 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - rcall 1061f - ldi r18,24 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - rcall 1061f - ldi r18,48 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - rcall 1061f - ldi r18,33 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl 
r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - rcall 1061f - ldi r18,2 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - rcall 1061f - ldi r18,5 - ldi r19,128 - eor r4,r18 - eor r5,r19 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - rcall 1061f - ldi r18,11 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rjmp 1252f -1061: - mov r0,r20 - and r0,r2 - eor r22,r0 - mov r0,r21 - and r0,r3 - eor r23,r0 - mov r0,r22 - and r0,r4 - eor r20,r0 - mov r0,r23 - and r0,r5 - eor r21,r0 - mov r0,r20 - or r0,r22 - eor r2,r0 - mov r0,r21 - or r0,r23 - eor r3,r0 - eor r4,r2 - eor r5,r3 - eor r22,r4 - eor r23,r5 - com r4 - com r5 - movw r18,r20 - mov r0,r22 - and r0,r18 - eor r2,r0 - mov r0,r23 - and r0,r19 - eor r3,r0 - movw r20,r4 - movw r4,r18 - bst r20,1 - bld r0,0 - bst r20,4 - bld r20,1 - bst r20,3 - bld r20,4 - bst r21,4 - bld r20,3 - bst r0,0 - bld r21,4 - bst r20,2 - bld r0,0 - bst r21,0 - bld r20,2 - bst r0,0 - bld r21,0 - bst r20,5 - bld r0,0 - bst r20,7 - bld r20,5 - bst r21,7 - bld r20,7 - bst r21,5 - bld r21,7 - bst r0,0 - bld r21,5 - bst r20,6 - bld r0,0 - bst r21,3 - bld r20,6 - bst r21,6 - bld r21,3 - bst r21,1 - bld r21,6 - bst r0,0 - bld r21,1 - bst r22,0 - bld r0,0 - bst r22,1 - bld r22,0 - bst r22,5 - bld r22,1 - bst r22,4 - bld r22,5 - bst r0,0 - bld r22,4 - bst r22,2 - bld r0,0 - bst r23,1 - bld r22,2 - bst r22,7 - bld r23,1 - bst r23,4 - bld r22,7 - bst r0,0 - bld r23,4 - bst r22,3 - bld r0,0 - bst r23,5 - bld r22,3 - bst r22,6 - bld r23,5 - bst r23,0 - bld r22,6 - bst r0,0 - bld r23,0 - bst r23,2 - bld r0,0 - bst r23,3 - bld r23,2 - bst r23,7 - bld r23,3 - bst r23,6 - bld r23,7 - bst r0,0 - bld r23,6 - bst r2,0 - bld r0,0 - bst r2,2 - bld r2,0 - bst r3,2 - bld r2,2 - bst r3,0 - bld r3,2 - bst r0,0 - bld r3,0 - bst r2,1 - bld r0,0 - bst r2,6 - bld r2,1 - bst r3,1 - bld r2,6 - bst r2,4 - bld r3,1 - bst r0,0 - bld r2,4 - bst r2,3 - bld r0,0 - bst r3,6 - bld r2,3 - bst r3,3 - bld r3,6 - bst r3,4 - bld r3,3 - bst r0,0 - bld r3,4 - bst r2,7 - bld r0,0 - bst r3,5 - bld r2,7 - bst r0,0 - bld r3,5 - bst r4,0 - bld r0,0 - bst r4,3 - bld r4,0 - bst r5,7 - bld r4,3 - bst r5,4 - bld r5,7 - bst r0,0 - bld r5,4 - bst r4,1 - bld r0,0 - bst r4,7 - bld r4,1 - bst r5,6 - bld r4,7 - bst r5,0 - bld r5,6 - bst r0,0 - bld r5,0 - bst r4,2 - bld r0,0 - bst r5,3 - bld r4,2 - bst r5,5 - bld r5,3 - bst r4,4 - bld r5,5 - bst r0,0 - bld r4,4 - bst r4,5 - bld r0,0 - bst r4,6 - bld r4,5 - bst r5,2 - bld r4,6 - bst r5,1 - bld r5,2 - bst r0,0 - bld r5,1 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - ret -1252: - ldd r26,Y+17 - ldd r27,Y+18 - bst r20,0 - bld r18,0 - bst r22,0 - bld r18,1 - bst r2,0 - bld r18,2 - bst r4,0 - bld r18,3 - bst r20,1 - bld r18,4 - bst r22,1 - bld r18,5 - bst r2,1 - bld r18,6 - bst r4,1 - bld r18,7 - bst r20,2 - bld r19,0 - bst r22,2 - bld r19,1 - bst r2,2 - bld r19,2 - bst r4,2 - bld 
r19,3 - bst r20,3 - bld r19,4 - bst r22,3 - bld r19,5 - bst r2,3 - bld r19,6 - bst r4,3 - bld r19,7 - st X+,r18 - st X+,r19 - bst r20,4 - bld r18,0 - bst r22,4 - bld r18,1 - bst r2,4 - bld r18,2 - bst r4,4 - bld r18,3 - bst r20,5 - bld r18,4 - bst r22,5 - bld r18,5 - bst r2,5 - bld r18,6 - bst r4,5 - bld r18,7 - bst r20,6 - bld r19,0 - bst r22,6 - bld r19,1 - bst r2,6 - bld r19,2 - bst r4,6 - bld r19,3 - bst r20,7 - bld r19,4 - bst r22,7 - bld r19,5 - bst r2,7 - bld r19,6 - bst r4,7 - bld r19,7 - st X+,r18 - st X+,r19 - bst r21,0 - bld r18,0 - bst r23,0 - bld r18,1 - bst r3,0 - bld r18,2 - bst r5,0 - bld r18,3 - bst r21,1 - bld r18,4 - bst r23,1 - bld r18,5 - bst r3,1 - bld r18,6 - bst r5,1 - bld r18,7 - bst r21,2 - bld r19,0 - bst r23,2 - bld r19,1 - bst r3,2 - bld r19,2 - bst r5,2 - bld r19,3 - bst r21,3 - bld r19,4 - bst r23,3 - bld r19,5 - bst r3,3 - bld r19,6 - bst r5,3 - bld r19,7 - st X+,r18 - st X+,r19 - bst r21,4 - bld r18,0 - bst r23,4 - bld r18,1 - bst r3,4 - bld r18,2 - bst r5,4 - bld r18,3 - bst r21,5 - bld r18,4 - bst r23,5 - bld r18,5 - bst r3,5 - bld r18,6 - bst r5,5 - bld r18,7 - bst r21,6 - bld r19,0 - bst r23,6 - bld r19,1 - bst r3,6 - bld r19,2 - bst r5,6 - bld r19,3 - bst r21,7 - bld r19,4 - bst r23,7 - bld r19,5 - bst r3,7 - bld r19,6 - bst r5,7 - bld r19,7 - st X+,r18 - st X+,r19 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift64n_encrypt, .-gift64n_encrypt - - .text -.global gift64n_decrypt - .type gift64n_decrypt, @function -gift64n_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 28 - ld r6,Z - ldd r7,Z+1 - ldd r8,Z+2 - ldd r9,Z+3 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Z+4 - ldd r7,Z+5 - ldd r8,Z+6 - ldd r9,Z+7 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Z+8 - ldd r7,Z+9 - ldd r8,Z+10 - ldd r9,Z+11 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Z+12 - ldd r7,Z+13 - ldd r8,Z+14 - ldd r9,Z+15 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ld r18,X+ - ld r19,X+ - bst r18,0 - bld r20,0 - bst r18,1 - bld r22,0 - bst r18,2 - bld r2,0 - bst r18,3 - bld r4,0 - bst r18,4 - bld r20,1 - bst r18,5 - bld r22,1 - bst r18,6 - bld r2,1 - bst r18,7 - bld r4,1 - bst r19,0 - bld r20,2 - bst r19,1 - bld r22,2 - bst r19,2 - bld r2,2 - bst r19,3 - bld r4,2 - bst r19,4 - bld r20,3 - bst r19,5 - bld r22,3 - bst r19,6 - bld r2,3 - bst r19,7 - bld r4,3 - ld 
r18,X+ - ld r19,X+ - bst r18,0 - bld r20,4 - bst r18,1 - bld r22,4 - bst r18,2 - bld r2,4 - bst r18,3 - bld r4,4 - bst r18,4 - bld r20,5 - bst r18,5 - bld r22,5 - bst r18,6 - bld r2,5 - bst r18,7 - bld r4,5 - bst r19,0 - bld r20,6 - bst r19,1 - bld r22,6 - bst r19,2 - bld r2,6 - bst r19,3 - bld r4,6 - bst r19,4 - bld r20,7 - bst r19,5 - bld r22,7 - bst r19,6 - bld r2,7 - bst r19,7 - bld r4,7 - ld r18,X+ - ld r19,X+ - bst r18,0 - bld r21,0 - bst r18,1 - bld r23,0 - bst r18,2 - bld r3,0 - bst r18,3 - bld r5,0 - bst r18,4 - bld r21,1 - bst r18,5 - bld r23,1 - bst r18,6 - bld r3,1 - bst r18,7 - bld r5,1 - bst r19,0 - bld r21,2 - bst r19,1 - bld r23,2 - bst r19,2 - bld r3,2 - bst r19,3 - bld r5,2 - bst r19,4 - bld r21,3 - bst r19,5 - bld r23,3 - bst r19,6 - bld r3,3 - bst r19,7 - bld r5,3 - ld r18,X+ - ld r19,X+ - bst r18,0 - bld r21,4 - bst r18,1 - bld r23,4 - bst r18,2 - bld r3,4 - bst r18,3 - bld r5,4 - bst r18,4 - bld r21,5 - bst r18,5 - bld r23,5 - bst r18,6 - bld r3,5 - bst r18,7 - bld r5,5 - bst r19,0 - bld r21,6 - bst r19,1 - bld r23,6 - bst r19,2 - bld r3,6 - bst r19,3 - bld r5,6 - bst r19,4 - bld r21,7 - bst r19,5 - bld r23,7 - bst r19,6 - bld r3,7 - bst r19,7 - bld r5,7 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,11 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,5 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,2 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,33 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,48 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,24 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror 
r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,44 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,22 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,43 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,53 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,58 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,29 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,14 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,39 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,51 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,57 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd 
r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,60 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,30 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,47 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,55 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,59 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,61 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,62 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,31 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r6,Y+1 - ldd r7,Y+2 - ldd r8,Y+3 - ldd r9,Y+4 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,15 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+1,r6 - std Y+2,r7 - std Y+3,r8 - std Y+4,r9 - ldd r6,Y+5 - ldd r7,Y+6 - ldd r8,Y+7 - ldd r9,Y+8 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,7 - ldi r19,128 - eor 
r4,r18 - eor r5,r19 - rcall 1173f - std Y+5,r6 - std Y+6,r7 - std Y+7,r8 - std Y+8,r9 - ldd r6,Y+9 - ldd r7,Y+10 - ldd r8,Y+11 - ldd r9,Y+12 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,3 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - std Y+9,r6 - std Y+10,r7 - std Y+11,r8 - std Y+12,r9 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - mov r0,r1 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - lsr r7 - ror r6 - ror r0 - or r7,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - ldi r18,1 - ldi r19,128 - eor r4,r18 - eor r5,r19 - rcall 1173f - rjmp 1362f -1173: - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - bst r20,1 - bld r0,0 - bst r21,4 - bld r20,1 - bst r20,3 - bld r21,4 - bst r20,4 - bld r20,3 - bst r0,0 - bld r20,4 - bst r20,2 - bld r0,0 - bst r21,0 - bld r20,2 - bst r0,0 - bld r21,0 - bst r20,5 - bld r0,0 - bst r21,5 - bld r20,5 - bst r21,7 - bld r21,5 - bst r20,7 - bld r21,7 - bst r0,0 - bld r20,7 - bst r20,6 - bld r0,0 - bst r21,1 - bld r20,6 - bst r21,6 - bld r21,1 - bst r21,3 - bld r21,6 - bst r0,0 - bld r21,3 - bst r22,0 - bld r0,0 - bst r22,4 - bld r22,0 - bst r22,5 - bld r22,4 - bst r22,1 - bld r22,5 - bst r0,0 - bld r22,1 - bst r22,2 - bld r0,0 - bst r23,4 - bld r22,2 - bst r22,7 - bld r23,4 - bst r23,1 - bld r22,7 - bst r0,0 - bld r23,1 - bst r22,3 - bld r0,0 - bst r23,0 - bld r22,3 - bst r22,6 - bld r23,0 - bst r23,5 - bld r22,6 - bst r0,0 - bld r23,5 - bst r23,2 - bld r0,0 - bst r23,6 - bld r23,2 - bst r23,7 - bld r23,6 - bst r23,3 - bld r23,7 - bst r0,0 - bld r23,3 - bst r2,0 - bld r0,0 - bst r3,0 - bld r2,0 - bst r3,2 - bld r3,0 - bst r2,2 - bld r3,2 - bst r0,0 - bld r2,2 - bst r2,1 - bld r0,0 - bst r2,4 - bld r2,1 - bst r3,1 - bld r2,4 - bst r2,6 - bld r3,1 - bst r0,0 - bld r2,6 - bst r2,3 - bld r0,0 - bst r3,4 - bld r2,3 - bst r3,3 - bld r3,4 - bst r3,6 - bld r3,3 - bst r0,0 - bld r3,6 - bst r2,7 - bld r0,0 - bst r3,5 - bld r2,7 - bst r0,0 - bld r3,5 - bst r4,0 - bld r0,0 - bst r5,4 - bld r4,0 - bst r5,7 - bld r5,4 - bst r4,3 - bld r5,7 - bst r0,0 - bld r4,3 - bst r4,1 - bld r0,0 - bst r5,0 - bld r4,1 - bst r5,6 - bld r5,0 - bst r4,7 - bld r5,6 - bst r0,0 - bld r4,7 - bst r4,2 - bld r0,0 - bst r4,4 - bld r4,2 - bst r5,5 - bld r4,4 - bst r5,3 - bld r5,5 - bst r0,0 - bld r5,3 - bst r4,5 - bld r0,0 - bst r5,1 - bld r4,5 - bst r5,2 - bld r5,1 - bst r4,6 - bld r5,2 - bst r0,0 - bld r4,6 - movw r18,r4 - movw r4,r20 - movw r20,r18 - and r18,r22 - and r19,r23 - eor r2,r18 - eor r3,r19 - com r4 - com r5 - eor r22,r4 - eor r23,r5 - eor r4,r2 - eor r5,r3 - mov r0,r20 - or r0,r22 - eor r2,r0 - mov r0,r21 - or r0,r23 - eor r3,r0 - mov r0,r22 - and r0,r4 - eor r20,r0 - mov r0,r23 - and r0,r5 - eor r21,r0 - mov r0,r20 - and r0,r2 - eor r22,r0 - mov r0,r21 - and r0,r3 - eor r23,r0 - ret -1362: - ldd r26,Y+17 - ldd r27,Y+18 - bst r20,0 - bld r18,0 - bst r22,0 - bld r18,1 - bst r2,0 - bld r18,2 - bst r4,0 - bld r18,3 - bst r20,1 - bld r18,4 - bst r22,1 - bld r18,5 - bst r2,1 - bld r18,6 - bst r4,1 - bld r18,7 - bst r20,2 - bld r19,0 - bst r22,2 - bld r19,1 - bst r2,2 - bld r19,2 - bst r4,2 - bld r19,3 - bst r20,3 - bld r19,4 - bst r22,3 - bld r19,5 - bst r2,3 - bld r19,6 - bst r4,3 - bld r19,7 - st X+,r18 - st X+,r19 - bst r20,4 - bld r18,0 - bst r22,4 - bld r18,1 - bst r2,4 - bld r18,2 - bst r4,4 - bld r18,3 - bst r20,5 - bld r18,4 - bst r22,5 - bld r18,5 - bst 
r2,5 - bld r18,6 - bst r4,5 - bld r18,7 - bst r20,6 - bld r19,0 - bst r22,6 - bld r19,1 - bst r2,6 - bld r19,2 - bst r4,6 - bld r19,3 - bst r20,7 - bld r19,4 - bst r22,7 - bld r19,5 - bst r2,7 - bld r19,6 - bst r4,7 - bld r19,7 - st X+,r18 - st X+,r19 - bst r21,0 - bld r18,0 - bst r23,0 - bld r18,1 - bst r3,0 - bld r18,2 - bst r5,0 - bld r18,3 - bst r21,1 - bld r18,4 - bst r23,1 - bld r18,5 - bst r3,1 - bld r18,6 - bst r5,1 - bld r18,7 - bst r21,2 - bld r19,0 - bst r23,2 - bld r19,1 - bst r3,2 - bld r19,2 - bst r5,2 - bld r19,3 - bst r21,3 - bld r19,4 - bst r23,3 - bld r19,5 - bst r3,3 - bld r19,6 - bst r5,3 - bld r19,7 - st X+,r18 - st X+,r19 - bst r21,4 - bld r18,0 - bst r23,4 - bld r18,1 - bst r3,4 - bld r18,2 - bst r5,4 - bld r18,3 - bst r21,5 - bld r18,4 - bst r23,5 - bld r18,5 - bst r3,5 - bld r18,6 - bst r5,5 - bld r18,7 - bst r21,6 - bld r19,0 - bst r23,6 - bld r19,1 - bst r3,6 - bld r19,2 - bst r5,6 - bld r19,3 - bst r21,7 - bld r19,4 - bst r23,7 - bld r19,5 - bst r3,7 - bld r19,6 - bst r5,7 - bld r19,7 - st X+,r18 - st X+,r19 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift64n_decrypt, .-gift64n_decrypt - - .text -.global gift64t_encrypt - .type gift64t_encrypt, @function -gift64t_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 30 - ld r8,Z - ldd r9,Z+1 - ldd r10,Z+2 - ldd r11,Z+3 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Z+4 - ldd r9,Z+5 - ldd r10,Z+6 - ldd r11,Z+7 - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ld r20,X+ - ld r21,X+ - bst r20,0 - bld r22,0 - bst r20,1 - bld r2,0 - bst r20,2 - bld r4,0 - bst r20,3 - bld r6,0 - bst r20,4 - bld r22,1 - bst r20,5 - bld r2,1 - bst r20,6 - bld r4,1 - bst r20,7 - bld r6,1 - bst r21,0 - bld r22,2 - bst r21,1 - bld r2,2 - bst r21,2 - bld r4,2 - bst r21,3 - bld r6,2 - bst r21,4 - bld r22,3 - bst r21,5 - bld r2,3 - bst r21,6 - bld r4,3 - bst r21,7 - bld r6,3 - ld r20,X+ - ld r21,X+ - bst r20,0 - bld r22,4 - bst r20,1 - bld r2,4 - bst r20,2 - bld r4,4 - bst r20,3 - bld r6,4 - bst r20,4 - bld r22,5 - bst r20,5 - bld r2,5 - bst r20,6 - bld r4,5 - bst r20,7 - bld r6,5 - bst r21,0 - bld r22,6 - bst r21,1 - bld r2,6 - bst r21,2 - bld r4,6 - bst r21,3 - bld r6,6 - bst r21,4 - bld r22,7 - bst r21,5 - bld r2,7 - bst r21,6 - bld r4,7 - bst r21,7 - bld r6,7 - ld r20,X+ - ld r21,X+ - bst r20,0 - bld r23,0 - bst r20,1 - bld r3,0 - bst r20,2 - bld r5,0 - bst r20,3 - bld r7,0 - bst r20,4 - bld r23,1 - bst r20,5 - bld r3,1 - bst r20,6 - bld r5,1 - bst r20,7 - bld r7,1 - bst r21,0 - bld r23,2 - bst r21,1 - bld r3,2 - bst r21,2 - bld r5,2 - bst r21,3 - bld r7,2 - bst r21,4 - bld r23,3 - bst r21,5 - bld r3,3 - bst r21,6 - bld r5,3 - bst r21,7 - bld r7,3 - ld r20,X+ - ld r21,X+ - bst r20,0 - bld r23,4 - bst r20,1 - bld r3,4 - bst r20,2 - bld r5,4 - bst r20,3 - bld r7,4 - bst r20,4 - bld r23,5 - bst r20,5 - bld r3,5 - bst r20,6 - bld r5,5 - bst r20,7 - bld r7,5 - bst r21,0 - bld r23,6 - bst r21,1 - bld r3,6 - bst r21,2 - bld r5,6 - bst r21,3 - bld r7,6 - bst r21,4 - bld 
r23,7 - bst r21,5 - bld r3,7 - bst r21,6 - bld r5,7 - bst r21,7 - bld r7,7 - rcall 1073f - ldi r20,1 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - rcall 1073f - ldi r20,3 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - rcall 1073f - ldi r20,7 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - rcall 1073f - ldi r20,15 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - rcall 1073f - ldi r20,31 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - rcall 1073f - ldi r20,62 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - rcall 1073f - ldi r20,61 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - rcall 1073f - ldi r20,59 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - rcall 1073f - ldi r20,55 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - 
rcall 1073f - ldi r20,47 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - rcall 1073f - ldi r20,30 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - rcall 1073f - ldi r20,60 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - rcall 1073f - ldi r20,57 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - rcall 1073f - ldi r20,51 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - rcall 1073f - ldi r20,39 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - rcall 1073f - ldi r20,14 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - rcall 1073f - ldi r20,29 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - rcall 1073f - ldi r20,58 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - rcall 1073f - ldi r20,53 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - 
rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - rcall 1073f - ldi r20,43 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - rcall 1073f - ldi r20,22 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - rcall 1073f - ldi r20,44 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - rcall 1073f - ldi r20,24 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - rcall 1073f - ldi r20,48 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - rcall 1073f - ldi r20,33 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - rcall 1073f - ldi r20,2 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - rcall 1073f - ldi r20,5 - ldi r21,128 - eor r6,r20 - eor r7,r21 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - rcall 1073f - ldi r20,11 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rjmp 1264f -1073: - mov r0,r22 - and r0,r4 - eor r2,r0 - mov r0,r23 - and r0,r5 - eor r3,r0 
- mov r0,r2 - and r0,r6 - eor r22,r0 - mov r0,r3 - and r0,r7 - eor r23,r0 - mov r0,r22 - or r0,r2 - eor r4,r0 - mov r0,r23 - or r0,r3 - eor r5,r0 - eor r6,r4 - eor r7,r5 - eor r2,r6 - eor r3,r7 - com r6 - com r7 - movw r20,r22 - mov r0,r2 - and r0,r20 - eor r4,r0 - mov r0,r3 - and r0,r21 - eor r5,r0 - movw r22,r6 - movw r6,r20 - bst r22,1 - bld r0,0 - bst r22,4 - bld r22,1 - bst r22,3 - bld r22,4 - bst r23,4 - bld r22,3 - bst r0,0 - bld r23,4 - bst r22,2 - bld r0,0 - bst r23,0 - bld r22,2 - bst r0,0 - bld r23,0 - bst r22,5 - bld r0,0 - bst r22,7 - bld r22,5 - bst r23,7 - bld r22,7 - bst r23,5 - bld r23,7 - bst r0,0 - bld r23,5 - bst r22,6 - bld r0,0 - bst r23,3 - bld r22,6 - bst r23,6 - bld r23,3 - bst r23,1 - bld r23,6 - bst r0,0 - bld r23,1 - bst r2,0 - bld r0,0 - bst r2,1 - bld r2,0 - bst r2,5 - bld r2,1 - bst r2,4 - bld r2,5 - bst r0,0 - bld r2,4 - bst r2,2 - bld r0,0 - bst r3,1 - bld r2,2 - bst r2,7 - bld r3,1 - bst r3,4 - bld r2,7 - bst r0,0 - bld r3,4 - bst r2,3 - bld r0,0 - bst r3,5 - bld r2,3 - bst r2,6 - bld r3,5 - bst r3,0 - bld r2,6 - bst r0,0 - bld r3,0 - bst r3,2 - bld r0,0 - bst r3,3 - bld r3,2 - bst r3,7 - bld r3,3 - bst r3,6 - bld r3,7 - bst r0,0 - bld r3,6 - bst r4,0 - bld r0,0 - bst r4,2 - bld r4,0 - bst r5,2 - bld r4,2 - bst r5,0 - bld r5,2 - bst r0,0 - bld r5,0 - bst r4,1 - bld r0,0 - bst r4,6 - bld r4,1 - bst r5,1 - bld r4,6 - bst r4,4 - bld r5,1 - bst r0,0 - bld r4,4 - bst r4,3 - bld r0,0 - bst r5,6 - bld r4,3 - bst r5,3 - bld r5,6 - bst r5,4 - bld r5,3 - bst r0,0 - bld r5,4 - bst r4,7 - bld r0,0 - bst r5,5 - bld r4,7 - bst r0,0 - bld r5,5 - bst r6,0 - bld r0,0 - bst r6,3 - bld r6,0 - bst r7,7 - bld r6,3 - bst r7,4 - bld r7,7 - bst r0,0 - bld r7,4 - bst r6,1 - bld r0,0 - bst r6,7 - bld r6,1 - bst r7,6 - bld r6,7 - bst r7,0 - bld r7,6 - bst r0,0 - bld r7,0 - bst r6,2 - bld r0,0 - bst r7,3 - bld r6,2 - bst r7,5 - bld r7,3 - bst r6,4 - bld r7,5 - bst r0,0 - bld r6,4 - bst r6,5 - bld r0,0 - bst r6,6 - bld r6,5 - bst r7,2 - bld r6,6 - bst r7,1 - bld r7,2 - bst r0,0 - bld r7,1 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - ret -1264: - ldd r26,Y+17 - ldd r27,Y+18 - bst r22,0 - bld r20,0 - bst r2,0 - bld r20,1 - bst r4,0 - bld r20,2 - bst r6,0 - bld r20,3 - bst r22,1 - bld r20,4 - bst r2,1 - bld r20,5 - bst r4,1 - bld r20,6 - bst r6,1 - bld r20,7 - bst r22,2 - bld r21,0 - bst r2,2 - bld r21,1 - bst r4,2 - bld r21,2 - bst r6,2 - bld r21,3 - bst r22,3 - bld r21,4 - bst r2,3 - bld r21,5 - bst r4,3 - bld r21,6 - bst r6,3 - bld r21,7 - st X+,r20 - st X+,r21 - bst r22,4 - bld r20,0 - bst r2,4 - bld r20,1 - bst r4,4 - bld r20,2 - bst r6,4 - bld r20,3 - bst r22,5 - bld r20,4 - bst r2,5 - bld r20,5 - bst r4,5 - bld r20,6 - bst r6,5 - bld r20,7 - bst r22,6 - bld r21,0 - bst r2,6 - bld r21,1 - bst r4,6 - bld r21,2 - bst r6,6 - bld r21,3 - bst r22,7 - bld r21,4 - bst r2,7 - bld r21,5 - bst r4,7 - bld r21,6 - bst r6,7 - bld r21,7 - st X+,r20 - st X+,r21 - bst r23,0 - bld r20,0 - bst r3,0 - bld r20,1 - bst r5,0 - bld r20,2 - bst r7,0 - bld r20,3 - bst r23,1 - bld r20,4 - bst r3,1 - bld r20,5 - bst r5,1 - bld r20,6 - bst r7,1 - bld r20,7 - bst r23,2 - bld r21,0 - bst r3,2 - bld r21,1 - bst r5,2 - bld r21,2 - bst r7,2 - bld r21,3 - bst r23,3 - bld r21,4 - bst r3,3 - bld r21,5 - bst r5,3 - bld r21,6 - bst r7,3 - bld r21,7 - st X+,r20 - st X+,r21 - bst r23,4 - bld r20,0 - bst r3,4 - bld r20,1 - bst r5,4 - bld r20,2 - bst r7,4 - bld r20,3 - bst r23,5 - bld r20,4 - bst r3,5 - bld r20,5 - bst r5,5 - bld r20,6 - bst r7,5 - bld r20,7 - bst r23,6 - bld r21,0 - bst r3,6 - bld r21,1 - bst 
r5,6 - bld r21,2 - bst r7,6 - bld r21,3 - bst r23,7 - bld r21,4 - bst r3,7 - bld r21,5 - bst r5,7 - bld r21,6 - bst r7,7 - bld r21,7 - st X+,r20 - st X+,r21 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift64t_encrypt, .-gift64t_encrypt - - .text -.global gift64t_decrypt - .type gift64t_decrypt, @function -gift64t_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 30 - ld r8,Z - ldd r9,Z+1 - ldd r10,Z+2 - ldd r11,Z+3 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Z+4 - ldd r9,Z+5 - ldd r10,Z+6 - ldd r11,Z+7 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - mov r0,r9 - mov r9,r8 - mov r8,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r20,X+ - ld r21,X+ - bst r20,0 - bld r22,0 - bst r20,1 - bld r2,0 - bst r20,2 - bld r4,0 - bst r20,3 - bld r6,0 - bst r20,4 - bld r22,1 - bst r20,5 - bld r2,1 - bst r20,6 - bld r4,1 - bst r20,7 - bld r6,1 - bst r21,0 - bld r22,2 - bst r21,1 - bld r2,2 - bst r21,2 - bld r4,2 - bst r21,3 - bld r6,2 - bst r21,4 - bld r22,3 - bst r21,5 - bld r2,3 - bst r21,6 - bld r4,3 - bst r21,7 - bld r6,3 - ld r20,X+ - ld r21,X+ - bst r20,0 - bld r22,4 - bst r20,1 - bld r2,4 - bst r20,2 - bld r4,4 - bst r20,3 - bld r6,4 - bst r20,4 - bld r22,5 - bst r20,5 - bld r2,5 - bst r20,6 - bld r4,5 - bst r20,7 - bld r6,5 - bst r21,0 - bld r22,6 - bst r21,1 - bld r2,6 - bst r21,2 - bld r4,6 - bst r21,3 - bld r6,6 - bst r21,4 - bld r22,7 - bst r21,5 - bld r2,7 - bst r21,6 - bld r4,7 - bst r21,7 - bld r6,7 - ld r20,X+ - ld r21,X+ - bst r20,0 - bld r23,0 - bst r20,1 - bld r3,0 - bst r20,2 - bld r5,0 - bst r20,3 - bld r7,0 - bst r20,4 - bld r23,1 - bst r20,5 - bld r3,1 - bst r20,6 - bld r5,1 - bst r20,7 - bld r7,1 - bst r21,0 - bld r23,2 - bst r21,1 - bld r3,2 - bst r21,2 - bld r5,2 - bst r21,3 - bld r7,2 - bst r21,4 - bld r23,3 - bst r21,5 - bld r3,3 - bst r21,6 - bld r5,3 - bst r21,7 - bld r7,3 - ld r20,X+ - ld r21,X+ - bst r20,0 - bld r23,4 - bst r20,1 - bld r3,4 - bst r20,2 - bld r5,4 - bst r20,3 - bld r7,4 - bst r20,4 - bld r23,5 - bst r20,5 - bld r3,5 - bst r20,6 - bld r5,5 - bst r20,7 - bld r7,5 - bst r21,0 - bld r23,6 - bst r21,1 - bld r3,6 - bst r21,2 - bld r5,6 - bst r21,3 - bld r7,6 - bst 
r21,4 - bld r23,7 - bst r21,5 - bld r3,7 - bst r21,6 - bld r5,7 - bst r21,7 - bld r7,7 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,11 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,5 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,2 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,33 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,48 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - rcall 1185f - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,24 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,44 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,22 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,43 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - rcall 1185f - std Y+1,r8 - std Y+2,r9 - std 
Y+3,r10 - std Y+4,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,53 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,58 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,29 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,14 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - rcall 1185f - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,39 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,51 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,57 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,60 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - rcall 1185f - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,30 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 
- ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,47 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,55 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,59 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - rcall 1185f - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,61 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,62 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,31 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r8,Y+1 - ldd r9,Y+2 - ldd r10,Y+3 - ldd r11,Y+4 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,15 - ldi r21,128 - eor r6,r20 - eor r7,r21 - eor r4,r18 - eor r5,r18 - rcall 1185f - std Y+1,r8 - std Y+2,r9 - std Y+3,r10 - std Y+4,r11 - ldd r8,Y+5 - ldd r9,Y+6 - ldd r10,Y+7 - ldd r11,Y+8 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,7 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+5,r8 - std Y+6,r9 - std Y+7,r10 - std Y+8,r11 - ldd r8,Y+9 - ldd r9,Y+10 - ldd r10,Y+11 - ldd r11,Y+12 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ldi r20,3 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - std Y+9,r8 - std Y+10,r9 - std Y+11,r10 - std Y+12,r11 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - lsl r10 - rol r11 - adc r10,r1 - lsl 
r10 - rol r11 - adc r10,r1 - ldi r20,1 - ldi r21,128 - eor r6,r20 - eor r7,r21 - rcall 1185f - rjmp 1374f -1185: - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - bst r22,1 - bld r0,0 - bst r23,4 - bld r22,1 - bst r22,3 - bld r23,4 - bst r22,4 - bld r22,3 - bst r0,0 - bld r22,4 - bst r22,2 - bld r0,0 - bst r23,0 - bld r22,2 - bst r0,0 - bld r23,0 - bst r22,5 - bld r0,0 - bst r23,5 - bld r22,5 - bst r23,7 - bld r23,5 - bst r22,7 - bld r23,7 - bst r0,0 - bld r22,7 - bst r22,6 - bld r0,0 - bst r23,1 - bld r22,6 - bst r23,6 - bld r23,1 - bst r23,3 - bld r23,6 - bst r0,0 - bld r23,3 - bst r2,0 - bld r0,0 - bst r2,4 - bld r2,0 - bst r2,5 - bld r2,4 - bst r2,1 - bld r2,5 - bst r0,0 - bld r2,1 - bst r2,2 - bld r0,0 - bst r3,4 - bld r2,2 - bst r2,7 - bld r3,4 - bst r3,1 - bld r2,7 - bst r0,0 - bld r3,1 - bst r2,3 - bld r0,0 - bst r3,0 - bld r2,3 - bst r2,6 - bld r3,0 - bst r3,5 - bld r2,6 - bst r0,0 - bld r3,5 - bst r3,2 - bld r0,0 - bst r3,6 - bld r3,2 - bst r3,7 - bld r3,6 - bst r3,3 - bld r3,7 - bst r0,0 - bld r3,3 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r4,2 - bld r5,2 - bst r0,0 - bld r4,2 - bst r4,1 - bld r0,0 - bst r4,4 - bld r4,1 - bst r5,1 - bld r4,4 - bst r4,6 - bld r5,1 - bst r0,0 - bld r4,6 - bst r4,3 - bld r0,0 - bst r5,4 - bld r4,3 - bst r5,3 - bld r5,4 - bst r5,6 - bld r5,3 - bst r0,0 - bld r5,6 - bst r4,7 - bld r0,0 - bst r5,5 - bld r4,7 - bst r0,0 - bld r5,5 - bst r6,0 - bld r0,0 - bst r7,4 - bld r6,0 - bst r7,7 - bld r7,4 - bst r6,3 - bld r7,7 - bst r0,0 - bld r6,3 - bst r6,1 - bld r0,0 - bst r7,0 - bld r6,1 - bst r7,6 - bld r7,0 - bst r6,7 - bld r7,6 - bst r0,0 - bld r6,7 - bst r6,2 - bld r0,0 - bst r6,4 - bld r6,2 - bst r7,5 - bld r6,4 - bst r7,3 - bld r7,5 - bst r0,0 - bld r7,3 - bst r6,5 - bld r0,0 - bst r7,1 - bld r6,5 - bst r7,2 - bld r7,1 - bst r6,6 - bld r7,2 - bst r0,0 - bld r6,6 - movw r20,r6 - movw r6,r22 - movw r22,r20 - and r20,r2 - and r21,r3 - eor r4,r20 - eor r5,r21 - com r6 - com r7 - eor r2,r6 - eor r3,r7 - eor r6,r4 - eor r7,r5 - mov r0,r22 - or r0,r2 - eor r4,r0 - mov r0,r23 - or r0,r3 - eor r5,r0 - mov r0,r2 - and r0,r6 - eor r22,r0 - mov r0,r3 - and r0,r7 - eor r23,r0 - mov r0,r22 - and r0,r4 - eor r2,r0 - mov r0,r23 - and r0,r5 - eor r3,r0 - ret -1374: - ldd r26,Y+17 - ldd r27,Y+18 - bst r22,0 - bld r20,0 - bst r2,0 - bld r20,1 - bst r4,0 - bld r20,2 - bst r6,0 - bld r20,3 - bst r22,1 - bld r20,4 - bst r2,1 - bld r20,5 - bst r4,1 - bld r20,6 - bst r6,1 - bld r20,7 - bst r22,2 - bld r21,0 - bst r2,2 - bld r21,1 - bst r4,2 - bld r21,2 - bst r6,2 - bld r21,3 - bst r22,3 - bld r21,4 - bst r2,3 - bld r21,5 - bst r4,3 - bld r21,6 - bst r6,3 - bld r21,7 - st X+,r20 - st X+,r21 - bst r22,4 - bld r20,0 - bst r2,4 - bld r20,1 - bst r4,4 - bld r20,2 - bst r6,4 - bld r20,3 - bst r22,5 - bld r20,4 - bst r2,5 - bld r20,5 - bst r4,5 - bld r20,6 - bst r6,5 - bld r20,7 - bst r22,6 - bld r21,0 - bst r2,6 - bld r21,1 - bst r4,6 - bld r21,2 - bst r6,6 - bld r21,3 - bst r22,7 - bld r21,4 - bst r2,7 - bld r21,5 - bst r4,7 - bld r21,6 - bst r6,7 - bld r21,7 - st X+,r20 - st X+,r21 - bst r23,0 - bld r20,0 - bst r3,0 - bld r20,1 - bst r5,0 - bld r20,2 - bst r7,0 - bld r20,3 - bst r23,1 - bld r20,4 - bst r3,1 - bld r20,5 - bst r5,1 - bld r20,6 - bst r7,1 - bld r20,7 - bst r23,2 - bld r21,0 - bst r3,2 - bld r21,1 - bst r5,2 - bld r21,2 - bst r7,2 - bld r21,3 - bst r23,3 - bld r21,4 - bst r3,3 - bld r21,5 - bst r5,3 - bld r21,6 - bst r7,3 - bld r21,7 - st X+,r20 - st X+,r21 - bst r23,4 - bld r20,0 - bst r3,4 - bld r20,1 - bst r5,4 - bld r20,2 - bst 
r7,4 - bld r20,3 - bst r23,5 - bld r20,4 - bst r3,5 - bld r20,5 - bst r5,5 - bld r20,6 - bst r7,5 - bld r20,7 - bst r23,6 - bld r21,0 - bst r3,6 - bld r21,1 - bst r5,6 - bld r21,2 - bst r7,6 - bld r21,3 - bst r23,7 - bld r21,4 - bst r3,7 - bld r21,5 - bst r5,7 - bld r21,6 - bst r7,7 - bld r21,7 - st X+,r20 - st X+,r21 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift64t_decrypt, .-gift64t_decrypt - -#endif diff --git a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/internal-gift64.c b/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/internal-gift64.c deleted file mode 100644 index 81bc8a3..0000000 --- a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/internal-gift64.c +++ /dev/null @@ -1,1205 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-gift64.h" -#include "internal-util.h" -#include - -#if !GIFT64_LOW_MEMORY - -/* Round constants for GIFT-64 in the fixsliced representation */ -static uint32_t const GIFT64_RC[28] = { - 0x22000011, 0x00002299, 0x11118811, 0x880000ff, 0x33111199, 0x990022ee, - 0x22119933, 0x880033bb, 0x22119999, 0x880022ff, 0x11119922, 0x880033cc, - 0x33008899, 0x99002299, 0x33118811, 0x880000ee, 0x33110099, 0x990022aa, - 0x22118833, 0x880022bb, 0x22111188, 0x88002266, 0x00009922, 0x88003300, - 0x22008811, 0x00002288, 0x00118811, 0x880000bb -}; - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) - -/** - * \brief Swaps bits within two words. - * - * \param a The first word. - * \param b The second word. - * \param mask Mask for the bits to shift. - * \param shift Shift amount in bits. - */ -#define gift64b_swap_move(a, b, mask, shift) \ - do { \ - uint32_t t = ((b) ^ ((a) >> (shift))) & (mask); \ - (b) ^= t; \ - (a) ^= t << (shift); \ - } while (0) - -/** - * \brief Performs the GIFT-64 S-box on the bit-sliced state. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. 
- */ -#define gift64b_sbox(s0, s1, s2, s3) \ - do { \ - s1 ^= s0 & s2; \ - s0 ^= s1 & s3; \ - s2 ^= s0 | s1; \ - s3 ^= s2; \ - s1 ^= s3; \ - s2 ^= s0 & s1; \ - } while (0) - -/** - * \brief Performs the inverse of the GIFT-64 S-box on the bit-sliced state. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift64b_inv_sbox(s0, s1, s2, s3) \ - do { \ - s2 ^= s3 & s1; \ - s1 ^= s0; \ - s0 ^= s2; \ - s2 ^= s3 | s1; \ - s3 ^= s1 & s0; \ - s1 ^= s3 & s2; \ - } while (0) - -/* Rotates a state word left by 1 position in the fixsliced representation: - * - * 0 1 2 3 1 2 3 0 - * 4 5 6 7 ==> 5 6 7 4 - * 8 9 10 11 9 10 11 8 - * 12 13 14 15 13 14 14 12 - */ -#define gift64b_rotate_left_1(x) \ - ((((x) >> 1) & 0x77777777U) | (((x) & 0x11111111U) << 3)) - -/* Rotates a state word left by 2 positions in the fixsliced representation: - * - * 0 1 2 3 2 3 0 1 - * 4 5 6 7 ==> 6 7 4 5 - * 8 9 10 11 10 11 8 9 - * 12 13 14 15 14 15 12 13 - */ -#define gift64b_rotate_left_2(x) \ - ((((x) >> 2) & 0x33333333U) | (((x) & 0x33333333U) << 2)) - -/* Rotates a state word left by 3 positions in the fixsliced representation: - * - * 0 1 2 3 3 0 1 2 - * 4 5 6 7 ==> 7 4 5 6 - * 8 9 10 11 11 8 9 10 - * 12 13 14 15 15 12 13 14 - */ -#define gift64b_rotate_left_3(x) \ - ((((x) >> 3) & 0x11111111U) | (((x) & 0x77777777U) << 1)) - -/* Rotates a state word right by 1 position in the fixsliced representation */ -#define gift64b_rotate_right_1(x) gift64b_rotate_left_3(x) - -/* Rotates a state word right by 2 positions in the fixsliced representation */ -#define gift64b_rotate_right_2(x) gift64b_rotate_left_2(x) - -/* Rotates a state word right by 3 positions in the fixsliced representation */ -#define gift64b_rotate_right_3(x) gift64b_rotate_left_1(x) - -/* Rotates a state word up by 1 position in the fixsliced representation: - * - * 0 1 2 3 4 5 6 7 - * 4 5 6 7 ==> 8 9 10 11 - * 8 9 10 11 12 13 14 15 - * 12 13 14 15 0 1 2 3 - */ -#define gift64b_rotate_up_1(x) (rightRotate8((x))) - -/* Rotates a state word up by 2 positions in the fixsliced representation: - * - * 0 1 2 3 8 9 10 11 - * 4 5 6 7 ==> 12 13 14 15 - * 8 9 10 11 0 1 2 3 - * 12 13 14 15 4 5 6 7 - */ -#define gift64b_rotate_up_2(x) (rightRotate16((x))) - -/* Rotates a state word up by 3 positions in the fixsliced representation: - * - * 0 1 2 3 12 13 14 15 - * 4 5 6 7 ==> 0 1 2 3 - * 8 9 10 11 4 5 6 7 - * 12 13 14 15 8 9 10 11 - */ -#define gift64b_rotate_up_3(x) (rightRotate24((x))) - -/* Rotates a state word down by 1 position in the fixsliced representation */ -#define gift64b_rotate_down_1(x) gift64b_rotate_up_3(x) - -/* Rotates a state word down by 2 positions in the fixsliced representation */ -#define gift64b_rotate_down_2(x) gift64b_rotate_up_2(x) - -/* Rotates a state word down by 3 positions in the fixsliced representation */ -#define gift64b_rotate_down_3(x) gift64b_rotate_up_1(x) - -/* Permutation code to rearrange key bits into fixsliced form. 
Permutations - * generated wth "http://programming.sirrida.de/calcperm.php" */ -#define gift64b_rearrange1_transpose_low(out, in) \ - do { \ - out = (in) & 0x0000FFFFU; \ - /* 0 8 16 24 3 11 19 27 2 10 18 26 1 9 17 25 * */ \ - bit_permute_step(out, 0x0000CCCCU, 16); \ - bit_permute_step(out, 0x30030330U, 2); \ - bit_permute_step(out, 0x00960096U, 8); \ - bit_permute_step(out, 0x05500550U, 1); \ - bit_permute_step(out, 0x0A0A0A0AU, 4); \ - } while (0) -#define gift64b_rearrange1_transpose_high(out, in) \ - do { \ - out = (in) >> 16; \ - /* 0 8 16 24 3 11 19 27 2 10 18 26 1 9 17 25 * */ \ - bit_permute_step(out, 0x0000CCCCU, 16); \ - bit_permute_step(out, 0x30030330U, 2); \ - bit_permute_step(out, 0x00960096U, 8); \ - bit_permute_step(out, 0x05500550U, 1); \ - bit_permute_step(out, 0x0A0A0A0AU, 4); \ - } while (0) -#define gift64b_rearrange1_low(out, in) \ - do { \ - out = (in) & 0x0000FFFFU; \ - /* 0 1 2 3 24 25 26 27 16 17 18 19 8 9 10 11 * */ \ - out = (out & 0x0000000FU) | ((out & 0x00000F00U) << 8) | \ - ((out & 0x000000F0U) << 20) | ((out & 0x0000F000U) >> 4); \ - } while (0) -#define gift64b_rearrange1_high(out, in) \ - do { \ - out = (in) >> 16; \ - /* 0 1 2 3 24 25 26 27 16 17 18 19 8 9 10 11 * */ \ - out = (out & 0x0000000FU) | ((out & 0x00000F00U) << 8) | \ - ((out & 0x000000F0U) << 20) | ((out & 0x0000F000U) >> 4); \ - } while (0) -#define gift64b_rearrange2_transpose_low(out, in) \ - do { \ - out = (in) & 0x0000FFFFU; \ - /* 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 * */ \ - bit_permute_step(out, 0x0A0A0A0AU, 3); \ - bit_permute_step(out, 0x00CC00CCU, 6); \ - bit_permute_step(out, 0x0000F0F0U, 12); \ - bit_permute_step(out, 0x0000FF00U, 8); \ - } while (0) -#define gift64b_rearrange2_transpose_high(out, in) \ - do { \ - out = (in) >> 16; \ - /* 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 * */ \ - bit_permute_step(out, 0x0A0A0A0AU, 3); \ - bit_permute_step(out, 0x00CC00CCU, 6); \ - bit_permute_step(out, 0x0000F0F0U, 12); \ - bit_permute_step(out, 0x0000FF00U, 8); \ - } while (0) -#define gift64b_rearrange2_low(out, in) \ - do { \ - out = (in) & 0x0000FFFFU; \ - /* 0 1 2 3 8 9 10 11 16 17 18 19 24 25 26 27 * */ \ - out = (out & 0x0000000FU) | ((out & 0x000000F0U) << 4) | \ - ((out & 0x00000F00U) << 8) | ((out & 0x0000F000U) << 12); \ - } while (0) -#define gift64b_rearrange2_high(out, in) \ - do { \ - out = (in) >> 16; \ - /* 0 1 2 3 8 9 10 11 16 17 18 19 24 25 26 27 * */ \ - out = (out & 0x0000000FU) | ((out & 0x000000F0U) << 4) | \ - ((out & 0x00000F00U) << 8) | ((out & 0x0000F000U) << 12); \ - } while (0) - -void gift64n_update_round_keys(gift64n_key_schedule_t *ks) -{ - uint32_t x; - - /* First round */ - gift64b_rearrange1_transpose_low(x, ks->k[3]); - ks->rk[0] = ~(x | (x << 4)); - gift64b_rearrange1_transpose_high(x, ks->k[3]); - ks->rk[1] = x | (x << 4); - - /* Second round */ - gift64b_rearrange1_low(x, ks->k[2]); - x = x | (x << 4); - gift64b_swap_move(x, x, 0x22222222U, 2); - ks->rk[2] = ~x; - gift64b_rearrange1_high(x, ks->k[2]); - x = x | (x << 4); - gift64b_swap_move(x, x, 0x22222222U, 2); - ks->rk[3] = x; - - /* Third round */ - gift64b_rearrange2_transpose_low(x, ks->k[1]); - gift64b_swap_move(x, x, 0x00000F00U, 16); - ks->rk[4] = ~(x | (x << 4)); - gift64b_rearrange2_transpose_high(x, ks->k[1]); - gift64b_swap_move(x, x, 0x00000F00U, 16); - ks->rk[5] = x | (x << 4); - - /* Fourth round */ - gift64b_rearrange2_low(x, ks->k[0]); - ks->rk[6] = ~(x | (x << 4)); - gift64b_rearrange2_high(x, ks->k[0]); - ks->rk[7] = x | (x << 4); -} - -/** - * \brief Perform the core of 
GIFT-64 encryption on two blocks in parallel. - * - * \param ks Points to the key schedule to use to encrypt the blocks. - * \param state Buffer containing the two blocks in bit-sliced form, - * on input and output. - * \param Tweak value or zero if there is no tweak. - */ -static void gift64b_encrypt_core - (const gift64n_key_schedule_t *ks, uint32_t state[4], uint32_t tweak) -{ - const uint32_t *rc = GIFT64_RC; - uint32_t s0, s1, s2, s3, temp; - uint32_t rk[8]; - uint8_t round; - - /* Start with the pre-computed round keys for the first four rounds */ - memcpy(rk, ks->rk, sizeof(ks->rk)); - - /* Load the state into local variables */ - s0 = state[0]; - s1 = state[1]; - s2 = state[2]; - s3 = state[3]; - - /* Perform all 28 rounds four at a time. We use the "fixslicing" method. - * - * The permutation is restructured so that one of the words each round - * does not need to be permuted, with the others rotating left, up, right, - * and down to keep the bits in line with their non-moving counterparts. - * This reduces the number of shifts required significantly. - * - * At the end of four rounds, the bit ordering will return to the - * original position. We then repeat the process for the next 4 rounds. - */ - for (round = 0; round < 28; round += 4, rc += 4) { - /* 1st round - S-box, rotate left, add round key */ - gift64b_sbox(s0, s1, s2, s3); - s1 = gift64b_rotate_left_1(s1); - s2 = gift64b_rotate_left_2(s2); - s0 = gift64b_rotate_left_3(s0); - s3 ^= rk[0]; - s1 ^= rk[1]; - s0 ^= rc[0]; - - /* 2nd round - S-box, rotate up, add round key (s0 and s3 swapped) */ - gift64b_sbox(s3, s1, s2, s0); - s1 = gift64b_rotate_up_1(s1); - s2 = gift64b_rotate_up_2(s2); - s3 = gift64b_rotate_up_3(s3); - s0 ^= rk[2]; - s1 ^= rk[3]; - s3 ^= rc[1]; - - /* 3rd round - S-box, rotate right, add round key */ - gift64b_sbox(s0, s1, s2, s3); - s1 = gift64b_rotate_right_1(s1); - s2 = gift64b_rotate_right_2(s2); - s0 = gift64b_rotate_right_3(s0); - s3 ^= rk[4]; - s1 ^= rk[5]; - s0 ^= rc[2]; - - /* 4th round - S-box, rotate down, add round key (s0 and s3 swapped) */ - gift64b_sbox(s3, s1, s2, s0); - s1 = gift64b_rotate_down_1(s1); - s2 = gift64b_rotate_down_2(s2); - s3 = gift64b_rotate_down_3(s3); - s0 ^= rk[6]; - s1 ^= rk[7]; - s3 ^= rc[3]; - - /* Add the tweak every four encryption rounds except the last */ - if (round < 24) - s2 ^= tweak; - - /* Derive the round keys for the next 4 rounds */ - rk[0] = gift64b_rotate_left_1(rk[0]); - rk[1] = (gift64b_rotate_left_3(rk[1]) << 16) | (rk[1] >> 16); - rk[2] = rightRotate8(rk[2]); - temp = gift64b_rotate_left_2(rk[3]); - rk[3] = (temp & 0x99999999U) | leftRotate8(temp & 0x66666666U); - rk[4] = gift64b_rotate_left_3(rk[4]); - temp = rightRotate16(rk[5]); - rk[5] = (gift64b_rotate_left_1(temp) & 0x00FFFF00U) | - (temp & 0xFF0000FFU); - rk[6] = leftRotate8(rk[6]); - temp = gift64b_rotate_left_2(rk[7]); - rk[7] = (temp & 0x33333333U) | rightRotate8(temp & 0xCCCCCCCCU); - } - - /* Copy the local variables to the output state */ - state[0] = s0; - state[1] = s1; - state[2] = s2; - state[3] = s3; -} - -/** - * \brief Perform the core of GIFT-64 decryption on two blocks in parallel. - * - * \param ks Points to the key schedule to use to encrypt the blocks. - * \param state Buffer containing the two blocks in bit-sliced form, - * on input and output. - * \param Tweak value or zero if there is no tweak. 
- */ -static void gift64b_decrypt_core - (const gift64n_key_schedule_t *ks, uint32_t state[4], uint32_t tweak) -{ - const uint32_t *rc = GIFT64_RC + 28 - 4; - uint32_t s0, s1, s2, s3, temp; - uint32_t rk[8]; - uint8_t round; - - /* Start with the pre-computed round keys for the first four rounds */ - memcpy(rk, ks->rk, sizeof(ks->rk)); - - /* Fast forward the key schedule to the end by permuting each round - * key by the amount it would see under the full set of rounds. - * Generated with "http://programming.sirrida.de/calcperm.php" */ - /* P0: 1 2 3 0 5 6 7 4 9 10 11 8 13 14 15 12 17 18 - * 19 16 21 22 23 20 25 26 27 24 29 30 31 28 */ - rk[0] = ((rk[0] & 0x77777777U) << 1) | ((rk[0] & 0x88888888U) >> 3); - /* P1: 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 - * 31 3 0 1 2 7 4 5 6 11 8 9 10 15 12 13 14 */ - rk[1] = ((rk[1] & 0xEEEE0000U) >> 17) | ((rk[1] & 0x0000FFFFU) << 16) | - ((rk[1] & 0x11110000U) >> 13); - /* P2: 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 - * 24 25 26 27 28 29 30 31 0 1 2 3 4 5 6 7 */ - rk[2] = leftRotate8(rk[2]); - /* P3: 2 27 24 1 6 31 28 5 10 3 0 9 14 7 4 13 18 11 - * 8 17 22 15 12 21 26 19 16 25 30 23 20 29 */ - rk[3] = ((rk[3] & 0x11111111U) << 2) | leftRotate22(rk[3] & 0x44444444U) | - leftRotate26(rk[3] & 0x22222222U) | ((rk[3] & 0x88888888U) >> 2); - /* P4: 3 0 1 2 7 4 5 6 11 8 9 10 15 12 13 14 19 16 - * 17 18 23 20 21 22 27 24 25 26 31 28 29 30 */ - rk[4] = ((rk[4] & 0x11111111U) << 3) | ((rk[4] & 0xEEEEEEEEU) >> 1); - /* P5: 16 17 18 19 20 21 22 23 25 26 27 24 29 30 31 - * 28 1 2 3 0 5 6 7 4 8 9 10 11 12 13 14 15 */ - rk[5] = leftRotate13(rk[5] & 0x00888800U) | - leftRotate16(rk[5] & 0xFF0000FFU) | - leftRotate17(rk[5] & 0x00777700U); - /* P6: 24 25 26 27 28 29 30 31 0 1 2 3 4 5 6 7 8 9 10 - * 11 12 13 14 15 16 17 18 19 20 21 22 23 */ - rk[6] = leftRotate24(rk[6]); - /* P7: 2 3 8 9 6 7 12 13 10 11 16 17 14 15 20 21 18 19 - * 24 25 22 23 28 29 26 27 0 1 30 31 4 5 */ - rk[7] = ((rk[7] & 0x33333333U) << 2) | leftRotate6(rk[7] & 0xCCCCCCCCU); - - /* Load the state into local variables */ - s0 = state[0]; - s1 = state[1]; - s2 = state[2]; - s3 = state[3]; - - /* Perform all 28 rounds four at a time. We use the "fixslicing" method. - * - * The permutation is restructured so that one of the words each round - * does not need to be permuted, with the others rotating left, up, right, - * and down to keep the bits in line with their non-moving counterparts. - * This reduces the number of shifts required significantly. - * - * At the end of four rounds, the bit ordering will return to the - * original position. We then repeat the process for the next 4 rounds. 
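
As an illustrative aside (not part of the patch): the fixslicing argument above relies on the row/column rotations cancelling in pairs across each group of four rounds. A minimal C sketch of that property, with the rotation macros copied from earlier in this file (rightRotate16 is re-defined locally here instead of coming from internal-util.h) and a hypothetical main() harness:

#include <stdint.h>
#include <stdio.h>

/* Fixsliced rotations copied from earlier in this file. */
#define gift64b_rotate_left_1(x) \
    ((((x) >> 1) & 0x77777777U) | (((x) & 0x11111111U) << 3))
#define gift64b_rotate_left_3(x) \
    ((((x) >> 3) & 0x11111111U) | (((x) & 0x77777777U) << 1))
#define gift64b_rotate_right_1(x) gift64b_rotate_left_3(x)

/* Local stand-in for the rightRotate16() helper from internal-util.h. */
#define rightRotate16(x) (((uint32_t)(x) >> 16) | ((uint32_t)(x) << 16))
#define gift64b_rotate_up_2(x) (rightRotate16((x)))
#define gift64b_rotate_down_2(x) (rightRotate16((x)))

int main(void)
{
    uint32_t x = 0x2468ACE1U;

    /* The "rotate right" of round 3 undoes the "rotate left" of round 1. */
    uint32_t a = gift64b_rotate_right_1(gift64b_rotate_left_1(x));

    /* The "rotate down" of round 4 undoes the "rotate up" of round 2, so
     * the bit ordering is back where it started after four rounds. */
    uint32_t b = gift64b_rotate_down_2(gift64b_rotate_up_2(x));

    printf("%s\n", (a == x && b == x) ? "rotations cancel in pairs" : "mismatch");
    return 0;
}
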
- */ - for (round = 0; round < 28; round += 4, rc -= 4) { - /* Derive the round keys for the previous 4 rounds */ - rk[0] = gift64b_rotate_right_1(rk[0]); - temp = rk[1] >> 16; - rk[1] = gift64b_rotate_right_3(temp) | (rk[1] << 16); - rk[2] = leftRotate8(rk[2]); - temp = (rk[3] & 0x99999999U) | rightRotate8(rk[3] & 0x66666666U); - rk[3] = gift64b_rotate_right_2(temp); - rk[4] = gift64b_rotate_right_3(rk[4]); - temp = (gift64b_rotate_right_1(rk[5]) & 0x00FFFF00U) | - (rk[5] & 0xFF0000FFU); - rk[5] = leftRotate16(temp); - rk[6] = rightRotate8(rk[6]); - temp = (rk[7] & 0x33333333U) | leftRotate8(rk[7] & 0xCCCCCCCCU); - rk[7] = gift64b_rotate_right_2(temp); - - /* Add the tweak every four decryption rounds except the first */ - if (round != 0) - s2 ^= tweak; - - /* 4th round - S-box, rotate down, add round key (s0 and s3 swapped) */ - s0 ^= rk[6]; - s1 ^= rk[7]; - s3 ^= rc[3]; - s1 = gift64b_rotate_up_1(s1); - s2 = gift64b_rotate_up_2(s2); - s3 = gift64b_rotate_up_3(s3); - gift64b_inv_sbox(s0, s1, s2, s3); - - /* 3rd round - S-box, rotate right, add round key */ - s3 ^= rk[4]; - s1 ^= rk[5]; - s0 ^= rc[2]; - s1 = gift64b_rotate_left_1(s1); - s2 = gift64b_rotate_left_2(s2); - s0 = gift64b_rotate_left_3(s0); - gift64b_inv_sbox(s3, s1, s2, s0); - - /* 2nd round - S-box, rotate up, add round key (s0 and s3 swapped) */ - s0 ^= rk[2]; - s1 ^= rk[3]; - s3 ^= rc[1]; - s1 = gift64b_rotate_down_1(s1); - s2 = gift64b_rotate_down_2(s2); - s3 = gift64b_rotate_down_3(s3); - gift64b_inv_sbox(s0, s1, s2, s3); - - /* 1st round - S-box, rotate left, add round key */ - s3 ^= rk[0]; - s1 ^= rk[1]; - s0 ^= rc[0]; - s1 = gift64b_rotate_right_1(s1); - s2 = gift64b_rotate_right_2(s2); - s0 = gift64b_rotate_right_3(s0); - gift64b_inv_sbox(s3, s1, s2, s0); - } - - /* Copy the local variables to the output state */ - state[0] = s0; - state[1] = s1; - state[2] = s2; - state[3] = s3; -} - -void gift64n_init(gift64n_key_schedule_t *ks, const unsigned char *key) -{ - /* Use the little-endian byte order from the LOTUS-AEAD submission */ - ks->k[0] = le_load_word32(key + 12); - ks->k[1] = le_load_word32(key + 8); - ks->k[2] = le_load_word32(key + 4); - ks->k[3] = le_load_word32(key); - gift64n_update_round_keys(ks); -} - -/** - * \brief Converts the GIFT-64 nibble-based representation into word-based - * (littlen-endian version). - * - * \param output Output buffer to write the word-based version to. - * \param input Input buffer to read the nibble-based version from. - * - * The output words will be in fixsliced form. Technically the output will - * contain two blocks for gift64b_encrypt_core() to process in parallel but - * both blocks will have the same value. 
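
As an illustrative aside (not part of the patch): the gift64n_to_words()/gift64n_to_nibbles() pair below is built almost entirely out of gift64b_swap_move() steps, and the conversion can be undone by replaying the same steps in reverse order because each step is its own inverse. A minimal C sketch of that involution, with the macro copied from earlier in this file and a hypothetical main() harness:

#include <stdint.h>
#include <stdio.h>

/* Copy of gift64b_swap_move() from earlier in this file: exchanges the
 * bits of b selected by "mask" with the bits of a selected by
 * "mask << shift". */
#define gift64b_swap_move(a, b, mask, shift) \
    do { \
        uint32_t t = ((b) ^ ((a) >> (shift))) & (mask); \
        (b) ^= t; \
        (a) ^= t << (shift); \
    } while (0)

int main(void)
{
    uint32_t a = 0x13579BDFU, b = 0x02468ACEU;
    uint32_t a0 = a, b0 = b;

    /* One of the mask/shift pairs used by gift64n_to_words(). */
    gift64b_swap_move(a, b, 0x0A0A0A0AU, 3);
    /* Repeating the identical step swaps the same bits straight back
     * (true whenever mask << shift does not overflow, as it holds here). */
    gift64b_swap_move(a, b, 0x0A0A0A0AU, 3);

    printf("%s\n", (a == a0 && b == b0) ? "swap_move is an involution" : "mismatch");
    return 0;
}
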
- */ -static void gift64n_to_words(uint32_t output[4], const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Load the input block into 32-bit words */ - s0 = le_load_word32(input); - s2 = le_load_word32(input + 4); - - /* Rearrange the bits in the block */ - gift64b_swap_move(s0, s0, 0x0A0A0A0AU, 3); - gift64b_swap_move(s0, s0, 0x00CC00CCU, 6); - gift64b_swap_move(s0, s0, 0x0000FF00U, 8); - gift64b_swap_move(s2, s2, 0x0A0A0A0AU, 3); - gift64b_swap_move(s2, s2, 0x00CC00CCU, 6); - gift64b_swap_move(s2, s2, 0x0000FF00U, 8); - - /* Split into two identical blocks in fixsliced form */ - s1 = s0; - s3 = s2; - gift64b_swap_move(s0, s1, 0x0F0F0F0FU, 4); - gift64b_swap_move(s2, s3, 0x0F0F0F0FU, 4); - gift64b_swap_move(s0, s2, 0x0000FFFFU, 16); - gift64b_swap_move(s1, s3, 0x0000FFFFU, 16); - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -/** - * \brief Converts the GIFT-64 word-based representation into nibble-based - * (little-endian version). - * - * \param output Output buffer to write the nibble-based version to. - * \param input Input buffer to read the word-based version from. - * - * The input words are in fixsliced form. Technically there are two - * identical blocks in the input. We drop one when we write to the output. - */ -static void gift64n_to_nibbles(unsigned char *output, const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - - /* Load the state and split the two blocks into separate words */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - gift64b_swap_move(s0, s2, 0x0000FFFFU, 16); - gift64b_swap_move(s1, s3, 0x0000FFFFU, 16); - gift64b_swap_move(s0, s1, 0x0F0F0F0FU, 4); - gift64b_swap_move(s2, s3, 0x0F0F0F0FU, 4); - - /* Rearrange the bits in the first block back into nibble form */ - gift64b_swap_move(s0, s0, 0x0000FF00U, 8); - gift64b_swap_move(s0, s0, 0x00CC00CCU, 6); - gift64b_swap_move(s0, s0, 0x0A0A0A0AU, 3); - gift64b_swap_move(s2, s2, 0x0000FF00U, 8); - gift64b_swap_move(s2, s2, 0x00CC00CCU, 6); - gift64b_swap_move(s2, s2, 0x0A0A0A0AU, 3); - le_store_word32(output, s0); - le_store_word32(output + 4, s2); -} - -void gift64n_encrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t state[4]; - gift64n_to_words(state, input); - gift64b_encrypt_core(ks, state, 0); - gift64n_to_nibbles(output, state); -} - -void gift64n_decrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t state[4]; - gift64n_to_words(state, input); - gift64b_decrypt_core(ks, state, 0); - gift64n_to_nibbles(output, state); -} - -/* 4-bit tweak values expanded to 32-bit in fixsliced form */ -static uint32_t const GIFT64_tweaks[16] = { - 0x00000000, 0xee11ee11, 0xdd22dd22, 0x33333333, 0xbb44bb44, 0x55555555, - 0x66666666, 0x88778877, 0x77887788, 0x99999999, 0xaaaaaaaa, 0x44bb44bb, - 0xcccccccc, 0x22dd22dd, 0x11ee11ee, 0xffffffff -}; - -void gift64t_encrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint16_t tweak) -{ - uint32_t state[4]; - gift64n_to_words(state, input); - gift64b_encrypt_core(ks, state, GIFT64_tweaks[tweak & 0x0F]); - gift64n_to_nibbles(output, state); -} - -void gift64t_decrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint16_t tweak) -{ - uint32_t state[4]; - gift64n_to_words(state, input); - gift64b_decrypt_core(ks, state, GIFT64_tweaks[tweak & 0x0F]); - gift64n_to_nibbles(output, state); -} - -#elif !defined(__AVR__) /* 
GIFT64_LOW_MEMORY */ - -/* Round constants for GIFT-64 */ -static uint8_t const GIFT64_RC[28] = { - 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, - 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, - 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, - 0x21, 0x02, 0x05, 0x0B -}; - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint16_t y = (_y); \ - uint16_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step_simple */ -#define bit_permute_step_simple(_y, mask, shift) \ - do { \ - (_y) = (((_y) & (mask)) << (shift)) | (((_y) >> (shift)) & (mask)); \ - } while (0) - -/* - * The permutation below was generated by the online permuation generator at - * "http://programming.sirrida.de/calcperm.php". - * - * All of the permutuations are essentially the same, except that each is - * rotated by 4 bits with respect to the next: - * - * P0: 0 12 8 4 1 13 9 5 2 14 10 6 3 15 11 7 - * P1: 4 0 12 8 5 1 13 9 6 2 14 10 7 3 15 11 - * P2: 8 4 0 12 9 5 1 13 10 6 2 14 11 7 3 15 - * P3: 12 8 4 0 13 9 5 1 14 10 6 2 15 11 7 3 - * - * The most efficient permutation from the online generator was P1, so we - * perform it as the core of the others, and then perform a final rotation. - * - * It is possible to do slightly better than "P1 then rotate" on desktop and - * server architectures for the other permutations. But the advantage isn't - * as evident on embedded platforms so we keep things simple. - */ -#define PERM1_INNER(x) \ - do { \ - bit_permute_step(x, 0x0a0a, 3); \ - bit_permute_step(x, 0x00cc, 6); \ - bit_permute_step_simple(x, 0x0f0f, 4); \ - } while (0) -#define PERM0(x) \ - do { \ - uint32_t _x = (x); \ - PERM1_INNER(_x); \ - (x) = leftRotate12_16(_x); \ - } while (0) -#define PERM1(x) PERM1_INNER(x) -#define PERM2(x) \ - do { \ - uint32_t _x = (x); \ - PERM1_INNER(_x); \ - (x) = leftRotate4_16(_x); \ - } while (0) -#define PERM3(x) \ - do { \ - uint32_t _x = (x); \ - PERM1_INNER(_x); \ - (x) = leftRotate8_16(_x); \ - } while (0) - -#define INV_PERM1_INNER(x) \ - do { \ - bit_permute_step(x, 0x0505, 5); \ - bit_permute_step(x, 0x00cc, 6); \ - bit_permute_step_simple(x, 0x0f0f, 4); \ - } while (0) -#define INV_PERM0(x) \ - do { \ - uint32_t _x = rightRotate12_16(x); \ - INV_PERM1_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM1(x) INV_PERM1_INNER(x) -#define INV_PERM2(x) \ - do { \ - uint32_t _x = rightRotate4_16(x); \ - INV_PERM1_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM3(x) \ - do { \ - uint32_t _x = rightRotate8_16(x); \ - INV_PERM1_INNER(_x); \ - (x) = _x; \ - } while (0) - -/** - * \brief Encrypts a 64-bit block with GIFT-64 (bit-sliced). - * - * \param ks Points to the GIFT-64 key schedule. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. 
- */ -static void gift64b_encrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint16_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word16(input); - s1 = be_load_word16(input + 2); - s2 = be_load_word16(input + 4); - s3 = be_load_word16(input + 6); - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[0]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[3]; - - /* Perform all 28 rounds */ - for (round = 0; round < 28; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 64-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s0 ^= (uint16_t)w3; - s1 ^= (uint16_t)(w3 >> 16); - s3 ^= 0x8000U ^ GIFT64_RC[round]; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word16(output, s0); - be_store_word16(output + 2, s1); - be_store_word16(output + 4, s2); - be_store_word16(output + 6, s3); -} - -/** - * \brief Decrypts a 64-bit block with GIFT-64 (bit-sliced). - * - * \param ks Points to the GIFT-64 key schedule. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - */ -static void gift64b_decrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint16_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the ciphertext into the state buffer and convert from big endian */ - s0 = be_load_word16(input); - s1 = be_load_word16(input + 2); - s2 = be_load_word16(input + 4); - s3 = be_load_word16(input + 6); - - /* Generate the decryption key at the end of the last round. - * - * To do that, we run the block operation forward to determine the - * final state of the key schedule after the last round: - * - * w0 = ks->k[0]; - * w1 = ks->k[1]; - * w2 = ks->k[2]; - * w3 = ks->k[3]; - * for (round = 0; round < 28; ++round) { - * temp = w3; - * w3 = w2; - * w2 = w1; - * w1 = w0; - * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - * } - * - * We can short-cut all of the above by noticing that we don't need - * to do the word rotations. Every 4 rounds, the rotation alignment - * returns to the original position and each word has been rotated - * by applying the "2 right and 4 left" bit-rotation step to it. - * We then repeat that 7 times for the full 28 rounds. The overall - * effect is to apply a "14 right and 28 left" bit-rotation to every word - * in the key schedule. That is equivalent to "14 right and 12 left" - * on the 16-bit sub-words. 
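The short-cut described in the comment above can be checked in isolation: applying the per-round key-word rotation seven times (each word is rotated once every four rounds, so seven times over 28 rounds) gives the same result as the one-shot precomputation used below. A standalone sketch with an arbitrary word:

    #include <stdint.h>
    #include <stdio.h>

    /* One application of the per-round key word rotation from the encryption loop. */
    static uint32_t rot_round(uint32_t w)
    {
        return ((w & 0xFFFC0000U) >> 2) | ((w & 0x00030000U) << 14) |
               ((w & 0x00000FFFU) << 4) | ((w & 0x0000F000U) >> 12);
    }

    /* The one-shot form used to derive the decryption key schedule. */
    static uint32_t rot_final(uint32_t w)
    {
        return ((w & 0xC0000000U) >> 14) | ((w & 0x3FFF0000U) << 2) |
               ((w & 0x0000000FU) << 12) | ((w & 0x0000FFF0U) >> 4);
    }

    int main(void)
    {
        uint32_t w = 0x12345678U;           /* arbitrary test word */
        uint32_t r = w;
        int i;
        for (i = 0; i < 7; ++i)             /* 28 rounds / 4 rounds per rotation = 7 */
            r = rot_round(r);
        printf("%08lX %08lX\n", (unsigned long)r, (unsigned long)rot_final(w));
        return 0;
    }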
- */ - w0 = ks->k[0]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[3]; - w0 = ((w0 & 0xC0000000U) >> 14) | ((w0 & 0x3FFF0000U) << 2) | - ((w0 & 0x0000000FU) << 12) | ((w0 & 0x0000FFF0U) >> 4); - w1 = ((w1 & 0xC0000000U) >> 14) | ((w1 & 0x3FFF0000U) << 2) | - ((w1 & 0x0000000FU) << 12) | ((w1 & 0x0000FFF0U) >> 4); - w2 = ((w2 & 0xC0000000U) >> 14) | ((w2 & 0x3FFF0000U) << 2) | - ((w2 & 0x0000000FU) << 12) | ((w2 & 0x0000FFF0U) >> 4); - w3 = ((w3 & 0xC0000000U) >> 14) | ((w3 & 0x3FFF0000U) << 2) | - ((w3 & 0x0000000FU) << 12) | ((w3 & 0x0000FFF0U) >> 4); - - /* Perform all 28 rounds */ - for (round = 28; round > 0; --round) { - /* Rotate the key schedule backwards */ - temp = w0; - w0 = w1; - w1 = w2; - w2 = w3; - w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | - ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s0 ^= (uint16_t)w3; - s1 ^= (uint16_t)(w3 >> 16); - s3 ^= 0x8000U ^ GIFT64_RC[round - 1]; - - /* InvPermBits - apply the inverse of the 128-bit permutation */ - INV_PERM0(s0); - INV_PERM1(s1); - INV_PERM2(s2); - INV_PERM3(s3); - - /* InvSubCells - apply the inverse of the S-box */ - temp = s0; - s0 = s3; - s3 = temp; - s2 ^= s0 & s1; - s3 ^= 0xFFFFU; - s1 ^= s3; - s3 ^= s2; - s2 ^= s0 | s1; - s0 ^= s1 & s3; - s1 ^= s0 & s2; - } - - /* Pack the state into the plaintext buffer in big endian */ - be_store_word16(output, s0); - be_store_word16(output + 2, s1); - be_store_word16(output + 4, s2); - be_store_word16(output + 6, s3); -} - -void gift64n_init(gift64n_key_schedule_t *ks, const unsigned char *key) -{ - /* Use the little-endian byte order from the LOTUS-AEAD submission */ - ks->k[0] = le_load_word32(key + 12); - ks->k[1] = le_load_word32(key + 8); - ks->k[2] = le_load_word32(key + 4); - ks->k[3] = le_load_word32(key); -} - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step_32(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) - -/** - * \brief Converts the GIFT-64 nibble-based representation into word-based. - * - * \param output Output buffer to write the word-based version to. - * \param input Input buffer to read the nibble-based version from. - * - * The \a input and \a output buffers can be the same buffer. - */ -static void gift64n_to_words - (unsigned char *output, const unsigned char *input) -{ - uint32_t s0, s1; - - /* Load the input buffer into 32-bit words. We use the nibble order from - * the LOTUS-AEAD submission to NIST which is byte-reversed with respect - * to the nibble order of the original GIFT-64 paper. Nibble zero is in - * the first byte instead of the last, which means little-endian order. */ - s0 = le_load_word32(input + 4); - s1 = le_load_word32(input); - - /* Rearrange the bits so that bits 0..3 of each nibble are - * scattered to bytes 0..3 of each word. The permutation is: - * - * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 - * - * Generated with "http://programming.sirrida.de/calcperm.php". 
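Reading the permutation quoted above in "source bit goes to target bit" form, bit b of nibble j (source bit 4*j + b) moves to bit 8*b + j; this can be cross-checked against the four delta-swap steps that the PERM_WORDS() macro performs next. A standalone sketch with an arbitrary input word:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t delta_swap32(uint32_t y, uint32_t mask, unsigned shift)
    {
        uint32_t t = ((y >> shift) ^ y) & mask;   /* same operation as bit_permute_step_32 */
        return (y ^ t) ^ (t << shift);
    }

    /* Bit-by-bit restatement of the comment: source bit 4*j + b -> target bit 8*b + j. */
    static uint32_t scatter_naive(uint32_t x)
    {
        uint32_t out = 0;
        unsigned j, b;
        for (j = 0; j < 8; ++j)
            for (b = 0; b < 4; ++b)
                out |= (uint32_t)((x >> (4 * j + b)) & 1U) << (8 * b + j);
        return out;
    }

    int main(void)
    {
        uint32_t x = 0x13579BDFU, y = x;          /* arbitrary test value */
        y = delta_swap32(y, 0x0a0a0a0a, 3);
        y = delta_swap32(y, 0x00cc00cc, 6);
        y = delta_swap32(y, 0x0000f0f0, 12);
        y = delta_swap32(y, 0x0000ff00, 8);
        printf("%08lX %08lX\n", (unsigned long)y, (unsigned long)scatter_naive(x));
        return 0;
    }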
- */ - #define PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step_32(x, 0x0a0a0a0a, 3); \ - bit_permute_step_32(x, 0x00cc00cc, 6); \ - bit_permute_step_32(x, 0x0000f0f0, 12); \ - bit_permute_step_32(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - PERM_WORDS(s0); - PERM_WORDS(s1); - - /* Rearrange the bytes and write them to the output buffer */ - output[0] = (uint8_t)s0; - output[1] = (uint8_t)s1; - output[2] = (uint8_t)(s0 >> 8); - output[3] = (uint8_t)(s1 >> 8); - output[4] = (uint8_t)(s0 >> 16); - output[5] = (uint8_t)(s1 >> 16); - output[6] = (uint8_t)(s0 >> 24); - output[7] = (uint8_t)(s1 >> 24); -} - -/** - * \brief Converts the GIFT-64 word-based representation into nibble-based. - * - * \param output Output buffer to write the nibble-based version to. - * \param input Input buffer to read the word-based version from. - */ -static void gift64n_to_nibbles - (unsigned char *output, const unsigned char *input) -{ - uint32_t s0, s1; - - /* Load the input bytes and rearrange them so that s0 contains the - * most significant nibbles and s1 contains the least significant */ - s0 = (((uint32_t)(input[6])) << 24) | - (((uint32_t)(input[4])) << 16) | - (((uint32_t)(input[2])) << 8) | - ((uint32_t)(input[0])); - s1 = (((uint32_t)(input[7])) << 24) | - (((uint32_t)(input[5])) << 16) | - (((uint32_t)(input[3])) << 8) | - ((uint32_t)(input[1])); - - /* Apply the inverse of PERM_WORDS() from the function above */ - #define INV_PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step_32(x, 0x00aa00aa, 7); \ - bit_permute_step_32(x, 0x0000cccc, 14); \ - bit_permute_step_32(x, 0x00f000f0, 4); \ - bit_permute_step_32(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - INV_PERM_WORDS(s0); - INV_PERM_WORDS(s1); - - /* Store the result into the output buffer as 32-bit words */ - le_store_word32(output + 4, s0); - le_store_word32(output, s1); -} - -void gift64n_encrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - gift64n_to_words(output, input); - gift64b_encrypt(ks, output, output); - gift64n_to_nibbles(output, output); -} - -void gift64n_decrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - gift64n_to_words(output, input); - gift64b_decrypt(ks, output, output); - gift64n_to_nibbles(output, output); -} - -void gift64t_encrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint16_t tweak) -{ - uint16_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift64n_to_words(output, input); - s0 = be_load_word16(output); - s1 = be_load_word16(output + 2); - s2 = be_load_word16(output + 4); - s3 = be_load_word16(output + 6); - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[0]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[3]; - - /* Perform all 28 rounds */ - for (round = 0; round < 28; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 64-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s0 ^= (uint16_t)w3; - s1 ^= (uint16_t)(w3 >> 16); - s3 ^= 0x8000U ^ GIFT64_RC[round]; - - /* AddTweak - XOR in the tweak every 4 rounds except the last */ - if (((round + 
1) % 4) == 0 && round < 27) - s2 ^= tweak; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word16(output, s0); - be_store_word16(output + 2, s1); - be_store_word16(output + 4, s2); - be_store_word16(output + 6, s3); - gift64n_to_nibbles(output, output); -} - -void gift64t_decrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint16_t tweak) -{ - uint16_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the ciphertext into the state buffer and convert from nibbles */ - gift64n_to_words(output, input); - s0 = be_load_word16(output); - s1 = be_load_word16(output + 2); - s2 = be_load_word16(output + 4); - s3 = be_load_word16(output + 6); - - /* Generate the decryption key at the end of the last round. - * - * To do that, we run the block operation forward to determine the - * final state of the key schedule after the last round: - * - * w0 = ks->k[0]; - * w1 = ks->k[1]; - * w2 = ks->k[2]; - * w3 = ks->k[3]; - * for (round = 0; round < 28; ++round) { - * temp = w3; - * w3 = w2; - * w2 = w1; - * w1 = w0; - * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - * } - * - * We can short-cut all of the above by noticing that we don't need - * to do the word rotations. Every 4 rounds, the rotation alignment - * returns to the original position and each word has been rotated - * by applying the "2 right and 4 left" bit-rotation step to it. - * We then repeat that 7 times for the full 28 rounds. The overall - * effect is to apply a "14 right and 28 left" bit-rotation to every word - * in the key schedule. That is equivalent to "14 right and 12 left" - * on the 16-bit sub-words. 
- */ - w0 = ks->k[0]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[3]; - w0 = ((w0 & 0xC0000000U) >> 14) | ((w0 & 0x3FFF0000U) << 2) | - ((w0 & 0x0000000FU) << 12) | ((w0 & 0x0000FFF0U) >> 4); - w1 = ((w1 & 0xC0000000U) >> 14) | ((w1 & 0x3FFF0000U) << 2) | - ((w1 & 0x0000000FU) << 12) | ((w1 & 0x0000FFF0U) >> 4); - w2 = ((w2 & 0xC0000000U) >> 14) | ((w2 & 0x3FFF0000U) << 2) | - ((w2 & 0x0000000FU) << 12) | ((w2 & 0x0000FFF0U) >> 4); - w3 = ((w3 & 0xC0000000U) >> 14) | ((w3 & 0x3FFF0000U) << 2) | - ((w3 & 0x0000000FU) << 12) | ((w3 & 0x0000FFF0U) >> 4); - - /* Perform all 28 rounds */ - for (round = 28; round > 0; --round) { - /* Rotate the key schedule backwards */ - temp = w0; - w0 = w1; - w1 = w2; - w2 = w3; - w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | - ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); - - /* AddTweak - XOR in the tweak every 4 rounds except the last */ - if ((round % 4) == 0 && round != 28) - s2 ^= tweak; - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s0 ^= (uint16_t)w3; - s1 ^= (uint16_t)(w3 >> 16); - s3 ^= 0x8000U ^ GIFT64_RC[round - 1]; - - /* InvPermBits - apply the inverse of the 128-bit permutation */ - INV_PERM0(s0); - INV_PERM1(s1); - INV_PERM2(s2); - INV_PERM3(s3); - - /* InvSubCells - apply the inverse of the S-box */ - temp = s0; - s0 = s3; - s3 = temp; - s2 ^= s0 & s1; - s3 ^= 0xFFFFU; - s1 ^= s3; - s3 ^= s2; - s2 ^= s0 | s1; - s0 ^= s1 & s3; - s1 ^= s0 & s2; - } - - /* Pack the state into the plaintext buffer in nibble form */ - be_store_word16(output, s0); - be_store_word16(output + 2, s1); - be_store_word16(output + 4, s2); - be_store_word16(output + 6, s3); - gift64n_to_nibbles(output, output); -} - -#endif /* GIFT64_LOW_MEMORY */ diff --git a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/internal-gift64.h b/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/internal-gift64.h deleted file mode 100644 index 010359b..0000000 --- a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/internal-gift64.h +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_GIFT64_H -#define LW_INTERNAL_GIFT64_H - -/** - * \file internal-gift64.h - * \brief GIFT-64 block cipher. 
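The two AddTweak conditions above look different but select matching positions: encryption XORs the tweak after rounds 3, 7, 11, 15, 19 and 23 (0-based), while decryption, whose loop variable round undoes encryption round round - 1, XORs it while undoing exactly those rounds. A throwaway sketch that just prints both schedules:

    #include <stdio.h>

    int main(void)
    {
        int round;
        /* Encryption: tweak XORed into s2 after round "round" (0-based). */
        for (round = 0; round < 28; ++round)
            if (((round + 1) % 4) == 0 && round < 27)
                printf("enc: tweak after round %d\n", round);
        /* Decryption: iteration "round" (28..1) undoes encryption round round - 1. */
        for (round = 28; round > 0; --round)
            if ((round % 4) == 0 && round != 28)
                printf("dec: tweak while undoing round %d\n", round - 1);
        return 0;
    }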
- * - * References: https://eprint.iacr.org/2017/622.pdf, - * https://eprint.iacr.org/2020/412.pdf, - * https://giftcipher.github.io/gift/ - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \var GIFT64_LOW_MEMORY - * \brief Define this to 1 to use a low memory version of the key schedule. - * - * The default is to use the fix-sliced version of GIFT-64 which is very - * fast on 32-bit platforms but requires 48 bytes to store the key schedule. - * The large key schedule may be a problem on 8-bit and 16-bit platforms. - * The fix-sliced version also encrypts two blocks at a time in 32-bit - * words which is an unnecessary optimization for 8-bit platforms. - * - * GIFT64_LOW_MEMORY can be defined to 1 to select the original non - * fix-sliced version which only requires 16 bytes to store the key, - * with the rest of the key schedule expanded on the fly. - */ -#if !defined(GIFT64_LOW_MEMORY) -#if defined(__AVR__) -#define GIFT64_LOW_MEMORY 1 -#else -#define GIFT64_LOW_MEMORY 0 -#endif -#endif - -/** - * \brief Size of a GIFT-64 block in bytes. - */ -#define GIFT64_BLOCK_SIZE 8 - -/** - * \brief Structure of the key schedule for GIFT-64. - */ -typedef struct -{ - uint32_t k[4]; /**< Words of the key schedule */ -#if !GIFT64_LOW_MEMORY - uint32_t rk[8]; /**< Pre-computed round keys for fixsliced form */ -#endif - -} gift64n_key_schedule_t; - -/** - * \fn void gift64n_update_round_keys(gift64n_key_schedule_t *ks); - * \brief Updates the round keys after a change in the base key. - * - * \param ks Points to the key schedule to update. - */ -#if GIFT64_LOW_MEMORY -#define gift64n_update_round_keys(ks) do { ; } while (0) /* Not needed */ -#else -void gift64n_update_round_keys(gift64n_key_schedule_t *ks); -#endif - -/** - * \brief Initializes the key schedule for GIFT-64 (nibble-based). - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the 16 bytes of the key data. - */ -void gift64n_init(gift64n_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Encrypts a 64-bit block with GIFT-64 (nibble-based). - * - * \param ks Points to the GIFT-64 key schedule. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void gift64n_encrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 64-bit block with GIFT-64 (nibble-based). - * - * \param ks Points to the GIFT-64 key schedule. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. 
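A hypothetical round-trip test of the nibble-based API declared here (arbitrary key and block values; this assumes gift64n_init() leaves the schedule ready for use, as the declarations above suggest):

    #include <stdio.h>
    #include <string.h>
    #include "internal-gift64.h"

    int main(void)
    {
        static const unsigned char key[16] = {       /* arbitrary 128-bit test key */
            0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
            0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF
        };
        unsigned char block[GIFT64_BLOCK_SIZE] = {0, 1, 2, 3, 4, 5, 6, 7};
        unsigned char original[GIFT64_BLOCK_SIZE];
        gift64n_key_schedule_t ks;

        memcpy(original, block, sizeof(original));
        gift64n_init(&ks, key);
        gift64n_encrypt(&ks, block, block);          /* in-place encryption */
        gift64n_decrypt(&ks, block, block);          /* in-place decryption */
        printf("round trip %s\n",
               memcmp(block, original, sizeof(original)) == 0 ? "ok" : "failed");
        return 0;
    }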
- */ -void gift64n_decrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/* 4-bit tweak values expanded to 16-bit for TweGIFT-64 */ -#define GIFT64T_TWEAK_0 0x0000 /**< TweGIFT-64 tweak value 0 */ -#define GIFT64T_TWEAK_1 0xe1e1 /**< TweGIFT-64 tweak value 1 */ -#define GIFT64T_TWEAK_2 0xd2d2 /**< TweGIFT-64 tweak value 2 */ -#define GIFT64T_TWEAK_3 0x3333 /**< TweGIFT-64 tweak value 3 */ -#define GIFT64T_TWEAK_4 0xb4b4 /**< TweGIFT-64 tweak value 4 */ -#define GIFT64T_TWEAK_5 0x5555 /**< TweGIFT-64 tweak value 5 */ -#define GIFT64T_TWEAK_6 0x6666 /**< TweGIFT-64 tweak value 6 */ -#define GIFT64T_TWEAK_7 0x8787 /**< TweGIFT-64 tweak value 7 */ -#define GIFT64T_TWEAK_8 0x7878 /**< TweGIFT-64 tweak value 8 */ -#define GIFT64T_TWEAK_9 0x9999 /**< TweGIFT-64 tweak value 9 */ -#define GIFT64T_TWEAK_10 0xaaaa /**< TweGIFT-64 tweak value 10 */ -#define GIFT64T_TWEAK_11 0x4b4b /**< TweGIFT-64 tweak value 11 */ -#define GIFT64T_TWEAK_12 0xcccc /**< TweGIFT-64 tweak value 12 */ -#define GIFT64T_TWEAK_13 0x2d2d /**< TweGIFT-64 tweak value 13 */ -#define GIFT64T_TWEAK_14 0x1e1e /**< TweGIFT-64 tweak value 14 */ -#define GIFT64T_TWEAK_15 0xffff /**< TweGIFT-64 tweak value 15 */ - -/** - * \brief Encrypts a 64-bit block with TweGIFT-64 (tweakable variant). - * - * \param ks Points to the GIFT-64 key schedule. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * \param tweak 4-bit tweak value expanded to 16-bit. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This variant of GIFT-64 is used by the LOTUS/LOCUS submission to the - * NIST Lightweight Cryptography Competition. A 4-bit tweak is added to - * some of the rounds to provide domain separation. If the tweak is - * zero, then this function is identical to gift64n_encrypt(). - */ -void gift64t_encrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint16_t tweak); - -/** - * \brief Decrypts a 64-bit block with TweGIFT-64 (tweakable variant). - * - * \param ks Points to the GIFT-64 key schedule. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * \param tweak 4-bit tweak value expanded to 16-bit. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This variant of GIFT-64 is used by the LOTUS/LOCUS submission to the - * NIST Lightweight Cryptography Competition. A 4-bit tweak is added to - * some of the rounds to provide domain separation. If the tweak is - * zero, then this function is identical to gift64n_decrypt(). - */ -void gift64t_decrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint16_t tweak); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/internal-util.h b/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
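The sixteen GIFT64T_TWEAK_* values above are consistent with expanding the 4-bit tweak t so that each byte carries t in its low nibble and t XORed with the replicated parity bit of t in its high nibble, with the byte repeated twice. This is an observation about the table, not a statement taken from the sources; a short sketch that regenerates the list:

    #include <stdio.h>

    int main(void)
    {
        unsigned t;
        for (t = 0; t < 16; ++t) {
            unsigned parity = (t ^ (t >> 1) ^ (t >> 2) ^ (t >> 3)) & 1U;
            unsigned high = t ^ (parity ? 0xFU : 0x0U);  /* t XOR replicated parity bit */
            unsigned byte = (high << 4) | t;
            printf("GIFT64T_TWEAK_%-2u = 0x%02x%02x\n", t, byte, byte);
        }
        return 0;
    }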
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) 
| \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = 
(src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
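As an illustration of the composition idea described above (assuming only rotate-by-1 and rotate-by-multiple-of-8 are cheap, as on AVR), a left rotation by 5 can be built as a left rotate by 8 followed by three right rotates by 1; a standalone check against the generic rotation:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t rotl(uint32_t x, unsigned bits)
    {
        return (x << bits) | (x >> (32 - bits));
    }
    static uint32_t rotr(uint32_t x, unsigned bits)
    {
        return (x >> bits) | (x << (32 - bits));
    }

    int main(void)
    {
        uint32_t x = 0xDEADBEEFU;                    /* arbitrary test value */
        /* leftRotate5 composed from the cheap rotations: left 8, then right 1 three times */
        uint32_t composed = rotr(rotr(rotr(rotl(x, 8), 1), 1), 1);
        printf("%08lX %08lX\n", (unsigned long)composed, (unsigned long)rotl(x, 5));
        return 0;
    }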
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/lotus-locus.c b/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/lotus-locus.c deleted file mode 100644 index 4a1efd0..0000000 --- a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/lotus-locus.c +++ /dev/null @@ -1,436 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "lotus-locus.h" -#include "internal-gift64.h" -#include "internal-util.h" -#include - -aead_cipher_t const lotus_aead_cipher = { - "LOTUS-AEAD", - LOTUS_AEAD_KEY_SIZE, - LOTUS_AEAD_NONCE_SIZE, - LOTUS_AEAD_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - lotus_aead_encrypt, - lotus_aead_decrypt -}; - -aead_cipher_t const locus_aead_cipher = { - "LOCUS-AEAD", - LOCUS_AEAD_KEY_SIZE, - LOCUS_AEAD_NONCE_SIZE, - LOCUS_AEAD_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - locus_aead_encrypt, - locus_aead_decrypt -}; - -/** - * \brief Multiplies a key by 2 in the GF(128) field. - * - * \param ks The key schedule structure containing the key in host byte order. 
- */ -STATIC_INLINE void lotus_or_locus_mul_2(gift64n_key_schedule_t *ks) -{ - uint32_t mask = (uint32_t)(((int32_t)(ks->k[0])) >> 31); - ks->k[0] = (ks->k[0] << 1) | (ks->k[1] >> 31); - ks->k[1] = (ks->k[1] << 1) | (ks->k[2] >> 31); - ks->k[2] = (ks->k[2] << 1) | (ks->k[3] >> 31); - ks->k[3] = (ks->k[3] << 1) ^ (mask & 0x87); - gift64n_update_round_keys(ks); -} - -/** - * \brief Initializes a LOTUS-AEAD or LOCUS-AEAD cipher instance. - * - * \param ks Key schedule to initialize. - * \param deltaN Delta-N value for the cipher state. - * \param key Points to the 16-byte key for the cipher instance. - * \param nonce Points to the 16-byte key for the cipher instance. - * \param T Points to a temporary buffer of LOTUS_AEAD_KEY_SIZE bytes - * that will be destroyed during this function. - */ -static void lotus_or_locus_init - (gift64n_key_schedule_t *ks, - unsigned char deltaN[GIFT64_BLOCK_SIZE], - const unsigned char *key, - const unsigned char *nonce, - unsigned char *T) -{ - gift64n_init(ks, key); - memset(deltaN, 0, GIFT64_BLOCK_SIZE); - gift64t_encrypt(ks, deltaN, deltaN, GIFT64T_TWEAK_0); - lw_xor_block_2_src(T, key, nonce, LOTUS_AEAD_KEY_SIZE); - gift64n_init(ks, T); - gift64t_encrypt(ks, deltaN, deltaN, GIFT64T_TWEAK_1); -} - -/** - * \brief Processes associated data for LOTUS-AEAD or LOCUS-AEAD. - * - * \param ks Points to the key schedule. - * \param deltaN Points to the Delta-N value from the state. - * \param V Points to the V value from the state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes, must be non-zero. - */ -static void lotus_or_locus_process_ad - (gift64n_key_schedule_t *ks, - const unsigned char deltaN[GIFT64_BLOCK_SIZE], - unsigned char V[GIFT64_BLOCK_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char X[GIFT64_BLOCK_SIZE]; - unsigned char temp; - while (adlen > GIFT64_BLOCK_SIZE) { - lotus_or_locus_mul_2(ks); - lw_xor_block_2_src(X, ad, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(ks, X, X, GIFT64T_TWEAK_2); - lw_xor_block(V, X, GIFT64_BLOCK_SIZE); - ad += GIFT64_BLOCK_SIZE; - adlen -= GIFT64_BLOCK_SIZE; - } - lotus_or_locus_mul_2(ks); - temp = (unsigned)adlen; - if (temp < GIFT64_BLOCK_SIZE) { - memcpy(X, deltaN, GIFT64_BLOCK_SIZE); - lw_xor_block(X, ad, temp); - X[temp] ^= 0x01; - gift64t_encrypt(ks, X, X, GIFT64T_TWEAK_3); - } else { - lw_xor_block_2_src(X, ad, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(ks, X, X, GIFT64T_TWEAK_2); - } - lw_xor_block(V, X, GIFT64_BLOCK_SIZE); -} - -/** - * \brief Generates the authentication tag for LOTUS-AEAD or LOCUS-AEAD. - * - * \param ks Points to the key schedule. - * \param tag Points to the buffer to receive the authentication tag. - * \param deltaN Points to the Delta-N value from the state. - * \param W Points to the W value from the state. - * \param V Points to the V value from the state. 
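lotus_or_locus_mul_2() above is the usual doubling in GF(2^128), as used for XEX-style masking: shift the 128-bit value left by one bit across the four words, most significant word first, and XOR the reduction constant 0x87 into the low word when the discarded top bit was set (the library additionally refreshes the round keys afterwards). A standalone restatement with an arbitrary input, using an explicit conditional mask in place of the arithmetic-shift trick:

    #include <stdint.h>
    #include <stdio.h>

    /* Double a 128-bit value held as four 32-bit words, most significant first,
       reduced modulo x^128 + x^7 + x^2 + x + 1 (the 0x87 constant). */
    static void gf128_mul_2(uint32_t k[4])
    {
        uint32_t mask = (k[0] & 0x80000000U) ? 0xFFFFFFFFU : 0;
        k[0] = (k[0] << 1) | (k[1] >> 31);
        k[1] = (k[1] << 1) | (k[2] >> 31);
        k[2] = (k[2] << 1) | (k[3] >> 31);
        k[3] = (k[3] << 1) ^ (mask & 0x87);
    }

    int main(void)
    {
        uint32_t k[4] = {0x80000000U, 0, 0, 1};      /* top bit set, so reduction applies */
        gf128_mul_2(k);
        printf("%08lX %08lX %08lX %08lX\n",          /* 00000000 00000000 00000000 00000085 */
               (unsigned long)k[0], (unsigned long)k[1],
               (unsigned long)k[2], (unsigned long)k[3]);
        return 0;
    }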
- */ -static void lotus_or_locus_gen_tag - (gift64n_key_schedule_t *ks, unsigned char *tag, - unsigned char deltaN[GIFT64_BLOCK_SIZE], - unsigned char W[GIFT64_BLOCK_SIZE], - unsigned char V[GIFT64_BLOCK_SIZE]) -{ - lotus_or_locus_mul_2(ks); - lw_xor_block(W, deltaN, GIFT64_BLOCK_SIZE); - lw_xor_block(W, V, GIFT64_BLOCK_SIZE); - gift64t_encrypt(ks, W, W, GIFT64T_TWEAK_6); - lw_xor_block_2_src(tag, W, deltaN, GIFT64_BLOCK_SIZE); -} - -int lotus_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - gift64n_key_schedule_t ks; - unsigned char WV[GIFT64_BLOCK_SIZE * 2]; - unsigned char deltaN[GIFT64_BLOCK_SIZE]; - unsigned char X1[GIFT64_BLOCK_SIZE]; - unsigned char X2[GIFT64_BLOCK_SIZE]; - unsigned temp; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + LOTUS_AEAD_TAG_SIZE; - - /* Initialize the state with the key and the nonce */ - lotus_or_locus_init(&ks, deltaN, k, npub, WV); - memset(WV, 0, sizeof(WV)); - - /* Process the associated data */ - if (adlen > 0) { - lotus_or_locus_process_ad - (&ks, deltaN, WV + GIFT64_BLOCK_SIZE, ad, adlen); - } - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > (GIFT64_BLOCK_SIZE * 2)) { - lotus_or_locus_mul_2(&ks); - lw_xor_block_2_src(X1, m, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X1, GIFT64T_TWEAK_4); - lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_4); - lw_xor_block_2_src - (X2, m + GIFT64_BLOCK_SIZE, X2, GIFT64_BLOCK_SIZE); - lw_xor_block_2_src(c, X2, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_5); - lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_5); - lw_xor_block_2_src - (c + GIFT64_BLOCK_SIZE, X1, X2, GIFT64_BLOCK_SIZE); - c += GIFT64_BLOCK_SIZE * 2; - m += GIFT64_BLOCK_SIZE * 2; - mlen -= GIFT64_BLOCK_SIZE * 2; - } - temp = (unsigned)mlen; - lotus_or_locus_mul_2(&ks); - memcpy(X1, deltaN, GIFT64_BLOCK_SIZE); - X1[0] ^= (unsigned char)temp; - gift64t_encrypt(&ks, X2, X1, GIFT64T_TWEAK_12); - lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_12); - if (temp <= GIFT64_BLOCK_SIZE) { - lw_xor_block(WV, m, temp); - lw_xor_block(X2, m, temp); - lw_xor_block_2_src(c, X2, deltaN, temp); - } else { - lw_xor_block(X2, m, GIFT64_BLOCK_SIZE); - lw_xor_block_2_src(c, X2, deltaN, GIFT64_BLOCK_SIZE); - c += GIFT64_BLOCK_SIZE; - m += GIFT64_BLOCK_SIZE; - temp -= GIFT64_BLOCK_SIZE; - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_13); - lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_13); - lw_xor_block(WV, m, temp); - lw_xor_block(X1, X2, temp); - lw_xor_block_2_src(c, X1, m, temp); - } - c += temp; - } - - /* Generate the authentication tag */ - lotus_or_locus_gen_tag(&ks, c, deltaN, WV, WV + GIFT64_BLOCK_SIZE); - return 0; -} - -int lotus_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - gift64n_key_schedule_t ks; - unsigned char WV[GIFT64_BLOCK_SIZE * 2]; - unsigned char deltaN[GIFT64_BLOCK_SIZE]; - unsigned char X1[GIFT64_BLOCK_SIZE]; - unsigned char X2[GIFT64_BLOCK_SIZE]; - unsigned char *mtemp = m; - unsigned 
temp; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < LOTUS_AEAD_TAG_SIZE) - return -1; - *mlen = clen - LOTUS_AEAD_TAG_SIZE; - - /* Initialize the state with the key and the nonce */ - lotus_or_locus_init(&ks, deltaN, k, npub, WV); - memset(WV, 0, sizeof(WV)); - - /* Process the associated data */ - if (adlen > 0) { - lotus_or_locus_process_ad - (&ks, deltaN, WV + GIFT64_BLOCK_SIZE, ad, adlen); - } - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= LOTUS_AEAD_TAG_SIZE; - if (clen > 0) { - while (clen > (GIFT64_BLOCK_SIZE * 2)) { - lotus_or_locus_mul_2(&ks); - lw_xor_block_2_src(X1, c, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X1, GIFT64T_TWEAK_5); - lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_5); - lw_xor_block(X2, c + GIFT64_BLOCK_SIZE, GIFT64_BLOCK_SIZE); - lw_xor_block_2_src(m, X2, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_4); - lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_4); - lw_xor_block_2_src - (m + GIFT64_BLOCK_SIZE, X1, X2, GIFT64_BLOCK_SIZE); - c += GIFT64_BLOCK_SIZE * 2; - m += GIFT64_BLOCK_SIZE * 2; - clen -= GIFT64_BLOCK_SIZE * 2; - } - temp = (unsigned)clen; - lotus_or_locus_mul_2(&ks); - memcpy(X1, deltaN, GIFT64_BLOCK_SIZE); - X1[0] ^= (unsigned char)temp; - gift64t_encrypt(&ks, X2, X1, GIFT64T_TWEAK_12); - lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_12); - if (temp <= GIFT64_BLOCK_SIZE) { - lw_xor_block_2_src(m, X2, c, temp); - lw_xor_block(m, deltaN, temp); - lw_xor_block(X2, m, temp); - lw_xor_block(WV, m, temp); - } else { - lw_xor_block_2_src(m, X2, c, GIFT64_BLOCK_SIZE); - lw_xor_block(m, deltaN, GIFT64_BLOCK_SIZE); - lw_xor_block(X2, m, GIFT64_BLOCK_SIZE); - c += GIFT64_BLOCK_SIZE; - m += GIFT64_BLOCK_SIZE; - temp -= GIFT64_BLOCK_SIZE; - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_13); - lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_13); - lw_xor_block(X1, X2, temp); - lw_xor_block_2_src(m, X1, c, temp); - lw_xor_block(WV, m, temp); - } - c += temp; - } - - /* Check the authentication tag */ - lotus_or_locus_gen_tag(&ks, WV, deltaN, WV, WV + GIFT64_BLOCK_SIZE); - return aead_check_tag(mtemp, *mlen, WV, c, LOTUS_AEAD_TAG_SIZE); -} - -int locus_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - gift64n_key_schedule_t ks; - unsigned char WV[GIFT64_BLOCK_SIZE * 2]; - unsigned char deltaN[GIFT64_BLOCK_SIZE]; - unsigned char X[GIFT64_BLOCK_SIZE]; - unsigned temp; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + LOCUS_AEAD_TAG_SIZE; - - /* Initialize the state with the key and the nonce */ - lotus_or_locus_init(&ks, deltaN, k, npub, WV); - memset(WV, 0, sizeof(WV)); - - /* Process the associated data */ - if (adlen > 0) { - lotus_or_locus_process_ad - (&ks, deltaN, WV + GIFT64_BLOCK_SIZE, ad, adlen); - } - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > GIFT64_BLOCK_SIZE) { - lotus_or_locus_mul_2(&ks); - lw_xor_block_2_src(X, m, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_4); - lw_xor_block(WV, X, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_4); - lw_xor_block_2_src(c, X, deltaN, 
GIFT64_BLOCK_SIZE); - c += GIFT64_BLOCK_SIZE; - m += GIFT64_BLOCK_SIZE; - mlen -= GIFT64_BLOCK_SIZE; - } - temp = (unsigned)mlen; - lotus_or_locus_mul_2(&ks); - memcpy(X, deltaN, GIFT64_BLOCK_SIZE); - X[0] ^= (unsigned char)temp; - gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_5); - lw_xor_block(WV, X, GIFT64_BLOCK_SIZE); - lw_xor_block(WV, m, temp); - gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_5); - lw_xor_block(X, deltaN, temp); - lw_xor_block_2_src(c, m, X, temp); - c += temp; - } - - /* Generate the authentication tag */ - lotus_or_locus_gen_tag(&ks, c, deltaN, WV, WV + GIFT64_BLOCK_SIZE); - return 0; -} - -int locus_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - gift64n_key_schedule_t ks; - unsigned char WV[GIFT64_BLOCK_SIZE * 2]; - unsigned char deltaN[GIFT64_BLOCK_SIZE]; - unsigned char X[GIFT64_BLOCK_SIZE]; - unsigned char *mtemp = m; - unsigned temp; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < LOCUS_AEAD_TAG_SIZE) - return -1; - *mlen = clen - LOCUS_AEAD_TAG_SIZE; - - /* Initialize the state with the key and the nonce */ - lotus_or_locus_init(&ks, deltaN, k, npub, WV); - memset(WV, 0, sizeof(WV)); - - /* Process the associated data */ - if (adlen > 0) { - lotus_or_locus_process_ad - (&ks, deltaN, WV + GIFT64_BLOCK_SIZE, ad, adlen); - } - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= LOCUS_AEAD_TAG_SIZE; - if (clen > 0) { - while (clen > GIFT64_BLOCK_SIZE) { - lotus_or_locus_mul_2(&ks); - lw_xor_block_2_src(X, c, deltaN, GIFT64_BLOCK_SIZE); - gift64t_decrypt(&ks, X, X, GIFT64T_TWEAK_4); - lw_xor_block(WV, X, GIFT64_BLOCK_SIZE); - gift64t_decrypt(&ks, X, X, GIFT64T_TWEAK_4); - lw_xor_block_2_src(m, X, deltaN, GIFT64_BLOCK_SIZE); - c += GIFT64_BLOCK_SIZE; - m += GIFT64_BLOCK_SIZE; - clen -= GIFT64_BLOCK_SIZE; - } - temp = (unsigned)clen; - lotus_or_locus_mul_2(&ks); - memcpy(X, deltaN, GIFT64_BLOCK_SIZE); - X[0] ^= (unsigned char)temp; - gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_5); - lw_xor_block(WV, X, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_5); - lw_xor_block(X, deltaN, temp); - lw_xor_block_2_src(m, c, X, temp); - lw_xor_block(WV, m, temp); - c += temp; - } - - /* Check the authentication tag */ - lotus_or_locus_gen_tag(&ks, WV, deltaN, WV, WV + GIFT64_BLOCK_SIZE); - return aead_check_tag(mtemp, *mlen, WV, c, LOCUS_AEAD_TAG_SIZE); -} diff --git a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/lotus-locus.h b/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/lotus-locus.h deleted file mode 100644 index 85434a8..0000000 --- a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys-avr/lotus-locus.h +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_LOTUS_LOCUS_H -#define LWCRYPTO_LOTUS_LOCUS_H - -#include "aead-common.h" - -/** - * \file lotus-locus.h - * \brief LOTUS-AEAD and LOCUS-AEAD authenticated encryption algorithms. - * - * LOTUS-AEAD and LOCUS-AEAD are authenticated encryption algorithms - * that are based around a tweakable variant of the GIFT-64 block cipher - * called TweGIFT-64. Both AEAD algorithms have a 128-bit key, a 128-bit - * nonce, and a 64-bit tag. - * - * The two algorithms have the same key initialization, associated data - * processing, and tag generation mechanisms. They differ in how the - * input is encrypted with TweGIFT-64. - * - * LOTUS-AEAD uses a method similar to the block cipher mode OTR. - * TweGIFT-64 is essentially converted into a 128-bit block cipher - * using a Feistel construction and four TweGIFT-64 block operations - * every 16 bytes of input. - * - * LOCUS-AEAD uses a method similar to the block cipher mode OCB - * with two TweGIFT-64 block operations for every 8 bytes of input. - * LOCUS-AEAD requires both the block encrypt and block decrypt - * operations of TweGIFT-64, which increases the overall code size. - * LOTUS-AEAD only needs the block encrypt operation. - * - * LOTUS-AEAD is the primary member of the family. - * - * References: https://www.isical.ac.in/~lightweight/lotus/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for LOTUS-AEAD. - */ -#define LOTUS_AEAD_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for LOTUS-AEAD. - */ -#define LOTUS_AEAD_TAG_SIZE 8 - -/** - * \brief Size of the nonce for LOTUS-AEAD. - */ -#define LOTUS_AEAD_NONCE_SIZE 16 - -/** - * \brief Size of the key for LOCUS-AEAD. - */ -#define LOCUS_AEAD_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for LOCUS-AEAD. - */ -#define LOCUS_AEAD_TAG_SIZE 8 - -/** - * \brief Size of the nonce for LOCUS-AEAD. - */ -#define LOCUS_AEAD_NONCE_SIZE 16 - -/** - * \brief Meta-information block for the LOTUS-AEAD cipher. - */ -extern aead_cipher_t const lotus_aead_cipher; - -/** - * \brief Meta-information block for the LOCUS-AEAD cipher. - */ -extern aead_cipher_t const locus_aead_cipher; - -/** - * \brief Encrypts and authenticates a packet with LOTUS-AEAD. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. 
- * \param m Buffer that contains the plaintext message to encrypt.
- * \param mlen Length of the plaintext message in bytes.
- * \param ad Buffer that contains associated data to authenticate
- * along with the packet but which does not need to be encrypted.
- * \param adlen Length of the associated data in bytes.
- * \param nsec Secret nonce - not used by this algorithm.
- * \param npub Points to the public nonce for the packet which must
- * be 16 bytes in length.
- * \param k Points to the 16 bytes of the key to use to encrypt the packet.
- *
- * \return 0 on success, or a negative value if there was an error in
- * the parameters.
- *
- * \sa lotus_aead_decrypt()
- */
-int lotus_aead_encrypt
- (unsigned char *c, unsigned long long *clen,
- const unsigned char *m, unsigned long long mlen,
- const unsigned char *ad, unsigned long long adlen,
- const unsigned char *nsec,
- const unsigned char *npub,
- const unsigned char *k);
-
-/**
- * \brief Decrypts and authenticates a packet with LOTUS-AEAD.
- *
- * \param m Buffer to receive the plaintext message on output.
- * \param mlen Receives the length of the plaintext message on output.
- * \param nsec Secret nonce - not used by this algorithm.
- * \param c Buffer that contains the ciphertext and authentication
- * tag to decrypt.
- * \param clen Length of the input data in bytes, which includes the
- * ciphertext and the 8 byte authentication tag.
- * \param ad Buffer that contains associated data to authenticate
- * along with the packet but which does not need to be encrypted.
- * \param adlen Length of the associated data in bytes.
- * \param npub Points to the public nonce for the packet which must
- * be 16 bytes in length.
- * \param k Points to the 16 bytes of the key to use to decrypt the packet.
- *
- * \return 0 on success, -1 if the authentication tag was incorrect,
- * or some other negative number if there was an error in the parameters.
- *
- * \sa lotus_aead_encrypt()
- */
-int lotus_aead_decrypt
- (unsigned char *m, unsigned long long *mlen,
- unsigned char *nsec,
- const unsigned char *c, unsigned long long clen,
- const unsigned char *ad, unsigned long long adlen,
- const unsigned char *npub,
- const unsigned char *k);
-
-/**
- * \brief Encrypts and authenticates a packet with LOCUS-AEAD.
- *
- * \param c Buffer to receive the output.
- * \param clen On exit, set to the length of the output which includes
- * the ciphertext and the 8 byte authentication tag.
- * \param m Buffer that contains the plaintext message to encrypt.
- * \param mlen Length of the plaintext message in bytes.
- * \param ad Buffer that contains associated data to authenticate
- * along with the packet but which does not need to be encrypted.
- * \param adlen Length of the associated data in bytes.
- * \param nsec Secret nonce - not used by this algorithm.
- * \param npub Points to the public nonce for the packet which must
- * be 16 bytes in length.
- * \param k Points to the 16 bytes of the key to use to encrypt the packet.
- *
- * \return 0 on success, or a negative value if there was an error in
- * the parameters.
- *
- * \sa locus_aead_decrypt()
- */
-int locus_aead_encrypt
- (unsigned char *c, unsigned long long *clen,
- const unsigned char *m, unsigned long long mlen,
- const unsigned char *ad, unsigned long long adlen,
- const unsigned char *nsec,
- const unsigned char *npub,
- const unsigned char *k);
-
-/**
- * \brief Decrypts and authenticates a packet with LOCUS-AEAD.
- *
- * \param m Buffer to receive the plaintext message on output.
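The declarations above are the whole public LOTUS-AEAD calling surface. A minimal round-trip sketch against those signatures (the all-zero key and nonce and the helper name lotus_roundtrip_example are illustrative only; a real caller must use a fresh nonce for every packet):

#include <string.h>
#include "lotus-locus.h"

/* Encrypt a 32-byte message with LOTUS-AEAD and decrypt it again. */
int lotus_roundtrip_example(void)
{
    static const unsigned char key[LOTUS_AEAD_KEY_SIZE] = {0};
    static const unsigned char nonce[LOTUS_AEAD_NONCE_SIZE] = {0};
    unsigned char msg[32] = "hello, lightweight crypto!";
    unsigned char ct[sizeof(msg) + LOTUS_AEAD_TAG_SIZE]; /* ciphertext + 8-byte tag */
    unsigned char pt[sizeof(msg)];
    unsigned long long clen, plen;

    if (lotus_aead_encrypt(ct, &clen, msg, sizeof(msg),
                           0, 0, 0, nonce, key) != 0)
        return -1;
    /* Per the documentation above, returns 0 only if the tag verifies. */
    if (lotus_aead_decrypt(pt, &plen, 0, ct, clen,
                           0, 0, nonce, key) != 0)
        return -1;
    return memcmp(pt, msg, sizeof(msg)) == 0 ? 0 : -1;
}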
- * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 9 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa locus_aead_encrypt() - */ -int locus_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/internal-gift64-avr.S b/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/internal-gift64-avr.S new file mode 100644 index 0000000..fdb668d --- /dev/null +++ b/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/internal-gift64-avr.S @@ -0,0 +1,6047 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global gift64n_init + .type gift64n_init, @function +gift64n_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + std Z+12,r18 + std Z+13,r19 + std Z+14,r20 + std Z+15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + std Z+4,r18 + std Z+5,r19 + std Z+6,r20 + std Z+7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + ret + .size gift64n_init, .-gift64n_init + + .text +.global gift64n_encrypt + .type gift64n_encrypt, @function +gift64n_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 28 + ld r6,Z + ldd r7,Z+1 + ldd r8,Z+2 + ldd r9,Z+3 + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Z+4 + ldd r7,Z+5 + ldd r8,Z+6 + ldd r9,Z+7 + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Z+8 + ldd r7,Z+9 + ldd r8,Z+10 + ldd r9,Z+11 + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Z+12 + ldd r7,Z+13 + ldd r8,Z+14 + ldd r9,Z+15 + ld r18,X+ + ld r19,X+ + bst r18,0 + bld r20,0 + bst r18,1 + bld r22,0 + bst r18,2 + bld r2,0 + bst r18,3 + bld r4,0 + bst r18,4 + bld r20,1 + bst r18,5 + bld r22,1 + bst r18,6 + bld r2,1 + bst r18,7 + bld r4,1 + bst r19,0 + bld r20,2 + bst r19,1 + bld r22,2 + bst r19,2 + bld r2,2 + bst r19,3 + bld r4,2 + bst r19,4 + bld r20,3 + bst r19,5 + bld r22,3 + bst r19,6 + bld r2,3 + bst r19,7 + bld r4,3 + ld r18,X+ + ld r19,X+ + bst r18,0 + bld r20,4 + bst r18,1 + bld r22,4 + bst r18,2 + bld r2,4 + bst r18,3 + bld r4,4 + bst r18,4 + bld r20,5 + bst r18,5 + bld r22,5 + bst r18,6 + bld r2,5 + bst 
r18,7 + bld r4,5 + bst r19,0 + bld r20,6 + bst r19,1 + bld r22,6 + bst r19,2 + bld r2,6 + bst r19,3 + bld r4,6 + bst r19,4 + bld r20,7 + bst r19,5 + bld r22,7 + bst r19,6 + bld r2,7 + bst r19,7 + bld r4,7 + ld r18,X+ + ld r19,X+ + bst r18,0 + bld r21,0 + bst r18,1 + bld r23,0 + bst r18,2 + bld r3,0 + bst r18,3 + bld r5,0 + bst r18,4 + bld r21,1 + bst r18,5 + bld r23,1 + bst r18,6 + bld r3,1 + bst r18,7 + bld r5,1 + bst r19,0 + bld r21,2 + bst r19,1 + bld r23,2 + bst r19,2 + bld r3,2 + bst r19,3 + bld r5,2 + bst r19,4 + bld r21,3 + bst r19,5 + bld r23,3 + bst r19,6 + bld r3,3 + bst r19,7 + bld r5,3 + ld r18,X+ + ld r19,X+ + bst r18,0 + bld r21,4 + bst r18,1 + bld r23,4 + bst r18,2 + bld r3,4 + bst r18,3 + bld r5,4 + bst r18,4 + bld r21,5 + bst r18,5 + bld r23,5 + bst r18,6 + bld r3,5 + bst r18,7 + bld r5,5 + bst r19,0 + bld r21,6 + bst r19,1 + bld r23,6 + bst r19,2 + bld r3,6 + bst r19,3 + bld r5,6 + bst r19,4 + bld r21,7 + bst r19,5 + bld r23,7 + bst r19,6 + bld r3,7 + bst r19,7 + bld r5,7 + rcall 1061f + ldi r18,1 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + rcall 1061f + ldi r18,3 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + rcall 1061f + ldi r18,7 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + rcall 1061f + ldi r18,15 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + rcall 1061f + ldi r18,31 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + rcall 1061f + ldi r18,62 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + rcall 1061f + ldi r18,61 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + 
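The long bst/bld runs that load the input block in gift64n_encrypt build a bit-sliced representation: bit i of each nibble of the 64-bit block is collected into 16-bit slice i. A rough C model of that transform, reconstructed from the register moves above (gift64_to_sliced is a hypothetical helper name, not part of the library):

#include <stdint.h>

/* Spread bit i of nibble j of the 8-byte block into bit j of slice i
 * (nibbles taken low-first within each byte, bytes in input order). */
static void gift64_to_sliced(const unsigned char in[8], uint16_t s[4])
{
    int i, j;
    s[0] = s[1] = s[2] = s[3] = 0;
    for (j = 0; j < 16; ++j) {
        unsigned nibble = (in[j / 2] >> ((j & 1) * 4)) & 0x0F;
        for (i = 0; i < 4; ++i)
            s[i] |= (uint16_t)(((nibble >> i) & 1) << j);
    }
}

A matching store sequence later in the function (local label 1252) appears to apply the inverse mapping when the result is written back out.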
ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + rcall 1061f + ldi r18,59 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + rcall 1061f + ldi r18,55 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + rcall 1061f + ldi r18,47 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + rcall 1061f + ldi r18,30 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + rcall 1061f + ldi r18,60 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + rcall 1061f + ldi r18,57 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + rcall 1061f + ldi r18,51 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + rcall 1061f + ldi r18,39 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + rcall 1061f + ldi r18,14 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + rcall 1061f + ldi r18,29 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + 
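The immediates loaded right after each rcall 1061f (1, 3, 7, 15, 31, 62, ...) are the GIFT round constants, generated by a 6-bit LFSR, and the paired ldi r19,128 / eor sets the fixed top bit of the last slice. A small sketch that regenerates the 28-value sequence used above (next_rc is a hypothetical name):

#include <stdio.h>

/* GIFT round-constant LFSR: the new low bit is 1 ^ c5 ^ c4. */
static unsigned next_rc(unsigned c)
{
    return ((c << 1) & 0x3E) | (1u ^ ((c >> 5) & 1u) ^ ((c >> 4) & 1u));
}

int main(void)
{
    unsigned c = 0, round;
    for (round = 0; round < 28; ++round) {
        c = next_rc(c);
        printf("%u ", c);   /* prints 1 3 7 15 31 62 ... 2 5 11 */
    }
    printf("\n");
    return 0;
}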
lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + rcall 1061f + ldi r18,58 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + rcall 1061f + ldi r18,53 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + rcall 1061f + ldi r18,43 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + rcall 1061f + ldi r18,22 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + rcall 1061f + ldi r18,44 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + rcall 1061f + ldi r18,24 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + rcall 1061f + ldi r18,48 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + rcall 1061f + ldi r18,33 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + rcall 1061f + ldi r18,2 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd 
r9,Y+8 + rcall 1061f + ldi r18,5 + ldi r19,128 + eor r4,r18 + eor r5,r19 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + rcall 1061f + ldi r18,11 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rjmp 1252f +1061: + mov r0,r20 + and r0,r2 + eor r22,r0 + mov r0,r21 + and r0,r3 + eor r23,r0 + mov r0,r22 + and r0,r4 + eor r20,r0 + mov r0,r23 + and r0,r5 + eor r21,r0 + mov r0,r20 + or r0,r22 + eor r2,r0 + mov r0,r21 + or r0,r23 + eor r3,r0 + eor r4,r2 + eor r5,r3 + eor r22,r4 + eor r23,r5 + com r4 + com r5 + movw r18,r20 + mov r0,r22 + and r0,r18 + eor r2,r0 + mov r0,r23 + and r0,r19 + eor r3,r0 + movw r20,r4 + movw r4,r18 + bst r20,1 + bld r0,0 + bst r20,4 + bld r20,1 + bst r20,3 + bld r20,4 + bst r21,4 + bld r20,3 + bst r0,0 + bld r21,4 + bst r20,2 + bld r0,0 + bst r21,0 + bld r20,2 + bst r0,0 + bld r21,0 + bst r20,5 + bld r0,0 + bst r20,7 + bld r20,5 + bst r21,7 + bld r20,7 + bst r21,5 + bld r21,7 + bst r0,0 + bld r21,5 + bst r20,6 + bld r0,0 + bst r21,3 + bld r20,6 + bst r21,6 + bld r21,3 + bst r21,1 + bld r21,6 + bst r0,0 + bld r21,1 + bst r22,0 + bld r0,0 + bst r22,1 + bld r22,0 + bst r22,5 + bld r22,1 + bst r22,4 + bld r22,5 + bst r0,0 + bld r22,4 + bst r22,2 + bld r0,0 + bst r23,1 + bld r22,2 + bst r22,7 + bld r23,1 + bst r23,4 + bld r22,7 + bst r0,0 + bld r23,4 + bst r22,3 + bld r0,0 + bst r23,5 + bld r22,3 + bst r22,6 + bld r23,5 + bst r23,0 + bld r22,6 + bst r0,0 + bld r23,0 + bst r23,2 + bld r0,0 + bst r23,3 + bld r23,2 + bst r23,7 + bld r23,3 + bst r23,6 + bld r23,7 + bst r0,0 + bld r23,6 + bst r2,0 + bld r0,0 + bst r2,2 + bld r2,0 + bst r3,2 + bld r2,2 + bst r3,0 + bld r3,2 + bst r0,0 + bld r3,0 + bst r2,1 + bld r0,0 + bst r2,6 + bld r2,1 + bst r3,1 + bld r2,6 + bst r2,4 + bld r3,1 + bst r0,0 + bld r2,4 + bst r2,3 + bld r0,0 + bst r3,6 + bld r2,3 + bst r3,3 + bld r3,6 + bst r3,4 + bld r3,3 + bst r0,0 + bld r3,4 + bst r2,7 + bld r0,0 + bst r3,5 + bld r2,7 + bst r0,0 + bld r3,5 + bst r4,0 + bld r0,0 + bst r4,3 + bld r4,0 + bst r5,7 + bld r4,3 + bst r5,4 + bld r5,7 + bst r0,0 + bld r5,4 + bst r4,1 + bld r0,0 + bst r4,7 + bld r4,1 + bst r5,6 + bld r4,7 + bst r5,0 + bld r5,6 + bst r0,0 + bld r5,0 + bst r4,2 + bld r0,0 + bst r5,3 + bld r4,2 + bst r5,5 + bld r5,3 + bst r4,4 + bld r5,5 + bst r0,0 + bld r4,4 + bst r4,5 + bld r0,0 + bst r4,6 + bld r4,5 + bst r5,2 + bld r4,6 + bst r5,1 + bld r5,2 + bst r0,0 + bld r5,1 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + ret +1252: + ldd r26,Y+17 + ldd r27,Y+18 + bst r20,0 + bld r18,0 + bst r22,0 + bld r18,1 + bst r2,0 + bld r18,2 + bst r4,0 + bld r18,3 + bst r20,1 + bld r18,4 + bst r22,1 + bld r18,5 + bst r2,1 + bld r18,6 + bst r4,1 + bld r18,7 + bst r20,2 + bld r19,0 + bst r22,2 + bld r19,1 + bst r2,2 + bld r19,2 + bst r4,2 + bld r19,3 + bst r20,3 + bld r19,4 + bst r22,3 + bld r19,5 + bst r2,3 + bld r19,6 + bst r4,3 + bld r19,7 + st X+,r18 + st X+,r19 + bst r20,4 + bld r18,0 + bst r22,4 + bld r18,1 + bst r2,4 + bld r18,2 + bst r4,4 + bld r18,3 + bst r20,5 + bld r18,4 + bst r22,5 + bld r18,5 + bst r2,5 + bld r18,6 + bst r4,5 + bld r18,7 + bst r20,6 + bld r19,0 + bst r22,6 + bld r19,1 + bst r2,6 + bld r19,2 + bst r4,6 + bld r19,3 + bst r20,7 + bld r19,4 + bst r22,7 + bld r19,5 + bst r2,7 + bld r19,6 + bst r4,7 + bld r19,7 + st X+,r18 + st X+,r19 + bst r21,0 + bld r18,0 + bst r23,0 + bld r18,1 + bst 
r3,0 + bld r18,2 + bst r5,0 + bld r18,3 + bst r21,1 + bld r18,4 + bst r23,1 + bld r18,5 + bst r3,1 + bld r18,6 + bst r5,1 + bld r18,7 + bst r21,2 + bld r19,0 + bst r23,2 + bld r19,1 + bst r3,2 + bld r19,2 + bst r5,2 + bld r19,3 + bst r21,3 + bld r19,4 + bst r23,3 + bld r19,5 + bst r3,3 + bld r19,6 + bst r5,3 + bld r19,7 + st X+,r18 + st X+,r19 + bst r21,4 + bld r18,0 + bst r23,4 + bld r18,1 + bst r3,4 + bld r18,2 + bst r5,4 + bld r18,3 + bst r21,5 + bld r18,4 + bst r23,5 + bld r18,5 + bst r3,5 + bld r18,6 + bst r5,5 + bld r18,7 + bst r21,6 + bld r19,0 + bst r23,6 + bld r19,1 + bst r3,6 + bld r19,2 + bst r5,6 + bld r19,3 + bst r21,7 + bld r19,4 + bst r23,7 + bld r19,5 + bst r3,7 + bld r19,6 + bst r5,7 + bld r19,7 + st X+,r18 + st X+,r19 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift64n_encrypt, .-gift64n_encrypt + + .text +.global gift64n_decrypt + .type gift64n_decrypt, @function +gift64n_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 28 + ld r6,Z + ldd r7,Z+1 + ldd r8,Z+2 + ldd r9,Z+3 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Z+4 + ldd r7,Z+5 + ldd r8,Z+6 + ldd r9,Z+7 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Z+8 + ldd r7,Z+9 + ldd r8,Z+10 + ldd r9,Z+11 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Z+12 + ldd r7,Z+13 + ldd r8,Z+14 + ldd r9,Z+15 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ld r18,X+ + ld r19,X+ + bst r18,0 + bld r20,0 + bst r18,1 + bld r22,0 + bst r18,2 + bld r2,0 + bst r18,3 + bld r4,0 + bst r18,4 + bld r20,1 + bst r18,5 + bld r22,1 + bst r18,6 + bld r2,1 + bst r18,7 + bld r4,1 + bst r19,0 + bld r20,2 + bst r19,1 + bld r22,2 + bst r19,2 + bld r2,2 + bst r19,3 + bld r4,2 + bst r19,4 + bld r20,3 + bst r19,5 + bld r22,3 + bst r19,6 + bld r2,3 + bst r19,7 + bld r4,3 + ld r18,X+ + ld r19,X+ + bst r18,0 + bld r20,4 + bst r18,1 + bld r22,4 + bst r18,2 + bld r2,4 + bst r18,3 + bld r4,4 + bst r18,4 + bld r20,5 + bst r18,5 + bld r22,5 + bst r18,6 + bld r2,5 + bst r18,7 + bld r4,5 + bst r19,0 + bld r20,6 + bst r19,1 + bld r22,6 + bst r19,2 + bld r2,6 + bst r19,3 + bld r4,6 + bst r19,4 + bld r20,7 + bst r19,5 + bld r22,7 + bst r19,6 + bld r2,7 + bst r19,7 + bld r4,7 + ld r18,X+ + ld r19,X+ + bst r18,0 + bld r21,0 + bst r18,1 + bld r23,0 + bst r18,2 + bld r3,0 + bst r18,3 + bld r5,0 + bst r18,4 + bld r21,1 + bst r18,5 + bld r23,1 + bst r18,6 + bld 
r3,1 + bst r18,7 + bld r5,1 + bst r19,0 + bld r21,2 + bst r19,1 + bld r23,2 + bst r19,2 + bld r3,2 + bst r19,3 + bld r5,2 + bst r19,4 + bld r21,3 + bst r19,5 + bld r23,3 + bst r19,6 + bld r3,3 + bst r19,7 + bld r5,3 + ld r18,X+ + ld r19,X+ + bst r18,0 + bld r21,4 + bst r18,1 + bld r23,4 + bst r18,2 + bld r3,4 + bst r18,3 + bld r5,4 + bst r18,4 + bld r21,5 + bst r18,5 + bld r23,5 + bst r18,6 + bld r3,5 + bst r18,7 + bld r5,5 + bst r19,0 + bld r21,6 + bst r19,1 + bld r23,6 + bst r19,2 + bld r3,6 + bst r19,3 + bld r5,6 + bst r19,4 + bld r21,7 + bst r19,5 + bld r23,7 + bst r19,6 + bld r3,7 + bst r19,7 + bld r5,7 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,11 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,5 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,2 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,33 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,48 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,24 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,44 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,22 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+13,r6 + 
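The subroutine at local label 1061, shared by every round of gift64n_encrypt above, evaluates the GIFT S-box on all sixteen nibbles at once in bit-sliced form; the matching 1173 routine used by gift64n_decrypt runs the inverse steps in the opposite order. A hedged C rendering of the forward S-box, with s[0..3] standing for the register pairs r20:r21, r22:r23, r2:r3 and r4:r5:

#include <stdint.h>

/* Bit-sliced GIFT S-box as read from the AND/OR/XOR sequence at 1061. */
static void gift64_sbox(uint16_t s[4])
{
    uint16_t t;
    s[1] ^= s[0] & s[2];
    s[0] ^= s[1] & s[3];
    s[2] ^= s[0] | s[1];
    s[3] ^= s[2];
    s[1] ^= s[3];
    s[3] ^= 0xFFFFU;   /* com r4 / com r5 */
    s[2] ^= s[0] & s[1];
    t = s[0];          /* the movw pair swaps the first and last slice */
    s[0] = s[3];
    s[3] = t;
}

The bst/bld block that follows inside 1061 is the GIFT-64 bit permutation, and the closing eor r20..r23 folds the 32-bit round key into the first two slices.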
std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,43 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,53 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,58 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,29 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,14 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,39 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,51 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,57 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,60 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol 
r9 + adc r8,r1 + ldi r18,30 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,47 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,55 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,59 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,61 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,62 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,31 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r6,Y+1 + ldd r7,Y+2 + ldd r8,Y+3 + ldd r9,Y+4 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,15 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+1,r6 + std Y+2,r7 + std Y+3,r8 + std Y+4,r9 + ldd r6,Y+5 + ldd r7,Y+6 + ldd r8,Y+7 + ldd r9,Y+8 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,7 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+5,r6 + std Y+6,r7 + std Y+7,r8 + std Y+8,r9 + ldd r6,Y+9 + ldd r7,Y+10 + ldd r8,Y+11 + ldd r9,Y+12 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,3 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + std Y+9,r6 + std Y+10,r7 + std Y+11,r8 + std Y+12,r9 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + mov r0,r1 + lsr r7 + ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + lsr r7 
+ ror r6 + ror r0 + lsr r7 + ror r6 + ror r0 + or r7,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + ldi r18,1 + ldi r19,128 + eor r4,r18 + eor r5,r19 + rcall 1173f + rjmp 1362f +1173: + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + bst r20,1 + bld r0,0 + bst r21,4 + bld r20,1 + bst r20,3 + bld r21,4 + bst r20,4 + bld r20,3 + bst r0,0 + bld r20,4 + bst r20,2 + bld r0,0 + bst r21,0 + bld r20,2 + bst r0,0 + bld r21,0 + bst r20,5 + bld r0,0 + bst r21,5 + bld r20,5 + bst r21,7 + bld r21,5 + bst r20,7 + bld r21,7 + bst r0,0 + bld r20,7 + bst r20,6 + bld r0,0 + bst r21,1 + bld r20,6 + bst r21,6 + bld r21,1 + bst r21,3 + bld r21,6 + bst r0,0 + bld r21,3 + bst r22,0 + bld r0,0 + bst r22,4 + bld r22,0 + bst r22,5 + bld r22,4 + bst r22,1 + bld r22,5 + bst r0,0 + bld r22,1 + bst r22,2 + bld r0,0 + bst r23,4 + bld r22,2 + bst r22,7 + bld r23,4 + bst r23,1 + bld r22,7 + bst r0,0 + bld r23,1 + bst r22,3 + bld r0,0 + bst r23,0 + bld r22,3 + bst r22,6 + bld r23,0 + bst r23,5 + bld r22,6 + bst r0,0 + bld r23,5 + bst r23,2 + bld r0,0 + bst r23,6 + bld r23,2 + bst r23,7 + bld r23,6 + bst r23,3 + bld r23,7 + bst r0,0 + bld r23,3 + bst r2,0 + bld r0,0 + bst r3,0 + bld r2,0 + bst r3,2 + bld r3,0 + bst r2,2 + bld r3,2 + bst r0,0 + bld r2,2 + bst r2,1 + bld r0,0 + bst r2,4 + bld r2,1 + bst r3,1 + bld r2,4 + bst r2,6 + bld r3,1 + bst r0,0 + bld r2,6 + bst r2,3 + bld r0,0 + bst r3,4 + bld r2,3 + bst r3,3 + bld r3,4 + bst r3,6 + bld r3,3 + bst r0,0 + bld r3,6 + bst r2,7 + bld r0,0 + bst r3,5 + bld r2,7 + bst r0,0 + bld r3,5 + bst r4,0 + bld r0,0 + bst r5,4 + bld r4,0 + bst r5,7 + bld r5,4 + bst r4,3 + bld r5,7 + bst r0,0 + bld r4,3 + bst r4,1 + bld r0,0 + bst r5,0 + bld r4,1 + bst r5,6 + bld r5,0 + bst r4,7 + bld r5,6 + bst r0,0 + bld r4,7 + bst r4,2 + bld r0,0 + bst r4,4 + bld r4,2 + bst r5,5 + bld r4,4 + bst r5,3 + bld r5,5 + bst r0,0 + bld r5,3 + bst r4,5 + bld r0,0 + bst r5,1 + bld r4,5 + bst r5,2 + bld r5,1 + bst r4,6 + bld r5,2 + bst r0,0 + bld r4,6 + movw r18,r4 + movw r4,r20 + movw r20,r18 + and r18,r22 + and r19,r23 + eor r2,r18 + eor r3,r19 + com r4 + com r5 + eor r22,r4 + eor r23,r5 + eor r4,r2 + eor r5,r3 + mov r0,r20 + or r0,r22 + eor r2,r0 + mov r0,r21 + or r0,r23 + eor r3,r0 + mov r0,r22 + and r0,r4 + eor r20,r0 + mov r0,r23 + and r0,r5 + eor r21,r0 + mov r0,r20 + and r0,r2 + eor r22,r0 + mov r0,r21 + and r0,r3 + eor r23,r0 + ret +1362: + ldd r26,Y+17 + ldd r27,Y+18 + bst r20,0 + bld r18,0 + bst r22,0 + bld r18,1 + bst r2,0 + bld r18,2 + bst r4,0 + bld r18,3 + bst r20,1 + bld r18,4 + bst r22,1 + bld r18,5 + bst r2,1 + bld r18,6 + bst r4,1 + bld r18,7 + bst r20,2 + bld r19,0 + bst r22,2 + bld r19,1 + bst r2,2 + bld r19,2 + bst r4,2 + bld r19,3 + bst r20,3 + bld r19,4 + bst r22,3 + bld r19,5 + bst r2,3 + bld r19,6 + bst r4,3 + bld r19,7 + st X+,r18 + st X+,r19 + bst r20,4 + bld r18,0 + bst r22,4 + bld r18,1 + bst r2,4 + bld r18,2 + bst r4,4 + bld r18,3 + bst r20,5 + bld r18,4 + bst r22,5 + bld r18,5 + bst r2,5 + bld r18,6 + bst r4,5 + bld r18,7 + bst r20,6 + bld r19,0 + bst r22,6 + bld r19,1 + bst r2,6 + bld r19,2 + bst r4,6 + bld r19,3 + bst r20,7 + bld r19,4 + bst r22,7 + bld r19,5 + bst r2,7 + bld r19,6 + bst r4,7 + bld r19,7 + st X+,r18 + st X+,r19 + bst r21,0 + bld r18,0 + bst r23,0 + bld r18,1 + bst r3,0 + bld r18,2 + bst r5,0 + bld r18,3 + bst r21,1 + bld r18,4 + bst r23,1 + bld r18,5 + bst r3,1 + bld r18,6 + bst r5,1 + bld r18,7 + bst r21,2 + bld r19,0 + bst r23,2 + bld r19,1 + bst r3,2 + bld r19,2 + bst r5,2 + bld r19,3 + bst r21,3 + bld r19,4 + bst r23,3 + bld r19,5 + 
bst r3,3 + bld r19,6 + bst r5,3 + bld r19,7 + st X+,r18 + st X+,r19 + bst r21,4 + bld r18,0 + bst r23,4 + bld r18,1 + bst r3,4 + bld r18,2 + bst r5,4 + bld r18,3 + bst r21,5 + bld r18,4 + bst r23,5 + bld r18,5 + bst r3,5 + bld r18,6 + bst r5,5 + bld r18,7 + bst r21,6 + bld r19,0 + bst r23,6 + bld r19,1 + bst r3,6 + bld r19,2 + bst r5,6 + bld r19,3 + bst r21,7 + bld r19,4 + bst r23,7 + bld r19,5 + bst r3,7 + bld r19,6 + bst r5,7 + bld r19,7 + st X+,r18 + st X+,r19 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift64n_decrypt, .-gift64n_decrypt + + .text +.global gift64t_encrypt + .type gift64t_encrypt, @function +gift64t_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 30 + ld r8,Z + ldd r9,Z+1 + ldd r10,Z+2 + ldd r11,Z+3 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Z+4 + ldd r9,Z+5 + ldd r10,Z+6 + ldd r11,Z+7 + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ld r20,X+ + ld r21,X+ + bst r20,0 + bld r22,0 + bst r20,1 + bld r2,0 + bst r20,2 + bld r4,0 + bst r20,3 + bld r6,0 + bst r20,4 + bld r22,1 + bst r20,5 + bld r2,1 + bst r20,6 + bld r4,1 + bst r20,7 + bld r6,1 + bst r21,0 + bld r22,2 + bst r21,1 + bld r2,2 + bst r21,2 + bld r4,2 + bst r21,3 + bld r6,2 + bst r21,4 + bld r22,3 + bst r21,5 + bld r2,3 + bst r21,6 + bld r4,3 + bst r21,7 + bld r6,3 + ld r20,X+ + ld r21,X+ + bst r20,0 + bld r22,4 + bst r20,1 + bld r2,4 + bst r20,2 + bld r4,4 + bst r20,3 + bld r6,4 + bst r20,4 + bld r22,5 + bst r20,5 + bld r2,5 + bst r20,6 + bld r4,5 + bst r20,7 + bld r6,5 + bst r21,0 + bld r22,6 + bst r21,1 + bld r2,6 + bst r21,2 + bld r4,6 + bst r21,3 + bld r6,6 + bst r21,4 + bld r22,7 + bst r21,5 + bld r2,7 + bst r21,6 + bld r4,7 + bst r21,7 + bld r6,7 + ld r20,X+ + ld r21,X+ + bst r20,0 + bld r23,0 + bst r20,1 + bld r3,0 + bst r20,2 + bld r5,0 + bst r20,3 + bld r7,0 + bst r20,4 + bld r23,1 + bst r20,5 + bld r3,1 + bst r20,6 + bld r5,1 + bst r20,7 + bld r7,1 + bst r21,0 + bld r23,2 + bst r21,1 + bld r3,2 + bst r21,2 + bld r5,2 + bst r21,3 + bld r7,2 + bst r21,4 + bld r23,3 + bst r21,5 + bld r3,3 + bst r21,6 + bld r5,3 + bst r21,7 + bld r7,3 + ld r20,X+ + ld r21,X+ + bst r20,0 + bld r23,4 + bst r20,1 + bld r3,4 + bst r20,2 + bld r5,4 + bst r20,3 + bld r7,4 + bst r20,4 + bld r23,5 + bst r20,5 + bld r3,5 + bst r20,6 + bld r5,5 + bst r20,7 + bld r7,5 + bst r21,0 + bld r23,6 + bst r21,1 + bld r3,6 + bst r21,2 + bld r5,6 + bst r21,3 + bld r7,6 + bst r21,4 + bld r23,7 + bst r21,5 + bld r3,7 + bst r21,6 + bld r5,7 + bst r21,7 + bld r7,7 + rcall 1073f + ldi r20,1 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + rcall 1073f + ldi r20,3 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 
+ lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + rcall 1073f + ldi r20,7 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + rcall 1073f + ldi r20,15 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + rcall 1073f + ldi r20,31 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + rcall 1073f + ldi r20,62 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + rcall 1073f + ldi r20,61 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + rcall 1073f + ldi r20,59 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + rcall 1073f + ldi r20,55 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + rcall 1073f + ldi r20,47 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + rcall 1073f + ldi r20,30 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr 
r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + rcall 1073f + ldi r20,60 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + rcall 1073f + ldi r20,57 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + rcall 1073f + ldi r20,51 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + rcall 1073f + ldi r20,39 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + rcall 1073f + ldi r20,14 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + rcall 1073f + ldi r20,29 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + rcall 1073f + ldi r20,58 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + rcall 1073f + ldi r20,53 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + rcall 1073f + ldi r20,43 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + 
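gift64t_encrypt follows the same round structure as gift64n_encrypt; the visible difference is the extra eor r4,r18 / eor r5,r18 pairs, which appear to fold the tweak byte (the fourth argument, still held in r18) into both halves of the third bit-slice after rounds 4, 8, 12, 16, 20 and 24, but not after the final round. A hedged sketch of just that schedule (twegift64_tweak_for_round is a hypothetical name, assuming the 28-round structure shown above):

#include <stdint.h>

/* Value XORed into the third bit-slice after round r (1-based),
 * per the pattern of tweak injections observed above. */
static uint16_t twegift64_tweak_for_round(unsigned char tweak, unsigned r)
{
    if ((r % 4) == 0 && r != 28)
        return (uint16_t)(((uint16_t)tweak << 8) | tweak);
    return 0;
}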
or r11,r0 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + rcall 1073f + ldi r20,22 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + rcall 1073f + ldi r20,44 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + rcall 1073f + ldi r20,24 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + rcall 1073f + ldi r20,48 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + rcall 1073f + ldi r20,33 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + rcall 1073f + ldi r20,2 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + rcall 1073f + ldi r20,5 + ldi r21,128 + eor r6,r20 + eor r7,r21 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + rcall 1073f + ldi r20,11 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rjmp 1264f +1073: + mov r0,r22 + and r0,r4 + eor r2,r0 + mov r0,r23 + and r0,r5 + eor r3,r0 + mov r0,r2 + and r0,r6 + eor r22,r0 + mov r0,r3 + and r0,r7 + eor r23,r0 + mov r0,r22 + or r0,r2 + eor r4,r0 + mov r0,r23 + or r0,r3 + eor r5,r0 + eor r6,r4 + eor r7,r5 + eor r2,r6 + eor r3,r7 + com r6 + com r7 + movw r20,r22 + mov r0,r2 + and r0,r20 + eor r4,r0 + mov r0,r3 + and r0,r21 + eor r5,r0 + movw r22,r6 + movw r6,r20 + bst r22,1 + bld r0,0 + bst r22,4 + bld r22,1 + bst r22,3 + bld r22,4 + bst r23,4 + bld r22,3 + bst r0,0 + bld r23,4 + bst r22,2 + bld r0,0 + bst r23,0 + bld r22,2 + bst r0,0 + bld r23,0 + bst r22,5 + bld r0,0 + bst r22,7 + bld r22,5 + bst r23,7 + bld 
r22,7 + bst r23,5 + bld r23,7 + bst r0,0 + bld r23,5 + bst r22,6 + bld r0,0 + bst r23,3 + bld r22,6 + bst r23,6 + bld r23,3 + bst r23,1 + bld r23,6 + bst r0,0 + bld r23,1 + bst r2,0 + bld r0,0 + bst r2,1 + bld r2,0 + bst r2,5 + bld r2,1 + bst r2,4 + bld r2,5 + bst r0,0 + bld r2,4 + bst r2,2 + bld r0,0 + bst r3,1 + bld r2,2 + bst r2,7 + bld r3,1 + bst r3,4 + bld r2,7 + bst r0,0 + bld r3,4 + bst r2,3 + bld r0,0 + bst r3,5 + bld r2,3 + bst r2,6 + bld r3,5 + bst r3,0 + bld r2,6 + bst r0,0 + bld r3,0 + bst r3,2 + bld r0,0 + bst r3,3 + bld r3,2 + bst r3,7 + bld r3,3 + bst r3,6 + bld r3,7 + bst r0,0 + bld r3,6 + bst r4,0 + bld r0,0 + bst r4,2 + bld r4,0 + bst r5,2 + bld r4,2 + bst r5,0 + bld r5,2 + bst r0,0 + bld r5,0 + bst r4,1 + bld r0,0 + bst r4,6 + bld r4,1 + bst r5,1 + bld r4,6 + bst r4,4 + bld r5,1 + bst r0,0 + bld r4,4 + bst r4,3 + bld r0,0 + bst r5,6 + bld r4,3 + bst r5,3 + bld r5,6 + bst r5,4 + bld r5,3 + bst r0,0 + bld r5,4 + bst r4,7 + bld r0,0 + bst r5,5 + bld r4,7 + bst r0,0 + bld r5,5 + bst r6,0 + bld r0,0 + bst r6,3 + bld r6,0 + bst r7,7 + bld r6,3 + bst r7,4 + bld r7,7 + bst r0,0 + bld r7,4 + bst r6,1 + bld r0,0 + bst r6,7 + bld r6,1 + bst r7,6 + bld r6,7 + bst r7,0 + bld r7,6 + bst r0,0 + bld r7,0 + bst r6,2 + bld r0,0 + bst r7,3 + bld r6,2 + bst r7,5 + bld r7,3 + bst r6,4 + bld r7,5 + bst r0,0 + bld r6,4 + bst r6,5 + bld r0,0 + bst r6,6 + bld r6,5 + bst r7,2 + bld r6,6 + bst r7,1 + bld r7,2 + bst r0,0 + bld r7,1 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + ret +1264: + ldd r26,Y+17 + ldd r27,Y+18 + bst r22,0 + bld r20,0 + bst r2,0 + bld r20,1 + bst r4,0 + bld r20,2 + bst r6,0 + bld r20,3 + bst r22,1 + bld r20,4 + bst r2,1 + bld r20,5 + bst r4,1 + bld r20,6 + bst r6,1 + bld r20,7 + bst r22,2 + bld r21,0 + bst r2,2 + bld r21,1 + bst r4,2 + bld r21,2 + bst r6,2 + bld r21,3 + bst r22,3 + bld r21,4 + bst r2,3 + bld r21,5 + bst r4,3 + bld r21,6 + bst r6,3 + bld r21,7 + st X+,r20 + st X+,r21 + bst r22,4 + bld r20,0 + bst r2,4 + bld r20,1 + bst r4,4 + bld r20,2 + bst r6,4 + bld r20,3 + bst r22,5 + bld r20,4 + bst r2,5 + bld r20,5 + bst r4,5 + bld r20,6 + bst r6,5 + bld r20,7 + bst r22,6 + bld r21,0 + bst r2,6 + bld r21,1 + bst r4,6 + bld r21,2 + bst r6,6 + bld r21,3 + bst r22,7 + bld r21,4 + bst r2,7 + bld r21,5 + bst r4,7 + bld r21,6 + bst r6,7 + bld r21,7 + st X+,r20 + st X+,r21 + bst r23,0 + bld r20,0 + bst r3,0 + bld r20,1 + bst r5,0 + bld r20,2 + bst r7,0 + bld r20,3 + bst r23,1 + bld r20,4 + bst r3,1 + bld r20,5 + bst r5,1 + bld r20,6 + bst r7,1 + bld r20,7 + bst r23,2 + bld r21,0 + bst r3,2 + bld r21,1 + bst r5,2 + bld r21,2 + bst r7,2 + bld r21,3 + bst r23,3 + bld r21,4 + bst r3,3 + bld r21,5 + bst r5,3 + bld r21,6 + bst r7,3 + bld r21,7 + st X+,r20 + st X+,r21 + bst r23,4 + bld r20,0 + bst r3,4 + bld r20,1 + bst r5,4 + bld r20,2 + bst r7,4 + bld r20,3 + bst r23,5 + bld r20,4 + bst r3,5 + bld r20,5 + bst r5,5 + bld r20,6 + bst r7,5 + bld r20,7 + bst r23,6 + bld r21,0 + bst r3,6 + bld r21,1 + bst r5,6 + bld r21,2 + bst r7,6 + bld r21,3 + bst r23,7 + bld r21,4 + bst r3,7 + bld r21,5 + bst r5,7 + bld r21,6 + bst r7,7 + bld r21,7 + st X+,r20 + st X+,r21 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift64t_encrypt, .-gift64t_encrypt + + .text +.global gift64t_decrypt + .type gift64t_decrypt, @function +gift64t_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 
+ push r10 + push r11 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 30 + ld r8,Z + ldd r9,Z+1 + ldd r10,Z+2 + ldd r11,Z+3 + mov r0,r9 + mov r9,r8 + mov r8,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Z+4 + ldd r9,Z+5 + ldd r10,Z+6 + ldd r11,Z+7 + mov r0,r9 + mov r9,r8 + mov r8,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + mov r0,r9 + mov r9,r8 + mov r8,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + mov r0,r9 + mov r9,r8 + mov r8,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r20,X+ + ld r21,X+ + bst r20,0 + bld r22,0 + bst r20,1 + bld r2,0 + bst r20,2 + bld r4,0 + bst r20,3 + bld r6,0 + bst r20,4 + bld r22,1 + bst r20,5 + bld r2,1 + bst r20,6 + bld r4,1 + bst r20,7 + bld r6,1 + bst r21,0 + bld r22,2 + bst r21,1 + bld r2,2 + bst r21,2 + bld r4,2 + bst r21,3 + bld r6,2 + bst r21,4 + bld r22,3 + bst r21,5 + bld r2,3 + bst r21,6 + bld r4,3 + bst r21,7 + bld r6,3 + ld r20,X+ + ld r21,X+ + bst r20,0 + bld r22,4 + bst r20,1 + bld r2,4 + bst r20,2 + bld r4,4 + bst r20,3 + bld r6,4 + bst r20,4 + bld r22,5 + bst r20,5 + bld r2,5 + bst r20,6 + bld r4,5 + bst r20,7 + bld r6,5 + bst r21,0 + bld r22,6 + bst r21,1 + bld r2,6 + bst r21,2 + bld r4,6 + bst r21,3 + bld r6,6 + bst r21,4 + bld r22,7 + bst r21,5 + bld r2,7 + bst r21,6 + bld r4,7 + bst r21,7 + bld r6,7 + ld r20,X+ + ld r21,X+ + bst r20,0 + bld r23,0 + bst r20,1 + bld r3,0 + bst r20,2 + bld r5,0 + bst r20,3 + bld r7,0 + bst r20,4 + bld r23,1 + bst r20,5 + bld r3,1 + bst r20,6 + bld r5,1 + bst r20,7 + bld r7,1 + bst r21,0 + bld r23,2 + bst r21,1 + bld r3,2 + bst r21,2 + bld r5,2 + bst r21,3 + bld r7,2 + bst r21,4 + bld r23,3 + bst r21,5 + bld r3,3 + bst r21,6 + bld r5,3 + bst r21,7 + bld r7,3 + ld r20,X+ + ld r21,X+ + bst r20,0 + bld r23,4 + bst r20,1 + bld r3,4 + bst r20,2 + bld r5,4 + bst r20,3 + bld r7,4 + bst r20,4 + bld r23,5 + bst r20,5 + bld r3,5 + bst r20,6 + bld r5,5 + bst r20,7 + bld r7,5 + bst r21,0 + bld r23,6 + bst r21,1 + bld r3,6 + bst r21,2 + bld r5,6 + bst r21,3 + bld r7,6 + bst r21,4 + bld r23,7 + bst r21,5 + bld r3,7 + bst r21,6 + bld r5,7 + bst r21,7 + bld r7,7 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,11 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + mov r0,r1 + 
lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,5 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,2 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,33 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,48 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + rcall 1185f + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,24 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,44 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,22 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,43 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + rcall 1185f + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,53 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl 
r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,58 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,29 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,14 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + rcall 1185f + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,39 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,51 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,57 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,60 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + rcall 1185f + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,30 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,47 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,55 + ldi r21,128 + eor r6,r20 + eor r7,r21 + 
rcall 1185f + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,59 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + rcall 1185f + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,61 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,62 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,31 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r8,Y+1 + ldd r9,Y+2 + ldd r10,Y+3 + ldd r11,Y+4 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,15 + ldi r21,128 + eor r6,r20 + eor r7,r21 + eor r4,r18 + eor r5,r18 + rcall 1185f + std Y+1,r8 + std Y+2,r9 + std Y+3,r10 + std Y+4,r11 + ldd r8,Y+5 + ldd r9,Y+6 + ldd r10,Y+7 + ldd r11,Y+8 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,7 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+5,r8 + std Y+6,r9 + std Y+7,r10 + std Y+8,r11 + ldd r8,Y+9 + ldd r9,Y+10 + ldd r10,Y+11 + ldd r11,Y+12 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,3 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + std Y+9,r8 + std Y+10,r9 + std Y+11,r10 + std Y+12,r11 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ldi r20,1 + ldi r21,128 + eor r6,r20 + eor r7,r21 + rcall 1185f + rjmp 1374f +1185: + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + bst r22,1 + bld r0,0 + bst r23,4 + bld r22,1 + bst r22,3 + bld r23,4 + bst r22,4 + bld r22,3 + bst r0,0 + bld r22,4 + bst r22,2 + bld r0,0 + bst r23,0 + bld r22,2 + bst r0,0 + bld r23,0 + bst r22,5 + bld r0,0 + bst r23,5 + bld r22,5 + bst r23,7 + bld r23,5 + bst r22,7 + bld r23,7 + bst r0,0 + bld r22,7 + bst r22,6 + bld r0,0 + bst r23,1 + bld r22,6 + bst r23,6 + bld r23,1 + bst r23,3 + bld r23,6 + bst r0,0 + bld 
r23,3 + bst r2,0 + bld r0,0 + bst r2,4 + bld r2,0 + bst r2,5 + bld r2,4 + bst r2,1 + bld r2,5 + bst r0,0 + bld r2,1 + bst r2,2 + bld r0,0 + bst r3,4 + bld r2,2 + bst r2,7 + bld r3,4 + bst r3,1 + bld r2,7 + bst r0,0 + bld r3,1 + bst r2,3 + bld r0,0 + bst r3,0 + bld r2,3 + bst r2,6 + bld r3,0 + bst r3,5 + bld r2,6 + bst r0,0 + bld r3,5 + bst r3,2 + bld r0,0 + bst r3,6 + bld r3,2 + bst r3,7 + bld r3,6 + bst r3,3 + bld r3,7 + bst r0,0 + bld r3,3 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r4,2 + bld r5,2 + bst r0,0 + bld r4,2 + bst r4,1 + bld r0,0 + bst r4,4 + bld r4,1 + bst r5,1 + bld r4,4 + bst r4,6 + bld r5,1 + bst r0,0 + bld r4,6 + bst r4,3 + bld r0,0 + bst r5,4 + bld r4,3 + bst r5,3 + bld r5,4 + bst r5,6 + bld r5,3 + bst r0,0 + bld r5,6 + bst r4,7 + bld r0,0 + bst r5,5 + bld r4,7 + bst r0,0 + bld r5,5 + bst r6,0 + bld r0,0 + bst r7,4 + bld r6,0 + bst r7,7 + bld r7,4 + bst r6,3 + bld r7,7 + bst r0,0 + bld r6,3 + bst r6,1 + bld r0,0 + bst r7,0 + bld r6,1 + bst r7,6 + bld r7,0 + bst r6,7 + bld r7,6 + bst r0,0 + bld r6,7 + bst r6,2 + bld r0,0 + bst r6,4 + bld r6,2 + bst r7,5 + bld r6,4 + bst r7,3 + bld r7,5 + bst r0,0 + bld r7,3 + bst r6,5 + bld r0,0 + bst r7,1 + bld r6,5 + bst r7,2 + bld r7,1 + bst r6,6 + bld r7,2 + bst r0,0 + bld r6,6 + movw r20,r6 + movw r6,r22 + movw r22,r20 + and r20,r2 + and r21,r3 + eor r4,r20 + eor r5,r21 + com r6 + com r7 + eor r2,r6 + eor r3,r7 + eor r6,r4 + eor r7,r5 + mov r0,r22 + or r0,r2 + eor r4,r0 + mov r0,r23 + or r0,r3 + eor r5,r0 + mov r0,r2 + and r0,r6 + eor r22,r0 + mov r0,r3 + and r0,r7 + eor r23,r0 + mov r0,r22 + and r0,r4 + eor r2,r0 + mov r0,r23 + and r0,r5 + eor r3,r0 + ret +1374: + ldd r26,Y+17 + ldd r27,Y+18 + bst r22,0 + bld r20,0 + bst r2,0 + bld r20,1 + bst r4,0 + bld r20,2 + bst r6,0 + bld r20,3 + bst r22,1 + bld r20,4 + bst r2,1 + bld r20,5 + bst r4,1 + bld r20,6 + bst r6,1 + bld r20,7 + bst r22,2 + bld r21,0 + bst r2,2 + bld r21,1 + bst r4,2 + bld r21,2 + bst r6,2 + bld r21,3 + bst r22,3 + bld r21,4 + bst r2,3 + bld r21,5 + bst r4,3 + bld r21,6 + bst r6,3 + bld r21,7 + st X+,r20 + st X+,r21 + bst r22,4 + bld r20,0 + bst r2,4 + bld r20,1 + bst r4,4 + bld r20,2 + bst r6,4 + bld r20,3 + bst r22,5 + bld r20,4 + bst r2,5 + bld r20,5 + bst r4,5 + bld r20,6 + bst r6,5 + bld r20,7 + bst r22,6 + bld r21,0 + bst r2,6 + bld r21,1 + bst r4,6 + bld r21,2 + bst r6,6 + bld r21,3 + bst r22,7 + bld r21,4 + bst r2,7 + bld r21,5 + bst r4,7 + bld r21,6 + bst r6,7 + bld r21,7 + st X+,r20 + st X+,r21 + bst r23,0 + bld r20,0 + bst r3,0 + bld r20,1 + bst r5,0 + bld r20,2 + bst r7,0 + bld r20,3 + bst r23,1 + bld r20,4 + bst r3,1 + bld r20,5 + bst r5,1 + bld r20,6 + bst r7,1 + bld r20,7 + bst r23,2 + bld r21,0 + bst r3,2 + bld r21,1 + bst r5,2 + bld r21,2 + bst r7,2 + bld r21,3 + bst r23,3 + bld r21,4 + bst r3,3 + bld r21,5 + bst r5,3 + bld r21,6 + bst r7,3 + bld r21,7 + st X+,r20 + st X+,r21 + bst r23,4 + bld r20,0 + bst r3,4 + bld r20,1 + bst r5,4 + bld r20,2 + bst r7,4 + bld r20,3 + bst r23,5 + bld r20,4 + bst r3,5 + bld r20,5 + bst r5,5 + bld r20,6 + bst r7,5 + bld r20,7 + bst r23,6 + bld r21,0 + bst r3,6 + bld r21,1 + bst r5,6 + bld r21,2 + bst r7,6 + bld r21,3 + bst r23,7 + bld r21,4 + bst r3,7 + bld r21,5 + bst r5,7 + bld r21,6 + bst r7,7 + bld r21,7 + st X+,r20 + st X+,r21 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift64t_decrypt, .-gift64t_decrypt + +#endif diff --git 
a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/internal-gift64.c b/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/internal-gift64.c index 321d079..81bc8a3 100644 --- a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/internal-gift64.c +++ b/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/internal-gift64.c @@ -24,6 +24,8 @@ #include "internal-util.h" #include +#if !GIFT64_LOW_MEMORY + /* Round constants for GIFT-64 in the fixsliced representation */ static uint32_t const GIFT64_RC[28] = { 0x22000011, 0x00002299, 0x11118811, 0x880000ff, 0x33111199, 0x990022ee, @@ -33,19 +35,6 @@ static uint32_t const GIFT64_RC[28] = { 0x22008811, 0x00002288, 0x00118811, 0x880000bb }; -int gift64b_init - (gift64b_key_schedule_t *ks, const unsigned char *key, size_t key_len) -{ - if (!ks || !key || key_len != 16) - return 0; - ks->k[0] = be_load_word32(key); - ks->k[1] = be_load_word32(key + 4); - ks->k[2] = be_load_word32(key + 8); - ks->k[3] = be_load_word32(key + 12); - gift64b_update_round_keys(ks); - return 1; -} - /* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ #define bit_permute_step(_y, mask, shift) \ do { \ @@ -249,7 +238,7 @@ int gift64b_init ((out & 0x00000F00U) << 8) | ((out & 0x0000F000U) << 12); \ } while (0) -void gift64b_update_round_keys(gift64b_key_schedule_t *ks) +void gift64n_update_round_keys(gift64n_key_schedule_t *ks) { uint32_t x; @@ -293,7 +282,7 @@ void gift64b_update_round_keys(gift64b_key_schedule_t *ks) * \param Tweak value or zero if there is no tweak. */ static void gift64b_encrypt_core - (const gift64b_key_schedule_t *ks, uint32_t state[4], uint32_t tweak) + (const gift64n_key_schedule_t *ks, uint32_t state[4], uint32_t tweak) { const uint32_t *rc = GIFT64_RC; uint32_t s0, s1, s2, s3, temp; @@ -391,7 +380,7 @@ static void gift64b_encrypt_core * \param Tweak value or zero if there is no tweak. 
*/ static void gift64b_decrypt_core - (const gift64b_key_schedule_t *ks, uint32_t state[4], uint32_t tweak) + (const gift64n_key_schedule_t *ks, uint32_t state[4], uint32_t tweak) { const uint32_t *rc = GIFT64_RC + 28 - 4; uint32_t s0, s1, s2, s3, temp; @@ -513,18 +502,14 @@ static void gift64b_decrypt_core state[3] = s3; } -int gift64n_init - (gift64n_key_schedule_t *ks, const unsigned char *key, size_t key_len) +void gift64n_init(gift64n_key_schedule_t *ks, const unsigned char *key) { /* Use the little-endian byte order from the LOTUS-AEAD submission */ - if (!ks || !key || key_len != 16) - return 0; ks->k[0] = le_load_word32(key + 12); ks->k[1] = le_load_word32(key + 8); ks->k[2] = le_load_word32(key + 4); ks->k[3] = le_load_word32(key); - gift64b_update_round_keys(ks); - return 1; + gift64n_update_round_keys(ks); } /** @@ -622,124 +607,599 @@ void gift64n_decrypt gift64n_to_nibbles(output, state); } +/* 4-bit tweak values expanded to 32-bit in fixsliced form */ +static uint32_t const GIFT64_tweaks[16] = { + 0x00000000, 0xee11ee11, 0xdd22dd22, 0x33333333, 0xbb44bb44, 0x55555555, + 0x66666666, 0x88778877, 0x77887788, 0x99999999, 0xaaaaaaaa, 0x44bb44bb, + 0xcccccccc, 0x22dd22dd, 0x11ee11ee, 0xffffffff +}; + +void gift64t_encrypt + (const gift64n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, uint16_t tweak) +{ + uint32_t state[4]; + gift64n_to_words(state, input); + gift64b_encrypt_core(ks, state, GIFT64_tweaks[tweak & 0x0F]); + gift64n_to_nibbles(output, state); +} + +void gift64t_decrypt + (const gift64n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, uint16_t tweak) +{ + uint32_t state[4]; + gift64n_to_words(state, input); + gift64b_decrypt_core(ks, state, GIFT64_tweaks[tweak & 0x0F]); + gift64n_to_nibbles(output, state); +} + +#elif !defined(__AVR__) /* GIFT64_LOW_MEMORY */ + +/* Round constants for GIFT-64 */ +static uint8_t const GIFT64_RC[28] = { + 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, + 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, + 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, + 0x21, 0x02, 0x05, 0x0B +}; + +/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ +#define bit_permute_step(_y, mask, shift) \ + do { \ + uint16_t y = (_y); \ + uint16_t t = ((y >> (shift)) ^ y) & (mask); \ + (_y) = (y ^ t) ^ (t << (shift)); \ + } while (0) + +/* http://programming.sirrida.de/perm_fn.html#bit_permute_step_simple */ +#define bit_permute_step_simple(_y, mask, shift) \ + do { \ + (_y) = (((_y) & (mask)) << (shift)) | (((_y) >> (shift)) & (mask)); \ + } while (0) + +/* + * The permutation below was generated by the online permuation generator at + * "http://programming.sirrida.de/calcperm.php". + * + * All of the permutuations are essentially the same, except that each is + * rotated by 4 bits with respect to the next: + * + * P0: 0 12 8 4 1 13 9 5 2 14 10 6 3 15 11 7 + * P1: 4 0 12 8 5 1 13 9 6 2 14 10 7 3 15 11 + * P2: 8 4 0 12 9 5 1 13 10 6 2 14 11 7 3 15 + * P3: 12 8 4 0 13 9 5 1 14 10 6 2 15 11 7 3 + * + * The most efficient permutation from the online generator was P1, so we + * perform it as the core of the others, and then perform a final rotation. + * + * It is possible to do slightly better than "P1 then rotate" on desktop and + * server architectures for the other permutations. But the advantage isn't + * as evident on embedded platforms so we keep things simple. 
+ */ +#define PERM1_INNER(x) \ + do { \ + bit_permute_step(x, 0x0a0a, 3); \ + bit_permute_step(x, 0x00cc, 6); \ + bit_permute_step_simple(x, 0x0f0f, 4); \ + } while (0) +#define PERM0(x) \ + do { \ + uint32_t _x = (x); \ + PERM1_INNER(_x); \ + (x) = leftRotate12_16(_x); \ + } while (0) +#define PERM1(x) PERM1_INNER(x) +#define PERM2(x) \ + do { \ + uint32_t _x = (x); \ + PERM1_INNER(_x); \ + (x) = leftRotate4_16(_x); \ + } while (0) +#define PERM3(x) \ + do { \ + uint32_t _x = (x); \ + PERM1_INNER(_x); \ + (x) = leftRotate8_16(_x); \ + } while (0) + +#define INV_PERM1_INNER(x) \ + do { \ + bit_permute_step(x, 0x0505, 5); \ + bit_permute_step(x, 0x00cc, 6); \ + bit_permute_step_simple(x, 0x0f0f, 4); \ + } while (0) +#define INV_PERM0(x) \ + do { \ + uint32_t _x = rightRotate12_16(x); \ + INV_PERM1_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM1(x) INV_PERM1_INNER(x) +#define INV_PERM2(x) \ + do { \ + uint32_t _x = rightRotate4_16(x); \ + INV_PERM1_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM3(x) \ + do { \ + uint32_t _x = rightRotate8_16(x); \ + INV_PERM1_INNER(_x); \ + (x) = _x; \ + } while (0) + /** - * \brief Converts the GIFT-64 nibble-based representation into word-based - * (big-endian version). + * \brief Encrypts a 64-bit block with GIFT-64 (bit-sliced). * - * \param output Output buffer to write the word-based version to. - * \param input Input buffer to read the nibble-based version from. + * \param ks Points to the GIFT-64 key schedule. + * \param output Output buffer which must be at least 8 bytes in length. + * \param input Input buffer which must be at least 8 bytes in length. * - * The output words will be in fixsliced form. Technically the output will - * contain two blocks for gift64b_encrypt_core() to process in parallel but - * both blocks will have the same value. + * The \a input and \a output buffers can be the same buffer for + * in-place encryption. 
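[Editor's note, not part of the patch] The low-memory path introduced above replaces the fix-sliced tables with the PERM*_INNER / INV_PERM*_INNER delta-swap macros. As a quick standalone sanity check, the two inner macros can be copied out of the patch and the round trip brute-forced over every 16-bit value; the harness below is illustrative only (the file name, main and messages are made up), but the macro bodies are taken verbatim from this hunk:

    #include <stdint.h>
    #include <stdio.h>

    /* Macros copied verbatim from the low-memory section of internal-gift64.c */
    #define bit_permute_step(_y, mask, shift) \
        do { \
            uint16_t y = (_y); \
            uint16_t t = ((y >> (shift)) ^ y) & (mask); \
            (_y) = (y ^ t) ^ (t << (shift)); \
        } while (0)
    #define bit_permute_step_simple(_y, mask, shift) \
        do { \
            (_y) = (((_y) & (mask)) << (shift)) | (((_y) >> (shift)) & (mask)); \
        } while (0)
    #define PERM1_INNER(x) \
        do { \
            bit_permute_step(x, 0x0a0a, 3); \
            bit_permute_step(x, 0x00cc, 6); \
            bit_permute_step_simple(x, 0x0f0f, 4); \
        } while (0)
    #define INV_PERM1_INNER(x) \
        do { \
            bit_permute_step(x, 0x0505, 5); \
            bit_permute_step(x, 0x00cc, 6); \
            bit_permute_step_simple(x, 0x0f0f, 4); \
        } while (0)

    int main(void)
    {
        uint32_t v;
        for (v = 0; v < 0x10000; ++v) {
            uint16_t a = (uint16_t)v;   /* original value */
            uint16_t b = a;
            PERM1_INNER(b);             /* forward GIFT-64 bit permutation P1 */
            INV_PERM1_INNER(b);         /* the inverse should restore a */
            if (b != a) {
                printf("mismatch at 0x%04x\n", (unsigned)a);
                return 1;
            }
        }
        printf("PERM1_INNER / INV_PERM1_INNER round-trip holds for all 16-bit inputs\n");
        return 0;
    }

The same style of check extends to PERM0, PERM2 and PERM3, which only add a final 16-bit rotation on top of PERM1_INNER, exactly as the macros above describe.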
*/ -static void gift64nb_to_words(uint32_t output[4], const unsigned char *input) +static void gift64b_encrypt + (const gift64n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) { - uint32_t s0, s1, s2, s3; + uint16_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Load the input block into 32-bit words */ - s0 = be_load_word32(input + 4); - s2 = be_load_word32(input); + /* Copy the plaintext into the state buffer and convert from big endian */ + s0 = be_load_word16(input); + s1 = be_load_word16(input + 2); + s2 = be_load_word16(input + 4); + s3 = be_load_word16(input + 6); + + /* The key schedule is initialized with the key itself */ + w0 = ks->k[0]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[3]; + + /* Perform all 28 rounds */ + for (round = 0; round < 28; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 0xFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 64-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s0 ^= (uint16_t)w3; + s1 ^= (uint16_t)(w3 >> 16); + s3 ^= 0x8000U ^ GIFT64_RC[round]; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } - /* Rearrange the bits in the block */ - gift64b_swap_move(s0, s0, 0x0A0A0A0AU, 3); - gift64b_swap_move(s0, s0, 0x00CC00CCU, 6); - gift64b_swap_move(s0, s0, 0x0000FF00U, 8); - gift64b_swap_move(s2, s2, 0x0A0A0A0AU, 3); - gift64b_swap_move(s2, s2, 0x00CC00CCU, 6); - gift64b_swap_move(s2, s2, 0x0000FF00U, 8); + /* Pack the state into the ciphertext buffer in big endian */ + be_store_word16(output, s0); + be_store_word16(output + 2, s1); + be_store_word16(output + 4, s2); + be_store_word16(output + 6, s3); +} - /* Split into two identical blocks in fixsliced form */ - s1 = s0; - s3 = s2; - gift64b_swap_move(s0, s1, 0x0F0F0F0FU, 4); - gift64b_swap_move(s2, s3, 0x0F0F0F0FU, 4); - gift64b_swap_move(s0, s2, 0x0000FFFFU, 16); - gift64b_swap_move(s1, s3, 0x0000FFFFU, 16); - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; +/** + * \brief Decrypts a 64-bit block with GIFT-64 (bit-sliced). + * + * \param ks Points to the GIFT-64 key schedule. + * \param output Output buffer which must be at least 8 bytes in length. + * \param input Input buffer which must be at least 8 bytes in length. + * + * The \a input and \a output buffers can be the same buffer for + * in-place decryption. + */ +static void gift64b_decrypt + (const gift64n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + uint16_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; + + /* Copy the ciphertext into the state buffer and convert from big endian */ + s0 = be_load_word16(input); + s1 = be_load_word16(input + 2); + s2 = be_load_word16(input + 4); + s3 = be_load_word16(input + 6); + + /* Generate the decryption key at the end of the last round. 
+ * + * To do that, we run the block operation forward to determine the + * final state of the key schedule after the last round: + * + * w0 = ks->k[0]; + * w1 = ks->k[1]; + * w2 = ks->k[2]; + * w3 = ks->k[3]; + * for (round = 0; round < 28; ++round) { + * temp = w3; + * w3 = w2; + * w2 = w1; + * w1 = w0; + * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + * } + * + * We can short-cut all of the above by noticing that we don't need + * to do the word rotations. Every 4 rounds, the rotation alignment + * returns to the original position and each word has been rotated + * by applying the "2 right and 4 left" bit-rotation step to it. + * We then repeat that 7 times for the full 28 rounds. The overall + * effect is to apply a "14 right and 28 left" bit-rotation to every word + * in the key schedule. That is equivalent to "14 right and 12 left" + * on the 16-bit sub-words. + */ + w0 = ks->k[0]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[3]; + w0 = ((w0 & 0xC0000000U) >> 14) | ((w0 & 0x3FFF0000U) << 2) | + ((w0 & 0x0000000FU) << 12) | ((w0 & 0x0000FFF0U) >> 4); + w1 = ((w1 & 0xC0000000U) >> 14) | ((w1 & 0x3FFF0000U) << 2) | + ((w1 & 0x0000000FU) << 12) | ((w1 & 0x0000FFF0U) >> 4); + w2 = ((w2 & 0xC0000000U) >> 14) | ((w2 & 0x3FFF0000U) << 2) | + ((w2 & 0x0000000FU) << 12) | ((w2 & 0x0000FFF0U) >> 4); + w3 = ((w3 & 0xC0000000U) >> 14) | ((w3 & 0x3FFF0000U) << 2) | + ((w3 & 0x0000000FU) << 12) | ((w3 & 0x0000FFF0U) >> 4); + + /* Perform all 28 rounds */ + for (round = 28; round > 0; --round) { + /* Rotate the key schedule backwards */ + temp = w0; + w0 = w1; + w1 = w2; + w2 = w3; + w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | + ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s0 ^= (uint16_t)w3; + s1 ^= (uint16_t)(w3 >> 16); + s3 ^= 0x8000U ^ GIFT64_RC[round - 1]; + + /* InvPermBits - apply the inverse of the 128-bit permutation */ + INV_PERM0(s0); + INV_PERM1(s1); + INV_PERM2(s2); + INV_PERM3(s3); + + /* InvSubCells - apply the inverse of the S-box */ + temp = s0; + s0 = s3; + s3 = temp; + s2 ^= s0 & s1; + s3 ^= 0xFFFFU; + s1 ^= s3; + s3 ^= s2; + s2 ^= s0 | s1; + s0 ^= s1 & s3; + s1 ^= s0 & s2; + } + + /* Pack the state into the plaintext buffer in big endian */ + be_store_word16(output, s0); + be_store_word16(output + 2, s1); + be_store_word16(output + 4, s2); + be_store_word16(output + 6, s3); } +void gift64n_init(gift64n_key_schedule_t *ks, const unsigned char *key) +{ + /* Use the little-endian byte order from the LOTUS-AEAD submission */ + ks->k[0] = le_load_word32(key + 12); + ks->k[1] = le_load_word32(key + 8); + ks->k[2] = le_load_word32(key + 4); + ks->k[3] = le_load_word32(key); +} + +/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ +#define bit_permute_step_32(_y, mask, shift) \ + do { \ + uint32_t y = (_y); \ + uint32_t t = ((y >> (shift)) ^ y) & (mask); \ + (_y) = (y ^ t) ^ (t << (shift)); \ + } while (0) + /** - * \brief Converts the GIFT-64 word-based representation into nibble-based - * (big-endian version). + * \brief Converts the GIFT-64 nibble-based representation into word-based. * - * \param output Output buffer to write the nibble-based version to. - * \param input Input buffer to read the word-based version from. + * \param output Output buffer to write the word-based version to. + * \param input Input buffer to read the nibble-based version from. 
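[Editor's note, not part of the patch] The InvSubCells sequence in gift64b_decrypt() above is the SubCells sequence from gift64b_encrypt() replayed backwards; every step is a self-inverting XOR or swap, so undoing them in reverse order restores the state. The throwaway sketch below (names and output are illustrative only; the two operation sequences are copied from this hunk) evaluates one bit-sliced S-box layer on single-nibble inputs and confirms that the inverse restores them:

    #include <stdint.h>
    #include <stdio.h>

    /* One GIFT-64 S-box layer over four bit-slices, as in gift64b_encrypt(). */
    static void subcells(uint16_t s[4])
    {
        uint16_t t;
        s[1] ^= s[0] & s[2];
        s[0] ^= s[1] & s[3];
        s[2] ^= s[0] | s[1];
        s[3] ^= s[2];
        s[1] ^= s[3];
        s[3] ^= 0xFFFFU;
        s[2] ^= s[0] & s[1];
        t = s[0]; s[0] = s[3]; s[3] = t;
    }

    /* The inverse layer, as in gift64b_decrypt(). */
    static void inv_subcells(uint16_t s[4])
    {
        uint16_t t;
        t = s[0]; s[0] = s[3]; s[3] = t;
        s[2] ^= s[0] & s[1];
        s[3] ^= 0xFFFFU;
        s[1] ^= s[3];
        s[3] ^= s[2];
        s[2] ^= s[0] | s[1];
        s[0] ^= s[1] & s[3];
        s[1] ^= s[0] & s[2];
    }

    int main(void)
    {
        unsigned n;
        for (n = 0; n < 16; ++n) {
            uint16_t s[4], i;
            unsigned out, back;
            for (i = 0; i < 4; ++i)   /* broadcast nibble bit i across slice i */
                s[i] = ((n >> i) & 1) ? 0xFFFFU : 0x0000U;
            subcells(s);
            out = (s[0] & 1) | ((s[1] & 1) << 1) | ((s[2] & 1) << 2) | ((s[3] & 1) << 3);
            inv_subcells(s);
            back = (s[0] & 1) | ((s[1] & 1) << 1) | ((s[2] & 1) << 2) | ((s[3] & 1) << 3);
            printf("S(%2u) = %2u, inverse returns %2u\n", n, out, back);
            if (back != n)
                return 1;
        }
        return 0;
    }

Because the layer is bit-sliced, the same operations evaluate all sixteen nibble positions of the state at once; using all-ones/all-zeros slices here just makes the 4-bit S-box easy to read off.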
* - * The input words are in fixsliced form. Technically there are two - * identical blocks in the input. We drop one when we write to the output. + * The \a input and \a output buffers can be the same buffer. */ -static void gift64nb_to_nibbles(unsigned char *output, const uint32_t input[4]) +static void gift64n_to_words + (unsigned char *output, const unsigned char *input) { - uint32_t s0, s1, s2, s3; + uint32_t s0, s1; - /* Load the state and split the two blocks into separate words */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - gift64b_swap_move(s0, s2, 0x0000FFFFU, 16); - gift64b_swap_move(s1, s3, 0x0000FFFFU, 16); - gift64b_swap_move(s0, s1, 0x0F0F0F0FU, 4); - gift64b_swap_move(s2, s3, 0x0F0F0F0FU, 4); + /* Load the input buffer into 32-bit words. We use the nibble order from + * the LOTUS-AEAD submission to NIST which is byte-reversed with respect + * to the nibble order of the original GIFT-64 paper. Nibble zero is in + * the first byte instead of the last, which means little-endian order. */ + s0 = le_load_word32(input + 4); + s1 = le_load_word32(input); - /* Rearrange the bits in the first block back into nibble form */ - gift64b_swap_move(s0, s0, 0x0000FF00U, 8); - gift64b_swap_move(s0, s0, 0x00CC00CCU, 6); - gift64b_swap_move(s0, s0, 0x0A0A0A0AU, 3); - gift64b_swap_move(s2, s2, 0x0000FF00U, 8); - gift64b_swap_move(s2, s2, 0x00CC00CCU, 6); - gift64b_swap_move(s2, s2, 0x0A0A0A0AU, 3); - be_store_word32(output, s2); - be_store_word32(output + 4, s0); + /* Rearrange the bits so that bits 0..3 of each nibble are + * scattered to bytes 0..3 of each word. The permutation is: + * + * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 + * + * Generated with "http://programming.sirrida.de/calcperm.php". + */ + #define PERM_WORDS(_x) \ + do { \ + uint32_t x = (_x); \ + bit_permute_step_32(x, 0x0a0a0a0a, 3); \ + bit_permute_step_32(x, 0x00cc00cc, 6); \ + bit_permute_step_32(x, 0x0000f0f0, 12); \ + bit_permute_step_32(x, 0x0000ff00, 8); \ + (_x) = x; \ + } while (0) + PERM_WORDS(s0); + PERM_WORDS(s1); + + /* Rearrange the bytes and write them to the output buffer */ + output[0] = (uint8_t)s0; + output[1] = (uint8_t)s1; + output[2] = (uint8_t)(s0 >> 8); + output[3] = (uint8_t)(s1 >> 8); + output[4] = (uint8_t)(s0 >> 16); + output[5] = (uint8_t)(s1 >> 16); + output[6] = (uint8_t)(s0 >> 24); + output[7] = (uint8_t)(s1 >> 24); } -void gift64nb_encrypt +/** + * \brief Converts the GIFT-64 word-based representation into nibble-based. + * + * \param output Output buffer to write the nibble-based version to. + * \param input Input buffer to read the word-based version from. 
+ */ +static void gift64n_to_nibbles + (unsigned char *output, const unsigned char *input) +{ + uint32_t s0, s1; + + /* Load the input bytes and rearrange them so that s0 contains the + * most significant nibbles and s1 contains the least significant */ + s0 = (((uint32_t)(input[6])) << 24) | + (((uint32_t)(input[4])) << 16) | + (((uint32_t)(input[2])) << 8) | + ((uint32_t)(input[0])); + s1 = (((uint32_t)(input[7])) << 24) | + (((uint32_t)(input[5])) << 16) | + (((uint32_t)(input[3])) << 8) | + ((uint32_t)(input[1])); + + /* Apply the inverse of PERM_WORDS() from the function above */ + #define INV_PERM_WORDS(_x) \ + do { \ + uint32_t x = (_x); \ + bit_permute_step_32(x, 0x00aa00aa, 7); \ + bit_permute_step_32(x, 0x0000cccc, 14); \ + bit_permute_step_32(x, 0x00f000f0, 4); \ + bit_permute_step_32(x, 0x0000ff00, 8); \ + (_x) = x; \ + } while (0) + INV_PERM_WORDS(s0); + INV_PERM_WORDS(s1); + + /* Store the result into the output buffer as 32-bit words */ + le_store_word32(output + 4, s0); + le_store_word32(output, s1); +} + +void gift64n_encrypt (const gift64n_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { - uint32_t state[4]; - gift64nb_to_words(state, input); - gift64b_encrypt_core(ks, state, 0); - gift64nb_to_nibbles(output, state); + gift64n_to_words(output, input); + gift64b_encrypt(ks, output, output); + gift64n_to_nibbles(output, output); } -void gift64nb_decrypt +void gift64n_decrypt (const gift64n_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { - uint32_t state[4]; - gift64nb_to_words(state, input); - gift64b_decrypt_core(ks, state, 0); - gift64nb_to_nibbles(output, state); + gift64n_to_words(output, input); + gift64b_decrypt(ks, output, output); + gift64n_to_nibbles(output, output); } -/* 4-bit tweak values expanded to 32-bit in fixsliced form */ -static uint32_t const GIFT64_tweaks[16] = { - 0x00000000, 0xee11ee11, 0xdd22dd22, 0x33333333, 0xbb44bb44, 0x55555555, - 0x66666666, 0x88778877, 0x77887788, 0x99999999, 0xaaaaaaaa, 0x44bb44bb, - 0xcccccccc, 0x22dd22dd, 0x11ee11ee, 0xffffffff -}; - void gift64t_encrypt (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak) + const unsigned char *input, uint16_t tweak) { - uint32_t state[4]; - gift64n_to_words(state, input); - gift64b_encrypt_core(ks, state, GIFT64_tweaks[tweak]); - gift64n_to_nibbles(output, state); + uint16_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; + + /* Copy the plaintext into the state buffer and convert from nibbles */ + gift64n_to_words(output, input); + s0 = be_load_word16(output); + s1 = be_load_word16(output + 2); + s2 = be_load_word16(output + 4); + s3 = be_load_word16(output + 6); + + /* The key schedule is initialized with the key itself */ + w0 = ks->k[0]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[3]; + + /* Perform all 28 rounds */ + for (round = 0; round < 28; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 0xFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 64-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s0 ^= (uint16_t)w3; + s1 ^= (uint16_t)(w3 >> 16); + s3 ^= 0x8000U ^ GIFT64_RC[round]; + + /* AddTweak - XOR in the tweak every 4 rounds except the last */ + if (((round + 1) % 4) == 0 && round < 27) + s2 ^= tweak; + + /* Rotate the key schedule */ + 
temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word16(output, s0); + be_store_word16(output + 2, s1); + be_store_word16(output + 4, s2); + be_store_word16(output + 6, s3); + gift64n_to_nibbles(output, output); } void gift64t_decrypt (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak) + const unsigned char *input, uint16_t tweak) { - uint32_t state[4]; - gift64n_to_words(state, input); - gift64b_decrypt_core(ks, state, GIFT64_tweaks[tweak]); - gift64n_to_nibbles(output, state); + uint16_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; + + /* Copy the ciphertext into the state buffer and convert from nibbles */ + gift64n_to_words(output, input); + s0 = be_load_word16(output); + s1 = be_load_word16(output + 2); + s2 = be_load_word16(output + 4); + s3 = be_load_word16(output + 6); + + /* Generate the decryption key at the end of the last round. + * + * To do that, we run the block operation forward to determine the + * final state of the key schedule after the last round: + * + * w0 = ks->k[0]; + * w1 = ks->k[1]; + * w2 = ks->k[2]; + * w3 = ks->k[3]; + * for (round = 0; round < 28; ++round) { + * temp = w3; + * w3 = w2; + * w2 = w1; + * w1 = w0; + * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + * } + * + * We can short-cut all of the above by noticing that we don't need + * to do the word rotations. Every 4 rounds, the rotation alignment + * returns to the original position and each word has been rotated + * by applying the "2 right and 4 left" bit-rotation step to it. + * We then repeat that 7 times for the full 28 rounds. The overall + * effect is to apply a "14 right and 28 left" bit-rotation to every word + * in the key schedule. That is equivalent to "14 right and 12 left" + * on the 16-bit sub-words. 
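[Editor's note, not part of the patch] The comment above claims that applying the per-round key rotation seven times to a word collapses into the single "14 right and 12 left on the 16-bit sub-words" rotation that gift64b_decrypt() and gift64t_decrypt() apply up front. That is easy to spot-check on a host machine; the sketch below is illustrative only, but the two rotation expressions are exactly the ones used in this patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Per-round rotation applied to the word entering position w0. */
    static uint32_t rotate_step(uint32_t w)
    {
        return ((w & 0xFFFC0000U) >> 2) | ((w & 0x00030000U) << 14) |
               ((w & 0x00000FFFU) << 4) | ((w & 0x0000F000U) >> 12);
    }

    /* Single fast-forward rotation used by the decryption routines. */
    static uint32_t fast_forward(uint32_t w)
    {
        return ((w & 0xC0000000U) >> 14) | ((w & 0x3FFF0000U) << 2) |
               ((w & 0x0000000FU) << 12) | ((w & 0x0000FFF0U) >> 4);
    }

    int main(void)
    {
        uint32_t w = 0x12345678U;   /* arbitrary sample word, not real key material */
        int i, ok = 1;
        for (i = 0; i < 1000; ++i) {
            uint32_t stepped = w;
            int r;
            for (r = 0; r < 7; ++r)
                stepped = rotate_step(stepped);
            if (stepped != fast_forward(w))
                ok = 0;
            w = w * 1664525U + 1013904223U;   /* simple LCG to vary the input */
        }
        printf(ok ? "shortcut matches 7 single steps\n" : "mismatch\n");
        return 0;
    }

The per-round step rotates the high 16-bit half right by 2 and the low half left by 4; seven applications give 14 right and 28 left, and 28 mod 16 is the 12-bit left rotation used by the shortcut.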
+ */ + w0 = ks->k[0]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[3]; + w0 = ((w0 & 0xC0000000U) >> 14) | ((w0 & 0x3FFF0000U) << 2) | + ((w0 & 0x0000000FU) << 12) | ((w0 & 0x0000FFF0U) >> 4); + w1 = ((w1 & 0xC0000000U) >> 14) | ((w1 & 0x3FFF0000U) << 2) | + ((w1 & 0x0000000FU) << 12) | ((w1 & 0x0000FFF0U) >> 4); + w2 = ((w2 & 0xC0000000U) >> 14) | ((w2 & 0x3FFF0000U) << 2) | + ((w2 & 0x0000000FU) << 12) | ((w2 & 0x0000FFF0U) >> 4); + w3 = ((w3 & 0xC0000000U) >> 14) | ((w3 & 0x3FFF0000U) << 2) | + ((w3 & 0x0000000FU) << 12) | ((w3 & 0x0000FFF0U) >> 4); + + /* Perform all 28 rounds */ + for (round = 28; round > 0; --round) { + /* Rotate the key schedule backwards */ + temp = w0; + w0 = w1; + w1 = w2; + w2 = w3; + w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | + ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); + + /* AddTweak - XOR in the tweak every 4 rounds except the last */ + if ((round % 4) == 0 && round != 28) + s2 ^= tweak; + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s0 ^= (uint16_t)w3; + s1 ^= (uint16_t)(w3 >> 16); + s3 ^= 0x8000U ^ GIFT64_RC[round - 1]; + + /* InvPermBits - apply the inverse of the 128-bit permutation */ + INV_PERM0(s0); + INV_PERM1(s1); + INV_PERM2(s2); + INV_PERM3(s3); + + /* InvSubCells - apply the inverse of the S-box */ + temp = s0; + s0 = s3; + s3 = temp; + s2 ^= s0 & s1; + s3 ^= 0xFFFFU; + s1 ^= s3; + s3 ^= s2; + s2 ^= s0 | s1; + s0 ^= s1 & s3; + s1 ^= s0 & s2; + } + + /* Pack the state into the plaintext buffer in nibble form */ + be_store_word16(output, s0); + be_store_word16(output + 2, s1); + be_store_word16(output + 4, s2); + be_store_word16(output + 6, s3); + gift64n_to_nibbles(output, output); } + +#endif /* GIFT64_LOW_MEMORY */ diff --git a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/internal-gift64.h b/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/internal-gift64.h index 40479c7..010359b 100644 --- a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/internal-gift64.h +++ b/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/internal-gift64.h @@ -28,6 +28,7 @@ * \brief GIFT-64 block cipher. * * References: https://eprint.iacr.org/2017/622.pdf, + * https://eprint.iacr.org/2020/412.pdf, * https://giftcipher.github.io/gift/ */ @@ -39,57 +40,63 @@ extern "C" { #endif /** + * \var GIFT64_LOW_MEMORY + * \brief Define this to 1 to use a low memory version of the key schedule. + * + * The default is to use the fix-sliced version of GIFT-64 which is very + * fast on 32-bit platforms but requires 48 bytes to store the key schedule. + * The large key schedule may be a problem on 8-bit and 16-bit platforms. + * The fix-sliced version also encrypts two blocks at a time in 32-bit + * words which is an unnecessary optimization for 8-bit platforms. + * + * GIFT64_LOW_MEMORY can be defined to 1 to select the original non + * fix-sliced version which only requires 16 bytes to store the key, + * with the rest of the key schedule expanded on the fly. + */ +#if !defined(GIFT64_LOW_MEMORY) +#if defined(__AVR__) +#define GIFT64_LOW_MEMORY 1 +#else +#define GIFT64_LOW_MEMORY 0 +#endif +#endif + +/** * \brief Size of a GIFT-64 block in bytes. */ #define GIFT64_BLOCK_SIZE 8 /** - * \brief Structure of the key schedule for GIFT-64 (bit-sliced). + * \brief Structure of the key schedule for GIFT-64. 
*/ typedef struct { uint32_t k[4]; /**< Words of the key schedule */ +#if !GIFT64_LOW_MEMORY uint32_t rk[8]; /**< Pre-computed round keys for fixsliced form */ +#endif -} gift64b_key_schedule_t; - -/** - * \brief Initializes the key schedule for GIFT-64 (bit-sliced). - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int gift64b_init - (gift64b_key_schedule_t *ks, const unsigned char *key, size_t key_len); +} gift64n_key_schedule_t; /** + * \fn void gift64n_update_round_keys(gift64n_key_schedule_t *ks); * \brief Updates the round keys after a change in the base key. * * \param ks Points to the key schedule to update. */ -void gift64b_update_round_keys(gift64b_key_schedule_t *ks); - -/** - * \brief Structure of the key schedule for GIFT-64 (nibble-based). - */ -typedef gift64b_key_schedule_t gift64n_key_schedule_t; +#if GIFT64_LOW_MEMORY +#define gift64n_update_round_keys(ks) do { ; } while (0) /* Not needed */ +#else +void gift64n_update_round_keys(gift64n_key_schedule_t *ks); +#endif /** * \brief Initializes the key schedule for GIFT-64 (nibble-based). * * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. + * \param key Points to the 16 bytes of the key data. */ -int gift64n_init - (gift64n_key_schedule_t *ks, const unsigned char *key, size_t key_len); +void gift64n_init(gift64n_key_schedule_t *ks, const unsigned char *key); /** * \brief Encrypts a 64-bit block with GIFT-64 (nibble-based). @@ -119,33 +126,23 @@ void gift64n_decrypt (const gift64n_key_schedule_t *ks, unsigned char *output, const unsigned char *input); -/** - * \brief Encrypts a 64-bit block with GIFT-64 (nibble-based big-endian). - * - * \param ks Points to the GIFT-64 key schedule. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void gift64nb_encrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 64-bit block with GIFT-64 (nibble-based big-endian). - * - * \param ks Points to the GIFT-64 key schedule. - * \param output Output buffer which must be at least 8 bytes in length. - * \param input Input buffer which must be at least 8 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. 
- */ -void gift64nb_decrypt - (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); +/* 4-bit tweak values expanded to 16-bit for TweGIFT-64 */ +#define GIFT64T_TWEAK_0 0x0000 /**< TweGIFT-64 tweak value 0 */ +#define GIFT64T_TWEAK_1 0xe1e1 /**< TweGIFT-64 tweak value 1 */ +#define GIFT64T_TWEAK_2 0xd2d2 /**< TweGIFT-64 tweak value 2 */ +#define GIFT64T_TWEAK_3 0x3333 /**< TweGIFT-64 tweak value 3 */ +#define GIFT64T_TWEAK_4 0xb4b4 /**< TweGIFT-64 tweak value 4 */ +#define GIFT64T_TWEAK_5 0x5555 /**< TweGIFT-64 tweak value 5 */ +#define GIFT64T_TWEAK_6 0x6666 /**< TweGIFT-64 tweak value 6 */ +#define GIFT64T_TWEAK_7 0x8787 /**< TweGIFT-64 tweak value 7 */ +#define GIFT64T_TWEAK_8 0x7878 /**< TweGIFT-64 tweak value 8 */ +#define GIFT64T_TWEAK_9 0x9999 /**< TweGIFT-64 tweak value 9 */ +#define GIFT64T_TWEAK_10 0xaaaa /**< TweGIFT-64 tweak value 10 */ +#define GIFT64T_TWEAK_11 0x4b4b /**< TweGIFT-64 tweak value 11 */ +#define GIFT64T_TWEAK_12 0xcccc /**< TweGIFT-64 tweak value 12 */ +#define GIFT64T_TWEAK_13 0x2d2d /**< TweGIFT-64 tweak value 13 */ +#define GIFT64T_TWEAK_14 0x1e1e /**< TweGIFT-64 tweak value 14 */ +#define GIFT64T_TWEAK_15 0xffff /**< TweGIFT-64 tweak value 15 */ /** * \brief Encrypts a 64-bit block with TweGIFT-64 (tweakable variant). @@ -153,7 +150,7 @@ void gift64nb_decrypt * \param ks Points to the GIFT-64 key schedule. * \param output Output buffer which must be at least 8 bytes in length. * \param input Input buffer which must be at least 8 bytes in length. - * \param tweak 4-bit tweak value. + * \param tweak 4-bit tweak value expanded to 16-bit. * * The \a input and \a output buffers can be the same buffer for * in-place encryption. @@ -165,7 +162,7 @@ void gift64nb_decrypt */ void gift64t_encrypt (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak); + const unsigned char *input, uint16_t tweak); /** * \brief Decrypts a 64-bit block with TweGIFT-64 (tweakable variant). @@ -173,7 +170,7 @@ void gift64t_encrypt * \param ks Points to the GIFT-64 key schedule. * \param output Output buffer which must be at least 8 bytes in length. * \param input Input buffer which must be at least 8 bytes in length. - * \param tweak 4-bit tweak value. + * \param tweak 4-bit tweak value expanded to 16-bit. * * The \a input and \a output buffers can be the same buffer for * in-place encryption. @@ -185,7 +182,7 @@ void gift64t_encrypt */ void gift64t_decrypt (const gift64n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak); + const unsigned char *input, uint16_t tweak); #ifdef __cplusplus } diff --git a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/internal-util.h b/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/internal-util.h +++ b/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. 
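[Editor's note, not part of the patch] The GIFT64T_TWEAK_* constants above move the 4-bit to 16-bit TweGIFT tweak expansion out of the cipher and into the API, so callers such as lotus-locus.c now pass the expanded constant directly. The sixteen values follow a simple pattern that can be checked against the list: the tweak nibble is replicated four times, and the high nibble of each byte is complemented whenever the tweak has odd parity. The generator below is a standalone illustration of that observation (the helper name is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Expand a 4-bit TweGIFT-64 tweak to 16 bits: replicate the nibble and
     * complement the high nibble of each byte when the tweak has odd parity.
     * This reproduces the GIFT64T_TWEAK_0..15 constants from internal-gift64.h. */
    static uint16_t expand_tweak(unsigned t)
    {
        unsigned parity = ((t >> 3) ^ (t >> 2) ^ (t >> 1) ^ t) & 1;
        uint16_t rep = (uint16_t)(t * 0x1111U);           /* t in every nibble */
        return parity ? (uint16_t)(rep ^ 0xF0F0U) : rep;  /* flip high nibbles */
    }

    int main(void)
    {
        unsigned t;
        for (t = 0; t < 16; ++t)
            printf("#define GIFT64T_TWEAK_%-2u 0x%04x\n", t, (unsigned)expand_tweak(t));
        return 0;
    }

The 32-bit GIFT64_tweaks table retained in the fix-sliced path plays the same role, with the values pre-converted to the fixsliced representation.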
*/ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + 
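[Editor's note, not part of the patch] The composed-rotation block above builds every 32-bit rotate count out of 1-bit steps and rotations by 8, 16 or 24, because those are the only cheap rotations on AVR. The identities themselves are target-independent and can be spot-checked on any host; the helper below is not from the patch, it simply mirrors two of the compositions:

    #include <stdint.h>
    #include <stdio.h>

    /* Plain 32-bit left rotate used as the reference (bits must be 1..31). */
    static uint32_t rotl32(uint32_t x, unsigned bits)
    {
        return (x << bits) | (x >> (32 - bits));
    }

    int main(void)
    {
        uint32_t x = 0x80C1A253U;   /* arbitrary test pattern */
        uint32_t composed;

        /* leftRotate5: rotate left by 8, then right by 3 (three 1-bit steps) */
        composed = rotl32(x, 8);
        composed = rotl32(composed, 31);   /* right by 1 == left by 31 */
        composed = rotl32(composed, 31);
        composed = rotl32(composed, 31);
        if (composed == rotl32(x, 5))
            printf("left-8 then right-3 equals left-5\n");

        /* leftRotate28: four 1-bit right rotations */
        composed = rotl32(rotl32(rotl32(rotl32(x, 31), 31), 31), 31);
        if (composed == rotl32(x, 28))
            printf("right-4 equals left-28\n");
        return 0;
    }

The remainder of the hunk then defines each rightRotateN(a) as leftRotate(32-N)(a), so the right rotations come for free from the same compositions.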
+/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/lotus-locus.c b/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/lotus-locus.c index e60b084..4a1efd0 100644 --- a/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/lotus-locus.c +++ b/lotus-locus/Implementations/crypto_aead/twegift64lotusaeadv1/rhys/lotus-locus.c @@ -57,7 +57,7 @@ STATIC_INLINE void lotus_or_locus_mul_2(gift64n_key_schedule_t *ks) ks->k[1] = (ks->k[1] << 1) | (ks->k[2] >> 31); ks->k[2] = (ks->k[2] << 1) | (ks->k[3] >> 31); ks->k[3] = (ks->k[3] << 1) ^ (mask & 0x87); - gift64b_update_round_keys(ks); + gift64n_update_round_keys(ks); } /** @@ -77,12 +77,12 @@ static void lotus_or_locus_init const unsigned char *nonce, unsigned char *T) { - gift64n_init(ks, key, LOTUS_AEAD_KEY_SIZE); + gift64n_init(ks, key); memset(deltaN, 0, 
GIFT64_BLOCK_SIZE); - gift64t_encrypt(ks, deltaN, deltaN, 0); + gift64t_encrypt(ks, deltaN, deltaN, GIFT64T_TWEAK_0); lw_xor_block_2_src(T, key, nonce, LOTUS_AEAD_KEY_SIZE); - gift64n_init(ks, T, LOTUS_AEAD_KEY_SIZE); - gift64t_encrypt(ks, deltaN, deltaN, 1); + gift64n_init(ks, T); + gift64t_encrypt(ks, deltaN, deltaN, GIFT64T_TWEAK_1); } /** @@ -105,7 +105,7 @@ static void lotus_or_locus_process_ad while (adlen > GIFT64_BLOCK_SIZE) { lotus_or_locus_mul_2(ks); lw_xor_block_2_src(X, ad, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(ks, X, X, 2); + gift64t_encrypt(ks, X, X, GIFT64T_TWEAK_2); lw_xor_block(V, X, GIFT64_BLOCK_SIZE); ad += GIFT64_BLOCK_SIZE; adlen -= GIFT64_BLOCK_SIZE; @@ -116,10 +116,10 @@ static void lotus_or_locus_process_ad memcpy(X, deltaN, GIFT64_BLOCK_SIZE); lw_xor_block(X, ad, temp); X[temp] ^= 0x01; - gift64t_encrypt(ks, X, X, 3); + gift64t_encrypt(ks, X, X, GIFT64T_TWEAK_3); } else { lw_xor_block_2_src(X, ad, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(ks, X, X, 2); + gift64t_encrypt(ks, X, X, GIFT64T_TWEAK_2); } lw_xor_block(V, X, GIFT64_BLOCK_SIZE); } @@ -142,7 +142,7 @@ static void lotus_or_locus_gen_tag lotus_or_locus_mul_2(ks); lw_xor_block(W, deltaN, GIFT64_BLOCK_SIZE); lw_xor_block(W, V, GIFT64_BLOCK_SIZE); - gift64t_encrypt(ks, W, W, 6); + gift64t_encrypt(ks, W, W, GIFT64T_TWEAK_6); lw_xor_block_2_src(tag, W, deltaN, GIFT64_BLOCK_SIZE); } @@ -180,15 +180,15 @@ int lotus_aead_encrypt while (mlen > (GIFT64_BLOCK_SIZE * 2)) { lotus_or_locus_mul_2(&ks); lw_xor_block_2_src(X1, m, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X1, 4); + gift64t_encrypt(&ks, X2, X1, GIFT64T_TWEAK_4); lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, 4); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_4); lw_xor_block_2_src (X2, m + GIFT64_BLOCK_SIZE, X2, GIFT64_BLOCK_SIZE); lw_xor_block_2_src(c, X2, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, 5); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_5); lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, 5); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_5); lw_xor_block_2_src (c + GIFT64_BLOCK_SIZE, X1, X2, GIFT64_BLOCK_SIZE); c += GIFT64_BLOCK_SIZE * 2; @@ -199,9 +199,9 @@ int lotus_aead_encrypt lotus_or_locus_mul_2(&ks); memcpy(X1, deltaN, GIFT64_BLOCK_SIZE); X1[0] ^= (unsigned char)temp; - gift64t_encrypt(&ks, X2, X1, 12); + gift64t_encrypt(&ks, X2, X1, GIFT64T_TWEAK_12); lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, 12); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_12); if (temp <= GIFT64_BLOCK_SIZE) { lw_xor_block(WV, m, temp); lw_xor_block(X2, m, temp); @@ -212,9 +212,9 @@ int lotus_aead_encrypt c += GIFT64_BLOCK_SIZE; m += GIFT64_BLOCK_SIZE; temp -= GIFT64_BLOCK_SIZE; - gift64t_encrypt(&ks, X2, X2, 13); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_13); lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, 13); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_13); lw_xor_block(WV, m, temp); lw_xor_block(X1, X2, temp); lw_xor_block_2_src(c, X1, m, temp); @@ -265,14 +265,14 @@ int lotus_aead_decrypt while (clen > (GIFT64_BLOCK_SIZE * 2)) { lotus_or_locus_mul_2(&ks); lw_xor_block_2_src(X1, c, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X1, 5); + gift64t_encrypt(&ks, X2, X1, GIFT64T_TWEAK_5); lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, 5); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_5); lw_xor_block(X2, c + GIFT64_BLOCK_SIZE, GIFT64_BLOCK_SIZE); lw_xor_block_2_src(m, X2, deltaN, GIFT64_BLOCK_SIZE); - 
gift64t_encrypt(&ks, X2, X2, 4); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_4); lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, 4); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_4); lw_xor_block_2_src (m + GIFT64_BLOCK_SIZE, X1, X2, GIFT64_BLOCK_SIZE); c += GIFT64_BLOCK_SIZE * 2; @@ -283,9 +283,9 @@ int lotus_aead_decrypt lotus_or_locus_mul_2(&ks); memcpy(X1, deltaN, GIFT64_BLOCK_SIZE); X1[0] ^= (unsigned char)temp; - gift64t_encrypt(&ks, X2, X1, 12); + gift64t_encrypt(&ks, X2, X1, GIFT64T_TWEAK_12); lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, 12); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_12); if (temp <= GIFT64_BLOCK_SIZE) { lw_xor_block_2_src(m, X2, c, temp); lw_xor_block(m, deltaN, temp); @@ -298,9 +298,9 @@ int lotus_aead_decrypt c += GIFT64_BLOCK_SIZE; m += GIFT64_BLOCK_SIZE; temp -= GIFT64_BLOCK_SIZE; - gift64t_encrypt(&ks, X2, X2, 13); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_13); lw_xor_block(WV, X2, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X2, X2, 13); + gift64t_encrypt(&ks, X2, X2, GIFT64T_TWEAK_13); lw_xor_block(X1, X2, temp); lw_xor_block_2_src(m, X1, c, temp); lw_xor_block(WV, m, temp); @@ -346,9 +346,9 @@ int locus_aead_encrypt while (mlen > GIFT64_BLOCK_SIZE) { lotus_or_locus_mul_2(&ks); lw_xor_block_2_src(X, m, deltaN, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X, X, 4); + gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_4); lw_xor_block(WV, X, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X, X, 4); + gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_4); lw_xor_block_2_src(c, X, deltaN, GIFT64_BLOCK_SIZE); c += GIFT64_BLOCK_SIZE; m += GIFT64_BLOCK_SIZE; @@ -358,10 +358,10 @@ int locus_aead_encrypt lotus_or_locus_mul_2(&ks); memcpy(X, deltaN, GIFT64_BLOCK_SIZE); X[0] ^= (unsigned char)temp; - gift64t_encrypt(&ks, X, X, 5); + gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_5); lw_xor_block(WV, X, GIFT64_BLOCK_SIZE); lw_xor_block(WV, m, temp); - gift64t_encrypt(&ks, X, X, 5); + gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_5); lw_xor_block(X, deltaN, temp); lw_xor_block_2_src(c, m, X, temp); c += temp; @@ -409,9 +409,9 @@ int locus_aead_decrypt while (clen > GIFT64_BLOCK_SIZE) { lotus_or_locus_mul_2(&ks); lw_xor_block_2_src(X, c, deltaN, GIFT64_BLOCK_SIZE); - gift64t_decrypt(&ks, X, X, 4); + gift64t_decrypt(&ks, X, X, GIFT64T_TWEAK_4); lw_xor_block(WV, X, GIFT64_BLOCK_SIZE); - gift64t_decrypt(&ks, X, X, 4); + gift64t_decrypt(&ks, X, X, GIFT64T_TWEAK_4); lw_xor_block_2_src(m, X, deltaN, GIFT64_BLOCK_SIZE); c += GIFT64_BLOCK_SIZE; m += GIFT64_BLOCK_SIZE; @@ -421,9 +421,9 @@ int locus_aead_decrypt lotus_or_locus_mul_2(&ks); memcpy(X, deltaN, GIFT64_BLOCK_SIZE); X[0] ^= (unsigned char)temp; - gift64t_encrypt(&ks, X, X, 5); + gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_5); lw_xor_block(WV, X, GIFT64_BLOCK_SIZE); - gift64t_encrypt(&ks, X, X, 5); + gift64t_encrypt(&ks, X, X, GIFT64T_TWEAK_5); lw_xor_block(X, deltaN, temp); lw_xor_block_2_src(m, c, X, temp); lw_xor_block(WV, m, temp); diff --git a/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/aead-common.c b/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
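
The lotus_or_locus_mul_2() hunk near the top of the lotus-locus.c diff shows the tail end of a doubling of the 128-bit key state: the four 32-bit words are shifted left by one bit and the result is reduced with the constant 0x87. As a rough standalone sketch only (assuming, as the visible code suggests, that mask is an all-ones or all-zeros word derived from the bit shifted out of k[0]; that mask computation sits above the hunk and is not shown), the operation looks like this:

#include <stdint.h>

/* Sketch only: double a 128-bit value held in k[0..3] (most significant word
 * first) with the reduction constant 0x87, without a data-dependent branch.
 * The name and word order are assumptions based on the hunk above, not taken
 * from the library headers. */
static void double_128(uint32_t k[4])
{
    /* all-ones if the dropped top bit is set, all-zeros otherwise */
    uint32_t mask = (uint32_t)(-(int32_t)(k[0] >> 31));
    k[0] = (k[0] << 1) | (k[1] >> 31);
    k[1] = (k[1] << 1) | (k[2] >> 31);
    k[2] = (k[2] << 1) | (k[3] >> 31);
    k[3] = (k[3] << 1) ^ (mask & 0x87);
}

The rest of that diff is a mechanical rename: the literal tweak values 0..13 passed to gift64t_encrypt()/gift64t_decrypt() are replaced by the corresponding named GIFT64T_TWEAK_* constants, and the key schedule setup moves to the gift64n_* entry points.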
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/aead-common.h b/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. 
- */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. 
- */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
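
The aead_check_tag() / aead_check_tag_precheck() pair documented above relies on a branch-free comparison: XOR differences are accumulated into accum, and (accum - 1) >> 8 turns "all bytes equal" into an all-ones mask and "any byte differs" into zero. A minimal standalone sketch of that pattern (assuming an arithmetic right shift of a negative int, which the deleted implementation above also relies on) is:

/* Sketch only: constant-time byte comparison in the style of aead_check_tag().
 * Returns 0 when the buffers match and -1 when they differ, without branching
 * on secret data. */
static int ct_equal(const unsigned char *a, const unsigned char *b, unsigned len)
{
    int accum = 0;
    while (len > 0) {
        accum |= (*a++ ^ *b++);     /* collects every differing bit */
        --len;
    }
    /* accum == 0    -> (accum - 1) >> 8 == -1 (match mask)
     * accum 1..255  -> (accum - 1) >> 8 ==  0 (mismatch)    */
    return ~((accum - 1) >> 8);
}

The _precheck variant simply ANDs an earlier 0 / -1 result into that mask before the mask is applied to the plaintext, so a failure detected elsewhere also wipes the output.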
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/api.h b/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/encrypt.c b/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/encrypt.c deleted file mode 100644 index e1ea967..0000000 --- a/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "orange.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return orange_zest_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return orange_zest_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/internal-photon256.c b/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/internal-photon256.c deleted file mode 100644 index b8743fe..0000000 --- a/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/internal-photon256.c +++ /dev/null @@ -1,479 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-photon256.h" -#include "internal-util.h" - -/** - * \brief Number of rounds in the PHOTON-256 permutation in bit-sliced form. 
- */ -#define PHOTON256_ROUNDS 12 - -/* Round constants for PHOTON-256 */ -static uint32_t const photon256_rc[PHOTON256_ROUNDS] = { - 0x96d2f0e1, 0xb4f0d2c3, 0xf0b49687, 0x692d0f1e, - 0x5a1e3c2d, 0x3c785a4b, 0xe1a58796, 0x4b0f2d3c, - 0x1e5a7869, 0xa5e1c3d2, 0xd296b4a5, 0x2d694b5a -}; - -/** - * \brief Evaluates the PHOTON-256 S-box in bit-sliced form. - * - * \param x0 Slice with bit 0 of all nibbles. - * \param x1 Slice with bit 1 of all nibbles. - * \param x2 Slice with bit 2 of all nibbles. - * \param x3 Slice with bit 3 of all nibbles. - * - * This bit-sliced S-box implementation is based on the AVR version - * "add_avr8_bitslice_asm" from the PHOTON-Beetle reference code. - */ -#define photon256_sbox(x0, x1, x2, x3) \ - do { \ - x1 ^= x2; \ - x3 ^= (x2 & x1); \ - t1 = x3; \ - x3 = (x3 & x1) ^ x2; \ - t2 = x3; \ - x3 ^= x0; \ - x3 = ~(x3); \ - x2 = x3; \ - t2 |= x0; \ - x0 ^= t1; \ - x1 ^= x0; \ - x2 |= x1; \ - x2 ^= t1; \ - x1 ^= t2; \ - x3 ^= x1; \ - } while (0) - -/** - * \brief Performs a field multiplication on the 8 nibbles in a row. - * - * \param a Field constant to multiply by. - * \param x Bit-sliced form of the row, with bits 0..3 of each nibble - * in bytes 0..3 of the word. - * - * \return a * x packed into the bytes of a word. - */ -static uint32_t photon256_field_multiply(uint8_t a, uint32_t x) -{ - /* For each 4-bit nibble we need to do this: - * - * result = 0; - * for (bit = 0; bit < 4; ++ bit) { - * if ((a & (1 << bit)) != 0) - * result ^= x; - * if ((x & 0x08) != 0) { - * x = (x << 1) ^ 3; - * } else { - * x = (x << 1); - * } - * } - * - * We don't need to worry about constant time for "a" because it is a - * known constant that isn't data-dependent. But we do need to worry - * about constant time for "x" as it is data. - */ - uint32_t result = 0; - uint32_t t; - #define PARALLEL_CONDITIONAL_ADD(bit) \ - do { \ - if ((a) & (1 << (bit))) \ - result ^= x; \ - } while (0) - #define PARALELL_ROTATE() \ - do { \ - t = x >> 24; \ - x = (x << 8) ^ t ^ (t << 8); \ - } while (0) - PARALLEL_CONDITIONAL_ADD(0); - PARALELL_ROTATE(); - PARALLEL_CONDITIONAL_ADD(1); - PARALELL_ROTATE(); - PARALLEL_CONDITIONAL_ADD(2); - PARALELL_ROTATE(); - PARALLEL_CONDITIONAL_ADD(3); - return result; -} - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) - -/** - * \brief Converts a PHOTON-256 state into bit-sliced form. - * - * \param out Points to the converted output. - * \param in Points to the PHOTON-256 state to convert. - */ -static void photon256_to_sliced - (uint32_t out[PHOTON256_STATE_SIZE / 4], - const unsigned char in[PHOTON256_STATE_SIZE]) -{ - /* We first scatter bits 0..3 of the nibbles to bytes 0..3 of the words. - * Then we rearrange the bytes to group all bits N into word N. - * - * Permutation generated with "http://programming.sirrida.de/calcperm.php". 
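
The comment inside photon256_field_multiply() above describes the per-nibble multiplication as a shift-and-conditionally-reduce loop. A single-nibble version of that loop, written as a standalone sketch (assuming the GF(16) reduction implied by the "x = (x << 1) ^ 3" step; the deleted code applies the same idea to eight nibbles packed into one 32-bit word), could look like:

#include <stdint.h>

/* Sketch only: multiply two 4-bit field elements, mirroring the pseudocode in
 * the comment above. Branching on "a" is acceptable because "a" is a public
 * matrix constant; the bit-sliced word version avoids the branch on "x". */
static uint8_t gf16_mul(uint8_t a, uint8_t x)
{
    uint8_t result = 0;
    int bit;
    for (bit = 0; bit < 4; ++bit) {
        if (a & (1 << bit))
            result ^= x;
        if (x & 0x08)
            x = (uint8_t)(((x << 1) ^ 0x03) & 0x0F);   /* reduce on overflow */
        else
            x = (uint8_t)((x << 1) & 0x0F);
    }
    return result;
}

In the word-parallel form, the overflow handling is replaced by the PARALELL_ROTATE() step, which moves the top byte (bit plane 3 of every nibble) back into the low planes branch-free.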
- * - * P = [0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 - * 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31] - */ - uint32_t t0, t1, t2, t3; - #define TO_BITSLICED_PERM(x) \ - do { \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x0000ff00, 8); \ - } while (0) - #define FROM_BITSLICED_PERM(x) \ - do { \ - bit_permute_step(x, 0x00aa00aa, 7); \ - bit_permute_step(x, 0x0000cccc, 14); \ - bit_permute_step(x, 0x00f000f0, 4); \ - bit_permute_step(x, 0x0000ff00, 8); \ - } while (0) - t0 = le_load_word32(in); - t1 = le_load_word32(in + 4); - t2 = le_load_word32(in + 8); - t3 = le_load_word32(in + 12); - TO_BITSLICED_PERM(t0); - TO_BITSLICED_PERM(t1); - TO_BITSLICED_PERM(t2); - TO_BITSLICED_PERM(t3); - out[0] = (t0 & 0x000000FFU) | ((t1 << 8) & 0x0000FF00U) | - ((t2 << 16) & 0x00FF0000U) | ((t3 << 24) & 0xFF000000U); - out[1] = ((t0 >> 8) & 0x000000FFU) | (t1 & 0x0000FF00U) | - ((t2 << 8) & 0x00FF0000U) | ((t3 << 16) & 0xFF000000U); - out[2] = ((t0 >> 16) & 0x000000FFU) | ((t1 >> 8) & 0x0000FF00U) | - (t2 & 0x00FF0000U) | ((t3 << 8) & 0xFF000000U); - out[3] = ((t0 >> 24) & 0x000000FFU) | ((t1 >> 16) & 0x0000FF00U) | - ((t2 >> 8) & 0x00FF0000U) | (t3 & 0xFF000000U); - t0 = le_load_word32(in + 16); - t1 = le_load_word32(in + 20); - t2 = le_load_word32(in + 24); - t3 = le_load_word32(in + 28); - TO_BITSLICED_PERM(t0); - TO_BITSLICED_PERM(t1); - TO_BITSLICED_PERM(t2); - TO_BITSLICED_PERM(t3); - out[4] = (t0 & 0x000000FFU) | ((t1 << 8) & 0x0000FF00U) | - ((t2 << 16) & 0x00FF0000U) | ((t3 << 24) & 0xFF000000U); - out[5] = ((t0 >> 8) & 0x000000FFU) | (t1 & 0x0000FF00U) | - ((t2 << 8) & 0x00FF0000U) | ((t3 << 16) & 0xFF000000U); - out[6] = ((t0 >> 16) & 0x000000FFU) | ((t1 >> 8) & 0x0000FF00U) | - (t2 & 0x00FF0000U) | ((t3 << 8) & 0xFF000000U); - out[7] = ((t0 >> 24) & 0x000000FFU) | ((t1 >> 16) & 0x0000FF00U) | - ((t2 >> 8) & 0x00FF0000U) | (t3 & 0xFF000000U); -} - -/** - * \brief Converts a PHOTON-256 state from bit-sliced form. - * - * \param out Points to the converted output. - * \param in Points to the PHOTON-256 state to convert. 
- */ -static void photon256_from_sliced - (unsigned char out[PHOTON256_STATE_SIZE], - const unsigned char in[PHOTON256_STATE_SIZE]) -{ - /* Do the reverse of photon256_to_sliced() */ - uint32_t x0, x1, x2, x3; - x0 = ((uint32_t)(in[0])) | - (((uint32_t)(in[4])) << 8) | - (((uint32_t)(in[8])) << 16) | - (((uint32_t)(in[12])) << 24); - x1 = ((uint32_t)(in[1])) | - (((uint32_t)(in[5])) << 8) | - (((uint32_t)(in[9])) << 16) | - (((uint32_t)(in[13])) << 24); - x2 = ((uint32_t)(in[2])) | - (((uint32_t)(in[6])) << 8) | - (((uint32_t)(in[10])) << 16) | - (((uint32_t)(in[14])) << 24); - x3 = ((uint32_t)(in[3])) | - (((uint32_t)(in[7])) << 8) | - (((uint32_t)(in[11])) << 16) | - (((uint32_t)(in[15])) << 24); - FROM_BITSLICED_PERM(x0); - FROM_BITSLICED_PERM(x1); - FROM_BITSLICED_PERM(x2); - FROM_BITSLICED_PERM(x3); - le_store_word32(out, x0); - le_store_word32(out + 4, x1); - le_store_word32(out + 8, x2); - le_store_word32(out + 12, x3); - x0 = ((uint32_t)(in[16])) | - (((uint32_t)(in[20])) << 8) | - (((uint32_t)(in[24])) << 16) | - (((uint32_t)(in[28])) << 24); - x1 = ((uint32_t)(in[17])) | - (((uint32_t)(in[21])) << 8) | - (((uint32_t)(in[25])) << 16) | - (((uint32_t)(in[29])) << 24); - x2 = ((uint32_t)(in[18])) | - (((uint32_t)(in[22])) << 8) | - (((uint32_t)(in[26])) << 16) | - (((uint32_t)(in[30])) << 24); - x3 = ((uint32_t)(in[19])) | - (((uint32_t)(in[23])) << 8) | - (((uint32_t)(in[27])) << 16) | - (((uint32_t)(in[31])) << 24); - FROM_BITSLICED_PERM(x0); - FROM_BITSLICED_PERM(x1); - FROM_BITSLICED_PERM(x2); - FROM_BITSLICED_PERM(x3); - le_store_word32(out + 16, x0); - le_store_word32(out + 20, x1); - le_store_word32(out + 24, x2); - le_store_word32(out + 28, x3); -} - -#if defined(LW_UTIL_LITTLE_ENDIAN) -/* Index the bit-sliced state bytes in little-endian byte order */ -#define READ_ROW0() \ - (((uint32_t)(S.bytes[0])) | \ - (((uint32_t)(S.bytes[4])) << 8) | \ - (((uint32_t)(S.bytes[8])) << 16) | \ - (((uint32_t)(S.bytes[12])) << 24)) -#define READ_ROW1() \ - (((uint32_t)(S.bytes[1])) | \ - (((uint32_t)(S.bytes[5])) << 8) | \ - (((uint32_t)(S.bytes[9])) << 16) | \ - (((uint32_t)(S.bytes[13])) << 24)) -#define READ_ROW2() \ - (((uint32_t)(S.bytes[2])) | \ - (((uint32_t)(S.bytes[6])) << 8) | \ - (((uint32_t)(S.bytes[10])) << 16) | \ - (((uint32_t)(S.bytes[14])) << 24)) -#define READ_ROW3() \ - (((uint32_t)(S.bytes[3])) | \ - (((uint32_t)(S.bytes[7])) << 8) | \ - (((uint32_t)(S.bytes[11])) << 16) | \ - (((uint32_t)(S.bytes[15])) << 24)) -#define READ_ROW4() \ - (((uint32_t)(S.bytes[16])) | \ - (((uint32_t)(S.bytes[20])) << 8) | \ - (((uint32_t)(S.bytes[24])) << 16) | \ - (((uint32_t)(S.bytes[28])) << 24)) -#define READ_ROW5() \ - (((uint32_t)(S.bytes[17])) | \ - (((uint32_t)(S.bytes[21])) << 8) | \ - (((uint32_t)(S.bytes[25])) << 16) | \ - (((uint32_t)(S.bytes[29])) << 24)) -#define READ_ROW6() \ - (((uint32_t)(S.bytes[18])) | \ - (((uint32_t)(S.bytes[22])) << 8) | \ - (((uint32_t)(S.bytes[26])) << 16) | \ - (((uint32_t)(S.bytes[30])) << 24)) -#define READ_ROW7() \ - (((uint32_t)(S.bytes[19])) | \ - (((uint32_t)(S.bytes[23])) << 8) | \ - (((uint32_t)(S.bytes[27])) << 16) | \ - (((uint32_t)(S.bytes[31])) << 24)) -#define WRITE_ROW(row, value) \ - do { \ - if ((row) < 4) { \ - S.bytes[(row)] = (uint8_t)(value); \ - S.bytes[(row) + 4] = (uint8_t)((value) >> 8); \ - S.bytes[(row) + 8] = (uint8_t)((value) >> 16); \ - S.bytes[(row) + 12] = (uint8_t)((value) >> 24); \ - } else { \ - S.bytes[(row) + 12] = (uint8_t)(value); \ - S.bytes[(row) + 16] = (uint8_t)((value) >> 8); \ - S.bytes[(row) + 20] = 
(uint8_t)((value) >> 16); \ - S.bytes[(row) + 24] = (uint8_t)((value) >> 24); \ - } \ - } while (0) -#else -/* Index the bit-sliced state bytes in big-endian byte order */ -#define READ_ROW0() \ - (((uint32_t)(S.bytes[3])) | \ - (((uint32_t)(S.bytes[7])) << 8) | \ - (((uint32_t)(S.bytes[11])) << 16) | \ - (((uint32_t)(S.bytes[15])) << 24)) -#define READ_ROW1() \ - (((uint32_t)(S.bytes[2])) | \ - (((uint32_t)(S.bytes[6])) << 8) | \ - (((uint32_t)(S.bytes[10])) << 16) | \ - (((uint32_t)(S.bytes[14])) << 24)) -#define READ_ROW2() \ - (((uint32_t)(S.bytes[1])) | \ - (((uint32_t)(S.bytes[5])) << 8) | \ - (((uint32_t)(S.bytes[9])) << 16) | \ - (((uint32_t)(S.bytes[13])) << 24)) -#define READ_ROW3() \ - (((uint32_t)(S.bytes[0])) | \ - (((uint32_t)(S.bytes[4])) << 8) | \ - (((uint32_t)(S.bytes[8])) << 16) | \ - (((uint32_t)(S.bytes[12])) << 24)) -#define READ_ROW4() \ - (((uint32_t)(S.bytes[19])) | \ - (((uint32_t)(S.bytes[23])) << 8) | \ - (((uint32_t)(S.bytes[27])) << 16) | \ - (((uint32_t)(S.bytes[31])) << 24)) -#define READ_ROW5() \ - (((uint32_t)(S.bytes[18])) | \ - (((uint32_t)(S.bytes[22])) << 8) | \ - (((uint32_t)(S.bytes[26])) << 16) | \ - (((uint32_t)(S.bytes[30])) << 24)) -#define READ_ROW6() \ - (((uint32_t)(S.bytes[17])) | \ - (((uint32_t)(S.bytes[21])) << 8) | \ - (((uint32_t)(S.bytes[25])) << 16) | \ - (((uint32_t)(S.bytes[29])) << 24)) -#define READ_ROW7() \ - (((uint32_t)(S.bytes[16])) | \ - (((uint32_t)(S.bytes[20])) << 8) | \ - (((uint32_t)(S.bytes[24])) << 16) | \ - (((uint32_t)(S.bytes[28])) << 24)) -#define WRITE_ROW(row, value) \ - do { \ - if ((row) < 4) { \ - S.bytes[3 - (row)] = (uint8_t)(value); \ - S.bytes[7 - (row)] = (uint8_t)((value) >> 8); \ - S.bytes[11 - (row)] = (uint8_t)((value) >> 16); \ - S.bytes[15 - (row)] = (uint8_t)((value) >> 24); \ - } else { \ - S.bytes[20 - (row)] = (uint8_t)(value); \ - S.bytes[24 - (row)] = (uint8_t)((value) >> 8); \ - S.bytes[28 - (row)] = (uint8_t)((value) >> 16); \ - S.bytes[32 - (row)] = (uint8_t)((value) >> 24); \ - } \ - } while (0) -#endif - -void photon256_permute(unsigned char state[PHOTON256_STATE_SIZE]) -{ - union { - uint32_t words[PHOTON256_STATE_SIZE / 4]; - uint8_t bytes[PHOTON256_STATE_SIZE]; - } S; - uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8; - uint8_t round; - - /* Convert the state into bit-sliced form */ - photon256_to_sliced(S.words, state); - - /* Perform all 12 permutation rounds */ - for (round = 0; round < PHOTON256_ROUNDS; ++round) { - /* Add the constants for this round */ - t0 = photon256_rc[round]; - S.words[0] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[1] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[2] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[3] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[4] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[5] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[6] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[7] ^= t0 & 0x01010101U; - - /* Apply the sbox to all nibbles in the state */ - photon256_sbox(S.words[0], S.words[1], S.words[2], S.words[3]); - photon256_sbox(S.words[4], S.words[5], S.words[6], S.words[7]); - - /* Rotate all rows left by the row number. - * - * We do this by applying permutations to the top and bottom words - * to rearrange the bits into the rotated form. Permutations - * generated with "http://programming.sirrida.de/calcperm.php". 
- * - * P_top = [0 1 2 3 4 5 6 7 15 8 9 10 11 12 13 14 22 23 - * 16 17 18 19 20 21 29 30 31 24 25 26 27 28] - * P_bot = [4 5 6 7 0 1 2 3 11 12 13 14 15 8 9 10 18 19 - * 20 21 22 23 16 17 25 26 27 28 29 30 31 24 - */ - #define TOP_ROTATE_PERM(x) \ - do { \ - t1 = (x); \ - bit_permute_step(t1, 0x07030100, 4); \ - bit_permute_step(t1, 0x22331100, 2); \ - bit_permute_step(t1, 0x55005500, 1); \ - (x) = t1; \ - } while (0) - #define BOTTOM_ROTATE_PERM(x) \ - do { \ - t1 = (x); \ - bit_permute_step(t1, 0x080c0e0f, 4); \ - bit_permute_step(t1, 0x22331100, 2); \ - bit_permute_step(t1, 0x55005500, 1); \ - (x) = t1; \ - } while (0) - TOP_ROTATE_PERM(S.words[0]); - TOP_ROTATE_PERM(S.words[1]); - TOP_ROTATE_PERM(S.words[2]); - TOP_ROTATE_PERM(S.words[3]); - BOTTOM_ROTATE_PERM(S.words[4]); - BOTTOM_ROTATE_PERM(S.words[5]); - BOTTOM_ROTATE_PERM(S.words[6]); - BOTTOM_ROTATE_PERM(S.words[7]); - - /* Mix the columns */ - #define MUL(a, x) (photon256_field_multiply((a), (x))) - t0 = READ_ROW0(); - t1 = READ_ROW1(); - t2 = READ_ROW2(); - t3 = READ_ROW3(); - t4 = READ_ROW4(); - t5 = READ_ROW5(); - t6 = READ_ROW6(); - t7 = READ_ROW7(); - t8 = MUL(0x02, t0) ^ MUL(0x04, t1) ^ MUL(0x02, t2) ^ MUL(0x0b, t3) ^ - MUL(0x02, t4) ^ MUL(0x08, t5) ^ MUL(0x05, t6) ^ MUL(0x06, t7); - WRITE_ROW(0, t8); - t8 = MUL(0x0c, t0) ^ MUL(0x09, t1) ^ MUL(0x08, t2) ^ MUL(0x0d, t3) ^ - MUL(0x07, t4) ^ MUL(0x07, t5) ^ MUL(0x05, t6) ^ MUL(0x02, t7); - WRITE_ROW(1, t8); - t8 = MUL(0x04, t0) ^ MUL(0x04, t1) ^ MUL(0x0d, t2) ^ MUL(0x0d, t3) ^ - MUL(0x09, t4) ^ MUL(0x04, t5) ^ MUL(0x0d, t6) ^ MUL(0x09, t7); - WRITE_ROW(2, t8); - t8 = MUL(0x01, t0) ^ MUL(0x06, t1) ^ MUL(0x05, t2) ^ MUL(0x01, t3) ^ - MUL(0x0c, t4) ^ MUL(0x0d, t5) ^ MUL(0x0f, t6) ^ MUL(0x0e, t7); - WRITE_ROW(3, t8); - t8 = MUL(0x0f, t0) ^ MUL(0x0c, t1) ^ MUL(0x09, t2) ^ MUL(0x0d, t3) ^ - MUL(0x0e, t4) ^ MUL(0x05, t5) ^ MUL(0x0e, t6) ^ MUL(0x0d, t7); - WRITE_ROW(4, t8); - t8 = MUL(0x09, t0) ^ MUL(0x0e, t1) ^ MUL(0x05, t2) ^ MUL(0x0f, t3) ^ - MUL(0x04, t4) ^ MUL(0x0c, t5) ^ MUL(0x09, t6) ^ MUL(0x06, t7); - WRITE_ROW(5, t8); - t8 = MUL(0x0c, t0) ^ MUL(0x02, t1) ^ MUL(0x02, t2) ^ MUL(0x0a, t3) ^ - MUL(0x03, t4) ^ MUL(0x01, t5) ^ MUL(0x01, t6) ^ MUL(0x0e, t7); - WRITE_ROW(6, t8); - t8 = MUL(0x0f, t0) ^ MUL(0x01, t1) ^ MUL(0x0d, t2) ^ MUL(0x0a, t3) ^ - MUL(0x05, t4) ^ MUL(0x0a, t5) ^ MUL(0x02, t6) ^ MUL(0x03, t7); - WRITE_ROW(7, t8); - } - - /* Convert back from bit-sliced form to regular form */ - photon256_from_sliced(state, S.bytes); -} diff --git a/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/internal-photon256.h b/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/internal-photon256.h deleted file mode 100644 index ce8729a..0000000 --- a/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/internal-photon256.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_PHOTON256_H -#define LW_INTERNAL_PHOTON256_H - -/** - * \file internal-photon256.h - * \brief Internal implementation of the PHOTON-256 permutation. - * - * Warning: The current implementation of PHOTON-256 is constant-time - * but not constant-cache. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the PHOTON-256 permutation state in bytes. - */ -#define PHOTON256_STATE_SIZE 32 - -/** - * \brief Permutes the PHOTON-256 state. - * - * \param state The state to be permuted. - */ -void photon256_permute(unsigned char state[PHOTON256_STATE_SIZE]); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/internal-util.h b/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
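The composed forms above rely on the fact that any rotation amount can be built from rotations by 1 and by multiples of 8, which are the only cheap rotations on AVR. A minimal standalone sketch (not part of this patch; rotl32/rotr32 are hypothetical helpers) checking a few of the compositions on a host compiler:

#include <stdint.h>
#include <assert.h>

static uint32_t rotl32(uint32_t x, unsigned n) { return (x << n) | (x >> (32 - n)); }
static uint32_t rotr32(uint32_t x, unsigned n) { return (x >> n) | (x << (32 - n)); }

int main(void)
{
    uint32_t x = 0x12345678;
    /* leftRotate5: rotate left by 8, then right by 3 */
    assert(rotl32(x, 5) == rotr32(rotl32(x, 8), 3));
    /* leftRotate12: rotate left by 16, then right by 4 */
    assert(rotl32(x, 12) == rotr32(rotl32(x, 16), 4));
    /* leftRotate29: the same as a right rotation by 3 */
    assert(rotl32(x, 29) == rotr32(x, 3));
    return 0;
}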
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
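One caveat for the 16-bit and 8-bit variants: the operands promote to int, so the macro expression can carry stale high bits until it is truncated back to the narrow type, which happens when the result is stored into a uint16_t or uint8_t variable. A standalone sketch (not part of this patch; rotl16 is a hypothetical helper) showing the promotion and the truncating cast:

#include <stdint.h>
#include <stdio.h>

static uint16_t rotl16(uint16_t x, unsigned n)
{
    /* The cast discards the high bits left over from integer promotion. */
    return (uint16_t)((x << n) | (x >> (16 - n)));
}

int main(void)
{
    uint16_t x = 0x8001;
    unsigned wide = (x << 1) | (x >> 15);   /* promoted result: 0x10003 */
    printf("promoted=0x%X truncated=0x%04X\n", wide, (unsigned)rotl16(x, 1));
    return 0;                               /* truncated result: 0x0003 */
}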
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/orange.c b/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/orange.c deleted file mode 100644 index 641e117..0000000 --- a/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/orange.c +++ /dev/null @@ -1,384 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "orange.h" -#include "internal-photon256.h" -#include "internal-util.h" -#include - -aead_cipher_t const orange_zest_cipher = { - "ORANGE-Zest", - ORANGE_ZEST_KEY_SIZE, - ORANGE_ZEST_NONCE_SIZE, - ORANGE_ZEST_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - orange_zest_aead_encrypt, - orange_zest_aead_decrypt -}; - -aead_hash_algorithm_t const orangish_hash_algorithm = { - "ORANGISH", - sizeof(int), - ORANGISH_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - orangish_hash, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \brief Doubles a block in the GF(128) field a number of times. - * - * \param block The block to be doubled. - * \param value The number of times to double the block. - */ -static void orange_block_double(unsigned char block[16], unsigned char value) -{ - unsigned index; - unsigned char mask; - while (value > 0) { - mask = (unsigned char)(((signed char)(block[15])) >> 7); - for (index = 15; index > 0; --index) - block[index] = (block[index] << 1) | (block[index - 1] >> 7); - block[0] = (block[0] << 1) ^ (mask & 0x87); - --value; - } -} - -/** - * \brief Rotates a block left by 1 bit. - * - * \param out The output block to be set to the rotated version. - * \param in The input block to be rotated, must not overlap with \a out. 
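orange_block_double above multiplies the 16-byte block by x in GF(2^128): when the top bit of block[15] falls off, the reduction constant 0x87 (from the polynomial x^128 + x^7 + x^2 + x + 1) is folded back into block[0], using a sign-extension mask rather than a data-dependent branch. A standalone cross-check sketch (not part of this patch; both helpers are hypothetical and, like the library code, assume an arithmetic right shift of negative values):

#include <string.h>
#include <assert.h>

/* Masked doubling, structured like one step of orange_block_double(). */
static void gf128_double_masked(unsigned char b[16])
{
    unsigned char mask = (unsigned char)(((signed char)b[15]) >> 7); /* 0xFF on carry */
    int i;
    for (i = 15; i > 0; --i)
        b[i] = (unsigned char)((b[i] << 1) | (b[i - 1] >> 7));
    b[0] = (unsigned char)((b[0] << 1) ^ (mask & 0x87));
}

/* Branchy reference doubling, used only to cross-check the masked form. */
static void gf128_double_ref(unsigned char b[16])
{
    int carry = (b[15] & 0x80) != 0;
    int i;
    for (i = 15; i > 0; --i)
        b[i] = (unsigned char)((b[i] << 1) | (b[i - 1] >> 7));
    b[0] = (unsigned char)(b[0] << 1);
    if (carry)
        b[0] ^= 0x87;
}

int main(void)
{
    unsigned char a[16], b[16];
    int i, r;
    for (i = 0; i < 16; ++i)
        a[i] = b[i] = (unsigned char)(i * 0x13);
    for (r = 0; r < 8; ++r) {   /* both helpers stay in lock-step over several doublings */
        gf128_double_masked(a);
        gf128_double_ref(b);
        assert(memcmp(a, b, 16) == 0);
    }
    return 0;
}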
- */ -static void orange_block_rotate - (unsigned char out[16], const unsigned char in[16]) -{ - unsigned index; - for (index = 15; index > 0; --index) - out[index] = (in[index] << 1) | (in[index - 1] >> 7); - out[0] = (in[0] << 1) | (in[15] >> 7); -} - -/** - * \brief Hash input data with ORANGE. - * - * \param state PHOTON-256 permutation state. - * \param data Points to the data to be hashed. - * \param len Length of the data to be hashed, must not be zero. - * \param domain0 Domain separation value for full last block. - * \param domain1 Domain separation value for partial last block. - */ -static void orange_process_hash - (unsigned char state[PHOTON256_STATE_SIZE], - const unsigned char *data, unsigned long long len, - unsigned char domain0, unsigned char domain1) -{ - unsigned temp; - while (len > PHOTON256_STATE_SIZE) { - photon256_permute(state); - lw_xor_block(state, data, PHOTON256_STATE_SIZE); - data += PHOTON256_STATE_SIZE; - len -= PHOTON256_STATE_SIZE; - } - photon256_permute(state); - temp = (unsigned)len; - if (temp < PHOTON256_STATE_SIZE) { - orange_block_double(state + 16, domain1); - state[temp] ^= 0x01; /* padding */ - } else { - orange_block_double(state + 16, domain0); - } - lw_xor_block(state, data, temp); -} - -/** - * \brief Applies the rho function to the ORANGE state. - * - * \param KS Output keystream to use to encrypt the plaintext or to - * decrypt the ciphertext. - * \param S Rolling key state. - * \param state Rolling PHOTON-256 permutation state. - */ -static void orange_rho - (unsigned char KS[32], unsigned char S[16], const unsigned char state[32]) -{ - orange_block_double(S, 1); - orange_block_rotate(KS, state); - lw_xor_block_2_src(KS + 16, state + 16, S, 16); - memcpy(S, state + 16, 16); -} - -/** - * \brief Encrypts plaintext with ORANGE. - * - * \param state PHOTON-256 permutation state. - * \param k Points to the key for the cipher. - * \param c Points to the ciphertext output buffer. - * \param m Points to the plaintext input buffer. - * \param len Length of the plaintext in bytes, must not be zero. - */ -static void orange_encrypt - (unsigned char state[PHOTON256_STATE_SIZE], const unsigned char *k, - unsigned char *c, const unsigned char *m, unsigned long long len) -{ - unsigned char S[ORANGE_ZEST_KEY_SIZE]; - unsigned char KS[PHOTON256_STATE_SIZE]; - unsigned temp; - memcpy(S, k, ORANGE_ZEST_KEY_SIZE); - while (len > PHOTON256_STATE_SIZE) { - photon256_permute(state); - orange_rho(KS, S, state); - lw_xor_block_2_src(c, m, KS, PHOTON256_STATE_SIZE); - lw_xor_block(state, c, PHOTON256_STATE_SIZE); - c += PHOTON256_STATE_SIZE; - m += PHOTON256_STATE_SIZE; - len -= PHOTON256_STATE_SIZE; - } - photon256_permute(state); - temp = (unsigned)len; - if (temp < PHOTON256_STATE_SIZE) { - orange_block_double(state + 16, 2); - orange_rho(KS, S, state); - lw_xor_block_2_src(c, m, KS, temp); - lw_xor_block(state, c, temp); - state[temp] ^= 0x01; /* padding */ - } else { - orange_block_double(state + 16, 1); - orange_rho(KS, S, state); - lw_xor_block_2_src(c, m, KS, PHOTON256_STATE_SIZE); - lw_xor_block(state, c, PHOTON256_STATE_SIZE); - } -} - -/** - * \brief Decrypts ciphertext with ORANGE. - * - * \param state PHOTON-256 permutation state. - * \param k Points to the key for the cipher. - * \param m Points to the plaintext output buffer. - * \param c Points to the ciphertext input buffer. - * \param len Length of the plaintext in bytes, must not be zero. 
- */ -static void orange_decrypt - (unsigned char state[PHOTON256_STATE_SIZE], const unsigned char *k, - unsigned char *m, const unsigned char *c, unsigned long long len) -{ - unsigned char S[ORANGE_ZEST_KEY_SIZE]; - unsigned char KS[PHOTON256_STATE_SIZE]; - unsigned temp; - memcpy(S, k, ORANGE_ZEST_KEY_SIZE); - while (len > PHOTON256_STATE_SIZE) { - photon256_permute(state); - orange_rho(KS, S, state); - lw_xor_block(state, c, PHOTON256_STATE_SIZE); - lw_xor_block_2_src(m, c, KS, PHOTON256_STATE_SIZE); - c += PHOTON256_STATE_SIZE; - m += PHOTON256_STATE_SIZE; - len -= PHOTON256_STATE_SIZE; - } - photon256_permute(state); - temp = (unsigned)len; - if (temp < PHOTON256_STATE_SIZE) { - orange_block_double(state + 16, 2); - orange_rho(KS, S, state); - lw_xor_block(state, c, temp); - lw_xor_block_2_src(m, c, KS, temp); - state[temp] ^= 0x01; /* padding */ - } else { - orange_block_double(state + 16, 1); - orange_rho(KS, S, state); - lw_xor_block(state, c, PHOTON256_STATE_SIZE); - lw_xor_block_2_src(m, c, KS, PHOTON256_STATE_SIZE); - } -} - -/** - * \brief Generates the authentication tag for ORANGE-Zest. - * - * \param state PHOTON-256 permutation state. - * - * The tag will be left in the leading bytes of the state on exit. - */ -static void orange_generate_tag(unsigned char state[PHOTON256_STATE_SIZE]) -{ - /* Swap the two halves of the state and run the permutation again */ - unsigned posn; - for (posn = 0; posn < (PHOTON256_STATE_SIZE / 2); ++posn) { - unsigned char temp = state[posn]; - state[posn] = state[posn + (PHOTON256_STATE_SIZE / 2)]; - state[posn + (PHOTON256_STATE_SIZE / 2)] = temp; - } - photon256_permute(state); -} - -int orange_zest_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ORANGE_ZEST_TAG_SIZE; - - /* Initialize the PHOTON-256 state with the nonce and key */ - memcpy(state, npub, 16); - memcpy(state + 16, k, 16); - - /* Handle the associated data and message payload */ - if (adlen == 0) { - if (mlen == 0) { - state[16] ^= 2; /* domain separation */ - photon256_permute(state); - memcpy(c + mlen, state, ORANGE_ZEST_TAG_SIZE); - return 0; - } else { - state[16] ^= 1; /* domain separation */ - orange_encrypt(state, k, c, m, mlen); - } - } else { - orange_process_hash(state, ad, adlen, 1, 2); - if (mlen != 0) - orange_encrypt(state, k, c, m, mlen); - } - - /* Generate the authentication tag */ - orange_generate_tag(state); - memcpy(c + mlen, state, ORANGE_ZEST_TAG_SIZE); - return 0; -} - -int orange_zest_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ORANGE_ZEST_TAG_SIZE) - return -1; - *mlen = clen - ORANGE_ZEST_TAG_SIZE; - - /* Initialize the PHOTON-256 state with the nonce and key */ - memcpy(state, npub, 16); - memcpy(state + 16, k, 16); - - /* Handle the associated data and message payload */ - clen -= ORANGE_ZEST_TAG_SIZE; - if (adlen == 0) { - if (clen == 0) { - state[16] ^= 2; /* domain separation */ - 
photon256_permute(state); - return aead_check_tag(m, 0, state, c, ORANGE_ZEST_TAG_SIZE); - } else { - state[16] ^= 1; /* domain separation */ - orange_decrypt(state, k, m, c, clen); - } - } else { - orange_process_hash(state, ad, adlen, 1, 2); - if (clen != 0) - orange_decrypt(state, k, m, c, clen); - } - - /* Check the authentication tag */ - orange_generate_tag(state); - return aead_check_tag(m, clen, state, c + clen, ORANGE_ZEST_TAG_SIZE); -} - -/** - * \brief Rate of absorbing data into the ORANGISH hash state. - */ -#define ORANGISH_RATE 16 - -int orangish_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - unsigned temp; - memset(state, 0, sizeof(state)); - if (inlen == 0) { - /* No absorption necessary for a zero-length input */ - } else if (inlen < ORANGISH_RATE) { - /* Single partial block */ - temp = (unsigned)inlen; - memcpy(state, in, temp); - state[temp] ^= 0x01; /* padding */ - photon256_permute(state); - lw_xor_block(state + 16, in, temp); - state[16 + temp] ^= 0x01; /* padding */ - state[0] ^= 0x02; /* domain separation */ - } else if (inlen == ORANGISH_RATE) { - /* Single full block */ - memcpy(state, in, ORANGISH_RATE); - photon256_permute(state); - lw_xor_block(state + 16, in, ORANGISH_RATE); - state[0] ^= 0x01; /* domain separation */ - } else { - /* Process double blocks until we run out */ - memcpy(state, in, ORANGISH_RATE); - photon256_permute(state); - lw_xor_block(state + 16, in, ORANGISH_RATE); - in += ORANGISH_RATE; - inlen -= ORANGISH_RATE; - while (inlen > ORANGISH_RATE) { - lw_xor_block(state, in, ORANGISH_RATE); - photon256_permute(state); - lw_xor_block(state + 16, in, ORANGISH_RATE); - in += ORANGISH_RATE; - inlen -= ORANGISH_RATE; - } - temp = (unsigned)inlen; - if (temp < ORANGISH_RATE) { - /* Last double block is partial */ - lw_xor_block(state, in, temp); - state[temp] ^= 0x01; /* padding */ - photon256_permute(state); - lw_xor_block(state + 16, in, temp); - state[16 + temp] ^= 0x01; /* padding */ - state[0] ^= 0x02; /* domain separation */ - } else { - /* Last double block is full */ - lw_xor_block(state, in, ORANGISH_RATE); - photon256_permute(state); - lw_xor_block(state + 16, in, ORANGISH_RATE); - state[0] ^= 0x01; /* domain separation */ - } - } - photon256_permute(state); - memcpy(out, state, 16); - photon256_permute(state); - memcpy(out + 16, state, 16); - return 0; -} diff --git a/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/orange.h b/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/orange.h deleted file mode 100644 index de5b00c..0000000 --- a/orange/Implementations/crypto_aead/orangezestv1/rhys-avr/orange.h +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ORANGE_H -#define LWCRYPTO_ORANGE_H - -#include "aead-common.h" - -/** - * \file orange.h - * \brief ORANGE authenticated encryption algorithm. - * - * ORANGE is a family of algorithms built around the PHOTON-256 permutation. - * There are two members of the family at present: - * - * \li ORANGE-Zest is an authenticated encryption algorithm with a 128-bit - * key, a 128-bit nonce, and a 128-bit tag. - * \li ORANGISH is a hash algorithm with a 256-bit output. - * - * References: https://www.isical.ac.in/~lightweight/Orange/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for ORANGE-Zest. - */ -#define ORANGE_ZEST_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for ORANGE-Zest. - */ -#define ORANGE_ZEST_TAG_SIZE 16 - -/** - * \brief Size of the nonce for ORANGE-Zest. - */ -#define ORANGE_ZEST_NONCE_SIZE 16 - -/** - * \brief Size of the hash output for the ORANGISH hash algorithm. - */ -#define ORANGISH_HASH_SIZE 32 - -/** - * \brief Meta-information block for the ORANGE-Zest cipher. - */ -extern aead_cipher_t const orange_zest_cipher; - -/** - * \brief Meta-information block for the ORANGISH hash algorithm. - */ -extern aead_hash_algorithm_t const orangish_hash_algorithm; - -/** - * \brief Encrypts and authenticates a packet with ORANGE-Zest. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa orange_zest_aead_decrypt() - */ -int orange_zest_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ORANGE-Zest. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa orange_zest_aead_encrypt() - */ -int orange_zest_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with ORANGISH to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * ORANGISH_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int orangish_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/orange/Implementations/crypto_aead/orangezestv1/rhys/internal-util.h b/orange/Implementations/crypto_aead/orangezestv1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/orange/Implementations/crypto_aead/orangezestv1/rhys/internal-util.h +++ b/orange/Implementations/crypto_aead/orangezestv1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
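A minimal usage sketch for the ORANGE-Zest interface declared above (not part of this patch; it assumes the program is linked against the rhys sources in this repository):

#include <stdio.h>
#include <string.h>
#include "orange.h"

int main(void)
{
    unsigned char key[ORANGE_ZEST_KEY_SIZE] = {0};
    unsigned char nonce[ORANGE_ZEST_NONCE_SIZE] = {0};
    unsigned char msg[] = "hello";
    unsigned char ad[] = "header";
    unsigned char ct[sizeof(msg) + ORANGE_ZEST_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    orange_zest_aead_encrypt(ct, &ctlen, msg, sizeof(msg), ad, sizeof(ad),
                             NULL, nonce, key);
    if (orange_zest_aead_decrypt(pt, &ptlen, NULL, ct, ctlen, ad, sizeof(ad),
                                 nonce, key) != 0 ||
        ptlen != sizeof(msg) || memcmp(pt, msg, (size_t)ptlen) != 0) {
        puts("round trip failed");
        return 1;
    }
    puts("round trip ok");
    return 0;
}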
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/orange/Implementations/crypto_hash/orangishv1/rhys-avr/aead-common.c b/orange/Implementations/crypto_hash/orangishv1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/orange/Implementations/crypto_hash/orangishv1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/orange/Implementations/crypto_hash/orangishv1/rhys-avr/aead-common.h b/orange/Implementations/crypto_hash/orangishv1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/orange/Implementations/crypto_hash/orangishv1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. 
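aead_check_tag above compares the tags without a data-dependent branch: it ORs the byte differences into an accumulator and turns "accumulator is zero" into an all-ones mask via (accum - 1) >> 8, which is also reused to wipe the plaintext on failure. A standalone sketch of the same trick (not part of this patch; ct_equal is a hypothetical helper and, like the library code, assumes an arithmetic right shift of negative values):

#include <assert.h>

static int ct_equal(const unsigned char *a, const unsigned char *b, unsigned len)
{
    int accum = 0;
    while (len > 0) {
        accum |= (*a++ ^ *b++);   /* becomes non-zero if any byte differs */
        --len;
    }
    accum = (accum - 1) >> 8;     /* all-ones only when accum was zero */
    return ~accum;                /* 0 on match, -1 on mismatch */
}

int main(void)
{
    unsigned char t1[4] = {1, 2, 3, 4};
    unsigned char t2[4] = {1, 2, 3, 4};
    unsigned char t3[4] = {1, 2, 3, 5};
    assert(ct_equal(t1, t2, 4) == 0);
    assert(ct_equal(t1, t3, 4) == -1);
    return 0;
}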
- * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
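The aead_cipher_t block above lets a test harness drive any algorithm through the same function pointers. A standalone sketch (not part of this patch; assumes linking against the rhys sources) that exercises orange_zest_cipher generically:

#include <stdio.h>
#include "aead-common.h"
#include "orange.h"

static void print_cipher_info(const aead_cipher_t *cipher)
{
    printf("%s: key=%u nonce=%u tag=%u\n",
           cipher->name, cipher->key_len, cipher->nonce_len, cipher->tag_len);
}

int main(void)
{
    const aead_cipher_t *cipher = &orange_zest_cipher;
    unsigned char key[16] = {0}, nonce[16] = {0};
    unsigned char ct[32];
    unsigned long long ctlen;

    print_cipher_info(cipher);
    /* Encrypt a zero-length message through the generic function pointer. */
    cipher->encrypt(ct, &ctlen, NULL, 0, NULL, 0, NULL, nonce, key);
    printf("ciphertext+tag length: %llu\n", ctlen);
    return 0;
}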
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/orange/Implementations/crypto_hash/orangishv1/rhys-avr/api.h b/orange/Implementations/crypto_hash/orangishv1/rhys-avr/api.h deleted file mode 100644 index ae8c7f6..0000000 --- a/orange/Implementations/crypto_hash/orangishv1/rhys-avr/api.h +++ /dev/null @@ -1 +0,0 @@ -#define CRYPTO_BYTES 32 diff --git a/orange/Implementations/crypto_hash/orangishv1/rhys-avr/hash.c b/orange/Implementations/crypto_hash/orangishv1/rhys-avr/hash.c deleted file mode 100644 index c652a6f..0000000 --- a/orange/Implementations/crypto_hash/orangishv1/rhys-avr/hash.c +++ /dev/null @@ -1,8 +0,0 @@ - -#include "orange.h" - -int crypto_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - return orangish_hash(out, in, inlen); -} diff --git a/orange/Implementations/crypto_hash/orangishv1/rhys-avr/internal-photon256.c b/orange/Implementations/crypto_hash/orangishv1/rhys-avr/internal-photon256.c deleted file mode 100644 index b8743fe..0000000 --- a/orange/Implementations/crypto_hash/orangishv1/rhys-avr/internal-photon256.c +++ /dev/null @@ -1,479 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-photon256.h" -#include "internal-util.h" - -/** - * \brief Number of rounds in the PHOTON-256 permutation in bit-sliced form. - */ -#define PHOTON256_ROUNDS 12 - -/* Round constants for PHOTON-256 */ -static uint32_t const photon256_rc[PHOTON256_ROUNDS] = { - 0x96d2f0e1, 0xb4f0d2c3, 0xf0b49687, 0x692d0f1e, - 0x5a1e3c2d, 0x3c785a4b, 0xe1a58796, 0x4b0f2d3c, - 0x1e5a7869, 0xa5e1c3d2, 0xd296b4a5, 0x2d694b5a -}; - -/** - * \brief Evaluates the PHOTON-256 S-box in bit-sliced form. 
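A minimal usage sketch for the crypto_hash() wrapper shown above (not part of this patch; the prototype is declared locally and the program is assumed to be linked against the rhys sources):

#include <stdio.h>

#define CRYPTO_BYTES 32
int crypto_hash(unsigned char *out, const unsigned char *in,
                unsigned long long inlen);

int main(void)
{
    unsigned char digest[CRYPTO_BYTES];
    const unsigned char msg[3] = {'a', 'b', 'c'};
    int i;
    crypto_hash(digest, msg, sizeof(msg));
    for (i = 0; i < CRYPTO_BYTES; ++i)
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}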
- * - * \param x0 Slice with bit 0 of all nibbles. - * \param x1 Slice with bit 1 of all nibbles. - * \param x2 Slice with bit 2 of all nibbles. - * \param x3 Slice with bit 3 of all nibbles. - * - * This bit-sliced S-box implementation is based on the AVR version - * "add_avr8_bitslice_asm" from the PHOTON-Beetle reference code. - */ -#define photon256_sbox(x0, x1, x2, x3) \ - do { \ - x1 ^= x2; \ - x3 ^= (x2 & x1); \ - t1 = x3; \ - x3 = (x3 & x1) ^ x2; \ - t2 = x3; \ - x3 ^= x0; \ - x3 = ~(x3); \ - x2 = x3; \ - t2 |= x0; \ - x0 ^= t1; \ - x1 ^= x0; \ - x2 |= x1; \ - x2 ^= t1; \ - x1 ^= t2; \ - x3 ^= x1; \ - } while (0) - -/** - * \brief Performs a field multiplication on the 8 nibbles in a row. - * - * \param a Field constant to multiply by. - * \param x Bit-sliced form of the row, with bits 0..3 of each nibble - * in bytes 0..3 of the word. - * - * \return a * x packed into the bytes of a word. - */ -static uint32_t photon256_field_multiply(uint8_t a, uint32_t x) -{ - /* For each 4-bit nibble we need to do this: - * - * result = 0; - * for (bit = 0; bit < 4; ++ bit) { - * if ((a & (1 << bit)) != 0) - * result ^= x; - * if ((x & 0x08) != 0) { - * x = (x << 1) ^ 3; - * } else { - * x = (x << 1); - * } - * } - * - * We don't need to worry about constant time for "a" because it is a - * known constant that isn't data-dependent. But we do need to worry - * about constant time for "x" as it is data. - */ - uint32_t result = 0; - uint32_t t; - #define PARALLEL_CONDITIONAL_ADD(bit) \ - do { \ - if ((a) & (1 << (bit))) \ - result ^= x; \ - } while (0) - #define PARALELL_ROTATE() \ - do { \ - t = x >> 24; \ - x = (x << 8) ^ t ^ (t << 8); \ - } while (0) - PARALLEL_CONDITIONAL_ADD(0); - PARALELL_ROTATE(); - PARALLEL_CONDITIONAL_ADD(1); - PARALELL_ROTATE(); - PARALLEL_CONDITIONAL_ADD(2); - PARALELL_ROTATE(); - PARALLEL_CONDITIONAL_ADD(3); - return result; -} - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) - -/** - * \brief Converts a PHOTON-256 state into bit-sliced form. - * - * \param out Points to the converted output. - * \param in Points to the PHOTON-256 state to convert. - */ -static void photon256_to_sliced - (uint32_t out[PHOTON256_STATE_SIZE / 4], - const unsigned char in[PHOTON256_STATE_SIZE]) -{ - /* We first scatter bits 0..3 of the nibbles to bytes 0..3 of the words. - * Then we rearrange the bytes to group all bits N into word N. - * - * Permutation generated with "http://programming.sirrida.de/calcperm.php". 
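photon256_field_multiply above evaluates, in bit-sliced form, the nibble arithmetic spelled out in its comment: the field is GF(2^4) with reduction polynomial x^4 + x + 1, so doubling a nibble is (x << 1) ^ 3 within 4 bits. A standalone scalar reference sketch (not part of this patch; gf16_mul is a hypothetical helper):

#include <stdint.h>
#include <assert.h>

static uint8_t gf16_mul(uint8_t a, uint8_t x)
{
    uint8_t result = 0;
    int bit;
    for (bit = 0; bit < 4; ++bit) {
        if (a & (1u << bit))
            result ^= x;
        /* Double x, reducing modulo x^4 + x + 1 and keeping 4 bits. */
        x = (uint8_t)(((x << 1) ^ ((x & 0x08) ? 0x13 : 0x00)) & 0x0F);
    }
    return result;
}

int main(void)
{
    assert(gf16_mul(1, 0x9) == 0x9);  /* multiplying by 1 is the identity */
    assert(gf16_mul(2, 0x8) == 0x3);  /* 2 * x^3 = x^4 = x + 1 */
    assert(gf16_mul(3, 0x3) == 0x5);  /* (x + 1)^2 = x^2 + 1 */
    return 0;
}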
- * - * P = [0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 - * 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31] - */ - uint32_t t0, t1, t2, t3; - #define TO_BITSLICED_PERM(x) \ - do { \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x0000ff00, 8); \ - } while (0) - #define FROM_BITSLICED_PERM(x) \ - do { \ - bit_permute_step(x, 0x00aa00aa, 7); \ - bit_permute_step(x, 0x0000cccc, 14); \ - bit_permute_step(x, 0x00f000f0, 4); \ - bit_permute_step(x, 0x0000ff00, 8); \ - } while (0) - t0 = le_load_word32(in); - t1 = le_load_word32(in + 4); - t2 = le_load_word32(in + 8); - t3 = le_load_word32(in + 12); - TO_BITSLICED_PERM(t0); - TO_BITSLICED_PERM(t1); - TO_BITSLICED_PERM(t2); - TO_BITSLICED_PERM(t3); - out[0] = (t0 & 0x000000FFU) | ((t1 << 8) & 0x0000FF00U) | - ((t2 << 16) & 0x00FF0000U) | ((t3 << 24) & 0xFF000000U); - out[1] = ((t0 >> 8) & 0x000000FFU) | (t1 & 0x0000FF00U) | - ((t2 << 8) & 0x00FF0000U) | ((t3 << 16) & 0xFF000000U); - out[2] = ((t0 >> 16) & 0x000000FFU) | ((t1 >> 8) & 0x0000FF00U) | - (t2 & 0x00FF0000U) | ((t3 << 8) & 0xFF000000U); - out[3] = ((t0 >> 24) & 0x000000FFU) | ((t1 >> 16) & 0x0000FF00U) | - ((t2 >> 8) & 0x00FF0000U) | (t3 & 0xFF000000U); - t0 = le_load_word32(in + 16); - t1 = le_load_word32(in + 20); - t2 = le_load_word32(in + 24); - t3 = le_load_word32(in + 28); - TO_BITSLICED_PERM(t0); - TO_BITSLICED_PERM(t1); - TO_BITSLICED_PERM(t2); - TO_BITSLICED_PERM(t3); - out[4] = (t0 & 0x000000FFU) | ((t1 << 8) & 0x0000FF00U) | - ((t2 << 16) & 0x00FF0000U) | ((t3 << 24) & 0xFF000000U); - out[5] = ((t0 >> 8) & 0x000000FFU) | (t1 & 0x0000FF00U) | - ((t2 << 8) & 0x00FF0000U) | ((t3 << 16) & 0xFF000000U); - out[6] = ((t0 >> 16) & 0x000000FFU) | ((t1 >> 8) & 0x0000FF00U) | - (t2 & 0x00FF0000U) | ((t3 << 8) & 0xFF000000U); - out[7] = ((t0 >> 24) & 0x000000FFU) | ((t1 >> 16) & 0x0000FF00U) | - ((t2 >> 8) & 0x00FF0000U) | (t3 & 0xFF000000U); -} - -/** - * \brief Converts a PHOTON-256 state from bit-sliced form. - * - * \param out Points to the converted output. - * \param in Points to the PHOTON-256 state to convert. 
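TO_BITSLICED_PERM above is built from delta swaps: each bit_permute_step exchanges the bits selected by the mask with the bits `shift` positions above them, and because the selected pairs are disjoint each step is an involution (the inverse conversion composes its own set of swaps). A standalone sketch (not part of this patch; delta_swap is a hypothetical helper):

#include <stdint.h>
#include <assert.h>

static uint32_t delta_swap(uint32_t y, uint32_t mask, unsigned shift)
{
    uint32_t t = ((y >> shift) ^ y) & mask;
    return (y ^ t) ^ (t << shift);
}

int main(void)
{
    uint32_t x = 0xCAFEBABE;
    /* First step of TO_BITSLICED_PERM: swap bit pairs 3 positions apart. */
    uint32_t once = delta_swap(x, 0x0a0a0a0a, 3);
    assert(delta_swap(once, 0x0a0a0a0a, 3) == x);  /* each step is an involution */
    return 0;
}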
- */ -static void photon256_from_sliced - (unsigned char out[PHOTON256_STATE_SIZE], - const unsigned char in[PHOTON256_STATE_SIZE]) -{ - /* Do the reverse of photon256_to_sliced() */ - uint32_t x0, x1, x2, x3; - x0 = ((uint32_t)(in[0])) | - (((uint32_t)(in[4])) << 8) | - (((uint32_t)(in[8])) << 16) | - (((uint32_t)(in[12])) << 24); - x1 = ((uint32_t)(in[1])) | - (((uint32_t)(in[5])) << 8) | - (((uint32_t)(in[9])) << 16) | - (((uint32_t)(in[13])) << 24); - x2 = ((uint32_t)(in[2])) | - (((uint32_t)(in[6])) << 8) | - (((uint32_t)(in[10])) << 16) | - (((uint32_t)(in[14])) << 24); - x3 = ((uint32_t)(in[3])) | - (((uint32_t)(in[7])) << 8) | - (((uint32_t)(in[11])) << 16) | - (((uint32_t)(in[15])) << 24); - FROM_BITSLICED_PERM(x0); - FROM_BITSLICED_PERM(x1); - FROM_BITSLICED_PERM(x2); - FROM_BITSLICED_PERM(x3); - le_store_word32(out, x0); - le_store_word32(out + 4, x1); - le_store_word32(out + 8, x2); - le_store_word32(out + 12, x3); - x0 = ((uint32_t)(in[16])) | - (((uint32_t)(in[20])) << 8) | - (((uint32_t)(in[24])) << 16) | - (((uint32_t)(in[28])) << 24); - x1 = ((uint32_t)(in[17])) | - (((uint32_t)(in[21])) << 8) | - (((uint32_t)(in[25])) << 16) | - (((uint32_t)(in[29])) << 24); - x2 = ((uint32_t)(in[18])) | - (((uint32_t)(in[22])) << 8) | - (((uint32_t)(in[26])) << 16) | - (((uint32_t)(in[30])) << 24); - x3 = ((uint32_t)(in[19])) | - (((uint32_t)(in[23])) << 8) | - (((uint32_t)(in[27])) << 16) | - (((uint32_t)(in[31])) << 24); - FROM_BITSLICED_PERM(x0); - FROM_BITSLICED_PERM(x1); - FROM_BITSLICED_PERM(x2); - FROM_BITSLICED_PERM(x3); - le_store_word32(out + 16, x0); - le_store_word32(out + 20, x1); - le_store_word32(out + 24, x2); - le_store_word32(out + 28, x3); -} - -#if defined(LW_UTIL_LITTLE_ENDIAN) -/* Index the bit-sliced state bytes in little-endian byte order */ -#define READ_ROW0() \ - (((uint32_t)(S.bytes[0])) | \ - (((uint32_t)(S.bytes[4])) << 8) | \ - (((uint32_t)(S.bytes[8])) << 16) | \ - (((uint32_t)(S.bytes[12])) << 24)) -#define READ_ROW1() \ - (((uint32_t)(S.bytes[1])) | \ - (((uint32_t)(S.bytes[5])) << 8) | \ - (((uint32_t)(S.bytes[9])) << 16) | \ - (((uint32_t)(S.bytes[13])) << 24)) -#define READ_ROW2() \ - (((uint32_t)(S.bytes[2])) | \ - (((uint32_t)(S.bytes[6])) << 8) | \ - (((uint32_t)(S.bytes[10])) << 16) | \ - (((uint32_t)(S.bytes[14])) << 24)) -#define READ_ROW3() \ - (((uint32_t)(S.bytes[3])) | \ - (((uint32_t)(S.bytes[7])) << 8) | \ - (((uint32_t)(S.bytes[11])) << 16) | \ - (((uint32_t)(S.bytes[15])) << 24)) -#define READ_ROW4() \ - (((uint32_t)(S.bytes[16])) | \ - (((uint32_t)(S.bytes[20])) << 8) | \ - (((uint32_t)(S.bytes[24])) << 16) | \ - (((uint32_t)(S.bytes[28])) << 24)) -#define READ_ROW5() \ - (((uint32_t)(S.bytes[17])) | \ - (((uint32_t)(S.bytes[21])) << 8) | \ - (((uint32_t)(S.bytes[25])) << 16) | \ - (((uint32_t)(S.bytes[29])) << 24)) -#define READ_ROW6() \ - (((uint32_t)(S.bytes[18])) | \ - (((uint32_t)(S.bytes[22])) << 8) | \ - (((uint32_t)(S.bytes[26])) << 16) | \ - (((uint32_t)(S.bytes[30])) << 24)) -#define READ_ROW7() \ - (((uint32_t)(S.bytes[19])) | \ - (((uint32_t)(S.bytes[23])) << 8) | \ - (((uint32_t)(S.bytes[27])) << 16) | \ - (((uint32_t)(S.bytes[31])) << 24)) -#define WRITE_ROW(row, value) \ - do { \ - if ((row) < 4) { \ - S.bytes[(row)] = (uint8_t)(value); \ - S.bytes[(row) + 4] = (uint8_t)((value) >> 8); \ - S.bytes[(row) + 8] = (uint8_t)((value) >> 16); \ - S.bytes[(row) + 12] = (uint8_t)((value) >> 24); \ - } else { \ - S.bytes[(row) + 12] = (uint8_t)(value); \ - S.bytes[(row) + 16] = (uint8_t)((value) >> 8); \ - S.bytes[(row) + 20] = 
(uint8_t)((value) >> 16); \ - S.bytes[(row) + 24] = (uint8_t)((value) >> 24); \ - } \ - } while (0) -#else -/* Index the bit-sliced state bytes in big-endian byte order */ -#define READ_ROW0() \ - (((uint32_t)(S.bytes[3])) | \ - (((uint32_t)(S.bytes[7])) << 8) | \ - (((uint32_t)(S.bytes[11])) << 16) | \ - (((uint32_t)(S.bytes[15])) << 24)) -#define READ_ROW1() \ - (((uint32_t)(S.bytes[2])) | \ - (((uint32_t)(S.bytes[6])) << 8) | \ - (((uint32_t)(S.bytes[10])) << 16) | \ - (((uint32_t)(S.bytes[14])) << 24)) -#define READ_ROW2() \ - (((uint32_t)(S.bytes[1])) | \ - (((uint32_t)(S.bytes[5])) << 8) | \ - (((uint32_t)(S.bytes[9])) << 16) | \ - (((uint32_t)(S.bytes[13])) << 24)) -#define READ_ROW3() \ - (((uint32_t)(S.bytes[0])) | \ - (((uint32_t)(S.bytes[4])) << 8) | \ - (((uint32_t)(S.bytes[8])) << 16) | \ - (((uint32_t)(S.bytes[12])) << 24)) -#define READ_ROW4() \ - (((uint32_t)(S.bytes[19])) | \ - (((uint32_t)(S.bytes[23])) << 8) | \ - (((uint32_t)(S.bytes[27])) << 16) | \ - (((uint32_t)(S.bytes[31])) << 24)) -#define READ_ROW5() \ - (((uint32_t)(S.bytes[18])) | \ - (((uint32_t)(S.bytes[22])) << 8) | \ - (((uint32_t)(S.bytes[26])) << 16) | \ - (((uint32_t)(S.bytes[30])) << 24)) -#define READ_ROW6() \ - (((uint32_t)(S.bytes[17])) | \ - (((uint32_t)(S.bytes[21])) << 8) | \ - (((uint32_t)(S.bytes[25])) << 16) | \ - (((uint32_t)(S.bytes[29])) << 24)) -#define READ_ROW7() \ - (((uint32_t)(S.bytes[16])) | \ - (((uint32_t)(S.bytes[20])) << 8) | \ - (((uint32_t)(S.bytes[24])) << 16) | \ - (((uint32_t)(S.bytes[28])) << 24)) -#define WRITE_ROW(row, value) \ - do { \ - if ((row) < 4) { \ - S.bytes[3 - (row)] = (uint8_t)(value); \ - S.bytes[7 - (row)] = (uint8_t)((value) >> 8); \ - S.bytes[11 - (row)] = (uint8_t)((value) >> 16); \ - S.bytes[15 - (row)] = (uint8_t)((value) >> 24); \ - } else { \ - S.bytes[20 - (row)] = (uint8_t)(value); \ - S.bytes[24 - (row)] = (uint8_t)((value) >> 8); \ - S.bytes[28 - (row)] = (uint8_t)((value) >> 16); \ - S.bytes[32 - (row)] = (uint8_t)((value) >> 24); \ - } \ - } while (0) -#endif - -void photon256_permute(unsigned char state[PHOTON256_STATE_SIZE]) -{ - union { - uint32_t words[PHOTON256_STATE_SIZE / 4]; - uint8_t bytes[PHOTON256_STATE_SIZE]; - } S; - uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8; - uint8_t round; - - /* Convert the state into bit-sliced form */ - photon256_to_sliced(S.words, state); - - /* Perform all 12 permutation rounds */ - for (round = 0; round < PHOTON256_ROUNDS; ++round) { - /* Add the constants for this round */ - t0 = photon256_rc[round]; - S.words[0] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[1] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[2] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[3] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[4] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[5] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[6] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[7] ^= t0 & 0x01010101U; - - /* Apply the sbox to all nibbles in the state */ - photon256_sbox(S.words[0], S.words[1], S.words[2], S.words[3]); - photon256_sbox(S.words[4], S.words[5], S.words[6], S.words[7]); - - /* Rotate all rows left by the row number. - * - * We do this by applying permutations to the top and bottom words - * to rearrange the bits into the rotated form. Permutations - * generated with "http://programming.sirrida.de/calcperm.php". 
- * - * P_top = [0 1 2 3 4 5 6 7 15 8 9 10 11 12 13 14 22 23 - * 16 17 18 19 20 21 29 30 31 24 25 26 27 28] - * P_bot = [4 5 6 7 0 1 2 3 11 12 13 14 15 8 9 10 18 19 - * 20 21 22 23 16 17 25 26 27 28 29 30 31 24 - */ - #define TOP_ROTATE_PERM(x) \ - do { \ - t1 = (x); \ - bit_permute_step(t1, 0x07030100, 4); \ - bit_permute_step(t1, 0x22331100, 2); \ - bit_permute_step(t1, 0x55005500, 1); \ - (x) = t1; \ - } while (0) - #define BOTTOM_ROTATE_PERM(x) \ - do { \ - t1 = (x); \ - bit_permute_step(t1, 0x080c0e0f, 4); \ - bit_permute_step(t1, 0x22331100, 2); \ - bit_permute_step(t1, 0x55005500, 1); \ - (x) = t1; \ - } while (0) - TOP_ROTATE_PERM(S.words[0]); - TOP_ROTATE_PERM(S.words[1]); - TOP_ROTATE_PERM(S.words[2]); - TOP_ROTATE_PERM(S.words[3]); - BOTTOM_ROTATE_PERM(S.words[4]); - BOTTOM_ROTATE_PERM(S.words[5]); - BOTTOM_ROTATE_PERM(S.words[6]); - BOTTOM_ROTATE_PERM(S.words[7]); - - /* Mix the columns */ - #define MUL(a, x) (photon256_field_multiply((a), (x))) - t0 = READ_ROW0(); - t1 = READ_ROW1(); - t2 = READ_ROW2(); - t3 = READ_ROW3(); - t4 = READ_ROW4(); - t5 = READ_ROW5(); - t6 = READ_ROW6(); - t7 = READ_ROW7(); - t8 = MUL(0x02, t0) ^ MUL(0x04, t1) ^ MUL(0x02, t2) ^ MUL(0x0b, t3) ^ - MUL(0x02, t4) ^ MUL(0x08, t5) ^ MUL(0x05, t6) ^ MUL(0x06, t7); - WRITE_ROW(0, t8); - t8 = MUL(0x0c, t0) ^ MUL(0x09, t1) ^ MUL(0x08, t2) ^ MUL(0x0d, t3) ^ - MUL(0x07, t4) ^ MUL(0x07, t5) ^ MUL(0x05, t6) ^ MUL(0x02, t7); - WRITE_ROW(1, t8); - t8 = MUL(0x04, t0) ^ MUL(0x04, t1) ^ MUL(0x0d, t2) ^ MUL(0x0d, t3) ^ - MUL(0x09, t4) ^ MUL(0x04, t5) ^ MUL(0x0d, t6) ^ MUL(0x09, t7); - WRITE_ROW(2, t8); - t8 = MUL(0x01, t0) ^ MUL(0x06, t1) ^ MUL(0x05, t2) ^ MUL(0x01, t3) ^ - MUL(0x0c, t4) ^ MUL(0x0d, t5) ^ MUL(0x0f, t6) ^ MUL(0x0e, t7); - WRITE_ROW(3, t8); - t8 = MUL(0x0f, t0) ^ MUL(0x0c, t1) ^ MUL(0x09, t2) ^ MUL(0x0d, t3) ^ - MUL(0x0e, t4) ^ MUL(0x05, t5) ^ MUL(0x0e, t6) ^ MUL(0x0d, t7); - WRITE_ROW(4, t8); - t8 = MUL(0x09, t0) ^ MUL(0x0e, t1) ^ MUL(0x05, t2) ^ MUL(0x0f, t3) ^ - MUL(0x04, t4) ^ MUL(0x0c, t5) ^ MUL(0x09, t6) ^ MUL(0x06, t7); - WRITE_ROW(5, t8); - t8 = MUL(0x0c, t0) ^ MUL(0x02, t1) ^ MUL(0x02, t2) ^ MUL(0x0a, t3) ^ - MUL(0x03, t4) ^ MUL(0x01, t5) ^ MUL(0x01, t6) ^ MUL(0x0e, t7); - WRITE_ROW(6, t8); - t8 = MUL(0x0f, t0) ^ MUL(0x01, t1) ^ MUL(0x0d, t2) ^ MUL(0x0a, t3) ^ - MUL(0x05, t4) ^ MUL(0x0a, t5) ^ MUL(0x02, t6) ^ MUL(0x03, t7); - WRITE_ROW(7, t8); - } - - /* Convert back from bit-sliced form to regular form */ - photon256_from_sliced(state, S.bytes); -} diff --git a/orange/Implementations/crypto_hash/orangishv1/rhys-avr/internal-photon256.h b/orange/Implementations/crypto_hash/orangishv1/rhys-avr/internal-photon256.h deleted file mode 100644 index ce8729a..0000000 --- a/orange/Implementations/crypto_hash/orangishv1/rhys-avr/internal-photon256.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
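[Editor's illustrative sketch, not part of the patch.] To make the column-mixing step easier to follow, the stand-alone snippet below computes one output nibble of the first matrix row for a single column with a plain GF(16) multiply (hypothetical names gf16_mul, row0, col). Each WRITE_ROW line in the code above does this same computation for all eight columns at once in the bit-sliced representation; the row-0 coefficients {2, 4, 2, 0xb, 2, 8, 5, 6} are taken from the WRITE_ROW(0, ...) expression above.

#include <stdint.h>
#include <stdio.h>

/* Plain GF(2^4) multiply, same reduction as the bit-sliced code. */
static uint8_t gf16_mul(uint8_t a, uint8_t x)
{
    uint8_t result = 0;
    int bit;
    for (bit = 0; bit < 4; ++bit) {
        if (a & (1 << bit))
            result ^= x;
        x = (x & 0x08) ? (((x << 1) ^ 0x03) & 0x0F) : ((x << 1) & 0x0F);
    }
    return result;
}

int main(void)
{
    /* Row 0 of the mixing matrix, as used in the WRITE_ROW(0, ...) line. */
    static const uint8_t row0[8] = { 0x02, 0x04, 0x02, 0x0b, 0x02, 0x08, 0x05, 0x06 };
    /* One column of eight input nibbles (arbitrary sample values). */
    static const uint8_t col[8]  = { 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8 };
    uint8_t out = 0;
    int i;
    for (i = 0; i < 8; ++i)
        out ^= gf16_mul(row0[i], col[i]);  /* accumulate a0*s0 ^ a1*s1 ^ ... */
    printf("output nibble of row 0 = 0x%x\n", out);
    return 0;
}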
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_PHOTON256_H -#define LW_INTERNAL_PHOTON256_H - -/** - * \file internal-photon256.h - * \brief Internal implementation of the PHOTON-256 permutation. - * - * Warning: The current implementation of PHOTON-256 is constant-time - * but not constant-cache. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the PHOTON-256 permutation state in bytes. - */ -#define PHOTON256_STATE_SIZE 32 - -/** - * \brief Permutes the PHOTON-256 state. - * - * \param state The state to be permuted. - */ -void photon256_permute(unsigned char state[PHOTON256_STATE_SIZE]); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/orange/Implementations/crypto_hash/orangishv1/rhys-avr/internal-util.h b/orange/Implementations/crypto_hash/orangishv1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/orange/Implementations/crypto_hash/orangishv1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
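[Editor's illustrative sketch, not part of the patch.] The load/store helpers exist because the state is manipulated as 32-bit words but stored as bytes, and the little-endian pair is the one the PHOTON-256 code uses. The snippet below re-expresses the 32-bit pair as plain functions (hypothetical names load_le32/store_le32) so the intended byte layout and round-trip behaviour can be checked in isolation.

#include <stdint.h>
#include <stdio.h>

/* Equivalent of le_load_word32: byte 0 is the least significant byte. */
static uint32_t load_le32(const unsigned char *p)
{
    return ((uint32_t)p[0]) | ((uint32_t)p[1] << 8) |
           ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Equivalent of le_store_word32. */
static void store_le32(unsigned char *p, uint32_t x)
{
    p[0] = (unsigned char)x;
    p[1] = (unsigned char)(x >> 8);
    p[2] = (unsigned char)(x >> 16);
    p[3] = (unsigned char)(x >> 24);
}

int main(void)
{
    unsigned char buf[4];
    store_le32(buf, 0x01020304u);
    /* buf is now {0x04, 0x03, 0x02, 0x01}, and loading it round-trips. */
    printf("%02x %02x %02x %02x -> 0x%08lx\n",
           buf[0], buf[1], buf[2], buf[3], (unsigned long)load_le32(buf));
    return 0;
}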
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
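[Editor's illustrative sketch, not part of the patch.] The lw_xor_block_* macro family packages the byte-wise XOR patterns that AEAD modes need, such as encrypting and absorbing in a single pass. The snippet below re-expresses lw_xor_block_2_dest as a plain function with hypothetical names: it XORs src into dest and records the resulting bytes in dest2 at the same time.

#include <stdio.h>

/* Same effect as lw_xor_block_2_dest(dest2, dest, src, len). */
static void xor_block_2_dest(unsigned char *dest2, unsigned char *dest,
                             const unsigned char *src, unsigned len)
{
    while (len > 0) {
        *dest2++ = (*dest++ ^= *src++);
        --len;
    }
}

int main(void)
{
    unsigned char state[4] = { 0x10, 0x20, 0x30, 0x40 };
    unsigned char msg[4]   = { 0x01, 0x02, 0x03, 0x04 };
    unsigned char out[4];
    xor_block_2_dest(out, state, msg, 4);
    /* state and out both become {0x11, 0x22, 0x33, 0x44} */
    printf("%02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);
    return 0;
}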
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
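[Editor's illustrative sketch, not part of the patch.] The composed rotations exist because AVR has no barrel shifter: rotating by 1 bit or by a multiple of 8 bits is cheap, so other counts are built from those. The snippet below (hypothetical helper names) verifies one of the identities used above, namely that rotating left by 8 and then right by 3 equals rotating left by 5.

#include <stdint.h>
#include <stdio.h>

static uint32_t rotl32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32 - bits));
}

static uint32_t rotr32(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32 - bits));
}

int main(void)
{
    uint32_t x = 0x12345678u;
    /* leftRotate5 above is built as rotate-left-8 followed by three
     * single-bit right rotations, i.e. rotate-right-3. */
    uint32_t composed = rotr32(rotl32(x, 8), 3);
    uint32_t direct = rotl32(x, 5);
    printf("%s\n", composed == direct ? "identical" : "mismatch");
    return 0;
}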
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/orange/Implementations/crypto_hash/orangishv1/rhys-avr/orange.c b/orange/Implementations/crypto_hash/orangishv1/rhys-avr/orange.c deleted file mode 100644 index 641e117..0000000 --- a/orange/Implementations/crypto_hash/orangishv1/rhys-avr/orange.c +++ /dev/null @@ -1,384 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "orange.h" -#include "internal-photon256.h" -#include "internal-util.h" -#include - -aead_cipher_t const orange_zest_cipher = { - "ORANGE-Zest", - ORANGE_ZEST_KEY_SIZE, - ORANGE_ZEST_NONCE_SIZE, - ORANGE_ZEST_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - orange_zest_aead_encrypt, - orange_zest_aead_decrypt -}; - -aead_hash_algorithm_t const orangish_hash_algorithm = { - "ORANGISH", - sizeof(int), - ORANGISH_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - orangish_hash, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \brief Doubles a block in the GF(128) field a number of times. - * - * \param block The block to be doubled. - * \param value The number of times to double the block. - */ -static void orange_block_double(unsigned char block[16], unsigned char value) -{ - unsigned index; - unsigned char mask; - while (value > 0) { - mask = (unsigned char)(((signed char)(block[15])) >> 7); - for (index = 15; index > 0; --index) - block[index] = (block[index] << 1) | (block[index - 1] >> 7); - block[0] = (block[0] << 1) ^ (mask & 0x87); - --value; - } -} - -/** - * \brief Rotates a block left by 1 bit. - * - * \param out The output block to be set to the rotated version. - * \param in The input block to be rotated, must not overlap with \a out. 
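[Editor's illustrative sketch, not part of the patch.] The doubling in orange_block_double is multiplication by x in a 128-bit binary field, applied to a block stored least-significant byte first (block[15] carries the top bit), with the carry folded back in by XORing 0x87 into the low byte. The stand-alone version below (hypothetical name gf128_double) performs a single doubling step with the same layout so the carry-and-reduce behaviour can be checked in isolation.

#include <stdio.h>

/* One doubling step: shift the 128-bit value left by one bit
 * (block[0] = low byte, block[15] = high byte) and XOR 0x87 into the
 * low byte when the top bit was set, mirroring the code above. */
static void gf128_double(unsigned char block[16])
{
    unsigned char mask = (unsigned char)(((signed char)block[15]) >> 7);
    int i;
    for (i = 15; i > 0; --i)
        block[i] = (unsigned char)((block[i] << 1) | (block[i - 1] >> 7));
    block[0] = (unsigned char)((block[0] << 1) ^ (mask & 0x87));
}

int main(void)
{
    unsigned char b[16] = { 1 };  /* the field element 1 */
    gf128_double(b);              /* 1 doubled is 2 */
    gf128_double(b);              /* 2 doubled is 4 */
    printf("low byte after two doublings: 0x%02x\n", b[0]);
    return 0;
}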
- */ -static void orange_block_rotate - (unsigned char out[16], const unsigned char in[16]) -{ - unsigned index; - for (index = 15; index > 0; --index) - out[index] = (in[index] << 1) | (in[index - 1] >> 7); - out[0] = (in[0] << 1) | (in[15] >> 7); -} - -/** - * \brief Hash input data with ORANGE. - * - * \param state PHOTON-256 permutation state. - * \param data Points to the data to be hashed. - * \param len Length of the data to be hashed, must not be zero. - * \param domain0 Domain separation value for full last block. - * \param domain1 Domain separation value for partial last block. - */ -static void orange_process_hash - (unsigned char state[PHOTON256_STATE_SIZE], - const unsigned char *data, unsigned long long len, - unsigned char domain0, unsigned char domain1) -{ - unsigned temp; - while (len > PHOTON256_STATE_SIZE) { - photon256_permute(state); - lw_xor_block(state, data, PHOTON256_STATE_SIZE); - data += PHOTON256_STATE_SIZE; - len -= PHOTON256_STATE_SIZE; - } - photon256_permute(state); - temp = (unsigned)len; - if (temp < PHOTON256_STATE_SIZE) { - orange_block_double(state + 16, domain1); - state[temp] ^= 0x01; /* padding */ - } else { - orange_block_double(state + 16, domain0); - } - lw_xor_block(state, data, temp); -} - -/** - * \brief Applies the rho function to the ORANGE state. - * - * \param KS Output keystream to use to encrypt the plaintext or to - * decrypt the ciphertext. - * \param S Rolling key state. - * \param state Rolling PHOTON-256 permutation state. - */ -static void orange_rho - (unsigned char KS[32], unsigned char S[16], const unsigned char state[32]) -{ - orange_block_double(S, 1); - orange_block_rotate(KS, state); - lw_xor_block_2_src(KS + 16, state + 16, S, 16); - memcpy(S, state + 16, 16); -} - -/** - * \brief Encrypts plaintext with ORANGE. - * - * \param state PHOTON-256 permutation state. - * \param k Points to the key for the cipher. - * \param c Points to the ciphertext output buffer. - * \param m Points to the plaintext input buffer. - * \param len Length of the plaintext in bytes, must not be zero. - */ -static void orange_encrypt - (unsigned char state[PHOTON256_STATE_SIZE], const unsigned char *k, - unsigned char *c, const unsigned char *m, unsigned long long len) -{ - unsigned char S[ORANGE_ZEST_KEY_SIZE]; - unsigned char KS[PHOTON256_STATE_SIZE]; - unsigned temp; - memcpy(S, k, ORANGE_ZEST_KEY_SIZE); - while (len > PHOTON256_STATE_SIZE) { - photon256_permute(state); - orange_rho(KS, S, state); - lw_xor_block_2_src(c, m, KS, PHOTON256_STATE_SIZE); - lw_xor_block(state, c, PHOTON256_STATE_SIZE); - c += PHOTON256_STATE_SIZE; - m += PHOTON256_STATE_SIZE; - len -= PHOTON256_STATE_SIZE; - } - photon256_permute(state); - temp = (unsigned)len; - if (temp < PHOTON256_STATE_SIZE) { - orange_block_double(state + 16, 2); - orange_rho(KS, S, state); - lw_xor_block_2_src(c, m, KS, temp); - lw_xor_block(state, c, temp); - state[temp] ^= 0x01; /* padding */ - } else { - orange_block_double(state + 16, 1); - orange_rho(KS, S, state); - lw_xor_block_2_src(c, m, KS, PHOTON256_STATE_SIZE); - lw_xor_block(state, c, PHOTON256_STATE_SIZE); - } -} - -/** - * \brief Decrypts ciphertext with ORANGE. - * - * \param state PHOTON-256 permutation state. - * \param k Points to the key for the cipher. - * \param m Points to the plaintext output buffer. - * \param c Points to the ciphertext input buffer. - * \param len Length of the plaintext in bytes, must not be zero. 
- */ -static void orange_decrypt - (unsigned char state[PHOTON256_STATE_SIZE], const unsigned char *k, - unsigned char *m, const unsigned char *c, unsigned long long len) -{ - unsigned char S[ORANGE_ZEST_KEY_SIZE]; - unsigned char KS[PHOTON256_STATE_SIZE]; - unsigned temp; - memcpy(S, k, ORANGE_ZEST_KEY_SIZE); - while (len > PHOTON256_STATE_SIZE) { - photon256_permute(state); - orange_rho(KS, S, state); - lw_xor_block(state, c, PHOTON256_STATE_SIZE); - lw_xor_block_2_src(m, c, KS, PHOTON256_STATE_SIZE); - c += PHOTON256_STATE_SIZE; - m += PHOTON256_STATE_SIZE; - len -= PHOTON256_STATE_SIZE; - } - photon256_permute(state); - temp = (unsigned)len; - if (temp < PHOTON256_STATE_SIZE) { - orange_block_double(state + 16, 2); - orange_rho(KS, S, state); - lw_xor_block(state, c, temp); - lw_xor_block_2_src(m, c, KS, temp); - state[temp] ^= 0x01; /* padding */ - } else { - orange_block_double(state + 16, 1); - orange_rho(KS, S, state); - lw_xor_block(state, c, PHOTON256_STATE_SIZE); - lw_xor_block_2_src(m, c, KS, PHOTON256_STATE_SIZE); - } -} - -/** - * \brief Generates the authentication tag for ORANGE-Zest. - * - * \param state PHOTON-256 permutation state. - * - * The tag will be left in the leading bytes of the state on exit. - */ -static void orange_generate_tag(unsigned char state[PHOTON256_STATE_SIZE]) -{ - /* Swap the two halves of the state and run the permutation again */ - unsigned posn; - for (posn = 0; posn < (PHOTON256_STATE_SIZE / 2); ++posn) { - unsigned char temp = state[posn]; - state[posn] = state[posn + (PHOTON256_STATE_SIZE / 2)]; - state[posn + (PHOTON256_STATE_SIZE / 2)] = temp; - } - photon256_permute(state); -} - -int orange_zest_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ORANGE_ZEST_TAG_SIZE; - - /* Initialize the PHOTON-256 state with the nonce and key */ - memcpy(state, npub, 16); - memcpy(state + 16, k, 16); - - /* Handle the associated data and message payload */ - if (adlen == 0) { - if (mlen == 0) { - state[16] ^= 2; /* domain separation */ - photon256_permute(state); - memcpy(c + mlen, state, ORANGE_ZEST_TAG_SIZE); - return 0; - } else { - state[16] ^= 1; /* domain separation */ - orange_encrypt(state, k, c, m, mlen); - } - } else { - orange_process_hash(state, ad, adlen, 1, 2); - if (mlen != 0) - orange_encrypt(state, k, c, m, mlen); - } - - /* Generate the authentication tag */ - orange_generate_tag(state); - memcpy(c + mlen, state, ORANGE_ZEST_TAG_SIZE); - return 0; -} - -int orange_zest_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ORANGE_ZEST_TAG_SIZE) - return -1; - *mlen = clen - ORANGE_ZEST_TAG_SIZE; - - /* Initialize the PHOTON-256 state with the nonce and key */ - memcpy(state, npub, 16); - memcpy(state + 16, k, 16); - - /* Handle the associated data and message payload */ - clen -= ORANGE_ZEST_TAG_SIZE; - if (adlen == 0) { - if (clen == 0) { - state[16] ^= 2; /* domain separation */ - 
photon256_permute(state); - return aead_check_tag(m, 0, state, c, ORANGE_ZEST_TAG_SIZE); - } else { - state[16] ^= 1; /* domain separation */ - orange_decrypt(state, k, m, c, clen); - } - } else { - orange_process_hash(state, ad, adlen, 1, 2); - if (clen != 0) - orange_decrypt(state, k, m, c, clen); - } - - /* Check the authentication tag */ - orange_generate_tag(state); - return aead_check_tag(m, clen, state, c + clen, ORANGE_ZEST_TAG_SIZE); -} - -/** - * \brief Rate of absorbing data into the ORANGISH hash state. - */ -#define ORANGISH_RATE 16 - -int orangish_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - unsigned temp; - memset(state, 0, sizeof(state)); - if (inlen == 0) { - /* No absorption necessary for a zero-length input */ - } else if (inlen < ORANGISH_RATE) { - /* Single partial block */ - temp = (unsigned)inlen; - memcpy(state, in, temp); - state[temp] ^= 0x01; /* padding */ - photon256_permute(state); - lw_xor_block(state + 16, in, temp); - state[16 + temp] ^= 0x01; /* padding */ - state[0] ^= 0x02; /* domain separation */ - } else if (inlen == ORANGISH_RATE) { - /* Single full block */ - memcpy(state, in, ORANGISH_RATE); - photon256_permute(state); - lw_xor_block(state + 16, in, ORANGISH_RATE); - state[0] ^= 0x01; /* domain separation */ - } else { - /* Process double blocks until we run out */ - memcpy(state, in, ORANGISH_RATE); - photon256_permute(state); - lw_xor_block(state + 16, in, ORANGISH_RATE); - in += ORANGISH_RATE; - inlen -= ORANGISH_RATE; - while (inlen > ORANGISH_RATE) { - lw_xor_block(state, in, ORANGISH_RATE); - photon256_permute(state); - lw_xor_block(state + 16, in, ORANGISH_RATE); - in += ORANGISH_RATE; - inlen -= ORANGISH_RATE; - } - temp = (unsigned)inlen; - if (temp < ORANGISH_RATE) { - /* Last double block is partial */ - lw_xor_block(state, in, temp); - state[temp] ^= 0x01; /* padding */ - photon256_permute(state); - lw_xor_block(state + 16, in, temp); - state[16 + temp] ^= 0x01; /* padding */ - state[0] ^= 0x02; /* domain separation */ - } else { - /* Last double block is full */ - lw_xor_block(state, in, ORANGISH_RATE); - photon256_permute(state); - lw_xor_block(state + 16, in, ORANGISH_RATE); - state[0] ^= 0x01; /* domain separation */ - } - } - photon256_permute(state); - memcpy(out, state, 16); - photon256_permute(state); - memcpy(out + 16, state, 16); - return 0; -} diff --git a/orange/Implementations/crypto_hash/orangishv1/rhys-avr/orange.h b/orange/Implementations/crypto_hash/orangishv1/rhys-avr/orange.h deleted file mode 100644 index de5b00c..0000000 --- a/orange/Implementations/crypto_hash/orangishv1/rhys-avr/orange.h +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
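[Editor's illustrative sketch, not part of the patch.] A minimal usage example for the hash side, assuming the library headers and objects are available: hash a short message with orangish_hash and print the 32-byte digest.

#include <stdio.h>
#include "orange.h"

int main(void)
{
    static const unsigned char msg[] = "abc";
    unsigned char digest[ORANGISH_HASH_SIZE];
    int i;
    if (orangish_hash(digest, msg, 3) != 0)
        return 1;
    for (i = 0; i < ORANGISH_HASH_SIZE; ++i)
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}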
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ORANGE_H -#define LWCRYPTO_ORANGE_H - -#include "aead-common.h" - -/** - * \file orange.h - * \brief ORANGE authenticated encryption algorithm. - * - * ORANGE is a family of algorithms built around the PHOTON-256 permutation. - * There are two members of the family at present: - * - * \li ORANGE-Zest is an authenticated encryption algorithm with a 128-bit - * key, a 128-bit nonce, and a 128-bit tag. - * \li ORANGISH is a hash algorithm with a 256-bit output. - * - * References: https://www.isical.ac.in/~lightweight/Orange/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for ORANGE-Zest. - */ -#define ORANGE_ZEST_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for ORANGE-Zest. - */ -#define ORANGE_ZEST_TAG_SIZE 16 - -/** - * \brief Size of the nonce for ORANGE-Zest. - */ -#define ORANGE_ZEST_NONCE_SIZE 16 - -/** - * \brief Size of the hash output for the ORANGISH hash algorithm. - */ -#define ORANGISH_HASH_SIZE 32 - -/** - * \brief Meta-information block for the ORANGE-Zest cipher. - */ -extern aead_cipher_t const orange_zest_cipher; - -/** - * \brief Meta-information block for the ORANGISH hash algorithm. - */ -extern aead_hash_algorithm_t const orangish_hash_algorithm; - -/** - * \brief Encrypts and authenticates a packet with ORANGE-Zest. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa orange_zest_aead_decrypt() - */ -int orange_zest_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with ORANGE-Zest. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa orange_zest_aead_encrypt() - */ -int orange_zest_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with ORANGISH to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * ORANGISH_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int orangish_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/orange/Implementations/crypto_hash/orangishv1/rhys/aead-common.c b/orange/Implementations/crypto_hash/orangishv1/rhys/aead-common.c new file mode 100644 index 0000000..84fc53a --- /dev/null +++ b/orange/Implementations/crypto_hash/orangishv1/rhys/aead-common.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "aead-common.h" + +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = (accum - 1) >> 8; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} + +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size, int precheck) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = ((accum - 1) >> 8) & precheck; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} diff --git a/orange/Implementations/crypto_hash/orangishv1/rhys/aead-common.h b/orange/Implementations/crypto_hash/orangishv1/rhys/aead-common.h new file mode 100644 index 0000000..2be95eb --- /dev/null +++ b/orange/Implementations/crypto_hash/orangishv1/rhys/aead-common.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_AEAD_COMMON_H +#define LWCRYPTO_AEAD_COMMON_H + +#include + +/** + * \file aead-common.h + * \brief Definitions that are common across AEAD schemes. + * + * AEAD stands for "Authenticated Encryption with Associated Data". + * It is a standard API pattern for securely encrypting and + * authenticating packets of data. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts and authenticates a packet with an AEAD scheme. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. 
+ * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + */ +typedef int (*aead_cipher_encrypt_t) + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with an AEAD scheme. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + */ +typedef int (*aead_cipher_decrypt_t) + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data. + * + * \param out Buffer to receive the hash output. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +typedef int (*aead_hash_t) + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a hashing operation. + * + * \param state Hash state to be initialized. + */ +typedef void (*aead_hash_init_t)(void *state); + +/** + * \brief Updates a hash state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + */ +typedef void (*aead_hash_update_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Returns the final hash value from a hashing operation. + * + * \param Hash state to be finalized. + * \param out Points to the output buffer to receive the hash value. + */ +typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); + +/** + * \brief Aborbs more input data into an XOF state. + * + * \param state XOF state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. + * + * \sa ascon_xof_init(), ascon_xof_squeeze() + */ +typedef void (*aead_xof_absorb_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Squeezes output data from an XOF state. 
+ * + * \param state XOF state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + */ +typedef void (*aead_xof_squeeze_t) + (void *state, unsigned char *out, unsigned long long outlen); + +/** + * \brief No special AEAD features. + */ +#define AEAD_FLAG_NONE 0x0000 + +/** + * \brief The natural byte order of the AEAD cipher is little-endian. + * + * If this flag is not present, then the natural byte order of the + * AEAD cipher should be assumed to be big-endian. + * + * The natural byte order may be useful when formatting packet sequence + * numbers as nonces. The application needs to know whether the sequence + * number should be packed into the leading or trailing bytes of the nonce. + */ +#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 + +/** + * \brief Meta-information about an AEAD cipher. + */ +typedef struct +{ + const char *name; /**< Name of the cipher */ + unsigned key_len; /**< Length of the key in bytes */ + unsigned nonce_len; /**< Length of the nonce in bytes */ + unsigned tag_len; /**< Length of the tag in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ + aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ + +} aead_cipher_t; + +/** + * \brief Meta-information about a hash algorithm that is related to an AEAD. + * + * Regular hash algorithms should provide the "hash", "init", "update", + * and "finalize" functions. Extensible Output Functions (XOF's) should + * proivde the "hash", "init", "absorb", and "squeeze" functions. + */ +typedef struct +{ + const char *name; /**< Name of the hash algorithm */ + size_t state_size; /**< Size of the incremental state structure */ + unsigned hash_len; /**< Length of the hash in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_hash_t hash; /**< All in one hashing function */ + aead_hash_init_t init; /**< Incremental hash/XOF init function */ + aead_hash_update_t update; /**< Incremental hash update function */ + aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ + aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ + aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ + +} aead_hash_algorithm_t; + +/** + * \brief Check an authentication tag in constant time. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + */ +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len); + +/** + * \brief Check an authentication tag in constant time with a previous check. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
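The sequence-number use case mentioned for AEAD_FLAG_LITTLE_ENDIAN can be illustrated with a small helper; the function below is a hypothetical sketch built on the aead_cipher_t fields declared in this header, not part of the patch.

#include <stdint.h>
#include <string.h>
#include "aead-common.h"

/* Illustrative only: pack a packet counter into a nonce so that it sits
 * in the bytes matching the cipher's natural byte order. */
static void format_sequence_nonce
    (unsigned char *nonce, const aead_cipher_t *cipher, uint64_t seq)
{
    unsigned i;
    memset(nonce, 0, cipher->nonce_len);
    if (cipher->flags & AEAD_FLAG_LITTLE_ENDIAN) {
        /* Little-endian cipher: counter in the leading bytes */
        for (i = 0; i < 8 && i < cipher->nonce_len; ++i)
            nonce[i] = (unsigned char)(seq >> (8 * i));
    } else {
        /* Big-endian cipher: counter in the trailing bytes */
        for (i = 0; i < 8 && i < cipher->nonce_len; ++i)
            nonce[cipher->nonce_len - 1 - i] = (unsigned char)(seq >> (8 * i));
    }
}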
+ * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + * + * This version can be used to incorporate other information about the + * correctness of the plaintext into the final result. + */ +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len, int precheck); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/orange/Implementations/crypto_hash/orangishv1/rhys/api.h b/orange/Implementations/crypto_hash/orangishv1/rhys/api.h new file mode 100644 index 0000000..ae8c7f6 --- /dev/null +++ b/orange/Implementations/crypto_hash/orangishv1/rhys/api.h @@ -0,0 +1 @@ +#define CRYPTO_BYTES 32 diff --git a/orange/Implementations/crypto_hash/orangishv1/rhys/hash.c b/orange/Implementations/crypto_hash/orangishv1/rhys/hash.c new file mode 100644 index 0000000..c652a6f --- /dev/null +++ b/orange/Implementations/crypto_hash/orangishv1/rhys/hash.c @@ -0,0 +1,8 @@ + +#include "orange.h" + +int crypto_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + return orangish_hash(out, in, inlen); +} diff --git a/orange/Implementations/crypto_hash/orangishv1/rhys/internal-photon256.c b/orange/Implementations/crypto_hash/orangishv1/rhys/internal-photon256.c new file mode 100644 index 0000000..b8743fe --- /dev/null +++ b/orange/Implementations/crypto_hash/orangishv1/rhys/internal-photon256.c @@ -0,0 +1,479 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "internal-photon256.h" +#include "internal-util.h" + +/** + * \brief Number of rounds in the PHOTON-256 permutation in bit-sliced form. + */ +#define PHOTON256_ROUNDS 12 + +/* Round constants for PHOTON-256 */ +static uint32_t const photon256_rc[PHOTON256_ROUNDS] = { + 0x96d2f0e1, 0xb4f0d2c3, 0xf0b49687, 0x692d0f1e, + 0x5a1e3c2d, 0x3c785a4b, 0xe1a58796, 0x4b0f2d3c, + 0x1e5a7869, 0xa5e1c3d2, 0xd296b4a5, 0x2d694b5a +}; + +/** + * \brief Evaluates the PHOTON-256 S-box in bit-sliced form. + * + * \param x0 Slice with bit 0 of all nibbles. + * \param x1 Slice with bit 1 of all nibbles. + * \param x2 Slice with bit 2 of all nibbles. + * \param x3 Slice with bit 3 of all nibbles. 
+ * + * This bit-sliced S-box implementation is based on the AVR version + * "add_avr8_bitslice_asm" from the PHOTON-Beetle reference code. + */ +#define photon256_sbox(x0, x1, x2, x3) \ + do { \ + x1 ^= x2; \ + x3 ^= (x2 & x1); \ + t1 = x3; \ + x3 = (x3 & x1) ^ x2; \ + t2 = x3; \ + x3 ^= x0; \ + x3 = ~(x3); \ + x2 = x3; \ + t2 |= x0; \ + x0 ^= t1; \ + x1 ^= x0; \ + x2 |= x1; \ + x2 ^= t1; \ + x1 ^= t2; \ + x3 ^= x1; \ + } while (0) + +/** + * \brief Performs a field multiplication on the 8 nibbles in a row. + * + * \param a Field constant to multiply by. + * \param x Bit-sliced form of the row, with bits 0..3 of each nibble + * in bytes 0..3 of the word. + * + * \return a * x packed into the bytes of a word. + */ +static uint32_t photon256_field_multiply(uint8_t a, uint32_t x) +{ + /* For each 4-bit nibble we need to do this: + * + * result = 0; + * for (bit = 0; bit < 4; ++ bit) { + * if ((a & (1 << bit)) != 0) + * result ^= x; + * if ((x & 0x08) != 0) { + * x = (x << 1) ^ 3; + * } else { + * x = (x << 1); + * } + * } + * + * We don't need to worry about constant time for "a" because it is a + * known constant that isn't data-dependent. But we do need to worry + * about constant time for "x" as it is data. + */ + uint32_t result = 0; + uint32_t t; + #define PARALLEL_CONDITIONAL_ADD(bit) \ + do { \ + if ((a) & (1 << (bit))) \ + result ^= x; \ + } while (0) + #define PARALELL_ROTATE() \ + do { \ + t = x >> 24; \ + x = (x << 8) ^ t ^ (t << 8); \ + } while (0) + PARALLEL_CONDITIONAL_ADD(0); + PARALELL_ROTATE(); + PARALLEL_CONDITIONAL_ADD(1); + PARALELL_ROTATE(); + PARALLEL_CONDITIONAL_ADD(2); + PARALELL_ROTATE(); + PARALLEL_CONDITIONAL_ADD(3); + return result; +} + +/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ +#define bit_permute_step(_y, mask, shift) \ + do { \ + uint32_t y = (_y); \ + uint32_t t = ((y >> (shift)) ^ y) & (mask); \ + (_y) = (y ^ t) ^ (t << (shift)); \ + } while (0) + +/** + * \brief Converts a PHOTON-256 state into bit-sliced form. + * + * \param out Points to the converted output. + * \param in Points to the PHOTON-256 state to convert. + */ +static void photon256_to_sliced + (uint32_t out[PHOTON256_STATE_SIZE / 4], + const unsigned char in[PHOTON256_STATE_SIZE]) +{ + /* We first scatter bits 0..3 of the nibbles to bytes 0..3 of the words. + * Then we rearrange the bytes to group all bits N into word N. + * + * Permutation generated with "http://programming.sirrida.de/calcperm.php". 
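The per-nibble loop in the comment above can also be written as a plain scalar function; the sketch below is explanatory only (and deliberately not constant-time), while the bit-sliced routine computes eight such products at once, one per byte of the word.

/* Reference GF(16) multiply with the field polynomial x^4 + x + 1
 * (reduction constant 0x3), matching the pseudo-code above. */
static unsigned char gf16_multiply_ref(unsigned char a, unsigned char x)
{
    unsigned char result = 0;
    int bit;
    for (bit = 0; bit < 4; ++bit) {
        if (a & (1 << bit))
            result ^= x;
        x = (unsigned char)(((x << 1) ^ ((x & 0x08) ? 0x03 : 0x00)) & 0x0F);
    }
    return (unsigned char)(result & 0x0F);
}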
+ * + * P = [0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 + * 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31] + */ + uint32_t t0, t1, t2, t3; + #define TO_BITSLICED_PERM(x) \ + do { \ + bit_permute_step(x, 0x0a0a0a0a, 3); \ + bit_permute_step(x, 0x00cc00cc, 6); \ + bit_permute_step(x, 0x0000f0f0, 12); \ + bit_permute_step(x, 0x0000ff00, 8); \ + } while (0) + #define FROM_BITSLICED_PERM(x) \ + do { \ + bit_permute_step(x, 0x00aa00aa, 7); \ + bit_permute_step(x, 0x0000cccc, 14); \ + bit_permute_step(x, 0x00f000f0, 4); \ + bit_permute_step(x, 0x0000ff00, 8); \ + } while (0) + t0 = le_load_word32(in); + t1 = le_load_word32(in + 4); + t2 = le_load_word32(in + 8); + t3 = le_load_word32(in + 12); + TO_BITSLICED_PERM(t0); + TO_BITSLICED_PERM(t1); + TO_BITSLICED_PERM(t2); + TO_BITSLICED_PERM(t3); + out[0] = (t0 & 0x000000FFU) | ((t1 << 8) & 0x0000FF00U) | + ((t2 << 16) & 0x00FF0000U) | ((t3 << 24) & 0xFF000000U); + out[1] = ((t0 >> 8) & 0x000000FFU) | (t1 & 0x0000FF00U) | + ((t2 << 8) & 0x00FF0000U) | ((t3 << 16) & 0xFF000000U); + out[2] = ((t0 >> 16) & 0x000000FFU) | ((t1 >> 8) & 0x0000FF00U) | + (t2 & 0x00FF0000U) | ((t3 << 8) & 0xFF000000U); + out[3] = ((t0 >> 24) & 0x000000FFU) | ((t1 >> 16) & 0x0000FF00U) | + ((t2 >> 8) & 0x00FF0000U) | (t3 & 0xFF000000U); + t0 = le_load_word32(in + 16); + t1 = le_load_word32(in + 20); + t2 = le_load_word32(in + 24); + t3 = le_load_word32(in + 28); + TO_BITSLICED_PERM(t0); + TO_BITSLICED_PERM(t1); + TO_BITSLICED_PERM(t2); + TO_BITSLICED_PERM(t3); + out[4] = (t0 & 0x000000FFU) | ((t1 << 8) & 0x0000FF00U) | + ((t2 << 16) & 0x00FF0000U) | ((t3 << 24) & 0xFF000000U); + out[5] = ((t0 >> 8) & 0x000000FFU) | (t1 & 0x0000FF00U) | + ((t2 << 8) & 0x00FF0000U) | ((t3 << 16) & 0xFF000000U); + out[6] = ((t0 >> 16) & 0x000000FFU) | ((t1 >> 8) & 0x0000FF00U) | + (t2 & 0x00FF0000U) | ((t3 << 8) & 0xFF000000U); + out[7] = ((t0 >> 24) & 0x000000FFU) | ((t1 >> 16) & 0x0000FF00U) | + ((t2 >> 8) & 0x00FF0000U) | (t3 & 0xFF000000U); +} + +/** + * \brief Converts a PHOTON-256 state from bit-sliced form. + * + * \param out Points to the converted output. + * \param in Points to the PHOTON-256 state to convert. 
+ */ +static void photon256_from_sliced + (unsigned char out[PHOTON256_STATE_SIZE], + const unsigned char in[PHOTON256_STATE_SIZE]) +{ + /* Do the reverse of photon256_to_sliced() */ + uint32_t x0, x1, x2, x3; + x0 = ((uint32_t)(in[0])) | + (((uint32_t)(in[4])) << 8) | + (((uint32_t)(in[8])) << 16) | + (((uint32_t)(in[12])) << 24); + x1 = ((uint32_t)(in[1])) | + (((uint32_t)(in[5])) << 8) | + (((uint32_t)(in[9])) << 16) | + (((uint32_t)(in[13])) << 24); + x2 = ((uint32_t)(in[2])) | + (((uint32_t)(in[6])) << 8) | + (((uint32_t)(in[10])) << 16) | + (((uint32_t)(in[14])) << 24); + x3 = ((uint32_t)(in[3])) | + (((uint32_t)(in[7])) << 8) | + (((uint32_t)(in[11])) << 16) | + (((uint32_t)(in[15])) << 24); + FROM_BITSLICED_PERM(x0); + FROM_BITSLICED_PERM(x1); + FROM_BITSLICED_PERM(x2); + FROM_BITSLICED_PERM(x3); + le_store_word32(out, x0); + le_store_word32(out + 4, x1); + le_store_word32(out + 8, x2); + le_store_word32(out + 12, x3); + x0 = ((uint32_t)(in[16])) | + (((uint32_t)(in[20])) << 8) | + (((uint32_t)(in[24])) << 16) | + (((uint32_t)(in[28])) << 24); + x1 = ((uint32_t)(in[17])) | + (((uint32_t)(in[21])) << 8) | + (((uint32_t)(in[25])) << 16) | + (((uint32_t)(in[29])) << 24); + x2 = ((uint32_t)(in[18])) | + (((uint32_t)(in[22])) << 8) | + (((uint32_t)(in[26])) << 16) | + (((uint32_t)(in[30])) << 24); + x3 = ((uint32_t)(in[19])) | + (((uint32_t)(in[23])) << 8) | + (((uint32_t)(in[27])) << 16) | + (((uint32_t)(in[31])) << 24); + FROM_BITSLICED_PERM(x0); + FROM_BITSLICED_PERM(x1); + FROM_BITSLICED_PERM(x2); + FROM_BITSLICED_PERM(x3); + le_store_word32(out + 16, x0); + le_store_word32(out + 20, x1); + le_store_word32(out + 24, x2); + le_store_word32(out + 28, x3); +} + +#if defined(LW_UTIL_LITTLE_ENDIAN) +/* Index the bit-sliced state bytes in little-endian byte order */ +#define READ_ROW0() \ + (((uint32_t)(S.bytes[0])) | \ + (((uint32_t)(S.bytes[4])) << 8) | \ + (((uint32_t)(S.bytes[8])) << 16) | \ + (((uint32_t)(S.bytes[12])) << 24)) +#define READ_ROW1() \ + (((uint32_t)(S.bytes[1])) | \ + (((uint32_t)(S.bytes[5])) << 8) | \ + (((uint32_t)(S.bytes[9])) << 16) | \ + (((uint32_t)(S.bytes[13])) << 24)) +#define READ_ROW2() \ + (((uint32_t)(S.bytes[2])) | \ + (((uint32_t)(S.bytes[6])) << 8) | \ + (((uint32_t)(S.bytes[10])) << 16) | \ + (((uint32_t)(S.bytes[14])) << 24)) +#define READ_ROW3() \ + (((uint32_t)(S.bytes[3])) | \ + (((uint32_t)(S.bytes[7])) << 8) | \ + (((uint32_t)(S.bytes[11])) << 16) | \ + (((uint32_t)(S.bytes[15])) << 24)) +#define READ_ROW4() \ + (((uint32_t)(S.bytes[16])) | \ + (((uint32_t)(S.bytes[20])) << 8) | \ + (((uint32_t)(S.bytes[24])) << 16) | \ + (((uint32_t)(S.bytes[28])) << 24)) +#define READ_ROW5() \ + (((uint32_t)(S.bytes[17])) | \ + (((uint32_t)(S.bytes[21])) << 8) | \ + (((uint32_t)(S.bytes[25])) << 16) | \ + (((uint32_t)(S.bytes[29])) << 24)) +#define READ_ROW6() \ + (((uint32_t)(S.bytes[18])) | \ + (((uint32_t)(S.bytes[22])) << 8) | \ + (((uint32_t)(S.bytes[26])) << 16) | \ + (((uint32_t)(S.bytes[30])) << 24)) +#define READ_ROW7() \ + (((uint32_t)(S.bytes[19])) | \ + (((uint32_t)(S.bytes[23])) << 8) | \ + (((uint32_t)(S.bytes[27])) << 16) | \ + (((uint32_t)(S.bytes[31])) << 24)) +#define WRITE_ROW(row, value) \ + do { \ + if ((row) < 4) { \ + S.bytes[(row)] = (uint8_t)(value); \ + S.bytes[(row) + 4] = (uint8_t)((value) >> 8); \ + S.bytes[(row) + 8] = (uint8_t)((value) >> 16); \ + S.bytes[(row) + 12] = (uint8_t)((value) >> 24); \ + } else { \ + S.bytes[(row) + 12] = (uint8_t)(value); \ + S.bytes[(row) + 16] = (uint8_t)((value) >> 8); \ + S.bytes[(row) + 20] = 
(uint8_t)((value) >> 16); \ + S.bytes[(row) + 24] = (uint8_t)((value) >> 24); \ + } \ + } while (0) +#else +/* Index the bit-sliced state bytes in big-endian byte order */ +#define READ_ROW0() \ + (((uint32_t)(S.bytes[3])) | \ + (((uint32_t)(S.bytes[7])) << 8) | \ + (((uint32_t)(S.bytes[11])) << 16) | \ + (((uint32_t)(S.bytes[15])) << 24)) +#define READ_ROW1() \ + (((uint32_t)(S.bytes[2])) | \ + (((uint32_t)(S.bytes[6])) << 8) | \ + (((uint32_t)(S.bytes[10])) << 16) | \ + (((uint32_t)(S.bytes[14])) << 24)) +#define READ_ROW2() \ + (((uint32_t)(S.bytes[1])) | \ + (((uint32_t)(S.bytes[5])) << 8) | \ + (((uint32_t)(S.bytes[9])) << 16) | \ + (((uint32_t)(S.bytes[13])) << 24)) +#define READ_ROW3() \ + (((uint32_t)(S.bytes[0])) | \ + (((uint32_t)(S.bytes[4])) << 8) | \ + (((uint32_t)(S.bytes[8])) << 16) | \ + (((uint32_t)(S.bytes[12])) << 24)) +#define READ_ROW4() \ + (((uint32_t)(S.bytes[19])) | \ + (((uint32_t)(S.bytes[23])) << 8) | \ + (((uint32_t)(S.bytes[27])) << 16) | \ + (((uint32_t)(S.bytes[31])) << 24)) +#define READ_ROW5() \ + (((uint32_t)(S.bytes[18])) | \ + (((uint32_t)(S.bytes[22])) << 8) | \ + (((uint32_t)(S.bytes[26])) << 16) | \ + (((uint32_t)(S.bytes[30])) << 24)) +#define READ_ROW6() \ + (((uint32_t)(S.bytes[17])) | \ + (((uint32_t)(S.bytes[21])) << 8) | \ + (((uint32_t)(S.bytes[25])) << 16) | \ + (((uint32_t)(S.bytes[29])) << 24)) +#define READ_ROW7() \ + (((uint32_t)(S.bytes[16])) | \ + (((uint32_t)(S.bytes[20])) << 8) | \ + (((uint32_t)(S.bytes[24])) << 16) | \ + (((uint32_t)(S.bytes[28])) << 24)) +#define WRITE_ROW(row, value) \ + do { \ + if ((row) < 4) { \ + S.bytes[3 - (row)] = (uint8_t)(value); \ + S.bytes[7 - (row)] = (uint8_t)((value) >> 8); \ + S.bytes[11 - (row)] = (uint8_t)((value) >> 16); \ + S.bytes[15 - (row)] = (uint8_t)((value) >> 24); \ + } else { \ + S.bytes[20 - (row)] = (uint8_t)(value); \ + S.bytes[24 - (row)] = (uint8_t)((value) >> 8); \ + S.bytes[28 - (row)] = (uint8_t)((value) >> 16); \ + S.bytes[32 - (row)] = (uint8_t)((value) >> 24); \ + } \ + } while (0) +#endif + +void photon256_permute(unsigned char state[PHOTON256_STATE_SIZE]) +{ + union { + uint32_t words[PHOTON256_STATE_SIZE / 4]; + uint8_t bytes[PHOTON256_STATE_SIZE]; + } S; + uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8; + uint8_t round; + + /* Convert the state into bit-sliced form */ + photon256_to_sliced(S.words, state); + + /* Perform all 12 permutation rounds */ + for (round = 0; round < PHOTON256_ROUNDS; ++round) { + /* Add the constants for this round */ + t0 = photon256_rc[round]; + S.words[0] ^= t0 & 0x01010101U; + t0 >>= 1; + S.words[1] ^= t0 & 0x01010101U; + t0 >>= 1; + S.words[2] ^= t0 & 0x01010101U; + t0 >>= 1; + S.words[3] ^= t0 & 0x01010101U; + t0 >>= 1; + S.words[4] ^= t0 & 0x01010101U; + t0 >>= 1; + S.words[5] ^= t0 & 0x01010101U; + t0 >>= 1; + S.words[6] ^= t0 & 0x01010101U; + t0 >>= 1; + S.words[7] ^= t0 & 0x01010101U; + + /* Apply the sbox to all nibbles in the state */ + photon256_sbox(S.words[0], S.words[1], S.words[2], S.words[3]); + photon256_sbox(S.words[4], S.words[5], S.words[6], S.words[7]); + + /* Rotate all rows left by the row number. + * + * We do this by applying permutations to the top and bottom words + * to rearrange the bits into the rotated form. Permutations + * generated with "http://programming.sirrida.de/calcperm.php". 
+ * + * P_top = [0 1 2 3 4 5 6 7 15 8 9 10 11 12 13 14 22 23 + * 16 17 18 19 20 21 29 30 31 24 25 26 27 28] + * P_bot = [4 5 6 7 0 1 2 3 11 12 13 14 15 8 9 10 18 19 + * 20 21 22 23 16 17 25 26 27 28 29 30 31 24 + */ + #define TOP_ROTATE_PERM(x) \ + do { \ + t1 = (x); \ + bit_permute_step(t1, 0x07030100, 4); \ + bit_permute_step(t1, 0x22331100, 2); \ + bit_permute_step(t1, 0x55005500, 1); \ + (x) = t1; \ + } while (0) + #define BOTTOM_ROTATE_PERM(x) \ + do { \ + t1 = (x); \ + bit_permute_step(t1, 0x080c0e0f, 4); \ + bit_permute_step(t1, 0x22331100, 2); \ + bit_permute_step(t1, 0x55005500, 1); \ + (x) = t1; \ + } while (0) + TOP_ROTATE_PERM(S.words[0]); + TOP_ROTATE_PERM(S.words[1]); + TOP_ROTATE_PERM(S.words[2]); + TOP_ROTATE_PERM(S.words[3]); + BOTTOM_ROTATE_PERM(S.words[4]); + BOTTOM_ROTATE_PERM(S.words[5]); + BOTTOM_ROTATE_PERM(S.words[6]); + BOTTOM_ROTATE_PERM(S.words[7]); + + /* Mix the columns */ + #define MUL(a, x) (photon256_field_multiply((a), (x))) + t0 = READ_ROW0(); + t1 = READ_ROW1(); + t2 = READ_ROW2(); + t3 = READ_ROW3(); + t4 = READ_ROW4(); + t5 = READ_ROW5(); + t6 = READ_ROW6(); + t7 = READ_ROW7(); + t8 = MUL(0x02, t0) ^ MUL(0x04, t1) ^ MUL(0x02, t2) ^ MUL(0x0b, t3) ^ + MUL(0x02, t4) ^ MUL(0x08, t5) ^ MUL(0x05, t6) ^ MUL(0x06, t7); + WRITE_ROW(0, t8); + t8 = MUL(0x0c, t0) ^ MUL(0x09, t1) ^ MUL(0x08, t2) ^ MUL(0x0d, t3) ^ + MUL(0x07, t4) ^ MUL(0x07, t5) ^ MUL(0x05, t6) ^ MUL(0x02, t7); + WRITE_ROW(1, t8); + t8 = MUL(0x04, t0) ^ MUL(0x04, t1) ^ MUL(0x0d, t2) ^ MUL(0x0d, t3) ^ + MUL(0x09, t4) ^ MUL(0x04, t5) ^ MUL(0x0d, t6) ^ MUL(0x09, t7); + WRITE_ROW(2, t8); + t8 = MUL(0x01, t0) ^ MUL(0x06, t1) ^ MUL(0x05, t2) ^ MUL(0x01, t3) ^ + MUL(0x0c, t4) ^ MUL(0x0d, t5) ^ MUL(0x0f, t6) ^ MUL(0x0e, t7); + WRITE_ROW(3, t8); + t8 = MUL(0x0f, t0) ^ MUL(0x0c, t1) ^ MUL(0x09, t2) ^ MUL(0x0d, t3) ^ + MUL(0x0e, t4) ^ MUL(0x05, t5) ^ MUL(0x0e, t6) ^ MUL(0x0d, t7); + WRITE_ROW(4, t8); + t8 = MUL(0x09, t0) ^ MUL(0x0e, t1) ^ MUL(0x05, t2) ^ MUL(0x0f, t3) ^ + MUL(0x04, t4) ^ MUL(0x0c, t5) ^ MUL(0x09, t6) ^ MUL(0x06, t7); + WRITE_ROW(5, t8); + t8 = MUL(0x0c, t0) ^ MUL(0x02, t1) ^ MUL(0x02, t2) ^ MUL(0x0a, t3) ^ + MUL(0x03, t4) ^ MUL(0x01, t5) ^ MUL(0x01, t6) ^ MUL(0x0e, t7); + WRITE_ROW(6, t8); + t8 = MUL(0x0f, t0) ^ MUL(0x01, t1) ^ MUL(0x0d, t2) ^ MUL(0x0a, t3) ^ + MUL(0x05, t4) ^ MUL(0x0a, t5) ^ MUL(0x02, t6) ^ MUL(0x03, t7); + WRITE_ROW(7, t8); + } + + /* Convert back from bit-sliced form to regular form */ + photon256_from_sliced(state, S.bytes); +} diff --git a/orange/Implementations/crypto_hash/orangishv1/rhys/internal-photon256.h b/orange/Implementations/crypto_hash/orangishv1/rhys/internal-photon256.h new file mode 100644 index 0000000..ce8729a --- /dev/null +++ b/orange/Implementations/crypto_hash/orangishv1/rhys/internal-photon256.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_PHOTON256_H +#define LW_INTERNAL_PHOTON256_H + +/** + * \file internal-photon256.h + * \brief Internal implementation of the PHOTON-256 permutation. + * + * Warning: The current implementation of PHOTON-256 is constant-time + * but not constant-cache. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the PHOTON-256 permutation state in bytes. + */ +#define PHOTON256_STATE_SIZE 32 + +/** + * \brief Permutes the PHOTON-256 state. + * + * \param state The state to be permuted. + */ +void photon256_permute(unsigned char state[PHOTON256_STATE_SIZE]); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/orange/Implementations/crypto_hash/orangishv1/rhys/internal-util.h b/orange/Implementations/crypto_hash/orangishv1/rhys/internal-util.h new file mode 100644 index 0000000..e30166d --- /dev/null +++ b/orange/Implementations/crypto_hash/orangishv1/rhys/internal-util.h @@ -0,0 +1,702 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_UTIL_H +#define LW_INTERNAL_UTIL_H + +#include + +/* Figure out how to inline functions using this C compiler */ +#if defined(__STDC__) && __STDC_VERSION__ >= 199901L +#define STATIC_INLINE static inline +#elif defined(__GNUC__) || defined(__clang__) +#define STATIC_INLINE static __inline__ +#else +#define STATIC_INLINE static +#endif + +/* Try to figure out whether the CPU is little-endian or big-endian. + * May need to modify this to include new compiler-specific defines. 
+ * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your + * compiler flags when you compile this library */ +#if defined(__x86_64) || defined(__x86_64__) || \ + defined(__i386) || defined(__i386__) || \ + defined(__AVR__) || defined(__arm) || defined(__arm__) || \ + defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ + defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ + defined(__LITTLE_ENDIAN__) +#define LW_UTIL_LITTLE_ENDIAN 1 +#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ + defined(__BIG_ENDIAN__) +/* Big endian */ +#else +#error "Cannot determine the endianess of this platform" +#endif + +/* Helper macros to load and store values while converting endian-ness */ + +/* Load a big-endian 32-bit word from a byte buffer */ +#define be_load_word32(ptr) \ + ((((uint32_t)((ptr)[0])) << 24) | \ + (((uint32_t)((ptr)[1])) << 16) | \ + (((uint32_t)((ptr)[2])) << 8) | \ + ((uint32_t)((ptr)[3]))) + +/* Store a big-endian 32-bit word into a byte buffer */ +#define be_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 24); \ + (ptr)[1] = (uint8_t)(_x >> 16); \ + (ptr)[2] = (uint8_t)(_x >> 8); \ + (ptr)[3] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 32-bit word from a byte buffer */ +#define le_load_word32(ptr) \ + ((((uint32_t)((ptr)[3])) << 24) | \ + (((uint32_t)((ptr)[2])) << 16) | \ + (((uint32_t)((ptr)[1])) << 8) | \ + ((uint32_t)((ptr)[0]))) + +/* Store a little-endian 32-bit word into a byte buffer */ +#define le_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + } while (0) + +/* Load a big-endian 64-bit word from a byte buffer */ +#define be_load_word64(ptr) \ + ((((uint64_t)((ptr)[0])) << 56) | \ + (((uint64_t)((ptr)[1])) << 48) | \ + (((uint64_t)((ptr)[2])) << 40) | \ + (((uint64_t)((ptr)[3])) << 32) | \ + (((uint64_t)((ptr)[4])) << 24) | \ + (((uint64_t)((ptr)[5])) << 16) | \ + (((uint64_t)((ptr)[6])) << 8) | \ + ((uint64_t)((ptr)[7]))) + +/* Store a big-endian 64-bit word into a byte buffer */ +#define be_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 56); \ + (ptr)[1] = (uint8_t)(_x >> 48); \ + (ptr)[2] = (uint8_t)(_x >> 40); \ + (ptr)[3] = (uint8_t)(_x >> 32); \ + (ptr)[4] = (uint8_t)(_x >> 24); \ + (ptr)[5] = (uint8_t)(_x >> 16); \ + (ptr)[6] = (uint8_t)(_x >> 8); \ + (ptr)[7] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 64-bit word from a byte buffer */ +#define le_load_word64(ptr) \ + ((((uint64_t)((ptr)[7])) << 56) | \ + (((uint64_t)((ptr)[6])) << 48) | \ + (((uint64_t)((ptr)[5])) << 40) | \ + (((uint64_t)((ptr)[4])) << 32) | \ + (((uint64_t)((ptr)[3])) << 24) | \ + (((uint64_t)((ptr)[2])) << 16) | \ + (((uint64_t)((ptr)[1])) << 8) | \ + ((uint64_t)((ptr)[0]))) + +/* Store a little-endian 64-bit word into a byte buffer */ +#define le_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + (ptr)[4] = (uint8_t)(_x >> 32); \ + (ptr)[5] = (uint8_t)(_x >> 40); \ + (ptr)[6] = (uint8_t)(_x >> 48); \ + (ptr)[7] = (uint8_t)(_x >> 56); \ + } while (0) + +/* Load a big-endian 16-bit word from a byte buffer */ +#define be_load_word16(ptr) \ + ((((uint16_t)((ptr)[0])) << 8) | \ + ((uint16_t)((ptr)[1]))) + +/* Store a 
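/* Quick illustration of the two 32-bit byte orders (explanatory only):
 *
 *     static const unsigned char buf[4] = { 0x01, 0x02, 0x03, 0x04 };
 *     be_load_word32(buf)  == 0x01020304
 *     le_load_word32(buf)  == 0x04030201
 *
 * The matching store macros write the bytes back out in the same order,
 * so a load followed by the corresponding store round-trips the buffer.
 */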
big-endian 16-bit word into a byte buffer */ +#define be_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 8); \ + (ptr)[1] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 16-bit word from a byte buffer */ +#define le_load_word16(ptr) \ + ((((uint16_t)((ptr)[1])) << 8) | \ + ((uint16_t)((ptr)[0]))) + +/* Store a little-endian 16-bit word into a byte buffer */ +#define le_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + } while (0) + +/* XOR a source byte buffer against a destination */ +#define lw_xor_block(dest, src, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ ^= *_src++; \ + --_len; \ + } \ + } while (0) + +/* XOR two source byte buffers and put the result in a destination buffer */ +#define lw_xor_block_2_src(dest, src1, src2, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ = *_src1++ ^ *_src2++; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time */ +#define lw_xor_block_2_dest(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest2++ = (*_dest++ ^= *_src++); \ + --_len; \ + } \ + } while (0) + +/* XOR two byte buffers and write to a destination which at the same + * time copying the contents of src2 to dest2 */ +#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src2++; \ + *_dest2++ = _temp; \ + *_dest++ = *_src1++ ^ _temp; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time. This version swaps the source value + * into the "dest" buffer */ +#define lw_xor_block_swap(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src++; \ + *_dest2++ = *_dest ^ _temp; \ + *_dest++ = _temp; \ + --_len; \ + } \ + } while (0) + +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + +/* Rotation macros for 32-bit arguments */ + +/* Generic left rotate */ +#define leftRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (32 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (32 - (bits))); \ + })) + +#if !LW_CRYPTO_ROTATE32_COMPOSED + +/* Left rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1(a) (leftRotate((a), 1)) +#define leftRotate2(a) (leftRotate((a), 2)) +#define leftRotate3(a) (leftRotate((a), 3)) +#define leftRotate4(a) (leftRotate((a), 4)) +#define leftRotate5(a) (leftRotate((a), 5)) +#define leftRotate6(a) (leftRotate((a), 6)) +#define leftRotate7(a) (leftRotate((a), 7)) +#define leftRotate8(a) (leftRotate((a), 8)) +#define leftRotate9(a) (leftRotate((a), 9)) +#define leftRotate10(a) (leftRotate((a), 10)) +#define leftRotate11(a) (leftRotate((a), 11)) +#define leftRotate12(a) (leftRotate((a), 12)) +#define leftRotate13(a) (leftRotate((a), 13)) +#define leftRotate14(a) (leftRotate((a), 14)) +#define leftRotate15(a) (leftRotate((a), 15)) +#define leftRotate16(a) (leftRotate((a), 16)) +#define leftRotate17(a) (leftRotate((a), 17)) +#define leftRotate18(a) (leftRotate((a), 18)) +#define leftRotate19(a) (leftRotate((a), 19)) +#define leftRotate20(a) (leftRotate((a), 20)) +#define leftRotate21(a) (leftRotate((a), 21)) +#define leftRotate22(a) (leftRotate((a), 22)) +#define leftRotate23(a) (leftRotate((a), 23)) +#define leftRotate24(a) (leftRotate((a), 24)) +#define leftRotate25(a) (leftRotate((a), 25)) +#define leftRotate26(a) (leftRotate((a), 26)) +#define leftRotate27(a) (leftRotate((a), 27)) +#define leftRotate28(a) (leftRotate((a), 28)) +#define leftRotate29(a) (leftRotate((a), 29)) +#define leftRotate30(a) (leftRotate((a), 30)) +#define leftRotate31(a) (leftRotate((a), 31)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1(a) (rightRotate((a), 1)) +#define rightRotate2(a) (rightRotate((a), 2)) +#define rightRotate3(a) (rightRotate((a), 3)) +#define rightRotate4(a) (rightRotate((a), 4)) +#define rightRotate5(a) (rightRotate((a), 5)) +#define rightRotate6(a) (rightRotate((a), 6)) +#define rightRotate7(a) (rightRotate((a), 7)) +#define rightRotate8(a) (rightRotate((a), 8)) +#define rightRotate9(a) (rightRotate((a), 9)) +#define rightRotate10(a) (rightRotate((a), 10)) +#define rightRotate11(a) (rightRotate((a), 11)) +#define rightRotate12(a) (rightRotate((a), 12)) +#define rightRotate13(a) (rightRotate((a), 13)) +#define rightRotate14(a) (rightRotate((a), 14)) +#define rightRotate15(a) (rightRotate((a), 15)) +#define rightRotate16(a) (rightRotate((a), 16)) +#define rightRotate17(a) (rightRotate((a), 17)) +#define rightRotate18(a) (rightRotate((a), 18)) +#define rightRotate19(a) (rightRotate((a), 19)) +#define rightRotate20(a) (rightRotate((a), 20)) +#define rightRotate21(a) (rightRotate((a), 21)) +#define rightRotate22(a) (rightRotate((a), 22)) +#define rightRotate23(a) (rightRotate((a), 23)) +#define rightRotate24(a) (rightRotate((a), 24)) +#define rightRotate25(a) (rightRotate((a), 25)) +#define rightRotate26(a) (rightRotate((a), 26)) +#define rightRotate27(a) (rightRotate((a), 27)) +#define rightRotate28(a) (rightRotate((a), 28)) +#define rightRotate29(a) (rightRotate((a), 29)) +#define rightRotate30(a) (rightRotate((a), 30)) +#define rightRotate31(a) (rightRotate((a), 31)) + +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ 
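/* Worked check of the composed forms above (explanatory only): a left
 * rotate by 5 is a left rotate by 8 followed by three right rotates by 1,
 * and both routes agree, e.g. for the 32-bit value 0x80000001:
 *
 *     leftRotate(0x80000001, 5)                  == 0x00000030
 *     rightRotate(leftRotate(0x80000001, 8), 3)  == 0x00000030
 *
 * On AVR the composed form is cheaper because single-bit rotates and
 * whole-byte moves are the only inexpensive rotation primitives.
 */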
+#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Rotation macros for 64-bit arguments */ + +/* Generic left rotate */ +#define leftRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (64 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (64 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_64(a) (leftRotate_64((a), 1)) +#define leftRotate2_64(a) (leftRotate_64((a), 2)) +#define leftRotate3_64(a) (leftRotate_64((a), 3)) +#define leftRotate4_64(a) (leftRotate_64((a), 4)) +#define leftRotate5_64(a) (leftRotate_64((a), 5)) +#define leftRotate6_64(a) (leftRotate_64((a), 6)) +#define leftRotate7_64(a) (leftRotate_64((a), 7)) +#define leftRotate8_64(a) (leftRotate_64((a), 8)) +#define leftRotate9_64(a) (leftRotate_64((a), 9)) +#define leftRotate10_64(a) (leftRotate_64((a), 10)) +#define leftRotate11_64(a) (leftRotate_64((a), 11)) +#define leftRotate12_64(a) (leftRotate_64((a), 12)) +#define leftRotate13_64(a) (leftRotate_64((a), 13)) +#define leftRotate14_64(a) (leftRotate_64((a), 14)) +#define leftRotate15_64(a) (leftRotate_64((a), 15)) +#define leftRotate16_64(a) (leftRotate_64((a), 16)) +#define leftRotate17_64(a) (leftRotate_64((a), 17)) +#define leftRotate18_64(a) (leftRotate_64((a), 18)) +#define leftRotate19_64(a) (leftRotate_64((a), 19)) +#define leftRotate20_64(a) (leftRotate_64((a), 20)) +#define leftRotate21_64(a) (leftRotate_64((a), 21)) +#define leftRotate22_64(a) (leftRotate_64((a), 22)) +#define leftRotate23_64(a) (leftRotate_64((a), 23)) +#define leftRotate24_64(a) (leftRotate_64((a), 24)) +#define leftRotate25_64(a) (leftRotate_64((a), 25)) +#define leftRotate26_64(a) (leftRotate_64((a), 26)) +#define leftRotate27_64(a) (leftRotate_64((a), 27)) +#define leftRotate28_64(a) (leftRotate_64((a), 28)) +#define leftRotate29_64(a) (leftRotate_64((a), 29)) +#define leftRotate30_64(a) (leftRotate_64((a), 30)) +#define leftRotate31_64(a) (leftRotate_64((a), 31)) +#define leftRotate32_64(a) (leftRotate_64((a), 32)) +#define leftRotate33_64(a) (leftRotate_64((a), 33)) +#define leftRotate34_64(a) (leftRotate_64((a), 34)) +#define leftRotate35_64(a) (leftRotate_64((a), 35)) +#define leftRotate36_64(a) (leftRotate_64((a), 36)) +#define leftRotate37_64(a) (leftRotate_64((a), 37)) +#define leftRotate38_64(a) (leftRotate_64((a), 38)) +#define leftRotate39_64(a) (leftRotate_64((a), 39)) +#define leftRotate40_64(a) (leftRotate_64((a), 40)) +#define leftRotate41_64(a) (leftRotate_64((a), 41)) +#define leftRotate42_64(a) (leftRotate_64((a), 42)) +#define leftRotate43_64(a) (leftRotate_64((a), 43)) +#define leftRotate44_64(a) (leftRotate_64((a), 44)) +#define leftRotate45_64(a) (leftRotate_64((a), 45)) +#define leftRotate46_64(a) (leftRotate_64((a), 46)) +#define leftRotate47_64(a) (leftRotate_64((a), 47)) +#define leftRotate48_64(a) (leftRotate_64((a), 48)) +#define leftRotate49_64(a) (leftRotate_64((a), 49)) +#define leftRotate50_64(a) (leftRotate_64((a), 50)) +#define leftRotate51_64(a) (leftRotate_64((a), 51)) +#define leftRotate52_64(a) (leftRotate_64((a), 52)) +#define leftRotate53_64(a) (leftRotate_64((a), 53)) +#define leftRotate54_64(a) (leftRotate_64((a), 54)) +#define leftRotate55_64(a) (leftRotate_64((a), 55)) +#define leftRotate56_64(a) (leftRotate_64((a), 56)) +#define leftRotate57_64(a) (leftRotate_64((a), 57)) +#define leftRotate58_64(a) (leftRotate_64((a), 58)) +#define leftRotate59_64(a) (leftRotate_64((a), 59)) +#define leftRotate60_64(a) (leftRotate_64((a), 60)) +#define leftRotate61_64(a) (leftRotate_64((a), 61)) +#define leftRotate62_64(a) (leftRotate_64((a), 62)) +#define leftRotate63_64(a) (leftRotate_64((a), 63)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_64(a) (rightRotate_64((a), 1)) +#define rightRotate2_64(a) (rightRotate_64((a), 2)) +#define rightRotate3_64(a) (rightRotate_64((a), 3)) +#define rightRotate4_64(a) (rightRotate_64((a), 4)) +#define rightRotate5_64(a) (rightRotate_64((a), 5)) +#define rightRotate6_64(a) (rightRotate_64((a), 6)) +#define rightRotate7_64(a) (rightRotate_64((a), 7)) +#define rightRotate8_64(a) (rightRotate_64((a), 8)) +#define rightRotate9_64(a) (rightRotate_64((a), 9)) +#define rightRotate10_64(a) (rightRotate_64((a), 10)) +#define rightRotate11_64(a) (rightRotate_64((a), 11)) +#define rightRotate12_64(a) (rightRotate_64((a), 12)) +#define rightRotate13_64(a) (rightRotate_64((a), 13)) +#define rightRotate14_64(a) (rightRotate_64((a), 14)) +#define rightRotate15_64(a) (rightRotate_64((a), 15)) +#define rightRotate16_64(a) (rightRotate_64((a), 16)) +#define rightRotate17_64(a) (rightRotate_64((a), 17)) +#define rightRotate18_64(a) (rightRotate_64((a), 18)) +#define rightRotate19_64(a) (rightRotate_64((a), 19)) +#define rightRotate20_64(a) (rightRotate_64((a), 20)) +#define rightRotate21_64(a) (rightRotate_64((a), 21)) +#define rightRotate22_64(a) (rightRotate_64((a), 22)) +#define rightRotate23_64(a) (rightRotate_64((a), 23)) +#define rightRotate24_64(a) (rightRotate_64((a), 24)) +#define rightRotate25_64(a) (rightRotate_64((a), 25)) +#define rightRotate26_64(a) (rightRotate_64((a), 26)) +#define rightRotate27_64(a) (rightRotate_64((a), 27)) +#define rightRotate28_64(a) (rightRotate_64((a), 28)) +#define rightRotate29_64(a) (rightRotate_64((a), 29)) +#define rightRotate30_64(a) (rightRotate_64((a), 30)) +#define rightRotate31_64(a) (rightRotate_64((a), 31)) +#define rightRotate32_64(a) (rightRotate_64((a), 32)) +#define rightRotate33_64(a) (rightRotate_64((a), 33)) +#define rightRotate34_64(a) (rightRotate_64((a), 34)) +#define rightRotate35_64(a) (rightRotate_64((a), 35)) +#define rightRotate36_64(a) (rightRotate_64((a), 36)) +#define rightRotate37_64(a) (rightRotate_64((a), 37)) +#define rightRotate38_64(a) (rightRotate_64((a), 38)) +#define rightRotate39_64(a) (rightRotate_64((a), 39)) +#define rightRotate40_64(a) (rightRotate_64((a), 40)) +#define rightRotate41_64(a) (rightRotate_64((a), 41)) +#define rightRotate42_64(a) (rightRotate_64((a), 42)) +#define rightRotate43_64(a) (rightRotate_64((a), 43)) +#define rightRotate44_64(a) (rightRotate_64((a), 44)) +#define rightRotate45_64(a) (rightRotate_64((a), 45)) +#define rightRotate46_64(a) (rightRotate_64((a), 46)) +#define rightRotate47_64(a) (rightRotate_64((a), 47)) +#define rightRotate48_64(a) (rightRotate_64((a), 48)) +#define rightRotate49_64(a) (rightRotate_64((a), 49)) +#define rightRotate50_64(a) (rightRotate_64((a), 50)) +#define rightRotate51_64(a) (rightRotate_64((a), 51)) +#define rightRotate52_64(a) (rightRotate_64((a), 52)) +#define rightRotate53_64(a) (rightRotate_64((a), 53)) +#define rightRotate54_64(a) (rightRotate_64((a), 54)) +#define rightRotate55_64(a) (rightRotate_64((a), 55)) +#define rightRotate56_64(a) (rightRotate_64((a), 56)) +#define rightRotate57_64(a) (rightRotate_64((a), 57)) +#define rightRotate58_64(a) (rightRotate_64((a), 58)) +#define rightRotate59_64(a) (rightRotate_64((a), 59)) +#define rightRotate60_64(a) (rightRotate_64((a), 60)) +#define rightRotate61_64(a) (rightRotate_64((a), 61)) +#define rightRotate62_64(a) (rightRotate_64((a), 62)) +#define rightRotate63_64(a) (rightRotate_64((a), 63)) + +/* 
Rotate a 16-bit value left by a number of bits */ +#define leftRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (16 - (bits))); \ + })) + +/* Rotate a 16-bit value right by a number of bits */ +#define rightRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (16 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_16(a) (leftRotate_16((a), 1)) +#define leftRotate2_16(a) (leftRotate_16((a), 2)) +#define leftRotate3_16(a) (leftRotate_16((a), 3)) +#define leftRotate4_16(a) (leftRotate_16((a), 4)) +#define leftRotate5_16(a) (leftRotate_16((a), 5)) +#define leftRotate6_16(a) (leftRotate_16((a), 6)) +#define leftRotate7_16(a) (leftRotate_16((a), 7)) +#define leftRotate8_16(a) (leftRotate_16((a), 8)) +#define leftRotate9_16(a) (leftRotate_16((a), 9)) +#define leftRotate10_16(a) (leftRotate_16((a), 10)) +#define leftRotate11_16(a) (leftRotate_16((a), 11)) +#define leftRotate12_16(a) (leftRotate_16((a), 12)) +#define leftRotate13_16(a) (leftRotate_16((a), 13)) +#define leftRotate14_16(a) (leftRotate_16((a), 14)) +#define leftRotate15_16(a) (leftRotate_16((a), 15)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_16(a) (rightRotate_16((a), 1)) +#define rightRotate2_16(a) (rightRotate_16((a), 2)) +#define rightRotate3_16(a) (rightRotate_16((a), 3)) +#define rightRotate4_16(a) (rightRotate_16((a), 4)) +#define rightRotate5_16(a) (rightRotate_16((a), 5)) +#define rightRotate6_16(a) (rightRotate_16((a), 6)) +#define rightRotate7_16(a) (rightRotate_16((a), 7)) +#define rightRotate8_16(a) (rightRotate_16((a), 8)) +#define rightRotate9_16(a) (rightRotate_16((a), 9)) +#define rightRotate10_16(a) (rightRotate_16((a), 10)) +#define rightRotate11_16(a) (rightRotate_16((a), 11)) +#define rightRotate12_16(a) (rightRotate_16((a), 12)) +#define rightRotate13_16(a) (rightRotate_16((a), 13)) +#define rightRotate14_16(a) (rightRotate_16((a), 14)) +#define rightRotate15_16(a) (rightRotate_16((a), 15)) + +/* Rotate an 8-bit value left by a number of bits */ +#define leftRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (8 - (bits))); \ + })) + +/* Rotate an 8-bit value right by a number of bits */ +#define rightRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (8 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_8(a) (leftRotate_8((a), 1)) +#define leftRotate2_8(a) (leftRotate_8((a), 2)) +#define leftRotate3_8(a) (leftRotate_8((a), 3)) +#define leftRotate4_8(a) (leftRotate_8((a), 4)) +#define leftRotate5_8(a) (leftRotate_8((a), 5)) +#define leftRotate6_8(a) (leftRotate_8((a), 6)) +#define leftRotate7_8(a) (leftRotate_8((a), 7)) + +/* Right rotate by a specific number of bits. 
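These 16-bit and 8-bit helpers rely on the GNU statement-expression extension so that the rotated argument is evaluated exactly once. A minimal standalone check of that behaviour, assuming a GCC-compatible compiler; the test values are arbitrary:

#include <assert.h>
#include <stdint.h>

/* Same shape as the leftRotate_16() macro above. */
#define leftRotate_16(a, bits) \
    (__extension__ ({ \
        uint16_t _temp = (a); \
        (_temp << (bits)) | (_temp >> (16 - (bits))); \
    }))

int main(void)
{
    /* Rotating 0x8001 left by 1 wraps bit 15 around to bit 0. */
    uint16_t r1 = leftRotate_16((uint16_t)0x8001, 1);
    assert(r1 == 0x0003);

    /* _temp captures the argument once, so side effects are not duplicated. */
    uint16_t arr[2] = { 0x00F0, 0x0F00 };
    int i = 0;
    uint16_t r2 = leftRotate_16(arr[i++], 4);
    assert(i == 1 && r2 == 0x0F00);
    return 0;
}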
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_8(a) (rightRotate_8((a), 1)) +#define rightRotate2_8(a) (rightRotate_8((a), 2)) +#define rightRotate3_8(a) (rightRotate_8((a), 3)) +#define rightRotate4_8(a) (rightRotate_8((a), 4)) +#define rightRotate5_8(a) (rightRotate_8((a), 5)) +#define rightRotate6_8(a) (rightRotate_8((a), 6)) +#define rightRotate7_8(a) (rightRotate_8((a), 7)) + +#endif diff --git a/orange/Implementations/crypto_hash/orangishv1/rhys/orange.c b/orange/Implementations/crypto_hash/orangishv1/rhys/orange.c new file mode 100644 index 0000000..641e117 --- /dev/null +++ b/orange/Implementations/crypto_hash/orangishv1/rhys/orange.c @@ -0,0 +1,384 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "orange.h" +#include "internal-photon256.h" +#include "internal-util.h" +#include + +aead_cipher_t const orange_zest_cipher = { + "ORANGE-Zest", + ORANGE_ZEST_KEY_SIZE, + ORANGE_ZEST_NONCE_SIZE, + ORANGE_ZEST_TAG_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + orange_zest_aead_encrypt, + orange_zest_aead_decrypt +}; + +aead_hash_algorithm_t const orangish_hash_algorithm = { + "ORANGISH", + sizeof(int), + ORANGISH_HASH_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + orangish_hash, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +/** + * \brief Doubles a block in the GF(128) field a number of times. + * + * \param block The block to be doubled. + * \param value The number of times to double the block. + */ +static void orange_block_double(unsigned char block[16], unsigned char value) +{ + unsigned index; + unsigned char mask; + while (value > 0) { + mask = (unsigned char)(((signed char)(block[15])) >> 7); + for (index = 15; index > 0; --index) + block[index] = (block[index] << 1) | (block[index - 1] >> 7); + block[0] = (block[0] << 1) ^ (mask & 0x87); + --value; + } +} + +/** + * \brief Rotates a block left by 1 bit. + * + * \param out The output block to be set to the rotated version. + * \param in The input block to be rotated, must not overlap with \a out. + */ +static void orange_block_rotate + (unsigned char out[16], const unsigned char in[16]) +{ + unsigned index; + for (index = 15; index > 0; --index) + out[index] = (in[index] << 1) | (in[index - 1] >> 7); + out[0] = (in[0] << 1) | (in[15] >> 7); +} + +/** + * \brief Hash input data with ORANGE. 
+ * + * \param state PHOTON-256 permutation state. + * \param data Points to the data to be hashed. + * \param len Length of the data to be hashed, must not be zero. + * \param domain0 Domain separation value for full last block. + * \param domain1 Domain separation value for partial last block. + */ +static void orange_process_hash + (unsigned char state[PHOTON256_STATE_SIZE], + const unsigned char *data, unsigned long long len, + unsigned char domain0, unsigned char domain1) +{ + unsigned temp; + while (len > PHOTON256_STATE_SIZE) { + photon256_permute(state); + lw_xor_block(state, data, PHOTON256_STATE_SIZE); + data += PHOTON256_STATE_SIZE; + len -= PHOTON256_STATE_SIZE; + } + photon256_permute(state); + temp = (unsigned)len; + if (temp < PHOTON256_STATE_SIZE) { + orange_block_double(state + 16, domain1); + state[temp] ^= 0x01; /* padding */ + } else { + orange_block_double(state + 16, domain0); + } + lw_xor_block(state, data, temp); +} + +/** + * \brief Applies the rho function to the ORANGE state. + * + * \param KS Output keystream to use to encrypt the plaintext or to + * decrypt the ciphertext. + * \param S Rolling key state. + * \param state Rolling PHOTON-256 permutation state. + */ +static void orange_rho + (unsigned char KS[32], unsigned char S[16], const unsigned char state[32]) +{ + orange_block_double(S, 1); + orange_block_rotate(KS, state); + lw_xor_block_2_src(KS + 16, state + 16, S, 16); + memcpy(S, state + 16, 16); +} + +/** + * \brief Encrypts plaintext with ORANGE. + * + * \param state PHOTON-256 permutation state. + * \param k Points to the key for the cipher. + * \param c Points to the ciphertext output buffer. + * \param m Points to the plaintext input buffer. + * \param len Length of the plaintext in bytes, must not be zero. + */ +static void orange_encrypt + (unsigned char state[PHOTON256_STATE_SIZE], const unsigned char *k, + unsigned char *c, const unsigned char *m, unsigned long long len) +{ + unsigned char S[ORANGE_ZEST_KEY_SIZE]; + unsigned char KS[PHOTON256_STATE_SIZE]; + unsigned temp; + memcpy(S, k, ORANGE_ZEST_KEY_SIZE); + while (len > PHOTON256_STATE_SIZE) { + photon256_permute(state); + orange_rho(KS, S, state); + lw_xor_block_2_src(c, m, KS, PHOTON256_STATE_SIZE); + lw_xor_block(state, c, PHOTON256_STATE_SIZE); + c += PHOTON256_STATE_SIZE; + m += PHOTON256_STATE_SIZE; + len -= PHOTON256_STATE_SIZE; + } + photon256_permute(state); + temp = (unsigned)len; + if (temp < PHOTON256_STATE_SIZE) { + orange_block_double(state + 16, 2); + orange_rho(KS, S, state); + lw_xor_block_2_src(c, m, KS, temp); + lw_xor_block(state, c, temp); + state[temp] ^= 0x01; /* padding */ + } else { + orange_block_double(state + 16, 1); + orange_rho(KS, S, state); + lw_xor_block_2_src(c, m, KS, PHOTON256_STATE_SIZE); + lw_xor_block(state, c, PHOTON256_STATE_SIZE); + } +} + +/** + * \brief Decrypts ciphertext with ORANGE. + * + * \param state PHOTON-256 permutation state. + * \param k Points to the key for the cipher. + * \param m Points to the plaintext output buffer. + * \param c Points to the ciphertext input buffer. + * \param len Length of the plaintext in bytes, must not be zero. 
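To make the doubling helper above concrete: byte 15 of the block carries the most significant bits, and when a doubling shifts a bit out of the top it is reduced by folding 0x87, the low terms of the usual GF(2^128) polynomial x^128 + x^7 + x^2 + x + 1, into byte 0. A minimal standalone sketch of one doubling step, mirroring orange_block_double() with an illustrative test value:

#include <assert.h>
#include <string.h>

/* One doubling of a 16-byte block, byte 15 being the most significant. */
static void gf128_double_once(unsigned char block[16])
{
    unsigned index;
    unsigned char mask = (unsigned char)(((signed char)(block[15])) >> 7);
    for (index = 15; index > 0; --index)
        block[index] = (unsigned char)((block[index] << 1) | (block[index - 1] >> 7));
    block[0] = (unsigned char)((block[0] << 1) ^ (mask & 0x87));
}

int main(void)
{
    unsigned char block[16] = {0};
    unsigned char zeros[15] = {0};
    block[15] = 0x80;                 /* only the x^127 term is set */
    gf128_double_once(block);
    /* The dropped x^128 term folds back in as x^7 + x^2 + x + 1 = 0x87. */
    assert(block[0] == 0x87);
    assert(memcmp(block + 1, zeros, 15) == 0);
    return 0;
}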
+ */ +static void orange_decrypt + (unsigned char state[PHOTON256_STATE_SIZE], const unsigned char *k, + unsigned char *m, const unsigned char *c, unsigned long long len) +{ + unsigned char S[ORANGE_ZEST_KEY_SIZE]; + unsigned char KS[PHOTON256_STATE_SIZE]; + unsigned temp; + memcpy(S, k, ORANGE_ZEST_KEY_SIZE); + while (len > PHOTON256_STATE_SIZE) { + photon256_permute(state); + orange_rho(KS, S, state); + lw_xor_block(state, c, PHOTON256_STATE_SIZE); + lw_xor_block_2_src(m, c, KS, PHOTON256_STATE_SIZE); + c += PHOTON256_STATE_SIZE; + m += PHOTON256_STATE_SIZE; + len -= PHOTON256_STATE_SIZE; + } + photon256_permute(state); + temp = (unsigned)len; + if (temp < PHOTON256_STATE_SIZE) { + orange_block_double(state + 16, 2); + orange_rho(KS, S, state); + lw_xor_block(state, c, temp); + lw_xor_block_2_src(m, c, KS, temp); + state[temp] ^= 0x01; /* padding */ + } else { + orange_block_double(state + 16, 1); + orange_rho(KS, S, state); + lw_xor_block(state, c, PHOTON256_STATE_SIZE); + lw_xor_block_2_src(m, c, KS, PHOTON256_STATE_SIZE); + } +} + +/** + * \brief Generates the authentication tag for ORANGE-Zest. + * + * \param state PHOTON-256 permutation state. + * + * The tag will be left in the leading bytes of the state on exit. + */ +static void orange_generate_tag(unsigned char state[PHOTON256_STATE_SIZE]) +{ + /* Swap the two halves of the state and run the permutation again */ + unsigned posn; + for (posn = 0; posn < (PHOTON256_STATE_SIZE / 2); ++posn) { + unsigned char temp = state[posn]; + state[posn] = state[posn + (PHOTON256_STATE_SIZE / 2)]; + state[posn + (PHOTON256_STATE_SIZE / 2)] = temp; + } + photon256_permute(state); +} + +int orange_zest_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + unsigned char state[PHOTON256_STATE_SIZE]; + (void)nsec; + + /* Set the length of the returned ciphertext */ + *clen = mlen + ORANGE_ZEST_TAG_SIZE; + + /* Initialize the PHOTON-256 state with the nonce and key */ + memcpy(state, npub, 16); + memcpy(state + 16, k, 16); + + /* Handle the associated data and message payload */ + if (adlen == 0) { + if (mlen == 0) { + state[16] ^= 2; /* domain separation */ + photon256_permute(state); + memcpy(c + mlen, state, ORANGE_ZEST_TAG_SIZE); + return 0; + } else { + state[16] ^= 1; /* domain separation */ + orange_encrypt(state, k, c, m, mlen); + } + } else { + orange_process_hash(state, ad, adlen, 1, 2); + if (mlen != 0) + orange_encrypt(state, k, c, m, mlen); + } + + /* Generate the authentication tag */ + orange_generate_tag(state); + memcpy(c + mlen, state, ORANGE_ZEST_TAG_SIZE); + return 0; +} + +int orange_zest_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + unsigned char state[PHOTON256_STATE_SIZE]; + (void)nsec; + + /* Validate the ciphertext length and set the return "mlen" value */ + if (clen < ORANGE_ZEST_TAG_SIZE) + return -1; + *mlen = clen - ORANGE_ZEST_TAG_SIZE; + + /* Initialize the PHOTON-256 state with the nonce and key */ + memcpy(state, npub, 16); + memcpy(state + 16, k, 16); + + /* Handle the associated data and message payload */ + clen -= ORANGE_ZEST_TAG_SIZE; + if (adlen == 0) { + if (clen == 0) { + state[16] ^= 2; /* domain separation */ + 
photon256_permute(state); + return aead_check_tag(m, 0, state, c, ORANGE_ZEST_TAG_SIZE); + } else { + state[16] ^= 1; /* domain separation */ + orange_decrypt(state, k, m, c, clen); + } + } else { + orange_process_hash(state, ad, adlen, 1, 2); + if (clen != 0) + orange_decrypt(state, k, m, c, clen); + } + + /* Check the authentication tag */ + orange_generate_tag(state); + return aead_check_tag(m, clen, state, c + clen, ORANGE_ZEST_TAG_SIZE); +} + +/** + * \brief Rate of absorbing data into the ORANGISH hash state. + */ +#define ORANGISH_RATE 16 + +int orangish_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + unsigned char state[PHOTON256_STATE_SIZE]; + unsigned temp; + memset(state, 0, sizeof(state)); + if (inlen == 0) { + /* No absorption necessary for a zero-length input */ + } else if (inlen < ORANGISH_RATE) { + /* Single partial block */ + temp = (unsigned)inlen; + memcpy(state, in, temp); + state[temp] ^= 0x01; /* padding */ + photon256_permute(state); + lw_xor_block(state + 16, in, temp); + state[16 + temp] ^= 0x01; /* padding */ + state[0] ^= 0x02; /* domain separation */ + } else if (inlen == ORANGISH_RATE) { + /* Single full block */ + memcpy(state, in, ORANGISH_RATE); + photon256_permute(state); + lw_xor_block(state + 16, in, ORANGISH_RATE); + state[0] ^= 0x01; /* domain separation */ + } else { + /* Process double blocks until we run out */ + memcpy(state, in, ORANGISH_RATE); + photon256_permute(state); + lw_xor_block(state + 16, in, ORANGISH_RATE); + in += ORANGISH_RATE; + inlen -= ORANGISH_RATE; + while (inlen > ORANGISH_RATE) { + lw_xor_block(state, in, ORANGISH_RATE); + photon256_permute(state); + lw_xor_block(state + 16, in, ORANGISH_RATE); + in += ORANGISH_RATE; + inlen -= ORANGISH_RATE; + } + temp = (unsigned)inlen; + if (temp < ORANGISH_RATE) { + /* Last double block is partial */ + lw_xor_block(state, in, temp); + state[temp] ^= 0x01; /* padding */ + photon256_permute(state); + lw_xor_block(state + 16, in, temp); + state[16 + temp] ^= 0x01; /* padding */ + state[0] ^= 0x02; /* domain separation */ + } else { + /* Last double block is full */ + lw_xor_block(state, in, ORANGISH_RATE); + photon256_permute(state); + lw_xor_block(state + 16, in, ORANGISH_RATE); + state[0] ^= 0x01; /* domain separation */ + } + } + photon256_permute(state); + memcpy(out, state, 16); + photon256_permute(state); + memcpy(out + 16, state, 16); + return 0; +} diff --git a/orange/Implementations/crypto_hash/orangishv1/rhys/orange.h b/orange/Implementations/crypto_hash/orangishv1/rhys/orange.h new file mode 100644 index 0000000..de5b00c --- /dev/null +++ b/orange/Implementations/crypto_hash/orangishv1/rhys/orange.h @@ -0,0 +1,153 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. 
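A minimal usage sketch for the all-in-one hash entry point above, assuming orange.h and its dependencies are on the include path; the message is arbitrary test data, not an official test vector:

#include <stdio.h>
#include <string.h>
#include "orange.h"

int main(void)
{
    unsigned char digest[ORANGISH_HASH_SIZE];
    const char *msg = "hello";
    unsigned i;
    if (orangish_hash(digest, (const unsigned char *)msg, strlen(msg)) != 0)
        return 1;
    for (i = 0; i < sizeof(digest); ++i)
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}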
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_ORANGE_H +#define LWCRYPTO_ORANGE_H + +#include "aead-common.h" + +/** + * \file orange.h + * \brief ORANGE authenticated encryption algorithm. + * + * ORANGE is a family of algorithms built around the PHOTON-256 permutation. + * There are two members of the family at present: + * + * \li ORANGE-Zest is an authenticated encryption algorithm with a 128-bit + * key, a 128-bit nonce, and a 128-bit tag. + * \li ORANGISH is a hash algorithm with a 256-bit output. + * + * References: https://www.isical.ac.in/~lightweight/Orange/ + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the key for ORANGE-Zest. + */ +#define ORANGE_ZEST_KEY_SIZE 16 + +/** + * \brief Size of the authentication tag for ORANGE-Zest. + */ +#define ORANGE_ZEST_TAG_SIZE 16 + +/** + * \brief Size of the nonce for ORANGE-Zest. + */ +#define ORANGE_ZEST_NONCE_SIZE 16 + +/** + * \brief Size of the hash output for the ORANGISH hash algorithm. + */ +#define ORANGISH_HASH_SIZE 32 + +/** + * \brief Meta-information block for the ORANGE-Zest cipher. + */ +extern aead_cipher_t const orange_zest_cipher; + +/** + * \brief Meta-information block for the ORANGISH hash algorithm. + */ +extern aead_hash_algorithm_t const orangish_hash_algorithm; + +/** + * \brief Encrypts and authenticates a packet with ORANGE-Zest. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa orange_zest_aead_decrypt() + */ +int orange_zest_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with ORANGE-Zest. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. 
+ * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa orange_zest_aead_encrypt() + */ +int orange_zest_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data with ORANGISH to generate a hash value. + * + * \param out Buffer to receive the hash output which must be at least + * ORANGISH_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int orangish_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/aead-common.c b/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
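A short round-trip sketch for the ORANGE-Zest entry points declared above; the key, nonce, message and associated data are placeholders chosen only for illustration:

#include <assert.h>
#include <string.h>
#include "orange.h"

int main(void)
{
    unsigned char key[ORANGE_ZEST_KEY_SIZE] = {0};
    unsigned char nonce[ORANGE_ZEST_NONCE_SIZE] = {0};
    unsigned char msg[] = "attack at dawn";
    unsigned char ad[] = "header";
    unsigned char c[sizeof(msg) + ORANGE_ZEST_TAG_SIZE];
    unsigned char out[sizeof(msg)];
    unsigned long long clen = 0, mlen = 0;

    assert(orange_zest_aead_encrypt(c, &clen, msg, sizeof(msg),
                                    ad, sizeof(ad), 0, nonce, key) == 0);
    assert(clen == sizeof(msg) + ORANGE_ZEST_TAG_SIZE);

    assert(orange_zest_aead_decrypt(out, &mlen, 0, c, clen,
                                    ad, sizeof(ad), nonce, key) == 0);
    assert(mlen == sizeof(msg));
    assert(memcmp(out, msg, sizeof(msg)) == 0);
    return 0;
}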
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/aead-common.h b/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. 
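The masking trick used by aead_check_tag() above deserves a note: after the loop, accum is zero exactly when the tags match, and since accum never exceeds 255, (accum - 1) >> 8 yields an all-ones mask on a match and zero otherwise. Like the original, this assumes >> on a negative int is an arithmetic shift. A standalone sketch of the same comparison with hypothetical tag values:

#include <assert.h>

/* Same masking idea as aead_check_tag(): 0 on equality, -1 otherwise,
 * without a data-dependent branch on the tag bytes. */
static int ct_compare(const unsigned char *a, const unsigned char *b, unsigned len)
{
    int accum = 0;
    while (len > 0) {
        accum |= (*a++ ^ *b++);
        --len;
    }
    /* accum is in 0..255, so accum - 1 is negative only when accum == 0. */
    accum = (accum - 1) >> 8;
    return ~accum;
}

int main(void)
{
    unsigned char t1[4] = {1, 2, 3, 4};
    unsigned char t2[4] = {1, 2, 3, 4};
    unsigned char t3[4] = {1, 2, 3, 5};
    assert(ct_compare(t1, t2, 4) == 0);
    assert(ct_compare(t1, t3, 4) == -1);
    return 0;
}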
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
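These typedefs describe the exact shapes that concrete entry points such as orange_zest_aead_encrypt() must have, which is what allows the meta-information blocks to store them as plain function pointers. A small compatibility sketch; the zero-length message is just a placeholder:

#include "aead-common.h"
#include "orange.h"

int main(void)
{
    /* orange_zest_aead_encrypt() matches aead_cipher_encrypt_t exactly,
     * so it can be stored in and called through the generic pointer type. */
    aead_cipher_encrypt_t enc = orange_zest_aead_encrypt;
    unsigned char key[ORANGE_ZEST_KEY_SIZE] = {0};
    unsigned char nonce[ORANGE_ZEST_NONCE_SIZE] = {0};
    unsigned char c[ORANGE_ZEST_TAG_SIZE];
    unsigned long long clen = 0;
    return enc(c, &clen, 0, 0, 0, 0, 0, nonce, key);
}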
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
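Because each meta-information block bundles the parameter sizes together with the function pointers, a caller can drive any cipher in the collection generically. A brief sketch using the orange_zest_cipher block defined earlier in this patch; the buffer sizes and message are illustrative:

#include <assert.h>
#include <string.h>
#include "aead-common.h"
#include "orange.h"

int main(void)
{
    const aead_cipher_t *cipher = &orange_zest_cipher;
    unsigned char key[32] = {0};     /* large enough for this cipher's key_len */
    unsigned char nonce[32] = {0};
    unsigned char msg[] = "generic dispatch";
    unsigned char c[sizeof(msg) + 32];
    unsigned long long clen = 0;

    assert(cipher->key_len <= sizeof(key));
    assert(cipher->nonce_len <= sizeof(nonce));
    assert(cipher->encrypt(c, &clen, msg, sizeof(msg), 0, 0, 0, nonce, key) == 0);
    assert(clen == sizeof(msg) + cipher->tag_len);
    return 0;
}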
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/api.h b/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/api.h deleted file mode 100644 index bd8cdcb..0000000 --- a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 8 -#define CRYPTO_ABYTES 12 -#define CRYPTO_NOOVERLAP 1 diff --git a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/encrypt.c b/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/encrypt.c deleted file mode 100644 index 681e037..0000000 --- a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "oribatida.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return oribatida_192_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return oribatida_192_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/internal-simp-avr.S b/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/internal-simp-avr.S deleted file mode 100644 index 65fba20..0000000 --- a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/internal-simp-avr.S +++ /dev/null @@ -1,949 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global simp_256_permute - .type simp_256_permute, @function -simp_256_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ldi r23,245 - mov r10,r23 - ldi r17,14 - mov r11,r17 - ldi r16,44 - mov r12,r16 - ldi r23,25 - mov r13,r23 - ldi r23,133 - mov r14,r23 - ldi r23,248 - mov r15,r23 - ldi r24,105 - ldi r25,51 -14: - ldi r23,17 -16: - ldd r29,Z+16 - ldd r28,Z+17 - ldd r27,Z+18 - ldd r26,Z+19 - ldd r21,Z+20 - ldd r20,Z+21 - ldd r19,Z+22 - ldd r18,Z+23 - mov r2,r29 - mov r3,r18 - mov r4,r19 - mov r5,r20 - mov r6,r21 - mov r7,r26 - mov r8,r27 - mov r9,r28 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r28 - rol r29 - adc r18,r1 - and 
r2,r18 - and r3,r19 - and r4,r20 - and r5,r21 - and r6,r26 - and r7,r27 - and r8,r28 - and r9,r29 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r28 - rol r29 - adc r18,r1 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - eor r6,r26 - eor r7,r27 - eor r8,r28 - eor r9,r29 - ldd r0,Z+8 - eor r9,r0 - ldd r0,Z+9 - eor r8,r0 - ldd r0,Z+10 - eor r7,r0 - ldd r0,Z+11 - eor r6,r0 - ldd r0,Z+12 - eor r5,r0 - ldd r0,Z+13 - eor r4,r0 - ldd r0,Z+14 - eor r3,r0 - ldd r0,Z+15 - eor r2,r0 - ldd r0,Z+24 - eor r0,r9 - std Z+24,r0 - ldd r0,Z+25 - eor r0,r8 - std Z+25,r0 - ldd r0,Z+26 - eor r0,r7 - std Z+26,r0 - ldd r0,Z+27 - eor r0,r6 - std Z+27,r0 - ldd r0,Z+28 - eor r0,r5 - std Z+28,r0 - ldd r0,Z+29 - eor r0,r4 - std Z+29,r0 - ldd r0,Z+30 - eor r0,r3 - std Z+30,r0 - ldd r0,Z+31 - eor r0,r2 - std Z+31,r0 - ld r29,Z - ldd r28,Z+1 - ldd r27,Z+2 - ldd r26,Z+3 - ldd r21,Z+4 - ldd r20,Z+5 - ldd r19,Z+6 - ldd r18,Z+7 - mov r0,r1 - lsr r29 - ror r28 - ror r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r29 - ror r28 - ror r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r29 - ror r28 - ror r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r29,r0 - movw r2,r18 - movw r4,r20 - movw r6,r26 - movw r8,r28 - bst r2,0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r3 - ror r2 - bld r9,7 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - eor r26,r6 - eor r27,r7 - eor r28,r8 - eor r29,r9 - ldi r17,252 - eor r18,r17 - com r19 - com r20 - com r21 - com r26 - com r27 - com r28 - com r29 - mov r0,r1 - bst r10,0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r11 - ror r10 - bld r25,5 - bld r0,0 - eor r18,r0 - ldd r0,Z+8 - eor r0,r29 - std Z+8,r0 - ldd r0,Z+9 - eor r0,r28 - std Z+9,r0 - ldd r0,Z+10 - eor r0,r27 - std Z+10,r0 - ldd r0,Z+11 - eor r0,r26 - std Z+11,r0 - ldd r0,Z+12 - eor r0,r21 - std Z+12,r0 - ldd r0,Z+13 - eor r0,r20 - std Z+13,r0 - ldd r0,Z+14 - eor r0,r19 - std Z+14,r0 - ldd r0,Z+15 - eor r0,r18 - std Z+15,r0 - ldd r9,Z+24 - ldd r8,Z+25 - ldd r7,Z+26 - ldd r6,Z+27 - ldd r5,Z+28 - ldd r4,Z+29 - ldd r3,Z+30 - ldd r2,Z+31 - mov r18,r9 - mov r19,r2 - mov r20,r3 - mov r21,r4 - mov r26,r5 - mov r27,r6 - mov r28,r7 - mov r29,r8 - lsl r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - adc r2,r1 - and r18,r2 - and r19,r3 - and r20,r4 - and r21,r5 - and r26,r6 - and r27,r7 - and r28,r8 - and r29,r9 - lsl r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - adc r2,r1 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - eor r26,r6 - eor r27,r7 - eor r28,r8 - eor r29,r9 - ld r0,Z - eor r29,r0 - ldd r0,Z+1 - eor r28,r0 - ldd r0,Z+2 - eor r27,r0 - ldd r0,Z+3 - eor r26,r0 - ldd r0,Z+4 - eor r21,r0 - ldd r0,Z+5 - eor r20,r0 - ldd r0,Z+6 - eor r19,r0 - ldd r0,Z+7 - eor r18,r0 - ldd r0,Z+16 - eor r0,r29 - std Z+16,r0 - ldd r0,Z+17 - eor r0,r28 - std Z+17,r0 - ldd r0,Z+18 - eor r0,r27 - std Z+18,r0 - ldd r0,Z+19 - eor r0,r26 - std Z+19,r0 - ldd r0,Z+20 - eor r0,r21 - std Z+20,r0 - ldd r0,Z+21 - eor r0,r20 - std Z+21,r0 - ldd r0,Z+22 - eor r0,r19 - std Z+22,r0 - ldd r0,Z+23 - eor r0,r18 - std Z+23,r0 - ldd r29,Z+8 - ldd r28,Z+9 - ldd r27,Z+10 - ldd r26,Z+11 - ldd r21,Z+12 - ldd r20,Z+13 - ldd r19,Z+14 - ldd r18,Z+15 - mov r0,r1 - lsr r29 - ror r28 - ror r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r29 - ror r28 - ror r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r29 - ror r28 - ror r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - 
ror r0 - or r29,r0 - movw r2,r18 - movw r4,r20 - movw r6,r26 - movw r8,r28 - bst r18,0 - lsr r29 - ror r28 - ror r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - bld r29,7 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - eor r6,r26 - eor r7,r27 - eor r8,r28 - eor r9,r29 - eor r2,r17 - com r3 - com r4 - com r5 - com r6 - com r7 - com r8 - com r9 - mov r0,r1 - bst r10,0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r11 - ror r10 - bld r25,5 - bld r0,0 - eor r2,r0 - ld r0,Z - eor r0,r9 - st Z,r0 - ldd r0,Z+1 - eor r0,r8 - std Z+1,r0 - ldd r0,Z+2 - eor r0,r7 - std Z+2,r0 - ldd r0,Z+3 - eor r0,r6 - std Z+3,r0 - ldd r0,Z+4 - eor r0,r5 - std Z+4,r0 - ldd r0,Z+5 - eor r0,r4 - std Z+5,r0 - ldd r0,Z+6 - eor r0,r3 - std Z+6,r0 - ldd r0,Z+7 - eor r0,r2 - std Z+7,r0 - dec r23 - breq 5407f - rjmp 16b -5407: - dec r22 - brne 5409f - rjmp 475f -5409: - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r28,Z+6 - ldd r29,Z+7 - ldd r2,Z+16 - ldd r3,Z+17 - ldd r4,Z+18 - ldd r5,Z+19 - ldd r6,Z+20 - ldd r7,Z+21 - ldd r8,Z+22 - ldd r9,Z+23 - st Z,r2 - std Z+1,r3 - std Z+2,r4 - std Z+3,r5 - std Z+4,r6 - std Z+5,r7 - std Z+6,r8 - std Z+7,r9 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - std Z+20,r26 - std Z+21,r27 - std Z+22,r28 - std Z+23,r29 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r28,Z+14 - ldd r29,Z+15 - ldd r2,Z+24 - ldd r3,Z+25 - ldd r4,Z+26 - ldd r5,Z+27 - ldd r6,Z+28 - ldd r7,Z+29 - ldd r8,Z+30 - ldd r9,Z+31 - std Z+8,r2 - std Z+9,r3 - std Z+10,r4 - std Z+11,r5 - std Z+12,r6 - std Z+13,r7 - std Z+14,r8 - std Z+15,r9 - std Z+24,r18 - std Z+25,r19 - std Z+26,r20 - std Z+27,r21 - std Z+28,r26 - std Z+29,r27 - std Z+30,r28 - std Z+31,r29 - rjmp 14b -475: - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size simp_256_permute, .-simp_256_permute - - .text -.global simp_192_permute - .type simp_192_permute, @function -simp_192_permute: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ldi r25,245 - mov r8,r25 - ldi r24,14 - mov r9,r24 - ldi r23,44 - mov r10,r23 - ldi r17,25 - mov r11,r17 - ldi r16,133 - mov r12,r16 - ldi r23,248 - mov r13,r23 - ldi r23,105 - mov r14,r23 - ldi r23,51 - mov r15,r23 -16: - ldi r23,13 -18: - ldd r27,Z+12 - ldd r26,Z+13 - ldd r21,Z+14 - ldd r20,Z+15 - ldd r19,Z+16 - ldd r18,Z+17 - mov r2,r27 - mov r3,r18 - mov r4,r19 - mov r5,r20 - mov r6,r21 - mov r7,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - adc r18,r1 - and r2,r18 - and r3,r19 - and r4,r20 - and r5,r21 - and r6,r26 - and r7,r27 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - adc r18,r1 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - eor r6,r26 - eor r7,r27 - ldd r0,Z+6 - eor r7,r0 - ldd r0,Z+7 - eor r6,r0 - ldd r0,Z+8 - eor r5,r0 - ldd r0,Z+9 - eor r4,r0 - ldd r0,Z+10 - eor r3,r0 - ldd r0,Z+11 - eor r2,r0 - ldd r0,Z+18 - eor r0,r7 - std Z+18,r0 - ldd r0,Z+19 - eor r0,r6 - std Z+19,r0 - ldd r0,Z+20 - eor r0,r5 - std Z+20,r0 - ldd r0,Z+21 - eor r0,r4 - std Z+21,r0 - ldd r0,Z+22 - eor r0,r3 - std Z+22,r0 - ldd r0,Z+23 - eor r0,r2 - std Z+23,r0 - ld r27,Z - ldd r26,Z+1 - ldd r21,Z+2 - ldd r20,Z+3 - ldd r19,Z+4 - ldd r18,Z+5 - mov r0,r1 - lsr r27 - ror r26 - ror r21 - ror r20 - 
ror r19 - ror r18 - ror r0 - lsr r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r27,r0 - movw r2,r18 - movw r4,r20 - movw r6,r26 - bst r2,0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r3 - ror r2 - bld r7,7 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - eor r26,r6 - eor r27,r7 - ldi r25,252 - eor r18,r25 - com r19 - com r20 - com r21 - com r26 - com r27 - mov r0,r1 - bst r8,0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r11 - ror r10 - ror r9 - ror r8 - bld r15,5 - bld r0,0 - eor r18,r0 - ldd r0,Z+6 - eor r0,r27 - std Z+6,r0 - ldd r0,Z+7 - eor r0,r26 - std Z+7,r0 - ldd r0,Z+8 - eor r0,r21 - std Z+8,r0 - ldd r0,Z+9 - eor r0,r20 - std Z+9,r0 - ldd r0,Z+10 - eor r0,r19 - std Z+10,r0 - ldd r0,Z+11 - eor r0,r18 - std Z+11,r0 - ldd r7,Z+18 - ldd r6,Z+19 - ldd r5,Z+20 - ldd r4,Z+21 - ldd r3,Z+22 - ldd r2,Z+23 - mov r18,r7 - mov r19,r2 - mov r20,r3 - mov r21,r4 - mov r26,r5 - mov r27,r6 - lsl r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - adc r2,r1 - and r18,r2 - and r19,r3 - and r20,r4 - and r21,r5 - and r26,r6 - and r27,r7 - lsl r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - adc r2,r1 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - eor r26,r6 - eor r27,r7 - ld r0,Z - eor r27,r0 - ldd r0,Z+1 - eor r26,r0 - ldd r0,Z+2 - eor r21,r0 - ldd r0,Z+3 - eor r20,r0 - ldd r0,Z+4 - eor r19,r0 - ldd r0,Z+5 - eor r18,r0 - ldd r0,Z+12 - eor r0,r27 - std Z+12,r0 - ldd r0,Z+13 - eor r0,r26 - std Z+13,r0 - ldd r0,Z+14 - eor r0,r21 - std Z+14,r0 - ldd r0,Z+15 - eor r0,r20 - std Z+15,r0 - ldd r0,Z+16 - eor r0,r19 - std Z+16,r0 - ldd r0,Z+17 - eor r0,r18 - std Z+17,r0 - ldd r27,Z+6 - ldd r26,Z+7 - ldd r21,Z+8 - ldd r20,Z+9 - ldd r19,Z+10 - ldd r18,Z+11 - mov r0,r1 - lsr r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r27,r0 - movw r2,r18 - movw r4,r20 - movw r6,r26 - bst r18,0 - lsr r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - bld r27,7 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - eor r6,r26 - eor r7,r27 - eor r2,r25 - com r3 - com r4 - com r5 - com r6 - com r7 - mov r0,r1 - bst r8,0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r11 - ror r10 - ror r9 - ror r8 - bld r15,5 - bld r0,0 - eor r2,r0 - ld r0,Z - eor r0,r7 - st Z,r0 - ldd r0,Z+1 - eor r0,r6 - std Z+1,r0 - ldd r0,Z+2 - eor r0,r5 - std Z+2,r0 - ldd r0,Z+3 - eor r0,r4 - std Z+3,r0 - ldd r0,Z+4 - eor r0,r3 - std Z+4,r0 - ldd r0,Z+5 - eor r0,r2 - std Z+5,r0 - dec r23 - breq 5323f - rjmp 18b -5323: - dec r22 - breq 375f - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+12 - ldd r3,Z+13 - ldd r4,Z+14 - ldd r5,Z+15 - ldd r6,Z+16 - ldd r7,Z+17 - st Z,r2 - std Z+1,r3 - std Z+2,r4 - std Z+3,r5 - std Z+4,r6 - std Z+5,r7 - std Z+12,r18 - std Z+13,r19 - std Z+14,r20 - std Z+15,r21 - std Z+16,r26 - std Z+17,r27 - ldd r18,Z+6 - ldd r19,Z+7 - ldd r20,Z+8 - ldd r21,Z+9 - ldd r26,Z+10 - ldd r27,Z+11 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - std Z+6,r2 - std Z+7,r3 - std Z+8,r4 - std Z+9,r5 - std Z+10,r6 - std Z+11,r7 - std Z+18,r18 - std Z+19,r19 - std Z+20,r20 - std Z+21,r21 - std Z+22,r26 - std Z+23,r27 - rjmp 16b -375: - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size simp_192_permute, 
.-simp_192_permute - -#endif diff --git a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/internal-simp.c b/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/internal-simp.c deleted file mode 100644 index 5d2144e..0000000 --- a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/internal-simp.c +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-simp.h" - -#if !defined(__AVR__) - -/** - * \brief Number of rounds for the inner block cipher within SimP-256. - */ -#define SIMP_256_ROUNDS 34 - -/** - * \brief Number of rounds for the inner block cipher within SimP-192. - */ -#define SIMP_192_ROUNDS 26 - -/** - * \brief Round constants for each of the rounds in SimP-256 or SimP-192. - * - * Bit i is the round constant for round i, repeated every 62 rounds. 
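The way that 62-bit constant is consumed can be seen in isolation: each round takes bit 0 of z and then applies the z = (z >> 1) | (z << 61) update used by the permutation code below, so successive rounds read successive bits of SIMP_RC. A small sketch printing the first 16 round constants, with the value taken from the definition just below:

#include <stdint.h>
#include <stdio.h>

#define SIMP_RC 0x3369F885192C0EF5ULL

int main(void)
{
    uint64_t z = SIMP_RC;
    int round;
    for (round = 0; round < 16; ++round) {
        printf("%d", (int)(z & 1));  /* round constant for this round */
        z = (z >> 1) | (z << 61);    /* same update as the permutation */
    }
    printf("\n");
    return 0;
}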
- */ -#define SIMP_RC 0x3369F885192C0EF5ULL - -void simp_256_permute(unsigned char state[SIMP_256_STATE_SIZE], unsigned steps) -{ - uint64_t z = SIMP_RC; - uint64_t x0, x1, x2, x3, t0, t1; - unsigned round; - - /* Load the state into local variables */ - x0 = be_load_word64(state); - x1 = be_load_word64(state + 8); - x2 = be_load_word64(state + 16); - x3 = be_load_word64(state + 24); - - /* Perform all steps */ - for (; steps > 0; --steps) { - /* Perform all rounds for this step, two at a time */ - for (round = 0; round < (SIMP_256_ROUNDS / 2); ++round) { - t1 = x3 ^ (leftRotate1_64(x2) & leftRotate8_64(x2)) ^ - leftRotate2_64(x2) ^ x1; - t0 = x1 ^ rightRotate3_64(x0) ^ rightRotate4_64(x0) ^ - 0xFFFFFFFFFFFFFFFCULL ^ (z & 1); - z = (z >> 1) | (z << 61); /* z repeats every 62 rounds */ - x2 = x2 ^ (leftRotate1_64(t1) & leftRotate8_64(t1)) ^ - leftRotate2_64(t1) ^ x0; - x0 = x0 ^ rightRotate3_64(t0) ^ rightRotate4_64(t0) ^ - 0xFFFFFFFFFFFFFFFCULL ^ (z & 1); - x1 = t0; - x3 = t1; - z = (z >> 1) | (z << 61); /* z repeats every 62 rounds */ - } - - /* Swap the words of the state for all steps except the last */ - if (steps > 1) { - t0 = x0; - t1 = x1; - x0 = x2; - x1 = x3; - x2 = t0; - x3 = t1; - } - } - - /* Write the local variables back to the state */ - be_store_word64(state, x0); - be_store_word64(state + 8, x1); - be_store_word64(state + 16, x2); - be_store_word64(state + 24, x3); -} - -/* Load a big-endian 48-bit word from a byte buffer */ -#define be_load_word48(ptr) \ - ((((uint64_t)((ptr)[0])) << 40) | \ - (((uint64_t)((ptr)[1])) << 32) | \ - (((uint64_t)((ptr)[2])) << 24) | \ - (((uint64_t)((ptr)[3])) << 16) | \ - (((uint64_t)((ptr)[4])) << 8) | \ - ((uint64_t)((ptr)[5]))) - -/* Store a big-endian 48-bit word into a byte buffer */ -#define be_store_word48(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 40); \ - (ptr)[1] = (uint8_t)(_x >> 32); \ - (ptr)[2] = (uint8_t)(_x >> 24); \ - (ptr)[3] = (uint8_t)(_x >> 16); \ - (ptr)[4] = (uint8_t)(_x >> 8); \ - (ptr)[5] = (uint8_t)_x; \ - } while (0) - -/* 48-bit rotations with the high bits set to garbage - truncated later */ -#define rightRotate3_48(x) (((x) >> 3) | ((x) << 45)) -#define rightRotate4_48(x) (((x) >> 4) | ((x) << 44)) -#define leftRotate1_48(x) (((x) << 1) | ((x) >> 47)) -#define leftRotate2_48(x) (((x) << 2) | ((x) >> 46)) -#define leftRotate8_48(x) (((x) << 8) | ((x) >> 40)) - -void simp_192_permute(unsigned char state[SIMP_192_STATE_SIZE], unsigned steps) -{ - uint64_t z = SIMP_RC; - uint64_t x0, x1, x2, x3, t0, t1; - unsigned round; - - /* Load the state into local variables */ - x0 = be_load_word48(state); - x1 = be_load_word48(state + 6); - x2 = be_load_word48(state + 12); - x3 = be_load_word48(state + 18); - - /* Perform all steps */ - for (; steps > 0; --steps) { - /* Perform all rounds for this step, two at a time */ - for (round = 0; round < (SIMP_192_ROUNDS / 2); ++round) { - t1 = x3 ^ (leftRotate1_48(x2) & leftRotate8_48(x2)) ^ - leftRotate2_48(x2) ^ x1; - t0 = x1 ^ rightRotate3_48(x0) ^ rightRotate4_48(x0) ^ - 0xFFFFFFFFFFFFFFFCULL ^ (z & 1); - t0 &= 0x0000FFFFFFFFFFFFULL; /* Truncate back to 48 bits */ - t1 &= 0x0000FFFFFFFFFFFFULL; - z = (z >> 1) | (z << 61); /* z repeats every 62 rounds */ - x2 = x2 ^ (leftRotate1_48(t1) & leftRotate8_48(t1)) ^ - leftRotate2_48(t1) ^ x0; - x0 = x0 ^ rightRotate3_48(t0) ^ rightRotate4_48(t0) ^ - 0xFFFFFFFFFFFFFFFCULL ^ (z & 1); - x0 &= 0x0000FFFFFFFFFFFFULL; - x2 &= 0x0000FFFFFFFFFFFFULL; - x1 = t0; - x3 = t1; - z = (z >> 1) | (z << 61); /* z repeats every 
62 rounds */ - } - - /* Swap the words of the state for all steps except the last */ - if (steps > 1) { - t0 = x0; - t1 = x1; - x0 = x2; - x1 = x3; - x2 = t0; - x3 = t1; - } - } - - /* Write the local variables back to the state */ - be_store_word48(state, x0); - be_store_word48(state + 6, x1); - be_store_word48(state + 12, x2); - be_store_word48(state + 18, x3); -} - -#endif /* !__AVR__ */ diff --git a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/internal-simp.h b/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/internal-simp.h deleted file mode 100644 index 3a95e80..0000000 --- a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/internal-simp.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SIMP_H -#define LW_INTERNAL_SIMP_H - -#include "internal-util.h" - -/** - * \file internal-simp.h - * \brief SimP permutation family. - * - * SimP-256 and SimP-192 are used by the Oribatida submission to - * round 2 of the NIST Lightweight Cryptography Competition. - * The permutations are built around reduced-round variants of the - * Simon-128-128 and Simon-96-96 block ciphers. - * - * References: https://www.isical.ac.in/~lightweight/oribatida/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief State size of the SimP-256 permutation. - */ -#define SIMP_256_STATE_SIZE 32 - -/** - * \brief State size of the SimP-192 permutation. - */ -#define SIMP_192_STATE_SIZE 24 - -/** - * \brief Permutes a state with SimP-256. - * - * \param state State to be permuted. - * \param steps Number of steps to perform (usually 2 or 4). - */ -void simp_256_permute(unsigned char state[SIMP_256_STATE_SIZE], unsigned steps); - -/** - * \brief Permutes a state with SimP-192. - * - * \param state State to be permuted. - * \param steps Number of steps to perform (usually 2 or 4). - */ -void simp_192_permute(unsigned char state[SIMP_192_STATE_SIZE], unsigned steps); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/internal-util.h b/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
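A minimal sketch of how the permutation API declared above is driven; the initial state bytes are arbitrary, and the step count follows the header's note that 2 or 4 steps are the usual choices:

#include <string.h>
#include "internal-simp.h"

int main(void)
{
    unsigned char state[SIMP_256_STATE_SIZE];
    memset(state, 0, sizeof(state));
    state[0] = 0x01;                 /* arbitrary non-zero input */
    simp_256_permute(state, 4);      /* 2 or 4 steps are typical per the header */
    return 0;
}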
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) 
| \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = 
(src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/oribatida.c b/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/oribatida.c deleted file mode 100644 index 55a3914..0000000 --- a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/oribatida.c +++ /dev/null @@ -1,480 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "oribatida.h" -#include "internal-simp.h" -#include - -/** - * \brief Rate for processing data for the Oribatida-256-64 state. - */ -#define ORIBATIDA_256_RATE 16 - -/** - * \brief Size of the masking value for Oribatida-256-64. - */ -#define ORIBATIDA_256_MASK_SIZE 8 - -/** - * \brief Rate for processing data for the Oribatida-192-96 state. - */ -#define ORIBATIDA_192_RATE 12 - -/** - * \brief Size of the masking value for Oribatida-192-96. 
- */ -#define ORIBATIDA_192_MASK_SIZE 12 - -aead_cipher_t const oribatida_256_cipher = { - "Oribatida-256-64", - ORIBATIDA_256_KEY_SIZE, - ORIBATIDA_256_NONCE_SIZE, - ORIBATIDA_256_TAG_SIZE, - AEAD_FLAG_NONE, - oribatida_256_aead_encrypt, - oribatida_256_aead_decrypt -}; - -aead_cipher_t const oribatida_192_cipher = { - "Oribatida-192-96", - ORIBATIDA_192_KEY_SIZE, - ORIBATIDA_192_NONCE_SIZE, - ORIBATIDA_192_TAG_SIZE, - AEAD_FLAG_NONE, - oribatida_192_aead_encrypt, - oribatida_192_aead_decrypt -}; - -/* Definitions for domain separation values */ -#define ORIBATIDA_NUM_DOMAINS 3 -#define ORIBATIDA_DOMAIN_NONCE 0 -#define ORIBATIDA_DOMAIN_AD 1 -#define ORIBATIDA_DOMAIN_MSG 2 - -/** - * \brief Gets the domain separation values to use for different phases - * of the Oribatida encryption process. - * - * \param domains Returns the domain separation values to use. - * \param adlen Length of the associated data. - * \param mlen Length of the plaintext message. - * \param rate Rate of processing message blocks, 12 or 16. - */ -static void oribatida_get_domains - (unsigned char domains[ORIBATIDA_NUM_DOMAINS], - unsigned long long adlen, unsigned long long mlen, unsigned rate) -{ - /* Domain separation value for the nonce */ - if (adlen == 0 && mlen == 0) { - domains[ORIBATIDA_DOMAIN_NONCE] = 9; - } else { - domains[ORIBATIDA_DOMAIN_NONCE] = 5; - } - - /* Domain separation value for associated data processing */ - if (mlen == 0) { - if ((adlen % rate) == 0) - domains[ORIBATIDA_DOMAIN_AD] = 12; - else - domains[ORIBATIDA_DOMAIN_AD] = 14; - } else { - if ((adlen % rate) == 0) - domains[ORIBATIDA_DOMAIN_AD] = 4; - else - domains[ORIBATIDA_DOMAIN_AD] = 6; - } - - /* Domain separation value for message processing */ - if ((mlen % rate) == 0) { - domains[ORIBATIDA_DOMAIN_MSG] = 13; - } else { - domains[ORIBATIDA_DOMAIN_MSG] = 15; - } -} - -/** - * \brief Initializes the Oribatida-256-64 state. - * - * \param state Oribatida-256-64 permutation state. - * \param mask Oribatida-256-64 masking state. - * \param domains Precomputed domain separation values. - * \param k Points to the key. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data. 
- */ -static void oribatida_256_init - (unsigned char state[SIMP_256_STATE_SIZE], - unsigned char mask[ORIBATIDA_256_MASK_SIZE], - const unsigned char domains[ORIBATIDA_NUM_DOMAINS], - const unsigned char *k, const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned temp; - - /* Initialize the state with the key and nonce */ - memcpy(state, npub, ORIBATIDA_256_NONCE_SIZE); - memcpy(state + ORIBATIDA_256_NONCE_SIZE, k, ORIBATIDA_256_KEY_SIZE); - - /* Use the current state as the mask for zero-length associated data */ - if (adlen == 0) { - memcpy(mask, state + SIMP_256_STATE_SIZE - ORIBATIDA_256_MASK_SIZE, - ORIBATIDA_256_MASK_SIZE); - } - - /* Add the domain separation value for the nonce */ - state[SIMP_256_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_NONCE]; - - /* Run the permutation for the first time */ - simp_256_permute(state, 4); - - /* If there is no associated data, then we are done */ - if (adlen == 0) - return; - - /* Use the current state as the mask for non-zero length associated data */ - memcpy(mask, state + SIMP_256_STATE_SIZE - ORIBATIDA_256_MASK_SIZE, - ORIBATIDA_256_MASK_SIZE); - - /* Process all associated data blocks except the last */ - while (adlen > ORIBATIDA_256_RATE) { - lw_xor_block(state, ad, ORIBATIDA_256_RATE); - simp_256_permute(state, 2); - ad += ORIBATIDA_256_RATE; - adlen -= ORIBATIDA_256_RATE; - } - - /* Process the final associated data block */ - temp = (unsigned)adlen; - if (temp == ORIBATIDA_256_RATE) { - lw_xor_block(state, ad, ORIBATIDA_256_RATE); - } else { - lw_xor_block(state, ad, temp); - state[temp] ^= 0x80; /* padding */ - } - state[SIMP_256_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_AD]; - simp_256_permute(state, 4); -} - -/** - * \brief Initializes the Oribatida-192-96 state. - * - * \param state Oribatida-192-96 permutation state. - * \param mask Oribatida-192-96 masking state. - * \param domains Precomputed domain separation values. - * \param k Points to the key. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data. 
- */ -static void oribatida_192_init - (unsigned char state[SIMP_192_STATE_SIZE], - unsigned char mask[ORIBATIDA_192_MASK_SIZE], - const unsigned char domains[ORIBATIDA_NUM_DOMAINS], - const unsigned char *k, const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned temp; - - /* Initialize the state with the key and nonce */ - memcpy(state, npub, ORIBATIDA_192_NONCE_SIZE); - memcpy(state + ORIBATIDA_192_NONCE_SIZE, k, ORIBATIDA_256_KEY_SIZE); - - /* Use the current state as the mask for zero-length associated data */ - if (adlen == 0) { - memcpy(mask, state + SIMP_192_STATE_SIZE - ORIBATIDA_192_MASK_SIZE, - ORIBATIDA_192_MASK_SIZE); - } - - /* Add the domain separation value for the nonce */ - state[SIMP_192_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_NONCE]; - - /* Run the permutation for the first time */ - simp_192_permute(state, 4); - - /* If there is no associated data, then we are done */ - if (adlen == 0) - return; - - /* Use the current state as the mask for non-zero length associated data */ - memcpy(mask, state + SIMP_192_STATE_SIZE - ORIBATIDA_192_MASK_SIZE, - ORIBATIDA_192_MASK_SIZE); - - /* Process all associated data blocks except the last */ - while (adlen > ORIBATIDA_192_RATE) { - lw_xor_block(state, ad, ORIBATIDA_192_RATE); - simp_192_permute(state, 2); - ad += ORIBATIDA_192_RATE; - adlen -= ORIBATIDA_192_RATE; - } - - /* Process the final associated data block */ - temp = (unsigned)adlen; - if (temp == ORIBATIDA_192_RATE) { - lw_xor_block(state, ad, ORIBATIDA_192_RATE); - } else { - lw_xor_block(state, ad, temp); - state[temp] ^= 0x80; /* padding */ - } - state[SIMP_192_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_AD]; - simp_192_permute(state, 4); -} - -int oribatida_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[SIMP_256_STATE_SIZE]; - unsigned char mask[ORIBATIDA_256_MASK_SIZE]; - unsigned char domains[ORIBATIDA_NUM_DOMAINS]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ORIBATIDA_256_TAG_SIZE; - - /* Initialize the state and absorb the associated data */ - oribatida_get_domains(domains, adlen, mlen, ORIBATIDA_256_RATE); - oribatida_256_init(state, mask, domains, k, npub, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - while (mlen > ORIBATIDA_256_RATE) { - lw_xor_block_2_dest(c, state, m, ORIBATIDA_256_RATE); - lw_xor_block(c + ORIBATIDA_256_RATE - ORIBATIDA_256_MASK_SIZE, - mask, ORIBATIDA_256_MASK_SIZE); - memcpy(mask, state + SIMP_256_STATE_SIZE - ORIBATIDA_256_MASK_SIZE, - ORIBATIDA_256_MASK_SIZE); - simp_256_permute(state, 4); - c += ORIBATIDA_256_RATE; - m += ORIBATIDA_256_RATE; - mlen -= ORIBATIDA_256_RATE; - } - if (mlen == ORIBATIDA_256_RATE) { - lw_xor_block_2_dest(c, state, m, ORIBATIDA_256_RATE); - lw_xor_block(c + ORIBATIDA_256_RATE - ORIBATIDA_256_MASK_SIZE, - mask, ORIBATIDA_256_MASK_SIZE); - state[SIMP_256_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_MSG]; - simp_256_permute(state, 4); - } else if (mlen > 0) { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_dest(c, state, m, temp); - if (temp > (ORIBATIDA_256_RATE - ORIBATIDA_256_MASK_SIZE)) { - lw_xor_block - (c + ORIBATIDA_256_RATE - ORIBATIDA_256_MASK_SIZE, mask, - temp - (ORIBATIDA_256_RATE - ORIBATIDA_256_MASK_SIZE)); - } - state[temp] ^= 0x80; /* padding */ - 
state[SIMP_256_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_MSG]; - simp_256_permute(state, 4); - } - - /* Generate the authentication tag */ - memcpy(c + mlen, state, ORIBATIDA_256_TAG_SIZE); - return 0; -} - -int oribatida_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[SIMP_256_STATE_SIZE]; - unsigned char mask[ORIBATIDA_256_MASK_SIZE]; - unsigned char domains[ORIBATIDA_NUM_DOMAINS]; - unsigned char block[ORIBATIDA_256_RATE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ORIBATIDA_256_TAG_SIZE) - return -1; - *mlen = clen - ORIBATIDA_256_TAG_SIZE; - - /* Initialize the state and absorb the associated data */ - clen -= ORIBATIDA_256_TAG_SIZE; - oribatida_get_domains(domains, adlen, clen, ORIBATIDA_256_RATE); - oribatida_256_init(state, mask, domains, k, npub, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - while (clen > ORIBATIDA_256_RATE) { - memcpy(block, c, ORIBATIDA_256_RATE); - lw_xor_block(block + ORIBATIDA_256_RATE - ORIBATIDA_256_MASK_SIZE, - mask, ORIBATIDA_256_MASK_SIZE); - lw_xor_block_swap(m, state, block, ORIBATIDA_256_RATE); - memcpy(mask, state + SIMP_256_STATE_SIZE - ORIBATIDA_256_MASK_SIZE, - ORIBATIDA_256_MASK_SIZE); - simp_256_permute(state, 4); - c += ORIBATIDA_256_RATE; - m += ORIBATIDA_256_RATE; - clen -= ORIBATIDA_256_RATE; - } - if (clen == ORIBATIDA_256_RATE) { - memcpy(block, c, ORIBATIDA_256_RATE); - lw_xor_block(block + ORIBATIDA_256_RATE - ORIBATIDA_256_MASK_SIZE, - mask, ORIBATIDA_256_MASK_SIZE); - lw_xor_block_swap(m, state, block, ORIBATIDA_256_RATE); - state[SIMP_256_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_MSG]; - simp_256_permute(state, 4); - } else if (clen > 0) { - unsigned temp = (unsigned)clen; - memcpy(block, c, temp); - if (temp > (ORIBATIDA_256_RATE - ORIBATIDA_256_MASK_SIZE)) { - lw_xor_block - (block + ORIBATIDA_256_RATE - ORIBATIDA_256_MASK_SIZE, mask, - temp - (ORIBATIDA_256_RATE - ORIBATIDA_256_MASK_SIZE)); - } - lw_xor_block_swap(m, state, block, temp); - state[temp] ^= 0x80; /* padding */ - state[SIMP_256_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_MSG]; - simp_256_permute(state, 4); - } - c += clen; - - /* Check the authentication tag */ - return aead_check_tag(mtemp, *mlen, state, c, ORIBATIDA_256_TAG_SIZE); -} - -int oribatida_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[SIMP_192_STATE_SIZE]; - unsigned char mask[ORIBATIDA_192_MASK_SIZE]; - unsigned char domains[ORIBATIDA_NUM_DOMAINS]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ORIBATIDA_192_TAG_SIZE; - - /* Initialize the state and absorb the associated data */ - oribatida_get_domains(domains, adlen, mlen, ORIBATIDA_192_RATE); - oribatida_192_init(state, mask, domains, k, npub, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - while (mlen > ORIBATIDA_192_RATE) { - lw_xor_block_2_dest(c, state, m, ORIBATIDA_192_RATE); - lw_xor_block(c + ORIBATIDA_192_RATE - ORIBATIDA_192_MASK_SIZE, - mask, ORIBATIDA_192_MASK_SIZE); - memcpy(mask, state + SIMP_192_STATE_SIZE - 
ORIBATIDA_192_MASK_SIZE, - ORIBATIDA_192_MASK_SIZE); - simp_192_permute(state, 4); - c += ORIBATIDA_192_RATE; - m += ORIBATIDA_192_RATE; - mlen -= ORIBATIDA_192_RATE; - } - if (mlen == ORIBATIDA_192_RATE) { - lw_xor_block_2_dest(c, state, m, ORIBATIDA_192_RATE); - lw_xor_block(c + ORIBATIDA_192_RATE - ORIBATIDA_192_MASK_SIZE, - mask, ORIBATIDA_192_MASK_SIZE); - state[SIMP_192_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_MSG]; - simp_192_permute(state, 4); - } else if (mlen > 0) { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_dest(c, state, m, temp); - if (temp > (ORIBATIDA_192_RATE - ORIBATIDA_192_MASK_SIZE)) { - lw_xor_block - (c + ORIBATIDA_192_RATE - ORIBATIDA_192_MASK_SIZE, mask, - temp - (ORIBATIDA_192_RATE - ORIBATIDA_192_MASK_SIZE)); - } - state[temp] ^= 0x80; /* padding */ - state[SIMP_192_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_MSG]; - simp_192_permute(state, 4); - } - - /* Generate the authentication tag */ - memcpy(c + mlen, state, ORIBATIDA_192_TAG_SIZE); - return 0; -} - -int oribatida_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[SIMP_192_STATE_SIZE]; - unsigned char mask[ORIBATIDA_192_MASK_SIZE]; - unsigned char domains[ORIBATIDA_NUM_DOMAINS]; - unsigned char block[ORIBATIDA_192_RATE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ORIBATIDA_192_TAG_SIZE) - return -1; - *mlen = clen - ORIBATIDA_192_TAG_SIZE; - - /* Initialize the state and absorb the associated data */ - clen -= ORIBATIDA_192_TAG_SIZE; - oribatida_get_domains(domains, adlen, clen, ORIBATIDA_192_RATE); - oribatida_192_init(state, mask, domains, k, npub, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - while (clen > ORIBATIDA_192_RATE) { - memcpy(block, c, ORIBATIDA_192_RATE); - lw_xor_block(block + ORIBATIDA_192_RATE - ORIBATIDA_192_MASK_SIZE, - mask, ORIBATIDA_192_MASK_SIZE); - lw_xor_block_swap(m, state, block, ORIBATIDA_192_RATE); - memcpy(mask, state + SIMP_192_STATE_SIZE - ORIBATIDA_192_MASK_SIZE, - ORIBATIDA_192_MASK_SIZE); - simp_192_permute(state, 4); - c += ORIBATIDA_192_RATE; - m += ORIBATIDA_192_RATE; - clen -= ORIBATIDA_192_RATE; - } - if (clen == ORIBATIDA_192_RATE) { - memcpy(block, c, ORIBATIDA_192_RATE); - lw_xor_block(block + ORIBATIDA_192_RATE - ORIBATIDA_192_MASK_SIZE, - mask, ORIBATIDA_192_MASK_SIZE); - lw_xor_block_swap(m, state, block, ORIBATIDA_192_RATE); - state[SIMP_192_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_MSG]; - simp_192_permute(state, 4); - } else if (clen > 0) { - unsigned temp = (unsigned)clen; - memcpy(block, c, temp); - if (temp > (ORIBATIDA_192_RATE - ORIBATIDA_192_MASK_SIZE)) { - lw_xor_block - (block + ORIBATIDA_192_RATE - ORIBATIDA_192_MASK_SIZE, mask, - temp - (ORIBATIDA_192_RATE - ORIBATIDA_192_MASK_SIZE)); - } - lw_xor_block_swap(m, state, block, temp); - state[temp] ^= 0x80; /* padding */ - state[SIMP_192_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_MSG]; - simp_192_permute(state, 4); - } - c += clen; - - /* Check the authentication tag */ - return aead_check_tag(mtemp, *mlen, state, c, ORIBATIDA_192_TAG_SIZE); -} diff --git a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/oribatida.h b/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/oribatida.h deleted file mode 100644 index dbc374b..0000000 --- 
a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys-avr/oribatida.h +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ORIBATIDA_H -#define LWCRYPTO_ORIBATIDA_H - -#include "aead-common.h" - -/** - * \file oribatida.h - * \brief Oribatida authenticated encryption algorithm. - * - * Oribatida is a family of authenticated encryption algorithms based on the - * SimP-256 and SimP-192 permutations which are built around reduced-round - * variants of the Simon-128-128 and Simon-96-96 block ciphers. - * There are two algorithms in the family: - * - * \li Oribatida-256-64 with a 128-bit key, a 128-bit nonce, and a 128-bit tag, - * built around the SimP-256 permutation. This is the primary member of - * the family. - * \li Oribatida-192-96 with a 128-bit key, a 64-bit nonce, and a 96-bit tag, - * built around the SimP-192 permutation. - * - * References: https://www.isical.ac.in/~lightweight/oribatida/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for Oribatida-256-64. - */ -#define ORIBATIDA_256_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Oribatida-256-64. - */ -#define ORIBATIDA_256_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Oribatida-256-64. - */ -#define ORIBATIDA_256_NONCE_SIZE 16 - -/** - * \brief Size of the key for Oribatida-192-96. - */ -#define ORIBATIDA_192_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Oribatida-192-96. - */ -#define ORIBATIDA_192_TAG_SIZE 12 - -/** - * \brief Size of the nonce for Oribatida-192-96. - */ -#define ORIBATIDA_192_NONCE_SIZE 8 - -/** - * \brief Meta-information block for the Oribatida-256-64 cipher. - */ -extern aead_cipher_t const oribatida_256_cipher; - -/** - * \brief Meta-information block for the Oribatida-192-96 cipher. - */ -extern aead_cipher_t const oribatida_192_cipher; - -/** - * \brief Encrypts and authenticates a packet with Oribatida-256-64. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa oribatida_256_aead_decrypt() - */ -int oribatida_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Oribatida-256-64. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa oribatida_256_aead_encrypt() - */ -int oribatida_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Oribatida-192-96. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 12 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 8 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa oribatida_192_aead_decrypt() - */ -int oribatida_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Oribatida-192-96. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. 
- * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 12 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 8 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa oribatida_192_aead_encrypt() - */ -int oribatida_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys/internal-simp-avr.S b/oribatida/Implementations/crypto_aead/oribatida192v12/rhys/internal-simp-avr.S new file mode 100644 index 0000000..65fba20 --- /dev/null +++ b/oribatida/Implementations/crypto_aead/oribatida192v12/rhys/internal-simp-avr.S @@ -0,0 +1,949 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global simp_256_permute + .type simp_256_permute, @function +simp_256_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ldi r23,245 + mov r10,r23 + ldi r17,14 + mov r11,r17 + ldi r16,44 + mov r12,r16 + ldi r23,25 + mov r13,r23 + ldi r23,133 + mov r14,r23 + ldi r23,248 + mov r15,r23 + ldi r24,105 + ldi r25,51 +14: + ldi r23,17 +16: + ldd r29,Z+16 + ldd r28,Z+17 + ldd r27,Z+18 + ldd r26,Z+19 + ldd r21,Z+20 + ldd r20,Z+21 + ldd r19,Z+22 + ldd r18,Z+23 + mov r2,r29 + mov r3,r18 + mov r4,r19 + mov r5,r20 + mov r6,r21 + mov r7,r26 + mov r8,r27 + mov r9,r28 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r28 + rol r29 + adc r18,r1 + and r2,r18 + and r3,r19 + and r4,r20 + and r5,r21 + and r6,r26 + and r7,r27 + and r8,r28 + and r9,r29 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r28 + rol r29 + adc r18,r1 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + eor r6,r26 + eor r7,r27 + eor r8,r28 + eor r9,r29 + ldd r0,Z+8 + eor r9,r0 + ldd r0,Z+9 + eor r8,r0 + ldd r0,Z+10 + eor r7,r0 + ldd r0,Z+11 + eor r6,r0 + ldd r0,Z+12 + eor r5,r0 + ldd r0,Z+13 + eor r4,r0 + ldd r0,Z+14 + eor r3,r0 + ldd r0,Z+15 + eor r2,r0 + ldd r0,Z+24 + eor r0,r9 + std Z+24,r0 + ldd r0,Z+25 + eor r0,r8 + std Z+25,r0 + ldd r0,Z+26 + eor r0,r7 + std Z+26,r0 + ldd r0,Z+27 + eor r0,r6 + std Z+27,r0 + ldd r0,Z+28 + eor r0,r5 + std Z+28,r0 + ldd r0,Z+29 + eor r0,r4 + std Z+29,r0 + ldd r0,Z+30 + eor r0,r3 + std Z+30,r0 + ldd r0,Z+31 + eor r0,r2 + std Z+31,r0 + ld r29,Z + ldd r28,Z+1 + ldd r27,Z+2 + ldd r26,Z+3 + ldd r21,Z+4 + ldd r20,Z+5 + ldd r19,Z+6 + ldd r18,Z+7 + mov r0,r1 + lsr r29 + ror r28 + ror r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r29 + ror r28 + ror r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r29 + ror r28 + ror r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + or 
r29,r0 + movw r2,r18 + movw r4,r20 + movw r6,r26 + movw r8,r28 + bst r2,0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r3 + ror r2 + bld r9,7 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + eor r26,r6 + eor r27,r7 + eor r28,r8 + eor r29,r9 + ldi r17,252 + eor r18,r17 + com r19 + com r20 + com r21 + com r26 + com r27 + com r28 + com r29 + mov r0,r1 + bst r10,0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r11 + ror r10 + bld r25,5 + bld r0,0 + eor r18,r0 + ldd r0,Z+8 + eor r0,r29 + std Z+8,r0 + ldd r0,Z+9 + eor r0,r28 + std Z+9,r0 + ldd r0,Z+10 + eor r0,r27 + std Z+10,r0 + ldd r0,Z+11 + eor r0,r26 + std Z+11,r0 + ldd r0,Z+12 + eor r0,r21 + std Z+12,r0 + ldd r0,Z+13 + eor r0,r20 + std Z+13,r0 + ldd r0,Z+14 + eor r0,r19 + std Z+14,r0 + ldd r0,Z+15 + eor r0,r18 + std Z+15,r0 + ldd r9,Z+24 + ldd r8,Z+25 + ldd r7,Z+26 + ldd r6,Z+27 + ldd r5,Z+28 + ldd r4,Z+29 + ldd r3,Z+30 + ldd r2,Z+31 + mov r18,r9 + mov r19,r2 + mov r20,r3 + mov r21,r4 + mov r26,r5 + mov r27,r6 + mov r28,r7 + mov r29,r8 + lsl r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + adc r2,r1 + and r18,r2 + and r19,r3 + and r20,r4 + and r21,r5 + and r26,r6 + and r27,r7 + and r28,r8 + and r29,r9 + lsl r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + adc r2,r1 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + eor r26,r6 + eor r27,r7 + eor r28,r8 + eor r29,r9 + ld r0,Z + eor r29,r0 + ldd r0,Z+1 + eor r28,r0 + ldd r0,Z+2 + eor r27,r0 + ldd r0,Z+3 + eor r26,r0 + ldd r0,Z+4 + eor r21,r0 + ldd r0,Z+5 + eor r20,r0 + ldd r0,Z+6 + eor r19,r0 + ldd r0,Z+7 + eor r18,r0 + ldd r0,Z+16 + eor r0,r29 + std Z+16,r0 + ldd r0,Z+17 + eor r0,r28 + std Z+17,r0 + ldd r0,Z+18 + eor r0,r27 + std Z+18,r0 + ldd r0,Z+19 + eor r0,r26 + std Z+19,r0 + ldd r0,Z+20 + eor r0,r21 + std Z+20,r0 + ldd r0,Z+21 + eor r0,r20 + std Z+21,r0 + ldd r0,Z+22 + eor r0,r19 + std Z+22,r0 + ldd r0,Z+23 + eor r0,r18 + std Z+23,r0 + ldd r29,Z+8 + ldd r28,Z+9 + ldd r27,Z+10 + ldd r26,Z+11 + ldd r21,Z+12 + ldd r20,Z+13 + ldd r19,Z+14 + ldd r18,Z+15 + mov r0,r1 + lsr r29 + ror r28 + ror r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r29 + ror r28 + ror r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r29 + ror r28 + ror r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r29,r0 + movw r2,r18 + movw r4,r20 + movw r6,r26 + movw r8,r28 + bst r18,0 + lsr r29 + ror r28 + ror r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + bld r29,7 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + eor r6,r26 + eor r7,r27 + eor r8,r28 + eor r9,r29 + eor r2,r17 + com r3 + com r4 + com r5 + com r6 + com r7 + com r8 + com r9 + mov r0,r1 + bst r10,0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r11 + ror r10 + bld r25,5 + bld r0,0 + eor r2,r0 + ld r0,Z + eor r0,r9 + st Z,r0 + ldd r0,Z+1 + eor r0,r8 + std Z+1,r0 + ldd r0,Z+2 + eor r0,r7 + std Z+2,r0 + ldd r0,Z+3 + eor r0,r6 + std Z+3,r0 + ldd r0,Z+4 + eor r0,r5 + std Z+4,r0 + ldd r0,Z+5 + eor r0,r4 + std Z+5,r0 + ldd r0,Z+6 + eor r0,r3 + std Z+6,r0 + ldd r0,Z+7 + eor r0,r2 + std Z+7,r0 + dec r23 + breq 5407f + rjmp 16b +5407: + dec r22 + brne 5409f + rjmp 475f +5409: + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r28,Z+6 + ldd r29,Z+7 + ldd r2,Z+16 + ldd r3,Z+17 + ldd r4,Z+18 + ldd r5,Z+19 + ldd r6,Z+20 + ldd r7,Z+21 + ldd r8,Z+22 + ldd r9,Z+23 + st Z,r2 + std Z+1,r3 + std Z+2,r4 + std Z+3,r5 + std Z+4,r6 + std Z+5,r7 + std Z+6,r8 + std Z+7,r9 + std Z+16,r18 
+ std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + std Z+20,r26 + std Z+21,r27 + std Z+22,r28 + std Z+23,r29 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r28,Z+14 + ldd r29,Z+15 + ldd r2,Z+24 + ldd r3,Z+25 + ldd r4,Z+26 + ldd r5,Z+27 + ldd r6,Z+28 + ldd r7,Z+29 + ldd r8,Z+30 + ldd r9,Z+31 + std Z+8,r2 + std Z+9,r3 + std Z+10,r4 + std Z+11,r5 + std Z+12,r6 + std Z+13,r7 + std Z+14,r8 + std Z+15,r9 + std Z+24,r18 + std Z+25,r19 + std Z+26,r20 + std Z+27,r21 + std Z+28,r26 + std Z+29,r27 + std Z+30,r28 + std Z+31,r29 + rjmp 14b +475: + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size simp_256_permute, .-simp_256_permute + + .text +.global simp_192_permute + .type simp_192_permute, @function +simp_192_permute: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ldi r25,245 + mov r8,r25 + ldi r24,14 + mov r9,r24 + ldi r23,44 + mov r10,r23 + ldi r17,25 + mov r11,r17 + ldi r16,133 + mov r12,r16 + ldi r23,248 + mov r13,r23 + ldi r23,105 + mov r14,r23 + ldi r23,51 + mov r15,r23 +16: + ldi r23,13 +18: + ldd r27,Z+12 + ldd r26,Z+13 + ldd r21,Z+14 + ldd r20,Z+15 + ldd r19,Z+16 + ldd r18,Z+17 + mov r2,r27 + mov r3,r18 + mov r4,r19 + mov r5,r20 + mov r6,r21 + mov r7,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + adc r18,r1 + and r2,r18 + and r3,r19 + and r4,r20 + and r5,r21 + and r6,r26 + and r7,r27 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + adc r18,r1 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + eor r6,r26 + eor r7,r27 + ldd r0,Z+6 + eor r7,r0 + ldd r0,Z+7 + eor r6,r0 + ldd r0,Z+8 + eor r5,r0 + ldd r0,Z+9 + eor r4,r0 + ldd r0,Z+10 + eor r3,r0 + ldd r0,Z+11 + eor r2,r0 + ldd r0,Z+18 + eor r0,r7 + std Z+18,r0 + ldd r0,Z+19 + eor r0,r6 + std Z+19,r0 + ldd r0,Z+20 + eor r0,r5 + std Z+20,r0 + ldd r0,Z+21 + eor r0,r4 + std Z+21,r0 + ldd r0,Z+22 + eor r0,r3 + std Z+22,r0 + ldd r0,Z+23 + eor r0,r2 + std Z+23,r0 + ld r27,Z + ldd r26,Z+1 + ldd r21,Z+2 + ldd r20,Z+3 + ldd r19,Z+4 + ldd r18,Z+5 + mov r0,r1 + lsr r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r27,r0 + movw r2,r18 + movw r4,r20 + movw r6,r26 + bst r2,0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r3 + ror r2 + bld r7,7 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + eor r26,r6 + eor r27,r7 + ldi r25,252 + eor r18,r25 + com r19 + com r20 + com r21 + com r26 + com r27 + mov r0,r1 + bst r8,0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r11 + ror r10 + ror r9 + ror r8 + bld r15,5 + bld r0,0 + eor r18,r0 + ldd r0,Z+6 + eor r0,r27 + std Z+6,r0 + ldd r0,Z+7 + eor r0,r26 + std Z+7,r0 + ldd r0,Z+8 + eor r0,r21 + std Z+8,r0 + ldd r0,Z+9 + eor r0,r20 + std Z+9,r0 + ldd r0,Z+10 + eor r0,r19 + std Z+10,r0 + ldd r0,Z+11 + eor r0,r18 + std Z+11,r0 + ldd r7,Z+18 + ldd r6,Z+19 + ldd r5,Z+20 + ldd r4,Z+21 + ldd r3,Z+22 + ldd r2,Z+23 + mov r18,r7 + mov r19,r2 + mov r20,r3 + mov r21,r4 + mov r26,r5 + mov r27,r6 + lsl r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + adc r2,r1 + and r18,r2 + and r19,r3 + and r20,r4 + and r21,r5 + and r26,r6 + and r27,r7 + lsl r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + adc r2,r1 + eor r18,r2 + 
eor r19,r3 + eor r20,r4 + eor r21,r5 + eor r26,r6 + eor r27,r7 + ld r0,Z + eor r27,r0 + ldd r0,Z+1 + eor r26,r0 + ldd r0,Z+2 + eor r21,r0 + ldd r0,Z+3 + eor r20,r0 + ldd r0,Z+4 + eor r19,r0 + ldd r0,Z+5 + eor r18,r0 + ldd r0,Z+12 + eor r0,r27 + std Z+12,r0 + ldd r0,Z+13 + eor r0,r26 + std Z+13,r0 + ldd r0,Z+14 + eor r0,r21 + std Z+14,r0 + ldd r0,Z+15 + eor r0,r20 + std Z+15,r0 + ldd r0,Z+16 + eor r0,r19 + std Z+16,r0 + ldd r0,Z+17 + eor r0,r18 + std Z+17,r0 + ldd r27,Z+6 + ldd r26,Z+7 + ldd r21,Z+8 + ldd r20,Z+9 + ldd r19,Z+10 + ldd r18,Z+11 + mov r0,r1 + lsr r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r27,r0 + movw r2,r18 + movw r4,r20 + movw r6,r26 + bst r18,0 + lsr r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + bld r27,7 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + eor r6,r26 + eor r7,r27 + eor r2,r25 + com r3 + com r4 + com r5 + com r6 + com r7 + mov r0,r1 + bst r8,0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r11 + ror r10 + ror r9 + ror r8 + bld r15,5 + bld r0,0 + eor r2,r0 + ld r0,Z + eor r0,r7 + st Z,r0 + ldd r0,Z+1 + eor r0,r6 + std Z+1,r0 + ldd r0,Z+2 + eor r0,r5 + std Z+2,r0 + ldd r0,Z+3 + eor r0,r4 + std Z+3,r0 + ldd r0,Z+4 + eor r0,r3 + std Z+4,r0 + ldd r0,Z+5 + eor r0,r2 + std Z+5,r0 + dec r23 + breq 5323f + rjmp 18b +5323: + dec r22 + breq 375f + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+12 + ldd r3,Z+13 + ldd r4,Z+14 + ldd r5,Z+15 + ldd r6,Z+16 + ldd r7,Z+17 + st Z,r2 + std Z+1,r3 + std Z+2,r4 + std Z+3,r5 + std Z+4,r6 + std Z+5,r7 + std Z+12,r18 + std Z+13,r19 + std Z+14,r20 + std Z+15,r21 + std Z+16,r26 + std Z+17,r27 + ldd r18,Z+6 + ldd r19,Z+7 + ldd r20,Z+8 + ldd r21,Z+9 + ldd r26,Z+10 + ldd r27,Z+11 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + std Z+6,r2 + std Z+7,r3 + std Z+8,r4 + std Z+9,r5 + std Z+10,r6 + std Z+11,r7 + std Z+18,r18 + std Z+19,r19 + std Z+20,r20 + std Z+21,r21 + std Z+22,r26 + std Z+23,r27 + rjmp 16b +375: + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size simp_192_permute, .-simp_192_permute + +#endif diff --git a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys/internal-simp.c b/oribatida/Implementations/crypto_aead/oribatida192v12/rhys/internal-simp.c index 4ca50d0..5d2144e 100644 --- a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys/internal-simp.c +++ b/oribatida/Implementations/crypto_aead/oribatida192v12/rhys/internal-simp.c @@ -22,6 +22,8 @@ #include "internal-simp.h" +#if !defined(__AVR__) + /** * \brief Number of rounds for the inner block cipher within SimP-256. */ @@ -166,3 +168,5 @@ void simp_192_permute(unsigned char state[SIMP_192_STATE_SIZE], unsigned steps) be_store_word48(state + 12, x2); be_store_word48(state + 18, x3); } + +#endif /* !__AVR__ */ diff --git a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys/internal-util.h b/oribatida/Implementations/crypto_aead/oribatida192v12/rhys/internal-util.h index e79158c..e30166d 100644 --- a/oribatida/Implementations/crypto_aead/oribatida192v12/rhys/internal-util.h +++ b/oribatida/Implementations/crypto_aead/oribatida192v12/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. 
+ * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) 
(rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/aead-common.c b/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
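The internal-util.h hunk above enables LW_CRYPTO_ROTATE32_COMPOSED on AVR because that target has no barrel shifter: rotations by 1 bit or by a multiple of 8 bits are cheap, and every other distance is built by composing them. The following standalone C sketch illustrates the equivalence the composed macros rely on; the function names are illustrative and not part of the library.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Generic 32-bit rotations, the same shape as the library's generic macros. */
static uint32_t rotl32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32u - bits));
}

static uint32_t rotr32(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32u - bits));
}

/* Left rotate by 5 composed from "cheap" AVR steps: rotate left by 8
 * (a pure byte move), then rotate right by 1 three times. */
static uint32_t rotl5_composed(uint32_t x)
{
    x = rotl32(x, 8);
    x = rotr32(x, 1);
    x = rotr32(x, 1);
    x = rotr32(x, 1);
    return x;
}

int main(void)
{
    uint32_t x = 0x12345678u;
    assert(rotl5_composed(x) == rotl32(x, 5));  /* rotate by 8 then back by 3 is a rotate by 5 */
    printf("0x%08lx\n", (unsigned long)rotl5_composed(x));
    return 0;
}

On a desktop host both forms give identical results; the composed form only pays off on targets where a variable-distance shift costs one instruction per bit.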
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/aead-common.h b/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
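The aead_check_tag() routine in the deleted aead-common.c above compares tags without data-dependent branches: byte differences are OR-accumulated, collapsed into an all-ones or all-zero mask, and the plaintext is wiped through that mask when the comparison fails. Below is a compact standalone sketch of the same accumulate-and-mask idea; the function name is illustrative, not the library's.

#include <stdio.h>

/* Constant-time tag comparison: returns 0 on match, -1 on mismatch,
 * and zeroes the plaintext when the tags differ. */
static int check_tag_ct(unsigned char *pt, unsigned long long ptlen,
                        const unsigned char *tag1, const unsigned char *tag2,
                        unsigned taglen)
{
    int accum = 0;
    while (taglen > 0) {
        accum |= (*tag1++ ^ *tag2++);   /* becomes non-zero iff any byte differs */
        --taglen;
    }
    accum = (accum - 1) >> 8;           /* -1 (all ones) if equal, 0 otherwise */
    while (ptlen > 0) {
        *pt++ &= (unsigned char)accum;  /* plaintext survives only on a match */
        --ptlen;
    }
    return ~accum;                      /* 0 on success, -1 on failure */
}

int main(void)
{
    unsigned char buf[4] = { 'd', 'a', 't', 'a' };
    const unsigned char t1[4] = { 1, 2, 3, 4 };
    const unsigned char t2[4] = { 1, 2, 3, 5 };
    int rc = check_tag_ct(buf, sizeof(buf), t1, t2, sizeof(t1));
    printf("rc=%d first byte=%d\n", rc, buf[0]);   /* rc=-1, first byte=0 */
    return 0;
}

The precheck variant in the same file additionally ANDs an earlier pass/fail mask into accum before the plaintext is masked, so a failure elsewhere in the decryption pipeline still destroys the output.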
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. 
- */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. 
- */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
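The aead_cipher_t record in the deleted aead-common.h above packages an algorithm's key, nonce, and tag sizes together with its entry points so that generic test or benchmark code can drive any cipher through the same table. A hedged, self-contained sketch of that descriptor idea follows, trimmed to the encrypt side and using a dummy cipher; none of the functions or names below are the library's real implementations.

#include <stdio.h>

/* Trimmed-down versions of the function-pointer type and descriptor
 * from the deleted aead-common.h above. */
typedef int (*aead_encrypt_t)
    (unsigned char *c, unsigned long long *clen,
     const unsigned char *m, unsigned long long mlen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *nsec,
     const unsigned char *npub,
     const unsigned char *k);

typedef struct {
    const char *name;
    unsigned key_len, nonce_len, tag_len;
    aead_encrypt_t encrypt;
} cipher_desc_t;

/* Dummy "cipher": copies the message and appends a fixed 16-byte tag. */
static int dummy_encrypt
    (unsigned char *c, unsigned long long *clen,
     const unsigned char *m, unsigned long long mlen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *nsec,
     const unsigned char *npub,
     const unsigned char *k)
{
    unsigned long long i;
    (void)ad; (void)adlen; (void)nsec; (void)npub; (void)k;
    for (i = 0; i < mlen; ++i)
        c[i] = m[i];
    for (i = 0; i < 16; ++i)
        c[mlen + i] = (unsigned char)i;
    *clen = mlen + 16;
    return 0;
}

static const cipher_desc_t dummy_cipher = {
    "Dummy-AEAD", 16, 16, 16, dummy_encrypt
};

int main(void)
{
    unsigned char c[32];
    unsigned long long clen = 0;
    const unsigned char msg[5] = { 'h', 'e', 'l', 'l', 'o' };
    dummy_cipher.encrypt(c, &clen, msg, sizeof(msg), 0, 0, 0, 0, 0);
    printf("%s produced %llu bytes\n", dummy_cipher.name, clen);
    return 0;
}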
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/api.h b/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/encrypt.c b/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/encrypt.c deleted file mode 100644 index fd7d71e..0000000 --- a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "oribatida.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return oribatida_256_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return oribatida_256_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/internal-simp-avr.S b/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/internal-simp-avr.S deleted file mode 100644 index 65fba20..0000000 --- a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/internal-simp-avr.S +++ /dev/null @@ -1,949 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global simp_256_permute - .type simp_256_permute, @function -simp_256_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ldi r23,245 - mov r10,r23 - ldi r17,14 - mov r11,r17 - ldi r16,44 - mov r12,r16 - ldi r23,25 - mov r13,r23 - ldi r23,133 - mov r14,r23 - ldi r23,248 - mov r15,r23 - ldi r24,105 - ldi r25,51 -14: - ldi r23,17 -16: - ldd r29,Z+16 - ldd r28,Z+17 - ldd r27,Z+18 - ldd r26,Z+19 - ldd r21,Z+20 - ldd r20,Z+21 - ldd r19,Z+22 - ldd r18,Z+23 - mov r2,r29 - mov r3,r18 - mov r4,r19 - mov r5,r20 - mov r6,r21 - mov r7,r26 - mov r8,r27 - mov r9,r28 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r28 - rol r29 - adc r18,r1 - and r2,r18 - and r3,r19 - and r4,r20 - and r5,r21 - and r6,r26 - and r7,r27 - and r8,r28 - and r9,r29 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - rol r28 - rol r29 - adc r18,r1 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - eor r6,r26 - eor r7,r27 - eor r8,r28 - eor r9,r29 - ldd r0,Z+8 - eor r9,r0 - ldd r0,Z+9 - eor r8,r0 - ldd r0,Z+10 - eor r7,r0 - ldd r0,Z+11 - eor r6,r0 - ldd r0,Z+12 - eor r5,r0 - ldd r0,Z+13 - eor r4,r0 - ldd r0,Z+14 - eor r3,r0 - ldd r0,Z+15 - eor r2,r0 - ldd r0,Z+24 - eor r0,r9 - std Z+24,r0 - ldd 
r0,Z+25 - eor r0,r8 - std Z+25,r0 - ldd r0,Z+26 - eor r0,r7 - std Z+26,r0 - ldd r0,Z+27 - eor r0,r6 - std Z+27,r0 - ldd r0,Z+28 - eor r0,r5 - std Z+28,r0 - ldd r0,Z+29 - eor r0,r4 - std Z+29,r0 - ldd r0,Z+30 - eor r0,r3 - std Z+30,r0 - ldd r0,Z+31 - eor r0,r2 - std Z+31,r0 - ld r29,Z - ldd r28,Z+1 - ldd r27,Z+2 - ldd r26,Z+3 - ldd r21,Z+4 - ldd r20,Z+5 - ldd r19,Z+6 - ldd r18,Z+7 - mov r0,r1 - lsr r29 - ror r28 - ror r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r29 - ror r28 - ror r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r29 - ror r28 - ror r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r29,r0 - movw r2,r18 - movw r4,r20 - movw r6,r26 - movw r8,r28 - bst r2,0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r5 - ror r4 - ror r3 - ror r2 - bld r9,7 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - eor r26,r6 - eor r27,r7 - eor r28,r8 - eor r29,r9 - ldi r17,252 - eor r18,r17 - com r19 - com r20 - com r21 - com r26 - com r27 - com r28 - com r29 - mov r0,r1 - bst r10,0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r11 - ror r10 - bld r25,5 - bld r0,0 - eor r18,r0 - ldd r0,Z+8 - eor r0,r29 - std Z+8,r0 - ldd r0,Z+9 - eor r0,r28 - std Z+9,r0 - ldd r0,Z+10 - eor r0,r27 - std Z+10,r0 - ldd r0,Z+11 - eor r0,r26 - std Z+11,r0 - ldd r0,Z+12 - eor r0,r21 - std Z+12,r0 - ldd r0,Z+13 - eor r0,r20 - std Z+13,r0 - ldd r0,Z+14 - eor r0,r19 - std Z+14,r0 - ldd r0,Z+15 - eor r0,r18 - std Z+15,r0 - ldd r9,Z+24 - ldd r8,Z+25 - ldd r7,Z+26 - ldd r6,Z+27 - ldd r5,Z+28 - ldd r4,Z+29 - ldd r3,Z+30 - ldd r2,Z+31 - mov r18,r9 - mov r19,r2 - mov r20,r3 - mov r21,r4 - mov r26,r5 - mov r27,r6 - mov r28,r7 - mov r29,r8 - lsl r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - adc r2,r1 - and r18,r2 - and r19,r3 - and r20,r4 - and r21,r5 - and r26,r6 - and r27,r7 - and r28,r8 - and r29,r9 - lsl r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - rol r8 - rol r9 - adc r2,r1 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - eor r26,r6 - eor r27,r7 - eor r28,r8 - eor r29,r9 - ld r0,Z - eor r29,r0 - ldd r0,Z+1 - eor r28,r0 - ldd r0,Z+2 - eor r27,r0 - ldd r0,Z+3 - eor r26,r0 - ldd r0,Z+4 - eor r21,r0 - ldd r0,Z+5 - eor r20,r0 - ldd r0,Z+6 - eor r19,r0 - ldd r0,Z+7 - eor r18,r0 - ldd r0,Z+16 - eor r0,r29 - std Z+16,r0 - ldd r0,Z+17 - eor r0,r28 - std Z+17,r0 - ldd r0,Z+18 - eor r0,r27 - std Z+18,r0 - ldd r0,Z+19 - eor r0,r26 - std Z+19,r0 - ldd r0,Z+20 - eor r0,r21 - std Z+20,r0 - ldd r0,Z+21 - eor r0,r20 - std Z+21,r0 - ldd r0,Z+22 - eor r0,r19 - std Z+22,r0 - ldd r0,Z+23 - eor r0,r18 - std Z+23,r0 - ldd r29,Z+8 - ldd r28,Z+9 - ldd r27,Z+10 - ldd r26,Z+11 - ldd r21,Z+12 - ldd r20,Z+13 - ldd r19,Z+14 - ldd r18,Z+15 - mov r0,r1 - lsr r29 - ror r28 - ror r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r29 - ror r28 - ror r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r29 - ror r28 - ror r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r29,r0 - movw r2,r18 - movw r4,r20 - movw r6,r26 - movw r8,r28 - bst r18,0 - lsr r29 - ror r28 - ror r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - bld r29,7 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - eor r6,r26 - eor r7,r27 - eor r8,r28 - eor r9,r29 - eor r2,r17 - com r3 - com r4 - com r5 - com r6 - com r7 - com r8 - com r9 - mov r0,r1 - bst r10,0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r13 - ror r12 - ror r11 - ror r10 - bld r25,5 - bld r0,0 - eor r2,r0 - ld r0,Z - eor r0,r9 - st Z,r0 - ldd r0,Z+1 - eor 
r0,r8 - std Z+1,r0 - ldd r0,Z+2 - eor r0,r7 - std Z+2,r0 - ldd r0,Z+3 - eor r0,r6 - std Z+3,r0 - ldd r0,Z+4 - eor r0,r5 - std Z+4,r0 - ldd r0,Z+5 - eor r0,r4 - std Z+5,r0 - ldd r0,Z+6 - eor r0,r3 - std Z+6,r0 - ldd r0,Z+7 - eor r0,r2 - std Z+7,r0 - dec r23 - breq 5407f - rjmp 16b -5407: - dec r22 - brne 5409f - rjmp 475f -5409: - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r28,Z+6 - ldd r29,Z+7 - ldd r2,Z+16 - ldd r3,Z+17 - ldd r4,Z+18 - ldd r5,Z+19 - ldd r6,Z+20 - ldd r7,Z+21 - ldd r8,Z+22 - ldd r9,Z+23 - st Z,r2 - std Z+1,r3 - std Z+2,r4 - std Z+3,r5 - std Z+4,r6 - std Z+5,r7 - std Z+6,r8 - std Z+7,r9 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - std Z+20,r26 - std Z+21,r27 - std Z+22,r28 - std Z+23,r29 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r28,Z+14 - ldd r29,Z+15 - ldd r2,Z+24 - ldd r3,Z+25 - ldd r4,Z+26 - ldd r5,Z+27 - ldd r6,Z+28 - ldd r7,Z+29 - ldd r8,Z+30 - ldd r9,Z+31 - std Z+8,r2 - std Z+9,r3 - std Z+10,r4 - std Z+11,r5 - std Z+12,r6 - std Z+13,r7 - std Z+14,r8 - std Z+15,r9 - std Z+24,r18 - std Z+25,r19 - std Z+26,r20 - std Z+27,r21 - std Z+28,r26 - std Z+29,r27 - std Z+30,r28 - std Z+31,r29 - rjmp 14b -475: - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size simp_256_permute, .-simp_256_permute - - .text -.global simp_192_permute - .type simp_192_permute, @function -simp_192_permute: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ldi r25,245 - mov r8,r25 - ldi r24,14 - mov r9,r24 - ldi r23,44 - mov r10,r23 - ldi r17,25 - mov r11,r17 - ldi r16,133 - mov r12,r16 - ldi r23,248 - mov r13,r23 - ldi r23,105 - mov r14,r23 - ldi r23,51 - mov r15,r23 -16: - ldi r23,13 -18: - ldd r27,Z+12 - ldd r26,Z+13 - ldd r21,Z+14 - ldd r20,Z+15 - ldd r19,Z+16 - ldd r18,Z+17 - mov r2,r27 - mov r3,r18 - mov r4,r19 - mov r5,r20 - mov r6,r21 - mov r7,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - adc r18,r1 - and r2,r18 - and r3,r19 - and r4,r20 - and r5,r21 - and r6,r26 - and r7,r27 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r26 - rol r27 - adc r18,r1 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - eor r6,r26 - eor r7,r27 - ldd r0,Z+6 - eor r7,r0 - ldd r0,Z+7 - eor r6,r0 - ldd r0,Z+8 - eor r5,r0 - ldd r0,Z+9 - eor r4,r0 - ldd r0,Z+10 - eor r3,r0 - ldd r0,Z+11 - eor r2,r0 - ldd r0,Z+18 - eor r0,r7 - std Z+18,r0 - ldd r0,Z+19 - eor r0,r6 - std Z+19,r0 - ldd r0,Z+20 - eor r0,r5 - std Z+20,r0 - ldd r0,Z+21 - eor r0,r4 - std Z+21,r0 - ldd r0,Z+22 - eor r0,r3 - std Z+22,r0 - ldd r0,Z+23 - eor r0,r2 - std Z+23,r0 - ld r27,Z - ldd r26,Z+1 - ldd r21,Z+2 - ldd r20,Z+3 - ldd r19,Z+4 - ldd r18,Z+5 - mov r0,r1 - lsr r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r27,r0 - movw r2,r18 - movw r4,r20 - movw r6,r26 - bst r2,0 - lsr r7 - ror r6 - ror r5 - ror r4 - ror r3 - ror r2 - bld r7,7 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - eor r26,r6 - eor r27,r7 - ldi r25,252 - eor r18,r25 - com r19 - com r20 - com r21 - com r26 - com r27 - mov r0,r1 - bst r8,0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r11 - ror r10 - ror r9 - ror 
r8 - bld r15,5 - bld r0,0 - eor r18,r0 - ldd r0,Z+6 - eor r0,r27 - std Z+6,r0 - ldd r0,Z+7 - eor r0,r26 - std Z+7,r0 - ldd r0,Z+8 - eor r0,r21 - std Z+8,r0 - ldd r0,Z+9 - eor r0,r20 - std Z+9,r0 - ldd r0,Z+10 - eor r0,r19 - std Z+10,r0 - ldd r0,Z+11 - eor r0,r18 - std Z+11,r0 - ldd r7,Z+18 - ldd r6,Z+19 - ldd r5,Z+20 - ldd r4,Z+21 - ldd r3,Z+22 - ldd r2,Z+23 - mov r18,r7 - mov r19,r2 - mov r20,r3 - mov r21,r4 - mov r26,r5 - mov r27,r6 - lsl r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - adc r2,r1 - and r18,r2 - and r19,r3 - and r20,r4 - and r21,r5 - and r26,r6 - and r27,r7 - lsl r2 - rol r3 - rol r4 - rol r5 - rol r6 - rol r7 - adc r2,r1 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - eor r26,r6 - eor r27,r7 - ld r0,Z - eor r27,r0 - ldd r0,Z+1 - eor r26,r0 - ldd r0,Z+2 - eor r21,r0 - ldd r0,Z+3 - eor r20,r0 - ldd r0,Z+4 - eor r19,r0 - ldd r0,Z+5 - eor r18,r0 - ldd r0,Z+12 - eor r0,r27 - std Z+12,r0 - ldd r0,Z+13 - eor r0,r26 - std Z+13,r0 - ldd r0,Z+14 - eor r0,r21 - std Z+14,r0 - ldd r0,Z+15 - eor r0,r20 - std Z+15,r0 - ldd r0,Z+16 - eor r0,r19 - std Z+16,r0 - ldd r0,Z+17 - eor r0,r18 - std Z+17,r0 - ldd r27,Z+6 - ldd r26,Z+7 - ldd r21,Z+8 - ldd r20,Z+9 - ldd r19,Z+10 - ldd r18,Z+11 - mov r0,r1 - lsr r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - ror r0 - or r27,r0 - movw r2,r18 - movw r4,r20 - movw r6,r26 - bst r18,0 - lsr r27 - ror r26 - ror r21 - ror r20 - ror r19 - ror r18 - bld r27,7 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - eor r6,r26 - eor r7,r27 - eor r2,r25 - com r3 - com r4 - com r5 - com r6 - com r7 - mov r0,r1 - bst r8,0 - lsr r15 - ror r14 - ror r13 - ror r12 - ror r11 - ror r10 - ror r9 - ror r8 - bld r15,5 - bld r0,0 - eor r2,r0 - ld r0,Z - eor r0,r7 - st Z,r0 - ldd r0,Z+1 - eor r0,r6 - std Z+1,r0 - ldd r0,Z+2 - eor r0,r5 - std Z+2,r0 - ldd r0,Z+3 - eor r0,r4 - std Z+3,r0 - ldd r0,Z+4 - eor r0,r3 - std Z+4,r0 - ldd r0,Z+5 - eor r0,r2 - std Z+5,r0 - dec r23 - breq 5323f - rjmp 18b -5323: - dec r22 - breq 375f - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r2,Z+12 - ldd r3,Z+13 - ldd r4,Z+14 - ldd r5,Z+15 - ldd r6,Z+16 - ldd r7,Z+17 - st Z,r2 - std Z+1,r3 - std Z+2,r4 - std Z+3,r5 - std Z+4,r6 - std Z+5,r7 - std Z+12,r18 - std Z+13,r19 - std Z+14,r20 - std Z+15,r21 - std Z+16,r26 - std Z+17,r27 - ldd r18,Z+6 - ldd r19,Z+7 - ldd r20,Z+8 - ldd r21,Z+9 - ldd r26,Z+10 - ldd r27,Z+11 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - std Z+6,r2 - std Z+7,r3 - std Z+8,r4 - std Z+9,r5 - std Z+10,r6 - std Z+11,r7 - std Z+18,r18 - std Z+19,r19 - std Z+20,r20 - std Z+21,r21 - std Z+22,r26 - std Z+23,r27 - rjmp 16b -375: - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size simp_192_permute, .-simp_192_permute - -#endif diff --git a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/internal-simp.c b/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/internal-simp.c deleted file mode 100644 index 5d2144e..0000000 --- a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/internal-simp.c +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
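The internal-simp-avr.S file removed above is bracketed by #if defined(__AVR__) ... #endif, while the matching C implementation of simp_256_permute/simp_192_permute sits inside #if !defined(__AVR__) ... #endif, so every target links exactly one definition of each permutation symbol. A minimal sketch of that arrangement under assumed placeholder names (example_permute is not a real symbol in the library):

#include <stdio.h>

/* One public symbol, two possible providers: this portable C body is
 * compiled out on AVR, where an assembly file guarded by
 * #if defined(__AVR__) would define the same function instead. */
void example_permute(unsigned char *state, unsigned steps);

#if !defined(__AVR__)
void example_permute(unsigned char *state, unsigned steps)
{
    while (steps-- > 0)
        state[0] ^= 0x5A;   /* placeholder body, not a real permutation */
}
#endif /* !__AVR__ */

int main(void)
{
    unsigned char st[1] = { 0x00 };
    example_permute(st, 1);
    printf("0x%02X\n", st[0]);   /* 0x5A on non-AVR hosts */
    return 0;
}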
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-simp.h" - -#if !defined(__AVR__) - -/** - * \brief Number of rounds for the inner block cipher within SimP-256. - */ -#define SIMP_256_ROUNDS 34 - -/** - * \brief Number of rounds for the inner block cipher within SimP-192. - */ -#define SIMP_192_ROUNDS 26 - -/** - * \brief Round constants for each of the rounds in SimP-256 or SimP-192. - * - * Bit i is the round constant for round i, repeated every 62 rounds. - */ -#define SIMP_RC 0x3369F885192C0EF5ULL - -void simp_256_permute(unsigned char state[SIMP_256_STATE_SIZE], unsigned steps) -{ - uint64_t z = SIMP_RC; - uint64_t x0, x1, x2, x3, t0, t1; - unsigned round; - - /* Load the state into local variables */ - x0 = be_load_word64(state); - x1 = be_load_word64(state + 8); - x2 = be_load_word64(state + 16); - x3 = be_load_word64(state + 24); - - /* Perform all steps */ - for (; steps > 0; --steps) { - /* Perform all rounds for this step, two at a time */ - for (round = 0; round < (SIMP_256_ROUNDS / 2); ++round) { - t1 = x3 ^ (leftRotate1_64(x2) & leftRotate8_64(x2)) ^ - leftRotate2_64(x2) ^ x1; - t0 = x1 ^ rightRotate3_64(x0) ^ rightRotate4_64(x0) ^ - 0xFFFFFFFFFFFFFFFCULL ^ (z & 1); - z = (z >> 1) | (z << 61); /* z repeats every 62 rounds */ - x2 = x2 ^ (leftRotate1_64(t1) & leftRotate8_64(t1)) ^ - leftRotate2_64(t1) ^ x0; - x0 = x0 ^ rightRotate3_64(t0) ^ rightRotate4_64(t0) ^ - 0xFFFFFFFFFFFFFFFCULL ^ (z & 1); - x1 = t0; - x3 = t1; - z = (z >> 1) | (z << 61); /* z repeats every 62 rounds */ - } - - /* Swap the words of the state for all steps except the last */ - if (steps > 1) { - t0 = x0; - t1 = x1; - x0 = x2; - x1 = x3; - x2 = t0; - x3 = t1; - } - } - - /* Write the local variables back to the state */ - be_store_word64(state, x0); - be_store_word64(state + 8, x1); - be_store_word64(state + 16, x2); - be_store_word64(state + 24, x3); -} - -/* Load a big-endian 48-bit word from a byte buffer */ -#define be_load_word48(ptr) \ - ((((uint64_t)((ptr)[0])) << 40) | \ - (((uint64_t)((ptr)[1])) << 32) | \ - (((uint64_t)((ptr)[2])) << 24) | \ - (((uint64_t)((ptr)[3])) << 16) | \ - (((uint64_t)((ptr)[4])) << 8) | \ - ((uint64_t)((ptr)[5]))) - -/* Store a big-endian 48-bit word into a byte buffer */ -#define be_store_word48(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 40); \ - (ptr)[1] = (uint8_t)(_x >> 32); \ - (ptr)[2] = (uint8_t)(_x >> 24); \ - (ptr)[3] = (uint8_t)(_x >> 16); \ - (ptr)[4] = (uint8_t)(_x >> 8); \ - 
(ptr)[5] = (uint8_t)_x; \ - } while (0) - -/* 48-bit rotations with the high bits set to garbage - truncated later */ -#define rightRotate3_48(x) (((x) >> 3) | ((x) << 45)) -#define rightRotate4_48(x) (((x) >> 4) | ((x) << 44)) -#define leftRotate1_48(x) (((x) << 1) | ((x) >> 47)) -#define leftRotate2_48(x) (((x) << 2) | ((x) >> 46)) -#define leftRotate8_48(x) (((x) << 8) | ((x) >> 40)) - -void simp_192_permute(unsigned char state[SIMP_192_STATE_SIZE], unsigned steps) -{ - uint64_t z = SIMP_RC; - uint64_t x0, x1, x2, x3, t0, t1; - unsigned round; - - /* Load the state into local variables */ - x0 = be_load_word48(state); - x1 = be_load_word48(state + 6); - x2 = be_load_word48(state + 12); - x3 = be_load_word48(state + 18); - - /* Perform all steps */ - for (; steps > 0; --steps) { - /* Perform all rounds for this step, two at a time */ - for (round = 0; round < (SIMP_192_ROUNDS / 2); ++round) { - t1 = x3 ^ (leftRotate1_48(x2) & leftRotate8_48(x2)) ^ - leftRotate2_48(x2) ^ x1; - t0 = x1 ^ rightRotate3_48(x0) ^ rightRotate4_48(x0) ^ - 0xFFFFFFFFFFFFFFFCULL ^ (z & 1); - t0 &= 0x0000FFFFFFFFFFFFULL; /* Truncate back to 48 bits */ - t1 &= 0x0000FFFFFFFFFFFFULL; - z = (z >> 1) | (z << 61); /* z repeats every 62 rounds */ - x2 = x2 ^ (leftRotate1_48(t1) & leftRotate8_48(t1)) ^ - leftRotate2_48(t1) ^ x0; - x0 = x0 ^ rightRotate3_48(t0) ^ rightRotate4_48(t0) ^ - 0xFFFFFFFFFFFFFFFCULL ^ (z & 1); - x0 &= 0x0000FFFFFFFFFFFFULL; - x2 &= 0x0000FFFFFFFFFFFFULL; - x1 = t0; - x3 = t1; - z = (z >> 1) | (z << 61); /* z repeats every 62 rounds */ - } - - /* Swap the words of the state for all steps except the last */ - if (steps > 1) { - t0 = x0; - t1 = x1; - x0 = x2; - x1 = x3; - x2 = t0; - x3 = t1; - } - } - - /* Write the local variables back to the state */ - be_store_word48(state, x0); - be_store_word48(state + 6, x1); - be_store_word48(state + 12, x2); - be_store_word48(state + 18, x3); -} - -#endif /* !__AVR__ */ diff --git a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/internal-simp.h b/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/internal-simp.h deleted file mode 100644 index 3a95e80..0000000 --- a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/internal-simp.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
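simp_192_permute in the deleted internal-simp.c above keeps each 48-bit word in a uint64_t, lets the rotation helpers leave garbage in the top 16 bits, and truncates back to 48 bits only once per round; because AND and XOR act bit by bit, the low 48 bits stay correct no matter what accumulates above them. A small standalone check of that trick, with illustrative names:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MASK48 0x0000FFFFFFFFFFFFULL

/* "Sloppy" 48-bit left rotate: correct in the low 48 bits, garbage above. */
static uint64_t rotl48_sloppy(uint64_t x, unsigned bits)
{
    return (x << bits) | (x >> (48 - bits));
}

/* Exact 48-bit left rotate, masked immediately, for comparison. */
static uint64_t rotl48_exact(uint64_t x, unsigned bits)
{
    return ((x << bits) | (x >> (48 - bits))) & MASK48;
}

int main(void)
{
    uint64_t x = 0x0000123456789ABCULL;   /* a clean 48-bit value */
    /* Combine sloppy rotations the way a round function would,
     * then truncate once at the end. */
    uint64_t sloppy = (rotl48_sloppy(x, 1) & rotl48_sloppy(x, 8)) ^ rotl48_sloppy(x, 2);
    uint64_t exact  = (rotl48_exact(x, 1)  & rotl48_exact(x, 8))  ^ rotl48_exact(x, 2);
    sloppy &= MASK48;
    assert(sloppy == exact);   /* masking once at the end gives the same low 48 bits */
    printf("%012llx\n", (unsigned long long)sloppy);
    return 0;
}

The same file steps the round-constant word with z = (z >> 1) | (z << 61), consuming one bit of z per round, which is why its comments note that the constant sequence repeats every 62 rounds.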
- */ - -#ifndef LW_INTERNAL_SIMP_H -#define LW_INTERNAL_SIMP_H - -#include "internal-util.h" - -/** - * \file internal-simp.h - * \brief SimP permutation family. - * - * SimP-256 and SimP-192 are used by the Oribatida submission to - * round 2 of the NIST Lightweight Cryptography Competition. - * The permutations are built around reduced-round variants of the - * Simon-128-128 and Simon-96-96 block ciphers. - * - * References: https://www.isical.ac.in/~lightweight/oribatida/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief State size of the SimP-256 permutation. - */ -#define SIMP_256_STATE_SIZE 32 - -/** - * \brief State size of the SimP-192 permutation. - */ -#define SIMP_192_STATE_SIZE 24 - -/** - * \brief Permutes a state with SimP-256. - * - * \param state State to be permuted. - * \param steps Number of steps to perform (usually 2 or 4). - */ -void simp_256_permute(unsigned char state[SIMP_256_STATE_SIZE], unsigned steps); - -/** - * \brief Permutes a state with SimP-192. - * - * \param state State to be permuted. - * \param steps Number of steps to perform (usually 2 or 4). - */ -void simp_192_permute(unsigned char state[SIMP_192_STATE_SIZE], unsigned steps); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/internal-util.h b/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
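The load/store macros in the deleted internal-util.h above assemble words one byte at a time, so they produce the same result on little- and big-endian hosts and sidestep alignment concerns entirely. Below is a standalone round-trip check of the 32-bit big-endian pair, written as functions rather than macros; the names are illustrative, not the library's.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Byte-wise big-endian 32-bit load: most significant byte first. */
static uint32_t load_be32(const unsigned char *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

/* Byte-wise big-endian 32-bit store. */
static void store_be32(unsigned char *p, uint32_t x)
{
    p[0] = (unsigned char)(x >> 24);
    p[1] = (unsigned char)(x >> 16);
    p[2] = (unsigned char)(x >> 8);
    p[3] = (unsigned char)x;
}

int main(void)
{
    unsigned char buf[4];
    store_be32(buf, 0x01020304u);
    assert(buf[0] == 0x01 && buf[3] == 0x04);   /* big-endian byte order in memory */
    assert(load_be32(buf) == 0x01020304u);      /* round trip recovers the word */
    printf("round trip ok\n");
    return 0;
}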
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
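The composed forms in this block build every 32-bit rotation out of the cheap AVR primitives: a rotation by 1 bit or by a whole number of bytes. For instance, a left rotation by 5 is performed as a left rotation by 8 followed by three right rotations by 1. The following standalone sketch (not part of the patch; rotl32 and rotr32 are local stand-ins for the generic leftRotate/rightRotate macros) checks that equivalence on a sample word:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the generic leftRotate/rightRotate macros */
static uint32_t rotl32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32u - bits));
}

static uint32_t rotr32(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32u - bits));
}

int main(void)
{
    uint32_t x = 0x12345678;
    uint32_t direct = rotl32(x, 5);
    /* Same composition as leftRotate5(): left by 8, then right by 1 three times */
    uint32_t composed = rotr32(rotr32(rotr32(rotl32(x, 8), 1), 1), 1);
    printf("direct=%08lx composed=%08lx\n",
           (unsigned long)direct, (unsigned long)composed);
    return direct == composed ? 0 : 1;
}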
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/oribatida.c b/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/oribatida.c deleted file mode 100644 index 55a3914..0000000 --- a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/oribatida.c +++ /dev/null @@ -1,480 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "oribatida.h" -#include "internal-simp.h" -#include - -/** - * \brief Rate for processing data for the Oribatida-256-64 state. - */ -#define ORIBATIDA_256_RATE 16 - -/** - * \brief Size of the masking value for Oribatida-256-64. - */ -#define ORIBATIDA_256_MASK_SIZE 8 - -/** - * \brief Rate for processing data for the Oribatida-192-96 state. - */ -#define ORIBATIDA_192_RATE 12 - -/** - * \brief Size of the masking value for Oribatida-192-96. - */ -#define ORIBATIDA_192_MASK_SIZE 12 - -aead_cipher_t const oribatida_256_cipher = { - "Oribatida-256-64", - ORIBATIDA_256_KEY_SIZE, - ORIBATIDA_256_NONCE_SIZE, - ORIBATIDA_256_TAG_SIZE, - AEAD_FLAG_NONE, - oribatida_256_aead_encrypt, - oribatida_256_aead_decrypt -}; - -aead_cipher_t const oribatida_192_cipher = { - "Oribatida-192-96", - ORIBATIDA_192_KEY_SIZE, - ORIBATIDA_192_NONCE_SIZE, - ORIBATIDA_192_TAG_SIZE, - AEAD_FLAG_NONE, - oribatida_192_aead_encrypt, - oribatida_192_aead_decrypt -}; - -/* Definitions for domain separation values */ -#define ORIBATIDA_NUM_DOMAINS 3 -#define ORIBATIDA_DOMAIN_NONCE 0 -#define ORIBATIDA_DOMAIN_AD 1 -#define ORIBATIDA_DOMAIN_MSG 2 - -/** - * \brief Gets the domain separation values to use for different phases - * of the Oribatida encryption process. - * - * \param domains Returns the domain separation values to use. - * \param adlen Length of the associated data. - * \param mlen Length of the plaintext message. - * \param rate Rate of processing message blocks, 12 or 16. 
- */ -static void oribatida_get_domains - (unsigned char domains[ORIBATIDA_NUM_DOMAINS], - unsigned long long adlen, unsigned long long mlen, unsigned rate) -{ - /* Domain separation value for the nonce */ - if (adlen == 0 && mlen == 0) { - domains[ORIBATIDA_DOMAIN_NONCE] = 9; - } else { - domains[ORIBATIDA_DOMAIN_NONCE] = 5; - } - - /* Domain separation value for associated data processing */ - if (mlen == 0) { - if ((adlen % rate) == 0) - domains[ORIBATIDA_DOMAIN_AD] = 12; - else - domains[ORIBATIDA_DOMAIN_AD] = 14; - } else { - if ((adlen % rate) == 0) - domains[ORIBATIDA_DOMAIN_AD] = 4; - else - domains[ORIBATIDA_DOMAIN_AD] = 6; - } - - /* Domain separation value for message processing */ - if ((mlen % rate) == 0) { - domains[ORIBATIDA_DOMAIN_MSG] = 13; - } else { - domains[ORIBATIDA_DOMAIN_MSG] = 15; - } -} - -/** - * \brief Initializes the Oribatida-256-64 state. - * - * \param state Oribatida-256-64 permutation state. - * \param mask Oribatida-256-64 masking state. - * \param domains Precomputed domain separation values. - * \param k Points to the key. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data. - */ -static void oribatida_256_init - (unsigned char state[SIMP_256_STATE_SIZE], - unsigned char mask[ORIBATIDA_256_MASK_SIZE], - const unsigned char domains[ORIBATIDA_NUM_DOMAINS], - const unsigned char *k, const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned temp; - - /* Initialize the state with the key and nonce */ - memcpy(state, npub, ORIBATIDA_256_NONCE_SIZE); - memcpy(state + ORIBATIDA_256_NONCE_SIZE, k, ORIBATIDA_256_KEY_SIZE); - - /* Use the current state as the mask for zero-length associated data */ - if (adlen == 0) { - memcpy(mask, state + SIMP_256_STATE_SIZE - ORIBATIDA_256_MASK_SIZE, - ORIBATIDA_256_MASK_SIZE); - } - - /* Add the domain separation value for the nonce */ - state[SIMP_256_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_NONCE]; - - /* Run the permutation for the first time */ - simp_256_permute(state, 4); - - /* If there is no associated data, then we are done */ - if (adlen == 0) - return; - - /* Use the current state as the mask for non-zero length associated data */ - memcpy(mask, state + SIMP_256_STATE_SIZE - ORIBATIDA_256_MASK_SIZE, - ORIBATIDA_256_MASK_SIZE); - - /* Process all associated data blocks except the last */ - while (adlen > ORIBATIDA_256_RATE) { - lw_xor_block(state, ad, ORIBATIDA_256_RATE); - simp_256_permute(state, 2); - ad += ORIBATIDA_256_RATE; - adlen -= ORIBATIDA_256_RATE; - } - - /* Process the final associated data block */ - temp = (unsigned)adlen; - if (temp == ORIBATIDA_256_RATE) { - lw_xor_block(state, ad, ORIBATIDA_256_RATE); - } else { - lw_xor_block(state, ad, temp); - state[temp] ^= 0x80; /* padding */ - } - state[SIMP_256_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_AD]; - simp_256_permute(state, 4); -} - -/** - * \brief Initializes the Oribatida-192-96 state. - * - * \param state Oribatida-192-96 permutation state. - * \param mask Oribatida-192-96 masking state. - * \param domains Precomputed domain separation values. - * \param k Points to the key. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data. 
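oribatida_get_domains() above folds the associated-data length, the message length and the rate into the three domain-separation constants that are XORed into the last state byte at the nonce, associated-data and message phases. A standalone sketch of the same selection logic (local names only; the function above is the real code), evaluated for one sample input:

#include <stdio.h>

/* Indices mirroring ORIBATIDA_DOMAIN_NONCE / _AD / _MSG */
#define DOM_NONCE 0
#define DOM_AD    1
#define DOM_MSG   2

static void get_domains(unsigned char d[3], unsigned long long adlen,
                        unsigned long long mlen, unsigned rate)
{
    d[DOM_NONCE] = (adlen == 0 && mlen == 0) ? 9 : 5;
    if (mlen == 0)
        d[DOM_AD] = ((adlen % rate) == 0) ? 12 : 14;
    else
        d[DOM_AD] = ((adlen % rate) == 0) ? 4 : 6;
    d[DOM_MSG] = ((mlen % rate) == 0) ? 13 : 15;
}

int main(void)
{
    unsigned char d[3];
    /* 20 bytes of AD, 35 bytes of message, 16-byte rate (Oribatida-256-64):
     * neither length is a multiple of the rate */
    get_domains(d, 20, 35, 16);
    printf("nonce=%u ad=%u msg=%u\n", d[0], d[1], d[2]); /* prints 5 6 15 */
    return 0;
}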
- */ -static void oribatida_192_init - (unsigned char state[SIMP_192_STATE_SIZE], - unsigned char mask[ORIBATIDA_192_MASK_SIZE], - const unsigned char domains[ORIBATIDA_NUM_DOMAINS], - const unsigned char *k, const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned temp; - - /* Initialize the state with the key and nonce */ - memcpy(state, npub, ORIBATIDA_192_NONCE_SIZE); - memcpy(state + ORIBATIDA_192_NONCE_SIZE, k, ORIBATIDA_256_KEY_SIZE); - - /* Use the current state as the mask for zero-length associated data */ - if (adlen == 0) { - memcpy(mask, state + SIMP_192_STATE_SIZE - ORIBATIDA_192_MASK_SIZE, - ORIBATIDA_192_MASK_SIZE); - } - - /* Add the domain separation value for the nonce */ - state[SIMP_192_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_NONCE]; - - /* Run the permutation for the first time */ - simp_192_permute(state, 4); - - /* If there is no associated data, then we are done */ - if (adlen == 0) - return; - - /* Use the current state as the mask for non-zero length associated data */ - memcpy(mask, state + SIMP_192_STATE_SIZE - ORIBATIDA_192_MASK_SIZE, - ORIBATIDA_192_MASK_SIZE); - - /* Process all associated data blocks except the last */ - while (adlen > ORIBATIDA_192_RATE) { - lw_xor_block(state, ad, ORIBATIDA_192_RATE); - simp_192_permute(state, 2); - ad += ORIBATIDA_192_RATE; - adlen -= ORIBATIDA_192_RATE; - } - - /* Process the final associated data block */ - temp = (unsigned)adlen; - if (temp == ORIBATIDA_192_RATE) { - lw_xor_block(state, ad, ORIBATIDA_192_RATE); - } else { - lw_xor_block(state, ad, temp); - state[temp] ^= 0x80; /* padding */ - } - state[SIMP_192_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_AD]; - simp_192_permute(state, 4); -} - -int oribatida_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[SIMP_256_STATE_SIZE]; - unsigned char mask[ORIBATIDA_256_MASK_SIZE]; - unsigned char domains[ORIBATIDA_NUM_DOMAINS]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ORIBATIDA_256_TAG_SIZE; - - /* Initialize the state and absorb the associated data */ - oribatida_get_domains(domains, adlen, mlen, ORIBATIDA_256_RATE); - oribatida_256_init(state, mask, domains, k, npub, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - while (mlen > ORIBATIDA_256_RATE) { - lw_xor_block_2_dest(c, state, m, ORIBATIDA_256_RATE); - lw_xor_block(c + ORIBATIDA_256_RATE - ORIBATIDA_256_MASK_SIZE, - mask, ORIBATIDA_256_MASK_SIZE); - memcpy(mask, state + SIMP_256_STATE_SIZE - ORIBATIDA_256_MASK_SIZE, - ORIBATIDA_256_MASK_SIZE); - simp_256_permute(state, 4); - c += ORIBATIDA_256_RATE; - m += ORIBATIDA_256_RATE; - mlen -= ORIBATIDA_256_RATE; - } - if (mlen == ORIBATIDA_256_RATE) { - lw_xor_block_2_dest(c, state, m, ORIBATIDA_256_RATE); - lw_xor_block(c + ORIBATIDA_256_RATE - ORIBATIDA_256_MASK_SIZE, - mask, ORIBATIDA_256_MASK_SIZE); - state[SIMP_256_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_MSG]; - simp_256_permute(state, 4); - } else if (mlen > 0) { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_dest(c, state, m, temp); - if (temp > (ORIBATIDA_256_RATE - ORIBATIDA_256_MASK_SIZE)) { - lw_xor_block - (c + ORIBATIDA_256_RATE - ORIBATIDA_256_MASK_SIZE, mask, - temp - (ORIBATIDA_256_RATE - ORIBATIDA_256_MASK_SIZE)); - } - state[temp] ^= 0x80; /* padding */ - 
state[SIMP_256_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_MSG]; - simp_256_permute(state, 4); - } - - /* Generate the authentication tag */ - memcpy(c + mlen, state, ORIBATIDA_256_TAG_SIZE); - return 0; -} - -int oribatida_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[SIMP_256_STATE_SIZE]; - unsigned char mask[ORIBATIDA_256_MASK_SIZE]; - unsigned char domains[ORIBATIDA_NUM_DOMAINS]; - unsigned char block[ORIBATIDA_256_RATE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ORIBATIDA_256_TAG_SIZE) - return -1; - *mlen = clen - ORIBATIDA_256_TAG_SIZE; - - /* Initialize the state and absorb the associated data */ - clen -= ORIBATIDA_256_TAG_SIZE; - oribatida_get_domains(domains, adlen, clen, ORIBATIDA_256_RATE); - oribatida_256_init(state, mask, domains, k, npub, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - while (clen > ORIBATIDA_256_RATE) { - memcpy(block, c, ORIBATIDA_256_RATE); - lw_xor_block(block + ORIBATIDA_256_RATE - ORIBATIDA_256_MASK_SIZE, - mask, ORIBATIDA_256_MASK_SIZE); - lw_xor_block_swap(m, state, block, ORIBATIDA_256_RATE); - memcpy(mask, state + SIMP_256_STATE_SIZE - ORIBATIDA_256_MASK_SIZE, - ORIBATIDA_256_MASK_SIZE); - simp_256_permute(state, 4); - c += ORIBATIDA_256_RATE; - m += ORIBATIDA_256_RATE; - clen -= ORIBATIDA_256_RATE; - } - if (clen == ORIBATIDA_256_RATE) { - memcpy(block, c, ORIBATIDA_256_RATE); - lw_xor_block(block + ORIBATIDA_256_RATE - ORIBATIDA_256_MASK_SIZE, - mask, ORIBATIDA_256_MASK_SIZE); - lw_xor_block_swap(m, state, block, ORIBATIDA_256_RATE); - state[SIMP_256_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_MSG]; - simp_256_permute(state, 4); - } else if (clen > 0) { - unsigned temp = (unsigned)clen; - memcpy(block, c, temp); - if (temp > (ORIBATIDA_256_RATE - ORIBATIDA_256_MASK_SIZE)) { - lw_xor_block - (block + ORIBATIDA_256_RATE - ORIBATIDA_256_MASK_SIZE, mask, - temp - (ORIBATIDA_256_RATE - ORIBATIDA_256_MASK_SIZE)); - } - lw_xor_block_swap(m, state, block, temp); - state[temp] ^= 0x80; /* padding */ - state[SIMP_256_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_MSG]; - simp_256_permute(state, 4); - } - c += clen; - - /* Check the authentication tag */ - return aead_check_tag(mtemp, *mlen, state, c, ORIBATIDA_256_TAG_SIZE); -} - -int oribatida_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[SIMP_192_STATE_SIZE]; - unsigned char mask[ORIBATIDA_192_MASK_SIZE]; - unsigned char domains[ORIBATIDA_NUM_DOMAINS]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ORIBATIDA_192_TAG_SIZE; - - /* Initialize the state and absorb the associated data */ - oribatida_get_domains(domains, adlen, mlen, ORIBATIDA_192_RATE); - oribatida_192_init(state, mask, domains, k, npub, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - while (mlen > ORIBATIDA_192_RATE) { - lw_xor_block_2_dest(c, state, m, ORIBATIDA_192_RATE); - lw_xor_block(c + ORIBATIDA_192_RATE - ORIBATIDA_192_MASK_SIZE, - mask, ORIBATIDA_192_MASK_SIZE); - memcpy(mask, state + SIMP_192_STATE_SIZE - 
ORIBATIDA_192_MASK_SIZE, - ORIBATIDA_192_MASK_SIZE); - simp_192_permute(state, 4); - c += ORIBATIDA_192_RATE; - m += ORIBATIDA_192_RATE; - mlen -= ORIBATIDA_192_RATE; - } - if (mlen == ORIBATIDA_192_RATE) { - lw_xor_block_2_dest(c, state, m, ORIBATIDA_192_RATE); - lw_xor_block(c + ORIBATIDA_192_RATE - ORIBATIDA_192_MASK_SIZE, - mask, ORIBATIDA_192_MASK_SIZE); - state[SIMP_192_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_MSG]; - simp_192_permute(state, 4); - } else if (mlen > 0) { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_dest(c, state, m, temp); - if (temp > (ORIBATIDA_192_RATE - ORIBATIDA_192_MASK_SIZE)) { - lw_xor_block - (c + ORIBATIDA_192_RATE - ORIBATIDA_192_MASK_SIZE, mask, - temp - (ORIBATIDA_192_RATE - ORIBATIDA_192_MASK_SIZE)); - } - state[temp] ^= 0x80; /* padding */ - state[SIMP_192_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_MSG]; - simp_192_permute(state, 4); - } - - /* Generate the authentication tag */ - memcpy(c + mlen, state, ORIBATIDA_192_TAG_SIZE); - return 0; -} - -int oribatida_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[SIMP_192_STATE_SIZE]; - unsigned char mask[ORIBATIDA_192_MASK_SIZE]; - unsigned char domains[ORIBATIDA_NUM_DOMAINS]; - unsigned char block[ORIBATIDA_192_RATE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ORIBATIDA_192_TAG_SIZE) - return -1; - *mlen = clen - ORIBATIDA_192_TAG_SIZE; - - /* Initialize the state and absorb the associated data */ - clen -= ORIBATIDA_192_TAG_SIZE; - oribatida_get_domains(domains, adlen, clen, ORIBATIDA_192_RATE); - oribatida_192_init(state, mask, domains, k, npub, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - while (clen > ORIBATIDA_192_RATE) { - memcpy(block, c, ORIBATIDA_192_RATE); - lw_xor_block(block + ORIBATIDA_192_RATE - ORIBATIDA_192_MASK_SIZE, - mask, ORIBATIDA_192_MASK_SIZE); - lw_xor_block_swap(m, state, block, ORIBATIDA_192_RATE); - memcpy(mask, state + SIMP_192_STATE_SIZE - ORIBATIDA_192_MASK_SIZE, - ORIBATIDA_192_MASK_SIZE); - simp_192_permute(state, 4); - c += ORIBATIDA_192_RATE; - m += ORIBATIDA_192_RATE; - clen -= ORIBATIDA_192_RATE; - } - if (clen == ORIBATIDA_192_RATE) { - memcpy(block, c, ORIBATIDA_192_RATE); - lw_xor_block(block + ORIBATIDA_192_RATE - ORIBATIDA_192_MASK_SIZE, - mask, ORIBATIDA_192_MASK_SIZE); - lw_xor_block_swap(m, state, block, ORIBATIDA_192_RATE); - state[SIMP_192_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_MSG]; - simp_192_permute(state, 4); - } else if (clen > 0) { - unsigned temp = (unsigned)clen; - memcpy(block, c, temp); - if (temp > (ORIBATIDA_192_RATE - ORIBATIDA_192_MASK_SIZE)) { - lw_xor_block - (block + ORIBATIDA_192_RATE - ORIBATIDA_192_MASK_SIZE, mask, - temp - (ORIBATIDA_192_RATE - ORIBATIDA_192_MASK_SIZE)); - } - lw_xor_block_swap(m, state, block, temp); - state[temp] ^= 0x80; /* padding */ - state[SIMP_192_STATE_SIZE - 1] ^= domains[ORIBATIDA_DOMAIN_MSG]; - simp_192_permute(state, 4); - } - c += clen; - - /* Check the authentication tag */ - return aead_check_tag(mtemp, *mlen, state, c, ORIBATIDA_192_TAG_SIZE); -} diff --git a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/oribatida.h b/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/oribatida.h deleted file mode 100644 index dbc374b..0000000 --- 
a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys-avr/oribatida.h +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ORIBATIDA_H -#define LWCRYPTO_ORIBATIDA_H - -#include "aead-common.h" - -/** - * \file oribatida.h - * \brief Oribatida authenticated encryption algorithm. - * - * Oribatida is a family of authenticated encryption algorithms based on the - * SimP-256 and SimP-192 permutations which are built around reduced-round - * variants of the Simon-128-128 and Simon-96-96 block ciphers. - * There are two algorithms in the family: - * - * \li Oribatida-256-64 with a 128-bit key, a 128-bit nonce, and a 128-bit tag, - * built around the SimP-256 permutation. This is the primary member of - * the family. - * \li Oribatida-192-96 with a 128-bit key, a 64-bit nonce, and a 96-bit tag, - * built around the SimP-192 permutation. - * - * References: https://www.isical.ac.in/~lightweight/oribatida/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for Oribatida-256-64. - */ -#define ORIBATIDA_256_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Oribatida-256-64. - */ -#define ORIBATIDA_256_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Oribatida-256-64. - */ -#define ORIBATIDA_256_NONCE_SIZE 16 - -/** - * \brief Size of the key for Oribatida-192-96. - */ -#define ORIBATIDA_192_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Oribatida-192-96. - */ -#define ORIBATIDA_192_TAG_SIZE 12 - -/** - * \brief Size of the nonce for Oribatida-192-96. - */ -#define ORIBATIDA_192_NONCE_SIZE 8 - -/** - * \brief Meta-information block for the Oribatida-256-64 cipher. - */ -extern aead_cipher_t const oribatida_256_cipher; - -/** - * \brief Meta-information block for the Oribatida-192-96 cipher. - */ -extern aead_cipher_t const oribatida_192_cipher; - -/** - * \brief Encrypts and authenticates a packet with Oribatida-256-64. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa oribatida_256_aead_decrypt() - */ -int oribatida_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Oribatida-256-64. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa oribatida_256_aead_encrypt() - */ -int oribatida_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Oribatida-192-96. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 12 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 8 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa oribatida_192_aead_decrypt() - */ -int oribatida_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Oribatida-192-96. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. 
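The encrypt and decrypt prototypes above follow the usual NIST lightweight AEAD calling convention: nsec is unused, clen is the message length plus the tag size, and decryption returns 0 only when the tag verifies. A round-trip sketch for Oribatida-256-64, assuming the all-zero key and nonce are placeholders rather than recommended values:

#include <stdio.h>
#include <string.h>
#include "oribatida.h"

int main(void)
{
    unsigned char key[ORIBATIDA_256_KEY_SIZE] = {0};
    unsigned char nonce[ORIBATIDA_256_NONCE_SIZE] = {0};
    unsigned char ad[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    unsigned char msg[32], out[32];
    unsigned char ct[sizeof(msg) + ORIBATIDA_256_TAG_SIZE];
    unsigned long long clen, mlen;

    memset(msg, 0xAB, sizeof(msg));

    /* Encrypt: the ciphertext plus the 16-byte tag is written to ct */
    oribatida_256_aead_encrypt(ct, &clen, msg, sizeof(msg),
                               ad, sizeof(ad), NULL, nonce, key);

    /* Decrypt and verify the tag; a non-zero result means authentication failed */
    if (oribatida_256_aead_decrypt(out, &mlen, NULL, ct, clen,
                                   ad, sizeof(ad), nonce, key) != 0 ||
        mlen != sizeof(msg) || memcmp(out, msg, (size_t)mlen) != 0) {
        printf("round trip failed\n");
        return 1;
    }
    printf("round trip ok, %llu ciphertext bytes\n", clen);
    return 0;
}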
- * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 12 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 8 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa oribatida_192_aead_encrypt() - */ -int oribatida_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys/internal-simp-avr.S b/oribatida/Implementations/crypto_aead/oribatida256v12/rhys/internal-simp-avr.S new file mode 100644 index 0000000..65fba20 --- /dev/null +++ b/oribatida/Implementations/crypto_aead/oribatida256v12/rhys/internal-simp-avr.S @@ -0,0 +1,949 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global simp_256_permute + .type simp_256_permute, @function +simp_256_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ldi r23,245 + mov r10,r23 + ldi r17,14 + mov r11,r17 + ldi r16,44 + mov r12,r16 + ldi r23,25 + mov r13,r23 + ldi r23,133 + mov r14,r23 + ldi r23,248 + mov r15,r23 + ldi r24,105 + ldi r25,51 +14: + ldi r23,17 +16: + ldd r29,Z+16 + ldd r28,Z+17 + ldd r27,Z+18 + ldd r26,Z+19 + ldd r21,Z+20 + ldd r20,Z+21 + ldd r19,Z+22 + ldd r18,Z+23 + mov r2,r29 + mov r3,r18 + mov r4,r19 + mov r5,r20 + mov r6,r21 + mov r7,r26 + mov r8,r27 + mov r9,r28 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r28 + rol r29 + adc r18,r1 + and r2,r18 + and r3,r19 + and r4,r20 + and r5,r21 + and r6,r26 + and r7,r27 + and r8,r28 + and r9,r29 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + rol r28 + rol r29 + adc r18,r1 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + eor r6,r26 + eor r7,r27 + eor r8,r28 + eor r9,r29 + ldd r0,Z+8 + eor r9,r0 + ldd r0,Z+9 + eor r8,r0 + ldd r0,Z+10 + eor r7,r0 + ldd r0,Z+11 + eor r6,r0 + ldd r0,Z+12 + eor r5,r0 + ldd r0,Z+13 + eor r4,r0 + ldd r0,Z+14 + eor r3,r0 + ldd r0,Z+15 + eor r2,r0 + ldd r0,Z+24 + eor r0,r9 + std Z+24,r0 + ldd r0,Z+25 + eor r0,r8 + std Z+25,r0 + ldd r0,Z+26 + eor r0,r7 + std Z+26,r0 + ldd r0,Z+27 + eor r0,r6 + std Z+27,r0 + ldd r0,Z+28 + eor r0,r5 + std Z+28,r0 + ldd r0,Z+29 + eor r0,r4 + std Z+29,r0 + ldd r0,Z+30 + eor r0,r3 + std Z+30,r0 + ldd r0,Z+31 + eor r0,r2 + std Z+31,r0 + ld r29,Z + ldd r28,Z+1 + ldd r27,Z+2 + ldd r26,Z+3 + ldd r21,Z+4 + ldd r20,Z+5 + ldd r19,Z+6 + ldd r18,Z+7 + mov r0,r1 + lsr r29 + ror r28 + ror r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r29 + ror r28 + ror r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r29 + ror r28 + ror r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + or 
r29,r0 + movw r2,r18 + movw r4,r20 + movw r6,r26 + movw r8,r28 + bst r2,0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r5 + ror r4 + ror r3 + ror r2 + bld r9,7 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + eor r26,r6 + eor r27,r7 + eor r28,r8 + eor r29,r9 + ldi r17,252 + eor r18,r17 + com r19 + com r20 + com r21 + com r26 + com r27 + com r28 + com r29 + mov r0,r1 + bst r10,0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r11 + ror r10 + bld r25,5 + bld r0,0 + eor r18,r0 + ldd r0,Z+8 + eor r0,r29 + std Z+8,r0 + ldd r0,Z+9 + eor r0,r28 + std Z+9,r0 + ldd r0,Z+10 + eor r0,r27 + std Z+10,r0 + ldd r0,Z+11 + eor r0,r26 + std Z+11,r0 + ldd r0,Z+12 + eor r0,r21 + std Z+12,r0 + ldd r0,Z+13 + eor r0,r20 + std Z+13,r0 + ldd r0,Z+14 + eor r0,r19 + std Z+14,r0 + ldd r0,Z+15 + eor r0,r18 + std Z+15,r0 + ldd r9,Z+24 + ldd r8,Z+25 + ldd r7,Z+26 + ldd r6,Z+27 + ldd r5,Z+28 + ldd r4,Z+29 + ldd r3,Z+30 + ldd r2,Z+31 + mov r18,r9 + mov r19,r2 + mov r20,r3 + mov r21,r4 + mov r26,r5 + mov r27,r6 + mov r28,r7 + mov r29,r8 + lsl r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + adc r2,r1 + and r18,r2 + and r19,r3 + and r20,r4 + and r21,r5 + and r26,r6 + and r27,r7 + and r28,r8 + and r29,r9 + lsl r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + rol r8 + rol r9 + adc r2,r1 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + eor r26,r6 + eor r27,r7 + eor r28,r8 + eor r29,r9 + ld r0,Z + eor r29,r0 + ldd r0,Z+1 + eor r28,r0 + ldd r0,Z+2 + eor r27,r0 + ldd r0,Z+3 + eor r26,r0 + ldd r0,Z+4 + eor r21,r0 + ldd r0,Z+5 + eor r20,r0 + ldd r0,Z+6 + eor r19,r0 + ldd r0,Z+7 + eor r18,r0 + ldd r0,Z+16 + eor r0,r29 + std Z+16,r0 + ldd r0,Z+17 + eor r0,r28 + std Z+17,r0 + ldd r0,Z+18 + eor r0,r27 + std Z+18,r0 + ldd r0,Z+19 + eor r0,r26 + std Z+19,r0 + ldd r0,Z+20 + eor r0,r21 + std Z+20,r0 + ldd r0,Z+21 + eor r0,r20 + std Z+21,r0 + ldd r0,Z+22 + eor r0,r19 + std Z+22,r0 + ldd r0,Z+23 + eor r0,r18 + std Z+23,r0 + ldd r29,Z+8 + ldd r28,Z+9 + ldd r27,Z+10 + ldd r26,Z+11 + ldd r21,Z+12 + ldd r20,Z+13 + ldd r19,Z+14 + ldd r18,Z+15 + mov r0,r1 + lsr r29 + ror r28 + ror r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r29 + ror r28 + ror r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r29 + ror r28 + ror r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r29,r0 + movw r2,r18 + movw r4,r20 + movw r6,r26 + movw r8,r28 + bst r18,0 + lsr r29 + ror r28 + ror r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + bld r29,7 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + eor r6,r26 + eor r7,r27 + eor r8,r28 + eor r9,r29 + eor r2,r17 + com r3 + com r4 + com r5 + com r6 + com r7 + com r8 + com r9 + mov r0,r1 + bst r10,0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r13 + ror r12 + ror r11 + ror r10 + bld r25,5 + bld r0,0 + eor r2,r0 + ld r0,Z + eor r0,r9 + st Z,r0 + ldd r0,Z+1 + eor r0,r8 + std Z+1,r0 + ldd r0,Z+2 + eor r0,r7 + std Z+2,r0 + ldd r0,Z+3 + eor r0,r6 + std Z+3,r0 + ldd r0,Z+4 + eor r0,r5 + std Z+4,r0 + ldd r0,Z+5 + eor r0,r4 + std Z+5,r0 + ldd r0,Z+6 + eor r0,r3 + std Z+6,r0 + ldd r0,Z+7 + eor r0,r2 + std Z+7,r0 + dec r23 + breq 5407f + rjmp 16b +5407: + dec r22 + brne 5409f + rjmp 475f +5409: + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r28,Z+6 + ldd r29,Z+7 + ldd r2,Z+16 + ldd r3,Z+17 + ldd r4,Z+18 + ldd r5,Z+19 + ldd r6,Z+20 + ldd r7,Z+21 + ldd r8,Z+22 + ldd r9,Z+23 + st Z,r2 + std Z+1,r3 + std Z+2,r4 + std Z+3,r5 + std Z+4,r6 + std Z+5,r7 + std Z+6,r8 + std Z+7,r9 + std Z+16,r18 
+ std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + std Z+20,r26 + std Z+21,r27 + std Z+22,r28 + std Z+23,r29 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r28,Z+14 + ldd r29,Z+15 + ldd r2,Z+24 + ldd r3,Z+25 + ldd r4,Z+26 + ldd r5,Z+27 + ldd r6,Z+28 + ldd r7,Z+29 + ldd r8,Z+30 + ldd r9,Z+31 + std Z+8,r2 + std Z+9,r3 + std Z+10,r4 + std Z+11,r5 + std Z+12,r6 + std Z+13,r7 + std Z+14,r8 + std Z+15,r9 + std Z+24,r18 + std Z+25,r19 + std Z+26,r20 + std Z+27,r21 + std Z+28,r26 + std Z+29,r27 + std Z+30,r28 + std Z+31,r29 + rjmp 14b +475: + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size simp_256_permute, .-simp_256_permute + + .text +.global simp_192_permute + .type simp_192_permute, @function +simp_192_permute: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ldi r25,245 + mov r8,r25 + ldi r24,14 + mov r9,r24 + ldi r23,44 + mov r10,r23 + ldi r17,25 + mov r11,r17 + ldi r16,133 + mov r12,r16 + ldi r23,248 + mov r13,r23 + ldi r23,105 + mov r14,r23 + ldi r23,51 + mov r15,r23 +16: + ldi r23,13 +18: + ldd r27,Z+12 + ldd r26,Z+13 + ldd r21,Z+14 + ldd r20,Z+15 + ldd r19,Z+16 + ldd r18,Z+17 + mov r2,r27 + mov r3,r18 + mov r4,r19 + mov r5,r20 + mov r6,r21 + mov r7,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + adc r18,r1 + and r2,r18 + and r3,r19 + and r4,r20 + and r5,r21 + and r6,r26 + and r7,r27 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r26 + rol r27 + adc r18,r1 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + eor r6,r26 + eor r7,r27 + ldd r0,Z+6 + eor r7,r0 + ldd r0,Z+7 + eor r6,r0 + ldd r0,Z+8 + eor r5,r0 + ldd r0,Z+9 + eor r4,r0 + ldd r0,Z+10 + eor r3,r0 + ldd r0,Z+11 + eor r2,r0 + ldd r0,Z+18 + eor r0,r7 + std Z+18,r0 + ldd r0,Z+19 + eor r0,r6 + std Z+19,r0 + ldd r0,Z+20 + eor r0,r5 + std Z+20,r0 + ldd r0,Z+21 + eor r0,r4 + std Z+21,r0 + ldd r0,Z+22 + eor r0,r3 + std Z+22,r0 + ldd r0,Z+23 + eor r0,r2 + std Z+23,r0 + ld r27,Z + ldd r26,Z+1 + ldd r21,Z+2 + ldd r20,Z+3 + ldd r19,Z+4 + ldd r18,Z+5 + mov r0,r1 + lsr r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r27,r0 + movw r2,r18 + movw r4,r20 + movw r6,r26 + bst r2,0 + lsr r7 + ror r6 + ror r5 + ror r4 + ror r3 + ror r2 + bld r7,7 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + eor r26,r6 + eor r27,r7 + ldi r25,252 + eor r18,r25 + com r19 + com r20 + com r21 + com r26 + com r27 + mov r0,r1 + bst r8,0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r11 + ror r10 + ror r9 + ror r8 + bld r15,5 + bld r0,0 + eor r18,r0 + ldd r0,Z+6 + eor r0,r27 + std Z+6,r0 + ldd r0,Z+7 + eor r0,r26 + std Z+7,r0 + ldd r0,Z+8 + eor r0,r21 + std Z+8,r0 + ldd r0,Z+9 + eor r0,r20 + std Z+9,r0 + ldd r0,Z+10 + eor r0,r19 + std Z+10,r0 + ldd r0,Z+11 + eor r0,r18 + std Z+11,r0 + ldd r7,Z+18 + ldd r6,Z+19 + ldd r5,Z+20 + ldd r4,Z+21 + ldd r3,Z+22 + ldd r2,Z+23 + mov r18,r7 + mov r19,r2 + mov r20,r3 + mov r21,r4 + mov r26,r5 + mov r27,r6 + lsl r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + adc r2,r1 + and r18,r2 + and r19,r3 + and r20,r4 + and r21,r5 + and r26,r6 + and r27,r7 + lsl r2 + rol r3 + rol r4 + rol r5 + rol r6 + rol r7 + adc r2,r1 + eor r18,r2 + 
eor r19,r3 + eor r20,r4 + eor r21,r5 + eor r26,r6 + eor r27,r7 + ld r0,Z + eor r27,r0 + ldd r0,Z+1 + eor r26,r0 + ldd r0,Z+2 + eor r21,r0 + ldd r0,Z+3 + eor r20,r0 + ldd r0,Z+4 + eor r19,r0 + ldd r0,Z+5 + eor r18,r0 + ldd r0,Z+12 + eor r0,r27 + std Z+12,r0 + ldd r0,Z+13 + eor r0,r26 + std Z+13,r0 + ldd r0,Z+14 + eor r0,r21 + std Z+14,r0 + ldd r0,Z+15 + eor r0,r20 + std Z+15,r0 + ldd r0,Z+16 + eor r0,r19 + std Z+16,r0 + ldd r0,Z+17 + eor r0,r18 + std Z+17,r0 + ldd r27,Z+6 + ldd r26,Z+7 + ldd r21,Z+8 + ldd r20,Z+9 + ldd r19,Z+10 + ldd r18,Z+11 + mov r0,r1 + lsr r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + ror r0 + or r27,r0 + movw r2,r18 + movw r4,r20 + movw r6,r26 + bst r18,0 + lsr r27 + ror r26 + ror r21 + ror r20 + ror r19 + ror r18 + bld r27,7 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + eor r6,r26 + eor r7,r27 + eor r2,r25 + com r3 + com r4 + com r5 + com r6 + com r7 + mov r0,r1 + bst r8,0 + lsr r15 + ror r14 + ror r13 + ror r12 + ror r11 + ror r10 + ror r9 + ror r8 + bld r15,5 + bld r0,0 + eor r2,r0 + ld r0,Z + eor r0,r7 + st Z,r0 + ldd r0,Z+1 + eor r0,r6 + std Z+1,r0 + ldd r0,Z+2 + eor r0,r5 + std Z+2,r0 + ldd r0,Z+3 + eor r0,r4 + std Z+3,r0 + ldd r0,Z+4 + eor r0,r3 + std Z+4,r0 + ldd r0,Z+5 + eor r0,r2 + std Z+5,r0 + dec r23 + breq 5323f + rjmp 18b +5323: + dec r22 + breq 375f + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r2,Z+12 + ldd r3,Z+13 + ldd r4,Z+14 + ldd r5,Z+15 + ldd r6,Z+16 + ldd r7,Z+17 + st Z,r2 + std Z+1,r3 + std Z+2,r4 + std Z+3,r5 + std Z+4,r6 + std Z+5,r7 + std Z+12,r18 + std Z+13,r19 + std Z+14,r20 + std Z+15,r21 + std Z+16,r26 + std Z+17,r27 + ldd r18,Z+6 + ldd r19,Z+7 + ldd r20,Z+8 + ldd r21,Z+9 + ldd r26,Z+10 + ldd r27,Z+11 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + std Z+6,r2 + std Z+7,r3 + std Z+8,r4 + std Z+9,r5 + std Z+10,r6 + std Z+11,r7 + std Z+18,r18 + std Z+19,r19 + std Z+20,r20 + std Z+21,r21 + std Z+22,r26 + std Z+23,r27 + rjmp 16b +375: + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size simp_192_permute, .-simp_192_permute + +#endif diff --git a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys/internal-simp.c b/oribatida/Implementations/crypto_aead/oribatida256v12/rhys/internal-simp.c index 4ca50d0..5d2144e 100644 --- a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys/internal-simp.c +++ b/oribatida/Implementations/crypto_aead/oribatida256v12/rhys/internal-simp.c @@ -22,6 +22,8 @@ #include "internal-simp.h" +#if !defined(__AVR__) + /** * \brief Number of rounds for the inner block cipher within SimP-256. */ @@ -166,3 +168,5 @@ void simp_192_permute(unsigned char state[SIMP_192_STATE_SIZE], unsigned steps) be_store_word48(state + 12, x2); be_store_word48(state + 18, x3); } + +#endif /* !__AVR__ */ diff --git a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys/internal-util.h b/oribatida/Implementations/crypto_aead/oribatida256v12/rhys/internal-util.h index e79158c..e30166d 100644 --- a/oribatida/Implementations/crypto_aead/oribatida256v12/rhys/internal-util.h +++ b/oribatida/Implementations/crypto_aead/oribatida256v12/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. 
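The change to internal-simp.c above wraps the portable C permutation in #if !defined(__AVR__), so that on AVR the assembly file internal-simp-avr.S (itself guarded by #if defined(__AVR__)) is the only definition of simp_256_permute and simp_192_permute. A small sketch of that backend-selection pattern, using hypothetical file and function names:

/* internal-example.h (hypothetical) */
void example_permute(unsigned char state[32], unsigned steps);

/* internal-example.c: portable backend, compiled out on AVR */
#if !defined(__AVR__)
void example_permute(unsigned char state[32], unsigned steps)
{
    /* portable C implementation would go here */
    (void)state;
    (void)steps;
}
#endif /* !__AVR__ */

/* internal-example-avr.S would be wrapped in "#if defined(__AVR__) ... #endif"
 * and provide the same example_permute symbol when building for AVR. */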
+ * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) 
(rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/aead-common.c b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
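[Editorial illustration, not part of the patch.] The internal-util.h hunk above builds every 32-bit rotation out of rotations by 1 bit or by a multiple of 8, since those are the only cheap rotations on AVR. A minimal stand-alone sketch, using hypothetical helpers rotl32/rotr32 that mirror the generic leftRotate/rightRotate macros, shows that such a composition equals a direct rotation:

/* Check that "left by 13 = left by 16, then right by 1 three times",
 * the composition used by leftRotate13 in the hunk above. */
#include <assert.h>
#include <stdint.h>

static uint32_t rotl32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32 - bits));
}

static uint32_t rotr32(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32 - bits));
}

int main(void)
{
    uint32_t x = 0x12345678U;
    uint32_t composed = rotr32(rotr32(rotr32(rotl32(x, 16), 1), 1), 1);
    assert(composed == rotl32(x, 13));   /* 16 - 3 = 13 */
    return 0;
}

The header always picks the nearest multiple of 8 and then corrects with single-bit steps, which keeps the worst case at four single-bit rotations.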
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/aead-common.h b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
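[Editorial illustration, not part of the patch.] The deleted aead-common.c above folds all tag bytes into "accum" with OR of XORs and then derives a branch-free mask via (accum - 1) >> 8. A small sketch of that arithmetic, assuming the usual arithmetic right shift of negative int values (the library code relies on the same behaviour):

#include <assert.h>

int main(void)
{
    int equal_accum  = 0x00;   /* every tag byte matched */
    int differ_accum = 0x5a;   /* at least one byte differed */

    /* match: (0 - 1) >> 8 = all ones, so the plaintext is kept */
    assert((((equal_accum - 1) >> 8) & 0xff) == 0xff);
    /* mismatch: (0x5a - 1) >> 8 = 0, so the plaintext is wiped */
    assert((((differ_accum - 1) >> 8) & 0xff) == 0x00);
    return 0;
}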
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. 
- */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. 
- */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/api.h b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/encrypt.c b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/encrypt.c deleted file mode 100644 index a36c2ea..0000000 --- a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "photon-beetle.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return photon_beetle_128_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return photon_beetle_128_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/internal-photon256.c b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/internal-photon256.c deleted file mode 100644 index b8743fe..0000000 --- a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/internal-photon256.c +++ /dev/null @@ -1,479 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-photon256.h" -#include "internal-util.h" - -/** - * \brief Number of rounds in the PHOTON-256 permutation in bit-sliced form. 
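[Editorial illustration, not part of the patch.] The deleted encrypt.c and api.h above expose the scheme through the usual NIST LWC crypto_aead_encrypt()/crypto_aead_decrypt() entry points with a 16-byte key, 16-byte nonce and 16-byte tag. A hypothetical caller (prototypes restated from the deleted file so the fragment is self-contained; buffer contents are dummies, and NULL is passed for the unused secret nonce):

#include <string.h>

int crypto_aead_encrypt(unsigned char *c, unsigned long long *clen,
                        const unsigned char *m, unsigned long long mlen,
                        const unsigned char *ad, unsigned long long adlen,
                        const unsigned char *nsec,
                        const unsigned char *npub, const unsigned char *k);
int crypto_aead_decrypt(unsigned char *m, unsigned long long *mlen,
                        unsigned char *nsec,
                        const unsigned char *c, unsigned long long clen,
                        const unsigned char *ad, unsigned long long adlen,
                        const unsigned char *npub, const unsigned char *k);

int roundtrip(void)
{
    unsigned char key[16] = {0}, nonce[16] = {0};
    unsigned char msg[32] = {0}, ad[8] = {0};
    unsigned char ct[32 + 16];            /* ciphertext + 16-byte tag */
    unsigned char out[32];
    unsigned long long clen = 0, mlen = 0;

    if (crypto_aead_encrypt(ct, &clen, msg, sizeof(msg),
                            ad, sizeof(ad), 0, nonce, key) != 0)
        return -1;
    if (crypto_aead_decrypt(out, &mlen, 0, ct, clen,
                            ad, sizeof(ad), nonce, key) != 0)
        return -1;                        /* tag check failed */
    return memcmp(out, msg, sizeof(msg));
}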
- */ -#define PHOTON256_ROUNDS 12 - -/* Round constants for PHOTON-256 */ -static uint32_t const photon256_rc[PHOTON256_ROUNDS] = { - 0x96d2f0e1, 0xb4f0d2c3, 0xf0b49687, 0x692d0f1e, - 0x5a1e3c2d, 0x3c785a4b, 0xe1a58796, 0x4b0f2d3c, - 0x1e5a7869, 0xa5e1c3d2, 0xd296b4a5, 0x2d694b5a -}; - -/** - * \brief Evaluates the PHOTON-256 S-box in bit-sliced form. - * - * \param x0 Slice with bit 0 of all nibbles. - * \param x1 Slice with bit 1 of all nibbles. - * \param x2 Slice with bit 2 of all nibbles. - * \param x3 Slice with bit 3 of all nibbles. - * - * This bit-sliced S-box implementation is based on the AVR version - * "add_avr8_bitslice_asm" from the PHOTON-Beetle reference code. - */ -#define photon256_sbox(x0, x1, x2, x3) \ - do { \ - x1 ^= x2; \ - x3 ^= (x2 & x1); \ - t1 = x3; \ - x3 = (x3 & x1) ^ x2; \ - t2 = x3; \ - x3 ^= x0; \ - x3 = ~(x3); \ - x2 = x3; \ - t2 |= x0; \ - x0 ^= t1; \ - x1 ^= x0; \ - x2 |= x1; \ - x2 ^= t1; \ - x1 ^= t2; \ - x3 ^= x1; \ - } while (0) - -/** - * \brief Performs a field multiplication on the 8 nibbles in a row. - * - * \param a Field constant to multiply by. - * \param x Bit-sliced form of the row, with bits 0..3 of each nibble - * in bytes 0..3 of the word. - * - * \return a * x packed into the bytes of a word. - */ -static uint32_t photon256_field_multiply(uint8_t a, uint32_t x) -{ - /* For each 4-bit nibble we need to do this: - * - * result = 0; - * for (bit = 0; bit < 4; ++ bit) { - * if ((a & (1 << bit)) != 0) - * result ^= x; - * if ((x & 0x08) != 0) { - * x = (x << 1) ^ 3; - * } else { - * x = (x << 1); - * } - * } - * - * We don't need to worry about constant time for "a" because it is a - * known constant that isn't data-dependent. But we do need to worry - * about constant time for "x" as it is data. - */ - uint32_t result = 0; - uint32_t t; - #define PARALLEL_CONDITIONAL_ADD(bit) \ - do { \ - if ((a) & (1 << (bit))) \ - result ^= x; \ - } while (0) - #define PARALELL_ROTATE() \ - do { \ - t = x >> 24; \ - x = (x << 8) ^ t ^ (t << 8); \ - } while (0) - PARALLEL_CONDITIONAL_ADD(0); - PARALELL_ROTATE(); - PARALLEL_CONDITIONAL_ADD(1); - PARALELL_ROTATE(); - PARALLEL_CONDITIONAL_ADD(2); - PARALELL_ROTATE(); - PARALLEL_CONDITIONAL_ADD(3); - return result; -} - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) - -/** - * \brief Converts a PHOTON-256 state into bit-sliced form. - * - * \param out Points to the converted output. - * \param in Points to the PHOTON-256 state to convert. - */ -static void photon256_to_sliced - (uint32_t out[PHOTON256_STATE_SIZE / 4], - const unsigned char in[PHOTON256_STATE_SIZE]) -{ - /* We first scatter bits 0..3 of the nibbles to bytes 0..3 of the words. - * Then we rearrange the bytes to group all bits N into word N. - * - * Permutation generated with "http://programming.sirrida.de/calcperm.php". 
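[Editorial illustration, not part of the patch.] photon256_field_multiply() above applies, to all eight nibbles of a row at once, the per-nibble loop described in its comment. A scalar restatement may make the reduction step easier to see; gf16_mul is a hypothetical helper name, and the explicit & 0x0F mask is added here for clarity (the "^ 3" reduces by the field polynomial x^4 + x + 1, so x^4 maps to x + 1 = 0b0011):

static unsigned char gf16_mul(unsigned char a, unsigned char x)
{
    unsigned char result = 0;
    int bit;
    for (bit = 0; bit < 4; ++bit) {
        if (a & (1 << bit))
            result ^= x;                                    /* add this multiple of x */
        if (x & 0x08)
            x = (unsigned char)(((x << 1) ^ 0x03) & 0x0F);  /* double and reduce */
        else
            x = (unsigned char)((x << 1) & 0x0F);           /* double */
    }
    return result;
}

As the comment notes, "a" is a known round constant, so only the handling of the data-dependent "x" needs to stay constant-time; the word-parallel version achieves that with PARALELL_ROTATE instead of the conditional reduction shown here.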
- * - * P = [0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 - * 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31] - */ - uint32_t t0, t1, t2, t3; - #define TO_BITSLICED_PERM(x) \ - do { \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x0000ff00, 8); \ - } while (0) - #define FROM_BITSLICED_PERM(x) \ - do { \ - bit_permute_step(x, 0x00aa00aa, 7); \ - bit_permute_step(x, 0x0000cccc, 14); \ - bit_permute_step(x, 0x00f000f0, 4); \ - bit_permute_step(x, 0x0000ff00, 8); \ - } while (0) - t0 = le_load_word32(in); - t1 = le_load_word32(in + 4); - t2 = le_load_word32(in + 8); - t3 = le_load_word32(in + 12); - TO_BITSLICED_PERM(t0); - TO_BITSLICED_PERM(t1); - TO_BITSLICED_PERM(t2); - TO_BITSLICED_PERM(t3); - out[0] = (t0 & 0x000000FFU) | ((t1 << 8) & 0x0000FF00U) | - ((t2 << 16) & 0x00FF0000U) | ((t3 << 24) & 0xFF000000U); - out[1] = ((t0 >> 8) & 0x000000FFU) | (t1 & 0x0000FF00U) | - ((t2 << 8) & 0x00FF0000U) | ((t3 << 16) & 0xFF000000U); - out[2] = ((t0 >> 16) & 0x000000FFU) | ((t1 >> 8) & 0x0000FF00U) | - (t2 & 0x00FF0000U) | ((t3 << 8) & 0xFF000000U); - out[3] = ((t0 >> 24) & 0x000000FFU) | ((t1 >> 16) & 0x0000FF00U) | - ((t2 >> 8) & 0x00FF0000U) | (t3 & 0xFF000000U); - t0 = le_load_word32(in + 16); - t1 = le_load_word32(in + 20); - t2 = le_load_word32(in + 24); - t3 = le_load_word32(in + 28); - TO_BITSLICED_PERM(t0); - TO_BITSLICED_PERM(t1); - TO_BITSLICED_PERM(t2); - TO_BITSLICED_PERM(t3); - out[4] = (t0 & 0x000000FFU) | ((t1 << 8) & 0x0000FF00U) | - ((t2 << 16) & 0x00FF0000U) | ((t3 << 24) & 0xFF000000U); - out[5] = ((t0 >> 8) & 0x000000FFU) | (t1 & 0x0000FF00U) | - ((t2 << 8) & 0x00FF0000U) | ((t3 << 16) & 0xFF000000U); - out[6] = ((t0 >> 16) & 0x000000FFU) | ((t1 >> 8) & 0x0000FF00U) | - (t2 & 0x00FF0000U) | ((t3 << 8) & 0xFF000000U); - out[7] = ((t0 >> 24) & 0x000000FFU) | ((t1 >> 16) & 0x0000FF00U) | - ((t2 >> 8) & 0x00FF0000U) | (t3 & 0xFF000000U); -} - -/** - * \brief Converts a PHOTON-256 state from bit-sliced form. - * - * \param out Points to the converted output. - * \param in Points to the PHOTON-256 state to convert. 
- */ -static void photon256_from_sliced - (unsigned char out[PHOTON256_STATE_SIZE], - const unsigned char in[PHOTON256_STATE_SIZE]) -{ - /* Do the reverse of photon256_to_sliced() */ - uint32_t x0, x1, x2, x3; - x0 = ((uint32_t)(in[0])) | - (((uint32_t)(in[4])) << 8) | - (((uint32_t)(in[8])) << 16) | - (((uint32_t)(in[12])) << 24); - x1 = ((uint32_t)(in[1])) | - (((uint32_t)(in[5])) << 8) | - (((uint32_t)(in[9])) << 16) | - (((uint32_t)(in[13])) << 24); - x2 = ((uint32_t)(in[2])) | - (((uint32_t)(in[6])) << 8) | - (((uint32_t)(in[10])) << 16) | - (((uint32_t)(in[14])) << 24); - x3 = ((uint32_t)(in[3])) | - (((uint32_t)(in[7])) << 8) | - (((uint32_t)(in[11])) << 16) | - (((uint32_t)(in[15])) << 24); - FROM_BITSLICED_PERM(x0); - FROM_BITSLICED_PERM(x1); - FROM_BITSLICED_PERM(x2); - FROM_BITSLICED_PERM(x3); - le_store_word32(out, x0); - le_store_word32(out + 4, x1); - le_store_word32(out + 8, x2); - le_store_word32(out + 12, x3); - x0 = ((uint32_t)(in[16])) | - (((uint32_t)(in[20])) << 8) | - (((uint32_t)(in[24])) << 16) | - (((uint32_t)(in[28])) << 24); - x1 = ((uint32_t)(in[17])) | - (((uint32_t)(in[21])) << 8) | - (((uint32_t)(in[25])) << 16) | - (((uint32_t)(in[29])) << 24); - x2 = ((uint32_t)(in[18])) | - (((uint32_t)(in[22])) << 8) | - (((uint32_t)(in[26])) << 16) | - (((uint32_t)(in[30])) << 24); - x3 = ((uint32_t)(in[19])) | - (((uint32_t)(in[23])) << 8) | - (((uint32_t)(in[27])) << 16) | - (((uint32_t)(in[31])) << 24); - FROM_BITSLICED_PERM(x0); - FROM_BITSLICED_PERM(x1); - FROM_BITSLICED_PERM(x2); - FROM_BITSLICED_PERM(x3); - le_store_word32(out + 16, x0); - le_store_word32(out + 20, x1); - le_store_word32(out + 24, x2); - le_store_word32(out + 28, x3); -} - -#if defined(LW_UTIL_LITTLE_ENDIAN) -/* Index the bit-sliced state bytes in little-endian byte order */ -#define READ_ROW0() \ - (((uint32_t)(S.bytes[0])) | \ - (((uint32_t)(S.bytes[4])) << 8) | \ - (((uint32_t)(S.bytes[8])) << 16) | \ - (((uint32_t)(S.bytes[12])) << 24)) -#define READ_ROW1() \ - (((uint32_t)(S.bytes[1])) | \ - (((uint32_t)(S.bytes[5])) << 8) | \ - (((uint32_t)(S.bytes[9])) << 16) | \ - (((uint32_t)(S.bytes[13])) << 24)) -#define READ_ROW2() \ - (((uint32_t)(S.bytes[2])) | \ - (((uint32_t)(S.bytes[6])) << 8) | \ - (((uint32_t)(S.bytes[10])) << 16) | \ - (((uint32_t)(S.bytes[14])) << 24)) -#define READ_ROW3() \ - (((uint32_t)(S.bytes[3])) | \ - (((uint32_t)(S.bytes[7])) << 8) | \ - (((uint32_t)(S.bytes[11])) << 16) | \ - (((uint32_t)(S.bytes[15])) << 24)) -#define READ_ROW4() \ - (((uint32_t)(S.bytes[16])) | \ - (((uint32_t)(S.bytes[20])) << 8) | \ - (((uint32_t)(S.bytes[24])) << 16) | \ - (((uint32_t)(S.bytes[28])) << 24)) -#define READ_ROW5() \ - (((uint32_t)(S.bytes[17])) | \ - (((uint32_t)(S.bytes[21])) << 8) | \ - (((uint32_t)(S.bytes[25])) << 16) | \ - (((uint32_t)(S.bytes[29])) << 24)) -#define READ_ROW6() \ - (((uint32_t)(S.bytes[18])) | \ - (((uint32_t)(S.bytes[22])) << 8) | \ - (((uint32_t)(S.bytes[26])) << 16) | \ - (((uint32_t)(S.bytes[30])) << 24)) -#define READ_ROW7() \ - (((uint32_t)(S.bytes[19])) | \ - (((uint32_t)(S.bytes[23])) << 8) | \ - (((uint32_t)(S.bytes[27])) << 16) | \ - (((uint32_t)(S.bytes[31])) << 24)) -#define WRITE_ROW(row, value) \ - do { \ - if ((row) < 4) { \ - S.bytes[(row)] = (uint8_t)(value); \ - S.bytes[(row) + 4] = (uint8_t)((value) >> 8); \ - S.bytes[(row) + 8] = (uint8_t)((value) >> 16); \ - S.bytes[(row) + 12] = (uint8_t)((value) >> 24); \ - } else { \ - S.bytes[(row) + 12] = (uint8_t)(value); \ - S.bytes[(row) + 16] = (uint8_t)((value) >> 8); \ - S.bytes[(row) + 20] = 
(uint8_t)((value) >> 16); \ - S.bytes[(row) + 24] = (uint8_t)((value) >> 24); \ - } \ - } while (0) -#else -/* Index the bit-sliced state bytes in big-endian byte order */ -#define READ_ROW0() \ - (((uint32_t)(S.bytes[3])) | \ - (((uint32_t)(S.bytes[7])) << 8) | \ - (((uint32_t)(S.bytes[11])) << 16) | \ - (((uint32_t)(S.bytes[15])) << 24)) -#define READ_ROW1() \ - (((uint32_t)(S.bytes[2])) | \ - (((uint32_t)(S.bytes[6])) << 8) | \ - (((uint32_t)(S.bytes[10])) << 16) | \ - (((uint32_t)(S.bytes[14])) << 24)) -#define READ_ROW2() \ - (((uint32_t)(S.bytes[1])) | \ - (((uint32_t)(S.bytes[5])) << 8) | \ - (((uint32_t)(S.bytes[9])) << 16) | \ - (((uint32_t)(S.bytes[13])) << 24)) -#define READ_ROW3() \ - (((uint32_t)(S.bytes[0])) | \ - (((uint32_t)(S.bytes[4])) << 8) | \ - (((uint32_t)(S.bytes[8])) << 16) | \ - (((uint32_t)(S.bytes[12])) << 24)) -#define READ_ROW4() \ - (((uint32_t)(S.bytes[19])) | \ - (((uint32_t)(S.bytes[23])) << 8) | \ - (((uint32_t)(S.bytes[27])) << 16) | \ - (((uint32_t)(S.bytes[31])) << 24)) -#define READ_ROW5() \ - (((uint32_t)(S.bytes[18])) | \ - (((uint32_t)(S.bytes[22])) << 8) | \ - (((uint32_t)(S.bytes[26])) << 16) | \ - (((uint32_t)(S.bytes[30])) << 24)) -#define READ_ROW6() \ - (((uint32_t)(S.bytes[17])) | \ - (((uint32_t)(S.bytes[21])) << 8) | \ - (((uint32_t)(S.bytes[25])) << 16) | \ - (((uint32_t)(S.bytes[29])) << 24)) -#define READ_ROW7() \ - (((uint32_t)(S.bytes[16])) | \ - (((uint32_t)(S.bytes[20])) << 8) | \ - (((uint32_t)(S.bytes[24])) << 16) | \ - (((uint32_t)(S.bytes[28])) << 24)) -#define WRITE_ROW(row, value) \ - do { \ - if ((row) < 4) { \ - S.bytes[3 - (row)] = (uint8_t)(value); \ - S.bytes[7 - (row)] = (uint8_t)((value) >> 8); \ - S.bytes[11 - (row)] = (uint8_t)((value) >> 16); \ - S.bytes[15 - (row)] = (uint8_t)((value) >> 24); \ - } else { \ - S.bytes[20 - (row)] = (uint8_t)(value); \ - S.bytes[24 - (row)] = (uint8_t)((value) >> 8); \ - S.bytes[28 - (row)] = (uint8_t)((value) >> 16); \ - S.bytes[32 - (row)] = (uint8_t)((value) >> 24); \ - } \ - } while (0) -#endif - -void photon256_permute(unsigned char state[PHOTON256_STATE_SIZE]) -{ - union { - uint32_t words[PHOTON256_STATE_SIZE / 4]; - uint8_t bytes[PHOTON256_STATE_SIZE]; - } S; - uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8; - uint8_t round; - - /* Convert the state into bit-sliced form */ - photon256_to_sliced(S.words, state); - - /* Perform all 12 permutation rounds */ - for (round = 0; round < PHOTON256_ROUNDS; ++round) { - /* Add the constants for this round */ - t0 = photon256_rc[round]; - S.words[0] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[1] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[2] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[3] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[4] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[5] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[6] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[7] ^= t0 & 0x01010101U; - - /* Apply the sbox to all nibbles in the state */ - photon256_sbox(S.words[0], S.words[1], S.words[2], S.words[3]); - photon256_sbox(S.words[4], S.words[5], S.words[6], S.words[7]); - - /* Rotate all rows left by the row number. - * - * We do this by applying permutations to the top and bottom words - * to rearrange the bits into the rotated form. Permutations - * generated with "http://programming.sirrida.de/calcperm.php". 
- * - * P_top = [0 1 2 3 4 5 6 7 15 8 9 10 11 12 13 14 22 23 - * 16 17 18 19 20 21 29 30 31 24 25 26 27 28] - * P_bot = [4 5 6 7 0 1 2 3 11 12 13 14 15 8 9 10 18 19 - * 20 21 22 23 16 17 25 26 27 28 29 30 31 24 - */ - #define TOP_ROTATE_PERM(x) \ - do { \ - t1 = (x); \ - bit_permute_step(t1, 0x07030100, 4); \ - bit_permute_step(t1, 0x22331100, 2); \ - bit_permute_step(t1, 0x55005500, 1); \ - (x) = t1; \ - } while (0) - #define BOTTOM_ROTATE_PERM(x) \ - do { \ - t1 = (x); \ - bit_permute_step(t1, 0x080c0e0f, 4); \ - bit_permute_step(t1, 0x22331100, 2); \ - bit_permute_step(t1, 0x55005500, 1); \ - (x) = t1; \ - } while (0) - TOP_ROTATE_PERM(S.words[0]); - TOP_ROTATE_PERM(S.words[1]); - TOP_ROTATE_PERM(S.words[2]); - TOP_ROTATE_PERM(S.words[3]); - BOTTOM_ROTATE_PERM(S.words[4]); - BOTTOM_ROTATE_PERM(S.words[5]); - BOTTOM_ROTATE_PERM(S.words[6]); - BOTTOM_ROTATE_PERM(S.words[7]); - - /* Mix the columns */ - #define MUL(a, x) (photon256_field_multiply((a), (x))) - t0 = READ_ROW0(); - t1 = READ_ROW1(); - t2 = READ_ROW2(); - t3 = READ_ROW3(); - t4 = READ_ROW4(); - t5 = READ_ROW5(); - t6 = READ_ROW6(); - t7 = READ_ROW7(); - t8 = MUL(0x02, t0) ^ MUL(0x04, t1) ^ MUL(0x02, t2) ^ MUL(0x0b, t3) ^ - MUL(0x02, t4) ^ MUL(0x08, t5) ^ MUL(0x05, t6) ^ MUL(0x06, t7); - WRITE_ROW(0, t8); - t8 = MUL(0x0c, t0) ^ MUL(0x09, t1) ^ MUL(0x08, t2) ^ MUL(0x0d, t3) ^ - MUL(0x07, t4) ^ MUL(0x07, t5) ^ MUL(0x05, t6) ^ MUL(0x02, t7); - WRITE_ROW(1, t8); - t8 = MUL(0x04, t0) ^ MUL(0x04, t1) ^ MUL(0x0d, t2) ^ MUL(0x0d, t3) ^ - MUL(0x09, t4) ^ MUL(0x04, t5) ^ MUL(0x0d, t6) ^ MUL(0x09, t7); - WRITE_ROW(2, t8); - t8 = MUL(0x01, t0) ^ MUL(0x06, t1) ^ MUL(0x05, t2) ^ MUL(0x01, t3) ^ - MUL(0x0c, t4) ^ MUL(0x0d, t5) ^ MUL(0x0f, t6) ^ MUL(0x0e, t7); - WRITE_ROW(3, t8); - t8 = MUL(0x0f, t0) ^ MUL(0x0c, t1) ^ MUL(0x09, t2) ^ MUL(0x0d, t3) ^ - MUL(0x0e, t4) ^ MUL(0x05, t5) ^ MUL(0x0e, t6) ^ MUL(0x0d, t7); - WRITE_ROW(4, t8); - t8 = MUL(0x09, t0) ^ MUL(0x0e, t1) ^ MUL(0x05, t2) ^ MUL(0x0f, t3) ^ - MUL(0x04, t4) ^ MUL(0x0c, t5) ^ MUL(0x09, t6) ^ MUL(0x06, t7); - WRITE_ROW(5, t8); - t8 = MUL(0x0c, t0) ^ MUL(0x02, t1) ^ MUL(0x02, t2) ^ MUL(0x0a, t3) ^ - MUL(0x03, t4) ^ MUL(0x01, t5) ^ MUL(0x01, t6) ^ MUL(0x0e, t7); - WRITE_ROW(6, t8); - t8 = MUL(0x0f, t0) ^ MUL(0x01, t1) ^ MUL(0x0d, t2) ^ MUL(0x0a, t3) ^ - MUL(0x05, t4) ^ MUL(0x0a, t5) ^ MUL(0x02, t6) ^ MUL(0x03, t7); - WRITE_ROW(7, t8); - } - - /* Convert back from bit-sliced form to regular form */ - photon256_from_sliced(state, S.bytes); -} diff --git a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/internal-photon256.h b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/internal-photon256.h deleted file mode 100644 index ce8729a..0000000 --- a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/internal-photon256.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_PHOTON256_H -#define LW_INTERNAL_PHOTON256_H - -/** - * \file internal-photon256.h - * \brief Internal implementation of the PHOTON-256 permutation. - * - * Warning: The current implementation of PHOTON-256 is constant-time - * but not constant-cache. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the PHOTON-256 permutation state in bytes. - */ -#define PHOTON256_STATE_SIZE 32 - -/** - * \brief Permutes the PHOTON-256 state. - * - * \param state The state to be permuted. - */ -void photon256_permute(unsigned char state[PHOTON256_STATE_SIZE]); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/internal-util.h b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
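[Editorial illustration, not part of the patch.] The deleted internal-photon256.h above declares photon256_permute() over a 32-byte state that is updated in place. A minimal caller sketch, with the declaration and size constant restated so the fragment is self-contained:

#include <string.h>

#define PHOTON256_STATE_SIZE 32
void photon256_permute(unsigned char state[PHOTON256_STATE_SIZE]);

void example(void)
{
    unsigned char state[PHOTON256_STATE_SIZE];
    memset(state, 0, sizeof(state));   /* e.g. key/nonce absorbed here */
    photon256_permute(state);          /* one application of the permutation */
}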
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/photon-beetle.c b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/photon-beetle.c deleted file mode 100644 index f44bdad..0000000 --- a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/photon-beetle.c +++ /dev/null @@ -1,451 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "photon-beetle.h" -#include "internal-photon256.h" -#include "internal-util.h" -#include - -aead_cipher_t const photon_beetle_128_cipher = { - "PHOTON-Beetle-AEAD-ENC-128", - PHOTON_BEETLE_KEY_SIZE, - PHOTON_BEETLE_NONCE_SIZE, - PHOTON_BEETLE_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - photon_beetle_128_aead_encrypt, - photon_beetle_128_aead_decrypt -}; - -aead_cipher_t const photon_beetle_32_cipher = { - "PHOTON-Beetle-AEAD-ENC-32", - PHOTON_BEETLE_KEY_SIZE, - PHOTON_BEETLE_NONCE_SIZE, - PHOTON_BEETLE_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - photon_beetle_32_aead_encrypt, - photon_beetle_32_aead_decrypt -}; - -aead_hash_algorithm_t const photon_beetle_hash_algorithm = { - "PHOTON-Beetle-HASH", - sizeof(int), - PHOTON_BEETLE_HASH_SIZE, - AEAD_FLAG_NONE, - photon_beetle_hash, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \brief Rate of operation for PHOTON-Beetle-AEAD-ENC-128. - */ -#define PHOTON_BEETLE_128_RATE 16 - -/** - * \brief Rate of operation for PHOTON-Beetle-AEAD-ENC-32. - */ -#define PHOTON_BEETLE_32_RATE 4 - -/* Shifts a domain constant from the spec to the correct bit position */ -#define DOMAIN(c) ((c) << 5) - -/** - * \brief Processes the associated data for PHOTON-Beetle. - * - * \param state PHOTON-256 permutation state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data, must be non-zero. - * \param rate Rate of absorption for the data. 
- * \param mempty Non-zero if the message is empty. - */ -static void photon_beetle_process_ad - (unsigned char state[PHOTON256_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen, - unsigned rate, int mempty) -{ - unsigned temp; - - /* Absorb as many full rate blocks as possible */ - while (adlen > rate) { - photon256_permute(state); - lw_xor_block(state, ad, rate); - ad += rate; - adlen -= rate; - } - - /* Pad and absorb the last block */ - temp = (unsigned)adlen; - photon256_permute(state); - lw_xor_block(state, ad, temp); - if (temp < rate) - state[temp] ^= 0x01; /* padding */ - - /* Add the domain constant to finalize associated data processing */ - if (mempty && temp == rate) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(3); - else if (mempty) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(4); - else if (temp == rate) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - else - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(2); -} - -/** - * \brief Rotates part of the PHOTON-256 state right by one bit. - * - * \param out Output state buffer. - * \param in Input state buffer, must not overlap with \a out. - * \param len Length of the state buffer. - */ -static void photon_beetle_rotate1 - (unsigned char *out, const unsigned char *in, unsigned len) -{ - unsigned posn; - for (posn = 0; posn < (len - 1); ++posn) - out[posn] = (in[posn] >> 1) | (in[posn + 1] << 7); - out[len - 1] = (in[len - 1] >> 1) | (in[0] << 7); -} - -/** - * \brief Encrypts a plaintext block with PHOTON-Beetle. - * - * \param state PHOTON-256 permutation state. - * \param c Points to the ciphertext output buffer. - * \param m Points to the plaintext input buffer. - * \param mlen Length of the message, must be non-zero. - * \param rate Rate of absorption for the data. - * \param adempty Non-zero if the associated data is empty. - */ -static void photon_beetle_encrypt - (unsigned char state[PHOTON256_STATE_SIZE], - unsigned char *c, const unsigned char *m, unsigned long long mlen, - unsigned rate, int adempty) -{ - unsigned char shuffle[PHOTON_BEETLE_128_RATE]; /* Block of max rate size */ - unsigned temp; - - /* Process all plaintext blocks except the last */ - while (mlen > rate) { - photon256_permute(state); - memcpy(shuffle, state + rate / 2, rate / 2); - photon_beetle_rotate1(shuffle + rate / 2, state, rate / 2); - lw_xor_block(state, m, rate); - lw_xor_block_2_src(c, m, shuffle, rate); - c += rate; - m += rate; - mlen -= rate; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - photon256_permute(state); - memcpy(shuffle, state + rate / 2, rate / 2); - photon_beetle_rotate1(shuffle + rate / 2, state, rate / 2); - if (temp == rate) { - lw_xor_block(state, m, rate); - lw_xor_block_2_src(c, m, shuffle, rate); - } else { - lw_xor_block(state, m, temp); - state[temp] ^= 0x01; /* padding */ - lw_xor_block_2_src(c, m, shuffle, temp); - } - - /* Add the domain constant to finalize message processing */ - if (adempty && temp == rate) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(5); - else if (adempty) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(6); - else if (temp == rate) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - else - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(2); -} - -/** - * \brief Decrypts a ciphertext block with PHOTON-Beetle. - * - * \param state PHOTON-256 permutation state. - * \param m Points to the plaintext output buffer. - * \param c Points to the ciphertext input buffer. - * \param mlen Length of the message, must be non-zero. - * \param rate Rate of absorption for the data. 
- * \param adempty Non-zero if the associated data is empty. - */ -static void photon_beetle_decrypt - (unsigned char state[PHOTON256_STATE_SIZE], - unsigned char *m, const unsigned char *c, unsigned long long mlen, - unsigned rate, int adempty) -{ - unsigned char shuffle[PHOTON_BEETLE_128_RATE]; /* Block of max rate size */ - unsigned temp; - - /* Process all plaintext blocks except the last */ - while (mlen > rate) { - photon256_permute(state); - memcpy(shuffle, state + rate / 2, rate / 2); - photon_beetle_rotate1(shuffle + rate / 2, state, rate / 2); - lw_xor_block_2_src(m, c, shuffle, rate); - lw_xor_block(state, m, rate); - c += rate; - m += rate; - mlen -= rate; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - photon256_permute(state); - memcpy(shuffle, state + rate / 2, rate / 2); - photon_beetle_rotate1(shuffle + rate / 2, state, rate / 2); - if (temp == rate) { - lw_xor_block_2_src(m, c, shuffle, rate); - lw_xor_block(state, m, rate); - } else { - lw_xor_block_2_src(m, c, shuffle, temp); - lw_xor_block(state, m, temp); - state[temp] ^= 0x01; /* padding */ - } - - /* Add the domain constant to finalize message processing */ - if (adempty && temp == rate) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(5); - else if (adempty) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(6); - else if (temp == rate) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - else - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(2); -} - -int photon_beetle_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + PHOTON_BEETLE_TAG_SIZE; - - /* Initialize the state by concatenating the nonce and the key */ - memcpy(state, npub, 16); - memcpy(state + 16, k, 16); - - /* Process the associated data */ - if (adlen > 0) { - photon_beetle_process_ad - (state, ad, adlen, PHOTON_BEETLE_128_RATE, mlen == 0); - } else if (mlen == 0) { - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - } - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - photon_beetle_encrypt - (state, c, m, mlen, PHOTON_BEETLE_128_RATE, adlen == 0); - } - - /* Generate the authentication tag */ - photon256_permute(state); - memcpy(c + mlen, state, PHOTON_BEETLE_TAG_SIZE); - return 0; -} - -int photon_beetle_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < PHOTON_BEETLE_TAG_SIZE) - return -1; - *mlen = clen - PHOTON_BEETLE_TAG_SIZE; - - /* Initialize the state by concatenating the nonce and the key */ - memcpy(state, npub, 16); - memcpy(state + 16, k, 16); - - /* Process the associated data */ - clen -= PHOTON_BEETLE_TAG_SIZE; - if (adlen > 0) { - photon_beetle_process_ad - (state, ad, adlen, PHOTON_BEETLE_128_RATE, clen == 0); - } else if (clen == 0) { - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - } - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > 0) { - photon_beetle_decrypt - (state, m, c, clen, PHOTON_BEETLE_128_RATE, adlen == 0); 
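
    /* Domain separation summary: DOMAIN(c) places the 3-bit constant c in
     * the top bits of the last state byte.  photon_beetle_process_ad()
     * selects 1/2 when a message follows (full/partial final AD block)
     * and 3/4 when the message is empty; photon_beetle_encrypt() and
     * photon_beetle_decrypt() select 1/2 when associated data was present
     * and 5/6 when it was empty; the top-level functions XOR in DOMAIN(1)
     * when both the associated data and the message are empty. */
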
- } - - /* Check the authentication tag */ - photon256_permute(state); - return aead_check_tag(m, clen, state, c + clen, PHOTON_BEETLE_TAG_SIZE); -} - -int photon_beetle_32_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + PHOTON_BEETLE_TAG_SIZE; - - /* Initialize the state by concatenating the nonce and the key */ - memcpy(state, npub, 16); - memcpy(state + 16, k, 16); - - /* Process the associated data */ - if (adlen > 0) { - photon_beetle_process_ad - (state, ad, adlen, PHOTON_BEETLE_32_RATE, mlen == 0); - } else if (mlen == 0) { - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - } - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - photon_beetle_encrypt - (state, c, m, mlen, PHOTON_BEETLE_32_RATE, adlen == 0); - } - - /* Generate the authentication tag */ - photon256_permute(state); - memcpy(c + mlen, state, PHOTON_BEETLE_TAG_SIZE); - return 0; -} - -int photon_beetle_32_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < PHOTON_BEETLE_TAG_SIZE) - return -1; - *mlen = clen - PHOTON_BEETLE_TAG_SIZE; - - /* Initialize the state by concatenating the nonce and the key */ - memcpy(state, npub, 16); - memcpy(state + 16, k, 16); - - /* Process the associated data */ - clen -= PHOTON_BEETLE_TAG_SIZE; - if (adlen > 0) { - photon_beetle_process_ad - (state, ad, adlen, PHOTON_BEETLE_32_RATE, clen == 0); - } else if (clen == 0) { - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - } - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > 0) { - photon_beetle_decrypt - (state, m, c, clen, PHOTON_BEETLE_32_RATE, adlen == 0); - } - - /* Check the authentication tag */ - photon256_permute(state); - return aead_check_tag(m, clen, state, c + clen, PHOTON_BEETLE_TAG_SIZE); -} - -int photon_beetle_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - unsigned temp; - - /* Absorb the input data */ - if (inlen == 0) { - /* No input data at all */ - memset(state, 0, sizeof(state) - 1); - state[PHOTON256_STATE_SIZE - 1] = DOMAIN(1); - } else if (inlen <= PHOTON_BEETLE_128_RATE) { - /* Only one block of input data, which may require padding */ - temp = (unsigned)inlen; - memcpy(state, in, temp); - memset(state + temp, 0, sizeof(state) - temp - 1); - if (temp < PHOTON_BEETLE_128_RATE) { - state[temp] = 0x01; - state[PHOTON256_STATE_SIZE - 1] = DOMAIN(1); - } else { - state[PHOTON256_STATE_SIZE - 1] = DOMAIN(2); - } - } else { - /* Initialize the state with the first block, then absorb the rest */ - memcpy(state, in, PHOTON_BEETLE_128_RATE); - memset(state + PHOTON_BEETLE_128_RATE, 0, - sizeof(state) - PHOTON_BEETLE_128_RATE); - in += PHOTON_BEETLE_128_RATE; - inlen -= PHOTON_BEETLE_128_RATE; - while (inlen > PHOTON_BEETLE_32_RATE) { - photon256_permute(state); - lw_xor_block(state, in, PHOTON_BEETLE_32_RATE); - in += PHOTON_BEETLE_32_RATE; - 
inlen -= PHOTON_BEETLE_32_RATE; - } - photon256_permute(state); - temp = (unsigned)inlen; - if (temp == PHOTON_BEETLE_32_RATE) { - lw_xor_block(state, in, PHOTON_BEETLE_32_RATE); - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - } else { - lw_xor_block(state, in, temp); - state[temp] ^= 0x01; - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(2); - } - } - - /* Generate the output hash */ - photon256_permute(state); - memcpy(out, state, 16); - photon256_permute(state); - memcpy(out + 16, state, 16); - return 0; -} diff --git a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/photon-beetle.h b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/photon-beetle.h deleted file mode 100644 index 2d94a7e..0000000 --- a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys-avr/photon-beetle.h +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_PHOTON_BEETLE_H -#define LWCRYPTO_PHOTON_BEETLE_H - -#include "aead-common.h" - -/** - * \file photon-beetle.h - * \brief PHOTON-Beetle authenticated encryption algorithm. - * - * PHOTON-Beetle is a family of authenticated encryption algorithms based - * on the PHOTON-256 permutation and using the Beetle sponge mode. - * There are three algorithms in the family: - * - * \li PHOTON-Beetle-AEAD-ENC-128 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag. Data is handled in 16 byte blocks. This is the primary - * member of the family for encryption. - * \li PHOTON-Beetle-AEAD-ENC-32 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag. Data is handled in 4 byte blocks. - * \li PHOTON-Beetle-Hash with a 256-bit hash output. The initial data is - * handled as a 16 byte block, and then the remaining bytes are processed - * in 4 byte blocks. - * - * References: https://www.isical.ac.in/~lightweight/beetle/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for PHOTON-Beetle. - */ -#define PHOTON_BEETLE_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PHOTON-Beetle. - */ -#define PHOTON_BEETLE_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PHOTON-Beetle. - */ -#define PHOTON_BEETLE_NONCE_SIZE 16 - -/** - * \brief Size of the hash output for PHOTON-Beetle-HASH. - */ -#define PHOTON_BEETLE_HASH_SIZE 32 - -/** - * \brief Meta-information block for the PHOTON-Beetle-AEAD-ENC-128 cipher. 
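
A minimal usage sketch of the AEAD API this header declares (editorial, not part of the patch): buffer sizes come from the macros above, and the all-zero key and nonce are placeholders for illustration only.

#include <stdio.h>
#include "photon-beetle.h"

int main(void)
{
    unsigned char key[PHOTON_BEETLE_KEY_SIZE] = {0};
    unsigned char nonce[PHOTON_BEETLE_NONCE_SIZE] = {0};
    unsigned char msg[4] = {'t', 'e', 's', 't'};
    unsigned char ct[sizeof(msg) + PHOTON_BEETLE_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long clen, mlen;

    /* Encrypt: the output is the ciphertext followed by the 16-byte tag */
    photon_beetle_128_aead_encrypt(ct, &clen, msg, sizeof(msg),
                                   NULL, 0, NULL, nonce, key);

    /* Decrypt: returns 0 on success, -1 if the tag does not verify */
    if (photon_beetle_128_aead_decrypt(pt, &mlen, NULL, ct, clen,
                                       NULL, 0, nonce, key) == 0)
        printf("tag OK, %llu bytes recovered\n", mlen);
    return 0;
}
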
- */ -extern aead_cipher_t const photon_beetle_128_cipher; - -/** - * \brief Meta-information block for the PHOTON-Beetle-AEAD-ENC-32 cipher. - */ -extern aead_cipher_t const photon_beetle_32_cipher; - -/** - * \brief Meta-information block for the PHOTON-Beetle-HASH algorithm. - */ -extern aead_hash_algorithm_t const photon_beetle_hash_algorithm; - -/** - * \brief Encrypts and authenticates a packet with PHOTON-Beetle-AEAD-ENC-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa photon_beetle_128_aead_decrypt() - */ -int photon_beetle_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PHOTON-Beetle-AEAD-ENC-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa photon_beetle_128_aead_encrypt() - */ -int photon_beetle_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PHOTON-Beetle-AEAD-ENC-32. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. 
- * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa photon_beetle_32_aead_decrypt() - */ -int photon_beetle_32_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PHOTON-Beetle-AEAD-ENC-32. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa photon_beetle_32_aead_encrypt() - */ -int photon_beetle_32_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with PHOTON-Beetle-HASH to - * generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * PHOTON_BEETLE_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int photon_beetle_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys/internal-util.h b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys/internal-util.h +++ b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate128v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/aead-common.c b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
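
As a sanity check on the composed rotations introduced in the hunk above, the following self-contained sketch (editorial; the leftRotate/rightRotate macros here are local stand-ins that evaluate their argument more than once, unlike the statement-expression versions in internal-util.h) verifies one composed form against a direct rotation.

#include <stdint.h>
#include <assert.h>

/* Local stand-ins for the generic 32-bit rotations */
#define leftRotate(a, bits)  ((uint32_t)(((a) << (bits)) | ((a) >> (32 - (bits)))))
#define rightRotate(a, bits) ((uint32_t)(((a) >> (bits)) | ((a) << (32 - (bits)))))

/* Composed form of leftRotate12 from the hunk above:
 * rotate left by 16, then right by 4 one bit at a time. */
#define leftRotate12_composed(a) \
    (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1))

int main(void)
{
    uint32_t x = 0x12345678U;
    assert(leftRotate12_composed(x) == leftRotate(x, 12));
    return 0;
}
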
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/aead-common.h b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. 
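
The constant-time comparison in aead_check_tag() above hinges on one arithmetic trick; this small sketch (editorial, assuming the usual arithmetic right shift of negative int values) spells out the two cases.

#include <assert.h>

int main(void)
{
    int match = 0x00, mismatch = 0x5a;      /* possible XOR accumulator values */
    assert(((match - 1) >> 8) == -1);       /* mask that keeps the plaintext   */
    assert(((mismatch - 1) >> 8) == 0);     /* mask that wipes the plaintext   */
    assert(~((match - 1) >> 8) == 0);       /* aead_check_tag() returns 0      */
    assert(~((mismatch - 1) >> 8) == -1);   /* and -1 on failure               */
    return 0;
}
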
- */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. 
- */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. 
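
A sketch of driving a cipher generically through its meta-information block, using photon_beetle_128_cipher from this patch as the instance; encrypt_with() is a hypothetical helper written for this illustration, and buffers are sized from the struct fields.

#include <stdio.h>
#include <stdlib.h>
#include "aead-common.h"
#include "photon-beetle.h"

/* Encrypt a message through the generic aead_cipher_t vtable. */
static int encrypt_with(const aead_cipher_t *cipher,
                        const unsigned char *msg, unsigned long long mlen)
{
    unsigned char *key   = calloc(1, cipher->key_len);
    unsigned char *nonce = calloc(1, cipher->nonce_len);
    unsigned char *ct    = malloc((size_t)mlen + cipher->tag_len);
    unsigned long long clen = 0;
    int result = -1;

    if (key && nonce && ct) {
        result = cipher->encrypt(ct, &clen, msg, mlen, NULL, 0, NULL, nonce, key);
        printf("%s: %llu bytes of output\n", cipher->name, clen);
    }
    free(key);
    free(nonce);
    free(ct);
    return result;
}

int main(void)
{
    const unsigned char msg[] = "hello";
    return encrypt_with(&photon_beetle_128_cipher, msg, sizeof(msg) - 1);
}
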
- */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/api.h b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/encrypt.c b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/encrypt.c deleted file mode 100644 index 17af9cd..0000000 --- a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "photon-beetle.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return photon_beetle_32_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return photon_beetle_32_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/internal-photon256.c b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/internal-photon256.c deleted file mode 100644 index b8743fe..0000000 --- a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/internal-photon256.c +++ /dev/null @@ -1,479 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-photon256.h" -#include "internal-util.h" - -/** - * \brief Number of rounds in the PHOTON-256 permutation in bit-sliced form. - */ -#define PHOTON256_ROUNDS 12 - -/* Round constants for PHOTON-256 */ -static uint32_t const photon256_rc[PHOTON256_ROUNDS] = { - 0x96d2f0e1, 0xb4f0d2c3, 0xf0b49687, 0x692d0f1e, - 0x5a1e3c2d, 0x3c785a4b, 0xe1a58796, 0x4b0f2d3c, - 0x1e5a7869, 0xa5e1c3d2, 0xd296b4a5, 0x2d694b5a -}; - -/** - * \brief Evaluates the PHOTON-256 S-box in bit-sliced form. - * - * \param x0 Slice with bit 0 of all nibbles. - * \param x1 Slice with bit 1 of all nibbles. - * \param x2 Slice with bit 2 of all nibbles. - * \param x3 Slice with bit 3 of all nibbles. - * - * This bit-sliced S-box implementation is based on the AVR version - * "add_avr8_bitslice_asm" from the PHOTON-Beetle reference code. - */ -#define photon256_sbox(x0, x1, x2, x3) \ - do { \ - x1 ^= x2; \ - x3 ^= (x2 & x1); \ - t1 = x3; \ - x3 = (x3 & x1) ^ x2; \ - t2 = x3; \ - x3 ^= x0; \ - x3 = ~(x3); \ - x2 = x3; \ - t2 |= x0; \ - x0 ^= t1; \ - x1 ^= x0; \ - x2 |= x1; \ - x2 ^= t1; \ - x1 ^= t2; \ - x3 ^= x1; \ - } while (0) - -/** - * \brief Performs a field multiplication on the 8 nibbles in a row. - * - * \param a Field constant to multiply by. - * \param x Bit-sliced form of the row, with bits 0..3 of each nibble - * in bytes 0..3 of the word. - * - * \return a * x packed into the bytes of a word. - */ -static uint32_t photon256_field_multiply(uint8_t a, uint32_t x) -{ - /* For each 4-bit nibble we need to do this: - * - * result = 0; - * for (bit = 0; bit < 4; ++ bit) { - * if ((a & (1 << bit)) != 0) - * result ^= x; - * if ((x & 0x08) != 0) { - * x = (x << 1) ^ 3; - * } else { - * x = (x << 1); - * } - * } - * - * We don't need to worry about constant time for "a" because it is a - * known constant that isn't data-dependent. But we do need to worry - * about constant time for "x" as it is data. 
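
The per-nibble recurrence described in the comment above is multiplication in GF(2^4) with the reduction x^4 = x + 1 (the "(x << 1) ^ 3" step). Here is a plain single-nibble sketch; gf16_multiply() is a hypothetical helper for illustration, not the word-parallel routine that follows.

#include <stdint.h>
#include <assert.h>

/* Multiply two 4-bit values in GF(2^4), reducing x^4 to x + 1. */
static uint8_t gf16_multiply(uint8_t a, uint8_t x)
{
    uint8_t result = 0;
    int bit;
    for (bit = 0; bit < 4; ++bit) {
        if (a & (1 << bit))
            result ^= x;
        if (x & 0x08)
            x = ((x << 1) ^ 0x03) & 0x0F;   /* overflow: fold x^4 back as x + 1 */
        else
            x = (x << 1) & 0x0F;
    }
    return result;
}

int main(void)
{
    assert(gf16_multiply(2, 8) == 3);   /* 0b1000 << 1 overflows and reduces to 0b0011 */
    assert(gf16_multiply(1, 9) == 9);   /* multiplying by 1 is the identity */
    return 0;
}
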
- */ - uint32_t result = 0; - uint32_t t; - #define PARALLEL_CONDITIONAL_ADD(bit) \ - do { \ - if ((a) & (1 << (bit))) \ - result ^= x; \ - } while (0) - #define PARALELL_ROTATE() \ - do { \ - t = x >> 24; \ - x = (x << 8) ^ t ^ (t << 8); \ - } while (0) - PARALLEL_CONDITIONAL_ADD(0); - PARALELL_ROTATE(); - PARALLEL_CONDITIONAL_ADD(1); - PARALELL_ROTATE(); - PARALLEL_CONDITIONAL_ADD(2); - PARALELL_ROTATE(); - PARALLEL_CONDITIONAL_ADD(3); - return result; -} - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) - -/** - * \brief Converts a PHOTON-256 state into bit-sliced form. - * - * \param out Points to the converted output. - * \param in Points to the PHOTON-256 state to convert. - */ -static void photon256_to_sliced - (uint32_t out[PHOTON256_STATE_SIZE / 4], - const unsigned char in[PHOTON256_STATE_SIZE]) -{ - /* We first scatter bits 0..3 of the nibbles to bytes 0..3 of the words. - * Then we rearrange the bytes to group all bits N into word N. - * - * Permutation generated with "http://programming.sirrida.de/calcperm.php". - * - * P = [0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 - * 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31] - */ - uint32_t t0, t1, t2, t3; - #define TO_BITSLICED_PERM(x) \ - do { \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x0000ff00, 8); \ - } while (0) - #define FROM_BITSLICED_PERM(x) \ - do { \ - bit_permute_step(x, 0x00aa00aa, 7); \ - bit_permute_step(x, 0x0000cccc, 14); \ - bit_permute_step(x, 0x00f000f0, 4); \ - bit_permute_step(x, 0x0000ff00, 8); \ - } while (0) - t0 = le_load_word32(in); - t1 = le_load_word32(in + 4); - t2 = le_load_word32(in + 8); - t3 = le_load_word32(in + 12); - TO_BITSLICED_PERM(t0); - TO_BITSLICED_PERM(t1); - TO_BITSLICED_PERM(t2); - TO_BITSLICED_PERM(t3); - out[0] = (t0 & 0x000000FFU) | ((t1 << 8) & 0x0000FF00U) | - ((t2 << 16) & 0x00FF0000U) | ((t3 << 24) & 0xFF000000U); - out[1] = ((t0 >> 8) & 0x000000FFU) | (t1 & 0x0000FF00U) | - ((t2 << 8) & 0x00FF0000U) | ((t3 << 16) & 0xFF000000U); - out[2] = ((t0 >> 16) & 0x000000FFU) | ((t1 >> 8) & 0x0000FF00U) | - (t2 & 0x00FF0000U) | ((t3 << 8) & 0xFF000000U); - out[3] = ((t0 >> 24) & 0x000000FFU) | ((t1 >> 16) & 0x0000FF00U) | - ((t2 >> 8) & 0x00FF0000U) | (t3 & 0xFF000000U); - t0 = le_load_word32(in + 16); - t1 = le_load_word32(in + 20); - t2 = le_load_word32(in + 24); - t3 = le_load_word32(in + 28); - TO_BITSLICED_PERM(t0); - TO_BITSLICED_PERM(t1); - TO_BITSLICED_PERM(t2); - TO_BITSLICED_PERM(t3); - out[4] = (t0 & 0x000000FFU) | ((t1 << 8) & 0x0000FF00U) | - ((t2 << 16) & 0x00FF0000U) | ((t3 << 24) & 0xFF000000U); - out[5] = ((t0 >> 8) & 0x000000FFU) | (t1 & 0x0000FF00U) | - ((t2 << 8) & 0x00FF0000U) | ((t3 << 16) & 0xFF000000U); - out[6] = ((t0 >> 16) & 0x000000FFU) | ((t1 >> 8) & 0x0000FF00U) | - (t2 & 0x00FF0000U) | ((t3 << 8) & 0xFF000000U); - out[7] = ((t0 >> 24) & 0x000000FFU) | ((t1 >> 16) & 0x0000FF00U) | - ((t2 >> 8) & 0x00FF0000U) | (t3 & 0xFF000000U); -} - -/** - * \brief Converts a PHOTON-256 state from bit-sliced form. - * - * \param out Points to the converted output. - * \param in Points to the PHOTON-256 state to convert. 
- */ -static void photon256_from_sliced - (unsigned char out[PHOTON256_STATE_SIZE], - const unsigned char in[PHOTON256_STATE_SIZE]) -{ - /* Do the reverse of photon256_to_sliced() */ - uint32_t x0, x1, x2, x3; - x0 = ((uint32_t)(in[0])) | - (((uint32_t)(in[4])) << 8) | - (((uint32_t)(in[8])) << 16) | - (((uint32_t)(in[12])) << 24); - x1 = ((uint32_t)(in[1])) | - (((uint32_t)(in[5])) << 8) | - (((uint32_t)(in[9])) << 16) | - (((uint32_t)(in[13])) << 24); - x2 = ((uint32_t)(in[2])) | - (((uint32_t)(in[6])) << 8) | - (((uint32_t)(in[10])) << 16) | - (((uint32_t)(in[14])) << 24); - x3 = ((uint32_t)(in[3])) | - (((uint32_t)(in[7])) << 8) | - (((uint32_t)(in[11])) << 16) | - (((uint32_t)(in[15])) << 24); - FROM_BITSLICED_PERM(x0); - FROM_BITSLICED_PERM(x1); - FROM_BITSLICED_PERM(x2); - FROM_BITSLICED_PERM(x3); - le_store_word32(out, x0); - le_store_word32(out + 4, x1); - le_store_word32(out + 8, x2); - le_store_word32(out + 12, x3); - x0 = ((uint32_t)(in[16])) | - (((uint32_t)(in[20])) << 8) | - (((uint32_t)(in[24])) << 16) | - (((uint32_t)(in[28])) << 24); - x1 = ((uint32_t)(in[17])) | - (((uint32_t)(in[21])) << 8) | - (((uint32_t)(in[25])) << 16) | - (((uint32_t)(in[29])) << 24); - x2 = ((uint32_t)(in[18])) | - (((uint32_t)(in[22])) << 8) | - (((uint32_t)(in[26])) << 16) | - (((uint32_t)(in[30])) << 24); - x3 = ((uint32_t)(in[19])) | - (((uint32_t)(in[23])) << 8) | - (((uint32_t)(in[27])) << 16) | - (((uint32_t)(in[31])) << 24); - FROM_BITSLICED_PERM(x0); - FROM_BITSLICED_PERM(x1); - FROM_BITSLICED_PERM(x2); - FROM_BITSLICED_PERM(x3); - le_store_word32(out + 16, x0); - le_store_word32(out + 20, x1); - le_store_word32(out + 24, x2); - le_store_word32(out + 28, x3); -} - -#if defined(LW_UTIL_LITTLE_ENDIAN) -/* Index the bit-sliced state bytes in little-endian byte order */ -#define READ_ROW0() \ - (((uint32_t)(S.bytes[0])) | \ - (((uint32_t)(S.bytes[4])) << 8) | \ - (((uint32_t)(S.bytes[8])) << 16) | \ - (((uint32_t)(S.bytes[12])) << 24)) -#define READ_ROW1() \ - (((uint32_t)(S.bytes[1])) | \ - (((uint32_t)(S.bytes[5])) << 8) | \ - (((uint32_t)(S.bytes[9])) << 16) | \ - (((uint32_t)(S.bytes[13])) << 24)) -#define READ_ROW2() \ - (((uint32_t)(S.bytes[2])) | \ - (((uint32_t)(S.bytes[6])) << 8) | \ - (((uint32_t)(S.bytes[10])) << 16) | \ - (((uint32_t)(S.bytes[14])) << 24)) -#define READ_ROW3() \ - (((uint32_t)(S.bytes[3])) | \ - (((uint32_t)(S.bytes[7])) << 8) | \ - (((uint32_t)(S.bytes[11])) << 16) | \ - (((uint32_t)(S.bytes[15])) << 24)) -#define READ_ROW4() \ - (((uint32_t)(S.bytes[16])) | \ - (((uint32_t)(S.bytes[20])) << 8) | \ - (((uint32_t)(S.bytes[24])) << 16) | \ - (((uint32_t)(S.bytes[28])) << 24)) -#define READ_ROW5() \ - (((uint32_t)(S.bytes[17])) | \ - (((uint32_t)(S.bytes[21])) << 8) | \ - (((uint32_t)(S.bytes[25])) << 16) | \ - (((uint32_t)(S.bytes[29])) << 24)) -#define READ_ROW6() \ - (((uint32_t)(S.bytes[18])) | \ - (((uint32_t)(S.bytes[22])) << 8) | \ - (((uint32_t)(S.bytes[26])) << 16) | \ - (((uint32_t)(S.bytes[30])) << 24)) -#define READ_ROW7() \ - (((uint32_t)(S.bytes[19])) | \ - (((uint32_t)(S.bytes[23])) << 8) | \ - (((uint32_t)(S.bytes[27])) << 16) | \ - (((uint32_t)(S.bytes[31])) << 24)) -#define WRITE_ROW(row, value) \ - do { \ - if ((row) < 4) { \ - S.bytes[(row)] = (uint8_t)(value); \ - S.bytes[(row) + 4] = (uint8_t)((value) >> 8); \ - S.bytes[(row) + 8] = (uint8_t)((value) >> 16); \ - S.bytes[(row) + 12] = (uint8_t)((value) >> 24); \ - } else { \ - S.bytes[(row) + 12] = (uint8_t)(value); \ - S.bytes[(row) + 16] = (uint8_t)((value) >> 8); \ - S.bytes[(row) + 20] = 
(uint8_t)((value) >> 16); \ - S.bytes[(row) + 24] = (uint8_t)((value) >> 24); \ - } \ - } while (0) -#else -/* Index the bit-sliced state bytes in big-endian byte order */ -#define READ_ROW0() \ - (((uint32_t)(S.bytes[3])) | \ - (((uint32_t)(S.bytes[7])) << 8) | \ - (((uint32_t)(S.bytes[11])) << 16) | \ - (((uint32_t)(S.bytes[15])) << 24)) -#define READ_ROW1() \ - (((uint32_t)(S.bytes[2])) | \ - (((uint32_t)(S.bytes[6])) << 8) | \ - (((uint32_t)(S.bytes[10])) << 16) | \ - (((uint32_t)(S.bytes[14])) << 24)) -#define READ_ROW2() \ - (((uint32_t)(S.bytes[1])) | \ - (((uint32_t)(S.bytes[5])) << 8) | \ - (((uint32_t)(S.bytes[9])) << 16) | \ - (((uint32_t)(S.bytes[13])) << 24)) -#define READ_ROW3() \ - (((uint32_t)(S.bytes[0])) | \ - (((uint32_t)(S.bytes[4])) << 8) | \ - (((uint32_t)(S.bytes[8])) << 16) | \ - (((uint32_t)(S.bytes[12])) << 24)) -#define READ_ROW4() \ - (((uint32_t)(S.bytes[19])) | \ - (((uint32_t)(S.bytes[23])) << 8) | \ - (((uint32_t)(S.bytes[27])) << 16) | \ - (((uint32_t)(S.bytes[31])) << 24)) -#define READ_ROW5() \ - (((uint32_t)(S.bytes[18])) | \ - (((uint32_t)(S.bytes[22])) << 8) | \ - (((uint32_t)(S.bytes[26])) << 16) | \ - (((uint32_t)(S.bytes[30])) << 24)) -#define READ_ROW6() \ - (((uint32_t)(S.bytes[17])) | \ - (((uint32_t)(S.bytes[21])) << 8) | \ - (((uint32_t)(S.bytes[25])) << 16) | \ - (((uint32_t)(S.bytes[29])) << 24)) -#define READ_ROW7() \ - (((uint32_t)(S.bytes[16])) | \ - (((uint32_t)(S.bytes[20])) << 8) | \ - (((uint32_t)(S.bytes[24])) << 16) | \ - (((uint32_t)(S.bytes[28])) << 24)) -#define WRITE_ROW(row, value) \ - do { \ - if ((row) < 4) { \ - S.bytes[3 - (row)] = (uint8_t)(value); \ - S.bytes[7 - (row)] = (uint8_t)((value) >> 8); \ - S.bytes[11 - (row)] = (uint8_t)((value) >> 16); \ - S.bytes[15 - (row)] = (uint8_t)((value) >> 24); \ - } else { \ - S.bytes[20 - (row)] = (uint8_t)(value); \ - S.bytes[24 - (row)] = (uint8_t)((value) >> 8); \ - S.bytes[28 - (row)] = (uint8_t)((value) >> 16); \ - S.bytes[32 - (row)] = (uint8_t)((value) >> 24); \ - } \ - } while (0) -#endif - -void photon256_permute(unsigned char state[PHOTON256_STATE_SIZE]) -{ - union { - uint32_t words[PHOTON256_STATE_SIZE / 4]; - uint8_t bytes[PHOTON256_STATE_SIZE]; - } S; - uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8; - uint8_t round; - - /* Convert the state into bit-sliced form */ - photon256_to_sliced(S.words, state); - - /* Perform all 12 permutation rounds */ - for (round = 0; round < PHOTON256_ROUNDS; ++round) { - /* Add the constants for this round */ - t0 = photon256_rc[round]; - S.words[0] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[1] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[2] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[3] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[4] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[5] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[6] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[7] ^= t0 & 0x01010101U; - - /* Apply the sbox to all nibbles in the state */ - photon256_sbox(S.words[0], S.words[1], S.words[2], S.words[3]); - photon256_sbox(S.words[4], S.words[5], S.words[6], S.words[7]); - - /* Rotate all rows left by the row number. - * - * We do this by applying permutations to the top and bottom words - * to rearrange the bits into the rotated form. Permutations - * generated with "http://programming.sirrida.de/calcperm.php". 
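 *
 * Each bit_permute_step(y, mask, shift) call below is a "delta swap":
 * it exchanges the bits of y selected by mask with the bits shift
 * positions above them; three such swaps implement each permutation.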
- * - * P_top = [0 1 2 3 4 5 6 7 15 8 9 10 11 12 13 14 22 23 - * 16 17 18 19 20 21 29 30 31 24 25 26 27 28] - * P_bot = [4 5 6 7 0 1 2 3 11 12 13 14 15 8 9 10 18 19 - * 20 21 22 23 16 17 25 26 27 28 29 30 31 24 - */ - #define TOP_ROTATE_PERM(x) \ - do { \ - t1 = (x); \ - bit_permute_step(t1, 0x07030100, 4); \ - bit_permute_step(t1, 0x22331100, 2); \ - bit_permute_step(t1, 0x55005500, 1); \ - (x) = t1; \ - } while (0) - #define BOTTOM_ROTATE_PERM(x) \ - do { \ - t1 = (x); \ - bit_permute_step(t1, 0x080c0e0f, 4); \ - bit_permute_step(t1, 0x22331100, 2); \ - bit_permute_step(t1, 0x55005500, 1); \ - (x) = t1; \ - } while (0) - TOP_ROTATE_PERM(S.words[0]); - TOP_ROTATE_PERM(S.words[1]); - TOP_ROTATE_PERM(S.words[2]); - TOP_ROTATE_PERM(S.words[3]); - BOTTOM_ROTATE_PERM(S.words[4]); - BOTTOM_ROTATE_PERM(S.words[5]); - BOTTOM_ROTATE_PERM(S.words[6]); - BOTTOM_ROTATE_PERM(S.words[7]); - - /* Mix the columns */ - #define MUL(a, x) (photon256_field_multiply((a), (x))) - t0 = READ_ROW0(); - t1 = READ_ROW1(); - t2 = READ_ROW2(); - t3 = READ_ROW3(); - t4 = READ_ROW4(); - t5 = READ_ROW5(); - t6 = READ_ROW6(); - t7 = READ_ROW7(); - t8 = MUL(0x02, t0) ^ MUL(0x04, t1) ^ MUL(0x02, t2) ^ MUL(0x0b, t3) ^ - MUL(0x02, t4) ^ MUL(0x08, t5) ^ MUL(0x05, t6) ^ MUL(0x06, t7); - WRITE_ROW(0, t8); - t8 = MUL(0x0c, t0) ^ MUL(0x09, t1) ^ MUL(0x08, t2) ^ MUL(0x0d, t3) ^ - MUL(0x07, t4) ^ MUL(0x07, t5) ^ MUL(0x05, t6) ^ MUL(0x02, t7); - WRITE_ROW(1, t8); - t8 = MUL(0x04, t0) ^ MUL(0x04, t1) ^ MUL(0x0d, t2) ^ MUL(0x0d, t3) ^ - MUL(0x09, t4) ^ MUL(0x04, t5) ^ MUL(0x0d, t6) ^ MUL(0x09, t7); - WRITE_ROW(2, t8); - t8 = MUL(0x01, t0) ^ MUL(0x06, t1) ^ MUL(0x05, t2) ^ MUL(0x01, t3) ^ - MUL(0x0c, t4) ^ MUL(0x0d, t5) ^ MUL(0x0f, t6) ^ MUL(0x0e, t7); - WRITE_ROW(3, t8); - t8 = MUL(0x0f, t0) ^ MUL(0x0c, t1) ^ MUL(0x09, t2) ^ MUL(0x0d, t3) ^ - MUL(0x0e, t4) ^ MUL(0x05, t5) ^ MUL(0x0e, t6) ^ MUL(0x0d, t7); - WRITE_ROW(4, t8); - t8 = MUL(0x09, t0) ^ MUL(0x0e, t1) ^ MUL(0x05, t2) ^ MUL(0x0f, t3) ^ - MUL(0x04, t4) ^ MUL(0x0c, t5) ^ MUL(0x09, t6) ^ MUL(0x06, t7); - WRITE_ROW(5, t8); - t8 = MUL(0x0c, t0) ^ MUL(0x02, t1) ^ MUL(0x02, t2) ^ MUL(0x0a, t3) ^ - MUL(0x03, t4) ^ MUL(0x01, t5) ^ MUL(0x01, t6) ^ MUL(0x0e, t7); - WRITE_ROW(6, t8); - t8 = MUL(0x0f, t0) ^ MUL(0x01, t1) ^ MUL(0x0d, t2) ^ MUL(0x0a, t3) ^ - MUL(0x05, t4) ^ MUL(0x0a, t5) ^ MUL(0x02, t6) ^ MUL(0x03, t7); - WRITE_ROW(7, t8); - } - - /* Convert back from bit-sliced form to regular form */ - photon256_from_sliced(state, S.bytes); -} diff --git a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/internal-photon256.h b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/internal-photon256.h deleted file mode 100644 index ce8729a..0000000 --- a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/internal-photon256.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_PHOTON256_H -#define LW_INTERNAL_PHOTON256_H - -/** - * \file internal-photon256.h - * \brief Internal implementation of the PHOTON-256 permutation. - * - * Warning: The current implementation of PHOTON-256 is constant-time - * but not constant-cache. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the PHOTON-256 permutation state in bytes. - */ -#define PHOTON256_STATE_SIZE 32 - -/** - * \brief Permutes the PHOTON-256 state. - * - * \param state The state to be permuted. - */ -void photon256_permute(unsigned char state[PHOTON256_STATE_SIZE]); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/internal-util.h b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/photon-beetle.c b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/photon-beetle.c deleted file mode 100644 index f44bdad..0000000 --- a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/photon-beetle.c +++ /dev/null @@ -1,451 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "photon-beetle.h" -#include "internal-photon256.h" -#include "internal-util.h" -#include - -aead_cipher_t const photon_beetle_128_cipher = { - "PHOTON-Beetle-AEAD-ENC-128", - PHOTON_BEETLE_KEY_SIZE, - PHOTON_BEETLE_NONCE_SIZE, - PHOTON_BEETLE_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - photon_beetle_128_aead_encrypt, - photon_beetle_128_aead_decrypt -}; - -aead_cipher_t const photon_beetle_32_cipher = { - "PHOTON-Beetle-AEAD-ENC-32", - PHOTON_BEETLE_KEY_SIZE, - PHOTON_BEETLE_NONCE_SIZE, - PHOTON_BEETLE_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - photon_beetle_32_aead_encrypt, - photon_beetle_32_aead_decrypt -}; - -aead_hash_algorithm_t const photon_beetle_hash_algorithm = { - "PHOTON-Beetle-HASH", - sizeof(int), - PHOTON_BEETLE_HASH_SIZE, - AEAD_FLAG_NONE, - photon_beetle_hash, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \brief Rate of operation for PHOTON-Beetle-AEAD-ENC-128. - */ -#define PHOTON_BEETLE_128_RATE 16 - -/** - * \brief Rate of operation for PHOTON-Beetle-AEAD-ENC-32. - */ -#define PHOTON_BEETLE_32_RATE 4 - -/* Shifts a domain constant from the spec to the correct bit position */ -#define DOMAIN(c) ((c) << 5) - -/** - * \brief Processes the associated data for PHOTON-Beetle. - * - * \param state PHOTON-256 permutation state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data, must be non-zero. - * \param rate Rate of absorption for the data. 
- * \param mempty Non-zero if the message is empty. - */ -static void photon_beetle_process_ad - (unsigned char state[PHOTON256_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen, - unsigned rate, int mempty) -{ - unsigned temp; - - /* Absorb as many full rate blocks as possible */ - while (adlen > rate) { - photon256_permute(state); - lw_xor_block(state, ad, rate); - ad += rate; - adlen -= rate; - } - - /* Pad and absorb the last block */ - temp = (unsigned)adlen; - photon256_permute(state); - lw_xor_block(state, ad, temp); - if (temp < rate) - state[temp] ^= 0x01; /* padding */ - - /* Add the domain constant to finalize associated data processing */ - if (mempty && temp == rate) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(3); - else if (mempty) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(4); - else if (temp == rate) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - else - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(2); -} - -/** - * \brief Rotates part of the PHOTON-256 state right by one bit. - * - * \param out Output state buffer. - * \param in Input state buffer, must not overlap with \a out. - * \param len Length of the state buffer. - */ -static void photon_beetle_rotate1 - (unsigned char *out, const unsigned char *in, unsigned len) -{ - unsigned posn; - for (posn = 0; posn < (len - 1); ++posn) - out[posn] = (in[posn] >> 1) | (in[posn + 1] << 7); - out[len - 1] = (in[len - 1] >> 1) | (in[0] << 7); -} - -/** - * \brief Encrypts a plaintext block with PHOTON-Beetle. - * - * \param state PHOTON-256 permutation state. - * \param c Points to the ciphertext output buffer. - * \param m Points to the plaintext input buffer. - * \param mlen Length of the message, must be non-zero. - * \param rate Rate of absorption for the data. - * \param adempty Non-zero if the associated data is empty. - */ -static void photon_beetle_encrypt - (unsigned char state[PHOTON256_STATE_SIZE], - unsigned char *c, const unsigned char *m, unsigned long long mlen, - unsigned rate, int adempty) -{ - unsigned char shuffle[PHOTON_BEETLE_128_RATE]; /* Block of max rate size */ - unsigned temp; - - /* Process all plaintext blocks except the last */ - while (mlen > rate) { - photon256_permute(state); - memcpy(shuffle, state + rate / 2, rate / 2); - photon_beetle_rotate1(shuffle + rate / 2, state, rate / 2); - lw_xor_block(state, m, rate); - lw_xor_block_2_src(c, m, shuffle, rate); - c += rate; - m += rate; - mlen -= rate; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - photon256_permute(state); - memcpy(shuffle, state + rate / 2, rate / 2); - photon_beetle_rotate1(shuffle + rate / 2, state, rate / 2); - if (temp == rate) { - lw_xor_block(state, m, rate); - lw_xor_block_2_src(c, m, shuffle, rate); - } else { - lw_xor_block(state, m, temp); - state[temp] ^= 0x01; /* padding */ - lw_xor_block_2_src(c, m, shuffle, temp); - } - - /* Add the domain constant to finalize message processing */ - if (adempty && temp == rate) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(5); - else if (adempty) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(6); - else if (temp == rate) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - else - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(2); -} - -/** - * \brief Decrypts a ciphertext block with PHOTON-Beetle. - * - * \param state PHOTON-256 permutation state. - * \param m Points to the plaintext output buffer. - * \param c Points to the ciphertext input buffer. - * \param mlen Length of the message, must be non-zero. - * \param rate Rate of absorption for the data. 
- * \param adempty Non-zero if the associated data is empty. - */ -static void photon_beetle_decrypt - (unsigned char state[PHOTON256_STATE_SIZE], - unsigned char *m, const unsigned char *c, unsigned long long mlen, - unsigned rate, int adempty) -{ - unsigned char shuffle[PHOTON_BEETLE_128_RATE]; /* Block of max rate size */ - unsigned temp; - - /* Process all plaintext blocks except the last */ - while (mlen > rate) { - photon256_permute(state); - memcpy(shuffle, state + rate / 2, rate / 2); - photon_beetle_rotate1(shuffle + rate / 2, state, rate / 2); - lw_xor_block_2_src(m, c, shuffle, rate); - lw_xor_block(state, m, rate); - c += rate; - m += rate; - mlen -= rate; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - photon256_permute(state); - memcpy(shuffle, state + rate / 2, rate / 2); - photon_beetle_rotate1(shuffle + rate / 2, state, rate / 2); - if (temp == rate) { - lw_xor_block_2_src(m, c, shuffle, rate); - lw_xor_block(state, m, rate); - } else { - lw_xor_block_2_src(m, c, shuffle, temp); - lw_xor_block(state, m, temp); - state[temp] ^= 0x01; /* padding */ - } - - /* Add the domain constant to finalize message processing */ - if (adempty && temp == rate) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(5); - else if (adempty) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(6); - else if (temp == rate) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - else - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(2); -} - -int photon_beetle_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + PHOTON_BEETLE_TAG_SIZE; - - /* Initialize the state by concatenating the nonce and the key */ - memcpy(state, npub, 16); - memcpy(state + 16, k, 16); - - /* Process the associated data */ - if (adlen > 0) { - photon_beetle_process_ad - (state, ad, adlen, PHOTON_BEETLE_128_RATE, mlen == 0); - } else if (mlen == 0) { - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - } - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - photon_beetle_encrypt - (state, c, m, mlen, PHOTON_BEETLE_128_RATE, adlen == 0); - } - - /* Generate the authentication tag */ - photon256_permute(state); - memcpy(c + mlen, state, PHOTON_BEETLE_TAG_SIZE); - return 0; -} - -int photon_beetle_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < PHOTON_BEETLE_TAG_SIZE) - return -1; - *mlen = clen - PHOTON_BEETLE_TAG_SIZE; - - /* Initialize the state by concatenating the nonce and the key */ - memcpy(state, npub, 16); - memcpy(state + 16, k, 16); - - /* Process the associated data */ - clen -= PHOTON_BEETLE_TAG_SIZE; - if (adlen > 0) { - photon_beetle_process_ad - (state, ad, adlen, PHOTON_BEETLE_128_RATE, clen == 0); - } else if (clen == 0) { - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - } - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > 0) { - photon_beetle_decrypt - (state, m, c, clen, PHOTON_BEETLE_128_RATE, adlen == 0); 
- } - - /* Check the authentication tag */ - photon256_permute(state); - return aead_check_tag(m, clen, state, c + clen, PHOTON_BEETLE_TAG_SIZE); -} - -int photon_beetle_32_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + PHOTON_BEETLE_TAG_SIZE; - - /* Initialize the state by concatenating the nonce and the key */ - memcpy(state, npub, 16); - memcpy(state + 16, k, 16); - - /* Process the associated data */ - if (adlen > 0) { - photon_beetle_process_ad - (state, ad, adlen, PHOTON_BEETLE_32_RATE, mlen == 0); - } else if (mlen == 0) { - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - } - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - photon_beetle_encrypt - (state, c, m, mlen, PHOTON_BEETLE_32_RATE, adlen == 0); - } - - /* Generate the authentication tag */ - photon256_permute(state); - memcpy(c + mlen, state, PHOTON_BEETLE_TAG_SIZE); - return 0; -} - -int photon_beetle_32_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < PHOTON_BEETLE_TAG_SIZE) - return -1; - *mlen = clen - PHOTON_BEETLE_TAG_SIZE; - - /* Initialize the state by concatenating the nonce and the key */ - memcpy(state, npub, 16); - memcpy(state + 16, k, 16); - - /* Process the associated data */ - clen -= PHOTON_BEETLE_TAG_SIZE; - if (adlen > 0) { - photon_beetle_process_ad - (state, ad, adlen, PHOTON_BEETLE_32_RATE, clen == 0); - } else if (clen == 0) { - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - } - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > 0) { - photon_beetle_decrypt - (state, m, c, clen, PHOTON_BEETLE_32_RATE, adlen == 0); - } - - /* Check the authentication tag */ - photon256_permute(state); - return aead_check_tag(m, clen, state, c + clen, PHOTON_BEETLE_TAG_SIZE); -} - -int photon_beetle_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - unsigned temp; - - /* Absorb the input data */ - if (inlen == 0) { - /* No input data at all */ - memset(state, 0, sizeof(state) - 1); - state[PHOTON256_STATE_SIZE - 1] = DOMAIN(1); - } else if (inlen <= PHOTON_BEETLE_128_RATE) { - /* Only one block of input data, which may require padding */ - temp = (unsigned)inlen; - memcpy(state, in, temp); - memset(state + temp, 0, sizeof(state) - temp - 1); - if (temp < PHOTON_BEETLE_128_RATE) { - state[temp] = 0x01; - state[PHOTON256_STATE_SIZE - 1] = DOMAIN(1); - } else { - state[PHOTON256_STATE_SIZE - 1] = DOMAIN(2); - } - } else { - /* Initialize the state with the first block, then absorb the rest */ - memcpy(state, in, PHOTON_BEETLE_128_RATE); - memset(state + PHOTON_BEETLE_128_RATE, 0, - sizeof(state) - PHOTON_BEETLE_128_RATE); - in += PHOTON_BEETLE_128_RATE; - inlen -= PHOTON_BEETLE_128_RATE; - while (inlen > PHOTON_BEETLE_32_RATE) { - photon256_permute(state); - lw_xor_block(state, in, PHOTON_BEETLE_32_RATE); - in += PHOTON_BEETLE_32_RATE; - 
inlen -= PHOTON_BEETLE_32_RATE; - } - photon256_permute(state); - temp = (unsigned)inlen; - if (temp == PHOTON_BEETLE_32_RATE) { - lw_xor_block(state, in, PHOTON_BEETLE_32_RATE); - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - } else { - lw_xor_block(state, in, temp); - state[temp] ^= 0x01; - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(2); - } - } - - /* Generate the output hash */ - photon256_permute(state); - memcpy(out, state, 16); - photon256_permute(state); - memcpy(out + 16, state, 16); - return 0; -} diff --git a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/photon-beetle.h b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/photon-beetle.h deleted file mode 100644 index 2d94a7e..0000000 --- a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys-avr/photon-beetle.h +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_PHOTON_BEETLE_H -#define LWCRYPTO_PHOTON_BEETLE_H - -#include "aead-common.h" - -/** - * \file photon-beetle.h - * \brief PHOTON-Beetle authenticated encryption algorithm. - * - * PHOTON-Beetle is a family of authenticated encryption algorithms based - * on the PHOTON-256 permutation and using the Beetle sponge mode. - * There are three algorithms in the family: - * - * \li PHOTON-Beetle-AEAD-ENC-128 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag. Data is handled in 16 byte blocks. This is the primary - * member of the family for encryption. - * \li PHOTON-Beetle-AEAD-ENC-32 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag. Data is handled in 4 byte blocks. - * \li PHOTON-Beetle-Hash with a 256-bit hash output. The initial data is - * handled as a 16 byte block, and then the remaining bytes are processed - * in 4 byte blocks. - * - * References: https://www.isical.ac.in/~lightweight/beetle/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for PHOTON-Beetle. - */ -#define PHOTON_BEETLE_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PHOTON-Beetle. - */ -#define PHOTON_BEETLE_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PHOTON-Beetle. - */ -#define PHOTON_BEETLE_NONCE_SIZE 16 - -/** - * \brief Size of the hash output for PHOTON-Beetle-HASH. - */ -#define PHOTON_BEETLE_HASH_SIZE 32 - -/** - * \brief Meta-information block for the PHOTON-Beetle-AEAD-ENC-128 cipher. 
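 *
 * The cipher can be driven through the function pointers in this
 * block or by calling photon_beetle_128_aead_encrypt() and
 * photon_beetle_128_aead_decrypt() directly.  A minimal usage sketch
 * (illustrative only; the key and nonce values are placeholders):
 *
 *     unsigned char key[PHOTON_BEETLE_KEY_SIZE] = {0};
 *     unsigned char nonce[PHOTON_BEETLE_NONCE_SIZE] = {0};
 *     unsigned char msg[8] = "example";
 *     unsigned char ct[sizeof(msg) + PHOTON_BEETLE_TAG_SIZE];
 *     unsigned long long clen;
 *     photon_beetle_128_aead_encrypt(ct, &clen, msg, sizeof(msg),
 *                                    0, 0, 0, nonce, key);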
- */ -extern aead_cipher_t const photon_beetle_128_cipher; - -/** - * \brief Meta-information block for the PHOTON-Beetle-AEAD-ENC-32 cipher. - */ -extern aead_cipher_t const photon_beetle_32_cipher; - -/** - * \brief Meta-information block for the PHOTON-Beetle-HASH algorithm. - */ -extern aead_hash_algorithm_t const photon_beetle_hash_algorithm; - -/** - * \brief Encrypts and authenticates a packet with PHOTON-Beetle-AEAD-ENC-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa photon_beetle_128_aead_decrypt() - */ -int photon_beetle_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PHOTON-Beetle-AEAD-ENC-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa photon_beetle_128_aead_encrypt() - */ -int photon_beetle_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PHOTON-Beetle-AEAD-ENC-32. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. 
- * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa photon_beetle_32_aead_decrypt() - */ -int photon_beetle_32_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PHOTON-Beetle-AEAD-ENC-32. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa photon_beetle_32_aead_encrypt() - */ -int photon_beetle_32_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with PHOTON-Beetle-HASH to - * generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * PHOTON_BEETLE_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int photon_beetle_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys/internal-util.h b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys/internal-util.h +++ b/photon-beetle/Implementations/crypto_aead/photonbeetleaead128rate32v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
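The encrypt and decrypt declarations above pair up in the usual AEAD round trip: the ciphertext buffer is the plaintext length plus the 16-byte tag, and decryption only releases the plaintext when the tag verifies. A minimal sketch using the key/nonce/tag size constants from photon-beetle.h; the main() and buffer names are illustrative, not part of the patch.

#include <string.h>
#include "photon-beetle.h"

int main(void)
{
    unsigned char key[PHOTON_BEETLE_KEY_SIZE] = {0};
    unsigned char nonce[PHOTON_BEETLE_NONCE_SIZE] = {0};
    unsigned char msg[4] = {1, 2, 3, 4};
    unsigned char ct[sizeof(msg) + PHOTON_BEETLE_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long clen, mlen;

    /* Encrypt: no associated data, nsec unused by this algorithm */
    photon_beetle_32_aead_encrypt(ct, &clen, msg, sizeof(msg), 0, 0, 0, nonce, key);

    /* Decrypt: returns 0 and recovers the plaintext only if the tag checks out */
    if (photon_beetle_32_aead_decrypt(pt, &mlen, 0, ct, clen, 0, 0, nonce, key) != 0)
        return 1;
    return memcmp(pt, msg, sizeof(msg)) != 0;
}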
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/aead-common.c b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
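The composed rotations added above rest on the identity rotl32(x, n) = rotr32(rotl32(x, n + k), k) together with the fact that single-bit and byte-multiple rotations are the cheap cases on AVR. A standalone check of a few of the compositions, with rotl32/rotr32 written out as local helpers (illustrative only, not the library macros):

#include <stdint.h>
#include <assert.h>

static uint32_t rotl32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32u - bits));
}

static uint32_t rotr32(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32u - bits));
}

int main(void)
{
    uint32_t x = 0x12345678u;
    /* leftRotate5: rotate left by 8, then right by 3 */
    assert(rotl32(x, 5) == rotr32(rotl32(x, 8), 3));
    /* leftRotate29: three single-bit right rotations */
    assert(rotl32(x, 29) == rotr32(rotr32(rotr32(x, 1), 1), 1));
    /* leftRotate20: rotate left by 16, then left by 4 */
    assert(rotl32(x, 20) == rotl32(rotl32(x, 16), 4));
    return 0;
}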
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/aead-common.h b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. 
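The aead_check_tag() body deleted above (identical to the copy kept in the rhys directory) folds the byte-wise XOR of the two tags into an accumulator and turns that into an all-ones or all-zeroes mask without branching on secret data, then uses the mask to wipe the plaintext on failure. A self-contained illustration of just the mask derivation, assuming the same two's-complement arithmetic right shift the library code relies on; ct_equal() is an illustrative helper, not part of the file.

#include <stdio.h>

/* Returns 0 if the two byte strings are equal, -1 otherwise,
 * using the same branch-free mask derivation as aead_check_tag(). */
static int ct_equal(const unsigned char *a, const unsigned char *b, unsigned len)
{
    int accum = 0;
    while (len > 0) {
        accum |= (*a++ ^ *b++);
        --len;
    }
    /* accum is 0 when equal; (accum - 1) >> 8 is then all ones, else all zeroes */
    accum = (accum - 1) >> 8;
    return ~accum;
}

int main(void)
{
    unsigned char t1[4] = {1, 2, 3, 4};
    unsigned char t2[4] = {1, 2, 3, 4};
    unsigned char t3[4] = {1, 2, 3, 5};
    printf("%d %d\n", ct_equal(t1, t2, 4), ct_equal(t1, t3, 4)); /* prints: 0 -1 */
    return 0;
}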
- */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. 
- */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. 
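The aead_cipher_t meta-information block defined above lets generic driver code size its buffers from the algorithm description instead of hard-coding constants. A hedged sketch of such a driver, assuming the photon_beetle_32_cipher object declared in photon-beetle.h earlier in this patch; the main() is illustrative only.

#include <stdio.h>
#include "photon-beetle.h"

int main(void)
{
    const aead_cipher_t *cipher = &photon_beetle_32_cipher;
    /* Query the parameter sizes from the meta-information block */
    printf("%s: key=%u nonce=%u tag=%u\n",
           cipher->name, cipher->key_len, cipher->nonce_len, cipher->tag_len);
    return 0;
}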
- */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/api.h b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/api.h deleted file mode 100644 index ae8c7f6..0000000 --- a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/api.h +++ /dev/null @@ -1 +0,0 @@ -#define CRYPTO_BYTES 32 diff --git a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/hash.c b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/hash.c deleted file mode 100644 index c75624a..0000000 --- a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/hash.c +++ /dev/null @@ -1,8 +0,0 @@ - -#include "photon-beetle.h" - -int crypto_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - return photon_beetle_hash(out, in, inlen); -} diff --git a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/internal-photon256.c b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/internal-photon256.c deleted file mode 100644 index b8743fe..0000000 --- a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/internal-photon256.c +++ /dev/null @@ -1,479 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-photon256.h" -#include "internal-util.h" - -/** - * \brief Number of rounds in the PHOTON-256 permutation in bit-sliced form. - */ -#define PHOTON256_ROUNDS 12 - -/* Round constants for PHOTON-256 */ -static uint32_t const photon256_rc[PHOTON256_ROUNDS] = { - 0x96d2f0e1, 0xb4f0d2c3, 0xf0b49687, 0x692d0f1e, - 0x5a1e3c2d, 0x3c785a4b, 0xe1a58796, 0x4b0f2d3c, - 0x1e5a7869, 0xa5e1c3d2, 0xd296b4a5, 0x2d694b5a -}; - -/** - * \brief Evaluates the PHOTON-256 S-box in bit-sliced form. - * - * \param x0 Slice with bit 0 of all nibbles. - * \param x1 Slice with bit 1 of all nibbles. - * \param x2 Slice with bit 2 of all nibbles. - * \param x3 Slice with bit 3 of all nibbles. - * - * This bit-sliced S-box implementation is based on the AVR version - * "add_avr8_bitslice_asm" from the PHOTON-Beetle reference code. - */ -#define photon256_sbox(x0, x1, x2, x3) \ - do { \ - x1 ^= x2; \ - x3 ^= (x2 & x1); \ - t1 = x3; \ - x3 = (x3 & x1) ^ x2; \ - t2 = x3; \ - x3 ^= x0; \ - x3 = ~(x3); \ - x2 = x3; \ - t2 |= x0; \ - x0 ^= t1; \ - x1 ^= x0; \ - x2 |= x1; \ - x2 ^= t1; \ - x1 ^= t2; \ - x3 ^= x1; \ - } while (0) - -/** - * \brief Performs a field multiplication on the 8 nibbles in a row. - * - * \param a Field constant to multiply by. - * \param x Bit-sliced form of the row, with bits 0..3 of each nibble - * in bytes 0..3 of the word. - * - * \return a * x packed into the bytes of a word. - */ -static uint32_t photon256_field_multiply(uint8_t a, uint32_t x) -{ - /* For each 4-bit nibble we need to do this: - * - * result = 0; - * for (bit = 0; bit < 4; ++ bit) { - * if ((a & (1 << bit)) != 0) - * result ^= x; - * if ((x & 0x08) != 0) { - * x = (x << 1) ^ 3; - * } else { - * x = (x << 1); - * } - * } - * - * We don't need to worry about constant time for "a" because it is a - * known constant that isn't data-dependent. But we do need to worry - * about constant time for "x" as it is data. - */ - uint32_t result = 0; - uint32_t t; - #define PARALLEL_CONDITIONAL_ADD(bit) \ - do { \ - if ((a) & (1 << (bit))) \ - result ^= x; \ - } while (0) - #define PARALELL_ROTATE() \ - do { \ - t = x >> 24; \ - x = (x << 8) ^ t ^ (t << 8); \ - } while (0) - PARALLEL_CONDITIONAL_ADD(0); - PARALELL_ROTATE(); - PARALLEL_CONDITIONAL_ADD(1); - PARALELL_ROTATE(); - PARALLEL_CONDITIONAL_ADD(2); - PARALELL_ROTATE(); - PARALLEL_CONDITIONAL_ADD(3); - return result; -} - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) - -/** - * \brief Converts a PHOTON-256 state into bit-sliced form. - * - * \param out Points to the converted output. - * \param in Points to the PHOTON-256 state to convert. - */ -static void photon256_to_sliced - (uint32_t out[PHOTON256_STATE_SIZE / 4], - const unsigned char in[PHOTON256_STATE_SIZE]) -{ - /* We first scatter bits 0..3 of the nibbles to bytes 0..3 of the words. - * Then we rearrange the bytes to group all bits N into word N. - * - * Permutation generated with "http://programming.sirrida.de/calcperm.php". 
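The per-nibble loop spelled out in the photon256_field_multiply() comment above is an ordinary GF(2^4) multiplication with reduction polynomial x^4 + x + 1; the word-level code simply runs it on eight nibbles in parallel. A scalar reference version of that loop for a single nibble (an illustrative helper, not part of the file):

#include <stdint.h>
#include <assert.h>

/* Multiply two 4-bit field elements, reducing by x^4 + x + 1,
 * following the loop described in the photon256_field_multiply() comment. */
static uint8_t gf16_multiply(uint8_t a, uint8_t x)
{
    uint8_t result = 0;
    int bit;
    for (bit = 0; bit < 4; ++bit) {
        if (a & (1 << bit))
            result ^= x;
        if (x & 0x08)
            x = ((x << 1) ^ 3) & 0x0F;   /* overflow past bit 3: reduce */
        else
            x = (x << 1) & 0x0F;
    }
    return result;
}

int main(void)
{
    assert(gf16_multiply(0x02, 0x09) == 0x01); /* x * (x^3 + 1) = x^4 + x = 1 mod (x^4 + x + 1) */
    assert(gf16_multiply(0x01, 0x0b) == 0x0b); /* multiplying by 1 is the identity */
    return 0;
}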
- * - * P = [0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 - * 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31] - */ - uint32_t t0, t1, t2, t3; - #define TO_BITSLICED_PERM(x) \ - do { \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x0000ff00, 8); \ - } while (0) - #define FROM_BITSLICED_PERM(x) \ - do { \ - bit_permute_step(x, 0x00aa00aa, 7); \ - bit_permute_step(x, 0x0000cccc, 14); \ - bit_permute_step(x, 0x00f000f0, 4); \ - bit_permute_step(x, 0x0000ff00, 8); \ - } while (0) - t0 = le_load_word32(in); - t1 = le_load_word32(in + 4); - t2 = le_load_word32(in + 8); - t3 = le_load_word32(in + 12); - TO_BITSLICED_PERM(t0); - TO_BITSLICED_PERM(t1); - TO_BITSLICED_PERM(t2); - TO_BITSLICED_PERM(t3); - out[0] = (t0 & 0x000000FFU) | ((t1 << 8) & 0x0000FF00U) | - ((t2 << 16) & 0x00FF0000U) | ((t3 << 24) & 0xFF000000U); - out[1] = ((t0 >> 8) & 0x000000FFU) | (t1 & 0x0000FF00U) | - ((t2 << 8) & 0x00FF0000U) | ((t3 << 16) & 0xFF000000U); - out[2] = ((t0 >> 16) & 0x000000FFU) | ((t1 >> 8) & 0x0000FF00U) | - (t2 & 0x00FF0000U) | ((t3 << 8) & 0xFF000000U); - out[3] = ((t0 >> 24) & 0x000000FFU) | ((t1 >> 16) & 0x0000FF00U) | - ((t2 >> 8) & 0x00FF0000U) | (t3 & 0xFF000000U); - t0 = le_load_word32(in + 16); - t1 = le_load_word32(in + 20); - t2 = le_load_word32(in + 24); - t3 = le_load_word32(in + 28); - TO_BITSLICED_PERM(t0); - TO_BITSLICED_PERM(t1); - TO_BITSLICED_PERM(t2); - TO_BITSLICED_PERM(t3); - out[4] = (t0 & 0x000000FFU) | ((t1 << 8) & 0x0000FF00U) | - ((t2 << 16) & 0x00FF0000U) | ((t3 << 24) & 0xFF000000U); - out[5] = ((t0 >> 8) & 0x000000FFU) | (t1 & 0x0000FF00U) | - ((t2 << 8) & 0x00FF0000U) | ((t3 << 16) & 0xFF000000U); - out[6] = ((t0 >> 16) & 0x000000FFU) | ((t1 >> 8) & 0x0000FF00U) | - (t2 & 0x00FF0000U) | ((t3 << 8) & 0xFF000000U); - out[7] = ((t0 >> 24) & 0x000000FFU) | ((t1 >> 16) & 0x0000FF00U) | - ((t2 >> 8) & 0x00FF0000U) | (t3 & 0xFF000000U); -} - -/** - * \brief Converts a PHOTON-256 state from bit-sliced form. - * - * \param out Points to the converted output. - * \param in Points to the PHOTON-256 state to convert. 
- */ -static void photon256_from_sliced - (unsigned char out[PHOTON256_STATE_SIZE], - const unsigned char in[PHOTON256_STATE_SIZE]) -{ - /* Do the reverse of photon256_to_sliced() */ - uint32_t x0, x1, x2, x3; - x0 = ((uint32_t)(in[0])) | - (((uint32_t)(in[4])) << 8) | - (((uint32_t)(in[8])) << 16) | - (((uint32_t)(in[12])) << 24); - x1 = ((uint32_t)(in[1])) | - (((uint32_t)(in[5])) << 8) | - (((uint32_t)(in[9])) << 16) | - (((uint32_t)(in[13])) << 24); - x2 = ((uint32_t)(in[2])) | - (((uint32_t)(in[6])) << 8) | - (((uint32_t)(in[10])) << 16) | - (((uint32_t)(in[14])) << 24); - x3 = ((uint32_t)(in[3])) | - (((uint32_t)(in[7])) << 8) | - (((uint32_t)(in[11])) << 16) | - (((uint32_t)(in[15])) << 24); - FROM_BITSLICED_PERM(x0); - FROM_BITSLICED_PERM(x1); - FROM_BITSLICED_PERM(x2); - FROM_BITSLICED_PERM(x3); - le_store_word32(out, x0); - le_store_word32(out + 4, x1); - le_store_word32(out + 8, x2); - le_store_word32(out + 12, x3); - x0 = ((uint32_t)(in[16])) | - (((uint32_t)(in[20])) << 8) | - (((uint32_t)(in[24])) << 16) | - (((uint32_t)(in[28])) << 24); - x1 = ((uint32_t)(in[17])) | - (((uint32_t)(in[21])) << 8) | - (((uint32_t)(in[25])) << 16) | - (((uint32_t)(in[29])) << 24); - x2 = ((uint32_t)(in[18])) | - (((uint32_t)(in[22])) << 8) | - (((uint32_t)(in[26])) << 16) | - (((uint32_t)(in[30])) << 24); - x3 = ((uint32_t)(in[19])) | - (((uint32_t)(in[23])) << 8) | - (((uint32_t)(in[27])) << 16) | - (((uint32_t)(in[31])) << 24); - FROM_BITSLICED_PERM(x0); - FROM_BITSLICED_PERM(x1); - FROM_BITSLICED_PERM(x2); - FROM_BITSLICED_PERM(x3); - le_store_word32(out + 16, x0); - le_store_word32(out + 20, x1); - le_store_word32(out + 24, x2); - le_store_word32(out + 28, x3); -} - -#if defined(LW_UTIL_LITTLE_ENDIAN) -/* Index the bit-sliced state bytes in little-endian byte order */ -#define READ_ROW0() \ - (((uint32_t)(S.bytes[0])) | \ - (((uint32_t)(S.bytes[4])) << 8) | \ - (((uint32_t)(S.bytes[8])) << 16) | \ - (((uint32_t)(S.bytes[12])) << 24)) -#define READ_ROW1() \ - (((uint32_t)(S.bytes[1])) | \ - (((uint32_t)(S.bytes[5])) << 8) | \ - (((uint32_t)(S.bytes[9])) << 16) | \ - (((uint32_t)(S.bytes[13])) << 24)) -#define READ_ROW2() \ - (((uint32_t)(S.bytes[2])) | \ - (((uint32_t)(S.bytes[6])) << 8) | \ - (((uint32_t)(S.bytes[10])) << 16) | \ - (((uint32_t)(S.bytes[14])) << 24)) -#define READ_ROW3() \ - (((uint32_t)(S.bytes[3])) | \ - (((uint32_t)(S.bytes[7])) << 8) | \ - (((uint32_t)(S.bytes[11])) << 16) | \ - (((uint32_t)(S.bytes[15])) << 24)) -#define READ_ROW4() \ - (((uint32_t)(S.bytes[16])) | \ - (((uint32_t)(S.bytes[20])) << 8) | \ - (((uint32_t)(S.bytes[24])) << 16) | \ - (((uint32_t)(S.bytes[28])) << 24)) -#define READ_ROW5() \ - (((uint32_t)(S.bytes[17])) | \ - (((uint32_t)(S.bytes[21])) << 8) | \ - (((uint32_t)(S.bytes[25])) << 16) | \ - (((uint32_t)(S.bytes[29])) << 24)) -#define READ_ROW6() \ - (((uint32_t)(S.bytes[18])) | \ - (((uint32_t)(S.bytes[22])) << 8) | \ - (((uint32_t)(S.bytes[26])) << 16) | \ - (((uint32_t)(S.bytes[30])) << 24)) -#define READ_ROW7() \ - (((uint32_t)(S.bytes[19])) | \ - (((uint32_t)(S.bytes[23])) << 8) | \ - (((uint32_t)(S.bytes[27])) << 16) | \ - (((uint32_t)(S.bytes[31])) << 24)) -#define WRITE_ROW(row, value) \ - do { \ - if ((row) < 4) { \ - S.bytes[(row)] = (uint8_t)(value); \ - S.bytes[(row) + 4] = (uint8_t)((value) >> 8); \ - S.bytes[(row) + 8] = (uint8_t)((value) >> 16); \ - S.bytes[(row) + 12] = (uint8_t)((value) >> 24); \ - } else { \ - S.bytes[(row) + 12] = (uint8_t)(value); \ - S.bytes[(row) + 16] = (uint8_t)((value) >> 8); \ - S.bytes[(row) + 20] = 
(uint8_t)((value) >> 16); \ - S.bytes[(row) + 24] = (uint8_t)((value) >> 24); \ - } \ - } while (0) -#else -/* Index the bit-sliced state bytes in big-endian byte order */ -#define READ_ROW0() \ - (((uint32_t)(S.bytes[3])) | \ - (((uint32_t)(S.bytes[7])) << 8) | \ - (((uint32_t)(S.bytes[11])) << 16) | \ - (((uint32_t)(S.bytes[15])) << 24)) -#define READ_ROW1() \ - (((uint32_t)(S.bytes[2])) | \ - (((uint32_t)(S.bytes[6])) << 8) | \ - (((uint32_t)(S.bytes[10])) << 16) | \ - (((uint32_t)(S.bytes[14])) << 24)) -#define READ_ROW2() \ - (((uint32_t)(S.bytes[1])) | \ - (((uint32_t)(S.bytes[5])) << 8) | \ - (((uint32_t)(S.bytes[9])) << 16) | \ - (((uint32_t)(S.bytes[13])) << 24)) -#define READ_ROW3() \ - (((uint32_t)(S.bytes[0])) | \ - (((uint32_t)(S.bytes[4])) << 8) | \ - (((uint32_t)(S.bytes[8])) << 16) | \ - (((uint32_t)(S.bytes[12])) << 24)) -#define READ_ROW4() \ - (((uint32_t)(S.bytes[19])) | \ - (((uint32_t)(S.bytes[23])) << 8) | \ - (((uint32_t)(S.bytes[27])) << 16) | \ - (((uint32_t)(S.bytes[31])) << 24)) -#define READ_ROW5() \ - (((uint32_t)(S.bytes[18])) | \ - (((uint32_t)(S.bytes[22])) << 8) | \ - (((uint32_t)(S.bytes[26])) << 16) | \ - (((uint32_t)(S.bytes[30])) << 24)) -#define READ_ROW6() \ - (((uint32_t)(S.bytes[17])) | \ - (((uint32_t)(S.bytes[21])) << 8) | \ - (((uint32_t)(S.bytes[25])) << 16) | \ - (((uint32_t)(S.bytes[29])) << 24)) -#define READ_ROW7() \ - (((uint32_t)(S.bytes[16])) | \ - (((uint32_t)(S.bytes[20])) << 8) | \ - (((uint32_t)(S.bytes[24])) << 16) | \ - (((uint32_t)(S.bytes[28])) << 24)) -#define WRITE_ROW(row, value) \ - do { \ - if ((row) < 4) { \ - S.bytes[3 - (row)] = (uint8_t)(value); \ - S.bytes[7 - (row)] = (uint8_t)((value) >> 8); \ - S.bytes[11 - (row)] = (uint8_t)((value) >> 16); \ - S.bytes[15 - (row)] = (uint8_t)((value) >> 24); \ - } else { \ - S.bytes[20 - (row)] = (uint8_t)(value); \ - S.bytes[24 - (row)] = (uint8_t)((value) >> 8); \ - S.bytes[28 - (row)] = (uint8_t)((value) >> 16); \ - S.bytes[32 - (row)] = (uint8_t)((value) >> 24); \ - } \ - } while (0) -#endif - -void photon256_permute(unsigned char state[PHOTON256_STATE_SIZE]) -{ - union { - uint32_t words[PHOTON256_STATE_SIZE / 4]; - uint8_t bytes[PHOTON256_STATE_SIZE]; - } S; - uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8; - uint8_t round; - - /* Convert the state into bit-sliced form */ - photon256_to_sliced(S.words, state); - - /* Perform all 12 permutation rounds */ - for (round = 0; round < PHOTON256_ROUNDS; ++round) { - /* Add the constants for this round */ - t0 = photon256_rc[round]; - S.words[0] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[1] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[2] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[3] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[4] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[5] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[6] ^= t0 & 0x01010101U; - t0 >>= 1; - S.words[7] ^= t0 & 0x01010101U; - - /* Apply the sbox to all nibbles in the state */ - photon256_sbox(S.words[0], S.words[1], S.words[2], S.words[3]); - photon256_sbox(S.words[4], S.words[5], S.words[6], S.words[7]); - - /* Rotate all rows left by the row number. - * - * We do this by applying permutations to the top and bottom words - * to rearrange the bits into the rotated form. Permutations - * generated with "http://programming.sirrida.de/calcperm.php". 
- * - * P_top = [0 1 2 3 4 5 6 7 15 8 9 10 11 12 13 14 22 23 - * 16 17 18 19 20 21 29 30 31 24 25 26 27 28] - * P_bot = [4 5 6 7 0 1 2 3 11 12 13 14 15 8 9 10 18 19 - * 20 21 22 23 16 17 25 26 27 28 29 30 31 24 - */ - #define TOP_ROTATE_PERM(x) \ - do { \ - t1 = (x); \ - bit_permute_step(t1, 0x07030100, 4); \ - bit_permute_step(t1, 0x22331100, 2); \ - bit_permute_step(t1, 0x55005500, 1); \ - (x) = t1; \ - } while (0) - #define BOTTOM_ROTATE_PERM(x) \ - do { \ - t1 = (x); \ - bit_permute_step(t1, 0x080c0e0f, 4); \ - bit_permute_step(t1, 0x22331100, 2); \ - bit_permute_step(t1, 0x55005500, 1); \ - (x) = t1; \ - } while (0) - TOP_ROTATE_PERM(S.words[0]); - TOP_ROTATE_PERM(S.words[1]); - TOP_ROTATE_PERM(S.words[2]); - TOP_ROTATE_PERM(S.words[3]); - BOTTOM_ROTATE_PERM(S.words[4]); - BOTTOM_ROTATE_PERM(S.words[5]); - BOTTOM_ROTATE_PERM(S.words[6]); - BOTTOM_ROTATE_PERM(S.words[7]); - - /* Mix the columns */ - #define MUL(a, x) (photon256_field_multiply((a), (x))) - t0 = READ_ROW0(); - t1 = READ_ROW1(); - t2 = READ_ROW2(); - t3 = READ_ROW3(); - t4 = READ_ROW4(); - t5 = READ_ROW5(); - t6 = READ_ROW6(); - t7 = READ_ROW7(); - t8 = MUL(0x02, t0) ^ MUL(0x04, t1) ^ MUL(0x02, t2) ^ MUL(0x0b, t3) ^ - MUL(0x02, t4) ^ MUL(0x08, t5) ^ MUL(0x05, t6) ^ MUL(0x06, t7); - WRITE_ROW(0, t8); - t8 = MUL(0x0c, t0) ^ MUL(0x09, t1) ^ MUL(0x08, t2) ^ MUL(0x0d, t3) ^ - MUL(0x07, t4) ^ MUL(0x07, t5) ^ MUL(0x05, t6) ^ MUL(0x02, t7); - WRITE_ROW(1, t8); - t8 = MUL(0x04, t0) ^ MUL(0x04, t1) ^ MUL(0x0d, t2) ^ MUL(0x0d, t3) ^ - MUL(0x09, t4) ^ MUL(0x04, t5) ^ MUL(0x0d, t6) ^ MUL(0x09, t7); - WRITE_ROW(2, t8); - t8 = MUL(0x01, t0) ^ MUL(0x06, t1) ^ MUL(0x05, t2) ^ MUL(0x01, t3) ^ - MUL(0x0c, t4) ^ MUL(0x0d, t5) ^ MUL(0x0f, t6) ^ MUL(0x0e, t7); - WRITE_ROW(3, t8); - t8 = MUL(0x0f, t0) ^ MUL(0x0c, t1) ^ MUL(0x09, t2) ^ MUL(0x0d, t3) ^ - MUL(0x0e, t4) ^ MUL(0x05, t5) ^ MUL(0x0e, t6) ^ MUL(0x0d, t7); - WRITE_ROW(4, t8); - t8 = MUL(0x09, t0) ^ MUL(0x0e, t1) ^ MUL(0x05, t2) ^ MUL(0x0f, t3) ^ - MUL(0x04, t4) ^ MUL(0x0c, t5) ^ MUL(0x09, t6) ^ MUL(0x06, t7); - WRITE_ROW(5, t8); - t8 = MUL(0x0c, t0) ^ MUL(0x02, t1) ^ MUL(0x02, t2) ^ MUL(0x0a, t3) ^ - MUL(0x03, t4) ^ MUL(0x01, t5) ^ MUL(0x01, t6) ^ MUL(0x0e, t7); - WRITE_ROW(6, t8); - t8 = MUL(0x0f, t0) ^ MUL(0x01, t1) ^ MUL(0x0d, t2) ^ MUL(0x0a, t3) ^ - MUL(0x05, t4) ^ MUL(0x0a, t5) ^ MUL(0x02, t6) ^ MUL(0x03, t7); - WRITE_ROW(7, t8); - } - - /* Convert back from bit-sliced form to regular form */ - photon256_from_sliced(state, S.bytes); -} diff --git a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/internal-photon256.h b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/internal-photon256.h deleted file mode 100644 index ce8729a..0000000 --- a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/internal-photon256.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_PHOTON256_H -#define LW_INTERNAL_PHOTON256_H - -/** - * \file internal-photon256.h - * \brief Internal implementation of the PHOTON-256 permutation. - * - * Warning: The current implementation of PHOTON-256 is constant-time - * but not constant-cache. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the PHOTON-256 permutation state in bytes. - */ -#define PHOTON256_STATE_SIZE 32 - -/** - * \brief Permutes the PHOTON-256 state. - * - * \param state The state to be permuted. - */ -void photon256_permute(unsigned char state[PHOTON256_STATE_SIZE]); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/internal-util.h b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
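The load/store helpers earlier in this file exist so that the cipher code never depends on the host byte order: the same four bytes parse to different words under the little-endian and big-endian macros. A small host-side demonstration that mirrors the macro definitions rather than including the header (load_le32/load_be32 are illustrative names):

#include <stdint.h>
#include <assert.h>

static uint32_t load_le32(const unsigned char *p)
{
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
           ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static uint32_t load_be32(const unsigned char *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
    unsigned char buf[4] = {0x78, 0x56, 0x34, 0x12};
    assert(load_le32(buf) == 0x12345678u);
    assert(load_be32(buf) == 0x78563412u);
    return 0;
}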
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/photon-beetle.c b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/photon-beetle.c deleted file mode 100644 index f44bdad..0000000 --- a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/photon-beetle.c +++ /dev/null @@ -1,451 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "photon-beetle.h" -#include "internal-photon256.h" -#include "internal-util.h" -#include - -aead_cipher_t const photon_beetle_128_cipher = { - "PHOTON-Beetle-AEAD-ENC-128", - PHOTON_BEETLE_KEY_SIZE, - PHOTON_BEETLE_NONCE_SIZE, - PHOTON_BEETLE_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - photon_beetle_128_aead_encrypt, - photon_beetle_128_aead_decrypt -}; - -aead_cipher_t const photon_beetle_32_cipher = { - "PHOTON-Beetle-AEAD-ENC-32", - PHOTON_BEETLE_KEY_SIZE, - PHOTON_BEETLE_NONCE_SIZE, - PHOTON_BEETLE_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - photon_beetle_32_aead_encrypt, - photon_beetle_32_aead_decrypt -}; - -aead_hash_algorithm_t const photon_beetle_hash_algorithm = { - "PHOTON-Beetle-HASH", - sizeof(int), - PHOTON_BEETLE_HASH_SIZE, - AEAD_FLAG_NONE, - photon_beetle_hash, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \brief Rate of operation for PHOTON-Beetle-AEAD-ENC-128. - */ -#define PHOTON_BEETLE_128_RATE 16 - -/** - * \brief Rate of operation for PHOTON-Beetle-AEAD-ENC-32. - */ -#define PHOTON_BEETLE_32_RATE 4 - -/* Shifts a domain constant from the spec to the correct bit position */ -#define DOMAIN(c) ((c) << 5) - -/** - * \brief Processes the associated data for PHOTON-Beetle. - * - * \param state PHOTON-256 permutation state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data, must be non-zero. - * \param rate Rate of absorption for the data. 
- * \param mempty Non-zero if the message is empty. - */ -static void photon_beetle_process_ad - (unsigned char state[PHOTON256_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen, - unsigned rate, int mempty) -{ - unsigned temp; - - /* Absorb as many full rate blocks as possible */ - while (adlen > rate) { - photon256_permute(state); - lw_xor_block(state, ad, rate); - ad += rate; - adlen -= rate; - } - - /* Pad and absorb the last block */ - temp = (unsigned)adlen; - photon256_permute(state); - lw_xor_block(state, ad, temp); - if (temp < rate) - state[temp] ^= 0x01; /* padding */ - - /* Add the domain constant to finalize associated data processing */ - if (mempty && temp == rate) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(3); - else if (mempty) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(4); - else if (temp == rate) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - else - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(2); -} - -/** - * \brief Rotates part of the PHOTON-256 state right by one bit. - * - * \param out Output state buffer. - * \param in Input state buffer, must not overlap with \a out. - * \param len Length of the state buffer. - */ -static void photon_beetle_rotate1 - (unsigned char *out, const unsigned char *in, unsigned len) -{ - unsigned posn; - for (posn = 0; posn < (len - 1); ++posn) - out[posn] = (in[posn] >> 1) | (in[posn + 1] << 7); - out[len - 1] = (in[len - 1] >> 1) | (in[0] << 7); -} - -/** - * \brief Encrypts a plaintext block with PHOTON-Beetle. - * - * \param state PHOTON-256 permutation state. - * \param c Points to the ciphertext output buffer. - * \param m Points to the plaintext input buffer. - * \param mlen Length of the message, must be non-zero. - * \param rate Rate of absorption for the data. - * \param adempty Non-zero if the associated data is empty. - */ -static void photon_beetle_encrypt - (unsigned char state[PHOTON256_STATE_SIZE], - unsigned char *c, const unsigned char *m, unsigned long long mlen, - unsigned rate, int adempty) -{ - unsigned char shuffle[PHOTON_BEETLE_128_RATE]; /* Block of max rate size */ - unsigned temp; - - /* Process all plaintext blocks except the last */ - while (mlen > rate) { - photon256_permute(state); - memcpy(shuffle, state + rate / 2, rate / 2); - photon_beetle_rotate1(shuffle + rate / 2, state, rate / 2); - lw_xor_block(state, m, rate); - lw_xor_block_2_src(c, m, shuffle, rate); - c += rate; - m += rate; - mlen -= rate; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - photon256_permute(state); - memcpy(shuffle, state + rate / 2, rate / 2); - photon_beetle_rotate1(shuffle + rate / 2, state, rate / 2); - if (temp == rate) { - lw_xor_block(state, m, rate); - lw_xor_block_2_src(c, m, shuffle, rate); - } else { - lw_xor_block(state, m, temp); - state[temp] ^= 0x01; /* padding */ - lw_xor_block_2_src(c, m, shuffle, temp); - } - - /* Add the domain constant to finalize message processing */ - if (adempty && temp == rate) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(5); - else if (adempty) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(6); - else if (temp == rate) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - else - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(2); -} - -/** - * \brief Decrypts a ciphertext block with PHOTON-Beetle. - * - * \param state PHOTON-256 permutation state. - * \param m Points to the plaintext output buffer. - * \param c Points to the ciphertext input buffer. - * \param mlen Length of the message, must be non-zero. - * \param rate Rate of absorption for the data. 
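The DOMAIN() constants folded into the last state byte by photon_beetle_process_ad() and photon_beetle_encrypt() provide PHOTON-Beetle's domain separation: the value depends on whether the other input is empty and whether the final absorbed block was a full rate block. A restatement of that selection as a standalone sketch; the helper names are illustrative and not part of the library.

#include <stdint.h>
#include <stdio.h>

/* DOMAIN(c) in the code above is simply (c << 5). */
static uint8_t domain(int c) { return (uint8_t)(c << 5); }

/* Constant used when finalizing associated-data absorption. */
static uint8_t ad_domain(int message_empty, int full_block)
{
    if (message_empty)
        return full_block ? domain(3) : domain(4);
    return full_block ? domain(1) : domain(2);
}

/* Constant used when finalizing message encryption or decryption. */
static uint8_t msg_domain(int ad_empty, int full_block)
{
    if (ad_empty)
        return full_block ? domain(5) : domain(6);
    return full_block ? domain(1) : domain(2);
}

int main(void)
{
    /* Partial final AD block with a non-empty message, and a full final
     * message block with empty AD: prints 0x40 0xa0. */
    printf("0x%02x 0x%02x\n", ad_domain(0, 0), msg_domain(1, 1));
    return 0;
}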
- * \param adempty Non-zero if the associated data is empty. - */ -static void photon_beetle_decrypt - (unsigned char state[PHOTON256_STATE_SIZE], - unsigned char *m, const unsigned char *c, unsigned long long mlen, - unsigned rate, int adempty) -{ - unsigned char shuffle[PHOTON_BEETLE_128_RATE]; /* Block of max rate size */ - unsigned temp; - - /* Process all plaintext blocks except the last */ - while (mlen > rate) { - photon256_permute(state); - memcpy(shuffle, state + rate / 2, rate / 2); - photon_beetle_rotate1(shuffle + rate / 2, state, rate / 2); - lw_xor_block_2_src(m, c, shuffle, rate); - lw_xor_block(state, m, rate); - c += rate; - m += rate; - mlen -= rate; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - photon256_permute(state); - memcpy(shuffle, state + rate / 2, rate / 2); - photon_beetle_rotate1(shuffle + rate / 2, state, rate / 2); - if (temp == rate) { - lw_xor_block_2_src(m, c, shuffle, rate); - lw_xor_block(state, m, rate); - } else { - lw_xor_block_2_src(m, c, shuffle, temp); - lw_xor_block(state, m, temp); - state[temp] ^= 0x01; /* padding */ - } - - /* Add the domain constant to finalize message processing */ - if (adempty && temp == rate) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(5); - else if (adempty) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(6); - else if (temp == rate) - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - else - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(2); -} - -int photon_beetle_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + PHOTON_BEETLE_TAG_SIZE; - - /* Initialize the state by concatenating the nonce and the key */ - memcpy(state, npub, 16); - memcpy(state + 16, k, 16); - - /* Process the associated data */ - if (adlen > 0) { - photon_beetle_process_ad - (state, ad, adlen, PHOTON_BEETLE_128_RATE, mlen == 0); - } else if (mlen == 0) { - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - } - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - photon_beetle_encrypt - (state, c, m, mlen, PHOTON_BEETLE_128_RATE, adlen == 0); - } - - /* Generate the authentication tag */ - photon256_permute(state); - memcpy(c + mlen, state, PHOTON_BEETLE_TAG_SIZE); - return 0; -} - -int photon_beetle_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < PHOTON_BEETLE_TAG_SIZE) - return -1; - *mlen = clen - PHOTON_BEETLE_TAG_SIZE; - - /* Initialize the state by concatenating the nonce and the key */ - memcpy(state, npub, 16); - memcpy(state + 16, k, 16); - - /* Process the associated data */ - clen -= PHOTON_BEETLE_TAG_SIZE; - if (adlen > 0) { - photon_beetle_process_ad - (state, ad, adlen, PHOTON_BEETLE_128_RATE, clen == 0); - } else if (clen == 0) { - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - } - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > 0) { - photon_beetle_decrypt - (state, m, c, clen, PHOTON_BEETLE_128_RATE, adlen == 0); 
- } - - /* Check the authentication tag */ - photon256_permute(state); - return aead_check_tag(m, clen, state, c + clen, PHOTON_BEETLE_TAG_SIZE); -} - -int photon_beetle_32_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + PHOTON_BEETLE_TAG_SIZE; - - /* Initialize the state by concatenating the nonce and the key */ - memcpy(state, npub, 16); - memcpy(state + 16, k, 16); - - /* Process the associated data */ - if (adlen > 0) { - photon_beetle_process_ad - (state, ad, adlen, PHOTON_BEETLE_32_RATE, mlen == 0); - } else if (mlen == 0) { - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - } - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - photon_beetle_encrypt - (state, c, m, mlen, PHOTON_BEETLE_32_RATE, adlen == 0); - } - - /* Generate the authentication tag */ - photon256_permute(state); - memcpy(c + mlen, state, PHOTON_BEETLE_TAG_SIZE); - return 0; -} - -int photon_beetle_32_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < PHOTON_BEETLE_TAG_SIZE) - return -1; - *mlen = clen - PHOTON_BEETLE_TAG_SIZE; - - /* Initialize the state by concatenating the nonce and the key */ - memcpy(state, npub, 16); - memcpy(state + 16, k, 16); - - /* Process the associated data */ - clen -= PHOTON_BEETLE_TAG_SIZE; - if (adlen > 0) { - photon_beetle_process_ad - (state, ad, adlen, PHOTON_BEETLE_32_RATE, clen == 0); - } else if (clen == 0) { - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - } - - /* Decrypt the ciphertext to produce the plaintext */ - if (clen > 0) { - photon_beetle_decrypt - (state, m, c, clen, PHOTON_BEETLE_32_RATE, adlen == 0); - } - - /* Check the authentication tag */ - photon256_permute(state); - return aead_check_tag(m, clen, state, c + clen, PHOTON_BEETLE_TAG_SIZE); -} - -int photon_beetle_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - unsigned char state[PHOTON256_STATE_SIZE]; - unsigned temp; - - /* Absorb the input data */ - if (inlen == 0) { - /* No input data at all */ - memset(state, 0, sizeof(state) - 1); - state[PHOTON256_STATE_SIZE - 1] = DOMAIN(1); - } else if (inlen <= PHOTON_BEETLE_128_RATE) { - /* Only one block of input data, which may require padding */ - temp = (unsigned)inlen; - memcpy(state, in, temp); - memset(state + temp, 0, sizeof(state) - temp - 1); - if (temp < PHOTON_BEETLE_128_RATE) { - state[temp] = 0x01; - state[PHOTON256_STATE_SIZE - 1] = DOMAIN(1); - } else { - state[PHOTON256_STATE_SIZE - 1] = DOMAIN(2); - } - } else { - /* Initialize the state with the first block, then absorb the rest */ - memcpy(state, in, PHOTON_BEETLE_128_RATE); - memset(state + PHOTON_BEETLE_128_RATE, 0, - sizeof(state) - PHOTON_BEETLE_128_RATE); - in += PHOTON_BEETLE_128_RATE; - inlen -= PHOTON_BEETLE_128_RATE; - while (inlen > PHOTON_BEETLE_32_RATE) { - photon256_permute(state); - lw_xor_block(state, in, PHOTON_BEETLE_32_RATE); - in += PHOTON_BEETLE_32_RATE; - 
inlen -= PHOTON_BEETLE_32_RATE; - } - photon256_permute(state); - temp = (unsigned)inlen; - if (temp == PHOTON_BEETLE_32_RATE) { - lw_xor_block(state, in, PHOTON_BEETLE_32_RATE); - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); - } else { - lw_xor_block(state, in, temp); - state[temp] ^= 0x01; - state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(2); - } - } - - /* Generate the output hash */ - photon256_permute(state); - memcpy(out, state, 16); - photon256_permute(state); - memcpy(out + 16, state, 16); - return 0; -} diff --git a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/photon-beetle.h b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/photon-beetle.h deleted file mode 100644 index 2d94a7e..0000000 --- a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys-avr/photon-beetle.h +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_PHOTON_BEETLE_H -#define LWCRYPTO_PHOTON_BEETLE_H - -#include "aead-common.h" - -/** - * \file photon-beetle.h - * \brief PHOTON-Beetle authenticated encryption algorithm. - * - * PHOTON-Beetle is a family of authenticated encryption algorithms based - * on the PHOTON-256 permutation and using the Beetle sponge mode. - * There are three algorithms in the family: - * - * \li PHOTON-Beetle-AEAD-ENC-128 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag. Data is handled in 16 byte blocks. This is the primary - * member of the family for encryption. - * \li PHOTON-Beetle-AEAD-ENC-32 with a 128-bit key, a 128-bit nonce, and a - * 128-bit tag. Data is handled in 4 byte blocks. - * \li PHOTON-Beetle-Hash with a 256-bit hash output. The initial data is - * handled as a 16 byte block, and then the remaining bytes are processed - * in 4 byte blocks. - * - * References: https://www.isical.ac.in/~lightweight/beetle/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for PHOTON-Beetle. - */ -#define PHOTON_BEETLE_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for PHOTON-Beetle. - */ -#define PHOTON_BEETLE_TAG_SIZE 16 - -/** - * \brief Size of the nonce for PHOTON-Beetle. - */ -#define PHOTON_BEETLE_NONCE_SIZE 16 - -/** - * \brief Size of the hash output for PHOTON-Beetle-HASH. - */ -#define PHOTON_BEETLE_HASH_SIZE 32 - -/** - * \brief Meta-information block for the PHOTON-Beetle-AEAD-ENC-128 cipher. 
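photon_beetle_hash() above absorbs the first block of input at the 16-byte rate, absorbs any remaining data at the 4-byte rate, and then squeezes two 16-byte output blocks. A minimal caller for the one-shot entry point; the message and the main() harness are illustrative only.

#include <stdio.h>
#include <string.h>
#include "photon-beetle.h"

int main(void)
{
    unsigned char digest[PHOTON_BEETLE_HASH_SIZE];
    const unsigned char msg[] = "hello";

    /* One call absorbs the whole message and produces the 32-byte digest. */
    if (photon_beetle_hash(digest, msg, strlen((const char *)msg)) != 0)
        return 1;
    for (unsigned i = 0; i < sizeof(digest); ++i)
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}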
- */ -extern aead_cipher_t const photon_beetle_128_cipher; - -/** - * \brief Meta-information block for the PHOTON-Beetle-AEAD-ENC-32 cipher. - */ -extern aead_cipher_t const photon_beetle_32_cipher; - -/** - * \brief Meta-information block for the PHOTON-Beetle-HASH algorithm. - */ -extern aead_hash_algorithm_t const photon_beetle_hash_algorithm; - -/** - * \brief Encrypts and authenticates a packet with PHOTON-Beetle-AEAD-ENC-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa photon_beetle_128_aead_decrypt() - */ -int photon_beetle_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PHOTON-Beetle-AEAD-ENC-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa photon_beetle_128_aead_encrypt() - */ -int photon_beetle_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with PHOTON-Beetle-AEAD-ENC-32. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. 
- * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa photon_beetle_32_aead_decrypt() - */ -int photon_beetle_32_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with PHOTON-Beetle-AEAD-ENC-32. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa photon_beetle_32_aead_encrypt() - */ -int photon_beetle_32_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with PHOTON-Beetle-HASH to - * generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * PHOTON_BEETLE_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int photon_beetle_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/aead-common.c b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/aead-common.c new file mode 100644 index 0000000..84fc53a --- /dev/null +++ b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/aead-common.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. 
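The header above only declares the entry points; for reference, a hedged round-trip sketch using the ENC-128 pair. The all-zero key and nonce and the main() harness are illustrative only, and a real caller must use a unique nonce for every message encrypted under the same key.

#include <stdio.h>
#include <string.h>
#include "photon-beetle.h"

int main(void)
{
    unsigned char key[PHOTON_BEETLE_KEY_SIZE] = {0};
    unsigned char nonce[PHOTON_BEETLE_NONCE_SIZE] = {0};
    const unsigned char msg[] = "attack at dawn";
    const unsigned char ad[] = "header";
    unsigned char ct[sizeof(msg) + PHOTON_BEETLE_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    /* Ciphertext length is the plaintext length plus the 16-byte tag. */
    photon_beetle_128_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                                   ad, sizeof(ad), NULL, nonce, key);

    /* Decryption returns 0 only if the authentication tag verifies. */
    if (photon_beetle_128_aead_decrypt(pt, &ptlen, NULL, ct, ctlen,
                                       ad, sizeof(ad), nonce, key) != 0) {
        printf("tag check failed\n");
        return 1;
    }
    printf("recovered %llu bytes\n", ptlen);
    return 0;
}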
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "aead-common.h" + +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = (accum - 1) >> 8; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} + +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size, int precheck) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = ((accum - 1) >> 8) & precheck; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} diff --git a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/aead-common.h b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/aead-common.h new file mode 100644 index 0000000..2be95eb --- /dev/null +++ b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/aead-common.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_AEAD_COMMON_H +#define LWCRYPTO_AEAD_COMMON_H + +#include + +/** + * \file aead-common.h + * \brief Definitions that are common across AEAD schemes. + * + * AEAD stands for "Authenticated Encryption with Associated Data". + * It is a standard API pattern for securely encrypting and + * authenticating packets of data. 
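aead_check_tag() above ORs every byte difference into accum and then converts the result into an all-zero or all-ones mask, so the comparison touches every byte regardless of where the tags first differ, and the same mask wipes the plaintext on failure. The masking trick restated as a standalone sketch (an illustrative helper, not the library function; like the original it assumes an arithmetic right shift of negative values):

#include <stdio.h>

/* Returns 0 if the two tags are equal and -1 otherwise, in time that does
 * not depend on the tag contents. */
static int tags_equal_ct(const unsigned char *a, const unsigned char *b,
                         unsigned len)
{
    int accum = 0;
    while (len > 0) {
        accum |= (*a++ ^ *b++);   /* becomes non-zero on any difference */
        --len;
    }
    /* accum is now 0..255: (accum - 1) >> 8 gives -1 on a match, 0 otherwise. */
    accum = (accum - 1) >> 8;
    return ~accum;
}

int main(void)
{
    unsigned char t1[16] = {0}, t2[16] = {0};
    printf("%d\n", tags_equal_ct(t1, t2, 16));  /* 0: tags match */
    t2[15] ^= 1;
    printf("%d\n", tags_equal_ct(t1, t2, 16));  /* -1: last byte differs */
    return 0;
}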
+ */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts and authenticates a packet with an AEAD scheme. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + */ +typedef int (*aead_cipher_encrypt_t) + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with an AEAD scheme. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + */ +typedef int (*aead_cipher_decrypt_t) + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data. + * + * \param out Buffer to receive the hash output. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +typedef int (*aead_hash_t) + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a hashing operation. + * + * \param state Hash state to be initialized. + */ +typedef void (*aead_hash_init_t)(void *state); + +/** + * \brief Updates a hash state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + */ +typedef void (*aead_hash_update_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Returns the final hash value from a hashing operation. + * + * \param Hash state to be finalized. + * \param out Points to the output buffer to receive the hash value. 
+ */ +typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); + +/** + * \brief Absorbs more input data into an XOF state. + * + * \param state XOF state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. + * + * \sa ascon_xof_init(), ascon_xof_squeeze() + */ +typedef void (*aead_xof_absorb_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Squeezes output data from an XOF state. + * + * \param state XOF state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + */ +typedef void (*aead_xof_squeeze_t) + (void *state, unsigned char *out, unsigned long long outlen); + +/** + * \brief No special AEAD features. + */ +#define AEAD_FLAG_NONE 0x0000 + +/** + * \brief The natural byte order of the AEAD cipher is little-endian. + * + * If this flag is not present, then the natural byte order of the + * AEAD cipher should be assumed to be big-endian. + * + * The natural byte order may be useful when formatting packet sequence + * numbers as nonces. The application needs to know whether the sequence + * number should be packed into the leading or trailing bytes of the nonce. + */ +#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 + +/** + * \brief Meta-information about an AEAD cipher. + */ +typedef struct +{ + const char *name; /**< Name of the cipher */ + unsigned key_len; /**< Length of the key in bytes */ + unsigned nonce_len; /**< Length of the nonce in bytes */ + unsigned tag_len; /**< Length of the tag in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ + aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ + +} aead_cipher_t; + +/** + * \brief Meta-information about a hash algorithm that is related to an AEAD. + * + * Regular hash algorithms should provide the "hash", "init", "update", + * and "finalize" functions. Extensible Output Functions (XOF's) should + * provide the "hash", "init", "absorb", and "squeeze" functions. + */ +typedef struct +{ + const char *name; /**< Name of the hash algorithm */ + size_t state_size; /**< Size of the incremental state structure */ + unsigned hash_len; /**< Length of the hash in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_hash_t hash; /**< All in one hashing function */ + aead_hash_init_t init; /**< Incremental hash/XOF init function */ + aead_hash_update_t update; /**< Incremental hash update function */ + aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ + aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ + aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ + +} aead_hash_algorithm_t; + +/** + * \brief Check an authentication tag in constant time. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. 
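The aead_cipher_t and aead_hash_algorithm_t blocks above exist so that generic benchmarking and test code can drive any algorithm through function pointers rather than hard-coded calls. A small illustrative sketch using the PHOTON-Beetle hash meta-information block declared elsewhere in this patch; the main() harness is not part of the library.

#include <stdio.h>
#include <string.h>
#include "aead-common.h"
#include "photon-beetle.h"

int main(void)
{
    const aead_hash_algorithm_t *alg = &photon_beetle_hash_algorithm;
    unsigned char digest[64];
    const unsigned char msg[] = "abc";

    printf("%s: %u-byte digest\n", alg->name, alg->hash_len);

    /* The all-in-one hash function is always provided; the incremental
     * init/update/finalize members may be null, as they are for
     * PHOTON-Beetle-HASH. */
    if (alg->hash != NULL && alg->hash_len <= sizeof(digest))
        alg->hash(digest, msg, strlen((const char *)msg));
    return 0;
}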
+ */ +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len); + +/** + * \brief Check an authentication tag in constant time with a previous check. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * \param precheck Set to -1 if previous check succeeded or 0 if it failed. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + * + * This version can be used to incorporate other information about the + * correctness of the plaintext into the final result. + */ +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len, int precheck); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/api.h b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/api.h new file mode 100644 index 0000000..ae8c7f6 --- /dev/null +++ b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/api.h @@ -0,0 +1 @@ +#define CRYPTO_BYTES 32 diff --git a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/hash.c b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/hash.c new file mode 100644 index 0000000..c75624a --- /dev/null +++ b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/hash.c @@ -0,0 +1,8 @@ + +#include "photon-beetle.h" + +int crypto_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + return photon_beetle_hash(out, in, inlen); +} diff --git a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/internal-photon256.c b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/internal-photon256.c new file mode 100644 index 0000000..b8743fe --- /dev/null +++ b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/internal-photon256.c @@ -0,0 +1,479 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "internal-photon256.h" +#include "internal-util.h" + +/** + * \brief Number of rounds in the PHOTON-256 permutation in bit-sliced form. + */ +#define PHOTON256_ROUNDS 12 + +/* Round constants for PHOTON-256 */ +static uint32_t const photon256_rc[PHOTON256_ROUNDS] = { + 0x96d2f0e1, 0xb4f0d2c3, 0xf0b49687, 0x692d0f1e, + 0x5a1e3c2d, 0x3c785a4b, 0xe1a58796, 0x4b0f2d3c, + 0x1e5a7869, 0xa5e1c3d2, 0xd296b4a5, 0x2d694b5a +}; + +/** + * \brief Evaluates the PHOTON-256 S-box in bit-sliced form. + * + * \param x0 Slice with bit 0 of all nibbles. + * \param x1 Slice with bit 1 of all nibbles. + * \param x2 Slice with bit 2 of all nibbles. + * \param x3 Slice with bit 3 of all nibbles. + * + * This bit-sliced S-box implementation is based on the AVR version + * "add_avr8_bitslice_asm" from the PHOTON-Beetle reference code. + */ +#define photon256_sbox(x0, x1, x2, x3) \ + do { \ + x1 ^= x2; \ + x3 ^= (x2 & x1); \ + t1 = x3; \ + x3 = (x3 & x1) ^ x2; \ + t2 = x3; \ + x3 ^= x0; \ + x3 = ~(x3); \ + x2 = x3; \ + t2 |= x0; \ + x0 ^= t1; \ + x1 ^= x0; \ + x2 |= x1; \ + x2 ^= t1; \ + x1 ^= t2; \ + x3 ^= x1; \ + } while (0) + +/** + * \brief Performs a field multiplication on the 8 nibbles in a row. + * + * \param a Field constant to multiply by. + * \param x Bit-sliced form of the row, with bits 0..3 of each nibble + * in bytes 0..3 of the word. + * + * \return a * x packed into the bytes of a word. + */ +static uint32_t photon256_field_multiply(uint8_t a, uint32_t x) +{ + /* For each 4-bit nibble we need to do this: + * + * result = 0; + * for (bit = 0; bit < 4; ++ bit) { + * if ((a & (1 << bit)) != 0) + * result ^= x; + * if ((x & 0x08) != 0) { + * x = (x << 1) ^ 3; + * } else { + * x = (x << 1); + * } + * } + * + * We don't need to worry about constant time for "a" because it is a + * known constant that isn't data-dependent. But we do need to worry + * about constant time for "x" as it is data. + */ + uint32_t result = 0; + uint32_t t; + #define PARALLEL_CONDITIONAL_ADD(bit) \ + do { \ + if ((a) & (1 << (bit))) \ + result ^= x; \ + } while (0) + #define PARALELL_ROTATE() \ + do { \ + t = x >> 24; \ + x = (x << 8) ^ t ^ (t << 8); \ + } while (0) + PARALLEL_CONDITIONAL_ADD(0); + PARALELL_ROTATE(); + PARALLEL_CONDITIONAL_ADD(1); + PARALELL_ROTATE(); + PARALLEL_CONDITIONAL_ADD(2); + PARALELL_ROTATE(); + PARALLEL_CONDITIONAL_ADD(3); + return result; +} + +/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ +#define bit_permute_step(_y, mask, shift) \ + do { \ + uint32_t y = (_y); \ + uint32_t t = ((y >> (shift)) ^ y) & (mask); \ + (_y) = (y ^ t) ^ (t << (shift)); \ + } while (0) + +/** + * \brief Converts a PHOTON-256 state into bit-sliced form. + * + * \param out Points to the converted output. + * \param in Points to the PHOTON-256 state to convert. + */ +static void photon256_to_sliced + (uint32_t out[PHOTON256_STATE_SIZE / 4], + const unsigned char in[PHOTON256_STATE_SIZE]) +{ + /* We first scatter bits 0..3 of the nibbles to bytes 0..3 of the words. + * Then we rearrange the bytes to group all bits N into word N. + * + * Permutation generated with "http://programming.sirrida.de/calcperm.php". 
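The comment inside photon256_field_multiply() spells out the per-nibble loop that the word-packed code evaluates for all eight nibbles of a row at once. The same loop on a single nibble, written out as an ordinary GF(2^4) multiplication; an illustrative helper, assuming the x^4 + x + 1 reduction polynomial implied by the "^ 3" step, which can be used to cross-check the packed version.

#include <stdint.h>

/* Multiply two 4-bit field elements with reduction by x^4 + x + 1. */
static uint8_t gf16_multiply(uint8_t a, uint8_t x)
{
    uint8_t result = 0;
    int bit;
    for (bit = 0; bit < 4; ++bit) {
        if (a & (1 << bit))
            result ^= x;
        if (x & 0x08)
            x = (uint8_t)(((x << 1) ^ 0x03) & 0x0F);  /* reduce the overflow bit */
        else
            x = (uint8_t)((x << 1) & 0x0F);
    }
    return result;
}

The bit-sliced version computes exactly this result for every nibble of the row in parallel, with one bit plane of the nibbles held in each byte of the 32-bit word.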
+ * + * P = [0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 + * 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31] + */ + uint32_t t0, t1, t2, t3; + #define TO_BITSLICED_PERM(x) \ + do { \ + bit_permute_step(x, 0x0a0a0a0a, 3); \ + bit_permute_step(x, 0x00cc00cc, 6); \ + bit_permute_step(x, 0x0000f0f0, 12); \ + bit_permute_step(x, 0x0000ff00, 8); \ + } while (0) + #define FROM_BITSLICED_PERM(x) \ + do { \ + bit_permute_step(x, 0x00aa00aa, 7); \ + bit_permute_step(x, 0x0000cccc, 14); \ + bit_permute_step(x, 0x00f000f0, 4); \ + bit_permute_step(x, 0x0000ff00, 8); \ + } while (0) + t0 = le_load_word32(in); + t1 = le_load_word32(in + 4); + t2 = le_load_word32(in + 8); + t3 = le_load_word32(in + 12); + TO_BITSLICED_PERM(t0); + TO_BITSLICED_PERM(t1); + TO_BITSLICED_PERM(t2); + TO_BITSLICED_PERM(t3); + out[0] = (t0 & 0x000000FFU) | ((t1 << 8) & 0x0000FF00U) | + ((t2 << 16) & 0x00FF0000U) | ((t3 << 24) & 0xFF000000U); + out[1] = ((t0 >> 8) & 0x000000FFU) | (t1 & 0x0000FF00U) | + ((t2 << 8) & 0x00FF0000U) | ((t3 << 16) & 0xFF000000U); + out[2] = ((t0 >> 16) & 0x000000FFU) | ((t1 >> 8) & 0x0000FF00U) | + (t2 & 0x00FF0000U) | ((t3 << 8) & 0xFF000000U); + out[3] = ((t0 >> 24) & 0x000000FFU) | ((t1 >> 16) & 0x0000FF00U) | + ((t2 >> 8) & 0x00FF0000U) | (t3 & 0xFF000000U); + t0 = le_load_word32(in + 16); + t1 = le_load_word32(in + 20); + t2 = le_load_word32(in + 24); + t3 = le_load_word32(in + 28); + TO_BITSLICED_PERM(t0); + TO_BITSLICED_PERM(t1); + TO_BITSLICED_PERM(t2); + TO_BITSLICED_PERM(t3); + out[4] = (t0 & 0x000000FFU) | ((t1 << 8) & 0x0000FF00U) | + ((t2 << 16) & 0x00FF0000U) | ((t3 << 24) & 0xFF000000U); + out[5] = ((t0 >> 8) & 0x000000FFU) | (t1 & 0x0000FF00U) | + ((t2 << 8) & 0x00FF0000U) | ((t3 << 16) & 0xFF000000U); + out[6] = ((t0 >> 16) & 0x000000FFU) | ((t1 >> 8) & 0x0000FF00U) | + (t2 & 0x00FF0000U) | ((t3 << 8) & 0xFF000000U); + out[7] = ((t0 >> 24) & 0x000000FFU) | ((t1 >> 16) & 0x0000FF00U) | + ((t2 >> 8) & 0x00FF0000U) | (t3 & 0xFF000000U); +} + +/** + * \brief Converts a PHOTON-256 state from bit-sliced form. + * + * \param out Points to the converted output. + * \param in Points to the PHOTON-256 state to convert. 
+ */ +static void photon256_from_sliced + (unsigned char out[PHOTON256_STATE_SIZE], + const unsigned char in[PHOTON256_STATE_SIZE]) +{ + /* Do the reverse of photon256_to_sliced() */ + uint32_t x0, x1, x2, x3; + x0 = ((uint32_t)(in[0])) | + (((uint32_t)(in[4])) << 8) | + (((uint32_t)(in[8])) << 16) | + (((uint32_t)(in[12])) << 24); + x1 = ((uint32_t)(in[1])) | + (((uint32_t)(in[5])) << 8) | + (((uint32_t)(in[9])) << 16) | + (((uint32_t)(in[13])) << 24); + x2 = ((uint32_t)(in[2])) | + (((uint32_t)(in[6])) << 8) | + (((uint32_t)(in[10])) << 16) | + (((uint32_t)(in[14])) << 24); + x3 = ((uint32_t)(in[3])) | + (((uint32_t)(in[7])) << 8) | + (((uint32_t)(in[11])) << 16) | + (((uint32_t)(in[15])) << 24); + FROM_BITSLICED_PERM(x0); + FROM_BITSLICED_PERM(x1); + FROM_BITSLICED_PERM(x2); + FROM_BITSLICED_PERM(x3); + le_store_word32(out, x0); + le_store_word32(out + 4, x1); + le_store_word32(out + 8, x2); + le_store_word32(out + 12, x3); + x0 = ((uint32_t)(in[16])) | + (((uint32_t)(in[20])) << 8) | + (((uint32_t)(in[24])) << 16) | + (((uint32_t)(in[28])) << 24); + x1 = ((uint32_t)(in[17])) | + (((uint32_t)(in[21])) << 8) | + (((uint32_t)(in[25])) << 16) | + (((uint32_t)(in[29])) << 24); + x2 = ((uint32_t)(in[18])) | + (((uint32_t)(in[22])) << 8) | + (((uint32_t)(in[26])) << 16) | + (((uint32_t)(in[30])) << 24); + x3 = ((uint32_t)(in[19])) | + (((uint32_t)(in[23])) << 8) | + (((uint32_t)(in[27])) << 16) | + (((uint32_t)(in[31])) << 24); + FROM_BITSLICED_PERM(x0); + FROM_BITSLICED_PERM(x1); + FROM_BITSLICED_PERM(x2); + FROM_BITSLICED_PERM(x3); + le_store_word32(out + 16, x0); + le_store_word32(out + 20, x1); + le_store_word32(out + 24, x2); + le_store_word32(out + 28, x3); +} + +#if defined(LW_UTIL_LITTLE_ENDIAN) +/* Index the bit-sliced state bytes in little-endian byte order */ +#define READ_ROW0() \ + (((uint32_t)(S.bytes[0])) | \ + (((uint32_t)(S.bytes[4])) << 8) | \ + (((uint32_t)(S.bytes[8])) << 16) | \ + (((uint32_t)(S.bytes[12])) << 24)) +#define READ_ROW1() \ + (((uint32_t)(S.bytes[1])) | \ + (((uint32_t)(S.bytes[5])) << 8) | \ + (((uint32_t)(S.bytes[9])) << 16) | \ + (((uint32_t)(S.bytes[13])) << 24)) +#define READ_ROW2() \ + (((uint32_t)(S.bytes[2])) | \ + (((uint32_t)(S.bytes[6])) << 8) | \ + (((uint32_t)(S.bytes[10])) << 16) | \ + (((uint32_t)(S.bytes[14])) << 24)) +#define READ_ROW3() \ + (((uint32_t)(S.bytes[3])) | \ + (((uint32_t)(S.bytes[7])) << 8) | \ + (((uint32_t)(S.bytes[11])) << 16) | \ + (((uint32_t)(S.bytes[15])) << 24)) +#define READ_ROW4() \ + (((uint32_t)(S.bytes[16])) | \ + (((uint32_t)(S.bytes[20])) << 8) | \ + (((uint32_t)(S.bytes[24])) << 16) | \ + (((uint32_t)(S.bytes[28])) << 24)) +#define READ_ROW5() \ + (((uint32_t)(S.bytes[17])) | \ + (((uint32_t)(S.bytes[21])) << 8) | \ + (((uint32_t)(S.bytes[25])) << 16) | \ + (((uint32_t)(S.bytes[29])) << 24)) +#define READ_ROW6() \ + (((uint32_t)(S.bytes[18])) | \ + (((uint32_t)(S.bytes[22])) << 8) | \ + (((uint32_t)(S.bytes[26])) << 16) | \ + (((uint32_t)(S.bytes[30])) << 24)) +#define READ_ROW7() \ + (((uint32_t)(S.bytes[19])) | \ + (((uint32_t)(S.bytes[23])) << 8) | \ + (((uint32_t)(S.bytes[27])) << 16) | \ + (((uint32_t)(S.bytes[31])) << 24)) +#define WRITE_ROW(row, value) \ + do { \ + if ((row) < 4) { \ + S.bytes[(row)] = (uint8_t)(value); \ + S.bytes[(row) + 4] = (uint8_t)((value) >> 8); \ + S.bytes[(row) + 8] = (uint8_t)((value) >> 16); \ + S.bytes[(row) + 12] = (uint8_t)((value) >> 24); \ + } else { \ + S.bytes[(row) + 12] = (uint8_t)(value); \ + S.bytes[(row) + 16] = (uint8_t)((value) >> 8); \ + S.bytes[(row) + 20] = 
(uint8_t)((value) >> 16); \ + S.bytes[(row) + 24] = (uint8_t)((value) >> 24); \ + } \ + } while (0) +#else +/* Index the bit-sliced state bytes in big-endian byte order */ +#define READ_ROW0() \ + (((uint32_t)(S.bytes[3])) | \ + (((uint32_t)(S.bytes[7])) << 8) | \ + (((uint32_t)(S.bytes[11])) << 16) | \ + (((uint32_t)(S.bytes[15])) << 24)) +#define READ_ROW1() \ + (((uint32_t)(S.bytes[2])) | \ + (((uint32_t)(S.bytes[6])) << 8) | \ + (((uint32_t)(S.bytes[10])) << 16) | \ + (((uint32_t)(S.bytes[14])) << 24)) +#define READ_ROW2() \ + (((uint32_t)(S.bytes[1])) | \ + (((uint32_t)(S.bytes[5])) << 8) | \ + (((uint32_t)(S.bytes[9])) << 16) | \ + (((uint32_t)(S.bytes[13])) << 24)) +#define READ_ROW3() \ + (((uint32_t)(S.bytes[0])) | \ + (((uint32_t)(S.bytes[4])) << 8) | \ + (((uint32_t)(S.bytes[8])) << 16) | \ + (((uint32_t)(S.bytes[12])) << 24)) +#define READ_ROW4() \ + (((uint32_t)(S.bytes[19])) | \ + (((uint32_t)(S.bytes[23])) << 8) | \ + (((uint32_t)(S.bytes[27])) << 16) | \ + (((uint32_t)(S.bytes[31])) << 24)) +#define READ_ROW5() \ + (((uint32_t)(S.bytes[18])) | \ + (((uint32_t)(S.bytes[22])) << 8) | \ + (((uint32_t)(S.bytes[26])) << 16) | \ + (((uint32_t)(S.bytes[30])) << 24)) +#define READ_ROW6() \ + (((uint32_t)(S.bytes[17])) | \ + (((uint32_t)(S.bytes[21])) << 8) | \ + (((uint32_t)(S.bytes[25])) << 16) | \ + (((uint32_t)(S.bytes[29])) << 24)) +#define READ_ROW7() \ + (((uint32_t)(S.bytes[16])) | \ + (((uint32_t)(S.bytes[20])) << 8) | \ + (((uint32_t)(S.bytes[24])) << 16) | \ + (((uint32_t)(S.bytes[28])) << 24)) +#define WRITE_ROW(row, value) \ + do { \ + if ((row) < 4) { \ + S.bytes[3 - (row)] = (uint8_t)(value); \ + S.bytes[7 - (row)] = (uint8_t)((value) >> 8); \ + S.bytes[11 - (row)] = (uint8_t)((value) >> 16); \ + S.bytes[15 - (row)] = (uint8_t)((value) >> 24); \ + } else { \ + S.bytes[20 - (row)] = (uint8_t)(value); \ + S.bytes[24 - (row)] = (uint8_t)((value) >> 8); \ + S.bytes[28 - (row)] = (uint8_t)((value) >> 16); \ + S.bytes[32 - (row)] = (uint8_t)((value) >> 24); \ + } \ + } while (0) +#endif + +void photon256_permute(unsigned char state[PHOTON256_STATE_SIZE]) +{ + union { + uint32_t words[PHOTON256_STATE_SIZE / 4]; + uint8_t bytes[PHOTON256_STATE_SIZE]; + } S; + uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8; + uint8_t round; + + /* Convert the state into bit-sliced form */ + photon256_to_sliced(S.words, state); + + /* Perform all 12 permutation rounds */ + for (round = 0; round < PHOTON256_ROUNDS; ++round) { + /* Add the constants for this round */ + t0 = photon256_rc[round]; + S.words[0] ^= t0 & 0x01010101U; + t0 >>= 1; + S.words[1] ^= t0 & 0x01010101U; + t0 >>= 1; + S.words[2] ^= t0 & 0x01010101U; + t0 >>= 1; + S.words[3] ^= t0 & 0x01010101U; + t0 >>= 1; + S.words[4] ^= t0 & 0x01010101U; + t0 >>= 1; + S.words[5] ^= t0 & 0x01010101U; + t0 >>= 1; + S.words[6] ^= t0 & 0x01010101U; + t0 >>= 1; + S.words[7] ^= t0 & 0x01010101U; + + /* Apply the sbox to all nibbles in the state */ + photon256_sbox(S.words[0], S.words[1], S.words[2], S.words[3]); + photon256_sbox(S.words[4], S.words[5], S.words[6], S.words[7]); + + /* Rotate all rows left by the row number. + * + * We do this by applying permutations to the top and bottom words + * to rearrange the bits into the rotated form. Permutations + * generated with "http://programming.sirrida.de/calcperm.php". 
+ * + * P_top = [0 1 2 3 4 5 6 7 15 8 9 10 11 12 13 14 22 23 + * 16 17 18 19 20 21 29 30 31 24 25 26 27 28] + * P_bot = [4 5 6 7 0 1 2 3 11 12 13 14 15 8 9 10 18 19 + * 20 21 22 23 16 17 25 26 27 28 29 30 31 24 + */ + #define TOP_ROTATE_PERM(x) \ + do { \ + t1 = (x); \ + bit_permute_step(t1, 0x07030100, 4); \ + bit_permute_step(t1, 0x22331100, 2); \ + bit_permute_step(t1, 0x55005500, 1); \ + (x) = t1; \ + } while (0) + #define BOTTOM_ROTATE_PERM(x) \ + do { \ + t1 = (x); \ + bit_permute_step(t1, 0x080c0e0f, 4); \ + bit_permute_step(t1, 0x22331100, 2); \ + bit_permute_step(t1, 0x55005500, 1); \ + (x) = t1; \ + } while (0) + TOP_ROTATE_PERM(S.words[0]); + TOP_ROTATE_PERM(S.words[1]); + TOP_ROTATE_PERM(S.words[2]); + TOP_ROTATE_PERM(S.words[3]); + BOTTOM_ROTATE_PERM(S.words[4]); + BOTTOM_ROTATE_PERM(S.words[5]); + BOTTOM_ROTATE_PERM(S.words[6]); + BOTTOM_ROTATE_PERM(S.words[7]); + + /* Mix the columns */ + #define MUL(a, x) (photon256_field_multiply((a), (x))) + t0 = READ_ROW0(); + t1 = READ_ROW1(); + t2 = READ_ROW2(); + t3 = READ_ROW3(); + t4 = READ_ROW4(); + t5 = READ_ROW5(); + t6 = READ_ROW6(); + t7 = READ_ROW7(); + t8 = MUL(0x02, t0) ^ MUL(0x04, t1) ^ MUL(0x02, t2) ^ MUL(0x0b, t3) ^ + MUL(0x02, t4) ^ MUL(0x08, t5) ^ MUL(0x05, t6) ^ MUL(0x06, t7); + WRITE_ROW(0, t8); + t8 = MUL(0x0c, t0) ^ MUL(0x09, t1) ^ MUL(0x08, t2) ^ MUL(0x0d, t3) ^ + MUL(0x07, t4) ^ MUL(0x07, t5) ^ MUL(0x05, t6) ^ MUL(0x02, t7); + WRITE_ROW(1, t8); + t8 = MUL(0x04, t0) ^ MUL(0x04, t1) ^ MUL(0x0d, t2) ^ MUL(0x0d, t3) ^ + MUL(0x09, t4) ^ MUL(0x04, t5) ^ MUL(0x0d, t6) ^ MUL(0x09, t7); + WRITE_ROW(2, t8); + t8 = MUL(0x01, t0) ^ MUL(0x06, t1) ^ MUL(0x05, t2) ^ MUL(0x01, t3) ^ + MUL(0x0c, t4) ^ MUL(0x0d, t5) ^ MUL(0x0f, t6) ^ MUL(0x0e, t7); + WRITE_ROW(3, t8); + t8 = MUL(0x0f, t0) ^ MUL(0x0c, t1) ^ MUL(0x09, t2) ^ MUL(0x0d, t3) ^ + MUL(0x0e, t4) ^ MUL(0x05, t5) ^ MUL(0x0e, t6) ^ MUL(0x0d, t7); + WRITE_ROW(4, t8); + t8 = MUL(0x09, t0) ^ MUL(0x0e, t1) ^ MUL(0x05, t2) ^ MUL(0x0f, t3) ^ + MUL(0x04, t4) ^ MUL(0x0c, t5) ^ MUL(0x09, t6) ^ MUL(0x06, t7); + WRITE_ROW(5, t8); + t8 = MUL(0x0c, t0) ^ MUL(0x02, t1) ^ MUL(0x02, t2) ^ MUL(0x0a, t3) ^ + MUL(0x03, t4) ^ MUL(0x01, t5) ^ MUL(0x01, t6) ^ MUL(0x0e, t7); + WRITE_ROW(6, t8); + t8 = MUL(0x0f, t0) ^ MUL(0x01, t1) ^ MUL(0x0d, t2) ^ MUL(0x0a, t3) ^ + MUL(0x05, t4) ^ MUL(0x0a, t5) ^ MUL(0x02, t6) ^ MUL(0x03, t7); + WRITE_ROW(7, t8); + } + + /* Convert back from bit-sliced form to regular form */ + photon256_from_sliced(state, S.bytes); +} diff --git a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/internal-photon256.h b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/internal-photon256.h new file mode 100644 index 0000000..ce8729a --- /dev/null +++ b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/internal-photon256.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_PHOTON256_H +#define LW_INTERNAL_PHOTON256_H + +/** + * \file internal-photon256.h + * \brief Internal implementation of the PHOTON-256 permutation. + * + * Warning: The current implementation of PHOTON-256 is constant-time + * but not constant-cache. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the PHOTON-256 permutation state in bytes. + */ +#define PHOTON256_STATE_SIZE 32 + +/** + * \brief Permutes the PHOTON-256 state. + * + * \param state The state to be permuted. + */ +void photon256_permute(unsigned char state[PHOTON256_STATE_SIZE]); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/internal-util.h b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/internal-util.h new file mode 100644 index 0000000..e30166d --- /dev/null +++ b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/internal-util.h @@ -0,0 +1,702 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_UTIL_H +#define LW_INTERNAL_UTIL_H + +#include <stdint.h> + +/* Figure out how to inline functions using this C compiler */ +#if defined(__STDC__) && __STDC_VERSION__ >= 199901L +#define STATIC_INLINE static inline +#elif defined(__GNUC__) || defined(__clang__) +#define STATIC_INLINE static __inline__ +#else +#define STATIC_INLINE static +#endif + +/* Try to figure out whether the CPU is little-endian or big-endian. + * May need to modify this to include new compiler-specific defines.
+ * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your + * compiler flags when you compile this library */ +#if defined(__x86_64) || defined(__x86_64__) || \ + defined(__i386) || defined(__i386__) || \ + defined(__AVR__) || defined(__arm) || defined(__arm__) || \ + defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ + defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ + defined(__LITTLE_ENDIAN__) +#define LW_UTIL_LITTLE_ENDIAN 1 +#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ + defined(__BIG_ENDIAN__) +/* Big endian */ +#else +#error "Cannot determine the endianess of this platform" +#endif + +/* Helper macros to load and store values while converting endian-ness */ + +/* Load a big-endian 32-bit word from a byte buffer */ +#define be_load_word32(ptr) \ + ((((uint32_t)((ptr)[0])) << 24) | \ + (((uint32_t)((ptr)[1])) << 16) | \ + (((uint32_t)((ptr)[2])) << 8) | \ + ((uint32_t)((ptr)[3]))) + +/* Store a big-endian 32-bit word into a byte buffer */ +#define be_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 24); \ + (ptr)[1] = (uint8_t)(_x >> 16); \ + (ptr)[2] = (uint8_t)(_x >> 8); \ + (ptr)[3] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 32-bit word from a byte buffer */ +#define le_load_word32(ptr) \ + ((((uint32_t)((ptr)[3])) << 24) | \ + (((uint32_t)((ptr)[2])) << 16) | \ + (((uint32_t)((ptr)[1])) << 8) | \ + ((uint32_t)((ptr)[0]))) + +/* Store a little-endian 32-bit word into a byte buffer */ +#define le_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + } while (0) + +/* Load a big-endian 64-bit word from a byte buffer */ +#define be_load_word64(ptr) \ + ((((uint64_t)((ptr)[0])) << 56) | \ + (((uint64_t)((ptr)[1])) << 48) | \ + (((uint64_t)((ptr)[2])) << 40) | \ + (((uint64_t)((ptr)[3])) << 32) | \ + (((uint64_t)((ptr)[4])) << 24) | \ + (((uint64_t)((ptr)[5])) << 16) | \ + (((uint64_t)((ptr)[6])) << 8) | \ + ((uint64_t)((ptr)[7]))) + +/* Store a big-endian 64-bit word into a byte buffer */ +#define be_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 56); \ + (ptr)[1] = (uint8_t)(_x >> 48); \ + (ptr)[2] = (uint8_t)(_x >> 40); \ + (ptr)[3] = (uint8_t)(_x >> 32); \ + (ptr)[4] = (uint8_t)(_x >> 24); \ + (ptr)[5] = (uint8_t)(_x >> 16); \ + (ptr)[6] = (uint8_t)(_x >> 8); \ + (ptr)[7] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 64-bit word from a byte buffer */ +#define le_load_word64(ptr) \ + ((((uint64_t)((ptr)[7])) << 56) | \ + (((uint64_t)((ptr)[6])) << 48) | \ + (((uint64_t)((ptr)[5])) << 40) | \ + (((uint64_t)((ptr)[4])) << 32) | \ + (((uint64_t)((ptr)[3])) << 24) | \ + (((uint64_t)((ptr)[2])) << 16) | \ + (((uint64_t)((ptr)[1])) << 8) | \ + ((uint64_t)((ptr)[0]))) + +/* Store a little-endian 64-bit word into a byte buffer */ +#define le_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + (ptr)[4] = (uint8_t)(_x >> 32); \ + (ptr)[5] = (uint8_t)(_x >> 40); \ + (ptr)[6] = (uint8_t)(_x >> 48); \ + (ptr)[7] = (uint8_t)(_x >> 56); \ + } while (0) + +/* Load a big-endian 16-bit word from a byte buffer */ +#define be_load_word16(ptr) \ + ((((uint16_t)((ptr)[0])) << 8) | \ + ((uint16_t)((ptr)[1]))) + +/* Store a 
big-endian 16-bit word into a byte buffer */ +#define be_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 8); \ + (ptr)[1] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 16-bit word from a byte buffer */ +#define le_load_word16(ptr) \ + ((((uint16_t)((ptr)[1])) << 8) | \ + ((uint16_t)((ptr)[0]))) + +/* Store a little-endian 16-bit word into a byte buffer */ +#define le_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + } while (0) + +/* XOR a source byte buffer against a destination */ +#define lw_xor_block(dest, src, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ ^= *_src++; \ + --_len; \ + } \ + } while (0) + +/* XOR two source byte buffers and put the result in a destination buffer */ +#define lw_xor_block_2_src(dest, src1, src2, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ = *_src1++ ^ *_src2++; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time */ +#define lw_xor_block_2_dest(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest2++ = (*_dest++ ^= *_src++); \ + --_len; \ + } \ + } while (0) + +/* XOR two byte buffers and write to a destination which at the same + * time copying the contents of src2 to dest2 */ +#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src2++; \ + *_dest2++ = _temp; \ + *_dest++ = *_src1++ ^ _temp; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time. This version swaps the source value + * into the "dest" buffer */ +#define lw_xor_block_swap(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src++; \ + *_dest2++ = *_dest ^ _temp; \ + *_dest++ = _temp; \ + --_len; \ + } \ + } while (0) + +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + +/* Rotation macros for 32-bit arguments */ + +/* Generic left rotate */ +#define leftRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (32 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (32 - (bits))); \ + })) + +#if !LW_CRYPTO_ROTATE32_COMPOSED + +/* Left rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1(a) (leftRotate((a), 1)) +#define leftRotate2(a) (leftRotate((a), 2)) +#define leftRotate3(a) (leftRotate((a), 3)) +#define leftRotate4(a) (leftRotate((a), 4)) +#define leftRotate5(a) (leftRotate((a), 5)) +#define leftRotate6(a) (leftRotate((a), 6)) +#define leftRotate7(a) (leftRotate((a), 7)) +#define leftRotate8(a) (leftRotate((a), 8)) +#define leftRotate9(a) (leftRotate((a), 9)) +#define leftRotate10(a) (leftRotate((a), 10)) +#define leftRotate11(a) (leftRotate((a), 11)) +#define leftRotate12(a) (leftRotate((a), 12)) +#define leftRotate13(a) (leftRotate((a), 13)) +#define leftRotate14(a) (leftRotate((a), 14)) +#define leftRotate15(a) (leftRotate((a), 15)) +#define leftRotate16(a) (leftRotate((a), 16)) +#define leftRotate17(a) (leftRotate((a), 17)) +#define leftRotate18(a) (leftRotate((a), 18)) +#define leftRotate19(a) (leftRotate((a), 19)) +#define leftRotate20(a) (leftRotate((a), 20)) +#define leftRotate21(a) (leftRotate((a), 21)) +#define leftRotate22(a) (leftRotate((a), 22)) +#define leftRotate23(a) (leftRotate((a), 23)) +#define leftRotate24(a) (leftRotate((a), 24)) +#define leftRotate25(a) (leftRotate((a), 25)) +#define leftRotate26(a) (leftRotate((a), 26)) +#define leftRotate27(a) (leftRotate((a), 27)) +#define leftRotate28(a) (leftRotate((a), 28)) +#define leftRotate29(a) (leftRotate((a), 29)) +#define leftRotate30(a) (leftRotate((a), 30)) +#define leftRotate31(a) (leftRotate((a), 31)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1(a) (rightRotate((a), 1)) +#define rightRotate2(a) (rightRotate((a), 2)) +#define rightRotate3(a) (rightRotate((a), 3)) +#define rightRotate4(a) (rightRotate((a), 4)) +#define rightRotate5(a) (rightRotate((a), 5)) +#define rightRotate6(a) (rightRotate((a), 6)) +#define rightRotate7(a) (rightRotate((a), 7)) +#define rightRotate8(a) (rightRotate((a), 8)) +#define rightRotate9(a) (rightRotate((a), 9)) +#define rightRotate10(a) (rightRotate((a), 10)) +#define rightRotate11(a) (rightRotate((a), 11)) +#define rightRotate12(a) (rightRotate((a), 12)) +#define rightRotate13(a) (rightRotate((a), 13)) +#define rightRotate14(a) (rightRotate((a), 14)) +#define rightRotate15(a) (rightRotate((a), 15)) +#define rightRotate16(a) (rightRotate((a), 16)) +#define rightRotate17(a) (rightRotate((a), 17)) +#define rightRotate18(a) (rightRotate((a), 18)) +#define rightRotate19(a) (rightRotate((a), 19)) +#define rightRotate20(a) (rightRotate((a), 20)) +#define rightRotate21(a) (rightRotate((a), 21)) +#define rightRotate22(a) (rightRotate((a), 22)) +#define rightRotate23(a) (rightRotate((a), 23)) +#define rightRotate24(a) (rightRotate((a), 24)) +#define rightRotate25(a) (rightRotate((a), 25)) +#define rightRotate26(a) (rightRotate((a), 26)) +#define rightRotate27(a) (rightRotate((a), 27)) +#define rightRotate28(a) (rightRotate((a), 28)) +#define rightRotate29(a) (rightRotate((a), 29)) +#define rightRotate30(a) (rightRotate((a), 30)) +#define rightRotate31(a) (rightRotate((a), 31)) + +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ 
+#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Rotation macros for 64-bit arguments */ + +/* Generic left rotate */ +#define leftRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (64 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (64 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_64(a) (leftRotate_64((a), 1)) +#define leftRotate2_64(a) (leftRotate_64((a), 2)) +#define leftRotate3_64(a) (leftRotate_64((a), 3)) +#define leftRotate4_64(a) (leftRotate_64((a), 4)) +#define leftRotate5_64(a) (leftRotate_64((a), 5)) +#define leftRotate6_64(a) (leftRotate_64((a), 6)) +#define leftRotate7_64(a) (leftRotate_64((a), 7)) +#define leftRotate8_64(a) (leftRotate_64((a), 8)) +#define leftRotate9_64(a) (leftRotate_64((a), 9)) +#define leftRotate10_64(a) (leftRotate_64((a), 10)) +#define leftRotate11_64(a) (leftRotate_64((a), 11)) +#define leftRotate12_64(a) (leftRotate_64((a), 12)) +#define leftRotate13_64(a) (leftRotate_64((a), 13)) +#define leftRotate14_64(a) (leftRotate_64((a), 14)) +#define leftRotate15_64(a) (leftRotate_64((a), 15)) +#define leftRotate16_64(a) (leftRotate_64((a), 16)) +#define leftRotate17_64(a) (leftRotate_64((a), 17)) +#define leftRotate18_64(a) (leftRotate_64((a), 18)) +#define leftRotate19_64(a) (leftRotate_64((a), 19)) +#define leftRotate20_64(a) (leftRotate_64((a), 20)) +#define leftRotate21_64(a) (leftRotate_64((a), 21)) +#define leftRotate22_64(a) (leftRotate_64((a), 22)) +#define leftRotate23_64(a) (leftRotate_64((a), 23)) +#define leftRotate24_64(a) (leftRotate_64((a), 24)) +#define leftRotate25_64(a) (leftRotate_64((a), 25)) +#define leftRotate26_64(a) (leftRotate_64((a), 26)) +#define leftRotate27_64(a) (leftRotate_64((a), 27)) +#define leftRotate28_64(a) (leftRotate_64((a), 28)) +#define leftRotate29_64(a) (leftRotate_64((a), 29)) +#define leftRotate30_64(a) (leftRotate_64((a), 30)) +#define leftRotate31_64(a) (leftRotate_64((a), 31)) +#define leftRotate32_64(a) (leftRotate_64((a), 32)) +#define leftRotate33_64(a) (leftRotate_64((a), 33)) +#define leftRotate34_64(a) (leftRotate_64((a), 34)) +#define leftRotate35_64(a) (leftRotate_64((a), 35)) +#define leftRotate36_64(a) (leftRotate_64((a), 36)) +#define leftRotate37_64(a) (leftRotate_64((a), 37)) +#define leftRotate38_64(a) (leftRotate_64((a), 38)) +#define leftRotate39_64(a) (leftRotate_64((a), 39)) +#define leftRotate40_64(a) (leftRotate_64((a), 40)) +#define leftRotate41_64(a) (leftRotate_64((a), 41)) +#define leftRotate42_64(a) (leftRotate_64((a), 42)) +#define leftRotate43_64(a) (leftRotate_64((a), 43)) +#define leftRotate44_64(a) (leftRotate_64((a), 44)) +#define leftRotate45_64(a) (leftRotate_64((a), 45)) +#define leftRotate46_64(a) (leftRotate_64((a), 46)) +#define leftRotate47_64(a) (leftRotate_64((a), 47)) +#define leftRotate48_64(a) (leftRotate_64((a), 48)) +#define leftRotate49_64(a) (leftRotate_64((a), 49)) +#define leftRotate50_64(a) (leftRotate_64((a), 50)) +#define leftRotate51_64(a) (leftRotate_64((a), 51)) +#define leftRotate52_64(a) (leftRotate_64((a), 52)) +#define leftRotate53_64(a) (leftRotate_64((a), 53)) +#define leftRotate54_64(a) (leftRotate_64((a), 54)) +#define leftRotate55_64(a) (leftRotate_64((a), 55)) +#define leftRotate56_64(a) (leftRotate_64((a), 56)) +#define leftRotate57_64(a) (leftRotate_64((a), 57)) +#define leftRotate58_64(a) (leftRotate_64((a), 58)) +#define leftRotate59_64(a) (leftRotate_64((a), 59)) +#define leftRotate60_64(a) (leftRotate_64((a), 60)) +#define leftRotate61_64(a) (leftRotate_64((a), 61)) +#define leftRotate62_64(a) (leftRotate_64((a), 62)) +#define leftRotate63_64(a) (leftRotate_64((a), 63)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_64(a) (rightRotate_64((a), 1)) +#define rightRotate2_64(a) (rightRotate_64((a), 2)) +#define rightRotate3_64(a) (rightRotate_64((a), 3)) +#define rightRotate4_64(a) (rightRotate_64((a), 4)) +#define rightRotate5_64(a) (rightRotate_64((a), 5)) +#define rightRotate6_64(a) (rightRotate_64((a), 6)) +#define rightRotate7_64(a) (rightRotate_64((a), 7)) +#define rightRotate8_64(a) (rightRotate_64((a), 8)) +#define rightRotate9_64(a) (rightRotate_64((a), 9)) +#define rightRotate10_64(a) (rightRotate_64((a), 10)) +#define rightRotate11_64(a) (rightRotate_64((a), 11)) +#define rightRotate12_64(a) (rightRotate_64((a), 12)) +#define rightRotate13_64(a) (rightRotate_64((a), 13)) +#define rightRotate14_64(a) (rightRotate_64((a), 14)) +#define rightRotate15_64(a) (rightRotate_64((a), 15)) +#define rightRotate16_64(a) (rightRotate_64((a), 16)) +#define rightRotate17_64(a) (rightRotate_64((a), 17)) +#define rightRotate18_64(a) (rightRotate_64((a), 18)) +#define rightRotate19_64(a) (rightRotate_64((a), 19)) +#define rightRotate20_64(a) (rightRotate_64((a), 20)) +#define rightRotate21_64(a) (rightRotate_64((a), 21)) +#define rightRotate22_64(a) (rightRotate_64((a), 22)) +#define rightRotate23_64(a) (rightRotate_64((a), 23)) +#define rightRotate24_64(a) (rightRotate_64((a), 24)) +#define rightRotate25_64(a) (rightRotate_64((a), 25)) +#define rightRotate26_64(a) (rightRotate_64((a), 26)) +#define rightRotate27_64(a) (rightRotate_64((a), 27)) +#define rightRotate28_64(a) (rightRotate_64((a), 28)) +#define rightRotate29_64(a) (rightRotate_64((a), 29)) +#define rightRotate30_64(a) (rightRotate_64((a), 30)) +#define rightRotate31_64(a) (rightRotate_64((a), 31)) +#define rightRotate32_64(a) (rightRotate_64((a), 32)) +#define rightRotate33_64(a) (rightRotate_64((a), 33)) +#define rightRotate34_64(a) (rightRotate_64((a), 34)) +#define rightRotate35_64(a) (rightRotate_64((a), 35)) +#define rightRotate36_64(a) (rightRotate_64((a), 36)) +#define rightRotate37_64(a) (rightRotate_64((a), 37)) +#define rightRotate38_64(a) (rightRotate_64((a), 38)) +#define rightRotate39_64(a) (rightRotate_64((a), 39)) +#define rightRotate40_64(a) (rightRotate_64((a), 40)) +#define rightRotate41_64(a) (rightRotate_64((a), 41)) +#define rightRotate42_64(a) (rightRotate_64((a), 42)) +#define rightRotate43_64(a) (rightRotate_64((a), 43)) +#define rightRotate44_64(a) (rightRotate_64((a), 44)) +#define rightRotate45_64(a) (rightRotate_64((a), 45)) +#define rightRotate46_64(a) (rightRotate_64((a), 46)) +#define rightRotate47_64(a) (rightRotate_64((a), 47)) +#define rightRotate48_64(a) (rightRotate_64((a), 48)) +#define rightRotate49_64(a) (rightRotate_64((a), 49)) +#define rightRotate50_64(a) (rightRotate_64((a), 50)) +#define rightRotate51_64(a) (rightRotate_64((a), 51)) +#define rightRotate52_64(a) (rightRotate_64((a), 52)) +#define rightRotate53_64(a) (rightRotate_64((a), 53)) +#define rightRotate54_64(a) (rightRotate_64((a), 54)) +#define rightRotate55_64(a) (rightRotate_64((a), 55)) +#define rightRotate56_64(a) (rightRotate_64((a), 56)) +#define rightRotate57_64(a) (rightRotate_64((a), 57)) +#define rightRotate58_64(a) (rightRotate_64((a), 58)) +#define rightRotate59_64(a) (rightRotate_64((a), 59)) +#define rightRotate60_64(a) (rightRotate_64((a), 60)) +#define rightRotate61_64(a) (rightRotate_64((a), 61)) +#define rightRotate62_64(a) (rightRotate_64((a), 62)) +#define rightRotate63_64(a) (rightRotate_64((a), 63)) + +/* 
Rotate a 16-bit value left by a number of bits */ +#define leftRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (16 - (bits))); \ + })) + +/* Rotate a 16-bit value right by a number of bits */ +#define rightRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (16 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_16(a) (leftRotate_16((a), 1)) +#define leftRotate2_16(a) (leftRotate_16((a), 2)) +#define leftRotate3_16(a) (leftRotate_16((a), 3)) +#define leftRotate4_16(a) (leftRotate_16((a), 4)) +#define leftRotate5_16(a) (leftRotate_16((a), 5)) +#define leftRotate6_16(a) (leftRotate_16((a), 6)) +#define leftRotate7_16(a) (leftRotate_16((a), 7)) +#define leftRotate8_16(a) (leftRotate_16((a), 8)) +#define leftRotate9_16(a) (leftRotate_16((a), 9)) +#define leftRotate10_16(a) (leftRotate_16((a), 10)) +#define leftRotate11_16(a) (leftRotate_16((a), 11)) +#define leftRotate12_16(a) (leftRotate_16((a), 12)) +#define leftRotate13_16(a) (leftRotate_16((a), 13)) +#define leftRotate14_16(a) (leftRotate_16((a), 14)) +#define leftRotate15_16(a) (leftRotate_16((a), 15)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_16(a) (rightRotate_16((a), 1)) +#define rightRotate2_16(a) (rightRotate_16((a), 2)) +#define rightRotate3_16(a) (rightRotate_16((a), 3)) +#define rightRotate4_16(a) (rightRotate_16((a), 4)) +#define rightRotate5_16(a) (rightRotate_16((a), 5)) +#define rightRotate6_16(a) (rightRotate_16((a), 6)) +#define rightRotate7_16(a) (rightRotate_16((a), 7)) +#define rightRotate8_16(a) (rightRotate_16((a), 8)) +#define rightRotate9_16(a) (rightRotate_16((a), 9)) +#define rightRotate10_16(a) (rightRotate_16((a), 10)) +#define rightRotate11_16(a) (rightRotate_16((a), 11)) +#define rightRotate12_16(a) (rightRotate_16((a), 12)) +#define rightRotate13_16(a) (rightRotate_16((a), 13)) +#define rightRotate14_16(a) (rightRotate_16((a), 14)) +#define rightRotate15_16(a) (rightRotate_16((a), 15)) + +/* Rotate an 8-bit value left by a number of bits */ +#define leftRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (8 - (bits))); \ + })) + +/* Rotate an 8-bit value right by a number of bits */ +#define rightRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (8 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_8(a) (leftRotate_8((a), 1)) +#define leftRotate2_8(a) (leftRotate_8((a), 2)) +#define leftRotate3_8(a) (leftRotate_8((a), 3)) +#define leftRotate4_8(a) (leftRotate_8((a), 4)) +#define leftRotate5_8(a) (leftRotate_8((a), 5)) +#define leftRotate6_8(a) (leftRotate_8((a), 6)) +#define leftRotate7_8(a) (leftRotate_8((a), 7)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_8(a) (rightRotate_8((a), 1)) +#define rightRotate2_8(a) (rightRotate_8((a), 2)) +#define rightRotate3_8(a) (rightRotate_8((a), 3)) +#define rightRotate4_8(a) (rightRotate_8((a), 4)) +#define rightRotate5_8(a) (rightRotate_8((a), 5)) +#define rightRotate6_8(a) (rightRotate_8((a), 6)) +#define rightRotate7_8(a) (rightRotate_8((a), 7)) + +#endif diff --git a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/photon-beetle.c b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/photon-beetle.c new file mode 100644 index 0000000..f44bdad --- /dev/null +++ b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/photon-beetle.c @@ -0,0 +1,451 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "photon-beetle.h" +#include "internal-photon256.h" +#include "internal-util.h" +#include <string.h> + +aead_cipher_t const photon_beetle_128_cipher = { + "PHOTON-Beetle-AEAD-ENC-128", + PHOTON_BEETLE_KEY_SIZE, + PHOTON_BEETLE_NONCE_SIZE, + PHOTON_BEETLE_TAG_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + photon_beetle_128_aead_encrypt, + photon_beetle_128_aead_decrypt +}; + +aead_cipher_t const photon_beetle_32_cipher = { + "PHOTON-Beetle-AEAD-ENC-32", + PHOTON_BEETLE_KEY_SIZE, + PHOTON_BEETLE_NONCE_SIZE, + PHOTON_BEETLE_TAG_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + photon_beetle_32_aead_encrypt, + photon_beetle_32_aead_decrypt +}; + +aead_hash_algorithm_t const photon_beetle_hash_algorithm = { + "PHOTON-Beetle-HASH", + sizeof(int), + PHOTON_BEETLE_HASH_SIZE, + AEAD_FLAG_NONE, + photon_beetle_hash, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +/** + * \brief Rate of operation for PHOTON-Beetle-AEAD-ENC-128. + */ +#define PHOTON_BEETLE_128_RATE 16 + +/** + * \brief Rate of operation for PHOTON-Beetle-AEAD-ENC-32. + */ +#define PHOTON_BEETLE_32_RATE 4 + +/* Shifts a domain constant from the spec to the correct bit position */ +#define DOMAIN(c) ((c) << 5) + +/** + * \brief Processes the associated data for PHOTON-Beetle. + * + * \param state PHOTON-256 permutation state. + * \param ad Points to the associated data. + * \param adlen Length of the associated data, must be non-zero. + * \param rate Rate of absorption for the data. 
+ * \param mempty Non-zero if the message is empty. + */ +static void photon_beetle_process_ad + (unsigned char state[PHOTON256_STATE_SIZE], + const unsigned char *ad, unsigned long long adlen, + unsigned rate, int mempty) +{ + unsigned temp; + + /* Absorb as many full rate blocks as possible */ + while (adlen > rate) { + photon256_permute(state); + lw_xor_block(state, ad, rate); + ad += rate; + adlen -= rate; + } + + /* Pad and absorb the last block */ + temp = (unsigned)adlen; + photon256_permute(state); + lw_xor_block(state, ad, temp); + if (temp < rate) + state[temp] ^= 0x01; /* padding */ + + /* Add the domain constant to finalize associated data processing */ + if (mempty && temp == rate) + state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(3); + else if (mempty) + state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(4); + else if (temp == rate) + state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); + else + state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(2); +} + +/** + * \brief Rotates part of the PHOTON-256 state right by one bit. + * + * \param out Output state buffer. + * \param in Input state buffer, must not overlap with \a out. + * \param len Length of the state buffer. + */ +static void photon_beetle_rotate1 + (unsigned char *out, const unsigned char *in, unsigned len) +{ + unsigned posn; + for (posn = 0; posn < (len - 1); ++posn) + out[posn] = (in[posn] >> 1) | (in[posn + 1] << 7); + out[len - 1] = (in[len - 1] >> 1) | (in[0] << 7); +} + +/** + * \brief Encrypts a plaintext block with PHOTON-Beetle. + * + * \param state PHOTON-256 permutation state. + * \param c Points to the ciphertext output buffer. + * \param m Points to the plaintext input buffer. + * \param mlen Length of the message, must be non-zero. + * \param rate Rate of absorption for the data. + * \param adempty Non-zero if the associated data is empty. + */ +static void photon_beetle_encrypt + (unsigned char state[PHOTON256_STATE_SIZE], + unsigned char *c, const unsigned char *m, unsigned long long mlen, + unsigned rate, int adempty) +{ + unsigned char shuffle[PHOTON_BEETLE_128_RATE]; /* Block of max rate size */ + unsigned temp; + + /* Process all plaintext blocks except the last */ + while (mlen > rate) { + photon256_permute(state); + memcpy(shuffle, state + rate / 2, rate / 2); + photon_beetle_rotate1(shuffle + rate / 2, state, rate / 2); + lw_xor_block(state, m, rate); + lw_xor_block_2_src(c, m, shuffle, rate); + c += rate; + m += rate; + mlen -= rate; + } + + /* Pad and process the last block */ + temp = (unsigned)mlen; + photon256_permute(state); + memcpy(shuffle, state + rate / 2, rate / 2); + photon_beetle_rotate1(shuffle + rate / 2, state, rate / 2); + if (temp == rate) { + lw_xor_block(state, m, rate); + lw_xor_block_2_src(c, m, shuffle, rate); + } else { + lw_xor_block(state, m, temp); + state[temp] ^= 0x01; /* padding */ + lw_xor_block_2_src(c, m, shuffle, temp); + } + + /* Add the domain constant to finalize message processing */ + if (adempty && temp == rate) + state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(5); + else if (adempty) + state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(6); + else if (temp == rate) + state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); + else + state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(2); +} + +/** + * \brief Decrypts a ciphertext block with PHOTON-Beetle. + * + * \param state PHOTON-256 permutation state. + * \param m Points to the plaintext output buffer. + * \param c Points to the ciphertext input buffer. + * \param mlen Length of the message, must be non-zero. + * \param rate Rate of absorption for the data. 
+ * \param adempty Non-zero if the associated data is empty. + */ +static void photon_beetle_decrypt + (unsigned char state[PHOTON256_STATE_SIZE], + unsigned char *m, const unsigned char *c, unsigned long long mlen, + unsigned rate, int adempty) +{ + unsigned char shuffle[PHOTON_BEETLE_128_RATE]; /* Block of max rate size */ + unsigned temp; + + /* Process all plaintext blocks except the last */ + while (mlen > rate) { + photon256_permute(state); + memcpy(shuffle, state + rate / 2, rate / 2); + photon_beetle_rotate1(shuffle + rate / 2, state, rate / 2); + lw_xor_block_2_src(m, c, shuffle, rate); + lw_xor_block(state, m, rate); + c += rate; + m += rate; + mlen -= rate; + } + + /* Pad and process the last block */ + temp = (unsigned)mlen; + photon256_permute(state); + memcpy(shuffle, state + rate / 2, rate / 2); + photon_beetle_rotate1(shuffle + rate / 2, state, rate / 2); + if (temp == rate) { + lw_xor_block_2_src(m, c, shuffle, rate); + lw_xor_block(state, m, rate); + } else { + lw_xor_block_2_src(m, c, shuffle, temp); + lw_xor_block(state, m, temp); + state[temp] ^= 0x01; /* padding */ + } + + /* Add the domain constant to finalize message processing */ + if (adempty && temp == rate) + state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(5); + else if (adempty) + state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(6); + else if (temp == rate) + state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); + else + state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(2); +} + +int photon_beetle_128_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + unsigned char state[PHOTON256_STATE_SIZE]; + (void)nsec; + + /* Set the length of the returned ciphertext */ + *clen = mlen + PHOTON_BEETLE_TAG_SIZE; + + /* Initialize the state by concatenating the nonce and the key */ + memcpy(state, npub, 16); + memcpy(state + 16, k, 16); + + /* Process the associated data */ + if (adlen > 0) { + photon_beetle_process_ad + (state, ad, adlen, PHOTON_BEETLE_128_RATE, mlen == 0); + } else if (mlen == 0) { + state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); + } + + /* Encrypt the plaintext to produce the ciphertext */ + if (mlen > 0) { + photon_beetle_encrypt + (state, c, m, mlen, PHOTON_BEETLE_128_RATE, adlen == 0); + } + + /* Generate the authentication tag */ + photon256_permute(state); + memcpy(c + mlen, state, PHOTON_BEETLE_TAG_SIZE); + return 0; +} + +int photon_beetle_128_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + unsigned char state[PHOTON256_STATE_SIZE]; + (void)nsec; + + /* Validate the ciphertext length and set the return "mlen" value */ + if (clen < PHOTON_BEETLE_TAG_SIZE) + return -1; + *mlen = clen - PHOTON_BEETLE_TAG_SIZE; + + /* Initialize the state by concatenating the nonce and the key */ + memcpy(state, npub, 16); + memcpy(state + 16, k, 16); + + /* Process the associated data */ + clen -= PHOTON_BEETLE_TAG_SIZE; + if (adlen > 0) { + photon_beetle_process_ad + (state, ad, adlen, PHOTON_BEETLE_128_RATE, clen == 0); + } else if (clen == 0) { + state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); + } + + /* Decrypt the ciphertext to produce the plaintext */ + if (clen > 0) { + photon_beetle_decrypt + (state, m, c, clen, PHOTON_BEETLE_128_RATE, adlen == 0); 
+ } + + /* Check the authentication tag */ + photon256_permute(state); + return aead_check_tag(m, clen, state, c + clen, PHOTON_BEETLE_TAG_SIZE); +} + +int photon_beetle_32_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + unsigned char state[PHOTON256_STATE_SIZE]; + (void)nsec; + + /* Set the length of the returned ciphertext */ + *clen = mlen + PHOTON_BEETLE_TAG_SIZE; + + /* Initialize the state by concatenating the nonce and the key */ + memcpy(state, npub, 16); + memcpy(state + 16, k, 16); + + /* Process the associated data */ + if (adlen > 0) { + photon_beetle_process_ad + (state, ad, adlen, PHOTON_BEETLE_32_RATE, mlen == 0); + } else if (mlen == 0) { + state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); + } + + /* Encrypt the plaintext to produce the ciphertext */ + if (mlen > 0) { + photon_beetle_encrypt + (state, c, m, mlen, PHOTON_BEETLE_32_RATE, adlen == 0); + } + + /* Generate the authentication tag */ + photon256_permute(state); + memcpy(c + mlen, state, PHOTON_BEETLE_TAG_SIZE); + return 0; +} + +int photon_beetle_32_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + unsigned char state[PHOTON256_STATE_SIZE]; + (void)nsec; + + /* Validate the ciphertext length and set the return "mlen" value */ + if (clen < PHOTON_BEETLE_TAG_SIZE) + return -1; + *mlen = clen - PHOTON_BEETLE_TAG_SIZE; + + /* Initialize the state by concatenating the nonce and the key */ + memcpy(state, npub, 16); + memcpy(state + 16, k, 16); + + /* Process the associated data */ + clen -= PHOTON_BEETLE_TAG_SIZE; + if (adlen > 0) { + photon_beetle_process_ad + (state, ad, adlen, PHOTON_BEETLE_32_RATE, clen == 0); + } else if (clen == 0) { + state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); + } + + /* Decrypt the ciphertext to produce the plaintext */ + if (clen > 0) { + photon_beetle_decrypt + (state, m, c, clen, PHOTON_BEETLE_32_RATE, adlen == 0); + } + + /* Check the authentication tag */ + photon256_permute(state); + return aead_check_tag(m, clen, state, c + clen, PHOTON_BEETLE_TAG_SIZE); +} + +int photon_beetle_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + unsigned char state[PHOTON256_STATE_SIZE]; + unsigned temp; + + /* Absorb the input data */ + if (inlen == 0) { + /* No input data at all */ + memset(state, 0, sizeof(state) - 1); + state[PHOTON256_STATE_SIZE - 1] = DOMAIN(1); + } else if (inlen <= PHOTON_BEETLE_128_RATE) { + /* Only one block of input data, which may require padding */ + temp = (unsigned)inlen; + memcpy(state, in, temp); + memset(state + temp, 0, sizeof(state) - temp - 1); + if (temp < PHOTON_BEETLE_128_RATE) { + state[temp] = 0x01; + state[PHOTON256_STATE_SIZE - 1] = DOMAIN(1); + } else { + state[PHOTON256_STATE_SIZE - 1] = DOMAIN(2); + } + } else { + /* Initialize the state with the first block, then absorb the rest */ + memcpy(state, in, PHOTON_BEETLE_128_RATE); + memset(state + PHOTON_BEETLE_128_RATE, 0, + sizeof(state) - PHOTON_BEETLE_128_RATE); + in += PHOTON_BEETLE_128_RATE; + inlen -= PHOTON_BEETLE_128_RATE; + while (inlen > PHOTON_BEETLE_32_RATE) { + photon256_permute(state); + lw_xor_block(state, in, PHOTON_BEETLE_32_RATE); + in += PHOTON_BEETLE_32_RATE; + 
inlen -= PHOTON_BEETLE_32_RATE; + } + photon256_permute(state); + temp = (unsigned)inlen; + if (temp == PHOTON_BEETLE_32_RATE) { + lw_xor_block(state, in, PHOTON_BEETLE_32_RATE); + state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(1); + } else { + lw_xor_block(state, in, temp); + state[temp] ^= 0x01; + state[PHOTON256_STATE_SIZE - 1] ^= DOMAIN(2); + } + } + + /* Generate the output hash */ + photon256_permute(state); + memcpy(out, state, 16); + photon256_permute(state); + memcpy(out + 16, state, 16); + return 0; +} diff --git a/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/photon-beetle.h b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/photon-beetle.h new file mode 100644 index 0000000..2d94a7e --- /dev/null +++ b/photon-beetle/Implementations/crypto_hash/photonbeetlehash256rate32v1/rhys/photon-beetle.h @@ -0,0 +1,224 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_PHOTON_BEETLE_H +#define LWCRYPTO_PHOTON_BEETLE_H + +#include "aead-common.h" + +/** + * \file photon-beetle.h + * \brief PHOTON-Beetle authenticated encryption algorithm. + * + * PHOTON-Beetle is a family of authenticated encryption algorithms based + * on the PHOTON-256 permutation and using the Beetle sponge mode. + * There are three algorithms in the family: + * + * \li PHOTON-Beetle-AEAD-ENC-128 with a 128-bit key, a 128-bit nonce, and a + * 128-bit tag. Data is handled in 16 byte blocks. This is the primary + * member of the family for encryption. + * \li PHOTON-Beetle-AEAD-ENC-32 with a 128-bit key, a 128-bit nonce, and a + * 128-bit tag. Data is handled in 4 byte blocks. + * \li PHOTON-Beetle-Hash with a 256-bit hash output. The initial data is + * handled as a 16 byte block, and then the remaining bytes are processed + * in 4 byte blocks. + * + * References: https://www.isical.ac.in/~lightweight/beetle/ + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the key for PHOTON-Beetle. + */ +#define PHOTON_BEETLE_KEY_SIZE 16 + +/** + * \brief Size of the authentication tag for PHOTON-Beetle. + */ +#define PHOTON_BEETLE_TAG_SIZE 16 + +/** + * \brief Size of the nonce for PHOTON-Beetle. + */ +#define PHOTON_BEETLE_NONCE_SIZE 16 + +/** + * \brief Size of the hash output for PHOTON-Beetle-HASH. + */ +#define PHOTON_BEETLE_HASH_SIZE 32 + +/** + * \brief Meta-information block for the PHOTON-Beetle-AEAD-ENC-128 cipher. 
+ */ +extern aead_cipher_t const photon_beetle_128_cipher; + +/** + * \brief Meta-information block for the PHOTON-Beetle-AEAD-ENC-32 cipher. + */ +extern aead_cipher_t const photon_beetle_32_cipher; + +/** + * \brief Meta-information block for the PHOTON-Beetle-HASH algorithm. + */ +extern aead_hash_algorithm_t const photon_beetle_hash_algorithm; + +/** + * \brief Encrypts and authenticates a packet with PHOTON-Beetle-AEAD-ENC-128. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa photon_beetle_128_aead_decrypt() + */ +int photon_beetle_128_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with PHOTON-Beetle-AEAD-ENC-128. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa photon_beetle_128_aead_encrypt() + */ +int photon_beetle_128_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with PHOTON-Beetle-AEAD-ENC-32. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. 
+ * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa photon_beetle_32_aead_decrypt() + */ +int photon_beetle_32_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with PHOTON-Beetle-AEAD-ENC-32. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa photon_beetle_32_aead_encrypt() + */ +int photon_beetle_32_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data with PHOTON-Beetle-HASH to + * generate a hash value. + * + * \param out Buffer to receive the hash output which must be at least + * PHOTON_BEETLE_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int photon_beetle_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/aead-common.c b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/aead-common.h b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. 
- */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. 
- */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. 
- */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/api.h b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/api.h deleted file mode 100644 index c3c0a27..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 12 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/encrypt.c b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/encrypt.c deleted file mode 100644 index a63877d..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "pyjamask.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return pyjamask_128_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return pyjamask_128_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-ocb.h b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-ocb.h deleted file mode 100644 index 98f2a31..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-ocb.h +++ /dev/null @@ -1,355 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_OCB_H -#define LW_INTERNAL_OCB_H - -#include "internal-util.h" -#include - -/* We expect a number of macros to be defined before this file - * is included to configure the underlying block cipher: - * - * OCB_ALG_NAME Name of the algorithm that is using OCB mode. - * OCB_BLOCK_SIZE Size of the block for the underlying cipher in bytes. - * OCB_NONCE_SIZE Size of the nonce which must be < OCB_BLOCK_SIZE. - * OCB_TAG_SIZE Size of the authentication tag. - * OCB_KEY_SCHEDULE Type for the key schedule. - * OCB_SETUP_KEY Name of the key schedule setup function. - * OCB_ENCRYPT_BLOCK Name of the block cipher ECB encrypt function. - * OCB_DECRYPT_BLOCK Name of the block cipher ECB decrypt function. - * OCB_DOUBLE_L Name of the function to double L (optional). - */ -#if defined(OCB_ENCRYPT_BLOCK) - -/** - * \file internal-ocb.h - * \brief Internal implementation of the OCB block cipher mode. - * - * Note that OCB is covered by patents so it may not be usable in all - * applications. Open source applications should be covered, but for - * others you will need to contact the patent authors to find out - * if you can use it or if a paid license is required. 
- * - * License information: https://web.cs.ucdavis.edu/~rogaway/ocb/license.htm - * - * References: https://tools.ietf.org/html/rfc7253 - */ - -#define OCB_CONCAT_INNER(name,suffix) name##suffix -#define OCB_CONCAT(name,suffix) OCB_CONCAT_INNER(name,suffix) - -#if !defined(OCB_DOUBLE_L) - -#define OCB_DOUBLE_L OCB_CONCAT(OCB_ALG_NAME,_double_l) - -#if OCB_BLOCK_SIZE == 16 - -/* Double a value in GF(128) */ -static void OCB_DOUBLE_L(unsigned char out[16], const unsigned char in[16]) -{ - unsigned index; - unsigned char mask = (unsigned char)(((signed char)in[0]) >> 7); - for (index = 0; index < 15; ++index) - out[index] = (in[index] << 1) | (in[index + 1] >> 7); - out[15] = (in[15] << 1) ^ (mask & 0x87); -} - -#elif OCB_BLOCK_SIZE == 12 - -/* Double a value in GF(96) */ -static void OCB_DOUBLE_L - (unsigned char out[12], const unsigned char in[12]) -{ - unsigned index; - unsigned char mask = (unsigned char)(((signed char)in[0]) >> 7); - for (index = 0; index < 11; ++index) - out[index] = (in[index] << 1) | (in[index + 1] >> 7); - out[11] = (in[11] << 1) ^ (mask & 0x41); - out[10] ^= (mask & 0x06); -} - -#else -#error "Unknown block size for OCB" -#endif - -#endif - -/* State information for OCB functions */ -#define OCB_STATE OCB_CONCAT(OCB_ALG_NAME,_state_t) -typedef struct -{ - OCB_KEY_SCHEDULE ks; - unsigned char Lstar[OCB_BLOCK_SIZE]; - unsigned char Ldollar[OCB_BLOCK_SIZE]; - unsigned char L0[OCB_BLOCK_SIZE]; - unsigned char L1[OCB_BLOCK_SIZE]; - -} OCB_STATE; - -/* Initializes the OCB state from the key and nonce */ -static void OCB_CONCAT(OCB_ALG_NAME,_init) - (OCB_STATE *state, const unsigned char *k, const unsigned char *nonce, - unsigned char offset[OCB_BLOCK_SIZE]) -{ - unsigned bottom; - - /* Set up the key schedule */ - OCB_SETUP_KEY(&(state->ks), k); - - /* Derive the values of L*, L$, L0, and L1 */ - memset(state->Lstar, 0, sizeof(state->Lstar)); - OCB_ENCRYPT_BLOCK(&(state->ks), state->Lstar, state->Lstar); - OCB_DOUBLE_L(state->Ldollar, state->Lstar); - OCB_DOUBLE_L(state->L0, state->Ldollar); - OCB_DOUBLE_L(state->L1, state->L0); - - /* Derive the initial offset from the nonce */ - memset(offset, 0, OCB_BLOCK_SIZE); - memcpy(offset + OCB_BLOCK_SIZE - OCB_NONCE_SIZE, nonce, OCB_NONCE_SIZE); - offset[0] = ((OCB_TAG_SIZE * 8) & 0x7F) << 1; - offset[OCB_BLOCK_SIZE - OCB_NONCE_SIZE - 1] |= 0x01; - bottom = offset[OCB_BLOCK_SIZE - 1] & 0x3F; - offset[OCB_BLOCK_SIZE - 1] &= 0xC0; - { - unsigned index; - unsigned byte_posn = bottom / 8; -#if OCB_BLOCK_SIZE == 16 - /* Standard OCB with a 128-bit block */ - unsigned char stretch[24]; - OCB_ENCRYPT_BLOCK(&(state->ks), stretch, offset); - memcpy(stretch + 16, stretch + 1, 8); - lw_xor_block(stretch + 16, stretch, 8); -#elif OCB_BLOCK_SIZE == 12 - /* 96-bit block handling from the Pyjamask specification */ - unsigned char stretch[20]; - OCB_ENCRYPT_BLOCK(&(state->ks), stretch, offset); - for (index = 0; index < 8; ++index) { - stretch[index + 12] = - (stretch[index + 1] << 1) | (stretch[index + 2] >> 7); - } - lw_xor_block(stretch + 12, stretch, 8); -#else - unsigned char stretch[OCB_BLOCK_SIZE + 8] = {0}; - #error "unsupported block size for OCB mode" -#endif - bottom %= 8; - if (bottom != 0) { - for (index = 0; index < OCB_BLOCK_SIZE; ++index) { - offset[index] = - (stretch[index + byte_posn] << bottom) | - (stretch[index + byte_posn + 1] >> (8 - bottom)); - } - } else { - memcpy(offset, stretch + byte_posn, OCB_BLOCK_SIZE); - } - } -} - -/* Calculate L_{ntz(i)} when the last two bits of i are zero */ -static void 
OCB_CONCAT(OCB_ALG_NAME,_calculate_L) - (OCB_STATE *state, unsigned char L[OCB_BLOCK_SIZE], unsigned long long i) -{ - OCB_DOUBLE_L(L, state->L1); - i >>= 2; - while ((i & 1) == 0) { - OCB_DOUBLE_L(L, L); - i >>= 1; - } -} - -/* Process associated data with OCB */ -static void OCB_CONCAT(OCB_ALG_NAME,_process_ad) - (OCB_STATE *state, unsigned char tag[OCB_BLOCK_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char offset[OCB_BLOCK_SIZE]; - unsigned char block[OCB_BLOCK_SIZE]; - unsigned long long block_number; - - /* Process all full blocks */ - memset(offset, 0, sizeof(offset)); - block_number = 1; - while (adlen >= OCB_BLOCK_SIZE) { - if (block_number & 1) { - lw_xor_block(offset, state->L0, OCB_BLOCK_SIZE); - } else if ((block_number & 3) == 2) { - lw_xor_block(offset, state->L1, OCB_BLOCK_SIZE); - } else { - OCB_CONCAT(OCB_ALG_NAME,_calculate_L)(state, block, block_number); - lw_xor_block(offset, block, OCB_BLOCK_SIZE); - } - lw_xor_block_2_src(block, offset, ad, OCB_BLOCK_SIZE); - OCB_ENCRYPT_BLOCK(&(state->ks), block, block); - lw_xor_block(tag, block, OCB_BLOCK_SIZE); - ad += OCB_BLOCK_SIZE; - adlen -= OCB_BLOCK_SIZE; - ++block_number; - } - - /* Pad and process the last partial block */ - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(offset, state->Lstar, OCB_BLOCK_SIZE); - lw_xor_block(offset, ad, temp); - offset[temp] ^= 0x80; - OCB_ENCRYPT_BLOCK(&(state->ks), block, offset); - lw_xor_block(tag, block, OCB_BLOCK_SIZE); - } -} - -int OCB_CONCAT(OCB_ALG_NAME,_aead_encrypt) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - OCB_STATE state; - unsigned char offset[OCB_BLOCK_SIZE]; - unsigned char sum[OCB_BLOCK_SIZE]; - unsigned char block[OCB_BLOCK_SIZE]; - unsigned long long block_number; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + OCB_TAG_SIZE; - - /* Initialize the OCB state */ - OCB_CONCAT(OCB_ALG_NAME,_init)(&state, k, npub, offset); - - /* Process all plaintext blocks except the last */ - memset(sum, 0, sizeof(sum)); - block_number = 1; - while (mlen >= OCB_BLOCK_SIZE) { - if (block_number & 1) { - lw_xor_block(offset, state.L0, OCB_BLOCK_SIZE); - } else if ((block_number & 3) == 2) { - lw_xor_block(offset, state.L1, OCB_BLOCK_SIZE); - } else { - OCB_CONCAT(OCB_ALG_NAME,_calculate_L)(&state, block, block_number); - lw_xor_block(offset, block, OCB_BLOCK_SIZE); - } - lw_xor_block(sum, m, OCB_BLOCK_SIZE); - lw_xor_block_2_src(block, offset, m, OCB_BLOCK_SIZE); - OCB_ENCRYPT_BLOCK(&(state.ks), block, block); - lw_xor_block_2_src(c, block, offset, OCB_BLOCK_SIZE); - c += OCB_BLOCK_SIZE; - m += OCB_BLOCK_SIZE; - mlen -= OCB_BLOCK_SIZE; - ++block_number; - } - - /* Pad and process the last plaintext block */ - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - lw_xor_block(offset, state.Lstar, OCB_BLOCK_SIZE); - OCB_ENCRYPT_BLOCK(&(state.ks), block, offset); - lw_xor_block_2_src(c, block, m, temp); - c += temp; - } - - /* Finalize the encryption phase */ - lw_xor_block(sum, offset, OCB_BLOCK_SIZE); - lw_xor_block(sum, state.Ldollar, OCB_BLOCK_SIZE); - OCB_ENCRYPT_BLOCK(&(state.ks), sum, sum); - - /* Process the associated data and compute the final authentication tag */ - OCB_CONCAT(OCB_ALG_NAME,_process_ad)(&state, sum, ad, adlen); - memcpy(c, sum, OCB_TAG_SIZE); 
- return 0; -} - -int OCB_CONCAT(OCB_ALG_NAME,_aead_decrypt) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - OCB_STATE state; - unsigned char *mtemp = m; - unsigned char offset[OCB_BLOCK_SIZE]; - unsigned char sum[OCB_BLOCK_SIZE]; - unsigned char block[OCB_BLOCK_SIZE]; - unsigned long long block_number; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < OCB_TAG_SIZE) - return -1; - *mlen = clen - OCB_TAG_SIZE; - - /* Initialize the OCB state */ - OCB_CONCAT(OCB_ALG_NAME,_init)(&state, k, npub, offset); - - /* Process all ciphertext blocks except the last */ - memset(sum, 0, sizeof(sum)); - block_number = 1; - clen -= OCB_TAG_SIZE; - while (clen >= OCB_BLOCK_SIZE) { - if (block_number & 1) { - lw_xor_block(offset, state.L0, OCB_BLOCK_SIZE); - } else if ((block_number & 3) == 2) { - lw_xor_block(offset, state.L1, OCB_BLOCK_SIZE); - } else { - OCB_CONCAT(OCB_ALG_NAME,_calculate_L)(&state, block, block_number); - lw_xor_block(offset, block, OCB_BLOCK_SIZE); - } - lw_xor_block_2_src(block, offset, c, OCB_BLOCK_SIZE); - OCB_DECRYPT_BLOCK(&(state.ks), block, block); - lw_xor_block_2_src(m, block, offset, OCB_BLOCK_SIZE); - lw_xor_block(sum, m, OCB_BLOCK_SIZE); - c += OCB_BLOCK_SIZE; - m += OCB_BLOCK_SIZE; - clen -= OCB_BLOCK_SIZE; - ++block_number; - } - - /* Pad and process the last ciphertext block */ - if (clen > 0) { - unsigned temp = (unsigned)clen; - lw_xor_block(offset, state.Lstar, OCB_BLOCK_SIZE); - OCB_ENCRYPT_BLOCK(&(state.ks), block, offset); - lw_xor_block_2_src(m, block, c, temp); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - c += temp; - } - - /* Finalize the decryption phase */ - lw_xor_block(sum, offset, OCB_BLOCK_SIZE); - lw_xor_block(sum, state.Ldollar, OCB_BLOCK_SIZE); - OCB_ENCRYPT_BLOCK(&(state.ks), sum, sum); - - /* Process the associated data and check the final authentication tag */ - OCB_CONCAT(OCB_ALG_NAME,_process_ad)(&state, sum, ad, adlen); - return aead_check_tag(mtemp, *mlen, sum, c, OCB_TAG_SIZE); -} - -#endif /* OCB_ENCRYPT_BLOCK */ - -#endif /* LW_INTERNAL_OCB_H */ diff --git a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-pyjamask-avr.S b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-pyjamask-avr.S deleted file mode 100644 index b7cc631..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-pyjamask-avr.S +++ /dev/null @@ -1,8883 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global pyjamask_96_setup_key - .type pyjamask_96_setup_key, @function -pyjamask_96_setup_key: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 18 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - mov r26,r1 -29: - movw r12,r18 - movw r14,r20 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r12,r8 - eor r13,r9 - eor r14,r10 
- eor r15,r11 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r27,202 - mov r12,r27 - ldi r27,185 - mov r13,r27 - ldi r27,129 - mov r14,r27 - ldi r27,184 - mov r15,r27 - and r12,r0 - and r13,r0 - and r14,r0 - and r15,r0 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,229 - ldi r25,220 - ldi r16,64 - ldi r17,92 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,114 - ldi r25,110 - ldi r16,32 - ldi r17,174 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,57 - ldi r25,55 - ldi r16,16 - ldi r17,87 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,156 - ldi r25,27 - ldi r16,136 - ldi r17,171 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,206 - ldi r25,13 - ldi r16,196 - ldi r17,85 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,231 - ldi r25,6 - ldi r16,226 - ldi r17,42 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,115 - ldi r25,3 - ldi r16,113 - ldi r17,149 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,185 - ldi r25,129 - ldi r16,184 - ldi r17,202 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,220 - ldi r25,64 - ldi r16,92 - ldi r17,229 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,110 - ldi r25,32 - ldi r16,174 - ldi r17,114 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,55 - ldi r25,16 - ldi r16,87 - ldi r17,57 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,27 - ldi r25,136 - ldi r16,171 - ldi r17,156 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,13 - ldi r25,196 - ldi r16,85 - ldi r17,206 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,6 - ldi r25,226 - ldi r16,42 - ldi r17,231 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,3 - ldi r25,113 - ldi r16,149 - ldi r17,115 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,129 - ldi r25,184 - ldi r16,202 - ldi r17,185 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - 
eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,64 - ldi r25,92 - ldi r16,229 - ldi r17,220 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,32 - ldi r25,174 - ldi r16,114 - ldi r17,110 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,16 - ldi r25,87 - ldi r16,57 - ldi r17,55 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,136 - ldi r25,171 - ldi r16,156 - ldi r17,27 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,196 - ldi r25,85 - ldi r16,206 - ldi r17,13 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,226 - ldi r25,42 - ldi r16,231 - ldi r17,6 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,113 - ldi r25,149 - ldi r16,115 - ldi r17,3 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,184 - ldi r25,202 - ldi r16,185 - ldi r17,129 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,92 - ldi r25,229 - ldi r16,220 - ldi r17,64 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,174 - ldi r25,114 - ldi r16,110 - ldi r17,32 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,87 - ldi r25,57 - ldi r16,55 - ldi r17,16 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,171 - ldi r25,156 - ldi r16,27 - ldi r17,136 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,85 - ldi r25,206 - ldi r16,13 - ldi r17,196 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,42 - ldi r25,231 - ldi r16,6 - ldi r17,226 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,149 - ldi r25,115 - ldi r16,3 - ldi r17,113 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - movw r18,r12 - movw r20,r14 - ldi r25,128 - eor r18,r25 - eor r18,r26 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - ldi r24,106 - eor r23,r24 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - lsl r4 - rol r5 - rol r6 - rol r7 - adc r4,r1 - ldi r17,63 - eor r6,r17 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - or r11,r0 - ldi r16,36 - eor r11,r16 - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - st Z+,r22 - st Z+,r23 
- st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - inc r26 - ldi r27,14 - cpse r26,r27 - rjmp 29b - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size pyjamask_96_setup_key, .-pyjamask_96_setup_key - - .text -.global pyjamask_96_encrypt - .type pyjamask_96_encrypt, @function -pyjamask_96_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 16 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ldi r26,14 -13: - ld r8,Z+ - ld r9,Z+ - ld r10,Z+ - ld r11,Z+ - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - ld r8,Z+ - ld r9,Z+ - ld r10,Z+ - ld r11,Z+ - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - ld r8,Z+ - ld r9,Z+ - ld r10,Z+ - ld r11,Z+ - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r18 - and r0,r22 - eor r4,r0 - mov r0,r19 - and r0,r23 - eor r5,r0 - mov r0,r20 - and r0,r2 - eor r6,r0 - mov r0,r21 - and r0,r3 - eor r7,r0 - mov r0,r22 - and r0,r4 - eor r18,r0 - mov r0,r23 - and r0,r5 - eor r19,r0 - mov r0,r2 - and r0,r6 - eor r20,r0 - mov r0,r3 - and r0,r7 - eor r21,r0 - mov r0,r18 - and r0,r4 - eor r22,r0 - mov r0,r19 - and r0,r5 - eor r23,r0 - mov r0,r20 - and r0,r6 - eor r2,r0 - mov r0,r21 - and r0,r7 - eor r3,r0 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - com r4 - com r5 - com r6 - com r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r27,133 - mov r8,r27 - ldi r27,16 - mov r9,r27 - ldi r27,134 - mov r10,r27 - ldi r27,163 - mov r11,r27 - and r8,r0 - and r9,r0 - and r10,r0 - and r11,r0 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,66 - ldi r25,8 - ldi r16,195 - ldi r17,209 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,33 - ldi r25,132 - ldi r16,225 - ldi r17,104 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,16 - ldi r25,194 - ldi r16,112 - ldi r17,180 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,8 - ldi r25,97 - ldi r16,56 - ldi r17,90 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,132 - ldi r25,48 - ldi r16,28 - ldi r17,45 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,66 - ldi r25,24 - ldi r16,142 - ldi r17,22 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,33 - ldi r25,12 - ldi r16,71 - ldi r17,11 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,16 - ldi r25,134 - ldi r16,163 - ldi r17,133 - and r24,r0 - and r25,r0 - and r16,r0 - and 
r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,8 - ldi r25,195 - ldi r16,209 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,132 - ldi r25,225 - ldi r16,104 - ldi r17,33 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,194 - ldi r25,112 - ldi r16,180 - ldi r17,16 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,97 - ldi r25,56 - ldi r16,90 - ldi r17,8 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,48 - ldi r25,28 - ldi r16,45 - ldi r17,132 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,24 - ldi r25,142 - ldi r16,22 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,12 - ldi r25,71 - ldi r16,11 - ldi r17,33 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,134 - ldi r25,163 - ldi r16,133 - ldi r17,16 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,195 - ldi r25,209 - ldi r16,66 - ldi r17,8 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,225 - ldi r25,104 - ldi r16,33 - ldi r17,132 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,112 - ldi r25,180 - ldi r16,16 - ldi r17,194 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,56 - ldi r25,90 - ldi r16,8 - ldi r17,97 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,28 - ldi r25,45 - ldi r16,132 - ldi r17,48 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,142 - ldi r25,22 - ldi r16,66 - ldi r17,24 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,71 - ldi r25,11 - ldi r16,33 - ldi r17,12 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,163 - ldi r25,133 - ldi r16,16 - ldi r17,134 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,209 - ldi r25,66 - ldi r16,8 - ldi r17,195 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,104 - ldi r25,33 - ldi r16,132 - ldi r17,225 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 
- lsl r18 - sbc r0,r1 - ldi r24,180 - ldi r25,16 - ldi r16,194 - ldi r17,112 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,90 - ldi r25,8 - ldi r16,97 - ldi r17,56 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,45 - ldi r25,132 - ldi r16,48 - ldi r17,28 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,22 - ldi r25,66 - ldi r16,24 - ldi r17,142 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,11 - ldi r25,33 - ldi r16,12 - ldi r17,71 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - movw r18,r8 - movw r20,r10 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r27,33 - mov r8,r27 - ldi r27,112 - mov r9,r27 - ldi r27,65 - mov r10,r27 - ldi r27,99 - mov r11,r27 - and r8,r0 - and r9,r0 - and r10,r0 - and r11,r0 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,16 - ldi r25,184 - ldi r16,160 - ldi r17,177 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,8 - ldi r25,92 - ldi r16,208 - ldi r17,88 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,4 - ldi r25,46 - ldi r16,104 - ldi r17,44 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,2 - ldi r25,23 - ldi r16,52 - ldi r17,22 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,129 - ldi r25,11 - ldi r16,26 - ldi r17,11 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,192 - ldi r25,5 - ldi r16,141 - ldi r17,133 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,224 - ldi r25,130 - ldi r16,198 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,112 - ldi r25,65 - ldi r16,99 - ldi r17,33 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,184 - ldi r25,160 - ldi r16,177 - ldi r17,16 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,92 - ldi r25,208 - ldi r16,88 - ldi r17,8 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,46 - ldi r25,104 - ldi r16,44 - ldi r17,4 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,23 - ldi r25,52 - ldi r16,22 - ldi r17,2 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,11 - ldi r25,26 - ldi r16,11 - ldi 
r17,129 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,5 - ldi r25,141 - ldi r16,133 - ldi r17,192 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,130 - ldi r25,198 - ldi r16,66 - ldi r17,224 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,65 - ldi r25,99 - ldi r16,33 - ldi r17,112 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,160 - ldi r25,177 - ldi r16,16 - ldi r17,184 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,208 - ldi r25,88 - ldi r16,8 - ldi r17,92 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,104 - ldi r25,44 - ldi r16,4 - ldi r17,46 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,52 - ldi r25,22 - ldi r16,2 - ldi r17,23 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,26 - ldi r25,11 - ldi r16,129 - ldi r17,11 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,141 - ldi r25,133 - ldi r16,192 - ldi r17,5 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,198 - ldi r25,66 - ldi r16,224 - ldi r17,130 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,99 - ldi r25,33 - ldi r16,112 - ldi r17,65 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,177 - ldi r25,16 - ldi r16,184 - ldi r17,160 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,88 - ldi r25,8 - ldi r16,92 - ldi r17,208 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,44 - ldi r25,4 - ldi r16,46 - ldi r17,104 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,22 - ldi r25,2 - ldi r16,23 - ldi r17,52 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,11 - ldi r25,129 - ldi r16,11 - ldi r17,26 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,133 - ldi r25,192 - ldi r16,5 - ldi r17,141 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,66 - ldi r25,224 - ldi r16,130 - ldi r17,198 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - 
eor r9,r25 - eor r10,r16 - eor r11,r17 - movw r22,r8 - movw r2,r10 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r27,128 - mov r8,r27 - ldi r27,242 - mov r9,r27 - ldi r27,44 - mov r10,r27 - ldi r27,105 - mov r11,r27 - and r8,r0 - and r9,r0 - and r10,r0 - and r11,r0 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,64 - ldi r25,121 - ldi r16,150 - ldi r17,52 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,160 - ldi r25,60 - ldi r16,75 - ldi r17,26 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,80 - ldi r25,158 - ldi r16,37 - ldi r17,13 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,40 - ldi r25,207 - ldi r16,146 - ldi r17,6 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,148 - ldi r25,103 - ldi r16,73 - ldi r17,3 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,202 - ldi r25,179 - ldi r16,164 - ldi r17,1 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,229 - ldi r25,89 - ldi r16,210 - mov r17,r1 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,242 - ldi r25,44 - ldi r16,105 - ldi r17,128 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,121 - ldi r25,150 - ldi r16,52 - ldi r17,64 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,60 - ldi r25,75 - ldi r16,26 - ldi r17,160 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,158 - ldi r25,37 - ldi r16,13 - ldi r17,80 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,207 - ldi r25,146 - ldi r16,6 - ldi r17,40 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,103 - ldi r25,73 - ldi r16,3 - ldi r17,148 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,179 - ldi r25,164 - ldi r16,1 - ldi r17,202 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,89 - ldi r25,210 - mov r16,r1 - ldi r17,229 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,44 - ldi r25,105 - ldi r16,128 - ldi r17,242 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,150 - ldi r25,52 - ldi r16,64 - ldi r17,121 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 
- sbc r0,r1 - ldi r24,75 - ldi r25,26 - ldi r16,160 - ldi r17,60 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,37 - ldi r25,13 - ldi r16,80 - ldi r17,158 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,146 - ldi r25,6 - ldi r16,40 - ldi r17,207 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,73 - ldi r25,3 - ldi r16,148 - ldi r17,103 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,164 - ldi r25,1 - ldi r16,202 - ldi r17,179 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,210 - mov r25,r1 - ldi r16,229 - ldi r17,89 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,105 - ldi r25,128 - ldi r16,242 - ldi r17,44 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,52 - ldi r25,64 - ldi r16,121 - ldi r17,150 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,26 - ldi r25,160 - ldi r16,60 - ldi r17,75 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,13 - ldi r25,80 - ldi r16,158 - ldi r17,37 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,6 - ldi r25,40 - ldi r16,207 - ldi r17,146 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,3 - ldi r25,148 - ldi r16,103 - ldi r17,73 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,1 - ldi r25,202 - ldi r16,179 - ldi r17,164 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - mov r24,r1 - ldi r25,229 - ldi r16,89 - ldi r17,210 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - movw r4,r8 - movw r6,r10 - dec r26 - breq 6545f - rjmp 13b -6545: - ld r8,Z+ - ld r9,Z+ - ld r10,Z+ - ld r11,Z+ - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - ld r8,Z+ - ld r9,Z+ - ld r10,Z+ - ld r11,Z+ - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - ld r8,Z+ - ld r9,Z+ - ld r10,Z+ - ld r11,Z+ - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r21 - st X+,r20 - st X+,r19 - st X+,r18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - pop r0 - pop r0 - pop r17 - pop r16 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size pyjamask_96_encrypt, .-pyjamask_96_encrypt - - .text -.global pyjamask_96_decrypt - .type pyjamask_96_decrypt, @function -pyjamask_96_decrypt: - push r28 - push r29 - push r2 - push r3 - 
push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 16 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - subi r30,76 - sbci r31,255 - ld r9,-Z - ld r8,-Z - ld r27,-Z - ld r26,-Z - eor r4,r26 - eor r5,r27 - eor r6,r8 - eor r7,r9 - ld r9,-Z - ld r8,-Z - ld r27,-Z - ld r26,-Z - eor r22,r26 - eor r23,r27 - eor r2,r8 - eor r3,r9 - ld r9,-Z - ld r8,-Z - ld r27,-Z - ld r26,-Z - eor r18,r26 - eor r19,r27 - eor r20,r8 - eor r21,r9 - ldi r26,14 -39: - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r27,33 - mov r8,r27 - ldi r27,161 - mov r9,r27 - ldi r27,55 - mov r10,r27 - ldi r27,32 - mov r11,r27 - and r8,r0 - and r9,r0 - and r10,r0 - and r11,r0 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,144 - ldi r25,208 - ldi r16,27 - ldi r17,144 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,72 - ldi r25,232 - ldi r16,13 - ldi r17,72 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,36 - ldi r25,244 - ldi r16,6 - ldi r17,36 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,18 - ldi r25,122 - ldi r16,3 - ldi r17,18 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,9 - ldi r25,189 - ldi r16,1 - ldi r17,9 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,132 - ldi r25,222 - ldi r16,128 - ldi r17,132 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,66 - ldi r25,111 - ldi r16,64 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,161 - ldi r25,55 - ldi r16,32 - ldi r17,33 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,208 - ldi r25,27 - ldi r16,144 - ldi r17,144 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,232 - ldi r25,13 - ldi r16,72 - ldi r17,72 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,244 - ldi r25,6 - ldi r16,36 - ldi r17,36 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,122 - ldi r25,3 - ldi r16,18 - ldi r17,18 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,189 - ldi r25,1 - ldi r16,9 - ldi r17,9 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,222 - ldi r25,128 - ldi r16,132 - ldi r17,132 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor 
r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,111 - ldi r25,64 - ldi r16,66 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,55 - ldi r25,32 - ldi r16,33 - ldi r17,161 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,27 - ldi r25,144 - ldi r16,144 - ldi r17,208 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,13 - ldi r25,72 - ldi r16,72 - ldi r17,232 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,6 - ldi r25,36 - ldi r16,36 - ldi r17,244 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,3 - ldi r25,18 - ldi r16,18 - ldi r17,122 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,1 - ldi r25,9 - ldi r16,9 - ldi r17,189 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,128 - ldi r25,132 - ldi r16,132 - ldi r17,222 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,64 - ldi r25,66 - ldi r16,66 - ldi r17,111 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,32 - ldi r25,33 - ldi r16,161 - ldi r17,55 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,144 - ldi r25,144 - ldi r16,208 - ldi r17,27 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,72 - ldi r25,72 - ldi r16,232 - ldi r17,13 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,36 - ldi r25,36 - ldi r16,244 - ldi r17,6 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,18 - ldi r25,18 - ldi r16,122 - ldi r17,3 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,9 - ldi r25,9 - ldi r16,189 - ldi r17,1 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,132 - ldi r25,132 - ldi r16,222 - ldi r17,128 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,66 - ldi r25,66 - ldi r16,111 - ldi r17,64 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - movw r18,r8 - movw r20,r10 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r27,160 - mov r8,r27 - ldi r27,242 - mov r9,r27 - ldi r27,143 - mov r10,r27 - ldi r27,16 - mov r11,r27 - and r8,r0 - and r9,r0 - and r10,r0 - and r11,r0 - mov r0,r1 - lsl r3 - sbc 
r0,r1 - ldi r24,80 - ldi r25,249 - ldi r16,71 - ldi r17,8 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,168 - ldi r25,252 - ldi r16,35 - ldi r17,4 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,84 - ldi r25,254 - ldi r16,17 - ldi r17,2 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,42 - ldi r25,255 - ldi r16,8 - ldi r17,1 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,149 - ldi r25,127 - ldi r16,132 - mov r17,r1 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,202 - ldi r25,63 - ldi r16,66 - ldi r17,128 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,229 - ldi r25,31 - ldi r16,33 - ldi r17,64 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,242 - ldi r25,143 - ldi r16,16 - ldi r17,160 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,249 - ldi r25,71 - ldi r16,8 - ldi r17,80 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,252 - ldi r25,35 - ldi r16,4 - ldi r17,168 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,254 - ldi r25,17 - ldi r16,2 - ldi r17,84 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,255 - ldi r25,8 - ldi r16,1 - ldi r17,42 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,127 - ldi r25,132 - mov r16,r1 - ldi r17,149 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,63 - ldi r25,66 - ldi r16,128 - ldi r17,202 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,31 - ldi r25,33 - ldi r16,64 - ldi r17,229 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,143 - ldi r25,16 - ldi r16,160 - ldi r17,242 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,71 - ldi r25,8 - ldi r16,80 - ldi r17,249 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,35 - ldi r25,4 - ldi r16,168 - ldi r17,252 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,17 - ldi r25,2 - ldi r16,84 - ldi r17,254 - and r24,r0 - and r25,r0 - and 
r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,8 - ldi r25,1 - ldi r16,42 - ldi r17,255 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,132 - mov r25,r1 - ldi r16,149 - ldi r17,127 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,66 - ldi r25,128 - ldi r16,202 - ldi r17,63 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,33 - ldi r25,64 - ldi r16,229 - ldi r17,31 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,16 - ldi r25,160 - ldi r16,242 - ldi r17,143 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,8 - ldi r25,80 - ldi r16,249 - ldi r17,71 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,4 - ldi r25,168 - ldi r16,252 - ldi r17,35 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,2 - ldi r25,84 - ldi r16,254 - ldi r17,17 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,1 - ldi r25,42 - ldi r16,255 - ldi r17,8 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - mov r24,r1 - ldi r25,149 - ldi r16,127 - ldi r17,132 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,128 - ldi r25,202 - ldi r16,63 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,64 - ldi r25,229 - ldi r16,31 - ldi r17,33 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - movw r22,r8 - movw r2,r10 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r27,192 - mov r8,r27 - ldi r27,216 - mov r9,r27 - ldi r27,84 - mov r10,r27 - ldi r27,144 - mov r11,r27 - and r8,r0 - and r9,r0 - and r10,r0 - and r11,r0 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,96 - ldi r25,108 - ldi r16,42 - ldi r17,72 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,48 - ldi r25,54 - ldi r16,21 - ldi r17,36 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,24 - ldi r25,155 - ldi r16,10 - ldi r17,18 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,140 - ldi r25,77 - ldi r16,5 - ldi r17,9 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,198 - ldi r25,166 - ldi r16,130 - ldi r17,4 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor 
r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,99 - ldi r25,83 - ldi r16,65 - ldi r17,2 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,177 - ldi r25,169 - ldi r16,32 - ldi r17,129 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,216 - ldi r25,84 - ldi r16,144 - ldi r17,192 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,108 - ldi r25,42 - ldi r16,72 - ldi r17,96 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,54 - ldi r25,21 - ldi r16,36 - ldi r17,48 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,155 - ldi r25,10 - ldi r16,18 - ldi r17,24 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,77 - ldi r25,5 - ldi r16,9 - ldi r17,140 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,166 - ldi r25,130 - ldi r16,4 - ldi r17,198 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,83 - ldi r25,65 - ldi r16,2 - ldi r17,99 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,169 - ldi r25,32 - ldi r16,129 - ldi r17,177 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,84 - ldi r25,144 - ldi r16,192 - ldi r17,216 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,42 - ldi r25,72 - ldi r16,96 - ldi r17,108 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,21 - ldi r25,36 - ldi r16,48 - ldi r17,54 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,10 - ldi r25,18 - ldi r16,24 - ldi r17,155 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,5 - ldi r25,9 - ldi r16,140 - ldi r17,77 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,130 - ldi r25,4 - ldi r16,198 - ldi r17,166 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,65 - ldi r25,2 - ldi r16,99 - ldi r17,83 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,32 - ldi r25,129 - ldi r16,177 - ldi r17,169 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,144 - ldi r25,192 - ldi r16,216 - 
ldi r17,84 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,72 - ldi r25,96 - ldi r16,108 - ldi r17,42 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,36 - ldi r25,48 - ldi r16,54 - ldi r17,21 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,18 - ldi r25,24 - ldi r16,155 - ldi r17,10 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,9 - ldi r25,140 - ldi r16,77 - ldi r17,5 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,4 - ldi r25,198 - ldi r16,166 - ldi r17,130 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,2 - ldi r25,99 - ldi r16,83 - ldi r17,65 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,129 - ldi r25,177 - ldi r16,169 - ldi r17,32 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - movw r4,r8 - movw r6,r10 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - com r4 - com r5 - com r6 - com r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r18 - and r0,r4 - eor r22,r0 - mov r0,r19 - and r0,r5 - eor r23,r0 - mov r0,r20 - and r0,r6 - eor r2,r0 - mov r0,r21 - and r0,r7 - eor r3,r0 - mov r0,r22 - and r0,r4 - eor r18,r0 - mov r0,r23 - and r0,r5 - eor r19,r0 - mov r0,r2 - and r0,r6 - eor r20,r0 - mov r0,r3 - and r0,r7 - eor r21,r0 - mov r0,r18 - and r0,r22 - eor r4,r0 - mov r0,r19 - and r0,r23 - eor r5,r0 - mov r0,r20 - and r0,r2 - eor r6,r0 - mov r0,r21 - and r0,r3 - eor r7,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - ld r11,-Z - ld r10,-Z - ld r9,-Z - ld r8,-Z - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - ld r11,-Z - ld r10,-Z - ld r9,-Z - ld r8,-Z - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - ld r11,-Z - ld r10,-Z - ld r9,-Z - ld r8,-Z - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - dec r26 - breq 6571f - rjmp 39b -6571: - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r21 - st X+,r20 - st X+,r19 - st X+,r18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - pop r0 - pop r0 - pop r17 - pop r16 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size pyjamask_96_decrypt, .-pyjamask_96_decrypt - - .text -.global pyjamask_128_setup_key - .type pyjamask_128_setup_key, @function -pyjamask_128_setup_key: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 18 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - 
st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r26,r1 -33: - movw r12,r18 - movw r14,r20 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r27,202 - mov r12,r27 - ldi r27,185 - mov r13,r27 - ldi r27,129 - mov r14,r27 - ldi r27,184 - mov r15,r27 - and r12,r0 - and r13,r0 - and r14,r0 - and r15,r0 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,229 - ldi r25,220 - ldi r16,64 - ldi r17,92 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,114 - ldi r25,110 - ldi r16,32 - ldi r17,174 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,57 - ldi r25,55 - ldi r16,16 - ldi r17,87 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,156 - ldi r25,27 - ldi r16,136 - ldi r17,171 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,206 - ldi r25,13 - ldi r16,196 - ldi r17,85 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,231 - ldi r25,6 - ldi r16,226 - ldi r17,42 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,115 - ldi r25,3 - ldi r16,113 - ldi r17,149 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,185 - ldi r25,129 - ldi r16,184 - ldi r17,202 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,220 - ldi r25,64 - ldi r16,92 - ldi r17,229 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,110 - ldi r25,32 - ldi r16,174 - ldi r17,114 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,55 - ldi r25,16 - ldi r16,87 - ldi r17,57 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,27 - ldi r25,136 - ldi r16,171 - ldi r17,156 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,13 - ldi r25,196 - ldi r16,85 - ldi r17,206 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,6 - ldi r25,226 - ldi r16,42 - ldi r17,231 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,3 - ldi r25,113 - ldi r16,149 - ldi r17,115 - and r24,r0 
- and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,129 - ldi r25,184 - ldi r16,202 - ldi r17,185 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,64 - ldi r25,92 - ldi r16,229 - ldi r17,220 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,32 - ldi r25,174 - ldi r16,114 - ldi r17,110 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,16 - ldi r25,87 - ldi r16,57 - ldi r17,55 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,136 - ldi r25,171 - ldi r16,156 - ldi r17,27 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,196 - ldi r25,85 - ldi r16,206 - ldi r17,13 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,226 - ldi r25,42 - ldi r16,231 - ldi r17,6 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,113 - ldi r25,149 - ldi r16,115 - ldi r17,3 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,184 - ldi r25,202 - ldi r16,185 - ldi r17,129 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,92 - ldi r25,229 - ldi r16,220 - ldi r17,64 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,174 - ldi r25,114 - ldi r16,110 - ldi r17,32 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,87 - ldi r25,57 - ldi r16,55 - ldi r17,16 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,171 - ldi r25,156 - ldi r16,27 - ldi r17,136 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,85 - ldi r25,206 - ldi r16,13 - ldi r17,196 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,42 - ldi r25,231 - ldi r16,6 - ldi r17,226 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,149 - ldi r25,115 - ldi r16,3 - ldi r17,113 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - movw r18,r12 - movw r20,r14 - ldi r25,128 - eor r18,r25 - eor r18,r26 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - ldi r24,106 - eor r23,r24 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - lsl r4 - rol r5 - rol r6 - rol r7 - adc r4,r1 - ldi r17,63 - eor r6,r17 - mov r0,r8 
- mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - or r11,r0 - ldi r16,36 - eor r11,r16 - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - inc r26 - ldi r27,14 - cpse r26,r27 - rjmp 33b - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size pyjamask_128_setup_key, .-pyjamask_128_setup_key - - .text -.global pyjamask_128_encrypt - .type pyjamask_128_encrypt, @function -pyjamask_128_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 20 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ldi r26,14 -17: - ld r12,Z+ - ld r13,Z+ - ld r14,Z+ - ld r15,Z+ - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - ld r12,Z+ - ld r13,Z+ - ld r14,Z+ - ld r15,Z+ - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ld r12,Z+ - ld r13,Z+ - ld r14,Z+ - ld r15,Z+ - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - ld r12,Z+ - ld r13,Z+ - ld r14,Z+ - ld r15,Z+ - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - mov r0,r18 - and r0,r22 - eor r8,r0 - mov r0,r19 - and r0,r23 - eor r9,r0 - mov r0,r20 - and r0,r2 - eor r10,r0 - mov r0,r21 - and r0,r3 - eor r11,r0 - mov r0,r22 - and r0,r4 - eor r18,r0 - mov r0,r23 - and r0,r5 - eor r19,r0 - mov r0,r2 - and r0,r6 - eor r20,r0 - mov r0,r3 - and r0,r7 - eor r21,r0 - mov r0,r4 - and r0,r8 - eor r22,r0 - mov r0,r5 - and r0,r9 - eor r23,r0 - mov r0,r6 - and r0,r10 - eor r2,r0 - mov r0,r7 - and r0,r11 - eor r3,r0 - mov r0,r18 - and r0,r8 - eor r4,r0 - mov r0,r19 - and r0,r9 - eor r5,r0 - mov r0,r20 - and r0,r10 - eor r6,r0 - mov r0,r21 - and r0,r11 - eor r7,r0 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - com r8 - com r9 - com r10 - com r11 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r27,133 - mov r12,r27 - ldi r27,16 - mov r13,r27 - ldi r27,134 - mov r14,r27 - ldi r27,163 - mov r15,r27 - and r12,r0 - and r13,r0 - and r14,r0 - and r15,r0 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,66 - ldi r25,8 - ldi r16,195 - ldi r17,209 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,33 - ldi r25,132 - ldi r16,225 - ldi r17,104 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,16 - ldi r25,194 - ldi r16,112 - ldi r17,180 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,8 - ldi r25,97 - ldi r16,56 - ldi r17,90 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor 
r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,132 - ldi r25,48 - ldi r16,28 - ldi r17,45 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,66 - ldi r25,24 - ldi r16,142 - ldi r17,22 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,33 - ldi r25,12 - ldi r16,71 - ldi r17,11 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,16 - ldi r25,134 - ldi r16,163 - ldi r17,133 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,8 - ldi r25,195 - ldi r16,209 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,132 - ldi r25,225 - ldi r16,104 - ldi r17,33 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,194 - ldi r25,112 - ldi r16,180 - ldi r17,16 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,97 - ldi r25,56 - ldi r16,90 - ldi r17,8 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,48 - ldi r25,28 - ldi r16,45 - ldi r17,132 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,24 - ldi r25,142 - ldi r16,22 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,12 - ldi r25,71 - ldi r16,11 - ldi r17,33 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,134 - ldi r25,163 - ldi r16,133 - ldi r17,16 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,195 - ldi r25,209 - ldi r16,66 - ldi r17,8 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,225 - ldi r25,104 - ldi r16,33 - ldi r17,132 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,112 - ldi r25,180 - ldi r16,16 - ldi r17,194 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,56 - ldi r25,90 - ldi r16,8 - ldi r17,97 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,28 - ldi r25,45 - ldi r16,132 - ldi r17,48 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,142 - ldi r25,22 - ldi r16,66 - ldi r17,24 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - 
mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,71 - ldi r25,11 - ldi r16,33 - ldi r17,12 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,163 - ldi r25,133 - ldi r16,16 - ldi r17,134 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,209 - ldi r25,66 - ldi r16,8 - ldi r17,195 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,104 - ldi r25,33 - ldi r16,132 - ldi r17,225 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,180 - ldi r25,16 - ldi r16,194 - ldi r17,112 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,90 - ldi r25,8 - ldi r16,97 - ldi r17,56 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,45 - ldi r25,132 - ldi r16,48 - ldi r17,28 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,22 - ldi r25,66 - ldi r16,24 - ldi r17,142 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,11 - ldi r25,33 - ldi r16,12 - ldi r17,71 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - movw r18,r12 - movw r20,r14 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r27,33 - mov r12,r27 - ldi r27,112 - mov r13,r27 - ldi r27,65 - mov r14,r27 - ldi r27,99 - mov r15,r27 - and r12,r0 - and r13,r0 - and r14,r0 - and r15,r0 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,16 - ldi r25,184 - ldi r16,160 - ldi r17,177 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,8 - ldi r25,92 - ldi r16,208 - ldi r17,88 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,4 - ldi r25,46 - ldi r16,104 - ldi r17,44 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,2 - ldi r25,23 - ldi r16,52 - ldi r17,22 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,129 - ldi r25,11 - ldi r16,26 - ldi r17,11 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,192 - ldi r25,5 - ldi r16,141 - ldi r17,133 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,224 - ldi r25,130 - ldi r16,198 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,112 - ldi r25,65 - ldi r16,99 - ldi r17,33 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - 
sbc r0,r1 - ldi r24,184 - ldi r25,160 - ldi r16,177 - ldi r17,16 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,92 - ldi r25,208 - ldi r16,88 - ldi r17,8 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,46 - ldi r25,104 - ldi r16,44 - ldi r17,4 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,23 - ldi r25,52 - ldi r16,22 - ldi r17,2 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,11 - ldi r25,26 - ldi r16,11 - ldi r17,129 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,5 - ldi r25,141 - ldi r16,133 - ldi r17,192 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,130 - ldi r25,198 - ldi r16,66 - ldi r17,224 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,65 - ldi r25,99 - ldi r16,33 - ldi r17,112 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,160 - ldi r25,177 - ldi r16,16 - ldi r17,184 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,208 - ldi r25,88 - ldi r16,8 - ldi r17,92 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,104 - ldi r25,44 - ldi r16,4 - ldi r17,46 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,52 - ldi r25,22 - ldi r16,2 - ldi r17,23 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,26 - ldi r25,11 - ldi r16,129 - ldi r17,11 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,141 - ldi r25,133 - ldi r16,192 - ldi r17,5 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,198 - ldi r25,66 - ldi r16,224 - ldi r17,130 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,99 - ldi r25,33 - ldi r16,112 - ldi r17,65 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,177 - ldi r25,16 - ldi r16,184 - ldi r17,160 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,88 - ldi r25,8 - ldi r16,92 - ldi r17,208 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,44 - ldi r25,4 - ldi r16,46 
- ldi r17,104 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,22 - ldi r25,2 - ldi r16,23 - ldi r17,52 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,11 - ldi r25,129 - ldi r16,11 - ldi r17,26 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,133 - ldi r25,192 - ldi r16,5 - ldi r17,141 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,66 - ldi r25,224 - ldi r16,130 - ldi r17,198 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - movw r22,r12 - movw r2,r14 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r27,128 - mov r12,r27 - ldi r27,242 - mov r13,r27 - ldi r27,44 - mov r14,r27 - ldi r27,105 - mov r15,r27 - and r12,r0 - and r13,r0 - and r14,r0 - and r15,r0 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,64 - ldi r25,121 - ldi r16,150 - ldi r17,52 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,160 - ldi r25,60 - ldi r16,75 - ldi r17,26 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,80 - ldi r25,158 - ldi r16,37 - ldi r17,13 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,40 - ldi r25,207 - ldi r16,146 - ldi r17,6 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,148 - ldi r25,103 - ldi r16,73 - ldi r17,3 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,202 - ldi r25,179 - ldi r16,164 - ldi r17,1 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,229 - ldi r25,89 - ldi r16,210 - mov r17,r1 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,242 - ldi r25,44 - ldi r16,105 - ldi r17,128 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,121 - ldi r25,150 - ldi r16,52 - ldi r17,64 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,60 - ldi r25,75 - ldi r16,26 - ldi r17,160 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,158 - ldi r25,37 - ldi r16,13 - ldi r17,80 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,207 - ldi r25,146 - ldi r16,6 - ldi r17,40 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,103 - ldi r25,73 - ldi r16,3 - ldi r17,148 - and 
r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,179 - ldi r25,164 - ldi r16,1 - ldi r17,202 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,89 - ldi r25,210 - mov r16,r1 - ldi r17,229 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,44 - ldi r25,105 - ldi r16,128 - ldi r17,242 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,150 - ldi r25,52 - ldi r16,64 - ldi r17,121 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,75 - ldi r25,26 - ldi r16,160 - ldi r17,60 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,37 - ldi r25,13 - ldi r16,80 - ldi r17,158 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,146 - ldi r25,6 - ldi r16,40 - ldi r17,207 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,73 - ldi r25,3 - ldi r16,148 - ldi r17,103 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,164 - ldi r25,1 - ldi r16,202 - ldi r17,179 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,210 - mov r25,r1 - ldi r16,229 - ldi r17,89 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,105 - ldi r25,128 - ldi r16,242 - ldi r17,44 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,52 - ldi r25,64 - ldi r16,121 - ldi r17,150 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,26 - ldi r25,160 - ldi r16,60 - ldi r17,75 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,13 - ldi r25,80 - ldi r16,158 - ldi r17,37 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,6 - ldi r25,40 - ldi r16,207 - ldi r17,146 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,3 - ldi r25,148 - ldi r16,103 - ldi r17,73 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,1 - ldi r25,202 - ldi r16,179 - ldi r17,164 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - mov r24,r1 - ldi r25,229 - ldi r16,89 - ldi r17,210 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - 
eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - movw r4,r12 - movw r6,r14 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r27,19 - mov r12,r27 - ldi r27,72 - mov r13,r27 - ldi r27,165 - mov r14,r27 - ldi r27,72 - mov r15,r27 - and r12,r0 - and r13,r0 - and r14,r0 - and r15,r0 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,9 - ldi r25,164 - ldi r16,82 - ldi r17,164 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,4 - ldi r25,82 - ldi r16,41 - ldi r17,210 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,2 - ldi r25,169 - ldi r16,20 - ldi r17,105 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,129 - ldi r25,84 - ldi r16,138 - ldi r17,52 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,64 - ldi r25,42 - ldi r16,69 - ldi r17,154 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,32 - ldi r25,149 - ldi r16,34 - ldi r17,77 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,144 - ldi r25,74 - ldi r16,145 - ldi r17,38 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,72 - ldi r25,165 - ldi r16,72 - ldi r17,19 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,164 - ldi r25,82 - ldi r16,164 - ldi r17,9 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,82 - ldi r25,41 - ldi r16,210 - ldi r17,4 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,169 - ldi r25,20 - ldi r16,105 - ldi r17,2 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,84 - ldi r25,138 - ldi r16,52 - ldi r17,129 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,42 - ldi r25,69 - ldi r16,154 - ldi r17,64 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,149 - ldi r25,34 - ldi r16,77 - ldi r17,32 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,74 - ldi r25,145 - ldi r16,38 - ldi r17,144 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,165 - ldi r25,72 - ldi r16,19 - ldi r17,72 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,82 - ldi r25,164 - ldi r16,9 - ldi r17,164 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - 
eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,41 - ldi r25,210 - ldi r16,4 - ldi r17,82 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,20 - ldi r25,105 - ldi r16,2 - ldi r17,169 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,138 - ldi r25,52 - ldi r16,129 - ldi r17,84 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,69 - ldi r25,154 - ldi r16,64 - ldi r17,42 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,34 - ldi r25,77 - ldi r16,32 - ldi r17,149 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,145 - ldi r25,38 - ldi r16,144 - ldi r17,74 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,72 - ldi r25,19 - ldi r16,72 - ldi r17,165 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,164 - ldi r25,9 - ldi r16,164 - ldi r17,82 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,210 - ldi r25,4 - ldi r16,82 - ldi r17,41 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,105 - ldi r25,2 - ldi r16,169 - ldi r17,20 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,52 - ldi r25,129 - ldi r16,84 - ldi r17,138 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,154 - ldi r25,64 - ldi r16,42 - ldi r17,69 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,77 - ldi r25,32 - ldi r16,149 - ldi r17,34 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,38 - ldi r25,144 - ldi r16,74 - ldi r17,145 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - movw r8,r12 - movw r10,r14 - dec r26 - breq 7055f - rjmp 17b -7055: - ld r12,Z+ - ld r13,Z+ - ld r14,Z+ - ld r15,Z+ - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - ld r12,Z+ - ld r13,Z+ - ld r14,Z+ - ld r15,Z+ - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ld r12,Z+ - ld r13,Z+ - ld r14,Z+ - ld r15,Z+ - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - ld r12,Z+ - ld r13,Z+ - ld r14,Z+ - ld r15,Z+ - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r21 - st X+,r20 - st X+,r19 - st X+,r18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - pop r0 - pop r0 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 
- pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size pyjamask_128_encrypt, .-pyjamask_128_encrypt - - .text -.global pyjamask_128_decrypt - .type pyjamask_128_decrypt, @function -pyjamask_128_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 20 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - subi r30,16 - sbci r31,255 - ld r13,-Z - ld r12,-Z - ld r27,-Z - ld r26,-Z - eor r8,r26 - eor r9,r27 - eor r10,r12 - eor r11,r13 - ld r13,-Z - ld r12,-Z - ld r27,-Z - ld r26,-Z - eor r4,r26 - eor r5,r27 - eor r6,r12 - eor r7,r13 - ld r13,-Z - ld r12,-Z - ld r27,-Z - ld r26,-Z - eor r22,r26 - eor r23,r27 - eor r2,r12 - eor r3,r13 - ld r13,-Z - ld r12,-Z - ld r27,-Z - ld r26,-Z - eor r18,r26 - eor r19,r27 - eor r20,r12 - eor r21,r13 - ldi r26,14 -51: - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r27,33 - mov r12,r27 - ldi r27,161 - mov r13,r27 - ldi r27,55 - mov r14,r27 - ldi r27,32 - mov r15,r27 - and r12,r0 - and r13,r0 - and r14,r0 - and r15,r0 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,144 - ldi r25,208 - ldi r16,27 - ldi r17,144 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,72 - ldi r25,232 - ldi r16,13 - ldi r17,72 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,36 - ldi r25,244 - ldi r16,6 - ldi r17,36 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,18 - ldi r25,122 - ldi r16,3 - ldi r17,18 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,9 - ldi r25,189 - ldi r16,1 - ldi r17,9 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,132 - ldi r25,222 - ldi r16,128 - ldi r17,132 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,66 - ldi r25,111 - ldi r16,64 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,161 - ldi r25,55 - ldi r16,32 - ldi r17,33 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,208 - ldi r25,27 - ldi r16,144 - ldi r17,144 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,232 - ldi r25,13 - ldi r16,72 - ldi r17,72 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,244 - ldi r25,6 - ldi r16,36 - ldi r17,36 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - 
ldi r24,122 - ldi r25,3 - ldi r16,18 - ldi r17,18 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,189 - ldi r25,1 - ldi r16,9 - ldi r17,9 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,222 - ldi r25,128 - ldi r16,132 - ldi r17,132 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,111 - ldi r25,64 - ldi r16,66 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,55 - ldi r25,32 - ldi r16,33 - ldi r17,161 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,27 - ldi r25,144 - ldi r16,144 - ldi r17,208 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,13 - ldi r25,72 - ldi r16,72 - ldi r17,232 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,6 - ldi r25,36 - ldi r16,36 - ldi r17,244 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,3 - ldi r25,18 - ldi r16,18 - ldi r17,122 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,1 - ldi r25,9 - ldi r16,9 - ldi r17,189 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,128 - ldi r25,132 - ldi r16,132 - ldi r17,222 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,64 - ldi r25,66 - ldi r16,66 - ldi r17,111 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,32 - ldi r25,33 - ldi r16,161 - ldi r17,55 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,144 - ldi r25,144 - ldi r16,208 - ldi r17,27 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,72 - ldi r25,72 - ldi r16,232 - ldi r17,13 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,36 - ldi r25,36 - ldi r16,244 - ldi r17,6 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,18 - ldi r25,18 - ldi r16,122 - ldi r17,3 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,9 - ldi r25,9 - ldi r16,189 - ldi r17,1 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,132 - ldi r25,132 - ldi r16,222 - ldi 
r17,128 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,66 - ldi r25,66 - ldi r16,111 - ldi r17,64 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - movw r18,r12 - movw r20,r14 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r27,160 - mov r12,r27 - ldi r27,242 - mov r13,r27 - ldi r27,143 - mov r14,r27 - ldi r27,16 - mov r15,r27 - and r12,r0 - and r13,r0 - and r14,r0 - and r15,r0 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,80 - ldi r25,249 - ldi r16,71 - ldi r17,8 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,168 - ldi r25,252 - ldi r16,35 - ldi r17,4 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,84 - ldi r25,254 - ldi r16,17 - ldi r17,2 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,42 - ldi r25,255 - ldi r16,8 - ldi r17,1 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,149 - ldi r25,127 - ldi r16,132 - mov r17,r1 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,202 - ldi r25,63 - ldi r16,66 - ldi r17,128 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,229 - ldi r25,31 - ldi r16,33 - ldi r17,64 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,242 - ldi r25,143 - ldi r16,16 - ldi r17,160 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,249 - ldi r25,71 - ldi r16,8 - ldi r17,80 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,252 - ldi r25,35 - ldi r16,4 - ldi r17,168 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,254 - ldi r25,17 - ldi r16,2 - ldi r17,84 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,255 - ldi r25,8 - ldi r16,1 - ldi r17,42 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,127 - ldi r25,132 - mov r16,r1 - ldi r17,149 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,63 - ldi r25,66 - ldi r16,128 - ldi r17,202 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,31 - ldi r25,33 - ldi r16,64 - ldi r17,229 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,143 - ldi r25,16 - ldi r16,160 - ldi r17,242 - and r24,r0 - and 
r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,71 - ldi r25,8 - ldi r16,80 - ldi r17,249 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,35 - ldi r25,4 - ldi r16,168 - ldi r17,252 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,17 - ldi r25,2 - ldi r16,84 - ldi r17,254 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,8 - ldi r25,1 - ldi r16,42 - ldi r17,255 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,132 - mov r25,r1 - ldi r16,149 - ldi r17,127 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,66 - ldi r25,128 - ldi r16,202 - ldi r17,63 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,33 - ldi r25,64 - ldi r16,229 - ldi r17,31 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,16 - ldi r25,160 - ldi r16,242 - ldi r17,143 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,8 - ldi r25,80 - ldi r16,249 - ldi r17,71 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,4 - ldi r25,168 - ldi r16,252 - ldi r17,35 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,2 - ldi r25,84 - ldi r16,254 - ldi r17,17 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,1 - ldi r25,42 - ldi r16,255 - ldi r17,8 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - mov r24,r1 - ldi r25,149 - ldi r16,127 - ldi r17,132 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,128 - ldi r25,202 - ldi r16,63 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,64 - ldi r25,229 - ldi r16,31 - ldi r17,33 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - movw r22,r12 - movw r2,r14 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r27,192 - mov r12,r27 - ldi r27,216 - mov r13,r27 - ldi r27,84 - mov r14,r27 - ldi r27,144 - mov r15,r27 - and r12,r0 - and r13,r0 - and r14,r0 - and r15,r0 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,96 - ldi r25,108 - ldi r16,42 - ldi r17,72 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,48 - ldi r25,54 - ldi r16,21 - ldi r17,36 - and r24,r0 - and r25,r0 - and 
r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,24 - ldi r25,155 - ldi r16,10 - ldi r17,18 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,140 - ldi r25,77 - ldi r16,5 - ldi r17,9 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,198 - ldi r25,166 - ldi r16,130 - ldi r17,4 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,99 - ldi r25,83 - ldi r16,65 - ldi r17,2 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,177 - ldi r25,169 - ldi r16,32 - ldi r17,129 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,216 - ldi r25,84 - ldi r16,144 - ldi r17,192 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,108 - ldi r25,42 - ldi r16,72 - ldi r17,96 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,54 - ldi r25,21 - ldi r16,36 - ldi r17,48 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,155 - ldi r25,10 - ldi r16,18 - ldi r17,24 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,77 - ldi r25,5 - ldi r16,9 - ldi r17,140 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,166 - ldi r25,130 - ldi r16,4 - ldi r17,198 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,83 - ldi r25,65 - ldi r16,2 - ldi r17,99 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,169 - ldi r25,32 - ldi r16,129 - ldi r17,177 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,84 - ldi r25,144 - ldi r16,192 - ldi r17,216 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,42 - ldi r25,72 - ldi r16,96 - ldi r17,108 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,21 - ldi r25,36 - ldi r16,48 - ldi r17,54 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,10 - ldi r25,18 - ldi r16,24 - ldi r17,155 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,5 - ldi r25,9 - ldi r16,140 - ldi r17,77 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - 
eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,130 - ldi r25,4 - ldi r16,198 - ldi r17,166 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,65 - ldi r25,2 - ldi r16,99 - ldi r17,83 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,32 - ldi r25,129 - ldi r16,177 - ldi r17,169 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,144 - ldi r25,192 - ldi r16,216 - ldi r17,84 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,72 - ldi r25,96 - ldi r16,108 - ldi r17,42 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,36 - ldi r25,48 - ldi r16,54 - ldi r17,21 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,18 - ldi r25,24 - ldi r16,155 - ldi r17,10 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,9 - ldi r25,140 - ldi r16,77 - ldi r17,5 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,4 - ldi r25,198 - ldi r16,166 - ldi r17,130 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,2 - ldi r25,99 - ldi r16,83 - ldi r17,65 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,129 - ldi r25,177 - ldi r16,169 - ldi r17,32 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - movw r4,r12 - movw r6,r14 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r27,23 - mov r12,r27 - ldi r27,177 - mov r13,r27 - ldi r27,84 - mov r14,r27 - ldi r27,51 - mov r15,r27 - and r12,r0 - and r13,r0 - and r14,r0 - and r15,r0 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,139 - ldi r25,88 - ldi r16,170 - ldi r17,153 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,69 - ldi r25,44 - ldi r16,213 - ldi r17,204 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,34 - ldi r25,150 - ldi r16,106 - ldi r17,230 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,17 - ldi r25,75 - ldi r16,53 - ldi r17,115 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,136 - ldi r25,165 - ldi r16,154 - ldi r17,185 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,196 - ldi r25,82 - ldi r16,205 - ldi r17,92 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov 
r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,98 - ldi r25,169 - ldi r16,102 - ldi r17,46 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,177 - ldi r25,84 - ldi r16,51 - ldi r17,23 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,88 - ldi r25,170 - ldi r16,153 - ldi r17,139 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,44 - ldi r25,213 - ldi r16,204 - ldi r17,69 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,150 - ldi r25,106 - ldi r16,230 - ldi r17,34 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,75 - ldi r25,53 - ldi r16,115 - ldi r17,17 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,165 - ldi r25,154 - ldi r16,185 - ldi r17,136 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,82 - ldi r25,205 - ldi r16,92 - ldi r17,196 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,169 - ldi r25,102 - ldi r16,46 - ldi r17,98 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,84 - ldi r25,51 - ldi r16,23 - ldi r17,177 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,170 - ldi r25,153 - ldi r16,139 - ldi r17,88 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,213 - ldi r25,204 - ldi r16,69 - ldi r17,44 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,106 - ldi r25,230 - ldi r16,34 - ldi r17,150 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,53 - ldi r25,115 - ldi r16,17 - ldi r17,75 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,154 - ldi r25,185 - ldi r16,136 - ldi r17,165 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,205 - ldi r25,92 - ldi r16,196 - ldi r17,82 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,102 - ldi r25,46 - ldi r16,98 - ldi r17,169 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,51 - ldi r25,23 - ldi r16,177 - ldi r17,84 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi 
r24,153 - ldi r25,139 - ldi r16,88 - ldi r17,170 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,204 - ldi r25,69 - ldi r16,44 - ldi r17,213 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,230 - ldi r25,34 - ldi r16,150 - ldi r17,106 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,115 - ldi r25,17 - ldi r16,75 - ldi r17,53 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,185 - ldi r25,136 - ldi r16,165 - ldi r17,154 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,92 - ldi r25,196 - ldi r16,82 - ldi r17,205 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,46 - ldi r25,98 - ldi r16,169 - ldi r17,102 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - movw r8,r12 - movw r10,r14 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - com r8 - com r9 - com r10 - com r11 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - mov r0,r18 - and r0,r8 - eor r4,r0 - mov r0,r19 - and r0,r9 - eor r5,r0 - mov r0,r20 - and r0,r10 - eor r6,r0 - mov r0,r21 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r8 - eor r22,r0 - mov r0,r5 - and r0,r9 - eor r23,r0 - mov r0,r6 - and r0,r10 - eor r2,r0 - mov r0,r7 - and r0,r11 - eor r3,r0 - mov r0,r22 - and r0,r4 - eor r18,r0 - mov r0,r23 - and r0,r5 - eor r19,r0 - mov r0,r2 - and r0,r6 - eor r20,r0 - mov r0,r3 - and r0,r7 - eor r21,r0 - mov r0,r18 - and r0,r22 - eor r8,r0 - mov r0,r19 - and r0,r23 - eor r9,r0 - mov r0,r20 - and r0,r2 - eor r10,r0 - mov r0,r21 - and r0,r3 - eor r11,r0 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - ld r15,-Z - ld r14,-Z - ld r13,-Z - ld r12,-Z - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - ld r15,-Z - ld r14,-Z - ld r13,-Z - ld r12,-Z - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - ld r15,-Z - ld r14,-Z - ld r13,-Z - ld r12,-Z - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ld r15,-Z - ld r14,-Z - ld r13,-Z - ld r12,-Z - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - dec r26 - breq 7089f - rjmp 51b -7089: - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r21 - st X+,r20 - st X+,r19 - st X+,r18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - pop r0 - pop r0 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size pyjamask_128_decrypt, .-pyjamask_128_decrypt - -#endif diff --git a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-pyjamask.c b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-pyjamask.c deleted file mode 100644 index 3c40d2d..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-pyjamask.c +++ 
/dev/null @@ -1,356 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-pyjamask.h" -#include "internal-util.h" - -#if !defined(__AVR__) - -/** - * \brief Performs a circulant binary matrix multiplication. - * - * \param x The matrix. - * \param y The vector to multiply with the matrix. - * - * \return The vector result of multiplying x by y. - */ -STATIC_INLINE uint32_t pyjamask_matrix_multiply(uint32_t x, uint32_t y) -{ - uint32_t result = 0; - int bit; - for (bit = 31; bit >= 0; --bit) { -#if defined(ESP32) - /* This version has slightly better performance on ESP32 */ - y = leftRotate1(y); - result ^= x & -(y & 1); - x = rightRotate1(x); -#else - result ^= x & -((y >> bit) & 1); - x = rightRotate1(x); -#endif - } - return result; -} - -void pyjamask_128_setup_key - (pyjamask_128_key_schedule_t *ks, const unsigned char *key) -{ - uint32_t *rk = ks->k; - uint32_t k0, k1, k2, k3; - uint32_t temp; - uint8_t round; - - /* Load the words of the key */ - k0 = be_load_word32(key); - k1 = be_load_word32(key + 4); - k2 = be_load_word32(key + 8); - k3 = be_load_word32(key + 12); - - /* The first round key is the same as the key itself */ - rk[0] = k0; - rk[1] = k1; - rk[2] = k2; - rk[3] = k3; - rk += 4; - - /* Derive the round keys for all of the other rounds */ - for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk += 4) { - /* Mix the columns */ - temp = k0 ^ k1 ^ k2 ^ k3; - k0 ^= temp; - k1 ^= temp; - k2 ^= temp; - k3 ^= temp; - - /* Mix the rows and add the round constants. Note that the Pyjamask - * specification says that k1/k2/k3 should be rotated left by 8, 15, - * and 18 bits. But the reference code actually rotates the words - * right. And the test vectors in the specification match up with - * right rotations, not left. 
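A minimal host-side sketch of the circulant multiply technique used by pyjamask_matrix_multiply() above; the helper names here are hypothetical and not part of the patch:

#include <stdint.h>

static uint32_t rotr1_u32(uint32_t x) { return (x >> 1) | (x << 31); }

static uint32_t circulant_mul_demo(uint32_t row, uint32_t vec)
{
    uint32_t result = 0;
    /* Bit 31 of vec selects row, bit 30 selects row rotated right by 1, etc. */
    for (int bit = 31; bit >= 0; --bit) {
        result ^= row & (0u - ((vec >> bit) & 1u)); /* all-ones mask if the bit is set */
        row = rotr1_u32(row);
    }
    return result;
}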
We match the reference code here */ - k0 = pyjamask_matrix_multiply(0xb881b9caU, k0) ^ 0x00000080U ^ round; - k1 = rightRotate8(k1) ^ 0x00006a00U; - k2 = rightRotate15(k2) ^ 0x003f0000U; - k3 = rightRotate18(k3) ^ 0x24000000U; - - /* Write the round key to the schedule */ - rk[0] = k0; - rk[1] = k1; - rk[2] = k2; - rk[3] = k3; - } -} - -void pyjamask_96_setup_key - (pyjamask_96_key_schedule_t *ks, const unsigned char *key) -{ - uint32_t *rk = ks->k; - uint32_t k0, k1, k2, k3; - uint32_t temp; - uint8_t round; - - /* Load the words of the key */ - k0 = be_load_word32(key); - k1 = be_load_word32(key + 4); - k2 = be_load_word32(key + 8); - k3 = be_load_word32(key + 12); - - /* The first round key is the same as the key itself */ - rk[0] = k0; - rk[1] = k1; - rk[2] = k2; - rk += 3; - - /* Derive the round keys for all of the other rounds */ - for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk += 3) { - /* Mix the columns */ - temp = k0 ^ k1 ^ k2 ^ k3; - k0 ^= temp; - k1 ^= temp; - k2 ^= temp; - k3 ^= temp; - - /* Mix the rows and add the round constants. Note that the Pyjamask - * specification says that k1/k2/k3 should be rotated left by 8, 15, - * and 18 bits. But the reference code actually rotates the words - * right. And the test vectors in the specification match up with - * right rotations, not left. We match the reference code here */ - k0 = pyjamask_matrix_multiply(0xb881b9caU, k0) ^ 0x00000080U ^ round; - k1 = rightRotate8(k1) ^ 0x00006a00U; - k2 = rightRotate15(k2) ^ 0x003f0000U; - k3 = rightRotate18(k3) ^ 0x24000000U; - - /* Write the round key to the schedule */ - rk[0] = k0; - rk[1] = k1; - rk[2] = k2; - } -} - -void pyjamask_128_encrypt - (const pyjamask_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - const uint32_t *rk = ks->k; - uint32_t s0, s1, s2, s3; - uint8_t round; - - /* Load the plaintext from the input buffer */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all encryption rounds */ - for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk += 4) { - /* Add the round key to the state */ - s0 ^= rk[0]; - s1 ^= rk[1]; - s2 ^= rk[2]; - s3 ^= rk[3]; - - /* Apply the 128-bit Pyjamask sbox */ - s0 ^= s3; - s3 ^= s0 & s1; - s0 ^= s1 & s2; - s1 ^= s2 & s3; - s2 ^= s0 & s3; - s2 ^= s1; - s1 ^= s0; - s3 = ~s3; - s2 ^= s3; - s3 ^= s2; - s2 ^= s3; - - /* Mix the rows of the state */ - s0 = pyjamask_matrix_multiply(0xa3861085U, s0); - s1 = pyjamask_matrix_multiply(0x63417021U, s1); - s2 = pyjamask_matrix_multiply(0x692cf280U, s2); - s3 = pyjamask_matrix_multiply(0x48a54813U, s3); - } - - /* Mix in the key one last time */ - s0 ^= rk[0]; - s1 ^= rk[1]; - s2 ^= rk[2]; - s3 ^= rk[3]; - - /* Write the ciphertext to the output buffer */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void pyjamask_128_decrypt - (const pyjamask_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - const uint32_t *rk = ks->k + 4 * PYJAMASK_ROUNDS; - uint32_t s0, s1, s2, s3; - uint8_t round; - - /* Load the ciphertext from the input buffer */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Mix in the last round key */ - s0 ^= rk[0]; - s1 ^= rk[1]; - s2 ^= rk[2]; - s3 ^= rk[3]; - rk -= 4; - - /* Perform all decryption rounds */ - for (round = 0; round < 
PYJAMASK_ROUNDS; ++round, rk -= 4) { - /* Inverse mix of the rows in the state */ - s0 = pyjamask_matrix_multiply(0x2037a121U, s0); - s1 = pyjamask_matrix_multiply(0x108ff2a0U, s1); - s2 = pyjamask_matrix_multiply(0x9054d8c0U, s2); - s3 = pyjamask_matrix_multiply(0x3354b117U, s3); - - /* Apply the inverse of the 128-bit Pyjamask sbox */ - s2 ^= s3; - s3 ^= s2; - s2 ^= s3; - s3 = ~s3; - s1 ^= s0; - s2 ^= s1; - s2 ^= s0 & s3; - s1 ^= s2 & s3; - s0 ^= s1 & s2; - s3 ^= s0 & s1; - s0 ^= s3; - - /* Add the round key to the state */ - s0 ^= rk[0]; - s1 ^= rk[1]; - s2 ^= rk[2]; - s3 ^= rk[3]; - } - - /* Write the plaintext to the output buffer */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void pyjamask_96_encrypt - (const pyjamask_96_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - const uint32_t *rk = ks->k; - uint32_t s0, s1, s2; - uint8_t round; - - /* Load the plaintext from the input buffer */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - - /* Perform all encryption rounds */ - for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk += 3) { - /* Add the round key to the state */ - s0 ^= rk[0]; - s1 ^= rk[1]; - s2 ^= rk[2]; - - /* Apply the 96-bit Pyjamask sbox */ - s0 ^= s1; - s1 ^= s2; - s2 ^= s0 & s1; - s0 ^= s1 & s2; - s1 ^= s0 & s2; - s2 ^= s0; - s2 = ~s2; - s1 ^= s0; - s0 ^= s1; - - /* Mix the rows of the state */ - s0 = pyjamask_matrix_multiply(0xa3861085U, s0); - s1 = pyjamask_matrix_multiply(0x63417021U, s1); - s2 = pyjamask_matrix_multiply(0x692cf280U, s2); - } - - /* Mix in the key one last time */ - s0 ^= rk[0]; - s1 ^= rk[1]; - s2 ^= rk[2]; - - /* Write the ciphertext to the output buffer */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); -} - -void pyjamask_96_decrypt - (const pyjamask_96_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - const uint32_t *rk = ks->k + 3 * PYJAMASK_ROUNDS; - uint32_t s0, s1, s2; - uint8_t round; - - /* Load the plaintext from the input buffer */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - - /* Mix in the last round key */ - s0 ^= rk[0]; - s1 ^= rk[1]; - s2 ^= rk[2]; - rk -= 3; - - /* Perform all encryption rounds */ - for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk -= 3) { - /* Inverse mix of the rows in the state */ - s0 = pyjamask_matrix_multiply(0x2037a121U, s0); - s1 = pyjamask_matrix_multiply(0x108ff2a0U, s1); - s2 = pyjamask_matrix_multiply(0x9054d8c0U, s2); - - /* Apply the inverse of the 96-bit Pyjamask sbox */ - s0 ^= s1; - s1 ^= s0; - s2 = ~s2; - s2 ^= s0; - s1 ^= s0 & s2; - s0 ^= s1 & s2; - s2 ^= s0 & s1; - s1 ^= s2; - s0 ^= s1; - - /* Add the round key to the state */ - s0 ^= rk[0]; - s1 ^= rk[1]; - s2 ^= rk[2]; - } - - /* Write the ciphertext to the output buffer */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); -} - -#endif /* !__AVR__ */ diff --git a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-pyjamask.h b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-pyjamask.h deleted file mode 100644 index 3ead7fb..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-pyjamask.h +++ /dev/null @@ -1,253 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_PYJAMASK_H -#define LW_INTERNAL_PYJAMASK_H - -#include "internal-util.h" - -/** - * \file internal-pyjamask.h - * \brief Pyjamask block cipher. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Number of rounds in the Pyjamask block cipher. - */ -#define PYJAMASK_ROUNDS 14 - -/** - * \brief Number of parallel states for masked operation. - */ -#define PYJAMASK_MASKING_ORDER 4 - -/** - * \brief Structure of the key schedule for the Pyjamask-128 block cipher. - */ -typedef struct -{ - uint32_t k[(PYJAMASK_ROUNDS + 1) * 4]; /**< Words of the key schedule */ - -} pyjamask_128_key_schedule_t; - -/** - * \brief Structure of the key schedule for the Pyjamask-96 block cipher. - */ -typedef struct -{ - uint32_t k[(PYJAMASK_ROUNDS + 1) * 3]; /**< Words of the key schedule */ - -} pyjamask_96_key_schedule_t; - -/** - * \brief Structure of the key schedule for masked Pyjamask-128. - */ -typedef struct -{ - /** Words of the key schedule */ - uint32_t k[PYJAMASK_MASKING_ORDER * (PYJAMASK_ROUNDS + 1) * 4]; - -} pyjamask_masked_128_key_schedule_t; - -/** - * \brief Structure of the key schedule for masked Pyjamask-96. - */ -typedef struct -{ - /** Words of the key schedule */ - uint32_t k[PYJAMASK_MASKING_ORDER * (PYJAMASK_ROUNDS + 1) * 3]; - -} pyjamask_masked_96_key_schedule_t; - -/** - * \brief Sets up the key schedule for the Pyjamask-128 block cipher. - * - * \param ks The key schedule on output. - * \param key The 16 bytes of the key on input. - */ -void pyjamask_128_setup_key - (pyjamask_128_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Sets up the key schedule for the Pyjamask-96 block cipher. - * - * \param ks The key schedule on output. - * \param key The 16 bytes of the key on input. - */ -void pyjamask_96_setup_key - (pyjamask_96_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Encrypts a 128-bit block with Pyjamask-128. - * - * \param ks Points to the key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * \sa pyjamask_128_decrypt() - */ -void pyjamask_128_encrypt - (const pyjamask_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with Pyjamask-128. 
- * - * \param ks Points to the key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - * - * \sa pyjamask_128_encrypt() - */ -void pyjamask_128_decrypt - (const pyjamask_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 96-bit block with Pyjamask-96. - * - * \param ks Points to the key schedule. - * \param output Output buffer which must be at least 12 bytes in length. - * \param input Input buffer which must be at least 12 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * \sa pyjamask_96_decrypt() - */ -void pyjamask_96_encrypt - (const pyjamask_96_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 96-bit block with Pyjamask-96. - * - * \param ks Points to the key schedule. - * \param output Output buffer which must be at least 12 bytes in length. - * \param input Input buffer which must be at least 12 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - * - * \sa pyjamask_96_encrypt() - */ -void pyjamask_96_decrypt - (const pyjamask_96_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Sets up the key schedule for the masked Pyjamask-128 block cipher. - * - * \param ks The key schedule on output. - * \param key The 16 bytes of the key on input. - */ -void pyjamask_masked_128_setup_key - (pyjamask_masked_128_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Sets up the key schedule for the masked Pyjamask-96 block cipher. - * - * \param ks The key schedule on output. - * \param key The 16 bytes of the key on input. - */ -void pyjamask_masked_96_setup_key - (pyjamask_masked_96_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Encrypts a 128-bit block with Pyjamask-128 in masked mode. - * - * \param ks Points to the key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * \sa pyjamask_masked_128_decrypt() - */ -void pyjamask_masked_128_encrypt - (const pyjamask_masked_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with Pyjamask-128 in masked mode. - * - * \param ks Points to the key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - * - * \sa pyjamask_masked_128_encrypt() - */ -void pyjamask_masked_128_decrypt - (const pyjamask_masked_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 96-bit block with Pyjamask-96 in masked mode. - * - * \param ks Points to the key schedule. - * \param output Output buffer which must be at least 12 bytes in length. - * \param input Input buffer which must be at least 12 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. 
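A hedged usage sketch for the unmasked Pyjamask-128 API declared above; the key and plaintext bytes are arbitrary example values, not test vectors from the specification:

#include <string.h>

static int pyjamask_128_roundtrip_demo(void)
{
    pyjamask_128_key_schedule_t ks;
    unsigned char key[16] = {0};          /* example key */
    unsigned char pt[16]  = {1, 2, 3};    /* example plaintext block */
    unsigned char ct[16], out[16];

    pyjamask_128_setup_key(&ks, key);     /* expand the 128-bit key */
    pyjamask_128_encrypt(&ks, ct, pt);    /* encrypt one 16-byte block */
    pyjamask_128_decrypt(&ks, out, ct);   /* should invert the encryption */
    return memcmp(out, pt, 16) == 0;      /* expect 1 when the round trip matches */
}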
- * - * \sa pyjamask_masked_96_decrypt() - */ -void pyjamask_masked_96_encrypt - (const pyjamask_masked_96_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 96-bit block with Pyjamask-96 in masked mode. - * - * \param ks Points to the key schedule. - * \param output Output buffer which must be at least 12 bytes in length. - * \param input Input buffer which must be at least 12 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - * - * \sa pyjamask_masked_96_encrypt() - */ -void pyjamask_masked_96_decrypt - (const pyjamask_masked_96_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-util.h b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
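A small self-contained sketch of what the lw_xor_block_2_src macro above performs, shown for a 4-byte example; the names and byte values are hypothetical, not from the patch:

#include <stdint.h>

static void xor_block_2_src_demo(void)
{
    unsigned char pt[4] = {0x00, 0x11, 0x22, 0x33};   /* example plaintext bytes */
    unsigned char ks[4] = {0xaa, 0xbb, 0xcc, 0xdd};   /* example keystream bytes */
    unsigned char ct[4];
    for (unsigned i = 0; i < 4; ++i)
        ct[i] = pt[i] ^ ks[i];   /* the same byte-wise XOR the macro expands to */
    (void)ct;
}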
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
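An illustrative aside on the composed-rotation scheme above (a minimal sketch, not part of the patch; the rotl32/rotr32 helper names are placeholders rather than names from this header): AVR has no barrel shifter, so a 32-bit rotation by an arbitrary count is assembled from cheap byte-aligned rotations plus single-bit rotations, exactly as the LW_CRYPTO_ROTATE32_COMPOSED macros do. The sketch checks that the composition agrees with a direct rotation.

/* Sketch only: verifies that rotl by 5 equals rotl by 8 followed by three
 * single-bit rotr steps, and rotl by 12 equals rotl by 16 followed by four
 * single-bit rotr steps, mirroring leftRotate5/leftRotate12 in the composed
 * variant above. Helper names are illustrative, not from the library. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t rotl32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32u - bits));   /* bits must be 1..31 */
}

static uint32_t rotr32(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32u - bits));   /* bits must be 1..31 */
}

int main(void)
{
    uint32_t x = 0x12345678u;

    /* leftRotate5: cheap rotate left by 8, then three single-bit right rotates */
    uint32_t y = rotr32(rotr32(rotr32(rotl32(x, 8), 1), 1), 1);
    assert(y == rotl32(x, 5));

    /* leftRotate12: rotate left by 16, then four single-bit right rotates */
    y = rotl32(x, 16);
    for (int i = 0; i < 4; ++i)
        y = rotr32(y, 1);
    assert(y == rotl32(x, 12));

    printf("composed rotations match direct rotations\n");
    return 0;
}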
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/pyjamask-128.c b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/pyjamask-128.c deleted file mode 100644 index da0fac6..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/pyjamask-128.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "pyjamask.h" -#include "internal-pyjamask.h" - -aead_cipher_t const pyjamask_128_cipher = { - "Pyjamask-128-AEAD", - PYJAMASK_128_KEY_SIZE, - PYJAMASK_128_NONCE_SIZE, - PYJAMASK_128_TAG_SIZE, - AEAD_FLAG_NONE, - pyjamask_128_aead_encrypt, - pyjamask_128_aead_decrypt -}; - -#define OCB_ALG_NAME pyjamask_128 -#define OCB_BLOCK_SIZE 16 -#define OCB_NONCE_SIZE PYJAMASK_128_NONCE_SIZE -#define OCB_TAG_SIZE PYJAMASK_128_TAG_SIZE -#define OCB_KEY_SCHEDULE pyjamask_128_key_schedule_t -#define OCB_SETUP_KEY pyjamask_128_setup_key -#define OCB_ENCRYPT_BLOCK pyjamask_128_encrypt -#define OCB_DECRYPT_BLOCK pyjamask_128_decrypt -#include "internal-ocb.h" diff --git a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/pyjamask.h b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/pyjamask.h deleted file mode 100644 index 23ec744..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys-avr/pyjamask.h +++ /dev/null @@ -1,335 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_PYJAMASK_H -#define LWCRYPTO_PYJAMASK_H - -#include "aead-common.h" - -/** - * \file pyjamask.h - * \brief Pyjamask authenticated encryption algorithm. - * - * Pyjamask AEAD is a family of authenticated encryption algorithms that are - * built around the Pyjamask-128 and Pyjamask-96 block ciphers in OCB mode. - * Pyjamask-128-AEAD has a 128-bit key, a 96-bit nonce, and a 128-bit - * authentication tag. Pyjamask-96-AEAD has a 128-bit key, a 64-bit nonce, - * and a 96-bit authentication tag. - * - * References: https://pyjamask-cipher.github.io/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for Pyjamask-128-AEAD. - */ -#define PYJAMASK_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Pyjamask-128-AEAD. - */ -#define PYJAMASK_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Pyjamask-128-AEAD. - */ -#define PYJAMASK_128_NONCE_SIZE 12 - -/** - * \brief Size of the key for Pyjamask-96-AEAD. - */ -#define PYJAMASK_96_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Pyjamask-96-AEAD. - */ -#define PYJAMASK_96_TAG_SIZE 12 - -/** - * \brief Size of the nonce for Pyjamask-96-AEAD. - */ -#define PYJAMASK_96_NONCE_SIZE 8 - -/** - * \brief Meta-information block for the Pyjamask-128-AEAD cipher. - */ -extern aead_cipher_t const pyjamask_128_cipher; - -/** - * \brief Meta-information block for the Pyjamask-96-AEAD cipher. - */ -extern aead_cipher_t const pyjamask_96_cipher; - -/** - * \brief Meta-information block for the masked Pyjamask-128-AEAD cipher. - */ -extern aead_cipher_t const pyjamask_masked_128_cipher; - -/** - * \brief Meta-information block for the masked Pyjamask-96-AEAD cipher. - */ -extern aead_cipher_t const pyjamask_masked_96_cipher; - -/** - * \brief Encrypts and authenticates a packet with Pyjamask-128-AEAD. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. 
- * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa pyjamask_128_aead_decrypt() - */ -int pyjamask_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Pyjamask-128-AEAD. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa pyjamask_128_aead_encrypt() - */ -int pyjamask_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Pyjamask-96-AEAD. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 12 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 8 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa pyjamask_96_aead_decrypt() - */ -int pyjamask_96_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Pyjamask-96-AEAD. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. 
- * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 12 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 8 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa pyjamask_96_aead_encrypt() - */ -int pyjamask_96_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with masked Pyjamask-128-AEAD. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa pyjamask_masked_128_aead_decrypt() - */ -int pyjamask_masked_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with masked Pyjamask-128-AEAD. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- * - * \sa pyjamask_masked_128_aead_encrypt() - */ -int pyjamask_masked_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with masked Pyjamask-96-AEAD. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 12 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 8 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa pyjamask_masked_96_aead_decrypt() - */ -int pyjamask_masked_96_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with masked Pyjamask-96-AEAD. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 12 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 8 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
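An illustrative caller sketch for the AEAD interface documented in this header (not part of the patch; it assumes only the prototypes and PYJAMASK_96 size constants declared above, and that the library is built and linked). It exercises the documented contract: a 16-byte key, an 8-byte nonce, a ciphertext buffer large enough for the plaintext plus the 12-byte tag, and a decrypt call that returns 0 only when the tag verifies.

/* Sketch only: round-trips a short message through the masked
 * Pyjamask-96-AEAD entry points declared in this header. */
#include <stdio.h>
#include <string.h>
#include "pyjamask.h"

int main(void)
{
    unsigned char key[PYJAMASK_96_KEY_SIZE] = {0};
    unsigned char nonce[PYJAMASK_96_NONCE_SIZE] = {0};
    unsigned char msg[] = "hello";
    unsigned char ad[] = "header";
    unsigned char ct[sizeof(msg) + PYJAMASK_96_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen = 0, ptlen = 0;

    /* Encrypt: clen is set to mlen plus the 12-byte tag on success */
    if (pyjamask_masked_96_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                                        ad, sizeof(ad), NULL, nonce, key) != 0)
        return 1;

    /* Decrypt: a non-zero return means the tag or parameters were rejected */
    if (pyjamask_masked_96_aead_decrypt(pt, &ptlen, NULL, ct, ctlen,
                                        ad, sizeof(ad), nonce, key) != 0)
        return 1;

    printf("round trip %s\n",
           memcmp(pt, msg, sizeof(msg)) == 0 ? "ok" : "mismatch");
    return 0;
}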
- * - * \sa pyjamask_masked_96_aead_encrypt() - */ -int pyjamask_masked_96_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-ocb.h b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-ocb.h index de544ba..98f2a31 100644 --- a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-ocb.h +++ b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-ocb.h @@ -62,7 +62,9 @@ #define OCB_DOUBLE_L OCB_CONCAT(OCB_ALG_NAME,_double_l) -/* Double a value in GF(128) - default implementation */ +#if OCB_BLOCK_SIZE == 16 + +/* Double a value in GF(128) */ static void OCB_DOUBLE_L(unsigned char out[16], const unsigned char in[16]) { unsigned index; @@ -72,6 +74,24 @@ static void OCB_DOUBLE_L(unsigned char out[16], const unsigned char in[16]) out[15] = (in[15] << 1) ^ (mask & 0x87); } +#elif OCB_BLOCK_SIZE == 12 + +/* Double a value in GF(96) */ +static void OCB_DOUBLE_L + (unsigned char out[12], const unsigned char in[12]) +{ + unsigned index; + unsigned char mask = (unsigned char)(((signed char)in[0]) >> 7); + for (index = 0; index < 11; ++index) + out[index] = (in[index] << 1) | (in[index + 1] >> 7); + out[11] = (in[11] << 1) ^ (mask & 0x41); + out[10] ^= (mask & 0x06); +} + +#else +#error "Unknown block size for OCB" +#endif + #endif /* State information for OCB functions */ diff --git a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-pyjamask-avr.S b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-pyjamask-avr.S new file mode 100644 index 0000000..b7cc631 --- /dev/null +++ b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-pyjamask-avr.S @@ -0,0 +1,8883 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global pyjamask_96_setup_key + .type pyjamask_96_setup_key, @function +pyjamask_96_setup_key: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 18 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + mov r26,r1 +29: + movw r12,r18 + movw r14,r20 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r27,202 + mov r12,r27 + ldi r27,185 + mov r13,r27 + ldi r27,129 + mov r14,r27 + ldi r27,184 + mov r15,r27 + and r12,r0 + and r13,r0 + and r14,r0 + and r15,r0 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,229 + ldi r25,220 + ldi r16,64 + ldi r17,92 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + 
lsl r21 + sbc r0,r1 + ldi r24,114 + ldi r25,110 + ldi r16,32 + ldi r17,174 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,57 + ldi r25,55 + ldi r16,16 + ldi r17,87 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,156 + ldi r25,27 + ldi r16,136 + ldi r17,171 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,206 + ldi r25,13 + ldi r16,196 + ldi r17,85 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,231 + ldi r25,6 + ldi r16,226 + ldi r17,42 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,115 + ldi r25,3 + ldi r16,113 + ldi r17,149 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,185 + ldi r25,129 + ldi r16,184 + ldi r17,202 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,220 + ldi r25,64 + ldi r16,92 + ldi r17,229 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,110 + ldi r25,32 + ldi r16,174 + ldi r17,114 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,55 + ldi r25,16 + ldi r16,87 + ldi r17,57 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,27 + ldi r25,136 + ldi r16,171 + ldi r17,156 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,13 + ldi r25,196 + ldi r16,85 + ldi r17,206 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,6 + ldi r25,226 + ldi r16,42 + ldi r17,231 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,3 + ldi r25,113 + ldi r16,149 + ldi r17,115 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,129 + ldi r25,184 + ldi r16,202 + ldi r17,185 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,64 + ldi r25,92 + ldi r16,229 + ldi r17,220 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,32 + ldi r25,174 + ldi r16,114 + ldi r17,110 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,16 + ldi r25,87 + ldi r16,57 + ldi r17,55 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi 
r24,136 + ldi r25,171 + ldi r16,156 + ldi r17,27 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,196 + ldi r25,85 + ldi r16,206 + ldi r17,13 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,226 + ldi r25,42 + ldi r16,231 + ldi r17,6 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,113 + ldi r25,149 + ldi r16,115 + ldi r17,3 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,184 + ldi r25,202 + ldi r16,185 + ldi r17,129 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,92 + ldi r25,229 + ldi r16,220 + ldi r17,64 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,174 + ldi r25,114 + ldi r16,110 + ldi r17,32 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,87 + ldi r25,57 + ldi r16,55 + ldi r17,16 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,171 + ldi r25,156 + ldi r16,27 + ldi r17,136 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,85 + ldi r25,206 + ldi r16,13 + ldi r17,196 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,42 + ldi r25,231 + ldi r16,6 + ldi r17,226 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,149 + ldi r25,115 + ldi r16,3 + ldi r17,113 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + movw r18,r12 + movw r20,r14 + ldi r25,128 + eor r18,r25 + eor r18,r26 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + ldi r24,106 + eor r23,r24 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + lsl r4 + rol r5 + rol r6 + rol r7 + adc r4,r1 + ldi r17,63 + eor r6,r17 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + or r11,r0 + ldi r16,36 + eor r11,r16 + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + inc r26 + ldi r27,14 + cpse r26,r27 + rjmp 29b + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size pyjamask_96_setup_key, .-pyjamask_96_setup_key + + .text +.global pyjamask_96_encrypt + .type pyjamask_96_encrypt, @function +pyjamask_96_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in 
r28,0x3d + in r29,0x3e +.L__stack_usage = 16 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ldi r26,14 +13: + ld r8,Z+ + ld r9,Z+ + ld r10,Z+ + ld r11,Z+ + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + ld r8,Z+ + ld r9,Z+ + ld r10,Z+ + ld r11,Z+ + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + ld r8,Z+ + ld r9,Z+ + ld r10,Z+ + ld r11,Z+ + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r18 + and r0,r22 + eor r4,r0 + mov r0,r19 + and r0,r23 + eor r5,r0 + mov r0,r20 + and r0,r2 + eor r6,r0 + mov r0,r21 + and r0,r3 + eor r7,r0 + mov r0,r22 + and r0,r4 + eor r18,r0 + mov r0,r23 + and r0,r5 + eor r19,r0 + mov r0,r2 + and r0,r6 + eor r20,r0 + mov r0,r3 + and r0,r7 + eor r21,r0 + mov r0,r18 + and r0,r4 + eor r22,r0 + mov r0,r19 + and r0,r5 + eor r23,r0 + mov r0,r20 + and r0,r6 + eor r2,r0 + mov r0,r21 + and r0,r7 + eor r3,r0 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + com r4 + com r5 + com r6 + com r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r27,133 + mov r8,r27 + ldi r27,16 + mov r9,r27 + ldi r27,134 + mov r10,r27 + ldi r27,163 + mov r11,r27 + and r8,r0 + and r9,r0 + and r10,r0 + and r11,r0 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,66 + ldi r25,8 + ldi r16,195 + ldi r17,209 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,33 + ldi r25,132 + ldi r16,225 + ldi r17,104 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,16 + ldi r25,194 + ldi r16,112 + ldi r17,180 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,8 + ldi r25,97 + ldi r16,56 + ldi r17,90 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,132 + ldi r25,48 + ldi r16,28 + ldi r17,45 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,66 + ldi r25,24 + ldi r16,142 + ldi r17,22 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,33 + ldi r25,12 + ldi r16,71 + ldi r17,11 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,16 + ldi r25,134 + ldi r16,163 + ldi r17,133 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,8 + ldi r25,195 + ldi r16,209 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,132 + ldi r25,225 + ldi r16,104 + ldi r17,33 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,194 + ldi r25,112 + ldi r16,180 + ldi r17,16 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 
+ eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,97 + ldi r25,56 + ldi r16,90 + ldi r17,8 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,48 + ldi r25,28 + ldi r16,45 + ldi r17,132 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,24 + ldi r25,142 + ldi r16,22 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,12 + ldi r25,71 + ldi r16,11 + ldi r17,33 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,134 + ldi r25,163 + ldi r16,133 + ldi r17,16 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,195 + ldi r25,209 + ldi r16,66 + ldi r17,8 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,225 + ldi r25,104 + ldi r16,33 + ldi r17,132 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,112 + ldi r25,180 + ldi r16,16 + ldi r17,194 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,56 + ldi r25,90 + ldi r16,8 + ldi r17,97 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,28 + ldi r25,45 + ldi r16,132 + ldi r17,48 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,142 + ldi r25,22 + ldi r16,66 + ldi r17,24 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,71 + ldi r25,11 + ldi r16,33 + ldi r17,12 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,163 + ldi r25,133 + ldi r16,16 + ldi r17,134 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,209 + ldi r25,66 + ldi r16,8 + ldi r17,195 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,104 + ldi r25,33 + ldi r16,132 + ldi r17,225 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,180 + ldi r25,16 + ldi r16,194 + ldi r17,112 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,90 + ldi r25,8 + ldi r16,97 + ldi r17,56 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,45 + ldi r25,132 + ldi r16,48 + ldi r17,28 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,22 + ldi r25,66 + 
ldi r16,24 + ldi r17,142 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,11 + ldi r25,33 + ldi r16,12 + ldi r17,71 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + movw r18,r8 + movw r20,r10 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r27,33 + mov r8,r27 + ldi r27,112 + mov r9,r27 + ldi r27,65 + mov r10,r27 + ldi r27,99 + mov r11,r27 + and r8,r0 + and r9,r0 + and r10,r0 + and r11,r0 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,16 + ldi r25,184 + ldi r16,160 + ldi r17,177 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,8 + ldi r25,92 + ldi r16,208 + ldi r17,88 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,4 + ldi r25,46 + ldi r16,104 + ldi r17,44 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,2 + ldi r25,23 + ldi r16,52 + ldi r17,22 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,129 + ldi r25,11 + ldi r16,26 + ldi r17,11 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,192 + ldi r25,5 + ldi r16,141 + ldi r17,133 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,224 + ldi r25,130 + ldi r16,198 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,112 + ldi r25,65 + ldi r16,99 + ldi r17,33 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,184 + ldi r25,160 + ldi r16,177 + ldi r17,16 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,92 + ldi r25,208 + ldi r16,88 + ldi r17,8 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,46 + ldi r25,104 + ldi r16,44 + ldi r17,4 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,23 + ldi r25,52 + ldi r16,22 + ldi r17,2 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,11 + ldi r25,26 + ldi r16,11 + ldi r17,129 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,5 + ldi r25,141 + ldi r16,133 + ldi r17,192 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,130 + ldi r25,198 + ldi r16,66 + ldi r17,224 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,65 + ldi r25,99 + ldi r16,33 + ldi r17,112 + and r24,r0 + and r25,r0 + and r16,r0 + 
and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,160 + ldi r25,177 + ldi r16,16 + ldi r17,184 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,208 + ldi r25,88 + ldi r16,8 + ldi r17,92 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,104 + ldi r25,44 + ldi r16,4 + ldi r17,46 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,52 + ldi r25,22 + ldi r16,2 + ldi r17,23 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,26 + ldi r25,11 + ldi r16,129 + ldi r17,11 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,141 + ldi r25,133 + ldi r16,192 + ldi r17,5 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,198 + ldi r25,66 + ldi r16,224 + ldi r17,130 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,99 + ldi r25,33 + ldi r16,112 + ldi r17,65 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,177 + ldi r25,16 + ldi r16,184 + ldi r17,160 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,88 + ldi r25,8 + ldi r16,92 + ldi r17,208 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,44 + ldi r25,4 + ldi r16,46 + ldi r17,104 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,22 + ldi r25,2 + ldi r16,23 + ldi r17,52 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,11 + ldi r25,129 + ldi r16,11 + ldi r17,26 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,133 + ldi r25,192 + ldi r16,5 + ldi r17,141 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,66 + ldi r25,224 + ldi r16,130 + ldi r17,198 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + movw r22,r8 + movw r2,r10 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r27,128 + mov r8,r27 + ldi r27,242 + mov r9,r27 + ldi r27,44 + mov r10,r27 + ldi r27,105 + mov r11,r27 + and r8,r0 + and r9,r0 + and r10,r0 + and r11,r0 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,64 + ldi r25,121 + ldi r16,150 + ldi r17,52 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,160 + ldi r25,60 + ldi r16,75 + ldi r17,26 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + 
eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,80 + ldi r25,158 + ldi r16,37 + ldi r17,13 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,40 + ldi r25,207 + ldi r16,146 + ldi r17,6 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,148 + ldi r25,103 + ldi r16,73 + ldi r17,3 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,202 + ldi r25,179 + ldi r16,164 + ldi r17,1 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,229 + ldi r25,89 + ldi r16,210 + mov r17,r1 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,242 + ldi r25,44 + ldi r16,105 + ldi r17,128 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,121 + ldi r25,150 + ldi r16,52 + ldi r17,64 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,60 + ldi r25,75 + ldi r16,26 + ldi r17,160 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,158 + ldi r25,37 + ldi r16,13 + ldi r17,80 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,207 + ldi r25,146 + ldi r16,6 + ldi r17,40 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,103 + ldi r25,73 + ldi r16,3 + ldi r17,148 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,179 + ldi r25,164 + ldi r16,1 + ldi r17,202 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,89 + ldi r25,210 + mov r16,r1 + ldi r17,229 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,44 + ldi r25,105 + ldi r16,128 + ldi r17,242 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,150 + ldi r25,52 + ldi r16,64 + ldi r17,121 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,75 + ldi r25,26 + ldi r16,160 + ldi r17,60 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,37 + ldi r25,13 + ldi r16,80 + ldi r17,158 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,146 + ldi r25,6 + ldi r16,40 + ldi r17,207 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,73 + ldi r25,3 + ldi r16,148 + 
ldi r17,103 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,164 + ldi r25,1 + ldi r16,202 + ldi r17,179 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,210 + mov r25,r1 + ldi r16,229 + ldi r17,89 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,105 + ldi r25,128 + ldi r16,242 + ldi r17,44 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,52 + ldi r25,64 + ldi r16,121 + ldi r17,150 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,26 + ldi r25,160 + ldi r16,60 + ldi r17,75 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,13 + ldi r25,80 + ldi r16,158 + ldi r17,37 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,6 + ldi r25,40 + ldi r16,207 + ldi r17,146 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,3 + ldi r25,148 + ldi r16,103 + ldi r17,73 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,1 + ldi r25,202 + ldi r16,179 + ldi r17,164 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + mov r24,r1 + ldi r25,229 + ldi r16,89 + ldi r17,210 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + movw r4,r8 + movw r6,r10 + dec r26 + breq 6545f + rjmp 13b +6545: + ld r8,Z+ + ld r9,Z+ + ld r10,Z+ + ld r11,Z+ + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + ld r8,Z+ + ld r9,Z+ + ld r10,Z+ + ld r11,Z+ + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + ld r8,Z+ + ld r9,Z+ + ld r10,Z+ + ld r11,Z+ + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r21 + st X+,r20 + st X+,r19 + st X+,r18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + pop r0 + pop r0 + pop r17 + pop r16 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size pyjamask_96_encrypt, .-pyjamask_96_encrypt + + .text +.global pyjamask_96_decrypt + .type pyjamask_96_decrypt, @function +pyjamask_96_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 16 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + subi r30,76 + sbci r31,255 + ld r9,-Z + ld r8,-Z + ld r27,-Z + ld r26,-Z + eor r4,r26 + eor r5,r27 + eor r6,r8 + eor r7,r9 + ld r9,-Z + ld r8,-Z + ld r27,-Z + ld r26,-Z + eor r22,r26 + eor r23,r27 + eor r2,r8 + eor r3,r9 + ld r9,-Z + ld r8,-Z + ld r27,-Z + ld r26,-Z + eor r18,r26 + eor 
r19,r27 + eor r20,r8 + eor r21,r9 + ldi r26,14 +39: + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r27,33 + mov r8,r27 + ldi r27,161 + mov r9,r27 + ldi r27,55 + mov r10,r27 + ldi r27,32 + mov r11,r27 + and r8,r0 + and r9,r0 + and r10,r0 + and r11,r0 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,144 + ldi r25,208 + ldi r16,27 + ldi r17,144 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,72 + ldi r25,232 + ldi r16,13 + ldi r17,72 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,36 + ldi r25,244 + ldi r16,6 + ldi r17,36 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,18 + ldi r25,122 + ldi r16,3 + ldi r17,18 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,9 + ldi r25,189 + ldi r16,1 + ldi r17,9 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,132 + ldi r25,222 + ldi r16,128 + ldi r17,132 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,66 + ldi r25,111 + ldi r16,64 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,161 + ldi r25,55 + ldi r16,32 + ldi r17,33 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,208 + ldi r25,27 + ldi r16,144 + ldi r17,144 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,232 + ldi r25,13 + ldi r16,72 + ldi r17,72 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,244 + ldi r25,6 + ldi r16,36 + ldi r17,36 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,122 + ldi r25,3 + ldi r16,18 + ldi r17,18 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,189 + ldi r25,1 + ldi r16,9 + ldi r17,9 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,222 + ldi r25,128 + ldi r16,132 + ldi r17,132 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,111 + ldi r25,64 + ldi r16,66 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,55 + ldi r25,32 + ldi r16,33 + ldi r17,161 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,27 + ldi r25,144 + ldi r16,144 + ldi r17,208 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc 
r0,r1 + ldi r24,13 + ldi r25,72 + ldi r16,72 + ldi r17,232 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,6 + ldi r25,36 + ldi r16,36 + ldi r17,244 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,3 + ldi r25,18 + ldi r16,18 + ldi r17,122 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,1 + ldi r25,9 + ldi r16,9 + ldi r17,189 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,128 + ldi r25,132 + ldi r16,132 + ldi r17,222 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,64 + ldi r25,66 + ldi r16,66 + ldi r17,111 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,32 + ldi r25,33 + ldi r16,161 + ldi r17,55 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,144 + ldi r25,144 + ldi r16,208 + ldi r17,27 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,72 + ldi r25,72 + ldi r16,232 + ldi r17,13 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,36 + ldi r25,36 + ldi r16,244 + ldi r17,6 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,18 + ldi r25,18 + ldi r16,122 + ldi r17,3 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,9 + ldi r25,9 + ldi r16,189 + ldi r17,1 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,132 + ldi r25,132 + ldi r16,222 + ldi r17,128 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,66 + ldi r25,66 + ldi r16,111 + ldi r17,64 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + movw r18,r8 + movw r20,r10 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r27,160 + mov r8,r27 + ldi r27,242 + mov r9,r27 + ldi r27,143 + mov r10,r27 + ldi r27,16 + mov r11,r27 + and r8,r0 + and r9,r0 + and r10,r0 + and r11,r0 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,80 + ldi r25,249 + ldi r16,71 + ldi r17,8 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,168 + ldi r25,252 + ldi r16,35 + ldi r17,4 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,84 + ldi r25,254 + ldi r16,17 + ldi r17,2 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,42 + ldi r25,255 + ldi r16,8 + ldi r17,1 + 
and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,149 + ldi r25,127 + ldi r16,132 + mov r17,r1 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,202 + ldi r25,63 + ldi r16,66 + ldi r17,128 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,229 + ldi r25,31 + ldi r16,33 + ldi r17,64 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,242 + ldi r25,143 + ldi r16,16 + ldi r17,160 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,249 + ldi r25,71 + ldi r16,8 + ldi r17,80 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,252 + ldi r25,35 + ldi r16,4 + ldi r17,168 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,254 + ldi r25,17 + ldi r16,2 + ldi r17,84 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,255 + ldi r25,8 + ldi r16,1 + ldi r17,42 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,127 + ldi r25,132 + mov r16,r1 + ldi r17,149 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,63 + ldi r25,66 + ldi r16,128 + ldi r17,202 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,31 + ldi r25,33 + ldi r16,64 + ldi r17,229 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,143 + ldi r25,16 + ldi r16,160 + ldi r17,242 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,71 + ldi r25,8 + ldi r16,80 + ldi r17,249 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,35 + ldi r25,4 + ldi r16,168 + ldi r17,252 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,17 + ldi r25,2 + ldi r16,84 + ldi r17,254 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,8 + ldi r25,1 + ldi r16,42 + ldi r17,255 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,132 + mov r25,r1 + ldi r16,149 + ldi r17,127 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,66 + ldi r25,128 + ldi r16,202 + ldi r17,63 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor 
r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,33 + ldi r25,64 + ldi r16,229 + ldi r17,31 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,16 + ldi r25,160 + ldi r16,242 + ldi r17,143 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,8 + ldi r25,80 + ldi r16,249 + ldi r17,71 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,4 + ldi r25,168 + ldi r16,252 + ldi r17,35 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,2 + ldi r25,84 + ldi r16,254 + ldi r17,17 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,1 + ldi r25,42 + ldi r16,255 + ldi r17,8 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + mov r24,r1 + ldi r25,149 + ldi r16,127 + ldi r17,132 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,128 + ldi r25,202 + ldi r16,63 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,64 + ldi r25,229 + ldi r16,31 + ldi r17,33 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + movw r22,r8 + movw r2,r10 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r27,192 + mov r8,r27 + ldi r27,216 + mov r9,r27 + ldi r27,84 + mov r10,r27 + ldi r27,144 + mov r11,r27 + and r8,r0 + and r9,r0 + and r10,r0 + and r11,r0 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,96 + ldi r25,108 + ldi r16,42 + ldi r17,72 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,48 + ldi r25,54 + ldi r16,21 + ldi r17,36 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,24 + ldi r25,155 + ldi r16,10 + ldi r17,18 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,140 + ldi r25,77 + ldi r16,5 + ldi r17,9 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,198 + ldi r25,166 + ldi r16,130 + ldi r17,4 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,99 + ldi r25,83 + ldi r16,65 + ldi r17,2 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,177 + ldi r25,169 + ldi r16,32 + ldi r17,129 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,216 + ldi r25,84 + ldi r16,144 + ldi r17,192 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi 
r24,108 + ldi r25,42 + ldi r16,72 + ldi r17,96 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,54 + ldi r25,21 + ldi r16,36 + ldi r17,48 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,155 + ldi r25,10 + ldi r16,18 + ldi r17,24 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,77 + ldi r25,5 + ldi r16,9 + ldi r17,140 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,166 + ldi r25,130 + ldi r16,4 + ldi r17,198 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,83 + ldi r25,65 + ldi r16,2 + ldi r17,99 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,169 + ldi r25,32 + ldi r16,129 + ldi r17,177 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,84 + ldi r25,144 + ldi r16,192 + ldi r17,216 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,42 + ldi r25,72 + ldi r16,96 + ldi r17,108 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,21 + ldi r25,36 + ldi r16,48 + ldi r17,54 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,10 + ldi r25,18 + ldi r16,24 + ldi r17,155 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,5 + ldi r25,9 + ldi r16,140 + ldi r17,77 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,130 + ldi r25,4 + ldi r16,198 + ldi r17,166 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,65 + ldi r25,2 + ldi r16,99 + ldi r17,83 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,32 + ldi r25,129 + ldi r16,177 + ldi r17,169 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,144 + ldi r25,192 + ldi r16,216 + ldi r17,84 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,72 + ldi r25,96 + ldi r16,108 + ldi r17,42 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,36 + ldi r25,48 + ldi r16,54 + ldi r17,21 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,18 + ldi r25,24 + ldi r16,155 + ldi r17,10 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 
+ eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,9 + ldi r25,140 + ldi r16,77 + ldi r17,5 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,4 + ldi r25,198 + ldi r16,166 + ldi r17,130 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,2 + ldi r25,99 + ldi r16,83 + ldi r17,65 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,129 + ldi r25,177 + ldi r16,169 + ldi r17,32 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + movw r4,r8 + movw r6,r10 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + com r4 + com r5 + com r6 + com r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r18 + and r0,r4 + eor r22,r0 + mov r0,r19 + and r0,r5 + eor r23,r0 + mov r0,r20 + and r0,r6 + eor r2,r0 + mov r0,r21 + and r0,r7 + eor r3,r0 + mov r0,r22 + and r0,r4 + eor r18,r0 + mov r0,r23 + and r0,r5 + eor r19,r0 + mov r0,r2 + and r0,r6 + eor r20,r0 + mov r0,r3 + and r0,r7 + eor r21,r0 + mov r0,r18 + and r0,r22 + eor r4,r0 + mov r0,r19 + and r0,r23 + eor r5,r0 + mov r0,r20 + and r0,r2 + eor r6,r0 + mov r0,r21 + and r0,r3 + eor r7,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + ld r11,-Z + ld r10,-Z + ld r9,-Z + ld r8,-Z + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + ld r11,-Z + ld r10,-Z + ld r9,-Z + ld r8,-Z + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + ld r11,-Z + ld r10,-Z + ld r9,-Z + ld r8,-Z + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + dec r26 + breq 6571f + rjmp 39b +6571: + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r21 + st X+,r20 + st X+,r19 + st X+,r18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + pop r0 + pop r0 + pop r17 + pop r16 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size pyjamask_96_decrypt, .-pyjamask_96_decrypt + + .text +.global pyjamask_128_setup_key + .type pyjamask_128_setup_key, @function +pyjamask_128_setup_key: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 18 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r26,r1 +33: + movw r12,r18 + movw r14,r20 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r27,202 + mov r12,r27 + ldi r27,185 + mov r13,r27 + ldi r27,129 + mov r14,r27 + ldi 
r27,184 + mov r15,r27 + and r12,r0 + and r13,r0 + and r14,r0 + and r15,r0 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,229 + ldi r25,220 + ldi r16,64 + ldi r17,92 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,114 + ldi r25,110 + ldi r16,32 + ldi r17,174 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,57 + ldi r25,55 + ldi r16,16 + ldi r17,87 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,156 + ldi r25,27 + ldi r16,136 + ldi r17,171 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,206 + ldi r25,13 + ldi r16,196 + ldi r17,85 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,231 + ldi r25,6 + ldi r16,226 + ldi r17,42 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,115 + ldi r25,3 + ldi r16,113 + ldi r17,149 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,185 + ldi r25,129 + ldi r16,184 + ldi r17,202 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,220 + ldi r25,64 + ldi r16,92 + ldi r17,229 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,110 + ldi r25,32 + ldi r16,174 + ldi r17,114 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,55 + ldi r25,16 + ldi r16,87 + ldi r17,57 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,27 + ldi r25,136 + ldi r16,171 + ldi r17,156 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,13 + ldi r25,196 + ldi r16,85 + ldi r17,206 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,6 + ldi r25,226 + ldi r16,42 + ldi r17,231 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,3 + ldi r25,113 + ldi r16,149 + ldi r17,115 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,129 + ldi r25,184 + ldi r16,202 + ldi r17,185 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,64 + ldi r25,92 + ldi r16,229 + ldi r17,220 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,32 + ldi r25,174 + ldi r16,114 + ldi r17,110 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor 
r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,16 + ldi r25,87 + ldi r16,57 + ldi r17,55 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,136 + ldi r25,171 + ldi r16,156 + ldi r17,27 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,196 + ldi r25,85 + ldi r16,206 + ldi r17,13 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,226 + ldi r25,42 + ldi r16,231 + ldi r17,6 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,113 + ldi r25,149 + ldi r16,115 + ldi r17,3 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,184 + ldi r25,202 + ldi r16,185 + ldi r17,129 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,92 + ldi r25,229 + ldi r16,220 + ldi r17,64 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,174 + ldi r25,114 + ldi r16,110 + ldi r17,32 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,87 + ldi r25,57 + ldi r16,55 + ldi r17,16 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,171 + ldi r25,156 + ldi r16,27 + ldi r17,136 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,85 + ldi r25,206 + ldi r16,13 + ldi r17,196 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,42 + ldi r25,231 + ldi r16,6 + ldi r17,226 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,149 + ldi r25,115 + ldi r16,3 + ldi r17,113 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + movw r18,r12 + movw r20,r14 + ldi r25,128 + eor r18,r25 + eor r18,r26 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + ldi r24,106 + eor r23,r24 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + lsl r4 + rol r5 + rol r6 + rol r7 + adc r4,r1 + ldi r17,63 + eor r6,r17 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + or r11,r0 + ldi r16,36 + eor r11,r16 + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + inc r26 + ldi r27,14 + cpse r26,r27 + rjmp 33b + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size pyjamask_128_setup_key, 
.-pyjamask_128_setup_key + + .text +.global pyjamask_128_encrypt + .type pyjamask_128_encrypt, @function +pyjamask_128_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 20 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ldi r26,14 +17: + ld r12,Z+ + ld r13,Z+ + ld r14,Z+ + ld r15,Z+ + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + ld r12,Z+ + ld r13,Z+ + ld r14,Z+ + ld r15,Z+ + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ld r12,Z+ + ld r13,Z+ + ld r14,Z+ + ld r15,Z+ + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + ld r12,Z+ + ld r13,Z+ + ld r14,Z+ + ld r15,Z+ + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + mov r0,r18 + and r0,r22 + eor r8,r0 + mov r0,r19 + and r0,r23 + eor r9,r0 + mov r0,r20 + and r0,r2 + eor r10,r0 + mov r0,r21 + and r0,r3 + eor r11,r0 + mov r0,r22 + and r0,r4 + eor r18,r0 + mov r0,r23 + and r0,r5 + eor r19,r0 + mov r0,r2 + and r0,r6 + eor r20,r0 + mov r0,r3 + and r0,r7 + eor r21,r0 + mov r0,r4 + and r0,r8 + eor r22,r0 + mov r0,r5 + and r0,r9 + eor r23,r0 + mov r0,r6 + and r0,r10 + eor r2,r0 + mov r0,r7 + and r0,r11 + eor r3,r0 + mov r0,r18 + and r0,r8 + eor r4,r0 + mov r0,r19 + and r0,r9 + eor r5,r0 + mov r0,r20 + and r0,r10 + eor r6,r0 + mov r0,r21 + and r0,r11 + eor r7,r0 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + com r8 + com r9 + com r10 + com r11 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r27,133 + mov r12,r27 + ldi r27,16 + mov r13,r27 + ldi r27,134 + mov r14,r27 + ldi r27,163 + mov r15,r27 + and r12,r0 + and r13,r0 + and r14,r0 + and r15,r0 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,66 + ldi r25,8 + ldi r16,195 + ldi r17,209 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,33 + ldi r25,132 + ldi r16,225 + ldi r17,104 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,16 + ldi r25,194 + ldi r16,112 + ldi r17,180 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,8 + ldi r25,97 + ldi r16,56 + ldi r17,90 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,132 + ldi r25,48 + ldi r16,28 + ldi r17,45 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,66 + ldi r25,24 + ldi r16,142 + ldi r17,22 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,33 + ldi r25,12 + ldi r16,71 + ldi r17,11 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + 
lsl r20 + sbc r0,r1 + ldi r24,16 + ldi r25,134 + ldi r16,163 + ldi r17,133 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,8 + ldi r25,195 + ldi r16,209 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,132 + ldi r25,225 + ldi r16,104 + ldi r17,33 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,194 + ldi r25,112 + ldi r16,180 + ldi r17,16 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,97 + ldi r25,56 + ldi r16,90 + ldi r17,8 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,48 + ldi r25,28 + ldi r16,45 + ldi r17,132 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,24 + ldi r25,142 + ldi r16,22 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,12 + ldi r25,71 + ldi r16,11 + ldi r17,33 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,134 + ldi r25,163 + ldi r16,133 + ldi r17,16 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,195 + ldi r25,209 + ldi r16,66 + ldi r17,8 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,225 + ldi r25,104 + ldi r16,33 + ldi r17,132 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,112 + ldi r25,180 + ldi r16,16 + ldi r17,194 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,56 + ldi r25,90 + ldi r16,8 + ldi r17,97 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,28 + ldi r25,45 + ldi r16,132 + ldi r17,48 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,142 + ldi r25,22 + ldi r16,66 + ldi r17,24 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,71 + ldi r25,11 + ldi r16,33 + ldi r17,12 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,163 + ldi r25,133 + ldi r16,16 + ldi r17,134 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,209 + ldi r25,66 + ldi r16,8 + ldi r17,195 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,104 + 
ldi r25,33 + ldi r16,132 + ldi r17,225 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,180 + ldi r25,16 + ldi r16,194 + ldi r17,112 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,90 + ldi r25,8 + ldi r16,97 + ldi r17,56 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,45 + ldi r25,132 + ldi r16,48 + ldi r17,28 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,22 + ldi r25,66 + ldi r16,24 + ldi r17,142 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,11 + ldi r25,33 + ldi r16,12 + ldi r17,71 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + movw r18,r12 + movw r20,r14 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r27,33 + mov r12,r27 + ldi r27,112 + mov r13,r27 + ldi r27,65 + mov r14,r27 + ldi r27,99 + mov r15,r27 + and r12,r0 + and r13,r0 + and r14,r0 + and r15,r0 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,16 + ldi r25,184 + ldi r16,160 + ldi r17,177 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,8 + ldi r25,92 + ldi r16,208 + ldi r17,88 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,4 + ldi r25,46 + ldi r16,104 + ldi r17,44 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,2 + ldi r25,23 + ldi r16,52 + ldi r17,22 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,129 + ldi r25,11 + ldi r16,26 + ldi r17,11 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,192 + ldi r25,5 + ldi r16,141 + ldi r17,133 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,224 + ldi r25,130 + ldi r16,198 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,112 + ldi r25,65 + ldi r16,99 + ldi r17,33 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,184 + ldi r25,160 + ldi r16,177 + ldi r17,16 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,92 + ldi r25,208 + ldi r16,88 + ldi r17,8 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,46 + ldi r25,104 + ldi r16,44 + ldi r17,4 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,23 + ldi r25,52 + ldi r16,22 + 
ldi r17,2 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,11 + ldi r25,26 + ldi r16,11 + ldi r17,129 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,5 + ldi r25,141 + ldi r16,133 + ldi r17,192 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,130 + ldi r25,198 + ldi r16,66 + ldi r17,224 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,65 + ldi r25,99 + ldi r16,33 + ldi r17,112 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,160 + ldi r25,177 + ldi r16,16 + ldi r17,184 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,208 + ldi r25,88 + ldi r16,8 + ldi r17,92 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,104 + ldi r25,44 + ldi r16,4 + ldi r17,46 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,52 + ldi r25,22 + ldi r16,2 + ldi r17,23 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,26 + ldi r25,11 + ldi r16,129 + ldi r17,11 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,141 + ldi r25,133 + ldi r16,192 + ldi r17,5 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,198 + ldi r25,66 + ldi r16,224 + ldi r17,130 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,99 + ldi r25,33 + ldi r16,112 + ldi r17,65 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,177 + ldi r25,16 + ldi r16,184 + ldi r17,160 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,88 + ldi r25,8 + ldi r16,92 + ldi r17,208 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,44 + ldi r25,4 + ldi r16,46 + ldi r17,104 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,22 + ldi r25,2 + ldi r16,23 + ldi r17,52 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,11 + ldi r25,129 + ldi r16,11 + ldi r17,26 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,133 + ldi r25,192 + ldi r16,5 + ldi r17,141 + and r24,r0 + and r25,r0 + and 
r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,66 + ldi r25,224 + ldi r16,130 + ldi r17,198 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + movw r22,r12 + movw r2,r14 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r27,128 + mov r12,r27 + ldi r27,242 + mov r13,r27 + ldi r27,44 + mov r14,r27 + ldi r27,105 + mov r15,r27 + and r12,r0 + and r13,r0 + and r14,r0 + and r15,r0 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,64 + ldi r25,121 + ldi r16,150 + ldi r17,52 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,160 + ldi r25,60 + ldi r16,75 + ldi r17,26 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,80 + ldi r25,158 + ldi r16,37 + ldi r17,13 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,40 + ldi r25,207 + ldi r16,146 + ldi r17,6 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,148 + ldi r25,103 + ldi r16,73 + ldi r17,3 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,202 + ldi r25,179 + ldi r16,164 + ldi r17,1 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,229 + ldi r25,89 + ldi r16,210 + mov r17,r1 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,242 + ldi r25,44 + ldi r16,105 + ldi r17,128 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,121 + ldi r25,150 + ldi r16,52 + ldi r17,64 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,60 + ldi r25,75 + ldi r16,26 + ldi r17,160 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,158 + ldi r25,37 + ldi r16,13 + ldi r17,80 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,207 + ldi r25,146 + ldi r16,6 + ldi r17,40 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,103 + ldi r25,73 + ldi r16,3 + ldi r17,148 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,179 + ldi r25,164 + ldi r16,1 + ldi r17,202 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,89 + ldi r25,210 + mov r16,r1 + ldi r17,229 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,44 + ldi r25,105 + ldi r16,128 + ldi r17,242 + and r24,r0 + and r25,r0 + and r16,r0 + and 
r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,150 + ldi r25,52 + ldi r16,64 + ldi r17,121 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,75 + ldi r25,26 + ldi r16,160 + ldi r17,60 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,37 + ldi r25,13 + ldi r16,80 + ldi r17,158 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,146 + ldi r25,6 + ldi r16,40 + ldi r17,207 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,73 + ldi r25,3 + ldi r16,148 + ldi r17,103 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,164 + ldi r25,1 + ldi r16,202 + ldi r17,179 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,210 + mov r25,r1 + ldi r16,229 + ldi r17,89 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,105 + ldi r25,128 + ldi r16,242 + ldi r17,44 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,52 + ldi r25,64 + ldi r16,121 + ldi r17,150 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,26 + ldi r25,160 + ldi r16,60 + ldi r17,75 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,13 + ldi r25,80 + ldi r16,158 + ldi r17,37 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,6 + ldi r25,40 + ldi r16,207 + ldi r17,146 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,3 + ldi r25,148 + ldi r16,103 + ldi r17,73 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,1 + ldi r25,202 + ldi r16,179 + ldi r17,164 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + mov r24,r1 + ldi r25,229 + ldi r16,89 + ldi r17,210 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + movw r4,r12 + movw r6,r14 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r27,19 + mov r12,r27 + ldi r27,72 + mov r13,r27 + ldi r27,165 + mov r14,r27 + ldi r27,72 + mov r15,r27 + and r12,r0 + and r13,r0 + and r14,r0 + and r15,r0 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,9 + ldi r25,164 + ldi r16,82 + ldi r17,164 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,4 + ldi r25,82 + ldi r16,41 + ldi r17,210 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + 
eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,2 + ldi r25,169 + ldi r16,20 + ldi r17,105 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,129 + ldi r25,84 + ldi r16,138 + ldi r17,52 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,64 + ldi r25,42 + ldi r16,69 + ldi r17,154 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,32 + ldi r25,149 + ldi r16,34 + ldi r17,77 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,144 + ldi r25,74 + ldi r16,145 + ldi r17,38 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,72 + ldi r25,165 + ldi r16,72 + ldi r17,19 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,164 + ldi r25,82 + ldi r16,164 + ldi r17,9 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,82 + ldi r25,41 + ldi r16,210 + ldi r17,4 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,169 + ldi r25,20 + ldi r16,105 + ldi r17,2 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,84 + ldi r25,138 + ldi r16,52 + ldi r17,129 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,42 + ldi r25,69 + ldi r16,154 + ldi r17,64 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,149 + ldi r25,34 + ldi r16,77 + ldi r17,32 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,74 + ldi r25,145 + ldi r16,38 + ldi r17,144 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,165 + ldi r25,72 + ldi r16,19 + ldi r17,72 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,82 + ldi r25,164 + ldi r16,9 + ldi r17,164 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,41 + ldi r25,210 + ldi r16,4 + ldi r17,82 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,20 + ldi r25,105 + ldi r16,2 + ldi r17,169 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,138 + ldi r25,52 + ldi r16,129 + ldi r17,84 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov 
r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,69 + ldi r25,154 + ldi r16,64 + ldi r17,42 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,34 + ldi r25,77 + ldi r16,32 + ldi r17,149 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,145 + ldi r25,38 + ldi r16,144 + ldi r17,74 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,72 + ldi r25,19 + ldi r16,72 + ldi r17,165 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,164 + ldi r25,9 + ldi r16,164 + ldi r17,82 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,210 + ldi r25,4 + ldi r16,82 + ldi r17,41 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,105 + ldi r25,2 + ldi r16,169 + ldi r17,20 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,52 + ldi r25,129 + ldi r16,84 + ldi r17,138 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,154 + ldi r25,64 + ldi r16,42 + ldi r17,69 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,77 + ldi r25,32 + ldi r16,149 + ldi r17,34 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,38 + ldi r25,144 + ldi r16,74 + ldi r17,145 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + movw r8,r12 + movw r10,r14 + dec r26 + breq 7055f + rjmp 17b +7055: + ld r12,Z+ + ld r13,Z+ + ld r14,Z+ + ld r15,Z+ + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + ld r12,Z+ + ld r13,Z+ + ld r14,Z+ + ld r15,Z+ + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ld r12,Z+ + ld r13,Z+ + ld r14,Z+ + ld r15,Z+ + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + ld r12,Z+ + ld r13,Z+ + ld r14,Z+ + ld r15,Z+ + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r21 + st X+,r20 + st X+,r19 + st X+,r18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + pop r0 + pop r0 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size pyjamask_128_encrypt, .-pyjamask_128_encrypt + + .text +.global pyjamask_128_decrypt + .type pyjamask_128_decrypt, @function +pyjamask_128_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 20 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld 
r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + subi r30,16 + sbci r31,255 + ld r13,-Z + ld r12,-Z + ld r27,-Z + ld r26,-Z + eor r8,r26 + eor r9,r27 + eor r10,r12 + eor r11,r13 + ld r13,-Z + ld r12,-Z + ld r27,-Z + ld r26,-Z + eor r4,r26 + eor r5,r27 + eor r6,r12 + eor r7,r13 + ld r13,-Z + ld r12,-Z + ld r27,-Z + ld r26,-Z + eor r22,r26 + eor r23,r27 + eor r2,r12 + eor r3,r13 + ld r13,-Z + ld r12,-Z + ld r27,-Z + ld r26,-Z + eor r18,r26 + eor r19,r27 + eor r20,r12 + eor r21,r13 + ldi r26,14 +51: + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r27,33 + mov r12,r27 + ldi r27,161 + mov r13,r27 + ldi r27,55 + mov r14,r27 + ldi r27,32 + mov r15,r27 + and r12,r0 + and r13,r0 + and r14,r0 + and r15,r0 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,144 + ldi r25,208 + ldi r16,27 + ldi r17,144 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,72 + ldi r25,232 + ldi r16,13 + ldi r17,72 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,36 + ldi r25,244 + ldi r16,6 + ldi r17,36 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,18 + ldi r25,122 + ldi r16,3 + ldi r17,18 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,9 + ldi r25,189 + ldi r16,1 + ldi r17,9 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,132 + ldi r25,222 + ldi r16,128 + ldi r17,132 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,66 + ldi r25,111 + ldi r16,64 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,161 + ldi r25,55 + ldi r16,32 + ldi r17,33 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,208 + ldi r25,27 + ldi r16,144 + ldi r17,144 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,232 + ldi r25,13 + ldi r16,72 + ldi r17,72 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,244 + ldi r25,6 + ldi r16,36 + ldi r17,36 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,122 + ldi r25,3 + ldi r16,18 + ldi r17,18 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,189 + ldi r25,1 + ldi r16,9 + ldi r17,9 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,222 + ldi r25,128 + ldi r16,132 + ldi r17,132 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,111 + ldi r25,64 + ldi r16,66 + 
ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,55 + ldi r25,32 + ldi r16,33 + ldi r17,161 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,27 + ldi r25,144 + ldi r16,144 + ldi r17,208 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,13 + ldi r25,72 + ldi r16,72 + ldi r17,232 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,6 + ldi r25,36 + ldi r16,36 + ldi r17,244 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,3 + ldi r25,18 + ldi r16,18 + ldi r17,122 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,1 + ldi r25,9 + ldi r16,9 + ldi r17,189 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,128 + ldi r25,132 + ldi r16,132 + ldi r17,222 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,64 + ldi r25,66 + ldi r16,66 + ldi r17,111 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,32 + ldi r25,33 + ldi r16,161 + ldi r17,55 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,144 + ldi r25,144 + ldi r16,208 + ldi r17,27 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,72 + ldi r25,72 + ldi r16,232 + ldi r17,13 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,36 + ldi r25,36 + ldi r16,244 + ldi r17,6 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,18 + ldi r25,18 + ldi r16,122 + ldi r17,3 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,9 + ldi r25,9 + ldi r16,189 + ldi r17,1 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,132 + ldi r25,132 + ldi r16,222 + ldi r17,128 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,66 + ldi r25,66 + ldi r16,111 + ldi r17,64 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + movw r18,r12 + movw r20,r14 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r27,160 + mov r12,r27 + ldi r27,242 + mov r13,r27 + ldi r27,143 + mov r14,r27 + ldi r27,16 + mov r15,r27 + and r12,r0 + and r13,r0 + and r14,r0 + and r15,r0 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,80 + ldi r25,249 + ldi r16,71 + ldi r17,8 + 
and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,168 + ldi r25,252 + ldi r16,35 + ldi r17,4 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,84 + ldi r25,254 + ldi r16,17 + ldi r17,2 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,42 + ldi r25,255 + ldi r16,8 + ldi r17,1 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,149 + ldi r25,127 + ldi r16,132 + mov r17,r1 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,202 + ldi r25,63 + ldi r16,66 + ldi r17,128 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,229 + ldi r25,31 + ldi r16,33 + ldi r17,64 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,242 + ldi r25,143 + ldi r16,16 + ldi r17,160 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,249 + ldi r25,71 + ldi r16,8 + ldi r17,80 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,252 + ldi r25,35 + ldi r16,4 + ldi r17,168 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,254 + ldi r25,17 + ldi r16,2 + ldi r17,84 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,255 + ldi r25,8 + ldi r16,1 + ldi r17,42 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,127 + ldi r25,132 + mov r16,r1 + ldi r17,149 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,63 + ldi r25,66 + ldi r16,128 + ldi r17,202 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,31 + ldi r25,33 + ldi r16,64 + ldi r17,229 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,143 + ldi r25,16 + ldi r16,160 + ldi r17,242 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,71 + ldi r25,8 + ldi r16,80 + ldi r17,249 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,35 + ldi r25,4 + ldi r16,168 + ldi r17,252 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,17 + ldi r25,2 + ldi r16,84 + ldi r17,254 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor 
r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,8 + ldi r25,1 + ldi r16,42 + ldi r17,255 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,132 + mov r25,r1 + ldi r16,149 + ldi r17,127 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,66 + ldi r25,128 + ldi r16,202 + ldi r17,63 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,33 + ldi r25,64 + ldi r16,229 + ldi r17,31 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,16 + ldi r25,160 + ldi r16,242 + ldi r17,143 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,8 + ldi r25,80 + ldi r16,249 + ldi r17,71 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,4 + ldi r25,168 + ldi r16,252 + ldi r17,35 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,2 + ldi r25,84 + ldi r16,254 + ldi r17,17 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,1 + ldi r25,42 + ldi r16,255 + ldi r17,8 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + mov r24,r1 + ldi r25,149 + ldi r16,127 + ldi r17,132 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,128 + ldi r25,202 + ldi r16,63 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,64 + ldi r25,229 + ldi r16,31 + ldi r17,33 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + movw r22,r12 + movw r2,r14 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r27,192 + mov r12,r27 + ldi r27,216 + mov r13,r27 + ldi r27,84 + mov r14,r27 + ldi r27,144 + mov r15,r27 + and r12,r0 + and r13,r0 + and r14,r0 + and r15,r0 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,96 + ldi r25,108 + ldi r16,42 + ldi r17,72 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,48 + ldi r25,54 + ldi r16,21 + ldi r17,36 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,24 + ldi r25,155 + ldi r16,10 + ldi r17,18 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,140 + ldi r25,77 + ldi r16,5 + ldi r17,9 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,198 + ldi r25,166 + ldi r16,130 + ldi r17,4 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor 
r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,99 + ldi r25,83 + ldi r16,65 + ldi r17,2 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,177 + ldi r25,169 + ldi r16,32 + ldi r17,129 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,216 + ldi r25,84 + ldi r16,144 + ldi r17,192 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,108 + ldi r25,42 + ldi r16,72 + ldi r17,96 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,54 + ldi r25,21 + ldi r16,36 + ldi r17,48 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,155 + ldi r25,10 + ldi r16,18 + ldi r17,24 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,77 + ldi r25,5 + ldi r16,9 + ldi r17,140 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,166 + ldi r25,130 + ldi r16,4 + ldi r17,198 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,83 + ldi r25,65 + ldi r16,2 + ldi r17,99 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,169 + ldi r25,32 + ldi r16,129 + ldi r17,177 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,84 + ldi r25,144 + ldi r16,192 + ldi r17,216 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,42 + ldi r25,72 + ldi r16,96 + ldi r17,108 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,21 + ldi r25,36 + ldi r16,48 + ldi r17,54 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,10 + ldi r25,18 + ldi r16,24 + ldi r17,155 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,5 + ldi r25,9 + ldi r16,140 + ldi r17,77 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,130 + ldi r25,4 + ldi r16,198 + ldi r17,166 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,65 + ldi r25,2 + ldi r16,99 + ldi r17,83 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,32 + ldi r25,129 + ldi r16,177 + ldi r17,169 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc 
r0,r1 + ldi r24,144 + ldi r25,192 + ldi r16,216 + ldi r17,84 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,72 + ldi r25,96 + ldi r16,108 + ldi r17,42 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,36 + ldi r25,48 + ldi r16,54 + ldi r17,21 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,18 + ldi r25,24 + ldi r16,155 + ldi r17,10 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,9 + ldi r25,140 + ldi r16,77 + ldi r17,5 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,4 + ldi r25,198 + ldi r16,166 + ldi r17,130 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,2 + ldi r25,99 + ldi r16,83 + ldi r17,65 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,129 + ldi r25,177 + ldi r16,169 + ldi r17,32 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + movw r4,r12 + movw r6,r14 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r27,23 + mov r12,r27 + ldi r27,177 + mov r13,r27 + ldi r27,84 + mov r14,r27 + ldi r27,51 + mov r15,r27 + and r12,r0 + and r13,r0 + and r14,r0 + and r15,r0 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,139 + ldi r25,88 + ldi r16,170 + ldi r17,153 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,69 + ldi r25,44 + ldi r16,213 + ldi r17,204 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,34 + ldi r25,150 + ldi r16,106 + ldi r17,230 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,17 + ldi r25,75 + ldi r16,53 + ldi r17,115 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,136 + ldi r25,165 + ldi r16,154 + ldi r17,185 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,196 + ldi r25,82 + ldi r16,205 + ldi r17,92 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,98 + ldi r25,169 + ldi r16,102 + ldi r17,46 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,177 + ldi r25,84 + ldi r16,51 + ldi r17,23 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,88 + ldi r25,170 + ldi r16,153 + ldi r17,139 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi 
r24,44 + ldi r25,213 + ldi r16,204 + ldi r17,69 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,150 + ldi r25,106 + ldi r16,230 + ldi r17,34 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,75 + ldi r25,53 + ldi r16,115 + ldi r17,17 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,165 + ldi r25,154 + ldi r16,185 + ldi r17,136 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,82 + ldi r25,205 + ldi r16,92 + ldi r17,196 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,169 + ldi r25,102 + ldi r16,46 + ldi r17,98 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,84 + ldi r25,51 + ldi r16,23 + ldi r17,177 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,170 + ldi r25,153 + ldi r16,139 + ldi r17,88 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,213 + ldi r25,204 + ldi r16,69 + ldi r17,44 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,106 + ldi r25,230 + ldi r16,34 + ldi r17,150 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,53 + ldi r25,115 + ldi r16,17 + ldi r17,75 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,154 + ldi r25,185 + ldi r16,136 + ldi r17,165 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,205 + ldi r25,92 + ldi r16,196 + ldi r17,82 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,102 + ldi r25,46 + ldi r16,98 + ldi r17,169 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,51 + ldi r25,23 + ldi r16,177 + ldi r17,84 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,153 + ldi r25,139 + ldi r16,88 + ldi r17,170 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,204 + ldi r25,69 + ldi r16,44 + ldi r17,213 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,230 + ldi r25,34 + ldi r16,150 + ldi r17,106 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,115 + ldi r25,17 + ldi r16,75 
+ ldi r17,53 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,185 + ldi r25,136 + ldi r16,165 + ldi r17,154 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,92 + ldi r25,196 + ldi r16,82 + ldi r17,205 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,46 + ldi r25,98 + ldi r16,169 + ldi r17,102 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + movw r8,r12 + movw r10,r14 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + com r8 + com r9 + com r10 + com r11 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + mov r0,r18 + and r0,r8 + eor r4,r0 + mov r0,r19 + and r0,r9 + eor r5,r0 + mov r0,r20 + and r0,r10 + eor r6,r0 + mov r0,r21 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r8 + eor r22,r0 + mov r0,r5 + and r0,r9 + eor r23,r0 + mov r0,r6 + and r0,r10 + eor r2,r0 + mov r0,r7 + and r0,r11 + eor r3,r0 + mov r0,r22 + and r0,r4 + eor r18,r0 + mov r0,r23 + and r0,r5 + eor r19,r0 + mov r0,r2 + and r0,r6 + eor r20,r0 + mov r0,r3 + and r0,r7 + eor r21,r0 + mov r0,r18 + and r0,r22 + eor r8,r0 + mov r0,r19 + and r0,r23 + eor r9,r0 + mov r0,r20 + and r0,r2 + eor r10,r0 + mov r0,r21 + and r0,r3 + eor r11,r0 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + ld r15,-Z + ld r14,-Z + ld r13,-Z + ld r12,-Z + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + ld r15,-Z + ld r14,-Z + ld r13,-Z + ld r12,-Z + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + ld r15,-Z + ld r14,-Z + ld r13,-Z + ld r12,-Z + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ld r15,-Z + ld r14,-Z + ld r13,-Z + ld r12,-Z + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + dec r26 + breq 7089f + rjmp 51b +7089: + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r21 + st X+,r20 + st X+,r19 + st X+,r18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + pop r0 + pop r0 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size pyjamask_128_decrypt, .-pyjamask_128_decrypt + +#endif diff --git a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-pyjamask.c b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-pyjamask.c index f3a5655..3c40d2d 100644 --- a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-pyjamask.c +++ b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-pyjamask.c @@ -23,6 +23,8 @@ #include "internal-pyjamask.h" #include "internal-util.h" +#if !defined(__AVR__) + /** * \brief Performs a circulant binary matrix multiplication. 
* @@ -49,7 +51,8 @@ STATIC_INLINE uint32_t pyjamask_matrix_multiply(uint32_t x, uint32_t y) return result; } -void pyjamask_setup_key(pyjamask_key_schedule_t *ks, const unsigned char *key) +void pyjamask_128_setup_key + (pyjamask_128_key_schedule_t *ks, const unsigned char *key) { uint32_t *rk = ks->k; uint32_t k0, k1, k2, k3; @@ -96,8 +99,54 @@ void pyjamask_setup_key(pyjamask_key_schedule_t *ks, const unsigned char *key) } } +void pyjamask_96_setup_key + (pyjamask_96_key_schedule_t *ks, const unsigned char *key) +{ + uint32_t *rk = ks->k; + uint32_t k0, k1, k2, k3; + uint32_t temp; + uint8_t round; + + /* Load the words of the key */ + k0 = be_load_word32(key); + k1 = be_load_word32(key + 4); + k2 = be_load_word32(key + 8); + k3 = be_load_word32(key + 12); + + /* The first round key is the same as the key itself */ + rk[0] = k0; + rk[1] = k1; + rk[2] = k2; + rk += 3; + + /* Derive the round keys for all of the other rounds */ + for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk += 3) { + /* Mix the columns */ + temp = k0 ^ k1 ^ k2 ^ k3; + k0 ^= temp; + k1 ^= temp; + k2 ^= temp; + k3 ^= temp; + + /* Mix the rows and add the round constants. Note that the Pyjamask + * specification says that k1/k2/k3 should be rotated left by 8, 15, + * and 18 bits. But the reference code actually rotates the words + * right. And the test vectors in the specification match up with + * right rotations, not left. We match the reference code here */ + k0 = pyjamask_matrix_multiply(0xb881b9caU, k0) ^ 0x00000080U ^ round; + k1 = rightRotate8(k1) ^ 0x00006a00U; + k2 = rightRotate15(k2) ^ 0x003f0000U; + k3 = rightRotate18(k3) ^ 0x24000000U; + + /* Write the round key to the schedule */ + rk[0] = k0; + rk[1] = k1; + rk[2] = k2; + } +} + void pyjamask_128_encrypt - (const pyjamask_key_schedule_t *ks, unsigned char *output, + (const pyjamask_128_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { const uint32_t *rk = ks->k; @@ -152,7 +201,7 @@ void pyjamask_128_encrypt } void pyjamask_128_decrypt - (const pyjamask_key_schedule_t *ks, unsigned char *output, + (const pyjamask_128_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { const uint32_t *rk = ks->k + 4 * PYJAMASK_ROUNDS; @@ -208,7 +257,7 @@ void pyjamask_128_decrypt } void pyjamask_96_encrypt - (const pyjamask_key_schedule_t *ks, unsigned char *output, + (const pyjamask_96_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { const uint32_t *rk = ks->k; @@ -221,7 +270,7 @@ void pyjamask_96_encrypt s2 = be_load_word32(input + 8); /* Perform all encryption rounds */ - for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk += 4) { + for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk += 3) { /* Add the round key to the state */ s0 ^= rk[0]; s1 ^= rk[1]; @@ -256,10 +305,10 @@ void pyjamask_96_encrypt } void pyjamask_96_decrypt - (const pyjamask_key_schedule_t *ks, unsigned char *output, + (const pyjamask_96_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { - const uint32_t *rk = ks->k + 4 * PYJAMASK_ROUNDS; + const uint32_t *rk = ks->k + 3 * PYJAMASK_ROUNDS; uint32_t s0, s1, s2; uint8_t round; @@ -272,10 +321,10 @@ void pyjamask_96_decrypt s0 ^= rk[0]; s1 ^= rk[1]; s2 ^= rk[2]; - rk -= 4; + rk -= 3; /* Perform all encryption rounds */ - for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk -= 4) { + for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk -= 3) { /* Inverse mix of the rows in the state */ s0 = pyjamask_matrix_multiply(0x2037a121U, s0); s1 = 
pyjamask_matrix_multiply(0x108ff2a0U, s1); @@ -303,3 +352,5 @@ void pyjamask_96_decrypt be_store_word32(output + 4, s1); be_store_word32(output + 8, s2); } + +#endif /* !__AVR__ */ diff --git a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-pyjamask.h b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-pyjamask.h index 3fd93a7..3ead7fb 100644 --- a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-pyjamask.h +++ b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-pyjamask.h @@ -45,31 +45,60 @@ extern "C" { #define PYJAMASK_MASKING_ORDER 4 /** - * \brief Structure of the key schedule for Pyjamask block ciphers. + * \brief Structure of the key schedule for the Pyjamask-128 block cipher. */ typedef struct { uint32_t k[(PYJAMASK_ROUNDS + 1) * 4]; /**< Words of the key schedule */ -} pyjamask_key_schedule_t; +} pyjamask_128_key_schedule_t; /** - * \brief Structure of the key schedule for masked Pyjamask block ciphers. + * \brief Structure of the key schedule for the Pyjamask-96 block cipher. + */ +typedef struct +{ + uint32_t k[(PYJAMASK_ROUNDS + 1) * 3]; /**< Words of the key schedule */ + +} pyjamask_96_key_schedule_t; + +/** + * \brief Structure of the key schedule for masked Pyjamask-128. */ typedef struct { /** Words of the key schedule */ uint32_t k[PYJAMASK_MASKING_ORDER * (PYJAMASK_ROUNDS + 1) * 4]; -} pyjamask_masked_key_schedule_t; +} pyjamask_masked_128_key_schedule_t; + +/** + * \brief Structure of the key schedule for masked Pyjamask-96. + */ +typedef struct +{ + /** Words of the key schedule */ + uint32_t k[PYJAMASK_MASKING_ORDER * (PYJAMASK_ROUNDS + 1) * 3]; + +} pyjamask_masked_96_key_schedule_t; /** - * \brief Sets up the key schedule for the Pyjamask block cipher. + * \brief Sets up the key schedule for the Pyjamask-128 block cipher. * * \param ks The key schedule on output. * \param key The 16 bytes of the key on input. */ -void pyjamask_setup_key(pyjamask_key_schedule_t *ks, const unsigned char *key); +void pyjamask_128_setup_key + (pyjamask_128_key_schedule_t *ks, const unsigned char *key); + +/** + * \brief Sets up the key schedule for the Pyjamask-96 block cipher. + * + * \param ks The key schedule on output. + * \param key The 16 bytes of the key on input. + */ +void pyjamask_96_setup_key + (pyjamask_96_key_schedule_t *ks, const unsigned char *key); /** * \brief Encrypts a 128-bit block with Pyjamask-128. 
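To make the renamed Pyjamask-96 entry points above concrete, a minimal round-trip sketch follows. It assumes only the prototypes declared in this header (a 16-byte key and 12-byte blocks); the function name and the all-zero key/plaintext values are illustrative placeholders, not part of the patch.

#include <string.h>
#include "internal-pyjamask.h"

/* Hypothetical round-trip check for the split Pyjamask-96 API:
 * build the 96-bit key schedule, encrypt one 12-byte block,
 * decrypt it again, and confirm the original block comes back. */
static int pyjamask_96_roundtrip_example(void)
{
    pyjamask_96_key_schedule_t ks;
    unsigned char key[16] = {0};   /* placeholder key material */
    unsigned char pt[12] = {0};    /* one 96-bit plaintext block */
    unsigned char ct[12], out[12];

    pyjamask_96_setup_key(&ks, key);
    pyjamask_96_encrypt(&ks, ct, pt);
    pyjamask_96_decrypt(&ks, out, ct);
    return memcmp(out, pt, sizeof(pt)) == 0;  /* 1 on success */
}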
@@ -84,7 +113,7 @@ void pyjamask_setup_key(pyjamask_key_schedule_t *ks, const unsigned char *key); * \sa pyjamask_128_decrypt() */ void pyjamask_128_encrypt - (const pyjamask_key_schedule_t *ks, unsigned char *output, + (const pyjamask_128_key_schedule_t *ks, unsigned char *output, const unsigned char *input); /** @@ -100,7 +129,7 @@ void pyjamask_128_encrypt * \sa pyjamask_128_encrypt() */ void pyjamask_128_decrypt - (const pyjamask_key_schedule_t *ks, unsigned char *output, + (const pyjamask_128_key_schedule_t *ks, unsigned char *output, const unsigned char *input); /** @@ -116,7 +145,7 @@ void pyjamask_128_decrypt * \sa pyjamask_96_decrypt() */ void pyjamask_96_encrypt - (const pyjamask_key_schedule_t *ks, unsigned char *output, + (const pyjamask_96_key_schedule_t *ks, unsigned char *output, const unsigned char *input); /** @@ -132,17 +161,26 @@ void pyjamask_96_encrypt * \sa pyjamask_96_encrypt() */ void pyjamask_96_decrypt - (const pyjamask_key_schedule_t *ks, unsigned char *output, + (const pyjamask_96_key_schedule_t *ks, unsigned char *output, const unsigned char *input); /** - * \brief Sets up the key schedule for the masked Pyjamask block cipher. + * \brief Sets up the key schedule for the masked Pyjamask-128 block cipher. + * + * \param ks The key schedule on output. + * \param key The 16 bytes of the key on input. + */ +void pyjamask_masked_128_setup_key + (pyjamask_masked_128_key_schedule_t *ks, const unsigned char *key); + +/** + * \brief Sets up the key schedule for the masked Pyjamask-96 block cipher. * * \param ks The key schedule on output. * \param key The 16 bytes of the key on input. */ -void pyjamask_masked_setup_key - (pyjamask_masked_key_schedule_t *ks, const unsigned char *key); +void pyjamask_masked_96_setup_key + (pyjamask_masked_96_key_schedule_t *ks, const unsigned char *key); /** * \brief Encrypts a 128-bit block with Pyjamask-128 in masked mode. 
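The masked schedules introduced above keep the per-round layout of their unmasked counterparts and simply store one copy per masking share, so each is exactly PYJAMASK_MASKING_ORDER times larger. A rough C11 sanity check of that relationship, assuming nothing beyond the types and constants declared in this header, might read:

#include <assert.h>            /* static_assert (C11) */
#include "internal-pyjamask.h"

/* Each masked key schedule is one unmasked schedule per share. */
static_assert(sizeof(((pyjamask_masked_128_key_schedule_t *)0)->k) ==
              PYJAMASK_MASKING_ORDER *
              sizeof(((pyjamask_128_key_schedule_t *)0)->k),
              "masked Pyjamask-128 schedule: one copy per share");
static_assert(sizeof(((pyjamask_masked_96_key_schedule_t *)0)->k) ==
              PYJAMASK_MASKING_ORDER *
              sizeof(((pyjamask_96_key_schedule_t *)0)->k),
              "masked Pyjamask-96 schedule: one copy per share");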
@@ -157,7 +195,7 @@ void pyjamask_masked_setup_key * \sa pyjamask_masked_128_decrypt() */ void pyjamask_masked_128_encrypt - (const pyjamask_masked_key_schedule_t *ks, unsigned char *output, + (const pyjamask_masked_128_key_schedule_t *ks, unsigned char *output, const unsigned char *input); /** @@ -173,7 +211,7 @@ void pyjamask_masked_128_encrypt * \sa pyjamask_masked_128_encrypt() */ void pyjamask_masked_128_decrypt - (const pyjamask_masked_key_schedule_t *ks, unsigned char *output, + (const pyjamask_masked_128_key_schedule_t *ks, unsigned char *output, const unsigned char *input); /** @@ -189,7 +227,7 @@ void pyjamask_masked_128_decrypt * \sa pyjamask_masked_96_decrypt() */ void pyjamask_masked_96_encrypt - (const pyjamask_masked_key_schedule_t *ks, unsigned char *output, + (const pyjamask_masked_96_key_schedule_t *ks, unsigned char *output, const unsigned char *input); /** @@ -205,7 +243,7 @@ void pyjamask_masked_96_encrypt * \sa pyjamask_masked_96_encrypt() */ void pyjamask_masked_96_decrypt - (const pyjamask_masked_key_schedule_t *ks, unsigned char *output, + (const pyjamask_masked_96_key_schedule_t *ks, unsigned char *output, const unsigned char *input); #ifdef __cplusplus diff --git a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-util.h b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-util.h +++ b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/pyjamask-128.c b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/pyjamask-128.c index a70a32f..da0fac6 100644 --- a/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/pyjamask-128.c +++ b/pyjamask/Implementations/crypto_aead/pyjamask128aeadv1/rhys/pyjamask-128.c @@ -37,8 +37,8 @@ aead_cipher_t const pyjamask_128_cipher = { #define OCB_BLOCK_SIZE 16 #define OCB_NONCE_SIZE PYJAMASK_128_NONCE_SIZE #define OCB_TAG_SIZE PYJAMASK_128_TAG_SIZE -#define OCB_KEY_SCHEDULE pyjamask_key_schedule_t -#define OCB_SETUP_KEY pyjamask_setup_key +#define OCB_KEY_SCHEDULE pyjamask_128_key_schedule_t +#define OCB_SETUP_KEY pyjamask_128_setup_key #define OCB_ENCRYPT_BLOCK pyjamask_128_encrypt #define OCB_DECRYPT_BLOCK pyjamask_128_decrypt #include "internal-ocb.h" diff --git a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/aead-common.c b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm 
Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/aead-common.h b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. 
- */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Absorbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * provide the "hash", "init", "absorb", and "squeeze" functions. 
- */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/api.h b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/api.h deleted file mode 100644 index bd8cdcb..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 8 -#define CRYPTO_ABYTES 12 -#define CRYPTO_NOOVERLAP 1 diff --git a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/encrypt.c b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/encrypt.c deleted file mode 100644 index f09b0ed..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "pyjamask.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return pyjamask_96_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return pyjamask_96_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-ocb.h b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-ocb.h deleted file mode 100644 index 98f2a31..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-ocb.h +++ /dev/null @@ -1,355 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_OCB_H -#define LW_INTERNAL_OCB_H - -#include "internal-util.h" -#include - -/* We expect a number of macros to be defined before this file - * is included to configure the underlying block cipher: - * - * OCB_ALG_NAME Name of the algorithm that is using OCB mode. 
- * OCB_BLOCK_SIZE Size of the block for the underlying cipher in bytes. - * OCB_NONCE_SIZE Size of the nonce which must be < OCB_BLOCK_SIZE. - * OCB_TAG_SIZE Size of the authentication tag. - * OCB_KEY_SCHEDULE Type for the key schedule. - * OCB_SETUP_KEY Name of the key schedule setup function. - * OCB_ENCRYPT_BLOCK Name of the block cipher ECB encrypt function. - * OCB_DECRYPT_BLOCK Name of the block cipher ECB decrypt function. - * OCB_DOUBLE_L Name of the function to double L (optional). - */ -#if defined(OCB_ENCRYPT_BLOCK) - -/** - * \file internal-ocb.h - * \brief Internal implementation of the OCB block cipher mode. - * - * Note that OCB is covered by patents so it may not be usable in all - * applications. Open source applications should be covered, but for - * others you will need to contact the patent authors to find out - * if you can use it or if a paid license is required. - * - * License information: https://web.cs.ucdavis.edu/~rogaway/ocb/license.htm - * - * References: https://tools.ietf.org/html/rfc7253 - */ - -#define OCB_CONCAT_INNER(name,suffix) name##suffix -#define OCB_CONCAT(name,suffix) OCB_CONCAT_INNER(name,suffix) - -#if !defined(OCB_DOUBLE_L) - -#define OCB_DOUBLE_L OCB_CONCAT(OCB_ALG_NAME,_double_l) - -#if OCB_BLOCK_SIZE == 16 - -/* Double a value in GF(128) */ -static void OCB_DOUBLE_L(unsigned char out[16], const unsigned char in[16]) -{ - unsigned index; - unsigned char mask = (unsigned char)(((signed char)in[0]) >> 7); - for (index = 0; index < 15; ++index) - out[index] = (in[index] << 1) | (in[index + 1] >> 7); - out[15] = (in[15] << 1) ^ (mask & 0x87); -} - -#elif OCB_BLOCK_SIZE == 12 - -/* Double a value in GF(96) */ -static void OCB_DOUBLE_L - (unsigned char out[12], const unsigned char in[12]) -{ - unsigned index; - unsigned char mask = (unsigned char)(((signed char)in[0]) >> 7); - for (index = 0; index < 11; ++index) - out[index] = (in[index] << 1) | (in[index + 1] >> 7); - out[11] = (in[11] << 1) ^ (mask & 0x41); - out[10] ^= (mask & 0x06); -} - -#else -#error "Unknown block size for OCB" -#endif - -#endif - -/* State information for OCB functions */ -#define OCB_STATE OCB_CONCAT(OCB_ALG_NAME,_state_t) -typedef struct -{ - OCB_KEY_SCHEDULE ks; - unsigned char Lstar[OCB_BLOCK_SIZE]; - unsigned char Ldollar[OCB_BLOCK_SIZE]; - unsigned char L0[OCB_BLOCK_SIZE]; - unsigned char L1[OCB_BLOCK_SIZE]; - -} OCB_STATE; - -/* Initializes the OCB state from the key and nonce */ -static void OCB_CONCAT(OCB_ALG_NAME,_init) - (OCB_STATE *state, const unsigned char *k, const unsigned char *nonce, - unsigned char offset[OCB_BLOCK_SIZE]) -{ - unsigned bottom; - - /* Set up the key schedule */ - OCB_SETUP_KEY(&(state->ks), k); - - /* Derive the values of L*, L$, L0, and L1 */ - memset(state->Lstar, 0, sizeof(state->Lstar)); - OCB_ENCRYPT_BLOCK(&(state->ks), state->Lstar, state->Lstar); - OCB_DOUBLE_L(state->Ldollar, state->Lstar); - OCB_DOUBLE_L(state->L0, state->Ldollar); - OCB_DOUBLE_L(state->L1, state->L0); - - /* Derive the initial offset from the nonce */ - memset(offset, 0, OCB_BLOCK_SIZE); - memcpy(offset + OCB_BLOCK_SIZE - OCB_NONCE_SIZE, nonce, OCB_NONCE_SIZE); - offset[0] = ((OCB_TAG_SIZE * 8) & 0x7F) << 1; - offset[OCB_BLOCK_SIZE - OCB_NONCE_SIZE - 1] |= 0x01; - bottom = offset[OCB_BLOCK_SIZE - 1] & 0x3F; - offset[OCB_BLOCK_SIZE - 1] &= 0xC0; - { - unsigned index; - unsigned byte_posn = bottom / 8; -#if OCB_BLOCK_SIZE == 16 - /* Standard OCB with a 128-bit block */ - unsigned char stretch[24]; - OCB_ENCRYPT_BLOCK(&(state->ks), stretch, offset); - 
memcpy(stretch + 16, stretch + 1, 8); - lw_xor_block(stretch + 16, stretch, 8); -#elif OCB_BLOCK_SIZE == 12 - /* 96-bit block handling from the Pyjamask specification */ - unsigned char stretch[20]; - OCB_ENCRYPT_BLOCK(&(state->ks), stretch, offset); - for (index = 0; index < 8; ++index) { - stretch[index + 12] = - (stretch[index + 1] << 1) | (stretch[index + 2] >> 7); - } - lw_xor_block(stretch + 12, stretch, 8); -#else - unsigned char stretch[OCB_BLOCK_SIZE + 8] = {0}; - #error "unsupported block size for OCB mode" -#endif - bottom %= 8; - if (bottom != 0) { - for (index = 0; index < OCB_BLOCK_SIZE; ++index) { - offset[index] = - (stretch[index + byte_posn] << bottom) | - (stretch[index + byte_posn + 1] >> (8 - bottom)); - } - } else { - memcpy(offset, stretch + byte_posn, OCB_BLOCK_SIZE); - } - } -} - -/* Calculate L_{ntz(i)} when the last two bits of i are zero */ -static void OCB_CONCAT(OCB_ALG_NAME,_calculate_L) - (OCB_STATE *state, unsigned char L[OCB_BLOCK_SIZE], unsigned long long i) -{ - OCB_DOUBLE_L(L, state->L1); - i >>= 2; - while ((i & 1) == 0) { - OCB_DOUBLE_L(L, L); - i >>= 1; - } -} - -/* Process associated data with OCB */ -static void OCB_CONCAT(OCB_ALG_NAME,_process_ad) - (OCB_STATE *state, unsigned char tag[OCB_BLOCK_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char offset[OCB_BLOCK_SIZE]; - unsigned char block[OCB_BLOCK_SIZE]; - unsigned long long block_number; - - /* Process all full blocks */ - memset(offset, 0, sizeof(offset)); - block_number = 1; - while (adlen >= OCB_BLOCK_SIZE) { - if (block_number & 1) { - lw_xor_block(offset, state->L0, OCB_BLOCK_SIZE); - } else if ((block_number & 3) == 2) { - lw_xor_block(offset, state->L1, OCB_BLOCK_SIZE); - } else { - OCB_CONCAT(OCB_ALG_NAME,_calculate_L)(state, block, block_number); - lw_xor_block(offset, block, OCB_BLOCK_SIZE); - } - lw_xor_block_2_src(block, offset, ad, OCB_BLOCK_SIZE); - OCB_ENCRYPT_BLOCK(&(state->ks), block, block); - lw_xor_block(tag, block, OCB_BLOCK_SIZE); - ad += OCB_BLOCK_SIZE; - adlen -= OCB_BLOCK_SIZE; - ++block_number; - } - - /* Pad and process the last partial block */ - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(offset, state->Lstar, OCB_BLOCK_SIZE); - lw_xor_block(offset, ad, temp); - offset[temp] ^= 0x80; - OCB_ENCRYPT_BLOCK(&(state->ks), block, offset); - lw_xor_block(tag, block, OCB_BLOCK_SIZE); - } -} - -int OCB_CONCAT(OCB_ALG_NAME,_aead_encrypt) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - OCB_STATE state; - unsigned char offset[OCB_BLOCK_SIZE]; - unsigned char sum[OCB_BLOCK_SIZE]; - unsigned char block[OCB_BLOCK_SIZE]; - unsigned long long block_number; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + OCB_TAG_SIZE; - - /* Initialize the OCB state */ - OCB_CONCAT(OCB_ALG_NAME,_init)(&state, k, npub, offset); - - /* Process all plaintext blocks except the last */ - memset(sum, 0, sizeof(sum)); - block_number = 1; - while (mlen >= OCB_BLOCK_SIZE) { - if (block_number & 1) { - lw_xor_block(offset, state.L0, OCB_BLOCK_SIZE); - } else if ((block_number & 3) == 2) { - lw_xor_block(offset, state.L1, OCB_BLOCK_SIZE); - } else { - OCB_CONCAT(OCB_ALG_NAME,_calculate_L)(&state, block, block_number); - lw_xor_block(offset, block, OCB_BLOCK_SIZE); - } - lw_xor_block(sum, m, OCB_BLOCK_SIZE); - lw_xor_block_2_src(block, 
offset, m, OCB_BLOCK_SIZE); - OCB_ENCRYPT_BLOCK(&(state.ks), block, block); - lw_xor_block_2_src(c, block, offset, OCB_BLOCK_SIZE); - c += OCB_BLOCK_SIZE; - m += OCB_BLOCK_SIZE; - mlen -= OCB_BLOCK_SIZE; - ++block_number; - } - - /* Pad and process the last plaintext block */ - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - lw_xor_block(offset, state.Lstar, OCB_BLOCK_SIZE); - OCB_ENCRYPT_BLOCK(&(state.ks), block, offset); - lw_xor_block_2_src(c, block, m, temp); - c += temp; - } - - /* Finalize the encryption phase */ - lw_xor_block(sum, offset, OCB_BLOCK_SIZE); - lw_xor_block(sum, state.Ldollar, OCB_BLOCK_SIZE); - OCB_ENCRYPT_BLOCK(&(state.ks), sum, sum); - - /* Process the associated data and compute the final authentication tag */ - OCB_CONCAT(OCB_ALG_NAME,_process_ad)(&state, sum, ad, adlen); - memcpy(c, sum, OCB_TAG_SIZE); - return 0; -} - -int OCB_CONCAT(OCB_ALG_NAME,_aead_decrypt) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - OCB_STATE state; - unsigned char *mtemp = m; - unsigned char offset[OCB_BLOCK_SIZE]; - unsigned char sum[OCB_BLOCK_SIZE]; - unsigned char block[OCB_BLOCK_SIZE]; - unsigned long long block_number; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < OCB_TAG_SIZE) - return -1; - *mlen = clen - OCB_TAG_SIZE; - - /* Initialize the OCB state */ - OCB_CONCAT(OCB_ALG_NAME,_init)(&state, k, npub, offset); - - /* Process all ciphertext blocks except the last */ - memset(sum, 0, sizeof(sum)); - block_number = 1; - clen -= OCB_TAG_SIZE; - while (clen >= OCB_BLOCK_SIZE) { - if (block_number & 1) { - lw_xor_block(offset, state.L0, OCB_BLOCK_SIZE); - } else if ((block_number & 3) == 2) { - lw_xor_block(offset, state.L1, OCB_BLOCK_SIZE); - } else { - OCB_CONCAT(OCB_ALG_NAME,_calculate_L)(&state, block, block_number); - lw_xor_block(offset, block, OCB_BLOCK_SIZE); - } - lw_xor_block_2_src(block, offset, c, OCB_BLOCK_SIZE); - OCB_DECRYPT_BLOCK(&(state.ks), block, block); - lw_xor_block_2_src(m, block, offset, OCB_BLOCK_SIZE); - lw_xor_block(sum, m, OCB_BLOCK_SIZE); - c += OCB_BLOCK_SIZE; - m += OCB_BLOCK_SIZE; - clen -= OCB_BLOCK_SIZE; - ++block_number; - } - - /* Pad and process the last ciphertext block */ - if (clen > 0) { - unsigned temp = (unsigned)clen; - lw_xor_block(offset, state.Lstar, OCB_BLOCK_SIZE); - OCB_ENCRYPT_BLOCK(&(state.ks), block, offset); - lw_xor_block_2_src(m, block, c, temp); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - c += temp; - } - - /* Finalize the decryption phase */ - lw_xor_block(sum, offset, OCB_BLOCK_SIZE); - lw_xor_block(sum, state.Ldollar, OCB_BLOCK_SIZE); - OCB_ENCRYPT_BLOCK(&(state.ks), sum, sum); - - /* Process the associated data and check the final authentication tag */ - OCB_CONCAT(OCB_ALG_NAME,_process_ad)(&state, sum, ad, adlen); - return aead_check_tag(mtemp, *mlen, sum, c, OCB_TAG_SIZE); -} - -#endif /* OCB_ENCRYPT_BLOCK */ - -#endif /* LW_INTERNAL_OCB_H */ diff --git a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-pyjamask-avr.S b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-pyjamask-avr.S deleted file mode 100644 index b7cc631..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-pyjamask-avr.S +++ /dev/null @@ -1,8883 +0,0 @@ -#if defined(__AVR__) 
-#include -/* Automatically generated - do not edit */ - - .text -.global pyjamask_96_setup_key - .type pyjamask_96_setup_key, @function -pyjamask_96_setup_key: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 18 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - mov r26,r1 -29: - movw r12,r18 - movw r14,r20 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r27,202 - mov r12,r27 - ldi r27,185 - mov r13,r27 - ldi r27,129 - mov r14,r27 - ldi r27,184 - mov r15,r27 - and r12,r0 - and r13,r0 - and r14,r0 - and r15,r0 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,229 - ldi r25,220 - ldi r16,64 - ldi r17,92 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,114 - ldi r25,110 - ldi r16,32 - ldi r17,174 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,57 - ldi r25,55 - ldi r16,16 - ldi r17,87 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,156 - ldi r25,27 - ldi r16,136 - ldi r17,171 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,206 - ldi r25,13 - ldi r16,196 - ldi r17,85 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,231 - ldi r25,6 - ldi r16,226 - ldi r17,42 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,115 - ldi r25,3 - ldi r16,113 - ldi r17,149 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,185 - ldi r25,129 - ldi r16,184 - ldi r17,202 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,220 - ldi r25,64 - ldi r16,92 - ldi r17,229 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,110 - ldi r25,32 - ldi r16,174 - ldi r17,114 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,55 - ldi r25,16 - ldi r16,87 - ldi r17,57 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,27 - ldi r25,136 - ldi 
r16,171 - ldi r17,156 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,13 - ldi r25,196 - ldi r16,85 - ldi r17,206 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,6 - ldi r25,226 - ldi r16,42 - ldi r17,231 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,3 - ldi r25,113 - ldi r16,149 - ldi r17,115 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,129 - ldi r25,184 - ldi r16,202 - ldi r17,185 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,64 - ldi r25,92 - ldi r16,229 - ldi r17,220 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,32 - ldi r25,174 - ldi r16,114 - ldi r17,110 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,16 - ldi r25,87 - ldi r16,57 - ldi r17,55 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,136 - ldi r25,171 - ldi r16,156 - ldi r17,27 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,196 - ldi r25,85 - ldi r16,206 - ldi r17,13 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,226 - ldi r25,42 - ldi r16,231 - ldi r17,6 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,113 - ldi r25,149 - ldi r16,115 - ldi r17,3 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,184 - ldi r25,202 - ldi r16,185 - ldi r17,129 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,92 - ldi r25,229 - ldi r16,220 - ldi r17,64 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,174 - ldi r25,114 - ldi r16,110 - ldi r17,32 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,87 - ldi r25,57 - ldi r16,55 - ldi r17,16 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,171 - ldi r25,156 - ldi r16,27 - ldi r17,136 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,85 - ldi r25,206 - ldi r16,13 - ldi r17,196 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,42 - ldi r25,231 - ldi r16,6 - ldi r17,226 - 
and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,149 - ldi r25,115 - ldi r16,3 - ldi r17,113 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - movw r18,r12 - movw r20,r14 - ldi r25,128 - eor r18,r25 - eor r18,r26 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - ldi r24,106 - eor r23,r24 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - lsl r4 - rol r5 - rol r6 - rol r7 - adc r4,r1 - ldi r17,63 - eor r6,r17 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - or r11,r0 - ldi r16,36 - eor r11,r16 - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - inc r26 - ldi r27,14 - cpse r26,r27 - rjmp 29b - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size pyjamask_96_setup_key, .-pyjamask_96_setup_key - - .text -.global pyjamask_96_encrypt - .type pyjamask_96_encrypt, @function -pyjamask_96_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 16 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ldi r26,14 -13: - ld r8,Z+ - ld r9,Z+ - ld r10,Z+ - ld r11,Z+ - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - ld r8,Z+ - ld r9,Z+ - ld r10,Z+ - ld r11,Z+ - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - ld r8,Z+ - ld r9,Z+ - ld r10,Z+ - ld r11,Z+ - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r18 - and r0,r22 - eor r4,r0 - mov r0,r19 - and r0,r23 - eor r5,r0 - mov r0,r20 - and r0,r2 - eor r6,r0 - mov r0,r21 - and r0,r3 - eor r7,r0 - mov r0,r22 - and r0,r4 - eor r18,r0 - mov r0,r23 - and r0,r5 - eor r19,r0 - mov r0,r2 - and r0,r6 - eor r20,r0 - mov r0,r3 - and r0,r7 - eor r21,r0 - mov r0,r18 - and r0,r4 - eor r22,r0 - mov r0,r19 - and r0,r5 - eor r23,r0 - mov r0,r20 - and r0,r6 - eor r2,r0 - mov r0,r21 - and r0,r7 - eor r3,r0 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - com r4 - com r5 - com r6 - com r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r27,133 - mov r8,r27 - ldi r27,16 - mov r9,r27 - ldi r27,134 - mov r10,r27 - ldi r27,163 - mov r11,r27 - and r8,r0 - and r9,r0 - and r10,r0 - and r11,r0 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,66 - ldi r25,8 - ldi r16,195 - ldi r17,209 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,33 - ldi r25,132 - ldi r16,225 - ldi r17,104 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,16 - ldi r25,194 - ldi r16,112 - ldi r17,180 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor 
r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,8 - ldi r25,97 - ldi r16,56 - ldi r17,90 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,132 - ldi r25,48 - ldi r16,28 - ldi r17,45 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,66 - ldi r25,24 - ldi r16,142 - ldi r17,22 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,33 - ldi r25,12 - ldi r16,71 - ldi r17,11 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,16 - ldi r25,134 - ldi r16,163 - ldi r17,133 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,8 - ldi r25,195 - ldi r16,209 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,132 - ldi r25,225 - ldi r16,104 - ldi r17,33 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,194 - ldi r25,112 - ldi r16,180 - ldi r17,16 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,97 - ldi r25,56 - ldi r16,90 - ldi r17,8 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,48 - ldi r25,28 - ldi r16,45 - ldi r17,132 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,24 - ldi r25,142 - ldi r16,22 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,12 - ldi r25,71 - ldi r16,11 - ldi r17,33 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,134 - ldi r25,163 - ldi r16,133 - ldi r17,16 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,195 - ldi r25,209 - ldi r16,66 - ldi r17,8 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,225 - ldi r25,104 - ldi r16,33 - ldi r17,132 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,112 - ldi r25,180 - ldi r16,16 - ldi r17,194 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,56 - ldi r25,90 - ldi r16,8 - ldi r17,97 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,28 - ldi r25,45 - ldi r16,132 - ldi r17,48 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,142 - ldi r25,22 - ldi 
r16,66 - ldi r17,24 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,71 - ldi r25,11 - ldi r16,33 - ldi r17,12 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,163 - ldi r25,133 - ldi r16,16 - ldi r17,134 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,209 - ldi r25,66 - ldi r16,8 - ldi r17,195 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,104 - ldi r25,33 - ldi r16,132 - ldi r17,225 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,180 - ldi r25,16 - ldi r16,194 - ldi r17,112 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,90 - ldi r25,8 - ldi r16,97 - ldi r17,56 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,45 - ldi r25,132 - ldi r16,48 - ldi r17,28 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,22 - ldi r25,66 - ldi r16,24 - ldi r17,142 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,11 - ldi r25,33 - ldi r16,12 - ldi r17,71 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - movw r18,r8 - movw r20,r10 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r27,33 - mov r8,r27 - ldi r27,112 - mov r9,r27 - ldi r27,65 - mov r10,r27 - ldi r27,99 - mov r11,r27 - and r8,r0 - and r9,r0 - and r10,r0 - and r11,r0 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,16 - ldi r25,184 - ldi r16,160 - ldi r17,177 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,8 - ldi r25,92 - ldi r16,208 - ldi r17,88 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,4 - ldi r25,46 - ldi r16,104 - ldi r17,44 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,2 - ldi r25,23 - ldi r16,52 - ldi r17,22 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,129 - ldi r25,11 - ldi r16,26 - ldi r17,11 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,192 - ldi r25,5 - ldi r16,141 - ldi r17,133 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,224 - ldi r25,130 - ldi r16,198 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,112 - ldi r25,65 - ldi r16,99 - ldi r17,33 - and r24,r0 - and r25,r0 - and r16,r0 
- and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,184 - ldi r25,160 - ldi r16,177 - ldi r17,16 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,92 - ldi r25,208 - ldi r16,88 - ldi r17,8 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,46 - ldi r25,104 - ldi r16,44 - ldi r17,4 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,23 - ldi r25,52 - ldi r16,22 - ldi r17,2 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,11 - ldi r25,26 - ldi r16,11 - ldi r17,129 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,5 - ldi r25,141 - ldi r16,133 - ldi r17,192 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,130 - ldi r25,198 - ldi r16,66 - ldi r17,224 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,65 - ldi r25,99 - ldi r16,33 - ldi r17,112 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,160 - ldi r25,177 - ldi r16,16 - ldi r17,184 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,208 - ldi r25,88 - ldi r16,8 - ldi r17,92 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,104 - ldi r25,44 - ldi r16,4 - ldi r17,46 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,52 - ldi r25,22 - ldi r16,2 - ldi r17,23 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,26 - ldi r25,11 - ldi r16,129 - ldi r17,11 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,141 - ldi r25,133 - ldi r16,192 - ldi r17,5 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,198 - ldi r25,66 - ldi r16,224 - ldi r17,130 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,99 - ldi r25,33 - ldi r16,112 - ldi r17,65 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,177 - ldi r25,16 - ldi r16,184 - ldi r17,160 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,88 - ldi r25,8 - ldi r16,92 - ldi r17,208 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl 
r22 - sbc r0,r1 - ldi r24,44 - ldi r25,4 - ldi r16,46 - ldi r17,104 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,22 - ldi r25,2 - ldi r16,23 - ldi r17,52 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,11 - ldi r25,129 - ldi r16,11 - ldi r17,26 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,133 - ldi r25,192 - ldi r16,5 - ldi r17,141 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,66 - ldi r25,224 - ldi r16,130 - ldi r17,198 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - movw r22,r8 - movw r2,r10 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r27,128 - mov r8,r27 - ldi r27,242 - mov r9,r27 - ldi r27,44 - mov r10,r27 - ldi r27,105 - mov r11,r27 - and r8,r0 - and r9,r0 - and r10,r0 - and r11,r0 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,64 - ldi r25,121 - ldi r16,150 - ldi r17,52 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,160 - ldi r25,60 - ldi r16,75 - ldi r17,26 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,80 - ldi r25,158 - ldi r16,37 - ldi r17,13 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,40 - ldi r25,207 - ldi r16,146 - ldi r17,6 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,148 - ldi r25,103 - ldi r16,73 - ldi r17,3 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,202 - ldi r25,179 - ldi r16,164 - ldi r17,1 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,229 - ldi r25,89 - ldi r16,210 - mov r17,r1 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,242 - ldi r25,44 - ldi r16,105 - ldi r17,128 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,121 - ldi r25,150 - ldi r16,52 - ldi r17,64 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,60 - ldi r25,75 - ldi r16,26 - ldi r17,160 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,158 - ldi r25,37 - ldi r16,13 - ldi r17,80 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,207 - ldi r25,146 - ldi r16,6 - ldi r17,40 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,103 - ldi r25,73 - ldi r16,3 - ldi 
r17,148 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,179 - ldi r25,164 - ldi r16,1 - ldi r17,202 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,89 - ldi r25,210 - mov r16,r1 - ldi r17,229 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,44 - ldi r25,105 - ldi r16,128 - ldi r17,242 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,150 - ldi r25,52 - ldi r16,64 - ldi r17,121 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,75 - ldi r25,26 - ldi r16,160 - ldi r17,60 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,37 - ldi r25,13 - ldi r16,80 - ldi r17,158 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,146 - ldi r25,6 - ldi r16,40 - ldi r17,207 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,73 - ldi r25,3 - ldi r16,148 - ldi r17,103 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,164 - ldi r25,1 - ldi r16,202 - ldi r17,179 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,210 - mov r25,r1 - ldi r16,229 - ldi r17,89 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,105 - ldi r25,128 - ldi r16,242 - ldi r17,44 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,52 - ldi r25,64 - ldi r16,121 - ldi r17,150 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,26 - ldi r25,160 - ldi r16,60 - ldi r17,75 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,13 - ldi r25,80 - ldi r16,158 - ldi r17,37 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,6 - ldi r25,40 - ldi r16,207 - ldi r17,146 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,3 - ldi r25,148 - ldi r16,103 - ldi r17,73 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,1 - ldi r25,202 - ldi r16,179 - ldi r17,164 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - mov r24,r1 - ldi r25,229 - ldi r16,89 - ldi r17,210 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 
- eor r10,r16 - eor r11,r17 - movw r4,r8 - movw r6,r10 - dec r26 - breq 6545f - rjmp 13b -6545: - ld r8,Z+ - ld r9,Z+ - ld r10,Z+ - ld r11,Z+ - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - ld r8,Z+ - ld r9,Z+ - ld r10,Z+ - ld r11,Z+ - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - ld r8,Z+ - ld r9,Z+ - ld r10,Z+ - ld r11,Z+ - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r21 - st X+,r20 - st X+,r19 - st X+,r18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - pop r0 - pop r0 - pop r17 - pop r16 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size pyjamask_96_encrypt, .-pyjamask_96_encrypt - - .text -.global pyjamask_96_decrypt - .type pyjamask_96_decrypt, @function -pyjamask_96_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 16 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - subi r30,76 - sbci r31,255 - ld r9,-Z - ld r8,-Z - ld r27,-Z - ld r26,-Z - eor r4,r26 - eor r5,r27 - eor r6,r8 - eor r7,r9 - ld r9,-Z - ld r8,-Z - ld r27,-Z - ld r26,-Z - eor r22,r26 - eor r23,r27 - eor r2,r8 - eor r3,r9 - ld r9,-Z - ld r8,-Z - ld r27,-Z - ld r26,-Z - eor r18,r26 - eor r19,r27 - eor r20,r8 - eor r21,r9 - ldi r26,14 -39: - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r27,33 - mov r8,r27 - ldi r27,161 - mov r9,r27 - ldi r27,55 - mov r10,r27 - ldi r27,32 - mov r11,r27 - and r8,r0 - and r9,r0 - and r10,r0 - and r11,r0 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,144 - ldi r25,208 - ldi r16,27 - ldi r17,144 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,72 - ldi r25,232 - ldi r16,13 - ldi r17,72 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,36 - ldi r25,244 - ldi r16,6 - ldi r17,36 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,18 - ldi r25,122 - ldi r16,3 - ldi r17,18 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,9 - ldi r25,189 - ldi r16,1 - ldi r17,9 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,132 - ldi r25,222 - ldi r16,128 - ldi r17,132 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,66 - ldi r25,111 - ldi r16,64 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,161 - ldi r25,55 - ldi r16,32 - ldi r17,33 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,208 - ldi r25,27 - ldi r16,144 - ldi r17,144 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 
- ldi r24,232 - ldi r25,13 - ldi r16,72 - ldi r17,72 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,244 - ldi r25,6 - ldi r16,36 - ldi r17,36 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,122 - ldi r25,3 - ldi r16,18 - ldi r17,18 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,189 - ldi r25,1 - ldi r16,9 - ldi r17,9 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,222 - ldi r25,128 - ldi r16,132 - ldi r17,132 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,111 - ldi r25,64 - ldi r16,66 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,55 - ldi r25,32 - ldi r16,33 - ldi r17,161 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,27 - ldi r25,144 - ldi r16,144 - ldi r17,208 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,13 - ldi r25,72 - ldi r16,72 - ldi r17,232 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,6 - ldi r25,36 - ldi r16,36 - ldi r17,244 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,3 - ldi r25,18 - ldi r16,18 - ldi r17,122 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,1 - ldi r25,9 - ldi r16,9 - ldi r17,189 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,128 - ldi r25,132 - ldi r16,132 - ldi r17,222 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,64 - ldi r25,66 - ldi r16,66 - ldi r17,111 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,32 - ldi r25,33 - ldi r16,161 - ldi r17,55 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,144 - ldi r25,144 - ldi r16,208 - ldi r17,27 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,72 - ldi r25,72 - ldi r16,232 - ldi r17,13 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,36 - ldi r25,36 - ldi r16,244 - ldi r17,6 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,18 - ldi r25,18 - ldi r16,122 - ldi r17,3 - and r24,r0 - and r25,r0 - 
and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,9 - ldi r25,9 - ldi r16,189 - ldi r17,1 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,132 - ldi r25,132 - ldi r16,222 - ldi r17,128 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,66 - ldi r25,66 - ldi r16,111 - ldi r17,64 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - movw r18,r8 - movw r20,r10 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r27,160 - mov r8,r27 - ldi r27,242 - mov r9,r27 - ldi r27,143 - mov r10,r27 - ldi r27,16 - mov r11,r27 - and r8,r0 - and r9,r0 - and r10,r0 - and r11,r0 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,80 - ldi r25,249 - ldi r16,71 - ldi r17,8 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,168 - ldi r25,252 - ldi r16,35 - ldi r17,4 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,84 - ldi r25,254 - ldi r16,17 - ldi r17,2 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,42 - ldi r25,255 - ldi r16,8 - ldi r17,1 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,149 - ldi r25,127 - ldi r16,132 - mov r17,r1 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,202 - ldi r25,63 - ldi r16,66 - ldi r17,128 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,229 - ldi r25,31 - ldi r16,33 - ldi r17,64 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,242 - ldi r25,143 - ldi r16,16 - ldi r17,160 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,249 - ldi r25,71 - ldi r16,8 - ldi r17,80 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,252 - ldi r25,35 - ldi r16,4 - ldi r17,168 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,254 - ldi r25,17 - ldi r16,2 - ldi r17,84 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,255 - ldi r25,8 - ldi r16,1 - ldi r17,42 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,127 - ldi r25,132 - mov r16,r1 - ldi r17,149 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,63 - ldi r25,66 - ldi r16,128 - ldi r17,202 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - 
eor r11,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,31 - ldi r25,33 - ldi r16,64 - ldi r17,229 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,143 - ldi r25,16 - ldi r16,160 - ldi r17,242 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,71 - ldi r25,8 - ldi r16,80 - ldi r17,249 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,35 - ldi r25,4 - ldi r16,168 - ldi r17,252 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,17 - ldi r25,2 - ldi r16,84 - ldi r17,254 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,8 - ldi r25,1 - ldi r16,42 - ldi r17,255 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,132 - mov r25,r1 - ldi r16,149 - ldi r17,127 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,66 - ldi r25,128 - ldi r16,202 - ldi r17,63 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,33 - ldi r25,64 - ldi r16,229 - ldi r17,31 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,16 - ldi r25,160 - ldi r16,242 - ldi r17,143 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,8 - ldi r25,80 - ldi r16,249 - ldi r17,71 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,4 - ldi r25,168 - ldi r16,252 - ldi r17,35 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,2 - ldi r25,84 - ldi r16,254 - ldi r17,17 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,1 - ldi r25,42 - ldi r16,255 - ldi r17,8 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - mov r24,r1 - ldi r25,149 - ldi r16,127 - ldi r17,132 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,128 - ldi r25,202 - ldi r16,63 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,64 - ldi r25,229 - ldi r16,31 - ldi r17,33 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - movw r22,r8 - movw r2,r10 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r27,192 - mov r8,r27 - ldi r27,216 - mov r9,r27 - ldi r27,84 - mov r10,r27 - ldi r27,144 - mov r11,r27 - and r8,r0 - and r9,r0 - and r10,r0 - and r11,r0 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi 
r24,96 - ldi r25,108 - ldi r16,42 - ldi r17,72 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,48 - ldi r25,54 - ldi r16,21 - ldi r17,36 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,24 - ldi r25,155 - ldi r16,10 - ldi r17,18 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,140 - ldi r25,77 - ldi r16,5 - ldi r17,9 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,198 - ldi r25,166 - ldi r16,130 - ldi r17,4 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,99 - ldi r25,83 - ldi r16,65 - ldi r17,2 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,177 - ldi r25,169 - ldi r16,32 - ldi r17,129 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,216 - ldi r25,84 - ldi r16,144 - ldi r17,192 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,108 - ldi r25,42 - ldi r16,72 - ldi r17,96 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,54 - ldi r25,21 - ldi r16,36 - ldi r17,48 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,155 - ldi r25,10 - ldi r16,18 - ldi r17,24 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,77 - ldi r25,5 - ldi r16,9 - ldi r17,140 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,166 - ldi r25,130 - ldi r16,4 - ldi r17,198 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,83 - ldi r25,65 - ldi r16,2 - ldi r17,99 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,169 - ldi r25,32 - ldi r16,129 - ldi r17,177 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,84 - ldi r25,144 - ldi r16,192 - ldi r17,216 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,42 - ldi r25,72 - ldi r16,96 - ldi r17,108 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,21 - ldi r25,36 - ldi r16,48 - ldi r17,54 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,10 - ldi r25,18 - ldi r16,24 - ldi r17,155 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 
- eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,5 - ldi r25,9 - ldi r16,140 - ldi r17,77 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,130 - ldi r25,4 - ldi r16,198 - ldi r17,166 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,65 - ldi r25,2 - ldi r16,99 - ldi r17,83 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,32 - ldi r25,129 - ldi r16,177 - ldi r17,169 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,144 - ldi r25,192 - ldi r16,216 - ldi r17,84 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,72 - ldi r25,96 - ldi r16,108 - ldi r17,42 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,36 - ldi r25,48 - ldi r16,54 - ldi r17,21 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,18 - ldi r25,24 - ldi r16,155 - ldi r17,10 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,9 - ldi r25,140 - ldi r16,77 - ldi r17,5 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,4 - ldi r25,198 - ldi r16,166 - ldi r17,130 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,2 - ldi r25,99 - ldi r16,83 - ldi r17,65 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,129 - ldi r25,177 - ldi r16,169 - ldi r17,32 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r8,r24 - eor r9,r25 - eor r10,r16 - eor r11,r17 - movw r4,r8 - movw r6,r10 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - com r4 - com r5 - com r6 - com r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r18 - and r0,r4 - eor r22,r0 - mov r0,r19 - and r0,r5 - eor r23,r0 - mov r0,r20 - and r0,r6 - eor r2,r0 - mov r0,r21 - and r0,r7 - eor r3,r0 - mov r0,r22 - and r0,r4 - eor r18,r0 - mov r0,r23 - and r0,r5 - eor r19,r0 - mov r0,r2 - and r0,r6 - eor r20,r0 - mov r0,r3 - and r0,r7 - eor r21,r0 - mov r0,r18 - and r0,r22 - eor r4,r0 - mov r0,r19 - and r0,r23 - eor r5,r0 - mov r0,r20 - and r0,r2 - eor r6,r0 - mov r0,r21 - and r0,r3 - eor r7,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - ld r11,-Z - ld r10,-Z - ld r9,-Z - ld r8,-Z - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - ld r11,-Z - ld r10,-Z - ld r9,-Z - ld r8,-Z - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - ld r11,-Z - ld r10,-Z - ld r9,-Z - ld r8,-Z - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - dec r26 - breq 6571f - rjmp 39b -6571: - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r21 - st X+,r20 - st X+,r19 - st X+,r18 - st 
X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - pop r0 - pop r0 - pop r17 - pop r16 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size pyjamask_96_decrypt, .-pyjamask_96_decrypt - - .text -.global pyjamask_128_setup_key - .type pyjamask_128_setup_key, @function -pyjamask_128_setup_key: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 18 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r26,r1 -33: - movw r12,r18 - movw r14,r20 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r12,r4 - eor r13,r5 - eor r14,r6 - eor r15,r7 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r27,202 - mov r12,r27 - ldi r27,185 - mov r13,r27 - ldi r27,129 - mov r14,r27 - ldi r27,184 - mov r15,r27 - and r12,r0 - and r13,r0 - and r14,r0 - and r15,r0 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,229 - ldi r25,220 - ldi r16,64 - ldi r17,92 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,114 - ldi r25,110 - ldi r16,32 - ldi r17,174 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,57 - ldi r25,55 - ldi r16,16 - ldi r17,87 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,156 - ldi r25,27 - ldi r16,136 - ldi r17,171 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,206 - ldi r25,13 - ldi r16,196 - ldi r17,85 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,231 - ldi r25,6 - ldi r16,226 - ldi r17,42 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,115 - ldi r25,3 - ldi r16,113 - ldi r17,149 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,185 - ldi r25,129 - ldi r16,184 - ldi r17,202 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,220 - ldi r25,64 - ldi r16,92 - ldi r17,229 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,110 - ldi r25,32 - ldi r16,174 - ldi r17,114 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor 
r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,55 - ldi r25,16 - ldi r16,87 - ldi r17,57 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,27 - ldi r25,136 - ldi r16,171 - ldi r17,156 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,13 - ldi r25,196 - ldi r16,85 - ldi r17,206 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,6 - ldi r25,226 - ldi r16,42 - ldi r17,231 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,3 - ldi r25,113 - ldi r16,149 - ldi r17,115 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,129 - ldi r25,184 - ldi r16,202 - ldi r17,185 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,64 - ldi r25,92 - ldi r16,229 - ldi r17,220 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,32 - ldi r25,174 - ldi r16,114 - ldi r17,110 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,16 - ldi r25,87 - ldi r16,57 - ldi r17,55 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,136 - ldi r25,171 - ldi r16,156 - ldi r17,27 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,196 - ldi r25,85 - ldi r16,206 - ldi r17,13 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,226 - ldi r25,42 - ldi r16,231 - ldi r17,6 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,113 - ldi r25,149 - ldi r16,115 - ldi r17,3 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,184 - ldi r25,202 - ldi r16,185 - ldi r17,129 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,92 - ldi r25,229 - ldi r16,220 - ldi r17,64 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,174 - ldi r25,114 - ldi r16,110 - ldi r17,32 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,87 - ldi r25,57 - ldi r16,55 - ldi r17,16 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,171 - ldi r25,156 - ldi r16,27 - ldi r17,136 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - 
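These constant groups sit inside the round-key loop of pyjamask_128_setup_key (label 33: above); the loop's tail, a little further down in this hunk, rotates the remaining key words, folds in fixed bytes plus the round counter, and stores the next 16-byte round key. Read at the register level, one of the 14 iterations appears to do the following. This is a hedged sketch: the key-schedule matrix constant, the rotation amounts, and the byte positions of the added constants are taken from the ldi/eor operands and mov/lsl/lsr chains as laid out here, assuming little-endian register order, not from a specification.

#include <stdint.h>

#define ROR32(x, n) (((x) >> (n)) | ((x) << (32 - (n))))

uint32_t circulant_mul(uint32_t row, uint32_t x);   /* as sketched earlier */

/* One pyjamask_128_setup_key round as this hunk appears to compute it (sketch only). */
static void pyjamask_128_key_round_sketch(uint32_t k[4], uint32_t round)
{
    /* every word absorbs the XOR of all four words */
    uint32_t t = k[0] ^ k[1] ^ k[2] ^ k[3];
    k[0] ^= t;  k[1] ^= t;  k[2] ^= t;  k[3] ^= t;

    /* first word: circulant matrix, then a 0x80 ^ round constant in the low byte */
    k[0] = circulant_mul(0xb881b9caU, k[0]);   /* constant read from the ldi bytes above */
    k[0] ^= 0x00000080U ^ round;

    /* remaining words: fixed rotations plus one constant byte each
     * (amounts and positions read from the register shuffles below; assumed) */
    k[1] = ROR32(k[1], 8)  ^ 0x00006a00U;
    k[2] = ROR32(k[2], 15) ^ 0x003f0000U;
    k[3] = ROR32(k[3], 18) ^ 0x24000000U;
}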
mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,85 - ldi r25,206 - ldi r16,13 - ldi r17,196 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,42 - ldi r25,231 - ldi r16,6 - ldi r17,226 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,149 - ldi r25,115 - ldi r16,3 - ldi r17,113 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - movw r18,r12 - movw r20,r14 - ldi r25,128 - eor r18,r25 - eor r18,r26 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - ldi r24,106 - eor r23,r24 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - lsl r4 - rol r5 - rol r6 - rol r7 - adc r4,r1 - ldi r17,63 - eor r6,r17 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - lsr r11 - ror r10 - ror r9 - ror r8 - ror r0 - or r11,r0 - ldi r16,36 - eor r11,r16 - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - inc r26 - ldi r27,14 - cpse r26,r27 - rjmp 33b - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size pyjamask_128_setup_key, .-pyjamask_128_setup_key - - .text -.global pyjamask_128_encrypt - .type pyjamask_128_encrypt, @function -pyjamask_128_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 20 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ldi r26,14 -17: - ld r12,Z+ - ld r13,Z+ - ld r14,Z+ - ld r15,Z+ - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - ld r12,Z+ - ld r13,Z+ - ld r14,Z+ - ld r15,Z+ - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ld r12,Z+ - ld r13,Z+ - ld r14,Z+ - ld r15,Z+ - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - ld r12,Z+ - ld r13,Z+ - ld r14,Z+ - ld r15,Z+ - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - mov r0,r18 - and r0,r22 - eor r8,r0 - mov r0,r19 - and r0,r23 - eor r9,r0 - mov r0,r20 - and r0,r2 - eor r10,r0 - mov r0,r21 - and r0,r3 - eor r11,r0 - mov r0,r22 - and r0,r4 - eor r18,r0 - mov r0,r23 - and r0,r5 - eor r19,r0 - mov r0,r2 - and r0,r6 - eor r20,r0 - mov r0,r3 - and r0,r7 - eor r21,r0 - mov r0,r4 - and r0,r8 - eor r22,r0 - mov r0,r5 - and r0,r9 - eor r23,r0 - mov r0,r6 - and r0,r10 - eor r2,r0 - mov r0,r7 - and r0,r11 - eor r3,r0 - mov r0,r18 - and r0,r8 - eor r4,r0 - mov r0,r19 - and r0,r9 - eor r5,r0 - mov r0,r20 - and r0,r10 - eor r6,r0 - mov r0,r21 - and r0,r11 - eor r7,r0 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - com r8 - com r9 - com r10 - com r11 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi 
r27,133 - mov r12,r27 - ldi r27,16 - mov r13,r27 - ldi r27,134 - mov r14,r27 - ldi r27,163 - mov r15,r27 - and r12,r0 - and r13,r0 - and r14,r0 - and r15,r0 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,66 - ldi r25,8 - ldi r16,195 - ldi r17,209 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,33 - ldi r25,132 - ldi r16,225 - ldi r17,104 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,16 - ldi r25,194 - ldi r16,112 - ldi r17,180 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,8 - ldi r25,97 - ldi r16,56 - ldi r17,90 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,132 - ldi r25,48 - ldi r16,28 - ldi r17,45 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,66 - ldi r25,24 - ldi r16,142 - ldi r17,22 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,33 - ldi r25,12 - ldi r16,71 - ldi r17,11 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,16 - ldi r25,134 - ldi r16,163 - ldi r17,133 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,8 - ldi r25,195 - ldi r16,209 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,132 - ldi r25,225 - ldi r16,104 - ldi r17,33 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,194 - ldi r25,112 - ldi r16,180 - ldi r17,16 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,97 - ldi r25,56 - ldi r16,90 - ldi r17,8 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,48 - ldi r25,28 - ldi r16,45 - ldi r17,132 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,24 - ldi r25,142 - ldi r16,22 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,12 - ldi r25,71 - ldi r16,11 - ldi r17,33 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,134 - ldi r25,163 - ldi r16,133 - ldi r17,16 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,195 - ldi r25,209 - ldi r16,66 - ldi r17,8 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,225 - ldi r25,104 - ldi r16,33 - 
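Just before the first of these matrix-constant groups, the round loop of pyjamask_128_encrypt (label 17: above) runs a short AND/XOR/COM sequence over the four 32-bit rows; that is the bit-sliced Pyjamask S-box, evaluated for all 32 columns at once. A direct C reading of that register sequence, with the rows r18-r21, r22/r23/r2/r3, r4-r7 and r8-r11 written as s0..s3; a sketch, not the project's source:

#include <stdint.h>

/* C reading of the bit-sliced S-box sequence at the top of the round loop
 * above; each sN is one 32-bit state row, so all 32 S-box evaluations happen
 * in parallel using only AND, XOR and NOT. */
static void sbox128_sketch(uint32_t *s0, uint32_t *s1, uint32_t *s2, uint32_t *s3)
{
    *s0 ^= *s3;
    *s3 ^= *s0 & *s1;
    *s0 ^= *s1 & *s2;
    *s1 ^= *s2 & *s3;
    *s2 ^= *s0 & *s3;
    *s2 ^= *s1;
    *s1 ^= *s0;
    *s3 = ~*s3;

    /* the three trailing row XORs in the assembly swap s2 and s3 */
    *s2 ^= *s3;
    *s3 ^= *s2;
    *s2 ^= *s3;
}

The matching AND/XOR/COM sequence near the top of this hunk, inside pyjamask_96_decrypt, appears to be the three-row inverse of the same construction.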
ldi r17,132 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,112 - ldi r25,180 - ldi r16,16 - ldi r17,194 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,56 - ldi r25,90 - ldi r16,8 - ldi r17,97 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,28 - ldi r25,45 - ldi r16,132 - ldi r17,48 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,142 - ldi r25,22 - ldi r16,66 - ldi r17,24 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,71 - ldi r25,11 - ldi r16,33 - ldi r17,12 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,163 - ldi r25,133 - ldi r16,16 - ldi r17,134 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,209 - ldi r25,66 - ldi r16,8 - ldi r17,195 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,104 - ldi r25,33 - ldi r16,132 - ldi r17,225 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,180 - ldi r25,16 - ldi r16,194 - ldi r17,112 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,90 - ldi r25,8 - ldi r16,97 - ldi r17,56 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,45 - ldi r25,132 - ldi r16,48 - ldi r17,28 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,22 - ldi r25,66 - ldi r16,24 - ldi r17,142 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,11 - ldi r25,33 - ldi r16,12 - ldi r17,71 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - movw r18,r12 - movw r20,r14 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r27,33 - mov r12,r27 - ldi r27,112 - mov r13,r27 - ldi r27,65 - mov r14,r27 - ldi r27,99 - mov r15,r27 - and r12,r0 - and r13,r0 - and r14,r0 - and r15,r0 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,16 - ldi r25,184 - ldi r16,160 - ldi r17,177 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,8 - ldi r25,92 - ldi r16,208 - ldi r17,88 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,4 - ldi r25,46 - ldi r16,104 - ldi r17,44 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,2 - ldi r25,23 - ldi r16,52 - ldi r17,22 - and 
r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,129 - ldi r25,11 - ldi r16,26 - ldi r17,11 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,192 - ldi r25,5 - ldi r16,141 - ldi r17,133 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,224 - ldi r25,130 - ldi r16,198 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,112 - ldi r25,65 - ldi r16,99 - ldi r17,33 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,184 - ldi r25,160 - ldi r16,177 - ldi r17,16 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,92 - ldi r25,208 - ldi r16,88 - ldi r17,8 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,46 - ldi r25,104 - ldi r16,44 - ldi r17,4 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,23 - ldi r25,52 - ldi r16,22 - ldi r17,2 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,11 - ldi r25,26 - ldi r16,11 - ldi r17,129 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,5 - ldi r25,141 - ldi r16,133 - ldi r17,192 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,130 - ldi r25,198 - ldi r16,66 - ldi r17,224 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,65 - ldi r25,99 - ldi r16,33 - ldi r17,112 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,160 - ldi r25,177 - ldi r16,16 - ldi r17,184 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,208 - ldi r25,88 - ldi r16,8 - ldi r17,92 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,104 - ldi r25,44 - ldi r16,4 - ldi r17,46 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,52 - ldi r25,22 - ldi r16,2 - ldi r17,23 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,26 - ldi r25,11 - ldi r16,129 - ldi r17,11 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,141 - ldi r25,133 - ldi r16,192 - ldi r17,5 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor 
r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,198 - ldi r25,66 - ldi r16,224 - ldi r17,130 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,99 - ldi r25,33 - ldi r16,112 - ldi r17,65 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,177 - ldi r25,16 - ldi r16,184 - ldi r17,160 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,88 - ldi r25,8 - ldi r16,92 - ldi r17,208 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,44 - ldi r25,4 - ldi r16,46 - ldi r17,104 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,22 - ldi r25,2 - ldi r16,23 - ldi r17,52 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,11 - ldi r25,129 - ldi r16,11 - ldi r17,26 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,133 - ldi r25,192 - ldi r16,5 - ldi r17,141 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,66 - ldi r25,224 - ldi r16,130 - ldi r17,198 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - movw r22,r12 - movw r2,r14 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r27,128 - mov r12,r27 - ldi r27,242 - mov r13,r27 - ldi r27,44 - mov r14,r27 - ldi r27,105 - mov r15,r27 - and r12,r0 - and r13,r0 - and r14,r0 - and r15,r0 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,64 - ldi r25,121 - ldi r16,150 - ldi r17,52 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,160 - ldi r25,60 - ldi r16,75 - ldi r17,26 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,80 - ldi r25,158 - ldi r16,37 - ldi r17,13 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,40 - ldi r25,207 - ldi r16,146 - ldi r17,6 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,148 - ldi r25,103 - ldi r16,73 - ldi r17,3 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,202 - ldi r25,179 - ldi r16,164 - ldi r17,1 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,229 - ldi r25,89 - ldi r16,210 - mov r17,r1 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,242 - ldi r25,44 - ldi r16,105 - ldi r17,128 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor 
r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,121 - ldi r25,150 - ldi r16,52 - ldi r17,64 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,60 - ldi r25,75 - ldi r16,26 - ldi r17,160 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,158 - ldi r25,37 - ldi r16,13 - ldi r17,80 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,207 - ldi r25,146 - ldi r16,6 - ldi r17,40 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,103 - ldi r25,73 - ldi r16,3 - ldi r17,148 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,179 - ldi r25,164 - ldi r16,1 - ldi r17,202 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,89 - ldi r25,210 - mov r16,r1 - ldi r17,229 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,44 - ldi r25,105 - ldi r16,128 - ldi r17,242 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,150 - ldi r25,52 - ldi r16,64 - ldi r17,121 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,75 - ldi r25,26 - ldi r16,160 - ldi r17,60 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,37 - ldi r25,13 - ldi r16,80 - ldi r17,158 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,146 - ldi r25,6 - ldi r16,40 - ldi r17,207 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,73 - ldi r25,3 - ldi r16,148 - ldi r17,103 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,164 - ldi r25,1 - ldi r16,202 - ldi r17,179 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,210 - mov r25,r1 - ldi r16,229 - ldi r17,89 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,105 - ldi r25,128 - ldi r16,242 - ldi r17,44 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,52 - ldi r25,64 - ldi r16,121 - ldi r17,150 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,26 - ldi r25,160 - ldi r16,60 - ldi r17,75 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - 
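Taken together, the pieces of pyjamask_128_encrypt visible in this hunk (the ldi r26,14 loop counter, the per-round key load from Z, the S-box sequence, and the four matrix-constant blocks whose first rows read as 0xa3861085, 0x63417021, 0x692cf280 and 0x48a54813) suggest the usual Pyjamask round structure. A compact sketch under that reading, reusing the helpers sketched above; names and array layout are illustrative:

#include <stdint.h>

uint32_t circulant_mul(uint32_t row, uint32_t x);                                   /* sketched earlier */
void sbox128_sketch(uint32_t *s0, uint32_t *s1, uint32_t *s2, uint32_t *s3);        /* sketched earlier */

/* Row constants as read from the ldi groups in this function. */
static const uint32_t ROW_SKETCH[4] = { 0xa3861085U, 0x63417021U, 0x692cf280U, 0x48a54813U };

/* Shape of pyjamask_128_encrypt as this hunk reads (sketch only). */
void pyjamask_128_encrypt_sketch(uint32_t s[4], const uint32_t rk[15][4])
{
    for (int r = 0; r < 14; ++r) {                       /* ldi r26,14 above */
        for (int w = 0; w < 4; ++w) s[w] ^= rk[r][w];    /* per-round key loaded from Z */
        sbox128_sketch(&s[0], &s[1], &s[2], &s[3]);      /* bit-sliced S-box */
        for (int w = 0; w < 4; ++w)
            s[w] = circulant_mul(ROW_SKETCH[w], s[w]);   /* row mixing */
    }
    for (int w = 0; w < 4; ++w) s[w] ^= rk[14][w];       /* final whitening key */
}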
lsl r4 - sbc r0,r1 - ldi r24,13 - ldi r25,80 - ldi r16,158 - ldi r17,37 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,6 - ldi r25,40 - ldi r16,207 - ldi r17,146 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,3 - ldi r25,148 - ldi r16,103 - ldi r17,73 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,1 - ldi r25,202 - ldi r16,179 - ldi r17,164 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - mov r24,r1 - ldi r25,229 - ldi r16,89 - ldi r17,210 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - movw r4,r12 - movw r6,r14 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r27,19 - mov r12,r27 - ldi r27,72 - mov r13,r27 - ldi r27,165 - mov r14,r27 - ldi r27,72 - mov r15,r27 - and r12,r0 - and r13,r0 - and r14,r0 - and r15,r0 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,9 - ldi r25,164 - ldi r16,82 - ldi r17,164 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,4 - ldi r25,82 - ldi r16,41 - ldi r17,210 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,2 - ldi r25,169 - ldi r16,20 - ldi r17,105 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,129 - ldi r25,84 - ldi r16,138 - ldi r17,52 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,64 - ldi r25,42 - ldi r16,69 - ldi r17,154 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,32 - ldi r25,149 - ldi r16,34 - ldi r17,77 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,144 - ldi r25,74 - ldi r16,145 - ldi r17,38 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,72 - ldi r25,165 - ldi r16,72 - ldi r17,19 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,164 - ldi r25,82 - ldi r16,164 - ldi r17,9 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,82 - ldi r25,41 - ldi r16,210 - ldi r17,4 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,169 - ldi r25,20 - ldi r16,105 - ldi r17,2 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,84 - ldi r25,138 - ldi r16,52 - ldi r17,129 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc 
r0,r1 - ldi r24,42 - ldi r25,69 - ldi r16,154 - ldi r17,64 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,149 - ldi r25,34 - ldi r16,77 - ldi r17,32 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,74 - ldi r25,145 - ldi r16,38 - ldi r17,144 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,165 - ldi r25,72 - ldi r16,19 - ldi r17,72 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,82 - ldi r25,164 - ldi r16,9 - ldi r17,164 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,41 - ldi r25,210 - ldi r16,4 - ldi r17,82 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,20 - ldi r25,105 - ldi r16,2 - ldi r17,169 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,138 - ldi r25,52 - ldi r16,129 - ldi r17,84 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,69 - ldi r25,154 - ldi r16,64 - ldi r17,42 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,34 - ldi r25,77 - ldi r16,32 - ldi r17,149 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,145 - ldi r25,38 - ldi r16,144 - ldi r17,74 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,72 - ldi r25,19 - ldi r16,72 - ldi r17,165 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,164 - ldi r25,9 - ldi r16,164 - ldi r17,82 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,210 - ldi r25,4 - ldi r16,82 - ldi r17,41 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,105 - ldi r25,2 - ldi r16,169 - ldi r17,20 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,52 - ldi r25,129 - ldi r16,84 - ldi r17,138 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,154 - ldi r25,64 - ldi r16,42 - ldi r17,69 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,77 - ldi r25,32 - ldi r16,149 - ldi r17,34 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,38 - ldi r25,144 - ldi r16,74 - ldi 
r17,145 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - movw r8,r12 - movw r10,r14 - dec r26 - breq 7055f - rjmp 17b -7055: - ld r12,Z+ - ld r13,Z+ - ld r14,Z+ - ld r15,Z+ - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - ld r12,Z+ - ld r13,Z+ - ld r14,Z+ - ld r15,Z+ - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ld r12,Z+ - ld r13,Z+ - ld r14,Z+ - ld r15,Z+ - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - ld r12,Z+ - ld r13,Z+ - ld r14,Z+ - ld r15,Z+ - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r21 - st X+,r20 - st X+,r19 - st X+,r18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - pop r0 - pop r0 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size pyjamask_128_encrypt, .-pyjamask_128_encrypt - - .text -.global pyjamask_128_decrypt - .type pyjamask_128_decrypt, @function -pyjamask_128_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 20 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - subi r30,16 - sbci r31,255 - ld r13,-Z - ld r12,-Z - ld r27,-Z - ld r26,-Z - eor r8,r26 - eor r9,r27 - eor r10,r12 - eor r11,r13 - ld r13,-Z - ld r12,-Z - ld r27,-Z - ld r26,-Z - eor r4,r26 - eor r5,r27 - eor r6,r12 - eor r7,r13 - ld r13,-Z - ld r12,-Z - ld r27,-Z - ld r26,-Z - eor r22,r26 - eor r23,r27 - eor r2,r12 - eor r3,r13 - ld r13,-Z - ld r12,-Z - ld r27,-Z - ld r26,-Z - eor r18,r26 - eor r19,r27 - eor r20,r12 - eor r21,r13 - ldi r26,14 -51: - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r27,33 - mov r12,r27 - ldi r27,161 - mov r13,r27 - ldi r27,55 - mov r14,r27 - ldi r27,32 - mov r15,r27 - and r12,r0 - and r13,r0 - and r14,r0 - and r15,r0 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,144 - ldi r25,208 - ldi r16,27 - ldi r17,144 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,72 - ldi r25,232 - ldi r16,13 - ldi r17,72 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,36 - ldi r25,244 - ldi r16,6 - ldi r17,36 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,18 - ldi r25,122 - ldi r16,3 - ldi r17,18 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,9 - ldi r25,189 - ldi r16,1 - ldi r17,9 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,132 - ldi r25,222 - ldi r16,128 - ldi r17,132 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r21 - sbc r0,r1 - ldi r24,66 - ldi r25,111 - ldi r16,64 - ldi r17,66 - and r24,r0 
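pyjamask_128_decrypt, which starts in the lines above, first advances Z past the whole key schedule, undoes the final key addition with the last round key (the ld -Z reads), and then runs 14 iterations (ldi r26,14, label 51:) whose constant groups are the first rows of the inverse mixing matrices, starting with 0x2037a121 for row 0. A hedged sketch of that structure, assuming the inverse S-box and the remaining inverse-row constants follow later in the function; the helper names and the table are placeholders:

#include <stdint.h>

uint32_t circulant_mul(uint32_t row, uint32_t x);       /* sketched earlier */
void inverse_sbox128(uint32_t s[4]);                    /* hypothetical helper */
extern const uint32_t INV_ROW_SKETCH[4];                /* 0x2037a121, ... as loaded here */

/* Shape of pyjamask_128_decrypt as this hunk reads (sketch only). */
void pyjamask_128_decrypt_sketch(uint32_t s[4], const uint32_t rk[15][4])
{
    for (int w = 0; w < 4; ++w) s[w] ^= rk[14][w];          /* last round key first */
    for (int r = 13; r >= 0; --r) {
        for (int w = 0; w < 4; ++w)
            s[w] = circulant_mul(INV_ROW_SKETCH[w], s[w]);  /* inverse row mixing */
        inverse_sbox128(s);                                 /* inverse bit-sliced S-box */
        for (int w = 0; w < 4; ++w) s[w] ^= rk[r][w];       /* round keys in reverse order */
    }
}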
- and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,161 - ldi r25,55 - ldi r16,32 - ldi r17,33 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,208 - ldi r25,27 - ldi r16,144 - ldi r17,144 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,232 - ldi r25,13 - ldi r16,72 - ldi r17,72 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,244 - ldi r25,6 - ldi r16,36 - ldi r17,36 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,122 - ldi r25,3 - ldi r16,18 - ldi r17,18 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,189 - ldi r25,1 - ldi r16,9 - ldi r17,9 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,222 - ldi r25,128 - ldi r16,132 - ldi r17,132 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r20 - sbc r0,r1 - ldi r24,111 - ldi r25,64 - ldi r16,66 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,55 - ldi r25,32 - ldi r16,33 - ldi r17,161 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,27 - ldi r25,144 - ldi r16,144 - ldi r17,208 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,13 - ldi r25,72 - ldi r16,72 - ldi r17,232 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,6 - ldi r25,36 - ldi r16,36 - ldi r17,244 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,3 - ldi r25,18 - ldi r16,18 - ldi r17,122 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,1 - ldi r25,9 - ldi r16,9 - ldi r17,189 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,128 - ldi r25,132 - ldi r16,132 - ldi r17,222 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r19 - sbc r0,r1 - ldi r24,64 - ldi r25,66 - ldi r16,66 - ldi r17,111 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,32 - ldi r25,33 - ldi r16,161 - ldi r17,55 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,144 - ldi r25,144 - ldi r16,208 - ldi r17,27 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - 
eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,72 - ldi r25,72 - ldi r16,232 - ldi r17,13 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,36 - ldi r25,36 - ldi r16,244 - ldi r17,6 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,18 - ldi r25,18 - ldi r16,122 - ldi r17,3 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,9 - ldi r25,9 - ldi r16,189 - ldi r17,1 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,132 - ldi r25,132 - ldi r16,222 - ldi r17,128 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r18 - sbc r0,r1 - ldi r24,66 - ldi r25,66 - ldi r16,111 - ldi r17,64 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - movw r18,r12 - movw r20,r14 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r27,160 - mov r12,r27 - ldi r27,242 - mov r13,r27 - ldi r27,143 - mov r14,r27 - ldi r27,16 - mov r15,r27 - and r12,r0 - and r13,r0 - and r14,r0 - and r15,r0 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,80 - ldi r25,249 - ldi r16,71 - ldi r17,8 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,168 - ldi r25,252 - ldi r16,35 - ldi r17,4 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,84 - ldi r25,254 - ldi r16,17 - ldi r17,2 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,42 - ldi r25,255 - ldi r16,8 - ldi r17,1 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,149 - ldi r25,127 - ldi r16,132 - mov r17,r1 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,202 - ldi r25,63 - ldi r16,66 - ldi r17,128 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r3 - sbc r0,r1 - ldi r24,229 - ldi r25,31 - ldi r16,33 - ldi r17,64 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,242 - ldi r25,143 - ldi r16,16 - ldi r17,160 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,249 - ldi r25,71 - ldi r16,8 - ldi r17,80 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,252 - ldi r25,35 - ldi r16,4 - ldi r17,168 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,254 - ldi r25,17 - ldi r16,2 - ldi r17,84 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - 
eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,255 - ldi r25,8 - ldi r16,1 - ldi r17,42 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,127 - ldi r25,132 - mov r16,r1 - ldi r17,149 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,63 - ldi r25,66 - ldi r16,128 - ldi r17,202 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r2 - sbc r0,r1 - ldi r24,31 - ldi r25,33 - ldi r16,64 - ldi r17,229 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,143 - ldi r25,16 - ldi r16,160 - ldi r17,242 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,71 - ldi r25,8 - ldi r16,80 - ldi r17,249 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,35 - ldi r25,4 - ldi r16,168 - ldi r17,252 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,17 - ldi r25,2 - ldi r16,84 - ldi r17,254 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,8 - ldi r25,1 - ldi r16,42 - ldi r17,255 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,132 - mov r25,r1 - ldi r16,149 - ldi r17,127 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,66 - ldi r25,128 - ldi r16,202 - ldi r17,63 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r23 - sbc r0,r1 - ldi r24,33 - ldi r25,64 - ldi r16,229 - ldi r17,31 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,16 - ldi r25,160 - ldi r16,242 - ldi r17,143 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,8 - ldi r25,80 - ldi r16,249 - ldi r17,71 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,4 - ldi r25,168 - ldi r16,252 - ldi r17,35 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,2 - ldi r25,84 - ldi r16,254 - ldi r17,17 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,1 - ldi r25,42 - ldi r16,255 - ldi r17,8 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - mov r24,r1 - ldi r25,149 - ldi r16,127 - ldi r17,132 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl 
r22 - sbc r0,r1 - ldi r24,128 - ldi r25,202 - ldi r16,63 - ldi r17,66 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r22 - sbc r0,r1 - ldi r24,64 - ldi r25,229 - ldi r16,31 - ldi r17,33 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - movw r22,r12 - movw r2,r14 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r27,192 - mov r12,r27 - ldi r27,216 - mov r13,r27 - ldi r27,84 - mov r14,r27 - ldi r27,144 - mov r15,r27 - and r12,r0 - and r13,r0 - and r14,r0 - and r15,r0 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,96 - ldi r25,108 - ldi r16,42 - ldi r17,72 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,48 - ldi r25,54 - ldi r16,21 - ldi r17,36 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,24 - ldi r25,155 - ldi r16,10 - ldi r17,18 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,140 - ldi r25,77 - ldi r16,5 - ldi r17,9 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,198 - ldi r25,166 - ldi r16,130 - ldi r17,4 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,99 - ldi r25,83 - ldi r16,65 - ldi r17,2 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r7 - sbc r0,r1 - ldi r24,177 - ldi r25,169 - ldi r16,32 - ldi r17,129 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,216 - ldi r25,84 - ldi r16,144 - ldi r17,192 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,108 - ldi r25,42 - ldi r16,72 - ldi r17,96 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,54 - ldi r25,21 - ldi r16,36 - ldi r17,48 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,155 - ldi r25,10 - ldi r16,18 - ldi r17,24 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,77 - ldi r25,5 - ldi r16,9 - ldi r17,140 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,166 - ldi r25,130 - ldi r16,4 - ldi r17,198 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,83 - ldi r25,65 - ldi r16,2 - ldi r17,99 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r6 - sbc r0,r1 - ldi r24,169 - ldi r25,32 - ldi r16,129 - ldi r17,177 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,84 - 
ldi r25,144 - ldi r16,192 - ldi r17,216 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,42 - ldi r25,72 - ldi r16,96 - ldi r17,108 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,21 - ldi r25,36 - ldi r16,48 - ldi r17,54 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,10 - ldi r25,18 - ldi r16,24 - ldi r17,155 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,5 - ldi r25,9 - ldi r16,140 - ldi r17,77 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,130 - ldi r25,4 - ldi r16,198 - ldi r17,166 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,65 - ldi r25,2 - ldi r16,99 - ldi r17,83 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r5 - sbc r0,r1 - ldi r24,32 - ldi r25,129 - ldi r16,177 - ldi r17,169 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,144 - ldi r25,192 - ldi r16,216 - ldi r17,84 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,72 - ldi r25,96 - ldi r16,108 - ldi r17,42 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,36 - ldi r25,48 - ldi r16,54 - ldi r17,21 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,18 - ldi r25,24 - ldi r16,155 - ldi r17,10 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,9 - ldi r25,140 - ldi r16,77 - ldi r17,5 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,4 - ldi r25,198 - ldi r16,166 - ldi r17,130 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,2 - ldi r25,99 - ldi r16,83 - ldi r17,65 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r4 - sbc r0,r1 - ldi r24,129 - ldi r25,177 - ldi r16,169 - ldi r17,32 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - movw r4,r12 - movw r6,r14 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r27,23 - mov r12,r27 - ldi r27,177 - mov r13,r27 - ldi r27,84 - mov r14,r27 - ldi r27,51 - mov r15,r27 - and r12,r0 - and r13,r0 - and r14,r0 - and r15,r0 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,139 - ldi r25,88 - ldi r16,170 - ldi r17,153 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,69 - ldi r25,44 - ldi r16,213 - 
ldi r17,204 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,34 - ldi r25,150 - ldi r16,106 - ldi r17,230 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,17 - ldi r25,75 - ldi r16,53 - ldi r17,115 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,136 - ldi r25,165 - ldi r16,154 - ldi r17,185 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,196 - ldi r25,82 - ldi r16,205 - ldi r17,92 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r11 - sbc r0,r1 - ldi r24,98 - ldi r25,169 - ldi r16,102 - ldi r17,46 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,177 - ldi r25,84 - ldi r16,51 - ldi r17,23 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,88 - ldi r25,170 - ldi r16,153 - ldi r17,139 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,44 - ldi r25,213 - ldi r16,204 - ldi r17,69 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,150 - ldi r25,106 - ldi r16,230 - ldi r17,34 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,75 - ldi r25,53 - ldi r16,115 - ldi r17,17 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,165 - ldi r25,154 - ldi r16,185 - ldi r17,136 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,82 - ldi r25,205 - ldi r16,92 - ldi r17,196 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r10 - sbc r0,r1 - ldi r24,169 - ldi r25,102 - ldi r16,46 - ldi r17,98 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,84 - ldi r25,51 - ldi r16,23 - ldi r17,177 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,170 - ldi r25,153 - ldi r16,139 - ldi r17,88 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,213 - ldi r25,204 - ldi r16,69 - ldi r17,44 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,106 - ldi r25,230 - ldi r16,34 - ldi r17,150 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,53 - ldi r25,115 - ldi r16,17 - ldi r17,75 - and r24,r0 - 
and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,154 - ldi r25,185 - ldi r16,136 - ldi r17,165 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,205 - ldi r25,92 - ldi r16,196 - ldi r17,82 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r9 - sbc r0,r1 - ldi r24,102 - ldi r25,46 - ldi r16,98 - ldi r17,169 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,51 - ldi r25,23 - ldi r16,177 - ldi r17,84 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,153 - ldi r25,139 - ldi r16,88 - ldi r17,170 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,204 - ldi r25,69 - ldi r16,44 - ldi r17,213 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,230 - ldi r25,34 - ldi r16,150 - ldi r17,106 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,115 - ldi r25,17 - ldi r16,75 - ldi r17,53 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,185 - ldi r25,136 - ldi r16,165 - ldi r17,154 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,92 - ldi r25,196 - ldi r16,82 - ldi r17,205 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - mov r0,r1 - lsl r8 - sbc r0,r1 - ldi r24,46 - ldi r25,98 - ldi r16,169 - ldi r17,102 - and r24,r0 - and r25,r0 - and r16,r0 - and r17,r0 - eor r12,r24 - eor r13,r25 - eor r14,r16 - eor r15,r17 - movw r8,r12 - movw r10,r14 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - com r8 - com r9 - com r10 - com r11 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - mov r0,r18 - and r0,r8 - eor r4,r0 - mov r0,r19 - and r0,r9 - eor r5,r0 - mov r0,r20 - and r0,r10 - eor r6,r0 - mov r0,r21 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r8 - eor r22,r0 - mov r0,r5 - and r0,r9 - eor r23,r0 - mov r0,r6 - and r0,r10 - eor r2,r0 - mov r0,r7 - and r0,r11 - eor r3,r0 - mov r0,r22 - and r0,r4 - eor r18,r0 - mov r0,r23 - and r0,r5 - eor r19,r0 - mov r0,r2 - and r0,r6 - eor r20,r0 - mov r0,r3 - and r0,r7 - eor r21,r0 - mov r0,r18 - and r0,r22 - eor r8,r0 - mov r0,r19 - and r0,r23 - eor r9,r0 - mov r0,r20 - and r0,r2 - eor r10,r0 - mov r0,r21 - and r0,r3 - eor r11,r0 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - ld r15,-Z - ld r14,-Z - ld r13,-Z - ld r12,-Z - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - ld r15,-Z - ld r14,-Z - ld r13,-Z - ld r12,-Z - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - ld r15,-Z - ld r14,-Z - ld r13,-Z - ld r12,-Z - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ld r15,-Z - ld r14,-Z - ld 
r13,-Z - ld r12,-Z - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - dec r26 - breq 7089f - rjmp 51b -7089: - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r21 - st X+,r20 - st X+,r19 - st X+,r18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - pop r0 - pop r0 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size pyjamask_128_decrypt, .-pyjamask_128_decrypt - -#endif diff --git a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-pyjamask.c b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-pyjamask.c deleted file mode 100644 index 3c40d2d..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-pyjamask.c +++ /dev/null @@ -1,356 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-pyjamask.h" -#include "internal-util.h" - -#if !defined(__AVR__) - -/** - * \brief Performs a circulant binary matrix multiplication. - * - * \param x The matrix. - * \param y The vector to multiply with the matrix. - * - * \return The vector result of multiplying x by y. - */ -STATIC_INLINE uint32_t pyjamask_matrix_multiply(uint32_t x, uint32_t y) -{ - uint32_t result = 0; - int bit; - for (bit = 31; bit >= 0; --bit) { -#if defined(ESP32) - /* This version has slightly better performance on ESP32 */ - y = leftRotate1(y); - result ^= x & -(y & 1); - x = rightRotate1(x); -#else - result ^= x & -((y >> bit) & 1); - x = rightRotate1(x); -#endif - } - return result; -} - -void pyjamask_128_setup_key - (pyjamask_128_key_schedule_t *ks, const unsigned char *key) -{ - uint32_t *rk = ks->k; - uint32_t k0, k1, k2, k3; - uint32_t temp; - uint8_t round; - - /* Load the words of the key */ - k0 = be_load_word32(key); - k1 = be_load_word32(key + 4); - k2 = be_load_word32(key + 8); - k3 = be_load_word32(key + 12); - - /* The first round key is the same as the key itself */ - rk[0] = k0; - rk[1] = k1; - rk[2] = k2; - rk[3] = k3; - rk += 4; - - /* Derive the round keys for all of the other rounds */ - for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk += 4) { - /* Mix the columns */ - temp = k0 ^ k1 ^ k2 ^ k3; - k0 ^= temp; - k1 ^= temp; - k2 ^= temp; - k3 ^= temp; - - /* Mix the rows and add the round constants. 
Note that the Pyjamask - * specification says that k1/k2/k3 should be rotated left by 8, 15, - * and 18 bits. But the reference code actually rotates the words - * right. And the test vectors in the specification match up with - * right rotations, not left. We match the reference code here */ - k0 = pyjamask_matrix_multiply(0xb881b9caU, k0) ^ 0x00000080U ^ round; - k1 = rightRotate8(k1) ^ 0x00006a00U; - k2 = rightRotate15(k2) ^ 0x003f0000U; - k3 = rightRotate18(k3) ^ 0x24000000U; - - /* Write the round key to the schedule */ - rk[0] = k0; - rk[1] = k1; - rk[2] = k2; - rk[3] = k3; - } -} - -void pyjamask_96_setup_key - (pyjamask_96_key_schedule_t *ks, const unsigned char *key) -{ - uint32_t *rk = ks->k; - uint32_t k0, k1, k2, k3; - uint32_t temp; - uint8_t round; - - /* Load the words of the key */ - k0 = be_load_word32(key); - k1 = be_load_word32(key + 4); - k2 = be_load_word32(key + 8); - k3 = be_load_word32(key + 12); - - /* The first round key is the same as the key itself */ - rk[0] = k0; - rk[1] = k1; - rk[2] = k2; - rk += 3; - - /* Derive the round keys for all of the other rounds */ - for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk += 3) { - /* Mix the columns */ - temp = k0 ^ k1 ^ k2 ^ k3; - k0 ^= temp; - k1 ^= temp; - k2 ^= temp; - k3 ^= temp; - - /* Mix the rows and add the round constants. Note that the Pyjamask - * specification says that k1/k2/k3 should be rotated left by 8, 15, - * and 18 bits. But the reference code actually rotates the words - * right. And the test vectors in the specification match up with - * right rotations, not left. We match the reference code here */ - k0 = pyjamask_matrix_multiply(0xb881b9caU, k0) ^ 0x00000080U ^ round; - k1 = rightRotate8(k1) ^ 0x00006a00U; - k2 = rightRotate15(k2) ^ 0x003f0000U; - k3 = rightRotate18(k3) ^ 0x24000000U; - - /* Write the round key to the schedule */ - rk[0] = k0; - rk[1] = k1; - rk[2] = k2; - } -} - -void pyjamask_128_encrypt - (const pyjamask_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - const uint32_t *rk = ks->k; - uint32_t s0, s1, s2, s3; - uint8_t round; - - /* Load the plaintext from the input buffer */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all encryption rounds */ - for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk += 4) { - /* Add the round key to the state */ - s0 ^= rk[0]; - s1 ^= rk[1]; - s2 ^= rk[2]; - s3 ^= rk[3]; - - /* Apply the 128-bit Pyjamask sbox */ - s0 ^= s3; - s3 ^= s0 & s1; - s0 ^= s1 & s2; - s1 ^= s2 & s3; - s2 ^= s0 & s3; - s2 ^= s1; - s1 ^= s0; - s3 = ~s3; - s2 ^= s3; - s3 ^= s2; - s2 ^= s3; - - /* Mix the rows of the state */ - s0 = pyjamask_matrix_multiply(0xa3861085U, s0); - s1 = pyjamask_matrix_multiply(0x63417021U, s1); - s2 = pyjamask_matrix_multiply(0x692cf280U, s2); - s3 = pyjamask_matrix_multiply(0x48a54813U, s3); - } - - /* Mix in the key one last time */ - s0 ^= rk[0]; - s1 ^= rk[1]; - s2 ^= rk[2]; - s3 ^= rk[3]; - - /* Write the ciphertext to the output buffer */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void pyjamask_128_decrypt - (const pyjamask_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - const uint32_t *rk = ks->k + 4 * PYJAMASK_ROUNDS; - uint32_t s0, s1, s2, s3; - uint8_t round; - - /* Load the ciphertext from the input buffer */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 
4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Mix in the last round key */ - s0 ^= rk[0]; - s1 ^= rk[1]; - s2 ^= rk[2]; - s3 ^= rk[3]; - rk -= 4; - - /* Perform all decryption rounds */ - for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk -= 4) { - /* Inverse mix of the rows in the state */ - s0 = pyjamask_matrix_multiply(0x2037a121U, s0); - s1 = pyjamask_matrix_multiply(0x108ff2a0U, s1); - s2 = pyjamask_matrix_multiply(0x9054d8c0U, s2); - s3 = pyjamask_matrix_multiply(0x3354b117U, s3); - - /* Apply the inverse of the 128-bit Pyjamask sbox */ - s2 ^= s3; - s3 ^= s2; - s2 ^= s3; - s3 = ~s3; - s1 ^= s0; - s2 ^= s1; - s2 ^= s0 & s3; - s1 ^= s2 & s3; - s0 ^= s1 & s2; - s3 ^= s0 & s1; - s0 ^= s3; - - /* Add the round key to the state */ - s0 ^= rk[0]; - s1 ^= rk[1]; - s2 ^= rk[2]; - s3 ^= rk[3]; - } - - /* Write the plaintext to the output buffer */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void pyjamask_96_encrypt - (const pyjamask_96_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - const uint32_t *rk = ks->k; - uint32_t s0, s1, s2; - uint8_t round; - - /* Load the plaintext from the input buffer */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - - /* Perform all encryption rounds */ - for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk += 3) { - /* Add the round key to the state */ - s0 ^= rk[0]; - s1 ^= rk[1]; - s2 ^= rk[2]; - - /* Apply the 96-bit Pyjamask sbox */ - s0 ^= s1; - s1 ^= s2; - s2 ^= s0 & s1; - s0 ^= s1 & s2; - s1 ^= s0 & s2; - s2 ^= s0; - s2 = ~s2; - s1 ^= s0; - s0 ^= s1; - - /* Mix the rows of the state */ - s0 = pyjamask_matrix_multiply(0xa3861085U, s0); - s1 = pyjamask_matrix_multiply(0x63417021U, s1); - s2 = pyjamask_matrix_multiply(0x692cf280U, s2); - } - - /* Mix in the key one last time */ - s0 ^= rk[0]; - s1 ^= rk[1]; - s2 ^= rk[2]; - - /* Write the ciphertext to the output buffer */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); -} - -void pyjamask_96_decrypt - (const pyjamask_96_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - const uint32_t *rk = ks->k + 3 * PYJAMASK_ROUNDS; - uint32_t s0, s1, s2; - uint8_t round; - - /* Load the plaintext from the input buffer */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - - /* Mix in the last round key */ - s0 ^= rk[0]; - s1 ^= rk[1]; - s2 ^= rk[2]; - rk -= 3; - - /* Perform all encryption rounds */ - for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk -= 3) { - /* Inverse mix of the rows in the state */ - s0 = pyjamask_matrix_multiply(0x2037a121U, s0); - s1 = pyjamask_matrix_multiply(0x108ff2a0U, s1); - s2 = pyjamask_matrix_multiply(0x9054d8c0U, s2); - - /* Apply the inverse of the 96-bit Pyjamask sbox */ - s0 ^= s1; - s1 ^= s0; - s2 = ~s2; - s2 ^= s0; - s1 ^= s0 & s2; - s0 ^= s1 & s2; - s2 ^= s0 & s1; - s1 ^= s2; - s0 ^= s1; - - /* Add the round key to the state */ - s0 ^= rk[0]; - s1 ^= rk[1]; - s2 ^= rk[2]; - } - - /* Write the ciphertext to the output buffer */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); -} - -#endif /* !__AVR__ */ diff --git a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-pyjamask.h b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-pyjamask.h deleted 
file mode 100644 index 3ead7fb..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-pyjamask.h +++ /dev/null @@ -1,253 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_PYJAMASK_H -#define LW_INTERNAL_PYJAMASK_H - -#include "internal-util.h" - -/** - * \file internal-pyjamask.h - * \brief Pyjamask block cipher. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Number of rounds in the Pyjamask block cipher. - */ -#define PYJAMASK_ROUNDS 14 - -/** - * \brief Number of parallel states for masked operation. - */ -#define PYJAMASK_MASKING_ORDER 4 - -/** - * \brief Structure of the key schedule for the Pyjamask-128 block cipher. - */ -typedef struct -{ - uint32_t k[(PYJAMASK_ROUNDS + 1) * 4]; /**< Words of the key schedule */ - -} pyjamask_128_key_schedule_t; - -/** - * \brief Structure of the key schedule for the Pyjamask-96 block cipher. - */ -typedef struct -{ - uint32_t k[(PYJAMASK_ROUNDS + 1) * 3]; /**< Words of the key schedule */ - -} pyjamask_96_key_schedule_t; - -/** - * \brief Structure of the key schedule for masked Pyjamask-128. - */ -typedef struct -{ - /** Words of the key schedule */ - uint32_t k[PYJAMASK_MASKING_ORDER * (PYJAMASK_ROUNDS + 1) * 4]; - -} pyjamask_masked_128_key_schedule_t; - -/** - * \brief Structure of the key schedule for masked Pyjamask-96. - */ -typedef struct -{ - /** Words of the key schedule */ - uint32_t k[PYJAMASK_MASKING_ORDER * (PYJAMASK_ROUNDS + 1) * 3]; - -} pyjamask_masked_96_key_schedule_t; - -/** - * \brief Sets up the key schedule for the Pyjamask-128 block cipher. - * - * \param ks The key schedule on output. - * \param key The 16 bytes of the key on input. - */ -void pyjamask_128_setup_key - (pyjamask_128_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Sets up the key schedule for the Pyjamask-96 block cipher. - * - * \param ks The key schedule on output. - * \param key The 16 bytes of the key on input. - */ -void pyjamask_96_setup_key - (pyjamask_96_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Encrypts a 128-bit block with Pyjamask-128. - * - * \param ks Points to the key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. 
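
As a minimal usage sketch for the block-cipher API declared above (this is not part of the deleted sources; the key and block bytes are arbitrary placeholders, and internal-pyjamask.h from this tree is assumed to be on the include path):

    #include <stdio.h>
    #include "internal-pyjamask.h"

    int main(void)
    {
        /* Expand the key schedule once, then encrypt and decrypt a single
         * 16-byte block in place, as the doc comments above allow. */
        pyjamask_128_key_schedule_t ks;
        unsigned char key[16] = {0};                  /* placeholder key */
        unsigned char block[16] = "0123456789abcde";  /* 15 chars + NUL = 16 bytes */

        pyjamask_128_setup_key(&ks, key);
        pyjamask_128_encrypt(&ks, block, block);      /* in-place encryption */
        pyjamask_128_decrypt(&ks, block, block);      /* restores the original block */

        printf("%s\n", block);                        /* prints the original contents */
        return 0;
    }
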
- * - * \sa pyjamask_128_decrypt() - */ -void pyjamask_128_encrypt - (const pyjamask_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with Pyjamask-128. - * - * \param ks Points to the key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - * - * \sa pyjamask_128_encrypt() - */ -void pyjamask_128_decrypt - (const pyjamask_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 96-bit block with Pyjamask-96. - * - * \param ks Points to the key schedule. - * \param output Output buffer which must be at least 12 bytes in length. - * \param input Input buffer which must be at least 12 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * \sa pyjamask_96_decrypt() - */ -void pyjamask_96_encrypt - (const pyjamask_96_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 96-bit block with Pyjamask-96. - * - * \param ks Points to the key schedule. - * \param output Output buffer which must be at least 12 bytes in length. - * \param input Input buffer which must be at least 12 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - * - * \sa pyjamask_96_encrypt() - */ -void pyjamask_96_decrypt - (const pyjamask_96_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Sets up the key schedule for the masked Pyjamask-128 block cipher. - * - * \param ks The key schedule on output. - * \param key The 16 bytes of the key on input. - */ -void pyjamask_masked_128_setup_key - (pyjamask_masked_128_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Sets up the key schedule for the masked Pyjamask-96 block cipher. - * - * \param ks The key schedule on output. - * \param key The 16 bytes of the key on input. - */ -void pyjamask_masked_96_setup_key - (pyjamask_masked_96_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Encrypts a 128-bit block with Pyjamask-128 in masked mode. - * - * \param ks Points to the key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * \sa pyjamask_masked_128_decrypt() - */ -void pyjamask_masked_128_encrypt - (const pyjamask_masked_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with Pyjamask-128 in masked mode. - * - * \param ks Points to the key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - * - * \sa pyjamask_masked_128_encrypt() - */ -void pyjamask_masked_128_decrypt - (const pyjamask_masked_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 96-bit block with Pyjamask-96 in masked mode. - * - * \param ks Points to the key schedule. 
- * \param output Output buffer which must be at least 12 bytes in length. - * \param input Input buffer which must be at least 12 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * \sa pyjamask_masked_96_decrypt() - */ -void pyjamask_masked_96_encrypt - (const pyjamask_masked_96_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 96-bit block with Pyjamask-96 in masked mode. - * - * \param ks Points to the key schedule. - * \param output Output buffer which must be at least 12 bytes in length. - * \param input Input buffer which must be at least 12 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - * - * \sa pyjamask_masked_96_encrypt() - */ -void pyjamask_masked_96_decrypt - (const pyjamask_masked_96_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-util.h b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/pyjamask-96.c b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/pyjamask-96.c deleted file mode 100644 index 37f508d..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/pyjamask-96.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "pyjamask.h" -#include "internal-pyjamask.h" - -aead_cipher_t const pyjamask_96_cipher = { - "Pyjamask-96-AEAD", - PYJAMASK_96_KEY_SIZE, - PYJAMASK_96_NONCE_SIZE, - PYJAMASK_96_TAG_SIZE, - AEAD_FLAG_NONE, - pyjamask_96_aead_encrypt, - pyjamask_96_aead_decrypt -}; - -#define OCB_ALG_NAME pyjamask_96 -#define OCB_BLOCK_SIZE 12 -#define OCB_NONCE_SIZE PYJAMASK_96_NONCE_SIZE -#define OCB_TAG_SIZE PYJAMASK_96_TAG_SIZE -#define OCB_KEY_SCHEDULE pyjamask_96_key_schedule_t -#define OCB_SETUP_KEY pyjamask_96_setup_key -#define OCB_ENCRYPT_BLOCK pyjamask_96_encrypt -#define OCB_DECRYPT_BLOCK pyjamask_96_decrypt -#include "internal-ocb.h" diff --git a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/pyjamask.h b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/pyjamask.h deleted file mode 100644 index 23ec744..0000000 --- a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys-avr/pyjamask.h +++ /dev/null @@ -1,335 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
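
The pyjamask-96.c wrapper above instantiates a generic OCB mode by defining the OCB_* parameter macros and then including internal-ocb.h, which expands into concrete pyjamask_96_aead_encrypt/decrypt functions. The actual contents of internal-ocb.h are not part of this patch; the fragment below is only a schematic sketch of that include-as-template idea, with hypothetical token-pasting helpers and an elided OCB body:

    /* Schematic only -- not the real internal-ocb.h. */
    #define OCB_CONCAT_(a, b) a##b
    #define OCB_CONCAT(a, b) OCB_CONCAT_(a, b)

    /* Expands to pyjamask_96_aead_encrypt when OCB_ALG_NAME is pyjamask_96. */
    int OCB_CONCAT(OCB_ALG_NAME, _aead_encrypt)
        (unsigned char *c, unsigned long long *clen,
         const unsigned char *m, unsigned long long mlen,
         const unsigned char *ad, unsigned long long adlen,
         const unsigned char *nsec,
         const unsigned char *npub,
         const unsigned char *k)
    {
        OCB_KEY_SCHEDULE ks;          /* pyjamask_96_key_schedule_t */
        OCB_SETUP_KEY(&ks, k);        /* pyjamask_96_setup_key */
        /* ... offset/checksum processing in OCB_BLOCK_SIZE chunks via
         *     OCB_ENCRYPT_BLOCK, then an OCB_TAG_SIZE byte tag ... */
        (void)c; (void)clen; (void)m; (void)mlen;
        (void)ad; (void)adlen; (void)nsec; (void)npub;
        return 0;
    }
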
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_PYJAMASK_H -#define LWCRYPTO_PYJAMASK_H - -#include "aead-common.h" - -/** - * \file pyjamask.h - * \brief Pyjamask authenticated encryption algorithm. - * - * Pyjamask AEAD is a family of authenticated encryption algorithms that are - * built around the Pyjamask-128 and Pyjamask-96 block ciphers in OCB mode. - * Pyjamask-128-AEAD has a 128-bit key, a 96-bit nonce, and a 128-bit - * authentication tag. Pyjamask-96-AEAD has a 128-bit key, a 64-bit nonce, - * and a 96-bit authentication tag. - * - * References: https://pyjamask-cipher.github.io/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for Pyjamask-128-AEAD. - */ -#define PYJAMASK_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Pyjamask-128-AEAD. - */ -#define PYJAMASK_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Pyjamask-128-AEAD. - */ -#define PYJAMASK_128_NONCE_SIZE 12 - -/** - * \brief Size of the key for Pyjamask-96-AEAD. - */ -#define PYJAMASK_96_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Pyjamask-96-AEAD. - */ -#define PYJAMASK_96_TAG_SIZE 12 - -/** - * \brief Size of the nonce for Pyjamask-96-AEAD. - */ -#define PYJAMASK_96_NONCE_SIZE 8 - -/** - * \brief Meta-information block for the Pyjamask-128-AEAD cipher. - */ -extern aead_cipher_t const pyjamask_128_cipher; - -/** - * \brief Meta-information block for the Pyjamask-96-AEAD cipher. - */ -extern aead_cipher_t const pyjamask_96_cipher; - -/** - * \brief Meta-information block for the masked Pyjamask-128-AEAD cipher. - */ -extern aead_cipher_t const pyjamask_masked_128_cipher; - -/** - * \brief Meta-information block for the masked Pyjamask-96-AEAD cipher. - */ -extern aead_cipher_t const pyjamask_masked_96_cipher; - -/** - * \brief Encrypts and authenticates a packet with Pyjamask-128-AEAD. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. 
- * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa pyjamask_128_aead_decrypt() - */ -int pyjamask_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Pyjamask-128-AEAD. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa pyjamask_128_aead_encrypt() - */ -int pyjamask_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Pyjamask-96-AEAD. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 12 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 8 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa pyjamask_96_aead_decrypt() - */ -int pyjamask_96_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Pyjamask-96-AEAD. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. 
- * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 12 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 8 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa pyjamask_96_aead_encrypt() - */ -int pyjamask_96_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with masked Pyjamask-128-AEAD. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa pyjamask_masked_128_aead_decrypt() - */ -int pyjamask_masked_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with masked Pyjamask-128-AEAD. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- * - * \sa pyjamask_masked_128_aead_encrypt() - */ -int pyjamask_masked_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with masked Pyjamask-96-AEAD. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 12 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 8 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa pyjamask_masked_96_aead_decrypt() - */ -int pyjamask_masked_96_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with masked Pyjamask-96-AEAD. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 12 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 8 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
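All of the prototypes documented above, masked and unmasked alike, follow the same NIST-LWC/SUPERCOP AEAD calling convention: encrypt writes ciphertext plus tag and reports the combined length, decrypt verifies the tag before releasing plaintext, and nsec is unused. A minimal round-trip sketch for Pyjamask-96-AEAD, assuming only the sizes and signatures from the deleted header; the buffer contents and the helper's name are illustrative.

    #include <string.h>
    #include "pyjamask.h"

    int pyjamask_96_roundtrip(void)
    {
        unsigned char key[PYJAMASK_96_KEY_SIZE] = {0};    /* 16-byte key   */
        unsigned char npub[PYJAMASK_96_NONCE_SIZE] = {0}; /* 8-byte nonce  */
        unsigned char msg[32] = "example plaintext for pyjamask";
        unsigned char ad[4] = "hdr";
        unsigned char ct[sizeof(msg) + PYJAMASK_96_TAG_SIZE];
        unsigned char out[sizeof(msg)];
        unsigned long long clen, mlen;

        /* Ciphertext length is the plaintext length plus the 12-byte tag. */
        if (pyjamask_96_aead_encrypt(ct, &clen, msg, sizeof(msg),
                                     ad, sizeof(ad), 0, npub, key) != 0)
            return -1;

        /* Returns -1 if the authentication tag does not verify. */
        if (pyjamask_96_aead_decrypt(out, &mlen, 0, ct, clen,
                                     ad, sizeof(ad), npub, key) != 0)
            return -1;

        return memcmp(out, msg, sizeof(msg)) == 0 ? 0 : -1;
    }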
- * - * \sa pyjamask_masked_96_aead_encrypt() - */ -int pyjamask_masked_96_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-ocb.h b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-ocb.h index de544ba..98f2a31 100644 --- a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-ocb.h +++ b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-ocb.h @@ -62,7 +62,9 @@ #define OCB_DOUBLE_L OCB_CONCAT(OCB_ALG_NAME,_double_l) -/* Double a value in GF(128) - default implementation */ +#if OCB_BLOCK_SIZE == 16 + +/* Double a value in GF(128) */ static void OCB_DOUBLE_L(unsigned char out[16], const unsigned char in[16]) { unsigned index; @@ -72,6 +74,24 @@ static void OCB_DOUBLE_L(unsigned char out[16], const unsigned char in[16]) out[15] = (in[15] << 1) ^ (mask & 0x87); } +#elif OCB_BLOCK_SIZE == 12 + +/* Double a value in GF(96) */ +static void OCB_DOUBLE_L + (unsigned char out[12], const unsigned char in[12]) +{ + unsigned index; + unsigned char mask = (unsigned char)(((signed char)in[0]) >> 7); + for (index = 0; index < 11; ++index) + out[index] = (in[index] << 1) | (in[index + 1] >> 7); + out[11] = (in[11] << 1) ^ (mask & 0x41); + out[10] ^= (mask & 0x06); +} + +#else +#error "Unknown block size for OCB" +#endif + #endif /* State information for OCB functions */ diff --git a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-pyjamask-avr.S b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-pyjamask-avr.S new file mode 100644 index 0000000..b7cc631 --- /dev/null +++ b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-pyjamask-avr.S @@ -0,0 +1,8883 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global pyjamask_96_setup_key + .type pyjamask_96_setup_key, @function +pyjamask_96_setup_key: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 18 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + mov r26,r1 +29: + movw r12,r18 + movw r14,r20 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r27,202 + mov r12,r27 + ldi r27,185 + mov r13,r27 + ldi r27,129 + mov r14,r27 + ldi r27,184 + mov r15,r27 + and r12,r0 + and r13,r0 + and r14,r0 + and r15,r0 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,229 + ldi r25,220 + ldi r16,64 + ldi r17,92 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 
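The internal-ocb.h hunk above makes the mask-doubling step depend on OCB_BLOCK_SIZE: for 16-byte blocks the bit shifted off the top is folded back with 0x87 as before, while for the 12-byte Pyjamask-96 blocks it is folded back with 0x41 into the last byte and 0x06 into the byte before it, i.e. reduction modulo x^96 + x^10 + x^9 + x^6 + 1 on a big-endian 96-bit value. A standalone restatement of the 96-bit case, written directly from the constants in the hunk (the function name is illustrative):

    #include <stdint.h>

    /* Doubling in GF(2^96): shift the 96-bit value left by one bit and, when a
     * bit falls off the top, XOR the reduction constants back in
     * (x^96 == x^10 + x^9 + x^6 + 1, i.e. 0x06 into byte 10 and 0x41 into byte 11). */
    static void gf96_double(unsigned char out[12], const unsigned char in[12])
    {
        /* All-ones mask when the top bit of the big-endian value is set. */
        unsigned char mask = (unsigned char)(((signed char)in[0]) >> 7);
        unsigned index;
        for (index = 0; index < 11; ++index)
            out[index] = (unsigned char)((in[index] << 1) | (in[index + 1] >> 7));
        out[11] = (unsigned char)((in[11] << 1) ^ (mask & 0x41));
        out[10] ^= (unsigned char)(mask & 0x06);
    }

OCB derives its per-block offsets by repeated doubling of L, so the reduction polynomial has to match the cipher's block width; that is why the 12-byte Pyjamask-96 block needs its own variant rather than the GF(2^128) one.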
+ sbc r0,r1 + ldi r24,114 + ldi r25,110 + ldi r16,32 + ldi r17,174 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,57 + ldi r25,55 + ldi r16,16 + ldi r17,87 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,156 + ldi r25,27 + ldi r16,136 + ldi r17,171 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,206 + ldi r25,13 + ldi r16,196 + ldi r17,85 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,231 + ldi r25,6 + ldi r16,226 + ldi r17,42 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,115 + ldi r25,3 + ldi r16,113 + ldi r17,149 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,185 + ldi r25,129 + ldi r16,184 + ldi r17,202 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,220 + ldi r25,64 + ldi r16,92 + ldi r17,229 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,110 + ldi r25,32 + ldi r16,174 + ldi r17,114 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,55 + ldi r25,16 + ldi r16,87 + ldi r17,57 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,27 + ldi r25,136 + ldi r16,171 + ldi r17,156 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,13 + ldi r25,196 + ldi r16,85 + ldi r17,206 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,6 + ldi r25,226 + ldi r16,42 + ldi r17,231 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,3 + ldi r25,113 + ldi r16,149 + ldi r17,115 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,129 + ldi r25,184 + ldi r16,202 + ldi r17,185 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,64 + ldi r25,92 + ldi r16,229 + ldi r17,220 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,32 + ldi r25,174 + ldi r16,114 + ldi r17,110 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,16 + ldi r25,87 + ldi r16,57 + ldi r17,55 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,136 
+ ldi r25,171 + ldi r16,156 + ldi r17,27 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,196 + ldi r25,85 + ldi r16,206 + ldi r17,13 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,226 + ldi r25,42 + ldi r16,231 + ldi r17,6 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,113 + ldi r25,149 + ldi r16,115 + ldi r17,3 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,184 + ldi r25,202 + ldi r16,185 + ldi r17,129 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,92 + ldi r25,229 + ldi r16,220 + ldi r17,64 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,174 + ldi r25,114 + ldi r16,110 + ldi r17,32 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,87 + ldi r25,57 + ldi r16,55 + ldi r17,16 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,171 + ldi r25,156 + ldi r16,27 + ldi r17,136 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,85 + ldi r25,206 + ldi r16,13 + ldi r17,196 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,42 + ldi r25,231 + ldi r16,6 + ldi r17,226 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,149 + ldi r25,115 + ldi r16,3 + ldi r17,113 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + movw r18,r12 + movw r20,r14 + ldi r25,128 + eor r18,r25 + eor r18,r26 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + ldi r24,106 + eor r23,r24 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + lsl r4 + rol r5 + rol r6 + rol r7 + adc r4,r1 + ldi r17,63 + eor r6,r17 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + or r11,r0 + ldi r16,36 + eor r11,r16 + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + inc r26 + ldi r27,14 + cpse r26,r27 + rjmp 29b + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size pyjamask_96_setup_key, .-pyjamask_96_setup_key + + .text +.global pyjamask_96_encrypt + .type pyjamask_96_encrypt, @function +pyjamask_96_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in 
r29,0x3e +.L__stack_usage = 16 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ldi r26,14 +13: + ld r8,Z+ + ld r9,Z+ + ld r10,Z+ + ld r11,Z+ + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + ld r8,Z+ + ld r9,Z+ + ld r10,Z+ + ld r11,Z+ + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + ld r8,Z+ + ld r9,Z+ + ld r10,Z+ + ld r11,Z+ + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r18 + and r0,r22 + eor r4,r0 + mov r0,r19 + and r0,r23 + eor r5,r0 + mov r0,r20 + and r0,r2 + eor r6,r0 + mov r0,r21 + and r0,r3 + eor r7,r0 + mov r0,r22 + and r0,r4 + eor r18,r0 + mov r0,r23 + and r0,r5 + eor r19,r0 + mov r0,r2 + and r0,r6 + eor r20,r0 + mov r0,r3 + and r0,r7 + eor r21,r0 + mov r0,r18 + and r0,r4 + eor r22,r0 + mov r0,r19 + and r0,r5 + eor r23,r0 + mov r0,r20 + and r0,r6 + eor r2,r0 + mov r0,r21 + and r0,r7 + eor r3,r0 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + com r4 + com r5 + com r6 + com r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r27,133 + mov r8,r27 + ldi r27,16 + mov r9,r27 + ldi r27,134 + mov r10,r27 + ldi r27,163 + mov r11,r27 + and r8,r0 + and r9,r0 + and r10,r0 + and r11,r0 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,66 + ldi r25,8 + ldi r16,195 + ldi r17,209 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,33 + ldi r25,132 + ldi r16,225 + ldi r17,104 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,16 + ldi r25,194 + ldi r16,112 + ldi r17,180 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,8 + ldi r25,97 + ldi r16,56 + ldi r17,90 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,132 + ldi r25,48 + ldi r16,28 + ldi r17,45 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,66 + ldi r25,24 + ldi r16,142 + ldi r17,22 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,33 + ldi r25,12 + ldi r16,71 + ldi r17,11 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,16 + ldi r25,134 + ldi r16,163 + ldi r17,133 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,8 + ldi r25,195 + ldi r16,209 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,132 + ldi r25,225 + ldi r16,104 + ldi r17,33 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,194 + ldi r25,112 + ldi r16,180 + ldi r17,16 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 
+ mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,97 + ldi r25,56 + ldi r16,90 + ldi r17,8 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,48 + ldi r25,28 + ldi r16,45 + ldi r17,132 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,24 + ldi r25,142 + ldi r16,22 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,12 + ldi r25,71 + ldi r16,11 + ldi r17,33 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,134 + ldi r25,163 + ldi r16,133 + ldi r17,16 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,195 + ldi r25,209 + ldi r16,66 + ldi r17,8 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,225 + ldi r25,104 + ldi r16,33 + ldi r17,132 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,112 + ldi r25,180 + ldi r16,16 + ldi r17,194 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,56 + ldi r25,90 + ldi r16,8 + ldi r17,97 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,28 + ldi r25,45 + ldi r16,132 + ldi r17,48 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,142 + ldi r25,22 + ldi r16,66 + ldi r17,24 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,71 + ldi r25,11 + ldi r16,33 + ldi r17,12 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,163 + ldi r25,133 + ldi r16,16 + ldi r17,134 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,209 + ldi r25,66 + ldi r16,8 + ldi r17,195 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,104 + ldi r25,33 + ldi r16,132 + ldi r17,225 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,180 + ldi r25,16 + ldi r16,194 + ldi r17,112 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,90 + ldi r25,8 + ldi r16,97 + ldi r17,56 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,45 + ldi r25,132 + ldi r16,48 + ldi r17,28 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,22 + ldi r25,66 + ldi r16,24 + 
ldi r17,142 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,11 + ldi r25,33 + ldi r16,12 + ldi r17,71 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + movw r18,r8 + movw r20,r10 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r27,33 + mov r8,r27 + ldi r27,112 + mov r9,r27 + ldi r27,65 + mov r10,r27 + ldi r27,99 + mov r11,r27 + and r8,r0 + and r9,r0 + and r10,r0 + and r11,r0 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,16 + ldi r25,184 + ldi r16,160 + ldi r17,177 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,8 + ldi r25,92 + ldi r16,208 + ldi r17,88 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,4 + ldi r25,46 + ldi r16,104 + ldi r17,44 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,2 + ldi r25,23 + ldi r16,52 + ldi r17,22 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,129 + ldi r25,11 + ldi r16,26 + ldi r17,11 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,192 + ldi r25,5 + ldi r16,141 + ldi r17,133 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,224 + ldi r25,130 + ldi r16,198 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,112 + ldi r25,65 + ldi r16,99 + ldi r17,33 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,184 + ldi r25,160 + ldi r16,177 + ldi r17,16 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,92 + ldi r25,208 + ldi r16,88 + ldi r17,8 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,46 + ldi r25,104 + ldi r16,44 + ldi r17,4 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,23 + ldi r25,52 + ldi r16,22 + ldi r17,2 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,11 + ldi r25,26 + ldi r16,11 + ldi r17,129 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,5 + ldi r25,141 + ldi r16,133 + ldi r17,192 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,130 + ldi r25,198 + ldi r16,66 + ldi r17,224 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,65 + ldi r25,99 + ldi r16,33 + ldi r17,112 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + 
eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,160 + ldi r25,177 + ldi r16,16 + ldi r17,184 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,208 + ldi r25,88 + ldi r16,8 + ldi r17,92 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,104 + ldi r25,44 + ldi r16,4 + ldi r17,46 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,52 + ldi r25,22 + ldi r16,2 + ldi r17,23 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,26 + ldi r25,11 + ldi r16,129 + ldi r17,11 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,141 + ldi r25,133 + ldi r16,192 + ldi r17,5 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,198 + ldi r25,66 + ldi r16,224 + ldi r17,130 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,99 + ldi r25,33 + ldi r16,112 + ldi r17,65 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,177 + ldi r25,16 + ldi r16,184 + ldi r17,160 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,88 + ldi r25,8 + ldi r16,92 + ldi r17,208 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,44 + ldi r25,4 + ldi r16,46 + ldi r17,104 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,22 + ldi r25,2 + ldi r16,23 + ldi r17,52 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,11 + ldi r25,129 + ldi r16,11 + ldi r17,26 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,133 + ldi r25,192 + ldi r16,5 + ldi r17,141 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,66 + ldi r25,224 + ldi r16,130 + ldi r17,198 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + movw r22,r8 + movw r2,r10 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r27,128 + mov r8,r27 + ldi r27,242 + mov r9,r27 + ldi r27,44 + mov r10,r27 + ldi r27,105 + mov r11,r27 + and r8,r0 + and r9,r0 + and r10,r0 + and r11,r0 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,64 + ldi r25,121 + ldi r16,150 + ldi r17,52 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,160 + ldi r25,60 + ldi r16,75 + ldi r17,26 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 
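The long runs of lsl / sbc r0,r1 / ldi / and / eor in the generated assembly are an unrolled, constant-time multiplication of a 32-bit state word by a fixed binary circulant matrix: each lsl/sbc pair turns the next bit of the word into an all-zeros or all-ones mask, the ldi constants are successive rotations of the matrix row split into four bytes, and the eor instructions accumulate the selected rotations. A compact C sketch of the same operation, in the style of the Pyjamask reference code; the function name and loop form are illustrative, the generated assembly simply unrolls this with precomputed rotations.

    #include <stdint.h>

    /* Multiply the bit-vector "vec" by a 32x32 circulant binary matrix whose
     * first row is "mat_row", without data-dependent branches.  Each bit of
     * "vec" selects, via an all-zeros/all-ones mask, one rotation of the row,
     * which is exactly what the lsl/sbc/and/eor sequences do byte by byte. */
    static uint32_t circulant_mat_mult(uint32_t mat_row, uint32_t vec)
    {
        uint32_t result = 0;
        int bit;
        for (bit = 31; bit >= 0; --bit) {
            uint32_t mask = (uint32_t)(-(int32_t)((vec >> bit) & 1));
            result ^= mask & mat_row;
            mat_row = (mat_row >> 1) | (mat_row << 31); /* rotate right by 1 */
        }
        return result;
    }

Because the selection mask is produced arithmetically rather than with a branch, the sequence takes the same time whatever the key or state bits are, which matters for the key schedule as much as for the round function.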
+ mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,80 + ldi r25,158 + ldi r16,37 + ldi r17,13 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,40 + ldi r25,207 + ldi r16,146 + ldi r17,6 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,148 + ldi r25,103 + ldi r16,73 + ldi r17,3 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,202 + ldi r25,179 + ldi r16,164 + ldi r17,1 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,229 + ldi r25,89 + ldi r16,210 + mov r17,r1 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,242 + ldi r25,44 + ldi r16,105 + ldi r17,128 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,121 + ldi r25,150 + ldi r16,52 + ldi r17,64 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,60 + ldi r25,75 + ldi r16,26 + ldi r17,160 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,158 + ldi r25,37 + ldi r16,13 + ldi r17,80 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,207 + ldi r25,146 + ldi r16,6 + ldi r17,40 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,103 + ldi r25,73 + ldi r16,3 + ldi r17,148 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,179 + ldi r25,164 + ldi r16,1 + ldi r17,202 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,89 + ldi r25,210 + mov r16,r1 + ldi r17,229 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,44 + ldi r25,105 + ldi r16,128 + ldi r17,242 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,150 + ldi r25,52 + ldi r16,64 + ldi r17,121 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,75 + ldi r25,26 + ldi r16,160 + ldi r17,60 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,37 + ldi r25,13 + ldi r16,80 + ldi r17,158 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,146 + ldi r25,6 + ldi r16,40 + ldi r17,207 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,73 + ldi r25,3 + ldi r16,148 + ldi r17,103 + 
and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,164 + ldi r25,1 + ldi r16,202 + ldi r17,179 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,210 + mov r25,r1 + ldi r16,229 + ldi r17,89 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,105 + ldi r25,128 + ldi r16,242 + ldi r17,44 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,52 + ldi r25,64 + ldi r16,121 + ldi r17,150 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,26 + ldi r25,160 + ldi r16,60 + ldi r17,75 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,13 + ldi r25,80 + ldi r16,158 + ldi r17,37 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,6 + ldi r25,40 + ldi r16,207 + ldi r17,146 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,3 + ldi r25,148 + ldi r16,103 + ldi r17,73 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,1 + ldi r25,202 + ldi r16,179 + ldi r17,164 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + mov r24,r1 + ldi r25,229 + ldi r16,89 + ldi r17,210 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + movw r4,r8 + movw r6,r10 + dec r26 + breq 6545f + rjmp 13b +6545: + ld r8,Z+ + ld r9,Z+ + ld r10,Z+ + ld r11,Z+ + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + ld r8,Z+ + ld r9,Z+ + ld r10,Z+ + ld r11,Z+ + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + ld r8,Z+ + ld r9,Z+ + ld r10,Z+ + ld r11,Z+ + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r21 + st X+,r20 + st X+,r19 + st X+,r18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + pop r0 + pop r0 + pop r17 + pop r16 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size pyjamask_96_encrypt, .-pyjamask_96_encrypt + + .text +.global pyjamask_96_decrypt + .type pyjamask_96_decrypt, @function +pyjamask_96_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 16 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + subi r30,76 + sbci r31,255 + ld r9,-Z + ld r8,-Z + ld r27,-Z + ld r26,-Z + eor r4,r26 + eor r5,r27 + eor r6,r8 + eor r7,r9 + ld r9,-Z + ld r8,-Z + ld r27,-Z + ld r26,-Z + eor r22,r26 + eor r23,r27 + eor r2,r8 + eor r3,r9 + ld r9,-Z + ld r8,-Z + ld r27,-Z + ld r26,-Z + eor r18,r26 + eor r19,r27 + eor 
r20,r8 + eor r21,r9 + ldi r26,14 +39: + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r27,33 + mov r8,r27 + ldi r27,161 + mov r9,r27 + ldi r27,55 + mov r10,r27 + ldi r27,32 + mov r11,r27 + and r8,r0 + and r9,r0 + and r10,r0 + and r11,r0 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,144 + ldi r25,208 + ldi r16,27 + ldi r17,144 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,72 + ldi r25,232 + ldi r16,13 + ldi r17,72 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,36 + ldi r25,244 + ldi r16,6 + ldi r17,36 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,18 + ldi r25,122 + ldi r16,3 + ldi r17,18 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,9 + ldi r25,189 + ldi r16,1 + ldi r17,9 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,132 + ldi r25,222 + ldi r16,128 + ldi r17,132 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,66 + ldi r25,111 + ldi r16,64 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,161 + ldi r25,55 + ldi r16,32 + ldi r17,33 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,208 + ldi r25,27 + ldi r16,144 + ldi r17,144 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,232 + ldi r25,13 + ldi r16,72 + ldi r17,72 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,244 + ldi r25,6 + ldi r16,36 + ldi r17,36 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,122 + ldi r25,3 + ldi r16,18 + ldi r17,18 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,189 + ldi r25,1 + ldi r16,9 + ldi r17,9 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,222 + ldi r25,128 + ldi r16,132 + ldi r17,132 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,111 + ldi r25,64 + ldi r16,66 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,55 + ldi r25,32 + ldi r16,33 + ldi r17,161 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,27 + ldi r25,144 + ldi r16,144 + ldi r17,208 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi 
r24,13 + ldi r25,72 + ldi r16,72 + ldi r17,232 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,6 + ldi r25,36 + ldi r16,36 + ldi r17,244 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,3 + ldi r25,18 + ldi r16,18 + ldi r17,122 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,1 + ldi r25,9 + ldi r16,9 + ldi r17,189 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,128 + ldi r25,132 + ldi r16,132 + ldi r17,222 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,64 + ldi r25,66 + ldi r16,66 + ldi r17,111 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,32 + ldi r25,33 + ldi r16,161 + ldi r17,55 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,144 + ldi r25,144 + ldi r16,208 + ldi r17,27 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,72 + ldi r25,72 + ldi r16,232 + ldi r17,13 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,36 + ldi r25,36 + ldi r16,244 + ldi r17,6 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,18 + ldi r25,18 + ldi r16,122 + ldi r17,3 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,9 + ldi r25,9 + ldi r16,189 + ldi r17,1 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,132 + ldi r25,132 + ldi r16,222 + ldi r17,128 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,66 + ldi r25,66 + ldi r16,111 + ldi r17,64 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + movw r18,r8 + movw r20,r10 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r27,160 + mov r8,r27 + ldi r27,242 + mov r9,r27 + ldi r27,143 + mov r10,r27 + ldi r27,16 + mov r11,r27 + and r8,r0 + and r9,r0 + and r10,r0 + and r11,r0 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,80 + ldi r25,249 + ldi r16,71 + ldi r17,8 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,168 + ldi r25,252 + ldi r16,35 + ldi r17,4 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,84 + ldi r25,254 + ldi r16,17 + ldi r17,2 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,42 + ldi r25,255 + ldi r16,8 + ldi r17,1 + and r24,r0 + 
and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,149 + ldi r25,127 + ldi r16,132 + mov r17,r1 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,202 + ldi r25,63 + ldi r16,66 + ldi r17,128 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,229 + ldi r25,31 + ldi r16,33 + ldi r17,64 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,242 + ldi r25,143 + ldi r16,16 + ldi r17,160 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,249 + ldi r25,71 + ldi r16,8 + ldi r17,80 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,252 + ldi r25,35 + ldi r16,4 + ldi r17,168 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,254 + ldi r25,17 + ldi r16,2 + ldi r17,84 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,255 + ldi r25,8 + ldi r16,1 + ldi r17,42 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,127 + ldi r25,132 + mov r16,r1 + ldi r17,149 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,63 + ldi r25,66 + ldi r16,128 + ldi r17,202 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,31 + ldi r25,33 + ldi r16,64 + ldi r17,229 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,143 + ldi r25,16 + ldi r16,160 + ldi r17,242 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,71 + ldi r25,8 + ldi r16,80 + ldi r17,249 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,35 + ldi r25,4 + ldi r16,168 + ldi r17,252 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,17 + ldi r25,2 + ldi r16,84 + ldi r17,254 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,8 + ldi r25,1 + ldi r16,42 + ldi r17,255 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,132 + mov r25,r1 + ldi r16,149 + ldi r17,127 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,66 + ldi r25,128 + ldi r16,202 + ldi r17,63 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor 
r11,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,33 + ldi r25,64 + ldi r16,229 + ldi r17,31 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,16 + ldi r25,160 + ldi r16,242 + ldi r17,143 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,8 + ldi r25,80 + ldi r16,249 + ldi r17,71 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,4 + ldi r25,168 + ldi r16,252 + ldi r17,35 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,2 + ldi r25,84 + ldi r16,254 + ldi r17,17 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,1 + ldi r25,42 + ldi r16,255 + ldi r17,8 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + mov r24,r1 + ldi r25,149 + ldi r16,127 + ldi r17,132 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,128 + ldi r25,202 + ldi r16,63 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,64 + ldi r25,229 + ldi r16,31 + ldi r17,33 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + movw r22,r8 + movw r2,r10 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r27,192 + mov r8,r27 + ldi r27,216 + mov r9,r27 + ldi r27,84 + mov r10,r27 + ldi r27,144 + mov r11,r27 + and r8,r0 + and r9,r0 + and r10,r0 + and r11,r0 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,96 + ldi r25,108 + ldi r16,42 + ldi r17,72 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,48 + ldi r25,54 + ldi r16,21 + ldi r17,36 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,24 + ldi r25,155 + ldi r16,10 + ldi r17,18 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,140 + ldi r25,77 + ldi r16,5 + ldi r17,9 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,198 + ldi r25,166 + ldi r16,130 + ldi r17,4 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,99 + ldi r25,83 + ldi r16,65 + ldi r17,2 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,177 + ldi r25,169 + ldi r16,32 + ldi r17,129 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,216 + ldi r25,84 + ldi r16,144 + ldi r17,192 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,108 + ldi 
r25,42 + ldi r16,72 + ldi r17,96 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,54 + ldi r25,21 + ldi r16,36 + ldi r17,48 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,155 + ldi r25,10 + ldi r16,18 + ldi r17,24 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,77 + ldi r25,5 + ldi r16,9 + ldi r17,140 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,166 + ldi r25,130 + ldi r16,4 + ldi r17,198 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,83 + ldi r25,65 + ldi r16,2 + ldi r17,99 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,169 + ldi r25,32 + ldi r16,129 + ldi r17,177 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,84 + ldi r25,144 + ldi r16,192 + ldi r17,216 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,42 + ldi r25,72 + ldi r16,96 + ldi r17,108 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,21 + ldi r25,36 + ldi r16,48 + ldi r17,54 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,10 + ldi r25,18 + ldi r16,24 + ldi r17,155 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,5 + ldi r25,9 + ldi r16,140 + ldi r17,77 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,130 + ldi r25,4 + ldi r16,198 + ldi r17,166 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,65 + ldi r25,2 + ldi r16,99 + ldi r17,83 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,32 + ldi r25,129 + ldi r16,177 + ldi r17,169 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,144 + ldi r25,192 + ldi r16,216 + ldi r17,84 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,72 + ldi r25,96 + ldi r16,108 + ldi r17,42 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,36 + ldi r25,48 + ldi r16,54 + ldi r17,21 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,18 + ldi r25,24 + ldi r16,155 + ldi r17,10 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + 
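
The idiom repeated throughout these generated blocks, `mov r0,r1` / `lsl rN` / `sbc r0,r1` followed by a group of `ldi`, `and` and `eor` instructions, broadcasts the bit just shifted out of rN into a full byte mask in r0 (r1 is the fixed zero register of the avr-gcc ABI) and then conditionally XORs a 32-bit constant into the accumulator registers. Each successive constant is a one-bit rotation of the previous one, so a whole run of these groups computes a constant-time product of one 32-bit state row with a circulant binary matrix, which is how Pyjamask's MixRows diffusion layer is defined; the AVR core has no barrel shifter, so every rotated row is embedded as immediates instead of being recomputed. A minimal C sketch of the same computation, under that reading and with an illustrative helper name, is:

    #include <stdint.h>

    /* Constant-time product of the 32-bit vector x with a circulant binary
     * matrix whose first row is row0; every later row is a rotation of it.
     * Mirrors the AVR idiom: turn one bit of x into an all-zeros/all-ones
     * mask (lsl + sbc), AND it with the current row, XOR into the result.
     * The rotation direction is illustrative. */
    uint32_t circulant_mul_gf2(uint32_t row0, uint32_t x)
    {
        uint32_t acc = 0, row = row0;
        for (int bit = 31; bit >= 0; bit--) {
            uint32_t mask = 0u - ((x >> bit) & 1u);   /* 0x00000000 or 0xffffffff */
            acc ^= row & mask;
            row = (row >> 1) | (row << 31);           /* next row of the matrix */
        }
        return acc;
    }
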
eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,9 + ldi r25,140 + ldi r16,77 + ldi r17,5 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,4 + ldi r25,198 + ldi r16,166 + ldi r17,130 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,2 + ldi r25,99 + ldi r16,83 + ldi r17,65 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,129 + ldi r25,177 + ldi r16,169 + ldi r17,32 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r8,r24 + eor r9,r25 + eor r10,r16 + eor r11,r17 + movw r4,r8 + movw r6,r10 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + com r4 + com r5 + com r6 + com r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r18 + and r0,r4 + eor r22,r0 + mov r0,r19 + and r0,r5 + eor r23,r0 + mov r0,r20 + and r0,r6 + eor r2,r0 + mov r0,r21 + and r0,r7 + eor r3,r0 + mov r0,r22 + and r0,r4 + eor r18,r0 + mov r0,r23 + and r0,r5 + eor r19,r0 + mov r0,r2 + and r0,r6 + eor r20,r0 + mov r0,r3 + and r0,r7 + eor r21,r0 + mov r0,r18 + and r0,r22 + eor r4,r0 + mov r0,r19 + and r0,r23 + eor r5,r0 + mov r0,r20 + and r0,r2 + eor r6,r0 + mov r0,r21 + and r0,r3 + eor r7,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + ld r11,-Z + ld r10,-Z + ld r9,-Z + ld r8,-Z + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + ld r11,-Z + ld r10,-Z + ld r9,-Z + ld r8,-Z + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + ld r11,-Z + ld r10,-Z + ld r9,-Z + ld r8,-Z + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + dec r26 + breq 6571f + rjmp 39b +6571: + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r21 + st X+,r20 + st X+,r19 + st X+,r18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + pop r0 + pop r0 + pop r17 + pop r16 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size pyjamask_96_decrypt, .-pyjamask_96_decrypt + + .text +.global pyjamask_128_setup_key + .type pyjamask_128_setup_key, @function +pyjamask_128_setup_key: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 18 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r26,r1 +33: + movw r12,r18 + movw r14,r20 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r12,r4 + eor r13,r5 + eor r14,r6 + eor r15,r7 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r27,202 + mov r12,r27 + ldi r27,185 + mov r13,r27 + ldi r27,129 + mov r14,r27 + ldi r27,184 + mov 
r15,r27 + and r12,r0 + and r13,r0 + and r14,r0 + and r15,r0 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,229 + ldi r25,220 + ldi r16,64 + ldi r17,92 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,114 + ldi r25,110 + ldi r16,32 + ldi r17,174 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,57 + ldi r25,55 + ldi r16,16 + ldi r17,87 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,156 + ldi r25,27 + ldi r16,136 + ldi r17,171 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,206 + ldi r25,13 + ldi r16,196 + ldi r17,85 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,231 + ldi r25,6 + ldi r16,226 + ldi r17,42 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,115 + ldi r25,3 + ldi r16,113 + ldi r17,149 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,185 + ldi r25,129 + ldi r16,184 + ldi r17,202 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,220 + ldi r25,64 + ldi r16,92 + ldi r17,229 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,110 + ldi r25,32 + ldi r16,174 + ldi r17,114 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,55 + ldi r25,16 + ldi r16,87 + ldi r17,57 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,27 + ldi r25,136 + ldi r16,171 + ldi r17,156 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,13 + ldi r25,196 + ldi r16,85 + ldi r17,206 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,6 + ldi r25,226 + ldi r16,42 + ldi r17,231 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,3 + ldi r25,113 + ldi r16,149 + ldi r17,115 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,129 + ldi r25,184 + ldi r16,202 + ldi r17,185 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,64 + ldi r25,92 + ldi r16,229 + ldi r17,220 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,32 + ldi r25,174 + ldi r16,114 + ldi r17,110 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor 
r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,16 + ldi r25,87 + ldi r16,57 + ldi r17,55 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,136 + ldi r25,171 + ldi r16,156 + ldi r17,27 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,196 + ldi r25,85 + ldi r16,206 + ldi r17,13 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,226 + ldi r25,42 + ldi r16,231 + ldi r17,6 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,113 + ldi r25,149 + ldi r16,115 + ldi r17,3 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,184 + ldi r25,202 + ldi r16,185 + ldi r17,129 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,92 + ldi r25,229 + ldi r16,220 + ldi r17,64 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,174 + ldi r25,114 + ldi r16,110 + ldi r17,32 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,87 + ldi r25,57 + ldi r16,55 + ldi r17,16 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,171 + ldi r25,156 + ldi r16,27 + ldi r17,136 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,85 + ldi r25,206 + ldi r16,13 + ldi r17,196 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,42 + ldi r25,231 + ldi r16,6 + ldi r17,226 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,149 + ldi r25,115 + ldi r16,3 + ldi r17,113 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + movw r18,r12 + movw r20,r14 + ldi r25,128 + eor r18,r25 + eor r18,r26 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + ldi r24,106 + eor r23,r24 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + lsl r4 + rol r5 + rol r6 + rol r7 + adc r4,r1 + ldi r17,63 + eor r6,r17 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + lsr r11 + ror r10 + ror r9 + ror r8 + ror r0 + or r11,r0 + ldi r16,36 + eor r11,r16 + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + inc r26 + ldi r27,14 + cpse r26,r27 + rjmp 33b + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size pyjamask_128_setup_key, 
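
pyjamask_128_setup_key above stores the incoming key as round key 0 and then iterates 14 times (the `ldi r27,14` / `cpse` exit test), producing 15 round keys of 16 bytes. Each iteration XORs the four key rows together and folds the sum back into every row, multiplies row 0 by a fixed circulant matrix (first-row constant 0xb881b9ca, read off the `ldi` sequence assuming least-significant-byte-first register order), mixes in 0x80 XOR the round counter plus the bytes 0x6a, 0x3f and 0x24, and rotates the other three rows before storing the next round key. A hedged C sketch of one iteration follows; the rotation amounts and the byte positions of the constants are taken from the published Pyjamask specification and are assumptions here, since the assembly works on a byte-reversed representation, and the name key_schedule_round_sketch is illustrative.

    #include <stdint.h>

    uint32_t circulant_mul_gf2(uint32_t row0, uint32_t x);    /* earlier sketch */

    static uint32_t rotl32(uint32_t x, unsigned n)             /* n in 1..31 */
    {
        return (x << n) | (x >> (32u - n));
    }

    void key_schedule_round_sketch(uint32_t k[4], unsigned ctr)
    {
        uint32_t sum = k[0] ^ k[1] ^ k[2] ^ k[3];      /* column mixing */
        for (int i = 0; i < 4; i++)
            k[i] ^= sum;

        k[0] = circulant_mul_gf2(0xb881b9cau, k[0]);   /* constant matrix on row 0 */
        k[0] ^= 0x00000080u ^ ctr;                     /* round-counter material */
        k[1] = rotl32(k[1], 8)  ^ 0x00006a00u;         /* rotations and constant byte */
        k[2] = rotl32(k[2], 15) ^ 0x003f0000u;         /* positions assumed from the  */
        k[3] = rotl32(k[3], 18) ^ 0x24000000u;         /* Pyjamask specification      */
    }
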
.-pyjamask_128_setup_key + + .text +.global pyjamask_128_encrypt + .type pyjamask_128_encrypt, @function +pyjamask_128_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 20 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ldi r26,14 +17: + ld r12,Z+ + ld r13,Z+ + ld r14,Z+ + ld r15,Z+ + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + ld r12,Z+ + ld r13,Z+ + ld r14,Z+ + ld r15,Z+ + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ld r12,Z+ + ld r13,Z+ + ld r14,Z+ + ld r15,Z+ + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + ld r12,Z+ + ld r13,Z+ + ld r14,Z+ + ld r15,Z+ + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + mov r0,r18 + and r0,r22 + eor r8,r0 + mov r0,r19 + and r0,r23 + eor r9,r0 + mov r0,r20 + and r0,r2 + eor r10,r0 + mov r0,r21 + and r0,r3 + eor r11,r0 + mov r0,r22 + and r0,r4 + eor r18,r0 + mov r0,r23 + and r0,r5 + eor r19,r0 + mov r0,r2 + and r0,r6 + eor r20,r0 + mov r0,r3 + and r0,r7 + eor r21,r0 + mov r0,r4 + and r0,r8 + eor r22,r0 + mov r0,r5 + and r0,r9 + eor r23,r0 + mov r0,r6 + and r0,r10 + eor r2,r0 + mov r0,r7 + and r0,r11 + eor r3,r0 + mov r0,r18 + and r0,r8 + eor r4,r0 + mov r0,r19 + and r0,r9 + eor r5,r0 + mov r0,r20 + and r0,r10 + eor r6,r0 + mov r0,r21 + and r0,r11 + eor r7,r0 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + com r8 + com r9 + com r10 + com r11 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r27,133 + mov r12,r27 + ldi r27,16 + mov r13,r27 + ldi r27,134 + mov r14,r27 + ldi r27,163 + mov r15,r27 + and r12,r0 + and r13,r0 + and r14,r0 + and r15,r0 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,66 + ldi r25,8 + ldi r16,195 + ldi r17,209 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,33 + ldi r25,132 + ldi r16,225 + ldi r17,104 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,16 + ldi r25,194 + ldi r16,112 + ldi r17,180 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,8 + ldi r25,97 + ldi r16,56 + ldi r17,90 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,132 + ldi r25,48 + ldi r16,28 + ldi r17,45 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,66 + ldi r25,24 + ldi r16,142 + ldi r17,22 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,33 + ldi r25,12 + ldi r16,71 + ldi r17,11 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + 
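
After the round-key XOR at the top of each pyjamask_128_encrypt round, the short `and`/`eor`/`com` network is a bitsliced evaluation of the 4-bit Pyjamask S-box: the four 32-bit rows hold one input bit each, so 32 S-boxes are evaluated in parallel with word-wide AND, XOR and NOT. The C sketch below transcribes that network directly from the register usage above (mapping the register quads to rows s[0]..s[3] in load order is an assumption about the row numbering); the name sbox_layer_128 is illustrative.

    #include <stdint.h>

    /* Bitsliced Pyjamask-128 S-box layer, read off the and/eor/com network
     * above; the assembly performs the final row swap with three eor's. */
    void sbox_layer_128(uint32_t s[4])
    {
        s[0] ^= s[3];
        s[3] ^= s[0] & s[1];
        s[0] ^= s[1] & s[2];
        s[1] ^= s[2] & s[3];
        s[2] ^= s[0] & s[3];
        s[2] ^= s[1];
        s[1] ^= s[0];
        s[3] = ~s[3];
        uint32_t t = s[2];      /* swap rows 2 and 3 */
        s[2] = s[3];
        s[3] = t;
    }
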
lsl r20 + sbc r0,r1 + ldi r24,16 + ldi r25,134 + ldi r16,163 + ldi r17,133 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,8 + ldi r25,195 + ldi r16,209 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,132 + ldi r25,225 + ldi r16,104 + ldi r17,33 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,194 + ldi r25,112 + ldi r16,180 + ldi r17,16 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,97 + ldi r25,56 + ldi r16,90 + ldi r17,8 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,48 + ldi r25,28 + ldi r16,45 + ldi r17,132 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,24 + ldi r25,142 + ldi r16,22 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,12 + ldi r25,71 + ldi r16,11 + ldi r17,33 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,134 + ldi r25,163 + ldi r16,133 + ldi r17,16 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,195 + ldi r25,209 + ldi r16,66 + ldi r17,8 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,225 + ldi r25,104 + ldi r16,33 + ldi r17,132 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,112 + ldi r25,180 + ldi r16,16 + ldi r17,194 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,56 + ldi r25,90 + ldi r16,8 + ldi r17,97 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,28 + ldi r25,45 + ldi r16,132 + ldi r17,48 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,142 + ldi r25,22 + ldi r16,66 + ldi r17,24 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,71 + ldi r25,11 + ldi r16,33 + ldi r17,12 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,163 + ldi r25,133 + ldi r16,16 + ldi r17,134 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,209 + ldi r25,66 + ldi r16,8 + ldi r17,195 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,104 + 
ldi r25,33 + ldi r16,132 + ldi r17,225 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,180 + ldi r25,16 + ldi r16,194 + ldi r17,112 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,90 + ldi r25,8 + ldi r16,97 + ldi r17,56 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,45 + ldi r25,132 + ldi r16,48 + ldi r17,28 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,22 + ldi r25,66 + ldi r16,24 + ldi r17,142 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,11 + ldi r25,33 + ldi r16,12 + ldi r17,71 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + movw r18,r12 + movw r20,r14 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r27,33 + mov r12,r27 + ldi r27,112 + mov r13,r27 + ldi r27,65 + mov r14,r27 + ldi r27,99 + mov r15,r27 + and r12,r0 + and r13,r0 + and r14,r0 + and r15,r0 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,16 + ldi r25,184 + ldi r16,160 + ldi r17,177 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,8 + ldi r25,92 + ldi r16,208 + ldi r17,88 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,4 + ldi r25,46 + ldi r16,104 + ldi r17,44 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,2 + ldi r25,23 + ldi r16,52 + ldi r17,22 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,129 + ldi r25,11 + ldi r16,26 + ldi r17,11 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,192 + ldi r25,5 + ldi r16,141 + ldi r17,133 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,224 + ldi r25,130 + ldi r16,198 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,112 + ldi r25,65 + ldi r16,99 + ldi r17,33 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,184 + ldi r25,160 + ldi r16,177 + ldi r17,16 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,92 + ldi r25,208 + ldi r16,88 + ldi r17,8 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,46 + ldi r25,104 + ldi r16,44 + ldi r17,4 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,23 + ldi r25,52 + ldi r16,22 + 
ldi r17,2 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,11 + ldi r25,26 + ldi r16,11 + ldi r17,129 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,5 + ldi r25,141 + ldi r16,133 + ldi r17,192 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,130 + ldi r25,198 + ldi r16,66 + ldi r17,224 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,65 + ldi r25,99 + ldi r16,33 + ldi r17,112 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,160 + ldi r25,177 + ldi r16,16 + ldi r17,184 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,208 + ldi r25,88 + ldi r16,8 + ldi r17,92 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,104 + ldi r25,44 + ldi r16,4 + ldi r17,46 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,52 + ldi r25,22 + ldi r16,2 + ldi r17,23 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,26 + ldi r25,11 + ldi r16,129 + ldi r17,11 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,141 + ldi r25,133 + ldi r16,192 + ldi r17,5 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,198 + ldi r25,66 + ldi r16,224 + ldi r17,130 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,99 + ldi r25,33 + ldi r16,112 + ldi r17,65 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,177 + ldi r25,16 + ldi r16,184 + ldi r17,160 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,88 + ldi r25,8 + ldi r16,92 + ldi r17,208 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,44 + ldi r25,4 + ldi r16,46 + ldi r17,104 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,22 + ldi r25,2 + ldi r16,23 + ldi r17,52 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,11 + ldi r25,129 + ldi r16,11 + ldi r17,26 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,133 + ldi r25,192 + ldi r16,5 + ldi r17,141 + and r24,r0 + and r25,r0 + and 
r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,66 + ldi r25,224 + ldi r16,130 + ldi r17,198 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + movw r22,r12 + movw r2,r14 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r27,128 + mov r12,r27 + ldi r27,242 + mov r13,r27 + ldi r27,44 + mov r14,r27 + ldi r27,105 + mov r15,r27 + and r12,r0 + and r13,r0 + and r14,r0 + and r15,r0 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,64 + ldi r25,121 + ldi r16,150 + ldi r17,52 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,160 + ldi r25,60 + ldi r16,75 + ldi r17,26 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,80 + ldi r25,158 + ldi r16,37 + ldi r17,13 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,40 + ldi r25,207 + ldi r16,146 + ldi r17,6 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,148 + ldi r25,103 + ldi r16,73 + ldi r17,3 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,202 + ldi r25,179 + ldi r16,164 + ldi r17,1 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,229 + ldi r25,89 + ldi r16,210 + mov r17,r1 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,242 + ldi r25,44 + ldi r16,105 + ldi r17,128 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,121 + ldi r25,150 + ldi r16,52 + ldi r17,64 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,60 + ldi r25,75 + ldi r16,26 + ldi r17,160 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,158 + ldi r25,37 + ldi r16,13 + ldi r17,80 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,207 + ldi r25,146 + ldi r16,6 + ldi r17,40 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,103 + ldi r25,73 + ldi r16,3 + ldi r17,148 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,179 + ldi r25,164 + ldi r16,1 + ldi r17,202 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,89 + ldi r25,210 + mov r16,r1 + ldi r17,229 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,44 + ldi r25,105 + ldi r16,128 + ldi r17,242 + and r24,r0 + and r25,r0 + and r16,r0 + and 
r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,150 + ldi r25,52 + ldi r16,64 + ldi r17,121 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,75 + ldi r25,26 + ldi r16,160 + ldi r17,60 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,37 + ldi r25,13 + ldi r16,80 + ldi r17,158 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,146 + ldi r25,6 + ldi r16,40 + ldi r17,207 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,73 + ldi r25,3 + ldi r16,148 + ldi r17,103 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,164 + ldi r25,1 + ldi r16,202 + ldi r17,179 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,210 + mov r25,r1 + ldi r16,229 + ldi r17,89 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,105 + ldi r25,128 + ldi r16,242 + ldi r17,44 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,52 + ldi r25,64 + ldi r16,121 + ldi r17,150 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,26 + ldi r25,160 + ldi r16,60 + ldi r17,75 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,13 + ldi r25,80 + ldi r16,158 + ldi r17,37 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,6 + ldi r25,40 + ldi r16,207 + ldi r17,146 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,3 + ldi r25,148 + ldi r16,103 + ldi r17,73 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,1 + ldi r25,202 + ldi r16,179 + ldi r17,164 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + mov r24,r1 + ldi r25,229 + ldi r16,89 + ldi r17,210 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + movw r4,r12 + movw r6,r14 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r27,19 + mov r12,r27 + ldi r27,72 + mov r13,r27 + ldi r27,165 + mov r14,r27 + ldi r27,72 + mov r15,r27 + and r12,r0 + and r13,r0 + and r14,r0 + and r15,r0 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,9 + ldi r25,164 + ldi r16,82 + ldi r17,164 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,4 + ldi r25,82 + ldi r16,41 + ldi r17,210 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + 
eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,2 + ldi r25,169 + ldi r16,20 + ldi r17,105 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,129 + ldi r25,84 + ldi r16,138 + ldi r17,52 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,64 + ldi r25,42 + ldi r16,69 + ldi r17,154 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,32 + ldi r25,149 + ldi r16,34 + ldi r17,77 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,144 + ldi r25,74 + ldi r16,145 + ldi r17,38 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,72 + ldi r25,165 + ldi r16,72 + ldi r17,19 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,164 + ldi r25,82 + ldi r16,164 + ldi r17,9 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,82 + ldi r25,41 + ldi r16,210 + ldi r17,4 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,169 + ldi r25,20 + ldi r16,105 + ldi r17,2 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,84 + ldi r25,138 + ldi r16,52 + ldi r17,129 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,42 + ldi r25,69 + ldi r16,154 + ldi r17,64 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,149 + ldi r25,34 + ldi r16,77 + ldi r17,32 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,74 + ldi r25,145 + ldi r16,38 + ldi r17,144 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,165 + ldi r25,72 + ldi r16,19 + ldi r17,72 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,82 + ldi r25,164 + ldi r16,9 + ldi r17,164 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,41 + ldi r25,210 + ldi r16,4 + ldi r17,82 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,20 + ldi r25,105 + ldi r16,2 + ldi r17,169 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,138 + ldi r25,52 + ldi r16,129 + ldi r17,84 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov 
r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,69 + ldi r25,154 + ldi r16,64 + ldi r17,42 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,34 + ldi r25,77 + ldi r16,32 + ldi r17,149 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,145 + ldi r25,38 + ldi r16,144 + ldi r17,74 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,72 + ldi r25,19 + ldi r16,72 + ldi r17,165 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,164 + ldi r25,9 + ldi r16,164 + ldi r17,82 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,210 + ldi r25,4 + ldi r16,82 + ldi r17,41 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,105 + ldi r25,2 + ldi r16,169 + ldi r17,20 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,52 + ldi r25,129 + ldi r16,84 + ldi r17,138 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,154 + ldi r25,64 + ldi r16,42 + ldi r17,69 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,77 + ldi r25,32 + ldi r16,149 + ldi r17,34 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,38 + ldi r25,144 + ldi r16,74 + ldi r17,145 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + movw r8,r12 + movw r10,r14 + dec r26 + breq 7055f + rjmp 17b +7055: + ld r12,Z+ + ld r13,Z+ + ld r14,Z+ + ld r15,Z+ + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + ld r12,Z+ + ld r13,Z+ + ld r14,Z+ + ld r15,Z+ + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ld r12,Z+ + ld r13,Z+ + ld r14,Z+ + ld r15,Z+ + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + ld r12,Z+ + ld r13,Z+ + ld r14,Z+ + ld r15,Z+ + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r21 + st X+,r20 + st X+,r19 + st X+,r18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + pop r0 + pop r0 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size pyjamask_128_encrypt, .-pyjamask_128_encrypt + + .text +.global pyjamask_128_decrypt + .type pyjamask_128_decrypt, @function +pyjamask_128_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 20 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld 
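
Taken together, pyjamask_128_encrypt above is 14 rounds (the `ldi r26,14` counter) of round-key addition, the bitsliced S-box layer, and one circulant matrix multiply per row, followed by a final round-key addition at local label 7055 and a byte-reversing store of the result. Assuming least-significant-byte-first register order, the first-row constants read off the `ldi` sequences are 0xa3861085, 0x63417021, 0x692cf280 and 0x48a54813 for rows 0 to 3. The skeleton below restates that structure in C; the function and array names are illustrative, rk is a hypothetical 15-entry round-key schedule, and circulant_mul_gf2 and sbox_layer_128 are the earlier sketches.

    #include <stdint.h>

    #define PYJAMASK_ROUNDS 14

    uint32_t circulant_mul_gf2(uint32_t row0, uint32_t x);    /* earlier sketches */
    void sbox_layer_128(uint32_t s[4]);

    /* First matrix rows as read off the ldi sequences above (LSB-first). */
    static const uint32_t MIX_ROW[4] = {
        0xa3861085u, 0x63417021u, 0x692cf280u, 0x48a54813u
    };

    void pyjamask_128_encrypt_sketch(uint32_t s[4], const uint32_t rk[15][4])
    {
        for (unsigned r = 0; r < PYJAMASK_ROUNDS; r++) {
            for (int i = 0; i < 4; i++)
                s[i] ^= rk[r][i];                            /* AddRoundKey */
            sbox_layer_128(s);                               /* bitsliced SubBytes */
            for (int i = 0; i < 4; i++)
                s[i] = circulant_mul_gf2(MIX_ROW[i], s[i]);  /* MixRows */
        }
        for (int i = 0; i < 4; i++)
            s[i] ^= rk[PYJAMASK_ROUNDS][i];                  /* final key addition */
    }
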
r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + subi r30,16 + sbci r31,255 + ld r13,-Z + ld r12,-Z + ld r27,-Z + ld r26,-Z + eor r8,r26 + eor r9,r27 + eor r10,r12 + eor r11,r13 + ld r13,-Z + ld r12,-Z + ld r27,-Z + ld r26,-Z + eor r4,r26 + eor r5,r27 + eor r6,r12 + eor r7,r13 + ld r13,-Z + ld r12,-Z + ld r27,-Z + ld r26,-Z + eor r22,r26 + eor r23,r27 + eor r2,r12 + eor r3,r13 + ld r13,-Z + ld r12,-Z + ld r27,-Z + ld r26,-Z + eor r18,r26 + eor r19,r27 + eor r20,r12 + eor r21,r13 + ldi r26,14 +51: + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r27,33 + mov r12,r27 + ldi r27,161 + mov r13,r27 + ldi r27,55 + mov r14,r27 + ldi r27,32 + mov r15,r27 + and r12,r0 + and r13,r0 + and r14,r0 + and r15,r0 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,144 + ldi r25,208 + ldi r16,27 + ldi r17,144 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,72 + ldi r25,232 + ldi r16,13 + ldi r17,72 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,36 + ldi r25,244 + ldi r16,6 + ldi r17,36 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,18 + ldi r25,122 + ldi r16,3 + ldi r17,18 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,9 + ldi r25,189 + ldi r16,1 + ldi r17,9 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,132 + ldi r25,222 + ldi r16,128 + ldi r17,132 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r21 + sbc r0,r1 + ldi r24,66 + ldi r25,111 + ldi r16,64 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,161 + ldi r25,55 + ldi r16,32 + ldi r17,33 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,208 + ldi r25,27 + ldi r16,144 + ldi r17,144 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,232 + ldi r25,13 + ldi r16,72 + ldi r17,72 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,244 + ldi r25,6 + ldi r16,36 + ldi r17,36 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,122 + ldi r25,3 + ldi r16,18 + ldi r17,18 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,189 + ldi r25,1 + ldi r16,9 + ldi r17,9 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,222 + ldi r25,128 + ldi r16,132 + ldi r17,132 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r20 + sbc r0,r1 + ldi r24,111 + ldi r25,64 + ldi r16,66 + 
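
pyjamask_128_decrypt mirrors that skeleton in reverse. Z is first advanced by 240 bytes, which is 15 round keys of 16 bytes and so matches the key schedule above, the pre-decrement `ld ...,-Z` loads remove the final key addition, and each of the 14 rounds then applies an inverse MixRows multiply (note the different first-row constant here, 0x2037a121 where encryption used 0xa3861085; the constants for the other rows appear in the `ldi` sequences further down) followed by the inverse S-box and the round-key removal. A short C sketch of that reversed skeleton, with the inverse constants read off the decryption code under the same byte-order assumption and inv_sbox_layer_128 left as a hypothetical helper, is:

    #include <stdint.h>

    #define PYJAMASK_ROUNDS 14

    uint32_t circulant_mul_gf2(uint32_t row0, uint32_t x);   /* earlier sketch */
    void inv_sbox_layer_128(uint32_t s[4]);                  /* hypothetical inverse of sbox_layer_128 */

    /* Inverse first matrix rows as read off the decryption ldi sequences. */
    static const uint32_t INV_MIX_ROW[4] = {
        0x2037a121u, 0x108ff2a0u, 0x9054d8c0u, 0x3354b117u
    };

    void pyjamask_128_decrypt_sketch(uint32_t s[4], const uint32_t rk[15][4])
    {
        for (int i = 0; i < 4; i++)
            s[i] ^= rk[PYJAMASK_ROUNDS][i];                      /* undo final key addition */
        for (int r = PYJAMASK_ROUNDS - 1; r >= 0; r--) {
            for (int i = 0; i < 4; i++)
                s[i] = circulant_mul_gf2(INV_MIX_ROW[i], s[i]);  /* inverse MixRows */
            inv_sbox_layer_128(s);                               /* inverse SubBytes */
            for (int i = 0; i < 4; i++)
                s[i] ^= rk[r][i];                                /* undo AddRoundKey */
        }
    }
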
ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,55 + ldi r25,32 + ldi r16,33 + ldi r17,161 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,27 + ldi r25,144 + ldi r16,144 + ldi r17,208 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,13 + ldi r25,72 + ldi r16,72 + ldi r17,232 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,6 + ldi r25,36 + ldi r16,36 + ldi r17,244 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,3 + ldi r25,18 + ldi r16,18 + ldi r17,122 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,1 + ldi r25,9 + ldi r16,9 + ldi r17,189 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,128 + ldi r25,132 + ldi r16,132 + ldi r17,222 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r19 + sbc r0,r1 + ldi r24,64 + ldi r25,66 + ldi r16,66 + ldi r17,111 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,32 + ldi r25,33 + ldi r16,161 + ldi r17,55 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,144 + ldi r25,144 + ldi r16,208 + ldi r17,27 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,72 + ldi r25,72 + ldi r16,232 + ldi r17,13 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,36 + ldi r25,36 + ldi r16,244 + ldi r17,6 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,18 + ldi r25,18 + ldi r16,122 + ldi r17,3 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,9 + ldi r25,9 + ldi r16,189 + ldi r17,1 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,132 + ldi r25,132 + ldi r16,222 + ldi r17,128 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r18 + sbc r0,r1 + ldi r24,66 + ldi r25,66 + ldi r16,111 + ldi r17,64 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + movw r18,r12 + movw r20,r14 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r27,160 + mov r12,r27 + ldi r27,242 + mov r13,r27 + ldi r27,143 + mov r14,r27 + ldi r27,16 + mov r15,r27 + and r12,r0 + and r13,r0 + and r14,r0 + and r15,r0 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,80 + ldi r25,249 + ldi r16,71 + ldi r17,8 + 
and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,168 + ldi r25,252 + ldi r16,35 + ldi r17,4 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,84 + ldi r25,254 + ldi r16,17 + ldi r17,2 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,42 + ldi r25,255 + ldi r16,8 + ldi r17,1 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,149 + ldi r25,127 + ldi r16,132 + mov r17,r1 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,202 + ldi r25,63 + ldi r16,66 + ldi r17,128 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r3 + sbc r0,r1 + ldi r24,229 + ldi r25,31 + ldi r16,33 + ldi r17,64 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,242 + ldi r25,143 + ldi r16,16 + ldi r17,160 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,249 + ldi r25,71 + ldi r16,8 + ldi r17,80 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,252 + ldi r25,35 + ldi r16,4 + ldi r17,168 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,254 + ldi r25,17 + ldi r16,2 + ldi r17,84 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,255 + ldi r25,8 + ldi r16,1 + ldi r17,42 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,127 + ldi r25,132 + mov r16,r1 + ldi r17,149 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,63 + ldi r25,66 + ldi r16,128 + ldi r17,202 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r2 + sbc r0,r1 + ldi r24,31 + ldi r25,33 + ldi r16,64 + ldi r17,229 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,143 + ldi r25,16 + ldi r16,160 + ldi r17,242 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,71 + ldi r25,8 + ldi r16,80 + ldi r17,249 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,35 + ldi r25,4 + ldi r16,168 + ldi r17,252 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,17 + ldi r25,2 + ldi r16,84 + ldi r17,254 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor 
r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,8 + ldi r25,1 + ldi r16,42 + ldi r17,255 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,132 + mov r25,r1 + ldi r16,149 + ldi r17,127 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,66 + ldi r25,128 + ldi r16,202 + ldi r17,63 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r23 + sbc r0,r1 + ldi r24,33 + ldi r25,64 + ldi r16,229 + ldi r17,31 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,16 + ldi r25,160 + ldi r16,242 + ldi r17,143 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,8 + ldi r25,80 + ldi r16,249 + ldi r17,71 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,4 + ldi r25,168 + ldi r16,252 + ldi r17,35 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,2 + ldi r25,84 + ldi r16,254 + ldi r17,17 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,1 + ldi r25,42 + ldi r16,255 + ldi r17,8 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + mov r24,r1 + ldi r25,149 + ldi r16,127 + ldi r17,132 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,128 + ldi r25,202 + ldi r16,63 + ldi r17,66 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r22 + sbc r0,r1 + ldi r24,64 + ldi r25,229 + ldi r16,31 + ldi r17,33 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + movw r22,r12 + movw r2,r14 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r27,192 + mov r12,r27 + ldi r27,216 + mov r13,r27 + ldi r27,84 + mov r14,r27 + ldi r27,144 + mov r15,r27 + and r12,r0 + and r13,r0 + and r14,r0 + and r15,r0 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,96 + ldi r25,108 + ldi r16,42 + ldi r17,72 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,48 + ldi r25,54 + ldi r16,21 + ldi r17,36 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,24 + ldi r25,155 + ldi r16,10 + ldi r17,18 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,140 + ldi r25,77 + ldi r16,5 + ldi r17,9 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,198 + ldi r25,166 + ldi r16,130 + ldi r17,4 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor 
r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,99 + ldi r25,83 + ldi r16,65 + ldi r17,2 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r7 + sbc r0,r1 + ldi r24,177 + ldi r25,169 + ldi r16,32 + ldi r17,129 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,216 + ldi r25,84 + ldi r16,144 + ldi r17,192 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,108 + ldi r25,42 + ldi r16,72 + ldi r17,96 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,54 + ldi r25,21 + ldi r16,36 + ldi r17,48 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,155 + ldi r25,10 + ldi r16,18 + ldi r17,24 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,77 + ldi r25,5 + ldi r16,9 + ldi r17,140 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,166 + ldi r25,130 + ldi r16,4 + ldi r17,198 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,83 + ldi r25,65 + ldi r16,2 + ldi r17,99 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r6 + sbc r0,r1 + ldi r24,169 + ldi r25,32 + ldi r16,129 + ldi r17,177 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,84 + ldi r25,144 + ldi r16,192 + ldi r17,216 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,42 + ldi r25,72 + ldi r16,96 + ldi r17,108 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,21 + ldi r25,36 + ldi r16,48 + ldi r17,54 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,10 + ldi r25,18 + ldi r16,24 + ldi r17,155 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,5 + ldi r25,9 + ldi r16,140 + ldi r17,77 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,130 + ldi r25,4 + ldi r16,198 + ldi r17,166 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,65 + ldi r25,2 + ldi r16,99 + ldi r17,83 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r5 + sbc r0,r1 + ldi r24,32 + ldi r25,129 + ldi r16,177 + ldi r17,169 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc 
r0,r1 + ldi r24,144 + ldi r25,192 + ldi r16,216 + ldi r17,84 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,72 + ldi r25,96 + ldi r16,108 + ldi r17,42 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,36 + ldi r25,48 + ldi r16,54 + ldi r17,21 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,18 + ldi r25,24 + ldi r16,155 + ldi r17,10 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,9 + ldi r25,140 + ldi r16,77 + ldi r17,5 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,4 + ldi r25,198 + ldi r16,166 + ldi r17,130 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,2 + ldi r25,99 + ldi r16,83 + ldi r17,65 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r4 + sbc r0,r1 + ldi r24,129 + ldi r25,177 + ldi r16,169 + ldi r17,32 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + movw r4,r12 + movw r6,r14 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r27,23 + mov r12,r27 + ldi r27,177 + mov r13,r27 + ldi r27,84 + mov r14,r27 + ldi r27,51 + mov r15,r27 + and r12,r0 + and r13,r0 + and r14,r0 + and r15,r0 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,139 + ldi r25,88 + ldi r16,170 + ldi r17,153 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,69 + ldi r25,44 + ldi r16,213 + ldi r17,204 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,34 + ldi r25,150 + ldi r16,106 + ldi r17,230 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,17 + ldi r25,75 + ldi r16,53 + ldi r17,115 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,136 + ldi r25,165 + ldi r16,154 + ldi r17,185 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,196 + ldi r25,82 + ldi r16,205 + ldi r17,92 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r11 + sbc r0,r1 + ldi r24,98 + ldi r25,169 + ldi r16,102 + ldi r17,46 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,177 + ldi r25,84 + ldi r16,51 + ldi r17,23 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,88 + ldi r25,170 + ldi r16,153 + ldi r17,139 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi 
r24,44 + ldi r25,213 + ldi r16,204 + ldi r17,69 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,150 + ldi r25,106 + ldi r16,230 + ldi r17,34 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,75 + ldi r25,53 + ldi r16,115 + ldi r17,17 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,165 + ldi r25,154 + ldi r16,185 + ldi r17,136 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,82 + ldi r25,205 + ldi r16,92 + ldi r17,196 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r10 + sbc r0,r1 + ldi r24,169 + ldi r25,102 + ldi r16,46 + ldi r17,98 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,84 + ldi r25,51 + ldi r16,23 + ldi r17,177 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,170 + ldi r25,153 + ldi r16,139 + ldi r17,88 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,213 + ldi r25,204 + ldi r16,69 + ldi r17,44 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,106 + ldi r25,230 + ldi r16,34 + ldi r17,150 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,53 + ldi r25,115 + ldi r16,17 + ldi r17,75 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,154 + ldi r25,185 + ldi r16,136 + ldi r17,165 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,205 + ldi r25,92 + ldi r16,196 + ldi r17,82 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r9 + sbc r0,r1 + ldi r24,102 + ldi r25,46 + ldi r16,98 + ldi r17,169 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,51 + ldi r25,23 + ldi r16,177 + ldi r17,84 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,153 + ldi r25,139 + ldi r16,88 + ldi r17,170 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,204 + ldi r25,69 + ldi r16,44 + ldi r17,213 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,230 + ldi r25,34 + ldi r16,150 + ldi r17,106 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,115 + ldi r25,17 + ldi r16,75 
+ ldi r17,53 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,185 + ldi r25,136 + ldi r16,165 + ldi r17,154 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,92 + ldi r25,196 + ldi r16,82 + ldi r17,205 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + mov r0,r1 + lsl r8 + sbc r0,r1 + ldi r24,46 + ldi r25,98 + ldi r16,169 + ldi r17,102 + and r24,r0 + and r25,r0 + and r16,r0 + and r17,r0 + eor r12,r24 + eor r13,r25 + eor r14,r16 + eor r15,r17 + movw r8,r12 + movw r10,r14 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + com r8 + com r9 + com r10 + com r11 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + mov r0,r18 + and r0,r8 + eor r4,r0 + mov r0,r19 + and r0,r9 + eor r5,r0 + mov r0,r20 + and r0,r10 + eor r6,r0 + mov r0,r21 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r8 + eor r22,r0 + mov r0,r5 + and r0,r9 + eor r23,r0 + mov r0,r6 + and r0,r10 + eor r2,r0 + mov r0,r7 + and r0,r11 + eor r3,r0 + mov r0,r22 + and r0,r4 + eor r18,r0 + mov r0,r23 + and r0,r5 + eor r19,r0 + mov r0,r2 + and r0,r6 + eor r20,r0 + mov r0,r3 + and r0,r7 + eor r21,r0 + mov r0,r18 + and r0,r22 + eor r8,r0 + mov r0,r19 + and r0,r23 + eor r9,r0 + mov r0,r20 + and r0,r2 + eor r10,r0 + mov r0,r21 + and r0,r3 + eor r11,r0 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + ld r15,-Z + ld r14,-Z + ld r13,-Z + ld r12,-Z + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + ld r15,-Z + ld r14,-Z + ld r13,-Z + ld r12,-Z + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + ld r15,-Z + ld r14,-Z + ld r13,-Z + ld r12,-Z + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ld r15,-Z + ld r14,-Z + ld r13,-Z + ld r12,-Z + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + dec r26 + breq 7089f + rjmp 51b +7089: + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r21 + st X+,r20 + st X+,r19 + st X+,r18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + pop r0 + pop r0 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size pyjamask_128_decrypt, .-pyjamask_128_decrypt + +#endif diff --git a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-pyjamask.c b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-pyjamask.c index f3a5655..3c40d2d 100644 --- a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-pyjamask.c +++ b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-pyjamask.c @@ -23,6 +23,8 @@ #include "internal-pyjamask.h" #include "internal-util.h" +#if !defined(__AVR__) + /** * \brief Performs a circulant binary matrix multiplication. 
* @@ -49,7 +51,8 @@ STATIC_INLINE uint32_t pyjamask_matrix_multiply(uint32_t x, uint32_t y) return result; } -void pyjamask_setup_key(pyjamask_key_schedule_t *ks, const unsigned char *key) +void pyjamask_128_setup_key + (pyjamask_128_key_schedule_t *ks, const unsigned char *key) { uint32_t *rk = ks->k; uint32_t k0, k1, k2, k3; @@ -96,8 +99,54 @@ void pyjamask_setup_key(pyjamask_key_schedule_t *ks, const unsigned char *key) } } +void pyjamask_96_setup_key + (pyjamask_96_key_schedule_t *ks, const unsigned char *key) +{ + uint32_t *rk = ks->k; + uint32_t k0, k1, k2, k3; + uint32_t temp; + uint8_t round; + + /* Load the words of the key */ + k0 = be_load_word32(key); + k1 = be_load_word32(key + 4); + k2 = be_load_word32(key + 8); + k3 = be_load_word32(key + 12); + + /* The first round key is the same as the key itself */ + rk[0] = k0; + rk[1] = k1; + rk[2] = k2; + rk += 3; + + /* Derive the round keys for all of the other rounds */ + for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk += 3) { + /* Mix the columns */ + temp = k0 ^ k1 ^ k2 ^ k3; + k0 ^= temp; + k1 ^= temp; + k2 ^= temp; + k3 ^= temp; + + /* Mix the rows and add the round constants. Note that the Pyjamask + * specification says that k1/k2/k3 should be rotated left by 8, 15, + * and 18 bits. But the reference code actually rotates the words + * right. And the test vectors in the specification match up with + * right rotations, not left. We match the reference code here */ + k0 = pyjamask_matrix_multiply(0xb881b9caU, k0) ^ 0x00000080U ^ round; + k1 = rightRotate8(k1) ^ 0x00006a00U; + k2 = rightRotate15(k2) ^ 0x003f0000U; + k3 = rightRotate18(k3) ^ 0x24000000U; + + /* Write the round key to the schedule */ + rk[0] = k0; + rk[1] = k1; + rk[2] = k2; + } +} + void pyjamask_128_encrypt - (const pyjamask_key_schedule_t *ks, unsigned char *output, + (const pyjamask_128_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { const uint32_t *rk = ks->k; @@ -152,7 +201,7 @@ void pyjamask_128_encrypt } void pyjamask_128_decrypt - (const pyjamask_key_schedule_t *ks, unsigned char *output, + (const pyjamask_128_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { const uint32_t *rk = ks->k + 4 * PYJAMASK_ROUNDS; @@ -208,7 +257,7 @@ void pyjamask_128_decrypt } void pyjamask_96_encrypt - (const pyjamask_key_schedule_t *ks, unsigned char *output, + (const pyjamask_96_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { const uint32_t *rk = ks->k; @@ -221,7 +270,7 @@ void pyjamask_96_encrypt s2 = be_load_word32(input + 8); /* Perform all encryption rounds */ - for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk += 4) { + for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk += 3) { /* Add the round key to the state */ s0 ^= rk[0]; s1 ^= rk[1]; @@ -256,10 +305,10 @@ void pyjamask_96_encrypt } void pyjamask_96_decrypt - (const pyjamask_key_schedule_t *ks, unsigned char *output, + (const pyjamask_96_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { - const uint32_t *rk = ks->k + 4 * PYJAMASK_ROUNDS; + const uint32_t *rk = ks->k + 3 * PYJAMASK_ROUNDS; uint32_t s0, s1, s2; uint8_t round; @@ -272,10 +321,10 @@ void pyjamask_96_decrypt s0 ^= rk[0]; s1 ^= rk[1]; s2 ^= rk[2]; - rk -= 4; + rk -= 3; /* Perform all encryption rounds */ - for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk -= 4) { + for (round = 0; round < PYJAMASK_ROUNDS; ++round, rk -= 3) { /* Inverse mix of the rows in the state */ s0 = pyjamask_matrix_multiply(0x2037a121U, s0); s1 = 
pyjamask_matrix_multiply(0x108ff2a0U, s1); @@ -303,3 +352,5 @@ void pyjamask_96_decrypt be_store_word32(output + 4, s1); be_store_word32(output + 8, s2); } + +#endif /* !__AVR__ */ diff --git a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-pyjamask.h b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-pyjamask.h index 3fd93a7..3ead7fb 100644 --- a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-pyjamask.h +++ b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-pyjamask.h @@ -45,31 +45,60 @@ extern "C" { #define PYJAMASK_MASKING_ORDER 4 /** - * \brief Structure of the key schedule for Pyjamask block ciphers. + * \brief Structure of the key schedule for the Pyjamask-128 block cipher. */ typedef struct { uint32_t k[(PYJAMASK_ROUNDS + 1) * 4]; /**< Words of the key schedule */ -} pyjamask_key_schedule_t; +} pyjamask_128_key_schedule_t; /** - * \brief Structure of the key schedule for masked Pyjamask block ciphers. + * \brief Structure of the key schedule for the Pyjamask-96 block cipher. + */ +typedef struct +{ + uint32_t k[(PYJAMASK_ROUNDS + 1) * 3]; /**< Words of the key schedule */ + +} pyjamask_96_key_schedule_t; + +/** + * \brief Structure of the key schedule for masked Pyjamask-128. */ typedef struct { /** Words of the key schedule */ uint32_t k[PYJAMASK_MASKING_ORDER * (PYJAMASK_ROUNDS + 1) * 4]; -} pyjamask_masked_key_schedule_t; +} pyjamask_masked_128_key_schedule_t; + +/** + * \brief Structure of the key schedule for masked Pyjamask-96. + */ +typedef struct +{ + /** Words of the key schedule */ + uint32_t k[PYJAMASK_MASKING_ORDER * (PYJAMASK_ROUNDS + 1) * 3]; + +} pyjamask_masked_96_key_schedule_t; /** - * \brief Sets up the key schedule for the Pyjamask block cipher. + * \brief Sets up the key schedule for the Pyjamask-128 block cipher. * * \param ks The key schedule on output. * \param key The 16 bytes of the key on input. */ -void pyjamask_setup_key(pyjamask_key_schedule_t *ks, const unsigned char *key); +void pyjamask_128_setup_key + (pyjamask_128_key_schedule_t *ks, const unsigned char *key); + +/** + * \brief Sets up the key schedule for the Pyjamask-96 block cipher. + * + * \param ks The key schedule on output. + * \param key The 16 bytes of the key on input. + */ +void pyjamask_96_setup_key + (pyjamask_96_key_schedule_t *ks, const unsigned char *key); /** * \brief Encrypts a 128-bit block with Pyjamask-128. 
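Note on the hunks above: every Pyjamask-96 round-key access now uses a stride of 3 words instead of 4, matching the new pyjamask_96_key_schedule_t with (PYJAMASK_ROUNDS + 1) * 3 words. A minimal layout sketch, not part of the patch (PYJAMASK_ROUNDS is taken as 14 here, as in the Pyjamask specification, and the schedule type is re-declared locally so the snippet stands alone):

#include <stdint.h>

#define PYJAMASK_ROUNDS 14  /* assumed value; the real definition lives in internal-pyjamask.h */

typedef struct {
    uint32_t k[(PYJAMASK_ROUNDS + 1) * 3];   /* 3 words per round key for the 96-bit state */
} pyjamask_96_key_schedule_t;

/* Illustrative only: XOR every round key into a 3-word (96-bit) state,
 * using the same "rk += 3" stride as pyjamask_96_encrypt() above. */
static void xor_all_round_keys_96(const pyjamask_96_key_schedule_t *ks,
                                  uint32_t s[3])
{
    const uint32_t *rk = ks->k;
    unsigned round;
    for (round = 0; round <= PYJAMASK_ROUNDS; ++round, rk += 3) {
        s[0] ^= rk[0];
        s[1] ^= rk[1];
        s[2] ^= rk[2];
    }
}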
@@ -84,7 +113,7 @@ void pyjamask_setup_key(pyjamask_key_schedule_t *ks, const unsigned char *key); * \sa pyjamask_128_decrypt() */ void pyjamask_128_encrypt - (const pyjamask_key_schedule_t *ks, unsigned char *output, + (const pyjamask_128_key_schedule_t *ks, unsigned char *output, const unsigned char *input); /** @@ -100,7 +129,7 @@ void pyjamask_128_encrypt * \sa pyjamask_128_encrypt() */ void pyjamask_128_decrypt - (const pyjamask_key_schedule_t *ks, unsigned char *output, + (const pyjamask_128_key_schedule_t *ks, unsigned char *output, const unsigned char *input); /** @@ -116,7 +145,7 @@ void pyjamask_128_decrypt * \sa pyjamask_96_decrypt() */ void pyjamask_96_encrypt - (const pyjamask_key_schedule_t *ks, unsigned char *output, + (const pyjamask_96_key_schedule_t *ks, unsigned char *output, const unsigned char *input); /** @@ -132,17 +161,26 @@ void pyjamask_96_encrypt * \sa pyjamask_96_encrypt() */ void pyjamask_96_decrypt - (const pyjamask_key_schedule_t *ks, unsigned char *output, + (const pyjamask_96_key_schedule_t *ks, unsigned char *output, const unsigned char *input); /** - * \brief Sets up the key schedule for the masked Pyjamask block cipher. + * \brief Sets up the key schedule for the masked Pyjamask-128 block cipher. + * + * \param ks The key schedule on output. + * \param key The 16 bytes of the key on input. + */ +void pyjamask_masked_128_setup_key + (pyjamask_masked_128_key_schedule_t *ks, const unsigned char *key); + +/** + * \brief Sets up the key schedule for the masked Pyjamask-96 block cipher. * * \param ks The key schedule on output. * \param key The 16 bytes of the key on input. */ -void pyjamask_masked_setup_key - (pyjamask_masked_key_schedule_t *ks, const unsigned char *key); +void pyjamask_masked_96_setup_key + (pyjamask_masked_96_key_schedule_t *ks, const unsigned char *key); /** * \brief Encrypts a 128-bit block with Pyjamask-128 in masked mode. 
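For reference, a hedged usage sketch of the per-width API declared above (not part of the patch); the function names, the 16-byte key, and the 12-byte block size are taken directly from the declarations in this header:

#include "internal-pyjamask.h"

/* Encrypt one 96-bit (12-byte) block with a freshly expanded key. */
static void pyjamask_96_encrypt_block(unsigned char out[12],
                                      const unsigned char in[12],
                                      const unsigned char key[16])
{
    pyjamask_96_key_schedule_t ks;
    pyjamask_96_setup_key(&ks, key);    /* 16 bytes of key, as documented above */
    pyjamask_96_encrypt(&ks, out, in);  /* one 96-bit block */
}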
@@ -157,7 +195,7 @@ void pyjamask_masked_setup_key * \sa pyjamask_masked_128_decrypt() */ void pyjamask_masked_128_encrypt - (const pyjamask_masked_key_schedule_t *ks, unsigned char *output, + (const pyjamask_masked_128_key_schedule_t *ks, unsigned char *output, const unsigned char *input); /** @@ -173,7 +211,7 @@ void pyjamask_masked_128_encrypt * \sa pyjamask_masked_128_encrypt() */ void pyjamask_masked_128_decrypt - (const pyjamask_masked_key_schedule_t *ks, unsigned char *output, + (const pyjamask_masked_128_key_schedule_t *ks, unsigned char *output, const unsigned char *input); /** @@ -189,7 +227,7 @@ void pyjamask_masked_128_decrypt * \sa pyjamask_masked_96_decrypt() */ void pyjamask_masked_96_encrypt - (const pyjamask_masked_key_schedule_t *ks, unsigned char *output, + (const pyjamask_masked_96_key_schedule_t *ks, unsigned char *output, const unsigned char *input); /** @@ -205,7 +243,7 @@ void pyjamask_masked_96_encrypt * \sa pyjamask_masked_96_encrypt() */ void pyjamask_masked_96_decrypt - (const pyjamask_masked_key_schedule_t *ks, unsigned char *output, + (const pyjamask_masked_96_key_schedule_t *ks, unsigned char *output, const unsigned char *input); #ifdef __cplusplus diff --git a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-util.h b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-util.h +++ b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
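The composed-rotation comment above can be checked in plain C: 32-bit rotations compose modulo 32, so any count is reachable from a multiple-of-8 rotation (cheap byte moves on AVR) plus at most four 1-bit rotations (single rol/ror through carry). A small self-contained check, not part of the patch:

#include <assert.h>
#include <stdint.h>

static uint32_t rotl32(uint32_t x, unsigned n) { return (x << n) | (x >> (32u - n)); }
static uint32_t rotr32(uint32_t x, unsigned n) { return (x >> n) | (x << (32u - n)); }

int main(void)
{
    uint32_t x = 0x12345678;
    /* leftRotate13: rotate left by 16, then right by 3 (as defined below). */
    assert(rotl32(x, 13) == rotr32(rotl32(x, 16), 3));
    /* leftRotate5: rotate left by 8, then right by 3. */
    assert(rotl32(x, 5) == rotr32(rotl32(x, 8), 3));
    /* leftRotate28: rotate right by 4. */
    assert(rotl32(x, 28) == rotr32(x, 4));
    return 0;
}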
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/pyjamask-96.c b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/pyjamask-96.c index 3361699..37f508d 100644 --- a/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/pyjamask-96.c +++ b/pyjamask/Implementations/crypto_aead/pyjamask96aeadv1/rhys/pyjamask-96.c @@ -33,25 +33,12 @@ aead_cipher_t const pyjamask_96_cipher = { pyjamask_96_aead_decrypt }; -/* Double a value in GF(96) */ -static void pyjamask_96_double_l - (unsigned char out[12], const unsigned char in[12]) -{ - unsigned index; - unsigned char mask = (unsigned char)(((signed char)in[0]) >> 7); - for (index = 0; index < 11; ++index) - out[index] = (in[index] << 1) | (in[index + 1] >> 7); - out[11] = (in[11] << 1) ^ (mask & 0x41); - out[10] ^= (mask & 0x06); -} - #define OCB_ALG_NAME pyjamask_96 #define OCB_BLOCK_SIZE 12 #define OCB_NONCE_SIZE PYJAMASK_96_NONCE_SIZE #define OCB_TAG_SIZE PYJAMASK_96_TAG_SIZE -#define OCB_KEY_SCHEDULE pyjamask_key_schedule_t -#define OCB_SETUP_KEY pyjamask_setup_key +#define OCB_KEY_SCHEDULE pyjamask_96_key_schedule_t +#define OCB_SETUP_KEY pyjamask_96_setup_key #define OCB_ENCRYPT_BLOCK pyjamask_96_encrypt #define 
OCB_DECRYPT_BLOCK pyjamask_96_decrypt -#define OCB_DOUBLE_L pyjamask_96_double_l #include "internal-ocb.h" diff --git a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/aead-common.c b/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/aead-common.h b/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. 
Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
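The aead_check_tag()/aead_check_tag_precheck() comments above describe a branch-free comparison: all byte differences are OR-ed into "accum", and (accum - 1) >> 8 becomes an all-ones mask only when every byte matched, so the same value both zeroes the plaintext and selects the 0 / -1 return code. A standalone sketch of that idiom, not part of the patch (it assumes the usual two's-complement arithmetic right shift of negative values):

#include <assert.h>

static int tag_check_ct(const unsigned char *tag1, const unsigned char *tag2,
                        unsigned len)
{
    int accum = 0;
    while (len > 0) {
        accum |= (*tag1++ ^ *tag2++);   /* nonzero iff any byte differs */
        --len;
    }
    accum = (accum - 1) >> 8;           /* -1 on match, 0 on mismatch */
    return ~accum;                      /* 0 on match, -1 on mismatch */
}

int main(void)
{
    unsigned char a[4] = {1, 2, 3, 4};
    unsigned char b[4] = {1, 2, 3, 4};
    unsigned char c[4] = {1, 2, 3, 5};
    assert(tag_check_ct(a, b, 4) == 0);
    assert(tag_check_ct(a, c, 4) == -1);
    return 0;
}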
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/api.h b/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/encrypt.c b/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/encrypt.c deleted file mode 100644 index f13a728..0000000 --- a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "romulus.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return romulus_m1_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return romulus_m1_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-skinny128-avr.S b/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-skinny128-avr.S deleted file mode 100644 index d342cd5..0000000 --- a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-skinny128-avr.S +++ /dev/null @@ -1,10099 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 256 -table_0: - .byte 101 - .byte 76 - .byte 106 - .byte 66 - .byte 75 - .byte 99 - .byte 67 - .byte 107 - .byte 85 - .byte 117 - .byte 90 - .byte 122 - .byte 83 - .byte 115 - .byte 91 - .byte 123 - .byte 53 - .byte 140 - .byte 58 - .byte 129 - .byte 137 - .byte 51 - .byte 128 - .byte 59 - .byte 149 - .byte 37 - .byte 152 - .byte 42 - .byte 144 - .byte 35 - .byte 153 - .byte 43 - .byte 229 - .byte 204 - .byte 232 - .byte 193 - .byte 201 - .byte 224 - .byte 192 - .byte 233 - .byte 213 - .byte 245 - .byte 216 - .byte 248 - .byte 208 - .byte 240 - .byte 217 - .byte 249 - .byte 165 - .byte 28 - .byte 168 - .byte 18 - .byte 27 - .byte 160 - .byte 19 - .byte 169 - .byte 5 - .byte 181 - .byte 10 - .byte 184 - .byte 3 - .byte 176 - .byte 11 - .byte 185 - .byte 50 - .byte 136 - .byte 60 - .byte 133 - .byte 141 - .byte 52 - .byte 132 - .byte 61 - .byte 145 - .byte 34 - .byte 156 - .byte 44 - .byte 148 - .byte 36 - .byte 157 - .byte 45 - .byte 98 - .byte 74 - .byte 108 - .byte 69 - .byte 77 - .byte 100 - .byte 68 - .byte 109 - .byte 82 - .byte 114 - .byte 92 - .byte 124 - .byte 84 - .byte 116 - .byte 93 - .byte 125 - .byte 161 - .byte 26 - .byte 172 - .byte 21 - .byte 29 - .byte 164 - .byte 20 - .byte 173 - .byte 2 - .byte 177 - .byte 12 - .byte 188 - .byte 4 - .byte 180 - .byte 13 - .byte 189 - .byte 225 - .byte 200 - .byte 236 - .byte 197 - .byte 205 - .byte 228 
- .byte 196 - .byte 237 - .byte 209 - .byte 241 - .byte 220 - .byte 252 - .byte 212 - .byte 244 - .byte 221 - .byte 253 - .byte 54 - .byte 142 - .byte 56 - .byte 130 - .byte 139 - .byte 48 - .byte 131 - .byte 57 - .byte 150 - .byte 38 - .byte 154 - .byte 40 - .byte 147 - .byte 32 - .byte 155 - .byte 41 - .byte 102 - .byte 78 - .byte 104 - .byte 65 - .byte 73 - .byte 96 - .byte 64 - .byte 105 - .byte 86 - .byte 118 - .byte 88 - .byte 120 - .byte 80 - .byte 112 - .byte 89 - .byte 121 - .byte 166 - .byte 30 - .byte 170 - .byte 17 - .byte 25 - .byte 163 - .byte 16 - .byte 171 - .byte 6 - .byte 182 - .byte 8 - .byte 186 - .byte 0 - .byte 179 - .byte 9 - .byte 187 - .byte 230 - .byte 206 - .byte 234 - .byte 194 - .byte 203 - .byte 227 - .byte 195 - .byte 235 - .byte 214 - .byte 246 - .byte 218 - .byte 250 - .byte 211 - .byte 243 - .byte 219 - .byte 251 - .byte 49 - .byte 138 - .byte 62 - .byte 134 - .byte 143 - .byte 55 - .byte 135 - .byte 63 - .byte 146 - .byte 33 - .byte 158 - .byte 46 - .byte 151 - .byte 39 - .byte 159 - .byte 47 - .byte 97 - .byte 72 - .byte 110 - .byte 70 - .byte 79 - .byte 103 - .byte 71 - .byte 111 - .byte 81 - .byte 113 - .byte 94 - .byte 126 - .byte 87 - .byte 119 - .byte 95 - .byte 127 - .byte 162 - .byte 24 - .byte 174 - .byte 22 - .byte 31 - .byte 167 - .byte 23 - .byte 175 - .byte 1 - .byte 178 - .byte 14 - .byte 190 - .byte 7 - .byte 183 - .byte 15 - .byte 191 - .byte 226 - .byte 202 - .byte 238 - .byte 198 - .byte 207 - .byte 231 - .byte 199 - .byte 239 - .byte 210 - .byte 242 - .byte 222 - .byte 254 - .byte 215 - .byte 247 - .byte 223 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 256 -table_1: - .byte 172 - .byte 232 - .byte 104 - .byte 60 - .byte 108 - .byte 56 - .byte 168 - .byte 236 - .byte 170 - .byte 174 - .byte 58 - .byte 62 - .byte 106 - .byte 110 - .byte 234 - .byte 238 - .byte 166 - .byte 163 - .byte 51 - .byte 54 - .byte 102 - .byte 99 - .byte 227 - .byte 230 - .byte 225 - .byte 164 - .byte 97 - .byte 52 - .byte 49 - .byte 100 - .byte 161 - .byte 228 - .byte 141 - .byte 201 - .byte 73 - .byte 29 - .byte 77 - .byte 25 - .byte 137 - .byte 205 - .byte 139 - .byte 143 - .byte 27 - .byte 31 - .byte 75 - .byte 79 - .byte 203 - .byte 207 - .byte 133 - .byte 192 - .byte 64 - .byte 21 - .byte 69 - .byte 16 - .byte 128 - .byte 197 - .byte 130 - .byte 135 - .byte 18 - .byte 23 - .byte 66 - .byte 71 - .byte 194 - .byte 199 - .byte 150 - .byte 147 - .byte 3 - .byte 6 - .byte 86 - .byte 83 - .byte 211 - .byte 214 - .byte 209 - .byte 148 - .byte 81 - .byte 4 - .byte 1 - .byte 84 - .byte 145 - .byte 212 - .byte 156 - .byte 216 - .byte 88 - .byte 12 - .byte 92 - .byte 8 - .byte 152 - .byte 220 - .byte 154 - .byte 158 - .byte 10 - .byte 14 - .byte 90 - .byte 94 - .byte 218 - .byte 222 - .byte 149 - .byte 208 - .byte 80 - .byte 5 - .byte 85 - .byte 0 - .byte 144 - .byte 213 - .byte 146 - .byte 151 - .byte 2 - .byte 7 - .byte 82 - .byte 87 - .byte 210 - .byte 215 - .byte 157 - .byte 217 - .byte 89 - .byte 13 - .byte 93 - .byte 9 - .byte 153 - .byte 221 - .byte 155 - .byte 159 - .byte 11 - .byte 15 - .byte 91 - .byte 95 - .byte 219 - .byte 223 - .byte 22 - .byte 19 - .byte 131 - .byte 134 - .byte 70 - .byte 67 - .byte 195 - .byte 198 - .byte 65 - .byte 20 - .byte 193 - .byte 132 - .byte 17 - .byte 68 - .byte 129 - .byte 196 - .byte 28 - .byte 72 - .byte 200 - .byte 140 - .byte 76 - .byte 24 - .byte 136 - .byte 204 - .byte 26 - .byte 30 - .byte 138 - .byte 142 - .byte 74 - .byte 78 - .byte 202 - .byte 206 - .byte 
53 - .byte 96 - .byte 224 - .byte 165 - .byte 101 - .byte 48 - .byte 160 - .byte 229 - .byte 50 - .byte 55 - .byte 162 - .byte 167 - .byte 98 - .byte 103 - .byte 226 - .byte 231 - .byte 61 - .byte 105 - .byte 233 - .byte 173 - .byte 109 - .byte 57 - .byte 169 - .byte 237 - .byte 59 - .byte 63 - .byte 171 - .byte 175 - .byte 107 - .byte 111 - .byte 235 - .byte 239 - .byte 38 - .byte 35 - .byte 179 - .byte 182 - .byte 118 - .byte 115 - .byte 243 - .byte 246 - .byte 113 - .byte 36 - .byte 241 - .byte 180 - .byte 33 - .byte 116 - .byte 177 - .byte 244 - .byte 44 - .byte 120 - .byte 248 - .byte 188 - .byte 124 - .byte 40 - .byte 184 - .byte 252 - .byte 42 - .byte 46 - .byte 186 - .byte 190 - .byte 122 - .byte 126 - .byte 250 - .byte 254 - .byte 37 - .byte 112 - .byte 240 - .byte 181 - .byte 117 - .byte 32 - .byte 176 - .byte 245 - .byte 34 - .byte 39 - .byte 178 - .byte 183 - .byte 114 - .byte 119 - .byte 242 - .byte 247 - .byte 45 - .byte 121 - .byte 249 - .byte 189 - .byte 125 - .byte 41 - .byte 185 - .byte 253 - .byte 43 - .byte 47 - .byte 187 - .byte 191 - .byte 123 - .byte 127 - .byte 251 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_2, @object - .size table_2, 256 -table_2: - .byte 0 - .byte 2 - .byte 4 - .byte 6 - .byte 8 - .byte 10 - .byte 12 - .byte 14 - .byte 16 - .byte 18 - .byte 20 - .byte 22 - .byte 24 - .byte 26 - .byte 28 - .byte 30 - .byte 32 - .byte 34 - .byte 36 - .byte 38 - .byte 40 - .byte 42 - .byte 44 - .byte 46 - .byte 48 - .byte 50 - .byte 52 - .byte 54 - .byte 56 - .byte 58 - .byte 60 - .byte 62 - .byte 65 - .byte 67 - .byte 69 - .byte 71 - .byte 73 - .byte 75 - .byte 77 - .byte 79 - .byte 81 - .byte 83 - .byte 85 - .byte 87 - .byte 89 - .byte 91 - .byte 93 - .byte 95 - .byte 97 - .byte 99 - .byte 101 - .byte 103 - .byte 105 - .byte 107 - .byte 109 - .byte 111 - .byte 113 - .byte 115 - .byte 117 - .byte 119 - .byte 121 - .byte 123 - .byte 125 - .byte 127 - .byte 128 - .byte 130 - .byte 132 - .byte 134 - .byte 136 - .byte 138 - .byte 140 - .byte 142 - .byte 144 - .byte 146 - .byte 148 - .byte 150 - .byte 152 - .byte 154 - .byte 156 - .byte 158 - .byte 160 - .byte 162 - .byte 164 - .byte 166 - .byte 168 - .byte 170 - .byte 172 - .byte 174 - .byte 176 - .byte 178 - .byte 180 - .byte 182 - .byte 184 - .byte 186 - .byte 188 - .byte 190 - .byte 193 - .byte 195 - .byte 197 - .byte 199 - .byte 201 - .byte 203 - .byte 205 - .byte 207 - .byte 209 - .byte 211 - .byte 213 - .byte 215 - .byte 217 - .byte 219 - .byte 221 - .byte 223 - .byte 225 - .byte 227 - .byte 229 - .byte 231 - .byte 233 - .byte 235 - .byte 237 - .byte 239 - .byte 241 - .byte 243 - .byte 245 - .byte 247 - .byte 249 - .byte 251 - .byte 253 - .byte 255 - .byte 1 - .byte 3 - .byte 5 - .byte 7 - .byte 9 - .byte 11 - .byte 13 - .byte 15 - .byte 17 - .byte 19 - .byte 21 - .byte 23 - .byte 25 - .byte 27 - .byte 29 - .byte 31 - .byte 33 - .byte 35 - .byte 37 - .byte 39 - .byte 41 - .byte 43 - .byte 45 - .byte 47 - .byte 49 - .byte 51 - .byte 53 - .byte 55 - .byte 57 - .byte 59 - .byte 61 - .byte 63 - .byte 64 - .byte 66 - .byte 68 - .byte 70 - .byte 72 - .byte 74 - .byte 76 - .byte 78 - .byte 80 - .byte 82 - .byte 84 - .byte 86 - .byte 88 - .byte 90 - .byte 92 - .byte 94 - .byte 96 - .byte 98 - .byte 100 - .byte 102 - .byte 104 - .byte 106 - .byte 108 - .byte 110 - .byte 112 - .byte 114 - .byte 116 - .byte 118 - .byte 120 - .byte 122 - .byte 124 - .byte 126 - .byte 129 - .byte 131 - .byte 133 - .byte 135 - .byte 137 - .byte 139 - .byte 141 - .byte 143 - .byte 145 - .byte 147 - .byte 149 
- .byte 151 - .byte 153 - .byte 155 - .byte 157 - .byte 159 - .byte 161 - .byte 163 - .byte 165 - .byte 167 - .byte 169 - .byte 171 - .byte 173 - .byte 175 - .byte 177 - .byte 179 - .byte 181 - .byte 183 - .byte 185 - .byte 187 - .byte 189 - .byte 191 - .byte 192 - .byte 194 - .byte 196 - .byte 198 - .byte 200 - .byte 202 - .byte 204 - .byte 206 - .byte 208 - .byte 210 - .byte 212 - .byte 214 - .byte 216 - .byte 218 - .byte 220 - .byte 222 - .byte 224 - .byte 226 - .byte 228 - .byte 230 - .byte 232 - .byte 234 - .byte 236 - .byte 238 - .byte 240 - .byte 242 - .byte 244 - .byte 246 - .byte 248 - .byte 250 - .byte 252 - .byte 254 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_3, @object - .size table_3, 256 -table_3: - .byte 0 - .byte 128 - .byte 1 - .byte 129 - .byte 2 - .byte 130 - .byte 3 - .byte 131 - .byte 4 - .byte 132 - .byte 5 - .byte 133 - .byte 6 - .byte 134 - .byte 7 - .byte 135 - .byte 8 - .byte 136 - .byte 9 - .byte 137 - .byte 10 - .byte 138 - .byte 11 - .byte 139 - .byte 12 - .byte 140 - .byte 13 - .byte 141 - .byte 14 - .byte 142 - .byte 15 - .byte 143 - .byte 16 - .byte 144 - .byte 17 - .byte 145 - .byte 18 - .byte 146 - .byte 19 - .byte 147 - .byte 20 - .byte 148 - .byte 21 - .byte 149 - .byte 22 - .byte 150 - .byte 23 - .byte 151 - .byte 24 - .byte 152 - .byte 25 - .byte 153 - .byte 26 - .byte 154 - .byte 27 - .byte 155 - .byte 28 - .byte 156 - .byte 29 - .byte 157 - .byte 30 - .byte 158 - .byte 31 - .byte 159 - .byte 160 - .byte 32 - .byte 161 - .byte 33 - .byte 162 - .byte 34 - .byte 163 - .byte 35 - .byte 164 - .byte 36 - .byte 165 - .byte 37 - .byte 166 - .byte 38 - .byte 167 - .byte 39 - .byte 168 - .byte 40 - .byte 169 - .byte 41 - .byte 170 - .byte 42 - .byte 171 - .byte 43 - .byte 172 - .byte 44 - .byte 173 - .byte 45 - .byte 174 - .byte 46 - .byte 175 - .byte 47 - .byte 176 - .byte 48 - .byte 177 - .byte 49 - .byte 178 - .byte 50 - .byte 179 - .byte 51 - .byte 180 - .byte 52 - .byte 181 - .byte 53 - .byte 182 - .byte 54 - .byte 183 - .byte 55 - .byte 184 - .byte 56 - .byte 185 - .byte 57 - .byte 186 - .byte 58 - .byte 187 - .byte 59 - .byte 188 - .byte 60 - .byte 189 - .byte 61 - .byte 190 - .byte 62 - .byte 191 - .byte 63 - .byte 64 - .byte 192 - .byte 65 - .byte 193 - .byte 66 - .byte 194 - .byte 67 - .byte 195 - .byte 68 - .byte 196 - .byte 69 - .byte 197 - .byte 70 - .byte 198 - .byte 71 - .byte 199 - .byte 72 - .byte 200 - .byte 73 - .byte 201 - .byte 74 - .byte 202 - .byte 75 - .byte 203 - .byte 76 - .byte 204 - .byte 77 - .byte 205 - .byte 78 - .byte 206 - .byte 79 - .byte 207 - .byte 80 - .byte 208 - .byte 81 - .byte 209 - .byte 82 - .byte 210 - .byte 83 - .byte 211 - .byte 84 - .byte 212 - .byte 85 - .byte 213 - .byte 86 - .byte 214 - .byte 87 - .byte 215 - .byte 88 - .byte 216 - .byte 89 - .byte 217 - .byte 90 - .byte 218 - .byte 91 - .byte 219 - .byte 92 - .byte 220 - .byte 93 - .byte 221 - .byte 94 - .byte 222 - .byte 95 - .byte 223 - .byte 224 - .byte 96 - .byte 225 - .byte 97 - .byte 226 - .byte 98 - .byte 227 - .byte 99 - .byte 228 - .byte 100 - .byte 229 - .byte 101 - .byte 230 - .byte 102 - .byte 231 - .byte 103 - .byte 232 - .byte 104 - .byte 233 - .byte 105 - .byte 234 - .byte 106 - .byte 235 - .byte 107 - .byte 236 - .byte 108 - .byte 237 - .byte 109 - .byte 238 - .byte 110 - .byte 239 - .byte 111 - .byte 240 - .byte 112 - .byte 241 - .byte 113 - .byte 242 - .byte 114 - .byte 243 - .byte 115 - .byte 244 - .byte 116 - .byte 245 - .byte 117 - .byte 246 - .byte 118 - .byte 247 - .byte 119 - .byte 248 - .byte 120 - .byte 249 - 
.byte 121 - .byte 250 - .byte 122 - .byte 251 - .byte 123 - .byte 252 - .byte 124 - .byte 253 - .byte 125 - .byte 254 - .byte 126 - .byte 255 - .byte 127 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_4, @object - .size table_4, 112 -table_4: - .byte 1 - .byte 0 - .byte 3 - .byte 0 - .byte 7 - .byte 0 - .byte 15 - .byte 0 - .byte 15 - .byte 1 - .byte 14 - .byte 3 - .byte 13 - .byte 3 - .byte 11 - .byte 3 - .byte 7 - .byte 3 - .byte 15 - .byte 2 - .byte 14 - .byte 1 - .byte 12 - .byte 3 - .byte 9 - .byte 3 - .byte 3 - .byte 3 - .byte 7 - .byte 2 - .byte 14 - .byte 0 - .byte 13 - .byte 1 - .byte 10 - .byte 3 - .byte 5 - .byte 3 - .byte 11 - .byte 2 - .byte 6 - .byte 1 - .byte 12 - .byte 2 - .byte 8 - .byte 1 - .byte 0 - .byte 3 - .byte 1 - .byte 2 - .byte 2 - .byte 0 - .byte 5 - .byte 0 - .byte 11 - .byte 0 - .byte 7 - .byte 1 - .byte 14 - .byte 2 - .byte 12 - .byte 1 - .byte 8 - .byte 3 - .byte 1 - .byte 3 - .byte 3 - .byte 2 - .byte 6 - .byte 0 - .byte 13 - .byte 0 - .byte 11 - .byte 1 - .byte 6 - .byte 3 - .byte 13 - .byte 2 - .byte 10 - .byte 1 - .byte 4 - .byte 3 - .byte 9 - .byte 2 - .byte 2 - .byte 1 - .byte 4 - .byte 2 - .byte 8 - .byte 0 - .byte 1 - .byte 1 - .byte 2 - .byte 2 - .byte 4 - .byte 0 - .byte 9 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 2 - .byte 12 - .byte 0 - .byte 9 - .byte 1 - .byte 2 - .byte 3 - .byte 5 - .byte 2 - .byte 10 - .byte 0 - - .text -.global skinny_128_384_init - .type skinny_128_384_init, @function -skinny_128_384_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,12 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_384_init, .-skinny_128_384_init - - .text -.global skinny_128_384_encrypt - .type skinny_128_384_encrypt, @function -skinny_128_384_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - std Y+33,r18 - std Y+34,r19 - std Y+35,r20 - std Y+36,r21 - ldd r18,Z+36 - ldd r19,Z+37 - ldd r20,Z+38 - ldd r21,Z+39 - std Y+37,r18 - std Y+38,r19 - std Y+39,r20 - std Y+40,r21 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - std Y+41,r18 - std Y+42,r19 - std Y+43,r20 - std Y+44,r21 - ldd r18,Z+44 - ldd r19,Z+45 - 
ldd r20,Z+46 - ldd r21,Z+47 - std Y+45,r18 - std Y+46,r19 - std Y+47,r20 - std Y+48,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -114: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - 
mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm 
- mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - 
lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if 
defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - 
mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) 
- elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - 
lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,112 - brne 5721f - rjmp 
790f -5721: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - 
std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 114b -790: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_encrypt, .-skinny_128_384_encrypt - -.global skinny_128_384_encrypt_tk_full - .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt - - .text -.global skinny_128_384_decrypt - .type skinny_128_384_decrypt, @function -skinny_128_384_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r23 - std Y+2,r2 - std Y+3,r21 - std Y+4,r20 - std Y+5,r3 - std Y+6,r18 - std Y+7,r19 - std Y+8,r22 - std Y+9,r9 - std Y+10,r10 - std Y+11,r7 - std Y+12,r6 - std Y+13,r11 - std Y+14,r4 - std Y+15,r5 - std Y+16,r8 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r23 - std Y+18,r2 - std Y+19,r21 - std Y+20,r20 - std Y+21,r3 - std Y+22,r18 - std Y+23,r19 - std Y+24,r22 - std Y+25,r9 - std Y+26,r10 - std Y+27,r7 - std Y+28,r6 - std Y+29,r11 - std Y+30,r4 - std Y+31,r5 - std Y+32,r8 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r22,Z+36 - ldd r23,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+33,r23 - std Y+34,r2 - std Y+35,r21 - std Y+36,r20 - std Y+37,r3 - std Y+38,r18 - std Y+39,r19 - std Y+40,r22 - std Y+41,r9 - std Y+42,r10 - std Y+43,r7 - std Y+44,r6 - std Y+45,r11 - std Y+46,r4 - std Y+47,r5 - std Y+48,r8 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -122: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - 
ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 122b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,28 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -150: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 150b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 -179: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - 
mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 179b - std Y+33,r12 - std Y+34,r13 - std Y+35,r14 - std Y+36,r15 - std Y+37,r24 - std Y+38,r25 - std Y+39,r16 - std Y+40,r17 - ldi r26,28 - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 -207: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 207b - std Y+41,r12 - std Y+42,r13 - std Y+43,r14 - std Y+44,r15 - std Y+45,r24 - std Y+46,r25 - std Y+47,r16 - std Y+48,r17 - ldi r26,112 -227: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 
- eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd 
r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 
- eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif 
defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm 
r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd 
r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z 
-#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 903f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 227b -903: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_decrypt, .-skinny_128_384_decrypt - - .text -.global skinny_128_256_init - .type skinny_128_256_init, @function -skinny_128_256_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,8 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_256_init, .-skinny_128_256_init - - .text -.global skinny_128_256_encrypt - .type skinny_128_256_encrypt, @function -skinny_128_256_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - 
push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -82: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif 
- mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif 
defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - 
ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z 
-#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - 
std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) 
- lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,96 - breq 594f - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov 
r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 82b -594: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_encrypt, .-skinny_128_256_encrypt - -.global skinny_128_256_encrypt_tk_full - .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt - - .text -.global skinny_128_256_decrypt - .type skinny_128_256_decrypt, @function -skinny_128_256_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - std Y+5,r22 - std Y+6,r23 - std Y+7,r2 - std Y+8,r3 - std Y+9,r4 - std Y+10,r5 - std Y+11,r6 - std Y+12,r7 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - std Y+21,r22 - std Y+22,r23 - std Y+23,r2 - std Y+24,r3 - std Y+25,r4 - std Y+26,r5 - std Y+27,r6 - std Y+28,r7 - std Y+29,r8 - std Y+30,r9 - std Y+31,r10 - std Y+32,r11 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,24 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -90: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm 
r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 90b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,24 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -118: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 118b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,96 -139: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov 
r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 
-#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif 
defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif 
defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out 
_SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std 
Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - 
elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 651f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 139b -651: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_decrypt, .-skinny_128_256_decrypt - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-skinny128.c b/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-skinny128.c deleted file mode 100644 index 579ced1..0000000 --- a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-skinny128.c +++ /dev/null @@ -1,801 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-skinny128.h" -#include "internal-skinnyutil.h" -#include "internal-util.h" -#include - -#if !defined(__AVR__) - -STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) -{ - /* This function is used to fast-forward the TK1 tweak value - * to the value at the end of the key schedule for decryption. - * - * The tweak permutation repeats every 16 rounds, so SKINNY-128-256 - * with 48 rounds does not need any fast forwarding applied. - * SKINNY-128-128 with 40 rounds and SKINNY-128-384 with 56 rounds - * are equivalent to applying the permutation 8 times: - * - * PT*8 = [5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12] - */ - uint32_t row0 = tk[0]; - uint32_t row1 = tk[1]; - uint32_t row2 = tk[2]; - uint32_t row3 = tk[3]; - tk[0] = ((row1 >> 8) & 0x0000FFFFU) | - ((row0 >> 8) & 0x00FF0000U) | - ((row0 << 8) & 0xFF000000U); - tk[1] = ((row1 >> 24) & 0x000000FFU) | - ((row0 << 8) & 0x00FFFF00U) | - ((row1 << 24) & 0xFF000000U); - tk[2] = ((row3 >> 8) & 0x0000FFFFU) | - ((row2 >> 8) & 0x00FF0000U) | - ((row2 << 8) & 0xFF000000U); - tk[3] = ((row3 >> 24) & 0x000000FFU) | - ((row2 << 8) & 0x00FFFF00U) | - ((row3 << 24) & 0xFF000000U); -} - -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); - memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); -#else - /* Set the initial states of TK1, TK2, and TK3 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Set up the key schedule using TK2 and TK3. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. 
- * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ TK3[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ TK3[1] ^ (rc >> 4); - - /* Permute TK2 and TK3 for the next round */ - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - - /* Apply the LFSR's to TK2 and TK3 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } -#endif -} - -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0x15; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Permute TK1 to fast-forward it to the end of the key schedule */ - skinny128_fast_forward_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_fast_forward_tk(TK2); - skinny128_fast_forward_tk(TK3); - for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
- skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - skinny128_LFSR3(TK3[2]); - skinny128_LFSR3(TK3[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_inv_permute_tk(TK3); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); - skinny128_LFSR2(TK3[2]); - skinny128_LFSR2(TK3[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); - TK2[0] = le_load_word32(tk2); - TK2[1] = le_load_word32(tk2 + 4); - TK2[2] = le_load_word32(tk2 + 8); - TK2[3] = le_load_word32(tk2 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; - s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK3); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); -#else - /* Set the initial states of TK1 and TK2 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Set up the key schedule using TK2. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ (rc >> 4); - - /* Permute TK2 for the next round */ - skinny128_permute_tk(TK2); - - /* Apply the LFSR to TK2 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } -#endif -} - -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0x09; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1. 
- * There is no need to fast-forward TK1 because the value at - * the end of the key schedule is the same as at the start */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2. - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -#else /* __AVR__ */ - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - memcpy(ks->TK2, tk2, 16); - skinny_128_384_encrypt(ks, output, input); -} - -#endif /* __AVR__ */ diff --git a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-skinny128.h b/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-skinny128.h deleted file mode 100644 index 2bfda3c..0000000 --- a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-skinny128.h +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNY128_H -#define LW_INTERNAL_SKINNY128_H - -/** - * \file internal-skinny128.h - * \brief SKINNY-128 block cipher family. - * - * References: https://eprint.iacr.org/2016/660.pdf, - * https://sites.google.com/site/skinnycipher/ - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \def SKINNY_128_SMALL_SCHEDULE - * \brief Defined to 1 to use the small key schedule version of SKINNY-128. - */ -#if defined(__AVR__) -#define SKINNY_128_SMALL_SCHEDULE 1 -#else -#define SKINNY_128_SMALL_SCHEDULE 0 -#endif - -/** - * \brief Size of a block for SKINNY-128 block ciphers. - */ -#define SKINNY_128_BLOCK_SIZE 16 - -/** - * \brief Number of rounds for SKINNY-128-384. - */ -#define SKINNY_128_384_ROUNDS 56 - -/** - * \brief Structure of the key schedule for SKINNY-128-384. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; - - /** TK3 for the small key schedule */ - uint8_t TK3[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_384_ROUNDS * 2]; -#endif - -} skinny_128_384_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-384. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and an explicitly - * provided TK2 value. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tk2 TK2 value that should be updated on the fly. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when both TK1 and TK2 change from block to block. - * When the key is initialized with skinny_128_384_init(), the TK2 part of - * the key value should be set to zero. - * - * \note Some versions of this function may modify the key schedule to - * copy tk2 into place. - */ -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and a - * fully specified tweakey value. - * - * \param key Points to the 384-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-384 but - * more memory-efficient. - */ -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input); - -/** - * \brief Number of rounds for SKINNY-128-256. - */ -#define SKINNY_128_256_ROUNDS 48 - -/** - * \brief Structure of the key schedule for SKINNY-128-256. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_256_ROUNDS * 2]; -#endif - -} skinny_128_256_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256 and a - * fully specified tweakey value. - * - * \param key Points to the 256-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-256 but - * more memory-efficient. - */ -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-skinnyutil.h b/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-skinnyutil.h deleted file mode 100644 index 83136cb..0000000 --- a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-skinnyutil.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNYUTIL_H -#define LW_INTERNAL_SKINNYUTIL_H - -/** - * \file internal-skinnyutil.h - * \brief Utilities to help implement SKINNY and its variants. - */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @cond skinnyutil */ - -/* Utilities for implementing SKINNY-128 */ - -#define skinny128_LFSR2(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ - (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ - } while (0) - - -#define skinny128_LFSR3(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ - (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) -#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) - -#define skinny128_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint32_t row2 = tk[2]; \ - uint32_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 16) | (row3 >> 16); \ - tk[0] = ((row2 >> 8) & 0x000000FFU) | \ - ((row2 << 16) & 0x00FF0000U) | \ - ( row3 & 0xFF00FF00U); \ - tk[1] = ((row2 >> 16) & 0x000000FFU) | \ - (row2 & 0xFF000000U) | \ - ((row3 << 8) & 0x0000FF00U) | \ - ( row3 & 0x00FF0000U); \ - } while (0) - -#define skinny128_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint32_t row0 = tk[0]; \ - uint32_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 >> 16) & 0x000000FFU) | \ - ((row0 << 8) & 0x0000FF00U) | \ - ((row1 << 16) & 0x00FF0000U) | \ - ( row1 & 0xFF000000U); \ - tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ - ((row0 << 16) & 0xFF000000U) | \ - ((row1 >> 16) & 0x000000FFU) | \ - ((row1 << 8) & 0x00FF0000U); \ - } while (0) - -/* - * Apply the SKINNY sbox. The original version from the specification is - * equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE(x) - * ((((x) & 0x01010101U) << 2) | - * (((x) & 0x06060606U) << 5) | - * (((x) & 0x20202020U) >> 5) | - * (((x) & 0xC8C8C8C8U) >> 2) | - * (((x) & 0x10101010U) >> 1)) - * - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * return SBOX_SWAP(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. 
- */ -#define skinny128_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ - y = (((x >> 5) & (x << 1)) & 0x04040404U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ - x = ((x & 0x08080808U) << 1) | \ - ((x & 0x32323232U) << 2) | \ - ((x & 0x01010101U) << 5) | \ - ((x & 0x80808080U) >> 6) | \ - ((x & 0x40404040U) >> 4) | \ - ((x & 0x04040404U) >> 2); \ -} while (0) - -/* - * Apply the inverse of the SKINNY sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE_INV(x) - * ((((x) & 0x08080808U) << 1) | - * (((x) & 0x32323232U) << 2) | - * (((x) & 0x01010101U) << 5) | - * (((x) & 0xC0C0C0C0U) >> 5) | - * (((x) & 0x04040404U) >> 2)) - * - * x = SBOX_SWAP(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_inv_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ - x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ - y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ - x = ((x & 0x01010101U) << 2) | \ - ((x & 0x04040404U) << 4) | \ - ((x & 0x02020202U) << 6) | \ - ((x & 0x20202020U) >> 5) | \ - ((x & 0xC8C8C8C8U) >> 2) | \ - ((x & 0x10101010U) >> 1); \ -} while (0) - -/* Utilities for implementing SKINNY-64 */ - -#define skinny64_LFSR2(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ - } while (0) - -#define skinny64_LFSR3(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) -#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) - -#define skinny64_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint16_t row2 = tk[2]; \ - uint16_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 8) | (row3 >> 8); \ - tk[0] = ((row2 << 4) & 0xF000U) | \ - ((row2 >> 8) & 0x00F0U) | \ - ( row3 & 0x0F0FU); \ - tk[1] = ((row2 << 8) & 0xF000U) | \ - ((row3 >> 4) & 0x0F00U) | \ - ( row3 & 0x00F0U) | \ - ( row2 & 0x000FU); \ - } while (0) - 
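The comment above states that skinny64_LFSR2 and skinny64_LFSR3 are inverses of each other, which is why the decryption path can simply swap them. A minimal standalone sketch of that claim, assuming a hosted C toolchain with stdio; the two macros are copied verbatim from the internal-skinnyutil.h content being removed here, and main() is only a hypothetical test driver, not part of this patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Copies of the SKINNY-64 LFSR macros from the header above.
     * Each 16-bit word holds one row of four 4-bit tweakey cells. */
    #define skinny64_LFSR2(x) \
        do { \
            uint16_t _x = (x); \
            (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \
        } while (0)

    #define skinny64_LFSR3(x) \
        do { \
            uint16_t _x = (x); \
            (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \
        } while (0)

    int main(void)
    {
        /* Exhaustively verify that LFSR3 undoes LFSR2 on every possible
         * 16-bit row value, i.e. on every combination of four nibbles. */
        uint32_t v;
        for (v = 0; v < 0x10000U; ++v) {
            uint16_t x = (uint16_t)v;
            skinny64_LFSR2(x);
            skinny64_LFSR3(x);
            if (x != (uint16_t)v) {
                printf("mismatch at %04x\n", (unsigned)v);
                return 1;
            }
        }
        printf("skinny64_LFSR2 and skinny64_LFSR3 are inverses\n");
        return 0;
    }

Per nibble, LFSR2 maps (b3,b2,b1,b0) to (b2,b1,b0,b3^b2) and LFSR3 maps (b3,b2,b1,b0) to (b3^b0,b3,b2,b1), so composing them returns the original nibble; the same argument applies to the 32-bit skinny128_LFSR2/skinny128_LFSR3 pair earlier in this header.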
-#define skinny64_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint16_t row0 = tk[0]; \ - uint16_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 << 8) & 0xF000U) | \ - ((row0 >> 4) & 0x0F00U) | \ - ((row1 >> 8) & 0x00F0U) | \ - ( row1 & 0x000FU); \ - tk[3] = ((row1 << 8) & 0xF000U) | \ - ((row0 << 8) & 0x0F00U) | \ - ((row1 >> 4) & 0x00F0U) | \ - ((row0 >> 8) & 0x000FU); \ - } while (0) - -/* - * Apply the SKINNY-64 sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT(x) - * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_SHIFT steps to be performed with one final rotation. - * This reduces the number of required shift operations from 14 to 10. - * - * We can further reduce the number of NOT operations from 4 to 2 - * using the technique from https://github.com/kste/skinny_avx to - * convert NOR-XOR operations into AND-XOR operations by converting - * the S-box into its NOT-inverse. - */ -#define skinny64_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ - x = ~x; \ - x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ -} while (0) - -/* - * Apply the inverse of the SKINNY-64 sbox. The original version - * from the specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT_INV(x) - * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * return SBOX_MIX(x); - */ -#define skinny64_inv_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = ~x; \ - x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ -} while (0) - -/** @endcond */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-util.h b/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - 
(ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. 
This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
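The composed-rotation strategy described above (only rotations by 1 bit or by a multiple of 8 are cheap on AVR, which has no multi-bit barrel shifter) rests on simple identities such as rotl32(x, 5) == rotr32(rotl32(x, 8), 3). A small illustrative check of that identity, which is the same decomposition the composed variants below use when LW_CRYPTO_ROTATE32_COMPOSED is set (sketch only, demo name is a placeholder):

#include <stdint.h>
#include <assert.h>

static void rotate_composition_demo(uint32_t x)
{
    uint32_t direct   = (x << 5) | (x >> 27);     /* rotate left by 5 directly */
    uint32_t by8      = (x << 8) | (x >> 24);     /* cheap rotate left by 8 */
    uint32_t composed = (by8 >> 3) | (by8 << 29); /* then rotate right by 3 */
    assert(direct == composed);  /* leftRotate5 == rightRotate3(leftRotate8) */
}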
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/romulus.c b/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/romulus.c deleted file mode 100644 index bb19cc5..0000000 --- a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/romulus.c +++ /dev/null @@ -1,1974 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "romulus.h" -#include "internal-skinny128.h" -#include "internal-util.h" -#include - -aead_cipher_t const romulus_n1_cipher = { - "Romulus-N1", - ROMULUS_KEY_SIZE, - ROMULUS1_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_n1_aead_encrypt, - romulus_n1_aead_decrypt -}; - -aead_cipher_t const romulus_n2_cipher = { - "Romulus-N2", - ROMULUS_KEY_SIZE, - ROMULUS2_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_n2_aead_encrypt, - romulus_n2_aead_decrypt -}; - -aead_cipher_t const romulus_n3_cipher = { - "Romulus-N3", - ROMULUS_KEY_SIZE, - ROMULUS3_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_n3_aead_encrypt, - romulus_n3_aead_decrypt -}; - -aead_cipher_t const romulus_m1_cipher = { - "Romulus-M1", - ROMULUS_KEY_SIZE, - ROMULUS1_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_m1_aead_encrypt, - romulus_m1_aead_decrypt -}; - -aead_cipher_t const romulus_m2_cipher = { - "Romulus-M2", - ROMULUS_KEY_SIZE, - ROMULUS2_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_m2_aead_encrypt, - romulus_m2_aead_decrypt -}; - -aead_cipher_t const romulus_m3_cipher = { - "Romulus-M3", - ROMULUS_KEY_SIZE, - ROMULUS3_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_m3_aead_encrypt, - romulus_m3_aead_decrypt -}; - -/** - * \brief Limit on the number of bytes of message or associated data (128Mb). - * - * Romulus-N1 and Romulus-M1 use a 56-bit block counter which allows for - * payloads well into the petabyte range. It is unlikely that an embedded - * device will have that much memory to store a contiguous packet! - * - * Romulus-N2 and Romulus-M2 use a 48-bit block counter but the upper - * 24 bits are difficult to modify in the key schedule. So we only - * update the low 24 bits and leave the high 24 bits fixed. - * - * Romulus-N3 and Romulus-M3 use a 24-bit block counter. - * - * For all algorithms, we limit the block counter to 2^23 so that the block - * counter can never exceed 2^24 - 1. - */ -#define ROMULUS_DATA_LIMIT \ - ((unsigned long long)((1ULL << 23) * SKINNY_128_BLOCK_SIZE)) - -/** - * \brief Initializes the key schedule for Romulus-N1 or Romulus-M1. - * - * \param ks Points to the key schedule to initialize. - * \param k Points to the 16 bytes of the key. - * \param npub Points to the 16 bytes of the nonce. May be NULL - * if the nonce will be updated on the fly. - */ -static void romulus1_init - (skinny_128_384_key_schedule_t *ks, - const unsigned char *k, const unsigned char *npub) -{ - unsigned char TK[48]; - TK[0] = 0x01; /* Initialize the 56-bit LFSR counter */ - memset(TK + 1, 0, 15); - if (npub) - memcpy(TK + 16, npub, 16); - else - memset(TK + 16, 0, 16); - memcpy(TK + 32, k, 16); - skinny_128_384_init(ks, TK); -} - -/** - * \brief Initializes the key schedule for Romulus-N2 or Romulus-M2. - * - * \param ks Points to the key schedule to initialize. - * \param k Points to the 16 bytes of the key. - * \param npub Points to the 12 bytes of the nonce. May be NULL - * if the nonce will be updated on the fly. 
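For reference, the limit above works out as follows (assuming the 16-byte SKINNY-128 block size): 2^23 blocks x 16 bytes per block = 2^27 bytes = 134,217,728 bytes, i.e. 128 MiB of message or associated data per call. Capping the data at 2^23 blocks keeps the per-block LFSR counter comfortably below 2^24 - 1, which matters in particular for the N2/M2 variants where only the low 24 bits of the 48-bit counter are ever updated.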
- */ -static void romulus2_init - (skinny_128_384_key_schedule_t *ks, - const unsigned char *k, const unsigned char *npub) -{ - unsigned char TK[48]; - TK[0] = 0x01; /* Initialize the low 24 bits of the LFSR counter */ - if (npub) { - TK[1] = TK[2] = TK[3] = 0; - memcpy(TK + 4, npub, 12); - } else { - memset(TK + 1, 0, 15); - } - memcpy(TK + 16, k, 16); - TK[32] = 0x01; /* Initialize the high 24 bits of the LFSR counter */ - memset(TK + 33, 0, 15); - skinny_128_384_init(ks, TK); -} - -/** - * \brief Initializes the key schedule for Romulus-N3 or Romulus-M3. - * - * \param ks Points to the key schedule to initialize. - * \param k Points to the 16 bytes of the key. - * \param npub Points to the 12 bytes of the nonce. May be NULL - * if the nonce will be updated on the fly. - */ -static void romulus3_init - (skinny_128_256_key_schedule_t *ks, - const unsigned char *k, const unsigned char *npub) -{ - unsigned char TK[32]; - TK[0] = 0x01; /* Initialize the 24-bit LFSR counter */ - if (npub) { - TK[1] = TK[2] = TK[3] = 0; - memcpy(TK + 4, npub, 12); - } else { - memset(TK + 1, 0, 15); - } - memcpy(TK + 16, k, 16); - skinny_128_256_init(ks, TK); -} - -/** - * \brief Sets the domain separation value for Romulus-N1 and M1. - * - * \param ks The key schedule to set the domain separation value into. - * \param domain The domain separation value. - */ -#define romulus1_set_domain(ks, domain) ((ks)->TK1[7] = (domain)) - -/** - * \brief Sets the domain separation value for Romulus-N2 and M2. - * - * \param ks The key schedule to set the domain separation value into. - * \param domain The domain separation value. - */ -#define romulus2_set_domain(ks, domain) ((ks)->TK1[3] = (domain)) - -/** - * \brief Sets the domain separation value for Romulus-N3 and M3. - * - * \param ks The key schedule to set the domain separation value into. - * \param domain The domain separation value. - */ -#define romulus3_set_domain(ks, domain) ((ks)->TK1[3] = (domain)) - -/** - * \brief Updates the 56-bit LFSR block counter for Romulus-N1 and M1. - * - * \param TK1 Points to the TK1 part of the key schedule containing the LFSR. - */ -STATIC_INLINE void romulus1_update_counter(uint8_t TK1[16]) -{ - uint8_t mask = (uint8_t)(((int8_t)(TK1[6])) >> 7); - TK1[6] = (TK1[6] << 1) | (TK1[5] >> 7); - TK1[5] = (TK1[5] << 1) | (TK1[4] >> 7); - TK1[4] = (TK1[4] << 1) | (TK1[3] >> 7); - TK1[3] = (TK1[3] << 1) | (TK1[2] >> 7); - TK1[2] = (TK1[2] << 1) | (TK1[1] >> 7); - TK1[1] = (TK1[1] << 1) | (TK1[0] >> 7); - TK1[0] = (TK1[0] << 1) ^ (mask & 0x95); -} - -/** - * \brief Updates the 24-bit LFSR block counter for Romulus-N2 or M2. - * - * \param TK1 Points to the TK1 part of the key schedule containing the LFSR. - * - * For Romulus-N2 and Romulus-M2 this will only update the low 24 bits of - * the 48-bit LFSR. The high 24 bits are fixed due to ROMULUS_DATA_LIMIT. - */ -STATIC_INLINE void romulus2_update_counter(uint8_t TK1[16]) -{ - uint8_t mask = (uint8_t)(((int8_t)(TK1[2])) >> 7); - TK1[2] = (TK1[2] << 1) | (TK1[1] >> 7); - TK1[1] = (TK1[1] << 1) | (TK1[0] >> 7); - TK1[0] = (TK1[0] << 1) ^ (mask & 0x1B); -} - -/** - * \brief Updates the 24-bit LFSR block counter for Romulus-N3 or M3. - * - * \param TK1 Points to the TK1 part of the key schedule containing the LFSR. - */ -#define romulus3_update_counter(TK1) romulus2_update_counter((TK1)) - -/** - * \brief Process the asssociated data for Romulus-N1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. 
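A minimal sketch of the counter behaviour (illustrative only; assumes romulus1_update_counter above is in scope, i.e. inside romulus.c): each call shifts the 56-bit value in TK1[0..6] left by one bit, folding in the 0x95 feedback byte only when the top bit of TK1[6] falls out, so starting from the initial value 0x01 the counter simply doubles for the first 55 updates.

#include <stdint.h>
#include <string.h>
#include <assert.h>

static void lfsr_counter_demo(void)
{
    uint8_t TK1[16];
    memset(TK1, 0, sizeof(TK1));
    TK1[0] = 0x01;                  /* initial 56-bit LFSR value */

    romulus1_update_counter(TK1);   /* counter: 1 -> 2 */
    romulus1_update_counter(TK1);   /* counter: 2 -> 4 */
    assert(TK1[0] == 0x04);
    assert(TK1[6] == 0x00);         /* no feedback yet: top bit still clear */
}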
- * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - */ -static void romulus_n1_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char temp; - - /* Handle the special case of no associated data */ - if (adlen == 0) { - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x1A); - skinny_128_384_encrypt_tk2(ks, S, S, npub); - return; - } - - /* Process all double blocks except the last */ - romulus1_set_domain(ks, 0x08); - while (adlen > 32) { - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - ad += 32; - adlen -= 32; - } - - /* Pad and process the left-over blocks */ - romulus1_update_counter(ks->TK1); - temp = (unsigned)adlen; - if (temp == 32) { - /* Left-over complete double block */ - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x18); - } else if (temp > 16) { - /* Left-over partial double block */ - unsigned char pad[16]; - temp -= 16; - lw_xor_block(S, ad, 16); - memcpy(pad, ad + 16, temp); - memset(pad + temp, 0, 15 - temp); - pad[15] = temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x1A); - } else if (temp == 16) { - /* Left-over complete single block */ - lw_xor_block(S, ad, temp); - romulus1_set_domain(ks, 0x18); - } else { - /* Left-over partial single block */ - lw_xor_block(S, ad, temp); - S[15] ^= temp; - romulus1_set_domain(ks, 0x1A); - } - skinny_128_384_encrypt_tk2(ks, S, S, npub); -} - -/** - * \brief Process the asssociated data for Romulus-N2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
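The partial double-block case above follows a simple padding rule: zero-fill the block to 16 bytes and record the original length in the final byte. A hypothetical stand-alone helper capturing that rule (illustration only; the implementation above inlines it):

#include <string.h>

/* Hypothetical helper, not part of the implementation above. Requires len < 16,
 * matching the partial double-block cases. */
static void romulus_pad16_demo(unsigned char out[16],
                               const unsigned char *in, unsigned len)
{
    memcpy(out, in, len);
    memset(out + len, 0, 15 - len);
    out[15] = (unsigned char)len;   /* the length byte doubles as the padding marker */
}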
- */ -static void romulus_n2_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char temp; - - /* Handle the special case of no associated data */ - if (adlen == 0) { - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x5A); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all double blocks except the last */ - romulus2_set_domain(ks, 0x48); - while (adlen > 28) { - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Pad and process the left-over blocks */ - romulus2_update_counter(ks->TK1); - temp = (unsigned)adlen; - if (temp == 28) { - /* Left-over complete double block */ - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x58); - } else if (temp > 16) { - /* Left-over partial double block */ - temp -= 16; - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp); - ks->TK1[15] = temp; - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x5A); - } else if (temp == 16) { - /* Left-over complete single block */ - lw_xor_block(S, ad, temp); - romulus2_set_domain(ks, 0x58); - } else { - /* Left-over partial single block */ - lw_xor_block(S, ad, temp); - S[15] ^= temp; - romulus2_set_domain(ks, 0x5A); - } - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Process the asssociated data for Romulus-N3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
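As a worked example of the 28-byte double-block schedule above: for adlen = 30, the loop absorbs one full double block (16 bytes XORed into S, the remaining 12 bytes placed in TK1[4..15] as that block's tweak), the 2 left-over bytes are handled as a partial single block (S[15] ^= 2 as padding, final domain 0x5A), and the nonce is then restored into TK1[4..15] for the closing SKINNY-128-384 call.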
- */ -static void romulus_n3_process_ad - (skinny_128_256_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char temp; - - /* Handle the special case of no associated data */ - if (adlen == 0) { - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x9A); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_256_encrypt(ks, S, S); - return; - } - - /* Process all double blocks except the last */ - romulus3_set_domain(ks, 0x88); - while (adlen > 28) { - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Pad and process the left-over blocks */ - romulus3_update_counter(ks->TK1); - temp = (unsigned)adlen; - if (temp == 28) { - /* Left-over complete double block */ - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x98); - } else if (temp > 16) { - /* Left-over partial double block */ - temp -= 16; - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp); - ks->TK1[15] = temp; - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x9A); - } else if (temp == 16) { - /* Left-over complete single block */ - lw_xor_block(S, ad, temp); - romulus3_set_domain(ks, 0x98); - } else { - /* Left-over partial single block */ - lw_xor_block(S, ad, temp); - S[15] ^= temp; - romulus3_set_domain(ks, 0x9A); - } - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Determine the domain separation value to use on the last - * block of the associated data processing. - * - * \param adlen Length of the associated data in bytes. - * \param mlen Length of the message in bytes. - * \param t Size of the second half of a double block; 12 or 16. - * - * \return The domain separation bits to use to finalize the last block. - */ -static uint8_t romulus_m_final_ad_domain - (unsigned long long adlen, unsigned long long mlen, unsigned t) -{ - uint8_t domain = 0; - unsigned split = 16U; - unsigned leftover; - - /* Determine which domain bits we need based on the length of the ad */ - if (adlen == 0) { - /* No associated data, so only 1 block with padding */ - domain ^= 0x02; - split = t; - } else { - /* Even or odd associated data length? */ - leftover = (unsigned)(adlen % (16U + t)); - if (leftover == 0) { - /* Even with a full double block at the end */ - domain ^= 0x08; - } else if (leftover < split) { - /* Odd with a partial single block at the end */ - domain ^= 0x02; - split = t; - } else if (leftover > split) { - /* Even with a partial double block at the end */ - domain ^= 0x0A; - } else { - /* Odd with a full single block at the end */ - split = t; - } - } - - /* Determine which domain bits we need based on the length of the message */ - if (mlen == 0) { - /* No message, so only 1 block with padding */ - domain ^= 0x01; - } else { - /* Even or odd message length? 
*/ - leftover = (unsigned)(mlen % (16U + t)); - if (leftover == 0) { - /* Even with a full double block at the end */ - domain ^= 0x04; - } else if (leftover < split) { - /* Odd with a partial single block at the end */ - domain ^= 0x01; - } else if (leftover > split) { - /* Even with a partial double block at the end */ - domain ^= 0x05; - } - } - return domain; -} - -/** - * \brief Process the asssociated data for Romulus-M1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - * \param m Points to the message plaintext. - * \param mlen Length of the message plaintext. - */ -static void romulus_m1_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char pad[16]; - uint8_t final_domain = 0x30; - unsigned temp; - - /* Determine the domain separator to use on the final block */ - final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 16); - - /* Process all associated data double blocks except the last */ - romulus1_set_domain(ks, 0x28); - while (adlen > 32) { - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - ad += 32; - adlen -= 32; - } - - /* Process the last associated data double block */ - temp = (unsigned)adlen; - if (temp == 32) { - /* Last associated data double block is full */ - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - } else if (temp > 16) { - /* Last associated data double block is partial */ - temp -= 16; - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(pad, ad + 16, temp); - memset(pad + temp, 0, sizeof(pad) - temp - 1); - pad[sizeof(pad) - 1] = (unsigned char)temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - romulus1_update_counter(ks->TK1); - } else { - /* Last associated data block is single. 
Needs to be combined - * with the first block of the message payload */ - romulus1_set_domain(ks, 0x2C); - romulus1_update_counter(ks->TK1); - if (temp == 16) { - lw_xor_block(S, ad, 16); - } else { - lw_xor_block(S, ad, temp); - S[15] ^= (unsigned char)temp; - } - if (mlen > 16) { - skinny_128_384_encrypt_tk2(ks, S, S, m); - romulus1_update_counter(ks->TK1); - m += 16; - mlen -= 16; - } else if (mlen == 16) { - skinny_128_384_encrypt_tk2(ks, S, S, m); - m += 16; - mlen -= 16; - } else { - temp = (unsigned)mlen; - memcpy(pad, m, temp); - memset(pad + temp, 0, sizeof(pad) - temp - 1); - pad[sizeof(pad) - 1] = (unsigned char)temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - mlen = 0; - } - } - - /* Process all message double blocks except the last */ - romulus1_set_domain(ks, 0x2C); - while (mlen > 32) { - romulus1_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - skinny_128_384_encrypt_tk2(ks, S, S, m + 16); - romulus1_update_counter(ks->TK1); - m += 32; - mlen -= 32; - } - - /* Process the last message double block */ - temp = (unsigned)mlen; - if (temp == 32) { - /* Last message double block is full */ - romulus1_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - skinny_128_384_encrypt_tk2(ks, S, S, m + 16); - } else if (temp > 16) { - /* Last message double block is partial */ - temp -= 16; - romulus1_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(pad, m + 16, temp); - memset(pad + temp, 0, sizeof(pad) - temp - 1); - pad[sizeof(pad) - 1] = (unsigned char)temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - } else if (temp == 16) { - /* Last message single block is full */ - lw_xor_block(S, m, 16); - } else if (temp > 0) { - /* Last message single block is partial */ - lw_xor_block(S, m, temp); - S[15] ^= (unsigned char)temp; - } - - /* Process the last partial block */ - romulus1_set_domain(ks, final_domain); - romulus1_update_counter(ks->TK1); - skinny_128_384_encrypt_tk2(ks, S, S, npub); -} - -/** - * \brief Process the asssociated data for Romulus-M2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - * \param m Points to the message plaintext. - * \param mlen Length of the message plaintext. 
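A worked example of romulus_m_final_ad_domain above, with illustrative lengths (assumes the function is in scope; the demo name is a placeholder):

#include <assert.h>

static void m_domain_demo(void)
{
    /* Romulus-M1 uses t = 16 (32-byte double blocks).
     * adlen = 20: 20 % 32 = 20 > 16 -> partial double AD block      -> ^= 0x0A
     * mlen  = 40: 40 % 32 =  8 < 16 -> partial single message block -> ^= 0x01 */
    assert(romulus_m_final_ad_domain(20, 40, 16) == 0x0B);
    /* romulus_m1_process_ad then finalizes with 0x30 ^ 0x0B = 0x3B. */
}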
- */ -static void romulus_m2_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *m, unsigned long long mlen) -{ - uint8_t final_domain = 0x70; - unsigned temp; - - /* Determine the domain separator to use on the final block */ - final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 12); - - /* Process all associated data double blocks except the last */ - romulus2_set_domain(ks, 0x68); - while (adlen > 28) { - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Process the last associated data double block */ - temp = (unsigned)adlen; - if (temp == 28) { - /* Last associated data double block is full */ - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - } else if (temp > 16) { - /* Last associated data double block is partial */ - temp -= 16; - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - } else { - /* Last associated data block is single. Needs to be combined - * with the first block of the message payload */ - romulus2_set_domain(ks, 0x6C); - romulus2_update_counter(ks->TK1); - if (temp == 16) { - lw_xor_block(S, ad, 16); - } else { - lw_xor_block(S, ad, temp); - S[15] ^= (unsigned char)temp; - } - if (mlen > 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - m += 12; - mlen -= 12; - } else if (mlen == 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_384_encrypt(ks, S, S); - m += 12; - mlen -= 12; - } else { - temp = (unsigned)mlen; - memcpy(ks->TK1 + 4, m, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_384_encrypt(ks, S, S); - mlen = 0; - } - } - - /* Process all message double blocks except the last */ - romulus2_set_domain(ks, 0x6C); - while (mlen > 28) { - romulus2_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - m += 28; - mlen -= 28; - } - - /* Process the last message double block */ - temp = (unsigned)mlen; - if (temp == 28) { - /* Last message double block is full */ - romulus2_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_384_encrypt(ks, S, S); - } else if (temp > 16) { - /* Last message double block is partial */ - temp -= 16; - romulus2_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_384_encrypt(ks, S, S); - } else if (temp == 16) { - /* Last message single block is full */ - lw_xor_block(S, m, 16); - } else if (temp > 0) { - /* Last message single block is partial */ - lw_xor_block(S, m, temp); - S[15] ^= (unsigned char)temp; - } - - /* Process the last partial block */ - romulus2_set_domain(ks, final_domain); - romulus2_update_counter(ks->TK1); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Process the asssociated data 
for Romulus-M3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - * \param m Points to the message plaintext. - * \param mlen Length of the message plaintext. - */ -static void romulus_m3_process_ad - (skinny_128_256_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *m, unsigned long long mlen) -{ - uint8_t final_domain = 0xB0; - unsigned temp; - - /* Determine the domain separator to use on the final block */ - final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 12); - - /* Process all associated data double blocks except the last */ - romulus3_set_domain(ks, 0xA8); - while (adlen > 28) { - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Process the last associated data double block */ - temp = (unsigned)adlen; - if (temp == 28) { - /* Last associated data double block is full */ - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - } else if (temp > 16) { - /* Last associated data double block is partial */ - temp -= 16; - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - } else { - /* Last associated data block is single. 
Needs to be combined - * with the first block of the message payload */ - romulus3_set_domain(ks, 0xAC); - romulus3_update_counter(ks->TK1); - if (temp == 16) { - lw_xor_block(S, ad, 16); - } else { - lw_xor_block(S, ad, temp); - S[15] ^= (unsigned char)temp; - } - if (mlen > 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - m += 12; - mlen -= 12; - } else if (mlen == 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_256_encrypt(ks, S, S); - m += 12; - mlen -= 12; - } else { - temp = (unsigned)mlen; - memcpy(ks->TK1 + 4, m, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_256_encrypt(ks, S, S); - mlen = 0; - } - } - - /* Process all message double blocks except the last */ - romulus3_set_domain(ks, 0xAC); - while (mlen > 28) { - romulus3_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - m += 28; - mlen -= 28; - } - - /* Process the last message double block */ - temp = (unsigned)mlen; - if (temp == 28) { - /* Last message double block is full */ - romulus3_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_256_encrypt(ks, S, S); - } else if (temp > 16) { - /* Last message double block is partial */ - temp -= 16; - romulus3_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_256_encrypt(ks, S, S); - } else if (temp == 16) { - /* Last message single block is full */ - lw_xor_block(S, m, 16); - } else if (temp > 0) { - /* Last message single block is partial */ - lw_xor_block(S, m, temp); - S[15] ^= (unsigned char)temp; - } - - /* Process the last partial block */ - romulus3_set_domain(ks, final_domain); - romulus3_update_counter(ks->TK1); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Applies the Romulus rho function. - * - * \param S The rolling Romulus state. - * \param C Ciphertext message output block. - * \param M Plaintext message input block. - */ -STATIC_INLINE void romulus_rho - (unsigned char S[16], unsigned char C[16], const unsigned char M[16]) -{ - unsigned index; - for (index = 0; index < 16; ++index) { - unsigned char s = S[index]; - unsigned char m = M[index]; - S[index] ^= m; - C[index] = m ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - } -} - -/** - * \brief Applies the inverse of the Romulus rho function. - * - * \param S The rolling Romulus state. - * \param M Plaintext message output block. - * \param C Ciphertext message input block. - */ -STATIC_INLINE void romulus_rho_inverse - (unsigned char S[16], unsigned char M[16], const unsigned char C[16]) -{ - unsigned index; - for (index = 0; index < 16; ++index) { - unsigned char s = S[index]; - unsigned char m = C[index] ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - S[index] ^= m; - M[index] = m; - } -} - -/** - * \brief Applies the Romulus rho function to a short block. - * - * \param S The rolling Romulus state. - * \param C Ciphertext message output block. - * \param M Plaintext message input block. - * \param len Length of the short block, must be less than 16. 
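Because decryption must undo the rho feedback exactly, the two functions above are inverses for any starting state: feeding the ciphertext block back through romulus_rho_inverse with the same initial S recovers the plaintext and leaves S in the same final state. A minimal sketch (illustrative only; assumes both functions are in scope):

#include <string.h>
#include <assert.h>

static void rho_roundtrip_demo(const unsigned char S0[16],
                               const unsigned char M[16])
{
    unsigned char S[16], C[16], M2[16];

    memcpy(S, S0, 16);
    romulus_rho(S, C, M);            /* absorb M into S, emit ciphertext C */

    memcpy(S, S0, 16);
    romulus_rho_inverse(S, M2, C);   /* same starting state recovers M */

    assert(memcmp(M, M2, 16) == 0);
}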
- */ -STATIC_INLINE void romulus_rho_short - (unsigned char S[16], unsigned char C[16], - const unsigned char M[16], unsigned len) -{ - unsigned index; - for (index = 0; index < len; ++index) { - unsigned char s = S[index]; - unsigned char m = M[index]; - S[index] ^= m; - C[index] = m ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - } - S[15] ^= (unsigned char)len; /* Padding */ -} - -/** - * \brief Applies the inverse of the Romulus rho function to a short block. - * - * \param S The rolling Romulus state. - * \param M Plaintext message output block. - * \param C Ciphertext message input block. - * \param len Length of the short block, must be less than 16. - */ -STATIC_INLINE void romulus_rho_inverse_short - (unsigned char S[16], unsigned char M[16], - const unsigned char C[16], unsigned len) -{ - unsigned index; - for (index = 0; index < len; ++index) { - unsigned char s = S[index]; - unsigned char m = C[index] ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - S[index] ^= m; - M[index] = m; - } - S[15] ^= (unsigned char)len; /* Padding */ -} - -/** - * \brief Encrypts a plaintext message with Romulus-N1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n1_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no plaintext */ - if (mlen == 0) { - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x15); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus1_set_domain(ks, 0x04); - while (mlen > 16) { - romulus_rho(S, c, m); - romulus1_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus1_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_short(S, c, m, temp); - romulus1_set_domain(ks, 0x15); - } else { - romulus_rho(S, c, m); - romulus1_set_domain(ks, 0x14); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-N1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. 
- */ -static void romulus_n1_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no ciphertext */ - if (mlen == 0) { - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x15); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus1_set_domain(ks, 0x04); - while (mlen > 16) { - romulus_rho_inverse(S, m, c); - romulus1_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus1_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_inverse_short(S, m, c, temp); - romulus1_set_domain(ks, 0x15); - } else { - romulus_rho_inverse(S, m, c); - romulus1_set_domain(ks, 0x14); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Encrypts a plaintext message with Romulus-N2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n2_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no plaintext */ - if (mlen == 0) { - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x55); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus2_set_domain(ks, 0x44); - while (mlen > 16) { - romulus_rho(S, c, m); - romulus2_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus2_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_short(S, c, m, temp); - romulus2_set_domain(ks, 0x55); - } else { - romulus_rho(S, c, m); - romulus2_set_domain(ks, 0x54); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-N2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n2_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no ciphertext */ - if (mlen == 0) { - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x55); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus2_set_domain(ks, 0x44); - while (mlen > 16) { - romulus_rho_inverse(S, m, c); - romulus2_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus2_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_inverse_short(S, m, c, temp); - romulus2_set_domain(ks, 0x55); - } else { - romulus_rho_inverse(S, m, c); - romulus2_set_domain(ks, 0x54); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Encrypts a plaintext message with Romulus-N3. - * - * \param ks Points to the key schedule. 
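Taken together, romulus_n1_encrypt and romulus_n1_decrypt above are inverses when given the same key schedule and the same starting state, and both directions only ever call skinny_128_384_encrypt, so the inverse block cipher is never required. A rough sketch under those assumptions (the types and static helpers above must be in scope, i.e. inside romulus.c; in the real AEAD the starting state S is the output of associated-data processing rather than zero):

#include <string.h>

static void n1_roundtrip_sketch(const unsigned char k[16],
                                const unsigned char npub[16],
                                const unsigned char *m, unsigned long long mlen,
                                unsigned char *c, unsigned char *m2)
{
    skinny_128_384_key_schedule_t ks;
    unsigned char S[16];

    romulus1_init(&ks, k, npub);
    memset(S, 0, sizeof(S));            /* identical starting state both times */
    romulus_n1_encrypt(&ks, S, c, m, mlen);

    romulus1_init(&ks, k, npub);
    memset(S, 0, sizeof(S));
    romulus_n1_decrypt(&ks, S, m2, c, mlen);
    /* m2 now matches m, and both runs end with the same state S. */
}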
- * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n3_encrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no plaintext */ - if (mlen == 0) { - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x95); - skinny_128_256_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus3_set_domain(ks, 0x84); - while (mlen > 16) { - romulus_rho(S, c, m); - romulus3_update_counter(ks->TK1); - skinny_128_256_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus3_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_short(S, c, m, temp); - romulus3_set_domain(ks, 0x95); - } else { - romulus_rho(S, c, m); - romulus3_set_domain(ks, 0x94); - } - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-N3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n3_decrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no ciphertext */ - if (mlen == 0) { - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x95); - skinny_128_256_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus3_set_domain(ks, 0x84); - while (mlen > 16) { - romulus_rho_inverse(S, m, c); - romulus3_update_counter(ks->TK1); - skinny_128_256_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus3_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_inverse_short(S, m, c, temp); - romulus3_set_domain(ks, 0x95); - } else { - romulus_rho_inverse(S, m, c); - romulus3_set_domain(ks, 0x94); - } - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Encrypts a plaintext message with Romulus-M1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m1_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus1_set_domain(ks, 0x24); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho(S, c, m); - romulus1_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_short(S, c, m, (unsigned)mlen); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-M1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. 
- * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m1_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus1_set_domain(ks, 0x24); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse(S, m, c); - romulus1_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse_short(S, m, c, (unsigned)mlen); -} - -/** - * \brief Encrypts a plaintext message with Romulus-M2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m2_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus2_set_domain(ks, 0x64); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho(S, c, m); - romulus2_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_short(S, c, m, (unsigned)mlen); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-M2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m2_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus2_set_domain(ks, 0x64); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse(S, m, c); - romulus2_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse_short(S, m, c, (unsigned)mlen); -} - -/** - * \brief Encrypts a plaintext message with Romulus-M3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m3_encrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus3_set_domain(ks, 0xA4); - while (mlen > 16) { - skinny_128_256_encrypt(ks, S, S); - romulus_rho(S, c, m); - romulus3_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_256_encrypt(ks, S, S); - romulus_rho_short(S, c, m, (unsigned)mlen); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-M3. 
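The per-block encryption and decryption routines above differ only in which SKINNY variant they invoke and in the domain-separation byte passed to the romulus*_set_domain helpers. For reference, a comment-style summary collected from the constants visible in this hunk (a reading aid only, not part of the original source):

/* Domain bytes used by the message-processing routines above:
 *   Romulus-N1: 0x04 while looping, 0x14 for a final full block, 0x15 for a final partial block or empty message
 *   Romulus-N2: 0x44 while looping, 0x54 for a final full block, 0x55 for a final partial block or empty message
 *   Romulus-N3: 0x84 while looping, 0x94 for a final full block, 0x95 for a final partial block or empty message
 *   Romulus-M1: 0x24, Romulus-M2: 0x64, Romulus-M3: 0xA4 (same byte for every block)
 */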
- * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m3_decrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus3_set_domain(ks, 0xA4); - while (mlen > 16) { - skinny_128_256_encrypt(ks, S, S); - romulus_rho_inverse(S, m, c); - romulus3_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_256_encrypt(ks, S, S); - romulus_rho_inverse_short(S, m, c, (unsigned)mlen); -} - -/** - * \brief Generates the authentication tag from the rolling Romulus state. - * - * \param T Buffer to receive the generated tag; can be the same as S. - * \param S The rolling Romulus state. - */ -STATIC_INLINE void romulus_generate_tag - (unsigned char T[16], const unsigned char S[16]) -{ - unsigned index; - for (index = 0; index < 16; ++index) { - unsigned char s = S[index]; - T[index] = (s >> 1) ^ (s & 0x80) ^ (s << 7); - } -} - -int romulus_n1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n1_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Encrypts the plaintext to produce the ciphertext */ - romulus_n1_encrypt(&ks, S, c, m, mlen); - - /* Generate the authentication tag */ - romulus_generate_tag(c + mlen, S); - return 0; -} - -int romulus_n1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and no nonce. 
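romulus_generate_tag above shifts each state byte right by one position, with the XOR of the byte's former most- and least-significant bits becoming the new top bit. A minimal standalone sketch (not part of the patch) reuses the same expression so the transform can be checked on a host machine; the helper name romulus_tag_byte is hypothetical:

#include <stdio.h>

/* Same per-byte expression as in romulus_generate_tag above. */
static unsigned char romulus_tag_byte(unsigned char s)
{
    return (unsigned char)((s >> 1) ^ (s & 0x80) ^ (s << 7));
}

int main(void)
{
    const unsigned char samples[] = { 0x00, 0x01, 0x80, 0xFF };
    unsigned i;
    for (i = 0; i < sizeof(samples); ++i)
        printf("%02X -> %02X\n", samples[i], romulus_tag_byte(samples[i]));
    return 0;
}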
Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n1_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= ROMULUS_TAG_SIZE; - romulus_n1_decrypt(&ks, S, m, c, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_n2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n2_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Encrypts the plaintext to produce the ciphertext */ - romulus_n2_encrypt(&ks, S, c, m, mlen); - - /* Generate the authentication tag */ - romulus_generate_tag(c + mlen, S); - return 0; -} - -int romulus_n2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n2_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= ROMULUS_TAG_SIZE; - romulus_n2_decrypt(&ks, S, m, c, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_n3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n3_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Encrypts the plaintext to produce the ciphertext */ - romulus_n3_encrypt(&ks, S, c, m, mlen); - - /* Generate the authentication tag */ - romulus_generate_tag(c + mlen, S); - return 0; -} - -int romulus_n3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n3_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= ROMULUS_TAG_SIZE; - romulus_n3_decrypt(&ks, S, m, c, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_m1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data and the plaintext message */ - memset(S, 0, sizeof(S)); - romulus_m1_process_ad(&ks, S, npub, ad, adlen, m, mlen); - - /* Generate the authentication tag, which is also the initialization - * vector for the encryption portion of the packet processing */ - romulus_generate_tag(S, S); - memcpy(c + mlen, S, ROMULUS_TAG_SIZE); - - /* Re-initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Encrypt the plaintext to produce the ciphertext */ - romulus_m1_encrypt(&ks, S, c, m, mlen); - return 0; -} - -int romulus_m1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext, using the - * authentication tag as the initialization vector for decryption */ - clen -= ROMULUS_TAG_SIZE; - memcpy(S, c + clen, ROMULUS_TAG_SIZE); - romulus_m1_decrypt(&ks, S, m, c, clen); - - /* Re-initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_m1_process_ad(&ks, S, npub, ad, adlen, m, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_m2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data and the plaintext message */ - memset(S, 0, sizeof(S)); - romulus_m2_process_ad(&ks, S, npub, ad, adlen, m, mlen); - - /* Generate the authentication tag, which is also the initialization - * vector for the encryption portion of the packet processing */ - romulus_generate_tag(S, S); - memcpy(c + mlen, S, ROMULUS_TAG_SIZE); - - /* Re-initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Encrypt the plaintext to produce the ciphertext */ - romulus_m2_encrypt(&ks, S, c, m, mlen); - return 0; -} - -int romulus_m2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext, using the - * authentication tag as the initialization vector for decryption */ - clen -= ROMULUS_TAG_SIZE; - memcpy(S, c + clen, ROMULUS_TAG_SIZE); - romulus_m2_decrypt(&ks, S, m, c, clen); - - /* Re-initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_m2_process_ad(&ks, S, npub, ad, adlen, m, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_m3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data and the plaintext message */ - memset(S, 0, sizeof(S)); - romulus_m3_process_ad(&ks, S, npub, ad, adlen, m, mlen); - - /* Generate the authentication tag, which is also the initialization - * vector for the encryption portion of the packet processing */ - romulus_generate_tag(S, S); - memcpy(c + mlen, S, ROMULUS_TAG_SIZE); - - /* Re-initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Encrypt the plaintext to produce the ciphertext */ - romulus_m3_encrypt(&ks, S, c, m, mlen); - return 0; -} - -int romulus_m3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext, using the - * authentication tag as the initialization vector for decryption */ - clen -= ROMULUS_TAG_SIZE; - memcpy(S, c + clen, ROMULUS_TAG_SIZE); - romulus_m3_decrypt(&ks, S, m, c, clen); - - /* Re-initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_m3_process_ad(&ks, S, npub, ad, adlen, m, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} diff --git a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/romulus.h b/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/romulus.h deleted file mode 100644 index e6da29d..0000000 --- a/romulus/Implementations/crypto_aead/romulusm1/rhys-avr/romulus.h +++ /dev/null @@ -1,476 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
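The Romulus-M entry points above share a two-pass, SIV-style structure that is easy to lose in the per-variant repetition. The condensed sketch below restates the M1 encryption flow using only the calls that appear in this hunk (a summary for orientation, not a replacement for the code above):

/* Two-pass structure of romulus_m1_aead_encrypt above (M2/M3 differ only in
 * key-schedule type and domain constants):
 *
 *   romulus1_init(&ks, k, 0);                          first pass: key only
 *   memset(S, 0, 16);
 *   romulus_m1_process_ad(&ks, S, npub, ad, adlen, m, mlen);
 *   romulus_generate_tag(S, S);                        tag doubles as the IV
 *   memcpy(c + mlen, S, ROMULUS_TAG_SIZE);
 *   romulus1_init(&ks, k, npub);                       second pass: key + nonce
 *   romulus_m1_encrypt(&ks, S, c, m, mlen);
 *
 * Decryption runs the second pass first, seeding the state from the received
 * tag, and only then recomputes the tag over the associated data and the
 * recovered plaintext before calling aead_check_tag. */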
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ROMULUS_H -#define LWCRYPTO_ROMULUS_H - -#include "aead-common.h" - -/** - * \file romulus.h - * \brief Romulus authenticated encryption algorithm family. - * - * Romulus is a family of authenticated encryption algorithms that - * are built around the SKINNY-128 tweakable block cipher. There - * are six members in the family: - * - * \li Romulus-N1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. This is the - * primary member of the family. - * \li Romulus-N2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li Romulus-N3 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * \li Romulus-M1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li Romulus-M2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li Romulus-M3 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * - * The Romulus-M variants are resistant to nonce reuse as long as the - * combination of the associated data and plaintext is unique. If the - * same associated data and plaintext are reused under the same nonce, - * then the scheme will leak that the same plaintext has been sent for a - * second time but will not reveal the plaintext itself. - * - * References: https://romulusae.github.io/romulus/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all Romulus family members. - */ -#define ROMULUS_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for all Romulus family members. - */ -#define ROMULUS_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Romulus-N1 and Romulus-M1. - */ -#define ROMULUS1_NONCE_SIZE 16 - -/** - * \brief Size of the nonce for Romulus-N2 and Romulus-M2. - */ -#define ROMULUS2_NONCE_SIZE 12 - -/** - * \brief Size of the nonce for Romulus-N3 and Romulus-M3. - */ -#define ROMULUS3_NONCE_SIZE 12 - -/** - * \brief Meta-information block for the Romulus-N1 cipher. - */ -extern aead_cipher_t const romulus_n1_cipher; - -/** - * \brief Meta-information block for the Romulus-N2 cipher. 
- */ -extern aead_cipher_t const romulus_n2_cipher; - -/** - * \brief Meta-information block for the Romulus-N3 cipher. - */ -extern aead_cipher_t const romulus_n3_cipher; - -/** - * \brief Meta-information block for the Romulus-M1 cipher. - */ -extern aead_cipher_t const romulus_m1_cipher; - -/** - * \brief Meta-information block for the Romulus-M2 cipher. - */ -extern aead_cipher_t const romulus_m2_cipher; - -/** - * \brief Meta-information block for the Romulus-M3 cipher. - */ -extern aead_cipher_t const romulus_m3_cipher; - -/** - * \brief Encrypts and authenticates a packet with Romulus-N1. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_n1_aead_decrypt() - */ -int romulus_n1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-N1. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_n1_aead_encrypt() - */ -int romulus_n1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-N2. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
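Since the size macros and the Romulus-N1 declarations above are self-contained, a short host-side caller may help show how they fit together. The following sketch is an illustrative example only (not part of the original header) and assumes the program is linked against the Romulus implementation in this patch:

#include <stdio.h>
#include <string.h>
#include "romulus.h"

int main(void)
{
    unsigned char key[ROMULUS_KEY_SIZE] = {0};      /* 16-byte key */
    unsigned char nonce[ROMULUS1_NONCE_SIZE] = {0}; /* 16-byte nonce for N1 */
    unsigned char msg[] = "hello";
    unsigned char ad[] = "header";
    unsigned char ct[sizeof(msg) + ROMULUS_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long clen = 0, mlen = 0;

    if (romulus_n1_aead_encrypt(ct, &clen, msg, sizeof(msg),
                                ad, sizeof(ad), NULL, nonce, key) != 0)
        return 1;
    if (romulus_n1_aead_decrypt(pt, &mlen, NULL, ct, clen,
                                ad, sizeof(ad), nonce, key) != 0)
        return 1;   /* -1 here would indicate a failed tag check */
    printf("round trip %s\n", memcmp(pt, msg, sizeof(msg)) == 0 ? "ok" : "failed");
    return 0;
}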
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_n2_aead_decrypt() - */ -int romulus_n2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-N2. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_n2_aead_encrypt() - */ -int romulus_n2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-N3. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_n3_aead_decrypt() - */ -int romulus_n3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-N3. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. 
- * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_n3_aead_encrypt() - */ -int romulus_n3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-M1. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_m1_aead_decrypt() - */ -int romulus_m1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-M1. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_m1_aead_encrypt() - */ -int romulus_m1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-M2. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_m2_aead_decrypt() - */ -int romulus_m2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-M2. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_m2_aead_encrypt() - */ -int romulus_m2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-M3. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa romulus_m3_aead_decrypt() - */ -int romulus_m3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-M3. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_m3_aead_encrypt() - */ -int romulus_m3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusm1/rhys/internal-skinny128-avr.S b/romulus/Implementations/crypto_aead/romulusm1/rhys/internal-skinny128-avr.S new file mode 100644 index 0000000..d342cd5 --- /dev/null +++ b/romulus/Implementations/crypto_aead/romulusm1/rhys/internal-skinny128-avr.S @@ -0,0 +1,10099 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 256 +table_0: + .byte 101 + .byte 76 + .byte 106 + .byte 66 + .byte 75 + .byte 99 + .byte 67 + .byte 107 + .byte 85 + .byte 117 + .byte 90 + .byte 122 + .byte 83 + .byte 115 + .byte 91 + .byte 123 + .byte 53 + .byte 140 + .byte 58 + .byte 129 + .byte 137 + .byte 51 + .byte 128 + .byte 59 + .byte 149 + .byte 37 + .byte 152 + .byte 42 + .byte 144 + .byte 35 + .byte 153 + .byte 43 + .byte 229 + .byte 204 + .byte 232 + .byte 193 + .byte 201 + .byte 224 + .byte 192 + .byte 233 + .byte 213 + .byte 245 + .byte 216 + .byte 248 + .byte 208 + .byte 240 + .byte 217 + .byte 249 + .byte 165 + .byte 28 + .byte 168 + .byte 18 + .byte 27 + .byte 160 + .byte 19 + .byte 169 + .byte 5 + .byte 181 + .byte 10 + .byte 184 + .byte 3 + .byte 176 + .byte 11 + .byte 185 + .byte 50 + .byte 136 + .byte 60 + .byte 133 + .byte 141 + .byte 52 + .byte 132 + .byte 61 + .byte 145 + .byte 34 + .byte 156 + .byte 44 + .byte 148 + .byte 36 + .byte 157 + .byte 45 + .byte 98 + .byte 74 + .byte 108 + .byte 69 + .byte 77 + .byte 100 + .byte 68 + .byte 109 + .byte 82 + .byte 114 + .byte 92 + .byte 124 + .byte 84 + .byte 116 + .byte 93 + .byte 125 + .byte 161 + .byte 26 + .byte 172 + .byte 21 + .byte 29 + .byte 164 + .byte 20 + .byte 173 + .byte 2 + .byte 177 + .byte 12 + .byte 188 + .byte 4 + .byte 180 + .byte 13 + .byte 189 + .byte 225 + .byte 200 + .byte 236 + .byte 197 + .byte 205 + .byte 228 + .byte 196 + .byte 237 + .byte 
209 + .byte 241 + .byte 220 + .byte 252 + .byte 212 + .byte 244 + .byte 221 + .byte 253 + .byte 54 + .byte 142 + .byte 56 + .byte 130 + .byte 139 + .byte 48 + .byte 131 + .byte 57 + .byte 150 + .byte 38 + .byte 154 + .byte 40 + .byte 147 + .byte 32 + .byte 155 + .byte 41 + .byte 102 + .byte 78 + .byte 104 + .byte 65 + .byte 73 + .byte 96 + .byte 64 + .byte 105 + .byte 86 + .byte 118 + .byte 88 + .byte 120 + .byte 80 + .byte 112 + .byte 89 + .byte 121 + .byte 166 + .byte 30 + .byte 170 + .byte 17 + .byte 25 + .byte 163 + .byte 16 + .byte 171 + .byte 6 + .byte 182 + .byte 8 + .byte 186 + .byte 0 + .byte 179 + .byte 9 + .byte 187 + .byte 230 + .byte 206 + .byte 234 + .byte 194 + .byte 203 + .byte 227 + .byte 195 + .byte 235 + .byte 214 + .byte 246 + .byte 218 + .byte 250 + .byte 211 + .byte 243 + .byte 219 + .byte 251 + .byte 49 + .byte 138 + .byte 62 + .byte 134 + .byte 143 + .byte 55 + .byte 135 + .byte 63 + .byte 146 + .byte 33 + .byte 158 + .byte 46 + .byte 151 + .byte 39 + .byte 159 + .byte 47 + .byte 97 + .byte 72 + .byte 110 + .byte 70 + .byte 79 + .byte 103 + .byte 71 + .byte 111 + .byte 81 + .byte 113 + .byte 94 + .byte 126 + .byte 87 + .byte 119 + .byte 95 + .byte 127 + .byte 162 + .byte 24 + .byte 174 + .byte 22 + .byte 31 + .byte 167 + .byte 23 + .byte 175 + .byte 1 + .byte 178 + .byte 14 + .byte 190 + .byte 7 + .byte 183 + .byte 15 + .byte 191 + .byte 226 + .byte 202 + .byte 238 + .byte 198 + .byte 207 + .byte 231 + .byte 199 + .byte 239 + .byte 210 + .byte 242 + .byte 222 + .byte 254 + .byte 215 + .byte 247 + .byte 223 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 256 +table_1: + .byte 172 + .byte 232 + .byte 104 + .byte 60 + .byte 108 + .byte 56 + .byte 168 + .byte 236 + .byte 170 + .byte 174 + .byte 58 + .byte 62 + .byte 106 + .byte 110 + .byte 234 + .byte 238 + .byte 166 + .byte 163 + .byte 51 + .byte 54 + .byte 102 + .byte 99 + .byte 227 + .byte 230 + .byte 225 + .byte 164 + .byte 97 + .byte 52 + .byte 49 + .byte 100 + .byte 161 + .byte 228 + .byte 141 + .byte 201 + .byte 73 + .byte 29 + .byte 77 + .byte 25 + .byte 137 + .byte 205 + .byte 139 + .byte 143 + .byte 27 + .byte 31 + .byte 75 + .byte 79 + .byte 203 + .byte 207 + .byte 133 + .byte 192 + .byte 64 + .byte 21 + .byte 69 + .byte 16 + .byte 128 + .byte 197 + .byte 130 + .byte 135 + .byte 18 + .byte 23 + .byte 66 + .byte 71 + .byte 194 + .byte 199 + .byte 150 + .byte 147 + .byte 3 + .byte 6 + .byte 86 + .byte 83 + .byte 211 + .byte 214 + .byte 209 + .byte 148 + .byte 81 + .byte 4 + .byte 1 + .byte 84 + .byte 145 + .byte 212 + .byte 156 + .byte 216 + .byte 88 + .byte 12 + .byte 92 + .byte 8 + .byte 152 + .byte 220 + .byte 154 + .byte 158 + .byte 10 + .byte 14 + .byte 90 + .byte 94 + .byte 218 + .byte 222 + .byte 149 + .byte 208 + .byte 80 + .byte 5 + .byte 85 + .byte 0 + .byte 144 + .byte 213 + .byte 146 + .byte 151 + .byte 2 + .byte 7 + .byte 82 + .byte 87 + .byte 210 + .byte 215 + .byte 157 + .byte 217 + .byte 89 + .byte 13 + .byte 93 + .byte 9 + .byte 153 + .byte 221 + .byte 155 + .byte 159 + .byte 11 + .byte 15 + .byte 91 + .byte 95 + .byte 219 + .byte 223 + .byte 22 + .byte 19 + .byte 131 + .byte 134 + .byte 70 + .byte 67 + .byte 195 + .byte 198 + .byte 65 + .byte 20 + .byte 193 + .byte 132 + .byte 17 + .byte 68 + .byte 129 + .byte 196 + .byte 28 + .byte 72 + .byte 200 + .byte 140 + .byte 76 + .byte 24 + .byte 136 + .byte 204 + .byte 26 + .byte 30 + .byte 138 + .byte 142 + .byte 74 + .byte 78 + .byte 202 + .byte 206 + .byte 53 + .byte 96 + .byte 224 + 
.byte 165 + .byte 101 + .byte 48 + .byte 160 + .byte 229 + .byte 50 + .byte 55 + .byte 162 + .byte 167 + .byte 98 + .byte 103 + .byte 226 + .byte 231 + .byte 61 + .byte 105 + .byte 233 + .byte 173 + .byte 109 + .byte 57 + .byte 169 + .byte 237 + .byte 59 + .byte 63 + .byte 171 + .byte 175 + .byte 107 + .byte 111 + .byte 235 + .byte 239 + .byte 38 + .byte 35 + .byte 179 + .byte 182 + .byte 118 + .byte 115 + .byte 243 + .byte 246 + .byte 113 + .byte 36 + .byte 241 + .byte 180 + .byte 33 + .byte 116 + .byte 177 + .byte 244 + .byte 44 + .byte 120 + .byte 248 + .byte 188 + .byte 124 + .byte 40 + .byte 184 + .byte 252 + .byte 42 + .byte 46 + .byte 186 + .byte 190 + .byte 122 + .byte 126 + .byte 250 + .byte 254 + .byte 37 + .byte 112 + .byte 240 + .byte 181 + .byte 117 + .byte 32 + .byte 176 + .byte 245 + .byte 34 + .byte 39 + .byte 178 + .byte 183 + .byte 114 + .byte 119 + .byte 242 + .byte 247 + .byte 45 + .byte 121 + .byte 249 + .byte 189 + .byte 125 + .byte 41 + .byte 185 + .byte 253 + .byte 43 + .byte 47 + .byte 187 + .byte 191 + .byte 123 + .byte 127 + .byte 251 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_2, @object + .size table_2, 256 +table_2: + .byte 0 + .byte 2 + .byte 4 + .byte 6 + .byte 8 + .byte 10 + .byte 12 + .byte 14 + .byte 16 + .byte 18 + .byte 20 + .byte 22 + .byte 24 + .byte 26 + .byte 28 + .byte 30 + .byte 32 + .byte 34 + .byte 36 + .byte 38 + .byte 40 + .byte 42 + .byte 44 + .byte 46 + .byte 48 + .byte 50 + .byte 52 + .byte 54 + .byte 56 + .byte 58 + .byte 60 + .byte 62 + .byte 65 + .byte 67 + .byte 69 + .byte 71 + .byte 73 + .byte 75 + .byte 77 + .byte 79 + .byte 81 + .byte 83 + .byte 85 + .byte 87 + .byte 89 + .byte 91 + .byte 93 + .byte 95 + .byte 97 + .byte 99 + .byte 101 + .byte 103 + .byte 105 + .byte 107 + .byte 109 + .byte 111 + .byte 113 + .byte 115 + .byte 117 + .byte 119 + .byte 121 + .byte 123 + .byte 125 + .byte 127 + .byte 128 + .byte 130 + .byte 132 + .byte 134 + .byte 136 + .byte 138 + .byte 140 + .byte 142 + .byte 144 + .byte 146 + .byte 148 + .byte 150 + .byte 152 + .byte 154 + .byte 156 + .byte 158 + .byte 160 + .byte 162 + .byte 164 + .byte 166 + .byte 168 + .byte 170 + .byte 172 + .byte 174 + .byte 176 + .byte 178 + .byte 180 + .byte 182 + .byte 184 + .byte 186 + .byte 188 + .byte 190 + .byte 193 + .byte 195 + .byte 197 + .byte 199 + .byte 201 + .byte 203 + .byte 205 + .byte 207 + .byte 209 + .byte 211 + .byte 213 + .byte 215 + .byte 217 + .byte 219 + .byte 221 + .byte 223 + .byte 225 + .byte 227 + .byte 229 + .byte 231 + .byte 233 + .byte 235 + .byte 237 + .byte 239 + .byte 241 + .byte 243 + .byte 245 + .byte 247 + .byte 249 + .byte 251 + .byte 253 + .byte 255 + .byte 1 + .byte 3 + .byte 5 + .byte 7 + .byte 9 + .byte 11 + .byte 13 + .byte 15 + .byte 17 + .byte 19 + .byte 21 + .byte 23 + .byte 25 + .byte 27 + .byte 29 + .byte 31 + .byte 33 + .byte 35 + .byte 37 + .byte 39 + .byte 41 + .byte 43 + .byte 45 + .byte 47 + .byte 49 + .byte 51 + .byte 53 + .byte 55 + .byte 57 + .byte 59 + .byte 61 + .byte 63 + .byte 64 + .byte 66 + .byte 68 + .byte 70 + .byte 72 + .byte 74 + .byte 76 + .byte 78 + .byte 80 + .byte 82 + .byte 84 + .byte 86 + .byte 88 + .byte 90 + .byte 92 + .byte 94 + .byte 96 + .byte 98 + .byte 100 + .byte 102 + .byte 104 + .byte 106 + .byte 108 + .byte 110 + .byte 112 + .byte 114 + .byte 116 + .byte 118 + .byte 120 + .byte 122 + .byte 124 + .byte 126 + .byte 129 + .byte 131 + .byte 133 + .byte 135 + .byte 137 + .byte 139 + .byte 141 + .byte 143 + .byte 145 + .byte 147 + .byte 149 + .byte 151 + .byte 153 + 
.byte 155 + .byte 157 + .byte 159 + .byte 161 + .byte 163 + .byte 165 + .byte 167 + .byte 169 + .byte 171 + .byte 173 + .byte 175 + .byte 177 + .byte 179 + .byte 181 + .byte 183 + .byte 185 + .byte 187 + .byte 189 + .byte 191 + .byte 192 + .byte 194 + .byte 196 + .byte 198 + .byte 200 + .byte 202 + .byte 204 + .byte 206 + .byte 208 + .byte 210 + .byte 212 + .byte 214 + .byte 216 + .byte 218 + .byte 220 + .byte 222 + .byte 224 + .byte 226 + .byte 228 + .byte 230 + .byte 232 + .byte 234 + .byte 236 + .byte 238 + .byte 240 + .byte 242 + .byte 244 + .byte 246 + .byte 248 + .byte 250 + .byte 252 + .byte 254 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_3, @object + .size table_3, 256 +table_3: + .byte 0 + .byte 128 + .byte 1 + .byte 129 + .byte 2 + .byte 130 + .byte 3 + .byte 131 + .byte 4 + .byte 132 + .byte 5 + .byte 133 + .byte 6 + .byte 134 + .byte 7 + .byte 135 + .byte 8 + .byte 136 + .byte 9 + .byte 137 + .byte 10 + .byte 138 + .byte 11 + .byte 139 + .byte 12 + .byte 140 + .byte 13 + .byte 141 + .byte 14 + .byte 142 + .byte 15 + .byte 143 + .byte 16 + .byte 144 + .byte 17 + .byte 145 + .byte 18 + .byte 146 + .byte 19 + .byte 147 + .byte 20 + .byte 148 + .byte 21 + .byte 149 + .byte 22 + .byte 150 + .byte 23 + .byte 151 + .byte 24 + .byte 152 + .byte 25 + .byte 153 + .byte 26 + .byte 154 + .byte 27 + .byte 155 + .byte 28 + .byte 156 + .byte 29 + .byte 157 + .byte 30 + .byte 158 + .byte 31 + .byte 159 + .byte 160 + .byte 32 + .byte 161 + .byte 33 + .byte 162 + .byte 34 + .byte 163 + .byte 35 + .byte 164 + .byte 36 + .byte 165 + .byte 37 + .byte 166 + .byte 38 + .byte 167 + .byte 39 + .byte 168 + .byte 40 + .byte 169 + .byte 41 + .byte 170 + .byte 42 + .byte 171 + .byte 43 + .byte 172 + .byte 44 + .byte 173 + .byte 45 + .byte 174 + .byte 46 + .byte 175 + .byte 47 + .byte 176 + .byte 48 + .byte 177 + .byte 49 + .byte 178 + .byte 50 + .byte 179 + .byte 51 + .byte 180 + .byte 52 + .byte 181 + .byte 53 + .byte 182 + .byte 54 + .byte 183 + .byte 55 + .byte 184 + .byte 56 + .byte 185 + .byte 57 + .byte 186 + .byte 58 + .byte 187 + .byte 59 + .byte 188 + .byte 60 + .byte 189 + .byte 61 + .byte 190 + .byte 62 + .byte 191 + .byte 63 + .byte 64 + .byte 192 + .byte 65 + .byte 193 + .byte 66 + .byte 194 + .byte 67 + .byte 195 + .byte 68 + .byte 196 + .byte 69 + .byte 197 + .byte 70 + .byte 198 + .byte 71 + .byte 199 + .byte 72 + .byte 200 + .byte 73 + .byte 201 + .byte 74 + .byte 202 + .byte 75 + .byte 203 + .byte 76 + .byte 204 + .byte 77 + .byte 205 + .byte 78 + .byte 206 + .byte 79 + .byte 207 + .byte 80 + .byte 208 + .byte 81 + .byte 209 + .byte 82 + .byte 210 + .byte 83 + .byte 211 + .byte 84 + .byte 212 + .byte 85 + .byte 213 + .byte 86 + .byte 214 + .byte 87 + .byte 215 + .byte 88 + .byte 216 + .byte 89 + .byte 217 + .byte 90 + .byte 218 + .byte 91 + .byte 219 + .byte 92 + .byte 220 + .byte 93 + .byte 221 + .byte 94 + .byte 222 + .byte 95 + .byte 223 + .byte 224 + .byte 96 + .byte 225 + .byte 97 + .byte 226 + .byte 98 + .byte 227 + .byte 99 + .byte 228 + .byte 100 + .byte 229 + .byte 101 + .byte 230 + .byte 102 + .byte 231 + .byte 103 + .byte 232 + .byte 104 + .byte 233 + .byte 105 + .byte 234 + .byte 106 + .byte 235 + .byte 107 + .byte 236 + .byte 108 + .byte 237 + .byte 109 + .byte 238 + .byte 110 + .byte 239 + .byte 111 + .byte 240 + .byte 112 + .byte 241 + .byte 113 + .byte 242 + .byte 114 + .byte 243 + .byte 115 + .byte 244 + .byte 116 + .byte 245 + .byte 117 + .byte 246 + .byte 118 + .byte 247 + .byte 119 + .byte 248 + .byte 120 + .byte 249 + .byte 121 + .byte 250 + 
.byte 122 + .byte 251 + .byte 123 + .byte 252 + .byte 124 + .byte 253 + .byte 125 + .byte 254 + .byte 126 + .byte 255 + .byte 127 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_4, @object + .size table_4, 112 +table_4: + .byte 1 + .byte 0 + .byte 3 + .byte 0 + .byte 7 + .byte 0 + .byte 15 + .byte 0 + .byte 15 + .byte 1 + .byte 14 + .byte 3 + .byte 13 + .byte 3 + .byte 11 + .byte 3 + .byte 7 + .byte 3 + .byte 15 + .byte 2 + .byte 14 + .byte 1 + .byte 12 + .byte 3 + .byte 9 + .byte 3 + .byte 3 + .byte 3 + .byte 7 + .byte 2 + .byte 14 + .byte 0 + .byte 13 + .byte 1 + .byte 10 + .byte 3 + .byte 5 + .byte 3 + .byte 11 + .byte 2 + .byte 6 + .byte 1 + .byte 12 + .byte 2 + .byte 8 + .byte 1 + .byte 0 + .byte 3 + .byte 1 + .byte 2 + .byte 2 + .byte 0 + .byte 5 + .byte 0 + .byte 11 + .byte 0 + .byte 7 + .byte 1 + .byte 14 + .byte 2 + .byte 12 + .byte 1 + .byte 8 + .byte 3 + .byte 1 + .byte 3 + .byte 3 + .byte 2 + .byte 6 + .byte 0 + .byte 13 + .byte 0 + .byte 11 + .byte 1 + .byte 6 + .byte 3 + .byte 13 + .byte 2 + .byte 10 + .byte 1 + .byte 4 + .byte 3 + .byte 9 + .byte 2 + .byte 2 + .byte 1 + .byte 4 + .byte 2 + .byte 8 + .byte 0 + .byte 1 + .byte 1 + .byte 2 + .byte 2 + .byte 4 + .byte 0 + .byte 9 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 2 + .byte 12 + .byte 0 + .byte 9 + .byte 1 + .byte 2 + .byte 3 + .byte 5 + .byte 2 + .byte 10 + .byte 0 + + .text +.global skinny_128_384_init + .type skinny_128_384_init, @function +skinny_128_384_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,12 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_384_init, .-skinny_128_384_init + + .text +.global skinny_128_384_encrypt + .type skinny_128_384_encrypt, @function +skinny_128_384_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + std Y+33,r18 + std Y+34,r19 + std Y+35,r20 + std Y+36,r21 + ldd r18,Z+36 + ldd r19,Z+37 + ldd r20,Z+38 + ldd r21,Z+39 + std Y+37,r18 + std Y+38,r19 + std Y+39,r20 + std Y+40,r21 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + std Y+41,r18 + std Y+42,r19 + std Y+43,r20 + std Y+44,r21 + ldd r18,Z+44 + ldd r19,Z+45 + ldd r20,Z+46 + ldd 
r21,Z+47 + std Y+45,r18 + std Y+46,r19 + std Y+47,r20 + std Y+48,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +114: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif 
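The long #if defined(RAMPZ)/.../#endif ladders above all perform one operation: replace a working-register byte with an entry from one of the program-memory tables (table_0 appears to hold the SKINNY-128 S-box, table_2 and table_3 the TK2/TK3 LFSR maps, and table_4 the round constants). For readers more comfortable with C, the same flash lookup is conventionally written with avr-libc as in the sketch below; table_0_c and sbox_lookup are hypothetical names used only for illustration:

#include <stdint.h>
#if defined(__AVR__)
#include <avr/pgmspace.h>
/* First four entries copied from table_0 above; the remaining 252 bytes
 * are elided here for brevity. */
static const uint8_t table_0_c[256] PROGMEM = {
    0x65, 0x4c, 0x6a, 0x42 /* ... */
};
static inline uint8_t sbox_lookup(uint8_t x)
{
    /* pgm_read_byte() compiles to an LPM-based load from flash, the
     * operation the generated assembly above spells out by hand. */
    return pgm_read_byte(&table_0_c[x]);
}
#else
/* On a non-AVR host the table is ordinary const data. */
static const uint8_t table_0_c[256] = { 0x65, 0x4c, 0x6a, 0x42 /* ... */ };
static inline uint8_t sbox_lookup(uint8_t x) { return table_0_c[x]; }
#endif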
+ eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 
+#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov 
r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm 
r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov 
r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 
+#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,112 + brne 5721f + rjmp 790f +5721: + 
ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + 
std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 114b +790: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_encrypt, .-skinny_128_384_encrypt + +.global skinny_128_384_encrypt_tk_full + .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt + + .text +.global skinny_128_384_decrypt + .type skinny_128_384_decrypt, @function +skinny_128_384_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r23 + std Y+2,r2 + std Y+3,r21 + std Y+4,r20 + std Y+5,r3 + std Y+6,r18 + std Y+7,r19 + std Y+8,r22 + std Y+9,r9 + std Y+10,r10 + std Y+11,r7 + std Y+12,r6 + std Y+13,r11 + std Y+14,r4 + std Y+15,r5 + std Y+16,r8 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r23 + std Y+18,r2 + std Y+19,r21 + std Y+20,r20 + std Y+21,r3 + std Y+22,r18 + std Y+23,r19 + std Y+24,r22 + std Y+25,r9 + std Y+26,r10 + std Y+27,r7 + std Y+28,r6 + std Y+29,r11 + std Y+30,r4 + std Y+31,r5 + std Y+32,r8 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r22,Z+36 + ldd r23,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+33,r23 + std Y+34,r2 + std Y+35,r21 + std Y+36,r20 + std Y+37,r3 + std Y+38,r18 + std Y+39,r19 + std Y+40,r22 + std Y+41,r9 + std Y+42,r10 + std Y+43,r7 + std Y+44,r6 + std Y+45,r11 + std Y+46,r4 + std Y+47,r5 + std Y+48,r8 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +122: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z 
+#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 122b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,28 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +150: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 150b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 +179: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 
+#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 179b + std Y+33,r12 + std Y+34,r13 + std Y+35,r14 + std Y+36,r15 + std Y+37,r24 + std Y+38,r25 + std Y+39,r16 + std Y+40,r17 + ldi r26,28 + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 +207: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 207b + std Y+41,r12 + std Y+42,r13 + std Y+43,r14 + std Y+44,r15 + std Y+45,r24 + std Y+46,r25 + std Y+47,r16 + std Y+48,r17 + ldi r26,112 +227: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm 
r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor 
r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std 
Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + 
mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z 
+#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + 
mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z 
+#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + 
ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif 
defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 903f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 227b +903: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_decrypt, .-skinny_128_384_decrypt + + .text +.global skinny_128_256_init + .type skinny_128_256_init, @function +skinny_128_256_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,8 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_256_init, .-skinny_128_256_init + + .text +.global skinny_128_256_encrypt + .type skinny_128_256_encrypt, @function +skinny_128_256_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 
+ push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +82: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov 
r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor 
r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if 
defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi 
r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + 
ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,96 + breq 594f + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) 
+ elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 82b +594: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_encrypt, .-skinny_128_256_encrypt + +.global skinny_128_256_encrypt_tk_full + .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt + + .text +.global skinny_128_256_decrypt + .type skinny_128_256_decrypt, @function +skinny_128_256_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + std Y+5,r22 + std Y+6,r23 + std Y+7,r2 + std Y+8,r3 + std Y+9,r4 + std Y+10,r5 + std Y+11,r6 + std Y+12,r7 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + std Y+21,r22 + std Y+22,r23 + std Y+23,r2 + std Y+24,r3 + std Y+25,r4 + std Y+26,r5 + std Y+27,r6 + std Y+28,r7 + std Y+29,r8 + std Y+30,r9 + std Y+31,r10 + std Y+32,r11 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,24 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +90: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm 
r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 90b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,24 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +118: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 118b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,96 +139: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else 
+ lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 
+#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + 
elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd 
r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif 
defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 651f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 139b +651: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_decrypt, .-skinny_128_256_decrypt + +#endif diff --git a/romulus/Implementations/crypto_aead/romulusm1/rhys/internal-skinny128.c b/romulus/Implementations/crypto_aead/romulusm1/rhys/internal-skinny128.c index 65ba4ed..579ced1 100644 --- a/romulus/Implementations/crypto_aead/romulusm1/rhys/internal-skinny128.c +++ b/romulus/Implementations/crypto_aead/romulusm1/rhys/internal-skinny128.c @@ -25,6 +25,8 @@ #include "internal-util.h" #include +#if !defined(__AVR__) + STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) { /* This function is used to fast-forward the TK1 tweak value @@ -55,42 +57,33 @@ STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) ((row3 << 24) & 0xFF000000U); } -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - 
size_t key_len) +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t TK3[4]; uint32_t *schedule; unsigned round; uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || (key_len != 32 && key_len != 48)) - return 0; - +#endif + +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); + memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); +#else /* Set the initial states of TK1, TK2, and TK3 */ - if (key_len == 32) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - TK3[0] = le_load_word32(key + 16); - TK3[1] = le_load_word32(key + 20); - TK3[2] = le_load_word32(key + 24); - TK3[3] = le_load_word32(key + 28); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); + TK3[0] = le_load_word32(key + 32); + TK3[1] = le_load_word32(key + 36); + TK3[2] = le_load_word32(key + 40); + TK3[3] = le_load_word32(key + 44); /* Set up the key schedule using TK2 and TK3. TK1 is not added * to the key schedule because we will derive that part of the @@ -116,20 +109,7 @@ int skinny_128_384_init skinny128_LFSR3(TK3[0]); skinny128_LFSR3(TK3[1]); } - return 1; -} - -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_384_encrypt @@ -138,7 +118,13 @@ void skinny_128_384_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -148,14 +134,24 @@ void skinny_128_384_encrypt s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -163,8 +159,15 @@ void skinny_128_384_encrypt skinny128_sbox(s3); /* 
Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -185,6 +188,16 @@ void skinny_128_384_encrypt /* Permute TK1 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_permute_tk(TK3); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -200,7 +213,13 @@ void skinny_128_384_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0x15; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -215,15 +234,47 @@ void skinny_128_384_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Permute TK1 to fast-forward it to the end of the key schedule */ skinny128_fast_forward_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_fast_forward_tk(TK2); + skinny128_fast_forward_tk(TK3); + for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
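[Editor's note, not part of the patch.] The "round += 2" loop above pre-advances the TK2/TK3 LFSRs to their end-of-schedule state: the tweakey permutation swaps the two halves of the tweakey every round and the byte-wise LFSR is applied only to the top half, so each cell receives exactly one LFSR step per two rounds. Applying LFSR2/LFSR3 to all four words SKINNY_128_384_ROUNDS/2 times therefore reproduces the cumulative effect of the whole forward schedule (the net permutation itself is handled by skinny128_fast_forward_tk), and the decryption loop that follows undoes it one round at a time.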
+ skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); + skinny128_LFSR3(TK3[2]); + skinny128_LFSR3(TK3[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_inv_permute_tk(TK3); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); + skinny128_LFSR2(TK3[2]); + skinny128_LFSR2(TK3[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -240,8 +291,15 @@ void skinny_128_384_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -259,13 +317,18 @@ void skinny_128_384_decrypt } void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2) { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; uint32_t TK2[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -275,7 +338,7 @@ void skinny_128_384_encrypt_tk2 s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1/TK2 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); @@ -284,9 +347,15 @@ void skinny_128_384_encrypt_tk2 TK2[1] = le_load_word32(tk2 + 4); TK2[2] = le_load_word32(tk2 + 8); TK2[3] = le_load_word32(tk2 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -294,8 +363,15 @@ void skinny_128_384_encrypt_tk2 skinny128_sbox(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -319,6 +395,13 @@ void skinny_128_384_encrypt_tk2 skinny128_permute_tk(TK2); skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK3); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -408,33 +491,27 @@ void skinny_128_384_encrypt_tk_full 
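[Editor's aside, not part of the patch.] The rc updates added in the hunks above replace the precomputed schedule's round constants with a 6-bit LFSR that is stepped on the fly. Below is a minimal, self-contained sketch of the forward and inverse steps; the helper names are hypothetical, but the expressions mirror the patch.

#include <stdio.h>

/* Hypothetical helper names; the expressions mirror the patch. */
static unsigned char rc_forward(unsigned char rc)
{
    /* New bit 0 is bit5 ^ bit4 ^ 1 of the old state; keep 6 bits. */
    rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
    return rc & 0x3F;
}

static unsigned char rc_backward(unsigned char rc)
{
    /* Exact inverse of rc_forward: bits 1..5 shift down and bit 5 is
     * rebuilt as bit0 ^ bit5 ^ 1 of the current state. */
    return (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20);
}

int main(void)
{
    unsigned char rc = 0;
    int round;
    for (round = 0; round < 8; ++round) {
        rc = rc_forward(rc);
        printf("round %d: rc=0x%02X row0^=0x%X row1^=0x%X\n",
               round, (unsigned)rc, (unsigned)(rc & 0x0F), (unsigned)(rc >> 4));
    }
    /* Stepping backwards recovers the previous state. */
    printf("undo last step: 0x%02X\n", (unsigned)rc_backward(rc));
    return 0;
}

Encryption starts from rc = 0 and steps the LFSR forward each round; the decryption paths seed rc with the values shown in the patch (0x15 for SKINNY-128-384, 0x09 for SKINNY-128-256) and apply the inverse step each round, so the same constants come out in reverse order, split as (rc & 0x0F) into row 0 and (rc >> 4) into row 1 of the state.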
le_store_word32(output + 12, s3); } -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len) +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t *schedule; unsigned round; uint8_t rc; +#endif - /* Validate the parameters */ - if (!ks || !key || (key_len != 16 && key_len != 32)) - return 0; - +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); +#else /* Set the initial states of TK1 and TK2 */ - if (key_len == 16) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); /* Set up the key schedule using TK2. TK1 is not added * to the key schedule because we will derive that part of the @@ -457,20 +534,7 @@ int skinny_128_256_init skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); } - return 1; -} - -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_256_encrypt @@ -479,7 +543,12 @@ void skinny_128_256_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -494,18 +563,31 @@ void skinny_128_256_encrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); skinny128_sbox(s2); skinny128_sbox(s3); - /* Apply the subkey for this round */ + /* XOR the round constant and the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -524,8 +606,15 @@ void skinny_128_256_encrypt s1 = s0; s0 = temp; - /* Permute TK1 for the next round */ + /* Permute TK1 and TK2 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -541,7 +630,12 @@ void 
skinny_128_256_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0x09; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -558,12 +652,29 @@ void skinny_128_256_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2. + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -580,8 +691,15 @@ void skinny_128_256_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -670,142 +788,14 @@ void skinny_128_256_encrypt_tk_full le_store_word32(output + 12, s3); } -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len) -{ - uint32_t TK1[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || key_len != 16) - return 0; - - /* Set the initial state of TK1 */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); +#else /* __AVR__ */ - /* Set up the key schedule using TK1 */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK1[0] ^ (rc & 0x0F); - schedule[1] = TK1[1] ^ (rc >> 4); - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); - } - return 1; -} - -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) +void skinny_128_384_encrypt_tk2 + (skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, const unsigned char *tk2) { - uint32_t s0, s1, s2, s3; - const uint32_t *schedule = ks->k; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); + memcpy(ks->TK2, tk2, 16); + skinny_128_384_encrypt(ks, output, input); } -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - const uint32_t *schedule; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_128_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule -= 2) { - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} +#endif /* __AVR__ */ diff --git a/romulus/Implementations/crypto_aead/romulusm1/rhys/internal-skinny128.h b/romulus/Implementations/crypto_aead/romulusm1/rhys/internal-skinny128.h index 76b34f5..2bfda3c 100644 --- a/romulus/Implementations/crypto_aead/romulusm1/rhys/internal-skinny128.h +++ b/romulus/Implementations/crypto_aead/romulusm1/rhys/internal-skinny128.h @@ -39,6 +39,16 @@ extern "C" { #endif /** + * \def SKINNY_128_SMALL_SCHEDULE + * \brief Defined to 1 to use the 
small key schedule version of SKINNY-128. + */ +#if defined(__AVR__) +#define SKINNY_128_SMALL_SCHEDULE 1 +#else +#define SKINNY_128_SMALL_SCHEDULE 0 +#endif + +/** * \brief Size of a block for SKINNY-128 block ciphers. */ #define SKINNY_128_BLOCK_SIZE 16 @@ -56,8 +66,16 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; + + /** TK3 for the small key schedule */ + uint8_t TK3[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_384_ROUNDS * 2]; +#endif } skinny_128_384_key_schedule_t; @@ -66,29 +84,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 32 or 48, - * where 32 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-384. - * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); /** * \brief Encrypts a 128-bit block with SKINNY-128-384. @@ -133,9 +131,12 @@ void skinny_128_384_decrypt * This version is useful when both TK1 and TK2 change from block to block. * When the key is initialized with skinny_128_384_init(), the TK2 part of * the key value should be set to zero. + * + * \note Some versions of this function may modify the key schedule to + * copy tk2 into place. */ void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2); /** @@ -170,8 +171,13 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_256_ROUNDS * 2]; +#endif } skinny_128_256_key_schedule_t; @@ -180,29 +186,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16 or 32, - * where 16 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. 
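[Editor's note, not part of the patch.] The new SKINNY_128_SMALL_SCHEDULE option trades a little extra per-round work for a much smaller key-schedule structure: the full schedule keeps two 32-bit subkey words per round (k[SKINNY_128_384_ROUNDS * 2]) next to TK1, which, assuming the usual 56 rounds of SKINNY-128-384, is 16 + 56 * 2 * 4 = 464 bytes, whereas the small schedule keeps only TK1/TK2/TK3 (48 bytes) and re-derives each round's two subkey words from TK2, TK3, and the on-the-fly round constant. That is presumably why the option is switched on for __AVR__ targets, where RAM is the scarcer resource.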
- */ -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); /** * \brief Encrypts a 128-bit block with SKINNY-128-256. @@ -251,63 +237,6 @@ void skinny_128_256_encrypt_tk_full (const unsigned char key[32], unsigned char *output, const unsigned char *input); -/** - * \brief Number of rounds for SKINNY-128-128. - */ -#define SKINNY_128_128_ROUNDS 40 - -/** - * \brief Structure of the key schedule for SKINNY-128-128. - */ -typedef struct -{ - /** Words of the key schedule */ - uint32_t k[SKINNY_128_128_ROUNDS * 2]; - -} skinny_128_128_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-128. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - #ifdef __cplusplus } #endif diff --git a/romulus/Implementations/crypto_aead/romulusm1/rhys/internal-util.h b/romulus/Implementations/crypto_aead/romulusm1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/romulus/Implementations/crypto_aead/romulusm1/rhys/internal-util.h +++ b/romulus/Implementations/crypto_aead/romulusm1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/romulus/Implementations/crypto_aead/romulusm1/rhys/romulus.c b/romulus/Implementations/crypto_aead/romulusm1/rhys/romulus.c index be1c0fa..bb19cc5 100644 --- a/romulus/Implementations/crypto_aead/romulusm1/rhys/romulus.c +++ b/romulus/Implementations/crypto_aead/romulusm1/rhys/romulus.c @@ -116,14 +116,15 @@ static void romulus1_init (skinny_128_384_key_schedule_t *ks, const unsigned char *k, const unsigned char *npub) { - unsigned char TK[32]; + unsigned char TK[48]; + TK[0] = 0x01; /* Initialize the 56-bit LFSR counter */ + memset(TK + 1, 0, 15); if (npub) - memcpy(TK, npub, 16); + memcpy(TK + 16, npub, 16); else - memset(TK, 0, 16); - memcpy(TK + 16, k, 16); - skinny_128_384_init(ks, TK, sizeof(TK)); - ks->TK1[0] = 0x01; /* Initialize the 56-bit LFSR counter */ + memset(TK + 16, 0, 16); + memcpy(TK + 32, k, 16); + skinny_128_384_init(ks, TK); } /** @@ -138,14 +139,18 @@ static void romulus2_init (skinny_128_384_key_schedule_t *ks, const unsigned char *k, const unsigned char *npub) { - unsigned char TK[32]; - memcpy(TK, k, 16); - memset(TK + 16, 0, 16); - TK[16] = 0x01; /* Initialize the high 24 bits of the LFSR counter */ - skinny_128_384_init(ks, TK, sizeof(TK)); - ks->TK1[0] = 0x01; 
/* Initialize the low 24 bits of the LFSR counter */ - if (npub) - memcpy(ks->TK1 + 4, npub, 12); + unsigned char TK[48]; + TK[0] = 0x01; /* Initialize the low 24 bits of the LFSR counter */ + if (npub) { + TK[1] = TK[2] = TK[3] = 0; + memcpy(TK + 4, npub, 12); + } else { + memset(TK + 1, 0, 15); + } + memcpy(TK + 16, k, 16); + TK[32] = 0x01; /* Initialize the high 24 bits of the LFSR counter */ + memset(TK + 33, 0, 15); + skinny_128_384_init(ks, TK); } /** @@ -160,10 +165,16 @@ static void romulus3_init (skinny_128_256_key_schedule_t *ks, const unsigned char *k, const unsigned char *npub) { - skinny_128_256_init(ks, k, 16); - ks->TK1[0] = 0x01; /* Initialize the 24-bit LFSR counter */ - if (npub) - memcpy(ks->TK1 + 4, npub, 12); + unsigned char TK[32]; + TK[0] = 0x01; /* Initialize the 24-bit LFSR counter */ + if (npub) { + TK[1] = TK[2] = TK[3] = 0; + memcpy(TK + 4, npub, 12); + } else { + memset(TK + 1, 0, 15); + } + memcpy(TK + 16, k, 16); + skinny_128_256_init(ks, TK); } /** diff --git a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/aead-common.c b/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
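[Editor's sketch, not part of the patch; offsets are taken from the romulus1/2/3_init hunks above.] The Romulus init functions now assemble the complete tweakey block up front and pass it to the fixed-size skinny_128_*_init calls, which matters because with SKINNY_128_SMALL_SCHEDULE those init functions simply copy TK1/TK2/TK3 into place instead of expanding a schedule. For romulus2_init the 48-byte block is laid out as follows:

/*  offset       contents                                   tweakey word
 *  TK[0]        0x01 (low 24 bits of the LFSR counter)     TK1
 *  TK[1..3]     0                                           TK1
 *  TK[4..15]    npub (96-bit nonce)                         TK1
 *  TK[16..31]   k    (128-bit key)                          TK2
 *  TK[32]       0x01 (high 24 bits of the LFSR counter)     TK3
 *  TK[33..47]   0                                           TK3
 */

romulus1_init and romulus3_init build their blocks the same way, with the counter, nonce, and key placed as shown in their own hunks.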
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/aead-common.h b/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. 
- * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
- * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/api.h b/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/api.h deleted file mode 100644 index c3c0a27..0000000 --- a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 12 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/encrypt.c b/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/encrypt.c deleted file mode 100644 index 520d992..0000000 --- a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "romulus.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return romulus_m2_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return romulus_m2_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-skinny128-avr.S b/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-skinny128-avr.S deleted file mode 100644 index d342cd5..0000000 --- a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-skinny128-avr.S +++ /dev/null @@ -1,10099 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 256 -table_0: - .byte 101 - .byte 76 - .byte 106 - .byte 66 - .byte 75 - .byte 99 - .byte 67 - .byte 107 - .byte 85 - .byte 117 - .byte 90 - .byte 122 - .byte 83 - .byte 115 - .byte 91 - .byte 123 - .byte 53 - .byte 140 - .byte 58 - .byte 129 - .byte 137 - .byte 51 - .byte 128 - .byte 59 - .byte 149 - .byte 37 - .byte 152 - .byte 42 - .byte 144 - .byte 35 - .byte 153 - .byte 43 - .byte 229 - .byte 204 - .byte 232 - .byte 193 - .byte 201 - .byte 224 - .byte 192 - .byte 233 - .byte 213 - .byte 245 - .byte 216 - .byte 248 - .byte 208 - .byte 240 - .byte 217 - .byte 249 - .byte 165 - .byte 28 - .byte 168 - .byte 18 - .byte 27 - .byte 160 - .byte 19 - .byte 169 - .byte 5 - .byte 181 - .byte 10 - .byte 184 - .byte 3 - .byte 176 - .byte 11 - .byte 185 - .byte 50 - .byte 136 - .byte 60 - .byte 133 - .byte 141 - .byte 52 - .byte 132 - .byte 61 - .byte 145 - .byte 34 - .byte 156 - .byte 44 - .byte 148 - .byte 36 - .byte 157 - .byte 45 - .byte 98 - .byte 74 - .byte 108 - .byte 69 - 
.byte 77 - .byte 100 - .byte 68 - .byte 109 - .byte 82 - .byte 114 - .byte 92 - .byte 124 - .byte 84 - .byte 116 - .byte 93 - .byte 125 - .byte 161 - .byte 26 - .byte 172 - .byte 21 - .byte 29 - .byte 164 - .byte 20 - .byte 173 - .byte 2 - .byte 177 - .byte 12 - .byte 188 - .byte 4 - .byte 180 - .byte 13 - .byte 189 - .byte 225 - .byte 200 - .byte 236 - .byte 197 - .byte 205 - .byte 228 - .byte 196 - .byte 237 - .byte 209 - .byte 241 - .byte 220 - .byte 252 - .byte 212 - .byte 244 - .byte 221 - .byte 253 - .byte 54 - .byte 142 - .byte 56 - .byte 130 - .byte 139 - .byte 48 - .byte 131 - .byte 57 - .byte 150 - .byte 38 - .byte 154 - .byte 40 - .byte 147 - .byte 32 - .byte 155 - .byte 41 - .byte 102 - .byte 78 - .byte 104 - .byte 65 - .byte 73 - .byte 96 - .byte 64 - .byte 105 - .byte 86 - .byte 118 - .byte 88 - .byte 120 - .byte 80 - .byte 112 - .byte 89 - .byte 121 - .byte 166 - .byte 30 - .byte 170 - .byte 17 - .byte 25 - .byte 163 - .byte 16 - .byte 171 - .byte 6 - .byte 182 - .byte 8 - .byte 186 - .byte 0 - .byte 179 - .byte 9 - .byte 187 - .byte 230 - .byte 206 - .byte 234 - .byte 194 - .byte 203 - .byte 227 - .byte 195 - .byte 235 - .byte 214 - .byte 246 - .byte 218 - .byte 250 - .byte 211 - .byte 243 - .byte 219 - .byte 251 - .byte 49 - .byte 138 - .byte 62 - .byte 134 - .byte 143 - .byte 55 - .byte 135 - .byte 63 - .byte 146 - .byte 33 - .byte 158 - .byte 46 - .byte 151 - .byte 39 - .byte 159 - .byte 47 - .byte 97 - .byte 72 - .byte 110 - .byte 70 - .byte 79 - .byte 103 - .byte 71 - .byte 111 - .byte 81 - .byte 113 - .byte 94 - .byte 126 - .byte 87 - .byte 119 - .byte 95 - .byte 127 - .byte 162 - .byte 24 - .byte 174 - .byte 22 - .byte 31 - .byte 167 - .byte 23 - .byte 175 - .byte 1 - .byte 178 - .byte 14 - .byte 190 - .byte 7 - .byte 183 - .byte 15 - .byte 191 - .byte 226 - .byte 202 - .byte 238 - .byte 198 - .byte 207 - .byte 231 - .byte 199 - .byte 239 - .byte 210 - .byte 242 - .byte 222 - .byte 254 - .byte 215 - .byte 247 - .byte 223 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 256 -table_1: - .byte 172 - .byte 232 - .byte 104 - .byte 60 - .byte 108 - .byte 56 - .byte 168 - .byte 236 - .byte 170 - .byte 174 - .byte 58 - .byte 62 - .byte 106 - .byte 110 - .byte 234 - .byte 238 - .byte 166 - .byte 163 - .byte 51 - .byte 54 - .byte 102 - .byte 99 - .byte 227 - .byte 230 - .byte 225 - .byte 164 - .byte 97 - .byte 52 - .byte 49 - .byte 100 - .byte 161 - .byte 228 - .byte 141 - .byte 201 - .byte 73 - .byte 29 - .byte 77 - .byte 25 - .byte 137 - .byte 205 - .byte 139 - .byte 143 - .byte 27 - .byte 31 - .byte 75 - .byte 79 - .byte 203 - .byte 207 - .byte 133 - .byte 192 - .byte 64 - .byte 21 - .byte 69 - .byte 16 - .byte 128 - .byte 197 - .byte 130 - .byte 135 - .byte 18 - .byte 23 - .byte 66 - .byte 71 - .byte 194 - .byte 199 - .byte 150 - .byte 147 - .byte 3 - .byte 6 - .byte 86 - .byte 83 - .byte 211 - .byte 214 - .byte 209 - .byte 148 - .byte 81 - .byte 4 - .byte 1 - .byte 84 - .byte 145 - .byte 212 - .byte 156 - .byte 216 - .byte 88 - .byte 12 - .byte 92 - .byte 8 - .byte 152 - .byte 220 - .byte 154 - .byte 158 - .byte 10 - .byte 14 - .byte 90 - .byte 94 - .byte 218 - .byte 222 - .byte 149 - .byte 208 - .byte 80 - .byte 5 - .byte 85 - .byte 0 - .byte 144 - .byte 213 - .byte 146 - .byte 151 - .byte 2 - .byte 7 - .byte 82 - .byte 87 - .byte 210 - .byte 215 - .byte 157 - .byte 217 - .byte 89 - .byte 13 - .byte 93 - .byte 9 - .byte 153 - .byte 221 - .byte 155 - .byte 159 - .byte 11 - .byte 15 - .byte 91 - .byte 95 - .byte 
219 - .byte 223 - .byte 22 - .byte 19 - .byte 131 - .byte 134 - .byte 70 - .byte 67 - .byte 195 - .byte 198 - .byte 65 - .byte 20 - .byte 193 - .byte 132 - .byte 17 - .byte 68 - .byte 129 - .byte 196 - .byte 28 - .byte 72 - .byte 200 - .byte 140 - .byte 76 - .byte 24 - .byte 136 - .byte 204 - .byte 26 - .byte 30 - .byte 138 - .byte 142 - .byte 74 - .byte 78 - .byte 202 - .byte 206 - .byte 53 - .byte 96 - .byte 224 - .byte 165 - .byte 101 - .byte 48 - .byte 160 - .byte 229 - .byte 50 - .byte 55 - .byte 162 - .byte 167 - .byte 98 - .byte 103 - .byte 226 - .byte 231 - .byte 61 - .byte 105 - .byte 233 - .byte 173 - .byte 109 - .byte 57 - .byte 169 - .byte 237 - .byte 59 - .byte 63 - .byte 171 - .byte 175 - .byte 107 - .byte 111 - .byte 235 - .byte 239 - .byte 38 - .byte 35 - .byte 179 - .byte 182 - .byte 118 - .byte 115 - .byte 243 - .byte 246 - .byte 113 - .byte 36 - .byte 241 - .byte 180 - .byte 33 - .byte 116 - .byte 177 - .byte 244 - .byte 44 - .byte 120 - .byte 248 - .byte 188 - .byte 124 - .byte 40 - .byte 184 - .byte 252 - .byte 42 - .byte 46 - .byte 186 - .byte 190 - .byte 122 - .byte 126 - .byte 250 - .byte 254 - .byte 37 - .byte 112 - .byte 240 - .byte 181 - .byte 117 - .byte 32 - .byte 176 - .byte 245 - .byte 34 - .byte 39 - .byte 178 - .byte 183 - .byte 114 - .byte 119 - .byte 242 - .byte 247 - .byte 45 - .byte 121 - .byte 249 - .byte 189 - .byte 125 - .byte 41 - .byte 185 - .byte 253 - .byte 43 - .byte 47 - .byte 187 - .byte 191 - .byte 123 - .byte 127 - .byte 251 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_2, @object - .size table_2, 256 -table_2: - .byte 0 - .byte 2 - .byte 4 - .byte 6 - .byte 8 - .byte 10 - .byte 12 - .byte 14 - .byte 16 - .byte 18 - .byte 20 - .byte 22 - .byte 24 - .byte 26 - .byte 28 - .byte 30 - .byte 32 - .byte 34 - .byte 36 - .byte 38 - .byte 40 - .byte 42 - .byte 44 - .byte 46 - .byte 48 - .byte 50 - .byte 52 - .byte 54 - .byte 56 - .byte 58 - .byte 60 - .byte 62 - .byte 65 - .byte 67 - .byte 69 - .byte 71 - .byte 73 - .byte 75 - .byte 77 - .byte 79 - .byte 81 - .byte 83 - .byte 85 - .byte 87 - .byte 89 - .byte 91 - .byte 93 - .byte 95 - .byte 97 - .byte 99 - .byte 101 - .byte 103 - .byte 105 - .byte 107 - .byte 109 - .byte 111 - .byte 113 - .byte 115 - .byte 117 - .byte 119 - .byte 121 - .byte 123 - .byte 125 - .byte 127 - .byte 128 - .byte 130 - .byte 132 - .byte 134 - .byte 136 - .byte 138 - .byte 140 - .byte 142 - .byte 144 - .byte 146 - .byte 148 - .byte 150 - .byte 152 - .byte 154 - .byte 156 - .byte 158 - .byte 160 - .byte 162 - .byte 164 - .byte 166 - .byte 168 - .byte 170 - .byte 172 - .byte 174 - .byte 176 - .byte 178 - .byte 180 - .byte 182 - .byte 184 - .byte 186 - .byte 188 - .byte 190 - .byte 193 - .byte 195 - .byte 197 - .byte 199 - .byte 201 - .byte 203 - .byte 205 - .byte 207 - .byte 209 - .byte 211 - .byte 213 - .byte 215 - .byte 217 - .byte 219 - .byte 221 - .byte 223 - .byte 225 - .byte 227 - .byte 229 - .byte 231 - .byte 233 - .byte 235 - .byte 237 - .byte 239 - .byte 241 - .byte 243 - .byte 245 - .byte 247 - .byte 249 - .byte 251 - .byte 253 - .byte 255 - .byte 1 - .byte 3 - .byte 5 - .byte 7 - .byte 9 - .byte 11 - .byte 13 - .byte 15 - .byte 17 - .byte 19 - .byte 21 - .byte 23 - .byte 25 - .byte 27 - .byte 29 - .byte 31 - .byte 33 - .byte 35 - .byte 37 - .byte 39 - .byte 41 - .byte 43 - .byte 45 - .byte 47 - .byte 49 - .byte 51 - .byte 53 - .byte 55 - .byte 57 - .byte 59 - .byte 61 - .byte 63 - .byte 64 - .byte 66 - .byte 68 - .byte 70 - .byte 72 - .byte 74 - .byte 76 - .byte 78 - .byte 80 - .byte 
82 - .byte 84 - .byte 86 - .byte 88 - .byte 90 - .byte 92 - .byte 94 - .byte 96 - .byte 98 - .byte 100 - .byte 102 - .byte 104 - .byte 106 - .byte 108 - .byte 110 - .byte 112 - .byte 114 - .byte 116 - .byte 118 - .byte 120 - .byte 122 - .byte 124 - .byte 126 - .byte 129 - .byte 131 - .byte 133 - .byte 135 - .byte 137 - .byte 139 - .byte 141 - .byte 143 - .byte 145 - .byte 147 - .byte 149 - .byte 151 - .byte 153 - .byte 155 - .byte 157 - .byte 159 - .byte 161 - .byte 163 - .byte 165 - .byte 167 - .byte 169 - .byte 171 - .byte 173 - .byte 175 - .byte 177 - .byte 179 - .byte 181 - .byte 183 - .byte 185 - .byte 187 - .byte 189 - .byte 191 - .byte 192 - .byte 194 - .byte 196 - .byte 198 - .byte 200 - .byte 202 - .byte 204 - .byte 206 - .byte 208 - .byte 210 - .byte 212 - .byte 214 - .byte 216 - .byte 218 - .byte 220 - .byte 222 - .byte 224 - .byte 226 - .byte 228 - .byte 230 - .byte 232 - .byte 234 - .byte 236 - .byte 238 - .byte 240 - .byte 242 - .byte 244 - .byte 246 - .byte 248 - .byte 250 - .byte 252 - .byte 254 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_3, @object - .size table_3, 256 -table_3: - .byte 0 - .byte 128 - .byte 1 - .byte 129 - .byte 2 - .byte 130 - .byte 3 - .byte 131 - .byte 4 - .byte 132 - .byte 5 - .byte 133 - .byte 6 - .byte 134 - .byte 7 - .byte 135 - .byte 8 - .byte 136 - .byte 9 - .byte 137 - .byte 10 - .byte 138 - .byte 11 - .byte 139 - .byte 12 - .byte 140 - .byte 13 - .byte 141 - .byte 14 - .byte 142 - .byte 15 - .byte 143 - .byte 16 - .byte 144 - .byte 17 - .byte 145 - .byte 18 - .byte 146 - .byte 19 - .byte 147 - .byte 20 - .byte 148 - .byte 21 - .byte 149 - .byte 22 - .byte 150 - .byte 23 - .byte 151 - .byte 24 - .byte 152 - .byte 25 - .byte 153 - .byte 26 - .byte 154 - .byte 27 - .byte 155 - .byte 28 - .byte 156 - .byte 29 - .byte 157 - .byte 30 - .byte 158 - .byte 31 - .byte 159 - .byte 160 - .byte 32 - .byte 161 - .byte 33 - .byte 162 - .byte 34 - .byte 163 - .byte 35 - .byte 164 - .byte 36 - .byte 165 - .byte 37 - .byte 166 - .byte 38 - .byte 167 - .byte 39 - .byte 168 - .byte 40 - .byte 169 - .byte 41 - .byte 170 - .byte 42 - .byte 171 - .byte 43 - .byte 172 - .byte 44 - .byte 173 - .byte 45 - .byte 174 - .byte 46 - .byte 175 - .byte 47 - .byte 176 - .byte 48 - .byte 177 - .byte 49 - .byte 178 - .byte 50 - .byte 179 - .byte 51 - .byte 180 - .byte 52 - .byte 181 - .byte 53 - .byte 182 - .byte 54 - .byte 183 - .byte 55 - .byte 184 - .byte 56 - .byte 185 - .byte 57 - .byte 186 - .byte 58 - .byte 187 - .byte 59 - .byte 188 - .byte 60 - .byte 189 - .byte 61 - .byte 190 - .byte 62 - .byte 191 - .byte 63 - .byte 64 - .byte 192 - .byte 65 - .byte 193 - .byte 66 - .byte 194 - .byte 67 - .byte 195 - .byte 68 - .byte 196 - .byte 69 - .byte 197 - .byte 70 - .byte 198 - .byte 71 - .byte 199 - .byte 72 - .byte 200 - .byte 73 - .byte 201 - .byte 74 - .byte 202 - .byte 75 - .byte 203 - .byte 76 - .byte 204 - .byte 77 - .byte 205 - .byte 78 - .byte 206 - .byte 79 - .byte 207 - .byte 80 - .byte 208 - .byte 81 - .byte 209 - .byte 82 - .byte 210 - .byte 83 - .byte 211 - .byte 84 - .byte 212 - .byte 85 - .byte 213 - .byte 86 - .byte 214 - .byte 87 - .byte 215 - .byte 88 - .byte 216 - .byte 89 - .byte 217 - .byte 90 - .byte 218 - .byte 91 - .byte 219 - .byte 92 - .byte 220 - .byte 93 - .byte 221 - .byte 94 - .byte 222 - .byte 95 - .byte 223 - .byte 224 - .byte 96 - .byte 225 - .byte 97 - .byte 226 - .byte 98 - .byte 227 - .byte 99 - .byte 228 - .byte 100 - .byte 229 - .byte 101 - .byte 230 - .byte 102 - .byte 231 - .byte 103 - .byte 232 - .byte 104 - .byte 
233 - .byte 105 - .byte 234 - .byte 106 - .byte 235 - .byte 107 - .byte 236 - .byte 108 - .byte 237 - .byte 109 - .byte 238 - .byte 110 - .byte 239 - .byte 111 - .byte 240 - .byte 112 - .byte 241 - .byte 113 - .byte 242 - .byte 114 - .byte 243 - .byte 115 - .byte 244 - .byte 116 - .byte 245 - .byte 117 - .byte 246 - .byte 118 - .byte 247 - .byte 119 - .byte 248 - .byte 120 - .byte 249 - .byte 121 - .byte 250 - .byte 122 - .byte 251 - .byte 123 - .byte 252 - .byte 124 - .byte 253 - .byte 125 - .byte 254 - .byte 126 - .byte 255 - .byte 127 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_4, @object - .size table_4, 112 -table_4: - .byte 1 - .byte 0 - .byte 3 - .byte 0 - .byte 7 - .byte 0 - .byte 15 - .byte 0 - .byte 15 - .byte 1 - .byte 14 - .byte 3 - .byte 13 - .byte 3 - .byte 11 - .byte 3 - .byte 7 - .byte 3 - .byte 15 - .byte 2 - .byte 14 - .byte 1 - .byte 12 - .byte 3 - .byte 9 - .byte 3 - .byte 3 - .byte 3 - .byte 7 - .byte 2 - .byte 14 - .byte 0 - .byte 13 - .byte 1 - .byte 10 - .byte 3 - .byte 5 - .byte 3 - .byte 11 - .byte 2 - .byte 6 - .byte 1 - .byte 12 - .byte 2 - .byte 8 - .byte 1 - .byte 0 - .byte 3 - .byte 1 - .byte 2 - .byte 2 - .byte 0 - .byte 5 - .byte 0 - .byte 11 - .byte 0 - .byte 7 - .byte 1 - .byte 14 - .byte 2 - .byte 12 - .byte 1 - .byte 8 - .byte 3 - .byte 1 - .byte 3 - .byte 3 - .byte 2 - .byte 6 - .byte 0 - .byte 13 - .byte 0 - .byte 11 - .byte 1 - .byte 6 - .byte 3 - .byte 13 - .byte 2 - .byte 10 - .byte 1 - .byte 4 - .byte 3 - .byte 9 - .byte 2 - .byte 2 - .byte 1 - .byte 4 - .byte 2 - .byte 8 - .byte 0 - .byte 1 - .byte 1 - .byte 2 - .byte 2 - .byte 4 - .byte 0 - .byte 9 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 2 - .byte 12 - .byte 0 - .byte 9 - .byte 1 - .byte 2 - .byte 3 - .byte 5 - .byte 2 - .byte 10 - .byte 0 - - .text -.global skinny_128_384_init - .type skinny_128_384_init, @function -skinny_128_384_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,12 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_384_init, .-skinny_128_384_init - - .text -.global skinny_128_384_encrypt - .type skinny_128_384_encrypt, @function -skinny_128_384_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - 
ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - std Y+33,r18 - std Y+34,r19 - std Y+35,r20 - std Y+36,r21 - ldd r18,Z+36 - ldd r19,Z+37 - ldd r20,Z+38 - ldd r21,Z+39 - std Y+37,r18 - std Y+38,r19 - std Y+39,r20 - std Y+40,r21 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - std Y+41,r18 - std Y+42,r19 - std Y+43,r20 - std Y+44,r21 - ldd r18,Z+44 - ldd r19,Z+45 - ldd r20,Z+46 - ldd r21,Z+47 - std Y+45,r18 - std Y+46,r19 - std Y+47,r20 - std Y+48,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -114: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi 
r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd 
r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) 
- ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm 
r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - 
ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd 
r0,Y+48 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,112 - brne 5721f - rjmp 790f -5721: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif 
defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 114b -790: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_encrypt, .-skinny_128_384_encrypt - -.global skinny_128_384_encrypt_tk_full - .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt - - .text -.global skinny_128_384_decrypt - .type skinny_128_384_decrypt, @function -skinny_128_384_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r23 - std Y+2,r2 - std Y+3,r21 - std Y+4,r20 - std Y+5,r3 - std Y+6,r18 - std Y+7,r19 - std Y+8,r22 - std Y+9,r9 - std Y+10,r10 - std Y+11,r7 - std Y+12,r6 - std Y+13,r11 - std Y+14,r4 - std Y+15,r5 - std Y+16,r8 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r23 - std Y+18,r2 - std Y+19,r21 - std Y+20,r20 - std Y+21,r3 - std Y+22,r18 - std Y+23,r19 - std Y+24,r22 - std Y+25,r9 - std Y+26,r10 - std Y+27,r7 - std Y+28,r6 - std Y+29,r11 - std Y+30,r4 - std Y+31,r5 - std Y+32,r8 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r22,Z+36 - ldd r23,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+33,r23 - std Y+34,r2 - std Y+35,r21 - std Y+36,r20 - std Y+37,r3 - std Y+38,r18 - std Y+39,r19 - std Y+40,r22 - std Y+41,r9 - std Y+42,r10 - std Y+43,r7 - std Y+44,r6 - std Y+45,r11 - std Y+46,r4 - std Y+47,r5 - std Y+48,r8 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 
-#endif - ldi r26,28 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -122: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 122b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,28 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -150: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 150b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 -179: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z 
-#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 179b - std Y+33,r12 - std Y+34,r13 - std Y+35,r14 - std Y+36,r15 - std Y+37,r24 - std Y+38,r25 - std Y+39,r16 - std Y+40,r17 - ldi r26,28 - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 -207: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 207b - std Y+41,r12 - std Y+42,r13 - std Y+43,r14 - std Y+44,r15 - std Y+45,r24 - std Y+46,r25 - std Y+47,r16 - std Y+48,r17 - ldi r26,112 -227: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - 
lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - 
eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - 
lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov 
r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - 
mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - 
elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - 
mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out 
_SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 903f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 227b -903: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_decrypt, .-skinny_128_384_decrypt - - .text -.global skinny_128_256_init - .type skinny_128_256_init, @function -skinny_128_256_init: - movw r30,r24 - movw r26,r22 
-.L__stack_usage = 2 - ldi r22,8 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_256_init, .-skinny_128_256_init - - .text -.global skinny_128_256_encrypt - .type skinny_128_256_encrypt, @function -skinny_128_256_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -82: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - 
mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi 
r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if 
defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 
- mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z 
-#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,96 - breq 594f - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 82b -594: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_encrypt, .-skinny_128_256_encrypt - -.global skinny_128_256_encrypt_tk_full - .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt - - .text -.global skinny_128_256_decrypt - .type skinny_128_256_decrypt, @function -skinny_128_256_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - std Y+5,r22 - std Y+6,r23 - std Y+7,r2 - std Y+8,r3 - std Y+9,r4 - std Y+10,r5 - std Y+11,r6 - std Y+12,r7 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - std Y+21,r22 - std Y+22,r23 - std Y+23,r2 - std Y+24,r3 - std Y+25,r4 - std Y+26,r5 - std Y+27,r6 - std Y+28,r7 - std Y+29,r8 - std Y+30,r9 - std Y+31,r10 - std Y+32,r11 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi 
r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,24 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -90: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 90b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,24 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -118: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 118b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,96 -139: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 
- ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov 
r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov 
r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd 
r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif 
defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif 
defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 651f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 139b -651: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_decrypt, .-skinny_128_256_decrypt - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-skinny128.c b/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-skinny128.c deleted file mode 100644 index 579ced1..0000000 --- 
a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-skinny128.c +++ /dev/null @@ -1,801 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-skinny128.h" -#include "internal-skinnyutil.h" -#include "internal-util.h" -#include - -#if !defined(__AVR__) - -STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) -{ - /* This function is used to fast-forward the TK1 tweak value - * to the value at the end of the key schedule for decryption. - * - * The tweak permutation repeats every 16 rounds, so SKINNY-128-256 - * with 48 rounds does not need any fast forwarding applied. - * SKINNY-128-128 with 40 rounds and SKINNY-128-384 with 56 rounds - * are equivalent to applying the permutation 8 times: - * - * PT*8 = [5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12] - */ - uint32_t row0 = tk[0]; - uint32_t row1 = tk[1]; - uint32_t row2 = tk[2]; - uint32_t row3 = tk[3]; - tk[0] = ((row1 >> 8) & 0x0000FFFFU) | - ((row0 >> 8) & 0x00FF0000U) | - ((row0 << 8) & 0xFF000000U); - tk[1] = ((row1 >> 24) & 0x000000FFU) | - ((row0 << 8) & 0x00FFFF00U) | - ((row1 << 24) & 0xFF000000U); - tk[2] = ((row3 >> 8) & 0x0000FFFFU) | - ((row2 >> 8) & 0x00FF0000U) | - ((row2 << 8) & 0xFF000000U); - tk[3] = ((row3 >> 24) & 0x000000FFU) | - ((row2 << 8) & 0x00FFFF00U) | - ((row3 << 24) & 0xFF000000U); -} - -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); - memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); -#else - /* Set the initial states of TK1, TK2, and TK3 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Set up the key schedule using TK2 and TK3. 
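/*
 * Standalone sketch (separate from the patched sources): the comment above
 * fast-forwards TK1 by noting that the tweakey cell permutation repeats
 * every 16 rounds and that eight applications give the PT*8 table quoted
 * there.  This composes the cell-index table PT (listed later in
 * internal-skinnyutil.h) to confirm both facts; main() and the array names
 * are illustrative only.
 */
#include <stdio.h>
#include <string.h>

static const unsigned char PT[16] =
    {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7};
static const unsigned char PT8_expected[16] =
    {5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12};

int main(void)
{
    unsigned char p[16], q[16];
    int i, n;
    for (i = 0; i < 16; ++i)
        p[i] = (unsigned char)i;        /* start from the identity */
    for (n = 1; n <= 16; ++n) {
        for (i = 0; i < 16; ++i)        /* apply PT once more */
            q[i] = p[PT[i]];
        memcpy(p, q, 16);
        if (n == 8)
            printf("PT^8 matches PT*8: %d\n", !memcmp(p, PT8_expected, 16));
    }
    for (i = 0; i < 16; ++i)            /* after 16 rounds: identity again */
        if (p[i] != i)
            return 1;
    printf("PT^16 is the identity\n");
    return 0;
}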
TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ TK3[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ TK3[1] ^ (rc >> 4); - - /* Permute TK2 and TK3 for the next round */ - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - - /* Apply the LFSR's to TK2 and TK3 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } -#endif -} - -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
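/*
 * Illustrative sketch (not part of the patched sources) of the row-shift
 * trick described in this comment: each row of the state is packed
 * little-endian, with cell 0 in the low byte, so rotating the 32-bit row
 * word left by 8 bits moves every byte cell one position to the right with
 * wrap-around.  rotl32 is a local stand-in for the leftRotate8 macro from
 * internal-util.h.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t rotl32(uint32_t x, unsigned n)
{
    return (x << n) | (x >> (32 - n));
}

int main(void)
{
    /* cells c0..c3 = 0xA0, 0xA1, 0xA2, 0xA3, packed little-endian */
    uint32_t row = 0xA3A2A1A0U;
    uint32_t shifted = rotl32(row, 8);
    unsigned i;
    for (i = 0; i < 4; ++i)   /* prints A3 A0 A1 A2: cells moved right by 1 */
        printf("cell %u = 0x%02X\n", i,
               (unsigned)((shifted >> (8 * i)) & 0xFF));
    return 0;
}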
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0x15; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Permute TK1 to fast-forward it to the end of the key schedule */ - skinny128_fast_forward_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_fast_forward_tk(TK2); - skinny128_fast_forward_tk(TK3); - for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
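/*
 * Quick check, separate from the patched sources: the fast-forward loop
 * that follows, and the inverse schedule steps later in the decryption
 * code, rely on skinny128_LFSR2 and skinny128_LFSR3 being mutual inverses
 * (internal-skinnyutil.h defines skinny128_inv_LFSR2 as skinny128_LFSR3).
 * This verifies the round trip for every byte value, replicated across a
 * 32-bit word the way the macros operate.  Assumes internal-skinnyutil.h
 * from this implementation is on the include path.
 */
#include <stdio.h>
#include <stdint.h>
#include "internal-skinnyutil.h"

int main(void)
{
    unsigned v;
    for (v = 0; v < 256; ++v) {
        uint32_t x = (uint32_t)v * 0x01010101U;  /* same byte in all lanes */
        uint32_t w = x;
        skinny128_LFSR2(w);
        skinny128_LFSR3(w);                      /* should undo LFSR2 */
        if (w != x) {
            printf("mismatch at 0x%02X\n", v);
            return 1;
        }
    }
    printf("LFSR3 inverts LFSR2 for all byte values\n");
    return 0;
}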
- skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - skinny128_LFSR3(TK3[2]); - skinny128_LFSR3(TK3[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_inv_permute_tk(TK3); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); - skinny128_LFSR2(TK3[2]); - skinny128_LFSR2(TK3[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); - TK2[0] = le_load_word32(tk2); - TK2[1] = le_load_word32(tk2 + 4); - TK2[2] = le_load_word32(tk2 + 8); - TK2[3] = le_load_word32(tk2 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; - s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
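/*
 * Standalone sketch (not from the patched sources): the "inverse mix of
 * the columns" used in the decryption rounds above reverses the column mix
 * from the encryption rounds.  The two blocks below repeat the exact XOR
 * and row-swap sequences from this file and check that the state returns
 * to its starting value; the test words are arbitrary.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t s0 = 0x01234567U, s1 = 0x89ABCDEFU;
    uint32_t s2 = 0xF0E1D2C3U, s3 = 0xB4A59687U;
    uint32_t a0 = s0, a1 = s1, a2 = s2, a3 = s3, temp;

    /* forward mix, as in the encryption rounds */
    s1 ^= s2; s2 ^= s0; temp = s3 ^ s2;
    s3 = s2;  s2 = s1;  s1 = s0;  s0 = temp;

    /* inverse mix, as in the decryption rounds */
    temp = s3; s3 = s0; s0 = s1; s1 = s2;
    s3 ^= temp; s2 = temp ^ s0; s1 ^= s2;

    printf("state restored: %d\n",
           s0 == a0 && s1 == a1 && s2 == a2 && s3 == a3);
    return 0;
}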
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK3); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); -#else - /* Set the initial states of TK1 and TK2 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Set up the key schedule using TK2. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
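/*
 * Standalone sketch (not part of the patched sources): the rc update that
 * follows is a 6-bit LFSR shared by all of the SKINNY-128 variants in this
 * file.  Each constant is split into its low nibble (XORed into
 * schedule[0] / s0) and its top two bits (XORed into schedule[1] / s1).
 * The first values it produces are 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, ...
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t rc = 0;
    unsigned round;
    for (round = 0; round < 8; ++round) {
        rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
        rc &= 0x3F;
        printf("round %u: rc=0x%02X low=0x%X high=0x%X\n",
               round, rc, rc & 0x0F, rc >> 4);
    }
    return 0;
}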
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ (rc >> 4); - - /* Permute TK2 for the next round */ - skinny128_permute_tk(TK2); - - /* Apply the LFSR to TK2 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } -#endif -} - -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0x09; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1. 
- * There is no need to fast-forward TK1 because the value at - * the end of the key schedule is the same as at the start */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2. - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
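/*
 * Quick check, outside the patched sources: skinny_128_256_decrypt above
 * copies TK1 without fast-forwarding because the tweakey permutation has
 * period 16 and SKINNY-128-256 runs 48 rounds, an exact multiple of 16.
 * This applies skinny128_permute_tk 48 times and confirms the words come
 * back unchanged; the starting values are arbitrary.  Assumes
 * internal-skinnyutil.h from this implementation is on the include path.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include "internal-skinnyutil.h"

int main(void)
{
    uint32_t tk[4] = {0x03020100U, 0x07060504U, 0x0B0A0908U, 0x0F0E0D0CU};
    uint32_t orig[4];
    int round;
    memcpy(orig, tk, sizeof(orig));
    for (round = 0; round < 48; ++round)
        skinny128_permute_tk(tk);
    printf("TK1 unchanged after 48 rounds: %d\n",
           memcmp(tk, orig, sizeof(orig)) == 0);
    return 0;
}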
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -#else /* __AVR__ */ - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - memcpy(ks->TK2, tk2, 16); - skinny_128_384_encrypt(ks, output, input); -} - -#endif /* __AVR__ */ diff --git a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-skinny128.h b/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-skinny128.h deleted file mode 100644 index 2bfda3c..0000000 --- a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-skinny128.h +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNY128_H -#define LW_INTERNAL_SKINNY128_H - -/** - * \file internal-skinny128.h - * \brief SKINNY-128 block cipher family. - * - * References: https://eprint.iacr.org/2016/660.pdf, - * https://sites.google.com/site/skinnycipher/ - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \def SKINNY_128_SMALL_SCHEDULE - * \brief Defined to 1 to use the small key schedule version of SKINNY-128. - */ -#if defined(__AVR__) -#define SKINNY_128_SMALL_SCHEDULE 1 -#else -#define SKINNY_128_SMALL_SCHEDULE 0 -#endif - -/** - * \brief Size of a block for SKINNY-128 block ciphers. - */ -#define SKINNY_128_BLOCK_SIZE 16 - -/** - * \brief Number of rounds for SKINNY-128-384. - */ -#define SKINNY_128_384_ROUNDS 56 - -/** - * \brief Structure of the key schedule for SKINNY-128-384. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; - - /** TK3 for the small key schedule */ - uint8_t TK3[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_384_ROUNDS * 2]; -#endif - -} skinny_128_384_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-384. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and an explicitly - * provided TK2 value. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tk2 TK2 value that should be updated on the fly. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when both TK1 and TK2 change from block to block. - * When the key is initialized with skinny_128_384_init(), the TK2 part of - * the key value should be set to zero. - * - * \note Some versions of this function may modify the key schedule to - * copy tk2 into place. - */ -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and a - * fully specified tweakey value. - * - * \param key Points to the 384-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-384 but - * more memory-efficient. - */ -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input); - -/** - * \brief Number of rounds for SKINNY-128-256. - */ -#define SKINNY_128_256_ROUNDS 48 - -/** - * \brief Structure of the key schedule for SKINNY-128-256. 
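/*
 * Usage sketch (not part of the patched sources) for the SKINNY-128-384
 * API declared above: build a key schedule, encrypt one block and decrypt
 * it back.  The key and plaintext bytes are arbitrary placeholders, and it
 * assumes internal-skinny128.c from this implementation is compiled and
 * linked in.
 */
#include <stdio.h>
#include <string.h>
#include "internal-skinny128.h"

int main(void)
{
    unsigned char key[48], pt[16], ct[16], out[16];
    skinny_128_384_key_schedule_t ks;
    unsigned i;

    for (i = 0; i < sizeof(key); ++i) key[i] = (unsigned char)i;
    for (i = 0; i < sizeof(pt); ++i)  pt[i] = (unsigned char)(0xA0 + i);

    skinny_128_384_init(&ks, key);
    skinny_128_384_encrypt(&ks, ct, pt);
    skinny_128_384_decrypt(&ks, out, ct);

    printf("encrypt/decrypt round trip ok: %d\n", memcmp(pt, out, 16) == 0);
    return 0;
}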
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_256_ROUNDS * 2]; -#endif - -} skinny_128_256_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256 and a - * fully specified tweakey value. - * - * \param key Points to the 256-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-256 but - * more memory-efficient. - */ -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-skinnyutil.h b/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-skinnyutil.h deleted file mode 100644 index 83136cb..0000000 --- a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-skinnyutil.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNYUTIL_H -#define LW_INTERNAL_SKINNYUTIL_H - -/** - * \file internal-skinnyutil.h - * \brief Utilities to help implement SKINNY and its variants. - */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @cond skinnyutil */ - -/* Utilities for implementing SKINNY-128 */ - -#define skinny128_LFSR2(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ - (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ - } while (0) - - -#define skinny128_LFSR3(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ - (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) -#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) - -#define skinny128_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint32_t row2 = tk[2]; \ - uint32_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 16) | (row3 >> 16); \ - tk[0] = ((row2 >> 8) & 0x000000FFU) | \ - ((row2 << 16) & 0x00FF0000U) | \ - ( row3 & 0xFF00FF00U); \ - tk[1] = ((row2 >> 16) & 0x000000FFU) | \ - (row2 & 0xFF000000U) | \ - ((row3 << 8) & 0x0000FF00U) | \ - ( row3 & 0x00FF0000U); \ - } while (0) - -#define skinny128_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint32_t row0 = tk[0]; \ - uint32_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 >> 16) & 0x000000FFU) | \ - ((row0 << 8) & 0x0000FF00U) | \ - ((row1 << 16) & 0x00FF0000U) | \ - ( row1 & 0xFF000000U); \ - tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ - ((row0 << 16) & 0xFF000000U) | \ - ((row1 >> 16) & 0x000000FFU) | \ - ((row1 << 8) & 0x00FF0000U); \ - } while (0) - -/* - * Apply the SKINNY sbox. The original version from the specification is - * equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE(x) - * ((((x) & 0x01010101U) << 2) | - * (((x) & 0x06060606U) << 5) | - * (((x) & 0x20202020U) >> 5) | - * (((x) & 0xC8C8C8C8U) >> 2) | - * (((x) & 0x10101010U) >> 1)) - * - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * return SBOX_SWAP(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. 
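/*
 * Standalone sketch (not from the patched sources): cross-checks the
 * bit-sliced skinny128_sbox defined below against the straightforward
 * reference sequence quoted in the comment above (SBOX_MIX and
 * SBOX_PERMUTE three times, a final SBOX_MIX, then SBOX_SWAP), for every
 * byte value replicated across a word.  The reference macros are copied
 * from that comment; assumes internal-skinnyutil.h is on the include path.
 */
#include <stdio.h>
#include <stdint.h>
#include "internal-skinnyutil.h"

#define SBOX_MIX(x)  (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x))
#define SBOX_SWAP(x) \
    (((x) & 0xF9F9F9F9U) | \
     (((x) >> 1) & 0x02020202U) | \
     (((x) << 1) & 0x04040404U))
#define SBOX_PERMUTE(x) \
    ((((x) & 0x01010101U) << 2) | \
     (((x) & 0x06060606U) << 5) | \
     (((x) & 0x20202020U) >> 5) | \
     (((x) & 0xC8C8C8C8U) >> 2) | \
     (((x) & 0x10101010U) >> 1))

static uint32_t reference_sbox(uint32_t x)
{
    x = SBOX_MIX(x); x = SBOX_PERMUTE(x);
    x = SBOX_MIX(x); x = SBOX_PERMUTE(x);
    x = SBOX_MIX(x); x = SBOX_PERMUTE(x);
    x = SBOX_MIX(x);
    return SBOX_SWAP(x);
}

int main(void)
{
    unsigned v;
    for (v = 0; v < 256; ++v) {
        uint32_t x = (uint32_t)v * 0x01010101U;
        uint32_t s = x;
        skinny128_sbox(s);               /* optimized version, in place */
        if (s != reference_sbox(x)) {
            printf("mismatch at 0x%02X\n", v);
            return 1;
        }
    }
    printf("bit-sliced S-box matches the reference formulation\n");
    return 0;
}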
- */ -#define skinny128_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ - y = (((x >> 5) & (x << 1)) & 0x04040404U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ - x = ((x & 0x08080808U) << 1) | \ - ((x & 0x32323232U) << 2) | \ - ((x & 0x01010101U) << 5) | \ - ((x & 0x80808080U) >> 6) | \ - ((x & 0x40404040U) >> 4) | \ - ((x & 0x04040404U) >> 2); \ -} while (0) - -/* - * Apply the inverse of the SKINNY sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE_INV(x) - * ((((x) & 0x08080808U) << 1) | - * (((x) & 0x32323232U) << 2) | - * (((x) & 0x01010101U) << 5) | - * (((x) & 0xC0C0C0C0U) >> 5) | - * (((x) & 0x04040404U) >> 2)) - * - * x = SBOX_SWAP(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_inv_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ - x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ - y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ - x = ((x & 0x01010101U) << 2) | \ - ((x & 0x04040404U) << 4) | \ - ((x & 0x02020202U) << 6) | \ - ((x & 0x20202020U) >> 5) | \ - ((x & 0xC8C8C8C8U) >> 2) | \ - ((x & 0x10101010U) >> 1); \ -} while (0) - -/* Utilities for implementing SKINNY-64 */ - -#define skinny64_LFSR2(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ - } while (0) - -#define skinny64_LFSR3(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) -#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) - -#define skinny64_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint16_t row2 = tk[2]; \ - uint16_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 8) | (row3 >> 8); \ - tk[0] = ((row2 << 4) & 0xF000U) | \ - ((row2 >> 8) & 0x00F0U) | \ - ( row3 & 0x0F0FU); \ - tk[1] = ((row2 << 8) & 0xF000U) | \ - ((row3 >> 4) & 0x0F00U) | \ - ( row3 & 0x00F0U) | \ - ( row2 & 0x000FU); \ - } while (0) - 
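/*
 * Quick check, outside the patched sources: the SKINNY-128 decryption code
 * depends on skinny128_inv_permute_tk and skinny128_inv_sbox (defined
 * above in this header) undoing their forward counterparts.  This runs
 * both round trips on arbitrary values.  Assumes internal-skinnyutil.h is
 * on the include path.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include "internal-skinnyutil.h"

int main(void)
{
    uint32_t tk[4] = {0x00112233U, 0x44556677U, 0x8899AABBU, 0xCCDDEEFFU};
    uint32_t orig[4];
    uint32_t x, s;
    unsigned v;

    memcpy(orig, tk, sizeof(orig));
    skinny128_permute_tk(tk);
    skinny128_inv_permute_tk(tk);          /* PT' undoes PT */
    printf("tweakey permute round trip: %d\n",
           memcmp(tk, orig, sizeof(orig)) == 0);

    for (v = 0; v < 256; ++v) {
        x = (uint32_t)v * 0x01010101U;
        s = x;
        skinny128_sbox(s);
        skinny128_inv_sbox(s);             /* inverse S-box undoes S-box */
        if (s != x) {
            printf("S-box mismatch at 0x%02X\n", v);
            return 1;
        }
    }
    printf("S-box round trip ok\n");
    return 0;
}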
-#define skinny64_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint16_t row0 = tk[0]; \ - uint16_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 << 8) & 0xF000U) | \ - ((row0 >> 4) & 0x0F00U) | \ - ((row1 >> 8) & 0x00F0U) | \ - ( row1 & 0x000FU); \ - tk[3] = ((row1 << 8) & 0xF000U) | \ - ((row0 << 8) & 0x0F00U) | \ - ((row1 >> 4) & 0x00F0U) | \ - ((row0 >> 8) & 0x000FU); \ - } while (0) - -/* - * Apply the SKINNY-64 sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT(x) - * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_SHIFT steps to be performed with one final rotation. - * This reduces the number of required shift operations from 14 to 10. - * - * We can further reduce the number of NOT operations from 4 to 2 - * using the technique from https://github.com/kste/skinny_avx to - * convert NOR-XOR operations into AND-XOR operations by converting - * the S-box into its NOT-inverse. - */ -#define skinny64_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ - x = ~x; \ - x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ -} while (0) - -/* - * Apply the inverse of the SKINNY-64 sbox. The original version - * from the specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT_INV(x) - * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * return SBOX_MIX(x); - */ -#define skinny64_inv_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = ~x; \ - x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ -} while (0) - -/** @endcond */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-util.h b/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - 
(ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. 
This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/romulus.c b/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/romulus.c deleted file mode 100644 index bb19cc5..0000000 --- a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/romulus.c +++ /dev/null @@ -1,1974 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "romulus.h" -#include "internal-skinny128.h" -#include "internal-util.h" -#include - -aead_cipher_t const romulus_n1_cipher = { - "Romulus-N1", - ROMULUS_KEY_SIZE, - ROMULUS1_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_n1_aead_encrypt, - romulus_n1_aead_decrypt -}; - -aead_cipher_t const romulus_n2_cipher = { - "Romulus-N2", - ROMULUS_KEY_SIZE, - ROMULUS2_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_n2_aead_encrypt, - romulus_n2_aead_decrypt -}; - -aead_cipher_t const romulus_n3_cipher = { - "Romulus-N3", - ROMULUS_KEY_SIZE, - ROMULUS3_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_n3_aead_encrypt, - romulus_n3_aead_decrypt -}; - -aead_cipher_t const romulus_m1_cipher = { - "Romulus-M1", - ROMULUS_KEY_SIZE, - ROMULUS1_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_m1_aead_encrypt, - romulus_m1_aead_decrypt -}; - -aead_cipher_t const romulus_m2_cipher = { - "Romulus-M2", - ROMULUS_KEY_SIZE, - ROMULUS2_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_m2_aead_encrypt, - romulus_m2_aead_decrypt -}; - -aead_cipher_t const romulus_m3_cipher = { - "Romulus-M3", - ROMULUS_KEY_SIZE, - ROMULUS3_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_m3_aead_encrypt, - romulus_m3_aead_decrypt -}; - -/** - * \brief Limit on the number of bytes of message or associated data (128Mb). - * - * Romulus-N1 and Romulus-M1 use a 56-bit block counter which allows for - * payloads well into the petabyte range. It is unlikely that an embedded - * device will have that much memory to store a contiguous packet! - * - * Romulus-N2 and Romulus-M2 use a 48-bit block counter but the upper - * 24 bits are difficult to modify in the key schedule. So we only - * update the low 24 bits and leave the high 24 bits fixed. - * - * Romulus-N3 and Romulus-M3 use a 24-bit block counter. - * - * For all algorithms, we limit the block counter to 2^23 so that the block - * counter can never exceed 2^24 - 1. - */ -#define ROMULUS_DATA_LIMIT \ - ((unsigned long long)((1ULL << 23) * SKINNY_128_BLOCK_SIZE)) - -/** - * \brief Initializes the key schedule for Romulus-N1 or Romulus-M1. - * - * \param ks Points to the key schedule to initialize. - * \param k Points to the 16 bytes of the key. - * \param npub Points to the 16 bytes of the nonce. May be NULL - * if the nonce will be updated on the fly. - */ -static void romulus1_init - (skinny_128_384_key_schedule_t *ks, - const unsigned char *k, const unsigned char *npub) -{ - unsigned char TK[48]; - TK[0] = 0x01; /* Initialize the 56-bit LFSR counter */ - memset(TK + 1, 0, 15); - if (npub) - memcpy(TK + 16, npub, 16); - else - memset(TK + 16, 0, 16); - memcpy(TK + 32, k, 16); - skinny_128_384_init(ks, TK); -} - -/** - * \brief Initializes the key schedule for Romulus-N2 or Romulus-M2. - * - * \param ks Points to the key schedule to initialize. - * \param k Points to the 16 bytes of the key. - * \param npub Points to the 12 bytes of the nonce. May be NULL - * if the nonce will be updated on the fly. 
- */ -static void romulus2_init - (skinny_128_384_key_schedule_t *ks, - const unsigned char *k, const unsigned char *npub) -{ - unsigned char TK[48]; - TK[0] = 0x01; /* Initialize the low 24 bits of the LFSR counter */ - if (npub) { - TK[1] = TK[2] = TK[3] = 0; - memcpy(TK + 4, npub, 12); - } else { - memset(TK + 1, 0, 15); - } - memcpy(TK + 16, k, 16); - TK[32] = 0x01; /* Initialize the high 24 bits of the LFSR counter */ - memset(TK + 33, 0, 15); - skinny_128_384_init(ks, TK); -} - -/** - * \brief Initializes the key schedule for Romulus-N3 or Romulus-M3. - * - * \param ks Points to the key schedule to initialize. - * \param k Points to the 16 bytes of the key. - * \param npub Points to the 12 bytes of the nonce. May be NULL - * if the nonce will be updated on the fly. - */ -static void romulus3_init - (skinny_128_256_key_schedule_t *ks, - const unsigned char *k, const unsigned char *npub) -{ - unsigned char TK[32]; - TK[0] = 0x01; /* Initialize the 24-bit LFSR counter */ - if (npub) { - TK[1] = TK[2] = TK[3] = 0; - memcpy(TK + 4, npub, 12); - } else { - memset(TK + 1, 0, 15); - } - memcpy(TK + 16, k, 16); - skinny_128_256_init(ks, TK); -} - -/** - * \brief Sets the domain separation value for Romulus-N1 and M1. - * - * \param ks The key schedule to set the domain separation value into. - * \param domain The domain separation value. - */ -#define romulus1_set_domain(ks, domain) ((ks)->TK1[7] = (domain)) - -/** - * \brief Sets the domain separation value for Romulus-N2 and M2. - * - * \param ks The key schedule to set the domain separation value into. - * \param domain The domain separation value. - */ -#define romulus2_set_domain(ks, domain) ((ks)->TK1[3] = (domain)) - -/** - * \brief Sets the domain separation value for Romulus-N3 and M3. - * - * \param ks The key schedule to set the domain separation value into. - * \param domain The domain separation value. - */ -#define romulus3_set_domain(ks, domain) ((ks)->TK1[3] = (domain)) - -/** - * \brief Updates the 56-bit LFSR block counter for Romulus-N1 and M1. - * - * \param TK1 Points to the TK1 part of the key schedule containing the LFSR. - */ -STATIC_INLINE void romulus1_update_counter(uint8_t TK1[16]) -{ - uint8_t mask = (uint8_t)(((int8_t)(TK1[6])) >> 7); - TK1[6] = (TK1[6] << 1) | (TK1[5] >> 7); - TK1[5] = (TK1[5] << 1) | (TK1[4] >> 7); - TK1[4] = (TK1[4] << 1) | (TK1[3] >> 7); - TK1[3] = (TK1[3] << 1) | (TK1[2] >> 7); - TK1[2] = (TK1[2] << 1) | (TK1[1] >> 7); - TK1[1] = (TK1[1] << 1) | (TK1[0] >> 7); - TK1[0] = (TK1[0] << 1) ^ (mask & 0x95); -} - -/** - * \brief Updates the 24-bit LFSR block counter for Romulus-N2 or M2. - * - * \param TK1 Points to the TK1 part of the key schedule containing the LFSR. - * - * For Romulus-N2 and Romulus-M2 this will only update the low 24 bits of - * the 48-bit LFSR. The high 24 bits are fixed due to ROMULUS_DATA_LIMIT. - */ -STATIC_INLINE void romulus2_update_counter(uint8_t TK1[16]) -{ - uint8_t mask = (uint8_t)(((int8_t)(TK1[2])) >> 7); - TK1[2] = (TK1[2] << 1) | (TK1[1] >> 7); - TK1[1] = (TK1[1] << 1) | (TK1[0] >> 7); - TK1[0] = (TK1[0] << 1) ^ (mask & 0x1B); -} - -/** - * \brief Updates the 24-bit LFSR block counter for Romulus-N3 or M3. - * - * \param TK1 Points to the TK1 part of the key schedule containing the LFSR. - */ -#define romulus3_update_counter(TK1) romulus2_update_counter((TK1)) - -/** - * \brief Process the asssociated data for Romulus-N1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. 
- * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - */ -static void romulus_n1_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char temp; - - /* Handle the special case of no associated data */ - if (adlen == 0) { - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x1A); - skinny_128_384_encrypt_tk2(ks, S, S, npub); - return; - } - - /* Process all double blocks except the last */ - romulus1_set_domain(ks, 0x08); - while (adlen > 32) { - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - ad += 32; - adlen -= 32; - } - - /* Pad and process the left-over blocks */ - romulus1_update_counter(ks->TK1); - temp = (unsigned)adlen; - if (temp == 32) { - /* Left-over complete double block */ - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x18); - } else if (temp > 16) { - /* Left-over partial double block */ - unsigned char pad[16]; - temp -= 16; - lw_xor_block(S, ad, 16); - memcpy(pad, ad + 16, temp); - memset(pad + temp, 0, 15 - temp); - pad[15] = temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x1A); - } else if (temp == 16) { - /* Left-over complete single block */ - lw_xor_block(S, ad, temp); - romulus1_set_domain(ks, 0x18); - } else { - /* Left-over partial single block */ - lw_xor_block(S, ad, temp); - S[15] ^= temp; - romulus1_set_domain(ks, 0x1A); - } - skinny_128_384_encrypt_tk2(ks, S, S, npub); -} - -/** - * \brief Process the asssociated data for Romulus-N2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
- */ -static void romulus_n2_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char temp; - - /* Handle the special case of no associated data */ - if (adlen == 0) { - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x5A); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all double blocks except the last */ - romulus2_set_domain(ks, 0x48); - while (adlen > 28) { - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Pad and process the left-over blocks */ - romulus2_update_counter(ks->TK1); - temp = (unsigned)adlen; - if (temp == 28) { - /* Left-over complete double block */ - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x58); - } else if (temp > 16) { - /* Left-over partial double block */ - temp -= 16; - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp); - ks->TK1[15] = temp; - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x5A); - } else if (temp == 16) { - /* Left-over complete single block */ - lw_xor_block(S, ad, temp); - romulus2_set_domain(ks, 0x58); - } else { - /* Left-over partial single block */ - lw_xor_block(S, ad, temp); - S[15] ^= temp; - romulus2_set_domain(ks, 0x5A); - } - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Process the asssociated data for Romulus-N3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
- */ -static void romulus_n3_process_ad - (skinny_128_256_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char temp; - - /* Handle the special case of no associated data */ - if (adlen == 0) { - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x9A); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_256_encrypt(ks, S, S); - return; - } - - /* Process all double blocks except the last */ - romulus3_set_domain(ks, 0x88); - while (adlen > 28) { - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Pad and process the left-over blocks */ - romulus3_update_counter(ks->TK1); - temp = (unsigned)adlen; - if (temp == 28) { - /* Left-over complete double block */ - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x98); - } else if (temp > 16) { - /* Left-over partial double block */ - temp -= 16; - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp); - ks->TK1[15] = temp; - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x9A); - } else if (temp == 16) { - /* Left-over complete single block */ - lw_xor_block(S, ad, temp); - romulus3_set_domain(ks, 0x98); - } else { - /* Left-over partial single block */ - lw_xor_block(S, ad, temp); - S[15] ^= temp; - romulus3_set_domain(ks, 0x9A); - } - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Determine the domain separation value to use on the last - * block of the associated data processing. - * - * \param adlen Length of the associated data in bytes. - * \param mlen Length of the message in bytes. - * \param t Size of the second half of a double block; 12 or 16. - * - * \return The domain separation bits to use to finalize the last block. - */ -static uint8_t romulus_m_final_ad_domain - (unsigned long long adlen, unsigned long long mlen, unsigned t) -{ - uint8_t domain = 0; - unsigned split = 16U; - unsigned leftover; - - /* Determine which domain bits we need based on the length of the ad */ - if (adlen == 0) { - /* No associated data, so only 1 block with padding */ - domain ^= 0x02; - split = t; - } else { - /* Even or odd associated data length? */ - leftover = (unsigned)(adlen % (16U + t)); - if (leftover == 0) { - /* Even with a full double block at the end */ - domain ^= 0x08; - } else if (leftover < split) { - /* Odd with a partial single block at the end */ - domain ^= 0x02; - split = t; - } else if (leftover > split) { - /* Even with a partial double block at the end */ - domain ^= 0x0A; - } else { - /* Odd with a full single block at the end */ - split = t; - } - } - - /* Determine which domain bits we need based on the length of the message */ - if (mlen == 0) { - /* No message, so only 1 block with padding */ - domain ^= 0x01; - } else { - /* Even or odd message length? 
*/ - leftover = (unsigned)(mlen % (16U + t)); - if (leftover == 0) { - /* Even with a full double block at the end */ - domain ^= 0x04; - } else if (leftover < split) { - /* Odd with a partial single block at the end */ - domain ^= 0x01; - } else if (leftover > split) { - /* Even with a partial double block at the end */ - domain ^= 0x05; - } - } - return domain; -} - -/** - * \brief Process the asssociated data for Romulus-M1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - * \param m Points to the message plaintext. - * \param mlen Length of the message plaintext. - */ -static void romulus_m1_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char pad[16]; - uint8_t final_domain = 0x30; - unsigned temp; - - /* Determine the domain separator to use on the final block */ - final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 16); - - /* Process all associated data double blocks except the last */ - romulus1_set_domain(ks, 0x28); - while (adlen > 32) { - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - ad += 32; - adlen -= 32; - } - - /* Process the last associated data double block */ - temp = (unsigned)adlen; - if (temp == 32) { - /* Last associated data double block is full */ - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - } else if (temp > 16) { - /* Last associated data double block is partial */ - temp -= 16; - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(pad, ad + 16, temp); - memset(pad + temp, 0, sizeof(pad) - temp - 1); - pad[sizeof(pad) - 1] = (unsigned char)temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - romulus1_update_counter(ks->TK1); - } else { - /* Last associated data block is single. 
Needs to be combined - * with the first block of the message payload */ - romulus1_set_domain(ks, 0x2C); - romulus1_update_counter(ks->TK1); - if (temp == 16) { - lw_xor_block(S, ad, 16); - } else { - lw_xor_block(S, ad, temp); - S[15] ^= (unsigned char)temp; - } - if (mlen > 16) { - skinny_128_384_encrypt_tk2(ks, S, S, m); - romulus1_update_counter(ks->TK1); - m += 16; - mlen -= 16; - } else if (mlen == 16) { - skinny_128_384_encrypt_tk2(ks, S, S, m); - m += 16; - mlen -= 16; - } else { - temp = (unsigned)mlen; - memcpy(pad, m, temp); - memset(pad + temp, 0, sizeof(pad) - temp - 1); - pad[sizeof(pad) - 1] = (unsigned char)temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - mlen = 0; - } - } - - /* Process all message double blocks except the last */ - romulus1_set_domain(ks, 0x2C); - while (mlen > 32) { - romulus1_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - skinny_128_384_encrypt_tk2(ks, S, S, m + 16); - romulus1_update_counter(ks->TK1); - m += 32; - mlen -= 32; - } - - /* Process the last message double block */ - temp = (unsigned)mlen; - if (temp == 32) { - /* Last message double block is full */ - romulus1_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - skinny_128_384_encrypt_tk2(ks, S, S, m + 16); - } else if (temp > 16) { - /* Last message double block is partial */ - temp -= 16; - romulus1_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(pad, m + 16, temp); - memset(pad + temp, 0, sizeof(pad) - temp - 1); - pad[sizeof(pad) - 1] = (unsigned char)temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - } else if (temp == 16) { - /* Last message single block is full */ - lw_xor_block(S, m, 16); - } else if (temp > 0) { - /* Last message single block is partial */ - lw_xor_block(S, m, temp); - S[15] ^= (unsigned char)temp; - } - - /* Process the last partial block */ - romulus1_set_domain(ks, final_domain); - romulus1_update_counter(ks->TK1); - skinny_128_384_encrypt_tk2(ks, S, S, npub); -} - -/** - * \brief Process the asssociated data for Romulus-M2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - * \param m Points to the message plaintext. - * \param mlen Length of the message plaintext. 
- */ -static void romulus_m2_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *m, unsigned long long mlen) -{ - uint8_t final_domain = 0x70; - unsigned temp; - - /* Determine the domain separator to use on the final block */ - final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 12); - - /* Process all associated data double blocks except the last */ - romulus2_set_domain(ks, 0x68); - while (adlen > 28) { - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Process the last associated data double block */ - temp = (unsigned)adlen; - if (temp == 28) { - /* Last associated data double block is full */ - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - } else if (temp > 16) { - /* Last associated data double block is partial */ - temp -= 16; - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - } else { - /* Last associated data block is single. Needs to be combined - * with the first block of the message payload */ - romulus2_set_domain(ks, 0x6C); - romulus2_update_counter(ks->TK1); - if (temp == 16) { - lw_xor_block(S, ad, 16); - } else { - lw_xor_block(S, ad, temp); - S[15] ^= (unsigned char)temp; - } - if (mlen > 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - m += 12; - mlen -= 12; - } else if (mlen == 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_384_encrypt(ks, S, S); - m += 12; - mlen -= 12; - } else { - temp = (unsigned)mlen; - memcpy(ks->TK1 + 4, m, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_384_encrypt(ks, S, S); - mlen = 0; - } - } - - /* Process all message double blocks except the last */ - romulus2_set_domain(ks, 0x6C); - while (mlen > 28) { - romulus2_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - m += 28; - mlen -= 28; - } - - /* Process the last message double block */ - temp = (unsigned)mlen; - if (temp == 28) { - /* Last message double block is full */ - romulus2_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_384_encrypt(ks, S, S); - } else if (temp > 16) { - /* Last message double block is partial */ - temp -= 16; - romulus2_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_384_encrypt(ks, S, S); - } else if (temp == 16) { - /* Last message single block is full */ - lw_xor_block(S, m, 16); - } else if (temp > 0) { - /* Last message single block is partial */ - lw_xor_block(S, m, temp); - S[15] ^= (unsigned char)temp; - } - - /* Process the last partial block */ - romulus2_set_domain(ks, final_domain); - romulus2_update_counter(ks->TK1); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Process the asssociated data 
for Romulus-M3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - * \param m Points to the message plaintext. - * \param mlen Length of the message plaintext. - */ -static void romulus_m3_process_ad - (skinny_128_256_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *m, unsigned long long mlen) -{ - uint8_t final_domain = 0xB0; - unsigned temp; - - /* Determine the domain separator to use on the final block */ - final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 12); - - /* Process all associated data double blocks except the last */ - romulus3_set_domain(ks, 0xA8); - while (adlen > 28) { - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Process the last associated data double block */ - temp = (unsigned)adlen; - if (temp == 28) { - /* Last associated data double block is full */ - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - } else if (temp > 16) { - /* Last associated data double block is partial */ - temp -= 16; - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - } else { - /* Last associated data block is single. 
Needs to be combined - * with the first block of the message payload */ - romulus3_set_domain(ks, 0xAC); - romulus3_update_counter(ks->TK1); - if (temp == 16) { - lw_xor_block(S, ad, 16); - } else { - lw_xor_block(S, ad, temp); - S[15] ^= (unsigned char)temp; - } - if (mlen > 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - m += 12; - mlen -= 12; - } else if (mlen == 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_256_encrypt(ks, S, S); - m += 12; - mlen -= 12; - } else { - temp = (unsigned)mlen; - memcpy(ks->TK1 + 4, m, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_256_encrypt(ks, S, S); - mlen = 0; - } - } - - /* Process all message double blocks except the last */ - romulus3_set_domain(ks, 0xAC); - while (mlen > 28) { - romulus3_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - m += 28; - mlen -= 28; - } - - /* Process the last message double block */ - temp = (unsigned)mlen; - if (temp == 28) { - /* Last message double block is full */ - romulus3_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_256_encrypt(ks, S, S); - } else if (temp > 16) { - /* Last message double block is partial */ - temp -= 16; - romulus3_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_256_encrypt(ks, S, S); - } else if (temp == 16) { - /* Last message single block is full */ - lw_xor_block(S, m, 16); - } else if (temp > 0) { - /* Last message single block is partial */ - lw_xor_block(S, m, temp); - S[15] ^= (unsigned char)temp; - } - - /* Process the last partial block */ - romulus3_set_domain(ks, final_domain); - romulus3_update_counter(ks->TK1); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Applies the Romulus rho function. - * - * \param S The rolling Romulus state. - * \param C Ciphertext message output block. - * \param M Plaintext message input block. - */ -STATIC_INLINE void romulus_rho - (unsigned char S[16], unsigned char C[16], const unsigned char M[16]) -{ - unsigned index; - for (index = 0; index < 16; ++index) { - unsigned char s = S[index]; - unsigned char m = M[index]; - S[index] ^= m; - C[index] = m ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - } -} - -/** - * \brief Applies the inverse of the Romulus rho function. - * - * \param S The rolling Romulus state. - * \param M Plaintext message output block. - * \param C Ciphertext message input block. - */ -STATIC_INLINE void romulus_rho_inverse - (unsigned char S[16], unsigned char M[16], const unsigned char C[16]) -{ - unsigned index; - for (index = 0; index < 16; ++index) { - unsigned char s = S[index]; - unsigned char m = C[index] ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - S[index] ^= m; - M[index] = m; - } -} - -/** - * \brief Applies the Romulus rho function to a short block. - * - * \param S The rolling Romulus state. - * \param C Ciphertext message output block. - * \param M Plaintext message input block. - * \param len Length of the short block, must be less than 16. 
- */ -STATIC_INLINE void romulus_rho_short - (unsigned char S[16], unsigned char C[16], - const unsigned char M[16], unsigned len) -{ - unsigned index; - for (index = 0; index < len; ++index) { - unsigned char s = S[index]; - unsigned char m = M[index]; - S[index] ^= m; - C[index] = m ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - } - S[15] ^= (unsigned char)len; /* Padding */ -} - -/** - * \brief Applies the inverse of the Romulus rho function to a short block. - * - * \param S The rolling Romulus state. - * \param M Plaintext message output block. - * \param C Ciphertext message input block. - * \param len Length of the short block, must be less than 16. - */ -STATIC_INLINE void romulus_rho_inverse_short - (unsigned char S[16], unsigned char M[16], - const unsigned char C[16], unsigned len) -{ - unsigned index; - for (index = 0; index < len; ++index) { - unsigned char s = S[index]; - unsigned char m = C[index] ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - S[index] ^= m; - M[index] = m; - } - S[15] ^= (unsigned char)len; /* Padding */ -} - -/** - * \brief Encrypts a plaintext message with Romulus-N1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n1_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no plaintext */ - if (mlen == 0) { - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x15); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus1_set_domain(ks, 0x04); - while (mlen > 16) { - romulus_rho(S, c, m); - romulus1_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus1_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_short(S, c, m, temp); - romulus1_set_domain(ks, 0x15); - } else { - romulus_rho(S, c, m); - romulus1_set_domain(ks, 0x14); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-N1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. 
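In romulus_n1_encrypt above, the TK1 domain byte separates the message body (0x04) from the final block, and a padded partial final block shares the 0x15 domain with the empty-message case. A small sketch of that selection logic; the helper name is illustrative and not part of the library:

    /* Illustrative only: mirrors the final-block domain choice in romulus_n1_encrypt. */
    static unsigned char romulus_n1_msg_final_domain(unsigned long long mlen)
    {
        if (mlen == 0)
            return 0x15;            /* empty message */
        if ((mlen % 16) != 0)
            return 0x15;            /* padded partial final block */
        return 0x14;                /* full 16-byte final block */
    }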
- */ -static void romulus_n1_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no ciphertext */ - if (mlen == 0) { - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x15); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus1_set_domain(ks, 0x04); - while (mlen > 16) { - romulus_rho_inverse(S, m, c); - romulus1_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus1_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_inverse_short(S, m, c, temp); - romulus1_set_domain(ks, 0x15); - } else { - romulus_rho_inverse(S, m, c); - romulus1_set_domain(ks, 0x14); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Encrypts a plaintext message with Romulus-N2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n2_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no plaintext */ - if (mlen == 0) { - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x55); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus2_set_domain(ks, 0x44); - while (mlen > 16) { - romulus_rho(S, c, m); - romulus2_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus2_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_short(S, c, m, temp); - romulus2_set_domain(ks, 0x55); - } else { - romulus_rho(S, c, m); - romulus2_set_domain(ks, 0x54); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-N2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n2_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no ciphertext */ - if (mlen == 0) { - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x55); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus2_set_domain(ks, 0x44); - while (mlen > 16) { - romulus_rho_inverse(S, m, c); - romulus2_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus2_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_inverse_short(S, m, c, temp); - romulus2_set_domain(ks, 0x55); - } else { - romulus_rho_inverse(S, m, c); - romulus2_set_domain(ks, 0x54); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Encrypts a plaintext message with Romulus-N3. - * - * \param ks Points to the key schedule. 
- * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n3_encrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no plaintext */ - if (mlen == 0) { - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x95); - skinny_128_256_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus3_set_domain(ks, 0x84); - while (mlen > 16) { - romulus_rho(S, c, m); - romulus3_update_counter(ks->TK1); - skinny_128_256_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus3_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_short(S, c, m, temp); - romulus3_set_domain(ks, 0x95); - } else { - romulus_rho(S, c, m); - romulus3_set_domain(ks, 0x94); - } - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-N3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n3_decrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no ciphertext */ - if (mlen == 0) { - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x95); - skinny_128_256_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus3_set_domain(ks, 0x84); - while (mlen > 16) { - romulus_rho_inverse(S, m, c); - romulus3_update_counter(ks->TK1); - skinny_128_256_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus3_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_inverse_short(S, m, c, temp); - romulus3_set_domain(ks, 0x95); - } else { - romulus_rho_inverse(S, m, c); - romulus3_set_domain(ks, 0x94); - } - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Encrypts a plaintext message with Romulus-M1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m1_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus1_set_domain(ks, 0x24); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho(S, c, m); - romulus1_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_short(S, c, m, (unsigned)mlen); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-M1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. 
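Collected from the Romulus-N message-processing functions above, the per-variant domain-separation bytes follow one pattern; the enum below is an informal summary for reference only, not library API:

    /* Illustrative summary of the domain bytes used by the functions above. */
    enum {
        ROMULUS_N1_MSG_BODY    = 0x04,  /* non-final message blocks            */
        ROMULUS_N1_MSG_FULL    = 0x14,  /* final block of exactly 16 bytes     */
        ROMULUS_N1_MSG_PARTIAL = 0x15,  /* padded final block or empty message */

        ROMULUS_N2_MSG_BODY    = 0x44,
        ROMULUS_N2_MSG_FULL    = 0x54,
        ROMULUS_N2_MSG_PARTIAL = 0x55,

        ROMULUS_N3_MSG_BODY    = 0x84,
        ROMULUS_N3_MSG_FULL    = 0x94,
        ROMULUS_N3_MSG_PARTIAL = 0x95
    };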
- * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m1_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus1_set_domain(ks, 0x24); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse(S, m, c); - romulus1_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse_short(S, m, c, (unsigned)mlen); -} - -/** - * \brief Encrypts a plaintext message with Romulus-M2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m2_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus2_set_domain(ks, 0x64); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho(S, c, m); - romulus2_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_short(S, c, m, (unsigned)mlen); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-M2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m2_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus2_set_domain(ks, 0x64); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse(S, m, c); - romulus2_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse_short(S, m, c, (unsigned)mlen); -} - -/** - * \brief Encrypts a plaintext message with Romulus-M3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m3_encrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus3_set_domain(ks, 0xA4); - while (mlen > 16) { - skinny_128_256_encrypt(ks, S, S); - romulus_rho(S, c, m); - romulus3_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_256_encrypt(ks, S, S); - romulus_rho_short(S, c, m, (unsigned)mlen); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-M3. 
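The Romulus-M message passes above differ from the N variants in two visible ways: the state goes through the block cipher before rho absorbs each block, and a single body domain is used for every block, since the tag computed beforehand already covers the associated data and message. Summarising those body domains (the names are illustrative only):

    /* Illustrative summary of the Romulus-M encryption-pass domains above. */
    enum {
        ROMULUS_M1_ENC_BODY = 0x24,   /* SKINNY-128-384, 128-bit nonce */
        ROMULUS_M2_ENC_BODY = 0x64,   /* SKINNY-128-384,  96-bit nonce */
        ROMULUS_M3_ENC_BODY = 0xA4    /* SKINNY-128-256,  96-bit nonce */
    };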
- * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m3_decrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus3_set_domain(ks, 0xA4); - while (mlen > 16) { - skinny_128_256_encrypt(ks, S, S); - romulus_rho_inverse(S, m, c); - romulus3_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_256_encrypt(ks, S, S); - romulus_rho_inverse_short(S, m, c, (unsigned)mlen); -} - -/** - * \brief Generates the authentication tag from the rolling Romulus state. - * - * \param T Buffer to receive the generated tag; can be the same as S. - * \param S The rolling Romulus state. - */ -STATIC_INLINE void romulus_generate_tag - (unsigned char T[16], const unsigned char S[16]) -{ - unsigned index; - for (index = 0; index < 16; ++index) { - unsigned char s = S[index]; - T[index] = (s >> 1) ^ (s & 0x80) ^ (s << 7); - } -} - -int romulus_n1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n1_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Encrypts the plaintext to produce the ciphertext */ - romulus_n1_encrypt(&ks, S, c, m, mlen); - - /* Generate the authentication tag */ - romulus_generate_tag(c + mlen, S); - return 0; -} - -int romulus_n1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n1_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= ROMULUS_TAG_SIZE; - romulus_n1_decrypt(&ks, S, m, c, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_n2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n2_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Encrypts the plaintext to produce the ciphertext */ - romulus_n2_encrypt(&ks, S, c, m, mlen); - - /* Generate the authentication tag */ - romulus_generate_tag(c + mlen, S); - return 0; -} - -int romulus_n2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n2_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= ROMULUS_TAG_SIZE; - romulus_n2_decrypt(&ks, S, m, c, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_n3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n3_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Encrypts the plaintext to produce the ciphertext */ - romulus_n3_encrypt(&ks, S, c, m, mlen); - - /* Generate the authentication tag */ - romulus_generate_tag(c + mlen, S); - return 0; -} - -int romulus_n3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n3_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= ROMULUS_TAG_SIZE; - romulus_n3_decrypt(&ks, S, m, c, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_m1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data and the plaintext message */ - memset(S, 0, sizeof(S)); - romulus_m1_process_ad(&ks, S, npub, ad, adlen, m, mlen); - - /* Generate the authentication tag, which is also the initialization - * vector for the encryption portion of the packet processing */ - romulus_generate_tag(S, S); - memcpy(c + mlen, S, ROMULUS_TAG_SIZE); - - /* Re-initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Encrypt the plaintext to produce the ciphertext */ - romulus_m1_encrypt(&ks, S, c, m, mlen); - return 0; -} - -int romulus_m1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext, using the - * authentication tag as the initialization vector for decryption */ - clen -= ROMULUS_TAG_SIZE; - memcpy(S, c + clen, ROMULUS_TAG_SIZE); - romulus_m1_decrypt(&ks, S, m, c, clen); - - /* Re-initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_m1_process_ad(&ks, S, npub, ad, adlen, m, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_m2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data and the plaintext message */ - memset(S, 0, sizeof(S)); - romulus_m2_process_ad(&ks, S, npub, ad, adlen, m, mlen); - - /* Generate the authentication tag, which is also the initialization - * vector for the encryption portion of the packet processing */ - romulus_generate_tag(S, S); - memcpy(c + mlen, S, ROMULUS_TAG_SIZE); - - /* Re-initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Encrypt the plaintext to produce the ciphertext */ - romulus_m2_encrypt(&ks, S, c, m, mlen); - return 0; -} - -int romulus_m2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext, using the - * authentication tag as the initialization vector for decryption */ - clen -= ROMULUS_TAG_SIZE; - memcpy(S, c + clen, ROMULUS_TAG_SIZE); - romulus_m2_decrypt(&ks, S, m, c, clen); - - /* Re-initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_m2_process_ad(&ks, S, npub, ad, adlen, m, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_m3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data and the plaintext message */ - memset(S, 0, sizeof(S)); - romulus_m3_process_ad(&ks, S, npub, ad, adlen, m, mlen); - - /* Generate the authentication tag, which is also the initialization - * vector for the encryption portion of the packet processing */ - romulus_generate_tag(S, S); - memcpy(c + mlen, S, ROMULUS_TAG_SIZE); - - /* Re-initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Encrypt the plaintext to produce the ciphertext */ - romulus_m3_encrypt(&ks, S, c, m, mlen); - return 0; -} - -int romulus_m3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext, using the - * authentication tag as the initialization vector for decryption */ - clen -= ROMULUS_TAG_SIZE; - memcpy(S, c + clen, ROMULUS_TAG_SIZE); - romulus_m3_decrypt(&ks, S, m, c, clen); - - /* Re-initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_m3_process_ad(&ks, S, npub, ad, adlen, m, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} diff --git a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/romulus.h b/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/romulus.h deleted file mode 100644 index e6da29d..0000000 --- a/romulus/Implementations/crypto_aead/romulusm2/rhys-avr/romulus.h +++ /dev/null @@ -1,476 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ROMULUS_H -#define LWCRYPTO_ROMULUS_H - -#include "aead-common.h" - -/** - * \file romulus.h - * \brief Romulus authenticated encryption algorithm family. - * - * Romulus is a family of authenticated encryption algorithms that - * are built around the SKINNY-128 tweakable block cipher. There - * are six members in the family: - * - * \li Romulus-N1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. This is the - * primary member of the family. - * \li Romulus-N2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li Romulus-N3 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * \li Romulus-M1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li Romulus-M2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li Romulus-M3 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * - * The Romulus-M variants are resistant to nonce reuse as long as the - * combination of the associated data and plaintext is unique. If the - * same associated data and plaintext are reused under the same nonce, - * then the scheme will leak that the same plaintext has been sent for a - * second time but will not reveal the plaintext itself. - * - * References: https://romulusae.github.io/romulus/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all Romulus family members. - */ -#define ROMULUS_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for all Romulus family members. - */ -#define ROMULUS_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Romulus-N1 and Romulus-M1. - */ -#define ROMULUS1_NONCE_SIZE 16 - -/** - * \brief Size of the nonce for Romulus-N2 and Romulus-M2. - */ -#define ROMULUS2_NONCE_SIZE 12 - -/** - * \brief Size of the nonce for Romulus-N3 and Romulus-M3. - */ -#define ROMULUS3_NONCE_SIZE 12 - -/** - * \brief Meta-information block for the Romulus-N1 cipher. - */ -extern aead_cipher_t const romulus_n1_cipher; - -/** - * \brief Meta-information block for the Romulus-N2 cipher. 
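Given the size macros above, a caller only needs to budget ROMULUS_TAG_SIZE extra bytes of ciphertext and pick the nonce length that matches the variant. A short sizing sketch, with purely illustrative buffer and function names:

    #include <stdlib.h>
    #include "romulus.h"

    unsigned char key[ROMULUS_KEY_SIZE];        /* 16 bytes for every variant */
    unsigned char nonce1[ROMULUS1_NONCE_SIZE];  /* 16 bytes for Romulus-N1/M1 */
    unsigned char nonce3[ROMULUS3_NONCE_SIZE];  /* 12 bytes for Romulus-N3/M3 */

    unsigned char *alloc_ciphertext(unsigned long long mlen)
    {
        /* Ciphertext is the encrypted message followed by the 16-byte tag. */
        return (unsigned char *)malloc((size_t)(mlen + ROMULUS_TAG_SIZE));
    }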
- */ -extern aead_cipher_t const romulus_n2_cipher; - -/** - * \brief Meta-information block for the Romulus-N3 cipher. - */ -extern aead_cipher_t const romulus_n3_cipher; - -/** - * \brief Meta-information block for the Romulus-M1 cipher. - */ -extern aead_cipher_t const romulus_m1_cipher; - -/** - * \brief Meta-information block for the Romulus-M2 cipher. - */ -extern aead_cipher_t const romulus_m2_cipher; - -/** - * \brief Meta-information block for the Romulus-M3 cipher. - */ -extern aead_cipher_t const romulus_m3_cipher; - -/** - * \brief Encrypts and authenticates a packet with Romulus-N1. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_n1_aead_decrypt() - */ -int romulus_n1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-N1. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_n1_aead_encrypt() - */ -int romulus_n1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-N2. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
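A minimal usage sketch for the Romulus-N1 encrypt/decrypt pair documented above; the key, nonce, message and associated-data values are arbitrary placeholders:

    #include <stdio.h>
    #include <string.h>
    #include "romulus.h"

    int main(void)
    {
        unsigned char key[ROMULUS_KEY_SIZE] = {0};      /* placeholder key   */
        unsigned char npub[ROMULUS1_NONCE_SIZE] = {0};  /* placeholder nonce */
        unsigned char msg[] = "attack at dawn";
        unsigned char ad[] = "header";
        unsigned char c[sizeof(msg) + ROMULUS_TAG_SIZE];
        unsigned char out[sizeof(msg)];
        unsigned long long clen, mlen;

        if (romulus_n1_aead_encrypt(c, &clen, msg, sizeof(msg),
                                    ad, sizeof(ad), NULL, npub, key) != 0)
            return 1;
        /* clen is now sizeof(msg) + ROMULUS_TAG_SIZE */

        if (romulus_n1_aead_decrypt(out, &mlen, NULL, c, clen,
                                    ad, sizeof(ad), npub, key) != 0)
            return 1;                   /* -1 would signal a bad tag */

        printf("%s\n", memcmp(out, msg, (size_t)mlen) == 0 ? "verified" : "mismatch");
        return 0;
    }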
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_n2_aead_decrypt() - */ -int romulus_n2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-N2. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_n2_aead_encrypt() - */ -int romulus_n2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-N3. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_n3_aead_decrypt() - */ -int romulus_n3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-N3. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. 
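The same interface also covers authentication-only packets: passing a zero-length message still produces, and later verifies, a 16-byte tag over the associated data. A hedged sketch using Romulus-N3 and its 12-byte nonce (the wrapper name is illustrative, not library API):

    #include <stddef.h>
    #include "romulus.h"

    /* Illustrative: authenticate associated data only, with no payload. */
    int tag_ad_only(unsigned char tag[ROMULUS_TAG_SIZE],
                    const unsigned char *ad, unsigned long long adlen,
                    const unsigned char npub[ROMULUS3_NONCE_SIZE],
                    const unsigned char key[ROMULUS_KEY_SIZE])
    {
        unsigned long long clen;
        /* With mlen == 0 the "ciphertext" output is just the tag. */
        return romulus_n3_aead_encrypt(tag, &clen, NULL, 0,
                                       ad, adlen, NULL, npub, key);
    }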
- * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_n3_aead_encrypt() - */ -int romulus_n3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-M1. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_m1_aead_decrypt() - */ -int romulus_m1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-M1. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_m1_aead_encrypt() - */ -int romulus_m1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-M2. - * - * \param c Buffer to receive the output. 
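Because the Romulus-M1 tag is computed over the nonce, associated data and full plaintext before encryption, the cipher is deterministic: repeating a call with identical inputs yields byte-identical output, which is what bounds the damage of an accidental nonce reuse as described earlier in this header. A small sketch with placeholder values and an illustrative helper name:

    #include <string.h>
    #include "romulus.h"

    /* Illustrative check that Romulus-M1 is deterministic for fixed inputs. */
    int m1_is_deterministic(void)
    {
        unsigned char key[ROMULUS_KEY_SIZE] = {1};
        unsigned char npub[ROMULUS1_NONCE_SIZE] = {2};
        unsigned char m[4] = {'t', 'e', 's', 't'};
        unsigned char ad[2] = {'h', 'i'};
        unsigned char c1[sizeof(m) + ROMULUS_TAG_SIZE];
        unsigned char c2[sizeof(m) + ROMULUS_TAG_SIZE];
        unsigned long long clen1, clen2;

        romulus_m1_aead_encrypt(c1, &clen1, m, sizeof(m), ad, sizeof(ad),
                                NULL, npub, key);
        romulus_m1_aead_encrypt(c2, &clen2, m, sizeof(m), ad, sizeof(ad),
                                NULL, npub, key);
        return clen1 == clen2 && memcmp(c1, c2, (size_t)clen1) == 0;  /* 1 */
    }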
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_m2_aead_decrypt() - */ -int romulus_m2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-M2. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_m2_aead_encrypt() - */ -int romulus_m2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-M3. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
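The return conventions documented above are shared by every variant in this header. A small illustrative mapping of the codes seen in this implementation (the helper and its strings are not part of the library; -2 is the specific "data too large" value used by the functions earlier in this patch):

    /* Illustrative interpretation of the romulus_*_aead_* return values. */
    static const char *romulus_strerror(int err)
    {
        switch (err) {
        case 0:  return "ok";
        case -1: return "authentication failed or ciphertext too short";
        case -2: return "associated data or message exceeds the data limit";
        default: return "invalid parameters";
        }
    }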
- * - * \sa romulus_m3_aead_decrypt() - */ -int romulus_m3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-M3. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_m3_aead_encrypt() - */ -int romulus_m3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusm2/rhys/internal-skinny128-avr.S b/romulus/Implementations/crypto_aead/romulusm2/rhys/internal-skinny128-avr.S new file mode 100644 index 0000000..d342cd5 --- /dev/null +++ b/romulus/Implementations/crypto_aead/romulusm2/rhys/internal-skinny128-avr.S @@ -0,0 +1,10099 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 256 +table_0: + .byte 101 + .byte 76 + .byte 106 + .byte 66 + .byte 75 + .byte 99 + .byte 67 + .byte 107 + .byte 85 + .byte 117 + .byte 90 + .byte 122 + .byte 83 + .byte 115 + .byte 91 + .byte 123 + .byte 53 + .byte 140 + .byte 58 + .byte 129 + .byte 137 + .byte 51 + .byte 128 + .byte 59 + .byte 149 + .byte 37 + .byte 152 + .byte 42 + .byte 144 + .byte 35 + .byte 153 + .byte 43 + .byte 229 + .byte 204 + .byte 232 + .byte 193 + .byte 201 + .byte 224 + .byte 192 + .byte 233 + .byte 213 + .byte 245 + .byte 216 + .byte 248 + .byte 208 + .byte 240 + .byte 217 + .byte 249 + .byte 165 + .byte 28 + .byte 168 + .byte 18 + .byte 27 + .byte 160 + .byte 19 + .byte 169 + .byte 5 + .byte 181 + .byte 10 + .byte 184 + .byte 3 + .byte 176 + .byte 11 + .byte 185 + .byte 50 + .byte 136 + .byte 60 + .byte 133 + .byte 141 + .byte 52 + .byte 132 + .byte 61 + .byte 145 + .byte 34 + .byte 156 + .byte 44 + .byte 148 + .byte 36 + .byte 157 + .byte 45 + .byte 98 + .byte 74 + .byte 108 + .byte 69 + .byte 77 + .byte 100 + .byte 68 + .byte 109 + .byte 82 + .byte 114 + .byte 92 + .byte 124 + .byte 84 + .byte 116 + .byte 93 + .byte 125 + .byte 161 + .byte 26 + .byte 172 + .byte 21 + .byte 29 + .byte 164 + .byte 20 + .byte 173 + .byte 2 + .byte 177 + .byte 12 + .byte 188 + .byte 4 + .byte 180 + .byte 13 + .byte 189 + .byte 225 + .byte 200 + .byte 236 + .byte 197 + .byte 205 + .byte 228 + .byte 196 + .byte 237 + .byte 
209 + .byte 241 + .byte 220 + .byte 252 + .byte 212 + .byte 244 + .byte 221 + .byte 253 + .byte 54 + .byte 142 + .byte 56 + .byte 130 + .byte 139 + .byte 48 + .byte 131 + .byte 57 + .byte 150 + .byte 38 + .byte 154 + .byte 40 + .byte 147 + .byte 32 + .byte 155 + .byte 41 + .byte 102 + .byte 78 + .byte 104 + .byte 65 + .byte 73 + .byte 96 + .byte 64 + .byte 105 + .byte 86 + .byte 118 + .byte 88 + .byte 120 + .byte 80 + .byte 112 + .byte 89 + .byte 121 + .byte 166 + .byte 30 + .byte 170 + .byte 17 + .byte 25 + .byte 163 + .byte 16 + .byte 171 + .byte 6 + .byte 182 + .byte 8 + .byte 186 + .byte 0 + .byte 179 + .byte 9 + .byte 187 + .byte 230 + .byte 206 + .byte 234 + .byte 194 + .byte 203 + .byte 227 + .byte 195 + .byte 235 + .byte 214 + .byte 246 + .byte 218 + .byte 250 + .byte 211 + .byte 243 + .byte 219 + .byte 251 + .byte 49 + .byte 138 + .byte 62 + .byte 134 + .byte 143 + .byte 55 + .byte 135 + .byte 63 + .byte 146 + .byte 33 + .byte 158 + .byte 46 + .byte 151 + .byte 39 + .byte 159 + .byte 47 + .byte 97 + .byte 72 + .byte 110 + .byte 70 + .byte 79 + .byte 103 + .byte 71 + .byte 111 + .byte 81 + .byte 113 + .byte 94 + .byte 126 + .byte 87 + .byte 119 + .byte 95 + .byte 127 + .byte 162 + .byte 24 + .byte 174 + .byte 22 + .byte 31 + .byte 167 + .byte 23 + .byte 175 + .byte 1 + .byte 178 + .byte 14 + .byte 190 + .byte 7 + .byte 183 + .byte 15 + .byte 191 + .byte 226 + .byte 202 + .byte 238 + .byte 198 + .byte 207 + .byte 231 + .byte 199 + .byte 239 + .byte 210 + .byte 242 + .byte 222 + .byte 254 + .byte 215 + .byte 247 + .byte 223 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 256 +table_1: + .byte 172 + .byte 232 + .byte 104 + .byte 60 + .byte 108 + .byte 56 + .byte 168 + .byte 236 + .byte 170 + .byte 174 + .byte 58 + .byte 62 + .byte 106 + .byte 110 + .byte 234 + .byte 238 + .byte 166 + .byte 163 + .byte 51 + .byte 54 + .byte 102 + .byte 99 + .byte 227 + .byte 230 + .byte 225 + .byte 164 + .byte 97 + .byte 52 + .byte 49 + .byte 100 + .byte 161 + .byte 228 + .byte 141 + .byte 201 + .byte 73 + .byte 29 + .byte 77 + .byte 25 + .byte 137 + .byte 205 + .byte 139 + .byte 143 + .byte 27 + .byte 31 + .byte 75 + .byte 79 + .byte 203 + .byte 207 + .byte 133 + .byte 192 + .byte 64 + .byte 21 + .byte 69 + .byte 16 + .byte 128 + .byte 197 + .byte 130 + .byte 135 + .byte 18 + .byte 23 + .byte 66 + .byte 71 + .byte 194 + .byte 199 + .byte 150 + .byte 147 + .byte 3 + .byte 6 + .byte 86 + .byte 83 + .byte 211 + .byte 214 + .byte 209 + .byte 148 + .byte 81 + .byte 4 + .byte 1 + .byte 84 + .byte 145 + .byte 212 + .byte 156 + .byte 216 + .byte 88 + .byte 12 + .byte 92 + .byte 8 + .byte 152 + .byte 220 + .byte 154 + .byte 158 + .byte 10 + .byte 14 + .byte 90 + .byte 94 + .byte 218 + .byte 222 + .byte 149 + .byte 208 + .byte 80 + .byte 5 + .byte 85 + .byte 0 + .byte 144 + .byte 213 + .byte 146 + .byte 151 + .byte 2 + .byte 7 + .byte 82 + .byte 87 + .byte 210 + .byte 215 + .byte 157 + .byte 217 + .byte 89 + .byte 13 + .byte 93 + .byte 9 + .byte 153 + .byte 221 + .byte 155 + .byte 159 + .byte 11 + .byte 15 + .byte 91 + .byte 95 + .byte 219 + .byte 223 + .byte 22 + .byte 19 + .byte 131 + .byte 134 + .byte 70 + .byte 67 + .byte 195 + .byte 198 + .byte 65 + .byte 20 + .byte 193 + .byte 132 + .byte 17 + .byte 68 + .byte 129 + .byte 196 + .byte 28 + .byte 72 + .byte 200 + .byte 140 + .byte 76 + .byte 24 + .byte 136 + .byte 204 + .byte 26 + .byte 30 + .byte 138 + .byte 142 + .byte 74 + .byte 78 + .byte 202 + .byte 206 + .byte 53 + .byte 96 + .byte 224 + 
.byte 165 + .byte 101 + .byte 48 + .byte 160 + .byte 229 + .byte 50 + .byte 55 + .byte 162 + .byte 167 + .byte 98 + .byte 103 + .byte 226 + .byte 231 + .byte 61 + .byte 105 + .byte 233 + .byte 173 + .byte 109 + .byte 57 + .byte 169 + .byte 237 + .byte 59 + .byte 63 + .byte 171 + .byte 175 + .byte 107 + .byte 111 + .byte 235 + .byte 239 + .byte 38 + .byte 35 + .byte 179 + .byte 182 + .byte 118 + .byte 115 + .byte 243 + .byte 246 + .byte 113 + .byte 36 + .byte 241 + .byte 180 + .byte 33 + .byte 116 + .byte 177 + .byte 244 + .byte 44 + .byte 120 + .byte 248 + .byte 188 + .byte 124 + .byte 40 + .byte 184 + .byte 252 + .byte 42 + .byte 46 + .byte 186 + .byte 190 + .byte 122 + .byte 126 + .byte 250 + .byte 254 + .byte 37 + .byte 112 + .byte 240 + .byte 181 + .byte 117 + .byte 32 + .byte 176 + .byte 245 + .byte 34 + .byte 39 + .byte 178 + .byte 183 + .byte 114 + .byte 119 + .byte 242 + .byte 247 + .byte 45 + .byte 121 + .byte 249 + .byte 189 + .byte 125 + .byte 41 + .byte 185 + .byte 253 + .byte 43 + .byte 47 + .byte 187 + .byte 191 + .byte 123 + .byte 127 + .byte 251 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_2, @object + .size table_2, 256 +table_2: + .byte 0 + .byte 2 + .byte 4 + .byte 6 + .byte 8 + .byte 10 + .byte 12 + .byte 14 + .byte 16 + .byte 18 + .byte 20 + .byte 22 + .byte 24 + .byte 26 + .byte 28 + .byte 30 + .byte 32 + .byte 34 + .byte 36 + .byte 38 + .byte 40 + .byte 42 + .byte 44 + .byte 46 + .byte 48 + .byte 50 + .byte 52 + .byte 54 + .byte 56 + .byte 58 + .byte 60 + .byte 62 + .byte 65 + .byte 67 + .byte 69 + .byte 71 + .byte 73 + .byte 75 + .byte 77 + .byte 79 + .byte 81 + .byte 83 + .byte 85 + .byte 87 + .byte 89 + .byte 91 + .byte 93 + .byte 95 + .byte 97 + .byte 99 + .byte 101 + .byte 103 + .byte 105 + .byte 107 + .byte 109 + .byte 111 + .byte 113 + .byte 115 + .byte 117 + .byte 119 + .byte 121 + .byte 123 + .byte 125 + .byte 127 + .byte 128 + .byte 130 + .byte 132 + .byte 134 + .byte 136 + .byte 138 + .byte 140 + .byte 142 + .byte 144 + .byte 146 + .byte 148 + .byte 150 + .byte 152 + .byte 154 + .byte 156 + .byte 158 + .byte 160 + .byte 162 + .byte 164 + .byte 166 + .byte 168 + .byte 170 + .byte 172 + .byte 174 + .byte 176 + .byte 178 + .byte 180 + .byte 182 + .byte 184 + .byte 186 + .byte 188 + .byte 190 + .byte 193 + .byte 195 + .byte 197 + .byte 199 + .byte 201 + .byte 203 + .byte 205 + .byte 207 + .byte 209 + .byte 211 + .byte 213 + .byte 215 + .byte 217 + .byte 219 + .byte 221 + .byte 223 + .byte 225 + .byte 227 + .byte 229 + .byte 231 + .byte 233 + .byte 235 + .byte 237 + .byte 239 + .byte 241 + .byte 243 + .byte 245 + .byte 247 + .byte 249 + .byte 251 + .byte 253 + .byte 255 + .byte 1 + .byte 3 + .byte 5 + .byte 7 + .byte 9 + .byte 11 + .byte 13 + .byte 15 + .byte 17 + .byte 19 + .byte 21 + .byte 23 + .byte 25 + .byte 27 + .byte 29 + .byte 31 + .byte 33 + .byte 35 + .byte 37 + .byte 39 + .byte 41 + .byte 43 + .byte 45 + .byte 47 + .byte 49 + .byte 51 + .byte 53 + .byte 55 + .byte 57 + .byte 59 + .byte 61 + .byte 63 + .byte 64 + .byte 66 + .byte 68 + .byte 70 + .byte 72 + .byte 74 + .byte 76 + .byte 78 + .byte 80 + .byte 82 + .byte 84 + .byte 86 + .byte 88 + .byte 90 + .byte 92 + .byte 94 + .byte 96 + .byte 98 + .byte 100 + .byte 102 + .byte 104 + .byte 106 + .byte 108 + .byte 110 + .byte 112 + .byte 114 + .byte 116 + .byte 118 + .byte 120 + .byte 122 + .byte 124 + .byte 126 + .byte 129 + .byte 131 + .byte 133 + .byte 135 + .byte 137 + .byte 139 + .byte 141 + .byte 143 + .byte 145 + .byte 147 + .byte 149 + .byte 151 + .byte 153 + 
.byte 155 + .byte 157 + .byte 159 + .byte 161 + .byte 163 + .byte 165 + .byte 167 + .byte 169 + .byte 171 + .byte 173 + .byte 175 + .byte 177 + .byte 179 + .byte 181 + .byte 183 + .byte 185 + .byte 187 + .byte 189 + .byte 191 + .byte 192 + .byte 194 + .byte 196 + .byte 198 + .byte 200 + .byte 202 + .byte 204 + .byte 206 + .byte 208 + .byte 210 + .byte 212 + .byte 214 + .byte 216 + .byte 218 + .byte 220 + .byte 222 + .byte 224 + .byte 226 + .byte 228 + .byte 230 + .byte 232 + .byte 234 + .byte 236 + .byte 238 + .byte 240 + .byte 242 + .byte 244 + .byte 246 + .byte 248 + .byte 250 + .byte 252 + .byte 254 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_3, @object + .size table_3, 256 +table_3: + .byte 0 + .byte 128 + .byte 1 + .byte 129 + .byte 2 + .byte 130 + .byte 3 + .byte 131 + .byte 4 + .byte 132 + .byte 5 + .byte 133 + .byte 6 + .byte 134 + .byte 7 + .byte 135 + .byte 8 + .byte 136 + .byte 9 + .byte 137 + .byte 10 + .byte 138 + .byte 11 + .byte 139 + .byte 12 + .byte 140 + .byte 13 + .byte 141 + .byte 14 + .byte 142 + .byte 15 + .byte 143 + .byte 16 + .byte 144 + .byte 17 + .byte 145 + .byte 18 + .byte 146 + .byte 19 + .byte 147 + .byte 20 + .byte 148 + .byte 21 + .byte 149 + .byte 22 + .byte 150 + .byte 23 + .byte 151 + .byte 24 + .byte 152 + .byte 25 + .byte 153 + .byte 26 + .byte 154 + .byte 27 + .byte 155 + .byte 28 + .byte 156 + .byte 29 + .byte 157 + .byte 30 + .byte 158 + .byte 31 + .byte 159 + .byte 160 + .byte 32 + .byte 161 + .byte 33 + .byte 162 + .byte 34 + .byte 163 + .byte 35 + .byte 164 + .byte 36 + .byte 165 + .byte 37 + .byte 166 + .byte 38 + .byte 167 + .byte 39 + .byte 168 + .byte 40 + .byte 169 + .byte 41 + .byte 170 + .byte 42 + .byte 171 + .byte 43 + .byte 172 + .byte 44 + .byte 173 + .byte 45 + .byte 174 + .byte 46 + .byte 175 + .byte 47 + .byte 176 + .byte 48 + .byte 177 + .byte 49 + .byte 178 + .byte 50 + .byte 179 + .byte 51 + .byte 180 + .byte 52 + .byte 181 + .byte 53 + .byte 182 + .byte 54 + .byte 183 + .byte 55 + .byte 184 + .byte 56 + .byte 185 + .byte 57 + .byte 186 + .byte 58 + .byte 187 + .byte 59 + .byte 188 + .byte 60 + .byte 189 + .byte 61 + .byte 190 + .byte 62 + .byte 191 + .byte 63 + .byte 64 + .byte 192 + .byte 65 + .byte 193 + .byte 66 + .byte 194 + .byte 67 + .byte 195 + .byte 68 + .byte 196 + .byte 69 + .byte 197 + .byte 70 + .byte 198 + .byte 71 + .byte 199 + .byte 72 + .byte 200 + .byte 73 + .byte 201 + .byte 74 + .byte 202 + .byte 75 + .byte 203 + .byte 76 + .byte 204 + .byte 77 + .byte 205 + .byte 78 + .byte 206 + .byte 79 + .byte 207 + .byte 80 + .byte 208 + .byte 81 + .byte 209 + .byte 82 + .byte 210 + .byte 83 + .byte 211 + .byte 84 + .byte 212 + .byte 85 + .byte 213 + .byte 86 + .byte 214 + .byte 87 + .byte 215 + .byte 88 + .byte 216 + .byte 89 + .byte 217 + .byte 90 + .byte 218 + .byte 91 + .byte 219 + .byte 92 + .byte 220 + .byte 93 + .byte 221 + .byte 94 + .byte 222 + .byte 95 + .byte 223 + .byte 224 + .byte 96 + .byte 225 + .byte 97 + .byte 226 + .byte 98 + .byte 227 + .byte 99 + .byte 228 + .byte 100 + .byte 229 + .byte 101 + .byte 230 + .byte 102 + .byte 231 + .byte 103 + .byte 232 + .byte 104 + .byte 233 + .byte 105 + .byte 234 + .byte 106 + .byte 235 + .byte 107 + .byte 236 + .byte 108 + .byte 237 + .byte 109 + .byte 238 + .byte 110 + .byte 239 + .byte 111 + .byte 240 + .byte 112 + .byte 241 + .byte 113 + .byte 242 + .byte 114 + .byte 243 + .byte 115 + .byte 244 + .byte 116 + .byte 245 + .byte 117 + .byte 246 + .byte 118 + .byte 247 + .byte 119 + .byte 248 + .byte 120 + .byte 249 + .byte 121 + .byte 250 + 
.byte 122 + .byte 251 + .byte 123 + .byte 252 + .byte 124 + .byte 253 + .byte 125 + .byte 254 + .byte 126 + .byte 255 + .byte 127 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_4, @object + .size table_4, 112 +table_4: + .byte 1 + .byte 0 + .byte 3 + .byte 0 + .byte 7 + .byte 0 + .byte 15 + .byte 0 + .byte 15 + .byte 1 + .byte 14 + .byte 3 + .byte 13 + .byte 3 + .byte 11 + .byte 3 + .byte 7 + .byte 3 + .byte 15 + .byte 2 + .byte 14 + .byte 1 + .byte 12 + .byte 3 + .byte 9 + .byte 3 + .byte 3 + .byte 3 + .byte 7 + .byte 2 + .byte 14 + .byte 0 + .byte 13 + .byte 1 + .byte 10 + .byte 3 + .byte 5 + .byte 3 + .byte 11 + .byte 2 + .byte 6 + .byte 1 + .byte 12 + .byte 2 + .byte 8 + .byte 1 + .byte 0 + .byte 3 + .byte 1 + .byte 2 + .byte 2 + .byte 0 + .byte 5 + .byte 0 + .byte 11 + .byte 0 + .byte 7 + .byte 1 + .byte 14 + .byte 2 + .byte 12 + .byte 1 + .byte 8 + .byte 3 + .byte 1 + .byte 3 + .byte 3 + .byte 2 + .byte 6 + .byte 0 + .byte 13 + .byte 0 + .byte 11 + .byte 1 + .byte 6 + .byte 3 + .byte 13 + .byte 2 + .byte 10 + .byte 1 + .byte 4 + .byte 3 + .byte 9 + .byte 2 + .byte 2 + .byte 1 + .byte 4 + .byte 2 + .byte 8 + .byte 0 + .byte 1 + .byte 1 + .byte 2 + .byte 2 + .byte 4 + .byte 0 + .byte 9 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 2 + .byte 12 + .byte 0 + .byte 9 + .byte 1 + .byte 2 + .byte 3 + .byte 5 + .byte 2 + .byte 10 + .byte 0 + + .text +.global skinny_128_384_init + .type skinny_128_384_init, @function +skinny_128_384_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,12 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_384_init, .-skinny_128_384_init + + .text +.global skinny_128_384_encrypt + .type skinny_128_384_encrypt, @function +skinny_128_384_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + std Y+33,r18 + std Y+34,r19 + std Y+35,r20 + std Y+36,r21 + ldd r18,Z+36 + ldd r19,Z+37 + ldd r20,Z+38 + ldd r21,Z+39 + std Y+37,r18 + std Y+38,r19 + std Y+39,r20 + std Y+40,r21 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + std Y+41,r18 + std Y+42,r19 + std Y+43,r20 + std Y+44,r21 + ldd r18,Z+44 + ldd r19,Z+45 + ldd r20,Z+46 + ldd 
r21,Z+47 + std Y+45,r18 + std Y+46,r19 + std Y+47,r20 + std Y+48,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +114: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif 
+ eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 
+#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov 
r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm 
r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov 
r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 
+#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,112 + brne 5721f + rjmp 790f +5721: + 
ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + 
std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 114b +790: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_encrypt, .-skinny_128_384_encrypt + +.global skinny_128_384_encrypt_tk_full + .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt + + .text +.global skinny_128_384_decrypt + .type skinny_128_384_decrypt, @function +skinny_128_384_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r23 + std Y+2,r2 + std Y+3,r21 + std Y+4,r20 + std Y+5,r3 + std Y+6,r18 + std Y+7,r19 + std Y+8,r22 + std Y+9,r9 + std Y+10,r10 + std Y+11,r7 + std Y+12,r6 + std Y+13,r11 + std Y+14,r4 + std Y+15,r5 + std Y+16,r8 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r23 + std Y+18,r2 + std Y+19,r21 + std Y+20,r20 + std Y+21,r3 + std Y+22,r18 + std Y+23,r19 + std Y+24,r22 + std Y+25,r9 + std Y+26,r10 + std Y+27,r7 + std Y+28,r6 + std Y+29,r11 + std Y+30,r4 + std Y+31,r5 + std Y+32,r8 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r22,Z+36 + ldd r23,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+33,r23 + std Y+34,r2 + std Y+35,r21 + std Y+36,r20 + std Y+37,r3 + std Y+38,r18 + std Y+39,r19 + std Y+40,r22 + std Y+41,r9 + std Y+42,r10 + std Y+43,r7 + std Y+44,r6 + std Y+45,r11 + std Y+46,r4 + std Y+47,r5 + std Y+48,r8 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +122: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z 
+#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 122b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,28 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +150: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 150b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 +179: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 
+#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 179b + std Y+33,r12 + std Y+34,r13 + std Y+35,r14 + std Y+36,r15 + std Y+37,r24 + std Y+38,r25 + std Y+39,r16 + std Y+40,r17 + ldi r26,28 + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 +207: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 207b + std Y+41,r12 + std Y+42,r13 + std Y+43,r14 + std Y+44,r15 + std Y+45,r24 + std Y+46,r25 + std Y+47,r16 + std Y+48,r17 + ldi r26,112 +227: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm 
r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor 
r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std 
Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + 
mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z 
+#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + 
mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z 
+#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + 
ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif 
defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 903f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 227b +903: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_decrypt, .-skinny_128_384_decrypt + + .text +.global skinny_128_256_init + .type skinny_128_256_init, @function +skinny_128_256_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,8 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_256_init, .-skinny_128_256_init + + .text +.global skinny_128_256_encrypt + .type skinny_128_256_encrypt, @function +skinny_128_256_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 
+ push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +82: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov 
r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor 
r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if 
defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi 
r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + 
ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,96 + breq 594f + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) 
+ elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 82b +594: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_encrypt, .-skinny_128_256_encrypt + +.global skinny_128_256_encrypt_tk_full + .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt + + .text +.global skinny_128_256_decrypt + .type skinny_128_256_decrypt, @function +skinny_128_256_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + std Y+5,r22 + std Y+6,r23 + std Y+7,r2 + std Y+8,r3 + std Y+9,r4 + std Y+10,r5 + std Y+11,r6 + std Y+12,r7 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + std Y+21,r22 + std Y+22,r23 + std Y+23,r2 + std Y+24,r3 + std Y+25,r4 + std Y+26,r5 + std Y+27,r6 + std Y+28,r7 + std Y+29,r8 + std Y+30,r9 + std Y+31,r10 + std Y+32,r11 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,24 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +90: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm 
r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 90b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,24 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +118: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 118b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,96 +139: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else 
+ lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 
+#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + 
elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd 
r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif 
defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 651f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 139b +651: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_decrypt, .-skinny_128_256_decrypt + +#endif diff --git a/romulus/Implementations/crypto_aead/romulusm2/rhys/internal-skinny128.c b/romulus/Implementations/crypto_aead/romulusm2/rhys/internal-skinny128.c index 65ba4ed..579ced1 100644 --- a/romulus/Implementations/crypto_aead/romulusm2/rhys/internal-skinny128.c +++ b/romulus/Implementations/crypto_aead/romulusm2/rhys/internal-skinny128.c @@ -25,6 +25,8 @@ #include "internal-util.h" #include +#if !defined(__AVR__) + STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) { /* This function is used to fast-forward the TK1 tweak value @@ -55,42 +57,33 @@ STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) ((row3 << 24) & 0xFF000000U); } -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - 
size_t key_len) +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t TK3[4]; uint32_t *schedule; unsigned round; uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || (key_len != 32 && key_len != 48)) - return 0; - +#endif + +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); + memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); +#else /* Set the initial states of TK1, TK2, and TK3 */ - if (key_len == 32) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - TK3[0] = le_load_word32(key + 16); - TK3[1] = le_load_word32(key + 20); - TK3[2] = le_load_word32(key + 24); - TK3[3] = le_load_word32(key + 28); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); + TK3[0] = le_load_word32(key + 32); + TK3[1] = le_load_word32(key + 36); + TK3[2] = le_load_word32(key + 40); + TK3[3] = le_load_word32(key + 44); /* Set up the key schedule using TK2 and TK3. TK1 is not added * to the key schedule because we will derive that part of the @@ -116,20 +109,7 @@ int skinny_128_384_init skinny128_LFSR3(TK3[0]); skinny128_LFSR3(TK3[1]); } - return 1; -} - -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_384_encrypt @@ -138,7 +118,13 @@ void skinny_128_384_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -148,14 +134,24 @@ void skinny_128_384_encrypt s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -163,8 +159,15 @@ void skinny_128_384_encrypt skinny128_sbox(s3); /* 
Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -185,6 +188,16 @@ void skinny_128_384_encrypt /* Permute TK1 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_permute_tk(TK3); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -200,7 +213,13 @@ void skinny_128_384_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0x15; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -215,15 +234,47 @@ void skinny_128_384_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Permute TK1 to fast-forward it to the end of the key schedule */ skinny128_fast_forward_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_fast_forward_tk(TK2); + skinny128_fast_forward_tk(TK3); + for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
+ skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); + skinny128_LFSR3(TK3[2]); + skinny128_LFSR3(TK3[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_inv_permute_tk(TK3); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); + skinny128_LFSR2(TK3[2]); + skinny128_LFSR2(TK3[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -240,8 +291,15 @@ void skinny_128_384_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -259,13 +317,18 @@ void skinny_128_384_decrypt } void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2) { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; uint32_t TK2[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -275,7 +338,7 @@ void skinny_128_384_encrypt_tk2 s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1/TK2 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); @@ -284,9 +347,15 @@ void skinny_128_384_encrypt_tk2 TK2[1] = le_load_word32(tk2 + 4); TK2[2] = le_load_word32(tk2 + 8); TK2[3] = le_load_word32(tk2 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -294,8 +363,15 @@ void skinny_128_384_encrypt_tk2 skinny128_sbox(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -319,6 +395,13 @@ void skinny_128_384_encrypt_tk2 skinny128_permute_tk(TK2); skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK3); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -408,33 +491,27 @@ void skinny_128_384_encrypt_tk_full 
le_store_word32(output + 12, s3); } -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len) +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t *schedule; unsigned round; uint8_t rc; +#endif - /* Validate the parameters */ - if (!ks || !key || (key_len != 16 && key_len != 32)) - return 0; - +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); +#else /* Set the initial states of TK1 and TK2 */ - if (key_len == 16) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); /* Set up the key schedule using TK2. TK1 is not added * to the key schedule because we will derive that part of the @@ -457,20 +534,7 @@ int skinny_128_256_init skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); } - return 1; -} - -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_256_encrypt @@ -479,7 +543,12 @@ void skinny_128_256_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -494,18 +563,31 @@ void skinny_128_256_encrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); skinny128_sbox(s2); skinny128_sbox(s3); - /* Apply the subkey for this round */ + /* XOR the round constant and the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -524,8 +606,15 @@ void skinny_128_256_encrypt s1 = s0; s0 = temp; - /* Permute TK1 for the next round */ + /* Permute TK1 and TK2 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -541,7 +630,12 @@ void 
skinny_128_256_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0x09; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -558,12 +652,29 @@ void skinny_128_256_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2. + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -580,8 +691,15 @@ void skinny_128_256_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -670,142 +788,14 @@ void skinny_128_256_encrypt_tk_full le_store_word32(output + 12, s3); } -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len) -{ - uint32_t TK1[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || key_len != 16) - return 0; - - /* Set the initial state of TK1 */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); +#else /* __AVR__ */ - /* Set up the key schedule using TK1 */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK1[0] ^ (rc & 0x0F); - schedule[1] = TK1[1] ^ (rc >> 4); - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); - } - return 1; -} - -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) +void skinny_128_384_encrypt_tk2 + (skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, const unsigned char *tk2) { - uint32_t s0, s1, s2, s3; - const uint32_t *schedule = ks->k; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); + memcpy(ks->TK2, tk2, 16); + skinny_128_384_encrypt(ks, output, input); } -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - const uint32_t *schedule; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_128_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule -= 2) { - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} +#endif /* __AVR__ */ diff --git a/romulus/Implementations/crypto_aead/romulusm2/rhys/internal-skinny128.h b/romulus/Implementations/crypto_aead/romulusm2/rhys/internal-skinny128.h index 76b34f5..2bfda3c 100644 --- a/romulus/Implementations/crypto_aead/romulusm2/rhys/internal-skinny128.h +++ b/romulus/Implementations/crypto_aead/romulusm2/rhys/internal-skinny128.h @@ -39,6 +39,16 @@ extern "C" { #endif /** + * \def SKINNY_128_SMALL_SCHEDULE + * \brief Defined to 1 to use the 
small key schedule version of SKINNY-128. + */ +#if defined(__AVR__) +#define SKINNY_128_SMALL_SCHEDULE 1 +#else +#define SKINNY_128_SMALL_SCHEDULE 0 +#endif + +/** * \brief Size of a block for SKINNY-128 block ciphers. */ #define SKINNY_128_BLOCK_SIZE 16 @@ -56,8 +66,16 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; + + /** TK3 for the small key schedule */ + uint8_t TK3[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_384_ROUNDS * 2]; +#endif } skinny_128_384_key_schedule_t; @@ -66,29 +84,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 32 or 48, - * where 32 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-384. - * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); /** * \brief Encrypts a 128-bit block with SKINNY-128-384. @@ -133,9 +131,12 @@ void skinny_128_384_decrypt * This version is useful when both TK1 and TK2 change from block to block. * When the key is initialized with skinny_128_384_init(), the TK2 part of * the key value should be set to zero. + * + * \note Some versions of this function may modify the key schedule to + * copy tk2 into place. */ void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2); /** @@ -170,8 +171,13 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_256_ROUNDS * 2]; +#endif } skinny_128_256_key_schedule_t; @@ -180,29 +186,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16 or 32, - * where 16 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. 
- */ -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); /** * \brief Encrypts a 128-bit block with SKINNY-128-256. @@ -251,63 +237,6 @@ void skinny_128_256_encrypt_tk_full (const unsigned char key[32], unsigned char *output, const unsigned char *input); -/** - * \brief Number of rounds for SKINNY-128-128. - */ -#define SKINNY_128_128_ROUNDS 40 - -/** - * \brief Structure of the key schedule for SKINNY-128-128. - */ -typedef struct -{ - /** Words of the key schedule */ - uint32_t k[SKINNY_128_128_ROUNDS * 2]; - -} skinny_128_128_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-128. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - #ifdef __cplusplus } #endif diff --git a/romulus/Implementations/crypto_aead/romulusm2/rhys/internal-util.h b/romulus/Implementations/crypto_aead/romulusm2/rhys/internal-util.h index e79158c..e30166d 100644 --- a/romulus/Implementations/crypto_aead/romulusm2/rhys/internal-util.h +++ b/romulus/Implementations/crypto_aead/romulusm2/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/romulus/Implementations/crypto_aead/romulusm2/rhys/romulus.c b/romulus/Implementations/crypto_aead/romulusm2/rhys/romulus.c index be1c0fa..bb19cc5 100644 --- a/romulus/Implementations/crypto_aead/romulusm2/rhys/romulus.c +++ b/romulus/Implementations/crypto_aead/romulusm2/rhys/romulus.c @@ -116,14 +116,15 @@ static void romulus1_init (skinny_128_384_key_schedule_t *ks, const unsigned char *k, const unsigned char *npub) { - unsigned char TK[32]; + unsigned char TK[48]; + TK[0] = 0x01; /* Initialize the 56-bit LFSR counter */ + memset(TK + 1, 0, 15); if (npub) - memcpy(TK, npub, 16); + memcpy(TK + 16, npub, 16); else - memset(TK, 0, 16); - memcpy(TK + 16, k, 16); - skinny_128_384_init(ks, TK, sizeof(TK)); - ks->TK1[0] = 0x01; /* Initialize the 56-bit LFSR counter */ + memset(TK + 16, 0, 16); + memcpy(TK + 32, k, 16); + skinny_128_384_init(ks, TK); } /** @@ -138,14 +139,18 @@ static void romulus2_init (skinny_128_384_key_schedule_t *ks, const unsigned char *k, const unsigned char *npub) { - unsigned char TK[32]; - memcpy(TK, k, 16); - memset(TK + 16, 0, 16); - TK[16] = 0x01; /* Initialize the high 24 bits of the LFSR counter */ - skinny_128_384_init(ks, TK, sizeof(TK)); - ks->TK1[0] = 0x01; 
/* Initialize the low 24 bits of the LFSR counter */ - if (npub) - memcpy(ks->TK1 + 4, npub, 12); + unsigned char TK[48]; + TK[0] = 0x01; /* Initialize the low 24 bits of the LFSR counter */ + if (npub) { + TK[1] = TK[2] = TK[3] = 0; + memcpy(TK + 4, npub, 12); + } else { + memset(TK + 1, 0, 15); + } + memcpy(TK + 16, k, 16); + TK[32] = 0x01; /* Initialize the high 24 bits of the LFSR counter */ + memset(TK + 33, 0, 15); + skinny_128_384_init(ks, TK); } /** @@ -160,10 +165,16 @@ static void romulus3_init (skinny_128_256_key_schedule_t *ks, const unsigned char *k, const unsigned char *npub) { - skinny_128_256_init(ks, k, 16); - ks->TK1[0] = 0x01; /* Initialize the 24-bit LFSR counter */ - if (npub) - memcpy(ks->TK1 + 4, npub, 12); + unsigned char TK[32]; + TK[0] = 0x01; /* Initialize the 24-bit LFSR counter */ + if (npub) { + TK[1] = TK[2] = TK[3] = 0; + memcpy(TK + 4, npub, 12); + } else { + memset(TK + 1, 0, 15); + } + memcpy(TK + 16, k, 16); + skinny_128_256_init(ks, TK); } /** diff --git a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/aead-common.c b/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/aead-common.h b/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. 
- * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
- * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/api.h b/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/api.h deleted file mode 100644 index c3c0a27..0000000 --- a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 12 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/encrypt.c b/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/encrypt.c deleted file mode 100644 index 7e0c676..0000000 --- a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "romulus.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return romulus_m3_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return romulus_m3_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-skinny128-avr.S b/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-skinny128-avr.S deleted file mode 100644 index d342cd5..0000000 --- a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-skinny128-avr.S +++ /dev/null @@ -1,10099 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 256 -table_0: - .byte 101 - .byte 76 - .byte 106 - .byte 66 - .byte 75 - .byte 99 - .byte 67 - .byte 107 - .byte 85 - .byte 117 - .byte 90 - .byte 122 - .byte 83 - .byte 115 - .byte 91 - .byte 123 - .byte 53 - .byte 140 - .byte 58 - .byte 129 - .byte 137 - .byte 51 - .byte 128 - .byte 59 - .byte 149 - .byte 37 - .byte 152 - .byte 42 - .byte 144 - .byte 35 - .byte 153 - .byte 43 - .byte 229 - .byte 204 - .byte 232 - .byte 193 - .byte 201 - .byte 224 - .byte 192 - .byte 233 - .byte 213 - .byte 245 - .byte 216 - .byte 248 - .byte 208 - .byte 240 - .byte 217 - .byte 249 - .byte 165 - .byte 28 - .byte 168 - .byte 18 - .byte 27 - .byte 160 - .byte 19 - .byte 169 - .byte 5 - .byte 181 - .byte 10 - .byte 184 - .byte 3 - .byte 176 - .byte 11 - .byte 185 - .byte 50 - .byte 136 - .byte 60 - .byte 133 - .byte 141 - .byte 52 - .byte 132 - .byte 61 - .byte 145 - .byte 34 - .byte 156 - .byte 44 - .byte 148 - .byte 36 - .byte 157 - .byte 45 - .byte 98 - .byte 74 - .byte 108 - .byte 69 - 
.byte 77 - .byte 100 - .byte 68 - .byte 109 - .byte 82 - .byte 114 - .byte 92 - .byte 124 - .byte 84 - .byte 116 - .byte 93 - .byte 125 - .byte 161 - .byte 26 - .byte 172 - .byte 21 - .byte 29 - .byte 164 - .byte 20 - .byte 173 - .byte 2 - .byte 177 - .byte 12 - .byte 188 - .byte 4 - .byte 180 - .byte 13 - .byte 189 - .byte 225 - .byte 200 - .byte 236 - .byte 197 - .byte 205 - .byte 228 - .byte 196 - .byte 237 - .byte 209 - .byte 241 - .byte 220 - .byte 252 - .byte 212 - .byte 244 - .byte 221 - .byte 253 - .byte 54 - .byte 142 - .byte 56 - .byte 130 - .byte 139 - .byte 48 - .byte 131 - .byte 57 - .byte 150 - .byte 38 - .byte 154 - .byte 40 - .byte 147 - .byte 32 - .byte 155 - .byte 41 - .byte 102 - .byte 78 - .byte 104 - .byte 65 - .byte 73 - .byte 96 - .byte 64 - .byte 105 - .byte 86 - .byte 118 - .byte 88 - .byte 120 - .byte 80 - .byte 112 - .byte 89 - .byte 121 - .byte 166 - .byte 30 - .byte 170 - .byte 17 - .byte 25 - .byte 163 - .byte 16 - .byte 171 - .byte 6 - .byte 182 - .byte 8 - .byte 186 - .byte 0 - .byte 179 - .byte 9 - .byte 187 - .byte 230 - .byte 206 - .byte 234 - .byte 194 - .byte 203 - .byte 227 - .byte 195 - .byte 235 - .byte 214 - .byte 246 - .byte 218 - .byte 250 - .byte 211 - .byte 243 - .byte 219 - .byte 251 - .byte 49 - .byte 138 - .byte 62 - .byte 134 - .byte 143 - .byte 55 - .byte 135 - .byte 63 - .byte 146 - .byte 33 - .byte 158 - .byte 46 - .byte 151 - .byte 39 - .byte 159 - .byte 47 - .byte 97 - .byte 72 - .byte 110 - .byte 70 - .byte 79 - .byte 103 - .byte 71 - .byte 111 - .byte 81 - .byte 113 - .byte 94 - .byte 126 - .byte 87 - .byte 119 - .byte 95 - .byte 127 - .byte 162 - .byte 24 - .byte 174 - .byte 22 - .byte 31 - .byte 167 - .byte 23 - .byte 175 - .byte 1 - .byte 178 - .byte 14 - .byte 190 - .byte 7 - .byte 183 - .byte 15 - .byte 191 - .byte 226 - .byte 202 - .byte 238 - .byte 198 - .byte 207 - .byte 231 - .byte 199 - .byte 239 - .byte 210 - .byte 242 - .byte 222 - .byte 254 - .byte 215 - .byte 247 - .byte 223 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 256 -table_1: - .byte 172 - .byte 232 - .byte 104 - .byte 60 - .byte 108 - .byte 56 - .byte 168 - .byte 236 - .byte 170 - .byte 174 - .byte 58 - .byte 62 - .byte 106 - .byte 110 - .byte 234 - .byte 238 - .byte 166 - .byte 163 - .byte 51 - .byte 54 - .byte 102 - .byte 99 - .byte 227 - .byte 230 - .byte 225 - .byte 164 - .byte 97 - .byte 52 - .byte 49 - .byte 100 - .byte 161 - .byte 228 - .byte 141 - .byte 201 - .byte 73 - .byte 29 - .byte 77 - .byte 25 - .byte 137 - .byte 205 - .byte 139 - .byte 143 - .byte 27 - .byte 31 - .byte 75 - .byte 79 - .byte 203 - .byte 207 - .byte 133 - .byte 192 - .byte 64 - .byte 21 - .byte 69 - .byte 16 - .byte 128 - .byte 197 - .byte 130 - .byte 135 - .byte 18 - .byte 23 - .byte 66 - .byte 71 - .byte 194 - .byte 199 - .byte 150 - .byte 147 - .byte 3 - .byte 6 - .byte 86 - .byte 83 - .byte 211 - .byte 214 - .byte 209 - .byte 148 - .byte 81 - .byte 4 - .byte 1 - .byte 84 - .byte 145 - .byte 212 - .byte 156 - .byte 216 - .byte 88 - .byte 12 - .byte 92 - .byte 8 - .byte 152 - .byte 220 - .byte 154 - .byte 158 - .byte 10 - .byte 14 - .byte 90 - .byte 94 - .byte 218 - .byte 222 - .byte 149 - .byte 208 - .byte 80 - .byte 5 - .byte 85 - .byte 0 - .byte 144 - .byte 213 - .byte 146 - .byte 151 - .byte 2 - .byte 7 - .byte 82 - .byte 87 - .byte 210 - .byte 215 - .byte 157 - .byte 217 - .byte 89 - .byte 13 - .byte 93 - .byte 9 - .byte 153 - .byte 221 - .byte 155 - .byte 159 - .byte 11 - .byte 15 - .byte 91 - .byte 95 - .byte 
219 - .byte 223 - .byte 22 - .byte 19 - .byte 131 - .byte 134 - .byte 70 - .byte 67 - .byte 195 - .byte 198 - .byte 65 - .byte 20 - .byte 193 - .byte 132 - .byte 17 - .byte 68 - .byte 129 - .byte 196 - .byte 28 - .byte 72 - .byte 200 - .byte 140 - .byte 76 - .byte 24 - .byte 136 - .byte 204 - .byte 26 - .byte 30 - .byte 138 - .byte 142 - .byte 74 - .byte 78 - .byte 202 - .byte 206 - .byte 53 - .byte 96 - .byte 224 - .byte 165 - .byte 101 - .byte 48 - .byte 160 - .byte 229 - .byte 50 - .byte 55 - .byte 162 - .byte 167 - .byte 98 - .byte 103 - .byte 226 - .byte 231 - .byte 61 - .byte 105 - .byte 233 - .byte 173 - .byte 109 - .byte 57 - .byte 169 - .byte 237 - .byte 59 - .byte 63 - .byte 171 - .byte 175 - .byte 107 - .byte 111 - .byte 235 - .byte 239 - .byte 38 - .byte 35 - .byte 179 - .byte 182 - .byte 118 - .byte 115 - .byte 243 - .byte 246 - .byte 113 - .byte 36 - .byte 241 - .byte 180 - .byte 33 - .byte 116 - .byte 177 - .byte 244 - .byte 44 - .byte 120 - .byte 248 - .byte 188 - .byte 124 - .byte 40 - .byte 184 - .byte 252 - .byte 42 - .byte 46 - .byte 186 - .byte 190 - .byte 122 - .byte 126 - .byte 250 - .byte 254 - .byte 37 - .byte 112 - .byte 240 - .byte 181 - .byte 117 - .byte 32 - .byte 176 - .byte 245 - .byte 34 - .byte 39 - .byte 178 - .byte 183 - .byte 114 - .byte 119 - .byte 242 - .byte 247 - .byte 45 - .byte 121 - .byte 249 - .byte 189 - .byte 125 - .byte 41 - .byte 185 - .byte 253 - .byte 43 - .byte 47 - .byte 187 - .byte 191 - .byte 123 - .byte 127 - .byte 251 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_2, @object - .size table_2, 256 -table_2: - .byte 0 - .byte 2 - .byte 4 - .byte 6 - .byte 8 - .byte 10 - .byte 12 - .byte 14 - .byte 16 - .byte 18 - .byte 20 - .byte 22 - .byte 24 - .byte 26 - .byte 28 - .byte 30 - .byte 32 - .byte 34 - .byte 36 - .byte 38 - .byte 40 - .byte 42 - .byte 44 - .byte 46 - .byte 48 - .byte 50 - .byte 52 - .byte 54 - .byte 56 - .byte 58 - .byte 60 - .byte 62 - .byte 65 - .byte 67 - .byte 69 - .byte 71 - .byte 73 - .byte 75 - .byte 77 - .byte 79 - .byte 81 - .byte 83 - .byte 85 - .byte 87 - .byte 89 - .byte 91 - .byte 93 - .byte 95 - .byte 97 - .byte 99 - .byte 101 - .byte 103 - .byte 105 - .byte 107 - .byte 109 - .byte 111 - .byte 113 - .byte 115 - .byte 117 - .byte 119 - .byte 121 - .byte 123 - .byte 125 - .byte 127 - .byte 128 - .byte 130 - .byte 132 - .byte 134 - .byte 136 - .byte 138 - .byte 140 - .byte 142 - .byte 144 - .byte 146 - .byte 148 - .byte 150 - .byte 152 - .byte 154 - .byte 156 - .byte 158 - .byte 160 - .byte 162 - .byte 164 - .byte 166 - .byte 168 - .byte 170 - .byte 172 - .byte 174 - .byte 176 - .byte 178 - .byte 180 - .byte 182 - .byte 184 - .byte 186 - .byte 188 - .byte 190 - .byte 193 - .byte 195 - .byte 197 - .byte 199 - .byte 201 - .byte 203 - .byte 205 - .byte 207 - .byte 209 - .byte 211 - .byte 213 - .byte 215 - .byte 217 - .byte 219 - .byte 221 - .byte 223 - .byte 225 - .byte 227 - .byte 229 - .byte 231 - .byte 233 - .byte 235 - .byte 237 - .byte 239 - .byte 241 - .byte 243 - .byte 245 - .byte 247 - .byte 249 - .byte 251 - .byte 253 - .byte 255 - .byte 1 - .byte 3 - .byte 5 - .byte 7 - .byte 9 - .byte 11 - .byte 13 - .byte 15 - .byte 17 - .byte 19 - .byte 21 - .byte 23 - .byte 25 - .byte 27 - .byte 29 - .byte 31 - .byte 33 - .byte 35 - .byte 37 - .byte 39 - .byte 41 - .byte 43 - .byte 45 - .byte 47 - .byte 49 - .byte 51 - .byte 53 - .byte 55 - .byte 57 - .byte 59 - .byte 61 - .byte 63 - .byte 64 - .byte 66 - .byte 68 - .byte 70 - .byte 72 - .byte 74 - .byte 76 - .byte 78 - .byte 80 - .byte 
82 - .byte 84 - .byte 86 - .byte 88 - .byte 90 - .byte 92 - .byte 94 - .byte 96 - .byte 98 - .byte 100 - .byte 102 - .byte 104 - .byte 106 - .byte 108 - .byte 110 - .byte 112 - .byte 114 - .byte 116 - .byte 118 - .byte 120 - .byte 122 - .byte 124 - .byte 126 - .byte 129 - .byte 131 - .byte 133 - .byte 135 - .byte 137 - .byte 139 - .byte 141 - .byte 143 - .byte 145 - .byte 147 - .byte 149 - .byte 151 - .byte 153 - .byte 155 - .byte 157 - .byte 159 - .byte 161 - .byte 163 - .byte 165 - .byte 167 - .byte 169 - .byte 171 - .byte 173 - .byte 175 - .byte 177 - .byte 179 - .byte 181 - .byte 183 - .byte 185 - .byte 187 - .byte 189 - .byte 191 - .byte 192 - .byte 194 - .byte 196 - .byte 198 - .byte 200 - .byte 202 - .byte 204 - .byte 206 - .byte 208 - .byte 210 - .byte 212 - .byte 214 - .byte 216 - .byte 218 - .byte 220 - .byte 222 - .byte 224 - .byte 226 - .byte 228 - .byte 230 - .byte 232 - .byte 234 - .byte 236 - .byte 238 - .byte 240 - .byte 242 - .byte 244 - .byte 246 - .byte 248 - .byte 250 - .byte 252 - .byte 254 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_3, @object - .size table_3, 256 -table_3: - .byte 0 - .byte 128 - .byte 1 - .byte 129 - .byte 2 - .byte 130 - .byte 3 - .byte 131 - .byte 4 - .byte 132 - .byte 5 - .byte 133 - .byte 6 - .byte 134 - .byte 7 - .byte 135 - .byte 8 - .byte 136 - .byte 9 - .byte 137 - .byte 10 - .byte 138 - .byte 11 - .byte 139 - .byte 12 - .byte 140 - .byte 13 - .byte 141 - .byte 14 - .byte 142 - .byte 15 - .byte 143 - .byte 16 - .byte 144 - .byte 17 - .byte 145 - .byte 18 - .byte 146 - .byte 19 - .byte 147 - .byte 20 - .byte 148 - .byte 21 - .byte 149 - .byte 22 - .byte 150 - .byte 23 - .byte 151 - .byte 24 - .byte 152 - .byte 25 - .byte 153 - .byte 26 - .byte 154 - .byte 27 - .byte 155 - .byte 28 - .byte 156 - .byte 29 - .byte 157 - .byte 30 - .byte 158 - .byte 31 - .byte 159 - .byte 160 - .byte 32 - .byte 161 - .byte 33 - .byte 162 - .byte 34 - .byte 163 - .byte 35 - .byte 164 - .byte 36 - .byte 165 - .byte 37 - .byte 166 - .byte 38 - .byte 167 - .byte 39 - .byte 168 - .byte 40 - .byte 169 - .byte 41 - .byte 170 - .byte 42 - .byte 171 - .byte 43 - .byte 172 - .byte 44 - .byte 173 - .byte 45 - .byte 174 - .byte 46 - .byte 175 - .byte 47 - .byte 176 - .byte 48 - .byte 177 - .byte 49 - .byte 178 - .byte 50 - .byte 179 - .byte 51 - .byte 180 - .byte 52 - .byte 181 - .byte 53 - .byte 182 - .byte 54 - .byte 183 - .byte 55 - .byte 184 - .byte 56 - .byte 185 - .byte 57 - .byte 186 - .byte 58 - .byte 187 - .byte 59 - .byte 188 - .byte 60 - .byte 189 - .byte 61 - .byte 190 - .byte 62 - .byte 191 - .byte 63 - .byte 64 - .byte 192 - .byte 65 - .byte 193 - .byte 66 - .byte 194 - .byte 67 - .byte 195 - .byte 68 - .byte 196 - .byte 69 - .byte 197 - .byte 70 - .byte 198 - .byte 71 - .byte 199 - .byte 72 - .byte 200 - .byte 73 - .byte 201 - .byte 74 - .byte 202 - .byte 75 - .byte 203 - .byte 76 - .byte 204 - .byte 77 - .byte 205 - .byte 78 - .byte 206 - .byte 79 - .byte 207 - .byte 80 - .byte 208 - .byte 81 - .byte 209 - .byte 82 - .byte 210 - .byte 83 - .byte 211 - .byte 84 - .byte 212 - .byte 85 - .byte 213 - .byte 86 - .byte 214 - .byte 87 - .byte 215 - .byte 88 - .byte 216 - .byte 89 - .byte 217 - .byte 90 - .byte 218 - .byte 91 - .byte 219 - .byte 92 - .byte 220 - .byte 93 - .byte 221 - .byte 94 - .byte 222 - .byte 95 - .byte 223 - .byte 224 - .byte 96 - .byte 225 - .byte 97 - .byte 226 - .byte 98 - .byte 227 - .byte 99 - .byte 228 - .byte 100 - .byte 229 - .byte 101 - .byte 230 - .byte 102 - .byte 231 - .byte 103 - .byte 232 - .byte 104 - .byte 
233 - .byte 105 - .byte 234 - .byte 106 - .byte 235 - .byte 107 - .byte 236 - .byte 108 - .byte 237 - .byte 109 - .byte 238 - .byte 110 - .byte 239 - .byte 111 - .byte 240 - .byte 112 - .byte 241 - .byte 113 - .byte 242 - .byte 114 - .byte 243 - .byte 115 - .byte 244 - .byte 116 - .byte 245 - .byte 117 - .byte 246 - .byte 118 - .byte 247 - .byte 119 - .byte 248 - .byte 120 - .byte 249 - .byte 121 - .byte 250 - .byte 122 - .byte 251 - .byte 123 - .byte 252 - .byte 124 - .byte 253 - .byte 125 - .byte 254 - .byte 126 - .byte 255 - .byte 127 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_4, @object - .size table_4, 112 -table_4: - .byte 1 - .byte 0 - .byte 3 - .byte 0 - .byte 7 - .byte 0 - .byte 15 - .byte 0 - .byte 15 - .byte 1 - .byte 14 - .byte 3 - .byte 13 - .byte 3 - .byte 11 - .byte 3 - .byte 7 - .byte 3 - .byte 15 - .byte 2 - .byte 14 - .byte 1 - .byte 12 - .byte 3 - .byte 9 - .byte 3 - .byte 3 - .byte 3 - .byte 7 - .byte 2 - .byte 14 - .byte 0 - .byte 13 - .byte 1 - .byte 10 - .byte 3 - .byte 5 - .byte 3 - .byte 11 - .byte 2 - .byte 6 - .byte 1 - .byte 12 - .byte 2 - .byte 8 - .byte 1 - .byte 0 - .byte 3 - .byte 1 - .byte 2 - .byte 2 - .byte 0 - .byte 5 - .byte 0 - .byte 11 - .byte 0 - .byte 7 - .byte 1 - .byte 14 - .byte 2 - .byte 12 - .byte 1 - .byte 8 - .byte 3 - .byte 1 - .byte 3 - .byte 3 - .byte 2 - .byte 6 - .byte 0 - .byte 13 - .byte 0 - .byte 11 - .byte 1 - .byte 6 - .byte 3 - .byte 13 - .byte 2 - .byte 10 - .byte 1 - .byte 4 - .byte 3 - .byte 9 - .byte 2 - .byte 2 - .byte 1 - .byte 4 - .byte 2 - .byte 8 - .byte 0 - .byte 1 - .byte 1 - .byte 2 - .byte 2 - .byte 4 - .byte 0 - .byte 9 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 2 - .byte 12 - .byte 0 - .byte 9 - .byte 1 - .byte 2 - .byte 3 - .byte 5 - .byte 2 - .byte 10 - .byte 0 - - .text -.global skinny_128_384_init - .type skinny_128_384_init, @function -skinny_128_384_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,12 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_384_init, .-skinny_128_384_init - - .text -.global skinny_128_384_encrypt - .type skinny_128_384_encrypt, @function -skinny_128_384_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - 
ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - std Y+33,r18 - std Y+34,r19 - std Y+35,r20 - std Y+36,r21 - ldd r18,Z+36 - ldd r19,Z+37 - ldd r20,Z+38 - ldd r21,Z+39 - std Y+37,r18 - std Y+38,r19 - std Y+39,r20 - std Y+40,r21 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - std Y+41,r18 - std Y+42,r19 - std Y+43,r20 - std Y+44,r21 - ldd r18,Z+44 - ldd r19,Z+45 - ldd r20,Z+46 - ldd r21,Z+47 - std Y+45,r18 - std Y+46,r19 - std Y+47,r20 - std Y+48,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -114: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi 
r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd 
r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) 
- ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm 
r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - 
ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd 
r0,Y+48 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,112 - brne 5721f - rjmp 790f -5721: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif 
defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 114b -790: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_encrypt, .-skinny_128_384_encrypt - -.global skinny_128_384_encrypt_tk_full - .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt - - .text -.global skinny_128_384_decrypt - .type skinny_128_384_decrypt, @function -skinny_128_384_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r23 - std Y+2,r2 - std Y+3,r21 - std Y+4,r20 - std Y+5,r3 - std Y+6,r18 - std Y+7,r19 - std Y+8,r22 - std Y+9,r9 - std Y+10,r10 - std Y+11,r7 - std Y+12,r6 - std Y+13,r11 - std Y+14,r4 - std Y+15,r5 - std Y+16,r8 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r23 - std Y+18,r2 - std Y+19,r21 - std Y+20,r20 - std Y+21,r3 - std Y+22,r18 - std Y+23,r19 - std Y+24,r22 - std Y+25,r9 - std Y+26,r10 - std Y+27,r7 - std Y+28,r6 - std Y+29,r11 - std Y+30,r4 - std Y+31,r5 - std Y+32,r8 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r22,Z+36 - ldd r23,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+33,r23 - std Y+34,r2 - std Y+35,r21 - std Y+36,r20 - std Y+37,r3 - std Y+38,r18 - std Y+39,r19 - std Y+40,r22 - std Y+41,r9 - std Y+42,r10 - std Y+43,r7 - std Y+44,r6 - std Y+45,r11 - std Y+46,r4 - std Y+47,r5 - std Y+48,r8 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 
-#endif - ldi r26,28 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -122: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 122b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,28 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -150: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 150b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 -179: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z 
-#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 179b - std Y+33,r12 - std Y+34,r13 - std Y+35,r14 - std Y+36,r15 - std Y+37,r24 - std Y+38,r25 - std Y+39,r16 - std Y+40,r17 - ldi r26,28 - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 -207: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 207b - std Y+41,r12 - std Y+42,r13 - std Y+43,r14 - std Y+44,r15 - std Y+45,r24 - std Y+46,r25 - std Y+47,r16 - std Y+48,r17 - ldi r26,112 -227: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - 
lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - 
eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - 
lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov 
r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - 
mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - 
elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - 
mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out 
_SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 903f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 227b -903: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_decrypt, .-skinny_128_384_decrypt - - .text -.global skinny_128_256_init - .type skinny_128_256_init, @function -skinny_128_256_init: - movw r30,r24 - movw r26,r22 
-.L__stack_usage = 2 - ldi r22,8 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_256_init, .-skinny_128_256_init - - .text -.global skinny_128_256_encrypt - .type skinny_128_256_encrypt, @function -skinny_128_256_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -82: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - 
mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi 
r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if 
defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 
- mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z 
-#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,96 - breq 594f - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 82b -594: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_encrypt, .-skinny_128_256_encrypt - -.global skinny_128_256_encrypt_tk_full - .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt - - .text -.global skinny_128_256_decrypt - .type skinny_128_256_decrypt, @function -skinny_128_256_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - std Y+5,r22 - std Y+6,r23 - std Y+7,r2 - std Y+8,r3 - std Y+9,r4 - std Y+10,r5 - std Y+11,r6 - std Y+12,r7 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - std Y+21,r22 - std Y+22,r23 - std Y+23,r2 - std Y+24,r3 - std Y+25,r4 - std Y+26,r5 - std Y+27,r6 - std Y+28,r7 - std Y+29,r8 - std Y+30,r9 - std Y+31,r10 - std Y+32,r11 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi 
r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,24 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -90: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 90b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,24 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -118: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 118b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,96 -139: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 
- ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov 
r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov 
r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd 
r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif 
defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif 
defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 651f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 139b -651: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_decrypt, .-skinny_128_256_decrypt - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-skinny128.c b/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-skinny128.c deleted file mode 100644 index 579ced1..0000000 --- 
a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-skinny128.c +++ /dev/null @@ -1,801 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-skinny128.h" -#include "internal-skinnyutil.h" -#include "internal-util.h" -#include - -#if !defined(__AVR__) - -STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) -{ - /* This function is used to fast-forward the TK1 tweak value - * to the value at the end of the key schedule for decryption. - * - * The tweak permutation repeats every 16 rounds, so SKINNY-128-256 - * with 48 rounds does not need any fast forwarding applied. - * SKINNY-128-128 with 40 rounds and SKINNY-128-384 with 56 rounds - * are equivalent to applying the permutation 8 times: - * - * PT*8 = [5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12] - */ - uint32_t row0 = tk[0]; - uint32_t row1 = tk[1]; - uint32_t row2 = tk[2]; - uint32_t row3 = tk[3]; - tk[0] = ((row1 >> 8) & 0x0000FFFFU) | - ((row0 >> 8) & 0x00FF0000U) | - ((row0 << 8) & 0xFF000000U); - tk[1] = ((row1 >> 24) & 0x000000FFU) | - ((row0 << 8) & 0x00FFFF00U) | - ((row1 << 24) & 0xFF000000U); - tk[2] = ((row3 >> 8) & 0x0000FFFFU) | - ((row2 >> 8) & 0x00FF0000U) | - ((row2 << 8) & 0xFF000000U); - tk[3] = ((row3 >> 24) & 0x000000FFU) | - ((row2 << 8) & 0x00FFFF00U) | - ((row3 << 24) & 0xFF000000U); -} - -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); - memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); -#else - /* Set the initial states of TK1, TK2, and TK3 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Set up the key schedule using TK2 and TK3. 
TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ TK3[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ TK3[1] ^ (rc >> 4); - - /* Permute TK2 and TK3 for the next round */ - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - - /* Apply the LFSR's to TK2 and TK3 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } -#endif -} - -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0x15; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Permute TK1 to fast-forward it to the end of the key schedule */ - skinny128_fast_forward_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_fast_forward_tk(TK2); - skinny128_fast_forward_tk(TK3); - for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
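For reference while reading the schedule-stepping code above and below: the skinny128_LFSR2/skinny128_LFSR3 macros it calls are defined elsewhere (internal-skinnyutil.h) and update four tweakey bytes packed into a uint32_t at once. A minimal per-byte sketch of the two LFSRs, as specified for SKINNY, is given here; the helper names lfsr2_byte/lfsr3_byte are illustrative only and not part of the library.

/* Per-byte sketch of the SKINNY tweakey LFSRs (per the SKINNY spec).
 * The real macros apply the same update to 4 bytes packed in a uint32_t. */
#include <stdint.h>
#include <assert.h>

static uint8_t lfsr2_byte(uint8_t x)  /* TK2: shift left, new LSB = x7 ^ x5 */
{
    return (uint8_t)((x << 1) | (((x >> 7) ^ (x >> 5)) & 1));
}

static uint8_t lfsr3_byte(uint8_t x)  /* TK3: shift right, new MSB = x0 ^ x6 */
{
    return (uint8_t)((x >> 1) | (((x ^ (x >> 6)) & 1) << 7));
}

int main(void)
{
    /* The two LFSRs invert each other, which is consistent with the
     * decryption paths above stepping TK2 backwards with skinny128_LFSR3
     * and TK3 backwards with skinny128_LFSR2. */
    for (unsigned v = 0; v < 256; ++v) {
        assert(lfsr3_byte(lfsr2_byte((uint8_t)v)) == (uint8_t)v);
        assert(lfsr2_byte(lfsr3_byte((uint8_t)v)) == (uint8_t)v);
    }
    return 0;
}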
- skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - skinny128_LFSR3(TK3[2]); - skinny128_LFSR3(TK3[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_inv_permute_tk(TK3); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); - skinny128_LFSR2(TK3[2]); - skinny128_LFSR2(TK3[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); - TK2[0] = le_load_word32(tk2); - TK2[1] = le_load_word32(tk2 + 4); - TK2[2] = le_load_word32(tk2 + 8); - TK2[3] = le_load_word32(tk2 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; - s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK3); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); -#else - /* Set the initial states of TK1 and TK2 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Set up the key schedule using TK2. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
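The key-schedule setup and the "tk_full" encryption paths above all drive the same 6-bit round-constant LFSR and split its value between the first two rows of the state. A standalone sketch of that generator, using the update expression exactly as it appears in the deleted code, is shown here; the printed split mirrors how (rc & 0x0F) and (rc >> 4) are folded into schedule[0]/schedule[1] or s0/s1.

/* Standalone sketch of the 6-bit SKINNY round-constant generator.
 * The update expression is copied from the code above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t rc = 0;
    for (int round = 0; round < 8; ++round) {
        rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
        rc &= 0x3F;
        /* First iterations give 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, ... */
        printf("round %d: rc=0x%02X row0^=0x%02X row1^=0x%02X\n",
               round, rc, rc & 0x0F, rc >> 4);
    }
    return 0;
}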
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ (rc >> 4); - - /* Permute TK2 for the next round */ - skinny128_permute_tk(TK2); - - /* Apply the LFSR to TK2 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } -#endif -} - -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0x09; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1. 
- * There is no need to fast-forward TK1 because the value at - * the end of the key schedule is the same as at the start */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2. - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -#else /* __AVR__ */ - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - memcpy(ks->TK2, tk2, 16); - skinny_128_384_encrypt(ks, output, input); -} - -#endif /* __AVR__ */ diff --git a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-skinny128.h b/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-skinny128.h deleted file mode 100644 index 2bfda3c..0000000 --- a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-skinny128.h +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNY128_H -#define LW_INTERNAL_SKINNY128_H - -/** - * \file internal-skinny128.h - * \brief SKINNY-128 block cipher family. - * - * References: https://eprint.iacr.org/2016/660.pdf, - * https://sites.google.com/site/skinnycipher/ - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \def SKINNY_128_SMALL_SCHEDULE - * \brief Defined to 1 to use the small key schedule version of SKINNY-128. - */ -#if defined(__AVR__) -#define SKINNY_128_SMALL_SCHEDULE 1 -#else -#define SKINNY_128_SMALL_SCHEDULE 0 -#endif - -/** - * \brief Size of a block for SKINNY-128 block ciphers. - */ -#define SKINNY_128_BLOCK_SIZE 16 - -/** - * \brief Number of rounds for SKINNY-128-384. - */ -#define SKINNY_128_384_ROUNDS 56 - -/** - * \brief Structure of the key schedule for SKINNY-128-384. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; - - /** TK3 for the small key schedule */ - uint8_t TK3[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_384_ROUNDS * 2]; -#endif - -} skinny_128_384_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-384. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and an explicitly - * provided TK2 value. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tk2 TK2 value that should be updated on the fly. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when both TK1 and TK2 change from block to block. - * When the key is initialized with skinny_128_384_init(), the TK2 part of - * the key value should be set to zero. - * - * \note Some versions of this function may modify the key schedule to - * copy tk2 into place. - */ -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and a - * fully specified tweakey value. - * - * \param key Points to the 384-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-384 but - * more memory-efficient. - */ -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input); - -/** - * \brief Number of rounds for SKINNY-128-256. - */ -#define SKINNY_128_256_ROUNDS 48 - -/** - * \brief Structure of the key schedule for SKINNY-128-256. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_256_ROUNDS * 2]; -#endif - -} skinny_128_256_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256 and a - * fully specified tweakey value. - * - * \param key Points to the 256-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-256 but - * more memory-efficient. - */ -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-skinnyutil.h b/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-skinnyutil.h deleted file mode 100644 index 83136cb..0000000 --- a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-skinnyutil.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNYUTIL_H -#define LW_INTERNAL_SKINNYUTIL_H - -/** - * \file internal-skinnyutil.h - * \brief Utilities to help implement SKINNY and its variants. - */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @cond skinnyutil */ - -/* Utilities for implementing SKINNY-128 */ - -#define skinny128_LFSR2(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ - (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ - } while (0) - - -#define skinny128_LFSR3(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ - (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) -#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) - -#define skinny128_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint32_t row2 = tk[2]; \ - uint32_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 16) | (row3 >> 16); \ - tk[0] = ((row2 >> 8) & 0x000000FFU) | \ - ((row2 << 16) & 0x00FF0000U) | \ - ( row3 & 0xFF00FF00U); \ - tk[1] = ((row2 >> 16) & 0x000000FFU) | \ - (row2 & 0xFF000000U) | \ - ((row3 << 8) & 0x0000FF00U) | \ - ( row3 & 0x00FF0000U); \ - } while (0) - -#define skinny128_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint32_t row0 = tk[0]; \ - uint32_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 >> 16) & 0x000000FFU) | \ - ((row0 << 8) & 0x0000FF00U) | \ - ((row1 << 16) & 0x00FF0000U) | \ - ( row1 & 0xFF000000U); \ - tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ - ((row0 << 16) & 0xFF000000U) | \ - ((row1 >> 16) & 0x000000FFU) | \ - ((row1 << 8) & 0x00FF0000U); \ - } while (0) - -/* - * Apply the SKINNY sbox. The original version from the specification is - * equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE(x) - * ((((x) & 0x01010101U) << 2) | - * (((x) & 0x06060606U) << 5) | - * (((x) & 0x20202020U) >> 5) | - * (((x) & 0xC8C8C8C8U) >> 2) | - * (((x) & 0x10101010U) >> 1)) - * - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * return SBOX_SWAP(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. 
- */ -#define skinny128_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ - y = (((x >> 5) & (x << 1)) & 0x04040404U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ - x = ((x & 0x08080808U) << 1) | \ - ((x & 0x32323232U) << 2) | \ - ((x & 0x01010101U) << 5) | \ - ((x & 0x80808080U) >> 6) | \ - ((x & 0x40404040U) >> 4) | \ - ((x & 0x04040404U) >> 2); \ -} while (0) - -/* - * Apply the inverse of the SKINNY sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE_INV(x) - * ((((x) & 0x08080808U) << 1) | - * (((x) & 0x32323232U) << 2) | - * (((x) & 0x01010101U) << 5) | - * (((x) & 0xC0C0C0C0U) >> 5) | - * (((x) & 0x04040404U) >> 2)) - * - * x = SBOX_SWAP(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_inv_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ - x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ - y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ - x = ((x & 0x01010101U) << 2) | \ - ((x & 0x04040404U) << 4) | \ - ((x & 0x02020202U) << 6) | \ - ((x & 0x20202020U) >> 5) | \ - ((x & 0xC8C8C8C8U) >> 2) | \ - ((x & 0x10101010U) >> 1); \ -} while (0) - -/* Utilities for implementing SKINNY-64 */ - -#define skinny64_LFSR2(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ - } while (0) - -#define skinny64_LFSR3(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) -#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) - -#define skinny64_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint16_t row2 = tk[2]; \ - uint16_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 8) | (row3 >> 8); \ - tk[0] = ((row2 << 4) & 0xF000U) | \ - ((row2 >> 8) & 0x00F0U) | \ - ( row3 & 0x0F0FU); \ - tk[1] = ((row2 << 8) & 0xF000U) | \ - ((row3 >> 4) & 0x0F00U) | \ - ( row3 & 0x00F0U) | \ - ( row2 & 0x000FU); \ - } while (0) - 
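/*
 * --- Editor's illustrative sketch (not part of the patch above) ---
 * A minimal host-side round-trip check of two properties documented in the
 * comments of internal-skinnyutil.h: skinny128_inv_sbox() undoes
 * skinny128_sbox(), and skinny64_LFSR3() is stated to be the inverse of
 * skinny64_LFSR2().  It assumes the macros defined earlier in this header
 * are in scope (for example by including internal-skinnyutil.h from the
 * rhys tree); the helper name skinnyutil_selfcheck() is hypothetical and
 * introduced here only for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static int skinnyutil_selfcheck(void)
{
    /* Four packed byte-wise S-box inputs in one 32-bit word */
    uint32_t w32 = 0x12345678U;
    uint32_t x32 = w32;
    skinny128_sbox(x32);            /* forward bit-sliced S-box layer */
    skinny128_inv_sbox(x32);        /* inverse layer should restore the word */
    if (x32 != w32)
        return 0;

    /* Four packed 4-bit tweakey cells in one 16-bit word */
    uint16_t w16 = 0xBEEFU;
    uint16_t x16 = w16;
    skinny64_LFSR2(x16);            /* TK2 LFSR step */
    skinny64_LFSR3(x16);            /* documented as the inverse of LFSR2 */
    return x16 == w16;
}

int main(void)
{
    printf("skinnyutil round-trip: %s\n",
           skinnyutil_selfcheck() ? "ok" : "FAILED");
    return 0;
}

/*
 * Usage note: compile this sketch together with the macro definitions shown
 * above and run it; if the inverse relationships hold as documented, both
 * checks are expected to report ok.
 */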
-#define skinny64_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint16_t row0 = tk[0]; \ - uint16_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 << 8) & 0xF000U) | \ - ((row0 >> 4) & 0x0F00U) | \ - ((row1 >> 8) & 0x00F0U) | \ - ( row1 & 0x000FU); \ - tk[3] = ((row1 << 8) & 0xF000U) | \ - ((row0 << 8) & 0x0F00U) | \ - ((row1 >> 4) & 0x00F0U) | \ - ((row0 >> 8) & 0x000FU); \ - } while (0) - -/* - * Apply the SKINNY-64 sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT(x) - * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_SHIFT steps to be performed with one final rotation. - * This reduces the number of required shift operations from 14 to 10. - * - * We can further reduce the number of NOT operations from 4 to 2 - * using the technique from https://github.com/kste/skinny_avx to - * convert NOR-XOR operations into AND-XOR operations by converting - * the S-box into its NOT-inverse. - */ -#define skinny64_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ - x = ~x; \ - x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ -} while (0) - -/* - * Apply the inverse of the SKINNY-64 sbox. The original version - * from the specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT_INV(x) - * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * return SBOX_MIX(x); - */ -#define skinny64_inv_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = ~x; \ - x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ -} while (0) - -/** @endcond */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-util.h b/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - 
(ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. 
This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/romulus.c b/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/romulus.c deleted file mode 100644 index bb19cc5..0000000 --- a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/romulus.c +++ /dev/null @@ -1,1974 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "romulus.h" -#include "internal-skinny128.h" -#include "internal-util.h" -#include - -aead_cipher_t const romulus_n1_cipher = { - "Romulus-N1", - ROMULUS_KEY_SIZE, - ROMULUS1_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_n1_aead_encrypt, - romulus_n1_aead_decrypt -}; - -aead_cipher_t const romulus_n2_cipher = { - "Romulus-N2", - ROMULUS_KEY_SIZE, - ROMULUS2_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_n2_aead_encrypt, - romulus_n2_aead_decrypt -}; - -aead_cipher_t const romulus_n3_cipher = { - "Romulus-N3", - ROMULUS_KEY_SIZE, - ROMULUS3_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_n3_aead_encrypt, - romulus_n3_aead_decrypt -}; - -aead_cipher_t const romulus_m1_cipher = { - "Romulus-M1", - ROMULUS_KEY_SIZE, - ROMULUS1_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_m1_aead_encrypt, - romulus_m1_aead_decrypt -}; - -aead_cipher_t const romulus_m2_cipher = { - "Romulus-M2", - ROMULUS_KEY_SIZE, - ROMULUS2_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_m2_aead_encrypt, - romulus_m2_aead_decrypt -}; - -aead_cipher_t const romulus_m3_cipher = { - "Romulus-M3", - ROMULUS_KEY_SIZE, - ROMULUS3_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_m3_aead_encrypt, - romulus_m3_aead_decrypt -}; - -/** - * \brief Limit on the number of bytes of message or associated data (128Mb). - * - * Romulus-N1 and Romulus-M1 use a 56-bit block counter which allows for - * payloads well into the petabyte range. It is unlikely that an embedded - * device will have that much memory to store a contiguous packet! - * - * Romulus-N2 and Romulus-M2 use a 48-bit block counter but the upper - * 24 bits are difficult to modify in the key schedule. So we only - * update the low 24 bits and leave the high 24 bits fixed. - * - * Romulus-N3 and Romulus-M3 use a 24-bit block counter. - * - * For all algorithms, we limit the block counter to 2^23 so that the block - * counter can never exceed 2^24 - 1. - */ -#define ROMULUS_DATA_LIMIT \ - ((unsigned long long)((1ULL << 23) * SKINNY_128_BLOCK_SIZE)) - -/** - * \brief Initializes the key schedule for Romulus-N1 or Romulus-M1. - * - * \param ks Points to the key schedule to initialize. - * \param k Points to the 16 bytes of the key. - * \param npub Points to the 16 bytes of the nonce. May be NULL - * if the nonce will be updated on the fly. - */ -static void romulus1_init - (skinny_128_384_key_schedule_t *ks, - const unsigned char *k, const unsigned char *npub) -{ - unsigned char TK[48]; - TK[0] = 0x01; /* Initialize the 56-bit LFSR counter */ - memset(TK + 1, 0, 15); - if (npub) - memcpy(TK + 16, npub, 16); - else - memset(TK + 16, 0, 16); - memcpy(TK + 32, k, 16); - skinny_128_384_init(ks, TK); -} - -/** - * \brief Initializes the key schedule for Romulus-N2 or Romulus-M2. - * - * \param ks Points to the key schedule to initialize. - * \param k Points to the 16 bytes of the key. - * \param npub Points to the 12 bytes of the nonce. May be NULL - * if the nonce will be updated on the fly. 
- */ -static void romulus2_init - (skinny_128_384_key_schedule_t *ks, - const unsigned char *k, const unsigned char *npub) -{ - unsigned char TK[48]; - TK[0] = 0x01; /* Initialize the low 24 bits of the LFSR counter */ - if (npub) { - TK[1] = TK[2] = TK[3] = 0; - memcpy(TK + 4, npub, 12); - } else { - memset(TK + 1, 0, 15); - } - memcpy(TK + 16, k, 16); - TK[32] = 0x01; /* Initialize the high 24 bits of the LFSR counter */ - memset(TK + 33, 0, 15); - skinny_128_384_init(ks, TK); -} - -/** - * \brief Initializes the key schedule for Romulus-N3 or Romulus-M3. - * - * \param ks Points to the key schedule to initialize. - * \param k Points to the 16 bytes of the key. - * \param npub Points to the 12 bytes of the nonce. May be NULL - * if the nonce will be updated on the fly. - */ -static void romulus3_init - (skinny_128_256_key_schedule_t *ks, - const unsigned char *k, const unsigned char *npub) -{ - unsigned char TK[32]; - TK[0] = 0x01; /* Initialize the 24-bit LFSR counter */ - if (npub) { - TK[1] = TK[2] = TK[3] = 0; - memcpy(TK + 4, npub, 12); - } else { - memset(TK + 1, 0, 15); - } - memcpy(TK + 16, k, 16); - skinny_128_256_init(ks, TK); -} - -/** - * \brief Sets the domain separation value for Romulus-N1 and M1. - * - * \param ks The key schedule to set the domain separation value into. - * \param domain The domain separation value. - */ -#define romulus1_set_domain(ks, domain) ((ks)->TK1[7] = (domain)) - -/** - * \brief Sets the domain separation value for Romulus-N2 and M2. - * - * \param ks The key schedule to set the domain separation value into. - * \param domain The domain separation value. - */ -#define romulus2_set_domain(ks, domain) ((ks)->TK1[3] = (domain)) - -/** - * \brief Sets the domain separation value for Romulus-N3 and M3. - * - * \param ks The key schedule to set the domain separation value into. - * \param domain The domain separation value. - */ -#define romulus3_set_domain(ks, domain) ((ks)->TK1[3] = (domain)) - -/** - * \brief Updates the 56-bit LFSR block counter for Romulus-N1 and M1. - * - * \param TK1 Points to the TK1 part of the key schedule containing the LFSR. - */ -STATIC_INLINE void romulus1_update_counter(uint8_t TK1[16]) -{ - uint8_t mask = (uint8_t)(((int8_t)(TK1[6])) >> 7); - TK1[6] = (TK1[6] << 1) | (TK1[5] >> 7); - TK1[5] = (TK1[5] << 1) | (TK1[4] >> 7); - TK1[4] = (TK1[4] << 1) | (TK1[3] >> 7); - TK1[3] = (TK1[3] << 1) | (TK1[2] >> 7); - TK1[2] = (TK1[2] << 1) | (TK1[1] >> 7); - TK1[1] = (TK1[1] << 1) | (TK1[0] >> 7); - TK1[0] = (TK1[0] << 1) ^ (mask & 0x95); -} - -/** - * \brief Updates the 24-bit LFSR block counter for Romulus-N2 or M2. - * - * \param TK1 Points to the TK1 part of the key schedule containing the LFSR. - * - * For Romulus-N2 and Romulus-M2 this will only update the low 24 bits of - * the 48-bit LFSR. The high 24 bits are fixed due to ROMULUS_DATA_LIMIT. - */ -STATIC_INLINE void romulus2_update_counter(uint8_t TK1[16]) -{ - uint8_t mask = (uint8_t)(((int8_t)(TK1[2])) >> 7); - TK1[2] = (TK1[2] << 1) | (TK1[1] >> 7); - TK1[1] = (TK1[1] << 1) | (TK1[0] >> 7); - TK1[0] = (TK1[0] << 1) ^ (mask & 0x1B); -} - -/** - * \brief Updates the 24-bit LFSR block counter for Romulus-N3 or M3. - * - * \param TK1 Points to the TK1 part of the key schedule containing the LFSR. - */ -#define romulus3_update_counter(TK1) romulus2_update_counter((TK1)) - -/** - * \brief Process the asssociated data for Romulus-N1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. 
- * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - */ -static void romulus_n1_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char temp; - - /* Handle the special case of no associated data */ - if (adlen == 0) { - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x1A); - skinny_128_384_encrypt_tk2(ks, S, S, npub); - return; - } - - /* Process all double blocks except the last */ - romulus1_set_domain(ks, 0x08); - while (adlen > 32) { - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - ad += 32; - adlen -= 32; - } - - /* Pad and process the left-over blocks */ - romulus1_update_counter(ks->TK1); - temp = (unsigned)adlen; - if (temp == 32) { - /* Left-over complete double block */ - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x18); - } else if (temp > 16) { - /* Left-over partial double block */ - unsigned char pad[16]; - temp -= 16; - lw_xor_block(S, ad, 16); - memcpy(pad, ad + 16, temp); - memset(pad + temp, 0, 15 - temp); - pad[15] = temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x1A); - } else if (temp == 16) { - /* Left-over complete single block */ - lw_xor_block(S, ad, temp); - romulus1_set_domain(ks, 0x18); - } else { - /* Left-over partial single block */ - lw_xor_block(S, ad, temp); - S[15] ^= temp; - romulus1_set_domain(ks, 0x1A); - } - skinny_128_384_encrypt_tk2(ks, S, S, npub); -} - -/** - * \brief Process the asssociated data for Romulus-N2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
- */ -static void romulus_n2_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char temp; - - /* Handle the special case of no associated data */ - if (adlen == 0) { - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x5A); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all double blocks except the last */ - romulus2_set_domain(ks, 0x48); - while (adlen > 28) { - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Pad and process the left-over blocks */ - romulus2_update_counter(ks->TK1); - temp = (unsigned)adlen; - if (temp == 28) { - /* Left-over complete double block */ - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x58); - } else if (temp > 16) { - /* Left-over partial double block */ - temp -= 16; - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp); - ks->TK1[15] = temp; - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x5A); - } else if (temp == 16) { - /* Left-over complete single block */ - lw_xor_block(S, ad, temp); - romulus2_set_domain(ks, 0x58); - } else { - /* Left-over partial single block */ - lw_xor_block(S, ad, temp); - S[15] ^= temp; - romulus2_set_domain(ks, 0x5A); - } - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Process the asssociated data for Romulus-N3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
- */ -static void romulus_n3_process_ad - (skinny_128_256_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char temp; - - /* Handle the special case of no associated data */ - if (adlen == 0) { - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x9A); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_256_encrypt(ks, S, S); - return; - } - - /* Process all double blocks except the last */ - romulus3_set_domain(ks, 0x88); - while (adlen > 28) { - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Pad and process the left-over blocks */ - romulus3_update_counter(ks->TK1); - temp = (unsigned)adlen; - if (temp == 28) { - /* Left-over complete double block */ - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x98); - } else if (temp > 16) { - /* Left-over partial double block */ - temp -= 16; - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp); - ks->TK1[15] = temp; - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x9A); - } else if (temp == 16) { - /* Left-over complete single block */ - lw_xor_block(S, ad, temp); - romulus3_set_domain(ks, 0x98); - } else { - /* Left-over partial single block */ - lw_xor_block(S, ad, temp); - S[15] ^= temp; - romulus3_set_domain(ks, 0x9A); - } - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Determine the domain separation value to use on the last - * block of the associated data processing. - * - * \param adlen Length of the associated data in bytes. - * \param mlen Length of the message in bytes. - * \param t Size of the second half of a double block; 12 or 16. - * - * \return The domain separation bits to use to finalize the last block. - */ -static uint8_t romulus_m_final_ad_domain - (unsigned long long adlen, unsigned long long mlen, unsigned t) -{ - uint8_t domain = 0; - unsigned split = 16U; - unsigned leftover; - - /* Determine which domain bits we need based on the length of the ad */ - if (adlen == 0) { - /* No associated data, so only 1 block with padding */ - domain ^= 0x02; - split = t; - } else { - /* Even or odd associated data length? */ - leftover = (unsigned)(adlen % (16U + t)); - if (leftover == 0) { - /* Even with a full double block at the end */ - domain ^= 0x08; - } else if (leftover < split) { - /* Odd with a partial single block at the end */ - domain ^= 0x02; - split = t; - } else if (leftover > split) { - /* Even with a partial double block at the end */ - domain ^= 0x0A; - } else { - /* Odd with a full single block at the end */ - split = t; - } - } - - /* Determine which domain bits we need based on the length of the message */ - if (mlen == 0) { - /* No message, so only 1 block with padding */ - domain ^= 0x01; - } else { - /* Even or odd message length? 
*/ - leftover = (unsigned)(mlen % (16U + t)); - if (leftover == 0) { - /* Even with a full double block at the end */ - domain ^= 0x04; - } else if (leftover < split) { - /* Odd with a partial single block at the end */ - domain ^= 0x01; - } else if (leftover > split) { - /* Even with a partial double block at the end */ - domain ^= 0x05; - } - } - return domain; -} - -/** - * \brief Process the asssociated data for Romulus-M1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - * \param m Points to the message plaintext. - * \param mlen Length of the message plaintext. - */ -static void romulus_m1_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char pad[16]; - uint8_t final_domain = 0x30; - unsigned temp; - - /* Determine the domain separator to use on the final block */ - final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 16); - - /* Process all associated data double blocks except the last */ - romulus1_set_domain(ks, 0x28); - while (adlen > 32) { - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - ad += 32; - adlen -= 32; - } - - /* Process the last associated data double block */ - temp = (unsigned)adlen; - if (temp == 32) { - /* Last associated data double block is full */ - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - } else if (temp > 16) { - /* Last associated data double block is partial */ - temp -= 16; - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(pad, ad + 16, temp); - memset(pad + temp, 0, sizeof(pad) - temp - 1); - pad[sizeof(pad) - 1] = (unsigned char)temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - romulus1_update_counter(ks->TK1); - } else { - /* Last associated data block is single. 
Needs to be combined - * with the first block of the message payload */ - romulus1_set_domain(ks, 0x2C); - romulus1_update_counter(ks->TK1); - if (temp == 16) { - lw_xor_block(S, ad, 16); - } else { - lw_xor_block(S, ad, temp); - S[15] ^= (unsigned char)temp; - } - if (mlen > 16) { - skinny_128_384_encrypt_tk2(ks, S, S, m); - romulus1_update_counter(ks->TK1); - m += 16; - mlen -= 16; - } else if (mlen == 16) { - skinny_128_384_encrypt_tk2(ks, S, S, m); - m += 16; - mlen -= 16; - } else { - temp = (unsigned)mlen; - memcpy(pad, m, temp); - memset(pad + temp, 0, sizeof(pad) - temp - 1); - pad[sizeof(pad) - 1] = (unsigned char)temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - mlen = 0; - } - } - - /* Process all message double blocks except the last */ - romulus1_set_domain(ks, 0x2C); - while (mlen > 32) { - romulus1_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - skinny_128_384_encrypt_tk2(ks, S, S, m + 16); - romulus1_update_counter(ks->TK1); - m += 32; - mlen -= 32; - } - - /* Process the last message double block */ - temp = (unsigned)mlen; - if (temp == 32) { - /* Last message double block is full */ - romulus1_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - skinny_128_384_encrypt_tk2(ks, S, S, m + 16); - } else if (temp > 16) { - /* Last message double block is partial */ - temp -= 16; - romulus1_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(pad, m + 16, temp); - memset(pad + temp, 0, sizeof(pad) - temp - 1); - pad[sizeof(pad) - 1] = (unsigned char)temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - } else if (temp == 16) { - /* Last message single block is full */ - lw_xor_block(S, m, 16); - } else if (temp > 0) { - /* Last message single block is partial */ - lw_xor_block(S, m, temp); - S[15] ^= (unsigned char)temp; - } - - /* Process the last partial block */ - romulus1_set_domain(ks, final_domain); - romulus1_update_counter(ks->TK1); - skinny_128_384_encrypt_tk2(ks, S, S, npub); -} - -/** - * \brief Process the asssociated data for Romulus-M2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - * \param m Points to the message plaintext. - * \param mlen Length of the message plaintext. 
- */ -static void romulus_m2_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *m, unsigned long long mlen) -{ - uint8_t final_domain = 0x70; - unsigned temp; - - /* Determine the domain separator to use on the final block */ - final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 12); - - /* Process all associated data double blocks except the last */ - romulus2_set_domain(ks, 0x68); - while (adlen > 28) { - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Process the last associated data double block */ - temp = (unsigned)adlen; - if (temp == 28) { - /* Last associated data double block is full */ - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - } else if (temp > 16) { - /* Last associated data double block is partial */ - temp -= 16; - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - } else { - /* Last associated data block is single. Needs to be combined - * with the first block of the message payload */ - romulus2_set_domain(ks, 0x6C); - romulus2_update_counter(ks->TK1); - if (temp == 16) { - lw_xor_block(S, ad, 16); - } else { - lw_xor_block(S, ad, temp); - S[15] ^= (unsigned char)temp; - } - if (mlen > 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - m += 12; - mlen -= 12; - } else if (mlen == 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_384_encrypt(ks, S, S); - m += 12; - mlen -= 12; - } else { - temp = (unsigned)mlen; - memcpy(ks->TK1 + 4, m, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_384_encrypt(ks, S, S); - mlen = 0; - } - } - - /* Process all message double blocks except the last */ - romulus2_set_domain(ks, 0x6C); - while (mlen > 28) { - romulus2_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - m += 28; - mlen -= 28; - } - - /* Process the last message double block */ - temp = (unsigned)mlen; - if (temp == 28) { - /* Last message double block is full */ - romulus2_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_384_encrypt(ks, S, S); - } else if (temp > 16) { - /* Last message double block is partial */ - temp -= 16; - romulus2_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_384_encrypt(ks, S, S); - } else if (temp == 16) { - /* Last message single block is full */ - lw_xor_block(S, m, 16); - } else if (temp > 0) { - /* Last message single block is partial */ - lw_xor_block(S, m, temp); - S[15] ^= (unsigned char)temp; - } - - /* Process the last partial block */ - romulus2_set_domain(ks, final_domain); - romulus2_update_counter(ks->TK1); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Process the asssociated data 
for Romulus-M3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - * \param m Points to the message plaintext. - * \param mlen Length of the message plaintext. - */ -static void romulus_m3_process_ad - (skinny_128_256_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *m, unsigned long long mlen) -{ - uint8_t final_domain = 0xB0; - unsigned temp; - - /* Determine the domain separator to use on the final block */ - final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 12); - - /* Process all associated data double blocks except the last */ - romulus3_set_domain(ks, 0xA8); - while (adlen > 28) { - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Process the last associated data double block */ - temp = (unsigned)adlen; - if (temp == 28) { - /* Last associated data double block is full */ - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - } else if (temp > 16) { - /* Last associated data double block is partial */ - temp -= 16; - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - } else { - /* Last associated data block is single. 
Needs to be combined - * with the first block of the message payload */ - romulus3_set_domain(ks, 0xAC); - romulus3_update_counter(ks->TK1); - if (temp == 16) { - lw_xor_block(S, ad, 16); - } else { - lw_xor_block(S, ad, temp); - S[15] ^= (unsigned char)temp; - } - if (mlen > 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - m += 12; - mlen -= 12; - } else if (mlen == 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_256_encrypt(ks, S, S); - m += 12; - mlen -= 12; - } else { - temp = (unsigned)mlen; - memcpy(ks->TK1 + 4, m, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_256_encrypt(ks, S, S); - mlen = 0; - } - } - - /* Process all message double blocks except the last */ - romulus3_set_domain(ks, 0xAC); - while (mlen > 28) { - romulus3_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - m += 28; - mlen -= 28; - } - - /* Process the last message double block */ - temp = (unsigned)mlen; - if (temp == 28) { - /* Last message double block is full */ - romulus3_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_256_encrypt(ks, S, S); - } else if (temp > 16) { - /* Last message double block is partial */ - temp -= 16; - romulus3_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_256_encrypt(ks, S, S); - } else if (temp == 16) { - /* Last message single block is full */ - lw_xor_block(S, m, 16); - } else if (temp > 0) { - /* Last message single block is partial */ - lw_xor_block(S, m, temp); - S[15] ^= (unsigned char)temp; - } - - /* Process the last partial block */ - romulus3_set_domain(ks, final_domain); - romulus3_update_counter(ks->TK1); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Applies the Romulus rho function. - * - * \param S The rolling Romulus state. - * \param C Ciphertext message output block. - * \param M Plaintext message input block. - */ -STATIC_INLINE void romulus_rho - (unsigned char S[16], unsigned char C[16], const unsigned char M[16]) -{ - unsigned index; - for (index = 0; index < 16; ++index) { - unsigned char s = S[index]; - unsigned char m = M[index]; - S[index] ^= m; - C[index] = m ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - } -} - -/** - * \brief Applies the inverse of the Romulus rho function. - * - * \param S The rolling Romulus state. - * \param M Plaintext message output block. - * \param C Ciphertext message input block. - */ -STATIC_INLINE void romulus_rho_inverse - (unsigned char S[16], unsigned char M[16], const unsigned char C[16]) -{ - unsigned index; - for (index = 0; index < 16; ++index) { - unsigned char s = S[index]; - unsigned char m = C[index] ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - S[index] ^= m; - M[index] = m; - } -} - -/** - * \brief Applies the Romulus rho function to a short block. - * - * \param S The rolling Romulus state. - * \param C Ciphertext message output block. - * \param M Plaintext message input block. - * \param len Length of the short block, must be less than 16. 
- */ -STATIC_INLINE void romulus_rho_short - (unsigned char S[16], unsigned char C[16], - const unsigned char M[16], unsigned len) -{ - unsigned index; - for (index = 0; index < len; ++index) { - unsigned char s = S[index]; - unsigned char m = M[index]; - S[index] ^= m; - C[index] = m ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - } - S[15] ^= (unsigned char)len; /* Padding */ -} - -/** - * \brief Applies the inverse of the Romulus rho function to a short block. - * - * \param S The rolling Romulus state. - * \param M Plaintext message output block. - * \param C Ciphertext message input block. - * \param len Length of the short block, must be less than 16. - */ -STATIC_INLINE void romulus_rho_inverse_short - (unsigned char S[16], unsigned char M[16], - const unsigned char C[16], unsigned len) -{ - unsigned index; - for (index = 0; index < len; ++index) { - unsigned char s = S[index]; - unsigned char m = C[index] ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - S[index] ^= m; - M[index] = m; - } - S[15] ^= (unsigned char)len; /* Padding */ -} - -/** - * \brief Encrypts a plaintext message with Romulus-N1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n1_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no plaintext */ - if (mlen == 0) { - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x15); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus1_set_domain(ks, 0x04); - while (mlen > 16) { - romulus_rho(S, c, m); - romulus1_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus1_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_short(S, c, m, temp); - romulus1_set_domain(ks, 0x15); - } else { - romulus_rho(S, c, m); - romulus1_set_domain(ks, 0x14); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-N1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. 
- */ -static void romulus_n1_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no ciphertext */ - if (mlen == 0) { - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x15); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus1_set_domain(ks, 0x04); - while (mlen > 16) { - romulus_rho_inverse(S, m, c); - romulus1_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus1_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_inverse_short(S, m, c, temp); - romulus1_set_domain(ks, 0x15); - } else { - romulus_rho_inverse(S, m, c); - romulus1_set_domain(ks, 0x14); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Encrypts a plaintext message with Romulus-N2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n2_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no plaintext */ - if (mlen == 0) { - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x55); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus2_set_domain(ks, 0x44); - while (mlen > 16) { - romulus_rho(S, c, m); - romulus2_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus2_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_short(S, c, m, temp); - romulus2_set_domain(ks, 0x55); - } else { - romulus_rho(S, c, m); - romulus2_set_domain(ks, 0x54); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-N2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n2_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no ciphertext */ - if (mlen == 0) { - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x55); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus2_set_domain(ks, 0x44); - while (mlen > 16) { - romulus_rho_inverse(S, m, c); - romulus2_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus2_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_inverse_short(S, m, c, temp); - romulus2_set_domain(ks, 0x55); - } else { - romulus_rho_inverse(S, m, c); - romulus2_set_domain(ks, 0x54); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Encrypts a plaintext message with Romulus-N3. - * - * \param ks Points to the key schedule. 
- * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n3_encrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no plaintext */ - if (mlen == 0) { - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x95); - skinny_128_256_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus3_set_domain(ks, 0x84); - while (mlen > 16) { - romulus_rho(S, c, m); - romulus3_update_counter(ks->TK1); - skinny_128_256_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus3_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_short(S, c, m, temp); - romulus3_set_domain(ks, 0x95); - } else { - romulus_rho(S, c, m); - romulus3_set_domain(ks, 0x94); - } - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-N3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n3_decrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no ciphertext */ - if (mlen == 0) { - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x95); - skinny_128_256_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus3_set_domain(ks, 0x84); - while (mlen > 16) { - romulus_rho_inverse(S, m, c); - romulus3_update_counter(ks->TK1); - skinny_128_256_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus3_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_inverse_short(S, m, c, temp); - romulus3_set_domain(ks, 0x95); - } else { - romulus_rho_inverse(S, m, c); - romulus3_set_domain(ks, 0x94); - } - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Encrypts a plaintext message with Romulus-M1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m1_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus1_set_domain(ks, 0x24); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho(S, c, m); - romulus1_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_short(S, c, m, (unsigned)mlen); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-M1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. 
- * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m1_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus1_set_domain(ks, 0x24); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse(S, m, c); - romulus1_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse_short(S, m, c, (unsigned)mlen); -} - -/** - * \brief Encrypts a plaintext message with Romulus-M2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m2_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus2_set_domain(ks, 0x64); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho(S, c, m); - romulus2_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_short(S, c, m, (unsigned)mlen); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-M2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m2_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus2_set_domain(ks, 0x64); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse(S, m, c); - romulus2_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse_short(S, m, c, (unsigned)mlen); -} - -/** - * \brief Encrypts a plaintext message with Romulus-M3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m3_encrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus3_set_domain(ks, 0xA4); - while (mlen > 16) { - skinny_128_256_encrypt(ks, S, S); - romulus_rho(S, c, m); - romulus3_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_256_encrypt(ks, S, S); - romulus_rho_short(S, c, m, (unsigned)mlen); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-M3. 
- * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m3_decrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus3_set_domain(ks, 0xA4); - while (mlen > 16) { - skinny_128_256_encrypt(ks, S, S); - romulus_rho_inverse(S, m, c); - romulus3_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_256_encrypt(ks, S, S); - romulus_rho_inverse_short(S, m, c, (unsigned)mlen); -} - -/** - * \brief Generates the authentication tag from the rolling Romulus state. - * - * \param T Buffer to receive the generated tag; can be the same as S. - * \param S The rolling Romulus state. - */ -STATIC_INLINE void romulus_generate_tag - (unsigned char T[16], const unsigned char S[16]) -{ - unsigned index; - for (index = 0; index < 16; ++index) { - unsigned char s = S[index]; - T[index] = (s >> 1) ^ (s & 0x80) ^ (s << 7); - } -} - -int romulus_n1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n1_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Encrypts the plaintext to produce the ciphertext */ - romulus_n1_encrypt(&ks, S, c, m, mlen); - - /* Generate the authentication tag */ - romulus_generate_tag(c + mlen, S); - return 0; -} - -int romulus_n1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n1_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= ROMULUS_TAG_SIZE; - romulus_n1_decrypt(&ks, S, m, c, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_n2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n2_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Encrypts the plaintext to produce the ciphertext */ - romulus_n2_encrypt(&ks, S, c, m, mlen); - - /* Generate the authentication tag */ - romulus_generate_tag(c + mlen, S); - return 0; -} - -int romulus_n2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n2_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= ROMULUS_TAG_SIZE; - romulus_n2_decrypt(&ks, S, m, c, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_n3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n3_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Encrypts the plaintext to produce the ciphertext */ - romulus_n3_encrypt(&ks, S, c, m, mlen); - - /* Generate the authentication tag */ - romulus_generate_tag(c + mlen, S); - return 0; -} - -int romulus_n3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n3_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= ROMULUS_TAG_SIZE; - romulus_n3_decrypt(&ks, S, m, c, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_m1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data and the plaintext message */ - memset(S, 0, sizeof(S)); - romulus_m1_process_ad(&ks, S, npub, ad, adlen, m, mlen); - - /* Generate the authentication tag, which is also the initialization - * vector for the encryption portion of the packet processing */ - romulus_generate_tag(S, S); - memcpy(c + mlen, S, ROMULUS_TAG_SIZE); - - /* Re-initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Encrypt the plaintext to produce the ciphertext */ - romulus_m1_encrypt(&ks, S, c, m, mlen); - return 0; -} - -int romulus_m1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext, using the - * authentication tag as the initialization vector for decryption */ - clen -= ROMULUS_TAG_SIZE; - memcpy(S, c + clen, ROMULUS_TAG_SIZE); - romulus_m1_decrypt(&ks, S, m, c, clen); - - /* Re-initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_m1_process_ad(&ks, S, npub, ad, adlen, m, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_m2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data and the plaintext message */ - memset(S, 0, sizeof(S)); - romulus_m2_process_ad(&ks, S, npub, ad, adlen, m, mlen); - - /* Generate the authentication tag, which is also the initialization - * vector for the encryption portion of the packet processing */ - romulus_generate_tag(S, S); - memcpy(c + mlen, S, ROMULUS_TAG_SIZE); - - /* Re-initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Encrypt the plaintext to produce the ciphertext */ - romulus_m2_encrypt(&ks, S, c, m, mlen); - return 0; -} - -int romulus_m2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext, using the - * authentication tag as the initialization vector for decryption */ - clen -= ROMULUS_TAG_SIZE; - memcpy(S, c + clen, ROMULUS_TAG_SIZE); - romulus_m2_decrypt(&ks, S, m, c, clen); - - /* Re-initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_m2_process_ad(&ks, S, npub, ad, adlen, m, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_m3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data and the plaintext message */ - memset(S, 0, sizeof(S)); - romulus_m3_process_ad(&ks, S, npub, ad, adlen, m, mlen); - - /* Generate the authentication tag, which is also the initialization - * vector for the encryption portion of the packet processing */ - romulus_generate_tag(S, S); - memcpy(c + mlen, S, ROMULUS_TAG_SIZE); - - /* Re-initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Encrypt the plaintext to produce the ciphertext */ - romulus_m3_encrypt(&ks, S, c, m, mlen); - return 0; -} - -int romulus_m3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext, using the - * authentication tag as the initialization vector for decryption */ - clen -= ROMULUS_TAG_SIZE; - memcpy(S, c + clen, ROMULUS_TAG_SIZE); - romulus_m3_decrypt(&ks, S, m, c, clen); - - /* Re-initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_m3_process_ad(&ks, S, npub, ad, adlen, m, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} diff --git a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/romulus.h b/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/romulus.h deleted file mode 100644 index e6da29d..0000000 --- a/romulus/Implementations/crypto_aead/romulusm3/rhys-avr/romulus.h +++ /dev/null @@ -1,476 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ROMULUS_H -#define LWCRYPTO_ROMULUS_H - -#include "aead-common.h" - -/** - * \file romulus.h - * \brief Romulus authenticated encryption algorithm family. - * - * Romulus is a family of authenticated encryption algorithms that - * are built around the SKINNY-128 tweakable block cipher. There - * are six members in the family: - * - * \li Romulus-N1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. This is the - * primary member of the family. - * \li Romulus-N2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li Romulus-N3 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * \li Romulus-M1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li Romulus-M2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li Romulus-M3 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * - * The Romulus-M variants are resistant to nonce reuse as long as the - * combination of the associated data and plaintext is unique. If the - * same associated data and plaintext are reused under the same nonce, - * then the scheme will leak that the same plaintext has been sent for a - * second time but will not reveal the plaintext itself. - * - * References: https://romulusae.github.io/romulus/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all Romulus family members. - */ -#define ROMULUS_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for all Romulus family members. - */ -#define ROMULUS_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Romulus-N1 and Romulus-M1. - */ -#define ROMULUS1_NONCE_SIZE 16 - -/** - * \brief Size of the nonce for Romulus-N2 and Romulus-M2. - */ -#define ROMULUS2_NONCE_SIZE 12 - -/** - * \brief Size of the nonce for Romulus-N3 and Romulus-M3. - */ -#define ROMULUS3_NONCE_SIZE 12 - -/** - * \brief Meta-information block for the Romulus-N1 cipher. - */ -extern aead_cipher_t const romulus_n1_cipher; - -/** - * \brief Meta-information block for the Romulus-N2 cipher. 
- */ -extern aead_cipher_t const romulus_n2_cipher; - -/** - * \brief Meta-information block for the Romulus-N3 cipher. - */ -extern aead_cipher_t const romulus_n3_cipher; - -/** - * \brief Meta-information block for the Romulus-M1 cipher. - */ -extern aead_cipher_t const romulus_m1_cipher; - -/** - * \brief Meta-information block for the Romulus-M2 cipher. - */ -extern aead_cipher_t const romulus_m2_cipher; - -/** - * \brief Meta-information block for the Romulus-M3 cipher. - */ -extern aead_cipher_t const romulus_m3_cipher; - -/** - * \brief Encrypts and authenticates a packet with Romulus-N1. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_n1_aead_decrypt() - */ -int romulus_n1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-N1. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_n1_aead_encrypt() - */ -int romulus_n1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-N2. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_n2_aead_decrypt() - */ -int romulus_n2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-N2. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_n2_aead_encrypt() - */ -int romulus_n2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-N3. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_n3_aead_decrypt() - */ -int romulus_n3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-N3. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. 
- * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_n3_aead_encrypt() - */ -int romulus_n3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-M1. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_m1_aead_decrypt() - */ -int romulus_m1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-M1. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_m1_aead_encrypt() - */ -int romulus_m1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-M2. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_m2_aead_decrypt() - */ -int romulus_m2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-M2. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_m2_aead_encrypt() - */ -int romulus_m2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-M3. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
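All of the romulus_*_aead_encrypt/decrypt pairs documented in this header share the same calling convention, so one round trip illustrates them all. The sketch below is illustrative only and assumes exactly what the comments above state for Romulus-N1: a 16-byte key, a 16-byte nonce, and a 16-byte tag appended to the ciphertext; demo_romulus_n1 is an invented name for the example.

/* Illustrative round trip through the Romulus-N1 interface documented
 * above.  The prototypes are repeated verbatim so the sketch is
 * self-contained; demo_romulus_n1() is not part of the library. */
#include <string.h>

int romulus_n1_aead_encrypt
    (unsigned char *c, unsigned long long *clen,
     const unsigned char *m, unsigned long long mlen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *nsec,
     const unsigned char *npub,
     const unsigned char *k);
int romulus_n1_aead_decrypt
    (unsigned char *m, unsigned long long *mlen,
     unsigned char *nsec,
     const unsigned char *c, unsigned long long clen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *npub,
     const unsigned char *k);

int demo_romulus_n1(void)
{
    unsigned char key[16] = {0};            /* demo key: all zero        */
    unsigned char nonce[16] = {0};          /* must be unique per packet */
    const unsigned char ad[6] = "header";   /* authenticated only        */
    const unsigned char msg[8] = "payload"; /* authenticated + encrypted */
    unsigned char ct[sizeof(msg) + 16];     /* ciphertext || 16-byte tag */
    unsigned char pt[sizeof(msg)];
    unsigned long long clen = 0, mlen = 0;

    if (romulus_n1_aead_encrypt(ct, &clen, msg, sizeof(msg),
                                ad, sizeof(ad), 0, nonce, key) != 0)
        return -1;                          /* parameter error           */
    if (romulus_n1_aead_decrypt(pt, &mlen, 0, ct, clen,
                                ad, sizeof(ad), nonce, key) != 0)
        return -1;                          /* -1 means the tag failed   */
    return memcmp(pt, msg, (size_t)mlen) == 0 ? 0 : -1;
}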
- * - * \sa romulus_m3_aead_decrypt() - */ -int romulus_m3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-M3. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_m3_aead_encrypt() - */ -int romulus_m3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusm3/rhys/internal-skinny128-avr.S b/romulus/Implementations/crypto_aead/romulusm3/rhys/internal-skinny128-avr.S new file mode 100644 index 0000000..d342cd5 --- /dev/null +++ b/romulus/Implementations/crypto_aead/romulusm3/rhys/internal-skinny128-avr.S @@ -0,0 +1,10099 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 256 +table_0: + .byte 101 + .byte 76 + .byte 106 + .byte 66 + .byte 75 + .byte 99 + .byte 67 + .byte 107 + .byte 85 + .byte 117 + .byte 90 + .byte 122 + .byte 83 + .byte 115 + .byte 91 + .byte 123 + .byte 53 + .byte 140 + .byte 58 + .byte 129 + .byte 137 + .byte 51 + .byte 128 + .byte 59 + .byte 149 + .byte 37 + .byte 152 + .byte 42 + .byte 144 + .byte 35 + .byte 153 + .byte 43 + .byte 229 + .byte 204 + .byte 232 + .byte 193 + .byte 201 + .byte 224 + .byte 192 + .byte 233 + .byte 213 + .byte 245 + .byte 216 + .byte 248 + .byte 208 + .byte 240 + .byte 217 + .byte 249 + .byte 165 + .byte 28 + .byte 168 + .byte 18 + .byte 27 + .byte 160 + .byte 19 + .byte 169 + .byte 5 + .byte 181 + .byte 10 + .byte 184 + .byte 3 + .byte 176 + .byte 11 + .byte 185 + .byte 50 + .byte 136 + .byte 60 + .byte 133 + .byte 141 + .byte 52 + .byte 132 + .byte 61 + .byte 145 + .byte 34 + .byte 156 + .byte 44 + .byte 148 + .byte 36 + .byte 157 + .byte 45 + .byte 98 + .byte 74 + .byte 108 + .byte 69 + .byte 77 + .byte 100 + .byte 68 + .byte 109 + .byte 82 + .byte 114 + .byte 92 + .byte 124 + .byte 84 + .byte 116 + .byte 93 + .byte 125 + .byte 161 + .byte 26 + .byte 172 + .byte 21 + .byte 29 + .byte 164 + .byte 20 + .byte 173 + .byte 2 + .byte 177 + .byte 12 + .byte 188 + .byte 4 + .byte 180 + .byte 13 + .byte 189 + .byte 225 + .byte 200 + .byte 236 + .byte 197 + .byte 205 + .byte 228 + .byte 196 + .byte 237 + .byte 
209 + .byte 241 + .byte 220 + .byte 252 + .byte 212 + .byte 244 + .byte 221 + .byte 253 + .byte 54 + .byte 142 + .byte 56 + .byte 130 + .byte 139 + .byte 48 + .byte 131 + .byte 57 + .byte 150 + .byte 38 + .byte 154 + .byte 40 + .byte 147 + .byte 32 + .byte 155 + .byte 41 + .byte 102 + .byte 78 + .byte 104 + .byte 65 + .byte 73 + .byte 96 + .byte 64 + .byte 105 + .byte 86 + .byte 118 + .byte 88 + .byte 120 + .byte 80 + .byte 112 + .byte 89 + .byte 121 + .byte 166 + .byte 30 + .byte 170 + .byte 17 + .byte 25 + .byte 163 + .byte 16 + .byte 171 + .byte 6 + .byte 182 + .byte 8 + .byte 186 + .byte 0 + .byte 179 + .byte 9 + .byte 187 + .byte 230 + .byte 206 + .byte 234 + .byte 194 + .byte 203 + .byte 227 + .byte 195 + .byte 235 + .byte 214 + .byte 246 + .byte 218 + .byte 250 + .byte 211 + .byte 243 + .byte 219 + .byte 251 + .byte 49 + .byte 138 + .byte 62 + .byte 134 + .byte 143 + .byte 55 + .byte 135 + .byte 63 + .byte 146 + .byte 33 + .byte 158 + .byte 46 + .byte 151 + .byte 39 + .byte 159 + .byte 47 + .byte 97 + .byte 72 + .byte 110 + .byte 70 + .byte 79 + .byte 103 + .byte 71 + .byte 111 + .byte 81 + .byte 113 + .byte 94 + .byte 126 + .byte 87 + .byte 119 + .byte 95 + .byte 127 + .byte 162 + .byte 24 + .byte 174 + .byte 22 + .byte 31 + .byte 167 + .byte 23 + .byte 175 + .byte 1 + .byte 178 + .byte 14 + .byte 190 + .byte 7 + .byte 183 + .byte 15 + .byte 191 + .byte 226 + .byte 202 + .byte 238 + .byte 198 + .byte 207 + .byte 231 + .byte 199 + .byte 239 + .byte 210 + .byte 242 + .byte 222 + .byte 254 + .byte 215 + .byte 247 + .byte 223 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 256 +table_1: + .byte 172 + .byte 232 + .byte 104 + .byte 60 + .byte 108 + .byte 56 + .byte 168 + .byte 236 + .byte 170 + .byte 174 + .byte 58 + .byte 62 + .byte 106 + .byte 110 + .byte 234 + .byte 238 + .byte 166 + .byte 163 + .byte 51 + .byte 54 + .byte 102 + .byte 99 + .byte 227 + .byte 230 + .byte 225 + .byte 164 + .byte 97 + .byte 52 + .byte 49 + .byte 100 + .byte 161 + .byte 228 + .byte 141 + .byte 201 + .byte 73 + .byte 29 + .byte 77 + .byte 25 + .byte 137 + .byte 205 + .byte 139 + .byte 143 + .byte 27 + .byte 31 + .byte 75 + .byte 79 + .byte 203 + .byte 207 + .byte 133 + .byte 192 + .byte 64 + .byte 21 + .byte 69 + .byte 16 + .byte 128 + .byte 197 + .byte 130 + .byte 135 + .byte 18 + .byte 23 + .byte 66 + .byte 71 + .byte 194 + .byte 199 + .byte 150 + .byte 147 + .byte 3 + .byte 6 + .byte 86 + .byte 83 + .byte 211 + .byte 214 + .byte 209 + .byte 148 + .byte 81 + .byte 4 + .byte 1 + .byte 84 + .byte 145 + .byte 212 + .byte 156 + .byte 216 + .byte 88 + .byte 12 + .byte 92 + .byte 8 + .byte 152 + .byte 220 + .byte 154 + .byte 158 + .byte 10 + .byte 14 + .byte 90 + .byte 94 + .byte 218 + .byte 222 + .byte 149 + .byte 208 + .byte 80 + .byte 5 + .byte 85 + .byte 0 + .byte 144 + .byte 213 + .byte 146 + .byte 151 + .byte 2 + .byte 7 + .byte 82 + .byte 87 + .byte 210 + .byte 215 + .byte 157 + .byte 217 + .byte 89 + .byte 13 + .byte 93 + .byte 9 + .byte 153 + .byte 221 + .byte 155 + .byte 159 + .byte 11 + .byte 15 + .byte 91 + .byte 95 + .byte 219 + .byte 223 + .byte 22 + .byte 19 + .byte 131 + .byte 134 + .byte 70 + .byte 67 + .byte 195 + .byte 198 + .byte 65 + .byte 20 + .byte 193 + .byte 132 + .byte 17 + .byte 68 + .byte 129 + .byte 196 + .byte 28 + .byte 72 + .byte 200 + .byte 140 + .byte 76 + .byte 24 + .byte 136 + .byte 204 + .byte 26 + .byte 30 + .byte 138 + .byte 142 + .byte 74 + .byte 78 + .byte 202 + .byte 206 + .byte 53 + .byte 96 + .byte 224 + 
.byte 165 + .byte 101 + .byte 48 + .byte 160 + .byte 229 + .byte 50 + .byte 55 + .byte 162 + .byte 167 + .byte 98 + .byte 103 + .byte 226 + .byte 231 + .byte 61 + .byte 105 + .byte 233 + .byte 173 + .byte 109 + .byte 57 + .byte 169 + .byte 237 + .byte 59 + .byte 63 + .byte 171 + .byte 175 + .byte 107 + .byte 111 + .byte 235 + .byte 239 + .byte 38 + .byte 35 + .byte 179 + .byte 182 + .byte 118 + .byte 115 + .byte 243 + .byte 246 + .byte 113 + .byte 36 + .byte 241 + .byte 180 + .byte 33 + .byte 116 + .byte 177 + .byte 244 + .byte 44 + .byte 120 + .byte 248 + .byte 188 + .byte 124 + .byte 40 + .byte 184 + .byte 252 + .byte 42 + .byte 46 + .byte 186 + .byte 190 + .byte 122 + .byte 126 + .byte 250 + .byte 254 + .byte 37 + .byte 112 + .byte 240 + .byte 181 + .byte 117 + .byte 32 + .byte 176 + .byte 245 + .byte 34 + .byte 39 + .byte 178 + .byte 183 + .byte 114 + .byte 119 + .byte 242 + .byte 247 + .byte 45 + .byte 121 + .byte 249 + .byte 189 + .byte 125 + .byte 41 + .byte 185 + .byte 253 + .byte 43 + .byte 47 + .byte 187 + .byte 191 + .byte 123 + .byte 127 + .byte 251 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_2, @object + .size table_2, 256 +table_2: + .byte 0 + .byte 2 + .byte 4 + .byte 6 + .byte 8 + .byte 10 + .byte 12 + .byte 14 + .byte 16 + .byte 18 + .byte 20 + .byte 22 + .byte 24 + .byte 26 + .byte 28 + .byte 30 + .byte 32 + .byte 34 + .byte 36 + .byte 38 + .byte 40 + .byte 42 + .byte 44 + .byte 46 + .byte 48 + .byte 50 + .byte 52 + .byte 54 + .byte 56 + .byte 58 + .byte 60 + .byte 62 + .byte 65 + .byte 67 + .byte 69 + .byte 71 + .byte 73 + .byte 75 + .byte 77 + .byte 79 + .byte 81 + .byte 83 + .byte 85 + .byte 87 + .byte 89 + .byte 91 + .byte 93 + .byte 95 + .byte 97 + .byte 99 + .byte 101 + .byte 103 + .byte 105 + .byte 107 + .byte 109 + .byte 111 + .byte 113 + .byte 115 + .byte 117 + .byte 119 + .byte 121 + .byte 123 + .byte 125 + .byte 127 + .byte 128 + .byte 130 + .byte 132 + .byte 134 + .byte 136 + .byte 138 + .byte 140 + .byte 142 + .byte 144 + .byte 146 + .byte 148 + .byte 150 + .byte 152 + .byte 154 + .byte 156 + .byte 158 + .byte 160 + .byte 162 + .byte 164 + .byte 166 + .byte 168 + .byte 170 + .byte 172 + .byte 174 + .byte 176 + .byte 178 + .byte 180 + .byte 182 + .byte 184 + .byte 186 + .byte 188 + .byte 190 + .byte 193 + .byte 195 + .byte 197 + .byte 199 + .byte 201 + .byte 203 + .byte 205 + .byte 207 + .byte 209 + .byte 211 + .byte 213 + .byte 215 + .byte 217 + .byte 219 + .byte 221 + .byte 223 + .byte 225 + .byte 227 + .byte 229 + .byte 231 + .byte 233 + .byte 235 + .byte 237 + .byte 239 + .byte 241 + .byte 243 + .byte 245 + .byte 247 + .byte 249 + .byte 251 + .byte 253 + .byte 255 + .byte 1 + .byte 3 + .byte 5 + .byte 7 + .byte 9 + .byte 11 + .byte 13 + .byte 15 + .byte 17 + .byte 19 + .byte 21 + .byte 23 + .byte 25 + .byte 27 + .byte 29 + .byte 31 + .byte 33 + .byte 35 + .byte 37 + .byte 39 + .byte 41 + .byte 43 + .byte 45 + .byte 47 + .byte 49 + .byte 51 + .byte 53 + .byte 55 + .byte 57 + .byte 59 + .byte 61 + .byte 63 + .byte 64 + .byte 66 + .byte 68 + .byte 70 + .byte 72 + .byte 74 + .byte 76 + .byte 78 + .byte 80 + .byte 82 + .byte 84 + .byte 86 + .byte 88 + .byte 90 + .byte 92 + .byte 94 + .byte 96 + .byte 98 + .byte 100 + .byte 102 + .byte 104 + .byte 106 + .byte 108 + .byte 110 + .byte 112 + .byte 114 + .byte 116 + .byte 118 + .byte 120 + .byte 122 + .byte 124 + .byte 126 + .byte 129 + .byte 131 + .byte 133 + .byte 135 + .byte 137 + .byte 139 + .byte 141 + .byte 143 + .byte 145 + .byte 147 + .byte 149 + .byte 151 + .byte 153 + 
.byte 155 + .byte 157 + .byte 159 + .byte 161 + .byte 163 + .byte 165 + .byte 167 + .byte 169 + .byte 171 + .byte 173 + .byte 175 + .byte 177 + .byte 179 + .byte 181 + .byte 183 + .byte 185 + .byte 187 + .byte 189 + .byte 191 + .byte 192 + .byte 194 + .byte 196 + .byte 198 + .byte 200 + .byte 202 + .byte 204 + .byte 206 + .byte 208 + .byte 210 + .byte 212 + .byte 214 + .byte 216 + .byte 218 + .byte 220 + .byte 222 + .byte 224 + .byte 226 + .byte 228 + .byte 230 + .byte 232 + .byte 234 + .byte 236 + .byte 238 + .byte 240 + .byte 242 + .byte 244 + .byte 246 + .byte 248 + .byte 250 + .byte 252 + .byte 254 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_3, @object + .size table_3, 256 +table_3: + .byte 0 + .byte 128 + .byte 1 + .byte 129 + .byte 2 + .byte 130 + .byte 3 + .byte 131 + .byte 4 + .byte 132 + .byte 5 + .byte 133 + .byte 6 + .byte 134 + .byte 7 + .byte 135 + .byte 8 + .byte 136 + .byte 9 + .byte 137 + .byte 10 + .byte 138 + .byte 11 + .byte 139 + .byte 12 + .byte 140 + .byte 13 + .byte 141 + .byte 14 + .byte 142 + .byte 15 + .byte 143 + .byte 16 + .byte 144 + .byte 17 + .byte 145 + .byte 18 + .byte 146 + .byte 19 + .byte 147 + .byte 20 + .byte 148 + .byte 21 + .byte 149 + .byte 22 + .byte 150 + .byte 23 + .byte 151 + .byte 24 + .byte 152 + .byte 25 + .byte 153 + .byte 26 + .byte 154 + .byte 27 + .byte 155 + .byte 28 + .byte 156 + .byte 29 + .byte 157 + .byte 30 + .byte 158 + .byte 31 + .byte 159 + .byte 160 + .byte 32 + .byte 161 + .byte 33 + .byte 162 + .byte 34 + .byte 163 + .byte 35 + .byte 164 + .byte 36 + .byte 165 + .byte 37 + .byte 166 + .byte 38 + .byte 167 + .byte 39 + .byte 168 + .byte 40 + .byte 169 + .byte 41 + .byte 170 + .byte 42 + .byte 171 + .byte 43 + .byte 172 + .byte 44 + .byte 173 + .byte 45 + .byte 174 + .byte 46 + .byte 175 + .byte 47 + .byte 176 + .byte 48 + .byte 177 + .byte 49 + .byte 178 + .byte 50 + .byte 179 + .byte 51 + .byte 180 + .byte 52 + .byte 181 + .byte 53 + .byte 182 + .byte 54 + .byte 183 + .byte 55 + .byte 184 + .byte 56 + .byte 185 + .byte 57 + .byte 186 + .byte 58 + .byte 187 + .byte 59 + .byte 188 + .byte 60 + .byte 189 + .byte 61 + .byte 190 + .byte 62 + .byte 191 + .byte 63 + .byte 64 + .byte 192 + .byte 65 + .byte 193 + .byte 66 + .byte 194 + .byte 67 + .byte 195 + .byte 68 + .byte 196 + .byte 69 + .byte 197 + .byte 70 + .byte 198 + .byte 71 + .byte 199 + .byte 72 + .byte 200 + .byte 73 + .byte 201 + .byte 74 + .byte 202 + .byte 75 + .byte 203 + .byte 76 + .byte 204 + .byte 77 + .byte 205 + .byte 78 + .byte 206 + .byte 79 + .byte 207 + .byte 80 + .byte 208 + .byte 81 + .byte 209 + .byte 82 + .byte 210 + .byte 83 + .byte 211 + .byte 84 + .byte 212 + .byte 85 + .byte 213 + .byte 86 + .byte 214 + .byte 87 + .byte 215 + .byte 88 + .byte 216 + .byte 89 + .byte 217 + .byte 90 + .byte 218 + .byte 91 + .byte 219 + .byte 92 + .byte 220 + .byte 93 + .byte 221 + .byte 94 + .byte 222 + .byte 95 + .byte 223 + .byte 224 + .byte 96 + .byte 225 + .byte 97 + .byte 226 + .byte 98 + .byte 227 + .byte 99 + .byte 228 + .byte 100 + .byte 229 + .byte 101 + .byte 230 + .byte 102 + .byte 231 + .byte 103 + .byte 232 + .byte 104 + .byte 233 + .byte 105 + .byte 234 + .byte 106 + .byte 235 + .byte 107 + .byte 236 + .byte 108 + .byte 237 + .byte 109 + .byte 238 + .byte 110 + .byte 239 + .byte 111 + .byte 240 + .byte 112 + .byte 241 + .byte 113 + .byte 242 + .byte 114 + .byte 243 + .byte 115 + .byte 244 + .byte 116 + .byte 245 + .byte 117 + .byte 246 + .byte 118 + .byte 247 + .byte 119 + .byte 248 + .byte 120 + .byte 249 + .byte 121 + .byte 250 + 
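table_2 and table_3 look like per-byte lookup versions of the SKINNY-128 tweakey LFSRs rather than S-boxes: index 1 maps to 0x02 in table_2 (a left-shifting feedback register) and to 0x80 in table_3 (a right-shifting one), which matches the TK2 and TK3 update functions. A hedged sketch of the per-byte steps these tables appear to tabulate; the function names are illustrative:

#include <stdint.h>

/* Presumed per-byte steps behind table_2 and table_3: one update of the
 * SKINNY-128 TK2 LFSR (shift left, x7^x5 fed back into bit 0) and of the
 * TK3 LFSR (shift right, x0^x6 fed back into bit 7). */
static uint8_t lfsr2_step(uint8_t x)
{
    return (uint8_t)((x << 1) | (((x >> 7) ^ (x >> 5)) & 1));
}

static uint8_t lfsr3_step(uint8_t x)
{
    return (uint8_t)((x >> 1) | (((x << 7) ^ (x << 1)) & 0x80));
}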
.byte 122 + .byte 251 + .byte 123 + .byte 252 + .byte 124 + .byte 253 + .byte 125 + .byte 254 + .byte 126 + .byte 255 + .byte 127 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_4, @object + .size table_4, 112 +table_4: + .byte 1 + .byte 0 + .byte 3 + .byte 0 + .byte 7 + .byte 0 + .byte 15 + .byte 0 + .byte 15 + .byte 1 + .byte 14 + .byte 3 + .byte 13 + .byte 3 + .byte 11 + .byte 3 + .byte 7 + .byte 3 + .byte 15 + .byte 2 + .byte 14 + .byte 1 + .byte 12 + .byte 3 + .byte 9 + .byte 3 + .byte 3 + .byte 3 + .byte 7 + .byte 2 + .byte 14 + .byte 0 + .byte 13 + .byte 1 + .byte 10 + .byte 3 + .byte 5 + .byte 3 + .byte 11 + .byte 2 + .byte 6 + .byte 1 + .byte 12 + .byte 2 + .byte 8 + .byte 1 + .byte 0 + .byte 3 + .byte 1 + .byte 2 + .byte 2 + .byte 0 + .byte 5 + .byte 0 + .byte 11 + .byte 0 + .byte 7 + .byte 1 + .byte 14 + .byte 2 + .byte 12 + .byte 1 + .byte 8 + .byte 3 + .byte 1 + .byte 3 + .byte 3 + .byte 2 + .byte 6 + .byte 0 + .byte 13 + .byte 0 + .byte 11 + .byte 1 + .byte 6 + .byte 3 + .byte 13 + .byte 2 + .byte 10 + .byte 1 + .byte 4 + .byte 3 + .byte 9 + .byte 2 + .byte 2 + .byte 1 + .byte 4 + .byte 2 + .byte 8 + .byte 0 + .byte 1 + .byte 1 + .byte 2 + .byte 2 + .byte 4 + .byte 0 + .byte 9 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 2 + .byte 12 + .byte 0 + .byte 9 + .byte 1 + .byte 2 + .byte 3 + .byte 5 + .byte 2 + .byte 10 + .byte 0 + + .text +.global skinny_128_384_init + .type skinny_128_384_init, @function +skinny_128_384_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,12 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_384_init, .-skinny_128_384_init + + .text +.global skinny_128_384_encrypt + .type skinny_128_384_encrypt, @function +skinny_128_384_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + std Y+33,r18 + std Y+34,r19 + std Y+35,r20 + std Y+36,r21 + ldd r18,Z+36 + ldd r19,Z+37 + ldd r20,Z+38 + ldd r21,Z+39 + std Y+37,r18 + std Y+38,r19 + std Y+39,r20 + std Y+40,r21 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + std Y+41,r18 + std Y+42,r19 + std Y+43,r20 + std Y+44,r21 + ldd r18,Z+44 + ldd r19,Z+45 + ldd r20,Z+46 + ldd 
r21,Z+47 + std Y+45,r18 + std Y+46,r19 + std Y+47,r20 + std Y+48,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +114: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif 
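A note on the pattern that repeats throughout this generated routine: each "#if defined(RAMPZ) ... elpm ... #elif ... lpm ... #endif" group is a single table lookup. The byte to be substituted is moved into the low half of the Z pointer (the high half, and RAMPZ on large parts, already address the table), and whichever flash-read instruction the target AVR provides fetches the replacement byte. The first entries of table_0 (0x65, 0x4c, 0x6a, 0x42, ...) match the 8-bit SKINNY-128 S-box, so in portable C each group collapses to one array index. A minimal sketch under that assumption, with an illustrative function name:

#include <stdint.h>

/* Portable-C mirror of the 256-byte table above; in the assembly it
 * lives in flash and is read with lpm/elpm. */
extern const uint8_t table_0[256];

/* One S-box pass over the 16-byte SKINNY state: what the assembly does
 * with sixteen table lookups per round. */
static void skinny128_sbox_layer(uint8_t state[16])
{
    for (int i = 0; i < 16; ++i)
        state[i] = table_0[state[i]];
}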
+ eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 
+#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov 
r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm 
r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov 
r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 
+#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,112 + brne 5721f + rjmp 790f +5721: + 
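The "cpi r26,112 / brne 5721f" test just above is the round-loop exit: r26 indexes table_4, which stores two bytes per round, so 112 corresponds to 56 rounds of SKINNY-128-384. The table entries (1,0, 3,0, 7,0, 15,0, 15,1, ...) are the 6-bit SKINNY round constants split into a low and a high nibble, which the code XORs into the first two state rows (a fixed 0x02 goes into the third). A sketch of how such a table can be regenerated, assuming the standard SKINNY round-constant LFSR; the function name is illustrative:

#include <stdint.h>

/* Regenerate a table_4-style round-constant table: 6-bit LFSR output,
 * split into low and high nibbles, two bytes per round for 56 rounds. */
static void make_round_constant_table(uint8_t out[112])
{
    uint8_t rc = 0;
    for (int round = 0; round < 56; ++round) {
        rc = (uint8_t)(((rc << 1) ^ ((rc >> 5) & 1) ^ ((rc >> 4) & 1) ^ 1) & 0x3F);
        out[2 * round]     = rc & 0x0F;   /* XORed into the first state row  */
        out[2 * round + 1] = rc >> 4;     /* XORed into the second state row */
    }
}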
ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + 
std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 114b +790: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_encrypt, .-skinny_128_384_encrypt + +.global skinny_128_384_encrypt_tk_full + .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt + + .text +.global skinny_128_384_decrypt + .type skinny_128_384_decrypt, @function +skinny_128_384_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r23 + std Y+2,r2 + std Y+3,r21 + std Y+4,r20 + std Y+5,r3 + std Y+6,r18 + std Y+7,r19 + std Y+8,r22 + std Y+9,r9 + std Y+10,r10 + std Y+11,r7 + std Y+12,r6 + std Y+13,r11 + std Y+14,r4 + std Y+15,r5 + std Y+16,r8 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r23 + std Y+18,r2 + std Y+19,r21 + std Y+20,r20 + std Y+21,r3 + std Y+22,r18 + std Y+23,r19 + std Y+24,r22 + std Y+25,r9 + std Y+26,r10 + std Y+27,r7 + std Y+28,r6 + std Y+29,r11 + std Y+30,r4 + std Y+31,r5 + std Y+32,r8 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r22,Z+36 + ldd r23,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+33,r23 + std Y+34,r2 + std Y+35,r21 + std Y+36,r20 + std Y+37,r3 + std Y+38,r18 + std Y+39,r19 + std Y+40,r22 + std Y+41,r9 + std Y+42,r10 + std Y+43,r7 + std Y+44,r6 + std Y+45,r11 + std Y+46,r4 + std Y+47,r5 + std Y+48,r8 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +122: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z 
+#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 122b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,28 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +150: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 150b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 +179: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 
+#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 179b + std Y+33,r12 + std Y+34,r13 + std Y+35,r14 + std Y+36,r15 + std Y+37,r24 + std Y+38,r25 + std Y+39,r16 + std Y+40,r17 + ldi r26,28 + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 +207: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 207b + std Y+41,r12 + std Y+42,r13 + std Y+43,r14 + std Y+44,r15 + std Y+45,r24 + std Y+46,r25 + std Y+47,r16 + std Y+48,r17 + ldi r26,112 +227: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm 
r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor 
r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std 
Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + 
mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z 
+#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + 
mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z 
+#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + 
ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif 
defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 903f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 227b +903: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_decrypt, .-skinny_128_384_decrypt + + .text +.global skinny_128_256_init + .type skinny_128_256_init, @function +skinny_128_256_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,8 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_256_init, .-skinny_128_256_init + + .text +.global skinny_128_256_encrypt + .type skinny_128_256_encrypt, @function +skinny_128_256_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 
+ push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +82: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov 
r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor 
r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if 
defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi 
r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + 
ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,96 + breq 594f + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) 
+ elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 82b +594: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_encrypt, .-skinny_128_256_encrypt + +.global skinny_128_256_encrypt_tk_full + .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt + + .text +.global skinny_128_256_decrypt + .type skinny_128_256_decrypt, @function +skinny_128_256_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + std Y+5,r22 + std Y+6,r23 + std Y+7,r2 + std Y+8,r3 + std Y+9,r4 + std Y+10,r5 + std Y+11,r6 + std Y+12,r7 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + std Y+21,r22 + std Y+22,r23 + std Y+23,r2 + std Y+24,r3 + std Y+25,r4 + std Y+26,r5 + std Y+27,r6 + std Y+28,r7 + std Y+29,r8 + std Y+30,r9 + std Y+31,r10 + std Y+32,r11 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,24 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +90: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm 
r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 90b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,24 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +118: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 118b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,96 +139: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else 
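/* A note on the repeated lookup pattern above (a rough sketch, not generated
 * code): Z (r31:r30) is preloaded with the high byte of a 256-byte-aligned
 * table (table_0..table_4), and RAMPZ with its high segment where present,
 * so overwriting only r30 with a state byte and issuing the load performs a
 * single-byte S-box lookup.  In C terms each eight-line block is roughly
 *
 *     x = table_n[x];   // hypothetical names; assumes 256-byte table alignment
 *
 * The four preprocessor branches pick the load that the target supports:
 * elpm when RAMPZ exists (flash beyond 64K), lpm Rd,Z when the enhanced
 * lpm is available (__AVR_HAVE_LPMX__), a plain ld on __AVR_TINY__ where
 * flash appears in the data address space, and the classic lpm-into-r0
 * followed by a mov otherwise.
 */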
+ lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 
+#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + 
elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd 
r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif 
defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 651f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 139b +651: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_decrypt, .-skinny_128_256_decrypt + +#endif diff --git a/romulus/Implementations/crypto_aead/romulusm3/rhys/internal-skinny128.c b/romulus/Implementations/crypto_aead/romulusm3/rhys/internal-skinny128.c index 65ba4ed..579ced1 100644 --- a/romulus/Implementations/crypto_aead/romulusm3/rhys/internal-skinny128.c +++ b/romulus/Implementations/crypto_aead/romulusm3/rhys/internal-skinny128.c @@ -25,6 +25,8 @@ #include "internal-util.h" #include +#if !defined(__AVR__) + STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) { /* This function is used to fast-forward the TK1 tweak value @@ -55,42 +57,33 @@ STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) ((row3 << 24) & 0xFF000000U); } -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - 
size_t key_len) +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t TK3[4]; uint32_t *schedule; unsigned round; uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || (key_len != 32 && key_len != 48)) - return 0; - +#endif + +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); + memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); +#else /* Set the initial states of TK1, TK2, and TK3 */ - if (key_len == 32) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - TK3[0] = le_load_word32(key + 16); - TK3[1] = le_load_word32(key + 20); - TK3[2] = le_load_word32(key + 24); - TK3[3] = le_load_word32(key + 28); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); + TK3[0] = le_load_word32(key + 32); + TK3[1] = le_load_word32(key + 36); + TK3[2] = le_load_word32(key + 40); + TK3[3] = le_load_word32(key + 44); /* Set up the key schedule using TK2 and TK3. TK1 is not added * to the key schedule because we will derive that part of the @@ -116,20 +109,7 @@ int skinny_128_384_init skinny128_LFSR3(TK3[0]); skinny128_LFSR3(TK3[1]); } - return 1; -} - -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_384_encrypt @@ -138,7 +118,13 @@ void skinny_128_384_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -148,14 +134,24 @@ void skinny_128_384_encrypt s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -163,8 +159,15 @@ void skinny_128_384_encrypt skinny128_sbox(s3); /* 
Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -185,6 +188,16 @@ void skinny_128_384_encrypt /* Permute TK1 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_permute_tk(TK3); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -200,7 +213,13 @@ void skinny_128_384_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0x15; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -215,15 +234,47 @@ void skinny_128_384_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Permute TK1 to fast-forward it to the end of the key schedule */ skinny128_fast_forward_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_fast_forward_tk(TK2); + skinny128_fast_forward_tk(TK3); + for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
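/* Why ROUNDS/2 iterations fast-forward the schedule (a minimal sketch for
 * reference): each round's tweakey permutation swaps the two halves of TK2
 * and TK3, and the LFSR is applied only to the half that feeds the subkey,
 * so any given byte is updated in every second round.  Over
 * SKINNY_128_384_ROUNDS = 56 rounds each byte therefore receives 28 LFSR
 * steps, which is exactly what this loop applies to all four words at once.
 * On a single byte the SKINNY cell LFSRs are, per the SKINNY specification
 * (the word-oriented macros used here pack four such bytes per word; the
 * helper names below are illustrative only):
 *
 *     uint8_t lfsr2(uint8_t x)    // TK2 cell update: left shift, bit0 = x7^x5
 *     {
 *         return (uint8_t)((x << 1) ^ (((x >> 7) ^ (x >> 5)) & 1));
 *     }
 *
 *     uint8_t lfsr3(uint8_t x)    // TK3 cell update: right shift, bit7 = x0^x6
 *     {
 *         return (uint8_t)((x >> 1) ^ (((x << 7) ^ (x << 1)) & 0x80));
 *     }
 */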
+ skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); + skinny128_LFSR3(TK3[2]); + skinny128_LFSR3(TK3[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_inv_permute_tk(TK3); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); + skinny128_LFSR2(TK3[2]); + skinny128_LFSR2(TK3[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -240,8 +291,15 @@ void skinny_128_384_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -259,13 +317,18 @@ void skinny_128_384_decrypt } void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2) { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; uint32_t TK2[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -275,7 +338,7 @@ void skinny_128_384_encrypt_tk2 s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1/TK2 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); @@ -284,9 +347,15 @@ void skinny_128_384_encrypt_tk2 TK2[1] = le_load_word32(tk2 + 4); TK2[2] = le_load_word32(tk2 + 8); TK2[3] = le_load_word32(tk2 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -294,8 +363,15 @@ void skinny_128_384_encrypt_tk2 skinny128_sbox(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -319,6 +395,13 @@ void skinny_128_384_encrypt_tk2 skinny128_permute_tk(TK2); skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK3); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -408,33 +491,27 @@ void skinny_128_384_encrypt_tk_full 
le_store_word32(output + 12, s3); } -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len) +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t *schedule; unsigned round; uint8_t rc; +#endif - /* Validate the parameters */ - if (!ks || !key || (key_len != 16 && key_len != 32)) - return 0; - +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); +#else /* Set the initial states of TK1 and TK2 */ - if (key_len == 16) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); /* Set up the key schedule using TK2. TK1 is not added * to the key schedule because we will derive that part of the @@ -457,20 +534,7 @@ int skinny_128_256_init skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); } - return 1; -} - -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_256_encrypt @@ -479,7 +543,12 @@ void skinny_128_256_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -494,18 +563,31 @@ void skinny_128_256_encrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); skinny128_sbox(s2); skinny128_sbox(s3); - /* Apply the subkey for this round */ + /* XOR the round constant and the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -524,8 +606,15 @@ void skinny_128_256_encrypt s1 = s0; s0 = temp; - /* Permute TK1 for the next round */ + /* Permute TK1 and TK2 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -541,7 +630,12 @@ void 
skinny_128_256_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0x09; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -558,12 +652,29 @@ void skinny_128_256_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2. + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -580,8 +691,15 @@ void skinny_128_256_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -670,142 +788,14 @@ void skinny_128_256_encrypt_tk_full le_store_word32(output + 12, s3); } -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len) -{ - uint32_t TK1[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || key_len != 16) - return 0; - - /* Set the initial state of TK1 */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); +#else /* __AVR__ */ - /* Set up the key schedule using TK1 */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK1[0] ^ (rc & 0x0F); - schedule[1] = TK1[1] ^ (rc >> 4); - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); - } - return 1; -} - -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) +void skinny_128_384_encrypt_tk2 + (skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, const unsigned char *tk2) { - uint32_t s0, s1, s2, s3; - const uint32_t *schedule = ks->k; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); + memcpy(ks->TK2, tk2, 16); + skinny_128_384_encrypt(ks, output, input); } -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - const uint32_t *schedule; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_128_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule -= 2) { - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} +#endif /* __AVR__ */ diff --git a/romulus/Implementations/crypto_aead/romulusm3/rhys/internal-skinny128.h b/romulus/Implementations/crypto_aead/romulusm3/rhys/internal-skinny128.h index 76b34f5..2bfda3c 100644 --- a/romulus/Implementations/crypto_aead/romulusm3/rhys/internal-skinny128.h +++ b/romulus/Implementations/crypto_aead/romulusm3/rhys/internal-skinny128.h @@ -39,6 +39,16 @@ extern "C" { #endif /** + * \def SKINNY_128_SMALL_SCHEDULE + * \brief Defined to 1 to use the 
small key schedule version of SKINNY-128. + */ +#if defined(__AVR__) +#define SKINNY_128_SMALL_SCHEDULE 1 +#else +#define SKINNY_128_SMALL_SCHEDULE 0 +#endif + +/** * \brief Size of a block for SKINNY-128 block ciphers. */ #define SKINNY_128_BLOCK_SIZE 16 @@ -56,8 +66,16 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; + + /** TK3 for the small key schedule */ + uint8_t TK3[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_384_ROUNDS * 2]; +#endif } skinny_128_384_key_schedule_t; @@ -66,29 +84,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 32 or 48, - * where 32 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-384. - * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); /** * \brief Encrypts a 128-bit block with SKINNY-128-384. @@ -133,9 +131,12 @@ void skinny_128_384_decrypt * This version is useful when both TK1 and TK2 change from block to block. * When the key is initialized with skinny_128_384_init(), the TK2 part of * the key value should be set to zero. + * + * \note Some versions of this function may modify the key schedule to + * copy tk2 into place. */ void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2); /** @@ -170,8 +171,13 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_256_ROUNDS * 2]; +#endif } skinny_128_256_key_schedule_t; @@ -180,29 +186,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16 or 32, - * where 16 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. 
- */ -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); /** * \brief Encrypts a 128-bit block with SKINNY-128-256. @@ -251,63 +237,6 @@ void skinny_128_256_encrypt_tk_full (const unsigned char key[32], unsigned char *output, const unsigned char *input); -/** - * \brief Number of rounds for SKINNY-128-128. - */ -#define SKINNY_128_128_ROUNDS 40 - -/** - * \brief Structure of the key schedule for SKINNY-128-128. - */ -typedef struct -{ - /** Words of the key schedule */ - uint32_t k[SKINNY_128_128_ROUNDS * 2]; - -} skinny_128_128_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-128. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - #ifdef __cplusplus } #endif diff --git a/romulus/Implementations/crypto_aead/romulusm3/rhys/internal-util.h b/romulus/Implementations/crypto_aead/romulusm3/rhys/internal-util.h index e79158c..e30166d 100644 --- a/romulus/Implementations/crypto_aead/romulusm3/rhys/internal-util.h +++ b/romulus/Implementations/crypto_aead/romulusm3/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/romulus/Implementations/crypto_aead/romulusm3/rhys/romulus.c b/romulus/Implementations/crypto_aead/romulusm3/rhys/romulus.c index be1c0fa..bb19cc5 100644 --- a/romulus/Implementations/crypto_aead/romulusm3/rhys/romulus.c +++ b/romulus/Implementations/crypto_aead/romulusm3/rhys/romulus.c @@ -116,14 +116,15 @@ static void romulus1_init (skinny_128_384_key_schedule_t *ks, const unsigned char *k, const unsigned char *npub) { - unsigned char TK[32]; + unsigned char TK[48]; + TK[0] = 0x01; /* Initialize the 56-bit LFSR counter */ + memset(TK + 1, 0, 15); if (npub) - memcpy(TK, npub, 16); + memcpy(TK + 16, npub, 16); else - memset(TK, 0, 16); - memcpy(TK + 16, k, 16); - skinny_128_384_init(ks, TK, sizeof(TK)); - ks->TK1[0] = 0x01; /* Initialize the 56-bit LFSR counter */ + memset(TK + 16, 0, 16); + memcpy(TK + 32, k, 16); + skinny_128_384_init(ks, TK); } /** @@ -138,14 +139,18 @@ static void romulus2_init (skinny_128_384_key_schedule_t *ks, const unsigned char *k, const unsigned char *npub) { - unsigned char TK[32]; - memcpy(TK, k, 16); - memset(TK + 16, 0, 16); - TK[16] = 0x01; /* Initialize the high 24 bits of the LFSR counter */ - skinny_128_384_init(ks, TK, sizeof(TK)); - ks->TK1[0] = 0x01; 
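The composed rotation macros above exist because, on AVR, the only cheap 32-bit rotations are by 1 bit and by multiples of 8 (pure byte movement); every other count is built from those two primitives, rotating to the nearest multiple of 8 and then fixing up with single-bit steps. A small sketch of the same idea, written as inline functions rather than the macros used in internal-util.h, for clarity only:

#include <stdint.h>

/* The two cheap primitives: single-bit rotates and byte-aligned rotates. */
static inline uint32_t rol32_1(uint32_t x)  { return (x << 1)  | (x >> 31); }
static inline uint32_t ror32_1(uint32_t x)  { return (x >> 1)  | (x << 31); }
static inline uint32_t rol32_8(uint32_t x)  { return (x << 8)  | (x >> 24); }
static inline uint32_t rol32_16(uint32_t x) { return (x << 16) | (x >> 16); }

/* leftRotate5: rotate left by 8, then right by 3 single-bit steps. */
static inline uint32_t rol32_5(uint32_t x)
{
    x = rol32_8(x);
    return ror32_1(ror32_1(ror32_1(x)));
}

/* leftRotate13: rotate left by 16, then right by 3 single-bit steps. */
static inline uint32_t rol32_13(uint32_t x)
{
    x = rol32_16(x);
    return ror32_1(ror32_1(ror32_1(x)));
}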
/* Initialize the low 24 bits of the LFSR counter */ - if (npub) - memcpy(ks->TK1 + 4, npub, 12); + unsigned char TK[48]; + TK[0] = 0x01; /* Initialize the low 24 bits of the LFSR counter */ + if (npub) { + TK[1] = TK[2] = TK[3] = 0; + memcpy(TK + 4, npub, 12); + } else { + memset(TK + 1, 0, 15); + } + memcpy(TK + 16, k, 16); + TK[32] = 0x01; /* Initialize the high 24 bits of the LFSR counter */ + memset(TK + 33, 0, 15); + skinny_128_384_init(ks, TK); } /** @@ -160,10 +165,16 @@ static void romulus3_init (skinny_128_256_key_schedule_t *ks, const unsigned char *k, const unsigned char *npub) { - skinny_128_256_init(ks, k, 16); - ks->TK1[0] = 0x01; /* Initialize the 24-bit LFSR counter */ - if (npub) - memcpy(ks->TK1 + 4, npub, 12); + unsigned char TK[32]; + TK[0] = 0x01; /* Initialize the 24-bit LFSR counter */ + if (npub) { + TK[1] = TK[2] = TK[3] = 0; + memcpy(TK + 4, npub, 12); + } else { + memset(TK + 1, 0, 15); + } + memcpy(TK + 16, k, 16); + skinny_128_256_init(ks, TK); } /** diff --git a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/aead-common.c b/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
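Because skinny_128_384_init() and skinny_128_256_init() now take the complete tweakey in one call, the Romulus initialisers above can no longer patch TK1 after the fact; each variant lays out the full counter/nonce/key buffer before calling init. The sketch below restates the new romulus1_init() layout for readability; it mirrors the code in the hunk above and omits the npub == NULL fallback.

#include <string.h>
#include "internal-skinny128.h"

/* Romulus-N1 tweakey layout: TK1 = 56-bit LFSR block counter,
 * TK2 = nonce, TK3 = key. */
static void example_romulus1_schedule
    (skinny_128_384_key_schedule_t *ks,
     const unsigned char k[16], const unsigned char npub[16])
{
    unsigned char TK[48];
    TK[0] = 0x01;               /* LFSR counter starts at 1 */
    memset(TK + 1, 0, 15);      /* rest of TK1 is zero */
    memcpy(TK + 16, npub, 16);  /* TK2 holds the nonce */
    memcpy(TK + 32, k, 16);     /* TK3 holds the key */
    skinny_128_384_init(ks, TK);
}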
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/aead-common.h b/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. 
- * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
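The aead_cipher_t record above is how the library describes each AEAD scheme to test harnesses and callers. A purely illustrative instance for Romulus-N1 is sketched below: the field order follows the struct definition above, the sizes come from the romulusn1 api.h removed later in this patch, and the encrypt/decrypt entry points are the romulus_n1_aead_* functions referenced by the removed encrypt.c wrapper; the name string and flag value are assumptions, not taken from the rhys sources.

#include "aead-common.h"
#include "romulus.h"

/* Hypothetical meta-information record for Romulus-N1 (illustration only). */
static const aead_cipher_t romulus_n1_cipher_example = {
    "Romulus-N1",               /* name (assumed spelling) */
    16,                         /* key_len   (CRYPTO_KEYBYTES)  */
    16,                         /* nonce_len (CRYPTO_NPUBBYTES) */
    16,                         /* tag_len   (CRYPTO_ABYTES)    */
    AEAD_FLAG_NONE,             /* flags (assumed)              */
    romulus_n1_aead_encrypt,    /* encrypt */
    romulus_n1_aead_decrypt     /* decrypt */
};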
- * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/api.h b/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/encrypt.c b/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/encrypt.c deleted file mode 100644 index 444a0c6..0000000 --- a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "romulus.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return romulus_n1_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return romulus_n1_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-skinny128-avr.S b/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-skinny128-avr.S deleted file mode 100644 index d342cd5..0000000 --- a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-skinny128-avr.S +++ /dev/null @@ -1,10099 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 256 -table_0: - .byte 101 - .byte 76 - .byte 106 - .byte 66 - .byte 75 - .byte 99 - .byte 67 - .byte 107 - .byte 85 - .byte 117 - .byte 90 - .byte 122 - .byte 83 - .byte 115 - .byte 91 - .byte 123 - .byte 53 - .byte 140 - .byte 58 - .byte 129 - .byte 137 - .byte 51 - .byte 128 - .byte 59 - .byte 149 - .byte 37 - .byte 152 - .byte 42 - .byte 144 - .byte 35 - .byte 153 - .byte 43 - .byte 229 - .byte 204 - .byte 232 - .byte 193 - .byte 201 - .byte 224 - .byte 192 - .byte 233 - .byte 213 - .byte 245 - .byte 216 - .byte 248 - .byte 208 - .byte 240 - .byte 217 - .byte 249 - .byte 165 - .byte 28 - .byte 168 - .byte 18 - .byte 27 - .byte 160 - .byte 19 - .byte 169 - .byte 5 - .byte 181 - .byte 10 - .byte 184 - .byte 3 - .byte 176 - .byte 11 - .byte 185 - .byte 50 - .byte 136 - .byte 60 - .byte 133 - .byte 141 - .byte 52 - .byte 132 - .byte 61 - .byte 145 - .byte 34 - .byte 156 - .byte 44 - .byte 148 - .byte 36 - .byte 157 - .byte 45 - .byte 98 - .byte 74 - .byte 108 - .byte 69 - 
.byte 77 - .byte 100 - .byte 68 - .byte 109 - .byte 82 - .byte 114 - .byte 92 - .byte 124 - .byte 84 - .byte 116 - .byte 93 - .byte 125 - .byte 161 - .byte 26 - .byte 172 - .byte 21 - .byte 29 - .byte 164 - .byte 20 - .byte 173 - .byte 2 - .byte 177 - .byte 12 - .byte 188 - .byte 4 - .byte 180 - .byte 13 - .byte 189 - .byte 225 - .byte 200 - .byte 236 - .byte 197 - .byte 205 - .byte 228 - .byte 196 - .byte 237 - .byte 209 - .byte 241 - .byte 220 - .byte 252 - .byte 212 - .byte 244 - .byte 221 - .byte 253 - .byte 54 - .byte 142 - .byte 56 - .byte 130 - .byte 139 - .byte 48 - .byte 131 - .byte 57 - .byte 150 - .byte 38 - .byte 154 - .byte 40 - .byte 147 - .byte 32 - .byte 155 - .byte 41 - .byte 102 - .byte 78 - .byte 104 - .byte 65 - .byte 73 - .byte 96 - .byte 64 - .byte 105 - .byte 86 - .byte 118 - .byte 88 - .byte 120 - .byte 80 - .byte 112 - .byte 89 - .byte 121 - .byte 166 - .byte 30 - .byte 170 - .byte 17 - .byte 25 - .byte 163 - .byte 16 - .byte 171 - .byte 6 - .byte 182 - .byte 8 - .byte 186 - .byte 0 - .byte 179 - .byte 9 - .byte 187 - .byte 230 - .byte 206 - .byte 234 - .byte 194 - .byte 203 - .byte 227 - .byte 195 - .byte 235 - .byte 214 - .byte 246 - .byte 218 - .byte 250 - .byte 211 - .byte 243 - .byte 219 - .byte 251 - .byte 49 - .byte 138 - .byte 62 - .byte 134 - .byte 143 - .byte 55 - .byte 135 - .byte 63 - .byte 146 - .byte 33 - .byte 158 - .byte 46 - .byte 151 - .byte 39 - .byte 159 - .byte 47 - .byte 97 - .byte 72 - .byte 110 - .byte 70 - .byte 79 - .byte 103 - .byte 71 - .byte 111 - .byte 81 - .byte 113 - .byte 94 - .byte 126 - .byte 87 - .byte 119 - .byte 95 - .byte 127 - .byte 162 - .byte 24 - .byte 174 - .byte 22 - .byte 31 - .byte 167 - .byte 23 - .byte 175 - .byte 1 - .byte 178 - .byte 14 - .byte 190 - .byte 7 - .byte 183 - .byte 15 - .byte 191 - .byte 226 - .byte 202 - .byte 238 - .byte 198 - .byte 207 - .byte 231 - .byte 199 - .byte 239 - .byte 210 - .byte 242 - .byte 222 - .byte 254 - .byte 215 - .byte 247 - .byte 223 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 256 -table_1: - .byte 172 - .byte 232 - .byte 104 - .byte 60 - .byte 108 - .byte 56 - .byte 168 - .byte 236 - .byte 170 - .byte 174 - .byte 58 - .byte 62 - .byte 106 - .byte 110 - .byte 234 - .byte 238 - .byte 166 - .byte 163 - .byte 51 - .byte 54 - .byte 102 - .byte 99 - .byte 227 - .byte 230 - .byte 225 - .byte 164 - .byte 97 - .byte 52 - .byte 49 - .byte 100 - .byte 161 - .byte 228 - .byte 141 - .byte 201 - .byte 73 - .byte 29 - .byte 77 - .byte 25 - .byte 137 - .byte 205 - .byte 139 - .byte 143 - .byte 27 - .byte 31 - .byte 75 - .byte 79 - .byte 203 - .byte 207 - .byte 133 - .byte 192 - .byte 64 - .byte 21 - .byte 69 - .byte 16 - .byte 128 - .byte 197 - .byte 130 - .byte 135 - .byte 18 - .byte 23 - .byte 66 - .byte 71 - .byte 194 - .byte 199 - .byte 150 - .byte 147 - .byte 3 - .byte 6 - .byte 86 - .byte 83 - .byte 211 - .byte 214 - .byte 209 - .byte 148 - .byte 81 - .byte 4 - .byte 1 - .byte 84 - .byte 145 - .byte 212 - .byte 156 - .byte 216 - .byte 88 - .byte 12 - .byte 92 - .byte 8 - .byte 152 - .byte 220 - .byte 154 - .byte 158 - .byte 10 - .byte 14 - .byte 90 - .byte 94 - .byte 218 - .byte 222 - .byte 149 - .byte 208 - .byte 80 - .byte 5 - .byte 85 - .byte 0 - .byte 144 - .byte 213 - .byte 146 - .byte 151 - .byte 2 - .byte 7 - .byte 82 - .byte 87 - .byte 210 - .byte 215 - .byte 157 - .byte 217 - .byte 89 - .byte 13 - .byte 93 - .byte 9 - .byte 153 - .byte 221 - .byte 155 - .byte 159 - .byte 11 - .byte 15 - .byte 91 - .byte 95 - .byte 
219 - .byte 223 - .byte 22 - .byte 19 - .byte 131 - .byte 134 - .byte 70 - .byte 67 - .byte 195 - .byte 198 - .byte 65 - .byte 20 - .byte 193 - .byte 132 - .byte 17 - .byte 68 - .byte 129 - .byte 196 - .byte 28 - .byte 72 - .byte 200 - .byte 140 - .byte 76 - .byte 24 - .byte 136 - .byte 204 - .byte 26 - .byte 30 - .byte 138 - .byte 142 - .byte 74 - .byte 78 - .byte 202 - .byte 206 - .byte 53 - .byte 96 - .byte 224 - .byte 165 - .byte 101 - .byte 48 - .byte 160 - .byte 229 - .byte 50 - .byte 55 - .byte 162 - .byte 167 - .byte 98 - .byte 103 - .byte 226 - .byte 231 - .byte 61 - .byte 105 - .byte 233 - .byte 173 - .byte 109 - .byte 57 - .byte 169 - .byte 237 - .byte 59 - .byte 63 - .byte 171 - .byte 175 - .byte 107 - .byte 111 - .byte 235 - .byte 239 - .byte 38 - .byte 35 - .byte 179 - .byte 182 - .byte 118 - .byte 115 - .byte 243 - .byte 246 - .byte 113 - .byte 36 - .byte 241 - .byte 180 - .byte 33 - .byte 116 - .byte 177 - .byte 244 - .byte 44 - .byte 120 - .byte 248 - .byte 188 - .byte 124 - .byte 40 - .byte 184 - .byte 252 - .byte 42 - .byte 46 - .byte 186 - .byte 190 - .byte 122 - .byte 126 - .byte 250 - .byte 254 - .byte 37 - .byte 112 - .byte 240 - .byte 181 - .byte 117 - .byte 32 - .byte 176 - .byte 245 - .byte 34 - .byte 39 - .byte 178 - .byte 183 - .byte 114 - .byte 119 - .byte 242 - .byte 247 - .byte 45 - .byte 121 - .byte 249 - .byte 189 - .byte 125 - .byte 41 - .byte 185 - .byte 253 - .byte 43 - .byte 47 - .byte 187 - .byte 191 - .byte 123 - .byte 127 - .byte 251 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_2, @object - .size table_2, 256 -table_2: - .byte 0 - .byte 2 - .byte 4 - .byte 6 - .byte 8 - .byte 10 - .byte 12 - .byte 14 - .byte 16 - .byte 18 - .byte 20 - .byte 22 - .byte 24 - .byte 26 - .byte 28 - .byte 30 - .byte 32 - .byte 34 - .byte 36 - .byte 38 - .byte 40 - .byte 42 - .byte 44 - .byte 46 - .byte 48 - .byte 50 - .byte 52 - .byte 54 - .byte 56 - .byte 58 - .byte 60 - .byte 62 - .byte 65 - .byte 67 - .byte 69 - .byte 71 - .byte 73 - .byte 75 - .byte 77 - .byte 79 - .byte 81 - .byte 83 - .byte 85 - .byte 87 - .byte 89 - .byte 91 - .byte 93 - .byte 95 - .byte 97 - .byte 99 - .byte 101 - .byte 103 - .byte 105 - .byte 107 - .byte 109 - .byte 111 - .byte 113 - .byte 115 - .byte 117 - .byte 119 - .byte 121 - .byte 123 - .byte 125 - .byte 127 - .byte 128 - .byte 130 - .byte 132 - .byte 134 - .byte 136 - .byte 138 - .byte 140 - .byte 142 - .byte 144 - .byte 146 - .byte 148 - .byte 150 - .byte 152 - .byte 154 - .byte 156 - .byte 158 - .byte 160 - .byte 162 - .byte 164 - .byte 166 - .byte 168 - .byte 170 - .byte 172 - .byte 174 - .byte 176 - .byte 178 - .byte 180 - .byte 182 - .byte 184 - .byte 186 - .byte 188 - .byte 190 - .byte 193 - .byte 195 - .byte 197 - .byte 199 - .byte 201 - .byte 203 - .byte 205 - .byte 207 - .byte 209 - .byte 211 - .byte 213 - .byte 215 - .byte 217 - .byte 219 - .byte 221 - .byte 223 - .byte 225 - .byte 227 - .byte 229 - .byte 231 - .byte 233 - .byte 235 - .byte 237 - .byte 239 - .byte 241 - .byte 243 - .byte 245 - .byte 247 - .byte 249 - .byte 251 - .byte 253 - .byte 255 - .byte 1 - .byte 3 - .byte 5 - .byte 7 - .byte 9 - .byte 11 - .byte 13 - .byte 15 - .byte 17 - .byte 19 - .byte 21 - .byte 23 - .byte 25 - .byte 27 - .byte 29 - .byte 31 - .byte 33 - .byte 35 - .byte 37 - .byte 39 - .byte 41 - .byte 43 - .byte 45 - .byte 47 - .byte 49 - .byte 51 - .byte 53 - .byte 55 - .byte 57 - .byte 59 - .byte 61 - .byte 63 - .byte 64 - .byte 66 - .byte 68 - .byte 70 - .byte 72 - .byte 74 - .byte 76 - .byte 78 - .byte 80 - .byte 
82 - .byte 84 - .byte 86 - .byte 88 - .byte 90 - .byte 92 - .byte 94 - .byte 96 - .byte 98 - .byte 100 - .byte 102 - .byte 104 - .byte 106 - .byte 108 - .byte 110 - .byte 112 - .byte 114 - .byte 116 - .byte 118 - .byte 120 - .byte 122 - .byte 124 - .byte 126 - .byte 129 - .byte 131 - .byte 133 - .byte 135 - .byte 137 - .byte 139 - .byte 141 - .byte 143 - .byte 145 - .byte 147 - .byte 149 - .byte 151 - .byte 153 - .byte 155 - .byte 157 - .byte 159 - .byte 161 - .byte 163 - .byte 165 - .byte 167 - .byte 169 - .byte 171 - .byte 173 - .byte 175 - .byte 177 - .byte 179 - .byte 181 - .byte 183 - .byte 185 - .byte 187 - .byte 189 - .byte 191 - .byte 192 - .byte 194 - .byte 196 - .byte 198 - .byte 200 - .byte 202 - .byte 204 - .byte 206 - .byte 208 - .byte 210 - .byte 212 - .byte 214 - .byte 216 - .byte 218 - .byte 220 - .byte 222 - .byte 224 - .byte 226 - .byte 228 - .byte 230 - .byte 232 - .byte 234 - .byte 236 - .byte 238 - .byte 240 - .byte 242 - .byte 244 - .byte 246 - .byte 248 - .byte 250 - .byte 252 - .byte 254 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_3, @object - .size table_3, 256 -table_3: - .byte 0 - .byte 128 - .byte 1 - .byte 129 - .byte 2 - .byte 130 - .byte 3 - .byte 131 - .byte 4 - .byte 132 - .byte 5 - .byte 133 - .byte 6 - .byte 134 - .byte 7 - .byte 135 - .byte 8 - .byte 136 - .byte 9 - .byte 137 - .byte 10 - .byte 138 - .byte 11 - .byte 139 - .byte 12 - .byte 140 - .byte 13 - .byte 141 - .byte 14 - .byte 142 - .byte 15 - .byte 143 - .byte 16 - .byte 144 - .byte 17 - .byte 145 - .byte 18 - .byte 146 - .byte 19 - .byte 147 - .byte 20 - .byte 148 - .byte 21 - .byte 149 - .byte 22 - .byte 150 - .byte 23 - .byte 151 - .byte 24 - .byte 152 - .byte 25 - .byte 153 - .byte 26 - .byte 154 - .byte 27 - .byte 155 - .byte 28 - .byte 156 - .byte 29 - .byte 157 - .byte 30 - .byte 158 - .byte 31 - .byte 159 - .byte 160 - .byte 32 - .byte 161 - .byte 33 - .byte 162 - .byte 34 - .byte 163 - .byte 35 - .byte 164 - .byte 36 - .byte 165 - .byte 37 - .byte 166 - .byte 38 - .byte 167 - .byte 39 - .byte 168 - .byte 40 - .byte 169 - .byte 41 - .byte 170 - .byte 42 - .byte 171 - .byte 43 - .byte 172 - .byte 44 - .byte 173 - .byte 45 - .byte 174 - .byte 46 - .byte 175 - .byte 47 - .byte 176 - .byte 48 - .byte 177 - .byte 49 - .byte 178 - .byte 50 - .byte 179 - .byte 51 - .byte 180 - .byte 52 - .byte 181 - .byte 53 - .byte 182 - .byte 54 - .byte 183 - .byte 55 - .byte 184 - .byte 56 - .byte 185 - .byte 57 - .byte 186 - .byte 58 - .byte 187 - .byte 59 - .byte 188 - .byte 60 - .byte 189 - .byte 61 - .byte 190 - .byte 62 - .byte 191 - .byte 63 - .byte 64 - .byte 192 - .byte 65 - .byte 193 - .byte 66 - .byte 194 - .byte 67 - .byte 195 - .byte 68 - .byte 196 - .byte 69 - .byte 197 - .byte 70 - .byte 198 - .byte 71 - .byte 199 - .byte 72 - .byte 200 - .byte 73 - .byte 201 - .byte 74 - .byte 202 - .byte 75 - .byte 203 - .byte 76 - .byte 204 - .byte 77 - .byte 205 - .byte 78 - .byte 206 - .byte 79 - .byte 207 - .byte 80 - .byte 208 - .byte 81 - .byte 209 - .byte 82 - .byte 210 - .byte 83 - .byte 211 - .byte 84 - .byte 212 - .byte 85 - .byte 213 - .byte 86 - .byte 214 - .byte 87 - .byte 215 - .byte 88 - .byte 216 - .byte 89 - .byte 217 - .byte 90 - .byte 218 - .byte 91 - .byte 219 - .byte 92 - .byte 220 - .byte 93 - .byte 221 - .byte 94 - .byte 222 - .byte 95 - .byte 223 - .byte 224 - .byte 96 - .byte 225 - .byte 97 - .byte 226 - .byte 98 - .byte 227 - .byte 99 - .byte 228 - .byte 100 - .byte 229 - .byte 101 - .byte 230 - .byte 102 - .byte 231 - .byte 103 - .byte 232 - .byte 104 - .byte 
233 - .byte 105 - .byte 234 - .byte 106 - .byte 235 - .byte 107 - .byte 236 - .byte 108 - .byte 237 - .byte 109 - .byte 238 - .byte 110 - .byte 239 - .byte 111 - .byte 240 - .byte 112 - .byte 241 - .byte 113 - .byte 242 - .byte 114 - .byte 243 - .byte 115 - .byte 244 - .byte 116 - .byte 245 - .byte 117 - .byte 246 - .byte 118 - .byte 247 - .byte 119 - .byte 248 - .byte 120 - .byte 249 - .byte 121 - .byte 250 - .byte 122 - .byte 251 - .byte 123 - .byte 252 - .byte 124 - .byte 253 - .byte 125 - .byte 254 - .byte 126 - .byte 255 - .byte 127 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_4, @object - .size table_4, 112 -table_4: - .byte 1 - .byte 0 - .byte 3 - .byte 0 - .byte 7 - .byte 0 - .byte 15 - .byte 0 - .byte 15 - .byte 1 - .byte 14 - .byte 3 - .byte 13 - .byte 3 - .byte 11 - .byte 3 - .byte 7 - .byte 3 - .byte 15 - .byte 2 - .byte 14 - .byte 1 - .byte 12 - .byte 3 - .byte 9 - .byte 3 - .byte 3 - .byte 3 - .byte 7 - .byte 2 - .byte 14 - .byte 0 - .byte 13 - .byte 1 - .byte 10 - .byte 3 - .byte 5 - .byte 3 - .byte 11 - .byte 2 - .byte 6 - .byte 1 - .byte 12 - .byte 2 - .byte 8 - .byte 1 - .byte 0 - .byte 3 - .byte 1 - .byte 2 - .byte 2 - .byte 0 - .byte 5 - .byte 0 - .byte 11 - .byte 0 - .byte 7 - .byte 1 - .byte 14 - .byte 2 - .byte 12 - .byte 1 - .byte 8 - .byte 3 - .byte 1 - .byte 3 - .byte 3 - .byte 2 - .byte 6 - .byte 0 - .byte 13 - .byte 0 - .byte 11 - .byte 1 - .byte 6 - .byte 3 - .byte 13 - .byte 2 - .byte 10 - .byte 1 - .byte 4 - .byte 3 - .byte 9 - .byte 2 - .byte 2 - .byte 1 - .byte 4 - .byte 2 - .byte 8 - .byte 0 - .byte 1 - .byte 1 - .byte 2 - .byte 2 - .byte 4 - .byte 0 - .byte 9 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 2 - .byte 12 - .byte 0 - .byte 9 - .byte 1 - .byte 2 - .byte 3 - .byte 5 - .byte 2 - .byte 10 - .byte 0 - - .text -.global skinny_128_384_init - .type skinny_128_384_init, @function -skinny_128_384_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,12 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_384_init, .-skinny_128_384_init - - .text -.global skinny_128_384_encrypt - .type skinny_128_384_encrypt, @function -skinny_128_384_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - 
ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - std Y+33,r18 - std Y+34,r19 - std Y+35,r20 - std Y+36,r21 - ldd r18,Z+36 - ldd r19,Z+37 - ldd r20,Z+38 - ldd r21,Z+39 - std Y+37,r18 - std Y+38,r19 - std Y+39,r20 - std Y+40,r21 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - std Y+41,r18 - std Y+42,r19 - std Y+43,r20 - std Y+44,r21 - ldd r18,Z+44 - ldd r19,Z+45 - ldd r20,Z+46 - ldd r21,Z+47 - std Y+45,r18 - std Y+46,r19 - std Y+47,r20 - std Y+48,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -114: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi 
r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd 
r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) 
- ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm 
r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - 
ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd 
r0,Y+48 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,112 - brne 5721f - rjmp 790f -5721: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif 
defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 114b -790: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_encrypt, .-skinny_128_384_encrypt - -.global skinny_128_384_encrypt_tk_full - .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt - - .text -.global skinny_128_384_decrypt - .type skinny_128_384_decrypt, @function -skinny_128_384_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r23 - std Y+2,r2 - std Y+3,r21 - std Y+4,r20 - std Y+5,r3 - std Y+6,r18 - std Y+7,r19 - std Y+8,r22 - std Y+9,r9 - std Y+10,r10 - std Y+11,r7 - std Y+12,r6 - std Y+13,r11 - std Y+14,r4 - std Y+15,r5 - std Y+16,r8 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r23 - std Y+18,r2 - std Y+19,r21 - std Y+20,r20 - std Y+21,r3 - std Y+22,r18 - std Y+23,r19 - std Y+24,r22 - std Y+25,r9 - std Y+26,r10 - std Y+27,r7 - std Y+28,r6 - std Y+29,r11 - std Y+30,r4 - std Y+31,r5 - std Y+32,r8 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r22,Z+36 - ldd r23,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+33,r23 - std Y+34,r2 - std Y+35,r21 - std Y+36,r20 - std Y+37,r3 - std Y+38,r18 - std Y+39,r19 - std Y+40,r22 - std Y+41,r9 - std Y+42,r10 - std Y+43,r7 - std Y+44,r6 - std Y+45,r11 - std Y+46,r4 - std Y+47,r5 - std Y+48,r8 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 
-#endif - ldi r26,28 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -122: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 122b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,28 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -150: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 150b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 -179: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z 
-#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 179b - std Y+33,r12 - std Y+34,r13 - std Y+35,r14 - std Y+36,r15 - std Y+37,r24 - std Y+38,r25 - std Y+39,r16 - std Y+40,r17 - ldi r26,28 - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 -207: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 207b - std Y+41,r12 - std Y+42,r13 - std Y+43,r14 - std Y+44,r15 - std Y+45,r24 - std Y+46,r25 - std Y+47,r16 - std Y+48,r17 - ldi r26,112 -227: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - 
lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - 
eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - 
lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov 
r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - 
mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - 
elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - 
mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out 
_SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 903f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 227b -903: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_decrypt, .-skinny_128_384_decrypt - - .text -.global skinny_128_256_init - .type skinny_128_256_init, @function -skinny_128_256_init: - movw r30,r24 - movw r26,r22 
-.L__stack_usage = 2 - ldi r22,8 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_256_init, .-skinny_128_256_init - - .text -.global skinny_128_256_encrypt - .type skinny_128_256_encrypt, @function -skinny_128_256_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -82: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - 
mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi 
r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if 
defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 
- mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z 
-#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,96 - breq 594f - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 82b -594: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_encrypt, .-skinny_128_256_encrypt - -.global skinny_128_256_encrypt_tk_full - .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt - - .text -.global skinny_128_256_decrypt - .type skinny_128_256_decrypt, @function -skinny_128_256_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - std Y+5,r22 - std Y+6,r23 - std Y+7,r2 - std Y+8,r3 - std Y+9,r4 - std Y+10,r5 - std Y+11,r6 - std Y+12,r7 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - std Y+21,r22 - std Y+22,r23 - std Y+23,r2 - std Y+24,r3 - std Y+25,r4 - std Y+26,r5 - std Y+27,r6 - std Y+28,r7 - std Y+29,r8 - std Y+30,r9 - std Y+31,r10 - std Y+32,r11 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi 
r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,24 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -90: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 90b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,24 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -118: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 118b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,96 -139: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 
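The two 24-iteration loops above (labels 90 and 118) fast-forward the TK2 half of the tweakey to its end-of-schedule value before decryption begins: each of the 16 TK2 bytes is pushed through table_2 twenty-four times, presumably because SKINNY-128-256 runs 48 rounds and each tweakey byte sits in the LFSR-active top half for half of them. A rough C equivalent, assuming table_2 tabulates the standard SKINNY TK2 LFSR and using hypothetical helper names, is:

    #include <stdint.h>

    /* Standard SKINNY TK2 LFSR on one byte: (x7..x0) -> (x6..x0, x7^x5).
     * Assumed here to be the map tabulated by table_2. */
    static uint8_t skinny_lfsr2(uint8_t x)
    {
        return (uint8_t)((x << 1) | (((x >> 7) ^ (x >> 5)) & 1));
    }

    /* Advance every TK2 byte by 24 LFSR steps, as the two
     * 24-iteration table-lookup loops above do. */
    static void fast_forward_tk2(uint8_t tk2[16])
    {
        for (int i = 0; i < 16; ++i)
            for (int j = 0; j < 24; ++j)
                tk2[i] = skinny_lfsr2(tk2[i]);
    }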
- ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov 
r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov 
r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd 
r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif 
defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif 
defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 651f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 139b -651: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_decrypt, .-skinny_128_256_decrypt - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-skinny128.c b/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-skinny128.c deleted file mode 100644 index 579ced1..0000000 --- 
a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-skinny128.c +++ /dev/null @@ -1,801 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-skinny128.h" -#include "internal-skinnyutil.h" -#include "internal-util.h" -#include - -#if !defined(__AVR__) - -STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) -{ - /* This function is used to fast-forward the TK1 tweak value - * to the value at the end of the key schedule for decryption. - * - * The tweak permutation repeats every 16 rounds, so SKINNY-128-256 - * with 48 rounds does not need any fast forwarding applied. - * SKINNY-128-128 with 40 rounds and SKINNY-128-384 with 56 rounds - * are equivalent to applying the permutation 8 times: - * - * PT*8 = [5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12] - */ - uint32_t row0 = tk[0]; - uint32_t row1 = tk[1]; - uint32_t row2 = tk[2]; - uint32_t row3 = tk[3]; - tk[0] = ((row1 >> 8) & 0x0000FFFFU) | - ((row0 >> 8) & 0x00FF0000U) | - ((row0 << 8) & 0xFF000000U); - tk[1] = ((row1 >> 24) & 0x000000FFU) | - ((row0 << 8) & 0x00FFFF00U) | - ((row1 << 24) & 0xFF000000U); - tk[2] = ((row3 >> 8) & 0x0000FFFFU) | - ((row2 >> 8) & 0x00FF0000U) | - ((row2 << 8) & 0xFF000000U); - tk[3] = ((row3 >> 24) & 0x000000FFU) | - ((row2 << 8) & 0x00FFFF00U) | - ((row3 << 24) & 0xFF000000U); -} - -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); - memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); -#else - /* Set the initial states of TK1, TK2, and TK3 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Set up the key schedule using TK2 and TK3. 
TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ TK3[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ TK3[1] ^ (rc >> 4); - - /* Permute TK2 and TK3 for the next round */ - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - - /* Apply the LFSR's to TK2 and TK3 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } -#endif -} - -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0x15; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Permute TK1 to fast-forward it to the end of the key schedule */ - skinny128_fast_forward_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_fast_forward_tk(TK2); - skinny128_fast_forward_tk(TK3); - for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
- skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - skinny128_LFSR3(TK3[2]); - skinny128_LFSR3(TK3[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_inv_permute_tk(TK3); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); - skinny128_LFSR2(TK3[2]); - skinny128_LFSR2(TK3[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); - TK2[0] = le_load_word32(tk2); - TK2[1] = le_load_word32(tk2 + 4); - TK2[2] = le_load_word32(tk2 + 8); - TK2[3] = le_load_word32(tk2 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; - s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK3); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); -#else - /* Set the initial states of TK1 and TK2 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Set up the key schedule using TK2. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ (rc >> 4); - - /* Permute TK2 for the next round */ - skinny128_permute_tk(TK2); - - /* Apply the LFSR to TK2 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } -#endif -} - -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0x09; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1. 
- * There is no need to fast-forward TK1 because the value at - * the end of the key schedule is the same as at the start */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2. - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -#else /* __AVR__ */ - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - memcpy(ks->TK2, tk2, 16); - skinny_128_384_encrypt(ks, output, input); -} - -#endif /* __AVR__ */ diff --git a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-skinny128.h b/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-skinny128.h deleted file mode 100644 index 2bfda3c..0000000 --- a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-skinny128.h +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNY128_H -#define LW_INTERNAL_SKINNY128_H - -/** - * \file internal-skinny128.h - * \brief SKINNY-128 block cipher family. - * - * References: https://eprint.iacr.org/2016/660.pdf, - * https://sites.google.com/site/skinnycipher/ - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \def SKINNY_128_SMALL_SCHEDULE - * \brief Defined to 1 to use the small key schedule version of SKINNY-128. - */ -#if defined(__AVR__) -#define SKINNY_128_SMALL_SCHEDULE 1 -#else -#define SKINNY_128_SMALL_SCHEDULE 0 -#endif - -/** - * \brief Size of a block for SKINNY-128 block ciphers. - */ -#define SKINNY_128_BLOCK_SIZE 16 - -/** - * \brief Number of rounds for SKINNY-128-384. - */ -#define SKINNY_128_384_ROUNDS 56 - -/** - * \brief Structure of the key schedule for SKINNY-128-384. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; - - /** TK3 for the small key schedule */ - uint8_t TK3[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_384_ROUNDS * 2]; -#endif - -} skinny_128_384_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-384. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and an explicitly - * provided TK2 value. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tk2 TK2 value that should be updated on the fly. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when both TK1 and TK2 change from block to block. - * When the key is initialized with skinny_128_384_init(), the TK2 part of - * the key value should be set to zero. - * - * \note Some versions of this function may modify the key schedule to - * copy tk2 into place. - */ -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and a - * fully specified tweakey value. - * - * \param key Points to the 384-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-384 but - * more memory-efficient. - */ -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input); - -/** - * \brief Number of rounds for SKINNY-128-256. - */ -#define SKINNY_128_256_ROUNDS 48 - -/** - * \brief Structure of the key schedule for SKINNY-128-256. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_256_ROUNDS * 2]; -#endif - -} skinny_128_256_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256 and a - * fully specified tweakey value. - * - * \param key Points to the 256-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-256 but - * more memory-efficient. - */ -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-skinnyutil.h b/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-skinnyutil.h deleted file mode 100644 index 83136cb..0000000 --- a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-skinnyutil.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNYUTIL_H -#define LW_INTERNAL_SKINNYUTIL_H - -/** - * \file internal-skinnyutil.h - * \brief Utilities to help implement SKINNY and its variants. - */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @cond skinnyutil */ - -/* Utilities for implementing SKINNY-128 */ - -#define skinny128_LFSR2(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ - (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ - } while (0) - - -#define skinny128_LFSR3(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ - (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) -#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) - -#define skinny128_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint32_t row2 = tk[2]; \ - uint32_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 16) | (row3 >> 16); \ - tk[0] = ((row2 >> 8) & 0x000000FFU) | \ - ((row2 << 16) & 0x00FF0000U) | \ - ( row3 & 0xFF00FF00U); \ - tk[1] = ((row2 >> 16) & 0x000000FFU) | \ - (row2 & 0xFF000000U) | \ - ((row3 << 8) & 0x0000FF00U) | \ - ( row3 & 0x00FF0000U); \ - } while (0) - -#define skinny128_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint32_t row0 = tk[0]; \ - uint32_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 >> 16) & 0x000000FFU) | \ - ((row0 << 8) & 0x0000FF00U) | \ - ((row1 << 16) & 0x00FF0000U) | \ - ( row1 & 0xFF000000U); \ - tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ - ((row0 << 16) & 0xFF000000U) | \ - ((row1 >> 16) & 0x000000FFU) | \ - ((row1 << 8) & 0x00FF0000U); \ - } while (0) - -/* - * Apply the SKINNY sbox. The original version from the specification is - * equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE(x) - * ((((x) & 0x01010101U) << 2) | - * (((x) & 0x06060606U) << 5) | - * (((x) & 0x20202020U) >> 5) | - * (((x) & 0xC8C8C8C8U) >> 2) | - * (((x) & 0x10101010U) >> 1)) - * - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * return SBOX_SWAP(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. 
- */ -#define skinny128_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ - y = (((x >> 5) & (x << 1)) & 0x04040404U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ - x = ((x & 0x08080808U) << 1) | \ - ((x & 0x32323232U) << 2) | \ - ((x & 0x01010101U) << 5) | \ - ((x & 0x80808080U) >> 6) | \ - ((x & 0x40404040U) >> 4) | \ - ((x & 0x04040404U) >> 2); \ -} while (0) - -/* - * Apply the inverse of the SKINNY sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE_INV(x) - * ((((x) & 0x08080808U) << 1) | - * (((x) & 0x32323232U) << 2) | - * (((x) & 0x01010101U) << 5) | - * (((x) & 0xC0C0C0C0U) >> 5) | - * (((x) & 0x04040404U) >> 2)) - * - * x = SBOX_SWAP(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_inv_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ - x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ - y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ - x = ((x & 0x01010101U) << 2) | \ - ((x & 0x04040404U) << 4) | \ - ((x & 0x02020202U) << 6) | \ - ((x & 0x20202020U) >> 5) | \ - ((x & 0xC8C8C8C8U) >> 2) | \ - ((x & 0x10101010U) >> 1); \ -} while (0) - -/* Utilities for implementing SKINNY-64 */ - -#define skinny64_LFSR2(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ - } while (0) - -#define skinny64_LFSR3(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) -#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) - -#define skinny64_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint16_t row2 = tk[2]; \ - uint16_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 8) | (row3 >> 8); \ - tk[0] = ((row2 << 4) & 0xF000U) | \ - ((row2 >> 8) & 0x00F0U) | \ - ( row3 & 0x0F0FU); \ - tk[1] = ((row2 << 8) & 0xF000U) | \ - ((row3 >> 4) & 0x0F00U) | \ - ( row3 & 0x00F0U) | \ - ( row2 & 0x000FU); \ - } while (0) - 
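[Editor's note — not part of the original patch.] The comments in the deleted internal-skinnyutil.h above state that the LFSR2 and LFSR3 update macros are mutual inverses, for both the 32-bit SKINNY-128 and 16-bit SKINNY-64 variants. A minimal, hypothetical self-test along the following lines can confirm that property; it assumes it is compiled alongside the surviving rhys copy of internal-skinnyutil.h and uses only the macros shown in this hunk.

/* lfsr_selftest.c -- sketch only; assumes internal-skinnyutil.h is on the include path */
#include <stdio.h>
#include <stdint.h>
#include "internal-skinnyutil.h"

int main(void)
{
    uint32_t w = 0x12345678U, w0 = w;   /* arbitrary 32-bit test word  */
    uint16_t h = 0xABCDU,     h0 = h;   /* arbitrary 16-bit test word  */

    /* LFSR2 followed by its inverse (LFSR3) must restore the word */
    skinny128_LFSR2(w);
    skinny128_inv_LFSR2(w);
    printf("skinny128 LFSR2/inv_LFSR2: %s\n", (w == w0) ? "ok" : "FAIL");

    /* Same inverse property for the SKINNY-64 nibble-wise LFSRs */
    skinny64_LFSR3(h);
    skinny64_inv_LFSR3(h);
    printf("skinny64  LFSR3/inv_LFSR3: %s\n", (h == h0) ? "ok" : "FAIL");
    return 0;
}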
-#define skinny64_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint16_t row0 = tk[0]; \ - uint16_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 << 8) & 0xF000U) | \ - ((row0 >> 4) & 0x0F00U) | \ - ((row1 >> 8) & 0x00F0U) | \ - ( row1 & 0x000FU); \ - tk[3] = ((row1 << 8) & 0xF000U) | \ - ((row0 << 8) & 0x0F00U) | \ - ((row1 >> 4) & 0x00F0U) | \ - ((row0 >> 8) & 0x000FU); \ - } while (0) - -/* - * Apply the SKINNY-64 sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT(x) - * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_SHIFT steps to be performed with one final rotation. - * This reduces the number of required shift operations from 14 to 10. - * - * We can further reduce the number of NOT operations from 4 to 2 - * using the technique from https://github.com/kste/skinny_avx to - * convert NOR-XOR operations into AND-XOR operations by converting - * the S-box into its NOT-inverse. - */ -#define skinny64_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ - x = ~x; \ - x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ -} while (0) - -/* - * Apply the inverse of the SKINNY-64 sbox. The original version - * from the specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT_INV(x) - * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * return SBOX_MIX(x); - */ -#define skinny64_inv_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = ~x; \ - x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ -} while (0) - -/** @endcond */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-util.h b/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - 
(ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. 
This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/romulus.c b/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/romulus.c deleted file mode 100644 index bb19cc5..0000000 --- a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/romulus.c +++ /dev/null @@ -1,1974 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
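One note on the generic rotate macros above (an editorial aside, not part of the patch): the (x << bits) | (x >> (width - bits)) form is only well defined for shift counts from 1 to width - 1, because shifting a value by the full width of its type is undefined behaviour in C, and the code above accordingly only instantiates counts 1 through width - 1. A minimal sketch of a count-tolerant variant, using a hypothetical rotl64 name:

#include <stdint.h>

/* Illustration only: rotate left by 0..63 bits without ever shifting
 * a 64-bit value by 64. */
static inline uint64_t rotl64(uint64_t x, unsigned bits)
{
    bits &= 63;                   /* reduce the count modulo the width */
    if (bits == 0)
        return x;                 /* avoid the undefined full-width shift */
    return (x << bits) | (x >> (64 - bits));
}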
- */ - -#include "romulus.h" -#include "internal-skinny128.h" -#include "internal-util.h" -#include - -aead_cipher_t const romulus_n1_cipher = { - "Romulus-N1", - ROMULUS_KEY_SIZE, - ROMULUS1_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_n1_aead_encrypt, - romulus_n1_aead_decrypt -}; - -aead_cipher_t const romulus_n2_cipher = { - "Romulus-N2", - ROMULUS_KEY_SIZE, - ROMULUS2_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_n2_aead_encrypt, - romulus_n2_aead_decrypt -}; - -aead_cipher_t const romulus_n3_cipher = { - "Romulus-N3", - ROMULUS_KEY_SIZE, - ROMULUS3_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_n3_aead_encrypt, - romulus_n3_aead_decrypt -}; - -aead_cipher_t const romulus_m1_cipher = { - "Romulus-M1", - ROMULUS_KEY_SIZE, - ROMULUS1_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_m1_aead_encrypt, - romulus_m1_aead_decrypt -}; - -aead_cipher_t const romulus_m2_cipher = { - "Romulus-M2", - ROMULUS_KEY_SIZE, - ROMULUS2_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_m2_aead_encrypt, - romulus_m2_aead_decrypt -}; - -aead_cipher_t const romulus_m3_cipher = { - "Romulus-M3", - ROMULUS_KEY_SIZE, - ROMULUS3_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_m3_aead_encrypt, - romulus_m3_aead_decrypt -}; - -/** - * \brief Limit on the number of bytes of message or associated data (128Mb). - * - * Romulus-N1 and Romulus-M1 use a 56-bit block counter which allows for - * payloads well into the petabyte range. It is unlikely that an embedded - * device will have that much memory to store a contiguous packet! - * - * Romulus-N2 and Romulus-M2 use a 48-bit block counter but the upper - * 24 bits are difficult to modify in the key schedule. So we only - * update the low 24 bits and leave the high 24 bits fixed. - * - * Romulus-N3 and Romulus-M3 use a 24-bit block counter. - * - * For all algorithms, we limit the block counter to 2^23 so that the block - * counter can never exceed 2^24 - 1. - */ -#define ROMULUS_DATA_LIMIT \ - ((unsigned long long)((1ULL << 23) * SKINNY_128_BLOCK_SIZE)) - -/** - * \brief Initializes the key schedule for Romulus-N1 or Romulus-M1. - * - * \param ks Points to the key schedule to initialize. - * \param k Points to the 16 bytes of the key. - * \param npub Points to the 16 bytes of the nonce. May be NULL - * if the nonce will be updated on the fly. - */ -static void romulus1_init - (skinny_128_384_key_schedule_t *ks, - const unsigned char *k, const unsigned char *npub) -{ - unsigned char TK[48]; - TK[0] = 0x01; /* Initialize the 56-bit LFSR counter */ - memset(TK + 1, 0, 15); - if (npub) - memcpy(TK + 16, npub, 16); - else - memset(TK + 16, 0, 16); - memcpy(TK + 32, k, 16); - skinny_128_384_init(ks, TK); -} - -/** - * \brief Initializes the key schedule for Romulus-N2 or Romulus-M2. - * - * \param ks Points to the key schedule to initialize. - * \param k Points to the 16 bytes of the key. - * \param npub Points to the 12 bytes of the nonce. May be NULL - * if the nonce will be updated on the fly. 
- */ -static void romulus2_init - (skinny_128_384_key_schedule_t *ks, - const unsigned char *k, const unsigned char *npub) -{ - unsigned char TK[48]; - TK[0] = 0x01; /* Initialize the low 24 bits of the LFSR counter */ - if (npub) { - TK[1] = TK[2] = TK[3] = 0; - memcpy(TK + 4, npub, 12); - } else { - memset(TK + 1, 0, 15); - } - memcpy(TK + 16, k, 16); - TK[32] = 0x01; /* Initialize the high 24 bits of the LFSR counter */ - memset(TK + 33, 0, 15); - skinny_128_384_init(ks, TK); -} - -/** - * \brief Initializes the key schedule for Romulus-N3 or Romulus-M3. - * - * \param ks Points to the key schedule to initialize. - * \param k Points to the 16 bytes of the key. - * \param npub Points to the 12 bytes of the nonce. May be NULL - * if the nonce will be updated on the fly. - */ -static void romulus3_init - (skinny_128_256_key_schedule_t *ks, - const unsigned char *k, const unsigned char *npub) -{ - unsigned char TK[32]; - TK[0] = 0x01; /* Initialize the 24-bit LFSR counter */ - if (npub) { - TK[1] = TK[2] = TK[3] = 0; - memcpy(TK + 4, npub, 12); - } else { - memset(TK + 1, 0, 15); - } - memcpy(TK + 16, k, 16); - skinny_128_256_init(ks, TK); -} - -/** - * \brief Sets the domain separation value for Romulus-N1 and M1. - * - * \param ks The key schedule to set the domain separation value into. - * \param domain The domain separation value. - */ -#define romulus1_set_domain(ks, domain) ((ks)->TK1[7] = (domain)) - -/** - * \brief Sets the domain separation value for Romulus-N2 and M2. - * - * \param ks The key schedule to set the domain separation value into. - * \param domain The domain separation value. - */ -#define romulus2_set_domain(ks, domain) ((ks)->TK1[3] = (domain)) - -/** - * \brief Sets the domain separation value for Romulus-N3 and M3. - * - * \param ks The key schedule to set the domain separation value into. - * \param domain The domain separation value. - */ -#define romulus3_set_domain(ks, domain) ((ks)->TK1[3] = (domain)) - -/** - * \brief Updates the 56-bit LFSR block counter for Romulus-N1 and M1. - * - * \param TK1 Points to the TK1 part of the key schedule containing the LFSR. - */ -STATIC_INLINE void romulus1_update_counter(uint8_t TK1[16]) -{ - uint8_t mask = (uint8_t)(((int8_t)(TK1[6])) >> 7); - TK1[6] = (TK1[6] << 1) | (TK1[5] >> 7); - TK1[5] = (TK1[5] << 1) | (TK1[4] >> 7); - TK1[4] = (TK1[4] << 1) | (TK1[3] >> 7); - TK1[3] = (TK1[3] << 1) | (TK1[2] >> 7); - TK1[2] = (TK1[2] << 1) | (TK1[1] >> 7); - TK1[1] = (TK1[1] << 1) | (TK1[0] >> 7); - TK1[0] = (TK1[0] << 1) ^ (mask & 0x95); -} - -/** - * \brief Updates the 24-bit LFSR block counter for Romulus-N2 or M2. - * - * \param TK1 Points to the TK1 part of the key schedule containing the LFSR. - * - * For Romulus-N2 and Romulus-M2 this will only update the low 24 bits of - * the 48-bit LFSR. The high 24 bits are fixed due to ROMULUS_DATA_LIMIT. - */ -STATIC_INLINE void romulus2_update_counter(uint8_t TK1[16]) -{ - uint8_t mask = (uint8_t)(((int8_t)(TK1[2])) >> 7); - TK1[2] = (TK1[2] << 1) | (TK1[1] >> 7); - TK1[1] = (TK1[1] << 1) | (TK1[0] >> 7); - TK1[0] = (TK1[0] << 1) ^ (mask & 0x1B); -} - -/** - * \brief Updates the 24-bit LFSR block counter for Romulus-N3 or M3. - * - * \param TK1 Points to the TK1 part of the key schedule containing the LFSR. - */ -#define romulus3_update_counter(TK1) romulus2_update_counter((TK1)) - -/** - * \brief Process the asssociated data for Romulus-N1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. 
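The counter update used by Romulus-N2/N3 above behaves as a small Galois-style LFSR: TK1[0..2] hold a 24-bit register with its most significant byte in TK1[2], and when the top bit falls off the shift the feedback constant 0x1B is folded back into the low byte. A stand-alone sketch of the same step on a packed 24-bit value (the helper name is invented here for illustration):

#include <stdint.h>

/* Illustration only: behaves like romulus2_update_counter() applied to
 * a counter packed into the low 24 bits of a uint32_t. */
static uint32_t lfsr24_step(uint32_t counter)
{
    counter <<= 1;                 /* shift the whole 24-bit register */
    if (counter & 0x1000000UL)     /* bit 23 was set before the shift */
        counter ^= 0x1B;           /* fold the feedback taps back in */
    return counter & 0xFFFFFFUL;   /* keep only 24 bits */
}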
- * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - */ -static void romulus_n1_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char temp; - - /* Handle the special case of no associated data */ - if (adlen == 0) { - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x1A); - skinny_128_384_encrypt_tk2(ks, S, S, npub); - return; - } - - /* Process all double blocks except the last */ - romulus1_set_domain(ks, 0x08); - while (adlen > 32) { - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - ad += 32; - adlen -= 32; - } - - /* Pad and process the left-over blocks */ - romulus1_update_counter(ks->TK1); - temp = (unsigned)adlen; - if (temp == 32) { - /* Left-over complete double block */ - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x18); - } else if (temp > 16) { - /* Left-over partial double block */ - unsigned char pad[16]; - temp -= 16; - lw_xor_block(S, ad, 16); - memcpy(pad, ad + 16, temp); - memset(pad + temp, 0, 15 - temp); - pad[15] = temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x1A); - } else if (temp == 16) { - /* Left-over complete single block */ - lw_xor_block(S, ad, temp); - romulus1_set_domain(ks, 0x18); - } else { - /* Left-over partial single block */ - lw_xor_block(S, ad, temp); - S[15] ^= temp; - romulus1_set_domain(ks, 0x1A); - } - skinny_128_384_encrypt_tk2(ks, S, S, npub); -} - -/** - * \brief Process the asssociated data for Romulus-N2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
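The partial-block cases above all pad the same way: copy the leftover bytes, zero-fill the rest, and record the byte count in the last byte of the block. A stand-alone sketch of that rule (helper name invented for illustration):

#include <string.h>

/* Illustration only: len must be less than 16, matching the padded
 * "partial double block" handling above. */
static void romulus_pad16(unsigned char out[16],
                          const unsigned char *in, unsigned len)
{
    memcpy(out, in, len);              /* leftover input bytes */
    memset(out + len, 0, 15 - len);    /* zero fill up to the last byte */
    out[15] = (unsigned char)len;      /* length marker in the final byte */
}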
- */ -static void romulus_n2_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char temp; - - /* Handle the special case of no associated data */ - if (adlen == 0) { - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x5A); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all double blocks except the last */ - romulus2_set_domain(ks, 0x48); - while (adlen > 28) { - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Pad and process the left-over blocks */ - romulus2_update_counter(ks->TK1); - temp = (unsigned)adlen; - if (temp == 28) { - /* Left-over complete double block */ - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x58); - } else if (temp > 16) { - /* Left-over partial double block */ - temp -= 16; - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp); - ks->TK1[15] = temp; - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x5A); - } else if (temp == 16) { - /* Left-over complete single block */ - lw_xor_block(S, ad, temp); - romulus2_set_domain(ks, 0x58); - } else { - /* Left-over partial single block */ - lw_xor_block(S, ad, temp); - S[15] ^= temp; - romulus2_set_domain(ks, 0x5A); - } - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Process the asssociated data for Romulus-N3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
- */ -static void romulus_n3_process_ad - (skinny_128_256_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char temp; - - /* Handle the special case of no associated data */ - if (adlen == 0) { - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x9A); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_256_encrypt(ks, S, S); - return; - } - - /* Process all double blocks except the last */ - romulus3_set_domain(ks, 0x88); - while (adlen > 28) { - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Pad and process the left-over blocks */ - romulus3_update_counter(ks->TK1); - temp = (unsigned)adlen; - if (temp == 28) { - /* Left-over complete double block */ - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x98); - } else if (temp > 16) { - /* Left-over partial double block */ - temp -= 16; - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp); - ks->TK1[15] = temp; - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x9A); - } else if (temp == 16) { - /* Left-over complete single block */ - lw_xor_block(S, ad, temp); - romulus3_set_domain(ks, 0x98); - } else { - /* Left-over partial single block */ - lw_xor_block(S, ad, temp); - S[15] ^= temp; - romulus3_set_domain(ks, 0x9A); - } - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Determine the domain separation value to use on the last - * block of the associated data processing. - * - * \param adlen Length of the associated data in bytes. - * \param mlen Length of the message in bytes. - * \param t Size of the second half of a double block; 12 or 16. - * - * \return The domain separation bits to use to finalize the last block. - */ -static uint8_t romulus_m_final_ad_domain - (unsigned long long adlen, unsigned long long mlen, unsigned t) -{ - uint8_t domain = 0; - unsigned split = 16U; - unsigned leftover; - - /* Determine which domain bits we need based on the length of the ad */ - if (adlen == 0) { - /* No associated data, so only 1 block with padding */ - domain ^= 0x02; - split = t; - } else { - /* Even or odd associated data length? */ - leftover = (unsigned)(adlen % (16U + t)); - if (leftover == 0) { - /* Even with a full double block at the end */ - domain ^= 0x08; - } else if (leftover < split) { - /* Odd with a partial single block at the end */ - domain ^= 0x02; - split = t; - } else if (leftover > split) { - /* Even with a partial double block at the end */ - domain ^= 0x0A; - } else { - /* Odd with a full single block at the end */ - split = t; - } - } - - /* Determine which domain bits we need based on the length of the message */ - if (mlen == 0) { - /* No message, so only 1 block with padding */ - domain ^= 0x01; - } else { - /* Even or odd message length? 
*/ - leftover = (unsigned)(mlen % (16U + t)); - if (leftover == 0) { - /* Even with a full double block at the end */ - domain ^= 0x04; - } else if (leftover < split) { - /* Odd with a partial single block at the end */ - domain ^= 0x01; - } else if (leftover > split) { - /* Even with a partial double block at the end */ - domain ^= 0x05; - } - } - return domain; -} - -/** - * \brief Process the asssociated data for Romulus-M1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - * \param m Points to the message plaintext. - * \param mlen Length of the message plaintext. - */ -static void romulus_m1_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char pad[16]; - uint8_t final_domain = 0x30; - unsigned temp; - - /* Determine the domain separator to use on the final block */ - final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 16); - - /* Process all associated data double blocks except the last */ - romulus1_set_domain(ks, 0x28); - while (adlen > 32) { - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - ad += 32; - adlen -= 32; - } - - /* Process the last associated data double block */ - temp = (unsigned)adlen; - if (temp == 32) { - /* Last associated data double block is full */ - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - } else if (temp > 16) { - /* Last associated data double block is partial */ - temp -= 16; - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(pad, ad + 16, temp); - memset(pad + temp, 0, sizeof(pad) - temp - 1); - pad[sizeof(pad) - 1] = (unsigned char)temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - romulus1_update_counter(ks->TK1); - } else { - /* Last associated data block is single. 
Needs to be combined - * with the first block of the message payload */ - romulus1_set_domain(ks, 0x2C); - romulus1_update_counter(ks->TK1); - if (temp == 16) { - lw_xor_block(S, ad, 16); - } else { - lw_xor_block(S, ad, temp); - S[15] ^= (unsigned char)temp; - } - if (mlen > 16) { - skinny_128_384_encrypt_tk2(ks, S, S, m); - romulus1_update_counter(ks->TK1); - m += 16; - mlen -= 16; - } else if (mlen == 16) { - skinny_128_384_encrypt_tk2(ks, S, S, m); - m += 16; - mlen -= 16; - } else { - temp = (unsigned)mlen; - memcpy(pad, m, temp); - memset(pad + temp, 0, sizeof(pad) - temp - 1); - pad[sizeof(pad) - 1] = (unsigned char)temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - mlen = 0; - } - } - - /* Process all message double blocks except the last */ - romulus1_set_domain(ks, 0x2C); - while (mlen > 32) { - romulus1_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - skinny_128_384_encrypt_tk2(ks, S, S, m + 16); - romulus1_update_counter(ks->TK1); - m += 32; - mlen -= 32; - } - - /* Process the last message double block */ - temp = (unsigned)mlen; - if (temp == 32) { - /* Last message double block is full */ - romulus1_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - skinny_128_384_encrypt_tk2(ks, S, S, m + 16); - } else if (temp > 16) { - /* Last message double block is partial */ - temp -= 16; - romulus1_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(pad, m + 16, temp); - memset(pad + temp, 0, sizeof(pad) - temp - 1); - pad[sizeof(pad) - 1] = (unsigned char)temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - } else if (temp == 16) { - /* Last message single block is full */ - lw_xor_block(S, m, 16); - } else if (temp > 0) { - /* Last message single block is partial */ - lw_xor_block(S, m, temp); - S[15] ^= (unsigned char)temp; - } - - /* Process the last partial block */ - romulus1_set_domain(ks, final_domain); - romulus1_update_counter(ks->TK1); - skinny_128_384_encrypt_tk2(ks, S, S, npub); -} - -/** - * \brief Process the asssociated data for Romulus-M2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - * \param m Points to the message plaintext. - * \param mlen Length of the message plaintext. 
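As a worked example of the romulus_m_final_ad_domain() logic above, with numbers chosen purely for illustration (t = 12, as used by Romulus-M2 and M3, so a double block is 16 + 12 = 28 bytes):

/*
 * adlen = 30 -> 30 % 28 = 2, non-zero and below 16: the associated data
 *               ends in a partial single block, so domain ^= 0x02.
 * mlen  = 56 -> 56 % 28 = 0: the message ends in a full double block,
 *               so domain ^= 0x04.
 * romulus_m_final_ad_domain(30, 56, 12) therefore returns 0x06, and
 * Romulus-M2 finalizes its last block with domain 0x70 ^ 0x06 = 0x76.
 */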
- */ -static void romulus_m2_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *m, unsigned long long mlen) -{ - uint8_t final_domain = 0x70; - unsigned temp; - - /* Determine the domain separator to use on the final block */ - final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 12); - - /* Process all associated data double blocks except the last */ - romulus2_set_domain(ks, 0x68); - while (adlen > 28) { - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Process the last associated data double block */ - temp = (unsigned)adlen; - if (temp == 28) { - /* Last associated data double block is full */ - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - } else if (temp > 16) { - /* Last associated data double block is partial */ - temp -= 16; - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - } else { - /* Last associated data block is single. Needs to be combined - * with the first block of the message payload */ - romulus2_set_domain(ks, 0x6C); - romulus2_update_counter(ks->TK1); - if (temp == 16) { - lw_xor_block(S, ad, 16); - } else { - lw_xor_block(S, ad, temp); - S[15] ^= (unsigned char)temp; - } - if (mlen > 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - m += 12; - mlen -= 12; - } else if (mlen == 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_384_encrypt(ks, S, S); - m += 12; - mlen -= 12; - } else { - temp = (unsigned)mlen; - memcpy(ks->TK1 + 4, m, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_384_encrypt(ks, S, S); - mlen = 0; - } - } - - /* Process all message double blocks except the last */ - romulus2_set_domain(ks, 0x6C); - while (mlen > 28) { - romulus2_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - m += 28; - mlen -= 28; - } - - /* Process the last message double block */ - temp = (unsigned)mlen; - if (temp == 28) { - /* Last message double block is full */ - romulus2_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_384_encrypt(ks, S, S); - } else if (temp > 16) { - /* Last message double block is partial */ - temp -= 16; - romulus2_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_384_encrypt(ks, S, S); - } else if (temp == 16) { - /* Last message single block is full */ - lw_xor_block(S, m, 16); - } else if (temp > 0) { - /* Last message single block is partial */ - lw_xor_block(S, m, temp); - S[15] ^= (unsigned char)temp; - } - - /* Process the last partial block */ - romulus2_set_domain(ks, final_domain); - romulus2_update_counter(ks->TK1); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Process the asssociated data 
for Romulus-M3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - * \param m Points to the message plaintext. - * \param mlen Length of the message plaintext. - */ -static void romulus_m3_process_ad - (skinny_128_256_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *m, unsigned long long mlen) -{ - uint8_t final_domain = 0xB0; - unsigned temp; - - /* Determine the domain separator to use on the final block */ - final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 12); - - /* Process all associated data double blocks except the last */ - romulus3_set_domain(ks, 0xA8); - while (adlen > 28) { - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Process the last associated data double block */ - temp = (unsigned)adlen; - if (temp == 28) { - /* Last associated data double block is full */ - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - } else if (temp > 16) { - /* Last associated data double block is partial */ - temp -= 16; - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - } else { - /* Last associated data block is single. 
Needs to be combined - * with the first block of the message payload */ - romulus3_set_domain(ks, 0xAC); - romulus3_update_counter(ks->TK1); - if (temp == 16) { - lw_xor_block(S, ad, 16); - } else { - lw_xor_block(S, ad, temp); - S[15] ^= (unsigned char)temp; - } - if (mlen > 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - m += 12; - mlen -= 12; - } else if (mlen == 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_256_encrypt(ks, S, S); - m += 12; - mlen -= 12; - } else { - temp = (unsigned)mlen; - memcpy(ks->TK1 + 4, m, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_256_encrypt(ks, S, S); - mlen = 0; - } - } - - /* Process all message double blocks except the last */ - romulus3_set_domain(ks, 0xAC); - while (mlen > 28) { - romulus3_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - m += 28; - mlen -= 28; - } - - /* Process the last message double block */ - temp = (unsigned)mlen; - if (temp == 28) { - /* Last message double block is full */ - romulus3_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_256_encrypt(ks, S, S); - } else if (temp > 16) { - /* Last message double block is partial */ - temp -= 16; - romulus3_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_256_encrypt(ks, S, S); - } else if (temp == 16) { - /* Last message single block is full */ - lw_xor_block(S, m, 16); - } else if (temp > 0) { - /* Last message single block is partial */ - lw_xor_block(S, m, temp); - S[15] ^= (unsigned char)temp; - } - - /* Process the last partial block */ - romulus3_set_domain(ks, final_domain); - romulus3_update_counter(ks->TK1); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Applies the Romulus rho function. - * - * \param S The rolling Romulus state. - * \param C Ciphertext message output block. - * \param M Plaintext message input block. - */ -STATIC_INLINE void romulus_rho - (unsigned char S[16], unsigned char C[16], const unsigned char M[16]) -{ - unsigned index; - for (index = 0; index < 16; ++index) { - unsigned char s = S[index]; - unsigned char m = M[index]; - S[index] ^= m; - C[index] = m ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - } -} - -/** - * \brief Applies the inverse of the Romulus rho function. - * - * \param S The rolling Romulus state. - * \param M Plaintext message output block. - * \param C Ciphertext message input block. - */ -STATIC_INLINE void romulus_rho_inverse - (unsigned char S[16], unsigned char M[16], const unsigned char C[16]) -{ - unsigned index; - for (index = 0; index < 16; ++index) { - unsigned char s = S[index]; - unsigned char m = C[index] ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - S[index] ^= m; - M[index] = m; - } -} - -/** - * \brief Applies the Romulus rho function to a short block. - * - * \param S The rolling Romulus state. - * \param C Ciphertext message output block. - * \param M Plaintext message input block. - * \param len Length of the short block, must be less than 16. 
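The rho function above combines a per-byte feedback G(s) = (s >> 1) ^ (s & 0x80) ^ (s << 7) with the state update S ^= M, and the inverse applies the same G to the old state byte to recover the plaintext. A single-byte round-trip sketch (illustration only, outside the 16-byte loop):

#include <assert.h>

/* Illustration only: one byte of romulus_rho() and romulus_rho_inverse(). */
static void rho_one_byte_roundtrip(void)
{
    unsigned char s = 0x3C, m = 0xA5;
    unsigned char g = (unsigned char)((s >> 1) ^ (s & 0x80) ^ (s << 7));
    unsigned char c = m ^ g;       /* ciphertext byte produced by rho */
    unsigned char m_back = c ^ g;  /* the inverse recovers the same byte */
    assert(m_back == m);
    (void)m_back;
    /* both directions then update the state byte as s ^ m */
}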
- */ -STATIC_INLINE void romulus_rho_short - (unsigned char S[16], unsigned char C[16], - const unsigned char M[16], unsigned len) -{ - unsigned index; - for (index = 0; index < len; ++index) { - unsigned char s = S[index]; - unsigned char m = M[index]; - S[index] ^= m; - C[index] = m ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - } - S[15] ^= (unsigned char)len; /* Padding */ -} - -/** - * \brief Applies the inverse of the Romulus rho function to a short block. - * - * \param S The rolling Romulus state. - * \param M Plaintext message output block. - * \param C Ciphertext message input block. - * \param len Length of the short block, must be less than 16. - */ -STATIC_INLINE void romulus_rho_inverse_short - (unsigned char S[16], unsigned char M[16], - const unsigned char C[16], unsigned len) -{ - unsigned index; - for (index = 0; index < len; ++index) { - unsigned char s = S[index]; - unsigned char m = C[index] ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - S[index] ^= m; - M[index] = m; - } - S[15] ^= (unsigned char)len; /* Padding */ -} - -/** - * \brief Encrypts a plaintext message with Romulus-N1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n1_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no plaintext */ - if (mlen == 0) { - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x15); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus1_set_domain(ks, 0x04); - while (mlen > 16) { - romulus_rho(S, c, m); - romulus1_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus1_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_short(S, c, m, temp); - romulus1_set_domain(ks, 0x15); - } else { - romulus_rho(S, c, m); - romulus1_set_domain(ks, 0x14); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-N1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. 
- */ -static void romulus_n1_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no ciphertext */ - if (mlen == 0) { - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x15); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus1_set_domain(ks, 0x04); - while (mlen > 16) { - romulus_rho_inverse(S, m, c); - romulus1_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus1_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_inverse_short(S, m, c, temp); - romulus1_set_domain(ks, 0x15); - } else { - romulus_rho_inverse(S, m, c); - romulus1_set_domain(ks, 0x14); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Encrypts a plaintext message with Romulus-N2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n2_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no plaintext */ - if (mlen == 0) { - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x55); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus2_set_domain(ks, 0x44); - while (mlen > 16) { - romulus_rho(S, c, m); - romulus2_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus2_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_short(S, c, m, temp); - romulus2_set_domain(ks, 0x55); - } else { - romulus_rho(S, c, m); - romulus2_set_domain(ks, 0x54); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-N2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n2_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no ciphertext */ - if (mlen == 0) { - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x55); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus2_set_domain(ks, 0x44); - while (mlen > 16) { - romulus_rho_inverse(S, m, c); - romulus2_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus2_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_inverse_short(S, m, c, temp); - romulus2_set_domain(ks, 0x55); - } else { - romulus_rho_inverse(S, m, c); - romulus2_set_domain(ks, 0x54); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Encrypts a plaintext message with Romulus-N3. - * - * \param ks Points to the key schedule. 
- * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n3_encrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no plaintext */ - if (mlen == 0) { - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x95); - skinny_128_256_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus3_set_domain(ks, 0x84); - while (mlen > 16) { - romulus_rho(S, c, m); - romulus3_update_counter(ks->TK1); - skinny_128_256_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus3_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_short(S, c, m, temp); - romulus3_set_domain(ks, 0x95); - } else { - romulus_rho(S, c, m); - romulus3_set_domain(ks, 0x94); - } - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-N3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n3_decrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no ciphertext */ - if (mlen == 0) { - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x95); - skinny_128_256_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus3_set_domain(ks, 0x84); - while (mlen > 16) { - romulus_rho_inverse(S, m, c); - romulus3_update_counter(ks->TK1); - skinny_128_256_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus3_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_inverse_short(S, m, c, temp); - romulus3_set_domain(ks, 0x95); - } else { - romulus_rho_inverse(S, m, c); - romulus3_set_domain(ks, 0x94); - } - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Encrypts a plaintext message with Romulus-M1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m1_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus1_set_domain(ks, 0x24); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho(S, c, m); - romulus1_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_short(S, c, m, (unsigned)mlen); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-M1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. 
- * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m1_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus1_set_domain(ks, 0x24); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse(S, m, c); - romulus1_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse_short(S, m, c, (unsigned)mlen); -} - -/** - * \brief Encrypts a plaintext message with Romulus-M2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m2_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus2_set_domain(ks, 0x64); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho(S, c, m); - romulus2_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_short(S, c, m, (unsigned)mlen); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-M2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m2_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus2_set_domain(ks, 0x64); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse(S, m, c); - romulus2_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse_short(S, m, c, (unsigned)mlen); -} - -/** - * \brief Encrypts a plaintext message with Romulus-M3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m3_encrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus3_set_domain(ks, 0xA4); - while (mlen > 16) { - skinny_128_256_encrypt(ks, S, S); - romulus_rho(S, c, m); - romulus3_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_256_encrypt(ks, S, S); - romulus_rho_short(S, c, m, (unsigned)mlen); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-M3. 
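For orientation, the helpers above only implement the per-block encryption pass of Romulus-M; the aead-level functions later in this file combine them in a MAC-then-encrypt arrangement (an informal sketch, not part of the patch):

/*
 * encrypt: run the MAC pass over the associated data and the plaintext,
 *          folding the nonce into the final block; T = G(S) is the tag.
 *          T is appended to the ciphertext and also used as the starting
 *          state for romulus_m*_encrypt, which turns M into C via rho.
 *
 * decrypt: start romulus_m*_decrypt from the received T to recover M,
 *          rerun the MAC pass over (A, M), and accept only if the
 *          recomputed tag matches T.
 */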
- * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m3_decrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus3_set_domain(ks, 0xA4); - while (mlen > 16) { - skinny_128_256_encrypt(ks, S, S); - romulus_rho_inverse(S, m, c); - romulus3_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_256_encrypt(ks, S, S); - romulus_rho_inverse_short(S, m, c, (unsigned)mlen); -} - -/** - * \brief Generates the authentication tag from the rolling Romulus state. - * - * \param T Buffer to receive the generated tag; can be the same as S. - * \param S The rolling Romulus state. - */ -STATIC_INLINE void romulus_generate_tag - (unsigned char T[16], const unsigned char S[16]) -{ - unsigned index; - for (index = 0; index < 16; ++index) { - unsigned char s = S[index]; - T[index] = (s >> 1) ^ (s & 0x80) ^ (s << 7); - } -} - -int romulus_n1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n1_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Encrypts the plaintext to produce the ciphertext */ - romulus_n1_encrypt(&ks, S, c, m, mlen); - - /* Generate the authentication tag */ - romulus_generate_tag(c + mlen, S); - return 0; -} - -int romulus_n1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n1_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= ROMULUS_TAG_SIZE; - romulus_n1_decrypt(&ks, S, m, c, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_n2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n2_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Encrypts the plaintext to produce the ciphertext */ - romulus_n2_encrypt(&ks, S, c, m, mlen); - - /* Generate the authentication tag */ - romulus_generate_tag(c + mlen, S); - return 0; -} - -int romulus_n2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n2_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= ROMULUS_TAG_SIZE; - romulus_n2_decrypt(&ks, S, m, c, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_n3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n3_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Encrypts the plaintext to produce the ciphertext */ - romulus_n3_encrypt(&ks, S, c, m, mlen); - - /* Generate the authentication tag */ - romulus_generate_tag(c + mlen, S); - return 0; -} - -int romulus_n3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n3_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= ROMULUS_TAG_SIZE; - romulus_n3_decrypt(&ks, S, m, c, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_m1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data and the plaintext message */ - memset(S, 0, sizeof(S)); - romulus_m1_process_ad(&ks, S, npub, ad, adlen, m, mlen); - - /* Generate the authentication tag, which is also the initialization - * vector for the encryption portion of the packet processing */ - romulus_generate_tag(S, S); - memcpy(c + mlen, S, ROMULUS_TAG_SIZE); - - /* Re-initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Encrypt the plaintext to produce the ciphertext */ - romulus_m1_encrypt(&ks, S, c, m, mlen); - return 0; -} - -int romulus_m1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext, using the - * authentication tag as the initialization vector for decryption */ - clen -= ROMULUS_TAG_SIZE; - memcpy(S, c + clen, ROMULUS_TAG_SIZE); - romulus_m1_decrypt(&ks, S, m, c, clen); - - /* Re-initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_m1_process_ad(&ks, S, npub, ad, adlen, m, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_m2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data and the plaintext message */ - memset(S, 0, sizeof(S)); - romulus_m2_process_ad(&ks, S, npub, ad, adlen, m, mlen); - - /* Generate the authentication tag, which is also the initialization - * vector for the encryption portion of the packet processing */ - romulus_generate_tag(S, S); - memcpy(c + mlen, S, ROMULUS_TAG_SIZE); - - /* Re-initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Encrypt the plaintext to produce the ciphertext */ - romulus_m2_encrypt(&ks, S, c, m, mlen); - return 0; -} - -int romulus_m2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext, using the - * authentication tag as the initialization vector for decryption */ - clen -= ROMULUS_TAG_SIZE; - memcpy(S, c + clen, ROMULUS_TAG_SIZE); - romulus_m2_decrypt(&ks, S, m, c, clen); - - /* Re-initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_m2_process_ad(&ks, S, npub, ad, adlen, m, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_m3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data and the plaintext message */ - memset(S, 0, sizeof(S)); - romulus_m3_process_ad(&ks, S, npub, ad, adlen, m, mlen); - - /* Generate the authentication tag, which is also the initialization - * vector for the encryption portion of the packet processing */ - romulus_generate_tag(S, S); - memcpy(c + mlen, S, ROMULUS_TAG_SIZE); - - /* Re-initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Encrypt the plaintext to produce the ciphertext */ - romulus_m3_encrypt(&ks, S, c, m, mlen); - return 0; -} - -int romulus_m3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext, using the - * authentication tag as the initialization vector for decryption */ - clen -= ROMULUS_TAG_SIZE; - memcpy(S, c + clen, ROMULUS_TAG_SIZE); - romulus_m3_decrypt(&ks, S, m, c, clen); - - /* Re-initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_m3_process_ad(&ks, S, npub, ad, adlen, m, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} diff --git a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/romulus.h b/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/romulus.h deleted file mode 100644 index e6da29d..0000000 --- a/romulus/Implementations/crypto_aead/romulusn1/rhys-avr/romulus.h +++ /dev/null @@ -1,476 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
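Each of the deleted decryption routines above ends with aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE), which compares the recomputed tag against the tag carried at the end of the ciphertext and is expected to do so in constant time. A minimal sketch of that kind of comparison is given below, purely to illustrate the property; it is not the body of aead_check_tag() from aead-common.c, and the function name is illustrative.

#include <stddef.h>

/* Constant-time tag comparison: XOR-accumulate every byte pair and inspect
 * the accumulator only once at the end, so the running time does not depend
 * on where the first mismatching byte occurs. Returns 0 if equal, -1 if not. */
static int tags_equal(const unsigned char *a, const unsigned char *b, size_t len)
{
    unsigned char diff = 0;
    size_t i;
    for (i = 0; i < len; ++i)
        diff |= (unsigned char)(a[i] ^ b[i]);
    return diff == 0 ? 0 : -1;
}

This is also why every decryption routine above decrypts into m first and then returns the result of the tag check, rather than returning 0 unconditionally: the plaintext is only considered valid when that comparison succeeds.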
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ROMULUS_H -#define LWCRYPTO_ROMULUS_H - -#include "aead-common.h" - -/** - * \file romulus.h - * \brief Romulus authenticated encryption algorithm family. - * - * Romulus is a family of authenticated encryption algorithms that - * are built around the SKINNY-128 tweakable block cipher. There - * are six members in the family: - * - * \li Romulus-N1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. This is the - * primary member of the family. - * \li Romulus-N2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li Romulus-N3 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * \li Romulus-M1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li Romulus-M2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li Romulus-M3 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * - * The Romulus-M variants are resistant to nonce reuse as long as the - * combination of the associated data and plaintext is unique. If the - * same associated data and plaintext are reused under the same nonce, - * then the scheme will leak that the same plaintext has been sent for a - * second time but will not reveal the plaintext itself. - * - * References: https://romulusae.github.io/romulus/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all Romulus family members. - */ -#define ROMULUS_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for all Romulus family members. - */ -#define ROMULUS_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Romulus-N1 and Romulus-M1. - */ -#define ROMULUS1_NONCE_SIZE 16 - -/** - * \brief Size of the nonce for Romulus-N2 and Romulus-M2. - */ -#define ROMULUS2_NONCE_SIZE 12 - -/** - * \brief Size of the nonce for Romulus-N3 and Romulus-M3. - */ -#define ROMULUS3_NONCE_SIZE 12 - -/** - * \brief Meta-information block for the Romulus-N1 cipher. - */ -extern aead_cipher_t const romulus_n1_cipher; - -/** - * \brief Meta-information block for the Romulus-N2 cipher. 
- */ -extern aead_cipher_t const romulus_n2_cipher; - -/** - * \brief Meta-information block for the Romulus-N3 cipher. - */ -extern aead_cipher_t const romulus_n3_cipher; - -/** - * \brief Meta-information block for the Romulus-M1 cipher. - */ -extern aead_cipher_t const romulus_m1_cipher; - -/** - * \brief Meta-information block for the Romulus-M2 cipher. - */ -extern aead_cipher_t const romulus_m2_cipher; - -/** - * \brief Meta-information block for the Romulus-M3 cipher. - */ -extern aead_cipher_t const romulus_m3_cipher; - -/** - * \brief Encrypts and authenticates a packet with Romulus-N1. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_n1_aead_decrypt() - */ -int romulus_n1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-N1. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_n1_aead_encrypt() - */ -int romulus_n1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-N2. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
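The Romulus-N1 encrypt and decrypt prototypes above follow the usual NIST AEAD calling convention: the output buffer must have room for mlen plus the 16-byte tag, nsec is unused, and decryption returns 0 only when the tag verifies. A short caller-side sketch under those assumptions follows; the function name, key, nonce and buffer contents are placeholders, not test vectors.

#include <stddef.h>
#include "romulus.h"

int romulus_n1_roundtrip_example(void)
{
    unsigned char key[ROMULUS_KEY_SIZE] = {0};        /* 16-byte key (placeholder) */
    unsigned char nonce[ROMULUS1_NONCE_SIZE] = {0};   /* 16-byte nonce for N1/M1   */
    unsigned char ad[4] = {'h', 'd', 'r', 0};         /* associated data           */
    unsigned char msg[5] = {'h', 'e', 'l', 'l', 'o'}; /* plaintext                 */
    unsigned char ct[sizeof(msg) + ROMULUS_TAG_SIZE]; /* ciphertext || 16-byte tag */
    unsigned char out[sizeof(msg)];
    unsigned long long clen, mlen;

    if (romulus_n1_aead_encrypt(ct, &clen, msg, sizeof(msg), ad, sizeof(ad),
                                NULL, nonce, key) != 0)
        return -1;

    /* Returns 0 only if the tag at the end of ct verifies. */
    return romulus_n1_aead_decrypt(out, &mlen, NULL, ct, clen, ad, sizeof(ad),
                                   nonce, key);
}

The same calling pattern applies to the other five variants declared below; only the nonce length changes (12 bytes for the N2/N3/M2/M3 variants).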
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_n2_aead_decrypt() - */ -int romulus_n2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-N2. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_n2_aead_encrypt() - */ -int romulus_n2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-N3. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_n3_aead_decrypt() - */ -int romulus_n3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-N3. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. 
- * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_n3_aead_encrypt() - */ -int romulus_n3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-M1. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_m1_aead_decrypt() - */ -int romulus_m1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-M1. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_m1_aead_encrypt() - */ -int romulus_m1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-M2. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_m2_aead_decrypt() - */ -int romulus_m2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-M2. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_m2_aead_encrypt() - */ -int romulus_m2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-M3. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa romulus_m3_aead_decrypt() - */ -int romulus_m3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-M3. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_m3_aead_encrypt() - */ -int romulus_m3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusn1/rhys/internal-skinny128-avr.S b/romulus/Implementations/crypto_aead/romulusn1/rhys/internal-skinny128-avr.S new file mode 100644 index 0000000..d342cd5 --- /dev/null +++ b/romulus/Implementations/crypto_aead/romulusn1/rhys/internal-skinny128-avr.S @@ -0,0 +1,10099 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 256 +table_0: + .byte 101 + .byte 76 + .byte 106 + .byte 66 + .byte 75 + .byte 99 + .byte 67 + .byte 107 + .byte 85 + .byte 117 + .byte 90 + .byte 122 + .byte 83 + .byte 115 + .byte 91 + .byte 123 + .byte 53 + .byte 140 + .byte 58 + .byte 129 + .byte 137 + .byte 51 + .byte 128 + .byte 59 + .byte 149 + .byte 37 + .byte 152 + .byte 42 + .byte 144 + .byte 35 + .byte 153 + .byte 43 + .byte 229 + .byte 204 + .byte 232 + .byte 193 + .byte 201 + .byte 224 + .byte 192 + .byte 233 + .byte 213 + .byte 245 + .byte 216 + .byte 248 + .byte 208 + .byte 240 + .byte 217 + .byte 249 + .byte 165 + .byte 28 + .byte 168 + .byte 18 + .byte 27 + .byte 160 + .byte 19 + .byte 169 + .byte 5 + .byte 181 + .byte 10 + .byte 184 + .byte 3 + .byte 176 + .byte 11 + .byte 185 + .byte 50 + .byte 136 + .byte 60 + .byte 133 + .byte 141 + .byte 52 + .byte 132 + .byte 61 + .byte 145 + .byte 34 + .byte 156 + .byte 44 + .byte 148 + .byte 36 + .byte 157 + .byte 45 + .byte 98 + .byte 74 + .byte 108 + .byte 69 + .byte 77 + .byte 100 + .byte 68 + .byte 109 + .byte 82 + .byte 114 + .byte 92 + .byte 124 + .byte 84 + .byte 116 + .byte 93 + .byte 125 + .byte 161 + .byte 26 + .byte 172 + .byte 21 + .byte 29 + .byte 164 + .byte 20 + .byte 173 + .byte 2 + .byte 177 + .byte 12 + .byte 188 + .byte 4 + .byte 180 + .byte 13 + .byte 189 + .byte 225 + .byte 200 + .byte 236 + .byte 197 + .byte 205 + .byte 228 + .byte 196 + .byte 237 + .byte 
209 + .byte 241 + .byte 220 + .byte 252 + .byte 212 + .byte 244 + .byte 221 + .byte 253 + .byte 54 + .byte 142 + .byte 56 + .byte 130 + .byte 139 + .byte 48 + .byte 131 + .byte 57 + .byte 150 + .byte 38 + .byte 154 + .byte 40 + .byte 147 + .byte 32 + .byte 155 + .byte 41 + .byte 102 + .byte 78 + .byte 104 + .byte 65 + .byte 73 + .byte 96 + .byte 64 + .byte 105 + .byte 86 + .byte 118 + .byte 88 + .byte 120 + .byte 80 + .byte 112 + .byte 89 + .byte 121 + .byte 166 + .byte 30 + .byte 170 + .byte 17 + .byte 25 + .byte 163 + .byte 16 + .byte 171 + .byte 6 + .byte 182 + .byte 8 + .byte 186 + .byte 0 + .byte 179 + .byte 9 + .byte 187 + .byte 230 + .byte 206 + .byte 234 + .byte 194 + .byte 203 + .byte 227 + .byte 195 + .byte 235 + .byte 214 + .byte 246 + .byte 218 + .byte 250 + .byte 211 + .byte 243 + .byte 219 + .byte 251 + .byte 49 + .byte 138 + .byte 62 + .byte 134 + .byte 143 + .byte 55 + .byte 135 + .byte 63 + .byte 146 + .byte 33 + .byte 158 + .byte 46 + .byte 151 + .byte 39 + .byte 159 + .byte 47 + .byte 97 + .byte 72 + .byte 110 + .byte 70 + .byte 79 + .byte 103 + .byte 71 + .byte 111 + .byte 81 + .byte 113 + .byte 94 + .byte 126 + .byte 87 + .byte 119 + .byte 95 + .byte 127 + .byte 162 + .byte 24 + .byte 174 + .byte 22 + .byte 31 + .byte 167 + .byte 23 + .byte 175 + .byte 1 + .byte 178 + .byte 14 + .byte 190 + .byte 7 + .byte 183 + .byte 15 + .byte 191 + .byte 226 + .byte 202 + .byte 238 + .byte 198 + .byte 207 + .byte 231 + .byte 199 + .byte 239 + .byte 210 + .byte 242 + .byte 222 + .byte 254 + .byte 215 + .byte 247 + .byte 223 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 256 +table_1: + .byte 172 + .byte 232 + .byte 104 + .byte 60 + .byte 108 + .byte 56 + .byte 168 + .byte 236 + .byte 170 + .byte 174 + .byte 58 + .byte 62 + .byte 106 + .byte 110 + .byte 234 + .byte 238 + .byte 166 + .byte 163 + .byte 51 + .byte 54 + .byte 102 + .byte 99 + .byte 227 + .byte 230 + .byte 225 + .byte 164 + .byte 97 + .byte 52 + .byte 49 + .byte 100 + .byte 161 + .byte 228 + .byte 141 + .byte 201 + .byte 73 + .byte 29 + .byte 77 + .byte 25 + .byte 137 + .byte 205 + .byte 139 + .byte 143 + .byte 27 + .byte 31 + .byte 75 + .byte 79 + .byte 203 + .byte 207 + .byte 133 + .byte 192 + .byte 64 + .byte 21 + .byte 69 + .byte 16 + .byte 128 + .byte 197 + .byte 130 + .byte 135 + .byte 18 + .byte 23 + .byte 66 + .byte 71 + .byte 194 + .byte 199 + .byte 150 + .byte 147 + .byte 3 + .byte 6 + .byte 86 + .byte 83 + .byte 211 + .byte 214 + .byte 209 + .byte 148 + .byte 81 + .byte 4 + .byte 1 + .byte 84 + .byte 145 + .byte 212 + .byte 156 + .byte 216 + .byte 88 + .byte 12 + .byte 92 + .byte 8 + .byte 152 + .byte 220 + .byte 154 + .byte 158 + .byte 10 + .byte 14 + .byte 90 + .byte 94 + .byte 218 + .byte 222 + .byte 149 + .byte 208 + .byte 80 + .byte 5 + .byte 85 + .byte 0 + .byte 144 + .byte 213 + .byte 146 + .byte 151 + .byte 2 + .byte 7 + .byte 82 + .byte 87 + .byte 210 + .byte 215 + .byte 157 + .byte 217 + .byte 89 + .byte 13 + .byte 93 + .byte 9 + .byte 153 + .byte 221 + .byte 155 + .byte 159 + .byte 11 + .byte 15 + .byte 91 + .byte 95 + .byte 219 + .byte 223 + .byte 22 + .byte 19 + .byte 131 + .byte 134 + .byte 70 + .byte 67 + .byte 195 + .byte 198 + .byte 65 + .byte 20 + .byte 193 + .byte 132 + .byte 17 + .byte 68 + .byte 129 + .byte 196 + .byte 28 + .byte 72 + .byte 200 + .byte 140 + .byte 76 + .byte 24 + .byte 136 + .byte 204 + .byte 26 + .byte 30 + .byte 138 + .byte 142 + .byte 74 + .byte 78 + .byte 202 + .byte 206 + .byte 53 + .byte 96 + .byte 224 + 
.byte 165 + .byte 101 + .byte 48 + .byte 160 + .byte 229 + .byte 50 + .byte 55 + .byte 162 + .byte 167 + .byte 98 + .byte 103 + .byte 226 + .byte 231 + .byte 61 + .byte 105 + .byte 233 + .byte 173 + .byte 109 + .byte 57 + .byte 169 + .byte 237 + .byte 59 + .byte 63 + .byte 171 + .byte 175 + .byte 107 + .byte 111 + .byte 235 + .byte 239 + .byte 38 + .byte 35 + .byte 179 + .byte 182 + .byte 118 + .byte 115 + .byte 243 + .byte 246 + .byte 113 + .byte 36 + .byte 241 + .byte 180 + .byte 33 + .byte 116 + .byte 177 + .byte 244 + .byte 44 + .byte 120 + .byte 248 + .byte 188 + .byte 124 + .byte 40 + .byte 184 + .byte 252 + .byte 42 + .byte 46 + .byte 186 + .byte 190 + .byte 122 + .byte 126 + .byte 250 + .byte 254 + .byte 37 + .byte 112 + .byte 240 + .byte 181 + .byte 117 + .byte 32 + .byte 176 + .byte 245 + .byte 34 + .byte 39 + .byte 178 + .byte 183 + .byte 114 + .byte 119 + .byte 242 + .byte 247 + .byte 45 + .byte 121 + .byte 249 + .byte 189 + .byte 125 + .byte 41 + .byte 185 + .byte 253 + .byte 43 + .byte 47 + .byte 187 + .byte 191 + .byte 123 + .byte 127 + .byte 251 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_2, @object + .size table_2, 256 +table_2: + .byte 0 + .byte 2 + .byte 4 + .byte 6 + .byte 8 + .byte 10 + .byte 12 + .byte 14 + .byte 16 + .byte 18 + .byte 20 + .byte 22 + .byte 24 + .byte 26 + .byte 28 + .byte 30 + .byte 32 + .byte 34 + .byte 36 + .byte 38 + .byte 40 + .byte 42 + .byte 44 + .byte 46 + .byte 48 + .byte 50 + .byte 52 + .byte 54 + .byte 56 + .byte 58 + .byte 60 + .byte 62 + .byte 65 + .byte 67 + .byte 69 + .byte 71 + .byte 73 + .byte 75 + .byte 77 + .byte 79 + .byte 81 + .byte 83 + .byte 85 + .byte 87 + .byte 89 + .byte 91 + .byte 93 + .byte 95 + .byte 97 + .byte 99 + .byte 101 + .byte 103 + .byte 105 + .byte 107 + .byte 109 + .byte 111 + .byte 113 + .byte 115 + .byte 117 + .byte 119 + .byte 121 + .byte 123 + .byte 125 + .byte 127 + .byte 128 + .byte 130 + .byte 132 + .byte 134 + .byte 136 + .byte 138 + .byte 140 + .byte 142 + .byte 144 + .byte 146 + .byte 148 + .byte 150 + .byte 152 + .byte 154 + .byte 156 + .byte 158 + .byte 160 + .byte 162 + .byte 164 + .byte 166 + .byte 168 + .byte 170 + .byte 172 + .byte 174 + .byte 176 + .byte 178 + .byte 180 + .byte 182 + .byte 184 + .byte 186 + .byte 188 + .byte 190 + .byte 193 + .byte 195 + .byte 197 + .byte 199 + .byte 201 + .byte 203 + .byte 205 + .byte 207 + .byte 209 + .byte 211 + .byte 213 + .byte 215 + .byte 217 + .byte 219 + .byte 221 + .byte 223 + .byte 225 + .byte 227 + .byte 229 + .byte 231 + .byte 233 + .byte 235 + .byte 237 + .byte 239 + .byte 241 + .byte 243 + .byte 245 + .byte 247 + .byte 249 + .byte 251 + .byte 253 + .byte 255 + .byte 1 + .byte 3 + .byte 5 + .byte 7 + .byte 9 + .byte 11 + .byte 13 + .byte 15 + .byte 17 + .byte 19 + .byte 21 + .byte 23 + .byte 25 + .byte 27 + .byte 29 + .byte 31 + .byte 33 + .byte 35 + .byte 37 + .byte 39 + .byte 41 + .byte 43 + .byte 45 + .byte 47 + .byte 49 + .byte 51 + .byte 53 + .byte 55 + .byte 57 + .byte 59 + .byte 61 + .byte 63 + .byte 64 + .byte 66 + .byte 68 + .byte 70 + .byte 72 + .byte 74 + .byte 76 + .byte 78 + .byte 80 + .byte 82 + .byte 84 + .byte 86 + .byte 88 + .byte 90 + .byte 92 + .byte 94 + .byte 96 + .byte 98 + .byte 100 + .byte 102 + .byte 104 + .byte 106 + .byte 108 + .byte 110 + .byte 112 + .byte 114 + .byte 116 + .byte 118 + .byte 120 + .byte 122 + .byte 124 + .byte 126 + .byte 129 + .byte 131 + .byte 133 + .byte 135 + .byte 137 + .byte 139 + .byte 141 + .byte 143 + .byte 145 + .byte 147 + .byte 149 + .byte 151 + .byte 153 + 
.byte 155 + .byte 157 + .byte 159 + .byte 161 + .byte 163 + .byte 165 + .byte 167 + .byte 169 + .byte 171 + .byte 173 + .byte 175 + .byte 177 + .byte 179 + .byte 181 + .byte 183 + .byte 185 + .byte 187 + .byte 189 + .byte 191 + .byte 192 + .byte 194 + .byte 196 + .byte 198 + .byte 200 + .byte 202 + .byte 204 + .byte 206 + .byte 208 + .byte 210 + .byte 212 + .byte 214 + .byte 216 + .byte 218 + .byte 220 + .byte 222 + .byte 224 + .byte 226 + .byte 228 + .byte 230 + .byte 232 + .byte 234 + .byte 236 + .byte 238 + .byte 240 + .byte 242 + .byte 244 + .byte 246 + .byte 248 + .byte 250 + .byte 252 + .byte 254 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_3, @object + .size table_3, 256 +table_3: + .byte 0 + .byte 128 + .byte 1 + .byte 129 + .byte 2 + .byte 130 + .byte 3 + .byte 131 + .byte 4 + .byte 132 + .byte 5 + .byte 133 + .byte 6 + .byte 134 + .byte 7 + .byte 135 + .byte 8 + .byte 136 + .byte 9 + .byte 137 + .byte 10 + .byte 138 + .byte 11 + .byte 139 + .byte 12 + .byte 140 + .byte 13 + .byte 141 + .byte 14 + .byte 142 + .byte 15 + .byte 143 + .byte 16 + .byte 144 + .byte 17 + .byte 145 + .byte 18 + .byte 146 + .byte 19 + .byte 147 + .byte 20 + .byte 148 + .byte 21 + .byte 149 + .byte 22 + .byte 150 + .byte 23 + .byte 151 + .byte 24 + .byte 152 + .byte 25 + .byte 153 + .byte 26 + .byte 154 + .byte 27 + .byte 155 + .byte 28 + .byte 156 + .byte 29 + .byte 157 + .byte 30 + .byte 158 + .byte 31 + .byte 159 + .byte 160 + .byte 32 + .byte 161 + .byte 33 + .byte 162 + .byte 34 + .byte 163 + .byte 35 + .byte 164 + .byte 36 + .byte 165 + .byte 37 + .byte 166 + .byte 38 + .byte 167 + .byte 39 + .byte 168 + .byte 40 + .byte 169 + .byte 41 + .byte 170 + .byte 42 + .byte 171 + .byte 43 + .byte 172 + .byte 44 + .byte 173 + .byte 45 + .byte 174 + .byte 46 + .byte 175 + .byte 47 + .byte 176 + .byte 48 + .byte 177 + .byte 49 + .byte 178 + .byte 50 + .byte 179 + .byte 51 + .byte 180 + .byte 52 + .byte 181 + .byte 53 + .byte 182 + .byte 54 + .byte 183 + .byte 55 + .byte 184 + .byte 56 + .byte 185 + .byte 57 + .byte 186 + .byte 58 + .byte 187 + .byte 59 + .byte 188 + .byte 60 + .byte 189 + .byte 61 + .byte 190 + .byte 62 + .byte 191 + .byte 63 + .byte 64 + .byte 192 + .byte 65 + .byte 193 + .byte 66 + .byte 194 + .byte 67 + .byte 195 + .byte 68 + .byte 196 + .byte 69 + .byte 197 + .byte 70 + .byte 198 + .byte 71 + .byte 199 + .byte 72 + .byte 200 + .byte 73 + .byte 201 + .byte 74 + .byte 202 + .byte 75 + .byte 203 + .byte 76 + .byte 204 + .byte 77 + .byte 205 + .byte 78 + .byte 206 + .byte 79 + .byte 207 + .byte 80 + .byte 208 + .byte 81 + .byte 209 + .byte 82 + .byte 210 + .byte 83 + .byte 211 + .byte 84 + .byte 212 + .byte 85 + .byte 213 + .byte 86 + .byte 214 + .byte 87 + .byte 215 + .byte 88 + .byte 216 + .byte 89 + .byte 217 + .byte 90 + .byte 218 + .byte 91 + .byte 219 + .byte 92 + .byte 220 + .byte 93 + .byte 221 + .byte 94 + .byte 222 + .byte 95 + .byte 223 + .byte 224 + .byte 96 + .byte 225 + .byte 97 + .byte 226 + .byte 98 + .byte 227 + .byte 99 + .byte 228 + .byte 100 + .byte 229 + .byte 101 + .byte 230 + .byte 102 + .byte 231 + .byte 103 + .byte 232 + .byte 104 + .byte 233 + .byte 105 + .byte 234 + .byte 106 + .byte 235 + .byte 107 + .byte 236 + .byte 108 + .byte 237 + .byte 109 + .byte 238 + .byte 110 + .byte 239 + .byte 111 + .byte 240 + .byte 112 + .byte 241 + .byte 113 + .byte 242 + .byte 114 + .byte 243 + .byte 115 + .byte 244 + .byte 116 + .byte 245 + .byte 117 + .byte 246 + .byte 118 + .byte 247 + .byte 119 + .byte 248 + .byte 120 + .byte 249 + .byte 121 + .byte 250 + 
.byte 122 + .byte 251 + .byte 123 + .byte 252 + .byte 124 + .byte 253 + .byte 125 + .byte 254 + .byte 126 + .byte 255 + .byte 127 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_4, @object + .size table_4, 112 +table_4: + .byte 1 + .byte 0 + .byte 3 + .byte 0 + .byte 7 + .byte 0 + .byte 15 + .byte 0 + .byte 15 + .byte 1 + .byte 14 + .byte 3 + .byte 13 + .byte 3 + .byte 11 + .byte 3 + .byte 7 + .byte 3 + .byte 15 + .byte 2 + .byte 14 + .byte 1 + .byte 12 + .byte 3 + .byte 9 + .byte 3 + .byte 3 + .byte 3 + .byte 7 + .byte 2 + .byte 14 + .byte 0 + .byte 13 + .byte 1 + .byte 10 + .byte 3 + .byte 5 + .byte 3 + .byte 11 + .byte 2 + .byte 6 + .byte 1 + .byte 12 + .byte 2 + .byte 8 + .byte 1 + .byte 0 + .byte 3 + .byte 1 + .byte 2 + .byte 2 + .byte 0 + .byte 5 + .byte 0 + .byte 11 + .byte 0 + .byte 7 + .byte 1 + .byte 14 + .byte 2 + .byte 12 + .byte 1 + .byte 8 + .byte 3 + .byte 1 + .byte 3 + .byte 3 + .byte 2 + .byte 6 + .byte 0 + .byte 13 + .byte 0 + .byte 11 + .byte 1 + .byte 6 + .byte 3 + .byte 13 + .byte 2 + .byte 10 + .byte 1 + .byte 4 + .byte 3 + .byte 9 + .byte 2 + .byte 2 + .byte 1 + .byte 4 + .byte 2 + .byte 8 + .byte 0 + .byte 1 + .byte 1 + .byte 2 + .byte 2 + .byte 4 + .byte 0 + .byte 9 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 2 + .byte 12 + .byte 0 + .byte 9 + .byte 1 + .byte 2 + .byte 3 + .byte 5 + .byte 2 + .byte 10 + .byte 0 + + .text +.global skinny_128_384_init + .type skinny_128_384_init, @function +skinny_128_384_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,12 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_384_init, .-skinny_128_384_init + + .text +.global skinny_128_384_encrypt + .type skinny_128_384_encrypt, @function +skinny_128_384_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + std Y+33,r18 + std Y+34,r19 + std Y+35,r20 + std Y+36,r21 + ldd r18,Z+36 + ldd r19,Z+37 + ldd r20,Z+38 + ldd r21,Z+39 + std Y+37,r18 + std Y+38,r19 + std Y+39,r20 + std Y+40,r21 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + std Y+41,r18 + std Y+42,r19 + std Y+43,r20 + std Y+44,r21 + ldd r18,Z+44 + ldd r19,Z+45 + ldd r20,Z+46 + ldd 
r21,Z+47 + std Y+45,r18 + std Y+46,r19 + std Y+47,r20 + std Y+48,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +114: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif 
+ eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 
+#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov 
r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm 
r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov 
r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 
+#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,112 + brne 5721f + rjmp 790f +5721: + 
ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + 
std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 114b +790: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_encrypt, .-skinny_128_384_encrypt + +.global skinny_128_384_encrypt_tk_full + .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt + + .text +.global skinny_128_384_decrypt + .type skinny_128_384_decrypt, @function +skinny_128_384_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r23 + std Y+2,r2 + std Y+3,r21 + std Y+4,r20 + std Y+5,r3 + std Y+6,r18 + std Y+7,r19 + std Y+8,r22 + std Y+9,r9 + std Y+10,r10 + std Y+11,r7 + std Y+12,r6 + std Y+13,r11 + std Y+14,r4 + std Y+15,r5 + std Y+16,r8 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r23 + std Y+18,r2 + std Y+19,r21 + std Y+20,r20 + std Y+21,r3 + std Y+22,r18 + std Y+23,r19 + std Y+24,r22 + std Y+25,r9 + std Y+26,r10 + std Y+27,r7 + std Y+28,r6 + std Y+29,r11 + std Y+30,r4 + std Y+31,r5 + std Y+32,r8 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r22,Z+36 + ldd r23,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+33,r23 + std Y+34,r2 + std Y+35,r21 + std Y+36,r20 + std Y+37,r3 + std Y+38,r18 + std Y+39,r19 + std Y+40,r22 + std Y+41,r9 + std Y+42,r10 + std Y+43,r7 + std Y+44,r6 + std Y+45,r11 + std Y+46,r4 + std Y+47,r5 + std Y+48,r8 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +122: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z 
+#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 122b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,28 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +150: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 150b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 +179: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 
+#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 179b + std Y+33,r12 + std Y+34,r13 + std Y+35,r14 + std Y+36,r15 + std Y+37,r24 + std Y+38,r25 + std Y+39,r16 + std Y+40,r17 + ldi r26,28 + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 +207: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 207b + std Y+41,r12 + std Y+42,r13 + std Y+43,r14 + std Y+44,r15 + std Y+45,r24 + std Y+46,r25 + std Y+47,r16 + std Y+48,r17 + ldi r26,112 +227: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm 
r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor 
r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std 
Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + 
mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z 
+#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + 
mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z 
+#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + 
ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif 
defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 903f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 227b +903: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_decrypt, .-skinny_128_384_decrypt + + .text +.global skinny_128_256_init + .type skinny_128_256_init, @function +skinny_128_256_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,8 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_256_init, .-skinny_128_256_init + + .text +.global skinny_128_256_encrypt + .type skinny_128_256_encrypt, @function +skinny_128_256_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 
+ push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +82: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov 
r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor 
r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if 
defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi 
r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + 
ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,96 + breq 594f + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) 
+ elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 82b +594: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_encrypt, .-skinny_128_256_encrypt + +.global skinny_128_256_encrypt_tk_full + .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt + + .text +.global skinny_128_256_decrypt + .type skinny_128_256_decrypt, @function +skinny_128_256_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + std Y+5,r22 + std Y+6,r23 + std Y+7,r2 + std Y+8,r3 + std Y+9,r4 + std Y+10,r5 + std Y+11,r6 + std Y+12,r7 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + std Y+21,r22 + std Y+22,r23 + std Y+23,r2 + std Y+24,r3 + std Y+25,r4 + std Y+26,r5 + std Y+27,r6 + std Y+28,r7 + std Y+29,r8 + std Y+30,r9 + std Y+31,r10 + std Y+32,r11 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,24 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +90: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm 
r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 90b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,24 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +118: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 118b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,96 +139: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else 
+ lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 
+#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + 
elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd 
r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif 
defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 651f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 139b +651: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_decrypt, .-skinny_128_256_decrypt + +#endif diff --git a/romulus/Implementations/crypto_aead/romulusn1/rhys/internal-skinny128.c b/romulus/Implementations/crypto_aead/romulusn1/rhys/internal-skinny128.c index 65ba4ed..579ced1 100644 --- a/romulus/Implementations/crypto_aead/romulusn1/rhys/internal-skinny128.c +++ b/romulus/Implementations/crypto_aead/romulusn1/rhys/internal-skinny128.c @@ -25,6 +25,8 @@ #include "internal-util.h" #include +#if !defined(__AVR__) + STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) { /* This function is used to fast-forward the TK1 tweak value @@ -55,42 +57,33 @@ STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) ((row3 << 24) & 0xFF000000U); } -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - 
size_t key_len) +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t TK3[4]; uint32_t *schedule; unsigned round; uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || (key_len != 32 && key_len != 48)) - return 0; - +#endif + +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); + memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); +#else /* Set the initial states of TK1, TK2, and TK3 */ - if (key_len == 32) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - TK3[0] = le_load_word32(key + 16); - TK3[1] = le_load_word32(key + 20); - TK3[2] = le_load_word32(key + 24); - TK3[3] = le_load_word32(key + 28); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); + TK3[0] = le_load_word32(key + 32); + TK3[1] = le_load_word32(key + 36); + TK3[2] = le_load_word32(key + 40); + TK3[3] = le_load_word32(key + 44); /* Set up the key schedule using TK2 and TK3. TK1 is not added * to the key schedule because we will derive that part of the @@ -116,20 +109,7 @@ int skinny_128_384_init skinny128_LFSR3(TK3[0]); skinny128_LFSR3(TK3[1]); } - return 1; -} - -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_384_encrypt @@ -138,7 +118,13 @@ void skinny_128_384_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -148,14 +134,24 @@ void skinny_128_384_encrypt s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -163,8 +159,15 @@ void skinny_128_384_encrypt skinny128_sbox(s3); /* 
Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -185,6 +188,16 @@ void skinny_128_384_encrypt /* Permute TK1 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_permute_tk(TK3); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -200,7 +213,13 @@ void skinny_128_384_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0x15; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -215,15 +234,47 @@ void skinny_128_384_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Permute TK1 to fast-forward it to the end of the key schedule */ skinny128_fast_forward_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_fast_forward_tk(TK2); + skinny128_fast_forward_tk(TK3); + for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
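/*
 * Why SKINNY_128_384_ROUNDS / 2 iterations suffice here: the tweakey
 * permutation swaps the two halves of TK2 and TK3 every round, and the
 * LFSRs are applied only to the half that lands in the top rows, so over
 * a full encryption every byte of TK2/TK3 passes through its LFSR in
 * exactly half of the rounds.  Stepping all four words ROUNDS / 2 times
 * therefore fast-forwards the whole tweakey to its end-of-schedule value.
 * At the byte level the TK3 LFSR is also the inverse of the TK2 LFSR,
 * which is why the per-round inverse schedule below applies
 * skinny128_LFSR3 to TK2 and skinny128_LFSR2 to TK3; a stand-alone
 * sketch of both properties follows the skinny_128_384_encrypt_tk_full
 * hunk below.
 */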
+ skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); + skinny128_LFSR3(TK3[2]); + skinny128_LFSR3(TK3[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_inv_permute_tk(TK3); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); + skinny128_LFSR2(TK3[2]); + skinny128_LFSR2(TK3[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -240,8 +291,15 @@ void skinny_128_384_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -259,13 +317,18 @@ void skinny_128_384_decrypt } void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2) { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; uint32_t TK2[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -275,7 +338,7 @@ void skinny_128_384_encrypt_tk2 s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1/TK2 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); @@ -284,9 +347,15 @@ void skinny_128_384_encrypt_tk2 TK2[1] = le_load_word32(tk2 + 4); TK2[2] = le_load_word32(tk2 + 8); TK2[3] = le_load_word32(tk2 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -294,8 +363,15 @@ void skinny_128_384_encrypt_tk2 skinny128_sbox(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -319,6 +395,13 @@ void skinny_128_384_encrypt_tk2 skinny128_permute_tk(TK2); skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK3); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -408,33 +491,27 @@ void skinny_128_384_encrypt_tk_full 
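Two small invertible maps drive the SKINNY_128_SMALL_SCHEDULE changes above: the per-cell tweakey LFSRs (the TK3 LFSR undoes the TK2 LFSR at the byte level) and the 6-bit round-constant LFSR, which the decrypt paths run backwards with rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20) and then split into a low nibble for s0 and a high nibble for s1. The stand-alone sketch below checks both inverse relationships; lfsr2_byte, lfsr3_byte, rc_fwd and rc_rev are illustrative helpers written only for this check, not functions from internal-skinny128.c, while the skinny128_LFSR2/skinny128_LFSR3 macros used above apply the same per-byte maps to four bytes packed into a uint32_t.

#include <assert.h>
#include <stdint.h>

/* TK2 cell LFSR from the SKINNY specification: x7..x0 -> x6..x0 || (x7 ^ x5) */
static uint8_t lfsr2_byte(uint8_t x)
{
    return (uint8_t)(((x << 1) & 0xFE) | (((x >> 7) ^ (x >> 5)) & 0x01));
}

/* TK3 cell LFSR: x7..x0 -> (x0 ^ x6) || x7..x1; it undoes lfsr2_byte */
static uint8_t lfsr3_byte(uint8_t x)
{
    return (uint8_t)(((x >> 1) & 0x7F) | (((x << 7) ^ (x << 1)) & 0x80));
}

/* Forward 6-bit round-constant step, mirroring the encrypt paths above */
static uint8_t rc_fwd(uint8_t rc)
{
    rc = (uint8_t)((rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01);
    return (uint8_t)(rc & 0x3F);
}

/* Inverse step, mirroring the decrypt paths above */
static uint8_t rc_rev(uint8_t rc)
{
    return (uint8_t)((rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20));
}

int main(void)
{
    unsigned x;
    for (x = 0; x < 256; ++x)   /* the TK3 LFSR inverts the TK2 LFSR on every byte */
        assert(lfsr3_byte(lfsr2_byte((uint8_t)x)) == x);
    for (x = 0; x < 0x40; ++x)  /* the decrypt rc step inverts the encrypt rc step */
        assert(rc_rev(rc_fwd((uint8_t)x)) == x);
    return 0;
}

The AVR assembly earlier in this patch presumably computes the same per-byte updates via 256-byte flash lookup tables (table_2/table_3) rather than shift-and-XOR, which is why its decryption prologue loops a table lookup ROUNDS / 2 times over the stored tweakey halves.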
le_store_word32(output + 12, s3); } -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len) +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t *schedule; unsigned round; uint8_t rc; +#endif - /* Validate the parameters */ - if (!ks || !key || (key_len != 16 && key_len != 32)) - return 0; - +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); +#else /* Set the initial states of TK1 and TK2 */ - if (key_len == 16) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); /* Set up the key schedule using TK2. TK1 is not added * to the key schedule because we will derive that part of the @@ -457,20 +534,7 @@ int skinny_128_256_init skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); } - return 1; -} - -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_256_encrypt @@ -479,7 +543,12 @@ void skinny_128_256_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -494,18 +563,31 @@ void skinny_128_256_encrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); skinny128_sbox(s2); skinny128_sbox(s3); - /* Apply the subkey for this round */ + /* XOR the round constant and the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -524,8 +606,15 @@ void skinny_128_256_encrypt s1 = s0; s0 = temp; - /* Permute TK1 for the next round */ + /* Permute TK1 and TK2 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -541,7 +630,12 @@ void 
skinny_128_256_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0x09; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -558,12 +652,29 @@ void skinny_128_256_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2. + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -580,8 +691,15 @@ void skinny_128_256_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -670,142 +788,14 @@ void skinny_128_256_encrypt_tk_full le_store_word32(output + 12, s3); } -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len) -{ - uint32_t TK1[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || key_len != 16) - return 0; - - /* Set the initial state of TK1 */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); +#else /* __AVR__ */ - /* Set up the key schedule using TK1 */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK1[0] ^ (rc & 0x0F); - schedule[1] = TK1[1] ^ (rc >> 4); - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); - } - return 1; -} - -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) +void skinny_128_384_encrypt_tk2 + (skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, const unsigned char *tk2) { - uint32_t s0, s1, s2, s3; - const uint32_t *schedule = ks->k; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); + memcpy(ks->TK2, tk2, 16); + skinny_128_384_encrypt(ks, output, input); } -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - const uint32_t *schedule; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_128_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule -= 2) { - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} +#endif /* __AVR__ */ diff --git a/romulus/Implementations/crypto_aead/romulusn1/rhys/internal-skinny128.h b/romulus/Implementations/crypto_aead/romulusn1/rhys/internal-skinny128.h index 76b34f5..2bfda3c 100644 --- a/romulus/Implementations/crypto_aead/romulusn1/rhys/internal-skinny128.h +++ b/romulus/Implementations/crypto_aead/romulusn1/rhys/internal-skinny128.h @@ -39,6 +39,16 @@ extern "C" { #endif /** + * \def SKINNY_128_SMALL_SCHEDULE + * \brief Defined to 1 to use the 
small key schedule version of SKINNY-128. + */ +#if defined(__AVR__) +#define SKINNY_128_SMALL_SCHEDULE 1 +#else +#define SKINNY_128_SMALL_SCHEDULE 0 +#endif + +/** * \brief Size of a block for SKINNY-128 block ciphers. */ #define SKINNY_128_BLOCK_SIZE 16 @@ -56,8 +66,16 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; + + /** TK3 for the small key schedule */ + uint8_t TK3[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_384_ROUNDS * 2]; +#endif } skinny_128_384_key_schedule_t; @@ -66,29 +84,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 32 or 48, - * where 32 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-384. - * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); /** * \brief Encrypts a 128-bit block with SKINNY-128-384. @@ -133,9 +131,12 @@ void skinny_128_384_decrypt * This version is useful when both TK1 and TK2 change from block to block. * When the key is initialized with skinny_128_384_init(), the TK2 part of * the key value should be set to zero. + * + * \note Some versions of this function may modify the key schedule to + * copy tk2 into place. */ void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2); /** @@ -170,8 +171,13 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_256_ROUNDS * 2]; +#endif } skinny_128_256_key_schedule_t; @@ -180,29 +186,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16 or 32, - * where 16 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. 
- */ -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); /** * \brief Encrypts a 128-bit block with SKINNY-128-256. @@ -251,63 +237,6 @@ void skinny_128_256_encrypt_tk_full (const unsigned char key[32], unsigned char *output, const unsigned char *input); -/** - * \brief Number of rounds for SKINNY-128-128. - */ -#define SKINNY_128_128_ROUNDS 40 - -/** - * \brief Structure of the key schedule for SKINNY-128-128. - */ -typedef struct -{ - /** Words of the key schedule */ - uint32_t k[SKINNY_128_128_ROUNDS * 2]; - -} skinny_128_128_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-128. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - #ifdef __cplusplus } #endif diff --git a/romulus/Implementations/crypto_aead/romulusn1/rhys/internal-util.h b/romulus/Implementations/crypto_aead/romulusn1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/romulus/Implementations/crypto_aead/romulusn1/rhys/internal-util.h +++ b/romulus/Implementations/crypto_aead/romulusn1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/romulus/Implementations/crypto_aead/romulusn1/rhys/romulus.c b/romulus/Implementations/crypto_aead/romulusn1/rhys/romulus.c index be1c0fa..bb19cc5 100644 --- a/romulus/Implementations/crypto_aead/romulusn1/rhys/romulus.c +++ b/romulus/Implementations/crypto_aead/romulusn1/rhys/romulus.c @@ -116,14 +116,15 @@ static void romulus1_init (skinny_128_384_key_schedule_t *ks, const unsigned char *k, const unsigned char *npub) { - unsigned char TK[32]; + unsigned char TK[48]; + TK[0] = 0x01; /* Initialize the 56-bit LFSR counter */ + memset(TK + 1, 0, 15); if (npub) - memcpy(TK, npub, 16); + memcpy(TK + 16, npub, 16); else - memset(TK, 0, 16); - memcpy(TK + 16, k, 16); - skinny_128_384_init(ks, TK, sizeof(TK)); - ks->TK1[0] = 0x01; /* Initialize the 56-bit LFSR counter */ + memset(TK + 16, 0, 16); + memcpy(TK + 32, k, 16); + skinny_128_384_init(ks, TK); } /** @@ -138,14 +139,18 @@ static void romulus2_init (skinny_128_384_key_schedule_t *ks, const unsigned char *k, const unsigned char *npub) { - unsigned char TK[32]; - memcpy(TK, k, 16); - memset(TK + 16, 0, 16); - TK[16] = 0x01; /* Initialize the high 24 bits of the LFSR counter */ - skinny_128_384_init(ks, TK, sizeof(TK)); - ks->TK1[0] = 0x01; 
/* Initialize the low 24 bits of the LFSR counter */ - if (npub) - memcpy(ks->TK1 + 4, npub, 12); + unsigned char TK[48]; + TK[0] = 0x01; /* Initialize the low 24 bits of the LFSR counter */ + if (npub) { + TK[1] = TK[2] = TK[3] = 0; + memcpy(TK + 4, npub, 12); + } else { + memset(TK + 1, 0, 15); + } + memcpy(TK + 16, k, 16); + TK[32] = 0x01; /* Initialize the high 24 bits of the LFSR counter */ + memset(TK + 33, 0, 15); + skinny_128_384_init(ks, TK); } /** @@ -160,10 +165,16 @@ static void romulus3_init (skinny_128_256_key_schedule_t *ks, const unsigned char *k, const unsigned char *npub) { - skinny_128_256_init(ks, k, 16); - ks->TK1[0] = 0x01; /* Initialize the 24-bit LFSR counter */ - if (npub) - memcpy(ks->TK1 + 4, npub, 12); + unsigned char TK[32]; + TK[0] = 0x01; /* Initialize the 24-bit LFSR counter */ + if (npub) { + TK[1] = TK[2] = TK[3] = 0; + memcpy(TK + 4, npub, 12); + } else { + memset(TK + 1, 0, 15); + } + memcpy(TK + 16, k, 16); + skinny_128_256_init(ks, TK); } /** diff --git a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/aead-common.c b/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/aead-common.h b/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. 
- * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
- * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/api.h b/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/api.h deleted file mode 100644 index c3c0a27..0000000 --- a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 12 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/encrypt.c b/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/encrypt.c deleted file mode 100644 index 275a53c..0000000 --- a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "romulus.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return romulus_n2_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return romulus_n2_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-skinny128-avr.S b/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-skinny128-avr.S deleted file mode 100644 index d342cd5..0000000 --- a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-skinny128-avr.S +++ /dev/null @@ -1,10099 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 256 -table_0: - .byte 101 - .byte 76 - .byte 106 - .byte 66 - .byte 75 - .byte 99 - .byte 67 - .byte 107 - .byte 85 - .byte 117 - .byte 90 - .byte 122 - .byte 83 - .byte 115 - .byte 91 - .byte 123 - .byte 53 - .byte 140 - .byte 58 - .byte 129 - .byte 137 - .byte 51 - .byte 128 - .byte 59 - .byte 149 - .byte 37 - .byte 152 - .byte 42 - .byte 144 - .byte 35 - .byte 153 - .byte 43 - .byte 229 - .byte 204 - .byte 232 - .byte 193 - .byte 201 - .byte 224 - .byte 192 - .byte 233 - .byte 213 - .byte 245 - .byte 216 - .byte 248 - .byte 208 - .byte 240 - .byte 217 - .byte 249 - .byte 165 - .byte 28 - .byte 168 - .byte 18 - .byte 27 - .byte 160 - .byte 19 - .byte 169 - .byte 5 - .byte 181 - .byte 10 - .byte 184 - .byte 3 - .byte 176 - .byte 11 - .byte 185 - .byte 50 - .byte 136 - .byte 60 - .byte 133 - .byte 141 - .byte 52 - .byte 132 - .byte 61 - .byte 145 - .byte 34 - .byte 156 - .byte 44 - .byte 148 - .byte 36 - .byte 157 - .byte 45 - .byte 98 - .byte 74 - .byte 108 - .byte 69 - 
.byte 77 - .byte 100 - .byte 68 - .byte 109 - .byte 82 - .byte 114 - .byte 92 - .byte 124 - .byte 84 - .byte 116 - .byte 93 - .byte 125 - .byte 161 - .byte 26 - .byte 172 - .byte 21 - .byte 29 - .byte 164 - .byte 20 - .byte 173 - .byte 2 - .byte 177 - .byte 12 - .byte 188 - .byte 4 - .byte 180 - .byte 13 - .byte 189 - .byte 225 - .byte 200 - .byte 236 - .byte 197 - .byte 205 - .byte 228 - .byte 196 - .byte 237 - .byte 209 - .byte 241 - .byte 220 - .byte 252 - .byte 212 - .byte 244 - .byte 221 - .byte 253 - .byte 54 - .byte 142 - .byte 56 - .byte 130 - .byte 139 - .byte 48 - .byte 131 - .byte 57 - .byte 150 - .byte 38 - .byte 154 - .byte 40 - .byte 147 - .byte 32 - .byte 155 - .byte 41 - .byte 102 - .byte 78 - .byte 104 - .byte 65 - .byte 73 - .byte 96 - .byte 64 - .byte 105 - .byte 86 - .byte 118 - .byte 88 - .byte 120 - .byte 80 - .byte 112 - .byte 89 - .byte 121 - .byte 166 - .byte 30 - .byte 170 - .byte 17 - .byte 25 - .byte 163 - .byte 16 - .byte 171 - .byte 6 - .byte 182 - .byte 8 - .byte 186 - .byte 0 - .byte 179 - .byte 9 - .byte 187 - .byte 230 - .byte 206 - .byte 234 - .byte 194 - .byte 203 - .byte 227 - .byte 195 - .byte 235 - .byte 214 - .byte 246 - .byte 218 - .byte 250 - .byte 211 - .byte 243 - .byte 219 - .byte 251 - .byte 49 - .byte 138 - .byte 62 - .byte 134 - .byte 143 - .byte 55 - .byte 135 - .byte 63 - .byte 146 - .byte 33 - .byte 158 - .byte 46 - .byte 151 - .byte 39 - .byte 159 - .byte 47 - .byte 97 - .byte 72 - .byte 110 - .byte 70 - .byte 79 - .byte 103 - .byte 71 - .byte 111 - .byte 81 - .byte 113 - .byte 94 - .byte 126 - .byte 87 - .byte 119 - .byte 95 - .byte 127 - .byte 162 - .byte 24 - .byte 174 - .byte 22 - .byte 31 - .byte 167 - .byte 23 - .byte 175 - .byte 1 - .byte 178 - .byte 14 - .byte 190 - .byte 7 - .byte 183 - .byte 15 - .byte 191 - .byte 226 - .byte 202 - .byte 238 - .byte 198 - .byte 207 - .byte 231 - .byte 199 - .byte 239 - .byte 210 - .byte 242 - .byte 222 - .byte 254 - .byte 215 - .byte 247 - .byte 223 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 256 -table_1: - .byte 172 - .byte 232 - .byte 104 - .byte 60 - .byte 108 - .byte 56 - .byte 168 - .byte 236 - .byte 170 - .byte 174 - .byte 58 - .byte 62 - .byte 106 - .byte 110 - .byte 234 - .byte 238 - .byte 166 - .byte 163 - .byte 51 - .byte 54 - .byte 102 - .byte 99 - .byte 227 - .byte 230 - .byte 225 - .byte 164 - .byte 97 - .byte 52 - .byte 49 - .byte 100 - .byte 161 - .byte 228 - .byte 141 - .byte 201 - .byte 73 - .byte 29 - .byte 77 - .byte 25 - .byte 137 - .byte 205 - .byte 139 - .byte 143 - .byte 27 - .byte 31 - .byte 75 - .byte 79 - .byte 203 - .byte 207 - .byte 133 - .byte 192 - .byte 64 - .byte 21 - .byte 69 - .byte 16 - .byte 128 - .byte 197 - .byte 130 - .byte 135 - .byte 18 - .byte 23 - .byte 66 - .byte 71 - .byte 194 - .byte 199 - .byte 150 - .byte 147 - .byte 3 - .byte 6 - .byte 86 - .byte 83 - .byte 211 - .byte 214 - .byte 209 - .byte 148 - .byte 81 - .byte 4 - .byte 1 - .byte 84 - .byte 145 - .byte 212 - .byte 156 - .byte 216 - .byte 88 - .byte 12 - .byte 92 - .byte 8 - .byte 152 - .byte 220 - .byte 154 - .byte 158 - .byte 10 - .byte 14 - .byte 90 - .byte 94 - .byte 218 - .byte 222 - .byte 149 - .byte 208 - .byte 80 - .byte 5 - .byte 85 - .byte 0 - .byte 144 - .byte 213 - .byte 146 - .byte 151 - .byte 2 - .byte 7 - .byte 82 - .byte 87 - .byte 210 - .byte 215 - .byte 157 - .byte 217 - .byte 89 - .byte 13 - .byte 93 - .byte 9 - .byte 153 - .byte 221 - .byte 155 - .byte 159 - .byte 11 - .byte 15 - .byte 91 - .byte 95 - .byte 
219 - .byte 223 - .byte 22 - .byte 19 - .byte 131 - .byte 134 - .byte 70 - .byte 67 - .byte 195 - .byte 198 - .byte 65 - .byte 20 - .byte 193 - .byte 132 - .byte 17 - .byte 68 - .byte 129 - .byte 196 - .byte 28 - .byte 72 - .byte 200 - .byte 140 - .byte 76 - .byte 24 - .byte 136 - .byte 204 - .byte 26 - .byte 30 - .byte 138 - .byte 142 - .byte 74 - .byte 78 - .byte 202 - .byte 206 - .byte 53 - .byte 96 - .byte 224 - .byte 165 - .byte 101 - .byte 48 - .byte 160 - .byte 229 - .byte 50 - .byte 55 - .byte 162 - .byte 167 - .byte 98 - .byte 103 - .byte 226 - .byte 231 - .byte 61 - .byte 105 - .byte 233 - .byte 173 - .byte 109 - .byte 57 - .byte 169 - .byte 237 - .byte 59 - .byte 63 - .byte 171 - .byte 175 - .byte 107 - .byte 111 - .byte 235 - .byte 239 - .byte 38 - .byte 35 - .byte 179 - .byte 182 - .byte 118 - .byte 115 - .byte 243 - .byte 246 - .byte 113 - .byte 36 - .byte 241 - .byte 180 - .byte 33 - .byte 116 - .byte 177 - .byte 244 - .byte 44 - .byte 120 - .byte 248 - .byte 188 - .byte 124 - .byte 40 - .byte 184 - .byte 252 - .byte 42 - .byte 46 - .byte 186 - .byte 190 - .byte 122 - .byte 126 - .byte 250 - .byte 254 - .byte 37 - .byte 112 - .byte 240 - .byte 181 - .byte 117 - .byte 32 - .byte 176 - .byte 245 - .byte 34 - .byte 39 - .byte 178 - .byte 183 - .byte 114 - .byte 119 - .byte 242 - .byte 247 - .byte 45 - .byte 121 - .byte 249 - .byte 189 - .byte 125 - .byte 41 - .byte 185 - .byte 253 - .byte 43 - .byte 47 - .byte 187 - .byte 191 - .byte 123 - .byte 127 - .byte 251 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_2, @object - .size table_2, 256 -table_2: - .byte 0 - .byte 2 - .byte 4 - .byte 6 - .byte 8 - .byte 10 - .byte 12 - .byte 14 - .byte 16 - .byte 18 - .byte 20 - .byte 22 - .byte 24 - .byte 26 - .byte 28 - .byte 30 - .byte 32 - .byte 34 - .byte 36 - .byte 38 - .byte 40 - .byte 42 - .byte 44 - .byte 46 - .byte 48 - .byte 50 - .byte 52 - .byte 54 - .byte 56 - .byte 58 - .byte 60 - .byte 62 - .byte 65 - .byte 67 - .byte 69 - .byte 71 - .byte 73 - .byte 75 - .byte 77 - .byte 79 - .byte 81 - .byte 83 - .byte 85 - .byte 87 - .byte 89 - .byte 91 - .byte 93 - .byte 95 - .byte 97 - .byte 99 - .byte 101 - .byte 103 - .byte 105 - .byte 107 - .byte 109 - .byte 111 - .byte 113 - .byte 115 - .byte 117 - .byte 119 - .byte 121 - .byte 123 - .byte 125 - .byte 127 - .byte 128 - .byte 130 - .byte 132 - .byte 134 - .byte 136 - .byte 138 - .byte 140 - .byte 142 - .byte 144 - .byte 146 - .byte 148 - .byte 150 - .byte 152 - .byte 154 - .byte 156 - .byte 158 - .byte 160 - .byte 162 - .byte 164 - .byte 166 - .byte 168 - .byte 170 - .byte 172 - .byte 174 - .byte 176 - .byte 178 - .byte 180 - .byte 182 - .byte 184 - .byte 186 - .byte 188 - .byte 190 - .byte 193 - .byte 195 - .byte 197 - .byte 199 - .byte 201 - .byte 203 - .byte 205 - .byte 207 - .byte 209 - .byte 211 - .byte 213 - .byte 215 - .byte 217 - .byte 219 - .byte 221 - .byte 223 - .byte 225 - .byte 227 - .byte 229 - .byte 231 - .byte 233 - .byte 235 - .byte 237 - .byte 239 - .byte 241 - .byte 243 - .byte 245 - .byte 247 - .byte 249 - .byte 251 - .byte 253 - .byte 255 - .byte 1 - .byte 3 - .byte 5 - .byte 7 - .byte 9 - .byte 11 - .byte 13 - .byte 15 - .byte 17 - .byte 19 - .byte 21 - .byte 23 - .byte 25 - .byte 27 - .byte 29 - .byte 31 - .byte 33 - .byte 35 - .byte 37 - .byte 39 - .byte 41 - .byte 43 - .byte 45 - .byte 47 - .byte 49 - .byte 51 - .byte 53 - .byte 55 - .byte 57 - .byte 59 - .byte 61 - .byte 63 - .byte 64 - .byte 66 - .byte 68 - .byte 70 - .byte 72 - .byte 74 - .byte 76 - .byte 78 - .byte 80 - .byte 
82 - .byte 84 - .byte 86 - .byte 88 - .byte 90 - .byte 92 - .byte 94 - .byte 96 - .byte 98 - .byte 100 - .byte 102 - .byte 104 - .byte 106 - .byte 108 - .byte 110 - .byte 112 - .byte 114 - .byte 116 - .byte 118 - .byte 120 - .byte 122 - .byte 124 - .byte 126 - .byte 129 - .byte 131 - .byte 133 - .byte 135 - .byte 137 - .byte 139 - .byte 141 - .byte 143 - .byte 145 - .byte 147 - .byte 149 - .byte 151 - .byte 153 - .byte 155 - .byte 157 - .byte 159 - .byte 161 - .byte 163 - .byte 165 - .byte 167 - .byte 169 - .byte 171 - .byte 173 - .byte 175 - .byte 177 - .byte 179 - .byte 181 - .byte 183 - .byte 185 - .byte 187 - .byte 189 - .byte 191 - .byte 192 - .byte 194 - .byte 196 - .byte 198 - .byte 200 - .byte 202 - .byte 204 - .byte 206 - .byte 208 - .byte 210 - .byte 212 - .byte 214 - .byte 216 - .byte 218 - .byte 220 - .byte 222 - .byte 224 - .byte 226 - .byte 228 - .byte 230 - .byte 232 - .byte 234 - .byte 236 - .byte 238 - .byte 240 - .byte 242 - .byte 244 - .byte 246 - .byte 248 - .byte 250 - .byte 252 - .byte 254 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_3, @object - .size table_3, 256 -table_3: - .byte 0 - .byte 128 - .byte 1 - .byte 129 - .byte 2 - .byte 130 - .byte 3 - .byte 131 - .byte 4 - .byte 132 - .byte 5 - .byte 133 - .byte 6 - .byte 134 - .byte 7 - .byte 135 - .byte 8 - .byte 136 - .byte 9 - .byte 137 - .byte 10 - .byte 138 - .byte 11 - .byte 139 - .byte 12 - .byte 140 - .byte 13 - .byte 141 - .byte 14 - .byte 142 - .byte 15 - .byte 143 - .byte 16 - .byte 144 - .byte 17 - .byte 145 - .byte 18 - .byte 146 - .byte 19 - .byte 147 - .byte 20 - .byte 148 - .byte 21 - .byte 149 - .byte 22 - .byte 150 - .byte 23 - .byte 151 - .byte 24 - .byte 152 - .byte 25 - .byte 153 - .byte 26 - .byte 154 - .byte 27 - .byte 155 - .byte 28 - .byte 156 - .byte 29 - .byte 157 - .byte 30 - .byte 158 - .byte 31 - .byte 159 - .byte 160 - .byte 32 - .byte 161 - .byte 33 - .byte 162 - .byte 34 - .byte 163 - .byte 35 - .byte 164 - .byte 36 - .byte 165 - .byte 37 - .byte 166 - .byte 38 - .byte 167 - .byte 39 - .byte 168 - .byte 40 - .byte 169 - .byte 41 - .byte 170 - .byte 42 - .byte 171 - .byte 43 - .byte 172 - .byte 44 - .byte 173 - .byte 45 - .byte 174 - .byte 46 - .byte 175 - .byte 47 - .byte 176 - .byte 48 - .byte 177 - .byte 49 - .byte 178 - .byte 50 - .byte 179 - .byte 51 - .byte 180 - .byte 52 - .byte 181 - .byte 53 - .byte 182 - .byte 54 - .byte 183 - .byte 55 - .byte 184 - .byte 56 - .byte 185 - .byte 57 - .byte 186 - .byte 58 - .byte 187 - .byte 59 - .byte 188 - .byte 60 - .byte 189 - .byte 61 - .byte 190 - .byte 62 - .byte 191 - .byte 63 - .byte 64 - .byte 192 - .byte 65 - .byte 193 - .byte 66 - .byte 194 - .byte 67 - .byte 195 - .byte 68 - .byte 196 - .byte 69 - .byte 197 - .byte 70 - .byte 198 - .byte 71 - .byte 199 - .byte 72 - .byte 200 - .byte 73 - .byte 201 - .byte 74 - .byte 202 - .byte 75 - .byte 203 - .byte 76 - .byte 204 - .byte 77 - .byte 205 - .byte 78 - .byte 206 - .byte 79 - .byte 207 - .byte 80 - .byte 208 - .byte 81 - .byte 209 - .byte 82 - .byte 210 - .byte 83 - .byte 211 - .byte 84 - .byte 212 - .byte 85 - .byte 213 - .byte 86 - .byte 214 - .byte 87 - .byte 215 - .byte 88 - .byte 216 - .byte 89 - .byte 217 - .byte 90 - .byte 218 - .byte 91 - .byte 219 - .byte 92 - .byte 220 - .byte 93 - .byte 221 - .byte 94 - .byte 222 - .byte 95 - .byte 223 - .byte 224 - .byte 96 - .byte 225 - .byte 97 - .byte 226 - .byte 98 - .byte 227 - .byte 99 - .byte 228 - .byte 100 - .byte 229 - .byte 101 - .byte 230 - .byte 102 - .byte 231 - .byte 103 - .byte 232 - .byte 104 - .byte 
233 - .byte 105 - .byte 234 - .byte 106 - .byte 235 - .byte 107 - .byte 236 - .byte 108 - .byte 237 - .byte 109 - .byte 238 - .byte 110 - .byte 239 - .byte 111 - .byte 240 - .byte 112 - .byte 241 - .byte 113 - .byte 242 - .byte 114 - .byte 243 - .byte 115 - .byte 244 - .byte 116 - .byte 245 - .byte 117 - .byte 246 - .byte 118 - .byte 247 - .byte 119 - .byte 248 - .byte 120 - .byte 249 - .byte 121 - .byte 250 - .byte 122 - .byte 251 - .byte 123 - .byte 252 - .byte 124 - .byte 253 - .byte 125 - .byte 254 - .byte 126 - .byte 255 - .byte 127 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_4, @object - .size table_4, 112 -table_4: - .byte 1 - .byte 0 - .byte 3 - .byte 0 - .byte 7 - .byte 0 - .byte 15 - .byte 0 - .byte 15 - .byte 1 - .byte 14 - .byte 3 - .byte 13 - .byte 3 - .byte 11 - .byte 3 - .byte 7 - .byte 3 - .byte 15 - .byte 2 - .byte 14 - .byte 1 - .byte 12 - .byte 3 - .byte 9 - .byte 3 - .byte 3 - .byte 3 - .byte 7 - .byte 2 - .byte 14 - .byte 0 - .byte 13 - .byte 1 - .byte 10 - .byte 3 - .byte 5 - .byte 3 - .byte 11 - .byte 2 - .byte 6 - .byte 1 - .byte 12 - .byte 2 - .byte 8 - .byte 1 - .byte 0 - .byte 3 - .byte 1 - .byte 2 - .byte 2 - .byte 0 - .byte 5 - .byte 0 - .byte 11 - .byte 0 - .byte 7 - .byte 1 - .byte 14 - .byte 2 - .byte 12 - .byte 1 - .byte 8 - .byte 3 - .byte 1 - .byte 3 - .byte 3 - .byte 2 - .byte 6 - .byte 0 - .byte 13 - .byte 0 - .byte 11 - .byte 1 - .byte 6 - .byte 3 - .byte 13 - .byte 2 - .byte 10 - .byte 1 - .byte 4 - .byte 3 - .byte 9 - .byte 2 - .byte 2 - .byte 1 - .byte 4 - .byte 2 - .byte 8 - .byte 0 - .byte 1 - .byte 1 - .byte 2 - .byte 2 - .byte 4 - .byte 0 - .byte 9 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 2 - .byte 12 - .byte 0 - .byte 9 - .byte 1 - .byte 2 - .byte 3 - .byte 5 - .byte 2 - .byte 10 - .byte 0 - - .text -.global skinny_128_384_init - .type skinny_128_384_init, @function -skinny_128_384_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,12 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_384_init, .-skinny_128_384_init - - .text -.global skinny_128_384_encrypt - .type skinny_128_384_encrypt, @function -skinny_128_384_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - 
ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - std Y+33,r18 - std Y+34,r19 - std Y+35,r20 - std Y+36,r21 - ldd r18,Z+36 - ldd r19,Z+37 - ldd r20,Z+38 - ldd r21,Z+39 - std Y+37,r18 - std Y+38,r19 - std Y+39,r20 - std Y+40,r21 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - std Y+41,r18 - std Y+42,r19 - std Y+43,r20 - std Y+44,r21 - ldd r18,Z+44 - ldd r19,Z+45 - ldd r20,Z+46 - ldd r21,Z+47 - std Y+45,r18 - std Y+46,r19 - std Y+47,r20 - std Y+48,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -114: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi 
r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd 
r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) 
- ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm 
r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - 
ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd 
r0,Y+48 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,112 - brne 5721f - rjmp 790f -5721: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif 
defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 114b -790: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_encrypt, .-skinny_128_384_encrypt - -.global skinny_128_384_encrypt_tk_full - .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt - - .text -.global skinny_128_384_decrypt - .type skinny_128_384_decrypt, @function -skinny_128_384_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r23 - std Y+2,r2 - std Y+3,r21 - std Y+4,r20 - std Y+5,r3 - std Y+6,r18 - std Y+7,r19 - std Y+8,r22 - std Y+9,r9 - std Y+10,r10 - std Y+11,r7 - std Y+12,r6 - std Y+13,r11 - std Y+14,r4 - std Y+15,r5 - std Y+16,r8 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r23 - std Y+18,r2 - std Y+19,r21 - std Y+20,r20 - std Y+21,r3 - std Y+22,r18 - std Y+23,r19 - std Y+24,r22 - std Y+25,r9 - std Y+26,r10 - std Y+27,r7 - std Y+28,r6 - std Y+29,r11 - std Y+30,r4 - std Y+31,r5 - std Y+32,r8 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r22,Z+36 - ldd r23,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+33,r23 - std Y+34,r2 - std Y+35,r21 - std Y+36,r20 - std Y+37,r3 - std Y+38,r18 - std Y+39,r19 - std Y+40,r22 - std Y+41,r9 - std Y+42,r10 - std Y+43,r7 - std Y+44,r6 - std Y+45,r11 - std Y+46,r4 - std Y+47,r5 - std Y+48,r8 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 
-#endif - ldi r26,28 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -122: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 122b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,28 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -150: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 150b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 -179: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z 
-#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 179b - std Y+33,r12 - std Y+34,r13 - std Y+35,r14 - std Y+36,r15 - std Y+37,r24 - std Y+38,r25 - std Y+39,r16 - std Y+40,r17 - ldi r26,28 - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 -207: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 207b - std Y+41,r12 - std Y+42,r13 - std Y+43,r14 - std Y+44,r15 - std Y+45,r24 - std Y+46,r25 - std Y+47,r16 - std Y+48,r17 - ldi r26,112 -227: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - 
lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - 
eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - 
lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov 
r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - 
mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - 
elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - 
mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out 
_SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 903f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 227b -903: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_decrypt, .-skinny_128_384_decrypt - - .text -.global skinny_128_256_init - .type skinny_128_256_init, @function -skinny_128_256_init: - movw r30,r24 - movw r26,r22 
-.L__stack_usage = 2 - ldi r22,8 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_256_init, .-skinny_128_256_init - - .text -.global skinny_128_256_encrypt - .type skinny_128_256_encrypt, @function -skinny_128_256_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -82: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - 
mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi 
r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if 
defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 
- mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z 
-#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,96 - breq 594f - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 82b -594: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_encrypt, .-skinny_128_256_encrypt - -.global skinny_128_256_encrypt_tk_full - .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt - - .text -.global skinny_128_256_decrypt - .type skinny_128_256_decrypt, @function -skinny_128_256_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - std Y+5,r22 - std Y+6,r23 - std Y+7,r2 - std Y+8,r3 - std Y+9,r4 - std Y+10,r5 - std Y+11,r6 - std Y+12,r7 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - std Y+21,r22 - std Y+22,r23 - std Y+23,r2 - std Y+24,r3 - std Y+25,r4 - std Y+26,r5 - std Y+27,r6 - std Y+28,r7 - std Y+29,r8 - std Y+30,r9 - std Y+31,r10 - std Y+32,r11 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi 
r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,24 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -90: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 90b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,24 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -118: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 118b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,96 -139: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 
- ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov 
r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov 
r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd 
r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif 
defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif 
defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 651f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 139b -651: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_decrypt, .-skinny_128_256_decrypt - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-skinny128.c b/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-skinny128.c deleted file mode 100644 index 579ced1..0000000 --- 
a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-skinny128.c +++ /dev/null @@ -1,801 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-skinny128.h" -#include "internal-skinnyutil.h" -#include "internal-util.h" -#include - -#if !defined(__AVR__) - -STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) -{ - /* This function is used to fast-forward the TK1 tweak value - * to the value at the end of the key schedule for decryption. - * - * The tweak permutation repeats every 16 rounds, so SKINNY-128-256 - * with 48 rounds does not need any fast forwarding applied. - * SKINNY-128-128 with 40 rounds and SKINNY-128-384 with 56 rounds - * are equivalent to applying the permutation 8 times: - * - * PT*8 = [5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12] - */ - uint32_t row0 = tk[0]; - uint32_t row1 = tk[1]; - uint32_t row2 = tk[2]; - uint32_t row3 = tk[3]; - tk[0] = ((row1 >> 8) & 0x0000FFFFU) | - ((row0 >> 8) & 0x00FF0000U) | - ((row0 << 8) & 0xFF000000U); - tk[1] = ((row1 >> 24) & 0x000000FFU) | - ((row0 << 8) & 0x00FFFF00U) | - ((row1 << 24) & 0xFF000000U); - tk[2] = ((row3 >> 8) & 0x0000FFFFU) | - ((row2 >> 8) & 0x00FF0000U) | - ((row2 << 8) & 0xFF000000U); - tk[3] = ((row3 >> 24) & 0x000000FFU) | - ((row2 << 8) & 0x00FFFF00U) | - ((row3 << 24) & 0xFF000000U); -} - -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); - memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); -#else - /* Set the initial states of TK1, TK2, and TK3 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Set up the key schedule using TK2 and TK3. 
TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ TK3[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ TK3[1] ^ (rc >> 4); - - /* Permute TK2 and TK3 for the next round */ - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - - /* Apply the LFSR's to TK2 and TK3 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } -#endif -} - -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0x15; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Permute TK1 to fast-forward it to the end of the key schedule */ - skinny128_fast_forward_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_fast_forward_tk(TK2); - skinny128_fast_forward_tk(TK3); - for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
- skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - skinny128_LFSR3(TK3[2]); - skinny128_LFSR3(TK3[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_inv_permute_tk(TK3); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); - skinny128_LFSR2(TK3[2]); - skinny128_LFSR2(TK3[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); - TK2[0] = le_load_word32(tk2); - TK2[1] = le_load_word32(tk2 + 4); - TK2[2] = le_load_word32(tk2 + 8); - TK2[3] = le_load_word32(tk2 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; - s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK3); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); -#else - /* Set the initial states of TK1 and TK2 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Set up the key schedule using TK2. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ (rc >> 4); - - /* Permute TK2 for the next round */ - skinny128_permute_tk(TK2); - - /* Apply the LFSR to TK2 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } -#endif -} - -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0x09; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1. 
- * There is no need to fast-forward TK1 because the value at - * the end of the key schedule is the same as at the start */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2. - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -#else /* __AVR__ */ - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - memcpy(ks->TK2, tk2, 16); - skinny_128_384_encrypt(ks, output, input); -} - -#endif /* __AVR__ */ diff --git a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-skinny128.h b/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-skinny128.h deleted file mode 100644 index 2bfda3c..0000000 --- a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-skinny128.h +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNY128_H -#define LW_INTERNAL_SKINNY128_H - -/** - * \file internal-skinny128.h - * \brief SKINNY-128 block cipher family. - * - * References: https://eprint.iacr.org/2016/660.pdf, - * https://sites.google.com/site/skinnycipher/ - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \def SKINNY_128_SMALL_SCHEDULE - * \brief Defined to 1 to use the small key schedule version of SKINNY-128. - */ -#if defined(__AVR__) -#define SKINNY_128_SMALL_SCHEDULE 1 -#else -#define SKINNY_128_SMALL_SCHEDULE 0 -#endif - -/** - * \brief Size of a block for SKINNY-128 block ciphers. - */ -#define SKINNY_128_BLOCK_SIZE 16 - -/** - * \brief Number of rounds for SKINNY-128-384. - */ -#define SKINNY_128_384_ROUNDS 56 - -/** - * \brief Structure of the key schedule for SKINNY-128-384. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; - - /** TK3 for the small key schedule */ - uint8_t TK3[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_384_ROUNDS * 2]; -#endif - -} skinny_128_384_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-384. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and an explicitly - * provided TK2 value. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tk2 TK2 value that should be updated on the fly. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when both TK1 and TK2 change from block to block. - * When the key is initialized with skinny_128_384_init(), the TK2 part of - * the key value should be set to zero. - * - * \note Some versions of this function may modify the key schedule to - * copy tk2 into place. - */ -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and a - * fully specified tweakey value. - * - * \param key Points to the 384-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-384 but - * more memory-efficient. - */ -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input); - -/** - * \brief Number of rounds for SKINNY-128-256. - */ -#define SKINNY_128_256_ROUNDS 48 - -/** - * \brief Structure of the key schedule for SKINNY-128-256. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_256_ROUNDS * 2]; -#endif - -} skinny_128_256_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256 and a - * fully specified tweakey value. - * - * \param key Points to the 256-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-256 but - * more memory-efficient. - */ -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-skinnyutil.h b/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-skinnyutil.h deleted file mode 100644 index 83136cb..0000000 --- a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-skinnyutil.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNYUTIL_H -#define LW_INTERNAL_SKINNYUTIL_H - -/** - * \file internal-skinnyutil.h - * \brief Utilities to help implement SKINNY and its variants. - */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @cond skinnyutil */ - -/* Utilities for implementing SKINNY-128 */ - -#define skinny128_LFSR2(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ - (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ - } while (0) - - -#define skinny128_LFSR3(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ - (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) -#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) - -#define skinny128_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint32_t row2 = tk[2]; \ - uint32_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 16) | (row3 >> 16); \ - tk[0] = ((row2 >> 8) & 0x000000FFU) | \ - ((row2 << 16) & 0x00FF0000U) | \ - ( row3 & 0xFF00FF00U); \ - tk[1] = ((row2 >> 16) & 0x000000FFU) | \ - (row2 & 0xFF000000U) | \ - ((row3 << 8) & 0x0000FF00U) | \ - ( row3 & 0x00FF0000U); \ - } while (0) - -#define skinny128_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint32_t row0 = tk[0]; \ - uint32_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 >> 16) & 0x000000FFU) | \ - ((row0 << 8) & 0x0000FF00U) | \ - ((row1 << 16) & 0x00FF0000U) | \ - ( row1 & 0xFF000000U); \ - tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ - ((row0 << 16) & 0xFF000000U) | \ - ((row1 >> 16) & 0x000000FFU) | \ - ((row1 << 8) & 0x00FF0000U); \ - } while (0) - -/* - * Apply the SKINNY sbox. The original version from the specification is - * equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE(x) - * ((((x) & 0x01010101U) << 2) | - * (((x) & 0x06060606U) << 5) | - * (((x) & 0x20202020U) >> 5) | - * (((x) & 0xC8C8C8C8U) >> 2) | - * (((x) & 0x10101010U) >> 1)) - * - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * return SBOX_SWAP(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. 
- */ -#define skinny128_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ - y = (((x >> 5) & (x << 1)) & 0x04040404U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ - x = ((x & 0x08080808U) << 1) | \ - ((x & 0x32323232U) << 2) | \ - ((x & 0x01010101U) << 5) | \ - ((x & 0x80808080U) >> 6) | \ - ((x & 0x40404040U) >> 4) | \ - ((x & 0x04040404U) >> 2); \ -} while (0) - -/* - * Apply the inverse of the SKINNY sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE_INV(x) - * ((((x) & 0x08080808U) << 1) | - * (((x) & 0x32323232U) << 2) | - * (((x) & 0x01010101U) << 5) | - * (((x) & 0xC0C0C0C0U) >> 5) | - * (((x) & 0x04040404U) >> 2)) - * - * x = SBOX_SWAP(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_inv_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ - x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ - y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ - x = ((x & 0x01010101U) << 2) | \ - ((x & 0x04040404U) << 4) | \ - ((x & 0x02020202U) << 6) | \ - ((x & 0x20202020U) >> 5) | \ - ((x & 0xC8C8C8C8U) >> 2) | \ - ((x & 0x10101010U) >> 1); \ -} while (0) - -/* Utilities for implementing SKINNY-64 */ - -#define skinny64_LFSR2(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ - } while (0) - -#define skinny64_LFSR3(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) -#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) - -#define skinny64_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint16_t row2 = tk[2]; \ - uint16_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 8) | (row3 >> 8); \ - tk[0] = ((row2 << 4) & 0xF000U) | \ - ((row2 >> 8) & 0x00F0U) | \ - ( row3 & 0x0F0FU); \ - tk[1] = ((row2 << 8) & 0xF000U) | \ - ((row3 >> 4) & 0x0F00U) | \ - ( row3 & 0x00F0U) | \ - ( row2 & 0x000FU); \ - } while (0) - 
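The skinnyutil macros above document two invariants: skinny128_inv_sbox reverses skinny128_sbox, and each LFSR2 step is undone by the corresponding LFSR3 step (the header defines skinny128_inv_LFSR2 as skinny128_LFSR3, and likewise for the 64-bit variants). A minimal stand-alone sanity check along those lines (not part of the original patch; it assumes the retained rhys copy of internal-skinnyutil.h is on the include path, and the test words are arbitrary) could look like:

/* Illustrative sketch, not part of the deleted sources: exercises the
 * inverse relationships documented in internal-skinnyutil.h. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include "internal-skinnyutil.h"

int main(void)
{
    uint32_t w32 = 0x0123C7F9U;        /* arbitrary 32-bit test word */
    const uint32_t saved32 = w32;
    uint16_t w16 = 0x5AC3U;            /* arbitrary 16-bit test word */
    const uint16_t saved16 = w16;

    /* Applying the SKINNY-128 S-box and then its inverse restores the word. */
    skinny128_sbox(w32);
    skinny128_inv_sbox(w32);
    assert(w32 == saved32);

    /* LFSR2 followed by LFSR3 (its stated inverse) also restores the word. */
    skinny128_LFSR2(w32);
    skinny128_LFSR3(w32);
    assert(w32 == saved32);

    /* Same inverse property for the SKINNY-64 LFSRs on a 16-bit row. */
    skinny64_LFSR2(w16);
    skinny64_LFSR3(w16);
    assert(w16 == saved16);

    printf("skinnyutil inverse checks passed\n");
    return 0;
}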
-#define skinny64_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint16_t row0 = tk[0]; \ - uint16_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 << 8) & 0xF000U) | \ - ((row0 >> 4) & 0x0F00U) | \ - ((row1 >> 8) & 0x00F0U) | \ - ( row1 & 0x000FU); \ - tk[3] = ((row1 << 8) & 0xF000U) | \ - ((row0 << 8) & 0x0F00U) | \ - ((row1 >> 4) & 0x00F0U) | \ - ((row0 >> 8) & 0x000FU); \ - } while (0) - -/* - * Apply the SKINNY-64 sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT(x) - * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_SHIFT steps to be performed with one final rotation. - * This reduces the number of required shift operations from 14 to 10. - * - * We can further reduce the number of NOT operations from 4 to 2 - * using the technique from https://github.com/kste/skinny_avx to - * convert NOR-XOR operations into AND-XOR operations by converting - * the S-box into its NOT-inverse. - */ -#define skinny64_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ - x = ~x; \ - x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ -} while (0) - -/* - * Apply the inverse of the SKINNY-64 sbox. The original version - * from the specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT_INV(x) - * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * return SBOX_MIX(x); - */ -#define skinny64_inv_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = ~x; \ - x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ -} while (0) - -/** @endcond */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-util.h b/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - 
(ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. 
This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/romulus.c b/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/romulus.c deleted file mode 100644 index bb19cc5..0000000 --- a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/romulus.c +++ /dev/null @@ -1,1974 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "romulus.h" -#include "internal-skinny128.h" -#include "internal-util.h" -#include <string.h> - -aead_cipher_t const romulus_n1_cipher = { - "Romulus-N1", - ROMULUS_KEY_SIZE, - ROMULUS1_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_n1_aead_encrypt, - romulus_n1_aead_decrypt -}; - -aead_cipher_t const romulus_n2_cipher = { - "Romulus-N2", - ROMULUS_KEY_SIZE, - ROMULUS2_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_n2_aead_encrypt, - romulus_n2_aead_decrypt -}; - -aead_cipher_t const romulus_n3_cipher = { - "Romulus-N3", - ROMULUS_KEY_SIZE, - ROMULUS3_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_n3_aead_encrypt, - romulus_n3_aead_decrypt -}; - -aead_cipher_t const romulus_m1_cipher = { - "Romulus-M1", - ROMULUS_KEY_SIZE, - ROMULUS1_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_m1_aead_encrypt, - romulus_m1_aead_decrypt -}; - -aead_cipher_t const romulus_m2_cipher = { - "Romulus-M2", - ROMULUS_KEY_SIZE, - ROMULUS2_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_m2_aead_encrypt, - romulus_m2_aead_decrypt -}; - -aead_cipher_t const romulus_m3_cipher = { - "Romulus-M3", - ROMULUS_KEY_SIZE, - ROMULUS3_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_m3_aead_encrypt, - romulus_m3_aead_decrypt -}; - -/** - * \brief Limit on the number of bytes of message or associated data (128Mb). - * - * Romulus-N1 and Romulus-M1 use a 56-bit block counter which allows for - * payloads well into the petabyte range. It is unlikely that an embedded - * device will have that much memory to store a contiguous packet! - * - * Romulus-N2 and Romulus-M2 use a 48-bit block counter but the upper - * 24 bits are difficult to modify in the key schedule. So we only - * update the low 24 bits and leave the high 24 bits fixed. - * - * Romulus-N3 and Romulus-M3 use a 24-bit block counter. - * - * For all algorithms, we limit the block counter to 2^23 so that the block - * counter can never exceed 2^24 - 1. - */ -#define ROMULUS_DATA_LIMIT \ - ((unsigned long long)((1ULL << 23) * SKINNY_128_BLOCK_SIZE)) - -/** - * \brief Initializes the key schedule for Romulus-N1 or Romulus-M1. - * - * \param ks Points to the key schedule to initialize. - * \param k Points to the 16 bytes of the key. - * \param npub Points to the 16 bytes of the nonce. May be NULL - * if the nonce will be updated on the fly. - */ -static void romulus1_init - (skinny_128_384_key_schedule_t *ks, - const unsigned char *k, const unsigned char *npub) -{ - unsigned char TK[48]; - TK[0] = 0x01; /* Initialize the 56-bit LFSR counter */ - memset(TK + 1, 0, 15); - if (npub) - memcpy(TK + 16, npub, 16); - else - memset(TK + 16, 0, 16); - memcpy(TK + 32, k, 16); - skinny_128_384_init(ks, TK); -} - -/** - * \brief Initializes the key schedule for Romulus-N2 or Romulus-M2. - * - * \param ks Points to the key schedule to initialize. - * \param k Points to the 16 bytes of the key. - * \param npub Points to the 12 bytes of the nonce. May be NULL - * if the nonce will be updated on the fly.
- */ -static void romulus2_init - (skinny_128_384_key_schedule_t *ks, - const unsigned char *k, const unsigned char *npub) -{ - unsigned char TK[48]; - TK[0] = 0x01; /* Initialize the low 24 bits of the LFSR counter */ - if (npub) { - TK[1] = TK[2] = TK[3] = 0; - memcpy(TK + 4, npub, 12); - } else { - memset(TK + 1, 0, 15); - } - memcpy(TK + 16, k, 16); - TK[32] = 0x01; /* Initialize the high 24 bits of the LFSR counter */ - memset(TK + 33, 0, 15); - skinny_128_384_init(ks, TK); -} - -/** - * \brief Initializes the key schedule for Romulus-N3 or Romulus-M3. - * - * \param ks Points to the key schedule to initialize. - * \param k Points to the 16 bytes of the key. - * \param npub Points to the 12 bytes of the nonce. May be NULL - * if the nonce will be updated on the fly. - */ -static void romulus3_init - (skinny_128_256_key_schedule_t *ks, - const unsigned char *k, const unsigned char *npub) -{ - unsigned char TK[32]; - TK[0] = 0x01; /* Initialize the 24-bit LFSR counter */ - if (npub) { - TK[1] = TK[2] = TK[3] = 0; - memcpy(TK + 4, npub, 12); - } else { - memset(TK + 1, 0, 15); - } - memcpy(TK + 16, k, 16); - skinny_128_256_init(ks, TK); -} - -/** - * \brief Sets the domain separation value for Romulus-N1 and M1. - * - * \param ks The key schedule to set the domain separation value into. - * \param domain The domain separation value. - */ -#define romulus1_set_domain(ks, domain) ((ks)->TK1[7] = (domain)) - -/** - * \brief Sets the domain separation value for Romulus-N2 and M2. - * - * \param ks The key schedule to set the domain separation value into. - * \param domain The domain separation value. - */ -#define romulus2_set_domain(ks, domain) ((ks)->TK1[3] = (domain)) - -/** - * \brief Sets the domain separation value for Romulus-N3 and M3. - * - * \param ks The key schedule to set the domain separation value into. - * \param domain The domain separation value. - */ -#define romulus3_set_domain(ks, domain) ((ks)->TK1[3] = (domain)) - -/** - * \brief Updates the 56-bit LFSR block counter for Romulus-N1 and M1. - * - * \param TK1 Points to the TK1 part of the key schedule containing the LFSR. - */ -STATIC_INLINE void romulus1_update_counter(uint8_t TK1[16]) -{ - uint8_t mask = (uint8_t)(((int8_t)(TK1[6])) >> 7); - TK1[6] = (TK1[6] << 1) | (TK1[5] >> 7); - TK1[5] = (TK1[5] << 1) | (TK1[4] >> 7); - TK1[4] = (TK1[4] << 1) | (TK1[3] >> 7); - TK1[3] = (TK1[3] << 1) | (TK1[2] >> 7); - TK1[2] = (TK1[2] << 1) | (TK1[1] >> 7); - TK1[1] = (TK1[1] << 1) | (TK1[0] >> 7); - TK1[0] = (TK1[0] << 1) ^ (mask & 0x95); -} - -/** - * \brief Updates the 24-bit LFSR block counter for Romulus-N2 or M2. - * - * \param TK1 Points to the TK1 part of the key schedule containing the LFSR. - * - * For Romulus-N2 and Romulus-M2 this will only update the low 24 bits of - * the 48-bit LFSR. The high 24 bits are fixed due to ROMULUS_DATA_LIMIT. - */ -STATIC_INLINE void romulus2_update_counter(uint8_t TK1[16]) -{ - uint8_t mask = (uint8_t)(((int8_t)(TK1[2])) >> 7); - TK1[2] = (TK1[2] << 1) | (TK1[1] >> 7); - TK1[1] = (TK1[1] << 1) | (TK1[0] >> 7); - TK1[0] = (TK1[0] << 1) ^ (mask & 0x1B); -} - -/** - * \brief Updates the 24-bit LFSR block counter for Romulus-N3 or M3. - * - * \param TK1 Points to the TK1 part of the key schedule containing the LFSR. - */ -#define romulus3_update_counter(TK1) romulus2_update_counter((TK1)) - -/** - * \brief Process the asssociated data for Romulus-N1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. 
- * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - */ -static void romulus_n1_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char temp; - - /* Handle the special case of no associated data */ - if (adlen == 0) { - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x1A); - skinny_128_384_encrypt_tk2(ks, S, S, npub); - return; - } - - /* Process all double blocks except the last */ - romulus1_set_domain(ks, 0x08); - while (adlen > 32) { - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - ad += 32; - adlen -= 32; - } - - /* Pad and process the left-over blocks */ - romulus1_update_counter(ks->TK1); - temp = (unsigned)adlen; - if (temp == 32) { - /* Left-over complete double block */ - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x18); - } else if (temp > 16) { - /* Left-over partial double block */ - unsigned char pad[16]; - temp -= 16; - lw_xor_block(S, ad, 16); - memcpy(pad, ad + 16, temp); - memset(pad + temp, 0, 15 - temp); - pad[15] = temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x1A); - } else if (temp == 16) { - /* Left-over complete single block */ - lw_xor_block(S, ad, temp); - romulus1_set_domain(ks, 0x18); - } else { - /* Left-over partial single block */ - lw_xor_block(S, ad, temp); - S[15] ^= temp; - romulus1_set_domain(ks, 0x1A); - } - skinny_128_384_encrypt_tk2(ks, S, S, npub); -} - -/** - * \brief Process the asssociated data for Romulus-N2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
- */ -static void romulus_n2_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char temp; - - /* Handle the special case of no associated data */ - if (adlen == 0) { - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x5A); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all double blocks except the last */ - romulus2_set_domain(ks, 0x48); - while (adlen > 28) { - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Pad and process the left-over blocks */ - romulus2_update_counter(ks->TK1); - temp = (unsigned)adlen; - if (temp == 28) { - /* Left-over complete double block */ - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x58); - } else if (temp > 16) { - /* Left-over partial double block */ - temp -= 16; - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp); - ks->TK1[15] = temp; - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x5A); - } else if (temp == 16) { - /* Left-over complete single block */ - lw_xor_block(S, ad, temp); - romulus2_set_domain(ks, 0x58); - } else { - /* Left-over partial single block */ - lw_xor_block(S, ad, temp); - S[15] ^= temp; - romulus2_set_domain(ks, 0x5A); - } - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Process the asssociated data for Romulus-N3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
- */ -static void romulus_n3_process_ad - (skinny_128_256_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char temp; - - /* Handle the special case of no associated data */ - if (adlen == 0) { - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x9A); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_256_encrypt(ks, S, S); - return; - } - - /* Process all double blocks except the last */ - romulus3_set_domain(ks, 0x88); - while (adlen > 28) { - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Pad and process the left-over blocks */ - romulus3_update_counter(ks->TK1); - temp = (unsigned)adlen; - if (temp == 28) { - /* Left-over complete double block */ - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x98); - } else if (temp > 16) { - /* Left-over partial double block */ - temp -= 16; - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp); - ks->TK1[15] = temp; - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x9A); - } else if (temp == 16) { - /* Left-over complete single block */ - lw_xor_block(S, ad, temp); - romulus3_set_domain(ks, 0x98); - } else { - /* Left-over partial single block */ - lw_xor_block(S, ad, temp); - S[15] ^= temp; - romulus3_set_domain(ks, 0x9A); - } - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Determine the domain separation value to use on the last - * block of the associated data processing. - * - * \param adlen Length of the associated data in bytes. - * \param mlen Length of the message in bytes. - * \param t Size of the second half of a double block; 12 or 16. - * - * \return The domain separation bits to use to finalize the last block. - */ -static uint8_t romulus_m_final_ad_domain - (unsigned long long adlen, unsigned long long mlen, unsigned t) -{ - uint8_t domain = 0; - unsigned split = 16U; - unsigned leftover; - - /* Determine which domain bits we need based on the length of the ad */ - if (adlen == 0) { - /* No associated data, so only 1 block with padding */ - domain ^= 0x02; - split = t; - } else { - /* Even or odd associated data length? */ - leftover = (unsigned)(adlen % (16U + t)); - if (leftover == 0) { - /* Even with a full double block at the end */ - domain ^= 0x08; - } else if (leftover < split) { - /* Odd with a partial single block at the end */ - domain ^= 0x02; - split = t; - } else if (leftover > split) { - /* Even with a partial double block at the end */ - domain ^= 0x0A; - } else { - /* Odd with a full single block at the end */ - split = t; - } - } - - /* Determine which domain bits we need based on the length of the message */ - if (mlen == 0) { - /* No message, so only 1 block with padding */ - domain ^= 0x01; - } else { - /* Even or odd message length? 
*/ - leftover = (unsigned)(mlen % (16U + t)); - if (leftover == 0) { - /* Even with a full double block at the end */ - domain ^= 0x04; - } else if (leftover < split) { - /* Odd with a partial single block at the end */ - domain ^= 0x01; - } else if (leftover > split) { - /* Even with a partial double block at the end */ - domain ^= 0x05; - } - } - return domain; -} - -/** - * \brief Process the asssociated data for Romulus-M1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - * \param m Points to the message plaintext. - * \param mlen Length of the message plaintext. - */ -static void romulus_m1_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char pad[16]; - uint8_t final_domain = 0x30; - unsigned temp; - - /* Determine the domain separator to use on the final block */ - final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 16); - - /* Process all associated data double blocks except the last */ - romulus1_set_domain(ks, 0x28); - while (adlen > 32) { - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - ad += 32; - adlen -= 32; - } - - /* Process the last associated data double block */ - temp = (unsigned)adlen; - if (temp == 32) { - /* Last associated data double block is full */ - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - } else if (temp > 16) { - /* Last associated data double block is partial */ - temp -= 16; - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(pad, ad + 16, temp); - memset(pad + temp, 0, sizeof(pad) - temp - 1); - pad[sizeof(pad) - 1] = (unsigned char)temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - romulus1_update_counter(ks->TK1); - } else { - /* Last associated data block is single. 
Needs to be combined - * with the first block of the message payload */ - romulus1_set_domain(ks, 0x2C); - romulus1_update_counter(ks->TK1); - if (temp == 16) { - lw_xor_block(S, ad, 16); - } else { - lw_xor_block(S, ad, temp); - S[15] ^= (unsigned char)temp; - } - if (mlen > 16) { - skinny_128_384_encrypt_tk2(ks, S, S, m); - romulus1_update_counter(ks->TK1); - m += 16; - mlen -= 16; - } else if (mlen == 16) { - skinny_128_384_encrypt_tk2(ks, S, S, m); - m += 16; - mlen -= 16; - } else { - temp = (unsigned)mlen; - memcpy(pad, m, temp); - memset(pad + temp, 0, sizeof(pad) - temp - 1); - pad[sizeof(pad) - 1] = (unsigned char)temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - mlen = 0; - } - } - - /* Process all message double blocks except the last */ - romulus1_set_domain(ks, 0x2C); - while (mlen > 32) { - romulus1_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - skinny_128_384_encrypt_tk2(ks, S, S, m + 16); - romulus1_update_counter(ks->TK1); - m += 32; - mlen -= 32; - } - - /* Process the last message double block */ - temp = (unsigned)mlen; - if (temp == 32) { - /* Last message double block is full */ - romulus1_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - skinny_128_384_encrypt_tk2(ks, S, S, m + 16); - } else if (temp > 16) { - /* Last message double block is partial */ - temp -= 16; - romulus1_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(pad, m + 16, temp); - memset(pad + temp, 0, sizeof(pad) - temp - 1); - pad[sizeof(pad) - 1] = (unsigned char)temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - } else if (temp == 16) { - /* Last message single block is full */ - lw_xor_block(S, m, 16); - } else if (temp > 0) { - /* Last message single block is partial */ - lw_xor_block(S, m, temp); - S[15] ^= (unsigned char)temp; - } - - /* Process the last partial block */ - romulus1_set_domain(ks, final_domain); - romulus1_update_counter(ks->TK1); - skinny_128_384_encrypt_tk2(ks, S, S, npub); -} - -/** - * \brief Process the asssociated data for Romulus-M2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - * \param m Points to the message plaintext. - * \param mlen Length of the message plaintext. 
- */ -static void romulus_m2_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *m, unsigned long long mlen) -{ - uint8_t final_domain = 0x70; - unsigned temp; - - /* Determine the domain separator to use on the final block */ - final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 12); - - /* Process all associated data double blocks except the last */ - romulus2_set_domain(ks, 0x68); - while (adlen > 28) { - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Process the last associated data double block */ - temp = (unsigned)adlen; - if (temp == 28) { - /* Last associated data double block is full */ - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - } else if (temp > 16) { - /* Last associated data double block is partial */ - temp -= 16; - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - } else { - /* Last associated data block is single. Needs to be combined - * with the first block of the message payload */ - romulus2_set_domain(ks, 0x6C); - romulus2_update_counter(ks->TK1); - if (temp == 16) { - lw_xor_block(S, ad, 16); - } else { - lw_xor_block(S, ad, temp); - S[15] ^= (unsigned char)temp; - } - if (mlen > 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - m += 12; - mlen -= 12; - } else if (mlen == 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_384_encrypt(ks, S, S); - m += 12; - mlen -= 12; - } else { - temp = (unsigned)mlen; - memcpy(ks->TK1 + 4, m, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_384_encrypt(ks, S, S); - mlen = 0; - } - } - - /* Process all message double blocks except the last */ - romulus2_set_domain(ks, 0x6C); - while (mlen > 28) { - romulus2_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - m += 28; - mlen -= 28; - } - - /* Process the last message double block */ - temp = (unsigned)mlen; - if (temp == 28) { - /* Last message double block is full */ - romulus2_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_384_encrypt(ks, S, S); - } else if (temp > 16) { - /* Last message double block is partial */ - temp -= 16; - romulus2_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_384_encrypt(ks, S, S); - } else if (temp == 16) { - /* Last message single block is full */ - lw_xor_block(S, m, 16); - } else if (temp > 0) { - /* Last message single block is partial */ - lw_xor_block(S, m, temp); - S[15] ^= (unsigned char)temp; - } - - /* Process the last partial block */ - romulus2_set_domain(ks, final_domain); - romulus2_update_counter(ks->TK1); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Process the asssociated data 
for Romulus-M3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - * \param m Points to the message plaintext. - * \param mlen Length of the message plaintext. - */ -static void romulus_m3_process_ad - (skinny_128_256_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *m, unsigned long long mlen) -{ - uint8_t final_domain = 0xB0; - unsigned temp; - - /* Determine the domain separator to use on the final block */ - final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 12); - - /* Process all associated data double blocks except the last */ - romulus3_set_domain(ks, 0xA8); - while (adlen > 28) { - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Process the last associated data double block */ - temp = (unsigned)adlen; - if (temp == 28) { - /* Last associated data double block is full */ - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - } else if (temp > 16) { - /* Last associated data double block is partial */ - temp -= 16; - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - } else { - /* Last associated data block is single. 
Needs to be combined - * with the first block of the message payload */ - romulus3_set_domain(ks, 0xAC); - romulus3_update_counter(ks->TK1); - if (temp == 16) { - lw_xor_block(S, ad, 16); - } else { - lw_xor_block(S, ad, temp); - S[15] ^= (unsigned char)temp; - } - if (mlen > 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - m += 12; - mlen -= 12; - } else if (mlen == 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_256_encrypt(ks, S, S); - m += 12; - mlen -= 12; - } else { - temp = (unsigned)mlen; - memcpy(ks->TK1 + 4, m, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_256_encrypt(ks, S, S); - mlen = 0; - } - } - - /* Process all message double blocks except the last */ - romulus3_set_domain(ks, 0xAC); - while (mlen > 28) { - romulus3_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - m += 28; - mlen -= 28; - } - - /* Process the last message double block */ - temp = (unsigned)mlen; - if (temp == 28) { - /* Last message double block is full */ - romulus3_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_256_encrypt(ks, S, S); - } else if (temp > 16) { - /* Last message double block is partial */ - temp -= 16; - romulus3_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_256_encrypt(ks, S, S); - } else if (temp == 16) { - /* Last message single block is full */ - lw_xor_block(S, m, 16); - } else if (temp > 0) { - /* Last message single block is partial */ - lw_xor_block(S, m, temp); - S[15] ^= (unsigned char)temp; - } - - /* Process the last partial block */ - romulus3_set_domain(ks, final_domain); - romulus3_update_counter(ks->TK1); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Applies the Romulus rho function. - * - * \param S The rolling Romulus state. - * \param C Ciphertext message output block. - * \param M Plaintext message input block. - */ -STATIC_INLINE void romulus_rho - (unsigned char S[16], unsigned char C[16], const unsigned char M[16]) -{ - unsigned index; - for (index = 0; index < 16; ++index) { - unsigned char s = S[index]; - unsigned char m = M[index]; - S[index] ^= m; - C[index] = m ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - } -} - -/** - * \brief Applies the inverse of the Romulus rho function. - * - * \param S The rolling Romulus state. - * \param M Plaintext message output block. - * \param C Ciphertext message input block. - */ -STATIC_INLINE void romulus_rho_inverse - (unsigned char S[16], unsigned char M[16], const unsigned char C[16]) -{ - unsigned index; - for (index = 0; index < 16; ++index) { - unsigned char s = S[index]; - unsigned char m = C[index] ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - S[index] ^= m; - M[index] = m; - } -} - -/** - * \brief Applies the Romulus rho function to a short block. - * - * \param S The rolling Romulus state. - * \param C Ciphertext message output block. - * \param M Plaintext message input block. - * \param len Length of the short block, must be less than 16. 
- */ -STATIC_INLINE void romulus_rho_short - (unsigned char S[16], unsigned char C[16], - const unsigned char M[16], unsigned len) -{ - unsigned index; - for (index = 0; index < len; ++index) { - unsigned char s = S[index]; - unsigned char m = M[index]; - S[index] ^= m; - C[index] = m ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - } - S[15] ^= (unsigned char)len; /* Padding */ -} - -/** - * \brief Applies the inverse of the Romulus rho function to a short block. - * - * \param S The rolling Romulus state. - * \param M Plaintext message output block. - * \param C Ciphertext message input block. - * \param len Length of the short block, must be less than 16. - */ -STATIC_INLINE void romulus_rho_inverse_short - (unsigned char S[16], unsigned char M[16], - const unsigned char C[16], unsigned len) -{ - unsigned index; - for (index = 0; index < len; ++index) { - unsigned char s = S[index]; - unsigned char m = C[index] ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - S[index] ^= m; - M[index] = m; - } - S[15] ^= (unsigned char)len; /* Padding */ -} - -/** - * \brief Encrypts a plaintext message with Romulus-N1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n1_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no plaintext */ - if (mlen == 0) { - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x15); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus1_set_domain(ks, 0x04); - while (mlen > 16) { - romulus_rho(S, c, m); - romulus1_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus1_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_short(S, c, m, temp); - romulus1_set_domain(ks, 0x15); - } else { - romulus_rho(S, c, m); - romulus1_set_domain(ks, 0x14); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-N1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. 
- */ -static void romulus_n1_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no ciphertext */ - if (mlen == 0) { - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x15); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus1_set_domain(ks, 0x04); - while (mlen > 16) { - romulus_rho_inverse(S, m, c); - romulus1_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus1_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_inverse_short(S, m, c, temp); - romulus1_set_domain(ks, 0x15); - } else { - romulus_rho_inverse(S, m, c); - romulus1_set_domain(ks, 0x14); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Encrypts a plaintext message with Romulus-N2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n2_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no plaintext */ - if (mlen == 0) { - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x55); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus2_set_domain(ks, 0x44); - while (mlen > 16) { - romulus_rho(S, c, m); - romulus2_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus2_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_short(S, c, m, temp); - romulus2_set_domain(ks, 0x55); - } else { - romulus_rho(S, c, m); - romulus2_set_domain(ks, 0x54); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-N2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n2_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no ciphertext */ - if (mlen == 0) { - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x55); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus2_set_domain(ks, 0x44); - while (mlen > 16) { - romulus_rho_inverse(S, m, c); - romulus2_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus2_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_inverse_short(S, m, c, temp); - romulus2_set_domain(ks, 0x55); - } else { - romulus_rho_inverse(S, m, c); - romulus2_set_domain(ks, 0x54); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Encrypts a plaintext message with Romulus-N3. - * - * \param ks Points to the key schedule. 
- * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n3_encrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no plaintext */ - if (mlen == 0) { - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x95); - skinny_128_256_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus3_set_domain(ks, 0x84); - while (mlen > 16) { - romulus_rho(S, c, m); - romulus3_update_counter(ks->TK1); - skinny_128_256_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus3_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_short(S, c, m, temp); - romulus3_set_domain(ks, 0x95); - } else { - romulus_rho(S, c, m); - romulus3_set_domain(ks, 0x94); - } - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-N3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n3_decrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no ciphertext */ - if (mlen == 0) { - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x95); - skinny_128_256_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus3_set_domain(ks, 0x84); - while (mlen > 16) { - romulus_rho_inverse(S, m, c); - romulus3_update_counter(ks->TK1); - skinny_128_256_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus3_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_inverse_short(S, m, c, temp); - romulus3_set_domain(ks, 0x95); - } else { - romulus_rho_inverse(S, m, c); - romulus3_set_domain(ks, 0x94); - } - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Encrypts a plaintext message with Romulus-M1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m1_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus1_set_domain(ks, 0x24); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho(S, c, m); - romulus1_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_short(S, c, m, (unsigned)mlen); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-M1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. 
- * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m1_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus1_set_domain(ks, 0x24); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse(S, m, c); - romulus1_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse_short(S, m, c, (unsigned)mlen); -} - -/** - * \brief Encrypts a plaintext message with Romulus-M2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m2_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus2_set_domain(ks, 0x64); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho(S, c, m); - romulus2_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_short(S, c, m, (unsigned)mlen); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-M2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m2_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus2_set_domain(ks, 0x64); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse(S, m, c); - romulus2_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse_short(S, m, c, (unsigned)mlen); -} - -/** - * \brief Encrypts a plaintext message with Romulus-M3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m3_encrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus3_set_domain(ks, 0xA4); - while (mlen > 16) { - skinny_128_256_encrypt(ks, S, S); - romulus_rho(S, c, m); - romulus3_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_256_encrypt(ks, S, S); - romulus_rho_short(S, c, m, (unsigned)mlen); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-M3. 
- * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m3_decrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus3_set_domain(ks, 0xA4); - while (mlen > 16) { - skinny_128_256_encrypt(ks, S, S); - romulus_rho_inverse(S, m, c); - romulus3_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_256_encrypt(ks, S, S); - romulus_rho_inverse_short(S, m, c, (unsigned)mlen); -} - -/** - * \brief Generates the authentication tag from the rolling Romulus state. - * - * \param T Buffer to receive the generated tag; can be the same as S. - * \param S The rolling Romulus state. - */ -STATIC_INLINE void romulus_generate_tag - (unsigned char T[16], const unsigned char S[16]) -{ - unsigned index; - for (index = 0; index < 16; ++index) { - unsigned char s = S[index]; - T[index] = (s >> 1) ^ (s & 0x80) ^ (s << 7); - } -} - -int romulus_n1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n1_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Encrypts the plaintext to produce the ciphertext */ - romulus_n1_encrypt(&ks, S, c, m, mlen); - - /* Generate the authentication tag */ - romulus_generate_tag(c + mlen, S); - return 0; -} - -int romulus_n1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n1_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= ROMULUS_TAG_SIZE; - romulus_n1_decrypt(&ks, S, m, c, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_n2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n2_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Encrypts the plaintext to produce the ciphertext */ - romulus_n2_encrypt(&ks, S, c, m, mlen); - - /* Generate the authentication tag */ - romulus_generate_tag(c + mlen, S); - return 0; -} - -int romulus_n2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n2_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= ROMULUS_TAG_SIZE; - romulus_n2_decrypt(&ks, S, m, c, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_n3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n3_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Encrypts the plaintext to produce the ciphertext */ - romulus_n3_encrypt(&ks, S, c, m, mlen); - - /* Generate the authentication tag */ - romulus_generate_tag(c + mlen, S); - return 0; -} - -int romulus_n3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n3_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= ROMULUS_TAG_SIZE; - romulus_n3_decrypt(&ks, S, m, c, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_m1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data and the plaintext message */ - memset(S, 0, sizeof(S)); - romulus_m1_process_ad(&ks, S, npub, ad, adlen, m, mlen); - - /* Generate the authentication tag, which is also the initialization - * vector for the encryption portion of the packet processing */ - romulus_generate_tag(S, S); - memcpy(c + mlen, S, ROMULUS_TAG_SIZE); - - /* Re-initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Encrypt the plaintext to produce the ciphertext */ - romulus_m1_encrypt(&ks, S, c, m, mlen); - return 0; -} - -int romulus_m1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext, using the - * authentication tag as the initialization vector for decryption */ - clen -= ROMULUS_TAG_SIZE; - memcpy(S, c + clen, ROMULUS_TAG_SIZE); - romulus_m1_decrypt(&ks, S, m, c, clen); - - /* Re-initialize the key schedule with the key and no nonce. 
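[Editorial aid, not part of the patch] Note the SIV-style ordering visible in romulus_m1_aead_encrypt() above: the tag is computed first, over the nonce, associated data and plaintext, and then doubles as the initial state for encryption. A condensed outline of the shared Romulus-M shape, restated from the code above for readability:

    /* Romulus-M composition, as implemented by the entry points above.
     * Helper names below refer to the functions shown in this file.   */
    /* encrypt: 1. init schedule with key only (nonce absorbed with AD) */
    /*          2. S   = rho-absorb (nonce, AD, M)                      */
    /*          3. tag = G(S); append tag to the ciphertext             */
    /*          4. re-init schedule with key and nonce; set S = tag     */
    /*          5. C   = block-wise rho encryption starting from S=tag  */
    /* decrypt: runs steps 4-5 first to recover M from C and the        */
    /*          transmitted tag, then recomputes 1-3 and compares tags. */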
Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_m1_process_ad(&ks, S, npub, ad, adlen, m, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_m2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data and the plaintext message */ - memset(S, 0, sizeof(S)); - romulus_m2_process_ad(&ks, S, npub, ad, adlen, m, mlen); - - /* Generate the authentication tag, which is also the initialization - * vector for the encryption portion of the packet processing */ - romulus_generate_tag(S, S); - memcpy(c + mlen, S, ROMULUS_TAG_SIZE); - - /* Re-initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Encrypt the plaintext to produce the ciphertext */ - romulus_m2_encrypt(&ks, S, c, m, mlen); - return 0; -} - -int romulus_m2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext, using the - * authentication tag as the initialization vector for decryption */ - clen -= ROMULUS_TAG_SIZE; - memcpy(S, c + clen, ROMULUS_TAG_SIZE); - romulus_m2_decrypt(&ks, S, m, c, clen); - - /* Re-initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_m2_process_ad(&ks, S, npub, ad, adlen, m, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_m3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data and the plaintext message */ - memset(S, 0, sizeof(S)); - romulus_m3_process_ad(&ks, S, npub, ad, adlen, m, mlen); - - /* Generate the authentication tag, which is also the initialization - * vector for the encryption portion of the packet processing */ - romulus_generate_tag(S, S); - memcpy(c + mlen, S, ROMULUS_TAG_SIZE); - - /* Re-initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Encrypt the plaintext to produce the ciphertext */ - romulus_m3_encrypt(&ks, S, c, m, mlen); - return 0; -} - -int romulus_m3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext, using the - * authentication tag as the initialization vector for decryption */ - clen -= ROMULUS_TAG_SIZE; - memcpy(S, c + clen, ROMULUS_TAG_SIZE); - romulus_m3_decrypt(&ks, S, m, c, clen); - - /* Re-initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_m3_process_ad(&ks, S, npub, ad, adlen, m, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} diff --git a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/romulus.h b/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/romulus.h deleted file mode 100644 index e6da29d..0000000 --- a/romulus/Implementations/crypto_aead/romulusn2/rhys-avr/romulus.h +++ /dev/null @@ -1,476 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ROMULUS_H -#define LWCRYPTO_ROMULUS_H - -#include "aead-common.h" - -/** - * \file romulus.h - * \brief Romulus authenticated encryption algorithm family. - * - * Romulus is a family of authenticated encryption algorithms that - * are built around the SKINNY-128 tweakable block cipher. There - * are six members in the family: - * - * \li Romulus-N1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. This is the - * primary member of the family. - * \li Romulus-N2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li Romulus-N3 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * \li Romulus-M1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li Romulus-M2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li Romulus-M3 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * - * The Romulus-M variants are resistant to nonce reuse as long as the - * combination of the associated data and plaintext is unique. If the - * same associated data and plaintext are reused under the same nonce, - * then the scheme will leak that the same plaintext has been sent for a - * second time but will not reveal the plaintext itself. - * - * References: https://romulusae.github.io/romulus/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all Romulus family members. - */ -#define ROMULUS_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for all Romulus family members. - */ -#define ROMULUS_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Romulus-N1 and Romulus-M1. - */ -#define ROMULUS1_NONCE_SIZE 16 - -/** - * \brief Size of the nonce for Romulus-N2 and Romulus-M2. - */ -#define ROMULUS2_NONCE_SIZE 12 - -/** - * \brief Size of the nonce for Romulus-N3 and Romulus-M3. - */ -#define ROMULUS3_NONCE_SIZE 12 - -/** - * \brief Meta-information block for the Romulus-N1 cipher. - */ -extern aead_cipher_t const romulus_n1_cipher; - -/** - * \brief Meta-information block for the Romulus-N2 cipher. 
- */ -extern aead_cipher_t const romulus_n2_cipher; - -/** - * \brief Meta-information block for the Romulus-N3 cipher. - */ -extern aead_cipher_t const romulus_n3_cipher; - -/** - * \brief Meta-information block for the Romulus-M1 cipher. - */ -extern aead_cipher_t const romulus_m1_cipher; - -/** - * \brief Meta-information block for the Romulus-M2 cipher. - */ -extern aead_cipher_t const romulus_m2_cipher; - -/** - * \brief Meta-information block for the Romulus-M3 cipher. - */ -extern aead_cipher_t const romulus_m3_cipher; - -/** - * \brief Encrypts and authenticates a packet with Romulus-N1. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_n1_aead_decrypt() - */ -int romulus_n1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-N1. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_n1_aead_encrypt() - */ -int romulus_n1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-N2. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
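[Editorial aid, not part of the patch] The Romulus-N1 prototypes and parameter documentation above translate into a straightforward calling sequence. A minimal usage sketch follows; the key, nonce and data values are placeholders for illustration only, and in real use the nonce must never repeat under the same key.

    #include <string.h>
    #include "romulus.h"

    static int romulus_n1_demo(void)
    {
        unsigned char key[ROMULUS_KEY_SIZE] = {0x00};
        unsigned char npub[ROMULUS1_NONCE_SIZE] = {0x00};
        unsigned char msg[] = "hello romulus";      /* 14 bytes incl. NUL */
        unsigned char ad[]  = "packet header";
        unsigned char ct[sizeof(msg) + ROMULUS_TAG_SIZE];
        unsigned char pt[sizeof(msg)];
        unsigned long long clen = 0, mlen = 0;

        /* Encrypt: output is ciphertext || 16-byte tag, clen = mlen + 16 */
        if (romulus_n1_aead_encrypt(ct, &clen, msg, sizeof(msg),
                                    ad, sizeof(ad), 0, npub, key) != 0)
            return -1;

        /* Decrypt: returns -1 if the authentication tag does not verify */
        if (romulus_n1_aead_decrypt(pt, &mlen, 0, ct, clen,
                                    ad, sizeof(ad), npub, key) != 0)
            return -1;

        return memcmp(pt, msg, (size_t)mlen) == 0 ? 0 : 1;
    }

The other five family members follow the same pattern; only the nonce length (ROMULUS1_NONCE_SIZE vs ROMULUS2_NONCE_SIZE/ROMULUS3_NONCE_SIZE) differs.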
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_n2_aead_decrypt() - */ -int romulus_n2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-N2. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_n2_aead_encrypt() - */ -int romulus_n2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-N3. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_n3_aead_decrypt() - */ -int romulus_n3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-N3. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. 
- * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_n3_aead_encrypt() - */ -int romulus_n3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-M1. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_m1_aead_decrypt() - */ -int romulus_m1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-M1. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_m1_aead_encrypt() - */ -int romulus_m1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-M2. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_m2_aead_decrypt() - */ -int romulus_m2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-M2. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_m2_aead_encrypt() - */ -int romulus_m2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-M3. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa romulus_m3_aead_decrypt() - */ -int romulus_m3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-M3. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_m3_aead_encrypt() - */ -int romulus_m3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusn2/rhys/internal-skinny128-avr.S b/romulus/Implementations/crypto_aead/romulusn2/rhys/internal-skinny128-avr.S new file mode 100644 index 0000000..d342cd5 --- /dev/null +++ b/romulus/Implementations/crypto_aead/romulusn2/rhys/internal-skinny128-avr.S @@ -0,0 +1,10099 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 256 +table_0: + .byte 101 + .byte 76 + .byte 106 + .byte 66 + .byte 75 + .byte 99 + .byte 67 + .byte 107 + .byte 85 + .byte 117 + .byte 90 + .byte 122 + .byte 83 + .byte 115 + .byte 91 + .byte 123 + .byte 53 + .byte 140 + .byte 58 + .byte 129 + .byte 137 + .byte 51 + .byte 128 + .byte 59 + .byte 149 + .byte 37 + .byte 152 + .byte 42 + .byte 144 + .byte 35 + .byte 153 + .byte 43 + .byte 229 + .byte 204 + .byte 232 + .byte 193 + .byte 201 + .byte 224 + .byte 192 + .byte 233 + .byte 213 + .byte 245 + .byte 216 + .byte 248 + .byte 208 + .byte 240 + .byte 217 + .byte 249 + .byte 165 + .byte 28 + .byte 168 + .byte 18 + .byte 27 + .byte 160 + .byte 19 + .byte 169 + .byte 5 + .byte 181 + .byte 10 + .byte 184 + .byte 3 + .byte 176 + .byte 11 + .byte 185 + .byte 50 + .byte 136 + .byte 60 + .byte 133 + .byte 141 + .byte 52 + .byte 132 + .byte 61 + .byte 145 + .byte 34 + .byte 156 + .byte 44 + .byte 148 + .byte 36 + .byte 157 + .byte 45 + .byte 98 + .byte 74 + .byte 108 + .byte 69 + .byte 77 + .byte 100 + .byte 68 + .byte 109 + .byte 82 + .byte 114 + .byte 92 + .byte 124 + .byte 84 + .byte 116 + .byte 93 + .byte 125 + .byte 161 + .byte 26 + .byte 172 + .byte 21 + .byte 29 + .byte 164 + .byte 20 + .byte 173 + .byte 2 + .byte 177 + .byte 12 + .byte 188 + .byte 4 + .byte 180 + .byte 13 + .byte 189 + .byte 225 + .byte 200 + .byte 236 + .byte 197 + .byte 205 + .byte 228 + .byte 196 + .byte 237 + .byte 
209 + .byte 241 + .byte 220 + .byte 252 + .byte 212 + .byte 244 + .byte 221 + .byte 253 + .byte 54 + .byte 142 + .byte 56 + .byte 130 + .byte 139 + .byte 48 + .byte 131 + .byte 57 + .byte 150 + .byte 38 + .byte 154 + .byte 40 + .byte 147 + .byte 32 + .byte 155 + .byte 41 + .byte 102 + .byte 78 + .byte 104 + .byte 65 + .byte 73 + .byte 96 + .byte 64 + .byte 105 + .byte 86 + .byte 118 + .byte 88 + .byte 120 + .byte 80 + .byte 112 + .byte 89 + .byte 121 + .byte 166 + .byte 30 + .byte 170 + .byte 17 + .byte 25 + .byte 163 + .byte 16 + .byte 171 + .byte 6 + .byte 182 + .byte 8 + .byte 186 + .byte 0 + .byte 179 + .byte 9 + .byte 187 + .byte 230 + .byte 206 + .byte 234 + .byte 194 + .byte 203 + .byte 227 + .byte 195 + .byte 235 + .byte 214 + .byte 246 + .byte 218 + .byte 250 + .byte 211 + .byte 243 + .byte 219 + .byte 251 + .byte 49 + .byte 138 + .byte 62 + .byte 134 + .byte 143 + .byte 55 + .byte 135 + .byte 63 + .byte 146 + .byte 33 + .byte 158 + .byte 46 + .byte 151 + .byte 39 + .byte 159 + .byte 47 + .byte 97 + .byte 72 + .byte 110 + .byte 70 + .byte 79 + .byte 103 + .byte 71 + .byte 111 + .byte 81 + .byte 113 + .byte 94 + .byte 126 + .byte 87 + .byte 119 + .byte 95 + .byte 127 + .byte 162 + .byte 24 + .byte 174 + .byte 22 + .byte 31 + .byte 167 + .byte 23 + .byte 175 + .byte 1 + .byte 178 + .byte 14 + .byte 190 + .byte 7 + .byte 183 + .byte 15 + .byte 191 + .byte 226 + .byte 202 + .byte 238 + .byte 198 + .byte 207 + .byte 231 + .byte 199 + .byte 239 + .byte 210 + .byte 242 + .byte 222 + .byte 254 + .byte 215 + .byte 247 + .byte 223 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 256 +table_1: + .byte 172 + .byte 232 + .byte 104 + .byte 60 + .byte 108 + .byte 56 + .byte 168 + .byte 236 + .byte 170 + .byte 174 + .byte 58 + .byte 62 + .byte 106 + .byte 110 + .byte 234 + .byte 238 + .byte 166 + .byte 163 + .byte 51 + .byte 54 + .byte 102 + .byte 99 + .byte 227 + .byte 230 + .byte 225 + .byte 164 + .byte 97 + .byte 52 + .byte 49 + .byte 100 + .byte 161 + .byte 228 + .byte 141 + .byte 201 + .byte 73 + .byte 29 + .byte 77 + .byte 25 + .byte 137 + .byte 205 + .byte 139 + .byte 143 + .byte 27 + .byte 31 + .byte 75 + .byte 79 + .byte 203 + .byte 207 + .byte 133 + .byte 192 + .byte 64 + .byte 21 + .byte 69 + .byte 16 + .byte 128 + .byte 197 + .byte 130 + .byte 135 + .byte 18 + .byte 23 + .byte 66 + .byte 71 + .byte 194 + .byte 199 + .byte 150 + .byte 147 + .byte 3 + .byte 6 + .byte 86 + .byte 83 + .byte 211 + .byte 214 + .byte 209 + .byte 148 + .byte 81 + .byte 4 + .byte 1 + .byte 84 + .byte 145 + .byte 212 + .byte 156 + .byte 216 + .byte 88 + .byte 12 + .byte 92 + .byte 8 + .byte 152 + .byte 220 + .byte 154 + .byte 158 + .byte 10 + .byte 14 + .byte 90 + .byte 94 + .byte 218 + .byte 222 + .byte 149 + .byte 208 + .byte 80 + .byte 5 + .byte 85 + .byte 0 + .byte 144 + .byte 213 + .byte 146 + .byte 151 + .byte 2 + .byte 7 + .byte 82 + .byte 87 + .byte 210 + .byte 215 + .byte 157 + .byte 217 + .byte 89 + .byte 13 + .byte 93 + .byte 9 + .byte 153 + .byte 221 + .byte 155 + .byte 159 + .byte 11 + .byte 15 + .byte 91 + .byte 95 + .byte 219 + .byte 223 + .byte 22 + .byte 19 + .byte 131 + .byte 134 + .byte 70 + .byte 67 + .byte 195 + .byte 198 + .byte 65 + .byte 20 + .byte 193 + .byte 132 + .byte 17 + .byte 68 + .byte 129 + .byte 196 + .byte 28 + .byte 72 + .byte 200 + .byte 140 + .byte 76 + .byte 24 + .byte 136 + .byte 204 + .byte 26 + .byte 30 + .byte 138 + .byte 142 + .byte 74 + .byte 78 + .byte 202 + .byte 206 + .byte 53 + .byte 96 + .byte 224 + 
.byte 165 + .byte 101 + .byte 48 + .byte 160 + .byte 229 + .byte 50 + .byte 55 + .byte 162 + .byte 167 + .byte 98 + .byte 103 + .byte 226 + .byte 231 + .byte 61 + .byte 105 + .byte 233 + .byte 173 + .byte 109 + .byte 57 + .byte 169 + .byte 237 + .byte 59 + .byte 63 + .byte 171 + .byte 175 + .byte 107 + .byte 111 + .byte 235 + .byte 239 + .byte 38 + .byte 35 + .byte 179 + .byte 182 + .byte 118 + .byte 115 + .byte 243 + .byte 246 + .byte 113 + .byte 36 + .byte 241 + .byte 180 + .byte 33 + .byte 116 + .byte 177 + .byte 244 + .byte 44 + .byte 120 + .byte 248 + .byte 188 + .byte 124 + .byte 40 + .byte 184 + .byte 252 + .byte 42 + .byte 46 + .byte 186 + .byte 190 + .byte 122 + .byte 126 + .byte 250 + .byte 254 + .byte 37 + .byte 112 + .byte 240 + .byte 181 + .byte 117 + .byte 32 + .byte 176 + .byte 245 + .byte 34 + .byte 39 + .byte 178 + .byte 183 + .byte 114 + .byte 119 + .byte 242 + .byte 247 + .byte 45 + .byte 121 + .byte 249 + .byte 189 + .byte 125 + .byte 41 + .byte 185 + .byte 253 + .byte 43 + .byte 47 + .byte 187 + .byte 191 + .byte 123 + .byte 127 + .byte 251 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_2, @object + .size table_2, 256 +table_2: + .byte 0 + .byte 2 + .byte 4 + .byte 6 + .byte 8 + .byte 10 + .byte 12 + .byte 14 + .byte 16 + .byte 18 + .byte 20 + .byte 22 + .byte 24 + .byte 26 + .byte 28 + .byte 30 + .byte 32 + .byte 34 + .byte 36 + .byte 38 + .byte 40 + .byte 42 + .byte 44 + .byte 46 + .byte 48 + .byte 50 + .byte 52 + .byte 54 + .byte 56 + .byte 58 + .byte 60 + .byte 62 + .byte 65 + .byte 67 + .byte 69 + .byte 71 + .byte 73 + .byte 75 + .byte 77 + .byte 79 + .byte 81 + .byte 83 + .byte 85 + .byte 87 + .byte 89 + .byte 91 + .byte 93 + .byte 95 + .byte 97 + .byte 99 + .byte 101 + .byte 103 + .byte 105 + .byte 107 + .byte 109 + .byte 111 + .byte 113 + .byte 115 + .byte 117 + .byte 119 + .byte 121 + .byte 123 + .byte 125 + .byte 127 + .byte 128 + .byte 130 + .byte 132 + .byte 134 + .byte 136 + .byte 138 + .byte 140 + .byte 142 + .byte 144 + .byte 146 + .byte 148 + .byte 150 + .byte 152 + .byte 154 + .byte 156 + .byte 158 + .byte 160 + .byte 162 + .byte 164 + .byte 166 + .byte 168 + .byte 170 + .byte 172 + .byte 174 + .byte 176 + .byte 178 + .byte 180 + .byte 182 + .byte 184 + .byte 186 + .byte 188 + .byte 190 + .byte 193 + .byte 195 + .byte 197 + .byte 199 + .byte 201 + .byte 203 + .byte 205 + .byte 207 + .byte 209 + .byte 211 + .byte 213 + .byte 215 + .byte 217 + .byte 219 + .byte 221 + .byte 223 + .byte 225 + .byte 227 + .byte 229 + .byte 231 + .byte 233 + .byte 235 + .byte 237 + .byte 239 + .byte 241 + .byte 243 + .byte 245 + .byte 247 + .byte 249 + .byte 251 + .byte 253 + .byte 255 + .byte 1 + .byte 3 + .byte 5 + .byte 7 + .byte 9 + .byte 11 + .byte 13 + .byte 15 + .byte 17 + .byte 19 + .byte 21 + .byte 23 + .byte 25 + .byte 27 + .byte 29 + .byte 31 + .byte 33 + .byte 35 + .byte 37 + .byte 39 + .byte 41 + .byte 43 + .byte 45 + .byte 47 + .byte 49 + .byte 51 + .byte 53 + .byte 55 + .byte 57 + .byte 59 + .byte 61 + .byte 63 + .byte 64 + .byte 66 + .byte 68 + .byte 70 + .byte 72 + .byte 74 + .byte 76 + .byte 78 + .byte 80 + .byte 82 + .byte 84 + .byte 86 + .byte 88 + .byte 90 + .byte 92 + .byte 94 + .byte 96 + .byte 98 + .byte 100 + .byte 102 + .byte 104 + .byte 106 + .byte 108 + .byte 110 + .byte 112 + .byte 114 + .byte 116 + .byte 118 + .byte 120 + .byte 122 + .byte 124 + .byte 126 + .byte 129 + .byte 131 + .byte 133 + .byte 135 + .byte 137 + .byte 139 + .byte 141 + .byte 143 + .byte 145 + .byte 147 + .byte 149 + .byte 151 + .byte 153 + 
.byte 155 + .byte 157 + .byte 159 + .byte 161 + .byte 163 + .byte 165 + .byte 167 + .byte 169 + .byte 171 + .byte 173 + .byte 175 + .byte 177 + .byte 179 + .byte 181 + .byte 183 + .byte 185 + .byte 187 + .byte 189 + .byte 191 + .byte 192 + .byte 194 + .byte 196 + .byte 198 + .byte 200 + .byte 202 + .byte 204 + .byte 206 + .byte 208 + .byte 210 + .byte 212 + .byte 214 + .byte 216 + .byte 218 + .byte 220 + .byte 222 + .byte 224 + .byte 226 + .byte 228 + .byte 230 + .byte 232 + .byte 234 + .byte 236 + .byte 238 + .byte 240 + .byte 242 + .byte 244 + .byte 246 + .byte 248 + .byte 250 + .byte 252 + .byte 254 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_3, @object + .size table_3, 256 +table_3: + .byte 0 + .byte 128 + .byte 1 + .byte 129 + .byte 2 + .byte 130 + .byte 3 + .byte 131 + .byte 4 + .byte 132 + .byte 5 + .byte 133 + .byte 6 + .byte 134 + .byte 7 + .byte 135 + .byte 8 + .byte 136 + .byte 9 + .byte 137 + .byte 10 + .byte 138 + .byte 11 + .byte 139 + .byte 12 + .byte 140 + .byte 13 + .byte 141 + .byte 14 + .byte 142 + .byte 15 + .byte 143 + .byte 16 + .byte 144 + .byte 17 + .byte 145 + .byte 18 + .byte 146 + .byte 19 + .byte 147 + .byte 20 + .byte 148 + .byte 21 + .byte 149 + .byte 22 + .byte 150 + .byte 23 + .byte 151 + .byte 24 + .byte 152 + .byte 25 + .byte 153 + .byte 26 + .byte 154 + .byte 27 + .byte 155 + .byte 28 + .byte 156 + .byte 29 + .byte 157 + .byte 30 + .byte 158 + .byte 31 + .byte 159 + .byte 160 + .byte 32 + .byte 161 + .byte 33 + .byte 162 + .byte 34 + .byte 163 + .byte 35 + .byte 164 + .byte 36 + .byte 165 + .byte 37 + .byte 166 + .byte 38 + .byte 167 + .byte 39 + .byte 168 + .byte 40 + .byte 169 + .byte 41 + .byte 170 + .byte 42 + .byte 171 + .byte 43 + .byte 172 + .byte 44 + .byte 173 + .byte 45 + .byte 174 + .byte 46 + .byte 175 + .byte 47 + .byte 176 + .byte 48 + .byte 177 + .byte 49 + .byte 178 + .byte 50 + .byte 179 + .byte 51 + .byte 180 + .byte 52 + .byte 181 + .byte 53 + .byte 182 + .byte 54 + .byte 183 + .byte 55 + .byte 184 + .byte 56 + .byte 185 + .byte 57 + .byte 186 + .byte 58 + .byte 187 + .byte 59 + .byte 188 + .byte 60 + .byte 189 + .byte 61 + .byte 190 + .byte 62 + .byte 191 + .byte 63 + .byte 64 + .byte 192 + .byte 65 + .byte 193 + .byte 66 + .byte 194 + .byte 67 + .byte 195 + .byte 68 + .byte 196 + .byte 69 + .byte 197 + .byte 70 + .byte 198 + .byte 71 + .byte 199 + .byte 72 + .byte 200 + .byte 73 + .byte 201 + .byte 74 + .byte 202 + .byte 75 + .byte 203 + .byte 76 + .byte 204 + .byte 77 + .byte 205 + .byte 78 + .byte 206 + .byte 79 + .byte 207 + .byte 80 + .byte 208 + .byte 81 + .byte 209 + .byte 82 + .byte 210 + .byte 83 + .byte 211 + .byte 84 + .byte 212 + .byte 85 + .byte 213 + .byte 86 + .byte 214 + .byte 87 + .byte 215 + .byte 88 + .byte 216 + .byte 89 + .byte 217 + .byte 90 + .byte 218 + .byte 91 + .byte 219 + .byte 92 + .byte 220 + .byte 93 + .byte 221 + .byte 94 + .byte 222 + .byte 95 + .byte 223 + .byte 224 + .byte 96 + .byte 225 + .byte 97 + .byte 226 + .byte 98 + .byte 227 + .byte 99 + .byte 228 + .byte 100 + .byte 229 + .byte 101 + .byte 230 + .byte 102 + .byte 231 + .byte 103 + .byte 232 + .byte 104 + .byte 233 + .byte 105 + .byte 234 + .byte 106 + .byte 235 + .byte 107 + .byte 236 + .byte 108 + .byte 237 + .byte 109 + .byte 238 + .byte 110 + .byte 239 + .byte 111 + .byte 240 + .byte 112 + .byte 241 + .byte 113 + .byte 242 + .byte 114 + .byte 243 + .byte 115 + .byte 244 + .byte 116 + .byte 245 + .byte 117 + .byte 246 + .byte 118 + .byte 247 + .byte 119 + .byte 248 + .byte 120 + .byte 249 + .byte 121 + .byte 250 + 
.byte 122 + .byte 251 + .byte 123 + .byte 252 + .byte 124 + .byte 253 + .byte 125 + .byte 254 + .byte 126 + .byte 255 + .byte 127 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_4, @object + .size table_4, 112 +table_4: + .byte 1 + .byte 0 + .byte 3 + .byte 0 + .byte 7 + .byte 0 + .byte 15 + .byte 0 + .byte 15 + .byte 1 + .byte 14 + .byte 3 + .byte 13 + .byte 3 + .byte 11 + .byte 3 + .byte 7 + .byte 3 + .byte 15 + .byte 2 + .byte 14 + .byte 1 + .byte 12 + .byte 3 + .byte 9 + .byte 3 + .byte 3 + .byte 3 + .byte 7 + .byte 2 + .byte 14 + .byte 0 + .byte 13 + .byte 1 + .byte 10 + .byte 3 + .byte 5 + .byte 3 + .byte 11 + .byte 2 + .byte 6 + .byte 1 + .byte 12 + .byte 2 + .byte 8 + .byte 1 + .byte 0 + .byte 3 + .byte 1 + .byte 2 + .byte 2 + .byte 0 + .byte 5 + .byte 0 + .byte 11 + .byte 0 + .byte 7 + .byte 1 + .byte 14 + .byte 2 + .byte 12 + .byte 1 + .byte 8 + .byte 3 + .byte 1 + .byte 3 + .byte 3 + .byte 2 + .byte 6 + .byte 0 + .byte 13 + .byte 0 + .byte 11 + .byte 1 + .byte 6 + .byte 3 + .byte 13 + .byte 2 + .byte 10 + .byte 1 + .byte 4 + .byte 3 + .byte 9 + .byte 2 + .byte 2 + .byte 1 + .byte 4 + .byte 2 + .byte 8 + .byte 0 + .byte 1 + .byte 1 + .byte 2 + .byte 2 + .byte 4 + .byte 0 + .byte 9 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 2 + .byte 12 + .byte 0 + .byte 9 + .byte 1 + .byte 2 + .byte 3 + .byte 5 + .byte 2 + .byte 10 + .byte 0 + + .text +.global skinny_128_384_init + .type skinny_128_384_init, @function +skinny_128_384_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,12 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_384_init, .-skinny_128_384_init + + .text +.global skinny_128_384_encrypt + .type skinny_128_384_encrypt, @function +skinny_128_384_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + std Y+33,r18 + std Y+34,r19 + std Y+35,r20 + std Y+36,r21 + ldd r18,Z+36 + ldd r19,Z+37 + ldd r20,Z+38 + ldd r21,Z+39 + std Y+37,r18 + std Y+38,r19 + std Y+39,r20 + std Y+40,r21 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + std Y+41,r18 + std Y+42,r19 + std Y+43,r20 + std Y+44,r21 + ldd r18,Z+44 + ldd r19,Z+45 + ldd r20,Z+46 + ldd 
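[Editorial aid, not part of the patch] The byte tables emitted above are precomputed lookups for the SKINNY-128 round function: table_0 and table_1 appear to be the 8-bit S-box and its inverse, table_2 and table_3 the byte-wise TK2 and TK3 tweakey LFSR steps, and table_4 the round constants for 56 rounds split into a low nibble and a high pair of bits. A small generator sketch for the two LFSR tables, assuming the standard SKINNY-128 LFSR definitions (offered only as an aid to reading the tables):

    #include <stdio.h>

    /* SKINNY-128 tweakey LFSRs, applied byte-wise once per round:
     *   TK2: x <- (x << 1) | (x7 ^ x5)           -- reproduces table_2
     *   TK3: x <- (x >> 1) | ((x0 ^ x6) << 7)    -- reproduces table_3 */
    static unsigned char lfsr2(unsigned char x)
    {
        return (unsigned char)((x << 1) | (((x >> 7) ^ (x >> 5)) & 1));
    }
    static unsigned char lfsr3(unsigned char x)
    {
        return (unsigned char)((x >> 1) | ((((x >> 6) ^ x) & 1) << 7));
    }

    int main(void)
    {
        int i;
        for (i = 0; i < 256; ++i)
            printf("%3d -> TK2 %3d, TK3 %3d\n",
                   i, lfsr2((unsigned char)i), lfsr3((unsigned char)i));
        return 0;
    }

skinny_128_384_init above is simply a 48-byte copy of the TK1||TK2||TK3 tweakey into the key schedule; the per-round S-box, LFSR and constant lookups are then performed in skinny_128_384_encrypt via the tables.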
r21,Z+47 + std Y+45,r18 + std Y+46,r19 + std Y+47,r20 + std Y+48,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +114: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif 
+ eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 
+#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov 
r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm 
r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov 
r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 
+#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,112 + brne 5721f + rjmp 790f +5721: + 
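+ ; note (assumption): r26 appears to count round-constant bytes; once it reaches 112 the rjmp to label 790 leaves the round loop and writes the state back out through X, otherwise the code below permutes the tweakey rows at Y+1..Y+48 (with table_2 and table_3 lookups updating the second and third 16-byte blocks) and jumps back to label 114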
ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + 
std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 114b +790: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_encrypt, .-skinny_128_384_encrypt + +.global skinny_128_384_encrypt_tk_full + .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt + + .text +.global skinny_128_384_decrypt + .type skinny_128_384_decrypt, @function +skinny_128_384_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r23 + std Y+2,r2 + std Y+3,r21 + std Y+4,r20 + std Y+5,r3 + std Y+6,r18 + std Y+7,r19 + std Y+8,r22 + std Y+9,r9 + std Y+10,r10 + std Y+11,r7 + std Y+12,r6 + std Y+13,r11 + std Y+14,r4 + std Y+15,r5 + std Y+16,r8 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r23 + std Y+18,r2 + std Y+19,r21 + std Y+20,r20 + std Y+21,r3 + std Y+22,r18 + std Y+23,r19 + std Y+24,r22 + std Y+25,r9 + std Y+26,r10 + std Y+27,r7 + std Y+28,r6 + std Y+29,r11 + std Y+30,r4 + std Y+31,r5 + std Y+32,r8 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r22,Z+36 + ldd r23,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+33,r23 + std Y+34,r2 + std Y+35,r21 + std Y+36,r20 + std Y+37,r3 + std Y+38,r18 + std Y+39,r19 + std Y+40,r22 + std Y+41,r9 + std Y+42,r10 + std Y+43,r7 + std Y+44,r6 + std Y+45,r11 + std Y+46,r4 + std Y+47,r5 + std Y+48,r8 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +122: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z 
+#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 122b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,28 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +150: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 150b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 +179: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 
+#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 179b + std Y+33,r12 + std Y+34,r13 + std Y+35,r14 + std Y+36,r15 + std Y+37,r24 + std Y+38,r25 + std Y+39,r16 + std Y+40,r17 + ldi r26,28 + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 +207: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 207b + std Y+41,r12 + std Y+42,r13 + std Y+43,r14 + std Y+44,r15 + std Y+45,r24 + std Y+46,r25 + std Y+47,r16 + std Y+48,r17 + ldi r26,112 +227: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm 
r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor 
r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std 
Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + 
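+ ; note (assumption): on this decryption path the register moves and XOR chains below appear to undo the row rotation and column mixing of an encryption round before the tweakey bytes at Y+1..Y+48 are XORed back out of the state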
mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z 
+#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + 
mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z 
+#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + 
ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif 
defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 903f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 227b +903: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_decrypt, .-skinny_128_384_decrypt + + .text +.global skinny_128_256_init + .type skinny_128_256_init, @function +skinny_128_256_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,8 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_256_init, .-skinny_128_256_init + + .text +.global skinny_128_256_encrypt + .type skinny_128_256_encrypt, @function +skinny_128_256_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 
+ push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +82: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov 
r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor 
r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if 
defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi 
r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + 
ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,96 + breq 594f + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) 
+ elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 82b +594: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_encrypt, .-skinny_128_256_encrypt + +.global skinny_128_256_encrypt_tk_full + .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt + + .text +.global skinny_128_256_decrypt + .type skinny_128_256_decrypt, @function +skinny_128_256_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + std Y+5,r22 + std Y+6,r23 + std Y+7,r2 + std Y+8,r3 + std Y+9,r4 + std Y+10,r5 + std Y+11,r6 + std Y+12,r7 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + std Y+21,r22 + std Y+22,r23 + std Y+23,r2 + std Y+24,r3 + std Y+25,r4 + std Y+26,r5 + std Y+27,r6 + std Y+28,r7 + std Y+29,r8 + std Y+30,r9 + std Y+31,r10 + std Y+32,r11 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,24 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +90: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm 
r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 90b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,24 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +118: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 118b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,96 +139: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else 
+ lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 
+#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + 
elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd 
r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif 
defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 651f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 139b +651: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_decrypt, .-skinny_128_256_decrypt + +#endif diff --git a/romulus/Implementations/crypto_aead/romulusn2/rhys/internal-skinny128.c b/romulus/Implementations/crypto_aead/romulusn2/rhys/internal-skinny128.c index 65ba4ed..579ced1 100644 --- a/romulus/Implementations/crypto_aead/romulusn2/rhys/internal-skinny128.c +++ b/romulus/Implementations/crypto_aead/romulusn2/rhys/internal-skinny128.c @@ -25,6 +25,8 @@ #include "internal-util.h" #include +#if !defined(__AVR__) + STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) { /* This function is used to fast-forward the TK1 tweak value @@ -55,42 +57,33 @@ STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) ((row3 << 24) & 0xFF000000U); } -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - 
size_t key_len) +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t TK3[4]; uint32_t *schedule; unsigned round; uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || (key_len != 32 && key_len != 48)) - return 0; - +#endif + +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); + memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); +#else /* Set the initial states of TK1, TK2, and TK3 */ - if (key_len == 32) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - TK3[0] = le_load_word32(key + 16); - TK3[1] = le_load_word32(key + 20); - TK3[2] = le_load_word32(key + 24); - TK3[3] = le_load_word32(key + 28); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); + TK3[0] = le_load_word32(key + 32); + TK3[1] = le_load_word32(key + 36); + TK3[2] = le_load_word32(key + 40); + TK3[3] = le_load_word32(key + 44); /* Set up the key schedule using TK2 and TK3. TK1 is not added * to the key schedule because we will derive that part of the @@ -116,20 +109,7 @@ int skinny_128_384_init skinny128_LFSR3(TK3[0]); skinny128_LFSR3(TK3[1]); } - return 1; -} - -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_384_encrypt @@ -138,7 +118,13 @@ void skinny_128_384_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -148,14 +134,24 @@ void skinny_128_384_encrypt s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -163,8 +159,15 @@ void skinny_128_384_encrypt skinny128_sbox(s3); /* 
Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -185,6 +188,16 @@ void skinny_128_384_encrypt /* Permute TK1 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_permute_tk(TK3); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -200,7 +213,13 @@ void skinny_128_384_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0x15; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -215,15 +234,47 @@ void skinny_128_384_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Permute TK1 to fast-forward it to the end of the key schedule */ skinny128_fast_forward_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_fast_forward_tk(TK2); + skinny128_fast_forward_tk(TK3); + for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
+ skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); + skinny128_LFSR3(TK3[2]); + skinny128_LFSR3(TK3[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_inv_permute_tk(TK3); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); + skinny128_LFSR2(TK3[2]); + skinny128_LFSR2(TK3[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -240,8 +291,15 @@ void skinny_128_384_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -259,13 +317,18 @@ void skinny_128_384_decrypt } void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2) { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; uint32_t TK2[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -275,7 +338,7 @@ void skinny_128_384_encrypt_tk2 s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1/TK2 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); @@ -284,9 +347,15 @@ void skinny_128_384_encrypt_tk2 TK2[1] = le_load_word32(tk2 + 4); TK2[2] = le_load_word32(tk2 + 8); TK2[3] = le_load_word32(tk2 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -294,8 +363,15 @@ void skinny_128_384_encrypt_tk2 skinny128_sbox(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -319,6 +395,13 @@ void skinny_128_384_encrypt_tk2 skinny128_permute_tk(TK2); skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK3); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -408,33 +491,27 @@ void skinny_128_384_encrypt_tk_full 
le_store_word32(output + 12, s3); } -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len) +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t *schedule; unsigned round; uint8_t rc; +#endif - /* Validate the parameters */ - if (!ks || !key || (key_len != 16 && key_len != 32)) - return 0; - +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); +#else /* Set the initial states of TK1 and TK2 */ - if (key_len == 16) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); /* Set up the key schedule using TK2. TK1 is not added * to the key schedule because we will derive that part of the @@ -457,20 +534,7 @@ int skinny_128_256_init skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); } - return 1; -} - -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_256_encrypt @@ -479,7 +543,12 @@ void skinny_128_256_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -494,18 +563,31 @@ void skinny_128_256_encrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); skinny128_sbox(s2); skinny128_sbox(s3); - /* Apply the subkey for this round */ + /* XOR the round constant and the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -524,8 +606,15 @@ void skinny_128_256_encrypt s1 = s0; s0 = temp; - /* Permute TK1 for the next round */ + /* Permute TK1 and TK2 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -541,7 +630,12 @@ void 
skinny_128_256_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0x09; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -558,12 +652,29 @@ void skinny_128_256_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2. + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -580,8 +691,15 @@ void skinny_128_256_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -670,142 +788,14 @@ void skinny_128_256_encrypt_tk_full le_store_word32(output + 12, s3); } -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len) -{ - uint32_t TK1[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || key_len != 16) - return 0; - - /* Set the initial state of TK1 */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); +#else /* __AVR__ */ - /* Set up the key schedule using TK1 */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK1[0] ^ (rc & 0x0F); - schedule[1] = TK1[1] ^ (rc >> 4); - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); - } - return 1; -} - -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) +void skinny_128_384_encrypt_tk2 + (skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, const unsigned char *tk2) { - uint32_t s0, s1, s2, s3; - const uint32_t *schedule = ks->k; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); + memcpy(ks->TK2, tk2, 16); + skinny_128_384_encrypt(ks, output, input); } -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - const uint32_t *schedule; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_128_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule -= 2) { - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} +#endif /* __AVR__ */ diff --git a/romulus/Implementations/crypto_aead/romulusn2/rhys/internal-skinny128.h b/romulus/Implementations/crypto_aead/romulusn2/rhys/internal-skinny128.h index 76b34f5..2bfda3c 100644 --- a/romulus/Implementations/crypto_aead/romulusn2/rhys/internal-skinny128.h +++ b/romulus/Implementations/crypto_aead/romulusn2/rhys/internal-skinny128.h @@ -39,6 +39,16 @@ extern "C" { #endif /** + * \def SKINNY_128_SMALL_SCHEDULE + * \brief Defined to 1 to use the 
small key schedule version of SKINNY-128. + */ +#if defined(__AVR__) +#define SKINNY_128_SMALL_SCHEDULE 1 +#else +#define SKINNY_128_SMALL_SCHEDULE 0 +#endif + +/** * \brief Size of a block for SKINNY-128 block ciphers. */ #define SKINNY_128_BLOCK_SIZE 16 @@ -56,8 +66,16 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; + + /** TK3 for the small key schedule */ + uint8_t TK3[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_384_ROUNDS * 2]; +#endif } skinny_128_384_key_schedule_t; @@ -66,29 +84,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 32 or 48, - * where 32 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-384. - * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); /** * \brief Encrypts a 128-bit block with SKINNY-128-384. @@ -133,9 +131,12 @@ void skinny_128_384_decrypt * This version is useful when both TK1 and TK2 change from block to block. * When the key is initialized with skinny_128_384_init(), the TK2 part of * the key value should be set to zero. + * + * \note Some versions of this function may modify the key schedule to + * copy tk2 into place. */ void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2); /** @@ -170,8 +171,13 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_256_ROUNDS * 2]; +#endif } skinny_128_256_key_schedule_t; @@ -180,29 +186,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16 or 32, - * where 16 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. 
- */ -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); /** * \brief Encrypts a 128-bit block with SKINNY-128-256. @@ -251,63 +237,6 @@ void skinny_128_256_encrypt_tk_full (const unsigned char key[32], unsigned char *output, const unsigned char *input); -/** - * \brief Number of rounds for SKINNY-128-128. - */ -#define SKINNY_128_128_ROUNDS 40 - -/** - * \brief Structure of the key schedule for SKINNY-128-128. - */ -typedef struct -{ - /** Words of the key schedule */ - uint32_t k[SKINNY_128_128_ROUNDS * 2]; - -} skinny_128_128_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-128. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - #ifdef __cplusplus } #endif diff --git a/romulus/Implementations/crypto_aead/romulusn2/rhys/internal-util.h b/romulus/Implementations/crypto_aead/romulusn2/rhys/internal-util.h index e79158c..e30166d 100644 --- a/romulus/Implementations/crypto_aead/romulusn2/rhys/internal-util.h +++ b/romulus/Implementations/crypto_aead/romulusn2/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/romulus/Implementations/crypto_aead/romulusn2/rhys/romulus.c b/romulus/Implementations/crypto_aead/romulusn2/rhys/romulus.c index be1c0fa..bb19cc5 100644 --- a/romulus/Implementations/crypto_aead/romulusn2/rhys/romulus.c +++ b/romulus/Implementations/crypto_aead/romulusn2/rhys/romulus.c @@ -116,14 +116,15 @@ static void romulus1_init (skinny_128_384_key_schedule_t *ks, const unsigned char *k, const unsigned char *npub) { - unsigned char TK[32]; + unsigned char TK[48]; + TK[0] = 0x01; /* Initialize the 56-bit LFSR counter */ + memset(TK + 1, 0, 15); if (npub) - memcpy(TK, npub, 16); + memcpy(TK + 16, npub, 16); else - memset(TK, 0, 16); - memcpy(TK + 16, k, 16); - skinny_128_384_init(ks, TK, sizeof(TK)); - ks->TK1[0] = 0x01; /* Initialize the 56-bit LFSR counter */ + memset(TK + 16, 0, 16); + memcpy(TK + 32, k, 16); + skinny_128_384_init(ks, TK); } /** @@ -138,14 +139,18 @@ static void romulus2_init (skinny_128_384_key_schedule_t *ks, const unsigned char *k, const unsigned char *npub) { - unsigned char TK[32]; - memcpy(TK, k, 16); - memset(TK + 16, 0, 16); - TK[16] = 0x01; /* Initialize the high 24 bits of the LFSR counter */ - skinny_128_384_init(ks, TK, sizeof(TK)); - ks->TK1[0] = 0x01; 
/* Initialize the low 24 bits of the LFSR counter */ - if (npub) - memcpy(ks->TK1 + 4, npub, 12); + unsigned char TK[48]; + TK[0] = 0x01; /* Initialize the low 24 bits of the LFSR counter */ + if (npub) { + TK[1] = TK[2] = TK[3] = 0; + memcpy(TK + 4, npub, 12); + } else { + memset(TK + 1, 0, 15); + } + memcpy(TK + 16, k, 16); + TK[32] = 0x01; /* Initialize the high 24 bits of the LFSR counter */ + memset(TK + 33, 0, 15); + skinny_128_384_init(ks, TK); } /** @@ -160,10 +165,16 @@ static void romulus3_init (skinny_128_256_key_schedule_t *ks, const unsigned char *k, const unsigned char *npub) { - skinny_128_256_init(ks, k, 16); - ks->TK1[0] = 0x01; /* Initialize the 24-bit LFSR counter */ - if (npub) - memcpy(ks->TK1 + 4, npub, 12); + unsigned char TK[32]; + TK[0] = 0x01; /* Initialize the 24-bit LFSR counter */ + if (npub) { + TK[1] = TK[2] = TK[3] = 0; + memcpy(TK + 4, npub, 12); + } else { + memset(TK + 1, 0, 15); + } + memcpy(TK + 16, k, 16); + skinny_128_256_init(ks, TK); } /** diff --git a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/aead-common.c b/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/aead-common.h b/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. 
- * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
- * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/api.h b/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/api.h deleted file mode 100644 index c3c0a27..0000000 --- a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 12 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/encrypt.c b/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/encrypt.c deleted file mode 100644 index a522291..0000000 --- a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "romulus.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return romulus_n3_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return romulus_n3_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-skinny128-avr.S b/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-skinny128-avr.S deleted file mode 100644 index d342cd5..0000000 --- a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-skinny128-avr.S +++ /dev/null @@ -1,10099 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 256 -table_0: - .byte 101 - .byte 76 - .byte 106 - .byte 66 - .byte 75 - .byte 99 - .byte 67 - .byte 107 - .byte 85 - .byte 117 - .byte 90 - .byte 122 - .byte 83 - .byte 115 - .byte 91 - .byte 123 - .byte 53 - .byte 140 - .byte 58 - .byte 129 - .byte 137 - .byte 51 - .byte 128 - .byte 59 - .byte 149 - .byte 37 - .byte 152 - .byte 42 - .byte 144 - .byte 35 - .byte 153 - .byte 43 - .byte 229 - .byte 204 - .byte 232 - .byte 193 - .byte 201 - .byte 224 - .byte 192 - .byte 233 - .byte 213 - .byte 245 - .byte 216 - .byte 248 - .byte 208 - .byte 240 - .byte 217 - .byte 249 - .byte 165 - .byte 28 - .byte 168 - .byte 18 - .byte 27 - .byte 160 - .byte 19 - .byte 169 - .byte 5 - .byte 181 - .byte 10 - .byte 184 - .byte 3 - .byte 176 - .byte 11 - .byte 185 - .byte 50 - .byte 136 - .byte 60 - .byte 133 - .byte 141 - .byte 52 - .byte 132 - .byte 61 - .byte 145 - .byte 34 - .byte 156 - .byte 44 - .byte 148 - .byte 36 - .byte 157 - .byte 45 - .byte 98 - .byte 74 - .byte 108 - .byte 69 - 
.byte 77 - .byte 100 - .byte 68 - .byte 109 - .byte 82 - .byte 114 - .byte 92 - .byte 124 - .byte 84 - .byte 116 - .byte 93 - .byte 125 - .byte 161 - .byte 26 - .byte 172 - .byte 21 - .byte 29 - .byte 164 - .byte 20 - .byte 173 - .byte 2 - .byte 177 - .byte 12 - .byte 188 - .byte 4 - .byte 180 - .byte 13 - .byte 189 - .byte 225 - .byte 200 - .byte 236 - .byte 197 - .byte 205 - .byte 228 - .byte 196 - .byte 237 - .byte 209 - .byte 241 - .byte 220 - .byte 252 - .byte 212 - .byte 244 - .byte 221 - .byte 253 - .byte 54 - .byte 142 - .byte 56 - .byte 130 - .byte 139 - .byte 48 - .byte 131 - .byte 57 - .byte 150 - .byte 38 - .byte 154 - .byte 40 - .byte 147 - .byte 32 - .byte 155 - .byte 41 - .byte 102 - .byte 78 - .byte 104 - .byte 65 - .byte 73 - .byte 96 - .byte 64 - .byte 105 - .byte 86 - .byte 118 - .byte 88 - .byte 120 - .byte 80 - .byte 112 - .byte 89 - .byte 121 - .byte 166 - .byte 30 - .byte 170 - .byte 17 - .byte 25 - .byte 163 - .byte 16 - .byte 171 - .byte 6 - .byte 182 - .byte 8 - .byte 186 - .byte 0 - .byte 179 - .byte 9 - .byte 187 - .byte 230 - .byte 206 - .byte 234 - .byte 194 - .byte 203 - .byte 227 - .byte 195 - .byte 235 - .byte 214 - .byte 246 - .byte 218 - .byte 250 - .byte 211 - .byte 243 - .byte 219 - .byte 251 - .byte 49 - .byte 138 - .byte 62 - .byte 134 - .byte 143 - .byte 55 - .byte 135 - .byte 63 - .byte 146 - .byte 33 - .byte 158 - .byte 46 - .byte 151 - .byte 39 - .byte 159 - .byte 47 - .byte 97 - .byte 72 - .byte 110 - .byte 70 - .byte 79 - .byte 103 - .byte 71 - .byte 111 - .byte 81 - .byte 113 - .byte 94 - .byte 126 - .byte 87 - .byte 119 - .byte 95 - .byte 127 - .byte 162 - .byte 24 - .byte 174 - .byte 22 - .byte 31 - .byte 167 - .byte 23 - .byte 175 - .byte 1 - .byte 178 - .byte 14 - .byte 190 - .byte 7 - .byte 183 - .byte 15 - .byte 191 - .byte 226 - .byte 202 - .byte 238 - .byte 198 - .byte 207 - .byte 231 - .byte 199 - .byte 239 - .byte 210 - .byte 242 - .byte 222 - .byte 254 - .byte 215 - .byte 247 - .byte 223 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 256 -table_1: - .byte 172 - .byte 232 - .byte 104 - .byte 60 - .byte 108 - .byte 56 - .byte 168 - .byte 236 - .byte 170 - .byte 174 - .byte 58 - .byte 62 - .byte 106 - .byte 110 - .byte 234 - .byte 238 - .byte 166 - .byte 163 - .byte 51 - .byte 54 - .byte 102 - .byte 99 - .byte 227 - .byte 230 - .byte 225 - .byte 164 - .byte 97 - .byte 52 - .byte 49 - .byte 100 - .byte 161 - .byte 228 - .byte 141 - .byte 201 - .byte 73 - .byte 29 - .byte 77 - .byte 25 - .byte 137 - .byte 205 - .byte 139 - .byte 143 - .byte 27 - .byte 31 - .byte 75 - .byte 79 - .byte 203 - .byte 207 - .byte 133 - .byte 192 - .byte 64 - .byte 21 - .byte 69 - .byte 16 - .byte 128 - .byte 197 - .byte 130 - .byte 135 - .byte 18 - .byte 23 - .byte 66 - .byte 71 - .byte 194 - .byte 199 - .byte 150 - .byte 147 - .byte 3 - .byte 6 - .byte 86 - .byte 83 - .byte 211 - .byte 214 - .byte 209 - .byte 148 - .byte 81 - .byte 4 - .byte 1 - .byte 84 - .byte 145 - .byte 212 - .byte 156 - .byte 216 - .byte 88 - .byte 12 - .byte 92 - .byte 8 - .byte 152 - .byte 220 - .byte 154 - .byte 158 - .byte 10 - .byte 14 - .byte 90 - .byte 94 - .byte 218 - .byte 222 - .byte 149 - .byte 208 - .byte 80 - .byte 5 - .byte 85 - .byte 0 - .byte 144 - .byte 213 - .byte 146 - .byte 151 - .byte 2 - .byte 7 - .byte 82 - .byte 87 - .byte 210 - .byte 215 - .byte 157 - .byte 217 - .byte 89 - .byte 13 - .byte 93 - .byte 9 - .byte 153 - .byte 221 - .byte 155 - .byte 159 - .byte 11 - .byte 15 - .byte 91 - .byte 95 - .byte 
219 - .byte 223 - .byte 22 - .byte 19 - .byte 131 - .byte 134 - .byte 70 - .byte 67 - .byte 195 - .byte 198 - .byte 65 - .byte 20 - .byte 193 - .byte 132 - .byte 17 - .byte 68 - .byte 129 - .byte 196 - .byte 28 - .byte 72 - .byte 200 - .byte 140 - .byte 76 - .byte 24 - .byte 136 - .byte 204 - .byte 26 - .byte 30 - .byte 138 - .byte 142 - .byte 74 - .byte 78 - .byte 202 - .byte 206 - .byte 53 - .byte 96 - .byte 224 - .byte 165 - .byte 101 - .byte 48 - .byte 160 - .byte 229 - .byte 50 - .byte 55 - .byte 162 - .byte 167 - .byte 98 - .byte 103 - .byte 226 - .byte 231 - .byte 61 - .byte 105 - .byte 233 - .byte 173 - .byte 109 - .byte 57 - .byte 169 - .byte 237 - .byte 59 - .byte 63 - .byte 171 - .byte 175 - .byte 107 - .byte 111 - .byte 235 - .byte 239 - .byte 38 - .byte 35 - .byte 179 - .byte 182 - .byte 118 - .byte 115 - .byte 243 - .byte 246 - .byte 113 - .byte 36 - .byte 241 - .byte 180 - .byte 33 - .byte 116 - .byte 177 - .byte 244 - .byte 44 - .byte 120 - .byte 248 - .byte 188 - .byte 124 - .byte 40 - .byte 184 - .byte 252 - .byte 42 - .byte 46 - .byte 186 - .byte 190 - .byte 122 - .byte 126 - .byte 250 - .byte 254 - .byte 37 - .byte 112 - .byte 240 - .byte 181 - .byte 117 - .byte 32 - .byte 176 - .byte 245 - .byte 34 - .byte 39 - .byte 178 - .byte 183 - .byte 114 - .byte 119 - .byte 242 - .byte 247 - .byte 45 - .byte 121 - .byte 249 - .byte 189 - .byte 125 - .byte 41 - .byte 185 - .byte 253 - .byte 43 - .byte 47 - .byte 187 - .byte 191 - .byte 123 - .byte 127 - .byte 251 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_2, @object - .size table_2, 256 -table_2: - .byte 0 - .byte 2 - .byte 4 - .byte 6 - .byte 8 - .byte 10 - .byte 12 - .byte 14 - .byte 16 - .byte 18 - .byte 20 - .byte 22 - .byte 24 - .byte 26 - .byte 28 - .byte 30 - .byte 32 - .byte 34 - .byte 36 - .byte 38 - .byte 40 - .byte 42 - .byte 44 - .byte 46 - .byte 48 - .byte 50 - .byte 52 - .byte 54 - .byte 56 - .byte 58 - .byte 60 - .byte 62 - .byte 65 - .byte 67 - .byte 69 - .byte 71 - .byte 73 - .byte 75 - .byte 77 - .byte 79 - .byte 81 - .byte 83 - .byte 85 - .byte 87 - .byte 89 - .byte 91 - .byte 93 - .byte 95 - .byte 97 - .byte 99 - .byte 101 - .byte 103 - .byte 105 - .byte 107 - .byte 109 - .byte 111 - .byte 113 - .byte 115 - .byte 117 - .byte 119 - .byte 121 - .byte 123 - .byte 125 - .byte 127 - .byte 128 - .byte 130 - .byte 132 - .byte 134 - .byte 136 - .byte 138 - .byte 140 - .byte 142 - .byte 144 - .byte 146 - .byte 148 - .byte 150 - .byte 152 - .byte 154 - .byte 156 - .byte 158 - .byte 160 - .byte 162 - .byte 164 - .byte 166 - .byte 168 - .byte 170 - .byte 172 - .byte 174 - .byte 176 - .byte 178 - .byte 180 - .byte 182 - .byte 184 - .byte 186 - .byte 188 - .byte 190 - .byte 193 - .byte 195 - .byte 197 - .byte 199 - .byte 201 - .byte 203 - .byte 205 - .byte 207 - .byte 209 - .byte 211 - .byte 213 - .byte 215 - .byte 217 - .byte 219 - .byte 221 - .byte 223 - .byte 225 - .byte 227 - .byte 229 - .byte 231 - .byte 233 - .byte 235 - .byte 237 - .byte 239 - .byte 241 - .byte 243 - .byte 245 - .byte 247 - .byte 249 - .byte 251 - .byte 253 - .byte 255 - .byte 1 - .byte 3 - .byte 5 - .byte 7 - .byte 9 - .byte 11 - .byte 13 - .byte 15 - .byte 17 - .byte 19 - .byte 21 - .byte 23 - .byte 25 - .byte 27 - .byte 29 - .byte 31 - .byte 33 - .byte 35 - .byte 37 - .byte 39 - .byte 41 - .byte 43 - .byte 45 - .byte 47 - .byte 49 - .byte 51 - .byte 53 - .byte 55 - .byte 57 - .byte 59 - .byte 61 - .byte 63 - .byte 64 - .byte 66 - .byte 68 - .byte 70 - .byte 72 - .byte 74 - .byte 76 - .byte 78 - .byte 80 - .byte 
82 - .byte 84 - .byte 86 - .byte 88 - .byte 90 - .byte 92 - .byte 94 - .byte 96 - .byte 98 - .byte 100 - .byte 102 - .byte 104 - .byte 106 - .byte 108 - .byte 110 - .byte 112 - .byte 114 - .byte 116 - .byte 118 - .byte 120 - .byte 122 - .byte 124 - .byte 126 - .byte 129 - .byte 131 - .byte 133 - .byte 135 - .byte 137 - .byte 139 - .byte 141 - .byte 143 - .byte 145 - .byte 147 - .byte 149 - .byte 151 - .byte 153 - .byte 155 - .byte 157 - .byte 159 - .byte 161 - .byte 163 - .byte 165 - .byte 167 - .byte 169 - .byte 171 - .byte 173 - .byte 175 - .byte 177 - .byte 179 - .byte 181 - .byte 183 - .byte 185 - .byte 187 - .byte 189 - .byte 191 - .byte 192 - .byte 194 - .byte 196 - .byte 198 - .byte 200 - .byte 202 - .byte 204 - .byte 206 - .byte 208 - .byte 210 - .byte 212 - .byte 214 - .byte 216 - .byte 218 - .byte 220 - .byte 222 - .byte 224 - .byte 226 - .byte 228 - .byte 230 - .byte 232 - .byte 234 - .byte 236 - .byte 238 - .byte 240 - .byte 242 - .byte 244 - .byte 246 - .byte 248 - .byte 250 - .byte 252 - .byte 254 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_3, @object - .size table_3, 256 -table_3: - .byte 0 - .byte 128 - .byte 1 - .byte 129 - .byte 2 - .byte 130 - .byte 3 - .byte 131 - .byte 4 - .byte 132 - .byte 5 - .byte 133 - .byte 6 - .byte 134 - .byte 7 - .byte 135 - .byte 8 - .byte 136 - .byte 9 - .byte 137 - .byte 10 - .byte 138 - .byte 11 - .byte 139 - .byte 12 - .byte 140 - .byte 13 - .byte 141 - .byte 14 - .byte 142 - .byte 15 - .byte 143 - .byte 16 - .byte 144 - .byte 17 - .byte 145 - .byte 18 - .byte 146 - .byte 19 - .byte 147 - .byte 20 - .byte 148 - .byte 21 - .byte 149 - .byte 22 - .byte 150 - .byte 23 - .byte 151 - .byte 24 - .byte 152 - .byte 25 - .byte 153 - .byte 26 - .byte 154 - .byte 27 - .byte 155 - .byte 28 - .byte 156 - .byte 29 - .byte 157 - .byte 30 - .byte 158 - .byte 31 - .byte 159 - .byte 160 - .byte 32 - .byte 161 - .byte 33 - .byte 162 - .byte 34 - .byte 163 - .byte 35 - .byte 164 - .byte 36 - .byte 165 - .byte 37 - .byte 166 - .byte 38 - .byte 167 - .byte 39 - .byte 168 - .byte 40 - .byte 169 - .byte 41 - .byte 170 - .byte 42 - .byte 171 - .byte 43 - .byte 172 - .byte 44 - .byte 173 - .byte 45 - .byte 174 - .byte 46 - .byte 175 - .byte 47 - .byte 176 - .byte 48 - .byte 177 - .byte 49 - .byte 178 - .byte 50 - .byte 179 - .byte 51 - .byte 180 - .byte 52 - .byte 181 - .byte 53 - .byte 182 - .byte 54 - .byte 183 - .byte 55 - .byte 184 - .byte 56 - .byte 185 - .byte 57 - .byte 186 - .byte 58 - .byte 187 - .byte 59 - .byte 188 - .byte 60 - .byte 189 - .byte 61 - .byte 190 - .byte 62 - .byte 191 - .byte 63 - .byte 64 - .byte 192 - .byte 65 - .byte 193 - .byte 66 - .byte 194 - .byte 67 - .byte 195 - .byte 68 - .byte 196 - .byte 69 - .byte 197 - .byte 70 - .byte 198 - .byte 71 - .byte 199 - .byte 72 - .byte 200 - .byte 73 - .byte 201 - .byte 74 - .byte 202 - .byte 75 - .byte 203 - .byte 76 - .byte 204 - .byte 77 - .byte 205 - .byte 78 - .byte 206 - .byte 79 - .byte 207 - .byte 80 - .byte 208 - .byte 81 - .byte 209 - .byte 82 - .byte 210 - .byte 83 - .byte 211 - .byte 84 - .byte 212 - .byte 85 - .byte 213 - .byte 86 - .byte 214 - .byte 87 - .byte 215 - .byte 88 - .byte 216 - .byte 89 - .byte 217 - .byte 90 - .byte 218 - .byte 91 - .byte 219 - .byte 92 - .byte 220 - .byte 93 - .byte 221 - .byte 94 - .byte 222 - .byte 95 - .byte 223 - .byte 224 - .byte 96 - .byte 225 - .byte 97 - .byte 226 - .byte 98 - .byte 227 - .byte 99 - .byte 228 - .byte 100 - .byte 229 - .byte 101 - .byte 230 - .byte 102 - .byte 231 - .byte 103 - .byte 232 - .byte 104 - .byte 
233 - .byte 105 - .byte 234 - .byte 106 - .byte 235 - .byte 107 - .byte 236 - .byte 108 - .byte 237 - .byte 109 - .byte 238 - .byte 110 - .byte 239 - .byte 111 - .byte 240 - .byte 112 - .byte 241 - .byte 113 - .byte 242 - .byte 114 - .byte 243 - .byte 115 - .byte 244 - .byte 116 - .byte 245 - .byte 117 - .byte 246 - .byte 118 - .byte 247 - .byte 119 - .byte 248 - .byte 120 - .byte 249 - .byte 121 - .byte 250 - .byte 122 - .byte 251 - .byte 123 - .byte 252 - .byte 124 - .byte 253 - .byte 125 - .byte 254 - .byte 126 - .byte 255 - .byte 127 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_4, @object - .size table_4, 112 -table_4: - .byte 1 - .byte 0 - .byte 3 - .byte 0 - .byte 7 - .byte 0 - .byte 15 - .byte 0 - .byte 15 - .byte 1 - .byte 14 - .byte 3 - .byte 13 - .byte 3 - .byte 11 - .byte 3 - .byte 7 - .byte 3 - .byte 15 - .byte 2 - .byte 14 - .byte 1 - .byte 12 - .byte 3 - .byte 9 - .byte 3 - .byte 3 - .byte 3 - .byte 7 - .byte 2 - .byte 14 - .byte 0 - .byte 13 - .byte 1 - .byte 10 - .byte 3 - .byte 5 - .byte 3 - .byte 11 - .byte 2 - .byte 6 - .byte 1 - .byte 12 - .byte 2 - .byte 8 - .byte 1 - .byte 0 - .byte 3 - .byte 1 - .byte 2 - .byte 2 - .byte 0 - .byte 5 - .byte 0 - .byte 11 - .byte 0 - .byte 7 - .byte 1 - .byte 14 - .byte 2 - .byte 12 - .byte 1 - .byte 8 - .byte 3 - .byte 1 - .byte 3 - .byte 3 - .byte 2 - .byte 6 - .byte 0 - .byte 13 - .byte 0 - .byte 11 - .byte 1 - .byte 6 - .byte 3 - .byte 13 - .byte 2 - .byte 10 - .byte 1 - .byte 4 - .byte 3 - .byte 9 - .byte 2 - .byte 2 - .byte 1 - .byte 4 - .byte 2 - .byte 8 - .byte 0 - .byte 1 - .byte 1 - .byte 2 - .byte 2 - .byte 4 - .byte 0 - .byte 9 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 2 - .byte 12 - .byte 0 - .byte 9 - .byte 1 - .byte 2 - .byte 3 - .byte 5 - .byte 2 - .byte 10 - .byte 0 - - .text -.global skinny_128_384_init - .type skinny_128_384_init, @function -skinny_128_384_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,12 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_384_init, .-skinny_128_384_init - - .text -.global skinny_128_384_encrypt - .type skinny_128_384_encrypt, @function -skinny_128_384_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - 
ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - std Y+33,r18 - std Y+34,r19 - std Y+35,r20 - std Y+36,r21 - ldd r18,Z+36 - ldd r19,Z+37 - ldd r20,Z+38 - ldd r21,Z+39 - std Y+37,r18 - std Y+38,r19 - std Y+39,r20 - std Y+40,r21 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - std Y+41,r18 - std Y+42,r19 - std Y+43,r20 - std Y+44,r21 - ldd r18,Z+44 - ldd r19,Z+45 - ldd r20,Z+46 - ldd r21,Z+47 - std Y+45,r18 - std Y+46,r19 - std Y+47,r20 - std Y+48,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -114: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi 
r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd 
r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) 
- ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm 
r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - 
ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd 
r0,Y+48 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,112 - brne 5721f - rjmp 790f -5721: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif 
defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 114b -790: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_encrypt, .-skinny_128_384_encrypt - -.global skinny_128_384_encrypt_tk_full - .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt - - .text -.global skinny_128_384_decrypt - .type skinny_128_384_decrypt, @function -skinny_128_384_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r23 - std Y+2,r2 - std Y+3,r21 - std Y+4,r20 - std Y+5,r3 - std Y+6,r18 - std Y+7,r19 - std Y+8,r22 - std Y+9,r9 - std Y+10,r10 - std Y+11,r7 - std Y+12,r6 - std Y+13,r11 - std Y+14,r4 - std Y+15,r5 - std Y+16,r8 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r23 - std Y+18,r2 - std Y+19,r21 - std Y+20,r20 - std Y+21,r3 - std Y+22,r18 - std Y+23,r19 - std Y+24,r22 - std Y+25,r9 - std Y+26,r10 - std Y+27,r7 - std Y+28,r6 - std Y+29,r11 - std Y+30,r4 - std Y+31,r5 - std Y+32,r8 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r22,Z+36 - ldd r23,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+33,r23 - std Y+34,r2 - std Y+35,r21 - std Y+36,r20 - std Y+37,r3 - std Y+38,r18 - std Y+39,r19 - std Y+40,r22 - std Y+41,r9 - std Y+42,r10 - std Y+43,r7 - std Y+44,r6 - std Y+45,r11 - std Y+46,r4 - std Y+47,r5 - std Y+48,r8 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 
-#endif - ldi r26,28 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -122: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 122b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,28 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -150: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 150b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 -179: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z 
-#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 179b - std Y+33,r12 - std Y+34,r13 - std Y+35,r14 - std Y+36,r15 - std Y+37,r24 - std Y+38,r25 - std Y+39,r16 - std Y+40,r17 - ldi r26,28 - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 -207: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 207b - std Y+41,r12 - std Y+42,r13 - std Y+43,r14 - std Y+44,r15 - std Y+45,r24 - std Y+46,r25 - std Y+47,r16 - std Y+48,r17 - ldi r26,112 -227: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - 
lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - 
eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - 
lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov 
r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - 
mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - 
elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - 
mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out 
_SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 903f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 227b -903: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_decrypt, .-skinny_128_384_decrypt - - .text -.global skinny_128_256_init - .type skinny_128_256_init, @function -skinny_128_256_init: - movw r30,r24 - movw r26,r22 
-.L__stack_usage = 2 - ldi r22,8 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_256_init, .-skinny_128_256_init - - .text -.global skinny_128_256_encrypt - .type skinny_128_256_encrypt, @function -skinny_128_256_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -82: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - 
mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi 
r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if 
defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 
- mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z 
-#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,96 - breq 594f - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 82b -594: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_encrypt, .-skinny_128_256_encrypt - -.global skinny_128_256_encrypt_tk_full - .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt - - .text -.global skinny_128_256_decrypt - .type skinny_128_256_decrypt, @function -skinny_128_256_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - std Y+5,r22 - std Y+6,r23 - std Y+7,r2 - std Y+8,r3 - std Y+9,r4 - std Y+10,r5 - std Y+11,r6 - std Y+12,r7 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - std Y+21,r22 - std Y+22,r23 - std Y+23,r2 - std Y+24,r3 - std Y+25,r4 - std Y+26,r5 - std Y+27,r6 - std Y+28,r7 - std Y+29,r8 - std Y+30,r9 - std Y+31,r10 - std Y+32,r11 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi 
r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,24 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -90: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 90b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,24 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -118: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 118b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,96 -139: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 
- ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov 
r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov 
r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd 
r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif 
defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif 
defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 651f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 139b -651: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_decrypt, .-skinny_128_256_decrypt - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-skinny128.c b/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-skinny128.c deleted file mode 100644 index 579ced1..0000000 --- 
a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-skinny128.c +++ /dev/null @@ -1,801 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-skinny128.h" -#include "internal-skinnyutil.h" -#include "internal-util.h" -#include - -#if !defined(__AVR__) - -STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) -{ - /* This function is used to fast-forward the TK1 tweak value - * to the value at the end of the key schedule for decryption. - * - * The tweak permutation repeats every 16 rounds, so SKINNY-128-256 - * with 48 rounds does not need any fast forwarding applied. - * SKINNY-128-128 with 40 rounds and SKINNY-128-384 with 56 rounds - * are equivalent to applying the permutation 8 times: - * - * PT*8 = [5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12] - */ - uint32_t row0 = tk[0]; - uint32_t row1 = tk[1]; - uint32_t row2 = tk[2]; - uint32_t row3 = tk[3]; - tk[0] = ((row1 >> 8) & 0x0000FFFFU) | - ((row0 >> 8) & 0x00FF0000U) | - ((row0 << 8) & 0xFF000000U); - tk[1] = ((row1 >> 24) & 0x000000FFU) | - ((row0 << 8) & 0x00FFFF00U) | - ((row1 << 24) & 0xFF000000U); - tk[2] = ((row3 >> 8) & 0x0000FFFFU) | - ((row2 >> 8) & 0x00FF0000U) | - ((row2 << 8) & 0xFF000000U); - tk[3] = ((row3 >> 24) & 0x000000FFU) | - ((row2 << 8) & 0x00FFFF00U) | - ((row3 << 24) & 0xFF000000U); -} - -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); - memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); -#else - /* Set the initial states of TK1, TK2, and TK3 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Set up the key schedule using TK2 and TK3. 
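The skinny128_fast_forward_tk() comment above quotes the composite tweakey permutation PT*8 = [5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12] and notes that the permutation repeats every 16 rounds. A minimal standalone sketch that reproduces both facts by composing the cell permutation; the PT table is copied from the skinny128_permute_tk() comment, and the composition convention new[i] = old[PT[i]] is an assumption made explicit here rather than something stated in the deleted file:

#include <stdio.h>

/* Cell permutation PT, as quoted in the skinny128_permute_tk() comment:
 * new_cell[i] = old_cell[PT[i]] */
static const unsigned char PT[16] = {
    9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7
};

int main(void)
{
    unsigned char cur[16], next[16];
    int i, r;

    /* Start from the identity arrangement of the 16 tweakey cells */
    for (i = 0; i < 16; ++i)
        cur[i] = (unsigned char)i;

    for (r = 1; r <= 16; ++r) {
        for (i = 0; i < 16; ++i)
            next[i] = cur[PT[i]];
        for (i = 0; i < 16; ++i)
            cur[i] = next[i];
        if (r == 8) {
            /* Prints 5 6 3 2 7 0 1 4 13 14 11 10 15 8 9 12,
             * i.e. the PT*8 fast-forward permutation quoted above */
            printf("PT^8 :");
            for (i = 0; i < 16; ++i)
                printf(" %d", cur[i]);
            printf("\n");
        }
    }

    /* After 16 applications the permutation is the identity, which is why
     * 48-round SKINNY-128-256 (48 = 3 * 16) needs no TK1 fast-forward */
    printf("PT^16:");
    for (i = 0; i < 16; ++i)
        printf(" %d", cur[i]);
    printf("\n");
    return 0;
}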
TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ TK3[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ TK3[1] ^ (rc >> 4); - - /* Permute TK2 and TK3 for the next round */ - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - - /* Apply the LFSR's to TK2 and TK3 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } -#endif -} - -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
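The ShiftRows comment in this routine describes rotating the cells of a row to the right by doing a left rotate of the 32-bit row word. A minimal sketch of why that holds for rows packed with le_load_word32(); the local rotl32() helper is only a stand-in for the library's leftRotate8 macro and is an assumption of this sketch:

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the leftRotate8 macro used by the deleted code */
static uint32_t rotl32(uint32_t x, unsigned n)
{
    return (x << n) | (x >> (32 - n));
}

int main(void)
{
    /* One SKINNY state row with cells b0..b3 stored little-endian,
     * exactly as le_load_word32() would pack them */
    uint8_t row[4] = { 0x11, 0x22, 0x33, 0x44 };
    uint32_t w = (uint32_t)row[0] | ((uint32_t)row[1] << 8) |
                 ((uint32_t)row[2] << 16) | ((uint32_t)row[3] << 24);

    /* Rotating the cells right by one position gives the row b3 b0 b1 b2 */
    uint32_t rotated_cells = (uint32_t)row[3] | ((uint32_t)row[0] << 8) |
                             ((uint32_t)row[1] << 16) | ((uint32_t)row[2] << 24);

    /* The same result falls out of a left rotate of the packed word by 8 */
    printf("%s\n", rotl32(w, 8) == rotated_cells ? "match" : "mismatch");
    return 0;
}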
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0x15; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Permute TK1 to fast-forward it to the end of the key schedule */ - skinny128_fast_forward_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_fast_forward_tk(TK2); - skinny128_fast_forward_tk(TK3); - for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
- skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - skinny128_LFSR3(TK3[2]); - skinny128_LFSR3(TK3[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_inv_permute_tk(TK3); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); - skinny128_LFSR2(TK3[2]); - skinny128_LFSR2(TK3[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); - TK2[0] = le_load_word32(tk2); - TK2[1] = le_load_word32(tk2 + 4); - TK2[2] = le_load_word32(tk2 + 8); - TK2[3] = le_load_word32(tk2 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; - s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK3); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); -#else - /* Set the initial states of TK1 and TK2 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Set up the key schedule using TK2. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
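The rc recurrence used throughout these routines is a 6-bit LFSR that produces the SKINNY round constants 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, and so on. The decryption routines seed rc one forward step past the final round's constant (0x09 after 48 rounds, 0x15 after 56 rounds), so that their reversed update regenerates the constants in descending order; this reading is derived from the seed values in the decryption functions. A minimal standalone sketch of both directions:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t rc = 0;
    int i;

    /* Forward update, as used by the init/encrypt paths */
    for (i = 1; i <= 57; ++i) {
        rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
        rc &= 0x3F;
        if (i <= 6 || i == 48 || i == 49 || i == 56 || i == 57)
            printf("rc after %2d steps = 0x%02X\n", i, rc);
        /* Prints 0x01 0x03 0x07 0x0F 0x1F 0x3E for the first rounds,
         * 0x04 and 0x0A for the last SKINNY-128-256/384 rounds, and
         * 0x09 / 0x15 one step later: the decryption seed values */
    }

    /* Reverse update, as used by the decryption paths: one step back
     * from the seed recovers the final round's constant */
    rc = 0x09;
    rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20);
    printf("0x09 stepped back = 0x%02X\n", rc); /* 0x04 */
    rc = 0x15;
    rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20);
    printf("0x15 stepped back = 0x%02X\n", rc); /* 0x0A */
    return 0;
}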
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ (rc >> 4); - - /* Permute TK2 for the next round */ - skinny128_permute_tk(TK2); - - /* Apply the LFSR to TK2 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } -#endif -} - -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0x09; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1. 
- * There is no need to fast-forward TK1 because the value at - * the end of the key schedule is the same as at the start */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2. - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
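The word-level column mix used by the encryption routines and the inverse mix used by the decryption routines are easy to sanity-check in isolation. A minimal standalone sketch that applies one forward mix followed by one inverse mix to arbitrary state words (the constants below are placeholder test values, not vectors from the deleted files):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t s0 = 0x01234567, s1 = 0x89ABCDEF, s2 = 0xDEADBEEF, s3 = 0xCAFEBABE;
    uint32_t a0 = s0, a1 = s1, a2 = s2, a3 = s3;
    uint32_t temp;

    /* Forward mix of the columns, as in the encrypt routines */
    s1 ^= s2;
    s2 ^= s0;
    temp = s3 ^ s2;
    s3 = s2;
    s2 = s1;
    s1 = s0;
    s0 = temp;

    /* Inverse mix of the columns, as in the decrypt routines */
    temp = s3;
    s3 = s0;
    s0 = s1;
    s1 = s2;
    s3 ^= temp;
    s2 = temp ^ s0;
    s1 ^= s2;

    printf("%s\n", (s0 == a0 && s1 == a1 && s2 == a2 && s3 == a3)
                   ? "round trip OK" : "round trip FAILED");
    return 0;
}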
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -#else /* __AVR__ */ - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - memcpy(ks->TK2, tk2, 16); - skinny_128_384_encrypt(ks, output, input); -} - -#endif /* __AVR__ */ diff --git a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-skinny128.h b/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-skinny128.h deleted file mode 100644 index 2bfda3c..0000000 --- a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-skinny128.h +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNY128_H -#define LW_INTERNAL_SKINNY128_H - -/** - * \file internal-skinny128.h - * \brief SKINNY-128 block cipher family. - * - * References: https://eprint.iacr.org/2016/660.pdf, - * https://sites.google.com/site/skinnycipher/ - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \def SKINNY_128_SMALL_SCHEDULE - * \brief Defined to 1 to use the small key schedule version of SKINNY-128. - */ -#if defined(__AVR__) -#define SKINNY_128_SMALL_SCHEDULE 1 -#else -#define SKINNY_128_SMALL_SCHEDULE 0 -#endif - -/** - * \brief Size of a block for SKINNY-128 block ciphers. - */ -#define SKINNY_128_BLOCK_SIZE 16 - -/** - * \brief Number of rounds for SKINNY-128-384. - */ -#define SKINNY_128_384_ROUNDS 56 - -/** - * \brief Structure of the key schedule for SKINNY-128-384. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; - - /** TK3 for the small key schedule */ - uint8_t TK3[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_384_ROUNDS * 2]; -#endif - -} skinny_128_384_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-384. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and an explicitly - * provided TK2 value. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tk2 TK2 value that should be updated on the fly. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when both TK1 and TK2 change from block to block. - * When the key is initialized with skinny_128_384_init(), the TK2 part of - * the key value should be set to zero. - * - * \note Some versions of this function may modify the key schedule to - * copy tk2 into place. - */ -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and a - * fully specified tweakey value. - * - * \param key Points to the 384-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-384 but - * more memory-efficient. - */ -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input); - -/** - * \brief Number of rounds for SKINNY-128-256. - */ -#define SKINNY_128_256_ROUNDS 48 - -/** - * \brief Structure of the key schedule for SKINNY-128-256. 
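SKINNY_128_SMALL_SCHEDULE trades key-schedule storage for recomputing the TK2/TK3 contribution in every round. A rough sketch of the sizes implied by the key-schedule structures in this header, assuming 4-byte uint32_t and ignoring any struct padding:

#include <stdio.h>

int main(void)
{
    /* Full schedule: TK1[16] plus two 32-bit words per round */
    unsigned full_384 = 16 + 56 * 2 * 4;   /* 464 bytes for SKINNY-128-384 */
    unsigned full_256 = 16 + 48 * 2 * 4;   /* 400 bytes for SKINNY-128-256 */

    /* Small schedule: just the raw TK1/TK2(/TK3) values */
    unsigned small_384 = 16 * 3;           /* 48 bytes */
    unsigned small_256 = 16 * 2;           /* 32 bytes */

    printf("SKINNY-128-384 schedule: %u vs %u bytes\n", full_384, small_384);
    printf("SKINNY-128-256 schedule: %u vs %u bytes\n", full_256, small_256);
    return 0;
}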
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_256_ROUNDS * 2]; -#endif - -} skinny_128_256_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256 and a - * fully specified tweakey value. - * - * \param key Points to the 256-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-256 but - * more memory-efficient. - */ -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-skinnyutil.h b/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-skinnyutil.h deleted file mode 100644 index 83136cb..0000000 --- a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-skinnyutil.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNYUTIL_H -#define LW_INTERNAL_SKINNYUTIL_H - -/** - * \file internal-skinnyutil.h - * \brief Utilities to help implement SKINNY and its variants. - */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @cond skinnyutil */ - -/* Utilities for implementing SKINNY-128 */ - -#define skinny128_LFSR2(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ - (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ - } while (0) - - -#define skinny128_LFSR3(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ - (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) -#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) - -#define skinny128_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint32_t row2 = tk[2]; \ - uint32_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 16) | (row3 >> 16); \ - tk[0] = ((row2 >> 8) & 0x000000FFU) | \ - ((row2 << 16) & 0x00FF0000U) | \ - ( row3 & 0xFF00FF00U); \ - tk[1] = ((row2 >> 16) & 0x000000FFU) | \ - (row2 & 0xFF000000U) | \ - ((row3 << 8) & 0x0000FF00U) | \ - ( row3 & 0x00FF0000U); \ - } while (0) - -#define skinny128_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint32_t row0 = tk[0]; \ - uint32_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 >> 16) & 0x000000FFU) | \ - ((row0 << 8) & 0x0000FF00U) | \ - ((row1 << 16) & 0x00FF0000U) | \ - ( row1 & 0xFF000000U); \ - tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ - ((row0 << 16) & 0xFF000000U) | \ - ((row1 >> 16) & 0x000000FFU) | \ - ((row1 << 8) & 0x00FF0000U); \ - } while (0) - -/* - * Apply the SKINNY sbox. The original version from the specification is - * equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE(x) - * ((((x) & 0x01010101U) << 2) | - * (((x) & 0x06060606U) << 5) | - * (((x) & 0x20202020U) >> 5) | - * (((x) & 0xC8C8C8C8U) >> 2) | - * (((x) & 0x10101010U) >> 1)) - * - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * return SBOX_SWAP(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. 
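The reference decomposition quoted above (SBOX_MIX, SBOX_PERMUTE, SBOX_SWAP) can be materialized directly as a byte table, which gives a convenient cross-check for the bit-sliced macro that follows. A minimal standalone sketch using per-byte versions of those macros (the 32-bit masks reduced to their low byte, which is an assumption of this sketch); the first entry comes out as 0x65, matching the first value of the SKINNY-128 S-box in the specification:

#include <stdint.h>
#include <stdio.h>

/* Per-byte versions of the reference macros quoted in the comment above */
static uint8_t sbox_mix(uint8_t x)
{
    return (uint8_t)(((~(((x >> 1) | x) >> 2)) & 0x11) ^ x);
}
static uint8_t sbox_permute(uint8_t x)
{
    return (uint8_t)(((x & 0x01) << 2) | ((x & 0x06) << 5) |
                     ((x & 0x20) >> 5) | ((x & 0xC8) >> 2) |
                     ((x & 0x10) >> 1));
}
static uint8_t sbox_swap(uint8_t x)
{
    return (uint8_t)((x & 0xF9) | ((x >> 1) & 0x02) | ((x << 1) & 0x04));
}

int main(void)
{
    uint8_t table[256];
    unsigned i;
    for (i = 0; i < 256; ++i) {
        uint8_t x = (uint8_t)i;
        x = sbox_mix(x);     x = sbox_permute(x);
        x = sbox_mix(x);     x = sbox_permute(x);
        x = sbox_mix(x);     x = sbox_permute(x);
        x = sbox_mix(x);
        table[i] = sbox_swap(x);
    }
    printf("S[0x00] = 0x%02X\n", table[0]); /* 0x65 */
    return 0;
}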
- */ -#define skinny128_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ - y = (((x >> 5) & (x << 1)) & 0x04040404U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ - x = ((x & 0x08080808U) << 1) | \ - ((x & 0x32323232U) << 2) | \ - ((x & 0x01010101U) << 5) | \ - ((x & 0x80808080U) >> 6) | \ - ((x & 0x40404040U) >> 4) | \ - ((x & 0x04040404U) >> 2); \ -} while (0) - -/* - * Apply the inverse of the SKINNY sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE_INV(x) - * ((((x) & 0x08080808U) << 1) | - * (((x) & 0x32323232U) << 2) | - * (((x) & 0x01010101U) << 5) | - * (((x) & 0xC0C0C0C0U) >> 5) | - * (((x) & 0x04040404U) >> 2)) - * - * x = SBOX_SWAP(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_inv_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ - x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ - y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ - x = ((x & 0x01010101U) << 2) | \ - ((x & 0x04040404U) << 4) | \ - ((x & 0x02020202U) << 6) | \ - ((x & 0x20202020U) >> 5) | \ - ((x & 0xC8C8C8C8U) >> 2) | \ - ((x & 0x10101010U) >> 1); \ -} while (0) - -/* Utilities for implementing SKINNY-64 */ - -#define skinny64_LFSR2(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ - } while (0) - -#define skinny64_LFSR3(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) -#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) - -#define skinny64_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint16_t row2 = tk[2]; \ - uint16_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 8) | (row3 >> 8); \ - tk[0] = ((row2 << 4) & 0xF000U) | \ - ((row2 >> 8) & 0x00F0U) | \ - ( row3 & 0x0F0FU); \ - tk[1] = ((row2 << 8) & 0xF000U) | \ - ((row3 >> 4) & 0x0F00U) | \ - ( row3 & 0x00F0U) | \ - ( row2 & 0x000FU); \ - } while (0) - 
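The header above states that LFSR2 and LFSR3 are inverses of each other and defines the inverse macros accordingly. A minimal standalone check of that round trip for the SKINNY-128 word-level updates, with the two macros copied verbatim from internal-skinnyutil.h:

#include <stdint.h>
#include <stdio.h>

/* Copies of the LFSR macros from internal-skinnyutil.h */
#define skinny128_LFSR2(x) \
    do { \
        uint32_t _x = (x); \
        (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \
              (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \
    } while (0)

#define skinny128_LFSR3(x) \
    do { \
        uint32_t _x = (x); \
        (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \
              (((_x << 7) ^ (_x << 1)) & 0x80808080U); \
    } while (0)

int main(void)
{
    uint32_t x, ok = 1;
    /* Exhaustively check one byte position; the masks keep each byte of
     * the word independent, so every byte behaves the same way */
    for (x = 0; x < 256; ++x) {
        uint32_t y = x;
        skinny128_LFSR2(y);
        skinny128_LFSR3(y);
        if (y != x)
            ok = 0;
    }
    printf("%s\n", ok ? "LFSR2/LFSR3 are inverses" : "mismatch");
    return 0;
}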
-#define skinny64_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint16_t row0 = tk[0]; \ - uint16_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 << 8) & 0xF000U) | \ - ((row0 >> 4) & 0x0F00U) | \ - ((row1 >> 8) & 0x00F0U) | \ - ( row1 & 0x000FU); \ - tk[3] = ((row1 << 8) & 0xF000U) | \ - ((row0 << 8) & 0x0F00U) | \ - ((row1 >> 4) & 0x00F0U) | \ - ((row0 >> 8) & 0x000FU); \ - } while (0) - -/* - * Apply the SKINNY-64 sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT(x) - * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_SHIFT steps to be performed with one final rotation. - * This reduces the number of required shift operations from 14 to 10. - * - * We can further reduce the number of NOT operations from 4 to 2 - * using the technique from https://github.com/kste/skinny_avx to - * convert NOR-XOR operations into AND-XOR operations by converting - * the S-box into its NOT-inverse. - */ -#define skinny64_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ - x = ~x; \ - x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ -} while (0) - -/* - * Apply the inverse of the SKINNY-64 sbox. The original version - * from the specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT_INV(x) - * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * return SBOX_MIX(x); - */ -#define skinny64_inv_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = ~x; \ - x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ -} while (0) - -/** @endcond */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-util.h b/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - 
(ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. 
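The load/store and XOR helpers above are straightforward, but it is easy to mix up which end of the buffer holds the most significant byte. A short sketch, using only the 32-bit macros and lw_xor_block_2_src copied from above, shows the big-endian and little-endian views of the same four bytes and that the two-source XOR leaves its inputs untouched:

#include <stdint.h>
#include <stdio.h>

/* Copied verbatim from internal-util.h above */
#define be_load_word32(ptr) \
    ((((uint32_t)((ptr)[0])) << 24) | \
     (((uint32_t)((ptr)[1])) << 16) | \
     (((uint32_t)((ptr)[2])) << 8) | \
     ((uint32_t)((ptr)[3])))
#define le_load_word32(ptr) \
    ((((uint32_t)((ptr)[3])) << 24) | \
     (((uint32_t)((ptr)[2])) << 16) | \
     (((uint32_t)((ptr)[1])) << 8) | \
     ((uint32_t)((ptr)[0])))
#define lw_xor_block_2_src(dest, src1, src2, len) \
    do { \
        unsigned char *_dest = (dest); \
        const unsigned char *_src1 = (src1); \
        const unsigned char *_src2 = (src2); \
        unsigned _len = (len); \
        while (_len > 0) { \
            *_dest++ = *_src1++ ^ *_src2++; \
            --_len; \
        } \
    } while (0)

int main(void)
{
    const unsigned char buf[4] = {0x01, 0x02, 0x03, 0x04};
    unsigned char a[4] = {0xFF, 0x00, 0xFF, 0x00};
    unsigned char out[4];

    /* Same bytes, two interpretations: 0x01020304 vs 0x04030201 */
    printf("be = %08lx\n", (unsigned long)be_load_word32(buf));
    printf("le = %08lx\n", (unsigned long)le_load_word32(buf));

    /* out[i] = a[i] ^ buf[i]; neither source buffer is modified */
    lw_xor_block_2_src(out, a, buf, 4);
    printf("xor = %02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);
    return 0;
}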
This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
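AVR has no barrel shifter, so the comment above explains that only rotations by 1 bit or by whole bytes are cheap and that all other rotation counts are composed from those. The identity behind, for example, the composed leftRotate5 (rotate left by 8, then right by three single bits) can be checked on any host with a small sketch; the generic rotate macros are copied from above, and leftRotate5_composed is a local name used only here to avoid clashing with the library's own definition:

#include <stdint.h>
#include <stdio.h>

/* Generic rotates, copied verbatim from internal-util.h above
 * (they use the GCC/Clang statement-expression extension) */
#define leftRotate(a, bits) \
    (__extension__ ({ \
        uint32_t _temp = (a); \
        (_temp << (bits)) | (_temp >> (32 - (bits))); \
    }))
#define rightRotate(a, bits) \
    (__extension__ ({ \
        uint32_t _temp = (a); \
        (_temp >> (bits)) | (_temp << (32 - (bits))); \
    }))

/* Composed form from the LW_CRYPTO_ROTATE32_COMPOSED branch above:
 * rotate left by 8 (cheap on AVR), then right by three single bits */
#define leftRotate5_composed(a) \
    (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1))

int main(void)
{
    uint32_t x = 0x12345678U;
    uint32_t direct = leftRotate(x, 5);
    uint32_t composed = leftRotate5_composed(x);
    printf("direct   = %08lx\n", (unsigned long)direct);
    printf("composed = %08lx\n", (unsigned long)composed);
    return (direct == composed) ? 0 : 1;
}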
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/romulus.c b/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/romulus.c deleted file mode 100644 index bb19cc5..0000000 --- a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/romulus.c +++ /dev/null @@ -1,1974 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "romulus.h" -#include "internal-skinny128.h" -#include "internal-util.h" -#include - -aead_cipher_t const romulus_n1_cipher = { - "Romulus-N1", - ROMULUS_KEY_SIZE, - ROMULUS1_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_n1_aead_encrypt, - romulus_n1_aead_decrypt -}; - -aead_cipher_t const romulus_n2_cipher = { - "Romulus-N2", - ROMULUS_KEY_SIZE, - ROMULUS2_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_n2_aead_encrypt, - romulus_n2_aead_decrypt -}; - -aead_cipher_t const romulus_n3_cipher = { - "Romulus-N3", - ROMULUS_KEY_SIZE, - ROMULUS3_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_n3_aead_encrypt, - romulus_n3_aead_decrypt -}; - -aead_cipher_t const romulus_m1_cipher = { - "Romulus-M1", - ROMULUS_KEY_SIZE, - ROMULUS1_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_m1_aead_encrypt, - romulus_m1_aead_decrypt -}; - -aead_cipher_t const romulus_m2_cipher = { - "Romulus-M2", - ROMULUS_KEY_SIZE, - ROMULUS2_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_m2_aead_encrypt, - romulus_m2_aead_decrypt -}; - -aead_cipher_t const romulus_m3_cipher = { - "Romulus-M3", - ROMULUS_KEY_SIZE, - ROMULUS3_NONCE_SIZE, - ROMULUS_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - romulus_m3_aead_encrypt, - romulus_m3_aead_decrypt -}; - -/** - * \brief Limit on the number of bytes of message or associated data (128Mb). - * - * Romulus-N1 and Romulus-M1 use a 56-bit block counter which allows for - * payloads well into the petabyte range. It is unlikely that an embedded - * device will have that much memory to store a contiguous packet! - * - * Romulus-N2 and Romulus-M2 use a 48-bit block counter but the upper - * 24 bits are difficult to modify in the key schedule. So we only - * update the low 24 bits and leave the high 24 bits fixed. - * - * Romulus-N3 and Romulus-M3 use a 24-bit block counter. - * - * For all algorithms, we limit the block counter to 2^23 so that the block - * counter can never exceed 2^24 - 1. - */ -#define ROMULUS_DATA_LIMIT \ - ((unsigned long long)((1ULL << 23) * SKINNY_128_BLOCK_SIZE)) - -/** - * \brief Initializes the key schedule for Romulus-N1 or Romulus-M1. - * - * \param ks Points to the key schedule to initialize. - * \param k Points to the 16 bytes of the key. - * \param npub Points to the 16 bytes of the nonce. May be NULL - * if the nonce will be updated on the fly. - */ -static void romulus1_init - (skinny_128_384_key_schedule_t *ks, - const unsigned char *k, const unsigned char *npub) -{ - unsigned char TK[48]; - TK[0] = 0x01; /* Initialize the 56-bit LFSR counter */ - memset(TK + 1, 0, 15); - if (npub) - memcpy(TK + 16, npub, 16); - else - memset(TK + 16, 0, 16); - memcpy(TK + 32, k, 16); - skinny_128_384_init(ks, TK); -} - -/** - * \brief Initializes the key schedule for Romulus-N2 or Romulus-M2. - * - * \param ks Points to the key schedule to initialize. - * \param k Points to the 16 bytes of the key. - * \param npub Points to the 12 bytes of the nonce. May be NULL - * if the nonce will be updated on the fly. 
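As a quick sanity check of the limit above: (1ULL << 23) blocks of SKINNY_128_BLOCK_SIZE = 16 bytes works out to 2^27 = 134,217,728 bytes, i.e. 128 MiB, which matches the "128Mb" figure in the comment. The romulus1_init routine above then simply lays out the 48-byte tweakey as TK1 = the 56-bit LFSR block counter (initialised to 1 and zero-padded to 16 bytes), TK2 = the 16-byte nonce, and TK3 = the 16-byte key before expanding the schedule.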
- */ -static void romulus2_init - (skinny_128_384_key_schedule_t *ks, - const unsigned char *k, const unsigned char *npub) -{ - unsigned char TK[48]; - TK[0] = 0x01; /* Initialize the low 24 bits of the LFSR counter */ - if (npub) { - TK[1] = TK[2] = TK[3] = 0; - memcpy(TK + 4, npub, 12); - } else { - memset(TK + 1, 0, 15); - } - memcpy(TK + 16, k, 16); - TK[32] = 0x01; /* Initialize the high 24 bits of the LFSR counter */ - memset(TK + 33, 0, 15); - skinny_128_384_init(ks, TK); -} - -/** - * \brief Initializes the key schedule for Romulus-N3 or Romulus-M3. - * - * \param ks Points to the key schedule to initialize. - * \param k Points to the 16 bytes of the key. - * \param npub Points to the 12 bytes of the nonce. May be NULL - * if the nonce will be updated on the fly. - */ -static void romulus3_init - (skinny_128_256_key_schedule_t *ks, - const unsigned char *k, const unsigned char *npub) -{ - unsigned char TK[32]; - TK[0] = 0x01; /* Initialize the 24-bit LFSR counter */ - if (npub) { - TK[1] = TK[2] = TK[3] = 0; - memcpy(TK + 4, npub, 12); - } else { - memset(TK + 1, 0, 15); - } - memcpy(TK + 16, k, 16); - skinny_128_256_init(ks, TK); -} - -/** - * \brief Sets the domain separation value for Romulus-N1 and M1. - * - * \param ks The key schedule to set the domain separation value into. - * \param domain The domain separation value. - */ -#define romulus1_set_domain(ks, domain) ((ks)->TK1[7] = (domain)) - -/** - * \brief Sets the domain separation value for Romulus-N2 and M2. - * - * \param ks The key schedule to set the domain separation value into. - * \param domain The domain separation value. - */ -#define romulus2_set_domain(ks, domain) ((ks)->TK1[3] = (domain)) - -/** - * \brief Sets the domain separation value for Romulus-N3 and M3. - * - * \param ks The key schedule to set the domain separation value into. - * \param domain The domain separation value. - */ -#define romulus3_set_domain(ks, domain) ((ks)->TK1[3] = (domain)) - -/** - * \brief Updates the 56-bit LFSR block counter for Romulus-N1 and M1. - * - * \param TK1 Points to the TK1 part of the key schedule containing the LFSR. - */ -STATIC_INLINE void romulus1_update_counter(uint8_t TK1[16]) -{ - uint8_t mask = (uint8_t)(((int8_t)(TK1[6])) >> 7); - TK1[6] = (TK1[6] << 1) | (TK1[5] >> 7); - TK1[5] = (TK1[5] << 1) | (TK1[4] >> 7); - TK1[4] = (TK1[4] << 1) | (TK1[3] >> 7); - TK1[3] = (TK1[3] << 1) | (TK1[2] >> 7); - TK1[2] = (TK1[2] << 1) | (TK1[1] >> 7); - TK1[1] = (TK1[1] << 1) | (TK1[0] >> 7); - TK1[0] = (TK1[0] << 1) ^ (mask & 0x95); -} - -/** - * \brief Updates the 24-bit LFSR block counter for Romulus-N2 or M2. - * - * \param TK1 Points to the TK1 part of the key schedule containing the LFSR. - * - * For Romulus-N2 and Romulus-M2 this will only update the low 24 bits of - * the 48-bit LFSR. The high 24 bits are fixed due to ROMULUS_DATA_LIMIT. - */ -STATIC_INLINE void romulus2_update_counter(uint8_t TK1[16]) -{ - uint8_t mask = (uint8_t)(((int8_t)(TK1[2])) >> 7); - TK1[2] = (TK1[2] << 1) | (TK1[1] >> 7); - TK1[1] = (TK1[1] << 1) | (TK1[0] >> 7); - TK1[0] = (TK1[0] << 1) ^ (mask & 0x1B); -} - -/** - * \brief Updates the 24-bit LFSR block counter for Romulus-N3 or M3. - * - * \param TK1 Points to the TK1 part of the key schedule containing the LFSR. - */ -#define romulus3_update_counter(TK1) romulus2_update_counter((TK1)) - -/** - * \brief Process the asssociated data for Romulus-N1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. 
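The 24-bit counter update above is a byte-wise left shift of TK1[0..2] with the feedback byte 0x1B folded in when the top bit falls out. A standalone sketch can make the stepping visible; counter24_step mirrors romulus2_update_counter/romulus3_update_counter from above, and the driver (not part of the library) prints the first few states starting from the initial value set by romulus3_init:

#include <stdint.h>
#include <stdio.h>

/* Mirrors romulus2_update_counter()/romulus3_update_counter() above,
 * operating on the first three bytes of TK1 */
static void counter24_step(uint8_t TK1[16])
{
    uint8_t mask = (uint8_t)(((int8_t)(TK1[2])) >> 7);
    TK1[2] = (TK1[2] << 1) | (TK1[1] >> 7);
    TK1[1] = (TK1[1] << 1) | (TK1[0] >> 7);
    TK1[0] = (TK1[0] << 1) ^ (mask & 0x1B);
}

int main(void)
{
    uint8_t TK1[16] = {0x01};   /* counter initialised to 1, rest zero */
    int i;
    for (i = 0; i < 8; ++i) {
        printf("step %d: %02x %02x %02x\n", i, TK1[0], TK1[1], TK1[2]);
        counter24_step(TK1);    /* 01 -> 02 -> 04 -> 08 -> ... */
    }
    return 0;
}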
- * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - */ -static void romulus_n1_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char temp; - - /* Handle the special case of no associated data */ - if (adlen == 0) { - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x1A); - skinny_128_384_encrypt_tk2(ks, S, S, npub); - return; - } - - /* Process all double blocks except the last */ - romulus1_set_domain(ks, 0x08); - while (adlen > 32) { - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - ad += 32; - adlen -= 32; - } - - /* Pad and process the left-over blocks */ - romulus1_update_counter(ks->TK1); - temp = (unsigned)adlen; - if (temp == 32) { - /* Left-over complete double block */ - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x18); - } else if (temp > 16) { - /* Left-over partial double block */ - unsigned char pad[16]; - temp -= 16; - lw_xor_block(S, ad, 16); - memcpy(pad, ad + 16, temp); - memset(pad + temp, 0, 15 - temp); - pad[15] = temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x1A); - } else if (temp == 16) { - /* Left-over complete single block */ - lw_xor_block(S, ad, temp); - romulus1_set_domain(ks, 0x18); - } else { - /* Left-over partial single block */ - lw_xor_block(S, ad, temp); - S[15] ^= temp; - romulus1_set_domain(ks, 0x1A); - } - skinny_128_384_encrypt_tk2(ks, S, S, npub); -} - -/** - * \brief Process the asssociated data for Romulus-N2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
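In romulus_n1_process_ad above, a partial second half of a double block is padded to a full 16-byte tweak by zero-filling and writing the number of real bytes into the final byte. The same convention recurs throughout this file, so a small helper shows it in isolation; romulus_pad16 is a hypothetical name used only for this sketch, not a function from the library:

#include <string.h>
#include <stdio.h>

/* Hypothetical helper mirroring the partial double-block padding in
 * romulus_n1_process_ad(): copy the "len" real bytes, zero the rest,
 * and record the length in the last byte.  Requires len < 16. */
static void romulus_pad16(unsigned char pad[16],
                          const unsigned char *data, unsigned len)
{
    memcpy(pad, data, len);
    memset(pad + len, 0, 15 - len);
    pad[15] = (unsigned char)len;
}

int main(void)
{
    const unsigned char ad[5] = {0xAA, 0xBB, 0xCC, 0xDD, 0xEE};
    unsigned char pad[16];
    unsigned i;
    romulus_pad16(pad, ad, 5);
    for (i = 0; i < 16; ++i)
        printf("%02x ", pad[i]);
    printf("\n");   /* aa bb cc dd ee 00 ... 00 05 */
    return 0;
}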
- */ -static void romulus_n2_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char temp; - - /* Handle the special case of no associated data */ - if (adlen == 0) { - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x5A); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all double blocks except the last */ - romulus2_set_domain(ks, 0x48); - while (adlen > 28) { - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Pad and process the left-over blocks */ - romulus2_update_counter(ks->TK1); - temp = (unsigned)adlen; - if (temp == 28) { - /* Left-over complete double block */ - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x58); - } else if (temp > 16) { - /* Left-over partial double block */ - temp -= 16; - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp); - ks->TK1[15] = temp; - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x5A); - } else if (temp == 16) { - /* Left-over complete single block */ - lw_xor_block(S, ad, temp); - romulus2_set_domain(ks, 0x58); - } else { - /* Left-over partial single block */ - lw_xor_block(S, ad, temp); - S[15] ^= temp; - romulus2_set_domain(ks, 0x5A); - } - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Process the asssociated data for Romulus-N3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
- */ -static void romulus_n3_process_ad - (skinny_128_256_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char temp; - - /* Handle the special case of no associated data */ - if (adlen == 0) { - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x9A); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_256_encrypt(ks, S, S); - return; - } - - /* Process all double blocks except the last */ - romulus3_set_domain(ks, 0x88); - while (adlen > 28) { - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Pad and process the left-over blocks */ - romulus3_update_counter(ks->TK1); - temp = (unsigned)adlen; - if (temp == 28) { - /* Left-over complete double block */ - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x98); - } else if (temp > 16) { - /* Left-over partial double block */ - temp -= 16; - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp); - ks->TK1[15] = temp; - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x9A); - } else if (temp == 16) { - /* Left-over complete single block */ - lw_xor_block(S, ad, temp); - romulus3_set_domain(ks, 0x98); - } else { - /* Left-over partial single block */ - lw_xor_block(S, ad, temp); - S[15] ^= temp; - romulus3_set_domain(ks, 0x9A); - } - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Determine the domain separation value to use on the last - * block of the associated data processing. - * - * \param adlen Length of the associated data in bytes. - * \param mlen Length of the message in bytes. - * \param t Size of the second half of a double block; 12 or 16. - * - * \return The domain separation bits to use to finalize the last block. - */ -static uint8_t romulus_m_final_ad_domain - (unsigned long long adlen, unsigned long long mlen, unsigned t) -{ - uint8_t domain = 0; - unsigned split = 16U; - unsigned leftover; - - /* Determine which domain bits we need based on the length of the ad */ - if (adlen == 0) { - /* No associated data, so only 1 block with padding */ - domain ^= 0x02; - split = t; - } else { - /* Even or odd associated data length? */ - leftover = (unsigned)(adlen % (16U + t)); - if (leftover == 0) { - /* Even with a full double block at the end */ - domain ^= 0x08; - } else if (leftover < split) { - /* Odd with a partial single block at the end */ - domain ^= 0x02; - split = t; - } else if (leftover > split) { - /* Even with a partial double block at the end */ - domain ^= 0x0A; - } else { - /* Odd with a full single block at the end */ - split = t; - } - } - - /* Determine which domain bits we need based on the length of the message */ - if (mlen == 0) { - /* No message, so only 1 block with padding */ - domain ^= 0x01; - } else { - /* Even or odd message length? 
*/ - leftover = (unsigned)(mlen % (16U + t)); - if (leftover == 0) { - /* Even with a full double block at the end */ - domain ^= 0x04; - } else if (leftover < split) { - /* Odd with a partial single block at the end */ - domain ^= 0x01; - } else if (leftover > split) { - /* Even with a partial double block at the end */ - domain ^= 0x05; - } - } - return domain; -} - -/** - * \brief Process the asssociated data for Romulus-M1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - * \param m Points to the message plaintext. - * \param mlen Length of the message plaintext. - */ -static void romulus_m1_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char pad[16]; - uint8_t final_domain = 0x30; - unsigned temp; - - /* Determine the domain separator to use on the final block */ - final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 16); - - /* Process all associated data double blocks except the last */ - romulus1_set_domain(ks, 0x28); - while (adlen > 32) { - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - ad += 32; - adlen -= 32; - } - - /* Process the last associated data double block */ - temp = (unsigned)adlen; - if (temp == 32) { - /* Last associated data double block is full */ - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - skinny_128_384_encrypt_tk2(ks, S, S, ad + 16); - romulus1_update_counter(ks->TK1); - } else if (temp > 16) { - /* Last associated data double block is partial */ - temp -= 16; - romulus1_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(pad, ad + 16, temp); - memset(pad + temp, 0, sizeof(pad) - temp - 1); - pad[sizeof(pad) - 1] = (unsigned char)temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - romulus1_update_counter(ks->TK1); - } else { - /* Last associated data block is single. 
Needs to be combined - * with the first block of the message payload */ - romulus1_set_domain(ks, 0x2C); - romulus1_update_counter(ks->TK1); - if (temp == 16) { - lw_xor_block(S, ad, 16); - } else { - lw_xor_block(S, ad, temp); - S[15] ^= (unsigned char)temp; - } - if (mlen > 16) { - skinny_128_384_encrypt_tk2(ks, S, S, m); - romulus1_update_counter(ks->TK1); - m += 16; - mlen -= 16; - } else if (mlen == 16) { - skinny_128_384_encrypt_tk2(ks, S, S, m); - m += 16; - mlen -= 16; - } else { - temp = (unsigned)mlen; - memcpy(pad, m, temp); - memset(pad + temp, 0, sizeof(pad) - temp - 1); - pad[sizeof(pad) - 1] = (unsigned char)temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - mlen = 0; - } - } - - /* Process all message double blocks except the last */ - romulus1_set_domain(ks, 0x2C); - while (mlen > 32) { - romulus1_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - skinny_128_384_encrypt_tk2(ks, S, S, m + 16); - romulus1_update_counter(ks->TK1); - m += 32; - mlen -= 32; - } - - /* Process the last message double block */ - temp = (unsigned)mlen; - if (temp == 32) { - /* Last message double block is full */ - romulus1_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - skinny_128_384_encrypt_tk2(ks, S, S, m + 16); - } else if (temp > 16) { - /* Last message double block is partial */ - temp -= 16; - romulus1_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(pad, m + 16, temp); - memset(pad + temp, 0, sizeof(pad) - temp - 1); - pad[sizeof(pad) - 1] = (unsigned char)temp; - skinny_128_384_encrypt_tk2(ks, S, S, pad); - } else if (temp == 16) { - /* Last message single block is full */ - lw_xor_block(S, m, 16); - } else if (temp > 0) { - /* Last message single block is partial */ - lw_xor_block(S, m, temp); - S[15] ^= (unsigned char)temp; - } - - /* Process the last partial block */ - romulus1_set_domain(ks, final_domain); - romulus1_update_counter(ks->TK1); - skinny_128_384_encrypt_tk2(ks, S, S, npub); -} - -/** - * \brief Process the asssociated data for Romulus-M2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - * \param m Points to the message plaintext. - * \param mlen Length of the message plaintext. 
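As a concrete trace of romulus_m_final_ad_domain for Romulus-M1 (t = 16): with adlen = 20 and mlen = 40, the associated data leaves a leftover of 20 % 32 = 20 bytes, which is greater than the 16-byte split, so domain ^= 0x0A; the message leaves 40 % 32 = 8 bytes, which is less than the split, so domain ^= 0x01, giving 0x0B. romulus_m1_process_ad then XORs this into its base value of 0x30, so the final block is processed with the domain separator 0x3B.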
- */ -static void romulus_m2_process_ad - (skinny_128_384_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *m, unsigned long long mlen) -{ - uint8_t final_domain = 0x70; - unsigned temp; - - /* Determine the domain separator to use on the final block */ - final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 12); - - /* Process all associated data double blocks except the last */ - romulus2_set_domain(ks, 0x68); - while (adlen > 28) { - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Process the last associated data double block */ - temp = (unsigned)adlen; - if (temp == 28) { - /* Last associated data double block is full */ - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - } else if (temp > 16) { - /* Last associated data double block is partial */ - temp -= 16; - romulus2_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - } else { - /* Last associated data block is single. Needs to be combined - * with the first block of the message payload */ - romulus2_set_domain(ks, 0x6C); - romulus2_update_counter(ks->TK1); - if (temp == 16) { - lw_xor_block(S, ad, 16); - } else { - lw_xor_block(S, ad, temp); - S[15] ^= (unsigned char)temp; - } - if (mlen > 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - m += 12; - mlen -= 12; - } else if (mlen == 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_384_encrypt(ks, S, S); - m += 12; - mlen -= 12; - } else { - temp = (unsigned)mlen; - memcpy(ks->TK1 + 4, m, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_384_encrypt(ks, S, S); - mlen = 0; - } - } - - /* Process all message double blocks except the last */ - romulus2_set_domain(ks, 0x6C); - while (mlen > 28) { - romulus2_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_384_encrypt(ks, S, S); - romulus2_update_counter(ks->TK1); - m += 28; - mlen -= 28; - } - - /* Process the last message double block */ - temp = (unsigned)mlen; - if (temp == 28) { - /* Last message double block is full */ - romulus2_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_384_encrypt(ks, S, S); - } else if (temp > 16) { - /* Last message double block is partial */ - temp -= 16; - romulus2_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_384_encrypt(ks, S, S); - } else if (temp == 16) { - /* Last message single block is full */ - lw_xor_block(S, m, 16); - } else if (temp > 0) { - /* Last message single block is partial */ - lw_xor_block(S, m, temp); - S[15] ^= (unsigned char)temp; - } - - /* Process the last partial block */ - romulus2_set_domain(ks, final_domain); - romulus2_update_counter(ks->TK1); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Process the asssociated data 
for Romulus-M3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param npub Points to the nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - * \param m Points to the message plaintext. - * \param mlen Length of the message plaintext. - */ -static void romulus_m3_process_ad - (skinny_128_256_key_schedule_t *ks, - unsigned char S[16], const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *m, unsigned long long mlen) -{ - uint8_t final_domain = 0xB0; - unsigned temp; - - /* Determine the domain separator to use on the final block */ - final_domain ^= romulus_m_final_ad_domain(adlen, mlen, 12); - - /* Process all associated data double blocks except the last */ - romulus3_set_domain(ks, 0xA8); - while (adlen > 28) { - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - ad += 28; - adlen -= 28; - } - - /* Process the last associated data double block */ - temp = (unsigned)adlen; - if (temp == 28) { - /* Last associated data double block is full */ - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - } else if (temp > 16) { - /* Last associated data double block is partial */ - temp -= 16; - romulus3_update_counter(ks->TK1); - lw_xor_block(S, ad, 16); - memcpy(ks->TK1 + 4, ad + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - } else { - /* Last associated data block is single. 
Needs to be combined - * with the first block of the message payload */ - romulus3_set_domain(ks, 0xAC); - romulus3_update_counter(ks->TK1); - if (temp == 16) { - lw_xor_block(S, ad, 16); - } else { - lw_xor_block(S, ad, temp); - S[15] ^= (unsigned char)temp; - } - if (mlen > 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - m += 12; - mlen -= 12; - } else if (mlen == 12) { - memcpy(ks->TK1 + 4, m, 12); - skinny_128_256_encrypt(ks, S, S); - m += 12; - mlen -= 12; - } else { - temp = (unsigned)mlen; - memcpy(ks->TK1 + 4, m, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_256_encrypt(ks, S, S); - mlen = 0; - } - } - - /* Process all message double blocks except the last */ - romulus3_set_domain(ks, 0xAC); - while (mlen > 28) { - romulus3_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_256_encrypt(ks, S, S); - romulus3_update_counter(ks->TK1); - m += 28; - mlen -= 28; - } - - /* Process the last message double block */ - temp = (unsigned)mlen; - if (temp == 28) { - /* Last message double block is full */ - romulus3_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, 12); - skinny_128_256_encrypt(ks, S, S); - } else if (temp > 16) { - /* Last message double block is partial */ - temp -= 16; - romulus3_update_counter(ks->TK1); - lw_xor_block(S, m, 16); - memcpy(ks->TK1 + 4, m + 16, temp); - memset(ks->TK1 + 4 + temp, 0, 12 - temp - 1); - ks->TK1[15] = (unsigned char)temp; - skinny_128_256_encrypt(ks, S, S); - } else if (temp == 16) { - /* Last message single block is full */ - lw_xor_block(S, m, 16); - } else if (temp > 0) { - /* Last message single block is partial */ - lw_xor_block(S, m, temp); - S[15] ^= (unsigned char)temp; - } - - /* Process the last partial block */ - romulus3_set_domain(ks, final_domain); - romulus3_update_counter(ks->TK1); - memcpy(ks->TK1 + 4, npub, 12); - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Applies the Romulus rho function. - * - * \param S The rolling Romulus state. - * \param C Ciphertext message output block. - * \param M Plaintext message input block. - */ -STATIC_INLINE void romulus_rho - (unsigned char S[16], unsigned char C[16], const unsigned char M[16]) -{ - unsigned index; - for (index = 0; index < 16; ++index) { - unsigned char s = S[index]; - unsigned char m = M[index]; - S[index] ^= m; - C[index] = m ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - } -} - -/** - * \brief Applies the inverse of the Romulus rho function. - * - * \param S The rolling Romulus state. - * \param M Plaintext message output block. - * \param C Ciphertext message input block. - */ -STATIC_INLINE void romulus_rho_inverse - (unsigned char S[16], unsigned char M[16], const unsigned char C[16]) -{ - unsigned index; - for (index = 0; index < 16; ++index) { - unsigned char s = S[index]; - unsigned char m = C[index] ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - S[index] ^= m; - M[index] = m; - } -} - -/** - * \brief Applies the Romulus rho function to a short block. - * - * \param S The rolling Romulus state. - * \param C Ciphertext message output block. - * \param M Plaintext message input block. - * \param len Length of the short block, must be less than 16. 
- */ -STATIC_INLINE void romulus_rho_short - (unsigned char S[16], unsigned char C[16], - const unsigned char M[16], unsigned len) -{ - unsigned index; - for (index = 0; index < len; ++index) { - unsigned char s = S[index]; - unsigned char m = M[index]; - S[index] ^= m; - C[index] = m ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - } - S[15] ^= (unsigned char)len; /* Padding */ -} - -/** - * \brief Applies the inverse of the Romulus rho function to a short block. - * - * \param S The rolling Romulus state. - * \param M Plaintext message output block. - * \param C Ciphertext message input block. - * \param len Length of the short block, must be less than 16. - */ -STATIC_INLINE void romulus_rho_inverse_short - (unsigned char S[16], unsigned char M[16], - const unsigned char C[16], unsigned len) -{ - unsigned index; - for (index = 0; index < len; ++index) { - unsigned char s = S[index]; - unsigned char m = C[index] ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7)); - S[index] ^= m; - M[index] = m; - } - S[15] ^= (unsigned char)len; /* Padding */ -} - -/** - * \brief Encrypts a plaintext message with Romulus-N1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n1_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no plaintext */ - if (mlen == 0) { - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x15); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus1_set_domain(ks, 0x04); - while (mlen > 16) { - romulus_rho(S, c, m); - romulus1_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus1_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_short(S, c, m, temp); - romulus1_set_domain(ks, 0x15); - } else { - romulus_rho(S, c, m); - romulus1_set_domain(ks, 0x14); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-N1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. 
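romulus_rho and romulus_rho_inverse above are exact inverses when started from the same state S: rho outputs C = M ^ G(S) and folds M into S, while rho_inverse recovers M = C ^ G(S) and folds it back in, where G here names the per-byte feedback (s >> 1) ^ (s & 0x80) ^ (s << 7). A standalone round-trip sketch; both functions are copied from above (STATIC_INLINE reduced to static), and only the test driver is new:

#include <stdio.h>
#include <string.h>

static void romulus_rho
    (unsigned char S[16], unsigned char C[16], const unsigned char M[16])
{
    unsigned index;
    for (index = 0; index < 16; ++index) {
        unsigned char s = S[index];
        unsigned char m = M[index];
        S[index] ^= m;
        C[index] = m ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7));
    }
}

static void romulus_rho_inverse
    (unsigned char S[16], unsigned char M[16], const unsigned char C[16])
{
    unsigned index;
    for (index = 0; index < 16; ++index) {
        unsigned char s = S[index];
        unsigned char m = C[index] ^ ((s >> 1) ^ (s & 0x80) ^ (s << 7));
        S[index] ^= m;
        M[index] = m;
    }
}

int main(void)
{
    unsigned char S1[16], S2[16], M[16], C[16], M2[16];
    unsigned i;
    for (i = 0; i < 16; ++i) {
        S1[i] = S2[i] = (unsigned char)(i * 17 + 3);  /* arbitrary state */
        M[i] = (unsigned char)(255 - i);              /* arbitrary block */
    }
    romulus_rho(S1, C, M);            /* encrypt direction */
    romulus_rho_inverse(S2, M2, C);   /* decrypt direction */
    if (memcmp(M, M2, 16) == 0 && memcmp(S1, S2, 16) == 0)
        printf("rho / rho_inverse round-trip OK\n");
    else
        printf("round-trip FAILED\n");
    return 0;
}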
- */ -static void romulus_n1_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no ciphertext */ - if (mlen == 0) { - romulus1_update_counter(ks->TK1); - romulus1_set_domain(ks, 0x15); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus1_set_domain(ks, 0x04); - while (mlen > 16) { - romulus_rho_inverse(S, m, c); - romulus1_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus1_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_inverse_short(S, m, c, temp); - romulus1_set_domain(ks, 0x15); - } else { - romulus_rho_inverse(S, m, c); - romulus1_set_domain(ks, 0x14); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Encrypts a plaintext message with Romulus-N2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n2_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no plaintext */ - if (mlen == 0) { - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x55); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus2_set_domain(ks, 0x44); - while (mlen > 16) { - romulus_rho(S, c, m); - romulus2_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus2_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_short(S, c, m, temp); - romulus2_set_domain(ks, 0x55); - } else { - romulus_rho(S, c, m); - romulus2_set_domain(ks, 0x54); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-N2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n2_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no ciphertext */ - if (mlen == 0) { - romulus2_update_counter(ks->TK1); - romulus2_set_domain(ks, 0x55); - skinny_128_384_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus2_set_domain(ks, 0x44); - while (mlen > 16) { - romulus_rho_inverse(S, m, c); - romulus2_update_counter(ks->TK1); - skinny_128_384_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus2_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_inverse_short(S, m, c, temp); - romulus2_set_domain(ks, 0x55); - } else { - romulus_rho_inverse(S, m, c); - romulus2_set_domain(ks, 0x54); - } - skinny_128_384_encrypt(ks, S, S); -} - -/** - * \brief Encrypts a plaintext message with Romulus-N3. - * - * \param ks Points to the key schedule. 
- * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n3_encrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no plaintext */ - if (mlen == 0) { - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x95); - skinny_128_256_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus3_set_domain(ks, 0x84); - while (mlen > 16) { - romulus_rho(S, c, m); - romulus3_update_counter(ks->TK1); - skinny_128_256_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus3_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_short(S, c, m, temp); - romulus3_set_domain(ks, 0x95); - } else { - romulus_rho(S, c, m); - romulus3_set_domain(ks, 0x94); - } - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-N3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_n3_decrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - unsigned temp; - - /* Handle the special case of no ciphertext */ - if (mlen == 0) { - romulus3_update_counter(ks->TK1); - romulus3_set_domain(ks, 0x95); - skinny_128_256_encrypt(ks, S, S); - return; - } - - /* Process all blocks except the last */ - romulus3_set_domain(ks, 0x84); - while (mlen > 16) { - romulus_rho_inverse(S, m, c); - romulus3_update_counter(ks->TK1); - skinny_128_256_encrypt(ks, S, S); - c += 16; - m += 16; - mlen -= 16; - } - - /* Pad and process the last block */ - temp = (unsigned)mlen; - romulus3_update_counter(ks->TK1); - if (temp < 16) { - romulus_rho_inverse_short(S, m, c, temp); - romulus3_set_domain(ks, 0x95); - } else { - romulus_rho_inverse(S, m, c); - romulus3_set_domain(ks, 0x94); - } - skinny_128_256_encrypt(ks, S, S); -} - -/** - * \brief Encrypts a plaintext message with Romulus-M1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m1_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus1_set_domain(ks, 0x24); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho(S, c, m); - romulus1_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_short(S, c, m, (unsigned)mlen); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-M1. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. 
- * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m1_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus1_set_domain(ks, 0x24); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse(S, m, c); - romulus1_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse_short(S, m, c, (unsigned)mlen); -} - -/** - * \brief Encrypts a plaintext message with Romulus-M2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m2_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus2_set_domain(ks, 0x64); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho(S, c, m); - romulus2_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_short(S, c, m, (unsigned)mlen); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-M2. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m2_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus2_set_domain(ks, 0x64); - while (mlen > 16) { - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse(S, m, c); - romulus2_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_384_encrypt(ks, S, S); - romulus_rho_inverse_short(S, m, c, (unsigned)mlen); -} - -/** - * \brief Encrypts a plaintext message with Romulus-M3. - * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the buffer containing the plaintext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m3_encrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *c, const unsigned char *m, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus3_set_domain(ks, 0xA4); - while (mlen > 16) { - skinny_128_256_encrypt(ks, S, S); - romulus_rho(S, c, m); - romulus3_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_256_encrypt(ks, S, S); - romulus_rho_short(S, c, m, (unsigned)mlen); -} - -/** - * \brief Decrypts a ciphertext message with Romulus-M3. 
- * - * \param ks Points to the key schedule. - * \param S The rolling Romulus state. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the buffer containing the ciphertext. - * \param mlen Length of the plaintext in bytes. - */ -static void romulus_m3_decrypt - (skinny_128_256_key_schedule_t *ks, unsigned char S[16], - unsigned char *m, const unsigned char *c, unsigned long long mlen) -{ - /* Nothing to do if the message is empty */ - if (!mlen) - return; - - /* Process all block except the last */ - romulus3_set_domain(ks, 0xA4); - while (mlen > 16) { - skinny_128_256_encrypt(ks, S, S); - romulus_rho_inverse(S, m, c); - romulus3_update_counter(ks->TK1); - c += 16; - m += 16; - mlen -= 16; - } - - /* Handle the last block */ - skinny_128_256_encrypt(ks, S, S); - romulus_rho_inverse_short(S, m, c, (unsigned)mlen); -} - -/** - * \brief Generates the authentication tag from the rolling Romulus state. - * - * \param T Buffer to receive the generated tag; can be the same as S. - * \param S The rolling Romulus state. - */ -STATIC_INLINE void romulus_generate_tag - (unsigned char T[16], const unsigned char S[16]) -{ - unsigned index; - for (index = 0; index < 16; ++index) { - unsigned char s = S[index]; - T[index] = (s >> 1) ^ (s & 0x80) ^ (s << 7); - } -} - -int romulus_n1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n1_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Encrypts the plaintext to produce the ciphertext */ - romulus_n1_encrypt(&ks, S, c, m, mlen); - - /* Generate the authentication tag */ - romulus_generate_tag(c + mlen, S); - return 0; -} - -int romulus_n1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n1_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= ROMULUS_TAG_SIZE; - romulus_n1_decrypt(&ks, S, m, c, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_n2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n2_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Encrypts the plaintext to produce the ciphertext */ - romulus_n2_encrypt(&ks, S, c, m, mlen); - - /* Generate the authentication tag */ - romulus_generate_tag(c + mlen, S); - return 0; -} - -int romulus_n2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n2_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= ROMULUS_TAG_SIZE; - romulus_n2_decrypt(&ks, S, m, c, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_n3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n3_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Encrypts the plaintext to produce the ciphertext */ - romulus_n3_encrypt(&ks, S, c, m, mlen); - - /* Generate the authentication tag */ - romulus_generate_tag(c + mlen, S); - return 0; -} - -int romulus_n3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_n3_process_ad(&ks, S, npub, ad, adlen); - - /* Re-initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= ROMULUS_TAG_SIZE; - romulus_n3_decrypt(&ks, S, m, c, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_m1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data and the plaintext message */ - memset(S, 0, sizeof(S)); - romulus_m1_process_ad(&ks, S, npub, ad, adlen, m, mlen); - - /* Generate the authentication tag, which is also the initialization - * vector for the encryption portion of the packet processing */ - romulus_generate_tag(S, S); - memcpy(c + mlen, S, ROMULUS_TAG_SIZE); - - /* Re-initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Encrypt the plaintext to produce the ciphertext */ - romulus_m1_encrypt(&ks, S, c, m, mlen); - return 0; -} - -int romulus_m1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus1_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext, using the - * authentication tag as the initialization vector for decryption */ - clen -= ROMULUS_TAG_SIZE; - memcpy(S, c + clen, ROMULUS_TAG_SIZE); - romulus_m1_decrypt(&ks, S, m, c, clen); - - /* Re-initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus1_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_m1_process_ad(&ks, S, npub, ad, adlen, m, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_m2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data and the plaintext message */ - memset(S, 0, sizeof(S)); - romulus_m2_process_ad(&ks, S, npub, ad, adlen, m, mlen); - - /* Generate the authentication tag, which is also the initialization - * vector for the encryption portion of the packet processing */ - romulus_generate_tag(S, S); - memcpy(c + mlen, S, ROMULUS_TAG_SIZE); - - /* Re-initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Encrypt the plaintext to produce the ciphertext */ - romulus_m2_encrypt(&ks, S, c, m, mlen); - return 0; -} - -int romulus_m2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus2_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext, using the - * authentication tag as the initialization vector for decryption */ - clen -= ROMULUS_TAG_SIZE; - memcpy(S, c + clen, ROMULUS_TAG_SIZE); - romulus_m2_decrypt(&ks, S, m, c, clen); - - /* Re-initialize the key schedule with the key and no nonce. 
Associated - * data processing varies the nonce from block to block */ - romulus2_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_m2_process_ad(&ks, S, npub, ad, adlen, m, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} - -int romulus_m3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || mlen > ROMULUS_DATA_LIMIT) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data and the plaintext message */ - memset(S, 0, sizeof(S)); - romulus_m3_process_ad(&ks, S, npub, ad, adlen, m, mlen); - - /* Generate the authentication tag, which is also the initialization - * vector for the encryption portion of the packet processing */ - romulus_generate_tag(S, S); - memcpy(c + mlen, S, ROMULUS_TAG_SIZE); - - /* Re-initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Encrypt the plaintext to produce the ciphertext */ - romulus_m3_encrypt(&ks, S, c, m, mlen); - return 0; -} - -int romulus_m3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char S[16]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < ROMULUS_TAG_SIZE) - return -1; - *mlen = clen - ROMULUS_TAG_SIZE; - - /* Validate the length of the associated data and message */ - if (adlen > ROMULUS_DATA_LIMIT || - clen > (ROMULUS_DATA_LIMIT + ROMULUS_TAG_SIZE)) - return -2; - - /* Initialize the key schedule with the key and nonce */ - romulus3_init(&ks, k, npub); - - /* Decrypt the ciphertext to produce the plaintext, using the - * authentication tag as the initialization vector for decryption */ - clen -= ROMULUS_TAG_SIZE; - memcpy(S, c + clen, ROMULUS_TAG_SIZE); - romulus_m3_decrypt(&ks, S, m, c, clen); - - /* Re-initialize the key schedule with the key and no nonce. Associated - * data processing varies the nonce from block to block */ - romulus3_init(&ks, k, 0); - - /* Process the associated data */ - memset(S, 0, sizeof(S)); - romulus_m3_process_ad(&ks, S, npub, ad, adlen, m, clen); - - /* Check the authentication tag */ - romulus_generate_tag(S, S); - return aead_check_tag(m, clen, S, c + clen, ROMULUS_TAG_SIZE); -} diff --git a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/romulus.h b/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/romulus.h deleted file mode 100644 index e6da29d..0000000 --- a/romulus/Implementations/crypto_aead/romulusn3/rhys-avr/romulus.h +++ /dev/null @@ -1,476 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_ROMULUS_H -#define LWCRYPTO_ROMULUS_H - -#include "aead-common.h" - -/** - * \file romulus.h - * \brief Romulus authenticated encryption algorithm family. - * - * Romulus is a family of authenticated encryption algorithms that - * are built around the SKINNY-128 tweakable block cipher. There - * are six members in the family: - * - * \li Romulus-N1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. This is the - * primary member of the family. - * \li Romulus-N2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li Romulus-N3 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * \li Romulus-M1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li Romulus-M2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li Romulus-M3 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * - * The Romulus-M variants are resistant to nonce reuse as long as the - * combination of the associated data and plaintext is unique. If the - * same associated data and plaintext are reused under the same nonce, - * then the scheme will leak that the same plaintext has been sent for a - * second time but will not reveal the plaintext itself. - * - * References: https://romulusae.github.io/romulus/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all Romulus family members. - */ -#define ROMULUS_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for all Romulus family members. - */ -#define ROMULUS_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Romulus-N1 and Romulus-M1. - */ -#define ROMULUS1_NONCE_SIZE 16 - -/** - * \brief Size of the nonce for Romulus-N2 and Romulus-M2. - */ -#define ROMULUS2_NONCE_SIZE 12 - -/** - * \brief Size of the nonce for Romulus-N3 and Romulus-M3. - */ -#define ROMULUS3_NONCE_SIZE 12 - -/** - * \brief Meta-information block for the Romulus-N1 cipher. - */ -extern aead_cipher_t const romulus_n1_cipher; - -/** - * \brief Meta-information block for the Romulus-N2 cipher. 
- */ -extern aead_cipher_t const romulus_n2_cipher; - -/** - * \brief Meta-information block for the Romulus-N3 cipher. - */ -extern aead_cipher_t const romulus_n3_cipher; - -/** - * \brief Meta-information block for the Romulus-M1 cipher. - */ -extern aead_cipher_t const romulus_m1_cipher; - -/** - * \brief Meta-information block for the Romulus-M2 cipher. - */ -extern aead_cipher_t const romulus_m2_cipher; - -/** - * \brief Meta-information block for the Romulus-M3 cipher. - */ -extern aead_cipher_t const romulus_m3_cipher; - -/** - * \brief Encrypts and authenticates a packet with Romulus-N1. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_n1_aead_decrypt() - */ -int romulus_n1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-N1. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_n1_aead_encrypt() - */ -int romulus_n1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-N2. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
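For illustration only (placeholder key, nonce, and message bytes; not part of the header), a caller-level round trip with the Romulus-N1 functions declared above could look like this:

#include <string.h>
#include "romulus.h"

/* Hypothetical usage sketch: encrypt a short packet with Romulus-N1,
 * then decrypt it again and compare with the original plaintext. */
int romulus_n1_round_trip(void)
{
    static const unsigned char key[ROMULUS_KEY_SIZE] = {0};      /* example key */
    static const unsigned char nonce[ROMULUS1_NONCE_SIZE] = {0}; /* example 16-byte nonce */
    const unsigned char msg[4] = {'t', 'e', 's', 't'};
    const unsigned char ad[3]  = {'h', 'd', 'r'};
    unsigned char ct[sizeof(msg) + ROMULUS_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    if (romulus_n1_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                                ad, sizeof(ad), 0, nonce, key) != 0)
        return -1;
    /* ctlen is now sizeof(msg) + ROMULUS_TAG_SIZE */
    if (romulus_n1_aead_decrypt(pt, &ptlen, 0, ct, ctlen,
                                ad, sizeof(ad), nonce, key) != 0)
        return -1;
    return (ptlen == sizeof(msg) && memcmp(pt, msg, sizeof(msg)) == 0) ? 0 : -1;
}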
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_n2_aead_decrypt() - */ -int romulus_n2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-N2. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_n2_aead_encrypt() - */ -int romulus_n2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-N3. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_n3_aead_decrypt() - */ -int romulus_n3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-N3. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. 
- * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_n3_aead_encrypt() - */ -int romulus_n3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-M1. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_m1_aead_decrypt() - */ -int romulus_m1_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-M1. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_m1_aead_encrypt() - */ -int romulus_m1_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-M2. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa romulus_m2_aead_decrypt() - */ -int romulus_m2_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-M2. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_m2_aead_encrypt() - */ -int romulus_m2_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Romulus-M3. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
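Similarly, a hedged sketch with the Romulus-M2 functions declared above (again with placeholder key, nonce, and data) shows the documented return codes in action: 0 on success and -1 once the packet has been tampered with:

#include <string.h>
#include "romulus.h"

/* Hypothetical tamper-detection demo for Romulus-M2 (illustrative only). */
int romulus_m2_tamper_demo(void)
{
    static const unsigned char key[ROMULUS_KEY_SIZE] = {1};      /* example key */
    static const unsigned char nonce[ROMULUS2_NONCE_SIZE] = {2}; /* example 12-byte nonce */
    const unsigned char msg[5] = {'h', 'e', 'l', 'l', 'o'};
    unsigned char ct[sizeof(msg) + ROMULUS_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    if (romulus_m2_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                                0, 0, 0, nonce, key) != 0)
        return -1;
    ct[0] ^= 0x01; /* corrupt one ciphertext byte */
    /* The corrupted packet must now fail authentication with -1. */
    return romulus_m2_aead_decrypt(pt, &ptlen, 0, ct, ctlen,
                                   0, 0, nonce, key) == -1 ? 0 : -1;
}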
- * - * \sa romulus_m3_aead_decrypt() - */ -int romulus_m3_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Romulus-M3. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa romulus_m3_aead_encrypt() - */ -int romulus_m3_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/romulus/Implementations/crypto_aead/romulusn3/rhys/internal-skinny128-avr.S b/romulus/Implementations/crypto_aead/romulusn3/rhys/internal-skinny128-avr.S new file mode 100644 index 0000000..d342cd5 --- /dev/null +++ b/romulus/Implementations/crypto_aead/romulusn3/rhys/internal-skinny128-avr.S @@ -0,0 +1,10099 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 256 +table_0: + .byte 101 + .byte 76 + .byte 106 + .byte 66 + .byte 75 + .byte 99 + .byte 67 + .byte 107 + .byte 85 + .byte 117 + .byte 90 + .byte 122 + .byte 83 + .byte 115 + .byte 91 + .byte 123 + .byte 53 + .byte 140 + .byte 58 + .byte 129 + .byte 137 + .byte 51 + .byte 128 + .byte 59 + .byte 149 + .byte 37 + .byte 152 + .byte 42 + .byte 144 + .byte 35 + .byte 153 + .byte 43 + .byte 229 + .byte 204 + .byte 232 + .byte 193 + .byte 201 + .byte 224 + .byte 192 + .byte 233 + .byte 213 + .byte 245 + .byte 216 + .byte 248 + .byte 208 + .byte 240 + .byte 217 + .byte 249 + .byte 165 + .byte 28 + .byte 168 + .byte 18 + .byte 27 + .byte 160 + .byte 19 + .byte 169 + .byte 5 + .byte 181 + .byte 10 + .byte 184 + .byte 3 + .byte 176 + .byte 11 + .byte 185 + .byte 50 + .byte 136 + .byte 60 + .byte 133 + .byte 141 + .byte 52 + .byte 132 + .byte 61 + .byte 145 + .byte 34 + .byte 156 + .byte 44 + .byte 148 + .byte 36 + .byte 157 + .byte 45 + .byte 98 + .byte 74 + .byte 108 + .byte 69 + .byte 77 + .byte 100 + .byte 68 + .byte 109 + .byte 82 + .byte 114 + .byte 92 + .byte 124 + .byte 84 + .byte 116 + .byte 93 + .byte 125 + .byte 161 + .byte 26 + .byte 172 + .byte 21 + .byte 29 + .byte 164 + .byte 20 + .byte 173 + .byte 2 + .byte 177 + .byte 12 + .byte 188 + .byte 4 + .byte 180 + .byte 13 + .byte 189 + .byte 225 + .byte 200 + .byte 236 + .byte 197 + .byte 205 + .byte 228 + .byte 196 + .byte 237 + .byte 
209 + .byte 241 + .byte 220 + .byte 252 + .byte 212 + .byte 244 + .byte 221 + .byte 253 + .byte 54 + .byte 142 + .byte 56 + .byte 130 + .byte 139 + .byte 48 + .byte 131 + .byte 57 + .byte 150 + .byte 38 + .byte 154 + .byte 40 + .byte 147 + .byte 32 + .byte 155 + .byte 41 + .byte 102 + .byte 78 + .byte 104 + .byte 65 + .byte 73 + .byte 96 + .byte 64 + .byte 105 + .byte 86 + .byte 118 + .byte 88 + .byte 120 + .byte 80 + .byte 112 + .byte 89 + .byte 121 + .byte 166 + .byte 30 + .byte 170 + .byte 17 + .byte 25 + .byte 163 + .byte 16 + .byte 171 + .byte 6 + .byte 182 + .byte 8 + .byte 186 + .byte 0 + .byte 179 + .byte 9 + .byte 187 + .byte 230 + .byte 206 + .byte 234 + .byte 194 + .byte 203 + .byte 227 + .byte 195 + .byte 235 + .byte 214 + .byte 246 + .byte 218 + .byte 250 + .byte 211 + .byte 243 + .byte 219 + .byte 251 + .byte 49 + .byte 138 + .byte 62 + .byte 134 + .byte 143 + .byte 55 + .byte 135 + .byte 63 + .byte 146 + .byte 33 + .byte 158 + .byte 46 + .byte 151 + .byte 39 + .byte 159 + .byte 47 + .byte 97 + .byte 72 + .byte 110 + .byte 70 + .byte 79 + .byte 103 + .byte 71 + .byte 111 + .byte 81 + .byte 113 + .byte 94 + .byte 126 + .byte 87 + .byte 119 + .byte 95 + .byte 127 + .byte 162 + .byte 24 + .byte 174 + .byte 22 + .byte 31 + .byte 167 + .byte 23 + .byte 175 + .byte 1 + .byte 178 + .byte 14 + .byte 190 + .byte 7 + .byte 183 + .byte 15 + .byte 191 + .byte 226 + .byte 202 + .byte 238 + .byte 198 + .byte 207 + .byte 231 + .byte 199 + .byte 239 + .byte 210 + .byte 242 + .byte 222 + .byte 254 + .byte 215 + .byte 247 + .byte 223 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 256 +table_1: + .byte 172 + .byte 232 + .byte 104 + .byte 60 + .byte 108 + .byte 56 + .byte 168 + .byte 236 + .byte 170 + .byte 174 + .byte 58 + .byte 62 + .byte 106 + .byte 110 + .byte 234 + .byte 238 + .byte 166 + .byte 163 + .byte 51 + .byte 54 + .byte 102 + .byte 99 + .byte 227 + .byte 230 + .byte 225 + .byte 164 + .byte 97 + .byte 52 + .byte 49 + .byte 100 + .byte 161 + .byte 228 + .byte 141 + .byte 201 + .byte 73 + .byte 29 + .byte 77 + .byte 25 + .byte 137 + .byte 205 + .byte 139 + .byte 143 + .byte 27 + .byte 31 + .byte 75 + .byte 79 + .byte 203 + .byte 207 + .byte 133 + .byte 192 + .byte 64 + .byte 21 + .byte 69 + .byte 16 + .byte 128 + .byte 197 + .byte 130 + .byte 135 + .byte 18 + .byte 23 + .byte 66 + .byte 71 + .byte 194 + .byte 199 + .byte 150 + .byte 147 + .byte 3 + .byte 6 + .byte 86 + .byte 83 + .byte 211 + .byte 214 + .byte 209 + .byte 148 + .byte 81 + .byte 4 + .byte 1 + .byte 84 + .byte 145 + .byte 212 + .byte 156 + .byte 216 + .byte 88 + .byte 12 + .byte 92 + .byte 8 + .byte 152 + .byte 220 + .byte 154 + .byte 158 + .byte 10 + .byte 14 + .byte 90 + .byte 94 + .byte 218 + .byte 222 + .byte 149 + .byte 208 + .byte 80 + .byte 5 + .byte 85 + .byte 0 + .byte 144 + .byte 213 + .byte 146 + .byte 151 + .byte 2 + .byte 7 + .byte 82 + .byte 87 + .byte 210 + .byte 215 + .byte 157 + .byte 217 + .byte 89 + .byte 13 + .byte 93 + .byte 9 + .byte 153 + .byte 221 + .byte 155 + .byte 159 + .byte 11 + .byte 15 + .byte 91 + .byte 95 + .byte 219 + .byte 223 + .byte 22 + .byte 19 + .byte 131 + .byte 134 + .byte 70 + .byte 67 + .byte 195 + .byte 198 + .byte 65 + .byte 20 + .byte 193 + .byte 132 + .byte 17 + .byte 68 + .byte 129 + .byte 196 + .byte 28 + .byte 72 + .byte 200 + .byte 140 + .byte 76 + .byte 24 + .byte 136 + .byte 204 + .byte 26 + .byte 30 + .byte 138 + .byte 142 + .byte 74 + .byte 78 + .byte 202 + .byte 206 + .byte 53 + .byte 96 + .byte 224 + 
.byte 165 + .byte 101 + .byte 48 + .byte 160 + .byte 229 + .byte 50 + .byte 55 + .byte 162 + .byte 167 + .byte 98 + .byte 103 + .byte 226 + .byte 231 + .byte 61 + .byte 105 + .byte 233 + .byte 173 + .byte 109 + .byte 57 + .byte 169 + .byte 237 + .byte 59 + .byte 63 + .byte 171 + .byte 175 + .byte 107 + .byte 111 + .byte 235 + .byte 239 + .byte 38 + .byte 35 + .byte 179 + .byte 182 + .byte 118 + .byte 115 + .byte 243 + .byte 246 + .byte 113 + .byte 36 + .byte 241 + .byte 180 + .byte 33 + .byte 116 + .byte 177 + .byte 244 + .byte 44 + .byte 120 + .byte 248 + .byte 188 + .byte 124 + .byte 40 + .byte 184 + .byte 252 + .byte 42 + .byte 46 + .byte 186 + .byte 190 + .byte 122 + .byte 126 + .byte 250 + .byte 254 + .byte 37 + .byte 112 + .byte 240 + .byte 181 + .byte 117 + .byte 32 + .byte 176 + .byte 245 + .byte 34 + .byte 39 + .byte 178 + .byte 183 + .byte 114 + .byte 119 + .byte 242 + .byte 247 + .byte 45 + .byte 121 + .byte 249 + .byte 189 + .byte 125 + .byte 41 + .byte 185 + .byte 253 + .byte 43 + .byte 47 + .byte 187 + .byte 191 + .byte 123 + .byte 127 + .byte 251 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_2, @object + .size table_2, 256 +table_2: + .byte 0 + .byte 2 + .byte 4 + .byte 6 + .byte 8 + .byte 10 + .byte 12 + .byte 14 + .byte 16 + .byte 18 + .byte 20 + .byte 22 + .byte 24 + .byte 26 + .byte 28 + .byte 30 + .byte 32 + .byte 34 + .byte 36 + .byte 38 + .byte 40 + .byte 42 + .byte 44 + .byte 46 + .byte 48 + .byte 50 + .byte 52 + .byte 54 + .byte 56 + .byte 58 + .byte 60 + .byte 62 + .byte 65 + .byte 67 + .byte 69 + .byte 71 + .byte 73 + .byte 75 + .byte 77 + .byte 79 + .byte 81 + .byte 83 + .byte 85 + .byte 87 + .byte 89 + .byte 91 + .byte 93 + .byte 95 + .byte 97 + .byte 99 + .byte 101 + .byte 103 + .byte 105 + .byte 107 + .byte 109 + .byte 111 + .byte 113 + .byte 115 + .byte 117 + .byte 119 + .byte 121 + .byte 123 + .byte 125 + .byte 127 + .byte 128 + .byte 130 + .byte 132 + .byte 134 + .byte 136 + .byte 138 + .byte 140 + .byte 142 + .byte 144 + .byte 146 + .byte 148 + .byte 150 + .byte 152 + .byte 154 + .byte 156 + .byte 158 + .byte 160 + .byte 162 + .byte 164 + .byte 166 + .byte 168 + .byte 170 + .byte 172 + .byte 174 + .byte 176 + .byte 178 + .byte 180 + .byte 182 + .byte 184 + .byte 186 + .byte 188 + .byte 190 + .byte 193 + .byte 195 + .byte 197 + .byte 199 + .byte 201 + .byte 203 + .byte 205 + .byte 207 + .byte 209 + .byte 211 + .byte 213 + .byte 215 + .byte 217 + .byte 219 + .byte 221 + .byte 223 + .byte 225 + .byte 227 + .byte 229 + .byte 231 + .byte 233 + .byte 235 + .byte 237 + .byte 239 + .byte 241 + .byte 243 + .byte 245 + .byte 247 + .byte 249 + .byte 251 + .byte 253 + .byte 255 + .byte 1 + .byte 3 + .byte 5 + .byte 7 + .byte 9 + .byte 11 + .byte 13 + .byte 15 + .byte 17 + .byte 19 + .byte 21 + .byte 23 + .byte 25 + .byte 27 + .byte 29 + .byte 31 + .byte 33 + .byte 35 + .byte 37 + .byte 39 + .byte 41 + .byte 43 + .byte 45 + .byte 47 + .byte 49 + .byte 51 + .byte 53 + .byte 55 + .byte 57 + .byte 59 + .byte 61 + .byte 63 + .byte 64 + .byte 66 + .byte 68 + .byte 70 + .byte 72 + .byte 74 + .byte 76 + .byte 78 + .byte 80 + .byte 82 + .byte 84 + .byte 86 + .byte 88 + .byte 90 + .byte 92 + .byte 94 + .byte 96 + .byte 98 + .byte 100 + .byte 102 + .byte 104 + .byte 106 + .byte 108 + .byte 110 + .byte 112 + .byte 114 + .byte 116 + .byte 118 + .byte 120 + .byte 122 + .byte 124 + .byte 126 + .byte 129 + .byte 131 + .byte 133 + .byte 135 + .byte 137 + .byte 139 + .byte 141 + .byte 143 + .byte 145 + .byte 147 + .byte 149 + .byte 151 + .byte 153 + 
.byte 155 + .byte 157 + .byte 159 + .byte 161 + .byte 163 + .byte 165 + .byte 167 + .byte 169 + .byte 171 + .byte 173 + .byte 175 + .byte 177 + .byte 179 + .byte 181 + .byte 183 + .byte 185 + .byte 187 + .byte 189 + .byte 191 + .byte 192 + .byte 194 + .byte 196 + .byte 198 + .byte 200 + .byte 202 + .byte 204 + .byte 206 + .byte 208 + .byte 210 + .byte 212 + .byte 214 + .byte 216 + .byte 218 + .byte 220 + .byte 222 + .byte 224 + .byte 226 + .byte 228 + .byte 230 + .byte 232 + .byte 234 + .byte 236 + .byte 238 + .byte 240 + .byte 242 + .byte 244 + .byte 246 + .byte 248 + .byte 250 + .byte 252 + .byte 254 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_3, @object + .size table_3, 256 +table_3: + .byte 0 + .byte 128 + .byte 1 + .byte 129 + .byte 2 + .byte 130 + .byte 3 + .byte 131 + .byte 4 + .byte 132 + .byte 5 + .byte 133 + .byte 6 + .byte 134 + .byte 7 + .byte 135 + .byte 8 + .byte 136 + .byte 9 + .byte 137 + .byte 10 + .byte 138 + .byte 11 + .byte 139 + .byte 12 + .byte 140 + .byte 13 + .byte 141 + .byte 14 + .byte 142 + .byte 15 + .byte 143 + .byte 16 + .byte 144 + .byte 17 + .byte 145 + .byte 18 + .byte 146 + .byte 19 + .byte 147 + .byte 20 + .byte 148 + .byte 21 + .byte 149 + .byte 22 + .byte 150 + .byte 23 + .byte 151 + .byte 24 + .byte 152 + .byte 25 + .byte 153 + .byte 26 + .byte 154 + .byte 27 + .byte 155 + .byte 28 + .byte 156 + .byte 29 + .byte 157 + .byte 30 + .byte 158 + .byte 31 + .byte 159 + .byte 160 + .byte 32 + .byte 161 + .byte 33 + .byte 162 + .byte 34 + .byte 163 + .byte 35 + .byte 164 + .byte 36 + .byte 165 + .byte 37 + .byte 166 + .byte 38 + .byte 167 + .byte 39 + .byte 168 + .byte 40 + .byte 169 + .byte 41 + .byte 170 + .byte 42 + .byte 171 + .byte 43 + .byte 172 + .byte 44 + .byte 173 + .byte 45 + .byte 174 + .byte 46 + .byte 175 + .byte 47 + .byte 176 + .byte 48 + .byte 177 + .byte 49 + .byte 178 + .byte 50 + .byte 179 + .byte 51 + .byte 180 + .byte 52 + .byte 181 + .byte 53 + .byte 182 + .byte 54 + .byte 183 + .byte 55 + .byte 184 + .byte 56 + .byte 185 + .byte 57 + .byte 186 + .byte 58 + .byte 187 + .byte 59 + .byte 188 + .byte 60 + .byte 189 + .byte 61 + .byte 190 + .byte 62 + .byte 191 + .byte 63 + .byte 64 + .byte 192 + .byte 65 + .byte 193 + .byte 66 + .byte 194 + .byte 67 + .byte 195 + .byte 68 + .byte 196 + .byte 69 + .byte 197 + .byte 70 + .byte 198 + .byte 71 + .byte 199 + .byte 72 + .byte 200 + .byte 73 + .byte 201 + .byte 74 + .byte 202 + .byte 75 + .byte 203 + .byte 76 + .byte 204 + .byte 77 + .byte 205 + .byte 78 + .byte 206 + .byte 79 + .byte 207 + .byte 80 + .byte 208 + .byte 81 + .byte 209 + .byte 82 + .byte 210 + .byte 83 + .byte 211 + .byte 84 + .byte 212 + .byte 85 + .byte 213 + .byte 86 + .byte 214 + .byte 87 + .byte 215 + .byte 88 + .byte 216 + .byte 89 + .byte 217 + .byte 90 + .byte 218 + .byte 91 + .byte 219 + .byte 92 + .byte 220 + .byte 93 + .byte 221 + .byte 94 + .byte 222 + .byte 95 + .byte 223 + .byte 224 + .byte 96 + .byte 225 + .byte 97 + .byte 226 + .byte 98 + .byte 227 + .byte 99 + .byte 228 + .byte 100 + .byte 229 + .byte 101 + .byte 230 + .byte 102 + .byte 231 + .byte 103 + .byte 232 + .byte 104 + .byte 233 + .byte 105 + .byte 234 + .byte 106 + .byte 235 + .byte 107 + .byte 236 + .byte 108 + .byte 237 + .byte 109 + .byte 238 + .byte 110 + .byte 239 + .byte 111 + .byte 240 + .byte 112 + .byte 241 + .byte 113 + .byte 242 + .byte 114 + .byte 243 + .byte 115 + .byte 244 + .byte 116 + .byte 245 + .byte 117 + .byte 246 + .byte 118 + .byte 247 + .byte 119 + .byte 248 + .byte 120 + .byte 249 + .byte 121 + .byte 250 + 
.byte 122 + .byte 251 + .byte 123 + .byte 252 + .byte 124 + .byte 253 + .byte 125 + .byte 254 + .byte 126 + .byte 255 + .byte 127 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_4, @object + .size table_4, 112 +table_4: + .byte 1 + .byte 0 + .byte 3 + .byte 0 + .byte 7 + .byte 0 + .byte 15 + .byte 0 + .byte 15 + .byte 1 + .byte 14 + .byte 3 + .byte 13 + .byte 3 + .byte 11 + .byte 3 + .byte 7 + .byte 3 + .byte 15 + .byte 2 + .byte 14 + .byte 1 + .byte 12 + .byte 3 + .byte 9 + .byte 3 + .byte 3 + .byte 3 + .byte 7 + .byte 2 + .byte 14 + .byte 0 + .byte 13 + .byte 1 + .byte 10 + .byte 3 + .byte 5 + .byte 3 + .byte 11 + .byte 2 + .byte 6 + .byte 1 + .byte 12 + .byte 2 + .byte 8 + .byte 1 + .byte 0 + .byte 3 + .byte 1 + .byte 2 + .byte 2 + .byte 0 + .byte 5 + .byte 0 + .byte 11 + .byte 0 + .byte 7 + .byte 1 + .byte 14 + .byte 2 + .byte 12 + .byte 1 + .byte 8 + .byte 3 + .byte 1 + .byte 3 + .byte 3 + .byte 2 + .byte 6 + .byte 0 + .byte 13 + .byte 0 + .byte 11 + .byte 1 + .byte 6 + .byte 3 + .byte 13 + .byte 2 + .byte 10 + .byte 1 + .byte 4 + .byte 3 + .byte 9 + .byte 2 + .byte 2 + .byte 1 + .byte 4 + .byte 2 + .byte 8 + .byte 0 + .byte 1 + .byte 1 + .byte 2 + .byte 2 + .byte 4 + .byte 0 + .byte 9 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 2 + .byte 12 + .byte 0 + .byte 9 + .byte 1 + .byte 2 + .byte 3 + .byte 5 + .byte 2 + .byte 10 + .byte 0 + + .text +.global skinny_128_384_init + .type skinny_128_384_init, @function +skinny_128_384_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,12 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_384_init, .-skinny_128_384_init + + .text +.global skinny_128_384_encrypt + .type skinny_128_384_encrypt, @function +skinny_128_384_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + std Y+33,r18 + std Y+34,r19 + std Y+35,r20 + std Y+36,r21 + ldd r18,Z+36 + ldd r19,Z+37 + ldd r20,Z+38 + ldd r21,Z+39 + std Y+37,r18 + std Y+38,r19 + std Y+39,r20 + std Y+40,r21 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + std Y+41,r18 + std Y+42,r19 + std Y+43,r20 + std Y+44,r21 + ldd r18,Z+44 + ldd r19,Z+45 + ldd r20,Z+46 + ldd 
r21,Z+47 + std Y+45,r18 + std Y+46,r19 + std Y+47,r20 + std Y+48,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +114: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif 
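; Each table lookup above uses the same four-way fallback: elpm when the part defines
; RAMPZ, "lpm Rd,Z" on cores with the LPMX extension, a plain ld on __AVR_TINY__
; (where the tables are reachable through the data space), and the classic
; lpm-then-mov-from-r0 sequence otherwise. table_0 is applied to every state byte
; (presumably the SKINNY-128 S-box), and table_4 supplies the round-constant bytes
; that are XORed into the state immediately afterwards.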
+ eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 
+#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov 
r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm 
r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov 
r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 
+#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,112 + brne 5721f + rjmp 790f +5721: + 
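; The unrolled block above covers four rounds per pass; control returns to label 114
; until the round-constant index in r26 reaches 112. With two table_4 bytes consumed
; per round, that appears to correspond to the full 56 rounds of SKINNY-128-384,
; after which execution jumps to label 790 to write out the ciphertext.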
ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + 
std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 114b +790: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_encrypt, .-skinny_128_384_encrypt + +.global skinny_128_384_encrypt_tk_full + .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt + + .text +.global skinny_128_384_decrypt + .type skinny_128_384_decrypt, @function +skinny_128_384_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r23 + std Y+2,r2 + std Y+3,r21 + std Y+4,r20 + std Y+5,r3 + std Y+6,r18 + std Y+7,r19 + std Y+8,r22 + std Y+9,r9 + std Y+10,r10 + std Y+11,r7 + std Y+12,r6 + std Y+13,r11 + std Y+14,r4 + std Y+15,r5 + std Y+16,r8 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r23 + std Y+18,r2 + std Y+19,r21 + std Y+20,r20 + std Y+21,r3 + std Y+22,r18 + std Y+23,r19 + std Y+24,r22 + std Y+25,r9 + std Y+26,r10 + std Y+27,r7 + std Y+28,r6 + std Y+29,r11 + std Y+30,r4 + std Y+31,r5 + std Y+32,r8 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r22,Z+36 + ldd r23,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+33,r23 + std Y+34,r2 + std Y+35,r21 + std Y+36,r20 + std Y+37,r3 + std Y+38,r18 + std Y+39,r19 + std Y+40,r22 + std Y+41,r9 + std Y+42,r10 + std Y+43,r7 + std Y+44,r6 + std Y+45,r11 + std Y+46,r4 + std Y+47,r5 + std Y+48,r8 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +122: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z 
+#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 122b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,28 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +150: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 150b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 +179: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 
+#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 179b + std Y+33,r12 + std Y+34,r13 + std Y+35,r14 + std Y+36,r15 + std Y+37,r24 + std Y+38,r25 + std Y+39,r16 + std Y+40,r17 + ldi r26,28 + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 +207: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 207b + std Y+41,r12 + std Y+42,r13 + std Y+43,r14 + std Y+44,r15 + std Y+45,r24 + std Y+46,r25 + std Y+47,r16 + std Y+48,r17 + ldi r26,112 +227: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm 
r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor 
r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std 
Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + 
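; Decryption walks the schedule in reverse: r26 counts the round-constant index back
; down from 112 (two "dec r26" per inverse round), and the byte substitution goes
; through table_1, presumably the inverse of the table_0 S-box used on the
; encryption path.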
mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z 
+#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + 
mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z 
+#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + 
ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif 
defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 903f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 227b +903: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_decrypt, .-skinny_128_384_decrypt + + .text +.global skinny_128_256_init + .type skinny_128_256_init, @function +skinny_128_256_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,8 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_256_init, .-skinny_128_256_init + + .text +.global skinny_128_256_encrypt + .type skinny_128_256_encrypt, @function +skinny_128_256_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 
+ push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +82: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov 
r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor 
r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if 
defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi 
r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + 
ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,96 + breq 594f + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) 
+ elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 82b +594: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_encrypt, .-skinny_128_256_encrypt + +.global skinny_128_256_encrypt_tk_full + .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt + + .text +.global skinny_128_256_decrypt + .type skinny_128_256_decrypt, @function +skinny_128_256_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + std Y+5,r22 + std Y+6,r23 + std Y+7,r2 + std Y+8,r3 + std Y+9,r4 + std Y+10,r5 + std Y+11,r6 + std Y+12,r7 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + std Y+21,r22 + std Y+22,r23 + std Y+23,r2 + std Y+24,r3 + std Y+25,r4 + std Y+26,r5 + std Y+27,r6 + std Y+28,r7 + std Y+29,r8 + std Y+30,r9 + std Y+31,r10 + std Y+32,r11 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,24 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +90: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm 
r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 90b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,24 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +118: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 118b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,96 +139: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else 
+ lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 
+#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + 
elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd 
r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif 
defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 651f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 139b +651: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_decrypt, .-skinny_128_256_decrypt + +#endif diff --git a/romulus/Implementations/crypto_aead/romulusn3/rhys/internal-skinny128.c b/romulus/Implementations/crypto_aead/romulusn3/rhys/internal-skinny128.c index 65ba4ed..579ced1 100644 --- a/romulus/Implementations/crypto_aead/romulusn3/rhys/internal-skinny128.c +++ b/romulus/Implementations/crypto_aead/romulusn3/rhys/internal-skinny128.c @@ -25,6 +25,8 @@ #include "internal-util.h" #include +#if !defined(__AVR__) + STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) { /* This function is used to fast-forward the TK1 tweak value @@ -55,42 +57,33 @@ STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) ((row3 << 24) & 0xFF000000U); } -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - 
size_t key_len) +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t TK3[4]; uint32_t *schedule; unsigned round; uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || (key_len != 32 && key_len != 48)) - return 0; - +#endif + +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); + memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); +#else /* Set the initial states of TK1, TK2, and TK3 */ - if (key_len == 32) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - TK3[0] = le_load_word32(key + 16); - TK3[1] = le_load_word32(key + 20); - TK3[2] = le_load_word32(key + 24); - TK3[3] = le_load_word32(key + 28); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); + TK3[0] = le_load_word32(key + 32); + TK3[1] = le_load_word32(key + 36); + TK3[2] = le_load_word32(key + 40); + TK3[3] = le_load_word32(key + 44); /* Set up the key schedule using TK2 and TK3. TK1 is not added * to the key schedule because we will derive that part of the @@ -116,20 +109,7 @@ int skinny_128_384_init skinny128_LFSR3(TK3[0]); skinny128_LFSR3(TK3[1]); } - return 1; -} - -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_384_encrypt @@ -138,7 +118,13 @@ void skinny_128_384_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -148,14 +134,24 @@ void skinny_128_384_encrypt s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -163,8 +159,15 @@ void skinny_128_384_encrypt skinny128_sbox(s3); /* 
Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -185,6 +188,16 @@ void skinny_128_384_encrypt /* Permute TK1 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_permute_tk(TK3); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -200,7 +213,13 @@ void skinny_128_384_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0x15; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -215,15 +234,47 @@ void skinny_128_384_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Permute TK1 to fast-forward it to the end of the key schedule */ skinny128_fast_forward_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_fast_forward_tk(TK2); + skinny128_fast_forward_tk(TK3); + for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
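The small-schedule paths in these hunks replace the precomputed schedule words with a 6-bit LFSR that regenerates the SKINNY round constants on the fly. As a quick illustration of the update rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; rc &= 0x3F;, here is a minimal standalone sketch (function name is illustrative, not part of the patch) that prints the first few constants and shows how the low nibble is XORed into row 0 of the state and the top two bits into row 1:

#include <stdio.h>
#include <stdint.h>

/* Generate the first "count" SKINNY round constants using the same
 * 6-bit LFSR update as the small-schedule code above. */
static void print_round_constants(int count)
{
    uint8_t rc = 0;
    for (int round = 0; round < count; ++round) {
        rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
        rc &= 0x3F;
        /* Low 4 bits go into state row 0, high 2 bits into row 1 */
        printf("round %2d: rc=0x%02X  row0^=0x%X  row1^=0x%X\n",
               round, rc, rc & 0x0F, rc >> 4);
    }
}

int main(void)
{
    /* Expected start of the sequence: 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, ... */
    print_round_constants(8);
    return 0;
}

Decryption runs the same LFSR in reverse, which is why the decrypt functions in this file start rc at the final forward value (0x15 for SKINNY-128-384, 0x09 for SKINNY-128-256, as set in the hunks above and below) and apply the inverted update each round.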
+ skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); + skinny128_LFSR3(TK3[2]); + skinny128_LFSR3(TK3[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_inv_permute_tk(TK3); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); + skinny128_LFSR2(TK3[2]); + skinny128_LFSR2(TK3[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -240,8 +291,15 @@ void skinny_128_384_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -259,13 +317,18 @@ void skinny_128_384_decrypt } void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2) { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; uint32_t TK2[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -275,7 +338,7 @@ void skinny_128_384_encrypt_tk2 s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1/TK2 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); @@ -284,9 +347,15 @@ void skinny_128_384_encrypt_tk2 TK2[1] = le_load_word32(tk2 + 4); TK2[2] = le_load_word32(tk2 + 8); TK2[3] = le_load_word32(tk2 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -294,8 +363,15 @@ void skinny_128_384_encrypt_tk2 skinny128_sbox(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -319,6 +395,13 @@ void skinny_128_384_encrypt_tk2 skinny128_permute_tk(TK2); skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK3); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -408,33 +491,27 @@ void skinny_128_384_encrypt_tk_full 
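The new decryption path first fast-forwards TK2 and TK3 by clocking the byte-wise LFSRs ROUNDS/2 extra times (during encryption each tweakey cell is clocked once every two rounds, so ROUNDS/2 applications per word brings every cell to its end-of-schedule state), and then steps them backwards one round at a time by applying the opposite LFSR: skinny128_LFSR3 for TK2 and skinny128_LFSR2 for TK3. This works because the two byte-wise LFSRs are inverses of each other. A small self-contained check of that property, using the packed 32-bit form of the LFSRs (reproduced here as a sketch; the authoritative macros are the skinny128_LFSR2/skinny128_LFSR3 definitions in internal-skinny128.c):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Byte-wise SKINNY LFSRs packed four cells to a 32-bit word, in the same
 * style as the library (sketch only; see internal-skinny128.c). */
static uint32_t lfsr2(uint32_t x) /* per byte: x7..x0 -> x6..x0, x7^x5 */
{
    return ((x << 1) & 0xFEFEFEFEU) ^ (((x >> 7) ^ (x >> 5)) & 0x01010101U);
}

static uint32_t lfsr3(uint32_t x) /* per byte: x7..x0 -> x0^x6, x7..x1 */
{
    return ((x >> 1) & 0x7F7F7F7FU) ^ (((x << 7) ^ (x << 1)) & 0x80808080U);
}

int main(void)
{
    uint32_t x = 0x12F4A97CU;
    /* LFSR3 undoes LFSR2 and vice versa, which is why the decryption
     * rounds above can step TK2 backwards by calling skinny128_LFSR3
     * and TK3 backwards by calling skinny128_LFSR2. */
    assert(lfsr3(lfsr2(x)) == x);
    assert(lfsr2(lfsr3(x)) == x);
    printf("LFSR2 and LFSR3 are inverses of each other\n");
    return 0;
}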
le_store_word32(output + 12, s3); } -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len) +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t *schedule; unsigned round; uint8_t rc; +#endif - /* Validate the parameters */ - if (!ks || !key || (key_len != 16 && key_len != 32)) - return 0; - +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); +#else /* Set the initial states of TK1 and TK2 */ - if (key_len == 16) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); /* Set up the key schedule using TK2. TK1 is not added * to the key schedule because we will derive that part of the @@ -457,20 +534,7 @@ int skinny_128_256_init skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); } - return 1; -} - -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_256_encrypt @@ -479,7 +543,12 @@ void skinny_128_256_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -494,18 +563,31 @@ void skinny_128_256_encrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); skinny128_sbox(s2); skinny128_sbox(s3); - /* Apply the subkey for this round */ + /* XOR the round constant and the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -524,8 +606,15 @@ void skinny_128_256_encrypt s1 = s0; s0 = temp; - /* Permute TK1 for the next round */ + /* Permute TK1 and TK2 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -541,7 +630,12 @@ void 
skinny_128_256_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0x09; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -558,12 +652,29 @@ void skinny_128_256_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2. + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -580,8 +691,15 @@ void skinny_128_256_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -670,142 +788,14 @@ void skinny_128_256_encrypt_tk_full le_store_word32(output + 12, s3); } -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len) -{ - uint32_t TK1[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || key_len != 16) - return 0; - - /* Set the initial state of TK1 */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); +#else /* __AVR__ */ - /* Set up the key schedule using TK1 */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK1[0] ^ (rc & 0x0F); - schedule[1] = TK1[1] ^ (rc >> 4); - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); - } - return 1; -} - -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) +void skinny_128_384_encrypt_tk2 + (skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, const unsigned char *tk2) { - uint32_t s0, s1, s2, s3; - const uint32_t *schedule = ks->k; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); + memcpy(ks->TK2, tk2, 16); + skinny_128_384_encrypt(ks, output, input); } -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - const uint32_t *schedule; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_128_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule -= 2) { - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} +#endif /* __AVR__ */ diff --git a/romulus/Implementations/crypto_aead/romulusn3/rhys/internal-skinny128.h b/romulus/Implementations/crypto_aead/romulusn3/rhys/internal-skinny128.h index 76b34f5..2bfda3c 100644 --- a/romulus/Implementations/crypto_aead/romulusn3/rhys/internal-skinny128.h +++ b/romulus/Implementations/crypto_aead/romulusn3/rhys/internal-skinny128.h @@ -39,6 +39,16 @@ extern "C" { #endif /** + * \def SKINNY_128_SMALL_SCHEDULE + * \brief Defined to 1 to use the 
small key schedule version of SKINNY-128. + */ +#if defined(__AVR__) +#define SKINNY_128_SMALL_SCHEDULE 1 +#else +#define SKINNY_128_SMALL_SCHEDULE 0 +#endif + +/** * \brief Size of a block for SKINNY-128 block ciphers. */ #define SKINNY_128_BLOCK_SIZE 16 @@ -56,8 +66,16 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; + + /** TK3 for the small key schedule */ + uint8_t TK3[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_384_ROUNDS * 2]; +#endif } skinny_128_384_key_schedule_t; @@ -66,29 +84,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 32 or 48, - * where 32 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-384. - * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); /** * \brief Encrypts a 128-bit block with SKINNY-128-384. @@ -133,9 +131,12 @@ void skinny_128_384_decrypt * This version is useful when both TK1 and TK2 change from block to block. * When the key is initialized with skinny_128_384_init(), the TK2 part of * the key value should be set to zero. + * + * \note Some versions of this function may modify the key schedule to + * copy tk2 into place. */ void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2); /** @@ -170,8 +171,13 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_256_ROUNDS * 2]; +#endif } skinny_128_256_key_schedule_t; @@ -180,29 +186,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16 or 32, - * where 16 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. 
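The key schedule structures above are where the AVR memory saving comes from: with SKINNY_128_SMALL_SCHEDULE set, the structure holds only the raw TK bytes and the round keys are derived on the fly, instead of storing two 32-bit words per round. A rough size comparison is sketched below; the 56- and 48-round counts are the usual SKINNY-128-384 and SKINNY-128-256 values and are assumed here rather than taken from this hunk:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SKINNY_128_384_ROUNDS 56   /* assumed, per the SKINNY-128-384 spec */
#define SKINNY_128_256_ROUNDS 48   /* assumed, per the SKINNY-128-256 spec */

int main(void)
{
    /* Full schedule: TK1 plus two 32-bit schedule words per round. */
    size_t full_384  = 16 + sizeof(uint32_t) * SKINNY_128_384_ROUNDS * 2;
    size_t full_256  = 16 + sizeof(uint32_t) * SKINNY_128_256_ROUNDS * 2;
    /* Small schedule: just the raw TK1/TK2(/TK3) bytes. */
    size_t small_384 = 16 + 16 + 16;
    size_t small_256 = 16 + 16;
    printf("SKINNY-128-384 schedule: %zu bytes full, %zu bytes small\n",
           full_384, small_384);
    printf("SKINNY-128-256 schedule: %zu bytes full, %zu bytes small\n",
           full_256, small_256);
    return 0;
}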
- */ -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); /** * \brief Encrypts a 128-bit block with SKINNY-128-256. @@ -251,63 +237,6 @@ void skinny_128_256_encrypt_tk_full (const unsigned char key[32], unsigned char *output, const unsigned char *input); -/** - * \brief Number of rounds for SKINNY-128-128. - */ -#define SKINNY_128_128_ROUNDS 40 - -/** - * \brief Structure of the key schedule for SKINNY-128-128. - */ -typedef struct -{ - /** Words of the key schedule */ - uint32_t k[SKINNY_128_128_ROUNDS * 2]; - -} skinny_128_128_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-128. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - #ifdef __cplusplus } #endif diff --git a/romulus/Implementations/crypto_aead/romulusn3/rhys/internal-util.h b/romulus/Implementations/crypto_aead/romulusn3/rhys/internal-util.h index e79158c..e30166d 100644 --- a/romulus/Implementations/crypto_aead/romulusn3/rhys/internal-util.h +++ b/romulus/Implementations/crypto_aead/romulusn3/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/romulus/Implementations/crypto_aead/romulusn3/rhys/romulus.c b/romulus/Implementations/crypto_aead/romulusn3/rhys/romulus.c index be1c0fa..bb19cc5 100644 --- a/romulus/Implementations/crypto_aead/romulusn3/rhys/romulus.c +++ b/romulus/Implementations/crypto_aead/romulusn3/rhys/romulus.c @@ -116,14 +116,15 @@ static void romulus1_init (skinny_128_384_key_schedule_t *ks, const unsigned char *k, const unsigned char *npub) { - unsigned char TK[32]; + unsigned char TK[48]; + TK[0] = 0x01; /* Initialize the 56-bit LFSR counter */ + memset(TK + 1, 0, 15); if (npub) - memcpy(TK, npub, 16); + memcpy(TK + 16, npub, 16); else - memset(TK, 0, 16); - memcpy(TK + 16, k, 16); - skinny_128_384_init(ks, TK, sizeof(TK)); - ks->TK1[0] = 0x01; /* Initialize the 56-bit LFSR counter */ + memset(TK + 16, 0, 16); + memcpy(TK + 32, k, 16); + skinny_128_384_init(ks, TK); } /** @@ -138,14 +139,18 @@ static void romulus2_init (skinny_128_384_key_schedule_t *ks, const unsigned char *k, const unsigned char *npub) { - unsigned char TK[32]; - memcpy(TK, k, 16); - memset(TK + 16, 0, 16); - TK[16] = 0x01; /* Initialize the high 24 bits of the LFSR counter */ - skinny_128_384_init(ks, TK, sizeof(TK)); - ks->TK1[0] = 0x01; 
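The block of composed rotation macros added above follows directly from the comment that introduces it: on AVR only 1-bit rotations and rotations by a multiple of 8 bits are cheap, so every other count is built from the nearest byte rotation plus a few 1-bit fix-ups. For example, leftRotate13() is defined as a left rotate by 16 followed by three right rotates by 1. A tiny sanity check of two of those identities, written against plain shift-based rotations rather than the macros themselves:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t rotl(uint32_t x, unsigned n) { return (x << n) | (x >> (32U - n)); }
static uint32_t rotr(uint32_t x, unsigned n) { return (x >> n) | (x << (32U - n)); }

int main(void)
{
    uint32_t x = 0x0BADBEEFU;
    /* leftRotate13(x): rotate left by 16, then right by 1 three times */
    assert(rotl(x, 13) == rotr(rotr(rotr(rotl(x, 16), 1), 1), 1));
    /* leftRotate28(x): four 1-bit right rotates, since 28 = 32 - 4 */
    assert(rotl(x, 28) == rotr(rotr(rotr(rotr(x, 1), 1), 1), 1));
    printf("composed rotations agree with direct rotations\n");
    return 0;
}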
/* Initialize the low 24 bits of the LFSR counter */ - if (npub) - memcpy(ks->TK1 + 4, npub, 12); + unsigned char TK[48]; + TK[0] = 0x01; /* Initialize the low 24 bits of the LFSR counter */ + if (npub) { + TK[1] = TK[2] = TK[3] = 0; + memcpy(TK + 4, npub, 12); + } else { + memset(TK + 1, 0, 15); + } + memcpy(TK + 16, k, 16); + TK[32] = 0x01; /* Initialize the high 24 bits of the LFSR counter */ + memset(TK + 33, 0, 15); + skinny_128_384_init(ks, TK); } /** @@ -160,10 +165,16 @@ static void romulus3_init (skinny_128_256_key_schedule_t *ks, const unsigned char *k, const unsigned char *npub) { - skinny_128_256_init(ks, k, 16); - ks->TK1[0] = 0x01; /* Initialize the 24-bit LFSR counter */ - if (npub) - memcpy(ks->TK1 + 4, npub, 12); + unsigned char TK[32]; + TK[0] = 0x01; /* Initialize the 24-bit LFSR counter */ + if (npub) { + TK[1] = TK[2] = TK[3] = 0; + memcpy(TK + 4, npub, 12); + } else { + memset(TK + 1, 0, 15); + } + memcpy(TK + 16, k, 16); + skinny_128_256_init(ks, TK); } /** diff --git a/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/aead-common.c b/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
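Looking back at the romulus.c changes a little further up: each romulusN_init() now assembles the complete tweakey block up front and hands it to the SKINNY init function, instead of initializing with the key alone and patching ks->TK1 afterwards, because on AVR the new init functions simply copy the raw TK bytes. The helper below restates the 32-byte layout used by the new romulus3_init(); the function name is illustrative, the offsets match the patch:

#include <string.h>

/* Sketch of the 32-byte tweakey block that romulus3_init() assembles
 * before calling skinny_128_256_init():
 *
 *   TK[0]       0x01   low byte of the 24-bit block counter LFSR
 *   TK[1..3]    0x00   remaining counter bytes
 *   TK[4..15]   npub   96-bit nonce (zero if npub is NULL)
 *   TK[16..31]  k      128-bit key
 */
static void build_romulus3_tweakey(unsigned char TK[32],
                                   const unsigned char *k,
                                   const unsigned char *npub)
{
    memset(TK, 0, 16);
    TK[0] = 0x01;
    if (npub)
        memcpy(TK + 4, npub, 12);
    memcpy(TK + 16, k, 16);
}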
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/aead-common.h b/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/api.h b/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/api.h deleted file mode 100644 index 75fabd7..0000000 --- a/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 32 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 32 -#define CRYPTO_NOOVERLAP 1 diff --git a/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/encrypt.c b/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/encrypt.c deleted file mode 100644 index 9ce5559..0000000 --- a/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "saturnin.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return saturnin_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return saturnin_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/internal-util.h b/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - 
(((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. 
- * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/saturnin.c b/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/saturnin.c deleted file mode 100644 index 734fc69..0000000 --- a/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/saturnin.c +++ /dev/null @@ -1,781 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "saturnin.h" -#include "internal-util.h" -#include - -aead_cipher_t const saturnin_cipher = { - "SATURNIN-CTR-Cascade", - SATURNIN_KEY_SIZE, - SATURNIN_NONCE_SIZE, - SATURNIN_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - saturnin_aead_encrypt, - saturnin_aead_decrypt -}; - -aead_cipher_t const saturnin_short_cipher = { - "SATURNIN-Short", - SATURNIN_KEY_SIZE, - SATURNIN_NONCE_SIZE, - SATURNIN_TAG_SIZE, - AEAD_FLAG_NONE, - saturnin_short_aead_encrypt, - saturnin_short_aead_decrypt -}; - -aead_hash_algorithm_t const saturnin_hash_algorithm = { - "SATURNIN-Hash", - sizeof(saturnin_hash_state_t), - SATURNIN_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - saturnin_hash, - (aead_hash_init_t)saturnin_hash_init, - (aead_hash_update_t)saturnin_hash_update, - (aead_hash_finalize_t)saturnin_hash_finalize, - 0, /* absorb */ - 0 /* squeeze */ -}; - -/* Round constant tables for various combinations of rounds and domain_sep */ -static uint32_t const RC_10_1[] = { - 0x4eb026c2, 0x90595303, 0xaa8fe632, 0xfe928a92, 0x4115a419, - 0x93539532, 0x5db1cc4e, 0x541515ca, 0xbd1f55a8, 0x5a6e1a0d -}; -static uint32_t const RC_10_2[] = { - 0x4e4526b5, 0xa3565ff0, 0x0f8f20d8, 0x0b54bee1, 0x7d1a6c9d, - 0x17a6280a, 0xaa46c986, 0xc1199062, 0x182c5cde, 0xa00d53fe -}; -static uint32_t const RC_10_3[] = { - 0x4e162698, 0xb2535ba1, 0x6c8f9d65, 0x5816ad30, 0x691fd4fa, - 0x6bf5bcf9, 0xf8eb3525, 0xb21decfa, 0x7b3da417, 0xf62c94b4 -}; -static uint32_t const RC_10_4[] = { - 0x4faf265b, 0xc5484616, 0x45dcad21, 0xe08bd607, 0x0504fdb8, - 0x1e1f5257, 0x45fbc216, 0xeb529b1f, 0x52194e32, 0x5498c018 -}; -static uint32_t const RC_10_5[] = { - 0x4ffc2676, 0xd44d4247, 0x26dc109c, 0xb3c9c5d6, 0x110145df, - 0x624cc6a4, 0x17563eb5, 0x9856e787, 0x3108b6fb, 0x02b90752 -}; -static uint32_t const RC_10_6[] = { - 0x4f092601, 0xe7424eb4, 0x83dcd676, 0x460ff1a5, 0x2d0e8d5b, - 0xe6b97b9c, 0xe0a13b7d, 0x0d5a622f, 0x943bbf8d, 0xf8da4ea1 -}; -static uint32_t const RC_16_7[] = { - 0x3fba180c, 0x563ab9ab, 0x125ea5ef, 0x859da26c, 0xb8cf779b, - 0x7d4de793, 0x07efb49f, 0x8d525306, 0x1e08e6ab, 0x41729f87, - 0x8c4aef0a, 0x4aa0c9a7, 0xd93a95ef, 0xbb00d2af, 0xb62c5bf0, - 0x386d94d8 -}; -static uint32_t const RC_16_8[] = { - 0x3c9b19a7, 0xa9098694, 0x23f878da, 0xa7b647d3, 0x74fc9d78, - 0xeacaae11, 0x2f31a677, 0x4cc8c054, 0x2f51ca05, 0x5268f195, - 0x4f5b8a2b, 0xf614b4ac, 0xf1d95401, 0x764d2568, 0x6a493611, - 0x8eef9c3e -}; - -/* Rotate the 4-bit nibbles within a 16-bit word left */ -#define leftRotate4_N(a, mask1, bits1, mask2, bits2) \ - do { \ - uint32_t _temp = (a); \ - (a) = ((_temp & (mask1)) << (bits1)) | \ - ((_temp & ((mask1) ^ (uint32_t)0xFFFFU)) >> (4 - (bits1))) | \ - ((_temp & (((uint32_t)(mask2)) << 16)) << (bits2)) | \ - ((_temp & (((uint32_t)((mask2)) << 16) ^ 0xFFFF0000U)) >> (4 - (bits2))); \ - } while (0) - -/* Rotate 16-bit subwords left */ -#define leftRotate16_N(a, mask1, bits1, mask2, bits2) \ - do { \ - uint32_t _temp = (a); \ - (a) = ((_temp & (mask1)) << (bits1)) | \ - ((_temp & ((mask1) ^ (uint32_t)0xFFFFU)) >> (16 - (bits1))) | \ - ((_temp & (((uint32_t)(mask2)) << 16)) << (bits2)) | \ - ((_temp & (((uint32_t)((mask2)) << 16) ^ 0xFFFF0000U)) >> (16 - (bits2))); \ - } while (0) - -/* XOR the SATURNIN state with the key */ -#define saturnin_xor_key() \ - do { \ - for (index = 0; index < 8; ++index) \ - S[index] ^= K[index]; \ - } while (0) - -/* XOR the SATURNIN state with a rotated version of the key */ -#define saturnin_xor_key_rotated() \ - do { \ - for (index = 0; index < 8; ++index) \ - S[index] ^= K[index + 8]; \ - } while (0) - 
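For reference, the three meta-information blocks above gather the algorithm's parameters and entry points so that a caller can drive SATURNIN generically through the aead_cipher_t function pointers instead of calling saturnin_aead_encrypt() by name. A minimal sketch, assuming only the saturnin.h and aead-common.h headers from this implementation are on the include path; the buffer sizes mirror the key_len, nonce_len and tag_len fields:

#include <stdio.h>
#include "saturnin.h"

/* Encrypt a short message through the generic aead_cipher_t interface. */
static int demo_generic_encrypt(void)
{
    const aead_cipher_t *cipher = &saturnin_cipher;
    unsigned char key[SATURNIN_KEY_SIZE] = {0};     /* key_len  == 32 */
    unsigned char nonce[SATURNIN_NONCE_SIZE] = {0}; /* nonce_len == 16 */
    unsigned char msg[4] = {'t', 'e', 's', 't'};
    unsigned char ad[1] = {0};                      /* no associated data */
    unsigned char out[sizeof(msg) + SATURNIN_TAG_SIZE];
    unsigned long long outlen;

    if (cipher->encrypt(out, &outlen, msg, sizeof(msg),
                        ad, 0, NULL, nonce, key) != 0)
        return -1;
    printf("%s produced %llu bytes of ciphertext + tag\n",
           cipher->name, outlen);
    return 0;
}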
-/* Apply an SBOX layer for SATURNIN - definition from the specification */ -#define S_LAYER(a, b, c, d) \ - do { \ - (a) ^= (b) & (c); \ - (b) ^= (a) | (d); \ - (d) ^= (b) | (c); \ - (c) ^= (b) & (d); \ - (b) ^= (a) | (c); \ - (a) ^= (b) | (d); \ - } while (0) - -/* Apply an SBOX layer for SATURNIN in reverse */ -#define S_LAYER_INVERSE(a, b, c, d) \ - do { \ - (a) ^= (b) | (d); \ - (b) ^= (a) | (c); \ - (c) ^= (b) & (d); \ - (d) ^= (b) | (c); \ - (b) ^= (a) | (d); \ - (a) ^= (b) & (c); \ - } while (0) - -/** - * \brief Applies the SBOX to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_sbox(uint32_t S[8]) -{ - uint32_t a, b, c, d; - - /* PI_0 on the first half of the state */ - a = S[0]; b = S[1]; c = S[2]; d = S[3]; - S_LAYER(a, b, c, d); - S[0] = b; S[1] = c; S[2] = d; S[3] = a; - - /* PI_1 on the second half of the state */ - a = S[4]; b = S[5]; c = S[6]; d = S[7]; - S_LAYER(a, b, c, d); - S[4] = d; S[5] = b; S[6] = a; S[7] = c; -} - -/** - * \brief Applies the inverse of the SBOX to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_sbox_inverse(uint32_t S[8]) -{ - uint32_t a, b, c, d; - - /* PI_0 on the first half of the state */ - b = S[0]; c = S[1]; d = S[2]; a = S[3]; - S_LAYER_INVERSE(a, b, c, d); - S[0] = a; S[1] = b; S[2] = c; S[3] = d; - - /* PI_1 on the second half of the state */ - d = S[4]; b = S[5]; a = S[6]; c = S[7]; - S_LAYER_INVERSE(a, b, c, d); - S[4] = a; S[5] = b; S[6] = c; S[7] = d; -} - -/** - * \brief Applies the MDS matrix to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_mds(uint32_t S[8]) -{ - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t tmp; - - /* Load the state into temporary working variables */ - x0 = S[0]; x1 = S[1]; x2 = S[2]; x3 = S[3]; - x4 = S[4]; x5 = S[5]; x6 = S[6]; x7 = S[7]; - - /* Apply the MDS matrix to the state */ - #define SWAP(a) (((a) << 16) | ((a) >> 16)) - #define MUL(x0, x1, x2, x3, tmp) \ - do { \ - tmp = x0; x0 = x1; x1 = x2; x2 = x3; x3 = tmp ^ x0; \ - } while (0) - x0 ^= x4; x1 ^= x5; x2 ^= x6; x3 ^= x7; - MUL(x4, x5, x6, x7, tmp); - x4 ^= SWAP(x0); x5 ^= SWAP(x1); - x6 ^= SWAP(x2); x7 ^= SWAP(x3); - MUL(x0, x1, x2, x3, tmp); - MUL(x0, x1, x2, x3, tmp); - x0 ^= x4; x1 ^= x5; x2 ^= x6; x3 ^= x7; - x4 ^= SWAP(x0); x5 ^= SWAP(x1); - x6 ^= SWAP(x2); x7 ^= SWAP(x3); - - /* Store the temporary working variables back into the state */ - S[0] = x0; S[1] = x1; S[2] = x2; S[3] = x3; - S[4] = x4; S[5] = x5; S[6] = x6; S[7] = x7; -} - -/** - * \brief Applies the inverse of the MDS matrix to the SATURNIN state. - * - * \param S The state. 
- */ -static void saturnin_mds_inverse(uint32_t S[8]) -{ - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t tmp; - - /* Load the state into temporary working variables */ - x0 = S[0]; x1 = S[1]; x2 = S[2]; x3 = S[3]; - x4 = S[4]; x5 = S[5]; x6 = S[6]; x7 = S[7]; - - /* Apply the inverse of the MDS matrix to the state */ - #define MULINV(x0, x1, x2, x3, tmp) \ - do { \ - tmp = x3; x3 = x2; x2 = x1; x1 = x0; x0 = x1 ^ tmp; \ - } while (0) - x6 ^= SWAP(x2); x7 ^= SWAP(x3); - x4 ^= SWAP(x0); x5 ^= SWAP(x1); - x0 ^= x4; x1 ^= x5; x2 ^= x6; x3 ^= x7; - MULINV(x0, x1, x2, x3, tmp); - MULINV(x0, x1, x2, x3, tmp); - x6 ^= SWAP(x2); x7 ^= SWAP(x3); - x4 ^= SWAP(x0); x5 ^= SWAP(x1); - MULINV(x4, x5, x6, x7, tmp); - x0 ^= x4; x1 ^= x5; x2 ^= x6; x3 ^= x7; - - /* Store the temporary working variables back into the state */ - S[0] = x0; S[1] = x1; S[2] = x2; S[3] = x3; - S[4] = x4; S[5] = x5; S[6] = x6; S[7] = x7; -} - -/** - * \brief Applies the slice permutation to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_slice(uint32_t S[8]) -{ - leftRotate4_N(S[0], 0xFFFFU, 0, 0x3333, 2); - leftRotate4_N(S[1], 0xFFFFU, 0, 0x3333, 2); - leftRotate4_N(S[2], 0xFFFFU, 0, 0x3333, 2); - leftRotate4_N(S[3], 0xFFFFU, 0, 0x3333, 2); - - leftRotate4_N(S[4], 0x7777U, 1, 0x1111, 3); - leftRotate4_N(S[5], 0x7777U, 1, 0x1111, 3); - leftRotate4_N(S[6], 0x7777U, 1, 0x1111, 3); - leftRotate4_N(S[7], 0x7777U, 1, 0x1111, 3); -} - -/** - * \brief Applies the inverse of the slice permutation to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_slice_inverse(uint32_t S[8]) -{ - leftRotate4_N(S[0], 0xFFFFU, 0, 0x3333, 2); - leftRotate4_N(S[1], 0xFFFFU, 0, 0x3333, 2); - leftRotate4_N(S[2], 0xFFFFU, 0, 0x3333, 2); - leftRotate4_N(S[3], 0xFFFFU, 0, 0x3333, 2); - - leftRotate4_N(S[4], 0x1111U, 3, 0x7777, 1); - leftRotate4_N(S[5], 0x1111U, 3, 0x7777, 1); - leftRotate4_N(S[6], 0x1111U, 3, 0x7777, 1); - leftRotate4_N(S[7], 0x1111U, 3, 0x7777, 1); -} - -/** - * \brief Applies the sheet permutation to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_sheet(uint32_t S[8]) -{ - leftRotate16_N(S[0], 0xFFFFU, 0, 0x00FF, 8); - leftRotate16_N(S[1], 0xFFFFU, 0, 0x00FF, 8); - leftRotate16_N(S[2], 0xFFFFU, 0, 0x00FF, 8); - leftRotate16_N(S[3], 0xFFFFU, 0, 0x00FF, 8); - - leftRotate16_N(S[4], 0x0FFFU, 4, 0x000F, 12); - leftRotate16_N(S[5], 0x0FFFU, 4, 0x000F, 12); - leftRotate16_N(S[6], 0x0FFFU, 4, 0x000F, 12); - leftRotate16_N(S[7], 0x0FFFU, 4, 0x000F, 12); -} - -/** - * \brief Applies the inverse of the sheet permutation to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_sheet_inverse(uint32_t S[8]) -{ - leftRotate16_N(S[0], 0xFFFFU, 0, 0x00FF, 8); - leftRotate16_N(S[1], 0xFFFFU, 0, 0x00FF, 8); - leftRotate16_N(S[2], 0xFFFFU, 0, 0x00FF, 8); - leftRotate16_N(S[3], 0xFFFFU, 0, 0x00FF, 8); - - leftRotate16_N(S[4], 0x000FU, 12, 0x0FFF, 4); - leftRotate16_N(S[5], 0x000FU, 12, 0x0FFF, 4); - leftRotate16_N(S[6], 0x000FU, 12, 0x0FFF, 4); - leftRotate16_N(S[7], 0x000FU, 12, 0x0FFF, 4); -} - -/** - * \brief Encrypts a 256-bit block with the SATURNIN block cipher. - * - * \param output Ciphertext output block, 32 bytes. - * \param input Plaintext input block, 32 bytes. - * \param key Points to the 32 byte key for the block cipher. - * \param rounds Number of rounds to perform. - * \param RC Round constants to use for domain separation. - * - * The \a input and \a output buffers can be the same. 
- * - * \sa saturnin_block_decrypt() - */ -static void saturnin_block_encrypt - (unsigned char *output, const unsigned char *input, - const unsigned char *key, unsigned rounds, const uint32_t *RC) -{ - uint32_t K[16]; - uint32_t S[8]; - uint32_t temp; - unsigned index; - - /* Unpack the key and the input block */ - for (index = 0; index < 16; index += 2) { - temp = ((uint32_t)(key[index])) | - (((uint32_t)(key[index + 1])) << 8) | - (((uint32_t)(key[index + 16])) << 16) | - (((uint32_t)(key[index + 17])) << 24); - K[index / 2] = temp; - K[8 + (index / 2)] = ((temp & 0x001F001FU) << 11) | - ((temp >> 5) & 0x07FF07FFU); - S[index / 2] = ((uint32_t)(input[index])) | - (((uint32_t)(input[index + 1])) << 8) | - (((uint32_t)(input[index + 16])) << 16) | - (((uint32_t)(input[index + 17])) << 24); - } - - /* XOR the key into the state */ - saturnin_xor_key(); - - /* Perform all encryption rounds */ - for (; rounds > 0; rounds -= 2, RC += 2) { - saturnin_sbox(S); - saturnin_mds(S); - saturnin_sbox(S); - saturnin_slice(S); - saturnin_mds(S); - saturnin_slice_inverse(S); - S[0] ^= RC[0]; - saturnin_xor_key_rotated(); - - saturnin_sbox(S); - saturnin_mds(S); - saturnin_sbox(S); - saturnin_sheet(S); - saturnin_mds(S); - saturnin_sheet_inverse(S); - S[0] ^= RC[1]; - saturnin_xor_key(); - } - - /* Encode the state into the output block */ - for (index = 0; index < 16; index += 2) { - temp = S[index / 2]; - output[index] = (uint8_t)temp; - output[index + 1] = (uint8_t)(temp >> 8); - output[index + 16] = (uint8_t)(temp >> 16); - output[index + 17] = (uint8_t)(temp >> 24); - } -} - -/** - * \brief Decrypts a 256-bit block with the SATURNIN block cipher. - * - * \param output Plaintext output block, 32 bytes. - * \param input Ciphertext input block, 32 bytes. - * \param key Points to the 32 byte key for the block cipher. - * \param rounds Number of rounds to perform. - * \param RC Round constants to use for domain separation. - * - * The \a input and \a output buffers can be the same. 
- * - * \sa saturnin_block_encrypt() - */ -static void saturnin_block_decrypt - (unsigned char *output, const unsigned char *input, - const unsigned char *key, unsigned rounds, const uint32_t *RC) -{ - uint32_t K[16]; - uint32_t S[8]; - uint32_t temp; - unsigned index; - - /* Unpack the key and the input block */ - for (index = 0; index < 16; index += 2) { - temp = ((uint32_t)(key[index])) | - (((uint32_t)(key[index + 1])) << 8) | - (((uint32_t)(key[index + 16])) << 16) | - (((uint32_t)(key[index + 17])) << 24); - K[index / 2] = temp; - K[8 + (index / 2)] = ((temp & 0x001F001FU) << 11) | - ((temp >> 5) & 0x07FF07FFU); - S[index / 2] = ((uint32_t)(input[index])) | - (((uint32_t)(input[index + 1])) << 8) | - (((uint32_t)(input[index + 16])) << 16) | - (((uint32_t)(input[index + 17])) << 24); - } - - /* Perform all decryption rounds */ - RC += rounds - 2; - for (; rounds > 0; rounds -= 2, RC -= 2) { - saturnin_xor_key(); - S[0] ^= RC[1]; - saturnin_sheet(S); - saturnin_mds_inverse(S); - saturnin_sheet_inverse(S); - saturnin_sbox_inverse(S); - saturnin_mds_inverse(S); - saturnin_sbox_inverse(S); - - saturnin_xor_key_rotated(); - S[0] ^= RC[0]; - saturnin_slice(S); - saturnin_mds_inverse(S); - saturnin_slice_inverse(S); - saturnin_sbox_inverse(S); - saturnin_mds_inverse(S); - saturnin_sbox_inverse(S); - } - - /* XOR the key into the state */ - saturnin_xor_key(); - - /* Encode the state into the output block */ - for (index = 0; index < 16; index += 2) { - temp = S[index / 2]; - output[index] = (uint8_t)temp; - output[index + 1] = (uint8_t)(temp >> 8); - output[index + 16] = (uint8_t)(temp >> 16); - output[index + 17] = (uint8_t)(temp >> 24); - } -} - -/** - * \brief Encrypts a 256-bit block with the SATURNIN block cipher and - * then XOR's itself to generate a new key. - * - * \param block Block to be encrypted and then XOR'ed with itself. - * \param key Points to the 32 byte key for the block cipher. - * \param rounds Number of rounds to perform. - * \param RC Round constants to use for domain separation. - */ -void saturnin_block_encrypt_xor - (const unsigned char *block, unsigned char *key, - unsigned rounds, const uint32_t *RC) -{ - unsigned char temp[32]; - saturnin_block_encrypt(temp, block, key, rounds, RC); - lw_xor_block_2_src(key, block, temp, 32); -} - -/** - * \brief Encrypts (or decrypts) a data packet in CTR mode. - * - * \param c Output ciphertext buffer. - * \param m Input plaintext buffer. - * \param mlen Length of the plaintext in bytes. - * \param k Points to the 32-byte key. - * \param block Points to the pre-formatted nonce block. - */ -static void saturnin_ctr_encrypt - (unsigned char *c, const unsigned char *m, unsigned long long mlen, - const unsigned char *k, unsigned char *block) -{ - /* Note: Specification requires a 95-bit counter but we only use 32-bit. - * This limits the maximum packet size to 128Gb. That should be OK */ - uint32_t counter = 1; - unsigned char out[32]; - while (mlen >= 32) { - be_store_word32(block + 28, counter); - saturnin_block_encrypt(out, block, k, 10, RC_10_1); - lw_xor_block_2_src(c, out, m, 32); - c += 32; - m += 32; - mlen -= 32; - ++counter; - } - if (mlen > 0) { - be_store_word32(block + 28, counter); - saturnin_block_encrypt(out, block, k, 10, RC_10_1); - lw_xor_block_2_src(c, out, m, (unsigned)mlen); - } -} - -/** - * \brief Pads an authenticates a message. - * - * \param tag Points to the authentication tag. - * \param block Temporary block of 32 bytes from the caller. - * \param m Points to the message to be authenticated. 
- * \param mlen Length of the message to be authenticated in bytes. - * \param rounds Number of rounds to perform. - * \param RC1 Round constants to use for domain separation on full blocks. - * \param RC2 Round constants to use for domain separation on the last block. - */ -static void saturnin_authenticate - (unsigned char *tag, unsigned char *block, - const unsigned char *m, unsigned long long mlen, - unsigned rounds, const uint32_t *RC1, const uint32_t *RC2) -{ - unsigned temp; - while (mlen >= 32) { - saturnin_block_encrypt_xor(m, tag, rounds, RC1); - m += 32; - mlen -= 32; - } - temp = (unsigned)mlen; - memcpy(block, m, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, 31 - temp); - saturnin_block_encrypt_xor(block, tag, rounds, RC2); -} - -int saturnin_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char block[32]; - unsigned char *tag; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SATURNIN_TAG_SIZE; - - /* Format the input block from the padded nonce */ - memcpy(block, npub, 16); - block[16] = 0x80; - memset(block + 17, 0, 15); - - /* Encrypt the plaintext in counter mode to produce the ciphertext */ - saturnin_ctr_encrypt(c, m, mlen, k, block); - - /* Set the counter back to zero and then encrypt the nonce */ - tag = c + mlen; - memcpy(tag, k, 32); - memset(block + 17, 0, 15); - saturnin_block_encrypt_xor(block, tag, 10, RC_10_2); - - /* Authenticate the associated data and the ciphertext */ - saturnin_authenticate(tag, block, ad, adlen, 10, RC_10_2, RC_10_3); - saturnin_authenticate(tag, block, c, mlen, 10, RC_10_4, RC_10_5); - return 0; -} - -int saturnin_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char block[32]; - unsigned char tag[32]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SATURNIN_TAG_SIZE) - return -1; - *mlen = clen - SATURNIN_TAG_SIZE; - - /* Format the input block from the padded nonce */ - memcpy(block, npub, 16); - block[16] = 0x80; - memset(block + 17, 0, 15); - - /* Encrypt the nonce to initialize the authentication phase */ - memcpy(tag, k, 32); - saturnin_block_encrypt_xor(block, tag, 10, RC_10_2); - - /* Authenticate the associated data and the ciphertext */ - saturnin_authenticate(tag, block, ad, adlen, 10, RC_10_2, RC_10_3); - saturnin_authenticate(tag, block, c, *mlen, 10, RC_10_4, RC_10_5); - - /* Decrypt the ciphertext in counter mode to produce the plaintext */ - memcpy(block, npub, 16); - block[16] = 0x80; - memset(block + 17, 0, 15); - saturnin_ctr_encrypt(m, c, *mlen, k, block); - - /* Check the authentication tag at the end of the message */ - return aead_check_tag - (m, *mlen, tag, c + *mlen, SATURNIN_TAG_SIZE); -} - -int saturnin_short_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char block[32]; - unsigned temp; - (void)nsec; - (void)ad; - - /* Validate the parameters: no associated data allowed and m <= 15 bytes */ 
- if (adlen > 0 || mlen > 15) - return -2; - - /* Format the input block from the nonce and plaintext */ - temp = (unsigned)mlen; - memcpy(block, npub, 16); - memcpy(block + 16, m, temp); - block[16 + temp] = 0x80; /* Padding */ - memset(block + 17 + temp, 0, 15 - temp); - - /* Encrypt the input block to produce the output ciphertext */ - saturnin_block_encrypt(c, block, k, 10, RC_10_6); - *clen = 32; - return 0; -} - -int saturnin_short_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char block[32]; - unsigned check1, check2, len; - int index, result; - (void)nsec; - (void)ad; - - /* Validate the parameters: no associated data and c is always 32 bytes */ - if (adlen > 0) - return -2; - if (clen != 32) - return -1; - - /* Decrypt the ciphertext block */ - saturnin_block_decrypt(block, c, k, 10, RC_10_6); - - /* Verify that the output block starts with the nonce and that it is - * padded correctly. We need to do this very carefully to avoid leaking - * any information that could be used in a padding oracle attack. Use the - * same algorithm as the reference implementation of SATURNIN-Short */ - check1 = 0; - for (index = 0; index < 16; ++index) - check1 |= npub[index] ^ block[index]; - check2 = 0xFF; - len = 0; - for (index = 15; index >= 0; --index) { - unsigned temp = block[16 + index]; - unsigned temp2 = check2 & -(1 - (((temp ^ 0x80) + 0xFF) >> 8)); - len |= temp2 & (unsigned)index; - check2 &= ~temp2; - check1 |= check2 & ((temp + 0xFF) >> 8); - } - check1 |= check2; - - /* At this point, check1 is zero if the nonce and plaintext are good, - * or non-zero if there was an error in the decrypted data */ - result = (((int)check1) - 1) >> 8; - - /* The "result" is -1 if the data is good or zero if the data is invalid. - * Copy either the plaintext or zeroes to the output buffer. We assume - * that the output buffer has space for up to 15 bytes. 
This may return - * some of the padding to the caller but as long as they restrict - * themselves to the first *mlen bytes then it shouldn't be a problem */ - for (index = 0; index < 15; ++index) - m[index] = block[16 + index] & result; - *mlen = len; - return ~result; -} - -int saturnin_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - unsigned char tag[32]; - unsigned char block[32]; - memset(tag, 0, sizeof(tag)); - saturnin_authenticate(tag, block, in, inlen, 16, RC_16_7, RC_16_8); - memcpy(out, tag, 32); - return 0; -} - -void saturnin_hash_init(saturnin_hash_state_t *state) -{ - memset(state, 0, sizeof(saturnin_hash_state_t)); -} - -void saturnin_hash_update - (saturnin_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - unsigned temp; - - /* Handle the partial left-over block from last time */ - if (state->s.count) { - temp = 32 - state->s.count; - if (temp > inlen) { - temp = (unsigned)inlen; - memcpy(state->s.block + state->s.count, in, temp); - state->s.count += temp; - return; - } - memcpy(state->s.block + state->s.count, in, temp); - state->s.count = 0; - in += temp; - inlen -= temp; - saturnin_block_encrypt_xor(state->s.block, state->s.hash, 16, RC_16_7); - } - - /* Process full blocks that are aligned at state->s.count == 0 */ - while (inlen >= 32) { - saturnin_block_encrypt_xor(in, state->s.hash, 16, RC_16_7); - in += 32; - inlen -= 32; - } - - /* Process the left-over block at the end of the input */ - temp = (unsigned)inlen; - memcpy(state->s.block, in, temp); - state->s.count = temp; -} - -void saturnin_hash_finalize - (saturnin_hash_state_t *state, unsigned char *out) -{ - /* Pad the final block */ - state->s.block[state->s.count] = 0x80; - memset(state->s.block + state->s.count + 1, 0, 31 - state->s.count); - - /* Generate the final hash value */ - saturnin_block_encrypt_xor(state->s.block, state->s.hash, 16, RC_16_8); - memcpy(out, state->s.hash, 32); -} diff --git a/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/saturnin.h b/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/saturnin.h deleted file mode 100644 index 873d950..0000000 --- a/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys-avr/saturnin.h +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LWCRYPTO_SATURNIN_H -#define LWCRYPTO_SATURNIN_H - -#include "aead-common.h" - -/** - * \file saturnin.h - * \brief Saturnin authenticated encryption algorithm. - * - * The Saturnin family consists of two members: SATURNIN-CTR-Cascade and - * SATURNIN-Short. Both take a 256-bit key and a 128-bit nonce. - * Internally they use a 256-bit block cipher similar in construction to AES. - * - * SATURNIN-Short does not support associated data or plaintext packets - * with more than 15 bytes. This makes it very efficient on short packets - * with only a single block operation involved. - * - * This implementation of SATURNIN-Short will return an error if the - * caller supplies associated data or more than 15 bytes of plaintext. - * - * References: https://project.inria.fr/saturnin/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all SATURNIN family members. - */ -#define SATURNIN_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for SATURNIN-CTR-Cascade or the - * total size of the ciphertext for SATURNIN-Short. - */ -#define SATURNIN_TAG_SIZE 32 - -/** - * \brief Size of the nonce for all SATURNIN family members. - */ -#define SATURNIN_NONCE_SIZE 16 - -/** - * \brief Size of the hash for SATURNIN-Hash. - */ -#define SATURNIN_HASH_SIZE 32 - -/** - * \brief State information for SATURNIN-Hash incremental modes. - */ -typedef union -{ - struct { - unsigned char hash[32]; /**< Current hash state */ - unsigned char block[32]; /**< Left-over block data from last update */ - unsigned char count; /**< Number of bytes in the current block */ - unsigned char mode; /**< Hash mode: 0 for absorb, 1 for squeeze */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} saturnin_hash_state_t; - -/** - * \brief Meta-information block for the SATURNIN-CTR-Cascade cipher. - */ -extern aead_cipher_t const saturnin_cipher; - -/** - * \brief Meta-information block for the SATURNIN-Short cipher. - */ -extern aead_cipher_t const saturnin_short_cipher; - -/** - * \brief Meta-information block for SATURNIN-Hash. - */ -extern aead_hash_algorithm_t const saturnin_hash_algorithm; - -/** - * \brief Encrypts and authenticates a packet with SATURNIN-CTR-Cascade. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 32 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa saturnin_aead_decrypt() - */ -int saturnin_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SATURNIN-CTR-Cascade. - * - * \param m Buffer to receive the plaintext message on output. 
- * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 32 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa saturnin_aead_encrypt() - */ -int saturnin_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SATURNIN-Short. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which is always 32. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes, which must be - * less than or equal to 15 bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes, which must be zero. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or -2 if the caller supplied too many bytes of - * plaintext or they supplied associated data. - * - * \sa saturnin_short_aead_decrypt() - */ -int saturnin_short_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SATURNIN-Short. - * - * \param m Buffer to receive the plaintext message on output. There must - * be at least 15 bytes of space in this buffer even if the caller expects - * to receive less data than that. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext to decrypt. - * \param clen Length of the input data in bytes, which must be 32. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes, which must be zero. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or -2 if the caller supplied associated data. 
- * - * \sa saturnin_short_aead_encrypt() - */ -int saturnin_short_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with SATURNIN to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * SATURNIN_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int saturnin_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an SATURNIN-Hash hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa saturnin_hash_update(), saturnin_hash_finalize(), saturnin_hash() - */ -void saturnin_hash_init(saturnin_hash_state_t *state); - -/** - * \brief Updates an SATURNIN-Hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa saturnin_hash_init(), saturnin_hash_finalize() - */ -void saturnin_hash_update - (saturnin_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an SATURNIN-Hash hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 32-byte hash value. - * - * \sa saturnin_hash_init(), saturnin_hash_update() - */ -void saturnin_hash_finalize - (saturnin_hash_state_t *state, unsigned char *out); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys/internal-util.h b/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys/internal-util.h index e79158c..e30166d 100644 --- a/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys/internal-util.h +++ b/saturnin/Implementations/crypto_aead/saturninctrcascadev2/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/aead-common.c b/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/aead-common.h b/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Absorbs more input data into an XOF state. - * - * \param state XOF state to be updated. 
- * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * provide the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. 
- * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/api.h b/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/api.h deleted file mode 100644 index 75fabd7..0000000 --- a/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 32 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 32 -#define CRYPTO_NOOVERLAP 1 diff --git a/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/encrypt.c b/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/encrypt.c deleted file mode 100644 index 29d7d06..0000000 --- a/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "saturnin.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return saturnin_short_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return saturnin_short_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/internal-util.h b/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include <stdint.h> - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianness of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - 
(ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination while at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. 
This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/saturnin.c b/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/saturnin.c deleted file mode 100644 index 734fc69..0000000 --- a/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/saturnin.c +++ /dev/null @@ -1,781 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "saturnin.h" -#include "internal-util.h" -#include - -aead_cipher_t const saturnin_cipher = { - "SATURNIN-CTR-Cascade", - SATURNIN_KEY_SIZE, - SATURNIN_NONCE_SIZE, - SATURNIN_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - saturnin_aead_encrypt, - saturnin_aead_decrypt -}; - -aead_cipher_t const saturnin_short_cipher = { - "SATURNIN-Short", - SATURNIN_KEY_SIZE, - SATURNIN_NONCE_SIZE, - SATURNIN_TAG_SIZE, - AEAD_FLAG_NONE, - saturnin_short_aead_encrypt, - saturnin_short_aead_decrypt -}; - -aead_hash_algorithm_t const saturnin_hash_algorithm = { - "SATURNIN-Hash", - sizeof(saturnin_hash_state_t), - SATURNIN_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - saturnin_hash, - (aead_hash_init_t)saturnin_hash_init, - (aead_hash_update_t)saturnin_hash_update, - (aead_hash_finalize_t)saturnin_hash_finalize, - 0, /* absorb */ - 0 /* squeeze */ -}; - -/* Round constant tables for various combinations of rounds and domain_sep */ -static uint32_t const RC_10_1[] = { - 0x4eb026c2, 0x90595303, 0xaa8fe632, 0xfe928a92, 0x4115a419, - 0x93539532, 0x5db1cc4e, 0x541515ca, 0xbd1f55a8, 0x5a6e1a0d -}; -static uint32_t const RC_10_2[] = { - 0x4e4526b5, 0xa3565ff0, 0x0f8f20d8, 0x0b54bee1, 0x7d1a6c9d, - 0x17a6280a, 0xaa46c986, 0xc1199062, 0x182c5cde, 0xa00d53fe -}; -static uint32_t const RC_10_3[] = { - 0x4e162698, 0xb2535ba1, 0x6c8f9d65, 0x5816ad30, 0x691fd4fa, - 0x6bf5bcf9, 0xf8eb3525, 0xb21decfa, 0x7b3da417, 0xf62c94b4 -}; -static uint32_t const RC_10_4[] = { - 0x4faf265b, 0xc5484616, 0x45dcad21, 0xe08bd607, 0x0504fdb8, - 0x1e1f5257, 0x45fbc216, 0xeb529b1f, 0x52194e32, 0x5498c018 -}; -static uint32_t const RC_10_5[] = { - 0x4ffc2676, 0xd44d4247, 0x26dc109c, 0xb3c9c5d6, 0x110145df, - 0x624cc6a4, 0x17563eb5, 0x9856e787, 0x3108b6fb, 0x02b90752 -}; -static uint32_t const RC_10_6[] = { - 0x4f092601, 0xe7424eb4, 0x83dcd676, 0x460ff1a5, 0x2d0e8d5b, - 0xe6b97b9c, 0xe0a13b7d, 0x0d5a622f, 0x943bbf8d, 0xf8da4ea1 -}; -static uint32_t const RC_16_7[] = { - 0x3fba180c, 0x563ab9ab, 0x125ea5ef, 0x859da26c, 0xb8cf779b, - 0x7d4de793, 0x07efb49f, 0x8d525306, 0x1e08e6ab, 0x41729f87, - 0x8c4aef0a, 0x4aa0c9a7, 0xd93a95ef, 0xbb00d2af, 0xb62c5bf0, - 0x386d94d8 -}; -static uint32_t const RC_16_8[] = { - 0x3c9b19a7, 0xa9098694, 0x23f878da, 0xa7b647d3, 0x74fc9d78, - 0xeacaae11, 0x2f31a677, 0x4cc8c054, 0x2f51ca05, 0x5268f195, - 0x4f5b8a2b, 0xf614b4ac, 0xf1d95401, 0x764d2568, 0x6a493611, - 0x8eef9c3e -}; - -/* Rotate the 4-bit nibbles within a 16-bit word left */ -#define leftRotate4_N(a, mask1, bits1, mask2, bits2) \ - do { \ - uint32_t _temp = (a); \ - (a) = ((_temp & (mask1)) << (bits1)) | \ - ((_temp & ((mask1) ^ (uint32_t)0xFFFFU)) >> (4 - (bits1))) | \ - ((_temp & (((uint32_t)(mask2)) << 16)) << (bits2)) | \ - ((_temp & (((uint32_t)((mask2)) << 16) ^ 0xFFFF0000U)) >> (4 - (bits2))); \ - } while (0) - -/* Rotate 16-bit subwords left */ -#define leftRotate16_N(a, mask1, bits1, mask2, bits2) \ - do { \ - uint32_t _temp = (a); \ - (a) = ((_temp & (mask1)) << (bits1)) | \ - ((_temp & ((mask1) ^ (uint32_t)0xFFFFU)) >> (16 - (bits1))) | \ - ((_temp & (((uint32_t)(mask2)) << 16)) << (bits2)) | \ - ((_temp & (((uint32_t)((mask2)) << 16) ^ 0xFFFF0000U)) >> (16 - (bits2))); \ - } while (0) - -/* XOR the SATURNIN state with the key */ -#define saturnin_xor_key() \ - do { \ - for (index = 0; index < 8; ++index) \ - S[index] ^= K[index]; \ - } while (0) - -/* XOR the SATURNIN state with a rotated version of the key */ -#define saturnin_xor_key_rotated() \ - do { \ - for (index = 0; index < 8; ++index) \ - S[index] ^= K[index + 8]; \ - } while (0) - 
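The saturnin_xor_key_rotated() macro above consumes K[8..15], which the block cipher code later in this hunk derives from each base key word as ((k & 0x001F001F) << 11) | ((k >> 5) & 0x07FF07FF); that is simply a left rotation by 11 applied independently to the two packed 16-bit halves. A minimal standalone sketch checking that equivalence (the helper and test names are illustrative, not part of the library):

#include <stdint.h>
#include <stdio.h>

/* Rotate each 16-bit half of a 32-bit word left by 11 bits,
 * using the same masked-shift form as the key schedule. */
static uint32_t rotate_halves_left11(uint32_t k)
{
    return ((k & 0x001F001FU) << 11) | ((k >> 5) & 0x07FF07FFU);
}

/* Reference: an ordinary per-lane 16-bit rotation. */
static uint16_t rotl16(uint16_t x, unsigned bits)
{
    return (uint16_t)((x << bits) | (x >> (16U - bits)));
}

int main(void)
{
    uint32_t k = 0x89ABCDEFU;
    uint32_t fast = rotate_halves_left11(k);
    uint32_t ref = ((uint32_t)rotl16((uint16_t)(k >> 16), 11) << 16) |
                   rotl16((uint16_t)k, 11);
    printf("%s\n", fast == ref ? "match" : "mismatch");
    return fast == ref ? 0 : 1;
}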
-/* Apply an SBOX layer for SATURNIN - definition from the specification */ -#define S_LAYER(a, b, c, d) \ - do { \ - (a) ^= (b) & (c); \ - (b) ^= (a) | (d); \ - (d) ^= (b) | (c); \ - (c) ^= (b) & (d); \ - (b) ^= (a) | (c); \ - (a) ^= (b) | (d); \ - } while (0) - -/* Apply an SBOX layer for SATURNIN in reverse */ -#define S_LAYER_INVERSE(a, b, c, d) \ - do { \ - (a) ^= (b) | (d); \ - (b) ^= (a) | (c); \ - (c) ^= (b) & (d); \ - (d) ^= (b) | (c); \ - (b) ^= (a) | (d); \ - (a) ^= (b) & (c); \ - } while (0) - -/** - * \brief Applies the SBOX to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_sbox(uint32_t S[8]) -{ - uint32_t a, b, c, d; - - /* PI_0 on the first half of the state */ - a = S[0]; b = S[1]; c = S[2]; d = S[3]; - S_LAYER(a, b, c, d); - S[0] = b; S[1] = c; S[2] = d; S[3] = a; - - /* PI_1 on the second half of the state */ - a = S[4]; b = S[5]; c = S[6]; d = S[7]; - S_LAYER(a, b, c, d); - S[4] = d; S[5] = b; S[6] = a; S[7] = c; -} - -/** - * \brief Applies the inverse of the SBOX to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_sbox_inverse(uint32_t S[8]) -{ - uint32_t a, b, c, d; - - /* PI_0 on the first half of the state */ - b = S[0]; c = S[1]; d = S[2]; a = S[3]; - S_LAYER_INVERSE(a, b, c, d); - S[0] = a; S[1] = b; S[2] = c; S[3] = d; - - /* PI_1 on the second half of the state */ - d = S[4]; b = S[5]; a = S[6]; c = S[7]; - S_LAYER_INVERSE(a, b, c, d); - S[4] = a; S[5] = b; S[6] = c; S[7] = d; -} - -/** - * \brief Applies the MDS matrix to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_mds(uint32_t S[8]) -{ - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t tmp; - - /* Load the state into temporary working variables */ - x0 = S[0]; x1 = S[1]; x2 = S[2]; x3 = S[3]; - x4 = S[4]; x5 = S[5]; x6 = S[6]; x7 = S[7]; - - /* Apply the MDS matrix to the state */ - #define SWAP(a) (((a) << 16) | ((a) >> 16)) - #define MUL(x0, x1, x2, x3, tmp) \ - do { \ - tmp = x0; x0 = x1; x1 = x2; x2 = x3; x3 = tmp ^ x0; \ - } while (0) - x0 ^= x4; x1 ^= x5; x2 ^= x6; x3 ^= x7; - MUL(x4, x5, x6, x7, tmp); - x4 ^= SWAP(x0); x5 ^= SWAP(x1); - x6 ^= SWAP(x2); x7 ^= SWAP(x3); - MUL(x0, x1, x2, x3, tmp); - MUL(x0, x1, x2, x3, tmp); - x0 ^= x4; x1 ^= x5; x2 ^= x6; x3 ^= x7; - x4 ^= SWAP(x0); x5 ^= SWAP(x1); - x6 ^= SWAP(x2); x7 ^= SWAP(x3); - - /* Store the temporary working variables back into the state */ - S[0] = x0; S[1] = x1; S[2] = x2; S[3] = x3; - S[4] = x4; S[5] = x5; S[6] = x6; S[7] = x7; -} - -/** - * \brief Applies the inverse of the MDS matrix to the SATURNIN state. - * - * \param S The state. 
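The inverse S-box layer above is the forward layer with the six XOR steps undone in reverse order, so composing the two must give the identity. A small self-contained check (the macro bodies are copied from the hunk above; the test harness itself is illustrative):

#include <stdint.h>
#include <stdio.h>

#define S_LAYER(a, b, c, d) \
    do { \
        (a) ^= (b) & (c); \
        (b) ^= (a) | (d); \
        (d) ^= (b) | (c); \
        (c) ^= (b) & (d); \
        (b) ^= (a) | (c); \
        (a) ^= (b) | (d); \
    } while (0)

#define S_LAYER_INVERSE(a, b, c, d) \
    do { \
        (a) ^= (b) | (d); \
        (b) ^= (a) | (c); \
        (c) ^= (b) & (d); \
        (d) ^= (b) | (c); \
        (b) ^= (a) | (d); \
        (a) ^= (b) & (c); \
    } while (0)

int main(void)
{
    uint32_t a = 0x01234567U, b = 0x89ABCDEFU, c = 0xF0E1D2C3U, d = 0xB4A59687U;
    uint32_t a0 = a, b0 = b, c0 = c, d0 = d;
    S_LAYER(a, b, c, d);
    S_LAYER_INVERSE(a, b, c, d);
    int ok = (a == a0 && b == b0 && c == c0 && d == d0);
    printf("%s\n", ok ? "identity" : "broken");
    return ok ? 0 : 1;
}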
- */ -static void saturnin_mds_inverse(uint32_t S[8]) -{ - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t tmp; - - /* Load the state into temporary working variables */ - x0 = S[0]; x1 = S[1]; x2 = S[2]; x3 = S[3]; - x4 = S[4]; x5 = S[5]; x6 = S[6]; x7 = S[7]; - - /* Apply the inverse of the MDS matrix to the state */ - #define MULINV(x0, x1, x2, x3, tmp) \ - do { \ - tmp = x3; x3 = x2; x2 = x1; x1 = x0; x0 = x1 ^ tmp; \ - } while (0) - x6 ^= SWAP(x2); x7 ^= SWAP(x3); - x4 ^= SWAP(x0); x5 ^= SWAP(x1); - x0 ^= x4; x1 ^= x5; x2 ^= x6; x3 ^= x7; - MULINV(x0, x1, x2, x3, tmp); - MULINV(x0, x1, x2, x3, tmp); - x6 ^= SWAP(x2); x7 ^= SWAP(x3); - x4 ^= SWAP(x0); x5 ^= SWAP(x1); - MULINV(x4, x5, x6, x7, tmp); - x0 ^= x4; x1 ^= x5; x2 ^= x6; x3 ^= x7; - - /* Store the temporary working variables back into the state */ - S[0] = x0; S[1] = x1; S[2] = x2; S[3] = x3; - S[4] = x4; S[5] = x5; S[6] = x6; S[7] = x7; -} - -/** - * \brief Applies the slice permutation to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_slice(uint32_t S[8]) -{ - leftRotate4_N(S[0], 0xFFFFU, 0, 0x3333, 2); - leftRotate4_N(S[1], 0xFFFFU, 0, 0x3333, 2); - leftRotate4_N(S[2], 0xFFFFU, 0, 0x3333, 2); - leftRotate4_N(S[3], 0xFFFFU, 0, 0x3333, 2); - - leftRotate4_N(S[4], 0x7777U, 1, 0x1111, 3); - leftRotate4_N(S[5], 0x7777U, 1, 0x1111, 3); - leftRotate4_N(S[6], 0x7777U, 1, 0x1111, 3); - leftRotate4_N(S[7], 0x7777U, 1, 0x1111, 3); -} - -/** - * \brief Applies the inverse of the slice permutation to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_slice_inverse(uint32_t S[8]) -{ - leftRotate4_N(S[0], 0xFFFFU, 0, 0x3333, 2); - leftRotate4_N(S[1], 0xFFFFU, 0, 0x3333, 2); - leftRotate4_N(S[2], 0xFFFFU, 0, 0x3333, 2); - leftRotate4_N(S[3], 0xFFFFU, 0, 0x3333, 2); - - leftRotate4_N(S[4], 0x1111U, 3, 0x7777, 1); - leftRotate4_N(S[5], 0x1111U, 3, 0x7777, 1); - leftRotate4_N(S[6], 0x1111U, 3, 0x7777, 1); - leftRotate4_N(S[7], 0x1111U, 3, 0x7777, 1); -} - -/** - * \brief Applies the sheet permutation to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_sheet(uint32_t S[8]) -{ - leftRotate16_N(S[0], 0xFFFFU, 0, 0x00FF, 8); - leftRotate16_N(S[1], 0xFFFFU, 0, 0x00FF, 8); - leftRotate16_N(S[2], 0xFFFFU, 0, 0x00FF, 8); - leftRotate16_N(S[3], 0xFFFFU, 0, 0x00FF, 8); - - leftRotate16_N(S[4], 0x0FFFU, 4, 0x000F, 12); - leftRotate16_N(S[5], 0x0FFFU, 4, 0x000F, 12); - leftRotate16_N(S[6], 0x0FFFU, 4, 0x000F, 12); - leftRotate16_N(S[7], 0x0FFFU, 4, 0x000F, 12); -} - -/** - * \brief Applies the inverse of the sheet permutation to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_sheet_inverse(uint32_t S[8]) -{ - leftRotate16_N(S[0], 0xFFFFU, 0, 0x00FF, 8); - leftRotate16_N(S[1], 0xFFFFU, 0, 0x00FF, 8); - leftRotate16_N(S[2], 0xFFFFU, 0, 0x00FF, 8); - leftRotate16_N(S[3], 0xFFFFU, 0, 0x00FF, 8); - - leftRotate16_N(S[4], 0x000FU, 12, 0x0FFF, 4); - leftRotate16_N(S[5], 0x000FU, 12, 0x0FFF, 4); - leftRotate16_N(S[6], 0x000FU, 12, 0x0FFF, 4); - leftRotate16_N(S[7], 0x000FU, 12, 0x0FFF, 4); -} - -/** - * \brief Encrypts a 256-bit block with the SATURNIN block cipher. - * - * \param output Ciphertext output block, 32 bytes. - * \param input Plaintext input block, 32 bytes. - * \param key Points to the 32 byte key for the block cipher. - * \param rounds Number of rounds to perform. - * \param RC Round constants to use for domain separation. - * - * The \a input and \a output buffers can be the same. 
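Note how saturnin_slice_inverse() above reuses the forward rotation macro with the complementary nibble-rotation amounts: 4-bit rotations compose modulo 4, so rotating by 1 and then by 3 (or by 2 and then 2) restores the original word. A minimal sketch checking the round trip on a single state word (the macro is copied from this hunk; the test wrapper is illustrative):

#include <stdint.h>
#include <stdio.h>

#define leftRotate4_N(a, mask1, bits1, mask2, bits2) \
    do { \
        uint32_t _temp = (a); \
        (a) = ((_temp & (mask1)) << (bits1)) | \
              ((_temp & ((mask1) ^ (uint32_t)0xFFFFU)) >> (4 - (bits1))) | \
              ((_temp & (((uint32_t)(mask2)) << 16)) << (bits2)) | \
              ((_temp & (((uint32_t)((mask2)) << 16) ^ 0xFFFF0000U)) >> (4 - (bits2))); \
    } while (0)

int main(void)
{
    /* A word from the second half of the state: the forward slice rotates
     * the low-half nibbles left by 1 and the high-half nibbles left by 3;
     * the inverse applies 3 and 1, giving a total of 4, i.e. the identity. */
    uint32_t w = 0x2468ACE1U, orig = w;
    leftRotate4_N(w, 0x7777U, 1, 0x1111, 3);   /* forward slice, words S[4..7] */
    leftRotate4_N(w, 0x1111U, 3, 0x7777, 1);   /* inverse slice, words S[4..7] */
    printf("%s\n", w == orig ? "identity" : "broken");
    return w == orig ? 0 : 1;
}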
- * - * \sa saturnin_block_decrypt() - */ -static void saturnin_block_encrypt - (unsigned char *output, const unsigned char *input, - const unsigned char *key, unsigned rounds, const uint32_t *RC) -{ - uint32_t K[16]; - uint32_t S[8]; - uint32_t temp; - unsigned index; - - /* Unpack the key and the input block */ - for (index = 0; index < 16; index += 2) { - temp = ((uint32_t)(key[index])) | - (((uint32_t)(key[index + 1])) << 8) | - (((uint32_t)(key[index + 16])) << 16) | - (((uint32_t)(key[index + 17])) << 24); - K[index / 2] = temp; - K[8 + (index / 2)] = ((temp & 0x001F001FU) << 11) | - ((temp >> 5) & 0x07FF07FFU); - S[index / 2] = ((uint32_t)(input[index])) | - (((uint32_t)(input[index + 1])) << 8) | - (((uint32_t)(input[index + 16])) << 16) | - (((uint32_t)(input[index + 17])) << 24); - } - - /* XOR the key into the state */ - saturnin_xor_key(); - - /* Perform all encryption rounds */ - for (; rounds > 0; rounds -= 2, RC += 2) { - saturnin_sbox(S); - saturnin_mds(S); - saturnin_sbox(S); - saturnin_slice(S); - saturnin_mds(S); - saturnin_slice_inverse(S); - S[0] ^= RC[0]; - saturnin_xor_key_rotated(); - - saturnin_sbox(S); - saturnin_mds(S); - saturnin_sbox(S); - saturnin_sheet(S); - saturnin_mds(S); - saturnin_sheet_inverse(S); - S[0] ^= RC[1]; - saturnin_xor_key(); - } - - /* Encode the state into the output block */ - for (index = 0; index < 16; index += 2) { - temp = S[index / 2]; - output[index] = (uint8_t)temp; - output[index + 1] = (uint8_t)(temp >> 8); - output[index + 16] = (uint8_t)(temp >> 16); - output[index + 17] = (uint8_t)(temp >> 24); - } -} - -/** - * \brief Decrypts a 256-bit block with the SATURNIN block cipher. - * - * \param output Plaintext output block, 32 bytes. - * \param input Ciphertext input block, 32 bytes. - * \param key Points to the 32 byte key for the block cipher. - * \param rounds Number of rounds to perform. - * \param RC Round constants to use for domain separation. - * - * The \a input and \a output buffers can be the same. 
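The unpacking loop in saturnin_block_encrypt() above interleaves bytes i and i+1 of the 32-byte block into the low 16 bits of word i/2, and bytes i+16 and i+17 into the high 16 bits, so each uint32_t carries one 16-bit register from each half of the SATURNIN state. A small round-trip sketch of that layout (helper names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack a 32-byte block into eight words using the interleaved layout. */
static void saturnin_like_pack(uint32_t S[8], const unsigned char block[32])
{
    unsigned index;
    for (index = 0; index < 16; index += 2) {
        S[index / 2] = ((uint32_t)block[index]) |
                       (((uint32_t)block[index + 1]) << 8) |
                       (((uint32_t)block[index + 16]) << 16) |
                       (((uint32_t)block[index + 17]) << 24);
    }
}

/* Inverse of the packing, as in the output loop of saturnin_block_encrypt(). */
static void saturnin_like_unpack(unsigned char block[32], const uint32_t S[8])
{
    unsigned index;
    for (index = 0; index < 16; index += 2) {
        uint32_t temp = S[index / 2];
        block[index] = (uint8_t)temp;
        block[index + 1] = (uint8_t)(temp >> 8);
        block[index + 16] = (uint8_t)(temp >> 16);
        block[index + 17] = (uint8_t)(temp >> 24);
    }
}

int main(void)
{
    unsigned char in[32], out[32];
    uint32_t S[8];
    unsigned i;
    for (i = 0; i < 32; ++i)
        in[i] = (unsigned char)i;
    saturnin_like_pack(S, in);
    saturnin_like_unpack(out, S);
    printf("%s\n", memcmp(in, out, 32) == 0 ? "round trip ok" : "broken");
    return 0;
}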
- * - * \sa saturnin_block_encrypt() - */ -static void saturnin_block_decrypt - (unsigned char *output, const unsigned char *input, - const unsigned char *key, unsigned rounds, const uint32_t *RC) -{ - uint32_t K[16]; - uint32_t S[8]; - uint32_t temp; - unsigned index; - - /* Unpack the key and the input block */ - for (index = 0; index < 16; index += 2) { - temp = ((uint32_t)(key[index])) | - (((uint32_t)(key[index + 1])) << 8) | - (((uint32_t)(key[index + 16])) << 16) | - (((uint32_t)(key[index + 17])) << 24); - K[index / 2] = temp; - K[8 + (index / 2)] = ((temp & 0x001F001FU) << 11) | - ((temp >> 5) & 0x07FF07FFU); - S[index / 2] = ((uint32_t)(input[index])) | - (((uint32_t)(input[index + 1])) << 8) | - (((uint32_t)(input[index + 16])) << 16) | - (((uint32_t)(input[index + 17])) << 24); - } - - /* Perform all decryption rounds */ - RC += rounds - 2; - for (; rounds > 0; rounds -= 2, RC -= 2) { - saturnin_xor_key(); - S[0] ^= RC[1]; - saturnin_sheet(S); - saturnin_mds_inverse(S); - saturnin_sheet_inverse(S); - saturnin_sbox_inverse(S); - saturnin_mds_inverse(S); - saturnin_sbox_inverse(S); - - saturnin_xor_key_rotated(); - S[0] ^= RC[0]; - saturnin_slice(S); - saturnin_mds_inverse(S); - saturnin_slice_inverse(S); - saturnin_sbox_inverse(S); - saturnin_mds_inverse(S); - saturnin_sbox_inverse(S); - } - - /* XOR the key into the state */ - saturnin_xor_key(); - - /* Encode the state into the output block */ - for (index = 0; index < 16; index += 2) { - temp = S[index / 2]; - output[index] = (uint8_t)temp; - output[index + 1] = (uint8_t)(temp >> 8); - output[index + 16] = (uint8_t)(temp >> 16); - output[index + 17] = (uint8_t)(temp >> 24); - } -} - -/** - * \brief Encrypts a 256-bit block with the SATURNIN block cipher and - * then XOR's itself to generate a new key. - * - * \param block Block to be encrypted and then XOR'ed with itself. - * \param key Points to the 32 byte key for the block cipher. - * \param rounds Number of rounds to perform. - * \param RC Round constants to use for domain separation. - */ -void saturnin_block_encrypt_xor - (const unsigned char *block, unsigned char *key, - unsigned rounds, const uint32_t *RC) -{ - unsigned char temp[32]; - saturnin_block_encrypt(temp, block, key, rounds, RC); - lw_xor_block_2_src(key, block, temp, 32); -} - -/** - * \brief Encrypts (or decrypts) a data packet in CTR mode. - * - * \param c Output ciphertext buffer. - * \param m Input plaintext buffer. - * \param mlen Length of the plaintext in bytes. - * \param k Points to the 32-byte key. - * \param block Points to the pre-formatted nonce block. - */ -static void saturnin_ctr_encrypt - (unsigned char *c, const unsigned char *m, unsigned long long mlen, - const unsigned char *k, unsigned char *block) -{ - /* Note: Specification requires a 95-bit counter but we only use 32-bit. - * This limits the maximum packet size to 128Gb. That should be OK */ - uint32_t counter = 1; - unsigned char out[32]; - while (mlen >= 32) { - be_store_word32(block + 28, counter); - saturnin_block_encrypt(out, block, k, 10, RC_10_1); - lw_xor_block_2_src(c, out, m, 32); - c += 32; - m += 32; - mlen -= 32; - ++counter; - } - if (mlen > 0) { - be_store_word32(block + 28, counter); - saturnin_block_encrypt(out, block, k, 10, RC_10_1); - lw_xor_block_2_src(c, out, m, (unsigned)mlen); - } -} - -/** - * \brief Pads an authenticates a message. - * - * \param tag Points to the authentication tag. - * \param block Temporary block of 32 bytes from the caller. - * \param m Points to the message to be authenticated. 
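saturnin_ctr_encrypt() above formats its input block once from the padded nonce and then only rewrites the big-endian 32-bit counter in the last four bytes; with 32-byte blocks, that 32-bit counter caps a single packet at roughly 2^37 bytes (128 GiB), as the comment in the function notes. A sketch of the block layout it relies on (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Format the 32-byte CTR input block: 16-byte nonce, 0x80 pad, zeros,
 * with the 32-bit block counter stored big-endian in the last 4 bytes,
 * matching what saturnin_aead_encrypt() and saturnin_ctr_encrypt() build. */
static void format_ctr_block(unsigned char block[32],
                             const unsigned char npub[16], uint32_t counter)
{
    memcpy(block, npub, 16);
    block[16] = 0x80;
    memset(block + 17, 0, 11);              /* bytes 17..27 */
    block[28] = (unsigned char)(counter >> 24);
    block[29] = (unsigned char)(counter >> 16);
    block[30] = (unsigned char)(counter >> 8);
    block[31] = (unsigned char)counter;
}

int main(void)
{
    unsigned char nonce[16] = {0}, block[32];
    format_ctr_block(block, nonce, 1);
    printf("counter byte: %02x\n", block[31]);   /* prints 01 */
    return 0;
}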
- * \param mlen Length of the message to be authenticated in bytes. - * \param rounds Number of rounds to perform. - * \param RC1 Round constants to use for domain separation on full blocks. - * \param RC2 Round constants to use for domain separation on the last block. - */ -static void saturnin_authenticate - (unsigned char *tag, unsigned char *block, - const unsigned char *m, unsigned long long mlen, - unsigned rounds, const uint32_t *RC1, const uint32_t *RC2) -{ - unsigned temp; - while (mlen >= 32) { - saturnin_block_encrypt_xor(m, tag, rounds, RC1); - m += 32; - mlen -= 32; - } - temp = (unsigned)mlen; - memcpy(block, m, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, 31 - temp); - saturnin_block_encrypt_xor(block, tag, rounds, RC2); -} - -int saturnin_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char block[32]; - unsigned char *tag; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SATURNIN_TAG_SIZE; - - /* Format the input block from the padded nonce */ - memcpy(block, npub, 16); - block[16] = 0x80; - memset(block + 17, 0, 15); - - /* Encrypt the plaintext in counter mode to produce the ciphertext */ - saturnin_ctr_encrypt(c, m, mlen, k, block); - - /* Set the counter back to zero and then encrypt the nonce */ - tag = c + mlen; - memcpy(tag, k, 32); - memset(block + 17, 0, 15); - saturnin_block_encrypt_xor(block, tag, 10, RC_10_2); - - /* Authenticate the associated data and the ciphertext */ - saturnin_authenticate(tag, block, ad, adlen, 10, RC_10_2, RC_10_3); - saturnin_authenticate(tag, block, c, mlen, 10, RC_10_4, RC_10_5); - return 0; -} - -int saturnin_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char block[32]; - unsigned char tag[32]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SATURNIN_TAG_SIZE) - return -1; - *mlen = clen - SATURNIN_TAG_SIZE; - - /* Format the input block from the padded nonce */ - memcpy(block, npub, 16); - block[16] = 0x80; - memset(block + 17, 0, 15); - - /* Encrypt the nonce to initialize the authentication phase */ - memcpy(tag, k, 32); - saturnin_block_encrypt_xor(block, tag, 10, RC_10_2); - - /* Authenticate the associated data and the ciphertext */ - saturnin_authenticate(tag, block, ad, adlen, 10, RC_10_2, RC_10_3); - saturnin_authenticate(tag, block, c, *mlen, 10, RC_10_4, RC_10_5); - - /* Decrypt the ciphertext in counter mode to produce the plaintext */ - memcpy(block, npub, 16); - block[16] = 0x80; - memset(block + 17, 0, 15); - saturnin_ctr_encrypt(m, c, *mlen, k, block); - - /* Check the authentication tag at the end of the message */ - return aead_check_tag - (m, *mlen, tag, c + *mlen, SATURNIN_TAG_SIZE); -} - -int saturnin_short_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char block[32]; - unsigned temp; - (void)nsec; - (void)ad; - - /* Validate the parameters: no associated data allowed and m <= 15 bytes */ 
- if (adlen > 0 || mlen > 15) - return -2; - - /* Format the input block from the nonce and plaintext */ - temp = (unsigned)mlen; - memcpy(block, npub, 16); - memcpy(block + 16, m, temp); - block[16 + temp] = 0x80; /* Padding */ - memset(block + 17 + temp, 0, 15 - temp); - - /* Encrypt the input block to produce the output ciphertext */ - saturnin_block_encrypt(c, block, k, 10, RC_10_6); - *clen = 32; - return 0; -} - -int saturnin_short_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char block[32]; - unsigned check1, check2, len; - int index, result; - (void)nsec; - (void)ad; - - /* Validate the parameters: no associated data and c is always 32 bytes */ - if (adlen > 0) - return -2; - if (clen != 32) - return -1; - - /* Decrypt the ciphertext block */ - saturnin_block_decrypt(block, c, k, 10, RC_10_6); - - /* Verify that the output block starts with the nonce and that it is - * padded correctly. We need to do this very carefully to avoid leaking - * any information that could be used in a padding oracle attack. Use the - * same algorithm as the reference implementation of SATURNIN-Short */ - check1 = 0; - for (index = 0; index < 16; ++index) - check1 |= npub[index] ^ block[index]; - check2 = 0xFF; - len = 0; - for (index = 15; index >= 0; --index) { - unsigned temp = block[16 + index]; - unsigned temp2 = check2 & -(1 - (((temp ^ 0x80) + 0xFF) >> 8)); - len |= temp2 & (unsigned)index; - check2 &= ~temp2; - check1 |= check2 & ((temp + 0xFF) >> 8); - } - check1 |= check2; - - /* At this point, check1 is zero if the nonce and plaintext are good, - * or non-zero if there was an error in the decrypted data */ - result = (((int)check1) - 1) >> 8; - - /* The "result" is -1 if the data is good or zero if the data is invalid. - * Copy either the plaintext or zeroes to the output buffer. We assume - * that the output buffer has space for up to 15 bytes. 
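The padding scan above relies on two branch-free idioms: ((temp ^ 0x80) + 0xFF) >> 8 is zero exactly when the byte equals 0x80, and negating 1 minus that value turns it into an all-ones/all-zeros mask. A small sketch of those building blocks in isolation (a simplified variant of what the loop does; the helper names are illustrative):

#include <stdio.h>

/* Returns 0xFF if x == y, 0x00 otherwise, without branching on the data. */
static unsigned ct_eq_mask(unsigned x, unsigned y)
{
    unsigned diff = (x ^ y) & 0xFFU;
    /* (diff + 0xFF) >> 8 is 0 when diff == 0 and 1 otherwise. */
    return (unsigned)(-(1U - ((diff + 0xFFU) >> 8))) & 0xFFU;
}

/* Returns 0xFF if x != 0, 0x00 if x == 0 (for the "must be zero" pad bytes). */
static unsigned ct_nonzero_mask(unsigned x)
{
    return (unsigned)(-(((x & 0xFFU) + 0xFFU) >> 8)) & 0xFFU;
}

int main(void)
{
    printf("%02x %02x\n", ct_eq_mask(0x80, 0x80), ct_eq_mask(0x7F, 0x80)); /* ff 00 */
    printf("%02x %02x\n", ct_nonzero_mask(0x01), ct_nonzero_mask(0x00));   /* ff 00 */
    return 0;
}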
This may return - * some of the padding to the caller but as long as they restrict - * themselves to the first *mlen bytes then it shouldn't be a problem */ - for (index = 0; index < 15; ++index) - m[index] = block[16 + index] & result; - *mlen = len; - return ~result; -} - -int saturnin_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - unsigned char tag[32]; - unsigned char block[32]; - memset(tag, 0, sizeof(tag)); - saturnin_authenticate(tag, block, in, inlen, 16, RC_16_7, RC_16_8); - memcpy(out, tag, 32); - return 0; -} - -void saturnin_hash_init(saturnin_hash_state_t *state) -{ - memset(state, 0, sizeof(saturnin_hash_state_t)); -} - -void saturnin_hash_update - (saturnin_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - unsigned temp; - - /* Handle the partial left-over block from last time */ - if (state->s.count) { - temp = 32 - state->s.count; - if (temp > inlen) { - temp = (unsigned)inlen; - memcpy(state->s.block + state->s.count, in, temp); - state->s.count += temp; - return; - } - memcpy(state->s.block + state->s.count, in, temp); - state->s.count = 0; - in += temp; - inlen -= temp; - saturnin_block_encrypt_xor(state->s.block, state->s.hash, 16, RC_16_7); - } - - /* Process full blocks that are aligned at state->s.count == 0 */ - while (inlen >= 32) { - saturnin_block_encrypt_xor(in, state->s.hash, 16, RC_16_7); - in += 32; - inlen -= 32; - } - - /* Process the left-over block at the end of the input */ - temp = (unsigned)inlen; - memcpy(state->s.block, in, temp); - state->s.count = temp; -} - -void saturnin_hash_finalize - (saturnin_hash_state_t *state, unsigned char *out) -{ - /* Pad the final block */ - state->s.block[state->s.count] = 0x80; - memset(state->s.block + state->s.count + 1, 0, 31 - state->s.count); - - /* Generate the final hash value */ - saturnin_block_encrypt_xor(state->s.block, state->s.hash, 16, RC_16_8); - memcpy(out, state->s.hash, 32); -} diff --git a/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/saturnin.h b/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/saturnin.h deleted file mode 100644 index 873d950..0000000 --- a/saturnin/Implementations/crypto_aead/saturninshortv2/rhys-avr/saturnin.h +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LWCRYPTO_SATURNIN_H -#define LWCRYPTO_SATURNIN_H - -#include "aead-common.h" - -/** - * \file saturnin.h - * \brief Saturnin authenticated encryption algorithm. - * - * The Saturnin family consists of two members: SATURNIN-CTR-Cascade and - * SATURNIN-Short. Both take a 256-bit key and a 128-bit nonce. - * Internally they use a 256-bit block cipher similar in construction to AES. - * - * SATURNIN-Short does not support associated data or plaintext packets - * with more than 15 bytes. This makes it very efficient on short packets - * with only a single block operation involved. - * - * This implementation of SATURNIN-Short will return an error if the - * caller supplies associated data or more than 15 bytes of plaintext. - * - * References: https://project.inria.fr/saturnin/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all SATURNIN family members. - */ -#define SATURNIN_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for SATURNIN-CTR-Cascade or the - * total size of the ciphertext for SATURNIN-Short. - */ -#define SATURNIN_TAG_SIZE 32 - -/** - * \brief Size of the nonce for all SATURNIN family members. - */ -#define SATURNIN_NONCE_SIZE 16 - -/** - * \brief Size of the hash for SATURNIN-Hash. - */ -#define SATURNIN_HASH_SIZE 32 - -/** - * \brief State information for SATURNIN-Hash incremental modes. - */ -typedef union -{ - struct { - unsigned char hash[32]; /**< Current hash state */ - unsigned char block[32]; /**< Left-over block data from last update */ - unsigned char count; /**< Number of bytes in the current block */ - unsigned char mode; /**< Hash mode: 0 for absorb, 1 for squeeze */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} saturnin_hash_state_t; - -/** - * \brief Meta-information block for the SATURNIN-CTR-Cascade cipher. - */ -extern aead_cipher_t const saturnin_cipher; - -/** - * \brief Meta-information block for the SATURNIN-Short cipher. - */ -extern aead_cipher_t const saturnin_short_cipher; - -/** - * \brief Meta-information block for SATURNIN-Hash. - */ -extern aead_hash_algorithm_t const saturnin_hash_algorithm; - -/** - * \brief Encrypts and authenticates a packet with SATURNIN-CTR-Cascade. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 32 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa saturnin_aead_decrypt() - */ -int saturnin_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SATURNIN-CTR-Cascade. - * - * \param m Buffer to receive the plaintext message on output. 
- * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 32 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa saturnin_aead_encrypt() - */ -int saturnin_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SATURNIN-Short. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which is always 32. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes, which must be - * less than or equal to 15 bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes, which must be zero. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or -2 if the caller supplied too many bytes of - * plaintext or they supplied associated data. - * - * \sa saturnin_short_aead_decrypt() - */ -int saturnin_short_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SATURNIN-Short. - * - * \param m Buffer to receive the plaintext message on output. There must - * be at least 15 bytes of space in this buffer even if the caller expects - * to receive less data than that. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext to decrypt. - * \param clen Length of the input data in bytes, which must be 32. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes, which must be zero. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or -2 if the caller supplied associated data. 
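The AEAD entry points declared in this header follow the usual NIST lightweight-crypto calling convention. A short caller's sketch for SATURNIN-CTR-Cascade, assuming the program is compiled and linked against this implementation (the test values and harness are illustrative):

#include <stdio.h>
#include <string.h>
#include "saturnin.h"

int main(void)
{
    unsigned char key[SATURNIN_KEY_SIZE] = {0};
    unsigned char nonce[SATURNIN_NONCE_SIZE] = {0};
    unsigned char ad[1] = {0};                  /* no associated data used */
    unsigned char msg[13] = "hello, world";
    unsigned char ct[sizeof(msg) + SATURNIN_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long clen, mlen;

    /* Encrypt and authenticate; clen is set to mlen + 32. */
    if (saturnin_aead_encrypt(ct, &clen, msg, sizeof(msg), ad, 0,
                              NULL, nonce, key) != 0)
        return 1;

    /* Decrypt and verify; returns -1 if the authentication tag is wrong. */
    if (saturnin_aead_decrypt(pt, &mlen, NULL, ct, clen, ad, 0,
                              nonce, key) != 0)
        return 1;

    printf("%s\n", memcmp(pt, msg, sizeof(msg)) == 0 ? "ok" : "mismatch");
    return 0;
}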
- * - * \sa saturnin_short_aead_encrypt() - */ -int saturnin_short_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with SATURNIN to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * SATURNIN_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int saturnin_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an SATURNIN-Hash hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa saturnin_hash_update(), saturnin_hash_finalize(), saturnin_hash() - */ -void saturnin_hash_init(saturnin_hash_state_t *state); - -/** - * \brief Updates an SATURNIN-Hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa saturnin_hash_init(), saturnin_hash_finalize() - */ -void saturnin_hash_update - (saturnin_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an SATURNIN-Hash hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 32-byte hash value. - * - * \sa saturnin_hash_init(), saturnin_hash_update() - */ -void saturnin_hash_finalize - (saturnin_hash_state_t *state, unsigned char *out); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/saturnin/Implementations/crypto_aead/saturninshortv2/rhys/internal-util.h b/saturnin/Implementations/crypto_aead/saturninshortv2/rhys/internal-util.h index e79158c..e30166d 100644 --- a/saturnin/Implementations/crypto_aead/saturninshortv2/rhys/internal-util.h +++ b/saturnin/Implementations/crypto_aead/saturninshortv2/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/aead-common.c b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
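The composed 32-bit rotations added to internal-util.h above only ever chain rotations by 1 or by a multiple of 8, which map well to byte moves and single-bit shift instructions on AVR; every other count is reached by combining the two. A quick equivalence check for one representative case (function names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Generic rotations, in the same plain shift/or form as internal-util.h. */
static uint32_t rotl32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32U - bits));
}
static uint32_t rotr32(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32U - bits));
}

/* Composed form of a left rotation by 5: rotate left by 8 (a byte move
 * on AVR), then right by 1 three times, mirroring leftRotate5 above. */
static uint32_t rotl32_by5_composed(uint32_t x)
{
    return rotr32(rotr32(rotr32(rotl32(x, 8), 1), 1), 1);
}

int main(void)
{
    uint32_t x = 0x12345678U;
    printf("%s\n", rotl32_by5_composed(x) == rotl32(x, 5) ? "match" : "mismatch");
    return 0;
}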
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/aead-common.h b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. 
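The return-value logic of aead_check_tag() above hinges on one arithmetic step: accum is the OR of all byte differences, so it is zero only when the tags are identical, and (accum - 1) >> 8 then yields -1 (all ones) on a match and 0 on a mismatch, whose complement is the documented 0 / -1 result. A tiny sketch of just that step (assuming the usual arithmetic right shift on signed int, which the original code also relies on):

#include <stdio.h>

/* Turns the OR-of-differences accumulator into the -1 (match) / 0 (mismatch)
 * mask that aead_check_tag() uses to wipe the plaintext and form its result. */
static int mask_from_accum(int accum)
{
    return (accum - 1) >> 8;
}

int main(void)
{
    printf("match: %d, mismatch: %d\n",
           ~mask_from_accum(0), ~mask_from_accum(0x5A));
    /* prints "match: 0, mismatch: -1" */
    return 0;
}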
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. 
- * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. 
- * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/api.h b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/api.h deleted file mode 100644 index ae8c7f6..0000000 --- a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/api.h +++ /dev/null @@ -1 +0,0 @@ -#define CRYPTO_BYTES 32 diff --git a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/hash.c b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/hash.c deleted file mode 100644 index 76a7173..0000000 --- a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/hash.c +++ /dev/null @@ -1,8 +0,0 @@ - -#include "saturnin.h" - -int crypto_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - return saturnin_hash(out, in, inlen); -} diff --git a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/internal-util.h b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define 
le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. 
*/ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/saturnin.c b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/saturnin.c deleted file mode 100644 index 734fc69..0000000 --- a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/saturnin.c +++ /dev/null @@ -1,781 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "saturnin.h" -#include "internal-util.h" -#include - -aead_cipher_t const saturnin_cipher = { - "SATURNIN-CTR-Cascade", - SATURNIN_KEY_SIZE, - SATURNIN_NONCE_SIZE, - SATURNIN_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - saturnin_aead_encrypt, - saturnin_aead_decrypt -}; - -aead_cipher_t const saturnin_short_cipher = { - "SATURNIN-Short", - SATURNIN_KEY_SIZE, - SATURNIN_NONCE_SIZE, - SATURNIN_TAG_SIZE, - AEAD_FLAG_NONE, - saturnin_short_aead_encrypt, - saturnin_short_aead_decrypt -}; - -aead_hash_algorithm_t const saturnin_hash_algorithm = { - "SATURNIN-Hash", - sizeof(saturnin_hash_state_t), - SATURNIN_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - saturnin_hash, - (aead_hash_init_t)saturnin_hash_init, - (aead_hash_update_t)saturnin_hash_update, - (aead_hash_finalize_t)saturnin_hash_finalize, - 0, /* absorb */ - 0 /* squeeze */ -}; - -/* Round constant tables for various combinations of rounds and domain_sep */ -static uint32_t const RC_10_1[] = { - 0x4eb026c2, 0x90595303, 0xaa8fe632, 0xfe928a92, 0x4115a419, - 0x93539532, 0x5db1cc4e, 0x541515ca, 0xbd1f55a8, 0x5a6e1a0d -}; -static uint32_t const RC_10_2[] = { - 0x4e4526b5, 0xa3565ff0, 0x0f8f20d8, 0x0b54bee1, 0x7d1a6c9d, - 0x17a6280a, 0xaa46c986, 0xc1199062, 0x182c5cde, 0xa00d53fe -}; -static uint32_t const RC_10_3[] = { - 0x4e162698, 0xb2535ba1, 0x6c8f9d65, 0x5816ad30, 0x691fd4fa, - 0x6bf5bcf9, 0xf8eb3525, 0xb21decfa, 0x7b3da417, 0xf62c94b4 -}; -static uint32_t const RC_10_4[] = { - 0x4faf265b, 0xc5484616, 0x45dcad21, 0xe08bd607, 0x0504fdb8, - 0x1e1f5257, 0x45fbc216, 0xeb529b1f, 0x52194e32, 0x5498c018 -}; -static uint32_t const RC_10_5[] = { - 0x4ffc2676, 0xd44d4247, 0x26dc109c, 0xb3c9c5d6, 0x110145df, - 0x624cc6a4, 0x17563eb5, 0x9856e787, 0x3108b6fb, 0x02b90752 -}; -static uint32_t const RC_10_6[] = { - 0x4f092601, 0xe7424eb4, 0x83dcd676, 0x460ff1a5, 0x2d0e8d5b, - 0xe6b97b9c, 0xe0a13b7d, 0x0d5a622f, 0x943bbf8d, 0xf8da4ea1 -}; -static uint32_t const RC_16_7[] = { - 0x3fba180c, 0x563ab9ab, 0x125ea5ef, 0x859da26c, 0xb8cf779b, - 0x7d4de793, 0x07efb49f, 0x8d525306, 0x1e08e6ab, 0x41729f87, - 0x8c4aef0a, 0x4aa0c9a7, 0xd93a95ef, 0xbb00d2af, 0xb62c5bf0, - 0x386d94d8 -}; -static uint32_t const RC_16_8[] = { - 0x3c9b19a7, 0xa9098694, 0x23f878da, 0xa7b647d3, 0x74fc9d78, - 0xeacaae11, 0x2f31a677, 0x4cc8c054, 0x2f51ca05, 0x5268f195, - 0x4f5b8a2b, 0xf614b4ac, 0xf1d95401, 0x764d2568, 0x6a493611, - 0x8eef9c3e -}; - -/* Rotate the 4-bit nibbles within a 16-bit word left */ -#define leftRotate4_N(a, mask1, bits1, mask2, bits2) \ - do { \ - uint32_t _temp = (a); \ - (a) = ((_temp & (mask1)) << (bits1)) | \ - ((_temp & ((mask1) ^ (uint32_t)0xFFFFU)) >> (4 - (bits1))) | \ - ((_temp & (((uint32_t)(mask2)) << 16)) << (bits2)) | \ - ((_temp & (((uint32_t)((mask2)) << 16) ^ 0xFFFF0000U)) >> (4 - (bits2))); \ - } while (0) - -/* Rotate 16-bit subwords left */ -#define leftRotate16_N(a, mask1, bits1, mask2, bits2) \ - do { \ - uint32_t _temp = (a); \ - (a) = ((_temp & (mask1)) << (bits1)) | \ - ((_temp & ((mask1) ^ (uint32_t)0xFFFFU)) >> (16 - (bits1))) | \ - ((_temp & (((uint32_t)(mask2)) << 16)) << (bits2)) | \ - ((_temp & (((uint32_t)((mask2)) << 16) ^ 0xFFFF0000U)) >> (16 - (bits2))); \ - } while (0) - -/* XOR the SATURNIN state with the key */ -#define saturnin_xor_key() \ - do { \ - for (index = 0; index < 8; ++index) \ - S[index] ^= K[index]; \ - } while (0) - -/* XOR the SATURNIN state with a rotated version of the key */ -#define saturnin_xor_key_rotated() \ - do { \ - for (index = 0; index < 8; ++index) \ - S[index] ^= K[index + 8]; \ - } while (0) - 
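/*
 * [Editor's sketch, not part of the original patch] saturnin_xor_key_rotated()
 * above XORs in K[8..15].  As the key setup in saturnin_block_encrypt() shows
 * further below, those words hold the key with each 16-bit lane rotated left
 * by 11 bits.  A minimal standalone check of that equivalence; the helper
 * names here are illustrative only:
 */
#include <assert.h>
#include <stdint.h>

/* Same expression used when filling K[8..15] from a packed key word */
static uint32_t saturnin_rotate_key_word(uint32_t w)
{
    return ((w & 0x001F001FU) << 11) | ((w >> 5) & 0x07FF07FFU);
}

/* Plain 16-bit left rotation, for comparison */
static uint16_t rol16(uint16_t x, unsigned bits)
{
    return (uint16_t)((x << bits) | (x >> (16U - bits)));
}

int main(void)
{
    uint32_t w = 0x12345678U;
    uint32_t r = saturnin_rotate_key_word(w);
    assert((uint16_t)r == rol16((uint16_t)w, 11));                 /* low 16-bit lane  */
    assert((uint16_t)(r >> 16) == rol16((uint16_t)(w >> 16), 11)); /* high 16-bit lane */
    return 0;
}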
-/* Apply an SBOX layer for SATURNIN - definition from the specification */ -#define S_LAYER(a, b, c, d) \ - do { \ - (a) ^= (b) & (c); \ - (b) ^= (a) | (d); \ - (d) ^= (b) | (c); \ - (c) ^= (b) & (d); \ - (b) ^= (a) | (c); \ - (a) ^= (b) | (d); \ - } while (0) - -/* Apply an SBOX layer for SATURNIN in reverse */ -#define S_LAYER_INVERSE(a, b, c, d) \ - do { \ - (a) ^= (b) | (d); \ - (b) ^= (a) | (c); \ - (c) ^= (b) & (d); \ - (d) ^= (b) | (c); \ - (b) ^= (a) | (d); \ - (a) ^= (b) & (c); \ - } while (0) - -/** - * \brief Applies the SBOX to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_sbox(uint32_t S[8]) -{ - uint32_t a, b, c, d; - - /* PI_0 on the first half of the state */ - a = S[0]; b = S[1]; c = S[2]; d = S[3]; - S_LAYER(a, b, c, d); - S[0] = b; S[1] = c; S[2] = d; S[3] = a; - - /* PI_1 on the second half of the state */ - a = S[4]; b = S[5]; c = S[6]; d = S[7]; - S_LAYER(a, b, c, d); - S[4] = d; S[5] = b; S[6] = a; S[7] = c; -} - -/** - * \brief Applies the inverse of the SBOX to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_sbox_inverse(uint32_t S[8]) -{ - uint32_t a, b, c, d; - - /* PI_0 on the first half of the state */ - b = S[0]; c = S[1]; d = S[2]; a = S[3]; - S_LAYER_INVERSE(a, b, c, d); - S[0] = a; S[1] = b; S[2] = c; S[3] = d; - - /* PI_1 on the second half of the state */ - d = S[4]; b = S[5]; a = S[6]; c = S[7]; - S_LAYER_INVERSE(a, b, c, d); - S[4] = a; S[5] = b; S[6] = c; S[7] = d; -} - -/** - * \brief Applies the MDS matrix to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_mds(uint32_t S[8]) -{ - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t tmp; - - /* Load the state into temporary working variables */ - x0 = S[0]; x1 = S[1]; x2 = S[2]; x3 = S[3]; - x4 = S[4]; x5 = S[5]; x6 = S[6]; x7 = S[7]; - - /* Apply the MDS matrix to the state */ - #define SWAP(a) (((a) << 16) | ((a) >> 16)) - #define MUL(x0, x1, x2, x3, tmp) \ - do { \ - tmp = x0; x0 = x1; x1 = x2; x2 = x3; x3 = tmp ^ x0; \ - } while (0) - x0 ^= x4; x1 ^= x5; x2 ^= x6; x3 ^= x7; - MUL(x4, x5, x6, x7, tmp); - x4 ^= SWAP(x0); x5 ^= SWAP(x1); - x6 ^= SWAP(x2); x7 ^= SWAP(x3); - MUL(x0, x1, x2, x3, tmp); - MUL(x0, x1, x2, x3, tmp); - x0 ^= x4; x1 ^= x5; x2 ^= x6; x3 ^= x7; - x4 ^= SWAP(x0); x5 ^= SWAP(x1); - x6 ^= SWAP(x2); x7 ^= SWAP(x3); - - /* Store the temporary working variables back into the state */ - S[0] = x0; S[1] = x1; S[2] = x2; S[3] = x3; - S[4] = x4; S[5] = x5; S[6] = x6; S[7] = x7; -} - -/** - * \brief Applies the inverse of the MDS matrix to the SATURNIN state. - * - * \param S The state. 
- */ -static void saturnin_mds_inverse(uint32_t S[8]) -{ - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t tmp; - - /* Load the state into temporary working variables */ - x0 = S[0]; x1 = S[1]; x2 = S[2]; x3 = S[3]; - x4 = S[4]; x5 = S[5]; x6 = S[6]; x7 = S[7]; - - /* Apply the inverse of the MDS matrix to the state */ - #define MULINV(x0, x1, x2, x3, tmp) \ - do { \ - tmp = x3; x3 = x2; x2 = x1; x1 = x0; x0 = x1 ^ tmp; \ - } while (0) - x6 ^= SWAP(x2); x7 ^= SWAP(x3); - x4 ^= SWAP(x0); x5 ^= SWAP(x1); - x0 ^= x4; x1 ^= x5; x2 ^= x6; x3 ^= x7; - MULINV(x0, x1, x2, x3, tmp); - MULINV(x0, x1, x2, x3, tmp); - x6 ^= SWAP(x2); x7 ^= SWAP(x3); - x4 ^= SWAP(x0); x5 ^= SWAP(x1); - MULINV(x4, x5, x6, x7, tmp); - x0 ^= x4; x1 ^= x5; x2 ^= x6; x3 ^= x7; - - /* Store the temporary working variables back into the state */ - S[0] = x0; S[1] = x1; S[2] = x2; S[3] = x3; - S[4] = x4; S[5] = x5; S[6] = x6; S[7] = x7; -} - -/** - * \brief Applies the slice permutation to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_slice(uint32_t S[8]) -{ - leftRotate4_N(S[0], 0xFFFFU, 0, 0x3333, 2); - leftRotate4_N(S[1], 0xFFFFU, 0, 0x3333, 2); - leftRotate4_N(S[2], 0xFFFFU, 0, 0x3333, 2); - leftRotate4_N(S[3], 0xFFFFU, 0, 0x3333, 2); - - leftRotate4_N(S[4], 0x7777U, 1, 0x1111, 3); - leftRotate4_N(S[5], 0x7777U, 1, 0x1111, 3); - leftRotate4_N(S[6], 0x7777U, 1, 0x1111, 3); - leftRotate4_N(S[7], 0x7777U, 1, 0x1111, 3); -} - -/** - * \brief Applies the inverse of the slice permutation to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_slice_inverse(uint32_t S[8]) -{ - leftRotate4_N(S[0], 0xFFFFU, 0, 0x3333, 2); - leftRotate4_N(S[1], 0xFFFFU, 0, 0x3333, 2); - leftRotate4_N(S[2], 0xFFFFU, 0, 0x3333, 2); - leftRotate4_N(S[3], 0xFFFFU, 0, 0x3333, 2); - - leftRotate4_N(S[4], 0x1111U, 3, 0x7777, 1); - leftRotate4_N(S[5], 0x1111U, 3, 0x7777, 1); - leftRotate4_N(S[6], 0x1111U, 3, 0x7777, 1); - leftRotate4_N(S[7], 0x1111U, 3, 0x7777, 1); -} - -/** - * \brief Applies the sheet permutation to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_sheet(uint32_t S[8]) -{ - leftRotate16_N(S[0], 0xFFFFU, 0, 0x00FF, 8); - leftRotate16_N(S[1], 0xFFFFU, 0, 0x00FF, 8); - leftRotate16_N(S[2], 0xFFFFU, 0, 0x00FF, 8); - leftRotate16_N(S[3], 0xFFFFU, 0, 0x00FF, 8); - - leftRotate16_N(S[4], 0x0FFFU, 4, 0x000F, 12); - leftRotate16_N(S[5], 0x0FFFU, 4, 0x000F, 12); - leftRotate16_N(S[6], 0x0FFFU, 4, 0x000F, 12); - leftRotate16_N(S[7], 0x0FFFU, 4, 0x000F, 12); -} - -/** - * \brief Applies the inverse of the sheet permutation to the SATURNIN state. - * - * \param S The state. - */ -static void saturnin_sheet_inverse(uint32_t S[8]) -{ - leftRotate16_N(S[0], 0xFFFFU, 0, 0x00FF, 8); - leftRotate16_N(S[1], 0xFFFFU, 0, 0x00FF, 8); - leftRotate16_N(S[2], 0xFFFFU, 0, 0x00FF, 8); - leftRotate16_N(S[3], 0xFFFFU, 0, 0x00FF, 8); - - leftRotate16_N(S[4], 0x000FU, 12, 0x0FFF, 4); - leftRotate16_N(S[5], 0x000FU, 12, 0x0FFF, 4); - leftRotate16_N(S[6], 0x000FU, 12, 0x0FFF, 4); - leftRotate16_N(S[7], 0x000FU, 12, 0x0FFF, 4); -} - -/** - * \brief Encrypts a 256-bit block with the SATURNIN block cipher. - * - * \param output Ciphertext output block, 32 bytes. - * \param input Plaintext input block, 32 bytes. - * \param key Points to the 32 byte key for the block cipher. - * \param rounds Number of rounds to perform. - * \param RC Round constants to use for domain separation. - * - * The \a input and \a output buffers can be the same. 
- * - * \sa saturnin_block_decrypt() - */ -static void saturnin_block_encrypt - (unsigned char *output, const unsigned char *input, - const unsigned char *key, unsigned rounds, const uint32_t *RC) -{ - uint32_t K[16]; - uint32_t S[8]; - uint32_t temp; - unsigned index; - - /* Unpack the key and the input block */ - for (index = 0; index < 16; index += 2) { - temp = ((uint32_t)(key[index])) | - (((uint32_t)(key[index + 1])) << 8) | - (((uint32_t)(key[index + 16])) << 16) | - (((uint32_t)(key[index + 17])) << 24); - K[index / 2] = temp; - K[8 + (index / 2)] = ((temp & 0x001F001FU) << 11) | - ((temp >> 5) & 0x07FF07FFU); - S[index / 2] = ((uint32_t)(input[index])) | - (((uint32_t)(input[index + 1])) << 8) | - (((uint32_t)(input[index + 16])) << 16) | - (((uint32_t)(input[index + 17])) << 24); - } - - /* XOR the key into the state */ - saturnin_xor_key(); - - /* Perform all encryption rounds */ - for (; rounds > 0; rounds -= 2, RC += 2) { - saturnin_sbox(S); - saturnin_mds(S); - saturnin_sbox(S); - saturnin_slice(S); - saturnin_mds(S); - saturnin_slice_inverse(S); - S[0] ^= RC[0]; - saturnin_xor_key_rotated(); - - saturnin_sbox(S); - saturnin_mds(S); - saturnin_sbox(S); - saturnin_sheet(S); - saturnin_mds(S); - saturnin_sheet_inverse(S); - S[0] ^= RC[1]; - saturnin_xor_key(); - } - - /* Encode the state into the output block */ - for (index = 0; index < 16; index += 2) { - temp = S[index / 2]; - output[index] = (uint8_t)temp; - output[index + 1] = (uint8_t)(temp >> 8); - output[index + 16] = (uint8_t)(temp >> 16); - output[index + 17] = (uint8_t)(temp >> 24); - } -} - -/** - * \brief Decrypts a 256-bit block with the SATURNIN block cipher. - * - * \param output Plaintext output block, 32 bytes. - * \param input Ciphertext input block, 32 bytes. - * \param key Points to the 32 byte key for the block cipher. - * \param rounds Number of rounds to perform. - * \param RC Round constants to use for domain separation. - * - * The \a input and \a output buffers can be the same. 
- * - * \sa saturnin_block_encrypt() - */ -static void saturnin_block_decrypt - (unsigned char *output, const unsigned char *input, - const unsigned char *key, unsigned rounds, const uint32_t *RC) -{ - uint32_t K[16]; - uint32_t S[8]; - uint32_t temp; - unsigned index; - - /* Unpack the key and the input block */ - for (index = 0; index < 16; index += 2) { - temp = ((uint32_t)(key[index])) | - (((uint32_t)(key[index + 1])) << 8) | - (((uint32_t)(key[index + 16])) << 16) | - (((uint32_t)(key[index + 17])) << 24); - K[index / 2] = temp; - K[8 + (index / 2)] = ((temp & 0x001F001FU) << 11) | - ((temp >> 5) & 0x07FF07FFU); - S[index / 2] = ((uint32_t)(input[index])) | - (((uint32_t)(input[index + 1])) << 8) | - (((uint32_t)(input[index + 16])) << 16) | - (((uint32_t)(input[index + 17])) << 24); - } - - /* Perform all decryption rounds */ - RC += rounds - 2; - for (; rounds > 0; rounds -= 2, RC -= 2) { - saturnin_xor_key(); - S[0] ^= RC[1]; - saturnin_sheet(S); - saturnin_mds_inverse(S); - saturnin_sheet_inverse(S); - saturnin_sbox_inverse(S); - saturnin_mds_inverse(S); - saturnin_sbox_inverse(S); - - saturnin_xor_key_rotated(); - S[0] ^= RC[0]; - saturnin_slice(S); - saturnin_mds_inverse(S); - saturnin_slice_inverse(S); - saturnin_sbox_inverse(S); - saturnin_mds_inverse(S); - saturnin_sbox_inverse(S); - } - - /* XOR the key into the state */ - saturnin_xor_key(); - - /* Encode the state into the output block */ - for (index = 0; index < 16; index += 2) { - temp = S[index / 2]; - output[index] = (uint8_t)temp; - output[index + 1] = (uint8_t)(temp >> 8); - output[index + 16] = (uint8_t)(temp >> 16); - output[index + 17] = (uint8_t)(temp >> 24); - } -} - -/** - * \brief Encrypts a 256-bit block with the SATURNIN block cipher and - * then XOR's itself to generate a new key. - * - * \param block Block to be encrypted and then XOR'ed with itself. - * \param key Points to the 32 byte key for the block cipher. - * \param rounds Number of rounds to perform. - * \param RC Round constants to use for domain separation. - */ -void saturnin_block_encrypt_xor - (const unsigned char *block, unsigned char *key, - unsigned rounds, const uint32_t *RC) -{ - unsigned char temp[32]; - saturnin_block_encrypt(temp, block, key, rounds, RC); - lw_xor_block_2_src(key, block, temp, 32); -} - -/** - * \brief Encrypts (or decrypts) a data packet in CTR mode. - * - * \param c Output ciphertext buffer. - * \param m Input plaintext buffer. - * \param mlen Length of the plaintext in bytes. - * \param k Points to the 32-byte key. - * \param block Points to the pre-formatted nonce block. - */ -static void saturnin_ctr_encrypt - (unsigned char *c, const unsigned char *m, unsigned long long mlen, - const unsigned char *k, unsigned char *block) -{ - /* Note: Specification requires a 95-bit counter but we only use 32-bit. - * This limits the maximum packet size to 128Gb. That should be OK */ - uint32_t counter = 1; - unsigned char out[32]; - while (mlen >= 32) { - be_store_word32(block + 28, counter); - saturnin_block_encrypt(out, block, k, 10, RC_10_1); - lw_xor_block_2_src(c, out, m, 32); - c += 32; - m += 32; - mlen -= 32; - ++counter; - } - if (mlen > 0) { - be_store_word32(block + 28, counter); - saturnin_block_encrypt(out, block, k, 10, RC_10_1); - lw_xor_block_2_src(c, out, m, (unsigned)mlen); - } -} - -/** - * \brief Pads an authenticates a message. - * - * \param tag Points to the authentication tag. - * \param block Temporary block of 32 bytes from the caller. - * \param m Points to the message to be authenticated. 
- * \param mlen Length of the message to be authenticated in bytes. - * \param rounds Number of rounds to perform. - * \param RC1 Round constants to use for domain separation on full blocks. - * \param RC2 Round constants to use for domain separation on the last block. - */ -static void saturnin_authenticate - (unsigned char *tag, unsigned char *block, - const unsigned char *m, unsigned long long mlen, - unsigned rounds, const uint32_t *RC1, const uint32_t *RC2) -{ - unsigned temp; - while (mlen >= 32) { - saturnin_block_encrypt_xor(m, tag, rounds, RC1); - m += 32; - mlen -= 32; - } - temp = (unsigned)mlen; - memcpy(block, m, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, 31 - temp); - saturnin_block_encrypt_xor(block, tag, rounds, RC2); -} - -int saturnin_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char block[32]; - unsigned char *tag; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SATURNIN_TAG_SIZE; - - /* Format the input block from the padded nonce */ - memcpy(block, npub, 16); - block[16] = 0x80; - memset(block + 17, 0, 15); - - /* Encrypt the plaintext in counter mode to produce the ciphertext */ - saturnin_ctr_encrypt(c, m, mlen, k, block); - - /* Set the counter back to zero and then encrypt the nonce */ - tag = c + mlen; - memcpy(tag, k, 32); - memset(block + 17, 0, 15); - saturnin_block_encrypt_xor(block, tag, 10, RC_10_2); - - /* Authenticate the associated data and the ciphertext */ - saturnin_authenticate(tag, block, ad, adlen, 10, RC_10_2, RC_10_3); - saturnin_authenticate(tag, block, c, mlen, 10, RC_10_4, RC_10_5); - return 0; -} - -int saturnin_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char block[32]; - unsigned char tag[32]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SATURNIN_TAG_SIZE) - return -1; - *mlen = clen - SATURNIN_TAG_SIZE; - - /* Format the input block from the padded nonce */ - memcpy(block, npub, 16); - block[16] = 0x80; - memset(block + 17, 0, 15); - - /* Encrypt the nonce to initialize the authentication phase */ - memcpy(tag, k, 32); - saturnin_block_encrypt_xor(block, tag, 10, RC_10_2); - - /* Authenticate the associated data and the ciphertext */ - saturnin_authenticate(tag, block, ad, adlen, 10, RC_10_2, RC_10_3); - saturnin_authenticate(tag, block, c, *mlen, 10, RC_10_4, RC_10_5); - - /* Decrypt the ciphertext in counter mode to produce the plaintext */ - memcpy(block, npub, 16); - block[16] = 0x80; - memset(block + 17, 0, 15); - saturnin_ctr_encrypt(m, c, *mlen, k, block); - - /* Check the authentication tag at the end of the message */ - return aead_check_tag - (m, *mlen, tag, c + *mlen, SATURNIN_TAG_SIZE); -} - -int saturnin_short_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char block[32]; - unsigned temp; - (void)nsec; - (void)ad; - - /* Validate the parameters: no associated data allowed and m <= 15 bytes */ 
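    /* [Editor's note, added for clarity] The 15-byte cap follows from the
     * single-block layout built just below: the 32-byte SATURNIN block must
     * hold the 16-byte nonce, up to 15 bytes of plaintext, and at least one
     * 0x80 padding byte. */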
- if (adlen > 0 || mlen > 15) - return -2; - - /* Format the input block from the nonce and plaintext */ - temp = (unsigned)mlen; - memcpy(block, npub, 16); - memcpy(block + 16, m, temp); - block[16 + temp] = 0x80; /* Padding */ - memset(block + 17 + temp, 0, 15 - temp); - - /* Encrypt the input block to produce the output ciphertext */ - saturnin_block_encrypt(c, block, k, 10, RC_10_6); - *clen = 32; - return 0; -} - -int saturnin_short_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char block[32]; - unsigned check1, check2, len; - int index, result; - (void)nsec; - (void)ad; - - /* Validate the parameters: no associated data and c is always 32 bytes */ - if (adlen > 0) - return -2; - if (clen != 32) - return -1; - - /* Decrypt the ciphertext block */ - saturnin_block_decrypt(block, c, k, 10, RC_10_6); - - /* Verify that the output block starts with the nonce and that it is - * padded correctly. We need to do this very carefully to avoid leaking - * any information that could be used in a padding oracle attack. Use the - * same algorithm as the reference implementation of SATURNIN-Short */ - check1 = 0; - for (index = 0; index < 16; ++index) - check1 |= npub[index] ^ block[index]; - check2 = 0xFF; - len = 0; - for (index = 15; index >= 0; --index) { - unsigned temp = block[16 + index]; - unsigned temp2 = check2 & -(1 - (((temp ^ 0x80) + 0xFF) >> 8)); - len |= temp2 & (unsigned)index; - check2 &= ~temp2; - check1 |= check2 & ((temp + 0xFF) >> 8); - } - check1 |= check2; - - /* At this point, check1 is zero if the nonce and plaintext are good, - * or non-zero if there was an error in the decrypted data */ - result = (((int)check1) - 1) >> 8; - - /* The "result" is -1 if the data is good or zero if the data is invalid. - * Copy either the plaintext or zeroes to the output buffer. We assume - * that the output buffer has space for up to 15 bytes. 
This may return - * some of the padding to the caller but as long as they restrict - * themselves to the first *mlen bytes then it shouldn't be a problem */ - for (index = 0; index < 15; ++index) - m[index] = block[16 + index] & result; - *mlen = len; - return ~result; -} - -int saturnin_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - unsigned char tag[32]; - unsigned char block[32]; - memset(tag, 0, sizeof(tag)); - saturnin_authenticate(tag, block, in, inlen, 16, RC_16_7, RC_16_8); - memcpy(out, tag, 32); - return 0; -} - -void saturnin_hash_init(saturnin_hash_state_t *state) -{ - memset(state, 0, sizeof(saturnin_hash_state_t)); -} - -void saturnin_hash_update - (saturnin_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - unsigned temp; - - /* Handle the partial left-over block from last time */ - if (state->s.count) { - temp = 32 - state->s.count; - if (temp > inlen) { - temp = (unsigned)inlen; - memcpy(state->s.block + state->s.count, in, temp); - state->s.count += temp; - return; - } - memcpy(state->s.block + state->s.count, in, temp); - state->s.count = 0; - in += temp; - inlen -= temp; - saturnin_block_encrypt_xor(state->s.block, state->s.hash, 16, RC_16_7); - } - - /* Process full blocks that are aligned at state->s.count == 0 */ - while (inlen >= 32) { - saturnin_block_encrypt_xor(in, state->s.hash, 16, RC_16_7); - in += 32; - inlen -= 32; - } - - /* Process the left-over block at the end of the input */ - temp = (unsigned)inlen; - memcpy(state->s.block, in, temp); - state->s.count = temp; -} - -void saturnin_hash_finalize - (saturnin_hash_state_t *state, unsigned char *out) -{ - /* Pad the final block */ - state->s.block[state->s.count] = 0x80; - memset(state->s.block + state->s.count + 1, 0, 31 - state->s.count); - - /* Generate the final hash value */ - saturnin_block_encrypt_xor(state->s.block, state->s.hash, 16, RC_16_8); - memcpy(out, state->s.hash, 32); -} diff --git a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/saturnin.h b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/saturnin.h deleted file mode 100644 index 873d950..0000000 --- a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys-avr/saturnin.h +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LWCRYPTO_SATURNIN_H -#define LWCRYPTO_SATURNIN_H - -#include "aead-common.h" - -/** - * \file saturnin.h - * \brief Saturnin authenticated encryption algorithm. - * - * The Saturnin family consists of two members: SATURNIN-CTR-Cascade and - * SATURNIN-Short. Both take a 256-bit key and a 128-bit nonce. - * Internally they use a 256-bit block cipher similar in construction to AES. - * - * SATURNIN-Short does not support associated data or plaintext packets - * with more than 15 bytes. This makes it very efficient on short packets - * with only a single block operation involved. - * - * This implementation of SATURNIN-Short will return an error if the - * caller supplies associated data or more than 15 bytes of plaintext. - * - * References: https://project.inria.fr/saturnin/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all SATURNIN family members. - */ -#define SATURNIN_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for SATURNIN-CTR-Cascade or the - * total size of the ciphertext for SATURNIN-Short. - */ -#define SATURNIN_TAG_SIZE 32 - -/** - * \brief Size of the nonce for all SATURNIN family members. - */ -#define SATURNIN_NONCE_SIZE 16 - -/** - * \brief Size of the hash for SATURNIN-Hash. - */ -#define SATURNIN_HASH_SIZE 32 - -/** - * \brief State information for SATURNIN-Hash incremental modes. - */ -typedef union -{ - struct { - unsigned char hash[32]; /**< Current hash state */ - unsigned char block[32]; /**< Left-over block data from last update */ - unsigned char count; /**< Number of bytes in the current block */ - unsigned char mode; /**< Hash mode: 0 for absorb, 1 for squeeze */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} saturnin_hash_state_t; - -/** - * \brief Meta-information block for the SATURNIN-CTR-Cascade cipher. - */ -extern aead_cipher_t const saturnin_cipher; - -/** - * \brief Meta-information block for the SATURNIN-Short cipher. - */ -extern aead_cipher_t const saturnin_short_cipher; - -/** - * \brief Meta-information block for SATURNIN-Hash. - */ -extern aead_hash_algorithm_t const saturnin_hash_algorithm; - -/** - * \brief Encrypts and authenticates a packet with SATURNIN-CTR-Cascade. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 32 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa saturnin_aead_decrypt() - */ -int saturnin_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SATURNIN-CTR-Cascade. - * - * \param m Buffer to receive the plaintext message on output. 
- * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 32 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa saturnin_aead_encrypt() - */ -int saturnin_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SATURNIN-Short. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which is always 32. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes, which must be - * less than or equal to 15 bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes, which must be zero. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or -2 if the caller supplied too many bytes of - * plaintext or they supplied associated data. - * - * \sa saturnin_short_aead_decrypt() - */ -int saturnin_short_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SATURNIN-Short. - * - * \param m Buffer to receive the plaintext message on output. There must - * be at least 15 bytes of space in this buffer even if the caller expects - * to receive less data than that. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext to decrypt. - * \param clen Length of the input data in bytes, which must be 32. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes, which must be zero. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or -2 if the caller supplied associated data. 
- * - * \sa saturnin_short_aead_encrypt() - */ -int saturnin_short_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with SATURNIN to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * SATURNIN_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int saturnin_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an SATURNIN-Hash hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa saturnin_hash_update(), saturnin_hash_finalize(), saturnin_hash() - */ -void saturnin_hash_init(saturnin_hash_state_t *state); - -/** - * \brief Updates an SATURNIN-Hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa saturnin_hash_init(), saturnin_hash_finalize() - */ -void saturnin_hash_update - (saturnin_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an SATURNIN-Hash hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 32-byte hash value. - * - * \sa saturnin_hash_init(), saturnin_hash_update() - */ -void saturnin_hash_finalize - (saturnin_hash_state_t *state, unsigned char *out); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/aead-common.c b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/aead-common.c new file mode 100644 index 0000000..84fc53a --- /dev/null +++ b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/aead-common.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "aead-common.h" + +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = (accum - 1) >> 8; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} + +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size, int precheck) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = ((accum - 1) >> 8) & precheck; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} diff --git a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/aead-common.h b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/aead-common.h new file mode 100644 index 0000000..2be95eb --- /dev/null +++ b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/aead-common.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_AEAD_COMMON_H +#define LWCRYPTO_AEAD_COMMON_H + +#include + +/** + * \file aead-common.h + * \brief Definitions that are common across AEAD schemes. + * + * AEAD stands for "Authenticated Encryption with Associated Data". + * It is a standard API pattern for securely encrypting and + * authenticating packets of data. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts and authenticates a packet with an AEAD scheme. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. 
+ * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + */ +typedef int (*aead_cipher_encrypt_t) + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with an AEAD scheme. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + */ +typedef int (*aead_cipher_decrypt_t) + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data. + * + * \param out Buffer to receive the hash output. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +typedef int (*aead_hash_t) + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a hashing operation. + * + * \param state Hash state to be initialized. + */ +typedef void (*aead_hash_init_t)(void *state); + +/** + * \brief Updates a hash state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + */ +typedef void (*aead_hash_update_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Returns the final hash value from a hashing operation. + * + * \param Hash state to be finalized. + * \param out Points to the output buffer to receive the hash value. + */ +typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); + +/** + * \brief Aborbs more input data into an XOF state. + * + * \param state XOF state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. + * + * \sa ascon_xof_init(), ascon_xof_squeeze() + */ +typedef void (*aead_xof_absorb_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Squeezes output data from an XOF state. 
+ * + * \param state XOF state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + */ +typedef void (*aead_xof_squeeze_t) + (void *state, unsigned char *out, unsigned long long outlen); + +/** + * \brief No special AEAD features. + */ +#define AEAD_FLAG_NONE 0x0000 + +/** + * \brief The natural byte order of the AEAD cipher is little-endian. + * + * If this flag is not present, then the natural byte order of the + * AEAD cipher should be assumed to be big-endian. + * + * The natural byte order may be useful when formatting packet sequence + * numbers as nonces. The application needs to know whether the sequence + * number should be packed into the leading or trailing bytes of the nonce. + */ +#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 + +/** + * \brief Meta-information about an AEAD cipher. + */ +typedef struct +{ + const char *name; /**< Name of the cipher */ + unsigned key_len; /**< Length of the key in bytes */ + unsigned nonce_len; /**< Length of the nonce in bytes */ + unsigned tag_len; /**< Length of the tag in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ + aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ + +} aead_cipher_t; + +/** + * \brief Meta-information about a hash algorithm that is related to an AEAD. + * + * Regular hash algorithms should provide the "hash", "init", "update", + * and "finalize" functions. Extensible Output Functions (XOF's) should + * proivde the "hash", "init", "absorb", and "squeeze" functions. + */ +typedef struct +{ + const char *name; /**< Name of the hash algorithm */ + size_t state_size; /**< Size of the incremental state structure */ + unsigned hash_len; /**< Length of the hash in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_hash_t hash; /**< All in one hashing function */ + aead_hash_init_t init; /**< Incremental hash/XOF init function */ + aead_hash_update_t update; /**< Incremental hash update function */ + aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ + aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ + aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ + +} aead_hash_algorithm_t; + +/** + * \brief Check an authentication tag in constant time. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + */ +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len); + +/** + * \brief Check an authentication tag in constant time with a previous check. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
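+ *
+ * \note Editorial illustration, not part of the upstream header: the
+ * implementation in aead-common.c ORs the XOR of every pair of tag bytes
+ * into an accumulator.  When the tags are identical the accumulator is 0,
+ * so ((0 - 1) >> 8) produces the all-ones mask -1, which is then ANDed
+ * with \a precheck.  An earlier failure (precheck == 0) therefore clears
+ * the mask, wipes the plaintext, and makes the function return -1 exactly
+ * as if the tags themselves had mismatched.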
+ *
+ * \return Returns -1 if the tag check failed or 0 if the check succeeded.
+ *
+ * If the tag check fails, then the \a plaintext will also be zeroed to
+ * prevent it from being used accidentally by the application when the
+ * ciphertext was invalid.
+ *
+ * This version can be used to incorporate other information about the
+ * correctness of the plaintext into the final result.
+ */
+int aead_check_tag_precheck
+    (unsigned char *plaintext, unsigned long long plaintext_len,
+     const unsigned char *tag1, const unsigned char *tag2,
+     unsigned tag_len, int precheck);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/api.h b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/api.h
new file mode 100644
index 0000000..ae8c7f6
--- /dev/null
+++ b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/api.h
@@ -0,0 +1 @@
+#define CRYPTO_BYTES 32
diff --git a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/hash.c b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/hash.c
new file mode 100644
index 0000000..76a7173
--- /dev/null
+++ b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/hash.c
@@ -0,0 +1,8 @@
+
+#include "saturnin.h"
+
+int crypto_hash
+    (unsigned char *out, const unsigned char *in, unsigned long long inlen)
+{
+    return saturnin_hash(out, in, inlen);
+}
diff --git a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/internal-util.h b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/internal-util.h
new file mode 100644
index 0000000..e30166d
--- /dev/null
+++ b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/internal-util.h
@@ -0,0 +1,702 @@
+/*
+ * Copyright (C) 2020 Southern Storm Software, Pty Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef LW_INTERNAL_UTIL_H
+#define LW_INTERNAL_UTIL_H
+
+#include <stdint.h>
+
+/* Figure out how to inline functions using this C compiler */
+#if defined(__STDC__) && __STDC_VERSION__ >= 199901L
+#define STATIC_INLINE static inline
+#elif defined(__GNUC__) || defined(__clang__)
+#define STATIC_INLINE static __inline__
+#else
+#define STATIC_INLINE static
+#endif
+
+/* Try to figure out whether the CPU is little-endian or big-endian.
+ * May need to modify this to include new compiler-specific defines.
+ * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your + * compiler flags when you compile this library */ +#if defined(__x86_64) || defined(__x86_64__) || \ + defined(__i386) || defined(__i386__) || \ + defined(__AVR__) || defined(__arm) || defined(__arm__) || \ + defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ + defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ + defined(__LITTLE_ENDIAN__) +#define LW_UTIL_LITTLE_ENDIAN 1 +#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ + defined(__BIG_ENDIAN__) +/* Big endian */ +#else +#error "Cannot determine the endianess of this platform" +#endif + +/* Helper macros to load and store values while converting endian-ness */ + +/* Load a big-endian 32-bit word from a byte buffer */ +#define be_load_word32(ptr) \ + ((((uint32_t)((ptr)[0])) << 24) | \ + (((uint32_t)((ptr)[1])) << 16) | \ + (((uint32_t)((ptr)[2])) << 8) | \ + ((uint32_t)((ptr)[3]))) + +/* Store a big-endian 32-bit word into a byte buffer */ +#define be_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 24); \ + (ptr)[1] = (uint8_t)(_x >> 16); \ + (ptr)[2] = (uint8_t)(_x >> 8); \ + (ptr)[3] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 32-bit word from a byte buffer */ +#define le_load_word32(ptr) \ + ((((uint32_t)((ptr)[3])) << 24) | \ + (((uint32_t)((ptr)[2])) << 16) | \ + (((uint32_t)((ptr)[1])) << 8) | \ + ((uint32_t)((ptr)[0]))) + +/* Store a little-endian 32-bit word into a byte buffer */ +#define le_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + } while (0) + +/* Load a big-endian 64-bit word from a byte buffer */ +#define be_load_word64(ptr) \ + ((((uint64_t)((ptr)[0])) << 56) | \ + (((uint64_t)((ptr)[1])) << 48) | \ + (((uint64_t)((ptr)[2])) << 40) | \ + (((uint64_t)((ptr)[3])) << 32) | \ + (((uint64_t)((ptr)[4])) << 24) | \ + (((uint64_t)((ptr)[5])) << 16) | \ + (((uint64_t)((ptr)[6])) << 8) | \ + ((uint64_t)((ptr)[7]))) + +/* Store a big-endian 64-bit word into a byte buffer */ +#define be_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 56); \ + (ptr)[1] = (uint8_t)(_x >> 48); \ + (ptr)[2] = (uint8_t)(_x >> 40); \ + (ptr)[3] = (uint8_t)(_x >> 32); \ + (ptr)[4] = (uint8_t)(_x >> 24); \ + (ptr)[5] = (uint8_t)(_x >> 16); \ + (ptr)[6] = (uint8_t)(_x >> 8); \ + (ptr)[7] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 64-bit word from a byte buffer */ +#define le_load_word64(ptr) \ + ((((uint64_t)((ptr)[7])) << 56) | \ + (((uint64_t)((ptr)[6])) << 48) | \ + (((uint64_t)((ptr)[5])) << 40) | \ + (((uint64_t)((ptr)[4])) << 32) | \ + (((uint64_t)((ptr)[3])) << 24) | \ + (((uint64_t)((ptr)[2])) << 16) | \ + (((uint64_t)((ptr)[1])) << 8) | \ + ((uint64_t)((ptr)[0]))) + +/* Store a little-endian 64-bit word into a byte buffer */ +#define le_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + (ptr)[4] = (uint8_t)(_x >> 32); \ + (ptr)[5] = (uint8_t)(_x >> 40); \ + (ptr)[6] = (uint8_t)(_x >> 48); \ + (ptr)[7] = (uint8_t)(_x >> 56); \ + } while (0) + +/* Load a big-endian 16-bit word from a byte buffer */ +#define be_load_word16(ptr) \ + ((((uint16_t)((ptr)[0])) << 8) | \ + ((uint16_t)((ptr)[1]))) + +/* Store a 
big-endian 16-bit word into a byte buffer */ +#define be_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 8); \ + (ptr)[1] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 16-bit word from a byte buffer */ +#define le_load_word16(ptr) \ + ((((uint16_t)((ptr)[1])) << 8) | \ + ((uint16_t)((ptr)[0]))) + +/* Store a little-endian 16-bit word into a byte buffer */ +#define le_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + } while (0) + +/* XOR a source byte buffer against a destination */ +#define lw_xor_block(dest, src, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ ^= *_src++; \ + --_len; \ + } \ + } while (0) + +/* XOR two source byte buffers and put the result in a destination buffer */ +#define lw_xor_block_2_src(dest, src1, src2, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ = *_src1++ ^ *_src2++; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time */ +#define lw_xor_block_2_dest(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest2++ = (*_dest++ ^= *_src++); \ + --_len; \ + } \ + } while (0) + +/* XOR two byte buffers and write to a destination which at the same + * time copying the contents of src2 to dest2 */ +#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src2++; \ + *_dest2++ = _temp; \ + *_dest++ = *_src1++ ^ _temp; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time. This version swaps the source value + * into the "dest" buffer */ +#define lw_xor_block_swap(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src++; \ + *_dest2++ = *_dest ^ _temp; \ + *_dest++ = _temp; \ + --_len; \ + } \ + } while (0) + +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + +/* Rotation macros for 32-bit arguments */ + +/* Generic left rotate */ +#define leftRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (32 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (32 - (bits))); \ + })) + +#if !LW_CRYPTO_ROTATE32_COMPOSED + +/* Left rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1(a) (leftRotate((a), 1)) +#define leftRotate2(a) (leftRotate((a), 2)) +#define leftRotate3(a) (leftRotate((a), 3)) +#define leftRotate4(a) (leftRotate((a), 4)) +#define leftRotate5(a) (leftRotate((a), 5)) +#define leftRotate6(a) (leftRotate((a), 6)) +#define leftRotate7(a) (leftRotate((a), 7)) +#define leftRotate8(a) (leftRotate((a), 8)) +#define leftRotate9(a) (leftRotate((a), 9)) +#define leftRotate10(a) (leftRotate((a), 10)) +#define leftRotate11(a) (leftRotate((a), 11)) +#define leftRotate12(a) (leftRotate((a), 12)) +#define leftRotate13(a) (leftRotate((a), 13)) +#define leftRotate14(a) (leftRotate((a), 14)) +#define leftRotate15(a) (leftRotate((a), 15)) +#define leftRotate16(a) (leftRotate((a), 16)) +#define leftRotate17(a) (leftRotate((a), 17)) +#define leftRotate18(a) (leftRotate((a), 18)) +#define leftRotate19(a) (leftRotate((a), 19)) +#define leftRotate20(a) (leftRotate((a), 20)) +#define leftRotate21(a) (leftRotate((a), 21)) +#define leftRotate22(a) (leftRotate((a), 22)) +#define leftRotate23(a) (leftRotate((a), 23)) +#define leftRotate24(a) (leftRotate((a), 24)) +#define leftRotate25(a) (leftRotate((a), 25)) +#define leftRotate26(a) (leftRotate((a), 26)) +#define leftRotate27(a) (leftRotate((a), 27)) +#define leftRotate28(a) (leftRotate((a), 28)) +#define leftRotate29(a) (leftRotate((a), 29)) +#define leftRotate30(a) (leftRotate((a), 30)) +#define leftRotate31(a) (leftRotate((a), 31)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1(a) (rightRotate((a), 1)) +#define rightRotate2(a) (rightRotate((a), 2)) +#define rightRotate3(a) (rightRotate((a), 3)) +#define rightRotate4(a) (rightRotate((a), 4)) +#define rightRotate5(a) (rightRotate((a), 5)) +#define rightRotate6(a) (rightRotate((a), 6)) +#define rightRotate7(a) (rightRotate((a), 7)) +#define rightRotate8(a) (rightRotate((a), 8)) +#define rightRotate9(a) (rightRotate((a), 9)) +#define rightRotate10(a) (rightRotate((a), 10)) +#define rightRotate11(a) (rightRotate((a), 11)) +#define rightRotate12(a) (rightRotate((a), 12)) +#define rightRotate13(a) (rightRotate((a), 13)) +#define rightRotate14(a) (rightRotate((a), 14)) +#define rightRotate15(a) (rightRotate((a), 15)) +#define rightRotate16(a) (rightRotate((a), 16)) +#define rightRotate17(a) (rightRotate((a), 17)) +#define rightRotate18(a) (rightRotate((a), 18)) +#define rightRotate19(a) (rightRotate((a), 19)) +#define rightRotate20(a) (rightRotate((a), 20)) +#define rightRotate21(a) (rightRotate((a), 21)) +#define rightRotate22(a) (rightRotate((a), 22)) +#define rightRotate23(a) (rightRotate((a), 23)) +#define rightRotate24(a) (rightRotate((a), 24)) +#define rightRotate25(a) (rightRotate((a), 25)) +#define rightRotate26(a) (rightRotate((a), 26)) +#define rightRotate27(a) (rightRotate((a), 27)) +#define rightRotate28(a) (rightRotate((a), 28)) +#define rightRotate29(a) (rightRotate((a), 29)) +#define rightRotate30(a) (rightRotate((a), 30)) +#define rightRotate31(a) (rightRotate((a), 31)) + +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ 
+#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Rotation macros for 64-bit arguments */ + +/* Generic left rotate */ +#define leftRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (64 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (64 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_64(a) (leftRotate_64((a), 1)) +#define leftRotate2_64(a) (leftRotate_64((a), 2)) +#define leftRotate3_64(a) (leftRotate_64((a), 3)) +#define leftRotate4_64(a) (leftRotate_64((a), 4)) +#define leftRotate5_64(a) (leftRotate_64((a), 5)) +#define leftRotate6_64(a) (leftRotate_64((a), 6)) +#define leftRotate7_64(a) (leftRotate_64((a), 7)) +#define leftRotate8_64(a) (leftRotate_64((a), 8)) +#define leftRotate9_64(a) (leftRotate_64((a), 9)) +#define leftRotate10_64(a) (leftRotate_64((a), 10)) +#define leftRotate11_64(a) (leftRotate_64((a), 11)) +#define leftRotate12_64(a) (leftRotate_64((a), 12)) +#define leftRotate13_64(a) (leftRotate_64((a), 13)) +#define leftRotate14_64(a) (leftRotate_64((a), 14)) +#define leftRotate15_64(a) (leftRotate_64((a), 15)) +#define leftRotate16_64(a) (leftRotate_64((a), 16)) +#define leftRotate17_64(a) (leftRotate_64((a), 17)) +#define leftRotate18_64(a) (leftRotate_64((a), 18)) +#define leftRotate19_64(a) (leftRotate_64((a), 19)) +#define leftRotate20_64(a) (leftRotate_64((a), 20)) +#define leftRotate21_64(a) (leftRotate_64((a), 21)) +#define leftRotate22_64(a) (leftRotate_64((a), 22)) +#define leftRotate23_64(a) (leftRotate_64((a), 23)) +#define leftRotate24_64(a) (leftRotate_64((a), 24)) +#define leftRotate25_64(a) (leftRotate_64((a), 25)) +#define leftRotate26_64(a) (leftRotate_64((a), 26)) +#define leftRotate27_64(a) (leftRotate_64((a), 27)) +#define leftRotate28_64(a) (leftRotate_64((a), 28)) +#define leftRotate29_64(a) (leftRotate_64((a), 29)) +#define leftRotate30_64(a) (leftRotate_64((a), 30)) +#define leftRotate31_64(a) (leftRotate_64((a), 31)) +#define leftRotate32_64(a) (leftRotate_64((a), 32)) +#define leftRotate33_64(a) (leftRotate_64((a), 33)) +#define leftRotate34_64(a) (leftRotate_64((a), 34)) +#define leftRotate35_64(a) (leftRotate_64((a), 35)) +#define leftRotate36_64(a) (leftRotate_64((a), 36)) +#define leftRotate37_64(a) (leftRotate_64((a), 37)) +#define leftRotate38_64(a) (leftRotate_64((a), 38)) +#define leftRotate39_64(a) (leftRotate_64((a), 39)) +#define leftRotate40_64(a) (leftRotate_64((a), 40)) +#define leftRotate41_64(a) (leftRotate_64((a), 41)) +#define leftRotate42_64(a) (leftRotate_64((a), 42)) +#define leftRotate43_64(a) (leftRotate_64((a), 43)) +#define leftRotate44_64(a) (leftRotate_64((a), 44)) +#define leftRotate45_64(a) (leftRotate_64((a), 45)) +#define leftRotate46_64(a) (leftRotate_64((a), 46)) +#define leftRotate47_64(a) (leftRotate_64((a), 47)) +#define leftRotate48_64(a) (leftRotate_64((a), 48)) +#define leftRotate49_64(a) (leftRotate_64((a), 49)) +#define leftRotate50_64(a) (leftRotate_64((a), 50)) +#define leftRotate51_64(a) (leftRotate_64((a), 51)) +#define leftRotate52_64(a) (leftRotate_64((a), 52)) +#define leftRotate53_64(a) (leftRotate_64((a), 53)) +#define leftRotate54_64(a) (leftRotate_64((a), 54)) +#define leftRotate55_64(a) (leftRotate_64((a), 55)) +#define leftRotate56_64(a) (leftRotate_64((a), 56)) +#define leftRotate57_64(a) (leftRotate_64((a), 57)) +#define leftRotate58_64(a) (leftRotate_64((a), 58)) +#define leftRotate59_64(a) (leftRotate_64((a), 59)) +#define leftRotate60_64(a) (leftRotate_64((a), 60)) +#define leftRotate61_64(a) (leftRotate_64((a), 61)) +#define leftRotate62_64(a) (leftRotate_64((a), 62)) +#define leftRotate63_64(a) (leftRotate_64((a), 63)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_64(a) (rightRotate_64((a), 1)) +#define rightRotate2_64(a) (rightRotate_64((a), 2)) +#define rightRotate3_64(a) (rightRotate_64((a), 3)) +#define rightRotate4_64(a) (rightRotate_64((a), 4)) +#define rightRotate5_64(a) (rightRotate_64((a), 5)) +#define rightRotate6_64(a) (rightRotate_64((a), 6)) +#define rightRotate7_64(a) (rightRotate_64((a), 7)) +#define rightRotate8_64(a) (rightRotate_64((a), 8)) +#define rightRotate9_64(a) (rightRotate_64((a), 9)) +#define rightRotate10_64(a) (rightRotate_64((a), 10)) +#define rightRotate11_64(a) (rightRotate_64((a), 11)) +#define rightRotate12_64(a) (rightRotate_64((a), 12)) +#define rightRotate13_64(a) (rightRotate_64((a), 13)) +#define rightRotate14_64(a) (rightRotate_64((a), 14)) +#define rightRotate15_64(a) (rightRotate_64((a), 15)) +#define rightRotate16_64(a) (rightRotate_64((a), 16)) +#define rightRotate17_64(a) (rightRotate_64((a), 17)) +#define rightRotate18_64(a) (rightRotate_64((a), 18)) +#define rightRotate19_64(a) (rightRotate_64((a), 19)) +#define rightRotate20_64(a) (rightRotate_64((a), 20)) +#define rightRotate21_64(a) (rightRotate_64((a), 21)) +#define rightRotate22_64(a) (rightRotate_64((a), 22)) +#define rightRotate23_64(a) (rightRotate_64((a), 23)) +#define rightRotate24_64(a) (rightRotate_64((a), 24)) +#define rightRotate25_64(a) (rightRotate_64((a), 25)) +#define rightRotate26_64(a) (rightRotate_64((a), 26)) +#define rightRotate27_64(a) (rightRotate_64((a), 27)) +#define rightRotate28_64(a) (rightRotate_64((a), 28)) +#define rightRotate29_64(a) (rightRotate_64((a), 29)) +#define rightRotate30_64(a) (rightRotate_64((a), 30)) +#define rightRotate31_64(a) (rightRotate_64((a), 31)) +#define rightRotate32_64(a) (rightRotate_64((a), 32)) +#define rightRotate33_64(a) (rightRotate_64((a), 33)) +#define rightRotate34_64(a) (rightRotate_64((a), 34)) +#define rightRotate35_64(a) (rightRotate_64((a), 35)) +#define rightRotate36_64(a) (rightRotate_64((a), 36)) +#define rightRotate37_64(a) (rightRotate_64((a), 37)) +#define rightRotate38_64(a) (rightRotate_64((a), 38)) +#define rightRotate39_64(a) (rightRotate_64((a), 39)) +#define rightRotate40_64(a) (rightRotate_64((a), 40)) +#define rightRotate41_64(a) (rightRotate_64((a), 41)) +#define rightRotate42_64(a) (rightRotate_64((a), 42)) +#define rightRotate43_64(a) (rightRotate_64((a), 43)) +#define rightRotate44_64(a) (rightRotate_64((a), 44)) +#define rightRotate45_64(a) (rightRotate_64((a), 45)) +#define rightRotate46_64(a) (rightRotate_64((a), 46)) +#define rightRotate47_64(a) (rightRotate_64((a), 47)) +#define rightRotate48_64(a) (rightRotate_64((a), 48)) +#define rightRotate49_64(a) (rightRotate_64((a), 49)) +#define rightRotate50_64(a) (rightRotate_64((a), 50)) +#define rightRotate51_64(a) (rightRotate_64((a), 51)) +#define rightRotate52_64(a) (rightRotate_64((a), 52)) +#define rightRotate53_64(a) (rightRotate_64((a), 53)) +#define rightRotate54_64(a) (rightRotate_64((a), 54)) +#define rightRotate55_64(a) (rightRotate_64((a), 55)) +#define rightRotate56_64(a) (rightRotate_64((a), 56)) +#define rightRotate57_64(a) (rightRotate_64((a), 57)) +#define rightRotate58_64(a) (rightRotate_64((a), 58)) +#define rightRotate59_64(a) (rightRotate_64((a), 59)) +#define rightRotate60_64(a) (rightRotate_64((a), 60)) +#define rightRotate61_64(a) (rightRotate_64((a), 61)) +#define rightRotate62_64(a) (rightRotate_64((a), 62)) +#define rightRotate63_64(a) (rightRotate_64((a), 63)) + +/* 
Rotate a 16-bit value left by a number of bits */ +#define leftRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (16 - (bits))); \ + })) + +/* Rotate a 16-bit value right by a number of bits */ +#define rightRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (16 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_16(a) (leftRotate_16((a), 1)) +#define leftRotate2_16(a) (leftRotate_16((a), 2)) +#define leftRotate3_16(a) (leftRotate_16((a), 3)) +#define leftRotate4_16(a) (leftRotate_16((a), 4)) +#define leftRotate5_16(a) (leftRotate_16((a), 5)) +#define leftRotate6_16(a) (leftRotate_16((a), 6)) +#define leftRotate7_16(a) (leftRotate_16((a), 7)) +#define leftRotate8_16(a) (leftRotate_16((a), 8)) +#define leftRotate9_16(a) (leftRotate_16((a), 9)) +#define leftRotate10_16(a) (leftRotate_16((a), 10)) +#define leftRotate11_16(a) (leftRotate_16((a), 11)) +#define leftRotate12_16(a) (leftRotate_16((a), 12)) +#define leftRotate13_16(a) (leftRotate_16((a), 13)) +#define leftRotate14_16(a) (leftRotate_16((a), 14)) +#define leftRotate15_16(a) (leftRotate_16((a), 15)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_16(a) (rightRotate_16((a), 1)) +#define rightRotate2_16(a) (rightRotate_16((a), 2)) +#define rightRotate3_16(a) (rightRotate_16((a), 3)) +#define rightRotate4_16(a) (rightRotate_16((a), 4)) +#define rightRotate5_16(a) (rightRotate_16((a), 5)) +#define rightRotate6_16(a) (rightRotate_16((a), 6)) +#define rightRotate7_16(a) (rightRotate_16((a), 7)) +#define rightRotate8_16(a) (rightRotate_16((a), 8)) +#define rightRotate9_16(a) (rightRotate_16((a), 9)) +#define rightRotate10_16(a) (rightRotate_16((a), 10)) +#define rightRotate11_16(a) (rightRotate_16((a), 11)) +#define rightRotate12_16(a) (rightRotate_16((a), 12)) +#define rightRotate13_16(a) (rightRotate_16((a), 13)) +#define rightRotate14_16(a) (rightRotate_16((a), 14)) +#define rightRotate15_16(a) (rightRotate_16((a), 15)) + +/* Rotate an 8-bit value left by a number of bits */ +#define leftRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (8 - (bits))); \ + })) + +/* Rotate an 8-bit value right by a number of bits */ +#define rightRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (8 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_8(a) (leftRotate_8((a), 1)) +#define leftRotate2_8(a) (leftRotate_8((a), 2)) +#define leftRotate3_8(a) (leftRotate_8((a), 3)) +#define leftRotate4_8(a) (leftRotate_8((a), 4)) +#define leftRotate5_8(a) (leftRotate_8((a), 5)) +#define leftRotate6_8(a) (leftRotate_8((a), 6)) +#define leftRotate7_8(a) (leftRotate_8((a), 7)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_8(a) (rightRotate_8((a), 1)) +#define rightRotate2_8(a) (rightRotate_8((a), 2)) +#define rightRotate3_8(a) (rightRotate_8((a), 3)) +#define rightRotate4_8(a) (rightRotate_8((a), 4)) +#define rightRotate5_8(a) (rightRotate_8((a), 5)) +#define rightRotate6_8(a) (rightRotate_8((a), 6)) +#define rightRotate7_8(a) (rightRotate_8((a), 7)) + +#endif diff --git a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/saturnin.c b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/saturnin.c new file mode 100644 index 0000000..734fc69 --- /dev/null +++ b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/saturnin.c @@ -0,0 +1,781 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "saturnin.h" +#include "internal-util.h" +#include + +aead_cipher_t const saturnin_cipher = { + "SATURNIN-CTR-Cascade", + SATURNIN_KEY_SIZE, + SATURNIN_NONCE_SIZE, + SATURNIN_TAG_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + saturnin_aead_encrypt, + saturnin_aead_decrypt +}; + +aead_cipher_t const saturnin_short_cipher = { + "SATURNIN-Short", + SATURNIN_KEY_SIZE, + SATURNIN_NONCE_SIZE, + SATURNIN_TAG_SIZE, + AEAD_FLAG_NONE, + saturnin_short_aead_encrypt, + saturnin_short_aead_decrypt +}; + +aead_hash_algorithm_t const saturnin_hash_algorithm = { + "SATURNIN-Hash", + sizeof(saturnin_hash_state_t), + SATURNIN_HASH_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + saturnin_hash, + (aead_hash_init_t)saturnin_hash_init, + (aead_hash_update_t)saturnin_hash_update, + (aead_hash_finalize_t)saturnin_hash_finalize, + 0, /* absorb */ + 0 /* squeeze */ +}; + +/* Round constant tables for various combinations of rounds and domain_sep */ +static uint32_t const RC_10_1[] = { + 0x4eb026c2, 0x90595303, 0xaa8fe632, 0xfe928a92, 0x4115a419, + 0x93539532, 0x5db1cc4e, 0x541515ca, 0xbd1f55a8, 0x5a6e1a0d +}; +static uint32_t const RC_10_2[] = { + 0x4e4526b5, 0xa3565ff0, 0x0f8f20d8, 0x0b54bee1, 0x7d1a6c9d, + 0x17a6280a, 0xaa46c986, 0xc1199062, 0x182c5cde, 0xa00d53fe +}; +static uint32_t const RC_10_3[] = { + 0x4e162698, 0xb2535ba1, 0x6c8f9d65, 0x5816ad30, 0x691fd4fa, + 0x6bf5bcf9, 0xf8eb3525, 0xb21decfa, 0x7b3da417, 0xf62c94b4 +}; +static uint32_t const RC_10_4[] = { + 0x4faf265b, 0xc5484616, 0x45dcad21, 0xe08bd607, 0x0504fdb8, + 0x1e1f5257, 0x45fbc216, 0xeb529b1f, 0x52194e32, 0x5498c018 +}; +static uint32_t const RC_10_5[] = { + 0x4ffc2676, 0xd44d4247, 0x26dc109c, 0xb3c9c5d6, 0x110145df, + 0x624cc6a4, 0x17563eb5, 0x9856e787, 0x3108b6fb, 0x02b90752 +}; +static uint32_t const RC_10_6[] = { + 0x4f092601, 0xe7424eb4, 0x83dcd676, 0x460ff1a5, 0x2d0e8d5b, + 0xe6b97b9c, 0xe0a13b7d, 0x0d5a622f, 0x943bbf8d, 0xf8da4ea1 +}; +static uint32_t const RC_16_7[] = { + 0x3fba180c, 0x563ab9ab, 0x125ea5ef, 0x859da26c, 0xb8cf779b, + 0x7d4de793, 0x07efb49f, 0x8d525306, 0x1e08e6ab, 0x41729f87, + 0x8c4aef0a, 0x4aa0c9a7, 0xd93a95ef, 0xbb00d2af, 0xb62c5bf0, + 0x386d94d8 +}; +static uint32_t const RC_16_8[] = { + 0x3c9b19a7, 0xa9098694, 0x23f878da, 0xa7b647d3, 0x74fc9d78, + 0xeacaae11, 0x2f31a677, 0x4cc8c054, 0x2f51ca05, 0x5268f195, + 0x4f5b8a2b, 0xf614b4ac, 0xf1d95401, 0x764d2568, 0x6a493611, + 0x8eef9c3e +}; + +/* Rotate the 4-bit nibbles within a 16-bit word left */ +#define leftRotate4_N(a, mask1, bits1, mask2, bits2) \ + do { \ + uint32_t _temp = (a); \ + (a) = ((_temp & (mask1)) << (bits1)) | \ + ((_temp & ((mask1) ^ (uint32_t)0xFFFFU)) >> (4 - (bits1))) | \ + ((_temp & (((uint32_t)(mask2)) << 16)) << (bits2)) | \ + ((_temp & (((uint32_t)((mask2)) << 16) ^ 0xFFFF0000U)) >> (4 - (bits2))); \ + } while (0) + +/* Rotate 16-bit subwords left */ +#define leftRotate16_N(a, mask1, bits1, mask2, bits2) \ + do { \ + uint32_t _temp = (a); \ + (a) = ((_temp & (mask1)) << (bits1)) | \ + ((_temp & ((mask1) ^ (uint32_t)0xFFFFU)) >> (16 - (bits1))) | \ + ((_temp & (((uint32_t)(mask2)) << 16)) << (bits2)) | \ + ((_temp & (((uint32_t)((mask2)) << 16) ^ 0xFFFF0000U)) >> (16 - (bits2))); \ + } while (0) + +/* XOR the SATURNIN state with the key */ +#define saturnin_xor_key() \ + do { \ + for (index = 0; index < 8; ++index) \ + S[index] ^= K[index]; \ + } while (0) + +/* XOR the SATURNIN state with a rotated version of the key */ +#define saturnin_xor_key_rotated() \ + do { \ + for (index = 0; index < 8; ++index) \ + S[index] ^= K[index + 8]; \ + } while (0) + 
+/* Apply an SBOX layer for SATURNIN - definition from the specification */ +#define S_LAYER(a, b, c, d) \ + do { \ + (a) ^= (b) & (c); \ + (b) ^= (a) | (d); \ + (d) ^= (b) | (c); \ + (c) ^= (b) & (d); \ + (b) ^= (a) | (c); \ + (a) ^= (b) | (d); \ + } while (0) + +/* Apply an SBOX layer for SATURNIN in reverse */ +#define S_LAYER_INVERSE(a, b, c, d) \ + do { \ + (a) ^= (b) | (d); \ + (b) ^= (a) | (c); \ + (c) ^= (b) & (d); \ + (d) ^= (b) | (c); \ + (b) ^= (a) | (d); \ + (a) ^= (b) & (c); \ + } while (0) + +/** + * \brief Applies the SBOX to the SATURNIN state. + * + * \param S The state. + */ +static void saturnin_sbox(uint32_t S[8]) +{ + uint32_t a, b, c, d; + + /* PI_0 on the first half of the state */ + a = S[0]; b = S[1]; c = S[2]; d = S[3]; + S_LAYER(a, b, c, d); + S[0] = b; S[1] = c; S[2] = d; S[3] = a; + + /* PI_1 on the second half of the state */ + a = S[4]; b = S[5]; c = S[6]; d = S[7]; + S_LAYER(a, b, c, d); + S[4] = d; S[5] = b; S[6] = a; S[7] = c; +} + +/** + * \brief Applies the inverse of the SBOX to the SATURNIN state. + * + * \param S The state. + */ +static void saturnin_sbox_inverse(uint32_t S[8]) +{ + uint32_t a, b, c, d; + + /* PI_0 on the first half of the state */ + b = S[0]; c = S[1]; d = S[2]; a = S[3]; + S_LAYER_INVERSE(a, b, c, d); + S[0] = a; S[1] = b; S[2] = c; S[3] = d; + + /* PI_1 on the second half of the state */ + d = S[4]; b = S[5]; a = S[6]; c = S[7]; + S_LAYER_INVERSE(a, b, c, d); + S[4] = a; S[5] = b; S[6] = c; S[7] = d; +} + +/** + * \brief Applies the MDS matrix to the SATURNIN state. + * + * \param S The state. + */ +static void saturnin_mds(uint32_t S[8]) +{ + uint32_t x0, x1, x2, x3, x4, x5, x6, x7; + uint32_t tmp; + + /* Load the state into temporary working variables */ + x0 = S[0]; x1 = S[1]; x2 = S[2]; x3 = S[3]; + x4 = S[4]; x5 = S[5]; x6 = S[6]; x7 = S[7]; + + /* Apply the MDS matrix to the state */ + #define SWAP(a) (((a) << 16) | ((a) >> 16)) + #define MUL(x0, x1, x2, x3, tmp) \ + do { \ + tmp = x0; x0 = x1; x1 = x2; x2 = x3; x3 = tmp ^ x0; \ + } while (0) + x0 ^= x4; x1 ^= x5; x2 ^= x6; x3 ^= x7; + MUL(x4, x5, x6, x7, tmp); + x4 ^= SWAP(x0); x5 ^= SWAP(x1); + x6 ^= SWAP(x2); x7 ^= SWAP(x3); + MUL(x0, x1, x2, x3, tmp); + MUL(x0, x1, x2, x3, tmp); + x0 ^= x4; x1 ^= x5; x2 ^= x6; x3 ^= x7; + x4 ^= SWAP(x0); x5 ^= SWAP(x1); + x6 ^= SWAP(x2); x7 ^= SWAP(x3); + + /* Store the temporary working variables back into the state */ + S[0] = x0; S[1] = x1; S[2] = x2; S[3] = x3; + S[4] = x4; S[5] = x5; S[6] = x6; S[7] = x7; +} + +/** + * \brief Applies the inverse of the MDS matrix to the SATURNIN state. + * + * \param S The state. 
+ */ +static void saturnin_mds_inverse(uint32_t S[8]) +{ + uint32_t x0, x1, x2, x3, x4, x5, x6, x7; + uint32_t tmp; + + /* Load the state into temporary working variables */ + x0 = S[0]; x1 = S[1]; x2 = S[2]; x3 = S[3]; + x4 = S[4]; x5 = S[5]; x6 = S[6]; x7 = S[7]; + + /* Apply the inverse of the MDS matrix to the state */ + #define MULINV(x0, x1, x2, x3, tmp) \ + do { \ + tmp = x3; x3 = x2; x2 = x1; x1 = x0; x0 = x1 ^ tmp; \ + } while (0) + x6 ^= SWAP(x2); x7 ^= SWAP(x3); + x4 ^= SWAP(x0); x5 ^= SWAP(x1); + x0 ^= x4; x1 ^= x5; x2 ^= x6; x3 ^= x7; + MULINV(x0, x1, x2, x3, tmp); + MULINV(x0, x1, x2, x3, tmp); + x6 ^= SWAP(x2); x7 ^= SWAP(x3); + x4 ^= SWAP(x0); x5 ^= SWAP(x1); + MULINV(x4, x5, x6, x7, tmp); + x0 ^= x4; x1 ^= x5; x2 ^= x6; x3 ^= x7; + + /* Store the temporary working variables back into the state */ + S[0] = x0; S[1] = x1; S[2] = x2; S[3] = x3; + S[4] = x4; S[5] = x5; S[6] = x6; S[7] = x7; +} + +/** + * \brief Applies the slice permutation to the SATURNIN state. + * + * \param S The state. + */ +static void saturnin_slice(uint32_t S[8]) +{ + leftRotate4_N(S[0], 0xFFFFU, 0, 0x3333, 2); + leftRotate4_N(S[1], 0xFFFFU, 0, 0x3333, 2); + leftRotate4_N(S[2], 0xFFFFU, 0, 0x3333, 2); + leftRotate4_N(S[3], 0xFFFFU, 0, 0x3333, 2); + + leftRotate4_N(S[4], 0x7777U, 1, 0x1111, 3); + leftRotate4_N(S[5], 0x7777U, 1, 0x1111, 3); + leftRotate4_N(S[6], 0x7777U, 1, 0x1111, 3); + leftRotate4_N(S[7], 0x7777U, 1, 0x1111, 3); +} + +/** + * \brief Applies the inverse of the slice permutation to the SATURNIN state. + * + * \param S The state. + */ +static void saturnin_slice_inverse(uint32_t S[8]) +{ + leftRotate4_N(S[0], 0xFFFFU, 0, 0x3333, 2); + leftRotate4_N(S[1], 0xFFFFU, 0, 0x3333, 2); + leftRotate4_N(S[2], 0xFFFFU, 0, 0x3333, 2); + leftRotate4_N(S[3], 0xFFFFU, 0, 0x3333, 2); + + leftRotate4_N(S[4], 0x1111U, 3, 0x7777, 1); + leftRotate4_N(S[5], 0x1111U, 3, 0x7777, 1); + leftRotate4_N(S[6], 0x1111U, 3, 0x7777, 1); + leftRotate4_N(S[7], 0x1111U, 3, 0x7777, 1); +} + +/** + * \brief Applies the sheet permutation to the SATURNIN state. + * + * \param S The state. + */ +static void saturnin_sheet(uint32_t S[8]) +{ + leftRotate16_N(S[0], 0xFFFFU, 0, 0x00FF, 8); + leftRotate16_N(S[1], 0xFFFFU, 0, 0x00FF, 8); + leftRotate16_N(S[2], 0xFFFFU, 0, 0x00FF, 8); + leftRotate16_N(S[3], 0xFFFFU, 0, 0x00FF, 8); + + leftRotate16_N(S[4], 0x0FFFU, 4, 0x000F, 12); + leftRotate16_N(S[5], 0x0FFFU, 4, 0x000F, 12); + leftRotate16_N(S[6], 0x0FFFU, 4, 0x000F, 12); + leftRotate16_N(S[7], 0x0FFFU, 4, 0x000F, 12); +} + +/** + * \brief Applies the inverse of the sheet permutation to the SATURNIN state. + * + * \param S The state. + */ +static void saturnin_sheet_inverse(uint32_t S[8]) +{ + leftRotate16_N(S[0], 0xFFFFU, 0, 0x00FF, 8); + leftRotate16_N(S[1], 0xFFFFU, 0, 0x00FF, 8); + leftRotate16_N(S[2], 0xFFFFU, 0, 0x00FF, 8); + leftRotate16_N(S[3], 0xFFFFU, 0, 0x00FF, 8); + + leftRotate16_N(S[4], 0x000FU, 12, 0x0FFF, 4); + leftRotate16_N(S[5], 0x000FU, 12, 0x0FFF, 4); + leftRotate16_N(S[6], 0x000FU, 12, 0x0FFF, 4); + leftRotate16_N(S[7], 0x000FU, 12, 0x0FFF, 4); +} + +/** + * \brief Encrypts a 256-bit block with the SATURNIN block cipher. + * + * \param output Ciphertext output block, 32 bytes. + * \param input Plaintext input block, 32 bytes. + * \param key Points to the 32 byte key for the block cipher. + * \param rounds Number of rounds to perform. + * \param RC Round constants to use for domain separation. + * + * The \a input and \a output buffers can be the same. 
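+ *
+ * \par Example
+ * Editorial sketch, not part of the upstream source; it shows an in-place
+ * call using the 10-round constants that the CTR layer below uses:
+ * \code
+ * unsigned char block[32] = {0};
+ * unsigned char key[32] = {0};
+ * saturnin_block_encrypt(block, block, key, 10, RC_10_1);
+ * \endcode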
+ * + * \sa saturnin_block_decrypt() + */ +static void saturnin_block_encrypt + (unsigned char *output, const unsigned char *input, + const unsigned char *key, unsigned rounds, const uint32_t *RC) +{ + uint32_t K[16]; + uint32_t S[8]; + uint32_t temp; + unsigned index; + + /* Unpack the key and the input block */ + for (index = 0; index < 16; index += 2) { + temp = ((uint32_t)(key[index])) | + (((uint32_t)(key[index + 1])) << 8) | + (((uint32_t)(key[index + 16])) << 16) | + (((uint32_t)(key[index + 17])) << 24); + K[index / 2] = temp; + K[8 + (index / 2)] = ((temp & 0x001F001FU) << 11) | + ((temp >> 5) & 0x07FF07FFU); + S[index / 2] = ((uint32_t)(input[index])) | + (((uint32_t)(input[index + 1])) << 8) | + (((uint32_t)(input[index + 16])) << 16) | + (((uint32_t)(input[index + 17])) << 24); + } + + /* XOR the key into the state */ + saturnin_xor_key(); + + /* Perform all encryption rounds */ + for (; rounds > 0; rounds -= 2, RC += 2) { + saturnin_sbox(S); + saturnin_mds(S); + saturnin_sbox(S); + saturnin_slice(S); + saturnin_mds(S); + saturnin_slice_inverse(S); + S[0] ^= RC[0]; + saturnin_xor_key_rotated(); + + saturnin_sbox(S); + saturnin_mds(S); + saturnin_sbox(S); + saturnin_sheet(S); + saturnin_mds(S); + saturnin_sheet_inverse(S); + S[0] ^= RC[1]; + saturnin_xor_key(); + } + + /* Encode the state into the output block */ + for (index = 0; index < 16; index += 2) { + temp = S[index / 2]; + output[index] = (uint8_t)temp; + output[index + 1] = (uint8_t)(temp >> 8); + output[index + 16] = (uint8_t)(temp >> 16); + output[index + 17] = (uint8_t)(temp >> 24); + } +} + +/** + * \brief Decrypts a 256-bit block with the SATURNIN block cipher. + * + * \param output Plaintext output block, 32 bytes. + * \param input Ciphertext input block, 32 bytes. + * \param key Points to the 32 byte key for the block cipher. + * \param rounds Number of rounds to perform. + * \param RC Round constants to use for domain separation. + * + * The \a input and \a output buffers can be the same. 
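+ *
+ * \par Example
+ * Editorial sketch, not part of the upstream source; decrypting with the
+ * same round count and round-constant table undoes the encryption:
+ * \code
+ * saturnin_block_encrypt(buf, buf, key, 10, RC_10_1);
+ * saturnin_block_decrypt(buf, buf, key, 10, RC_10_1);  /* buf is restored */
+ * \endcode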
+ * + * \sa saturnin_block_encrypt() + */ +static void saturnin_block_decrypt + (unsigned char *output, const unsigned char *input, + const unsigned char *key, unsigned rounds, const uint32_t *RC) +{ + uint32_t K[16]; + uint32_t S[8]; + uint32_t temp; + unsigned index; + + /* Unpack the key and the input block */ + for (index = 0; index < 16; index += 2) { + temp = ((uint32_t)(key[index])) | + (((uint32_t)(key[index + 1])) << 8) | + (((uint32_t)(key[index + 16])) << 16) | + (((uint32_t)(key[index + 17])) << 24); + K[index / 2] = temp; + K[8 + (index / 2)] = ((temp & 0x001F001FU) << 11) | + ((temp >> 5) & 0x07FF07FFU); + S[index / 2] = ((uint32_t)(input[index])) | + (((uint32_t)(input[index + 1])) << 8) | + (((uint32_t)(input[index + 16])) << 16) | + (((uint32_t)(input[index + 17])) << 24); + } + + /* Perform all decryption rounds */ + RC += rounds - 2; + for (; rounds > 0; rounds -= 2, RC -= 2) { + saturnin_xor_key(); + S[0] ^= RC[1]; + saturnin_sheet(S); + saturnin_mds_inverse(S); + saturnin_sheet_inverse(S); + saturnin_sbox_inverse(S); + saturnin_mds_inverse(S); + saturnin_sbox_inverse(S); + + saturnin_xor_key_rotated(); + S[0] ^= RC[0]; + saturnin_slice(S); + saturnin_mds_inverse(S); + saturnin_slice_inverse(S); + saturnin_sbox_inverse(S); + saturnin_mds_inverse(S); + saturnin_sbox_inverse(S); + } + + /* XOR the key into the state */ + saturnin_xor_key(); + + /* Encode the state into the output block */ + for (index = 0; index < 16; index += 2) { + temp = S[index / 2]; + output[index] = (uint8_t)temp; + output[index + 1] = (uint8_t)(temp >> 8); + output[index + 16] = (uint8_t)(temp >> 16); + output[index + 17] = (uint8_t)(temp >> 24); + } +} + +/** + * \brief Encrypts a 256-bit block with the SATURNIN block cipher and + * then XOR's the result with the original block to generate a new key. + * + * \param block Block to be encrypted and then XOR'ed with the result. + * \param key Points to the 32 byte key for the block cipher; replaced + * with the new key on exit. + * \param rounds Number of rounds to perform. + * \param RC Round constants to use for domain separation. + */ +void saturnin_block_encrypt_xor + (const unsigned char *block, unsigned char *key, + unsigned rounds, const uint32_t *RC) +{ + unsigned char temp[32]; + saturnin_block_encrypt(temp, block, key, rounds, RC); + lw_xor_block_2_src(key, block, temp, 32); +} + +/** + * \brief Encrypts (or decrypts) a data packet in CTR mode. + * + * \param c Output ciphertext buffer. + * \param m Input plaintext buffer. + * \param mlen Length of the plaintext in bytes. + * \param k Points to the 32-byte key. + * \param block Points to the pre-formatted nonce block. + */ +static void saturnin_ctr_encrypt + (unsigned char *c, const unsigned char *m, unsigned long long mlen, + const unsigned char *k, unsigned char *block) +{ + /* Note: The specification requires a 95-bit counter but we only use 32 bits. + * This limits the maximum packet size to 128GiB. That should be OK */ + uint32_t counter = 1; + unsigned char out[32]; + while (mlen >= 32) { + be_store_word32(block + 28, counter); + saturnin_block_encrypt(out, block, k, 10, RC_10_1); + lw_xor_block_2_src(c, out, m, 32); + c += 32; + m += 32; + mlen -= 32; + ++counter; + } + if (mlen > 0) { + be_store_word32(block + 28, counter); + saturnin_block_encrypt(out, block, k, 10, RC_10_1); + lw_xor_block_2_src(c, out, m, (unsigned)mlen); + } +} + +/** + * \brief Pads and authenticates a message. + * + * \param tag Points to the authentication tag. + * \param block Temporary block of 32 bytes from the caller. + * \param m Points to the message to be authenticated.
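[Illustrative note, not part of the patch] saturnin_ctr_encrypt() above keeps the block counter in the last four bytes of the caller's pre-formatted 32-byte nonce block. be_store_word32() comes from internal-util.h and is not shown in this hunk; the sketch below spells out the big-endian store it is assumed to perform, together with the assumed block layout (which follows the formatting code in saturnin_aead_encrypt() further down).

    #include <stdint.h>

    /* Assumed behaviour of be_store_word32(): store a 32-bit word big-endian. */
    static void be_store_word32_sketch(unsigned char *out, uint32_t x)
    {
        out[0] = (unsigned char)(x >> 24);
        out[1] = (unsigned char)(x >> 16);
        out[2] = (unsigned char)(x >> 8);
        out[3] = (unsigned char)x;
    }

    /* Assumed CTR block layout for the i-th 32-byte chunk of the message:
     *   bytes  0..15  nonce (npub)
     *   byte   16     0x80 padding marker
     *   bytes 17..27  zero
     *   bytes 28..31  big-endian counter, starting at 1 for the first chunk */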
+ * \param mlen Length of the message to be authenticated in bytes. + * \param rounds Number of rounds to perform. + * \param RC1 Round constants to use for domain separation on full blocks. + * \param RC2 Round constants to use for domain separation on the last block. + */ +static void saturnin_authenticate + (unsigned char *tag, unsigned char *block, + const unsigned char *m, unsigned long long mlen, + unsigned rounds, const uint32_t *RC1, const uint32_t *RC2) +{ + unsigned temp; + while (mlen >= 32) { + saturnin_block_encrypt_xor(m, tag, rounds, RC1); + m += 32; + mlen -= 32; + } + temp = (unsigned)mlen; + memcpy(block, m, temp); + block[temp] = 0x80; + memset(block + temp + 1, 0, 31 - temp); + saturnin_block_encrypt_xor(block, tag, rounds, RC2); +} + +int saturnin_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + unsigned char block[32]; + unsigned char *tag; + (void)nsec; + + /* Set the length of the returned ciphertext */ + *clen = mlen + SATURNIN_TAG_SIZE; + + /* Format the input block from the padded nonce */ + memcpy(block, npub, 16); + block[16] = 0x80; + memset(block + 17, 0, 15); + + /* Encrypt the plaintext in counter mode to produce the ciphertext */ + saturnin_ctr_encrypt(c, m, mlen, k, block); + + /* Set the counter back to zero and then encrypt the nonce */ + tag = c + mlen; + memcpy(tag, k, 32); + memset(block + 17, 0, 15); + saturnin_block_encrypt_xor(block, tag, 10, RC_10_2); + + /* Authenticate the associated data and the ciphertext */ + saturnin_authenticate(tag, block, ad, adlen, 10, RC_10_2, RC_10_3); + saturnin_authenticate(tag, block, c, mlen, 10, RC_10_4, RC_10_5); + return 0; +} + +int saturnin_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + unsigned char block[32]; + unsigned char tag[32]; + (void)nsec; + + /* Validate the ciphertext length and set the return "mlen" value */ + if (clen < SATURNIN_TAG_SIZE) + return -1; + *mlen = clen - SATURNIN_TAG_SIZE; + + /* Format the input block from the padded nonce */ + memcpy(block, npub, 16); + block[16] = 0x80; + memset(block + 17, 0, 15); + + /* Encrypt the nonce to initialize the authentication phase */ + memcpy(tag, k, 32); + saturnin_block_encrypt_xor(block, tag, 10, RC_10_2); + + /* Authenticate the associated data and the ciphertext */ + saturnin_authenticate(tag, block, ad, adlen, 10, RC_10_2, RC_10_3); + saturnin_authenticate(tag, block, c, *mlen, 10, RC_10_4, RC_10_5); + + /* Decrypt the ciphertext in counter mode to produce the plaintext */ + memcpy(block, npub, 16); + block[16] = 0x80; + memset(block + 17, 0, 15); + saturnin_ctr_encrypt(m, c, *mlen, k, block); + + /* Check the authentication tag at the end of the message */ + return aead_check_tag + (m, *mlen, tag, c + *mlen, SATURNIN_TAG_SIZE); +} + +int saturnin_short_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + unsigned char block[32]; + unsigned temp; + (void)nsec; + (void)ad; + + /* Validate the parameters: no associated data allowed and m <= 15 bytes */ 
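[Illustrative note, not part of the patch] Both the CTR keystream XOR and the Cascade tag update above go through lw_xor_block_2_src() from internal-util.h, which is outside this hunk. A minimal sketch of its assumed behaviour (dest = src1 XOR src2, byte by byte) follows; under that reading, saturnin_block_encrypt_xor() replaces the running tag with E_tag(block) XOR block, which is the Cascade chaining step applied to the associated data and the ciphertext.

    /* Assumed behaviour of lw_xor_block_2_src() from internal-util.h:
     * dest[i] = src1[i] ^ src2[i] for 0 <= i < len. */
    static void lw_xor_block_2_src_sketch
        (unsigned char *dest, const unsigned char *src1,
         const unsigned char *src2, unsigned len)
    {
        unsigned i;
        for (i = 0; i < len; ++i)
            dest[i] = src1[i] ^ src2[i];
    }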
+ if (adlen > 0 || mlen > 15) + return -2; + + /* Format the input block from the nonce and plaintext */ + temp = (unsigned)mlen; + memcpy(block, npub, 16); + memcpy(block + 16, m, temp); + block[16 + temp] = 0x80; /* Padding */ + memset(block + 17 + temp, 0, 15 - temp); + + /* Encrypt the input block to produce the output ciphertext */ + saturnin_block_encrypt(c, block, k, 10, RC_10_6); + *clen = 32; + return 0; +} + +int saturnin_short_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + unsigned char block[32]; + unsigned check1, check2, len; + int index, result; + (void)nsec; + (void)ad; + + /* Validate the parameters: no associated data and c is always 32 bytes */ + if (adlen > 0) + return -2; + if (clen != 32) + return -1; + + /* Decrypt the ciphertext block */ + saturnin_block_decrypt(block, c, k, 10, RC_10_6); + + /* Verify that the output block starts with the nonce and that it is + * padded correctly. We need to do this very carefully to avoid leaking + * any information that could be used in a padding oracle attack. Use the + * same algorithm as the reference implementation of SATURNIN-Short */ + check1 = 0; + for (index = 0; index < 16; ++index) + check1 |= npub[index] ^ block[index]; + check2 = 0xFF; + len = 0; + for (index = 15; index >= 0; --index) { + unsigned temp = block[16 + index]; + unsigned temp2 = check2 & -(1 - (((temp ^ 0x80) + 0xFF) >> 8)); + len |= temp2 & (unsigned)index; + check2 &= ~temp2; + check1 |= check2 & ((temp + 0xFF) >> 8); + } + check1 |= check2; + + /* At this point, check1 is zero if the nonce and plaintext are good, + * or non-zero if there was an error in the decrypted data */ + result = (((int)check1) - 1) >> 8; + + /* The "result" is -1 if the data is good or zero if the data is invalid. + * Copy either the plaintext or zeroes to the output buffer. We assume + * that the output buffer has space for up to 15 bytes. 
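[Illustrative note, not part of the patch] The constant-time padding check above builds masks instead of branching on secret data. It relies on two byte-level identities: for a byte value v, ((v + 0xFF) >> 8) is 1 exactly when v is non-zero, so (((v ^ 0x80) + 0xFF) >> 8) is 0 exactly when v equals the 0x80 padding marker. A small self-contained check of both identities (names made up, illustration only):

    #include <assert.h>

    /* Exhaustively verify the byte-mask identities used in the padding check. */
    static void check_mask_identities(void)
    {
        unsigned v;
        for (v = 0; v < 256; ++v) {
            assert(((v + 0xFF) >> 8) == (v != 0 ? 1U : 0U));
            assert((((v ^ 0x80) + 0xFF) >> 8) == (v != 0x80 ? 1U : 0U));
        }
    }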
This may return + * some of the padding to the caller but as long as they restrict + * themselves to the first *mlen bytes then it shouldn't be a problem */ + for (index = 0; index < 15; ++index) + m[index] = block[16 + index] & result; + *mlen = len; + return ~result; +} + +int saturnin_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + unsigned char tag[32]; + unsigned char block[32]; + memset(tag, 0, sizeof(tag)); + saturnin_authenticate(tag, block, in, inlen, 16, RC_16_7, RC_16_8); + memcpy(out, tag, 32); + return 0; +} + +void saturnin_hash_init(saturnin_hash_state_t *state) +{ + memset(state, 0, sizeof(saturnin_hash_state_t)); +} + +void saturnin_hash_update + (saturnin_hash_state_t *state, const unsigned char *in, + unsigned long long inlen) +{ + unsigned temp; + + /* Handle the partial left-over block from last time */ + if (state->s.count) { + temp = 32 - state->s.count; + if (temp > inlen) { + temp = (unsigned)inlen; + memcpy(state->s.block + state->s.count, in, temp); + state->s.count += temp; + return; + } + memcpy(state->s.block + state->s.count, in, temp); + state->s.count = 0; + in += temp; + inlen -= temp; + saturnin_block_encrypt_xor(state->s.block, state->s.hash, 16, RC_16_7); + } + + /* Process full blocks that are aligned at state->s.count == 0 */ + while (inlen >= 32) { + saturnin_block_encrypt_xor(in, state->s.hash, 16, RC_16_7); + in += 32; + inlen -= 32; + } + + /* Process the left-over block at the end of the input */ + temp = (unsigned)inlen; + memcpy(state->s.block, in, temp); + state->s.count = temp; +} + +void saturnin_hash_finalize + (saturnin_hash_state_t *state, unsigned char *out) +{ + /* Pad the final block */ + state->s.block[state->s.count] = 0x80; + memset(state->s.block + state->s.count + 1, 0, 31 - state->s.count); + + /* Generate the final hash value */ + saturnin_block_encrypt_xor(state->s.block, state->s.hash, 16, RC_16_8); + memcpy(out, state->s.hash, 32); +} diff --git a/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/saturnin.h b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/saturnin.h new file mode 100644 index 0000000..873d950 --- /dev/null +++ b/saturnin/Implementations/crypto_hash/saturninhashv2/rhys/saturnin.h @@ -0,0 +1,270 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef LWCRYPTO_SATURNIN_H +#define LWCRYPTO_SATURNIN_H + +#include "aead-common.h" + +/** + * \file saturnin.h + * \brief Saturnin authenticated encryption algorithm. + * + * The Saturnin family consists of two members: SATURNIN-CTR-Cascade and + * SATURNIN-Short. Both take a 256-bit key and a 128-bit nonce. + * Internally they use a 256-bit block cipher similar in construction to AES. + * + * SATURNIN-Short does not support associated data or plaintext packets + * with more than 15 bytes. This makes it very efficient on short packets + * with only a single block operation involved. + * + * This implementation of SATURNIN-Short will return an error if the + * caller supplies associated data or more than 15 bytes of plaintext. + * + * References: https://project.inria.fr/saturnin/ + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the key for all SATURNIN family members. + */ +#define SATURNIN_KEY_SIZE 32 + +/** + * \brief Size of the authentication tag for SATURNIN-CTR-Cascade or the + * total size of the ciphertext for SATURNIN-Short. + */ +#define SATURNIN_TAG_SIZE 32 + +/** + * \brief Size of the nonce for all SATURNIN family members. + */ +#define SATURNIN_NONCE_SIZE 16 + +/** + * \brief Size of the hash for SATURNIN-Hash. + */ +#define SATURNIN_HASH_SIZE 32 + +/** + * \brief State information for SATURNIN-Hash incremental modes. + */ +typedef union +{ + struct { + unsigned char hash[32]; /**< Current hash state */ + unsigned char block[32]; /**< Left-over block data from last update */ + unsigned char count; /**< Number of bytes in the current block */ + unsigned char mode; /**< Hash mode: 0 for absorb, 1 for squeeze */ + } s; /**< State */ + unsigned long long align; /**< For alignment of this structure */ + +} saturnin_hash_state_t; + +/** + * \brief Meta-information block for the SATURNIN-CTR-Cascade cipher. + */ +extern aead_cipher_t const saturnin_cipher; + +/** + * \brief Meta-information block for the SATURNIN-Short cipher. + */ +extern aead_cipher_t const saturnin_short_cipher; + +/** + * \brief Meta-information block for SATURNIN-Hash. + */ +extern aead_hash_algorithm_t const saturnin_hash_algorithm; + +/** + * \brief Encrypts and authenticates a packet with SATURNIN-CTR-Cascade. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 32 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 32 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa saturnin_aead_decrypt() + */ +int saturnin_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with SATURNIN-CTR-Cascade. + * + * \param m Buffer to receive the plaintext message on output. 
+ * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 32 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 32 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa saturnin_aead_encrypt() + */ +int saturnin_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with SATURNIN-Short. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which is always 32. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes, which must be + * less than or equal to 15 bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes, which must be zero. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 32 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or -2 if the caller supplied too many bytes of + * plaintext or they supplied associated data. + * + * \sa saturnin_short_aead_decrypt() + */ +int saturnin_short_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with SATURNIN-Short. + * + * \param m Buffer to receive the plaintext message on output. There must + * be at least 15 bytes of space in this buffer even if the caller expects + * to receive less data than that. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext to decrypt. + * \param clen Length of the input data in bytes, which must be 32. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes, which must be zero. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 32 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or -2 if the caller supplied associated data. 
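[Illustrative note, not part of the patch] The prototypes above are enough to round-trip a packet. The following is a hypothetical usage sketch of the SATURNIN-CTR-Cascade API declared in this header; the key, nonce, associated data, and message values are made up, and it assumes saturnin.h and the implementation are on the build path.

    #include <stdio.h>
    #include <string.h>
    #include "saturnin.h"

    int main(void)
    {
        unsigned char key[SATURNIN_KEY_SIZE] = {0};     /* made-up all-zero key */
        unsigned char nonce[SATURNIN_NONCE_SIZE] = {0}; /* made-up nonce */
        unsigned char ad[3] = {'h', 'd', 'r'};          /* made-up associated data */
        unsigned char msg[12] = "hello world";          /* 11 chars + NUL */
        unsigned char ct[sizeof(msg) + SATURNIN_TAG_SIZE];
        unsigned char pt[sizeof(msg)];
        unsigned long long ctlen, ptlen;

        saturnin_aead_encrypt(ct, &ctlen, msg, sizeof(msg), ad, sizeof(ad),
                              NULL, nonce, key);
        if (saturnin_aead_decrypt(pt, &ptlen, NULL, ct, ctlen, ad, sizeof(ad),
                                  nonce, key) != 0) {
            printf("tag check failed\n");
            return 1;
        }
        printf("recovered %llu bytes\n", ptlen);
        return 0;
    }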
+ * + * \sa saturnin_short_aead_encrypt() + */ +int saturnin_short_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data with SATURNIN to generate a hash value. + * + * \param out Buffer to receive the hash output which must be at least + * SATURNIN_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int saturnin_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a SATURNIN-Hash hashing operation. + * + * \param state Hash state to be initialized. + * + * \sa saturnin_hash_update(), saturnin_hash_finalize(), saturnin_hash() + */ +void saturnin_hash_init(saturnin_hash_state_t *state); + +/** + * \brief Updates a SATURNIN-Hash state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + * + * \sa saturnin_hash_init(), saturnin_hash_finalize() + */ +void saturnin_hash_update + (saturnin_hash_state_t *state, const unsigned char *in, + unsigned long long inlen); + +/** + * \brief Returns the final hash value from a SATURNIN-Hash hashing operation. + * + * \param state Hash state to be finalized. + * \param out Points to the output buffer to receive the 32-byte hash value. + * + * \sa saturnin_hash_init(), saturnin_hash_update() + */ +void saturnin_hash_finalize + (saturnin_hash_state_t *state, unsigned char *out); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/aead-common.c b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE.
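[Illustrative note, not part of the patch] Relating back to the SATURNIN-Hash declarations above: a hypothetical sketch of the incremental API, checking that feeding the input in two pieces matches the one-shot saturnin_hash() result. The function and macro names come from the header; the helper and the split point are made up for illustration.

    #include <string.h>
    #include "saturnin.h"

    /* Hash the same input in one shot and incrementally; returns 0 when the
     * two 32-byte digests agree. */
    static int hash_both_ways(const unsigned char *in, unsigned long long inlen)
    {
        unsigned char d1[SATURNIN_HASH_SIZE];
        unsigned char d2[SATURNIN_HASH_SIZE];
        saturnin_hash_state_t st;

        saturnin_hash(d1, in, inlen);

        saturnin_hash_init(&st);
        saturnin_hash_update(&st, in, inlen / 2);          /* feed in two pieces */
        saturnin_hash_update(&st, in + inlen / 2, inlen - inlen / 2);
        saturnin_hash_finalize(&st, d2);

        return memcmp(d1, d2, SATURNIN_HASH_SIZE);
    }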
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/aead-common.h b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/api.h b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/api.h deleted file mode 100644 index c3c0a27..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 12 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/encrypt.c b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/encrypt.c deleted file mode 100644 index 64c6ea2..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "skinny-aead.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return skinny_aead_m5_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return skinny_aead_m5_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-skinny128-avr.S b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-skinny128-avr.S deleted file mode 100644 index d342cd5..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-skinny128-avr.S +++ /dev/null @@ -1,10099 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 256 -table_0: - .byte 101 - .byte 76 - .byte 106 - .byte 66 - .byte 75 - .byte 99 - .byte 67 - .byte 107 - .byte 85 - .byte 117 - .byte 90 - .byte 122 - .byte 83 - .byte 115 - .byte 91 - .byte 123 - .byte 53 - .byte 140 - .byte 58 - .byte 129 - .byte 137 - .byte 51 - .byte 128 - .byte 59 - .byte 149 - .byte 37 - .byte 152 - .byte 42 - .byte 144 - .byte 35 - .byte 153 - .byte 43 - .byte 229 - .byte 204 - .byte 232 - .byte 193 - .byte 201 - .byte 224 - .byte 192 - .byte 233 - .byte 213 - .byte 245 - .byte 216 - .byte 248 - .byte 208 - .byte 240 - .byte 217 - .byte 249 - .byte 165 - .byte 28 - .byte 168 - .byte 18 - .byte 27 - .byte 160 - .byte 19 - .byte 169 - .byte 5 - .byte 181 - .byte 10 - .byte 184 - .byte 3 - .byte 
176 - .byte 11 - .byte 185 - .byte 50 - .byte 136 - .byte 60 - .byte 133 - .byte 141 - .byte 52 - .byte 132 - .byte 61 - .byte 145 - .byte 34 - .byte 156 - .byte 44 - .byte 148 - .byte 36 - .byte 157 - .byte 45 - .byte 98 - .byte 74 - .byte 108 - .byte 69 - .byte 77 - .byte 100 - .byte 68 - .byte 109 - .byte 82 - .byte 114 - .byte 92 - .byte 124 - .byte 84 - .byte 116 - .byte 93 - .byte 125 - .byte 161 - .byte 26 - .byte 172 - .byte 21 - .byte 29 - .byte 164 - .byte 20 - .byte 173 - .byte 2 - .byte 177 - .byte 12 - .byte 188 - .byte 4 - .byte 180 - .byte 13 - .byte 189 - .byte 225 - .byte 200 - .byte 236 - .byte 197 - .byte 205 - .byte 228 - .byte 196 - .byte 237 - .byte 209 - .byte 241 - .byte 220 - .byte 252 - .byte 212 - .byte 244 - .byte 221 - .byte 253 - .byte 54 - .byte 142 - .byte 56 - .byte 130 - .byte 139 - .byte 48 - .byte 131 - .byte 57 - .byte 150 - .byte 38 - .byte 154 - .byte 40 - .byte 147 - .byte 32 - .byte 155 - .byte 41 - .byte 102 - .byte 78 - .byte 104 - .byte 65 - .byte 73 - .byte 96 - .byte 64 - .byte 105 - .byte 86 - .byte 118 - .byte 88 - .byte 120 - .byte 80 - .byte 112 - .byte 89 - .byte 121 - .byte 166 - .byte 30 - .byte 170 - .byte 17 - .byte 25 - .byte 163 - .byte 16 - .byte 171 - .byte 6 - .byte 182 - .byte 8 - .byte 186 - .byte 0 - .byte 179 - .byte 9 - .byte 187 - .byte 230 - .byte 206 - .byte 234 - .byte 194 - .byte 203 - .byte 227 - .byte 195 - .byte 235 - .byte 214 - .byte 246 - .byte 218 - .byte 250 - .byte 211 - .byte 243 - .byte 219 - .byte 251 - .byte 49 - .byte 138 - .byte 62 - .byte 134 - .byte 143 - .byte 55 - .byte 135 - .byte 63 - .byte 146 - .byte 33 - .byte 158 - .byte 46 - .byte 151 - .byte 39 - .byte 159 - .byte 47 - .byte 97 - .byte 72 - .byte 110 - .byte 70 - .byte 79 - .byte 103 - .byte 71 - .byte 111 - .byte 81 - .byte 113 - .byte 94 - .byte 126 - .byte 87 - .byte 119 - .byte 95 - .byte 127 - .byte 162 - .byte 24 - .byte 174 - .byte 22 - .byte 31 - .byte 167 - .byte 23 - .byte 175 - .byte 1 - .byte 178 - .byte 14 - .byte 190 - .byte 7 - .byte 183 - .byte 15 - .byte 191 - .byte 226 - .byte 202 - .byte 238 - .byte 198 - .byte 207 - .byte 231 - .byte 199 - .byte 239 - .byte 210 - .byte 242 - .byte 222 - .byte 254 - .byte 215 - .byte 247 - .byte 223 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 256 -table_1: - .byte 172 - .byte 232 - .byte 104 - .byte 60 - .byte 108 - .byte 56 - .byte 168 - .byte 236 - .byte 170 - .byte 174 - .byte 58 - .byte 62 - .byte 106 - .byte 110 - .byte 234 - .byte 238 - .byte 166 - .byte 163 - .byte 51 - .byte 54 - .byte 102 - .byte 99 - .byte 227 - .byte 230 - .byte 225 - .byte 164 - .byte 97 - .byte 52 - .byte 49 - .byte 100 - .byte 161 - .byte 228 - .byte 141 - .byte 201 - .byte 73 - .byte 29 - .byte 77 - .byte 25 - .byte 137 - .byte 205 - .byte 139 - .byte 143 - .byte 27 - .byte 31 - .byte 75 - .byte 79 - .byte 203 - .byte 207 - .byte 133 - .byte 192 - .byte 64 - .byte 21 - .byte 69 - .byte 16 - .byte 128 - .byte 197 - .byte 130 - .byte 135 - .byte 18 - .byte 23 - .byte 66 - .byte 71 - .byte 194 - .byte 199 - .byte 150 - .byte 147 - .byte 3 - .byte 6 - .byte 86 - .byte 83 - .byte 211 - .byte 214 - .byte 209 - .byte 148 - .byte 81 - .byte 4 - .byte 1 - .byte 84 - .byte 145 - .byte 212 - .byte 156 - .byte 216 - .byte 88 - .byte 12 - .byte 92 - .byte 8 - .byte 152 - .byte 220 - .byte 154 - .byte 158 - .byte 10 - .byte 14 - .byte 90 - .byte 94 - .byte 218 - .byte 222 - .byte 149 - .byte 208 - .byte 80 - .byte 5 - .byte 85 - .byte 0 - .byte 144 - .byte 213 - 
.byte 146 - .byte 151 - .byte 2 - .byte 7 - .byte 82 - .byte 87 - .byte 210 - .byte 215 - .byte 157 - .byte 217 - .byte 89 - .byte 13 - .byte 93 - .byte 9 - .byte 153 - .byte 221 - .byte 155 - .byte 159 - .byte 11 - .byte 15 - .byte 91 - .byte 95 - .byte 219 - .byte 223 - .byte 22 - .byte 19 - .byte 131 - .byte 134 - .byte 70 - .byte 67 - .byte 195 - .byte 198 - .byte 65 - .byte 20 - .byte 193 - .byte 132 - .byte 17 - .byte 68 - .byte 129 - .byte 196 - .byte 28 - .byte 72 - .byte 200 - .byte 140 - .byte 76 - .byte 24 - .byte 136 - .byte 204 - .byte 26 - .byte 30 - .byte 138 - .byte 142 - .byte 74 - .byte 78 - .byte 202 - .byte 206 - .byte 53 - .byte 96 - .byte 224 - .byte 165 - .byte 101 - .byte 48 - .byte 160 - .byte 229 - .byte 50 - .byte 55 - .byte 162 - .byte 167 - .byte 98 - .byte 103 - .byte 226 - .byte 231 - .byte 61 - .byte 105 - .byte 233 - .byte 173 - .byte 109 - .byte 57 - .byte 169 - .byte 237 - .byte 59 - .byte 63 - .byte 171 - .byte 175 - .byte 107 - .byte 111 - .byte 235 - .byte 239 - .byte 38 - .byte 35 - .byte 179 - .byte 182 - .byte 118 - .byte 115 - .byte 243 - .byte 246 - .byte 113 - .byte 36 - .byte 241 - .byte 180 - .byte 33 - .byte 116 - .byte 177 - .byte 244 - .byte 44 - .byte 120 - .byte 248 - .byte 188 - .byte 124 - .byte 40 - .byte 184 - .byte 252 - .byte 42 - .byte 46 - .byte 186 - .byte 190 - .byte 122 - .byte 126 - .byte 250 - .byte 254 - .byte 37 - .byte 112 - .byte 240 - .byte 181 - .byte 117 - .byte 32 - .byte 176 - .byte 245 - .byte 34 - .byte 39 - .byte 178 - .byte 183 - .byte 114 - .byte 119 - .byte 242 - .byte 247 - .byte 45 - .byte 121 - .byte 249 - .byte 189 - .byte 125 - .byte 41 - .byte 185 - .byte 253 - .byte 43 - .byte 47 - .byte 187 - .byte 191 - .byte 123 - .byte 127 - .byte 251 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_2, @object - .size table_2, 256 -table_2: - .byte 0 - .byte 2 - .byte 4 - .byte 6 - .byte 8 - .byte 10 - .byte 12 - .byte 14 - .byte 16 - .byte 18 - .byte 20 - .byte 22 - .byte 24 - .byte 26 - .byte 28 - .byte 30 - .byte 32 - .byte 34 - .byte 36 - .byte 38 - .byte 40 - .byte 42 - .byte 44 - .byte 46 - .byte 48 - .byte 50 - .byte 52 - .byte 54 - .byte 56 - .byte 58 - .byte 60 - .byte 62 - .byte 65 - .byte 67 - .byte 69 - .byte 71 - .byte 73 - .byte 75 - .byte 77 - .byte 79 - .byte 81 - .byte 83 - .byte 85 - .byte 87 - .byte 89 - .byte 91 - .byte 93 - .byte 95 - .byte 97 - .byte 99 - .byte 101 - .byte 103 - .byte 105 - .byte 107 - .byte 109 - .byte 111 - .byte 113 - .byte 115 - .byte 117 - .byte 119 - .byte 121 - .byte 123 - .byte 125 - .byte 127 - .byte 128 - .byte 130 - .byte 132 - .byte 134 - .byte 136 - .byte 138 - .byte 140 - .byte 142 - .byte 144 - .byte 146 - .byte 148 - .byte 150 - .byte 152 - .byte 154 - .byte 156 - .byte 158 - .byte 160 - .byte 162 - .byte 164 - .byte 166 - .byte 168 - .byte 170 - .byte 172 - .byte 174 - .byte 176 - .byte 178 - .byte 180 - .byte 182 - .byte 184 - .byte 186 - .byte 188 - .byte 190 - .byte 193 - .byte 195 - .byte 197 - .byte 199 - .byte 201 - .byte 203 - .byte 205 - .byte 207 - .byte 209 - .byte 211 - .byte 213 - .byte 215 - .byte 217 - .byte 219 - .byte 221 - .byte 223 - .byte 225 - .byte 227 - .byte 229 - .byte 231 - .byte 233 - .byte 235 - .byte 237 - .byte 239 - .byte 241 - .byte 243 - .byte 245 - .byte 247 - .byte 249 - .byte 251 - .byte 253 - .byte 255 - .byte 1 - .byte 3 - .byte 5 - .byte 7 - .byte 9 - .byte 11 - .byte 13 - .byte 15 - .byte 17 - .byte 19 - .byte 21 - .byte 23 - .byte 25 - .byte 27 - .byte 29 - .byte 31 - .byte 33 - .byte 35 - 
.byte 37 - .byte 39 - .byte 41 - .byte 43 - .byte 45 - .byte 47 - .byte 49 - .byte 51 - .byte 53 - .byte 55 - .byte 57 - .byte 59 - .byte 61 - .byte 63 - .byte 64 - .byte 66 - .byte 68 - .byte 70 - .byte 72 - .byte 74 - .byte 76 - .byte 78 - .byte 80 - .byte 82 - .byte 84 - .byte 86 - .byte 88 - .byte 90 - .byte 92 - .byte 94 - .byte 96 - .byte 98 - .byte 100 - .byte 102 - .byte 104 - .byte 106 - .byte 108 - .byte 110 - .byte 112 - .byte 114 - .byte 116 - .byte 118 - .byte 120 - .byte 122 - .byte 124 - .byte 126 - .byte 129 - .byte 131 - .byte 133 - .byte 135 - .byte 137 - .byte 139 - .byte 141 - .byte 143 - .byte 145 - .byte 147 - .byte 149 - .byte 151 - .byte 153 - .byte 155 - .byte 157 - .byte 159 - .byte 161 - .byte 163 - .byte 165 - .byte 167 - .byte 169 - .byte 171 - .byte 173 - .byte 175 - .byte 177 - .byte 179 - .byte 181 - .byte 183 - .byte 185 - .byte 187 - .byte 189 - .byte 191 - .byte 192 - .byte 194 - .byte 196 - .byte 198 - .byte 200 - .byte 202 - .byte 204 - .byte 206 - .byte 208 - .byte 210 - .byte 212 - .byte 214 - .byte 216 - .byte 218 - .byte 220 - .byte 222 - .byte 224 - .byte 226 - .byte 228 - .byte 230 - .byte 232 - .byte 234 - .byte 236 - .byte 238 - .byte 240 - .byte 242 - .byte 244 - .byte 246 - .byte 248 - .byte 250 - .byte 252 - .byte 254 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_3, @object - .size table_3, 256 -table_3: - .byte 0 - .byte 128 - .byte 1 - .byte 129 - .byte 2 - .byte 130 - .byte 3 - .byte 131 - .byte 4 - .byte 132 - .byte 5 - .byte 133 - .byte 6 - .byte 134 - .byte 7 - .byte 135 - .byte 8 - .byte 136 - .byte 9 - .byte 137 - .byte 10 - .byte 138 - .byte 11 - .byte 139 - .byte 12 - .byte 140 - .byte 13 - .byte 141 - .byte 14 - .byte 142 - .byte 15 - .byte 143 - .byte 16 - .byte 144 - .byte 17 - .byte 145 - .byte 18 - .byte 146 - .byte 19 - .byte 147 - .byte 20 - .byte 148 - .byte 21 - .byte 149 - .byte 22 - .byte 150 - .byte 23 - .byte 151 - .byte 24 - .byte 152 - .byte 25 - .byte 153 - .byte 26 - .byte 154 - .byte 27 - .byte 155 - .byte 28 - .byte 156 - .byte 29 - .byte 157 - .byte 30 - .byte 158 - .byte 31 - .byte 159 - .byte 160 - .byte 32 - .byte 161 - .byte 33 - .byte 162 - .byte 34 - .byte 163 - .byte 35 - .byte 164 - .byte 36 - .byte 165 - .byte 37 - .byte 166 - .byte 38 - .byte 167 - .byte 39 - .byte 168 - .byte 40 - .byte 169 - .byte 41 - .byte 170 - .byte 42 - .byte 171 - .byte 43 - .byte 172 - .byte 44 - .byte 173 - .byte 45 - .byte 174 - .byte 46 - .byte 175 - .byte 47 - .byte 176 - .byte 48 - .byte 177 - .byte 49 - .byte 178 - .byte 50 - .byte 179 - .byte 51 - .byte 180 - .byte 52 - .byte 181 - .byte 53 - .byte 182 - .byte 54 - .byte 183 - .byte 55 - .byte 184 - .byte 56 - .byte 185 - .byte 57 - .byte 186 - .byte 58 - .byte 187 - .byte 59 - .byte 188 - .byte 60 - .byte 189 - .byte 61 - .byte 190 - .byte 62 - .byte 191 - .byte 63 - .byte 64 - .byte 192 - .byte 65 - .byte 193 - .byte 66 - .byte 194 - .byte 67 - .byte 195 - .byte 68 - .byte 196 - .byte 69 - .byte 197 - .byte 70 - .byte 198 - .byte 71 - .byte 199 - .byte 72 - .byte 200 - .byte 73 - .byte 201 - .byte 74 - .byte 202 - .byte 75 - .byte 203 - .byte 76 - .byte 204 - .byte 77 - .byte 205 - .byte 78 - .byte 206 - .byte 79 - .byte 207 - .byte 80 - .byte 208 - .byte 81 - .byte 209 - .byte 82 - .byte 210 - .byte 83 - .byte 211 - .byte 84 - .byte 212 - .byte 85 - .byte 213 - .byte 86 - .byte 214 - .byte 87 - .byte 215 - .byte 88 - .byte 216 - .byte 89 - .byte 217 - .byte 90 - .byte 218 - .byte 91 - .byte 219 - .byte 92 - .byte 220 - .byte 93 - .byte 221 - 
.byte 94 - .byte 222 - .byte 95 - .byte 223 - .byte 224 - .byte 96 - .byte 225 - .byte 97 - .byte 226 - .byte 98 - .byte 227 - .byte 99 - .byte 228 - .byte 100 - .byte 229 - .byte 101 - .byte 230 - .byte 102 - .byte 231 - .byte 103 - .byte 232 - .byte 104 - .byte 233 - .byte 105 - .byte 234 - .byte 106 - .byte 235 - .byte 107 - .byte 236 - .byte 108 - .byte 237 - .byte 109 - .byte 238 - .byte 110 - .byte 239 - .byte 111 - .byte 240 - .byte 112 - .byte 241 - .byte 113 - .byte 242 - .byte 114 - .byte 243 - .byte 115 - .byte 244 - .byte 116 - .byte 245 - .byte 117 - .byte 246 - .byte 118 - .byte 247 - .byte 119 - .byte 248 - .byte 120 - .byte 249 - .byte 121 - .byte 250 - .byte 122 - .byte 251 - .byte 123 - .byte 252 - .byte 124 - .byte 253 - .byte 125 - .byte 254 - .byte 126 - .byte 255 - .byte 127 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_4, @object - .size table_4, 112 -table_4: - .byte 1 - .byte 0 - .byte 3 - .byte 0 - .byte 7 - .byte 0 - .byte 15 - .byte 0 - .byte 15 - .byte 1 - .byte 14 - .byte 3 - .byte 13 - .byte 3 - .byte 11 - .byte 3 - .byte 7 - .byte 3 - .byte 15 - .byte 2 - .byte 14 - .byte 1 - .byte 12 - .byte 3 - .byte 9 - .byte 3 - .byte 3 - .byte 3 - .byte 7 - .byte 2 - .byte 14 - .byte 0 - .byte 13 - .byte 1 - .byte 10 - .byte 3 - .byte 5 - .byte 3 - .byte 11 - .byte 2 - .byte 6 - .byte 1 - .byte 12 - .byte 2 - .byte 8 - .byte 1 - .byte 0 - .byte 3 - .byte 1 - .byte 2 - .byte 2 - .byte 0 - .byte 5 - .byte 0 - .byte 11 - .byte 0 - .byte 7 - .byte 1 - .byte 14 - .byte 2 - .byte 12 - .byte 1 - .byte 8 - .byte 3 - .byte 1 - .byte 3 - .byte 3 - .byte 2 - .byte 6 - .byte 0 - .byte 13 - .byte 0 - .byte 11 - .byte 1 - .byte 6 - .byte 3 - .byte 13 - .byte 2 - .byte 10 - .byte 1 - .byte 4 - .byte 3 - .byte 9 - .byte 2 - .byte 2 - .byte 1 - .byte 4 - .byte 2 - .byte 8 - .byte 0 - .byte 1 - .byte 1 - .byte 2 - .byte 2 - .byte 4 - .byte 0 - .byte 9 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 2 - .byte 12 - .byte 0 - .byte 9 - .byte 1 - .byte 2 - .byte 3 - .byte 5 - .byte 2 - .byte 10 - .byte 0 - - .text -.global skinny_128_384_init - .type skinny_128_384_init, @function -skinny_128_384_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,12 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_384_init, .-skinny_128_384_init - - .text -.global skinny_128_384_encrypt - .type skinny_128_384_encrypt, @function -skinny_128_384_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std 
Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - std Y+33,r18 - std Y+34,r19 - std Y+35,r20 - std Y+36,r21 - ldd r18,Z+36 - ldd r19,Z+37 - ldd r20,Z+38 - ldd r21,Z+39 - std Y+37,r18 - std Y+38,r19 - std Y+39,r20 - std Y+40,r21 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - std Y+41,r18 - std Y+42,r19 - std Y+43,r20 - std Y+44,r21 - ldd r18,Z+44 - ldd r19,Z+45 - ldd r20,Z+46 - ldd r21,Z+47 - std Y+45,r18 - std Y+46,r19 - std Y+47,r20 - std Y+48,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -114: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - 
lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std 
Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z 
-#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm 
r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif 
- mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - 
std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 
-#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd 
r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,112 - brne 5721f - rjmp 790f -5721: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z 
-#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 114b -790: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_encrypt, .-skinny_128_384_encrypt - -.global skinny_128_384_encrypt_tk_full - .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt - - .text -.global skinny_128_384_decrypt - .type skinny_128_384_decrypt, @function -skinny_128_384_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r23 - std Y+2,r2 - std Y+3,r21 - std Y+4,r20 - std Y+5,r3 - std Y+6,r18 - std Y+7,r19 - std Y+8,r22 - std Y+9,r9 - std Y+10,r10 - std Y+11,r7 - std Y+12,r6 - std Y+13,r11 - std Y+14,r4 - std Y+15,r5 - std Y+16,r8 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r23 - std Y+18,r2 - std Y+19,r21 - std Y+20,r20 - std Y+21,r3 - std Y+22,r18 - std Y+23,r19 - std Y+24,r22 - std Y+25,r9 - std Y+26,r10 - std Y+27,r7 - std Y+28,r6 - std Y+29,r11 - std Y+30,r4 - std Y+31,r5 - std Y+32,r8 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r22,Z+36 - ldd r23,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+33,r23 - std Y+34,r2 - std Y+35,r21 - std Y+36,r20 - std Y+37,r3 - std Y+38,r18 - std Y+39,r19 - std Y+40,r22 - std Y+41,r9 - std Y+42,r10 - std Y+43,r7 - std Y+44,r6 - std Y+45,r11 - std Y+46,r4 - std Y+47,r5 - std Y+48,r8 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld 
r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -122: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 122b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,28 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -150: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 150b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - 
out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 -179: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 179b - std Y+33,r12 - std Y+34,r13 - std Y+35,r14 - std Y+36,r15 - std Y+37,r24 - std Y+38,r25 - std Y+39,r16 - std Y+40,r17 - ldi r26,28 - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 -207: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 207b - std Y+41,r12 - std Y+42,r13 - std Y+43,r14 - std Y+44,r15 - std Y+45,r24 - std Y+46,r25 - std Y+47,r16 - std Y+48,r17 - ldi r26,112 -227: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd 
r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov 
r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - 
lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif 
defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif 
defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm 
r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z 
-#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z 
-#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 903f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 227b -903: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - 
pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_decrypt, .-skinny_128_384_decrypt - - .text -.global skinny_128_256_init - .type skinny_128_256_init, @function -skinny_128_256_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,8 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_256_init, .-skinny_128_256_init - - .text -.global skinny_128_256_encrypt - .type skinny_128_256_encrypt, @function -skinny_128_256_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -82: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm 
- mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif 
defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif 
defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi 
r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - 
ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,96 - breq 594f - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - 
mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 82b -594: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_encrypt, .-skinny_128_256_encrypt - -.global skinny_128_256_encrypt_tk_full - .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt - - .text -.global skinny_128_256_decrypt - .type skinny_128_256_decrypt, @function -skinny_128_256_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - std Y+5,r22 - std Y+6,r23 - std Y+7,r2 - std Y+8,r3 - std Y+9,r4 - std Y+10,r5 - std Y+11,r6 - std Y+12,r7 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - std Y+21,r22 - std Y+22,r23 - std Y+23,r2 - std Y+24,r3 - std Y+25,r4 - std Y+26,r5 - std Y+27,r6 - std Y+28,r7 - std Y+29,r8 - std Y+30,r9 - std Y+31,r10 - std Y+32,r11 - ld r18,X+ - ld r19,X+ 
- ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,24 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -90: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 90b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,24 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -118: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 118b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std 
Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,96 -139: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 
-#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z 
-#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif 
defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov 
r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if 
defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - 
ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 651f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 139b -651: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_decrypt, .-skinny_128_256_decrypt - -#endif diff --git 
a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-skinny128.c b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-skinny128.c deleted file mode 100644 index 579ced1..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-skinny128.c +++ /dev/null @@ -1,801 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-skinny128.h" -#include "internal-skinnyutil.h" -#include "internal-util.h" -#include - -#if !defined(__AVR__) - -STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) -{ - /* This function is used to fast-forward the TK1 tweak value - * to the value at the end of the key schedule for decryption. - * - * The tweak permutation repeats every 16 rounds, so SKINNY-128-256 - * with 48 rounds does not need any fast forwarding applied. 
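The fast-forwarding claim above can be checked mechanically: the cell permutation PT used by skinny128_permute_tk (listed later in internal-skinnyutil.h) has order 16, and its eighth power is exactly the PT*8 table quoted in the next part of this comment. A minimal standalone sketch of that check (illustrative only, not part of the patch; it assumes the convention new[i] = old[PT[i]]):

/* Check that PT has order 16 and print PT^8 (illustrative only). */
#include <stdio.h>
#include <string.h>

static const unsigned char PT[16] = {
    9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7
};

int main(void)
{
    unsigned char p[16], q[16], id[16];
    int i, k;
    for (i = 0; i < 16; ++i)
        id[i] = p[i] = (unsigned char)i;      /* start from the identity */
    for (k = 1; k <= 16; ++k) {
        for (i = 0; i < 16; ++i)              /* compose one more application of PT */
            q[i] = p[PT[i]];
        memcpy(p, q, 16);
        if (k == 8) {
            /* Expected: 5 6 3 2 7 0 1 4 13 14 11 10 15 8 9 12 */
            printf("PT^8 =");
            for (i = 0; i < 16; ++i)
                printf(" %d", p[i]);
            printf("\n");
        }
    }
    printf("PT^16 %s the identity\n", memcmp(p, id, 16) == 0 ? "is" : "is NOT");
    return 0;
}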
- * SKINNY-128-128 with 40 rounds and SKINNY-128-384 with 56 rounds - * are equivalent to applying the permutation 8 times: - * - * PT*8 = [5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12] - */ - uint32_t row0 = tk[0]; - uint32_t row1 = tk[1]; - uint32_t row2 = tk[2]; - uint32_t row3 = tk[3]; - tk[0] = ((row1 >> 8) & 0x0000FFFFU) | - ((row0 >> 8) & 0x00FF0000U) | - ((row0 << 8) & 0xFF000000U); - tk[1] = ((row1 >> 24) & 0x000000FFU) | - ((row0 << 8) & 0x00FFFF00U) | - ((row1 << 24) & 0xFF000000U); - tk[2] = ((row3 >> 8) & 0x0000FFFFU) | - ((row2 >> 8) & 0x00FF0000U) | - ((row2 << 8) & 0xFF000000U); - tk[3] = ((row3 >> 24) & 0x000000FFU) | - ((row2 << 8) & 0x00FFFF00U) | - ((row3 << 24) & 0xFF000000U); -} - -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); - memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); -#else - /* Set the initial states of TK1, TK2, and TK3 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Set up the key schedule using TK2 and TK3. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
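As an aside, the rc update used just below (and throughout these routines) is the 6-bit LFSR that generates the standard SKINNY round constants; the low nibble is XORed into the first schedule word (row 0) and the top two bits into the second (row 1). A standalone sketch of the generator alone, assuming nothing beyond the update rule visible in the code:

/* Standalone sketch of the 6-bit round-constant LFSR (illustrative only). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t rc = 0;
    int round;
    for (round = 0; round < 8; ++round) {
        rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
        rc &= 0x3F;
        /* First outputs: 0x01 0x03 0x07 0x0F 0x1F 0x3E 0x3D 0x3B ... */
        printf("round %2d: rc = 0x%02X (row0 ^= 0x%X, row1 ^= 0x%X)\n",
               round, rc, rc & 0x0F, rc >> 4);
    }
    return 0;
}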
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ TK3[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ TK3[1] ^ (rc >> 4); - - /* Permute TK2 and TK3 for the next round */ - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - - /* Apply the LFSR's to TK2 and TK3 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } -#endif -} - -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0x15; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Permute TK1 to fast-forward it to the end of the key schedule */ - skinny128_fast_forward_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_fast_forward_tk(TK2); - skinny128_fast_forward_tk(TK3); - for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
- skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - skinny128_LFSR3(TK3[2]); - skinny128_LFSR3(TK3[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_inv_permute_tk(TK3); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); - skinny128_LFSR2(TK3[2]); - skinny128_LFSR2(TK3[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); - TK2[0] = le_load_word32(tk2); - TK2[1] = le_load_word32(tk2 + 4); - TK2[2] = le_load_word32(tk2 + 8); - TK2[3] = le_load_word32(tk2 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; - s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK3); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); -#else - /* Set the initial states of TK1 and TK2 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Set up the key schedule using TK2. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ (rc >> 4); - - /* Permute TK2 for the next round */ - skinny128_permute_tk(TK2); - - /* Apply the LFSR to TK2 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } -#endif -} - -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0x09; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1. 
- * There is no need to fast-forward TK1 because the value at - * the end of the key schedule is the same as at the start */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2. - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -#else /* __AVR__ */ - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - memcpy(ks->TK2, tk2, 16); - skinny_128_384_encrypt(ks, output, input); -} - -#endif /* __AVR__ */ diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-skinny128.h b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-skinny128.h deleted file mode 100644 index 2bfda3c..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-skinny128.h +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNY128_H -#define LW_INTERNAL_SKINNY128_H - -/** - * \file internal-skinny128.h - * \brief SKINNY-128 block cipher family. - * - * References: https://eprint.iacr.org/2016/660.pdf, - * https://sites.google.com/site/skinnycipher/ - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \def SKINNY_128_SMALL_SCHEDULE - * \brief Defined to 1 to use the small key schedule version of SKINNY-128. - */ -#if defined(__AVR__) -#define SKINNY_128_SMALL_SCHEDULE 1 -#else -#define SKINNY_128_SMALL_SCHEDULE 0 -#endif - -/** - * \brief Size of a block for SKINNY-128 block ciphers. - */ -#define SKINNY_128_BLOCK_SIZE 16 - -/** - * \brief Number of rounds for SKINNY-128-384. - */ -#define SKINNY_128_384_ROUNDS 56 - -/** - * \brief Structure of the key schedule for SKINNY-128-384. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; - - /** TK3 for the small key schedule */ - uint8_t TK3[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_384_ROUNDS * 2]; -#endif - -} skinny_128_384_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-384. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and an explicitly - * provided TK2 value. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tk2 TK2 value that should be updated on the fly. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when both TK1 and TK2 change from block to block. - * When the key is initialized with skinny_128_384_init(), the TK2 part of - * the key value should be set to zero. - * - * \note Some versions of this function may modify the key schedule to - * copy tk2 into place. - */ -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and a - * fully specified tweakey value. - * - * \param key Points to the 384-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-384 but - * more memory-efficient. - */ -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input); - -/** - * \brief Number of rounds for SKINNY-128-256. - */ -#define SKINNY_128_256_ROUNDS 48 - -/** - * \brief Structure of the key schedule for SKINNY-128-256. 
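For context, a minimal usage sketch of the SKINNY-128-384 interface declared above (it assumes the rhys sources are compiled and on the include path; the all-zero key and block are arbitrary placeholders, not test vectors):

#include <stdio.h>
#include <string.h>
#include "internal-skinny128.h"

int main(void)
{
    unsigned char key[48] = {0};   /* TK1 || TK2 || TK3, placeholder value */
    unsigned char pt[16]  = {0};   /* placeholder plaintext block */
    unsigned char ct[16], out[16];
    skinny_128_384_key_schedule_t ks;

    skinny_128_384_init(&ks, key);
    skinny_128_384_encrypt(&ks, ct, pt);
    skinny_128_384_decrypt(&ks, out, ct);

    printf("round trip %s\n", memcmp(out, pt, 16) == 0 ? "ok" : "FAILED");
    return 0;
}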
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_256_ROUNDS * 2]; -#endif - -} skinny_128_256_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256 and a - * fully specified tweakey value. - * - * \param key Points to the 256-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-256 but - * more memory-efficient. - */ -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-skinnyutil.h b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-skinnyutil.h deleted file mode 100644 index 83136cb..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-skinnyutil.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNYUTIL_H -#define LW_INTERNAL_SKINNYUTIL_H - -/** - * \file internal-skinnyutil.h - * \brief Utilities to help implement SKINNY and its variants. - */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @cond skinnyutil */ - -/* Utilities for implementing SKINNY-128 */ - -#define skinny128_LFSR2(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ - (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ - } while (0) - - -#define skinny128_LFSR3(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ - (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) -#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) - -#define skinny128_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint32_t row2 = tk[2]; \ - uint32_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 16) | (row3 >> 16); \ - tk[0] = ((row2 >> 8) & 0x000000FFU) | \ - ((row2 << 16) & 0x00FF0000U) | \ - ( row3 & 0xFF00FF00U); \ - tk[1] = ((row2 >> 16) & 0x000000FFU) | \ - (row2 & 0xFF000000U) | \ - ((row3 << 8) & 0x0000FF00U) | \ - ( row3 & 0x00FF0000U); \ - } while (0) - -#define skinny128_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint32_t row0 = tk[0]; \ - uint32_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 >> 16) & 0x000000FFU) | \ - ((row0 << 8) & 0x0000FF00U) | \ - ((row1 << 16) & 0x00FF0000U) | \ - ( row1 & 0xFF000000U); \ - tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ - ((row0 << 16) & 0xFF000000U) | \ - ((row1 >> 16) & 0x000000FFU) | \ - ((row1 << 8) & 0x00FF0000U); \ - } while (0) - -/* - * Apply the SKINNY sbox. The original version from the specification is - * equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE(x) - * ((((x) & 0x01010101U) << 2) | - * (((x) & 0x06060606U) << 5) | - * (((x) & 0x20202020U) >> 5) | - * (((x) & 0xC8C8C8C8U) >> 2) | - * (((x) & 0x10101010U) >> 1)) - * - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * return SBOX_SWAP(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. 
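The specification form described in this comment can be written out directly; the sketch below (illustrative only) transcribes it and evaluates the word of zero bytes, which should come out as 0x65656565 because the published SKINNY-128 S-box sends 0x00 to 0x65:

/* Specification form of the SKINNY-128 S-box, transcribed from the
 * comment above.  Operates on four bytes of state in parallel. */
#include <stdio.h>
#include <stdint.h>

#define SBOX_MIX(x) \
    (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x))
#define SBOX_SWAP(x) \
    (((x) & 0xF9F9F9F9U) | \
     (((x) >> 1) & 0x02020202U) | \
     (((x) << 1) & 0x04040404U))
#define SBOX_PERMUTE(x) \
    ((((x) & 0x01010101U) << 2) | \
     (((x) & 0x06060606U) << 5) | \
     (((x) & 0x20202020U) >> 5) | \
     (((x) & 0xC8C8C8C8U) >> 2) | \
     (((x) & 0x10101010U) >> 1))

static uint32_t skinny128_sbox_spec(uint32_t x)
{
    x = SBOX_MIX(x);
    x = SBOX_PERMUTE(x);
    x = SBOX_MIX(x);
    x = SBOX_PERMUTE(x);
    x = SBOX_MIX(x);
    x = SBOX_PERMUTE(x);
    x = SBOX_MIX(x);
    return SBOX_SWAP(x);
}

int main(void)
{
    /* Expect 0x65656565: each zero byte maps to S(0x00) = 0x65. */
    printf("sbox(0x00000000) = 0x%08X\n", (unsigned)skinny128_sbox_spec(0));
    return 0;
}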
- */ -#define skinny128_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ - y = (((x >> 5) & (x << 1)) & 0x04040404U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ - x = ((x & 0x08080808U) << 1) | \ - ((x & 0x32323232U) << 2) | \ - ((x & 0x01010101U) << 5) | \ - ((x & 0x80808080U) >> 6) | \ - ((x & 0x40404040U) >> 4) | \ - ((x & 0x04040404U) >> 2); \ -} while (0) - -/* - * Apply the inverse of the SKINNY sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE_INV(x) - * ((((x) & 0x08080808U) << 1) | - * (((x) & 0x32323232U) << 2) | - * (((x) & 0x01010101U) << 5) | - * (((x) & 0xC0C0C0C0U) >> 5) | - * (((x) & 0x04040404U) >> 2)) - * - * x = SBOX_SWAP(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_inv_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ - x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ - y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ - x = ((x & 0x01010101U) << 2) | \ - ((x & 0x04040404U) << 4) | \ - ((x & 0x02020202U) << 6) | \ - ((x & 0x20202020U) >> 5) | \ - ((x & 0xC8C8C8C8U) >> 2) | \ - ((x & 0x10101010U) >> 1); \ -} while (0) - -/* Utilities for implementing SKINNY-64 */ - -#define skinny64_LFSR2(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ - } while (0) - -#define skinny64_LFSR3(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) -#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) - -#define skinny64_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint16_t row2 = tk[2]; \ - uint16_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 8) | (row3 >> 8); \ - tk[0] = ((row2 << 4) & 0xF000U) | \ - ((row2 >> 8) & 0x00F0U) | \ - ( row3 & 0x0F0FU); \ - tk[1] = ((row2 << 8) & 0xF000U) | \ - ((row3 >> 4) & 0x0F00U) | \ - ( row3 & 0x00F0U) | \ - ( row2 & 0x000FU); \ - } while (0) - 
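The "LFSR2 and LFSR3 are inverses of each other" remarks above are easy to spot-check in isolation; a small sketch using the 32-bit (SKINNY-128) variants copied from this header (illustrative only; the same relationship holds for the skinny64_* macros):

#include <stdio.h>
#include <stdint.h>

#define skinny128_LFSR2(x) \
    do { \
        uint32_t _x = (x); \
        (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \
              (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \
    } while (0)

#define skinny128_LFSR3(x) \
    do { \
        uint32_t _x = (x); \
        (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \
              (((_x << 7) ^ (_x << 1)) & 0x80808080U); \
    } while (0)

int main(void)
{
    uint32_t x = 0x12345678U;   /* arbitrary test word */
    uint32_t orig = x;
    skinny128_LFSR2(x);         /* step the TK2-style LFSR forward */
    skinny128_LFSR3(x);         /* the TK3-style LFSR steps it back */
    printf("%s\n", x == orig ? "LFSR3 undoes LFSR2" : "mismatch");
    return 0;
}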
-#define skinny64_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint16_t row0 = tk[0]; \ - uint16_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 << 8) & 0xF000U) | \ - ((row0 >> 4) & 0x0F00U) | \ - ((row1 >> 8) & 0x00F0U) | \ - ( row1 & 0x000FU); \ - tk[3] = ((row1 << 8) & 0xF000U) | \ - ((row0 << 8) & 0x0F00U) | \ - ((row1 >> 4) & 0x00F0U) | \ - ((row0 >> 8) & 0x000FU); \ - } while (0) - -/* - * Apply the SKINNY-64 sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT(x) - * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_SHIFT steps to be performed with one final rotation. - * This reduces the number of required shift operations from 14 to 10. - * - * We can further reduce the number of NOT operations from 4 to 2 - * using the technique from https://github.com/kste/skinny_avx to - * convert NOR-XOR operations into AND-XOR operations by converting - * the S-box into its NOT-inverse. - */ -#define skinny64_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ - x = ~x; \ - x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ -} while (0) - -/* - * Apply the inverse of the SKINNY-64 sbox. The original version - * from the specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT_INV(x) - * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * return SBOX_MIX(x); - */ -#define skinny64_inv_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = ~x; \ - x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ -} while (0) - -/** @endcond */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-util.h b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - 
(ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. 
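Helpers like lw_xor_block_2_dest above are typically what an AEAD mode uses to combine a message block with cipher state while capturing the result in the same pass; a tiny standalone demo, with the macro copied verbatim and arbitrary placeholder bytes:

#include <stdio.h>

#define lw_xor_block_2_dest(dest2, dest, src, len) \
    do { \
        unsigned char *_dest2 = (dest2); \
        unsigned char *_dest = (dest); \
        const unsigned char *_src = (src); \
        unsigned _len = (len); \
        while (_len > 0) { \
            *_dest2++ = (*_dest++ ^= *_src++); \
            --_len; \
        } \
    } while (0)

int main(void)
{
    unsigned char state[4] = {0xAA, 0xBB, 0xCC, 0xDD}; /* e.g. cipher state  */
    unsigned char msg[4]   = {0x01, 0x02, 0x03, 0x04}; /* e.g. message block */
    unsigned char out[4];                              /* e.g. output block  */
    lw_xor_block_2_dest(out, state, msg, 4);
    /* state now holds state ^ msg, and out records the same XOR result. */
    printf("out[0]=0x%02X state[0]=0x%02X\n", out[0], state[0]); /* both 0xAB */
    return 0;
}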
This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
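The composition idea behind LW_CRYPTO_ROTATE32_COMPOSED (build every rotation count from 1-bit and 8-bit steps, which are the cheap ones on AVR) is easy to sanity-check against the generic form; a small sketch using leftRotate13, which the composed definitions further down express as a left rotate by 16 followed by three right rotates by 1:

/* Sanity check that a composed rotation equals the generic one. */
#include <stdio.h>
#include <stdint.h>

static uint32_t rotl(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32 - bits));
}

static uint32_t rotr(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32 - bits));
}

int main(void)
{
    uint32_t x = 0x80C0F0AAU;   /* arbitrary test value */
    /* Left rotate by 13 = left rotate by 16, then right by 1 three times. */
    uint32_t composed = rotr(rotr(rotr(rotl(x, 16), 1), 1), 1);
    uint32_t generic  = rotl(x, 13);
    printf("%s\n", composed == generic ? "composed == generic" : "mismatch");
    return 0;
}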
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/skinny-aead.c b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/skinny-aead.c deleted file mode 100644 index 7558527..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/skinny-aead.c +++ /dev/null @@ -1,804 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
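The rotation macros deleted from internal-util.h above build every rotate count out of the two operations that are cheap on AVR: rotation by whole bytes and rotation by a single bit. A minimal standalone sketch (plain C; the helper names are illustrative, not part of the library) showing that the composed form of a rotate-left-by-5 matches a direct barrel-shift rotation:

```c
#include <stdint.h>
#include <stdio.h>

/* Direct forms of the generic leftRotate/rightRotate macros above. */
static uint32_t rotl32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32U - bits));
}
static uint32_t rotr32(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32U - bits));
}

int main(void)
{
    uint32_t x = 0x12345678;
    /* Composed form of leftRotate5: rotate left by a whole byte,
     * then right by three single-bit steps. */
    uint32_t composed = rotr32(rotr32(rotr32(rotl32(x, 8), 1), 1), 1);
    printf("direct:   %08lx\n", (unsigned long)rotl32(x, 5));
    printf("composed: %08lx\n", (unsigned long)composed);
    return 0;   /* both lines print 468acf02 */
}
```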
- */ - -#include "skinny-aead.h" -#include "internal-skinny128.h" -#include "internal-util.h" -#include - -aead_cipher_t const skinny_aead_m1_cipher = { - "SKINNY-AEAD-M1", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M1_NONCE_SIZE, - SKINNY_AEAD_M1_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m1_encrypt, - skinny_aead_m1_decrypt -}; - -aead_cipher_t const skinny_aead_m2_cipher = { - "SKINNY-AEAD-M2", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M2_NONCE_SIZE, - SKINNY_AEAD_M2_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m2_encrypt, - skinny_aead_m2_decrypt -}; - -aead_cipher_t const skinny_aead_m3_cipher = { - "SKINNY-AEAD-M3", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M3_NONCE_SIZE, - SKINNY_AEAD_M3_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m3_encrypt, - skinny_aead_m3_decrypt -}; - -aead_cipher_t const skinny_aead_m4_cipher = { - "SKINNY-AEAD-M4", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M4_NONCE_SIZE, - SKINNY_AEAD_M4_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m4_encrypt, - skinny_aead_m4_decrypt -}; - -aead_cipher_t const skinny_aead_m5_cipher = { - "SKINNY-AEAD-M5", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M5_NONCE_SIZE, - SKINNY_AEAD_M5_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m5_encrypt, - skinny_aead_m5_decrypt -}; - -aead_cipher_t const skinny_aead_m6_cipher = { - "SKINNY-AEAD-M6", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M6_NONCE_SIZE, - SKINNY_AEAD_M6_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m6_encrypt, - skinny_aead_m6_decrypt -}; - -/* Domain separator prefixes for all of the SKINNY-AEAD family members */ -#define DOMAIN_SEP_M1 0x00 -#define DOMAIN_SEP_M2 0x10 -#define DOMAIN_SEP_M3 0x08 -#define DOMAIN_SEP_M4 0x18 -#define DOMAIN_SEP_M5 0x10 -#define DOMAIN_SEP_M6 0x18 - -/** - * \brief Initialize the key and nonce for SKINNY-128-384 based AEAD schemes. - * - * \param ks The key schedule to initialize. - * \param key Points to the 16 bytes of the key. - * \param nonce Points to the nonce. - * \param nonce_len Length of the nonce in bytes. - */ -static void skinny_aead_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - const unsigned char *nonce, unsigned nonce_len) -{ - unsigned char k[48]; - memset(k, 0, 16); - memcpy(k + 16, nonce, nonce_len); - memset(k + 16 + nonce_len, 0, 16 - nonce_len); - memcpy(k + 32, key, 16); - skinny_128_384_init(ks, k); -} - -/** - * \brief Set the domain separation value in the tweak for SKINNY-128-384. - * - * \param ks Key schedule for the block cipher. - * \param d Domain separation value to write into the tweak. - */ -#define skinny_aead_128_384_set_domain(ks,d) ((ks)->TK1[15] = (d)) - -/** - * \brief Sets the LFSR field in the tweak for SKINNY-128-384. - * - * \param ks Key schedule for the block cipher. - * \param lfsr 64-bit LFSR value. - */ -#define skinny_aead_128_384_set_lfsr(ks,lfsr) le_store_word64((ks)->TK1, (lfsr)) - -/** - * \brief Updates the LFSR value for SKINNY-128-384. - * - * \param lfsr 64-bit LFSR value to be updated. - */ -#define skinny_aead_128_384_update_lfsr(lfsr) \ - do { \ - uint8_t feedback = ((lfsr) & (1ULL << 63)) ? 0x1B : 0x00; \ - (lfsr) = ((lfsr) << 1) ^ feedback; \ - } while (0) - -/** - * \brief Authenticates the associated data for a SKINNY-128-384 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param tag Final tag to XOR the authentication checksum into. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
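The skinny_aead_128_384_update_lfsr macro above steps a 64-bit Galois LFSR that acts as the per-block counter written into TK1. A small standalone sketch of the same rule (the wrapper function and the printing loop are illustrative only); starting from 1 the value simply doubles until bit 63 falls off, at which point the 0x1B feedback is folded back into the low byte:

```c
#include <stdint.h>
#include <stdio.h>

/* Same update rule as skinny_aead_128_384_update_lfsr above. */
static uint64_t lfsr64_update(uint64_t lfsr)
{
    uint8_t feedback = (lfsr & (1ULL << 63)) ? 0x1B : 0x00;
    return (lfsr << 1) ^ feedback;
}

int main(void)
{
    uint64_t lfsr = 1;               /* first block uses the value 1 */
    for (int block = 0; block < 5; ++block) {
        printf("block %d: tweak counter = %016llx\n",
               block, (unsigned long long)lfsr);
        lfsr = lfsr64_update(lfsr);  /* 1, 2, 4, 8, 16, ... */
    }
    return 0;
}
```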
- */ -static void skinny_aead_128_384_authenticate - (skinny_128_384_key_schedule_t *ks, unsigned char prefix, - unsigned char tag[SKINNY_128_BLOCK_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint64_t lfsr = 1; - skinny_aead_128_384_set_domain(ks, prefix | 2); - while (adlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_128_384_encrypt(ks, block, ad); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - ad += SKINNY_128_BLOCK_SIZE; - adlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_384_update_lfsr(lfsr); - } - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_aead_128_384_set_domain(ks, prefix | 3); - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, SKINNY_128_BLOCK_SIZE - temp - 1); - skinny_128_384_encrypt(ks, block, block); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - } -} - -/** - * \brief Encrypts the plaintext for a SKINNY-128-384 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the plaintext buffer. - * \param mlen Number of bytes of plaintext to be encrypted. - */ -static void skinny_aead_128_384_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint64_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_384_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_384_set_lfsr(ks, lfsr); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - skinny_128_384_encrypt(ks, c, m); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_384_update_lfsr(lfsr); - } - skinny_aead_128_384_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_384_set_domain(ks, prefix | 1); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_384_encrypt(ks, block, block); - lw_xor_block_2_src(c, block, m, temp); - skinny_aead_128_384_update_lfsr(lfsr); - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_aead_128_384_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_384_set_domain(ks, prefix | 4); - } - skinny_128_384_encrypt(ks, sum, sum); -} - -/** - * \brief Decrypts the ciphertext for a SKINNY-128-384 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the ciphertext buffer. - * \param mlen Number of bytes of ciphertext to be decrypted. 
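The associated-data path above pads a short final block with a single 0x80 byte followed by zeroes before encrypting it, and the message path folds the same padding into the running checksum by XORing 0x80 after the last plaintext byte. A tiny illustrative helper (not part of the library; len must be smaller than the block size) that captures the padding rule:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16   /* SKINNY_128_BLOCK_SIZE in the code above */

/* 10* padding of a final partial block: copy the data, append one 0x80
 * byte, then fill the remainder of the block with zeroes. */
static void pad_block(uint8_t block[BLOCK_SIZE], const uint8_t *data, unsigned len)
{
    memcpy(block, data, len);
    block[len] = 0x80;
    memset(block + len + 1, 0, BLOCK_SIZE - len - 1);
}

int main(void)
{
    uint8_t block[BLOCK_SIZE];
    const uint8_t data[5] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
    pad_block(block, data, sizeof(data));
    for (int i = 0; i < BLOCK_SIZE; ++i)
        printf("%02x ", block[i]);   /* 11 22 33 44 55 80 00 ... 00 */
    printf("\n");
    return 0;
}
```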
- */ -static void skinny_aead_128_384_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint64_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_384_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_128_384_decrypt(ks, m, c); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_384_update_lfsr(lfsr); - } - skinny_aead_128_384_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_384_set_domain(ks, prefix | 1); - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_384_encrypt(ks, block, block); - lw_xor_block_2_src(m, block, c, temp); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - skinny_aead_128_384_update_lfsr(lfsr); - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_aead_128_384_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_384_set_domain(ks, prefix | 4); - } - skinny_128_384_encrypt(ks, sum, sum); -} - -int skinny_aead_m1_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M1_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M1_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M1, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M1, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M1_TAG_SIZE); - return 0; -} - -int skinny_aead_m1_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M1_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M1_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M1_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M1, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M1, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M1_TAG_SIZE); -} - -int skinny_aead_m2_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* 
Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M2_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M2_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M2, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M2, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M2_TAG_SIZE); - return 0; -} - -int skinny_aead_m2_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M2_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M2_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M2_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M2, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M2, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M2_TAG_SIZE); -} - -int skinny_aead_m3_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M3_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M3_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M3, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M3, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M3_TAG_SIZE); - return 0; -} - -int skinny_aead_m3_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M3_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M3_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M3_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M3, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M3, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M3_TAG_SIZE); -} - -int skinny_aead_m4_encrypt - (unsigned char *c, 
unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M4_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M4_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M4, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M4, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M4_TAG_SIZE); - return 0; -} - -int skinny_aead_m4_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M4_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M4_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M4_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M4, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M4, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M4_TAG_SIZE); -} - -/** - * \brief Initialize the key and nonce for SKINNY-128-256 based AEAD schemes. - * - * \param ks The key schedule to initialize. - * \param key Points to the 16 bytes of the key. - * \param nonce Points to the nonce. - * \param nonce_len Length of the nonce in bytes. - */ -static void skinny_aead_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - const unsigned char *nonce, unsigned nonce_len) -{ - unsigned char k[32]; - memset(k, 0, 16 - nonce_len); - memcpy(k + 16 - nonce_len, nonce, nonce_len); - memcpy(k + 16, key, 16); - skinny_128_256_init(ks, k); -} - -/** - * \brief Set the domain separation value in the tweak for SKINNY-128-256. - * - * \param ks Key schedule for the block cipher. - * \param d Domain separation value to write into the tweak. - */ -#define skinny_aead_128_256_set_domain(ks,d) ((ks)->TK1[3] = (d)) - -/** - * \brief Sets the LFSR field in the tweak for SKINNY-128-256. - * - * \param ks Key schedule for the block cipher. - * \param lfsr 24-bit LFSR value. - */ -#define skinny_aead_128_256_set_lfsr(ks,lfsr) \ - do { \ - (ks)->TK1[0] = (uint8_t)(lfsr); \ - (ks)->TK1[1] = (uint8_t)((lfsr) >> 8); \ - (ks)->TK1[2] = (uint8_t)((lfsr) >> 16); \ - } while (0) - -/** - * \brief Updates the LFSR value for SKINNY-128-256. - * - * \param lfsr 24-bit LFSR value to be updated. - */ -#define skinny_aead_128_256_update_lfsr(lfsr) \ - do { \ - uint32_t feedback = ((lfsr) & (((uint32_t)1) << 23)) ? 0x1B : 0x00; \ - (lfsr) = ((lfsr) << 1) ^ (feedback); \ - } while (0) - -/** - * \brief Authenticates the associated data for a SKINNY-128-256 based AEAD. - * - * \param ks The key schedule to use. 
- * \param prefix Domain separation prefix for the family member. - * \param tag Final tag to XOR the authentication checksum into. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - */ -static void skinny_aead_128_256_authenticate - (skinny_128_256_key_schedule_t *ks, unsigned char prefix, - unsigned char tag[SKINNY_128_BLOCK_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint32_t lfsr = 1; - skinny_aead_128_256_set_domain(ks, prefix | 2); - while (adlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_128_256_encrypt(ks, block, ad); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - ad += SKINNY_128_BLOCK_SIZE; - adlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_256_update_lfsr(lfsr); - } - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_aead_128_256_set_domain(ks, prefix | 3); - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, SKINNY_128_BLOCK_SIZE - temp - 1); - skinny_128_256_encrypt(ks, block, block); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - } -} - -/** - * \brief Encrypts the plaintext for a SKINNY-128-256 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the plaintext buffer. - * \param mlen Number of bytes of plaintext to be encrypted. - */ -static void skinny_aead_128_256_encrypt - (skinny_128_256_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint32_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_256_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_256_set_lfsr(ks, lfsr); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - skinny_128_256_encrypt(ks, c, m); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_256_update_lfsr(lfsr); - } - skinny_aead_128_256_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_256_set_domain(ks, prefix | 1); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_256_encrypt(ks, block, block); - lw_xor_block_2_src(c, block, m, temp); - skinny_aead_128_256_update_lfsr(lfsr); - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_aead_128_256_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_256_set_domain(ks, prefix | 4); - } - skinny_128_256_encrypt(ks, sum, sum); -} - -/** - * \brief Decrypts the ciphertext for a SKINNY-128-256 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the ciphertext buffer. - * \param mlen Number of bytes of ciphertext to be decrypted. 
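Compared with the SKINNY-128-384 members, the SKINNY-128-256 schedule above right-aligns the nonce in TK1 (the key becomes TK2) and keeps a 24-bit LFSR in TK1[0..2] instead of the 64-bit one. A compact sketch of that smaller counter, mirroring skinny_aead_128_256_update_lfsr (the function name is illustrative; the explicit 24-bit mask is added for clarity, whereas the original simply never stores the high byte):

```c
#include <stdint.h>
#include <stdio.h>

/* Same rule as skinny_aead_128_256_update_lfsr above: a 24-bit Galois
 * LFSR whose three bytes are what the code writes into TK1[0..2]. */
static uint32_t lfsr24_update(uint32_t lfsr)
{
    uint32_t feedback = (lfsr & (((uint32_t)1) << 23)) ? 0x1B : 0x00;
    return ((lfsr << 1) ^ feedback) & 0x00FFFFFFU;
}

int main(void)
{
    uint32_t lfsr = 1;
    for (int block = 0; block < 4; ++block) {
        printf("block %d: TK1[0..2] = %02x %02x %02x\n", block,
               (unsigned)(lfsr & 0xFF),
               (unsigned)((lfsr >> 8) & 0xFF),
               (unsigned)((lfsr >> 16) & 0xFF));
        lfsr = lfsr24_update(lfsr);
    }
    return 0;
}
```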
- */ -static void skinny_aead_128_256_decrypt - (skinny_128_256_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint32_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_256_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_128_256_decrypt(ks, m, c); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_256_update_lfsr(lfsr); - } - skinny_aead_128_256_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_256_set_domain(ks, prefix | 1); - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_256_encrypt(ks, block, block); - lw_xor_block_2_src(m, block, c, temp); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - skinny_aead_128_256_update_lfsr(lfsr); - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_aead_128_256_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_256_set_domain(ks, prefix | 4); - } - skinny_128_256_encrypt(ks, sum, sum); -} - -int skinny_aead_m5_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M5_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M5_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_256_encrypt(&ks, DOMAIN_SEP_M5, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M5, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M5_TAG_SIZE); - return 0; -} - -int skinny_aead_m5_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M5_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M5_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M5_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_256_decrypt(&ks, DOMAIN_SEP_M5, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M5, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M5_TAG_SIZE); -} - -int skinny_aead_m6_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* 
Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M6_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M6_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_256_encrypt(&ks, DOMAIN_SEP_M6, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M6, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M6_TAG_SIZE); - return 0; -} - -int skinny_aead_m6_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M6_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M6_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M6_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_256_decrypt(&ks, DOMAIN_SEP_M6, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M6, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M6_TAG_SIZE); -} diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/skinny-aead.h b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/skinny-aead.h deleted file mode 100644 index c6b54fb..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys-avr/skinny-aead.h +++ /dev/null @@ -1,518 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SKINNY_AEAD_H -#define LWCRYPTO_SKINNY_AEAD_H - -#include "aead-common.h" - -/** - * \file skinny-aead.h - * \brief Authenticated encryption based on the SKINNY block cipher. - * - * SKINNY-AEAD is a family of authenticated encryption algorithms - * that are built around the SKINNY tweakable block cipher. 
There - * are six members in the family: - * - * \li SKINNY-AEAD-M1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. This is the - * primary member of the family. - * \li SKINNY-AEAD-M2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li SKINNY-AEAD-M3 has a 128-bit key, a 128-bit nonce, and a 64-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li SKINNY-AEAD-M4 has a 128-bit key, a 96-bit nonce, and a 64-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li SKINNY-AEAD-M5 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * \li SKINNY-AEAD-M6 has a 128-bit key, a 96-bit nonce, and a 64-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * - * The SKINNY-AEAD family also includes two hash algorithms: - * - * \li SKINNY-tk3-HASH with a 256-bit hash output, based around the - * SKINNY-128-384 tweakable block cipher. This is the primary hashing - * member of the family. - * \li SKINNY-tk2-HASH with a 256-bit hash output, based around the - * SKINNY-128-256 tweakable block cipher. - * - * References: https://sites.google.com/site/skinnycipher/home - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all SKINNY-AEAD family members. - */ -#define SKINNY_AEAD_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M1. - */ -#define SKINNY_AEAD_M1_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M1. - */ -#define SKINNY_AEAD_M1_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M2. - */ -#define SKINNY_AEAD_M2_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M2. - */ -#define SKINNY_AEAD_M2_NONCE_SIZE 12 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M3. - */ -#define SKINNY_AEAD_M3_TAG_SIZE 8 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M3. - */ -#define SKINNY_AEAD_M3_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M4. - */ -#define SKINNY_AEAD_M4_TAG_SIZE 8 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M4. - */ -#define SKINNY_AEAD_M4_NONCE_SIZE 12 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M5. - */ -#define SKINNY_AEAD_M5_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M5. - */ -#define SKINNY_AEAD_M5_NONCE_SIZE 12 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M6. - */ -#define SKINNY_AEAD_M6_TAG_SIZE 8 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M6. - */ -#define SKINNY_AEAD_M6_NONCE_SIZE 12 - -/** - * \brief Meta-information block for the SKINNY-AEAD-M1 cipher. - */ -extern aead_cipher_t const skinny_aead_m1_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M2 cipher. - */ -extern aead_cipher_t const skinny_aead_m2_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M3 cipher. - */ -extern aead_cipher_t const skinny_aead_m3_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M4 cipher. - */ -extern aead_cipher_t const skinny_aead_m4_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M5 cipher. - */ -extern aead_cipher_t const skinny_aead_m5_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M6 cipher. 
- */ -extern aead_cipher_t const skinny_aead_m6_cipher; - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M1. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m1_decrypt() - */ -int skinny_aead_m1_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M1. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m1_encrypt() - */ -int skinny_aead_m1_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M2. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
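A short round-trip sketch for the SKINNY-AEAD-M1 functions declared above (key, nonce and message values are arbitrary test data; it assumes the program is compiled and linked against this implementation so that skinny-aead.h and the size macros are available):

```c
#include <stdio.h>
#include <string.h>
#include "skinny-aead.h"

int main(void)
{
    unsigned char key[SKINNY_AEAD_KEY_SIZE] = {0};
    unsigned char nonce[SKINNY_AEAD_M1_NONCE_SIZE] = {0};
    unsigned char msg[] = "hello, skinny";
    unsigned char ad[] = "header";

    unsigned char c[sizeof(msg) + SKINNY_AEAD_M1_TAG_SIZE];
    unsigned char p[sizeof(msg)];
    unsigned long long clen, plen;

    /* Encrypt: ciphertext plus 16-byte tag is written to c. */
    skinny_aead_m1_encrypt(c, &clen, msg, sizeof(msg), ad, sizeof(ad),
                           NULL, nonce, key);

    /* Decrypt: returns non-zero if the authentication tag is wrong. */
    if (skinny_aead_m1_decrypt(p, &plen, NULL, c, clen, ad, sizeof(ad),
                               nonce, key) != 0) {
        printf("tag check failed\n");
        return 1;
    }
    printf("recovered %llu bytes: %s\n", plen, p);
    return 0;
}
```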
- * - * \sa skinny_aead_m2_decrypt() - */ -int skinny_aead_m2_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M2. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m2_encrypt() - */ -int skinny_aead_m2_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M3. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m3_decrypt() - */ -int skinny_aead_m3_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M3. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. 
- * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m3_encrypt() - */ -int skinny_aead_m3_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M4. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m4_decrypt() - */ -int skinny_aead_m4_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M4. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m4_encrypt() - */ -int skinny_aead_m4_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M5. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m5_decrypt() - */ -int skinny_aead_m5_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M5. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m5_encrypt() - */ -int skinny_aead_m5_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M6. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m6_decrypt() - */ -int skinny_aead_m6_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M6. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. 
- * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m6_encrypt() - */ -int skinny_aead_m6_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/internal-skinny128-avr.S b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/internal-skinny128-avr.S new file mode 100644 index 0000000..d342cd5 --- /dev/null +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/internal-skinny128-avr.S @@ -0,0 +1,10099 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 256 +table_0: + .byte 101 + .byte 76 + .byte 106 + .byte 66 + .byte 75 + .byte 99 + .byte 67 + .byte 107 + .byte 85 + .byte 117 + .byte 90 + .byte 122 + .byte 83 + .byte 115 + .byte 91 + .byte 123 + .byte 53 + .byte 140 + .byte 58 + .byte 129 + .byte 137 + .byte 51 + .byte 128 + .byte 59 + .byte 149 + .byte 37 + .byte 152 + .byte 42 + .byte 144 + .byte 35 + .byte 153 + .byte 43 + .byte 229 + .byte 204 + .byte 232 + .byte 193 + .byte 201 + .byte 224 + .byte 192 + .byte 233 + .byte 213 + .byte 245 + .byte 216 + .byte 248 + .byte 208 + .byte 240 + .byte 217 + .byte 249 + .byte 165 + .byte 28 + .byte 168 + .byte 18 + .byte 27 + .byte 160 + .byte 19 + .byte 169 + .byte 5 + .byte 181 + .byte 10 + .byte 184 + .byte 3 + .byte 176 + .byte 11 + .byte 185 + .byte 50 + .byte 136 + .byte 60 + .byte 133 + .byte 141 + .byte 52 + .byte 132 + .byte 61 + .byte 145 + .byte 34 + .byte 156 + .byte 44 + .byte 148 + .byte 36 + .byte 157 + .byte 45 + .byte 98 + .byte 74 + .byte 108 + .byte 69 + .byte 77 + .byte 100 + .byte 68 + .byte 109 + .byte 82 + .byte 114 + .byte 92 + .byte 124 + .byte 84 + .byte 116 + .byte 93 + .byte 125 + .byte 161 + .byte 26 + .byte 172 + .byte 21 + .byte 29 + .byte 164 + .byte 20 + .byte 173 + .byte 2 + .byte 177 + .byte 12 + .byte 188 + .byte 4 + .byte 180 + .byte 13 + .byte 189 + .byte 225 + .byte 200 + .byte 236 + .byte 197 + .byte 205 + .byte 228 + .byte 196 + .byte 237 + .byte 209 + .byte 241 + .byte 220 + .byte 252 + .byte 212 + .byte 244 + .byte 221 + .byte 253 + .byte 54 + .byte 142 + .byte 56 + .byte 130 + .byte 139 + .byte 48 + .byte 131 + .byte 57 + .byte 150 + .byte 38 + .byte 154 + .byte 40 + .byte 147 + .byte 32 + .byte 155 + .byte 41 + .byte 102 + .byte 78 + .byte 104 + .byte 65 + .byte 73 + .byte 96 + .byte 64 + .byte 105 + .byte 86 + .byte 118 + .byte 88 + .byte 120 + .byte 80 + .byte 112 + .byte 89 + .byte 121 + .byte 166 + .byte 30 + .byte 170 + 
.byte 17 + .byte 25 + .byte 163 + .byte 16 + .byte 171 + .byte 6 + .byte 182 + .byte 8 + .byte 186 + .byte 0 + .byte 179 + .byte 9 + .byte 187 + .byte 230 + .byte 206 + .byte 234 + .byte 194 + .byte 203 + .byte 227 + .byte 195 + .byte 235 + .byte 214 + .byte 246 + .byte 218 + .byte 250 + .byte 211 + .byte 243 + .byte 219 + .byte 251 + .byte 49 + .byte 138 + .byte 62 + .byte 134 + .byte 143 + .byte 55 + .byte 135 + .byte 63 + .byte 146 + .byte 33 + .byte 158 + .byte 46 + .byte 151 + .byte 39 + .byte 159 + .byte 47 + .byte 97 + .byte 72 + .byte 110 + .byte 70 + .byte 79 + .byte 103 + .byte 71 + .byte 111 + .byte 81 + .byte 113 + .byte 94 + .byte 126 + .byte 87 + .byte 119 + .byte 95 + .byte 127 + .byte 162 + .byte 24 + .byte 174 + .byte 22 + .byte 31 + .byte 167 + .byte 23 + .byte 175 + .byte 1 + .byte 178 + .byte 14 + .byte 190 + .byte 7 + .byte 183 + .byte 15 + .byte 191 + .byte 226 + .byte 202 + .byte 238 + .byte 198 + .byte 207 + .byte 231 + .byte 199 + .byte 239 + .byte 210 + .byte 242 + .byte 222 + .byte 254 + .byte 215 + .byte 247 + .byte 223 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 256 +table_1: + .byte 172 + .byte 232 + .byte 104 + .byte 60 + .byte 108 + .byte 56 + .byte 168 + .byte 236 + .byte 170 + .byte 174 + .byte 58 + .byte 62 + .byte 106 + .byte 110 + .byte 234 + .byte 238 + .byte 166 + .byte 163 + .byte 51 + .byte 54 + .byte 102 + .byte 99 + .byte 227 + .byte 230 + .byte 225 + .byte 164 + .byte 97 + .byte 52 + .byte 49 + .byte 100 + .byte 161 + .byte 228 + .byte 141 + .byte 201 + .byte 73 + .byte 29 + .byte 77 + .byte 25 + .byte 137 + .byte 205 + .byte 139 + .byte 143 + .byte 27 + .byte 31 + .byte 75 + .byte 79 + .byte 203 + .byte 207 + .byte 133 + .byte 192 + .byte 64 + .byte 21 + .byte 69 + .byte 16 + .byte 128 + .byte 197 + .byte 130 + .byte 135 + .byte 18 + .byte 23 + .byte 66 + .byte 71 + .byte 194 + .byte 199 + .byte 150 + .byte 147 + .byte 3 + .byte 6 + .byte 86 + .byte 83 + .byte 211 + .byte 214 + .byte 209 + .byte 148 + .byte 81 + .byte 4 + .byte 1 + .byte 84 + .byte 145 + .byte 212 + .byte 156 + .byte 216 + .byte 88 + .byte 12 + .byte 92 + .byte 8 + .byte 152 + .byte 220 + .byte 154 + .byte 158 + .byte 10 + .byte 14 + .byte 90 + .byte 94 + .byte 218 + .byte 222 + .byte 149 + .byte 208 + .byte 80 + .byte 5 + .byte 85 + .byte 0 + .byte 144 + .byte 213 + .byte 146 + .byte 151 + .byte 2 + .byte 7 + .byte 82 + .byte 87 + .byte 210 + .byte 215 + .byte 157 + .byte 217 + .byte 89 + .byte 13 + .byte 93 + .byte 9 + .byte 153 + .byte 221 + .byte 155 + .byte 159 + .byte 11 + .byte 15 + .byte 91 + .byte 95 + .byte 219 + .byte 223 + .byte 22 + .byte 19 + .byte 131 + .byte 134 + .byte 70 + .byte 67 + .byte 195 + .byte 198 + .byte 65 + .byte 20 + .byte 193 + .byte 132 + .byte 17 + .byte 68 + .byte 129 + .byte 196 + .byte 28 + .byte 72 + .byte 200 + .byte 140 + .byte 76 + .byte 24 + .byte 136 + .byte 204 + .byte 26 + .byte 30 + .byte 138 + .byte 142 + .byte 74 + .byte 78 + .byte 202 + .byte 206 + .byte 53 + .byte 96 + .byte 224 + .byte 165 + .byte 101 + .byte 48 + .byte 160 + .byte 229 + .byte 50 + .byte 55 + .byte 162 + .byte 167 + .byte 98 + .byte 103 + .byte 226 + .byte 231 + .byte 61 + .byte 105 + .byte 233 + .byte 173 + .byte 109 + .byte 57 + .byte 169 + .byte 237 + .byte 59 + .byte 63 + .byte 171 + .byte 175 + .byte 107 + .byte 111 + .byte 235 + .byte 239 + .byte 38 + .byte 35 + .byte 179 + .byte 182 + .byte 118 + .byte 115 + .byte 243 + .byte 246 + .byte 113 + .byte 36 + .byte 241 + .byte 180 + .byte 33 + 
.byte 116 + .byte 177 + .byte 244 + .byte 44 + .byte 120 + .byte 248 + .byte 188 + .byte 124 + .byte 40 + .byte 184 + .byte 252 + .byte 42 + .byte 46 + .byte 186 + .byte 190 + .byte 122 + .byte 126 + .byte 250 + .byte 254 + .byte 37 + .byte 112 + .byte 240 + .byte 181 + .byte 117 + .byte 32 + .byte 176 + .byte 245 + .byte 34 + .byte 39 + .byte 178 + .byte 183 + .byte 114 + .byte 119 + .byte 242 + .byte 247 + .byte 45 + .byte 121 + .byte 249 + .byte 189 + .byte 125 + .byte 41 + .byte 185 + .byte 253 + .byte 43 + .byte 47 + .byte 187 + .byte 191 + .byte 123 + .byte 127 + .byte 251 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_2, @object + .size table_2, 256 +table_2: + .byte 0 + .byte 2 + .byte 4 + .byte 6 + .byte 8 + .byte 10 + .byte 12 + .byte 14 + .byte 16 + .byte 18 + .byte 20 + .byte 22 + .byte 24 + .byte 26 + .byte 28 + .byte 30 + .byte 32 + .byte 34 + .byte 36 + .byte 38 + .byte 40 + .byte 42 + .byte 44 + .byte 46 + .byte 48 + .byte 50 + .byte 52 + .byte 54 + .byte 56 + .byte 58 + .byte 60 + .byte 62 + .byte 65 + .byte 67 + .byte 69 + .byte 71 + .byte 73 + .byte 75 + .byte 77 + .byte 79 + .byte 81 + .byte 83 + .byte 85 + .byte 87 + .byte 89 + .byte 91 + .byte 93 + .byte 95 + .byte 97 + .byte 99 + .byte 101 + .byte 103 + .byte 105 + .byte 107 + .byte 109 + .byte 111 + .byte 113 + .byte 115 + .byte 117 + .byte 119 + .byte 121 + .byte 123 + .byte 125 + .byte 127 + .byte 128 + .byte 130 + .byte 132 + .byte 134 + .byte 136 + .byte 138 + .byte 140 + .byte 142 + .byte 144 + .byte 146 + .byte 148 + .byte 150 + .byte 152 + .byte 154 + .byte 156 + .byte 158 + .byte 160 + .byte 162 + .byte 164 + .byte 166 + .byte 168 + .byte 170 + .byte 172 + .byte 174 + .byte 176 + .byte 178 + .byte 180 + .byte 182 + .byte 184 + .byte 186 + .byte 188 + .byte 190 + .byte 193 + .byte 195 + .byte 197 + .byte 199 + .byte 201 + .byte 203 + .byte 205 + .byte 207 + .byte 209 + .byte 211 + .byte 213 + .byte 215 + .byte 217 + .byte 219 + .byte 221 + .byte 223 + .byte 225 + .byte 227 + .byte 229 + .byte 231 + .byte 233 + .byte 235 + .byte 237 + .byte 239 + .byte 241 + .byte 243 + .byte 245 + .byte 247 + .byte 249 + .byte 251 + .byte 253 + .byte 255 + .byte 1 + .byte 3 + .byte 5 + .byte 7 + .byte 9 + .byte 11 + .byte 13 + .byte 15 + .byte 17 + .byte 19 + .byte 21 + .byte 23 + .byte 25 + .byte 27 + .byte 29 + .byte 31 + .byte 33 + .byte 35 + .byte 37 + .byte 39 + .byte 41 + .byte 43 + .byte 45 + .byte 47 + .byte 49 + .byte 51 + .byte 53 + .byte 55 + .byte 57 + .byte 59 + .byte 61 + .byte 63 + .byte 64 + .byte 66 + .byte 68 + .byte 70 + .byte 72 + .byte 74 + .byte 76 + .byte 78 + .byte 80 + .byte 82 + .byte 84 + .byte 86 + .byte 88 + .byte 90 + .byte 92 + .byte 94 + .byte 96 + .byte 98 + .byte 100 + .byte 102 + .byte 104 + .byte 106 + .byte 108 + .byte 110 + .byte 112 + .byte 114 + .byte 116 + .byte 118 + .byte 120 + .byte 122 + .byte 124 + .byte 126 + .byte 129 + .byte 131 + .byte 133 + .byte 135 + .byte 137 + .byte 139 + .byte 141 + .byte 143 + .byte 145 + .byte 147 + .byte 149 + .byte 151 + .byte 153 + .byte 155 + .byte 157 + .byte 159 + .byte 161 + .byte 163 + .byte 165 + .byte 167 + .byte 169 + .byte 171 + .byte 173 + .byte 175 + .byte 177 + .byte 179 + .byte 181 + .byte 183 + .byte 185 + .byte 187 + .byte 189 + .byte 191 + .byte 192 + .byte 194 + .byte 196 + .byte 198 + .byte 200 + .byte 202 + .byte 204 + .byte 206 + .byte 208 + .byte 210 + .byte 212 + .byte 214 + .byte 216 + .byte 218 + .byte 220 + .byte 222 + .byte 224 + .byte 226 + .byte 228 + .byte 230 + .byte 232 + .byte 234 + 
.byte 236 + .byte 238 + .byte 240 + .byte 242 + .byte 244 + .byte 246 + .byte 248 + .byte 250 + .byte 252 + .byte 254 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_3, @object + .size table_3, 256 +table_3: + .byte 0 + .byte 128 + .byte 1 + .byte 129 + .byte 2 + .byte 130 + .byte 3 + .byte 131 + .byte 4 + .byte 132 + .byte 5 + .byte 133 + .byte 6 + .byte 134 + .byte 7 + .byte 135 + .byte 8 + .byte 136 + .byte 9 + .byte 137 + .byte 10 + .byte 138 + .byte 11 + .byte 139 + .byte 12 + .byte 140 + .byte 13 + .byte 141 + .byte 14 + .byte 142 + .byte 15 + .byte 143 + .byte 16 + .byte 144 + .byte 17 + .byte 145 + .byte 18 + .byte 146 + .byte 19 + .byte 147 + .byte 20 + .byte 148 + .byte 21 + .byte 149 + .byte 22 + .byte 150 + .byte 23 + .byte 151 + .byte 24 + .byte 152 + .byte 25 + .byte 153 + .byte 26 + .byte 154 + .byte 27 + .byte 155 + .byte 28 + .byte 156 + .byte 29 + .byte 157 + .byte 30 + .byte 158 + .byte 31 + .byte 159 + .byte 160 + .byte 32 + .byte 161 + .byte 33 + .byte 162 + .byte 34 + .byte 163 + .byte 35 + .byte 164 + .byte 36 + .byte 165 + .byte 37 + .byte 166 + .byte 38 + .byte 167 + .byte 39 + .byte 168 + .byte 40 + .byte 169 + .byte 41 + .byte 170 + .byte 42 + .byte 171 + .byte 43 + .byte 172 + .byte 44 + .byte 173 + .byte 45 + .byte 174 + .byte 46 + .byte 175 + .byte 47 + .byte 176 + .byte 48 + .byte 177 + .byte 49 + .byte 178 + .byte 50 + .byte 179 + .byte 51 + .byte 180 + .byte 52 + .byte 181 + .byte 53 + .byte 182 + .byte 54 + .byte 183 + .byte 55 + .byte 184 + .byte 56 + .byte 185 + .byte 57 + .byte 186 + .byte 58 + .byte 187 + .byte 59 + .byte 188 + .byte 60 + .byte 189 + .byte 61 + .byte 190 + .byte 62 + .byte 191 + .byte 63 + .byte 64 + .byte 192 + .byte 65 + .byte 193 + .byte 66 + .byte 194 + .byte 67 + .byte 195 + .byte 68 + .byte 196 + .byte 69 + .byte 197 + .byte 70 + .byte 198 + .byte 71 + .byte 199 + .byte 72 + .byte 200 + .byte 73 + .byte 201 + .byte 74 + .byte 202 + .byte 75 + .byte 203 + .byte 76 + .byte 204 + .byte 77 + .byte 205 + .byte 78 + .byte 206 + .byte 79 + .byte 207 + .byte 80 + .byte 208 + .byte 81 + .byte 209 + .byte 82 + .byte 210 + .byte 83 + .byte 211 + .byte 84 + .byte 212 + .byte 85 + .byte 213 + .byte 86 + .byte 214 + .byte 87 + .byte 215 + .byte 88 + .byte 216 + .byte 89 + .byte 217 + .byte 90 + .byte 218 + .byte 91 + .byte 219 + .byte 92 + .byte 220 + .byte 93 + .byte 221 + .byte 94 + .byte 222 + .byte 95 + .byte 223 + .byte 224 + .byte 96 + .byte 225 + .byte 97 + .byte 226 + .byte 98 + .byte 227 + .byte 99 + .byte 228 + .byte 100 + .byte 229 + .byte 101 + .byte 230 + .byte 102 + .byte 231 + .byte 103 + .byte 232 + .byte 104 + .byte 233 + .byte 105 + .byte 234 + .byte 106 + .byte 235 + .byte 107 + .byte 236 + .byte 108 + .byte 237 + .byte 109 + .byte 238 + .byte 110 + .byte 239 + .byte 111 + .byte 240 + .byte 112 + .byte 241 + .byte 113 + .byte 242 + .byte 114 + .byte 243 + .byte 115 + .byte 244 + .byte 116 + .byte 245 + .byte 117 + .byte 246 + .byte 118 + .byte 247 + .byte 119 + .byte 248 + .byte 120 + .byte 249 + .byte 121 + .byte 250 + .byte 122 + .byte 251 + .byte 123 + .byte 252 + .byte 124 + .byte 253 + .byte 125 + .byte 254 + .byte 126 + .byte 255 + .byte 127 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_4, @object + .size table_4, 112 +table_4: + .byte 1 + .byte 0 + .byte 3 + .byte 0 + .byte 7 + .byte 0 + .byte 15 + .byte 0 + .byte 15 + .byte 1 + .byte 14 + .byte 3 + .byte 13 + .byte 3 + .byte 11 + .byte 3 + .byte 7 + .byte 3 + .byte 15 + .byte 2 + .byte 14 + .byte 1 + .byte 12 + .byte 3 + 
.byte 9 + .byte 3 + .byte 3 + .byte 3 + .byte 7 + .byte 2 + .byte 14 + .byte 0 + .byte 13 + .byte 1 + .byte 10 + .byte 3 + .byte 5 + .byte 3 + .byte 11 + .byte 2 + .byte 6 + .byte 1 + .byte 12 + .byte 2 + .byte 8 + .byte 1 + .byte 0 + .byte 3 + .byte 1 + .byte 2 + .byte 2 + .byte 0 + .byte 5 + .byte 0 + .byte 11 + .byte 0 + .byte 7 + .byte 1 + .byte 14 + .byte 2 + .byte 12 + .byte 1 + .byte 8 + .byte 3 + .byte 1 + .byte 3 + .byte 3 + .byte 2 + .byte 6 + .byte 0 + .byte 13 + .byte 0 + .byte 11 + .byte 1 + .byte 6 + .byte 3 + .byte 13 + .byte 2 + .byte 10 + .byte 1 + .byte 4 + .byte 3 + .byte 9 + .byte 2 + .byte 2 + .byte 1 + .byte 4 + .byte 2 + .byte 8 + .byte 0 + .byte 1 + .byte 1 + .byte 2 + .byte 2 + .byte 4 + .byte 0 + .byte 9 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 2 + .byte 12 + .byte 0 + .byte 9 + .byte 1 + .byte 2 + .byte 3 + .byte 5 + .byte 2 + .byte 10 + .byte 0 + + .text +.global skinny_128_384_init + .type skinny_128_384_init, @function +skinny_128_384_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,12 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_384_init, .-skinny_128_384_init + + .text +.global skinny_128_384_encrypt + .type skinny_128_384_encrypt, @function +skinny_128_384_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + std Y+33,r18 + std Y+34,r19 + std Y+35,r20 + std Y+36,r21 + ldd r18,Z+36 + ldd r19,Z+37 + ldd r20,Z+38 + ldd r21,Z+39 + std Y+37,r18 + std Y+38,r19 + std Y+39,r20 + std Y+40,r21 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + std Y+41,r18 + std Y+42,r19 + std Y+43,r20 + std Y+44,r21 + ldd r18,Z+44 + ldd r19,Z+45 + ldd r20,Z+46 + ldd r21,Z+47 + std Y+45,r18 + std Y+46,r19 + std Y+47,r20 + std Y+48,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +114: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor 
r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov 
r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if 
defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd 
r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov 
r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + 
elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 
+#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,112 + brne 5721f + rjmp 790f +5721: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if 
defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 114b +790: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + 
st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_encrypt, .-skinny_128_384_encrypt + +.global skinny_128_384_encrypt_tk_full + .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt + + .text +.global skinny_128_384_decrypt + .type skinny_128_384_decrypt, @function +skinny_128_384_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r23 + std Y+2,r2 + std Y+3,r21 + std Y+4,r20 + std Y+5,r3 + std Y+6,r18 + std Y+7,r19 + std Y+8,r22 + std Y+9,r9 + std Y+10,r10 + std Y+11,r7 + std Y+12,r6 + std Y+13,r11 + std Y+14,r4 + std Y+15,r5 + std Y+16,r8 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r23 + std Y+18,r2 + std Y+19,r21 + std Y+20,r20 + std Y+21,r3 + std Y+22,r18 + std Y+23,r19 + std Y+24,r22 + std Y+25,r9 + std Y+26,r10 + std Y+27,r7 + std Y+28,r6 + std Y+29,r11 + std Y+30,r4 + std Y+31,r5 + std Y+32,r8 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r22,Z+36 + ldd r23,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+33,r23 + std Y+34,r2 + std Y+35,r21 + std Y+36,r20 + std Y+37,r3 + std Y+38,r18 + std Y+39,r19 + std Y+40,r22 + std Y+41,r9 + std Y+42,r10 + std Y+43,r7 + std Y+44,r6 + std Y+45,r11 + std Y+46,r4 + std Y+47,r5 + std Y+48,r8 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +122: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + 
ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 122b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,28 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +150: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 150b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 +179: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + 
mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 179b + std Y+33,r12 + std Y+34,r13 + std Y+35,r14 + std Y+36,r15 + std Y+37,r24 + std Y+38,r25 + std Y+39,r16 + std Y+40,r17 + ldi r26,28 + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 +207: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 207b + std Y+41,r12 + std Y+42,r13 + std Y+43,r14 + std Y+44,r15 + std Y+45,r24 + std Y+46,r25 + std Y+47,r16 + std Y+48,r17 + ldi r26,112 +227: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z 
+#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + 
eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z 
+#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + 
elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z 
+#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm 
r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 903f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 227b +903: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_decrypt, .-skinny_128_384_decrypt + + .text +.global skinny_128_256_init + .type skinny_128_256_init, @function +skinny_128_256_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,8 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_256_init, .-skinny_128_256_init + + .text +.global skinny_128_256_encrypt + .type skinny_128_256_encrypt, @function +skinny_128_256_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std 
Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +82: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 
+#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov 
r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor 
r20,r0 + ldd r0,Y+32 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z 
+#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,96 + breq 594f + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld 
r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 82b +594: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_encrypt, .-skinny_128_256_encrypt + +.global skinny_128_256_encrypt_tk_full + .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt + + .text +.global skinny_128_256_decrypt + .type skinny_128_256_decrypt, @function +skinny_128_256_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + std Y+5,r22 + std Y+6,r23 + std Y+7,r2 + std Y+8,r3 + std Y+9,r4 + std Y+10,r5 + std Y+11,r6 + std Y+12,r7 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + std Y+21,r22 + std Y+22,r23 + std Y+23,r2 + std Y+24,r3 + std Y+25,r4 + std Y+26,r5 + std Y+27,r6 + std Y+28,r7 + std Y+29,r8 + std Y+30,r9 + std Y+31,r10 + std Y+32,r11 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,24 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +90: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 
+#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 90b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,24 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +118: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 118b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,96 +139: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov 
r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov 
r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + 
mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif 
defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi 
r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 651f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 139b +651: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_decrypt, .-skinny_128_256_decrypt + +#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/internal-skinny128.c b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/internal-skinny128.c index 65ba4ed..579ced1 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/internal-skinny128.c +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/internal-skinny128.c @@ -25,6 +25,8 @@ #include "internal-util.h" #include +#if !defined(__AVR__) + STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) { /* This function is used to fast-forward the TK1 tweak value @@ -55,42 +57,33 @@ STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) ((row3 << 24) & 0xFF000000U); } -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - size_t key_len) +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t TK3[4]; uint32_t *schedule; unsigned round; uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || (key_len != 32 && key_len != 48)) - return 0; - +#endif + +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using 
the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); + memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); +#else /* Set the initial states of TK1, TK2, and TK3 */ - if (key_len == 32) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - TK3[0] = le_load_word32(key + 16); - TK3[1] = le_load_word32(key + 20); - TK3[2] = le_load_word32(key + 24); - TK3[3] = le_load_word32(key + 28); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); + TK3[0] = le_load_word32(key + 32); + TK3[1] = le_load_word32(key + 36); + TK3[2] = le_load_word32(key + 40); + TK3[3] = le_load_word32(key + 44); /* Set up the key schedule using TK2 and TK3. TK1 is not added * to the key schedule because we will derive that part of the @@ -116,20 +109,7 @@ int skinny_128_384_init skinny128_LFSR3(TK3[0]); skinny128_LFSR3(TK3[1]); } - return 1; -} - -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_384_encrypt @@ -138,7 +118,13 @@ void skinny_128_384_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -148,14 +134,24 @@ void skinny_128_384_encrypt s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -163,8 +159,15 @@ void skinny_128_384_encrypt skinny128_sbox(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -185,6 +188,16 @@ void 
skinny_128_384_encrypt /* Permute TK1 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_permute_tk(TK3); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -200,7 +213,13 @@ void skinny_128_384_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0x15; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -215,15 +234,47 @@ void skinny_128_384_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Permute TK1 to fast-forward it to the end of the key schedule */ skinny128_fast_forward_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_fast_forward_tk(TK2); + skinny128_fast_forward_tk(TK3); + for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2 and TK3. + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); + skinny128_LFSR3(TK3[2]); + skinny128_LFSR3(TK3[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_inv_permute_tk(TK3); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); + skinny128_LFSR2(TK3[2]); + skinny128_LFSR2(TK3[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -240,8 +291,15 @@ void skinny_128_384_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -259,13 +317,18 @@ void skinny_128_384_decrypt } void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2) { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; uint32_t TK2[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -275,7 +338,7 @@ void skinny_128_384_encrypt_tk2 s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1/TK2 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = 
le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); @@ -284,9 +347,15 @@ void skinny_128_384_encrypt_tk2 TK2[1] = le_load_word32(tk2 + 4); TK2[2] = le_load_word32(tk2 + 8); TK2[3] = le_load_word32(tk2 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -294,8 +363,15 @@ void skinny_128_384_encrypt_tk2 skinny128_sbox(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -319,6 +395,13 @@ void skinny_128_384_encrypt_tk2 skinny128_permute_tk(TK2); skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK3); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -408,33 +491,27 @@ void skinny_128_384_encrypt_tk_full le_store_word32(output + 12, s3); } -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len) +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t *schedule; unsigned round; uint8_t rc; +#endif - /* Validate the parameters */ - if (!ks || !key || (key_len != 16 && key_len != 32)) - return 0; - +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); +#else /* Set the initial states of TK1 and TK2 */ - if (key_len == 16) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); /* Set up the key schedule using TK2. 
TK1 is not added * to the key schedule because we will derive that part of the @@ -457,20 +534,7 @@ int skinny_128_256_init skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); } - return 1; -} - -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_256_encrypt @@ -479,7 +543,12 @@ void skinny_128_256_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -494,18 +563,31 @@ void skinny_128_256_encrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); skinny128_sbox(s2); skinny128_sbox(s3); - /* Apply the subkey for this round */ + /* XOR the round constant and the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -524,8 +606,15 @@ void skinny_128_256_encrypt s1 = s0; s0 = temp; - /* Permute TK1 for the next round */ + /* Permute TK1 and TK2 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -541,7 +630,12 @@ void skinny_128_256_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0x09; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -558,12 +652,29 @@ void skinny_128_256_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2. 
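/*
 * Illustrative sketch, not part of the patch: with SKINNY_128_SMALL_SCHEDULE
 * the decryptor has to fast-forward TK2 to the state it holds after the last
 * encryption round.  Each round the schedule applies the TK2 LFSR only to the
 * two rows currently on top, and the tweakey permutation swaps the halves
 * every round, so a full schedule of R rounds puts every byte of TK2 through
 * the LFSR exactly R/2 times -- which is what the "round += 2" loop above does
 * to all four words at once.  SKETCH_LFSR2 below is an assumed expansion of
 * the skinny128_LFSR2 macro (per byte: b7..b0 -> b6..b0, b7^b5), shown only to
 * make the per-byte update explicit.
 */
#define SKETCH_LFSR2(x) \
    ((x) = (((x) << 1) & 0xFEFEFEFEU) ^ ((((x) >> 7) ^ ((x) >> 5)) & 0x01010101U))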
+ skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -580,8 +691,15 @@ void skinny_128_256_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -670,142 +788,14 @@ void skinny_128_256_encrypt_tk_full le_store_word32(output + 12, s3); } -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len) -{ - uint32_t TK1[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || key_len != 16) - return 0; - - /* Set the initial state of TK1 */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); +#else /* __AVR__ */ - /* Set up the key schedule using TK1 */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK1[0] ^ (rc & 0x0F); - schedule[1] = TK1[1] ^ (rc >> 4); - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); - } - return 1; -} - -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) +void skinny_128_384_encrypt_tk2 + (skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, const unsigned char *tk2) { - uint32_t s0, s1, s2, s3; - const uint32_t *schedule = ks->k; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); + memcpy(ks->TK2, tk2, 16); + skinny_128_384_encrypt(ks, output, input); } -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - const uint32_t *schedule; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_128_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule -= 2) { - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} +#endif /* __AVR__ */ diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/internal-skinny128.h b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/internal-skinny128.h index 76b34f5..2bfda3c 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/internal-skinny128.h +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/internal-skinny128.h @@ -39,6 +39,16 @@ extern "C" { #endif /** + * \def SKINNY_128_SMALL_SCHEDULE + * \brief Defined to 1 to use the small key schedule version of SKINNY-128. + */ +#if defined(__AVR__) +#define SKINNY_128_SMALL_SCHEDULE 1 +#else +#define SKINNY_128_SMALL_SCHEDULE 0 +#endif + +/** * \brief Size of a block for SKINNY-128 block ciphers. */ #define SKINNY_128_BLOCK_SIZE 16 @@ -56,8 +66,16 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; + + /** TK3 for the small key schedule */ + uint8_t TK3[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_384_ROUNDS * 2]; +#endif } skinny_128_384_key_schedule_t; @@ -66,29 +84,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 32 or 48, - * where 32 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-384. 
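/*
 * Illustrative note, not part of the patch: the round-constant update that the
 * small-schedule paths above evaluate on the fly,
 *     rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;  rc &= 0x3F;
 * is a 6-bit LFSR that, starting from rc = 0, yields 0x01, 0x03, 0x07, 0x0F,
 * 0x1F, 0x3E, ...  The low nibble is XORed into the first row of the state and
 * the top two bits into the second row, matching the (rc & 0x0F) / (rc >> 4)
 * split in the code.  sketch_next_rc is a hypothetical helper, shown only to
 * restate the recurrence.
 */
static unsigned char sketch_next_rc(unsigned char rc)
{
    rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
    return rc & 0x3F;   /* keep only the 6 LFSR bits */
}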
- * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); /** * \brief Encrypts a 128-bit block with SKINNY-128-384. @@ -133,9 +131,12 @@ void skinny_128_384_decrypt * This version is useful when both TK1 and TK2 change from block to block. * When the key is initialized with skinny_128_384_init(), the TK2 part of * the key value should be set to zero. + * + * \note Some versions of this function may modify the key schedule to + * copy tk2 into place. */ void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2); /** @@ -170,8 +171,13 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_256_ROUNDS * 2]; +#endif } skinny_128_256_key_schedule_t; @@ -180,29 +186,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16 or 32, - * where 16 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); /** * \brief Encrypts a 128-bit block with SKINNY-128-256. @@ -251,63 +237,6 @@ void skinny_128_256_encrypt_tk_full (const unsigned char key[32], unsigned char *output, const unsigned char *input); -/** - * \brief Number of rounds for SKINNY-128-128. - */ -#define SKINNY_128_128_ROUNDS 40 - -/** - * \brief Structure of the key schedule for SKINNY-128-128. - */ -typedef struct -{ - /** Words of the key schedule */ - uint32_t k[SKINNY_128_128_ROUNDS * 2]; - -} skinny_128_128_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-128. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. 
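/*
 * Usage sketch, not part of the patch: the old two-step API
 *     skinny_128_384_init(ks, key, 32);
 *     skinny_128_384_set_tweak(ks, tweak, 16);
 * disappears with this change; callers now pack TK1 || TK2 || TK3 into one
 * 48-byte buffer and call the fixed-size initialiser, as the skinny-aead.c
 * hunk further below does with its zero TK1, nonce and key.  The helper name
 * below is hypothetical and assumes <string.h> and internal-skinny128.h are
 * included.
 */
static void sketch_init_384_with_tweak(skinny_128_384_key_schedule_t *ks,
                                       const unsigned char key[32],
                                       const unsigned char tweak[16])
{
    unsigned char k[48];
    memcpy(k,      tweak,    16);   /* TK1: the tweakable part (old set_tweak) */
    memcpy(k + 16, key,      16);   /* TK2: first half of the old 32-byte key  */
    memcpy(k + 32, key + 16, 16);   /* TK3: second half of the old 32-byte key */
    skinny_128_384_init(ks, k);     /* new initialiser takes the full 48 bytes */
}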
- * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - #ifdef __cplusplus } #endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/internal-util.h b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/internal-util.h +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/skinny-aead.c b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/skinny-aead.c index 2bb37e9..7558527 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/skinny-aead.c +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk296128v1/rhys/skinny-aead.c @@ -105,11 +105,12 @@ static void skinny_aead_128_384_init (skinny_128_384_key_schedule_t *ks, const unsigned char *key, const unsigned char *nonce, unsigned nonce_len) { - unsigned char k[32]; - memcpy(k, nonce, nonce_len); - memset(k + nonce_len, 0, 16 - nonce_len); - memcpy(k + 16, key, 16); - skinny_128_384_init(ks, k, 32); + unsigned char k[48]; + memset(k, 0, 16); + memcpy(k + 16, nonce, nonce_len); + memset(k + 16 + nonce_len, 0, 16 - nonce_len); + memcpy(k + 32, key, 16); + skinny_128_384_init(ks, k); } /** @@ -136,7 +137,7 @@ static void skinny_aead_128_384_init #define skinny_aead_128_384_update_lfsr(lfsr) \ do { \ uint8_t feedback = ((lfsr) & (1ULL << 63)) ? 
0x1B : 0x00; \ - (lfsr) = ((lfsr) << 1) | feedback; \ + (lfsr) = ((lfsr) << 1) ^ feedback; \ } while (0) /** @@ -520,7 +521,7 @@ static void skinny_aead_128_256_init memset(k, 0, 16 - nonce_len); memcpy(k + 16 - nonce_len, nonce, nonce_len); memcpy(k + 16, key, 16); - skinny_128_256_init(ks, k, 32); + skinny_128_256_init(ks, k); } /** diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/aead-common.c b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/aead-common.h b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. 
Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/api.h b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/api.h deleted file mode 100644 index 32c9622..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 12 -#define CRYPTO_ABYTES 8 -#define CRYPTO_NOOVERLAP 1 diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/encrypt.c b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/encrypt.c deleted file mode 100644 index d304a40..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "skinny-aead.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return skinny_aead_m6_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return skinny_aead_m6_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-skinny128-avr.S b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-skinny128-avr.S deleted file mode 100644 index d342cd5..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-skinny128-avr.S +++ /dev/null @@ -1,10099 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 256 -table_0: - .byte 101 - .byte 76 - .byte 106 - .byte 66 - .byte 75 - .byte 99 - .byte 67 - .byte 107 - .byte 85 - .byte 117 - .byte 90 - .byte 122 - .byte 83 - .byte 115 - .byte 91 - .byte 123 - .byte 53 - .byte 140 - .byte 58 - .byte 129 - .byte 137 - .byte 51 - .byte 128 - .byte 59 - .byte 149 - .byte 37 - .byte 152 - .byte 42 - .byte 144 - .byte 35 - .byte 153 - .byte 43 - .byte 229 - .byte 204 - .byte 232 - .byte 193 - .byte 201 - .byte 224 - .byte 192 - .byte 233 - .byte 213 - .byte 245 - .byte 216 - .byte 248 - .byte 208 - .byte 240 - .byte 217 - .byte 249 - .byte 165 - .byte 28 - .byte 168 - .byte 18 - .byte 27 - .byte 160 - .byte 19 - .byte 169 - .byte 5 - .byte 181 - .byte 10 - .byte 184 - .byte 3 - .byte 176 - .byte 11 - .byte 185 - .byte 50 - .byte 136 - .byte 60 - .byte 133 - .byte 141 - .byte 52 - .byte 132 - .byte 61 - .byte 145 - .byte 34 - .byte 156 - .byte 44 - .byte 148 - .byte 36 - .byte 157 - .byte 45 - .byte 98 - .byte 74 - .byte 108 - .byte 69 - .byte 77 - .byte 100 - .byte 68 - .byte 109 - .byte 82 - .byte 114 - .byte 92 - .byte 124 - .byte 84 - .byte 116 - .byte 93 - .byte 125 - .byte 161 - .byte 26 - .byte 172 - .byte 21 - .byte 29 - .byte 164 - .byte 20 - .byte 173 - .byte 2 - .byte 177 - .byte 12 - .byte 188 - .byte 4 - .byte 180 - .byte 13 - 
.byte 189 - .byte 225 - .byte 200 - .byte 236 - .byte 197 - .byte 205 - .byte 228 - .byte 196 - .byte 237 - .byte 209 - .byte 241 - .byte 220 - .byte 252 - .byte 212 - .byte 244 - .byte 221 - .byte 253 - .byte 54 - .byte 142 - .byte 56 - .byte 130 - .byte 139 - .byte 48 - .byte 131 - .byte 57 - .byte 150 - .byte 38 - .byte 154 - .byte 40 - .byte 147 - .byte 32 - .byte 155 - .byte 41 - .byte 102 - .byte 78 - .byte 104 - .byte 65 - .byte 73 - .byte 96 - .byte 64 - .byte 105 - .byte 86 - .byte 118 - .byte 88 - .byte 120 - .byte 80 - .byte 112 - .byte 89 - .byte 121 - .byte 166 - .byte 30 - .byte 170 - .byte 17 - .byte 25 - .byte 163 - .byte 16 - .byte 171 - .byte 6 - .byte 182 - .byte 8 - .byte 186 - .byte 0 - .byte 179 - .byte 9 - .byte 187 - .byte 230 - .byte 206 - .byte 234 - .byte 194 - .byte 203 - .byte 227 - .byte 195 - .byte 235 - .byte 214 - .byte 246 - .byte 218 - .byte 250 - .byte 211 - .byte 243 - .byte 219 - .byte 251 - .byte 49 - .byte 138 - .byte 62 - .byte 134 - .byte 143 - .byte 55 - .byte 135 - .byte 63 - .byte 146 - .byte 33 - .byte 158 - .byte 46 - .byte 151 - .byte 39 - .byte 159 - .byte 47 - .byte 97 - .byte 72 - .byte 110 - .byte 70 - .byte 79 - .byte 103 - .byte 71 - .byte 111 - .byte 81 - .byte 113 - .byte 94 - .byte 126 - .byte 87 - .byte 119 - .byte 95 - .byte 127 - .byte 162 - .byte 24 - .byte 174 - .byte 22 - .byte 31 - .byte 167 - .byte 23 - .byte 175 - .byte 1 - .byte 178 - .byte 14 - .byte 190 - .byte 7 - .byte 183 - .byte 15 - .byte 191 - .byte 226 - .byte 202 - .byte 238 - .byte 198 - .byte 207 - .byte 231 - .byte 199 - .byte 239 - .byte 210 - .byte 242 - .byte 222 - .byte 254 - .byte 215 - .byte 247 - .byte 223 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 256 -table_1: - .byte 172 - .byte 232 - .byte 104 - .byte 60 - .byte 108 - .byte 56 - .byte 168 - .byte 236 - .byte 170 - .byte 174 - .byte 58 - .byte 62 - .byte 106 - .byte 110 - .byte 234 - .byte 238 - .byte 166 - .byte 163 - .byte 51 - .byte 54 - .byte 102 - .byte 99 - .byte 227 - .byte 230 - .byte 225 - .byte 164 - .byte 97 - .byte 52 - .byte 49 - .byte 100 - .byte 161 - .byte 228 - .byte 141 - .byte 201 - .byte 73 - .byte 29 - .byte 77 - .byte 25 - .byte 137 - .byte 205 - .byte 139 - .byte 143 - .byte 27 - .byte 31 - .byte 75 - .byte 79 - .byte 203 - .byte 207 - .byte 133 - .byte 192 - .byte 64 - .byte 21 - .byte 69 - .byte 16 - .byte 128 - .byte 197 - .byte 130 - .byte 135 - .byte 18 - .byte 23 - .byte 66 - .byte 71 - .byte 194 - .byte 199 - .byte 150 - .byte 147 - .byte 3 - .byte 6 - .byte 86 - .byte 83 - .byte 211 - .byte 214 - .byte 209 - .byte 148 - .byte 81 - .byte 4 - .byte 1 - .byte 84 - .byte 145 - .byte 212 - .byte 156 - .byte 216 - .byte 88 - .byte 12 - .byte 92 - .byte 8 - .byte 152 - .byte 220 - .byte 154 - .byte 158 - .byte 10 - .byte 14 - .byte 90 - .byte 94 - .byte 218 - .byte 222 - .byte 149 - .byte 208 - .byte 80 - .byte 5 - .byte 85 - .byte 0 - .byte 144 - .byte 213 - .byte 146 - .byte 151 - .byte 2 - .byte 7 - .byte 82 - .byte 87 - .byte 210 - .byte 215 - .byte 157 - .byte 217 - .byte 89 - .byte 13 - .byte 93 - .byte 9 - .byte 153 - .byte 221 - .byte 155 - .byte 159 - .byte 11 - .byte 15 - .byte 91 - .byte 95 - .byte 219 - .byte 223 - .byte 22 - .byte 19 - .byte 131 - .byte 134 - .byte 70 - .byte 67 - .byte 195 - .byte 198 - .byte 65 - .byte 20 - .byte 193 - .byte 132 - .byte 17 - .byte 68 - .byte 129 - .byte 196 - .byte 28 - .byte 72 - .byte 200 - .byte 140 - .byte 76 - .byte 24 - .byte 136 - .byte 204 - .byte 26 - .byte 
30 - .byte 138 - .byte 142 - .byte 74 - .byte 78 - .byte 202 - .byte 206 - .byte 53 - .byte 96 - .byte 224 - .byte 165 - .byte 101 - .byte 48 - .byte 160 - .byte 229 - .byte 50 - .byte 55 - .byte 162 - .byte 167 - .byte 98 - .byte 103 - .byte 226 - .byte 231 - .byte 61 - .byte 105 - .byte 233 - .byte 173 - .byte 109 - .byte 57 - .byte 169 - .byte 237 - .byte 59 - .byte 63 - .byte 171 - .byte 175 - .byte 107 - .byte 111 - .byte 235 - .byte 239 - .byte 38 - .byte 35 - .byte 179 - .byte 182 - .byte 118 - .byte 115 - .byte 243 - .byte 246 - .byte 113 - .byte 36 - .byte 241 - .byte 180 - .byte 33 - .byte 116 - .byte 177 - .byte 244 - .byte 44 - .byte 120 - .byte 248 - .byte 188 - .byte 124 - .byte 40 - .byte 184 - .byte 252 - .byte 42 - .byte 46 - .byte 186 - .byte 190 - .byte 122 - .byte 126 - .byte 250 - .byte 254 - .byte 37 - .byte 112 - .byte 240 - .byte 181 - .byte 117 - .byte 32 - .byte 176 - .byte 245 - .byte 34 - .byte 39 - .byte 178 - .byte 183 - .byte 114 - .byte 119 - .byte 242 - .byte 247 - .byte 45 - .byte 121 - .byte 249 - .byte 189 - .byte 125 - .byte 41 - .byte 185 - .byte 253 - .byte 43 - .byte 47 - .byte 187 - .byte 191 - .byte 123 - .byte 127 - .byte 251 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_2, @object - .size table_2, 256 -table_2: - .byte 0 - .byte 2 - .byte 4 - .byte 6 - .byte 8 - .byte 10 - .byte 12 - .byte 14 - .byte 16 - .byte 18 - .byte 20 - .byte 22 - .byte 24 - .byte 26 - .byte 28 - .byte 30 - .byte 32 - .byte 34 - .byte 36 - .byte 38 - .byte 40 - .byte 42 - .byte 44 - .byte 46 - .byte 48 - .byte 50 - .byte 52 - .byte 54 - .byte 56 - .byte 58 - .byte 60 - .byte 62 - .byte 65 - .byte 67 - .byte 69 - .byte 71 - .byte 73 - .byte 75 - .byte 77 - .byte 79 - .byte 81 - .byte 83 - .byte 85 - .byte 87 - .byte 89 - .byte 91 - .byte 93 - .byte 95 - .byte 97 - .byte 99 - .byte 101 - .byte 103 - .byte 105 - .byte 107 - .byte 109 - .byte 111 - .byte 113 - .byte 115 - .byte 117 - .byte 119 - .byte 121 - .byte 123 - .byte 125 - .byte 127 - .byte 128 - .byte 130 - .byte 132 - .byte 134 - .byte 136 - .byte 138 - .byte 140 - .byte 142 - .byte 144 - .byte 146 - .byte 148 - .byte 150 - .byte 152 - .byte 154 - .byte 156 - .byte 158 - .byte 160 - .byte 162 - .byte 164 - .byte 166 - .byte 168 - .byte 170 - .byte 172 - .byte 174 - .byte 176 - .byte 178 - .byte 180 - .byte 182 - .byte 184 - .byte 186 - .byte 188 - .byte 190 - .byte 193 - .byte 195 - .byte 197 - .byte 199 - .byte 201 - .byte 203 - .byte 205 - .byte 207 - .byte 209 - .byte 211 - .byte 213 - .byte 215 - .byte 217 - .byte 219 - .byte 221 - .byte 223 - .byte 225 - .byte 227 - .byte 229 - .byte 231 - .byte 233 - .byte 235 - .byte 237 - .byte 239 - .byte 241 - .byte 243 - .byte 245 - .byte 247 - .byte 249 - .byte 251 - .byte 253 - .byte 255 - .byte 1 - .byte 3 - .byte 5 - .byte 7 - .byte 9 - .byte 11 - .byte 13 - .byte 15 - .byte 17 - .byte 19 - .byte 21 - .byte 23 - .byte 25 - .byte 27 - .byte 29 - .byte 31 - .byte 33 - .byte 35 - .byte 37 - .byte 39 - .byte 41 - .byte 43 - .byte 45 - .byte 47 - .byte 49 - .byte 51 - .byte 53 - .byte 55 - .byte 57 - .byte 59 - .byte 61 - .byte 63 - .byte 64 - .byte 66 - .byte 68 - .byte 70 - .byte 72 - .byte 74 - .byte 76 - .byte 78 - .byte 80 - .byte 82 - .byte 84 - .byte 86 - .byte 88 - .byte 90 - .byte 92 - .byte 94 - .byte 96 - .byte 98 - .byte 100 - .byte 102 - .byte 104 - .byte 106 - .byte 108 - .byte 110 - .byte 112 - .byte 114 - .byte 116 - .byte 118 - .byte 120 - .byte 122 - .byte 124 - .byte 126 - .byte 129 - .byte 131 - .byte 133 - .byte 135 - 
.byte 137 - .byte 139 - .byte 141 - .byte 143 - .byte 145 - .byte 147 - .byte 149 - .byte 151 - .byte 153 - .byte 155 - .byte 157 - .byte 159 - .byte 161 - .byte 163 - .byte 165 - .byte 167 - .byte 169 - .byte 171 - .byte 173 - .byte 175 - .byte 177 - .byte 179 - .byte 181 - .byte 183 - .byte 185 - .byte 187 - .byte 189 - .byte 191 - .byte 192 - .byte 194 - .byte 196 - .byte 198 - .byte 200 - .byte 202 - .byte 204 - .byte 206 - .byte 208 - .byte 210 - .byte 212 - .byte 214 - .byte 216 - .byte 218 - .byte 220 - .byte 222 - .byte 224 - .byte 226 - .byte 228 - .byte 230 - .byte 232 - .byte 234 - .byte 236 - .byte 238 - .byte 240 - .byte 242 - .byte 244 - .byte 246 - .byte 248 - .byte 250 - .byte 252 - .byte 254 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_3, @object - .size table_3, 256 -table_3: - .byte 0 - .byte 128 - .byte 1 - .byte 129 - .byte 2 - .byte 130 - .byte 3 - .byte 131 - .byte 4 - .byte 132 - .byte 5 - .byte 133 - .byte 6 - .byte 134 - .byte 7 - .byte 135 - .byte 8 - .byte 136 - .byte 9 - .byte 137 - .byte 10 - .byte 138 - .byte 11 - .byte 139 - .byte 12 - .byte 140 - .byte 13 - .byte 141 - .byte 14 - .byte 142 - .byte 15 - .byte 143 - .byte 16 - .byte 144 - .byte 17 - .byte 145 - .byte 18 - .byte 146 - .byte 19 - .byte 147 - .byte 20 - .byte 148 - .byte 21 - .byte 149 - .byte 22 - .byte 150 - .byte 23 - .byte 151 - .byte 24 - .byte 152 - .byte 25 - .byte 153 - .byte 26 - .byte 154 - .byte 27 - .byte 155 - .byte 28 - .byte 156 - .byte 29 - .byte 157 - .byte 30 - .byte 158 - .byte 31 - .byte 159 - .byte 160 - .byte 32 - .byte 161 - .byte 33 - .byte 162 - .byte 34 - .byte 163 - .byte 35 - .byte 164 - .byte 36 - .byte 165 - .byte 37 - .byte 166 - .byte 38 - .byte 167 - .byte 39 - .byte 168 - .byte 40 - .byte 169 - .byte 41 - .byte 170 - .byte 42 - .byte 171 - .byte 43 - .byte 172 - .byte 44 - .byte 173 - .byte 45 - .byte 174 - .byte 46 - .byte 175 - .byte 47 - .byte 176 - .byte 48 - .byte 177 - .byte 49 - .byte 178 - .byte 50 - .byte 179 - .byte 51 - .byte 180 - .byte 52 - .byte 181 - .byte 53 - .byte 182 - .byte 54 - .byte 183 - .byte 55 - .byte 184 - .byte 56 - .byte 185 - .byte 57 - .byte 186 - .byte 58 - .byte 187 - .byte 59 - .byte 188 - .byte 60 - .byte 189 - .byte 61 - .byte 190 - .byte 62 - .byte 191 - .byte 63 - .byte 64 - .byte 192 - .byte 65 - .byte 193 - .byte 66 - .byte 194 - .byte 67 - .byte 195 - .byte 68 - .byte 196 - .byte 69 - .byte 197 - .byte 70 - .byte 198 - .byte 71 - .byte 199 - .byte 72 - .byte 200 - .byte 73 - .byte 201 - .byte 74 - .byte 202 - .byte 75 - .byte 203 - .byte 76 - .byte 204 - .byte 77 - .byte 205 - .byte 78 - .byte 206 - .byte 79 - .byte 207 - .byte 80 - .byte 208 - .byte 81 - .byte 209 - .byte 82 - .byte 210 - .byte 83 - .byte 211 - .byte 84 - .byte 212 - .byte 85 - .byte 213 - .byte 86 - .byte 214 - .byte 87 - .byte 215 - .byte 88 - .byte 216 - .byte 89 - .byte 217 - .byte 90 - .byte 218 - .byte 91 - .byte 219 - .byte 92 - .byte 220 - .byte 93 - .byte 221 - .byte 94 - .byte 222 - .byte 95 - .byte 223 - .byte 224 - .byte 96 - .byte 225 - .byte 97 - .byte 226 - .byte 98 - .byte 227 - .byte 99 - .byte 228 - .byte 100 - .byte 229 - .byte 101 - .byte 230 - .byte 102 - .byte 231 - .byte 103 - .byte 232 - .byte 104 - .byte 233 - .byte 105 - .byte 234 - .byte 106 - .byte 235 - .byte 107 - .byte 236 - .byte 108 - .byte 237 - .byte 109 - .byte 238 - .byte 110 - .byte 239 - .byte 111 - .byte 240 - .byte 112 - .byte 241 - .byte 113 - .byte 242 - .byte 114 - .byte 243 - .byte 115 - .byte 244 - .byte 116 - .byte 245 - .byte 117 - 
.byte 246 - .byte 118 - .byte 247 - .byte 119 - .byte 248 - .byte 120 - .byte 249 - .byte 121 - .byte 250 - .byte 122 - .byte 251 - .byte 123 - .byte 252 - .byte 124 - .byte 253 - .byte 125 - .byte 254 - .byte 126 - .byte 255 - .byte 127 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_4, @object - .size table_4, 112 -table_4: - .byte 1 - .byte 0 - .byte 3 - .byte 0 - .byte 7 - .byte 0 - .byte 15 - .byte 0 - .byte 15 - .byte 1 - .byte 14 - .byte 3 - .byte 13 - .byte 3 - .byte 11 - .byte 3 - .byte 7 - .byte 3 - .byte 15 - .byte 2 - .byte 14 - .byte 1 - .byte 12 - .byte 3 - .byte 9 - .byte 3 - .byte 3 - .byte 3 - .byte 7 - .byte 2 - .byte 14 - .byte 0 - .byte 13 - .byte 1 - .byte 10 - .byte 3 - .byte 5 - .byte 3 - .byte 11 - .byte 2 - .byte 6 - .byte 1 - .byte 12 - .byte 2 - .byte 8 - .byte 1 - .byte 0 - .byte 3 - .byte 1 - .byte 2 - .byte 2 - .byte 0 - .byte 5 - .byte 0 - .byte 11 - .byte 0 - .byte 7 - .byte 1 - .byte 14 - .byte 2 - .byte 12 - .byte 1 - .byte 8 - .byte 3 - .byte 1 - .byte 3 - .byte 3 - .byte 2 - .byte 6 - .byte 0 - .byte 13 - .byte 0 - .byte 11 - .byte 1 - .byte 6 - .byte 3 - .byte 13 - .byte 2 - .byte 10 - .byte 1 - .byte 4 - .byte 3 - .byte 9 - .byte 2 - .byte 2 - .byte 1 - .byte 4 - .byte 2 - .byte 8 - .byte 0 - .byte 1 - .byte 1 - .byte 2 - .byte 2 - .byte 4 - .byte 0 - .byte 9 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 2 - .byte 12 - .byte 0 - .byte 9 - .byte 1 - .byte 2 - .byte 3 - .byte 5 - .byte 2 - .byte 10 - .byte 0 - - .text -.global skinny_128_384_init - .type skinny_128_384_init, @function -skinny_128_384_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,12 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_384_init, .-skinny_128_384_init - - .text -.global skinny_128_384_encrypt - .type skinny_128_384_encrypt, @function -skinny_128_384_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - std Y+33,r18 - std Y+34,r19 - std Y+35,r20 - std Y+36,r21 - ldd r18,Z+36 - ldd r19,Z+37 - ldd r20,Z+38 - ldd r21,Z+39 - std Y+37,r18 - std Y+38,r19 - std Y+39,r20 - std Y+40,r21 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - std 
Y+41,r18 - std Y+42,r19 - std Y+43,r20 - std Y+44,r21 - ldd r18,Z+44 - ldd r19,Z+45 - ldd r20,Z+46 - ldd r21,Z+47 - std Y+45,r18 - std Y+46,r19 - std Y+47,r20 - std Y+48,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -114: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z 
-#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm 
r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif 
defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif 
defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm 
r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - 
eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,112 - brne 5721f - rjmp 790f -5721: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif 
defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 114b -790: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_encrypt, .-skinny_128_384_encrypt - -.global skinny_128_384_encrypt_tk_full - .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt - - .text -.global skinny_128_384_decrypt - .type skinny_128_384_decrypt, @function -skinny_128_384_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r23 - std Y+2,r2 - std Y+3,r21 - std Y+4,r20 - std Y+5,r3 - std Y+6,r18 - std Y+7,r19 - std Y+8,r22 - std Y+9,r9 - std Y+10,r10 - std Y+11,r7 - std Y+12,r6 - std Y+13,r11 - std Y+14,r4 - std Y+15,r5 - std Y+16,r8 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r23 - std Y+18,r2 - std Y+19,r21 - std Y+20,r20 - std Y+21,r3 - std Y+22,r18 - std Y+23,r19 - std Y+24,r22 - std Y+25,r9 - std Y+26,r10 - std Y+27,r7 - std Y+28,r6 - std Y+29,r11 - std Y+30,r4 - std Y+31,r5 - std Y+32,r8 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r22,Z+36 - ldd r23,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+33,r23 - std Y+34,r2 - std Y+35,r21 - std Y+36,r20 - std Y+37,r3 - std Y+38,r18 - std Y+39,r19 - std Y+40,r22 - std Y+41,r9 - std Y+42,r10 - std Y+43,r7 - std Y+44,r6 - std Y+45,r11 - std Y+46,r4 - std Y+47,r5 - std Y+48,r8 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -122: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if 
defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 122b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,28 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -150: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 150b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 -179: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm 
r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 179b - std Y+33,r12 - std Y+34,r13 - std Y+35,r14 - std Y+36,r15 - std Y+37,r24 - std Y+38,r25 - std Y+39,r16 - std Y+40,r17 - ldi r26,28 - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 -207: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 207b - std Y+41,r12 - std Y+42,r13 - std Y+43,r14 - std Y+44,r15 - std Y+45,r24 - std Y+46,r25 - std Y+47,r16 - std Y+48,r17 - ldi r26,112 -227: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z 
-#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - 
ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd 
r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - 
eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else 
- lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if 
defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld 
r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out 
_SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z 
-#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 903f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 227b -903: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_decrypt, .-skinny_128_384_decrypt - - .text -.global skinny_128_256_init - .type skinny_128_256_init, @function -skinny_128_256_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,8 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_256_init, .-skinny_128_256_init - - .text -.global skinny_128_256_encrypt - .type skinny_128_256_encrypt, @function -skinny_128_256_encrypt: - push r28 - 
push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -82: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - 
lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if 
defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm 
r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 
- std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - 
mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,96 - breq 594f - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 82b -594: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_encrypt, .-skinny_128_256_encrypt - -.global skinny_128_256_encrypt_tk_full - .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt - - .text -.global skinny_128_256_decrypt - .type skinny_128_256_decrypt, @function -skinny_128_256_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - std Y+5,r22 - std Y+6,r23 - std Y+7,r2 - std Y+8,r3 - std Y+9,r4 - std Y+10,r5 - std Y+11,r6 - std Y+12,r7 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - std Y+21,r22 - std Y+22,r23 - std Y+23,r2 - std Y+24,r3 - std Y+25,r4 - std Y+26,r5 - std Y+27,r6 - std Y+28,r7 - std Y+29,r8 - std Y+30,r9 - std Y+31,r10 - std Y+32,r11 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,24 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -90: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif 
defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 90b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,24 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -118: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 118b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,96 -139: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm 
- mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif 
- mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldi 
r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std 
Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif 
defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 651f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 139b -651: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_decrypt, .-skinny_128_256_decrypt - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-skinny128.c b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-skinny128.c deleted file mode 100644 index 579ced1..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-skinny128.c +++ /dev/null @@ -1,801 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-skinny128.h" -#include "internal-skinnyutil.h" -#include "internal-util.h" -#include - -#if !defined(__AVR__) - -STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) -{ - /* This function is used to fast-forward the TK1 tweak value - * to the value at the end of the key schedule for decryption. - * - * The tweak permutation repeats every 16 rounds, so SKINNY-128-256 - * with 48 rounds does not need any fast forwarding applied. - * SKINNY-128-128 with 40 rounds and SKINNY-128-384 with 56 rounds - * are equivalent to applying the permutation 8 times: - * - * PT*8 = [5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12] - */ - uint32_t row0 = tk[0]; - uint32_t row1 = tk[1]; - uint32_t row2 = tk[2]; - uint32_t row3 = tk[3]; - tk[0] = ((row1 >> 8) & 0x0000FFFFU) | - ((row0 >> 8) & 0x00FF0000U) | - ((row0 << 8) & 0xFF000000U); - tk[1] = ((row1 >> 24) & 0x000000FFU) | - ((row0 << 8) & 0x00FFFF00U) | - ((row1 << 24) & 0xFF000000U); - tk[2] = ((row3 >> 8) & 0x0000FFFFU) | - ((row2 >> 8) & 0x00FF0000U) | - ((row2 << 8) & 0xFF000000U); - tk[3] = ((row3 >> 24) & 0x000000FFU) | - ((row2 << 8) & 0x00FFFF00U) | - ((row3 << 24) & 0xFF000000U); -} - -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); - memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); -#else - /* Set the initial states of TK1, TK2, and TK3 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Set up the key schedule using TK2 and TK3. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. 
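
/*
 * Illustrative sketch, not part of the deleted file or the patch itself:
 * the values XORed into schedule[0] and schedule[1] in the key-schedule
 * loop above come from a 6-bit LFSR that is stepped once per round with
 * the same update used below.  This standalone program only reproduces
 * that update so the constants can be inspected in isolation; the first
 * eight printed values should match the start of the published SKINNY
 * round-constant table (01, 03, 07, 0F, 1F, 3E, 3D, 3B).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t rc = 0;
    for (int round = 0; round < 8; ++round) {
        /* Shift left, feed back bit5 XOR bit4 XOR 1, keep 6 bits. */
        rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
        rc &= 0x3F;
        /* The low nibble goes into the first schedule word and the high
         * bits into the second, as in the loop above. */
        printf("round %d: rc=0x%02X (low=0x%X, high=0x%X)\n",
               round, (unsigned)rc, (unsigned)(rc & 0x0F), (unsigned)(rc >> 4));
    }
    return 0;
}
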
- * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ TK3[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ TK3[1] ^ (rc >> 4); - - /* Permute TK2 and TK3 for the next round */ - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - - /* Apply the LFSR's to TK2 and TK3 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } -#endif -} - -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0x15; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Permute TK1 to fast-forward it to the end of the key schedule */ - skinny128_fast_forward_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_fast_forward_tk(TK2); - skinny128_fast_forward_tk(TK3); - for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
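
/*
 * Illustrative sketch, not part of the deleted file or the patch itself:
 * the ShiftRows step in the rounds above rotates each state word *left*
 * (leftRotate8/16/24) in order to move the cells of that row to the
 * *right*, because each row is packed little-endian into a 32-bit word.
 * The check below packs a row, rotates the word left by 8 bits, and
 * unpacks it again; it prints 40 10 20 30, i.e. every cell moved one
 * position to the right with wrap-around.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t left_rotate8(uint32_t x)
{
    return (x << 8) | (x >> 24);
}

int main(void)
{
    const uint8_t row[4] = {0x10, 0x20, 0x30, 0x40};
    /* Pack little-endian, the same byte order le_load_word32() uses. */
    uint32_t w = (uint32_t)row[0] | ((uint32_t)row[1] << 8) |
                 ((uint32_t)row[2] << 16) | ((uint32_t)row[3] << 24);
    w = left_rotate8(w);
    /* Unpack little-endian to see where each cell ended up. */
    printf("%02X %02X %02X %02X\n",
           (unsigned)(w & 0xFF), (unsigned)((w >> 8) & 0xFF),
           (unsigned)((w >> 16) & 0xFF), (unsigned)(w >> 24));
    return 0;
}
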
- skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - skinny128_LFSR3(TK3[2]); - skinny128_LFSR3(TK3[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_inv_permute_tk(TK3); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); - skinny128_LFSR2(TK3[2]); - skinny128_LFSR2(TK3[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); - TK2[0] = le_load_word32(tk2); - TK2[1] = le_load_word32(tk2 + 4); - TK2[2] = le_load_word32(tk2 + 8); - TK2[3] = le_load_word32(tk2 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; - s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK3); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); -#else - /* Set the initial states of TK1 and TK2 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Set up the key schedule using TK2. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ (rc >> 4); - - /* Permute TK2 for the next round */ - skinny128_permute_tk(TK2); - - /* Apply the LFSR to TK2 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } -#endif -} - -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0x09; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1. 
- * There is no need to fast-forward TK1 because the value at - * the end of the key schedule is the same as at the start */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2. - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -#else /* __AVR__ */ - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - memcpy(ks->TK2, tk2, 16); - skinny_128_384_encrypt(ks, output, input); -} - -#endif /* __AVR__ */ diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-skinny128.h b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-skinny128.h deleted file mode 100644 index 2bfda3c..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-skinny128.h +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNY128_H -#define LW_INTERNAL_SKINNY128_H - -/** - * \file internal-skinny128.h - * \brief SKINNY-128 block cipher family. - * - * References: https://eprint.iacr.org/2016/660.pdf, - * https://sites.google.com/site/skinnycipher/ - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \def SKINNY_128_SMALL_SCHEDULE - * \brief Defined to 1 to use the small key schedule version of SKINNY-128. - */ -#if defined(__AVR__) -#define SKINNY_128_SMALL_SCHEDULE 1 -#else -#define SKINNY_128_SMALL_SCHEDULE 0 -#endif - -/** - * \brief Size of a block for SKINNY-128 block ciphers. - */ -#define SKINNY_128_BLOCK_SIZE 16 - -/** - * \brief Number of rounds for SKINNY-128-384. - */ -#define SKINNY_128_384_ROUNDS 56 - -/** - * \brief Structure of the key schedule for SKINNY-128-384. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; - - /** TK3 for the small key schedule */ - uint8_t TK3[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_384_ROUNDS * 2]; -#endif - -} skinny_128_384_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-384. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and an explicitly - * provided TK2 value. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tk2 TK2 value that should be updated on the fly. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when both TK1 and TK2 change from block to block. - * When the key is initialized with skinny_128_384_init(), the TK2 part of - * the key value should be set to zero. - * - * \note Some versions of this function may modify the key schedule to - * copy tk2 into place. - */ -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and a - * fully specified tweakey value. - * - * \param key Points to the 384-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-384 but - * more memory-efficient. - */ -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input); - -/** - * \brief Number of rounds for SKINNY-128-256. - */ -#define SKINNY_128_256_ROUNDS 48 - -/** - * \brief Structure of the key schedule for SKINNY-128-256. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_256_ROUNDS * 2]; -#endif - -} skinny_128_256_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256 and a - * fully specified tweakey value. - * - * \param key Points to the 256-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-256 but - * more memory-efficient. - */ -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-skinnyutil.h b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-skinnyutil.h deleted file mode 100644 index 83136cb..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-skinnyutil.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNYUTIL_H -#define LW_INTERNAL_SKINNYUTIL_H - -/** - * \file internal-skinnyutil.h - * \brief Utilities to help implement SKINNY and its variants. - */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @cond skinnyutil */ - -/* Utilities for implementing SKINNY-128 */ - -#define skinny128_LFSR2(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ - (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ - } while (0) - - -#define skinny128_LFSR3(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ - (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) -#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) - -#define skinny128_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint32_t row2 = tk[2]; \ - uint32_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 16) | (row3 >> 16); \ - tk[0] = ((row2 >> 8) & 0x000000FFU) | \ - ((row2 << 16) & 0x00FF0000U) | \ - ( row3 & 0xFF00FF00U); \ - tk[1] = ((row2 >> 16) & 0x000000FFU) | \ - (row2 & 0xFF000000U) | \ - ((row3 << 8) & 0x0000FF00U) | \ - ( row3 & 0x00FF0000U); \ - } while (0) - -#define skinny128_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint32_t row0 = tk[0]; \ - uint32_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 >> 16) & 0x000000FFU) | \ - ((row0 << 8) & 0x0000FF00U) | \ - ((row1 << 16) & 0x00FF0000U) | \ - ( row1 & 0xFF000000U); \ - tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ - ((row0 << 16) & 0xFF000000U) | \ - ((row1 >> 16) & 0x000000FFU) | \ - ((row1 << 8) & 0x00FF0000U); \ - } while (0) - -/* - * Apply the SKINNY sbox. The original version from the specification is - * equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE(x) - * ((((x) & 0x01010101U) << 2) | - * (((x) & 0x06060606U) << 5) | - * (((x) & 0x20202020U) >> 5) | - * (((x) & 0xC8C8C8C8U) >> 2) | - * (((x) & 0x10101010U) >> 1)) - * - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * return SBOX_SWAP(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. 
- */ -#define skinny128_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ - y = (((x >> 5) & (x << 1)) & 0x04040404U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ - x = ((x & 0x08080808U) << 1) | \ - ((x & 0x32323232U) << 2) | \ - ((x & 0x01010101U) << 5) | \ - ((x & 0x80808080U) >> 6) | \ - ((x & 0x40404040U) >> 4) | \ - ((x & 0x04040404U) >> 2); \ -} while (0) - -/* - * Apply the inverse of the SKINNY sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE_INV(x) - * ((((x) & 0x08080808U) << 1) | - * (((x) & 0x32323232U) << 2) | - * (((x) & 0x01010101U) << 5) | - * (((x) & 0xC0C0C0C0U) >> 5) | - * (((x) & 0x04040404U) >> 2)) - * - * x = SBOX_SWAP(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_inv_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ - x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ - y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ - x = ((x & 0x01010101U) << 2) | \ - ((x & 0x04040404U) << 4) | \ - ((x & 0x02020202U) << 6) | \ - ((x & 0x20202020U) >> 5) | \ - ((x & 0xC8C8C8C8U) >> 2) | \ - ((x & 0x10101010U) >> 1); \ -} while (0) - -/* Utilities for implementing SKINNY-64 */ - -#define skinny64_LFSR2(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ - } while (0) - -#define skinny64_LFSR3(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) -#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) - -#define skinny64_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint16_t row2 = tk[2]; \ - uint16_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 8) | (row3 >> 8); \ - tk[0] = ((row2 << 4) & 0xF000U) | \ - ((row2 >> 8) & 0x00F0U) | \ - ( row3 & 0x0F0FU); \ - tk[1] = ((row2 << 8) & 0xF000U) | \ - ((row3 >> 4) & 0x0F00U) | \ - ( row3 & 0x00F0U) | \ - ( row2 & 0x000FU); \ - } while (0) - 
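/* Editor's aside (illustrative sketch, not part of the patched file):
 * skinny128_permute_tk and skinny64_permute_tk above both implement the
 * same cell permutation PT = [9,15,8,13,10,14,12,11,0,1,2,3,4,5,6,7] in a
 * bit-sliced, row-packed form. A minimal cell-level sketch of what they
 * compute, assuming one tweakey cell per byte purely for readability
 * (the helper name and layout are hypothetical, not from the source):
 *
 *     static const unsigned char PT[16] = {
 *         9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7
 *     };
 *
 *     static void skinny_permute_cells_sketch
 *         (unsigned char out[16], const unsigned char in[16])
 *     {
 *         int i;
 *         for (i = 0; i < 16; ++i)
 *             out[i] = in[PT[i]];   // new cell i takes old cell PT[i]
 *     }
 *
 * The inverse macro below (skinny64_inv_permute_tk) undoes this by
 * applying the inverse index table, as noted in its PT' comment. */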
-#define skinny64_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint16_t row0 = tk[0]; \ - uint16_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 << 8) & 0xF000U) | \ - ((row0 >> 4) & 0x0F00U) | \ - ((row1 >> 8) & 0x00F0U) | \ - ( row1 & 0x000FU); \ - tk[3] = ((row1 << 8) & 0xF000U) | \ - ((row0 << 8) & 0x0F00U) | \ - ((row1 >> 4) & 0x00F0U) | \ - ((row0 >> 8) & 0x000FU); \ - } while (0) - -/* - * Apply the SKINNY-64 sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT(x) - * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_SHIFT steps to be performed with one final rotation. - * This reduces the number of required shift operations from 14 to 10. - * - * We can further reduce the number of NOT operations from 4 to 2 - * using the technique from https://github.com/kste/skinny_avx to - * convert NOR-XOR operations into AND-XOR operations by converting - * the S-box into its NOT-inverse. - */ -#define skinny64_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ - x = ~x; \ - x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ -} while (0) - -/* - * Apply the inverse of the SKINNY-64 sbox. The original version - * from the specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT_INV(x) - * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * return SBOX_MIX(x); - */ -#define skinny64_inv_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = ~x; \ - x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ -} while (0) - -/** @endcond */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-util.h b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - 
(ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. 
This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/skinny-aead.c b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/skinny-aead.c deleted file mode 100644 index 7558527..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/skinny-aead.c +++ /dev/null @@ -1,804 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
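The rotation helpers removed from internal-util.h above all reduce to the same shift-and-OR pattern. As a rough sketch of what the 64-bit forms compute (the function names here are illustrative, and the rotation count is assumed to lie between 1 and 63, which is how the fixed-count wrappers use them):

#include <stdint.h>

/* Sketch of the generic 64-bit rotations; valid for 1 <= bits <= 63. */
static inline uint64_t rotl64(uint64_t x, unsigned bits)
{
    return (x << bits) | (x >> (64 - bits));
}

static inline uint64_t rotr64(uint64_t x, unsigned bits)
{
    return (x >> bits) | (x << (64 - bits));
}

/* Rotating left and then right by the same count returns the input, e.g.
 * rotr64(rotl64(0x0123456789ABCDEFULL, 13), 13) == 0x0123456789ABCDEFULL. */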
- */ - -#include "skinny-aead.h" -#include "internal-skinny128.h" -#include "internal-util.h" -#include - -aead_cipher_t const skinny_aead_m1_cipher = { - "SKINNY-AEAD-M1", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M1_NONCE_SIZE, - SKINNY_AEAD_M1_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m1_encrypt, - skinny_aead_m1_decrypt -}; - -aead_cipher_t const skinny_aead_m2_cipher = { - "SKINNY-AEAD-M2", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M2_NONCE_SIZE, - SKINNY_AEAD_M2_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m2_encrypt, - skinny_aead_m2_decrypt -}; - -aead_cipher_t const skinny_aead_m3_cipher = { - "SKINNY-AEAD-M3", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M3_NONCE_SIZE, - SKINNY_AEAD_M3_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m3_encrypt, - skinny_aead_m3_decrypt -}; - -aead_cipher_t const skinny_aead_m4_cipher = { - "SKINNY-AEAD-M4", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M4_NONCE_SIZE, - SKINNY_AEAD_M4_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m4_encrypt, - skinny_aead_m4_decrypt -}; - -aead_cipher_t const skinny_aead_m5_cipher = { - "SKINNY-AEAD-M5", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M5_NONCE_SIZE, - SKINNY_AEAD_M5_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m5_encrypt, - skinny_aead_m5_decrypt -}; - -aead_cipher_t const skinny_aead_m6_cipher = { - "SKINNY-AEAD-M6", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M6_NONCE_SIZE, - SKINNY_AEAD_M6_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m6_encrypt, - skinny_aead_m6_decrypt -}; - -/* Domain separator prefixes for all of the SKINNY-AEAD family members */ -#define DOMAIN_SEP_M1 0x00 -#define DOMAIN_SEP_M2 0x10 -#define DOMAIN_SEP_M3 0x08 -#define DOMAIN_SEP_M4 0x18 -#define DOMAIN_SEP_M5 0x10 -#define DOMAIN_SEP_M6 0x18 - -/** - * \brief Initialize the key and nonce for SKINNY-128-384 based AEAD schemes. - * - * \param ks The key schedule to initialize. - * \param key Points to the 16 bytes of the key. - * \param nonce Points to the nonce. - * \param nonce_len Length of the nonce in bytes. - */ -static void skinny_aead_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - const unsigned char *nonce, unsigned nonce_len) -{ - unsigned char k[48]; - memset(k, 0, 16); - memcpy(k + 16, nonce, nonce_len); - memset(k + 16 + nonce_len, 0, 16 - nonce_len); - memcpy(k + 32, key, 16); - skinny_128_384_init(ks, k); -} - -/** - * \brief Set the domain separation value in the tweak for SKINNY-128-384. - * - * \param ks Key schedule for the block cipher. - * \param d Domain separation value to write into the tweak. - */ -#define skinny_aead_128_384_set_domain(ks,d) ((ks)->TK1[15] = (d)) - -/** - * \brief Sets the LFSR field in the tweak for SKINNY-128-384. - * - * \param ks Key schedule for the block cipher. - * \param lfsr 64-bit LFSR value. - */ -#define skinny_aead_128_384_set_lfsr(ks,lfsr) le_store_word64((ks)->TK1, (lfsr)) - -/** - * \brief Updates the LFSR value for SKINNY-128-384. - * - * \param lfsr 64-bit LFSR value to be updated. - */ -#define skinny_aead_128_384_update_lfsr(lfsr) \ - do { \ - uint8_t feedback = ((lfsr) & (1ULL << 63)) ? 0x1B : 0x00; \ - (lfsr) = ((lfsr) << 1) ^ feedback; \ - } while (0) - -/** - * \brief Authenticates the associated data for a SKINNY-128-384 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param tag Final tag to XOR the authentication checksum into. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
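The per-block counter used by the SKINNY-128-384 modes above is a 64-bit LFSR stored little-endian in TK1, with the domain-separation value written to TK1[15]. A minimal standalone sketch of the update step, mirroring the deleted skinny_aead_128_384_update_lfsr macro (the function name is illustrative):

#include <stdint.h>

/* One LFSR step: shift left by one and fold the top bit back in as 0x1B. */
static uint64_t lfsr64_step(uint64_t lfsr)
{
    uint64_t feedback = (lfsr & (1ULL << 63)) ? 0x1B : 0x00;
    return (lfsr << 1) ^ feedback;
}

/* Starting from 1 the sequence is 1, 2, 4, 8, ...; the feedback constant
 * only comes into play once the counter has been shifted 63 times. */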
- */ -static void skinny_aead_128_384_authenticate - (skinny_128_384_key_schedule_t *ks, unsigned char prefix, - unsigned char tag[SKINNY_128_BLOCK_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint64_t lfsr = 1; - skinny_aead_128_384_set_domain(ks, prefix | 2); - while (adlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_128_384_encrypt(ks, block, ad); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - ad += SKINNY_128_BLOCK_SIZE; - adlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_384_update_lfsr(lfsr); - } - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_aead_128_384_set_domain(ks, prefix | 3); - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, SKINNY_128_BLOCK_SIZE - temp - 1); - skinny_128_384_encrypt(ks, block, block); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - } -} - -/** - * \brief Encrypts the plaintext for a SKINNY-128-384 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the plaintext buffer. - * \param mlen Number of bytes of plaintext to be encrypted. - */ -static void skinny_aead_128_384_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint64_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_384_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_384_set_lfsr(ks, lfsr); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - skinny_128_384_encrypt(ks, c, m); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_384_update_lfsr(lfsr); - } - skinny_aead_128_384_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_384_set_domain(ks, prefix | 1); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_384_encrypt(ks, block, block); - lw_xor_block_2_src(c, block, m, temp); - skinny_aead_128_384_update_lfsr(lfsr); - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_aead_128_384_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_384_set_domain(ks, prefix | 4); - } - skinny_128_384_encrypt(ks, sum, sum); -} - -/** - * \brief Decrypts the ciphertext for a SKINNY-128-384 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the ciphertext buffer. - * \param mlen Number of bytes of ciphertext to be decrypted. 
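Both the authentication and encryption paths above handle a trailing partial block by appending a single 0x80 byte, zero-filling the remainder, and switching to a dedicated domain-separation value for that block. A small sketch of just the padding step (buffer layout and names are illustrative, not the library's API):

#include <string.h>

#define BLOCK_SIZE 16

/* Copy a trailing partial block and apply the 10* padding used above.
 * len must be strictly less than BLOCK_SIZE. */
static void pad_partial_block(unsigned char block[BLOCK_SIZE],
                              const unsigned char *data, unsigned len)
{
    memcpy(block, data, len);
    block[len] = 0x80;                               /* padding marker bit */
    memset(block + len + 1, 0, BLOCK_SIZE - len - 1);
}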
- */ -static void skinny_aead_128_384_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint64_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_384_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_128_384_decrypt(ks, m, c); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_384_update_lfsr(lfsr); - } - skinny_aead_128_384_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_384_set_domain(ks, prefix | 1); - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_384_encrypt(ks, block, block); - lw_xor_block_2_src(m, block, c, temp); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - skinny_aead_128_384_update_lfsr(lfsr); - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_aead_128_384_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_384_set_domain(ks, prefix | 4); - } - skinny_128_384_encrypt(ks, sum, sum); -} - -int skinny_aead_m1_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M1_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M1_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M1, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M1, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M1_TAG_SIZE); - return 0; -} - -int skinny_aead_m1_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M1_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M1_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M1_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M1, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M1, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M1_TAG_SIZE); -} - -int skinny_aead_m2_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* 
Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M2_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M2_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M2, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M2, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M2_TAG_SIZE); - return 0; -} - -int skinny_aead_m2_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M2_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M2_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M2_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M2, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M2, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M2_TAG_SIZE); -} - -int skinny_aead_m3_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M3_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M3_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M3, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M3, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M3_TAG_SIZE); - return 0; -} - -int skinny_aead_m3_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M3_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M3_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M3_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M3, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M3, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M3_TAG_SIZE); -} - -int skinny_aead_m4_encrypt - (unsigned char *c, 
unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M4_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M4_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M4, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M4, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M4_TAG_SIZE); - return 0; -} - -int skinny_aead_m4_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M4_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M4_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M4_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M4, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M4, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M4_TAG_SIZE); -} - -/** - * \brief Initialize the key and nonce for SKINNY-128-256 based AEAD schemes. - * - * \param ks The key schedule to initialize. - * \param key Points to the 16 bytes of the key. - * \param nonce Points to the nonce. - * \param nonce_len Length of the nonce in bytes. - */ -static void skinny_aead_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - const unsigned char *nonce, unsigned nonce_len) -{ - unsigned char k[32]; - memset(k, 0, 16 - nonce_len); - memcpy(k + 16 - nonce_len, nonce, nonce_len); - memcpy(k + 16, key, 16); - skinny_128_256_init(ks, k); -} - -/** - * \brief Set the domain separation value in the tweak for SKINNY-128-256. - * - * \param ks Key schedule for the block cipher. - * \param d Domain separation value to write into the tweak. - */ -#define skinny_aead_128_256_set_domain(ks,d) ((ks)->TK1[3] = (d)) - -/** - * \brief Sets the LFSR field in the tweak for SKINNY-128-256. - * - * \param ks Key schedule for the block cipher. - * \param lfsr 24-bit LFSR value. - */ -#define skinny_aead_128_256_set_lfsr(ks,lfsr) \ - do { \ - (ks)->TK1[0] = (uint8_t)(lfsr); \ - (ks)->TK1[1] = (uint8_t)((lfsr) >> 8); \ - (ks)->TK1[2] = (uint8_t)((lfsr) >> 16); \ - } while (0) - -/** - * \brief Updates the LFSR value for SKINNY-128-256. - * - * \param lfsr 24-bit LFSR value to be updated. - */ -#define skinny_aead_128_256_update_lfsr(lfsr) \ - do { \ - uint32_t feedback = ((lfsr) & (((uint32_t)1) << 23)) ? 0x1B : 0x00; \ - (lfsr) = ((lfsr) << 1) ^ (feedback); \ - } while (0) - -/** - * \brief Authenticates the associated data for a SKINNY-128-256 based AEAD. - * - * \param ks The key schedule to use. 
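For the SKINNY-128-256 based members the tweakey is only 32 bytes, so the nonce is right-aligned in the first 16 bytes (leaving TK1[0..2] for the 24-bit LFSR and TK1[3] for the domain byte) while the key fills the second half. A sketch of that packing, following the deleted skinny_aead_128_256_init (the helper name is illustrative):

#include <string.h>

/* 32-byte tweakey layout: [ lfsr/domain + zero pad | nonce ][ 16-byte key ] */
static void pack_tweakey_128_256(unsigned char k[32],
                                 const unsigned char *key,   /* 16 bytes   */
                                 const unsigned char *nonce,
                                 unsigned nonce_len)         /* at most 16 */
{
    memset(k, 0, 16 - nonce_len);                 /* room for LFSR/domain */
    memcpy(k + 16 - nonce_len, nonce, nonce_len); /* right-aligned nonce  */
    memcpy(k + 16, key, 16);                      /* key occupies TK2     */
}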
- * \param prefix Domain separation prefix for the family member. - * \param tag Final tag to XOR the authentication checksum into. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - */ -static void skinny_aead_128_256_authenticate - (skinny_128_256_key_schedule_t *ks, unsigned char prefix, - unsigned char tag[SKINNY_128_BLOCK_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint32_t lfsr = 1; - skinny_aead_128_256_set_domain(ks, prefix | 2); - while (adlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_128_256_encrypt(ks, block, ad); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - ad += SKINNY_128_BLOCK_SIZE; - adlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_256_update_lfsr(lfsr); - } - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_aead_128_256_set_domain(ks, prefix | 3); - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, SKINNY_128_BLOCK_SIZE - temp - 1); - skinny_128_256_encrypt(ks, block, block); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - } -} - -/** - * \brief Encrypts the plaintext for a SKINNY-128-256 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the plaintext buffer. - * \param mlen Number of bytes of plaintext to be encrypted. - */ -static void skinny_aead_128_256_encrypt - (skinny_128_256_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint32_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_256_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_256_set_lfsr(ks, lfsr); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - skinny_128_256_encrypt(ks, c, m); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_256_update_lfsr(lfsr); - } - skinny_aead_128_256_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_256_set_domain(ks, prefix | 1); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_256_encrypt(ks, block, block); - lw_xor_block_2_src(c, block, m, temp); - skinny_aead_128_256_update_lfsr(lfsr); - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_aead_128_256_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_256_set_domain(ks, prefix | 4); - } - skinny_128_256_encrypt(ks, sum, sum); -} - -/** - * \brief Decrypts the ciphertext for a SKINNY-128-256 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the ciphertext buffer. - * \param mlen Number of bytes of ciphertext to be decrypted. 
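The tag in these modes is the encryption of a running checksum: each full plaintext block is XORed into a 16-byte accumulator, and a final partial block is XORed in and terminated with the same 0x80 marker before the accumulator itself is encrypted under the final tweak. A sketch of just the accumulation (names are illustrative):

#include <string.h>

#define BLOCK_SIZE 16

/* XOR-checksum over the plaintext, as accumulated by the encrypt/decrypt
 * loops above before the final block-cipher call that produces the tag. */
static void accumulate_checksum(unsigned char sum[BLOCK_SIZE],
                                const unsigned char *m,
                                unsigned long long mlen)
{
    unsigned i;
    memset(sum, 0, BLOCK_SIZE);
    while (mlen >= BLOCK_SIZE) {
        for (i = 0; i < BLOCK_SIZE; ++i)
            sum[i] ^= m[i];
        m += BLOCK_SIZE;
        mlen -= BLOCK_SIZE;
    }
    if (mlen > 0) {
        for (i = 0; i < (unsigned)mlen; ++i)
            sum[i] ^= m[i];
        sum[mlen] ^= 0x80;    /* mark the partial final block */
    }
}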
- */ -static void skinny_aead_128_256_decrypt - (skinny_128_256_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint32_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_256_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_128_256_decrypt(ks, m, c); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_256_update_lfsr(lfsr); - } - skinny_aead_128_256_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_256_set_domain(ks, prefix | 1); - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_256_encrypt(ks, block, block); - lw_xor_block_2_src(m, block, c, temp); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - skinny_aead_128_256_update_lfsr(lfsr); - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_aead_128_256_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_256_set_domain(ks, prefix | 4); - } - skinny_128_256_encrypt(ks, sum, sum); -} - -int skinny_aead_m5_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M5_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M5_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_256_encrypt(&ks, DOMAIN_SEP_M5, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M5, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M5_TAG_SIZE); - return 0; -} - -int skinny_aead_m5_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M5_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M5_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M5_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_256_decrypt(&ks, DOMAIN_SEP_M5, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M5, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M5_TAG_SIZE); -} - -int skinny_aead_m6_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* 
Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M6_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M6_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_256_encrypt(&ks, DOMAIN_SEP_M6, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M6, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M6_TAG_SIZE); - return 0; -} - -int skinny_aead_m6_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M6_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M6_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M6_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_256_decrypt(&ks, DOMAIN_SEP_M6, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M6, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M6_TAG_SIZE); -} diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/skinny-aead.h b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/skinny-aead.h deleted file mode 100644 index c6b54fb..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys-avr/skinny-aead.h +++ /dev/null @@ -1,518 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SKINNY_AEAD_H -#define LWCRYPTO_SKINNY_AEAD_H - -#include "aead-common.h" - -/** - * \file skinny-aead.h - * \brief Authenticated encryption based on the SKINNY block cipher. - * - * SKINNY-AEAD is a family of authenticated encryption algorithms - * that are built around the SKINNY tweakable block cipher. 
There - * are six members in the family: - * - * \li SKINNY-AEAD-M1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. This is the - * primary member of the family. - * \li SKINNY-AEAD-M2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li SKINNY-AEAD-M3 has a 128-bit key, a 128-bit nonce, and a 64-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li SKINNY-AEAD-M4 has a 128-bit key, a 96-bit nonce, and a 64-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li SKINNY-AEAD-M5 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * \li SKINNY-AEAD-M6 has a 128-bit key, a 96-bit nonce, and a 64-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * - * The SKINNY-AEAD family also includes two hash algorithms: - * - * \li SKINNY-tk3-HASH with a 256-bit hash output, based around the - * SKINNY-128-384 tweakable block cipher. This is the primary hashing - * member of the family. - * \li SKINNY-tk2-HASH with a 256-bit hash output, based around the - * SKINNY-128-256 tweakable block cipher. - * - * References: https://sites.google.com/site/skinnycipher/home - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all SKINNY-AEAD family members. - */ -#define SKINNY_AEAD_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M1. - */ -#define SKINNY_AEAD_M1_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M1. - */ -#define SKINNY_AEAD_M1_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M2. - */ -#define SKINNY_AEAD_M2_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M2. - */ -#define SKINNY_AEAD_M2_NONCE_SIZE 12 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M3. - */ -#define SKINNY_AEAD_M3_TAG_SIZE 8 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M3. - */ -#define SKINNY_AEAD_M3_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M4. - */ -#define SKINNY_AEAD_M4_TAG_SIZE 8 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M4. - */ -#define SKINNY_AEAD_M4_NONCE_SIZE 12 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M5. - */ -#define SKINNY_AEAD_M5_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M5. - */ -#define SKINNY_AEAD_M5_NONCE_SIZE 12 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M6. - */ -#define SKINNY_AEAD_M6_TAG_SIZE 8 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M6. - */ -#define SKINNY_AEAD_M6_NONCE_SIZE 12 - -/** - * \brief Meta-information block for the SKINNY-AEAD-M1 cipher. - */ -extern aead_cipher_t const skinny_aead_m1_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M2 cipher. - */ -extern aead_cipher_t const skinny_aead_m2_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M3 cipher. - */ -extern aead_cipher_t const skinny_aead_m3_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M4 cipher. - */ -extern aead_cipher_t const skinny_aead_m4_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M5 cipher. - */ -extern aead_cipher_t const skinny_aead_m5_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M6 cipher. 
- */ -extern aead_cipher_t const skinny_aead_m6_cipher; - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M1. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m1_decrypt() - */ -int skinny_aead_m1_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M1. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m1_encrypt() - */ -int skinny_aead_m1_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M2. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
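The prototypes above all follow the same calling convention, so a round trip with the primary family member looks like the sketch below (error handling kept minimal; this assumes skinny-aead.h and its dependencies are on the include path):

#include <string.h>
#include "skinny-aead.h"

int skinny_aead_m1_roundtrip(void)
{
    unsigned char key[SKINNY_AEAD_KEY_SIZE] = {0};
    unsigned char nonce[SKINNY_AEAD_M1_NONCE_SIZE] = {0};
    unsigned char msg[4] = {'t', 'e', 's', 't'};
    unsigned char ct[sizeof(msg) + SKINNY_AEAD_M1_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    if (skinny_aead_m1_encrypt(ct, &ctlen, msg, sizeof(msg),
                               NULL, 0, NULL, nonce, key) != 0)
        return -1;
    /* Decryption returns 0 only if the authentication tag verifies. */
    if (skinny_aead_m1_decrypt(pt, &ptlen, NULL, ct, ctlen,
                               NULL, 0, nonce, key) != 0)
        return -1;
    return memcmp(pt, msg, sizeof(msg)) == 0 ? 0 : -1;
}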
- * - * \sa skinny_aead_m2_decrypt() - */ -int skinny_aead_m2_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M2. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m2_encrypt() - */ -int skinny_aead_m2_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M3. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m3_decrypt() - */ -int skinny_aead_m3_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M3. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. 
- * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m3_encrypt() - */ -int skinny_aead_m3_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M4. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m4_decrypt() - */ -int skinny_aead_m4_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M4. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m4_encrypt() - */ -int skinny_aead_m4_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M5. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m5_decrypt() - */ -int skinny_aead_m5_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M5. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m5_encrypt() - */ -int skinny_aead_m5_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M6. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m6_decrypt() - */ -int skinny_aead_m6_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M6. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. 
- * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m6_encrypt() - */ -int skinny_aead_m6_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/internal-skinny128-avr.S b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/internal-skinny128-avr.S new file mode 100644 index 0000000..d342cd5 --- /dev/null +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/internal-skinny128-avr.S @@ -0,0 +1,10099 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 256 +table_0: + .byte 101 + .byte 76 + .byte 106 + .byte 66 + .byte 75 + .byte 99 + .byte 67 + .byte 107 + .byte 85 + .byte 117 + .byte 90 + .byte 122 + .byte 83 + .byte 115 + .byte 91 + .byte 123 + .byte 53 + .byte 140 + .byte 58 + .byte 129 + .byte 137 + .byte 51 + .byte 128 + .byte 59 + .byte 149 + .byte 37 + .byte 152 + .byte 42 + .byte 144 + .byte 35 + .byte 153 + .byte 43 + .byte 229 + .byte 204 + .byte 232 + .byte 193 + .byte 201 + .byte 224 + .byte 192 + .byte 233 + .byte 213 + .byte 245 + .byte 216 + .byte 248 + .byte 208 + .byte 240 + .byte 217 + .byte 249 + .byte 165 + .byte 28 + .byte 168 + .byte 18 + .byte 27 + .byte 160 + .byte 19 + .byte 169 + .byte 5 + .byte 181 + .byte 10 + .byte 184 + .byte 3 + .byte 176 + .byte 11 + .byte 185 + .byte 50 + .byte 136 + .byte 60 + .byte 133 + .byte 141 + .byte 52 + .byte 132 + .byte 61 + .byte 145 + .byte 34 + .byte 156 + .byte 44 + .byte 148 + .byte 36 + .byte 157 + .byte 45 + .byte 98 + .byte 74 + .byte 108 + .byte 69 + .byte 77 + .byte 100 + .byte 68 + .byte 109 + .byte 82 + .byte 114 + .byte 92 + .byte 124 + .byte 84 + .byte 116 + .byte 93 + .byte 125 + .byte 161 + .byte 26 + .byte 172 + .byte 21 + .byte 29 + .byte 164 + .byte 20 + .byte 173 + .byte 2 + .byte 177 + .byte 12 + .byte 188 + .byte 4 + .byte 180 + .byte 13 + .byte 189 + .byte 225 + .byte 200 + .byte 236 + .byte 197 + .byte 205 + .byte 228 + .byte 196 + .byte 237 + .byte 209 + .byte 241 + .byte 220 + .byte 252 + .byte 212 + .byte 244 + .byte 221 + .byte 253 + .byte 54 + .byte 142 + .byte 56 + .byte 130 + .byte 139 + .byte 48 + .byte 131 + .byte 57 + .byte 150 + .byte 38 + .byte 154 + .byte 40 + .byte 147 + .byte 32 + .byte 155 + .byte 41 + .byte 102 + .byte 78 + .byte 104 + .byte 65 + .byte 73 + .byte 96 + .byte 64 + .byte 105 + .byte 86 + .byte 118 + .byte 88 + .byte 120 + .byte 80 + .byte 112 + .byte 89 + .byte 121 + .byte 166 + .byte 30 + .byte 170 + 
.byte 17 + .byte 25 + .byte 163 + .byte 16 + .byte 171 + .byte 6 + .byte 182 + .byte 8 + .byte 186 + .byte 0 + .byte 179 + .byte 9 + .byte 187 + .byte 230 + .byte 206 + .byte 234 + .byte 194 + .byte 203 + .byte 227 + .byte 195 + .byte 235 + .byte 214 + .byte 246 + .byte 218 + .byte 250 + .byte 211 + .byte 243 + .byte 219 + .byte 251 + .byte 49 + .byte 138 + .byte 62 + .byte 134 + .byte 143 + .byte 55 + .byte 135 + .byte 63 + .byte 146 + .byte 33 + .byte 158 + .byte 46 + .byte 151 + .byte 39 + .byte 159 + .byte 47 + .byte 97 + .byte 72 + .byte 110 + .byte 70 + .byte 79 + .byte 103 + .byte 71 + .byte 111 + .byte 81 + .byte 113 + .byte 94 + .byte 126 + .byte 87 + .byte 119 + .byte 95 + .byte 127 + .byte 162 + .byte 24 + .byte 174 + .byte 22 + .byte 31 + .byte 167 + .byte 23 + .byte 175 + .byte 1 + .byte 178 + .byte 14 + .byte 190 + .byte 7 + .byte 183 + .byte 15 + .byte 191 + .byte 226 + .byte 202 + .byte 238 + .byte 198 + .byte 207 + .byte 231 + .byte 199 + .byte 239 + .byte 210 + .byte 242 + .byte 222 + .byte 254 + .byte 215 + .byte 247 + .byte 223 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 256 +table_1: + .byte 172 + .byte 232 + .byte 104 + .byte 60 + .byte 108 + .byte 56 + .byte 168 + .byte 236 + .byte 170 + .byte 174 + .byte 58 + .byte 62 + .byte 106 + .byte 110 + .byte 234 + .byte 238 + .byte 166 + .byte 163 + .byte 51 + .byte 54 + .byte 102 + .byte 99 + .byte 227 + .byte 230 + .byte 225 + .byte 164 + .byte 97 + .byte 52 + .byte 49 + .byte 100 + .byte 161 + .byte 228 + .byte 141 + .byte 201 + .byte 73 + .byte 29 + .byte 77 + .byte 25 + .byte 137 + .byte 205 + .byte 139 + .byte 143 + .byte 27 + .byte 31 + .byte 75 + .byte 79 + .byte 203 + .byte 207 + .byte 133 + .byte 192 + .byte 64 + .byte 21 + .byte 69 + .byte 16 + .byte 128 + .byte 197 + .byte 130 + .byte 135 + .byte 18 + .byte 23 + .byte 66 + .byte 71 + .byte 194 + .byte 199 + .byte 150 + .byte 147 + .byte 3 + .byte 6 + .byte 86 + .byte 83 + .byte 211 + .byte 214 + .byte 209 + .byte 148 + .byte 81 + .byte 4 + .byte 1 + .byte 84 + .byte 145 + .byte 212 + .byte 156 + .byte 216 + .byte 88 + .byte 12 + .byte 92 + .byte 8 + .byte 152 + .byte 220 + .byte 154 + .byte 158 + .byte 10 + .byte 14 + .byte 90 + .byte 94 + .byte 218 + .byte 222 + .byte 149 + .byte 208 + .byte 80 + .byte 5 + .byte 85 + .byte 0 + .byte 144 + .byte 213 + .byte 146 + .byte 151 + .byte 2 + .byte 7 + .byte 82 + .byte 87 + .byte 210 + .byte 215 + .byte 157 + .byte 217 + .byte 89 + .byte 13 + .byte 93 + .byte 9 + .byte 153 + .byte 221 + .byte 155 + .byte 159 + .byte 11 + .byte 15 + .byte 91 + .byte 95 + .byte 219 + .byte 223 + .byte 22 + .byte 19 + .byte 131 + .byte 134 + .byte 70 + .byte 67 + .byte 195 + .byte 198 + .byte 65 + .byte 20 + .byte 193 + .byte 132 + .byte 17 + .byte 68 + .byte 129 + .byte 196 + .byte 28 + .byte 72 + .byte 200 + .byte 140 + .byte 76 + .byte 24 + .byte 136 + .byte 204 + .byte 26 + .byte 30 + .byte 138 + .byte 142 + .byte 74 + .byte 78 + .byte 202 + .byte 206 + .byte 53 + .byte 96 + .byte 224 + .byte 165 + .byte 101 + .byte 48 + .byte 160 + .byte 229 + .byte 50 + .byte 55 + .byte 162 + .byte 167 + .byte 98 + .byte 103 + .byte 226 + .byte 231 + .byte 61 + .byte 105 + .byte 233 + .byte 173 + .byte 109 + .byte 57 + .byte 169 + .byte 237 + .byte 59 + .byte 63 + .byte 171 + .byte 175 + .byte 107 + .byte 111 + .byte 235 + .byte 239 + .byte 38 + .byte 35 + .byte 179 + .byte 182 + .byte 118 + .byte 115 + .byte 243 + .byte 246 + .byte 113 + .byte 36 + .byte 241 + .byte 180 + .byte 33 + 
.byte 116 + .byte 177 + .byte 244 + .byte 44 + .byte 120 + .byte 248 + .byte 188 + .byte 124 + .byte 40 + .byte 184 + .byte 252 + .byte 42 + .byte 46 + .byte 186 + .byte 190 + .byte 122 + .byte 126 + .byte 250 + .byte 254 + .byte 37 + .byte 112 + .byte 240 + .byte 181 + .byte 117 + .byte 32 + .byte 176 + .byte 245 + .byte 34 + .byte 39 + .byte 178 + .byte 183 + .byte 114 + .byte 119 + .byte 242 + .byte 247 + .byte 45 + .byte 121 + .byte 249 + .byte 189 + .byte 125 + .byte 41 + .byte 185 + .byte 253 + .byte 43 + .byte 47 + .byte 187 + .byte 191 + .byte 123 + .byte 127 + .byte 251 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_2, @object + .size table_2, 256 +table_2: + .byte 0 + .byte 2 + .byte 4 + .byte 6 + .byte 8 + .byte 10 + .byte 12 + .byte 14 + .byte 16 + .byte 18 + .byte 20 + .byte 22 + .byte 24 + .byte 26 + .byte 28 + .byte 30 + .byte 32 + .byte 34 + .byte 36 + .byte 38 + .byte 40 + .byte 42 + .byte 44 + .byte 46 + .byte 48 + .byte 50 + .byte 52 + .byte 54 + .byte 56 + .byte 58 + .byte 60 + .byte 62 + .byte 65 + .byte 67 + .byte 69 + .byte 71 + .byte 73 + .byte 75 + .byte 77 + .byte 79 + .byte 81 + .byte 83 + .byte 85 + .byte 87 + .byte 89 + .byte 91 + .byte 93 + .byte 95 + .byte 97 + .byte 99 + .byte 101 + .byte 103 + .byte 105 + .byte 107 + .byte 109 + .byte 111 + .byte 113 + .byte 115 + .byte 117 + .byte 119 + .byte 121 + .byte 123 + .byte 125 + .byte 127 + .byte 128 + .byte 130 + .byte 132 + .byte 134 + .byte 136 + .byte 138 + .byte 140 + .byte 142 + .byte 144 + .byte 146 + .byte 148 + .byte 150 + .byte 152 + .byte 154 + .byte 156 + .byte 158 + .byte 160 + .byte 162 + .byte 164 + .byte 166 + .byte 168 + .byte 170 + .byte 172 + .byte 174 + .byte 176 + .byte 178 + .byte 180 + .byte 182 + .byte 184 + .byte 186 + .byte 188 + .byte 190 + .byte 193 + .byte 195 + .byte 197 + .byte 199 + .byte 201 + .byte 203 + .byte 205 + .byte 207 + .byte 209 + .byte 211 + .byte 213 + .byte 215 + .byte 217 + .byte 219 + .byte 221 + .byte 223 + .byte 225 + .byte 227 + .byte 229 + .byte 231 + .byte 233 + .byte 235 + .byte 237 + .byte 239 + .byte 241 + .byte 243 + .byte 245 + .byte 247 + .byte 249 + .byte 251 + .byte 253 + .byte 255 + .byte 1 + .byte 3 + .byte 5 + .byte 7 + .byte 9 + .byte 11 + .byte 13 + .byte 15 + .byte 17 + .byte 19 + .byte 21 + .byte 23 + .byte 25 + .byte 27 + .byte 29 + .byte 31 + .byte 33 + .byte 35 + .byte 37 + .byte 39 + .byte 41 + .byte 43 + .byte 45 + .byte 47 + .byte 49 + .byte 51 + .byte 53 + .byte 55 + .byte 57 + .byte 59 + .byte 61 + .byte 63 + .byte 64 + .byte 66 + .byte 68 + .byte 70 + .byte 72 + .byte 74 + .byte 76 + .byte 78 + .byte 80 + .byte 82 + .byte 84 + .byte 86 + .byte 88 + .byte 90 + .byte 92 + .byte 94 + .byte 96 + .byte 98 + .byte 100 + .byte 102 + .byte 104 + .byte 106 + .byte 108 + .byte 110 + .byte 112 + .byte 114 + .byte 116 + .byte 118 + .byte 120 + .byte 122 + .byte 124 + .byte 126 + .byte 129 + .byte 131 + .byte 133 + .byte 135 + .byte 137 + .byte 139 + .byte 141 + .byte 143 + .byte 145 + .byte 147 + .byte 149 + .byte 151 + .byte 153 + .byte 155 + .byte 157 + .byte 159 + .byte 161 + .byte 163 + .byte 165 + .byte 167 + .byte 169 + .byte 171 + .byte 173 + .byte 175 + .byte 177 + .byte 179 + .byte 181 + .byte 183 + .byte 185 + .byte 187 + .byte 189 + .byte 191 + .byte 192 + .byte 194 + .byte 196 + .byte 198 + .byte 200 + .byte 202 + .byte 204 + .byte 206 + .byte 208 + .byte 210 + .byte 212 + .byte 214 + .byte 216 + .byte 218 + .byte 220 + .byte 222 + .byte 224 + .byte 226 + .byte 228 + .byte 230 + .byte 232 + .byte 234 + 
.byte 236 + .byte 238 + .byte 240 + .byte 242 + .byte 244 + .byte 246 + .byte 248 + .byte 250 + .byte 252 + .byte 254 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_3, @object + .size table_3, 256 +table_3: + .byte 0 + .byte 128 + .byte 1 + .byte 129 + .byte 2 + .byte 130 + .byte 3 + .byte 131 + .byte 4 + .byte 132 + .byte 5 + .byte 133 + .byte 6 + .byte 134 + .byte 7 + .byte 135 + .byte 8 + .byte 136 + .byte 9 + .byte 137 + .byte 10 + .byte 138 + .byte 11 + .byte 139 + .byte 12 + .byte 140 + .byte 13 + .byte 141 + .byte 14 + .byte 142 + .byte 15 + .byte 143 + .byte 16 + .byte 144 + .byte 17 + .byte 145 + .byte 18 + .byte 146 + .byte 19 + .byte 147 + .byte 20 + .byte 148 + .byte 21 + .byte 149 + .byte 22 + .byte 150 + .byte 23 + .byte 151 + .byte 24 + .byte 152 + .byte 25 + .byte 153 + .byte 26 + .byte 154 + .byte 27 + .byte 155 + .byte 28 + .byte 156 + .byte 29 + .byte 157 + .byte 30 + .byte 158 + .byte 31 + .byte 159 + .byte 160 + .byte 32 + .byte 161 + .byte 33 + .byte 162 + .byte 34 + .byte 163 + .byte 35 + .byte 164 + .byte 36 + .byte 165 + .byte 37 + .byte 166 + .byte 38 + .byte 167 + .byte 39 + .byte 168 + .byte 40 + .byte 169 + .byte 41 + .byte 170 + .byte 42 + .byte 171 + .byte 43 + .byte 172 + .byte 44 + .byte 173 + .byte 45 + .byte 174 + .byte 46 + .byte 175 + .byte 47 + .byte 176 + .byte 48 + .byte 177 + .byte 49 + .byte 178 + .byte 50 + .byte 179 + .byte 51 + .byte 180 + .byte 52 + .byte 181 + .byte 53 + .byte 182 + .byte 54 + .byte 183 + .byte 55 + .byte 184 + .byte 56 + .byte 185 + .byte 57 + .byte 186 + .byte 58 + .byte 187 + .byte 59 + .byte 188 + .byte 60 + .byte 189 + .byte 61 + .byte 190 + .byte 62 + .byte 191 + .byte 63 + .byte 64 + .byte 192 + .byte 65 + .byte 193 + .byte 66 + .byte 194 + .byte 67 + .byte 195 + .byte 68 + .byte 196 + .byte 69 + .byte 197 + .byte 70 + .byte 198 + .byte 71 + .byte 199 + .byte 72 + .byte 200 + .byte 73 + .byte 201 + .byte 74 + .byte 202 + .byte 75 + .byte 203 + .byte 76 + .byte 204 + .byte 77 + .byte 205 + .byte 78 + .byte 206 + .byte 79 + .byte 207 + .byte 80 + .byte 208 + .byte 81 + .byte 209 + .byte 82 + .byte 210 + .byte 83 + .byte 211 + .byte 84 + .byte 212 + .byte 85 + .byte 213 + .byte 86 + .byte 214 + .byte 87 + .byte 215 + .byte 88 + .byte 216 + .byte 89 + .byte 217 + .byte 90 + .byte 218 + .byte 91 + .byte 219 + .byte 92 + .byte 220 + .byte 93 + .byte 221 + .byte 94 + .byte 222 + .byte 95 + .byte 223 + .byte 224 + .byte 96 + .byte 225 + .byte 97 + .byte 226 + .byte 98 + .byte 227 + .byte 99 + .byte 228 + .byte 100 + .byte 229 + .byte 101 + .byte 230 + .byte 102 + .byte 231 + .byte 103 + .byte 232 + .byte 104 + .byte 233 + .byte 105 + .byte 234 + .byte 106 + .byte 235 + .byte 107 + .byte 236 + .byte 108 + .byte 237 + .byte 109 + .byte 238 + .byte 110 + .byte 239 + .byte 111 + .byte 240 + .byte 112 + .byte 241 + .byte 113 + .byte 242 + .byte 114 + .byte 243 + .byte 115 + .byte 244 + .byte 116 + .byte 245 + .byte 117 + .byte 246 + .byte 118 + .byte 247 + .byte 119 + .byte 248 + .byte 120 + .byte 249 + .byte 121 + .byte 250 + .byte 122 + .byte 251 + .byte 123 + .byte 252 + .byte 124 + .byte 253 + .byte 125 + .byte 254 + .byte 126 + .byte 255 + .byte 127 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_4, @object + .size table_4, 112 +table_4: + .byte 1 + .byte 0 + .byte 3 + .byte 0 + .byte 7 + .byte 0 + .byte 15 + .byte 0 + .byte 15 + .byte 1 + .byte 14 + .byte 3 + .byte 13 + .byte 3 + .byte 11 + .byte 3 + .byte 7 + .byte 3 + .byte 15 + .byte 2 + .byte 14 + .byte 1 + .byte 12 + .byte 3 + 
.byte 9 + .byte 3 + .byte 3 + .byte 3 + .byte 7 + .byte 2 + .byte 14 + .byte 0 + .byte 13 + .byte 1 + .byte 10 + .byte 3 + .byte 5 + .byte 3 + .byte 11 + .byte 2 + .byte 6 + .byte 1 + .byte 12 + .byte 2 + .byte 8 + .byte 1 + .byte 0 + .byte 3 + .byte 1 + .byte 2 + .byte 2 + .byte 0 + .byte 5 + .byte 0 + .byte 11 + .byte 0 + .byte 7 + .byte 1 + .byte 14 + .byte 2 + .byte 12 + .byte 1 + .byte 8 + .byte 3 + .byte 1 + .byte 3 + .byte 3 + .byte 2 + .byte 6 + .byte 0 + .byte 13 + .byte 0 + .byte 11 + .byte 1 + .byte 6 + .byte 3 + .byte 13 + .byte 2 + .byte 10 + .byte 1 + .byte 4 + .byte 3 + .byte 9 + .byte 2 + .byte 2 + .byte 1 + .byte 4 + .byte 2 + .byte 8 + .byte 0 + .byte 1 + .byte 1 + .byte 2 + .byte 2 + .byte 4 + .byte 0 + .byte 9 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 2 + .byte 12 + .byte 0 + .byte 9 + .byte 1 + .byte 2 + .byte 3 + .byte 5 + .byte 2 + .byte 10 + .byte 0 + + .text +.global skinny_128_384_init + .type skinny_128_384_init, @function +skinny_128_384_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,12 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_384_init, .-skinny_128_384_init + + .text +.global skinny_128_384_encrypt + .type skinny_128_384_encrypt, @function +skinny_128_384_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + std Y+33,r18 + std Y+34,r19 + std Y+35,r20 + std Y+36,r21 + ldd r18,Z+36 + ldd r19,Z+37 + ldd r20,Z+38 + ldd r21,Z+39 + std Y+37,r18 + std Y+38,r19 + std Y+39,r20 + std Y+40,r21 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + std Y+41,r18 + std Y+42,r19 + std Y+43,r20 + std Y+44,r21 + ldd r18,Z+44 + ldd r19,Z+45 + ldd r20,Z+46 + ldd r21,Z+47 + std Y+45,r18 + std Y+46,r19 + std Y+47,r20 + std Y+48,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +114: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor 
r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov 
r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if 
defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd 
r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov 
r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + 
elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 
+#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,112 + brne 5721f + rjmp 790f +5721: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if 
defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 114b +790: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + 
st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_encrypt, .-skinny_128_384_encrypt + +.global skinny_128_384_encrypt_tk_full + .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt + + .text +.global skinny_128_384_decrypt + .type skinny_128_384_decrypt, @function +skinny_128_384_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r23 + std Y+2,r2 + std Y+3,r21 + std Y+4,r20 + std Y+5,r3 + std Y+6,r18 + std Y+7,r19 + std Y+8,r22 + std Y+9,r9 + std Y+10,r10 + std Y+11,r7 + std Y+12,r6 + std Y+13,r11 + std Y+14,r4 + std Y+15,r5 + std Y+16,r8 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r23 + std Y+18,r2 + std Y+19,r21 + std Y+20,r20 + std Y+21,r3 + std Y+22,r18 + std Y+23,r19 + std Y+24,r22 + std Y+25,r9 + std Y+26,r10 + std Y+27,r7 + std Y+28,r6 + std Y+29,r11 + std Y+30,r4 + std Y+31,r5 + std Y+32,r8 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r22,Z+36 + ldd r23,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+33,r23 + std Y+34,r2 + std Y+35,r21 + std Y+36,r20 + std Y+37,r3 + std Y+38,r18 + std Y+39,r19 + std Y+40,r22 + std Y+41,r9 + std Y+42,r10 + std Y+43,r7 + std Y+44,r6 + std Y+45,r11 + std Y+46,r4 + std Y+47,r5 + std Y+48,r8 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +122: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + 
ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 122b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,28 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +150: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 150b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 +179: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + 
mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 179b + std Y+33,r12 + std Y+34,r13 + std Y+35,r14 + std Y+36,r15 + std Y+37,r24 + std Y+38,r25 + std Y+39,r16 + std Y+40,r17 + ldi r26,28 + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 +207: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 207b + std Y+41,r12 + std Y+42,r13 + std Y+43,r14 + std Y+44,r15 + std Y+45,r24 + std Y+46,r25 + std Y+47,r16 + std Y+48,r17 + ldi r26,112 +227: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z 
+#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + 
eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z 
+#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + 
elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z 
+#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm 
r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 903f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 227b +903: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_decrypt, .-skinny_128_384_decrypt + + .text +.global skinny_128_256_init + .type skinny_128_256_init, @function +skinny_128_256_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,8 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_256_init, .-skinny_128_256_init + + .text +.global skinny_128_256_encrypt + .type skinny_128_256_encrypt, @function +skinny_128_256_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std 
Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +82: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 
+#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov 
r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor 
r20,r0 + ldd r0,Y+32 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z 
+#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,96 + breq 594f + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld 
r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 82b +594: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_encrypt, .-skinny_128_256_encrypt + +.global skinny_128_256_encrypt_tk_full + .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt + + .text +.global skinny_128_256_decrypt + .type skinny_128_256_decrypt, @function +skinny_128_256_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + std Y+5,r22 + std Y+6,r23 + std Y+7,r2 + std Y+8,r3 + std Y+9,r4 + std Y+10,r5 + std Y+11,r6 + std Y+12,r7 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + std Y+21,r22 + std Y+22,r23 + std Y+23,r2 + std Y+24,r3 + std Y+25,r4 + std Y+26,r5 + std Y+27,r6 + std Y+28,r7 + std Y+29,r8 + std Y+30,r9 + std Y+31,r10 + std Y+32,r11 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,24 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +90: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 
+#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 90b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,24 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +118: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 118b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,96 +139: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov 
r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov 
r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + 
mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif 
defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi 
r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 651f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 139b +651: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_decrypt, .-skinny_128_256_decrypt + +#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/internal-skinny128.c b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/internal-skinny128.c index 65ba4ed..579ced1 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/internal-skinny128.c +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/internal-skinny128.c @@ -25,6 +25,8 @@ #include "internal-util.h" #include +#if !defined(__AVR__) + STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) { /* This function is used to fast-forward the TK1 tweak value @@ -55,42 +57,33 @@ STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) ((row3 << 24) & 0xFF000000U); } -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - size_t key_len) +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t TK3[4]; uint32_t *schedule; unsigned round; uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || (key_len != 32 && key_len != 48)) - return 0; - +#endif + +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the 
small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); + memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); +#else /* Set the initial states of TK1, TK2, and TK3 */ - if (key_len == 32) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - TK3[0] = le_load_word32(key + 16); - TK3[1] = le_load_word32(key + 20); - TK3[2] = le_load_word32(key + 24); - TK3[3] = le_load_word32(key + 28); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); + TK3[0] = le_load_word32(key + 32); + TK3[1] = le_load_word32(key + 36); + TK3[2] = le_load_word32(key + 40); + TK3[3] = le_load_word32(key + 44); /* Set up the key schedule using TK2 and TK3. TK1 is not added * to the key schedule because we will derive that part of the @@ -116,20 +109,7 @@ int skinny_128_384_init skinny128_LFSR3(TK3[0]); skinny128_LFSR3(TK3[1]); } - return 1; -} - -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_384_encrypt @@ -138,7 +118,13 @@ void skinny_128_384_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -148,14 +134,24 @@ void skinny_128_384_encrypt s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -163,8 +159,15 @@ void skinny_128_384_encrypt skinny128_sbox(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -185,6 +188,16 @@ void 
skinny_128_384_encrypt /* Permute TK1 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_permute_tk(TK3); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -200,7 +213,13 @@ void skinny_128_384_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0x15; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -215,15 +234,47 @@ void skinny_128_384_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Permute TK1 to fast-forward it to the end of the key schedule */ skinny128_fast_forward_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_fast_forward_tk(TK2); + skinny128_fast_forward_tk(TK3); + for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2 and TK3. + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); + skinny128_LFSR3(TK3[2]); + skinny128_LFSR3(TK3[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_inv_permute_tk(TK3); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); + skinny128_LFSR2(TK3[2]); + skinny128_LFSR2(TK3[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -240,8 +291,15 @@ void skinny_128_384_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -259,13 +317,18 @@ void skinny_128_384_decrypt } void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2) { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; uint32_t TK2[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -275,7 +338,7 @@ void skinny_128_384_encrypt_tk2 s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1/TK2 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = 
le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); @@ -284,9 +347,15 @@ void skinny_128_384_encrypt_tk2 TK2[1] = le_load_word32(tk2 + 4); TK2[2] = le_load_word32(tk2 + 8); TK2[3] = le_load_word32(tk2 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -294,8 +363,15 @@ void skinny_128_384_encrypt_tk2 skinny128_sbox(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -319,6 +395,13 @@ void skinny_128_384_encrypt_tk2 skinny128_permute_tk(TK2); skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK3); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -408,33 +491,27 @@ void skinny_128_384_encrypt_tk_full le_store_word32(output + 12, s3); } -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len) +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t *schedule; unsigned round; uint8_t rc; +#endif - /* Validate the parameters */ - if (!ks || !key || (key_len != 16 && key_len != 32)) - return 0; - +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); +#else /* Set the initial states of TK1 and TK2 */ - if (key_len == 16) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); /* Set up the key schedule using TK2. 
TK1 is not added * to the key schedule because we will derive that part of the @@ -457,20 +534,7 @@ int skinny_128_256_init skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); } - return 1; -} - -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_256_encrypt @@ -479,7 +543,12 @@ void skinny_128_256_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -494,18 +563,31 @@ void skinny_128_256_encrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); skinny128_sbox(s2); skinny128_sbox(s3); - /* Apply the subkey for this round */ + /* XOR the round constant and the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -524,8 +606,15 @@ void skinny_128_256_encrypt s1 = s0; s0 = temp; - /* Permute TK1 for the next round */ + /* Permute TK1 and TK2 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -541,7 +630,12 @@ void skinny_128_256_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0x09; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -558,12 +652,29 @@ void skinny_128_256_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2. 
+ skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -580,8 +691,15 @@ void skinny_128_256_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -670,142 +788,14 @@ void skinny_128_256_encrypt_tk_full le_store_word32(output + 12, s3); } -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len) -{ - uint32_t TK1[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || key_len != 16) - return 0; - - /* Set the initial state of TK1 */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); +#else /* __AVR__ */ - /* Set up the key schedule using TK1 */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK1[0] ^ (rc & 0x0F); - schedule[1] = TK1[1] ^ (rc >> 4); - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); - } - return 1; -} - -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) +void skinny_128_384_encrypt_tk2 + (skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, const unsigned char *tk2) { - uint32_t s0, s1, s2, s3; - const uint32_t *schedule = ks->k; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); + memcpy(ks->TK2, tk2, 16); + skinny_128_384_encrypt(ks, output, input); } -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - const uint32_t *schedule; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_128_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule -= 2) { - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} +#endif /* __AVR__ */ diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/internal-skinny128.h b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/internal-skinny128.h index 76b34f5..2bfda3c 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/internal-skinny128.h +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/internal-skinny128.h @@ -39,6 +39,16 @@ extern "C" { #endif /** + * \def SKINNY_128_SMALL_SCHEDULE + * \brief Defined to 1 to use the small key schedule version of SKINNY-128. + */ +#if defined(__AVR__) +#define SKINNY_128_SMALL_SCHEDULE 1 +#else +#define SKINNY_128_SMALL_SCHEDULE 0 +#endif + +/** * \brief Size of a block for SKINNY-128 block ciphers. */ #define SKINNY_128_BLOCK_SIZE 16 @@ -56,8 +66,16 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; + + /** TK3 for the small key schedule */ + uint8_t TK3[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_384_ROUNDS * 2]; +#endif } skinny_128_384_key_schedule_t; @@ -66,29 +84,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 32 or 48, - * where 32 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-384. 
- * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); /** * \brief Encrypts a 128-bit block with SKINNY-128-384. @@ -133,9 +131,12 @@ void skinny_128_384_decrypt * This version is useful when both TK1 and TK2 change from block to block. * When the key is initialized with skinny_128_384_init(), the TK2 part of * the key value should be set to zero. + * + * \note Some versions of this function may modify the key schedule to + * copy tk2 into place. */ void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2); /** @@ -170,8 +171,13 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_256_ROUNDS * 2]; +#endif } skinny_128_256_key_schedule_t; @@ -180,29 +186,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16 or 32, - * where 16 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); /** * \brief Encrypts a 128-bit block with SKINNY-128-256. @@ -251,63 +237,6 @@ void skinny_128_256_encrypt_tk_full (const unsigned char key[32], unsigned char *output, const unsigned char *input); -/** - * \brief Number of rounds for SKINNY-128-128. - */ -#define SKINNY_128_128_ROUNDS 40 - -/** - * \brief Structure of the key schedule for SKINNY-128-128. - */ -typedef struct -{ - /** Words of the key schedule */ - uint32_t k[SKINNY_128_128_ROUNDS * 2]; - -} skinny_128_128_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-128. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. 
- * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - #ifdef __cplusplus } #endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/internal-util.h b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/internal-util.h +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/skinny-aead.c b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/skinny-aead.c index 2bb37e9..7558527 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/skinny-aead.c +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk29664v1/rhys/skinny-aead.c @@ -105,11 +105,12 @@ static void skinny_aead_128_384_init (skinny_128_384_key_schedule_t *ks, const unsigned char *key, const unsigned char *nonce, unsigned nonce_len) { - unsigned char k[32]; - memcpy(k, nonce, nonce_len); - memset(k + nonce_len, 0, 16 - nonce_len); - memcpy(k + 16, key, 16); - skinny_128_384_init(ks, k, 32); + unsigned char k[48]; + memset(k, 0, 16); + memcpy(k + 16, nonce, nonce_len); + memset(k + 16 + nonce_len, 0, 16 - nonce_len); + memcpy(k + 32, key, 16); + skinny_128_384_init(ks, k); } /** @@ -136,7 +137,7 @@ static void skinny_aead_128_384_init #define skinny_aead_128_384_update_lfsr(lfsr) \ do { \ uint8_t feedback = ((lfsr) & (1ULL << 63)) ? 
0x1B : 0x00; \ - (lfsr) = ((lfsr) << 1) | feedback; \ + (lfsr) = ((lfsr) << 1) ^ feedback; \ } while (0) /** @@ -520,7 +521,7 @@ static void skinny_aead_128_256_init memset(k, 0, 16 - nonce_len); memcpy(k + 16 - nonce_len, nonce, nonce_len); memcpy(k + 16, key, 16); - skinny_128_256_init(ks, k, 32); + skinny_128_256_init(ks, k); } /** diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/aead-common.c b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/aead-common.h b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. 
Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/api.h b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/encrypt.c b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/encrypt.c deleted file mode 100644 index 00e9d2e..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "skinny-aead.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return skinny_aead_m1_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return skinny_aead_m1_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-skinny128-avr.S b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-skinny128-avr.S deleted file mode 100644 index d342cd5..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-skinny128-avr.S +++ /dev/null @@ -1,10099 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 256 -table_0: - .byte 101 - .byte 76 - .byte 106 - .byte 66 - .byte 75 - .byte 99 - .byte 67 - .byte 107 - .byte 85 - .byte 117 - .byte 90 - .byte 122 - .byte 83 - .byte 115 - .byte 91 - .byte 123 - .byte 53 - .byte 140 - .byte 58 - .byte 129 - .byte 137 - .byte 51 - .byte 128 - .byte 59 - .byte 149 - .byte 37 - .byte 152 - .byte 42 - .byte 144 - .byte 35 - .byte 153 - .byte 43 - .byte 229 - .byte 204 - .byte 232 - .byte 193 - .byte 201 - .byte 224 - .byte 192 - .byte 233 - .byte 213 - .byte 245 - .byte 216 - .byte 248 - .byte 208 - .byte 240 - .byte 217 - .byte 249 - .byte 165 - .byte 28 - .byte 168 - .byte 18 - .byte 27 - .byte 160 - .byte 19 - .byte 169 - .byte 5 - .byte 181 - .byte 10 - .byte 184 - .byte 3 - .byte 176 - .byte 11 - .byte 185 - .byte 50 - .byte 136 - .byte 60 - .byte 133 - .byte 141 - .byte 52 - .byte 132 - .byte 61 - .byte 145 - .byte 34 - .byte 156 - .byte 44 - .byte 148 - .byte 36 - .byte 157 - .byte 45 - .byte 98 - .byte 74 - .byte 108 - .byte 69 - .byte 77 - .byte 100 - .byte 68 - .byte 109 - .byte 82 - .byte 114 - .byte 92 - .byte 124 - .byte 84 - .byte 116 - .byte 93 - .byte 125 - .byte 161 - .byte 26 - .byte 172 - .byte 21 - .byte 29 - .byte 164 - .byte 20 - .byte 173 - .byte 2 - .byte 177 - .byte 12 - .byte 188 - .byte 4 - 
.byte 180 - .byte 13 - .byte 189 - .byte 225 - .byte 200 - .byte 236 - .byte 197 - .byte 205 - .byte 228 - .byte 196 - .byte 237 - .byte 209 - .byte 241 - .byte 220 - .byte 252 - .byte 212 - .byte 244 - .byte 221 - .byte 253 - .byte 54 - .byte 142 - .byte 56 - .byte 130 - .byte 139 - .byte 48 - .byte 131 - .byte 57 - .byte 150 - .byte 38 - .byte 154 - .byte 40 - .byte 147 - .byte 32 - .byte 155 - .byte 41 - .byte 102 - .byte 78 - .byte 104 - .byte 65 - .byte 73 - .byte 96 - .byte 64 - .byte 105 - .byte 86 - .byte 118 - .byte 88 - .byte 120 - .byte 80 - .byte 112 - .byte 89 - .byte 121 - .byte 166 - .byte 30 - .byte 170 - .byte 17 - .byte 25 - .byte 163 - .byte 16 - .byte 171 - .byte 6 - .byte 182 - .byte 8 - .byte 186 - .byte 0 - .byte 179 - .byte 9 - .byte 187 - .byte 230 - .byte 206 - .byte 234 - .byte 194 - .byte 203 - .byte 227 - .byte 195 - .byte 235 - .byte 214 - .byte 246 - .byte 218 - .byte 250 - .byte 211 - .byte 243 - .byte 219 - .byte 251 - .byte 49 - .byte 138 - .byte 62 - .byte 134 - .byte 143 - .byte 55 - .byte 135 - .byte 63 - .byte 146 - .byte 33 - .byte 158 - .byte 46 - .byte 151 - .byte 39 - .byte 159 - .byte 47 - .byte 97 - .byte 72 - .byte 110 - .byte 70 - .byte 79 - .byte 103 - .byte 71 - .byte 111 - .byte 81 - .byte 113 - .byte 94 - .byte 126 - .byte 87 - .byte 119 - .byte 95 - .byte 127 - .byte 162 - .byte 24 - .byte 174 - .byte 22 - .byte 31 - .byte 167 - .byte 23 - .byte 175 - .byte 1 - .byte 178 - .byte 14 - .byte 190 - .byte 7 - .byte 183 - .byte 15 - .byte 191 - .byte 226 - .byte 202 - .byte 238 - .byte 198 - .byte 207 - .byte 231 - .byte 199 - .byte 239 - .byte 210 - .byte 242 - .byte 222 - .byte 254 - .byte 215 - .byte 247 - .byte 223 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 256 -table_1: - .byte 172 - .byte 232 - .byte 104 - .byte 60 - .byte 108 - .byte 56 - .byte 168 - .byte 236 - .byte 170 - .byte 174 - .byte 58 - .byte 62 - .byte 106 - .byte 110 - .byte 234 - .byte 238 - .byte 166 - .byte 163 - .byte 51 - .byte 54 - .byte 102 - .byte 99 - .byte 227 - .byte 230 - .byte 225 - .byte 164 - .byte 97 - .byte 52 - .byte 49 - .byte 100 - .byte 161 - .byte 228 - .byte 141 - .byte 201 - .byte 73 - .byte 29 - .byte 77 - .byte 25 - .byte 137 - .byte 205 - .byte 139 - .byte 143 - .byte 27 - .byte 31 - .byte 75 - .byte 79 - .byte 203 - .byte 207 - .byte 133 - .byte 192 - .byte 64 - .byte 21 - .byte 69 - .byte 16 - .byte 128 - .byte 197 - .byte 130 - .byte 135 - .byte 18 - .byte 23 - .byte 66 - .byte 71 - .byte 194 - .byte 199 - .byte 150 - .byte 147 - .byte 3 - .byte 6 - .byte 86 - .byte 83 - .byte 211 - .byte 214 - .byte 209 - .byte 148 - .byte 81 - .byte 4 - .byte 1 - .byte 84 - .byte 145 - .byte 212 - .byte 156 - .byte 216 - .byte 88 - .byte 12 - .byte 92 - .byte 8 - .byte 152 - .byte 220 - .byte 154 - .byte 158 - .byte 10 - .byte 14 - .byte 90 - .byte 94 - .byte 218 - .byte 222 - .byte 149 - .byte 208 - .byte 80 - .byte 5 - .byte 85 - .byte 0 - .byte 144 - .byte 213 - .byte 146 - .byte 151 - .byte 2 - .byte 7 - .byte 82 - .byte 87 - .byte 210 - .byte 215 - .byte 157 - .byte 217 - .byte 89 - .byte 13 - .byte 93 - .byte 9 - .byte 153 - .byte 221 - .byte 155 - .byte 159 - .byte 11 - .byte 15 - .byte 91 - .byte 95 - .byte 219 - .byte 223 - .byte 22 - .byte 19 - .byte 131 - .byte 134 - .byte 70 - .byte 67 - .byte 195 - .byte 198 - .byte 65 - .byte 20 - .byte 193 - .byte 132 - .byte 17 - .byte 68 - .byte 129 - .byte 196 - .byte 28 - .byte 72 - .byte 200 - .byte 140 - .byte 76 - .byte 24 - .byte 136 - .byte 
204 - .byte 26 - .byte 30 - .byte 138 - .byte 142 - .byte 74 - .byte 78 - .byte 202 - .byte 206 - .byte 53 - .byte 96 - .byte 224 - .byte 165 - .byte 101 - .byte 48 - .byte 160 - .byte 229 - .byte 50 - .byte 55 - .byte 162 - .byte 167 - .byte 98 - .byte 103 - .byte 226 - .byte 231 - .byte 61 - .byte 105 - .byte 233 - .byte 173 - .byte 109 - .byte 57 - .byte 169 - .byte 237 - .byte 59 - .byte 63 - .byte 171 - .byte 175 - .byte 107 - .byte 111 - .byte 235 - .byte 239 - .byte 38 - .byte 35 - .byte 179 - .byte 182 - .byte 118 - .byte 115 - .byte 243 - .byte 246 - .byte 113 - .byte 36 - .byte 241 - .byte 180 - .byte 33 - .byte 116 - .byte 177 - .byte 244 - .byte 44 - .byte 120 - .byte 248 - .byte 188 - .byte 124 - .byte 40 - .byte 184 - .byte 252 - .byte 42 - .byte 46 - .byte 186 - .byte 190 - .byte 122 - .byte 126 - .byte 250 - .byte 254 - .byte 37 - .byte 112 - .byte 240 - .byte 181 - .byte 117 - .byte 32 - .byte 176 - .byte 245 - .byte 34 - .byte 39 - .byte 178 - .byte 183 - .byte 114 - .byte 119 - .byte 242 - .byte 247 - .byte 45 - .byte 121 - .byte 249 - .byte 189 - .byte 125 - .byte 41 - .byte 185 - .byte 253 - .byte 43 - .byte 47 - .byte 187 - .byte 191 - .byte 123 - .byte 127 - .byte 251 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_2, @object - .size table_2, 256 -table_2: - .byte 0 - .byte 2 - .byte 4 - .byte 6 - .byte 8 - .byte 10 - .byte 12 - .byte 14 - .byte 16 - .byte 18 - .byte 20 - .byte 22 - .byte 24 - .byte 26 - .byte 28 - .byte 30 - .byte 32 - .byte 34 - .byte 36 - .byte 38 - .byte 40 - .byte 42 - .byte 44 - .byte 46 - .byte 48 - .byte 50 - .byte 52 - .byte 54 - .byte 56 - .byte 58 - .byte 60 - .byte 62 - .byte 65 - .byte 67 - .byte 69 - .byte 71 - .byte 73 - .byte 75 - .byte 77 - .byte 79 - .byte 81 - .byte 83 - .byte 85 - .byte 87 - .byte 89 - .byte 91 - .byte 93 - .byte 95 - .byte 97 - .byte 99 - .byte 101 - .byte 103 - .byte 105 - .byte 107 - .byte 109 - .byte 111 - .byte 113 - .byte 115 - .byte 117 - .byte 119 - .byte 121 - .byte 123 - .byte 125 - .byte 127 - .byte 128 - .byte 130 - .byte 132 - .byte 134 - .byte 136 - .byte 138 - .byte 140 - .byte 142 - .byte 144 - .byte 146 - .byte 148 - .byte 150 - .byte 152 - .byte 154 - .byte 156 - .byte 158 - .byte 160 - .byte 162 - .byte 164 - .byte 166 - .byte 168 - .byte 170 - .byte 172 - .byte 174 - .byte 176 - .byte 178 - .byte 180 - .byte 182 - .byte 184 - .byte 186 - .byte 188 - .byte 190 - .byte 193 - .byte 195 - .byte 197 - .byte 199 - .byte 201 - .byte 203 - .byte 205 - .byte 207 - .byte 209 - .byte 211 - .byte 213 - .byte 215 - .byte 217 - .byte 219 - .byte 221 - .byte 223 - .byte 225 - .byte 227 - .byte 229 - .byte 231 - .byte 233 - .byte 235 - .byte 237 - .byte 239 - .byte 241 - .byte 243 - .byte 245 - .byte 247 - .byte 249 - .byte 251 - .byte 253 - .byte 255 - .byte 1 - .byte 3 - .byte 5 - .byte 7 - .byte 9 - .byte 11 - .byte 13 - .byte 15 - .byte 17 - .byte 19 - .byte 21 - .byte 23 - .byte 25 - .byte 27 - .byte 29 - .byte 31 - .byte 33 - .byte 35 - .byte 37 - .byte 39 - .byte 41 - .byte 43 - .byte 45 - .byte 47 - .byte 49 - .byte 51 - .byte 53 - .byte 55 - .byte 57 - .byte 59 - .byte 61 - .byte 63 - .byte 64 - .byte 66 - .byte 68 - .byte 70 - .byte 72 - .byte 74 - .byte 76 - .byte 78 - .byte 80 - .byte 82 - .byte 84 - .byte 86 - .byte 88 - .byte 90 - .byte 92 - .byte 94 - .byte 96 - .byte 98 - .byte 100 - .byte 102 - .byte 104 - .byte 106 - .byte 108 - .byte 110 - .byte 112 - .byte 114 - .byte 116 - .byte 118 - .byte 120 - .byte 122 - .byte 124 - .byte 126 - .byte 129 - .byte 131 - 
.byte 133 - .byte 135 - .byte 137 - .byte 139 - .byte 141 - .byte 143 - .byte 145 - .byte 147 - .byte 149 - .byte 151 - .byte 153 - .byte 155 - .byte 157 - .byte 159 - .byte 161 - .byte 163 - .byte 165 - .byte 167 - .byte 169 - .byte 171 - .byte 173 - .byte 175 - .byte 177 - .byte 179 - .byte 181 - .byte 183 - .byte 185 - .byte 187 - .byte 189 - .byte 191 - .byte 192 - .byte 194 - .byte 196 - .byte 198 - .byte 200 - .byte 202 - .byte 204 - .byte 206 - .byte 208 - .byte 210 - .byte 212 - .byte 214 - .byte 216 - .byte 218 - .byte 220 - .byte 222 - .byte 224 - .byte 226 - .byte 228 - .byte 230 - .byte 232 - .byte 234 - .byte 236 - .byte 238 - .byte 240 - .byte 242 - .byte 244 - .byte 246 - .byte 248 - .byte 250 - .byte 252 - .byte 254 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_3, @object - .size table_3, 256 -table_3: - .byte 0 - .byte 128 - .byte 1 - .byte 129 - .byte 2 - .byte 130 - .byte 3 - .byte 131 - .byte 4 - .byte 132 - .byte 5 - .byte 133 - .byte 6 - .byte 134 - .byte 7 - .byte 135 - .byte 8 - .byte 136 - .byte 9 - .byte 137 - .byte 10 - .byte 138 - .byte 11 - .byte 139 - .byte 12 - .byte 140 - .byte 13 - .byte 141 - .byte 14 - .byte 142 - .byte 15 - .byte 143 - .byte 16 - .byte 144 - .byte 17 - .byte 145 - .byte 18 - .byte 146 - .byte 19 - .byte 147 - .byte 20 - .byte 148 - .byte 21 - .byte 149 - .byte 22 - .byte 150 - .byte 23 - .byte 151 - .byte 24 - .byte 152 - .byte 25 - .byte 153 - .byte 26 - .byte 154 - .byte 27 - .byte 155 - .byte 28 - .byte 156 - .byte 29 - .byte 157 - .byte 30 - .byte 158 - .byte 31 - .byte 159 - .byte 160 - .byte 32 - .byte 161 - .byte 33 - .byte 162 - .byte 34 - .byte 163 - .byte 35 - .byte 164 - .byte 36 - .byte 165 - .byte 37 - .byte 166 - .byte 38 - .byte 167 - .byte 39 - .byte 168 - .byte 40 - .byte 169 - .byte 41 - .byte 170 - .byte 42 - .byte 171 - .byte 43 - .byte 172 - .byte 44 - .byte 173 - .byte 45 - .byte 174 - .byte 46 - .byte 175 - .byte 47 - .byte 176 - .byte 48 - .byte 177 - .byte 49 - .byte 178 - .byte 50 - .byte 179 - .byte 51 - .byte 180 - .byte 52 - .byte 181 - .byte 53 - .byte 182 - .byte 54 - .byte 183 - .byte 55 - .byte 184 - .byte 56 - .byte 185 - .byte 57 - .byte 186 - .byte 58 - .byte 187 - .byte 59 - .byte 188 - .byte 60 - .byte 189 - .byte 61 - .byte 190 - .byte 62 - .byte 191 - .byte 63 - .byte 64 - .byte 192 - .byte 65 - .byte 193 - .byte 66 - .byte 194 - .byte 67 - .byte 195 - .byte 68 - .byte 196 - .byte 69 - .byte 197 - .byte 70 - .byte 198 - .byte 71 - .byte 199 - .byte 72 - .byte 200 - .byte 73 - .byte 201 - .byte 74 - .byte 202 - .byte 75 - .byte 203 - .byte 76 - .byte 204 - .byte 77 - .byte 205 - .byte 78 - .byte 206 - .byte 79 - .byte 207 - .byte 80 - .byte 208 - .byte 81 - .byte 209 - .byte 82 - .byte 210 - .byte 83 - .byte 211 - .byte 84 - .byte 212 - .byte 85 - .byte 213 - .byte 86 - .byte 214 - .byte 87 - .byte 215 - .byte 88 - .byte 216 - .byte 89 - .byte 217 - .byte 90 - .byte 218 - .byte 91 - .byte 219 - .byte 92 - .byte 220 - .byte 93 - .byte 221 - .byte 94 - .byte 222 - .byte 95 - .byte 223 - .byte 224 - .byte 96 - .byte 225 - .byte 97 - .byte 226 - .byte 98 - .byte 227 - .byte 99 - .byte 228 - .byte 100 - .byte 229 - .byte 101 - .byte 230 - .byte 102 - .byte 231 - .byte 103 - .byte 232 - .byte 104 - .byte 233 - .byte 105 - .byte 234 - .byte 106 - .byte 235 - .byte 107 - .byte 236 - .byte 108 - .byte 237 - .byte 109 - .byte 238 - .byte 110 - .byte 239 - .byte 111 - .byte 240 - .byte 112 - .byte 241 - .byte 113 - .byte 242 - .byte 114 - .byte 243 - .byte 115 - .byte 244 - .byte 116 - 
.byte 245 - .byte 117 - .byte 246 - .byte 118 - .byte 247 - .byte 119 - .byte 248 - .byte 120 - .byte 249 - .byte 121 - .byte 250 - .byte 122 - .byte 251 - .byte 123 - .byte 252 - .byte 124 - .byte 253 - .byte 125 - .byte 254 - .byte 126 - .byte 255 - .byte 127 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_4, @object - .size table_4, 112 -table_4: - .byte 1 - .byte 0 - .byte 3 - .byte 0 - .byte 7 - .byte 0 - .byte 15 - .byte 0 - .byte 15 - .byte 1 - .byte 14 - .byte 3 - .byte 13 - .byte 3 - .byte 11 - .byte 3 - .byte 7 - .byte 3 - .byte 15 - .byte 2 - .byte 14 - .byte 1 - .byte 12 - .byte 3 - .byte 9 - .byte 3 - .byte 3 - .byte 3 - .byte 7 - .byte 2 - .byte 14 - .byte 0 - .byte 13 - .byte 1 - .byte 10 - .byte 3 - .byte 5 - .byte 3 - .byte 11 - .byte 2 - .byte 6 - .byte 1 - .byte 12 - .byte 2 - .byte 8 - .byte 1 - .byte 0 - .byte 3 - .byte 1 - .byte 2 - .byte 2 - .byte 0 - .byte 5 - .byte 0 - .byte 11 - .byte 0 - .byte 7 - .byte 1 - .byte 14 - .byte 2 - .byte 12 - .byte 1 - .byte 8 - .byte 3 - .byte 1 - .byte 3 - .byte 3 - .byte 2 - .byte 6 - .byte 0 - .byte 13 - .byte 0 - .byte 11 - .byte 1 - .byte 6 - .byte 3 - .byte 13 - .byte 2 - .byte 10 - .byte 1 - .byte 4 - .byte 3 - .byte 9 - .byte 2 - .byte 2 - .byte 1 - .byte 4 - .byte 2 - .byte 8 - .byte 0 - .byte 1 - .byte 1 - .byte 2 - .byte 2 - .byte 4 - .byte 0 - .byte 9 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 2 - .byte 12 - .byte 0 - .byte 9 - .byte 1 - .byte 2 - .byte 3 - .byte 5 - .byte 2 - .byte 10 - .byte 0 - - .text -.global skinny_128_384_init - .type skinny_128_384_init, @function -skinny_128_384_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,12 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_384_init, .-skinny_128_384_init - - .text -.global skinny_128_384_encrypt - .type skinny_128_384_encrypt, @function -skinny_128_384_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - std Y+33,r18 - std Y+34,r19 - std Y+35,r20 - std Y+36,r21 - ldd r18,Z+36 - ldd r19,Z+37 - ldd r20,Z+38 - ldd r21,Z+39 - std Y+37,r18 - std Y+38,r19 - std Y+39,r20 - std Y+40,r21 - ldd r18,Z+40 - ldd r19,Z+41 - ldd 
r20,Z+42 - ldd r21,Z+43 - std Y+41,r18 - std Y+42,r19 - std Y+43,r20 - std Y+44,r21 - ldd r18,Z+44 - ldd r19,Z+45 - ldd r20,Z+46 - ldd r21,Z+47 - std Y+45,r18 - std Y+46,r19 - std Y+47,r20 - std Y+48,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -114: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if 
defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 
-#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov 
r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if 
defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 
-#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor 
r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,112 - brne 5721f - rjmp 790f -5721: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z 
-#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 114b -790: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_encrypt, .-skinny_128_384_encrypt - -.global skinny_128_384_encrypt_tk_full - .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt - - .text -.global skinny_128_384_decrypt - .type skinny_128_384_decrypt, @function -skinny_128_384_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r23 - std Y+2,r2 - std Y+3,r21 - std Y+4,r20 - std Y+5,r3 - std Y+6,r18 - std Y+7,r19 - std Y+8,r22 - std Y+9,r9 - std Y+10,r10 - std Y+11,r7 - std Y+12,r6 - std Y+13,r11 - std Y+14,r4 - std Y+15,r5 - std Y+16,r8 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r23 - std Y+18,r2 - std Y+19,r21 - std Y+20,r20 - std Y+21,r3 - std Y+22,r18 - std Y+23,r19 - std Y+24,r22 - std Y+25,r9 - std Y+26,r10 - std Y+27,r7 - std Y+28,r6 - std Y+29,r11 - std Y+30,r4 - std Y+31,r5 - std Y+32,r8 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r22,Z+36 - ldd r23,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+33,r23 - std Y+34,r2 - std Y+35,r21 - std Y+36,r20 - std Y+37,r3 - std Y+38,r18 - std Y+39,r19 - std Y+40,r22 - std Y+41,r9 - std Y+42,r10 - std Y+43,r7 - std Y+44,r6 - std Y+45,r11 - std Y+46,r4 - std Y+47,r5 - std Y+48,r8 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -122: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z 
-#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 122b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,28 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -150: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 150b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 -179: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 
-#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 179b - std Y+33,r12 - std Y+34,r13 - std Y+35,r14 - std Y+36,r15 - std Y+37,r24 - std Y+38,r25 - std Y+39,r16 - std Y+40,r17 - ldi r26,28 - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 -207: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 207b - std Y+41,r12 - std Y+42,r13 - std Y+43,r14 - std Y+44,r15 - std Y+45,r24 - std Y+46,r25 - std Y+47,r16 - std Y+48,r17 - ldi r26,112 -227: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm 
r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd 
r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi 
r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std 
Y+47,r25 - std Y+48,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif 
defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm 
r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi 
r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm 
r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 903f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 227b -903: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_decrypt, .-skinny_128_384_decrypt - - .text -.global skinny_128_256_init - .type skinny_128_256_init, @function -skinny_128_256_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,8 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_256_init, .-skinny_128_256_init - - .text -.global 
skinny_128_256_encrypt - .type skinny_128_256_encrypt, @function -skinny_128_256_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -82: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - 
lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - 
lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld 
r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif 
defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - 
ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z 
-#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,96 - breq 594f - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld 
r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 82b -594: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_encrypt, .-skinny_128_256_encrypt - -.global skinny_128_256_encrypt_tk_full - .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt - - .text -.global skinny_128_256_decrypt - .type skinny_128_256_decrypt, @function -skinny_128_256_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - std Y+5,r22 - std Y+6,r23 - std Y+7,r2 - std Y+8,r3 - std Y+9,r4 - std Y+10,r5 - std Y+11,r6 - std Y+12,r7 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - std Y+21,r22 - std Y+22,r23 - std Y+23,r2 - std Y+24,r3 - std Y+25,r4 - std Y+26,r5 - std Y+27,r6 - std Y+28,r7 - std Y+29,r8 - std Y+30,r9 - std Y+31,r10 - std Y+32,r11 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,24 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -90: - mov r30,r12 
-#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 90b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,24 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -118: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 118b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,96 -139: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 
- ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov 
r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd 
r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - 
ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - 
mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 651f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 139b -651: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_decrypt, .-skinny_128_256_decrypt - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-skinny128.c b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-skinny128.c deleted file mode 100644 index 579ced1..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-skinny128.c +++ /dev/null @@ -1,801 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-skinny128.h" -#include "internal-skinnyutil.h" -#include "internal-util.h" -#include - -#if !defined(__AVR__) - -STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) -{ - /* This function is used to fast-forward the TK1 tweak value - * to the value at the end of the key schedule for decryption. - * - * The tweak permutation repeats every 16 rounds, so SKINNY-128-256 - * with 48 rounds does not need any fast forwarding applied. - * SKINNY-128-128 with 40 rounds and SKINNY-128-384 with 56 rounds - * are equivalent to applying the permutation 8 times: - * - * PT*8 = [5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12] - */ - uint32_t row0 = tk[0]; - uint32_t row1 = tk[1]; - uint32_t row2 = tk[2]; - uint32_t row3 = tk[3]; - tk[0] = ((row1 >> 8) & 0x0000FFFFU) | - ((row0 >> 8) & 0x00FF0000U) | - ((row0 << 8) & 0xFF000000U); - tk[1] = ((row1 >> 24) & 0x000000FFU) | - ((row0 << 8) & 0x00FFFF00U) | - ((row1 << 24) & 0xFF000000U); - tk[2] = ((row3 >> 8) & 0x0000FFFFU) | - ((row2 >> 8) & 0x00FF0000U) | - ((row2 << 8) & 0xFF000000U); - tk[3] = ((row3 >> 24) & 0x000000FFU) | - ((row2 << 8) & 0x00FFFF00U) | - ((row3 << 24) & 0xFF000000U); -} - -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); - memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); -#else - /* Set the initial states of TK1, TK2, and TK3 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Set up the key schedule using TK2 and TK3. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. 
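The key-schedule loop above folds a 6-bit LFSR-generated round constant into the two subkey words of each round. A minimal self-contained sketch of that generator, together with the reverse step that the small-schedule decryption paths further down use to walk the constants backwards, is shown here for reference; the values it prints, 01 03 07 0F 1F 3E 3D ..., are the SKINNY round constants.

    /* Sketch only, not part of the patch: the 6-bit round-constant LFSR
     * used by the key schedule above, plus the reverse step used by the
     * small-schedule decrypt paths below. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint8_t rc_forward(uint8_t rc)
    {
        /* Same update as in skinny_128_384_init(): shift left, feed
         * bit5 ^ bit4 ^ 1 back into bit 0, keep only 6 bits. */
        rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
        return rc & 0x3F;
    }

    static uint8_t rc_reverse(uint8_t rc)
    {
        /* Reverse step as written in skinny_128_384_decrypt() and
         * skinny_128_256_decrypt() for the small-schedule case. */
        return (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20);
    }

    int main(void)
    {
        uint8_t rc = 0;
        int i;
        for (i = 0; i < 8; ++i) {
            rc = rc_forward(rc);
            printf("%02X ", rc);    /* 01 03 07 0F 1F 3E 3D 3B */
        }
        printf("\n");
        for (i = 0; i < 64; ++i)    /* the two steps are inverses */
            assert(rc_reverse(rc_forward((uint8_t)i)) == (uint8_t)i);
        return 0;
    }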
- * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ TK3[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ TK3[1] ^ (rc >> 4); - - /* Permute TK2 and TK3 for the next round */ - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - - /* Apply the LFSR's to TK2 and TK3 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } -#endif -} - -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
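Each state row is held in a little-endian 32-bit word, so rotating the row's cells one, two, or three places to the right is done with a left rotate of the word by 8, 16, or 24 bits, exactly as the comment above describes. A small sketch of that packing trick, assuming leftRotate8() from internal-util.h behaves as a plain 32-bit left rotation (its definition is not part of this hunk):

    /* Sketch only: why a 32-bit left rotate moves little-endian packed
     * cells one position to the right in the ShiftRows step. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t rotl32(uint32_t x, unsigned bits)
    {
        /* Assumed equivalent to leftRotate8/16/24 in internal-util.h */
        return (x << bits) | (x >> (32u - bits));
    }

    int main(void)
    {
        /* Row cells c0..c3, packed as by le_load_word32() */
        uint8_t cells[4] = { 0x10, 0x20, 0x30, 0x40 };
        uint32_t row = (uint32_t)cells[0] | ((uint32_t)cells[1] << 8) |
                       ((uint32_t)cells[2] << 16) | ((uint32_t)cells[3] << 24);
        uint32_t shifted = rotl32(row, 8);  /* s1 = leftRotate8(s1) */
        int i;
        for (i = 0; i < 4; ++i)             /* prints 40 10 20 30 */
            printf("%02X ", (unsigned)((shifted >> (8 * i)) & 0xFF));
        printf("\n");
        return 0;
    }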
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0x15; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Permute TK1 to fast-forward it to the end of the key schedule */ - skinny128_fast_forward_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_fast_forward_tk(TK2); - skinny128_fast_forward_tk(TK3); - for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
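The loop above runs SKINNY_128_384_ROUNDS / 2 = 28 times and steps every byte of TK2 and TK3 once per iteration, which matches the number of LFSR applications each byte receives across the 56 encryption rounds (the tweakey permutation swaps the two halves every round, so each byte is updated in every second round). The decryption rounds below then walk the tweakey backwards, applying skinny128_LFSR3 to TK2 and skinny128_LFSR2 to TK3, because the two LFSRs are inverses of one another. A byte-level sketch of that relationship, restated from the SKINNY specification (the word-sliced macros live in internal-skinnyutil.h and are not shown in this hunk):

    /* Sketch only: byte-wise LFSR2 and LFSR3 from the SKINNY spec; each
     * undoes the other, which is what the decrypt rounds below rely on. */
    #include <assert.h>
    #include <stdint.h>

    static uint8_t lfsr2(uint8_t x)
    {
        /* Shift left, feed x7 ^ x5 into bit 0 (TK2, forward direction) */
        return (uint8_t)((x << 1) | (((x >> 7) ^ (x >> 5)) & 1));
    }

    static uint8_t lfsr3(uint8_t x)
    {
        /* Shift right, feed x0 ^ x6 into bit 7 (TK3, forward direction) */
        return (uint8_t)((x >> 1) | ((((x >> 0) ^ (x >> 6)) & 1) << 7));
    }

    int main(void)
    {
        unsigned i;
        for (i = 0; i < 256; ++i) {
            assert(lfsr3(lfsr2((uint8_t)i)) == (uint8_t)i);
            assert(lfsr2(lfsr3((uint8_t)i)) == (uint8_t)i);
        }
        return 0;
    }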
- skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - skinny128_LFSR3(TK3[2]); - skinny128_LFSR3(TK3[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_inv_permute_tk(TK3); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); - skinny128_LFSR2(TK3[2]); - skinny128_LFSR2(TK3[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); - TK2[0] = le_load_word32(tk2); - TK2[1] = le_load_word32(tk2 + 4); - TK2[2] = le_load_word32(tk2 + 8); - TK2[3] = le_load_word32(tk2 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; - s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK3); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
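Immediately after the rotation step, each of these round functions mixes the columns with the same word sequence: s1 ^= s2; s2 ^= s0; temp = s3 ^ s2; followed by renaming the rows. That sequence is SKINNY's binary MixColumns matrix expressed on whole rows. A short self-contained check of the equivalence (the matrix is restated from the SKINNY paper; it is not written out in this hunk):

    /* Sketch only: the mix/rename sequence used by the round functions
     * above equals SKINNY's MixColumns matrix applied to the four rows. */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Arbitrary row values; the identity below is linear, so any do */
        uint32_t r0 = 0x01234567, r1 = 0x89ABCDEF;
        uint32_t r2 = 0x0F1E2D3C, r3 = 0xC0FFEE00;

        /* Sequence as written in the deleted round functions */
        uint32_t s0 = r0, s1 = r1, s2 = r2, s3 = r3, temp;
        s1 ^= s2;
        s2 ^= s0;
        temp = s3 ^ s2;
        s3 = s2;
        s2 = s1;
        s1 = s0;
        s0 = temp;

        /* Row-wise MixColumns from the SKINNY specification */
        assert(s0 == (r0 ^ r2 ^ r3));
        assert(s1 == r0);
        assert(s2 == (r1 ^ r2));
        assert(s3 == (r0 ^ r2));
        return 0;
    }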
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); -#else - /* Set the initial states of TK1 and TK2 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Set up the key schedule using TK2. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ (rc >> 4); - - /* Permute TK2 for the next round */ - skinny128_permute_tk(TK2); - - /* Apply the LFSR to TK2 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } -#endif -} - -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0x09; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1. 
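The comment continued just below, inside skinny_128_256_decrypt(), notes that TK1 needs no fast-forwarding because the 48-round schedule ends where it started; that follows from the tweakey cell permutation PT having order 16, with 48 a multiple of 16. The same composition also produces the PT^8 array quoted in skinny128_fast_forward_tk() near the top of this file. A small sketch that verifies both facts; the PT table is taken from the SKINNY specification and is an assumption here, since this hunk never spells it out:

    /* Sketch only: compose the SKINNY tweakey cell permutation PT with
     * itself.  PT^8 matches the fast-forward permutation quoted above,
     * and PT^16 is the identity, so 48 rounds leave TK1 unchanged. */
    #include <assert.h>
    #include <string.h>

    static void compose(unsigned char out[16], const unsigned char a[16],
                        const unsigned char b[16])
    {
        int i;
        for (i = 0; i < 16; ++i)
            out[i] = a[b[i]];
    }

    int main(void)
    {
        static const unsigned char PT[16] = {
            9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7
        };
        static const unsigned char PT8_expected[16] = {
            5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12
        };
        unsigned char p[16], tmp[16];
        int i;

        memcpy(p, PT, 16);
        for (i = 1; i < 8; ++i) {       /* p = PT^8 */
            compose(tmp, PT, p);
            memcpy(p, tmp, 16);
        }
        assert(memcmp(p, PT8_expected, 16) == 0);

        compose(tmp, p, p);             /* PT^16 = PT^8 composed with PT^8 */
        for (i = 0; i < 16; ++i)
            assert(tmp[i] == i);        /* identity */
        return 0;
    }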
- * There is no need to fast-forward TK1 because the value at - * the end of the key schedule is the same as at the start */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2. - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
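The SKINNY-128-256 routines in this file (init, encrypt, decrypt, and the tk_full variant) form a small block-cipher API: initialise a key schedule from the 32-byte tweakey (TK1 followed by TK2), then encrypt or decrypt 16-byte blocks. A hypothetical usage sketch; the key and plaintext bytes are made up for illustration and are not test vectors from this patch:

    /* Sketch only: round-trip through the SKINNY-128-256 functions
     * defined above.  Values are arbitrary, not official test vectors. */
    #include <assert.h>
    #include <string.h>
    #include "internal-skinny128.h"

    int main(void)
    {
        unsigned char key[32];              /* TK1 || TK2 */
        unsigned char pt[16], ct[16], out[16];
        skinny_128_256_key_schedule_t ks;
        int i;

        for (i = 0; i < 32; ++i) key[i] = (unsigned char)i;
        for (i = 0; i < 16; ++i) pt[i] = (unsigned char)(0xA0 + i);

        skinny_128_256_init(&ks, key);
        skinny_128_256_encrypt(&ks, ct, pt);
        skinny_128_256_decrypt(&ks, out, ct);
        assert(memcmp(out, pt, 16) == 0);   /* decrypt undoes encrypt */
        return 0;
    }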
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -#else /* __AVR__ */ - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - memcpy(ks->TK2, tk2, 16); - skinny_128_384_encrypt(ks, output, input); -} - -#endif /* __AVR__ */ diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-skinny128.h b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-skinny128.h deleted file mode 100644 index 2bfda3c..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-skinny128.h +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNY128_H -#define LW_INTERNAL_SKINNY128_H - -/** - * \file internal-skinny128.h - * \brief SKINNY-128 block cipher family. - * - * References: https://eprint.iacr.org/2016/660.pdf, - * https://sites.google.com/site/skinnycipher/ - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \def SKINNY_128_SMALL_SCHEDULE - * \brief Defined to 1 to use the small key schedule version of SKINNY-128. - */ -#if defined(__AVR__) -#define SKINNY_128_SMALL_SCHEDULE 1 -#else -#define SKINNY_128_SMALL_SCHEDULE 0 -#endif - -/** - * \brief Size of a block for SKINNY-128 block ciphers. - */ -#define SKINNY_128_BLOCK_SIZE 16 - -/** - * \brief Number of rounds for SKINNY-128-384. - */ -#define SKINNY_128_384_ROUNDS 56 - -/** - * \brief Structure of the key schedule for SKINNY-128-384. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; - - /** TK3 for the small key schedule */ - uint8_t TK3[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_384_ROUNDS * 2]; -#endif - -} skinny_128_384_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-384. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and an explicitly - * provided TK2 value. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tk2 TK2 value that should be updated on the fly. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when both TK1 and TK2 change from block to block. - * When the key is initialized with skinny_128_384_init(), the TK2 part of - * the key value should be set to zero. - * - * \note Some versions of this function may modify the key schedule to - * copy tk2 into place. - */ -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and a - * fully specified tweakey value. - * - * \param key Points to the 384-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-384 but - * more memory-efficient. - */ -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input); - -/** - * \brief Number of rounds for SKINNY-128-256. - */ -#define SKINNY_128_256_ROUNDS 48 - -/** - * \brief Structure of the key schedule for SKINNY-128-256. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_256_ROUNDS * 2]; -#endif - -} skinny_128_256_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256 and a - * fully specified tweakey value. - * - * \param key Points to the 256-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-256 but - * more memory-efficient. - */ -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-skinnyutil.h b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-skinnyutil.h deleted file mode 100644 index 83136cb..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-skinnyutil.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNYUTIL_H -#define LW_INTERNAL_SKINNYUTIL_H - -/** - * \file internal-skinnyutil.h - * \brief Utilities to help implement SKINNY and its variants. - */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @cond skinnyutil */ - -/* Utilities for implementing SKINNY-128 */ - -#define skinny128_LFSR2(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ - (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ - } while (0) - - -#define skinny128_LFSR3(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ - (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) -#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) - -#define skinny128_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint32_t row2 = tk[2]; \ - uint32_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 16) | (row3 >> 16); \ - tk[0] = ((row2 >> 8) & 0x000000FFU) | \ - ((row2 << 16) & 0x00FF0000U) | \ - ( row3 & 0xFF00FF00U); \ - tk[1] = ((row2 >> 16) & 0x000000FFU) | \ - (row2 & 0xFF000000U) | \ - ((row3 << 8) & 0x0000FF00U) | \ - ( row3 & 0x00FF0000U); \ - } while (0) - -#define skinny128_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint32_t row0 = tk[0]; \ - uint32_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 >> 16) & 0x000000FFU) | \ - ((row0 << 8) & 0x0000FF00U) | \ - ((row1 << 16) & 0x00FF0000U) | \ - ( row1 & 0xFF000000U); \ - tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ - ((row0 << 16) & 0xFF000000U) | \ - ((row1 >> 16) & 0x000000FFU) | \ - ((row1 << 8) & 0x00FF0000U); \ - } while (0) - -/* - * Apply the SKINNY sbox. The original version from the specification is - * equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE(x) - * ((((x) & 0x01010101U) << 2) | - * (((x) & 0x06060606U) << 5) | - * (((x) & 0x20202020U) >> 5) | - * (((x) & 0xC8C8C8C8U) >> 2) | - * (((x) & 0x10101010U) >> 1)) - * - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * return SBOX_SWAP(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. 
- */ -#define skinny128_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ - y = (((x >> 5) & (x << 1)) & 0x04040404U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ - x = ((x & 0x08080808U) << 1) | \ - ((x & 0x32323232U) << 2) | \ - ((x & 0x01010101U) << 5) | \ - ((x & 0x80808080U) >> 6) | \ - ((x & 0x40404040U) >> 4) | \ - ((x & 0x04040404U) >> 2); \ -} while (0) - -/* - * Apply the inverse of the SKINNY sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE_INV(x) - * ((((x) & 0x08080808U) << 1) | - * (((x) & 0x32323232U) << 2) | - * (((x) & 0x01010101U) << 5) | - * (((x) & 0xC0C0C0C0U) >> 5) | - * (((x) & 0x04040404U) >> 2)) - * - * x = SBOX_SWAP(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_inv_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ - x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ - y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ - x = ((x & 0x01010101U) << 2) | \ - ((x & 0x04040404U) << 4) | \ - ((x & 0x02020202U) << 6) | \ - ((x & 0x20202020U) >> 5) | \ - ((x & 0xC8C8C8C8U) >> 2) | \ - ((x & 0x10101010U) >> 1); \ -} while (0) - -/* Utilities for implementing SKINNY-64 */ - -#define skinny64_LFSR2(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ - } while (0) - -#define skinny64_LFSR3(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) -#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) - -#define skinny64_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint16_t row2 = tk[2]; \ - uint16_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 8) | (row3 >> 8); \ - tk[0] = ((row2 << 4) & 0xF000U) | \ - ((row2 >> 8) & 0x00F0U) | \ - ( row3 & 0x0F0FU); \ - tk[1] = ((row2 << 8) & 0xF000U) | \ - ((row3 >> 4) & 0x0F00U) | \ - ( row3 & 0x00F0U) | \ - ( row2 & 0x000FU); \ - } while (0) - 
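/*
 * Illustrative sketch (editor-added, not part of the deleted header or of
 * the patch): a minimal round-trip check, assuming the skinny128_sbox and
 * skinny128_inv_sbox macros defined above are in scope, e.g. from a small
 * test file that includes internal-skinnyutil.h.  Since the two macros are
 * exact inverses of each other, applying one and then the other must give
 * back the original word for every 32-bit input.
 */
#include <stdint.h>   /* for uint32_t; already pulled in via internal-util.h */

static int skinny128_sbox_roundtrip(uint32_t x)
{
    uint32_t y = x;
    skinny128_sbox(y);       /* forward S-box applied to all four byte lanes */
    skinny128_inv_sbox(y);   /* inverse S-box should undo it exactly */
    return y == x;           /* expected to be non-zero for every input x */
}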
-#define skinny64_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint16_t row0 = tk[0]; \ - uint16_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 << 8) & 0xF000U) | \ - ((row0 >> 4) & 0x0F00U) | \ - ((row1 >> 8) & 0x00F0U) | \ - ( row1 & 0x000FU); \ - tk[3] = ((row1 << 8) & 0xF000U) | \ - ((row0 << 8) & 0x0F00U) | \ - ((row1 >> 4) & 0x00F0U) | \ - ((row0 >> 8) & 0x000FU); \ - } while (0) - -/* - * Apply the SKINNY-64 sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT(x) - * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_SHIFT steps to be performed with one final rotation. - * This reduces the number of required shift operations from 14 to 10. - * - * We can further reduce the number of NOT operations from 4 to 2 - * using the technique from https://github.com/kste/skinny_avx to - * convert NOR-XOR operations into AND-XOR operations by converting - * the S-box into its NOT-inverse. - */ -#define skinny64_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ - x = ~x; \ - x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ -} while (0) - -/* - * Apply the inverse of the SKINNY-64 sbox. The original version - * from the specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT_INV(x) - * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * return SBOX_MIX(x); - */ -#define skinny64_inv_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = ~x; \ - x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ -} while (0) - -/** @endcond */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-util.h b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - 
(ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. 
This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/skinny-aead.c b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/skinny-aead.c deleted file mode 100644 index 7558527..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/skinny-aead.c +++ /dev/null @@ -1,804 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "skinny-aead.h" -#include "internal-skinny128.h" -#include "internal-util.h" -#include - -aead_cipher_t const skinny_aead_m1_cipher = { - "SKINNY-AEAD-M1", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M1_NONCE_SIZE, - SKINNY_AEAD_M1_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m1_encrypt, - skinny_aead_m1_decrypt -}; - -aead_cipher_t const skinny_aead_m2_cipher = { - "SKINNY-AEAD-M2", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M2_NONCE_SIZE, - SKINNY_AEAD_M2_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m2_encrypt, - skinny_aead_m2_decrypt -}; - -aead_cipher_t const skinny_aead_m3_cipher = { - "SKINNY-AEAD-M3", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M3_NONCE_SIZE, - SKINNY_AEAD_M3_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m3_encrypt, - skinny_aead_m3_decrypt -}; - -aead_cipher_t const skinny_aead_m4_cipher = { - "SKINNY-AEAD-M4", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M4_NONCE_SIZE, - SKINNY_AEAD_M4_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m4_encrypt, - skinny_aead_m4_decrypt -}; - -aead_cipher_t const skinny_aead_m5_cipher = { - "SKINNY-AEAD-M5", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M5_NONCE_SIZE, - SKINNY_AEAD_M5_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m5_encrypt, - skinny_aead_m5_decrypt -}; - -aead_cipher_t const skinny_aead_m6_cipher = { - "SKINNY-AEAD-M6", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M6_NONCE_SIZE, - SKINNY_AEAD_M6_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m6_encrypt, - skinny_aead_m6_decrypt -}; - -/* Domain separator prefixes for all of the SKINNY-AEAD family members */ -#define DOMAIN_SEP_M1 0x00 -#define DOMAIN_SEP_M2 0x10 -#define DOMAIN_SEP_M3 0x08 -#define DOMAIN_SEP_M4 0x18 -#define DOMAIN_SEP_M5 0x10 -#define DOMAIN_SEP_M6 0x18 - -/** - * \brief Initialize the key and nonce for SKINNY-128-384 based AEAD schemes. - * - * \param ks The key schedule to initialize. - * \param key Points to the 16 bytes of the key. - * \param nonce Points to the nonce. - * \param nonce_len Length of the nonce in bytes. - */ -static void skinny_aead_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - const unsigned char *nonce, unsigned nonce_len) -{ - unsigned char k[48]; - memset(k, 0, 16); - memcpy(k + 16, nonce, nonce_len); - memset(k + 16 + nonce_len, 0, 16 - nonce_len); - memcpy(k + 32, key, 16); - skinny_128_384_init(ks, k); -} - -/** - * \brief Set the domain separation value in the tweak for SKINNY-128-384. - * - * \param ks Key schedule for the block cipher. - * \param d Domain separation value to write into the tweak. - */ -#define skinny_aead_128_384_set_domain(ks,d) ((ks)->TK1[15] = (d)) - -/** - * \brief Sets the LFSR field in the tweak for SKINNY-128-384. - * - * \param ks Key schedule for the block cipher. - * \param lfsr 64-bit LFSR value. - */ -#define skinny_aead_128_384_set_lfsr(ks,lfsr) le_store_word64((ks)->TK1, (lfsr)) - -/** - * \brief Updates the LFSR value for SKINNY-128-384. - * - * \param lfsr 64-bit LFSR value to be updated. - */ -#define skinny_aead_128_384_update_lfsr(lfsr) \ - do { \ - uint8_t feedback = ((lfsr) & (1ULL << 63)) ? 0x1B : 0x00; \ - (lfsr) = ((lfsr) << 1) ^ feedback; \ - } while (0) - -/** - * \brief Authenticates the associated data for a SKINNY-128-384 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param tag Final tag to XOR the authentication checksum into. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
- */ -static void skinny_aead_128_384_authenticate - (skinny_128_384_key_schedule_t *ks, unsigned char prefix, - unsigned char tag[SKINNY_128_BLOCK_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint64_t lfsr = 1; - skinny_aead_128_384_set_domain(ks, prefix | 2); - while (adlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_128_384_encrypt(ks, block, ad); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - ad += SKINNY_128_BLOCK_SIZE; - adlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_384_update_lfsr(lfsr); - } - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_aead_128_384_set_domain(ks, prefix | 3); - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, SKINNY_128_BLOCK_SIZE - temp - 1); - skinny_128_384_encrypt(ks, block, block); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - } -} - -/** - * \brief Encrypts the plaintext for a SKINNY-128-384 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the plaintext buffer. - * \param mlen Number of bytes of plaintext to be encrypted. - */ -static void skinny_aead_128_384_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint64_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_384_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_384_set_lfsr(ks, lfsr); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - skinny_128_384_encrypt(ks, c, m); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_384_update_lfsr(lfsr); - } - skinny_aead_128_384_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_384_set_domain(ks, prefix | 1); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_384_encrypt(ks, block, block); - lw_xor_block_2_src(c, block, m, temp); - skinny_aead_128_384_update_lfsr(lfsr); - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_aead_128_384_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_384_set_domain(ks, prefix | 4); - } - skinny_128_384_encrypt(ks, sum, sum); -} - -/** - * \brief Decrypts the ciphertext for a SKINNY-128-384 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the ciphertext buffer. - * \param mlen Number of bytes of ciphertext to be decrypted. 
- */ -static void skinny_aead_128_384_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint64_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_384_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_128_384_decrypt(ks, m, c); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_384_update_lfsr(lfsr); - } - skinny_aead_128_384_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_384_set_domain(ks, prefix | 1); - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_384_encrypt(ks, block, block); - lw_xor_block_2_src(m, block, c, temp); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - skinny_aead_128_384_update_lfsr(lfsr); - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_aead_128_384_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_384_set_domain(ks, prefix | 4); - } - skinny_128_384_encrypt(ks, sum, sum); -} - -int skinny_aead_m1_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M1_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M1_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M1, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M1, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M1_TAG_SIZE); - return 0; -} - -int skinny_aead_m1_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M1_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M1_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M1_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M1, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M1, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M1_TAG_SIZE); -} - -int skinny_aead_m2_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* 
Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M2_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M2_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M2, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M2, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M2_TAG_SIZE); - return 0; -} - -int skinny_aead_m2_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M2_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M2_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M2_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M2, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M2, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M2_TAG_SIZE); -} - -int skinny_aead_m3_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M3_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M3_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M3, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M3, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M3_TAG_SIZE); - return 0; -} - -int skinny_aead_m3_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M3_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M3_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M3_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M3, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M3, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M3_TAG_SIZE); -} - -int skinny_aead_m4_encrypt - (unsigned char *c, 
unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M4_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M4_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M4, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M4, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M4_TAG_SIZE); - return 0; -} - -int skinny_aead_m4_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M4_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M4_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M4_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M4, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M4, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M4_TAG_SIZE); -} - -/** - * \brief Initialize the key and nonce for SKINNY-128-256 based AEAD schemes. - * - * \param ks The key schedule to initialize. - * \param key Points to the 16 bytes of the key. - * \param nonce Points to the nonce. - * \param nonce_len Length of the nonce in bytes. - */ -static void skinny_aead_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - const unsigned char *nonce, unsigned nonce_len) -{ - unsigned char k[32]; - memset(k, 0, 16 - nonce_len); - memcpy(k + 16 - nonce_len, nonce, nonce_len); - memcpy(k + 16, key, 16); - skinny_128_256_init(ks, k); -} - -/** - * \brief Set the domain separation value in the tweak for SKINNY-128-256. - * - * \param ks Key schedule for the block cipher. - * \param d Domain separation value to write into the tweak. - */ -#define skinny_aead_128_256_set_domain(ks,d) ((ks)->TK1[3] = (d)) - -/** - * \brief Sets the LFSR field in the tweak for SKINNY-128-256. - * - * \param ks Key schedule for the block cipher. - * \param lfsr 24-bit LFSR value. - */ -#define skinny_aead_128_256_set_lfsr(ks,lfsr) \ - do { \ - (ks)->TK1[0] = (uint8_t)(lfsr); \ - (ks)->TK1[1] = (uint8_t)((lfsr) >> 8); \ - (ks)->TK1[2] = (uint8_t)((lfsr) >> 16); \ - } while (0) - -/** - * \brief Updates the LFSR value for SKINNY-128-256. - * - * \param lfsr 24-bit LFSR value to be updated. - */ -#define skinny_aead_128_256_update_lfsr(lfsr) \ - do { \ - uint32_t feedback = ((lfsr) & (((uint32_t)1) << 23)) ? 0x1B : 0x00; \ - (lfsr) = ((lfsr) << 1) ^ (feedback); \ - } while (0) - -/** - * \brief Authenticates the associated data for a SKINNY-128-256 based AEAD. - * - * \param ks The key schedule to use. 
- * \param prefix Domain separation prefix for the family member. - * \param tag Final tag to XOR the authentication checksum into. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - */ -static void skinny_aead_128_256_authenticate - (skinny_128_256_key_schedule_t *ks, unsigned char prefix, - unsigned char tag[SKINNY_128_BLOCK_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint32_t lfsr = 1; - skinny_aead_128_256_set_domain(ks, prefix | 2); - while (adlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_128_256_encrypt(ks, block, ad); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - ad += SKINNY_128_BLOCK_SIZE; - adlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_256_update_lfsr(lfsr); - } - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_aead_128_256_set_domain(ks, prefix | 3); - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, SKINNY_128_BLOCK_SIZE - temp - 1); - skinny_128_256_encrypt(ks, block, block); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - } -} - -/** - * \brief Encrypts the plaintext for a SKINNY-128-256 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the plaintext buffer. - * \param mlen Number of bytes of plaintext to be encrypted. - */ -static void skinny_aead_128_256_encrypt - (skinny_128_256_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint32_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_256_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_256_set_lfsr(ks, lfsr); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - skinny_128_256_encrypt(ks, c, m); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_256_update_lfsr(lfsr); - } - skinny_aead_128_256_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_256_set_domain(ks, prefix | 1); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_256_encrypt(ks, block, block); - lw_xor_block_2_src(c, block, m, temp); - skinny_aead_128_256_update_lfsr(lfsr); - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_aead_128_256_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_256_set_domain(ks, prefix | 4); - } - skinny_128_256_encrypt(ks, sum, sum); -} - -/** - * \brief Decrypts the ciphertext for a SKINNY-128-256 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the ciphertext buffer. - * \param mlen Number of bytes of ciphertext to be decrypted. 
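A minimal sketch of the block counter stepped by skinny_aead_128_256_update_lfsr() above: a 24-bit LFSR that starts at 1, shifts left once per block, and XORs in 0x1B whenever bit 23 was set, with only the low three bytes ever copied into TK1. The explicit 24-bit mask and the name lfsr24_step are added for illustration; the macro itself leaves the unused high byte unmasked.

#include <stdint.h>

/* One LFSR step per processed block; the low 24 bits match the
 * sequence produced by the macro above (1, 2, 4, 8, ...). */
static uint32_t lfsr24_step(uint32_t lfsr)
{
    uint32_t feedback = (lfsr & (UINT32_C(1) << 23)) ? 0x1B : 0x00;
    return ((lfsr << 1) ^ feedback) & 0x00FFFFFFu;
}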
- */ -static void skinny_aead_128_256_decrypt - (skinny_128_256_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint32_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_256_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_128_256_decrypt(ks, m, c); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_256_update_lfsr(lfsr); - } - skinny_aead_128_256_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_256_set_domain(ks, prefix | 1); - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_256_encrypt(ks, block, block); - lw_xor_block_2_src(m, block, c, temp); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - skinny_aead_128_256_update_lfsr(lfsr); - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_aead_128_256_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_256_set_domain(ks, prefix | 4); - } - skinny_128_256_encrypt(ks, sum, sum); -} - -int skinny_aead_m5_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M5_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M5_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_256_encrypt(&ks, DOMAIN_SEP_M5, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M5, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M5_TAG_SIZE); - return 0; -} - -int skinny_aead_m5_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M5_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M5_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M5_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_256_decrypt(&ks, DOMAIN_SEP_M5, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M5, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M5_TAG_SIZE); -} - -int skinny_aead_m6_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* 
Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M6_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M6_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_256_encrypt(&ks, DOMAIN_SEP_M6, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M6, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M6_TAG_SIZE); - return 0; -} - -int skinny_aead_m6_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M6_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M6_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M6_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_256_decrypt(&ks, DOMAIN_SEP_M6, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M6, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M6_TAG_SIZE); -} diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/skinny-aead.h b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/skinny-aead.h deleted file mode 100644 index c6b54fb..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys-avr/skinny-aead.h +++ /dev/null @@ -1,518 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SKINNY_AEAD_H -#define LWCRYPTO_SKINNY_AEAD_H - -#include "aead-common.h" - -/** - * \file skinny-aead.h - * \brief Authenticated encryption based on the SKINNY block cipher. - * - * SKINNY-AEAD is a family of authenticated encryption algorithms - * that are built around the SKINNY tweakable block cipher. 
There - * are six members in the family: - * - * \li SKINNY-AEAD-M1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. This is the - * primary member of the family. - * \li SKINNY-AEAD-M2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li SKINNY-AEAD-M3 has a 128-bit key, a 128-bit nonce, and a 64-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li SKINNY-AEAD-M4 has a 128-bit key, a 96-bit nonce, and a 64-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li SKINNY-AEAD-M5 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * \li SKINNY-AEAD-M6 has a 128-bit key, a 96-bit nonce, and a 64-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * - * The SKINNY-AEAD family also includes two hash algorithms: - * - * \li SKINNY-tk3-HASH with a 256-bit hash output, based around the - * SKINNY-128-384 tweakable block cipher. This is the primary hashing - * member of the family. - * \li SKINNY-tk2-HASH with a 256-bit hash output, based around the - * SKINNY-128-256 tweakable block cipher. - * - * References: https://sites.google.com/site/skinnycipher/home - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all SKINNY-AEAD family members. - */ -#define SKINNY_AEAD_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M1. - */ -#define SKINNY_AEAD_M1_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M1. - */ -#define SKINNY_AEAD_M1_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M2. - */ -#define SKINNY_AEAD_M2_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M2. - */ -#define SKINNY_AEAD_M2_NONCE_SIZE 12 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M3. - */ -#define SKINNY_AEAD_M3_TAG_SIZE 8 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M3. - */ -#define SKINNY_AEAD_M3_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M4. - */ -#define SKINNY_AEAD_M4_TAG_SIZE 8 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M4. - */ -#define SKINNY_AEAD_M4_NONCE_SIZE 12 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M5. - */ -#define SKINNY_AEAD_M5_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M5. - */ -#define SKINNY_AEAD_M5_NONCE_SIZE 12 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M6. - */ -#define SKINNY_AEAD_M6_TAG_SIZE 8 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M6. - */ -#define SKINNY_AEAD_M6_NONCE_SIZE 12 - -/** - * \brief Meta-information block for the SKINNY-AEAD-M1 cipher. - */ -extern aead_cipher_t const skinny_aead_m1_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M2 cipher. - */ -extern aead_cipher_t const skinny_aead_m2_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M3 cipher. - */ -extern aead_cipher_t const skinny_aead_m3_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M4 cipher. - */ -extern aead_cipher_t const skinny_aead_m4_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M5 cipher. - */ -extern aead_cipher_t const skinny_aead_m5_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M6 cipher. 
- */ -extern aead_cipher_t const skinny_aead_m6_cipher; - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M1. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m1_decrypt() - */ -int skinny_aead_m1_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M1. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m1_encrypt() - */ -int skinny_aead_m1_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M2. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa skinny_aead_m2_decrypt() - */ -int skinny_aead_m2_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M2. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m2_encrypt() - */ -int skinny_aead_m2_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M3. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m3_decrypt() - */ -int skinny_aead_m3_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M3. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. 
- * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m3_encrypt() - */ -int skinny_aead_m3_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M4. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m4_decrypt() - */ -int skinny_aead_m4_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M4. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m4_encrypt() - */ -int skinny_aead_m4_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M5. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m5_decrypt() - */ -int skinny_aead_m5_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M5. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m5_encrypt() - */ -int skinny_aead_m5_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M6. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m6_decrypt() - */ -int skinny_aead_m6_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M6. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. 
- * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m6_encrypt() - */ -int skinny_aead_m6_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/internal-skinny128-avr.S b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/internal-skinny128-avr.S new file mode 100644 index 0000000..d342cd5 --- /dev/null +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/internal-skinny128-avr.S @@ -0,0 +1,10099 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 256 +table_0: + .byte 101 + .byte 76 + .byte 106 + .byte 66 + .byte 75 + .byte 99 + .byte 67 + .byte 107 + .byte 85 + .byte 117 + .byte 90 + .byte 122 + .byte 83 + .byte 115 + .byte 91 + .byte 123 + .byte 53 + .byte 140 + .byte 58 + .byte 129 + .byte 137 + .byte 51 + .byte 128 + .byte 59 + .byte 149 + .byte 37 + .byte 152 + .byte 42 + .byte 144 + .byte 35 + .byte 153 + .byte 43 + .byte 229 + .byte 204 + .byte 232 + .byte 193 + .byte 201 + .byte 224 + .byte 192 + .byte 233 + .byte 213 + .byte 245 + .byte 216 + .byte 248 + .byte 208 + .byte 240 + .byte 217 + .byte 249 + .byte 165 + .byte 28 + .byte 168 + .byte 18 + .byte 27 + .byte 160 + .byte 19 + .byte 169 + .byte 5 + .byte 181 + .byte 10 + .byte 184 + .byte 3 + .byte 176 + .byte 11 + .byte 185 + .byte 50 + .byte 136 + .byte 60 + .byte 133 + .byte 141 + .byte 52 + .byte 132 + .byte 61 + .byte 145 + .byte 34 + .byte 156 + .byte 44 + .byte 148 + .byte 36 + .byte 157 + .byte 45 + .byte 98 + .byte 74 + .byte 108 + .byte 69 + .byte 77 + .byte 100 + .byte 68 + .byte 109 + .byte 82 + .byte 114 + .byte 92 + .byte 124 + .byte 84 + .byte 116 + .byte 93 + .byte 125 + .byte 161 + .byte 26 + .byte 172 + .byte 21 + .byte 29 + .byte 164 + .byte 20 + .byte 173 + .byte 2 + .byte 177 + .byte 12 + .byte 188 + .byte 4 + .byte 180 + .byte 13 + .byte 189 + .byte 225 + .byte 200 + .byte 236 + .byte 197 + .byte 205 + .byte 228 + .byte 196 + .byte 237 + .byte 209 + .byte 241 + .byte 220 + .byte 252 + .byte 212 + .byte 244 + .byte 221 + .byte 253 + .byte 54 + .byte 142 + .byte 56 + .byte 130 + .byte 139 + .byte 48 + .byte 131 + .byte 57 + .byte 150 + .byte 38 + .byte 154 + .byte 40 + .byte 147 + .byte 32 + .byte 155 + .byte 41 + .byte 102 + .byte 78 + .byte 104 + .byte 65 + .byte 73 + .byte 96 + .byte 64 + .byte 105 + .byte 86 + .byte 118 + .byte 88 + .byte 120 + .byte 80 + .byte 112 + .byte 89 + .byte 121 + .byte 166 + .byte 30 + .byte 170 
+ .byte 17 + .byte 25 + .byte 163 + .byte 16 + .byte 171 + .byte 6 + .byte 182 + .byte 8 + .byte 186 + .byte 0 + .byte 179 + .byte 9 + .byte 187 + .byte 230 + .byte 206 + .byte 234 + .byte 194 + .byte 203 + .byte 227 + .byte 195 + .byte 235 + .byte 214 + .byte 246 + .byte 218 + .byte 250 + .byte 211 + .byte 243 + .byte 219 + .byte 251 + .byte 49 + .byte 138 + .byte 62 + .byte 134 + .byte 143 + .byte 55 + .byte 135 + .byte 63 + .byte 146 + .byte 33 + .byte 158 + .byte 46 + .byte 151 + .byte 39 + .byte 159 + .byte 47 + .byte 97 + .byte 72 + .byte 110 + .byte 70 + .byte 79 + .byte 103 + .byte 71 + .byte 111 + .byte 81 + .byte 113 + .byte 94 + .byte 126 + .byte 87 + .byte 119 + .byte 95 + .byte 127 + .byte 162 + .byte 24 + .byte 174 + .byte 22 + .byte 31 + .byte 167 + .byte 23 + .byte 175 + .byte 1 + .byte 178 + .byte 14 + .byte 190 + .byte 7 + .byte 183 + .byte 15 + .byte 191 + .byte 226 + .byte 202 + .byte 238 + .byte 198 + .byte 207 + .byte 231 + .byte 199 + .byte 239 + .byte 210 + .byte 242 + .byte 222 + .byte 254 + .byte 215 + .byte 247 + .byte 223 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 256 +table_1: + .byte 172 + .byte 232 + .byte 104 + .byte 60 + .byte 108 + .byte 56 + .byte 168 + .byte 236 + .byte 170 + .byte 174 + .byte 58 + .byte 62 + .byte 106 + .byte 110 + .byte 234 + .byte 238 + .byte 166 + .byte 163 + .byte 51 + .byte 54 + .byte 102 + .byte 99 + .byte 227 + .byte 230 + .byte 225 + .byte 164 + .byte 97 + .byte 52 + .byte 49 + .byte 100 + .byte 161 + .byte 228 + .byte 141 + .byte 201 + .byte 73 + .byte 29 + .byte 77 + .byte 25 + .byte 137 + .byte 205 + .byte 139 + .byte 143 + .byte 27 + .byte 31 + .byte 75 + .byte 79 + .byte 203 + .byte 207 + .byte 133 + .byte 192 + .byte 64 + .byte 21 + .byte 69 + .byte 16 + .byte 128 + .byte 197 + .byte 130 + .byte 135 + .byte 18 + .byte 23 + .byte 66 + .byte 71 + .byte 194 + .byte 199 + .byte 150 + .byte 147 + .byte 3 + .byte 6 + .byte 86 + .byte 83 + .byte 211 + .byte 214 + .byte 209 + .byte 148 + .byte 81 + .byte 4 + .byte 1 + .byte 84 + .byte 145 + .byte 212 + .byte 156 + .byte 216 + .byte 88 + .byte 12 + .byte 92 + .byte 8 + .byte 152 + .byte 220 + .byte 154 + .byte 158 + .byte 10 + .byte 14 + .byte 90 + .byte 94 + .byte 218 + .byte 222 + .byte 149 + .byte 208 + .byte 80 + .byte 5 + .byte 85 + .byte 0 + .byte 144 + .byte 213 + .byte 146 + .byte 151 + .byte 2 + .byte 7 + .byte 82 + .byte 87 + .byte 210 + .byte 215 + .byte 157 + .byte 217 + .byte 89 + .byte 13 + .byte 93 + .byte 9 + .byte 153 + .byte 221 + .byte 155 + .byte 159 + .byte 11 + .byte 15 + .byte 91 + .byte 95 + .byte 219 + .byte 223 + .byte 22 + .byte 19 + .byte 131 + .byte 134 + .byte 70 + .byte 67 + .byte 195 + .byte 198 + .byte 65 + .byte 20 + .byte 193 + .byte 132 + .byte 17 + .byte 68 + .byte 129 + .byte 196 + .byte 28 + .byte 72 + .byte 200 + .byte 140 + .byte 76 + .byte 24 + .byte 136 + .byte 204 + .byte 26 + .byte 30 + .byte 138 + .byte 142 + .byte 74 + .byte 78 + .byte 202 + .byte 206 + .byte 53 + .byte 96 + .byte 224 + .byte 165 + .byte 101 + .byte 48 + .byte 160 + .byte 229 + .byte 50 + .byte 55 + .byte 162 + .byte 167 + .byte 98 + .byte 103 + .byte 226 + .byte 231 + .byte 61 + .byte 105 + .byte 233 + .byte 173 + .byte 109 + .byte 57 + .byte 169 + .byte 237 + .byte 59 + .byte 63 + .byte 171 + .byte 175 + .byte 107 + .byte 111 + .byte 235 + .byte 239 + .byte 38 + .byte 35 + .byte 179 + .byte 182 + .byte 118 + .byte 115 + .byte 243 + .byte 246 + .byte 113 + .byte 36 + .byte 241 + .byte 180 + .byte 33 + 
.byte 116 + .byte 177 + .byte 244 + .byte 44 + .byte 120 + .byte 248 + .byte 188 + .byte 124 + .byte 40 + .byte 184 + .byte 252 + .byte 42 + .byte 46 + .byte 186 + .byte 190 + .byte 122 + .byte 126 + .byte 250 + .byte 254 + .byte 37 + .byte 112 + .byte 240 + .byte 181 + .byte 117 + .byte 32 + .byte 176 + .byte 245 + .byte 34 + .byte 39 + .byte 178 + .byte 183 + .byte 114 + .byte 119 + .byte 242 + .byte 247 + .byte 45 + .byte 121 + .byte 249 + .byte 189 + .byte 125 + .byte 41 + .byte 185 + .byte 253 + .byte 43 + .byte 47 + .byte 187 + .byte 191 + .byte 123 + .byte 127 + .byte 251 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_2, @object + .size table_2, 256 +table_2: + .byte 0 + .byte 2 + .byte 4 + .byte 6 + .byte 8 + .byte 10 + .byte 12 + .byte 14 + .byte 16 + .byte 18 + .byte 20 + .byte 22 + .byte 24 + .byte 26 + .byte 28 + .byte 30 + .byte 32 + .byte 34 + .byte 36 + .byte 38 + .byte 40 + .byte 42 + .byte 44 + .byte 46 + .byte 48 + .byte 50 + .byte 52 + .byte 54 + .byte 56 + .byte 58 + .byte 60 + .byte 62 + .byte 65 + .byte 67 + .byte 69 + .byte 71 + .byte 73 + .byte 75 + .byte 77 + .byte 79 + .byte 81 + .byte 83 + .byte 85 + .byte 87 + .byte 89 + .byte 91 + .byte 93 + .byte 95 + .byte 97 + .byte 99 + .byte 101 + .byte 103 + .byte 105 + .byte 107 + .byte 109 + .byte 111 + .byte 113 + .byte 115 + .byte 117 + .byte 119 + .byte 121 + .byte 123 + .byte 125 + .byte 127 + .byte 128 + .byte 130 + .byte 132 + .byte 134 + .byte 136 + .byte 138 + .byte 140 + .byte 142 + .byte 144 + .byte 146 + .byte 148 + .byte 150 + .byte 152 + .byte 154 + .byte 156 + .byte 158 + .byte 160 + .byte 162 + .byte 164 + .byte 166 + .byte 168 + .byte 170 + .byte 172 + .byte 174 + .byte 176 + .byte 178 + .byte 180 + .byte 182 + .byte 184 + .byte 186 + .byte 188 + .byte 190 + .byte 193 + .byte 195 + .byte 197 + .byte 199 + .byte 201 + .byte 203 + .byte 205 + .byte 207 + .byte 209 + .byte 211 + .byte 213 + .byte 215 + .byte 217 + .byte 219 + .byte 221 + .byte 223 + .byte 225 + .byte 227 + .byte 229 + .byte 231 + .byte 233 + .byte 235 + .byte 237 + .byte 239 + .byte 241 + .byte 243 + .byte 245 + .byte 247 + .byte 249 + .byte 251 + .byte 253 + .byte 255 + .byte 1 + .byte 3 + .byte 5 + .byte 7 + .byte 9 + .byte 11 + .byte 13 + .byte 15 + .byte 17 + .byte 19 + .byte 21 + .byte 23 + .byte 25 + .byte 27 + .byte 29 + .byte 31 + .byte 33 + .byte 35 + .byte 37 + .byte 39 + .byte 41 + .byte 43 + .byte 45 + .byte 47 + .byte 49 + .byte 51 + .byte 53 + .byte 55 + .byte 57 + .byte 59 + .byte 61 + .byte 63 + .byte 64 + .byte 66 + .byte 68 + .byte 70 + .byte 72 + .byte 74 + .byte 76 + .byte 78 + .byte 80 + .byte 82 + .byte 84 + .byte 86 + .byte 88 + .byte 90 + .byte 92 + .byte 94 + .byte 96 + .byte 98 + .byte 100 + .byte 102 + .byte 104 + .byte 106 + .byte 108 + .byte 110 + .byte 112 + .byte 114 + .byte 116 + .byte 118 + .byte 120 + .byte 122 + .byte 124 + .byte 126 + .byte 129 + .byte 131 + .byte 133 + .byte 135 + .byte 137 + .byte 139 + .byte 141 + .byte 143 + .byte 145 + .byte 147 + .byte 149 + .byte 151 + .byte 153 + .byte 155 + .byte 157 + .byte 159 + .byte 161 + .byte 163 + .byte 165 + .byte 167 + .byte 169 + .byte 171 + .byte 173 + .byte 175 + .byte 177 + .byte 179 + .byte 181 + .byte 183 + .byte 185 + .byte 187 + .byte 189 + .byte 191 + .byte 192 + .byte 194 + .byte 196 + .byte 198 + .byte 200 + .byte 202 + .byte 204 + .byte 206 + .byte 208 + .byte 210 + .byte 212 + .byte 214 + .byte 216 + .byte 218 + .byte 220 + .byte 222 + .byte 224 + .byte 226 + .byte 228 + .byte 230 + .byte 232 + .byte 234 + 
.byte 236 + .byte 238 + .byte 240 + .byte 242 + .byte 244 + .byte 246 + .byte 248 + .byte 250 + .byte 252 + .byte 254 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_3, @object + .size table_3, 256 +table_3: + .byte 0 + .byte 128 + .byte 1 + .byte 129 + .byte 2 + .byte 130 + .byte 3 + .byte 131 + .byte 4 + .byte 132 + .byte 5 + .byte 133 + .byte 6 + .byte 134 + .byte 7 + .byte 135 + .byte 8 + .byte 136 + .byte 9 + .byte 137 + .byte 10 + .byte 138 + .byte 11 + .byte 139 + .byte 12 + .byte 140 + .byte 13 + .byte 141 + .byte 14 + .byte 142 + .byte 15 + .byte 143 + .byte 16 + .byte 144 + .byte 17 + .byte 145 + .byte 18 + .byte 146 + .byte 19 + .byte 147 + .byte 20 + .byte 148 + .byte 21 + .byte 149 + .byte 22 + .byte 150 + .byte 23 + .byte 151 + .byte 24 + .byte 152 + .byte 25 + .byte 153 + .byte 26 + .byte 154 + .byte 27 + .byte 155 + .byte 28 + .byte 156 + .byte 29 + .byte 157 + .byte 30 + .byte 158 + .byte 31 + .byte 159 + .byte 160 + .byte 32 + .byte 161 + .byte 33 + .byte 162 + .byte 34 + .byte 163 + .byte 35 + .byte 164 + .byte 36 + .byte 165 + .byte 37 + .byte 166 + .byte 38 + .byte 167 + .byte 39 + .byte 168 + .byte 40 + .byte 169 + .byte 41 + .byte 170 + .byte 42 + .byte 171 + .byte 43 + .byte 172 + .byte 44 + .byte 173 + .byte 45 + .byte 174 + .byte 46 + .byte 175 + .byte 47 + .byte 176 + .byte 48 + .byte 177 + .byte 49 + .byte 178 + .byte 50 + .byte 179 + .byte 51 + .byte 180 + .byte 52 + .byte 181 + .byte 53 + .byte 182 + .byte 54 + .byte 183 + .byte 55 + .byte 184 + .byte 56 + .byte 185 + .byte 57 + .byte 186 + .byte 58 + .byte 187 + .byte 59 + .byte 188 + .byte 60 + .byte 189 + .byte 61 + .byte 190 + .byte 62 + .byte 191 + .byte 63 + .byte 64 + .byte 192 + .byte 65 + .byte 193 + .byte 66 + .byte 194 + .byte 67 + .byte 195 + .byte 68 + .byte 196 + .byte 69 + .byte 197 + .byte 70 + .byte 198 + .byte 71 + .byte 199 + .byte 72 + .byte 200 + .byte 73 + .byte 201 + .byte 74 + .byte 202 + .byte 75 + .byte 203 + .byte 76 + .byte 204 + .byte 77 + .byte 205 + .byte 78 + .byte 206 + .byte 79 + .byte 207 + .byte 80 + .byte 208 + .byte 81 + .byte 209 + .byte 82 + .byte 210 + .byte 83 + .byte 211 + .byte 84 + .byte 212 + .byte 85 + .byte 213 + .byte 86 + .byte 214 + .byte 87 + .byte 215 + .byte 88 + .byte 216 + .byte 89 + .byte 217 + .byte 90 + .byte 218 + .byte 91 + .byte 219 + .byte 92 + .byte 220 + .byte 93 + .byte 221 + .byte 94 + .byte 222 + .byte 95 + .byte 223 + .byte 224 + .byte 96 + .byte 225 + .byte 97 + .byte 226 + .byte 98 + .byte 227 + .byte 99 + .byte 228 + .byte 100 + .byte 229 + .byte 101 + .byte 230 + .byte 102 + .byte 231 + .byte 103 + .byte 232 + .byte 104 + .byte 233 + .byte 105 + .byte 234 + .byte 106 + .byte 235 + .byte 107 + .byte 236 + .byte 108 + .byte 237 + .byte 109 + .byte 238 + .byte 110 + .byte 239 + .byte 111 + .byte 240 + .byte 112 + .byte 241 + .byte 113 + .byte 242 + .byte 114 + .byte 243 + .byte 115 + .byte 244 + .byte 116 + .byte 245 + .byte 117 + .byte 246 + .byte 118 + .byte 247 + .byte 119 + .byte 248 + .byte 120 + .byte 249 + .byte 121 + .byte 250 + .byte 122 + .byte 251 + .byte 123 + .byte 252 + .byte 124 + .byte 253 + .byte 125 + .byte 254 + .byte 126 + .byte 255 + .byte 127 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_4, @object + .size table_4, 112 +table_4: + .byte 1 + .byte 0 + .byte 3 + .byte 0 + .byte 7 + .byte 0 + .byte 15 + .byte 0 + .byte 15 + .byte 1 + .byte 14 + .byte 3 + .byte 13 + .byte 3 + .byte 11 + .byte 3 + .byte 7 + .byte 3 + .byte 15 + .byte 2 + .byte 14 + .byte 1 + .byte 12 + .byte 3 + 
.byte 9 + .byte 3 + .byte 3 + .byte 3 + .byte 7 + .byte 2 + .byte 14 + .byte 0 + .byte 13 + .byte 1 + .byte 10 + .byte 3 + .byte 5 + .byte 3 + .byte 11 + .byte 2 + .byte 6 + .byte 1 + .byte 12 + .byte 2 + .byte 8 + .byte 1 + .byte 0 + .byte 3 + .byte 1 + .byte 2 + .byte 2 + .byte 0 + .byte 5 + .byte 0 + .byte 11 + .byte 0 + .byte 7 + .byte 1 + .byte 14 + .byte 2 + .byte 12 + .byte 1 + .byte 8 + .byte 3 + .byte 1 + .byte 3 + .byte 3 + .byte 2 + .byte 6 + .byte 0 + .byte 13 + .byte 0 + .byte 11 + .byte 1 + .byte 6 + .byte 3 + .byte 13 + .byte 2 + .byte 10 + .byte 1 + .byte 4 + .byte 3 + .byte 9 + .byte 2 + .byte 2 + .byte 1 + .byte 4 + .byte 2 + .byte 8 + .byte 0 + .byte 1 + .byte 1 + .byte 2 + .byte 2 + .byte 4 + .byte 0 + .byte 9 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 2 + .byte 12 + .byte 0 + .byte 9 + .byte 1 + .byte 2 + .byte 3 + .byte 5 + .byte 2 + .byte 10 + .byte 0 + + .text +.global skinny_128_384_init + .type skinny_128_384_init, @function +skinny_128_384_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,12 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_384_init, .-skinny_128_384_init + + .text +.global skinny_128_384_encrypt + .type skinny_128_384_encrypt, @function +skinny_128_384_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + std Y+33,r18 + std Y+34,r19 + std Y+35,r20 + std Y+36,r21 + ldd r18,Z+36 + ldd r19,Z+37 + ldd r20,Z+38 + ldd r21,Z+39 + std Y+37,r18 + std Y+38,r19 + std Y+39,r20 + std Y+40,r21 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + std Y+41,r18 + std Y+42,r19 + std Y+43,r20 + std Y+44,r21 + ldd r18,Z+44 + ldd r19,Z+45 + ldd r20,Z+46 + ldd r21,Z+47 + std Y+45,r18 + std Y+46,r19 + std Y+47,r20 + std Y+48,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +114: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor 
r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov 
r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if 
defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd 
r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov 
r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + 
elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 
+#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,112 + brne 5721f + rjmp 790f +5721: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if 
defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 114b +790: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + 
st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_encrypt, .-skinny_128_384_encrypt + +.global skinny_128_384_encrypt_tk_full + .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt + + .text +.global skinny_128_384_decrypt + .type skinny_128_384_decrypt, @function +skinny_128_384_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r23 + std Y+2,r2 + std Y+3,r21 + std Y+4,r20 + std Y+5,r3 + std Y+6,r18 + std Y+7,r19 + std Y+8,r22 + std Y+9,r9 + std Y+10,r10 + std Y+11,r7 + std Y+12,r6 + std Y+13,r11 + std Y+14,r4 + std Y+15,r5 + std Y+16,r8 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r23 + std Y+18,r2 + std Y+19,r21 + std Y+20,r20 + std Y+21,r3 + std Y+22,r18 + std Y+23,r19 + std Y+24,r22 + std Y+25,r9 + std Y+26,r10 + std Y+27,r7 + std Y+28,r6 + std Y+29,r11 + std Y+30,r4 + std Y+31,r5 + std Y+32,r8 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r22,Z+36 + ldd r23,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+33,r23 + std Y+34,r2 + std Y+35,r21 + std Y+36,r20 + std Y+37,r3 + std Y+38,r18 + std Y+39,r19 + std Y+40,r22 + std Y+41,r9 + std Y+42,r10 + std Y+43,r7 + std Y+44,r6 + std Y+45,r11 + std Y+46,r4 + std Y+47,r5 + std Y+48,r8 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +122: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + 
ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 122b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,28 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +150: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 150b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 +179: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + 
mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 179b + std Y+33,r12 + std Y+34,r13 + std Y+35,r14 + std Y+36,r15 + std Y+37,r24 + std Y+38,r25 + std Y+39,r16 + std Y+40,r17 + ldi r26,28 + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 +207: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 207b + std Y+41,r12 + std Y+42,r13 + std Y+43,r14 + std Y+44,r15 + std Y+45,r24 + std Y+46,r25 + std Y+47,r16 + std Y+48,r17 + ldi r26,112 +227: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z 
+#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + 
eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z 
+#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + 
elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z 
+#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm 
r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 903f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 227b +903: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_decrypt, .-skinny_128_384_decrypt + + .text +.global skinny_128_256_init + .type skinny_128_256_init, @function +skinny_128_256_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,8 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_256_init, .-skinny_128_256_init + + .text +.global skinny_128_256_encrypt + .type skinny_128_256_encrypt, @function +skinny_128_256_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std 
Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +82: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 
+#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov 
r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor 
r20,r0 + ldd r0,Y+32 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z 
+#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,96 + breq 594f + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld 
r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 82b +594: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_encrypt, .-skinny_128_256_encrypt + +.global skinny_128_256_encrypt_tk_full + .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt + + .text +.global skinny_128_256_decrypt + .type skinny_128_256_decrypt, @function +skinny_128_256_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + std Y+5,r22 + std Y+6,r23 + std Y+7,r2 + std Y+8,r3 + std Y+9,r4 + std Y+10,r5 + std Y+11,r6 + std Y+12,r7 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + std Y+21,r22 + std Y+22,r23 + std Y+23,r2 + std Y+24,r3 + std Y+25,r4 + std Y+26,r5 + std Y+27,r6 + std Y+28,r7 + std Y+29,r8 + std Y+30,r9 + std Y+31,r10 + std Y+32,r11 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,24 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +90: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 
+#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 90b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,24 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +118: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 118b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,96 +139: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov 
r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov 
r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + 
mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif 
defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi 
r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 651f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 139b +651: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_decrypt, .-skinny_128_256_decrypt + +#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/internal-skinny128.c b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/internal-skinny128.c index 65ba4ed..579ced1 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/internal-skinny128.c +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/internal-skinny128.c @@ -25,6 +25,8 @@ #include "internal-util.h" #include +#if !defined(__AVR__) + STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) { /* This function is used to fast-forward the TK1 tweak value @@ -55,42 +57,33 @@ STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) ((row3 << 24) & 0xFF000000U); } -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - size_t key_len) +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t TK3[4]; uint32_t *schedule; unsigned round; uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || (key_len != 32 && key_len != 48)) - return 0; - +#endif + +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when 
using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); + memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); +#else /* Set the initial states of TK1, TK2, and TK3 */ - if (key_len == 32) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - TK3[0] = le_load_word32(key + 16); - TK3[1] = le_load_word32(key + 20); - TK3[2] = le_load_word32(key + 24); - TK3[3] = le_load_word32(key + 28); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); + TK3[0] = le_load_word32(key + 32); + TK3[1] = le_load_word32(key + 36); + TK3[2] = le_load_word32(key + 40); + TK3[3] = le_load_word32(key + 44); /* Set up the key schedule using TK2 and TK3. TK1 is not added * to the key schedule because we will derive that part of the @@ -116,20 +109,7 @@ int skinny_128_384_init skinny128_LFSR3(TK3[0]); skinny128_LFSR3(TK3[1]); } - return 1; -} - -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_384_encrypt @@ -138,7 +118,13 @@ void skinny_128_384_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -148,14 +134,24 @@ void skinny_128_384_encrypt s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -163,8 +159,15 @@ void skinny_128_384_encrypt skinny128_sbox(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -185,6 +188,16 @@ 
void skinny_128_384_encrypt /* Permute TK1 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_permute_tk(TK3); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -200,7 +213,13 @@ void skinny_128_384_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0x15; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -215,15 +234,47 @@ void skinny_128_384_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Permute TK1 to fast-forward it to the end of the key schedule */ skinny128_fast_forward_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_fast_forward_tk(TK2); + skinny128_fast_forward_tk(TK3); + for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2 and TK3. + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); + skinny128_LFSR3(TK3[2]); + skinny128_LFSR3(TK3[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_inv_permute_tk(TK3); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); + skinny128_LFSR2(TK3[2]); + skinny128_LFSR2(TK3[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -240,8 +291,15 @@ void skinny_128_384_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -259,13 +317,18 @@ void skinny_128_384_decrypt } void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2) { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; uint32_t TK2[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -275,7 +338,7 @@ void skinny_128_384_encrypt_tk2 s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1/TK2 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = 
le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); @@ -284,9 +347,15 @@ void skinny_128_384_encrypt_tk2 TK2[1] = le_load_word32(tk2 + 4); TK2[2] = le_load_word32(tk2 + 8); TK2[3] = le_load_word32(tk2 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -294,8 +363,15 @@ void skinny_128_384_encrypt_tk2 skinny128_sbox(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -319,6 +395,13 @@ void skinny_128_384_encrypt_tk2 skinny128_permute_tk(TK2); skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK3); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -408,33 +491,27 @@ void skinny_128_384_encrypt_tk_full le_store_word32(output + 12, s3); } -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len) +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t *schedule; unsigned round; uint8_t rc; +#endif - /* Validate the parameters */ - if (!ks || !key || (key_len != 16 && key_len != 32)) - return 0; - +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); +#else /* Set the initial states of TK1 and TK2 */ - if (key_len == 16) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); /* Set up the key schedule using TK2. 
TK1 is not added * to the key schedule because we will derive that part of the @@ -457,20 +534,7 @@ int skinny_128_256_init skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); } - return 1; -} - -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_256_encrypt @@ -479,7 +543,12 @@ void skinny_128_256_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -494,18 +563,31 @@ void skinny_128_256_encrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); skinny128_sbox(s2); skinny128_sbox(s3); - /* Apply the subkey for this round */ + /* XOR the round constant and the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -524,8 +606,15 @@ void skinny_128_256_encrypt s1 = s0; s0 = temp; - /* Permute TK1 for the next round */ + /* Permute TK1 and TK2 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -541,7 +630,12 @@ void skinny_128_256_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0x09; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -558,12 +652,29 @@ void skinny_128_256_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2. 
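[Editor's illustration, not part of the patch] The SKINNY_128_SMALL_SCHEDULE paths added above recompute the 6-bit round constant on the fly instead of reading it from an expanded key schedule: encryption steps an LFSR forward once per round, and decryption seeds it with a fixed value (0x15 for SKINNY-128-384 and 0x09 for SKINNY-128-256 in this patch) and steps it backward. The standalone sketch below uses the two update expressions exactly as they appear in the hunks and checks that the backward step inverts the forward step; the round count of 56 is an assumption for the demo.

/* Illustrative sketch only: forward/backward round-constant LFSR used by
 * the small-schedule code paths.  The round count of 56 is assumed. */
#include <stdint.h>
#include <stdio.h>

static uint8_t rc_forward(uint8_t rc)
{
    /* Forward update, as used in the encryption hunks above */
    rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
    return rc & 0x3F;
}

static uint8_t rc_backward(uint8_t rc)
{
    /* Backward update, as used in the decryption hunks above */
    return (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20);
}

int main(void)
{
    uint8_t rc = 0;
    int round;
    for (round = 0; round < 56; ++round)
        rc = rc_forward(rc);
    printf("rc after 56 forward steps: 0x%02X\n", rc);
    for (round = 0; round < 56; ++round)
        rc = rc_backward(rc);
    printf("rc after stepping back:    0x%02X (expected 0x00)\n", rc);
    return 0;
}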
+ skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -580,8 +691,15 @@ void skinny_128_256_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -670,142 +788,14 @@ void skinny_128_256_encrypt_tk_full le_store_word32(output + 12, s3); } -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len) -{ - uint32_t TK1[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || key_len != 16) - return 0; - - /* Set the initial state of TK1 */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); +#else /* __AVR__ */ - /* Set up the key schedule using TK1 */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK1[0] ^ (rc & 0x0F); - schedule[1] = TK1[1] ^ (rc >> 4); - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); - } - return 1; -} - -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) +void skinny_128_384_encrypt_tk2 + (skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, const unsigned char *tk2) { - uint32_t s0, s1, s2, s3; - const uint32_t *schedule = ks->k; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); + memcpy(ks->TK2, tk2, 16); + skinny_128_384_encrypt(ks, output, input); } -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - const uint32_t *schedule; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_128_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule -= 2) { - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} +#endif /* __AVR__ */ diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/internal-skinny128.h b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/internal-skinny128.h index 76b34f5..2bfda3c 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/internal-skinny128.h +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/internal-skinny128.h @@ -39,6 +39,16 @@ extern "C" { #endif /** + * \def SKINNY_128_SMALL_SCHEDULE + * \brief Defined to 1 to use the small key schedule version of SKINNY-128. + */ +#if defined(__AVR__) +#define SKINNY_128_SMALL_SCHEDULE 1 +#else +#define SKINNY_128_SMALL_SCHEDULE 0 +#endif + +/** * \brief Size of a block for SKINNY-128 block ciphers. */ #define SKINNY_128_BLOCK_SIZE 16 @@ -56,8 +66,16 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; + + /** TK3 for the small key schedule */ + uint8_t TK3[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_384_ROUNDS * 2]; +#endif } skinny_128_384_key_schedule_t; @@ -66,29 +84,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 32 or 48, - * where 32 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-384. 
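[Editor's illustration, not part of the patch] The header hunk above introduces SKINNY_128_SMALL_SCHEDULE, enabled only when __AVR__ is defined, and swaps the expanded schedule words for raw TK1/TK2/TK3 copies in the key-schedule structs. A plausible motivation is RAM: the standalone sketch below compares the storage cost of the two layouts; the value 56 for SKINNY_128_384_ROUNDS is an assumption, since the macro's definition is not shown in this hunk.

/* Illustrative sketch only: storage cost of the full versus small
 * SKINNY-128-384 key-schedule layouts from the header hunk above. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SKINNY_128_384_ROUNDS 56 /* assumed value for the demo */

int main(void)
{
    /* Full schedule: TK1 plus two expanded 32-bit words per round. */
    size_t full_schedule  = 16 + sizeof(uint32_t) * SKINNY_128_384_ROUNDS * 2;
    /* Small schedule: TK1, TK2 and TK3 kept as plain 16-byte arrays. */
    size_t small_schedule = 16 + 16 + 16;
    printf("full schedule:  %u bytes\n", (unsigned)full_schedule);
    printf("small schedule: %u bytes\n", (unsigned)small_schedule);
    return 0;
}

With those numbers the full layout needs 464 bytes per key schedule while the small layout needs 48, which matters on AVR parts with only a few kilobytes of SRAM; the trade-off, visible in the encrypt/decrypt hunks above, is that the subkeys have to be recomputed in every round.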
- * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); /** * \brief Encrypts a 128-bit block with SKINNY-128-384. @@ -133,9 +131,12 @@ void skinny_128_384_decrypt * This version is useful when both TK1 and TK2 change from block to block. * When the key is initialized with skinny_128_384_init(), the TK2 part of * the key value should be set to zero. + * + * \note Some versions of this function may modify the key schedule to + * copy tk2 into place. */ void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2); /** @@ -170,8 +171,13 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_256_ROUNDS * 2]; +#endif } skinny_128_256_key_schedule_t; @@ -180,29 +186,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16 or 32, - * where 16 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); /** * \brief Encrypts a 128-bit block with SKINNY-128-256. @@ -251,63 +237,6 @@ void skinny_128_256_encrypt_tk_full (const unsigned char key[32], unsigned char *output, const unsigned char *input); -/** - * \brief Number of rounds for SKINNY-128-128. - */ -#define SKINNY_128_128_ROUNDS 40 - -/** - * \brief Structure of the key schedule for SKINNY-128-128. - */ -typedef struct -{ - /** Words of the key schedule */ - uint32_t k[SKINNY_128_128_ROUNDS * 2]; - -} skinny_128_128_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-128. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. 
- * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - #ifdef __cplusplus } #endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/internal-util.h b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/internal-util.h +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/skinny-aead.c b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/skinny-aead.c index 2bb37e9..7558527 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/skinny-aead.c +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk3128128v1/rhys/skinny-aead.c @@ -105,11 +105,12 @@ static void skinny_aead_128_384_init (skinny_128_384_key_schedule_t *ks, const unsigned char *key, const unsigned char *nonce, unsigned nonce_len) { - unsigned char k[32]; - memcpy(k, nonce, nonce_len); - memset(k + nonce_len, 0, 16 - nonce_len); - memcpy(k + 16, key, 16); - skinny_128_384_init(ks, k, 32); + unsigned char k[48]; + memset(k, 0, 16); + memcpy(k + 16, nonce, nonce_len); + memset(k + 16 + nonce_len, 0, 16 - nonce_len); + memcpy(k + 32, key, 16); + skinny_128_384_init(ks, k); } /** @@ -136,7 +137,7 @@ static void skinny_aead_128_384_init #define skinny_aead_128_384_update_lfsr(lfsr) \ do { \ uint8_t feedback = ((lfsr) & (1ULL << 63)) ? 
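[Editor's illustration, not part of the patch] The internal-util.h hunk above notes that on AVR only rotations by 1 bit or by a multiple of 8 bits are cheap, so the remaining rotation counts are composed from those primitives. The standalone sketch below checks one composition from the hunk, leftRotate5 built as "rotate left by 8, then right by 1 three times", against a plain 5-bit rotation; rotl32 and rotr32 are reference helpers written for the demo, not names from the library.

/* Illustrative sketch only: verify that "left by 8, then right by 1 three
 * times" equals a plain left rotation by 5, matching leftRotate5 above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t rotl32(uint32_t x, unsigned n) { return (x << n) | (x >> (32 - n)); }
static uint32_t rotr32(uint32_t x, unsigned n) { return (x >> n) | (x << (32 - n)); }

int main(void)
{
    uint32_t x = 0x12345678U;
    uint32_t composed = rotr32(rotr32(rotr32(rotl32(x, 8), 1), 1), 1);
    uint32_t direct   = rotl32(x, 5);
    printf("composed = 0x%08lX, direct = 0x%08lX%s\n",
           (unsigned long)composed, (unsigned long)direct,
           composed == direct ? " (match)" : " (MISMATCH)");
    return 0;
}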
0x1B : 0x00; \ - (lfsr) = ((lfsr) << 1) | feedback; \ + (lfsr) = ((lfsr) << 1) ^ feedback; \ } while (0) /** @@ -520,7 +521,7 @@ static void skinny_aead_128_256_init memset(k, 0, 16 - nonce_len); memcpy(k + 16 - nonce_len, nonce, nonce_len); memcpy(k + 16, key, 16); - skinny_128_256_init(ks, k, 32); + skinny_128_256_init(ks, k); } /** diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/aead-common.c b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/aead-common.h b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
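[Editor's illustration, not part of the patch] Among the skinny-aead.c changes above, the 64-bit LFSR update macro now XORs the 0x1B feedback byte into the shifted state instead of OR-ing it. When the feedback byte is applied, the two forms agree only if the shifted value has no bits in common with 0x1B; the standalone sketch below shows a state where they diverge, which is why the change is more than cosmetic.

/* Illustrative sketch only: OR versus XOR feedback in the 64-bit LFSR
 * update from skinny-aead.c.  The two differ whenever the shifted state
 * already has bits set inside 0x1B. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t lfsr = (1ULL << 63) | 1ULL;  /* top bit set, so feedback is 0x1B */
    uint64_t feedback = (lfsr & (1ULL << 63)) ? 0x1B : 0x00;
    uint64_t with_or  = (lfsr << 1) | feedback;  /* old form */
    uint64_t with_xor = (lfsr << 1) ^ feedback;  /* form used after this patch */
    printf("OR : 0x%016llX\nXOR: 0x%016llX\n",
           (unsigned long long)with_or, (unsigned long long)with_xor);
    return 0;
}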
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. 
Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/api.h b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/api.h deleted file mode 100644 index 4bf8f5c..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 8 -#define CRYPTO_NOOVERLAP 1 diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/encrypt.c b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/encrypt.c deleted file mode 100644 index db41b19..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "skinny-aead.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return skinny_aead_m3_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return skinny_aead_m3_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-skinny128-avr.S b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-skinny128-avr.S deleted file mode 100644 index d342cd5..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-skinny128-avr.S +++ /dev/null @@ -1,10099 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 256 -table_0: - .byte 101 - .byte 76 - .byte 106 - .byte 66 - .byte 75 - .byte 99 - .byte 67 - .byte 107 - .byte 85 - .byte 117 - .byte 90 - .byte 122 - .byte 83 - .byte 115 - .byte 91 - .byte 123 - .byte 53 - .byte 140 - .byte 58 - .byte 129 - .byte 137 - .byte 51 - .byte 128 - .byte 59 - .byte 149 - .byte 37 - .byte 152 - .byte 42 - .byte 144 - .byte 35 - .byte 153 - .byte 43 - .byte 229 - .byte 204 - .byte 232 - .byte 193 - .byte 201 - .byte 224 - .byte 192 - .byte 233 - .byte 213 - .byte 245 - .byte 216 - .byte 248 - .byte 208 - .byte 240 - .byte 217 - .byte 249 - .byte 165 - .byte 28 - .byte 168 - .byte 18 - .byte 27 - .byte 160 - .byte 19 - .byte 169 - .byte 5 - .byte 181 - .byte 10 - .byte 184 - .byte 3 - .byte 176 - .byte 11 - .byte 185 - .byte 50 - .byte 136 - .byte 60 - .byte 133 - .byte 141 - .byte 52 - .byte 132 - .byte 61 - .byte 145 - .byte 34 - .byte 156 - .byte 44 - .byte 148 - .byte 36 - .byte 157 - .byte 45 - .byte 98 - .byte 74 - .byte 108 - .byte 69 - .byte 77 - .byte 100 - .byte 68 - .byte 109 - .byte 82 - .byte 114 - .byte 92 - .byte 124 - .byte 84 - .byte 116 - .byte 93 - .byte 125 - .byte 161 - .byte 26 - .byte 172 - .byte 21 - .byte 29 - .byte 164 - .byte 20 - .byte 173 - .byte 2 - .byte 177 - .byte 12 - .byte 188 - .byte 4 - .byte 180 - 
.byte 13 - .byte 189 - .byte 225 - .byte 200 - .byte 236 - .byte 197 - .byte 205 - .byte 228 - .byte 196 - .byte 237 - .byte 209 - .byte 241 - .byte 220 - .byte 252 - .byte 212 - .byte 244 - .byte 221 - .byte 253 - .byte 54 - .byte 142 - .byte 56 - .byte 130 - .byte 139 - .byte 48 - .byte 131 - .byte 57 - .byte 150 - .byte 38 - .byte 154 - .byte 40 - .byte 147 - .byte 32 - .byte 155 - .byte 41 - .byte 102 - .byte 78 - .byte 104 - .byte 65 - .byte 73 - .byte 96 - .byte 64 - .byte 105 - .byte 86 - .byte 118 - .byte 88 - .byte 120 - .byte 80 - .byte 112 - .byte 89 - .byte 121 - .byte 166 - .byte 30 - .byte 170 - .byte 17 - .byte 25 - .byte 163 - .byte 16 - .byte 171 - .byte 6 - .byte 182 - .byte 8 - .byte 186 - .byte 0 - .byte 179 - .byte 9 - .byte 187 - .byte 230 - .byte 206 - .byte 234 - .byte 194 - .byte 203 - .byte 227 - .byte 195 - .byte 235 - .byte 214 - .byte 246 - .byte 218 - .byte 250 - .byte 211 - .byte 243 - .byte 219 - .byte 251 - .byte 49 - .byte 138 - .byte 62 - .byte 134 - .byte 143 - .byte 55 - .byte 135 - .byte 63 - .byte 146 - .byte 33 - .byte 158 - .byte 46 - .byte 151 - .byte 39 - .byte 159 - .byte 47 - .byte 97 - .byte 72 - .byte 110 - .byte 70 - .byte 79 - .byte 103 - .byte 71 - .byte 111 - .byte 81 - .byte 113 - .byte 94 - .byte 126 - .byte 87 - .byte 119 - .byte 95 - .byte 127 - .byte 162 - .byte 24 - .byte 174 - .byte 22 - .byte 31 - .byte 167 - .byte 23 - .byte 175 - .byte 1 - .byte 178 - .byte 14 - .byte 190 - .byte 7 - .byte 183 - .byte 15 - .byte 191 - .byte 226 - .byte 202 - .byte 238 - .byte 198 - .byte 207 - .byte 231 - .byte 199 - .byte 239 - .byte 210 - .byte 242 - .byte 222 - .byte 254 - .byte 215 - .byte 247 - .byte 223 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 256 -table_1: - .byte 172 - .byte 232 - .byte 104 - .byte 60 - .byte 108 - .byte 56 - .byte 168 - .byte 236 - .byte 170 - .byte 174 - .byte 58 - .byte 62 - .byte 106 - .byte 110 - .byte 234 - .byte 238 - .byte 166 - .byte 163 - .byte 51 - .byte 54 - .byte 102 - .byte 99 - .byte 227 - .byte 230 - .byte 225 - .byte 164 - .byte 97 - .byte 52 - .byte 49 - .byte 100 - .byte 161 - .byte 228 - .byte 141 - .byte 201 - .byte 73 - .byte 29 - .byte 77 - .byte 25 - .byte 137 - .byte 205 - .byte 139 - .byte 143 - .byte 27 - .byte 31 - .byte 75 - .byte 79 - .byte 203 - .byte 207 - .byte 133 - .byte 192 - .byte 64 - .byte 21 - .byte 69 - .byte 16 - .byte 128 - .byte 197 - .byte 130 - .byte 135 - .byte 18 - .byte 23 - .byte 66 - .byte 71 - .byte 194 - .byte 199 - .byte 150 - .byte 147 - .byte 3 - .byte 6 - .byte 86 - .byte 83 - .byte 211 - .byte 214 - .byte 209 - .byte 148 - .byte 81 - .byte 4 - .byte 1 - .byte 84 - .byte 145 - .byte 212 - .byte 156 - .byte 216 - .byte 88 - .byte 12 - .byte 92 - .byte 8 - .byte 152 - .byte 220 - .byte 154 - .byte 158 - .byte 10 - .byte 14 - .byte 90 - .byte 94 - .byte 218 - .byte 222 - .byte 149 - .byte 208 - .byte 80 - .byte 5 - .byte 85 - .byte 0 - .byte 144 - .byte 213 - .byte 146 - .byte 151 - .byte 2 - .byte 7 - .byte 82 - .byte 87 - .byte 210 - .byte 215 - .byte 157 - .byte 217 - .byte 89 - .byte 13 - .byte 93 - .byte 9 - .byte 153 - .byte 221 - .byte 155 - .byte 159 - .byte 11 - .byte 15 - .byte 91 - .byte 95 - .byte 219 - .byte 223 - .byte 22 - .byte 19 - .byte 131 - .byte 134 - .byte 70 - .byte 67 - .byte 195 - .byte 198 - .byte 65 - .byte 20 - .byte 193 - .byte 132 - .byte 17 - .byte 68 - .byte 129 - .byte 196 - .byte 28 - .byte 72 - .byte 200 - .byte 140 - .byte 76 - .byte 24 - .byte 136 - .byte 204 - .byte 
26 - .byte 30 - .byte 138 - .byte 142 - .byte 74 - .byte 78 - .byte 202 - .byte 206 - .byte 53 - .byte 96 - .byte 224 - .byte 165 - .byte 101 - .byte 48 - .byte 160 - .byte 229 - .byte 50 - .byte 55 - .byte 162 - .byte 167 - .byte 98 - .byte 103 - .byte 226 - .byte 231 - .byte 61 - .byte 105 - .byte 233 - .byte 173 - .byte 109 - .byte 57 - .byte 169 - .byte 237 - .byte 59 - .byte 63 - .byte 171 - .byte 175 - .byte 107 - .byte 111 - .byte 235 - .byte 239 - .byte 38 - .byte 35 - .byte 179 - .byte 182 - .byte 118 - .byte 115 - .byte 243 - .byte 246 - .byte 113 - .byte 36 - .byte 241 - .byte 180 - .byte 33 - .byte 116 - .byte 177 - .byte 244 - .byte 44 - .byte 120 - .byte 248 - .byte 188 - .byte 124 - .byte 40 - .byte 184 - .byte 252 - .byte 42 - .byte 46 - .byte 186 - .byte 190 - .byte 122 - .byte 126 - .byte 250 - .byte 254 - .byte 37 - .byte 112 - .byte 240 - .byte 181 - .byte 117 - .byte 32 - .byte 176 - .byte 245 - .byte 34 - .byte 39 - .byte 178 - .byte 183 - .byte 114 - .byte 119 - .byte 242 - .byte 247 - .byte 45 - .byte 121 - .byte 249 - .byte 189 - .byte 125 - .byte 41 - .byte 185 - .byte 253 - .byte 43 - .byte 47 - .byte 187 - .byte 191 - .byte 123 - .byte 127 - .byte 251 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_2, @object - .size table_2, 256 -table_2: - .byte 0 - .byte 2 - .byte 4 - .byte 6 - .byte 8 - .byte 10 - .byte 12 - .byte 14 - .byte 16 - .byte 18 - .byte 20 - .byte 22 - .byte 24 - .byte 26 - .byte 28 - .byte 30 - .byte 32 - .byte 34 - .byte 36 - .byte 38 - .byte 40 - .byte 42 - .byte 44 - .byte 46 - .byte 48 - .byte 50 - .byte 52 - .byte 54 - .byte 56 - .byte 58 - .byte 60 - .byte 62 - .byte 65 - .byte 67 - .byte 69 - .byte 71 - .byte 73 - .byte 75 - .byte 77 - .byte 79 - .byte 81 - .byte 83 - .byte 85 - .byte 87 - .byte 89 - .byte 91 - .byte 93 - .byte 95 - .byte 97 - .byte 99 - .byte 101 - .byte 103 - .byte 105 - .byte 107 - .byte 109 - .byte 111 - .byte 113 - .byte 115 - .byte 117 - .byte 119 - .byte 121 - .byte 123 - .byte 125 - .byte 127 - .byte 128 - .byte 130 - .byte 132 - .byte 134 - .byte 136 - .byte 138 - .byte 140 - .byte 142 - .byte 144 - .byte 146 - .byte 148 - .byte 150 - .byte 152 - .byte 154 - .byte 156 - .byte 158 - .byte 160 - .byte 162 - .byte 164 - .byte 166 - .byte 168 - .byte 170 - .byte 172 - .byte 174 - .byte 176 - .byte 178 - .byte 180 - .byte 182 - .byte 184 - .byte 186 - .byte 188 - .byte 190 - .byte 193 - .byte 195 - .byte 197 - .byte 199 - .byte 201 - .byte 203 - .byte 205 - .byte 207 - .byte 209 - .byte 211 - .byte 213 - .byte 215 - .byte 217 - .byte 219 - .byte 221 - .byte 223 - .byte 225 - .byte 227 - .byte 229 - .byte 231 - .byte 233 - .byte 235 - .byte 237 - .byte 239 - .byte 241 - .byte 243 - .byte 245 - .byte 247 - .byte 249 - .byte 251 - .byte 253 - .byte 255 - .byte 1 - .byte 3 - .byte 5 - .byte 7 - .byte 9 - .byte 11 - .byte 13 - .byte 15 - .byte 17 - .byte 19 - .byte 21 - .byte 23 - .byte 25 - .byte 27 - .byte 29 - .byte 31 - .byte 33 - .byte 35 - .byte 37 - .byte 39 - .byte 41 - .byte 43 - .byte 45 - .byte 47 - .byte 49 - .byte 51 - .byte 53 - .byte 55 - .byte 57 - .byte 59 - .byte 61 - .byte 63 - .byte 64 - .byte 66 - .byte 68 - .byte 70 - .byte 72 - .byte 74 - .byte 76 - .byte 78 - .byte 80 - .byte 82 - .byte 84 - .byte 86 - .byte 88 - .byte 90 - .byte 92 - .byte 94 - .byte 96 - .byte 98 - .byte 100 - .byte 102 - .byte 104 - .byte 106 - .byte 108 - .byte 110 - .byte 112 - .byte 114 - .byte 116 - .byte 118 - .byte 120 - .byte 122 - .byte 124 - .byte 126 - .byte 129 - .byte 131 - .byte 133 - 
.byte 135 - .byte 137 - .byte 139 - .byte 141 - .byte 143 - .byte 145 - .byte 147 - .byte 149 - .byte 151 - .byte 153 - .byte 155 - .byte 157 - .byte 159 - .byte 161 - .byte 163 - .byte 165 - .byte 167 - .byte 169 - .byte 171 - .byte 173 - .byte 175 - .byte 177 - .byte 179 - .byte 181 - .byte 183 - .byte 185 - .byte 187 - .byte 189 - .byte 191 - .byte 192 - .byte 194 - .byte 196 - .byte 198 - .byte 200 - .byte 202 - .byte 204 - .byte 206 - .byte 208 - .byte 210 - .byte 212 - .byte 214 - .byte 216 - .byte 218 - .byte 220 - .byte 222 - .byte 224 - .byte 226 - .byte 228 - .byte 230 - .byte 232 - .byte 234 - .byte 236 - .byte 238 - .byte 240 - .byte 242 - .byte 244 - .byte 246 - .byte 248 - .byte 250 - .byte 252 - .byte 254 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_3, @object - .size table_3, 256 -table_3: - .byte 0 - .byte 128 - .byte 1 - .byte 129 - .byte 2 - .byte 130 - .byte 3 - .byte 131 - .byte 4 - .byte 132 - .byte 5 - .byte 133 - .byte 6 - .byte 134 - .byte 7 - .byte 135 - .byte 8 - .byte 136 - .byte 9 - .byte 137 - .byte 10 - .byte 138 - .byte 11 - .byte 139 - .byte 12 - .byte 140 - .byte 13 - .byte 141 - .byte 14 - .byte 142 - .byte 15 - .byte 143 - .byte 16 - .byte 144 - .byte 17 - .byte 145 - .byte 18 - .byte 146 - .byte 19 - .byte 147 - .byte 20 - .byte 148 - .byte 21 - .byte 149 - .byte 22 - .byte 150 - .byte 23 - .byte 151 - .byte 24 - .byte 152 - .byte 25 - .byte 153 - .byte 26 - .byte 154 - .byte 27 - .byte 155 - .byte 28 - .byte 156 - .byte 29 - .byte 157 - .byte 30 - .byte 158 - .byte 31 - .byte 159 - .byte 160 - .byte 32 - .byte 161 - .byte 33 - .byte 162 - .byte 34 - .byte 163 - .byte 35 - .byte 164 - .byte 36 - .byte 165 - .byte 37 - .byte 166 - .byte 38 - .byte 167 - .byte 39 - .byte 168 - .byte 40 - .byte 169 - .byte 41 - .byte 170 - .byte 42 - .byte 171 - .byte 43 - .byte 172 - .byte 44 - .byte 173 - .byte 45 - .byte 174 - .byte 46 - .byte 175 - .byte 47 - .byte 176 - .byte 48 - .byte 177 - .byte 49 - .byte 178 - .byte 50 - .byte 179 - .byte 51 - .byte 180 - .byte 52 - .byte 181 - .byte 53 - .byte 182 - .byte 54 - .byte 183 - .byte 55 - .byte 184 - .byte 56 - .byte 185 - .byte 57 - .byte 186 - .byte 58 - .byte 187 - .byte 59 - .byte 188 - .byte 60 - .byte 189 - .byte 61 - .byte 190 - .byte 62 - .byte 191 - .byte 63 - .byte 64 - .byte 192 - .byte 65 - .byte 193 - .byte 66 - .byte 194 - .byte 67 - .byte 195 - .byte 68 - .byte 196 - .byte 69 - .byte 197 - .byte 70 - .byte 198 - .byte 71 - .byte 199 - .byte 72 - .byte 200 - .byte 73 - .byte 201 - .byte 74 - .byte 202 - .byte 75 - .byte 203 - .byte 76 - .byte 204 - .byte 77 - .byte 205 - .byte 78 - .byte 206 - .byte 79 - .byte 207 - .byte 80 - .byte 208 - .byte 81 - .byte 209 - .byte 82 - .byte 210 - .byte 83 - .byte 211 - .byte 84 - .byte 212 - .byte 85 - .byte 213 - .byte 86 - .byte 214 - .byte 87 - .byte 215 - .byte 88 - .byte 216 - .byte 89 - .byte 217 - .byte 90 - .byte 218 - .byte 91 - .byte 219 - .byte 92 - .byte 220 - .byte 93 - .byte 221 - .byte 94 - .byte 222 - .byte 95 - .byte 223 - .byte 224 - .byte 96 - .byte 225 - .byte 97 - .byte 226 - .byte 98 - .byte 227 - .byte 99 - .byte 228 - .byte 100 - .byte 229 - .byte 101 - .byte 230 - .byte 102 - .byte 231 - .byte 103 - .byte 232 - .byte 104 - .byte 233 - .byte 105 - .byte 234 - .byte 106 - .byte 235 - .byte 107 - .byte 236 - .byte 108 - .byte 237 - .byte 109 - .byte 238 - .byte 110 - .byte 239 - .byte 111 - .byte 240 - .byte 112 - .byte 241 - .byte 113 - .byte 242 - .byte 114 - .byte 243 - .byte 115 - .byte 244 - .byte 116 - .byte 245 - 
.byte 117 - .byte 246 - .byte 118 - .byte 247 - .byte 119 - .byte 248 - .byte 120 - .byte 249 - .byte 121 - .byte 250 - .byte 122 - .byte 251 - .byte 123 - .byte 252 - .byte 124 - .byte 253 - .byte 125 - .byte 254 - .byte 126 - .byte 255 - .byte 127 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_4, @object - .size table_4, 112 -table_4: - .byte 1 - .byte 0 - .byte 3 - .byte 0 - .byte 7 - .byte 0 - .byte 15 - .byte 0 - .byte 15 - .byte 1 - .byte 14 - .byte 3 - .byte 13 - .byte 3 - .byte 11 - .byte 3 - .byte 7 - .byte 3 - .byte 15 - .byte 2 - .byte 14 - .byte 1 - .byte 12 - .byte 3 - .byte 9 - .byte 3 - .byte 3 - .byte 3 - .byte 7 - .byte 2 - .byte 14 - .byte 0 - .byte 13 - .byte 1 - .byte 10 - .byte 3 - .byte 5 - .byte 3 - .byte 11 - .byte 2 - .byte 6 - .byte 1 - .byte 12 - .byte 2 - .byte 8 - .byte 1 - .byte 0 - .byte 3 - .byte 1 - .byte 2 - .byte 2 - .byte 0 - .byte 5 - .byte 0 - .byte 11 - .byte 0 - .byte 7 - .byte 1 - .byte 14 - .byte 2 - .byte 12 - .byte 1 - .byte 8 - .byte 3 - .byte 1 - .byte 3 - .byte 3 - .byte 2 - .byte 6 - .byte 0 - .byte 13 - .byte 0 - .byte 11 - .byte 1 - .byte 6 - .byte 3 - .byte 13 - .byte 2 - .byte 10 - .byte 1 - .byte 4 - .byte 3 - .byte 9 - .byte 2 - .byte 2 - .byte 1 - .byte 4 - .byte 2 - .byte 8 - .byte 0 - .byte 1 - .byte 1 - .byte 2 - .byte 2 - .byte 4 - .byte 0 - .byte 9 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 2 - .byte 12 - .byte 0 - .byte 9 - .byte 1 - .byte 2 - .byte 3 - .byte 5 - .byte 2 - .byte 10 - .byte 0 - - .text -.global skinny_128_384_init - .type skinny_128_384_init, @function -skinny_128_384_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,12 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_384_init, .-skinny_128_384_init - - .text -.global skinny_128_384_encrypt - .type skinny_128_384_encrypt, @function -skinny_128_384_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - std Y+33,r18 - std Y+34,r19 - std Y+35,r20 - std Y+36,r21 - ldd r18,Z+36 - ldd r19,Z+37 - ldd r20,Z+38 - ldd r21,Z+39 - std Y+37,r18 - std Y+38,r19 - std Y+39,r20 - std Y+40,r21 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd 
r21,Z+43 - std Y+41,r18 - std Y+42,r19 - std Y+43,r20 - std Y+44,r21 - ldd r18,Z+44 - ldd r19,Z+45 - ldd r20,Z+46 - ldd r21,Z+47 - std Y+45,r18 - std Y+46,r19 - std Y+47,r20 - std Y+48,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -114: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z 
-#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm 
r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) 
- elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z 
-#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif 
defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm 
r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - 
eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,112 - brne 5721f - rjmp 790f -5721: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif 
defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 114b -790: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_encrypt, .-skinny_128_384_encrypt - -.global skinny_128_384_encrypt_tk_full - .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt - - .text -.global skinny_128_384_decrypt - .type skinny_128_384_decrypt, @function -skinny_128_384_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r23 - std Y+2,r2 - std Y+3,r21 - std Y+4,r20 - std Y+5,r3 - std Y+6,r18 - std Y+7,r19 - std Y+8,r22 - std Y+9,r9 - std Y+10,r10 - std Y+11,r7 - std Y+12,r6 - std Y+13,r11 - std Y+14,r4 - std Y+15,r5 - std Y+16,r8 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r23 - std Y+18,r2 - std Y+19,r21 - std Y+20,r20 - std Y+21,r3 - std Y+22,r18 - std Y+23,r19 - std Y+24,r22 - std Y+25,r9 - std Y+26,r10 - std Y+27,r7 - std Y+28,r6 - std Y+29,r11 - std Y+30,r4 - std Y+31,r5 - std Y+32,r8 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r22,Z+36 - ldd r23,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+33,r23 - std Y+34,r2 - std Y+35,r21 - std Y+36,r20 - std Y+37,r3 - std Y+38,r18 - std Y+39,r19 - std Y+40,r22 - std Y+41,r9 - std Y+42,r10 - std Y+43,r7 - std Y+44,r6 - std Y+45,r11 - std Y+46,r4 - std Y+47,r5 - std Y+48,r8 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -122: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if 
defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 122b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,28 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -150: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 150b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 -179: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm 
r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 179b - std Y+33,r12 - std Y+34,r13 - std Y+35,r14 - std Y+36,r15 - std Y+37,r24 - std Y+38,r25 - std Y+39,r16 - std Y+40,r17 - ldi r26,28 - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 -207: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 207b - std Y+41,r12 - std Y+42,r13 - std Y+43,r14 - std Y+44,r15 - std Y+45,r24 - std Y+46,r25 - std Y+47,r16 - std Y+48,r17 - ldi r26,112 -227: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z 
-#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - 
ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd 
r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - 
eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else 
- lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if 
defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld 
r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out 
_SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z 
-#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 903f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 227b -903: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_decrypt, .-skinny_128_384_decrypt - - .text -.global skinny_128_256_init - .type skinny_128_256_init, @function -skinny_128_256_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,8 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_256_init, .-skinny_128_256_init - - .text -.global skinny_128_256_encrypt - .type skinny_128_256_encrypt, @function -skinny_128_256_encrypt: - push r28 - 
push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -82: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - 
lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if 
defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm 
r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 
- std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - 
mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,96 - breq 594f - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 82b -594: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_encrypt, .-skinny_128_256_encrypt - -.global skinny_128_256_encrypt_tk_full - .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt - - .text -.global skinny_128_256_decrypt - .type skinny_128_256_decrypt, @function -skinny_128_256_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - std Y+5,r22 - std Y+6,r23 - std Y+7,r2 - std Y+8,r3 - std Y+9,r4 - std Y+10,r5 - std Y+11,r6 - std Y+12,r7 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - std Y+21,r22 - std Y+22,r23 - std Y+23,r2 - std Y+24,r3 - std Y+25,r4 - std Y+26,r5 - std Y+27,r6 - std Y+28,r7 - std Y+29,r8 - std Y+30,r9 - std Y+31,r10 - std Y+32,r11 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,24 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -90: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif 
defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 90b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,24 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -118: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 118b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,96 -139: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm 
- mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif 
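As an editorial aside on the fragment that ends here: the deleted skinny_128_256_decrypt assembly repeats the same lookup idiom for every state byte. The byte to be substituted is moved into r30 (the low half of the Z pointer, with r31 already holding the high byte of a table such as table_1), and it is then replaced by a byte fetched from program memory: elpm when the device has RAMPZ (more than 64K of flash), lpm Rd,Z when the enhanced LPM instruction is available, a plain ld on __AVR_TINY__ parts where flash is mapped into the data space, and the classic lpm-into-r0 form otherwise. The following C sketch shows what each unrolled fragment computes; table_1, the 16-byte state and the function name are illustrative stand-ins and are not part of the original sources.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins: a 256-entry lookup table (kept in flash on AVR)
 * and a 16-byte cipher state held in registers by the real assembly. */
static const uint8_t table_1[256] = { 0 };

/* Each unrolled fragment above is one iteration of this loop: the state
 * byte supplies the low half of the table address and is overwritten with
 * the byte read back from the table. */
static void substitute_bytes(uint8_t state[16])
{
    int i;
    for (i = 0; i < 16; ++i)
        state[i] = table_1[state[i]];
}

int main(void)
{
    uint8_t state[16] = { 0 };
    substitute_bytes(state);
    printf("state[0] after substitution: 0x%02x\n", state[0]);
    return 0;
}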
- mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldi 
r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std 
Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif 
defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 651f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 139b -651: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_decrypt, .-skinny_128_256_decrypt - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-skinny128.c b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-skinny128.c deleted file mode 100644 index 579ced1..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-skinny128.c +++ /dev/null @@ -1,801 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-skinny128.h" -#include "internal-skinnyutil.h" -#include "internal-util.h" -#include - -#if !defined(__AVR__) - -STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) -{ - /* This function is used to fast-forward the TK1 tweak value - * to the value at the end of the key schedule for decryption. - * - * The tweak permutation repeats every 16 rounds, so SKINNY-128-256 - * with 48 rounds does not need any fast forwarding applied. - * SKINNY-128-128 with 40 rounds and SKINNY-128-384 with 56 rounds - * are equivalent to applying the permutation 8 times: - * - * PT*8 = [5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12] - */ - uint32_t row0 = tk[0]; - uint32_t row1 = tk[1]; - uint32_t row2 = tk[2]; - uint32_t row3 = tk[3]; - tk[0] = ((row1 >> 8) & 0x0000FFFFU) | - ((row0 >> 8) & 0x00FF0000U) | - ((row0 << 8) & 0xFF000000U); - tk[1] = ((row1 >> 24) & 0x000000FFU) | - ((row0 << 8) & 0x00FFFF00U) | - ((row1 << 24) & 0xFF000000U); - tk[2] = ((row3 >> 8) & 0x0000FFFFU) | - ((row2 >> 8) & 0x00FF0000U) | - ((row2 << 8) & 0xFF000000U); - tk[3] = ((row3 >> 24) & 0x000000FFU) | - ((row2 << 8) & 0x00FFFF00U) | - ((row3 << 24) & 0xFF000000U); -} - -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); - memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); -#else - /* Set the initial states of TK1, TK2, and TK3 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Set up the key schedule using TK2 and TK3. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. 
- * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ TK3[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ TK3[1] ^ (rc >> 4); - - /* Permute TK2 and TK3 for the next round */ - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - - /* Apply the LFSR's to TK2 and TK3 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } -#endif -} - -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0x15; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Permute TK1 to fast-forward it to the end of the key schedule */ - skinny128_fast_forward_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_fast_forward_tk(TK2); - skinny128_fast_forward_tk(TK3); - for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
- skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - skinny128_LFSR3(TK3[2]); - skinny128_LFSR3(TK3[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_inv_permute_tk(TK3); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); - skinny128_LFSR2(TK3[2]); - skinny128_LFSR2(TK3[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); - TK2[0] = le_load_word32(tk2); - TK2[1] = le_load_word32(tk2 + 4); - TK2[2] = le_load_word32(tk2 + 8); - TK2[3] = le_load_word32(tk2 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; - s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK3); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); -#else - /* Set the initial states of TK1 and TK2 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Set up the key schedule using TK2. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ (rc >> 4); - - /* Permute TK2 for the next round */ - skinny128_permute_tk(TK2); - - /* Apply the LFSR to TK2 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } -#endif -} - -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0x09; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1. 
- * There is no need to fast-forward TK1 because the value at - * the end of the key schedule is the same as at the start */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2. - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -#else /* __AVR__ */ - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - memcpy(ks->TK2, tk2, 16); - skinny_128_384_encrypt(ks, output, input); -} - -#endif /* __AVR__ */ diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-skinny128.h b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-skinny128.h deleted file mode 100644 index 2bfda3c..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-skinny128.h +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNY128_H -#define LW_INTERNAL_SKINNY128_H - -/** - * \file internal-skinny128.h - * \brief SKINNY-128 block cipher family. - * - * References: https://eprint.iacr.org/2016/660.pdf, - * https://sites.google.com/site/skinnycipher/ - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \def SKINNY_128_SMALL_SCHEDULE - * \brief Defined to 1 to use the small key schedule version of SKINNY-128. - */ -#if defined(__AVR__) -#define SKINNY_128_SMALL_SCHEDULE 1 -#else -#define SKINNY_128_SMALL_SCHEDULE 0 -#endif - -/** - * \brief Size of a block for SKINNY-128 block ciphers. - */ -#define SKINNY_128_BLOCK_SIZE 16 - -/** - * \brief Number of rounds for SKINNY-128-384. - */ -#define SKINNY_128_384_ROUNDS 56 - -/** - * \brief Structure of the key schedule for SKINNY-128-384. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; - - /** TK3 for the small key schedule */ - uint8_t TK3[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_384_ROUNDS * 2]; -#endif - -} skinny_128_384_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-384. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and an explicitly - * provided TK2 value. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tk2 TK2 value that should be updated on the fly. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when both TK1 and TK2 change from block to block. - * When the key is initialized with skinny_128_384_init(), the TK2 part of - * the key value should be set to zero. - * - * \note Some versions of this function may modify the key schedule to - * copy tk2 into place. - */ -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and a - * fully specified tweakey value. - * - * \param key Points to the 384-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-384 but - * more memory-efficient. - */ -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input); - -/** - * \brief Number of rounds for SKINNY-128-256. - */ -#define SKINNY_128_256_ROUNDS 48 - -/** - * \brief Structure of the key schedule for SKINNY-128-256. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_256_ROUNDS * 2]; -#endif - -} skinny_128_256_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256 and a - * fully specified tweakey value. - * - * \param key Points to the 256-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-256 but - * more memory-efficient. - */ -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-skinnyutil.h b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-skinnyutil.h deleted file mode 100644 index 83136cb..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-skinnyutil.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNYUTIL_H -#define LW_INTERNAL_SKINNYUTIL_H - -/** - * \file internal-skinnyutil.h - * \brief Utilities to help implement SKINNY and its variants. - */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @cond skinnyutil */ - -/* Utilities for implementing SKINNY-128 */ - -#define skinny128_LFSR2(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ - (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ - } while (0) - - -#define skinny128_LFSR3(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ - (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) -#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) - -#define skinny128_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint32_t row2 = tk[2]; \ - uint32_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 16) | (row3 >> 16); \ - tk[0] = ((row2 >> 8) & 0x000000FFU) | \ - ((row2 << 16) & 0x00FF0000U) | \ - ( row3 & 0xFF00FF00U); \ - tk[1] = ((row2 >> 16) & 0x000000FFU) | \ - (row2 & 0xFF000000U) | \ - ((row3 << 8) & 0x0000FF00U) | \ - ( row3 & 0x00FF0000U); \ - } while (0) - -#define skinny128_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint32_t row0 = tk[0]; \ - uint32_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 >> 16) & 0x000000FFU) | \ - ((row0 << 8) & 0x0000FF00U) | \ - ((row1 << 16) & 0x00FF0000U) | \ - ( row1 & 0xFF000000U); \ - tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ - ((row0 << 16) & 0xFF000000U) | \ - ((row1 >> 16) & 0x000000FFU) | \ - ((row1 << 8) & 0x00FF0000U); \ - } while (0) - -/* - * Apply the SKINNY sbox. The original version from the specification is - * equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE(x) - * ((((x) & 0x01010101U) << 2) | - * (((x) & 0x06060606U) << 5) | - * (((x) & 0x20202020U) >> 5) | - * (((x) & 0xC8C8C8C8U) >> 2) | - * (((x) & 0x10101010U) >> 1)) - * - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * return SBOX_SWAP(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. 
- */ -#define skinny128_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ - y = (((x >> 5) & (x << 1)) & 0x04040404U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ - x = ((x & 0x08080808U) << 1) | \ - ((x & 0x32323232U) << 2) | \ - ((x & 0x01010101U) << 5) | \ - ((x & 0x80808080U) >> 6) | \ - ((x & 0x40404040U) >> 4) | \ - ((x & 0x04040404U) >> 2); \ -} while (0) - -/* - * Apply the inverse of the SKINNY sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE_INV(x) - * ((((x) & 0x08080808U) << 1) | - * (((x) & 0x32323232U) << 2) | - * (((x) & 0x01010101U) << 5) | - * (((x) & 0xC0C0C0C0U) >> 5) | - * (((x) & 0x04040404U) >> 2)) - * - * x = SBOX_SWAP(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_inv_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ - x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ - y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ - x = ((x & 0x01010101U) << 2) | \ - ((x & 0x04040404U) << 4) | \ - ((x & 0x02020202U) << 6) | \ - ((x & 0x20202020U) >> 5) | \ - ((x & 0xC8C8C8C8U) >> 2) | \ - ((x & 0x10101010U) >> 1); \ -} while (0) - -/* Utilities for implementing SKINNY-64 */ - -#define skinny64_LFSR2(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ - } while (0) - -#define skinny64_LFSR3(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) -#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) - -#define skinny64_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint16_t row2 = tk[2]; \ - uint16_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 8) | (row3 >> 8); \ - tk[0] = ((row2 << 4) & 0xF000U) | \ - ((row2 >> 8) & 0x00F0U) | \ - ( row3 & 0x0F0FU); \ - tk[1] = ((row2 << 8) & 0xF000U) | \ - ((row3 >> 4) & 0x0F00U) | \ - ( row3 & 0x00F0U) | \ - ( row2 & 0x000FU); \ - } while (0) - 
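Since skinny128_sbox and skinny128_inv_sbox above are bit-sliced rewrites of the table-based S-box from the specification, a quick way to gain confidence in them is to round-trip every byte value through both macros and check that the input comes back unchanged. The minimal harness below is a hypothetical addition, not part of the original sources; it assumes the deleted internal-skinnyutil.h (and the internal-util.h it pulls in) is available on the include path. The temporaries are deliberately not named y, because the macros declare an internal uint32_t y of their own.

#include <stdint.h>
#include <stdio.h>
#include "internal-skinnyutil.h"

int main(void)
{
    unsigned b;
    for (b = 0; b < 256; ++b) {
        /* Replicate the byte into all four lanes of the 32-bit word,
         * since the macros process four bytes in parallel. */
        uint32_t v = (uint32_t)b * 0x01010101U;
        uint32_t w = v;
        skinny128_sbox(w);
        skinny128_inv_sbox(w);
        if (w != v) {
            printf("mismatch at byte 0x%02x\n", b);
            return 1;
        }
    }
    printf("skinny128_sbox/inv_sbox round-trip OK\n");
    return 0;
}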
-#define skinny64_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint16_t row0 = tk[0]; \ - uint16_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 << 8) & 0xF000U) | \ - ((row0 >> 4) & 0x0F00U) | \ - ((row1 >> 8) & 0x00F0U) | \ - ( row1 & 0x000FU); \ - tk[3] = ((row1 << 8) & 0xF000U) | \ - ((row0 << 8) & 0x0F00U) | \ - ((row1 >> 4) & 0x00F0U) | \ - ((row0 >> 8) & 0x000FU); \ - } while (0) - -/* - * Apply the SKINNY-64 sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT(x) - * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_SHIFT steps to be performed with one final rotation. - * This reduces the number of required shift operations from 14 to 10. - * - * We can further reduce the number of NOT operations from 4 to 2 - * using the technique from https://github.com/kste/skinny_avx to - * convert NOR-XOR operations into AND-XOR operations by converting - * the S-box into its NOT-inverse. - */ -#define skinny64_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ - x = ~x; \ - x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ -} while (0) - -/* - * Apply the inverse of the SKINNY-64 sbox. The original version - * from the specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT_INV(x) - * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * return SBOX_MIX(x); - */ -#define skinny64_inv_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = ~x; \ - x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ -} while (0) - -/** @endcond */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-util.h b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - 
(ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. 
This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/skinny-aead.c b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/skinny-aead.c deleted file mode 100644 index 7558527..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/skinny-aead.c +++ /dev/null @@ -1,804 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
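One more note on internal-util.h before skinny-aead.c: when LW_CRYPTO_ROTATE32_COMPOSED is set (the AVR case), arbitrary 32-bit rotations are composed out of 1-bit and 8/16/24-bit rotations, since those are the only cheap cases on a CPU without a barrel shifter. The sketch below checks two of the composed forms against a direct rotation; it is illustrative only, and rotl32/rotr32 are local helper names rather than library identifiers.

#include <stdint.h>
#include <stdio.h>

static uint32_t rotl32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32U - bits));
}

static uint32_t rotr32(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32U - bits));
}

/* leftRotate5 above: rotate left by 8, then right by 1 three times. */
static uint32_t rotl5_composed(uint32_t x)
{
    return rotr32(rotr32(rotr32(rotl32(x, 8), 1), 1), 1);
}

/* leftRotate12 above: rotate left by 16, then right by 1 four times. */
static uint32_t rotl12_composed(uint32_t x)
{
    return rotr32(rotr32(rotr32(rotr32(rotl32(x, 16), 1), 1), 1), 1);
}

int main(void)
{
    uint32_t x = 0x12345678U;
    unsigned i;
    for (i = 0; i < 64; ++i) {
        if (rotl5_composed(x)  != rotl32(x, 5) ||
            rotl12_composed(x) != rotl32(x, 12)) {
            printf("composed rotation mismatch at 0x%08lx\n",
                   (unsigned long)x);
            return 1;
        }
        x = x * 2654435761U + i;  /* arbitrary next test value */
    }
    printf("composed rotations match direct rotations\n");
    return 0;
}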
- */ - -#include "skinny-aead.h" -#include "internal-skinny128.h" -#include "internal-util.h" -#include - -aead_cipher_t const skinny_aead_m1_cipher = { - "SKINNY-AEAD-M1", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M1_NONCE_SIZE, - SKINNY_AEAD_M1_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m1_encrypt, - skinny_aead_m1_decrypt -}; - -aead_cipher_t const skinny_aead_m2_cipher = { - "SKINNY-AEAD-M2", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M2_NONCE_SIZE, - SKINNY_AEAD_M2_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m2_encrypt, - skinny_aead_m2_decrypt -}; - -aead_cipher_t const skinny_aead_m3_cipher = { - "SKINNY-AEAD-M3", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M3_NONCE_SIZE, - SKINNY_AEAD_M3_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m3_encrypt, - skinny_aead_m3_decrypt -}; - -aead_cipher_t const skinny_aead_m4_cipher = { - "SKINNY-AEAD-M4", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M4_NONCE_SIZE, - SKINNY_AEAD_M4_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m4_encrypt, - skinny_aead_m4_decrypt -}; - -aead_cipher_t const skinny_aead_m5_cipher = { - "SKINNY-AEAD-M5", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M5_NONCE_SIZE, - SKINNY_AEAD_M5_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m5_encrypt, - skinny_aead_m5_decrypt -}; - -aead_cipher_t const skinny_aead_m6_cipher = { - "SKINNY-AEAD-M6", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M6_NONCE_SIZE, - SKINNY_AEAD_M6_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m6_encrypt, - skinny_aead_m6_decrypt -}; - -/* Domain separator prefixes for all of the SKINNY-AEAD family members */ -#define DOMAIN_SEP_M1 0x00 -#define DOMAIN_SEP_M2 0x10 -#define DOMAIN_SEP_M3 0x08 -#define DOMAIN_SEP_M4 0x18 -#define DOMAIN_SEP_M5 0x10 -#define DOMAIN_SEP_M6 0x18 - -/** - * \brief Initialize the key and nonce for SKINNY-128-384 based AEAD schemes. - * - * \param ks The key schedule to initialize. - * \param key Points to the 16 bytes of the key. - * \param nonce Points to the nonce. - * \param nonce_len Length of the nonce in bytes. - */ -static void skinny_aead_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - const unsigned char *nonce, unsigned nonce_len) -{ - unsigned char k[48]; - memset(k, 0, 16); - memcpy(k + 16, nonce, nonce_len); - memset(k + 16 + nonce_len, 0, 16 - nonce_len); - memcpy(k + 32, key, 16); - skinny_128_384_init(ks, k); -} - -/** - * \brief Set the domain separation value in the tweak for SKINNY-128-384. - * - * \param ks Key schedule for the block cipher. - * \param d Domain separation value to write into the tweak. - */ -#define skinny_aead_128_384_set_domain(ks,d) ((ks)->TK1[15] = (d)) - -/** - * \brief Sets the LFSR field in the tweak for SKINNY-128-384. - * - * \param ks Key schedule for the block cipher. - * \param lfsr 64-bit LFSR value. - */ -#define skinny_aead_128_384_set_lfsr(ks,lfsr) le_store_word64((ks)->TK1, (lfsr)) - -/** - * \brief Updates the LFSR value for SKINNY-128-384. - * - * \param lfsr 64-bit LFSR value to be updated. - */ -#define skinny_aead_128_384_update_lfsr(lfsr) \ - do { \ - uint8_t feedback = ((lfsr) & (1ULL << 63)) ? 0x1B : 0x00; \ - (lfsr) = ((lfsr) << 1) ^ feedback; \ - } while (0) - -/** - * \brief Authenticates the associated data for a SKINNY-128-384 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param tag Final tag to XOR the authentication checksum into. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
- */ -static void skinny_aead_128_384_authenticate - (skinny_128_384_key_schedule_t *ks, unsigned char prefix, - unsigned char tag[SKINNY_128_BLOCK_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint64_t lfsr = 1; - skinny_aead_128_384_set_domain(ks, prefix | 2); - while (adlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_128_384_encrypt(ks, block, ad); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - ad += SKINNY_128_BLOCK_SIZE; - adlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_384_update_lfsr(lfsr); - } - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_aead_128_384_set_domain(ks, prefix | 3); - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, SKINNY_128_BLOCK_SIZE - temp - 1); - skinny_128_384_encrypt(ks, block, block); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - } -} - -/** - * \brief Encrypts the plaintext for a SKINNY-128-384 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the plaintext buffer. - * \param mlen Number of bytes of plaintext to be encrypted. - */ -static void skinny_aead_128_384_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint64_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_384_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_384_set_lfsr(ks, lfsr); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - skinny_128_384_encrypt(ks, c, m); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_384_update_lfsr(lfsr); - } - skinny_aead_128_384_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_384_set_domain(ks, prefix | 1); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_384_encrypt(ks, block, block); - lw_xor_block_2_src(c, block, m, temp); - skinny_aead_128_384_update_lfsr(lfsr); - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_aead_128_384_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_384_set_domain(ks, prefix | 4); - } - skinny_128_384_encrypt(ks, sum, sum); -} - -/** - * \brief Decrypts the ciphertext for a SKINNY-128-384 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the ciphertext buffer. - * \param mlen Number of bytes of ciphertext to be decrypted. 
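The low bits OR'ed into the domain byte in the two routines above (and in the decryption routine that follows) are what keep the different block types apart: prefix|0 for full message blocks, prefix|1 for a partial final message block, prefix|2 for full associated-data blocks, prefix|3 for a partial final associated-data block, and prefix|4 or prefix|5 when the checksum is encrypted to form the tag, depending on whether the message ended on a block boundary. The partial associated-data block itself gets ordinary 10* padding, and the same 0x80 marker is XORed into the checksum for a partial message block. A small sketch of the padding step follows; pad_final_block is an illustrative helper name, and SKINNY_128_BLOCK_SIZE is assumed to be the 16-byte SKINNY-128 block size.

#include <string.h>

#define SKINNY_128_BLOCK_SIZE 16

/* 10* padding of a short final block, mirroring the partial
 * associated-data path in skinny_aead_128_384_authenticate above:
 * copy the remaining bytes, append a single 0x80 byte, then
 * zero-fill the rest of the block. */
static void pad_final_block(unsigned char block[SKINNY_128_BLOCK_SIZE],
                            const unsigned char *data, unsigned len)
{
    /* len is assumed to be between 1 and SKINNY_128_BLOCK_SIZE - 1 here */
    memcpy(block, data, len);
    block[len] = 0x80;
    memset(block + len + 1, 0, SKINNY_128_BLOCK_SIZE - len - 1);
}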
- */ -static void skinny_aead_128_384_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint64_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_384_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_128_384_decrypt(ks, m, c); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_384_update_lfsr(lfsr); - } - skinny_aead_128_384_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_384_set_domain(ks, prefix | 1); - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_384_encrypt(ks, block, block); - lw_xor_block_2_src(m, block, c, temp); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - skinny_aead_128_384_update_lfsr(lfsr); - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_aead_128_384_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_384_set_domain(ks, prefix | 4); - } - skinny_128_384_encrypt(ks, sum, sum); -} - -int skinny_aead_m1_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M1_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M1_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M1, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M1, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M1_TAG_SIZE); - return 0; -} - -int skinny_aead_m1_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M1_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M1_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M1_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M1, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M1, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M1_TAG_SIZE); -} - -int skinny_aead_m2_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* 
Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M2_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M2_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M2, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M2, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M2_TAG_SIZE); - return 0; -} - -int skinny_aead_m2_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M2_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M2_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M2_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M2, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M2, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M2_TAG_SIZE); -} - -int skinny_aead_m3_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M3_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M3_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M3, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M3, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M3_TAG_SIZE); - return 0; -} - -int skinny_aead_m3_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M3_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M3_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M3_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M3, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M3, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M3_TAG_SIZE); -} - -int skinny_aead_m4_encrypt - (unsigned char *c, 
unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M4_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M4_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M4, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M4, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M4_TAG_SIZE); - return 0; -} - -int skinny_aead_m4_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M4_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M4_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M4_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M4, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M4, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M4_TAG_SIZE); -} - -/** - * \brief Initialize the key and nonce for SKINNY-128-256 based AEAD schemes. - * - * \param ks The key schedule to initialize. - * \param key Points to the 16 bytes of the key. - * \param nonce Points to the nonce. - * \param nonce_len Length of the nonce in bytes. - */ -static void skinny_aead_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - const unsigned char *nonce, unsigned nonce_len) -{ - unsigned char k[32]; - memset(k, 0, 16 - nonce_len); - memcpy(k + 16 - nonce_len, nonce, nonce_len); - memcpy(k + 16, key, 16); - skinny_128_256_init(ks, k); -} - -/** - * \brief Set the domain separation value in the tweak for SKINNY-128-256. - * - * \param ks Key schedule for the block cipher. - * \param d Domain separation value to write into the tweak. - */ -#define skinny_aead_128_256_set_domain(ks,d) ((ks)->TK1[3] = (d)) - -/** - * \brief Sets the LFSR field in the tweak for SKINNY-128-256. - * - * \param ks Key schedule for the block cipher. - * \param lfsr 24-bit LFSR value. - */ -#define skinny_aead_128_256_set_lfsr(ks,lfsr) \ - do { \ - (ks)->TK1[0] = (uint8_t)(lfsr); \ - (ks)->TK1[1] = (uint8_t)((lfsr) >> 8); \ - (ks)->TK1[2] = (uint8_t)((lfsr) >> 16); \ - } while (0) - -/** - * \brief Updates the LFSR value for SKINNY-128-256. - * - * \param lfsr 24-bit LFSR value to be updated. - */ -#define skinny_aead_128_256_update_lfsr(lfsr) \ - do { \ - uint32_t feedback = ((lfsr) & (((uint32_t)1) << 23)) ? 0x1B : 0x00; \ - (lfsr) = ((lfsr) << 1) ^ (feedback); \ - } while (0) - -/** - * \brief Authenticates the associated data for a SKINNY-128-256 based AEAD. - * - * \param ks The key schedule to use. 
- * \param prefix Domain separation prefix for the family member. - * \param tag Final tag to XOR the authentication checksum into. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - */ -static void skinny_aead_128_256_authenticate - (skinny_128_256_key_schedule_t *ks, unsigned char prefix, - unsigned char tag[SKINNY_128_BLOCK_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint32_t lfsr = 1; - skinny_aead_128_256_set_domain(ks, prefix | 2); - while (adlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_128_256_encrypt(ks, block, ad); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - ad += SKINNY_128_BLOCK_SIZE; - adlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_256_update_lfsr(lfsr); - } - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_aead_128_256_set_domain(ks, prefix | 3); - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, SKINNY_128_BLOCK_SIZE - temp - 1); - skinny_128_256_encrypt(ks, block, block); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - } -} - -/** - * \brief Encrypts the plaintext for a SKINNY-128-256 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the plaintext buffer. - * \param mlen Number of bytes of plaintext to be encrypted. - */ -static void skinny_aead_128_256_encrypt - (skinny_128_256_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint32_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_256_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_256_set_lfsr(ks, lfsr); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - skinny_128_256_encrypt(ks, c, m); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_256_update_lfsr(lfsr); - } - skinny_aead_128_256_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_256_set_domain(ks, prefix | 1); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_256_encrypt(ks, block, block); - lw_xor_block_2_src(c, block, m, temp); - skinny_aead_128_256_update_lfsr(lfsr); - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_aead_128_256_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_256_set_domain(ks, prefix | 4); - } - skinny_128_256_encrypt(ks, sum, sum); -} - -/** - * \brief Decrypts the ciphertext for a SKINNY-128-256 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the ciphertext buffer. - * \param mlen Number of bytes of ciphertext to be decrypted. 
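M5 and M6 swap SKINNY-128-384 for SKINNY-128-256, so there is one less 16-byte tweakey lane to work with: skinny_aead_128_256_init above right-aligns the nonce in the first 16 bytes (the zeroed leading bytes are exactly where the counter and domain byte go, TK1[0..2] and TK1[3]) and puts the key in the second 16 bytes, and the per-block counter shrinks to 24 bits. A sketch of the shorter counter, with the local helper name lfsr24_next; the explicit mask keeps the value in 24 bits, which matches the stored bytes since the original macro never feeds back anything above bit 23.

#include <stdint.h>
#include <stdio.h>

/* Same update rule as skinny_aead_128_256_update_lfsr above, kept in the
 * low 24 bits of a uint32_t. */
static uint32_t lfsr24_next(uint32_t lfsr)
{
    uint32_t feedback = (lfsr & (((uint32_t)1) << 23)) ? 0x1B : 0x00;
    return ((lfsr << 1) ^ feedback) & 0x00FFFFFFU;
}

int main(void)
{
    uint32_t lfsr = 1;
    int i;
    for (i = 0; i < 5; ++i) {
        printf("block %d counter: %06lx\n", i, (unsigned long)lfsr);
        lfsr = lfsr24_next(lfsr);
    }
    return 0;
}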
- */ -static void skinny_aead_128_256_decrypt - (skinny_128_256_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint32_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_256_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_128_256_decrypt(ks, m, c); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_256_update_lfsr(lfsr); - } - skinny_aead_128_256_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_256_set_domain(ks, prefix | 1); - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_256_encrypt(ks, block, block); - lw_xor_block_2_src(m, block, c, temp); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - skinny_aead_128_256_update_lfsr(lfsr); - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_aead_128_256_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_256_set_domain(ks, prefix | 4); - } - skinny_128_256_encrypt(ks, sum, sum); -} - -int skinny_aead_m5_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M5_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M5_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_256_encrypt(&ks, DOMAIN_SEP_M5, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M5, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M5_TAG_SIZE); - return 0; -} - -int skinny_aead_m5_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M5_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M5_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M5_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_256_decrypt(&ks, DOMAIN_SEP_M5, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M5, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M5_TAG_SIZE); -} - -int skinny_aead_m6_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* 
Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M6_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M6_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_256_encrypt(&ks, DOMAIN_SEP_M6, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M6, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M6_TAG_SIZE); - return 0; -} - -int skinny_aead_m6_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M6_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M6_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M6_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_256_decrypt(&ks, DOMAIN_SEP_M6, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M6, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M6_TAG_SIZE); -} diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/skinny-aead.h b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/skinny-aead.h deleted file mode 100644 index c6b54fb..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys-avr/skinny-aead.h +++ /dev/null @@ -1,518 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SKINNY_AEAD_H -#define LWCRYPTO_SKINNY_AEAD_H - -#include "aead-common.h" - -/** - * \file skinny-aead.h - * \brief Authenticated encryption based on the SKINNY block cipher. - * - * SKINNY-AEAD is a family of authenticated encryption algorithms - * that are built around the SKINNY tweakable block cipher. 
There - * are six members in the family: - * - * \li SKINNY-AEAD-M1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. This is the - * primary member of the family. - * \li SKINNY-AEAD-M2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li SKINNY-AEAD-M3 has a 128-bit key, a 128-bit nonce, and a 64-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li SKINNY-AEAD-M4 has a 128-bit key, a 96-bit nonce, and a 64-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li SKINNY-AEAD-M5 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * \li SKINNY-AEAD-M6 has a 128-bit key, a 96-bit nonce, and a 64-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * - * The SKINNY-AEAD family also includes two hash algorithms: - * - * \li SKINNY-tk3-HASH with a 256-bit hash output, based around the - * SKINNY-128-384 tweakable block cipher. This is the primary hashing - * member of the family. - * \li SKINNY-tk2-HASH with a 256-bit hash output, based around the - * SKINNY-128-256 tweakable block cipher. - * - * References: https://sites.google.com/site/skinnycipher/home - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all SKINNY-AEAD family members. - */ -#define SKINNY_AEAD_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M1. - */ -#define SKINNY_AEAD_M1_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M1. - */ -#define SKINNY_AEAD_M1_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M2. - */ -#define SKINNY_AEAD_M2_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M2. - */ -#define SKINNY_AEAD_M2_NONCE_SIZE 12 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M3. - */ -#define SKINNY_AEAD_M3_TAG_SIZE 8 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M3. - */ -#define SKINNY_AEAD_M3_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M4. - */ -#define SKINNY_AEAD_M4_TAG_SIZE 8 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M4. - */ -#define SKINNY_AEAD_M4_NONCE_SIZE 12 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M5. - */ -#define SKINNY_AEAD_M5_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M5. - */ -#define SKINNY_AEAD_M5_NONCE_SIZE 12 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M6. - */ -#define SKINNY_AEAD_M6_TAG_SIZE 8 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M6. - */ -#define SKINNY_AEAD_M6_NONCE_SIZE 12 - -/** - * \brief Meta-information block for the SKINNY-AEAD-M1 cipher. - */ -extern aead_cipher_t const skinny_aead_m1_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M2 cipher. - */ -extern aead_cipher_t const skinny_aead_m2_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M3 cipher. - */ -extern aead_cipher_t const skinny_aead_m3_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M4 cipher. - */ -extern aead_cipher_t const skinny_aead_m4_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M5 cipher. - */ -extern aead_cipher_t const skinny_aead_m5_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M6 cipher. 
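Before the per-function reference that follows, a minimal round-trip sketch of the M1 interface. The key, nonce, message, and associated-data values here are placeholders for illustration only; a real caller must use a secret key and a unique nonce for every packet.

#include <stdio.h>
#include "skinny-aead.h"

int main(void)
{
    unsigned char key[SKINNY_AEAD_KEY_SIZE] = {0};
    unsigned char nonce[SKINNY_AEAD_M1_NONCE_SIZE] = {0};
    unsigned char msg[] = "hello";
    unsigned char ad[] = "header";
    unsigned char c[sizeof(msg) + SKINNY_AEAD_M1_TAG_SIZE];
    unsigned char out[sizeof(msg)];
    unsigned long long clen, mlen;

    /* Encrypt: the output is the ciphertext with a 16-byte tag appended. */
    if (skinny_aead_m1_encrypt(c, &clen, msg, sizeof(msg), ad, sizeof(ad),
                               NULL, nonce, key) != 0)
        return 1;

    /* Decrypt: returns 0 only if the authentication tag verifies. */
    if (skinny_aead_m1_decrypt(out, &mlen, NULL, c, clen, ad, sizeof(ad),
                               nonce, key) != 0)
        return 1;

    printf("round trip ok: %llu bytes recovered\n", mlen);
    return 0;
}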
- */ -extern aead_cipher_t const skinny_aead_m6_cipher; - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M1. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m1_decrypt() - */ -int skinny_aead_m1_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M1. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m1_encrypt() - */ -int skinny_aead_m1_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M2. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa skinny_aead_m2_decrypt() - */ -int skinny_aead_m2_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M2. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m2_encrypt() - */ -int skinny_aead_m2_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M3. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m3_decrypt() - */ -int skinny_aead_m3_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M3. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. 
- * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m3_encrypt() - */ -int skinny_aead_m3_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M4. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m4_decrypt() - */ -int skinny_aead_m4_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M4. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m4_encrypt() - */ -int skinny_aead_m4_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M5. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m5_decrypt() - */ -int skinny_aead_m5_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M5. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m5_encrypt() - */ -int skinny_aead_m5_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M6. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m6_decrypt() - */ -int skinny_aead_m6_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M6. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. 
- * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m6_encrypt() - */ -int skinny_aead_m6_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/internal-skinny128-avr.S b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/internal-skinny128-avr.S new file mode 100644 index 0000000..d342cd5 --- /dev/null +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/internal-skinny128-avr.S @@ -0,0 +1,10099 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 256 +table_0: + .byte 101 + .byte 76 + .byte 106 + .byte 66 + .byte 75 + .byte 99 + .byte 67 + .byte 107 + .byte 85 + .byte 117 + .byte 90 + .byte 122 + .byte 83 + .byte 115 + .byte 91 + .byte 123 + .byte 53 + .byte 140 + .byte 58 + .byte 129 + .byte 137 + .byte 51 + .byte 128 + .byte 59 + .byte 149 + .byte 37 + .byte 152 + .byte 42 + .byte 144 + .byte 35 + .byte 153 + .byte 43 + .byte 229 + .byte 204 + .byte 232 + .byte 193 + .byte 201 + .byte 224 + .byte 192 + .byte 233 + .byte 213 + .byte 245 + .byte 216 + .byte 248 + .byte 208 + .byte 240 + .byte 217 + .byte 249 + .byte 165 + .byte 28 + .byte 168 + .byte 18 + .byte 27 + .byte 160 + .byte 19 + .byte 169 + .byte 5 + .byte 181 + .byte 10 + .byte 184 + .byte 3 + .byte 176 + .byte 11 + .byte 185 + .byte 50 + .byte 136 + .byte 60 + .byte 133 + .byte 141 + .byte 52 + .byte 132 + .byte 61 + .byte 145 + .byte 34 + .byte 156 + .byte 44 + .byte 148 + .byte 36 + .byte 157 + .byte 45 + .byte 98 + .byte 74 + .byte 108 + .byte 69 + .byte 77 + .byte 100 + .byte 68 + .byte 109 + .byte 82 + .byte 114 + .byte 92 + .byte 124 + .byte 84 + .byte 116 + .byte 93 + .byte 125 + .byte 161 + .byte 26 + .byte 172 + .byte 21 + .byte 29 + .byte 164 + .byte 20 + .byte 173 + .byte 2 + .byte 177 + .byte 12 + .byte 188 + .byte 4 + .byte 180 + .byte 13 + .byte 189 + .byte 225 + .byte 200 + .byte 236 + .byte 197 + .byte 205 + .byte 228 + .byte 196 + .byte 237 + .byte 209 + .byte 241 + .byte 220 + .byte 252 + .byte 212 + .byte 244 + .byte 221 + .byte 253 + .byte 54 + .byte 142 + .byte 56 + .byte 130 + .byte 139 + .byte 48 + .byte 131 + .byte 57 + .byte 150 + .byte 38 + .byte 154 + .byte 40 + .byte 147 + .byte 32 + .byte 155 + .byte 41 + .byte 102 + .byte 78 + .byte 104 + .byte 65 + .byte 73 + .byte 96 + .byte 64 + .byte 105 + .byte 86 + .byte 118 + .byte 88 + .byte 120 + .byte 80 + .byte 112 + .byte 89 + .byte 121 + .byte 166 + .byte 30 + .byte 170 + 
.byte 17 + .byte 25 + .byte 163 + .byte 16 + .byte 171 + .byte 6 + .byte 182 + .byte 8 + .byte 186 + .byte 0 + .byte 179 + .byte 9 + .byte 187 + .byte 230 + .byte 206 + .byte 234 + .byte 194 + .byte 203 + .byte 227 + .byte 195 + .byte 235 + .byte 214 + .byte 246 + .byte 218 + .byte 250 + .byte 211 + .byte 243 + .byte 219 + .byte 251 + .byte 49 + .byte 138 + .byte 62 + .byte 134 + .byte 143 + .byte 55 + .byte 135 + .byte 63 + .byte 146 + .byte 33 + .byte 158 + .byte 46 + .byte 151 + .byte 39 + .byte 159 + .byte 47 + .byte 97 + .byte 72 + .byte 110 + .byte 70 + .byte 79 + .byte 103 + .byte 71 + .byte 111 + .byte 81 + .byte 113 + .byte 94 + .byte 126 + .byte 87 + .byte 119 + .byte 95 + .byte 127 + .byte 162 + .byte 24 + .byte 174 + .byte 22 + .byte 31 + .byte 167 + .byte 23 + .byte 175 + .byte 1 + .byte 178 + .byte 14 + .byte 190 + .byte 7 + .byte 183 + .byte 15 + .byte 191 + .byte 226 + .byte 202 + .byte 238 + .byte 198 + .byte 207 + .byte 231 + .byte 199 + .byte 239 + .byte 210 + .byte 242 + .byte 222 + .byte 254 + .byte 215 + .byte 247 + .byte 223 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 256 +table_1: + .byte 172 + .byte 232 + .byte 104 + .byte 60 + .byte 108 + .byte 56 + .byte 168 + .byte 236 + .byte 170 + .byte 174 + .byte 58 + .byte 62 + .byte 106 + .byte 110 + .byte 234 + .byte 238 + .byte 166 + .byte 163 + .byte 51 + .byte 54 + .byte 102 + .byte 99 + .byte 227 + .byte 230 + .byte 225 + .byte 164 + .byte 97 + .byte 52 + .byte 49 + .byte 100 + .byte 161 + .byte 228 + .byte 141 + .byte 201 + .byte 73 + .byte 29 + .byte 77 + .byte 25 + .byte 137 + .byte 205 + .byte 139 + .byte 143 + .byte 27 + .byte 31 + .byte 75 + .byte 79 + .byte 203 + .byte 207 + .byte 133 + .byte 192 + .byte 64 + .byte 21 + .byte 69 + .byte 16 + .byte 128 + .byte 197 + .byte 130 + .byte 135 + .byte 18 + .byte 23 + .byte 66 + .byte 71 + .byte 194 + .byte 199 + .byte 150 + .byte 147 + .byte 3 + .byte 6 + .byte 86 + .byte 83 + .byte 211 + .byte 214 + .byte 209 + .byte 148 + .byte 81 + .byte 4 + .byte 1 + .byte 84 + .byte 145 + .byte 212 + .byte 156 + .byte 216 + .byte 88 + .byte 12 + .byte 92 + .byte 8 + .byte 152 + .byte 220 + .byte 154 + .byte 158 + .byte 10 + .byte 14 + .byte 90 + .byte 94 + .byte 218 + .byte 222 + .byte 149 + .byte 208 + .byte 80 + .byte 5 + .byte 85 + .byte 0 + .byte 144 + .byte 213 + .byte 146 + .byte 151 + .byte 2 + .byte 7 + .byte 82 + .byte 87 + .byte 210 + .byte 215 + .byte 157 + .byte 217 + .byte 89 + .byte 13 + .byte 93 + .byte 9 + .byte 153 + .byte 221 + .byte 155 + .byte 159 + .byte 11 + .byte 15 + .byte 91 + .byte 95 + .byte 219 + .byte 223 + .byte 22 + .byte 19 + .byte 131 + .byte 134 + .byte 70 + .byte 67 + .byte 195 + .byte 198 + .byte 65 + .byte 20 + .byte 193 + .byte 132 + .byte 17 + .byte 68 + .byte 129 + .byte 196 + .byte 28 + .byte 72 + .byte 200 + .byte 140 + .byte 76 + .byte 24 + .byte 136 + .byte 204 + .byte 26 + .byte 30 + .byte 138 + .byte 142 + .byte 74 + .byte 78 + .byte 202 + .byte 206 + .byte 53 + .byte 96 + .byte 224 + .byte 165 + .byte 101 + .byte 48 + .byte 160 + .byte 229 + .byte 50 + .byte 55 + .byte 162 + .byte 167 + .byte 98 + .byte 103 + .byte 226 + .byte 231 + .byte 61 + .byte 105 + .byte 233 + .byte 173 + .byte 109 + .byte 57 + .byte 169 + .byte 237 + .byte 59 + .byte 63 + .byte 171 + .byte 175 + .byte 107 + .byte 111 + .byte 235 + .byte 239 + .byte 38 + .byte 35 + .byte 179 + .byte 182 + .byte 118 + .byte 115 + .byte 243 + .byte 246 + .byte 113 + .byte 36 + .byte 241 + .byte 180 + .byte 33 + 
.byte 116 + .byte 177 + .byte 244 + .byte 44 + .byte 120 + .byte 248 + .byte 188 + .byte 124 + .byte 40 + .byte 184 + .byte 252 + .byte 42 + .byte 46 + .byte 186 + .byte 190 + .byte 122 + .byte 126 + .byte 250 + .byte 254 + .byte 37 + .byte 112 + .byte 240 + .byte 181 + .byte 117 + .byte 32 + .byte 176 + .byte 245 + .byte 34 + .byte 39 + .byte 178 + .byte 183 + .byte 114 + .byte 119 + .byte 242 + .byte 247 + .byte 45 + .byte 121 + .byte 249 + .byte 189 + .byte 125 + .byte 41 + .byte 185 + .byte 253 + .byte 43 + .byte 47 + .byte 187 + .byte 191 + .byte 123 + .byte 127 + .byte 251 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_2, @object + .size table_2, 256 +table_2: + .byte 0 + .byte 2 + .byte 4 + .byte 6 + .byte 8 + .byte 10 + .byte 12 + .byte 14 + .byte 16 + .byte 18 + .byte 20 + .byte 22 + .byte 24 + .byte 26 + .byte 28 + .byte 30 + .byte 32 + .byte 34 + .byte 36 + .byte 38 + .byte 40 + .byte 42 + .byte 44 + .byte 46 + .byte 48 + .byte 50 + .byte 52 + .byte 54 + .byte 56 + .byte 58 + .byte 60 + .byte 62 + .byte 65 + .byte 67 + .byte 69 + .byte 71 + .byte 73 + .byte 75 + .byte 77 + .byte 79 + .byte 81 + .byte 83 + .byte 85 + .byte 87 + .byte 89 + .byte 91 + .byte 93 + .byte 95 + .byte 97 + .byte 99 + .byte 101 + .byte 103 + .byte 105 + .byte 107 + .byte 109 + .byte 111 + .byte 113 + .byte 115 + .byte 117 + .byte 119 + .byte 121 + .byte 123 + .byte 125 + .byte 127 + .byte 128 + .byte 130 + .byte 132 + .byte 134 + .byte 136 + .byte 138 + .byte 140 + .byte 142 + .byte 144 + .byte 146 + .byte 148 + .byte 150 + .byte 152 + .byte 154 + .byte 156 + .byte 158 + .byte 160 + .byte 162 + .byte 164 + .byte 166 + .byte 168 + .byte 170 + .byte 172 + .byte 174 + .byte 176 + .byte 178 + .byte 180 + .byte 182 + .byte 184 + .byte 186 + .byte 188 + .byte 190 + .byte 193 + .byte 195 + .byte 197 + .byte 199 + .byte 201 + .byte 203 + .byte 205 + .byte 207 + .byte 209 + .byte 211 + .byte 213 + .byte 215 + .byte 217 + .byte 219 + .byte 221 + .byte 223 + .byte 225 + .byte 227 + .byte 229 + .byte 231 + .byte 233 + .byte 235 + .byte 237 + .byte 239 + .byte 241 + .byte 243 + .byte 245 + .byte 247 + .byte 249 + .byte 251 + .byte 253 + .byte 255 + .byte 1 + .byte 3 + .byte 5 + .byte 7 + .byte 9 + .byte 11 + .byte 13 + .byte 15 + .byte 17 + .byte 19 + .byte 21 + .byte 23 + .byte 25 + .byte 27 + .byte 29 + .byte 31 + .byte 33 + .byte 35 + .byte 37 + .byte 39 + .byte 41 + .byte 43 + .byte 45 + .byte 47 + .byte 49 + .byte 51 + .byte 53 + .byte 55 + .byte 57 + .byte 59 + .byte 61 + .byte 63 + .byte 64 + .byte 66 + .byte 68 + .byte 70 + .byte 72 + .byte 74 + .byte 76 + .byte 78 + .byte 80 + .byte 82 + .byte 84 + .byte 86 + .byte 88 + .byte 90 + .byte 92 + .byte 94 + .byte 96 + .byte 98 + .byte 100 + .byte 102 + .byte 104 + .byte 106 + .byte 108 + .byte 110 + .byte 112 + .byte 114 + .byte 116 + .byte 118 + .byte 120 + .byte 122 + .byte 124 + .byte 126 + .byte 129 + .byte 131 + .byte 133 + .byte 135 + .byte 137 + .byte 139 + .byte 141 + .byte 143 + .byte 145 + .byte 147 + .byte 149 + .byte 151 + .byte 153 + .byte 155 + .byte 157 + .byte 159 + .byte 161 + .byte 163 + .byte 165 + .byte 167 + .byte 169 + .byte 171 + .byte 173 + .byte 175 + .byte 177 + .byte 179 + .byte 181 + .byte 183 + .byte 185 + .byte 187 + .byte 189 + .byte 191 + .byte 192 + .byte 194 + .byte 196 + .byte 198 + .byte 200 + .byte 202 + .byte 204 + .byte 206 + .byte 208 + .byte 210 + .byte 212 + .byte 214 + .byte 216 + .byte 218 + .byte 220 + .byte 222 + .byte 224 + .byte 226 + .byte 228 + .byte 230 + .byte 232 + .byte 234 + 
.byte 236 + .byte 238 + .byte 240 + .byte 242 + .byte 244 + .byte 246 + .byte 248 + .byte 250 + .byte 252 + .byte 254 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_3, @object + .size table_3, 256 +table_3: + .byte 0 + .byte 128 + .byte 1 + .byte 129 + .byte 2 + .byte 130 + .byte 3 + .byte 131 + .byte 4 + .byte 132 + .byte 5 + .byte 133 + .byte 6 + .byte 134 + .byte 7 + .byte 135 + .byte 8 + .byte 136 + .byte 9 + .byte 137 + .byte 10 + .byte 138 + .byte 11 + .byte 139 + .byte 12 + .byte 140 + .byte 13 + .byte 141 + .byte 14 + .byte 142 + .byte 15 + .byte 143 + .byte 16 + .byte 144 + .byte 17 + .byte 145 + .byte 18 + .byte 146 + .byte 19 + .byte 147 + .byte 20 + .byte 148 + .byte 21 + .byte 149 + .byte 22 + .byte 150 + .byte 23 + .byte 151 + .byte 24 + .byte 152 + .byte 25 + .byte 153 + .byte 26 + .byte 154 + .byte 27 + .byte 155 + .byte 28 + .byte 156 + .byte 29 + .byte 157 + .byte 30 + .byte 158 + .byte 31 + .byte 159 + .byte 160 + .byte 32 + .byte 161 + .byte 33 + .byte 162 + .byte 34 + .byte 163 + .byte 35 + .byte 164 + .byte 36 + .byte 165 + .byte 37 + .byte 166 + .byte 38 + .byte 167 + .byte 39 + .byte 168 + .byte 40 + .byte 169 + .byte 41 + .byte 170 + .byte 42 + .byte 171 + .byte 43 + .byte 172 + .byte 44 + .byte 173 + .byte 45 + .byte 174 + .byte 46 + .byte 175 + .byte 47 + .byte 176 + .byte 48 + .byte 177 + .byte 49 + .byte 178 + .byte 50 + .byte 179 + .byte 51 + .byte 180 + .byte 52 + .byte 181 + .byte 53 + .byte 182 + .byte 54 + .byte 183 + .byte 55 + .byte 184 + .byte 56 + .byte 185 + .byte 57 + .byte 186 + .byte 58 + .byte 187 + .byte 59 + .byte 188 + .byte 60 + .byte 189 + .byte 61 + .byte 190 + .byte 62 + .byte 191 + .byte 63 + .byte 64 + .byte 192 + .byte 65 + .byte 193 + .byte 66 + .byte 194 + .byte 67 + .byte 195 + .byte 68 + .byte 196 + .byte 69 + .byte 197 + .byte 70 + .byte 198 + .byte 71 + .byte 199 + .byte 72 + .byte 200 + .byte 73 + .byte 201 + .byte 74 + .byte 202 + .byte 75 + .byte 203 + .byte 76 + .byte 204 + .byte 77 + .byte 205 + .byte 78 + .byte 206 + .byte 79 + .byte 207 + .byte 80 + .byte 208 + .byte 81 + .byte 209 + .byte 82 + .byte 210 + .byte 83 + .byte 211 + .byte 84 + .byte 212 + .byte 85 + .byte 213 + .byte 86 + .byte 214 + .byte 87 + .byte 215 + .byte 88 + .byte 216 + .byte 89 + .byte 217 + .byte 90 + .byte 218 + .byte 91 + .byte 219 + .byte 92 + .byte 220 + .byte 93 + .byte 221 + .byte 94 + .byte 222 + .byte 95 + .byte 223 + .byte 224 + .byte 96 + .byte 225 + .byte 97 + .byte 226 + .byte 98 + .byte 227 + .byte 99 + .byte 228 + .byte 100 + .byte 229 + .byte 101 + .byte 230 + .byte 102 + .byte 231 + .byte 103 + .byte 232 + .byte 104 + .byte 233 + .byte 105 + .byte 234 + .byte 106 + .byte 235 + .byte 107 + .byte 236 + .byte 108 + .byte 237 + .byte 109 + .byte 238 + .byte 110 + .byte 239 + .byte 111 + .byte 240 + .byte 112 + .byte 241 + .byte 113 + .byte 242 + .byte 114 + .byte 243 + .byte 115 + .byte 244 + .byte 116 + .byte 245 + .byte 117 + .byte 246 + .byte 118 + .byte 247 + .byte 119 + .byte 248 + .byte 120 + .byte 249 + .byte 121 + .byte 250 + .byte 122 + .byte 251 + .byte 123 + .byte 252 + .byte 124 + .byte 253 + .byte 125 + .byte 254 + .byte 126 + .byte 255 + .byte 127 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_4, @object + .size table_4, 112 +table_4: + .byte 1 + .byte 0 + .byte 3 + .byte 0 + .byte 7 + .byte 0 + .byte 15 + .byte 0 + .byte 15 + .byte 1 + .byte 14 + .byte 3 + .byte 13 + .byte 3 + .byte 11 + .byte 3 + .byte 7 + .byte 3 + .byte 15 + .byte 2 + .byte 14 + .byte 1 + .byte 12 + .byte 3 + 
.byte 9 + .byte 3 + .byte 3 + .byte 3 + .byte 7 + .byte 2 + .byte 14 + .byte 0 + .byte 13 + .byte 1 + .byte 10 + .byte 3 + .byte 5 + .byte 3 + .byte 11 + .byte 2 + .byte 6 + .byte 1 + .byte 12 + .byte 2 + .byte 8 + .byte 1 + .byte 0 + .byte 3 + .byte 1 + .byte 2 + .byte 2 + .byte 0 + .byte 5 + .byte 0 + .byte 11 + .byte 0 + .byte 7 + .byte 1 + .byte 14 + .byte 2 + .byte 12 + .byte 1 + .byte 8 + .byte 3 + .byte 1 + .byte 3 + .byte 3 + .byte 2 + .byte 6 + .byte 0 + .byte 13 + .byte 0 + .byte 11 + .byte 1 + .byte 6 + .byte 3 + .byte 13 + .byte 2 + .byte 10 + .byte 1 + .byte 4 + .byte 3 + .byte 9 + .byte 2 + .byte 2 + .byte 1 + .byte 4 + .byte 2 + .byte 8 + .byte 0 + .byte 1 + .byte 1 + .byte 2 + .byte 2 + .byte 4 + .byte 0 + .byte 9 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 2 + .byte 12 + .byte 0 + .byte 9 + .byte 1 + .byte 2 + .byte 3 + .byte 5 + .byte 2 + .byte 10 + .byte 0 + + .text +.global skinny_128_384_init + .type skinny_128_384_init, @function +skinny_128_384_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,12 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_384_init, .-skinny_128_384_init + + .text +.global skinny_128_384_encrypt + .type skinny_128_384_encrypt, @function +skinny_128_384_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + std Y+33,r18 + std Y+34,r19 + std Y+35,r20 + std Y+36,r21 + ldd r18,Z+36 + ldd r19,Z+37 + ldd r20,Z+38 + ldd r21,Z+39 + std Y+37,r18 + std Y+38,r19 + std Y+39,r20 + std Y+40,r21 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + std Y+41,r18 + std Y+42,r19 + std Y+43,r20 + std Y+44,r21 + ldd r18,Z+44 + ldd r19,Z+45 + ldd r20,Z+46 + ldd r21,Z+47 + std Y+45,r18 + std Y+46,r19 + std Y+47,r20 + std Y+48,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +114: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor 
r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov 
r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if 
defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd 
r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov 
r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + 
elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 
+#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,112 + brne 5721f + rjmp 790f +5721: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if 
defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 114b +790: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + 
st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_encrypt, .-skinny_128_384_encrypt + +.global skinny_128_384_encrypt_tk_full + .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt + + .text +.global skinny_128_384_decrypt + .type skinny_128_384_decrypt, @function +skinny_128_384_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r23 + std Y+2,r2 + std Y+3,r21 + std Y+4,r20 + std Y+5,r3 + std Y+6,r18 + std Y+7,r19 + std Y+8,r22 + std Y+9,r9 + std Y+10,r10 + std Y+11,r7 + std Y+12,r6 + std Y+13,r11 + std Y+14,r4 + std Y+15,r5 + std Y+16,r8 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r23 + std Y+18,r2 + std Y+19,r21 + std Y+20,r20 + std Y+21,r3 + std Y+22,r18 + std Y+23,r19 + std Y+24,r22 + std Y+25,r9 + std Y+26,r10 + std Y+27,r7 + std Y+28,r6 + std Y+29,r11 + std Y+30,r4 + std Y+31,r5 + std Y+32,r8 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r22,Z+36 + ldd r23,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+33,r23 + std Y+34,r2 + std Y+35,r21 + std Y+36,r20 + std Y+37,r3 + std Y+38,r18 + std Y+39,r19 + std Y+40,r22 + std Y+41,r9 + std Y+42,r10 + std Y+43,r7 + std Y+44,r6 + std Y+45,r11 + std Y+46,r4 + std Y+47,r5 + std Y+48,r8 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +122: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + 
ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 122b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,28 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +150: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 150b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 +179: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + 
mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 179b + std Y+33,r12 + std Y+34,r13 + std Y+35,r14 + std Y+36,r15 + std Y+37,r24 + std Y+38,r25 + std Y+39,r16 + std Y+40,r17 + ldi r26,28 + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 +207: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 207b + std Y+41,r12 + std Y+42,r13 + std Y+43,r14 + std Y+44,r15 + std Y+45,r24 + std Y+46,r25 + std Y+47,r16 + std Y+48,r17 + ldi r26,112 +227: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z 
+#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + 
eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z 
+#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + 
elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z 
+#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm 
r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 903f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 227b +903: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_decrypt, .-skinny_128_384_decrypt + + .text +.global skinny_128_256_init + .type skinny_128_256_init, @function +skinny_128_256_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,8 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_256_init, .-skinny_128_256_init + + .text +.global skinny_128_256_encrypt + .type skinny_128_256_encrypt, @function +skinny_128_256_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std 
Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +82: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 
+#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov 
r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor 
r20,r0 + ldd r0,Y+32 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z 
+#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,96 + breq 594f + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld 
r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 82b +594: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_encrypt, .-skinny_128_256_encrypt + +.global skinny_128_256_encrypt_tk_full + .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt + + .text +.global skinny_128_256_decrypt + .type skinny_128_256_decrypt, @function +skinny_128_256_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + std Y+5,r22 + std Y+6,r23 + std Y+7,r2 + std Y+8,r3 + std Y+9,r4 + std Y+10,r5 + std Y+11,r6 + std Y+12,r7 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + std Y+21,r22 + std Y+22,r23 + std Y+23,r2 + std Y+24,r3 + std Y+25,r4 + std Y+26,r5 + std Y+27,r6 + std Y+28,r7 + std Y+29,r8 + std Y+30,r9 + std Y+31,r10 + std Y+32,r11 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,24 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +90: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 
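The sequence that repeats throughout this generated routine, loading a state byte into the low half of Z and reading the substituted byte back with ELPM, LPM Rd,Z, LD, or plain LPM depending on the target, is a byte-wise lookup into a table kept in flash. The tables are evidently aligned so that only the low byte of Z has to change, RAMPZ together with the hh8(table_x) loads selects the flash segment on devices with more than 64 KiB, and __AVR_TINY__ parts map program memory into the data space, so the generated code falls back to an ordinary LD there. A rough C-level sketch of one such lookup (the table_lookup() helper is illustrative and not part of the patch; avr-libc's <avr/pgmspace.h> provides the flash-read macros):

    #include <stdint.h>
    #if defined(__AVR__) && !defined(__AVR_TINY__)
    #include <avr/pgmspace.h>
    #endif

    /* Illustrative helper: read table[index] where the table lives in program
     * memory on classic AVR parts and in the normal address space elsewhere. */
    static inline uint8_t table_lookup(const uint8_t *table, uint8_t index)
    {
    #if defined(__AVR__) && !defined(__AVR_TINY__)
        /* Compiles down to an LPM sequence; a table placed above the first
         * 64 KiB would need pgm_read_byte_far(), which is the case the
         * RAMPZ/ELPM branch of the assembly handles. */
        return pgm_read_byte(table + index);
    #else
        /* Flash is memory mapped (or this is a host build): a plain load. */
        return table[index];
    #endif
    }

The assembly simply unrolls this lookup once per byte of the 16-byte cipher state, switching Z between table_0 and table_4, which appear to hold the forward and inverse S-boxes, the tweakey LFSR maps, and the round constants.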
+#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 90b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,24 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +118: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 118b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,96 +139: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov 
r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov 
r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + 
mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif 
defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi 
r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 651f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 139b +651: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_decrypt, .-skinny_128_256_decrypt + +#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/internal-skinny128.c b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/internal-skinny128.c index 65ba4ed..579ced1 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/internal-skinny128.c +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/internal-skinny128.c @@ -25,6 +25,8 @@ #include "internal-util.h" #include +#if !defined(__AVR__) + STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) { /* This function is used to fast-forward the TK1 tweak value @@ -55,42 +57,33 @@ STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) ((row3 << 24) & 0xFF000000U); } -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - size_t key_len) +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t TK3[4]; uint32_t *schedule; unsigned round; uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || (key_len != 32 && key_len != 48)) - return 0; - +#endif + +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using 
the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); + memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); +#else /* Set the initial states of TK1, TK2, and TK3 */ - if (key_len == 32) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - TK3[0] = le_load_word32(key + 16); - TK3[1] = le_load_word32(key + 20); - TK3[2] = le_load_word32(key + 24); - TK3[3] = le_load_word32(key + 28); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); + TK3[0] = le_load_word32(key + 32); + TK3[1] = le_load_word32(key + 36); + TK3[2] = le_load_word32(key + 40); + TK3[3] = le_load_word32(key + 44); /* Set up the key schedule using TK2 and TK3. TK1 is not added * to the key schedule because we will derive that part of the @@ -116,20 +109,7 @@ int skinny_128_384_init skinny128_LFSR3(TK3[0]); skinny128_LFSR3(TK3[1]); } - return 1; -} - -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_384_encrypt @@ -138,7 +118,13 @@ void skinny_128_384_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -148,14 +134,24 @@ void skinny_128_384_encrypt s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -163,8 +159,15 @@ void skinny_128_384_encrypt skinny128_sbox(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -185,6 +188,16 @@ void 
skinny_128_384_encrypt /* Permute TK1 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_permute_tk(TK3); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -200,7 +213,13 @@ void skinny_128_384_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0x15; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -215,15 +234,47 @@ void skinny_128_384_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Permute TK1 to fast-forward it to the end of the key schedule */ skinny128_fast_forward_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_fast_forward_tk(TK2); + skinny128_fast_forward_tk(TK3); + for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2 and TK3. + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); + skinny128_LFSR3(TK3[2]); + skinny128_LFSR3(TK3[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_inv_permute_tk(TK3); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); + skinny128_LFSR2(TK3[2]); + skinny128_LFSR2(TK3[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -240,8 +291,15 @@ void skinny_128_384_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -259,13 +317,18 @@ void skinny_128_384_decrypt } void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2) { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; uint32_t TK2[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -275,7 +338,7 @@ void skinny_128_384_encrypt_tk2 s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1/TK2 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = 
le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); @@ -284,9 +347,15 @@ void skinny_128_384_encrypt_tk2 TK2[1] = le_load_word32(tk2 + 4); TK2[2] = le_load_word32(tk2 + 8); TK2[3] = le_load_word32(tk2 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -294,8 +363,15 @@ void skinny_128_384_encrypt_tk2 skinny128_sbox(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -319,6 +395,13 @@ void skinny_128_384_encrypt_tk2 skinny128_permute_tk(TK2); skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK3); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -408,33 +491,27 @@ void skinny_128_384_encrypt_tk_full le_store_word32(output + 12, s3); } -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len) +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t *schedule; unsigned round; uint8_t rc; +#endif - /* Validate the parameters */ - if (!ks || !key || (key_len != 16 && key_len != 32)) - return 0; - +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); +#else /* Set the initial states of TK1 and TK2 */ - if (key_len == 16) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); /* Set up the key schedule using TK2. 
TK1 is not added * to the key schedule because we will derive that part of the @@ -457,20 +534,7 @@ int skinny_128_256_init skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); } - return 1; -} - -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_256_encrypt @@ -479,7 +543,12 @@ void skinny_128_256_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -494,18 +563,31 @@ void skinny_128_256_encrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); skinny128_sbox(s2); skinny128_sbox(s3); - /* Apply the subkey for this round */ + /* XOR the round constant and the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -524,8 +606,15 @@ void skinny_128_256_encrypt s1 = s0; s0 = temp; - /* Permute TK1 for the next round */ + /* Permute TK1 and TK2 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -541,7 +630,12 @@ void skinny_128_256_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0x09; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -558,12 +652,29 @@ void skinny_128_256_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2. 
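In the small-schedule paths above the round constants are no longer read from ks->k: the encryptor steps SKINNY's 6-bit LFSR forward once per round, and the decryptor runs the same generator backwards, starting from the value one step past the final round constant (0x09 for SKINNY-128-256, 0x15 for SKINNY-128-384). The following standalone sketch, not part of the patch and assuming the library's 48-round count for SKINNY-128-256, checks that the two recurrences used above agree:

    #include <stdint.h>
    #include <stdio.h>

    #define SKINNY_128_256_ROUNDS 48   /* round count assumed from the library */

    int main(void)
    {
        uint8_t rc = 0;
        int round;

        /* Forward generator, as in the small-schedule skinny_128_256_encrypt. */
        for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) {
            rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
            rc &= 0x3F;
        }
        printf("constant of the last round: 0x%02X\n", rc);   /* expect 0x04 */

        /* One extra forward step lands on the decryptor's starting value. */
        rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
        rc &= 0x3F;
        printf("decryptor's start value:    0x%02X\n", rc);   /* expect 0x09 */

        /* Backward generator, as in the small-schedule skinny_128_256_decrypt;
         * rewinding one step per round plus the extra step returns to zero. */
        for (round = 0; round <= SKINNY_128_256_ROUNDS; ++round)
            rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20);
        printf("after rewinding:            0x%02X\n", rc);   /* expect 0x00 */
        return 0;
    }

The forward recurrence is the same one the full-schedule initialiser uses when it precomputes ks->k, so both builds apply identical constants; only the place where they are generated changes.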
+ skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -580,8 +691,15 @@ void skinny_128_256_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -670,142 +788,14 @@ void skinny_128_256_encrypt_tk_full le_store_word32(output + 12, s3); } -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len) -{ - uint32_t TK1[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || key_len != 16) - return 0; - - /* Set the initial state of TK1 */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); +#else /* __AVR__ */ - /* Set up the key schedule using TK1 */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK1[0] ^ (rc & 0x0F); - schedule[1] = TK1[1] ^ (rc >> 4); - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); - } - return 1; -} - -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) +void skinny_128_384_encrypt_tk2 + (skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, const unsigned char *tk2) { - uint32_t s0, s1, s2, s3; - const uint32_t *schedule = ks->k; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); + memcpy(ks->TK2, tk2, 16); + skinny_128_384_encrypt(ks, output, input); } -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - const uint32_t *schedule; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_128_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule -= 2) { - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} +#endif /* __AVR__ */ diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/internal-skinny128.h b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/internal-skinny128.h index 76b34f5..2bfda3c 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/internal-skinny128.h +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/internal-skinny128.h @@ -39,6 +39,16 @@ extern "C" { #endif /** + * \def SKINNY_128_SMALL_SCHEDULE + * \brief Defined to 1 to use the small key schedule version of SKINNY-128. + */ +#if defined(__AVR__) +#define SKINNY_128_SMALL_SCHEDULE 1 +#else +#define SKINNY_128_SMALL_SCHEDULE 0 +#endif + +/** * \brief Size of a block for SKINNY-128 block ciphers. */ #define SKINNY_128_BLOCK_SIZE 16 @@ -56,8 +66,16 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; + + /** TK3 for the small key schedule */ + uint8_t TK3[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_384_ROUNDS * 2]; +#endif } skinny_128_384_key_schedule_t; @@ -66,29 +84,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 32 or 48, - * where 32 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-384. 
- * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); /** * \brief Encrypts a 128-bit block with SKINNY-128-384. @@ -133,9 +131,12 @@ void skinny_128_384_decrypt * This version is useful when both TK1 and TK2 change from block to block. * When the key is initialized with skinny_128_384_init(), the TK2 part of * the key value should be set to zero. + * + * \note Some versions of this function may modify the key schedule to + * copy tk2 into place. */ void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2); /** @@ -170,8 +171,13 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_256_ROUNDS * 2]; +#endif } skinny_128_256_key_schedule_t; @@ -180,29 +186,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16 or 32, - * where 16 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); /** * \brief Encrypts a 128-bit block with SKINNY-128-256. @@ -251,63 +237,6 @@ void skinny_128_256_encrypt_tk_full (const unsigned char key[32], unsigned char *output, const unsigned char *input); -/** - * \brief Number of rounds for SKINNY-128-128. - */ -#define SKINNY_128_128_ROUNDS 40 - -/** - * \brief Structure of the key schedule for SKINNY-128-128. - */ -typedef struct -{ - /** Words of the key schedule */ - uint32_t k[SKINNY_128_128_ROUNDS * 2]; - -} skinny_128_128_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-128. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. 
- * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - #ifdef __cplusplus } #endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/internal-util.h b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/internal-util.h +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/skinny-aead.c b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/skinny-aead.c index 2bb37e9..7558527 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/skinny-aead.c +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk312864v1/rhys/skinny-aead.c @@ -105,11 +105,12 @@ static void skinny_aead_128_384_init (skinny_128_384_key_schedule_t *ks, const unsigned char *key, const unsigned char *nonce, unsigned nonce_len) { - unsigned char k[32]; - memcpy(k, nonce, nonce_len); - memset(k + nonce_len, 0, 16 - nonce_len); - memcpy(k + 16, key, 16); - skinny_128_384_init(ks, k, 32); + unsigned char k[48]; + memset(k, 0, 16); + memcpy(k + 16, nonce, nonce_len); + memset(k + 16 + nonce_len, 0, 16 - nonce_len); + memcpy(k + 32, key, 16); + skinny_128_384_init(ks, k); } /** @@ -136,7 +137,7 @@ static void skinny_aead_128_384_init #define skinny_aead_128_384_update_lfsr(lfsr) \ do { \ uint8_t feedback = ((lfsr) & (1ULL << 63)) ? 
0x1B : 0x00; \ - (lfsr) = ((lfsr) << 1) | feedback; \ + (lfsr) = ((lfsr) << 1) ^ feedback; \ } while (0) /** @@ -520,7 +521,7 @@ static void skinny_aead_128_256_init memset(k, 0, 16 - nonce_len); memcpy(k + 16 - nonce_len, nonce, nonce_len); memcpy(k + 16, key, 16); - skinny_128_256_init(ks, k, 32); + skinny_128_256_init(ks, k); } /** diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/aead-common.c b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/aead-common.h b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. 
Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/api.h b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/api.h deleted file mode 100644 index c3c0a27..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 12 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/encrypt.c b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/encrypt.c deleted file mode 100644 index 92605fe..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "skinny-aead.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return skinny_aead_m2_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return skinny_aead_m2_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-skinny128-avr.S b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-skinny128-avr.S deleted file mode 100644 index d342cd5..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-skinny128-avr.S +++ /dev/null @@ -1,10099 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 256 -table_0: - .byte 101 - .byte 76 - .byte 106 - .byte 66 - .byte 75 - .byte 99 - .byte 67 - .byte 107 - .byte 85 - .byte 117 - .byte 90 - .byte 122 - .byte 83 - .byte 115 - .byte 91 - .byte 123 - .byte 53 - .byte 140 - .byte 58 - .byte 129 - .byte 137 - .byte 51 - .byte 128 - .byte 59 - .byte 149 - .byte 37 - .byte 152 - .byte 42 - .byte 144 - .byte 35 - .byte 153 - .byte 43 - .byte 229 - .byte 204 - .byte 232 - .byte 193 - .byte 201 - .byte 224 - .byte 192 - .byte 233 - .byte 213 - .byte 245 - .byte 216 - .byte 248 - .byte 208 - .byte 240 - .byte 217 - .byte 249 - .byte 165 - .byte 28 - .byte 168 - .byte 18 - .byte 27 - .byte 160 - .byte 19 - .byte 169 - .byte 5 - .byte 181 - .byte 10 - .byte 184 - .byte 3 - .byte 176 - .byte 11 - .byte 185 - .byte 50 - .byte 136 - .byte 60 - .byte 133 - .byte 141 - .byte 52 - .byte 132 - .byte 61 - .byte 145 - .byte 34 - .byte 156 - .byte 44 - .byte 148 - .byte 36 - .byte 157 - .byte 45 - .byte 98 - .byte 74 - .byte 108 - .byte 69 - .byte 77 - .byte 100 - .byte 68 - .byte 109 - .byte 82 - .byte 114 - .byte 92 - .byte 124 - .byte 84 - .byte 116 - .byte 93 - .byte 125 - .byte 161 - .byte 26 - .byte 172 - .byte 21 - .byte 29 - .byte 164 - .byte 20 - .byte 173 - .byte 2 - .byte 177 - .byte 12 - .byte 188 - .byte 4 - .byte 180 - 
.byte 13 - .byte 189 - .byte 225 - .byte 200 - .byte 236 - .byte 197 - .byte 205 - .byte 228 - .byte 196 - .byte 237 - .byte 209 - .byte 241 - .byte 220 - .byte 252 - .byte 212 - .byte 244 - .byte 221 - .byte 253 - .byte 54 - .byte 142 - .byte 56 - .byte 130 - .byte 139 - .byte 48 - .byte 131 - .byte 57 - .byte 150 - .byte 38 - .byte 154 - .byte 40 - .byte 147 - .byte 32 - .byte 155 - .byte 41 - .byte 102 - .byte 78 - .byte 104 - .byte 65 - .byte 73 - .byte 96 - .byte 64 - .byte 105 - .byte 86 - .byte 118 - .byte 88 - .byte 120 - .byte 80 - .byte 112 - .byte 89 - .byte 121 - .byte 166 - .byte 30 - .byte 170 - .byte 17 - .byte 25 - .byte 163 - .byte 16 - .byte 171 - .byte 6 - .byte 182 - .byte 8 - .byte 186 - .byte 0 - .byte 179 - .byte 9 - .byte 187 - .byte 230 - .byte 206 - .byte 234 - .byte 194 - .byte 203 - .byte 227 - .byte 195 - .byte 235 - .byte 214 - .byte 246 - .byte 218 - .byte 250 - .byte 211 - .byte 243 - .byte 219 - .byte 251 - .byte 49 - .byte 138 - .byte 62 - .byte 134 - .byte 143 - .byte 55 - .byte 135 - .byte 63 - .byte 146 - .byte 33 - .byte 158 - .byte 46 - .byte 151 - .byte 39 - .byte 159 - .byte 47 - .byte 97 - .byte 72 - .byte 110 - .byte 70 - .byte 79 - .byte 103 - .byte 71 - .byte 111 - .byte 81 - .byte 113 - .byte 94 - .byte 126 - .byte 87 - .byte 119 - .byte 95 - .byte 127 - .byte 162 - .byte 24 - .byte 174 - .byte 22 - .byte 31 - .byte 167 - .byte 23 - .byte 175 - .byte 1 - .byte 178 - .byte 14 - .byte 190 - .byte 7 - .byte 183 - .byte 15 - .byte 191 - .byte 226 - .byte 202 - .byte 238 - .byte 198 - .byte 207 - .byte 231 - .byte 199 - .byte 239 - .byte 210 - .byte 242 - .byte 222 - .byte 254 - .byte 215 - .byte 247 - .byte 223 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 256 -table_1: - .byte 172 - .byte 232 - .byte 104 - .byte 60 - .byte 108 - .byte 56 - .byte 168 - .byte 236 - .byte 170 - .byte 174 - .byte 58 - .byte 62 - .byte 106 - .byte 110 - .byte 234 - .byte 238 - .byte 166 - .byte 163 - .byte 51 - .byte 54 - .byte 102 - .byte 99 - .byte 227 - .byte 230 - .byte 225 - .byte 164 - .byte 97 - .byte 52 - .byte 49 - .byte 100 - .byte 161 - .byte 228 - .byte 141 - .byte 201 - .byte 73 - .byte 29 - .byte 77 - .byte 25 - .byte 137 - .byte 205 - .byte 139 - .byte 143 - .byte 27 - .byte 31 - .byte 75 - .byte 79 - .byte 203 - .byte 207 - .byte 133 - .byte 192 - .byte 64 - .byte 21 - .byte 69 - .byte 16 - .byte 128 - .byte 197 - .byte 130 - .byte 135 - .byte 18 - .byte 23 - .byte 66 - .byte 71 - .byte 194 - .byte 199 - .byte 150 - .byte 147 - .byte 3 - .byte 6 - .byte 86 - .byte 83 - .byte 211 - .byte 214 - .byte 209 - .byte 148 - .byte 81 - .byte 4 - .byte 1 - .byte 84 - .byte 145 - .byte 212 - .byte 156 - .byte 216 - .byte 88 - .byte 12 - .byte 92 - .byte 8 - .byte 152 - .byte 220 - .byte 154 - .byte 158 - .byte 10 - .byte 14 - .byte 90 - .byte 94 - .byte 218 - .byte 222 - .byte 149 - .byte 208 - .byte 80 - .byte 5 - .byte 85 - .byte 0 - .byte 144 - .byte 213 - .byte 146 - .byte 151 - .byte 2 - .byte 7 - .byte 82 - .byte 87 - .byte 210 - .byte 215 - .byte 157 - .byte 217 - .byte 89 - .byte 13 - .byte 93 - .byte 9 - .byte 153 - .byte 221 - .byte 155 - .byte 159 - .byte 11 - .byte 15 - .byte 91 - .byte 95 - .byte 219 - .byte 223 - .byte 22 - .byte 19 - .byte 131 - .byte 134 - .byte 70 - .byte 67 - .byte 195 - .byte 198 - .byte 65 - .byte 20 - .byte 193 - .byte 132 - .byte 17 - .byte 68 - .byte 129 - .byte 196 - .byte 28 - .byte 72 - .byte 200 - .byte 140 - .byte 76 - .byte 24 - .byte 136 - .byte 204 - .byte 
26 - .byte 30 - .byte 138 - .byte 142 - .byte 74 - .byte 78 - .byte 202 - .byte 206 - .byte 53 - .byte 96 - .byte 224 - .byte 165 - .byte 101 - .byte 48 - .byte 160 - .byte 229 - .byte 50 - .byte 55 - .byte 162 - .byte 167 - .byte 98 - .byte 103 - .byte 226 - .byte 231 - .byte 61 - .byte 105 - .byte 233 - .byte 173 - .byte 109 - .byte 57 - .byte 169 - .byte 237 - .byte 59 - .byte 63 - .byte 171 - .byte 175 - .byte 107 - .byte 111 - .byte 235 - .byte 239 - .byte 38 - .byte 35 - .byte 179 - .byte 182 - .byte 118 - .byte 115 - .byte 243 - .byte 246 - .byte 113 - .byte 36 - .byte 241 - .byte 180 - .byte 33 - .byte 116 - .byte 177 - .byte 244 - .byte 44 - .byte 120 - .byte 248 - .byte 188 - .byte 124 - .byte 40 - .byte 184 - .byte 252 - .byte 42 - .byte 46 - .byte 186 - .byte 190 - .byte 122 - .byte 126 - .byte 250 - .byte 254 - .byte 37 - .byte 112 - .byte 240 - .byte 181 - .byte 117 - .byte 32 - .byte 176 - .byte 245 - .byte 34 - .byte 39 - .byte 178 - .byte 183 - .byte 114 - .byte 119 - .byte 242 - .byte 247 - .byte 45 - .byte 121 - .byte 249 - .byte 189 - .byte 125 - .byte 41 - .byte 185 - .byte 253 - .byte 43 - .byte 47 - .byte 187 - .byte 191 - .byte 123 - .byte 127 - .byte 251 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_2, @object - .size table_2, 256 -table_2: - .byte 0 - .byte 2 - .byte 4 - .byte 6 - .byte 8 - .byte 10 - .byte 12 - .byte 14 - .byte 16 - .byte 18 - .byte 20 - .byte 22 - .byte 24 - .byte 26 - .byte 28 - .byte 30 - .byte 32 - .byte 34 - .byte 36 - .byte 38 - .byte 40 - .byte 42 - .byte 44 - .byte 46 - .byte 48 - .byte 50 - .byte 52 - .byte 54 - .byte 56 - .byte 58 - .byte 60 - .byte 62 - .byte 65 - .byte 67 - .byte 69 - .byte 71 - .byte 73 - .byte 75 - .byte 77 - .byte 79 - .byte 81 - .byte 83 - .byte 85 - .byte 87 - .byte 89 - .byte 91 - .byte 93 - .byte 95 - .byte 97 - .byte 99 - .byte 101 - .byte 103 - .byte 105 - .byte 107 - .byte 109 - .byte 111 - .byte 113 - .byte 115 - .byte 117 - .byte 119 - .byte 121 - .byte 123 - .byte 125 - .byte 127 - .byte 128 - .byte 130 - .byte 132 - .byte 134 - .byte 136 - .byte 138 - .byte 140 - .byte 142 - .byte 144 - .byte 146 - .byte 148 - .byte 150 - .byte 152 - .byte 154 - .byte 156 - .byte 158 - .byte 160 - .byte 162 - .byte 164 - .byte 166 - .byte 168 - .byte 170 - .byte 172 - .byte 174 - .byte 176 - .byte 178 - .byte 180 - .byte 182 - .byte 184 - .byte 186 - .byte 188 - .byte 190 - .byte 193 - .byte 195 - .byte 197 - .byte 199 - .byte 201 - .byte 203 - .byte 205 - .byte 207 - .byte 209 - .byte 211 - .byte 213 - .byte 215 - .byte 217 - .byte 219 - .byte 221 - .byte 223 - .byte 225 - .byte 227 - .byte 229 - .byte 231 - .byte 233 - .byte 235 - .byte 237 - .byte 239 - .byte 241 - .byte 243 - .byte 245 - .byte 247 - .byte 249 - .byte 251 - .byte 253 - .byte 255 - .byte 1 - .byte 3 - .byte 5 - .byte 7 - .byte 9 - .byte 11 - .byte 13 - .byte 15 - .byte 17 - .byte 19 - .byte 21 - .byte 23 - .byte 25 - .byte 27 - .byte 29 - .byte 31 - .byte 33 - .byte 35 - .byte 37 - .byte 39 - .byte 41 - .byte 43 - .byte 45 - .byte 47 - .byte 49 - .byte 51 - .byte 53 - .byte 55 - .byte 57 - .byte 59 - .byte 61 - .byte 63 - .byte 64 - .byte 66 - .byte 68 - .byte 70 - .byte 72 - .byte 74 - .byte 76 - .byte 78 - .byte 80 - .byte 82 - .byte 84 - .byte 86 - .byte 88 - .byte 90 - .byte 92 - .byte 94 - .byte 96 - .byte 98 - .byte 100 - .byte 102 - .byte 104 - .byte 106 - .byte 108 - .byte 110 - .byte 112 - .byte 114 - .byte 116 - .byte 118 - .byte 120 - .byte 122 - .byte 124 - .byte 126 - .byte 129 - .byte 131 - .byte 133 - 
.byte 135 - .byte 137 - .byte 139 - .byte 141 - .byte 143 - .byte 145 - .byte 147 - .byte 149 - .byte 151 - .byte 153 - .byte 155 - .byte 157 - .byte 159 - .byte 161 - .byte 163 - .byte 165 - .byte 167 - .byte 169 - .byte 171 - .byte 173 - .byte 175 - .byte 177 - .byte 179 - .byte 181 - .byte 183 - .byte 185 - .byte 187 - .byte 189 - .byte 191 - .byte 192 - .byte 194 - .byte 196 - .byte 198 - .byte 200 - .byte 202 - .byte 204 - .byte 206 - .byte 208 - .byte 210 - .byte 212 - .byte 214 - .byte 216 - .byte 218 - .byte 220 - .byte 222 - .byte 224 - .byte 226 - .byte 228 - .byte 230 - .byte 232 - .byte 234 - .byte 236 - .byte 238 - .byte 240 - .byte 242 - .byte 244 - .byte 246 - .byte 248 - .byte 250 - .byte 252 - .byte 254 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_3, @object - .size table_3, 256 -table_3: - .byte 0 - .byte 128 - .byte 1 - .byte 129 - .byte 2 - .byte 130 - .byte 3 - .byte 131 - .byte 4 - .byte 132 - .byte 5 - .byte 133 - .byte 6 - .byte 134 - .byte 7 - .byte 135 - .byte 8 - .byte 136 - .byte 9 - .byte 137 - .byte 10 - .byte 138 - .byte 11 - .byte 139 - .byte 12 - .byte 140 - .byte 13 - .byte 141 - .byte 14 - .byte 142 - .byte 15 - .byte 143 - .byte 16 - .byte 144 - .byte 17 - .byte 145 - .byte 18 - .byte 146 - .byte 19 - .byte 147 - .byte 20 - .byte 148 - .byte 21 - .byte 149 - .byte 22 - .byte 150 - .byte 23 - .byte 151 - .byte 24 - .byte 152 - .byte 25 - .byte 153 - .byte 26 - .byte 154 - .byte 27 - .byte 155 - .byte 28 - .byte 156 - .byte 29 - .byte 157 - .byte 30 - .byte 158 - .byte 31 - .byte 159 - .byte 160 - .byte 32 - .byte 161 - .byte 33 - .byte 162 - .byte 34 - .byte 163 - .byte 35 - .byte 164 - .byte 36 - .byte 165 - .byte 37 - .byte 166 - .byte 38 - .byte 167 - .byte 39 - .byte 168 - .byte 40 - .byte 169 - .byte 41 - .byte 170 - .byte 42 - .byte 171 - .byte 43 - .byte 172 - .byte 44 - .byte 173 - .byte 45 - .byte 174 - .byte 46 - .byte 175 - .byte 47 - .byte 176 - .byte 48 - .byte 177 - .byte 49 - .byte 178 - .byte 50 - .byte 179 - .byte 51 - .byte 180 - .byte 52 - .byte 181 - .byte 53 - .byte 182 - .byte 54 - .byte 183 - .byte 55 - .byte 184 - .byte 56 - .byte 185 - .byte 57 - .byte 186 - .byte 58 - .byte 187 - .byte 59 - .byte 188 - .byte 60 - .byte 189 - .byte 61 - .byte 190 - .byte 62 - .byte 191 - .byte 63 - .byte 64 - .byte 192 - .byte 65 - .byte 193 - .byte 66 - .byte 194 - .byte 67 - .byte 195 - .byte 68 - .byte 196 - .byte 69 - .byte 197 - .byte 70 - .byte 198 - .byte 71 - .byte 199 - .byte 72 - .byte 200 - .byte 73 - .byte 201 - .byte 74 - .byte 202 - .byte 75 - .byte 203 - .byte 76 - .byte 204 - .byte 77 - .byte 205 - .byte 78 - .byte 206 - .byte 79 - .byte 207 - .byte 80 - .byte 208 - .byte 81 - .byte 209 - .byte 82 - .byte 210 - .byte 83 - .byte 211 - .byte 84 - .byte 212 - .byte 85 - .byte 213 - .byte 86 - .byte 214 - .byte 87 - .byte 215 - .byte 88 - .byte 216 - .byte 89 - .byte 217 - .byte 90 - .byte 218 - .byte 91 - .byte 219 - .byte 92 - .byte 220 - .byte 93 - .byte 221 - .byte 94 - .byte 222 - .byte 95 - .byte 223 - .byte 224 - .byte 96 - .byte 225 - .byte 97 - .byte 226 - .byte 98 - .byte 227 - .byte 99 - .byte 228 - .byte 100 - .byte 229 - .byte 101 - .byte 230 - .byte 102 - .byte 231 - .byte 103 - .byte 232 - .byte 104 - .byte 233 - .byte 105 - .byte 234 - .byte 106 - .byte 235 - .byte 107 - .byte 236 - .byte 108 - .byte 237 - .byte 109 - .byte 238 - .byte 110 - .byte 239 - .byte 111 - .byte 240 - .byte 112 - .byte 241 - .byte 113 - .byte 242 - .byte 114 - .byte 243 - .byte 115 - .byte 244 - .byte 116 - .byte 245 - 
.byte 117 - .byte 246 - .byte 118 - .byte 247 - .byte 119 - .byte 248 - .byte 120 - .byte 249 - .byte 121 - .byte 250 - .byte 122 - .byte 251 - .byte 123 - .byte 252 - .byte 124 - .byte 253 - .byte 125 - .byte 254 - .byte 126 - .byte 255 - .byte 127 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_4, @object - .size table_4, 112 -table_4: - .byte 1 - .byte 0 - .byte 3 - .byte 0 - .byte 7 - .byte 0 - .byte 15 - .byte 0 - .byte 15 - .byte 1 - .byte 14 - .byte 3 - .byte 13 - .byte 3 - .byte 11 - .byte 3 - .byte 7 - .byte 3 - .byte 15 - .byte 2 - .byte 14 - .byte 1 - .byte 12 - .byte 3 - .byte 9 - .byte 3 - .byte 3 - .byte 3 - .byte 7 - .byte 2 - .byte 14 - .byte 0 - .byte 13 - .byte 1 - .byte 10 - .byte 3 - .byte 5 - .byte 3 - .byte 11 - .byte 2 - .byte 6 - .byte 1 - .byte 12 - .byte 2 - .byte 8 - .byte 1 - .byte 0 - .byte 3 - .byte 1 - .byte 2 - .byte 2 - .byte 0 - .byte 5 - .byte 0 - .byte 11 - .byte 0 - .byte 7 - .byte 1 - .byte 14 - .byte 2 - .byte 12 - .byte 1 - .byte 8 - .byte 3 - .byte 1 - .byte 3 - .byte 3 - .byte 2 - .byte 6 - .byte 0 - .byte 13 - .byte 0 - .byte 11 - .byte 1 - .byte 6 - .byte 3 - .byte 13 - .byte 2 - .byte 10 - .byte 1 - .byte 4 - .byte 3 - .byte 9 - .byte 2 - .byte 2 - .byte 1 - .byte 4 - .byte 2 - .byte 8 - .byte 0 - .byte 1 - .byte 1 - .byte 2 - .byte 2 - .byte 4 - .byte 0 - .byte 9 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 2 - .byte 12 - .byte 0 - .byte 9 - .byte 1 - .byte 2 - .byte 3 - .byte 5 - .byte 2 - .byte 10 - .byte 0 - - .text -.global skinny_128_384_init - .type skinny_128_384_init, @function -skinny_128_384_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,12 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_384_init, .-skinny_128_384_init - - .text -.global skinny_128_384_encrypt - .type skinny_128_384_encrypt, @function -skinny_128_384_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - std Y+33,r18 - std Y+34,r19 - std Y+35,r20 - std Y+36,r21 - ldd r18,Z+36 - ldd r19,Z+37 - ldd r20,Z+38 - ldd r21,Z+39 - std Y+37,r18 - std Y+38,r19 - std Y+39,r20 - std Y+40,r21 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd 
r21,Z+43 - std Y+41,r18 - std Y+42,r19 - std Y+43,r20 - std Y+44,r21 - ldd r18,Z+44 - ldd r19,Z+45 - ldd r20,Z+46 - ldd r21,Z+47 - std Y+45,r18 - std Y+46,r19 - std Y+47,r20 - std Y+48,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -114: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z 
-#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm 
r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) 
- elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z 
-#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif 
defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm 
r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - 
eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,112 - brne 5721f - rjmp 790f -5721: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif 
defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 114b -790: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_encrypt, .-skinny_128_384_encrypt - -.global skinny_128_384_encrypt_tk_full - .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt - - .text -.global skinny_128_384_decrypt - .type skinny_128_384_decrypt, @function -skinny_128_384_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r23 - std Y+2,r2 - std Y+3,r21 - std Y+4,r20 - std Y+5,r3 - std Y+6,r18 - std Y+7,r19 - std Y+8,r22 - std Y+9,r9 - std Y+10,r10 - std Y+11,r7 - std Y+12,r6 - std Y+13,r11 - std Y+14,r4 - std Y+15,r5 - std Y+16,r8 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r23 - std Y+18,r2 - std Y+19,r21 - std Y+20,r20 - std Y+21,r3 - std Y+22,r18 - std Y+23,r19 - std Y+24,r22 - std Y+25,r9 - std Y+26,r10 - std Y+27,r7 - std Y+28,r6 - std Y+29,r11 - std Y+30,r4 - std Y+31,r5 - std Y+32,r8 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r22,Z+36 - ldd r23,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+33,r23 - std Y+34,r2 - std Y+35,r21 - std Y+36,r20 - std Y+37,r3 - std Y+38,r18 - std Y+39,r19 - std Y+40,r22 - std Y+41,r9 - std Y+42,r10 - std Y+43,r7 - std Y+44,r6 - std Y+45,r11 - std Y+46,r4 - std Y+47,r5 - std Y+48,r8 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -122: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if 
defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 122b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,28 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -150: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 150b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 -179: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm 
r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 179b - std Y+33,r12 - std Y+34,r13 - std Y+35,r14 - std Y+36,r15 - std Y+37,r24 - std Y+38,r25 - std Y+39,r16 - std Y+40,r17 - ldi r26,28 - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 -207: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 207b - std Y+41,r12 - std Y+42,r13 - std Y+43,r14 - std Y+44,r15 - std Y+45,r24 - std Y+46,r25 - std Y+47,r16 - std Y+48,r17 - ldi r26,112 -227: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z 
-#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - 
ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd 
r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - 
eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else 
- lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if 
defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld 
r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out 
_SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z 
-#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 903f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 227b -903: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_decrypt, .-skinny_128_384_decrypt - - .text -.global skinny_128_256_init - .type skinny_128_256_init, @function -skinny_128_256_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,8 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_256_init, .-skinny_128_256_init - - .text -.global skinny_128_256_encrypt - .type skinny_128_256_encrypt, @function -skinny_128_256_encrypt: - push r28 - 
push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -82: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - 
lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if 
defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm 
r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 
- std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - 
mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,96 - breq 594f - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 82b -594: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_encrypt, .-skinny_128_256_encrypt - -.global skinny_128_256_encrypt_tk_full - .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt - - .text -.global skinny_128_256_decrypt - .type skinny_128_256_decrypt, @function -skinny_128_256_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - std Y+5,r22 - std Y+6,r23 - std Y+7,r2 - std Y+8,r3 - std Y+9,r4 - std Y+10,r5 - std Y+11,r6 - std Y+12,r7 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - std Y+21,r22 - std Y+22,r23 - std Y+23,r2 - std Y+24,r3 - std Y+25,r4 - std Y+26,r5 - std Y+27,r6 - std Y+28,r7 - std Y+29,r8 - std Y+30,r9 - std Y+31,r10 - std Y+32,r11 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,24 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -90: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif 
defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 90b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,24 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -118: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 118b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,96 -139: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm 
- mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif 
- mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldi 
r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std 
Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif 
defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 651f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 139b -651: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_decrypt, .-skinny_128_256_decrypt - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-skinny128.c b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-skinny128.c deleted file mode 100644 index 579ced1..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-skinny128.c +++ /dev/null @@ -1,801 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-skinny128.h" -#include "internal-skinnyutil.h" -#include "internal-util.h" -#include - -#if !defined(__AVR__) - -STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) -{ - /* This function is used to fast-forward the TK1 tweak value - * to the value at the end of the key schedule for decryption. - * - * The tweak permutation repeats every 16 rounds, so SKINNY-128-256 - * with 48 rounds does not need any fast forwarding applied. - * SKINNY-128-128 with 40 rounds and SKINNY-128-384 with 56 rounds - * are equivalent to applying the permutation 8 times: - * - * PT*8 = [5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12] - */ - uint32_t row0 = tk[0]; - uint32_t row1 = tk[1]; - uint32_t row2 = tk[2]; - uint32_t row3 = tk[3]; - tk[0] = ((row1 >> 8) & 0x0000FFFFU) | - ((row0 >> 8) & 0x00FF0000U) | - ((row0 << 8) & 0xFF000000U); - tk[1] = ((row1 >> 24) & 0x000000FFU) | - ((row0 << 8) & 0x00FFFF00U) | - ((row1 << 24) & 0xFF000000U); - tk[2] = ((row3 >> 8) & 0x0000FFFFU) | - ((row2 >> 8) & 0x00FF0000U) | - ((row2 << 8) & 0xFF000000U); - tk[3] = ((row3 >> 24) & 0x000000FFU) | - ((row2 << 8) & 0x00FFFF00U) | - ((row3 << 24) & 0xFF000000U); -} - -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); - memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); -#else - /* Set the initial states of TK1, TK2, and TK3 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Set up the key schedule using TK2 and TK3. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. 
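For reference, the 6-bit LFSR that generates these round constants is easy to check in isolation. The sketch below is standalone (it is not part of the patch) and simply repeats the rc update used in these key-schedule loops, printing the per-round constants; the sequence begins 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, ..., and the low and high nibbles are the values XORed into rows 0 and 1.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t rc = 0;
        int round;
        for (round = 0; round < 56; ++round) {   /* 56 = SKINNY_128_384_ROUNDS */
            /* Same update as in the key-schedule loop */
            rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
            rc &= 0x3F;
            printf("round %2d: rc=0x%02X (row0 ^= 0x%X, row1 ^= 0x%X)\n",
                   round, rc, rc & 0x0F, rc >> 4);
        }
        return 0;
    }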
- * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ TK3[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ TK3[1] ^ (rc >> 4); - - /* Permute TK2 and TK3 for the next round */ - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - - /* Apply the LFSR's to TK2 and TK3 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } -#endif -} - -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0x15; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Permute TK1 to fast-forward it to the end of the key schedule */ - skinny128_fast_forward_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_fast_forward_tk(TK2); - skinny128_fast_forward_tk(TK3); - for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
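This fast-forward only needs to touch the LFSRs because skinny128_LFSR2 and skinny128_LFSR3 (from internal-skinnyutil.h, included above) are byte-wise inverses of each other, which is also why the decryption rounds below can step TK2 back with LFSR3 and TK3 back with LFSR2. A minimal standalone check of that inverse property, with the two macros copied verbatim from the header:

    #include <stdint.h>
    #include <assert.h>

    #define skinny128_LFSR2(x) \
        do { \
            uint32_t _x = (x); \
            (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \
                  (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \
        } while (0)

    #define skinny128_LFSR3(x) \
        do { \
            uint32_t _x = (x); \
            (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \
                  (((_x << 7) ^ (_x << 1)) & 0x80808080U); \
        } while (0)

    int main(void)
    {
        uint32_t x = 0x12345678U, y = x;
        skinny128_LFSR2(y);   /* one key-schedule step forward */
        skinny128_LFSR3(y);   /* one step back */
        assert(y == x);       /* LFSR3 undoes LFSR2 on every byte */
        return 0;
    }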
- skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - skinny128_LFSR3(TK3[2]); - skinny128_LFSR3(TK3[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_inv_permute_tk(TK3); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); - skinny128_LFSR2(TK3[2]); - skinny128_LFSR2(TK3[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); - TK2[0] = le_load_word32(tk2); - TK2[1] = le_load_word32(tk2 + 4); - TK2[2] = le_load_word32(tk2 + 8); - TK2[3] = le_load_word32(tk2 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; - s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK3); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); -#else - /* Set the initial states of TK1 and TK2 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Set up the key schedule using TK2. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ (rc >> 4); - - /* Permute TK2 for the next round */ - skinny128_permute_tk(TK2); - - /* Apply the LFSR to TK2 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } -#endif -} - -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0x09; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1. 
- * There is no need to fast-forward TK1 because the value at - * the end of the key schedule is the same as at the start */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2. - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -#else /* __AVR__ */ - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - memcpy(ks->TK2, tk2, 16); - skinny_128_384_encrypt(ks, output, input); -} - -#endif /* __AVR__ */ diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-skinny128.h b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-skinny128.h deleted file mode 100644 index 2bfda3c..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-skinny128.h +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNY128_H -#define LW_INTERNAL_SKINNY128_H - -/** - * \file internal-skinny128.h - * \brief SKINNY-128 block cipher family. - * - * References: https://eprint.iacr.org/2016/660.pdf, - * https://sites.google.com/site/skinnycipher/ - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \def SKINNY_128_SMALL_SCHEDULE - * \brief Defined to 1 to use the small key schedule version of SKINNY-128. - */ -#if defined(__AVR__) -#define SKINNY_128_SMALL_SCHEDULE 1 -#else -#define SKINNY_128_SMALL_SCHEDULE 0 -#endif - -/** - * \brief Size of a block for SKINNY-128 block ciphers. - */ -#define SKINNY_128_BLOCK_SIZE 16 - -/** - * \brief Number of rounds for SKINNY-128-384. - */ -#define SKINNY_128_384_ROUNDS 56 - -/** - * \brief Structure of the key schedule for SKINNY-128-384. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; - - /** TK3 for the small key schedule */ - uint8_t TK3[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_384_ROUNDS * 2]; -#endif - -} skinny_128_384_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-384. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and an explicitly - * provided TK2 value. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tk2 TK2 value that should be updated on the fly. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when both TK1 and TK2 change from block to block. - * When the key is initialized with skinny_128_384_init(), the TK2 part of - * the key value should be set to zero. - * - * \note Some versions of this function may modify the key schedule to - * copy tk2 into place. - */ -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and a - * fully specified tweakey value. - * - * \param key Points to the 384-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-384 but - * more memory-efficient. - */ -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input); - -/** - * \brief Number of rounds for SKINNY-128-256. - */ -#define SKINNY_128_256_ROUNDS 48 - -/** - * \brief Structure of the key schedule for SKINNY-128-256. 
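As a usage sketch only (it is not part of this patch), a round trip through the SKINNY-128-384 interface declared above could look like the following; the all-zero 48-byte tweakey and the fixed plaintext block are arbitrary placeholders:

    #include <string.h>
    #include <assert.h>
    #include "internal-skinny128.h"

    void skinny_128_384_roundtrip_demo(void)
    {
        static const unsigned char key[48] = { 0 };   /* TK1 || TK2 || TK3 */
        static const unsigned char pt[16] = {
            0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
            0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
        };
        unsigned char ct[16], out[16];
        skinny_128_384_key_schedule_t ks;

        skinny_128_384_init(&ks, key);
        skinny_128_384_encrypt(&ks, ct, pt);
        skinny_128_384_decrypt(&ks, out, ct);
        assert(memcmp(out, pt, 16) == 0);   /* decryption inverts encryption */
    }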
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_256_ROUNDS * 2]; -#endif - -} skinny_128_256_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256 and a - * fully specified tweakey value. - * - * \param key Points to the 256-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-256 but - * more memory-efficient. - */ -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-skinnyutil.h b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-skinnyutil.h deleted file mode 100644 index 83136cb..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-skinnyutil.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNYUTIL_H -#define LW_INTERNAL_SKINNYUTIL_H - -/** - * \file internal-skinnyutil.h - * \brief Utilities to help implement SKINNY and its variants. - */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @cond skinnyutil */ - -/* Utilities for implementing SKINNY-128 */ - -#define skinny128_LFSR2(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ - (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ - } while (0) - - -#define skinny128_LFSR3(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ - (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) -#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) - -#define skinny128_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint32_t row2 = tk[2]; \ - uint32_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 16) | (row3 >> 16); \ - tk[0] = ((row2 >> 8) & 0x000000FFU) | \ - ((row2 << 16) & 0x00FF0000U) | \ - ( row3 & 0xFF00FF00U); \ - tk[1] = ((row2 >> 16) & 0x000000FFU) | \ - (row2 & 0xFF000000U) | \ - ((row3 << 8) & 0x0000FF00U) | \ - ( row3 & 0x00FF0000U); \ - } while (0) - -#define skinny128_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint32_t row0 = tk[0]; \ - uint32_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 >> 16) & 0x000000FFU) | \ - ((row0 << 8) & 0x0000FF00U) | \ - ((row1 << 16) & 0x00FF0000U) | \ - ( row1 & 0xFF000000U); \ - tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ - ((row0 << 16) & 0xFF000000U) | \ - ((row1 >> 16) & 0x000000FFU) | \ - ((row1 << 8) & 0x00FF0000U); \ - } while (0) - -/* - * Apply the SKINNY sbox. The original version from the specification is - * equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE(x) - * ((((x) & 0x01010101U) << 2) | - * (((x) & 0x06060606U) << 5) | - * (((x) & 0x20202020U) >> 5) | - * (((x) & 0xC8C8C8C8U) >> 2) | - * (((x) & 0x10101010U) >> 1)) - * - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * return SBOX_SWAP(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. 
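For cross-checking, the specification-style sequence quoted in the comment above can be written out directly as a reference function; it computes the same byte-sliced S-box that the optimized skinny128_sbox macro below produces with fewer shift operations. The macro definitions are taken verbatim from that comment:

    #include <stdint.h>

    #define SBOX_MIX(x) \
        (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x))
    #define SBOX_SWAP(x) \
        (((x) & 0xF9F9F9F9U) | \
         (((x) >> 1) & 0x02020202U) | \
         (((x) << 1) & 0x04040404U))
    #define SBOX_PERMUTE(x) \
        ((((x) & 0x01010101U) << 2) | \
         (((x) & 0x06060606U) << 5) | \
         (((x) & 0x20202020U) >> 5) | \
         (((x) & 0xC8C8C8C8U) >> 2) | \
         (((x) & 0x10101010U) >> 1))

    /* Reference form of the SKINNY-128 S-box, four bytes at a time */
    static uint32_t skinny128_sbox_ref(uint32_t x)
    {
        x = SBOX_MIX(x);
        x = SBOX_PERMUTE(x);
        x = SBOX_MIX(x);
        x = SBOX_PERMUTE(x);
        x = SBOX_MIX(x);
        x = SBOX_PERMUTE(x);
        x = SBOX_MIX(x);
        return SBOX_SWAP(x);
    }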
- */ -#define skinny128_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ - y = (((x >> 5) & (x << 1)) & 0x04040404U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ - x = ((x & 0x08080808U) << 1) | \ - ((x & 0x32323232U) << 2) | \ - ((x & 0x01010101U) << 5) | \ - ((x & 0x80808080U) >> 6) | \ - ((x & 0x40404040U) >> 4) | \ - ((x & 0x04040404U) >> 2); \ -} while (0) - -/* - * Apply the inverse of the SKINNY sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE_INV(x) - * ((((x) & 0x08080808U) << 1) | - * (((x) & 0x32323232U) << 2) | - * (((x) & 0x01010101U) << 5) | - * (((x) & 0xC0C0C0C0U) >> 5) | - * (((x) & 0x04040404U) >> 2)) - * - * x = SBOX_SWAP(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_inv_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ - x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ - y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ - x = ((x & 0x01010101U) << 2) | \ - ((x & 0x04040404U) << 4) | \ - ((x & 0x02020202U) << 6) | \ - ((x & 0x20202020U) >> 5) | \ - ((x & 0xC8C8C8C8U) >> 2) | \ - ((x & 0x10101010U) >> 1); \ -} while (0) - -/* Utilities for implementing SKINNY-64 */ - -#define skinny64_LFSR2(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ - } while (0) - -#define skinny64_LFSR3(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) -#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) - -#define skinny64_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint16_t row2 = tk[2]; \ - uint16_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 8) | (row3 >> 8); \ - tk[0] = ((row2 << 4) & 0xF000U) | \ - ((row2 >> 8) & 0x00F0U) | \ - ( row3 & 0x0F0FU); \ - tk[1] = ((row2 << 8) & 0xF000U) | \ - ((row3 >> 4) & 0x0F00U) | \ - ( row3 & 0x00F0U) | \ - ( row2 & 0x000FU); \ - } while (0) - 
-#define skinny64_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint16_t row0 = tk[0]; \ - uint16_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 << 8) & 0xF000U) | \ - ((row0 >> 4) & 0x0F00U) | \ - ((row1 >> 8) & 0x00F0U) | \ - ( row1 & 0x000FU); \ - tk[3] = ((row1 << 8) & 0xF000U) | \ - ((row0 << 8) & 0x0F00U) | \ - ((row1 >> 4) & 0x00F0U) | \ - ((row0 >> 8) & 0x000FU); \ - } while (0) - -/* - * Apply the SKINNY-64 sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT(x) - * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_SHIFT steps to be performed with one final rotation. - * This reduces the number of required shift operations from 14 to 10. - * - * We can further reduce the number of NOT operations from 4 to 2 - * using the technique from https://github.com/kste/skinny_avx to - * convert NOR-XOR operations into AND-XOR operations by converting - * the S-box into its NOT-inverse. - */ -#define skinny64_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ - x = ~x; \ - x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ -} while (0) - -/* - * Apply the inverse of the SKINNY-64 sbox. The original version - * from the specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT_INV(x) - * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * return SBOX_MIX(x); - */ -#define skinny64_inv_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = ~x; \ - x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ -} while (0) - -/** @endcond */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-util.h b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - 
(ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. 
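These XOR helpers are the block-level building blocks used by the higher-level AEAD modes. For illustration (a usage sketch with hypothetical buffer names), XORing a 16-byte keystream block into a separate ciphertext buffer, or into a message block in place, is just:

    unsigned char m[16], c[16], ks_block[16];
    /* ... fill m with plaintext and ks_block with cipher output ... */
    lw_xor_block_2_src(c, m, ks_block, 16);   /* c = m ^ keystream */
    lw_xor_block(m, ks_block, 16);            /* m ^= keystream, in place */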
This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/skinny-aead.c b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/skinny-aead.c deleted file mode 100644 index 7558527..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/skinny-aead.c +++ /dev/null @@ -1,804 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
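The composed rotation macros deleted above (under LW_CRYPTO_ROTATE32_COMPOSED) build an arbitrary 32-bit rotation out of rotations by 8, 16 or 24 bits, which are plain byte moves on 8-bit cores such as the AVR, plus at most four single-bit rotations; the statement-expression form of the generic macros also ensures the argument is evaluated only once. A minimal stand-alone sketch of the idea, assuming an ordinary C99 toolchain (the rotl32 and rotl32_by5_composed names are illustrative only and not part of the library):

    #include <stdint.h>
    #include <stdio.h>

    /* Reference rotation that the composed form must match. */
    static uint32_t rotl32(uint32_t x, unsigned bits)
    {
        return (x << bits) | (x >> (32u - bits));
    }

    /* Composed left rotation by 5: rotate left by 8 (a byte move on
     * AVR-class cores), then three single-bit right rotations. */
    static uint32_t rotl32_by5_composed(uint32_t x)
    {
        uint32_t t = rotl32(x, 8);
        t = (t >> 1) | (t << 31);
        t = (t >> 1) | (t << 31);
        t = (t >> 1) | (t << 31);
        return t;
    }

    int main(void)
    {
        uint32_t x = 0x12345678u;
        printf("direct   %08x\n", rotl32(x, 5));
        printf("composed %08x\n", rotl32_by5_composed(x)); /* same value */
        return 0;
    }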
- */ - -#include "skinny-aead.h" -#include "internal-skinny128.h" -#include "internal-util.h" -#include - -aead_cipher_t const skinny_aead_m1_cipher = { - "SKINNY-AEAD-M1", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M1_NONCE_SIZE, - SKINNY_AEAD_M1_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m1_encrypt, - skinny_aead_m1_decrypt -}; - -aead_cipher_t const skinny_aead_m2_cipher = { - "SKINNY-AEAD-M2", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M2_NONCE_SIZE, - SKINNY_AEAD_M2_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m2_encrypt, - skinny_aead_m2_decrypt -}; - -aead_cipher_t const skinny_aead_m3_cipher = { - "SKINNY-AEAD-M3", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M3_NONCE_SIZE, - SKINNY_AEAD_M3_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m3_encrypt, - skinny_aead_m3_decrypt -}; - -aead_cipher_t const skinny_aead_m4_cipher = { - "SKINNY-AEAD-M4", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M4_NONCE_SIZE, - SKINNY_AEAD_M4_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m4_encrypt, - skinny_aead_m4_decrypt -}; - -aead_cipher_t const skinny_aead_m5_cipher = { - "SKINNY-AEAD-M5", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M5_NONCE_SIZE, - SKINNY_AEAD_M5_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m5_encrypt, - skinny_aead_m5_decrypt -}; - -aead_cipher_t const skinny_aead_m6_cipher = { - "SKINNY-AEAD-M6", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M6_NONCE_SIZE, - SKINNY_AEAD_M6_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m6_encrypt, - skinny_aead_m6_decrypt -}; - -/* Domain separator prefixes for all of the SKINNY-AEAD family members */ -#define DOMAIN_SEP_M1 0x00 -#define DOMAIN_SEP_M2 0x10 -#define DOMAIN_SEP_M3 0x08 -#define DOMAIN_SEP_M4 0x18 -#define DOMAIN_SEP_M5 0x10 -#define DOMAIN_SEP_M6 0x18 - -/** - * \brief Initialize the key and nonce for SKINNY-128-384 based AEAD schemes. - * - * \param ks The key schedule to initialize. - * \param key Points to the 16 bytes of the key. - * \param nonce Points to the nonce. - * \param nonce_len Length of the nonce in bytes. - */ -static void skinny_aead_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - const unsigned char *nonce, unsigned nonce_len) -{ - unsigned char k[48]; - memset(k, 0, 16); - memcpy(k + 16, nonce, nonce_len); - memset(k + 16 + nonce_len, 0, 16 - nonce_len); - memcpy(k + 32, key, 16); - skinny_128_384_init(ks, k); -} - -/** - * \brief Set the domain separation value in the tweak for SKINNY-128-384. - * - * \param ks Key schedule for the block cipher. - * \param d Domain separation value to write into the tweak. - */ -#define skinny_aead_128_384_set_domain(ks,d) ((ks)->TK1[15] = (d)) - -/** - * \brief Sets the LFSR field in the tweak for SKINNY-128-384. - * - * \param ks Key schedule for the block cipher. - * \param lfsr 64-bit LFSR value. - */ -#define skinny_aead_128_384_set_lfsr(ks,lfsr) le_store_word64((ks)->TK1, (lfsr)) - -/** - * \brief Updates the LFSR value for SKINNY-128-384. - * - * \param lfsr 64-bit LFSR value to be updated. - */ -#define skinny_aead_128_384_update_lfsr(lfsr) \ - do { \ - uint8_t feedback = ((lfsr) & (1ULL << 63)) ? 0x1B : 0x00; \ - (lfsr) = ((lfsr) << 1) ^ feedback; \ - } while (0) - -/** - * \brief Authenticates the associated data for a SKINNY-128-384 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param tag Final tag to XOR the authentication checksum into. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
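The skinny_aead_128_384_update_lfsr macro above steps the 64-bit block counter that is written into TK1 before each block cipher call: shift left by one and fold the constant 0x1B back in when the top bit falls off, i.e. multiplication by x in GF(2^64) modulo x^64 + x^4 + x^3 + x + 1. A small self-contained sketch of the same step (the lfsr64_step function and the main driver are illustrative, not library code):

    #include <stdint.h>
    #include <stdio.h>

    /* One step of the 64-bit LFSR used as the tweak block counter. */
    static uint64_t lfsr64_step(uint64_t lfsr)
    {
        uint64_t feedback = (lfsr & (1ULL << 63)) ? 0x1B : 0x00;
        return (lfsr << 1) ^ feedback;
    }

    int main(void)
    {
        uint64_t lfsr = 1; /* the AEAD code starts the counter at 1 */
        for (int block = 0; block < 4; ++block) {
            printf("block %d: lfsr = %016llx\n", block,
                   (unsigned long long)lfsr);
            lfsr = lfsr64_step(lfsr);
        }
        return 0;
    }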
- */ -static void skinny_aead_128_384_authenticate - (skinny_128_384_key_schedule_t *ks, unsigned char prefix, - unsigned char tag[SKINNY_128_BLOCK_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint64_t lfsr = 1; - skinny_aead_128_384_set_domain(ks, prefix | 2); - while (adlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_128_384_encrypt(ks, block, ad); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - ad += SKINNY_128_BLOCK_SIZE; - adlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_384_update_lfsr(lfsr); - } - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_aead_128_384_set_domain(ks, prefix | 3); - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, SKINNY_128_BLOCK_SIZE - temp - 1); - skinny_128_384_encrypt(ks, block, block); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - } -} - -/** - * \brief Encrypts the plaintext for a SKINNY-128-384 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the plaintext buffer. - * \param mlen Number of bytes of plaintext to be encrypted. - */ -static void skinny_aead_128_384_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint64_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_384_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_384_set_lfsr(ks, lfsr); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - skinny_128_384_encrypt(ks, c, m); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_384_update_lfsr(lfsr); - } - skinny_aead_128_384_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_384_set_domain(ks, prefix | 1); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_384_encrypt(ks, block, block); - lw_xor_block_2_src(c, block, m, temp); - skinny_aead_128_384_update_lfsr(lfsr); - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_aead_128_384_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_384_set_domain(ks, prefix | 4); - } - skinny_128_384_encrypt(ks, sum, sum); -} - -/** - * \brief Decrypts the ciphertext for a SKINNY-128-384 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the ciphertext buffer. - * \param mlen Number of bytes of ciphertext to be decrypted. 
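In skinny_aead_128_384_encrypt above, a short final block is handled by absorbing the 0x80-padded plaintext into the running checksum and XORing the plaintext against the encryption of an all-zero block under the "partial block" domain value. A sketch of just that step, with a placeholder cipher standing in for skinny_128_384_encrypt (toy_encrypt, its constant, and encrypt_partial_block are assumptions for illustration, not the real primitive):

    #include <string.h>

    #define BLOCK 16

    /* Placeholder for the tweakable block cipher call; the real code invokes
     * skinny_128_384_encrypt() after setting the LFSR and domain in TK1. */
    static void toy_encrypt(unsigned char out[BLOCK], const unsigned char in[BLOCK])
    {
        for (int i = 0; i < BLOCK; ++i)
            out[i] = (unsigned char)(in[i] ^ 0xA5u);
    }

    /* Final partial block of length temp (< 16): absorb the padded plaintext
     * into the checksum, then mask the plaintext with E(0^128). */
    static void encrypt_partial_block(unsigned char sum[BLOCK], unsigned char *c,
                                      const unsigned char *m, unsigned temp)
    {
        unsigned char block[BLOCK];
        unsigned i;
        for (i = 0; i < temp; ++i)
            sum[i] ^= m[i];          /* lw_xor_block(sum, m, temp) */
        sum[temp] ^= 0x80;           /* 10* padding of the checksum */
        memset(block, 0, BLOCK);
        toy_encrypt(block, block);   /* keystream block */
        for (i = 0; i < temp; ++i)
            c[i] = block[i] ^ m[i];  /* lw_xor_block_2_src(c, block, m, temp) */
    }

    int main(void)
    {
        unsigned char sum[BLOCK] = {0}, c[BLOCK], m[5] = "tail";
        encrypt_partial_block(sum, c, m, 5);
        return 0;
    }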
- */ -static void skinny_aead_128_384_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint64_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_384_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_128_384_decrypt(ks, m, c); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_384_update_lfsr(lfsr); - } - skinny_aead_128_384_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_384_set_domain(ks, prefix | 1); - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_384_encrypt(ks, block, block); - lw_xor_block_2_src(m, block, c, temp); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - skinny_aead_128_384_update_lfsr(lfsr); - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_aead_128_384_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_384_set_domain(ks, prefix | 4); - } - skinny_128_384_encrypt(ks, sum, sum); -} - -int skinny_aead_m1_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M1_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M1_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M1, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M1, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M1_TAG_SIZE); - return 0; -} - -int skinny_aead_m1_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M1_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M1_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M1_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M1, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M1, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M1_TAG_SIZE); -} - -int skinny_aead_m2_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* 
Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M2_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M2_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M2, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M2, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M2_TAG_SIZE); - return 0; -} - -int skinny_aead_m2_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M2_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M2_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M2_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M2, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M2, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M2_TAG_SIZE); -} - -int skinny_aead_m3_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M3_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M3_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M3, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M3, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M3_TAG_SIZE); - return 0; -} - -int skinny_aead_m3_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M3_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M3_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M3_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M3, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M3, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M3_TAG_SIZE); -} - -int skinny_aead_m4_encrypt - (unsigned char *c, 
unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M4_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M4_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M4, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M4, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M4_TAG_SIZE); - return 0; -} - -int skinny_aead_m4_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M4_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M4_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M4_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M4, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M4, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M4_TAG_SIZE); -} - -/** - * \brief Initialize the key and nonce for SKINNY-128-256 based AEAD schemes. - * - * \param ks The key schedule to initialize. - * \param key Points to the 16 bytes of the key. - * \param nonce Points to the nonce. - * \param nonce_len Length of the nonce in bytes. - */ -static void skinny_aead_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - const unsigned char *nonce, unsigned nonce_len) -{ - unsigned char k[32]; - memset(k, 0, 16 - nonce_len); - memcpy(k + 16 - nonce_len, nonce, nonce_len); - memcpy(k + 16, key, 16); - skinny_128_256_init(ks, k); -} - -/** - * \brief Set the domain separation value in the tweak for SKINNY-128-256. - * - * \param ks Key schedule for the block cipher. - * \param d Domain separation value to write into the tweak. - */ -#define skinny_aead_128_256_set_domain(ks,d) ((ks)->TK1[3] = (d)) - -/** - * \brief Sets the LFSR field in the tweak for SKINNY-128-256. - * - * \param ks Key schedule for the block cipher. - * \param lfsr 24-bit LFSR value. - */ -#define skinny_aead_128_256_set_lfsr(ks,lfsr) \ - do { \ - (ks)->TK1[0] = (uint8_t)(lfsr); \ - (ks)->TK1[1] = (uint8_t)((lfsr) >> 8); \ - (ks)->TK1[2] = (uint8_t)((lfsr) >> 16); \ - } while (0) - -/** - * \brief Updates the LFSR value for SKINNY-128-256. - * - * \param lfsr 24-bit LFSR value to be updated. - */ -#define skinny_aead_128_256_update_lfsr(lfsr) \ - do { \ - uint32_t feedback = ((lfsr) & (((uint32_t)1) << 23)) ? 0x1B : 0x00; \ - (lfsr) = ((lfsr) << 1) ^ (feedback); \ - } while (0) - -/** - * \brief Authenticates the associated data for a SKINNY-128-256 based AEAD. - * - * \param ks The key schedule to use. 
- * \param prefix Domain separation prefix for the family member. - * \param tag Final tag to XOR the authentication checksum into. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - */ -static void skinny_aead_128_256_authenticate - (skinny_128_256_key_schedule_t *ks, unsigned char prefix, - unsigned char tag[SKINNY_128_BLOCK_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint32_t lfsr = 1; - skinny_aead_128_256_set_domain(ks, prefix | 2); - while (adlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_128_256_encrypt(ks, block, ad); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - ad += SKINNY_128_BLOCK_SIZE; - adlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_256_update_lfsr(lfsr); - } - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_aead_128_256_set_domain(ks, prefix | 3); - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, SKINNY_128_BLOCK_SIZE - temp - 1); - skinny_128_256_encrypt(ks, block, block); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - } -} - -/** - * \brief Encrypts the plaintext for a SKINNY-128-256 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the plaintext buffer. - * \param mlen Number of bytes of plaintext to be encrypted. - */ -static void skinny_aead_128_256_encrypt - (skinny_128_256_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint32_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_256_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_256_set_lfsr(ks, lfsr); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - skinny_128_256_encrypt(ks, c, m); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_256_update_lfsr(lfsr); - } - skinny_aead_128_256_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_256_set_domain(ks, prefix | 1); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_256_encrypt(ks, block, block); - lw_xor_block_2_src(c, block, m, temp); - skinny_aead_128_256_update_lfsr(lfsr); - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_aead_128_256_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_256_set_domain(ks, prefix | 4); - } - skinny_128_256_encrypt(ks, sum, sum); -} - -/** - * \brief Decrypts the ciphertext for a SKINNY-128-256 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the ciphertext buffer. - * \param mlen Number of bytes of ciphertext to be decrypted. 
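The two init routines above pack the key material differently: for SKINNY-128-384 the 48-byte input is an all-zero TK1 (reserved for the LFSR and domain byte), the nonce zero-padded to 16 bytes in TK2, and the key in TK3, while for SKINNY-128-256 the 32-byte input is the nonce right-aligned in TK1 (leaving its first bytes free for the 24-bit LFSR and domain byte) followed by the key in TK2. A sketch of the packing alone, mirroring the deleted code (the pack_tk_384/pack_tk_256 names and the zeroed test buffers are illustrative):

    #include <string.h>

    /* SKINNY-128-384 layout: TK1 = 0, TK2 = nonce || 0, TK3 = key. */
    static void pack_tk_384(unsigned char k[48], const unsigned char key[16],
                            const unsigned char *nonce, unsigned nonce_len)
    {
        memset(k, 0, 16);
        memcpy(k + 16, nonce, nonce_len);
        memset(k + 16 + nonce_len, 0, 16 - nonce_len);
        memcpy(k + 32, key, 16);
    }

    /* SKINNY-128-256 layout: TK1 = 0 || nonce, TK2 = key. */
    static void pack_tk_256(unsigned char k[32], const unsigned char key[16],
                            const unsigned char *nonce, unsigned nonce_len)
    {
        memset(k, 0, 16 - nonce_len);
        memcpy(k + 16 - nonce_len, nonce, nonce_len);
        memcpy(k + 16, key, 16);
    }

    int main(void)
    {
        static const unsigned char key[16] = {0};
        static const unsigned char nonce[12] = {0};
        unsigned char k384[48], k256[32];
        pack_tk_384(k384, key, nonce, sizeof(nonce));
        pack_tk_256(k256, key, nonce, sizeof(nonce));
        return 0;
    }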
- */ -static void skinny_aead_128_256_decrypt - (skinny_128_256_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint32_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_256_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_128_256_decrypt(ks, m, c); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_256_update_lfsr(lfsr); - } - skinny_aead_128_256_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_256_set_domain(ks, prefix | 1); - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_256_encrypt(ks, block, block); - lw_xor_block_2_src(m, block, c, temp); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - skinny_aead_128_256_update_lfsr(lfsr); - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_aead_128_256_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_256_set_domain(ks, prefix | 4); - } - skinny_128_256_encrypt(ks, sum, sum); -} - -int skinny_aead_m5_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M5_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M5_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_256_encrypt(&ks, DOMAIN_SEP_M5, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M5, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M5_TAG_SIZE); - return 0; -} - -int skinny_aead_m5_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M5_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M5_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M5_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_256_decrypt(&ks, DOMAIN_SEP_M5, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M5, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M5_TAG_SIZE); -} - -int skinny_aead_m6_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* 
Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M6_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M6_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_256_encrypt(&ks, DOMAIN_SEP_M6, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M6, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M6_TAG_SIZE); - return 0; -} - -int skinny_aead_m6_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M6_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M6_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M6_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_256_decrypt(&ks, DOMAIN_SEP_M6, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M6, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M6_TAG_SIZE); -} diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/skinny-aead.h b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/skinny-aead.h deleted file mode 100644 index c6b54fb..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys-avr/skinny-aead.h +++ /dev/null @@ -1,518 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SKINNY_AEAD_H -#define LWCRYPTO_SKINNY_AEAD_H - -#include "aead-common.h" - -/** - * \file skinny-aead.h - * \brief Authenticated encryption based on the SKINNY block cipher. - * - * SKINNY-AEAD is a family of authenticated encryption algorithms - * that are built around the SKINNY tweakable block cipher. 
There - * are six members in the family: - * - * \li SKINNY-AEAD-M1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. This is the - * primary member of the family. - * \li SKINNY-AEAD-M2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li SKINNY-AEAD-M3 has a 128-bit key, a 128-bit nonce, and a 64-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li SKINNY-AEAD-M4 has a 128-bit key, a 96-bit nonce, and a 64-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li SKINNY-AEAD-M5 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * \li SKINNY-AEAD-M6 has a 128-bit key, a 96-bit nonce, and a 64-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * - * The SKINNY-AEAD family also includes two hash algorithms: - * - * \li SKINNY-tk3-HASH with a 256-bit hash output, based around the - * SKINNY-128-384 tweakable block cipher. This is the primary hashing - * member of the family. - * \li SKINNY-tk2-HASH with a 256-bit hash output, based around the - * SKINNY-128-256 tweakable block cipher. - * - * References: https://sites.google.com/site/skinnycipher/home - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all SKINNY-AEAD family members. - */ -#define SKINNY_AEAD_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M1. - */ -#define SKINNY_AEAD_M1_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M1. - */ -#define SKINNY_AEAD_M1_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M2. - */ -#define SKINNY_AEAD_M2_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M2. - */ -#define SKINNY_AEAD_M2_NONCE_SIZE 12 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M3. - */ -#define SKINNY_AEAD_M3_TAG_SIZE 8 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M3. - */ -#define SKINNY_AEAD_M3_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M4. - */ -#define SKINNY_AEAD_M4_TAG_SIZE 8 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M4. - */ -#define SKINNY_AEAD_M4_NONCE_SIZE 12 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M5. - */ -#define SKINNY_AEAD_M5_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M5. - */ -#define SKINNY_AEAD_M5_NONCE_SIZE 12 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M6. - */ -#define SKINNY_AEAD_M6_TAG_SIZE 8 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M6. - */ -#define SKINNY_AEAD_M6_NONCE_SIZE 12 - -/** - * \brief Meta-information block for the SKINNY-AEAD-M1 cipher. - */ -extern aead_cipher_t const skinny_aead_m1_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M2 cipher. - */ -extern aead_cipher_t const skinny_aead_m2_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M3 cipher. - */ -extern aead_cipher_t const skinny_aead_m3_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M4 cipher. - */ -extern aead_cipher_t const skinny_aead_m4_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M5 cipher. - */ -extern aead_cipher_t const skinny_aead_m5_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M6 cipher. 
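The encrypt/decrypt entry points declared further down in this header follow the usual NIST AEAD calling convention, with the key, nonce and tag sizes given by the constants above. A hedged usage sketch for SKINNY-AEAD-M1, assuming the header and library are built and linked (the zeroed key and nonce and the test message are arbitrary values for illustration):

    #include <stdio.h>
    #include <string.h>
    #include "skinny-aead.h"

    int main(void)
    {
        unsigned char key[SKINNY_AEAD_KEY_SIZE] = {0};
        unsigned char nonce[SKINNY_AEAD_M1_NONCE_SIZE] = {0};
        unsigned char msg[32] = "hello, SKINNY-AEAD-M1";
        unsigned char ad[8] = "header";
        unsigned char ct[32 + SKINNY_AEAD_M1_TAG_SIZE];
        unsigned char pt[32];
        unsigned long long clen, mlen;

        skinny_aead_m1_encrypt(ct, &clen, msg, sizeof(msg), ad, sizeof(ad),
                               NULL, nonce, key);
        if (skinny_aead_m1_decrypt(pt, &mlen, NULL, ct, clen, ad, sizeof(ad),
                                   nonce, key) == 0 &&
            mlen == sizeof(msg) && memcmp(pt, msg, mlen) == 0)
            printf("round trip ok (%llu ciphertext bytes)\n", clen);
        else
            printf("round trip failed\n");
        return 0;
    }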
- */ -extern aead_cipher_t const skinny_aead_m6_cipher; - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M1. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m1_decrypt() - */ -int skinny_aead_m1_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M1. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m1_encrypt() - */ -int skinny_aead_m1_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M2. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa skinny_aead_m2_decrypt() - */ -int skinny_aead_m2_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M2. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m2_encrypt() - */ -int skinny_aead_m2_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M3. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m3_decrypt() - */ -int skinny_aead_m3_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M3. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. 
- * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m3_encrypt() - */ -int skinny_aead_m3_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M4. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m4_decrypt() - */ -int skinny_aead_m4_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M4. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m4_encrypt() - */ -int skinny_aead_m4_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M5. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m5_decrypt() - */ -int skinny_aead_m5_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M5. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m5_encrypt() - */ -int skinny_aead_m5_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M6. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m6_decrypt() - */ -int skinny_aead_m6_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M6. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. 
- * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m6_encrypt() - */ -int skinny_aead_m6_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/internal-skinny128-avr.S b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/internal-skinny128-avr.S new file mode 100644 index 0000000..d342cd5 --- /dev/null +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/internal-skinny128-avr.S @@ -0,0 +1,10099 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 256 +table_0: + .byte 101 + .byte 76 + .byte 106 + .byte 66 + .byte 75 + .byte 99 + .byte 67 + .byte 107 + .byte 85 + .byte 117 + .byte 90 + .byte 122 + .byte 83 + .byte 115 + .byte 91 + .byte 123 + .byte 53 + .byte 140 + .byte 58 + .byte 129 + .byte 137 + .byte 51 + .byte 128 + .byte 59 + .byte 149 + .byte 37 + .byte 152 + .byte 42 + .byte 144 + .byte 35 + .byte 153 + .byte 43 + .byte 229 + .byte 204 + .byte 232 + .byte 193 + .byte 201 + .byte 224 + .byte 192 + .byte 233 + .byte 213 + .byte 245 + .byte 216 + .byte 248 + .byte 208 + .byte 240 + .byte 217 + .byte 249 + .byte 165 + .byte 28 + .byte 168 + .byte 18 + .byte 27 + .byte 160 + .byte 19 + .byte 169 + .byte 5 + .byte 181 + .byte 10 + .byte 184 + .byte 3 + .byte 176 + .byte 11 + .byte 185 + .byte 50 + .byte 136 + .byte 60 + .byte 133 + .byte 141 + .byte 52 + .byte 132 + .byte 61 + .byte 145 + .byte 34 + .byte 156 + .byte 44 + .byte 148 + .byte 36 + .byte 157 + .byte 45 + .byte 98 + .byte 74 + .byte 108 + .byte 69 + .byte 77 + .byte 100 + .byte 68 + .byte 109 + .byte 82 + .byte 114 + .byte 92 + .byte 124 + .byte 84 + .byte 116 + .byte 93 + .byte 125 + .byte 161 + .byte 26 + .byte 172 + .byte 21 + .byte 29 + .byte 164 + .byte 20 + .byte 173 + .byte 2 + .byte 177 + .byte 12 + .byte 188 + .byte 4 + .byte 180 + .byte 13 + .byte 189 + .byte 225 + .byte 200 + .byte 236 + .byte 197 + .byte 205 + .byte 228 + .byte 196 + .byte 237 + .byte 209 + .byte 241 + .byte 220 + .byte 252 + .byte 212 + .byte 244 + .byte 221 + .byte 253 + .byte 54 + .byte 142 + .byte 56 + .byte 130 + .byte 139 + .byte 48 + .byte 131 + .byte 57 + .byte 150 + .byte 38 + .byte 154 + .byte 40 + .byte 147 + .byte 32 + .byte 155 + .byte 41 + .byte 102 + .byte 78 + .byte 104 + .byte 65 + .byte 73 + .byte 96 + .byte 64 + .byte 105 + .byte 86 + .byte 118 + .byte 88 + .byte 120 + .byte 80 + .byte 112 + .byte 89 + .byte 121 + .byte 166 + .byte 30 + .byte 170 + 
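As a quick illustration of the AEAD calling convention documented by the prototypes removed above, the following sketch drives the SKINNY-AEAD-M4 pair: a 16-byte key, a 12-byte public nonce, and an 8-byte authentication tag appended to the ciphertext, with decryption returning -1 when the tag fails to verify. Only the function names and parameter order come from the removed header; the header file name "skinny-aead.h", the NULL secret-nonce argument, and the sample buffer contents are illustrative assumptions.

#include <string.h>
#include "skinny-aead.h"   /* assumed header name for the prototypes above */

int main(void)
{
    unsigned char key[16] = {0};          /* 16-byte key */
    unsigned char npub[12] = {0};         /* 12-byte public nonce */
    unsigned char msg[32], out[32];
    unsigned char ad[4] = {1, 2, 3, 4};   /* associated data (authenticated only) */
    unsigned char ct[sizeof(msg) + 8];    /* ciphertext plus 8-byte tag */
    unsigned long long clen = 0, mlen = 0;

    memset(msg, 0x42, sizeof(msg));

    /* Encrypt: on success clen is set to mlen + 8 (tag appended to ciphertext). */
    if (skinny_aead_m4_encrypt(ct, &clen, msg, sizeof(msg),
                               ad, sizeof(ad), NULL, npub, key) != 0)
        return 1;

    /* Decrypt: returns 0 on success, -1 if the authentication tag is incorrect. */
    if (skinny_aead_m4_decrypt(out, &mlen, NULL, ct, clen,
                               ad, sizeof(ad), npub, key) != 0)
        return 1;

    return (mlen == sizeof(msg) && memcmp(out, msg, (size_t)mlen) == 0) ? 0 : 1;
}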
.byte 17 + .byte 25 + .byte 163 + .byte 16 + .byte 171 + .byte 6 + .byte 182 + .byte 8 + .byte 186 + .byte 0 + .byte 179 + .byte 9 + .byte 187 + .byte 230 + .byte 206 + .byte 234 + .byte 194 + .byte 203 + .byte 227 + .byte 195 + .byte 235 + .byte 214 + .byte 246 + .byte 218 + .byte 250 + .byte 211 + .byte 243 + .byte 219 + .byte 251 + .byte 49 + .byte 138 + .byte 62 + .byte 134 + .byte 143 + .byte 55 + .byte 135 + .byte 63 + .byte 146 + .byte 33 + .byte 158 + .byte 46 + .byte 151 + .byte 39 + .byte 159 + .byte 47 + .byte 97 + .byte 72 + .byte 110 + .byte 70 + .byte 79 + .byte 103 + .byte 71 + .byte 111 + .byte 81 + .byte 113 + .byte 94 + .byte 126 + .byte 87 + .byte 119 + .byte 95 + .byte 127 + .byte 162 + .byte 24 + .byte 174 + .byte 22 + .byte 31 + .byte 167 + .byte 23 + .byte 175 + .byte 1 + .byte 178 + .byte 14 + .byte 190 + .byte 7 + .byte 183 + .byte 15 + .byte 191 + .byte 226 + .byte 202 + .byte 238 + .byte 198 + .byte 207 + .byte 231 + .byte 199 + .byte 239 + .byte 210 + .byte 242 + .byte 222 + .byte 254 + .byte 215 + .byte 247 + .byte 223 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 256 +table_1: + .byte 172 + .byte 232 + .byte 104 + .byte 60 + .byte 108 + .byte 56 + .byte 168 + .byte 236 + .byte 170 + .byte 174 + .byte 58 + .byte 62 + .byte 106 + .byte 110 + .byte 234 + .byte 238 + .byte 166 + .byte 163 + .byte 51 + .byte 54 + .byte 102 + .byte 99 + .byte 227 + .byte 230 + .byte 225 + .byte 164 + .byte 97 + .byte 52 + .byte 49 + .byte 100 + .byte 161 + .byte 228 + .byte 141 + .byte 201 + .byte 73 + .byte 29 + .byte 77 + .byte 25 + .byte 137 + .byte 205 + .byte 139 + .byte 143 + .byte 27 + .byte 31 + .byte 75 + .byte 79 + .byte 203 + .byte 207 + .byte 133 + .byte 192 + .byte 64 + .byte 21 + .byte 69 + .byte 16 + .byte 128 + .byte 197 + .byte 130 + .byte 135 + .byte 18 + .byte 23 + .byte 66 + .byte 71 + .byte 194 + .byte 199 + .byte 150 + .byte 147 + .byte 3 + .byte 6 + .byte 86 + .byte 83 + .byte 211 + .byte 214 + .byte 209 + .byte 148 + .byte 81 + .byte 4 + .byte 1 + .byte 84 + .byte 145 + .byte 212 + .byte 156 + .byte 216 + .byte 88 + .byte 12 + .byte 92 + .byte 8 + .byte 152 + .byte 220 + .byte 154 + .byte 158 + .byte 10 + .byte 14 + .byte 90 + .byte 94 + .byte 218 + .byte 222 + .byte 149 + .byte 208 + .byte 80 + .byte 5 + .byte 85 + .byte 0 + .byte 144 + .byte 213 + .byte 146 + .byte 151 + .byte 2 + .byte 7 + .byte 82 + .byte 87 + .byte 210 + .byte 215 + .byte 157 + .byte 217 + .byte 89 + .byte 13 + .byte 93 + .byte 9 + .byte 153 + .byte 221 + .byte 155 + .byte 159 + .byte 11 + .byte 15 + .byte 91 + .byte 95 + .byte 219 + .byte 223 + .byte 22 + .byte 19 + .byte 131 + .byte 134 + .byte 70 + .byte 67 + .byte 195 + .byte 198 + .byte 65 + .byte 20 + .byte 193 + .byte 132 + .byte 17 + .byte 68 + .byte 129 + .byte 196 + .byte 28 + .byte 72 + .byte 200 + .byte 140 + .byte 76 + .byte 24 + .byte 136 + .byte 204 + .byte 26 + .byte 30 + .byte 138 + .byte 142 + .byte 74 + .byte 78 + .byte 202 + .byte 206 + .byte 53 + .byte 96 + .byte 224 + .byte 165 + .byte 101 + .byte 48 + .byte 160 + .byte 229 + .byte 50 + .byte 55 + .byte 162 + .byte 167 + .byte 98 + .byte 103 + .byte 226 + .byte 231 + .byte 61 + .byte 105 + .byte 233 + .byte 173 + .byte 109 + .byte 57 + .byte 169 + .byte 237 + .byte 59 + .byte 63 + .byte 171 + .byte 175 + .byte 107 + .byte 111 + .byte 235 + .byte 239 + .byte 38 + .byte 35 + .byte 179 + .byte 182 + .byte 118 + .byte 115 + .byte 243 + .byte 246 + .byte 113 + .byte 36 + .byte 241 + .byte 180 + .byte 33 + 
.byte 116 + .byte 177 + .byte 244 + .byte 44 + .byte 120 + .byte 248 + .byte 188 + .byte 124 + .byte 40 + .byte 184 + .byte 252 + .byte 42 + .byte 46 + .byte 186 + .byte 190 + .byte 122 + .byte 126 + .byte 250 + .byte 254 + .byte 37 + .byte 112 + .byte 240 + .byte 181 + .byte 117 + .byte 32 + .byte 176 + .byte 245 + .byte 34 + .byte 39 + .byte 178 + .byte 183 + .byte 114 + .byte 119 + .byte 242 + .byte 247 + .byte 45 + .byte 121 + .byte 249 + .byte 189 + .byte 125 + .byte 41 + .byte 185 + .byte 253 + .byte 43 + .byte 47 + .byte 187 + .byte 191 + .byte 123 + .byte 127 + .byte 251 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_2, @object + .size table_2, 256 +table_2: + .byte 0 + .byte 2 + .byte 4 + .byte 6 + .byte 8 + .byte 10 + .byte 12 + .byte 14 + .byte 16 + .byte 18 + .byte 20 + .byte 22 + .byte 24 + .byte 26 + .byte 28 + .byte 30 + .byte 32 + .byte 34 + .byte 36 + .byte 38 + .byte 40 + .byte 42 + .byte 44 + .byte 46 + .byte 48 + .byte 50 + .byte 52 + .byte 54 + .byte 56 + .byte 58 + .byte 60 + .byte 62 + .byte 65 + .byte 67 + .byte 69 + .byte 71 + .byte 73 + .byte 75 + .byte 77 + .byte 79 + .byte 81 + .byte 83 + .byte 85 + .byte 87 + .byte 89 + .byte 91 + .byte 93 + .byte 95 + .byte 97 + .byte 99 + .byte 101 + .byte 103 + .byte 105 + .byte 107 + .byte 109 + .byte 111 + .byte 113 + .byte 115 + .byte 117 + .byte 119 + .byte 121 + .byte 123 + .byte 125 + .byte 127 + .byte 128 + .byte 130 + .byte 132 + .byte 134 + .byte 136 + .byte 138 + .byte 140 + .byte 142 + .byte 144 + .byte 146 + .byte 148 + .byte 150 + .byte 152 + .byte 154 + .byte 156 + .byte 158 + .byte 160 + .byte 162 + .byte 164 + .byte 166 + .byte 168 + .byte 170 + .byte 172 + .byte 174 + .byte 176 + .byte 178 + .byte 180 + .byte 182 + .byte 184 + .byte 186 + .byte 188 + .byte 190 + .byte 193 + .byte 195 + .byte 197 + .byte 199 + .byte 201 + .byte 203 + .byte 205 + .byte 207 + .byte 209 + .byte 211 + .byte 213 + .byte 215 + .byte 217 + .byte 219 + .byte 221 + .byte 223 + .byte 225 + .byte 227 + .byte 229 + .byte 231 + .byte 233 + .byte 235 + .byte 237 + .byte 239 + .byte 241 + .byte 243 + .byte 245 + .byte 247 + .byte 249 + .byte 251 + .byte 253 + .byte 255 + .byte 1 + .byte 3 + .byte 5 + .byte 7 + .byte 9 + .byte 11 + .byte 13 + .byte 15 + .byte 17 + .byte 19 + .byte 21 + .byte 23 + .byte 25 + .byte 27 + .byte 29 + .byte 31 + .byte 33 + .byte 35 + .byte 37 + .byte 39 + .byte 41 + .byte 43 + .byte 45 + .byte 47 + .byte 49 + .byte 51 + .byte 53 + .byte 55 + .byte 57 + .byte 59 + .byte 61 + .byte 63 + .byte 64 + .byte 66 + .byte 68 + .byte 70 + .byte 72 + .byte 74 + .byte 76 + .byte 78 + .byte 80 + .byte 82 + .byte 84 + .byte 86 + .byte 88 + .byte 90 + .byte 92 + .byte 94 + .byte 96 + .byte 98 + .byte 100 + .byte 102 + .byte 104 + .byte 106 + .byte 108 + .byte 110 + .byte 112 + .byte 114 + .byte 116 + .byte 118 + .byte 120 + .byte 122 + .byte 124 + .byte 126 + .byte 129 + .byte 131 + .byte 133 + .byte 135 + .byte 137 + .byte 139 + .byte 141 + .byte 143 + .byte 145 + .byte 147 + .byte 149 + .byte 151 + .byte 153 + .byte 155 + .byte 157 + .byte 159 + .byte 161 + .byte 163 + .byte 165 + .byte 167 + .byte 169 + .byte 171 + .byte 173 + .byte 175 + .byte 177 + .byte 179 + .byte 181 + .byte 183 + .byte 185 + .byte 187 + .byte 189 + .byte 191 + .byte 192 + .byte 194 + .byte 196 + .byte 198 + .byte 200 + .byte 202 + .byte 204 + .byte 206 + .byte 208 + .byte 210 + .byte 212 + .byte 214 + .byte 216 + .byte 218 + .byte 220 + .byte 222 + .byte 224 + .byte 226 + .byte 228 + .byte 230 + .byte 232 + .byte 234 + 
.byte 236 + .byte 238 + .byte 240 + .byte 242 + .byte 244 + .byte 246 + .byte 248 + .byte 250 + .byte 252 + .byte 254 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_3, @object + .size table_3, 256 +table_3: + .byte 0 + .byte 128 + .byte 1 + .byte 129 + .byte 2 + .byte 130 + .byte 3 + .byte 131 + .byte 4 + .byte 132 + .byte 5 + .byte 133 + .byte 6 + .byte 134 + .byte 7 + .byte 135 + .byte 8 + .byte 136 + .byte 9 + .byte 137 + .byte 10 + .byte 138 + .byte 11 + .byte 139 + .byte 12 + .byte 140 + .byte 13 + .byte 141 + .byte 14 + .byte 142 + .byte 15 + .byte 143 + .byte 16 + .byte 144 + .byte 17 + .byte 145 + .byte 18 + .byte 146 + .byte 19 + .byte 147 + .byte 20 + .byte 148 + .byte 21 + .byte 149 + .byte 22 + .byte 150 + .byte 23 + .byte 151 + .byte 24 + .byte 152 + .byte 25 + .byte 153 + .byte 26 + .byte 154 + .byte 27 + .byte 155 + .byte 28 + .byte 156 + .byte 29 + .byte 157 + .byte 30 + .byte 158 + .byte 31 + .byte 159 + .byte 160 + .byte 32 + .byte 161 + .byte 33 + .byte 162 + .byte 34 + .byte 163 + .byte 35 + .byte 164 + .byte 36 + .byte 165 + .byte 37 + .byte 166 + .byte 38 + .byte 167 + .byte 39 + .byte 168 + .byte 40 + .byte 169 + .byte 41 + .byte 170 + .byte 42 + .byte 171 + .byte 43 + .byte 172 + .byte 44 + .byte 173 + .byte 45 + .byte 174 + .byte 46 + .byte 175 + .byte 47 + .byte 176 + .byte 48 + .byte 177 + .byte 49 + .byte 178 + .byte 50 + .byte 179 + .byte 51 + .byte 180 + .byte 52 + .byte 181 + .byte 53 + .byte 182 + .byte 54 + .byte 183 + .byte 55 + .byte 184 + .byte 56 + .byte 185 + .byte 57 + .byte 186 + .byte 58 + .byte 187 + .byte 59 + .byte 188 + .byte 60 + .byte 189 + .byte 61 + .byte 190 + .byte 62 + .byte 191 + .byte 63 + .byte 64 + .byte 192 + .byte 65 + .byte 193 + .byte 66 + .byte 194 + .byte 67 + .byte 195 + .byte 68 + .byte 196 + .byte 69 + .byte 197 + .byte 70 + .byte 198 + .byte 71 + .byte 199 + .byte 72 + .byte 200 + .byte 73 + .byte 201 + .byte 74 + .byte 202 + .byte 75 + .byte 203 + .byte 76 + .byte 204 + .byte 77 + .byte 205 + .byte 78 + .byte 206 + .byte 79 + .byte 207 + .byte 80 + .byte 208 + .byte 81 + .byte 209 + .byte 82 + .byte 210 + .byte 83 + .byte 211 + .byte 84 + .byte 212 + .byte 85 + .byte 213 + .byte 86 + .byte 214 + .byte 87 + .byte 215 + .byte 88 + .byte 216 + .byte 89 + .byte 217 + .byte 90 + .byte 218 + .byte 91 + .byte 219 + .byte 92 + .byte 220 + .byte 93 + .byte 221 + .byte 94 + .byte 222 + .byte 95 + .byte 223 + .byte 224 + .byte 96 + .byte 225 + .byte 97 + .byte 226 + .byte 98 + .byte 227 + .byte 99 + .byte 228 + .byte 100 + .byte 229 + .byte 101 + .byte 230 + .byte 102 + .byte 231 + .byte 103 + .byte 232 + .byte 104 + .byte 233 + .byte 105 + .byte 234 + .byte 106 + .byte 235 + .byte 107 + .byte 236 + .byte 108 + .byte 237 + .byte 109 + .byte 238 + .byte 110 + .byte 239 + .byte 111 + .byte 240 + .byte 112 + .byte 241 + .byte 113 + .byte 242 + .byte 114 + .byte 243 + .byte 115 + .byte 244 + .byte 116 + .byte 245 + .byte 117 + .byte 246 + .byte 118 + .byte 247 + .byte 119 + .byte 248 + .byte 120 + .byte 249 + .byte 121 + .byte 250 + .byte 122 + .byte 251 + .byte 123 + .byte 252 + .byte 124 + .byte 253 + .byte 125 + .byte 254 + .byte 126 + .byte 255 + .byte 127 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_4, @object + .size table_4, 112 +table_4: + .byte 1 + .byte 0 + .byte 3 + .byte 0 + .byte 7 + .byte 0 + .byte 15 + .byte 0 + .byte 15 + .byte 1 + .byte 14 + .byte 3 + .byte 13 + .byte 3 + .byte 11 + .byte 3 + .byte 7 + .byte 3 + .byte 15 + .byte 2 + .byte 14 + .byte 1 + .byte 12 + .byte 3 + 
.byte 9 + .byte 3 + .byte 3 + .byte 3 + .byte 7 + .byte 2 + .byte 14 + .byte 0 + .byte 13 + .byte 1 + .byte 10 + .byte 3 + .byte 5 + .byte 3 + .byte 11 + .byte 2 + .byte 6 + .byte 1 + .byte 12 + .byte 2 + .byte 8 + .byte 1 + .byte 0 + .byte 3 + .byte 1 + .byte 2 + .byte 2 + .byte 0 + .byte 5 + .byte 0 + .byte 11 + .byte 0 + .byte 7 + .byte 1 + .byte 14 + .byte 2 + .byte 12 + .byte 1 + .byte 8 + .byte 3 + .byte 1 + .byte 3 + .byte 3 + .byte 2 + .byte 6 + .byte 0 + .byte 13 + .byte 0 + .byte 11 + .byte 1 + .byte 6 + .byte 3 + .byte 13 + .byte 2 + .byte 10 + .byte 1 + .byte 4 + .byte 3 + .byte 9 + .byte 2 + .byte 2 + .byte 1 + .byte 4 + .byte 2 + .byte 8 + .byte 0 + .byte 1 + .byte 1 + .byte 2 + .byte 2 + .byte 4 + .byte 0 + .byte 9 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 2 + .byte 12 + .byte 0 + .byte 9 + .byte 1 + .byte 2 + .byte 3 + .byte 5 + .byte 2 + .byte 10 + .byte 0 + + .text +.global skinny_128_384_init + .type skinny_128_384_init, @function +skinny_128_384_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,12 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_384_init, .-skinny_128_384_init + + .text +.global skinny_128_384_encrypt + .type skinny_128_384_encrypt, @function +skinny_128_384_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + std Y+33,r18 + std Y+34,r19 + std Y+35,r20 + std Y+36,r21 + ldd r18,Z+36 + ldd r19,Z+37 + ldd r20,Z+38 + ldd r21,Z+39 + std Y+37,r18 + std Y+38,r19 + std Y+39,r20 + std Y+40,r21 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + std Y+41,r18 + std Y+42,r19 + std Y+43,r20 + std Y+44,r21 + ldd r18,Z+44 + ldd r19,Z+45 + ldd r20,Z+46 + ldd r21,Z+47 + std Y+45,r18 + std Y+46,r19 + std Y+47,r20 + std Y+48,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +114: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor 
r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov 
r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if 
defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd 
r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov 
r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + 
elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 
+#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,112 + brne 5721f + rjmp 790f +5721: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if 
defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 114b +790: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + 
st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_encrypt, .-skinny_128_384_encrypt + +.global skinny_128_384_encrypt_tk_full + .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt + + .text +.global skinny_128_384_decrypt + .type skinny_128_384_decrypt, @function +skinny_128_384_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r23 + std Y+2,r2 + std Y+3,r21 + std Y+4,r20 + std Y+5,r3 + std Y+6,r18 + std Y+7,r19 + std Y+8,r22 + std Y+9,r9 + std Y+10,r10 + std Y+11,r7 + std Y+12,r6 + std Y+13,r11 + std Y+14,r4 + std Y+15,r5 + std Y+16,r8 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r23 + std Y+18,r2 + std Y+19,r21 + std Y+20,r20 + std Y+21,r3 + std Y+22,r18 + std Y+23,r19 + std Y+24,r22 + std Y+25,r9 + std Y+26,r10 + std Y+27,r7 + std Y+28,r6 + std Y+29,r11 + std Y+30,r4 + std Y+31,r5 + std Y+32,r8 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r22,Z+36 + ldd r23,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+33,r23 + std Y+34,r2 + std Y+35,r21 + std Y+36,r20 + std Y+37,r3 + std Y+38,r18 + std Y+39,r19 + std Y+40,r22 + std Y+41,r9 + std Y+42,r10 + std Y+43,r7 + std Y+44,r6 + std Y+45,r11 + std Y+46,r4 + std Y+47,r5 + std Y+48,r8 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +122: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + 
ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 122b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,28 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +150: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 150b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 +179: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + 
mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 179b + std Y+33,r12 + std Y+34,r13 + std Y+35,r14 + std Y+36,r15 + std Y+37,r24 + std Y+38,r25 + std Y+39,r16 + std Y+40,r17 + ldi r26,28 + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 +207: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 207b + std Y+41,r12 + std Y+42,r13 + std Y+43,r14 + std Y+44,r15 + std Y+45,r24 + std Y+46,r25 + std Y+47,r16 + std Y+48,r17 + ldi r26,112 +227: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z 
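
The eight-branch lookup blocks repeated above all do the same job: the index byte is moved into r30 (the low half of Z) and one table byte is fetched from program memory, using ELPM on parts with a RAMPZ register (tables may sit above the first 64K of flash), LPM Rd,Z where the enhanced LPMX form exists, a plain LD on __AVR_TINY__ parts whose flash is mapped into the data space, and the classic LPM-into-r0 fallback otherwise. A rough portable equivalent using avr-libc's pgmspace API is sketched below; the helper name read_table_byte is illustrative and not part of this patch, and that table_0/table_1 hold the SKINNY-128 S-box and its inverse is an inference from how they are used.

    #include <stdint.h>
    #ifdef __AVR__
    #include <avr/pgmspace.h>
    #endif

    /* Hypothetical helper (not from this patch): fetch one byte of a lookup
     * table.  On AVR the tables live in flash, so a program-memory read is
     * required; on a host build a normal array access is enough. */
    static inline uint8_t read_table_byte(const uint8_t *table, uint8_t index)
    {
    #ifdef __AVR__
        /* Near read via LPM.  A table linked above 64K would need
         * pgm_read_byte_far() instead, which is what the ELPM/RAMPZ branch
         * of the assembly handles explicitly. */
        return pgm_read_byte(table + index);
    #else
        return table[index];
    #endif
    }
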
+#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + 
eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z 
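
The short blocks above that switch Z to table_4, decrement the round counter in r26, XOR the fetched byte into the state, and then XOR the immediate 2 (the "ldi r27,2 / eor" pair) are the round-constant addition. In SKINNY the constant is a 6-bit LFSR value rc: its low nibble goes into the first cell of row 0, its high bits into the first cell of row 1, and a fixed 0x02 into the first cell of row 2. That table_4 holds those bytes precomputed is an assumption drawn from how it is indexed; this decryption code walks the table backwards. A minimal C sketch of the generator:

    #include <stdint.h>

    /* 6-bit round-constant LFSR from the SKINNY specification:
     * (rc5..rc0) -> (rc4..rc0, rc5 ^ rc4 ^ 1). */
    static uint8_t skinny_next_rc(uint8_t rc)
    {
        return (uint8_t)(((rc << 1) ^ ((rc >> 5) & 1) ^ ((rc >> 4) & 1) ^ 1) & 0x3F);
    }

    /* Per encryption round the constant is applied to the first column:
     *     state[0][0] ^= rc & 0x0F;
     *     state[1][0] ^= rc >> 4;
     *     state[2][0] ^= 0x02;    -- the "ldi r27,2 / eor" pair above
     * Decryption applies the same constants in reverse round order, which
     * is why r26 is decremented here rather than incremented. */
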
+#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + 
elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z 
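
The "ldd ... / std Y+..." shuffles above, together with the table_2 and table_3 lookups, advance the tweakey schedule: each round the 16 cells of every tweakey array are rearranged by the fixed permutation PT, and the cells that land in the top two rows of TK2 and TK3 are additionally passed through an 8-bit LFSR. That the tables precompute those LFSRs, one lookup per byte, is an assumption based on how they are indexed. A byte-wise C sketch under that assumption:

    #include <stdint.h>
    #include <string.h>

    /* Cell permutation and per-cell LFSRs from the SKINNY specification. */
    static const uint8_t PT[16] = {9, 15, 8, 13, 10, 14, 12, 11,
                                   0,  1, 2,  3,  4,  5,  6,  7};
    static uint8_t lfsr2(uint8_t x) /* x7..x0 -> x6..x0, x7^x5 */
    {
        return (uint8_t)((x << 1) ^ (((x >> 7) ^ (x >> 5)) & 1));
    }
    static uint8_t lfsr3(uint8_t x) /* x7..x0 -> x0^x6, x7..x1 */
    {
        return (uint8_t)((x >> 1) ^ (((x << 7) ^ (x << 1)) & 0x80));
    }

    /* One schedule step for the three 16-byte tweakey arrays. */
    static void tweakey_step(uint8_t tk1[16], uint8_t tk2[16], uint8_t tk3[16])
    {
        uint8_t t1[16], t2[16], t3[16];
        int i;
        for (i = 0; i < 16; ++i) {       /* permute the cells */
            t1[i] = tk1[PT[i]];
            t2[i] = tk2[PT[i]];
            t3[i] = tk3[PT[i]];
        }
        for (i = 0; i < 8; ++i) {        /* LFSR the top two rows of TK2/TK3 */
            t2[i] = lfsr2(t2[i]);
            t3[i] = lfsr3(t3[i]);
        }
        memcpy(tk1, t1, 16);
        memcpy(tk2, t2, 16);
        memcpy(tk3, t3, 16);
    }
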
+#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm 
r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 903f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 227b +903: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_decrypt, .-skinny_128_384_decrypt + + .text +.global skinny_128_256_init + .type skinny_128_256_init, @function +skinny_128_256_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,8 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_256_init, .-skinny_128_256_init + + .text +.global skinny_128_256_encrypt + .type skinny_128_256_encrypt, @function +skinny_128_256_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std 
Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +82: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 
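
For orientation, skinny_128_256_init above simply copies the 32-byte tweakey into the key-schedule structure (eight iterations of four bytes each), and skinny_128_256_encrypt then loads that schedule onto its stack frame and expands it round by round; the skinny_128_256_encrypt_tk_full alias declared after the function body points at the same code because the schedule is kept in full. The declarations below are assumptions, inferred from the calling convention visible in the prologues (first argument in r25:r24, second in r23:r22, third in r21:r20); the real ones live in the implementation's C header, which is not part of this hunk.

    #include <stdint.h>

    /* Assumed declarations -- layout and prototypes inferred, not quoted. */
    typedef struct {
        uint8_t TK1[16];
        uint8_t TK2[16];
    } skinny_128_256_key_schedule_t;
    void skinny_128_256_init(skinny_128_256_key_schedule_t *ks,
                             const unsigned char key[32]);
    void skinny_128_256_encrypt(const skinny_128_256_key_schedule_t *ks,
                                unsigned char *output, const unsigned char *input);

    /* Hypothetical usage: encrypt one 16-byte block under a 256-bit tweakey. */
    static void example(const unsigned char key[32],
                        const unsigned char pt[16], unsigned char ct[16])
    {
        skinny_128_256_key_schedule_t ks;
        skinny_128_256_init(&ks, key);
        skinny_128_256_encrypt(&ks, ct, pt);
    }
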
+#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov 
r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor 
r20,r0 + ldd r0,Y+32 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z 
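
The register rotations above (a single mov chain rotates a four-byte row by one cell; two swaps rotate it by two) and the three eor chains that follow them are SKINNY's ShiftRows and MixColumns, with the final row rotation folded into the register allocation. On a plain byte-matrix state the same step reads roughly as follows; this is a sketch of the specified operation, not a transcription of the register scheduling above.

    #include <stdint.h>
    #include <string.h>

    /* ShiftRows + MixColumns of one SKINNY-128 round on a 4x4 byte state. */
    static void shift_rows_and_mix(uint8_t row[4][4])
    {
        uint8_t tmp[4];
        int i, j;
        /* ShiftRows: row i is rotated right by i cells. */
        for (i = 1; i < 4; ++i) {
            for (j = 0; j < 4; ++j)
                tmp[(j + i) & 3] = row[i][j];
            memcpy(row[i], tmp, 4);
        }
        /* MixColumns: three XOR chains, then the rows rotate downwards. */
        for (j = 0; j < 4; ++j) row[1][j] ^= row[2][j];
        for (j = 0; j < 4; ++j) row[2][j] ^= row[0][j];
        for (j = 0; j < 4; ++j) row[3][j] ^= row[2][j];
        memcpy(tmp, row[3], 4);
        memcpy(row[3], row[2], 4);
        memcpy(row[2], row[1], 4);
        memcpy(row[1], row[0], 4);
        memcpy(row[0], tmp, 4);
    }
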
+#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,96 + breq 594f + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld 
r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 82b +594: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_encrypt, .-skinny_128_256_encrypt + +.global skinny_128_256_encrypt_tk_full + .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt + + .text +.global skinny_128_256_decrypt + .type skinny_128_256_decrypt, @function +skinny_128_256_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + std Y+5,r22 + std Y+6,r23 + std Y+7,r2 + std Y+8,r3 + std Y+9,r4 + std Y+10,r5 + std Y+11,r6 + std Y+12,r7 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + std Y+21,r22 + std Y+22,r23 + std Y+23,r2 + std Y+24,r3 + std Y+25,r4 + std Y+26,r5 + std Y+27,r6 + std Y+28,r7 + std Y+29,r8 + std Y+30,r9 + std Y+31,r10 + std Y+32,r11 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,24 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +90: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 
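
skinny_128_256_decrypt, which begins above, does not store a separate decryption schedule: after copying the tweakey to its frame it runs the two "ldi r26,24" loops, which pass every byte of TK2 through the table_2 lookup 24 times. That count matches the cipher's 48 rounds, in which each tweakey half is updated every other round, and is consistent with fast-forwarding the schedule to its final state so it can be stepped backwards while decrypting (the 384-bit variant earlier uses 28 iterations for its 56 rounds, with table_3 for the TK3 bytes). A small sketch of that fast-forward in C, using the spec LFSR directly rather than a table:

    #include <stdint.h>

    /* TK2 LFSR from the SKINNY specification: x7..x0 -> x6..x0, x7^x5. */
    static uint8_t lfsr2(uint8_t x)
    {
        return (uint8_t)((x << 1) ^ (((x >> 7) ^ (x >> 5)) & 1));
    }

    /* Advance TK2 past all 48 rounds of SKINNY-128-256 before unwinding:
     * each of the 16 bytes goes through the LFSR 24 times, which is what
     * the table_2 loops above compute with one lookup per byte. */
    static void fast_forward_tk2(uint8_t tk2[16])
    {
        unsigned i, j;
        for (j = 0; j < 24; ++j)
            for (i = 0; i < 16; ++i)
                tk2[i] = lfsr2(tk2[i]);
    }
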
+#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 90b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,24 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +118: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 118b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,96 +139: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov 
r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov 
r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + 
mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif 
defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi 
r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 651f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 139b +651: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_decrypt, .-skinny_128_256_decrypt + +#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/internal-skinny128.c b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/internal-skinny128.c index 65ba4ed..579ced1 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/internal-skinny128.c +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/internal-skinny128.c @@ -25,6 +25,8 @@ #include "internal-util.h" #include +#if !defined(__AVR__) + STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) { /* This function is used to fast-forward the TK1 tweak value @@ -55,42 +57,33 @@ STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) ((row3 << 24) & 0xFF000000U); } -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - size_t key_len) +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t TK3[4]; uint32_t *schedule; unsigned round; uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || (key_len != 32 && key_len != 48)) - return 0; - +#endif + +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using 
the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); + memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); +#else /* Set the initial states of TK1, TK2, and TK3 */ - if (key_len == 32) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - TK3[0] = le_load_word32(key + 16); - TK3[1] = le_load_word32(key + 20); - TK3[2] = le_load_word32(key + 24); - TK3[3] = le_load_word32(key + 28); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); + TK3[0] = le_load_word32(key + 32); + TK3[1] = le_load_word32(key + 36); + TK3[2] = le_load_word32(key + 40); + TK3[3] = le_load_word32(key + 44); /* Set up the key schedule using TK2 and TK3. TK1 is not added * to the key schedule because we will derive that part of the @@ -116,20 +109,7 @@ int skinny_128_384_init skinny128_LFSR3(TK3[0]); skinny128_LFSR3(TK3[1]); } - return 1; -} - -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_384_encrypt @@ -138,7 +118,13 @@ void skinny_128_384_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -148,14 +134,24 @@ void skinny_128_384_encrypt s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -163,8 +159,15 @@ void skinny_128_384_encrypt skinny128_sbox(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -185,6 +188,16 @@ void 
skinny_128_384_encrypt /* Permute TK1 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_permute_tk(TK3); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -200,7 +213,13 @@ void skinny_128_384_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0x15; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -215,15 +234,47 @@ void skinny_128_384_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Permute TK1 to fast-forward it to the end of the key schedule */ skinny128_fast_forward_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_fast_forward_tk(TK2); + skinny128_fast_forward_tk(TK3); + for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2 and TK3. + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); + skinny128_LFSR3(TK3[2]); + skinny128_LFSR3(TK3[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_inv_permute_tk(TK3); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); + skinny128_LFSR2(TK3[2]); + skinny128_LFSR2(TK3[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -240,8 +291,15 @@ void skinny_128_384_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -259,13 +317,18 @@ void skinny_128_384_decrypt } void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2) { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; uint32_t TK2[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -275,7 +338,7 @@ void skinny_128_384_encrypt_tk2 s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1/TK2 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = 
le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); @@ -284,9 +347,15 @@ void skinny_128_384_encrypt_tk2 TK2[1] = le_load_word32(tk2 + 4); TK2[2] = le_load_word32(tk2 + 8); TK2[3] = le_load_word32(tk2 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -294,8 +363,15 @@ void skinny_128_384_encrypt_tk2 skinny128_sbox(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -319,6 +395,13 @@ void skinny_128_384_encrypt_tk2 skinny128_permute_tk(TK2); skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK3); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -408,33 +491,27 @@ void skinny_128_384_encrypt_tk_full le_store_word32(output + 12, s3); } -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len) +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t *schedule; unsigned round; uint8_t rc; +#endif - /* Validate the parameters */ - if (!ks || !key || (key_len != 16 && key_len != 32)) - return 0; - +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); +#else /* Set the initial states of TK1 and TK2 */ - if (key_len == 16) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); /* Set up the key schedule using TK2. 
TK1 is not added * to the key schedule because we will derive that part of the @@ -457,20 +534,7 @@ int skinny_128_256_init skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); } - return 1; -} - -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_256_encrypt @@ -479,7 +543,12 @@ void skinny_128_256_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -494,18 +563,31 @@ void skinny_128_256_encrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); skinny128_sbox(s2); skinny128_sbox(s3); - /* Apply the subkey for this round */ + /* XOR the round constant and the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -524,8 +606,15 @@ void skinny_128_256_encrypt s1 = s0; s0 = temp; - /* Permute TK1 for the next round */ + /* Permute TK1 and TK2 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -541,7 +630,12 @@ void skinny_128_256_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0x09; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -558,12 +652,29 @@ void skinny_128_256_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2. 
+ skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -580,8 +691,15 @@ void skinny_128_256_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -670,142 +788,14 @@ void skinny_128_256_encrypt_tk_full le_store_word32(output + 12, s3); } -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len) -{ - uint32_t TK1[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || key_len != 16) - return 0; - - /* Set the initial state of TK1 */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); +#else /* __AVR__ */ - /* Set up the key schedule using TK1 */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK1[0] ^ (rc & 0x0F); - schedule[1] = TK1[1] ^ (rc >> 4); - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); - } - return 1; -} - -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) +void skinny_128_384_encrypt_tk2 + (skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, const unsigned char *tk2) { - uint32_t s0, s1, s2, s3; - const uint32_t *schedule = ks->k; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
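
With SKINNY_128_SMALL_SCHEDULE the round constants are no longer baked into a precomputed schedule: encryption steps a 6-bit LFSR forward every round (the `rc = (rc << 1) ^ ...` update in the hunks above) and XORs its two nibbles into the first two state words, while decryption seeds rc with a precomputed end value (0x15 for SKINNY-128-384, 0x09 for SKINNY-128-256 in these hunks) and steps it backwards. A minimal standalone sketch of the two steps, with an exhaustive check that the backward step really inverts the forward one; the helper names and main() are illustration only, not part of the library:

#include <stdint.h>
#include <stdio.h>

/* Forward round-constant step used while encrypting (6-bit LFSR). */
static uint8_t rc_forward(uint8_t rc)
{
    rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
    return rc & 0x3F;
}

/* Backward step used while decrypting. */
static uint8_t rc_backward(uint8_t rc)
{
    return (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20);
}

int main(void)
{
    unsigned ok = 1, v;
    for (v = 0; v < 64; ++v)
        ok &= (rc_backward(rc_forward((uint8_t)v)) == v);
    printf("backward step inverts forward step: %s\n", ok ? "yes" : "NO");
    return 0;
}

During the rounds the low nibble of rc is XORed into s0 and the high nibble into s1, exactly as the `(rc & 0x0F)` / `(rc >> 4)` expressions above show.
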
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); + memcpy(ks->TK2, tk2, 16); + skinny_128_384_encrypt(ks, output, input); } -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - const uint32_t *schedule; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_128_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule -= 2) { - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} +#endif /* __AVR__ */ diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/internal-skinny128.h b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/internal-skinny128.h index 76b34f5..2bfda3c 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/internal-skinny128.h +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/internal-skinny128.h @@ -39,6 +39,16 @@ extern "C" { #endif /** + * \def SKINNY_128_SMALL_SCHEDULE + * \brief Defined to 1 to use the small key schedule version of SKINNY-128. + */ +#if defined(__AVR__) +#define SKINNY_128_SMALL_SCHEDULE 1 +#else +#define SKINNY_128_SMALL_SCHEDULE 0 +#endif + +/** * \brief Size of a block for SKINNY-128 block ciphers. */ #define SKINNY_128_BLOCK_SIZE 16 @@ -56,8 +66,16 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; + + /** TK3 for the small key schedule */ + uint8_t TK3[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_384_ROUNDS * 2]; +#endif } skinny_128_384_key_schedule_t; @@ -66,29 +84,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 32 or 48, - * where 32 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-384. 
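
The schedule structures in this header hunk are where the small-schedule build wins its RAM back: on AVR the SKINNY-128-384 schedule carries only the 48 bytes of TK1/TK2/TK3 (32 bytes for the 256-bit variant), while other targets keep the 16-byte TK1 plus SKINNY_128_384_ROUNDS * 2 expanded 32-bit round-key words. A throwaway test program makes the trade-off visible; this is a hypothetical illustration, assuming internal-skinny128.h is on the include path:

#include <stdio.h>
#include "internal-skinny128.h"

/* Illustration only: report which schedule layout the build selected
 * and how much RAM each key schedule structure occupies. */
int main(void)
{
    printf("SKINNY_128_SMALL_SCHEDULE = %d\n", SKINNY_128_SMALL_SCHEDULE);
    printf("skinny_128_384_key_schedule_t = %u bytes\n",
           (unsigned)sizeof(skinny_128_384_key_schedule_t));
    printf("skinny_128_256_key_schedule_t = %u bytes\n",
           (unsigned)sizeof(skinny_128_256_key_schedule_t));
    return 0;
}

The cost of the small layout is that every round of encrypt/decrypt recomputes the TK2/TK3 contribution and the round constant instead of reading a precomputed word pair.
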
- * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); /** * \brief Encrypts a 128-bit block with SKINNY-128-384. @@ -133,9 +131,12 @@ void skinny_128_384_decrypt * This version is useful when both TK1 and TK2 change from block to block. * When the key is initialized with skinny_128_384_init(), the TK2 part of * the key value should be set to zero. + * + * \note Some versions of this function may modify the key schedule to + * copy tk2 into place. */ void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2); /** @@ -170,8 +171,13 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_256_ROUNDS * 2]; +#endif } skinny_128_256_key_schedule_t; @@ -180,29 +186,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16 or 32, - * where 16 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); /** * \brief Encrypts a 128-bit block with SKINNY-128-256. @@ -251,63 +237,6 @@ void skinny_128_256_encrypt_tk_full (const unsigned char key[32], unsigned char *output, const unsigned char *input); -/** - * \brief Number of rounds for SKINNY-128-128. - */ -#define SKINNY_128_128_ROUNDS 40 - -/** - * \brief Structure of the key schedule for SKINNY-128-128. - */ -typedef struct -{ - /** Words of the key schedule */ - uint32_t k[SKINNY_128_128_ROUNDS * 2]; - -} skinny_128_128_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-128. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. 
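
Callers migrate from the old two-step setup (skinny_128_384_init with a key_len argument followed by skinny_128_384_set_tweak) to passing one fixed-size 48-byte TK1||TK2||TK3 buffer and writing the tweak straight into the public TK1 field of the schedule, which is how the skinny-aead.c hunk further down uses it. A hedged sketch of one way a caller might adapt; all buffer and function names here are placeholders except the library calls themselves:

#include <string.h>
#include "internal-skinny128.h"

/* Hypothetical wrapper: encrypt one block with the secret key in TK3,
 * a per-message value in TK2 and a 16-byte tweak in TK1, new API. */
static void encrypt_block_with_tweak(const unsigned char key[16],
                                     const unsigned char tk2[16],
                                     const unsigned char tweak[16],
                                     unsigned char out[16],
                                     const unsigned char in[16])
{
    skinny_128_384_key_schedule_t ks;
    unsigned char k[48];

    memset(k, 0, 16);              /* TK1 is zero at schedule-setup time  */
    memcpy(k + 16, tk2, 16);       /* TK2                                 */
    memcpy(k + 32, key, 16);       /* TK3                                 */
    skinny_128_384_init(&ks, k);   /* old code: init(&ks, key32, 32)      */

    memcpy(ks.TK1, tweak, 16);     /* old code: skinny_128_384_set_tweak() */
    skinny_128_384_encrypt(&ks, out, in);
}
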
- * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - #ifdef __cplusplus } #endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/internal-util.h b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/internal-util.h +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
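
The comment just added above is the whole idea behind LW_CRYPTO_ROTATE32_COMPOSED: on AVR a 32-bit rotate by 8, 16 or 24 is just byte shuffling and a rotate by 1 is cheap, so every other count is built from those. For example, the composed macros that follow define leftRotate13 as "rotate left by 16, then right by 3". A tiny self-contained check of that composition, written with plain shifts rather than the library macros (illustration only):

#include <stdint.h>
#include <stdio.h>

static uint32_t rotl(uint32_t x, unsigned n) { return (x << n) | (x >> (32u - n)); }
static uint32_t rotr(uint32_t x, unsigned n) { return (x >> n) | (x << (32u - n)); }

int main(void)
{
    uint32_t x = 0x12345678u;
    /* leftRotate13 composed as: rotate left by 16, then right by 1 three times. */
    uint32_t composed = rotr(rotr(rotr(rotl(x, 16), 1), 1), 1);
    printf("composed rotate matches direct rotate by 13: %s\n",
           composed == rotl(x, 13) ? "yes" : "NO");
    return 0;
}
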
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/skinny-aead.c b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/skinny-aead.c index 2bb37e9..7558527 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/skinny-aead.c +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk396128v1/rhys/skinny-aead.c @@ -105,11 +105,12 @@ static void skinny_aead_128_384_init (skinny_128_384_key_schedule_t *ks, const unsigned char *key, const unsigned char *nonce, unsigned nonce_len) { - unsigned char k[32]; - memcpy(k, nonce, nonce_len); - memset(k + nonce_len, 0, 16 - nonce_len); - memcpy(k + 16, key, 16); - skinny_128_384_init(ks, k, 32); + unsigned char k[48]; + memset(k, 0, 16); + memcpy(k + 16, nonce, nonce_len); + memset(k + 16 + nonce_len, 0, 16 - nonce_len); + memcpy(k + 32, key, 16); + skinny_128_384_init(ks, k); } /** @@ -136,7 +137,7 @@ static void skinny_aead_128_384_init #define skinny_aead_128_384_update_lfsr(lfsr) \ do { \ uint8_t feedback = ((lfsr) & (1ULL << 63)) ? 
0x1B : 0x00; \ - (lfsr) = ((lfsr) << 1) | feedback; \ + (lfsr) = ((lfsr) << 1) ^ feedback; \ } while (0) /** @@ -520,7 +521,7 @@ static void skinny_aead_128_256_init memset(k, 0, 16 - nonce_len); memcpy(k + 16 - nonce_len, nonce, nonce_len); memcpy(k + 16, key, 16); - skinny_128_256_init(ks, k, 32); + skinny_128_256_init(ks, k); } /** diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/aead-common.c b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/aead-common.h b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
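(The switch from "|" to "^" in the LFSR update above matters because bits 1, 3 and 4 of "(lfsr) << 1" may already be set: OR-ing the feedback byte would saturate those bits instead of toggling them. A sketch of the corrected step as a plain function; reading it as doubling in GF(2^64) with reduction polynomial x^64 + x^4 + x^3 + x + 1 is an editorial interpretation of the 0x1B constant, not something stated in the patch.)

#include <stdint.h>

/* One step of the block-counter LFSR, matching the corrected macro:
 * shift left by one and, if the top bit fell out, XOR in 0x1B
 * (bits 0, 1, 3 and 4). */
static uint64_t lfsr_step(uint64_t lfsr)
{
    uint64_t feedback = (lfsr & (1ULL << 63)) ? 0x1B : 0x00;
    return (lfsr << 1) ^ feedback;
}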
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. 
Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
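(The tag checks documented here avoid branching on secret data by folding all byte differences into a single accumulator and turning it into an all-ones or all-zero mask. A minimal sketch of that masking step, mirroring the removed aead-common.c; the helper name is hypothetical.)

#include <stdint.h>

/* Returns -1 (all bits set) when the two tags are equal and 0 otherwise,
 * with no data-dependent branches.  The caller can AND the mask over the
 * plaintext to wipe it when authentication fails. */
static int tag_equal_mask(const uint8_t *tag1, const uint8_t *tag2, unsigned len)
{
    int accum = 0;
    while (len-- > 0)
        accum |= *tag1++ ^ *tag2++;   /* non-zero iff any byte differs */
    return (accum - 1) >> 8;          /* 0 -> -1, 1..255 -> 0 */
}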
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/api.h b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/api.h deleted file mode 100644 index 32c9622..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 12 -#define CRYPTO_ABYTES 8 -#define CRYPTO_NOOVERLAP 1 diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/encrypt.c b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/encrypt.c deleted file mode 100644 index 0623826..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "skinny-aead.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return skinny_aead_m4_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return skinny_aead_m4_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-skinny128-avr.S b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-skinny128-avr.S deleted file mode 100644 index d342cd5..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-skinny128-avr.S +++ /dev/null @@ -1,10099 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 256 -table_0: - .byte 101 - .byte 76 - .byte 106 - .byte 66 - .byte 75 - .byte 99 - .byte 67 - .byte 107 - .byte 85 - .byte 117 - .byte 90 - .byte 122 - .byte 83 - .byte 115 - .byte 91 - .byte 123 - .byte 53 - .byte 140 - .byte 58 - .byte 129 - .byte 137 - .byte 51 - .byte 128 - .byte 59 - .byte 149 - .byte 37 - .byte 152 - .byte 42 - .byte 144 - .byte 35 - .byte 153 - .byte 43 - .byte 229 - .byte 204 - .byte 232 - .byte 193 - .byte 201 - .byte 224 - .byte 192 - .byte 233 - .byte 213 - .byte 245 - .byte 216 - .byte 248 - .byte 208 - .byte 240 - .byte 217 - .byte 249 - .byte 165 - .byte 28 - .byte 168 - .byte 18 - .byte 27 - .byte 160 - .byte 19 - .byte 169 - .byte 5 - .byte 181 - .byte 10 - .byte 184 - .byte 3 - .byte 176 - .byte 11 - .byte 185 - .byte 50 - .byte 136 - .byte 60 - .byte 133 - .byte 141 - .byte 52 - .byte 132 - .byte 61 - .byte 145 - .byte 34 - .byte 156 - .byte 44 - .byte 148 - .byte 36 - .byte 157 - .byte 45 - .byte 98 - .byte 74 - .byte 108 - .byte 69 - .byte 77 - .byte 100 - .byte 68 - .byte 109 - .byte 82 - .byte 114 - .byte 92 - .byte 124 - .byte 84 - .byte 116 - .byte 93 - .byte 125 - .byte 161 - .byte 26 - .byte 172 - .byte 21 - .byte 29 - .byte 164 - .byte 20 - .byte 173 - .byte 2 - .byte 177 - .byte 12 - .byte 188 - .byte 4 - .byte 180 - .byte 13 - 
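(The removed encrypt.c above is only a thin wrapper exposing the NIST LWC entry points. A minimal round-trip caller might look like the following sketch; buffer sizes follow the removed api.h — 16-byte key, 12-byte nonce, 8-byte tag — and the prototypes are copied from the wrapper, so the function name and zeroed inputs are purely illustrative.)

extern int crypto_aead_encrypt
    (unsigned char *c, unsigned long long *clen,
     const unsigned char *m, unsigned long long mlen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *nsec, const unsigned char *npub,
     const unsigned char *k);
extern int crypto_aead_decrypt
    (unsigned char *m, unsigned long long *mlen, unsigned char *nsec,
     const unsigned char *c, unsigned long long clen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *npub, const unsigned char *k);

int roundtrip_example(void)
{
    unsigned char key[16] = {0}, nonce[12] = {0};
    unsigned char msg[32] = {0}, ct[32 + 8], pt[32];
    unsigned long long ctlen, ptlen;

    crypto_aead_encrypt(ct, &ctlen, msg, sizeof(msg), 0, 0, 0, nonce, key);
    /* Returns 0 only if the 8-byte tag at the end of ct verifies. */
    return crypto_aead_decrypt(pt, &ptlen, 0, ct, ctlen, 0, 0, nonce, key);
}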
.byte 189 - .byte 225 - .byte 200 - .byte 236 - .byte 197 - .byte 205 - .byte 228 - .byte 196 - .byte 237 - .byte 209 - .byte 241 - .byte 220 - .byte 252 - .byte 212 - .byte 244 - .byte 221 - .byte 253 - .byte 54 - .byte 142 - .byte 56 - .byte 130 - .byte 139 - .byte 48 - .byte 131 - .byte 57 - .byte 150 - .byte 38 - .byte 154 - .byte 40 - .byte 147 - .byte 32 - .byte 155 - .byte 41 - .byte 102 - .byte 78 - .byte 104 - .byte 65 - .byte 73 - .byte 96 - .byte 64 - .byte 105 - .byte 86 - .byte 118 - .byte 88 - .byte 120 - .byte 80 - .byte 112 - .byte 89 - .byte 121 - .byte 166 - .byte 30 - .byte 170 - .byte 17 - .byte 25 - .byte 163 - .byte 16 - .byte 171 - .byte 6 - .byte 182 - .byte 8 - .byte 186 - .byte 0 - .byte 179 - .byte 9 - .byte 187 - .byte 230 - .byte 206 - .byte 234 - .byte 194 - .byte 203 - .byte 227 - .byte 195 - .byte 235 - .byte 214 - .byte 246 - .byte 218 - .byte 250 - .byte 211 - .byte 243 - .byte 219 - .byte 251 - .byte 49 - .byte 138 - .byte 62 - .byte 134 - .byte 143 - .byte 55 - .byte 135 - .byte 63 - .byte 146 - .byte 33 - .byte 158 - .byte 46 - .byte 151 - .byte 39 - .byte 159 - .byte 47 - .byte 97 - .byte 72 - .byte 110 - .byte 70 - .byte 79 - .byte 103 - .byte 71 - .byte 111 - .byte 81 - .byte 113 - .byte 94 - .byte 126 - .byte 87 - .byte 119 - .byte 95 - .byte 127 - .byte 162 - .byte 24 - .byte 174 - .byte 22 - .byte 31 - .byte 167 - .byte 23 - .byte 175 - .byte 1 - .byte 178 - .byte 14 - .byte 190 - .byte 7 - .byte 183 - .byte 15 - .byte 191 - .byte 226 - .byte 202 - .byte 238 - .byte 198 - .byte 207 - .byte 231 - .byte 199 - .byte 239 - .byte 210 - .byte 242 - .byte 222 - .byte 254 - .byte 215 - .byte 247 - .byte 223 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 256 -table_1: - .byte 172 - .byte 232 - .byte 104 - .byte 60 - .byte 108 - .byte 56 - .byte 168 - .byte 236 - .byte 170 - .byte 174 - .byte 58 - .byte 62 - .byte 106 - .byte 110 - .byte 234 - .byte 238 - .byte 166 - .byte 163 - .byte 51 - .byte 54 - .byte 102 - .byte 99 - .byte 227 - .byte 230 - .byte 225 - .byte 164 - .byte 97 - .byte 52 - .byte 49 - .byte 100 - .byte 161 - .byte 228 - .byte 141 - .byte 201 - .byte 73 - .byte 29 - .byte 77 - .byte 25 - .byte 137 - .byte 205 - .byte 139 - .byte 143 - .byte 27 - .byte 31 - .byte 75 - .byte 79 - .byte 203 - .byte 207 - .byte 133 - .byte 192 - .byte 64 - .byte 21 - .byte 69 - .byte 16 - .byte 128 - .byte 197 - .byte 130 - .byte 135 - .byte 18 - .byte 23 - .byte 66 - .byte 71 - .byte 194 - .byte 199 - .byte 150 - .byte 147 - .byte 3 - .byte 6 - .byte 86 - .byte 83 - .byte 211 - .byte 214 - .byte 209 - .byte 148 - .byte 81 - .byte 4 - .byte 1 - .byte 84 - .byte 145 - .byte 212 - .byte 156 - .byte 216 - .byte 88 - .byte 12 - .byte 92 - .byte 8 - .byte 152 - .byte 220 - .byte 154 - .byte 158 - .byte 10 - .byte 14 - .byte 90 - .byte 94 - .byte 218 - .byte 222 - .byte 149 - .byte 208 - .byte 80 - .byte 5 - .byte 85 - .byte 0 - .byte 144 - .byte 213 - .byte 146 - .byte 151 - .byte 2 - .byte 7 - .byte 82 - .byte 87 - .byte 210 - .byte 215 - .byte 157 - .byte 217 - .byte 89 - .byte 13 - .byte 93 - .byte 9 - .byte 153 - .byte 221 - .byte 155 - .byte 159 - .byte 11 - .byte 15 - .byte 91 - .byte 95 - .byte 219 - .byte 223 - .byte 22 - .byte 19 - .byte 131 - .byte 134 - .byte 70 - .byte 67 - .byte 195 - .byte 198 - .byte 65 - .byte 20 - .byte 193 - .byte 132 - .byte 17 - .byte 68 - .byte 129 - .byte 196 - .byte 28 - .byte 72 - .byte 200 - .byte 140 - .byte 76 - .byte 24 - .byte 136 - .byte 204 - .byte 26 - .byte 
30 - .byte 138 - .byte 142 - .byte 74 - .byte 78 - .byte 202 - .byte 206 - .byte 53 - .byte 96 - .byte 224 - .byte 165 - .byte 101 - .byte 48 - .byte 160 - .byte 229 - .byte 50 - .byte 55 - .byte 162 - .byte 167 - .byte 98 - .byte 103 - .byte 226 - .byte 231 - .byte 61 - .byte 105 - .byte 233 - .byte 173 - .byte 109 - .byte 57 - .byte 169 - .byte 237 - .byte 59 - .byte 63 - .byte 171 - .byte 175 - .byte 107 - .byte 111 - .byte 235 - .byte 239 - .byte 38 - .byte 35 - .byte 179 - .byte 182 - .byte 118 - .byte 115 - .byte 243 - .byte 246 - .byte 113 - .byte 36 - .byte 241 - .byte 180 - .byte 33 - .byte 116 - .byte 177 - .byte 244 - .byte 44 - .byte 120 - .byte 248 - .byte 188 - .byte 124 - .byte 40 - .byte 184 - .byte 252 - .byte 42 - .byte 46 - .byte 186 - .byte 190 - .byte 122 - .byte 126 - .byte 250 - .byte 254 - .byte 37 - .byte 112 - .byte 240 - .byte 181 - .byte 117 - .byte 32 - .byte 176 - .byte 245 - .byte 34 - .byte 39 - .byte 178 - .byte 183 - .byte 114 - .byte 119 - .byte 242 - .byte 247 - .byte 45 - .byte 121 - .byte 249 - .byte 189 - .byte 125 - .byte 41 - .byte 185 - .byte 253 - .byte 43 - .byte 47 - .byte 187 - .byte 191 - .byte 123 - .byte 127 - .byte 251 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_2, @object - .size table_2, 256 -table_2: - .byte 0 - .byte 2 - .byte 4 - .byte 6 - .byte 8 - .byte 10 - .byte 12 - .byte 14 - .byte 16 - .byte 18 - .byte 20 - .byte 22 - .byte 24 - .byte 26 - .byte 28 - .byte 30 - .byte 32 - .byte 34 - .byte 36 - .byte 38 - .byte 40 - .byte 42 - .byte 44 - .byte 46 - .byte 48 - .byte 50 - .byte 52 - .byte 54 - .byte 56 - .byte 58 - .byte 60 - .byte 62 - .byte 65 - .byte 67 - .byte 69 - .byte 71 - .byte 73 - .byte 75 - .byte 77 - .byte 79 - .byte 81 - .byte 83 - .byte 85 - .byte 87 - .byte 89 - .byte 91 - .byte 93 - .byte 95 - .byte 97 - .byte 99 - .byte 101 - .byte 103 - .byte 105 - .byte 107 - .byte 109 - .byte 111 - .byte 113 - .byte 115 - .byte 117 - .byte 119 - .byte 121 - .byte 123 - .byte 125 - .byte 127 - .byte 128 - .byte 130 - .byte 132 - .byte 134 - .byte 136 - .byte 138 - .byte 140 - .byte 142 - .byte 144 - .byte 146 - .byte 148 - .byte 150 - .byte 152 - .byte 154 - .byte 156 - .byte 158 - .byte 160 - .byte 162 - .byte 164 - .byte 166 - .byte 168 - .byte 170 - .byte 172 - .byte 174 - .byte 176 - .byte 178 - .byte 180 - .byte 182 - .byte 184 - .byte 186 - .byte 188 - .byte 190 - .byte 193 - .byte 195 - .byte 197 - .byte 199 - .byte 201 - .byte 203 - .byte 205 - .byte 207 - .byte 209 - .byte 211 - .byte 213 - .byte 215 - .byte 217 - .byte 219 - .byte 221 - .byte 223 - .byte 225 - .byte 227 - .byte 229 - .byte 231 - .byte 233 - .byte 235 - .byte 237 - .byte 239 - .byte 241 - .byte 243 - .byte 245 - .byte 247 - .byte 249 - .byte 251 - .byte 253 - .byte 255 - .byte 1 - .byte 3 - .byte 5 - .byte 7 - .byte 9 - .byte 11 - .byte 13 - .byte 15 - .byte 17 - .byte 19 - .byte 21 - .byte 23 - .byte 25 - .byte 27 - .byte 29 - .byte 31 - .byte 33 - .byte 35 - .byte 37 - .byte 39 - .byte 41 - .byte 43 - .byte 45 - .byte 47 - .byte 49 - .byte 51 - .byte 53 - .byte 55 - .byte 57 - .byte 59 - .byte 61 - .byte 63 - .byte 64 - .byte 66 - .byte 68 - .byte 70 - .byte 72 - .byte 74 - .byte 76 - .byte 78 - .byte 80 - .byte 82 - .byte 84 - .byte 86 - .byte 88 - .byte 90 - .byte 92 - .byte 94 - .byte 96 - .byte 98 - .byte 100 - .byte 102 - .byte 104 - .byte 106 - .byte 108 - .byte 110 - .byte 112 - .byte 114 - .byte 116 - .byte 118 - .byte 120 - .byte 122 - .byte 124 - .byte 126 - .byte 129 - .byte 131 - .byte 133 - .byte 135 - 
.byte 137 - .byte 139 - .byte 141 - .byte 143 - .byte 145 - .byte 147 - .byte 149 - .byte 151 - .byte 153 - .byte 155 - .byte 157 - .byte 159 - .byte 161 - .byte 163 - .byte 165 - .byte 167 - .byte 169 - .byte 171 - .byte 173 - .byte 175 - .byte 177 - .byte 179 - .byte 181 - .byte 183 - .byte 185 - .byte 187 - .byte 189 - .byte 191 - .byte 192 - .byte 194 - .byte 196 - .byte 198 - .byte 200 - .byte 202 - .byte 204 - .byte 206 - .byte 208 - .byte 210 - .byte 212 - .byte 214 - .byte 216 - .byte 218 - .byte 220 - .byte 222 - .byte 224 - .byte 226 - .byte 228 - .byte 230 - .byte 232 - .byte 234 - .byte 236 - .byte 238 - .byte 240 - .byte 242 - .byte 244 - .byte 246 - .byte 248 - .byte 250 - .byte 252 - .byte 254 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_3, @object - .size table_3, 256 -table_3: - .byte 0 - .byte 128 - .byte 1 - .byte 129 - .byte 2 - .byte 130 - .byte 3 - .byte 131 - .byte 4 - .byte 132 - .byte 5 - .byte 133 - .byte 6 - .byte 134 - .byte 7 - .byte 135 - .byte 8 - .byte 136 - .byte 9 - .byte 137 - .byte 10 - .byte 138 - .byte 11 - .byte 139 - .byte 12 - .byte 140 - .byte 13 - .byte 141 - .byte 14 - .byte 142 - .byte 15 - .byte 143 - .byte 16 - .byte 144 - .byte 17 - .byte 145 - .byte 18 - .byte 146 - .byte 19 - .byte 147 - .byte 20 - .byte 148 - .byte 21 - .byte 149 - .byte 22 - .byte 150 - .byte 23 - .byte 151 - .byte 24 - .byte 152 - .byte 25 - .byte 153 - .byte 26 - .byte 154 - .byte 27 - .byte 155 - .byte 28 - .byte 156 - .byte 29 - .byte 157 - .byte 30 - .byte 158 - .byte 31 - .byte 159 - .byte 160 - .byte 32 - .byte 161 - .byte 33 - .byte 162 - .byte 34 - .byte 163 - .byte 35 - .byte 164 - .byte 36 - .byte 165 - .byte 37 - .byte 166 - .byte 38 - .byte 167 - .byte 39 - .byte 168 - .byte 40 - .byte 169 - .byte 41 - .byte 170 - .byte 42 - .byte 171 - .byte 43 - .byte 172 - .byte 44 - .byte 173 - .byte 45 - .byte 174 - .byte 46 - .byte 175 - .byte 47 - .byte 176 - .byte 48 - .byte 177 - .byte 49 - .byte 178 - .byte 50 - .byte 179 - .byte 51 - .byte 180 - .byte 52 - .byte 181 - .byte 53 - .byte 182 - .byte 54 - .byte 183 - .byte 55 - .byte 184 - .byte 56 - .byte 185 - .byte 57 - .byte 186 - .byte 58 - .byte 187 - .byte 59 - .byte 188 - .byte 60 - .byte 189 - .byte 61 - .byte 190 - .byte 62 - .byte 191 - .byte 63 - .byte 64 - .byte 192 - .byte 65 - .byte 193 - .byte 66 - .byte 194 - .byte 67 - .byte 195 - .byte 68 - .byte 196 - .byte 69 - .byte 197 - .byte 70 - .byte 198 - .byte 71 - .byte 199 - .byte 72 - .byte 200 - .byte 73 - .byte 201 - .byte 74 - .byte 202 - .byte 75 - .byte 203 - .byte 76 - .byte 204 - .byte 77 - .byte 205 - .byte 78 - .byte 206 - .byte 79 - .byte 207 - .byte 80 - .byte 208 - .byte 81 - .byte 209 - .byte 82 - .byte 210 - .byte 83 - .byte 211 - .byte 84 - .byte 212 - .byte 85 - .byte 213 - .byte 86 - .byte 214 - .byte 87 - .byte 215 - .byte 88 - .byte 216 - .byte 89 - .byte 217 - .byte 90 - .byte 218 - .byte 91 - .byte 219 - .byte 92 - .byte 220 - .byte 93 - .byte 221 - .byte 94 - .byte 222 - .byte 95 - .byte 223 - .byte 224 - .byte 96 - .byte 225 - .byte 97 - .byte 226 - .byte 98 - .byte 227 - .byte 99 - .byte 228 - .byte 100 - .byte 229 - .byte 101 - .byte 230 - .byte 102 - .byte 231 - .byte 103 - .byte 232 - .byte 104 - .byte 233 - .byte 105 - .byte 234 - .byte 106 - .byte 235 - .byte 107 - .byte 236 - .byte 108 - .byte 237 - .byte 109 - .byte 238 - .byte 110 - .byte 239 - .byte 111 - .byte 240 - .byte 112 - .byte 241 - .byte 113 - .byte 242 - .byte 114 - .byte 243 - .byte 115 - .byte 244 - .byte 116 - .byte 245 - .byte 117 - 
.byte 246 - .byte 118 - .byte 247 - .byte 119 - .byte 248 - .byte 120 - .byte 249 - .byte 121 - .byte 250 - .byte 122 - .byte 251 - .byte 123 - .byte 252 - .byte 124 - .byte 253 - .byte 125 - .byte 254 - .byte 126 - .byte 255 - .byte 127 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_4, @object - .size table_4, 112 -table_4: - .byte 1 - .byte 0 - .byte 3 - .byte 0 - .byte 7 - .byte 0 - .byte 15 - .byte 0 - .byte 15 - .byte 1 - .byte 14 - .byte 3 - .byte 13 - .byte 3 - .byte 11 - .byte 3 - .byte 7 - .byte 3 - .byte 15 - .byte 2 - .byte 14 - .byte 1 - .byte 12 - .byte 3 - .byte 9 - .byte 3 - .byte 3 - .byte 3 - .byte 7 - .byte 2 - .byte 14 - .byte 0 - .byte 13 - .byte 1 - .byte 10 - .byte 3 - .byte 5 - .byte 3 - .byte 11 - .byte 2 - .byte 6 - .byte 1 - .byte 12 - .byte 2 - .byte 8 - .byte 1 - .byte 0 - .byte 3 - .byte 1 - .byte 2 - .byte 2 - .byte 0 - .byte 5 - .byte 0 - .byte 11 - .byte 0 - .byte 7 - .byte 1 - .byte 14 - .byte 2 - .byte 12 - .byte 1 - .byte 8 - .byte 3 - .byte 1 - .byte 3 - .byte 3 - .byte 2 - .byte 6 - .byte 0 - .byte 13 - .byte 0 - .byte 11 - .byte 1 - .byte 6 - .byte 3 - .byte 13 - .byte 2 - .byte 10 - .byte 1 - .byte 4 - .byte 3 - .byte 9 - .byte 2 - .byte 2 - .byte 1 - .byte 4 - .byte 2 - .byte 8 - .byte 0 - .byte 1 - .byte 1 - .byte 2 - .byte 2 - .byte 4 - .byte 0 - .byte 9 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 2 - .byte 12 - .byte 0 - .byte 9 - .byte 1 - .byte 2 - .byte 3 - .byte 5 - .byte 2 - .byte 10 - .byte 0 - - .text -.global skinny_128_384_init - .type skinny_128_384_init, @function -skinny_128_384_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,12 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_384_init, .-skinny_128_384_init - - .text -.global skinny_128_384_encrypt - .type skinny_128_384_encrypt, @function -skinny_128_384_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - std Y+33,r18 - std Y+34,r19 - std Y+35,r20 - std Y+36,r21 - ldd r18,Z+36 - ldd r19,Z+37 - ldd r20,Z+38 - ldd r21,Z+39 - std Y+37,r18 - std Y+38,r19 - std Y+39,r20 - std Y+40,r21 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - std 
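(The skinny_128_384_init routine above is a plain 12-word copy loop: it moves the full 48-byte tweakey into the key schedule, which is why the C call sites in this patch now build a 48-byte buffer and no longer pass a length argument. A sketch of the equivalent C; treating the schedule as a flat 48-byte prefix is an assumption about its layout.)

#include <string.h>

/* C view of the AVR copy loop: 12 iterations x 4 bytes = 48 bytes,
 * i.e. TK1 || TK2 || TK3 stored verbatim at the start of the schedule. */
void skinny_128_384_init_sketch(void *ks, const unsigned char key[48])
{
    memcpy(ks, key, 48);
}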
Y+41,r18 - std Y+42,r19 - std Y+43,r20 - std Y+44,r21 - ldd r18,Z+44 - ldd r19,Z+45 - ldd r20,Z+46 - ldd r21,Z+47 - std Y+45,r18 - std Y+46,r19 - std Y+47,r20 - std Y+48,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -114: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif 
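(The long run of mov/elpm — or lpm/ld, depending on the device — pairs above is one S-box layer: each of the sixteen state bytes indexes the 256-entry table kept in program memory, with RAMPZ only needed on parts that have more than 64 KiB of flash. Roughly the same step in C, as a sketch with a hypothetical helper name:)

#include <stdint.h>

/* Apply a 256-entry S-box table (table_0 in the generated code) to all
 * sixteen bytes of the SKINNY-128 state. */
static void skinny128_sbox_layer(uint8_t state[16], const uint8_t table[256])
{
    for (int i = 0; i < 16; ++i)
        state[i] = table[state[i]];
}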
defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z 
-#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm 
r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif 
defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif 
defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm 
r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - 
eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,112 - brne 5721f - rjmp 790f -5721: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif 
defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 114b -790: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_encrypt, .-skinny_128_384_encrypt - -.global skinny_128_384_encrypt_tk_full - .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt - - .text -.global skinny_128_384_decrypt - .type skinny_128_384_decrypt, @function -skinny_128_384_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r23 - std Y+2,r2 - std Y+3,r21 - std Y+4,r20 - std Y+5,r3 - std Y+6,r18 - std Y+7,r19 - std Y+8,r22 - std Y+9,r9 - std Y+10,r10 - std Y+11,r7 - std Y+12,r6 - std Y+13,r11 - std Y+14,r4 - std Y+15,r5 - std Y+16,r8 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r23 - std Y+18,r2 - std Y+19,r21 - std Y+20,r20 - std Y+21,r3 - std Y+22,r18 - std Y+23,r19 - std Y+24,r22 - std Y+25,r9 - std Y+26,r10 - std Y+27,r7 - std Y+28,r6 - std Y+29,r11 - std Y+30,r4 - std Y+31,r5 - std Y+32,r8 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r22,Z+36 - ldd r23,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+33,r23 - std Y+34,r2 - std Y+35,r21 - std Y+36,r20 - std Y+37,r3 - std Y+38,r18 - std Y+39,r19 - std Y+40,r22 - std Y+41,r9 - std Y+42,r10 - std Y+43,r7 - std Y+44,r6 - std Y+45,r11 - std Y+46,r4 - std Y+47,r5 - std Y+48,r8 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -122: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if 
defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 122b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,28 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -150: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 150b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 -179: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm 
r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 179b - std Y+33,r12 - std Y+34,r13 - std Y+35,r14 - std Y+36,r15 - std Y+37,r24 - std Y+38,r25 - std Y+39,r16 - std Y+40,r17 - ldi r26,28 - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 -207: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 207b - std Y+41,r12 - std Y+42,r13 - std Y+43,r14 - std Y+44,r15 - std Y+45,r24 - std Y+46,r25 - std Y+47,r16 - std Y+48,r17 - ldi r26,112 -227: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z 
-#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - 
ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd 
r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - 
eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else 
- lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if 
defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld 
r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out 
_SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z 
-#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 903f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 227b -903: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_decrypt, .-skinny_128_384_decrypt - - .text -.global skinny_128_256_init - .type skinny_128_256_init, @function -skinny_128_256_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,8 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_256_init, .-skinny_128_256_init - - .text -.global skinny_128_256_encrypt - .type skinny_128_256_encrypt, @function -skinny_128_256_encrypt: - push r28 - 
push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -82: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - 
lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if 
defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm 
r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 
- std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - 
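
The repeated elpm/lpm/ld conditionals above are the generated flash-table lookups for the lookup tables used by this implementation (table_0 ... table_4): ZH and, where present, RAMPZ hold the table's address page, and only ZL (r30) is reloaded with the index byte, which implies each table sits on a 256-byte boundary. A rough C analogue of one such lookup is sketched below; the table name, its contents, and the alignment attribute are illustrative assumptions, not taken from this patch.

#include <stdint.h>
#include <avr/pgmspace.h>

/* Hypothetical 256-entry table kept in program memory, aligned so that every
 * entry shares the same high address byte (mirrors ZH staying fixed above). */
static const uint8_t sbox_table[256] PROGMEM
    __attribute__((aligned(256))) = { 0 /* ... real table bytes elided ... */ };

static inline uint8_t sbox_lookup(uint8_t x)
{
    /* pgm_read_byte expands to an lpm-based access for flash below 64 KiB;
     * the RAMPZ/elpm branch in the assembly corresponds to the "far" access
     * needed on larger devices (pgm_read_byte_far). */
    return pgm_read_byte(&sbox_table[x]);
}
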
mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,96 - breq 594f - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 82b -594: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_encrypt, .-skinny_128_256_encrypt - -.global skinny_128_256_encrypt_tk_full - .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt - - .text -.global skinny_128_256_decrypt - .type skinny_128_256_decrypt, @function -skinny_128_256_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - std Y+5,r22 - std Y+6,r23 - std Y+7,r2 - std Y+8,r3 - std Y+9,r4 - std Y+10,r5 - std Y+11,r6 - std Y+12,r7 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - std Y+21,r22 - std Y+22,r23 - std Y+23,r2 - std Y+24,r3 - std Y+25,r4 - std Y+26,r5 - std Y+27,r6 - std Y+28,r7 - std Y+29,r8 - std Y+30,r9 - std Y+31,r10 - std Y+32,r11 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,24 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -90: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif 
defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 90b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,24 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -118: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 118b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,96 -139: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm 
- mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif 
- mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldi 
r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std 
Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif 
defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 651f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 139b -651: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_decrypt, .-skinny_128_256_decrypt - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-skinny128.c b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-skinny128.c deleted file mode 100644 index 579ced1..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-skinny128.c +++ /dev/null @@ -1,801 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-skinny128.h" -#include "internal-skinnyutil.h" -#include "internal-util.h" -#include - -#if !defined(__AVR__) - -STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) -{ - /* This function is used to fast-forward the TK1 tweak value - * to the value at the end of the key schedule for decryption. - * - * The tweak permutation repeats every 16 rounds, so SKINNY-128-256 - * with 48 rounds does not need any fast forwarding applied. - * SKINNY-128-128 with 40 rounds and SKINNY-128-384 with 56 rounds - * are equivalent to applying the permutation 8 times: - * - * PT*8 = [5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12] - */ - uint32_t row0 = tk[0]; - uint32_t row1 = tk[1]; - uint32_t row2 = tk[2]; - uint32_t row3 = tk[3]; - tk[0] = ((row1 >> 8) & 0x0000FFFFU) | - ((row0 >> 8) & 0x00FF0000U) | - ((row0 << 8) & 0xFF000000U); - tk[1] = ((row1 >> 24) & 0x000000FFU) | - ((row0 << 8) & 0x00FFFF00U) | - ((row1 << 24) & 0xFF000000U); - tk[2] = ((row3 >> 8) & 0x0000FFFFU) | - ((row2 >> 8) & 0x00FF0000U) | - ((row2 << 8) & 0xFF000000U); - tk[3] = ((row3 >> 24) & 0x000000FFU) | - ((row2 << 8) & 0x00FFFF00U) | - ((row3 << 24) & 0xFF000000U); -} - -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); - memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); -#else - /* Set the initial states of TK1, TK2, and TK3 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Set up the key schedule using TK2 and TK3. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. 
- * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ TK3[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ TK3[1] ^ (rc >> 4); - - /* Permute TK2 and TK3 for the next round */ - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - - /* Apply the LFSR's to TK2 and TK3 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } -#endif -} - -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0x15; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Permute TK1 to fast-forward it to the end of the key schedule */ - skinny128_fast_forward_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_fast_forward_tk(TK2); - skinny128_fast_forward_tk(TK3); - for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
- skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - skinny128_LFSR3(TK3[2]); - skinny128_LFSR3(TK3[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_inv_permute_tk(TK3); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); - skinny128_LFSR2(TK3[2]); - skinny128_LFSR2(TK3[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); - TK2[0] = le_load_word32(tk2); - TK2[1] = le_load_word32(tk2 + 4); - TK2[2] = le_load_word32(tk2 + 8); - TK2[3] = le_load_word32(tk2 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; - s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
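
The decryption rounds above undo one encryption round by running the inverse MixColumns first and the inverse ShiftRows second, mirroring the forward order. Below is a minimal, self-contained round-trip check of that word-level mixing; the rotation helper is defined locally rather than relying on the leftRotate macros from internal-util.h, and the S-box and subkey steps are deliberately left out.

#include <stdio.h>
#include <stdint.h>

static inline uint32_t rotl32(uint32_t x, unsigned n)
{
    return (x << n) | (x >> (32u - n));
}

/* Forward ShiftRows + MixColumns, as in the encryption loops above. */
static void round_forward(uint32_t s[4])
{
    uint32_t temp;
    s[1] = rotl32(s[1], 8);
    s[2] = rotl32(s[2], 16);
    s[3] = rotl32(s[3], 24);
    s[1] ^= s[2];
    s[2] ^= s[0];
    temp = s[3] ^ s[2];
    s[3] = s[2];
    s[2] = s[1];
    s[1] = s[0];
    s[0] = temp;
}

/* Inverse MixColumns + inverse ShiftRows, as in the decryption loop above. */
static void round_inverse(uint32_t s[4])
{
    uint32_t temp = s[3];
    s[3] = s[0];
    s[0] = s[1];
    s[1] = s[2];
    s[3] ^= temp;
    s[2] = temp ^ s[0];
    s[1] ^= s[2];
    s[1] = rotl32(s[1], 24);
    s[2] = rotl32(s[2], 16);
    s[3] = rotl32(s[3], 8);
}

int main(void)
{
    uint32_t s[4] = {0x03020100u, 0x07060504u, 0x0B0A0908u, 0x0F0E0D0Cu};
    uint32_t saved[4];
    for (int i = 0; i < 4; ++i)
        saved[i] = s[i];
    round_forward(s);
    round_inverse(s);
    int ok = 1;
    for (int i = 0; i < 4; ++i)
        ok &= (s[i] == saved[i]);
    printf("inverse round undoes forward round: %s\n", ok ? "yes" : "no");
    return 0;
}
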
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK3); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); -#else - /* Set the initial states of TK1 and TK2 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Set up the key schedule using TK2. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ (rc >> 4); - - /* Permute TK2 for the next round */ - skinny128_permute_tk(TK2); - - /* Apply the LFSR to TK2 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } -#endif -} - -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0x09; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1. 
- * There is no need to fast-forward TK1 because the value at - * the end of the key schedule is the same as at the start */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2. - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -#else /* __AVR__ */ - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - memcpy(ks->TK2, tk2, 16); - skinny_128_384_encrypt(ks, output, input); -} - -#endif /* __AVR__ */ diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-skinny128.h b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-skinny128.h deleted file mode 100644 index 2bfda3c..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-skinny128.h +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNY128_H -#define LW_INTERNAL_SKINNY128_H - -/** - * \file internal-skinny128.h - * \brief SKINNY-128 block cipher family. - * - * References: https://eprint.iacr.org/2016/660.pdf, - * https://sites.google.com/site/skinnycipher/ - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \def SKINNY_128_SMALL_SCHEDULE - * \brief Defined to 1 to use the small key schedule version of SKINNY-128. - */ -#if defined(__AVR__) -#define SKINNY_128_SMALL_SCHEDULE 1 -#else -#define SKINNY_128_SMALL_SCHEDULE 0 -#endif - -/** - * \brief Size of a block for SKINNY-128 block ciphers. - */ -#define SKINNY_128_BLOCK_SIZE 16 - -/** - * \brief Number of rounds for SKINNY-128-384. - */ -#define SKINNY_128_384_ROUNDS 56 - -/** - * \brief Structure of the key schedule for SKINNY-128-384. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; - - /** TK3 for the small key schedule */ - uint8_t TK3[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_384_ROUNDS * 2]; -#endif - -} skinny_128_384_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-384. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and an explicitly - * provided TK2 value. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tk2 TK2 value that should be updated on the fly. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when both TK1 and TK2 change from block to block. - * When the key is initialized with skinny_128_384_init(), the TK2 part of - * the key value should be set to zero. - * - * \note Some versions of this function may modify the key schedule to - * copy tk2 into place. - */ -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and a - * fully specified tweakey value. - * - * \param key Points to the 384-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-384 but - * more memory-efficient. - */ -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input); - -/** - * \brief Number of rounds for SKINNY-128-256. - */ -#define SKINNY_128_256_ROUNDS 48 - -/** - * \brief Structure of the key schedule for SKINNY-128-256. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_256_ROUNDS * 2]; -#endif - -} skinny_128_256_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256 and a - * fully specified tweakey value. - * - * \param key Points to the 256-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-256 but - * more memory-efficient. - */ -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-skinnyutil.h b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-skinnyutil.h deleted file mode 100644 index 83136cb..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-skinnyutil.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNYUTIL_H -#define LW_INTERNAL_SKINNYUTIL_H - -/** - * \file internal-skinnyutil.h - * \brief Utilities to help implement SKINNY and its variants. - */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @cond skinnyutil */ - -/* Utilities for implementing SKINNY-128 */ - -#define skinny128_LFSR2(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ - (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ - } while (0) - - -#define skinny128_LFSR3(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ - (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) -#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) - -#define skinny128_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint32_t row2 = tk[2]; \ - uint32_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 16) | (row3 >> 16); \ - tk[0] = ((row2 >> 8) & 0x000000FFU) | \ - ((row2 << 16) & 0x00FF0000U) | \ - ( row3 & 0xFF00FF00U); \ - tk[1] = ((row2 >> 16) & 0x000000FFU) | \ - (row2 & 0xFF000000U) | \ - ((row3 << 8) & 0x0000FF00U) | \ - ( row3 & 0x00FF0000U); \ - } while (0) - -#define skinny128_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint32_t row0 = tk[0]; \ - uint32_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 >> 16) & 0x000000FFU) | \ - ((row0 << 8) & 0x0000FF00U) | \ - ((row1 << 16) & 0x00FF0000U) | \ - ( row1 & 0xFF000000U); \ - tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ - ((row0 << 16) & 0xFF000000U) | \ - ((row1 >> 16) & 0x000000FFU) | \ - ((row1 << 8) & 0x00FF0000U); \ - } while (0) - -/* - * Apply the SKINNY sbox. The original version from the specification is - * equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE(x) - * ((((x) & 0x01010101U) << 2) | - * (((x) & 0x06060606U) << 5) | - * (((x) & 0x20202020U) >> 5) | - * (((x) & 0xC8C8C8C8U) >> 2) | - * (((x) & 0x10101010U) >> 1)) - * - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * return SBOX_SWAP(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. 
- */ -#define skinny128_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ - y = (((x >> 5) & (x << 1)) & 0x04040404U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ - x = ((x & 0x08080808U) << 1) | \ - ((x & 0x32323232U) << 2) | \ - ((x & 0x01010101U) << 5) | \ - ((x & 0x80808080U) >> 6) | \ - ((x & 0x40404040U) >> 4) | \ - ((x & 0x04040404U) >> 2); \ -} while (0) - -/* - * Apply the inverse of the SKINNY sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE_INV(x) - * ((((x) & 0x08080808U) << 1) | - * (((x) & 0x32323232U) << 2) | - * (((x) & 0x01010101U) << 5) | - * (((x) & 0xC0C0C0C0U) >> 5) | - * (((x) & 0x04040404U) >> 2)) - * - * x = SBOX_SWAP(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_inv_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ - x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ - y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ - x = ((x & 0x01010101U) << 2) | \ - ((x & 0x04040404U) << 4) | \ - ((x & 0x02020202U) << 6) | \ - ((x & 0x20202020U) >> 5) | \ - ((x & 0xC8C8C8C8U) >> 2) | \ - ((x & 0x10101010U) >> 1); \ -} while (0) - -/* Utilities for implementing SKINNY-64 */ - -#define skinny64_LFSR2(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ - } while (0) - -#define skinny64_LFSR3(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) -#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) - -#define skinny64_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint16_t row2 = tk[2]; \ - uint16_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 8) | (row3 >> 8); \ - tk[0] = ((row2 << 4) & 0xF000U) | \ - ((row2 >> 8) & 0x00F0U) | \ - ( row3 & 0x0F0FU); \ - tk[1] = ((row2 << 8) & 0xF000U) | \ - ((row3 >> 4) & 0x0F00U) | \ - ( row3 & 0x00F0U) | \ - ( row2 & 0x000FU); \ - } while (0) - 
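/*
 * The inverse relationships documented above -- skinny128_LFSR2 and
 * skinny128_LFSR3 undoing each other on every byte, and
 * skinny128_inv_permute_tk undoing skinny128_permute_tk -- can be
 * exercised with a minimal self-check along the following lines.
 * This is only an illustrative sketch, not part of the header itself:
 * it assumes the header is reachable as "internal-skinnyutil.h" on the
 * include path and that a hosted C99 environment with <assert.h> is used.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include "internal-skinnyutil.h"

int main(void)
{
    uint32_t word = 0x12345678U;
    uint32_t tk[4]    = {0x03020100U, 0x07060504U, 0x0B0A0908U, 0x0F0E0D0CU};
    uint32_t saved[4] = {0x03020100U, 0x07060504U, 0x0B0A0908U, 0x0F0E0D0CU};

    /* LFSR3 undoes LFSR2 (and vice versa) on every byte of the word */
    skinny128_LFSR2(word);
    skinny128_LFSR3(word);
    assert(word == 0x12345678U);

    /* The inverse tweakey permutation PT' undoes the forward permutation PT */
    skinny128_permute_tk(tk);
    skinny128_inv_permute_tk(tk);
    assert(tk[0] == saved[0] && tk[1] == saved[1] &&
           tk[2] == saved[2] && tk[3] == saved[3]);

    return 0;
}
#endif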
-#define skinny64_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint16_t row0 = tk[0]; \ - uint16_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 << 8) & 0xF000U) | \ - ((row0 >> 4) & 0x0F00U) | \ - ((row1 >> 8) & 0x00F0U) | \ - ( row1 & 0x000FU); \ - tk[3] = ((row1 << 8) & 0xF000U) | \ - ((row0 << 8) & 0x0F00U) | \ - ((row1 >> 4) & 0x00F0U) | \ - ((row0 >> 8) & 0x000FU); \ - } while (0) - -/* - * Apply the SKINNY-64 sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT(x) - * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_SHIFT steps to be performed with one final rotation. - * This reduces the number of required shift operations from 14 to 10. - * - * We can further reduce the number of NOT operations from 4 to 2 - * using the technique from https://github.com/kste/skinny_avx to - * convert NOR-XOR operations into AND-XOR operations by converting - * the S-box into its NOT-inverse. - */ -#define skinny64_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ - x = ~x; \ - x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ -} while (0) - -/* - * Apply the inverse of the SKINNY-64 sbox. The original version - * from the specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT_INV(x) - * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * return SBOX_MIX(x); - */ -#define skinny64_inv_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = ~x; \ - x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ -} while (0) - -/** @endcond */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-util.h b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - 
(ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. 
This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/skinny-aead.c b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/skinny-aead.c deleted file mode 100644 index 7558527..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/skinny-aead.c +++ /dev/null @@ -1,804 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "skinny-aead.h" -#include "internal-skinny128.h" -#include "internal-util.h" -#include - -aead_cipher_t const skinny_aead_m1_cipher = { - "SKINNY-AEAD-M1", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M1_NONCE_SIZE, - SKINNY_AEAD_M1_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m1_encrypt, - skinny_aead_m1_decrypt -}; - -aead_cipher_t const skinny_aead_m2_cipher = { - "SKINNY-AEAD-M2", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M2_NONCE_SIZE, - SKINNY_AEAD_M2_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m2_encrypt, - skinny_aead_m2_decrypt -}; - -aead_cipher_t const skinny_aead_m3_cipher = { - "SKINNY-AEAD-M3", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M3_NONCE_SIZE, - SKINNY_AEAD_M3_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m3_encrypt, - skinny_aead_m3_decrypt -}; - -aead_cipher_t const skinny_aead_m4_cipher = { - "SKINNY-AEAD-M4", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M4_NONCE_SIZE, - SKINNY_AEAD_M4_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m4_encrypt, - skinny_aead_m4_decrypt -}; - -aead_cipher_t const skinny_aead_m5_cipher = { - "SKINNY-AEAD-M5", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M5_NONCE_SIZE, - SKINNY_AEAD_M5_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m5_encrypt, - skinny_aead_m5_decrypt -}; - -aead_cipher_t const skinny_aead_m6_cipher = { - "SKINNY-AEAD-M6", - SKINNY_AEAD_KEY_SIZE, - SKINNY_AEAD_M6_NONCE_SIZE, - SKINNY_AEAD_M6_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - skinny_aead_m6_encrypt, - skinny_aead_m6_decrypt -}; - -/* Domain separator prefixes for all of the SKINNY-AEAD family members */ -#define DOMAIN_SEP_M1 0x00 -#define DOMAIN_SEP_M2 0x10 -#define DOMAIN_SEP_M3 0x08 -#define DOMAIN_SEP_M4 0x18 -#define DOMAIN_SEP_M5 0x10 -#define DOMAIN_SEP_M6 0x18 - -/** - * \brief Initialize the key and nonce for SKINNY-128-384 based AEAD schemes. - * - * \param ks The key schedule to initialize. - * \param key Points to the 16 bytes of the key. - * \param nonce Points to the nonce. - * \param nonce_len Length of the nonce in bytes. - */ -static void skinny_aead_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - const unsigned char *nonce, unsigned nonce_len) -{ - unsigned char k[48]; - memset(k, 0, 16); - memcpy(k + 16, nonce, nonce_len); - memset(k + 16 + nonce_len, 0, 16 - nonce_len); - memcpy(k + 32, key, 16); - skinny_128_384_init(ks, k); -} - -/** - * \brief Set the domain separation value in the tweak for SKINNY-128-384. - * - * \param ks Key schedule for the block cipher. - * \param d Domain separation value to write into the tweak. - */ -#define skinny_aead_128_384_set_domain(ks,d) ((ks)->TK1[15] = (d)) - -/** - * \brief Sets the LFSR field in the tweak for SKINNY-128-384. - * - * \param ks Key schedule for the block cipher. - * \param lfsr 64-bit LFSR value. - */ -#define skinny_aead_128_384_set_lfsr(ks,lfsr) le_store_word64((ks)->TK1, (lfsr)) - -/** - * \brief Updates the LFSR value for SKINNY-128-384. - * - * \param lfsr 64-bit LFSR value to be updated. - */ -#define skinny_aead_128_384_update_lfsr(lfsr) \ - do { \ - uint8_t feedback = ((lfsr) & (1ULL << 63)) ? 0x1B : 0x00; \ - (lfsr) = ((lfsr) << 1) ^ feedback; \ - } while (0) - -/** - * \brief Authenticates the associated data for a SKINNY-128-384 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param tag Final tag to XOR the authentication checksum into. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
- */ -static void skinny_aead_128_384_authenticate - (skinny_128_384_key_schedule_t *ks, unsigned char prefix, - unsigned char tag[SKINNY_128_BLOCK_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint64_t lfsr = 1; - skinny_aead_128_384_set_domain(ks, prefix | 2); - while (adlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_128_384_encrypt(ks, block, ad); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - ad += SKINNY_128_BLOCK_SIZE; - adlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_384_update_lfsr(lfsr); - } - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_aead_128_384_set_domain(ks, prefix | 3); - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, SKINNY_128_BLOCK_SIZE - temp - 1); - skinny_128_384_encrypt(ks, block, block); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - } -} - -/** - * \brief Encrypts the plaintext for a SKINNY-128-384 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the plaintext buffer. - * \param mlen Number of bytes of plaintext to be encrypted. - */ -static void skinny_aead_128_384_encrypt - (skinny_128_384_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint64_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_384_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_384_set_lfsr(ks, lfsr); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - skinny_128_384_encrypt(ks, c, m); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_384_update_lfsr(lfsr); - } - skinny_aead_128_384_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_384_set_domain(ks, prefix | 1); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_384_encrypt(ks, block, block); - lw_xor_block_2_src(c, block, m, temp); - skinny_aead_128_384_update_lfsr(lfsr); - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_aead_128_384_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_384_set_domain(ks, prefix | 4); - } - skinny_128_384_encrypt(ks, sum, sum); -} - -/** - * \brief Decrypts the ciphertext for a SKINNY-128-384 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the ciphertext buffer. - * \param mlen Number of bytes of ciphertext to be decrypted. 
- */ -static void skinny_aead_128_384_decrypt - (skinny_128_384_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint64_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_384_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_128_384_decrypt(ks, m, c); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_384_update_lfsr(lfsr); - } - skinny_aead_128_384_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_384_set_domain(ks, prefix | 1); - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_384_encrypt(ks, block, block); - lw_xor_block_2_src(m, block, c, temp); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - skinny_aead_128_384_update_lfsr(lfsr); - skinny_aead_128_384_set_lfsr(ks, lfsr); - skinny_aead_128_384_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_384_set_domain(ks, prefix | 4); - } - skinny_128_384_encrypt(ks, sum, sum); -} - -int skinny_aead_m1_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M1_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M1_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M1, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M1, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M1_TAG_SIZE); - return 0; -} - -int skinny_aead_m1_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M1_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M1_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M1_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M1, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M1, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M1_TAG_SIZE); -} - -int skinny_aead_m2_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* 
Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M2_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M2_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M2, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M2, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M2_TAG_SIZE); - return 0; -} - -int skinny_aead_m2_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M2_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M2_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M2_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M2, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M2, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M2_TAG_SIZE); -} - -int skinny_aead_m3_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M3_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M3_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M3, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M3, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M3_TAG_SIZE); - return 0; -} - -int skinny_aead_m3_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M3_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M3_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M3_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M3, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M3, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M3_TAG_SIZE); -} - -int skinny_aead_m4_encrypt - (unsigned char *c, 
unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M4_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M4_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_384_encrypt(&ks, DOMAIN_SEP_M4, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M4, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M4_TAG_SIZE); - return 0; -} - -int skinny_aead_m4_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_384_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M4_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M4_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_384_init(&ks, k, npub, SKINNY_AEAD_M4_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_384_decrypt(&ks, DOMAIN_SEP_M4, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_384_authenticate(&ks, DOMAIN_SEP_M4, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M4_TAG_SIZE); -} - -/** - * \brief Initialize the key and nonce for SKINNY-128-256 based AEAD schemes. - * - * \param ks The key schedule to initialize. - * \param key Points to the 16 bytes of the key. - * \param nonce Points to the nonce. - * \param nonce_len Length of the nonce in bytes. - */ -static void skinny_aead_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - const unsigned char *nonce, unsigned nonce_len) -{ - unsigned char k[32]; - memset(k, 0, 16 - nonce_len); - memcpy(k + 16 - nonce_len, nonce, nonce_len); - memcpy(k + 16, key, 16); - skinny_128_256_init(ks, k); -} - -/** - * \brief Set the domain separation value in the tweak for SKINNY-128-256. - * - * \param ks Key schedule for the block cipher. - * \param d Domain separation value to write into the tweak. - */ -#define skinny_aead_128_256_set_domain(ks,d) ((ks)->TK1[3] = (d)) - -/** - * \brief Sets the LFSR field in the tweak for SKINNY-128-256. - * - * \param ks Key schedule for the block cipher. - * \param lfsr 24-bit LFSR value. - */ -#define skinny_aead_128_256_set_lfsr(ks,lfsr) \ - do { \ - (ks)->TK1[0] = (uint8_t)(lfsr); \ - (ks)->TK1[1] = (uint8_t)((lfsr) >> 8); \ - (ks)->TK1[2] = (uint8_t)((lfsr) >> 16); \ - } while (0) - -/** - * \brief Updates the LFSR value for SKINNY-128-256. - * - * \param lfsr 24-bit LFSR value to be updated. - */ -#define skinny_aead_128_256_update_lfsr(lfsr) \ - do { \ - uint32_t feedback = ((lfsr) & (((uint32_t)1) << 23)) ? 0x1B : 0x00; \ - (lfsr) = ((lfsr) << 1) ^ (feedback); \ - } while (0) - -/** - * \brief Authenticates the associated data for a SKINNY-128-256 based AEAD. - * - * \param ks The key schedule to use. 
- * \param prefix Domain separation prefix for the family member. - * \param tag Final tag to XOR the authentication checksum into. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - */ -static void skinny_aead_128_256_authenticate - (skinny_128_256_key_schedule_t *ks, unsigned char prefix, - unsigned char tag[SKINNY_128_BLOCK_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint32_t lfsr = 1; - skinny_aead_128_256_set_domain(ks, prefix | 2); - while (adlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_128_256_encrypt(ks, block, ad); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - ad += SKINNY_128_BLOCK_SIZE; - adlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_256_update_lfsr(lfsr); - } - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_aead_128_256_set_domain(ks, prefix | 3); - memcpy(block, ad, temp); - block[temp] = 0x80; - memset(block + temp + 1, 0, SKINNY_128_BLOCK_SIZE - temp - 1); - skinny_128_256_encrypt(ks, block, block); - lw_xor_block(tag, block, SKINNY_128_BLOCK_SIZE); - } -} - -/** - * \brief Encrypts the plaintext for a SKINNY-128-256 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param c Points to the buffer to receive the ciphertext. - * \param m Points to the plaintext buffer. - * \param mlen Number of bytes of plaintext to be encrypted. - */ -static void skinny_aead_128_256_encrypt - (skinny_128_256_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint32_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_256_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_256_set_lfsr(ks, lfsr); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - skinny_128_256_encrypt(ks, c, m); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_256_update_lfsr(lfsr); - } - skinny_aead_128_256_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_256_set_domain(ks, prefix | 1); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_256_encrypt(ks, block, block); - lw_xor_block_2_src(c, block, m, temp); - skinny_aead_128_256_update_lfsr(lfsr); - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_aead_128_256_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_256_set_domain(ks, prefix | 4); - } - skinny_128_256_encrypt(ks, sum, sum); -} - -/** - * \brief Decrypts the ciphertext for a SKINNY-128-256 based AEAD. - * - * \param ks The key schedule to use. - * \param prefix Domain separation prefix for the family member. - * \param sum Authenticated checksum over the plaintext. - * \param m Points to the buffer to receive the plaintext. - * \param c Points to the ciphertext buffer. - * \param mlen Number of bytes of ciphertext to be decrypted. 
- */ -static void skinny_aead_128_256_decrypt - (skinny_128_256_key_schedule_t *ks, unsigned char prefix, - unsigned char sum[SKINNY_128_BLOCK_SIZE], unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - unsigned char block[SKINNY_128_BLOCK_SIZE]; - uint32_t lfsr = 1; - memset(sum, 0, SKINNY_128_BLOCK_SIZE); - skinny_aead_128_256_set_domain(ks, prefix | 0); - while (mlen >= SKINNY_128_BLOCK_SIZE) { - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_128_256_decrypt(ks, m, c); - lw_xor_block(sum, m, SKINNY_128_BLOCK_SIZE); - c += SKINNY_128_BLOCK_SIZE; - m += SKINNY_128_BLOCK_SIZE; - mlen -= SKINNY_128_BLOCK_SIZE; - skinny_aead_128_256_update_lfsr(lfsr); - } - skinny_aead_128_256_set_lfsr(ks, lfsr); - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - skinny_aead_128_256_set_domain(ks, prefix | 1); - memset(block, 0, SKINNY_128_BLOCK_SIZE); - skinny_128_256_encrypt(ks, block, block); - lw_xor_block_2_src(m, block, c, temp); - lw_xor_block(sum, m, temp); - sum[temp] ^= 0x80; - skinny_aead_128_256_update_lfsr(lfsr); - skinny_aead_128_256_set_lfsr(ks, lfsr); - skinny_aead_128_256_set_domain(ks, prefix | 5); - } else { - skinny_aead_128_256_set_domain(ks, prefix | 4); - } - skinny_128_256_encrypt(ks, sum, sum); -} - -int skinny_aead_m5_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M5_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M5_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_256_encrypt(&ks, DOMAIN_SEP_M5, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M5, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M5_TAG_SIZE); - return 0; -} - -int skinny_aead_m5_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M5_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M5_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M5_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_256_decrypt(&ks, DOMAIN_SEP_M5, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M5, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M5_TAG_SIZE); -} - -int skinny_aead_m6_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* 
Set the length of the returned ciphertext */ - *clen = mlen + SKINNY_AEAD_M6_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M6_NONCE_SIZE); - - /* Encrypt to plaintext to produce the ciphertext */ - skinny_aead_128_256_encrypt(&ks, DOMAIN_SEP_M6, sum, c, m, mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M6, sum, ad, adlen); - - /* Generate the authentication tag */ - memcpy(c + mlen, sum, SKINNY_AEAD_M6_TAG_SIZE); - return 0; -} - -int skinny_aead_m6_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - skinny_128_256_key_schedule_t ks; - unsigned char sum[SKINNY_128_BLOCK_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SKINNY_AEAD_M6_TAG_SIZE) - return -1; - *mlen = clen - SKINNY_AEAD_M6_TAG_SIZE; - - /* Set up the key schedule with the key and the nonce */ - skinny_aead_128_256_init(&ks, k, npub, SKINNY_AEAD_M6_NONCE_SIZE); - - /* Decrypt to ciphertext to produce the plaintext */ - skinny_aead_128_256_decrypt(&ks, DOMAIN_SEP_M6, sum, m, c, *mlen); - - /* Process the associated data */ - skinny_aead_128_256_authenticate(&ks, DOMAIN_SEP_M6, sum, ad, adlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, sum, c + *mlen, SKINNY_AEAD_M6_TAG_SIZE); -} diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/skinny-aead.h b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/skinny-aead.h deleted file mode 100644 index c6b54fb..0000000 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys-avr/skinny-aead.h +++ /dev/null @@ -1,518 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SKINNY_AEAD_H -#define LWCRYPTO_SKINNY_AEAD_H - -#include "aead-common.h" - -/** - * \file skinny-aead.h - * \brief Authenticated encryption based on the SKINNY block cipher. - * - * SKINNY-AEAD is a family of authenticated encryption algorithms - * that are built around the SKINNY tweakable block cipher. 
There - * are six members in the family: - * - * \li SKINNY-AEAD-M1 has a 128-bit key, a 128-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. This is the - * primary member of the family. - * \li SKINNY-AEAD-M2 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li SKINNY-AEAD-M3 has a 128-bit key, a 128-bit nonce, and a 64-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li SKINNY-AEAD-M4 has a 128-bit key, a 96-bit nonce, and a 64-bit tag, - * based around the SKINNY-128-384 tweakable block cipher. - * \li SKINNY-AEAD-M5 has a 128-bit key, a 96-bit nonce, and a 128-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * \li SKINNY-AEAD-M6 has a 128-bit key, a 96-bit nonce, and a 64-bit tag, - * based around the SKINNY-128-256 tweakable block cipher. - * - * The SKINNY-AEAD family also includes two hash algorithms: - * - * \li SKINNY-tk3-HASH with a 256-bit hash output, based around the - * SKINNY-128-384 tweakable block cipher. This is the primary hashing - * member of the family. - * \li SKINNY-tk2-HASH with a 256-bit hash output, based around the - * SKINNY-128-256 tweakable block cipher. - * - * References: https://sites.google.com/site/skinnycipher/home - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all SKINNY-AEAD family members. - */ -#define SKINNY_AEAD_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M1. - */ -#define SKINNY_AEAD_M1_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M1. - */ -#define SKINNY_AEAD_M1_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M2. - */ -#define SKINNY_AEAD_M2_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M2. - */ -#define SKINNY_AEAD_M2_NONCE_SIZE 12 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M3. - */ -#define SKINNY_AEAD_M3_TAG_SIZE 8 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M3. - */ -#define SKINNY_AEAD_M3_NONCE_SIZE 16 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M4. - */ -#define SKINNY_AEAD_M4_TAG_SIZE 8 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M4. - */ -#define SKINNY_AEAD_M4_NONCE_SIZE 12 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M5. - */ -#define SKINNY_AEAD_M5_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M5. - */ -#define SKINNY_AEAD_M5_NONCE_SIZE 12 - -/** - * \brief Size of the authentication tag for SKINNY-AEAD-M6. - */ -#define SKINNY_AEAD_M6_TAG_SIZE 8 - -/** - * \brief Size of the nonce for SKINNY-AEAD-M6. - */ -#define SKINNY_AEAD_M6_NONCE_SIZE 12 - -/** - * \brief Meta-information block for the SKINNY-AEAD-M1 cipher. - */ -extern aead_cipher_t const skinny_aead_m1_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M2 cipher. - */ -extern aead_cipher_t const skinny_aead_m2_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M3 cipher. - */ -extern aead_cipher_t const skinny_aead_m3_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M4 cipher. - */ -extern aead_cipher_t const skinny_aead_m4_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M5 cipher. - */ -extern aead_cipher_t const skinny_aead_m5_cipher; - -/** - * \brief Meta-information block for the SKINNY-AEAD-M6 cipher. 
- */ -extern aead_cipher_t const skinny_aead_m6_cipher; - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M1. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m1_decrypt() - */ -int skinny_aead_m1_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M1. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m1_encrypt() - */ -int skinny_aead_m1_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M2. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa skinny_aead_m2_decrypt() - */ -int skinny_aead_m2_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M2. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m2_encrypt() - */ -int skinny_aead_m2_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M3. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m3_decrypt() - */ -int skinny_aead_m3_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M3. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. 
- * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m3_encrypt() - */ -int skinny_aead_m3_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M4. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m4_decrypt() - */ -int skinny_aead_m4_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M4. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m4_encrypt() - */ -int skinny_aead_m4_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M5. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m5_decrypt() - */ -int skinny_aead_m5_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M5. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m5_encrypt() - */ -int skinny_aead_m5_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SKINNY-AEAD-M6. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa skinny_aead_m6_decrypt() - */ -int skinny_aead_m6_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SKINNY-AEAD-M6. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. 
- * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa skinny_aead_m6_encrypt() - */ -int skinny_aead_m6_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/internal-skinny128-avr.S b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/internal-skinny128-avr.S new file mode 100644 index 0000000..d342cd5 --- /dev/null +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/internal-skinny128-avr.S @@ -0,0 +1,10099 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 256 +table_0: + .byte 101 + .byte 76 + .byte 106 + .byte 66 + .byte 75 + .byte 99 + .byte 67 + .byte 107 + .byte 85 + .byte 117 + .byte 90 + .byte 122 + .byte 83 + .byte 115 + .byte 91 + .byte 123 + .byte 53 + .byte 140 + .byte 58 + .byte 129 + .byte 137 + .byte 51 + .byte 128 + .byte 59 + .byte 149 + .byte 37 + .byte 152 + .byte 42 + .byte 144 + .byte 35 + .byte 153 + .byte 43 + .byte 229 + .byte 204 + .byte 232 + .byte 193 + .byte 201 + .byte 224 + .byte 192 + .byte 233 + .byte 213 + .byte 245 + .byte 216 + .byte 248 + .byte 208 + .byte 240 + .byte 217 + .byte 249 + .byte 165 + .byte 28 + .byte 168 + .byte 18 + .byte 27 + .byte 160 + .byte 19 + .byte 169 + .byte 5 + .byte 181 + .byte 10 + .byte 184 + .byte 3 + .byte 176 + .byte 11 + .byte 185 + .byte 50 + .byte 136 + .byte 60 + .byte 133 + .byte 141 + .byte 52 + .byte 132 + .byte 61 + .byte 145 + .byte 34 + .byte 156 + .byte 44 + .byte 148 + .byte 36 + .byte 157 + .byte 45 + .byte 98 + .byte 74 + .byte 108 + .byte 69 + .byte 77 + .byte 100 + .byte 68 + .byte 109 + .byte 82 + .byte 114 + .byte 92 + .byte 124 + .byte 84 + .byte 116 + .byte 93 + .byte 125 + .byte 161 + .byte 26 + .byte 172 + .byte 21 + .byte 29 + .byte 164 + .byte 20 + .byte 173 + .byte 2 + .byte 177 + .byte 12 + .byte 188 + .byte 4 + .byte 180 + .byte 13 + .byte 189 + .byte 225 + .byte 200 + .byte 236 + .byte 197 + .byte 205 + .byte 228 + .byte 196 + .byte 237 + .byte 209 + .byte 241 + .byte 220 + .byte 252 + .byte 212 + .byte 244 + .byte 221 + .byte 253 + .byte 54 + .byte 142 + .byte 56 + .byte 130 + .byte 139 + .byte 48 + .byte 131 + .byte 57 + .byte 150 + .byte 38 + .byte 154 + .byte 40 + .byte 147 + .byte 32 + .byte 155 + .byte 41 + .byte 102 + .byte 78 + .byte 104 + .byte 65 + .byte 73 + .byte 96 + .byte 64 + .byte 105 + .byte 86 + .byte 118 + .byte 88 + .byte 120 + .byte 80 + .byte 112 + .byte 89 + .byte 121 + .byte 166 + .byte 30 + .byte 170 + 
.byte 17 + .byte 25 + .byte 163 + .byte 16 + .byte 171 + .byte 6 + .byte 182 + .byte 8 + .byte 186 + .byte 0 + .byte 179 + .byte 9 + .byte 187 + .byte 230 + .byte 206 + .byte 234 + .byte 194 + .byte 203 + .byte 227 + .byte 195 + .byte 235 + .byte 214 + .byte 246 + .byte 218 + .byte 250 + .byte 211 + .byte 243 + .byte 219 + .byte 251 + .byte 49 + .byte 138 + .byte 62 + .byte 134 + .byte 143 + .byte 55 + .byte 135 + .byte 63 + .byte 146 + .byte 33 + .byte 158 + .byte 46 + .byte 151 + .byte 39 + .byte 159 + .byte 47 + .byte 97 + .byte 72 + .byte 110 + .byte 70 + .byte 79 + .byte 103 + .byte 71 + .byte 111 + .byte 81 + .byte 113 + .byte 94 + .byte 126 + .byte 87 + .byte 119 + .byte 95 + .byte 127 + .byte 162 + .byte 24 + .byte 174 + .byte 22 + .byte 31 + .byte 167 + .byte 23 + .byte 175 + .byte 1 + .byte 178 + .byte 14 + .byte 190 + .byte 7 + .byte 183 + .byte 15 + .byte 191 + .byte 226 + .byte 202 + .byte 238 + .byte 198 + .byte 207 + .byte 231 + .byte 199 + .byte 239 + .byte 210 + .byte 242 + .byte 222 + .byte 254 + .byte 215 + .byte 247 + .byte 223 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 256 +table_1: + .byte 172 + .byte 232 + .byte 104 + .byte 60 + .byte 108 + .byte 56 + .byte 168 + .byte 236 + .byte 170 + .byte 174 + .byte 58 + .byte 62 + .byte 106 + .byte 110 + .byte 234 + .byte 238 + .byte 166 + .byte 163 + .byte 51 + .byte 54 + .byte 102 + .byte 99 + .byte 227 + .byte 230 + .byte 225 + .byte 164 + .byte 97 + .byte 52 + .byte 49 + .byte 100 + .byte 161 + .byte 228 + .byte 141 + .byte 201 + .byte 73 + .byte 29 + .byte 77 + .byte 25 + .byte 137 + .byte 205 + .byte 139 + .byte 143 + .byte 27 + .byte 31 + .byte 75 + .byte 79 + .byte 203 + .byte 207 + .byte 133 + .byte 192 + .byte 64 + .byte 21 + .byte 69 + .byte 16 + .byte 128 + .byte 197 + .byte 130 + .byte 135 + .byte 18 + .byte 23 + .byte 66 + .byte 71 + .byte 194 + .byte 199 + .byte 150 + .byte 147 + .byte 3 + .byte 6 + .byte 86 + .byte 83 + .byte 211 + .byte 214 + .byte 209 + .byte 148 + .byte 81 + .byte 4 + .byte 1 + .byte 84 + .byte 145 + .byte 212 + .byte 156 + .byte 216 + .byte 88 + .byte 12 + .byte 92 + .byte 8 + .byte 152 + .byte 220 + .byte 154 + .byte 158 + .byte 10 + .byte 14 + .byte 90 + .byte 94 + .byte 218 + .byte 222 + .byte 149 + .byte 208 + .byte 80 + .byte 5 + .byte 85 + .byte 0 + .byte 144 + .byte 213 + .byte 146 + .byte 151 + .byte 2 + .byte 7 + .byte 82 + .byte 87 + .byte 210 + .byte 215 + .byte 157 + .byte 217 + .byte 89 + .byte 13 + .byte 93 + .byte 9 + .byte 153 + .byte 221 + .byte 155 + .byte 159 + .byte 11 + .byte 15 + .byte 91 + .byte 95 + .byte 219 + .byte 223 + .byte 22 + .byte 19 + .byte 131 + .byte 134 + .byte 70 + .byte 67 + .byte 195 + .byte 198 + .byte 65 + .byte 20 + .byte 193 + .byte 132 + .byte 17 + .byte 68 + .byte 129 + .byte 196 + .byte 28 + .byte 72 + .byte 200 + .byte 140 + .byte 76 + .byte 24 + .byte 136 + .byte 204 + .byte 26 + .byte 30 + .byte 138 + .byte 142 + .byte 74 + .byte 78 + .byte 202 + .byte 206 + .byte 53 + .byte 96 + .byte 224 + .byte 165 + .byte 101 + .byte 48 + .byte 160 + .byte 229 + .byte 50 + .byte 55 + .byte 162 + .byte 167 + .byte 98 + .byte 103 + .byte 226 + .byte 231 + .byte 61 + .byte 105 + .byte 233 + .byte 173 + .byte 109 + .byte 57 + .byte 169 + .byte 237 + .byte 59 + .byte 63 + .byte 171 + .byte 175 + .byte 107 + .byte 111 + .byte 235 + .byte 239 + .byte 38 + .byte 35 + .byte 179 + .byte 182 + .byte 118 + .byte 115 + .byte 243 + .byte 246 + .byte 113 + .byte 36 + .byte 241 + .byte 180 + .byte 33 + 
.byte 116 + .byte 177 + .byte 244 + .byte 44 + .byte 120 + .byte 248 + .byte 188 + .byte 124 + .byte 40 + .byte 184 + .byte 252 + .byte 42 + .byte 46 + .byte 186 + .byte 190 + .byte 122 + .byte 126 + .byte 250 + .byte 254 + .byte 37 + .byte 112 + .byte 240 + .byte 181 + .byte 117 + .byte 32 + .byte 176 + .byte 245 + .byte 34 + .byte 39 + .byte 178 + .byte 183 + .byte 114 + .byte 119 + .byte 242 + .byte 247 + .byte 45 + .byte 121 + .byte 249 + .byte 189 + .byte 125 + .byte 41 + .byte 185 + .byte 253 + .byte 43 + .byte 47 + .byte 187 + .byte 191 + .byte 123 + .byte 127 + .byte 251 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_2, @object + .size table_2, 256 +table_2: + .byte 0 + .byte 2 + .byte 4 + .byte 6 + .byte 8 + .byte 10 + .byte 12 + .byte 14 + .byte 16 + .byte 18 + .byte 20 + .byte 22 + .byte 24 + .byte 26 + .byte 28 + .byte 30 + .byte 32 + .byte 34 + .byte 36 + .byte 38 + .byte 40 + .byte 42 + .byte 44 + .byte 46 + .byte 48 + .byte 50 + .byte 52 + .byte 54 + .byte 56 + .byte 58 + .byte 60 + .byte 62 + .byte 65 + .byte 67 + .byte 69 + .byte 71 + .byte 73 + .byte 75 + .byte 77 + .byte 79 + .byte 81 + .byte 83 + .byte 85 + .byte 87 + .byte 89 + .byte 91 + .byte 93 + .byte 95 + .byte 97 + .byte 99 + .byte 101 + .byte 103 + .byte 105 + .byte 107 + .byte 109 + .byte 111 + .byte 113 + .byte 115 + .byte 117 + .byte 119 + .byte 121 + .byte 123 + .byte 125 + .byte 127 + .byte 128 + .byte 130 + .byte 132 + .byte 134 + .byte 136 + .byte 138 + .byte 140 + .byte 142 + .byte 144 + .byte 146 + .byte 148 + .byte 150 + .byte 152 + .byte 154 + .byte 156 + .byte 158 + .byte 160 + .byte 162 + .byte 164 + .byte 166 + .byte 168 + .byte 170 + .byte 172 + .byte 174 + .byte 176 + .byte 178 + .byte 180 + .byte 182 + .byte 184 + .byte 186 + .byte 188 + .byte 190 + .byte 193 + .byte 195 + .byte 197 + .byte 199 + .byte 201 + .byte 203 + .byte 205 + .byte 207 + .byte 209 + .byte 211 + .byte 213 + .byte 215 + .byte 217 + .byte 219 + .byte 221 + .byte 223 + .byte 225 + .byte 227 + .byte 229 + .byte 231 + .byte 233 + .byte 235 + .byte 237 + .byte 239 + .byte 241 + .byte 243 + .byte 245 + .byte 247 + .byte 249 + .byte 251 + .byte 253 + .byte 255 + .byte 1 + .byte 3 + .byte 5 + .byte 7 + .byte 9 + .byte 11 + .byte 13 + .byte 15 + .byte 17 + .byte 19 + .byte 21 + .byte 23 + .byte 25 + .byte 27 + .byte 29 + .byte 31 + .byte 33 + .byte 35 + .byte 37 + .byte 39 + .byte 41 + .byte 43 + .byte 45 + .byte 47 + .byte 49 + .byte 51 + .byte 53 + .byte 55 + .byte 57 + .byte 59 + .byte 61 + .byte 63 + .byte 64 + .byte 66 + .byte 68 + .byte 70 + .byte 72 + .byte 74 + .byte 76 + .byte 78 + .byte 80 + .byte 82 + .byte 84 + .byte 86 + .byte 88 + .byte 90 + .byte 92 + .byte 94 + .byte 96 + .byte 98 + .byte 100 + .byte 102 + .byte 104 + .byte 106 + .byte 108 + .byte 110 + .byte 112 + .byte 114 + .byte 116 + .byte 118 + .byte 120 + .byte 122 + .byte 124 + .byte 126 + .byte 129 + .byte 131 + .byte 133 + .byte 135 + .byte 137 + .byte 139 + .byte 141 + .byte 143 + .byte 145 + .byte 147 + .byte 149 + .byte 151 + .byte 153 + .byte 155 + .byte 157 + .byte 159 + .byte 161 + .byte 163 + .byte 165 + .byte 167 + .byte 169 + .byte 171 + .byte 173 + .byte 175 + .byte 177 + .byte 179 + .byte 181 + .byte 183 + .byte 185 + .byte 187 + .byte 189 + .byte 191 + .byte 192 + .byte 194 + .byte 196 + .byte 198 + .byte 200 + .byte 202 + .byte 204 + .byte 206 + .byte 208 + .byte 210 + .byte 212 + .byte 214 + .byte 216 + .byte 218 + .byte 220 + .byte 222 + .byte 224 + .byte 226 + .byte 228 + .byte 230 + .byte 232 + .byte 234 + 
.byte 236 + .byte 238 + .byte 240 + .byte 242 + .byte 244 + .byte 246 + .byte 248 + .byte 250 + .byte 252 + .byte 254 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_3, @object + .size table_3, 256 +table_3: + .byte 0 + .byte 128 + .byte 1 + .byte 129 + .byte 2 + .byte 130 + .byte 3 + .byte 131 + .byte 4 + .byte 132 + .byte 5 + .byte 133 + .byte 6 + .byte 134 + .byte 7 + .byte 135 + .byte 8 + .byte 136 + .byte 9 + .byte 137 + .byte 10 + .byte 138 + .byte 11 + .byte 139 + .byte 12 + .byte 140 + .byte 13 + .byte 141 + .byte 14 + .byte 142 + .byte 15 + .byte 143 + .byte 16 + .byte 144 + .byte 17 + .byte 145 + .byte 18 + .byte 146 + .byte 19 + .byte 147 + .byte 20 + .byte 148 + .byte 21 + .byte 149 + .byte 22 + .byte 150 + .byte 23 + .byte 151 + .byte 24 + .byte 152 + .byte 25 + .byte 153 + .byte 26 + .byte 154 + .byte 27 + .byte 155 + .byte 28 + .byte 156 + .byte 29 + .byte 157 + .byte 30 + .byte 158 + .byte 31 + .byte 159 + .byte 160 + .byte 32 + .byte 161 + .byte 33 + .byte 162 + .byte 34 + .byte 163 + .byte 35 + .byte 164 + .byte 36 + .byte 165 + .byte 37 + .byte 166 + .byte 38 + .byte 167 + .byte 39 + .byte 168 + .byte 40 + .byte 169 + .byte 41 + .byte 170 + .byte 42 + .byte 171 + .byte 43 + .byte 172 + .byte 44 + .byte 173 + .byte 45 + .byte 174 + .byte 46 + .byte 175 + .byte 47 + .byte 176 + .byte 48 + .byte 177 + .byte 49 + .byte 178 + .byte 50 + .byte 179 + .byte 51 + .byte 180 + .byte 52 + .byte 181 + .byte 53 + .byte 182 + .byte 54 + .byte 183 + .byte 55 + .byte 184 + .byte 56 + .byte 185 + .byte 57 + .byte 186 + .byte 58 + .byte 187 + .byte 59 + .byte 188 + .byte 60 + .byte 189 + .byte 61 + .byte 190 + .byte 62 + .byte 191 + .byte 63 + .byte 64 + .byte 192 + .byte 65 + .byte 193 + .byte 66 + .byte 194 + .byte 67 + .byte 195 + .byte 68 + .byte 196 + .byte 69 + .byte 197 + .byte 70 + .byte 198 + .byte 71 + .byte 199 + .byte 72 + .byte 200 + .byte 73 + .byte 201 + .byte 74 + .byte 202 + .byte 75 + .byte 203 + .byte 76 + .byte 204 + .byte 77 + .byte 205 + .byte 78 + .byte 206 + .byte 79 + .byte 207 + .byte 80 + .byte 208 + .byte 81 + .byte 209 + .byte 82 + .byte 210 + .byte 83 + .byte 211 + .byte 84 + .byte 212 + .byte 85 + .byte 213 + .byte 86 + .byte 214 + .byte 87 + .byte 215 + .byte 88 + .byte 216 + .byte 89 + .byte 217 + .byte 90 + .byte 218 + .byte 91 + .byte 219 + .byte 92 + .byte 220 + .byte 93 + .byte 221 + .byte 94 + .byte 222 + .byte 95 + .byte 223 + .byte 224 + .byte 96 + .byte 225 + .byte 97 + .byte 226 + .byte 98 + .byte 227 + .byte 99 + .byte 228 + .byte 100 + .byte 229 + .byte 101 + .byte 230 + .byte 102 + .byte 231 + .byte 103 + .byte 232 + .byte 104 + .byte 233 + .byte 105 + .byte 234 + .byte 106 + .byte 235 + .byte 107 + .byte 236 + .byte 108 + .byte 237 + .byte 109 + .byte 238 + .byte 110 + .byte 239 + .byte 111 + .byte 240 + .byte 112 + .byte 241 + .byte 113 + .byte 242 + .byte 114 + .byte 243 + .byte 115 + .byte 244 + .byte 116 + .byte 245 + .byte 117 + .byte 246 + .byte 118 + .byte 247 + .byte 119 + .byte 248 + .byte 120 + .byte 249 + .byte 121 + .byte 250 + .byte 122 + .byte 251 + .byte 123 + .byte 252 + .byte 124 + .byte 253 + .byte 125 + .byte 254 + .byte 126 + .byte 255 + .byte 127 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_4, @object + .size table_4, 112 +table_4: + .byte 1 + .byte 0 + .byte 3 + .byte 0 + .byte 7 + .byte 0 + .byte 15 + .byte 0 + .byte 15 + .byte 1 + .byte 14 + .byte 3 + .byte 13 + .byte 3 + .byte 11 + .byte 3 + .byte 7 + .byte 3 + .byte 15 + .byte 2 + .byte 14 + .byte 1 + .byte 12 + .byte 3 + 
.byte 9 + .byte 3 + .byte 3 + .byte 3 + .byte 7 + .byte 2 + .byte 14 + .byte 0 + .byte 13 + .byte 1 + .byte 10 + .byte 3 + .byte 5 + .byte 3 + .byte 11 + .byte 2 + .byte 6 + .byte 1 + .byte 12 + .byte 2 + .byte 8 + .byte 1 + .byte 0 + .byte 3 + .byte 1 + .byte 2 + .byte 2 + .byte 0 + .byte 5 + .byte 0 + .byte 11 + .byte 0 + .byte 7 + .byte 1 + .byte 14 + .byte 2 + .byte 12 + .byte 1 + .byte 8 + .byte 3 + .byte 1 + .byte 3 + .byte 3 + .byte 2 + .byte 6 + .byte 0 + .byte 13 + .byte 0 + .byte 11 + .byte 1 + .byte 6 + .byte 3 + .byte 13 + .byte 2 + .byte 10 + .byte 1 + .byte 4 + .byte 3 + .byte 9 + .byte 2 + .byte 2 + .byte 1 + .byte 4 + .byte 2 + .byte 8 + .byte 0 + .byte 1 + .byte 1 + .byte 2 + .byte 2 + .byte 4 + .byte 0 + .byte 9 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 2 + .byte 12 + .byte 0 + .byte 9 + .byte 1 + .byte 2 + .byte 3 + .byte 5 + .byte 2 + .byte 10 + .byte 0 + + .text +.global skinny_128_384_init + .type skinny_128_384_init, @function +skinny_128_384_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,12 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_384_init, .-skinny_128_384_init + + .text +.global skinny_128_384_encrypt + .type skinny_128_384_encrypt, @function +skinny_128_384_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + std Y+33,r18 + std Y+34,r19 + std Y+35,r20 + std Y+36,r21 + ldd r18,Z+36 + ldd r19,Z+37 + ldd r20,Z+38 + ldd r21,Z+39 + std Y+37,r18 + std Y+38,r19 + std Y+39,r20 + std Y+40,r21 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + std Y+41,r18 + std Y+42,r19 + std Y+43,r20 + std Y+44,r21 + ldd r18,Z+44 + ldd r19,Z+45 + ldd r20,Z+46 + ldd r21,Z+47 + std Y+45,r18 + std Y+46,r19 + std Y+47,r20 + std Y+48,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +114: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor 
r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov 
r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if 
defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd 
r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov 
r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + 
elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 
+#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,112 + brne 5721f + rjmp 790f +5721: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if 
defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 114b +790: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + 
st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_encrypt, .-skinny_128_384_encrypt + +.global skinny_128_384_encrypt_tk_full + .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt + + .text +.global skinny_128_384_decrypt + .type skinny_128_384_decrypt, @function +skinny_128_384_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r23 + std Y+2,r2 + std Y+3,r21 + std Y+4,r20 + std Y+5,r3 + std Y+6,r18 + std Y+7,r19 + std Y+8,r22 + std Y+9,r9 + std Y+10,r10 + std Y+11,r7 + std Y+12,r6 + std Y+13,r11 + std Y+14,r4 + std Y+15,r5 + std Y+16,r8 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r23 + std Y+18,r2 + std Y+19,r21 + std Y+20,r20 + std Y+21,r3 + std Y+22,r18 + std Y+23,r19 + std Y+24,r22 + std Y+25,r9 + std Y+26,r10 + std Y+27,r7 + std Y+28,r6 + std Y+29,r11 + std Y+30,r4 + std Y+31,r5 + std Y+32,r8 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r22,Z+36 + ldd r23,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+33,r23 + std Y+34,r2 + std Y+35,r21 + std Y+36,r20 + std Y+37,r3 + std Y+38,r18 + std Y+39,r19 + std Y+40,r22 + std Y+41,r9 + std Y+42,r10 + std Y+43,r7 + std Y+44,r6 + std Y+45,r11 + std Y+46,r4 + std Y+47,r5 + std Y+48,r8 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +122: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + 
ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 122b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,28 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +150: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 150b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 +179: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + 
mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 179b + std Y+33,r12 + std Y+34,r13 + std Y+35,r14 + std Y+36,r15 + std Y+37,r24 + std Y+38,r25 + std Y+39,r16 + std Y+40,r17 + ldi r26,28 + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 +207: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 207b + std Y+41,r12 + std Y+42,r13 + std Y+43,r14 + std Y+44,r15 + std Y+45,r24 + std Y+46,r25 + std Y+47,r16 + std Y+48,r17 + ldi r26,112 +227: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z 
+#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + 
eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z 
+#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + 
elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z 
+#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm 
r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 903f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 227b +903: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_decrypt, .-skinny_128_384_decrypt + + .text +.global skinny_128_256_init + .type skinny_128_256_init, @function +skinny_128_256_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,8 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_256_init, .-skinny_128_256_init + + .text +.global skinny_128_256_encrypt + .type skinny_128_256_encrypt, @function +skinny_128_256_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std 
Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +82: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 
+#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov 
r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor 
r20,r0 + ldd r0,Y+32 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z 
+#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,96 + breq 594f + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld 
r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 82b +594: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_encrypt, .-skinny_128_256_encrypt + +.global skinny_128_256_encrypt_tk_full + .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt + + .text +.global skinny_128_256_decrypt + .type skinny_128_256_decrypt, @function +skinny_128_256_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + std Y+5,r22 + std Y+6,r23 + std Y+7,r2 + std Y+8,r3 + std Y+9,r4 + std Y+10,r5 + std Y+11,r6 + std Y+12,r7 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + std Y+21,r22 + std Y+22,r23 + std Y+23,r2 + std Y+24,r3 + std Y+25,r4 + std Y+26,r5 + std Y+27,r6 + std Y+28,r7 + std Y+29,r8 + std Y+30,r9 + std Y+31,r10 + std Y+32,r11 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,24 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +90: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 
+#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 90b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,24 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +118: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 118b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,96 +139: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov 
r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov 
r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + 
mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif 
defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi 
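The ldd/std pairs in this part of the routine permute one 8-byte half of the stored tweakey in place between rounds. Reading the load/store pairs off directly for the Y+1..Y+8 block (the other halves are handled the same way, some with an extra table lookup in between), the shuffle is out[i] = in[p[i]] with p = {2, 0, 4, 7, 6, 3, 5, 1}. A direct, illustrative C transcription:

#include <stdint.h>
#include <string.h>

/* In-place shuffle of one 8-byte tweakey half, transcribed from the
 * ldd/std pairs above: std Y+1,r14 stores the byte loaded from Y+3,
 * std Y+2,r12 the byte loaded from Y+1, and so on. */
static void shuffle_tweakey_half(uint8_t half[8])
{
    static const uint8_t p[8] = { 2, 0, 4, 7, 6, 3, 5, 1 };
    uint8_t in[8];
    int i;
    memcpy(in, half, 8);
    for (i = 0; i < 8; ++i)
        half[i] = in[p[i]];
}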
r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 651f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 139b +651: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_decrypt, .-skinny_128_256_decrypt + +#endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/internal-skinny128.c b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/internal-skinny128.c index 65ba4ed..579ced1 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/internal-skinny128.c +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/internal-skinny128.c @@ -25,6 +25,8 @@ #include "internal-util.h" #include +#if !defined(__AVR__) + STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) { /* This function is used to fast-forward the TK1 tweak value @@ -55,42 +57,33 @@ STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) ((row3 << 24) & 0xFF000000U); } -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - size_t key_len) +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t TK3[4]; uint32_t *schedule; unsigned round; uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || (key_len != 32 && key_len != 48)) - return 0; - +#endif + +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the 
small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); + memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); +#else /* Set the initial states of TK1, TK2, and TK3 */ - if (key_len == 32) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - TK3[0] = le_load_word32(key + 16); - TK3[1] = le_load_word32(key + 20); - TK3[2] = le_load_word32(key + 24); - TK3[3] = le_load_word32(key + 28); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); + TK3[0] = le_load_word32(key + 32); + TK3[1] = le_load_word32(key + 36); + TK3[2] = le_load_word32(key + 40); + TK3[3] = le_load_word32(key + 44); /* Set up the key schedule using TK2 and TK3. TK1 is not added * to the key schedule because we will derive that part of the @@ -116,20 +109,7 @@ int skinny_128_384_init skinny128_LFSR3(TK3[0]); skinny128_LFSR3(TK3[1]); } - return 1; -} - -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_384_encrypt @@ -138,7 +118,13 @@ void skinny_128_384_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -148,14 +134,24 @@ void skinny_128_384_encrypt s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -163,8 +159,15 @@ void skinny_128_384_encrypt skinny128_sbox(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -185,6 +188,16 @@ void 
skinny_128_384_encrypt /* Permute TK1 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_permute_tk(TK3); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -200,7 +213,13 @@ void skinny_128_384_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0x15; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -215,15 +234,47 @@ void skinny_128_384_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Permute TK1 to fast-forward it to the end of the key schedule */ skinny128_fast_forward_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_fast_forward_tk(TK2); + skinny128_fast_forward_tk(TK3); + for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2 and TK3. + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); + skinny128_LFSR3(TK3[2]); + skinny128_LFSR3(TK3[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_inv_permute_tk(TK3); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); + skinny128_LFSR2(TK3[2]); + skinny128_LFSR2(TK3[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -240,8 +291,15 @@ void skinny_128_384_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -259,13 +317,18 @@ void skinny_128_384_decrypt } void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2) { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; uint32_t TK2[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK3[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -275,7 +338,7 @@ void skinny_128_384_encrypt_tk2 s2 = le_load_word32(input + 8); s3 = le_load_word32(input + 12); - /* Make a local copy of the tweakable part of the state, TK1/TK2 */ + /* Make a local copy of the tweakable part of the state */ TK1[0] = le_load_word32(ks->TK1); TK1[1] = 
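With SKINNY_128_SMALL_SCHEDULE the 6-bit round constant is no longer read from a precomputed schedule: the encrypt paths step it forward with rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01 masked to six bits (low nibble XORed into row 0, upper bits into row 1, with the fixed 0x02 into row 2), and the decrypt paths step it backwards with rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20). The two expressions are exact inverses, which the small hosted sketch below checks over all 64 states; the function names and the test harness are illustrative, the expressions are the ones from the hunks above.

#include <assert.h>
#include <stdint.h>

/* Forward step of the 6-bit round-constant LFSR (small-schedule encrypt). */
static uint8_t rc_forward(uint8_t rc)
{
    rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
    return rc & 0x3F;
}

/* Backward step (small-schedule decrypt). */
static uint8_t rc_backward(uint8_t rc)
{
    return (uint8_t)((rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20));
}

int main(void)
{
    unsigned x;
    /* Walking backwards from the final value visits the same constants
     * that the forward LFSR produced, in reverse order. */
    for (x = 0; x < 64; ++x)
        assert(rc_backward(rc_forward((uint8_t)x)) == (uint8_t)x);
    return 0;
}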
le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); @@ -284,9 +347,15 @@ void skinny_128_384_encrypt_tk2 TK2[1] = le_load_word32(tk2 + 4); TK2[2] = le_load_word32(tk2 + 8); TK2[3] = le_load_word32(tk2 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); @@ -294,8 +363,15 @@ void skinny_128_384_encrypt_tk2 skinny128_sbox(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -319,6 +395,13 @@ void skinny_128_384_encrypt_tk2 skinny128_permute_tk(TK2); skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK3); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -408,33 +491,27 @@ void skinny_128_384_encrypt_tk_full le_store_word32(output + 12, s3); } -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len) +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) { +#if !SKINNY_128_SMALL_SCHEDULE uint32_t TK2[4]; uint32_t *schedule; unsigned round; uint8_t rc; +#endif - /* Validate the parameters */ - if (!ks || !key || (key_len != 16 && key_len != 32)) - return 0; - +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); +#else /* Set the initial states of TK1 and TK2 */ - if (key_len == 16) { - memset(ks->TK1, 0, sizeof(ks->TK1)); - TK2[0] = le_load_word32(key); - TK2[1] = le_load_word32(key + 4); - TK2[2] = le_load_word32(key + 8); - TK2[3] = le_load_word32(key + 12); - } else { - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - } + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); /* Set up the key schedule using TK2. 
TK1 is not added * to the key schedule because we will derive that part of the @@ -457,20 +534,7 @@ int skinny_128_256_init skinny128_LFSR2(TK2[0]); skinny128_LFSR2(TK2[1]); } - return 1; -} - -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len) -{ - /* Validate the parameters */ - if (!ks || !tweak || tweak_len != 16) - return 0; - - /* Set TK1 directly from the tweak value */ - memcpy(ks->TK1, tweak, 16); - return 1; +#endif } void skinny_128_256_encrypt @@ -479,7 +543,12 @@ void skinny_128_256_encrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0; +#else const uint32_t *schedule = ks->k; +#endif uint32_t temp; unsigned round; @@ -494,18 +563,31 @@ void skinny_128_256_encrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); +#endif /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Apply the S-box to all bytes in the state */ skinny128_sbox(s0); skinny128_sbox(s1); skinny128_sbox(s2); skinny128_sbox(s3); - /* Apply the subkey for this round */ + /* XOR the round constant and the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; +#endif s2 ^= 0x02; /* Shift the cells in the rows right, which moves the cell @@ -524,8 +606,15 @@ void skinny_128_256_encrypt s1 = s0; s0 = temp; - /* Permute TK1 for the next round */ + /* Permute TK1 and TK2 for the next round */ skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); +#else + schedule += 2; +#endif } /* Pack the result into the output buffer */ @@ -541,7 +630,12 @@ void skinny_128_256_decrypt { uint32_t s0, s1, s2, s3; uint32_t TK1[4]; - const uint32_t *schedule; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0x09; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); +#endif uint32_t temp; unsigned round; @@ -558,12 +652,29 @@ void skinny_128_256_decrypt TK1[1] = le_load_word32(ks->TK1 + 4); TK1[2] = le_load_word32(ks->TK1 + 8); TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2. 
+ skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + } +#endif /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule -= 2) { + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { /* Inverse permutation on TK1 for this round */ skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); +#endif /* Inverse mix of the columns */ temp = s3; @@ -580,8 +691,15 @@ void skinny_128_256_decrypt s3 = leftRotate8(s3); /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else s0 ^= schedule[0] ^ TK1[0]; s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif s2 ^= 0x02; /* Apply the inverse of the S-box to all bytes in the state */ @@ -670,142 +788,14 @@ void skinny_128_256_encrypt_tk_full le_store_word32(output + 12, s3); } -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len) -{ - uint32_t TK1[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; - - /* Validate the parameters */ - if (!ks || !key || key_len != 16) - return 0; - - /* Set the initial state of TK1 */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); +#else /* __AVR__ */ - /* Set up the key schedule using TK1 */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK1[0] ^ (rc & 0x0F); - schedule[1] = TK1[1] ^ (rc >> 4); - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); - } - return 1; -} - -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) +void skinny_128_384_encrypt_tk2 + (skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, const unsigned char *tk2) { - uint32_t s0, s1, s2, s3; - const uint32_t *schedule = ks->k; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule += 2) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
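For decryption the small-schedule code first fast-forwards TK2 (and TK3 in the 384-bit variant): skinny128_fast_forward_tk undoes the net byte permutation, and the loop that counts in steps of two rounds applies the byte LFSR to every word once per pair of rounds, which is how often each word is stepped over a full encryption. The inverse rounds then undo one step at a time, which is why they apply skinny128_LFSR3 to TK2 and skinny128_LFSR2 to TK3: each byte LFSR is the inverse of the other. The actual macros are defined elsewhere in internal-skinny128.c; the packed 32-bit forms below are the standard SKINNY-128 tweakey LFSRs and are an assumption of this sketch, which only checks the round-trip property.

#include <assert.h>
#include <stdint.h>

/* Assumed packed forms of the SKINNY-128 tweakey byte LFSRs:
 * LFSR2: each byte (x7..x0) -> (x6..x0, x7 ^ x5)
 * LFSR3: each byte (x7..x0) -> (x0 ^ x6, x7..x1) */
static uint32_t lfsr2(uint32_t x)
{
    return ((x << 1) & 0xFEFEFEFEU) ^ (((x >> 7) ^ (x >> 5)) & 0x01010101U);
}

static uint32_t lfsr3(uint32_t x)
{
    return ((x >> 1) & 0x7F7F7F7FU) ^ (((x << 7) ^ (x << 1)) & 0x80808080U);
}

int main(void)
{
    /* LFSR3 undoes LFSR2 and vice versa, so the decrypt loop can walk
     * the fast-forwarded tweakey words backwards one round at a time. */
    uint32_t x = 0x01234567U;
    int i;
    for (i = 0; i < 1000; ++i) {
        assert(lfsr3(lfsr2(x)) == x);
        assert(lfsr2(lfsr3(x)) == x);
        x = x * 2654435761U + 0x9E3779B9U;  /* arbitrary test values */
    }
    return 0;
}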
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); + memcpy(ks->TK2, tk2, 16); + skinny_128_384_encrypt(ks, output, input); } -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - const uint32_t *schedule; - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all decryption rounds */ - schedule = &(ks->k[SKINNY_128_128_ROUNDS * 2 - 2]); - for (round = 0; round < SKINNY_128_128_ROUNDS; ++round, schedule -= 2) { - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ - s0 ^= schedule[0]; - s1 ^= schedule[1]; - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} +#endif /* __AVR__ */ diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/internal-skinny128.h b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/internal-skinny128.h index 76b34f5..2bfda3c 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/internal-skinny128.h +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/internal-skinny128.h @@ -39,6 +39,16 @@ extern "C" { #endif /** + * \def SKINNY_128_SMALL_SCHEDULE + * \brief Defined to 1 to use the small key schedule version of SKINNY-128. + */ +#if defined(__AVR__) +#define SKINNY_128_SMALL_SCHEDULE 1 +#else +#define SKINNY_128_SMALL_SCHEDULE 0 +#endif + +/** * \brief Size of a block for SKINNY-128 block ciphers. */ #define SKINNY_128_BLOCK_SIZE 16 @@ -56,8 +66,16 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; + + /** TK3 for the small key schedule */ + uint8_t TK3[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_384_ROUNDS * 2]; +#endif } skinny_128_384_key_schedule_t; @@ -66,29 +84,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 32 or 48, - * where 32 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-384. 
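The header change further down makes the memory trade-off explicit: with SKINNY_128_SMALL_SCHEDULE the SKINNY-128-384 schedule structure stores only TK1, TK2 and TK3 verbatim instead of TK1 plus two 32-bit subkey words per round, and the subkeys are recomputed on the fly. A rough size comparison, assuming the usual 56-round count for SKINNY-128-384 (the actual SKINNY_128_384_ROUNDS definition lives earlier in this header and is not quoted in the hunk):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SKINNY_128_384_ROUNDS 56  /* assumed; defined in internal-skinny128.h */

int main(void)
{
    /* Full schedule: TK1 plus two 32-bit words per round. */
    size_t full_size  = 16 + SKINNY_128_384_ROUNDS * 2 * sizeof(uint32_t);
    /* Small schedule: TK1, TK2 and TK3 kept as raw bytes. */
    size_t small_size = 16 + 16 + 16;
    printf("full: %u bytes, small: %u bytes\n",
           (unsigned)full_size, (unsigned)small_size);
    return 0;
}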
- * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_384_set_tweak - (skinny_128_384_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); /** * \brief Encrypts a 128-bit block with SKINNY-128-384. @@ -133,9 +131,12 @@ void skinny_128_384_decrypt * This version is useful when both TK1 and TK2 change from block to block. * When the key is initialized with skinny_128_384_init(), the TK2 part of * the key value should be set to zero. + * + * \note Some versions of this function may modify the key schedule to + * copy tk2 into place. */ void skinny_128_384_encrypt_tk2 - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + (skinny_128_384_key_schedule_t *ks, unsigned char *output, const unsigned char *input, const unsigned char *tk2); /** @@ -170,8 +171,13 @@ typedef struct /** TK1 for the tweakable part of the key schedule */ uint8_t TK1[16]; - /** Words of the key schedule */ +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; +#else + /** Words of the full key schedule */ uint32_t k[SKINNY_128_256_ROUNDS * 2]; +#endif } skinny_128_256_key_schedule_t; @@ -180,29 +186,9 @@ typedef struct * * \param ks Points to the key schedule to initialize. * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16 or 32, - * where 16 is used for the tweakable variant. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. */ -int skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Sets the tweakable part of the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to modify. - * \param tweak Points to the tweak data. - * \param tweak_len Length of the tweak data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_256_set_tweak - (skinny_128_256_key_schedule_t *ks, const unsigned char *tweak, - size_t tweak_len); +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); /** * \brief Encrypts a 128-bit block with SKINNY-128-256. @@ -251,63 +237,6 @@ void skinny_128_256_encrypt_tk_full (const unsigned char key[32], unsigned char *output, const unsigned char *input); -/** - * \brief Number of rounds for SKINNY-128-128. - */ -#define SKINNY_128_128_ROUNDS 40 - -/** - * \brief Structure of the key schedule for SKINNY-128-128. - */ -typedef struct -{ - /** Words of the key schedule */ - uint32_t k[SKINNY_128_128_ROUNDS * 2]; - -} skinny_128_128_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-128. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. - */ -int skinny_128_128_init - (skinny_128_128_key_schedule_t *ks, const unsigned char *key, - size_t key_len); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. 
- * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_encrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-128. - * - * \param ks Points to the SKINNY-128-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_128_decrypt - (const skinny_128_128_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - #ifdef __cplusplus } #endif diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/internal-util.h b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/internal-util.h +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/skinny-aead.c b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/skinny-aead.c index 2bb37e9..7558527 100644 --- a/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/skinny-aead.c +++ b/skinny/Implementations/crypto_aead/skinnyaeadtk39664v1/rhys/skinny-aead.c @@ -105,11 +105,12 @@ static void skinny_aead_128_384_init (skinny_128_384_key_schedule_t *ks, const unsigned char *key, const unsigned char *nonce, unsigned nonce_len) { - unsigned char k[32]; - memcpy(k, nonce, nonce_len); - memset(k + nonce_len, 0, 16 - nonce_len); - memcpy(k + 16, key, 16); - skinny_128_384_init(ks, k, 32); + unsigned char k[48]; + memset(k, 0, 16); + memcpy(k + 16, nonce, nonce_len); + memset(k + 16 + nonce_len, 0, 16 - nonce_len); + memcpy(k + 32, key, 16); + skinny_128_384_init(ks, k); } /** @@ -136,7 +137,7 @@ static void skinny_aead_128_384_init #define skinny_aead_128_384_update_lfsr(lfsr) \ do { \ uint8_t feedback = ((lfsr) & (1ULL << 63)) ? 
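The LW_CRYPTO_ROTATE32_COMPOSED block above exists because the only cheap 32-bit rotations on AVR are by one bit (a shift/rotate-through-carry chain) and by multiples of eight (pure register moves); every other count is composed from those two. The composition is easy to sanity-check on a hosted build; the sketch below compares one composed macro against the generic rotation (the composed body follows the hunk, while the simplified generic macros and the harness are illustrative and evaluate their argument more than once).

#include <assert.h>
#include <stdint.h>

/* Simplified generic rotations (the real header avoids double evaluation). */
#define leftRotate(a, bits)  ((((uint32_t)(a)) << (bits)) | (((uint32_t)(a)) >> (32 - (bits))))
#define rightRotate(a, bits) ((((uint32_t)(a)) >> (bits)) | (((uint32_t)(a)) << (32 - (bits))))

/* Composed form: rotate left by 5 = rotate left by 8, then right by 3
 * (three single-bit right rotations). */
#define leftRotate5(a) \
    (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1))

int main(void)
{
    uint32_t x = 0x80000001U;
    int i;
    for (i = 0; i < 1000; ++i) {
        assert(leftRotate5(x) == leftRotate(x, 5));
        x = x * 1664525U + 1013904223U;  /* arbitrary test values */
    }
    return 0;
}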
0x1B : 0x00; \ - (lfsr) = ((lfsr) << 1) | feedback; \ + (lfsr) = ((lfsr) << 1) ^ feedback; \ } while (0) /** @@ -520,7 +521,7 @@ static void skinny_aead_128_256_init memset(k, 0, 16 - nonce_len); memcpy(k + 16 - nonce_len, nonce, nonce_len); memcpy(k + 16, key, 16); - skinny_128_256_init(ks, k, 32); + skinny_128_256_init(ks, k); } /** diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/aead-common.c b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/aead-common.h b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
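The skinny-aead.c hunk just above also fixes the 64-bit LFSR that the AEAD mode steps from block to block: the feedback constant 0x1B must be folded in with XOR rather than OR, since after the left shift bits 1, 3 and 4 of the state are generally already set and OR cannot combine the feedback into them correctly. Written out as a plain function (the name is illustrative, the expression is the corrected macro):

#include <stdint.h>

/* One step of the 64-bit LFSR stepped once per block: shift left by one
 * and XOR in 0x1B whenever the bit shifted out of the top was set - the
 * usual "multiply by x modulo a fixed polynomial" style of update. */
static uint64_t skinny_aead_lfsr_step(uint64_t lfsr)
{
    uint64_t feedback = (lfsr & (1ULL << 63)) ? 0x1B : 0x00;
    return (lfsr << 1) ^ feedback;
}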
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. 
Extendable-Output Functions (XOFs) should - * provide the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if the previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result.
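As an illustrative aside (not part of the removed header or of aead-common.c): the behaviour this hunk documents for aead_check_tag can be sketched in portable C roughly as below. The helper name ct_check_tag and the particular branch-free masking trick are assumptions for the sketch; the library's actual implementation may differ in detail.

/* Sketch of a constant-time tag check: compare the tags without
 * data-dependent branches, wipe the plaintext on mismatch, and
 * return 0 (match) or -1 (mismatch). */
static int ct_check_tag
    (unsigned char *plaintext, unsigned long long plaintext_len,
     const unsigned char *tag1, const unsigned char *tag2,
     unsigned tag_len)
{
    unsigned accum = 0;          /* stays zero only if every tag byte matches */
    unsigned char mask;
    unsigned long long i;
    unsigned t;

    for (t = 0; t < tag_len; ++t)
        accum |= (unsigned)(tag1[t] ^ tag2[t]);

    /* accum is in 0..255; fold it to 0x00 (match) or 0xFF (mismatch)
     * without branching on secret data. */
    mask = (unsigned char)(0U - ((accum + 0xFFU) >> 8));

    /* Zero the plaintext when the tags differ so the caller cannot use it. */
    for (i = 0; i < plaintext_len; ++i)
        plaintext[i] &= (unsigned char)~mask;

    return -(int)(mask & 1U);
}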
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/api.h b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/api.h deleted file mode 100644 index ae8c7f6..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/api.h +++ /dev/null @@ -1 +0,0 @@ -#define CRYPTO_BYTES 32 diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/hash.c b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/hash.c deleted file mode 100644 index e0118e9..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/hash.c +++ /dev/null @@ -1,8 +0,0 @@ - -#include "skinny-hash.h" - -int crypto_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - return skinny_tk2_hash(out, in, inlen); -} diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-skinny128-avr.S b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-skinny128-avr.S deleted file mode 100644 index d342cd5..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-skinny128-avr.S +++ /dev/null @@ -1,10099 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 256 -table_0: - .byte 101 - .byte 76 - .byte 106 - .byte 66 - .byte 75 - .byte 99 - .byte 67 - .byte 107 - .byte 85 - .byte 117 - .byte 90 - .byte 122 - .byte 83 - .byte 115 - .byte 91 - .byte 123 - .byte 53 - .byte 140 - .byte 58 - .byte 129 - .byte 137 - .byte 51 - .byte 128 - .byte 59 - .byte 149 - .byte 37 - .byte 152 - .byte 42 - .byte 144 - .byte 35 - .byte 153 - .byte 43 - .byte 229 - .byte 204 - .byte 232 - .byte 193 - .byte 201 - .byte 224 - .byte 192 - .byte 233 - .byte 213 - .byte 245 - .byte 216 - .byte 248 - .byte 208 - .byte 240 - .byte 217 - .byte 249 - .byte 165 - .byte 28 - .byte 168 - .byte 18 - .byte 27 - .byte 160 - .byte 19 - .byte 169 - .byte 5 - .byte 181 - .byte 10 - .byte 184 - .byte 3 - .byte 176 - .byte 11 - .byte 185 - .byte 50 - .byte 136 - .byte 60 - .byte 133 - .byte 141 - .byte 52 - .byte 132 - .byte 61 - .byte 145 - .byte 34 - .byte 156 - .byte 44 - .byte 148 - .byte 36 - .byte 157 - .byte 45 - .byte 98 - .byte 74 - .byte 108 - .byte 69 - .byte 77 - .byte 100 - .byte 68 - .byte 109 - .byte 82 - .byte 114 - .byte 92 - .byte 124 - .byte 84 - .byte 116 - .byte 93 - .byte 125 - .byte 161 - .byte 26 - .byte 172 - .byte 21 - .byte 29 - .byte 164 - .byte 20 - .byte 173 - .byte 2 - .byte 177 - .byte 12 - .byte 188 - .byte 4 - .byte 180 - .byte 13 - .byte 189 - .byte 225 - .byte 200 - .byte 236 - .byte 197 - .byte 205 - .byte 228 - .byte 196 - .byte 237 - .byte 209 - .byte 241 - .byte 220 - .byte 252 - .byte 212 - .byte 244 - .byte 221 - .byte 253 - .byte 54 - .byte 142 - .byte 56 - .byte 130 - .byte 139 - .byte 48 - .byte 131 - .byte 57 - .byte 150 - .byte 38 - .byte 154 - .byte 40 - .byte 147 - .byte 32 - .byte 155 - .byte 41 - .byte 102 - .byte 78 - .byte 104 - .byte 65 - .byte 73 - .byte 96 - .byte 64 - .byte 105 - .byte 86 - .byte 118 - .byte 88 - .byte 120 - .byte 80 - .byte 112 - .byte 89 - .byte 121 - .byte 166 - .byte 30 - .byte 170 - .byte 17 - .byte 25 - .byte 163 - .byte 16 - .byte 171 - .byte 6 - .byte 182 - .byte 8 - .byte 186 - .byte 0 - .byte 179 
- .byte 9 - .byte 187 - .byte 230 - .byte 206 - .byte 234 - .byte 194 - .byte 203 - .byte 227 - .byte 195 - .byte 235 - .byte 214 - .byte 246 - .byte 218 - .byte 250 - .byte 211 - .byte 243 - .byte 219 - .byte 251 - .byte 49 - .byte 138 - .byte 62 - .byte 134 - .byte 143 - .byte 55 - .byte 135 - .byte 63 - .byte 146 - .byte 33 - .byte 158 - .byte 46 - .byte 151 - .byte 39 - .byte 159 - .byte 47 - .byte 97 - .byte 72 - .byte 110 - .byte 70 - .byte 79 - .byte 103 - .byte 71 - .byte 111 - .byte 81 - .byte 113 - .byte 94 - .byte 126 - .byte 87 - .byte 119 - .byte 95 - .byte 127 - .byte 162 - .byte 24 - .byte 174 - .byte 22 - .byte 31 - .byte 167 - .byte 23 - .byte 175 - .byte 1 - .byte 178 - .byte 14 - .byte 190 - .byte 7 - .byte 183 - .byte 15 - .byte 191 - .byte 226 - .byte 202 - .byte 238 - .byte 198 - .byte 207 - .byte 231 - .byte 199 - .byte 239 - .byte 210 - .byte 242 - .byte 222 - .byte 254 - .byte 215 - .byte 247 - .byte 223 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 256 -table_1: - .byte 172 - .byte 232 - .byte 104 - .byte 60 - .byte 108 - .byte 56 - .byte 168 - .byte 236 - .byte 170 - .byte 174 - .byte 58 - .byte 62 - .byte 106 - .byte 110 - .byte 234 - .byte 238 - .byte 166 - .byte 163 - .byte 51 - .byte 54 - .byte 102 - .byte 99 - .byte 227 - .byte 230 - .byte 225 - .byte 164 - .byte 97 - .byte 52 - .byte 49 - .byte 100 - .byte 161 - .byte 228 - .byte 141 - .byte 201 - .byte 73 - .byte 29 - .byte 77 - .byte 25 - .byte 137 - .byte 205 - .byte 139 - .byte 143 - .byte 27 - .byte 31 - .byte 75 - .byte 79 - .byte 203 - .byte 207 - .byte 133 - .byte 192 - .byte 64 - .byte 21 - .byte 69 - .byte 16 - .byte 128 - .byte 197 - .byte 130 - .byte 135 - .byte 18 - .byte 23 - .byte 66 - .byte 71 - .byte 194 - .byte 199 - .byte 150 - .byte 147 - .byte 3 - .byte 6 - .byte 86 - .byte 83 - .byte 211 - .byte 214 - .byte 209 - .byte 148 - .byte 81 - .byte 4 - .byte 1 - .byte 84 - .byte 145 - .byte 212 - .byte 156 - .byte 216 - .byte 88 - .byte 12 - .byte 92 - .byte 8 - .byte 152 - .byte 220 - .byte 154 - .byte 158 - .byte 10 - .byte 14 - .byte 90 - .byte 94 - .byte 218 - .byte 222 - .byte 149 - .byte 208 - .byte 80 - .byte 5 - .byte 85 - .byte 0 - .byte 144 - .byte 213 - .byte 146 - .byte 151 - .byte 2 - .byte 7 - .byte 82 - .byte 87 - .byte 210 - .byte 215 - .byte 157 - .byte 217 - .byte 89 - .byte 13 - .byte 93 - .byte 9 - .byte 153 - .byte 221 - .byte 155 - .byte 159 - .byte 11 - .byte 15 - .byte 91 - .byte 95 - .byte 219 - .byte 223 - .byte 22 - .byte 19 - .byte 131 - .byte 134 - .byte 70 - .byte 67 - .byte 195 - .byte 198 - .byte 65 - .byte 20 - .byte 193 - .byte 132 - .byte 17 - .byte 68 - .byte 129 - .byte 196 - .byte 28 - .byte 72 - .byte 200 - .byte 140 - .byte 76 - .byte 24 - .byte 136 - .byte 204 - .byte 26 - .byte 30 - .byte 138 - .byte 142 - .byte 74 - .byte 78 - .byte 202 - .byte 206 - .byte 53 - .byte 96 - .byte 224 - .byte 165 - .byte 101 - .byte 48 - .byte 160 - .byte 229 - .byte 50 - .byte 55 - .byte 162 - .byte 167 - .byte 98 - .byte 103 - .byte 226 - .byte 231 - .byte 61 - .byte 105 - .byte 233 - .byte 173 - .byte 109 - .byte 57 - .byte 169 - .byte 237 - .byte 59 - .byte 63 - .byte 171 - .byte 175 - .byte 107 - .byte 111 - .byte 235 - .byte 239 - .byte 38 - .byte 35 - .byte 179 - .byte 182 - .byte 118 - .byte 115 - .byte 243 - .byte 246 - .byte 113 - .byte 36 - .byte 241 - .byte 180 - .byte 33 - .byte 116 - .byte 177 - .byte 244 - .byte 44 - .byte 120 - .byte 248 - .byte 188 - .byte 124 - .byte 40 - .byte 184 - .byte 
252 - .byte 42 - .byte 46 - .byte 186 - .byte 190 - .byte 122 - .byte 126 - .byte 250 - .byte 254 - .byte 37 - .byte 112 - .byte 240 - .byte 181 - .byte 117 - .byte 32 - .byte 176 - .byte 245 - .byte 34 - .byte 39 - .byte 178 - .byte 183 - .byte 114 - .byte 119 - .byte 242 - .byte 247 - .byte 45 - .byte 121 - .byte 249 - .byte 189 - .byte 125 - .byte 41 - .byte 185 - .byte 253 - .byte 43 - .byte 47 - .byte 187 - .byte 191 - .byte 123 - .byte 127 - .byte 251 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_2, @object - .size table_2, 256 -table_2: - .byte 0 - .byte 2 - .byte 4 - .byte 6 - .byte 8 - .byte 10 - .byte 12 - .byte 14 - .byte 16 - .byte 18 - .byte 20 - .byte 22 - .byte 24 - .byte 26 - .byte 28 - .byte 30 - .byte 32 - .byte 34 - .byte 36 - .byte 38 - .byte 40 - .byte 42 - .byte 44 - .byte 46 - .byte 48 - .byte 50 - .byte 52 - .byte 54 - .byte 56 - .byte 58 - .byte 60 - .byte 62 - .byte 65 - .byte 67 - .byte 69 - .byte 71 - .byte 73 - .byte 75 - .byte 77 - .byte 79 - .byte 81 - .byte 83 - .byte 85 - .byte 87 - .byte 89 - .byte 91 - .byte 93 - .byte 95 - .byte 97 - .byte 99 - .byte 101 - .byte 103 - .byte 105 - .byte 107 - .byte 109 - .byte 111 - .byte 113 - .byte 115 - .byte 117 - .byte 119 - .byte 121 - .byte 123 - .byte 125 - .byte 127 - .byte 128 - .byte 130 - .byte 132 - .byte 134 - .byte 136 - .byte 138 - .byte 140 - .byte 142 - .byte 144 - .byte 146 - .byte 148 - .byte 150 - .byte 152 - .byte 154 - .byte 156 - .byte 158 - .byte 160 - .byte 162 - .byte 164 - .byte 166 - .byte 168 - .byte 170 - .byte 172 - .byte 174 - .byte 176 - .byte 178 - .byte 180 - .byte 182 - .byte 184 - .byte 186 - .byte 188 - .byte 190 - .byte 193 - .byte 195 - .byte 197 - .byte 199 - .byte 201 - .byte 203 - .byte 205 - .byte 207 - .byte 209 - .byte 211 - .byte 213 - .byte 215 - .byte 217 - .byte 219 - .byte 221 - .byte 223 - .byte 225 - .byte 227 - .byte 229 - .byte 231 - .byte 233 - .byte 235 - .byte 237 - .byte 239 - .byte 241 - .byte 243 - .byte 245 - .byte 247 - .byte 249 - .byte 251 - .byte 253 - .byte 255 - .byte 1 - .byte 3 - .byte 5 - .byte 7 - .byte 9 - .byte 11 - .byte 13 - .byte 15 - .byte 17 - .byte 19 - .byte 21 - .byte 23 - .byte 25 - .byte 27 - .byte 29 - .byte 31 - .byte 33 - .byte 35 - .byte 37 - .byte 39 - .byte 41 - .byte 43 - .byte 45 - .byte 47 - .byte 49 - .byte 51 - .byte 53 - .byte 55 - .byte 57 - .byte 59 - .byte 61 - .byte 63 - .byte 64 - .byte 66 - .byte 68 - .byte 70 - .byte 72 - .byte 74 - .byte 76 - .byte 78 - .byte 80 - .byte 82 - .byte 84 - .byte 86 - .byte 88 - .byte 90 - .byte 92 - .byte 94 - .byte 96 - .byte 98 - .byte 100 - .byte 102 - .byte 104 - .byte 106 - .byte 108 - .byte 110 - .byte 112 - .byte 114 - .byte 116 - .byte 118 - .byte 120 - .byte 122 - .byte 124 - .byte 126 - .byte 129 - .byte 131 - .byte 133 - .byte 135 - .byte 137 - .byte 139 - .byte 141 - .byte 143 - .byte 145 - .byte 147 - .byte 149 - .byte 151 - .byte 153 - .byte 155 - .byte 157 - .byte 159 - .byte 161 - .byte 163 - .byte 165 - .byte 167 - .byte 169 - .byte 171 - .byte 173 - .byte 175 - .byte 177 - .byte 179 - .byte 181 - .byte 183 - .byte 185 - .byte 187 - .byte 189 - .byte 191 - .byte 192 - .byte 194 - .byte 196 - .byte 198 - .byte 200 - .byte 202 - .byte 204 - .byte 206 - .byte 208 - .byte 210 - .byte 212 - .byte 214 - .byte 216 - .byte 218 - .byte 220 - .byte 222 - .byte 224 - .byte 226 - .byte 228 - .byte 230 - .byte 232 - .byte 234 - .byte 236 - .byte 238 - .byte 240 - .byte 242 - .byte 244 - .byte 246 - .byte 248 - .byte 250 - .byte 252 - .byte 254 - - 
.section .progmem.data,"a",@progbits - .p2align 8 - .type table_3, @object - .size table_3, 256 -table_3: - .byte 0 - .byte 128 - .byte 1 - .byte 129 - .byte 2 - .byte 130 - .byte 3 - .byte 131 - .byte 4 - .byte 132 - .byte 5 - .byte 133 - .byte 6 - .byte 134 - .byte 7 - .byte 135 - .byte 8 - .byte 136 - .byte 9 - .byte 137 - .byte 10 - .byte 138 - .byte 11 - .byte 139 - .byte 12 - .byte 140 - .byte 13 - .byte 141 - .byte 14 - .byte 142 - .byte 15 - .byte 143 - .byte 16 - .byte 144 - .byte 17 - .byte 145 - .byte 18 - .byte 146 - .byte 19 - .byte 147 - .byte 20 - .byte 148 - .byte 21 - .byte 149 - .byte 22 - .byte 150 - .byte 23 - .byte 151 - .byte 24 - .byte 152 - .byte 25 - .byte 153 - .byte 26 - .byte 154 - .byte 27 - .byte 155 - .byte 28 - .byte 156 - .byte 29 - .byte 157 - .byte 30 - .byte 158 - .byte 31 - .byte 159 - .byte 160 - .byte 32 - .byte 161 - .byte 33 - .byte 162 - .byte 34 - .byte 163 - .byte 35 - .byte 164 - .byte 36 - .byte 165 - .byte 37 - .byte 166 - .byte 38 - .byte 167 - .byte 39 - .byte 168 - .byte 40 - .byte 169 - .byte 41 - .byte 170 - .byte 42 - .byte 171 - .byte 43 - .byte 172 - .byte 44 - .byte 173 - .byte 45 - .byte 174 - .byte 46 - .byte 175 - .byte 47 - .byte 176 - .byte 48 - .byte 177 - .byte 49 - .byte 178 - .byte 50 - .byte 179 - .byte 51 - .byte 180 - .byte 52 - .byte 181 - .byte 53 - .byte 182 - .byte 54 - .byte 183 - .byte 55 - .byte 184 - .byte 56 - .byte 185 - .byte 57 - .byte 186 - .byte 58 - .byte 187 - .byte 59 - .byte 188 - .byte 60 - .byte 189 - .byte 61 - .byte 190 - .byte 62 - .byte 191 - .byte 63 - .byte 64 - .byte 192 - .byte 65 - .byte 193 - .byte 66 - .byte 194 - .byte 67 - .byte 195 - .byte 68 - .byte 196 - .byte 69 - .byte 197 - .byte 70 - .byte 198 - .byte 71 - .byte 199 - .byte 72 - .byte 200 - .byte 73 - .byte 201 - .byte 74 - .byte 202 - .byte 75 - .byte 203 - .byte 76 - .byte 204 - .byte 77 - .byte 205 - .byte 78 - .byte 206 - .byte 79 - .byte 207 - .byte 80 - .byte 208 - .byte 81 - .byte 209 - .byte 82 - .byte 210 - .byte 83 - .byte 211 - .byte 84 - .byte 212 - .byte 85 - .byte 213 - .byte 86 - .byte 214 - .byte 87 - .byte 215 - .byte 88 - .byte 216 - .byte 89 - .byte 217 - .byte 90 - .byte 218 - .byte 91 - .byte 219 - .byte 92 - .byte 220 - .byte 93 - .byte 221 - .byte 94 - .byte 222 - .byte 95 - .byte 223 - .byte 224 - .byte 96 - .byte 225 - .byte 97 - .byte 226 - .byte 98 - .byte 227 - .byte 99 - .byte 228 - .byte 100 - .byte 229 - .byte 101 - .byte 230 - .byte 102 - .byte 231 - .byte 103 - .byte 232 - .byte 104 - .byte 233 - .byte 105 - .byte 234 - .byte 106 - .byte 235 - .byte 107 - .byte 236 - .byte 108 - .byte 237 - .byte 109 - .byte 238 - .byte 110 - .byte 239 - .byte 111 - .byte 240 - .byte 112 - .byte 241 - .byte 113 - .byte 242 - .byte 114 - .byte 243 - .byte 115 - .byte 244 - .byte 116 - .byte 245 - .byte 117 - .byte 246 - .byte 118 - .byte 247 - .byte 119 - .byte 248 - .byte 120 - .byte 249 - .byte 121 - .byte 250 - .byte 122 - .byte 251 - .byte 123 - .byte 252 - .byte 124 - .byte 253 - .byte 125 - .byte 254 - .byte 126 - .byte 255 - .byte 127 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_4, @object - .size table_4, 112 -table_4: - .byte 1 - .byte 0 - .byte 3 - .byte 0 - .byte 7 - .byte 0 - .byte 15 - .byte 0 - .byte 15 - .byte 1 - .byte 14 - .byte 3 - .byte 13 - .byte 3 - .byte 11 - .byte 3 - .byte 7 - .byte 3 - .byte 15 - .byte 2 - .byte 14 - .byte 1 - .byte 12 - .byte 3 - .byte 9 - .byte 3 - .byte 3 - .byte 3 - .byte 7 - .byte 2 - .byte 14 - .byte 0 - .byte 13 - .byte 1 - .byte 10 - .byte 3 - 
.byte 5 - .byte 3 - .byte 11 - .byte 2 - .byte 6 - .byte 1 - .byte 12 - .byte 2 - .byte 8 - .byte 1 - .byte 0 - .byte 3 - .byte 1 - .byte 2 - .byte 2 - .byte 0 - .byte 5 - .byte 0 - .byte 11 - .byte 0 - .byte 7 - .byte 1 - .byte 14 - .byte 2 - .byte 12 - .byte 1 - .byte 8 - .byte 3 - .byte 1 - .byte 3 - .byte 3 - .byte 2 - .byte 6 - .byte 0 - .byte 13 - .byte 0 - .byte 11 - .byte 1 - .byte 6 - .byte 3 - .byte 13 - .byte 2 - .byte 10 - .byte 1 - .byte 4 - .byte 3 - .byte 9 - .byte 2 - .byte 2 - .byte 1 - .byte 4 - .byte 2 - .byte 8 - .byte 0 - .byte 1 - .byte 1 - .byte 2 - .byte 2 - .byte 4 - .byte 0 - .byte 9 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 2 - .byte 12 - .byte 0 - .byte 9 - .byte 1 - .byte 2 - .byte 3 - .byte 5 - .byte 2 - .byte 10 - .byte 0 - - .text -.global skinny_128_384_init - .type skinny_128_384_init, @function -skinny_128_384_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,12 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_384_init, .-skinny_128_384_init - - .text -.global skinny_128_384_encrypt - .type skinny_128_384_encrypt, @function -skinny_128_384_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - std Y+33,r18 - std Y+34,r19 - std Y+35,r20 - std Y+36,r21 - ldd r18,Z+36 - ldd r19,Z+37 - ldd r20,Z+38 - ldd r21,Z+39 - std Y+37,r18 - std Y+38,r19 - std Y+39,r20 - std Y+40,r21 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - std Y+41,r18 - std Y+42,r19 - std Y+43,r20 - std Y+44,r21 - ldd r18,Z+44 - ldd r19,Z+45 - ldd r20,Z+46 - ldd r21,Z+47 - std Y+45,r18 - std Y+46,r19 - std Y+47,r20 - std Y+48,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -114: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 
-#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 
- eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif 
defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - 
mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif 
defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z 
-#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm 
- mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif 
defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,112 - brne 5721f - rjmp 790f -5721: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - 
ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 114b -790: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop 
r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_encrypt, .-skinny_128_384_encrypt - -.global skinny_128_384_encrypt_tk_full - .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt - - .text -.global skinny_128_384_decrypt - .type skinny_128_384_decrypt, @function -skinny_128_384_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r23 - std Y+2,r2 - std Y+3,r21 - std Y+4,r20 - std Y+5,r3 - std Y+6,r18 - std Y+7,r19 - std Y+8,r22 - std Y+9,r9 - std Y+10,r10 - std Y+11,r7 - std Y+12,r6 - std Y+13,r11 - std Y+14,r4 - std Y+15,r5 - std Y+16,r8 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r23 - std Y+18,r2 - std Y+19,r21 - std Y+20,r20 - std Y+21,r3 - std Y+22,r18 - std Y+23,r19 - std Y+24,r22 - std Y+25,r9 - std Y+26,r10 - std Y+27,r7 - std Y+28,r6 - std Y+29,r11 - std Y+30,r4 - std Y+31,r5 - std Y+32,r8 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r22,Z+36 - ldd r23,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+33,r23 - std Y+34,r2 - std Y+35,r21 - std Y+36,r20 - std Y+37,r3 - std Y+38,r18 - std Y+39,r19 - std Y+40,r22 - std Y+41,r9 - std Y+42,r10 - std Y+43,r7 - std Y+44,r6 - std Y+45,r11 - std Y+46,r4 - std Y+47,r5 - std Y+48,r8 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -122: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 122b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,28 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -150: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 150b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 -179: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) 
- lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 179b - std Y+33,r12 - std Y+34,r13 - std Y+35,r14 - std Y+36,r15 - std Y+37,r24 - std Y+38,r25 - std Y+39,r16 - std Y+40,r17 - ldi r26,28 - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 -207: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 207b - std Y+41,r12 - std Y+42,r13 - std Y+43,r14 - std Y+44,r15 - std Y+45,r24 - std Y+46,r25 - std Y+47,r16 - std Y+48,r17 - ldi r26,112 -227: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov 
r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) 
-#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if 
defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor 
r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi 
r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std 
Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - 
mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - 
mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 903f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 227b -903: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_decrypt, .-skinny_128_384_decrypt - - .text -.global skinny_128_256_init - .type skinny_128_256_init, @function -skinny_128_256_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,8 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_256_init, .-skinny_128_256_init - - .text -.global skinny_128_256_encrypt - .type skinny_128_256_encrypt, @function -skinny_128_256_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd 
r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -82: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld 
r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - 
lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov 
r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif 
defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if 
defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if 
defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,96 - breq 594f - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std 
Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 82b -594: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_encrypt, .-skinny_128_256_encrypt - -.global skinny_128_256_encrypt_tk_full - .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt - - .text -.global skinny_128_256_decrypt - .type skinny_128_256_decrypt, @function -skinny_128_256_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - std Y+5,r22 - std Y+6,r23 - std Y+7,r2 - std Y+8,r3 - std Y+9,r4 - std Y+10,r5 - std Y+11,r6 - std Y+12,r7 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - std Y+21,r22 - std Y+22,r23 - std Y+23,r2 - std Y+24,r3 - std Y+25,r4 - std Y+26,r5 - std Y+27,r6 - std Y+28,r7 - std Y+29,r8 - std Y+30,r9 - std Y+31,r10 - std Y+32,r11 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,24 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -90: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov 
r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 90b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,24 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -118: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 118b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,96 -139: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) 
- ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - 
mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 
- eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov 
r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov 
r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 651f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 139b -651: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_decrypt, .-skinny_128_256_decrypt - -#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-skinny128.c b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-skinny128.c deleted file mode 100644 index 579ced1..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-skinny128.c +++ /dev/null @@ -1,801 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-skinny128.h" -#include "internal-skinnyutil.h" -#include "internal-util.h" -#include <string.h> - -#if !defined(__AVR__) - -STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) -{ - /* This function is used to fast-forward the TK1 tweak value - * to the value at the end of the key schedule for decryption. - * - * The tweak permutation repeats every 16 rounds, so SKINNY-128-256 - * with 48 rounds does not need any fast forwarding applied. - * SKINNY-128-128 with 40 rounds and SKINNY-128-384 with 56 rounds - * are equivalent to applying the permutation 8 times: - * - * PT*8 = [5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12] - */ - uint32_t row0 = tk[0]; - uint32_t row1 = tk[1]; - uint32_t row2 = tk[2]; - uint32_t row3 = tk[3]; - tk[0] = ((row1 >> 8) & 0x0000FFFFU) | - ((row0 >> 8) & 0x00FF0000U) | - ((row0 << 8) & 0xFF000000U); - tk[1] = ((row1 >> 24) & 0x000000FFU) | - ((row0 << 8) & 0x00FFFF00U) | - ((row1 << 24) & 0xFF000000U); - tk[2] = ((row3 >> 8) & 0x0000FFFFU) | - ((row2 >> 8) & 0x00FF0000U) | - ((row2 << 8) & 0xFF000000U); - tk[3] = ((row3 >> 24) & 0x000000FFU) | - ((row2 << 8) & 0x00FFFF00U) | - ((row3 << 24) & 0xFF000000U); -} - -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); - memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); -#else - /* Set the initial states of TK1, TK2, and TK3 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Set up the key schedule using TK2 and TK3. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption.
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ TK3[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ TK3[1] ^ (rc >> 4); - - /* Permute TK2 and TK3 for the next round */ - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - - /* Apply the LFSR's to TK2 and TK3 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } -#endif -} - -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0x15; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Permute TK1 to fast-forward it to the end of the key schedule */ - skinny128_fast_forward_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_fast_forward_tk(TK2); - skinny128_fast_forward_tk(TK3); - for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
- skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - skinny128_LFSR3(TK3[2]); - skinny128_LFSR3(TK3[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_inv_permute_tk(TK3); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); - skinny128_LFSR2(TK3[2]); - skinny128_LFSR2(TK3[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); - TK2[0] = le_load_word32(tk2); - TK2[1] = le_load_word32(tk2 + 4); - TK2[2] = le_load_word32(tk2 + 8); - TK2[3] = le_load_word32(tk2 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; - s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK3); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); -#else - /* Set the initial states of TK1 and TK2 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Set up the key schedule using TK2. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ (rc >> 4); - - /* Permute TK2 for the next round */ - skinny128_permute_tk(TK2); - - /* Apply the LFSR to TK2 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } -#endif -} - -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0x09; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1. 
- * There is no need to fast-forward TK1 because the value at - * the end of the key schedule is the same as at the start */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2. - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -#else /* __AVR__ */ - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - memcpy(ks->TK2, tk2, 16); - skinny_128_384_encrypt(ks, output, input); -} - -#endif /* __AVR__ */ diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-skinny128.h b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-skinny128.h deleted file mode 100644 index 2bfda3c..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-skinny128.h +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNY128_H -#define LW_INTERNAL_SKINNY128_H - -/** - * \file internal-skinny128.h - * \brief SKINNY-128 block cipher family. - * - * References: https://eprint.iacr.org/2016/660.pdf, - * https://sites.google.com/site/skinnycipher/ - */ - -#include <stddef.h> -#include <stdint.h> - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \def SKINNY_128_SMALL_SCHEDULE - * \brief Defined to 1 to use the small key schedule version of SKINNY-128. - */ -#if defined(__AVR__) -#define SKINNY_128_SMALL_SCHEDULE 1 -#else -#define SKINNY_128_SMALL_SCHEDULE 0 -#endif - -/** - * \brief Size of a block for SKINNY-128 block ciphers. - */ -#define SKINNY_128_BLOCK_SIZE 16 - -/** - * \brief Number of rounds for SKINNY-128-384. - */ -#define SKINNY_128_384_ROUNDS 56 - -/** - * \brief Structure of the key schedule for SKINNY-128-384.
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; - - /** TK3 for the small key schedule */ - uint8_t TK3[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_384_ROUNDS * 2]; -#endif - -} skinny_128_384_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-384. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and an explicitly - * provided TK2 value. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tk2 TK2 value that should be updated on the fly. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when both TK1 and TK2 change from block to block. - * When the key is initialized with skinny_128_384_init(), the TK2 part of - * the key value should be set to zero. - * - * \note Some versions of this function may modify the key schedule to - * copy tk2 into place. - */ -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and a - * fully specified tweakey value. - * - * \param key Points to the 384-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-384 but - * more memory-efficient. - */ -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input); - -/** - * \brief Number of rounds for SKINNY-128-256. - */ -#define SKINNY_128_256_ROUNDS 48 - -/** - * \brief Structure of the key schedule for SKINNY-128-256. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_256_ROUNDS * 2]; -#endif - -} skinny_128_256_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256 and a - * fully specified tweakey value. - * - * \param key Points to the 256-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-256 but - * more memory-efficient. - */ -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-skinnyutil.h b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-skinnyutil.h deleted file mode 100644 index 83136cb..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-skinnyutil.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNYUTIL_H -#define LW_INTERNAL_SKINNYUTIL_H - -/** - * \file internal-skinnyutil.h - * \brief Utilities to help implement SKINNY and its variants. - */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @cond skinnyutil */ - -/* Utilities for implementing SKINNY-128 */ - -#define skinny128_LFSR2(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ - (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ - } while (0) - - -#define skinny128_LFSR3(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ - (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) -#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) - -#define skinny128_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint32_t row2 = tk[2]; \ - uint32_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 16) | (row3 >> 16); \ - tk[0] = ((row2 >> 8) & 0x000000FFU) | \ - ((row2 << 16) & 0x00FF0000U) | \ - ( row3 & 0xFF00FF00U); \ - tk[1] = ((row2 >> 16) & 0x000000FFU) | \ - (row2 & 0xFF000000U) | \ - ((row3 << 8) & 0x0000FF00U) | \ - ( row3 & 0x00FF0000U); \ - } while (0) - -#define skinny128_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint32_t row0 = tk[0]; \ - uint32_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 >> 16) & 0x000000FFU) | \ - ((row0 << 8) & 0x0000FF00U) | \ - ((row1 << 16) & 0x00FF0000U) | \ - ( row1 & 0xFF000000U); \ - tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ - ((row0 << 16) & 0xFF000000U) | \ - ((row1 >> 16) & 0x000000FFU) | \ - ((row1 << 8) & 0x00FF0000U); \ - } while (0) - -/* - * Apply the SKINNY sbox. The original version from the specification is - * equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE(x) - * ((((x) & 0x01010101U) << 2) | - * (((x) & 0x06060606U) << 5) | - * (((x) & 0x20202020U) >> 5) | - * (((x) & 0xC8C8C8C8U) >> 2) | - * (((x) & 0x10101010U) >> 1)) - * - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * return SBOX_SWAP(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. 
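/*
 * Hedged cross-check sketch (editorial): the "original version from the
 * specification" quoted in the comment above, transcribed into a plain
 * function.  It is only meant as a slow reference against which the
 * optimised skinny128_sbox() macro below can be tested; it requires
 * <stdint.h> and the function name is hypothetical.
 */
static uint32_t skinny128_sbox_reference(uint32_t x)
{
#define SBOX_MIX(x)  (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x))
#define SBOX_SWAP(x) \
    (((x) & 0xF9F9F9F9U) | \
     (((x) >> 1) & 0x02020202U) | \
     (((x) << 1) & 0x04040404U))
#define SBOX_PERMUTE(x) \
    ((((x) & 0x01010101U) << 2) | \
     (((x) & 0x06060606U) << 5) | \
     (((x) & 0x20202020U) >> 5) | \
     (((x) & 0xC8C8C8C8U) >> 2) | \
     (((x) & 0x10101010U) >> 1))
    /* Same sequence as described above: MIX and PERMUTE three times,
     * one final MIX, then the SWAP step */
    x = SBOX_MIX(x);
    x = SBOX_PERMUTE(x);
    x = SBOX_MIX(x);
    x = SBOX_PERMUTE(x);
    x = SBOX_MIX(x);
    x = SBOX_PERMUTE(x);
    x = SBOX_MIX(x);
    return SBOX_SWAP(x);
#undef SBOX_MIX
#undef SBOX_SWAP
#undef SBOX_PERMUTE
}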
- */ -#define skinny128_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ - y = (((x >> 5) & (x << 1)) & 0x04040404U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ - x = ((x & 0x08080808U) << 1) | \ - ((x & 0x32323232U) << 2) | \ - ((x & 0x01010101U) << 5) | \ - ((x & 0x80808080U) >> 6) | \ - ((x & 0x40404040U) >> 4) | \ - ((x & 0x04040404U) >> 2); \ -} while (0) - -/* - * Apply the inverse of the SKINNY sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE_INV(x) - * ((((x) & 0x08080808U) << 1) | - * (((x) & 0x32323232U) << 2) | - * (((x) & 0x01010101U) << 5) | - * (((x) & 0xC0C0C0C0U) >> 5) | - * (((x) & 0x04040404U) >> 2)) - * - * x = SBOX_SWAP(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_inv_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ - x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ - y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ - x = ((x & 0x01010101U) << 2) | \ - ((x & 0x04040404U) << 4) | \ - ((x & 0x02020202U) << 6) | \ - ((x & 0x20202020U) >> 5) | \ - ((x & 0xC8C8C8C8U) >> 2) | \ - ((x & 0x10101010U) >> 1); \ -} while (0) - -/* Utilities for implementing SKINNY-64 */ - -#define skinny64_LFSR2(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ - } while (0) - -#define skinny64_LFSR3(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) -#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) - -#define skinny64_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint16_t row2 = tk[2]; \ - uint16_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 8) | (row3 >> 8); \ - tk[0] = ((row2 << 4) & 0xF000U) | \ - ((row2 >> 8) & 0x00F0U) | \ - ( row3 & 0x0F0FU); \ - tk[1] = ((row2 << 8) & 0xF000U) | \ - ((row3 >> 4) & 0x0F00U) | \ - ( row3 & 0x00F0U) | \ - ( row2 & 0x000FU); \ - } while (0) - 
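/*
 * Hedged reference sketch (editorial): the PT permutation listed in the
 * comments above, applied cell by cell with one cell per byte, assuming the
 * convention that new cell i takes the value of old cell PT[i] (which matches
 * the row moves performed by the packed macros, e.g. rows 2 and 3 receiving
 * the old rows 0 and 1).  The function name is hypothetical.
 */
static void skinny_tk_permute_cells(unsigned char cells[16])
{
    static const unsigned char PT[16] = {
        9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7
    };
    unsigned char old_cells[16];
    unsigned i;
    for (i = 0; i < 16; ++i)
        old_cells[i] = cells[i];
    for (i = 0; i < 16; ++i)
        cells[i] = old_cells[PT[i]];
}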
-#define skinny64_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint16_t row0 = tk[0]; \ - uint16_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 << 8) & 0xF000U) | \ - ((row0 >> 4) & 0x0F00U) | \ - ((row1 >> 8) & 0x00F0U) | \ - ( row1 & 0x000FU); \ - tk[3] = ((row1 << 8) & 0xF000U) | \ - ((row0 << 8) & 0x0F00U) | \ - ((row1 >> 4) & 0x00F0U) | \ - ((row0 >> 8) & 0x000FU); \ - } while (0) - -/* - * Apply the SKINNY-64 sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT(x) - * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_SHIFT steps to be performed with one final rotation. - * This reduces the number of required shift operations from 14 to 10. - * - * We can further reduce the number of NOT operations from 4 to 2 - * using the technique from https://github.com/kste/skinny_avx to - * convert NOR-XOR operations into AND-XOR operations by converting - * the S-box into its NOT-inverse. - */ -#define skinny64_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ - x = ~x; \ - x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ -} while (0) - -/* - * Apply the inverse of the SKINNY-64 sbox. The original version - * from the specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT_INV(x) - * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * return SBOX_MIX(x); - */ -#define skinny64_inv_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = ~x; \ - x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ -} while (0) - -/** @endcond */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-util.h b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
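/*
 * Hedged self-check sketch (editorial): if skinny64_sbox() and
 * skinny64_inv_sbox() above are exact inverses, applying one after the other
 * must return every 16-bit input unchanged.  Intended as a quick host-side
 * test only; requires <stdint.h>, and the function name is hypothetical.
 */
static int skinny64_sbox_roundtrip_ok(void)
{
    uint32_t v;
    for (v = 0; v <= 0xFFFFU; ++v) {
        uint16_t x = (uint16_t)v;
        skinny64_sbox(x);
        skinny64_inv_sbox(x);
        if (x != (uint16_t)v)
            return 0;   /* the two macros are not inverses */
    }
    return 1;           /* round trip succeeded for all inputs */
}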
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - 
(ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. 
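/*
 * Hedged usage sketch (editorial): the XOR helpers above are plain byte
 * loops, so XOR-ing the same keystream in twice restores the original
 * buffer.  All values and the function name are illustrative only.
 */
static void demo_lw_xor_block(void)
{
    unsigned char msg[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    unsigned char keystream[8] = {0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22};
    unsigned char ct[8];

    lw_xor_block_2_src(ct, msg, keystream, 8); /* ct = msg XOR keystream */
    lw_xor_block(ct, keystream, 8);            /* ct ^= keystream, back to msg */
}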
This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/skinny-hash.c b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/skinny-hash.c deleted file mode 100644 index 0abdeff..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/skinny-hash.c +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "skinny-hash.h" -#include "internal-skinny128.h" -#include "internal-util.h" -#include - -aead_hash_algorithm_t const skinny_tk3_hash_algorithm = { - "SKINNY-tk3-HASH", - sizeof(int), - SKINNY_HASH_SIZE, - AEAD_FLAG_NONE, - skinny_tk3_hash, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const skinny_tk2_hash_algorithm = { - "SKINNY-tk2-HASH", - sizeof(int), - SKINNY_HASH_SIZE, - AEAD_FLAG_NONE, - skinny_tk2_hash, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \brief Size of the permutation state for SKINNY-tk3-HASH. - */ -#define SKINNY_TK3_STATE_SIZE 48 - -/** - * \brief Size of the permutation state for SKINNY-tk2-HASH. - */ -#define SKINNY_TK2_STATE_SIZE 32 - -/** - * \brief Rate of absorbing data for SKINNY-tk3-HASH. - */ -#define SKINNY_TK3_HASH_RATE 16 - -/** - * \brief Rate of absorbing data for SKINNY-tk2-HASH. 
- */ -#define SKINNY_TK2_HASH_RATE 4 - -/** - * \brief Input block that is encrypted with the state for each - * block permutation of SKINNY-tk3-HASH or SKINNY-tk2-HASH. - */ -static unsigned char const skinny_hash_block[48] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 -}; - -/** - * \brief Permutes the internal state for SKINNY-tk3-HASH. - * - * \param state The state to be permuted. - */ -static void skinny_tk3_permute(unsigned char state[SKINNY_TK3_STATE_SIZE]) -{ - unsigned char temp[SKINNY_TK3_STATE_SIZE]; - skinny_128_384_encrypt_tk_full(state, temp, skinny_hash_block); - skinny_128_384_encrypt_tk_full(state, temp + 16, skinny_hash_block + 16); - skinny_128_384_encrypt_tk_full(state, temp + 32, skinny_hash_block + 32); - memcpy(state, temp, SKINNY_TK3_STATE_SIZE); -} - -/** - * \brief Permutes the internal state for SKINNY-tk2-HASH. - * - * \param state The state to be permuted. - */ -static void skinny_tk2_permute(unsigned char state[SKINNY_TK2_STATE_SIZE]) -{ - unsigned char temp[SKINNY_TK2_STATE_SIZE]; - skinny_128_256_encrypt_tk_full(state, temp, skinny_hash_block); - skinny_128_256_encrypt_tk_full(state, temp + 16, skinny_hash_block + 16); - memcpy(state, temp, SKINNY_TK2_STATE_SIZE); -} - -int skinny_tk3_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - unsigned char state[SKINNY_TK3_STATE_SIZE]; - unsigned temp; - - /* Initialize the hash state */ - memset(state, 0, sizeof(state)); - state[SKINNY_TK3_HASH_RATE] = 0x80; - - /* Process as many full blocks as possible */ - while (inlen >= SKINNY_TK3_HASH_RATE) { - lw_xor_block(state, in, SKINNY_TK3_HASH_RATE); - skinny_tk3_permute(state); - in += SKINNY_TK3_HASH_RATE; - inlen -= SKINNY_TK3_HASH_RATE; - } - - /* Pad and process the last block */ - temp = (unsigned)inlen; - lw_xor_block(state, in, temp); - state[temp] ^= 0x80; /* padding */ - skinny_tk3_permute(state); - - /* Generate the hash output */ - memcpy(out, state, 16); - skinny_tk3_permute(state); - memcpy(out + 16, state, 16); - return 0; -} - -int skinny_tk2_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - unsigned char state[SKINNY_TK2_STATE_SIZE]; - unsigned temp; - - /* Initialize the hash state */ - memset(state, 0, sizeof(state)); - state[SKINNY_TK2_HASH_RATE] = 0x80; - - /* Process as many full blocks as possible */ - while (inlen >= SKINNY_TK2_HASH_RATE) { - lw_xor_block(state, in, SKINNY_TK2_HASH_RATE); - skinny_tk2_permute(state); - in += SKINNY_TK2_HASH_RATE; - inlen -= SKINNY_TK2_HASH_RATE; - } - - /* Pad and process the last block */ - temp = (unsigned)inlen; - lw_xor_block(state, in, temp); - state[temp] ^= 0x80; /* padding */ - skinny_tk2_permute(state); - - /* Generate the hash output */ - memcpy(out, state, 16); - skinny_tk2_permute(state); - memcpy(out + 16, state, 16); - return 0; -} diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/skinny-hash.h b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/skinny-hash.h deleted file mode 100644 index f75ce9f..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys-avr/skinny-hash.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
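/*
 * Hedged usage sketch (editorial): hashing a short message with the
 * all-in-one entry point implemented above.  The message is arbitrary demo
 * data, the printed digest is not a known test vector, and the function
 * name is hypothetical.  Requires <stdio.h>.
 */
static void demo_skinny_tk3_hash(void)
{
    static const unsigned char msg[5] = { 'h', 'e', 'l', 'l', 'o' };
    unsigned char digest[32];   /* 256-bit output, as written by the code above */
    int i;
    if (skinny_tk3_hash(digest, msg, 5) == 0) {
        for (i = 0; i < 32; ++i)
            printf("%02x", digest[i]);
        printf("\n");
    }
}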
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SKINNY_HASH_H -#define LWCRYPTO_SKINNY_HASH_H - -#include "aead-common.h" - -/** - * \file skinny-hash.h - * \brief Hash algorithms based on the SKINNY block cipher. - * - * The SKINNY-AEAD family includes two hash algorithms: - * - * \li SKINNY-tk3-HASH with a 256-bit hash output, based around the - * SKINNY-128-384 tweakable block cipher. This is the primary hashing - * member of the family. - * \li SKINNY-tk2-HASH with a 256-bit hash output, based around the - * SKINNY-128-256 tweakable block cipher. - * - * References: https://sites.google.com/site/skinnycipher/home - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the hash output for SKINNY-tk3-HASH and SKINNY-tk2-HASH. - */ -#define SKINNY_HASH_SIZE 32 - -/** - * \brief Meta-information block for the SKINNY-tk3-HASH algorithm. - */ -extern aead_hash_algorithm_t const skinny_tk3_hash_algorithm; - -/** - * \brief Meta-information block for the SKINNY-tk2-HASH algorithm. - */ -extern aead_hash_algorithm_t const skinny_tk2_hash_algorithm; - -/** - * \brief Hashes a block of input data with SKINNY-tk3-HASH to - * generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * SKINNY_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int skinny_tk3_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with SKINNY-tk2-HASH to - * generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * SKINNY_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. 
- */ -int skinny_tk2_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/aead-common.c b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/aead-common.c new file mode 100644 index 0000000..84fc53a --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/aead-common.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "aead-common.h" + +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = (accum - 1) >> 8; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} + +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size, int precheck) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = ((accum - 1) >> 8) & precheck; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/aead-common.h b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/aead-common.h new file mode 100644 index 0000000..2be95eb --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/aead-common.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
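/*
 * Hedged worked example (editorial) of the masking trick used by
 * aead_check_tag() above: "accum" ORs together every differing tag bit, so it
 * is zero exactly when the tags match; (accum - 1) >> 8 then yields an
 * all-ones mask on a match and zero on a mismatch, assuming the usual
 * arithmetic right shift of a signed int.  The function name is hypothetical
 * and <assert.h> is required.
 */
static void demo_tag_mask(void)
{
    int accum;

    accum = 0x00;               /* tags matched: no differing bits */
    accum = (accum - 1) >> 8;
    assert(accum == -1);        /* plaintext is kept, ~accum == 0 */

    accum = 0x5A;               /* tags differed somewhere */
    accum = (accum - 1) >> 8;
    assert(accum == 0);         /* plaintext is wiped, ~accum == -1 */
}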
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_AEAD_COMMON_H +#define LWCRYPTO_AEAD_COMMON_H + +#include + +/** + * \file aead-common.h + * \brief Definitions that are common across AEAD schemes. + * + * AEAD stands for "Authenticated Encryption with Associated Data". + * It is a standard API pattern for securely encrypting and + * authenticating packets of data. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts and authenticates a packet with an AEAD scheme. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + */ +typedef int (*aead_cipher_encrypt_t) + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with an AEAD scheme. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. 
+ */
+typedef int (*aead_cipher_decrypt_t)
+    (unsigned char *m, unsigned long long *mlen,
+     unsigned char *nsec,
+     const unsigned char *c, unsigned long long clen,
+     const unsigned char *ad, unsigned long long adlen,
+     const unsigned char *npub,
+     const unsigned char *k);
+
+/**
+ * \brief Hashes a block of input data.
+ *
+ * \param out Buffer to receive the hash output.
+ * \param in Points to the input data to be hashed.
+ * \param inlen Length of the input data in bytes.
+ *
+ * \return Returns zero on success or -1 if there was an error in the
+ * parameters.
+ */
+typedef int (*aead_hash_t)
+    (unsigned char *out, const unsigned char *in, unsigned long long inlen);
+
+/**
+ * \brief Initializes the state for a hashing operation.
+ *
+ * \param state Hash state to be initialized.
+ */
+typedef void (*aead_hash_init_t)(void *state);
+
+/**
+ * \brief Updates a hash state with more input data.
+ *
+ * \param state Hash state to be updated.
+ * \param in Points to the input data to be incorporated into the state.
+ * \param inlen Length of the input data to be incorporated into the state.
+ */
+typedef void (*aead_hash_update_t)
+    (void *state, const unsigned char *in, unsigned long long inlen);
+
+/**
+ * \brief Returns the final hash value from a hashing operation.
+ *
+ * \param state Hash state to be finalized.
+ * \param out Points to the output buffer to receive the hash value.
+ */
+typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out);
+
+/**
+ * \brief Absorbs more input data into an XOF state.
+ *
+ * \param state XOF state to be updated.
+ * \param in Points to the input data to be absorbed into the state.
+ * \param inlen Length of the input data to be absorbed into the state.
+ *
+ * \sa ascon_xof_init(), ascon_xof_squeeze()
+ */
+typedef void (*aead_xof_absorb_t)
+    (void *state, const unsigned char *in, unsigned long long inlen);
+
+/**
+ * \brief Squeezes output data from an XOF state.
+ *
+ * \param state XOF state to squeeze the output data from.
+ * \param out Points to the output buffer to receive the squeezed data.
+ * \param outlen Number of bytes of data to squeeze out of the state.
+ */
+typedef void (*aead_xof_squeeze_t)
+    (void *state, unsigned char *out, unsigned long long outlen);
+
+/**
+ * \brief No special AEAD features.
+ */
+#define AEAD_FLAG_NONE 0x0000
+
+/**
+ * \brief The natural byte order of the AEAD cipher is little-endian.
+ *
+ * If this flag is not present, then the natural byte order of the
+ * AEAD cipher should be assumed to be big-endian.
+ *
+ * The natural byte order may be useful when formatting packet sequence
+ * numbers as nonces. The application needs to know whether the sequence
+ * number should be packed into the leading or trailing bytes of the nonce.
+ */
+#define AEAD_FLAG_LITTLE_ENDIAN 0x0001
+
+/**
+ * \brief Meta-information about an AEAD cipher.
+ */
+typedef struct
+{
+    const char *name;               /**< Name of the cipher */
+    unsigned key_len;               /**< Length of the key in bytes */
+    unsigned nonce_len;             /**< Length of the nonce in bytes */
+    unsigned tag_len;               /**< Length of the tag in bytes */
+    unsigned flags;                 /**< Flags for extra features */
+    aead_cipher_encrypt_t encrypt;  /**< AEAD encryption function */
+    aead_cipher_decrypt_t decrypt;  /**< AEAD decryption function */
+
+} aead_cipher_t;
+
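An aead_cipher_t record bundles a cipher's key, nonce and tag lengths with its encrypt and decrypt entry points so that generic driver code (test harnesses, benchmarks) can run any cipher through one interface. The sketch below illustrates that pattern; it is an editorial example, and example_cipher, example_aead_encrypt, example_aead_decrypt and seal_packet are hypothetical names, not functions provided by the patch.

    #include <stddef.h>
    #include "aead-common.h"

    /* Hypothetical cipher entry points with the aead_cipher_encrypt_t /
     * aead_cipher_decrypt_t signatures; any cipher in the library could
     * be slotted in instead. */
    int example_aead_encrypt
        (unsigned char *c, unsigned long long *clen,
         const unsigned char *m, unsigned long long mlen,
         const unsigned char *ad, unsigned long long adlen,
         const unsigned char *nsec, const unsigned char *npub,
         const unsigned char *k);
    int example_aead_decrypt
        (unsigned char *m, unsigned long long *mlen, unsigned char *nsec,
         const unsigned char *c, unsigned long long clen,
         const unsigned char *ad, unsigned long long adlen,
         const unsigned char *npub, const unsigned char *k);

    /* Meta-record describing the hypothetical cipher; the 16-byte key,
     * nonce and tag sizes are assumed values for this illustration. */
    const aead_cipher_t example_cipher = {
        "Example-AEAD",            /* name      */
        16,                        /* key_len   */
        16,                        /* nonce_len */
        16,                        /* tag_len   */
        AEAD_FLAG_LITTLE_ENDIAN,   /* flags     */
        example_aead_encrypt,
        example_aead_decrypt
    };

    /* Generic driver: seals one packet with whatever cipher the
     * meta-record describes; no associated data or secret nonce here. */
    int seal_packet
        (const aead_cipher_t *cipher,
         unsigned char *out, unsigned long long *outlen,
         const unsigned char *msg, unsigned long long msglen,
         const unsigned char *nonce, const unsigned char *key)
    {
        return cipher->encrypt(out, outlen, msg, msglen, NULL, 0, NULL, nonce, key);
    }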
+/**
+ * \brief Meta-information about a hash algorithm that is related to an AEAD.
+ *
+ * Regular hash algorithms should provide the "hash", "init", "update",
+ * and "finalize" functions.  Extensible Output Functions (XOFs) should
+ * provide the "hash", "init", "absorb", and "squeeze" functions.
+ */
+typedef struct
+{
+    const char *name;               /**< Name of the hash algorithm */
+    size_t state_size;              /**< Size of the incremental state structure */
+    unsigned hash_len;              /**< Length of the hash in bytes */
+    unsigned flags;                 /**< Flags for extra features */
+    aead_hash_t hash;               /**< All in one hashing function */
+    aead_hash_init_t init;          /**< Incremental hash/XOF init function */
+    aead_hash_update_t update;      /**< Incremental hash update function */
+    aead_hash_finalize_t finalize;  /**< Incremental hash finalize function */
+    aead_xof_absorb_t absorb;       /**< Incremental XOF absorb function */
+    aead_xof_squeeze_t squeeze;     /**< Incremental XOF squeeze function */
+
+} aead_hash_algorithm_t;
+
+/**
+ * \brief Check an authentication tag in constant time.
+ *
+ * \param plaintext Points to the plaintext data.
+ * \param plaintext_len Length of the plaintext in bytes.
+ * \param tag1 First tag to compare.
+ * \param tag2 Second tag to compare.
+ * \param tag_len Length of the tags in bytes.
+ *
+ * \return Returns -1 if the tag check failed or 0 if the check succeeded.
+ *
+ * If the tag check fails, then the \a plaintext will also be zeroed to
+ * prevent it from being used accidentally by the application when the
+ * ciphertext was invalid.
+ */
+int aead_check_tag
+    (unsigned char *plaintext, unsigned long long plaintext_len,
+     const unsigned char *tag1, const unsigned char *tag2,
+     unsigned tag_len);
+
+/**
+ * \brief Check an authentication tag in constant time with a previous check.
+ *
+ * \param plaintext Points to the plaintext data.
+ * \param plaintext_len Length of the plaintext in bytes.
+ * \param tag1 First tag to compare.
+ * \param tag2 Second tag to compare.
+ * \param tag_len Length of the tags in bytes.
+ * \param precheck Set to -1 if previous check succeeded or 0 if it failed.
+ *
+ * \return Returns -1 if the tag check failed or 0 if the check succeeded.
+ *
+ * If the tag check fails, then the \a plaintext will also be zeroed to
+ * prevent it from being used accidentally by the application when the
+ * ciphertext was invalid.
+ *
+ * This version can be used to incorporate other information about the
+ * correctness of the plaintext into the final result.
+ */ +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len, int precheck); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/api.h b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/api.h new file mode 100644 index 0000000..ae8c7f6 --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/api.h @@ -0,0 +1 @@ +#define CRYPTO_BYTES 32 diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/hash.c b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/hash.c new file mode 100644 index 0000000..e0118e9 --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/hash.c @@ -0,0 +1,8 @@ + +#include "skinny-hash.h" + +int crypto_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + return skinny_tk2_hash(out, in, inlen); +} diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-skinny128-avr.S b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-skinny128-avr.S new file mode 100644 index 0000000..d342cd5 --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-skinny128-avr.S @@ -0,0 +1,10099 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 256 +table_0: + .byte 101 + .byte 76 + .byte 106 + .byte 66 + .byte 75 + .byte 99 + .byte 67 + .byte 107 + .byte 85 + .byte 117 + .byte 90 + .byte 122 + .byte 83 + .byte 115 + .byte 91 + .byte 123 + .byte 53 + .byte 140 + .byte 58 + .byte 129 + .byte 137 + .byte 51 + .byte 128 + .byte 59 + .byte 149 + .byte 37 + .byte 152 + .byte 42 + .byte 144 + .byte 35 + .byte 153 + .byte 43 + .byte 229 + .byte 204 + .byte 232 + .byte 193 + .byte 201 + .byte 224 + .byte 192 + .byte 233 + .byte 213 + .byte 245 + .byte 216 + .byte 248 + .byte 208 + .byte 240 + .byte 217 + .byte 249 + .byte 165 + .byte 28 + .byte 168 + .byte 18 + .byte 27 + .byte 160 + .byte 19 + .byte 169 + .byte 5 + .byte 181 + .byte 10 + .byte 184 + .byte 3 + .byte 176 + .byte 11 + .byte 185 + .byte 50 + .byte 136 + .byte 60 + .byte 133 + .byte 141 + .byte 52 + .byte 132 + .byte 61 + .byte 145 + .byte 34 + .byte 156 + .byte 44 + .byte 148 + .byte 36 + .byte 157 + .byte 45 + .byte 98 + .byte 74 + .byte 108 + .byte 69 + .byte 77 + .byte 100 + .byte 68 + .byte 109 + .byte 82 + .byte 114 + .byte 92 + .byte 124 + .byte 84 + .byte 116 + .byte 93 + .byte 125 + .byte 161 + .byte 26 + .byte 172 + .byte 21 + .byte 29 + .byte 164 + .byte 20 + .byte 173 + .byte 2 + .byte 177 + .byte 12 + .byte 188 + .byte 4 + .byte 180 + .byte 13 + .byte 189 + .byte 225 + .byte 200 + .byte 236 + .byte 197 + .byte 205 + .byte 228 + .byte 196 + .byte 237 + .byte 209 + .byte 241 + .byte 220 + .byte 252 + .byte 212 + .byte 244 + .byte 221 + .byte 253 + .byte 54 + .byte 142 + .byte 56 + .byte 130 + .byte 139 + .byte 48 + .byte 131 + .byte 57 + .byte 150 + .byte 38 + .byte 154 + .byte 40 + .byte 147 + .byte 32 + .byte 155 + .byte 41 + .byte 102 + .byte 78 + .byte 104 + .byte 65 + .byte 73 + .byte 96 + .byte 64 + .byte 105 + .byte 86 + .byte 118 + .byte 88 + .byte 120 + .byte 80 + .byte 112 + .byte 89 + .byte 121 + .byte 166 + .byte 30 + .byte 170 + .byte 17 + .byte 25 + .byte 163 + .byte 16 + .byte 171 + .byte 6 + .byte 182 + .byte 8 + .byte 186 + .byte 0 + .byte 179 + .byte 9 + .byte 187 + .byte 230 + .byte 206 + 
.byte 234 + .byte 194 + .byte 203 + .byte 227 + .byte 195 + .byte 235 + .byte 214 + .byte 246 + .byte 218 + .byte 250 + .byte 211 + .byte 243 + .byte 219 + .byte 251 + .byte 49 + .byte 138 + .byte 62 + .byte 134 + .byte 143 + .byte 55 + .byte 135 + .byte 63 + .byte 146 + .byte 33 + .byte 158 + .byte 46 + .byte 151 + .byte 39 + .byte 159 + .byte 47 + .byte 97 + .byte 72 + .byte 110 + .byte 70 + .byte 79 + .byte 103 + .byte 71 + .byte 111 + .byte 81 + .byte 113 + .byte 94 + .byte 126 + .byte 87 + .byte 119 + .byte 95 + .byte 127 + .byte 162 + .byte 24 + .byte 174 + .byte 22 + .byte 31 + .byte 167 + .byte 23 + .byte 175 + .byte 1 + .byte 178 + .byte 14 + .byte 190 + .byte 7 + .byte 183 + .byte 15 + .byte 191 + .byte 226 + .byte 202 + .byte 238 + .byte 198 + .byte 207 + .byte 231 + .byte 199 + .byte 239 + .byte 210 + .byte 242 + .byte 222 + .byte 254 + .byte 215 + .byte 247 + .byte 223 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 256 +table_1: + .byte 172 + .byte 232 + .byte 104 + .byte 60 + .byte 108 + .byte 56 + .byte 168 + .byte 236 + .byte 170 + .byte 174 + .byte 58 + .byte 62 + .byte 106 + .byte 110 + .byte 234 + .byte 238 + .byte 166 + .byte 163 + .byte 51 + .byte 54 + .byte 102 + .byte 99 + .byte 227 + .byte 230 + .byte 225 + .byte 164 + .byte 97 + .byte 52 + .byte 49 + .byte 100 + .byte 161 + .byte 228 + .byte 141 + .byte 201 + .byte 73 + .byte 29 + .byte 77 + .byte 25 + .byte 137 + .byte 205 + .byte 139 + .byte 143 + .byte 27 + .byte 31 + .byte 75 + .byte 79 + .byte 203 + .byte 207 + .byte 133 + .byte 192 + .byte 64 + .byte 21 + .byte 69 + .byte 16 + .byte 128 + .byte 197 + .byte 130 + .byte 135 + .byte 18 + .byte 23 + .byte 66 + .byte 71 + .byte 194 + .byte 199 + .byte 150 + .byte 147 + .byte 3 + .byte 6 + .byte 86 + .byte 83 + .byte 211 + .byte 214 + .byte 209 + .byte 148 + .byte 81 + .byte 4 + .byte 1 + .byte 84 + .byte 145 + .byte 212 + .byte 156 + .byte 216 + .byte 88 + .byte 12 + .byte 92 + .byte 8 + .byte 152 + .byte 220 + .byte 154 + .byte 158 + .byte 10 + .byte 14 + .byte 90 + .byte 94 + .byte 218 + .byte 222 + .byte 149 + .byte 208 + .byte 80 + .byte 5 + .byte 85 + .byte 0 + .byte 144 + .byte 213 + .byte 146 + .byte 151 + .byte 2 + .byte 7 + .byte 82 + .byte 87 + .byte 210 + .byte 215 + .byte 157 + .byte 217 + .byte 89 + .byte 13 + .byte 93 + .byte 9 + .byte 153 + .byte 221 + .byte 155 + .byte 159 + .byte 11 + .byte 15 + .byte 91 + .byte 95 + .byte 219 + .byte 223 + .byte 22 + .byte 19 + .byte 131 + .byte 134 + .byte 70 + .byte 67 + .byte 195 + .byte 198 + .byte 65 + .byte 20 + .byte 193 + .byte 132 + .byte 17 + .byte 68 + .byte 129 + .byte 196 + .byte 28 + .byte 72 + .byte 200 + .byte 140 + .byte 76 + .byte 24 + .byte 136 + .byte 204 + .byte 26 + .byte 30 + .byte 138 + .byte 142 + .byte 74 + .byte 78 + .byte 202 + .byte 206 + .byte 53 + .byte 96 + .byte 224 + .byte 165 + .byte 101 + .byte 48 + .byte 160 + .byte 229 + .byte 50 + .byte 55 + .byte 162 + .byte 167 + .byte 98 + .byte 103 + .byte 226 + .byte 231 + .byte 61 + .byte 105 + .byte 233 + .byte 173 + .byte 109 + .byte 57 + .byte 169 + .byte 237 + .byte 59 + .byte 63 + .byte 171 + .byte 175 + .byte 107 + .byte 111 + .byte 235 + .byte 239 + .byte 38 + .byte 35 + .byte 179 + .byte 182 + .byte 118 + .byte 115 + .byte 243 + .byte 246 + .byte 113 + .byte 36 + .byte 241 + .byte 180 + .byte 33 + .byte 116 + .byte 177 + .byte 244 + .byte 44 + .byte 120 + .byte 248 + .byte 188 + .byte 124 + .byte 40 + .byte 184 + .byte 252 + .byte 42 + .byte 46 + .byte 186 + .byte 190 
+ .byte 122 + .byte 126 + .byte 250 + .byte 254 + .byte 37 + .byte 112 + .byte 240 + .byte 181 + .byte 117 + .byte 32 + .byte 176 + .byte 245 + .byte 34 + .byte 39 + .byte 178 + .byte 183 + .byte 114 + .byte 119 + .byte 242 + .byte 247 + .byte 45 + .byte 121 + .byte 249 + .byte 189 + .byte 125 + .byte 41 + .byte 185 + .byte 253 + .byte 43 + .byte 47 + .byte 187 + .byte 191 + .byte 123 + .byte 127 + .byte 251 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_2, @object + .size table_2, 256 +table_2: + .byte 0 + .byte 2 + .byte 4 + .byte 6 + .byte 8 + .byte 10 + .byte 12 + .byte 14 + .byte 16 + .byte 18 + .byte 20 + .byte 22 + .byte 24 + .byte 26 + .byte 28 + .byte 30 + .byte 32 + .byte 34 + .byte 36 + .byte 38 + .byte 40 + .byte 42 + .byte 44 + .byte 46 + .byte 48 + .byte 50 + .byte 52 + .byte 54 + .byte 56 + .byte 58 + .byte 60 + .byte 62 + .byte 65 + .byte 67 + .byte 69 + .byte 71 + .byte 73 + .byte 75 + .byte 77 + .byte 79 + .byte 81 + .byte 83 + .byte 85 + .byte 87 + .byte 89 + .byte 91 + .byte 93 + .byte 95 + .byte 97 + .byte 99 + .byte 101 + .byte 103 + .byte 105 + .byte 107 + .byte 109 + .byte 111 + .byte 113 + .byte 115 + .byte 117 + .byte 119 + .byte 121 + .byte 123 + .byte 125 + .byte 127 + .byte 128 + .byte 130 + .byte 132 + .byte 134 + .byte 136 + .byte 138 + .byte 140 + .byte 142 + .byte 144 + .byte 146 + .byte 148 + .byte 150 + .byte 152 + .byte 154 + .byte 156 + .byte 158 + .byte 160 + .byte 162 + .byte 164 + .byte 166 + .byte 168 + .byte 170 + .byte 172 + .byte 174 + .byte 176 + .byte 178 + .byte 180 + .byte 182 + .byte 184 + .byte 186 + .byte 188 + .byte 190 + .byte 193 + .byte 195 + .byte 197 + .byte 199 + .byte 201 + .byte 203 + .byte 205 + .byte 207 + .byte 209 + .byte 211 + .byte 213 + .byte 215 + .byte 217 + .byte 219 + .byte 221 + .byte 223 + .byte 225 + .byte 227 + .byte 229 + .byte 231 + .byte 233 + .byte 235 + .byte 237 + .byte 239 + .byte 241 + .byte 243 + .byte 245 + .byte 247 + .byte 249 + .byte 251 + .byte 253 + .byte 255 + .byte 1 + .byte 3 + .byte 5 + .byte 7 + .byte 9 + .byte 11 + .byte 13 + .byte 15 + .byte 17 + .byte 19 + .byte 21 + .byte 23 + .byte 25 + .byte 27 + .byte 29 + .byte 31 + .byte 33 + .byte 35 + .byte 37 + .byte 39 + .byte 41 + .byte 43 + .byte 45 + .byte 47 + .byte 49 + .byte 51 + .byte 53 + .byte 55 + .byte 57 + .byte 59 + .byte 61 + .byte 63 + .byte 64 + .byte 66 + .byte 68 + .byte 70 + .byte 72 + .byte 74 + .byte 76 + .byte 78 + .byte 80 + .byte 82 + .byte 84 + .byte 86 + .byte 88 + .byte 90 + .byte 92 + .byte 94 + .byte 96 + .byte 98 + .byte 100 + .byte 102 + .byte 104 + .byte 106 + .byte 108 + .byte 110 + .byte 112 + .byte 114 + .byte 116 + .byte 118 + .byte 120 + .byte 122 + .byte 124 + .byte 126 + .byte 129 + .byte 131 + .byte 133 + .byte 135 + .byte 137 + .byte 139 + .byte 141 + .byte 143 + .byte 145 + .byte 147 + .byte 149 + .byte 151 + .byte 153 + .byte 155 + .byte 157 + .byte 159 + .byte 161 + .byte 163 + .byte 165 + .byte 167 + .byte 169 + .byte 171 + .byte 173 + .byte 175 + .byte 177 + .byte 179 + .byte 181 + .byte 183 + .byte 185 + .byte 187 + .byte 189 + .byte 191 + .byte 192 + .byte 194 + .byte 196 + .byte 198 + .byte 200 + .byte 202 + .byte 204 + .byte 206 + .byte 208 + .byte 210 + .byte 212 + .byte 214 + .byte 216 + .byte 218 + .byte 220 + .byte 222 + .byte 224 + .byte 226 + .byte 228 + .byte 230 + .byte 232 + .byte 234 + .byte 236 + .byte 238 + .byte 240 + .byte 242 + .byte 244 + .byte 246 + .byte 248 + .byte 250 + .byte 252 + .byte 254 + + .section .progmem.data,"a",@progbits + .p2align 8 + 
.type table_3, @object + .size table_3, 256 +table_3: + .byte 0 + .byte 128 + .byte 1 + .byte 129 + .byte 2 + .byte 130 + .byte 3 + .byte 131 + .byte 4 + .byte 132 + .byte 5 + .byte 133 + .byte 6 + .byte 134 + .byte 7 + .byte 135 + .byte 8 + .byte 136 + .byte 9 + .byte 137 + .byte 10 + .byte 138 + .byte 11 + .byte 139 + .byte 12 + .byte 140 + .byte 13 + .byte 141 + .byte 14 + .byte 142 + .byte 15 + .byte 143 + .byte 16 + .byte 144 + .byte 17 + .byte 145 + .byte 18 + .byte 146 + .byte 19 + .byte 147 + .byte 20 + .byte 148 + .byte 21 + .byte 149 + .byte 22 + .byte 150 + .byte 23 + .byte 151 + .byte 24 + .byte 152 + .byte 25 + .byte 153 + .byte 26 + .byte 154 + .byte 27 + .byte 155 + .byte 28 + .byte 156 + .byte 29 + .byte 157 + .byte 30 + .byte 158 + .byte 31 + .byte 159 + .byte 160 + .byte 32 + .byte 161 + .byte 33 + .byte 162 + .byte 34 + .byte 163 + .byte 35 + .byte 164 + .byte 36 + .byte 165 + .byte 37 + .byte 166 + .byte 38 + .byte 167 + .byte 39 + .byte 168 + .byte 40 + .byte 169 + .byte 41 + .byte 170 + .byte 42 + .byte 171 + .byte 43 + .byte 172 + .byte 44 + .byte 173 + .byte 45 + .byte 174 + .byte 46 + .byte 175 + .byte 47 + .byte 176 + .byte 48 + .byte 177 + .byte 49 + .byte 178 + .byte 50 + .byte 179 + .byte 51 + .byte 180 + .byte 52 + .byte 181 + .byte 53 + .byte 182 + .byte 54 + .byte 183 + .byte 55 + .byte 184 + .byte 56 + .byte 185 + .byte 57 + .byte 186 + .byte 58 + .byte 187 + .byte 59 + .byte 188 + .byte 60 + .byte 189 + .byte 61 + .byte 190 + .byte 62 + .byte 191 + .byte 63 + .byte 64 + .byte 192 + .byte 65 + .byte 193 + .byte 66 + .byte 194 + .byte 67 + .byte 195 + .byte 68 + .byte 196 + .byte 69 + .byte 197 + .byte 70 + .byte 198 + .byte 71 + .byte 199 + .byte 72 + .byte 200 + .byte 73 + .byte 201 + .byte 74 + .byte 202 + .byte 75 + .byte 203 + .byte 76 + .byte 204 + .byte 77 + .byte 205 + .byte 78 + .byte 206 + .byte 79 + .byte 207 + .byte 80 + .byte 208 + .byte 81 + .byte 209 + .byte 82 + .byte 210 + .byte 83 + .byte 211 + .byte 84 + .byte 212 + .byte 85 + .byte 213 + .byte 86 + .byte 214 + .byte 87 + .byte 215 + .byte 88 + .byte 216 + .byte 89 + .byte 217 + .byte 90 + .byte 218 + .byte 91 + .byte 219 + .byte 92 + .byte 220 + .byte 93 + .byte 221 + .byte 94 + .byte 222 + .byte 95 + .byte 223 + .byte 224 + .byte 96 + .byte 225 + .byte 97 + .byte 226 + .byte 98 + .byte 227 + .byte 99 + .byte 228 + .byte 100 + .byte 229 + .byte 101 + .byte 230 + .byte 102 + .byte 231 + .byte 103 + .byte 232 + .byte 104 + .byte 233 + .byte 105 + .byte 234 + .byte 106 + .byte 235 + .byte 107 + .byte 236 + .byte 108 + .byte 237 + .byte 109 + .byte 238 + .byte 110 + .byte 239 + .byte 111 + .byte 240 + .byte 112 + .byte 241 + .byte 113 + .byte 242 + .byte 114 + .byte 243 + .byte 115 + .byte 244 + .byte 116 + .byte 245 + .byte 117 + .byte 246 + .byte 118 + .byte 247 + .byte 119 + .byte 248 + .byte 120 + .byte 249 + .byte 121 + .byte 250 + .byte 122 + .byte 251 + .byte 123 + .byte 252 + .byte 124 + .byte 253 + .byte 125 + .byte 254 + .byte 126 + .byte 255 + .byte 127 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_4, @object + .size table_4, 112 +table_4: + .byte 1 + .byte 0 + .byte 3 + .byte 0 + .byte 7 + .byte 0 + .byte 15 + .byte 0 + .byte 15 + .byte 1 + .byte 14 + .byte 3 + .byte 13 + .byte 3 + .byte 11 + .byte 3 + .byte 7 + .byte 3 + .byte 15 + .byte 2 + .byte 14 + .byte 1 + .byte 12 + .byte 3 + .byte 9 + .byte 3 + .byte 3 + .byte 3 + .byte 7 + .byte 2 + .byte 14 + .byte 0 + .byte 13 + .byte 1 + .byte 10 + .byte 3 + .byte 5 + .byte 3 + .byte 11 + .byte 2 + .byte 6 + 
.byte 1 + .byte 12 + .byte 2 + .byte 8 + .byte 1 + .byte 0 + .byte 3 + .byte 1 + .byte 2 + .byte 2 + .byte 0 + .byte 5 + .byte 0 + .byte 11 + .byte 0 + .byte 7 + .byte 1 + .byte 14 + .byte 2 + .byte 12 + .byte 1 + .byte 8 + .byte 3 + .byte 1 + .byte 3 + .byte 3 + .byte 2 + .byte 6 + .byte 0 + .byte 13 + .byte 0 + .byte 11 + .byte 1 + .byte 6 + .byte 3 + .byte 13 + .byte 2 + .byte 10 + .byte 1 + .byte 4 + .byte 3 + .byte 9 + .byte 2 + .byte 2 + .byte 1 + .byte 4 + .byte 2 + .byte 8 + .byte 0 + .byte 1 + .byte 1 + .byte 2 + .byte 2 + .byte 4 + .byte 0 + .byte 9 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 2 + .byte 12 + .byte 0 + .byte 9 + .byte 1 + .byte 2 + .byte 3 + .byte 5 + .byte 2 + .byte 10 + .byte 0 + + .text +.global skinny_128_384_init + .type skinny_128_384_init, @function +skinny_128_384_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,12 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_384_init, .-skinny_128_384_init + + .text +.global skinny_128_384_encrypt + .type skinny_128_384_encrypt, @function +skinny_128_384_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + std Y+33,r18 + std Y+34,r19 + std Y+35,r20 + std Y+36,r21 + ldd r18,Z+36 + ldd r19,Z+37 + ldd r20,Z+38 + ldd r21,Z+39 + std Y+37,r18 + std Y+38,r19 + std Y+39,r20 + std Y+40,r21 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + std Y+41,r18 + std Y+42,r19 + std Y+43,r20 + std Y+44,r21 + ldd r18,Z+44 + ldd r19,Z+45 + ldd r20,Z+46 + ldd r21,Z+47 + std Y+45,r18 + std Y+46,r19 + std Y+47,r20 + std Y+48,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +114: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + 
ldd r0,Y+40 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov 
r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov 
r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + 
mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif 
+ mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if 
defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,112 + brne 5721f + rjmp 790f +5721: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm 
r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 114b +790: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + 
pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_encrypt, .-skinny_128_384_encrypt + +.global skinny_128_384_encrypt_tk_full + .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt + + .text +.global skinny_128_384_decrypt + .type skinny_128_384_decrypt, @function +skinny_128_384_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r23 + std Y+2,r2 + std Y+3,r21 + std Y+4,r20 + std Y+5,r3 + std Y+6,r18 + std Y+7,r19 + std Y+8,r22 + std Y+9,r9 + std Y+10,r10 + std Y+11,r7 + std Y+12,r6 + std Y+13,r11 + std Y+14,r4 + std Y+15,r5 + std Y+16,r8 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r23 + std Y+18,r2 + std Y+19,r21 + std Y+20,r20 + std Y+21,r3 + std Y+22,r18 + std Y+23,r19 + std Y+24,r22 + std Y+25,r9 + std Y+26,r10 + std Y+27,r7 + std Y+28,r6 + std Y+29,r11 + std Y+30,r4 + std Y+31,r5 + std Y+32,r8 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r22,Z+36 + ldd r23,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+33,r23 + std Y+34,r2 + std Y+35,r21 + std Y+36,r20 + std Y+37,r3 + std Y+38,r18 + std Y+39,r19 + std Y+40,r22 + std Y+41,r9 + std Y+42,r10 + std Y+43,r7 + std Y+44,r6 + std Y+45,r11 + std Y+46,r4 + std Y+47,r5 + std Y+48,r8 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +122: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm 
+ mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 122b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,28 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +150: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 150b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 +179: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov 
r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 179b + std Y+33,r12 + std Y+34,r13 + std Y+35,r14 + std Y+36,r15 + std Y+37,r24 + std Y+38,r25 + std Y+39,r16 + std Y+40,r17 + ldi r26,28 + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 +207: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 207b + std Y+41,r12 + std Y+42,r13 + std Y+43,r14 + std Y+44,r15 + std Y+45,r24 + std Y+46,r25 + std Y+47,r16 + std Y+48,r17 + ldi r26,112 +227: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif 
defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 
+#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif 
defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 
+ ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out 
_SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r4,r22 + eor 
r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z 
+#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm 
+ mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif 
defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 903f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 227b +903: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_decrypt, .-skinny_128_384_decrypt + + .text +.global skinny_128_256_init + .type skinny_128_256_init, @function +skinny_128_256_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,8 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_256_init, .-skinny_128_256_init + + .text +.global skinny_128_256_encrypt + .type skinny_128_256_encrypt, @function +skinny_128_256_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 
+ std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +82: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if 
defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov 
r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov 
r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif 
defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif 
defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,96 + breq 594f + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if 
defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 82b +594: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_encrypt, .-skinny_128_256_encrypt + +.global skinny_128_256_encrypt_tk_full + .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt + + .text +.global skinny_128_256_decrypt + .type skinny_128_256_decrypt, @function +skinny_128_256_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + std Y+5,r22 + std Y+6,r23 + std Y+7,r2 + std Y+8,r3 + std Y+9,r4 + std Y+10,r5 + std Y+11,r6 + std Y+12,r7 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + std Y+21,r22 + std Y+22,r23 + std Y+23,r2 + std Y+24,r3 + std Y+25,r4 + std Y+26,r5 + std Y+27,r6 + std Y+28,r7 + std Y+29,r8 + std Y+30,r9 + std Y+31,r10 + std Y+32,r11 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,24 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +90: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 90b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,24 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +118: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 118b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,96 +139: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if 
defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 
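+ ; Note: every unrolled block in this file that reads table_0..table_4 uses
+ ; the same idiom: copy one state byte into r30 (the low half of Z) and fetch
+ ; the table entry from program memory.  elpm covers parts with RAMPZ (flash
+ ; above 64K), "lpm Rd,Z" covers parts with the LPMX instructions, ld covers
+ ; __AVR_TINY__ parts where flash is mapped into the data space, and the
+ ; plain lpm-into-r0 form covers everything else.  Keeping r31 and RAMPZ
+ ; fixed between lookups assumes each 256-entry table is 256-byte aligned.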
+ ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + 
lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 
+#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm 
r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z 
+#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 651f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 139b +651: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_decrypt, .-skinny_128_256_decrypt + +#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-skinny128.c b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-skinny128.c new file mode 100644 index 0000000..579ced1 --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-skinny128.c @@ -0,0 +1,801 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "internal-skinny128.h" +#include "internal-skinnyutil.h" +#include "internal-util.h" +#include + +#if !defined(__AVR__) + +STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) +{ + /* This function is used to fast-forward the TK1 tweak value + * to the value at the end of the key schedule for decryption. + * + * The tweak permutation repeats every 16 rounds, so SKINNY-128-256 + * with 48 rounds does not need any fast forwarding applied. + * SKINNY-128-128 with 40 rounds and SKINNY-128-384 with 56 rounds + * are equivalent to applying the permutation 8 times: + * + * PT*8 = [5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12] + */ + uint32_t row0 = tk[0]; + uint32_t row1 = tk[1]; + uint32_t row2 = tk[2]; + uint32_t row3 = tk[3]; + tk[0] = ((row1 >> 8) & 0x0000FFFFU) | + ((row0 >> 8) & 0x00FF0000U) | + ((row0 << 8) & 0xFF000000U); + tk[1] = ((row1 >> 24) & 0x000000FFU) | + ((row0 << 8) & 0x00FFFF00U) | + ((row1 << 24) & 0xFF000000U); + tk[2] = ((row3 >> 8) & 0x0000FFFFU) | + ((row2 >> 8) & 0x00FF0000U) | + ((row2 << 8) & 0xFF000000U); + tk[3] = ((row3 >> 24) & 0x000000FFU) | + ((row2 << 8) & 0x00FFFF00U) | + ((row3 << 24) & 0xFF000000U); +} + +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) +{ +#if !SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint32_t *schedule; + unsigned round; + uint8_t rc; +#endif + +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); + memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); +#else + /* Set the initial states of TK1, TK2, and TK3 */ + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); + TK3[0] = le_load_word32(key + 32); + TK3[1] = le_load_word32(key + 36); + TK3[2] = le_load_word32(key + 40); + TK3[3] = le_load_word32(key + 44); + + /* Set up the key schedule using TK2 and TK3. TK1 is not added + * to the key schedule because we will derive that part of the + * schedule during encryption operations */ + schedule = ks->k; + rc = 0; + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + /* XOR the round constants with the current schedule words. + * The round constants for the 3rd and 4th rows are + * fixed and will be applied during encryption. 
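+ *
+ * The rc update below is a 6-bit LFSR; starting from zero it produces the
+ * sequence 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, 0x37, 0x2F, ...
+ * The low nibble of rc is XORed into the row-0 word and the top two bits
+ * into the row-1 word.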
*/ + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + schedule[0] = TK2[0] ^ TK3[0] ^ (rc & 0x0F); + schedule[1] = TK2[1] ^ TK3[1] ^ (rc >> 4); + + /* Permute TK2 and TK3 for the next round */ + skinny128_permute_tk(TK2); + skinny128_permute_tk(TK3); + + /* Apply the LFSR's to TK2 and TK3 */ + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); + } +#endif +} + +void skinny_128_384_encrypt + (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0; +#else + const uint32_t *schedule = ks->k; +#endif + uint32_t temp; + unsigned round; + + /* Unpack the input block into the state array */ + s0 = le_load_word32(input); + s1 = le_load_word32(input + 4); + s2 = le_load_word32(input + 8); + s3 = le_load_word32(input + 12); + + /* Make a local copy of the tweakable part of the state */ + TK1[0] = le_load_word32(ks->TK1); + TK1[1] = le_load_word32(ks->TK1 + 4); + TK1[2] = le_load_word32(ks->TK1 + 8); + TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif + + /* Perform all encryption rounds */ + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { + /* Apply the S-box to all bytes in the state */ + skinny128_sbox(s0); + skinny128_sbox(s1); + skinny128_sbox(s2); + skinny128_sbox(s3); + + /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else + s0 ^= schedule[0] ^ TK1[0]; + s1 ^= schedule[1] ^ TK1[1]; +#endif + s2 ^= 0x02; + + /* Shift the cells in the rows right, which moves the cell + * values up closer to the MSB. 
That is, we do a left rotate + * on the word to rotate the cells in the word right */ + s1 = leftRotate8(s1); + s2 = leftRotate16(s2); + s3 = leftRotate24(s3); + + /* Mix the columns */ + s1 ^= s2; + s2 ^= s0; + temp = s3 ^ s2; + s3 = s2; + s2 = s1; + s1 = s0; + s0 = temp; + + /* Permute TK1 for the next round */ + skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_permute_tk(TK3); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif + } + + /* Pack the result into the output buffer */ + le_store_word32(output, s0); + le_store_word32(output + 4, s1); + le_store_word32(output + 8, s2); + le_store_word32(output + 12, s3); +} + +void skinny_128_384_decrypt + (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0x15; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); +#endif + uint32_t temp; + unsigned round; + + /* Unpack the input block into the state array */ + s0 = le_load_word32(input); + s1 = le_load_word32(input + 4); + s2 = le_load_word32(input + 8); + s3 = le_load_word32(input + 12); + + /* Make a local copy of the tweakable part of the state, TK1 */ + TK1[0] = le_load_word32(ks->TK1); + TK1[1] = le_load_word32(ks->TK1 + 4); + TK1[2] = le_load_word32(ks->TK1 + 8); + TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif + + /* Permute TK1 to fast-forward it to the end of the key schedule */ + skinny128_fast_forward_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_fast_forward_tk(TK2); + skinny128_fast_forward_tk(TK3); + for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
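+ // Each cell spends half of the rounds in the two LFSR-updated rows, so
+ // over the full 56-round schedule every byte of TK2 and TK3 is stepped
+ // 28 times; this loop (SKINNY_128_384_ROUNDS / 2 iterations) replays
+ // those steps on all four words at once.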
+ skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); + skinny128_LFSR3(TK3[2]); + skinny128_LFSR3(TK3[3]); + } +#endif + + /* Perform all decryption rounds */ + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { + /* Inverse permutation on TK1 for this round */ + skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_inv_permute_tk(TK3); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); + skinny128_LFSR2(TK3[2]); + skinny128_LFSR2(TK3[3]); +#endif + + /* Inverse mix of the columns */ + temp = s3; + s3 = s0; + s0 = s1; + s1 = s2; + s3 ^= temp; + s2 = temp ^ s0; + s1 ^= s2; + + /* Inverse shift of the rows */ + s1 = leftRotate24(s1); + s2 = leftRotate16(s2); + s3 = leftRotate8(s3); + + /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else + s0 ^= schedule[0] ^ TK1[0]; + s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif + s2 ^= 0x02; + + /* Apply the inverse of the S-box to all bytes in the state */ + skinny128_inv_sbox(s0); + skinny128_inv_sbox(s1); + skinny128_inv_sbox(s2); + skinny128_inv_sbox(s3); + } + + /* Pack the result into the output buffer */ + le_store_word32(output, s0); + le_store_word32(output + 4, s1); + le_store_word32(output + 8, s2); + le_store_word32(output + 12, s3); +} + +void skinny_128_384_encrypt_tk2 + (skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, const unsigned char *tk2) +{ + uint32_t s0, s1, s2, s3; + uint32_t TK1[4]; + uint32_t TK2[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK3[4]; + uint8_t rc = 0; +#else + const uint32_t *schedule = ks->k; +#endif + uint32_t temp; + unsigned round; + + /* Unpack the input block into the state array */ + s0 = le_load_word32(input); + s1 = le_load_word32(input + 4); + s2 = le_load_word32(input + 8); + s3 = le_load_word32(input + 12); + + /* Make a local copy of the tweakable part of the state */ + TK1[0] = le_load_word32(ks->TK1); + TK1[1] = le_load_word32(ks->TK1 + 4); + TK1[2] = le_load_word32(ks->TK1 + 8); + TK1[3] = le_load_word32(ks->TK1 + 12); + TK2[0] = le_load_word32(tk2); + TK2[1] = le_load_word32(tk2 + 4); + TK2[2] = le_load_word32(tk2 + 8); + TK2[3] = le_load_word32(tk2 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif + + /* Perform all encryption rounds */ + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { + /* Apply the S-box to all bytes in the state */ + skinny128_sbox(s0); + skinny128_sbox(s1); + skinny128_sbox(s2); + skinny128_sbox(s3); + + /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else + s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; + s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; +#endif + s2 ^= 0x02; + + /* Shift the cells in the rows right, which moves the cell + * values up closer to the MSB. 
That is, we do a left rotate + * on the word to rotate the cells in the word right */ + s1 = leftRotate8(s1); + s2 = leftRotate16(s2); + s3 = leftRotate24(s3); + + /* Mix the columns */ + s1 ^= s2; + s2 ^= s0; + temp = s3 ^ s2; + s3 = s2; + s2 = s1; + s1 = s0; + s0 = temp; + + /* Permute TK1 and TK2 for the next round */ + skinny128_permute_tk(TK1); + skinny128_permute_tk(TK2); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK3); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif + } + + /* Pack the result into the output buffer */ + le_store_word32(output, s0); + le_store_word32(output + 4, s1); + le_store_word32(output + 8, s2); + le_store_word32(output + 12, s3); +} + +void skinny_128_384_encrypt_tk_full + (const unsigned char key[48], unsigned char *output, + const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + uint32_t TK1[4]; + uint32_t TK2[4]; + uint32_t TK3[4]; + uint32_t temp; + unsigned round; + uint8_t rc = 0; + + /* Unpack the input block into the state array */ + s0 = le_load_word32(input); + s1 = le_load_word32(input + 4); + s2 = le_load_word32(input + 8); + s3 = le_load_word32(input + 12); + + /* Make a local copy of the tweakey */ + TK1[0] = le_load_word32(key); + TK1[1] = le_load_word32(key + 4); + TK1[2] = le_load_word32(key + 8); + TK1[3] = le_load_word32(key + 12); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); + TK3[0] = le_load_word32(key + 32); + TK3[1] = le_load_word32(key + 36); + TK3[2] = le_load_word32(key + 40); + TK3[3] = le_load_word32(key + 44); + + /* Perform all encryption rounds */ + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { + /* Apply the S-box to all bytes in the state */ + skinny128_sbox(s0); + skinny128_sbox(s1); + skinny128_sbox(s2); + skinny128_sbox(s3); + + /* XOR the round constant and the subkey for this round */ + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); + s2 ^= 0x02; + + /* Shift the cells in the rows right, which moves the cell + * values up closer to the MSB. 
That is, we do a left rotate + * on the word to rotate the cells in the word right */ + s1 = leftRotate8(s1); + s2 = leftRotate16(s2); + s3 = leftRotate24(s3); + + /* Mix the columns */ + s1 ^= s2; + s2 ^= s0; + temp = s3 ^ s2; + s3 = s2; + s2 = s1; + s1 = s0; + s0 = temp; + + /* Permute TK1, TK2, and TK3 for the next round */ + skinny128_permute_tk(TK1); + skinny128_permute_tk(TK2); + skinny128_permute_tk(TK3); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); + } + + /* Pack the result into the output buffer */ + le_store_word32(output, s0); + le_store_word32(output + 4, s1); + le_store_word32(output + 8, s2); + le_store_word32(output + 12, s3); +} + +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) +{ +#if !SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t *schedule; + unsigned round; + uint8_t rc; +#endif + +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); +#else + /* Set the initial states of TK1 and TK2 */ + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); + + /* Set up the key schedule using TK2. TK1 is not added + * to the key schedule because we will derive that part of the + * schedule during encryption operations */ + schedule = ks->k; + rc = 0; + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { + /* XOR the round constants with the current schedule words. + * The round constants for the 3rd and 4th rows are + * fixed and will be applied during encryption. 
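+ *
+ * As in the 384-bit variant, only rows 0 and 1 of the state absorb
+ * tweakey material; row 2 receives the fixed constant 0x02 in the round
+ * functions (the bare "s2 ^= 0x02") and row 3 receives no constant.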
*/ + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + schedule[0] = TK2[0] ^ (rc & 0x0F); + schedule[1] = TK2[1] ^ (rc >> 4); + + /* Permute TK2 for the next round */ + skinny128_permute_tk(TK2); + + /* Apply the LFSR to TK2 */ + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + } +#endif +} + +void skinny_128_256_encrypt + (const skinny_128_256_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0; +#else + const uint32_t *schedule = ks->k; +#endif + uint32_t temp; + unsigned round; + + /* Unpack the input block into the state array */ + s0 = le_load_word32(input); + s1 = le_load_word32(input + 4); + s2 = le_load_word32(input + 8); + s3 = le_load_word32(input + 12); + + /* Make a local copy of the tweakable part of the state, TK1 */ + TK1[0] = le_load_word32(ks->TK1); + TK1[1] = le_load_word32(ks->TK1 + 4); + TK1[2] = le_load_word32(ks->TK1 + 8); + TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); +#endif + + /* Perform all encryption rounds */ + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { + /* Apply the S-box to all bytes in the state */ + skinny128_sbox(s0); + skinny128_sbox(s1); + skinny128_sbox(s2); + skinny128_sbox(s3); + + /* XOR the round constant and the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else + s0 ^= schedule[0] ^ TK1[0]; + s1 ^= schedule[1] ^ TK1[1]; +#endif + s2 ^= 0x02; + + /* Shift the cells in the rows right, which moves the cell + * values up closer to the MSB. That is, we do a left rotate + * on the word to rotate the cells in the word right */ + s1 = leftRotate8(s1); + s2 = leftRotate16(s2); + s3 = leftRotate24(s3); + + /* Mix the columns */ + s1 ^= s2; + s2 ^= s0; + temp = s3 ^ s2; + s3 = s2; + s2 = s1; + s1 = s0; + s0 = temp; + + /* Permute TK1 and TK2 for the next round */ + skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); +#else + schedule += 2; +#endif + } + + /* Pack the result into the output buffer */ + le_store_word32(output, s0); + le_store_word32(output + 4, s1); + le_store_word32(output + 8, s2); + le_store_word32(output + 12, s3); +} + +void skinny_128_256_decrypt + (const skinny_128_256_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0x09; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); +#endif + uint32_t temp; + unsigned round; + + /* Unpack the input block into the state array */ + s0 = le_load_word32(input); + s1 = le_load_word32(input + 4); + s2 = le_load_word32(input + 8); + s3 = le_load_word32(input + 12); + + /* Make a local copy of the tweakable part of the state, TK1. 
+ * There is no need to fast-forward TK1 because the value at + * the end of the key schedule is the same as at the start */ + TK1[0] = le_load_word32(ks->TK1); + TK1[1] = le_load_word32(ks->TK1 + 4); + TK1[2] = le_load_word32(ks->TK1 + 8); + TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2. + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + } +#endif + + /* Perform all decryption rounds */ + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { + /* Inverse permutation on TK1 for this round */ + skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); +#endif + + /* Inverse mix of the columns */ + temp = s3; + s3 = s0; + s0 = s1; + s1 = s2; + s3 ^= temp; + s2 = temp ^ s0; + s1 ^= s2; + + /* Inverse shift of the rows */ + s1 = leftRotate24(s1); + s2 = leftRotate16(s2); + s3 = leftRotate8(s3); + + /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else + s0 ^= schedule[0] ^ TK1[0]; + s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif + s2 ^= 0x02; + + /* Apply the inverse of the S-box to all bytes in the state */ + skinny128_inv_sbox(s0); + skinny128_inv_sbox(s1); + skinny128_inv_sbox(s2); + skinny128_inv_sbox(s3); + } + + /* Pack the result into the output buffer */ + le_store_word32(output, s0); + le_store_word32(output + 4, s1); + le_store_word32(output + 8, s2); + le_store_word32(output + 12, s3); +} + +void skinny_128_256_encrypt_tk_full + (const unsigned char key[32], unsigned char *output, + const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + uint32_t TK1[4]; + uint32_t TK2[4]; + uint32_t temp; + unsigned round; + uint8_t rc = 0; + + /* Unpack the input block into the state array */ + s0 = le_load_word32(input); + s1 = le_load_word32(input + 4); + s2 = le_load_word32(input + 8); + s3 = le_load_word32(input + 12); + + /* Make a local copy of the tweakey */ + TK1[0] = le_load_word32(key); + TK1[1] = le_load_word32(key + 4); + TK1[2] = le_load_word32(key + 8); + TK1[3] = le_load_word32(key + 12); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); + + /* Perform all encryption rounds */ + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { + /* Apply the S-box to all bytes in the state */ + skinny128_sbox(s0); + skinny128_sbox(s1); + skinny128_sbox(s2); + skinny128_sbox(s3); + + /* XOR the round constant and the subkey for this round */ + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); + s2 ^= 0x02; + + /* Shift the cells in the rows right, which moves the cell + * values up closer to the MSB. 
That is, we do a left rotate + * on the word to rotate the cells in the word right */ + s1 = leftRotate8(s1); + s2 = leftRotate16(s2); + s3 = leftRotate24(s3); + + /* Mix the columns */ + s1 ^= s2; + s2 ^= s0; + temp = s3 ^ s2; + s3 = s2; + s2 = s1; + s1 = s0; + s0 = temp; + + /* Permute TK1 and TK2 for the next round */ + skinny128_permute_tk(TK1); + skinny128_permute_tk(TK2); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + } + + /* Pack the result into the output buffer */ + le_store_word32(output, s0); + le_store_word32(output + 4, s1); + le_store_word32(output + 8, s2); + le_store_word32(output + 12, s3); +} + +#else /* __AVR__ */ + +void skinny_128_384_encrypt_tk2 + (skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, const unsigned char *tk2) +{ + memcpy(ks->TK2, tk2, 16); + skinny_128_384_encrypt(ks, output, input); +} + +#endif /* __AVR__ */ diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-skinny128.h b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-skinny128.h new file mode 100644 index 0000000..2bfda3c --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-skinny128.h @@ -0,0 +1,244 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_SKINNY128_H +#define LW_INTERNAL_SKINNY128_H + +/** + * \file internal-skinny128.h + * \brief SKINNY-128 block cipher family. + * + * References: https://eprint.iacr.org/2016/660.pdf, + * https://sites.google.com/site/skinnycipher/ + */ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \def SKINNY_128_SMALL_SCHEDULE + * \brief Defined to 1 to use the small key schedule version of SKINNY-128. + */ +#if defined(__AVR__) +#define SKINNY_128_SMALL_SCHEDULE 1 +#else +#define SKINNY_128_SMALL_SCHEDULE 0 +#endif + +/** + * \brief Size of a block for SKINNY-128 block ciphers. + */ +#define SKINNY_128_BLOCK_SIZE 16 + +/** + * \brief Number of rounds for SKINNY-128-384. + */ +#define SKINNY_128_384_ROUNDS 56 + +/** + * \brief Structure of the key schedule for SKINNY-128-384. 
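+ *
+ * With SKINNY_128_SMALL_SCHEDULE this structure holds only the three
+ * 16-byte tweakey blocks (48 bytes); with the full schedule it holds TK1
+ * plus 56 * 2 32-bit round words (roughly 464 bytes).
+ *
+ * A minimal usage sketch, matching the declarations below and assuming
+ * the caller fills in key and pt first:
+ *
+ * \code
+ * skinny_128_384_key_schedule_t ks;
+ * unsigned char key[48], pt[16], ct[16];
+ * skinny_128_384_init(&ks, key);
+ * skinny_128_384_encrypt(&ks, ct, pt);
+ * skinny_128_384_decrypt(&ks, pt, ct);
+ * \endcode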
+ */ +typedef struct +{ + /** TK1 for the tweakable part of the key schedule */ + uint8_t TK1[16]; + +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; + + /** TK3 for the small key schedule */ + uint8_t TK3[16]; +#else + /** Words of the full key schedule */ + uint32_t k[SKINNY_128_384_ROUNDS * 2]; +#endif + +} skinny_128_384_key_schedule_t; + +/** + * \brief Initializes the key schedule for SKINNY-128-384. + * + * \param ks Points to the key schedule to initialize. + * \param key Points to the key data. + */ +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); + +/** + * \brief Encrypts a 128-bit block with SKINNY-128-384. + * + * \param ks Points to the SKINNY-128-384 key schedule. + * \param output Output buffer which must be at least 16 bytes in length. + * \param input Input buffer which must be at least 16 bytes in length. + * + * The \a input and \a output buffers can be the same buffer for + * in-place encryption. + */ +void skinny_128_384_encrypt + (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input); + +/** + * \brief Decrypts a 128-bit block with SKINNY-128-384. + * + * \param ks Points to the SKINNY-128-384 key schedule. + * \param output Output buffer which must be at least 16 bytes in length. + * \param input Input buffer which must be at least 16 bytes in length. + * + * The \a input and \a output buffers can be the same buffer for + * in-place encryption. + */ +void skinny_128_384_decrypt + (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input); + +/** + * \brief Encrypts a 128-bit block with SKINNY-128-384 and an explicitly + * provided TK2 value. + * + * \param ks Points to the SKINNY-128-384 key schedule. + * \param output Output buffer which must be at least 16 bytes in length. + * \param input Input buffer which must be at least 16 bytes in length. + * \param tk2 TK2 value that should be updated on the fly. + * + * The \a input and \a output buffers can be the same buffer for + * in-place encryption. + * + * This version is useful when both TK1 and TK2 change from block to block. + * When the key is initialized with skinny_128_384_init(), the TK2 part of + * the key value should be set to zero. + * + * \note Some versions of this function may modify the key schedule to + * copy tk2 into place. + */ +void skinny_128_384_encrypt_tk2 + (skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, const unsigned char *tk2); + +/** + * \brief Encrypts a 128-bit block with SKINNY-128-384 and a + * fully specified tweakey value. + * + * \param key Points to the 384-bit tweakey value. + * \param output Output buffer which must be at least 16 bytes in length. + * \param input Input buffer which must be at least 16 bytes in length. + * + * The \a input and \a output buffers can be the same buffer for + * in-place encryption. + * + * This version is useful when the entire tweakey changes from block to + * block. It is slower than the other versions of SKINNY-128-384 but + * more memory-efficient. + */ +void skinny_128_384_encrypt_tk_full + (const unsigned char key[48], unsigned char *output, + const unsigned char *input); + +/** + * \brief Number of rounds for SKINNY-128-256. + */ +#define SKINNY_128_256_ROUNDS 48 + +/** + * \brief Structure of the key schedule for SKINNY-128-256. 
+ */ +typedef struct +{ + /** TK1 for the tweakable part of the key schedule */ + uint8_t TK1[16]; + +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; +#else + /** Words of the full key schedule */ + uint32_t k[SKINNY_128_256_ROUNDS * 2]; +#endif + +} skinny_128_256_key_schedule_t; + +/** + * \brief Initializes the key schedule for SKINNY-128-256. + * + * \param ks Points to the key schedule to initialize. + * \param key Points to the key data. + */ +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); + +/** + * \brief Encrypts a 128-bit block with SKINNY-128-256. + * + * \param ks Points to the SKINNY-128-256 key schedule. + * \param output Output buffer which must be at least 16 bytes in length. + * \param input Input buffer which must be at least 16 bytes in length. + * + * The \a input and \a output buffers can be the same buffer for + * in-place encryption. + */ +void skinny_128_256_encrypt + (const skinny_128_256_key_schedule_t *ks, unsigned char *output, + const unsigned char *input); + +/** + * \brief Decrypts a 128-bit block with SKINNY-128-256. + * + * \param ks Points to the SKINNY-128-256 key schedule. + * \param output Output buffer which must be at least 16 bytes in length. + * \param input Input buffer which must be at least 16 bytes in length. + * + * The \a input and \a output buffers can be the same buffer for + * in-place encryption. + */ +void skinny_128_256_decrypt + (const skinny_128_256_key_schedule_t *ks, unsigned char *output, + const unsigned char *input); + +/** + * \brief Encrypts a 128-bit block with SKINNY-128-256 and a + * fully specified tweakey value. + * + * \param key Points to the 256-bit tweakey value. + * \param output Output buffer which must be at least 16 bytes in length. + * \param input Input buffer which must be at least 16 bytes in length. + * + * The \a input and \a output buffers can be the same buffer for + * in-place encryption. + * + * This version is useful when the entire tweakey changes from block to + * block. It is slower than the other versions of SKINNY-128-256 but + * more memory-efficient. + */ +void skinny_128_256_encrypt_tk_full + (const unsigned char key[32], unsigned char *output, + const unsigned char *input); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-skinnyutil.h b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-skinnyutil.h new file mode 100644 index 0000000..83136cb --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-skinnyutil.h @@ -0,0 +1,328 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
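/*
 * Minimal usage sketch for the one-shot SKINNY-128-256 interface declared
 * above.  The function name and the all-zero buffers are illustrative only;
 * a real caller supplies its own 32-byte tweakey and message block.
 */
#include <string.h>
#include "internal-skinny128.h"

static void skinny_128_256_tk_full_example(void)
{
    unsigned char tweakey[32];                  /* TK1 || TK2 */
    unsigned char block[SKINNY_128_BLOCK_SIZE]; /* plaintext block */
    unsigned char out[SKINNY_128_BLOCK_SIZE];   /* ciphertext block */

    memset(tweakey, 0, sizeof(tweakey));
    memset(block, 0, sizeof(block));

    /* Encrypt one 16-byte block under the fully specified 256-bit tweakey */
    skinny_128_256_encrypt_tk_full(tweakey, out, block);
}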
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_SKINNYUTIL_H +#define LW_INTERNAL_SKINNYUTIL_H + +/** + * \file internal-skinnyutil.h + * \brief Utilities to help implement SKINNY and its variants. + */ + +#include "internal-util.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @cond skinnyutil */ + +/* Utilities for implementing SKINNY-128 */ + +#define skinny128_LFSR2(x) \ + do { \ + uint32_t _x = (x); \ + (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ + (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ + } while (0) + + +#define skinny128_LFSR3(x) \ + do { \ + uint32_t _x = (x); \ + (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ + (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ + } while (0) + +/* LFSR2 and LFSR3 are inverses of each other */ +#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) +#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) + +#define skinny128_permute_tk(tk) \ + do { \ + /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ + uint32_t row2 = tk[2]; \ + uint32_t row3 = tk[3]; \ + tk[2] = tk[0]; \ + tk[3] = tk[1]; \ + row3 = (row3 << 16) | (row3 >> 16); \ + tk[0] = ((row2 >> 8) & 0x000000FFU) | \ + ((row2 << 16) & 0x00FF0000U) | \ + ( row3 & 0xFF00FF00U); \ + tk[1] = ((row2 >> 16) & 0x000000FFU) | \ + (row2 & 0xFF000000U) | \ + ((row3 << 8) & 0x0000FF00U) | \ + ( row3 & 0x00FF0000U); \ + } while (0) + +#define skinny128_inv_permute_tk(tk) \ + do { \ + /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ + uint32_t row0 = tk[0]; \ + uint32_t row1 = tk[1]; \ + tk[0] = tk[2]; \ + tk[1] = tk[3]; \ + tk[2] = ((row0 >> 16) & 0x000000FFU) | \ + ((row0 << 8) & 0x0000FF00U) | \ + ((row1 << 16) & 0x00FF0000U) | \ + ( row1 & 0xFF000000U); \ + tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ + ((row0 << 16) & 0xFF000000U) | \ + ((row1 >> 16) & 0x000000FFU) | \ + ((row1 << 8) & 0x00FF0000U); \ + } while (0) + +/* + * Apply the SKINNY sbox. The original version from the specification is + * equivalent to: + * + * #define SBOX_MIX(x) + * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) + * #define SBOX_SWAP(x) + * (((x) & 0xF9F9F9F9U) | + * (((x) >> 1) & 0x02020202U) | + * (((x) << 1) & 0x04040404U)) + * #define SBOX_PERMUTE(x) + * ((((x) & 0x01010101U) << 2) | + * (((x) & 0x06060606U) << 5) | + * (((x) & 0x20202020U) >> 5) | + * (((x) & 0xC8C8C8C8U) >> 2) | + * (((x) & 0x10101010U) >> 1)) + * + * x = SBOX_MIX(x); + * x = SBOX_PERMUTE(x); + * x = SBOX_MIX(x); + * x = SBOX_PERMUTE(x); + * x = SBOX_MIX(x); + * x = SBOX_PERMUTE(x); + * x = SBOX_MIX(x); + * return SBOX_SWAP(x); + * + * However, we can mix the bits in their original positions and then + * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one + * final permutation. This reduces the number of shift operations.
+ */ +#define skinny128_sbox(x) \ +do { \ + uint32_t y; \ + \ + /* Mix the bits */ \ + x = ~x; \ + x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ + y = (((x << 5) & (x << 1)) & 0x20202020U); \ + x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ + y = (((x << 2) & (x << 1)) & 0x80808080U); \ + x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ + y = (((x >> 5) & (x << 1)) & 0x04040404U); \ + x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ + x = ~x; \ + \ + /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ + /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ + x = ((x & 0x08080808U) << 1) | \ + ((x & 0x32323232U) << 2) | \ + ((x & 0x01010101U) << 5) | \ + ((x & 0x80808080U) >> 6) | \ + ((x & 0x40404040U) >> 4) | \ + ((x & 0x04040404U) >> 2); \ +} while (0) + +/* + * Apply the inverse of the SKINNY sbox. The original version from the + * specification is equivalent to: + * + * #define SBOX_MIX(x) + * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) + * #define SBOX_SWAP(x) + * (((x) & 0xF9F9F9F9U) | + * (((x) >> 1) & 0x02020202U) | + * (((x) << 1) & 0x04040404U)) + * #define SBOX_PERMUTE_INV(x) + * ((((x) & 0x08080808U) << 1) | + * (((x) & 0x32323232U) << 2) | + * (((x) & 0x01010101U) << 5) | + * (((x) & 0xC0C0C0C0U) >> 5) | + * (((x) & 0x04040404U) >> 2)) + * + * x = SBOX_SWAP(x); + * x = SBOX_MIX(x); + * x = SBOX_PERMUTE_INV(x); + * x = SBOX_MIX(x); + * x = SBOX_PERMUTE_INV(x); + * x = SBOX_MIX(x); + * x = SBOX_PERMUTE_INV(x); + * return SBOX_MIX(x); + * + * However, we can mix the bits in their original positions and then + * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one + * final permutation. This reduces the number of shift operations. + */ +#define skinny128_inv_sbox(x) \ +do { \ + uint32_t y; \ + \ + /* Mix the bits */ \ + x = ~x; \ + y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ + x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ + y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ + x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ + y = (((x << 2) & (x << 1)) & 0x80808080U); \ + x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ + y = (((x << 5) & (x << 1)) & 0x20202020U); \ + x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ + x = ~x; \ + \ + /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ + /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ + x = ((x & 0x01010101U) << 2) | \ + ((x & 0x04040404U) << 4) | \ + ((x & 0x02020202U) << 6) | \ + ((x & 0x20202020U) >> 5) | \ + ((x & 0xC8C8C8C8U) >> 2) | \ + ((x & 0x10101010U) >> 1); \ +} while (0) + +/* Utilities for implementing SKINNY-64 */ + +#define skinny64_LFSR2(x) \ + do { \ + uint16_t _x = (x); \ + (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ + } while (0) + +#define skinny64_LFSR3(x) \ + do { \ + uint16_t _x = (x); \ + (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ + } while (0) + +/* LFSR2 and LFSR3 are inverses of each other */ +#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) +#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) + +#define skinny64_permute_tk(tk) \ + do { \ + /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ + uint16_t row2 = tk[2]; \ + uint16_t row3 = tk[3]; \ + tk[2] = tk[0]; \ + tk[3] = tk[1]; \ + row3 = (row3 << 8) | (row3 >> 8); \ + tk[0] = ((row2 << 4) & 0xF000U) | \ + ((row2 >> 8) & 0x00F0U) | \ + ( row3 & 0x0F0FU); \ + tk[1] = ((row2 << 8) & 0xF000U) | \ + ((row3 >> 4) & 0x0F00U) | \ + ( row3 & 0x00F0U) | \ + ( row2 & 0x000FU); \ + } while (0) +
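/*
 * Small self-check sketch for the SKINNY-128 tweakey permutation macros
 * defined earlier in this header: skinny128_inv_permute_tk() undoes
 * skinny128_permute_tk(), so a permute/inverse round trip must return the
 * original TK words.  The helper name and test values are arbitrary.
 */
static int skinny128_permute_tk_round_trip(void)
{
    uint32_t tk[4] = {0x03020100U, 0x07060504U, 0x0B0A0908U, 0x0F0E0D0CU};
    const uint32_t saved[4] = {0x03020100U, 0x07060504U, 0x0B0A0908U, 0x0F0E0D0CU};

    skinny128_permute_tk(tk);
    skinny128_inv_permute_tk(tk);

    /* Expected result: 1 (the round trip is the identity) */
    return tk[0] == saved[0] && tk[1] == saved[1] &&
           tk[2] == saved[2] && tk[3] == saved[3];
}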
+#define skinny64_inv_permute_tk(tk) \ + do { \ + /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ + uint16_t row0 = tk[0]; \ + uint16_t row1 = tk[1]; \ + tk[0] = tk[2]; \ + tk[1] = tk[3]; \ + tk[2] = ((row0 << 8) & 0xF000U) | \ + ((row0 >> 4) & 0x0F00U) | \ + ((row1 >> 8) & 0x00F0U) | \ + ( row1 & 0x000FU); \ + tk[3] = ((row1 << 8) & 0xF000U) | \ + ((row0 << 8) & 0x0F00U) | \ + ((row1 >> 4) & 0x00F0U) | \ + ((row0 >> 8) & 0x000FU); \ + } while (0) + +/* + * Apply the SKINNY-64 sbox. The original version from the + * specification is equivalent to: + * + * #define SBOX_MIX(x) + * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) + * #define SBOX_SHIFT(x) + * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) + * + * x = SBOX_MIX(x); + * x = SBOX_SHIFT(x); + * x = SBOX_MIX(x); + * x = SBOX_SHIFT(x); + * x = SBOX_MIX(x); + * x = SBOX_SHIFT(x); + * return SBOX_MIX(x); + * + * However, we can mix the bits in their original positions and then + * delay the SBOX_SHIFT steps to be performed with one final rotation. + * This reduces the number of required shift operations from 14 to 10. + * + * We can further reduce the number of NOT operations from 4 to 2 + * using the technique from https://github.com/kste/skinny_avx to + * convert NOR-XOR operations into AND-XOR operations by converting + * the S-box into its NOT-inverse. + */ +#define skinny64_sbox(x) \ +do { \ + x = ~x; \ + x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ + x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ + x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ + x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ + x = ~x; \ + x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ +} while (0) + +/* + * Apply the inverse of the SKINNY-64 sbox. The original version + * from the specification is equivalent to: + * + * #define SBOX_MIX(x) + * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) + * #define SBOX_SHIFT_INV(x) + * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) + * + * x = SBOX_MIX(x); + * x = SBOX_SHIFT_INV(x); + * x = SBOX_MIX(x); + * x = SBOX_SHIFT_INV(x); + * x = SBOX_MIX(x); + * x = SBOX_SHIFT_INV(x); + * return SBOX_MIX(x); + */ +#define skinny64_inv_sbox(x) \ +do { \ + x = ~x; \ + x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ + x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ + x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ + x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ + x = ~x; \ + x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ +} while (0) + +/** @endcond */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-util.h b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-util.h new file mode 100644 index 0000000..e30166d --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/internal-util.h @@ -0,0 +1,702 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_UTIL_H +#define LW_INTERNAL_UTIL_H + +#include <stdint.h> + +/* Figure out how to inline functions using this C compiler */ +#if defined(__STDC__) && __STDC_VERSION__ >= 199901L +#define STATIC_INLINE static inline +#elif defined(__GNUC__) || defined(__clang__) +#define STATIC_INLINE static __inline__ +#else +#define STATIC_INLINE static +#endif + +/* Try to figure out whether the CPU is little-endian or big-endian. + * May need to modify this to include new compiler-specific defines. + * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your + * compiler flags when you compile this library */ +#if defined(__x86_64) || defined(__x86_64__) || \ + defined(__i386) || defined(__i386__) || \ + defined(__AVR__) || defined(__arm) || defined(__arm__) || \ + defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ + defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ + defined(__LITTLE_ENDIAN__) +#define LW_UTIL_LITTLE_ENDIAN 1 +#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ + defined(__BIG_ENDIAN__) +/* Big endian */ +#else +#error "Cannot determine the endianness of this platform" +#endif + +/* Helper macros to load and store values while converting endian-ness */ + +/* Load a big-endian 32-bit word from a byte buffer */ +#define be_load_word32(ptr) \ + ((((uint32_t)((ptr)[0])) << 24) | \ + (((uint32_t)((ptr)[1])) << 16) | \ + (((uint32_t)((ptr)[2])) << 8) | \ + ((uint32_t)((ptr)[3]))) + +/* Store a big-endian 32-bit word into a byte buffer */ +#define be_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 24); \ + (ptr)[1] = (uint8_t)(_x >> 16); \ + (ptr)[2] = (uint8_t)(_x >> 8); \ + (ptr)[3] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 32-bit word from a byte buffer */ +#define le_load_word32(ptr) \ + ((((uint32_t)((ptr)[3])) << 24) | \ + (((uint32_t)((ptr)[2])) << 16) | \ + (((uint32_t)((ptr)[1])) << 8) | \ + ((uint32_t)((ptr)[0]))) + +/* Store a little-endian 32-bit word into a byte buffer */ +#define le_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + } while (0) + +/* Load a big-endian 64-bit word from a byte buffer */ +#define be_load_word64(ptr) \ + ((((uint64_t)((ptr)[0])) << 56) | \ + (((uint64_t)((ptr)[1])) << 48) | \ + (((uint64_t)((ptr)[2])) << 40) | \ + (((uint64_t)((ptr)[3])) << 32) | \ + (((uint64_t)((ptr)[4])) << 24) | \ + (((uint64_t)((ptr)[5])) << 16) | \ + (((uint64_t)((ptr)[6])) << 8) | \ + ((uint64_t)((ptr)[7]))) + +/* Store a big-endian 64-bit word into a byte buffer */ +#define be_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 56); \ + (ptr)[1] = (uint8_t)(_x >> 48); \ + (ptr)[2] = (uint8_t)(_x >> 40); \ + (ptr)[3] = (uint8_t)(_x >> 32); \ + (ptr)[4] = (uint8_t)(_x >> 24); \ + (ptr)[5] = (uint8_t)(_x >> 16); \ + (ptr)[6] = (uint8_t)(_x >> 8); \ +
(ptr)[7] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 64-bit word from a byte buffer */ +#define le_load_word64(ptr) \ + ((((uint64_t)((ptr)[7])) << 56) | \ + (((uint64_t)((ptr)[6])) << 48) | \ + (((uint64_t)((ptr)[5])) << 40) | \ + (((uint64_t)((ptr)[4])) << 32) | \ + (((uint64_t)((ptr)[3])) << 24) | \ + (((uint64_t)((ptr)[2])) << 16) | \ + (((uint64_t)((ptr)[1])) << 8) | \ + ((uint64_t)((ptr)[0]))) + +/* Store a little-endian 64-bit word into a byte buffer */ +#define le_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + (ptr)[4] = (uint8_t)(_x >> 32); \ + (ptr)[5] = (uint8_t)(_x >> 40); \ + (ptr)[6] = (uint8_t)(_x >> 48); \ + (ptr)[7] = (uint8_t)(_x >> 56); \ + } while (0) + +/* Load a big-endian 16-bit word from a byte buffer */ +#define be_load_word16(ptr) \ + ((((uint16_t)((ptr)[0])) << 8) | \ + ((uint16_t)((ptr)[1]))) + +/* Store a big-endian 16-bit word into a byte buffer */ +#define be_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 8); \ + (ptr)[1] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 16-bit word from a byte buffer */ +#define le_load_word16(ptr) \ + ((((uint16_t)((ptr)[1])) << 8) | \ + ((uint16_t)((ptr)[0]))) + +/* Store a little-endian 16-bit word into a byte buffer */ +#define le_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + } while (0) + +/* XOR a source byte buffer against a destination */ +#define lw_xor_block(dest, src, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ ^= *_src++; \ + --_len; \ + } \ + } while (0) + +/* XOR two source byte buffers and put the result in a destination buffer */ +#define lw_xor_block_2_src(dest, src1, src2, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ = *_src1++ ^ *_src2++; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time */ +#define lw_xor_block_2_dest(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest2++ = (*_dest++ ^= *_src++); \ + --_len; \ + } \ + } while (0) + +/* XOR two byte buffers and write to a destination which at the same + * time copying the contents of src2 to dest2 */ +#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src2++; \ + *_dest2++ = _temp; \ + *_dest++ = *_src1++ ^ _temp; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time. 
This version swaps the source value + * into the "dest" buffer */ +#define lw_xor_block_swap(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src++; \ + *_dest2++ = *_dest ^ _temp; \ + *_dest++ = _temp; \ + --_len; \ + } \ + } while (0) + +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + +/* Rotation macros for 32-bit arguments */ + +/* Generic left rotate */ +#define leftRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (32 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (32 - (bits))); \ + })) + +#if !LW_CRYPTO_ROTATE32_COMPOSED + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1(a) (leftRotate((a), 1)) +#define leftRotate2(a) (leftRotate((a), 2)) +#define leftRotate3(a) (leftRotate((a), 3)) +#define leftRotate4(a) (leftRotate((a), 4)) +#define leftRotate5(a) (leftRotate((a), 5)) +#define leftRotate6(a) (leftRotate((a), 6)) +#define leftRotate7(a) (leftRotate((a), 7)) +#define leftRotate8(a) (leftRotate((a), 8)) +#define leftRotate9(a) (leftRotate((a), 9)) +#define leftRotate10(a) (leftRotate((a), 10)) +#define leftRotate11(a) (leftRotate((a), 11)) +#define leftRotate12(a) (leftRotate((a), 12)) +#define leftRotate13(a) (leftRotate((a), 13)) +#define leftRotate14(a) (leftRotate((a), 14)) +#define leftRotate15(a) (leftRotate((a), 15)) +#define leftRotate16(a) (leftRotate((a), 16)) +#define leftRotate17(a) (leftRotate((a), 17)) +#define leftRotate18(a) (leftRotate((a), 18)) +#define leftRotate19(a) (leftRotate((a), 19)) +#define leftRotate20(a) (leftRotate((a), 20)) +#define leftRotate21(a) (leftRotate((a), 21)) +#define leftRotate22(a) (leftRotate((a), 22)) +#define leftRotate23(a) (leftRotate((a), 23)) +#define leftRotate24(a) (leftRotate((a), 24)) +#define leftRotate25(a) (leftRotate((a), 25)) +#define leftRotate26(a) (leftRotate((a), 26)) +#define leftRotate27(a) (leftRotate((a), 27)) +#define leftRotate28(a) (leftRotate((a), 28)) +#define leftRotate29(a) (leftRotate((a), 29)) +#define leftRotate30(a) (leftRotate((a), 30)) +#define leftRotate31(a) (leftRotate((a), 31)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1(a) (rightRotate((a), 1)) +#define rightRotate2(a) (rightRotate((a), 2)) +#define rightRotate3(a) (rightRotate((a), 3)) +#define rightRotate4(a) (rightRotate((a), 4)) +#define rightRotate5(a) (rightRotate((a), 5)) +#define rightRotate6(a) (rightRotate((a), 6)) +#define rightRotate7(a) (rightRotate((a), 7)) +#define rightRotate8(a) (rightRotate((a), 8)) +#define rightRotate9(a) (rightRotate((a), 9)) +#define rightRotate10(a) (rightRotate((a), 10)) +#define rightRotate11(a) (rightRotate((a), 11)) +#define rightRotate12(a) (rightRotate((a), 12)) +#define rightRotate13(a) (rightRotate((a), 13)) +#define rightRotate14(a) (rightRotate((a), 14)) +#define rightRotate15(a) (rightRotate((a), 15)) +#define rightRotate16(a) (rightRotate((a), 16)) +#define rightRotate17(a) (rightRotate((a), 17)) +#define rightRotate18(a) (rightRotate((a), 18)) +#define rightRotate19(a) (rightRotate((a), 19)) +#define rightRotate20(a) (rightRotate((a), 20)) +#define rightRotate21(a) (rightRotate((a), 21)) +#define rightRotate22(a) (rightRotate((a), 22)) +#define rightRotate23(a) (rightRotate((a), 23)) +#define rightRotate24(a) (rightRotate((a), 24)) +#define rightRotate25(a) (rightRotate((a), 25)) +#define rightRotate26(a) (rightRotate((a), 26)) +#define rightRotate27(a) (rightRotate((a), 27)) +#define rightRotate28(a) (rightRotate((a), 28)) +#define rightRotate29(a) (rightRotate((a), 29)) +#define rightRotate30(a) (rightRotate((a), 30)) +#define rightRotate31(a) (rightRotate((a), 31)) + +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) 
(leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Rotation macros for 64-bit arguments */ + +/* Generic left rotate */ +#define leftRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (64 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (64 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_64(a) (leftRotate_64((a), 1)) +#define leftRotate2_64(a) (leftRotate_64((a), 2)) +#define leftRotate3_64(a) (leftRotate_64((a), 3)) +#define leftRotate4_64(a) (leftRotate_64((a), 4)) +#define leftRotate5_64(a) (leftRotate_64((a), 5)) +#define leftRotate6_64(a) (leftRotate_64((a), 6)) +#define leftRotate7_64(a) (leftRotate_64((a), 7)) +#define leftRotate8_64(a) (leftRotate_64((a), 8)) +#define leftRotate9_64(a) (leftRotate_64((a), 9)) +#define leftRotate10_64(a) (leftRotate_64((a), 10)) +#define leftRotate11_64(a) (leftRotate_64((a), 11)) +#define leftRotate12_64(a) (leftRotate_64((a), 12)) +#define leftRotate13_64(a) (leftRotate_64((a), 13)) +#define leftRotate14_64(a) (leftRotate_64((a), 14)) +#define leftRotate15_64(a) (leftRotate_64((a), 15)) +#define leftRotate16_64(a) (leftRotate_64((a), 16)) +#define leftRotate17_64(a) (leftRotate_64((a), 17)) +#define leftRotate18_64(a) (leftRotate_64((a), 18)) +#define leftRotate19_64(a) (leftRotate_64((a), 19)) +#define leftRotate20_64(a) (leftRotate_64((a), 20)) +#define leftRotate21_64(a) (leftRotate_64((a), 21)) +#define leftRotate22_64(a) (leftRotate_64((a), 22)) +#define leftRotate23_64(a) (leftRotate_64((a), 23)) +#define leftRotate24_64(a) (leftRotate_64((a), 24)) +#define leftRotate25_64(a) (leftRotate_64((a), 25)) +#define leftRotate26_64(a) (leftRotate_64((a), 26)) +#define leftRotate27_64(a) (leftRotate_64((a), 27)) +#define leftRotate28_64(a) (leftRotate_64((a), 28)) +#define leftRotate29_64(a) (leftRotate_64((a), 29)) +#define leftRotate30_64(a) (leftRotate_64((a), 30)) +#define leftRotate31_64(a) (leftRotate_64((a), 31)) +#define leftRotate32_64(a) (leftRotate_64((a), 32)) +#define leftRotate33_64(a) (leftRotate_64((a), 33)) +#define leftRotate34_64(a) (leftRotate_64((a), 34)) +#define leftRotate35_64(a) (leftRotate_64((a), 35)) +#define leftRotate36_64(a) (leftRotate_64((a), 36)) +#define leftRotate37_64(a) (leftRotate_64((a), 37)) +#define leftRotate38_64(a) (leftRotate_64((a), 38)) +#define leftRotate39_64(a) (leftRotate_64((a), 39)) +#define leftRotate40_64(a) (leftRotate_64((a), 40)) +#define leftRotate41_64(a) (leftRotate_64((a), 41)) +#define leftRotate42_64(a) (leftRotate_64((a), 42)) +#define leftRotate43_64(a) (leftRotate_64((a), 43)) +#define leftRotate44_64(a) (leftRotate_64((a), 44)) +#define leftRotate45_64(a) (leftRotate_64((a), 45)) +#define leftRotate46_64(a) (leftRotate_64((a), 46)) +#define leftRotate47_64(a) (leftRotate_64((a), 47)) +#define leftRotate48_64(a) (leftRotate_64((a), 48)) +#define leftRotate49_64(a) (leftRotate_64((a), 49)) +#define leftRotate50_64(a) (leftRotate_64((a), 50)) +#define leftRotate51_64(a) (leftRotate_64((a), 51)) +#define leftRotate52_64(a) (leftRotate_64((a), 52)) +#define leftRotate53_64(a) (leftRotate_64((a), 53)) +#define leftRotate54_64(a) (leftRotate_64((a), 54)) +#define leftRotate55_64(a) (leftRotate_64((a), 55)) +#define leftRotate56_64(a) (leftRotate_64((a), 56)) +#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) +#define leftRotate58_64(a) (leftRotate_64((a), 58)) +#define leftRotate59_64(a) (leftRotate_64((a), 59)) +#define leftRotate60_64(a) (leftRotate_64((a), 60)) +#define leftRotate61_64(a) (leftRotate_64((a), 61)) +#define leftRotate62_64(a) (leftRotate_64((a), 62)) +#define leftRotate63_64(a) (leftRotate_64((a), 63)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_64(a) (rightRotate_64((a), 1)) +#define rightRotate2_64(a) (rightRotate_64((a), 2)) +#define rightRotate3_64(a) (rightRotate_64((a), 3)) +#define rightRotate4_64(a) (rightRotate_64((a), 4)) +#define rightRotate5_64(a) (rightRotate_64((a), 5)) +#define rightRotate6_64(a) (rightRotate_64((a), 6)) +#define rightRotate7_64(a) (rightRotate_64((a), 7)) +#define rightRotate8_64(a) (rightRotate_64((a), 8)) +#define rightRotate9_64(a) (rightRotate_64((a), 9)) +#define rightRotate10_64(a) (rightRotate_64((a), 10)) +#define rightRotate11_64(a) (rightRotate_64((a), 11)) +#define rightRotate12_64(a) (rightRotate_64((a), 12)) +#define rightRotate13_64(a) (rightRotate_64((a), 13)) +#define rightRotate14_64(a) (rightRotate_64((a), 14)) +#define rightRotate15_64(a) (rightRotate_64((a), 15)) +#define rightRotate16_64(a) (rightRotate_64((a), 16)) +#define rightRotate17_64(a) (rightRotate_64((a), 17)) +#define rightRotate18_64(a) (rightRotate_64((a), 18)) +#define rightRotate19_64(a) (rightRotate_64((a), 19)) +#define rightRotate20_64(a) (rightRotate_64((a), 20)) +#define rightRotate21_64(a) (rightRotate_64((a), 21)) +#define rightRotate22_64(a) (rightRotate_64((a), 22)) +#define rightRotate23_64(a) (rightRotate_64((a), 23)) +#define rightRotate24_64(a) (rightRotate_64((a), 24)) +#define rightRotate25_64(a) (rightRotate_64((a), 25)) +#define rightRotate26_64(a) (rightRotate_64((a), 26)) +#define rightRotate27_64(a) (rightRotate_64((a), 27)) +#define rightRotate28_64(a) (rightRotate_64((a), 28)) +#define rightRotate29_64(a) (rightRotate_64((a), 29)) +#define rightRotate30_64(a) (rightRotate_64((a), 30)) +#define rightRotate31_64(a) (rightRotate_64((a), 31)) +#define rightRotate32_64(a) (rightRotate_64((a), 32)) +#define rightRotate33_64(a) (rightRotate_64((a), 33)) +#define rightRotate34_64(a) (rightRotate_64((a), 34)) +#define rightRotate35_64(a) (rightRotate_64((a), 35)) +#define rightRotate36_64(a) (rightRotate_64((a), 36)) +#define rightRotate37_64(a) (rightRotate_64((a), 37)) +#define rightRotate38_64(a) (rightRotate_64((a), 38)) +#define rightRotate39_64(a) (rightRotate_64((a), 39)) +#define rightRotate40_64(a) (rightRotate_64((a), 40)) +#define rightRotate41_64(a) (rightRotate_64((a), 41)) +#define rightRotate42_64(a) (rightRotate_64((a), 42)) +#define rightRotate43_64(a) (rightRotate_64((a), 43)) +#define rightRotate44_64(a) (rightRotate_64((a), 44)) +#define rightRotate45_64(a) (rightRotate_64((a), 45)) +#define rightRotate46_64(a) (rightRotate_64((a), 46)) +#define rightRotate47_64(a) (rightRotate_64((a), 47)) +#define rightRotate48_64(a) (rightRotate_64((a), 48)) +#define rightRotate49_64(a) (rightRotate_64((a), 49)) +#define rightRotate50_64(a) (rightRotate_64((a), 50)) +#define rightRotate51_64(a) (rightRotate_64((a), 51)) +#define rightRotate52_64(a) (rightRotate_64((a), 52)) +#define rightRotate53_64(a) (rightRotate_64((a), 53)) +#define rightRotate54_64(a) (rightRotate_64((a), 54)) +#define rightRotate55_64(a) (rightRotate_64((a), 55)) +#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) +#define rightRotate57_64(a) (rightRotate_64((a), 57)) +#define rightRotate58_64(a) (rightRotate_64((a), 58)) +#define rightRotate59_64(a) (rightRotate_64((a), 59)) +#define rightRotate60_64(a) (rightRotate_64((a), 60)) +#define rightRotate61_64(a) (rightRotate_64((a), 61)) +#define rightRotate62_64(a) (rightRotate_64((a), 62)) +#define rightRotate63_64(a) (rightRotate_64((a), 63)) + +/* Rotate a 16-bit value left by a number of bits */ +#define leftRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (16 - (bits))); \ + })) + +/* Rotate a 16-bit value right by a number of bits */ +#define rightRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (16 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_16(a) (leftRotate_16((a), 1)) +#define leftRotate2_16(a) (leftRotate_16((a), 2)) +#define leftRotate3_16(a) (leftRotate_16((a), 3)) +#define leftRotate4_16(a) (leftRotate_16((a), 4)) +#define leftRotate5_16(a) (leftRotate_16((a), 5)) +#define leftRotate6_16(a) (leftRotate_16((a), 6)) +#define leftRotate7_16(a) (leftRotate_16((a), 7)) +#define leftRotate8_16(a) (leftRotate_16((a), 8)) +#define leftRotate9_16(a) (leftRotate_16((a), 9)) +#define leftRotate10_16(a) (leftRotate_16((a), 10)) +#define leftRotate11_16(a) (leftRotate_16((a), 11)) +#define leftRotate12_16(a) (leftRotate_16((a), 12)) +#define leftRotate13_16(a) (leftRotate_16((a), 13)) +#define leftRotate14_16(a) (leftRotate_16((a), 14)) +#define leftRotate15_16(a) (leftRotate_16((a), 15)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_16(a) (rightRotate_16((a), 1)) +#define rightRotate2_16(a) (rightRotate_16((a), 2)) +#define rightRotate3_16(a) (rightRotate_16((a), 3)) +#define rightRotate4_16(a) (rightRotate_16((a), 4)) +#define rightRotate5_16(a) (rightRotate_16((a), 5)) +#define rightRotate6_16(a) (rightRotate_16((a), 6)) +#define rightRotate7_16(a) (rightRotate_16((a), 7)) +#define rightRotate8_16(a) (rightRotate_16((a), 8)) +#define rightRotate9_16(a) (rightRotate_16((a), 9)) +#define rightRotate10_16(a) (rightRotate_16((a), 10)) +#define rightRotate11_16(a) (rightRotate_16((a), 11)) +#define rightRotate12_16(a) (rightRotate_16((a), 12)) +#define rightRotate13_16(a) (rightRotate_16((a), 13)) +#define rightRotate14_16(a) (rightRotate_16((a), 14)) +#define rightRotate15_16(a) (rightRotate_16((a), 15)) + +/* Rotate an 8-bit value left by a number of bits */ +#define leftRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (8 - (bits))); \ + })) + +/* Rotate an 8-bit value right by a number of bits */ +#define rightRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (8 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_8(a) (leftRotate_8((a), 1)) +#define leftRotate2_8(a) (leftRotate_8((a), 2)) +#define leftRotate3_8(a) (leftRotate_8((a), 3)) +#define leftRotate4_8(a) (leftRotate_8((a), 4)) +#define leftRotate5_8(a) (leftRotate_8((a), 5)) +#define leftRotate6_8(a) (leftRotate_8((a), 6)) +#define leftRotate7_8(a) (leftRotate_8((a), 7)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_8(a) (rightRotate_8((a), 1)) +#define rightRotate2_8(a) (rightRotate_8((a), 2)) +#define rightRotate3_8(a) (rightRotate_8((a), 3)) +#define rightRotate4_8(a) (rightRotate_8((a), 4)) +#define rightRotate5_8(a) (rightRotate_8((a), 5)) +#define rightRotate6_8(a) (rightRotate_8((a), 6)) +#define rightRotate7_8(a) (rightRotate_8((a), 7)) + +#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/skinny-hash.c b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/skinny-hash.c new file mode 100644 index 0000000..0abdeff --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/skinny-hash.c @@ -0,0 +1,174 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "skinny-hash.h" +#include "internal-skinny128.h" +#include "internal-util.h" +#include <string.h> + +aead_hash_algorithm_t const skinny_tk3_hash_algorithm = { + "SKINNY-tk3-HASH", + sizeof(int), + SKINNY_HASH_SIZE, + AEAD_FLAG_NONE, + skinny_tk3_hash, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +aead_hash_algorithm_t const skinny_tk2_hash_algorithm = { + "SKINNY-tk2-HASH", + sizeof(int), + SKINNY_HASH_SIZE, + AEAD_FLAG_NONE, + skinny_tk2_hash, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +/** + * \brief Size of the permutation state for SKINNY-tk3-HASH. + */ +#define SKINNY_TK3_STATE_SIZE 48 + +/** + * \brief Size of the permutation state for SKINNY-tk2-HASH. + */ +#define SKINNY_TK2_STATE_SIZE 32 + +/** + * \brief Rate of absorbing data for SKINNY-tk3-HASH. + */ +#define SKINNY_TK3_HASH_RATE 16 + +/** + * \brief Rate of absorbing data for SKINNY-tk2-HASH.
+ */ +#define SKINNY_TK2_HASH_RATE 4 + +/** + * \brief Input block that is encrypted with the state for each + * block permutation of SKINNY-tk3-HASH or SKINNY-tk2-HASH. + */ +static unsigned char const skinny_hash_block[48] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +/** + * \brief Permutes the internal state for SKINNY-tk3-HASH. + * + * \param state The state to be permuted. + */ +static void skinny_tk3_permute(unsigned char state[SKINNY_TK3_STATE_SIZE]) +{ + unsigned char temp[SKINNY_TK3_STATE_SIZE]; + skinny_128_384_encrypt_tk_full(state, temp, skinny_hash_block); + skinny_128_384_encrypt_tk_full(state, temp + 16, skinny_hash_block + 16); + skinny_128_384_encrypt_tk_full(state, temp + 32, skinny_hash_block + 32); + memcpy(state, temp, SKINNY_TK3_STATE_SIZE); +} + +/** + * \brief Permutes the internal state for SKINNY-tk2-HASH. + * + * \param state The state to be permuted. + */ +static void skinny_tk2_permute(unsigned char state[SKINNY_TK2_STATE_SIZE]) +{ + unsigned char temp[SKINNY_TK2_STATE_SIZE]; + skinny_128_256_encrypt_tk_full(state, temp, skinny_hash_block); + skinny_128_256_encrypt_tk_full(state, temp + 16, skinny_hash_block + 16); + memcpy(state, temp, SKINNY_TK2_STATE_SIZE); +} + +int skinny_tk3_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + unsigned char state[SKINNY_TK3_STATE_SIZE]; + unsigned temp; + + /* Initialize the hash state */ + memset(state, 0, sizeof(state)); + state[SKINNY_TK3_HASH_RATE] = 0x80; + + /* Process as many full blocks as possible */ + while (inlen >= SKINNY_TK3_HASH_RATE) { + lw_xor_block(state, in, SKINNY_TK3_HASH_RATE); + skinny_tk3_permute(state); + in += SKINNY_TK3_HASH_RATE; + inlen -= SKINNY_TK3_HASH_RATE; + } + + /* Pad and process the last block */ + temp = (unsigned)inlen; + lw_xor_block(state, in, temp); + state[temp] ^= 0x80; /* padding */ + skinny_tk3_permute(state); + + /* Generate the hash output */ + memcpy(out, state, 16); + skinny_tk3_permute(state); + memcpy(out + 16, state, 16); + return 0; +} + +int skinny_tk2_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + unsigned char state[SKINNY_TK2_STATE_SIZE]; + unsigned temp; + + /* Initialize the hash state */ + memset(state, 0, sizeof(state)); + state[SKINNY_TK2_HASH_RATE] = 0x80; + + /* Process as many full blocks as possible */ + while (inlen >= SKINNY_TK2_HASH_RATE) { + lw_xor_block(state, in, SKINNY_TK2_HASH_RATE); + skinny_tk2_permute(state); + in += SKINNY_TK2_HASH_RATE; + inlen -= SKINNY_TK2_HASH_RATE; + } + + /* Pad and process the last block */ + temp = (unsigned)inlen; + lw_xor_block(state, in, temp); + state[temp] ^= 0x80; /* padding */ + skinny_tk2_permute(state); + + /* Generate the hash output */ + memcpy(out, state, 16); + skinny_tk2_permute(state); + memcpy(out + 16, state, 16); + return 0; +} diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/skinny-hash.h b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/skinny-hash.h new file mode 100644 index 0000000..f75ce9f --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk2/rhys/skinny-hash.h @@ -0,0 +1,96 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_SKINNY_HASH_H +#define LWCRYPTO_SKINNY_HASH_H + +#include "aead-common.h" + +/** + * \file skinny-hash.h + * \brief Hash algorithms based on the SKINNY block cipher. + * + * The SKINNY-AEAD family includes two hash algorithms: + * + * \li SKINNY-tk3-HASH with a 256-bit hash output, based around the + * SKINNY-128-384 tweakable block cipher. This is the primary hashing + * member of the family. + * \li SKINNY-tk2-HASH with a 256-bit hash output, based around the + * SKINNY-128-256 tweakable block cipher. + * + * References: https://sites.google.com/site/skinnycipher/home + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the hash output for SKINNY-tk3-HASH and SKINNY-tk2-HASH. + */ +#define SKINNY_HASH_SIZE 32 + +/** + * \brief Meta-information block for the SKINNY-tk3-HASH algorithm. + */ +extern aead_hash_algorithm_t const skinny_tk3_hash_algorithm; + +/** + * \brief Meta-information block for the SKINNY-tk2-HASH algorithm. + */ +extern aead_hash_algorithm_t const skinny_tk2_hash_algorithm; + +/** + * \brief Hashes a block of input data with SKINNY-tk3-HASH to + * generate a hash value. + * + * \param out Buffer to receive the hash output which must be at least + * SKINNY_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int skinny_tk3_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Hashes a block of input data with SKINNY-tk2-HASH to + * generate a hash value. + * + * \param out Buffer to receive the hash output which must be at least + * SKINNY_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. 
+ */ +int skinny_tk2_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/aead-common.c b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/aead-common.h b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. 
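The aead_cipher_encrypt_t signature and the aead_cipher_t meta-information block defined above are enough to drive any cipher in the library generically. The sketch below is illustrative only: encrypt_packet() is a hypothetical helper, not part of the library, and it assumes the common case of no associated data and no secret nonce.

#include <stddef.h>
#include "aead-common.h"

/* Illustrative helper: encrypts one packet through whichever
 * aead_cipher_t entry the caller passes in.  "nonce" must point to
 * cipher->nonce_len bytes and "key" to cipher->key_len bytes; "out"
 * must have room for msglen plus cipher->tag_len bytes. */
int encrypt_packet(const aead_cipher_t *cipher,
                   unsigned char *out, unsigned long long *outlen,
                   const unsigned char *msg, unsigned long long msglen,
                   const unsigned char *nonce,
                   const unsigned char *key)
{
    /* Argument order follows aead_cipher_encrypt_t:
     * c, clen, m, mlen, ad, adlen, nsec, npub, k. */
    return cipher->encrypt(out, outlen, msg, msglen,
                           NULL, 0,      /* no associated data */
                           NULL,         /* nsec unused        */
                           nonce, key);
}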
Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
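For reference, the masking arithmetic that aead_check_tag() relies on can be exercised in isolation. The sketch below is illustrative only (tags_equal_mask() is not a library function) and, like the library code, assumes an arithmetic right shift of negative int values.

#include <assert.h>

/* XOR differences are OR-ed together, and (accum - 1) >> 8 becomes
 * -1 (all ones) only when accum stayed 0, i.e. every byte matched. */
static int tags_equal_mask(const unsigned char *a, const unsigned char *b,
                           unsigned len)
{
    int accum = 0;
    while (len > 0) {
        accum |= (*a++ ^ *b++);   /* stays 0 iff all bytes are equal */
        --len;
    }
    return (accum - 1) >> 8;      /* -1 on match, 0 on mismatch */
}

int main(void)
{
    unsigned char t1[4] = {1, 2, 3, 4};
    unsigned char t2[4] = {1, 2, 3, 4};
    unsigned char t3[4] = {1, 2, 3, 5};
    assert(tags_equal_mask(t1, t2, 4) == -1);  /* equal tags   */
    assert(tags_equal_mask(t1, t3, 4) == 0);   /* unequal tags */
    return 0;
}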
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/api.h b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/api.h deleted file mode 100644 index ae8c7f6..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/api.h +++ /dev/null @@ -1 +0,0 @@ -#define CRYPTO_BYTES 32 diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/hash.c b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/hash.c deleted file mode 100644 index c51ca3f..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/hash.c +++ /dev/null @@ -1,8 +0,0 @@ - -#include "skinny-hash.h" - -int crypto_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - return skinny_tk3_hash(out, in, inlen); -} diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-skinny128-avr.S b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-skinny128-avr.S deleted file mode 100644 index d342cd5..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-skinny128-avr.S +++ /dev/null @@ -1,10099 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 256 -table_0: - .byte 101 - .byte 76 - .byte 106 - .byte 66 - .byte 75 - .byte 99 - .byte 67 - .byte 107 - .byte 85 - .byte 117 - .byte 90 - .byte 122 - .byte 83 - .byte 115 - .byte 91 - .byte 123 - .byte 53 - .byte 140 - .byte 58 - .byte 129 - .byte 137 - .byte 51 - .byte 128 - .byte 59 - .byte 149 - .byte 37 - .byte 152 - .byte 42 - .byte 144 - .byte 35 - .byte 153 - .byte 43 - .byte 229 - .byte 204 - .byte 232 - .byte 193 - .byte 201 - .byte 224 - .byte 192 - .byte 233 - .byte 213 - .byte 245 - .byte 216 - .byte 248 - .byte 208 - .byte 240 - .byte 217 - .byte 249 - .byte 165 - .byte 28 - .byte 168 - .byte 18 - .byte 27 - .byte 160 - .byte 19 - .byte 169 - .byte 5 - .byte 181 - .byte 10 - .byte 184 - .byte 3 - .byte 176 - .byte 11 - .byte 185 - .byte 50 - .byte 136 - .byte 60 - .byte 133 - .byte 141 - .byte 52 - .byte 132 - .byte 61 - .byte 145 - .byte 34 - .byte 156 - .byte 44 - .byte 148 - .byte 36 - .byte 157 - .byte 45 - .byte 98 - .byte 74 - .byte 108 - .byte 69 - .byte 77 - .byte 100 - .byte 68 - .byte 109 - .byte 82 - .byte 114 - .byte 92 - .byte 124 - .byte 84 - .byte 116 - .byte 93 - .byte 125 - .byte 161 - .byte 26 - .byte 172 - .byte 21 - .byte 29 - .byte 164 - .byte 20 - .byte 173 - .byte 2 - .byte 177 - .byte 12 - .byte 188 - .byte 4 - .byte 180 - .byte 13 - .byte 189 - .byte 225 - .byte 200 - .byte 236 - .byte 197 - .byte 205 - .byte 228 - .byte 196 - .byte 237 - .byte 209 - .byte 241 - .byte 220 - .byte 252 - .byte 212 - .byte 244 - .byte 221 - .byte 253 - .byte 54 - .byte 142 - .byte 56 - .byte 130 - .byte 139 - .byte 48 - .byte 131 - .byte 57 - .byte 150 - .byte 38 - .byte 154 - .byte 40 - .byte 147 - .byte 32 - .byte 155 - .byte 41 - .byte 102 - .byte 78 - .byte 104 - .byte 65 - .byte 73 - .byte 96 - .byte 64 - .byte 105 - .byte 86 - .byte 118 - .byte 88 - .byte 120 - .byte 80 - .byte 112 - .byte 89 - .byte 121 - .byte 166 - .byte 30 - .byte 170 - .byte 17 - .byte 25 - .byte 163 - .byte 16 - .byte 171 - .byte 6 - .byte 182 - .byte 8 - .byte 186 - .byte 0 - .byte 179 
- .byte 9 - .byte 187 - .byte 230 - .byte 206 - .byte 234 - .byte 194 - .byte 203 - .byte 227 - .byte 195 - .byte 235 - .byte 214 - .byte 246 - .byte 218 - .byte 250 - .byte 211 - .byte 243 - .byte 219 - .byte 251 - .byte 49 - .byte 138 - .byte 62 - .byte 134 - .byte 143 - .byte 55 - .byte 135 - .byte 63 - .byte 146 - .byte 33 - .byte 158 - .byte 46 - .byte 151 - .byte 39 - .byte 159 - .byte 47 - .byte 97 - .byte 72 - .byte 110 - .byte 70 - .byte 79 - .byte 103 - .byte 71 - .byte 111 - .byte 81 - .byte 113 - .byte 94 - .byte 126 - .byte 87 - .byte 119 - .byte 95 - .byte 127 - .byte 162 - .byte 24 - .byte 174 - .byte 22 - .byte 31 - .byte 167 - .byte 23 - .byte 175 - .byte 1 - .byte 178 - .byte 14 - .byte 190 - .byte 7 - .byte 183 - .byte 15 - .byte 191 - .byte 226 - .byte 202 - .byte 238 - .byte 198 - .byte 207 - .byte 231 - .byte 199 - .byte 239 - .byte 210 - .byte 242 - .byte 222 - .byte 254 - .byte 215 - .byte 247 - .byte 223 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 256 -table_1: - .byte 172 - .byte 232 - .byte 104 - .byte 60 - .byte 108 - .byte 56 - .byte 168 - .byte 236 - .byte 170 - .byte 174 - .byte 58 - .byte 62 - .byte 106 - .byte 110 - .byte 234 - .byte 238 - .byte 166 - .byte 163 - .byte 51 - .byte 54 - .byte 102 - .byte 99 - .byte 227 - .byte 230 - .byte 225 - .byte 164 - .byte 97 - .byte 52 - .byte 49 - .byte 100 - .byte 161 - .byte 228 - .byte 141 - .byte 201 - .byte 73 - .byte 29 - .byte 77 - .byte 25 - .byte 137 - .byte 205 - .byte 139 - .byte 143 - .byte 27 - .byte 31 - .byte 75 - .byte 79 - .byte 203 - .byte 207 - .byte 133 - .byte 192 - .byte 64 - .byte 21 - .byte 69 - .byte 16 - .byte 128 - .byte 197 - .byte 130 - .byte 135 - .byte 18 - .byte 23 - .byte 66 - .byte 71 - .byte 194 - .byte 199 - .byte 150 - .byte 147 - .byte 3 - .byte 6 - .byte 86 - .byte 83 - .byte 211 - .byte 214 - .byte 209 - .byte 148 - .byte 81 - .byte 4 - .byte 1 - .byte 84 - .byte 145 - .byte 212 - .byte 156 - .byte 216 - .byte 88 - .byte 12 - .byte 92 - .byte 8 - .byte 152 - .byte 220 - .byte 154 - .byte 158 - .byte 10 - .byte 14 - .byte 90 - .byte 94 - .byte 218 - .byte 222 - .byte 149 - .byte 208 - .byte 80 - .byte 5 - .byte 85 - .byte 0 - .byte 144 - .byte 213 - .byte 146 - .byte 151 - .byte 2 - .byte 7 - .byte 82 - .byte 87 - .byte 210 - .byte 215 - .byte 157 - .byte 217 - .byte 89 - .byte 13 - .byte 93 - .byte 9 - .byte 153 - .byte 221 - .byte 155 - .byte 159 - .byte 11 - .byte 15 - .byte 91 - .byte 95 - .byte 219 - .byte 223 - .byte 22 - .byte 19 - .byte 131 - .byte 134 - .byte 70 - .byte 67 - .byte 195 - .byte 198 - .byte 65 - .byte 20 - .byte 193 - .byte 132 - .byte 17 - .byte 68 - .byte 129 - .byte 196 - .byte 28 - .byte 72 - .byte 200 - .byte 140 - .byte 76 - .byte 24 - .byte 136 - .byte 204 - .byte 26 - .byte 30 - .byte 138 - .byte 142 - .byte 74 - .byte 78 - .byte 202 - .byte 206 - .byte 53 - .byte 96 - .byte 224 - .byte 165 - .byte 101 - .byte 48 - .byte 160 - .byte 229 - .byte 50 - .byte 55 - .byte 162 - .byte 167 - .byte 98 - .byte 103 - .byte 226 - .byte 231 - .byte 61 - .byte 105 - .byte 233 - .byte 173 - .byte 109 - .byte 57 - .byte 169 - .byte 237 - .byte 59 - .byte 63 - .byte 171 - .byte 175 - .byte 107 - .byte 111 - .byte 235 - .byte 239 - .byte 38 - .byte 35 - .byte 179 - .byte 182 - .byte 118 - .byte 115 - .byte 243 - .byte 246 - .byte 113 - .byte 36 - .byte 241 - .byte 180 - .byte 33 - .byte 116 - .byte 177 - .byte 244 - .byte 44 - .byte 120 - .byte 248 - .byte 188 - .byte 124 - .byte 40 - .byte 184 - .byte 
252 - .byte 42 - .byte 46 - .byte 186 - .byte 190 - .byte 122 - .byte 126 - .byte 250 - .byte 254 - .byte 37 - .byte 112 - .byte 240 - .byte 181 - .byte 117 - .byte 32 - .byte 176 - .byte 245 - .byte 34 - .byte 39 - .byte 178 - .byte 183 - .byte 114 - .byte 119 - .byte 242 - .byte 247 - .byte 45 - .byte 121 - .byte 249 - .byte 189 - .byte 125 - .byte 41 - .byte 185 - .byte 253 - .byte 43 - .byte 47 - .byte 187 - .byte 191 - .byte 123 - .byte 127 - .byte 251 - .byte 255 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_2, @object - .size table_2, 256 -table_2: - .byte 0 - .byte 2 - .byte 4 - .byte 6 - .byte 8 - .byte 10 - .byte 12 - .byte 14 - .byte 16 - .byte 18 - .byte 20 - .byte 22 - .byte 24 - .byte 26 - .byte 28 - .byte 30 - .byte 32 - .byte 34 - .byte 36 - .byte 38 - .byte 40 - .byte 42 - .byte 44 - .byte 46 - .byte 48 - .byte 50 - .byte 52 - .byte 54 - .byte 56 - .byte 58 - .byte 60 - .byte 62 - .byte 65 - .byte 67 - .byte 69 - .byte 71 - .byte 73 - .byte 75 - .byte 77 - .byte 79 - .byte 81 - .byte 83 - .byte 85 - .byte 87 - .byte 89 - .byte 91 - .byte 93 - .byte 95 - .byte 97 - .byte 99 - .byte 101 - .byte 103 - .byte 105 - .byte 107 - .byte 109 - .byte 111 - .byte 113 - .byte 115 - .byte 117 - .byte 119 - .byte 121 - .byte 123 - .byte 125 - .byte 127 - .byte 128 - .byte 130 - .byte 132 - .byte 134 - .byte 136 - .byte 138 - .byte 140 - .byte 142 - .byte 144 - .byte 146 - .byte 148 - .byte 150 - .byte 152 - .byte 154 - .byte 156 - .byte 158 - .byte 160 - .byte 162 - .byte 164 - .byte 166 - .byte 168 - .byte 170 - .byte 172 - .byte 174 - .byte 176 - .byte 178 - .byte 180 - .byte 182 - .byte 184 - .byte 186 - .byte 188 - .byte 190 - .byte 193 - .byte 195 - .byte 197 - .byte 199 - .byte 201 - .byte 203 - .byte 205 - .byte 207 - .byte 209 - .byte 211 - .byte 213 - .byte 215 - .byte 217 - .byte 219 - .byte 221 - .byte 223 - .byte 225 - .byte 227 - .byte 229 - .byte 231 - .byte 233 - .byte 235 - .byte 237 - .byte 239 - .byte 241 - .byte 243 - .byte 245 - .byte 247 - .byte 249 - .byte 251 - .byte 253 - .byte 255 - .byte 1 - .byte 3 - .byte 5 - .byte 7 - .byte 9 - .byte 11 - .byte 13 - .byte 15 - .byte 17 - .byte 19 - .byte 21 - .byte 23 - .byte 25 - .byte 27 - .byte 29 - .byte 31 - .byte 33 - .byte 35 - .byte 37 - .byte 39 - .byte 41 - .byte 43 - .byte 45 - .byte 47 - .byte 49 - .byte 51 - .byte 53 - .byte 55 - .byte 57 - .byte 59 - .byte 61 - .byte 63 - .byte 64 - .byte 66 - .byte 68 - .byte 70 - .byte 72 - .byte 74 - .byte 76 - .byte 78 - .byte 80 - .byte 82 - .byte 84 - .byte 86 - .byte 88 - .byte 90 - .byte 92 - .byte 94 - .byte 96 - .byte 98 - .byte 100 - .byte 102 - .byte 104 - .byte 106 - .byte 108 - .byte 110 - .byte 112 - .byte 114 - .byte 116 - .byte 118 - .byte 120 - .byte 122 - .byte 124 - .byte 126 - .byte 129 - .byte 131 - .byte 133 - .byte 135 - .byte 137 - .byte 139 - .byte 141 - .byte 143 - .byte 145 - .byte 147 - .byte 149 - .byte 151 - .byte 153 - .byte 155 - .byte 157 - .byte 159 - .byte 161 - .byte 163 - .byte 165 - .byte 167 - .byte 169 - .byte 171 - .byte 173 - .byte 175 - .byte 177 - .byte 179 - .byte 181 - .byte 183 - .byte 185 - .byte 187 - .byte 189 - .byte 191 - .byte 192 - .byte 194 - .byte 196 - .byte 198 - .byte 200 - .byte 202 - .byte 204 - .byte 206 - .byte 208 - .byte 210 - .byte 212 - .byte 214 - .byte 216 - .byte 218 - .byte 220 - .byte 222 - .byte 224 - .byte 226 - .byte 228 - .byte 230 - .byte 232 - .byte 234 - .byte 236 - .byte 238 - .byte 240 - .byte 242 - .byte 244 - .byte 246 - .byte 248 - .byte 250 - .byte 252 - .byte 254 - - 
.section .progmem.data,"a",@progbits - .p2align 8 - .type table_3, @object - .size table_3, 256 -table_3: - .byte 0 - .byte 128 - .byte 1 - .byte 129 - .byte 2 - .byte 130 - .byte 3 - .byte 131 - .byte 4 - .byte 132 - .byte 5 - .byte 133 - .byte 6 - .byte 134 - .byte 7 - .byte 135 - .byte 8 - .byte 136 - .byte 9 - .byte 137 - .byte 10 - .byte 138 - .byte 11 - .byte 139 - .byte 12 - .byte 140 - .byte 13 - .byte 141 - .byte 14 - .byte 142 - .byte 15 - .byte 143 - .byte 16 - .byte 144 - .byte 17 - .byte 145 - .byte 18 - .byte 146 - .byte 19 - .byte 147 - .byte 20 - .byte 148 - .byte 21 - .byte 149 - .byte 22 - .byte 150 - .byte 23 - .byte 151 - .byte 24 - .byte 152 - .byte 25 - .byte 153 - .byte 26 - .byte 154 - .byte 27 - .byte 155 - .byte 28 - .byte 156 - .byte 29 - .byte 157 - .byte 30 - .byte 158 - .byte 31 - .byte 159 - .byte 160 - .byte 32 - .byte 161 - .byte 33 - .byte 162 - .byte 34 - .byte 163 - .byte 35 - .byte 164 - .byte 36 - .byte 165 - .byte 37 - .byte 166 - .byte 38 - .byte 167 - .byte 39 - .byte 168 - .byte 40 - .byte 169 - .byte 41 - .byte 170 - .byte 42 - .byte 171 - .byte 43 - .byte 172 - .byte 44 - .byte 173 - .byte 45 - .byte 174 - .byte 46 - .byte 175 - .byte 47 - .byte 176 - .byte 48 - .byte 177 - .byte 49 - .byte 178 - .byte 50 - .byte 179 - .byte 51 - .byte 180 - .byte 52 - .byte 181 - .byte 53 - .byte 182 - .byte 54 - .byte 183 - .byte 55 - .byte 184 - .byte 56 - .byte 185 - .byte 57 - .byte 186 - .byte 58 - .byte 187 - .byte 59 - .byte 188 - .byte 60 - .byte 189 - .byte 61 - .byte 190 - .byte 62 - .byte 191 - .byte 63 - .byte 64 - .byte 192 - .byte 65 - .byte 193 - .byte 66 - .byte 194 - .byte 67 - .byte 195 - .byte 68 - .byte 196 - .byte 69 - .byte 197 - .byte 70 - .byte 198 - .byte 71 - .byte 199 - .byte 72 - .byte 200 - .byte 73 - .byte 201 - .byte 74 - .byte 202 - .byte 75 - .byte 203 - .byte 76 - .byte 204 - .byte 77 - .byte 205 - .byte 78 - .byte 206 - .byte 79 - .byte 207 - .byte 80 - .byte 208 - .byte 81 - .byte 209 - .byte 82 - .byte 210 - .byte 83 - .byte 211 - .byte 84 - .byte 212 - .byte 85 - .byte 213 - .byte 86 - .byte 214 - .byte 87 - .byte 215 - .byte 88 - .byte 216 - .byte 89 - .byte 217 - .byte 90 - .byte 218 - .byte 91 - .byte 219 - .byte 92 - .byte 220 - .byte 93 - .byte 221 - .byte 94 - .byte 222 - .byte 95 - .byte 223 - .byte 224 - .byte 96 - .byte 225 - .byte 97 - .byte 226 - .byte 98 - .byte 227 - .byte 99 - .byte 228 - .byte 100 - .byte 229 - .byte 101 - .byte 230 - .byte 102 - .byte 231 - .byte 103 - .byte 232 - .byte 104 - .byte 233 - .byte 105 - .byte 234 - .byte 106 - .byte 235 - .byte 107 - .byte 236 - .byte 108 - .byte 237 - .byte 109 - .byte 238 - .byte 110 - .byte 239 - .byte 111 - .byte 240 - .byte 112 - .byte 241 - .byte 113 - .byte 242 - .byte 114 - .byte 243 - .byte 115 - .byte 244 - .byte 116 - .byte 245 - .byte 117 - .byte 246 - .byte 118 - .byte 247 - .byte 119 - .byte 248 - .byte 120 - .byte 249 - .byte 121 - .byte 250 - .byte 122 - .byte 251 - .byte 123 - .byte 252 - .byte 124 - .byte 253 - .byte 125 - .byte 254 - .byte 126 - .byte 255 - .byte 127 - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_4, @object - .size table_4, 112 -table_4: - .byte 1 - .byte 0 - .byte 3 - .byte 0 - .byte 7 - .byte 0 - .byte 15 - .byte 0 - .byte 15 - .byte 1 - .byte 14 - .byte 3 - .byte 13 - .byte 3 - .byte 11 - .byte 3 - .byte 7 - .byte 3 - .byte 15 - .byte 2 - .byte 14 - .byte 1 - .byte 12 - .byte 3 - .byte 9 - .byte 3 - .byte 3 - .byte 3 - .byte 7 - .byte 2 - .byte 14 - .byte 0 - .byte 13 - .byte 1 - .byte 10 - .byte 3 - 
.byte 5 - .byte 3 - .byte 11 - .byte 2 - .byte 6 - .byte 1 - .byte 12 - .byte 2 - .byte 8 - .byte 1 - .byte 0 - .byte 3 - .byte 1 - .byte 2 - .byte 2 - .byte 0 - .byte 5 - .byte 0 - .byte 11 - .byte 0 - .byte 7 - .byte 1 - .byte 14 - .byte 2 - .byte 12 - .byte 1 - .byte 8 - .byte 3 - .byte 1 - .byte 3 - .byte 3 - .byte 2 - .byte 6 - .byte 0 - .byte 13 - .byte 0 - .byte 11 - .byte 1 - .byte 6 - .byte 3 - .byte 13 - .byte 2 - .byte 10 - .byte 1 - .byte 4 - .byte 3 - .byte 9 - .byte 2 - .byte 2 - .byte 1 - .byte 4 - .byte 2 - .byte 8 - .byte 0 - .byte 1 - .byte 1 - .byte 2 - .byte 2 - .byte 4 - .byte 0 - .byte 9 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 2 - .byte 12 - .byte 0 - .byte 9 - .byte 1 - .byte 2 - .byte 3 - .byte 5 - .byte 2 - .byte 10 - .byte 0 - - .text -.global skinny_128_384_init - .type skinny_128_384_init, @function -skinny_128_384_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,12 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_384_init, .-skinny_128_384_init - - .text -.global skinny_128_384_encrypt - .type skinny_128_384_encrypt, @function -skinny_128_384_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - std Y+33,r18 - std Y+34,r19 - std Y+35,r20 - std Y+36,r21 - ldd r18,Z+36 - ldd r19,Z+37 - ldd r20,Z+38 - ldd r21,Z+39 - std Y+37,r18 - std Y+38,r19 - std Y+39,r20 - std Y+40,r21 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - std Y+41,r18 - std Y+42,r19 - std Y+43,r20 - std Y+44,r21 - ldd r18,Z+44 - ldd r19,Z+45 - ldd r20,Z+46 - ldd r21,Z+47 - std Y+45,r18 - std Y+46,r19 - std Y+47,r20 - std Y+48,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -114: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 
-#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 
- eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif 
defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - 
mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif 
defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z 
-#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm 
- mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r13 - std Y+42,r17 - std Y+43,r12 - std Y+44,r25 - std Y+45,r14 - std Y+46,r16 - std Y+47,r24 - std Y+48,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif 
defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,112 - brne 5721f - rjmp 790f -5721: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - 
ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r13 - std Y+34,r17 - std Y+35,r12 - std Y+36,r25 - std Y+37,r14 - std Y+38,r16 - std Y+39,r24 - std Y+40,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 114b -790: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop 
r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_encrypt, .-skinny_128_384_encrypt - -.global skinny_128_384_encrypt_tk_full - .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt - - .text -.global skinny_128_384_decrypt - .type skinny_128_384_decrypt, @function -skinny_128_384_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,48 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 68 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r23 - std Y+2,r2 - std Y+3,r21 - std Y+4,r20 - std Y+5,r3 - std Y+6,r18 - std Y+7,r19 - std Y+8,r22 - std Y+9,r9 - std Y+10,r10 - std Y+11,r7 - std Y+12,r6 - std Y+13,r11 - std Y+14,r4 - std Y+15,r5 - std Y+16,r8 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r23 - std Y+18,r2 - std Y+19,r21 - std Y+20,r20 - std Y+21,r3 - std Y+22,r18 - std Y+23,r19 - std Y+24,r22 - std Y+25,r9 - std Y+26,r10 - std Y+27,r7 - std Y+28,r6 - std Y+29,r11 - std Y+30,r4 - std Y+31,r5 - std Y+32,r8 - ldd r18,Z+32 - ldd r19,Z+33 - ldd r20,Z+34 - ldd r21,Z+35 - ldd r22,Z+36 - ldd r23,Z+37 - ldd r2,Z+38 - ldd r3,Z+39 - ldd r4,Z+40 - ldd r5,Z+41 - ldd r6,Z+42 - ldd r7,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - std Y+33,r23 - std Y+34,r2 - std Y+35,r21 - std Y+36,r20 - std Y+37,r3 - std Y+38,r18 - std Y+39,r19 - std Y+40,r22 - std Y+41,r9 - std Y+42,r10 - std Y+43,r7 - std Y+44,r6 - std Y+45,r11 - std Y+46,r4 - std Y+47,r5 - std Y+48,r8 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -122: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 122b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,28 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -150: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 150b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,28 - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 -179: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) 
- lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 179b - std Y+33,r12 - std Y+34,r13 - std Y+35,r14 - std Y+36,r15 - std Y+37,r24 - std Y+38,r25 - std Y+39,r16 - std Y+40,r17 - ldi r26,28 - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 -207: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 207b - std Y+41,r12 - std Y+42,r13 - std Y+43,r14 - std Y+44,r15 - std Y+45,r24 - std Y+46,r25 - std Y+47,r16 - std Y+48,r17 - ldi r26,112 -227: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov 
r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+41 - eor r22,r0 - ldd r0,Y+42 - eor r23,r0 - ldd r0,Y+43 - eor r2,r0 - ldd r0,Y+44 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldd r0,Y+45 - eor r4,r0 - ldd r0,Y+46 - eor r5,r0 - ldd r0,Y+47 - eor r6,r0 - ldd r0,Y+48 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) 
-#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if 
defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+33 - eor r4,r0 - ldd r0,Y+34 - eor r5,r0 - ldd r0,Y+35 - eor r6,r0 - ldd r0,Y+36 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor 
r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldd r0,Y+37 - eor r8,r0 - ldd r0,Y+38 - eor r9,r0 - ldd r0,Y+39 - eor r10,r0 - ldd r0,Y+40 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi 
r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+33 - ldd r13,Y+34 - ldd r14,Y+35 - ldd r15,Y+36 - ldd r24,Y+37 - ldd r25,Y+38 - ldd r16,Y+39 - ldd r17,Y+40 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+33,r14 - std Y+34,r12 - std Y+35,r24 - std 
Y+36,r17 - std Y+37,r16 - std Y+38,r15 - std Y+39,r25 - std Y+40,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+41 - eor r8,r0 - ldd r0,Y+42 - eor r9,r0 - ldd r0,Y+43 - eor r10,r0 - ldd r0,Y+44 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldd r0,Y+45 - eor r18,r0 - ldd r0,Y+46 - eor r19,r0 - ldd r0,Y+47 - eor r20,r0 - ldd r0,Y+48 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - 
mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+41 - ldd r13,Y+42 - ldd r14,Y+43 - ldd r15,Y+44 - ldd r24,Y+45 - ldd r25,Y+46 - ldd r16,Y+47 - ldd r17,Y+48 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+41,r14 - std Y+42,r12 - std Y+43,r24 - std Y+44,r17 - std Y+45,r16 - std Y+46,r15 - std Y+47,r25 - std Y+48,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+33 - eor r18,r0 - ldd r0,Y+34 - eor r19,r0 - ldd r0,Y+35 - eor r20,r0 - ldd r0,Y+36 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldd r0,Y+37 - eor r22,r0 - ldd r0,Y+38 - eor r23,r0 - ldd r0,Y+39 - eor r2,r0 - ldd r0,Y+40 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - 
mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 903f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 227b -903: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+49 - ldd r27,Y+50 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,50 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_384_decrypt, .-skinny_128_384_decrypt - - .text -.global skinny_128_256_init - .type skinny_128_256_init, @function -skinny_128_256_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ldi r22,8 -1: - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - st Z+,r18 - st Z+,r19 - st Z+,r20 - st Z+,r21 - dec r22 - brne 1b - ret - .size skinny_128_256_init, .-skinny_128_256_init - - .text -.global skinny_128_256_encrypt - .type skinny_128_256_encrypt, @function -skinny_128_256_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Y+5,r18 - std Y+6,r19 - std Y+7,r20 - std Y+8,r21 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - std Y+9,r18 - std Y+10,r19 - std Y+11,r20 - std Y+12,r21 - ldd 
r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - std Y+13,r18 - std Y+14,r19 - std Y+15,r20 - std Y+16,r21 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - ldd r18,Z+20 - ldd r19,Z+21 - ldd r20,Z+22 - ldd r21,Z+23 - std Y+21,r18 - std Y+22,r19 - std Y+23,r20 - std Y+24,r21 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - std Y+25,r18 - std Y+26,r19 - std Y+27,r20 - std Y+28,r21 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - std Y+29,r18 - std Y+30,r19 - std Y+31,r20 - std Y+32,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r26,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - mov r26,r1 -82: - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld 
r11,Z -#else - lpm - mov r11,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - ldi r27,2 - eor r4,r27 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - 
lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - inc r26 - ldi r27,2 - eor r22,r27 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - mov r0,r2 - mov r2,r22 - mov 
r22,r0 - mov r0,r3 - mov r3,r23 - mov r23,r0 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif 
defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - inc r26 - ldi r27,2 - eor r18,r27 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 - eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r20 - mov r20,r18 - mov r18,r0 - mov r0,r21 - mov r21,r19 - mov r19,r0 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r13 - std Y+10,r17 - std Y+11,r12 - std Y+12,r25 - std Y+13,r14 - std Y+14,r16 - std Y+15,r24 - std Y+16,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if 
defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r13 - std Y+26,r17 - std Y+27,r12 - std Y+28,r25 - std Y+29,r14 - std Y+30,r16 - std Y+31,r24 - std Y+32,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if 
defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - inc r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - inc r26 - ldi r27,2 - eor r8,r27 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - cpi r26,96 - breq 594f - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r13 - std Y+2,r17 - std Y+3,r12 - std Y+4,r25 - std Y+5,r14 - std Y+6,r16 - std Y+7,r24 - std Y+8,r15 - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r27,hh8(table_2) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r13 - std Y+18,r17 - std Y+19,r12 - std Y+20,r25 - std Y+21,r14 - std Y+22,r16 - std 
Y+23,r24 - std Y+24,r15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r27,hh8(table_0) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 82b -594: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_encrypt, .-skinny_128_256_encrypt - -.global skinny_128_256_encrypt_tk_full - .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt - - .text -.global skinny_128_256_decrypt - .type skinny_128_256_decrypt, @function -skinny_128_256_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,32 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 52 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - ldd r4,Z+8 - ldd r5,Z+9 - ldd r6,Z+10 - ldd r7,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - std Y+5,r22 - std Y+6,r23 - std Y+7,r2 - std Y+8,r3 - std Y+9,r4 - std Y+10,r5 - std Y+11,r6 - std Y+12,r7 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r18,Z+16 - ldd r19,Z+17 - ldd r20,Z+18 - ldd r21,Z+19 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - ldd r4,Z+24 - ldd r5,Z+25 - ldd r6,Z+26 - ldd r7,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Y+17,r18 - std Y+18,r19 - std Y+19,r20 - std Y+20,r21 - std Y+21,r22 - std Y+22,r23 - std Y+23,r2 - std Y+24,r3 - std Y+25,r4 - std Y+26,r5 - std Y+27,r6 - std Y+28,r7 - std Y+29,r8 - std Y+30,r9 - std Y+31,r10 - std Y+32,r11 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ldi r30,lo8(table_2) - ldi r31,hi8(table_2) -#if defined(RAMPZ) - ldi r26,hh8(table_2) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,24 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 -90: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov 
r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 90b - std Y+17,r12 - std Y+18,r13 - std Y+19,r14 - std Y+20,r15 - std Y+21,r24 - std Y+22,r25 - std Y+23,r16 - std Y+24,r17 - ldi r26,24 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 -118: - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - dec r26 - brne 118b - std Y+25,r12 - std Y+26,r13 - std Y+27,r14 - std Y+28,r15 - std Y+29,r24 - std Y+30,r25 - std Y+31,r16 - std Y+32,r17 - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r26,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r26 -#endif - ldi r26,96 -139: - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) 
- ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - eor r8,r22 - eor r9,r23 - eor r10,r2 - eor r11,r3 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - mov r0,r4 - mov r4,r5 - mov r5,r6 - mov r6,r7 - mov r7,r0 - mov r0,r8 - mov r8,r10 - mov r10,r0 - mov r0,r9 - mov r9,r11 - mov r11,r0 - mov r0,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - ldd r0,Y+9 - eor r22,r0 - ldd r0,Y+10 - eor r23,r0 - ldd r0,Y+11 - eor r2,r0 - ldd r0,Y+12 - eor r3,r0 - ldd r0,Y+25 - eor r22,r0 - ldd r0,Y+26 - eor r23,r0 - ldd r0,Y+27 - eor r2,r0 - ldd r0,Y+28 - eor r3,r0 - ldd r0,Y+13 - eor r4,r0 - ldd r0,Y+14 - eor r5,r0 - ldd r0,Y+15 - eor r6,r0 - ldd r0,Y+16 - eor r7,r0 - ldd r0,Y+29 - eor r4,r0 - ldd r0,Y+30 - eor r5,r0 - ldd r0,Y+31 - eor r6,r0 - ldd r0,Y+32 - eor r7,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - ldi r27,2 - eor r8,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - 
mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r18 - mov r18,r20 - mov r20,r0 - mov r0,r19 - mov r19,r21 - mov r21,r0 - mov r0,r3 - mov r3,r2 - mov r2,r23 - mov r23,r22 - mov r22,r0 - ldd r0,Y+1 - eor r4,r0 - ldd r0,Y+2 - eor r5,r0 - ldd r0,Y+3 
- eor r6,r0 - ldd r0,Y+4 - eor r7,r0 - ldd r0,Y+17 - eor r4,r0 - ldd r0,Y+18 - eor r5,r0 - ldd r0,Y+19 - eor r6,r0 - ldd r0,Y+20 - eor r7,r0 - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - ldd r0,Y+21 - eor r8,r0 - ldd r0,Y+22 - eor r9,r0 - ldd r0,Y+23 - eor r10,r0 - ldd r0,Y+24 - eor r11,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r4,r27 - ldi r27,2 - eor r18,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - ldd r24,Y+5 - ldd r25,Y+6 - ldd r16,Y+7 - ldd r17,Y+8 - std Y+1,r14 - std Y+2,r12 - std Y+3,r24 - std Y+4,r17 - std Y+5,r16 - std Y+6,r15 - std Y+7,r25 - std Y+8,r13 - ldd r12,Y+17 - ldd r13,Y+18 - ldd r14,Y+19 - ldd r15,Y+20 - ldd r24,Y+21 - ldd r25,Y+22 - ldd r16,Y+23 - ldd r17,Y+24 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+17,r14 - std Y+18,r12 - std Y+19,r24 - std Y+20,r17 - std Y+21,r16 - std Y+22,r15 - std Y+23,r25 - std Y+24,r13 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - mov r0,r18 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r7 - mov r7,r6 - mov r6,r5 - mov r5,r4 - mov r4,r0 - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - ldd r0,Y+25 - eor r8,r0 - ldd r0,Y+26 - eor r9,r0 - ldd r0,Y+27 - eor r10,r0 - ldd r0,Y+28 - eor r11,r0 - ldd r0,Y+13 - eor r18,r0 - ldd r0,Y+14 - eor r19,r0 - ldd r0,Y+15 - eor r20,r0 - ldd r0,Y+16 - eor r21,r0 - ldd r0,Y+29 - eor r18,r0 - ldd r0,Y+30 - eor r19,r0 - ldd r0,Y+31 - eor r20,r0 - ldd r0,Y+32 - eor r21,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r8,r27 - ldi r27,2 - eor r22,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - ldd r12,Y+9 - ldd r13,Y+10 - ldd r14,Y+11 - ldd r15,Y+12 - ldd r24,Y+13 - ldd r25,Y+14 - ldd r16,Y+15 - ldd r17,Y+16 - std Y+9,r14 - std Y+10,r12 - std Y+11,r24 - std Y+12,r17 - std Y+13,r16 - std Y+14,r15 - std Y+15,r25 - std Y+16,r13 - ldd r12,Y+25 - ldd r13,Y+26 - ldd r14,Y+27 - ldd r15,Y+28 - ldd r24,Y+29 - ldd r25,Y+30 - ldd r16,Y+31 - ldd r17,Y+32 - mov r30,r12 -#if defined(RAMPZ) - elpm r12,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r12,Z -#elif defined(__AVR_TINY__) - ld r12,Z -#else - lpm - mov r12,r0 -#endif - mov r30,r13 -#if defined(RAMPZ) - elpm r13,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r13,Z -#elif defined(__AVR_TINY__) - ld r13,Z -#else - lpm - mov r13,r0 -#endif - mov r30,r14 -#if defined(RAMPZ) - elpm r14,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r14,Z -#elif defined(__AVR_TINY__) - ld r14,Z -#else - lpm - mov 
r14,r0 -#endif - mov r30,r15 -#if defined(RAMPZ) - elpm r15,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r15,Z -#elif defined(__AVR_TINY__) - ld r15,Z -#else - lpm - mov r15,r0 -#endif - mov r30,r24 -#if defined(RAMPZ) - elpm r24,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r24,Z -#elif defined(__AVR_TINY__) - ld r24,Z -#else - lpm - mov r24,r0 -#endif - mov r30,r25 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - mov r30,r16 -#if defined(RAMPZ) - elpm r16,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r16,Z -#elif defined(__AVR_TINY__) - ld r16,Z -#else - lpm - mov r16,r0 -#endif - mov r30,r17 -#if defined(RAMPZ) - elpm r17,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r17,Z -#elif defined(__AVR_TINY__) - ld r17,Z -#else - lpm - mov r17,r0 -#endif - std Y+25,r14 - std Y+26,r12 - std Y+27,r24 - std Y+28,r17 - std Y+29,r16 - std Y+30,r15 - std Y+31,r25 - std Y+32,r13 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - eor r22,r4 - eor r23,r5 - eor r2,r6 - eor r3,r7 - mov r0,r22 - mov r22,r23 - mov r23,r2 - mov r2,r3 - mov r3,r0 - mov r0,r4 - mov r4,r6 - mov r6,r0 - mov r0,r5 - mov r5,r7 - mov r7,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - ldd r0,Y+1 - eor r18,r0 - ldd r0,Y+2 - eor r19,r0 - ldd r0,Y+3 - eor r20,r0 - ldd r0,Y+4 - eor r21,r0 - ldd r0,Y+17 - eor r18,r0 - ldd r0,Y+18 - eor r19,r0 - ldd r0,Y+19 - eor r20,r0 - ldd r0,Y+20 - eor r21,r0 - ldd r0,Y+5 - eor r22,r0 - ldd r0,Y+6 - eor r23,r0 - ldd r0,Y+7 - eor r2,r0 - ldd r0,Y+8 - eor r3,r0 - ldd r0,Y+21 - eor r22,r0 - ldd r0,Y+22 - eor r23,r0 - ldd r0,Y+23 - eor r2,r0 - ldd r0,Y+24 - eor r3,r0 - ldi r30,lo8(table_4) - ldi r31,hi8(table_4) -#if defined(RAMPZ) - ldi r24,hh8(table_4) - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r22,r27 - dec r26 - mov r30,r26 -#if defined(RAMPZ) - elpm r27,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r27,Z -#elif defined(__AVR_TINY__) - ld r27,Z -#else - lpm - mov r27,r0 -#endif - eor r18,r27 - ldi r27,2 - eor r4,r27 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r27,hh8(table_1) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - mov r30,r18 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - mov r30,r19 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - mov r30,r20 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - mov r30,r21 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - mov r30,r22 -#if defined(RAMPZ) - elpm r22,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r22,Z -#elif defined(__AVR_TINY__) - ld r22,Z -#else - lpm - mov r22,r0 -#endif - mov r30,r23 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - mov r30,r2 -#if defined(RAMPZ) - elpm r2,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r2,Z -#elif defined(__AVR_TINY__) - ld r2,Z -#else - lpm - mov 
r2,r0 -#endif - mov r30,r3 -#if defined(RAMPZ) - elpm r3,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r3,Z -#elif defined(__AVR_TINY__) - ld r3,Z -#else - lpm - mov r3,r0 -#endif - mov r30,r4 -#if defined(RAMPZ) - elpm r4,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r4,Z -#elif defined(__AVR_TINY__) - ld r4,Z -#else - lpm - mov r4,r0 -#endif - mov r30,r5 -#if defined(RAMPZ) - elpm r5,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r5,Z -#elif defined(__AVR_TINY__) - ld r5,Z -#else - lpm - mov r5,r0 -#endif - mov r30,r6 -#if defined(RAMPZ) - elpm r6,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r6,Z -#elif defined(__AVR_TINY__) - ld r6,Z -#else - lpm - mov r6,r0 -#endif - mov r30,r7 -#if defined(RAMPZ) - elpm r7,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r7,Z -#elif defined(__AVR_TINY__) - ld r7,Z -#else - lpm - mov r7,r0 -#endif - mov r30,r8 -#if defined(RAMPZ) - elpm r8,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r8,Z -#elif defined(__AVR_TINY__) - ld r8,Z -#else - lpm - mov r8,r0 -#endif - mov r30,r9 -#if defined(RAMPZ) - elpm r9,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r9,Z -#elif defined(__AVR_TINY__) - ld r9,Z -#else - lpm - mov r9,r0 -#endif - mov r30,r10 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - mov r30,r11 -#if defined(RAMPZ) - elpm r11,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r11,Z -#elif defined(__AVR_TINY__) - ld r11,Z -#else - lpm - mov r11,r0 -#endif - cp r26,r1 - breq 651f - ldi r30,lo8(table_3) - ldi r31,hi8(table_3) -#if defined(RAMPZ) - ldi r27,hh8(table_3) - out _SFR_IO_ADDR(RAMPZ),r27 -#endif - rjmp 139b -651: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+33 - ldd r27,Y+34 - st X+,r18 - st X+,r19 - st X+,r20 - st X+,r21 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - adiw r28,34 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size skinny_128_256_decrypt, .-skinny_128_256_decrypt - -#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-skinny128.c b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-skinny128.c deleted file mode 100644 index 579ced1..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-skinny128.c +++ /dev/null @@ -1,801 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-skinny128.h" -#include "internal-skinnyutil.h" -#include "internal-util.h" -#include - -#if !defined(__AVR__) - -STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) -{ - /* This function is used to fast-forward the TK1 tweak value - * to the value at the end of the key schedule for decryption. - * - * The tweak permutation repeats every 16 rounds, so SKINNY-128-256 - * with 48 rounds does not need any fast forwarding applied. - * SKINNY-128-128 with 40 rounds and SKINNY-128-384 with 56 rounds - * are equivalent to applying the permutation 8 times: - * - * PT*8 = [5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12] - */ - uint32_t row0 = tk[0]; - uint32_t row1 = tk[1]; - uint32_t row2 = tk[2]; - uint32_t row3 = tk[3]; - tk[0] = ((row1 >> 8) & 0x0000FFFFU) | - ((row0 >> 8) & 0x00FF0000U) | - ((row0 << 8) & 0xFF000000U); - tk[1] = ((row1 >> 24) & 0x000000FFU) | - ((row0 << 8) & 0x00FFFF00U) | - ((row1 << 24) & 0xFF000000U); - tk[2] = ((row3 >> 8) & 0x0000FFFFU) | - ((row2 >> 8) & 0x00FF0000U) | - ((row2 << 8) & 0xFF000000U); - tk[3] = ((row3 >> 24) & 0x000000FFU) | - ((row2 << 8) & 0x00FFFF00U) | - ((row3 << 24) & 0xFF000000U); -} - -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); - memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); -#else - /* Set the initial states of TK1, TK2, and TK3 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Set up the key schedule using TK2 and TK3. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
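For orientation, the PT*8 constant quoted in the skinny128_fast_forward_tk comment above can be checked independently: composing the SKINNY tweakey cell permutation PT (taken from the SKINNY specification, not from this hunk) with itself eight times reproduces it. A minimal self-contained sketch, not part of the patch, with an illustrative main harness:

#include <stdio.h>
#include <string.h>

/* SKINNY tweakey cell permutation PT, as given in the SKINNY spec. */
static const unsigned char PT[16] = {
    9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7
};

int main(void)
{
    static const unsigned char expected[16] = {
        5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12
    };
    unsigned char p[16], q[16];
    int i, r;
    for (i = 0; i < 16; ++i)
        p[i] = (unsigned char)i;      /* start from the identity permutation */
    for (r = 0; r < 8; ++r) {         /* compose PT with itself eight times */
        for (i = 0; i < 16; ++i)
            q[i] = p[PT[i]];
        memcpy(p, q, sizeof(p));
    }
    printf("PT^8 %s the fast-forward constant\n",
           memcmp(p, expected, sizeof(p)) == 0 ? "matches" : "differs from");
    return 0;
}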
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ TK3[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ TK3[1] ^ (rc >> 4); - - /* Permute TK2 and TK3 for the next round */ - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - - /* Apply the LFSR's to TK2 and TK3 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } -#endif -} - -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t TK3[4]; - uint8_t rc = 0x15; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Permute TK1 to fast-forward it to the end of the key schedule */ - skinny128_fast_forward_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_fast_forward_tk(TK2); - skinny128_fast_forward_tk(TK3); - for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
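The skinny128_LFSR2 and skinny128_LFSR3 calls that follow come from internal-skinnyutil.h, which is not visible in this hunk. As a sketch only, and not necessarily the exact macros used here, their usual word-sliced form applies the two 8-bit SKINNY LFSRs to all four tweakey bytes packed in a 32-bit word at once; the helper names below are illustrative:

#include <stdint.h>

/* TK2 LFSR: (x7,...,x0) -> (x6,...,x0, x7 ^ x5), applied to four bytes per word. */
static inline uint32_t lfsr2_word(uint32_t x)
{
    return ((x << 1) & 0xFEFEFEFEU) ^
           (((x >> 7) ^ (x >> 5)) & 0x01010101U);
}

/* TK3 LFSR: (x7,...,x0) -> (x0 ^ x6, x7, x6, ..., x1), applied to four bytes per word. */
static inline uint32_t lfsr3_word(uint32_t x)
{
    return ((x >> 1) & 0x7F7F7F7FU) ^
           (((x << 7) ^ (x << 1)) & 0x80808080U);
}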
- skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - skinny128_LFSR3(TK3[2]); - skinny128_LFSR3(TK3[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_inv_permute_tk(TK3); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); - skinny128_LFSR2(TK3[2]); - skinny128_LFSR2(TK3[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK3[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); - TK2[0] = le_load_word32(tk2); - TK2[1] = le_load_word32(tk2 + 4); - TK2[2] = le_load_word32(tk2 + 8); - TK2[3] = le_load_word32(tk2 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK3[0] = le_load_word32(ks->TK3); - TK3[1] = le_load_word32(ks->TK3 + 4); - TK3[2] = le_load_word32(ks->TK3 + 8); - TK3[3] = le_load_word32(ks->TK3 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; - s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK3); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t TK3[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - TK3[0] = le_load_word32(key + 32); - TK3[1] = le_load_word32(key + 36); - TK3[2] = le_load_word32(key + 40); - TK3[3] = le_load_word32(key + 44); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1, TK2, and TK3 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_permute_tk(TK3); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR3(TK3[0]); - skinny128_LFSR3(TK3[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) -{ -#if !SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint32_t *schedule; - unsigned round; - uint8_t rc; -#endif - -#if SKINNY_128_SMALL_SCHEDULE - /* Copy the input key as-is when using the small key schedule version */ - memcpy(ks->TK1, key, sizeof(ks->TK1)); - memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); -#else - /* Set the initial states of TK1 and TK2 */ - memcpy(ks->TK1, key, 16); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Set up the key schedule using TK2. TK1 is not added - * to the key schedule because we will derive that part of the - * schedule during encryption operations */ - schedule = ks->k; - rc = 0; - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { - /* XOR the round constants with the current schedule words. - * The round constants for the 3rd and 4th rows are - * fixed and will be applied during encryption. 
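The rc update just below is the 6-bit round-constant LFSR from the SKINNY specification; run on its own it produces the sequence 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, ... A small standalone sketch of that update, with an illustrative printing loop that is not part of the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t rc = 0;
    int round;
    for (round = 0; round < 8; ++round) {
        /* Same update as in the key schedule loop below. */
        rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01;
        rc &= 0x3F;
        printf("round %d: rc = 0x%02X\n", round, rc);
    }
    return 0;
}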
*/ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - schedule[0] = TK2[0] ^ (rc & 0x0F); - schedule[1] = TK2[1] ^ (rc >> 4); - - /* Permute TK2 for the next round */ - skinny128_permute_tk(TK2); - - /* Apply the LFSR to TK2 */ - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } -#endif -} - -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0; -#else - const uint32_t *schedule = ks->k; -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1 */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); -#endif - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; -#endif - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); -#else - schedule += 2; -#endif - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; -#if SKINNY_128_SMALL_SCHEDULE - uint32_t TK2[4]; - uint8_t rc = 0x09; -#else - const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); -#endif - uint32_t temp; - unsigned round; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakable part of the state, TK1. 
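The three-XOR "mix the columns" sequence used in the encryption rounds above and the inverse sequence used in the decryption rounds below undo each other. A minimal round-trip sketch over the four row words; the function names and the assert harness are illustrative, while the statement order inside each function mirrors the state code in this file:

#include <assert.h>
#include <stdint.h>

/* Forward MixColumns on the four row words, as in the encrypt rounds. */
static void mix_columns(uint32_t s[4])
{
    uint32_t temp;
    s[1] ^= s[2];
    s[2] ^= s[0];
    temp = s[3] ^ s[2];
    s[3] = s[2];
    s[2] = s[1];
    s[1] = s[0];
    s[0] = temp;
}

/* Inverse MixColumns, as in the decrypt rounds. */
static void inv_mix_columns(uint32_t s[4])
{
    uint32_t temp = s[3];
    s[3] = s[0];
    s[0] = s[1];
    s[1] = s[2];
    s[3] ^= temp;
    s[2] = temp ^ s[0];
    s[1] ^= s[2];
}

int main(void)
{
    uint32_t s[4]    = { 0x01234567, 0x89ABCDEF, 0x0F1E2D3C, 0x4B5A6978 };
    uint32_t orig[4] = { s[0], s[1], s[2], s[3] };
    mix_columns(s);
    inv_mix_columns(s);        /* should restore the original state */
    assert(s[0] == orig[0] && s[1] == orig[1] &&
           s[2] == orig[2] && s[3] == orig[3]);
    return 0;
}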
- * There is no need to fast-forward TK1 because the value at - * the end of the key schedule is the same as at the start */ - TK1[0] = le_load_word32(ks->TK1); - TK1[1] = le_load_word32(ks->TK1 + 4); - TK1[2] = le_load_word32(ks->TK1 + 8); - TK1[3] = le_load_word32(ks->TK1 + 12); -#if SKINNY_128_SMALL_SCHEDULE - TK2[0] = le_load_word32(ks->TK2); - TK2[1] = le_load_word32(ks->TK2 + 4); - TK2[2] = le_load_word32(ks->TK2 + 8); - TK2[3] = le_load_word32(ks->TK2 + 12); - for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { - // Also fast-forward the LFSR's on every byte of TK2. - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - skinny128_LFSR2(TK2[2]); - skinny128_LFSR2(TK2[3]); - } -#endif - - /* Perform all decryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Inverse permutation on TK1 for this round */ - skinny128_inv_permute_tk(TK1); -#if SKINNY_128_SMALL_SCHEDULE - skinny128_inv_permute_tk(TK2); - skinny128_LFSR3(TK2[2]); - skinny128_LFSR3(TK2[3]); -#endif - - /* Inverse mix of the columns */ - temp = s3; - s3 = s0; - s0 = s1; - s1 = s2; - s3 ^= temp; - s2 = temp ^ s0; - s1 ^= s2; - - /* Inverse shift of the rows */ - s1 = leftRotate24(s1); - s2 = leftRotate16(s2); - s3 = leftRotate8(s3); - - /* Apply the subkey for this round */ -#if SKINNY_128_SMALL_SCHEDULE - rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); -#else - s0 ^= schedule[0] ^ TK1[0]; - s1 ^= schedule[1] ^ TK1[1]; - schedule -= 2; -#endif - s2 ^= 0x02; - - /* Apply the inverse of the S-box to all bytes in the state */ - skinny128_inv_sbox(s0); - skinny128_inv_sbox(s1); - skinny128_inv_sbox(s2); - skinny128_inv_sbox(s3); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t TK1[4]; - uint32_t TK2[4]; - uint32_t temp; - unsigned round; - uint8_t rc = 0; - - /* Unpack the input block into the state array */ - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Make a local copy of the tweakey */ - TK1[0] = le_load_word32(key); - TK1[1] = le_load_word32(key + 4); - TK1[2] = le_load_word32(key + 8); - TK1[3] = le_load_word32(key + 12); - TK2[0] = le_load_word32(key + 16); - TK2[1] = le_load_word32(key + 20); - TK2[2] = le_load_word32(key + 24); - TK2[3] = le_load_word32(key + 28); - - /* Perform all encryption rounds */ - for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { - /* Apply the S-box to all bytes in the state */ - skinny128_sbox(s0); - skinny128_sbox(s1); - skinny128_sbox(s2); - skinny128_sbox(s3); - - /* XOR the round constant and the subkey for this round */ - rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; - rc &= 0x3F; - s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); - s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); - s2 ^= 0x02; - - /* Shift the cells in the rows right, which moves the cell - * values up closer to the MSB. 
That is, we do a left rotate - * on the word to rotate the cells in the word right */ - s1 = leftRotate8(s1); - s2 = leftRotate16(s2); - s3 = leftRotate24(s3); - - /* Mix the columns */ - s1 ^= s2; - s2 ^= s0; - temp = s3 ^ s2; - s3 = s2; - s2 = s1; - s1 = s0; - s0 = temp; - - /* Permute TK1 and TK2 for the next round */ - skinny128_permute_tk(TK1); - skinny128_permute_tk(TK2); - skinny128_LFSR2(TK2[0]); - skinny128_LFSR2(TK2[1]); - } - - /* Pack the result into the output buffer */ - le_store_word32(output, s0); - le_store_word32(output + 4, s1); - le_store_word32(output + 8, s2); - le_store_word32(output + 12, s3); -} - -#else /* __AVR__ */ - -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2) -{ - memcpy(ks->TK2, tk2, 16); - skinny_128_384_encrypt(ks, output, input); -} - -#endif /* __AVR__ */ diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-skinny128.h b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-skinny128.h deleted file mode 100644 index 2bfda3c..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-skinny128.h +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNY128_H -#define LW_INTERNAL_SKINNY128_H - -/** - * \file internal-skinny128.h - * \brief SKINNY-128 block cipher family. - * - * References: https://eprint.iacr.org/2016/660.pdf, - * https://sites.google.com/site/skinnycipher/ - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \def SKINNY_128_SMALL_SCHEDULE - * \brief Defined to 1 to use the small key schedule version of SKINNY-128. - */ -#if defined(__AVR__) -#define SKINNY_128_SMALL_SCHEDULE 1 -#else -#define SKINNY_128_SMALL_SCHEDULE 0 -#endif - -/** - * \brief Size of a block for SKINNY-128 block ciphers. - */ -#define SKINNY_128_BLOCK_SIZE 16 - -/** - * \brief Number of rounds for SKINNY-128-384. - */ -#define SKINNY_128_384_ROUNDS 56 - -/** - * \brief Structure of the key schedule for SKINNY-128-384. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; - - /** TK3 for the small key schedule */ - uint8_t TK3[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_384_ROUNDS * 2]; -#endif - -} skinny_128_384_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-384. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_384_init - (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_encrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-384. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_384_decrypt - (const skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and an explicitly - * provided TK2 value. - * - * \param ks Points to the SKINNY-128-384 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tk2 TK2 value that should be updated on the fly. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when both TK1 and TK2 change from block to block. - * When the key is initialized with skinny_128_384_init(), the TK2 part of - * the key value should be set to zero. - * - * \note Some versions of this function may modify the key schedule to - * copy tk2 into place. - */ -void skinny_128_384_encrypt_tk2 - (skinny_128_384_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, const unsigned char *tk2); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-384 and a - * fully specified tweakey value. - * - * \param key Points to the 384-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-384 but - * more memory-efficient. - */ -void skinny_128_384_encrypt_tk_full - (const unsigned char key[48], unsigned char *output, - const unsigned char *input); - -/** - * \brief Number of rounds for SKINNY-128-256. - */ -#define SKINNY_128_256_ROUNDS 48 - -/** - * \brief Structure of the key schedule for SKINNY-128-256. 
- */ -typedef struct -{ - /** TK1 for the tweakable part of the key schedule */ - uint8_t TK1[16]; - -#if SKINNY_128_SMALL_SCHEDULE - /** TK2 for the small key schedule */ - uint8_t TK2[16]; -#else - /** Words of the full key schedule */ - uint32_t k[SKINNY_128_256_ROUNDS * 2]; -#endif - -} skinny_128_256_key_schedule_t; - -/** - * \brief Initializes the key schedule for SKINNY-128-256. - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - */ -void skinny_128_256_init - (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_encrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with SKINNY-128-256. - * - * \param ks Points to the SKINNY-128-256 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void skinny_128_256_decrypt - (const skinny_128_256_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with SKINNY-128-256 and a - * fully specified tweakey value. - * - * \param key Points to the 256-bit tweakey value. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version is useful when the entire tweakey changes from block to - * block. It is slower than the other versions of SKINNY-128-256 but - * more memory-efficient. - */ -void skinny_128_256_encrypt_tk_full - (const unsigned char key[32], unsigned char *output, - const unsigned char *input); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-skinnyutil.h b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-skinnyutil.h deleted file mode 100644 index 83136cb..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-skinnyutil.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SKINNYUTIL_H -#define LW_INTERNAL_SKINNYUTIL_H - -/** - * \file internal-skinnyutil.h - * \brief Utilities to help implement SKINNY and its variants. - */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @cond skinnyutil */ - -/* Utilities for implementing SKINNY-128 */ - -#define skinny128_LFSR2(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ - (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ - } while (0) - - -#define skinny128_LFSR3(x) \ - do { \ - uint32_t _x = (x); \ - (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ - (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) -#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) - -#define skinny128_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint32_t row2 = tk[2]; \ - uint32_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 16) | (row3 >> 16); \ - tk[0] = ((row2 >> 8) & 0x000000FFU) | \ - ((row2 << 16) & 0x00FF0000U) | \ - ( row3 & 0xFF00FF00U); \ - tk[1] = ((row2 >> 16) & 0x000000FFU) | \ - (row2 & 0xFF000000U) | \ - ((row3 << 8) & 0x0000FF00U) | \ - ( row3 & 0x00FF0000U); \ - } while (0) - -#define skinny128_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint32_t row0 = tk[0]; \ - uint32_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 >> 16) & 0x000000FFU) | \ - ((row0 << 8) & 0x0000FF00U) | \ - ((row1 << 16) & 0x00FF0000U) | \ - ( row1 & 0xFF000000U); \ - tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ - ((row0 << 16) & 0xFF000000U) | \ - ((row1 >> 16) & 0x000000FFU) | \ - ((row1 << 8) & 0x00FF0000U); \ - } while (0) - -/* - * Apply the SKINNY sbox. The original version from the specification is - * equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE(x) - * ((((x) & 0x01010101U) << 2) | - * (((x) & 0x06060606U) << 5) | - * (((x) & 0x20202020U) >> 5) | - * (((x) & 0xC8C8C8C8U) >> 2) | - * (((x) & 0x10101010U) >> 1)) - * - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE(x); - * x = SBOX_MIX(x); - * return SBOX_SWAP(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. 
- */ -#define skinny128_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ - y = (((x >> 5) & (x << 1)) & 0x04040404U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ - x = ((x & 0x08080808U) << 1) | \ - ((x & 0x32323232U) << 2) | \ - ((x & 0x01010101U) << 5) | \ - ((x & 0x80808080U) >> 6) | \ - ((x & 0x40404040U) >> 4) | \ - ((x & 0x04040404U) >> 2); \ -} while (0) - -/* - * Apply the inverse of the SKINNY sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) - * #define SBOX_SWAP(x) - * (((x) & 0xF9F9F9F9U) | - * (((x) >> 1) & 0x02020202U) | - * (((x) << 1) & 0x04040404U)) - * #define SBOX_PERMUTE_INV(x) - * ((((x) & 0x08080808U) << 1) | - * (((x) & 0x32323232U) << 2) | - * (((x) & 0x01010101U) << 5) | - * (((x) & 0xC0C0C0C0U) >> 5) | - * (((x) & 0x04040404U) >> 2)) - * - * x = SBOX_SWAP(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_PERMUTE_INV(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one - * final permuatation. This reduces the number of shift operations. - */ -#define skinny128_inv_sbox(x) \ -do { \ - uint32_t y; \ - \ - /* Mix the bits */ \ - x = ~x; \ - y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ - x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ - y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ - x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ - y = (((x << 2) & (x << 1)) & 0x80808080U); \ - x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ - y = (((x << 5) & (x << 1)) & 0x20202020U); \ - x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ - x = ~x; \ - \ - /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ - /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ - x = ((x & 0x01010101U) << 2) | \ - ((x & 0x04040404U) << 4) | \ - ((x & 0x02020202U) << 6) | \ - ((x & 0x20202020U) >> 5) | \ - ((x & 0xC8C8C8C8U) >> 2) | \ - ((x & 0x10101010U) >> 1); \ -} while (0) - -/* Utilities for implementing SKINNY-64 */ - -#define skinny64_LFSR2(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ - } while (0) - -#define skinny64_LFSR3(x) \ - do { \ - uint16_t _x = (x); \ - (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ - } while (0) - -/* LFSR2 and LFSR3 are inverses of each other */ -#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) -#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) - -#define skinny64_permute_tk(tk) \ - do { \ - /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ - uint16_t row2 = tk[2]; \ - uint16_t row3 = tk[3]; \ - tk[2] = tk[0]; \ - tk[3] = tk[1]; \ - row3 = (row3 << 8) | (row3 >> 8); \ - tk[0] = ((row2 << 4) & 0xF000U) | \ - ((row2 >> 8) & 0x00F0U) | \ - ( row3 & 0x0F0FU); \ - tk[1] = ((row2 << 8) & 0xF000U) | \ - ((row3 >> 4) & 0x0F00U) | \ - ( row3 & 0x00F0U) | \ - ( row2 & 0x000FU); \ - } while (0) - 
-#define skinny64_inv_permute_tk(tk) \ - do { \ - /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ - uint16_t row0 = tk[0]; \ - uint16_t row1 = tk[1]; \ - tk[0] = tk[2]; \ - tk[1] = tk[3]; \ - tk[2] = ((row0 << 8) & 0xF000U) | \ - ((row0 >> 4) & 0x0F00U) | \ - ((row1 >> 8) & 0x00F0U) | \ - ( row1 & 0x000FU); \ - tk[3] = ((row1 << 8) & 0xF000U) | \ - ((row0 << 8) & 0x0F00U) | \ - ((row1 >> 4) & 0x00F0U) | \ - ((row0 >> 8) & 0x000FU); \ - } while (0) - -/* - * Apply the SKINNY-64 sbox. The original version from the - * specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT(x) - * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT(x); - * return SBOX_MIX(x); - * - * However, we can mix the bits in their original positions and then - * delay the SBOX_SHIFT steps to be performed with one final rotation. - * This reduces the number of required shift operations from 14 to 10. - * - * We can further reduce the number of NOT operations from 4 to 2 - * using the technique from https://github.com/kste/skinny_avx to - * convert NOR-XOR operations into AND-XOR operations by converting - * the S-box into its NOT-inverse. - */ -#define skinny64_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ - x = ~x; \ - x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ -} while (0) - -/* - * Apply the inverse of the SKINNY-64 sbox. The original version - * from the specification is equivalent to: - * - * #define SBOX_MIX(x) - * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) - * #define SBOX_SHIFT_INV(x) - * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) - * - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * x = SBOX_MIX(x); - * x = SBOX_SHIFT_INV(x); - * return SBOX_MIX(x); - */ -#define skinny64_inv_sbox(x) \ -do { \ - x = ~x; \ - x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ - x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ - x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ - x = ~x; \ - x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ -} while (0) - -/** @endcond */ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-util.h b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - 
(ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. 
This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/skinny-hash.c b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/skinny-hash.c deleted file mode 100644 index 0abdeff..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/skinny-hash.c +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "skinny-hash.h" -#include "internal-skinny128.h" -#include "internal-util.h" -#include - -aead_hash_algorithm_t const skinny_tk3_hash_algorithm = { - "SKINNY-tk3-HASH", - sizeof(int), - SKINNY_HASH_SIZE, - AEAD_FLAG_NONE, - skinny_tk3_hash, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const skinny_tk2_hash_algorithm = { - "SKINNY-tk2-HASH", - sizeof(int), - SKINNY_HASH_SIZE, - AEAD_FLAG_NONE, - skinny_tk2_hash, - (aead_hash_init_t)0, - (aead_hash_update_t)0, - (aead_hash_finalize_t)0, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \brief Size of the permutation state for SKINNY-tk3-HASH. - */ -#define SKINNY_TK3_STATE_SIZE 48 - -/** - * \brief Size of the permutation state for SKINNY-tk2-HASH. - */ -#define SKINNY_TK2_STATE_SIZE 32 - -/** - * \brief Rate of absorbing data for SKINNY-tk3-HASH. - */ -#define SKINNY_TK3_HASH_RATE 16 - -/** - * \brief Rate of absorbing data for SKINNY-tk2-HASH. 
- */ -#define SKINNY_TK2_HASH_RATE 4 - -/** - * \brief Input block that is encrypted with the state for each - * block permutation of SKINNY-tk3-HASH or SKINNY-tk2-HASH. - */ -static unsigned char const skinny_hash_block[48] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 -}; - -/** - * \brief Permutes the internal state for SKINNY-tk3-HASH. - * - * \param state The state to be permuted. - */ -static void skinny_tk3_permute(unsigned char state[SKINNY_TK3_STATE_SIZE]) -{ - unsigned char temp[SKINNY_TK3_STATE_SIZE]; - skinny_128_384_encrypt_tk_full(state, temp, skinny_hash_block); - skinny_128_384_encrypt_tk_full(state, temp + 16, skinny_hash_block + 16); - skinny_128_384_encrypt_tk_full(state, temp + 32, skinny_hash_block + 32); - memcpy(state, temp, SKINNY_TK3_STATE_SIZE); -} - -/** - * \brief Permutes the internal state for SKINNY-tk2-HASH. - * - * \param state The state to be permuted. - */ -static void skinny_tk2_permute(unsigned char state[SKINNY_TK2_STATE_SIZE]) -{ - unsigned char temp[SKINNY_TK2_STATE_SIZE]; - skinny_128_256_encrypt_tk_full(state, temp, skinny_hash_block); - skinny_128_256_encrypt_tk_full(state, temp + 16, skinny_hash_block + 16); - memcpy(state, temp, SKINNY_TK2_STATE_SIZE); -} - -int skinny_tk3_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - unsigned char state[SKINNY_TK3_STATE_SIZE]; - unsigned temp; - - /* Initialize the hash state */ - memset(state, 0, sizeof(state)); - state[SKINNY_TK3_HASH_RATE] = 0x80; - - /* Process as many full blocks as possible */ - while (inlen >= SKINNY_TK3_HASH_RATE) { - lw_xor_block(state, in, SKINNY_TK3_HASH_RATE); - skinny_tk3_permute(state); - in += SKINNY_TK3_HASH_RATE; - inlen -= SKINNY_TK3_HASH_RATE; - } - - /* Pad and process the last block */ - temp = (unsigned)inlen; - lw_xor_block(state, in, temp); - state[temp] ^= 0x80; /* padding */ - skinny_tk3_permute(state); - - /* Generate the hash output */ - memcpy(out, state, 16); - skinny_tk3_permute(state); - memcpy(out + 16, state, 16); - return 0; -} - -int skinny_tk2_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - unsigned char state[SKINNY_TK2_STATE_SIZE]; - unsigned temp; - - /* Initialize the hash state */ - memset(state, 0, sizeof(state)); - state[SKINNY_TK2_HASH_RATE] = 0x80; - - /* Process as many full blocks as possible */ - while (inlen >= SKINNY_TK2_HASH_RATE) { - lw_xor_block(state, in, SKINNY_TK2_HASH_RATE); - skinny_tk2_permute(state); - in += SKINNY_TK2_HASH_RATE; - inlen -= SKINNY_TK2_HASH_RATE; - } - - /* Pad and process the last block */ - temp = (unsigned)inlen; - lw_xor_block(state, in, temp); - state[temp] ^= 0x80; /* padding */ - skinny_tk2_permute(state); - - /* Generate the hash output */ - memcpy(out, state, 16); - skinny_tk2_permute(state); - memcpy(out + 16, state, 16); - return 0; -} diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/skinny-hash.h b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/skinny-hash.h deleted file mode 100644 index f75ce9f..0000000 --- a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys-avr/skinny-hash.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SKINNY_HASH_H -#define LWCRYPTO_SKINNY_HASH_H - -#include "aead-common.h" - -/** - * \file skinny-hash.h - * \brief Hash algorithms based on the SKINNY block cipher. - * - * The SKINNY-AEAD family includes two hash algorithms: - * - * \li SKINNY-tk3-HASH with a 256-bit hash output, based around the - * SKINNY-128-384 tweakable block cipher. This is the primary hashing - * member of the family. - * \li SKINNY-tk2-HASH with a 256-bit hash output, based around the - * SKINNY-128-256 tweakable block cipher. - * - * References: https://sites.google.com/site/skinnycipher/home - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the hash output for SKINNY-tk3-HASH and SKINNY-tk2-HASH. - */ -#define SKINNY_HASH_SIZE 32 - -/** - * \brief Meta-information block for the SKINNY-tk3-HASH algorithm. - */ -extern aead_hash_algorithm_t const skinny_tk3_hash_algorithm; - -/** - * \brief Meta-information block for the SKINNY-tk2-HASH algorithm. - */ -extern aead_hash_algorithm_t const skinny_tk2_hash_algorithm; - -/** - * \brief Hashes a block of input data with SKINNY-tk3-HASH to - * generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * SKINNY_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int skinny_tk3_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Hashes a block of input data with SKINNY-tk2-HASH to - * generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * SKINNY_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. 
- */ -int skinny_tk2_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/aead-common.c b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/aead-common.c new file mode 100644 index 0000000..84fc53a --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/aead-common.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "aead-common.h" + +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = (accum - 1) >> 8; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} + +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size, int precheck) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = ((accum - 1) >> 8) & precheck; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/aead-common.h b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/aead-common.h new file mode 100644 index 0000000..2be95eb --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/aead-common.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
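The constant-time comparison in aead_check_tag() above works by OR-ing the XOR of every tag byte pair into accum, which is therefore zero only when the two tags are identical; (accum - 1) >> 8 then becomes an all-ones mask on a match and zero on a mismatch (the code relies on arithmetic right shift of the negative intermediate), so one mask both preserves or wipes the plaintext and yields the 0 / -1 return value without a data-dependent branch. A short sketch of the observable behaviour (hypothetical test program, assuming aead-common.c is linked in):

    #include <stdio.h>
    #include "aead-common.h"

    int main(void)
    {
        unsigned char computed[16] = {0xAA, 0xBB}; /* tag recomputed while decrypting */
        unsigned char received[16] = {0xAA, 0xBB}; /* tag taken from the packet */
        unsigned char plaintext[4] = {1, 2, 3, 4};
        int ret;

        /* Tags match: accum stays 0, the mask is all-ones, the plaintext is
         * left intact and the return value is 0. */
        ret = aead_check_tag(plaintext, sizeof(plaintext), computed, received, 16);
        printf("match:    ret=%d first=%d\n", ret, plaintext[0]);

        /* Flip one bit of the received tag: accum is now non-zero, the mask
         * collapses to 0, the plaintext is wiped and the return value is -1. */
        received[0] ^= 0x01;
        ret = aead_check_tag(plaintext, sizeof(plaintext), computed, received, 16);
        printf("mismatch: ret=%d first=%d\n", ret, plaintext[0]);
        return 0;
    }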
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_AEAD_COMMON_H +#define LWCRYPTO_AEAD_COMMON_H + +#include + +/** + * \file aead-common.h + * \brief Definitions that are common across AEAD schemes. + * + * AEAD stands for "Authenticated Encryption with Associated Data". + * It is a standard API pattern for securely encrypting and + * authenticating packets of data. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts and authenticates a packet with an AEAD scheme. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + */ +typedef int (*aead_cipher_encrypt_t) + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with an AEAD scheme. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. 
+ */
+typedef int (*aead_cipher_decrypt_t)
+    (unsigned char *m, unsigned long long *mlen,
+     unsigned char *nsec,
+     const unsigned char *c, unsigned long long clen,
+     const unsigned char *ad, unsigned long long adlen,
+     const unsigned char *npub,
+     const unsigned char *k);
+
+/**
+ * \brief Hashes a block of input data.
+ *
+ * \param out Buffer to receive the hash output.
+ * \param in Points to the input data to be hashed.
+ * \param inlen Length of the input data in bytes.
+ *
+ * \return Returns zero on success or -1 if there was an error in the
+ * parameters.
+ */
+typedef int (*aead_hash_t)
+    (unsigned char *out, const unsigned char *in, unsigned long long inlen);
+
+/**
+ * \brief Initializes the state for a hashing operation.
+ *
+ * \param state Hash state to be initialized.
+ */
+typedef void (*aead_hash_init_t)(void *state);
+
+/**
+ * \brief Updates a hash state with more input data.
+ *
+ * \param state Hash state to be updated.
+ * \param in Points to the input data to be incorporated into the state.
+ * \param inlen Length of the input data to be incorporated into the state.
+ */
+typedef void (*aead_hash_update_t)
+    (void *state, const unsigned char *in, unsigned long long inlen);
+
+/**
+ * \brief Returns the final hash value from a hashing operation.
+ *
+ * \param state Hash state to be finalized.
+ * \param out Points to the output buffer to receive the hash value.
+ */
+typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out);
+
+/**
+ * \brief Absorbs more input data into an XOF state.
+ *
+ * \param state XOF state to be updated.
+ * \param in Points to the input data to be absorbed into the state.
+ * \param inlen Length of the input data to be absorbed into the state.
+ *
+ * \sa ascon_xof_init(), ascon_xof_squeeze()
+ */
+typedef void (*aead_xof_absorb_t)
+    (void *state, const unsigned char *in, unsigned long long inlen);
+
+/**
+ * \brief Squeezes output data from an XOF state.
+ *
+ * \param state XOF state to squeeze the output data from.
+ * \param out Points to the output buffer to receive the squeezed data.
+ * \param outlen Number of bytes of data to squeeze out of the state.
+ */
+typedef void (*aead_xof_squeeze_t)
+    (void *state, unsigned char *out, unsigned long long outlen);
+
+/**
+ * \brief No special AEAD features.
+ */
+#define AEAD_FLAG_NONE 0x0000
+
+/**
+ * \brief The natural byte order of the AEAD cipher is little-endian.
+ *
+ * If this flag is not present, then the natural byte order of the
+ * AEAD cipher should be assumed to be big-endian.
+ *
+ * The natural byte order may be useful when formatting packet sequence
+ * numbers as nonces. The application needs to know whether the sequence
+ * number should be packed into the leading or trailing bytes of the nonce.
+ */
+#define AEAD_FLAG_LITTLE_ENDIAN 0x0001
+
+/**
+ * \brief Meta-information about an AEAD cipher.
+ */
+typedef struct
+{
+    const char *name;              /**< Name of the cipher */
+    unsigned key_len;              /**< Length of the key in bytes */
+    unsigned nonce_len;            /**< Length of the nonce in bytes */
+    unsigned tag_len;              /**< Length of the tag in bytes */
+    unsigned flags;                /**< Flags for extra features */
+    aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */
+    aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */
+
+} aead_cipher_t;
+
+/**
+ * \brief Meta-information about a hash algorithm that is related to an AEAD.
+ *
+ * Regular hash algorithms should provide the "hash", "init", "update",
+ * and "finalize" functions. Extensible Output Functions (XOFs) should
+ * provide the "hash", "init", "absorb", and "squeeze" functions.
+ */
+typedef struct
+{
+    const char *name;              /**< Name of the hash algorithm */
+    size_t state_size;             /**< Size of the incremental state structure */
+    unsigned hash_len;             /**< Length of the hash in bytes */
+    unsigned flags;                /**< Flags for extra features */
+    aead_hash_t hash;              /**< All in one hashing function */
+    aead_hash_init_t init;         /**< Incremental hash/XOF init function */
+    aead_hash_update_t update;     /**< Incremental hash update function */
+    aead_hash_finalize_t finalize; /**< Incremental hash finalize function */
+    aead_xof_absorb_t absorb;      /**< Incremental XOF absorb function */
+    aead_xof_squeeze_t squeeze;    /**< Incremental XOF squeeze function */
+
+} aead_hash_algorithm_t;
+
+/**
+ * \brief Check an authentication tag in constant time.
+ *
+ * \param plaintext Points to the plaintext data.
+ * \param plaintext_len Length of the plaintext in bytes.
+ * \param tag1 First tag to compare.
+ * \param tag2 Second tag to compare.
+ * \param tag_len Length of the tags in bytes.
+ *
+ * \return Returns -1 if the tag check failed or 0 if the check succeeded.
+ *
+ * If the tag check fails, then the \a plaintext will also be zeroed to
+ * prevent it from being used accidentally by the application when the
+ * ciphertext was invalid.
+ */
+int aead_check_tag
+    (unsigned char *plaintext, unsigned long long plaintext_len,
+     const unsigned char *tag1, const unsigned char *tag2,
+     unsigned tag_len);
+
+/**
+ * \brief Check an authentication tag in constant time with a previous check.
+ *
+ * \param plaintext Points to the plaintext data.
+ * \param plaintext_len Length of the plaintext in bytes.
+ * \param tag1 First tag to compare.
+ * \param tag2 Second tag to compare.
+ * \param tag_len Length of the tags in bytes.
+ * \param precheck Set to -1 if previous check succeeded or 0 if it failed.
+ *
+ * \return Returns -1 if the tag check failed or 0 if the check succeeded.
+ *
+ * If the tag check fails, then the \a plaintext will also be zeroed to
+ * prevent it from being used accidentally by the application when the
+ * ciphertext was invalid.
+ *
+ * This version can be used to incorporate other information about the
+ * correctness of the plaintext into the final result.
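Callers are expected to drive an algorithm through this meta-information block rather than by name, checking the incremental entry points for null before using them; the skinny_tk3_hash_algorithm block earlier in this patch, for example, only fills in the all-in-one hash function. A small dispatch sketch (hypothetical helper built only on the definitions in this header):

    #include "aead-common.h"

    /* Hash "in" into "out" (at least alg->hash_len bytes) using whichever
     * interface the algorithm provides; "state" must be alg->state_size
     * bytes when the incremental interface is taken.  Returns 0 on success. */
    int hash_with(const aead_hash_algorithm_t *alg, void *state,
                  unsigned char *out,
                  const unsigned char *in, unsigned long long inlen)
    {
        if (alg->init && alg->update && alg->finalize) {
            alg->init(state);              /* incremental: initialize ... */
            alg->update(state, in, inlen); /* ... absorb the data ... */
            alg->finalize(state, out);     /* ... and produce the digest */
            return 0;
        }
        if (alg->hash)
            return alg->hash(out, in, inlen); /* all-in-one fallback */
        return -1;
    }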
+ */ +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len, int precheck); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/api.h b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/api.h new file mode 100644 index 0000000..ae8c7f6 --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/api.h @@ -0,0 +1 @@ +#define CRYPTO_BYTES 32 diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/hash.c b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/hash.c new file mode 100644 index 0000000..c51ca3f --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/hash.c @@ -0,0 +1,8 @@ + +#include "skinny-hash.h" + +int crypto_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + return skinny_tk3_hash(out, in, inlen); +} diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-skinny128-avr.S b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-skinny128-avr.S new file mode 100644 index 0000000..d342cd5 --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-skinny128-avr.S @@ -0,0 +1,10099 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 256 +table_0: + .byte 101 + .byte 76 + .byte 106 + .byte 66 + .byte 75 + .byte 99 + .byte 67 + .byte 107 + .byte 85 + .byte 117 + .byte 90 + .byte 122 + .byte 83 + .byte 115 + .byte 91 + .byte 123 + .byte 53 + .byte 140 + .byte 58 + .byte 129 + .byte 137 + .byte 51 + .byte 128 + .byte 59 + .byte 149 + .byte 37 + .byte 152 + .byte 42 + .byte 144 + .byte 35 + .byte 153 + .byte 43 + .byte 229 + .byte 204 + .byte 232 + .byte 193 + .byte 201 + .byte 224 + .byte 192 + .byte 233 + .byte 213 + .byte 245 + .byte 216 + .byte 248 + .byte 208 + .byte 240 + .byte 217 + .byte 249 + .byte 165 + .byte 28 + .byte 168 + .byte 18 + .byte 27 + .byte 160 + .byte 19 + .byte 169 + .byte 5 + .byte 181 + .byte 10 + .byte 184 + .byte 3 + .byte 176 + .byte 11 + .byte 185 + .byte 50 + .byte 136 + .byte 60 + .byte 133 + .byte 141 + .byte 52 + .byte 132 + .byte 61 + .byte 145 + .byte 34 + .byte 156 + .byte 44 + .byte 148 + .byte 36 + .byte 157 + .byte 45 + .byte 98 + .byte 74 + .byte 108 + .byte 69 + .byte 77 + .byte 100 + .byte 68 + .byte 109 + .byte 82 + .byte 114 + .byte 92 + .byte 124 + .byte 84 + .byte 116 + .byte 93 + .byte 125 + .byte 161 + .byte 26 + .byte 172 + .byte 21 + .byte 29 + .byte 164 + .byte 20 + .byte 173 + .byte 2 + .byte 177 + .byte 12 + .byte 188 + .byte 4 + .byte 180 + .byte 13 + .byte 189 + .byte 225 + .byte 200 + .byte 236 + .byte 197 + .byte 205 + .byte 228 + .byte 196 + .byte 237 + .byte 209 + .byte 241 + .byte 220 + .byte 252 + .byte 212 + .byte 244 + .byte 221 + .byte 253 + .byte 54 + .byte 142 + .byte 56 + .byte 130 + .byte 139 + .byte 48 + .byte 131 + .byte 57 + .byte 150 + .byte 38 + .byte 154 + .byte 40 + .byte 147 + .byte 32 + .byte 155 + .byte 41 + .byte 102 + .byte 78 + .byte 104 + .byte 65 + .byte 73 + .byte 96 + .byte 64 + .byte 105 + .byte 86 + .byte 118 + .byte 88 + .byte 120 + .byte 80 + .byte 112 + .byte 89 + .byte 121 + .byte 166 + .byte 30 + .byte 170 + .byte 17 + .byte 25 + .byte 163 + .byte 16 + .byte 171 + .byte 6 + .byte 182 + .byte 8 + .byte 186 + .byte 0 + .byte 179 + .byte 9 + .byte 187 + .byte 230 + .byte 206 + 
.byte 234 + .byte 194 + .byte 203 + .byte 227 + .byte 195 + .byte 235 + .byte 214 + .byte 246 + .byte 218 + .byte 250 + .byte 211 + .byte 243 + .byte 219 + .byte 251 + .byte 49 + .byte 138 + .byte 62 + .byte 134 + .byte 143 + .byte 55 + .byte 135 + .byte 63 + .byte 146 + .byte 33 + .byte 158 + .byte 46 + .byte 151 + .byte 39 + .byte 159 + .byte 47 + .byte 97 + .byte 72 + .byte 110 + .byte 70 + .byte 79 + .byte 103 + .byte 71 + .byte 111 + .byte 81 + .byte 113 + .byte 94 + .byte 126 + .byte 87 + .byte 119 + .byte 95 + .byte 127 + .byte 162 + .byte 24 + .byte 174 + .byte 22 + .byte 31 + .byte 167 + .byte 23 + .byte 175 + .byte 1 + .byte 178 + .byte 14 + .byte 190 + .byte 7 + .byte 183 + .byte 15 + .byte 191 + .byte 226 + .byte 202 + .byte 238 + .byte 198 + .byte 207 + .byte 231 + .byte 199 + .byte 239 + .byte 210 + .byte 242 + .byte 222 + .byte 254 + .byte 215 + .byte 247 + .byte 223 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 256 +table_1: + .byte 172 + .byte 232 + .byte 104 + .byte 60 + .byte 108 + .byte 56 + .byte 168 + .byte 236 + .byte 170 + .byte 174 + .byte 58 + .byte 62 + .byte 106 + .byte 110 + .byte 234 + .byte 238 + .byte 166 + .byte 163 + .byte 51 + .byte 54 + .byte 102 + .byte 99 + .byte 227 + .byte 230 + .byte 225 + .byte 164 + .byte 97 + .byte 52 + .byte 49 + .byte 100 + .byte 161 + .byte 228 + .byte 141 + .byte 201 + .byte 73 + .byte 29 + .byte 77 + .byte 25 + .byte 137 + .byte 205 + .byte 139 + .byte 143 + .byte 27 + .byte 31 + .byte 75 + .byte 79 + .byte 203 + .byte 207 + .byte 133 + .byte 192 + .byte 64 + .byte 21 + .byte 69 + .byte 16 + .byte 128 + .byte 197 + .byte 130 + .byte 135 + .byte 18 + .byte 23 + .byte 66 + .byte 71 + .byte 194 + .byte 199 + .byte 150 + .byte 147 + .byte 3 + .byte 6 + .byte 86 + .byte 83 + .byte 211 + .byte 214 + .byte 209 + .byte 148 + .byte 81 + .byte 4 + .byte 1 + .byte 84 + .byte 145 + .byte 212 + .byte 156 + .byte 216 + .byte 88 + .byte 12 + .byte 92 + .byte 8 + .byte 152 + .byte 220 + .byte 154 + .byte 158 + .byte 10 + .byte 14 + .byte 90 + .byte 94 + .byte 218 + .byte 222 + .byte 149 + .byte 208 + .byte 80 + .byte 5 + .byte 85 + .byte 0 + .byte 144 + .byte 213 + .byte 146 + .byte 151 + .byte 2 + .byte 7 + .byte 82 + .byte 87 + .byte 210 + .byte 215 + .byte 157 + .byte 217 + .byte 89 + .byte 13 + .byte 93 + .byte 9 + .byte 153 + .byte 221 + .byte 155 + .byte 159 + .byte 11 + .byte 15 + .byte 91 + .byte 95 + .byte 219 + .byte 223 + .byte 22 + .byte 19 + .byte 131 + .byte 134 + .byte 70 + .byte 67 + .byte 195 + .byte 198 + .byte 65 + .byte 20 + .byte 193 + .byte 132 + .byte 17 + .byte 68 + .byte 129 + .byte 196 + .byte 28 + .byte 72 + .byte 200 + .byte 140 + .byte 76 + .byte 24 + .byte 136 + .byte 204 + .byte 26 + .byte 30 + .byte 138 + .byte 142 + .byte 74 + .byte 78 + .byte 202 + .byte 206 + .byte 53 + .byte 96 + .byte 224 + .byte 165 + .byte 101 + .byte 48 + .byte 160 + .byte 229 + .byte 50 + .byte 55 + .byte 162 + .byte 167 + .byte 98 + .byte 103 + .byte 226 + .byte 231 + .byte 61 + .byte 105 + .byte 233 + .byte 173 + .byte 109 + .byte 57 + .byte 169 + .byte 237 + .byte 59 + .byte 63 + .byte 171 + .byte 175 + .byte 107 + .byte 111 + .byte 235 + .byte 239 + .byte 38 + .byte 35 + .byte 179 + .byte 182 + .byte 118 + .byte 115 + .byte 243 + .byte 246 + .byte 113 + .byte 36 + .byte 241 + .byte 180 + .byte 33 + .byte 116 + .byte 177 + .byte 244 + .byte 44 + .byte 120 + .byte 248 + .byte 188 + .byte 124 + .byte 40 + .byte 184 + .byte 252 + .byte 42 + .byte 46 + .byte 186 + .byte 190 
+ .byte 122 + .byte 126 + .byte 250 + .byte 254 + .byte 37 + .byte 112 + .byte 240 + .byte 181 + .byte 117 + .byte 32 + .byte 176 + .byte 245 + .byte 34 + .byte 39 + .byte 178 + .byte 183 + .byte 114 + .byte 119 + .byte 242 + .byte 247 + .byte 45 + .byte 121 + .byte 249 + .byte 189 + .byte 125 + .byte 41 + .byte 185 + .byte 253 + .byte 43 + .byte 47 + .byte 187 + .byte 191 + .byte 123 + .byte 127 + .byte 251 + .byte 255 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_2, @object + .size table_2, 256 +table_2: + .byte 0 + .byte 2 + .byte 4 + .byte 6 + .byte 8 + .byte 10 + .byte 12 + .byte 14 + .byte 16 + .byte 18 + .byte 20 + .byte 22 + .byte 24 + .byte 26 + .byte 28 + .byte 30 + .byte 32 + .byte 34 + .byte 36 + .byte 38 + .byte 40 + .byte 42 + .byte 44 + .byte 46 + .byte 48 + .byte 50 + .byte 52 + .byte 54 + .byte 56 + .byte 58 + .byte 60 + .byte 62 + .byte 65 + .byte 67 + .byte 69 + .byte 71 + .byte 73 + .byte 75 + .byte 77 + .byte 79 + .byte 81 + .byte 83 + .byte 85 + .byte 87 + .byte 89 + .byte 91 + .byte 93 + .byte 95 + .byte 97 + .byte 99 + .byte 101 + .byte 103 + .byte 105 + .byte 107 + .byte 109 + .byte 111 + .byte 113 + .byte 115 + .byte 117 + .byte 119 + .byte 121 + .byte 123 + .byte 125 + .byte 127 + .byte 128 + .byte 130 + .byte 132 + .byte 134 + .byte 136 + .byte 138 + .byte 140 + .byte 142 + .byte 144 + .byte 146 + .byte 148 + .byte 150 + .byte 152 + .byte 154 + .byte 156 + .byte 158 + .byte 160 + .byte 162 + .byte 164 + .byte 166 + .byte 168 + .byte 170 + .byte 172 + .byte 174 + .byte 176 + .byte 178 + .byte 180 + .byte 182 + .byte 184 + .byte 186 + .byte 188 + .byte 190 + .byte 193 + .byte 195 + .byte 197 + .byte 199 + .byte 201 + .byte 203 + .byte 205 + .byte 207 + .byte 209 + .byte 211 + .byte 213 + .byte 215 + .byte 217 + .byte 219 + .byte 221 + .byte 223 + .byte 225 + .byte 227 + .byte 229 + .byte 231 + .byte 233 + .byte 235 + .byte 237 + .byte 239 + .byte 241 + .byte 243 + .byte 245 + .byte 247 + .byte 249 + .byte 251 + .byte 253 + .byte 255 + .byte 1 + .byte 3 + .byte 5 + .byte 7 + .byte 9 + .byte 11 + .byte 13 + .byte 15 + .byte 17 + .byte 19 + .byte 21 + .byte 23 + .byte 25 + .byte 27 + .byte 29 + .byte 31 + .byte 33 + .byte 35 + .byte 37 + .byte 39 + .byte 41 + .byte 43 + .byte 45 + .byte 47 + .byte 49 + .byte 51 + .byte 53 + .byte 55 + .byte 57 + .byte 59 + .byte 61 + .byte 63 + .byte 64 + .byte 66 + .byte 68 + .byte 70 + .byte 72 + .byte 74 + .byte 76 + .byte 78 + .byte 80 + .byte 82 + .byte 84 + .byte 86 + .byte 88 + .byte 90 + .byte 92 + .byte 94 + .byte 96 + .byte 98 + .byte 100 + .byte 102 + .byte 104 + .byte 106 + .byte 108 + .byte 110 + .byte 112 + .byte 114 + .byte 116 + .byte 118 + .byte 120 + .byte 122 + .byte 124 + .byte 126 + .byte 129 + .byte 131 + .byte 133 + .byte 135 + .byte 137 + .byte 139 + .byte 141 + .byte 143 + .byte 145 + .byte 147 + .byte 149 + .byte 151 + .byte 153 + .byte 155 + .byte 157 + .byte 159 + .byte 161 + .byte 163 + .byte 165 + .byte 167 + .byte 169 + .byte 171 + .byte 173 + .byte 175 + .byte 177 + .byte 179 + .byte 181 + .byte 183 + .byte 185 + .byte 187 + .byte 189 + .byte 191 + .byte 192 + .byte 194 + .byte 196 + .byte 198 + .byte 200 + .byte 202 + .byte 204 + .byte 206 + .byte 208 + .byte 210 + .byte 212 + .byte 214 + .byte 216 + .byte 218 + .byte 220 + .byte 222 + .byte 224 + .byte 226 + .byte 228 + .byte 230 + .byte 232 + .byte 234 + .byte 236 + .byte 238 + .byte 240 + .byte 242 + .byte 244 + .byte 246 + .byte 248 + .byte 250 + .byte 252 + .byte 254 + + .section .progmem.data,"a",@progbits + .p2align 8 + 
.type table_3, @object + .size table_3, 256 +table_3: + .byte 0 + .byte 128 + .byte 1 + .byte 129 + .byte 2 + .byte 130 + .byte 3 + .byte 131 + .byte 4 + .byte 132 + .byte 5 + .byte 133 + .byte 6 + .byte 134 + .byte 7 + .byte 135 + .byte 8 + .byte 136 + .byte 9 + .byte 137 + .byte 10 + .byte 138 + .byte 11 + .byte 139 + .byte 12 + .byte 140 + .byte 13 + .byte 141 + .byte 14 + .byte 142 + .byte 15 + .byte 143 + .byte 16 + .byte 144 + .byte 17 + .byte 145 + .byte 18 + .byte 146 + .byte 19 + .byte 147 + .byte 20 + .byte 148 + .byte 21 + .byte 149 + .byte 22 + .byte 150 + .byte 23 + .byte 151 + .byte 24 + .byte 152 + .byte 25 + .byte 153 + .byte 26 + .byte 154 + .byte 27 + .byte 155 + .byte 28 + .byte 156 + .byte 29 + .byte 157 + .byte 30 + .byte 158 + .byte 31 + .byte 159 + .byte 160 + .byte 32 + .byte 161 + .byte 33 + .byte 162 + .byte 34 + .byte 163 + .byte 35 + .byte 164 + .byte 36 + .byte 165 + .byte 37 + .byte 166 + .byte 38 + .byte 167 + .byte 39 + .byte 168 + .byte 40 + .byte 169 + .byte 41 + .byte 170 + .byte 42 + .byte 171 + .byte 43 + .byte 172 + .byte 44 + .byte 173 + .byte 45 + .byte 174 + .byte 46 + .byte 175 + .byte 47 + .byte 176 + .byte 48 + .byte 177 + .byte 49 + .byte 178 + .byte 50 + .byte 179 + .byte 51 + .byte 180 + .byte 52 + .byte 181 + .byte 53 + .byte 182 + .byte 54 + .byte 183 + .byte 55 + .byte 184 + .byte 56 + .byte 185 + .byte 57 + .byte 186 + .byte 58 + .byte 187 + .byte 59 + .byte 188 + .byte 60 + .byte 189 + .byte 61 + .byte 190 + .byte 62 + .byte 191 + .byte 63 + .byte 64 + .byte 192 + .byte 65 + .byte 193 + .byte 66 + .byte 194 + .byte 67 + .byte 195 + .byte 68 + .byte 196 + .byte 69 + .byte 197 + .byte 70 + .byte 198 + .byte 71 + .byte 199 + .byte 72 + .byte 200 + .byte 73 + .byte 201 + .byte 74 + .byte 202 + .byte 75 + .byte 203 + .byte 76 + .byte 204 + .byte 77 + .byte 205 + .byte 78 + .byte 206 + .byte 79 + .byte 207 + .byte 80 + .byte 208 + .byte 81 + .byte 209 + .byte 82 + .byte 210 + .byte 83 + .byte 211 + .byte 84 + .byte 212 + .byte 85 + .byte 213 + .byte 86 + .byte 214 + .byte 87 + .byte 215 + .byte 88 + .byte 216 + .byte 89 + .byte 217 + .byte 90 + .byte 218 + .byte 91 + .byte 219 + .byte 92 + .byte 220 + .byte 93 + .byte 221 + .byte 94 + .byte 222 + .byte 95 + .byte 223 + .byte 224 + .byte 96 + .byte 225 + .byte 97 + .byte 226 + .byte 98 + .byte 227 + .byte 99 + .byte 228 + .byte 100 + .byte 229 + .byte 101 + .byte 230 + .byte 102 + .byte 231 + .byte 103 + .byte 232 + .byte 104 + .byte 233 + .byte 105 + .byte 234 + .byte 106 + .byte 235 + .byte 107 + .byte 236 + .byte 108 + .byte 237 + .byte 109 + .byte 238 + .byte 110 + .byte 239 + .byte 111 + .byte 240 + .byte 112 + .byte 241 + .byte 113 + .byte 242 + .byte 114 + .byte 243 + .byte 115 + .byte 244 + .byte 116 + .byte 245 + .byte 117 + .byte 246 + .byte 118 + .byte 247 + .byte 119 + .byte 248 + .byte 120 + .byte 249 + .byte 121 + .byte 250 + .byte 122 + .byte 251 + .byte 123 + .byte 252 + .byte 124 + .byte 253 + .byte 125 + .byte 254 + .byte 126 + .byte 255 + .byte 127 + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_4, @object + .size table_4, 112 +table_4: + .byte 1 + .byte 0 + .byte 3 + .byte 0 + .byte 7 + .byte 0 + .byte 15 + .byte 0 + .byte 15 + .byte 1 + .byte 14 + .byte 3 + .byte 13 + .byte 3 + .byte 11 + .byte 3 + .byte 7 + .byte 3 + .byte 15 + .byte 2 + .byte 14 + .byte 1 + .byte 12 + .byte 3 + .byte 9 + .byte 3 + .byte 3 + .byte 3 + .byte 7 + .byte 2 + .byte 14 + .byte 0 + .byte 13 + .byte 1 + .byte 10 + .byte 3 + .byte 5 + .byte 3 + .byte 11 + .byte 2 + .byte 6 + 
.byte 1 + .byte 12 + .byte 2 + .byte 8 + .byte 1 + .byte 0 + .byte 3 + .byte 1 + .byte 2 + .byte 2 + .byte 0 + .byte 5 + .byte 0 + .byte 11 + .byte 0 + .byte 7 + .byte 1 + .byte 14 + .byte 2 + .byte 12 + .byte 1 + .byte 8 + .byte 3 + .byte 1 + .byte 3 + .byte 3 + .byte 2 + .byte 6 + .byte 0 + .byte 13 + .byte 0 + .byte 11 + .byte 1 + .byte 6 + .byte 3 + .byte 13 + .byte 2 + .byte 10 + .byte 1 + .byte 4 + .byte 3 + .byte 9 + .byte 2 + .byte 2 + .byte 1 + .byte 4 + .byte 2 + .byte 8 + .byte 0 + .byte 1 + .byte 1 + .byte 2 + .byte 2 + .byte 4 + .byte 0 + .byte 9 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 2 + .byte 12 + .byte 0 + .byte 9 + .byte 1 + .byte 2 + .byte 3 + .byte 5 + .byte 2 + .byte 10 + .byte 0 + + .text +.global skinny_128_384_init + .type skinny_128_384_init, @function +skinny_128_384_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,12 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_384_init, .-skinny_128_384_init + + .text +.global skinny_128_384_encrypt + .type skinny_128_384_encrypt, @function +skinny_128_384_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 + std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + std Y+33,r18 + std Y+34,r19 + std Y+35,r20 + std Y+36,r21 + ldd r18,Z+36 + ldd r19,Z+37 + ldd r20,Z+38 + ldd r21,Z+39 + std Y+37,r18 + std Y+38,r19 + std Y+39,r20 + std Y+40,r21 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + std Y+41,r18 + std Y+42,r19 + std Y+43,r20 + std Y+44,r21 + ldd r18,Z+44 + ldd r19,Z+45 + ldd r20,Z+46 + ldd r21,Z+47 + std Y+45,r18 + std Y+46,r19 + std Y+47,r20 + std Y+48,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +114: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + 
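The skinny_128_384_init routine above is a plain copy loop: twelve iterations each move four bytes from the input tweakey (X pointer) into the key schedule (Z pointer), storing the full 48-byte (384-bit) tweakey unmodified. The encrypt rounds that follow index the flash-resident S-box tables, which is why every lookup is wrapped in the RAMPZ/elpm, lpm and plain-ld alternatives selected by the preprocessor. A C view of the init step (the function name and prototype here are illustrative only, not part of the patch):

    #include <string.h>

    /* Equivalent of the 12 x 4-byte copy loop in skinny_128_384_init:
     * the 384-bit tweakey is stored into the schedule as-is. */
    void skinny_128_384_init_sketch(unsigned char ks[48],
                                    const unsigned char key[48])
    {
        memcpy(ks, key, 48);
    }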
ldd r0,Y+40 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov 
r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov 
r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + 
mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif 
+ mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r13 + std Y+42,r17 + std Y+43,r12 + std Y+44,r25 + std Y+45,r14 + std Y+46,r16 + std Y+47,r24 + std Y+48,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if 
defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,112 + brne 5721f + rjmp 790f +5721: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm 
r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r13 + std Y+34,r17 + std Y+35,r12 + std Y+36,r25 + std Y+37,r14 + std Y+38,r16 + std Y+39,r24 + std Y+40,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 114b +790: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + 
pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_encrypt, .-skinny_128_384_encrypt + +.global skinny_128_384_encrypt_tk_full + .set skinny_128_384_encrypt_tk_full,skinny_128_384_encrypt + + .text +.global skinny_128_384_decrypt + .type skinny_128_384_decrypt, @function +skinny_128_384_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,48 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 68 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r23 + std Y+2,r2 + std Y+3,r21 + std Y+4,r20 + std Y+5,r3 + std Y+6,r18 + std Y+7,r19 + std Y+8,r22 + std Y+9,r9 + std Y+10,r10 + std Y+11,r7 + std Y+12,r6 + std Y+13,r11 + std Y+14,r4 + std Y+15,r5 + std Y+16,r8 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r23 + std Y+18,r2 + std Y+19,r21 + std Y+20,r20 + std Y+21,r3 + std Y+22,r18 + std Y+23,r19 + std Y+24,r22 + std Y+25,r9 + std Y+26,r10 + std Y+27,r7 + std Y+28,r6 + std Y+29,r11 + std Y+30,r4 + std Y+31,r5 + std Y+32,r8 + ldd r18,Z+32 + ldd r19,Z+33 + ldd r20,Z+34 + ldd r21,Z+35 + ldd r22,Z+36 + ldd r23,Z+37 + ldd r2,Z+38 + ldd r3,Z+39 + ldd r4,Z+40 + ldd r5,Z+41 + ldd r6,Z+42 + ldd r7,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + std Y+33,r23 + std Y+34,r2 + std Y+35,r21 + std Y+36,r20 + std Y+37,r3 + std Y+38,r18 + std Y+39,r19 + std Y+40,r22 + std Y+41,r9 + std Y+42,r10 + std Y+43,r7 + std Y+44,r6 + std Y+45,r11 + std Y+46,r4 + std Y+47,r5 + std Y+48,r8 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +122: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm 
+ mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 122b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,28 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +150: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 150b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,28 + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 +179: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov 
r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 179b + std Y+33,r12 + std Y+34,r13 + std Y+35,r14 + std Y+36,r15 + std Y+37,r24 + std Y+38,r25 + std Y+39,r16 + std Y+40,r17 + ldi r26,28 + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 +207: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 207b + std Y+41,r12 + std Y+42,r13 + std Y+43,r14 + std Y+44,r15 + std Y+45,r24 + std Y+46,r25 + std Y+47,r16 + std Y+48,r17 + ldi r26,112 +227: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif 
defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+41 + eor r22,r0 + ldd r0,Y+42 + eor r23,r0 + ldd r0,Y+43 + eor r2,r0 + ldd r0,Y+44 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldd r0,Y+45 + eor r4,r0 + ldd r0,Y+46 + eor r5,r0 + ldd r0,Y+47 + eor r6,r0 + ldd r0,Y+48 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 
+#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif 
defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+33 + eor r4,r0 + ldd r0,Y+34 + eor r5,r0 + ldd r0,Y+35 + eor r6,r0 + ldd r0,Y+36 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 
+ ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldd r0,Y+37 + eor r8,r0 + ldd r0,Y+38 + eor r9,r0 + ldd r0,Y+39 + eor r10,r0 + ldd r0,Y+40 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out 
_SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+33 + ldd r13,Y+34 + ldd r14,Y+35 + ldd r15,Y+36 + ldd r24,Y+37 + ldd r25,Y+38 + ldd r16,Y+39 + ldd r17,Y+40 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+33,r14 + std Y+34,r12 + std Y+35,r24 + std Y+36,r17 + std Y+37,r16 + std Y+38,r15 + std Y+39,r25 + std Y+40,r13 + eor r4,r22 + eor 
r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+41 + eor r8,r0 + ldd r0,Y+42 + eor r9,r0 + ldd r0,Y+43 + eor r10,r0 + ldd r0,Y+44 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldd r0,Y+45 + eor r18,r0 + ldd r0,Y+46 + eor r19,r0 + ldd r0,Y+47 + eor r20,r0 + ldd r0,Y+48 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z 
+#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+41 + ldd r13,Y+42 + ldd r14,Y+43 + ldd r15,Y+44 + ldd r24,Y+45 + ldd r25,Y+46 + ldd r16,Y+47 + ldd r17,Y+48 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm 
+ mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+41,r14 + std Y+42,r12 + std Y+43,r24 + std Y+44,r17 + std Y+45,r16 + std Y+46,r15 + std Y+47,r25 + std Y+48,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+33 + eor r18,r0 + ldd r0,Y+34 + eor r19,r0 + ldd r0,Y+35 + eor r20,r0 + ldd r0,Y+36 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldd r0,Y+37 + eor r22,r0 + ldd r0,Y+38 + eor r23,r0 + ldd r0,Y+39 + eor r2,r0 + ldd r0,Y+40 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif 
defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 903f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 227b +903: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+49 + ldd r27,Y+50 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,50 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_384_decrypt, .-skinny_128_384_decrypt + + .text +.global skinny_128_256_init + .type skinny_128_256_init, @function +skinny_128_256_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ldi r22,8 +1: + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + st Z+,r18 + st Z+,r19 + st Z+,r20 + st Z+,r21 + dec r22 + brne 1b + ret + .size skinny_128_256_init, .-skinny_128_256_init + + .text +.global skinny_128_256_encrypt + .type skinny_128_256_encrypt, @function +skinny_128_256_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Y+5,r18 + std Y+6,r19 + std Y+7,r20 + std Y+8,r21 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + std Y+9,r18 + std Y+10,r19 + std Y+11,r20 + std Y+12,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + std Y+13,r18 + std Y+14,r19 + std Y+15,r20 
+ std Y+16,r21 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + ldd r18,Z+20 + ldd r19,Z+21 + ldd r20,Z+22 + ldd r21,Z+23 + std Y+21,r18 + std Y+22,r19 + std Y+23,r20 + std Y+24,r21 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + std Y+25,r18 + std Y+26,r19 + std Y+27,r20 + std Y+28,r21 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + std Y+29,r18 + std Y+30,r19 + std Y+31,r20 + std Y+32,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r26,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + mov r26,r1 +82: + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if 
defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + ldi r27,2 + eor r4,r27 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + inc r26 + ldi r27,2 + eor r22,r27 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + mov r0,r2 + mov r2,r22 + mov r22,r0 + mov r0,r3 + mov r3,r23 + mov r23,r0 + mov r0,r4 + mov r4,r5 + mov 
r5,r6 + mov r6,r7 + mov r7,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov 
r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + inc r26 + ldi r27,2 + eor r18,r27 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 + ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r20 + mov r20,r18 + mov r18,r0 + mov r0,r21 + mov r21,r19 + mov r19,r0 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r13 + std Y+10,r17 + std Y+11,r12 + std Y+12,r25 + std Y+13,r14 + std Y+14,r16 + std Y+15,r24 + std Y+16,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif 
defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r13 + std Y+26,r17 + std Y+27,r12 + std Y+28,r25 + std Y+29,r14 + std Y+30,r16 + std Y+31,r24 + std Y+32,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif 
defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + inc r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + inc r26 + ldi r27,2 + eor r8,r27 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + cpi r26,96 + breq 594f + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r13 + std Y+2,r17 + std Y+3,r12 + std Y+4,r25 + std Y+5,r14 + std Y+6,r16 + std Y+7,r24 + std Y+8,r15 + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r27,hh8(table_2) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r13 + std Y+18,r17 + std Y+19,r12 + std Y+20,r25 + std Y+21,r14 + std Y+22,r16 + std Y+23,r24 + std Y+24,r15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if 
defined(RAMPZ) + ldi r27,hh8(table_0) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 82b +594: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_encrypt, .-skinny_128_256_encrypt + +.global skinny_128_256_encrypt_tk_full + .set skinny_128_256_encrypt_tk_full,skinny_128_256_encrypt + + .text +.global skinny_128_256_decrypt + .type skinny_128_256_decrypt, @function +skinny_128_256_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,32 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 52 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + ldd r4,Z+8 + ldd r5,Z+9 + ldd r6,Z+10 + ldd r7,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + std Y+5,r22 + std Y+6,r23 + std Y+7,r2 + std Y+8,r3 + std Y+9,r4 + std Y+10,r5 + std Y+11,r6 + std Y+12,r7 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r18,Z+16 + ldd r19,Z+17 + ldd r20,Z+18 + ldd r21,Z+19 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + ldd r4,Z+24 + ldd r5,Z+25 + ldd r6,Z+26 + ldd r7,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Y+17,r18 + std Y+18,r19 + std Y+19,r20 + std Y+20,r21 + std Y+21,r22 + std Y+22,r23 + std Y+23,r2 + std Y+24,r3 + std Y+25,r4 + std Y+26,r5 + std Y+27,r6 + std Y+28,r7 + std Y+29,r8 + std Y+30,r9 + std Y+31,r10 + std Y+32,r11 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ldi r30,lo8(table_2) + ldi r31,hi8(table_2) +#if defined(RAMPZ) + ldi r26,hh8(table_2) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,24 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 +90: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 90b + std Y+17,r12 + std Y+18,r13 + std Y+19,r14 + std Y+20,r15 + std Y+21,r24 + std Y+22,r25 + std Y+23,r16 + std Y+24,r17 + ldi r26,24 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 +118: + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + dec r26 + brne 118b + std Y+25,r12 + std Y+26,r13 + std Y+27,r14 + std Y+28,r15 + std Y+29,r24 + std Y+30,r25 + std Y+31,r16 + std Y+32,r17 + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r26,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r26 +#endif + ldi r26,96 +139: + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if 
defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + eor r8,r22 + eor r9,r23 + eor r10,r2 + eor r11,r3 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + mov r0,r4 + mov r4,r5 + mov r5,r6 + mov r6,r7 + mov r7,r0 + mov r0,r8 + mov r8,r10 + mov r10,r0 + mov r0,r9 + mov r9,r11 + mov r11,r0 + mov r0,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + ldd r0,Y+9 + eor r22,r0 + ldd r0,Y+10 + eor r23,r0 + ldd r0,Y+11 + eor r2,r0 + ldd r0,Y+12 + eor r3,r0 + ldd r0,Y+25 + eor r22,r0 + ldd r0,Y+26 + eor r23,r0 + ldd r0,Y+27 + eor r2,r0 + ldd r0,Y+28 + eor r3,r0 + ldd r0,Y+13 + eor r4,r0 + ldd r0,Y+14 + eor r5,r0 + ldd r0,Y+15 + eor r6,r0 + ldd r0,Y+16 + eor r7,r0 + ldd r0,Y+29 + eor r4,r0 + ldd r0,Y+30 + eor r5,r0 + ldd r0,Y+31 + eor r6,r0 + ldd r0,Y+32 + eor r7,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + ldi r27,2 + eor r8,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z 
+#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r18 + mov r18,r20 + mov r20,r0 + mov r0,r19 + mov r19,r21 + mov r21,r0 + mov r0,r3 + mov r3,r2 + mov r2,r23 + mov r23,r22 + mov r22,r0 + ldd r0,Y+1 + eor r4,r0 + ldd r0,Y+2 + eor r5,r0 + ldd r0,Y+3 + eor r6,r0 + ldd r0,Y+4 + eor r7,r0 + ldd r0,Y+17 + eor r4,r0 
+ ldd r0,Y+18 + eor r5,r0 + ldd r0,Y+19 + eor r6,r0 + ldd r0,Y+20 + eor r7,r0 + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + ldd r0,Y+21 + eor r8,r0 + ldd r0,Y+22 + eor r9,r0 + ldd r0,Y+23 + eor r10,r0 + ldd r0,Y+24 + eor r11,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r4,r27 + ldi r27,2 + eor r18,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + 
lpm + mov r3,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + ldd r24,Y+5 + ldd r25,Y+6 + ldd r16,Y+7 + ldd r17,Y+8 + std Y+1,r14 + std Y+2,r12 + std Y+3,r24 + std Y+4,r17 + std Y+5,r16 + std Y+6,r15 + std Y+7,r25 + std Y+8,r13 + ldd r12,Y+17 + ldd r13,Y+18 + ldd r14,Y+19 + ldd r15,Y+20 + ldd r24,Y+21 + ldd r25,Y+22 + ldd r16,Y+23 + ldd r17,Y+24 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+17,r14 + std Y+18,r12 + std Y+19,r24 + std Y+20,r17 + std Y+21,r16 + std Y+22,r15 + std Y+23,r25 + std Y+24,r13 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + mov r0,r18 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r7 + mov r7,r6 + mov r6,r5 + mov r5,r4 + mov r4,r0 + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + ldd r0,Y+25 + eor r8,r0 + ldd r0,Y+26 + eor r9,r0 + ldd r0,Y+27 + eor r10,r0 + ldd r0,Y+28 + eor r11,r0 + ldd r0,Y+13 + eor r18,r0 + ldd r0,Y+14 + eor r19,r0 + ldd r0,Y+15 + eor r20,r0 + ldd r0,Y+16 + eor r21,r0 + ldd r0,Y+29 + eor r18,r0 + ldd r0,Y+30 + eor r19,r0 + ldd r0,Y+31 + eor r20,r0 + ldd r0,Y+32 + eor r21,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r8,r27 + ldi r27,2 + eor r22,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 
+#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z +#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + ldd r12,Y+9 + ldd r13,Y+10 + ldd r14,Y+11 + ldd r15,Y+12 + ldd r24,Y+13 + ldd r25,Y+14 + ldd r16,Y+15 + ldd r17,Y+16 + std Y+9,r14 + std Y+10,r12 + std Y+11,r24 + std Y+12,r17 + std Y+13,r16 + std Y+14,r15 + std Y+15,r25 + std Y+16,r13 + ldd r12,Y+25 + ldd r13,Y+26 + ldd r14,Y+27 + ldd r15,Y+28 + ldd r24,Y+29 + ldd r25,Y+30 + ldd r16,Y+31 + ldd r17,Y+32 + mov r30,r12 +#if defined(RAMPZ) + elpm r12,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r12,Z +#elif defined(__AVR_TINY__) + ld r12,Z +#else + lpm + mov r12,r0 +#endif + mov r30,r13 +#if defined(RAMPZ) + elpm r13,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r13,Z +#elif defined(__AVR_TINY__) + ld r13,Z +#else + lpm + mov r13,r0 +#endif + mov r30,r14 +#if defined(RAMPZ) + elpm r14,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r14,Z +#elif defined(__AVR_TINY__) + ld r14,Z +#else + lpm + mov r14,r0 +#endif + mov r30,r15 +#if defined(RAMPZ) + elpm r15,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm 
r15,Z +#elif defined(__AVR_TINY__) + ld r15,Z +#else + lpm + mov r15,r0 +#endif + mov r30,r24 +#if defined(RAMPZ) + elpm r24,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r24,Z +#elif defined(__AVR_TINY__) + ld r24,Z +#else + lpm + mov r24,r0 +#endif + mov r30,r25 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + mov r30,r16 +#if defined(RAMPZ) + elpm r16,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r16,Z +#elif defined(__AVR_TINY__) + ld r16,Z +#else + lpm + mov r16,r0 +#endif + mov r30,r17 +#if defined(RAMPZ) + elpm r17,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r17,Z +#elif defined(__AVR_TINY__) + ld r17,Z +#else + lpm + mov r17,r0 +#endif + std Y+25,r14 + std Y+26,r12 + std Y+27,r24 + std Y+28,r17 + std Y+29,r16 + std Y+30,r15 + std Y+31,r25 + std Y+32,r13 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + eor r22,r4 + eor r23,r5 + eor r2,r6 + eor r3,r7 + mov r0,r22 + mov r22,r23 + mov r23,r2 + mov r2,r3 + mov r3,r0 + mov r0,r4 + mov r4,r6 + mov r6,r0 + mov r0,r5 + mov r5,r7 + mov r7,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + ldd r0,Y+1 + eor r18,r0 + ldd r0,Y+2 + eor r19,r0 + ldd r0,Y+3 + eor r20,r0 + ldd r0,Y+4 + eor r21,r0 + ldd r0,Y+17 + eor r18,r0 + ldd r0,Y+18 + eor r19,r0 + ldd r0,Y+19 + eor r20,r0 + ldd r0,Y+20 + eor r21,r0 + ldd r0,Y+5 + eor r22,r0 + ldd r0,Y+6 + eor r23,r0 + ldd r0,Y+7 + eor r2,r0 + ldd r0,Y+8 + eor r3,r0 + ldd r0,Y+21 + eor r22,r0 + ldd r0,Y+22 + eor r23,r0 + ldd r0,Y+23 + eor r2,r0 + ldd r0,Y+24 + eor r3,r0 + ldi r30,lo8(table_4) + ldi r31,hi8(table_4) +#if defined(RAMPZ) + ldi r24,hh8(table_4) + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r22,r27 + dec r26 + mov r30,r26 +#if defined(RAMPZ) + elpm r27,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r27,Z +#elif defined(__AVR_TINY__) + ld r27,Z +#else + lpm + mov r27,r0 +#endif + eor r18,r27 + ldi r27,2 + eor r4,r27 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r27,hh8(table_1) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + mov r30,r18 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + mov r30,r19 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + mov r30,r20 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + mov r30,r21 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + mov r30,r22 +#if defined(RAMPZ) + elpm r22,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r22,Z +#elif defined(__AVR_TINY__) + ld r22,Z +#else + lpm + mov r22,r0 +#endif + mov r30,r23 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + mov r30,r2 +#if defined(RAMPZ) + elpm r2,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r2,Z +#elif defined(__AVR_TINY__) + ld r2,Z +#else + lpm + mov r2,r0 +#endif + mov r30,r3 +#if defined(RAMPZ) + elpm r3,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r3,Z 
+#elif defined(__AVR_TINY__) + ld r3,Z +#else + lpm + mov r3,r0 +#endif + mov r30,r4 +#if defined(RAMPZ) + elpm r4,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r4,Z +#elif defined(__AVR_TINY__) + ld r4,Z +#else + lpm + mov r4,r0 +#endif + mov r30,r5 +#if defined(RAMPZ) + elpm r5,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r5,Z +#elif defined(__AVR_TINY__) + ld r5,Z +#else + lpm + mov r5,r0 +#endif + mov r30,r6 +#if defined(RAMPZ) + elpm r6,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r6,Z +#elif defined(__AVR_TINY__) + ld r6,Z +#else + lpm + mov r6,r0 +#endif + mov r30,r7 +#if defined(RAMPZ) + elpm r7,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r7,Z +#elif defined(__AVR_TINY__) + ld r7,Z +#else + lpm + mov r7,r0 +#endif + mov r30,r8 +#if defined(RAMPZ) + elpm r8,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r8,Z +#elif defined(__AVR_TINY__) + ld r8,Z +#else + lpm + mov r8,r0 +#endif + mov r30,r9 +#if defined(RAMPZ) + elpm r9,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r9,Z +#elif defined(__AVR_TINY__) + ld r9,Z +#else + lpm + mov r9,r0 +#endif + mov r30,r10 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + mov r30,r11 +#if defined(RAMPZ) + elpm r11,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r11,Z +#elif defined(__AVR_TINY__) + ld r11,Z +#else + lpm + mov r11,r0 +#endif + cp r26,r1 + breq 651f + ldi r30,lo8(table_3) + ldi r31,hi8(table_3) +#if defined(RAMPZ) + ldi r27,hh8(table_3) + out _SFR_IO_ADDR(RAMPZ),r27 +#endif + rjmp 139b +651: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+33 + ldd r27,Y+34 + st X+,r18 + st X+,r19 + st X+,r20 + st X+,r21 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + adiw r28,34 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size skinny_128_256_decrypt, .-skinny_128_256_decrypt + +#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-skinny128.c b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-skinny128.c new file mode 100644 index 0000000..579ced1 --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-skinny128.c @@ -0,0 +1,801 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "internal-skinny128.h" +#include "internal-skinnyutil.h" +#include "internal-util.h" +#include <string.h> + +#if !defined(__AVR__) + +STATIC_INLINE void skinny128_fast_forward_tk(uint32_t *tk) +{ + /* This function is used to fast-forward the TK1 tweak value + * to the value at the end of the key schedule for decryption. + * + * The tweak permutation repeats every 16 rounds, so SKINNY-128-256 + * with 48 rounds does not need any fast forwarding applied. + * SKINNY-128-128 with 40 rounds and SKINNY-128-384 with 56 rounds + * are equivalent to applying the permutation 8 times: + * + * PT*8 = [5, 6, 3, 2, 7, 0, 1, 4, 13, 14, 11, 10, 15, 8, 9, 12] + */ + uint32_t row0 = tk[0]; + uint32_t row1 = tk[1]; + uint32_t row2 = tk[2]; + uint32_t row3 = tk[3]; + tk[0] = ((row1 >> 8) & 0x0000FFFFU) | + ((row0 >> 8) & 0x00FF0000U) | + ((row0 << 8) & 0xFF000000U); + tk[1] = ((row1 >> 24) & 0x000000FFU) | + ((row0 << 8) & 0x00FFFF00U) | + ((row1 << 24) & 0xFF000000U); + tk[2] = ((row3 >> 8) & 0x0000FFFFU) | + ((row2 >> 8) & 0x00FF0000U) | + ((row2 << 8) & 0xFF000000U); + tk[3] = ((row3 >> 24) & 0x000000FFU) | + ((row2 << 8) & 0x00FFFF00U) | + ((row3 << 24) & 0xFF000000U); +} + +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]) +{ +#if !SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint32_t *schedule; + unsigned round; + uint8_t rc; +#endif + +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); + memcpy(ks->TK3, key + 32, sizeof(ks->TK3)); +#else + /* Set the initial states of TK1, TK2, and TK3 */ + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); + TK3[0] = le_load_word32(key + 32); + TK3[1] = le_load_word32(key + 36); + TK3[2] = le_load_word32(key + 40); + TK3[3] = le_load_word32(key + 44); + + /* Set up the key schedule using TK2 and TK3. TK1 is not added + * to the key schedule because we will derive that part of the + * schedule during encryption operations */ + schedule = ks->k; + rc = 0; + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round, schedule += 2) { + /* XOR the round constants with the current schedule words. + * The round constants for the 3rd and 4th rows are + * fixed and will be applied during encryption.
*/ + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + schedule[0] = TK2[0] ^ TK3[0] ^ (rc & 0x0F); + schedule[1] = TK2[1] ^ TK3[1] ^ (rc >> 4); + + /* Permute TK2 and TK3 for the next round */ + skinny128_permute_tk(TK2); + skinny128_permute_tk(TK3); + + /* Apply the LFSR's to TK2 and TK3 */ + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); + } +#endif +} + +void skinny_128_384_encrypt + (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0; +#else + const uint32_t *schedule = ks->k; +#endif + uint32_t temp; + unsigned round; + + /* Unpack the input block into the state array */ + s0 = le_load_word32(input); + s1 = le_load_word32(input + 4); + s2 = le_load_word32(input + 8); + s3 = le_load_word32(input + 12); + + /* Make a local copy of the tweakable part of the state */ + TK1[0] = le_load_word32(ks->TK1); + TK1[1] = le_load_word32(ks->TK1 + 4); + TK1[2] = le_load_word32(ks->TK1 + 8); + TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif + + /* Perform all encryption rounds */ + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { + /* Apply the S-box to all bytes in the state */ + skinny128_sbox(s0); + skinny128_sbox(s1); + skinny128_sbox(s2); + skinny128_sbox(s3); + + /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else + s0 ^= schedule[0] ^ TK1[0]; + s1 ^= schedule[1] ^ TK1[1]; +#endif + s2 ^= 0x02; + + /* Shift the cells in the rows right, which moves the cell + * values up closer to the MSB. 
That is, we do a left rotate + * on the word to rotate the cells in the word right */ + s1 = leftRotate8(s1); + s2 = leftRotate16(s2); + s3 = leftRotate24(s3); + + /* Mix the columns */ + s1 ^= s2; + s2 ^= s0; + temp = s3 ^ s2; + s3 = s2; + s2 = s1; + s1 = s0; + s0 = temp; + + /* Permute TK1 for the next round */ + skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_permute_tk(TK3); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif + } + + /* Pack the result into the output buffer */ + le_store_word32(output, s0); + le_store_word32(output + 4, s1); + le_store_word32(output + 8, s2); + le_store_word32(output + 12, s3); +} + +void skinny_128_384_decrypt + (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t TK3[4]; + uint8_t rc = 0x15; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_384_ROUNDS * 2 - 2]); +#endif + uint32_t temp; + unsigned round; + + /* Unpack the input block into the state array */ + s0 = le_load_word32(input); + s1 = le_load_word32(input + 4); + s2 = le_load_word32(input + 8); + s3 = le_load_word32(input + 12); + + /* Make a local copy of the tweakable part of the state, TK1 */ + TK1[0] = le_load_word32(ks->TK1); + TK1[1] = le_load_word32(ks->TK1 + 4); + TK1[2] = le_load_word32(ks->TK1 + 8); + TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif + + /* Permute TK1 to fast-forward it to the end of the key schedule */ + skinny128_fast_forward_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_fast_forward_tk(TK2); + skinny128_fast_forward_tk(TK3); + for (round = 0; round < SKINNY_128_384_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2 and TK3. 
+ skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); + skinny128_LFSR3(TK3[2]); + skinny128_LFSR3(TK3[3]); + } +#endif + + /* Perform all decryption rounds */ + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { + /* Inverse permutation on TK1 for this round */ + skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_inv_permute_tk(TK3); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); + skinny128_LFSR2(TK3[2]); + skinny128_LFSR2(TK3[3]); +#endif + + /* Inverse mix of the columns */ + temp = s3; + s3 = s0; + s0 = s1; + s1 = s2; + s3 ^= temp; + s2 = temp ^ s0; + s1 ^= s2; + + /* Inverse shift of the rows */ + s1 = leftRotate24(s1); + s2 = leftRotate16(s2); + s3 = leftRotate8(s3); + + /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else + s0 ^= schedule[0] ^ TK1[0]; + s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif + s2 ^= 0x02; + + /* Apply the inverse of the S-box to all bytes in the state */ + skinny128_inv_sbox(s0); + skinny128_inv_sbox(s1); + skinny128_inv_sbox(s2); + skinny128_inv_sbox(s3); + } + + /* Pack the result into the output buffer */ + le_store_word32(output, s0); + le_store_word32(output + 4, s1); + le_store_word32(output + 8, s2); + le_store_word32(output + 12, s3); +} + +void skinny_128_384_encrypt_tk2 + (skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, const unsigned char *tk2) +{ + uint32_t s0, s1, s2, s3; + uint32_t TK1[4]; + uint32_t TK2[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK3[4]; + uint8_t rc = 0; +#else + const uint32_t *schedule = ks->k; +#endif + uint32_t temp; + unsigned round; + + /* Unpack the input block into the state array */ + s0 = le_load_word32(input); + s1 = le_load_word32(input + 4); + s2 = le_load_word32(input + 8); + s3 = le_load_word32(input + 12); + + /* Make a local copy of the tweakable part of the state */ + TK1[0] = le_load_word32(ks->TK1); + TK1[1] = le_load_word32(ks->TK1 + 4); + TK1[2] = le_load_word32(ks->TK1 + 8); + TK1[3] = le_load_word32(ks->TK1 + 12); + TK2[0] = le_load_word32(tk2); + TK2[1] = le_load_word32(tk2 + 4); + TK2[2] = le_load_word32(tk2 + 8); + TK2[3] = le_load_word32(tk2 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK3[0] = le_load_word32(ks->TK3); + TK3[1] = le_load_word32(ks->TK3 + 4); + TK3[2] = le_load_word32(ks->TK3 + 8); + TK3[3] = le_load_word32(ks->TK3 + 12); +#endif + + /* Perform all encryption rounds */ + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { + /* Apply the S-box to all bytes in the state */ + skinny128_sbox(s0); + skinny128_sbox(s1); + skinny128_sbox(s2); + skinny128_sbox(s3); + + /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); +#else + s0 ^= schedule[0] ^ TK1[0] ^ TK2[0]; + s1 ^= schedule[1] ^ TK1[1] ^ TK2[1]; +#endif + s2 ^= 0x02; + + /* Shift the cells in the rows right, which moves the cell + * values up closer to the MSB. 
That is, we do a left rotate + * on the word to rotate the cells in the word right */ + s1 = leftRotate8(s1); + s2 = leftRotate16(s2); + s3 = leftRotate24(s3); + + /* Mix the columns */ + s1 ^= s2; + s2 ^= s0; + temp = s3 ^ s2; + s3 = s2; + s2 = s1; + s1 = s0; + s0 = temp; + + /* Permute TK1 and TK2 for the next round */ + skinny128_permute_tk(TK1); + skinny128_permute_tk(TK2); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK3); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); +#else + schedule += 2; +#endif + } + + /* Pack the result into the output buffer */ + le_store_word32(output, s0); + le_store_word32(output + 4, s1); + le_store_word32(output + 8, s2); + le_store_word32(output + 12, s3); +} + +void skinny_128_384_encrypt_tk_full + (const unsigned char key[48], unsigned char *output, + const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + uint32_t TK1[4]; + uint32_t TK2[4]; + uint32_t TK3[4]; + uint32_t temp; + unsigned round; + uint8_t rc = 0; + + /* Unpack the input block into the state array */ + s0 = le_load_word32(input); + s1 = le_load_word32(input + 4); + s2 = le_load_word32(input + 8); + s3 = le_load_word32(input + 12); + + /* Make a local copy of the tweakey */ + TK1[0] = le_load_word32(key); + TK1[1] = le_load_word32(key + 4); + TK1[2] = le_load_word32(key + 8); + TK1[3] = le_load_word32(key + 12); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); + TK3[0] = le_load_word32(key + 32); + TK3[1] = le_load_word32(key + 36); + TK3[2] = le_load_word32(key + 40); + TK3[3] = le_load_word32(key + 44); + + /* Perform all encryption rounds */ + for (round = 0; round < SKINNY_128_384_ROUNDS; ++round) { + /* Apply the S-box to all bytes in the state */ + skinny128_sbox(s0); + skinny128_sbox(s1); + skinny128_sbox(s2); + skinny128_sbox(s3); + + /* XOR the round constant and the subkey for this round */ + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ TK3[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ TK3[1] ^ (rc >> 4); + s2 ^= 0x02; + + /* Shift the cells in the rows right, which moves the cell + * values up closer to the MSB. 
That is, we do a left rotate + * on the word to rotate the cells in the word right */ + s1 = leftRotate8(s1); + s2 = leftRotate16(s2); + s3 = leftRotate24(s3); + + /* Mix the columns */ + s1 ^= s2; + s2 ^= s0; + temp = s3 ^ s2; + s3 = s2; + s2 = s1; + s1 = s0; + s0 = temp; + + /* Permute TK1, TK2, and TK3 for the next round */ + skinny128_permute_tk(TK1); + skinny128_permute_tk(TK2); + skinny128_permute_tk(TK3); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR3(TK3[0]); + skinny128_LFSR3(TK3[1]); + } + + /* Pack the result into the output buffer */ + le_store_word32(output, s0); + le_store_word32(output + 4, s1); + le_store_word32(output + 8, s2); + le_store_word32(output + 12, s3); +} + +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]) +{ +#if !SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint32_t *schedule; + unsigned round; + uint8_t rc; +#endif + +#if SKINNY_128_SMALL_SCHEDULE + /* Copy the input key as-is when using the small key schedule version */ + memcpy(ks->TK1, key, sizeof(ks->TK1)); + memcpy(ks->TK2, key + 16, sizeof(ks->TK2)); +#else + /* Set the initial states of TK1 and TK2 */ + memcpy(ks->TK1, key, 16); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); + + /* Set up the key schedule using TK2. TK1 is not added + * to the key schedule because we will derive that part of the + * schedule during encryption operations */ + schedule = ks->k; + rc = 0; + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round, schedule += 2) { + /* XOR the round constants with the current schedule words. + * The round constants for the 3rd and 4th rows are + * fixed and will be applied during encryption. 
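+         * Starting from rc = 0, the 6-bit LFSR update below steps through
+         * the SKINNY round constant sequence 0x01, 0x03, 0x07, 0x0F, 0x1F,
+         * 0x3E, 0x3D, ...; its low 4 bits are mixed into the row 0 word
+         * and its high 2 bits into the row 1 word.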
*/ + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + schedule[0] = TK2[0] ^ (rc & 0x0F); + schedule[1] = TK2[1] ^ (rc >> 4); + + /* Permute TK2 for the next round */ + skinny128_permute_tk(TK2); + + /* Apply the LFSR to TK2 */ + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + } +#endif +} + +void skinny_128_256_encrypt + (const skinny_128_256_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0; +#else + const uint32_t *schedule = ks->k; +#endif + uint32_t temp; + unsigned round; + + /* Unpack the input block into the state array */ + s0 = le_load_word32(input); + s1 = le_load_word32(input + 4); + s2 = le_load_word32(input + 8); + s3 = le_load_word32(input + 12); + + /* Make a local copy of the tweakable part of the state, TK1 */ + TK1[0] = le_load_word32(ks->TK1); + TK1[1] = le_load_word32(ks->TK1 + 4); + TK1[2] = le_load_word32(ks->TK1 + 8); + TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); +#endif + + /* Perform all encryption rounds */ + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { + /* Apply the S-box to all bytes in the state */ + skinny128_sbox(s0); + skinny128_sbox(s1); + skinny128_sbox(s2); + skinny128_sbox(s3); + + /* XOR the round constant and the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else + s0 ^= schedule[0] ^ TK1[0]; + s1 ^= schedule[1] ^ TK1[1]; +#endif + s2 ^= 0x02; + + /* Shift the cells in the rows right, which moves the cell + * values up closer to the MSB. That is, we do a left rotate + * on the word to rotate the cells in the word right */ + s1 = leftRotate8(s1); + s2 = leftRotate16(s2); + s3 = leftRotate24(s3); + + /* Mix the columns */ + s1 ^= s2; + s2 ^= s0; + temp = s3 ^ s2; + s3 = s2; + s2 = s1; + s1 = s0; + s0 = temp; + + /* Permute TK1 and TK2 for the next round */ + skinny128_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_permute_tk(TK2); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); +#else + schedule += 2; +#endif + } + + /* Pack the result into the output buffer */ + le_store_word32(output, s0); + le_store_word32(output + 4, s1); + le_store_word32(output + 8, s2); + le_store_word32(output + 12, s3); +} + +void skinny_128_256_decrypt + (const skinny_128_256_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + uint32_t TK1[4]; +#if SKINNY_128_SMALL_SCHEDULE + uint32_t TK2[4]; + uint8_t rc = 0x09; +#else + const uint32_t *schedule = &(ks->k[SKINNY_128_256_ROUNDS * 2 - 2]); +#endif + uint32_t temp; + unsigned round; + + /* Unpack the input block into the state array */ + s0 = le_load_word32(input); + s1 = le_load_word32(input + 4); + s2 = le_load_word32(input + 8); + s3 = le_load_word32(input + 12); + + /* Make a local copy of the tweakable part of the state, TK1. 
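+     * The tweakey permutation has order 16 and SKINNY-128-256 runs a
+     * multiple of 16 rounds, so a tweakey array that is only permuted,
+     * never clocked through an LFSR, ends where it started.  Each TK2
+     * cell, by contrast, passes through LFSR2 once every two rounds,
+     * which is why the fast-forward loop below (in the small-schedule
+     * build) applies LFSR2 to every TK2 word 24 times.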
+ * There is no need to fast-forward TK1 because the value at + * the end of the key schedule is the same as at the start */ + TK1[0] = le_load_word32(ks->TK1); + TK1[1] = le_load_word32(ks->TK1 + 4); + TK1[2] = le_load_word32(ks->TK1 + 8); + TK1[3] = le_load_word32(ks->TK1 + 12); +#if SKINNY_128_SMALL_SCHEDULE + TK2[0] = le_load_word32(ks->TK2); + TK2[1] = le_load_word32(ks->TK2 + 4); + TK2[2] = le_load_word32(ks->TK2 + 8); + TK2[3] = le_load_word32(ks->TK2 + 12); + for (round = 0; round < SKINNY_128_256_ROUNDS; round += 2) { + // Also fast-forward the LFSR's on every byte of TK2. + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + skinny128_LFSR2(TK2[2]); + skinny128_LFSR2(TK2[3]); + } +#endif + + /* Perform all decryption rounds */ + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { + /* Inverse permutation on TK1 for this round */ + skinny128_inv_permute_tk(TK1); +#if SKINNY_128_SMALL_SCHEDULE + skinny128_inv_permute_tk(TK2); + skinny128_LFSR3(TK2[2]); + skinny128_LFSR3(TK2[3]); +#endif + + /* Inverse mix of the columns */ + temp = s3; + s3 = s0; + s0 = s1; + s1 = s2; + s3 ^= temp; + s2 = temp ^ s0; + s1 ^= s2; + + /* Inverse shift of the rows */ + s1 = leftRotate24(s1); + s2 = leftRotate16(s2); + s3 = leftRotate8(s3); + + /* Apply the subkey for this round */ +#if SKINNY_128_SMALL_SCHEDULE + rc = (rc >> 1) ^ (((rc << 5) ^ rc ^ 0x20) & 0x20); + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); +#else + s0 ^= schedule[0] ^ TK1[0]; + s1 ^= schedule[1] ^ TK1[1]; + schedule -= 2; +#endif + s2 ^= 0x02; + + /* Apply the inverse of the S-box to all bytes in the state */ + skinny128_inv_sbox(s0); + skinny128_inv_sbox(s1); + skinny128_inv_sbox(s2); + skinny128_inv_sbox(s3); + } + + /* Pack the result into the output buffer */ + le_store_word32(output, s0); + le_store_word32(output + 4, s1); + le_store_word32(output + 8, s2); + le_store_word32(output + 12, s3); +} + +void skinny_128_256_encrypt_tk_full + (const unsigned char key[32], unsigned char *output, + const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + uint32_t TK1[4]; + uint32_t TK2[4]; + uint32_t temp; + unsigned round; + uint8_t rc = 0; + + /* Unpack the input block into the state array */ + s0 = le_load_word32(input); + s1 = le_load_word32(input + 4); + s2 = le_load_word32(input + 8); + s3 = le_load_word32(input + 12); + + /* Make a local copy of the tweakey */ + TK1[0] = le_load_word32(key); + TK1[1] = le_load_word32(key + 4); + TK1[2] = le_load_word32(key + 8); + TK1[3] = le_load_word32(key + 12); + TK2[0] = le_load_word32(key + 16); + TK2[1] = le_load_word32(key + 20); + TK2[2] = le_load_word32(key + 24); + TK2[3] = le_load_word32(key + 28); + + /* Perform all encryption rounds */ + for (round = 0; round < SKINNY_128_256_ROUNDS; ++round) { + /* Apply the S-box to all bytes in the state */ + skinny128_sbox(s0); + skinny128_sbox(s1); + skinny128_sbox(s2); + skinny128_sbox(s3); + + /* XOR the round constant and the subkey for this round */ + rc = (rc << 1) ^ ((rc >> 5) & 0x01) ^ ((rc >> 4) & 0x01) ^ 0x01; + rc &= 0x3F; + s0 ^= TK1[0] ^ TK2[0] ^ (rc & 0x0F); + s1 ^= TK1[1] ^ TK2[1] ^ (rc >> 4); + s2 ^= 0x02; + + /* Shift the cells in the rows right, which moves the cell + * values up closer to the MSB. 
That is, we do a left rotate + * on the word to rotate the cells in the word right */ + s1 = leftRotate8(s1); + s2 = leftRotate16(s2); + s3 = leftRotate24(s3); + + /* Mix the columns */ + s1 ^= s2; + s2 ^= s0; + temp = s3 ^ s2; + s3 = s2; + s2 = s1; + s1 = s0; + s0 = temp; + + /* Permute TK1 and TK2 for the next round */ + skinny128_permute_tk(TK1); + skinny128_permute_tk(TK2); + skinny128_LFSR2(TK2[0]); + skinny128_LFSR2(TK2[1]); + } + + /* Pack the result into the output buffer */ + le_store_word32(output, s0); + le_store_word32(output + 4, s1); + le_store_word32(output + 8, s2); + le_store_word32(output + 12, s3); +} + +#else /* __AVR__ */ + +void skinny_128_384_encrypt_tk2 + (skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, const unsigned char *tk2) +{ + memcpy(ks->TK2, tk2, 16); + skinny_128_384_encrypt(ks, output, input); +} + +#endif /* __AVR__ */ diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-skinny128.h b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-skinny128.h new file mode 100644 index 0000000..2bfda3c --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-skinny128.h @@ -0,0 +1,244 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_SKINNY128_H +#define LW_INTERNAL_SKINNY128_H + +/** + * \file internal-skinny128.h + * \brief SKINNY-128 block cipher family. + * + * References: https://eprint.iacr.org/2016/660.pdf, + * https://sites.google.com/site/skinnycipher/ + */ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \def SKINNY_128_SMALL_SCHEDULE + * \brief Defined to 1 to use the small key schedule version of SKINNY-128. + */ +#if defined(__AVR__) +#define SKINNY_128_SMALL_SCHEDULE 1 +#else +#define SKINNY_128_SMALL_SCHEDULE 0 +#endif + +/** + * \brief Size of a block for SKINNY-128 block ciphers. + */ +#define SKINNY_128_BLOCK_SIZE 16 + +/** + * \brief Number of rounds for SKINNY-128-384. + */ +#define SKINNY_128_384_ROUNDS 56 + +/** + * \brief Structure of the key schedule for SKINNY-128-384. 
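+ *
+ * A minimal usage sketch; the all-zero key and the TK1 || TK2 || TK3
+ * layout of the 48-byte key buffer are illustrative assumptions:
+ *
+ *     skinny_128_384_key_schedule_t ks;
+ *     unsigned char key[48] = {0};
+ *     unsigned char pt[16] = {0}, ct[16];
+ *     skinny_128_384_init(&ks, key);
+ *     skinny_128_384_encrypt(&ks, ct, pt);
+ *     skinny_128_384_decrypt(&ks, pt, ct);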
+ */ +typedef struct +{ + /** TK1 for the tweakable part of the key schedule */ + uint8_t TK1[16]; + +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; + + /** TK3 for the small key schedule */ + uint8_t TK3[16]; +#else + /** Words of the full key schedule */ + uint32_t k[SKINNY_128_384_ROUNDS * 2]; +#endif + +} skinny_128_384_key_schedule_t; + +/** + * \brief Initializes the key schedule for SKINNY-128-384. + * + * \param ks Points to the key schedule to initialize. + * \param key Points to the key data. + */ +void skinny_128_384_init + (skinny_128_384_key_schedule_t *ks, const unsigned char key[48]); + +/** + * \brief Encrypts a 128-bit block with SKINNY-128-384. + * + * \param ks Points to the SKINNY-128-384 key schedule. + * \param output Output buffer which must be at least 16 bytes in length. + * \param input Input buffer which must be at least 16 bytes in length. + * + * The \a input and \a output buffers can be the same buffer for + * in-place encryption. + */ +void skinny_128_384_encrypt + (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input); + +/** + * \brief Decrypts a 128-bit block with SKINNY-128-384. + * + * \param ks Points to the SKINNY-128-384 key schedule. + * \param output Output buffer which must be at least 16 bytes in length. + * \param input Input buffer which must be at least 16 bytes in length. + * + * The \a input and \a output buffers can be the same buffer for + * in-place encryption. + */ +void skinny_128_384_decrypt + (const skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input); + +/** + * \brief Encrypts a 128-bit block with SKINNY-128-384 and an explicitly + * provided TK2 value. + * + * \param ks Points to the SKINNY-128-384 key schedule. + * \param output Output buffer which must be at least 16 bytes in length. + * \param input Input buffer which must be at least 16 bytes in length. + * \param tk2 TK2 value that should be updated on the fly. + * + * The \a input and \a output buffers can be the same buffer for + * in-place encryption. + * + * This version is useful when both TK1 and TK2 change from block to block. + * When the key is initialized with skinny_128_384_init(), the TK2 part of + * the key value should be set to zero. + * + * \note Some versions of this function may modify the key schedule to + * copy tk2 into place. + */ +void skinny_128_384_encrypt_tk2 + (skinny_128_384_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, const unsigned char *tk2); + +/** + * \brief Encrypts a 128-bit block with SKINNY-128-384 and a + * fully specified tweakey value. + * + * \param key Points to the 384-bit tweakey value. + * \param output Output buffer which must be at least 16 bytes in length. + * \param input Input buffer which must be at least 16 bytes in length. + * + * The \a input and \a output buffers can be the same buffer for + * in-place encryption. + * + * This version is useful when the entire tweakey changes from block to + * block. It is slower than the other versions of SKINNY-128-384 but + * more memory-efficient. + */ +void skinny_128_384_encrypt_tk_full + (const unsigned char key[48], unsigned char *output, + const unsigned char *input); + +/** + * \brief Number of rounds for SKINNY-128-256. + */ +#define SKINNY_128_256_ROUNDS 48 + +/** + * \brief Structure of the key schedule for SKINNY-128-256. 
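+ *
+ * With the full schedule this structure stores TK1 plus
+ * SKINNY_128_256_ROUNDS * 2 = 96 32-bit words, about 400 bytes in all;
+ * the small-schedule build keeps only the 32 bytes of TK1 and TK2 and
+ * regenerates the round tweakeys on the fly during encryption.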
+ */ +typedef struct +{ + /** TK1 for the tweakable part of the key schedule */ + uint8_t TK1[16]; + +#if SKINNY_128_SMALL_SCHEDULE + /** TK2 for the small key schedule */ + uint8_t TK2[16]; +#else + /** Words of the full key schedule */ + uint32_t k[SKINNY_128_256_ROUNDS * 2]; +#endif + +} skinny_128_256_key_schedule_t; + +/** + * \brief Initializes the key schedule for SKINNY-128-256. + * + * \param ks Points to the key schedule to initialize. + * \param key Points to the key data. + */ +void skinny_128_256_init + (skinny_128_256_key_schedule_t *ks, const unsigned char key[32]); + +/** + * \brief Encrypts a 128-bit block with SKINNY-128-256. + * + * \param ks Points to the SKINNY-128-256 key schedule. + * \param output Output buffer which must be at least 16 bytes in length. + * \param input Input buffer which must be at least 16 bytes in length. + * + * The \a input and \a output buffers can be the same buffer for + * in-place encryption. + */ +void skinny_128_256_encrypt + (const skinny_128_256_key_schedule_t *ks, unsigned char *output, + const unsigned char *input); + +/** + * \brief Decrypts a 128-bit block with SKINNY-128-256. + * + * \param ks Points to the SKINNY-128-256 key schedule. + * \param output Output buffer which must be at least 16 bytes in length. + * \param input Input buffer which must be at least 16 bytes in length. + * + * The \a input and \a output buffers can be the same buffer for + * in-place encryption. + */ +void skinny_128_256_decrypt + (const skinny_128_256_key_schedule_t *ks, unsigned char *output, + const unsigned char *input); + +/** + * \brief Encrypts a 128-bit block with SKINNY-128-256 and a + * fully specified tweakey value. + * + * \param key Points to the 256-bit tweakey value. + * \param output Output buffer which must be at least 16 bytes in length. + * \param input Input buffer which must be at least 16 bytes in length. + * + * The \a input and \a output buffers can be the same buffer for + * in-place encryption. + * + * This version is useful when the entire tweakey changes from block to + * block. It is slower than the other versions of SKINNY-128-256 but + * more memory-efficient. + */ +void skinny_128_256_encrypt_tk_full + (const unsigned char key[32], unsigned char *output, + const unsigned char *input); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-skinnyutil.h b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-skinnyutil.h new file mode 100644 index 0000000..83136cb --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-skinnyutil.h @@ -0,0 +1,328 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_SKINNYUTIL_H +#define LW_INTERNAL_SKINNYUTIL_H + +/** + * \file internal-skinnyutil.h + * \brief Utilities to help implement SKINNY and its variants. + */ + +#include "internal-util.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @cond skinnyutil */ + +/* Utilities for implementing SKINNY-128 */ + +#define skinny128_LFSR2(x) \ + do { \ + uint32_t _x = (x); \ + (x) = ((_x << 1) & 0xFEFEFEFEU) ^ \ + (((_x >> 7) ^ (_x >> 5)) & 0x01010101U); \ + } while (0) + + +#define skinny128_LFSR3(x) \ + do { \ + uint32_t _x = (x); \ + (x) = ((_x >> 1) & 0x7F7F7F7FU) ^ \ + (((_x << 7) ^ (_x << 1)) & 0x80808080U); \ + } while (0) + +/* LFSR2 and LFSR3 are inverses of each other */ +#define skinny128_inv_LFSR2(x) skinny128_LFSR3(x) +#define skinny128_inv_LFSR3(x) skinny128_LFSR2(x) + +#define skinny128_permute_tk(tk) \ + do { \ + /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ + uint32_t row2 = tk[2]; \ + uint32_t row3 = tk[3]; \ + tk[2] = tk[0]; \ + tk[3] = tk[1]; \ + row3 = (row3 << 16) | (row3 >> 16); \ + tk[0] = ((row2 >> 8) & 0x000000FFU) | \ + ((row2 << 16) & 0x00FF0000U) | \ + ( row3 & 0xFF00FF00U); \ + tk[1] = ((row2 >> 16) & 0x000000FFU) | \ + (row2 & 0xFF000000U) | \ + ((row3 << 8) & 0x0000FF00U) | \ + ( row3 & 0x00FF0000U); \ + } while (0) + +#define skinny128_inv_permute_tk(tk) \ + do { \ + /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ + uint32_t row0 = tk[0]; \ + uint32_t row1 = tk[1]; \ + tk[0] = tk[2]; \ + tk[1] = tk[3]; \ + tk[2] = ((row0 >> 16) & 0x000000FFU) | \ + ((row0 << 8) & 0x0000FF00U) | \ + ((row1 << 16) & 0x00FF0000U) | \ + ( row1 & 0xFF000000U); \ + tk[3] = ((row0 >> 16) & 0x0000FF00U) | \ + ((row0 << 16) & 0xFF000000U) | \ + ((row1 >> 16) & 0x000000FFU) | \ + ((row1 << 8) & 0x00FF0000U); \ + } while (0) + +/* + * Apply the SKINNY sbox. The original version from the specification is + * equivalent to: + * + * #define SBOX_MIX(x) + * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) + * #define SBOX_SWAP(x) + * (((x) & 0xF9F9F9F9U) | + * (((x) >> 1) & 0x02020202U) | + * (((x) << 1) & 0x04040404U)) + * #define SBOX_PERMUTE(x) + * ((((x) & 0x01010101U) << 2) | + * (((x) & 0x06060606U) << 5) | + * (((x) & 0x20202020U) >> 5) | + * (((x) & 0xC8C8C8C8U) >> 2) | + * (((x) & 0x10101010U) >> 1)) + * + * x = SBOX_MIX(x); + * x = SBOX_PERMUTE(x); + * x = SBOX_MIX(x); + * x = SBOX_PERMUTE(x); + * x = SBOX_MIX(x); + * x = SBOX_PERMUTE(x); + * x = SBOX_MIX(x); + * return SBOX_SWAP(x); + * + * However, we can mix the bits in their original positions and then + * delay the SBOX_PERMUTE and SBOX_SWAP steps to be performed with one + * final permuatation. This reduces the number of shift operations. 
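+ *
+ * A quick host-side sanity check, assuming a standard C99 build with
+ * <assert.h> available, is to confirm that the bit-sliced S-box and its
+ * inverse below round-trip every byte value in all four lanes:
+ *
+ *     uint32_t v, x;
+ *     for (v = 0; v < 256; ++v) {
+ *         x = v * 0x01010101U;   /* replicate the byte across lanes */
+ *         skinny128_sbox(x);
+ *         skinny128_inv_sbox(x);
+ *         assert(x == v * 0x01010101U);
+ *     }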
+ */ +#define skinny128_sbox(x) \ +do { \ + uint32_t y; \ + \ + /* Mix the bits */ \ + x = ~x; \ + x ^= (((x >> 2) & (x >> 3)) & 0x11111111U); \ + y = (((x << 5) & (x << 1)) & 0x20202020U); \ + x ^= (((x << 5) & (x << 4)) & 0x40404040U) ^ y; \ + y = (((x << 2) & (x << 1)) & 0x80808080U); \ + x ^= (((x >> 2) & (x << 1)) & 0x02020202U) ^ y; \ + y = (((x >> 5) & (x << 1)) & 0x04040404U); \ + x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ + x = ~x; \ + \ + /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ + /* The final permutation for each byte is [2 7 6 1 3 0 4 5] */ \ + x = ((x & 0x08080808U) << 1) | \ + ((x & 0x32323232U) << 2) | \ + ((x & 0x01010101U) << 5) | \ + ((x & 0x80808080U) >> 6) | \ + ((x & 0x40404040U) >> 4) | \ + ((x & 0x04040404U) >> 2); \ +} while (0) + +/* + * Apply the inverse of the SKINNY sbox. The original version from the + * specification is equivalent to: + * + * #define SBOX_MIX(x) + * (((~((((x) >> 1) | (x)) >> 2)) & 0x11111111U) ^ (x)) + * #define SBOX_SWAP(x) + * (((x) & 0xF9F9F9F9U) | + * (((x) >> 1) & 0x02020202U) | + * (((x) << 1) & 0x04040404U)) + * #define SBOX_PERMUTE_INV(x) + * ((((x) & 0x08080808U) << 1) | + * (((x) & 0x32323232U) << 2) | + * (((x) & 0x01010101U) << 5) | + * (((x) & 0xC0C0C0C0U) >> 5) | + * (((x) & 0x04040404U) >> 2)) + * + * x = SBOX_SWAP(x); + * x = SBOX_MIX(x); + * x = SBOX_PERMUTE_INV(x); + * x = SBOX_MIX(x); + * x = SBOX_PERMUTE_INV(x); + * x = SBOX_MIX(x); + * x = SBOX_PERMUTE_INV(x); + * return SBOX_MIX(x); + * + * However, we can mix the bits in their original positions and then + * delay the SBOX_PERMUTE_INV and SBOX_SWAP steps to be performed with one + * final permuatation. This reduces the number of shift operations. + */ +#define skinny128_inv_sbox(x) \ +do { \ + uint32_t y; \ + \ + /* Mix the bits */ \ + x = ~x; \ + y = (((x >> 1) & (x >> 3)) & 0x01010101U); \ + x ^= (((x >> 2) & (x >> 3)) & 0x10101010U) ^ y; \ + y = (((x >> 6) & (x >> 1)) & 0x02020202U); \ + x ^= (((x >> 1) & (x >> 2)) & 0x08080808U) ^ y; \ + y = (((x << 2) & (x << 1)) & 0x80808080U); \ + x ^= (((x >> 1) & (x << 2)) & 0x04040404U) ^ y; \ + y = (((x << 5) & (x << 1)) & 0x20202020U); \ + x ^= (((x << 4) & (x << 5)) & 0x40404040U) ^ y; \ + x = ~x; \ + \ + /* Permutation generated by http://programming.sirrida.de/calcperm.php */ \ + /* The final permutation for each byte is [5 3 0 4 6 7 2 1] */ \ + x = ((x & 0x01010101U) << 2) | \ + ((x & 0x04040404U) << 4) | \ + ((x & 0x02020202U) << 6) | \ + ((x & 0x20202020U) >> 5) | \ + ((x & 0xC8C8C8C8U) >> 2) | \ + ((x & 0x10101010U) >> 1); \ +} while (0) + +/* Utilities for implementing SKINNY-64 */ + +#define skinny64_LFSR2(x) \ + do { \ + uint16_t _x = (x); \ + (x) = ((_x << 1) & 0xEEEEU) ^ (((_x >> 3) ^ (_x >> 2)) & 0x1111U); \ + } while (0) + +#define skinny64_LFSR3(x) \ + do { \ + uint16_t _x = (x); \ + (x) = ((_x >> 1) & 0x7777U) ^ ((_x ^ (_x << 3)) & 0x8888U); \ + } while (0) + +/* LFSR2 and LFSR3 are inverses of each other */ +#define skinny64_inv_LFSR2(x) skinny64_LFSR3(x) +#define skinny64_inv_LFSR3(x) skinny64_LFSR2(x) + +#define skinny64_permute_tk(tk) \ + do { \ + /* PT = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7] */ \ + uint16_t row2 = tk[2]; \ + uint16_t row3 = tk[3]; \ + tk[2] = tk[0]; \ + tk[3] = tk[1]; \ + row3 = (row3 << 8) | (row3 >> 8); \ + tk[0] = ((row2 << 4) & 0xF000U) | \ + ((row2 >> 8) & 0x00F0U) | \ + ( row3 & 0x0F0FU); \ + tk[1] = ((row2 << 8) & 0xF000U) | \ + ((row3 >> 4) & 0x0F00U) | \ + ( row3 & 0x00F0U) | \ + ( row2 & 0x000FU); \ + } while (0) + 
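+/* skinny64_inv_permute_tk below undoes skinny64_permute_tk above.  A
+ * simple host-side round-trip check, assuming <assert.h> is available:
+ *
+ *     uint16_t tk[4] = { 0x0123, 0x4567, 0x89AB, 0xCDEF };
+ *     skinny64_permute_tk(tk);
+ *     skinny64_inv_permute_tk(tk);
+ *     assert(tk[0] == 0x0123 && tk[1] == 0x4567 &&
+ *            tk[2] == 0x89AB && tk[3] == 0xCDEF);
+ */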
+#define skinny64_inv_permute_tk(tk) \ + do { \ + /* PT' = [8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1] */ \ + uint16_t row0 = tk[0]; \ + uint16_t row1 = tk[1]; \ + tk[0] = tk[2]; \ + tk[1] = tk[3]; \ + tk[2] = ((row0 << 8) & 0xF000U) | \ + ((row0 >> 4) & 0x0F00U) | \ + ((row1 >> 8) & 0x00F0U) | \ + ( row1 & 0x000FU); \ + tk[3] = ((row1 << 8) & 0xF000U) | \ + ((row0 << 8) & 0x0F00U) | \ + ((row1 >> 4) & 0x00F0U) | \ + ((row0 >> 8) & 0x000FU); \ + } while (0) + +/* + * Apply the SKINNY-64 sbox. The original version from the + * specification is equivalent to: + * + * #define SBOX_MIX(x) + * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) + * #define SBOX_SHIFT(x) + * ((((x) << 1) & 0xEEEEU) | (((x) >> 3) & 0x1111U)) + * + * x = SBOX_MIX(x); + * x = SBOX_SHIFT(x); + * x = SBOX_MIX(x); + * x = SBOX_SHIFT(x); + * x = SBOX_MIX(x); + * x = SBOX_SHIFT(x); + * return SBOX_MIX(x); + * + * However, we can mix the bits in their original positions and then + * delay the SBOX_SHIFT steps to be performed with one final rotation. + * This reduces the number of required shift operations from 14 to 10. + * + * We can further reduce the number of NOT operations from 4 to 2 + * using the technique from https://github.com/kste/skinny_avx to + * convert NOR-XOR operations into AND-XOR operations by converting + * the S-box into its NOT-inverse. + */ +#define skinny64_sbox(x) \ +do { \ + x = ~x; \ + x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ + x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ + x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ + x = (((x >> 2) & (x << 1)) & 0x2222U) ^ x; \ + x = ~x; \ + x = ((x >> 1) & 0x7777U) | ((x << 3) & 0x8888U); \ +} while (0) + +/* + * Apply the inverse of the SKINNY-64 sbox. The original version + * from the specification is equivalent to: + * + * #define SBOX_MIX(x) + * (((~((((x) >> 1) | (x)) >> 2)) & 0x1111U) ^ (x)) + * #define SBOX_SHIFT_INV(x) + * ((((x) >> 1) & 0x7777U) | (((x) << 3) & 0x8888U)) + * + * x = SBOX_MIX(x); + * x = SBOX_SHIFT_INV(x); + * x = SBOX_MIX(x); + * x = SBOX_SHIFT_INV(x); + * x = SBOX_MIX(x); + * x = SBOX_SHIFT_INV(x); + * return SBOX_MIX(x); + */ +#define skinny64_inv_sbox(x) \ +do { \ + x = ~x; \ + x = (((x >> 3) & (x >> 2)) & 0x1111U) ^ x; \ + x = (((x << 1) & (x >> 2)) & 0x2222U) ^ x; \ + x = (((x << 1) & (x << 2)) & 0x4444U) ^ x; \ + x = (((x << 1) & (x << 2)) & 0x8888U) ^ x; \ + x = ~x; \ + x = ((x << 1) & 0xEEEEU) | ((x >> 3) & 0x1111U); \ +} while (0) + +/** @endcond */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-util.h b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-util.h new file mode 100644 index 0000000..e30166d --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/internal-util.h @@ -0,0 +1,702 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_UTIL_H +#define LW_INTERNAL_UTIL_H + +#include + +/* Figure out how to inline functions using this C compiler */ +#if defined(__STDC__) && __STDC_VERSION__ >= 199901L +#define STATIC_INLINE static inline +#elif defined(__GNUC__) || defined(__clang__) +#define STATIC_INLINE static __inline__ +#else +#define STATIC_INLINE static +#endif + +/* Try to figure out whether the CPU is little-endian or big-endian. + * May need to modify this to include new compiler-specific defines. + * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your + * compiler flags when you compile this library */ +#if defined(__x86_64) || defined(__x86_64__) || \ + defined(__i386) || defined(__i386__) || \ + defined(__AVR__) || defined(__arm) || defined(__arm__) || \ + defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ + defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ + defined(__LITTLE_ENDIAN__) +#define LW_UTIL_LITTLE_ENDIAN 1 +#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ + defined(__BIG_ENDIAN__) +/* Big endian */ +#else +#error "Cannot determine the endianess of this platform" +#endif + +/* Helper macros to load and store values while converting endian-ness */ + +/* Load a big-endian 32-bit word from a byte buffer */ +#define be_load_word32(ptr) \ + ((((uint32_t)((ptr)[0])) << 24) | \ + (((uint32_t)((ptr)[1])) << 16) | \ + (((uint32_t)((ptr)[2])) << 8) | \ + ((uint32_t)((ptr)[3]))) + +/* Store a big-endian 32-bit word into a byte buffer */ +#define be_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 24); \ + (ptr)[1] = (uint8_t)(_x >> 16); \ + (ptr)[2] = (uint8_t)(_x >> 8); \ + (ptr)[3] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 32-bit word from a byte buffer */ +#define le_load_word32(ptr) \ + ((((uint32_t)((ptr)[3])) << 24) | \ + (((uint32_t)((ptr)[2])) << 16) | \ + (((uint32_t)((ptr)[1])) << 8) | \ + ((uint32_t)((ptr)[0]))) + +/* Store a little-endian 32-bit word into a byte buffer */ +#define le_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + } while (0) + +/* Load a big-endian 64-bit word from a byte buffer */ +#define be_load_word64(ptr) \ + ((((uint64_t)((ptr)[0])) << 56) | \ + (((uint64_t)((ptr)[1])) << 48) | \ + (((uint64_t)((ptr)[2])) << 40) | \ + (((uint64_t)((ptr)[3])) << 32) | \ + (((uint64_t)((ptr)[4])) << 24) | \ + (((uint64_t)((ptr)[5])) << 16) | \ + (((uint64_t)((ptr)[6])) << 8) | \ + ((uint64_t)((ptr)[7]))) + +/* Store a big-endian 64-bit word into a byte buffer */ +#define be_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 56); \ + (ptr)[1] = (uint8_t)(_x >> 48); \ + (ptr)[2] = (uint8_t)(_x >> 40); \ + (ptr)[3] = (uint8_t)(_x >> 32); \ + (ptr)[4] = (uint8_t)(_x >> 24); \ + (ptr)[5] = (uint8_t)(_x >> 16); \ + (ptr)[6] = (uint8_t)(_x >> 8); \ + 
(ptr)[7] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 64-bit word from a byte buffer */ +#define le_load_word64(ptr) \ + ((((uint64_t)((ptr)[7])) << 56) | \ + (((uint64_t)((ptr)[6])) << 48) | \ + (((uint64_t)((ptr)[5])) << 40) | \ + (((uint64_t)((ptr)[4])) << 32) | \ + (((uint64_t)((ptr)[3])) << 24) | \ + (((uint64_t)((ptr)[2])) << 16) | \ + (((uint64_t)((ptr)[1])) << 8) | \ + ((uint64_t)((ptr)[0]))) + +/* Store a little-endian 64-bit word into a byte buffer */ +#define le_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + (ptr)[4] = (uint8_t)(_x >> 32); \ + (ptr)[5] = (uint8_t)(_x >> 40); \ + (ptr)[6] = (uint8_t)(_x >> 48); \ + (ptr)[7] = (uint8_t)(_x >> 56); \ + } while (0) + +/* Load a big-endian 16-bit word from a byte buffer */ +#define be_load_word16(ptr) \ + ((((uint16_t)((ptr)[0])) << 8) | \ + ((uint16_t)((ptr)[1]))) + +/* Store a big-endian 16-bit word into a byte buffer */ +#define be_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 8); \ + (ptr)[1] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 16-bit word from a byte buffer */ +#define le_load_word16(ptr) \ + ((((uint16_t)((ptr)[1])) << 8) | \ + ((uint16_t)((ptr)[0]))) + +/* Store a little-endian 16-bit word into a byte buffer */ +#define le_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + } while (0) + +/* XOR a source byte buffer against a destination */ +#define lw_xor_block(dest, src, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ ^= *_src++; \ + --_len; \ + } \ + } while (0) + +/* XOR two source byte buffers and put the result in a destination buffer */ +#define lw_xor_block_2_src(dest, src1, src2, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ = *_src1++ ^ *_src2++; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time */ +#define lw_xor_block_2_dest(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest2++ = (*_dest++ ^= *_src++); \ + --_len; \ + } \ + } while (0) + +/* XOR two byte buffers and write to a destination which at the same + * time copying the contents of src2 to dest2 */ +#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src2++; \ + *_dest2++ = _temp; \ + *_dest++ = *_src1++ ^ _temp; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time. 
This version swaps the source value + * into the "dest" buffer */ +#define lw_xor_block_swap(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src++; \ + *_dest2++ = *_dest ^ _temp; \ + *_dest++ = _temp; \ + --_len; \ + } \ + } while (0) + +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + +/* Rotation macros for 32-bit arguments */ + +/* Generic left rotate */ +#define leftRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (32 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (32 - (bits))); \ + })) + +#if !LW_CRYPTO_ROTATE32_COMPOSED + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1(a) (leftRotate((a), 1)) +#define leftRotate2(a) (leftRotate((a), 2)) +#define leftRotate3(a) (leftRotate((a), 3)) +#define leftRotate4(a) (leftRotate((a), 4)) +#define leftRotate5(a) (leftRotate((a), 5)) +#define leftRotate6(a) (leftRotate((a), 6)) +#define leftRotate7(a) (leftRotate((a), 7)) +#define leftRotate8(a) (leftRotate((a), 8)) +#define leftRotate9(a) (leftRotate((a), 9)) +#define leftRotate10(a) (leftRotate((a), 10)) +#define leftRotate11(a) (leftRotate((a), 11)) +#define leftRotate12(a) (leftRotate((a), 12)) +#define leftRotate13(a) (leftRotate((a), 13)) +#define leftRotate14(a) (leftRotate((a), 14)) +#define leftRotate15(a) (leftRotate((a), 15)) +#define leftRotate16(a) (leftRotate((a), 16)) +#define leftRotate17(a) (leftRotate((a), 17)) +#define leftRotate18(a) (leftRotate((a), 18)) +#define leftRotate19(a) (leftRotate((a), 19)) +#define leftRotate20(a) (leftRotate((a), 20)) +#define leftRotate21(a) (leftRotate((a), 21)) +#define leftRotate22(a) (leftRotate((a), 22)) +#define leftRotate23(a) (leftRotate((a), 23)) +#define leftRotate24(a) (leftRotate((a), 24)) +#define leftRotate25(a) (leftRotate((a), 25)) +#define leftRotate26(a) (leftRotate((a), 26)) +#define leftRotate27(a) (leftRotate((a), 27)) +#define leftRotate28(a) (leftRotate((a), 28)) +#define leftRotate29(a) (leftRotate((a), 29)) +#define leftRotate30(a) (leftRotate((a), 30)) +#define leftRotate31(a) (leftRotate((a), 31)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1(a) (rightRotate((a), 1)) +#define rightRotate2(a) (rightRotate((a), 2)) +#define rightRotate3(a) (rightRotate((a), 3)) +#define rightRotate4(a) (rightRotate((a), 4)) +#define rightRotate5(a) (rightRotate((a), 5)) +#define rightRotate6(a) (rightRotate((a), 6)) +#define rightRotate7(a) (rightRotate((a), 7)) +#define rightRotate8(a) (rightRotate((a), 8)) +#define rightRotate9(a) (rightRotate((a), 9)) +#define rightRotate10(a) (rightRotate((a), 10)) +#define rightRotate11(a) (rightRotate((a), 11)) +#define rightRotate12(a) (rightRotate((a), 12)) +#define rightRotate13(a) (rightRotate((a), 13)) +#define rightRotate14(a) (rightRotate((a), 14)) +#define rightRotate15(a) (rightRotate((a), 15)) +#define rightRotate16(a) (rightRotate((a), 16)) +#define rightRotate17(a) (rightRotate((a), 17)) +#define rightRotate18(a) (rightRotate((a), 18)) +#define rightRotate19(a) (rightRotate((a), 19)) +#define rightRotate20(a) (rightRotate((a), 20)) +#define rightRotate21(a) (rightRotate((a), 21)) +#define rightRotate22(a) (rightRotate((a), 22)) +#define rightRotate23(a) (rightRotate((a), 23)) +#define rightRotate24(a) (rightRotate((a), 24)) +#define rightRotate25(a) (rightRotate((a), 25)) +#define rightRotate26(a) (rightRotate((a), 26)) +#define rightRotate27(a) (rightRotate((a), 27)) +#define rightRotate28(a) (rightRotate((a), 28)) +#define rightRotate29(a) (rightRotate((a), 29)) +#define rightRotate30(a) (rightRotate((a), 30)) +#define rightRotate31(a) (rightRotate((a), 31)) + +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) 
(leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Rotation macros for 64-bit arguments */ + +/* Generic left rotate */ +#define leftRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (64 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (64 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_64(a) (leftRotate_64((a), 1)) +#define leftRotate2_64(a) (leftRotate_64((a), 2)) +#define leftRotate3_64(a) (leftRotate_64((a), 3)) +#define leftRotate4_64(a) (leftRotate_64((a), 4)) +#define leftRotate5_64(a) (leftRotate_64((a), 5)) +#define leftRotate6_64(a) (leftRotate_64((a), 6)) +#define leftRotate7_64(a) (leftRotate_64((a), 7)) +#define leftRotate8_64(a) (leftRotate_64((a), 8)) +#define leftRotate9_64(a) (leftRotate_64((a), 9)) +#define leftRotate10_64(a) (leftRotate_64((a), 10)) +#define leftRotate11_64(a) (leftRotate_64((a), 11)) +#define leftRotate12_64(a) (leftRotate_64((a), 12)) +#define leftRotate13_64(a) (leftRotate_64((a), 13)) +#define leftRotate14_64(a) (leftRotate_64((a), 14)) +#define leftRotate15_64(a) (leftRotate_64((a), 15)) +#define leftRotate16_64(a) (leftRotate_64((a), 16)) +#define leftRotate17_64(a) (leftRotate_64((a), 17)) +#define leftRotate18_64(a) (leftRotate_64((a), 18)) +#define leftRotate19_64(a) (leftRotate_64((a), 19)) +#define leftRotate20_64(a) (leftRotate_64((a), 20)) +#define leftRotate21_64(a) (leftRotate_64((a), 21)) +#define leftRotate22_64(a) (leftRotate_64((a), 22)) +#define leftRotate23_64(a) (leftRotate_64((a), 23)) +#define leftRotate24_64(a) (leftRotate_64((a), 24)) +#define leftRotate25_64(a) (leftRotate_64((a), 25)) +#define leftRotate26_64(a) (leftRotate_64((a), 26)) +#define leftRotate27_64(a) (leftRotate_64((a), 27)) +#define leftRotate28_64(a) (leftRotate_64((a), 28)) +#define leftRotate29_64(a) (leftRotate_64((a), 29)) +#define leftRotate30_64(a) (leftRotate_64((a), 30)) +#define leftRotate31_64(a) (leftRotate_64((a), 31)) +#define leftRotate32_64(a) (leftRotate_64((a), 32)) +#define leftRotate33_64(a) (leftRotate_64((a), 33)) +#define leftRotate34_64(a) (leftRotate_64((a), 34)) +#define leftRotate35_64(a) (leftRotate_64((a), 35)) +#define leftRotate36_64(a) (leftRotate_64((a), 36)) +#define leftRotate37_64(a) (leftRotate_64((a), 37)) +#define leftRotate38_64(a) (leftRotate_64((a), 38)) +#define leftRotate39_64(a) (leftRotate_64((a), 39)) +#define leftRotate40_64(a) (leftRotate_64((a), 40)) +#define leftRotate41_64(a) (leftRotate_64((a), 41)) +#define leftRotate42_64(a) (leftRotate_64((a), 42)) +#define leftRotate43_64(a) (leftRotate_64((a), 43)) +#define leftRotate44_64(a) (leftRotate_64((a), 44)) +#define leftRotate45_64(a) (leftRotate_64((a), 45)) +#define leftRotate46_64(a) (leftRotate_64((a), 46)) +#define leftRotate47_64(a) (leftRotate_64((a), 47)) +#define leftRotate48_64(a) (leftRotate_64((a), 48)) +#define leftRotate49_64(a) (leftRotate_64((a), 49)) +#define leftRotate50_64(a) (leftRotate_64((a), 50)) +#define leftRotate51_64(a) (leftRotate_64((a), 51)) +#define leftRotate52_64(a) (leftRotate_64((a), 52)) +#define leftRotate53_64(a) (leftRotate_64((a), 53)) +#define leftRotate54_64(a) (leftRotate_64((a), 54)) +#define leftRotate55_64(a) (leftRotate_64((a), 55)) +#define leftRotate56_64(a) (leftRotate_64((a), 56)) +#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) +#define leftRotate58_64(a) (leftRotate_64((a), 58)) +#define leftRotate59_64(a) (leftRotate_64((a), 59)) +#define leftRotate60_64(a) (leftRotate_64((a), 60)) +#define leftRotate61_64(a) (leftRotate_64((a), 61)) +#define leftRotate62_64(a) (leftRotate_64((a), 62)) +#define leftRotate63_64(a) (leftRotate_64((a), 63)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_64(a) (rightRotate_64((a), 1)) +#define rightRotate2_64(a) (rightRotate_64((a), 2)) +#define rightRotate3_64(a) (rightRotate_64((a), 3)) +#define rightRotate4_64(a) (rightRotate_64((a), 4)) +#define rightRotate5_64(a) (rightRotate_64((a), 5)) +#define rightRotate6_64(a) (rightRotate_64((a), 6)) +#define rightRotate7_64(a) (rightRotate_64((a), 7)) +#define rightRotate8_64(a) (rightRotate_64((a), 8)) +#define rightRotate9_64(a) (rightRotate_64((a), 9)) +#define rightRotate10_64(a) (rightRotate_64((a), 10)) +#define rightRotate11_64(a) (rightRotate_64((a), 11)) +#define rightRotate12_64(a) (rightRotate_64((a), 12)) +#define rightRotate13_64(a) (rightRotate_64((a), 13)) +#define rightRotate14_64(a) (rightRotate_64((a), 14)) +#define rightRotate15_64(a) (rightRotate_64((a), 15)) +#define rightRotate16_64(a) (rightRotate_64((a), 16)) +#define rightRotate17_64(a) (rightRotate_64((a), 17)) +#define rightRotate18_64(a) (rightRotate_64((a), 18)) +#define rightRotate19_64(a) (rightRotate_64((a), 19)) +#define rightRotate20_64(a) (rightRotate_64((a), 20)) +#define rightRotate21_64(a) (rightRotate_64((a), 21)) +#define rightRotate22_64(a) (rightRotate_64((a), 22)) +#define rightRotate23_64(a) (rightRotate_64((a), 23)) +#define rightRotate24_64(a) (rightRotate_64((a), 24)) +#define rightRotate25_64(a) (rightRotate_64((a), 25)) +#define rightRotate26_64(a) (rightRotate_64((a), 26)) +#define rightRotate27_64(a) (rightRotate_64((a), 27)) +#define rightRotate28_64(a) (rightRotate_64((a), 28)) +#define rightRotate29_64(a) (rightRotate_64((a), 29)) +#define rightRotate30_64(a) (rightRotate_64((a), 30)) +#define rightRotate31_64(a) (rightRotate_64((a), 31)) +#define rightRotate32_64(a) (rightRotate_64((a), 32)) +#define rightRotate33_64(a) (rightRotate_64((a), 33)) +#define rightRotate34_64(a) (rightRotate_64((a), 34)) +#define rightRotate35_64(a) (rightRotate_64((a), 35)) +#define rightRotate36_64(a) (rightRotate_64((a), 36)) +#define rightRotate37_64(a) (rightRotate_64((a), 37)) +#define rightRotate38_64(a) (rightRotate_64((a), 38)) +#define rightRotate39_64(a) (rightRotate_64((a), 39)) +#define rightRotate40_64(a) (rightRotate_64((a), 40)) +#define rightRotate41_64(a) (rightRotate_64((a), 41)) +#define rightRotate42_64(a) (rightRotate_64((a), 42)) +#define rightRotate43_64(a) (rightRotate_64((a), 43)) +#define rightRotate44_64(a) (rightRotate_64((a), 44)) +#define rightRotate45_64(a) (rightRotate_64((a), 45)) +#define rightRotate46_64(a) (rightRotate_64((a), 46)) +#define rightRotate47_64(a) (rightRotate_64((a), 47)) +#define rightRotate48_64(a) (rightRotate_64((a), 48)) +#define rightRotate49_64(a) (rightRotate_64((a), 49)) +#define rightRotate50_64(a) (rightRotate_64((a), 50)) +#define rightRotate51_64(a) (rightRotate_64((a), 51)) +#define rightRotate52_64(a) (rightRotate_64((a), 52)) +#define rightRotate53_64(a) (rightRotate_64((a), 53)) +#define rightRotate54_64(a) (rightRotate_64((a), 54)) +#define rightRotate55_64(a) (rightRotate_64((a), 55)) +#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) +#define rightRotate57_64(a) (rightRotate_64((a), 57)) +#define rightRotate58_64(a) (rightRotate_64((a), 58)) +#define rightRotate59_64(a) (rightRotate_64((a), 59)) +#define rightRotate60_64(a) (rightRotate_64((a), 60)) +#define rightRotate61_64(a) (rightRotate_64((a), 61)) +#define rightRotate62_64(a) (rightRotate_64((a), 62)) +#define rightRotate63_64(a) (rightRotate_64((a), 63)) + +/* Rotate a 16-bit value left by a number of bits */ +#define leftRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (16 - (bits))); \ + })) + +/* Rotate a 16-bit value right by a number of bits */ +#define rightRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (16 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_16(a) (leftRotate_16((a), 1)) +#define leftRotate2_16(a) (leftRotate_16((a), 2)) +#define leftRotate3_16(a) (leftRotate_16((a), 3)) +#define leftRotate4_16(a) (leftRotate_16((a), 4)) +#define leftRotate5_16(a) (leftRotate_16((a), 5)) +#define leftRotate6_16(a) (leftRotate_16((a), 6)) +#define leftRotate7_16(a) (leftRotate_16((a), 7)) +#define leftRotate8_16(a) (leftRotate_16((a), 8)) +#define leftRotate9_16(a) (leftRotate_16((a), 9)) +#define leftRotate10_16(a) (leftRotate_16((a), 10)) +#define leftRotate11_16(a) (leftRotate_16((a), 11)) +#define leftRotate12_16(a) (leftRotate_16((a), 12)) +#define leftRotate13_16(a) (leftRotate_16((a), 13)) +#define leftRotate14_16(a) (leftRotate_16((a), 14)) +#define leftRotate15_16(a) (leftRotate_16((a), 15)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_16(a) (rightRotate_16((a), 1)) +#define rightRotate2_16(a) (rightRotate_16((a), 2)) +#define rightRotate3_16(a) (rightRotate_16((a), 3)) +#define rightRotate4_16(a) (rightRotate_16((a), 4)) +#define rightRotate5_16(a) (rightRotate_16((a), 5)) +#define rightRotate6_16(a) (rightRotate_16((a), 6)) +#define rightRotate7_16(a) (rightRotate_16((a), 7)) +#define rightRotate8_16(a) (rightRotate_16((a), 8)) +#define rightRotate9_16(a) (rightRotate_16((a), 9)) +#define rightRotate10_16(a) (rightRotate_16((a), 10)) +#define rightRotate11_16(a) (rightRotate_16((a), 11)) +#define rightRotate12_16(a) (rightRotate_16((a), 12)) +#define rightRotate13_16(a) (rightRotate_16((a), 13)) +#define rightRotate14_16(a) (rightRotate_16((a), 14)) +#define rightRotate15_16(a) (rightRotate_16((a), 15)) + +/* Rotate an 8-bit value left by a number of bits */ +#define leftRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (8 - (bits))); \ + })) + +/* Rotate an 8-bit value right by a number of bits */ +#define rightRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (8 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_8(a) (leftRotate_8((a), 1)) +#define leftRotate2_8(a) (leftRotate_8((a), 2)) +#define leftRotate3_8(a) (leftRotate_8((a), 3)) +#define leftRotate4_8(a) (leftRotate_8((a), 4)) +#define leftRotate5_8(a) (leftRotate_8((a), 5)) +#define leftRotate6_8(a) (leftRotate_8((a), 6)) +#define leftRotate7_8(a) (leftRotate_8((a), 7)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_8(a) (rightRotate_8((a), 1)) +#define rightRotate2_8(a) (rightRotate_8((a), 2)) +#define rightRotate3_8(a) (rightRotate_8((a), 3)) +#define rightRotate4_8(a) (rightRotate_8((a), 4)) +#define rightRotate5_8(a) (rightRotate_8((a), 5)) +#define rightRotate6_8(a) (rightRotate_8((a), 6)) +#define rightRotate7_8(a) (rightRotate_8((a), 7)) + +#endif diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/skinny-hash.c b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/skinny-hash.c new file mode 100644 index 0000000..0abdeff --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/skinny-hash.c @@ -0,0 +1,174 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "skinny-hash.h" +#include "internal-skinny128.h" +#include "internal-util.h" +#include + +aead_hash_algorithm_t const skinny_tk3_hash_algorithm = { + "SKINNY-tk3-HASH", + sizeof(int), + SKINNY_HASH_SIZE, + AEAD_FLAG_NONE, + skinny_tk3_hash, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +aead_hash_algorithm_t const skinny_tk2_hash_algorithm = { + "SKINNY-tk2-HASH", + sizeof(int), + SKINNY_HASH_SIZE, + AEAD_FLAG_NONE, + skinny_tk2_hash, + (aead_hash_init_t)0, + (aead_hash_update_t)0, + (aead_hash_finalize_t)0, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +/** + * \brief Size of the permutation state for SKINNY-tk3-HASH. + */ +#define SKINNY_TK3_STATE_SIZE 48 + +/** + * \brief Size of the permutation state for SKINNY-tk2-HASH. + */ +#define SKINNY_TK2_STATE_SIZE 32 + +/** + * \brief Rate of absorbing data for SKINNY-tk3-HASH. + */ +#define SKINNY_TK3_HASH_RATE 16 + +/** + * \brief Rate of absorbing data for SKINNY-tk2-HASH. 
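+ *
+ * Only 4 bytes are absorbed per permutation call, so in sponge terms the
+ * 32-byte state retains a 224-bit capacity, compared with the 16-byte
+ * rate and 256-bit capacity of SKINNY-tk3-HASH above.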
+ */ +#define SKINNY_TK2_HASH_RATE 4 + +/** + * \brief Input block that is encrypted with the state for each + * block permutation of SKINNY-tk3-HASH or SKINNY-tk2-HASH. + */ +static unsigned char const skinny_hash_block[48] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +/** + * \brief Permutes the internal state for SKINNY-tk3-HASH. + * + * \param state The state to be permuted. + */ +static void skinny_tk3_permute(unsigned char state[SKINNY_TK3_STATE_SIZE]) +{ + unsigned char temp[SKINNY_TK3_STATE_SIZE]; + skinny_128_384_encrypt_tk_full(state, temp, skinny_hash_block); + skinny_128_384_encrypt_tk_full(state, temp + 16, skinny_hash_block + 16); + skinny_128_384_encrypt_tk_full(state, temp + 32, skinny_hash_block + 32); + memcpy(state, temp, SKINNY_TK3_STATE_SIZE); +} + +/** + * \brief Permutes the internal state for SKINNY-tk2-HASH. + * + * \param state The state to be permuted. + */ +static void skinny_tk2_permute(unsigned char state[SKINNY_TK2_STATE_SIZE]) +{ + unsigned char temp[SKINNY_TK2_STATE_SIZE]; + skinny_128_256_encrypt_tk_full(state, temp, skinny_hash_block); + skinny_128_256_encrypt_tk_full(state, temp + 16, skinny_hash_block + 16); + memcpy(state, temp, SKINNY_TK2_STATE_SIZE); +} + +int skinny_tk3_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + unsigned char state[SKINNY_TK3_STATE_SIZE]; + unsigned temp; + + /* Initialize the hash state */ + memset(state, 0, sizeof(state)); + state[SKINNY_TK3_HASH_RATE] = 0x80; + + /* Process as many full blocks as possible */ + while (inlen >= SKINNY_TK3_HASH_RATE) { + lw_xor_block(state, in, SKINNY_TK3_HASH_RATE); + skinny_tk3_permute(state); + in += SKINNY_TK3_HASH_RATE; + inlen -= SKINNY_TK3_HASH_RATE; + } + + /* Pad and process the last block */ + temp = (unsigned)inlen; + lw_xor_block(state, in, temp); + state[temp] ^= 0x80; /* padding */ + skinny_tk3_permute(state); + + /* Generate the hash output */ + memcpy(out, state, 16); + skinny_tk3_permute(state); + memcpy(out + 16, state, 16); + return 0; +} + +int skinny_tk2_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + unsigned char state[SKINNY_TK2_STATE_SIZE]; + unsigned temp; + + /* Initialize the hash state */ + memset(state, 0, sizeof(state)); + state[SKINNY_TK2_HASH_RATE] = 0x80; + + /* Process as many full blocks as possible */ + while (inlen >= SKINNY_TK2_HASH_RATE) { + lw_xor_block(state, in, SKINNY_TK2_HASH_RATE); + skinny_tk2_permute(state); + in += SKINNY_TK2_HASH_RATE; + inlen -= SKINNY_TK2_HASH_RATE; + } + + /* Pad and process the last block */ + temp = (unsigned)inlen; + lw_xor_block(state, in, temp); + state[temp] ^= 0x80; /* padding */ + skinny_tk2_permute(state); + + /* Generate the hash output */ + memcpy(out, state, 16); + skinny_tk2_permute(state); + memcpy(out + 16, state, 16); + return 0; +} diff --git a/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/skinny-hash.h b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/skinny-hash.h new file mode 100644 index 0000000..f75ce9f --- /dev/null +++ b/skinny/Implementations/crypto_hash/skinnyhashtk3/rhys/skinny-hash.h @@ -0,0 +1,96 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_SKINNY_HASH_H +#define LWCRYPTO_SKINNY_HASH_H + +#include "aead-common.h" + +/** + * \file skinny-hash.h + * \brief Hash algorithms based on the SKINNY block cipher. + * + * The SKINNY-AEAD family includes two hash algorithms: + * + * \li SKINNY-tk3-HASH with a 256-bit hash output, based around the + * SKINNY-128-384 tweakable block cipher. This is the primary hashing + * member of the family. + * \li SKINNY-tk2-HASH with a 256-bit hash output, based around the + * SKINNY-128-256 tweakable block cipher. + * + * References: https://sites.google.com/site/skinnycipher/home + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the hash output for SKINNY-tk3-HASH and SKINNY-tk2-HASH. + */ +#define SKINNY_HASH_SIZE 32 + +/** + * \brief Meta-information block for the SKINNY-tk3-HASH algorithm. + */ +extern aead_hash_algorithm_t const skinny_tk3_hash_algorithm; + +/** + * \brief Meta-information block for the SKINNY-tk2-HASH algorithm. + */ +extern aead_hash_algorithm_t const skinny_tk2_hash_algorithm; + +/** + * \brief Hashes a block of input data with SKINNY-tk3-HASH to + * generate a hash value. + * + * \param out Buffer to receive the hash output which must be at least + * SKINNY_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int skinny_tk3_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Hashes a block of input data with SKINNY-tk2-HASH to + * generate a hash value. + * + * \param out Buffer to receive the hash output which must be at least + * SKINNY_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. 
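
(For orientation, a minimal usage sketch of the one-shot hashing API declared above — not part of the patch itself; the driver function and message below are purely illustrative:)

    #include <stdio.h>
    #include <string.h>
    #include "skinny-hash.h"

    /* Hashes a short message with SKINNY-tk3-HASH and prints the
     * 32-byte digest as hex. */
    static void print_tk3_digest(void)
    {
        static const unsigned char msg[] = "The quick brown fox";
        unsigned char digest[SKINNY_HASH_SIZE];
        if (skinny_tk3_hash(digest, msg, strlen((const char *)msg)) == 0) {
            for (unsigned i = 0; i < SKINNY_HASH_SIZE; ++i)
                printf("%02x", digest[i]);
            printf("\n");
        }
    }
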
+ */ +int skinny_tk2_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/aead-common.c b/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/aead-common.h b/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. 
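
(As a sketch of how the aead_cipher_t meta-information block above is meant to be consumed — illustrative only, not part of the patch; encrypt_with is a hypothetical caller, and the concrete aead_cipher_t instances are exported by the individual cipher headers elsewhere in the library:)

    /* Encrypts one packet through an aead_cipher_t entry.  The output
     * buffer must have room for mlen + cipher->tag_len bytes; no
     * associated data and no secret nonce are used in this example. */
    static int encrypt_with
        (const aead_cipher_t *cipher,
         unsigned char *c, unsigned long long *clen,
         const unsigned char *m, unsigned long long mlen,
         const unsigned char *npub, const unsigned char *k)
    {
        return (*(cipher->encrypt))(c, clen, m, mlen, 0, 0, 0, npub, k);
    }
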
Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
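
(The masking trick shared by aead_check_tag() and aead_check_tag_precheck() deserves a brief restatement; the helper below is illustrative and simply repeats the logic of the deleted aead-common.c shown above:)

    /* Returns -1 (all bits set) when the two tags are equal and 0 when
     * they differ, without data-dependent branches.  Like the library
     * code above, it relies on the usual arithmetic right shift of a
     * negative int. */
    static int tag_equal_mask
        (const unsigned char *tag1, const unsigned char *tag2, unsigned size)
    {
        int accum = 0;
        while (size > 0) {
            accum |= (*tag1++ ^ *tag2++); /* stays 0 only if every byte matches */
            --size;
        }
        /* accum is 0 on a match and 1..255 otherwise, so (accum - 1) >> 8
         * maps 0 -> -1 and 1..255 -> 0: exactly the keep/zero mask that is
         * ANDed over the plaintext and inverted to form the return value. */
        return (accum - 1) >> 8;
    }
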
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/api.h b/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/encrypt.c b/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/encrypt.c deleted file mode 100644 index a56e57a..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "sparkle.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return schwaemm_128_128_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return schwaemm_128_128_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/internal-sparkle-avr.S b/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/internal-sparkle-avr.S deleted file mode 100644 index 753ea2f..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/internal-sparkle-avr.S +++ /dev/null @@ -1,2887 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global sparkle_256 - .type sparkle_256, @function -sparkle_256: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - push r22 - ld r22,Z - ldd r23,Z+1 - ldd r26,Z+2 - ldd r27,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - rcall 129f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,1 - eor r8,r18 - rcall 129f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,2 - eor r8,r18 - rcall 129f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,3 - eor r8,r18 - rcall 129f - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,4 - eor r8,r18 - rcall 129f - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,5 - eor r8,r18 - rcall 129f - ldi r18,200 - ldi r19,161 - 
ldi r20,191 - ldi r21,207 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,6 - eor r8,r18 - rcall 129f - pop r18 - cpi r18,7 - brne 5094f - rjmp 615f -5094: - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,7 - eor r8,r18 - rcall 129f - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,8 - eor r8,r18 - rcall 129f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,9 - eor r8,r18 - rcall 129f - rjmp 615f -129: - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - movw r12,r22 - movw r14,r26 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - movw r24,r4 - movw r16,r6 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r28,Z+24 - ldd r29,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - 
movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - eor r14,r12 - eor r15,r13 - eor r16,r24 - eor r17,r25 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - eor r14,r8 - eor r15,r9 - eor r12,r10 - eor r13,r11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - std Z+20,r18 - std Z+21,r19 - std Z+22,r20 - std Z+23,r21 - movw r18,r4 - movw r20,r6 - movw r4,r14 - movw r6,r12 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - movw r8,r18 - movw r10,r20 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - eor r16,r28 - eor r17,r29 - eor r24,r2 - eor r25,r3 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - std Z+24,r28 - std Z+25,r29 - std Z+26,r2 - std Z+27,r3 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - movw r14,r22 - movw r12,r26 - eor r14,r18 - eor r15,r19 - eor r12,r20 - eor r13,r21 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - movw r22,r16 - movw r26,r24 - eor r22,r28 - eor r23,r29 - eor r26,r2 - eor r27,r3 - movw r28,r14 - movw r2,r12 - ret -615: - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 
- std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sparkle_256, .-sparkle_256 - - .text -.global sparkle_384 - .type sparkle_384, @function -sparkle_384: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - push r22 - ld r22,Z - ldd r23,Z+1 - ldd r26,Z+2 - ldd r27,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - rcall 140f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,1 - eor r8,r18 - rcall 140f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,2 - eor r8,r18 - rcall 140f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,3 - eor r8,r18 - rcall 140f - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,4 - eor r8,r18 - rcall 140f - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,5 - eor r8,r18 - rcall 140f - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,6 - eor r8,r18 - rcall 140f - pop r18 - cpi r18,7 - brne 5094f - rjmp 886f -5094: - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,7 - eor r8,r18 - rcall 140f - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,8 - eor r8,r18 - rcall 140f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,9 - eor r8,r18 - rcall 140f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,10 - eor r8,r18 - rcall 140f - rjmp 886f -140: - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor 
r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - movw r12,r22 - movw r14,r26 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - movw r24,r4 - movw r16,r6 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r28,Z+24 - ldd r29,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld 
r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r22 - std Z+17,r23 - std Z+18,r26 - std Z+19,r27 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r28 - std Z+25,r29 - std Z+26,r2 - std Z+27,r3 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - eor r12,r22 - eor r13,r23 - eor r14,r26 - eor r15,r27 - eor r24,r4 - eor r25,r5 - eor r16,r6 - eor r17,r7 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r28,Z+40 - ldd r29,Z+41 - ldd r2,Z+42 - ldd r3,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - eor r14,r12 - eor r15,r13 - eor r16,r24 - eor 
r17,r25 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - eor r14,r8 - eor r15,r9 - eor r12,r10 - eor r13,r11 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - ldd r0,Z+4 - eor r18,r0 - ldd r0,Z+5 - eor r19,r0 - ldd r0,Z+6 - eor r20,r0 - ldd r0,Z+7 - eor r21,r0 - std Z+20,r18 - std Z+21,r19 - std Z+22,r20 - std Z+23,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Z+28,r18 - std Z+29,r19 - std Z+30,r20 - std Z+31,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - std Z+36,r18 - std Z+37,r19 - std Z+38,r20 - std Z+39,r21 - eor r8,r14 - eor r9,r15 - eor r10,r12 - eor r11,r13 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - eor r18,r16 - eor r19,r17 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - eor r16,r28 - eor r17,r29 - eor r24,r2 - eor r25,r3 - ldd r28,Z+16 - ldd r29,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - std Z+40,r28 - std Z+41,r29 - std Z+42,r2 - std Z+43,r3 - ld r14,Z - ldd r15,Z+1 - ldd r12,Z+2 - ldd r13,Z+3 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - std Z+24,r14 - std Z+25,r15 - std Z+26,r12 - std Z+27,r13 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - std Z+32,r18 - std Z+33,r19 - std Z+34,r20 - std Z+35,r21 - eor r28,r16 - eor r29,r17 - eor r2,r24 - eor r3,r25 - ret -886: - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sparkle_384, .-sparkle_384 - - .text -.global sparkle_512 - .type sparkle_512, @function -sparkle_512: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - push r22 - ld r22,Z - ldd r23,Z+1 - ldd r26,Z+2 - ldd r27,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - rcall 151f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,1 - eor r8,r18 - rcall 151f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,2 - eor r8,r18 - rcall 151f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,3 - eor r8,r18 - rcall 151f - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,4 - eor r8,r18 - rcall 151f - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,5 - eor r8,r18 - rcall 151f - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r4,r18 - eor r5,r19 - 
eor r6,r20 - eor r7,r21 - ldi r18,6 - eor r8,r18 - rcall 151f - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,7 - eor r8,r18 - rcall 151f - pop r18 - cpi r18,8 - brne 5105f - rjmp 1189f -5105: - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,8 - eor r8,r18 - rcall 151f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,9 - eor r8,r18 - rcall 151f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,10 - eor r8,r18 - rcall 151f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,11 - eor r8,r18 - rcall 151f - rjmp 1189f -151: - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - movw r12,r22 - movw r14,r26 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - movw r24,r4 - movw r16,r6 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor 
r17,r11 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r28,Z+24 - ldd r29,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r22 - std Z+17,r23 - std Z+18,r26 - std Z+19,r27 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r28 - std Z+25,r29 - std Z+26,r2 - std Z+27,r3 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - eor r12,r22 - eor r13,r23 - eor r14,r26 - eor r15,r27 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - eor r24,r4 - eor r25,r5 - eor r16,r6 - eor r17,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r28,Z+40 - ldd r29,Z+41 - ldd r2,Z+42 - ldd r3,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw 
r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - std Z+32,r22 - std Z+33,r23 - std Z+34,r26 - std Z+35,r27 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r28 - std Z+41,r29 - std Z+42,r2 - std Z+43,r3 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - ldd r22,Z+48 - ldd r23,Z+49 - ldd r26,Z+50 - ldd r27,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r28,Z+56 - ldd r29,Z+57 - ldd r2,Z+58 - ldd r3,Z+59 - ldd r8,Z+60 - ldd r9,Z+61 - ldd r10,Z+62 - ldd r11,Z+63 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 
- eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - eor r14,r12 - eor r15,r13 - eor r16,r24 - eor r17,r25 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - ldd r18,Z+44 - ldd r19,Z+45 - ldd r20,Z+46 - ldd r21,Z+47 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - eor r14,r8 - eor r15,r9 - eor r12,r10 - eor r13,r11 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Z+60,r8 - std Z+61,r9 - std Z+62,r10 - std Z+63,r11 - ldd r8,Z+4 - ldd r9,Z+5 - ldd r10,Z+6 - ldd r11,Z+7 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - std Z+28,r4 - std Z+29,r5 - std Z+30,r6 - std Z+31,r7 - std Z+36,r8 - std Z+37,r9 - std Z+38,r10 - std Z+39,r11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - ldd r8,Z+52 - ldd r9,Z+53 - ldd r10,Z+54 - ldd r11,Z+55 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - ldd r0,Z+60 - eor r14,r0 - ldd r0,Z+61 - eor r15,r0 - ldd r0,Z+62 - eor r12,r0 - ldd r0,Z+63 - eor r13,r0 - std Z+20,r14 - std Z+21,r15 - std Z+22,r12 - std Z+23,r13 - movw r4,r18 - movw r6,r20 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - std Z+48,r22 - std Z+49,r23 - std Z+50,r26 - std Z+51,r27 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - eor r18,r16 - eor r19,r17 - eor r20,r24 - eor r21,r25 - eor r16,r28 - eor r17,r29 - eor r24,r2 - eor r25,r3 - ldd r14,Z+24 - ldd r15,Z+25 - ldd r12,Z+26 - ldd r13,Z+27 - std Z+56,r14 - std Z+57,r15 - std Z+58,r12 - std Z+59,r13 - ld r14,Z - ldd r15,Z+1 - ldd r12,Z+2 - ldd r13,Z+3 - eor r22,r14 - eor r23,r15 - eor r26,r12 - eor r27,r13 - std Z+24,r22 - std Z+25,r23 - std Z+26,r26 - std Z+27,r27 - std Z+32,r14 - std Z+33,r15 - std Z+34,r12 - std Z+35,r13 - ldd r14,Z+8 - ldd r15,Z+9 - ldd r12,Z+10 - ldd r13,Z+11 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - movw r22,r18 - movw r26,r20 - std Z+40,r14 - std Z+41,r15 - std Z+42,r12 - std Z+43,r13 - ldd r28,Z+48 - 
ldd r29,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r14,Z+16 - ldd r15,Z+17 - ldd r12,Z+18 - ldd r13,Z+19 - eor r28,r14 - eor r29,r15 - eor r2,r12 - eor r3,r13 - std Z+48,r14 - std Z+49,r15 - std Z+50,r12 - std Z+51,r13 - ldd r0,Z+56 - eor r16,r0 - ldd r0,Z+57 - eor r17,r0 - ldd r0,Z+58 - eor r24,r0 - ldd r0,Z+59 - eor r25,r0 - std Z+16,r16 - std Z+17,r17 - std Z+18,r24 - std Z+19,r25 - ret -1189: - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sparkle_512, .-sparkle_512 - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/internal-sparkle.c b/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/internal-sparkle.c deleted file mode 100644 index 4a4c0fb..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/internal-sparkle.c +++ /dev/null @@ -1,382 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-sparkle.h" - -#if !defined(__AVR__) - -/* The 8 basic round constants from the specification */ -#define RC_0 0xB7E15162 -#define RC_1 0xBF715880 -#define RC_2 0x38B4DA56 -#define RC_3 0x324E7738 -#define RC_4 0xBB1185EB -#define RC_5 0x4F7C7B57 -#define RC_6 0xCFBFA1C8 -#define RC_7 0xC2B3293D - -/* Round constants for all SPARKLE steps; maximum of 12 for SPARKLE-512 */ -static uint32_t const sparkle_rc[12] = { - RC_0, RC_1, RC_2, RC_3, RC_4, RC_5, RC_6, RC_7, - RC_0, RC_1, RC_2, RC_3 -}; - -/** - * \brief Alzette block cipher that implements the ARXbox layer of the - * SPARKLE permutation. - * - * \param x Left half of the 64-bit block. - * \param y Right half of the 64-bit block. - * \param k 32-bit round key. 
- */ -#define alzette(x, y, k) \ - do { \ - (x) += leftRotate1((y)); \ - (y) ^= leftRotate8((x)); \ - (x) ^= (k); \ - (x) += leftRotate15((y)); \ - (y) ^= leftRotate15((x)); \ - (x) ^= (k); \ - (x) += (y); \ - (y) ^= leftRotate1((x)); \ - (x) ^= (k); \ - (x) += leftRotate8((y)); \ - (y) ^= leftRotate16((x)); \ - (x) ^= (k); \ - } while (0) - -void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps) -{ - uint32_t x0, x1, x2, x3; - uint32_t y0, y1, y2, y3; - uint32_t tx, ty; - unsigned step; - - /* Load the SPARKLE-256 state up into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = s[0]; - y0 = s[1]; - x1 = s[2]; - y1 = s[3]; - x2 = s[4]; - y2 = s[5]; - x3 = s[6]; - y3 = s[7]; -#else - x0 = le_load_word32((const uint8_t *)&(s[0])); - y0 = le_load_word32((const uint8_t *)&(s[1])); - x1 = le_load_word32((const uint8_t *)&(s[2])); - y1 = le_load_word32((const uint8_t *)&(s[3])); - x2 = le_load_word32((const uint8_t *)&(s[4])); - y2 = le_load_word32((const uint8_t *)&(s[5])); - x3 = le_load_word32((const uint8_t *)&(s[6])); - y3 = le_load_word32((const uint8_t *)&(s[7])); -#endif - - /* Perform all requested steps */ - for (step = 0; step < steps; ++step) { - /* Add round constants */ - y0 ^= sparkle_rc[step]; - y1 ^= step; - - /* ARXbox layer */ - alzette(x0, y0, RC_0); - alzette(x1, y1, RC_1); - alzette(x2, y2, RC_2); - alzette(x3, y3, RC_3); - - /* Linear layer */ - tx = x0 ^ x1; - ty = y0 ^ y1; - tx = leftRotate16(tx ^ (tx << 16)); - ty = leftRotate16(ty ^ (ty << 16)); - y2 ^= tx; - tx ^= y3; - y3 = y1; - y1 = y2 ^ y0; - y2 = y0; - y0 = tx ^ y3; - x2 ^= ty; - ty ^= x3; - x3 = x1; - x1 = x2 ^ x0; - x2 = x0; - x0 = ty ^ x3; - } - - /* Write the local variables back to the SPARKLE-256 state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s[0] = x0; - s[1] = y0; - s[2] = x1; - s[3] = y1; - s[4] = x2; - s[5] = y2; - s[6] = x3; - s[7] = y3; -#else - le_store_word32((uint8_t *)&(s[0]), x0); - le_store_word32((uint8_t *)&(s[1]), y0); - le_store_word32((uint8_t *)&(s[2]), x1); - le_store_word32((uint8_t *)&(s[3]), y1); - le_store_word32((uint8_t *)&(s[4]), x2); - le_store_word32((uint8_t *)&(s[5]), y2); - le_store_word32((uint8_t *)&(s[6]), x3); - le_store_word32((uint8_t *)&(s[7]), y3); -#endif -} - -void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps) -{ - uint32_t x0, x1, x2, x3, x4, x5; - uint32_t y0, y1, y2, y3, y4, y5; - uint32_t tx, ty; - unsigned step; - - /* Load the SPARKLE-384 state up into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = s[0]; - y0 = s[1]; - x1 = s[2]; - y1 = s[3]; - x2 = s[4]; - y2 = s[5]; - x3 = s[6]; - y3 = s[7]; - x4 = s[8]; - y4 = s[9]; - x5 = s[10]; - y5 = s[11]; -#else - x0 = le_load_word32((const uint8_t *)&(s[0])); - y0 = le_load_word32((const uint8_t *)&(s[1])); - x1 = le_load_word32((const uint8_t *)&(s[2])); - y1 = le_load_word32((const uint8_t *)&(s[3])); - x2 = le_load_word32((const uint8_t *)&(s[4])); - y2 = le_load_word32((const uint8_t *)&(s[5])); - x3 = le_load_word32((const uint8_t *)&(s[6])); - y3 = le_load_word32((const uint8_t *)&(s[7])); - x4 = le_load_word32((const uint8_t *)&(s[8])); - y4 = le_load_word32((const uint8_t *)&(s[9])); - x5 = le_load_word32((const uint8_t *)&(s[10])); - y5 = le_load_word32((const uint8_t *)&(s[11])); -#endif - - /* Perform all requested steps */ - for (step = 0; step < steps; ++step) { - /* Add round constants */ - y0 ^= sparkle_rc[step]; - y1 ^= step; - - /* ARXbox layer */ - alzette(x0, y0, RC_0); - alzette(x1, y1, RC_1); - alzette(x2, y2, RC_2); - alzette(x3, y3, RC_3); 
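The alzette() macro above implements the ARXbox layer: four add/rotate/XOR rounds on one 64-bit branch, keyed by a fixed round constant. The following is a minimal, self-contained restatement of that macro as a C function, using the same left-rotation amounts; the inputs in main() are arbitrary illustration values, not SPARKLE test vectors.

    #include <stdint.h>
    #include <stdio.h>

    /* 32-bit rotate left; only called with 1 <= bits <= 31 here. */
    static uint32_t rol32(uint32_t v, unsigned bits)
    {
        return (v << bits) | (v >> (32u - bits));
    }

    /* Restatement of the alzette(x, y, k) macro from internal-sparkle.c above. */
    static void alzette(uint32_t *x, uint32_t *y, uint32_t k)
    {
        *x += rol32(*y, 1);  *y ^= rol32(*x, 8);  *x ^= k;
        *x += rol32(*y, 15); *y ^= rol32(*x, 15); *x ^= k;
        *x += *y;            *y ^= rol32(*x, 1);  *x ^= k;
        *x += rol32(*y, 8);  *y ^= rol32(*x, 16); *x ^= k;
    }

    int main(void)
    {
        uint32_t x = 0x01234567u, y = 0x89ABCDEFu;
        alzette(&x, &y, 0xB7E15162u);   /* RC_0 from the constant table above */
        printf("x=%08lX y=%08lX\n", (unsigned long)x, (unsigned long)y);
        return 0;
    }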
- alzette(x4, y4, RC_4); - alzette(x5, y5, RC_5); - - /* Linear layer */ - tx = x0 ^ x1 ^ x2; - ty = y0 ^ y1 ^ y2; - tx = leftRotate16(tx ^ (tx << 16)); - ty = leftRotate16(ty ^ (ty << 16)); - y3 ^= tx; - y4 ^= tx; - tx ^= y5; - y5 = y2; - y2 = y3 ^ y0; - y3 = y0; - y0 = y4 ^ y1; - y4 = y1; - y1 = tx ^ y5; - x3 ^= ty; - x4 ^= ty; - ty ^= x5; - x5 = x2; - x2 = x3 ^ x0; - x3 = x0; - x0 = x4 ^ x1; - x4 = x1; - x1 = ty ^ x5; - } - - /* Write the local variables back to the SPARKLE-384 state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s[0] = x0; - s[1] = y0; - s[2] = x1; - s[3] = y1; - s[4] = x2; - s[5] = y2; - s[6] = x3; - s[7] = y3; - s[8] = x4; - s[9] = y4; - s[10] = x5; - s[11] = y5; -#else - le_store_word32((uint8_t *)&(s[0]), x0); - le_store_word32((uint8_t *)&(s[1]), y0); - le_store_word32((uint8_t *)&(s[2]), x1); - le_store_word32((uint8_t *)&(s[3]), y1); - le_store_word32((uint8_t *)&(s[4]), x2); - le_store_word32((uint8_t *)&(s[5]), y2); - le_store_word32((uint8_t *)&(s[6]), x3); - le_store_word32((uint8_t *)&(s[7]), y3); - le_store_word32((uint8_t *)&(s[8]), x4); - le_store_word32((uint8_t *)&(s[9]), y4); - le_store_word32((uint8_t *)&(s[10]), x5); - le_store_word32((uint8_t *)&(s[11]), y5); -#endif -} - -void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps) -{ - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t y0, y1, y2, y3, y4, y5, y6, y7; - uint32_t tx, ty; - unsigned step; - - /* Load the SPARKLE-512 state up into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = s[0]; - y0 = s[1]; - x1 = s[2]; - y1 = s[3]; - x2 = s[4]; - y2 = s[5]; - x3 = s[6]; - y3 = s[7]; - x4 = s[8]; - y4 = s[9]; - x5 = s[10]; - y5 = s[11]; - x6 = s[12]; - y6 = s[13]; - x7 = s[14]; - y7 = s[15]; -#else - x0 = le_load_word32((const uint8_t *)&(s[0])); - y0 = le_load_word32((const uint8_t *)&(s[1])); - x1 = le_load_word32((const uint8_t *)&(s[2])); - y1 = le_load_word32((const uint8_t *)&(s[3])); - x2 = le_load_word32((const uint8_t *)&(s[4])); - y2 = le_load_word32((const uint8_t *)&(s[5])); - x3 = le_load_word32((const uint8_t *)&(s[6])); - y3 = le_load_word32((const uint8_t *)&(s[7])); - x4 = le_load_word32((const uint8_t *)&(s[8])); - y4 = le_load_word32((const uint8_t *)&(s[9])); - x5 = le_load_word32((const uint8_t *)&(s[10])); - y5 = le_load_word32((const uint8_t *)&(s[11])); - x6 = le_load_word32((const uint8_t *)&(s[12])); - y6 = le_load_word32((const uint8_t *)&(s[13])); - x7 = le_load_word32((const uint8_t *)&(s[14])); - y7 = le_load_word32((const uint8_t *)&(s[15])); -#endif - - /* Perform all requested steps */ - for (step = 0; step < steps; ++step) { - /* Add round constants */ - y0 ^= sparkle_rc[step]; - y1 ^= step; - - /* ARXbox layer */ - alzette(x0, y0, RC_0); - alzette(x1, y1, RC_1); - alzette(x2, y2, RC_2); - alzette(x3, y3, RC_3); - alzette(x4, y4, RC_4); - alzette(x5, y5, RC_5); - alzette(x6, y6, RC_6); - alzette(x7, y7, RC_7); - - /* Linear layer */ - tx = x0 ^ x1 ^ x2 ^ x3; - ty = y0 ^ y1 ^ y2 ^ y3; - tx = leftRotate16(tx ^ (tx << 16)); - ty = leftRotate16(ty ^ (ty << 16)); - y4 ^= tx; - y5 ^= tx; - y6 ^= tx; - tx ^= y7; - y7 = y3; - y3 = y4 ^ y0; - y4 = y0; - y0 = y5 ^ y1; - y5 = y1; - y1 = y6 ^ y2; - y6 = y2; - y2 = tx ^ y7; - x4 ^= ty; - x5 ^= ty; - x6 ^= ty; - ty ^= x7; - x7 = x3; - x3 = x4 ^ x0; - x4 = x0; - x0 = x5 ^ x1; - x5 = x1; - x1 = x6 ^ x2; - x6 = x2; - x2 = ty ^ x7; - } - - /* Write the local variables back to the SPARKLE-512 state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s[0] = x0; - s[1] = y0; - s[2] = x1; - s[3] = y1; - s[4] = x2; - s[5] = y2; 
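In the linear layers above, the branch sums tx and ty are each passed through the same small diffusion function, leftRotate16(t ^ (t << 16)) (the function written as ell in the SPARKLE design), before being fed across to the other half of the state. A short self-contained sketch of that helper follows; the sample value is arbitrary, not a test vector.

    #include <stdint.h>
    #include <stdio.h>

    /* ell(x) as used above: for x = (H << 16) | L it returns (L << 16) | (H ^ L),
     * i.e. the 16-bit halves swap and the old high half folds into the low half. */
    static uint32_t ell(uint32_t x)
    {
        x ^= x << 16;
        return (x << 16) | (x >> 16);   /* leftRotate16 */
    }

    int main(void)
    {
        uint32_t x = 0xAABBCCDDu;
        printf("ell(%08lX) = %08lX\n", (unsigned long)x, (unsigned long)ell(x));
        /* Prints ell(AABBCCDD) = CCDD6666. */
        return 0;
    }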
- s[6] = x3; - s[7] = y3; - s[8] = x4; - s[9] = y4; - s[10] = x5; - s[11] = y5; - s[12] = x6; - s[13] = y6; - s[14] = x7; - s[15] = y7; -#else - le_store_word32((uint8_t *)&(s[0]), x0); - le_store_word32((uint8_t *)&(s[1]), y0); - le_store_word32((uint8_t *)&(s[2]), x1); - le_store_word32((uint8_t *)&(s[3]), y1); - le_store_word32((uint8_t *)&(s[4]), x2); - le_store_word32((uint8_t *)&(s[5]), y2); - le_store_word32((uint8_t *)&(s[6]), x3); - le_store_word32((uint8_t *)&(s[7]), y3); - le_store_word32((uint8_t *)&(s[8]), x4); - le_store_word32((uint8_t *)&(s[9]), y4); - le_store_word32((uint8_t *)&(s[10]), x5); - le_store_word32((uint8_t *)&(s[11]), y5); - le_store_word32((uint8_t *)&(s[12]), x6); - le_store_word32((uint8_t *)&(s[13]), y6); - le_store_word32((uint8_t *)&(s[14]), x7); - le_store_word32((uint8_t *)&(s[15]), y7); -#endif -} - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/internal-sparkle.h b/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/internal-sparkle.h deleted file mode 100644 index fbdabc1..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/internal-sparkle.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SPARKLE_H -#define LW_INTERNAL_SPARKLE_H - -#include "internal-util.h" - -/** - * \file internal-sparkle.h - * \brief Internal implementation of the SPARKLE permutation. - * - * References: https://www.cryptolux.org/index.php/Sparkle - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the state for SPARKLE-256. - */ -#define SPARKLE_256_STATE_SIZE 8 - -/** - * \brief Size of the state for SPARKLE-384. - */ -#define SPARKLE_384_STATE_SIZE 12 - -/** - * \brief Size of the state for SPARKLE-512. - */ -#define SPARKLE_512_STATE_SIZE 16 - -/** - * \brief Performs the SPARKLE-256 permutation. - * - * \param s The words of the SPARKLE-256 state in little-endian byte order. - * \param steps The number of steps to perform, 7 or 10. - */ -void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps); - -/** - * \brief Performs the SPARKLE-384 permutation. - * - * \param s The words of the SPARKLE-384 state in little-endian byte order. - * \param steps The number of steps to perform, 7 or 11. - */ -void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps); - -/** - * \brief Performs the SPARKLE-512 permutation. 
- * - * \param s The words of the SPARKLE-512 state in little-endian byte order. - * \param steps The number of steps to perform, 8 or 12. - */ -void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/internal-util.h b/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
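The load/store helpers above let the state be read and written as a byte buffer regardless of host byte order; the LW_UTIL_LITTLE_ENDIAN shortcut in the permutation code is valid because, on a little-endian host, le_load_word32() applied to a word's own bytes returns the word unchanged. A small self-contained sketch, with the two macros restated as functions so it compiles on its own:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Function restatements of the le_load_word32 / le_store_word32 macros above. */
    static uint32_t le_load_word32(const uint8_t *p)
    {
        return ((uint32_t)p[3] << 24) | ((uint32_t)p[2] << 16) |
               ((uint32_t)p[1] << 8)  |  (uint32_t)p[0];
    }

    static void le_store_word32(uint8_t *p, uint32_t x)
    {
        p[0] = (uint8_t)x;         p[1] = (uint8_t)(x >> 8);
        p[2] = (uint8_t)(x >> 16); p[3] = (uint8_t)(x >> 24);
    }

    int main(void)
    {
        uint8_t buf[4];
        uint32_t w = 0x12345678u;

        le_store_word32(buf, w);                    /* buf = 78 56 34 12 */
        printf("round trip:  %08lX\n", (unsigned long)le_load_word32(buf));

        memcpy(buf, &w, sizeof(w));                 /* native byte order */
        printf("native load: %08lX\n", (unsigned long)le_load_word32(buf));
        /* Second line prints 12345678 on a little-endian host, 78563412 on big-endian. */
        return 0;
    }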
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
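The composed-rotation branch above relies on every rotation count being reachable from rotations by 1 and by multiples of 8, the cheap cases on AVR. A quick self-contained check of one such identity used above, leftRotate5(x) == rightRotate3(leftRotate8(x)), with an arbitrary sample value:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t rol32(uint32_t v, unsigned b) { return (v << b) | (v >> (32u - b)); }
    static uint32_t ror32(uint32_t v, unsigned b) { return (v >> b) | (v << (32u - b)); }

    int main(void)
    {
        uint32_t x = 0xC0FFEE11u;
        uint32_t direct   = rol32(x, 5);
        uint32_t composed = ror32(ror32(ror32(rol32(x, 8), 1), 1), 1);
        assert(direct == composed);    /* rotate left 8, then right 3, is rotate left 5 */
        printf("leftRotate5: direct=%08lX composed=%08lX\n",
               (unsigned long)direct, (unsigned long)composed);
        return 0;
    }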
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/sparkle.c b/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/sparkle.c deleted file mode 100644 index e2aa25a..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/sparkle.c +++ /dev/null @@ -1,1135 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "sparkle.h" -#include "internal-sparkle.h" -#include - -aead_cipher_t const schwaemm_256_128_cipher = { - "Schwaemm256-128", - SCHWAEMM_256_128_KEY_SIZE, - SCHWAEMM_256_128_NONCE_SIZE, - SCHWAEMM_256_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_256_128_aead_encrypt, - schwaemm_256_128_aead_decrypt -}; - -aead_cipher_t const schwaemm_192_192_cipher = { - "Schwaemm192-192", - SCHWAEMM_192_192_KEY_SIZE, - SCHWAEMM_192_192_NONCE_SIZE, - SCHWAEMM_192_192_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_192_192_aead_encrypt, - schwaemm_192_192_aead_decrypt -}; - -aead_cipher_t const schwaemm_128_128_cipher = { - "Schwaemm128-128", - SCHWAEMM_128_128_KEY_SIZE, - SCHWAEMM_128_128_NONCE_SIZE, - SCHWAEMM_128_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_128_128_aead_encrypt, - schwaemm_128_128_aead_decrypt -}; - -aead_cipher_t const schwaemm_256_256_cipher = { - "Schwaemm256-256", - SCHWAEMM_256_256_KEY_SIZE, - SCHWAEMM_256_256_NONCE_SIZE, - SCHWAEMM_256_256_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_256_256_aead_encrypt, - schwaemm_256_256_aead_decrypt -}; - -aead_hash_algorithm_t const esch_256_hash_algorithm = { - "Esch256", - sizeof(esch_256_hash_state_t), - ESCH_256_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - esch_256_hash, - (aead_hash_init_t)esch_256_hash_init, - (aead_hash_update_t)esch_256_hash_update, - (aead_hash_finalize_t)esch_256_hash_finalize, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const esch_384_hash_algorithm = { - "Esch384", - sizeof(esch_384_hash_state_t), - ESCH_384_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - esch_384_hash, - (aead_hash_init_t)esch_384_hash_init, - (aead_hash_update_t)esch_384_hash_update, - (aead_hash_finalize_t)esch_384_hash_finalize, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \def DOMAIN(value) - * \brief Build a domain separation value as a 32-bit word. - * - * \param value The base value. - * \return The domain separation value as a 32-bit word. - */ -#if defined(LW_UTIL_LITTLE_ENDIAN) -#define DOMAIN(value) (((uint32_t)(value)) << 24) -#else -#define DOMAIN(value) (value) -#endif - -/** - * \brief Rate at which bytes are processed by Schwaemm256-128. - */ -#define SCHWAEMM_256_128_RATE 32 - -/** - * \brief Pointer to the left of the state for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_RIGHT(s) \ - (SCHWAEMM_256_128_LEFT(s) + SCHWAEMM_256_128_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm256-128. - * - * \param s SPARKLE-384 state. - */ -#define schwaemm_256_128_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[4] ^ s[8]; \ - s[4] ^= t ^ s[8]; \ - t = s[1]; \ - s[1] = s[5] ^ s[9]; \ - s[5] ^= t ^ s[9]; \ - t = s[2]; \ - s[2] = s[6] ^ s[10]; \ - s[6] ^= t ^ s[10]; \ - t = s[3]; \ - s[3] = s[7] ^ s[11]; \ - s[7] ^= t ^ s[11]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm256-128. - * - * \param s SPARKLE-384 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
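The schwaemm_256_128_rho() macro above combines the rho1 rate feedback (a swap-and-XOR of the two 16-byte halves of the 32-byte rate, state words s[0..7]) with rate whitening from the capacity words s[8..11]. A functionally identical restatement as a loop, applied to an arbitrary sample state rather than a test vector:

    #include <stdint.h>
    #include <stdio.h>

    /* Same data flow as the schwaemm_256_128_rho(s) macro above. */
    static void schwaemm_256_128_rho(uint32_t s[12])
    {
        for (int i = 0; i < 4; ++i) {
            uint32_t t = s[i];                /* old left rate word                      */
            s[i]      = s[i + 4] ^ s[i + 8];  /* new left  = old right ^ capacity word   */
            s[i + 4] ^= t ^ s[i + 8];         /* new right = old right ^ old left ^ cap. */
        }
    }

    int main(void)
    {
        uint32_t s[12];
        for (int i = 0; i < 12; ++i)
            s[i] = (uint32_t)((uint32_t)i * 0x11111111UL);  /* arbitrary fill pattern */
        schwaemm_256_128_rho(s);
        for (int i = 0; i < 12; ++i)
            printf("s[%2d] = %08lX\n", i, (unsigned long)s[i]);
        return 0;
    }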
- */ -static void schwaemm_256_128_authenticate - (uint32_t s[SPARKLE_384_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_256_128_RATE) { - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); - sparkle_384(s, 7); - ad += SCHWAEMM_256_128_RATE; - adlen -= SCHWAEMM_256_128_RATE; - } - if (adlen == SCHWAEMM_256_128_RATE) { - s[11] ^= DOMAIN(0x05); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[11] ^= DOMAIN(0x04); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); -} - -int schwaemm_256_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - uint8_t block[SCHWAEMM_256_128_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_256_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_128_LEFT(s), npub, SCHWAEMM_256_128_NONCE_SIZE); - memcpy(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_128_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - sparkle_384(s, 7); - memcpy(c, block, SCHWAEMM_256_128_RATE); - c += SCHWAEMM_256_128_RATE; - m += SCHWAEMM_256_128_RATE; - mlen -= SCHWAEMM_256_128_RATE; - } - if (mlen == SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); - s[11] ^= DOMAIN(0x07); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - memcpy(c, block, SCHWAEMM_256_128_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[11] ^= DOMAIN(0x06); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_384(s, 11); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_TAG_SIZE); - return 0; -} - -int schwaemm_256_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_256_128_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_256_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_128_LEFT(s), npub, SCHWAEMM_256_128_NONCE_SIZE); - memcpy(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_128_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_256_128_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - sparkle_384(s, 7); - c += SCHWAEMM_256_128_RATE; - m += SCHWAEMM_256_128_RATE; - clen -= SCHWAEMM_256_128_RATE; - } - if (clen == SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); - s[11] ^= DOMAIN(0x07); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[11] ^= DOMAIN(0x06); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_256_128_RIGHT(s), c, SCHWAEMM_256_128_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Schwaemm192-192. - */ -#define SCHWAEMM_192_192_RATE 24 - -/** - * \brief Pointer to the left of the state for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_RIGHT(s) \ - (SCHWAEMM_192_192_LEFT(s) + SCHWAEMM_192_192_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm192-192. - * - * \param s SPARKLE-384 state. - */ -#define schwaemm_192_192_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[3] ^ s[6]; \ - s[3] ^= t ^ s[9]; \ - t = s[1]; \ - s[1] = s[4] ^ s[7]; \ - s[4] ^= t ^ s[10]; \ - t = s[2]; \ - s[2] = s[5] ^ s[8]; \ - s[5] ^= t ^ s[11]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm192-192. - * - * \param s SPARKLE-384 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
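With the Schwaemm256-128 encrypt and decrypt routines complete (the remaining variants below follow the same pattern), here is a hedged usage sketch of that pair. It assumes the public sparkle.h header, which is not part of this hunk, declares these two functions and the SCHWAEMM_256_128_*_SIZE macros exactly as they are used above, and it must be linked against the implementation; the key, nonce and message contents are placeholders.

    #include <stdio.h>
    #include <string.h>
    #include "sparkle.h"

    int main(void)
    {
        unsigned char key[SCHWAEMM_256_128_KEY_SIZE] = {0};     /* 16-byte key   */
        unsigned char npub[SCHWAEMM_256_128_NONCE_SIZE] = {0};  /* 32-byte nonce */
        unsigned char msg[] = "hello sparkle";
        unsigned char ad[]  = "header";
        unsigned char ct[sizeof(msg) + SCHWAEMM_256_128_TAG_SIZE];
        unsigned char out[sizeof(msg)];
        unsigned long long ctlen, outlen;

        schwaemm_256_128_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                                      ad, sizeof(ad), NULL, npub, key);

        /* 0 means the tag verified, matching the convention used above. */
        if (schwaemm_256_128_aead_decrypt(out, &outlen, NULL, ct, ctlen,
                                          ad, sizeof(ad), npub, key) == 0)
            printf("decrypted %llu bytes: %s\n", outlen, (const char *)out);
        return 0;
    }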
- */ -static void schwaemm_192_192_authenticate - (uint32_t s[SPARKLE_384_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_192_192_RATE) { - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); - sparkle_384(s, 7); - ad += SCHWAEMM_192_192_RATE; - adlen -= SCHWAEMM_192_192_RATE; - } - if (adlen == SCHWAEMM_192_192_RATE) { - s[11] ^= DOMAIN(0x09); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[11] ^= DOMAIN(0x08); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); -} - -int schwaemm_192_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - uint8_t block[SCHWAEMM_192_192_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_192_192_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_192_192_LEFT(s), npub, SCHWAEMM_192_192_NONCE_SIZE); - memcpy(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_192_192_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - sparkle_384(s, 7); - memcpy(c, block, SCHWAEMM_192_192_RATE); - c += SCHWAEMM_192_192_RATE; - m += SCHWAEMM_192_192_RATE; - mlen -= SCHWAEMM_192_192_RATE; - } - if (mlen == SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); - s[11] ^= DOMAIN(0x0B); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - memcpy(c, block, SCHWAEMM_192_192_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[11] ^= DOMAIN(0x0A); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_384(s, 11); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_TAG_SIZE); - return 0; -} - -int schwaemm_192_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_192_192_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_192_192_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_192_192_LEFT(s), npub, SCHWAEMM_192_192_NONCE_SIZE); - memcpy(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_192_192_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_192_192_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - sparkle_384(s, 7); - c += SCHWAEMM_192_192_RATE; - m += SCHWAEMM_192_192_RATE; - clen -= SCHWAEMM_192_192_RATE; - } - if (clen == SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); - s[11] ^= DOMAIN(0x0B); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[11] ^= DOMAIN(0x0A); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_192_192_RIGHT(s), c, SCHWAEMM_192_192_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Schwaemm128-128. - */ -#define SCHWAEMM_128_128_RATE 16 - -/** - * \brief Pointer to the left of the state for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_RIGHT(s) \ - (SCHWAEMM_128_128_LEFT(s) + SCHWAEMM_128_128_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm128-128. - * - * \param s SPARKLE-256 state. - */ -#define schwaemm_128_128_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[2] ^ s[4]; \ - s[2] ^= t ^ s[6]; \ - t = s[1]; \ - s[1] = s[3] ^ s[5]; \ - s[3] ^= t ^ s[7]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm128-128. - * - * \param s SPARKLE-256 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
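The DOMAIN() macro used by the authenticate and finalize steps above is defined so that the domain-separation constant always lands in the final byte of the state: on a little-endian host the constant is shifted into the most significant byte of the last state word (the "<< 24" branch), while on a big-endian host the unshifted branch places it in the same final byte, because that word's least significant byte is then stored last. A small sketch of the little-endian case, restating that branch as a local DOMAIN_LE() macro and assuming a little-endian host:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Little-endian branch of the DOMAIN() macro above, restated locally. */
    #define DOMAIN_LE(value) (((uint32_t)(value)) << 24)

    int main(void)
    {
        uint32_t s[12] = {0};             /* SPARKLE-384 state: 48 bytes              */
        unsigned char bytes[48];

        s[11] ^= DOMAIN_LE(0x04);         /* as in schwaemm_256_128_authenticate above */
        memcpy(bytes, s, sizeof(bytes));
        printf("last state byte = %02X\n", bytes[47]);   /* 04 on a little-endian host */
        return 0;
    }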
- */ -static void schwaemm_128_128_authenticate - (uint32_t s[SPARKLE_256_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_128_128_RATE) { - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); - sparkle_256(s, 7); - ad += SCHWAEMM_128_128_RATE; - adlen -= SCHWAEMM_128_128_RATE; - } - if (adlen == SCHWAEMM_128_128_RATE) { - s[7] ^= DOMAIN(0x05); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[7] ^= DOMAIN(0x04); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_256(s, 10); -} - -int schwaemm_128_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_256_STATE_SIZE]; - uint8_t block[SCHWAEMM_128_128_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_128_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_128_128_LEFT(s), npub, SCHWAEMM_128_128_NONCE_SIZE); - memcpy(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_KEY_SIZE); - sparkle_256(s, 10); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_128_128_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - sparkle_256(s, 7); - memcpy(c, block, SCHWAEMM_128_128_RATE); - c += SCHWAEMM_128_128_RATE; - m += SCHWAEMM_128_128_RATE; - mlen -= SCHWAEMM_128_128_RATE; - } - if (mlen == SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); - s[7] ^= DOMAIN(0x07); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - memcpy(c, block, SCHWAEMM_128_128_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[7] ^= DOMAIN(0x06); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_256(s, 10); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_TAG_SIZE); - return 0; -} - -int schwaemm_128_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_256_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_128_128_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_128_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_128_128_LEFT(s), npub, SCHWAEMM_128_128_NONCE_SIZE); - memcpy(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_KEY_SIZE); - sparkle_256(s, 10); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_128_128_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_128_128_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - sparkle_256(s, 7); - c += SCHWAEMM_128_128_RATE; - m += SCHWAEMM_128_128_RATE; - clen -= SCHWAEMM_128_128_RATE; - } - if (clen == SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); - s[7] ^= DOMAIN(0x07); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[7] ^= DOMAIN(0x06); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_256(s, 10); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_128_128_RIGHT(s), c, SCHWAEMM_128_128_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Schwaemm256-256. - */ -#define SCHWAEMM_256_256_RATE 32 - -/** - * \brief Pointer to the left of the state for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_RIGHT(s) \ - (SCHWAEMM_256_256_LEFT(s) + SCHWAEMM_256_256_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm256-256. - * - * \param s SPARKLE-512 state. - */ -#define schwaemm_256_256_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[4] ^ s[8]; \ - s[4] ^= t ^ s[12]; \ - t = s[1]; \ - s[1] = s[5] ^ s[9]; \ - s[5] ^= t ^ s[13]; \ - t = s[2]; \ - s[2] = s[6] ^ s[10]; \ - s[6] ^= t ^ s[14]; \ - t = s[3]; \ - s[3] = s[7] ^ s[11]; \ - s[7] ^= t ^ s[15]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm256-256. - * - * \param s SPARKLE-512 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
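Every authenticate and encrypt/decrypt routine in this file closes its data phase the same way: a block-aligned final block folds one domain constant into the last state word, a short final block folds in the companion constant plus a 0x80 padding byte just past the data, and then the variant's big-round permutation runs. A hedged sketch of that shared rule, written against the Schwaemm128-128 helpers shown above (DOMAIN, lw_xor_block, schwaemm_128_128_rho and sparkle_256 are the helpers the surrounding code already uses); the function name is illustrative:

/* Sketch: last-block absorption as performed by schwaemm_128_128_authenticate.
 * 1 <= len <= SCHWAEMM_128_128_RATE.  Other variants change only the rate,
 * the state word that takes the constant, and the constants themselves
 * (0x04/0x05 here, 0x08/0x09 for Schwaemm192-192, 0x10/0x11 for Schwaemm256-256). */
static void schwaemm_128_128_last_ad_block
    (uint32_t s[SPARKLE_256_STATE_SIZE], const unsigned char *ad, unsigned len)
{
    s[7] ^= DOMAIN(len == SCHWAEMM_128_128_RATE ? 0x05 : 0x04);
    schwaemm_128_128_rho(s);
    lw_xor_block((unsigned char *)s, ad, len);
    if (len < SCHWAEMM_128_128_RATE)
        ((unsigned char *)s)[len] ^= 0x80;    /* 10* padding */
    sparkle_256(s, 10);                       /* big-round permutation */
}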
- */ -static void schwaemm_256_256_authenticate - (uint32_t s[SPARKLE_512_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_256_256_RATE) { - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); - sparkle_512(s, 8); - ad += SCHWAEMM_256_256_RATE; - adlen -= SCHWAEMM_256_256_RATE; - } - if (adlen == SCHWAEMM_256_256_RATE) { - s[15] ^= DOMAIN(0x11); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[15] ^= DOMAIN(0x10); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_512(s, 12); -} - -int schwaemm_256_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_512_STATE_SIZE]; - uint8_t block[SCHWAEMM_256_256_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_256_256_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_256_LEFT(s), npub, SCHWAEMM_256_256_NONCE_SIZE); - memcpy(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_KEY_SIZE); - sparkle_512(s, 12); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_256_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - sparkle_512(s, 8); - memcpy(c, block, SCHWAEMM_256_256_RATE); - c += SCHWAEMM_256_256_RATE; - m += SCHWAEMM_256_256_RATE; - mlen -= SCHWAEMM_256_256_RATE; - } - if (mlen == SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); - s[15] ^= DOMAIN(0x13); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - memcpy(c, block, SCHWAEMM_256_256_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[15] ^= DOMAIN(0x12); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_512(s, 12); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_TAG_SIZE); - return 0; -} - -int schwaemm_256_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_512_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_256_256_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_256_256_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_256_LEFT(s), npub, SCHWAEMM_256_256_NONCE_SIZE); - memcpy(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_KEY_SIZE); - sparkle_512(s, 12); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_256_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_256_256_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - sparkle_512(s, 8); - c += SCHWAEMM_256_256_RATE; - m += SCHWAEMM_256_256_RATE; - clen -= SCHWAEMM_256_256_RATE; - } - if (clen == SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); - s[15] ^= DOMAIN(0x13); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[15] ^= DOMAIN(0x12); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_512(s, 12); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_256_256_RIGHT(s), c, SCHWAEMM_256_256_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Esch256. - */ -#define ESCH_256_RATE 16 - -/** - * \brief Perform the M3 step for Esch256 to mix the input with the state. - * - * \param s SPARKLE-384 state. - * \param block Block of input data that has been padded to the rate. - * \param domain Domain separator for this phase. - */ -#define esch_256_m3(s, block, domain) \ - do { \ - uint32_t tx = (block)[0] ^ (block)[2]; \ - uint32_t ty = (block)[1] ^ (block)[3]; \ - tx = leftRotate16(tx ^ (tx << 16)); \ - ty = leftRotate16(ty ^ (ty << 16)); \ - s[0] ^= (block)[0] ^ ty; \ - s[1] ^= (block)[1] ^ tx; \ - s[2] ^= (block)[2] ^ ty; \ - s[3] ^= (block)[3] ^ tx; \ - if ((domain) != 0) \ - s[5] ^= DOMAIN(domain); \ - s[4] ^= ty; \ - s[5] ^= tx; \ - } while (0) - -/** @cond esch_256 */ - -/** - * \brief Word-based state for the Esch256 incremental hash mode. 
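The esch_256_m3 macro above interleaves two things: the ell function from the SPARKLE linear layer, applied to the column sums of the 16-byte block, and the xor of the block plus those sums into the first six state words. The same step in function form, reusing leftRotate16 and DOMAIN exactly as the macro does; esch_256_ell and esch_256_m3_sketch are illustrative names:

/* The ell() linear function used by the M3 absorption step above. */
static uint32_t esch_256_ell(uint32_t x)
{
    return leftRotate16(x ^ (x << 16));
}

/* Sketch: esch_256_m3 written out as a function (domain is 0x00, 0x01, or 0x02). */
static void esch_256_m3_sketch
    (uint32_t s[SPARKLE_384_STATE_SIZE], const uint32_t block[4], unsigned char domain)
{
    uint32_t tx = esch_256_ell(block[0] ^ block[2]);
    uint32_t ty = esch_256_ell(block[1] ^ block[3]);
    s[0] ^= block[0] ^ ty;
    s[1] ^= block[1] ^ tx;
    s[2] ^= block[2] ^ ty;
    s[3] ^= block[3] ^ tx;
    if (domain != 0)
        s[5] ^= DOMAIN(domain);   /* domain separation on the final block */
    s[4] ^= ty;
    s[5] ^= tx;
}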
- */ -typedef union -{ - struct { - uint32_t state[SPARKLE_384_STATE_SIZE]; - uint32_t block[4]; - unsigned char count; - } s; - unsigned long long align; - -} esch_256_hash_state_wt; - -/** @endcond */ - -int esch_256_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - uint32_t block[ESCH_256_RATE / 4]; - memset(s, 0, sizeof(s)); - while (inlen > ESCH_256_RATE) { - memcpy(block, in, ESCH_256_RATE); - esch_256_m3(s, block, 0x00); - sparkle_384(s, 7); - in += ESCH_256_RATE; - inlen -= ESCH_256_RATE; - } - if (inlen == ESCH_256_RATE) { - memcpy(block, in, ESCH_256_RATE); - esch_256_m3(s, block, 0x02); - } else { - unsigned temp = (unsigned)inlen; - memcpy(block, in, temp); - ((unsigned char *)block)[temp] = 0x80; - memset(((unsigned char *)block) + temp + 1, 0, - ESCH_256_RATE - temp - 1); - esch_256_m3(s, block, 0x01); - } - sparkle_384(s, 11); - memcpy(out, s, ESCH_256_RATE); - sparkle_384(s, 7); - memcpy(out + ESCH_256_RATE, s, ESCH_256_RATE); - return 0; -} - -void esch_256_hash_init(esch_256_hash_state_t *state) -{ - memset(state, 0, sizeof(esch_256_hash_state_t)); -} - -void esch_256_hash_update - (esch_256_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - esch_256_hash_state_wt *st = (esch_256_hash_state_wt *)state; - unsigned temp; - while (inlen > 0) { - if (st->s.count == ESCH_256_RATE) { - esch_256_m3(st->s.state, st->s.block, 0x00); - sparkle_384(st->s.state, 7); - st->s.count = 0; - } - temp = ESCH_256_RATE - st->s.count; - if (temp > inlen) - temp = (unsigned)inlen; - memcpy(((unsigned char *)(st->s.block)) + st->s.count, in, temp); - st->s.count += temp; - in += temp; - inlen -= temp; - } -} - -void esch_256_hash_finalize - (esch_256_hash_state_t *state, unsigned char *out) -{ - esch_256_hash_state_wt *st = (esch_256_hash_state_wt *)state; - - /* Pad and process the last block */ - if (st->s.count == ESCH_256_RATE) { - esch_256_m3(st->s.state, st->s.block, 0x02); - } else { - unsigned temp = st->s.count; - ((unsigned char *)(st->s.block))[temp] = 0x80; - memset(((unsigned char *)(st->s.block)) + temp + 1, 0, - ESCH_256_RATE - temp - 1); - esch_256_m3(st->s.state, st->s.block, 0x01); - } - sparkle_384(st->s.state, 11); - - /* Generate the final hash value */ - memcpy(out, st->s.state, ESCH_256_RATE); - sparkle_384(st->s.state, 7); - memcpy(out + ESCH_256_RATE, st->s.state, ESCH_256_RATE); -} - -/** - * \brief Rate at which bytes are processed by Esch384. - */ -#define ESCH_384_RATE 16 - -/** - * \brief Perform the M4 step for Esch384 to mix the input with the state. - * - * \param s SPARKLE-512 state. - * \param block Block of input data that has been padded to the rate. - * \param domain Domain separator for this phase. - */ -#define esch_384_m4(s, block, domain) \ - do { \ - uint32_t tx = block[0] ^ block[2]; \ - uint32_t ty = block[1] ^ block[3]; \ - tx = leftRotate16(tx ^ (tx << 16)); \ - ty = leftRotate16(ty ^ (ty << 16)); \ - s[0] ^= block[0] ^ ty; \ - s[1] ^= block[1] ^ tx; \ - s[2] ^= block[2] ^ ty; \ - s[3] ^= block[3] ^ tx; \ - if ((domain) != 0) \ - s[7] ^= DOMAIN(domain); \ - s[4] ^= ty; \ - s[5] ^= tx; \ - s[6] ^= ty; \ - s[7] ^= tx; \ - } while (0) - -/** @cond esch_384 */ - -/** - * \brief Word-based state for the Esch384 incremental hash mode. 
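The incremental Esch256 API defined above buffers partial blocks and defers the last block to finalize, so splitting the input across several updates yields the same digest as the all-in-one esch_256_hash(). A usage sketch, assuming sparkle.h is included (ESCH_256_HASH_SIZE is 32); the data values and split point are placeholders:

#include <string.h>

int esch_256_incremental_example(void)
{
    static const unsigned char data[] = "an input split across two updates";
    unsigned char digest1[ESCH_256_HASH_SIZE];
    unsigned char digest2[ESCH_256_HASH_SIZE];
    esch_256_hash_state_t state;

    /* One-shot hash of the whole buffer. */
    esch_256_hash(digest1, data, sizeof(data));

    /* Same input fed incrementally in two chunks. */
    esch_256_hash_init(&state);
    esch_256_hash_update(&state, data, 10);
    esch_256_hash_update(&state, data + 10, sizeof(data) - 10);
    esch_256_hash_finalize(&state, digest2);

    return memcmp(digest1, digest2, ESCH_256_HASH_SIZE) == 0 ? 0 : -1;
}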
- */ -typedef union -{ - struct { - uint32_t state[SPARKLE_512_STATE_SIZE]; - uint32_t block[4]; - unsigned char count; - } s; - unsigned long long align; - -} esch_384_hash_state_wt; - -/** @endcond */ - -int esch_384_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - uint32_t s[SPARKLE_512_STATE_SIZE]; - uint32_t block[ESCH_256_RATE / 4]; - memset(s, 0, sizeof(s)); - while (inlen > ESCH_384_RATE) { - memcpy(block, in, ESCH_384_RATE); - esch_384_m4(s, block, 0x00); - sparkle_512(s, 8); - in += ESCH_384_RATE; - inlen -= ESCH_384_RATE; - } - if (inlen == ESCH_384_RATE) { - memcpy(block, in, ESCH_384_RATE); - esch_384_m4(s, block, 0x02); - } else { - unsigned temp = (unsigned)inlen; - memcpy(block, in, temp); - ((unsigned char *)block)[temp] = 0x80; - memset(((unsigned char *)block) + temp + 1, 0, - ESCH_384_RATE - temp - 1); - esch_384_m4(s, block, 0x01); - } - sparkle_512(s, 12); - memcpy(out, s, ESCH_384_RATE); - sparkle_512(s, 8); - memcpy(out + ESCH_384_RATE, s, ESCH_384_RATE); - sparkle_512(s, 8); - memcpy(out + ESCH_384_RATE * 2, s, ESCH_384_RATE); - return 0; -} - -void esch_384_hash_init(esch_384_hash_state_t *state) -{ - memset(state, 0, sizeof(esch_384_hash_state_t)); -} - -void esch_384_hash_update - (esch_384_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - esch_384_hash_state_wt *st = (esch_384_hash_state_wt *)state; - unsigned temp; - while (inlen > 0) { - if (st->s.count == ESCH_384_RATE) { - esch_384_m4(st->s.state, st->s.block, 0x00); - sparkle_512(st->s.state, 8); - st->s.count = 0; - } - temp = ESCH_384_RATE - st->s.count; - if (temp > inlen) - temp = (unsigned)inlen; - memcpy(((unsigned char *)(st->s.block)) + st->s.count, in, temp); - st->s.count += temp; - in += temp; - inlen -= temp; - } -} - -void esch_384_hash_finalize - (esch_384_hash_state_t *state, unsigned char *out) -{ - esch_384_hash_state_wt *st = (esch_384_hash_state_wt *)state; - - /* Pad and process the last block */ - if (st->s.count == ESCH_384_RATE) { - esch_384_m4(st->s.state, st->s.block, 0x02); - } else { - unsigned temp = st->s.count; - ((unsigned char *)(st->s.block))[temp] = 0x80; - memset(((unsigned char *)(st->s.block)) + temp + 1, 0, - ESCH_384_RATE - temp - 1); - esch_384_m4(st->s.state, st->s.block, 0x01); - } - sparkle_512(st->s.state, 12); - - /* Generate the final hash value */ - memcpy(out, st->s.state, ESCH_384_RATE); - sparkle_512(st->s.state, 8); - memcpy(out + ESCH_384_RATE, st->s.state, ESCH_384_RATE); - sparkle_512(st->s.state, 8); - memcpy(out + ESCH_384_RATE * 2, st->s.state, ESCH_384_RATE); -} diff --git a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/sparkle.h b/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/sparkle.h deleted file mode 100644 index dd0999e..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys-avr/sparkle.h +++ /dev/null @@ -1,515 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SPARKLE_H -#define LWCRYPTO_SPARKLE_H - -#include "aead-common.h" - -/** - * \file sparkle.h - * \brief Encryption and hash algorithms based on the SPARKLE permutation. - * - * SPARKLE is a family of encryption and hash algorithms that are based - * around the SPARKLE permutation. There are three versions of the - * permutation with 256-bit, 384-bit, and 512-bit state sizes. - * The algorithms in the family are: - * - * \li Schwaemm256-128 with a 128-bit key, a 256-bit nonce, and a 128-bit tag. - * This is the primary encryption algorithm in the family. - * \li Schwaemm192-192 with a 192-bit key, a 192-bit nonce, and a 192-bit tag. - * \li Schwaemm128-128 with a 128-bit key, a 128-bit nonce, and a 128-bit tag. - * \li Schwaemm256-256 with a 256-bit key, a 256-bit nonce, and a 256-bit tag. - * \li Esch256 hash algorithm with a 256-bit digest output. This is the - * primary hash algorithm in the family. - * \li Esch384 hash algorithm with a 384-bit digest output. - * - * References: https://www.cryptolux.org/index.php/Sparkle - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_NONCE_SIZE 32 - -/** - * \brief Size of the key for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_KEY_SIZE 24 - -/** - * \brief Size of the authentication tag for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_TAG_SIZE 24 - -/** - * \brief Size of the nonce for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_NONCE_SIZE 24 - -/** - * \brief Size of the key for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_NONCE_SIZE 16 - -/** - * \brief Size of the key for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_TAG_SIZE 32 - -/** - * \brief Size of the nonce for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_NONCE_SIZE 32 - -/** - * \brief Size of the hash output for Esch256. 
- */ -#define ESCH_256_HASH_SIZE 32 - -/** - * \brief Size of the hash output for Esch384. - */ -#define ESCH_384_HASH_SIZE 48 - -/** - * \brief Meta-information block for the Schwaemm256-128 cipher. - */ -extern aead_cipher_t const schwaemm_256_128_cipher; - -/** - * \brief Meta-information block for the Schwaemm192-192 cipher. - */ -extern aead_cipher_t const schwaemm_192_192_cipher; - -/** - * \brief Meta-information block for the Schwaemm128-128 cipher. - */ -extern aead_cipher_t const schwaemm_128_128_cipher; - -/** - * \brief Meta-information block for the Schwaemm256-256 cipher. - */ -extern aead_cipher_t const schwaemm_256_256_cipher; - -/** - * \brief Meta-information block for the Esch256 hash algorithm. - */ -extern aead_hash_algorithm_t const esch_256_hash_algorithm; - -/** - * \brief Meta-information block for the Esch384 hash algorithm. - */ -extern aead_hash_algorithm_t const esch_384_hash_algorithm; - -/** - * \brief State information for the Esch256 incremental hash mode. - */ -typedef union -{ - struct { - unsigned char state[48]; /**< Current hash state */ - unsigned char block[16]; /**< Partial input data block */ - unsigned char count; /**< Number of bytes in the current block */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} esch_256_hash_state_t; - -/** - * \brief State information for the Esch384 incremental hash mode. - */ -typedef union -{ - struct { - unsigned char state[64]; /**< Current hash state */ - unsigned char block[16]; /**< Partial input data block */ - unsigned char count; /**< Number of bytes in the current block */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} esch_384_hash_state_t; - -/** - * \brief Encrypts and authenticates a packet with Schwaemm256-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 32 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa schwaemm_256_128_aead_decrypt() - */ -int schwaemm_256_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm256-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 32 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_256_128_aead_encrypt() - */ -int schwaemm_256_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Schwaemm192-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 24 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 24 bytes in length. - * \param k Points to the 24 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa schwaemm_192_192_aead_decrypt() - */ -int schwaemm_192_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm192-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 24 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 24 bytes in length. - * \param k Points to the 24 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_192_192_aead_encrypt() - */ -int schwaemm_192_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Schwaemm128-128. - * - * \param c Buffer to receive the output. 
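A usage sketch for the Schwaemm256-128 declarations above: encrypt a short message with associated data, then decrypt and verify it. The key, nonce, and message values are placeholders, and the function name is illustrative; a real caller must use a fresh nonce for every message under the same key:

int schwaemm_256_128_roundtrip_example(void)
{
    unsigned char key[SCHWAEMM_256_128_KEY_SIZE] = {0};      /* placeholder key */
    unsigned char nonce[SCHWAEMM_256_128_NONCE_SIZE] = {0};  /* placeholder nonce */
    unsigned char msg[] = "hello";
    unsigned char ad[] = "header";
    unsigned char ct[sizeof(msg) + SCHWAEMM_256_128_TAG_SIZE]; /* ciphertext + tag */
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    /* nsec is unused by this algorithm, so NULL is fine. */
    schwaemm_256_128_aead_encrypt
        (ct, &ctlen, msg, sizeof(msg), ad, sizeof(ad), NULL, nonce, key);

    /* Returns 0 on success, -1 if the authentication tag does not verify. */
    return schwaemm_256_128_aead_decrypt
        (pt, &ptlen, NULL, ct, ctlen, ad, sizeof(ad), nonce, key);
}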
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa schwaemm_128_128_aead_decrypt() - */ -int schwaemm_128_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm128-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_128_128_aead_encrypt() - */ -int schwaemm_128_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Schwaemm256-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa schwaemm_256_256_aead_decrypt() - */ -int schwaemm_256_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm256-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_256_256_aead_encrypt() - */ -int schwaemm_256_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with Esch256 to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * ESCH_256_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int esch_256_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an Esch256 hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa esch_256_hash_update(), esch_256_hash_finalize(), esch_256_hash() - */ -void esch_256_hash_init(esch_256_hash_state_t *state); - -/** - * \brief Updates an Esch256 state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa esch_256_hash_init(), esch_256_hash_finalize() - */ -void esch_256_hash_update - (esch_256_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an Esch256 hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 32-byte hash value. - * - * \sa esch_256_hash_init(), esch_256_hash_update() - */ -void esch_256_hash_finalize - (esch_256_hash_state_t *state, unsigned char *out); - -/** - * \brief Hashes a block of input data with Esch384 to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * ESCH_384_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. 
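A small usage sketch for the Esch384 declarations in this header: the digest buffer must hold ESCH_384_HASH_SIZE (48) bytes. The input data is a placeholder and the function name is illustrative:

int esch_384_hash_example(void)
{
    static const unsigned char data[] = "input to be hashed";
    unsigned char digest[ESCH_384_HASH_SIZE];
    return esch_384_hash(digest, data, sizeof(data));   /* 0 on success */
}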
- * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int esch_384_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an Esch384 hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa esch_384_hash_update(), esch_384_hash_finalize(), esch_384_hash() - */ -void esch_384_hash_init(esch_384_hash_state_t *state); - -/** - * \brief Updates an Esch384 state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa esch_384_hash_init(), esch_384_hash_finalize() - */ -void esch_384_hash_update - (esch_384_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an Esch384 hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 48-byte hash value. - * - * \sa esch_384_hash_init(), esch_384_hash_update() - */ -void esch_384_hash_finalize - (esch_384_hash_state_t *state, unsigned char *out); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys/internal-sparkle-avr.S b/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys/internal-sparkle-avr.S new file mode 100644 index 0000000..753ea2f --- /dev/null +++ b/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys/internal-sparkle-avr.S @@ -0,0 +1,2887 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global sparkle_256 + .type sparkle_256, @function +sparkle_256: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + push r22 + ld r22,Z + ldd r23,Z+1 + ldd r26,Z+2 + ldd r27,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + rcall 129f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,1 + eor r8,r18 + rcall 129f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,2 + eor r8,r18 + rcall 129f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,3 + eor r8,r18 + rcall 129f + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,4 + eor r8,r18 + rcall 129f + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,5 + eor r8,r18 + rcall 129f + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,6 + eor r8,r18 + rcall 129f + pop r18 + cpi r18,7 + brne 5094f + rjmp 615f +5094: + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,7 + eor r8,r18 + rcall 129f + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,8 + eor 
r8,r18 + rcall 129f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,9 + eor r8,r18 + rcall 129f + rjmp 615f +129: + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + movw r12,r22 + movw r14,r26 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + movw r24,r4 + movw r16,r6 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r28,Z+24 + ldd r29,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc 
r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + eor r14,r12 + eor r15,r13 + eor r16,r24 + eor r17,r25 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + eor r14,r8 + eor r15,r9 + eor r12,r10 + eor r13,r11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + std Z+20,r18 + std Z+21,r19 + std Z+22,r20 + std Z+23,r21 + movw r18,r4 + movw r20,r6 + movw r4,r14 + movw r6,r12 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + movw r8,r18 + movw r10,r20 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + eor r16,r28 + eor r17,r29 + eor r24,r2 + eor r25,r3 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + std Z+24,r28 + std Z+25,r29 + std Z+26,r2 + std Z+27,r3 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + movw r14,r22 + movw r12,r26 + eor r14,r18 + eor r15,r19 + eor r12,r20 + eor r13,r21 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + movw r22,r16 + movw r26,r24 + eor r22,r28 + eor r23,r29 + eor r26,r2 + eor r27,r3 + movw r28,r14 + movw r2,r12 + ret +615: + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sparkle_256, .-sparkle_256 + + .text +.global sparkle_384 + .type sparkle_384, @function +sparkle_384: + push r28 + push r29 + push r2 + push r3 + 
push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + push r22 + ld r22,Z + ldd r23,Z+1 + ldd r26,Z+2 + ldd r27,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + rcall 140f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,1 + eor r8,r18 + rcall 140f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,2 + eor r8,r18 + rcall 140f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,3 + eor r8,r18 + rcall 140f + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,4 + eor r8,r18 + rcall 140f + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,5 + eor r8,r18 + rcall 140f + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,6 + eor r8,r18 + rcall 140f + pop r18 + cpi r18,7 + brne 5094f + rjmp 886f +5094: + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,7 + eor r8,r18 + rcall 140f + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,8 + eor r8,r18 + rcall 140f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,9 + eor r8,r18 + rcall 140f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,10 + eor r8,r18 + rcall 140f + rjmp 886f +140: + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr 
r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + movw r12,r22 + movw r14,r26 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + movw r24,r4 + movw r16,r6 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r28,Z+24 + ldd r29,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + 
eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r22 + std Z+17,r23 + std Z+18,r26 + std Z+19,r27 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r28 + std Z+25,r29 + std Z+26,r2 + std Z+27,r3 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + eor r12,r22 + eor r13,r23 + eor r14,r26 + eor r15,r27 + eor r24,r4 + eor r25,r5 + eor r16,r6 + eor r17,r7 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r28,Z+40 + ldd r29,Z+41 + ldd r2,Z+42 + ldd r3,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + eor r14,r12 + eor r15,r13 + eor r16,r24 + eor r17,r25 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + eor r14,r8 + eor r15,r9 + eor r12,r10 + eor r13,r11 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + ldd r0,Z+4 + eor r18,r0 + ldd r0,Z+5 + eor r19,r0 + ldd r0,Z+6 + eor r20,r0 + ldd r0,Z+7 
+ eor r21,r0 + std Z+20,r18 + std Z+21,r19 + std Z+22,r20 + std Z+23,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Z+28,r18 + std Z+29,r19 + std Z+30,r20 + std Z+31,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + std Z+36,r18 + std Z+37,r19 + std Z+38,r20 + std Z+39,r21 + eor r8,r14 + eor r9,r15 + eor r10,r12 + eor r11,r13 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + eor r18,r16 + eor r19,r17 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + eor r16,r28 + eor r17,r29 + eor r24,r2 + eor r25,r3 + ldd r28,Z+16 + ldd r29,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + std Z+40,r28 + std Z+41,r29 + std Z+42,r2 + std Z+43,r3 + ld r14,Z + ldd r15,Z+1 + ldd r12,Z+2 + ldd r13,Z+3 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + std Z+24,r14 + std Z+25,r15 + std Z+26,r12 + std Z+27,r13 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + std Z+32,r18 + std Z+33,r19 + std Z+34,r20 + std Z+35,r21 + eor r28,r16 + eor r29,r17 + eor r2,r24 + eor r3,r25 + ret +886: + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sparkle_384, .-sparkle_384 + + .text +.global sparkle_512 + .type sparkle_512, @function +sparkle_512: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + push r22 + ld r22,Z + ldd r23,Z+1 + ldd r26,Z+2 + ldd r27,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + rcall 151f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,1 + eor r8,r18 + rcall 151f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,2 + eor r8,r18 + rcall 151f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,3 + eor r8,r18 + rcall 151f + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,4 + eor r8,r18 + rcall 151f + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,5 + eor r8,r18 + rcall 151f + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,6 + eor r8,r18 + rcall 151f + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,7 + eor r8,r18 + rcall 151f + pop r18 + cpi r18,8 + brne 5105f + rjmp 1189f +5105: + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,8 + eor r8,r18 + rcall 151f + ldi r18,128 + ldi r19,88 + ldi 
r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,9 + eor r8,r18 + rcall 151f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,10 + eor r8,r18 + rcall 151f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,11 + eor r8,r18 + rcall 151f + rjmp 1189f +151: + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + movw r12,r22 + movw r14,r26 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + movw r24,r4 + movw r16,r6 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r28,Z+24 + ldd r29,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,86 
+ ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r22 + std Z+17,r23 + std Z+18,r26 + std Z+19,r27 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r28 + std Z+25,r29 + std Z+26,r2 + std Z+27,r3 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + eor r12,r22 + eor r13,r23 + eor r14,r26 + eor r15,r27 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + eor r24,r4 + eor r25,r5 + eor r16,r6 + eor r17,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r28,Z+40 + ldd r29,Z+41 + ldd r2,Z+42 + ldd r3,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw 
r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + std Z+32,r22 + std Z+33,r23 + std Z+34,r26 + std Z+35,r27 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r28 + std Z+41,r29 + std Z+42,r2 + std Z+43,r3 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + ldd r22,Z+48 + ldd r23,Z+49 + ldd r26,Z+50 + ldd r27,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r28,Z+56 + ldd r29,Z+57 + ldd r2,Z+58 + ldd r3,Z+59 + ldd r8,Z+60 + ldd r9,Z+61 + ldd r10,Z+62 + ldd r11,Z+63 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror 
r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + eor r14,r12 + eor r15,r13 + eor r16,r24 + eor r17,r25 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + ldd r18,Z+44 + ldd r19,Z+45 + ldd r20,Z+46 + ldd r21,Z+47 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + eor r14,r8 + eor r15,r9 + eor r12,r10 + eor r13,r11 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Z+60,r8 + std Z+61,r9 + std Z+62,r10 + std Z+63,r11 + ldd r8,Z+4 + ldd r9,Z+5 + ldd r10,Z+6 + ldd r11,Z+7 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + std Z+28,r4 + std Z+29,r5 + std Z+30,r6 + std Z+31,r7 + std Z+36,r8 + std Z+37,r9 + std Z+38,r10 + std Z+39,r11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + ldd r8,Z+52 + ldd r9,Z+53 + ldd r10,Z+54 + ldd r11,Z+55 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + ldd r0,Z+60 + eor r14,r0 + ldd r0,Z+61 + eor r15,r0 + ldd r0,Z+62 + eor r12,r0 + ldd r0,Z+63 + eor r13,r0 + std Z+20,r14 + std Z+21,r15 + std Z+22,r12 + std Z+23,r13 + movw r4,r18 + movw r6,r20 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + std Z+48,r22 + std Z+49,r23 + std Z+50,r26 + std Z+51,r27 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + eor r18,r16 + eor r19,r17 + eor r20,r24 + eor r21,r25 + eor r16,r28 + eor r17,r29 + eor r24,r2 + eor r25,r3 + ldd r14,Z+24 + ldd r15,Z+25 + ldd r12,Z+26 + ldd r13,Z+27 + std Z+56,r14 + std Z+57,r15 + std Z+58,r12 + std Z+59,r13 + ld r14,Z + ldd r15,Z+1 + ldd r12,Z+2 + ldd r13,Z+3 + eor r22,r14 + eor r23,r15 + eor r26,r12 + eor r27,r13 + std Z+24,r22 + std Z+25,r23 + std Z+26,r26 + std Z+27,r27 + std Z+32,r14 + std Z+33,r15 + std Z+34,r12 + std Z+35,r13 + ldd r14,Z+8 + ldd r15,Z+9 + ldd r12,Z+10 + ldd r13,Z+11 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + movw r22,r18 + movw r26,r20 + std Z+40,r14 + std Z+41,r15 + std Z+42,r12 + std Z+43,r13 + ldd r28,Z+48 + ldd r29,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r14,Z+16 + ldd r15,Z+17 + ldd r12,Z+18 + ldd r13,Z+19 + eor r28,r14 + eor r29,r15 + eor r2,r12 + eor r3,r13 + std Z+48,r14 + std Z+49,r15 + std Z+50,r12 + std Z+51,r13 + ldd r0,Z+56 + eor r16,r0 + ldd r0,Z+57 + eor r17,r0 + ldd r0,Z+58 + eor r24,r0 + ldd r0,Z+59 + eor r25,r0 + std Z+16,r16 + std Z+17,r17 + std Z+18,r24 + std Z+19,r25 + ret +1189: + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std 
Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sparkle_512, .-sparkle_512 + +#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys/internal-sparkle.c b/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys/internal-sparkle.c index 822af50..4a4c0fb 100644 --- a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys/internal-sparkle.c +++ b/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys/internal-sparkle.c @@ -22,6 +22,8 @@ #include "internal-sparkle.h" +#if !defined(__AVR__) + /* The 8 basic round constants from the specification */ #define RC_0 0xB7E15162 #define RC_1 0xBF715880 @@ -66,7 +68,7 @@ void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps) { uint32_t x0, x1, x2, x3; uint32_t y0, y1, y2, y3; - uint32_t tx, ty, tz, tw; + uint32_t tx, ty; unsigned step; /* Load the SPARKLE-256 state up into local variables */ @@ -105,18 +107,20 @@ void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps) /* Linear layer */ tx = x0 ^ x1; ty = y0 ^ y1; - tw = x0; - tz = y0; tx = leftRotate16(tx ^ (tx << 16)); ty = leftRotate16(ty ^ (ty << 16)); - x0 = x3 ^ x1 ^ ty; - x3 = x1; - y0 = y3 ^ y1 ^ tx; + y2 ^= tx; + tx ^= y3; y3 = y1; - x1 = x2 ^ tw ^ ty; - x2 = tw; - y1 = y2 ^ tz ^ tx; - y2 = tz; + y1 = y2 ^ y0; + y2 = y0; + y0 = tx ^ y3; + x2 ^= ty; + ty ^= x3; + x3 = x1; + x1 = x2 ^ x0; + x2 = x0; + x0 = ty ^ x3; } /* Write the local variables back to the SPARKLE-256 state */ @@ -145,7 +149,7 @@ void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps) { uint32_t x0, x1, x2, x3, x4, x5; uint32_t y0, y1, y2, y3, y4, y5; - uint32_t tx, ty, tz, tw; + uint32_t tx, ty; unsigned step; /* Load the SPARKLE-384 state up into local variables */ @@ -194,22 +198,26 @@ void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps) /* Linear layer */ tx = x0 ^ x1 ^ x2; ty = y0 ^ y1 ^ y2; - tw = x0; - tz = y0; tx = leftRotate16(tx ^ (tx << 16)); ty = leftRotate16(ty ^ (ty << 16)); - x0 = x4 ^ x1 ^ ty; - x4 = x1; - y0 = y4 ^ y1 ^ tx; + y3 ^= tx; + y4 ^= tx; + tx ^= y5; + y5 = y2; + y2 = y3 ^ y0; + y3 = y0; + y0 = y4 ^ y1; y4 = y1; - x1 = x5 ^ x2 ^ ty; + y1 = tx ^ y5; + x3 ^= ty; + x4 ^= ty; + ty ^= x5; x5 = x2; - y1 = y5 ^ y2 ^ tx; - y5 = y2; - x2 = x3 ^ tw ^ ty; - x3 = tw; - y2 = y3 ^ tz ^ tx; - y3 = tz; + x2 = x3 ^ x0; + x3 = x0; + x0 = x4 ^ x1; + x4 = x1; + x1 = ty ^ x5; } /* Write the local variables back to the SPARKLE-384 state */ @@ -246,7 +254,7 @@ void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps) { uint32_t x0, x1, x2, x3, x4, x5, x6, x7; uint32_t y0, y1, y2, y3, y4, y5, y6, y7; - uint32_t tx, ty, tz, tw; + uint32_t tx, ty; unsigned step; /* Load the SPARKLE-512 state up into local variables */ @@ -305,26 +313,32 @@ void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps) /* Linear layer */ tx = x0 ^ x1 ^ x2 ^ x3; ty = y0 ^ y1 ^ y2 ^ y3; - tw = x0; - tz = y0; tx = leftRotate16(tx ^ (tx << 16)); ty = leftRotate16(ty ^ (ty << 16)); - x0 = x5 ^ x1 ^ ty; - x5 = x1; - y0 = y5 ^ y1 ^ tx; + y4 ^= tx; + y5 ^= tx; + y6 ^= tx; + tx ^= y7; + y7 = y3; + y3 = y4 ^ y0; + y4 = y0; + y0 = y5 ^ y1; y5 = y1; - x1 = x6 ^ x2 ^ ty; - x6 = x2; - y1 = y6 ^ y2 ^ tx; + y1 = y6 ^ y2; y6 = y2; - x2 = x7 ^ x3 ^ ty; + 
y2 = tx ^ y7; + x4 ^= ty; + x5 ^= ty; + x6 ^= ty; + ty ^= x7; x7 = x3; - y2 = y7 ^ y3 ^ tx; - y7 = y3; - x3 = x4 ^ tw ^ ty; - x4 = tw; - y3 = y4 ^ tz ^ tx; - y4 = tz; + x3 = x4 ^ x0; + x4 = x0; + x0 = x5 ^ x1; + x5 = x1; + x1 = x6 ^ x2; + x6 = x2; + x2 = ty ^ x7; } /* Write the local variables back to the SPARKLE-512 state */ @@ -364,3 +378,5 @@ void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps) le_store_word32((uint8_t *)&(s[15]), y7); #endif } + +#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys/internal-util.h b/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys/internal-util.h +++ b/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) 
(rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) 
(leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys/sparkle.c b/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys/sparkle.c index b357de6..e2aa25a 100644 --- a/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys/sparkle.c +++ b/sparkle/Implementations/crypto_aead/schwaemm128128v1/rhys/sparkle.c @@ -123,24 +123,21 @@ aead_hash_algorithm_t const esch_384_hash_algorithm = { * \brief Perform the rho1 and rate whitening steps for Schwaemm256-128. * * \param s SPARKLE-384 state. - * \param domain Domain separator for this phase. */ -#define schwaemm_256_128_rho(s, domain) \ +#define schwaemm_256_128_rho(s) \ do { \ - uint32_t t0 = s[0]; \ - uint32_t t1 = s[1]; \ - uint32_t t2 = s[2]; \ - uint32_t t3 = s[3]; \ - if ((domain) != 0) \ - s[11] ^= DOMAIN(domain); \ + uint32_t t = s[0]; \ s[0] = s[4] ^ s[8]; \ + s[4] ^= t ^ s[8]; \ + t = s[1]; \ s[1] = s[5] ^ s[9]; \ + s[5] ^= t ^ s[9]; \ + t = s[2]; \ s[2] = s[6] ^ s[10]; \ + s[6] ^= t ^ s[10]; \ + t = s[3]; \ s[3] = s[7] ^ s[11]; \ - s[4] ^= t0 ^ s[8]; \ - s[5] ^= t1 ^ s[9]; \ - s[6] ^= t2 ^ s[10]; \ - s[7] ^= t3 ^ s[11]; \ + s[7] ^= t ^ s[11]; \ } while (0) /** @@ -155,18 +152,20 @@ static void schwaemm_256_128_authenticate const unsigned char *ad, unsigned long long adlen) { while (adlen > SCHWAEMM_256_128_RATE) { - schwaemm_256_128_rho(s, 0x00); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); sparkle_384(s, 7); ad += SCHWAEMM_256_128_RATE; adlen -= SCHWAEMM_256_128_RATE; } if (adlen == SCHWAEMM_256_128_RATE) { - schwaemm_256_128_rho(s, 0x05); + s[11] ^= DOMAIN(0x05); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); } else { unsigned temp = (unsigned)adlen; - schwaemm_256_128_rho(s, 0x04); + s[11] ^= DOMAIN(0x04); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, ad, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -202,7 +201,7 @@ int schwaemm_256_128_aead_encrypt while (mlen > SCHWAEMM_256_128_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s, 0x00); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); sparkle_384(s, 7); memcpy(c, block, SCHWAEMM_256_128_RATE); @@ -213,13 +212,15 @@ int schwaemm_256_128_aead_encrypt if (mlen == SCHWAEMM_256_128_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s, 0x07); + s[11] ^= DOMAIN(0x07); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); memcpy(c, block, SCHWAEMM_256_128_RATE); } else { unsigned temp = (unsigned)mlen; lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - schwaemm_256_128_rho(s, 0x06); + s[11] ^= DOMAIN(0x06); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; memcpy(c, block, temp); @@ -266,7 +267,7 @@ int schwaemm_256_128_aead_decrypt while (clen > SCHWAEMM_256_128_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s, 0x00); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); sparkle_384(s, 7); c += SCHWAEMM_256_128_RATE; @@ -276,12 +277,14 @@ int schwaemm_256_128_aead_decrypt if (clen 
== SCHWAEMM_256_128_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s, 0x07); + s[11] ^= DOMAIN(0x07); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); } else { unsigned temp = (unsigned)clen; lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - schwaemm_256_128_rho(s, 0x06); + s[11] ^= DOMAIN(0x06); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -315,21 +318,18 @@ int schwaemm_256_128_aead_decrypt * \brief Perform the rho1 and rate whitening steps for Schwaemm192-192. * * \param s SPARKLE-384 state. - * \param domain Domain separator for this phase. */ -#define schwaemm_192_192_rho(s, domain) \ +#define schwaemm_192_192_rho(s) \ do { \ - uint32_t t0 = s[0]; \ - uint32_t t1 = s[1]; \ - uint32_t t2 = s[2]; \ - if ((domain) != 0) \ - s[11] ^= DOMAIN(domain); \ + uint32_t t = s[0]; \ s[0] = s[3] ^ s[6]; \ + s[3] ^= t ^ s[9]; \ + t = s[1]; \ s[1] = s[4] ^ s[7]; \ + s[4] ^= t ^ s[10]; \ + t = s[2]; \ s[2] = s[5] ^ s[8]; \ - s[3] ^= t0 ^ s[9]; \ - s[4] ^= t1 ^ s[10]; \ - s[5] ^= t2 ^ s[11]; \ + s[5] ^= t ^ s[11]; \ } while (0) /** @@ -344,18 +344,20 @@ static void schwaemm_192_192_authenticate const unsigned char *ad, unsigned long long adlen) { while (adlen > SCHWAEMM_192_192_RATE) { - schwaemm_192_192_rho(s, 0x00); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); sparkle_384(s, 7); ad += SCHWAEMM_192_192_RATE; adlen -= SCHWAEMM_192_192_RATE; } if (adlen == SCHWAEMM_192_192_RATE) { - schwaemm_192_192_rho(s, 0x09); + s[11] ^= DOMAIN(0x09); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); } else { unsigned temp = (unsigned)adlen; - schwaemm_192_192_rho(s, 0x08); + s[11] ^= DOMAIN(0x08); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, ad, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -391,7 +393,7 @@ int schwaemm_192_192_aead_encrypt while (mlen > SCHWAEMM_192_192_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s, 0x00); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); sparkle_384(s, 7); memcpy(c, block, SCHWAEMM_192_192_RATE); @@ -402,13 +404,15 @@ int schwaemm_192_192_aead_encrypt if (mlen == SCHWAEMM_192_192_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s, 0x0B); + s[11] ^= DOMAIN(0x0B); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); memcpy(c, block, SCHWAEMM_192_192_RATE); } else { unsigned temp = (unsigned)mlen; lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - schwaemm_192_192_rho(s, 0x0A); + s[11] ^= DOMAIN(0x0A); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; memcpy(c, block, temp); @@ -455,7 +459,7 @@ int schwaemm_192_192_aead_decrypt while (clen > SCHWAEMM_192_192_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s, 0x00); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); sparkle_384(s, 7); c += SCHWAEMM_192_192_RATE; @@ -465,12 +469,14 @@ int schwaemm_192_192_aead_decrypt if (clen == SCHWAEMM_192_192_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s, 0x0B); + s[11] ^= DOMAIN(0x0B); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, 
SCHWAEMM_192_192_RATE); } else { unsigned temp = (unsigned)clen; lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - schwaemm_192_192_rho(s, 0x0A); + s[11] ^= DOMAIN(0x0A); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -504,18 +510,15 @@ int schwaemm_192_192_aead_decrypt * \brief Perform the rho1 and rate whitening steps for Schwaemm128-128. * * \param s SPARKLE-256 state. - * \param domain Domain separator for this phase. */ -#define schwaemm_128_128_rho(s, domain) \ +#define schwaemm_128_128_rho(s) \ do { \ - uint32_t t0 = s[0]; \ - uint32_t t1 = s[1]; \ - if ((domain) != 0) \ - s[7] ^= DOMAIN(domain); \ + uint32_t t = s[0]; \ s[0] = s[2] ^ s[4]; \ + s[2] ^= t ^ s[6]; \ + t = s[1]; \ s[1] = s[3] ^ s[5]; \ - s[2] ^= t0 ^ s[6]; \ - s[3] ^= t1 ^ s[7]; \ + s[3] ^= t ^ s[7]; \ } while (0) /** @@ -530,18 +533,20 @@ static void schwaemm_128_128_authenticate const unsigned char *ad, unsigned long long adlen) { while (adlen > SCHWAEMM_128_128_RATE) { - schwaemm_128_128_rho(s, 0x00); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); sparkle_256(s, 7); ad += SCHWAEMM_128_128_RATE; adlen -= SCHWAEMM_128_128_RATE; } if (adlen == SCHWAEMM_128_128_RATE) { - schwaemm_128_128_rho(s, 0x05); + s[7] ^= DOMAIN(0x05); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); } else { unsigned temp = (unsigned)adlen; - schwaemm_128_128_rho(s, 0x04); + s[7] ^= DOMAIN(0x04); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, ad, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -577,7 +582,7 @@ int schwaemm_128_128_aead_encrypt while (mlen > SCHWAEMM_128_128_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s, 0x00); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); sparkle_256(s, 7); memcpy(c, block, SCHWAEMM_128_128_RATE); @@ -588,13 +593,15 @@ int schwaemm_128_128_aead_encrypt if (mlen == SCHWAEMM_128_128_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s, 0x07); + s[7] ^= DOMAIN(0x07); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); memcpy(c, block, SCHWAEMM_128_128_RATE); } else { unsigned temp = (unsigned)mlen; lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - schwaemm_128_128_rho(s, 0x06); + s[7] ^= DOMAIN(0x06); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; memcpy(c, block, temp); @@ -641,7 +648,7 @@ int schwaemm_128_128_aead_decrypt while (clen > SCHWAEMM_128_128_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s, 0x00); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); sparkle_256(s, 7); c += SCHWAEMM_128_128_RATE; @@ -651,12 +658,14 @@ int schwaemm_128_128_aead_decrypt if (clen == SCHWAEMM_128_128_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s, 0x07); + s[7] ^= DOMAIN(0x07); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); } else { unsigned temp = (unsigned)clen; lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - schwaemm_128_128_rho(s, 0x06); + s[7] ^= DOMAIN(0x06); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -690,24 +699,21 @@ int schwaemm_128_128_aead_decrypt * 
\brief Perform the rho1 and rate whitening steps for Schwaemm256-256. * * \param s SPARKLE-512 state. - * \param domain Domain separator for this phase. */ -#define schwaemm_256_256_rho(s, domain) \ +#define schwaemm_256_256_rho(s) \ do { \ - uint32_t t0 = s[0]; \ - uint32_t t1 = s[1]; \ - uint32_t t2 = s[2]; \ - uint32_t t3 = s[3]; \ - if ((domain) != 0) \ - s[15] ^= DOMAIN(domain); \ + uint32_t t = s[0]; \ s[0] = s[4] ^ s[8]; \ + s[4] ^= t ^ s[12]; \ + t = s[1]; \ s[1] = s[5] ^ s[9]; \ + s[5] ^= t ^ s[13]; \ + t = s[2]; \ s[2] = s[6] ^ s[10]; \ + s[6] ^= t ^ s[14]; \ + t = s[3]; \ s[3] = s[7] ^ s[11]; \ - s[4] ^= t0 ^ s[12]; \ - s[5] ^= t1 ^ s[13]; \ - s[6] ^= t2 ^ s[14]; \ - s[7] ^= t3 ^ s[15]; \ + s[7] ^= t ^ s[15]; \ } while (0) /** @@ -722,18 +728,20 @@ static void schwaemm_256_256_authenticate const unsigned char *ad, unsigned long long adlen) { while (adlen > SCHWAEMM_256_256_RATE) { - schwaemm_256_256_rho(s, 0x00); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); sparkle_512(s, 8); ad += SCHWAEMM_256_256_RATE; adlen -= SCHWAEMM_256_256_RATE; } if (adlen == SCHWAEMM_256_256_RATE) { - schwaemm_256_256_rho(s, 0x11); + s[15] ^= DOMAIN(0x11); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); } else { unsigned temp = (unsigned)adlen; - schwaemm_256_256_rho(s, 0x10); + s[15] ^= DOMAIN(0x10); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, ad, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -769,7 +777,7 @@ int schwaemm_256_256_aead_encrypt while (mlen > SCHWAEMM_256_256_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s, 0x00); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); sparkle_512(s, 8); memcpy(c, block, SCHWAEMM_256_256_RATE); @@ -780,13 +788,15 @@ int schwaemm_256_256_aead_encrypt if (mlen == SCHWAEMM_256_256_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s, 0x13); + s[15] ^= DOMAIN(0x13); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); memcpy(c, block, SCHWAEMM_256_256_RATE); } else { unsigned temp = (unsigned)mlen; lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - schwaemm_256_256_rho(s, 0x12); + s[15] ^= DOMAIN(0x12); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; memcpy(c, block, temp); @@ -833,7 +843,7 @@ int schwaemm_256_256_aead_decrypt while (clen > SCHWAEMM_256_256_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s, 0x00); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); sparkle_512(s, 8); c += SCHWAEMM_256_256_RATE; @@ -843,12 +853,14 @@ int schwaemm_256_256_aead_decrypt if (clen == SCHWAEMM_256_256_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s, 0x13); + s[15] ^= DOMAIN(0x13); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); } else { unsigned temp = (unsigned)clen; lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - schwaemm_256_256_rho(s, 0x12); + s[15] ^= DOMAIN(0x12); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; } diff --git a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/aead-common.c b/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/aead-common.c 
deleted file mode 100644 index 84fc53a..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/aead-common.h b/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. 
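The encrypt/decrypt typedefs above fix the calling convention used throughout the library: clen comes back as mlen plus the tag length, and decryption returns -1 when the tag does not verify. A minimal round-trip sketch against the Schwaemm192-192 pair wrapped by the encrypt.c removed further down (the 24-byte key/nonce/tag sizes follow its api.h; the all-zero key and nonce are placeholders for illustration only, not a recommended usage):

#include <string.h>
#include "sparkle.h"

static int schwaemm_192_192_roundtrip(void)
{
    unsigned char key[24] = {0}, npub[24] = {0};   /* placeholder key and nonce */
    unsigned char m[16] = {1, 2, 3, 4};
    unsigned char c[sizeof(m) + 24], p[sizeof(m)];
    unsigned long long clen, plen;

    /* clen is set to sizeof(m) + 24: the ciphertext followed by the tag */
    schwaemm_192_192_aead_encrypt(c, &clen, m, sizeof(m), NULL, 0, NULL, npub, key);

    /* Returns -1 on tag mismatch, in which case p is zeroed before returning */
    if (schwaemm_192_192_aead_decrypt(p, &plen, NULL, c, clen, NULL, 0, npub, key) != 0)
        return -1;
    return memcmp(p, m, sizeof(m)) == 0 ? 0 : -1;
}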
- */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. 
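The split described above (update/finalize for plain hashes, absorb/squeeze for XOFs) is easiest to see as a calling pattern. A rough sketch using only the function-pointer types declared here; the state argument and the 32/64-byte output lengths are illustrative assumptions, not part of the header:

#include "aead-common.h"

/* Plain hash: one init, any number of updates, one finalize. */
static void run_hash(aead_hash_init_t init, aead_hash_update_t update,
                     aead_hash_finalize_t finalize, void *state,
                     const unsigned char *msg, unsigned long long len,
                     unsigned char out[32])
{
    init(state);
    update(state, msg, len);   /* may be called repeatedly for streamed input */
    finalize(state, out);      /* writes the algorithm's fixed-length digest */
}

/* XOF: absorb the input, then squeeze as much output as the caller wants. */
static void run_xof(aead_hash_init_t init, aead_xof_absorb_t absorb,
                    aead_xof_squeeze_t squeeze, void *state,
                    const unsigned char *msg, unsigned long long len,
                    unsigned char out[64])
{
    init(state);
    absorb(state, msg, len);
    squeeze(state, out, 64);   /* output length is chosen at call time */
}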
- */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
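Both tag-check helpers are meant to be the last step of an AEAD decrypt routine: compare the recomputed tag against the one received and wipe the plaintext on mismatch, without data-dependent branches (the implementation in the deleted aead-common.c above folds the byte differences into an accumulator and turns it into an all-ones or all-zero mask). A minimal sketch of the calling pattern; the finish_decrypt wrapper and the 16-byte tag length are hypothetical:

#include "aead-common.h"

/* Hypothetical tail of a decrypt routine: "computed" is the tag recomputed
 * from the key and ciphertext, "received" is the tag taken from the packet. */
static int finish_decrypt(unsigned char *m, unsigned long long mlen,
                          const unsigned char computed[16],
                          const unsigned char received[16])
{
    /* Constant-time compare: on mismatch m[0..mlen-1] is zeroed and -1 is
     * returned; on success m is left intact and 0 is returned. */
    return aead_check_tag(m, mlen, computed, received, 16);
}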
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/api.h b/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/api.h deleted file mode 100644 index c340ebc..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 24 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 24 -#define CRYPTO_ABYTES 24 -#define CRYPTO_NOOVERLAP 1 diff --git a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/encrypt.c b/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/encrypt.c deleted file mode 100644 index 43a4aac..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "sparkle.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return schwaemm_192_192_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return schwaemm_192_192_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/internal-sparkle-avr.S b/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/internal-sparkle-avr.S deleted file mode 100644 index 753ea2f..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/internal-sparkle-avr.S +++ /dev/null @@ -1,2887 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global sparkle_256 - .type sparkle_256, @function -sparkle_256: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - push r22 - ld r22,Z - ldd r23,Z+1 - ldd r26,Z+2 - ldd r27,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - rcall 129f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,1 - eor r8,r18 - rcall 129f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,2 - eor r8,r18 - rcall 129f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,3 - eor r8,r18 - rcall 129f - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,4 - eor r8,r18 - rcall 129f - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,5 - eor r8,r18 - rcall 129f - ldi r18,200 - ldi r19,161 - 
ldi r20,191 - ldi r21,207 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,6 - eor r8,r18 - rcall 129f - pop r18 - cpi r18,7 - brne 5094f - rjmp 615f -5094: - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,7 - eor r8,r18 - rcall 129f - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,8 - eor r8,r18 - rcall 129f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,9 - eor r8,r18 - rcall 129f - rjmp 615f -129: - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - movw r12,r22 - movw r14,r26 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - movw r24,r4 - movw r16,r6 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r28,Z+24 - ldd r29,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - 
movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - eor r14,r12 - eor r15,r13 - eor r16,r24 - eor r17,r25 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - eor r14,r8 - eor r15,r9 - eor r12,r10 - eor r13,r11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - std Z+20,r18 - std Z+21,r19 - std Z+22,r20 - std Z+23,r21 - movw r18,r4 - movw r20,r6 - movw r4,r14 - movw r6,r12 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - movw r8,r18 - movw r10,r20 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - eor r16,r28 - eor r17,r29 - eor r24,r2 - eor r25,r3 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - std Z+24,r28 - std Z+25,r29 - std Z+26,r2 - std Z+27,r3 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - movw r14,r22 - movw r12,r26 - eor r14,r18 - eor r15,r19 - eor r12,r20 - eor r13,r21 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - movw r22,r16 - movw r26,r24 - eor r22,r28 - eor r23,r29 - eor r26,r2 - eor r27,r3 - movw r28,r14 - movw r2,r12 - ret -615: - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 
- std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sparkle_256, .-sparkle_256 - - .text -.global sparkle_384 - .type sparkle_384, @function -sparkle_384: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - push r22 - ld r22,Z - ldd r23,Z+1 - ldd r26,Z+2 - ldd r27,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - rcall 140f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,1 - eor r8,r18 - rcall 140f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,2 - eor r8,r18 - rcall 140f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,3 - eor r8,r18 - rcall 140f - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,4 - eor r8,r18 - rcall 140f - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,5 - eor r8,r18 - rcall 140f - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,6 - eor r8,r18 - rcall 140f - pop r18 - cpi r18,7 - brne 5094f - rjmp 886f -5094: - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,7 - eor r8,r18 - rcall 140f - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,8 - eor r8,r18 - rcall 140f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,9 - eor r8,r18 - rcall 140f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,10 - eor r8,r18 - rcall 140f - rjmp 886f -140: - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor 
r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - movw r12,r22 - movw r14,r26 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - movw r24,r4 - movw r16,r6 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r28,Z+24 - ldd r29,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld 
r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r22 - std Z+17,r23 - std Z+18,r26 - std Z+19,r27 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r28 - std Z+25,r29 - std Z+26,r2 - std Z+27,r3 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - eor r12,r22 - eor r13,r23 - eor r14,r26 - eor r15,r27 - eor r24,r4 - eor r25,r5 - eor r16,r6 - eor r17,r7 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r28,Z+40 - ldd r29,Z+41 - ldd r2,Z+42 - ldd r3,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - eor r14,r12 - eor r15,r13 - eor r16,r24 - eor 
r17,r25 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - eor r14,r8 - eor r15,r9 - eor r12,r10 - eor r13,r11 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - ldd r0,Z+4 - eor r18,r0 - ldd r0,Z+5 - eor r19,r0 - ldd r0,Z+6 - eor r20,r0 - ldd r0,Z+7 - eor r21,r0 - std Z+20,r18 - std Z+21,r19 - std Z+22,r20 - std Z+23,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Z+28,r18 - std Z+29,r19 - std Z+30,r20 - std Z+31,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - std Z+36,r18 - std Z+37,r19 - std Z+38,r20 - std Z+39,r21 - eor r8,r14 - eor r9,r15 - eor r10,r12 - eor r11,r13 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - eor r18,r16 - eor r19,r17 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - eor r16,r28 - eor r17,r29 - eor r24,r2 - eor r25,r3 - ldd r28,Z+16 - ldd r29,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - std Z+40,r28 - std Z+41,r29 - std Z+42,r2 - std Z+43,r3 - ld r14,Z - ldd r15,Z+1 - ldd r12,Z+2 - ldd r13,Z+3 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - std Z+24,r14 - std Z+25,r15 - std Z+26,r12 - std Z+27,r13 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - std Z+32,r18 - std Z+33,r19 - std Z+34,r20 - std Z+35,r21 - eor r28,r16 - eor r29,r17 - eor r2,r24 - eor r3,r25 - ret -886: - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sparkle_384, .-sparkle_384 - - .text -.global sparkle_512 - .type sparkle_512, @function -sparkle_512: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - push r22 - ld r22,Z - ldd r23,Z+1 - ldd r26,Z+2 - ldd r27,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - rcall 151f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,1 - eor r8,r18 - rcall 151f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,2 - eor r8,r18 - rcall 151f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,3 - eor r8,r18 - rcall 151f - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,4 - eor r8,r18 - rcall 151f - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,5 - eor r8,r18 - rcall 151f - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r4,r18 - eor r5,r19 - 
eor r6,r20 - eor r7,r21 - ldi r18,6 - eor r8,r18 - rcall 151f - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,7 - eor r8,r18 - rcall 151f - pop r18 - cpi r18,8 - brne 5105f - rjmp 1189f -5105: - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,8 - eor r8,r18 - rcall 151f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,9 - eor r8,r18 - rcall 151f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,10 - eor r8,r18 - rcall 151f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,11 - eor r8,r18 - rcall 151f - rjmp 1189f -151: - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - movw r12,r22 - movw r14,r26 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - movw r24,r4 - movw r16,r6 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor 
r17,r11 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r28,Z+24 - ldd r29,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r22 - std Z+17,r23 - std Z+18,r26 - std Z+19,r27 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r28 - std Z+25,r29 - std Z+26,r2 - std Z+27,r3 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - eor r12,r22 - eor r13,r23 - eor r14,r26 - eor r15,r27 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - eor r24,r4 - eor r25,r5 - eor r16,r6 - eor r17,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r28,Z+40 - ldd r29,Z+41 - ldd r2,Z+42 - ldd r3,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw 
r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - std Z+32,r22 - std Z+33,r23 - std Z+34,r26 - std Z+35,r27 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r28 - std Z+41,r29 - std Z+42,r2 - std Z+43,r3 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - ldd r22,Z+48 - ldd r23,Z+49 - ldd r26,Z+50 - ldd r27,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r28,Z+56 - ldd r29,Z+57 - ldd r2,Z+58 - ldd r3,Z+59 - ldd r8,Z+60 - ldd r9,Z+61 - ldd r10,Z+62 - ldd r11,Z+63 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 
- eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - eor r14,r12 - eor r15,r13 - eor r16,r24 - eor r17,r25 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - ldd r18,Z+44 - ldd r19,Z+45 - ldd r20,Z+46 - ldd r21,Z+47 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - eor r14,r8 - eor r15,r9 - eor r12,r10 - eor r13,r11 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Z+60,r8 - std Z+61,r9 - std Z+62,r10 - std Z+63,r11 - ldd r8,Z+4 - ldd r9,Z+5 - ldd r10,Z+6 - ldd r11,Z+7 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - std Z+28,r4 - std Z+29,r5 - std Z+30,r6 - std Z+31,r7 - std Z+36,r8 - std Z+37,r9 - std Z+38,r10 - std Z+39,r11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - ldd r8,Z+52 - ldd r9,Z+53 - ldd r10,Z+54 - ldd r11,Z+55 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - ldd r0,Z+60 - eor r14,r0 - ldd r0,Z+61 - eor r15,r0 - ldd r0,Z+62 - eor r12,r0 - ldd r0,Z+63 - eor r13,r0 - std Z+20,r14 - std Z+21,r15 - std Z+22,r12 - std Z+23,r13 - movw r4,r18 - movw r6,r20 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - std Z+48,r22 - std Z+49,r23 - std Z+50,r26 - std Z+51,r27 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - eor r18,r16 - eor r19,r17 - eor r20,r24 - eor r21,r25 - eor r16,r28 - eor r17,r29 - eor r24,r2 - eor r25,r3 - ldd r14,Z+24 - ldd r15,Z+25 - ldd r12,Z+26 - ldd r13,Z+27 - std Z+56,r14 - std Z+57,r15 - std Z+58,r12 - std Z+59,r13 - ld r14,Z - ldd r15,Z+1 - ldd r12,Z+2 - ldd r13,Z+3 - eor r22,r14 - eor r23,r15 - eor r26,r12 - eor r27,r13 - std Z+24,r22 - std Z+25,r23 - std Z+26,r26 - std Z+27,r27 - std Z+32,r14 - std Z+33,r15 - std Z+34,r12 - std Z+35,r13 - ldd r14,Z+8 - ldd r15,Z+9 - ldd r12,Z+10 - ldd r13,Z+11 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - movw r22,r18 - movw r26,r20 - std Z+40,r14 - std Z+41,r15 - std Z+42,r12 - std Z+43,r13 - ldd r28,Z+48 - 
ldd r29,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r14,Z+16 - ldd r15,Z+17 - ldd r12,Z+18 - ldd r13,Z+19 - eor r28,r14 - eor r29,r15 - eor r2,r12 - eor r3,r13 - std Z+48,r14 - std Z+49,r15 - std Z+50,r12 - std Z+51,r13 - ldd r0,Z+56 - eor r16,r0 - ldd r0,Z+57 - eor r17,r0 - ldd r0,Z+58 - eor r24,r0 - ldd r0,Z+59 - eor r25,r0 - std Z+16,r16 - std Z+17,r17 - std Z+18,r24 - std Z+19,r25 - ret -1189: - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sparkle_512, .-sparkle_512 - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/internal-sparkle.c b/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/internal-sparkle.c deleted file mode 100644 index 4a4c0fb..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/internal-sparkle.c +++ /dev/null @@ -1,382 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-sparkle.h" - -#if !defined(__AVR__) - -/* The 8 basic round constants from the specification */ -#define RC_0 0xB7E15162 -#define RC_1 0xBF715880 -#define RC_2 0x38B4DA56 -#define RC_3 0x324E7738 -#define RC_4 0xBB1185EB -#define RC_5 0x4F7C7B57 -#define RC_6 0xCFBFA1C8 -#define RC_7 0xC2B3293D - -/* Round constants for all SPARKLE steps; maximum of 12 for SPARKLE-512 */ -static uint32_t const sparkle_rc[12] = { - RC_0, RC_1, RC_2, RC_3, RC_4, RC_5, RC_6, RC_7, - RC_0, RC_1, RC_2, RC_3 -}; - -/** - * \brief Alzette block cipher that implements the ARXbox layer of the - * SPARKLE permutation. - * - * \param x Left half of the 64-bit block. - * \param y Right half of the 64-bit block. - * \param k 32-bit round key. 
- */ -#define alzette(x, y, k) \ - do { \ - (x) += leftRotate1((y)); \ - (y) ^= leftRotate8((x)); \ - (x) ^= (k); \ - (x) += leftRotate15((y)); \ - (y) ^= leftRotate15((x)); \ - (x) ^= (k); \ - (x) += (y); \ - (y) ^= leftRotate1((x)); \ - (x) ^= (k); \ - (x) += leftRotate8((y)); \ - (y) ^= leftRotate16((x)); \ - (x) ^= (k); \ - } while (0) - -void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps) -{ - uint32_t x0, x1, x2, x3; - uint32_t y0, y1, y2, y3; - uint32_t tx, ty; - unsigned step; - - /* Load the SPARKLE-256 state up into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = s[0]; - y0 = s[1]; - x1 = s[2]; - y1 = s[3]; - x2 = s[4]; - y2 = s[5]; - x3 = s[6]; - y3 = s[7]; -#else - x0 = le_load_word32((const uint8_t *)&(s[0])); - y0 = le_load_word32((const uint8_t *)&(s[1])); - x1 = le_load_word32((const uint8_t *)&(s[2])); - y1 = le_load_word32((const uint8_t *)&(s[3])); - x2 = le_load_word32((const uint8_t *)&(s[4])); - y2 = le_load_word32((const uint8_t *)&(s[5])); - x3 = le_load_word32((const uint8_t *)&(s[6])); - y3 = le_load_word32((const uint8_t *)&(s[7])); -#endif - - /* Perform all requested steps */ - for (step = 0; step < steps; ++step) { - /* Add round constants */ - y0 ^= sparkle_rc[step]; - y1 ^= step; - - /* ARXbox layer */ - alzette(x0, y0, RC_0); - alzette(x1, y1, RC_1); - alzette(x2, y2, RC_2); - alzette(x3, y3, RC_3); - - /* Linear layer */ - tx = x0 ^ x1; - ty = y0 ^ y1; - tx = leftRotate16(tx ^ (tx << 16)); - ty = leftRotate16(ty ^ (ty << 16)); - y2 ^= tx; - tx ^= y3; - y3 = y1; - y1 = y2 ^ y0; - y2 = y0; - y0 = tx ^ y3; - x2 ^= ty; - ty ^= x3; - x3 = x1; - x1 = x2 ^ x0; - x2 = x0; - x0 = ty ^ x3; - } - - /* Write the local variables back to the SPARKLE-256 state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s[0] = x0; - s[1] = y0; - s[2] = x1; - s[3] = y1; - s[4] = x2; - s[5] = y2; - s[6] = x3; - s[7] = y3; -#else - le_store_word32((uint8_t *)&(s[0]), x0); - le_store_word32((uint8_t *)&(s[1]), y0); - le_store_word32((uint8_t *)&(s[2]), x1); - le_store_word32((uint8_t *)&(s[3]), y1); - le_store_word32((uint8_t *)&(s[4]), x2); - le_store_word32((uint8_t *)&(s[5]), y2); - le_store_word32((uint8_t *)&(s[6]), x3); - le_store_word32((uint8_t *)&(s[7]), y3); -#endif -} - -void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps) -{ - uint32_t x0, x1, x2, x3, x4, x5; - uint32_t y0, y1, y2, y3, y4, y5; - uint32_t tx, ty; - unsigned step; - - /* Load the SPARKLE-384 state up into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = s[0]; - y0 = s[1]; - x1 = s[2]; - y1 = s[3]; - x2 = s[4]; - y2 = s[5]; - x3 = s[6]; - y3 = s[7]; - x4 = s[8]; - y4 = s[9]; - x5 = s[10]; - y5 = s[11]; -#else - x0 = le_load_word32((const uint8_t *)&(s[0])); - y0 = le_load_word32((const uint8_t *)&(s[1])); - x1 = le_load_word32((const uint8_t *)&(s[2])); - y1 = le_load_word32((const uint8_t *)&(s[3])); - x2 = le_load_word32((const uint8_t *)&(s[4])); - y2 = le_load_word32((const uint8_t *)&(s[5])); - x3 = le_load_word32((const uint8_t *)&(s[6])); - y3 = le_load_word32((const uint8_t *)&(s[7])); - x4 = le_load_word32((const uint8_t *)&(s[8])); - y4 = le_load_word32((const uint8_t *)&(s[9])); - x5 = le_load_word32((const uint8_t *)&(s[10])); - y5 = le_load_word32((const uint8_t *)&(s[11])); -#endif - - /* Perform all requested steps */ - for (step = 0; step < steps; ++step) { - /* Add round constants */ - y0 ^= sparkle_rc[step]; - y1 ^= step; - - /* ARXbox layer */ - alzette(x0, y0, RC_0); - alzette(x1, y1, RC_1); - alzette(x2, y2, RC_2); - alzette(x3, y3, RC_3); 
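An illustrative restatement for readers tracing the AVR routines above against this C reference: alzette_sketch() is a hypothetical helper, not taken from the file, that expresses the alzette() macro as a function using the leftRotateN helpers from internal-util.h. The byte quadruples loaded with ldi in the assembly (98, 81, 225, 183; 128, 88, 113, 191; and so on) are these same round constants RC_0, RC_1, ... in little-endian byte order.

/* Sketch only: one Alzette ARX-box application, mirroring the macro above. */
static void alzette_sketch(uint32_t *x, uint32_t *y, uint32_t k)
{
    *x += leftRotate1(*y);  *y ^= leftRotate8(*x);  *x ^= k;   /* first ARX round */
    *x += leftRotate15(*y); *y ^= leftRotate15(*x); *x ^= k;   /* second ARX round */
    *x += *y;               *y ^= leftRotate1(*x);  *x ^= k;   /* third ARX round */
    *x += leftRotate8(*y);  *y ^= leftRotate16(*x); *x ^= k;   /* fourth ARX round */
}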
- alzette(x4, y4, RC_4); - alzette(x5, y5, RC_5); - - /* Linear layer */ - tx = x0 ^ x1 ^ x2; - ty = y0 ^ y1 ^ y2; - tx = leftRotate16(tx ^ (tx << 16)); - ty = leftRotate16(ty ^ (ty << 16)); - y3 ^= tx; - y4 ^= tx; - tx ^= y5; - y5 = y2; - y2 = y3 ^ y0; - y3 = y0; - y0 = y4 ^ y1; - y4 = y1; - y1 = tx ^ y5; - x3 ^= ty; - x4 ^= ty; - ty ^= x5; - x5 = x2; - x2 = x3 ^ x0; - x3 = x0; - x0 = x4 ^ x1; - x4 = x1; - x1 = ty ^ x5; - } - - /* Write the local variables back to the SPARKLE-384 state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s[0] = x0; - s[1] = y0; - s[2] = x1; - s[3] = y1; - s[4] = x2; - s[5] = y2; - s[6] = x3; - s[7] = y3; - s[8] = x4; - s[9] = y4; - s[10] = x5; - s[11] = y5; -#else - le_store_word32((uint8_t *)&(s[0]), x0); - le_store_word32((uint8_t *)&(s[1]), y0); - le_store_word32((uint8_t *)&(s[2]), x1); - le_store_word32((uint8_t *)&(s[3]), y1); - le_store_word32((uint8_t *)&(s[4]), x2); - le_store_word32((uint8_t *)&(s[5]), y2); - le_store_word32((uint8_t *)&(s[6]), x3); - le_store_word32((uint8_t *)&(s[7]), y3); - le_store_word32((uint8_t *)&(s[8]), x4); - le_store_word32((uint8_t *)&(s[9]), y4); - le_store_word32((uint8_t *)&(s[10]), x5); - le_store_word32((uint8_t *)&(s[11]), y5); -#endif -} - -void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps) -{ - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t y0, y1, y2, y3, y4, y5, y6, y7; - uint32_t tx, ty; - unsigned step; - - /* Load the SPARKLE-512 state up into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = s[0]; - y0 = s[1]; - x1 = s[2]; - y1 = s[3]; - x2 = s[4]; - y2 = s[5]; - x3 = s[6]; - y3 = s[7]; - x4 = s[8]; - y4 = s[9]; - x5 = s[10]; - y5 = s[11]; - x6 = s[12]; - y6 = s[13]; - x7 = s[14]; - y7 = s[15]; -#else - x0 = le_load_word32((const uint8_t *)&(s[0])); - y0 = le_load_word32((const uint8_t *)&(s[1])); - x1 = le_load_word32((const uint8_t *)&(s[2])); - y1 = le_load_word32((const uint8_t *)&(s[3])); - x2 = le_load_word32((const uint8_t *)&(s[4])); - y2 = le_load_word32((const uint8_t *)&(s[5])); - x3 = le_load_word32((const uint8_t *)&(s[6])); - y3 = le_load_word32((const uint8_t *)&(s[7])); - x4 = le_load_word32((const uint8_t *)&(s[8])); - y4 = le_load_word32((const uint8_t *)&(s[9])); - x5 = le_load_word32((const uint8_t *)&(s[10])); - y5 = le_load_word32((const uint8_t *)&(s[11])); - x6 = le_load_word32((const uint8_t *)&(s[12])); - y6 = le_load_word32((const uint8_t *)&(s[13])); - x7 = le_load_word32((const uint8_t *)&(s[14])); - y7 = le_load_word32((const uint8_t *)&(s[15])); -#endif - - /* Perform all requested steps */ - for (step = 0; step < steps; ++step) { - /* Add round constants */ - y0 ^= sparkle_rc[step]; - y1 ^= step; - - /* ARXbox layer */ - alzette(x0, y0, RC_0); - alzette(x1, y1, RC_1); - alzette(x2, y2, RC_2); - alzette(x3, y3, RC_3); - alzette(x4, y4, RC_4); - alzette(x5, y5, RC_5); - alzette(x6, y6, RC_6); - alzette(x7, y7, RC_7); - - /* Linear layer */ - tx = x0 ^ x1 ^ x2 ^ x3; - ty = y0 ^ y1 ^ y2 ^ y3; - tx = leftRotate16(tx ^ (tx << 16)); - ty = leftRotate16(ty ^ (ty << 16)); - y4 ^= tx; - y5 ^= tx; - y6 ^= tx; - tx ^= y7; - y7 = y3; - y3 = y4 ^ y0; - y4 = y0; - y0 = y5 ^ y1; - y5 = y1; - y1 = y6 ^ y2; - y6 = y2; - y2 = tx ^ y7; - x4 ^= ty; - x5 ^= ty; - x6 ^= ty; - ty ^= x7; - x7 = x3; - x3 = x4 ^ x0; - x4 = x0; - x0 = x5 ^ x1; - x5 = x1; - x1 = x6 ^ x2; - x6 = x2; - x2 = ty ^ x7; - } - - /* Write the local variables back to the SPARKLE-512 state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s[0] = x0; - s[1] = y0; - s[2] = x1; - s[3] = y1; - s[4] = x2; - s[5] = y2; 
- s[6] = x3; - s[7] = y3; - s[8] = x4; - s[9] = y4; - s[10] = x5; - s[11] = y5; - s[12] = x6; - s[13] = y6; - s[14] = x7; - s[15] = y7; -#else - le_store_word32((uint8_t *)&(s[0]), x0); - le_store_word32((uint8_t *)&(s[1]), y0); - le_store_word32((uint8_t *)&(s[2]), x1); - le_store_word32((uint8_t *)&(s[3]), y1); - le_store_word32((uint8_t *)&(s[4]), x2); - le_store_word32((uint8_t *)&(s[5]), y2); - le_store_word32((uint8_t *)&(s[6]), x3); - le_store_word32((uint8_t *)&(s[7]), y3); - le_store_word32((uint8_t *)&(s[8]), x4); - le_store_word32((uint8_t *)&(s[9]), y4); - le_store_word32((uint8_t *)&(s[10]), x5); - le_store_word32((uint8_t *)&(s[11]), y5); - le_store_word32((uint8_t *)&(s[12]), x6); - le_store_word32((uint8_t *)&(s[13]), y6); - le_store_word32((uint8_t *)&(s[14]), x7); - le_store_word32((uint8_t *)&(s[15]), y7); -#endif -} - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/internal-sparkle.h b/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/internal-sparkle.h deleted file mode 100644 index fbdabc1..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/internal-sparkle.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SPARKLE_H -#define LW_INTERNAL_SPARKLE_H - -#include "internal-util.h" - -/** - * \file internal-sparkle.h - * \brief Internal implementation of the SPARKLE permutation. - * - * References: https://www.cryptolux.org/index.php/Sparkle - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the state for SPARKLE-256. - */ -#define SPARKLE_256_STATE_SIZE 8 - -/** - * \brief Size of the state for SPARKLE-384. - */ -#define SPARKLE_384_STATE_SIZE 12 - -/** - * \brief Size of the state for SPARKLE-512. - */ -#define SPARKLE_512_STATE_SIZE 16 - -/** - * \brief Performs the SPARKLE-256 permutation. - * - * \param s The words of the SPARKLE-256 state in little-endian byte order. - * \param steps The number of steps to perform, 7 or 10. - */ -void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps); - -/** - * \brief Performs the SPARKLE-384 permutation. - * - * \param s The words of the SPARKLE-384 state in little-endian byte order. - * \param steps The number of steps to perform, 7 or 11. - */ -void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps); - -/** - * \brief Performs the SPARKLE-512 permutation. 
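A minimal usage sketch, assuming only the prototypes and state-size constants declared in this header: the caller owns the state words and the permutation runs in place for the requested number of steps.

uint32_t state[SPARKLE_384_STATE_SIZE] = { 0 };  /* 12 little-endian state words */
sparkle_384(state, 7);                           /* 7 or 11 steps, per the declaration above */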
- * - * \param s The words of the SPARKLE-512 state in little-endian byte order. - * \param steps The number of steps to perform, 8 or 12. - */ -void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/internal-util.h b/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include <stdint.h> - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines.
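A minimal sketch of the little-endian helpers defined below, assuming a four-byte buffer; the test value is RC_0 from the SPARKLE sources earlier in this patch. The macros read and write byte by byte, so the result is the same on little- and big-endian hosts.

uint8_t buf[4];
le_store_word32(buf, 0xB7E15162);   /* buf now holds 0x62, 0x51, 0xE1, 0xB7 */
uint32_t w = le_load_word32(buf);   /* w == 0xB7E15162 again */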
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
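/*
 * Hedged self-check (illustrative, not part of the original header):
 * the composed AVR rotations above are meant to equal the generic
 * barrel-shifter forms, e.g. rotate-left-by-5 expressed as a left
 * rotate by 8 followed by three right rotates by 1.  Assumes the
 * generic leftRotate/rightRotate macros defined earlier in this file.
 */
#include <assert.h>
static void sketch_check_composed_rotate(void)
{
    uint32_t x = 0x80402010;
    uint32_t composed =
        rightRotate(rightRotate(rightRotate(leftRotate(x, 8), 1), 1), 1);
    assert(composed == leftRotate(x, 5));
    assert(leftRotate(x, 5) == ((x << 5) | (x >> 27)));
}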
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/sparkle.c b/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/sparkle.c deleted file mode 100644 index e2aa25a..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/sparkle.c +++ /dev/null @@ -1,1135 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "sparkle.h" -#include "internal-sparkle.h" -#include - -aead_cipher_t const schwaemm_256_128_cipher = { - "Schwaemm256-128", - SCHWAEMM_256_128_KEY_SIZE, - SCHWAEMM_256_128_NONCE_SIZE, - SCHWAEMM_256_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_256_128_aead_encrypt, - schwaemm_256_128_aead_decrypt -}; - -aead_cipher_t const schwaemm_192_192_cipher = { - "Schwaemm192-192", - SCHWAEMM_192_192_KEY_SIZE, - SCHWAEMM_192_192_NONCE_SIZE, - SCHWAEMM_192_192_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_192_192_aead_encrypt, - schwaemm_192_192_aead_decrypt -}; - -aead_cipher_t const schwaemm_128_128_cipher = { - "Schwaemm128-128", - SCHWAEMM_128_128_KEY_SIZE, - SCHWAEMM_128_128_NONCE_SIZE, - SCHWAEMM_128_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_128_128_aead_encrypt, - schwaemm_128_128_aead_decrypt -}; - -aead_cipher_t const schwaemm_256_256_cipher = { - "Schwaemm256-256", - SCHWAEMM_256_256_KEY_SIZE, - SCHWAEMM_256_256_NONCE_SIZE, - SCHWAEMM_256_256_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_256_256_aead_encrypt, - schwaemm_256_256_aead_decrypt -}; - -aead_hash_algorithm_t const esch_256_hash_algorithm = { - "Esch256", - sizeof(esch_256_hash_state_t), - ESCH_256_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - esch_256_hash, - (aead_hash_init_t)esch_256_hash_init, - (aead_hash_update_t)esch_256_hash_update, - (aead_hash_finalize_t)esch_256_hash_finalize, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const esch_384_hash_algorithm = { - "Esch384", - sizeof(esch_384_hash_state_t), - ESCH_384_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - esch_384_hash, - (aead_hash_init_t)esch_384_hash_init, - (aead_hash_update_t)esch_384_hash_update, - (aead_hash_finalize_t)esch_384_hash_finalize, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \def DOMAIN(value) - * \brief Build a domain separation value as a 32-bit word. - * - * \param value The base value. - * \return The domain separation value as a 32-bit word. - */ -#if defined(LW_UTIL_LITTLE_ENDIAN) -#define DOMAIN(value) (((uint32_t)(value)) << 24) -#else -#define DOMAIN(value) (value) -#endif - -/** - * \brief Rate at which bytes are processed by Schwaemm256-128. - */ -#define SCHWAEMM_256_128_RATE 32 - -/** - * \brief Pointer to the left of the state for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_RIGHT(s) \ - (SCHWAEMM_256_128_LEFT(s) + SCHWAEMM_256_128_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm256-128. - * - * \param s SPARKLE-384 state. - */ -#define schwaemm_256_128_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[4] ^ s[8]; \ - s[4] ^= t ^ s[8]; \ - t = s[1]; \ - s[1] = s[5] ^ s[9]; \ - s[5] ^= t ^ s[9]; \ - t = s[2]; \ - s[2] = s[6] ^ s[10]; \ - s[6] ^= t ^ s[10]; \ - t = s[3]; \ - s[3] = s[7] ^ s[11]; \ - s[7] ^= t ^ s[11]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm256-128. - * - * \param s SPARKLE-384 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
- */ -static void schwaemm_256_128_authenticate - (uint32_t s[SPARKLE_384_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_256_128_RATE) { - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); - sparkle_384(s, 7); - ad += SCHWAEMM_256_128_RATE; - adlen -= SCHWAEMM_256_128_RATE; - } - if (adlen == SCHWAEMM_256_128_RATE) { - s[11] ^= DOMAIN(0x05); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[11] ^= DOMAIN(0x04); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); -} - -int schwaemm_256_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - uint8_t block[SCHWAEMM_256_128_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_256_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_128_LEFT(s), npub, SCHWAEMM_256_128_NONCE_SIZE); - memcpy(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_128_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - sparkle_384(s, 7); - memcpy(c, block, SCHWAEMM_256_128_RATE); - c += SCHWAEMM_256_128_RATE; - m += SCHWAEMM_256_128_RATE; - mlen -= SCHWAEMM_256_128_RATE; - } - if (mlen == SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); - s[11] ^= DOMAIN(0x07); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - memcpy(c, block, SCHWAEMM_256_128_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[11] ^= DOMAIN(0x06); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_384(s, 11); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_TAG_SIZE); - return 0; -} - -int schwaemm_256_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_256_128_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_256_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_128_LEFT(s), npub, SCHWAEMM_256_128_NONCE_SIZE); - memcpy(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_128_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_256_128_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - sparkle_384(s, 7); - c += SCHWAEMM_256_128_RATE; - m += SCHWAEMM_256_128_RATE; - clen -= SCHWAEMM_256_128_RATE; - } - if (clen == SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); - s[11] ^= DOMAIN(0x07); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[11] ^= DOMAIN(0x06); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_256_128_RIGHT(s), c, SCHWAEMM_256_128_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Schwaemm192-192. - */ -#define SCHWAEMM_192_192_RATE 24 - -/** - * \brief Pointer to the left of the state for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_RIGHT(s) \ - (SCHWAEMM_192_192_LEFT(s) + SCHWAEMM_192_192_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm192-192. - * - * \param s SPARKLE-384 state. - */ -#define schwaemm_192_192_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[3] ^ s[6]; \ - s[3] ^= t ^ s[9]; \ - t = s[1]; \ - s[1] = s[4] ^ s[7]; \ - s[4] ^= t ^ s[10]; \ - t = s[2]; \ - s[2] = s[5] ^ s[8]; \ - s[5] ^= t ^ s[11]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm192-192. - * - * \param s SPARKLE-384 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
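/*
 * Hedged sketch: aead_check_tag() used above is defined in aead-common.c,
 * which is not shown in this hunk.  The call passes the recovered
 * plaintext and its length as well as both tags, which suggests a helper
 * that compares the tags in constant time and can wipe the plaintext if
 * the check fails.  A typical constant-time comparison of that kind
 * (illustrative only, not the library's actual implementation):
 */
static int sketch_ct_tag_equal
    (const unsigned char *computed, const unsigned char *received,
     unsigned len)
{
    unsigned char diff = 0;
    while (len-- > 0)
        diff |= *computed++ ^ *received++; /* accumulate, never branch   */
    return diff == 0 ? 0 : -1;             /* 0 when the tags match      */
}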
- */ -static void schwaemm_192_192_authenticate - (uint32_t s[SPARKLE_384_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_192_192_RATE) { - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); - sparkle_384(s, 7); - ad += SCHWAEMM_192_192_RATE; - adlen -= SCHWAEMM_192_192_RATE; - } - if (adlen == SCHWAEMM_192_192_RATE) { - s[11] ^= DOMAIN(0x09); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[11] ^= DOMAIN(0x08); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); -} - -int schwaemm_192_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - uint8_t block[SCHWAEMM_192_192_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_192_192_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_192_192_LEFT(s), npub, SCHWAEMM_192_192_NONCE_SIZE); - memcpy(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_192_192_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - sparkle_384(s, 7); - memcpy(c, block, SCHWAEMM_192_192_RATE); - c += SCHWAEMM_192_192_RATE; - m += SCHWAEMM_192_192_RATE; - mlen -= SCHWAEMM_192_192_RATE; - } - if (mlen == SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); - s[11] ^= DOMAIN(0x0B); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - memcpy(c, block, SCHWAEMM_192_192_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[11] ^= DOMAIN(0x0A); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_384(s, 11); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_TAG_SIZE); - return 0; -} - -int schwaemm_192_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_192_192_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_192_192_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_192_192_LEFT(s), npub, SCHWAEMM_192_192_NONCE_SIZE); - memcpy(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_192_192_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_192_192_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - sparkle_384(s, 7); - c += SCHWAEMM_192_192_RATE; - m += SCHWAEMM_192_192_RATE; - clen -= SCHWAEMM_192_192_RATE; - } - if (clen == SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); - s[11] ^= DOMAIN(0x0B); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[11] ^= DOMAIN(0x0A); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_192_192_RIGHT(s), c, SCHWAEMM_192_192_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Schwaemm128-128. - */ -#define SCHWAEMM_128_128_RATE 16 - -/** - * \brief Pointer to the left of the state for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_RIGHT(s) \ - (SCHWAEMM_128_128_LEFT(s) + SCHWAEMM_128_128_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm128-128. - * - * \param s SPARKLE-256 state. - */ -#define schwaemm_128_128_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[2] ^ s[4]; \ - s[2] ^= t ^ s[6]; \ - t = s[1]; \ - s[1] = s[3] ^ s[5]; \ - s[3] ^= t ^ s[7]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm128-128. - * - * \param s SPARKLE-256 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
- */ -static void schwaemm_128_128_authenticate - (uint32_t s[SPARKLE_256_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_128_128_RATE) { - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); - sparkle_256(s, 7); - ad += SCHWAEMM_128_128_RATE; - adlen -= SCHWAEMM_128_128_RATE; - } - if (adlen == SCHWAEMM_128_128_RATE) { - s[7] ^= DOMAIN(0x05); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[7] ^= DOMAIN(0x04); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_256(s, 10); -} - -int schwaemm_128_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_256_STATE_SIZE]; - uint8_t block[SCHWAEMM_128_128_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_128_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_128_128_LEFT(s), npub, SCHWAEMM_128_128_NONCE_SIZE); - memcpy(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_KEY_SIZE); - sparkle_256(s, 10); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_128_128_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - sparkle_256(s, 7); - memcpy(c, block, SCHWAEMM_128_128_RATE); - c += SCHWAEMM_128_128_RATE; - m += SCHWAEMM_128_128_RATE; - mlen -= SCHWAEMM_128_128_RATE; - } - if (mlen == SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); - s[7] ^= DOMAIN(0x07); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - memcpy(c, block, SCHWAEMM_128_128_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[7] ^= DOMAIN(0x06); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_256(s, 10); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_TAG_SIZE); - return 0; -} - -int schwaemm_128_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_256_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_128_128_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_128_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_128_128_LEFT(s), npub, SCHWAEMM_128_128_NONCE_SIZE); - memcpy(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_KEY_SIZE); - sparkle_256(s, 10); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_128_128_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_128_128_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - sparkle_256(s, 7); - c += SCHWAEMM_128_128_RATE; - m += SCHWAEMM_128_128_RATE; - clen -= SCHWAEMM_128_128_RATE; - } - if (clen == SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); - s[7] ^= DOMAIN(0x07); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[7] ^= DOMAIN(0x06); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_256(s, 10); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_128_128_RIGHT(s), c, SCHWAEMM_128_128_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Schwaemm256-256. - */ -#define SCHWAEMM_256_256_RATE 32 - -/** - * \brief Pointer to the left of the state for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_RIGHT(s) \ - (SCHWAEMM_256_256_LEFT(s) + SCHWAEMM_256_256_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm256-256. - * - * \param s SPARKLE-512 state. - */ -#define schwaemm_256_256_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[4] ^ s[8]; \ - s[4] ^= t ^ s[12]; \ - t = s[1]; \ - s[1] = s[5] ^ s[9]; \ - s[5] ^= t ^ s[13]; \ - t = s[2]; \ - s[2] = s[6] ^ s[10]; \ - s[6] ^= t ^ s[14]; \ - t = s[3]; \ - s[3] = s[7] ^ s[11]; \ - s[7] ^= t ^ s[15]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm256-256. - * - * \param s SPARKLE-512 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
- */ -static void schwaemm_256_256_authenticate - (uint32_t s[SPARKLE_512_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_256_256_RATE) { - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); - sparkle_512(s, 8); - ad += SCHWAEMM_256_256_RATE; - adlen -= SCHWAEMM_256_256_RATE; - } - if (adlen == SCHWAEMM_256_256_RATE) { - s[15] ^= DOMAIN(0x11); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[15] ^= DOMAIN(0x10); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_512(s, 12); -} - -int schwaemm_256_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_512_STATE_SIZE]; - uint8_t block[SCHWAEMM_256_256_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_256_256_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_256_LEFT(s), npub, SCHWAEMM_256_256_NONCE_SIZE); - memcpy(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_KEY_SIZE); - sparkle_512(s, 12); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_256_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - sparkle_512(s, 8); - memcpy(c, block, SCHWAEMM_256_256_RATE); - c += SCHWAEMM_256_256_RATE; - m += SCHWAEMM_256_256_RATE; - mlen -= SCHWAEMM_256_256_RATE; - } - if (mlen == SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); - s[15] ^= DOMAIN(0x13); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - memcpy(c, block, SCHWAEMM_256_256_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[15] ^= DOMAIN(0x12); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_512(s, 12); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_TAG_SIZE); - return 0; -} - -int schwaemm_256_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_512_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_256_256_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_256_256_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_256_LEFT(s), npub, SCHWAEMM_256_256_NONCE_SIZE); - memcpy(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_KEY_SIZE); - sparkle_512(s, 12); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_256_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_256_256_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - sparkle_512(s, 8); - c += SCHWAEMM_256_256_RATE; - m += SCHWAEMM_256_256_RATE; - clen -= SCHWAEMM_256_256_RATE; - } - if (clen == SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); - s[15] ^= DOMAIN(0x13); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[15] ^= DOMAIN(0x12); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_512(s, 12); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_256_256_RIGHT(s), c, SCHWAEMM_256_256_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Esch256. - */ -#define ESCH_256_RATE 16 - -/** - * \brief Perform the M3 step for Esch256 to mix the input with the state. - * - * \param s SPARKLE-384 state. - * \param block Block of input data that has been padded to the rate. - * \param domain Domain separator for this phase. - */ -#define esch_256_m3(s, block, domain) \ - do { \ - uint32_t tx = (block)[0] ^ (block)[2]; \ - uint32_t ty = (block)[1] ^ (block)[3]; \ - tx = leftRotate16(tx ^ (tx << 16)); \ - ty = leftRotate16(ty ^ (ty << 16)); \ - s[0] ^= (block)[0] ^ ty; \ - s[1] ^= (block)[1] ^ tx; \ - s[2] ^= (block)[2] ^ ty; \ - s[3] ^= (block)[3] ^ tx; \ - if ((domain) != 0) \ - s[5] ^= DOMAIN(domain); \ - s[4] ^= ty; \ - s[5] ^= tx; \ - } while (0) - -/** @cond esch_256 */ - -/** - * \brief Word-based state for the Esch256 incremental hash mode. 
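/*
 * Hedged note on esch_256_m3() above: for any 32-bit x,
 * leftRotate16(x ^ (x << 16)) == leftRotate16(x) ^ (x & 0xffff),
 * which appears to be the "ell" mixing function from the SPARKLE/Esch
 * specification applied to the XORed block columns tx and ty.
 * A small self-check (assumes leftRotate16 from internal-util.h):
 */
#include <assert.h>
static void sketch_check_ell(void)
{
    uint32_t x = 0xDEADBEEF;
    assert(leftRotate16(x ^ (x << 16)) ==
           (leftRotate16(x) ^ (x & 0xFFFFu)));
}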
- */ -typedef union -{ - struct { - uint32_t state[SPARKLE_384_STATE_SIZE]; - uint32_t block[4]; - unsigned char count; - } s; - unsigned long long align; - -} esch_256_hash_state_wt; - -/** @endcond */ - -int esch_256_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - uint32_t block[ESCH_256_RATE / 4]; - memset(s, 0, sizeof(s)); - while (inlen > ESCH_256_RATE) { - memcpy(block, in, ESCH_256_RATE); - esch_256_m3(s, block, 0x00); - sparkle_384(s, 7); - in += ESCH_256_RATE; - inlen -= ESCH_256_RATE; - } - if (inlen == ESCH_256_RATE) { - memcpy(block, in, ESCH_256_RATE); - esch_256_m3(s, block, 0x02); - } else { - unsigned temp = (unsigned)inlen; - memcpy(block, in, temp); - ((unsigned char *)block)[temp] = 0x80; - memset(((unsigned char *)block) + temp + 1, 0, - ESCH_256_RATE - temp - 1); - esch_256_m3(s, block, 0x01); - } - sparkle_384(s, 11); - memcpy(out, s, ESCH_256_RATE); - sparkle_384(s, 7); - memcpy(out + ESCH_256_RATE, s, ESCH_256_RATE); - return 0; -} - -void esch_256_hash_init(esch_256_hash_state_t *state) -{ - memset(state, 0, sizeof(esch_256_hash_state_t)); -} - -void esch_256_hash_update - (esch_256_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - esch_256_hash_state_wt *st = (esch_256_hash_state_wt *)state; - unsigned temp; - while (inlen > 0) { - if (st->s.count == ESCH_256_RATE) { - esch_256_m3(st->s.state, st->s.block, 0x00); - sparkle_384(st->s.state, 7); - st->s.count = 0; - } - temp = ESCH_256_RATE - st->s.count; - if (temp > inlen) - temp = (unsigned)inlen; - memcpy(((unsigned char *)(st->s.block)) + st->s.count, in, temp); - st->s.count += temp; - in += temp; - inlen -= temp; - } -} - -void esch_256_hash_finalize - (esch_256_hash_state_t *state, unsigned char *out) -{ - esch_256_hash_state_wt *st = (esch_256_hash_state_wt *)state; - - /* Pad and process the last block */ - if (st->s.count == ESCH_256_RATE) { - esch_256_m3(st->s.state, st->s.block, 0x02); - } else { - unsigned temp = st->s.count; - ((unsigned char *)(st->s.block))[temp] = 0x80; - memset(((unsigned char *)(st->s.block)) + temp + 1, 0, - ESCH_256_RATE - temp - 1); - esch_256_m3(st->s.state, st->s.block, 0x01); - } - sparkle_384(st->s.state, 11); - - /* Generate the final hash value */ - memcpy(out, st->s.state, ESCH_256_RATE); - sparkle_384(st->s.state, 7); - memcpy(out + ESCH_256_RATE, st->s.state, ESCH_256_RATE); -} - -/** - * \brief Rate at which bytes are processed by Esch384. - */ -#define ESCH_384_RATE 16 - -/** - * \brief Perform the M4 step for Esch384 to mix the input with the state. - * - * \param s SPARKLE-512 state. - * \param block Block of input data that has been padded to the rate. - * \param domain Domain separator for this phase. - */ -#define esch_384_m4(s, block, domain) \ - do { \ - uint32_t tx = block[0] ^ block[2]; \ - uint32_t ty = block[1] ^ block[3]; \ - tx = leftRotate16(tx ^ (tx << 16)); \ - ty = leftRotate16(ty ^ (ty << 16)); \ - s[0] ^= block[0] ^ ty; \ - s[1] ^= block[1] ^ tx; \ - s[2] ^= block[2] ^ ty; \ - s[3] ^= block[3] ^ tx; \ - if ((domain) != 0) \ - s[7] ^= DOMAIN(domain); \ - s[4] ^= ty; \ - s[5] ^= tx; \ - s[6] ^= ty; \ - s[7] ^= tx; \ - } while (0) - -/** @cond esch_384 */ - -/** - * \brief Word-based state for the Esch384 incremental hash mode. 
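/*
 * Usage sketch (illustrative buffers only): the incremental
 * esch_256_hash_init/update/finalize API above is expected to yield the
 * same 32-byte digest as the one-shot esch_256_hash() on the same input.
 * Assumes <assert.h> and <string.h>.
 */
static void sketch_esch_256_usage
    (const unsigned char *msg, unsigned long long len)
{
    unsigned char one_shot[ESCH_256_HASH_SIZE];
    unsigned char incremental[ESCH_256_HASH_SIZE];
    esch_256_hash_state_t st;

    esch_256_hash(one_shot, msg, len);          /* one-shot hashing   */

    esch_256_hash_init(&st);                    /* incremental hashing */
    esch_256_hash_update(&st, msg, len);
    esch_256_hash_finalize(&st, incremental);

    assert(memcmp(one_shot, incremental, ESCH_256_HASH_SIZE) == 0);
}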
- */ -typedef union -{ - struct { - uint32_t state[SPARKLE_512_STATE_SIZE]; - uint32_t block[4]; - unsigned char count; - } s; - unsigned long long align; - -} esch_384_hash_state_wt; - -/** @endcond */ - -int esch_384_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - uint32_t s[SPARKLE_512_STATE_SIZE]; - uint32_t block[ESCH_256_RATE / 4]; - memset(s, 0, sizeof(s)); - while (inlen > ESCH_384_RATE) { - memcpy(block, in, ESCH_384_RATE); - esch_384_m4(s, block, 0x00); - sparkle_512(s, 8); - in += ESCH_384_RATE; - inlen -= ESCH_384_RATE; - } - if (inlen == ESCH_384_RATE) { - memcpy(block, in, ESCH_384_RATE); - esch_384_m4(s, block, 0x02); - } else { - unsigned temp = (unsigned)inlen; - memcpy(block, in, temp); - ((unsigned char *)block)[temp] = 0x80; - memset(((unsigned char *)block) + temp + 1, 0, - ESCH_384_RATE - temp - 1); - esch_384_m4(s, block, 0x01); - } - sparkle_512(s, 12); - memcpy(out, s, ESCH_384_RATE); - sparkle_512(s, 8); - memcpy(out + ESCH_384_RATE, s, ESCH_384_RATE); - sparkle_512(s, 8); - memcpy(out + ESCH_384_RATE * 2, s, ESCH_384_RATE); - return 0; -} - -void esch_384_hash_init(esch_384_hash_state_t *state) -{ - memset(state, 0, sizeof(esch_384_hash_state_t)); -} - -void esch_384_hash_update - (esch_384_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - esch_384_hash_state_wt *st = (esch_384_hash_state_wt *)state; - unsigned temp; - while (inlen > 0) { - if (st->s.count == ESCH_384_RATE) { - esch_384_m4(st->s.state, st->s.block, 0x00); - sparkle_512(st->s.state, 8); - st->s.count = 0; - } - temp = ESCH_384_RATE - st->s.count; - if (temp > inlen) - temp = (unsigned)inlen; - memcpy(((unsigned char *)(st->s.block)) + st->s.count, in, temp); - st->s.count += temp; - in += temp; - inlen -= temp; - } -} - -void esch_384_hash_finalize - (esch_384_hash_state_t *state, unsigned char *out) -{ - esch_384_hash_state_wt *st = (esch_384_hash_state_wt *)state; - - /* Pad and process the last block */ - if (st->s.count == ESCH_384_RATE) { - esch_384_m4(st->s.state, st->s.block, 0x02); - } else { - unsigned temp = st->s.count; - ((unsigned char *)(st->s.block))[temp] = 0x80; - memset(((unsigned char *)(st->s.block)) + temp + 1, 0, - ESCH_384_RATE - temp - 1); - esch_384_m4(st->s.state, st->s.block, 0x01); - } - sparkle_512(st->s.state, 12); - - /* Generate the final hash value */ - memcpy(out, st->s.state, ESCH_384_RATE); - sparkle_512(st->s.state, 8); - memcpy(out + ESCH_384_RATE, st->s.state, ESCH_384_RATE); - sparkle_512(st->s.state, 8); - memcpy(out + ESCH_384_RATE * 2, st->s.state, ESCH_384_RATE); -} diff --git a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/sparkle.h b/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/sparkle.h deleted file mode 100644 index dd0999e..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys-avr/sparkle.h +++ /dev/null @@ -1,515 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SPARKLE_H -#define LWCRYPTO_SPARKLE_H - -#include "aead-common.h" - -/** - * \file sparkle.h - * \brief Encryption and hash algorithms based on the SPARKLE permutation. - * - * SPARKLE is a family of encryption and hash algorithms that are based - * around the SPARKLE permutation. There are three versions of the - * permutation with 256-bit, 384-bit, and 512-bit state sizes. - * The algorithms in the family are: - * - * \li Schwaemm256-128 with a 128-bit key, a 256-bit nonce, and a 128-bit tag. - * This is the primary encryption algorithm in the family. - * \li Schwaemm192-192 with a 192-bit key, a 192-bit nonce, and a 192-bit tag. - * \li Schwaemm128-128 with a 128-bit key, a 128-bit nonce, and a 128-bit tag. - * \li Schwaemm256-256 with a 256-bit key, a 256-bit nonce, and a 256-bit tag. - * \li Esch256 hash algorithm with a 256-bit digest output. This is the - * primary hash algorithm in the family. - * \li Esch384 hash algorithm with a 384-bit digest output. - * - * References: https://www.cryptolux.org/index.php/Sparkle - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_NONCE_SIZE 32 - -/** - * \brief Size of the key for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_KEY_SIZE 24 - -/** - * \brief Size of the authentication tag for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_TAG_SIZE 24 - -/** - * \brief Size of the nonce for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_NONCE_SIZE 24 - -/** - * \brief Size of the key for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_NONCE_SIZE 16 - -/** - * \brief Size of the key for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_TAG_SIZE 32 - -/** - * \brief Size of the nonce for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_NONCE_SIZE 32 - -/** - * \brief Size of the hash output for Esch256. 
- */ -#define ESCH_256_HASH_SIZE 32 - -/** - * \brief Size of the hash output for Esch384. - */ -#define ESCH_384_HASH_SIZE 48 - -/** - * \brief Meta-information block for the Schwaemm256-128 cipher. - */ -extern aead_cipher_t const schwaemm_256_128_cipher; - -/** - * \brief Meta-information block for the Schwaemm192-192 cipher. - */ -extern aead_cipher_t const schwaemm_192_192_cipher; - -/** - * \brief Meta-information block for the Schwaemm128-128 cipher. - */ -extern aead_cipher_t const schwaemm_128_128_cipher; - -/** - * \brief Meta-information block for the Schwaemm256-256 cipher. - */ -extern aead_cipher_t const schwaemm_256_256_cipher; - -/** - * \brief Meta-information block for the Esch256 hash algorithm. - */ -extern aead_hash_algorithm_t const esch_256_hash_algorithm; - -/** - * \brief Meta-information block for the Esch384 hash algorithm. - */ -extern aead_hash_algorithm_t const esch_384_hash_algorithm; - -/** - * \brief State information for the Esch256 incremental hash mode. - */ -typedef union -{ - struct { - unsigned char state[48]; /**< Current hash state */ - unsigned char block[16]; /**< Partial input data block */ - unsigned char count; /**< Number of bytes in the current block */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} esch_256_hash_state_t; - -/** - * \brief State information for the Esch384 incremental hash mode. - */ -typedef union -{ - struct { - unsigned char state[64]; /**< Current hash state */ - unsigned char block[16]; /**< Partial input data block */ - unsigned char count; /**< Number of bytes in the current block */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} esch_384_hash_state_t; - -/** - * \brief Encrypts and authenticates a packet with Schwaemm256-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 32 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa schwaemm_256_128_aead_decrypt() - */ -int schwaemm_256_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm256-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 32 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_256_128_aead_encrypt() - */ -int schwaemm_256_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Schwaemm192-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 24 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 24 bytes in length. - * \param k Points to the 24 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa schwaemm_192_192_aead_decrypt() - */ -int schwaemm_192_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm192-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 24 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 24 bytes in length. - * \param k Points to the 24 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_192_192_aead_encrypt() - */ -int schwaemm_192_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Schwaemm128-128. - * - * \param c Buffer to receive the output. 
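/*
 * Usage sketch for the Schwaemm256-128 API declared above (hedged; the
 * all-zero key and nonce and the 4-byte message are illustrative only).
 * Encryption appends the 16-byte tag; decryption returns 0 only when the
 * tag verifies.  Assumes <string.h> for memcmp.
 */
static int sketch_schwaemm_256_128_roundtrip(void)
{
    unsigned char key[SCHWAEMM_256_128_KEY_SIZE] = {0};
    unsigned char npub[SCHWAEMM_256_128_NONCE_SIZE] = {0};
    unsigned char msg[4] = {'t', 'e', 's', 't'};
    unsigned char ct[sizeof(msg) + SCHWAEMM_256_128_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long clen, mlen;

    if (schwaemm_256_128_aead_encrypt(ct, &clen, msg, sizeof(msg),
                                      0, 0, 0, npub, key) != 0)
        return -1;
    if (schwaemm_256_128_aead_decrypt(pt, &mlen, 0, ct, clen,
                                      0, 0, npub, key) != 0)
        return -1;
    return memcmp(pt, msg, sizeof(msg)) == 0 ? 0 : -1;
}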
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa schwaemm_128_128_aead_decrypt() - */ -int schwaemm_128_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm128-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_128_128_aead_encrypt() - */ -int schwaemm_128_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Schwaemm256-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa schwaemm_256_256_aead_decrypt() - */ -int schwaemm_256_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm256-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_256_256_aead_encrypt() - */ -int schwaemm_256_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with Esch256 to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * ESCH_256_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int esch_256_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an Esch256 hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa esch_256_hash_update(), esch_256_hash_finalize(), esch_256_hash() - */ -void esch_256_hash_init(esch_256_hash_state_t *state); - -/** - * \brief Updates an Esch256 state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa esch_256_hash_init(), esch_256_hash_finalize() - */ -void esch_256_hash_update - (esch_256_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an Esch256 hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 32-byte hash value. - * - * \sa esch_256_hash_init(), esch_256_hash_update() - */ -void esch_256_hash_finalize - (esch_256_hash_state_t *state, unsigned char *out); - -/** - * \brief Hashes a block of input data with Esch384 to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * ESCH_384_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. 
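The init/update/finalize trio declared above is the streaming counterpart of the one-shot esch_256_hash(). A hedged sketch of feeding a message in two pieces (helper name invented; assumes the declarations above):

    /* Illustrative sketch only -- assumes the sparkle.h declarations above. */
    #include "sparkle.h"

    static void esch256_two_pieces(const unsigned char *data, unsigned long long len,
                                   unsigned char out[ESCH_256_HASH_SIZE])
    {
        esch_256_hash_state_t state;
        unsigned long long half = len / 2;

        esch_256_hash_init(&state);
        esch_256_hash_update(&state, data, half);               /* first part */
        esch_256_hash_update(&state, data + half, len - half);  /* remainder */
        esch_256_hash_finalize(&state, out);                    /* 32-byte digest */
    }

The result is the same as a single esch_256_hash() call over the whole buffer, which is why esch_256_hash_state_t keeps the partial input block and its byte count between update calls.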
- * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int esch_384_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an Esch384 hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa esch_384_hash_update(), esch_384_hash_finalize(), esch_384_hash() - */ -void esch_384_hash_init(esch_384_hash_state_t *state); - -/** - * \brief Updates an Esch384 state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa esch_384_hash_init(), esch_384_hash_finalize() - */ -void esch_384_hash_update - (esch_384_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an Esch384 hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 48-byte hash value. - * - * \sa esch_384_hash_init(), esch_384_hash_update() - */ -void esch_384_hash_finalize - (esch_384_hash_state_t *state, unsigned char *out); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys/internal-sparkle-avr.S b/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys/internal-sparkle-avr.S new file mode 100644 index 0000000..753ea2f --- /dev/null +++ b/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys/internal-sparkle-avr.S @@ -0,0 +1,2887 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global sparkle_256 + .type sparkle_256, @function +sparkle_256: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + push r22 + ld r22,Z + ldd r23,Z+1 + ldd r26,Z+2 + ldd r27,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + rcall 129f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,1 + eor r8,r18 + rcall 129f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,2 + eor r8,r18 + rcall 129f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,3 + eor r8,r18 + rcall 129f + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,4 + eor r8,r18 + rcall 129f + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,5 + eor r8,r18 + rcall 129f + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,6 + eor r8,r18 + rcall 129f + pop r18 + cpi r18,7 + brne 5094f + rjmp 615f +5094: + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,7 + eor r8,r18 + rcall 129f + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,8 + eor 
r8,r18 + rcall 129f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,9 + eor r8,r18 + rcall 129f + rjmp 615f +129: + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + movw r12,r22 + movw r14,r26 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + movw r24,r4 + movw r16,r6 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r28,Z+24 + ldd r29,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc 
r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + eor r14,r12 + eor r15,r13 + eor r16,r24 + eor r17,r25 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + eor r14,r8 + eor r15,r9 + eor r12,r10 + eor r13,r11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + std Z+20,r18 + std Z+21,r19 + std Z+22,r20 + std Z+23,r21 + movw r18,r4 + movw r20,r6 + movw r4,r14 + movw r6,r12 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + movw r8,r18 + movw r10,r20 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + eor r16,r28 + eor r17,r29 + eor r24,r2 + eor r25,r3 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + std Z+24,r28 + std Z+25,r29 + std Z+26,r2 + std Z+27,r3 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + movw r14,r22 + movw r12,r26 + eor r14,r18 + eor r15,r19 + eor r12,r20 + eor r13,r21 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + movw r22,r16 + movw r26,r24 + eor r22,r28 + eor r23,r29 + eor r26,r2 + eor r27,r3 + movw r28,r14 + movw r2,r12 + ret +615: + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sparkle_256, .-sparkle_256 + + .text +.global sparkle_384 + .type sparkle_384, @function +sparkle_384: + push r28 + push r29 + push r2 + push r3 + 
push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + push r22 + ld r22,Z + ldd r23,Z+1 + ldd r26,Z+2 + ldd r27,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + rcall 140f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,1 + eor r8,r18 + rcall 140f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,2 + eor r8,r18 + rcall 140f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,3 + eor r8,r18 + rcall 140f + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,4 + eor r8,r18 + rcall 140f + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,5 + eor r8,r18 + rcall 140f + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,6 + eor r8,r18 + rcall 140f + pop r18 + cpi r18,7 + brne 5094f + rjmp 886f +5094: + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,7 + eor r8,r18 + rcall 140f + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,8 + eor r8,r18 + rcall 140f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,9 + eor r8,r18 + rcall 140f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,10 + eor r8,r18 + rcall 140f + rjmp 886f +140: + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr 
r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + movw r12,r22 + movw r14,r26 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + movw r24,r4 + movw r16,r6 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r28,Z+24 + ldd r29,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + 
eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r22 + std Z+17,r23 + std Z+18,r26 + std Z+19,r27 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r28 + std Z+25,r29 + std Z+26,r2 + std Z+27,r3 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + eor r12,r22 + eor r13,r23 + eor r14,r26 + eor r15,r27 + eor r24,r4 + eor r25,r5 + eor r16,r6 + eor r17,r7 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r28,Z+40 + ldd r29,Z+41 + ldd r2,Z+42 + ldd r3,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + eor r14,r12 + eor r15,r13 + eor r16,r24 + eor r17,r25 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + eor r14,r8 + eor r15,r9 + eor r12,r10 + eor r13,r11 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + ldd r0,Z+4 + eor r18,r0 + ldd r0,Z+5 + eor r19,r0 + ldd r0,Z+6 + eor r20,r0 + ldd r0,Z+7 
+ eor r21,r0 + std Z+20,r18 + std Z+21,r19 + std Z+22,r20 + std Z+23,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Z+28,r18 + std Z+29,r19 + std Z+30,r20 + std Z+31,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + std Z+36,r18 + std Z+37,r19 + std Z+38,r20 + std Z+39,r21 + eor r8,r14 + eor r9,r15 + eor r10,r12 + eor r11,r13 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + eor r18,r16 + eor r19,r17 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + eor r16,r28 + eor r17,r29 + eor r24,r2 + eor r25,r3 + ldd r28,Z+16 + ldd r29,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + std Z+40,r28 + std Z+41,r29 + std Z+42,r2 + std Z+43,r3 + ld r14,Z + ldd r15,Z+1 + ldd r12,Z+2 + ldd r13,Z+3 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + std Z+24,r14 + std Z+25,r15 + std Z+26,r12 + std Z+27,r13 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + std Z+32,r18 + std Z+33,r19 + std Z+34,r20 + std Z+35,r21 + eor r28,r16 + eor r29,r17 + eor r2,r24 + eor r3,r25 + ret +886: + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sparkle_384, .-sparkle_384 + + .text +.global sparkle_512 + .type sparkle_512, @function +sparkle_512: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + push r22 + ld r22,Z + ldd r23,Z+1 + ldd r26,Z+2 + ldd r27,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + rcall 151f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,1 + eor r8,r18 + rcall 151f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,2 + eor r8,r18 + rcall 151f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,3 + eor r8,r18 + rcall 151f + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,4 + eor r8,r18 + rcall 151f + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,5 + eor r8,r18 + rcall 151f + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,6 + eor r8,r18 + rcall 151f + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,7 + eor r8,r18 + rcall 151f + pop r18 + cpi r18,8 + brne 5105f + rjmp 1189f +5105: + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,8 + eor r8,r18 + rcall 151f + ldi r18,128 + ldi r19,88 + ldi 
r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,9 + eor r8,r18 + rcall 151f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,10 + eor r8,r18 + rcall 151f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,11 + eor r8,r18 + rcall 151f + rjmp 1189f +151: + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + movw r12,r22 + movw r14,r26 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + movw r24,r4 + movw r16,r6 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r28,Z+24 + ldd r29,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,86 
+ ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r22 + std Z+17,r23 + std Z+18,r26 + std Z+19,r27 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r28 + std Z+25,r29 + std Z+26,r2 + std Z+27,r3 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + eor r12,r22 + eor r13,r23 + eor r14,r26 + eor r15,r27 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + eor r24,r4 + eor r25,r5 + eor r16,r6 + eor r17,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r28,Z+40 + ldd r29,Z+41 + ldd r2,Z+42 + ldd r3,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw 
r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + std Z+32,r22 + std Z+33,r23 + std Z+34,r26 + std Z+35,r27 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r28 + std Z+41,r29 + std Z+42,r2 + std Z+43,r3 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + ldd r22,Z+48 + ldd r23,Z+49 + ldd r26,Z+50 + ldd r27,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r28,Z+56 + ldd r29,Z+57 + ldd r2,Z+58 + ldd r3,Z+59 + ldd r8,Z+60 + ldd r9,Z+61 + ldd r10,Z+62 + ldd r11,Z+63 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror 
r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + eor r14,r12 + eor r15,r13 + eor r16,r24 + eor r17,r25 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + ldd r18,Z+44 + ldd r19,Z+45 + ldd r20,Z+46 + ldd r21,Z+47 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + eor r14,r8 + eor r15,r9 + eor r12,r10 + eor r13,r11 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Z+60,r8 + std Z+61,r9 + std Z+62,r10 + std Z+63,r11 + ldd r8,Z+4 + ldd r9,Z+5 + ldd r10,Z+6 + ldd r11,Z+7 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + std Z+28,r4 + std Z+29,r5 + std Z+30,r6 + std Z+31,r7 + std Z+36,r8 + std Z+37,r9 + std Z+38,r10 + std Z+39,r11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + ldd r8,Z+52 + ldd r9,Z+53 + ldd r10,Z+54 + ldd r11,Z+55 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + ldd r0,Z+60 + eor r14,r0 + ldd r0,Z+61 + eor r15,r0 + ldd r0,Z+62 + eor r12,r0 + ldd r0,Z+63 + eor r13,r0 + std Z+20,r14 + std Z+21,r15 + std Z+22,r12 + std Z+23,r13 + movw r4,r18 + movw r6,r20 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + std Z+48,r22 + std Z+49,r23 + std Z+50,r26 + std Z+51,r27 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + eor r18,r16 + eor r19,r17 + eor r20,r24 + eor r21,r25 + eor r16,r28 + eor r17,r29 + eor r24,r2 + eor r25,r3 + ldd r14,Z+24 + ldd r15,Z+25 + ldd r12,Z+26 + ldd r13,Z+27 + std Z+56,r14 + std Z+57,r15 + std Z+58,r12 + std Z+59,r13 + ld r14,Z + ldd r15,Z+1 + ldd r12,Z+2 + ldd r13,Z+3 + eor r22,r14 + eor r23,r15 + eor r26,r12 + eor r27,r13 + std Z+24,r22 + std Z+25,r23 + std Z+26,r26 + std Z+27,r27 + std Z+32,r14 + std Z+33,r15 + std Z+34,r12 + std Z+35,r13 + ldd r14,Z+8 + ldd r15,Z+9 + ldd r12,Z+10 + ldd r13,Z+11 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + movw r22,r18 + movw r26,r20 + std Z+40,r14 + std Z+41,r15 + std Z+42,r12 + std Z+43,r13 + ldd r28,Z+48 + ldd r29,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r14,Z+16 + ldd r15,Z+17 + ldd r12,Z+18 + ldd r13,Z+19 + eor r28,r14 + eor r29,r15 + eor r2,r12 + eor r3,r13 + std Z+48,r14 + std Z+49,r15 + std Z+50,r12 + std Z+51,r13 + ldd r0,Z+56 + eor r16,r0 + ldd r0,Z+57 + eor r17,r0 + ldd r0,Z+58 + eor r24,r0 + ldd r0,Z+59 + eor r25,r0 + std Z+16,r16 + std Z+17,r17 + std Z+18,r24 + std Z+19,r25 + ret +1189: + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std 
Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sparkle_512, .-sparkle_512 + +#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys/internal-sparkle.c b/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys/internal-sparkle.c index 822af50..4a4c0fb 100644 --- a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys/internal-sparkle.c +++ b/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys/internal-sparkle.c @@ -22,6 +22,8 @@ #include "internal-sparkle.h" +#if !defined(__AVR__) + /* The 8 basic round constants from the specification */ #define RC_0 0xB7E15162 #define RC_1 0xBF715880 @@ -66,7 +68,7 @@ void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps) { uint32_t x0, x1, x2, x3; uint32_t y0, y1, y2, y3; - uint32_t tx, ty, tz, tw; + uint32_t tx, ty; unsigned step; /* Load the SPARKLE-256 state up into local variables */ @@ -105,18 +107,20 @@ void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps) /* Linear layer */ tx = x0 ^ x1; ty = y0 ^ y1; - tw = x0; - tz = y0; tx = leftRotate16(tx ^ (tx << 16)); ty = leftRotate16(ty ^ (ty << 16)); - x0 = x3 ^ x1 ^ ty; - x3 = x1; - y0 = y3 ^ y1 ^ tx; + y2 ^= tx; + tx ^= y3; y3 = y1; - x1 = x2 ^ tw ^ ty; - x2 = tw; - y1 = y2 ^ tz ^ tx; - y2 = tz; + y1 = y2 ^ y0; + y2 = y0; + y0 = tx ^ y3; + x2 ^= ty; + ty ^= x3; + x3 = x1; + x1 = x2 ^ x0; + x2 = x0; + x0 = ty ^ x3; } /* Write the local variables back to the SPARKLE-256 state */ @@ -145,7 +149,7 @@ void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps) { uint32_t x0, x1, x2, x3, x4, x5; uint32_t y0, y1, y2, y3, y4, y5; - uint32_t tx, ty, tz, tw; + uint32_t tx, ty; unsigned step; /* Load the SPARKLE-384 state up into local variables */ @@ -194,22 +198,26 @@ void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps) /* Linear layer */ tx = x0 ^ x1 ^ x2; ty = y0 ^ y1 ^ y2; - tw = x0; - tz = y0; tx = leftRotate16(tx ^ (tx << 16)); ty = leftRotate16(ty ^ (ty << 16)); - x0 = x4 ^ x1 ^ ty; - x4 = x1; - y0 = y4 ^ y1 ^ tx; + y3 ^= tx; + y4 ^= tx; + tx ^= y5; + y5 = y2; + y2 = y3 ^ y0; + y3 = y0; + y0 = y4 ^ y1; y4 = y1; - x1 = x5 ^ x2 ^ ty; + y1 = tx ^ y5; + x3 ^= ty; + x4 ^= ty; + ty ^= x5; x5 = x2; - y1 = y5 ^ y2 ^ tx; - y5 = y2; - x2 = x3 ^ tw ^ ty; - x3 = tw; - y2 = y3 ^ tz ^ tx; - y3 = tz; + x2 = x3 ^ x0; + x3 = x0; + x0 = x4 ^ x1; + x4 = x1; + x1 = ty ^ x5; } /* Write the local variables back to the SPARKLE-384 state */ @@ -246,7 +254,7 @@ void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps) { uint32_t x0, x1, x2, x3, x4, x5, x6, x7; uint32_t y0, y1, y2, y3, y4, y5, y6, y7; - uint32_t tx, ty, tz, tw; + uint32_t tx, ty; unsigned step; /* Load the SPARKLE-512 state up into local variables */ @@ -305,26 +313,32 @@ void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps) /* Linear layer */ tx = x0 ^ x1 ^ x2 ^ x3; ty = y0 ^ y1 ^ y2 ^ y3; - tw = x0; - tz = y0; tx = leftRotate16(tx ^ (tx << 16)); ty = leftRotate16(ty ^ (ty << 16)); - x0 = x5 ^ x1 ^ ty; - x5 = x1; - y0 = y5 ^ y1 ^ tx; + y4 ^= tx; + y5 ^= tx; + y6 ^= tx; + tx ^= y7; + y7 = y3; + y3 = y4 ^ y0; + y4 = y0; + y0 = y5 ^ y1; y5 = y1; - x1 = x6 ^ x2 ^ ty; - x6 = x2; - y1 = y6 ^ y2 ^ tx; + y1 = y6 ^ y2; y6 = y2; - x2 = x7 ^ x3 ^ ty; + 
y2 = tx ^ y7; + x4 ^= ty; + x5 ^= ty; + x6 ^= ty; + ty ^= x7; x7 = x3; - y2 = y7 ^ y3 ^ tx; - y7 = y3; - x3 = x4 ^ tw ^ ty; - x4 = tw; - y3 = y4 ^ tz ^ tx; - y4 = tz; + x3 = x4 ^ x0; + x4 = x0; + x0 = x5 ^ x1; + x5 = x1; + x1 = x6 ^ x2; + x6 = x2; + x2 = ty ^ x7; } /* Write the local variables back to the SPARKLE-512 state */ @@ -364,3 +378,5 @@ void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps) le_store_word32((uint8_t *)&(s[15]), y7); #endif } + +#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys/internal-util.h b/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys/internal-util.h +++ b/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) 
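The internal-sparkle.c hunks above drop the tw/tz temporaries and express the branch rotation of the linear layer as in-place swaps, so only the two running XOR sums tx and ty remain; the permutation itself is unchanged. The building block both versions share is the spec's ell function applied to those sums before they are mixed into the opposite half of the state. A hedged restatement in plain C (helper names invented for illustration):

    #include <stdint.h>

    /* Generic 32-bit left rotation (the library's leftRotate16 does the same job). */
    static inline uint32_t rotl32(uint32_t x, unsigned bits)
    {
        return (x << bits) | (x >> (32u - bits));
    }

    /* ell(x) as used in the hunks above: leftRotate16(x ^ (x << 16)).
     * Equivalently: swap the 16-bit halves of x and XOR the low half into the high one. */
    static inline uint32_t sparkle_ell(uint32_t x)
    {
        return rotl32(x ^ (x << 16), 16);
    }

    /* SPARKLE-256 usage, as in the rewritten linear layer above:
     *     tx = sparkle_ell(x0 ^ x1);   -- mixed into the y branches
     *     ty = sparkle_ell(y0 ^ y1);   -- mixed into the x branches
     */

The new AVR assembly file added earlier in this patch computes the same permutation, which is why the C path in internal-sparkle.c is now guarded with #if !defined(__AVR__).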
(rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) 
(leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys/sparkle.c b/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys/sparkle.c index b357de6..e2aa25a 100644 --- a/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys/sparkle.c +++ b/sparkle/Implementations/crypto_aead/schwaemm192192v1/rhys/sparkle.c @@ -123,24 +123,21 @@ aead_hash_algorithm_t const esch_384_hash_algorithm = { * \brief Perform the rho1 and rate whitening steps for Schwaemm256-128. * * \param s SPARKLE-384 state. - * \param domain Domain separator for this phase. */ -#define schwaemm_256_128_rho(s, domain) \ +#define schwaemm_256_128_rho(s) \ do { \ - uint32_t t0 = s[0]; \ - uint32_t t1 = s[1]; \ - uint32_t t2 = s[2]; \ - uint32_t t3 = s[3]; \ - if ((domain) != 0) \ - s[11] ^= DOMAIN(domain); \ + uint32_t t = s[0]; \ s[0] = s[4] ^ s[8]; \ + s[4] ^= t ^ s[8]; \ + t = s[1]; \ s[1] = s[5] ^ s[9]; \ + s[5] ^= t ^ s[9]; \ + t = s[2]; \ s[2] = s[6] ^ s[10]; \ + s[6] ^= t ^ s[10]; \ + t = s[3]; \ s[3] = s[7] ^ s[11]; \ - s[4] ^= t0 ^ s[8]; \ - s[5] ^= t1 ^ s[9]; \ - s[6] ^= t2 ^ s[10]; \ - s[7] ^= t3 ^ s[11]; \ + s[7] ^= t ^ s[11]; \ } while (0) /** @@ -155,18 +152,20 @@ static void schwaemm_256_128_authenticate const unsigned char *ad, unsigned long long adlen) { while (adlen > SCHWAEMM_256_128_RATE) { - schwaemm_256_128_rho(s, 0x00); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); sparkle_384(s, 7); ad += SCHWAEMM_256_128_RATE; adlen -= SCHWAEMM_256_128_RATE; } if (adlen == SCHWAEMM_256_128_RATE) { - schwaemm_256_128_rho(s, 0x05); + s[11] ^= DOMAIN(0x05); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); } else { unsigned temp = (unsigned)adlen; - schwaemm_256_128_rho(s, 0x04); + s[11] ^= DOMAIN(0x04); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, ad, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -202,7 +201,7 @@ int schwaemm_256_128_aead_encrypt while (mlen > SCHWAEMM_256_128_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s, 0x00); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); sparkle_384(s, 7); memcpy(c, block, SCHWAEMM_256_128_RATE); @@ -213,13 +212,15 @@ int schwaemm_256_128_aead_encrypt if (mlen == SCHWAEMM_256_128_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s, 0x07); + s[11] ^= DOMAIN(0x07); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); memcpy(c, block, SCHWAEMM_256_128_RATE); } else { unsigned temp = (unsigned)mlen; lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - schwaemm_256_128_rho(s, 0x06); + s[11] ^= DOMAIN(0x06); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; memcpy(c, block, temp); @@ -266,7 +267,7 @@ int schwaemm_256_128_aead_decrypt while (clen > SCHWAEMM_256_128_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s, 0x00); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); sparkle_384(s, 7); c += SCHWAEMM_256_128_RATE; @@ -276,12 +277,14 @@ int schwaemm_256_128_aead_decrypt if (clen 
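The internal-util.h hunk above spells out the trick: on AVR only rotations by one bit or by a whole number of bytes are cheap, so every other rotation count is composed from a byte rotation plus a handful of single-bit rotations, and the rightRotateN macros become aliases for leftRotate(32-N). A small self-contained check (generic rotations defined locally; this is not library code) confirms two of the compositions used above:

    #include <assert.h>
    #include <stdint.h>

    static inline uint32_t rotl32(uint32_t x, unsigned b) { return (x << b) | (x >> (32u - b)); }
    static inline uint32_t rotr32(uint32_t x, unsigned b) { return (x >> b) | (x << (32u - b)); }

    int main(void)
    {
        uint32_t x = 0xB7E15162;  /* arbitrary sample value (RC_0 of the permutation) */

        /* leftRotate5 as composed in the patch: left by 8, then right by 1 three times. */
        assert(rotr32(rotr32(rotr32(rotl32(x, 8), 1), 1), 1) == rotl32(x, 5));

        /* leftRotate30 as composed in the patch: right by 1 twice. */
        assert(rotr32(rotr32(x, 1), 1) == rotl32(x, 30));
        return 0;
    }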
== SCHWAEMM_256_128_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s, 0x07); + s[11] ^= DOMAIN(0x07); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); } else { unsigned temp = (unsigned)clen; lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - schwaemm_256_128_rho(s, 0x06); + s[11] ^= DOMAIN(0x06); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -315,21 +318,18 @@ int schwaemm_256_128_aead_decrypt * \brief Perform the rho1 and rate whitening steps for Schwaemm192-192. * * \param s SPARKLE-384 state. - * \param domain Domain separator for this phase. */ -#define schwaemm_192_192_rho(s, domain) \ +#define schwaemm_192_192_rho(s) \ do { \ - uint32_t t0 = s[0]; \ - uint32_t t1 = s[1]; \ - uint32_t t2 = s[2]; \ - if ((domain) != 0) \ - s[11] ^= DOMAIN(domain); \ + uint32_t t = s[0]; \ s[0] = s[3] ^ s[6]; \ + s[3] ^= t ^ s[9]; \ + t = s[1]; \ s[1] = s[4] ^ s[7]; \ + s[4] ^= t ^ s[10]; \ + t = s[2]; \ s[2] = s[5] ^ s[8]; \ - s[3] ^= t0 ^ s[9]; \ - s[4] ^= t1 ^ s[10]; \ - s[5] ^= t2 ^ s[11]; \ + s[5] ^= t ^ s[11]; \ } while (0) /** @@ -344,18 +344,20 @@ static void schwaemm_192_192_authenticate const unsigned char *ad, unsigned long long adlen) { while (adlen > SCHWAEMM_192_192_RATE) { - schwaemm_192_192_rho(s, 0x00); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); sparkle_384(s, 7); ad += SCHWAEMM_192_192_RATE; adlen -= SCHWAEMM_192_192_RATE; } if (adlen == SCHWAEMM_192_192_RATE) { - schwaemm_192_192_rho(s, 0x09); + s[11] ^= DOMAIN(0x09); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); } else { unsigned temp = (unsigned)adlen; - schwaemm_192_192_rho(s, 0x08); + s[11] ^= DOMAIN(0x08); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, ad, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -391,7 +393,7 @@ int schwaemm_192_192_aead_encrypt while (mlen > SCHWAEMM_192_192_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s, 0x00); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); sparkle_384(s, 7); memcpy(c, block, SCHWAEMM_192_192_RATE); @@ -402,13 +404,15 @@ int schwaemm_192_192_aead_encrypt if (mlen == SCHWAEMM_192_192_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s, 0x0B); + s[11] ^= DOMAIN(0x0B); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); memcpy(c, block, SCHWAEMM_192_192_RATE); } else { unsigned temp = (unsigned)mlen; lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - schwaemm_192_192_rho(s, 0x0A); + s[11] ^= DOMAIN(0x0A); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; memcpy(c, block, temp); @@ -455,7 +459,7 @@ int schwaemm_192_192_aead_decrypt while (clen > SCHWAEMM_192_192_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s, 0x00); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); sparkle_384(s, 7); c += SCHWAEMM_192_192_RATE; @@ -465,12 +469,14 @@ int schwaemm_192_192_aead_decrypt if (clen == SCHWAEMM_192_192_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s, 0x0B); + s[11] ^= DOMAIN(0x0B); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, 
SCHWAEMM_192_192_RATE); } else { unsigned temp = (unsigned)clen; lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - schwaemm_192_192_rho(s, 0x0A); + s[11] ^= DOMAIN(0x0A); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -504,18 +510,15 @@ int schwaemm_192_192_aead_decrypt * \brief Perform the rho1 and rate whitening steps for Schwaemm128-128. * * \param s SPARKLE-256 state. - * \param domain Domain separator for this phase. */ -#define schwaemm_128_128_rho(s, domain) \ +#define schwaemm_128_128_rho(s) \ do { \ - uint32_t t0 = s[0]; \ - uint32_t t1 = s[1]; \ - if ((domain) != 0) \ - s[7] ^= DOMAIN(domain); \ + uint32_t t = s[0]; \ s[0] = s[2] ^ s[4]; \ + s[2] ^= t ^ s[6]; \ + t = s[1]; \ s[1] = s[3] ^ s[5]; \ - s[2] ^= t0 ^ s[6]; \ - s[3] ^= t1 ^ s[7]; \ + s[3] ^= t ^ s[7]; \ } while (0) /** @@ -530,18 +533,20 @@ static void schwaemm_128_128_authenticate const unsigned char *ad, unsigned long long adlen) { while (adlen > SCHWAEMM_128_128_RATE) { - schwaemm_128_128_rho(s, 0x00); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); sparkle_256(s, 7); ad += SCHWAEMM_128_128_RATE; adlen -= SCHWAEMM_128_128_RATE; } if (adlen == SCHWAEMM_128_128_RATE) { - schwaemm_128_128_rho(s, 0x05); + s[7] ^= DOMAIN(0x05); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); } else { unsigned temp = (unsigned)adlen; - schwaemm_128_128_rho(s, 0x04); + s[7] ^= DOMAIN(0x04); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, ad, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -577,7 +582,7 @@ int schwaemm_128_128_aead_encrypt while (mlen > SCHWAEMM_128_128_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s, 0x00); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); sparkle_256(s, 7); memcpy(c, block, SCHWAEMM_128_128_RATE); @@ -588,13 +593,15 @@ int schwaemm_128_128_aead_encrypt if (mlen == SCHWAEMM_128_128_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s, 0x07); + s[7] ^= DOMAIN(0x07); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); memcpy(c, block, SCHWAEMM_128_128_RATE); } else { unsigned temp = (unsigned)mlen; lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - schwaemm_128_128_rho(s, 0x06); + s[7] ^= DOMAIN(0x06); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; memcpy(c, block, temp); @@ -641,7 +648,7 @@ int schwaemm_128_128_aead_decrypt while (clen > SCHWAEMM_128_128_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s, 0x00); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); sparkle_256(s, 7); c += SCHWAEMM_128_128_RATE; @@ -651,12 +658,14 @@ int schwaemm_128_128_aead_decrypt if (clen == SCHWAEMM_128_128_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s, 0x07); + s[7] ^= DOMAIN(0x07); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); } else { unsigned temp = (unsigned)clen; lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - schwaemm_128_128_rho(s, 0x06); + s[7] ^= DOMAIN(0x06); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -690,24 +699,21 @@ int schwaemm_128_128_aead_decrypt * 
\brief Perform the rho1 and rate whitening steps for Schwaemm256-256. * * \param s SPARKLE-512 state. - * \param domain Domain separator for this phase. */ -#define schwaemm_256_256_rho(s, domain) \ +#define schwaemm_256_256_rho(s) \ do { \ - uint32_t t0 = s[0]; \ - uint32_t t1 = s[1]; \ - uint32_t t2 = s[2]; \ - uint32_t t3 = s[3]; \ - if ((domain) != 0) \ - s[15] ^= DOMAIN(domain); \ + uint32_t t = s[0]; \ s[0] = s[4] ^ s[8]; \ + s[4] ^= t ^ s[12]; \ + t = s[1]; \ s[1] = s[5] ^ s[9]; \ + s[5] ^= t ^ s[13]; \ + t = s[2]; \ s[2] = s[6] ^ s[10]; \ + s[6] ^= t ^ s[14]; \ + t = s[3]; \ s[3] = s[7] ^ s[11]; \ - s[4] ^= t0 ^ s[12]; \ - s[5] ^= t1 ^ s[13]; \ - s[6] ^= t2 ^ s[14]; \ - s[7] ^= t3 ^ s[15]; \ + s[7] ^= t ^ s[15]; \ } while (0) /** @@ -722,18 +728,20 @@ static void schwaemm_256_256_authenticate const unsigned char *ad, unsigned long long adlen) { while (adlen > SCHWAEMM_256_256_RATE) { - schwaemm_256_256_rho(s, 0x00); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); sparkle_512(s, 8); ad += SCHWAEMM_256_256_RATE; adlen -= SCHWAEMM_256_256_RATE; } if (adlen == SCHWAEMM_256_256_RATE) { - schwaemm_256_256_rho(s, 0x11); + s[15] ^= DOMAIN(0x11); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); } else { unsigned temp = (unsigned)adlen; - schwaemm_256_256_rho(s, 0x10); + s[15] ^= DOMAIN(0x10); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, ad, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -769,7 +777,7 @@ int schwaemm_256_256_aead_encrypt while (mlen > SCHWAEMM_256_256_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s, 0x00); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); sparkle_512(s, 8); memcpy(c, block, SCHWAEMM_256_256_RATE); @@ -780,13 +788,15 @@ int schwaemm_256_256_aead_encrypt if (mlen == SCHWAEMM_256_256_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s, 0x13); + s[15] ^= DOMAIN(0x13); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); memcpy(c, block, SCHWAEMM_256_256_RATE); } else { unsigned temp = (unsigned)mlen; lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - schwaemm_256_256_rho(s, 0x12); + s[15] ^= DOMAIN(0x12); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; memcpy(c, block, temp); @@ -833,7 +843,7 @@ int schwaemm_256_256_aead_decrypt while (clen > SCHWAEMM_256_256_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s, 0x00); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); sparkle_512(s, 8); c += SCHWAEMM_256_256_RATE; @@ -843,12 +853,14 @@ int schwaemm_256_256_aead_decrypt if (clen == SCHWAEMM_256_256_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s, 0x13); + s[15] ^= DOMAIN(0x13); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); } else { unsigned temp = (unsigned)clen; lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - schwaemm_256_256_rho(s, 0x12); + s[15] ^= DOMAIN(0x12); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; } diff --git a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/aead-common.c b/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/aead-common.c 
deleted file mode 100644 index 84fc53a..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/aead-common.h b/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. 
- */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. 
- */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/api.h b/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/api.h deleted file mode 100644 index 420cea6..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 32 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/encrypt.c b/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/encrypt.c deleted file mode 100644 index 6063cb6..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "sparkle.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return schwaemm_256_128_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return schwaemm_256_128_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/internal-sparkle-avr.S b/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/internal-sparkle-avr.S deleted file mode 100644 index 753ea2f..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/internal-sparkle-avr.S +++ /dev/null @@ -1,2887 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global sparkle_256 - .type sparkle_256, @function -sparkle_256: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - push r22 - ld r22,Z - ldd r23,Z+1 - ldd r26,Z+2 - ldd r27,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - rcall 129f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,1 - eor r8,r18 - rcall 129f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,2 - eor r8,r18 - rcall 129f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,3 - eor r8,r18 - rcall 129f - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,4 - eor r8,r18 - rcall 129f - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,5 - eor r8,r18 - rcall 129f - ldi r18,200 - ldi r19,161 - 
ldi r20,191 - ldi r21,207 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,6 - eor r8,r18 - rcall 129f - pop r18 - cpi r18,7 - brne 5094f - rjmp 615f -5094: - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,7 - eor r8,r18 - rcall 129f - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,8 - eor r8,r18 - rcall 129f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,9 - eor r8,r18 - rcall 129f - rjmp 615f -129: - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - movw r12,r22 - movw r14,r26 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - movw r24,r4 - movw r16,r6 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r28,Z+24 - ldd r29,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - 
movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - eor r14,r12 - eor r15,r13 - eor r16,r24 - eor r17,r25 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - eor r14,r8 - eor r15,r9 - eor r12,r10 - eor r13,r11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - std Z+20,r18 - std Z+21,r19 - std Z+22,r20 - std Z+23,r21 - movw r18,r4 - movw r20,r6 - movw r4,r14 - movw r6,r12 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - movw r8,r18 - movw r10,r20 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - eor r16,r28 - eor r17,r29 - eor r24,r2 - eor r25,r3 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - std Z+24,r28 - std Z+25,r29 - std Z+26,r2 - std Z+27,r3 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - movw r14,r22 - movw r12,r26 - eor r14,r18 - eor r15,r19 - eor r12,r20 - eor r13,r21 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - movw r22,r16 - movw r26,r24 - eor r22,r28 - eor r23,r29 - eor r26,r2 - eor r27,r3 - movw r28,r14 - movw r2,r12 - ret -615: - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 
- std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sparkle_256, .-sparkle_256 - - .text -.global sparkle_384 - .type sparkle_384, @function -sparkle_384: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - push r22 - ld r22,Z - ldd r23,Z+1 - ldd r26,Z+2 - ldd r27,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - rcall 140f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,1 - eor r8,r18 - rcall 140f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,2 - eor r8,r18 - rcall 140f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,3 - eor r8,r18 - rcall 140f - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,4 - eor r8,r18 - rcall 140f - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,5 - eor r8,r18 - rcall 140f - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,6 - eor r8,r18 - rcall 140f - pop r18 - cpi r18,7 - brne 5094f - rjmp 886f -5094: - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,7 - eor r8,r18 - rcall 140f - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,8 - eor r8,r18 - rcall 140f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,9 - eor r8,r18 - rcall 140f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,10 - eor r8,r18 - rcall 140f - rjmp 886f -140: - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor 
r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - movw r12,r22 - movw r14,r26 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - movw r24,r4 - movw r16,r6 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r28,Z+24 - ldd r29,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld 
r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r22 - std Z+17,r23 - std Z+18,r26 - std Z+19,r27 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r28 - std Z+25,r29 - std Z+26,r2 - std Z+27,r3 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - eor r12,r22 - eor r13,r23 - eor r14,r26 - eor r15,r27 - eor r24,r4 - eor r25,r5 - eor r16,r6 - eor r17,r7 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r28,Z+40 - ldd r29,Z+41 - ldd r2,Z+42 - ldd r3,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - eor r14,r12 - eor r15,r13 - eor r16,r24 - eor 
r17,r25 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - eor r14,r8 - eor r15,r9 - eor r12,r10 - eor r13,r11 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - ldd r0,Z+4 - eor r18,r0 - ldd r0,Z+5 - eor r19,r0 - ldd r0,Z+6 - eor r20,r0 - ldd r0,Z+7 - eor r21,r0 - std Z+20,r18 - std Z+21,r19 - std Z+22,r20 - std Z+23,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Z+28,r18 - std Z+29,r19 - std Z+30,r20 - std Z+31,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - std Z+36,r18 - std Z+37,r19 - std Z+38,r20 - std Z+39,r21 - eor r8,r14 - eor r9,r15 - eor r10,r12 - eor r11,r13 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - eor r18,r16 - eor r19,r17 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - eor r16,r28 - eor r17,r29 - eor r24,r2 - eor r25,r3 - ldd r28,Z+16 - ldd r29,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - std Z+40,r28 - std Z+41,r29 - std Z+42,r2 - std Z+43,r3 - ld r14,Z - ldd r15,Z+1 - ldd r12,Z+2 - ldd r13,Z+3 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - std Z+24,r14 - std Z+25,r15 - std Z+26,r12 - std Z+27,r13 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - std Z+32,r18 - std Z+33,r19 - std Z+34,r20 - std Z+35,r21 - eor r28,r16 - eor r29,r17 - eor r2,r24 - eor r3,r25 - ret -886: - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sparkle_384, .-sparkle_384 - - .text -.global sparkle_512 - .type sparkle_512, @function -sparkle_512: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - push r22 - ld r22,Z - ldd r23,Z+1 - ldd r26,Z+2 - ldd r27,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - rcall 151f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,1 - eor r8,r18 - rcall 151f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,2 - eor r8,r18 - rcall 151f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,3 - eor r8,r18 - rcall 151f - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,4 - eor r8,r18 - rcall 151f - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,5 - eor r8,r18 - rcall 151f - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r4,r18 - eor r5,r19 - 
eor r6,r20 - eor r7,r21 - ldi r18,6 - eor r8,r18 - rcall 151f - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,7 - eor r8,r18 - rcall 151f - pop r18 - cpi r18,8 - brne 5105f - rjmp 1189f -5105: - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,8 - eor r8,r18 - rcall 151f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,9 - eor r8,r18 - rcall 151f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,10 - eor r8,r18 - rcall 151f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,11 - eor r8,r18 - rcall 151f - rjmp 1189f -151: - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - movw r12,r22 - movw r14,r26 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - movw r24,r4 - movw r16,r6 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor 
r17,r11 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r28,Z+24 - ldd r29,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r22 - std Z+17,r23 - std Z+18,r26 - std Z+19,r27 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r28 - std Z+25,r29 - std Z+26,r2 - std Z+27,r3 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - eor r12,r22 - eor r13,r23 - eor r14,r26 - eor r15,r27 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - eor r24,r4 - eor r25,r5 - eor r16,r6 - eor r17,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r28,Z+40 - ldd r29,Z+41 - ldd r2,Z+42 - ldd r3,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw 
r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - std Z+32,r22 - std Z+33,r23 - std Z+34,r26 - std Z+35,r27 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r28 - std Z+41,r29 - std Z+42,r2 - std Z+43,r3 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - ldd r22,Z+48 - ldd r23,Z+49 - ldd r26,Z+50 - ldd r27,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r28,Z+56 - ldd r29,Z+57 - ldd r2,Z+58 - ldd r3,Z+59 - ldd r8,Z+60 - ldd r9,Z+61 - ldd r10,Z+62 - ldd r11,Z+63 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 
- eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - eor r14,r12 - eor r15,r13 - eor r16,r24 - eor r17,r25 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - ldd r18,Z+44 - ldd r19,Z+45 - ldd r20,Z+46 - ldd r21,Z+47 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - eor r14,r8 - eor r15,r9 - eor r12,r10 - eor r13,r11 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Z+60,r8 - std Z+61,r9 - std Z+62,r10 - std Z+63,r11 - ldd r8,Z+4 - ldd r9,Z+5 - ldd r10,Z+6 - ldd r11,Z+7 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - std Z+28,r4 - std Z+29,r5 - std Z+30,r6 - std Z+31,r7 - std Z+36,r8 - std Z+37,r9 - std Z+38,r10 - std Z+39,r11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - ldd r8,Z+52 - ldd r9,Z+53 - ldd r10,Z+54 - ldd r11,Z+55 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - ldd r0,Z+60 - eor r14,r0 - ldd r0,Z+61 - eor r15,r0 - ldd r0,Z+62 - eor r12,r0 - ldd r0,Z+63 - eor r13,r0 - std Z+20,r14 - std Z+21,r15 - std Z+22,r12 - std Z+23,r13 - movw r4,r18 - movw r6,r20 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - std Z+48,r22 - std Z+49,r23 - std Z+50,r26 - std Z+51,r27 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - eor r18,r16 - eor r19,r17 - eor r20,r24 - eor r21,r25 - eor r16,r28 - eor r17,r29 - eor r24,r2 - eor r25,r3 - ldd r14,Z+24 - ldd r15,Z+25 - ldd r12,Z+26 - ldd r13,Z+27 - std Z+56,r14 - std Z+57,r15 - std Z+58,r12 - std Z+59,r13 - ld r14,Z - ldd r15,Z+1 - ldd r12,Z+2 - ldd r13,Z+3 - eor r22,r14 - eor r23,r15 - eor r26,r12 - eor r27,r13 - std Z+24,r22 - std Z+25,r23 - std Z+26,r26 - std Z+27,r27 - std Z+32,r14 - std Z+33,r15 - std Z+34,r12 - std Z+35,r13 - ldd r14,Z+8 - ldd r15,Z+9 - ldd r12,Z+10 - ldd r13,Z+11 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - movw r22,r18 - movw r26,r20 - std Z+40,r14 - std Z+41,r15 - std Z+42,r12 - std Z+43,r13 - ldd r28,Z+48 - 
ldd r29,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r14,Z+16 - ldd r15,Z+17 - ldd r12,Z+18 - ldd r13,Z+19 - eor r28,r14 - eor r29,r15 - eor r2,r12 - eor r3,r13 - std Z+48,r14 - std Z+49,r15 - std Z+50,r12 - std Z+51,r13 - ldd r0,Z+56 - eor r16,r0 - ldd r0,Z+57 - eor r17,r0 - ldd r0,Z+58 - eor r24,r0 - ldd r0,Z+59 - eor r25,r0 - std Z+16,r16 - std Z+17,r17 - std Z+18,r24 - std Z+19,r25 - ret -1189: - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sparkle_512, .-sparkle_512 - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/internal-sparkle.c b/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/internal-sparkle.c deleted file mode 100644 index 4a4c0fb..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/internal-sparkle.c +++ /dev/null @@ -1,382 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-sparkle.h" - -#if !defined(__AVR__) - -/* The 8 basic round constants from the specification */ -#define RC_0 0xB7E15162 -#define RC_1 0xBF715880 -#define RC_2 0x38B4DA56 -#define RC_3 0x324E7738 -#define RC_4 0xBB1185EB -#define RC_5 0x4F7C7B57 -#define RC_6 0xCFBFA1C8 -#define RC_7 0xC2B3293D - -/* Round constants for all SPARKLE steps; maximum of 12 for SPARKLE-512 */ -static uint32_t const sparkle_rc[12] = { - RC_0, RC_1, RC_2, RC_3, RC_4, RC_5, RC_6, RC_7, - RC_0, RC_1, RC_2, RC_3 -}; - -/** - * \brief Alzette block cipher that implements the ARXbox layer of the - * SPARKLE permutation. - * - * \param x Left half of the 64-bit block. - * \param y Right half of the 64-bit block. - * \param k 32-bit round key. 
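/*
 * Illustrative sketch only: the ARX sequence implemented by the alzette()
 * macro defined just below, written out as a plain function for readability.
 * The name alzette_box and the pass-by-pointer signature are assumptions for
 * illustration and do not appear in these sources; it assumes internal-util.h
 * is included for the leftRotateN() macros and <stdint.h> types.
 */
static void alzette_box(uint32_t *x, uint32_t *y, uint32_t k)
{
    *x += leftRotate1(*y);  *y ^= leftRotate8(*x);  *x ^= k;
    *x += leftRotate15(*y); *y ^= leftRotate15(*x); *x ^= k;
    *x += *y;               *y ^= leftRotate1(*x);  *x ^= k;
    *x += leftRotate8(*y);  *y ^= leftRotate16(*x); *x ^= k;
}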
- */ -#define alzette(x, y, k) \ - do { \ - (x) += leftRotate1((y)); \ - (y) ^= leftRotate8((x)); \ - (x) ^= (k); \ - (x) += leftRotate15((y)); \ - (y) ^= leftRotate15((x)); \ - (x) ^= (k); \ - (x) += (y); \ - (y) ^= leftRotate1((x)); \ - (x) ^= (k); \ - (x) += leftRotate8((y)); \ - (y) ^= leftRotate16((x)); \ - (x) ^= (k); \ - } while (0) - -void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps) -{ - uint32_t x0, x1, x2, x3; - uint32_t y0, y1, y2, y3; - uint32_t tx, ty; - unsigned step; - - /* Load the SPARKLE-256 state up into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = s[0]; - y0 = s[1]; - x1 = s[2]; - y1 = s[3]; - x2 = s[4]; - y2 = s[5]; - x3 = s[6]; - y3 = s[7]; -#else - x0 = le_load_word32((const uint8_t *)&(s[0])); - y0 = le_load_word32((const uint8_t *)&(s[1])); - x1 = le_load_word32((const uint8_t *)&(s[2])); - y1 = le_load_word32((const uint8_t *)&(s[3])); - x2 = le_load_word32((const uint8_t *)&(s[4])); - y2 = le_load_word32((const uint8_t *)&(s[5])); - x3 = le_load_word32((const uint8_t *)&(s[6])); - y3 = le_load_word32((const uint8_t *)&(s[7])); -#endif - - /* Perform all requested steps */ - for (step = 0; step < steps; ++step) { - /* Add round constants */ - y0 ^= sparkle_rc[step]; - y1 ^= step; - - /* ARXbox layer */ - alzette(x0, y0, RC_0); - alzette(x1, y1, RC_1); - alzette(x2, y2, RC_2); - alzette(x3, y3, RC_3); - - /* Linear layer */ - tx = x0 ^ x1; - ty = y0 ^ y1; - tx = leftRotate16(tx ^ (tx << 16)); - ty = leftRotate16(ty ^ (ty << 16)); - y2 ^= tx; - tx ^= y3; - y3 = y1; - y1 = y2 ^ y0; - y2 = y0; - y0 = tx ^ y3; - x2 ^= ty; - ty ^= x3; - x3 = x1; - x1 = x2 ^ x0; - x2 = x0; - x0 = ty ^ x3; - } - - /* Write the local variables back to the SPARKLE-256 state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s[0] = x0; - s[1] = y0; - s[2] = x1; - s[3] = y1; - s[4] = x2; - s[5] = y2; - s[6] = x3; - s[7] = y3; -#else - le_store_word32((uint8_t *)&(s[0]), x0); - le_store_word32((uint8_t *)&(s[1]), y0); - le_store_word32((uint8_t *)&(s[2]), x1); - le_store_word32((uint8_t *)&(s[3]), y1); - le_store_word32((uint8_t *)&(s[4]), x2); - le_store_word32((uint8_t *)&(s[5]), y2); - le_store_word32((uint8_t *)&(s[6]), x3); - le_store_word32((uint8_t *)&(s[7]), y3); -#endif -} - -void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps) -{ - uint32_t x0, x1, x2, x3, x4, x5; - uint32_t y0, y1, y2, y3, y4, y5; - uint32_t tx, ty; - unsigned step; - - /* Load the SPARKLE-384 state up into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = s[0]; - y0 = s[1]; - x1 = s[2]; - y1 = s[3]; - x2 = s[4]; - y2 = s[5]; - x3 = s[6]; - y3 = s[7]; - x4 = s[8]; - y4 = s[9]; - x5 = s[10]; - y5 = s[11]; -#else - x0 = le_load_word32((const uint8_t *)&(s[0])); - y0 = le_load_word32((const uint8_t *)&(s[1])); - x1 = le_load_word32((const uint8_t *)&(s[2])); - y1 = le_load_word32((const uint8_t *)&(s[3])); - x2 = le_load_word32((const uint8_t *)&(s[4])); - y2 = le_load_word32((const uint8_t *)&(s[5])); - x3 = le_load_word32((const uint8_t *)&(s[6])); - y3 = le_load_word32((const uint8_t *)&(s[7])); - x4 = le_load_word32((const uint8_t *)&(s[8])); - y4 = le_load_word32((const uint8_t *)&(s[9])); - x5 = le_load_word32((const uint8_t *)&(s[10])); - y5 = le_load_word32((const uint8_t *)&(s[11])); -#endif - - /* Perform all requested steps */ - for (step = 0; step < steps; ++step) { - /* Add round constants */ - y0 ^= sparkle_rc[step]; - y1 ^= step; - - /* ARXbox layer */ - alzette(x0, y0, RC_0); - alzette(x1, y1, RC_1); - alzette(x2, y2, RC_2); - alzette(x3, y3, RC_3); 
- alzette(x4, y4, RC_4); - alzette(x5, y5, RC_5); - - /* Linear layer */ - tx = x0 ^ x1 ^ x2; - ty = y0 ^ y1 ^ y2; - tx = leftRotate16(tx ^ (tx << 16)); - ty = leftRotate16(ty ^ (ty << 16)); - y3 ^= tx; - y4 ^= tx; - tx ^= y5; - y5 = y2; - y2 = y3 ^ y0; - y3 = y0; - y0 = y4 ^ y1; - y4 = y1; - y1 = tx ^ y5; - x3 ^= ty; - x4 ^= ty; - ty ^= x5; - x5 = x2; - x2 = x3 ^ x0; - x3 = x0; - x0 = x4 ^ x1; - x4 = x1; - x1 = ty ^ x5; - } - - /* Write the local variables back to the SPARKLE-384 state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s[0] = x0; - s[1] = y0; - s[2] = x1; - s[3] = y1; - s[4] = x2; - s[5] = y2; - s[6] = x3; - s[7] = y3; - s[8] = x4; - s[9] = y4; - s[10] = x5; - s[11] = y5; -#else - le_store_word32((uint8_t *)&(s[0]), x0); - le_store_word32((uint8_t *)&(s[1]), y0); - le_store_word32((uint8_t *)&(s[2]), x1); - le_store_word32((uint8_t *)&(s[3]), y1); - le_store_word32((uint8_t *)&(s[4]), x2); - le_store_word32((uint8_t *)&(s[5]), y2); - le_store_word32((uint8_t *)&(s[6]), x3); - le_store_word32((uint8_t *)&(s[7]), y3); - le_store_word32((uint8_t *)&(s[8]), x4); - le_store_word32((uint8_t *)&(s[9]), y4); - le_store_word32((uint8_t *)&(s[10]), x5); - le_store_word32((uint8_t *)&(s[11]), y5); -#endif -} - -void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps) -{ - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t y0, y1, y2, y3, y4, y5, y6, y7; - uint32_t tx, ty; - unsigned step; - - /* Load the SPARKLE-512 state up into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = s[0]; - y0 = s[1]; - x1 = s[2]; - y1 = s[3]; - x2 = s[4]; - y2 = s[5]; - x3 = s[6]; - y3 = s[7]; - x4 = s[8]; - y4 = s[9]; - x5 = s[10]; - y5 = s[11]; - x6 = s[12]; - y6 = s[13]; - x7 = s[14]; - y7 = s[15]; -#else - x0 = le_load_word32((const uint8_t *)&(s[0])); - y0 = le_load_word32((const uint8_t *)&(s[1])); - x1 = le_load_word32((const uint8_t *)&(s[2])); - y1 = le_load_word32((const uint8_t *)&(s[3])); - x2 = le_load_word32((const uint8_t *)&(s[4])); - y2 = le_load_word32((const uint8_t *)&(s[5])); - x3 = le_load_word32((const uint8_t *)&(s[6])); - y3 = le_load_word32((const uint8_t *)&(s[7])); - x4 = le_load_word32((const uint8_t *)&(s[8])); - y4 = le_load_word32((const uint8_t *)&(s[9])); - x5 = le_load_word32((const uint8_t *)&(s[10])); - y5 = le_load_word32((const uint8_t *)&(s[11])); - x6 = le_load_word32((const uint8_t *)&(s[12])); - y6 = le_load_word32((const uint8_t *)&(s[13])); - x7 = le_load_word32((const uint8_t *)&(s[14])); - y7 = le_load_word32((const uint8_t *)&(s[15])); -#endif - - /* Perform all requested steps */ - for (step = 0; step < steps; ++step) { - /* Add round constants */ - y0 ^= sparkle_rc[step]; - y1 ^= step; - - /* ARXbox layer */ - alzette(x0, y0, RC_0); - alzette(x1, y1, RC_1); - alzette(x2, y2, RC_2); - alzette(x3, y3, RC_3); - alzette(x4, y4, RC_4); - alzette(x5, y5, RC_5); - alzette(x6, y6, RC_6); - alzette(x7, y7, RC_7); - - /* Linear layer */ - tx = x0 ^ x1 ^ x2 ^ x3; - ty = y0 ^ y1 ^ y2 ^ y3; - tx = leftRotate16(tx ^ (tx << 16)); - ty = leftRotate16(ty ^ (ty << 16)); - y4 ^= tx; - y5 ^= tx; - y6 ^= tx; - tx ^= y7; - y7 = y3; - y3 = y4 ^ y0; - y4 = y0; - y0 = y5 ^ y1; - y5 = y1; - y1 = y6 ^ y2; - y6 = y2; - y2 = tx ^ y7; - x4 ^= ty; - x5 ^= ty; - x6 ^= ty; - ty ^= x7; - x7 = x3; - x3 = x4 ^ x0; - x4 = x0; - x0 = x5 ^ x1; - x5 = x1; - x1 = x6 ^ x2; - x6 = x2; - x2 = ty ^ x7; - } - - /* Write the local variables back to the SPARKLE-512 state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s[0] = x0; - s[1] = y0; - s[2] = x1; - s[3] = y1; - s[4] = x2; - s[5] = y2; 
- s[6] = x3; - s[7] = y3; - s[8] = x4; - s[9] = y4; - s[10] = x5; - s[11] = y5; - s[12] = x6; - s[13] = y6; - s[14] = x7; - s[15] = y7; -#else - le_store_word32((uint8_t *)&(s[0]), x0); - le_store_word32((uint8_t *)&(s[1]), y0); - le_store_word32((uint8_t *)&(s[2]), x1); - le_store_word32((uint8_t *)&(s[3]), y1); - le_store_word32((uint8_t *)&(s[4]), x2); - le_store_word32((uint8_t *)&(s[5]), y2); - le_store_word32((uint8_t *)&(s[6]), x3); - le_store_word32((uint8_t *)&(s[7]), y3); - le_store_word32((uint8_t *)&(s[8]), x4); - le_store_word32((uint8_t *)&(s[9]), y4); - le_store_word32((uint8_t *)&(s[10]), x5); - le_store_word32((uint8_t *)&(s[11]), y5); - le_store_word32((uint8_t *)&(s[12]), x6); - le_store_word32((uint8_t *)&(s[13]), y6); - le_store_word32((uint8_t *)&(s[14]), x7); - le_store_word32((uint8_t *)&(s[15]), y7); -#endif -} - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/internal-sparkle.h b/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/internal-sparkle.h deleted file mode 100644 index fbdabc1..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/internal-sparkle.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SPARKLE_H -#define LW_INTERNAL_SPARKLE_H - -#include "internal-util.h" - -/** - * \file internal-sparkle.h - * \brief Internal implementation of the SPARKLE permutation. - * - * References: https://www.cryptolux.org/index.php/Sparkle - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the state for SPARKLE-256. - */ -#define SPARKLE_256_STATE_SIZE 8 - -/** - * \brief Size of the state for SPARKLE-384. - */ -#define SPARKLE_384_STATE_SIZE 12 - -/** - * \brief Size of the state for SPARKLE-512. - */ -#define SPARKLE_512_STATE_SIZE 16 - -/** - * \brief Performs the SPARKLE-256 permutation. - * - * \param s The words of the SPARKLE-256 state in little-endian byte order. - * \param steps The number of steps to perform, 7 or 10. - */ -void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps); - -/** - * \brief Performs the SPARKLE-384 permutation. - * - * \param s The words of the SPARKLE-384 state in little-endian byte order. - * \param steps The number of steps to perform, 7 or 11. - */ -void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps); - -/** - * \brief Performs the SPARKLE-512 permutation. 
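/*
 * Illustrative sketch only: one way the permutation defined above might be
 * applied to a raw 32-byte buffer.  The function name and the choice of
 * 10 steps (the larger of the two step counts documented for SPARKLE-256)
 * are assumptions for illustration, not part of these sources.
 */
static void sparkle_256_permute_bytes(unsigned char buf[32])
{
    uint32_t s[SPARKLE_256_STATE_SIZE];
    unsigned i;
    for (i = 0; i < SPARKLE_256_STATE_SIZE; ++i)
        s[i] = le_load_word32(buf + 4 * i);   /* bytes -> little-endian words */
    sparkle_256(s, 10);
    for (i = 0; i < SPARKLE_256_STATE_SIZE; ++i)
        le_store_word32(buf + 4 * i, s[i]);   /* words -> bytes */
}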
- * - * \param s The words of the SPARKLE-512 state in little-endian byte order. - * \param steps The number of steps to perform, 8 or 12. - */ -void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/internal-util.h b/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
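/*
 * Illustrative sketch only: how the little-endian load/store macros above can
 * be combined in practice; this XORs a 32-bit counter into a 4-byte buffer
 * held in little-endian byte order.  The function name is an assumption for
 * illustration, not part of these sources.
 */
static void xor_counter_le(unsigned char block[4], uint32_t counter)
{
    uint32_t w = le_load_word32(block);  /* bytes -> host-order word */
    w ^= counter;
    le_store_word32(block, w);           /* host-order word -> bytes */
}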
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
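/*
 * Illustrative self-check only: every composed rotation in this branch must
 * agree with the generic leftRotate()/rightRotate() forms.  For example,
 * leftRotate13 is built as leftRotate16 followed by three 1-bit right
 * rotations, since a multiple-of-8 rotation is just a byte permutation on AVR
 * and a 1-bit rotation is a short shift-with-carry chain.  The function name
 * below is an assumption for illustration, not part of these sources.
 */
static int left_rotate13_matches_generic(uint32_t x)
{
    return leftRotate13(x) == (uint32_t)((x << 13) | (x >> 19));
}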
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/sparkle.c b/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/sparkle.c deleted file mode 100644 index e2aa25a..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/sparkle.c +++ /dev/null @@ -1,1135 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "sparkle.h" -#include "internal-sparkle.h" -#include - -aead_cipher_t const schwaemm_256_128_cipher = { - "Schwaemm256-128", - SCHWAEMM_256_128_KEY_SIZE, - SCHWAEMM_256_128_NONCE_SIZE, - SCHWAEMM_256_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_256_128_aead_encrypt, - schwaemm_256_128_aead_decrypt -}; - -aead_cipher_t const schwaemm_192_192_cipher = { - "Schwaemm192-192", - SCHWAEMM_192_192_KEY_SIZE, - SCHWAEMM_192_192_NONCE_SIZE, - SCHWAEMM_192_192_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_192_192_aead_encrypt, - schwaemm_192_192_aead_decrypt -}; - -aead_cipher_t const schwaemm_128_128_cipher = { - "Schwaemm128-128", - SCHWAEMM_128_128_KEY_SIZE, - SCHWAEMM_128_128_NONCE_SIZE, - SCHWAEMM_128_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_128_128_aead_encrypt, - schwaemm_128_128_aead_decrypt -}; - -aead_cipher_t const schwaemm_256_256_cipher = { - "Schwaemm256-256", - SCHWAEMM_256_256_KEY_SIZE, - SCHWAEMM_256_256_NONCE_SIZE, - SCHWAEMM_256_256_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_256_256_aead_encrypt, - schwaemm_256_256_aead_decrypt -}; - -aead_hash_algorithm_t const esch_256_hash_algorithm = { - "Esch256", - sizeof(esch_256_hash_state_t), - ESCH_256_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - esch_256_hash, - (aead_hash_init_t)esch_256_hash_init, - (aead_hash_update_t)esch_256_hash_update, - (aead_hash_finalize_t)esch_256_hash_finalize, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const esch_384_hash_algorithm = { - "Esch384", - sizeof(esch_384_hash_state_t), - ESCH_384_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - esch_384_hash, - (aead_hash_init_t)esch_384_hash_init, - (aead_hash_update_t)esch_384_hash_update, - (aead_hash_finalize_t)esch_384_hash_finalize, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \def DOMAIN(value) - * \brief Build a domain separation value as a 32-bit word. - * - * \param value The base value. - * \return The domain separation value as a 32-bit word. - */ -#if defined(LW_UTIL_LITTLE_ENDIAN) -#define DOMAIN(value) (((uint32_t)(value)) << 24) -#else -#define DOMAIN(value) (value) -#endif - -/** - * \brief Rate at which bytes are processed by Schwaemm256-128. - */ -#define SCHWAEMM_256_128_RATE 32 - -/** - * \brief Pointer to the left of the state for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_RIGHT(s) \ - (SCHWAEMM_256_128_LEFT(s) + SCHWAEMM_256_128_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm256-128. - * - * \param s SPARKLE-384 state. - */ -#define schwaemm_256_128_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[4] ^ s[8]; \ - s[4] ^= t ^ s[8]; \ - t = s[1]; \ - s[1] = s[5] ^ s[9]; \ - s[5] ^= t ^ s[9]; \ - t = s[2]; \ - s[2] = s[6] ^ s[10]; \ - s[6] ^= t ^ s[10]; \ - t = s[3]; \ - s[3] = s[7] ^ s[11]; \ - s[7] ^= t ^ s[11]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm256-128. - * - * \param s SPARKLE-384 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
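/*
 * Illustrative sketch only: the schwaemm_256_128_rho() macro above, rewritten
 * as a loop.  It applies the rho1 Feistel-style mixing of the two halves of
 * the 32-byte rate (s[0..3] and s[4..7]) while XORing the capacity words
 * s[8..11] into the rate (rate whitening).  The function name is an
 * assumption for illustration, not part of these sources.
 */
static void schwaemm_256_128_rho_loop(uint32_t s[SPARKLE_384_STATE_SIZE])
{
    unsigned i;
    for (i = 0; i < 4; ++i) {
        uint32_t t = s[i];
        s[i] = s[i + 4] ^ s[i + 8];
        s[i + 4] ^= t ^ s[i + 8];
    }
}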
- */ -static void schwaemm_256_128_authenticate - (uint32_t s[SPARKLE_384_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_256_128_RATE) { - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); - sparkle_384(s, 7); - ad += SCHWAEMM_256_128_RATE; - adlen -= SCHWAEMM_256_128_RATE; - } - if (adlen == SCHWAEMM_256_128_RATE) { - s[11] ^= DOMAIN(0x05); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[11] ^= DOMAIN(0x04); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); -} - -int schwaemm_256_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - uint8_t block[SCHWAEMM_256_128_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_256_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_128_LEFT(s), npub, SCHWAEMM_256_128_NONCE_SIZE); - memcpy(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_128_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - sparkle_384(s, 7); - memcpy(c, block, SCHWAEMM_256_128_RATE); - c += SCHWAEMM_256_128_RATE; - m += SCHWAEMM_256_128_RATE; - mlen -= SCHWAEMM_256_128_RATE; - } - if (mlen == SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); - s[11] ^= DOMAIN(0x07); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - memcpy(c, block, SCHWAEMM_256_128_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[11] ^= DOMAIN(0x06); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_384(s, 11); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_TAG_SIZE); - return 0; -} - -int schwaemm_256_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_256_128_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_256_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_128_LEFT(s), npub, SCHWAEMM_256_128_NONCE_SIZE); - memcpy(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_128_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_256_128_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - sparkle_384(s, 7); - c += SCHWAEMM_256_128_RATE; - m += SCHWAEMM_256_128_RATE; - clen -= SCHWAEMM_256_128_RATE; - } - if (clen == SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); - s[11] ^= DOMAIN(0x07); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[11] ^= DOMAIN(0x06); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_256_128_RIGHT(s), c, SCHWAEMM_256_128_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Schwaemm192-192. - */ -#define SCHWAEMM_192_192_RATE 24 - -/** - * \brief Pointer to the left of the state for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_RIGHT(s) \ - (SCHWAEMM_192_192_LEFT(s) + SCHWAEMM_192_192_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm192-192. - * - * \param s SPARKLE-384 state. - */ -#define schwaemm_192_192_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[3] ^ s[6]; \ - s[3] ^= t ^ s[9]; \ - t = s[1]; \ - s[1] = s[4] ^ s[7]; \ - s[4] ^= t ^ s[10]; \ - t = s[2]; \ - s[2] = s[5] ^ s[8]; \ - s[5] ^= t ^ s[11]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm192-192. - * - * \param s SPARKLE-384 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
- */ -static void schwaemm_192_192_authenticate - (uint32_t s[SPARKLE_384_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_192_192_RATE) { - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); - sparkle_384(s, 7); - ad += SCHWAEMM_192_192_RATE; - adlen -= SCHWAEMM_192_192_RATE; - } - if (adlen == SCHWAEMM_192_192_RATE) { - s[11] ^= DOMAIN(0x09); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[11] ^= DOMAIN(0x08); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); -} - -int schwaemm_192_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - uint8_t block[SCHWAEMM_192_192_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_192_192_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_192_192_LEFT(s), npub, SCHWAEMM_192_192_NONCE_SIZE); - memcpy(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_192_192_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - sparkle_384(s, 7); - memcpy(c, block, SCHWAEMM_192_192_RATE); - c += SCHWAEMM_192_192_RATE; - m += SCHWAEMM_192_192_RATE; - mlen -= SCHWAEMM_192_192_RATE; - } - if (mlen == SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); - s[11] ^= DOMAIN(0x0B); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - memcpy(c, block, SCHWAEMM_192_192_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[11] ^= DOMAIN(0x0A); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_384(s, 11); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_TAG_SIZE); - return 0; -} - -int schwaemm_192_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_192_192_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_192_192_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_192_192_LEFT(s), npub, SCHWAEMM_192_192_NONCE_SIZE); - memcpy(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_192_192_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_192_192_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - sparkle_384(s, 7); - c += SCHWAEMM_192_192_RATE; - m += SCHWAEMM_192_192_RATE; - clen -= SCHWAEMM_192_192_RATE; - } - if (clen == SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); - s[11] ^= DOMAIN(0x0B); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[11] ^= DOMAIN(0x0A); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_192_192_RIGHT(s), c, SCHWAEMM_192_192_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Schwaemm128-128. - */ -#define SCHWAEMM_128_128_RATE 16 - -/** - * \brief Pointer to the left of the state for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_RIGHT(s) \ - (SCHWAEMM_128_128_LEFT(s) + SCHWAEMM_128_128_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm128-128. - * - * \param s SPARKLE-256 state. - */ -#define schwaemm_128_128_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[2] ^ s[4]; \ - s[2] ^= t ^ s[6]; \ - t = s[1]; \ - s[1] = s[3] ^ s[5]; \ - s[3] ^= t ^ s[7]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm128-128. - * - * \param s SPARKLE-256 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
- */ -static void schwaemm_128_128_authenticate - (uint32_t s[SPARKLE_256_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_128_128_RATE) { - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); - sparkle_256(s, 7); - ad += SCHWAEMM_128_128_RATE; - adlen -= SCHWAEMM_128_128_RATE; - } - if (adlen == SCHWAEMM_128_128_RATE) { - s[7] ^= DOMAIN(0x05); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[7] ^= DOMAIN(0x04); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_256(s, 10); -} - -int schwaemm_128_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_256_STATE_SIZE]; - uint8_t block[SCHWAEMM_128_128_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_128_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_128_128_LEFT(s), npub, SCHWAEMM_128_128_NONCE_SIZE); - memcpy(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_KEY_SIZE); - sparkle_256(s, 10); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_128_128_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - sparkle_256(s, 7); - memcpy(c, block, SCHWAEMM_128_128_RATE); - c += SCHWAEMM_128_128_RATE; - m += SCHWAEMM_128_128_RATE; - mlen -= SCHWAEMM_128_128_RATE; - } - if (mlen == SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); - s[7] ^= DOMAIN(0x07); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - memcpy(c, block, SCHWAEMM_128_128_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[7] ^= DOMAIN(0x06); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_256(s, 10); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_TAG_SIZE); - return 0; -} - -int schwaemm_128_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_256_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_128_128_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_128_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_128_128_LEFT(s), npub, SCHWAEMM_128_128_NONCE_SIZE); - memcpy(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_KEY_SIZE); - sparkle_256(s, 10); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_128_128_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_128_128_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - sparkle_256(s, 7); - c += SCHWAEMM_128_128_RATE; - m += SCHWAEMM_128_128_RATE; - clen -= SCHWAEMM_128_128_RATE; - } - if (clen == SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); - s[7] ^= DOMAIN(0x07); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[7] ^= DOMAIN(0x06); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_256(s, 10); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_128_128_RIGHT(s), c, SCHWAEMM_128_128_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Schwaemm256-256. - */ -#define SCHWAEMM_256_256_RATE 32 - -/** - * \brief Pointer to the left of the state for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_RIGHT(s) \ - (SCHWAEMM_256_256_LEFT(s) + SCHWAEMM_256_256_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm256-256. - * - * \param s SPARKLE-512 state. - */ -#define schwaemm_256_256_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[4] ^ s[8]; \ - s[4] ^= t ^ s[12]; \ - t = s[1]; \ - s[1] = s[5] ^ s[9]; \ - s[5] ^= t ^ s[13]; \ - t = s[2]; \ - s[2] = s[6] ^ s[10]; \ - s[6] ^= t ^ s[14]; \ - t = s[3]; \ - s[3] = s[7] ^ s[11]; \ - s[7] ^= t ^ s[15]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm256-256. - * - * \param s SPARKLE-512 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
- */ -static void schwaemm_256_256_authenticate - (uint32_t s[SPARKLE_512_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_256_256_RATE) { - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); - sparkle_512(s, 8); - ad += SCHWAEMM_256_256_RATE; - adlen -= SCHWAEMM_256_256_RATE; - } - if (adlen == SCHWAEMM_256_256_RATE) { - s[15] ^= DOMAIN(0x11); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[15] ^= DOMAIN(0x10); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_512(s, 12); -} - -int schwaemm_256_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_512_STATE_SIZE]; - uint8_t block[SCHWAEMM_256_256_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_256_256_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_256_LEFT(s), npub, SCHWAEMM_256_256_NONCE_SIZE); - memcpy(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_KEY_SIZE); - sparkle_512(s, 12); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_256_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - sparkle_512(s, 8); - memcpy(c, block, SCHWAEMM_256_256_RATE); - c += SCHWAEMM_256_256_RATE; - m += SCHWAEMM_256_256_RATE; - mlen -= SCHWAEMM_256_256_RATE; - } - if (mlen == SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); - s[15] ^= DOMAIN(0x13); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - memcpy(c, block, SCHWAEMM_256_256_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[15] ^= DOMAIN(0x12); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_512(s, 12); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_TAG_SIZE); - return 0; -} - -int schwaemm_256_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_512_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_256_256_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_256_256_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_256_LEFT(s), npub, SCHWAEMM_256_256_NONCE_SIZE); - memcpy(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_KEY_SIZE); - sparkle_512(s, 12); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_256_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_256_256_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - sparkle_512(s, 8); - c += SCHWAEMM_256_256_RATE; - m += SCHWAEMM_256_256_RATE; - clen -= SCHWAEMM_256_256_RATE; - } - if (clen == SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); - s[15] ^= DOMAIN(0x13); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[15] ^= DOMAIN(0x12); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_512(s, 12); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_256_256_RIGHT(s), c, SCHWAEMM_256_256_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Esch256. - */ -#define ESCH_256_RATE 16 - -/** - * \brief Perform the M3 step for Esch256 to mix the input with the state. - * - * \param s SPARKLE-384 state. - * \param block Block of input data that has been padded to the rate. - * \param domain Domain separator for this phase. - */ -#define esch_256_m3(s, block, domain) \ - do { \ - uint32_t tx = (block)[0] ^ (block)[2]; \ - uint32_t ty = (block)[1] ^ (block)[3]; \ - tx = leftRotate16(tx ^ (tx << 16)); \ - ty = leftRotate16(ty ^ (ty << 16)); \ - s[0] ^= (block)[0] ^ ty; \ - s[1] ^= (block)[1] ^ tx; \ - s[2] ^= (block)[2] ^ ty; \ - s[3] ^= (block)[3] ^ tx; \ - if ((domain) != 0) \ - s[5] ^= DOMAIN(domain); \ - s[4] ^= ty; \ - s[5] ^= tx; \ - } while (0) - -/** @cond esch_256 */ - -/** - * \brief Word-based state for the Esch256 incremental hash mode. 
- */ -typedef union -{ - struct { - uint32_t state[SPARKLE_384_STATE_SIZE]; - uint32_t block[4]; - unsigned char count; - } s; - unsigned long long align; - -} esch_256_hash_state_wt; - -/** @endcond */ - -int esch_256_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - uint32_t block[ESCH_256_RATE / 4]; - memset(s, 0, sizeof(s)); - while (inlen > ESCH_256_RATE) { - memcpy(block, in, ESCH_256_RATE); - esch_256_m3(s, block, 0x00); - sparkle_384(s, 7); - in += ESCH_256_RATE; - inlen -= ESCH_256_RATE; - } - if (inlen == ESCH_256_RATE) { - memcpy(block, in, ESCH_256_RATE); - esch_256_m3(s, block, 0x02); - } else { - unsigned temp = (unsigned)inlen; - memcpy(block, in, temp); - ((unsigned char *)block)[temp] = 0x80; - memset(((unsigned char *)block) + temp + 1, 0, - ESCH_256_RATE - temp - 1); - esch_256_m3(s, block, 0x01); - } - sparkle_384(s, 11); - memcpy(out, s, ESCH_256_RATE); - sparkle_384(s, 7); - memcpy(out + ESCH_256_RATE, s, ESCH_256_RATE); - return 0; -} - -void esch_256_hash_init(esch_256_hash_state_t *state) -{ - memset(state, 0, sizeof(esch_256_hash_state_t)); -} - -void esch_256_hash_update - (esch_256_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - esch_256_hash_state_wt *st = (esch_256_hash_state_wt *)state; - unsigned temp; - while (inlen > 0) { - if (st->s.count == ESCH_256_RATE) { - esch_256_m3(st->s.state, st->s.block, 0x00); - sparkle_384(st->s.state, 7); - st->s.count = 0; - } - temp = ESCH_256_RATE - st->s.count; - if (temp > inlen) - temp = (unsigned)inlen; - memcpy(((unsigned char *)(st->s.block)) + st->s.count, in, temp); - st->s.count += temp; - in += temp; - inlen -= temp; - } -} - -void esch_256_hash_finalize - (esch_256_hash_state_t *state, unsigned char *out) -{ - esch_256_hash_state_wt *st = (esch_256_hash_state_wt *)state; - - /* Pad and process the last block */ - if (st->s.count == ESCH_256_RATE) { - esch_256_m3(st->s.state, st->s.block, 0x02); - } else { - unsigned temp = st->s.count; - ((unsigned char *)(st->s.block))[temp] = 0x80; - memset(((unsigned char *)(st->s.block)) + temp + 1, 0, - ESCH_256_RATE - temp - 1); - esch_256_m3(st->s.state, st->s.block, 0x01); - } - sparkle_384(st->s.state, 11); - - /* Generate the final hash value */ - memcpy(out, st->s.state, ESCH_256_RATE); - sparkle_384(st->s.state, 7); - memcpy(out + ESCH_256_RATE, st->s.state, ESCH_256_RATE); -} - -/** - * \brief Rate at which bytes are processed by Esch384. - */ -#define ESCH_384_RATE 16 - -/** - * \brief Perform the M4 step for Esch384 to mix the input with the state. - * - * \param s SPARKLE-512 state. - * \param block Block of input data that has been padded to the rate. - * \param domain Domain separator for this phase. - */ -#define esch_384_m4(s, block, domain) \ - do { \ - uint32_t tx = block[0] ^ block[2]; \ - uint32_t ty = block[1] ^ block[3]; \ - tx = leftRotate16(tx ^ (tx << 16)); \ - ty = leftRotate16(ty ^ (ty << 16)); \ - s[0] ^= block[0] ^ ty; \ - s[1] ^= block[1] ^ tx; \ - s[2] ^= block[2] ^ ty; \ - s[3] ^= block[3] ^ tx; \ - if ((domain) != 0) \ - s[7] ^= DOMAIN(domain); \ - s[4] ^= ty; \ - s[5] ^= tx; \ - s[6] ^= ty; \ - s[7] ^= tx; \ - } while (0) - -/** @cond esch_384 */ - -/** - * \brief Word-based state for the Esch384 incremental hash mode. 
- */ -typedef union -{ - struct { - uint32_t state[SPARKLE_512_STATE_SIZE]; - uint32_t block[4]; - unsigned char count; - } s; - unsigned long long align; - -} esch_384_hash_state_wt; - -/** @endcond */ - -int esch_384_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - uint32_t s[SPARKLE_512_STATE_SIZE]; - uint32_t block[ESCH_256_RATE / 4]; - memset(s, 0, sizeof(s)); - while (inlen > ESCH_384_RATE) { - memcpy(block, in, ESCH_384_RATE); - esch_384_m4(s, block, 0x00); - sparkle_512(s, 8); - in += ESCH_384_RATE; - inlen -= ESCH_384_RATE; - } - if (inlen == ESCH_384_RATE) { - memcpy(block, in, ESCH_384_RATE); - esch_384_m4(s, block, 0x02); - } else { - unsigned temp = (unsigned)inlen; - memcpy(block, in, temp); - ((unsigned char *)block)[temp] = 0x80; - memset(((unsigned char *)block) + temp + 1, 0, - ESCH_384_RATE - temp - 1); - esch_384_m4(s, block, 0x01); - } - sparkle_512(s, 12); - memcpy(out, s, ESCH_384_RATE); - sparkle_512(s, 8); - memcpy(out + ESCH_384_RATE, s, ESCH_384_RATE); - sparkle_512(s, 8); - memcpy(out + ESCH_384_RATE * 2, s, ESCH_384_RATE); - return 0; -} - -void esch_384_hash_init(esch_384_hash_state_t *state) -{ - memset(state, 0, sizeof(esch_384_hash_state_t)); -} - -void esch_384_hash_update - (esch_384_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - esch_384_hash_state_wt *st = (esch_384_hash_state_wt *)state; - unsigned temp; - while (inlen > 0) { - if (st->s.count == ESCH_384_RATE) { - esch_384_m4(st->s.state, st->s.block, 0x00); - sparkle_512(st->s.state, 8); - st->s.count = 0; - } - temp = ESCH_384_RATE - st->s.count; - if (temp > inlen) - temp = (unsigned)inlen; - memcpy(((unsigned char *)(st->s.block)) + st->s.count, in, temp); - st->s.count += temp; - in += temp; - inlen -= temp; - } -} - -void esch_384_hash_finalize - (esch_384_hash_state_t *state, unsigned char *out) -{ - esch_384_hash_state_wt *st = (esch_384_hash_state_wt *)state; - - /* Pad and process the last block */ - if (st->s.count == ESCH_384_RATE) { - esch_384_m4(st->s.state, st->s.block, 0x02); - } else { - unsigned temp = st->s.count; - ((unsigned char *)(st->s.block))[temp] = 0x80; - memset(((unsigned char *)(st->s.block)) + temp + 1, 0, - ESCH_384_RATE - temp - 1); - esch_384_m4(st->s.state, st->s.block, 0x01); - } - sparkle_512(st->s.state, 12); - - /* Generate the final hash value */ - memcpy(out, st->s.state, ESCH_384_RATE); - sparkle_512(st->s.state, 8); - memcpy(out + ESCH_384_RATE, st->s.state, ESCH_384_RATE); - sparkle_512(st->s.state, 8); - memcpy(out + ESCH_384_RATE * 2, st->s.state, ESCH_384_RATE); -} diff --git a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/sparkle.h b/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/sparkle.h deleted file mode 100644 index dd0999e..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys-avr/sparkle.h +++ /dev/null @@ -1,515 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SPARKLE_H -#define LWCRYPTO_SPARKLE_H - -#include "aead-common.h" - -/** - * \file sparkle.h - * \brief Encryption and hash algorithms based on the SPARKLE permutation. - * - * SPARKLE is a family of encryption and hash algorithms that are based - * around the SPARKLE permutation. There are three versions of the - * permutation with 256-bit, 384-bit, and 512-bit state sizes. - * The algorithms in the family are: - * - * \li Schwaemm256-128 with a 128-bit key, a 256-bit nonce, and a 128-bit tag. - * This is the primary encryption algorithm in the family. - * \li Schwaemm192-192 with a 192-bit key, a 192-bit nonce, and a 192-bit tag. - * \li Schwaemm128-128 with a 128-bit key, a 128-bit nonce, and a 128-bit tag. - * \li Schwaemm256-256 with a 256-bit key, a 256-bit nonce, and a 256-bit tag. - * \li Esch256 hash algorithm with a 256-bit digest output. This is the - * primary hash algorithm in the family. - * \li Esch384 hash algorithm with a 384-bit digest output. - * - * References: https://www.cryptolux.org/index.php/Sparkle - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_NONCE_SIZE 32 - -/** - * \brief Size of the key for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_KEY_SIZE 24 - -/** - * \brief Size of the authentication tag for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_TAG_SIZE 24 - -/** - * \brief Size of the nonce for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_NONCE_SIZE 24 - -/** - * \brief Size of the key for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_NONCE_SIZE 16 - -/** - * \brief Size of the key for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_TAG_SIZE 32 - -/** - * \brief Size of the nonce for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_NONCE_SIZE 32 - -/** - * \brief Size of the hash output for Esch256. 
- */ -#define ESCH_256_HASH_SIZE 32 - -/** - * \brief Size of the hash output for Esch384. - */ -#define ESCH_384_HASH_SIZE 48 - -/** - * \brief Meta-information block for the Schwaemm256-128 cipher. - */ -extern aead_cipher_t const schwaemm_256_128_cipher; - -/** - * \brief Meta-information block for the Schwaemm192-192 cipher. - */ -extern aead_cipher_t const schwaemm_192_192_cipher; - -/** - * \brief Meta-information block for the Schwaemm128-128 cipher. - */ -extern aead_cipher_t const schwaemm_128_128_cipher; - -/** - * \brief Meta-information block for the Schwaemm256-256 cipher. - */ -extern aead_cipher_t const schwaemm_256_256_cipher; - -/** - * \brief Meta-information block for the Esch256 hash algorithm. - */ -extern aead_hash_algorithm_t const esch_256_hash_algorithm; - -/** - * \brief Meta-information block for the Esch384 hash algorithm. - */ -extern aead_hash_algorithm_t const esch_384_hash_algorithm; - -/** - * \brief State information for the Esch256 incremental hash mode. - */ -typedef union -{ - struct { - unsigned char state[48]; /**< Current hash state */ - unsigned char block[16]; /**< Partial input data block */ - unsigned char count; /**< Number of bytes in the current block */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} esch_256_hash_state_t; - -/** - * \brief State information for the Esch384 incremental hash mode. - */ -typedef union -{ - struct { - unsigned char state[64]; /**< Current hash state */ - unsigned char block[16]; /**< Partial input data block */ - unsigned char count; /**< Number of bytes in the current block */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} esch_384_hash_state_t; - -/** - * \brief Encrypts and authenticates a packet with Schwaemm256-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 32 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa schwaemm_256_128_aead_decrypt() - */ -int schwaemm_256_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm256-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 32 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_256_128_aead_encrypt() - */ -int schwaemm_256_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Schwaemm192-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 24 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 24 bytes in length. - * \param k Points to the 24 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa schwaemm_192_192_aead_decrypt() - */ -int schwaemm_192_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm192-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 24 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 24 bytes in length. - * \param k Points to the 24 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_192_192_aead_encrypt() - */ -int schwaemm_192_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Schwaemm128-128. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa schwaemm_128_128_aead_decrypt() - */ -int schwaemm_128_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm128-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_128_128_aead_encrypt() - */ -int schwaemm_128_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Schwaemm256-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 32 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 32 bytes in length. - * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters.
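A minimal sketch of how the Schwaemm256-128 AEAD interface declared in this header is typically driven; the constants and function signatures come from the header itself, while the wrapper name example_round_trip and the fixed all-zero key/nonce are illustrative assumptions only, not part of the library.

#include <string.h>
#include "sparkle.h"

static int example_round_trip(void)
{
    unsigned char key[SCHWAEMM_256_128_KEY_SIZE] = {0};     /* demo key only */
    unsigned char nonce[SCHWAEMM_256_128_NONCE_SIZE] = {0}; /* must be unique per key in practice */
    static const unsigned char msg[] = "hello sparkle";
    static const unsigned char ad[] = "header";
    unsigned char ct[sizeof(msg) + SCHWAEMM_256_128_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    /* Encrypt: the output is mlen bytes of ciphertext followed by the tag */
    if (schwaemm_256_128_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                                      ad, sizeof(ad), 0, nonce, key) != 0)
        return -1;

    /* Decrypt: returns non-zero if the authentication tag does not verify */
    if (schwaemm_256_128_aead_decrypt(pt, &ptlen, 0, ct, ctlen,
                                      ad, sizeof(ad), nonce, key) != 0)
        return -1;

    /* The recovered plaintext matches the original message */
    return memcmp(pt, msg, (size_t)ptlen) == 0 ? 0 : -1;
}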
- * - * \sa schwaemm_256_256_aead_decrypt() - */ -int schwaemm_256_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm256-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 32 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 32 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_256_256_aead_encrypt() - */ -int schwaemm_256_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with Esch256 to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * ESCH_256_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int esch_256_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an Esch256 hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa esch_256_hash_update(), esch_256_hash_finalize(), esch_256_hash() - */ -void esch_256_hash_init(esch_256_hash_state_t *state); - -/** - * \brief Updates an Esch256 state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa esch_256_hash_init(), esch_256_hash_finalize() - */ -void esch_256_hash_update - (esch_256_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an Esch256 hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 32-byte hash value. - * - * \sa esch_256_hash_init(), esch_256_hash_update() - */ -void esch_256_hash_finalize - (esch_256_hash_state_t *state, unsigned char *out); - -/** - * \brief Hashes a block of input data with Esch384 to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * ESCH_384_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes.
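A short sketch of the Esch256 hashing calls documented above, showing that the one-shot and incremental paths produce the same digest; only the wrapper name example_esch_256 is an illustrative assumption, everything else is taken from this header.

#include "sparkle.h"

static void example_esch_256(const unsigned char *data, unsigned long long len,
                             unsigned char digest[ESCH_256_HASH_SIZE])
{
    esch_256_hash_state_t state;
    unsigned char digest2[ESCH_256_HASH_SIZE];

    /* One-shot hashing of the whole input */
    esch_256_hash(digest, data, len);

    /* Incremental hashing of the same input, split into two updates */
    esch_256_hash_init(&state);
    esch_256_hash_update(&state, data, len / 2);
    esch_256_hash_update(&state, data + len / 2, len - len / 2);
    esch_256_hash_finalize(&state, digest2);

    /* digest and digest2 now hold the same ESCH_256_HASH_SIZE-byte value */
    (void)digest2;
}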
- * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int esch_384_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an Esch384 hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa esch_384_hash_update(), esch_384_hash_finalize(), esch_384_hash() - */ -void esch_384_hash_init(esch_384_hash_state_t *state); - -/** - * \brief Updates an Esch384 state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa esch_384_hash_init(), esch_384_hash_finalize() - */ -void esch_384_hash_update - (esch_384_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an Esch384 hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 48-byte hash value. - * - * \sa esch_384_hash_init(), esch_384_hash_update() - */ -void esch_384_hash_finalize - (esch_384_hash_state_t *state, unsigned char *out); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys/internal-sparkle-avr.S b/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys/internal-sparkle-avr.S new file mode 100644 index 0000000..753ea2f --- /dev/null +++ b/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys/internal-sparkle-avr.S @@ -0,0 +1,2887 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global sparkle_256 + .type sparkle_256, @function +sparkle_256: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + push r22 + ld r22,Z + ldd r23,Z+1 + ldd r26,Z+2 + ldd r27,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + rcall 129f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,1 + eor r8,r18 + rcall 129f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,2 + eor r8,r18 + rcall 129f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,3 + eor r8,r18 + rcall 129f + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,4 + eor r8,r18 + rcall 129f + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,5 + eor r8,r18 + rcall 129f + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,6 + eor r8,r18 + rcall 129f + pop r18 + cpi r18,7 + brne 5094f + rjmp 615f +5094: + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,7 + eor r8,r18 + rcall 129f + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,8 + eor 
r8,r18 + rcall 129f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,9 + eor r8,r18 + rcall 129f + rjmp 615f +129: + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + movw r12,r22 + movw r14,r26 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + movw r24,r4 + movw r16,r6 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r28,Z+24 + ldd r29,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc 
r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + eor r14,r12 + eor r15,r13 + eor r16,r24 + eor r17,r25 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + eor r14,r8 + eor r15,r9 + eor r12,r10 + eor r13,r11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + std Z+20,r18 + std Z+21,r19 + std Z+22,r20 + std Z+23,r21 + movw r18,r4 + movw r20,r6 + movw r4,r14 + movw r6,r12 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + movw r8,r18 + movw r10,r20 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + eor r16,r28 + eor r17,r29 + eor r24,r2 + eor r25,r3 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + std Z+24,r28 + std Z+25,r29 + std Z+26,r2 + std Z+27,r3 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + movw r14,r22 + movw r12,r26 + eor r14,r18 + eor r15,r19 + eor r12,r20 + eor r13,r21 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + movw r22,r16 + movw r26,r24 + eor r22,r28 + eor r23,r29 + eor r26,r2 + eor r27,r3 + movw r28,r14 + movw r2,r12 + ret +615: + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sparkle_256, .-sparkle_256 + + .text +.global sparkle_384 + .type sparkle_384, @function +sparkle_384: + push r28 + push r29 + push r2 + push r3 + 
push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + push r22 + ld r22,Z + ldd r23,Z+1 + ldd r26,Z+2 + ldd r27,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + rcall 140f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,1 + eor r8,r18 + rcall 140f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,2 + eor r8,r18 + rcall 140f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,3 + eor r8,r18 + rcall 140f + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,4 + eor r8,r18 + rcall 140f + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,5 + eor r8,r18 + rcall 140f + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,6 + eor r8,r18 + rcall 140f + pop r18 + cpi r18,7 + brne 5094f + rjmp 886f +5094: + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,7 + eor r8,r18 + rcall 140f + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,8 + eor r8,r18 + rcall 140f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,9 + eor r8,r18 + rcall 140f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,10 + eor r8,r18 + rcall 140f + rjmp 886f +140: + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr 
r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + movw r12,r22 + movw r14,r26 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + movw r24,r4 + movw r16,r6 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r28,Z+24 + ldd r29,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + 
eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r22 + std Z+17,r23 + std Z+18,r26 + std Z+19,r27 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r28 + std Z+25,r29 + std Z+26,r2 + std Z+27,r3 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + eor r12,r22 + eor r13,r23 + eor r14,r26 + eor r15,r27 + eor r24,r4 + eor r25,r5 + eor r16,r6 + eor r17,r7 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r28,Z+40 + ldd r29,Z+41 + ldd r2,Z+42 + ldd r3,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + eor r14,r12 + eor r15,r13 + eor r16,r24 + eor r17,r25 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + eor r14,r8 + eor r15,r9 + eor r12,r10 + eor r13,r11 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + ldd r0,Z+4 + eor r18,r0 + ldd r0,Z+5 + eor r19,r0 + ldd r0,Z+6 + eor r20,r0 + ldd r0,Z+7 
+ eor r21,r0 + std Z+20,r18 + std Z+21,r19 + std Z+22,r20 + std Z+23,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Z+28,r18 + std Z+29,r19 + std Z+30,r20 + std Z+31,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + std Z+36,r18 + std Z+37,r19 + std Z+38,r20 + std Z+39,r21 + eor r8,r14 + eor r9,r15 + eor r10,r12 + eor r11,r13 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + eor r18,r16 + eor r19,r17 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + eor r16,r28 + eor r17,r29 + eor r24,r2 + eor r25,r3 + ldd r28,Z+16 + ldd r29,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + std Z+40,r28 + std Z+41,r29 + std Z+42,r2 + std Z+43,r3 + ld r14,Z + ldd r15,Z+1 + ldd r12,Z+2 + ldd r13,Z+3 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + std Z+24,r14 + std Z+25,r15 + std Z+26,r12 + std Z+27,r13 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + std Z+32,r18 + std Z+33,r19 + std Z+34,r20 + std Z+35,r21 + eor r28,r16 + eor r29,r17 + eor r2,r24 + eor r3,r25 + ret +886: + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sparkle_384, .-sparkle_384 + + .text +.global sparkle_512 + .type sparkle_512, @function +sparkle_512: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + push r22 + ld r22,Z + ldd r23,Z+1 + ldd r26,Z+2 + ldd r27,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + rcall 151f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,1 + eor r8,r18 + rcall 151f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,2 + eor r8,r18 + rcall 151f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,3 + eor r8,r18 + rcall 151f + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,4 + eor r8,r18 + rcall 151f + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,5 + eor r8,r18 + rcall 151f + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,6 + eor r8,r18 + rcall 151f + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,7 + eor r8,r18 + rcall 151f + pop r18 + cpi r18,8 + brne 5105f + rjmp 1189f +5105: + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,8 + eor r8,r18 + rcall 151f + ldi r18,128 + ldi r19,88 + ldi 
r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,9 + eor r8,r18 + rcall 151f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,10 + eor r8,r18 + rcall 151f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,11 + eor r8,r18 + rcall 151f + rjmp 1189f +151: + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + movw r12,r22 + movw r14,r26 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + movw r24,r4 + movw r16,r6 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r28,Z+24 + ldd r29,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,86 
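/* Each rcall to the step subroutine runs one SPARKLE step: the 32-bit constant
 * XORed into the state just before the call is that step's round constant, and
 * inside the subroutine every 64-bit branch passes through an ARX box (add,
 * rotate, XOR with the branch constant).  A minimal C sketch of the per-branch
 * ARX box, with rotation counts as in the SPARKLE specification; the names
 * ROR32 and alzette_box are illustrative (ROR32 matches rightRotate() in
 * internal-util.h). */
#include <stdint.h>
#define ROR32(a, n) (((a) >> (n)) | ((a) << (32 - (n))))
static void alzette_box(uint32_t *x, uint32_t *y, uint32_t c)
{
    *x += ROR32(*y, 31); *y ^= ROR32(*x, 24); *x ^= c;
    *x += ROR32(*y, 17); *y ^= ROR32(*x, 17); *x ^= c;
    *x += *y;            *y ^= ROR32(*x, 31); *x ^= c;
    *x += ROR32(*y, 24); *y ^= ROR32(*x, 16); *x ^= c;
}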
+ ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r22 + std Z+17,r23 + std Z+18,r26 + std Z+19,r27 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r28 + std Z+25,r29 + std Z+26,r2 + std Z+27,r3 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + eor r12,r22 + eor r13,r23 + eor r14,r26 + eor r15,r27 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + eor r24,r4 + eor r25,r5 + eor r16,r6 + eor r17,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r28,Z+40 + ldd r29,Z+41 + ldd r2,Z+42 + ldd r3,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw 
r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + std Z+32,r22 + std Z+33,r23 + std Z+34,r26 + std Z+35,r27 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r28 + std Z+41,r29 + std Z+42,r2 + std Z+43,r3 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + ldd r22,Z+48 + ldd r23,Z+49 + ldd r26,Z+50 + ldd r27,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r28,Z+56 + ldd r29,Z+57 + ldd r2,Z+58 + ldd r3,Z+59 + ldd r8,Z+60 + ldd r9,Z+61 + ldd r10,Z+62 + ldd r11,Z+63 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror 
r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + eor r14,r12 + eor r15,r13 + eor r16,r24 + eor r17,r25 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + ldd r18,Z+44 + ldd r19,Z+45 + ldd r20,Z+46 + ldd r21,Z+47 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + eor r14,r8 + eor r15,r9 + eor r12,r10 + eor r13,r11 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Z+60,r8 + std Z+61,r9 + std Z+62,r10 + std Z+63,r11 + ldd r8,Z+4 + ldd r9,Z+5 + ldd r10,Z+6 + ldd r11,Z+7 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + std Z+28,r4 + std Z+29,r5 + std Z+30,r6 + std Z+31,r7 + std Z+36,r8 + std Z+37,r9 + std Z+38,r10 + std Z+39,r11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + ldd r8,Z+52 + ldd r9,Z+53 + ldd r10,Z+54 + ldd r11,Z+55 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + ldd r0,Z+60 + eor r14,r0 + ldd r0,Z+61 + eor r15,r0 + ldd r0,Z+62 + eor r12,r0 + ldd r0,Z+63 + eor r13,r0 + std Z+20,r14 + std Z+21,r15 + std Z+22,r12 + std Z+23,r13 + movw r4,r18 + movw r6,r20 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + std Z+48,r22 + std Z+49,r23 + std Z+50,r26 + std Z+51,r27 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + eor r18,r16 + eor r19,r17 + eor r20,r24 + eor r21,r25 + eor r16,r28 + eor r17,r29 + eor r24,r2 + eor r25,r3 + ldd r14,Z+24 + ldd r15,Z+25 + ldd r12,Z+26 + ldd r13,Z+27 + std Z+56,r14 + std Z+57,r15 + std Z+58,r12 + std Z+59,r13 + ld r14,Z + ldd r15,Z+1 + ldd r12,Z+2 + ldd r13,Z+3 + eor r22,r14 + eor r23,r15 + eor r26,r12 + eor r27,r13 + std Z+24,r22 + std Z+25,r23 + std Z+26,r26 + std Z+27,r27 + std Z+32,r14 + std Z+33,r15 + std Z+34,r12 + std Z+35,r13 + ldd r14,Z+8 + ldd r15,Z+9 + ldd r12,Z+10 + ldd r13,Z+11 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + movw r22,r18 + movw r26,r20 + std Z+40,r14 + std Z+41,r15 + std Z+42,r12 + std Z+43,r13 + ldd r28,Z+48 + ldd r29,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r14,Z+16 + ldd r15,Z+17 + ldd r12,Z+18 + ldd r13,Z+19 + eor r28,r14 + eor r29,r15 + eor r2,r12 + eor r3,r13 + std Z+48,r14 + std Z+49,r15 + std Z+50,r12 + std Z+51,r13 + ldd r0,Z+56 + eor r16,r0 + ldd r0,Z+57 + eor r17,r0 + ldd r0,Z+58 + eor r24,r0 + ldd r0,Z+59 + eor r25,r0 + std Z+16,r16 + std Z+17,r17 + std Z+18,r24 + std Z+19,r25 + ret +1189: + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std 
Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sparkle_512, .-sparkle_512 + +#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys/internal-sparkle.c b/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys/internal-sparkle.c index 822af50..4a4c0fb 100644 --- a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys/internal-sparkle.c +++ b/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys/internal-sparkle.c @@ -22,6 +22,8 @@ #include "internal-sparkle.h" +#if !defined(__AVR__) + /* The 8 basic round constants from the specification */ #define RC_0 0xB7E15162 #define RC_1 0xBF715880 @@ -66,7 +68,7 @@ void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps) { uint32_t x0, x1, x2, x3; uint32_t y0, y1, y2, y3; - uint32_t tx, ty, tz, tw; + uint32_t tx, ty; unsigned step; /* Load the SPARKLE-256 state up into local variables */ @@ -105,18 +107,20 @@ void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps) /* Linear layer */ tx = x0 ^ x1; ty = y0 ^ y1; - tw = x0; - tz = y0; tx = leftRotate16(tx ^ (tx << 16)); ty = leftRotate16(ty ^ (ty << 16)); - x0 = x3 ^ x1 ^ ty; - x3 = x1; - y0 = y3 ^ y1 ^ tx; + y2 ^= tx; + tx ^= y3; y3 = y1; - x1 = x2 ^ tw ^ ty; - x2 = tw; - y1 = y2 ^ tz ^ tx; - y2 = tz; + y1 = y2 ^ y0; + y2 = y0; + y0 = tx ^ y3; + x2 ^= ty; + ty ^= x3; + x3 = x1; + x1 = x2 ^ x0; + x2 = x0; + x0 = ty ^ x3; } /* Write the local variables back to the SPARKLE-256 state */ @@ -145,7 +149,7 @@ void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps) { uint32_t x0, x1, x2, x3, x4, x5; uint32_t y0, y1, y2, y3, y4, y5; - uint32_t tx, ty, tz, tw; + uint32_t tx, ty; unsigned step; /* Load the SPARKLE-384 state up into local variables */ @@ -194,22 +198,26 @@ void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps) /* Linear layer */ tx = x0 ^ x1 ^ x2; ty = y0 ^ y1 ^ y2; - tw = x0; - tz = y0; tx = leftRotate16(tx ^ (tx << 16)); ty = leftRotate16(ty ^ (ty << 16)); - x0 = x4 ^ x1 ^ ty; - x4 = x1; - y0 = y4 ^ y1 ^ tx; + y3 ^= tx; + y4 ^= tx; + tx ^= y5; + y5 = y2; + y2 = y3 ^ y0; + y3 = y0; + y0 = y4 ^ y1; y4 = y1; - x1 = x5 ^ x2 ^ ty; + y1 = tx ^ y5; + x3 ^= ty; + x4 ^= ty; + ty ^= x5; x5 = x2; - y1 = y5 ^ y2 ^ tx; - y5 = y2; - x2 = x3 ^ tw ^ ty; - x3 = tw; - y2 = y3 ^ tz ^ tx; - y3 = tz; + x2 = x3 ^ x0; + x3 = x0; + x0 = x4 ^ x1; + x4 = x1; + x1 = ty ^ x5; } /* Write the local variables back to the SPARKLE-384 state */ @@ -246,7 +254,7 @@ void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps) { uint32_t x0, x1, x2, x3, x4, x5, x6, x7; uint32_t y0, y1, y2, y3, y4, y5, y6, y7; - uint32_t tx, ty, tz, tw; + uint32_t tx, ty; unsigned step; /* Load the SPARKLE-512 state up into local variables */ @@ -305,26 +313,32 @@ void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps) /* Linear layer */ tx = x0 ^ x1 ^ x2 ^ x3; ty = y0 ^ y1 ^ y2 ^ y3; - tw = x0; - tz = y0; tx = leftRotate16(tx ^ (tx << 16)); ty = leftRotate16(ty ^ (ty << 16)); - x0 = x5 ^ x1 ^ ty; - x5 = x1; - y0 = y5 ^ y1 ^ tx; + y4 ^= tx; + y5 ^= tx; + y6 ^= tx; + tx ^= y7; + y7 = y3; + y3 = y4 ^ y0; + y4 = y0; + y0 = y5 ^ y1; y5 = y1; - x1 = x6 ^ x2 ^ ty; - x6 = x2; - y1 = y6 ^ y2 ^ tx; + y1 = y6 ^ y2; y6 = y2; - x2 = x7 ^ x3 ^ ty; + 
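/* The rewritten linear layer drops the tz/tw temporaries by updating the
 * branch words in place.  A small self-contained check of the SPARKLE-256
 * y-half, comparing the old and new orderings from this hunk; the helper
 * names linear_y_old and linear_y_new are illustrative. */
#include <assert.h>
#include <stdint.h>
static void linear_y_old(uint32_t y[4], uint32_t tx)
{
    uint32_t tz = y[0];               /* previous code kept y0 in tz */
    uint32_t y0 = y[3] ^ y[1] ^ tx;
    uint32_t y3 = y[1];
    uint32_t y1 = y[2] ^ tz ^ tx;
    uint32_t y2 = tz;
    y[0] = y0; y[1] = y1; y[2] = y2; y[3] = y3;
}
static void linear_y_new(uint32_t y[4], uint32_t tx)
{
    y[2] ^= tx;                       /* in-place form from this patch */
    tx   ^= y[3];
    y[3]  = y[1];
    y[1]  = y[2] ^ y[0];
    y[2]  = y[0];
    y[0]  = tx ^ y[3];
}
int main(void)
{
    uint32_t a[4] = {0x12345678, 0x9ABCDEF0, 0x0F1E2D3C, 0x4B5A6978};
    uint32_t b[4] = {0x12345678, 0x9ABCDEF0, 0x0F1E2D3C, 0x4B5A6978};
    linear_y_old(a, 0xB7E15162);
    linear_y_new(b, 0xB7E15162);
    for (int i = 0; i < 4; ++i)
        assert(a[i] == b[i]);         /* both orderings permute the branch words identically */
    return 0;
}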
y2 = tx ^ y7; + x4 ^= ty; + x5 ^= ty; + x6 ^= ty; + ty ^= x7; x7 = x3; - y2 = y7 ^ y3 ^ tx; - y7 = y3; - x3 = x4 ^ tw ^ ty; - x4 = tw; - y3 = y4 ^ tz ^ tx; - y4 = tz; + x3 = x4 ^ x0; + x4 = x0; + x0 = x5 ^ x1; + x5 = x1; + x1 = x6 ^ x2; + x6 = x2; + x2 = ty ^ x7; } /* Write the local variables back to the SPARKLE-512 state */ @@ -364,3 +378,5 @@ void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps) le_store_word32((uint8_t *)&(s[15]), y7); #endif } + +#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys/internal-util.h b/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys/internal-util.h +++ b/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) 
(rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) 
(leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys/sparkle.c b/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys/sparkle.c index b357de6..e2aa25a 100644 --- a/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys/sparkle.c +++ b/sparkle/Implementations/crypto_aead/schwaemm256128v1/rhys/sparkle.c @@ -123,24 +123,21 @@ aead_hash_algorithm_t const esch_384_hash_algorithm = { * \brief Perform the rho1 and rate whitening steps for Schwaemm256-128. * * \param s SPARKLE-384 state. - * \param domain Domain separator for this phase. */ -#define schwaemm_256_128_rho(s, domain) \ +#define schwaemm_256_128_rho(s) \ do { \ - uint32_t t0 = s[0]; \ - uint32_t t1 = s[1]; \ - uint32_t t2 = s[2]; \ - uint32_t t3 = s[3]; \ - if ((domain) != 0) \ - s[11] ^= DOMAIN(domain); \ + uint32_t t = s[0]; \ s[0] = s[4] ^ s[8]; \ + s[4] ^= t ^ s[8]; \ + t = s[1]; \ s[1] = s[5] ^ s[9]; \ + s[5] ^= t ^ s[9]; \ + t = s[2]; \ s[2] = s[6] ^ s[10]; \ + s[6] ^= t ^ s[10]; \ + t = s[3]; \ s[3] = s[7] ^ s[11]; \ - s[4] ^= t0 ^ s[8]; \ - s[5] ^= t1 ^ s[9]; \ - s[6] ^= t2 ^ s[10]; \ - s[7] ^= t3 ^ s[11]; \ + s[7] ^= t ^ s[11]; \ } while (0) /** @@ -155,18 +152,20 @@ static void schwaemm_256_128_authenticate const unsigned char *ad, unsigned long long adlen) { while (adlen > SCHWAEMM_256_128_RATE) { - schwaemm_256_128_rho(s, 0x00); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); sparkle_384(s, 7); ad += SCHWAEMM_256_128_RATE; adlen -= SCHWAEMM_256_128_RATE; } if (adlen == SCHWAEMM_256_128_RATE) { - schwaemm_256_128_rho(s, 0x05); + s[11] ^= DOMAIN(0x05); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); } else { unsigned temp = (unsigned)adlen; - schwaemm_256_128_rho(s, 0x04); + s[11] ^= DOMAIN(0x04); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, ad, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -202,7 +201,7 @@ int schwaemm_256_128_aead_encrypt while (mlen > SCHWAEMM_256_128_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s, 0x00); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); sparkle_384(s, 7); memcpy(c, block, SCHWAEMM_256_128_RATE); @@ -213,13 +212,15 @@ int schwaemm_256_128_aead_encrypt if (mlen == SCHWAEMM_256_128_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s, 0x07); + s[11] ^= DOMAIN(0x07); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); memcpy(c, block, SCHWAEMM_256_128_RATE); } else { unsigned temp = (unsigned)mlen; lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - schwaemm_256_128_rho(s, 0x06); + s[11] ^= DOMAIN(0x06); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; memcpy(c, block, temp); @@ -266,7 +267,7 @@ int schwaemm_256_128_aead_decrypt while (clen > SCHWAEMM_256_128_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s, 0x00); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); sparkle_384(s, 7); c += SCHWAEMM_256_128_RATE; @@ -276,12 +277,14 @@ int schwaemm_256_128_aead_decrypt if (clen 
== SCHWAEMM_256_128_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s, 0x07); + s[11] ^= DOMAIN(0x07); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); } else { unsigned temp = (unsigned)clen; lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - schwaemm_256_128_rho(s, 0x06); + s[11] ^= DOMAIN(0x06); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -315,21 +318,18 @@ int schwaemm_256_128_aead_decrypt * \brief Perform the rho1 and rate whitening steps for Schwaemm192-192. * * \param s SPARKLE-384 state. - * \param domain Domain separator for this phase. */ -#define schwaemm_192_192_rho(s, domain) \ +#define schwaemm_192_192_rho(s) \ do { \ - uint32_t t0 = s[0]; \ - uint32_t t1 = s[1]; \ - uint32_t t2 = s[2]; \ - if ((domain) != 0) \ - s[11] ^= DOMAIN(domain); \ + uint32_t t = s[0]; \ s[0] = s[3] ^ s[6]; \ + s[3] ^= t ^ s[9]; \ + t = s[1]; \ s[1] = s[4] ^ s[7]; \ + s[4] ^= t ^ s[10]; \ + t = s[2]; \ s[2] = s[5] ^ s[8]; \ - s[3] ^= t0 ^ s[9]; \ - s[4] ^= t1 ^ s[10]; \ - s[5] ^= t2 ^ s[11]; \ + s[5] ^= t ^ s[11]; \ } while (0) /** @@ -344,18 +344,20 @@ static void schwaemm_192_192_authenticate const unsigned char *ad, unsigned long long adlen) { while (adlen > SCHWAEMM_192_192_RATE) { - schwaemm_192_192_rho(s, 0x00); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); sparkle_384(s, 7); ad += SCHWAEMM_192_192_RATE; adlen -= SCHWAEMM_192_192_RATE; } if (adlen == SCHWAEMM_192_192_RATE) { - schwaemm_192_192_rho(s, 0x09); + s[11] ^= DOMAIN(0x09); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); } else { unsigned temp = (unsigned)adlen; - schwaemm_192_192_rho(s, 0x08); + s[11] ^= DOMAIN(0x08); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, ad, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -391,7 +393,7 @@ int schwaemm_192_192_aead_encrypt while (mlen > SCHWAEMM_192_192_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s, 0x00); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); sparkle_384(s, 7); memcpy(c, block, SCHWAEMM_192_192_RATE); @@ -402,13 +404,15 @@ int schwaemm_192_192_aead_encrypt if (mlen == SCHWAEMM_192_192_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s, 0x0B); + s[11] ^= DOMAIN(0x0B); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); memcpy(c, block, SCHWAEMM_192_192_RATE); } else { unsigned temp = (unsigned)mlen; lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - schwaemm_192_192_rho(s, 0x0A); + s[11] ^= DOMAIN(0x0A); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; memcpy(c, block, temp); @@ -455,7 +459,7 @@ int schwaemm_192_192_aead_decrypt while (clen > SCHWAEMM_192_192_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s, 0x00); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); sparkle_384(s, 7); c += SCHWAEMM_192_192_RATE; @@ -465,12 +469,14 @@ int schwaemm_192_192_aead_decrypt if (clen == SCHWAEMM_192_192_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s, 0x0B); + s[11] ^= DOMAIN(0x0B); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, 
SCHWAEMM_192_192_RATE); } else { unsigned temp = (unsigned)clen; lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - schwaemm_192_192_rho(s, 0x0A); + s[11] ^= DOMAIN(0x0A); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -504,18 +510,15 @@ int schwaemm_192_192_aead_decrypt * \brief Perform the rho1 and rate whitening steps for Schwaemm128-128. * * \param s SPARKLE-256 state. - * \param domain Domain separator for this phase. */ -#define schwaemm_128_128_rho(s, domain) \ +#define schwaemm_128_128_rho(s) \ do { \ - uint32_t t0 = s[0]; \ - uint32_t t1 = s[1]; \ - if ((domain) != 0) \ - s[7] ^= DOMAIN(domain); \ + uint32_t t = s[0]; \ s[0] = s[2] ^ s[4]; \ + s[2] ^= t ^ s[6]; \ + t = s[1]; \ s[1] = s[3] ^ s[5]; \ - s[2] ^= t0 ^ s[6]; \ - s[3] ^= t1 ^ s[7]; \ + s[3] ^= t ^ s[7]; \ } while (0) /** @@ -530,18 +533,20 @@ static void schwaemm_128_128_authenticate const unsigned char *ad, unsigned long long adlen) { while (adlen > SCHWAEMM_128_128_RATE) { - schwaemm_128_128_rho(s, 0x00); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); sparkle_256(s, 7); ad += SCHWAEMM_128_128_RATE; adlen -= SCHWAEMM_128_128_RATE; } if (adlen == SCHWAEMM_128_128_RATE) { - schwaemm_128_128_rho(s, 0x05); + s[7] ^= DOMAIN(0x05); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); } else { unsigned temp = (unsigned)adlen; - schwaemm_128_128_rho(s, 0x04); + s[7] ^= DOMAIN(0x04); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, ad, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -577,7 +582,7 @@ int schwaemm_128_128_aead_encrypt while (mlen > SCHWAEMM_128_128_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s, 0x00); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); sparkle_256(s, 7); memcpy(c, block, SCHWAEMM_128_128_RATE); @@ -588,13 +593,15 @@ int schwaemm_128_128_aead_encrypt if (mlen == SCHWAEMM_128_128_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s, 0x07); + s[7] ^= DOMAIN(0x07); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); memcpy(c, block, SCHWAEMM_128_128_RATE); } else { unsigned temp = (unsigned)mlen; lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - schwaemm_128_128_rho(s, 0x06); + s[7] ^= DOMAIN(0x06); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; memcpy(c, block, temp); @@ -641,7 +648,7 @@ int schwaemm_128_128_aead_decrypt while (clen > SCHWAEMM_128_128_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s, 0x00); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); sparkle_256(s, 7); c += SCHWAEMM_128_128_RATE; @@ -651,12 +658,14 @@ int schwaemm_128_128_aead_decrypt if (clen == SCHWAEMM_128_128_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s, 0x07); + s[7] ^= DOMAIN(0x07); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); } else { unsigned temp = (unsigned)clen; lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - schwaemm_128_128_rho(s, 0x06); + s[7] ^= DOMAIN(0x06); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -690,24 +699,21 @@ int schwaemm_128_128_aead_decrypt * 
\brief Perform the rho1 and rate whitening steps for Schwaemm256-256. * * \param s SPARKLE-512 state. - * \param domain Domain separator for this phase. */ -#define schwaemm_256_256_rho(s, domain) \ +#define schwaemm_256_256_rho(s) \ do { \ - uint32_t t0 = s[0]; \ - uint32_t t1 = s[1]; \ - uint32_t t2 = s[2]; \ - uint32_t t3 = s[3]; \ - if ((domain) != 0) \ - s[15] ^= DOMAIN(domain); \ + uint32_t t = s[0]; \ s[0] = s[4] ^ s[8]; \ + s[4] ^= t ^ s[12]; \ + t = s[1]; \ s[1] = s[5] ^ s[9]; \ + s[5] ^= t ^ s[13]; \ + t = s[2]; \ s[2] = s[6] ^ s[10]; \ + s[6] ^= t ^ s[14]; \ + t = s[3]; \ s[3] = s[7] ^ s[11]; \ - s[4] ^= t0 ^ s[12]; \ - s[5] ^= t1 ^ s[13]; \ - s[6] ^= t2 ^ s[14]; \ - s[7] ^= t3 ^ s[15]; \ + s[7] ^= t ^ s[15]; \ } while (0) /** @@ -722,18 +728,20 @@ static void schwaemm_256_256_authenticate const unsigned char *ad, unsigned long long adlen) { while (adlen > SCHWAEMM_256_256_RATE) { - schwaemm_256_256_rho(s, 0x00); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); sparkle_512(s, 8); ad += SCHWAEMM_256_256_RATE; adlen -= SCHWAEMM_256_256_RATE; } if (adlen == SCHWAEMM_256_256_RATE) { - schwaemm_256_256_rho(s, 0x11); + s[15] ^= DOMAIN(0x11); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); } else { unsigned temp = (unsigned)adlen; - schwaemm_256_256_rho(s, 0x10); + s[15] ^= DOMAIN(0x10); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, ad, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -769,7 +777,7 @@ int schwaemm_256_256_aead_encrypt while (mlen > SCHWAEMM_256_256_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s, 0x00); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); sparkle_512(s, 8); memcpy(c, block, SCHWAEMM_256_256_RATE); @@ -780,13 +788,15 @@ int schwaemm_256_256_aead_encrypt if (mlen == SCHWAEMM_256_256_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s, 0x13); + s[15] ^= DOMAIN(0x13); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); memcpy(c, block, SCHWAEMM_256_256_RATE); } else { unsigned temp = (unsigned)mlen; lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - schwaemm_256_256_rho(s, 0x12); + s[15] ^= DOMAIN(0x12); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; memcpy(c, block, temp); @@ -833,7 +843,7 @@ int schwaemm_256_256_aead_decrypt while (clen > SCHWAEMM_256_256_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s, 0x00); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); sparkle_512(s, 8); c += SCHWAEMM_256_256_RATE; @@ -843,12 +853,14 @@ int schwaemm_256_256_aead_decrypt if (clen == SCHWAEMM_256_256_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s, 0x13); + s[15] ^= DOMAIN(0x13); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); } else { unsigned temp = (unsigned)clen; lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - schwaemm_256_256_rho(s, 0x12); + s[15] ^= DOMAIN(0x12); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; } diff --git a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/aead-common.c b/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/aead-common.c 
deleted file mode 100644 index 84fc53a..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/aead-common.h b/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. 
- */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. 
- */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/api.h b/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/api.h deleted file mode 100644 index c11fc10..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 32 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 32 -#define CRYPTO_ABYTES 32 -#define CRYPTO_NOOVERLAP 1 diff --git a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/encrypt.c b/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/encrypt.c deleted file mode 100644 index c5f15f6..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "sparkle.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return schwaemm_256_256_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return schwaemm_256_256_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/internal-sparkle-avr.S b/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/internal-sparkle-avr.S deleted file mode 100644 index 753ea2f..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/internal-sparkle-avr.S +++ /dev/null @@ -1,2887 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global sparkle_256 - .type sparkle_256, @function -sparkle_256: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - push r22 - ld r22,Z - ldd r23,Z+1 - ldd r26,Z+2 - ldd r27,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - rcall 129f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,1 - eor r8,r18 - rcall 129f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,2 - eor r8,r18 - rcall 129f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,3 - eor r8,r18 - rcall 129f - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,4 - eor r8,r18 - rcall 129f - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,5 - eor r8,r18 - rcall 129f - ldi r18,200 - ldi r19,161 - 
ldi r20,191 - ldi r21,207 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,6 - eor r8,r18 - rcall 129f - pop r18 - cpi r18,7 - brne 5094f - rjmp 615f -5094: - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,7 - eor r8,r18 - rcall 129f - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,8 - eor r8,r18 - rcall 129f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,9 - eor r8,r18 - rcall 129f - rjmp 615f -129: - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - movw r12,r22 - movw r14,r26 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - movw r24,r4 - movw r16,r6 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r28,Z+24 - ldd r29,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - 
movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - eor r14,r12 - eor r15,r13 - eor r16,r24 - eor r17,r25 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - eor r14,r8 - eor r15,r9 - eor r12,r10 - eor r13,r11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - std Z+20,r18 - std Z+21,r19 - std Z+22,r20 - std Z+23,r21 - movw r18,r4 - movw r20,r6 - movw r4,r14 - movw r6,r12 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - movw r8,r18 - movw r10,r20 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - eor r16,r28 - eor r17,r29 - eor r24,r2 - eor r25,r3 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - std Z+24,r28 - std Z+25,r29 - std Z+26,r2 - std Z+27,r3 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - movw r14,r22 - movw r12,r26 - eor r14,r18 - eor r15,r19 - eor r12,r20 - eor r13,r21 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - movw r22,r16 - movw r26,r24 - eor r22,r28 - eor r23,r29 - eor r26,r2 - eor r27,r3 - movw r28,r14 - movw r2,r12 - ret -615: - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 
- std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sparkle_256, .-sparkle_256 - - .text -.global sparkle_384 - .type sparkle_384, @function -sparkle_384: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - push r22 - ld r22,Z - ldd r23,Z+1 - ldd r26,Z+2 - ldd r27,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - rcall 140f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,1 - eor r8,r18 - rcall 140f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,2 - eor r8,r18 - rcall 140f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,3 - eor r8,r18 - rcall 140f - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,4 - eor r8,r18 - rcall 140f - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,5 - eor r8,r18 - rcall 140f - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,6 - eor r8,r18 - rcall 140f - pop r18 - cpi r18,7 - brne 5094f - rjmp 886f -5094: - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,7 - eor r8,r18 - rcall 140f - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,8 - eor r8,r18 - rcall 140f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,9 - eor r8,r18 - rcall 140f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,10 - eor r8,r18 - rcall 140f - rjmp 886f -140: - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor 
r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - movw r12,r22 - movw r14,r26 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - movw r24,r4 - movw r16,r6 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r28,Z+24 - ldd r29,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld 
r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r22 - std Z+17,r23 - std Z+18,r26 - std Z+19,r27 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r28 - std Z+25,r29 - std Z+26,r2 - std Z+27,r3 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - eor r12,r22 - eor r13,r23 - eor r14,r26 - eor r15,r27 - eor r24,r4 - eor r25,r5 - eor r16,r6 - eor r17,r7 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r28,Z+40 - ldd r29,Z+41 - ldd r2,Z+42 - ldd r3,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - eor r14,r12 - eor r15,r13 - eor r16,r24 - eor 
r17,r25 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - eor r14,r8 - eor r15,r9 - eor r12,r10 - eor r13,r11 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - ldd r0,Z+4 - eor r18,r0 - ldd r0,Z+5 - eor r19,r0 - ldd r0,Z+6 - eor r20,r0 - ldd r0,Z+7 - eor r21,r0 - std Z+20,r18 - std Z+21,r19 - std Z+22,r20 - std Z+23,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Z+28,r18 - std Z+29,r19 - std Z+30,r20 - std Z+31,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - std Z+36,r18 - std Z+37,r19 - std Z+38,r20 - std Z+39,r21 - eor r8,r14 - eor r9,r15 - eor r10,r12 - eor r11,r13 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - eor r18,r16 - eor r19,r17 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - eor r16,r28 - eor r17,r29 - eor r24,r2 - eor r25,r3 - ldd r28,Z+16 - ldd r29,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - std Z+40,r28 - std Z+41,r29 - std Z+42,r2 - std Z+43,r3 - ld r14,Z - ldd r15,Z+1 - ldd r12,Z+2 - ldd r13,Z+3 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - std Z+24,r14 - std Z+25,r15 - std Z+26,r12 - std Z+27,r13 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - std Z+32,r18 - std Z+33,r19 - std Z+34,r20 - std Z+35,r21 - eor r28,r16 - eor r29,r17 - eor r2,r24 - eor r3,r25 - ret -886: - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sparkle_384, .-sparkle_384 - - .text -.global sparkle_512 - .type sparkle_512, @function -sparkle_512: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - push r22 - ld r22,Z - ldd r23,Z+1 - ldd r26,Z+2 - ldd r27,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - rcall 151f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,1 - eor r8,r18 - rcall 151f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,2 - eor r8,r18 - rcall 151f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,3 - eor r8,r18 - rcall 151f - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,4 - eor r8,r18 - rcall 151f - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,5 - eor r8,r18 - rcall 151f - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r4,r18 - eor r5,r19 - 
eor r6,r20 - eor r7,r21 - ldi r18,6 - eor r8,r18 - rcall 151f - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,7 - eor r8,r18 - rcall 151f - pop r18 - cpi r18,8 - brne 5105f - rjmp 1189f -5105: - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,8 - eor r8,r18 - rcall 151f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,9 - eor r8,r18 - rcall 151f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,10 - eor r8,r18 - rcall 151f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,11 - eor r8,r18 - rcall 151f - rjmp 1189f -151: - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - movw r12,r22 - movw r14,r26 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - movw r24,r4 - movw r16,r6 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor 
r17,r11 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r28,Z+24 - ldd r29,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r22 - std Z+17,r23 - std Z+18,r26 - std Z+19,r27 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r28 - std Z+25,r29 - std Z+26,r2 - std Z+27,r3 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - eor r12,r22 - eor r13,r23 - eor r14,r26 - eor r15,r27 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - eor r24,r4 - eor r25,r5 - eor r16,r6 - eor r17,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r28,Z+40 - ldd r29,Z+41 - ldd r2,Z+42 - ldd r3,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw 
r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - std Z+32,r22 - std Z+33,r23 - std Z+34,r26 - std Z+35,r27 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r28 - std Z+41,r29 - std Z+42,r2 - std Z+43,r3 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - ldd r22,Z+48 - ldd r23,Z+49 - ldd r26,Z+50 - ldd r27,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r28,Z+56 - ldd r29,Z+57 - ldd r2,Z+58 - ldd r3,Z+59 - ldd r8,Z+60 - ldd r9,Z+61 - ldd r10,Z+62 - ldd r11,Z+63 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 
- eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - eor r14,r12 - eor r15,r13 - eor r16,r24 - eor r17,r25 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - ldd r18,Z+44 - ldd r19,Z+45 - ldd r20,Z+46 - ldd r21,Z+47 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - eor r14,r8 - eor r15,r9 - eor r12,r10 - eor r13,r11 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Z+60,r8 - std Z+61,r9 - std Z+62,r10 - std Z+63,r11 - ldd r8,Z+4 - ldd r9,Z+5 - ldd r10,Z+6 - ldd r11,Z+7 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - std Z+28,r4 - std Z+29,r5 - std Z+30,r6 - std Z+31,r7 - std Z+36,r8 - std Z+37,r9 - std Z+38,r10 - std Z+39,r11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - ldd r8,Z+52 - ldd r9,Z+53 - ldd r10,Z+54 - ldd r11,Z+55 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - ldd r0,Z+60 - eor r14,r0 - ldd r0,Z+61 - eor r15,r0 - ldd r0,Z+62 - eor r12,r0 - ldd r0,Z+63 - eor r13,r0 - std Z+20,r14 - std Z+21,r15 - std Z+22,r12 - std Z+23,r13 - movw r4,r18 - movw r6,r20 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - std Z+48,r22 - std Z+49,r23 - std Z+50,r26 - std Z+51,r27 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - eor r18,r16 - eor r19,r17 - eor r20,r24 - eor r21,r25 - eor r16,r28 - eor r17,r29 - eor r24,r2 - eor r25,r3 - ldd r14,Z+24 - ldd r15,Z+25 - ldd r12,Z+26 - ldd r13,Z+27 - std Z+56,r14 - std Z+57,r15 - std Z+58,r12 - std Z+59,r13 - ld r14,Z - ldd r15,Z+1 - ldd r12,Z+2 - ldd r13,Z+3 - eor r22,r14 - eor r23,r15 - eor r26,r12 - eor r27,r13 - std Z+24,r22 - std Z+25,r23 - std Z+26,r26 - std Z+27,r27 - std Z+32,r14 - std Z+33,r15 - std Z+34,r12 - std Z+35,r13 - ldd r14,Z+8 - ldd r15,Z+9 - ldd r12,Z+10 - ldd r13,Z+11 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - movw r22,r18 - movw r26,r20 - std Z+40,r14 - std Z+41,r15 - std Z+42,r12 - std Z+43,r13 - ldd r28,Z+48 - 
ldd r29,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r14,Z+16 - ldd r15,Z+17 - ldd r12,Z+18 - ldd r13,Z+19 - eor r28,r14 - eor r29,r15 - eor r2,r12 - eor r3,r13 - std Z+48,r14 - std Z+49,r15 - std Z+50,r12 - std Z+51,r13 - ldd r0,Z+56 - eor r16,r0 - ldd r0,Z+57 - eor r17,r0 - ldd r0,Z+58 - eor r24,r0 - ldd r0,Z+59 - eor r25,r0 - std Z+16,r16 - std Z+17,r17 - std Z+18,r24 - std Z+19,r25 - ret -1189: - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sparkle_512, .-sparkle_512 - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/internal-sparkle.c b/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/internal-sparkle.c deleted file mode 100644 index 4a4c0fb..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/internal-sparkle.c +++ /dev/null @@ -1,382 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-sparkle.h" - -#if !defined(__AVR__) - -/* The 8 basic round constants from the specification */ -#define RC_0 0xB7E15162 -#define RC_1 0xBF715880 -#define RC_2 0x38B4DA56 -#define RC_3 0x324E7738 -#define RC_4 0xBB1185EB -#define RC_5 0x4F7C7B57 -#define RC_6 0xCFBFA1C8 -#define RC_7 0xC2B3293D - -/* Round constants for all SPARKLE steps; maximum of 12 for SPARKLE-512 */ -static uint32_t const sparkle_rc[12] = { - RC_0, RC_1, RC_2, RC_3, RC_4, RC_5, RC_6, RC_7, - RC_0, RC_1, RC_2, RC_3 -}; - -/** - * \brief Alzette block cipher that implements the ARXbox layer of the - * SPARKLE permutation. - * - * \param x Left half of the 64-bit block. - * \param y Right half of the 64-bit block. - * \param k 32-bit round key. 
- */ -#define alzette(x, y, k) \ - do { \ - (x) += leftRotate1((y)); \ - (y) ^= leftRotate8((x)); \ - (x) ^= (k); \ - (x) += leftRotate15((y)); \ - (y) ^= leftRotate15((x)); \ - (x) ^= (k); \ - (x) += (y); \ - (y) ^= leftRotate1((x)); \ - (x) ^= (k); \ - (x) += leftRotate8((y)); \ - (y) ^= leftRotate16((x)); \ - (x) ^= (k); \ - } while (0) - -void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps) -{ - uint32_t x0, x1, x2, x3; - uint32_t y0, y1, y2, y3; - uint32_t tx, ty; - unsigned step; - - /* Load the SPARKLE-256 state up into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = s[0]; - y0 = s[1]; - x1 = s[2]; - y1 = s[3]; - x2 = s[4]; - y2 = s[5]; - x3 = s[6]; - y3 = s[7]; -#else - x0 = le_load_word32((const uint8_t *)&(s[0])); - y0 = le_load_word32((const uint8_t *)&(s[1])); - x1 = le_load_word32((const uint8_t *)&(s[2])); - y1 = le_load_word32((const uint8_t *)&(s[3])); - x2 = le_load_word32((const uint8_t *)&(s[4])); - y2 = le_load_word32((const uint8_t *)&(s[5])); - x3 = le_load_word32((const uint8_t *)&(s[6])); - y3 = le_load_word32((const uint8_t *)&(s[7])); -#endif - - /* Perform all requested steps */ - for (step = 0; step < steps; ++step) { - /* Add round constants */ - y0 ^= sparkle_rc[step]; - y1 ^= step; - - /* ARXbox layer */ - alzette(x0, y0, RC_0); - alzette(x1, y1, RC_1); - alzette(x2, y2, RC_2); - alzette(x3, y3, RC_3); - - /* Linear layer */ - tx = x0 ^ x1; - ty = y0 ^ y1; - tx = leftRotate16(tx ^ (tx << 16)); - ty = leftRotate16(ty ^ (ty << 16)); - y2 ^= tx; - tx ^= y3; - y3 = y1; - y1 = y2 ^ y0; - y2 = y0; - y0 = tx ^ y3; - x2 ^= ty; - ty ^= x3; - x3 = x1; - x1 = x2 ^ x0; - x2 = x0; - x0 = ty ^ x3; - } - - /* Write the local variables back to the SPARKLE-256 state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s[0] = x0; - s[1] = y0; - s[2] = x1; - s[3] = y1; - s[4] = x2; - s[5] = y2; - s[6] = x3; - s[7] = y3; -#else - le_store_word32((uint8_t *)&(s[0]), x0); - le_store_word32((uint8_t *)&(s[1]), y0); - le_store_word32((uint8_t *)&(s[2]), x1); - le_store_word32((uint8_t *)&(s[3]), y1); - le_store_word32((uint8_t *)&(s[4]), x2); - le_store_word32((uint8_t *)&(s[5]), y2); - le_store_word32((uint8_t *)&(s[6]), x3); - le_store_word32((uint8_t *)&(s[7]), y3); -#endif -} - -void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps) -{ - uint32_t x0, x1, x2, x3, x4, x5; - uint32_t y0, y1, y2, y3, y4, y5; - uint32_t tx, ty; - unsigned step; - - /* Load the SPARKLE-384 state up into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = s[0]; - y0 = s[1]; - x1 = s[2]; - y1 = s[3]; - x2 = s[4]; - y2 = s[5]; - x3 = s[6]; - y3 = s[7]; - x4 = s[8]; - y4 = s[9]; - x5 = s[10]; - y5 = s[11]; -#else - x0 = le_load_word32((const uint8_t *)&(s[0])); - y0 = le_load_word32((const uint8_t *)&(s[1])); - x1 = le_load_word32((const uint8_t *)&(s[2])); - y1 = le_load_word32((const uint8_t *)&(s[3])); - x2 = le_load_word32((const uint8_t *)&(s[4])); - y2 = le_load_word32((const uint8_t *)&(s[5])); - x3 = le_load_word32((const uint8_t *)&(s[6])); - y3 = le_load_word32((const uint8_t *)&(s[7])); - x4 = le_load_word32((const uint8_t *)&(s[8])); - y4 = le_load_word32((const uint8_t *)&(s[9])); - x5 = le_load_word32((const uint8_t *)&(s[10])); - y5 = le_load_word32((const uint8_t *)&(s[11])); -#endif - - /* Perform all requested steps */ - for (step = 0; step < steps; ++step) { - /* Add round constants */ - y0 ^= sparkle_rc[step]; - y1 ^= step; - - /* ARXbox layer */ - alzette(x0, y0, RC_0); - alzette(x1, y1, RC_1); - alzette(x2, y2, RC_2); - alzette(x3, y3, RC_3); 
- alzette(x4, y4, RC_4); - alzette(x5, y5, RC_5); - - /* Linear layer */ - tx = x0 ^ x1 ^ x2; - ty = y0 ^ y1 ^ y2; - tx = leftRotate16(tx ^ (tx << 16)); - ty = leftRotate16(ty ^ (ty << 16)); - y3 ^= tx; - y4 ^= tx; - tx ^= y5; - y5 = y2; - y2 = y3 ^ y0; - y3 = y0; - y0 = y4 ^ y1; - y4 = y1; - y1 = tx ^ y5; - x3 ^= ty; - x4 ^= ty; - ty ^= x5; - x5 = x2; - x2 = x3 ^ x0; - x3 = x0; - x0 = x4 ^ x1; - x4 = x1; - x1 = ty ^ x5; - } - - /* Write the local variables back to the SPARKLE-384 state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s[0] = x0; - s[1] = y0; - s[2] = x1; - s[3] = y1; - s[4] = x2; - s[5] = y2; - s[6] = x3; - s[7] = y3; - s[8] = x4; - s[9] = y4; - s[10] = x5; - s[11] = y5; -#else - le_store_word32((uint8_t *)&(s[0]), x0); - le_store_word32((uint8_t *)&(s[1]), y0); - le_store_word32((uint8_t *)&(s[2]), x1); - le_store_word32((uint8_t *)&(s[3]), y1); - le_store_word32((uint8_t *)&(s[4]), x2); - le_store_word32((uint8_t *)&(s[5]), y2); - le_store_word32((uint8_t *)&(s[6]), x3); - le_store_word32((uint8_t *)&(s[7]), y3); - le_store_word32((uint8_t *)&(s[8]), x4); - le_store_word32((uint8_t *)&(s[9]), y4); - le_store_word32((uint8_t *)&(s[10]), x5); - le_store_word32((uint8_t *)&(s[11]), y5); -#endif -} - -void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps) -{ - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t y0, y1, y2, y3, y4, y5, y6, y7; - uint32_t tx, ty; - unsigned step; - - /* Load the SPARKLE-512 state up into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = s[0]; - y0 = s[1]; - x1 = s[2]; - y1 = s[3]; - x2 = s[4]; - y2 = s[5]; - x3 = s[6]; - y3 = s[7]; - x4 = s[8]; - y4 = s[9]; - x5 = s[10]; - y5 = s[11]; - x6 = s[12]; - y6 = s[13]; - x7 = s[14]; - y7 = s[15]; -#else - x0 = le_load_word32((const uint8_t *)&(s[0])); - y0 = le_load_word32((const uint8_t *)&(s[1])); - x1 = le_load_word32((const uint8_t *)&(s[2])); - y1 = le_load_word32((const uint8_t *)&(s[3])); - x2 = le_load_word32((const uint8_t *)&(s[4])); - y2 = le_load_word32((const uint8_t *)&(s[5])); - x3 = le_load_word32((const uint8_t *)&(s[6])); - y3 = le_load_word32((const uint8_t *)&(s[7])); - x4 = le_load_word32((const uint8_t *)&(s[8])); - y4 = le_load_word32((const uint8_t *)&(s[9])); - x5 = le_load_word32((const uint8_t *)&(s[10])); - y5 = le_load_word32((const uint8_t *)&(s[11])); - x6 = le_load_word32((const uint8_t *)&(s[12])); - y6 = le_load_word32((const uint8_t *)&(s[13])); - x7 = le_load_word32((const uint8_t *)&(s[14])); - y7 = le_load_word32((const uint8_t *)&(s[15])); -#endif - - /* Perform all requested steps */ - for (step = 0; step < steps; ++step) { - /* Add round constants */ - y0 ^= sparkle_rc[step]; - y1 ^= step; - - /* ARXbox layer */ - alzette(x0, y0, RC_0); - alzette(x1, y1, RC_1); - alzette(x2, y2, RC_2); - alzette(x3, y3, RC_3); - alzette(x4, y4, RC_4); - alzette(x5, y5, RC_5); - alzette(x6, y6, RC_6); - alzette(x7, y7, RC_7); - - /* Linear layer */ - tx = x0 ^ x1 ^ x2 ^ x3; - ty = y0 ^ y1 ^ y2 ^ y3; - tx = leftRotate16(tx ^ (tx << 16)); - ty = leftRotate16(ty ^ (ty << 16)); - y4 ^= tx; - y5 ^= tx; - y6 ^= tx; - tx ^= y7; - y7 = y3; - y3 = y4 ^ y0; - y4 = y0; - y0 = y5 ^ y1; - y5 = y1; - y1 = y6 ^ y2; - y6 = y2; - y2 = tx ^ y7; - x4 ^= ty; - x5 ^= ty; - x6 ^= ty; - ty ^= x7; - x7 = x3; - x3 = x4 ^ x0; - x4 = x0; - x0 = x5 ^ x1; - x5 = x1; - x1 = x6 ^ x2; - x6 = x2; - x2 = ty ^ x7; - } - - /* Write the local variables back to the SPARKLE-512 state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s[0] = x0; - s[1] = y0; - s[2] = x1; - s[3] = y1; - s[4] = x2; - s[5] = y2; 
- s[6] = x3; - s[7] = y3; - s[8] = x4; - s[9] = y4; - s[10] = x5; - s[11] = y5; - s[12] = x6; - s[13] = y6; - s[14] = x7; - s[15] = y7; -#else - le_store_word32((uint8_t *)&(s[0]), x0); - le_store_word32((uint8_t *)&(s[1]), y0); - le_store_word32((uint8_t *)&(s[2]), x1); - le_store_word32((uint8_t *)&(s[3]), y1); - le_store_word32((uint8_t *)&(s[4]), x2); - le_store_word32((uint8_t *)&(s[5]), y2); - le_store_word32((uint8_t *)&(s[6]), x3); - le_store_word32((uint8_t *)&(s[7]), y3); - le_store_word32((uint8_t *)&(s[8]), x4); - le_store_word32((uint8_t *)&(s[9]), y4); - le_store_word32((uint8_t *)&(s[10]), x5); - le_store_word32((uint8_t *)&(s[11]), y5); - le_store_word32((uint8_t *)&(s[12]), x6); - le_store_word32((uint8_t *)&(s[13]), y6); - le_store_word32((uint8_t *)&(s[14]), x7); - le_store_word32((uint8_t *)&(s[15]), y7); -#endif -} - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/internal-sparkle.h b/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/internal-sparkle.h deleted file mode 100644 index fbdabc1..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/internal-sparkle.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SPARKLE_H -#define LW_INTERNAL_SPARKLE_H - -#include "internal-util.h" - -/** - * \file internal-sparkle.h - * \brief Internal implementation of the SPARKLE permutation. - * - * References: https://www.cryptolux.org/index.php/Sparkle - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the state for SPARKLE-256. - */ -#define SPARKLE_256_STATE_SIZE 8 - -/** - * \brief Size of the state for SPARKLE-384. - */ -#define SPARKLE_384_STATE_SIZE 12 - -/** - * \brief Size of the state for SPARKLE-512. - */ -#define SPARKLE_512_STATE_SIZE 16 - -/** - * \brief Performs the SPARKLE-256 permutation. - * - * \param s The words of the SPARKLE-256 state in little-endian byte order. - * \param steps The number of steps to perform, 7 or 10. - */ -void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps); - -/** - * \brief Performs the SPARKLE-384 permutation. - * - * \param s The words of the SPARKLE-384 state in little-endian byte order. - * \param steps The number of steps to perform, 7 or 11. - */ -void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps); - -/** - * \brief Performs the SPARKLE-512 permutation. 
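The alzette() macro removed above is the ARX-box that every SPARKLE step applies to each 64-bit branch of the state, keyed with one of the per-branch constants RC_0..RC_7. Purely as a reading aid, and not part of the patch itself, the same add/XOR/rotate sequence can be restated as a self-contained C function; the rol32 helper name is illustrative rather than taken from the library.

#include <stdint.h>

static uint32_t rol32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32 - bits));
}

/* One Alzette evaluation on the 64-bit branch (x, y) with round constant k,
 * mirroring the alzette() macro from the removed internal-sparkle.c. */
static void alzette_ref(uint32_t *x, uint32_t *y, uint32_t k)
{
    *x += rol32(*y, 1);  *y ^= rol32(*x, 8);  *x ^= k;
    *x += rol32(*y, 15); *y ^= rol32(*x, 15); *x ^= k;
    *x += *y;            *y ^= rol32(*x, 1);  *x ^= k;
    *x += rol32(*y, 8);  *y ^= rol32(*x, 16); *x ^= k;
}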
- * - * \param s The words of the SPARKLE-512 state in little-endian byte order. - * \param steps The number of steps to perform, 8 or 12. - */ -void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/internal-util.h b/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
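A small self-contained check of the composition idea behind these macros, assuming only standard C (rotl32, rotr32 and rotl5_composed are illustrative names): rotating left by 8 and then right by 3 has the same net effect as a direct left rotate by 5, which is what the composed leftRotate5 above relies on.

#include <assert.h>
#include <stdint.h>

/* Generic 32-bit rotations, same shape as leftRotate()/rightRotate() above. */
static uint32_t rotl32(uint32_t x, unsigned bits) { return (x << bits) | (x >> (32 - bits)); }
static uint32_t rotr32(uint32_t x, unsigned bits) { return (x >> bits) | (x << (32 - bits)); }

/* Left rotate by 5 composed from a cheap rotate left by 8 followed by a
 * rotate right by 3, matching the net effect of the leftRotate5 macro. */
static uint32_t rotl5_composed(uint32_t x) { return rotr32(rotl32(x, 8), 3); }

int main(void)
{
    assert(rotl5_composed(0x80000001u) == rotl32(0x80000001u, 5));
    assert(rotl5_composed(0x12345678u) == rotl32(0x12345678u, 5));
    return 0;
}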
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/sparkle.c b/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/sparkle.c deleted file mode 100644 index e2aa25a..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/sparkle.c +++ /dev/null @@ -1,1135 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "sparkle.h" -#include "internal-sparkle.h" -#include - -aead_cipher_t const schwaemm_256_128_cipher = { - "Schwaemm256-128", - SCHWAEMM_256_128_KEY_SIZE, - SCHWAEMM_256_128_NONCE_SIZE, - SCHWAEMM_256_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_256_128_aead_encrypt, - schwaemm_256_128_aead_decrypt -}; - -aead_cipher_t const schwaemm_192_192_cipher = { - "Schwaemm192-192", - SCHWAEMM_192_192_KEY_SIZE, - SCHWAEMM_192_192_NONCE_SIZE, - SCHWAEMM_192_192_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_192_192_aead_encrypt, - schwaemm_192_192_aead_decrypt -}; - -aead_cipher_t const schwaemm_128_128_cipher = { - "Schwaemm128-128", - SCHWAEMM_128_128_KEY_SIZE, - SCHWAEMM_128_128_NONCE_SIZE, - SCHWAEMM_128_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_128_128_aead_encrypt, - schwaemm_128_128_aead_decrypt -}; - -aead_cipher_t const schwaemm_256_256_cipher = { - "Schwaemm256-256", - SCHWAEMM_256_256_KEY_SIZE, - SCHWAEMM_256_256_NONCE_SIZE, - SCHWAEMM_256_256_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_256_256_aead_encrypt, - schwaemm_256_256_aead_decrypt -}; - -aead_hash_algorithm_t const esch_256_hash_algorithm = { - "Esch256", - sizeof(esch_256_hash_state_t), - ESCH_256_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - esch_256_hash, - (aead_hash_init_t)esch_256_hash_init, - (aead_hash_update_t)esch_256_hash_update, - (aead_hash_finalize_t)esch_256_hash_finalize, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const esch_384_hash_algorithm = { - "Esch384", - sizeof(esch_384_hash_state_t), - ESCH_384_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - esch_384_hash, - (aead_hash_init_t)esch_384_hash_init, - (aead_hash_update_t)esch_384_hash_update, - (aead_hash_finalize_t)esch_384_hash_finalize, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \def DOMAIN(value) - * \brief Build a domain separation value as a 32-bit word. - * - * \param value The base value. - * \return The domain separation value as a 32-bit word. - */ -#if defined(LW_UTIL_LITTLE_ENDIAN) -#define DOMAIN(value) (((uint32_t)(value)) << 24) -#else -#define DOMAIN(value) (value) -#endif - -/** - * \brief Rate at which bytes are processed by Schwaemm256-128. - */ -#define SCHWAEMM_256_128_RATE 32 - -/** - * \brief Pointer to the left of the state for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_RIGHT(s) \ - (SCHWAEMM_256_128_LEFT(s) + SCHWAEMM_256_128_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm256-128. - * - * \param s SPARKLE-384 state. - */ -#define schwaemm_256_128_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[4] ^ s[8]; \ - s[4] ^= t ^ s[8]; \ - t = s[1]; \ - s[1] = s[5] ^ s[9]; \ - s[5] ^= t ^ s[9]; \ - t = s[2]; \ - s[2] = s[6] ^ s[10]; \ - s[6] ^= t ^ s[10]; \ - t = s[3]; \ - s[3] = s[7] ^ s[11]; \ - s[7] ^= t ^ s[11]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm256-128. - * - * \param s SPARKLE-384 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
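A minimal sketch of what the DOMAIN() definition above appears to achieve on a little-endian build (DOMAIN_LE is a hypothetical stand-in, not the library macro): shifting the constant into the top byte of the last state word puts the separator on the final byte of the serialized state. The big-endian branch leaves the value in the low byte of the word, which sits at that same final offset on such targets.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the little-endian branch of DOMAIN() above. */
#define DOMAIN_LE(value) (((uint32_t)(value)) << 24)

int main(void)
{
    uint32_t s[12] = {0};      /* SPARKLE-384 state: 12 words, 48 bytes */
    unsigned char bytes[48];

    s[11] ^= DOMAIN_LE(0x05);  /* domain separator for a full final AD block */
    memcpy(bytes, s, sizeof(bytes));
    /* On a little-endian host the separator lands on the last state byte. */
    printf("last state byte = 0x%02x\n", bytes[47]);  /* prints 0x05 */
    return 0;
}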
- */ -static void schwaemm_256_128_authenticate - (uint32_t s[SPARKLE_384_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_256_128_RATE) { - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); - sparkle_384(s, 7); - ad += SCHWAEMM_256_128_RATE; - adlen -= SCHWAEMM_256_128_RATE; - } - if (adlen == SCHWAEMM_256_128_RATE) { - s[11] ^= DOMAIN(0x05); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[11] ^= DOMAIN(0x04); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); -} - -int schwaemm_256_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - uint8_t block[SCHWAEMM_256_128_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_256_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_128_LEFT(s), npub, SCHWAEMM_256_128_NONCE_SIZE); - memcpy(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_128_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - sparkle_384(s, 7); - memcpy(c, block, SCHWAEMM_256_128_RATE); - c += SCHWAEMM_256_128_RATE; - m += SCHWAEMM_256_128_RATE; - mlen -= SCHWAEMM_256_128_RATE; - } - if (mlen == SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); - s[11] ^= DOMAIN(0x07); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - memcpy(c, block, SCHWAEMM_256_128_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[11] ^= DOMAIN(0x06); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_384(s, 11); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_TAG_SIZE); - return 0; -} - -int schwaemm_256_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_256_128_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_256_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_128_LEFT(s), npub, SCHWAEMM_256_128_NONCE_SIZE); - memcpy(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_128_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_256_128_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - sparkle_384(s, 7); - c += SCHWAEMM_256_128_RATE; - m += SCHWAEMM_256_128_RATE; - clen -= SCHWAEMM_256_128_RATE; - } - if (clen == SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); - s[11] ^= DOMAIN(0x07); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[11] ^= DOMAIN(0x06); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_256_128_RIGHT(s), c, SCHWAEMM_256_128_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Schwaemm192-192. - */ -#define SCHWAEMM_192_192_RATE 24 - -/** - * \brief Pointer to the left of the state for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_RIGHT(s) \ - (SCHWAEMM_192_192_LEFT(s) + SCHWAEMM_192_192_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm192-192. - * - * \param s SPARKLE-384 state. - */ -#define schwaemm_192_192_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[3] ^ s[6]; \ - s[3] ^= t ^ s[9]; \ - t = s[1]; \ - s[1] = s[4] ^ s[7]; \ - s[4] ^= t ^ s[10]; \ - t = s[2]; \ - s[2] = s[5] ^ s[8]; \ - s[5] ^= t ^ s[11]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm192-192. - * - * \param s SPARKLE-384 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
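The schwaemm_256_128_rho() macro used above is an unrolled form of the rho1 feedback step combined with the rate whitening; a loop version with the same arithmetic (hypothetical name, written out only for readability) looks like this:

#include <stdint.h>
#include <stdio.h>

/* Loop form of schwaemm_256_128_rho(): the rate halves s[0..3] and s[4..7]
 * are mixed as (x, y) -> (y, x ^ y), and the 128-bit capacity s[8..11] is
 * xored into both halves as the rate whitening. */
static void schwaemm_256_128_rho_loop(uint32_t s[12])
{
    int i;
    for (i = 0; i < 4; ++i) {
        uint32_t x = s[i];
        uint32_t y = s[i + 4];
        uint32_t c = s[i + 8];
        s[i]     = y ^ c;
        s[i + 4] = x ^ y ^ c;
    }
}

int main(void)
{
    uint32_t s[12];
    int i;
    for (i = 0; i < 12; ++i)
        s[i] = (uint32_t)i;
    schwaemm_256_128_rho_loop(s);
    printf("%lu %lu\n", (unsigned long)s[0], (unsigned long)s[4]);  /* prints 12 12 */
    return 0;
}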
- */ -static void schwaemm_192_192_authenticate - (uint32_t s[SPARKLE_384_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_192_192_RATE) { - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); - sparkle_384(s, 7); - ad += SCHWAEMM_192_192_RATE; - adlen -= SCHWAEMM_192_192_RATE; - } - if (adlen == SCHWAEMM_192_192_RATE) { - s[11] ^= DOMAIN(0x09); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[11] ^= DOMAIN(0x08); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); -} - -int schwaemm_192_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - uint8_t block[SCHWAEMM_192_192_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_192_192_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_192_192_LEFT(s), npub, SCHWAEMM_192_192_NONCE_SIZE); - memcpy(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_192_192_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - sparkle_384(s, 7); - memcpy(c, block, SCHWAEMM_192_192_RATE); - c += SCHWAEMM_192_192_RATE; - m += SCHWAEMM_192_192_RATE; - mlen -= SCHWAEMM_192_192_RATE; - } - if (mlen == SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); - s[11] ^= DOMAIN(0x0B); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - memcpy(c, block, SCHWAEMM_192_192_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[11] ^= DOMAIN(0x0A); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_384(s, 11); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_TAG_SIZE); - return 0; -} - -int schwaemm_192_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_192_192_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_192_192_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_192_192_LEFT(s), npub, SCHWAEMM_192_192_NONCE_SIZE); - memcpy(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_192_192_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_192_192_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - sparkle_384(s, 7); - c += SCHWAEMM_192_192_RATE; - m += SCHWAEMM_192_192_RATE; - clen -= SCHWAEMM_192_192_RATE; - } - if (clen == SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); - s[11] ^= DOMAIN(0x0B); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[11] ^= DOMAIN(0x0A); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_192_192_RIGHT(s), c, SCHWAEMM_192_192_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Schwaemm128-128. - */ -#define SCHWAEMM_128_128_RATE 16 - -/** - * \brief Pointer to the left of the state for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_RIGHT(s) \ - (SCHWAEMM_128_128_LEFT(s) + SCHWAEMM_128_128_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm128-128. - * - * \param s SPARKLE-256 state. - */ -#define schwaemm_128_128_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[2] ^ s[4]; \ - s[2] ^= t ^ s[6]; \ - t = s[1]; \ - s[1] = s[3] ^ s[5]; \ - s[3] ^= t ^ s[7]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm128-128. - * - * \param s SPARKLE-256 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
- */ -static void schwaemm_128_128_authenticate - (uint32_t s[SPARKLE_256_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_128_128_RATE) { - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); - sparkle_256(s, 7); - ad += SCHWAEMM_128_128_RATE; - adlen -= SCHWAEMM_128_128_RATE; - } - if (adlen == SCHWAEMM_128_128_RATE) { - s[7] ^= DOMAIN(0x05); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[7] ^= DOMAIN(0x04); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_256(s, 10); -} - -int schwaemm_128_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_256_STATE_SIZE]; - uint8_t block[SCHWAEMM_128_128_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_128_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_128_128_LEFT(s), npub, SCHWAEMM_128_128_NONCE_SIZE); - memcpy(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_KEY_SIZE); - sparkle_256(s, 10); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_128_128_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - sparkle_256(s, 7); - memcpy(c, block, SCHWAEMM_128_128_RATE); - c += SCHWAEMM_128_128_RATE; - m += SCHWAEMM_128_128_RATE; - mlen -= SCHWAEMM_128_128_RATE; - } - if (mlen == SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); - s[7] ^= DOMAIN(0x07); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - memcpy(c, block, SCHWAEMM_128_128_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[7] ^= DOMAIN(0x06); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_256(s, 10); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_TAG_SIZE); - return 0; -} - -int schwaemm_128_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_256_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_128_128_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_128_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_128_128_LEFT(s), npub, SCHWAEMM_128_128_NONCE_SIZE); - memcpy(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_KEY_SIZE); - sparkle_256(s, 10); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_128_128_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_128_128_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - sparkle_256(s, 7); - c += SCHWAEMM_128_128_RATE; - m += SCHWAEMM_128_128_RATE; - clen -= SCHWAEMM_128_128_RATE; - } - if (clen == SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); - s[7] ^= DOMAIN(0x07); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[7] ^= DOMAIN(0x06); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_256(s, 10); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_128_128_RIGHT(s), c, SCHWAEMM_128_128_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Schwaemm256-256. - */ -#define SCHWAEMM_256_256_RATE 32 - -/** - * \brief Pointer to the left of the state for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_RIGHT(s) \ - (SCHWAEMM_256_256_LEFT(s) + SCHWAEMM_256_256_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm256-256. - * - * \param s SPARKLE-512 state. - */ -#define schwaemm_256_256_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[4] ^ s[8]; \ - s[4] ^= t ^ s[12]; \ - t = s[1]; \ - s[1] = s[5] ^ s[9]; \ - s[5] ^= t ^ s[13]; \ - t = s[2]; \ - s[2] = s[6] ^ s[10]; \ - s[6] ^= t ^ s[14]; \ - t = s[3]; \ - s[3] = s[7] ^ s[11]; \ - s[7] ^= t ^ s[15]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm256-256. - * - * \param s SPARKLE-512 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
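aead_check_tag() is provided by aead-common.c elsewhere in the library and does not appear in this diff; as a rough sketch of the kind of check it performs (simplified, not the library's exact implementation), a constant-time tag comparison could look like the following. The real helper also avoids a data-dependent branch when wiping the plaintext; the branch here only keeps the sketch short.

#include <stddef.h>
#include <string.h>

/* Accumulate all byte differences between the computed and received tags
 * before a single final branch, and wipe the plaintext on failure. */
static int check_tag_sketch
    (unsigned char *plaintext, unsigned long long plaintext_len,
     const unsigned char *computed_tag, const unsigned char *received_tag,
     unsigned tag_len)
{
    unsigned char diff = 0;
    unsigned i;
    for (i = 0; i < tag_len; ++i)
        diff |= computed_tag[i] ^ received_tag[i];
    if (diff != 0) {
        memset(plaintext, 0, (size_t)plaintext_len);
        return -1;
    }
    return 0;
}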
- */ -static void schwaemm_256_256_authenticate - (uint32_t s[SPARKLE_512_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_256_256_RATE) { - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); - sparkle_512(s, 8); - ad += SCHWAEMM_256_256_RATE; - adlen -= SCHWAEMM_256_256_RATE; - } - if (adlen == SCHWAEMM_256_256_RATE) { - s[15] ^= DOMAIN(0x11); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[15] ^= DOMAIN(0x10); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_512(s, 12); -} - -int schwaemm_256_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_512_STATE_SIZE]; - uint8_t block[SCHWAEMM_256_256_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_256_256_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_256_LEFT(s), npub, SCHWAEMM_256_256_NONCE_SIZE); - memcpy(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_KEY_SIZE); - sparkle_512(s, 12); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_256_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - sparkle_512(s, 8); - memcpy(c, block, SCHWAEMM_256_256_RATE); - c += SCHWAEMM_256_256_RATE; - m += SCHWAEMM_256_256_RATE; - mlen -= SCHWAEMM_256_256_RATE; - } - if (mlen == SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); - s[15] ^= DOMAIN(0x13); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - memcpy(c, block, SCHWAEMM_256_256_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[15] ^= DOMAIN(0x12); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_512(s, 12); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_TAG_SIZE); - return 0; -} - -int schwaemm_256_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_512_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_256_256_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_256_256_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_256_LEFT(s), npub, SCHWAEMM_256_256_NONCE_SIZE); - memcpy(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_KEY_SIZE); - sparkle_512(s, 12); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_256_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_256_256_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - sparkle_512(s, 8); - c += SCHWAEMM_256_256_RATE; - m += SCHWAEMM_256_256_RATE; - clen -= SCHWAEMM_256_256_RATE; - } - if (clen == SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); - s[15] ^= DOMAIN(0x13); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[15] ^= DOMAIN(0x12); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_512(s, 12); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_256_256_RIGHT(s), c, SCHWAEMM_256_256_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Esch256. - */ -#define ESCH_256_RATE 16 - -/** - * \brief Perform the M3 step for Esch256 to mix the input with the state. - * - * \param s SPARKLE-384 state. - * \param block Block of input data that has been padded to the rate. - * \param domain Domain separator for this phase. - */ -#define esch_256_m3(s, block, domain) \ - do { \ - uint32_t tx = (block)[0] ^ (block)[2]; \ - uint32_t ty = (block)[1] ^ (block)[3]; \ - tx = leftRotate16(tx ^ (tx << 16)); \ - ty = leftRotate16(ty ^ (ty << 16)); \ - s[0] ^= (block)[0] ^ ty; \ - s[1] ^= (block)[1] ^ tx; \ - s[2] ^= (block)[2] ^ ty; \ - s[3] ^= (block)[3] ^ tx; \ - if ((domain) != 0) \ - s[5] ^= DOMAIN(domain); \ - s[4] ^= ty; \ - s[5] ^= tx; \ - } while (0) - -/** @cond esch_256 */ - -/** - * \brief Word-based state for the Esch256 incremental hash mode. 
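The tx/ty mixing inside esch_256_m3() hinges on the expression leftRotate16(x ^ (x << 16)); a small standalone check (illustrative names only) shows that this equals rotl16(x) ^ (x & 0xffff), i.e. the low half of the word is folded into both halves of the result, which appears to correspond to the ell() transform from the SPARKLE specification.

#include <assert.h>
#include <stdint.h>

static uint32_t rotl16(uint32_t x) { return (x << 16) | (x >> 16); }

/* The block-mixing term used by esch_256_m3 and esch_384_m4. */
static uint32_t ell(uint32_t x) { return rotl16(x ^ (x << 16)); }

int main(void)
{
    const uint32_t samples[3] = { 0x00000001u, 0x89abcdefu, 0xffff0000u };
    int i;
    for (i = 0; i < 3; ++i) {
        uint32_t x = samples[i];
        /* ell(x) folds the low 16 bits of x into both halves of the result. */
        assert(ell(x) == (rotl16(x) ^ (x & 0xffffu)));
    }
    return 0;
}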
- */ -typedef union -{ - struct { - uint32_t state[SPARKLE_384_STATE_SIZE]; - uint32_t block[4]; - unsigned char count; - } s; - unsigned long long align; - -} esch_256_hash_state_wt; - -/** @endcond */ - -int esch_256_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - uint32_t block[ESCH_256_RATE / 4]; - memset(s, 0, sizeof(s)); - while (inlen > ESCH_256_RATE) { - memcpy(block, in, ESCH_256_RATE); - esch_256_m3(s, block, 0x00); - sparkle_384(s, 7); - in += ESCH_256_RATE; - inlen -= ESCH_256_RATE; - } - if (inlen == ESCH_256_RATE) { - memcpy(block, in, ESCH_256_RATE); - esch_256_m3(s, block, 0x02); - } else { - unsigned temp = (unsigned)inlen; - memcpy(block, in, temp); - ((unsigned char *)block)[temp] = 0x80; - memset(((unsigned char *)block) + temp + 1, 0, - ESCH_256_RATE - temp - 1); - esch_256_m3(s, block, 0x01); - } - sparkle_384(s, 11); - memcpy(out, s, ESCH_256_RATE); - sparkle_384(s, 7); - memcpy(out + ESCH_256_RATE, s, ESCH_256_RATE); - return 0; -} - -void esch_256_hash_init(esch_256_hash_state_t *state) -{ - memset(state, 0, sizeof(esch_256_hash_state_t)); -} - -void esch_256_hash_update - (esch_256_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - esch_256_hash_state_wt *st = (esch_256_hash_state_wt *)state; - unsigned temp; - while (inlen > 0) { - if (st->s.count == ESCH_256_RATE) { - esch_256_m3(st->s.state, st->s.block, 0x00); - sparkle_384(st->s.state, 7); - st->s.count = 0; - } - temp = ESCH_256_RATE - st->s.count; - if (temp > inlen) - temp = (unsigned)inlen; - memcpy(((unsigned char *)(st->s.block)) + st->s.count, in, temp); - st->s.count += temp; - in += temp; - inlen -= temp; - } -} - -void esch_256_hash_finalize - (esch_256_hash_state_t *state, unsigned char *out) -{ - esch_256_hash_state_wt *st = (esch_256_hash_state_wt *)state; - - /* Pad and process the last block */ - if (st->s.count == ESCH_256_RATE) { - esch_256_m3(st->s.state, st->s.block, 0x02); - } else { - unsigned temp = st->s.count; - ((unsigned char *)(st->s.block))[temp] = 0x80; - memset(((unsigned char *)(st->s.block)) + temp + 1, 0, - ESCH_256_RATE - temp - 1); - esch_256_m3(st->s.state, st->s.block, 0x01); - } - sparkle_384(st->s.state, 11); - - /* Generate the final hash value */ - memcpy(out, st->s.state, ESCH_256_RATE); - sparkle_384(st->s.state, 7); - memcpy(out + ESCH_256_RATE, st->s.state, ESCH_256_RATE); -} - -/** - * \brief Rate at which bytes are processed by Esch384. - */ -#define ESCH_384_RATE 16 - -/** - * \brief Perform the M4 step for Esch384 to mix the input with the state. - * - * \param s SPARKLE-512 state. - * \param block Block of input data that has been padded to the rate. - * \param domain Domain separator for this phase. - */ -#define esch_384_m4(s, block, domain) \ - do { \ - uint32_t tx = block[0] ^ block[2]; \ - uint32_t ty = block[1] ^ block[3]; \ - tx = leftRotate16(tx ^ (tx << 16)); \ - ty = leftRotate16(ty ^ (ty << 16)); \ - s[0] ^= block[0] ^ ty; \ - s[1] ^= block[1] ^ tx; \ - s[2] ^= block[2] ^ ty; \ - s[3] ^= block[3] ^ tx; \ - if ((domain) != 0) \ - s[7] ^= DOMAIN(domain); \ - s[4] ^= ty; \ - s[5] ^= tx; \ - s[6] ^= ty; \ - s[7] ^= tx; \ - } while (0) - -/** @cond esch_384 */ - -/** - * \brief Word-based state for the Esch384 incremental hash mode. 
- */ -typedef union -{ - struct { - uint32_t state[SPARKLE_512_STATE_SIZE]; - uint32_t block[4]; - unsigned char count; - } s; - unsigned long long align; - -} esch_384_hash_state_wt; - -/** @endcond */ - -int esch_384_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - uint32_t s[SPARKLE_512_STATE_SIZE]; - uint32_t block[ESCH_256_RATE / 4]; - memset(s, 0, sizeof(s)); - while (inlen > ESCH_384_RATE) { - memcpy(block, in, ESCH_384_RATE); - esch_384_m4(s, block, 0x00); - sparkle_512(s, 8); - in += ESCH_384_RATE; - inlen -= ESCH_384_RATE; - } - if (inlen == ESCH_384_RATE) { - memcpy(block, in, ESCH_384_RATE); - esch_384_m4(s, block, 0x02); - } else { - unsigned temp = (unsigned)inlen; - memcpy(block, in, temp); - ((unsigned char *)block)[temp] = 0x80; - memset(((unsigned char *)block) + temp + 1, 0, - ESCH_384_RATE - temp - 1); - esch_384_m4(s, block, 0x01); - } - sparkle_512(s, 12); - memcpy(out, s, ESCH_384_RATE); - sparkle_512(s, 8); - memcpy(out + ESCH_384_RATE, s, ESCH_384_RATE); - sparkle_512(s, 8); - memcpy(out + ESCH_384_RATE * 2, s, ESCH_384_RATE); - return 0; -} - -void esch_384_hash_init(esch_384_hash_state_t *state) -{ - memset(state, 0, sizeof(esch_384_hash_state_t)); -} - -void esch_384_hash_update - (esch_384_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - esch_384_hash_state_wt *st = (esch_384_hash_state_wt *)state; - unsigned temp; - while (inlen > 0) { - if (st->s.count == ESCH_384_RATE) { - esch_384_m4(st->s.state, st->s.block, 0x00); - sparkle_512(st->s.state, 8); - st->s.count = 0; - } - temp = ESCH_384_RATE - st->s.count; - if (temp > inlen) - temp = (unsigned)inlen; - memcpy(((unsigned char *)(st->s.block)) + st->s.count, in, temp); - st->s.count += temp; - in += temp; - inlen -= temp; - } -} - -void esch_384_hash_finalize - (esch_384_hash_state_t *state, unsigned char *out) -{ - esch_384_hash_state_wt *st = (esch_384_hash_state_wt *)state; - - /* Pad and process the last block */ - if (st->s.count == ESCH_384_RATE) { - esch_384_m4(st->s.state, st->s.block, 0x02); - } else { - unsigned temp = st->s.count; - ((unsigned char *)(st->s.block))[temp] = 0x80; - memset(((unsigned char *)(st->s.block)) + temp + 1, 0, - ESCH_384_RATE - temp - 1); - esch_384_m4(st->s.state, st->s.block, 0x01); - } - sparkle_512(st->s.state, 12); - - /* Generate the final hash value */ - memcpy(out, st->s.state, ESCH_384_RATE); - sparkle_512(st->s.state, 8); - memcpy(out + ESCH_384_RATE, st->s.state, ESCH_384_RATE); - sparkle_512(st->s.state, 8); - memcpy(out + ESCH_384_RATE * 2, st->s.state, ESCH_384_RATE); -} diff --git a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/sparkle.h b/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/sparkle.h deleted file mode 100644 index dd0999e..0000000 --- a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys-avr/sparkle.h +++ /dev/null @@ -1,515 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
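A short usage sketch for the Esch256 API implemented above, hashing the same message in one shot and incrementally; the message text and the split point are arbitrary, and the two digests should match.

#include <stdio.h>
#include <string.h>
#include "sparkle.h"

int main(void)
{
    static const unsigned char msg[] = "lightweight hashing with Esch256";
    unsigned char d1[ESCH_256_HASH_SIZE];
    unsigned char d2[ESCH_256_HASH_SIZE];
    esch_256_hash_state_t st;

    /* One-shot hashing. */
    esch_256_hash(d1, msg, sizeof(msg) - 1);

    /* Incremental hashing of the same data in two pieces. */
    esch_256_hash_init(&st);
    esch_256_hash_update(&st, msg, 10);
    esch_256_hash_update(&st, msg + 10, sizeof(msg) - 1 - 10);
    esch_256_hash_finalize(&st, d2);

    printf("digests %s\n", memcmp(d1, d2, sizeof(d1)) == 0 ? "match" : "differ");
    return 0;
}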
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SPARKLE_H -#define LWCRYPTO_SPARKLE_H - -#include "aead-common.h" - -/** - * \file sparkle.h - * \brief Encryption and hash algorithms based on the SPARKLE permutation. - * - * SPARKLE is a family of encryption and hash algorithms that are based - * around the SPARKLE permutation. There are three versions of the - * permutation with 256-bit, 384-bit, and 512-bit state sizes. - * The algorithms in the family are: - * - * \li Schwaemm256-128 with a 128-bit key, a 256-bit nonce, and a 128-bit tag. - * This is the primary encryption algorithm in the family. - * \li Schwaemm192-192 with a 192-bit key, a 192-bit nonce, and a 192-bit tag. - * \li Schwaemm128-128 with a 128-bit key, a 128-bit nonce, and a 128-bit tag. - * \li Schwaemm256-256 with a 256-bit key, a 256-bit nonce, and a 256-bit tag. - * \li Esch256 hash algorithm with a 256-bit digest output. This is the - * primary hash algorithm in the family. - * \li Esch384 hash algorithm with a 384-bit digest output. - * - * References: https://www.cryptolux.org/index.php/Sparkle - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_NONCE_SIZE 32 - -/** - * \brief Size of the key for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_KEY_SIZE 24 - -/** - * \brief Size of the authentication tag for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_TAG_SIZE 24 - -/** - * \brief Size of the nonce for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_NONCE_SIZE 24 - -/** - * \brief Size of the key for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_NONCE_SIZE 16 - -/** - * \brief Size of the key for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_TAG_SIZE 32 - -/** - * \brief Size of the nonce for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_NONCE_SIZE 32 - -/** - * \brief Size of the hash output for Esch256. 
- */ -#define ESCH_256_HASH_SIZE 32 - -/** - * \brief Size of the hash output for Esch384. - */ -#define ESCH_384_HASH_SIZE 48 - -/** - * \brief Meta-information block for the Schwaemm256-128 cipher. - */ -extern aead_cipher_t const schwaemm_256_128_cipher; - -/** - * \brief Meta-information block for the Schwaemm192-192 cipher. - */ -extern aead_cipher_t const schwaemm_192_192_cipher; - -/** - * \brief Meta-information block for the Schwaemm128-128 cipher. - */ -extern aead_cipher_t const schwaemm_128_128_cipher; - -/** - * \brief Meta-information block for the Schwaemm256-256 cipher. - */ -extern aead_cipher_t const schwaemm_256_256_cipher; - -/** - * \brief Meta-information block for the Esch256 hash algorithm. - */ -extern aead_hash_algorithm_t const esch_256_hash_algorithm; - -/** - * \brief Meta-information block for the Esch384 hash algorithm. - */ -extern aead_hash_algorithm_t const esch_384_hash_algorithm; - -/** - * \brief State information for the Esch256 incremental hash mode. - */ -typedef union -{ - struct { - unsigned char state[48]; /**< Current hash state */ - unsigned char block[16]; /**< Partial input data block */ - unsigned char count; /**< Number of bytes in the current block */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} esch_256_hash_state_t; - -/** - * \brief State information for the Esch384 incremental hash mode. - */ -typedef union -{ - struct { - unsigned char state[64]; /**< Current hash state */ - unsigned char block[16]; /**< Partial input data block */ - unsigned char count; /**< Number of bytes in the current block */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} esch_384_hash_state_t; - -/** - * \brief Encrypts and authenticates a packet with Schwaemm256-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 32 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa schwaemm_256_128_aead_decrypt() - */ -int schwaemm_256_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm256-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 32 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_256_128_aead_encrypt() - */ -int schwaemm_256_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Schwaemm192-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 24 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 24 bytes in length. - * \param k Points to the 24 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa schwaemm_192_192_aead_decrypt() - */ -int schwaemm_192_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm192-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 24 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 24 bytes in length. - * \param k Points to the 24 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_192_192_aead_encrypt() - */ -int schwaemm_192_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Schwaemm128-128. - * - * \param c Buffer to receive the output. 
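[Reviewer's aside, not part of the patch: the header text above declares the Schwaemm256-128 AEAD entry points and their size constants. A minimal, illustrative round-trip sketch of that interface follows; every buffer value and the helper name are placeholders invented for the example, and only the function names and SCHWAEMM_256_128_* constants are taken from the header itself.]

```c
/* Illustrative only -- not part of this patch.  Round-trips a short
 * message through the Schwaemm256-128 AEAD interface declared above.
 * All buffer contents are placeholders for the sketch. */
#include <string.h>
#include "sparkle.h"

int schwaemm_256_128_roundtrip(void)
{
    unsigned char key[SCHWAEMM_256_128_KEY_SIZE] = {0};     /* 16-byte key */
    unsigned char nonce[SCHWAEMM_256_128_NONCE_SIZE] = {0}; /* 32-byte nonce */
    unsigned char msg[12] = "hello world";
    unsigned char ad[4] = "meta";
    unsigned char ct[sizeof(msg) + SCHWAEMM_256_128_TAG_SIZE];
    unsigned char out[sizeof(msg)];
    unsigned long long ctlen, outlen;

    /* Encrypt: ct receives ciphertext plus the 16-byte tag. */
    if (schwaemm_256_128_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                                      ad, sizeof(ad), 0, nonce, key) != 0)
        return -1;

    /* Decrypt: returns -1 if the authentication tag does not verify. */
    if (schwaemm_256_128_aead_decrypt(out, &outlen, 0, ct, ctlen,
                                      ad, sizeof(ad), nonce, key) != 0)
        return -1;

    return memcmp(out, msg, sizeof(msg)) == 0 ? 0 : -1;
}
```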
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa schwaemm_128_128_aead_decrypt() - */ -int schwaemm_128_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm128-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_128_128_aead_encrypt() - */ -int schwaemm_128_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Schwaemm256-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa schwaemm_256_256_aead_decrypt() - */ -int schwaemm_256_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm256-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_256_256_aead_encrypt() - */ -int schwaemm_256_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with Esch256 to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * ESCH_256_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int esch_256_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an Esch256 hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa esch_256_hash_update(), esch_256_hash_finalize(), esch_256_hash() - */ -void esch_256_hash_init(esch_256_hash_state_t *state); - -/** - * \brief Updates an Esch256 state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa esch_256_hash_init(), esch_256_hash_finalize() - */ -void esch_256_hash_update - (esch_256_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an Esch256 hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 32-byte hash value. - * - * \sa esch_256_hash_init(), esch_256_hash_update() - */ -void esch_256_hash_finalize - (esch_256_hash_state_t *state, unsigned char *out); - -/** - * \brief Hashes a block of input data with Esch384 to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * ESCH_384_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. 
- * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int esch_384_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an Esch384 hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa esch_384_hash_update(), esch_384_hash_finalize(), esch_384_hash() - */ -void esch_384_hash_init(esch_384_hash_state_t *state); - -/** - * \brief Updates an Esch384 state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa esch_384_hash_init(), esch_384_hash_finalize() - */ -void esch_384_hash_update - (esch_384_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an Esch384 hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 48-byte hash value. - * - * \sa esch_384_hash_init(), esch_384_hash_update() - */ -void esch_384_hash_finalize - (esch_384_hash_state_t *state, unsigned char *out); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys/internal-sparkle-avr.S b/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys/internal-sparkle-avr.S new file mode 100644 index 0000000..753ea2f --- /dev/null +++ b/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys/internal-sparkle-avr.S @@ -0,0 +1,2887 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global sparkle_256 + .type sparkle_256, @function +sparkle_256: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + push r22 + ld r22,Z + ldd r23,Z+1 + ldd r26,Z+2 + ldd r27,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + rcall 129f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,1 + eor r8,r18 + rcall 129f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,2 + eor r8,r18 + rcall 129f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,3 + eor r8,r18 + rcall 129f + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,4 + eor r8,r18 + rcall 129f + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,5 + eor r8,r18 + rcall 129f + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,6 + eor r8,r18 + rcall 129f + pop r18 + cpi r18,7 + brne 5094f + rjmp 615f +5094: + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,7 + eor r8,r18 + rcall 129f + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,8 + eor 
r8,r18 + rcall 129f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,9 + eor r8,r18 + rcall 129f + rjmp 615f +129: + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + movw r12,r22 + movw r14,r26 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + movw r24,r4 + movw r16,r6 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r28,Z+24 + ldd r29,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc 
r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + eor r14,r12 + eor r15,r13 + eor r16,r24 + eor r17,r25 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + eor r14,r8 + eor r15,r9 + eor r12,r10 + eor r13,r11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + std Z+20,r18 + std Z+21,r19 + std Z+22,r20 + std Z+23,r21 + movw r18,r4 + movw r20,r6 + movw r4,r14 + movw r6,r12 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + movw r8,r18 + movw r10,r20 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + eor r16,r28 + eor r17,r29 + eor r24,r2 + eor r25,r3 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + std Z+24,r28 + std Z+25,r29 + std Z+26,r2 + std Z+27,r3 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + movw r14,r22 + movw r12,r26 + eor r14,r18 + eor r15,r19 + eor r12,r20 + eor r13,r21 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + movw r22,r16 + movw r26,r24 + eor r22,r28 + eor r23,r29 + eor r26,r2 + eor r27,r3 + movw r28,r14 + movw r2,r12 + ret +615: + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sparkle_256, .-sparkle_256 + + .text +.global sparkle_384 + .type sparkle_384, @function +sparkle_384: + push r28 + push r29 + push r2 + push r3 + 
push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + push r22 + ld r22,Z + ldd r23,Z+1 + ldd r26,Z+2 + ldd r27,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + rcall 140f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,1 + eor r8,r18 + rcall 140f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,2 + eor r8,r18 + rcall 140f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,3 + eor r8,r18 + rcall 140f + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,4 + eor r8,r18 + rcall 140f + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,5 + eor r8,r18 + rcall 140f + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,6 + eor r8,r18 + rcall 140f + pop r18 + cpi r18,7 + brne 5094f + rjmp 886f +5094: + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,7 + eor r8,r18 + rcall 140f + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,8 + eor r8,r18 + rcall 140f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,9 + eor r8,r18 + rcall 140f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,10 + eor r8,r18 + rcall 140f + rjmp 886f +140: + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr 
r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + movw r12,r22 + movw r14,r26 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + movw r24,r4 + movw r16,r6 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r28,Z+24 + ldd r29,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + 
eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r22 + std Z+17,r23 + std Z+18,r26 + std Z+19,r27 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r28 + std Z+25,r29 + std Z+26,r2 + std Z+27,r3 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + eor r12,r22 + eor r13,r23 + eor r14,r26 + eor r15,r27 + eor r24,r4 + eor r25,r5 + eor r16,r6 + eor r17,r7 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r28,Z+40 + ldd r29,Z+41 + ldd r2,Z+42 + ldd r3,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + eor r14,r12 + eor r15,r13 + eor r16,r24 + eor r17,r25 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + eor r14,r8 + eor r15,r9 + eor r12,r10 + eor r13,r11 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + ldd r0,Z+4 + eor r18,r0 + ldd r0,Z+5 + eor r19,r0 + ldd r0,Z+6 + eor r20,r0 + ldd r0,Z+7 
+ eor r21,r0 + std Z+20,r18 + std Z+21,r19 + std Z+22,r20 + std Z+23,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Z+28,r18 + std Z+29,r19 + std Z+30,r20 + std Z+31,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + std Z+36,r18 + std Z+37,r19 + std Z+38,r20 + std Z+39,r21 + eor r8,r14 + eor r9,r15 + eor r10,r12 + eor r11,r13 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + eor r18,r16 + eor r19,r17 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + eor r16,r28 + eor r17,r29 + eor r24,r2 + eor r25,r3 + ldd r28,Z+16 + ldd r29,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + std Z+40,r28 + std Z+41,r29 + std Z+42,r2 + std Z+43,r3 + ld r14,Z + ldd r15,Z+1 + ldd r12,Z+2 + ldd r13,Z+3 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + std Z+24,r14 + std Z+25,r15 + std Z+26,r12 + std Z+27,r13 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + std Z+32,r18 + std Z+33,r19 + std Z+34,r20 + std Z+35,r21 + eor r28,r16 + eor r29,r17 + eor r2,r24 + eor r3,r25 + ret +886: + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sparkle_384, .-sparkle_384 + + .text +.global sparkle_512 + .type sparkle_512, @function +sparkle_512: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + push r22 + ld r22,Z + ldd r23,Z+1 + ldd r26,Z+2 + ldd r27,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + rcall 151f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,1 + eor r8,r18 + rcall 151f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,2 + eor r8,r18 + rcall 151f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,3 + eor r8,r18 + rcall 151f + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,4 + eor r8,r18 + rcall 151f + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,5 + eor r8,r18 + rcall 151f + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,6 + eor r8,r18 + rcall 151f + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,7 + eor r8,r18 + rcall 151f + pop r18 + cpi r18,8 + brne 5105f + rjmp 1189f +5105: + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,8 + eor r8,r18 + rcall 151f + ldi r18,128 + ldi r19,88 + ldi 
r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,9 + eor r8,r18 + rcall 151f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,10 + eor r8,r18 + rcall 151f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,11 + eor r8,r18 + rcall 151f + rjmp 1189f +151: + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + movw r12,r22 + movw r14,r26 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + movw r24,r4 + movw r16,r6 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r28,Z+24 + ldd r29,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,86 
+ ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r22 + std Z+17,r23 + std Z+18,r26 + std Z+19,r27 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r28 + std Z+25,r29 + std Z+26,r2 + std Z+27,r3 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + eor r12,r22 + eor r13,r23 + eor r14,r26 + eor r15,r27 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + eor r24,r4 + eor r25,r5 + eor r16,r6 + eor r17,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r28,Z+40 + ldd r29,Z+41 + ldd r2,Z+42 + ldd r3,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw 
r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + std Z+32,r22 + std Z+33,r23 + std Z+34,r26 + std Z+35,r27 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r28 + std Z+41,r29 + std Z+42,r2 + std Z+43,r3 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + ldd r22,Z+48 + ldd r23,Z+49 + ldd r26,Z+50 + ldd r27,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r28,Z+56 + ldd r29,Z+57 + ldd r2,Z+58 + ldd r3,Z+59 + ldd r8,Z+60 + ldd r9,Z+61 + ldd r10,Z+62 + ldd r11,Z+63 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror 
r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + eor r14,r12 + eor r15,r13 + eor r16,r24 + eor r17,r25 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + ldd r18,Z+44 + ldd r19,Z+45 + ldd r20,Z+46 + ldd r21,Z+47 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + eor r14,r8 + eor r15,r9 + eor r12,r10 + eor r13,r11 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Z+60,r8 + std Z+61,r9 + std Z+62,r10 + std Z+63,r11 + ldd r8,Z+4 + ldd r9,Z+5 + ldd r10,Z+6 + ldd r11,Z+7 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + std Z+28,r4 + std Z+29,r5 + std Z+30,r6 + std Z+31,r7 + std Z+36,r8 + std Z+37,r9 + std Z+38,r10 + std Z+39,r11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + ldd r8,Z+52 + ldd r9,Z+53 + ldd r10,Z+54 + ldd r11,Z+55 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + ldd r0,Z+60 + eor r14,r0 + ldd r0,Z+61 + eor r15,r0 + ldd r0,Z+62 + eor r12,r0 + ldd r0,Z+63 + eor r13,r0 + std Z+20,r14 + std Z+21,r15 + std Z+22,r12 + std Z+23,r13 + movw r4,r18 + movw r6,r20 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + std Z+48,r22 + std Z+49,r23 + std Z+50,r26 + std Z+51,r27 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + eor r18,r16 + eor r19,r17 + eor r20,r24 + eor r21,r25 + eor r16,r28 + eor r17,r29 + eor r24,r2 + eor r25,r3 + ldd r14,Z+24 + ldd r15,Z+25 + ldd r12,Z+26 + ldd r13,Z+27 + std Z+56,r14 + std Z+57,r15 + std Z+58,r12 + std Z+59,r13 + ld r14,Z + ldd r15,Z+1 + ldd r12,Z+2 + ldd r13,Z+3 + eor r22,r14 + eor r23,r15 + eor r26,r12 + eor r27,r13 + std Z+24,r22 + std Z+25,r23 + std Z+26,r26 + std Z+27,r27 + std Z+32,r14 + std Z+33,r15 + std Z+34,r12 + std Z+35,r13 + ldd r14,Z+8 + ldd r15,Z+9 + ldd r12,Z+10 + ldd r13,Z+11 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + movw r22,r18 + movw r26,r20 + std Z+40,r14 + std Z+41,r15 + std Z+42,r12 + std Z+43,r13 + ldd r28,Z+48 + ldd r29,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r14,Z+16 + ldd r15,Z+17 + ldd r12,Z+18 + ldd r13,Z+19 + eor r28,r14 + eor r29,r15 + eor r2,r12 + eor r3,r13 + std Z+48,r14 + std Z+49,r15 + std Z+50,r12 + std Z+51,r13 + ldd r0,Z+56 + eor r16,r0 + ldd r0,Z+57 + eor r17,r0 + ldd r0,Z+58 + eor r24,r0 + ldd r0,Z+59 + eor r25,r0 + std Z+16,r16 + std Z+17,r17 + std Z+18,r24 + std Z+19,r25 + ret +1189: + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std 
Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sparkle_512, .-sparkle_512 + +#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys/internal-sparkle.c b/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys/internal-sparkle.c index 822af50..4a4c0fb 100644 --- a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys/internal-sparkle.c +++ b/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys/internal-sparkle.c @@ -22,6 +22,8 @@ #include "internal-sparkle.h" +#if !defined(__AVR__) + /* The 8 basic round constants from the specification */ #define RC_0 0xB7E15162 #define RC_1 0xBF715880 @@ -66,7 +68,7 @@ void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps) { uint32_t x0, x1, x2, x3; uint32_t y0, y1, y2, y3; - uint32_t tx, ty, tz, tw; + uint32_t tx, ty; unsigned step; /* Load the SPARKLE-256 state up into local variables */ @@ -105,18 +107,20 @@ void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps) /* Linear layer */ tx = x0 ^ x1; ty = y0 ^ y1; - tw = x0; - tz = y0; tx = leftRotate16(tx ^ (tx << 16)); ty = leftRotate16(ty ^ (ty << 16)); - x0 = x3 ^ x1 ^ ty; - x3 = x1; - y0 = y3 ^ y1 ^ tx; + y2 ^= tx; + tx ^= y3; y3 = y1; - x1 = x2 ^ tw ^ ty; - x2 = tw; - y1 = y2 ^ tz ^ tx; - y2 = tz; + y1 = y2 ^ y0; + y2 = y0; + y0 = tx ^ y3; + x2 ^= ty; + ty ^= x3; + x3 = x1; + x1 = x2 ^ x0; + x2 = x0; + x0 = ty ^ x3; } /* Write the local variables back to the SPARKLE-256 state */ @@ -145,7 +149,7 @@ void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps) { uint32_t x0, x1, x2, x3, x4, x5; uint32_t y0, y1, y2, y3, y4, y5; - uint32_t tx, ty, tz, tw; + uint32_t tx, ty; unsigned step; /* Load the SPARKLE-384 state up into local variables */ @@ -194,22 +198,26 @@ void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps) /* Linear layer */ tx = x0 ^ x1 ^ x2; ty = y0 ^ y1 ^ y2; - tw = x0; - tz = y0; tx = leftRotate16(tx ^ (tx << 16)); ty = leftRotate16(ty ^ (ty << 16)); - x0 = x4 ^ x1 ^ ty; - x4 = x1; - y0 = y4 ^ y1 ^ tx; + y3 ^= tx; + y4 ^= tx; + tx ^= y5; + y5 = y2; + y2 = y3 ^ y0; + y3 = y0; + y0 = y4 ^ y1; y4 = y1; - x1 = x5 ^ x2 ^ ty; + y1 = tx ^ y5; + x3 ^= ty; + x4 ^= ty; + ty ^= x5; x5 = x2; - y1 = y5 ^ y2 ^ tx; - y5 = y2; - x2 = x3 ^ tw ^ ty; - x3 = tw; - y2 = y3 ^ tz ^ tx; - y3 = tz; + x2 = x3 ^ x0; + x3 = x0; + x0 = x4 ^ x1; + x4 = x1; + x1 = ty ^ x5; } /* Write the local variables back to the SPARKLE-384 state */ @@ -246,7 +254,7 @@ void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps) { uint32_t x0, x1, x2, x3, x4, x5, x6, x7; uint32_t y0, y1, y2, y3, y4, y5, y6, y7; - uint32_t tx, ty, tz, tw; + uint32_t tx, ty; unsigned step; /* Load the SPARKLE-512 state up into local variables */ @@ -305,26 +313,32 @@ void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps) /* Linear layer */ tx = x0 ^ x1 ^ x2 ^ x3; ty = y0 ^ y1 ^ y2 ^ y3; - tw = x0; - tz = y0; tx = leftRotate16(tx ^ (tx << 16)); ty = leftRotate16(ty ^ (ty << 16)); - x0 = x5 ^ x1 ^ ty; - x5 = x1; - y0 = y5 ^ y1 ^ tx; + y4 ^= tx; + y5 ^= tx; + y6 ^= tx; + tx ^= y7; + y7 = y3; + y3 = y4 ^ y0; + y4 = y0; + y0 = y5 ^ y1; y5 = y1; - x1 = x6 ^ x2 ^ ty; - x6 = x2; - y1 = y6 ^ y2 ^ tx; + y1 = y6 ^ y2; y6 = y2; - x2 = x7 ^ x3 ^ ty; + 
y2 = tx ^ y7; + x4 ^= ty; + x5 ^= ty; + x6 ^= ty; + ty ^= x7; x7 = x3; - y2 = y7 ^ y3 ^ tx; - y7 = y3; - x3 = x4 ^ tw ^ ty; - x4 = tw; - y3 = y4 ^ tz ^ tx; - y4 = tz; + x3 = x4 ^ x0; + x4 = x0; + x0 = x5 ^ x1; + x5 = x1; + x1 = x6 ^ x2; + x6 = x2; + x2 = ty ^ x7; } /* Write the local variables back to the SPARKLE-512 state */ @@ -364,3 +378,5 @@ void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps) le_store_word32((uint8_t *)&(s[15]), y7); #endif } + +#endif diff --git a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys/internal-util.h b/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys/internal-util.h +++ b/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) 
(rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) 
(leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys/sparkle.c b/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys/sparkle.c index b357de6..e2aa25a 100644 --- a/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys/sparkle.c +++ b/sparkle/Implementations/crypto_aead/schwaemm256256v1/rhys/sparkle.c @@ -123,24 +123,21 @@ aead_hash_algorithm_t const esch_384_hash_algorithm = { * \brief Perform the rho1 and rate whitening steps for Schwaemm256-128. * * \param s SPARKLE-384 state. - * \param domain Domain separator for this phase. */ -#define schwaemm_256_128_rho(s, domain) \ +#define schwaemm_256_128_rho(s) \ do { \ - uint32_t t0 = s[0]; \ - uint32_t t1 = s[1]; \ - uint32_t t2 = s[2]; \ - uint32_t t3 = s[3]; \ - if ((domain) != 0) \ - s[11] ^= DOMAIN(domain); \ + uint32_t t = s[0]; \ s[0] = s[4] ^ s[8]; \ + s[4] ^= t ^ s[8]; \ + t = s[1]; \ s[1] = s[5] ^ s[9]; \ + s[5] ^= t ^ s[9]; \ + t = s[2]; \ s[2] = s[6] ^ s[10]; \ + s[6] ^= t ^ s[10]; \ + t = s[3]; \ s[3] = s[7] ^ s[11]; \ - s[4] ^= t0 ^ s[8]; \ - s[5] ^= t1 ^ s[9]; \ - s[6] ^= t2 ^ s[10]; \ - s[7] ^= t3 ^ s[11]; \ + s[7] ^= t ^ s[11]; \ } while (0) /** @@ -155,18 +152,20 @@ static void schwaemm_256_128_authenticate const unsigned char *ad, unsigned long long adlen) { while (adlen > SCHWAEMM_256_128_RATE) { - schwaemm_256_128_rho(s, 0x00); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); sparkle_384(s, 7); ad += SCHWAEMM_256_128_RATE; adlen -= SCHWAEMM_256_128_RATE; } if (adlen == SCHWAEMM_256_128_RATE) { - schwaemm_256_128_rho(s, 0x05); + s[11] ^= DOMAIN(0x05); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); } else { unsigned temp = (unsigned)adlen; - schwaemm_256_128_rho(s, 0x04); + s[11] ^= DOMAIN(0x04); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, ad, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -202,7 +201,7 @@ int schwaemm_256_128_aead_encrypt while (mlen > SCHWAEMM_256_128_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s, 0x00); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); sparkle_384(s, 7); memcpy(c, block, SCHWAEMM_256_128_RATE); @@ -213,13 +212,15 @@ int schwaemm_256_128_aead_encrypt if (mlen == SCHWAEMM_256_128_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s, 0x07); + s[11] ^= DOMAIN(0x07); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); memcpy(c, block, SCHWAEMM_256_128_RATE); } else { unsigned temp = (unsigned)mlen; lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - schwaemm_256_128_rho(s, 0x06); + s[11] ^= DOMAIN(0x06); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; memcpy(c, block, temp); @@ -266,7 +267,7 @@ int schwaemm_256_128_aead_decrypt while (clen > SCHWAEMM_256_128_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s, 0x00); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); sparkle_384(s, 7); c += SCHWAEMM_256_128_RATE; @@ -276,12 +277,14 @@ int schwaemm_256_128_aead_decrypt if (clen 
== SCHWAEMM_256_128_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s, 0x07); + s[11] ^= DOMAIN(0x07); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); } else { unsigned temp = (unsigned)clen; lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - schwaemm_256_128_rho(s, 0x06); + s[11] ^= DOMAIN(0x06); + schwaemm_256_128_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -315,21 +318,18 @@ int schwaemm_256_128_aead_decrypt * \brief Perform the rho1 and rate whitening steps for Schwaemm192-192. * * \param s SPARKLE-384 state. - * \param domain Domain separator for this phase. */ -#define schwaemm_192_192_rho(s, domain) \ +#define schwaemm_192_192_rho(s) \ do { \ - uint32_t t0 = s[0]; \ - uint32_t t1 = s[1]; \ - uint32_t t2 = s[2]; \ - if ((domain) != 0) \ - s[11] ^= DOMAIN(domain); \ + uint32_t t = s[0]; \ s[0] = s[3] ^ s[6]; \ + s[3] ^= t ^ s[9]; \ + t = s[1]; \ s[1] = s[4] ^ s[7]; \ + s[4] ^= t ^ s[10]; \ + t = s[2]; \ s[2] = s[5] ^ s[8]; \ - s[3] ^= t0 ^ s[9]; \ - s[4] ^= t1 ^ s[10]; \ - s[5] ^= t2 ^ s[11]; \ + s[5] ^= t ^ s[11]; \ } while (0) /** @@ -344,18 +344,20 @@ static void schwaemm_192_192_authenticate const unsigned char *ad, unsigned long long adlen) { while (adlen > SCHWAEMM_192_192_RATE) { - schwaemm_192_192_rho(s, 0x00); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); sparkle_384(s, 7); ad += SCHWAEMM_192_192_RATE; adlen -= SCHWAEMM_192_192_RATE; } if (adlen == SCHWAEMM_192_192_RATE) { - schwaemm_192_192_rho(s, 0x09); + s[11] ^= DOMAIN(0x09); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); } else { unsigned temp = (unsigned)adlen; - schwaemm_192_192_rho(s, 0x08); + s[11] ^= DOMAIN(0x08); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, ad, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -391,7 +393,7 @@ int schwaemm_192_192_aead_encrypt while (mlen > SCHWAEMM_192_192_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s, 0x00); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); sparkle_384(s, 7); memcpy(c, block, SCHWAEMM_192_192_RATE); @@ -402,13 +404,15 @@ int schwaemm_192_192_aead_encrypt if (mlen == SCHWAEMM_192_192_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s, 0x0B); + s[11] ^= DOMAIN(0x0B); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); memcpy(c, block, SCHWAEMM_192_192_RATE); } else { unsigned temp = (unsigned)mlen; lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - schwaemm_192_192_rho(s, 0x0A); + s[11] ^= DOMAIN(0x0A); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; memcpy(c, block, temp); @@ -455,7 +459,7 @@ int schwaemm_192_192_aead_decrypt while (clen > SCHWAEMM_192_192_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s, 0x00); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); sparkle_384(s, 7); c += SCHWAEMM_192_192_RATE; @@ -465,12 +469,14 @@ int schwaemm_192_192_aead_decrypt if (clen == SCHWAEMM_192_192_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s, 0x0B); + s[11] ^= DOMAIN(0x0B); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, 
SCHWAEMM_192_192_RATE); } else { unsigned temp = (unsigned)clen; lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - schwaemm_192_192_rho(s, 0x0A); + s[11] ^= DOMAIN(0x0A); + schwaemm_192_192_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -504,18 +510,15 @@ int schwaemm_192_192_aead_decrypt * \brief Perform the rho1 and rate whitening steps for Schwaemm128-128. * * \param s SPARKLE-256 state. - * \param domain Domain separator for this phase. */ -#define schwaemm_128_128_rho(s, domain) \ +#define schwaemm_128_128_rho(s) \ do { \ - uint32_t t0 = s[0]; \ - uint32_t t1 = s[1]; \ - if ((domain) != 0) \ - s[7] ^= DOMAIN(domain); \ + uint32_t t = s[0]; \ s[0] = s[2] ^ s[4]; \ + s[2] ^= t ^ s[6]; \ + t = s[1]; \ s[1] = s[3] ^ s[5]; \ - s[2] ^= t0 ^ s[6]; \ - s[3] ^= t1 ^ s[7]; \ + s[3] ^= t ^ s[7]; \ } while (0) /** @@ -530,18 +533,20 @@ static void schwaemm_128_128_authenticate const unsigned char *ad, unsigned long long adlen) { while (adlen > SCHWAEMM_128_128_RATE) { - schwaemm_128_128_rho(s, 0x00); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); sparkle_256(s, 7); ad += SCHWAEMM_128_128_RATE; adlen -= SCHWAEMM_128_128_RATE; } if (adlen == SCHWAEMM_128_128_RATE) { - schwaemm_128_128_rho(s, 0x05); + s[7] ^= DOMAIN(0x05); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); } else { unsigned temp = (unsigned)adlen; - schwaemm_128_128_rho(s, 0x04); + s[7] ^= DOMAIN(0x04); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, ad, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -577,7 +582,7 @@ int schwaemm_128_128_aead_encrypt while (mlen > SCHWAEMM_128_128_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s, 0x00); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); sparkle_256(s, 7); memcpy(c, block, SCHWAEMM_128_128_RATE); @@ -588,13 +593,15 @@ int schwaemm_128_128_aead_encrypt if (mlen == SCHWAEMM_128_128_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s, 0x07); + s[7] ^= DOMAIN(0x07); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); memcpy(c, block, SCHWAEMM_128_128_RATE); } else { unsigned temp = (unsigned)mlen; lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - schwaemm_128_128_rho(s, 0x06); + s[7] ^= DOMAIN(0x06); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; memcpy(c, block, temp); @@ -641,7 +648,7 @@ int schwaemm_128_128_aead_decrypt while (clen > SCHWAEMM_128_128_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s, 0x00); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); sparkle_256(s, 7); c += SCHWAEMM_128_128_RATE; @@ -651,12 +658,14 @@ int schwaemm_128_128_aead_decrypt if (clen == SCHWAEMM_128_128_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s, 0x07); + s[7] ^= DOMAIN(0x07); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); } else { unsigned temp = (unsigned)clen; lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - schwaemm_128_128_rho(s, 0x06); + s[7] ^= DOMAIN(0x06); + schwaemm_128_128_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -690,24 +699,21 @@ int schwaemm_128_128_aead_decrypt * 
\brief Perform the rho1 and rate whitening steps for Schwaemm256-256. * * \param s SPARKLE-512 state. - * \param domain Domain separator for this phase. */ -#define schwaemm_256_256_rho(s, domain) \ +#define schwaemm_256_256_rho(s) \ do { \ - uint32_t t0 = s[0]; \ - uint32_t t1 = s[1]; \ - uint32_t t2 = s[2]; \ - uint32_t t3 = s[3]; \ - if ((domain) != 0) \ - s[15] ^= DOMAIN(domain); \ + uint32_t t = s[0]; \ s[0] = s[4] ^ s[8]; \ + s[4] ^= t ^ s[12]; \ + t = s[1]; \ s[1] = s[5] ^ s[9]; \ + s[5] ^= t ^ s[13]; \ + t = s[2]; \ s[2] = s[6] ^ s[10]; \ + s[6] ^= t ^ s[14]; \ + t = s[3]; \ s[3] = s[7] ^ s[11]; \ - s[4] ^= t0 ^ s[12]; \ - s[5] ^= t1 ^ s[13]; \ - s[6] ^= t2 ^ s[14]; \ - s[7] ^= t3 ^ s[15]; \ + s[7] ^= t ^ s[15]; \ } while (0) /** @@ -722,18 +728,20 @@ static void schwaemm_256_256_authenticate const unsigned char *ad, unsigned long long adlen) { while (adlen > SCHWAEMM_256_256_RATE) { - schwaemm_256_256_rho(s, 0x00); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); sparkle_512(s, 8); ad += SCHWAEMM_256_256_RATE; adlen -= SCHWAEMM_256_256_RATE; } if (adlen == SCHWAEMM_256_256_RATE) { - schwaemm_256_256_rho(s, 0x11); + s[15] ^= DOMAIN(0x11); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); } else { unsigned temp = (unsigned)adlen; - schwaemm_256_256_rho(s, 0x10); + s[15] ^= DOMAIN(0x10); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, ad, temp); ((unsigned char *)s)[temp] ^= 0x80; } @@ -769,7 +777,7 @@ int schwaemm_256_256_aead_encrypt while (mlen > SCHWAEMM_256_256_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s, 0x00); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); sparkle_512(s, 8); memcpy(c, block, SCHWAEMM_256_256_RATE); @@ -780,13 +788,15 @@ int schwaemm_256_256_aead_encrypt if (mlen == SCHWAEMM_256_256_RATE) { lw_xor_block_2_src (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s, 0x13); + s[15] ^= DOMAIN(0x13); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); memcpy(c, block, SCHWAEMM_256_256_RATE); } else { unsigned temp = (unsigned)mlen; lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - schwaemm_256_256_rho(s, 0x12); + s[15] ^= DOMAIN(0x12); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; memcpy(c, block, temp); @@ -833,7 +843,7 @@ int schwaemm_256_256_aead_decrypt while (clen > SCHWAEMM_256_256_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s, 0x00); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); sparkle_512(s, 8); c += SCHWAEMM_256_256_RATE; @@ -843,12 +853,14 @@ int schwaemm_256_256_aead_decrypt if (clen == SCHWAEMM_256_256_RATE) { lw_xor_block_2_src (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s, 0x13); + s[15] ^= DOMAIN(0x13); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); } else { unsigned temp = (unsigned)clen; lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - schwaemm_256_256_rho(s, 0x12); + s[15] ^= DOMAIN(0x12); + schwaemm_256_256_rho(s); lw_xor_block((unsigned char *)s, m, temp); ((unsigned char *)s)[temp] ^= 0x80; } diff --git a/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/aead-common.c b/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/aead-common.c deleted file mode 
100644 index 84fc53a..0000000 --- a/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/aead-common.h b/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. 
- */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. 
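
[Editor's note: the AEAD_FLAG_LITTLE_ENDIAN description above only tells an application which end of the nonce a packet sequence number belongs in. A minimal sketch of that use, assuming the aead_cipher_t metadata declared in this header (the same declarations are retained in the kept rhys/ copies); the helper name format_sequence_nonce is invented for illustration and is not part of the patch.]

#include <string.h>
#include <stdint.h>
#include "aead-common.h"

/* Hypothetical helper: write a 64-bit sequence number into an all-zero
 * nonce, honouring the cipher's natural byte order.  Little-endian
 * ciphers take the counter in the leading bytes (least significant byte
 * first); big-endian ciphers take it in the trailing bytes. */
static void format_sequence_nonce
    (unsigned char *nonce, const aead_cipher_t *cipher, uint64_t seq)
{
    unsigned i;
    memset(nonce, 0, cipher->nonce_len);
    if (cipher->flags & AEAD_FLAG_LITTLE_ENDIAN) {
        for (i = 0; i < 8 && i < cipher->nonce_len; ++i)
            nonce[i] = (unsigned char)(seq >> (8 * i));
    } else {
        for (i = 0; i < 8 && i < cipher->nonce_len; ++i)
            nonce[cipher->nonce_len - 1 - i] = (unsigned char)(seq >> (8 * i));
    }
}
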
- */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
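
[Editor's note: the tag-checking contract documented above is easier to see with a caller in view. A minimal sketch, assuming only the aead_check_tag declaration from this header (retained in the kept rhys/ copies); finish_decrypt and the 16-byte tag length are invented for illustration and are not part of the patch. For two-pass decryption, aead_check_tag_precheck below additionally folds a previous 0/-1 check result into the outcome.]

#include "aead-common.h"

/* Hypothetical final step of an AEAD decrypt routine: "computed" is the
 * tag recomputed over the decrypted data, "received" is the tag taken
 * from the end of the ciphertext.  aead_check_tag compares the two in
 * constant time and wipes the plaintext buffer on mismatch, so the
 * caller only has to propagate the 0 (ok) / -1 (bad tag) result. */
static int finish_decrypt
    (unsigned char *plaintext, unsigned long long plaintext_len,
     const unsigned char computed[16], const unsigned char received[16])
{
    return aead_check_tag(plaintext, plaintext_len, computed, received, 16);
}
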
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/api.h b/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/api.h deleted file mode 100644 index ae8c7f6..0000000 --- a/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/api.h +++ /dev/null @@ -1 +0,0 @@ -#define CRYPTO_BYTES 32 diff --git a/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/hash.c b/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/hash.c deleted file mode 100644 index b9163f6..0000000 --- a/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/hash.c +++ /dev/null @@ -1,8 +0,0 @@ - -#include "sparkle.h" - -int crypto_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - return esch_256_hash(out, in, inlen); -} diff --git a/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/internal-sparkle-avr.S b/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/internal-sparkle-avr.S deleted file mode 100644 index 753ea2f..0000000 --- a/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/internal-sparkle-avr.S +++ /dev/null @@ -1,2887 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global sparkle_256 - .type sparkle_256, @function -sparkle_256: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - push r22 - ld r22,Z - ldd r23,Z+1 - ldd r26,Z+2 - ldd r27,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - rcall 129f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,1 - eor r8,r18 - rcall 129f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,2 - eor r8,r18 - rcall 129f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,3 - eor r8,r18 - rcall 129f - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,4 - eor r8,r18 - rcall 129f - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,5 - eor r8,r18 - rcall 129f - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,6 - eor r8,r18 - rcall 129f - pop r18 - cpi r18,7 - brne 5094f - rjmp 615f -5094: - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,7 - eor r8,r18 - rcall 129f - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,8 - eor r8,r18 - rcall 129f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,9 - eor r8,r18 - rcall 129f - rjmp 615f -129: - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc 
r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - movw r12,r22 - movw r14,r26 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - movw r24,r4 - movw r16,r6 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r28,Z+24 - ldd r29,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc 
r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - eor r14,r12 - eor r15,r13 - eor r16,r24 - eor r17,r25 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - eor r14,r8 - eor r15,r9 - eor r12,r10 - eor r13,r11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - std Z+20,r18 - std Z+21,r19 - std Z+22,r20 - std Z+23,r21 - movw r18,r4 - movw r20,r6 - movw r4,r14 - movw r6,r12 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - movw r8,r18 - movw r10,r20 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - eor r16,r28 - eor r17,r29 - eor r24,r2 - eor r25,r3 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - std Z+24,r28 - std Z+25,r29 - std Z+26,r2 - std Z+27,r3 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - movw r14,r22 - movw r12,r26 - eor r14,r18 - eor r15,r19 - eor r12,r20 - eor r13,r21 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - movw r22,r16 - movw r26,r24 - eor r22,r28 - eor r23,r29 - eor r26,r2 - eor r27,r3 - movw r28,r14 - movw r2,r12 - ret -615: - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sparkle_256, .-sparkle_256 - - .text -.global sparkle_384 - .type sparkle_384, @function -sparkle_384: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - push r22 - ld r22,Z - ldd r23,Z+1 - ldd r26,Z+2 - ldd r27,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r28,Z+8 
- ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - rcall 140f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,1 - eor r8,r18 - rcall 140f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,2 - eor r8,r18 - rcall 140f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,3 - eor r8,r18 - rcall 140f - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,4 - eor r8,r18 - rcall 140f - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,5 - eor r8,r18 - rcall 140f - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,6 - eor r8,r18 - rcall 140f - pop r18 - cpi r18,7 - brne 5094f - rjmp 886f -5094: - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,7 - eor r8,r18 - rcall 140f - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,8 - eor r8,r18 - rcall 140f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,9 - eor r8,r18 - rcall 140f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,10 - eor r8,r18 - rcall 140f - rjmp 886f -140: - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor 
r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - movw r12,r22 - movw r14,r26 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - movw r24,r4 - movw r16,r6 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r28,Z+24 - ldd r29,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r22 - std Z+17,r23 - std Z+18,r26 - std Z+19,r27 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std 
Z+24,r28 - std Z+25,r29 - std Z+26,r2 - std Z+27,r3 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - eor r12,r22 - eor r13,r23 - eor r14,r26 - eor r15,r27 - eor r24,r4 - eor r25,r5 - eor r16,r6 - eor r17,r7 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r28,Z+40 - ldd r29,Z+41 - ldd r2,Z+42 - ldd r3,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - eor r14,r12 - eor r15,r13 - eor r16,r24 - eor r17,r25 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - eor r14,r8 - eor r15,r9 - eor r12,r10 - eor r13,r11 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - ldd r0,Z+4 - eor r18,r0 - ldd r0,Z+5 - eor r19,r0 - ldd r0,Z+6 - eor r20,r0 - ldd r0,Z+7 - eor r21,r0 - std Z+20,r18 - std Z+21,r19 - std Z+22,r20 - std Z+23,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Z+28,r18 - std Z+29,r19 - std Z+30,r20 - std Z+31,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - 
std Z+36,r18 - std Z+37,r19 - std Z+38,r20 - std Z+39,r21 - eor r8,r14 - eor r9,r15 - eor r10,r12 - eor r11,r13 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - eor r18,r16 - eor r19,r17 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - eor r16,r28 - eor r17,r29 - eor r24,r2 - eor r25,r3 - ldd r28,Z+16 - ldd r29,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - std Z+40,r28 - std Z+41,r29 - std Z+42,r2 - std Z+43,r3 - ld r14,Z - ldd r15,Z+1 - ldd r12,Z+2 - ldd r13,Z+3 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - std Z+24,r14 - std Z+25,r15 - std Z+26,r12 - std Z+27,r13 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - std Z+32,r18 - std Z+33,r19 - std Z+34,r20 - std Z+35,r21 - eor r28,r16 - eor r29,r17 - eor r2,r24 - eor r3,r25 - ret -886: - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sparkle_384, .-sparkle_384 - - .text -.global sparkle_512 - .type sparkle_512, @function -sparkle_512: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - push r22 - ld r22,Z - ldd r23,Z+1 - ldd r26,Z+2 - ldd r27,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - rcall 151f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,1 - eor r8,r18 - rcall 151f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,2 - eor r8,r18 - rcall 151f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,3 - eor r8,r18 - rcall 151f - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,4 - eor r8,r18 - rcall 151f - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,5 - eor r8,r18 - rcall 151f - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,6 - eor r8,r18 - rcall 151f - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,7 - eor r8,r18 - rcall 151f - pop r18 - cpi r18,8 - brne 5105f - rjmp 1189f -5105: - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,8 - eor r8,r18 - rcall 151f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,9 - eor r8,r18 - rcall 151f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,10 - eor r8,r18 - rcall 151f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi 
r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,11 - eor r8,r18 - rcall 151f - rjmp 1189f -151: - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - movw r12,r22 - movw r14,r26 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - movw r24,r4 - movw r16,r6 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r28,Z+24 - ldd r29,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror 
r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r22 - std Z+17,r23 - std Z+18,r26 - std Z+19,r27 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r28 - std Z+25,r29 - std Z+26,r2 - std Z+27,r3 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - eor r12,r22 - eor r13,r23 - eor r14,r26 - eor r15,r27 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - eor r24,r4 - eor r25,r5 - eor r16,r6 - eor r17,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r28,Z+40 - ldd r29,Z+41 - ldd r2,Z+42 - ldd r3,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - 
eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - std Z+32,r22 - std Z+33,r23 - std Z+34,r26 - std Z+35,r27 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r28 - std Z+41,r29 - std Z+42,r2 - std Z+43,r3 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - ldd r22,Z+48 - ldd r23,Z+49 - ldd r26,Z+50 - ldd r27,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r28,Z+56 - ldd r29,Z+57 - ldd r2,Z+58 - ldd r3,Z+59 - ldd r8,Z+60 - ldd r9,Z+61 - ldd r10,Z+62 - ldd r11,Z+63 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r28,r18 - eor r29,r19 - eor 
r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - eor r14,r12 - eor r15,r13 - eor r16,r24 - eor r17,r25 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - ldd r18,Z+44 - ldd r19,Z+45 - ldd r20,Z+46 - ldd r21,Z+47 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - eor r14,r8 - eor r15,r9 - eor r12,r10 - eor r13,r11 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Z+60,r8 - std Z+61,r9 - std Z+62,r10 - std Z+63,r11 - ldd r8,Z+4 - ldd r9,Z+5 - ldd r10,Z+6 - ldd r11,Z+7 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - std Z+28,r4 - std Z+29,r5 - std Z+30,r6 - std Z+31,r7 - std Z+36,r8 - std Z+37,r9 - std Z+38,r10 - std Z+39,r11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - ldd r8,Z+52 - ldd r9,Z+53 - ldd r10,Z+54 - ldd r11,Z+55 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - ldd r0,Z+60 - eor r14,r0 - ldd r0,Z+61 - eor r15,r0 - ldd r0,Z+62 - eor r12,r0 - ldd r0,Z+63 - eor r13,r0 - std Z+20,r14 - std Z+21,r15 - std Z+22,r12 - std Z+23,r13 - movw r4,r18 - movw r6,r20 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - std Z+48,r22 - std Z+49,r23 - std Z+50,r26 - std Z+51,r27 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - eor r18,r16 - eor r19,r17 - eor r20,r24 - eor r21,r25 - eor r16,r28 - eor r17,r29 - eor r24,r2 - eor r25,r3 - ldd r14,Z+24 - ldd r15,Z+25 - ldd r12,Z+26 - ldd r13,Z+27 - std Z+56,r14 - std Z+57,r15 - std Z+58,r12 - std Z+59,r13 - ld r14,Z - ldd r15,Z+1 - ldd r12,Z+2 - ldd r13,Z+3 - eor r22,r14 - eor r23,r15 - eor r26,r12 - eor r27,r13 - std Z+24,r22 - std Z+25,r23 - std Z+26,r26 - std Z+27,r27 - std Z+32,r14 - std Z+33,r15 - std Z+34,r12 - std Z+35,r13 - ldd r14,Z+8 - ldd r15,Z+9 - ldd r12,Z+10 - ldd r13,Z+11 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - movw r22,r18 - movw r26,r20 - std Z+40,r14 - std Z+41,r15 - std Z+42,r12 - std Z+43,r13 - ldd r28,Z+48 - ldd r29,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r14,Z+16 - ldd r15,Z+17 - ldd r12,Z+18 - ldd r13,Z+19 - eor r28,r14 - eor r29,r15 - eor r2,r12 - eor r3,r13 - std Z+48,r14 - std Z+49,r15 - std Z+50,r12 - std Z+51,r13 - ldd r0,Z+56 - eor r16,r0 - ldd r0,Z+57 - eor r17,r0 - ldd r0,Z+58 - eor r24,r0 - ldd r0,Z+59 - eor r25,r0 - std Z+16,r16 - std Z+17,r17 - std Z+18,r24 - std Z+19,r25 - ret -1189: - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop 
r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sparkle_512, .-sparkle_512 - -#endif diff --git a/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/internal-sparkle.c b/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/internal-sparkle.c deleted file mode 100644 index 4a4c0fb..0000000 --- a/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/internal-sparkle.c +++ /dev/null @@ -1,382 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-sparkle.h" - -#if !defined(__AVR__) - -/* The 8 basic round constants from the specification */ -#define RC_0 0xB7E15162 -#define RC_1 0xBF715880 -#define RC_2 0x38B4DA56 -#define RC_3 0x324E7738 -#define RC_4 0xBB1185EB -#define RC_5 0x4F7C7B57 -#define RC_6 0xCFBFA1C8 -#define RC_7 0xC2B3293D - -/* Round constants for all SPARKLE steps; maximum of 12 for SPARKLE-512 */ -static uint32_t const sparkle_rc[12] = { - RC_0, RC_1, RC_2, RC_3, RC_4, RC_5, RC_6, RC_7, - RC_0, RC_1, RC_2, RC_3 -}; - -/** - * \brief Alzette block cipher that implements the ARXbox layer of the - * SPARKLE permutation. - * - * \param x Left half of the 64-bit block. - * \param y Right half of the 64-bit block. - * \param k 32-bit round key. 
- */ -#define alzette(x, y, k) \ - do { \ - (x) += leftRotate1((y)); \ - (y) ^= leftRotate8((x)); \ - (x) ^= (k); \ - (x) += leftRotate15((y)); \ - (y) ^= leftRotate15((x)); \ - (x) ^= (k); \ - (x) += (y); \ - (y) ^= leftRotate1((x)); \ - (x) ^= (k); \ - (x) += leftRotate8((y)); \ - (y) ^= leftRotate16((x)); \ - (x) ^= (k); \ - } while (0) - -void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps) -{ - uint32_t x0, x1, x2, x3; - uint32_t y0, y1, y2, y3; - uint32_t tx, ty; - unsigned step; - - /* Load the SPARKLE-256 state up into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = s[0]; - y0 = s[1]; - x1 = s[2]; - y1 = s[3]; - x2 = s[4]; - y2 = s[5]; - x3 = s[6]; - y3 = s[7]; -#else - x0 = le_load_word32((const uint8_t *)&(s[0])); - y0 = le_load_word32((const uint8_t *)&(s[1])); - x1 = le_load_word32((const uint8_t *)&(s[2])); - y1 = le_load_word32((const uint8_t *)&(s[3])); - x2 = le_load_word32((const uint8_t *)&(s[4])); - y2 = le_load_word32((const uint8_t *)&(s[5])); - x3 = le_load_word32((const uint8_t *)&(s[6])); - y3 = le_load_word32((const uint8_t *)&(s[7])); -#endif - - /* Perform all requested steps */ - for (step = 0; step < steps; ++step) { - /* Add round constants */ - y0 ^= sparkle_rc[step]; - y1 ^= step; - - /* ARXbox layer */ - alzette(x0, y0, RC_0); - alzette(x1, y1, RC_1); - alzette(x2, y2, RC_2); - alzette(x3, y3, RC_3); - - /* Linear layer */ - tx = x0 ^ x1; - ty = y0 ^ y1; - tx = leftRotate16(tx ^ (tx << 16)); - ty = leftRotate16(ty ^ (ty << 16)); - y2 ^= tx; - tx ^= y3; - y3 = y1; - y1 = y2 ^ y0; - y2 = y0; - y0 = tx ^ y3; - x2 ^= ty; - ty ^= x3; - x3 = x1; - x1 = x2 ^ x0; - x2 = x0; - x0 = ty ^ x3; - } - - /* Write the local variables back to the SPARKLE-256 state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s[0] = x0; - s[1] = y0; - s[2] = x1; - s[3] = y1; - s[4] = x2; - s[5] = y2; - s[6] = x3; - s[7] = y3; -#else - le_store_word32((uint8_t *)&(s[0]), x0); - le_store_word32((uint8_t *)&(s[1]), y0); - le_store_word32((uint8_t *)&(s[2]), x1); - le_store_word32((uint8_t *)&(s[3]), y1); - le_store_word32((uint8_t *)&(s[4]), x2); - le_store_word32((uint8_t *)&(s[5]), y2); - le_store_word32((uint8_t *)&(s[6]), x3); - le_store_word32((uint8_t *)&(s[7]), y3); -#endif -} - -void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps) -{ - uint32_t x0, x1, x2, x3, x4, x5; - uint32_t y0, y1, y2, y3, y4, y5; - uint32_t tx, ty; - unsigned step; - - /* Load the SPARKLE-384 state up into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = s[0]; - y0 = s[1]; - x1 = s[2]; - y1 = s[3]; - x2 = s[4]; - y2 = s[5]; - x3 = s[6]; - y3 = s[7]; - x4 = s[8]; - y4 = s[9]; - x5 = s[10]; - y5 = s[11]; -#else - x0 = le_load_word32((const uint8_t *)&(s[0])); - y0 = le_load_word32((const uint8_t *)&(s[1])); - x1 = le_load_word32((const uint8_t *)&(s[2])); - y1 = le_load_word32((const uint8_t *)&(s[3])); - x2 = le_load_word32((const uint8_t *)&(s[4])); - y2 = le_load_word32((const uint8_t *)&(s[5])); - x3 = le_load_word32((const uint8_t *)&(s[6])); - y3 = le_load_word32((const uint8_t *)&(s[7])); - x4 = le_load_word32((const uint8_t *)&(s[8])); - y4 = le_load_word32((const uint8_t *)&(s[9])); - x5 = le_load_word32((const uint8_t *)&(s[10])); - y5 = le_load_word32((const uint8_t *)&(s[11])); -#endif - - /* Perform all requested steps */ - for (step = 0; step < steps; ++step) { - /* Add round constants */ - y0 ^= sparkle_rc[step]; - y1 ^= step; - - /* ARXbox layer */ - alzette(x0, y0, RC_0); - alzette(x1, y1, RC_1); - alzette(x2, y2, RC_2); - alzette(x3, y3, RC_3); 
- alzette(x4, y4, RC_4); - alzette(x5, y5, RC_5); - - /* Linear layer */ - tx = x0 ^ x1 ^ x2; - ty = y0 ^ y1 ^ y2; - tx = leftRotate16(tx ^ (tx << 16)); - ty = leftRotate16(ty ^ (ty << 16)); - y3 ^= tx; - y4 ^= tx; - tx ^= y5; - y5 = y2; - y2 = y3 ^ y0; - y3 = y0; - y0 = y4 ^ y1; - y4 = y1; - y1 = tx ^ y5; - x3 ^= ty; - x4 ^= ty; - ty ^= x5; - x5 = x2; - x2 = x3 ^ x0; - x3 = x0; - x0 = x4 ^ x1; - x4 = x1; - x1 = ty ^ x5; - } - - /* Write the local variables back to the SPARKLE-384 state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s[0] = x0; - s[1] = y0; - s[2] = x1; - s[3] = y1; - s[4] = x2; - s[5] = y2; - s[6] = x3; - s[7] = y3; - s[8] = x4; - s[9] = y4; - s[10] = x5; - s[11] = y5; -#else - le_store_word32((uint8_t *)&(s[0]), x0); - le_store_word32((uint8_t *)&(s[1]), y0); - le_store_word32((uint8_t *)&(s[2]), x1); - le_store_word32((uint8_t *)&(s[3]), y1); - le_store_word32((uint8_t *)&(s[4]), x2); - le_store_word32((uint8_t *)&(s[5]), y2); - le_store_word32((uint8_t *)&(s[6]), x3); - le_store_word32((uint8_t *)&(s[7]), y3); - le_store_word32((uint8_t *)&(s[8]), x4); - le_store_word32((uint8_t *)&(s[9]), y4); - le_store_word32((uint8_t *)&(s[10]), x5); - le_store_word32((uint8_t *)&(s[11]), y5); -#endif -} - -void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps) -{ - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t y0, y1, y2, y3, y4, y5, y6, y7; - uint32_t tx, ty; - unsigned step; - - /* Load the SPARKLE-512 state up into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = s[0]; - y0 = s[1]; - x1 = s[2]; - y1 = s[3]; - x2 = s[4]; - y2 = s[5]; - x3 = s[6]; - y3 = s[7]; - x4 = s[8]; - y4 = s[9]; - x5 = s[10]; - y5 = s[11]; - x6 = s[12]; - y6 = s[13]; - x7 = s[14]; - y7 = s[15]; -#else - x0 = le_load_word32((const uint8_t *)&(s[0])); - y0 = le_load_word32((const uint8_t *)&(s[1])); - x1 = le_load_word32((const uint8_t *)&(s[2])); - y1 = le_load_word32((const uint8_t *)&(s[3])); - x2 = le_load_word32((const uint8_t *)&(s[4])); - y2 = le_load_word32((const uint8_t *)&(s[5])); - x3 = le_load_word32((const uint8_t *)&(s[6])); - y3 = le_load_word32((const uint8_t *)&(s[7])); - x4 = le_load_word32((const uint8_t *)&(s[8])); - y4 = le_load_word32((const uint8_t *)&(s[9])); - x5 = le_load_word32((const uint8_t *)&(s[10])); - y5 = le_load_word32((const uint8_t *)&(s[11])); - x6 = le_load_word32((const uint8_t *)&(s[12])); - y6 = le_load_word32((const uint8_t *)&(s[13])); - x7 = le_load_word32((const uint8_t *)&(s[14])); - y7 = le_load_word32((const uint8_t *)&(s[15])); -#endif - - /* Perform all requested steps */ - for (step = 0; step < steps; ++step) { - /* Add round constants */ - y0 ^= sparkle_rc[step]; - y1 ^= step; - - /* ARXbox layer */ - alzette(x0, y0, RC_0); - alzette(x1, y1, RC_1); - alzette(x2, y2, RC_2); - alzette(x3, y3, RC_3); - alzette(x4, y4, RC_4); - alzette(x5, y5, RC_5); - alzette(x6, y6, RC_6); - alzette(x7, y7, RC_7); - - /* Linear layer */ - tx = x0 ^ x1 ^ x2 ^ x3; - ty = y0 ^ y1 ^ y2 ^ y3; - tx = leftRotate16(tx ^ (tx << 16)); - ty = leftRotate16(ty ^ (ty << 16)); - y4 ^= tx; - y5 ^= tx; - y6 ^= tx; - tx ^= y7; - y7 = y3; - y3 = y4 ^ y0; - y4 = y0; - y0 = y5 ^ y1; - y5 = y1; - y1 = y6 ^ y2; - y6 = y2; - y2 = tx ^ y7; - x4 ^= ty; - x5 ^= ty; - x6 ^= ty; - ty ^= x7; - x7 = x3; - x3 = x4 ^ x0; - x4 = x0; - x0 = x5 ^ x1; - x5 = x1; - x1 = x6 ^ x2; - x6 = x2; - x2 = ty ^ x7; - } - - /* Write the local variables back to the SPARKLE-512 state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s[0] = x0; - s[1] = y0; - s[2] = x1; - s[3] = y1; - s[4] = x2; - s[5] = y2; 
- s[6] = x3; - s[7] = y3; - s[8] = x4; - s[9] = y4; - s[10] = x5; - s[11] = y5; - s[12] = x6; - s[13] = y6; - s[14] = x7; - s[15] = y7; -#else - le_store_word32((uint8_t *)&(s[0]), x0); - le_store_word32((uint8_t *)&(s[1]), y0); - le_store_word32((uint8_t *)&(s[2]), x1); - le_store_word32((uint8_t *)&(s[3]), y1); - le_store_word32((uint8_t *)&(s[4]), x2); - le_store_word32((uint8_t *)&(s[5]), y2); - le_store_word32((uint8_t *)&(s[6]), x3); - le_store_word32((uint8_t *)&(s[7]), y3); - le_store_word32((uint8_t *)&(s[8]), x4); - le_store_word32((uint8_t *)&(s[9]), y4); - le_store_word32((uint8_t *)&(s[10]), x5); - le_store_word32((uint8_t *)&(s[11]), y5); - le_store_word32((uint8_t *)&(s[12]), x6); - le_store_word32((uint8_t *)&(s[13]), y6); - le_store_word32((uint8_t *)&(s[14]), x7); - le_store_word32((uint8_t *)&(s[15]), y7); -#endif -} - -#endif diff --git a/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/internal-sparkle.h b/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/internal-sparkle.h deleted file mode 100644 index fbdabc1..0000000 --- a/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/internal-sparkle.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SPARKLE_H -#define LW_INTERNAL_SPARKLE_H - -#include "internal-util.h" - -/** - * \file internal-sparkle.h - * \brief Internal implementation of the SPARKLE permutation. - * - * References: https://www.cryptolux.org/index.php/Sparkle - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the state for SPARKLE-256. - */ -#define SPARKLE_256_STATE_SIZE 8 - -/** - * \brief Size of the state for SPARKLE-384. - */ -#define SPARKLE_384_STATE_SIZE 12 - -/** - * \brief Size of the state for SPARKLE-512. - */ -#define SPARKLE_512_STATE_SIZE 16 - -/** - * \brief Performs the SPARKLE-256 permutation. - * - * \param s The words of the SPARKLE-256 state in little-endian byte order. - * \param steps The number of steps to perform, 7 or 10. - */ -void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps); - -/** - * \brief Performs the SPARKLE-384 permutation. - * - * \param s The words of the SPARKLE-384 state in little-endian byte order. - * \param steps The number of steps to perform, 7 or 11. - */ -void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps); - -/** - * \brief Performs the SPARKLE-512 permutation. 
- * - * \param s The words of the SPARKLE-512 state in little-endian byte order. - * \param steps The number of steps to perform, 8 or 12. - */ -void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/internal-util.h b/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/sparkle.c b/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/sparkle.c deleted file mode 100644 index e2aa25a..0000000 --- a/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/sparkle.c +++ /dev/null @@ -1,1135 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "sparkle.h" -#include "internal-sparkle.h" -#include - -aead_cipher_t const schwaemm_256_128_cipher = { - "Schwaemm256-128", - SCHWAEMM_256_128_KEY_SIZE, - SCHWAEMM_256_128_NONCE_SIZE, - SCHWAEMM_256_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_256_128_aead_encrypt, - schwaemm_256_128_aead_decrypt -}; - -aead_cipher_t const schwaemm_192_192_cipher = { - "Schwaemm192-192", - SCHWAEMM_192_192_KEY_SIZE, - SCHWAEMM_192_192_NONCE_SIZE, - SCHWAEMM_192_192_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_192_192_aead_encrypt, - schwaemm_192_192_aead_decrypt -}; - -aead_cipher_t const schwaemm_128_128_cipher = { - "Schwaemm128-128", - SCHWAEMM_128_128_KEY_SIZE, - SCHWAEMM_128_128_NONCE_SIZE, - SCHWAEMM_128_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_128_128_aead_encrypt, - schwaemm_128_128_aead_decrypt -}; - -aead_cipher_t const schwaemm_256_256_cipher = { - "Schwaemm256-256", - SCHWAEMM_256_256_KEY_SIZE, - SCHWAEMM_256_256_NONCE_SIZE, - SCHWAEMM_256_256_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_256_256_aead_encrypt, - schwaemm_256_256_aead_decrypt -}; - -aead_hash_algorithm_t const esch_256_hash_algorithm = { - "Esch256", - sizeof(esch_256_hash_state_t), - ESCH_256_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - esch_256_hash, - (aead_hash_init_t)esch_256_hash_init, - (aead_hash_update_t)esch_256_hash_update, - (aead_hash_finalize_t)esch_256_hash_finalize, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const esch_384_hash_algorithm = { - "Esch384", - sizeof(esch_384_hash_state_t), - ESCH_384_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - esch_384_hash, - (aead_hash_init_t)esch_384_hash_init, - (aead_hash_update_t)esch_384_hash_update, - (aead_hash_finalize_t)esch_384_hash_finalize, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \def DOMAIN(value) - * \brief Build a domain separation value as a 32-bit word. - * - * \param value The base value. - * \return The domain separation value as a 32-bit word. - */ -#if defined(LW_UTIL_LITTLE_ENDIAN) -#define DOMAIN(value) (((uint32_t)(value)) << 24) -#else -#define DOMAIN(value) (value) -#endif - -/** - * \brief Rate at which bytes are processed by Schwaemm256-128. - */ -#define SCHWAEMM_256_128_RATE 32 - -/** - * \brief Pointer to the left of the state for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_RIGHT(s) \ - (SCHWAEMM_256_128_LEFT(s) + SCHWAEMM_256_128_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm256-128. - * - * \param s SPARKLE-384 state. - */ -#define schwaemm_256_128_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[4] ^ s[8]; \ - s[4] ^= t ^ s[8]; \ - t = s[1]; \ - s[1] = s[5] ^ s[9]; \ - s[5] ^= t ^ s[9]; \ - t = s[2]; \ - s[2] = s[6] ^ s[10]; \ - s[6] ^= t ^ s[10]; \ - t = s[3]; \ - s[3] = s[7] ^ s[11]; \ - s[7] ^= t ^ s[11]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm256-128. - * - * \param s SPARKLE-384 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
- */ -static void schwaemm_256_128_authenticate - (uint32_t s[SPARKLE_384_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_256_128_RATE) { - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); - sparkle_384(s, 7); - ad += SCHWAEMM_256_128_RATE; - adlen -= SCHWAEMM_256_128_RATE; - } - if (adlen == SCHWAEMM_256_128_RATE) { - s[11] ^= DOMAIN(0x05); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[11] ^= DOMAIN(0x04); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); -} - -int schwaemm_256_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - uint8_t block[SCHWAEMM_256_128_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_256_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_128_LEFT(s), npub, SCHWAEMM_256_128_NONCE_SIZE); - memcpy(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_128_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - sparkle_384(s, 7); - memcpy(c, block, SCHWAEMM_256_128_RATE); - c += SCHWAEMM_256_128_RATE; - m += SCHWAEMM_256_128_RATE; - mlen -= SCHWAEMM_256_128_RATE; - } - if (mlen == SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); - s[11] ^= DOMAIN(0x07); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - memcpy(c, block, SCHWAEMM_256_128_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[11] ^= DOMAIN(0x06); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_384(s, 11); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_TAG_SIZE); - return 0; -} - -int schwaemm_256_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_256_128_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_256_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_128_LEFT(s), npub, SCHWAEMM_256_128_NONCE_SIZE); - memcpy(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_128_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_256_128_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - sparkle_384(s, 7); - c += SCHWAEMM_256_128_RATE; - m += SCHWAEMM_256_128_RATE; - clen -= SCHWAEMM_256_128_RATE; - } - if (clen == SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); - s[11] ^= DOMAIN(0x07); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[11] ^= DOMAIN(0x06); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_256_128_RIGHT(s), c, SCHWAEMM_256_128_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Schwaemm192-192. - */ -#define SCHWAEMM_192_192_RATE 24 - -/** - * \brief Pointer to the left of the state for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_RIGHT(s) \ - (SCHWAEMM_192_192_LEFT(s) + SCHWAEMM_192_192_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm192-192. - * - * \param s SPARKLE-384 state. - */ -#define schwaemm_192_192_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[3] ^ s[6]; \ - s[3] ^= t ^ s[9]; \ - t = s[1]; \ - s[1] = s[4] ^ s[7]; \ - s[4] ^= t ^ s[10]; \ - t = s[2]; \ - s[2] = s[5] ^ s[8]; \ - s[5] ^= t ^ s[11]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm192-192. - * - * \param s SPARKLE-384 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
- */ -static void schwaemm_192_192_authenticate - (uint32_t s[SPARKLE_384_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_192_192_RATE) { - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); - sparkle_384(s, 7); - ad += SCHWAEMM_192_192_RATE; - adlen -= SCHWAEMM_192_192_RATE; - } - if (adlen == SCHWAEMM_192_192_RATE) { - s[11] ^= DOMAIN(0x09); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[11] ^= DOMAIN(0x08); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); -} - -int schwaemm_192_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - uint8_t block[SCHWAEMM_192_192_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_192_192_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_192_192_LEFT(s), npub, SCHWAEMM_192_192_NONCE_SIZE); - memcpy(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_192_192_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - sparkle_384(s, 7); - memcpy(c, block, SCHWAEMM_192_192_RATE); - c += SCHWAEMM_192_192_RATE; - m += SCHWAEMM_192_192_RATE; - mlen -= SCHWAEMM_192_192_RATE; - } - if (mlen == SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); - s[11] ^= DOMAIN(0x0B); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - memcpy(c, block, SCHWAEMM_192_192_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[11] ^= DOMAIN(0x0A); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_384(s, 11); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_TAG_SIZE); - return 0; -} - -int schwaemm_192_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_192_192_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_192_192_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_192_192_LEFT(s), npub, SCHWAEMM_192_192_NONCE_SIZE); - memcpy(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_192_192_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_192_192_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - sparkle_384(s, 7); - c += SCHWAEMM_192_192_RATE; - m += SCHWAEMM_192_192_RATE; - clen -= SCHWAEMM_192_192_RATE; - } - if (clen == SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); - s[11] ^= DOMAIN(0x0B); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[11] ^= DOMAIN(0x0A); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_192_192_RIGHT(s), c, SCHWAEMM_192_192_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Schwaemm128-128. - */ -#define SCHWAEMM_128_128_RATE 16 - -/** - * \brief Pointer to the left of the state for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_RIGHT(s) \ - (SCHWAEMM_128_128_LEFT(s) + SCHWAEMM_128_128_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm128-128. - * - * \param s SPARKLE-256 state. - */ -#define schwaemm_128_128_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[2] ^ s[4]; \ - s[2] ^= t ^ s[6]; \ - t = s[1]; \ - s[1] = s[3] ^ s[5]; \ - s[3] ^= t ^ s[7]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm128-128. - * - * \param s SPARKLE-256 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
- */ -static void schwaemm_128_128_authenticate - (uint32_t s[SPARKLE_256_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_128_128_RATE) { - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); - sparkle_256(s, 7); - ad += SCHWAEMM_128_128_RATE; - adlen -= SCHWAEMM_128_128_RATE; - } - if (adlen == SCHWAEMM_128_128_RATE) { - s[7] ^= DOMAIN(0x05); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[7] ^= DOMAIN(0x04); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_256(s, 10); -} - -int schwaemm_128_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_256_STATE_SIZE]; - uint8_t block[SCHWAEMM_128_128_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_128_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_128_128_LEFT(s), npub, SCHWAEMM_128_128_NONCE_SIZE); - memcpy(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_KEY_SIZE); - sparkle_256(s, 10); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_128_128_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - sparkle_256(s, 7); - memcpy(c, block, SCHWAEMM_128_128_RATE); - c += SCHWAEMM_128_128_RATE; - m += SCHWAEMM_128_128_RATE; - mlen -= SCHWAEMM_128_128_RATE; - } - if (mlen == SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); - s[7] ^= DOMAIN(0x07); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - memcpy(c, block, SCHWAEMM_128_128_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[7] ^= DOMAIN(0x06); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_256(s, 10); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_TAG_SIZE); - return 0; -} - -int schwaemm_128_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_256_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_128_128_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_128_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_128_128_LEFT(s), npub, SCHWAEMM_128_128_NONCE_SIZE); - memcpy(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_KEY_SIZE); - sparkle_256(s, 10); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_128_128_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_128_128_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - sparkle_256(s, 7); - c += SCHWAEMM_128_128_RATE; - m += SCHWAEMM_128_128_RATE; - clen -= SCHWAEMM_128_128_RATE; - } - if (clen == SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); - s[7] ^= DOMAIN(0x07); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[7] ^= DOMAIN(0x06); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_256(s, 10); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_128_128_RIGHT(s), c, SCHWAEMM_128_128_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Schwaemm256-256. - */ -#define SCHWAEMM_256_256_RATE 32 - -/** - * \brief Pointer to the left of the state for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_RIGHT(s) \ - (SCHWAEMM_256_256_LEFT(s) + SCHWAEMM_256_256_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm256-256. - * - * \param s SPARKLE-512 state. - */ -#define schwaemm_256_256_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[4] ^ s[8]; \ - s[4] ^= t ^ s[12]; \ - t = s[1]; \ - s[1] = s[5] ^ s[9]; \ - s[5] ^= t ^ s[13]; \ - t = s[2]; \ - s[2] = s[6] ^ s[10]; \ - s[6] ^= t ^ s[14]; \ - t = s[3]; \ - s[3] = s[7] ^ s[11]; \ - s[7] ^= t ^ s[15]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm256-256. - * - * \param s SPARKLE-512 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
- */ -static void schwaemm_256_256_authenticate - (uint32_t s[SPARKLE_512_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_256_256_RATE) { - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); - sparkle_512(s, 8); - ad += SCHWAEMM_256_256_RATE; - adlen -= SCHWAEMM_256_256_RATE; - } - if (adlen == SCHWAEMM_256_256_RATE) { - s[15] ^= DOMAIN(0x11); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[15] ^= DOMAIN(0x10); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_512(s, 12); -} - -int schwaemm_256_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_512_STATE_SIZE]; - uint8_t block[SCHWAEMM_256_256_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_256_256_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_256_LEFT(s), npub, SCHWAEMM_256_256_NONCE_SIZE); - memcpy(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_KEY_SIZE); - sparkle_512(s, 12); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_256_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - sparkle_512(s, 8); - memcpy(c, block, SCHWAEMM_256_256_RATE); - c += SCHWAEMM_256_256_RATE; - m += SCHWAEMM_256_256_RATE; - mlen -= SCHWAEMM_256_256_RATE; - } - if (mlen == SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); - s[15] ^= DOMAIN(0x13); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - memcpy(c, block, SCHWAEMM_256_256_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[15] ^= DOMAIN(0x12); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_512(s, 12); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_TAG_SIZE); - return 0; -} - -int schwaemm_256_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_512_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_256_256_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_256_256_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_256_LEFT(s), npub, SCHWAEMM_256_256_NONCE_SIZE); - memcpy(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_KEY_SIZE); - sparkle_512(s, 12); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_256_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_256_256_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - sparkle_512(s, 8); - c += SCHWAEMM_256_256_RATE; - m += SCHWAEMM_256_256_RATE; - clen -= SCHWAEMM_256_256_RATE; - } - if (clen == SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); - s[15] ^= DOMAIN(0x13); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[15] ^= DOMAIN(0x12); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_512(s, 12); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_256_256_RIGHT(s), c, SCHWAEMM_256_256_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Esch256. - */ -#define ESCH_256_RATE 16 - -/** - * \brief Perform the M3 step for Esch256 to mix the input with the state. - * - * \param s SPARKLE-384 state. - * \param block Block of input data that has been padded to the rate. - * \param domain Domain separator for this phase. - */ -#define esch_256_m3(s, block, domain) \ - do { \ - uint32_t tx = (block)[0] ^ (block)[2]; \ - uint32_t ty = (block)[1] ^ (block)[3]; \ - tx = leftRotate16(tx ^ (tx << 16)); \ - ty = leftRotate16(ty ^ (ty << 16)); \ - s[0] ^= (block)[0] ^ ty; \ - s[1] ^= (block)[1] ^ tx; \ - s[2] ^= (block)[2] ^ ty; \ - s[3] ^= (block)[3] ^ tx; \ - if ((domain) != 0) \ - s[5] ^= DOMAIN(domain); \ - s[4] ^= ty; \ - s[5] ^= tx; \ - } while (0) - -/** @cond esch_256 */ - -/** - * \brief Word-based state for the Esch256 incremental hash mode. 
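The leftRotate16(tx ^ (tx << 16)) expression in esch_256_m3 above is an unrolled form of a 32-bit Feistel-like mixing function: it is algebraically equivalent to rotating the word by 16 bits and XORing in its low half, which appears to be how the SPARKLE submission writes its ell() function. A quick equivalence check (editor's sketch, not from the patched sources):

#include <assert.h>
#include <stdint.h>

static uint32_t rotl32(uint32_t x, unsigned n) { return (x << n) | (x >> (32 - n)); }

/* Form used in esch_256_m3 / esch_384_m4 above */
static uint32_t ell_as_coded(uint32_t x) { return rotl32(x ^ (x << 16), 16); }

/* Equivalent form: (x <<< 16) ^ (x & 0xffff) */
static uint32_t ell_alt(uint32_t x) { return rotl32(x, 16) ^ (x & 0xffffu); }

int main(void)
{
    const uint32_t xs[] = {0, 1, 0x12345678u, 0xdeadbeefu, 0xffffffffu};
    for (unsigned i = 0; i < sizeof(xs) / sizeof(xs[0]); ++i)
        assert(ell_as_coded(xs[i]) == ell_alt(xs[i]));
    return 0;
}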
- */ -typedef union -{ - struct { - uint32_t state[SPARKLE_384_STATE_SIZE]; - uint32_t block[4]; - unsigned char count; - } s; - unsigned long long align; - -} esch_256_hash_state_wt; - -/** @endcond */ - -int esch_256_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - uint32_t block[ESCH_256_RATE / 4]; - memset(s, 0, sizeof(s)); - while (inlen > ESCH_256_RATE) { - memcpy(block, in, ESCH_256_RATE); - esch_256_m3(s, block, 0x00); - sparkle_384(s, 7); - in += ESCH_256_RATE; - inlen -= ESCH_256_RATE; - } - if (inlen == ESCH_256_RATE) { - memcpy(block, in, ESCH_256_RATE); - esch_256_m3(s, block, 0x02); - } else { - unsigned temp = (unsigned)inlen; - memcpy(block, in, temp); - ((unsigned char *)block)[temp] = 0x80; - memset(((unsigned char *)block) + temp + 1, 0, - ESCH_256_RATE - temp - 1); - esch_256_m3(s, block, 0x01); - } - sparkle_384(s, 11); - memcpy(out, s, ESCH_256_RATE); - sparkle_384(s, 7); - memcpy(out + ESCH_256_RATE, s, ESCH_256_RATE); - return 0; -} - -void esch_256_hash_init(esch_256_hash_state_t *state) -{ - memset(state, 0, sizeof(esch_256_hash_state_t)); -} - -void esch_256_hash_update - (esch_256_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - esch_256_hash_state_wt *st = (esch_256_hash_state_wt *)state; - unsigned temp; - while (inlen > 0) { - if (st->s.count == ESCH_256_RATE) { - esch_256_m3(st->s.state, st->s.block, 0x00); - sparkle_384(st->s.state, 7); - st->s.count = 0; - } - temp = ESCH_256_RATE - st->s.count; - if (temp > inlen) - temp = (unsigned)inlen; - memcpy(((unsigned char *)(st->s.block)) + st->s.count, in, temp); - st->s.count += temp; - in += temp; - inlen -= temp; - } -} - -void esch_256_hash_finalize - (esch_256_hash_state_t *state, unsigned char *out) -{ - esch_256_hash_state_wt *st = (esch_256_hash_state_wt *)state; - - /* Pad and process the last block */ - if (st->s.count == ESCH_256_RATE) { - esch_256_m3(st->s.state, st->s.block, 0x02); - } else { - unsigned temp = st->s.count; - ((unsigned char *)(st->s.block))[temp] = 0x80; - memset(((unsigned char *)(st->s.block)) + temp + 1, 0, - ESCH_256_RATE - temp - 1); - esch_256_m3(st->s.state, st->s.block, 0x01); - } - sparkle_384(st->s.state, 11); - - /* Generate the final hash value */ - memcpy(out, st->s.state, ESCH_256_RATE); - sparkle_384(st->s.state, 7); - memcpy(out + ESCH_256_RATE, st->s.state, ESCH_256_RATE); -} - -/** - * \brief Rate at which bytes are processed by Esch384. - */ -#define ESCH_384_RATE 16 - -/** - * \brief Perform the M4 step for Esch384 to mix the input with the state. - * - * \param s SPARKLE-512 state. - * \param block Block of input data that has been padded to the rate. - * \param domain Domain separator for this phase. - */ -#define esch_384_m4(s, block, domain) \ - do { \ - uint32_t tx = block[0] ^ block[2]; \ - uint32_t ty = block[1] ^ block[3]; \ - tx = leftRotate16(tx ^ (tx << 16)); \ - ty = leftRotate16(ty ^ (ty << 16)); \ - s[0] ^= block[0] ^ ty; \ - s[1] ^= block[1] ^ tx; \ - s[2] ^= block[2] ^ ty; \ - s[3] ^= block[3] ^ tx; \ - if ((domain) != 0) \ - s[7] ^= DOMAIN(domain); \ - s[4] ^= ty; \ - s[5] ^= tx; \ - s[6] ^= ty; \ - s[7] ^= tx; \ - } while (0) - -/** @cond esch_384 */ - -/** - * \brief Word-based state for the Esch384 incremental hash mode. 
- */ -typedef union -{ - struct { - uint32_t state[SPARKLE_512_STATE_SIZE]; - uint32_t block[4]; - unsigned char count; - } s; - unsigned long long align; - -} esch_384_hash_state_wt; - -/** @endcond */ - -int esch_384_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - uint32_t s[SPARKLE_512_STATE_SIZE]; - uint32_t block[ESCH_256_RATE / 4]; - memset(s, 0, sizeof(s)); - while (inlen > ESCH_384_RATE) { - memcpy(block, in, ESCH_384_RATE); - esch_384_m4(s, block, 0x00); - sparkle_512(s, 8); - in += ESCH_384_RATE; - inlen -= ESCH_384_RATE; - } - if (inlen == ESCH_384_RATE) { - memcpy(block, in, ESCH_384_RATE); - esch_384_m4(s, block, 0x02); - } else { - unsigned temp = (unsigned)inlen; - memcpy(block, in, temp); - ((unsigned char *)block)[temp] = 0x80; - memset(((unsigned char *)block) + temp + 1, 0, - ESCH_384_RATE - temp - 1); - esch_384_m4(s, block, 0x01); - } - sparkle_512(s, 12); - memcpy(out, s, ESCH_384_RATE); - sparkle_512(s, 8); - memcpy(out + ESCH_384_RATE, s, ESCH_384_RATE); - sparkle_512(s, 8); - memcpy(out + ESCH_384_RATE * 2, s, ESCH_384_RATE); - return 0; -} - -void esch_384_hash_init(esch_384_hash_state_t *state) -{ - memset(state, 0, sizeof(esch_384_hash_state_t)); -} - -void esch_384_hash_update - (esch_384_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - esch_384_hash_state_wt *st = (esch_384_hash_state_wt *)state; - unsigned temp; - while (inlen > 0) { - if (st->s.count == ESCH_384_RATE) { - esch_384_m4(st->s.state, st->s.block, 0x00); - sparkle_512(st->s.state, 8); - st->s.count = 0; - } - temp = ESCH_384_RATE - st->s.count; - if (temp > inlen) - temp = (unsigned)inlen; - memcpy(((unsigned char *)(st->s.block)) + st->s.count, in, temp); - st->s.count += temp; - in += temp; - inlen -= temp; - } -} - -void esch_384_hash_finalize - (esch_384_hash_state_t *state, unsigned char *out) -{ - esch_384_hash_state_wt *st = (esch_384_hash_state_wt *)state; - - /* Pad and process the last block */ - if (st->s.count == ESCH_384_RATE) { - esch_384_m4(st->s.state, st->s.block, 0x02); - } else { - unsigned temp = st->s.count; - ((unsigned char *)(st->s.block))[temp] = 0x80; - memset(((unsigned char *)(st->s.block)) + temp + 1, 0, - ESCH_384_RATE - temp - 1); - esch_384_m4(st->s.state, st->s.block, 0x01); - } - sparkle_512(st->s.state, 12); - - /* Generate the final hash value */ - memcpy(out, st->s.state, ESCH_384_RATE); - sparkle_512(st->s.state, 8); - memcpy(out + ESCH_384_RATE, st->s.state, ESCH_384_RATE); - sparkle_512(st->s.state, 8); - memcpy(out + ESCH_384_RATE * 2, st->s.state, ESCH_384_RATE); -} diff --git a/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/sparkle.h b/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/sparkle.h deleted file mode 100644 index dd0999e..0000000 --- a/sparkle/Implementations/crypto_hash/esch256v1/rhys-avr/sparkle.h +++ /dev/null @@ -1,515 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SPARKLE_H -#define LWCRYPTO_SPARKLE_H - -#include "aead-common.h" - -/** - * \file sparkle.h - * \brief Encryption and hash algorithms based on the SPARKLE permutation. - * - * SPARKLE is a family of encryption and hash algorithms that are based - * around the SPARKLE permutation. There are three versions of the - * permutation with 256-bit, 384-bit, and 512-bit state sizes. - * The algorithms in the family are: - * - * \li Schwaemm256-128 with a 128-bit key, a 256-bit nonce, and a 128-bit tag. - * This is the primary encryption algorithm in the family. - * \li Schwaemm192-192 with a 192-bit key, a 192-bit nonce, and a 192-bit tag. - * \li Schwaemm128-128 with a 128-bit key, a 128-bit nonce, and a 128-bit tag. - * \li Schwaemm256-256 with a 256-bit key, a 256-bit nonce, and a 256-bit tag. - * \li Esch256 hash algorithm with a 256-bit digest output. This is the - * primary hash algorithm in the family. - * \li Esch384 hash algorithm with a 384-bit digest output. - * - * References: https://www.cryptolux.org/index.php/Sparkle - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_NONCE_SIZE 32 - -/** - * \brief Size of the key for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_KEY_SIZE 24 - -/** - * \brief Size of the authentication tag for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_TAG_SIZE 24 - -/** - * \brief Size of the nonce for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_NONCE_SIZE 24 - -/** - * \brief Size of the key for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_NONCE_SIZE 16 - -/** - * \brief Size of the key for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_TAG_SIZE 32 - -/** - * \brief Size of the nonce for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_NONCE_SIZE 32 - -/** - * \brief Size of the hash output for Esch256. 
- */ -#define ESCH_256_HASH_SIZE 32 - -/** - * \brief Size of the hash output for Esch384. - */ -#define ESCH_384_HASH_SIZE 48 - -/** - * \brief Meta-information block for the Schwaemm256-128 cipher. - */ -extern aead_cipher_t const schwaemm_256_128_cipher; - -/** - * \brief Meta-information block for the Schwaemm192-192 cipher. - */ -extern aead_cipher_t const schwaemm_192_192_cipher; - -/** - * \brief Meta-information block for the Schwaemm128-128 cipher. - */ -extern aead_cipher_t const schwaemm_128_128_cipher; - -/** - * \brief Meta-information block for the Schwaemm256-256 cipher. - */ -extern aead_cipher_t const schwaemm_256_256_cipher; - -/** - * \brief Meta-information block for the Esch256 hash algorithm. - */ -extern aead_hash_algorithm_t const esch_256_hash_algorithm; - -/** - * \brief Meta-information block for the Esch384 hash algorithm. - */ -extern aead_hash_algorithm_t const esch_384_hash_algorithm; - -/** - * \brief State information for the Esch256 incremental hash mode. - */ -typedef union -{ - struct { - unsigned char state[48]; /**< Current hash state */ - unsigned char block[16]; /**< Partial input data block */ - unsigned char count; /**< Number of bytes in the current block */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} esch_256_hash_state_t; - -/** - * \brief State information for the Esch384 incremental hash mode. - */ -typedef union -{ - struct { - unsigned char state[64]; /**< Current hash state */ - unsigned char block[16]; /**< Partial input data block */ - unsigned char count; /**< Number of bytes in the current block */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} esch_384_hash_state_t; - -/** - * \brief Encrypts and authenticates a packet with Schwaemm256-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 32 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa schwaemm_256_128_aead_decrypt() - */ -int schwaemm_256_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm256-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 32 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_256_128_aead_encrypt() - */ -int schwaemm_256_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Schwaemm192-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 24 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 24 bytes in length. - * \param k Points to the 24 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa schwaemm_192_192_aead_decrypt() - */ -int schwaemm_192_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm192-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 24 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 24 bytes in length. - * \param k Points to the 24 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_192_192_aead_encrypt() - */ -int schwaemm_192_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Schwaemm128-128. - * - * \param c Buffer to receive the output. 
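A minimal round-trip sketch of the Schwaemm256-128 functions declared above (editor's example, not from the patched sources); the all-zero key and nonce are placeholders, and in real use each message must get a fresh nonce:

#include <stdio.h>
#include <string.h>
#include "sparkle.h"

int main(void)
{
    unsigned char key[SCHWAEMM_256_128_KEY_SIZE] = {0};      /* 16-byte key (demo value) */
    unsigned char nonce[SCHWAEMM_256_128_NONCE_SIZE] = {0};  /* 32-byte nonce (demo value) */
    const unsigned char msg[] = "hello sparkle";
    const unsigned char ad[]  = "header";
    unsigned char ct[sizeof(msg) + SCHWAEMM_256_128_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    /* Encrypt: the output is the message plus a 16-byte authentication tag */
    schwaemm_256_128_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                                  ad, sizeof(ad), NULL, nonce, key);

    /* Decrypt and verify: returns 0 on success */
    if (schwaemm_256_128_aead_decrypt(pt, &ptlen, NULL, ct, ctlen,
                                      ad, sizeof(ad), nonce, key) != 0)
        return 1;
    printf("round trip ok: %d\n", memcmp(pt, msg, sizeof(msg)) == 0);

    /* Any bit flip in the ciphertext or tag makes decryption return -1 */
    ct[0] ^= 0x01;
    printf("tampered result: %d\n",
           schwaemm_256_128_aead_decrypt(pt, &ptlen, NULL, ct, ctlen,
                                         ad, sizeof(ad), nonce, key));
    return 0;
}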
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa schwaemm_128_128_aead_decrypt() - */ -int schwaemm_128_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm128-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_128_128_aead_encrypt() - */ -int schwaemm_128_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Schwaemm256-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa schwaemm_256_256_aead_decrypt() - */ -int schwaemm_256_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm256-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_256_256_aead_encrypt() - */ -int schwaemm_256_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with Esch256 to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * ESCH_256_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int esch_256_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an Esch256 hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa esch_256_hash_update(), esch_256_hash_finalize(), esch_256_hash() - */ -void esch_256_hash_init(esch_256_hash_state_t *state); - -/** - * \brief Updates an Esch256 state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa esch_256_hash_init(), esch_256_hash_finalize() - */ -void esch_256_hash_update - (esch_256_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an Esch256 hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 32-byte hash value. - * - * \sa esch_256_hash_init(), esch_256_hash_update() - */ -void esch_256_hash_finalize - (esch_256_hash_state_t *state, unsigned char *out); - -/** - * \brief Hashes a block of input data with Esch384 to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * ESCH_384_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. 
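A short sketch (editor's example, not from the patched sources) showing that the incremental Esch256 API declared above produces the same digest as the one-shot function, regardless of how the input is split across esch_256_hash_update() calls:

#include <assert.h>
#include <string.h>
#include "sparkle.h"

int main(void)
{
    const unsigned char data[] = "abcdefghijklmnopqrstuvwxyz";
    unsigned char d1[ESCH_256_HASH_SIZE], d2[ESCH_256_HASH_SIZE];
    esch_256_hash_state_t st;

    /* One-shot hashing (26 bytes, excluding the terminating NUL) */
    esch_256_hash(d1, data, sizeof(data) - 1);

    /* Incremental hashing over arbitrary-sized chunks */
    esch_256_hash_init(&st);
    esch_256_hash_update(&st, data, 10);
    esch_256_hash_update(&st, data + 10, sizeof(data) - 1 - 10);
    esch_256_hash_finalize(&st, d2);

    assert(memcmp(d1, d2, ESCH_256_HASH_SIZE) == 0);
    return 0;
}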
- * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int esch_384_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an Esch384 hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa esch_384_hash_update(), esch_384_hash_finalize(), esch_384_hash() - */ -void esch_384_hash_init(esch_384_hash_state_t *state); - -/** - * \brief Updates an Esch384 state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa esch_384_hash_init(), esch_384_hash_finalize() - */ -void esch_384_hash_update - (esch_384_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an Esch384 hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 48-byte hash value. - * - * \sa esch_384_hash_init(), esch_384_hash_update() - */ -void esch_384_hash_finalize - (esch_384_hash_state_t *state, unsigned char *out); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sparkle/Implementations/crypto_hash/esch256v1/rhys/aead-common.c b/sparkle/Implementations/crypto_hash/esch256v1/rhys/aead-common.c new file mode 100644 index 0000000..84fc53a --- /dev/null +++ b/sparkle/Implementations/crypto_hash/esch256v1/rhys/aead-common.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "aead-common.h" + +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = (accum - 1) >> 8; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} + +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size, int precheck) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = ((accum - 1) >> 8) & precheck; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} diff --git a/sparkle/Implementations/crypto_hash/esch256v1/rhys/aead-common.h b/sparkle/Implementations/crypto_hash/esch256v1/rhys/aead-common.h new file mode 100644 index 0000000..2be95eb --- /dev/null +++ b/sparkle/Implementations/crypto_hash/esch256v1/rhys/aead-common.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_AEAD_COMMON_H +#define LWCRYPTO_AEAD_COMMON_H + +#include + +/** + * \file aead-common.h + * \brief Definitions that are common across AEAD schemes. + * + * AEAD stands for "Authenticated Encryption with Associated Data". + * It is a standard API pattern for securely encrypting and + * authenticating packets of data. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts and authenticates a packet with an AEAD scheme. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. 
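The masking trick in aead_check_tag above deserves a word: accum is the OR of the byte-wise XOR differences, so it is 0 exactly when the tags match; (accum - 1) >> 8 then yields an all-ones mask on a match and 0 on a mismatch (relying, as the library does, on arithmetic right shift of negative int, which virtually all targets provide), and that mask is used both to keep or zero the plaintext and, negated, as the return value. A standalone demonstration of the mask (editor's sketch, helper name hypothetical):

#include <stdio.h>

/* Mirrors the comparison and masking logic of aead_check_tag */
static int demo_tag_mask(const unsigned char *a, const unsigned char *b, unsigned n)
{
    int accum = 0;
    while (n-- > 0)
        accum |= (*a++ ^ *b++);
    /* equal tags: (0 - 1) >> 8 == -1 (all ones); any difference in 1..255 gives 0 */
    return (accum - 1) >> 8;
}

int main(void)
{
    unsigned char t1[4] = {1, 2, 3, 4};
    unsigned char t2[4] = {1, 2, 3, 4};
    unsigned char t3[4] = {1, 2, 3, 5};

    printf("equal:   mask=%d result=%d\n",
           demo_tag_mask(t1, t2, 4), ~demo_tag_mask(t1, t2, 4));  /* mask=-1, result=0  */
    printf("unequal: mask=%d result=%d\n",
           demo_tag_mask(t1, t3, 4), ~demo_tag_mask(t1, t3, 4));  /* mask=0,  result=-1 */
    return 0;
}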
+ * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + */ +typedef int (*aead_cipher_encrypt_t) + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with an AEAD scheme. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + */ +typedef int (*aead_cipher_decrypt_t) + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data. + * + * \param out Buffer to receive the hash output. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +typedef int (*aead_hash_t) + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a hashing operation. + * + * \param state Hash state to be initialized. + */ +typedef void (*aead_hash_init_t)(void *state); + +/** + * \brief Updates a hash state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + */ +typedef void (*aead_hash_update_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Returns the final hash value from a hashing operation. + * + * \param Hash state to be finalized. + * \param out Points to the output buffer to receive the hash value. + */ +typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); + +/** + * \brief Aborbs more input data into an XOF state. + * + * \param state XOF state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. + * + * \sa ascon_xof_init(), ascon_xof_squeeze() + */ +typedef void (*aead_xof_absorb_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Squeezes output data from an XOF state. 
+ * + * \param state XOF state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + */ +typedef void (*aead_xof_squeeze_t) + (void *state, unsigned char *out, unsigned long long outlen); + +/** + * \brief No special AEAD features. + */ +#define AEAD_FLAG_NONE 0x0000 + +/** + * \brief The natural byte order of the AEAD cipher is little-endian. + * + * If this flag is not present, then the natural byte order of the + * AEAD cipher should be assumed to be big-endian. + * + * The natural byte order may be useful when formatting packet sequence + * numbers as nonces. The application needs to know whether the sequence + * number should be packed into the leading or trailing bytes of the nonce. + */ +#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 + +/** + * \brief Meta-information about an AEAD cipher. + */ +typedef struct +{ + const char *name; /**< Name of the cipher */ + unsigned key_len; /**< Length of the key in bytes */ + unsigned nonce_len; /**< Length of the nonce in bytes */ + unsigned tag_len; /**< Length of the tag in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ + aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ + +} aead_cipher_t; + +/** + * \brief Meta-information about a hash algorithm that is related to an AEAD. + * + * Regular hash algorithms should provide the "hash", "init", "update", + * and "finalize" functions. Extensible Output Functions (XOF's) should + * proivde the "hash", "init", "absorb", and "squeeze" functions. + */ +typedef struct +{ + const char *name; /**< Name of the hash algorithm */ + size_t state_size; /**< Size of the incremental state structure */ + unsigned hash_len; /**< Length of the hash in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_hash_t hash; /**< All in one hashing function */ + aead_hash_init_t init; /**< Incremental hash/XOF init function */ + aead_hash_update_t update; /**< Incremental hash update function */ + aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ + aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ + aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ + +} aead_hash_algorithm_t; + +/** + * \brief Check an authentication tag in constant time. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + */ +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len); + +/** + * \brief Check an authentication tag in constant time with a previous check. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
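A sketch (editor's example, not from the patched sources) of driving the family generically through the aead_cipher_t meta-information blocks defined above, assuming sparkle.h (which declares the descriptors) is on the include path; the zero key and nonce are placeholders:

#include <stdio.h>
#include "sparkle.h"

/* Run an encrypt/decrypt round trip using only the descriptor fields. */
static int roundtrip(const aead_cipher_t *cipher)
{
    unsigned char key[32] = {0};    /* large enough for any key_len in the family */
    unsigned char nonce[32] = {0};  /* large enough for any nonce_len in the family */
    const unsigned char msg[] = "generic driver";
    unsigned char ct[sizeof(msg) + 32];  /* 32 bytes covers the largest tag */
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;
    int ok;

    cipher->encrypt(ct, &ctlen, msg, sizeof(msg), NULL, 0, NULL, nonce, key);
    ok = cipher->decrypt(pt, &ptlen, NULL, ct, ctlen, NULL, 0, nonce, key) == 0;
    printf("%s: key=%u nonce=%u tag=%u ok=%d\n", cipher->name,
           cipher->key_len, cipher->nonce_len, cipher->tag_len, ok);
    return ok;
}

int main(void)
{
    return roundtrip(&schwaemm_256_128_cipher) &&
           roundtrip(&schwaemm_256_256_cipher) ? 0 : 1;
}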
+ * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + * + * This version can be used to incorporate other information about the + * correctness of the plaintext into the final result. + */ +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len, int precheck); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sparkle/Implementations/crypto_hash/esch256v1/rhys/api.h b/sparkle/Implementations/crypto_hash/esch256v1/rhys/api.h new file mode 100644 index 0000000..ae8c7f6 --- /dev/null +++ b/sparkle/Implementations/crypto_hash/esch256v1/rhys/api.h @@ -0,0 +1 @@ +#define CRYPTO_BYTES 32 diff --git a/sparkle/Implementations/crypto_hash/esch256v1/rhys/hash.c b/sparkle/Implementations/crypto_hash/esch256v1/rhys/hash.c new file mode 100644 index 0000000..b9163f6 --- /dev/null +++ b/sparkle/Implementations/crypto_hash/esch256v1/rhys/hash.c @@ -0,0 +1,8 @@ + +#include "sparkle.h" + +int crypto_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + return esch_256_hash(out, in, inlen); +} diff --git a/sparkle/Implementations/crypto_hash/esch256v1/rhys/internal-sparkle-avr.S b/sparkle/Implementations/crypto_hash/esch256v1/rhys/internal-sparkle-avr.S new file mode 100644 index 0000000..753ea2f --- /dev/null +++ b/sparkle/Implementations/crypto_hash/esch256v1/rhys/internal-sparkle-avr.S @@ -0,0 +1,2887 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global sparkle_256 + .type sparkle_256, @function +sparkle_256: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + push r22 + ld r22,Z + ldd r23,Z+1 + ldd r26,Z+2 + ldd r27,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + rcall 129f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,1 + eor r8,r18 + rcall 129f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,2 + eor r8,r18 + rcall 129f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,3 + eor r8,r18 + rcall 129f + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,4 + eor r8,r18 + rcall 129f + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,5 + eor r8,r18 + rcall 129f + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,6 + eor r8,r18 + rcall 129f + pop r18 + cpi r18,7 + brne 5094f + rjmp 615f +5094: + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,7 + eor r8,r18 + rcall 129f + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor 
r7,r21 + ldi r18,8 + eor r8,r18 + rcall 129f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,9 + eor r8,r18 + rcall 129f + rjmp 615f +129: + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + movw r12,r22 + movw r14,r26 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + movw r24,r4 + movw r16,r6 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r28,Z+24 + ldd r29,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc 
r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + eor r14,r12 + eor r15,r13 + eor r16,r24 + eor r17,r25 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + eor r14,r8 + eor r15,r9 + eor r12,r10 + eor r13,r11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + std Z+20,r18 + std Z+21,r19 + std Z+22,r20 + std Z+23,r21 + movw r18,r4 + movw r20,r6 + movw r4,r14 + movw r6,r12 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + movw r8,r18 + movw r10,r20 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + eor r16,r28 + eor r17,r29 + eor r24,r2 + eor r25,r3 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + std Z+24,r28 + std Z+25,r29 + std Z+26,r2 + std Z+27,r3 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + movw r14,r22 + movw r12,r26 + eor r14,r18 + eor r15,r19 + eor r12,r20 + eor r13,r21 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + movw r22,r16 + movw r26,r24 + eor r22,r28 + eor r23,r29 + eor r26,r2 + eor r27,r3 + movw r28,r14 + movw r2,r12 + ret +615: + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sparkle_256, .-sparkle_256 + + .text +.global sparkle_384 + .type sparkle_384, @function +sparkle_384: + push r28 + push 
r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + push r22 + ld r22,Z + ldd r23,Z+1 + ldd r26,Z+2 + ldd r27,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + rcall 140f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,1 + eor r8,r18 + rcall 140f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,2 + eor r8,r18 + rcall 140f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,3 + eor r8,r18 + rcall 140f + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,4 + eor r8,r18 + rcall 140f + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,5 + eor r8,r18 + rcall 140f + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,6 + eor r8,r18 + rcall 140f + pop r18 + cpi r18,7 + brne 5094f + rjmp 886f +5094: + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,7 + eor r8,r18 + rcall 140f + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,8 + eor r8,r18 + rcall 140f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,9 + eor r8,r18 + rcall 140f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,10 + eor r8,r18 + rcall 140f + rjmp 886f +140: + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw 
r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + movw r12,r22 + movw r14,r26 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + movw r24,r4 + movw r16,r6 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r28,Z+24 + ldd r29,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + 
eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r22 + std Z+17,r23 + std Z+18,r26 + std Z+19,r27 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r28 + std Z+25,r29 + std Z+26,r2 + std Z+27,r3 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + eor r12,r22 + eor r13,r23 + eor r14,r26 + eor r15,r27 + eor r24,r4 + eor r25,r5 + eor r16,r6 + eor r17,r7 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r28,Z+40 + ldd r29,Z+41 + ldd r2,Z+42 + ldd r3,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + eor r14,r12 + eor r15,r13 + eor r16,r24 + eor r17,r25 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + eor r14,r8 + eor r15,r9 + eor r12,r10 + eor r13,r11 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + ldd r0,Z+4 + eor r18,r0 + ldd r0,Z+5 + eor r19,r0 + ldd 
r0,Z+6 + eor r20,r0 + ldd r0,Z+7 + eor r21,r0 + std Z+20,r18 + std Z+21,r19 + std Z+22,r20 + std Z+23,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Z+28,r18 + std Z+29,r19 + std Z+30,r20 + std Z+31,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + std Z+36,r18 + std Z+37,r19 + std Z+38,r20 + std Z+39,r21 + eor r8,r14 + eor r9,r15 + eor r10,r12 + eor r11,r13 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + eor r18,r16 + eor r19,r17 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + eor r16,r28 + eor r17,r29 + eor r24,r2 + eor r25,r3 + ldd r28,Z+16 + ldd r29,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + std Z+40,r28 + std Z+41,r29 + std Z+42,r2 + std Z+43,r3 + ld r14,Z + ldd r15,Z+1 + ldd r12,Z+2 + ldd r13,Z+3 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + std Z+24,r14 + std Z+25,r15 + std Z+26,r12 + std Z+27,r13 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + std Z+32,r18 + std Z+33,r19 + std Z+34,r20 + std Z+35,r21 + eor r28,r16 + eor r29,r17 + eor r2,r24 + eor r3,r25 + ret +886: + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sparkle_384, .-sparkle_384 + + .text +.global sparkle_512 + .type sparkle_512, @function +sparkle_512: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + push r22 + ld r22,Z + ldd r23,Z+1 + ldd r26,Z+2 + ldd r27,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + rcall 151f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,1 + eor r8,r18 + rcall 151f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,2 + eor r8,r18 + rcall 151f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,3 + eor r8,r18 + rcall 151f + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,4 + eor r8,r18 + rcall 151f + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,5 + eor r8,r18 + rcall 151f + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,6 + eor r8,r18 + rcall 151f + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,7 + eor r8,r18 + rcall 151f + pop r18 + cpi r18,8 + brne 5105f + rjmp 1189f +5105: + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,8 + eor r8,r18 + rcall 151f + 
ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,9 + eor r8,r18 + rcall 151f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,10 + eor r8,r18 + rcall 151f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,11 + eor r8,r18 + rcall 151f + rjmp 1189f +151: + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + movw r12,r22 + movw r14,r26 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + movw r24,r4 + movw r16,r6 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r28,Z+24 + ldd r29,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor 
r6,r23 + eor r7,r26 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r22 + std Z+17,r23 + std Z+18,r26 + std Z+19,r27 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r28 + std Z+25,r29 + std Z+26,r2 + std Z+27,r3 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + eor r12,r22 + eor r13,r23 + eor r14,r26 + eor r15,r27 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + eor r24,r4 + eor r25,r5 + eor r16,r6 + eor r17,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r28,Z+40 + ldd r29,Z+41 + ldd r2,Z+42 + ldd r3,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + 
adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + std Z+32,r22 + std Z+33,r23 + std Z+34,r26 + std Z+35,r27 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r28 + std Z+41,r29 + std Z+42,r2 + std Z+43,r3 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + ldd r22,Z+48 + ldd r23,Z+49 + ldd r26,Z+50 + ldd r27,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r28,Z+56 + ldd r29,Z+57 + ldd r2,Z+58 + ldd r3,Z+59 + ldd r8,Z+60 + ldd r9,Z+61 + ldd r10,Z+62 + ldd r11,Z+63 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr 
r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + eor r14,r12 + eor r15,r13 + eor r16,r24 + eor r17,r25 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + ldd r18,Z+44 + ldd r19,Z+45 + ldd r20,Z+46 + ldd r21,Z+47 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + eor r14,r8 + eor r15,r9 + eor r12,r10 + eor r13,r11 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Z+60,r8 + std Z+61,r9 + std Z+62,r10 + std Z+63,r11 + ldd r8,Z+4 + ldd r9,Z+5 + ldd r10,Z+6 + ldd r11,Z+7 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + std Z+28,r4 + std Z+29,r5 + std Z+30,r6 + std Z+31,r7 + std Z+36,r8 + std Z+37,r9 + std Z+38,r10 + std Z+39,r11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + ldd r8,Z+52 + ldd r9,Z+53 + ldd r10,Z+54 + ldd r11,Z+55 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + ldd r0,Z+60 + eor r14,r0 + ldd r0,Z+61 + eor r15,r0 + ldd r0,Z+62 + eor r12,r0 + ldd r0,Z+63 + eor r13,r0 + std Z+20,r14 + std Z+21,r15 + std Z+22,r12 + std Z+23,r13 + movw r4,r18 + movw r6,r20 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + std Z+48,r22 + std Z+49,r23 + std Z+50,r26 + std Z+51,r27 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + eor r18,r16 + eor r19,r17 + eor r20,r24 + eor r21,r25 + eor r16,r28 + eor r17,r29 + eor r24,r2 + eor r25,r3 + ldd r14,Z+24 + ldd r15,Z+25 + ldd r12,Z+26 + ldd r13,Z+27 + std Z+56,r14 + std Z+57,r15 + std Z+58,r12 + std Z+59,r13 + ld r14,Z + ldd r15,Z+1 + ldd r12,Z+2 + ldd r13,Z+3 + eor r22,r14 + eor r23,r15 + eor r26,r12 + eor r27,r13 + std Z+24,r22 + std Z+25,r23 + std Z+26,r26 + std Z+27,r27 + std Z+32,r14 + std Z+33,r15 + std Z+34,r12 + std Z+35,r13 + ldd r14,Z+8 + ldd r15,Z+9 + ldd r12,Z+10 + ldd r13,Z+11 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + movw r22,r18 + movw r26,r20 + std Z+40,r14 + std Z+41,r15 + std Z+42,r12 + std Z+43,r13 + ldd r28,Z+48 + ldd r29,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r14,Z+16 + ldd r15,Z+17 + ldd r12,Z+18 + ldd r13,Z+19 + eor r28,r14 + eor r29,r15 + eor r2,r12 + eor r3,r13 + std Z+48,r14 + std Z+49,r15 + std Z+50,r12 + std Z+51,r13 + ldd r0,Z+56 + eor r16,r0 + ldd r0,Z+57 + eor r17,r0 + ldd r0,Z+58 + eor r24,r0 + ldd r0,Z+59 + eor r25,r0 + std Z+16,r16 + std Z+17,r17 + std Z+18,r24 + std Z+19,r25 + ret +1189: + st Z,r22 + 
std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sparkle_512, .-sparkle_512 + +#endif diff --git a/sparkle/Implementations/crypto_hash/esch256v1/rhys/internal-sparkle.c b/sparkle/Implementations/crypto_hash/esch256v1/rhys/internal-sparkle.c new file mode 100644 index 0000000..4a4c0fb --- /dev/null +++ b/sparkle/Implementations/crypto_hash/esch256v1/rhys/internal-sparkle.c @@ -0,0 +1,382 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "internal-sparkle.h" + +#if !defined(__AVR__) + +/* The 8 basic round constants from the specification */ +#define RC_0 0xB7E15162 +#define RC_1 0xBF715880 +#define RC_2 0x38B4DA56 +#define RC_3 0x324E7738 +#define RC_4 0xBB1185EB +#define RC_5 0x4F7C7B57 +#define RC_6 0xCFBFA1C8 +#define RC_7 0xC2B3293D + +/* Round constants for all SPARKLE steps; maximum of 12 for SPARKLE-512 */ +static uint32_t const sparkle_rc[12] = { + RC_0, RC_1, RC_2, RC_3, RC_4, RC_5, RC_6, RC_7, + RC_0, RC_1, RC_2, RC_3 +}; + +/** + * \brief Alzette block cipher that implements the ARXbox layer of the + * SPARKLE permutation. + * + * \param x Left half of the 64-bit block. + * \param y Right half of the 64-bit block. + * \param k 32-bit round key. 
+ */ +#define alzette(x, y, k) \ + do { \ + (x) += leftRotate1((y)); \ + (y) ^= leftRotate8((x)); \ + (x) ^= (k); \ + (x) += leftRotate15((y)); \ + (y) ^= leftRotate15((x)); \ + (x) ^= (k); \ + (x) += (y); \ + (y) ^= leftRotate1((x)); \ + (x) ^= (k); \ + (x) += leftRotate8((y)); \ + (y) ^= leftRotate16((x)); \ + (x) ^= (k); \ + } while (0) + +void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps) +{ + uint32_t x0, x1, x2, x3; + uint32_t y0, y1, y2, y3; + uint32_t tx, ty; + unsigned step; + + /* Load the SPARKLE-256 state up into local variables */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + x0 = s[0]; + y0 = s[1]; + x1 = s[2]; + y1 = s[3]; + x2 = s[4]; + y2 = s[5]; + x3 = s[6]; + y3 = s[7]; +#else + x0 = le_load_word32((const uint8_t *)&(s[0])); + y0 = le_load_word32((const uint8_t *)&(s[1])); + x1 = le_load_word32((const uint8_t *)&(s[2])); + y1 = le_load_word32((const uint8_t *)&(s[3])); + x2 = le_load_word32((const uint8_t *)&(s[4])); + y2 = le_load_word32((const uint8_t *)&(s[5])); + x3 = le_load_word32((const uint8_t *)&(s[6])); + y3 = le_load_word32((const uint8_t *)&(s[7])); +#endif + + /* Perform all requested steps */ + for (step = 0; step < steps; ++step) { + /* Add round constants */ + y0 ^= sparkle_rc[step]; + y1 ^= step; + + /* ARXbox layer */ + alzette(x0, y0, RC_0); + alzette(x1, y1, RC_1); + alzette(x2, y2, RC_2); + alzette(x3, y3, RC_3); + + /* Linear layer */ + tx = x0 ^ x1; + ty = y0 ^ y1; + tx = leftRotate16(tx ^ (tx << 16)); + ty = leftRotate16(ty ^ (ty << 16)); + y2 ^= tx; + tx ^= y3; + y3 = y1; + y1 = y2 ^ y0; + y2 = y0; + y0 = tx ^ y3; + x2 ^= ty; + ty ^= x3; + x3 = x1; + x1 = x2 ^ x0; + x2 = x0; + x0 = ty ^ x3; + } + + /* Write the local variables back to the SPARKLE-256 state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + s[0] = x0; + s[1] = y0; + s[2] = x1; + s[3] = y1; + s[4] = x2; + s[5] = y2; + s[6] = x3; + s[7] = y3; +#else + le_store_word32((uint8_t *)&(s[0]), x0); + le_store_word32((uint8_t *)&(s[1]), y0); + le_store_word32((uint8_t *)&(s[2]), x1); + le_store_word32((uint8_t *)&(s[3]), y1); + le_store_word32((uint8_t *)&(s[4]), x2); + le_store_word32((uint8_t *)&(s[5]), y2); + le_store_word32((uint8_t *)&(s[6]), x3); + le_store_word32((uint8_t *)&(s[7]), y3); +#endif +} + +void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps) +{ + uint32_t x0, x1, x2, x3, x4, x5; + uint32_t y0, y1, y2, y3, y4, y5; + uint32_t tx, ty; + unsigned step; + + /* Load the SPARKLE-384 state up into local variables */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + x0 = s[0]; + y0 = s[1]; + x1 = s[2]; + y1 = s[3]; + x2 = s[4]; + y2 = s[5]; + x3 = s[6]; + y3 = s[7]; + x4 = s[8]; + y4 = s[9]; + x5 = s[10]; + y5 = s[11]; +#else + x0 = le_load_word32((const uint8_t *)&(s[0])); + y0 = le_load_word32((const uint8_t *)&(s[1])); + x1 = le_load_word32((const uint8_t *)&(s[2])); + y1 = le_load_word32((const uint8_t *)&(s[3])); + x2 = le_load_word32((const uint8_t *)&(s[4])); + y2 = le_load_word32((const uint8_t *)&(s[5])); + x3 = le_load_word32((const uint8_t *)&(s[6])); + y3 = le_load_word32((const uint8_t *)&(s[7])); + x4 = le_load_word32((const uint8_t *)&(s[8])); + y4 = le_load_word32((const uint8_t *)&(s[9])); + x5 = le_load_word32((const uint8_t *)&(s[10])); + y5 = le_load_word32((const uint8_t *)&(s[11])); +#endif + + /* Perform all requested steps */ + for (step = 0; step < steps; ++step) { + /* Add round constants */ + y0 ^= sparkle_rc[step]; + y1 ^= step; + + /* ARXbox layer */ + alzette(x0, y0, RC_0); + alzette(x1, y1, RC_1); + alzette(x2, y2, RC_2); + alzette(x3, y3, RC_3); 
+ alzette(x4, y4, RC_4); + alzette(x5, y5, RC_5); + + /* Linear layer */ + tx = x0 ^ x1 ^ x2; + ty = y0 ^ y1 ^ y2; + tx = leftRotate16(tx ^ (tx << 16)); + ty = leftRotate16(ty ^ (ty << 16)); + y3 ^= tx; + y4 ^= tx; + tx ^= y5; + y5 = y2; + y2 = y3 ^ y0; + y3 = y0; + y0 = y4 ^ y1; + y4 = y1; + y1 = tx ^ y5; + x3 ^= ty; + x4 ^= ty; + ty ^= x5; + x5 = x2; + x2 = x3 ^ x0; + x3 = x0; + x0 = x4 ^ x1; + x4 = x1; + x1 = ty ^ x5; + } + + /* Write the local variables back to the SPARKLE-384 state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + s[0] = x0; + s[1] = y0; + s[2] = x1; + s[3] = y1; + s[4] = x2; + s[5] = y2; + s[6] = x3; + s[7] = y3; + s[8] = x4; + s[9] = y4; + s[10] = x5; + s[11] = y5; +#else + le_store_word32((uint8_t *)&(s[0]), x0); + le_store_word32((uint8_t *)&(s[1]), y0); + le_store_word32((uint8_t *)&(s[2]), x1); + le_store_word32((uint8_t *)&(s[3]), y1); + le_store_word32((uint8_t *)&(s[4]), x2); + le_store_word32((uint8_t *)&(s[5]), y2); + le_store_word32((uint8_t *)&(s[6]), x3); + le_store_word32((uint8_t *)&(s[7]), y3); + le_store_word32((uint8_t *)&(s[8]), x4); + le_store_word32((uint8_t *)&(s[9]), y4); + le_store_word32((uint8_t *)&(s[10]), x5); + le_store_word32((uint8_t *)&(s[11]), y5); +#endif +} + +void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps) +{ + uint32_t x0, x1, x2, x3, x4, x5, x6, x7; + uint32_t y0, y1, y2, y3, y4, y5, y6, y7; + uint32_t tx, ty; + unsigned step; + + /* Load the SPARKLE-512 state up into local variables */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + x0 = s[0]; + y0 = s[1]; + x1 = s[2]; + y1 = s[3]; + x2 = s[4]; + y2 = s[5]; + x3 = s[6]; + y3 = s[7]; + x4 = s[8]; + y4 = s[9]; + x5 = s[10]; + y5 = s[11]; + x6 = s[12]; + y6 = s[13]; + x7 = s[14]; + y7 = s[15]; +#else + x0 = le_load_word32((const uint8_t *)&(s[0])); + y0 = le_load_word32((const uint8_t *)&(s[1])); + x1 = le_load_word32((const uint8_t *)&(s[2])); + y1 = le_load_word32((const uint8_t *)&(s[3])); + x2 = le_load_word32((const uint8_t *)&(s[4])); + y2 = le_load_word32((const uint8_t *)&(s[5])); + x3 = le_load_word32((const uint8_t *)&(s[6])); + y3 = le_load_word32((const uint8_t *)&(s[7])); + x4 = le_load_word32((const uint8_t *)&(s[8])); + y4 = le_load_word32((const uint8_t *)&(s[9])); + x5 = le_load_word32((const uint8_t *)&(s[10])); + y5 = le_load_word32((const uint8_t *)&(s[11])); + x6 = le_load_word32((const uint8_t *)&(s[12])); + y6 = le_load_word32((const uint8_t *)&(s[13])); + x7 = le_load_word32((const uint8_t *)&(s[14])); + y7 = le_load_word32((const uint8_t *)&(s[15])); +#endif + + /* Perform all requested steps */ + for (step = 0; step < steps; ++step) { + /* Add round constants */ + y0 ^= sparkle_rc[step]; + y1 ^= step; + + /* ARXbox layer */ + alzette(x0, y0, RC_0); + alzette(x1, y1, RC_1); + alzette(x2, y2, RC_2); + alzette(x3, y3, RC_3); + alzette(x4, y4, RC_4); + alzette(x5, y5, RC_5); + alzette(x6, y6, RC_6); + alzette(x7, y7, RC_7); + + /* Linear layer */ + tx = x0 ^ x1 ^ x2 ^ x3; + ty = y0 ^ y1 ^ y2 ^ y3; + tx = leftRotate16(tx ^ (tx << 16)); + ty = leftRotate16(ty ^ (ty << 16)); + y4 ^= tx; + y5 ^= tx; + y6 ^= tx; + tx ^= y7; + y7 = y3; + y3 = y4 ^ y0; + y4 = y0; + y0 = y5 ^ y1; + y5 = y1; + y1 = y6 ^ y2; + y6 = y2; + y2 = tx ^ y7; + x4 ^= ty; + x5 ^= ty; + x6 ^= ty; + ty ^= x7; + x7 = x3; + x3 = x4 ^ x0; + x4 = x0; + x0 = x5 ^ x1; + x5 = x1; + x1 = x6 ^ x2; + x6 = x2; + x2 = ty ^ x7; + } + + /* Write the local variables back to the SPARKLE-512 state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + s[0] = x0; + s[1] = y0; + s[2] = x1; + s[3] = y1; + s[4] = x2; + s[5] = y2; 
+ s[6] = x3; + s[7] = y3; + s[8] = x4; + s[9] = y4; + s[10] = x5; + s[11] = y5; + s[12] = x6; + s[13] = y6; + s[14] = x7; + s[15] = y7; +#else + le_store_word32((uint8_t *)&(s[0]), x0); + le_store_word32((uint8_t *)&(s[1]), y0); + le_store_word32((uint8_t *)&(s[2]), x1); + le_store_word32((uint8_t *)&(s[3]), y1); + le_store_word32((uint8_t *)&(s[4]), x2); + le_store_word32((uint8_t *)&(s[5]), y2); + le_store_word32((uint8_t *)&(s[6]), x3); + le_store_word32((uint8_t *)&(s[7]), y3); + le_store_word32((uint8_t *)&(s[8]), x4); + le_store_word32((uint8_t *)&(s[9]), y4); + le_store_word32((uint8_t *)&(s[10]), x5); + le_store_word32((uint8_t *)&(s[11]), y5); + le_store_word32((uint8_t *)&(s[12]), x6); + le_store_word32((uint8_t *)&(s[13]), y6); + le_store_word32((uint8_t *)&(s[14]), x7); + le_store_word32((uint8_t *)&(s[15]), y7); +#endif +} + +#endif diff --git a/sparkle/Implementations/crypto_hash/esch256v1/rhys/internal-sparkle.h b/sparkle/Implementations/crypto_hash/esch256v1/rhys/internal-sparkle.h new file mode 100644 index 0000000..fbdabc1 --- /dev/null +++ b/sparkle/Implementations/crypto_hash/esch256v1/rhys/internal-sparkle.h @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_SPARKLE_H +#define LW_INTERNAL_SPARKLE_H + +#include "internal-util.h" + +/** + * \file internal-sparkle.h + * \brief Internal implementation of the SPARKLE permutation. + * + * References: https://www.cryptolux.org/index.php/Sparkle + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the state for SPARKLE-256. + */ +#define SPARKLE_256_STATE_SIZE 8 + +/** + * \brief Size of the state for SPARKLE-384. + */ +#define SPARKLE_384_STATE_SIZE 12 + +/** + * \brief Size of the state for SPARKLE-512. + */ +#define SPARKLE_512_STATE_SIZE 16 + +/** + * \brief Performs the SPARKLE-256 permutation. + * + * \param s The words of the SPARKLE-256 state in little-endian byte order. + * \param steps The number of steps to perform, 7 or 10. + */ +void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps); + +/** + * \brief Performs the SPARKLE-384 permutation. + * + * \param s The words of the SPARKLE-384 state in little-endian byte order. + * \param steps The number of steps to perform, 7 or 11. + */ +void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps); + +/** + * \brief Performs the SPARKLE-512 permutation. 
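For reference, a minimal driver for the permutation API declared in this header (a hypothetical example: the all-zero input and the choice of the 7-step "slim" SPARKLE-384 variant are for illustration only; link it against internal-sparkle.c):

#include <stdio.h>
#include "internal-sparkle.h"

int main(void)
{
    /* All-zero SPARKLE-384 state; the 32-bit words are interpreted in
     * little-endian byte order, as documented for the s parameter. */
    uint32_t state[SPARKLE_384_STATE_SIZE] = {0};

    /* Apply the slim (7-step) SPARKLE-384 permutation once. */
    sparkle_384(state, 7);

    printf("state[0] after 7 steps: %08lx\n", (unsigned long)state[0]);
    return 0;
}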
+ * + * \param s The words of the SPARKLE-512 state in little-endian byte order. + * \param steps The number of steps to perform, 8 or 12. + */ +void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sparkle/Implementations/crypto_hash/esch256v1/rhys/internal-util.h b/sparkle/Implementations/crypto_hash/esch256v1/rhys/internal-util.h new file mode 100644 index 0000000..e30166d --- /dev/null +++ b/sparkle/Implementations/crypto_hash/esch256v1/rhys/internal-util.h @@ -0,0 +1,702 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_UTIL_H +#define LW_INTERNAL_UTIL_H + +#include <stdint.h> + +/* Figure out how to inline functions using this C compiler */ +#if defined(__STDC__) && __STDC_VERSION__ >= 199901L +#define STATIC_INLINE static inline +#elif defined(__GNUC__) || defined(__clang__) +#define STATIC_INLINE static __inline__ +#else +#define STATIC_INLINE static +#endif + +/* Try to figure out whether the CPU is little-endian or big-endian. + * May need to modify this to include new compiler-specific defines.
+ * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your + * compiler flags when you compile this library */ +#if defined(__x86_64) || defined(__x86_64__) || \ + defined(__i386) || defined(__i386__) || \ + defined(__AVR__) || defined(__arm) || defined(__arm__) || \ + defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ + defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ + defined(__LITTLE_ENDIAN__) +#define LW_UTIL_LITTLE_ENDIAN 1 +#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ + defined(__BIG_ENDIAN__) +/* Big endian */ +#else +#error "Cannot determine the endianess of this platform" +#endif + +/* Helper macros to load and store values while converting endian-ness */ + +/* Load a big-endian 32-bit word from a byte buffer */ +#define be_load_word32(ptr) \ + ((((uint32_t)((ptr)[0])) << 24) | \ + (((uint32_t)((ptr)[1])) << 16) | \ + (((uint32_t)((ptr)[2])) << 8) | \ + ((uint32_t)((ptr)[3]))) + +/* Store a big-endian 32-bit word into a byte buffer */ +#define be_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 24); \ + (ptr)[1] = (uint8_t)(_x >> 16); \ + (ptr)[2] = (uint8_t)(_x >> 8); \ + (ptr)[3] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 32-bit word from a byte buffer */ +#define le_load_word32(ptr) \ + ((((uint32_t)((ptr)[3])) << 24) | \ + (((uint32_t)((ptr)[2])) << 16) | \ + (((uint32_t)((ptr)[1])) << 8) | \ + ((uint32_t)((ptr)[0]))) + +/* Store a little-endian 32-bit word into a byte buffer */ +#define le_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + } while (0) + +/* Load a big-endian 64-bit word from a byte buffer */ +#define be_load_word64(ptr) \ + ((((uint64_t)((ptr)[0])) << 56) | \ + (((uint64_t)((ptr)[1])) << 48) | \ + (((uint64_t)((ptr)[2])) << 40) | \ + (((uint64_t)((ptr)[3])) << 32) | \ + (((uint64_t)((ptr)[4])) << 24) | \ + (((uint64_t)((ptr)[5])) << 16) | \ + (((uint64_t)((ptr)[6])) << 8) | \ + ((uint64_t)((ptr)[7]))) + +/* Store a big-endian 64-bit word into a byte buffer */ +#define be_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 56); \ + (ptr)[1] = (uint8_t)(_x >> 48); \ + (ptr)[2] = (uint8_t)(_x >> 40); \ + (ptr)[3] = (uint8_t)(_x >> 32); \ + (ptr)[4] = (uint8_t)(_x >> 24); \ + (ptr)[5] = (uint8_t)(_x >> 16); \ + (ptr)[6] = (uint8_t)(_x >> 8); \ + (ptr)[7] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 64-bit word from a byte buffer */ +#define le_load_word64(ptr) \ + ((((uint64_t)((ptr)[7])) << 56) | \ + (((uint64_t)((ptr)[6])) << 48) | \ + (((uint64_t)((ptr)[5])) << 40) | \ + (((uint64_t)((ptr)[4])) << 32) | \ + (((uint64_t)((ptr)[3])) << 24) | \ + (((uint64_t)((ptr)[2])) << 16) | \ + (((uint64_t)((ptr)[1])) << 8) | \ + ((uint64_t)((ptr)[0]))) + +/* Store a little-endian 64-bit word into a byte buffer */ +#define le_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + (ptr)[4] = (uint8_t)(_x >> 32); \ + (ptr)[5] = (uint8_t)(_x >> 40); \ + (ptr)[6] = (uint8_t)(_x >> 48); \ + (ptr)[7] = (uint8_t)(_x >> 56); \ + } while (0) + +/* Load a big-endian 16-bit word from a byte buffer */ +#define be_load_word16(ptr) \ + ((((uint16_t)((ptr)[0])) << 8) | \ + ((uint16_t)((ptr)[1]))) + +/* Store a 
big-endian 16-bit word into a byte buffer */ +#define be_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 8); \ + (ptr)[1] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 16-bit word from a byte buffer */ +#define le_load_word16(ptr) \ + ((((uint16_t)((ptr)[1])) << 8) | \ + ((uint16_t)((ptr)[0]))) + +/* Store a little-endian 16-bit word into a byte buffer */ +#define le_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + } while (0) + +/* XOR a source byte buffer against a destination */ +#define lw_xor_block(dest, src, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ ^= *_src++; \ + --_len; \ + } \ + } while (0) + +/* XOR two source byte buffers and put the result in a destination buffer */ +#define lw_xor_block_2_src(dest, src1, src2, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ = *_src1++ ^ *_src2++; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time */ +#define lw_xor_block_2_dest(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest2++ = (*_dest++ ^= *_src++); \ + --_len; \ + } \ + } while (0) + +/* XOR two byte buffers and write to a destination which at the same + * time copying the contents of src2 to dest2 */ +#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src2++; \ + *_dest2++ = _temp; \ + *_dest++ = *_src1++ ^ _temp; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time. This version swaps the source value + * into the "dest" buffer */ +#define lw_xor_block_swap(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src++; \ + *_dest2++ = *_dest ^ _temp; \ + *_dest++ = _temp; \ + --_len; \ + } \ + } while (0) + +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + +/* Rotation macros for 32-bit arguments */ + +/* Generic left rotate */ +#define leftRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (32 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (32 - (bits))); \ + })) + +#if !LW_CRYPTO_ROTATE32_COMPOSED + +/* Left rotate by a specific number of bits. 
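A minimal sketch of how the load/store and XOR helpers above might be exercised, assuming this header is available as "internal-util.h" (the expected values follow directly from the macro definitions; the main() harness is purely illustrative):

    #include <stdio.h>
    #include "internal-util.h"

    int main(void)
    {
        unsigned char buf[4];
        unsigned char a[4] = {0x01, 0x02, 0x03, 0x04};
        const unsigned char b[4] = {0xF0, 0x0F, 0xAA, 0x55};

        /* Round-trip a word through the little-endian store/load macros */
        le_store_word32(buf, 0x11223344);
        printf("%02x %02x\n", buf[0], buf[3]);                   /* 44 11 */
        printf("%08lx\n", (unsigned long)le_load_word32(buf));   /* 11223344 */

        /* XOR b into a in place with lw_xor_block */
        lw_xor_block(a, b, 4);
        printf("%02x %02x %02x %02x\n", a[0], a[1], a[2], a[3]); /* f1 0d a9 51 */
        return 0;
    }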
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1(a) (leftRotate((a), 1)) +#define leftRotate2(a) (leftRotate((a), 2)) +#define leftRotate3(a) (leftRotate((a), 3)) +#define leftRotate4(a) (leftRotate((a), 4)) +#define leftRotate5(a) (leftRotate((a), 5)) +#define leftRotate6(a) (leftRotate((a), 6)) +#define leftRotate7(a) (leftRotate((a), 7)) +#define leftRotate8(a) (leftRotate((a), 8)) +#define leftRotate9(a) (leftRotate((a), 9)) +#define leftRotate10(a) (leftRotate((a), 10)) +#define leftRotate11(a) (leftRotate((a), 11)) +#define leftRotate12(a) (leftRotate((a), 12)) +#define leftRotate13(a) (leftRotate((a), 13)) +#define leftRotate14(a) (leftRotate((a), 14)) +#define leftRotate15(a) (leftRotate((a), 15)) +#define leftRotate16(a) (leftRotate((a), 16)) +#define leftRotate17(a) (leftRotate((a), 17)) +#define leftRotate18(a) (leftRotate((a), 18)) +#define leftRotate19(a) (leftRotate((a), 19)) +#define leftRotate20(a) (leftRotate((a), 20)) +#define leftRotate21(a) (leftRotate((a), 21)) +#define leftRotate22(a) (leftRotate((a), 22)) +#define leftRotate23(a) (leftRotate((a), 23)) +#define leftRotate24(a) (leftRotate((a), 24)) +#define leftRotate25(a) (leftRotate((a), 25)) +#define leftRotate26(a) (leftRotate((a), 26)) +#define leftRotate27(a) (leftRotate((a), 27)) +#define leftRotate28(a) (leftRotate((a), 28)) +#define leftRotate29(a) (leftRotate((a), 29)) +#define leftRotate30(a) (leftRotate((a), 30)) +#define leftRotate31(a) (leftRotate((a), 31)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1(a) (rightRotate((a), 1)) +#define rightRotate2(a) (rightRotate((a), 2)) +#define rightRotate3(a) (rightRotate((a), 3)) +#define rightRotate4(a) (rightRotate((a), 4)) +#define rightRotate5(a) (rightRotate((a), 5)) +#define rightRotate6(a) (rightRotate((a), 6)) +#define rightRotate7(a) (rightRotate((a), 7)) +#define rightRotate8(a) (rightRotate((a), 8)) +#define rightRotate9(a) (rightRotate((a), 9)) +#define rightRotate10(a) (rightRotate((a), 10)) +#define rightRotate11(a) (rightRotate((a), 11)) +#define rightRotate12(a) (rightRotate((a), 12)) +#define rightRotate13(a) (rightRotate((a), 13)) +#define rightRotate14(a) (rightRotate((a), 14)) +#define rightRotate15(a) (rightRotate((a), 15)) +#define rightRotate16(a) (rightRotate((a), 16)) +#define rightRotate17(a) (rightRotate((a), 17)) +#define rightRotate18(a) (rightRotate((a), 18)) +#define rightRotate19(a) (rightRotate((a), 19)) +#define rightRotate20(a) (rightRotate((a), 20)) +#define rightRotate21(a) (rightRotate((a), 21)) +#define rightRotate22(a) (rightRotate((a), 22)) +#define rightRotate23(a) (rightRotate((a), 23)) +#define rightRotate24(a) (rightRotate((a), 24)) +#define rightRotate25(a) (rightRotate((a), 25)) +#define rightRotate26(a) (rightRotate((a), 26)) +#define rightRotate27(a) (rightRotate((a), 27)) +#define rightRotate28(a) (rightRotate((a), 28)) +#define rightRotate29(a) (rightRotate((a), 29)) +#define rightRotate30(a) (rightRotate((a), 30)) +#define rightRotate31(a) (rightRotate((a), 31)) + +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ 
+#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Rotation macros for 64-bit arguments */ + +/* Generic left rotate */ +#define leftRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (64 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (64 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. 
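The composed forms above rely on simple rotation identities: for example, a left rotation by 5 is a left rotation by 8 followed by a right rotation by 3, and a left rotation by 28 is the same as a right rotation by 4. A small sanity-check sketch, assuming the generic macros from this header and a GCC/Clang compiler for the statement expressions (check_composed_rotations is a hypothetical helper name):

    #include <assert.h>
    #include <stdint.h>
    #include "internal-util.h"

    static void check_composed_rotations(void)
    {
        const uint32_t x = 0x12345678;
        /* leftRotate5 is composed as "left by 8, then right by 1 three times" */
        assert(leftRotate(x, 5) ==
               rightRotate(rightRotate(rightRotate(leftRotate(x, 8), 1), 1), 1));
        /* leftRotate28 is composed as four right rotations by 1 */
        assert(leftRotate(x, 28) == rightRotate(x, 4));
    }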
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_64(a) (leftRotate_64((a), 1)) +#define leftRotate2_64(a) (leftRotate_64((a), 2)) +#define leftRotate3_64(a) (leftRotate_64((a), 3)) +#define leftRotate4_64(a) (leftRotate_64((a), 4)) +#define leftRotate5_64(a) (leftRotate_64((a), 5)) +#define leftRotate6_64(a) (leftRotate_64((a), 6)) +#define leftRotate7_64(a) (leftRotate_64((a), 7)) +#define leftRotate8_64(a) (leftRotate_64((a), 8)) +#define leftRotate9_64(a) (leftRotate_64((a), 9)) +#define leftRotate10_64(a) (leftRotate_64((a), 10)) +#define leftRotate11_64(a) (leftRotate_64((a), 11)) +#define leftRotate12_64(a) (leftRotate_64((a), 12)) +#define leftRotate13_64(a) (leftRotate_64((a), 13)) +#define leftRotate14_64(a) (leftRotate_64((a), 14)) +#define leftRotate15_64(a) (leftRotate_64((a), 15)) +#define leftRotate16_64(a) (leftRotate_64((a), 16)) +#define leftRotate17_64(a) (leftRotate_64((a), 17)) +#define leftRotate18_64(a) (leftRotate_64((a), 18)) +#define leftRotate19_64(a) (leftRotate_64((a), 19)) +#define leftRotate20_64(a) (leftRotate_64((a), 20)) +#define leftRotate21_64(a) (leftRotate_64((a), 21)) +#define leftRotate22_64(a) (leftRotate_64((a), 22)) +#define leftRotate23_64(a) (leftRotate_64((a), 23)) +#define leftRotate24_64(a) (leftRotate_64((a), 24)) +#define leftRotate25_64(a) (leftRotate_64((a), 25)) +#define leftRotate26_64(a) (leftRotate_64((a), 26)) +#define leftRotate27_64(a) (leftRotate_64((a), 27)) +#define leftRotate28_64(a) (leftRotate_64((a), 28)) +#define leftRotate29_64(a) (leftRotate_64((a), 29)) +#define leftRotate30_64(a) (leftRotate_64((a), 30)) +#define leftRotate31_64(a) (leftRotate_64((a), 31)) +#define leftRotate32_64(a) (leftRotate_64((a), 32)) +#define leftRotate33_64(a) (leftRotate_64((a), 33)) +#define leftRotate34_64(a) (leftRotate_64((a), 34)) +#define leftRotate35_64(a) (leftRotate_64((a), 35)) +#define leftRotate36_64(a) (leftRotate_64((a), 36)) +#define leftRotate37_64(a) (leftRotate_64((a), 37)) +#define leftRotate38_64(a) (leftRotate_64((a), 38)) +#define leftRotate39_64(a) (leftRotate_64((a), 39)) +#define leftRotate40_64(a) (leftRotate_64((a), 40)) +#define leftRotate41_64(a) (leftRotate_64((a), 41)) +#define leftRotate42_64(a) (leftRotate_64((a), 42)) +#define leftRotate43_64(a) (leftRotate_64((a), 43)) +#define leftRotate44_64(a) (leftRotate_64((a), 44)) +#define leftRotate45_64(a) (leftRotate_64((a), 45)) +#define leftRotate46_64(a) (leftRotate_64((a), 46)) +#define leftRotate47_64(a) (leftRotate_64((a), 47)) +#define leftRotate48_64(a) (leftRotate_64((a), 48)) +#define leftRotate49_64(a) (leftRotate_64((a), 49)) +#define leftRotate50_64(a) (leftRotate_64((a), 50)) +#define leftRotate51_64(a) (leftRotate_64((a), 51)) +#define leftRotate52_64(a) (leftRotate_64((a), 52)) +#define leftRotate53_64(a) (leftRotate_64((a), 53)) +#define leftRotate54_64(a) (leftRotate_64((a), 54)) +#define leftRotate55_64(a) (leftRotate_64((a), 55)) +#define leftRotate56_64(a) (leftRotate_64((a), 56)) +#define leftRotate57_64(a) (leftRotate_64((a), 57)) +#define leftRotate58_64(a) (leftRotate_64((a), 58)) +#define leftRotate59_64(a) (leftRotate_64((a), 59)) +#define leftRotate60_64(a) (leftRotate_64((a), 60)) +#define leftRotate61_64(a) (leftRotate_64((a), 61)) +#define leftRotate62_64(a) (leftRotate_64((a), 62)) +#define leftRotate63_64(a) (leftRotate_64((a), 63)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_64(a) (rightRotate_64((a), 1)) +#define rightRotate2_64(a) (rightRotate_64((a), 2)) +#define rightRotate3_64(a) (rightRotate_64((a), 3)) +#define rightRotate4_64(a) (rightRotate_64((a), 4)) +#define rightRotate5_64(a) (rightRotate_64((a), 5)) +#define rightRotate6_64(a) (rightRotate_64((a), 6)) +#define rightRotate7_64(a) (rightRotate_64((a), 7)) +#define rightRotate8_64(a) (rightRotate_64((a), 8)) +#define rightRotate9_64(a) (rightRotate_64((a), 9)) +#define rightRotate10_64(a) (rightRotate_64((a), 10)) +#define rightRotate11_64(a) (rightRotate_64((a), 11)) +#define rightRotate12_64(a) (rightRotate_64((a), 12)) +#define rightRotate13_64(a) (rightRotate_64((a), 13)) +#define rightRotate14_64(a) (rightRotate_64((a), 14)) +#define rightRotate15_64(a) (rightRotate_64((a), 15)) +#define rightRotate16_64(a) (rightRotate_64((a), 16)) +#define rightRotate17_64(a) (rightRotate_64((a), 17)) +#define rightRotate18_64(a) (rightRotate_64((a), 18)) +#define rightRotate19_64(a) (rightRotate_64((a), 19)) +#define rightRotate20_64(a) (rightRotate_64((a), 20)) +#define rightRotate21_64(a) (rightRotate_64((a), 21)) +#define rightRotate22_64(a) (rightRotate_64((a), 22)) +#define rightRotate23_64(a) (rightRotate_64((a), 23)) +#define rightRotate24_64(a) (rightRotate_64((a), 24)) +#define rightRotate25_64(a) (rightRotate_64((a), 25)) +#define rightRotate26_64(a) (rightRotate_64((a), 26)) +#define rightRotate27_64(a) (rightRotate_64((a), 27)) +#define rightRotate28_64(a) (rightRotate_64((a), 28)) +#define rightRotate29_64(a) (rightRotate_64((a), 29)) +#define rightRotate30_64(a) (rightRotate_64((a), 30)) +#define rightRotate31_64(a) (rightRotate_64((a), 31)) +#define rightRotate32_64(a) (rightRotate_64((a), 32)) +#define rightRotate33_64(a) (rightRotate_64((a), 33)) +#define rightRotate34_64(a) (rightRotate_64((a), 34)) +#define rightRotate35_64(a) (rightRotate_64((a), 35)) +#define rightRotate36_64(a) (rightRotate_64((a), 36)) +#define rightRotate37_64(a) (rightRotate_64((a), 37)) +#define rightRotate38_64(a) (rightRotate_64((a), 38)) +#define rightRotate39_64(a) (rightRotate_64((a), 39)) +#define rightRotate40_64(a) (rightRotate_64((a), 40)) +#define rightRotate41_64(a) (rightRotate_64((a), 41)) +#define rightRotate42_64(a) (rightRotate_64((a), 42)) +#define rightRotate43_64(a) (rightRotate_64((a), 43)) +#define rightRotate44_64(a) (rightRotate_64((a), 44)) +#define rightRotate45_64(a) (rightRotate_64((a), 45)) +#define rightRotate46_64(a) (rightRotate_64((a), 46)) +#define rightRotate47_64(a) (rightRotate_64((a), 47)) +#define rightRotate48_64(a) (rightRotate_64((a), 48)) +#define rightRotate49_64(a) (rightRotate_64((a), 49)) +#define rightRotate50_64(a) (rightRotate_64((a), 50)) +#define rightRotate51_64(a) (rightRotate_64((a), 51)) +#define rightRotate52_64(a) (rightRotate_64((a), 52)) +#define rightRotate53_64(a) (rightRotate_64((a), 53)) +#define rightRotate54_64(a) (rightRotate_64((a), 54)) +#define rightRotate55_64(a) (rightRotate_64((a), 55)) +#define rightRotate56_64(a) (rightRotate_64((a), 56)) +#define rightRotate57_64(a) (rightRotate_64((a), 57)) +#define rightRotate58_64(a) (rightRotate_64((a), 58)) +#define rightRotate59_64(a) (rightRotate_64((a), 59)) +#define rightRotate60_64(a) (rightRotate_64((a), 60)) +#define rightRotate61_64(a) (rightRotate_64((a), 61)) +#define rightRotate62_64(a) (rightRotate_64((a), 62)) +#define rightRotate63_64(a) (rightRotate_64((a), 63)) + +/* 
Rotate a 16-bit value left by a number of bits */ +#define leftRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (16 - (bits))); \ + })) + +/* Rotate a 16-bit value right by a number of bits */ +#define rightRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (16 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_16(a) (leftRotate_16((a), 1)) +#define leftRotate2_16(a) (leftRotate_16((a), 2)) +#define leftRotate3_16(a) (leftRotate_16((a), 3)) +#define leftRotate4_16(a) (leftRotate_16((a), 4)) +#define leftRotate5_16(a) (leftRotate_16((a), 5)) +#define leftRotate6_16(a) (leftRotate_16((a), 6)) +#define leftRotate7_16(a) (leftRotate_16((a), 7)) +#define leftRotate8_16(a) (leftRotate_16((a), 8)) +#define leftRotate9_16(a) (leftRotate_16((a), 9)) +#define leftRotate10_16(a) (leftRotate_16((a), 10)) +#define leftRotate11_16(a) (leftRotate_16((a), 11)) +#define leftRotate12_16(a) (leftRotate_16((a), 12)) +#define leftRotate13_16(a) (leftRotate_16((a), 13)) +#define leftRotate14_16(a) (leftRotate_16((a), 14)) +#define leftRotate15_16(a) (leftRotate_16((a), 15)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_16(a) (rightRotate_16((a), 1)) +#define rightRotate2_16(a) (rightRotate_16((a), 2)) +#define rightRotate3_16(a) (rightRotate_16((a), 3)) +#define rightRotate4_16(a) (rightRotate_16((a), 4)) +#define rightRotate5_16(a) (rightRotate_16((a), 5)) +#define rightRotate6_16(a) (rightRotate_16((a), 6)) +#define rightRotate7_16(a) (rightRotate_16((a), 7)) +#define rightRotate8_16(a) (rightRotate_16((a), 8)) +#define rightRotate9_16(a) (rightRotate_16((a), 9)) +#define rightRotate10_16(a) (rightRotate_16((a), 10)) +#define rightRotate11_16(a) (rightRotate_16((a), 11)) +#define rightRotate12_16(a) (rightRotate_16((a), 12)) +#define rightRotate13_16(a) (rightRotate_16((a), 13)) +#define rightRotate14_16(a) (rightRotate_16((a), 14)) +#define rightRotate15_16(a) (rightRotate_16((a), 15)) + +/* Rotate an 8-bit value left by a number of bits */ +#define leftRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (8 - (bits))); \ + })) + +/* Rotate an 8-bit value right by a number of bits */ +#define rightRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (8 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_8(a) (leftRotate_8((a), 1)) +#define leftRotate2_8(a) (leftRotate_8((a), 2)) +#define leftRotate3_8(a) (leftRotate_8((a), 3)) +#define leftRotate4_8(a) (leftRotate_8((a), 4)) +#define leftRotate5_8(a) (leftRotate_8((a), 5)) +#define leftRotate6_8(a) (leftRotate_8((a), 6)) +#define leftRotate7_8(a) (leftRotate_8((a), 7)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_8(a) (rightRotate_8((a), 1)) +#define rightRotate2_8(a) (rightRotate_8((a), 2)) +#define rightRotate3_8(a) (rightRotate_8((a), 3)) +#define rightRotate4_8(a) (rightRotate_8((a), 4)) +#define rightRotate5_8(a) (rightRotate_8((a), 5)) +#define rightRotate6_8(a) (rightRotate_8((a), 6)) +#define rightRotate7_8(a) (rightRotate_8((a), 7)) + +#endif diff --git a/sparkle/Implementations/crypto_hash/esch256v1/rhys/sparkle.c b/sparkle/Implementations/crypto_hash/esch256v1/rhys/sparkle.c new file mode 100644 index 0000000..e2aa25a --- /dev/null +++ b/sparkle/Implementations/crypto_hash/esch256v1/rhys/sparkle.c @@ -0,0 +1,1135 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "sparkle.h" +#include "internal-sparkle.h" +#include <string.h> + +aead_cipher_t const schwaemm_256_128_cipher = { + "Schwaemm256-128", + SCHWAEMM_256_128_KEY_SIZE, + SCHWAEMM_256_128_NONCE_SIZE, + SCHWAEMM_256_128_TAG_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + schwaemm_256_128_aead_encrypt, + schwaemm_256_128_aead_decrypt +}; + +aead_cipher_t const schwaemm_192_192_cipher = { + "Schwaemm192-192", + SCHWAEMM_192_192_KEY_SIZE, + SCHWAEMM_192_192_NONCE_SIZE, + SCHWAEMM_192_192_TAG_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + schwaemm_192_192_aead_encrypt, + schwaemm_192_192_aead_decrypt +}; + +aead_cipher_t const schwaemm_128_128_cipher = { + "Schwaemm128-128", + SCHWAEMM_128_128_KEY_SIZE, + SCHWAEMM_128_128_NONCE_SIZE, + SCHWAEMM_128_128_TAG_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + schwaemm_128_128_aead_encrypt, + schwaemm_128_128_aead_decrypt +}; + +aead_cipher_t const schwaemm_256_256_cipher = { + "Schwaemm256-256", + SCHWAEMM_256_256_KEY_SIZE, + SCHWAEMM_256_256_NONCE_SIZE, + SCHWAEMM_256_256_TAG_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + schwaemm_256_256_aead_encrypt, + schwaemm_256_256_aead_decrypt +}; + +aead_hash_algorithm_t const esch_256_hash_algorithm = { + "Esch256", + sizeof(esch_256_hash_state_t), + ESCH_256_HASH_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + esch_256_hash, + (aead_hash_init_t)esch_256_hash_init, + (aead_hash_update_t)esch_256_hash_update, + (aead_hash_finalize_t)esch_256_hash_finalize, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +aead_hash_algorithm_t const esch_384_hash_algorithm = { + "Esch384", + sizeof(esch_384_hash_state_t), + ESCH_384_HASH_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + esch_384_hash, + (aead_hash_init_t)esch_384_hash_init, + (aead_hash_update_t)esch_384_hash_update, + (aead_hash_finalize_t)esch_384_hash_finalize, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +/** + * \def DOMAIN(value) + * \brief Build a domain separation value as a 32-bit word. + * + * \param value The base value. + * \return The domain separation value as a 32-bit word. + */ +#if defined(LW_UTIL_LITTLE_ENDIAN) +#define DOMAIN(value) (((uint32_t)(value)) << 24) +#else +#define DOMAIN(value) (value) +#endif + +/** + * \brief Rate at which bytes are processed by Schwaemm256-128. + */ +#define SCHWAEMM_256_128_RATE 32 + +/** + * \brief Pointer to the left of the state for Schwaemm256-128. + */ +#define SCHWAEMM_256_128_LEFT(s) ((unsigned char *)&(s[0])) + +/** + * \brief Pointer to the right of the state for Schwaemm256-128. + */ +#define SCHWAEMM_256_128_RIGHT(s) \ + (SCHWAEMM_256_128_LEFT(s) + SCHWAEMM_256_128_RATE) + +/** + * \brief Perform the rho1 and rate whitening steps for Schwaemm256-128. + * + * \param s SPARKLE-384 state. + */ +#define schwaemm_256_128_rho(s) \ + do { \ + uint32_t t = s[0]; \ + s[0] = s[4] ^ s[8]; \ + s[4] ^= t ^ s[8]; \ + t = s[1]; \ + s[1] = s[5] ^ s[9]; \ + s[5] ^= t ^ s[9]; \ + t = s[2]; \ + s[2] = s[6] ^ s[10]; \ + s[6] ^= t ^ s[10]; \ + t = s[3]; \ + s[3] = s[7] ^ s[11]; \ + s[7] ^= t ^ s[11]; \ + } while (0) + +/** + * \brief Authenticates the associated data for Schwaemm256-128. + * + * \param s SPARKLE-384 state. + * \param ad Points to the associated data. + * \param adlen Length of the associated data; must be >= 1.
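A small sketch of what the DOMAIN() macro above does to the in-memory layout of a state word (hypothetical test code, assuming a little-endian build where LW_UTIL_LITTLE_ENDIAN is defined):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t w = 0;
        unsigned char view[4];
        w ^= ((uint32_t)0x05) << 24;   /* what DOMAIN(0x05) expands to on this build */
        memcpy(view, &w, sizeof(view));
        /* Prints "00 00 00 05" on a little-endian host: the domain constant lands
         * in the last byte of the word as it sits in memory, which is the same
         * byte position a big-endian host reaches with DOMAIN(0x05) == 0x05. */
        printf("%02x %02x %02x %02x\n", view[0], view[1], view[2], view[3]);
        return 0;
    }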
+ */ +static void schwaemm_256_128_authenticate + (uint32_t s[SPARKLE_384_STATE_SIZE], + const unsigned char *ad, unsigned long long adlen) +{ + while (adlen > SCHWAEMM_256_128_RATE) { + schwaemm_256_128_rho(s); + lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); + sparkle_384(s, 7); + ad += SCHWAEMM_256_128_RATE; + adlen -= SCHWAEMM_256_128_RATE; + } + if (adlen == SCHWAEMM_256_128_RATE) { + s[11] ^= DOMAIN(0x05); + schwaemm_256_128_rho(s); + lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); + } else { + unsigned temp = (unsigned)adlen; + s[11] ^= DOMAIN(0x04); + schwaemm_256_128_rho(s); + lw_xor_block((unsigned char *)s, ad, temp); + ((unsigned char *)s)[temp] ^= 0x80; + } + sparkle_384(s, 11); +} + +int schwaemm_256_128_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + uint32_t s[SPARKLE_384_STATE_SIZE]; + uint8_t block[SCHWAEMM_256_128_RATE]; + (void)nsec; + + /* Set the length of the returned ciphertext */ + *clen = mlen + SCHWAEMM_256_128_TAG_SIZE; + + /* Initialize the state with the nonce and the key */ + memcpy(SCHWAEMM_256_128_LEFT(s), npub, SCHWAEMM_256_128_NONCE_SIZE); + memcpy(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_KEY_SIZE); + sparkle_384(s, 11); + + /* Process the associated data */ + if (adlen > 0) + schwaemm_256_128_authenticate(s, ad, adlen); + + /* Encrypt the plaintext to produce the ciphertext */ + if (mlen > 0) { + while (mlen > SCHWAEMM_256_128_RATE) { + lw_xor_block_2_src + (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); + schwaemm_256_128_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); + sparkle_384(s, 7); + memcpy(c, block, SCHWAEMM_256_128_RATE); + c += SCHWAEMM_256_128_RATE; + m += SCHWAEMM_256_128_RATE; + mlen -= SCHWAEMM_256_128_RATE; + } + if (mlen == SCHWAEMM_256_128_RATE) { + lw_xor_block_2_src + (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); + s[11] ^= DOMAIN(0x07); + schwaemm_256_128_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); + memcpy(c, block, SCHWAEMM_256_128_RATE); + } else { + unsigned temp = (unsigned)mlen; + lw_xor_block_2_src(block, (unsigned char *)s, m, temp); + s[11] ^= DOMAIN(0x06); + schwaemm_256_128_rho(s); + lw_xor_block((unsigned char *)s, m, temp); + ((unsigned char *)s)[temp] ^= 0x80; + memcpy(c, block, temp); + } + sparkle_384(s, 11); + c += mlen; + } + + /* Generate the authentication tag */ + lw_xor_block_2_src + (c, SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_TAG_SIZE); + return 0; +} + +int schwaemm_256_128_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + uint32_t s[SPARKLE_384_STATE_SIZE]; + unsigned char *mtemp = m; + (void)nsec; + + /* Validate the ciphertext length and set the return "mlen" value */ + if (clen < SCHWAEMM_256_128_TAG_SIZE) + return -1; + *mlen = clen - SCHWAEMM_256_128_TAG_SIZE; + + /* Initialize the state with the nonce and the key */ + memcpy(SCHWAEMM_256_128_LEFT(s), npub, SCHWAEMM_256_128_NONCE_SIZE); + memcpy(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_KEY_SIZE); + sparkle_384(s, 11); + + /* Process the associated data */ + if (adlen > 0) + schwaemm_256_128_authenticate(s, ad, adlen); + + /* Decrypt the ciphertext to 
produce the plaintext */ + clen -= SCHWAEMM_256_128_TAG_SIZE; + if (clen > 0) { + while (clen > SCHWAEMM_256_128_RATE) { + lw_xor_block_2_src + (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); + schwaemm_256_128_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); + sparkle_384(s, 7); + c += SCHWAEMM_256_128_RATE; + m += SCHWAEMM_256_128_RATE; + clen -= SCHWAEMM_256_128_RATE; + } + if (clen == SCHWAEMM_256_128_RATE) { + lw_xor_block_2_src + (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); + s[11] ^= DOMAIN(0x07); + schwaemm_256_128_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); + } else { + unsigned temp = (unsigned)clen; + lw_xor_block_2_src(m, (unsigned char *)s, c, temp); + s[11] ^= DOMAIN(0x06); + schwaemm_256_128_rho(s); + lw_xor_block((unsigned char *)s, m, temp); + ((unsigned char *)s)[temp] ^= 0x80; + } + sparkle_384(s, 11); + c += clen; + } + + /* Check the authentication tag */ + lw_xor_block(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_TAG_SIZE); + return aead_check_tag + (mtemp, *mlen, SCHWAEMM_256_128_RIGHT(s), c, SCHWAEMM_256_128_TAG_SIZE); +} + +/** + * \brief Rate at which bytes are processed by Schwaemm192-192. + */ +#define SCHWAEMM_192_192_RATE 24 + +/** + * \brief Pointer to the left of the state for Schwaemm192-192. + */ +#define SCHWAEMM_192_192_LEFT(s) ((unsigned char *)&(s[0])) + +/** + * \brief Pointer to the right of the state for Schwaemm192-192. + */ +#define SCHWAEMM_192_192_RIGHT(s) \ + (SCHWAEMM_192_192_LEFT(s) + SCHWAEMM_192_192_RATE) + +/** + * \brief Perform the rho1 and rate whitening steps for Schwaemm192-192. + * + * \param s SPARKLE-384 state. + */ +#define schwaemm_192_192_rho(s) \ + do { \ + uint32_t t = s[0]; \ + s[0] = s[3] ^ s[6]; \ + s[3] ^= t ^ s[9]; \ + t = s[1]; \ + s[1] = s[4] ^ s[7]; \ + s[4] ^= t ^ s[10]; \ + t = s[2]; \ + s[2] = s[5] ^ s[8]; \ + s[5] ^= t ^ s[11]; \ + } while (0) + +/** + * \brief Authenticates the associated data for Schwaemm192-192. + * + * \param s SPARKLE-384 state. + * \param ad Points to the associated data. + * \param adlen Length of the associated data; must be >= 1. 
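The final-block handling above follows a simple 10* padding convention: a partial block is XORed into the rate, a single 0x80 byte is XORed immediately after it, and the domain constant distinguishes full from partial blocks. A hypothetical fragment showing that convention in isolation (buffer names are illustrative; lw_xor_block comes from "internal-util.h"):

    unsigned char rate[SCHWAEMM_192_192_RATE] = {0}; /* 24-byte rate, zeroed for illustration */
    const unsigned char last[5] = {1, 2, 3, 4, 5};   /* short final block */
    lw_xor_block(rate, last, sizeof(last));          /* absorb the partial bytes */
    rate[sizeof(last)] ^= 0x80;                      /* 10* padding marker right after them */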
+ */ +static void schwaemm_192_192_authenticate + (uint32_t s[SPARKLE_384_STATE_SIZE], + const unsigned char *ad, unsigned long long adlen) +{ + while (adlen > SCHWAEMM_192_192_RATE) { + schwaemm_192_192_rho(s); + lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); + sparkle_384(s, 7); + ad += SCHWAEMM_192_192_RATE; + adlen -= SCHWAEMM_192_192_RATE; + } + if (adlen == SCHWAEMM_192_192_RATE) { + s[11] ^= DOMAIN(0x09); + schwaemm_192_192_rho(s); + lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); + } else { + unsigned temp = (unsigned)adlen; + s[11] ^= DOMAIN(0x08); + schwaemm_192_192_rho(s); + lw_xor_block((unsigned char *)s, ad, temp); + ((unsigned char *)s)[temp] ^= 0x80; + } + sparkle_384(s, 11); +} + +int schwaemm_192_192_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + uint32_t s[SPARKLE_384_STATE_SIZE]; + uint8_t block[SCHWAEMM_192_192_RATE]; + (void)nsec; + + /* Set the length of the returned ciphertext */ + *clen = mlen + SCHWAEMM_192_192_TAG_SIZE; + + /* Initialize the state with the nonce and the key */ + memcpy(SCHWAEMM_192_192_LEFT(s), npub, SCHWAEMM_192_192_NONCE_SIZE); + memcpy(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_KEY_SIZE); + sparkle_384(s, 11); + + /* Process the associated data */ + if (adlen > 0) + schwaemm_192_192_authenticate(s, ad, adlen); + + /* Encrypt the plaintext to produce the ciphertext */ + if (mlen > 0) { + while (mlen > SCHWAEMM_192_192_RATE) { + lw_xor_block_2_src + (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); + schwaemm_192_192_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); + sparkle_384(s, 7); + memcpy(c, block, SCHWAEMM_192_192_RATE); + c += SCHWAEMM_192_192_RATE; + m += SCHWAEMM_192_192_RATE; + mlen -= SCHWAEMM_192_192_RATE; + } + if (mlen == SCHWAEMM_192_192_RATE) { + lw_xor_block_2_src + (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); + s[11] ^= DOMAIN(0x0B); + schwaemm_192_192_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); + memcpy(c, block, SCHWAEMM_192_192_RATE); + } else { + unsigned temp = (unsigned)mlen; + lw_xor_block_2_src(block, (unsigned char *)s, m, temp); + s[11] ^= DOMAIN(0x0A); + schwaemm_192_192_rho(s); + lw_xor_block((unsigned char *)s, m, temp); + ((unsigned char *)s)[temp] ^= 0x80; + memcpy(c, block, temp); + } + sparkle_384(s, 11); + c += mlen; + } + + /* Generate the authentication tag */ + lw_xor_block_2_src + (c, SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_TAG_SIZE); + return 0; +} + +int schwaemm_192_192_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + uint32_t s[SPARKLE_384_STATE_SIZE]; + unsigned char *mtemp = m; + (void)nsec; + + /* Validate the ciphertext length and set the return "mlen" value */ + if (clen < SCHWAEMM_192_192_TAG_SIZE) + return -1; + *mlen = clen - SCHWAEMM_192_192_TAG_SIZE; + + /* Initialize the state with the nonce and the key */ + memcpy(SCHWAEMM_192_192_LEFT(s), npub, SCHWAEMM_192_192_NONCE_SIZE); + memcpy(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_KEY_SIZE); + sparkle_384(s, 11); + + /* Process the associated data */ + if (adlen > 0) + schwaemm_192_192_authenticate(s, ad, adlen); + + /* Decrypt the ciphertext to 
produce the plaintext */ + clen -= SCHWAEMM_192_192_TAG_SIZE; + if (clen > 0) { + while (clen > SCHWAEMM_192_192_RATE) { + lw_xor_block_2_src + (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); + schwaemm_192_192_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); + sparkle_384(s, 7); + c += SCHWAEMM_192_192_RATE; + m += SCHWAEMM_192_192_RATE; + clen -= SCHWAEMM_192_192_RATE; + } + if (clen == SCHWAEMM_192_192_RATE) { + lw_xor_block_2_src + (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); + s[11] ^= DOMAIN(0x0B); + schwaemm_192_192_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); + } else { + unsigned temp = (unsigned)clen; + lw_xor_block_2_src(m, (unsigned char *)s, c, temp); + s[11] ^= DOMAIN(0x0A); + schwaemm_192_192_rho(s); + lw_xor_block((unsigned char *)s, m, temp); + ((unsigned char *)s)[temp] ^= 0x80; + } + sparkle_384(s, 11); + c += clen; + } + + /* Check the authentication tag */ + lw_xor_block(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_TAG_SIZE); + return aead_check_tag + (mtemp, *mlen, SCHWAEMM_192_192_RIGHT(s), c, SCHWAEMM_192_192_TAG_SIZE); +} + +/** + * \brief Rate at which bytes are processed by Schwaemm128-128. + */ +#define SCHWAEMM_128_128_RATE 16 + +/** + * \brief Pointer to the left of the state for Schwaemm128-128. + */ +#define SCHWAEMM_128_128_LEFT(s) ((unsigned char *)&(s[0])) + +/** + * \brief Pointer to the right of the state for Schwaemm128-128. + */ +#define SCHWAEMM_128_128_RIGHT(s) \ + (SCHWAEMM_128_128_LEFT(s) + SCHWAEMM_128_128_RATE) + +/** + * \brief Perform the rho1 and rate whitening steps for Schwaemm128-128. + * + * \param s SPARKLE-256 state. + */ +#define schwaemm_128_128_rho(s) \ + do { \ + uint32_t t = s[0]; \ + s[0] = s[2] ^ s[4]; \ + s[2] ^= t ^ s[6]; \ + t = s[1]; \ + s[1] = s[3] ^ s[5]; \ + s[3] ^= t ^ s[7]; \ + } while (0) + +/** + * \brief Authenticates the associated data for Schwaemm128-128. + * + * \param s SPARKLE-256 state. + * \param ad Points to the associated data. + * \param adlen Length of the associated data; must be >= 1. 
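Stripped of the capacity whitening, the rho1 core used by schwaemm_128_128_rho above is the Feistel-style map (x, y) -> (y, x ^ y) on the two halves of the rate. A hypothetical two-word illustration of that core step:

    uint32_t x = 0x11111111, y = 0x22222222; /* left and right rate halves */
    uint32_t t = x;
    x = y;        /* new left half:  old right half          */
    y ^= t;       /* new right half: old left XOR old right  */
    /* x == 0x22222222, y == 0x33333333 */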
+ */ +static void schwaemm_128_128_authenticate + (uint32_t s[SPARKLE_256_STATE_SIZE], + const unsigned char *ad, unsigned long long adlen) +{ + while (adlen > SCHWAEMM_128_128_RATE) { + schwaemm_128_128_rho(s); + lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); + sparkle_256(s, 7); + ad += SCHWAEMM_128_128_RATE; + adlen -= SCHWAEMM_128_128_RATE; + } + if (adlen == SCHWAEMM_128_128_RATE) { + s[7] ^= DOMAIN(0x05); + schwaemm_128_128_rho(s); + lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); + } else { + unsigned temp = (unsigned)adlen; + s[7] ^= DOMAIN(0x04); + schwaemm_128_128_rho(s); + lw_xor_block((unsigned char *)s, ad, temp); + ((unsigned char *)s)[temp] ^= 0x80; + } + sparkle_256(s, 10); +} + +int schwaemm_128_128_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + uint32_t s[SPARKLE_256_STATE_SIZE]; + uint8_t block[SCHWAEMM_128_128_RATE]; + (void)nsec; + + /* Set the length of the returned ciphertext */ + *clen = mlen + SCHWAEMM_128_128_TAG_SIZE; + + /* Initialize the state with the nonce and the key */ + memcpy(SCHWAEMM_128_128_LEFT(s), npub, SCHWAEMM_128_128_NONCE_SIZE); + memcpy(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_KEY_SIZE); + sparkle_256(s, 10); + + /* Process the associated data */ + if (adlen > 0) + schwaemm_128_128_authenticate(s, ad, adlen); + + /* Encrypt the plaintext to produce the ciphertext */ + if (mlen > 0) { + while (mlen > SCHWAEMM_128_128_RATE) { + lw_xor_block_2_src + (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); + schwaemm_128_128_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); + sparkle_256(s, 7); + memcpy(c, block, SCHWAEMM_128_128_RATE); + c += SCHWAEMM_128_128_RATE; + m += SCHWAEMM_128_128_RATE; + mlen -= SCHWAEMM_128_128_RATE; + } + if (mlen == SCHWAEMM_128_128_RATE) { + lw_xor_block_2_src + (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); + s[7] ^= DOMAIN(0x07); + schwaemm_128_128_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); + memcpy(c, block, SCHWAEMM_128_128_RATE); + } else { + unsigned temp = (unsigned)mlen; + lw_xor_block_2_src(block, (unsigned char *)s, m, temp); + s[7] ^= DOMAIN(0x06); + schwaemm_128_128_rho(s); + lw_xor_block((unsigned char *)s, m, temp); + ((unsigned char *)s)[temp] ^= 0x80; + memcpy(c, block, temp); + } + sparkle_256(s, 10); + c += mlen; + } + + /* Generate the authentication tag */ + lw_xor_block_2_src + (c, SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_TAG_SIZE); + return 0; +} + +int schwaemm_128_128_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + uint32_t s[SPARKLE_256_STATE_SIZE]; + unsigned char *mtemp = m; + (void)nsec; + + /* Validate the ciphertext length and set the return "mlen" value */ + if (clen < SCHWAEMM_128_128_TAG_SIZE) + return -1; + *mlen = clen - SCHWAEMM_128_128_TAG_SIZE; + + /* Initialize the state with the nonce and the key */ + memcpy(SCHWAEMM_128_128_LEFT(s), npub, SCHWAEMM_128_128_NONCE_SIZE); + memcpy(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_KEY_SIZE); + sparkle_256(s, 10); + + /* Process the associated data */ + if (adlen > 0) + schwaemm_128_128_authenticate(s, ad, adlen); + + /* Decrypt the ciphertext to 
produce the plaintext */ + clen -= SCHWAEMM_128_128_TAG_SIZE; + if (clen > 0) { + while (clen > SCHWAEMM_128_128_RATE) { + lw_xor_block_2_src + (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); + schwaemm_128_128_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); + sparkle_256(s, 7); + c += SCHWAEMM_128_128_RATE; + m += SCHWAEMM_128_128_RATE; + clen -= SCHWAEMM_128_128_RATE; + } + if (clen == SCHWAEMM_128_128_RATE) { + lw_xor_block_2_src + (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); + s[7] ^= DOMAIN(0x07); + schwaemm_128_128_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); + } else { + unsigned temp = (unsigned)clen; + lw_xor_block_2_src(m, (unsigned char *)s, c, temp); + s[7] ^= DOMAIN(0x06); + schwaemm_128_128_rho(s); + lw_xor_block((unsigned char *)s, m, temp); + ((unsigned char *)s)[temp] ^= 0x80; + } + sparkle_256(s, 10); + c += clen; + } + + /* Check the authentication tag */ + lw_xor_block(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_TAG_SIZE); + return aead_check_tag + (mtemp, *mlen, SCHWAEMM_128_128_RIGHT(s), c, SCHWAEMM_128_128_TAG_SIZE); +} + +/** + * \brief Rate at which bytes are processed by Schwaemm256-256. + */ +#define SCHWAEMM_256_256_RATE 32 + +/** + * \brief Pointer to the left of the state for Schwaemm256-256. + */ +#define SCHWAEMM_256_256_LEFT(s) ((unsigned char *)&(s[0])) + +/** + * \brief Pointer to the right of the state for Schwaemm256-256. + */ +#define SCHWAEMM_256_256_RIGHT(s) \ + (SCHWAEMM_256_256_LEFT(s) + SCHWAEMM_256_256_RATE) + +/** + * \brief Perform the rho1 and rate whitening steps for Schwaemm256-256. + * + * \param s SPARKLE-512 state. + */ +#define schwaemm_256_256_rho(s) \ + do { \ + uint32_t t = s[0]; \ + s[0] = s[4] ^ s[8]; \ + s[4] ^= t ^ s[12]; \ + t = s[1]; \ + s[1] = s[5] ^ s[9]; \ + s[5] ^= t ^ s[13]; \ + t = s[2]; \ + s[2] = s[6] ^ s[10]; \ + s[6] ^= t ^ s[14]; \ + t = s[3]; \ + s[3] = s[7] ^ s[11]; \ + s[7] ^= t ^ s[15]; \ + } while (0) + +/** + * \brief Authenticates the associated data for Schwaemm256-256. + * + * \param s SPARKLE-512 state. + * \param ad Points to the associated data. + * \param adlen Length of the associated data; must be >= 1. 
+ */ +static void schwaemm_256_256_authenticate + (uint32_t s[SPARKLE_512_STATE_SIZE], + const unsigned char *ad, unsigned long long adlen) +{ + while (adlen > SCHWAEMM_256_256_RATE) { + schwaemm_256_256_rho(s); + lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); + sparkle_512(s, 8); + ad += SCHWAEMM_256_256_RATE; + adlen -= SCHWAEMM_256_256_RATE; + } + if (adlen == SCHWAEMM_256_256_RATE) { + s[15] ^= DOMAIN(0x11); + schwaemm_256_256_rho(s); + lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); + } else { + unsigned temp = (unsigned)adlen; + s[15] ^= DOMAIN(0x10); + schwaemm_256_256_rho(s); + lw_xor_block((unsigned char *)s, ad, temp); + ((unsigned char *)s)[temp] ^= 0x80; + } + sparkle_512(s, 12); +} + +int schwaemm_256_256_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + uint32_t s[SPARKLE_512_STATE_SIZE]; + uint8_t block[SCHWAEMM_256_256_RATE]; + (void)nsec; + + /* Set the length of the returned ciphertext */ + *clen = mlen + SCHWAEMM_256_256_TAG_SIZE; + + /* Initialize the state with the nonce and the key */ + memcpy(SCHWAEMM_256_256_LEFT(s), npub, SCHWAEMM_256_256_NONCE_SIZE); + memcpy(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_KEY_SIZE); + sparkle_512(s, 12); + + /* Process the associated data */ + if (adlen > 0) + schwaemm_256_256_authenticate(s, ad, adlen); + + /* Encrypt the plaintext to produce the ciphertext */ + if (mlen > 0) { + while (mlen > SCHWAEMM_256_256_RATE) { + lw_xor_block_2_src + (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); + schwaemm_256_256_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); + sparkle_512(s, 8); + memcpy(c, block, SCHWAEMM_256_256_RATE); + c += SCHWAEMM_256_256_RATE; + m += SCHWAEMM_256_256_RATE; + mlen -= SCHWAEMM_256_256_RATE; + } + if (mlen == SCHWAEMM_256_256_RATE) { + lw_xor_block_2_src + (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); + s[15] ^= DOMAIN(0x13); + schwaemm_256_256_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); + memcpy(c, block, SCHWAEMM_256_256_RATE); + } else { + unsigned temp = (unsigned)mlen; + lw_xor_block_2_src(block, (unsigned char *)s, m, temp); + s[15] ^= DOMAIN(0x12); + schwaemm_256_256_rho(s); + lw_xor_block((unsigned char *)s, m, temp); + ((unsigned char *)s)[temp] ^= 0x80; + memcpy(c, block, temp); + } + sparkle_512(s, 12); + c += mlen; + } + + /* Generate the authentication tag */ + lw_xor_block_2_src + (c, SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_TAG_SIZE); + return 0; +} + +int schwaemm_256_256_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + uint32_t s[SPARKLE_512_STATE_SIZE]; + unsigned char *mtemp = m; + (void)nsec; + + /* Validate the ciphertext length and set the return "mlen" value */ + if (clen < SCHWAEMM_256_256_TAG_SIZE) + return -1; + *mlen = clen - SCHWAEMM_256_256_TAG_SIZE; + + /* Initialize the state with the nonce and the key */ + memcpy(SCHWAEMM_256_256_LEFT(s), npub, SCHWAEMM_256_256_NONCE_SIZE); + memcpy(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_KEY_SIZE); + sparkle_512(s, 12); + + /* Process the associated data */ + if (adlen > 0) + schwaemm_256_256_authenticate(s, ad, adlen); + + /* Decrypt the ciphertext to 
produce the plaintext */ + clen -= SCHWAEMM_256_256_TAG_SIZE; + if (clen > 0) { + while (clen > SCHWAEMM_256_256_RATE) { + lw_xor_block_2_src + (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); + schwaemm_256_256_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); + sparkle_512(s, 8); + c += SCHWAEMM_256_256_RATE; + m += SCHWAEMM_256_256_RATE; + clen -= SCHWAEMM_256_256_RATE; + } + if (clen == SCHWAEMM_256_256_RATE) { + lw_xor_block_2_src + (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); + s[15] ^= DOMAIN(0x13); + schwaemm_256_256_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); + } else { + unsigned temp = (unsigned)clen; + lw_xor_block_2_src(m, (unsigned char *)s, c, temp); + s[15] ^= DOMAIN(0x12); + schwaemm_256_256_rho(s); + lw_xor_block((unsigned char *)s, m, temp); + ((unsigned char *)s)[temp] ^= 0x80; + } + sparkle_512(s, 12); + c += clen; + } + + /* Check the authentication tag */ + lw_xor_block(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_TAG_SIZE); + return aead_check_tag + (mtemp, *mlen, SCHWAEMM_256_256_RIGHT(s), c, SCHWAEMM_256_256_TAG_SIZE); +} + +/** + * \brief Rate at which bytes are processed by Esch256. + */ +#define ESCH_256_RATE 16 + +/** + * \brief Perform the M3 step for Esch256 to mix the input with the state. + * + * \param s SPARKLE-384 state. + * \param block Block of input data that has been padded to the rate. + * \param domain Domain separator for this phase. + */ +#define esch_256_m3(s, block, domain) \ + do { \ + uint32_t tx = (block)[0] ^ (block)[2]; \ + uint32_t ty = (block)[1] ^ (block)[3]; \ + tx = leftRotate16(tx ^ (tx << 16)); \ + ty = leftRotate16(ty ^ (ty << 16)); \ + s[0] ^= (block)[0] ^ ty; \ + s[1] ^= (block)[1] ^ tx; \ + s[2] ^= (block)[2] ^ ty; \ + s[3] ^= (block)[3] ^ tx; \ + if ((domain) != 0) \ + s[5] ^= DOMAIN(domain); \ + s[4] ^= ty; \ + s[5] ^= tx; \ + } while (0) + +/** @cond esch_256 */ + +/** + * \brief Word-based state for the Esch256 incremental hash mode. 
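The tx/ty mixing inside esch_256_m3 above applies, per 32-bit word, the linear map that sends the 16-bit halves (hi, lo) to (lo, hi ^ lo). A quick check of the identity it is built from, assuming leftRotate16 from "internal-util.h" (check_m3_linear_layer is a hypothetical helper name):

    #include <assert.h>
    #include <stdint.h>
    #include "internal-util.h"

    static void check_m3_linear_layer(void)
    {
        const uint32_t x = 0xAABBCCDD;
        uint32_t a = leftRotate16(x ^ (x << 16));    /* form used in esch_256_m3 */
        uint32_t b = leftRotate16(x) ^ (x & 0xFFFF); /* equivalent closed form */
        assert(a == b && a == 0xCCDD6666);
    }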
+ */ +typedef union +{ + struct { + uint32_t state[SPARKLE_384_STATE_SIZE]; + uint32_t block[4]; + unsigned char count; + } s; + unsigned long long align; + +} esch_256_hash_state_wt; + +/** @endcond */ + +int esch_256_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + uint32_t s[SPARKLE_384_STATE_SIZE]; + uint32_t block[ESCH_256_RATE / 4]; + memset(s, 0, sizeof(s)); + while (inlen > ESCH_256_RATE) { + memcpy(block, in, ESCH_256_RATE); + esch_256_m3(s, block, 0x00); + sparkle_384(s, 7); + in += ESCH_256_RATE; + inlen -= ESCH_256_RATE; + } + if (inlen == ESCH_256_RATE) { + memcpy(block, in, ESCH_256_RATE); + esch_256_m3(s, block, 0x02); + } else { + unsigned temp = (unsigned)inlen; + memcpy(block, in, temp); + ((unsigned char *)block)[temp] = 0x80; + memset(((unsigned char *)block) + temp + 1, 0, + ESCH_256_RATE - temp - 1); + esch_256_m3(s, block, 0x01); + } + sparkle_384(s, 11); + memcpy(out, s, ESCH_256_RATE); + sparkle_384(s, 7); + memcpy(out + ESCH_256_RATE, s, ESCH_256_RATE); + return 0; +} + +void esch_256_hash_init(esch_256_hash_state_t *state) +{ + memset(state, 0, sizeof(esch_256_hash_state_t)); +} + +void esch_256_hash_update + (esch_256_hash_state_t *state, const unsigned char *in, + unsigned long long inlen) +{ + esch_256_hash_state_wt *st = (esch_256_hash_state_wt *)state; + unsigned temp; + while (inlen > 0) { + if (st->s.count == ESCH_256_RATE) { + esch_256_m3(st->s.state, st->s.block, 0x00); + sparkle_384(st->s.state, 7); + st->s.count = 0; + } + temp = ESCH_256_RATE - st->s.count; + if (temp > inlen) + temp = (unsigned)inlen; + memcpy(((unsigned char *)(st->s.block)) + st->s.count, in, temp); + st->s.count += temp; + in += temp; + inlen -= temp; + } +} + +void esch_256_hash_finalize + (esch_256_hash_state_t *state, unsigned char *out) +{ + esch_256_hash_state_wt *st = (esch_256_hash_state_wt *)state; + + /* Pad and process the last block */ + if (st->s.count == ESCH_256_RATE) { + esch_256_m3(st->s.state, st->s.block, 0x02); + } else { + unsigned temp = st->s.count; + ((unsigned char *)(st->s.block))[temp] = 0x80; + memset(((unsigned char *)(st->s.block)) + temp + 1, 0, + ESCH_256_RATE - temp - 1); + esch_256_m3(st->s.state, st->s.block, 0x01); + } + sparkle_384(st->s.state, 11); + + /* Generate the final hash value */ + memcpy(out, st->s.state, ESCH_256_RATE); + sparkle_384(st->s.state, 7); + memcpy(out + ESCH_256_RATE, st->s.state, ESCH_256_RATE); +} + +/** + * \brief Rate at which bytes are processed by Esch384. + */ +#define ESCH_384_RATE 16 + +/** + * \brief Perform the M4 step for Esch384 to mix the input with the state. + * + * \param s SPARKLE-512 state. + * \param block Block of input data that has been padded to the rate. + * \param domain Domain separator for this phase. + */ +#define esch_384_m4(s, block, domain) \ + do { \ + uint32_t tx = block[0] ^ block[2]; \ + uint32_t ty = block[1] ^ block[3]; \ + tx = leftRotate16(tx ^ (tx << 16)); \ + ty = leftRotate16(ty ^ (ty << 16)); \ + s[0] ^= block[0] ^ ty; \ + s[1] ^= block[1] ^ tx; \ + s[2] ^= block[2] ^ ty; \ + s[3] ^= block[3] ^ tx; \ + if ((domain) != 0) \ + s[7] ^= DOMAIN(domain); \ + s[4] ^= ty; \ + s[5] ^= tx; \ + s[6] ^= ty; \ + s[7] ^= tx; \ + } while (0) + +/** @cond esch_384 */ + +/** + * \brief Word-based state for the Esch384 incremental hash mode. 
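A usage sketch for the Esch256 hashing entry points defined above (esch256_example is a hypothetical helper, assuming the public declarations in "sparkle.h"): the incremental init/update/finalize path is expected to produce the same digest as the one-shot esch_256_hash() call.

    #include <assert.h>
    #include <string.h>
    #include "sparkle.h"

    static void esch256_example(const unsigned char *msg, unsigned long long len)
    {
        unsigned char d1[ESCH_256_HASH_SIZE];
        unsigned char d2[ESCH_256_HASH_SIZE];
        esch_256_hash_state_t st;

        esch_256_hash(d1, msg, len);          /* one-shot hash */

        esch_256_hash_init(&st);              /* incremental hash */
        esch_256_hash_update(&st, msg, len);
        esch_256_hash_finalize(&st, d2);

        assert(memcmp(d1, d2, ESCH_256_HASH_SIZE) == 0);
    }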
+ */ +typedef union +{ + struct { + uint32_t state[SPARKLE_512_STATE_SIZE]; + uint32_t block[4]; + unsigned char count; + } s; + unsigned long long align; + +} esch_384_hash_state_wt; + +/** @endcond */ + +int esch_384_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + uint32_t s[SPARKLE_512_STATE_SIZE]; + uint32_t block[ESCH_256_RATE / 4]; + memset(s, 0, sizeof(s)); + while (inlen > ESCH_384_RATE) { + memcpy(block, in, ESCH_384_RATE); + esch_384_m4(s, block, 0x00); + sparkle_512(s, 8); + in += ESCH_384_RATE; + inlen -= ESCH_384_RATE; + } + if (inlen == ESCH_384_RATE) { + memcpy(block, in, ESCH_384_RATE); + esch_384_m4(s, block, 0x02); + } else { + unsigned temp = (unsigned)inlen; + memcpy(block, in, temp); + ((unsigned char *)block)[temp] = 0x80; + memset(((unsigned char *)block) + temp + 1, 0, + ESCH_384_RATE - temp - 1); + esch_384_m4(s, block, 0x01); + } + sparkle_512(s, 12); + memcpy(out, s, ESCH_384_RATE); + sparkle_512(s, 8); + memcpy(out + ESCH_384_RATE, s, ESCH_384_RATE); + sparkle_512(s, 8); + memcpy(out + ESCH_384_RATE * 2, s, ESCH_384_RATE); + return 0; +} + +void esch_384_hash_init(esch_384_hash_state_t *state) +{ + memset(state, 0, sizeof(esch_384_hash_state_t)); +} + +void esch_384_hash_update + (esch_384_hash_state_t *state, const unsigned char *in, + unsigned long long inlen) +{ + esch_384_hash_state_wt *st = (esch_384_hash_state_wt *)state; + unsigned temp; + while (inlen > 0) { + if (st->s.count == ESCH_384_RATE) { + esch_384_m4(st->s.state, st->s.block, 0x00); + sparkle_512(st->s.state, 8); + st->s.count = 0; + } + temp = ESCH_384_RATE - st->s.count; + if (temp > inlen) + temp = (unsigned)inlen; + memcpy(((unsigned char *)(st->s.block)) + st->s.count, in, temp); + st->s.count += temp; + in += temp; + inlen -= temp; + } +} + +void esch_384_hash_finalize + (esch_384_hash_state_t *state, unsigned char *out) +{ + esch_384_hash_state_wt *st = (esch_384_hash_state_wt *)state; + + /* Pad and process the last block */ + if (st->s.count == ESCH_384_RATE) { + esch_384_m4(st->s.state, st->s.block, 0x02); + } else { + unsigned temp = st->s.count; + ((unsigned char *)(st->s.block))[temp] = 0x80; + memset(((unsigned char *)(st->s.block)) + temp + 1, 0, + ESCH_384_RATE - temp - 1); + esch_384_m4(st->s.state, st->s.block, 0x01); + } + sparkle_512(st->s.state, 12); + + /* Generate the final hash value */ + memcpy(out, st->s.state, ESCH_384_RATE); + sparkle_512(st->s.state, 8); + memcpy(out + ESCH_384_RATE, st->s.state, ESCH_384_RATE); + sparkle_512(st->s.state, 8); + memcpy(out + ESCH_384_RATE * 2, st->s.state, ESCH_384_RATE); +} diff --git a/sparkle/Implementations/crypto_hash/esch256v1/rhys/sparkle.h b/sparkle/Implementations/crypto_hash/esch256v1/rhys/sparkle.h new file mode 100644 index 0000000..dd0999e --- /dev/null +++ b/sparkle/Implementations/crypto_hash/esch256v1/rhys/sparkle.h @@ -0,0 +1,515 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_SPARKLE_H +#define LWCRYPTO_SPARKLE_H + +#include "aead-common.h" + +/** + * \file sparkle.h + * \brief Encryption and hash algorithms based on the SPARKLE permutation. + * + * SPARKLE is a family of encryption and hash algorithms that are based + * around the SPARKLE permutation. There are three versions of the + * permutation with 256-bit, 384-bit, and 512-bit state sizes. + * The algorithms in the family are: + * + * \li Schwaemm256-128 with a 128-bit key, a 256-bit nonce, and a 128-bit tag. + * This is the primary encryption algorithm in the family. + * \li Schwaemm192-192 with a 192-bit key, a 192-bit nonce, and a 192-bit tag. + * \li Schwaemm128-128 with a 128-bit key, a 128-bit nonce, and a 128-bit tag. + * \li Schwaemm256-256 with a 256-bit key, a 256-bit nonce, and a 256-bit tag. + * \li Esch256 hash algorithm with a 256-bit digest output. This is the + * primary hash algorithm in the family. + * \li Esch384 hash algorithm with a 384-bit digest output. + * + * References: https://www.cryptolux.org/index.php/Sparkle + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the key for Schwaemm256-128. + */ +#define SCHWAEMM_256_128_KEY_SIZE 16 + +/** + * \brief Size of the authentication tag for Schwaemm256-128. + */ +#define SCHWAEMM_256_128_TAG_SIZE 16 + +/** + * \brief Size of the nonce for Schwaemm256-128. + */ +#define SCHWAEMM_256_128_NONCE_SIZE 32 + +/** + * \brief Size of the key for Schwaemm192-192. + */ +#define SCHWAEMM_192_192_KEY_SIZE 24 + +/** + * \brief Size of the authentication tag for Schwaemm192-192. + */ +#define SCHWAEMM_192_192_TAG_SIZE 24 + +/** + * \brief Size of the nonce for Schwaemm192-192. + */ +#define SCHWAEMM_192_192_NONCE_SIZE 24 + +/** + * \brief Size of the key for Schwaemm128-128. + */ +#define SCHWAEMM_128_128_KEY_SIZE 16 + +/** + * \brief Size of the authentication tag for Schwaemm128-128. + */ +#define SCHWAEMM_128_128_TAG_SIZE 16 + +/** + * \brief Size of the nonce for Schwaemm128-128. + */ +#define SCHWAEMM_128_128_NONCE_SIZE 16 + +/** + * \brief Size of the key for Schwaemm256-256. + */ +#define SCHWAEMM_256_256_KEY_SIZE 32 + +/** + * \brief Size of the authentication tag for Schwaemm256-256. + */ +#define SCHWAEMM_256_256_TAG_SIZE 32 + +/** + * \brief Size of the nonce for Schwaemm256-256. + */ +#define SCHWAEMM_256_256_NONCE_SIZE 32 + +/** + * \brief Size of the hash output for Esch256. + */ +#define ESCH_256_HASH_SIZE 32 + +/** + * \brief Size of the hash output for Esch384. + */ +#define ESCH_384_HASH_SIZE 48 + +/** + * \brief Meta-information block for the Schwaemm256-128 cipher. + */ +extern aead_cipher_t const schwaemm_256_128_cipher; + +/** + * \brief Meta-information block for the Schwaemm192-192 cipher. + */ +extern aead_cipher_t const schwaemm_192_192_cipher; + +/** + * \brief Meta-information block for the Schwaemm128-128 cipher. + */ +extern aead_cipher_t const schwaemm_128_128_cipher; + +/** + * \brief Meta-information block for the Schwaemm256-256 cipher. 
+ */ +extern aead_cipher_t const schwaemm_256_256_cipher; + +/** + * \brief Meta-information block for the Esch256 hash algorithm. + */ +extern aead_hash_algorithm_t const esch_256_hash_algorithm; + +/** + * \brief Meta-information block for the Esch384 hash algorithm. + */ +extern aead_hash_algorithm_t const esch_384_hash_algorithm; + +/** + * \brief State information for the Esch256 incremental hash mode. + */ +typedef union +{ + struct { + unsigned char state[48]; /**< Current hash state */ + unsigned char block[16]; /**< Partial input data block */ + unsigned char count; /**< Number of bytes in the current block */ + } s; /**< State */ + unsigned long long align; /**< For alignment of this structure */ + +} esch_256_hash_state_t; + +/** + * \brief State information for the Esch384 incremental hash mode. + */ +typedef union +{ + struct { + unsigned char state[64]; /**< Current hash state */ + unsigned char block[16]; /**< Partial input data block */ + unsigned char count; /**< Number of bytes in the current block */ + } s; /**< State */ + unsigned long long align; /**< For alignment of this structure */ + +} esch_384_hash_state_t; + +/** + * \brief Encrypts and authenticates a packet with Schwaemm256-128. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 32 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa schwaemm_256_128_aead_decrypt() + */ +int schwaemm_256_128_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with Schwaemm256-128. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 32 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. 
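A minimal round-trip sketch for the Schwaemm256-128 functions documented here (the matching decrypt declaration follows just below), using the SCHWAEMM_256_128_* sizes defined above. The all-zero key and nonce and the short message are placeholders; a real caller must use a secret key and a fresh nonce for every packet.

    #include <stdio.h>
    #include "sparkle.h"

    int main(void)
    {
        unsigned char key[SCHWAEMM_256_128_KEY_SIZE] = {0};     /* 16-byte demo key */
        unsigned char nonce[SCHWAEMM_256_128_NONCE_SIZE] = {0}; /* 32-byte demo nonce */
        static const unsigned char ad[] = "header";
        static const unsigned char msg[] = "attack at dawn";
        unsigned char c[sizeof(msg) - 1 + SCHWAEMM_256_128_TAG_SIZE];
        unsigned char m[sizeof(msg) - 1];
        unsigned long long clen, mlen;

        /* Encrypt: the output is the ciphertext followed by the 16-byte tag */
        schwaemm_256_128_aead_encrypt(c, &clen, msg, sizeof(msg) - 1,
                                      ad, sizeof(ad) - 1, 0, nonce, key);

        /* Decrypt: returns 0 and releases the plaintext only if the tag verifies */
        if (schwaemm_256_128_aead_decrypt(m, &mlen, 0, c, clen,
                                          ad, sizeof(ad) - 1, nonce, key) == 0)
            printf("tag ok, %llu plaintext bytes\n", mlen);
        else
            printf("authentication failed\n");
        return 0;
    }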
+ * + * \sa schwaemm_256_128_aead_encrypt() + */ +int schwaemm_256_128_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with Schwaemm192-192. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 24 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 24 bytes in length. + * \param k Points to the 24 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa schwaemm_192_192_aead_decrypt() + */ +int schwaemm_192_192_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with Schwaemm192-192. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 24 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 24 bytes in length. + * \param k Points to the 24 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa schwaemm_192_192_aead_encrypt() + */ +int schwaemm_192_192_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with Schwaemm128-128. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. 
+ * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa schwaemm_128_128_aead_decrypt() + */ +int schwaemm_128_128_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with Schwaemm128-128. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa schwaemm_128_128_aead_encrypt() + */ +int schwaemm_128_128_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with Schwaemm256-256. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 32 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 32 bytes in length. + * \param k Points to the 32 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa schwaemm_256_256_aead_decrypt() + */ +int schwaemm_256_256_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with Schwaemm256-256. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. 
+ * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 32 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 32 bytes in length. + * \param k Points to the 32 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa schwaemm_256_256_aead_encrypt() + */ +int schwaemm_256_256_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data with Esch256 to generate a hash value. + * + * \param out Buffer to receive the hash output which must be at least + * ESCH_256_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int esch_256_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for an Esch256 hashing operation. + * + * \param state Hash state to be initialized. + * + * \sa esch_256_hash_update(), esch_256_hash_finalize(), esch_256_hash() + */ +void esch_256_hash_init(esch_256_hash_state_t *state); + +/** + * \brief Updates an Esch256 state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + * + * \sa esch_256_hash_init(), esch_256_hash_finalize() + */ +void esch_256_hash_update + (esch_256_hash_state_t *state, const unsigned char *in, + unsigned long long inlen); + +/** + * \brief Returns the final hash value from an Esch256 hashing operation. + * + * \param state Hash state to be finalized. + * \param out Points to the output buffer to receive the 32-byte hash value. + * + * \sa esch_256_hash_init(), esch_256_hash_update() + */ +void esch_256_hash_finalize + (esch_256_hash_state_t *state, unsigned char *out); + +/** + * \brief Hashes a block of input data with Esch384 to generate a hash value. + * + * \param out Buffer to receive the hash output which must be at least + * ESCH_384_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int esch_384_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for an Esch384 hashing operation. + * + * \param state Hash state to be initialized. + * + * \sa esch_384_hash_update(), esch_384_hash_finalize(), esch_384_hash() + */ +void esch_384_hash_init(esch_384_hash_state_t *state); + +/** + * \brief Updates an Esch384 state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. 
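Because esch_256_hash_update() buffers partial blocks inside the state, input may be supplied in pieces of any size. A short sketch, assuming only the Esch256 declarations above, that hashes a message in small fixed-size chunks and prints the digest:

    #include <stdio.h>
    #include "sparkle.h"

    /* Hash a message that arrives in arbitrary-sized pieces with Esch256. */
    static void hash_chunks(unsigned char digest[ESCH_256_HASH_SIZE],
                            const unsigned char *data, unsigned long long len)
    {
        esch_256_hash_state_t st;
        esch_256_hash_init(&st);
        while (len > 0) {
            unsigned long long n = (len < 5) ? len : 5; /* any chunk size works */
            esch_256_hash_update(&st, data, n);
            data += n;
            len -= n;
        }
        esch_256_hash_finalize(&st, digest);
    }

    int main(void)
    {
        static const unsigned char msg[] = "abcdefghijklmnopqrstuvwxyz";
        unsigned char digest[ESCH_256_HASH_SIZE];
        unsigned i;
        hash_chunks(digest, msg, sizeof(msg) - 1);
        for (i = 0; i < ESCH_256_HASH_SIZE; ++i)
            printf("%02x", digest[i]);
        printf("\n");
        return 0;
    }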
+ * + * \sa esch_384_hash_init(), esch_384_hash_finalize() + */ +void esch_384_hash_update + (esch_384_hash_state_t *state, const unsigned char *in, + unsigned long long inlen); + +/** + * \brief Returns the final hash value from an Esch384 hashing operation. + * + * \param state Hash state to be finalized. + * \param out Points to the output buffer to receive the 48-byte hash value. + * + * \sa esch_384_hash_init(), esch_384_hash_update() + */ +void esch_384_hash_finalize + (esch_384_hash_state_t *state, unsigned char *out); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/aead-common.c b/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
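The deleted aead-common.c shown next (removed here as part of folding the rhys-avr tree into rhys) compares authentication tags in constant time rather than with memcmp(). A small self-contained sketch of the masking trick it relies on, assuming two's-complement int and an arithmetic right shift of negative values:

    #include <stdio.h>

    /* Constant-time tag comparison: returns an all-ones mask (-1) when the
     * tags match and 0 when they differ, without any data-dependent branch.
     * This mirrors the "accum" computation in aead_check_tag() below. */
    static int tag_equal_mask(const unsigned char *a, const unsigned char *b,
                              unsigned len)
    {
        int accum = 0;
        while (len > 0) {
            accum |= (*a++ ^ *b++); /* stays 0 only if every byte matches */
            --len;
        }
        /* accum is now in 0..255; (accum - 1) >> 8 is -1 iff accum == 0 */
        return (accum - 1) >> 8;
    }

    int main(void)
    {
        unsigned char t1[4] = {1, 2, 3, 4};
        unsigned char t2[4] = {1, 2, 3, 4};
        unsigned char t3[4] = {1, 2, 3, 5};
        printf("%d %d\n", tag_equal_mask(t1, t2, 4), tag_equal_mask(t1, t3, 4));
        return 0;
    }

In aead_check_tag() the resulting mask is ANDed over the plaintext so that a forged packet yields all-zero output, and its complement gives the 0 / -1 return value.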
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/aead-common.h b/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. 
- * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
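For context on how these meta-information blocks are wired up: a hash algorithm such as Esch256 is exposed by filling an aead_hash_algorithm_t with its entry points. The initializer below is an illustrative sketch only, under a distinct name; the actual esch_256_hash_algorithm object declared in sparkle.h is defined elsewhere, presumably in the library's sparkle.c, which this hunk does not show. Esch256 is a plain hash rather than an XOF, so the absorb and squeeze slots are left empty.

    #include "sparkle.h"
    #include "aead-common.h"

    /* Illustrative only: field order follows the aead_hash_algorithm_t
     * definition above. The incremental entry points are adapted to the
     * generic void * state signature by casting. */
    aead_hash_algorithm_t const esch_256_hash_algorithm_example = {
        "Esch256",                                    /* name */
        sizeof(esch_256_hash_state_t),                /* state_size */
        ESCH_256_HASH_SIZE,                           /* hash_len */
        AEAD_FLAG_NONE,                               /* flags */
        esch_256_hash,                                /* hash */
        (aead_hash_init_t)esch_256_hash_init,         /* init */
        (aead_hash_update_t)esch_256_hash_update,     /* update */
        (aead_hash_finalize_t)esch_256_hash_finalize, /* finalize */
        (aead_xof_absorb_t)0,                         /* absorb: not an XOF */
        (aead_xof_squeeze_t)0                         /* squeeze: not an XOF */
    };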
- * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/api.h b/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/api.h deleted file mode 100644 index d507385..0000000 --- a/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/api.h +++ /dev/null @@ -1 +0,0 @@ -#define CRYPTO_BYTES 48 diff --git a/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/hash.c b/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/hash.c deleted file mode 100644 index 9acc9f9..0000000 --- a/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/hash.c +++ /dev/null @@ -1,8 +0,0 @@ - -#include "sparkle.h" - -int crypto_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - return esch_384_hash(out, in, inlen); -} diff --git a/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/internal-sparkle-avr.S b/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/internal-sparkle-avr.S deleted file mode 100644 index 753ea2f..0000000 --- a/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/internal-sparkle-avr.S +++ /dev/null @@ -1,2887 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global sparkle_256 - .type sparkle_256, @function -sparkle_256: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - push r22 - ld r22,Z - ldd r23,Z+1 - ldd r26,Z+2 - ldd r27,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - rcall 129f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,1 - eor r8,r18 - rcall 129f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,2 - eor r8,r18 - rcall 129f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,3 - eor r8,r18 - rcall 129f - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,4 - eor r8,r18 - rcall 129f - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,5 - eor r8,r18 - rcall 129f - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,6 - eor r8,r18 - rcall 129f - pop r18 - cpi r18,7 - brne 5094f - rjmp 615f -5094: - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,7 - eor r8,r18 - rcall 129f - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi 
r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,8 - eor r8,r18 - rcall 129f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,9 - eor r8,r18 - rcall 129f - rjmp 615f -129: - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - movw r12,r22 - movw r14,r26 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - movw r24,r4 - movw r16,r6 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r28,Z+24 - ldd r29,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - 
ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - eor r14,r12 - eor r15,r13 - eor r16,r24 - eor r17,r25 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - eor r14,r8 - eor r15,r9 - eor r12,r10 - eor r13,r11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - std Z+20,r18 - std Z+21,r19 - std Z+22,r20 - std Z+23,r21 - movw r18,r4 - movw r20,r6 - movw r4,r14 - movw r6,r12 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - movw r8,r18 - movw r10,r20 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - eor r16,r28 - eor r17,r29 - eor r24,r2 - eor r25,r3 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - std Z+24,r28 - std Z+25,r29 - std Z+26,r2 - std Z+27,r3 - ld r18,Z - ldd r19,Z+1 - ldd r20,Z+2 - ldd r21,Z+3 - movw r14,r22 - movw r12,r26 - eor r14,r18 - eor r15,r19 - eor r12,r20 - eor r13,r21 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - movw r22,r16 - movw r26,r24 - eor r22,r28 - eor r23,r29 - eor r26,r2 - eor r27,r3 - movw r28,r14 - movw r2,r12 - ret -615: - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sparkle_256, .-sparkle_256 - - .text -.global sparkle_384 - .type 
sparkle_384, @function -sparkle_384: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - push r22 - ld r22,Z - ldd r23,Z+1 - ldd r26,Z+2 - ldd r27,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - rcall 140f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,1 - eor r8,r18 - rcall 140f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,2 - eor r8,r18 - rcall 140f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,3 - eor r8,r18 - rcall 140f - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,4 - eor r8,r18 - rcall 140f - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,5 - eor r8,r18 - rcall 140f - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,6 - eor r8,r18 - rcall 140f - pop r18 - cpi r18,7 - brne 5094f - rjmp 886f -5094: - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,7 - eor r8,r18 - rcall 140f - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,8 - eor r8,r18 - rcall 140f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,9 - eor r8,r18 - rcall 140f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,10 - eor r8,r18 - rcall 140f - rjmp 886f -140: - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor 
r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - movw r12,r22 - movw r14,r26 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - movw r24,r4 - movw r16,r6 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r28,Z+24 - ldd r29,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - 
ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r22 - std Z+17,r23 - std Z+18,r26 - std Z+19,r27 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r28 - std Z+25,r29 - std Z+26,r2 - std Z+27,r3 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - eor r12,r22 - eor r13,r23 - eor r14,r26 - eor r15,r27 - eor r24,r4 - eor r25,r5 - eor r16,r6 - eor r17,r7 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r28,Z+40 - ldd r29,Z+41 - ldd r2,Z+42 - ldd r3,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - eor r14,r12 - eor r15,r13 - eor r16,r24 - eor r17,r25 - ldd r18,Z+28 - ldd r19,Z+29 - ldd r20,Z+30 - ldd r21,Z+31 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - eor r14,r8 - eor r15,r9 - eor r12,r10 - eor r13,r11 - ldd r8,Z+20 - ldd r9,Z+21 - ldd r10,Z+22 - ldd r11,Z+23 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - ldd 
r0,Z+4 - eor r18,r0 - ldd r0,Z+5 - eor r19,r0 - ldd r0,Z+6 - eor r20,r0 - ldd r0,Z+7 - eor r21,r0 - std Z+20,r18 - std Z+21,r19 - std Z+22,r20 - std Z+23,r21 - ldd r18,Z+4 - ldd r19,Z+5 - ldd r20,Z+6 - ldd r21,Z+7 - std Z+28,r18 - std Z+29,r19 - std Z+30,r20 - std Z+31,r21 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - std Z+36,r18 - std Z+37,r19 - std Z+38,r20 - std Z+39,r21 - eor r8,r14 - eor r9,r15 - eor r10,r12 - eor r11,r13 - ldd r18,Z+24 - ldd r19,Z+25 - ldd r20,Z+26 - ldd r21,Z+27 - eor r18,r16 - eor r19,r17 - eor r20,r24 - eor r21,r25 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - eor r16,r28 - eor r17,r29 - eor r24,r2 - eor r25,r3 - ldd r28,Z+16 - ldd r29,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - std Z+40,r28 - std Z+41,r29 - std Z+42,r2 - std Z+43,r3 - ld r14,Z - ldd r15,Z+1 - ldd r12,Z+2 - ldd r13,Z+3 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - std Z+24,r14 - std Z+25,r15 - std Z+26,r12 - std Z+27,r13 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - std Z+32,r18 - std Z+33,r19 - std Z+34,r20 - std Z+35,r21 - eor r28,r16 - eor r29,r17 - eor r2,r24 - eor r3,r25 - ret -886: - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sparkle_384, .-sparkle_384 - - .text -.global sparkle_512 - .type sparkle_512, @function -sparkle_512: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - push r22 - ld r22,Z - ldd r23,Z+1 - ldd r26,Z+2 - ldd r27,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r28,Z+8 - ldd r29,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - rcall 151f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,1 - eor r8,r18 - rcall 151f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,2 - eor r8,r18 - rcall 151f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,3 - eor r8,r18 - rcall 151f - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,4 - eor r8,r18 - rcall 151f - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,5 - eor r8,r18 - rcall 151f - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,6 - eor r8,r18 - rcall 151f - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,7 - eor r8,r18 - rcall 151f - pop r18 - cpi r18,8 - brne 5105f - rjmp 1189f -5105: - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r4,r18 - eor r5,r19 - eor r6,r20 
- eor r7,r21 - ldi r18,8 - eor r8,r18 - rcall 151f - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,9 - eor r8,r18 - rcall 151f - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,10 - eor r8,r18 - rcall 151f - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,11 - eor r8,r18 - rcall 151f - rjmp 1189f -151: - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,98 - ldi r19,81 - ldi r20,225 - ldi r21,183 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,128 - ldi r19,88 - ldi r20,113 - ldi r21,191 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - movw r12,r22 - movw r14,r26 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - movw r24,r4 - movw r16,r6 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - ldd r28,Z+24 - ldd r29,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc 
r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,86 - ldi r19,218 - ldi r20,180 - ldi r21,56 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,56 - ldi r19,119 - ldi r20,78 - ldi r21,50 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r22 - std Z+17,r23 - std Z+18,r26 - std Z+19,r27 - std Z+20,r4 - std Z+21,r5 - std Z+22,r6 - std Z+23,r7 - std Z+24,r28 - std Z+25,r29 - std Z+26,r2 - std Z+27,r3 - std Z+28,r8 - std Z+29,r9 - std Z+30,r10 - std Z+31,r11 - eor r12,r22 - eor r13,r23 - eor r14,r26 - eor r15,r27 - eor r12,r28 - eor r13,r29 - eor r14,r2 - eor r15,r3 - eor r24,r4 - eor r25,r5 - eor r16,r6 - eor r17,r7 - eor r24,r8 - eor r25,r9 - eor r16,r10 - eor r17,r11 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - ldd r28,Z+40 - ldd r29,Z+41 - ldd r2,Z+42 - ldd r3,Z+43 - ldd r8,Z+44 - ldd r9,Z+45 - ldd r10,Z+46 - ldd r11,Z+47 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - 
eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,235 - ldi r19,133 - ldi r20,17 - ldi r21,187 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,87 - ldi r19,123 - ldi r20,124 - ldi r21,79 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - std Z+32,r22 - std Z+33,r23 - std Z+34,r26 - std Z+35,r27 - std Z+36,r4 - std Z+37,r5 - std Z+38,r6 - std Z+39,r7 - std Z+40,r28 - std Z+41,r29 - std Z+42,r2 - std Z+43,r3 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - ldd r22,Z+48 - ldd r23,Z+49 - ldd r26,Z+50 - ldd r27,Z+51 - ldd r4,Z+52 - ldd r5,Z+53 - ldd r6,Z+54 - ldd r7,Z+55 - ldd r28,Z+56 - ldd r29,Z+57 - ldd r2,Z+58 - ldd r3,Z+59 - ldd r8,Z+60 - ldd r9,Z+61 - ldd r10,Z+62 - ldd r11,Z+63 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r22,r18 - adc r23,r19 - adc r26,r20 - adc r27,r21 - eor r4,r27 - eor r5,r22 - eor r6,r23 - eor r7,r26 - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r4 - movw r20,r6 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r22,r20 - adc r23,r21 - adc r26,r18 - adc r27,r19 - movw r18,r22 - movw r20,r26 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r4,r20 - eor r5,r21 - eor r6,r18 - eor r7,r19 - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r4 - adc r23,r5 - adc r26,r6 - adc r27,r7 - movw r18,r22 - movw r20,r26 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r18,200 - ldi r19,161 - ldi r20,191 - ldi r21,207 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - add r22,r7 - adc r23,r4 - adc r26,r5 - adc r27,r6 - eor r4,r26 - eor r5,r27 - eor r6,r22 - eor r7,r23 - eor r22,r18 - eor r23,r19 - eor r26,r20 - eor r27,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - add r28,r18 - adc r29,r19 - adc r2,r20 - adc r3,r21 - eor r8,r3 - eor r9,r28 - eor r10,r29 - eor r11,r2 - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor 
r3,r21 - movw r18,r8 - movw r20,r10 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - add r28,r20 - adc r29,r21 - adc r2,r18 - adc r3,r19 - movw r18,r28 - movw r20,r2 - bst r18,0 - lsr r21 - ror r20 - ror r19 - ror r18 - bld r21,7 - eor r8,r20 - eor r9,r21 - eor r10,r18 - eor r11,r19 - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r8 - adc r29,r9 - adc r2,r10 - adc r3,r11 - movw r18,r28 - movw r20,r2 - lsl r18 - rol r19 - rol r20 - rol r21 - adc r18,r1 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ldi r18,61 - ldi r19,41 - ldi r20,179 - ldi r21,194 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - add r28,r11 - adc r29,r8 - adc r2,r9 - adc r3,r10 - eor r8,r2 - eor r9,r3 - eor r10,r28 - eor r11,r29 - eor r28,r18 - eor r29,r19 - eor r2,r20 - eor r3,r21 - eor r14,r12 - eor r15,r13 - eor r16,r24 - eor r17,r25 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - ldd r4,Z+36 - ldd r5,Z+37 - ldd r6,Z+38 - ldd r7,Z+39 - eor r4,r14 - eor r5,r15 - eor r6,r12 - eor r7,r13 - ldd r18,Z+44 - ldd r19,Z+45 - ldd r20,Z+46 - ldd r21,Z+47 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - eor r14,r8 - eor r15,r9 - eor r12,r10 - eor r13,r11 - ldd r8,Z+28 - ldd r9,Z+29 - ldd r10,Z+30 - ldd r11,Z+31 - std Z+60,r8 - std Z+61,r9 - std Z+62,r10 - std Z+63,r11 - ldd r8,Z+4 - ldd r9,Z+5 - ldd r10,Z+6 - ldd r11,Z+7 - eor r4,r8 - eor r5,r9 - eor r6,r10 - eor r7,r11 - std Z+28,r4 - std Z+29,r5 - std Z+30,r6 - std Z+31,r7 - std Z+36,r8 - std Z+37,r9 - std Z+38,r10 - std Z+39,r11 - ldd r8,Z+12 - ldd r9,Z+13 - ldd r10,Z+14 - ldd r11,Z+15 - eor r18,r8 - eor r19,r9 - eor r20,r10 - eor r21,r11 - std Z+44,r8 - std Z+45,r9 - std Z+46,r10 - std Z+47,r11 - ldd r8,Z+52 - ldd r9,Z+53 - ldd r10,Z+54 - ldd r11,Z+55 - ldd r4,Z+20 - ldd r5,Z+21 - ldd r6,Z+22 - ldd r7,Z+23 - eor r8,r4 - eor r9,r5 - eor r10,r6 - eor r11,r7 - std Z+52,r4 - std Z+53,r5 - std Z+54,r6 - std Z+55,r7 - ldd r0,Z+60 - eor r14,r0 - ldd r0,Z+61 - eor r15,r0 - ldd r0,Z+62 - eor r12,r0 - ldd r0,Z+63 - eor r13,r0 - std Z+20,r14 - std Z+21,r15 - std Z+22,r12 - std Z+23,r13 - movw r4,r18 - movw r6,r20 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - std Z+48,r22 - std Z+49,r23 - std Z+50,r26 - std Z+51,r27 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r26,Z+34 - ldd r27,Z+35 - eor r22,r16 - eor r23,r17 - eor r26,r24 - eor r27,r25 - ldd r18,Z+40 - ldd r19,Z+41 - ldd r20,Z+42 - ldd r21,Z+43 - eor r18,r16 - eor r19,r17 - eor r20,r24 - eor r21,r25 - eor r16,r28 - eor r17,r29 - eor r24,r2 - eor r25,r3 - ldd r14,Z+24 - ldd r15,Z+25 - ldd r12,Z+26 - ldd r13,Z+27 - std Z+56,r14 - std Z+57,r15 - std Z+58,r12 - std Z+59,r13 - ld r14,Z - ldd r15,Z+1 - ldd r12,Z+2 - ldd r13,Z+3 - eor r22,r14 - eor r23,r15 - eor r26,r12 - eor r27,r13 - std Z+24,r22 - std Z+25,r23 - std Z+26,r26 - std Z+27,r27 - std Z+32,r14 - std Z+33,r15 - std Z+34,r12 - std Z+35,r13 - ldd r14,Z+8 - ldd r15,Z+9 - ldd r12,Z+10 - ldd r13,Z+11 - eor r18,r14 - eor r19,r15 - eor r20,r12 - eor r21,r13 - movw r22,r18 - movw r26,r20 - std Z+40,r14 - std Z+41,r15 - std Z+42,r12 - std Z+43,r13 - ldd r28,Z+48 - ldd r29,Z+49 - ldd r2,Z+50 - ldd r3,Z+51 - ldd r14,Z+16 - ldd r15,Z+17 - ldd r12,Z+18 - ldd r13,Z+19 - eor r28,r14 - eor r29,r15 - eor r2,r12 - eor r3,r13 - std Z+48,r14 - std Z+49,r15 - std Z+50,r12 - std Z+51,r13 - ldd r0,Z+56 - eor r16,r0 - ldd r0,Z+57 - eor r17,r0 - ldd r0,Z+58 - eor r24,r0 - ldd r0,Z+59 - eor r25,r0 - std Z+16,r16 - std Z+17,r17 - 
std Z+18,r24 - std Z+19,r25 - ret -1189: - st Z,r22 - std Z+1,r23 - std Z+2,r26 - std Z+3,r27 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r28 - std Z+9,r29 - std Z+10,r2 - std Z+11,r3 - std Z+12,r8 - std Z+13,r9 - std Z+14,r10 - std Z+15,r11 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sparkle_512, .-sparkle_512 - -#endif diff --git a/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/internal-sparkle.c b/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/internal-sparkle.c deleted file mode 100644 index 4a4c0fb..0000000 --- a/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/internal-sparkle.c +++ /dev/null @@ -1,382 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-sparkle.h" - -#if !defined(__AVR__) - -/* The 8 basic round constants from the specification */ -#define RC_0 0xB7E15162 -#define RC_1 0xBF715880 -#define RC_2 0x38B4DA56 -#define RC_3 0x324E7738 -#define RC_4 0xBB1185EB -#define RC_5 0x4F7C7B57 -#define RC_6 0xCFBFA1C8 -#define RC_7 0xC2B3293D - -/* Round constants for all SPARKLE steps; maximum of 12 for SPARKLE-512 */ -static uint32_t const sparkle_rc[12] = { - RC_0, RC_1, RC_2, RC_3, RC_4, RC_5, RC_6, RC_7, - RC_0, RC_1, RC_2, RC_3 -}; - -/** - * \brief Alzette block cipher that implements the ARXbox layer of the - * SPARKLE permutation. - * - * \param x Left half of the 64-bit block. - * \param y Right half of the 64-bit block. - * \param k 32-bit round key. 
- */ -#define alzette(x, y, k) \ - do { \ - (x) += leftRotate1((y)); \ - (y) ^= leftRotate8((x)); \ - (x) ^= (k); \ - (x) += leftRotate15((y)); \ - (y) ^= leftRotate15((x)); \ - (x) ^= (k); \ - (x) += (y); \ - (y) ^= leftRotate1((x)); \ - (x) ^= (k); \ - (x) += leftRotate8((y)); \ - (y) ^= leftRotate16((x)); \ - (x) ^= (k); \ - } while (0) - -void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps) -{ - uint32_t x0, x1, x2, x3; - uint32_t y0, y1, y2, y3; - uint32_t tx, ty; - unsigned step; - - /* Load the SPARKLE-256 state up into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = s[0]; - y0 = s[1]; - x1 = s[2]; - y1 = s[3]; - x2 = s[4]; - y2 = s[5]; - x3 = s[6]; - y3 = s[7]; -#else - x0 = le_load_word32((const uint8_t *)&(s[0])); - y0 = le_load_word32((const uint8_t *)&(s[1])); - x1 = le_load_word32((const uint8_t *)&(s[2])); - y1 = le_load_word32((const uint8_t *)&(s[3])); - x2 = le_load_word32((const uint8_t *)&(s[4])); - y2 = le_load_word32((const uint8_t *)&(s[5])); - x3 = le_load_word32((const uint8_t *)&(s[6])); - y3 = le_load_word32((const uint8_t *)&(s[7])); -#endif - - /* Perform all requested steps */ - for (step = 0; step < steps; ++step) { - /* Add round constants */ - y0 ^= sparkle_rc[step]; - y1 ^= step; - - /* ARXbox layer */ - alzette(x0, y0, RC_0); - alzette(x1, y1, RC_1); - alzette(x2, y2, RC_2); - alzette(x3, y3, RC_3); - - /* Linear layer */ - tx = x0 ^ x1; - ty = y0 ^ y1; - tx = leftRotate16(tx ^ (tx << 16)); - ty = leftRotate16(ty ^ (ty << 16)); - y2 ^= tx; - tx ^= y3; - y3 = y1; - y1 = y2 ^ y0; - y2 = y0; - y0 = tx ^ y3; - x2 ^= ty; - ty ^= x3; - x3 = x1; - x1 = x2 ^ x0; - x2 = x0; - x0 = ty ^ x3; - } - - /* Write the local variables back to the SPARKLE-256 state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s[0] = x0; - s[1] = y0; - s[2] = x1; - s[3] = y1; - s[4] = x2; - s[5] = y2; - s[6] = x3; - s[7] = y3; -#else - le_store_word32((uint8_t *)&(s[0]), x0); - le_store_word32((uint8_t *)&(s[1]), y0); - le_store_word32((uint8_t *)&(s[2]), x1); - le_store_word32((uint8_t *)&(s[3]), y1); - le_store_word32((uint8_t *)&(s[4]), x2); - le_store_word32((uint8_t *)&(s[5]), y2); - le_store_word32((uint8_t *)&(s[6]), x3); - le_store_word32((uint8_t *)&(s[7]), y3); -#endif -} - -void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps) -{ - uint32_t x0, x1, x2, x3, x4, x5; - uint32_t y0, y1, y2, y3, y4, y5; - uint32_t tx, ty; - unsigned step; - - /* Load the SPARKLE-384 state up into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = s[0]; - y0 = s[1]; - x1 = s[2]; - y1 = s[3]; - x2 = s[4]; - y2 = s[5]; - x3 = s[6]; - y3 = s[7]; - x4 = s[8]; - y4 = s[9]; - x5 = s[10]; - y5 = s[11]; -#else - x0 = le_load_word32((const uint8_t *)&(s[0])); - y0 = le_load_word32((const uint8_t *)&(s[1])); - x1 = le_load_word32((const uint8_t *)&(s[2])); - y1 = le_load_word32((const uint8_t *)&(s[3])); - x2 = le_load_word32((const uint8_t *)&(s[4])); - y2 = le_load_word32((const uint8_t *)&(s[5])); - x3 = le_load_word32((const uint8_t *)&(s[6])); - y3 = le_load_word32((const uint8_t *)&(s[7])); - x4 = le_load_word32((const uint8_t *)&(s[8])); - y4 = le_load_word32((const uint8_t *)&(s[9])); - x5 = le_load_word32((const uint8_t *)&(s[10])); - y5 = le_load_word32((const uint8_t *)&(s[11])); -#endif - - /* Perform all requested steps */ - for (step = 0; step < steps; ++step) { - /* Add round constants */ - y0 ^= sparkle_rc[step]; - y1 ^= step; - - /* ARXbox layer */ - alzette(x0, y0, RC_0); - alzette(x1, y1, RC_1); - alzette(x2, y2, RC_2); - alzette(x3, y3, RC_3); 
- alzette(x4, y4, RC_4); - alzette(x5, y5, RC_5); - - /* Linear layer */ - tx = x0 ^ x1 ^ x2; - ty = y0 ^ y1 ^ y2; - tx = leftRotate16(tx ^ (tx << 16)); - ty = leftRotate16(ty ^ (ty << 16)); - y3 ^= tx; - y4 ^= tx; - tx ^= y5; - y5 = y2; - y2 = y3 ^ y0; - y3 = y0; - y0 = y4 ^ y1; - y4 = y1; - y1 = tx ^ y5; - x3 ^= ty; - x4 ^= ty; - ty ^= x5; - x5 = x2; - x2 = x3 ^ x0; - x3 = x0; - x0 = x4 ^ x1; - x4 = x1; - x1 = ty ^ x5; - } - - /* Write the local variables back to the SPARKLE-384 state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s[0] = x0; - s[1] = y0; - s[2] = x1; - s[3] = y1; - s[4] = x2; - s[5] = y2; - s[6] = x3; - s[7] = y3; - s[8] = x4; - s[9] = y4; - s[10] = x5; - s[11] = y5; -#else - le_store_word32((uint8_t *)&(s[0]), x0); - le_store_word32((uint8_t *)&(s[1]), y0); - le_store_word32((uint8_t *)&(s[2]), x1); - le_store_word32((uint8_t *)&(s[3]), y1); - le_store_word32((uint8_t *)&(s[4]), x2); - le_store_word32((uint8_t *)&(s[5]), y2); - le_store_word32((uint8_t *)&(s[6]), x3); - le_store_word32((uint8_t *)&(s[7]), y3); - le_store_word32((uint8_t *)&(s[8]), x4); - le_store_word32((uint8_t *)&(s[9]), y4); - le_store_word32((uint8_t *)&(s[10]), x5); - le_store_word32((uint8_t *)&(s[11]), y5); -#endif -} - -void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps) -{ - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t y0, y1, y2, y3, y4, y5, y6, y7; - uint32_t tx, ty; - unsigned step; - - /* Load the SPARKLE-512 state up into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x0 = s[0]; - y0 = s[1]; - x1 = s[2]; - y1 = s[3]; - x2 = s[4]; - y2 = s[5]; - x3 = s[6]; - y3 = s[7]; - x4 = s[8]; - y4 = s[9]; - x5 = s[10]; - y5 = s[11]; - x6 = s[12]; - y6 = s[13]; - x7 = s[14]; - y7 = s[15]; -#else - x0 = le_load_word32((const uint8_t *)&(s[0])); - y0 = le_load_word32((const uint8_t *)&(s[1])); - x1 = le_load_word32((const uint8_t *)&(s[2])); - y1 = le_load_word32((const uint8_t *)&(s[3])); - x2 = le_load_word32((const uint8_t *)&(s[4])); - y2 = le_load_word32((const uint8_t *)&(s[5])); - x3 = le_load_word32((const uint8_t *)&(s[6])); - y3 = le_load_word32((const uint8_t *)&(s[7])); - x4 = le_load_word32((const uint8_t *)&(s[8])); - y4 = le_load_word32((const uint8_t *)&(s[9])); - x5 = le_load_word32((const uint8_t *)&(s[10])); - y5 = le_load_word32((const uint8_t *)&(s[11])); - x6 = le_load_word32((const uint8_t *)&(s[12])); - y6 = le_load_word32((const uint8_t *)&(s[13])); - x7 = le_load_word32((const uint8_t *)&(s[14])); - y7 = le_load_word32((const uint8_t *)&(s[15])); -#endif - - /* Perform all requested steps */ - for (step = 0; step < steps; ++step) { - /* Add round constants */ - y0 ^= sparkle_rc[step]; - y1 ^= step; - - /* ARXbox layer */ - alzette(x0, y0, RC_0); - alzette(x1, y1, RC_1); - alzette(x2, y2, RC_2); - alzette(x3, y3, RC_3); - alzette(x4, y4, RC_4); - alzette(x5, y5, RC_5); - alzette(x6, y6, RC_6); - alzette(x7, y7, RC_7); - - /* Linear layer */ - tx = x0 ^ x1 ^ x2 ^ x3; - ty = y0 ^ y1 ^ y2 ^ y3; - tx = leftRotate16(tx ^ (tx << 16)); - ty = leftRotate16(ty ^ (ty << 16)); - y4 ^= tx; - y5 ^= tx; - y6 ^= tx; - tx ^= y7; - y7 = y3; - y3 = y4 ^ y0; - y4 = y0; - y0 = y5 ^ y1; - y5 = y1; - y1 = y6 ^ y2; - y6 = y2; - y2 = tx ^ y7; - x4 ^= ty; - x5 ^= ty; - x6 ^= ty; - ty ^= x7; - x7 = x3; - x3 = x4 ^ x0; - x4 = x0; - x0 = x5 ^ x1; - x5 = x1; - x1 = x6 ^ x2; - x6 = x2; - x2 = ty ^ x7; - } - - /* Write the local variables back to the SPARKLE-512 state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s[0] = x0; - s[1] = y0; - s[2] = x1; - s[3] = y1; - s[4] = x2; - s[5] = y2; 
- s[6] = x3; - s[7] = y3; - s[8] = x4; - s[9] = y4; - s[10] = x5; - s[11] = y5; - s[12] = x6; - s[13] = y6; - s[14] = x7; - s[15] = y7; -#else - le_store_word32((uint8_t *)&(s[0]), x0); - le_store_word32((uint8_t *)&(s[1]), y0); - le_store_word32((uint8_t *)&(s[2]), x1); - le_store_word32((uint8_t *)&(s[3]), y1); - le_store_word32((uint8_t *)&(s[4]), x2); - le_store_word32((uint8_t *)&(s[5]), y2); - le_store_word32((uint8_t *)&(s[6]), x3); - le_store_word32((uint8_t *)&(s[7]), y3); - le_store_word32((uint8_t *)&(s[8]), x4); - le_store_word32((uint8_t *)&(s[9]), y4); - le_store_word32((uint8_t *)&(s[10]), x5); - le_store_word32((uint8_t *)&(s[11]), y5); - le_store_word32((uint8_t *)&(s[12]), x6); - le_store_word32((uint8_t *)&(s[13]), y6); - le_store_word32((uint8_t *)&(s[14]), x7); - le_store_word32((uint8_t *)&(s[15]), y7); -#endif -} - -#endif diff --git a/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/internal-sparkle.h b/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/internal-sparkle.h deleted file mode 100644 index fbdabc1..0000000 --- a/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/internal-sparkle.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SPARKLE_H -#define LW_INTERNAL_SPARKLE_H - -#include "internal-util.h" - -/** - * \file internal-sparkle.h - * \brief Internal implementation of the SPARKLE permutation. - * - * References: https://www.cryptolux.org/index.php/Sparkle - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the state for SPARKLE-256. - */ -#define SPARKLE_256_STATE_SIZE 8 - -/** - * \brief Size of the state for SPARKLE-384. - */ -#define SPARKLE_384_STATE_SIZE 12 - -/** - * \brief Size of the state for SPARKLE-512. - */ -#define SPARKLE_512_STATE_SIZE 16 - -/** - * \brief Performs the SPARKLE-256 permutation. - * - * \param s The words of the SPARKLE-256 state in little-endian byte order. - * \param steps The number of steps to perform, 7 or 10. - */ -void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps); - -/** - * \brief Performs the SPARKLE-384 permutation. - * - * \param s The words of the SPARKLE-384 state in little-endian byte order. - * \param steps The number of steps to perform, 7 or 11. - */ -void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps); - -/** - * \brief Performs the SPARKLE-512 permutation. 
- * - * \param s The words of the SPARKLE-512 state in little-endian byte order. - * \param steps The number of steps to perform, 8 or 12. - */ -void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/internal-util.h b/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/sparkle.c b/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/sparkle.c deleted file mode 100644 index e2aa25a..0000000 --- a/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/sparkle.c +++ /dev/null @@ -1,1135 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "sparkle.h" -#include "internal-sparkle.h" -#include - -aead_cipher_t const schwaemm_256_128_cipher = { - "Schwaemm256-128", - SCHWAEMM_256_128_KEY_SIZE, - SCHWAEMM_256_128_NONCE_SIZE, - SCHWAEMM_256_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_256_128_aead_encrypt, - schwaemm_256_128_aead_decrypt -}; - -aead_cipher_t const schwaemm_192_192_cipher = { - "Schwaemm192-192", - SCHWAEMM_192_192_KEY_SIZE, - SCHWAEMM_192_192_NONCE_SIZE, - SCHWAEMM_192_192_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_192_192_aead_encrypt, - schwaemm_192_192_aead_decrypt -}; - -aead_cipher_t const schwaemm_128_128_cipher = { - "Schwaemm128-128", - SCHWAEMM_128_128_KEY_SIZE, - SCHWAEMM_128_128_NONCE_SIZE, - SCHWAEMM_128_128_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_128_128_aead_encrypt, - schwaemm_128_128_aead_decrypt -}; - -aead_cipher_t const schwaemm_256_256_cipher = { - "Schwaemm256-256", - SCHWAEMM_256_256_KEY_SIZE, - SCHWAEMM_256_256_NONCE_SIZE, - SCHWAEMM_256_256_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - schwaemm_256_256_aead_encrypt, - schwaemm_256_256_aead_decrypt -}; - -aead_hash_algorithm_t const esch_256_hash_algorithm = { - "Esch256", - sizeof(esch_256_hash_state_t), - ESCH_256_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - esch_256_hash, - (aead_hash_init_t)esch_256_hash_init, - (aead_hash_update_t)esch_256_hash_update, - (aead_hash_finalize_t)esch_256_hash_finalize, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -aead_hash_algorithm_t const esch_384_hash_algorithm = { - "Esch384", - sizeof(esch_384_hash_state_t), - ESCH_384_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - esch_384_hash, - (aead_hash_init_t)esch_384_hash_init, - (aead_hash_update_t)esch_384_hash_update, - (aead_hash_finalize_t)esch_384_hash_finalize, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -/** - * \def DOMAIN(value) - * \brief Build a domain separation value as a 32-bit word. - * - * \param value The base value. - * \return The domain separation value as a 32-bit word. - */ -#if defined(LW_UTIL_LITTLE_ENDIAN) -#define DOMAIN(value) (((uint32_t)(value)) << 24) -#else -#define DOMAIN(value) (value) -#endif - -/** - * \brief Rate at which bytes are processed by Schwaemm256-128. - */ -#define SCHWAEMM_256_128_RATE 32 - -/** - * \brief Pointer to the left of the state for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_RIGHT(s) \ - (SCHWAEMM_256_128_LEFT(s) + SCHWAEMM_256_128_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm256-128. - * - * \param s SPARKLE-384 state. - */ -#define schwaemm_256_128_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[4] ^ s[8]; \ - s[4] ^= t ^ s[8]; \ - t = s[1]; \ - s[1] = s[5] ^ s[9]; \ - s[5] ^= t ^ s[9]; \ - t = s[2]; \ - s[2] = s[6] ^ s[10]; \ - s[6] ^= t ^ s[10]; \ - t = s[3]; \ - s[3] = s[7] ^ s[11]; \ - s[7] ^= t ^ s[11]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm256-128. - * - * \param s SPARKLE-384 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
- */ -static void schwaemm_256_128_authenticate - (uint32_t s[SPARKLE_384_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_256_128_RATE) { - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); - sparkle_384(s, 7); - ad += SCHWAEMM_256_128_RATE; - adlen -= SCHWAEMM_256_128_RATE; - } - if (adlen == SCHWAEMM_256_128_RATE) { - s[11] ^= DOMAIN(0x05); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[11] ^= DOMAIN(0x04); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); -} - -int schwaemm_256_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - uint8_t block[SCHWAEMM_256_128_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_256_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_128_LEFT(s), npub, SCHWAEMM_256_128_NONCE_SIZE); - memcpy(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_128_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - sparkle_384(s, 7); - memcpy(c, block, SCHWAEMM_256_128_RATE); - c += SCHWAEMM_256_128_RATE; - m += SCHWAEMM_256_128_RATE; - mlen -= SCHWAEMM_256_128_RATE; - } - if (mlen == SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); - s[11] ^= DOMAIN(0x07); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - memcpy(c, block, SCHWAEMM_256_128_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[11] ^= DOMAIN(0x06); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_384(s, 11); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_TAG_SIZE); - return 0; -} - -int schwaemm_256_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_256_128_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_256_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_128_LEFT(s), npub, SCHWAEMM_256_128_NONCE_SIZE); - memcpy(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_128_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_256_128_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - sparkle_384(s, 7); - c += SCHWAEMM_256_128_RATE; - m += SCHWAEMM_256_128_RATE; - clen -= SCHWAEMM_256_128_RATE; - } - if (clen == SCHWAEMM_256_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); - s[11] ^= DOMAIN(0x07); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[11] ^= DOMAIN(0x06); - schwaemm_256_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_256_128_RIGHT(s), c, SCHWAEMM_256_128_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Schwaemm192-192. - */ -#define SCHWAEMM_192_192_RATE 24 - -/** - * \brief Pointer to the left of the state for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_RIGHT(s) \ - (SCHWAEMM_192_192_LEFT(s) + SCHWAEMM_192_192_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm192-192. - * - * \param s SPARKLE-384 state. - */ -#define schwaemm_192_192_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[3] ^ s[6]; \ - s[3] ^= t ^ s[9]; \ - t = s[1]; \ - s[1] = s[4] ^ s[7]; \ - s[4] ^= t ^ s[10]; \ - t = s[2]; \ - s[2] = s[5] ^ s[8]; \ - s[5] ^= t ^ s[11]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm192-192. - * - * \param s SPARKLE-384 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
- */ -static void schwaemm_192_192_authenticate - (uint32_t s[SPARKLE_384_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_192_192_RATE) { - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); - sparkle_384(s, 7); - ad += SCHWAEMM_192_192_RATE; - adlen -= SCHWAEMM_192_192_RATE; - } - if (adlen == SCHWAEMM_192_192_RATE) { - s[11] ^= DOMAIN(0x09); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[11] ^= DOMAIN(0x08); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); -} - -int schwaemm_192_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - uint8_t block[SCHWAEMM_192_192_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_192_192_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_192_192_LEFT(s), npub, SCHWAEMM_192_192_NONCE_SIZE); - memcpy(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_192_192_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - sparkle_384(s, 7); - memcpy(c, block, SCHWAEMM_192_192_RATE); - c += SCHWAEMM_192_192_RATE; - m += SCHWAEMM_192_192_RATE; - mlen -= SCHWAEMM_192_192_RATE; - } - if (mlen == SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); - s[11] ^= DOMAIN(0x0B); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - memcpy(c, block, SCHWAEMM_192_192_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[11] ^= DOMAIN(0x0A); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_384(s, 11); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_TAG_SIZE); - return 0; -} - -int schwaemm_192_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_192_192_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_192_192_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_192_192_LEFT(s), npub, SCHWAEMM_192_192_NONCE_SIZE); - memcpy(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_KEY_SIZE); - sparkle_384(s, 11); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_192_192_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_192_192_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - sparkle_384(s, 7); - c += SCHWAEMM_192_192_RATE; - m += SCHWAEMM_192_192_RATE; - clen -= SCHWAEMM_192_192_RATE; - } - if (clen == SCHWAEMM_192_192_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); - s[11] ^= DOMAIN(0x0B); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[11] ^= DOMAIN(0x0A); - schwaemm_192_192_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_384(s, 11); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_192_192_RIGHT(s), c, SCHWAEMM_192_192_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Schwaemm128-128. - */ -#define SCHWAEMM_128_128_RATE 16 - -/** - * \brief Pointer to the left of the state for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_RIGHT(s) \ - (SCHWAEMM_128_128_LEFT(s) + SCHWAEMM_128_128_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm128-128. - * - * \param s SPARKLE-256 state. - */ -#define schwaemm_128_128_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[2] ^ s[4]; \ - s[2] ^= t ^ s[6]; \ - t = s[1]; \ - s[1] = s[3] ^ s[5]; \ - s[3] ^= t ^ s[7]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm128-128. - * - * \param s SPARKLE-256 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
- */ -static void schwaemm_128_128_authenticate - (uint32_t s[SPARKLE_256_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_128_128_RATE) { - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); - sparkle_256(s, 7); - ad += SCHWAEMM_128_128_RATE; - adlen -= SCHWAEMM_128_128_RATE; - } - if (adlen == SCHWAEMM_128_128_RATE) { - s[7] ^= DOMAIN(0x05); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[7] ^= DOMAIN(0x04); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_256(s, 10); -} - -int schwaemm_128_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_256_STATE_SIZE]; - uint8_t block[SCHWAEMM_128_128_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_128_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_128_128_LEFT(s), npub, SCHWAEMM_128_128_NONCE_SIZE); - memcpy(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_KEY_SIZE); - sparkle_256(s, 10); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_128_128_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - sparkle_256(s, 7); - memcpy(c, block, SCHWAEMM_128_128_RATE); - c += SCHWAEMM_128_128_RATE; - m += SCHWAEMM_128_128_RATE; - mlen -= SCHWAEMM_128_128_RATE; - } - if (mlen == SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); - s[7] ^= DOMAIN(0x07); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - memcpy(c, block, SCHWAEMM_128_128_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[7] ^= DOMAIN(0x06); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_256(s, 10); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_TAG_SIZE); - return 0; -} - -int schwaemm_128_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_256_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_128_128_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_128_128_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_128_128_LEFT(s), npub, SCHWAEMM_128_128_NONCE_SIZE); - memcpy(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_KEY_SIZE); - sparkle_256(s, 10); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_128_128_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_128_128_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - sparkle_256(s, 7); - c += SCHWAEMM_128_128_RATE; - m += SCHWAEMM_128_128_RATE; - clen -= SCHWAEMM_128_128_RATE; - } - if (clen == SCHWAEMM_128_128_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); - s[7] ^= DOMAIN(0x07); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[7] ^= DOMAIN(0x06); - schwaemm_128_128_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_256(s, 10); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_128_128_RIGHT(s), c, SCHWAEMM_128_128_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Schwaemm256-256. - */ -#define SCHWAEMM_256_256_RATE 32 - -/** - * \brief Pointer to the left of the state for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_LEFT(s) ((unsigned char *)&(s[0])) - -/** - * \brief Pointer to the right of the state for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_RIGHT(s) \ - (SCHWAEMM_256_256_LEFT(s) + SCHWAEMM_256_256_RATE) - -/** - * \brief Perform the rho1 and rate whitening steps for Schwaemm256-256. - * - * \param s SPARKLE-512 state. - */ -#define schwaemm_256_256_rho(s) \ - do { \ - uint32_t t = s[0]; \ - s[0] = s[4] ^ s[8]; \ - s[4] ^= t ^ s[12]; \ - t = s[1]; \ - s[1] = s[5] ^ s[9]; \ - s[5] ^= t ^ s[13]; \ - t = s[2]; \ - s[2] = s[6] ^ s[10]; \ - s[6] ^= t ^ s[14]; \ - t = s[3]; \ - s[3] = s[7] ^ s[11]; \ - s[7] ^= t ^ s[15]; \ - } while (0) - -/** - * \brief Authenticates the associated data for Schwaemm256-256. - * - * \param s SPARKLE-512 state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data; must be >= 1. 
- */ -static void schwaemm_256_256_authenticate - (uint32_t s[SPARKLE_512_STATE_SIZE], - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen > SCHWAEMM_256_256_RATE) { - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); - sparkle_512(s, 8); - ad += SCHWAEMM_256_256_RATE; - adlen -= SCHWAEMM_256_256_RATE; - } - if (adlen == SCHWAEMM_256_256_RATE) { - s[15] ^= DOMAIN(0x11); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); - } else { - unsigned temp = (unsigned)adlen; - s[15] ^= DOMAIN(0x10); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, ad, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_512(s, 12); -} - -int schwaemm_256_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_512_STATE_SIZE]; - uint8_t block[SCHWAEMM_256_256_RATE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SCHWAEMM_256_256_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_256_LEFT(s), npub, SCHWAEMM_256_256_NONCE_SIZE); - memcpy(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_KEY_SIZE); - sparkle_512(s, 12); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_256_authenticate(s, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) { - while (mlen > SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - sparkle_512(s, 8); - memcpy(c, block, SCHWAEMM_256_256_RATE); - c += SCHWAEMM_256_256_RATE; - m += SCHWAEMM_256_256_RATE; - mlen -= SCHWAEMM_256_256_RATE; - } - if (mlen == SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); - s[15] ^= DOMAIN(0x13); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - memcpy(c, block, SCHWAEMM_256_256_RATE); - } else { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_src(block, (unsigned char *)s, m, temp); - s[15] ^= DOMAIN(0x12); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - memcpy(c, block, temp); - } - sparkle_512(s, 12); - c += mlen; - } - - /* Generate the authentication tag */ - lw_xor_block_2_src - (c, SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_TAG_SIZE); - return 0; -} - -int schwaemm_256_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t s[SPARKLE_512_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SCHWAEMM_256_256_TAG_SIZE) - return -1; - *mlen = clen - SCHWAEMM_256_256_TAG_SIZE; - - /* Initialize the state with the nonce and the key */ - memcpy(SCHWAEMM_256_256_LEFT(s), npub, SCHWAEMM_256_256_NONCE_SIZE); - memcpy(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_KEY_SIZE); - sparkle_512(s, 12); - - /* Process the associated data */ - if (adlen > 0) - schwaemm_256_256_authenticate(s, ad, adlen); - - /* Decrypt the ciphertext to 
produce the plaintext */ - clen -= SCHWAEMM_256_256_TAG_SIZE; - if (clen > 0) { - while (clen > SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - sparkle_512(s, 8); - c += SCHWAEMM_256_256_RATE; - m += SCHWAEMM_256_256_RATE; - clen -= SCHWAEMM_256_256_RATE; - } - if (clen == SCHWAEMM_256_256_RATE) { - lw_xor_block_2_src - (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE); - s[15] ^= DOMAIN(0x13); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); - } else { - unsigned temp = (unsigned)clen; - lw_xor_block_2_src(m, (unsigned char *)s, c, temp); - s[15] ^= DOMAIN(0x12); - schwaemm_256_256_rho(s); - lw_xor_block((unsigned char *)s, m, temp); - ((unsigned char *)s)[temp] ^= 0x80; - } - sparkle_512(s, 12); - c += clen; - } - - /* Check the authentication tag */ - lw_xor_block(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_TAG_SIZE); - return aead_check_tag - (mtemp, *mlen, SCHWAEMM_256_256_RIGHT(s), c, SCHWAEMM_256_256_TAG_SIZE); -} - -/** - * \brief Rate at which bytes are processed by Esch256. - */ -#define ESCH_256_RATE 16 - -/** - * \brief Perform the M3 step for Esch256 to mix the input with the state. - * - * \param s SPARKLE-384 state. - * \param block Block of input data that has been padded to the rate. - * \param domain Domain separator for this phase. - */ -#define esch_256_m3(s, block, domain) \ - do { \ - uint32_t tx = (block)[0] ^ (block)[2]; \ - uint32_t ty = (block)[1] ^ (block)[3]; \ - tx = leftRotate16(tx ^ (tx << 16)); \ - ty = leftRotate16(ty ^ (ty << 16)); \ - s[0] ^= (block)[0] ^ ty; \ - s[1] ^= (block)[1] ^ tx; \ - s[2] ^= (block)[2] ^ ty; \ - s[3] ^= (block)[3] ^ tx; \ - if ((domain) != 0) \ - s[5] ^= DOMAIN(domain); \ - s[4] ^= ty; \ - s[5] ^= tx; \ - } while (0) - -/** @cond esch_256 */ - -/** - * \brief Word-based state for the Esch256 incremental hash mode. 
- */ -typedef union -{ - struct { - uint32_t state[SPARKLE_384_STATE_SIZE]; - uint32_t block[4]; - unsigned char count; - } s; - unsigned long long align; - -} esch_256_hash_state_wt; - -/** @endcond */ - -int esch_256_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - uint32_t s[SPARKLE_384_STATE_SIZE]; - uint32_t block[ESCH_256_RATE / 4]; - memset(s, 0, sizeof(s)); - while (inlen > ESCH_256_RATE) { - memcpy(block, in, ESCH_256_RATE); - esch_256_m3(s, block, 0x00); - sparkle_384(s, 7); - in += ESCH_256_RATE; - inlen -= ESCH_256_RATE; - } - if (inlen == ESCH_256_RATE) { - memcpy(block, in, ESCH_256_RATE); - esch_256_m3(s, block, 0x02); - } else { - unsigned temp = (unsigned)inlen; - memcpy(block, in, temp); - ((unsigned char *)block)[temp] = 0x80; - memset(((unsigned char *)block) + temp + 1, 0, - ESCH_256_RATE - temp - 1); - esch_256_m3(s, block, 0x01); - } - sparkle_384(s, 11); - memcpy(out, s, ESCH_256_RATE); - sparkle_384(s, 7); - memcpy(out + ESCH_256_RATE, s, ESCH_256_RATE); - return 0; -} - -void esch_256_hash_init(esch_256_hash_state_t *state) -{ - memset(state, 0, sizeof(esch_256_hash_state_t)); -} - -void esch_256_hash_update - (esch_256_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - esch_256_hash_state_wt *st = (esch_256_hash_state_wt *)state; - unsigned temp; - while (inlen > 0) { - if (st->s.count == ESCH_256_RATE) { - esch_256_m3(st->s.state, st->s.block, 0x00); - sparkle_384(st->s.state, 7); - st->s.count = 0; - } - temp = ESCH_256_RATE - st->s.count; - if (temp > inlen) - temp = (unsigned)inlen; - memcpy(((unsigned char *)(st->s.block)) + st->s.count, in, temp); - st->s.count += temp; - in += temp; - inlen -= temp; - } -} - -void esch_256_hash_finalize - (esch_256_hash_state_t *state, unsigned char *out) -{ - esch_256_hash_state_wt *st = (esch_256_hash_state_wt *)state; - - /* Pad and process the last block */ - if (st->s.count == ESCH_256_RATE) { - esch_256_m3(st->s.state, st->s.block, 0x02); - } else { - unsigned temp = st->s.count; - ((unsigned char *)(st->s.block))[temp] = 0x80; - memset(((unsigned char *)(st->s.block)) + temp + 1, 0, - ESCH_256_RATE - temp - 1); - esch_256_m3(st->s.state, st->s.block, 0x01); - } - sparkle_384(st->s.state, 11); - - /* Generate the final hash value */ - memcpy(out, st->s.state, ESCH_256_RATE); - sparkle_384(st->s.state, 7); - memcpy(out + ESCH_256_RATE, st->s.state, ESCH_256_RATE); -} - -/** - * \brief Rate at which bytes are processed by Esch384. - */ -#define ESCH_384_RATE 16 - -/** - * \brief Perform the M4 step for Esch384 to mix the input with the state. - * - * \param s SPARKLE-512 state. - * \param block Block of input data that has been padded to the rate. - * \param domain Domain separator for this phase. - */ -#define esch_384_m4(s, block, domain) \ - do { \ - uint32_t tx = block[0] ^ block[2]; \ - uint32_t ty = block[1] ^ block[3]; \ - tx = leftRotate16(tx ^ (tx << 16)); \ - ty = leftRotate16(ty ^ (ty << 16)); \ - s[0] ^= block[0] ^ ty; \ - s[1] ^= block[1] ^ tx; \ - s[2] ^= block[2] ^ ty; \ - s[3] ^= block[3] ^ tx; \ - if ((domain) != 0) \ - s[7] ^= DOMAIN(domain); \ - s[4] ^= ty; \ - s[5] ^= tx; \ - s[6] ^= ty; \ - s[7] ^= tx; \ - } while (0) - -/** @cond esch_384 */ - -/** - * \brief Word-based state for the Esch384 incremental hash mode. 
- */ -typedef union -{ - struct { - uint32_t state[SPARKLE_512_STATE_SIZE]; - uint32_t block[4]; - unsigned char count; - } s; - unsigned long long align; - -} esch_384_hash_state_wt; - -/** @endcond */ - -int esch_384_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - uint32_t s[SPARKLE_512_STATE_SIZE]; - uint32_t block[ESCH_256_RATE / 4]; - memset(s, 0, sizeof(s)); - while (inlen > ESCH_384_RATE) { - memcpy(block, in, ESCH_384_RATE); - esch_384_m4(s, block, 0x00); - sparkle_512(s, 8); - in += ESCH_384_RATE; - inlen -= ESCH_384_RATE; - } - if (inlen == ESCH_384_RATE) { - memcpy(block, in, ESCH_384_RATE); - esch_384_m4(s, block, 0x02); - } else { - unsigned temp = (unsigned)inlen; - memcpy(block, in, temp); - ((unsigned char *)block)[temp] = 0x80; - memset(((unsigned char *)block) + temp + 1, 0, - ESCH_384_RATE - temp - 1); - esch_384_m4(s, block, 0x01); - } - sparkle_512(s, 12); - memcpy(out, s, ESCH_384_RATE); - sparkle_512(s, 8); - memcpy(out + ESCH_384_RATE, s, ESCH_384_RATE); - sparkle_512(s, 8); - memcpy(out + ESCH_384_RATE * 2, s, ESCH_384_RATE); - return 0; -} - -void esch_384_hash_init(esch_384_hash_state_t *state) -{ - memset(state, 0, sizeof(esch_384_hash_state_t)); -} - -void esch_384_hash_update - (esch_384_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - esch_384_hash_state_wt *st = (esch_384_hash_state_wt *)state; - unsigned temp; - while (inlen > 0) { - if (st->s.count == ESCH_384_RATE) { - esch_384_m4(st->s.state, st->s.block, 0x00); - sparkle_512(st->s.state, 8); - st->s.count = 0; - } - temp = ESCH_384_RATE - st->s.count; - if (temp > inlen) - temp = (unsigned)inlen; - memcpy(((unsigned char *)(st->s.block)) + st->s.count, in, temp); - st->s.count += temp; - in += temp; - inlen -= temp; - } -} - -void esch_384_hash_finalize - (esch_384_hash_state_t *state, unsigned char *out) -{ - esch_384_hash_state_wt *st = (esch_384_hash_state_wt *)state; - - /* Pad and process the last block */ - if (st->s.count == ESCH_384_RATE) { - esch_384_m4(st->s.state, st->s.block, 0x02); - } else { - unsigned temp = st->s.count; - ((unsigned char *)(st->s.block))[temp] = 0x80; - memset(((unsigned char *)(st->s.block)) + temp + 1, 0, - ESCH_384_RATE - temp - 1); - esch_384_m4(st->s.state, st->s.block, 0x01); - } - sparkle_512(st->s.state, 12); - - /* Generate the final hash value */ - memcpy(out, st->s.state, ESCH_384_RATE); - sparkle_512(st->s.state, 8); - memcpy(out + ESCH_384_RATE, st->s.state, ESCH_384_RATE); - sparkle_512(st->s.state, 8); - memcpy(out + ESCH_384_RATE * 2, st->s.state, ESCH_384_RATE); -} diff --git a/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/sparkle.h b/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/sparkle.h deleted file mode 100644 index dd0999e..0000000 --- a/sparkle/Implementations/crypto_hash/esch384v1/rhys-avr/sparkle.h +++ /dev/null @@ -1,515 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SPARKLE_H -#define LWCRYPTO_SPARKLE_H - -#include "aead-common.h" - -/** - * \file sparkle.h - * \brief Encryption and hash algorithms based on the SPARKLE permutation. - * - * SPARKLE is a family of encryption and hash algorithms that are based - * around the SPARKLE permutation. There are three versions of the - * permutation with 256-bit, 384-bit, and 512-bit state sizes. - * The algorithms in the family are: - * - * \li Schwaemm256-128 with a 128-bit key, a 256-bit nonce, and a 128-bit tag. - * This is the primary encryption algorithm in the family. - * \li Schwaemm192-192 with a 192-bit key, a 192-bit nonce, and a 192-bit tag. - * \li Schwaemm128-128 with a 128-bit key, a 128-bit nonce, and a 128-bit tag. - * \li Schwaemm256-256 with a 256-bit key, a 256-bit nonce, and a 256-bit tag. - * \li Esch256 hash algorithm with a 256-bit digest output. This is the - * primary hash algorithm in the family. - * \li Esch384 hash algorithm with a 384-bit digest output. - * - * References: https://www.cryptolux.org/index.php/Sparkle - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Schwaemm256-128. - */ -#define SCHWAEMM_256_128_NONCE_SIZE 32 - -/** - * \brief Size of the key for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_KEY_SIZE 24 - -/** - * \brief Size of the authentication tag for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_TAG_SIZE 24 - -/** - * \brief Size of the nonce for Schwaemm192-192. - */ -#define SCHWAEMM_192_192_NONCE_SIZE 24 - -/** - * \brief Size of the key for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Schwaemm128-128. - */ -#define SCHWAEMM_128_128_NONCE_SIZE 16 - -/** - * \brief Size of the key for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_TAG_SIZE 32 - -/** - * \brief Size of the nonce for Schwaemm256-256. - */ -#define SCHWAEMM_256_256_NONCE_SIZE 32 - -/** - * \brief Size of the hash output for Esch256. 
- */ -#define ESCH_256_HASH_SIZE 32 - -/** - * \brief Size of the hash output for Esch384. - */ -#define ESCH_384_HASH_SIZE 48 - -/** - * \brief Meta-information block for the Schwaemm256-128 cipher. - */ -extern aead_cipher_t const schwaemm_256_128_cipher; - -/** - * \brief Meta-information block for the Schwaemm192-192 cipher. - */ -extern aead_cipher_t const schwaemm_192_192_cipher; - -/** - * \brief Meta-information block for the Schwaemm128-128 cipher. - */ -extern aead_cipher_t const schwaemm_128_128_cipher; - -/** - * \brief Meta-information block for the Schwaemm256-256 cipher. - */ -extern aead_cipher_t const schwaemm_256_256_cipher; - -/** - * \brief Meta-information block for the Esch256 hash algorithm. - */ -extern aead_hash_algorithm_t const esch_256_hash_algorithm; - -/** - * \brief Meta-information block for the Esch384 hash algorithm. - */ -extern aead_hash_algorithm_t const esch_384_hash_algorithm; - -/** - * \brief State information for the Esch256 incremental hash mode. - */ -typedef union -{ - struct { - unsigned char state[48]; /**< Current hash state */ - unsigned char block[16]; /**< Partial input data block */ - unsigned char count; /**< Number of bytes in the current block */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} esch_256_hash_state_t; - -/** - * \brief State information for the Esch384 incremental hash mode. - */ -typedef union -{ - struct { - unsigned char state[64]; /**< Current hash state */ - unsigned char block[16]; /**< Partial input data block */ - unsigned char count; /**< Number of bytes in the current block */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} esch_384_hash_state_t; - -/** - * \brief Encrypts and authenticates a packet with Schwaemm256-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 32 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa schwaemm_256_128_aead_decrypt() - */ -int schwaemm_256_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm256-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 32 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_256_128_aead_encrypt() - */ -int schwaemm_256_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Schwaemm192-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 24 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 24 bytes in length. - * \param k Points to the 24 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa schwaemm_192_192_aead_decrypt() - */ -int schwaemm_192_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm192-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 24 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 24 bytes in length. - * \param k Points to the 24 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_192_192_aead_encrypt() - */ -int schwaemm_192_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Schwaemm128-128. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa schwaemm_128_128_aead_decrypt() - */ -int schwaemm_128_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm128-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_128_128_aead_encrypt() - */ -int schwaemm_128_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Schwaemm256-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 32 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 32 bytes in length. - * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters.
- * - * \sa schwaemm_256_256_aead_decrypt() - */ -int schwaemm_256_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Schwaemm256-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 32 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 32 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa schwaemm_256_256_aead_encrypt() - */ -int schwaemm_256_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with Esch256 to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * ESCH_256_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int esch_256_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an Esch256 hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa esch_256_hash_update(), esch_256_hash_finalize(), esch_256_hash() - */ -void esch_256_hash_init(esch_256_hash_state_t *state); - -/** - * \brief Updates an Esch256 state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa esch_256_hash_init(), esch_256_hash_finalize() - */ -void esch_256_hash_update - (esch_256_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an Esch256 hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 32-byte hash value. - * - * \sa esch_256_hash_init(), esch_256_hash_update() - */ -void esch_256_hash_finalize - (esch_256_hash_state_t *state, unsigned char *out); - -/** - * \brief Hashes a block of input data with Esch384 to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * ESCH_384_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes.
- * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int esch_384_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for an Esch384 hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa esch_384_hash_update(), esch_384_hash_finalize(), esch_384_hash() - */ -void esch_384_hash_init(esch_384_hash_state_t *state); - -/** - * \brief Updates an Esch384 state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa esch_384_hash_init(), esch_384_hash_finalize() - */ -void esch_384_hash_update - (esch_384_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from an Esch384 hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 48-byte hash value. - * - * \sa esch_384_hash_init(), esch_384_hash_update() - */ -void esch_384_hash_finalize - (esch_384_hash_state_t *state, unsigned char *out); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sparkle/Implementations/crypto_hash/esch384v1/rhys/aead-common.c b/sparkle/Implementations/crypto_hash/esch384v1/rhys/aead-common.c new file mode 100644 index 0000000..84fc53a --- /dev/null +++ b/sparkle/Implementations/crypto_hash/esch384v1/rhys/aead-common.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "aead-common.h" + +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = (accum - 1) >> 8; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} + +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size, int precheck) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = ((accum - 1) >> 8) & precheck; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} diff --git a/sparkle/Implementations/crypto_hash/esch384v1/rhys/aead-common.h b/sparkle/Implementations/crypto_hash/esch384v1/rhys/aead-common.h new file mode 100644 index 0000000..2be95eb --- /dev/null +++ b/sparkle/Implementations/crypto_hash/esch384v1/rhys/aead-common.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_AEAD_COMMON_H +#define LWCRYPTO_AEAD_COMMON_H + +#include <stddef.h> + +/** + * \file aead-common.h + * \brief Definitions that are common across AEAD schemes. + * + * AEAD stands for "Authenticated Encryption with Associated Data". + * It is a standard API pattern for securely encrypting and + * authenticating packets of data. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts and authenticates a packet with an AEAD scheme. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted.
+ * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + */ +typedef int (*aead_cipher_encrypt_t) + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with an AEAD scheme. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + */ +typedef int (*aead_cipher_decrypt_t) + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data. + * + * \param out Buffer to receive the hash output. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +typedef int (*aead_hash_t) + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a hashing operation. + * + * \param state Hash state to be initialized. + */ +typedef void (*aead_hash_init_t)(void *state); + +/** + * \brief Updates a hash state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + */ +typedef void (*aead_hash_update_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Returns the final hash value from a hashing operation. + * + * \param state Hash state to be finalized. + * \param out Points to the output buffer to receive the hash value. + */ +typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); + +/** + * \brief Absorbs more input data into an XOF state. + * + * \param state XOF state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. + * + * \sa ascon_xof_init(), ascon_xof_squeeze() + */ +typedef void (*aead_xof_absorb_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Squeezes output data from an XOF state.
+ * + * \param state XOF state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + */ +typedef void (*aead_xof_squeeze_t) + (void *state, unsigned char *out, unsigned long long outlen); + +/** + * \brief No special AEAD features. + */ +#define AEAD_FLAG_NONE 0x0000 + +/** + * \brief The natural byte order of the AEAD cipher is little-endian. + * + * If this flag is not present, then the natural byte order of the + * AEAD cipher should be assumed to be big-endian. + * + * The natural byte order may be useful when formatting packet sequence + * numbers as nonces. The application needs to know whether the sequence + * number should be packed into the leading or trailing bytes of the nonce. + */ +#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 + +/** + * \brief Meta-information about an AEAD cipher. + */ +typedef struct +{ + const char *name; /**< Name of the cipher */ + unsigned key_len; /**< Length of the key in bytes */ + unsigned nonce_len; /**< Length of the nonce in bytes */ + unsigned tag_len; /**< Length of the tag in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ + aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ + +} aead_cipher_t; + +/** + * \brief Meta-information about a hash algorithm that is related to an AEAD. + * + * Regular hash algorithms should provide the "hash", "init", "update", + * and "finalize" functions. Extensible Output Functions (XOF's) should + * provide the "hash", "init", "absorb", and "squeeze" functions. + */ +typedef struct +{ + const char *name; /**< Name of the hash algorithm */ + size_t state_size; /**< Size of the incremental state structure */ + unsigned hash_len; /**< Length of the hash in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_hash_t hash; /**< All in one hashing function */ + aead_hash_init_t init; /**< Incremental hash/XOF init function */ + aead_hash_update_t update; /**< Incremental hash update function */ + aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ + aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ + aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ + +} aead_hash_algorithm_t; + +/** + * \brief Check an authentication tag in constant time. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + */ +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len); + +/** + * \brief Check an authentication tag in constant time with a previous check. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * \param precheck Set to -1 if previous check succeeded or 0 if it failed.
+ * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + * + * This version can be used to incorporate other information about the + * correctness of the plaintext into the final result. + */ +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len, int precheck); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sparkle/Implementations/crypto_hash/esch384v1/rhys/api.h b/sparkle/Implementations/crypto_hash/esch384v1/rhys/api.h new file mode 100644 index 0000000..d507385 --- /dev/null +++ b/sparkle/Implementations/crypto_hash/esch384v1/rhys/api.h @@ -0,0 +1 @@ +#define CRYPTO_BYTES 48 diff --git a/sparkle/Implementations/crypto_hash/esch384v1/rhys/hash.c b/sparkle/Implementations/crypto_hash/esch384v1/rhys/hash.c new file mode 100644 index 0000000..9acc9f9 --- /dev/null +++ b/sparkle/Implementations/crypto_hash/esch384v1/rhys/hash.c @@ -0,0 +1,8 @@ + +#include "sparkle.h" + +int crypto_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + return esch_384_hash(out, in, inlen); +} diff --git a/sparkle/Implementations/crypto_hash/esch384v1/rhys/internal-sparkle-avr.S b/sparkle/Implementations/crypto_hash/esch384v1/rhys/internal-sparkle-avr.S new file mode 100644 index 0000000..753ea2f --- /dev/null +++ b/sparkle/Implementations/crypto_hash/esch384v1/rhys/internal-sparkle-avr.S @@ -0,0 +1,2887 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global sparkle_256 + .type sparkle_256, @function +sparkle_256: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + push r22 + ld r22,Z + ldd r23,Z+1 + ldd r26,Z+2 + ldd r27,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + rcall 129f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,1 + eor r8,r18 + rcall 129f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,2 + eor r8,r18 + rcall 129f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,3 + eor r8,r18 + rcall 129f + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,4 + eor r8,r18 + rcall 129f + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,5 + eor r8,r18 + rcall 129f + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,6 + eor r8,r18 + rcall 129f + pop r18 + cpi r18,7 + brne 5094f + rjmp 615f +5094: + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,7 + eor r8,r18 + rcall 129f + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor 
r7,r21 + ldi r18,8 + eor r8,r18 + rcall 129f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,9 + eor r8,r18 + rcall 129f + rjmp 615f +129: + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + movw r12,r22 + movw r14,r26 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + movw r24,r4 + movw r16,r6 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r28,Z+24 + ldd r29,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc 
r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + eor r14,r12 + eor r15,r13 + eor r16,r24 + eor r17,r25 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + eor r14,r8 + eor r15,r9 + eor r12,r10 + eor r13,r11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + std Z+20,r18 + std Z+21,r19 + std Z+22,r20 + std Z+23,r21 + movw r18,r4 + movw r20,r6 + movw r4,r14 + movw r6,r12 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + movw r8,r18 + movw r10,r20 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + eor r16,r28 + eor r17,r29 + eor r24,r2 + eor r25,r3 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + std Z+24,r28 + std Z+25,r29 + std Z+26,r2 + std Z+27,r3 + ld r18,Z + ldd r19,Z+1 + ldd r20,Z+2 + ldd r21,Z+3 + movw r14,r22 + movw r12,r26 + eor r14,r18 + eor r15,r19 + eor r12,r20 + eor r13,r21 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + movw r22,r16 + movw r26,r24 + eor r22,r28 + eor r23,r29 + eor r26,r2 + eor r27,r3 + movw r28,r14 + movw r2,r12 + ret +615: + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sparkle_256, .-sparkle_256 + + .text +.global sparkle_384 + .type sparkle_384, @function +sparkle_384: + push r28 + push 
r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + push r22 + ld r22,Z + ldd r23,Z+1 + ldd r26,Z+2 + ldd r27,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + rcall 140f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,1 + eor r8,r18 + rcall 140f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,2 + eor r8,r18 + rcall 140f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,3 + eor r8,r18 + rcall 140f + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,4 + eor r8,r18 + rcall 140f + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,5 + eor r8,r18 + rcall 140f + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,6 + eor r8,r18 + rcall 140f + pop r18 + cpi r18,7 + brne 5094f + rjmp 886f +5094: + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,7 + eor r8,r18 + rcall 140f + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,8 + eor r8,r18 + rcall 140f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,9 + eor r8,r18 + rcall 140f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,10 + eor r8,r18 + rcall 140f + rjmp 886f +140: + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw 
r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + movw r12,r22 + movw r14,r26 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + movw r24,r4 + movw r16,r6 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r28,Z+24 + ldd r29,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + 
eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r22 + std Z+17,r23 + std Z+18,r26 + std Z+19,r27 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r28 + std Z+25,r29 + std Z+26,r2 + std Z+27,r3 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + eor r12,r22 + eor r13,r23 + eor r14,r26 + eor r15,r27 + eor r24,r4 + eor r25,r5 + eor r16,r6 + eor r17,r7 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r28,Z+40 + ldd r29,Z+41 + ldd r2,Z+42 + ldd r3,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + eor r14,r12 + eor r15,r13 + eor r16,r24 + eor r17,r25 + ldd r18,Z+28 + ldd r19,Z+29 + ldd r20,Z+30 + ldd r21,Z+31 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + eor r14,r8 + eor r15,r9 + eor r12,r10 + eor r13,r11 + ldd r8,Z+20 + ldd r9,Z+21 + ldd r10,Z+22 + ldd r11,Z+23 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + ldd r0,Z+4 + eor r18,r0 + ldd r0,Z+5 + eor r19,r0 + ldd 
r0,Z+6 + eor r20,r0 + ldd r0,Z+7 + eor r21,r0 + std Z+20,r18 + std Z+21,r19 + std Z+22,r20 + std Z+23,r21 + ldd r18,Z+4 + ldd r19,Z+5 + ldd r20,Z+6 + ldd r21,Z+7 + std Z+28,r18 + std Z+29,r19 + std Z+30,r20 + std Z+31,r21 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + std Z+36,r18 + std Z+37,r19 + std Z+38,r20 + std Z+39,r21 + eor r8,r14 + eor r9,r15 + eor r10,r12 + eor r11,r13 + ldd r18,Z+24 + ldd r19,Z+25 + ldd r20,Z+26 + ldd r21,Z+27 + eor r18,r16 + eor r19,r17 + eor r20,r24 + eor r21,r25 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + eor r16,r28 + eor r17,r29 + eor r24,r2 + eor r25,r3 + ldd r28,Z+16 + ldd r29,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + std Z+40,r28 + std Z+41,r29 + std Z+42,r2 + std Z+43,r3 + ld r14,Z + ldd r15,Z+1 + ldd r12,Z+2 + ldd r13,Z+3 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + std Z+24,r14 + std Z+25,r15 + std Z+26,r12 + std Z+27,r13 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + std Z+32,r18 + std Z+33,r19 + std Z+34,r20 + std Z+35,r21 + eor r28,r16 + eor r29,r17 + eor r2,r24 + eor r3,r25 + ret +886: + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sparkle_384, .-sparkle_384 + + .text +.global sparkle_512 + .type sparkle_512, @function +sparkle_512: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + push r22 + ld r22,Z + ldd r23,Z+1 + ldd r26,Z+2 + ldd r27,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r28,Z+8 + ldd r29,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + rcall 151f + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,1 + eor r8,r18 + rcall 151f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,2 + eor r8,r18 + rcall 151f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,3 + eor r8,r18 + rcall 151f + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,4 + eor r8,r18 + rcall 151f + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,5 + eor r8,r18 + rcall 151f + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,6 + eor r8,r18 + rcall 151f + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,7 + eor r8,r18 + rcall 151f + pop r18 + cpi r18,8 + brne 5105f + rjmp 1189f +5105: + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,8 + eor r8,r18 + rcall 151f + 
ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,9 + eor r8,r18 + rcall 151f + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,10 + eor r8,r18 + rcall 151f + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,11 + eor r8,r18 + rcall 151f + rjmp 1189f +151: + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,98 + ldi r19,81 + ldi r20,225 + ldi r21,183 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,128 + ldi r19,88 + ldi r20,113 + ldi r21,191 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + st Z,r22 + std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + movw r12,r22 + movw r14,r26 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + movw r24,r4 + movw r16,r6 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + ldd r28,Z+24 + ldd r29,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor 
r6,r23 + eor r7,r26 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,86 + ldi r19,218 + ldi r20,180 + ldi r21,56 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,56 + ldi r19,119 + ldi r20,78 + ldi r21,50 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r22 + std Z+17,r23 + std Z+18,r26 + std Z+19,r27 + std Z+20,r4 + std Z+21,r5 + std Z+22,r6 + std Z+23,r7 + std Z+24,r28 + std Z+25,r29 + std Z+26,r2 + std Z+27,r3 + std Z+28,r8 + std Z+29,r9 + std Z+30,r10 + std Z+31,r11 + eor r12,r22 + eor r13,r23 + eor r14,r26 + eor r15,r27 + eor r12,r28 + eor r13,r29 + eor r14,r2 + eor r15,r3 + eor r24,r4 + eor r25,r5 + eor r16,r6 + eor r17,r7 + eor r24,r8 + eor r25,r9 + eor r16,r10 + eor r17,r11 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + ldd r28,Z+40 + ldd r29,Z+41 + ldd r2,Z+42 + ldd r3,Z+43 + ldd r8,Z+44 + ldd r9,Z+45 + ldd r10,Z+46 + ldd r11,Z+47 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + 
adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,235 + ldi r19,133 + ldi r20,17 + ldi r21,187 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,87 + ldi r19,123 + ldi r20,124 + ldi r21,79 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + std Z+32,r22 + std Z+33,r23 + std Z+34,r26 + std Z+35,r27 + std Z+36,r4 + std Z+37,r5 + std Z+38,r6 + std Z+39,r7 + std Z+40,r28 + std Z+41,r29 + std Z+42,r2 + std Z+43,r3 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + ldd r22,Z+48 + ldd r23,Z+49 + ldd r26,Z+50 + ldd r27,Z+51 + ldd r4,Z+52 + ldd r5,Z+53 + ldd r6,Z+54 + ldd r7,Z+55 + ldd r28,Z+56 + ldd r29,Z+57 + ldd r2,Z+58 + ldd r3,Z+59 + ldd r8,Z+60 + ldd r9,Z+61 + ldd r10,Z+62 + ldd r11,Z+63 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r22,r18 + adc r23,r19 + adc r26,r20 + adc r27,r21 + eor r4,r27 + eor r5,r22 + eor r6,r23 + eor r7,r26 + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r4 + movw r20,r6 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r22,r20 + adc r23,r21 + adc r26,r18 + adc r27,r19 + movw r18,r22 + movw r20,r26 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r4,r20 + eor r5,r21 + eor r6,r18 + eor r7,r19 + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r4 + adc r23,r5 + adc r26,r6 + adc r27,r7 + movw r18,r22 + movw r20,r26 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r18,200 + ldi r19,161 + ldi r20,191 + ldi r21,207 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + add r22,r7 + adc r23,r4 + adc r26,r5 + adc r27,r6 + eor r4,r26 + eor r5,r27 + eor r6,r22 + eor r7,r23 + eor r22,r18 + eor r23,r19 + eor r26,r20 + eor r27,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + add r28,r18 + adc r29,r19 + adc r2,r20 + adc r3,r21 + eor r8,r3 + eor r9,r28 + eor r10,r29 + eor r11,r2 + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + movw r18,r8 + movw r20,r10 + bst r18,0 + lsr 
r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + add r28,r20 + adc r29,r21 + adc r2,r18 + adc r3,r19 + movw r18,r28 + movw r20,r2 + bst r18,0 + lsr r21 + ror r20 + ror r19 + ror r18 + bld r21,7 + eor r8,r20 + eor r9,r21 + eor r10,r18 + eor r11,r19 + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r8 + adc r29,r9 + adc r2,r10 + adc r3,r11 + movw r18,r28 + movw r20,r2 + lsl r18 + rol r19 + rol r20 + rol r21 + adc r18,r1 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ldi r18,61 + ldi r19,41 + ldi r20,179 + ldi r21,194 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + add r28,r11 + adc r29,r8 + adc r2,r9 + adc r3,r10 + eor r8,r2 + eor r9,r3 + eor r10,r28 + eor r11,r29 + eor r28,r18 + eor r29,r19 + eor r2,r20 + eor r3,r21 + eor r14,r12 + eor r15,r13 + eor r16,r24 + eor r17,r25 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + ldd r4,Z+36 + ldd r5,Z+37 + ldd r6,Z+38 + ldd r7,Z+39 + eor r4,r14 + eor r5,r15 + eor r6,r12 + eor r7,r13 + ldd r18,Z+44 + ldd r19,Z+45 + ldd r20,Z+46 + ldd r21,Z+47 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + eor r14,r8 + eor r15,r9 + eor r12,r10 + eor r13,r11 + ldd r8,Z+28 + ldd r9,Z+29 + ldd r10,Z+30 + ldd r11,Z+31 + std Z+60,r8 + std Z+61,r9 + std Z+62,r10 + std Z+63,r11 + ldd r8,Z+4 + ldd r9,Z+5 + ldd r10,Z+6 + ldd r11,Z+7 + eor r4,r8 + eor r5,r9 + eor r6,r10 + eor r7,r11 + std Z+28,r4 + std Z+29,r5 + std Z+30,r6 + std Z+31,r7 + std Z+36,r8 + std Z+37,r9 + std Z+38,r10 + std Z+39,r11 + ldd r8,Z+12 + ldd r9,Z+13 + ldd r10,Z+14 + ldd r11,Z+15 + eor r18,r8 + eor r19,r9 + eor r20,r10 + eor r21,r11 + std Z+44,r8 + std Z+45,r9 + std Z+46,r10 + std Z+47,r11 + ldd r8,Z+52 + ldd r9,Z+53 + ldd r10,Z+54 + ldd r11,Z+55 + ldd r4,Z+20 + ldd r5,Z+21 + ldd r6,Z+22 + ldd r7,Z+23 + eor r8,r4 + eor r9,r5 + eor r10,r6 + eor r11,r7 + std Z+52,r4 + std Z+53,r5 + std Z+54,r6 + std Z+55,r7 + ldd r0,Z+60 + eor r14,r0 + ldd r0,Z+61 + eor r15,r0 + ldd r0,Z+62 + eor r12,r0 + ldd r0,Z+63 + eor r13,r0 + std Z+20,r14 + std Z+21,r15 + std Z+22,r12 + std Z+23,r13 + movw r4,r18 + movw r6,r20 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + std Z+48,r22 + std Z+49,r23 + std Z+50,r26 + std Z+51,r27 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r26,Z+34 + ldd r27,Z+35 + eor r22,r16 + eor r23,r17 + eor r26,r24 + eor r27,r25 + ldd r18,Z+40 + ldd r19,Z+41 + ldd r20,Z+42 + ldd r21,Z+43 + eor r18,r16 + eor r19,r17 + eor r20,r24 + eor r21,r25 + eor r16,r28 + eor r17,r29 + eor r24,r2 + eor r25,r3 + ldd r14,Z+24 + ldd r15,Z+25 + ldd r12,Z+26 + ldd r13,Z+27 + std Z+56,r14 + std Z+57,r15 + std Z+58,r12 + std Z+59,r13 + ld r14,Z + ldd r15,Z+1 + ldd r12,Z+2 + ldd r13,Z+3 + eor r22,r14 + eor r23,r15 + eor r26,r12 + eor r27,r13 + std Z+24,r22 + std Z+25,r23 + std Z+26,r26 + std Z+27,r27 + std Z+32,r14 + std Z+33,r15 + std Z+34,r12 + std Z+35,r13 + ldd r14,Z+8 + ldd r15,Z+9 + ldd r12,Z+10 + ldd r13,Z+11 + eor r18,r14 + eor r19,r15 + eor r20,r12 + eor r21,r13 + movw r22,r18 + movw r26,r20 + std Z+40,r14 + std Z+41,r15 + std Z+42,r12 + std Z+43,r13 + ldd r28,Z+48 + ldd r29,Z+49 + ldd r2,Z+50 + ldd r3,Z+51 + ldd r14,Z+16 + ldd r15,Z+17 + ldd r12,Z+18 + ldd r13,Z+19 + eor r28,r14 + eor r29,r15 + eor r2,r12 + eor r3,r13 + std Z+48,r14 + std Z+49,r15 + std Z+50,r12 + std Z+51,r13 + ldd r0,Z+56 + eor r16,r0 + ldd r0,Z+57 + eor r17,r0 + ldd r0,Z+58 + eor r24,r0 + ldd r0,Z+59 + eor r25,r0 + std Z+16,r16 + std Z+17,r17 + std Z+18,r24 + std Z+19,r25 + ret +1189: + st Z,r22 + 
std Z+1,r23 + std Z+2,r26 + std Z+3,r27 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r28 + std Z+9,r29 + std Z+10,r2 + std Z+11,r3 + std Z+12,r8 + std Z+13,r9 + std Z+14,r10 + std Z+15,r11 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sparkle_512, .-sparkle_512 + +#endif diff --git a/sparkle/Implementations/crypto_hash/esch384v1/rhys/internal-sparkle.c b/sparkle/Implementations/crypto_hash/esch384v1/rhys/internal-sparkle.c new file mode 100644 index 0000000..4a4c0fb --- /dev/null +++ b/sparkle/Implementations/crypto_hash/esch384v1/rhys/internal-sparkle.c @@ -0,0 +1,382 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "internal-sparkle.h" + +#if !defined(__AVR__) + +/* The 8 basic round constants from the specification */ +#define RC_0 0xB7E15162 +#define RC_1 0xBF715880 +#define RC_2 0x38B4DA56 +#define RC_3 0x324E7738 +#define RC_4 0xBB1185EB +#define RC_5 0x4F7C7B57 +#define RC_6 0xCFBFA1C8 +#define RC_7 0xC2B3293D + +/* Round constants for all SPARKLE steps; maximum of 12 for SPARKLE-512 */ +static uint32_t const sparkle_rc[12] = { + RC_0, RC_1, RC_2, RC_3, RC_4, RC_5, RC_6, RC_7, + RC_0, RC_1, RC_2, RC_3 +}; + +/** + * \brief Alzette block cipher that implements the ARXbox layer of the + * SPARKLE permutation. + * + * \param x Left half of the 64-bit block. + * \param y Right half of the 64-bit block. + * \param k 32-bit round key. 
+ */ +#define alzette(x, y, k) \ + do { \ + (x) += leftRotate1((y)); \ + (y) ^= leftRotate8((x)); \ + (x) ^= (k); \ + (x) += leftRotate15((y)); \ + (y) ^= leftRotate15((x)); \ + (x) ^= (k); \ + (x) += (y); \ + (y) ^= leftRotate1((x)); \ + (x) ^= (k); \ + (x) += leftRotate8((y)); \ + (y) ^= leftRotate16((x)); \ + (x) ^= (k); \ + } while (0) + +void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps) +{ + uint32_t x0, x1, x2, x3; + uint32_t y0, y1, y2, y3; + uint32_t tx, ty; + unsigned step; + + /* Load the SPARKLE-256 state up into local variables */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + x0 = s[0]; + y0 = s[1]; + x1 = s[2]; + y1 = s[3]; + x2 = s[4]; + y2 = s[5]; + x3 = s[6]; + y3 = s[7]; +#else + x0 = le_load_word32((const uint8_t *)&(s[0])); + y0 = le_load_word32((const uint8_t *)&(s[1])); + x1 = le_load_word32((const uint8_t *)&(s[2])); + y1 = le_load_word32((const uint8_t *)&(s[3])); + x2 = le_load_word32((const uint8_t *)&(s[4])); + y2 = le_load_word32((const uint8_t *)&(s[5])); + x3 = le_load_word32((const uint8_t *)&(s[6])); + y3 = le_load_word32((const uint8_t *)&(s[7])); +#endif + + /* Perform all requested steps */ + for (step = 0; step < steps; ++step) { + /* Add round constants */ + y0 ^= sparkle_rc[step]; + y1 ^= step; + + /* ARXbox layer */ + alzette(x0, y0, RC_0); + alzette(x1, y1, RC_1); + alzette(x2, y2, RC_2); + alzette(x3, y3, RC_3); + + /* Linear layer */ + tx = x0 ^ x1; + ty = y0 ^ y1; + tx = leftRotate16(tx ^ (tx << 16)); + ty = leftRotate16(ty ^ (ty << 16)); + y2 ^= tx; + tx ^= y3; + y3 = y1; + y1 = y2 ^ y0; + y2 = y0; + y0 = tx ^ y3; + x2 ^= ty; + ty ^= x3; + x3 = x1; + x1 = x2 ^ x0; + x2 = x0; + x0 = ty ^ x3; + } + + /* Write the local variables back to the SPARKLE-256 state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + s[0] = x0; + s[1] = y0; + s[2] = x1; + s[3] = y1; + s[4] = x2; + s[5] = y2; + s[6] = x3; + s[7] = y3; +#else + le_store_word32((uint8_t *)&(s[0]), x0); + le_store_word32((uint8_t *)&(s[1]), y0); + le_store_word32((uint8_t *)&(s[2]), x1); + le_store_word32((uint8_t *)&(s[3]), y1); + le_store_word32((uint8_t *)&(s[4]), x2); + le_store_word32((uint8_t *)&(s[5]), y2); + le_store_word32((uint8_t *)&(s[6]), x3); + le_store_word32((uint8_t *)&(s[7]), y3); +#endif +} + +void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps) +{ + uint32_t x0, x1, x2, x3, x4, x5; + uint32_t y0, y1, y2, y3, y4, y5; + uint32_t tx, ty; + unsigned step; + + /* Load the SPARKLE-384 state up into local variables */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + x0 = s[0]; + y0 = s[1]; + x1 = s[2]; + y1 = s[3]; + x2 = s[4]; + y2 = s[5]; + x3 = s[6]; + y3 = s[7]; + x4 = s[8]; + y4 = s[9]; + x5 = s[10]; + y5 = s[11]; +#else + x0 = le_load_word32((const uint8_t *)&(s[0])); + y0 = le_load_word32((const uint8_t *)&(s[1])); + x1 = le_load_word32((const uint8_t *)&(s[2])); + y1 = le_load_word32((const uint8_t *)&(s[3])); + x2 = le_load_word32((const uint8_t *)&(s[4])); + y2 = le_load_word32((const uint8_t *)&(s[5])); + x3 = le_load_word32((const uint8_t *)&(s[6])); + y3 = le_load_word32((const uint8_t *)&(s[7])); + x4 = le_load_word32((const uint8_t *)&(s[8])); + y4 = le_load_word32((const uint8_t *)&(s[9])); + x5 = le_load_word32((const uint8_t *)&(s[10])); + y5 = le_load_word32((const uint8_t *)&(s[11])); +#endif + + /* Perform all requested steps */ + for (step = 0; step < steps; ++step) { + /* Add round constants */ + y0 ^= sparkle_rc[step]; + y1 ^= step; + + /* ARXbox layer */ + alzette(x0, y0, RC_0); + alzette(x1, y1, RC_1); + alzette(x2, y2, RC_2); + alzette(x3, y3, RC_3); 
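/* Descriptive note on the added code below (not part of the patch itself): SPARKLE-384 applies Alzette to six branches, and the two remaining branches that follow use RC_4 and RC_5 as their per-branch round keys. */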
+ alzette(x4, y4, RC_4); + alzette(x5, y5, RC_5); + + /* Linear layer */ + tx = x0 ^ x1 ^ x2; + ty = y0 ^ y1 ^ y2; + tx = leftRotate16(tx ^ (tx << 16)); + ty = leftRotate16(ty ^ (ty << 16)); + y3 ^= tx; + y4 ^= tx; + tx ^= y5; + y5 = y2; + y2 = y3 ^ y0; + y3 = y0; + y0 = y4 ^ y1; + y4 = y1; + y1 = tx ^ y5; + x3 ^= ty; + x4 ^= ty; + ty ^= x5; + x5 = x2; + x2 = x3 ^ x0; + x3 = x0; + x0 = x4 ^ x1; + x4 = x1; + x1 = ty ^ x5; + } + + /* Write the local variables back to the SPARKLE-384 state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + s[0] = x0; + s[1] = y0; + s[2] = x1; + s[3] = y1; + s[4] = x2; + s[5] = y2; + s[6] = x3; + s[7] = y3; + s[8] = x4; + s[9] = y4; + s[10] = x5; + s[11] = y5; +#else + le_store_word32((uint8_t *)&(s[0]), x0); + le_store_word32((uint8_t *)&(s[1]), y0); + le_store_word32((uint8_t *)&(s[2]), x1); + le_store_word32((uint8_t *)&(s[3]), y1); + le_store_word32((uint8_t *)&(s[4]), x2); + le_store_word32((uint8_t *)&(s[5]), y2); + le_store_word32((uint8_t *)&(s[6]), x3); + le_store_word32((uint8_t *)&(s[7]), y3); + le_store_word32((uint8_t *)&(s[8]), x4); + le_store_word32((uint8_t *)&(s[9]), y4); + le_store_word32((uint8_t *)&(s[10]), x5); + le_store_word32((uint8_t *)&(s[11]), y5); +#endif +} + +void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps) +{ + uint32_t x0, x1, x2, x3, x4, x5, x6, x7; + uint32_t y0, y1, y2, y3, y4, y5, y6, y7; + uint32_t tx, ty; + unsigned step; + + /* Load the SPARKLE-512 state up into local variables */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + x0 = s[0]; + y0 = s[1]; + x1 = s[2]; + y1 = s[3]; + x2 = s[4]; + y2 = s[5]; + x3 = s[6]; + y3 = s[7]; + x4 = s[8]; + y4 = s[9]; + x5 = s[10]; + y5 = s[11]; + x6 = s[12]; + y6 = s[13]; + x7 = s[14]; + y7 = s[15]; +#else + x0 = le_load_word32((const uint8_t *)&(s[0])); + y0 = le_load_word32((const uint8_t *)&(s[1])); + x1 = le_load_word32((const uint8_t *)&(s[2])); + y1 = le_load_word32((const uint8_t *)&(s[3])); + x2 = le_load_word32((const uint8_t *)&(s[4])); + y2 = le_load_word32((const uint8_t *)&(s[5])); + x3 = le_load_word32((const uint8_t *)&(s[6])); + y3 = le_load_word32((const uint8_t *)&(s[7])); + x4 = le_load_word32((const uint8_t *)&(s[8])); + y4 = le_load_word32((const uint8_t *)&(s[9])); + x5 = le_load_word32((const uint8_t *)&(s[10])); + y5 = le_load_word32((const uint8_t *)&(s[11])); + x6 = le_load_word32((const uint8_t *)&(s[12])); + y6 = le_load_word32((const uint8_t *)&(s[13])); + x7 = le_load_word32((const uint8_t *)&(s[14])); + y7 = le_load_word32((const uint8_t *)&(s[15])); +#endif + + /* Perform all requested steps */ + for (step = 0; step < steps; ++step) { + /* Add round constants */ + y0 ^= sparkle_rc[step]; + y1 ^= step; + + /* ARXbox layer */ + alzette(x0, y0, RC_0); + alzette(x1, y1, RC_1); + alzette(x2, y2, RC_2); + alzette(x3, y3, RC_3); + alzette(x4, y4, RC_4); + alzette(x5, y5, RC_5); + alzette(x6, y6, RC_6); + alzette(x7, y7, RC_7); + + /* Linear layer */ + tx = x0 ^ x1 ^ x2 ^ x3; + ty = y0 ^ y1 ^ y2 ^ y3; + tx = leftRotate16(tx ^ (tx << 16)); + ty = leftRotate16(ty ^ (ty << 16)); + y4 ^= tx; + y5 ^= tx; + y6 ^= tx; + tx ^= y7; + y7 = y3; + y3 = y4 ^ y0; + y4 = y0; + y0 = y5 ^ y1; + y5 = y1; + y1 = y6 ^ y2; + y6 = y2; + y2 = tx ^ y7; + x4 ^= ty; + x5 ^= ty; + x6 ^= ty; + ty ^= x7; + x7 = x3; + x3 = x4 ^ x0; + x4 = x0; + x0 = x5 ^ x1; + x5 = x1; + x1 = x6 ^ x2; + x6 = x2; + x2 = ty ^ x7; + } + + /* Write the local variables back to the SPARKLE-512 state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + s[0] = x0; + s[1] = y0; + s[2] = x1; + s[3] = y1; + s[4] = x2; + s[5] = y2; 
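/* Descriptive note on the added code below (not part of the patch itself): the state array interleaves the halves of each branch, so s[2*i] holds x_i and s[2*i+1] holds y_i, which is why the store-back writes x and y words alternately. */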
+ s[6] = x3; + s[7] = y3; + s[8] = x4; + s[9] = y4; + s[10] = x5; + s[11] = y5; + s[12] = x6; + s[13] = y6; + s[14] = x7; + s[15] = y7; +#else + le_store_word32((uint8_t *)&(s[0]), x0); + le_store_word32((uint8_t *)&(s[1]), y0); + le_store_word32((uint8_t *)&(s[2]), x1); + le_store_word32((uint8_t *)&(s[3]), y1); + le_store_word32((uint8_t *)&(s[4]), x2); + le_store_word32((uint8_t *)&(s[5]), y2); + le_store_word32((uint8_t *)&(s[6]), x3); + le_store_word32((uint8_t *)&(s[7]), y3); + le_store_word32((uint8_t *)&(s[8]), x4); + le_store_word32((uint8_t *)&(s[9]), y4); + le_store_word32((uint8_t *)&(s[10]), x5); + le_store_word32((uint8_t *)&(s[11]), y5); + le_store_word32((uint8_t *)&(s[12]), x6); + le_store_word32((uint8_t *)&(s[13]), y6); + le_store_word32((uint8_t *)&(s[14]), x7); + le_store_word32((uint8_t *)&(s[15]), y7); +#endif +} + +#endif diff --git a/sparkle/Implementations/crypto_hash/esch384v1/rhys/internal-sparkle.h b/sparkle/Implementations/crypto_hash/esch384v1/rhys/internal-sparkle.h new file mode 100644 index 0000000..fbdabc1 --- /dev/null +++ b/sparkle/Implementations/crypto_hash/esch384v1/rhys/internal-sparkle.h @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_SPARKLE_H +#define LW_INTERNAL_SPARKLE_H + +#include "internal-util.h" + +/** + * \file internal-sparkle.h + * \brief Internal implementation of the SPARKLE permutation. + * + * References: https://www.cryptolux.org/index.php/Sparkle + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the state for SPARKLE-256. + */ +#define SPARKLE_256_STATE_SIZE 8 + +/** + * \brief Size of the state for SPARKLE-384. + */ +#define SPARKLE_384_STATE_SIZE 12 + +/** + * \brief Size of the state for SPARKLE-512. + */ +#define SPARKLE_512_STATE_SIZE 16 + +/** + * \brief Performs the SPARKLE-256 permutation. + * + * \param s The words of the SPARKLE-256 state in little-endian byte order. + * \param steps The number of steps to perform, 7 or 10. + */ +void sparkle_256(uint32_t s[SPARKLE_256_STATE_SIZE], unsigned steps); + +/** + * \brief Performs the SPARKLE-384 permutation. + * + * \param s The words of the SPARKLE-384 state in little-endian byte order. + * \param steps The number of steps to perform, 7 or 11. + */ +void sparkle_384(uint32_t s[SPARKLE_384_STATE_SIZE], unsigned steps); + +/** + * \brief Performs the SPARKLE-512 permutation. 
+ * + * \param s The words of the SPARKLE-512 state in little-endian byte order. + * \param steps The number of steps to perform, 8 or 12. + */ +void sparkle_512(uint32_t s[SPARKLE_512_STATE_SIZE], unsigned steps); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sparkle/Implementations/crypto_hash/esch384v1/rhys/internal-util.h b/sparkle/Implementations/crypto_hash/esch384v1/rhys/internal-util.h new file mode 100644 index 0000000..e30166d --- /dev/null +++ b/sparkle/Implementations/crypto_hash/esch384v1/rhys/internal-util.h @@ -0,0 +1,702 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_UTIL_H +#define LW_INTERNAL_UTIL_H + +#include + +/* Figure out how to inline functions using this C compiler */ +#if defined(__STDC__) && __STDC_VERSION__ >= 199901L +#define STATIC_INLINE static inline +#elif defined(__GNUC__) || defined(__clang__) +#define STATIC_INLINE static __inline__ +#else +#define STATIC_INLINE static +#endif + +/* Try to figure out whether the CPU is little-endian or big-endian. + * May need to modify this to include new compiler-specific defines. 
+ * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your + * compiler flags when you compile this library */ +#if defined(__x86_64) || defined(__x86_64__) || \ + defined(__i386) || defined(__i386__) || \ + defined(__AVR__) || defined(__arm) || defined(__arm__) || \ + defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ + defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ + defined(__LITTLE_ENDIAN__) +#define LW_UTIL_LITTLE_ENDIAN 1 +#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ + defined(__BIG_ENDIAN__) +/* Big endian */ +#else +#error "Cannot determine the endianess of this platform" +#endif + +/* Helper macros to load and store values while converting endian-ness */ + +/* Load a big-endian 32-bit word from a byte buffer */ +#define be_load_word32(ptr) \ + ((((uint32_t)((ptr)[0])) << 24) | \ + (((uint32_t)((ptr)[1])) << 16) | \ + (((uint32_t)((ptr)[2])) << 8) | \ + ((uint32_t)((ptr)[3]))) + +/* Store a big-endian 32-bit word into a byte buffer */ +#define be_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 24); \ + (ptr)[1] = (uint8_t)(_x >> 16); \ + (ptr)[2] = (uint8_t)(_x >> 8); \ + (ptr)[3] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 32-bit word from a byte buffer */ +#define le_load_word32(ptr) \ + ((((uint32_t)((ptr)[3])) << 24) | \ + (((uint32_t)((ptr)[2])) << 16) | \ + (((uint32_t)((ptr)[1])) << 8) | \ + ((uint32_t)((ptr)[0]))) + +/* Store a little-endian 32-bit word into a byte buffer */ +#define le_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + } while (0) + +/* Load a big-endian 64-bit word from a byte buffer */ +#define be_load_word64(ptr) \ + ((((uint64_t)((ptr)[0])) << 56) | \ + (((uint64_t)((ptr)[1])) << 48) | \ + (((uint64_t)((ptr)[2])) << 40) | \ + (((uint64_t)((ptr)[3])) << 32) | \ + (((uint64_t)((ptr)[4])) << 24) | \ + (((uint64_t)((ptr)[5])) << 16) | \ + (((uint64_t)((ptr)[6])) << 8) | \ + ((uint64_t)((ptr)[7]))) + +/* Store a big-endian 64-bit word into a byte buffer */ +#define be_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 56); \ + (ptr)[1] = (uint8_t)(_x >> 48); \ + (ptr)[2] = (uint8_t)(_x >> 40); \ + (ptr)[3] = (uint8_t)(_x >> 32); \ + (ptr)[4] = (uint8_t)(_x >> 24); \ + (ptr)[5] = (uint8_t)(_x >> 16); \ + (ptr)[6] = (uint8_t)(_x >> 8); \ + (ptr)[7] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 64-bit word from a byte buffer */ +#define le_load_word64(ptr) \ + ((((uint64_t)((ptr)[7])) << 56) | \ + (((uint64_t)((ptr)[6])) << 48) | \ + (((uint64_t)((ptr)[5])) << 40) | \ + (((uint64_t)((ptr)[4])) << 32) | \ + (((uint64_t)((ptr)[3])) << 24) | \ + (((uint64_t)((ptr)[2])) << 16) | \ + (((uint64_t)((ptr)[1])) << 8) | \ + ((uint64_t)((ptr)[0]))) + +/* Store a little-endian 64-bit word into a byte buffer */ +#define le_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + (ptr)[4] = (uint8_t)(_x >> 32); \ + (ptr)[5] = (uint8_t)(_x >> 40); \ + (ptr)[6] = (uint8_t)(_x >> 48); \ + (ptr)[7] = (uint8_t)(_x >> 56); \ + } while (0) + +/* Load a big-endian 16-bit word from a byte buffer */ +#define be_load_word16(ptr) \ + ((((uint16_t)((ptr)[0])) << 8) | \ + ((uint16_t)((ptr)[1]))) + +/* Store a 
big-endian 16-bit word into a byte buffer */ +#define be_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 8); \ + (ptr)[1] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 16-bit word from a byte buffer */ +#define le_load_word16(ptr) \ + ((((uint16_t)((ptr)[1])) << 8) | \ + ((uint16_t)((ptr)[0]))) + +/* Store a little-endian 16-bit word into a byte buffer */ +#define le_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + } while (0) + +/* XOR a source byte buffer against a destination */ +#define lw_xor_block(dest, src, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ ^= *_src++; \ + --_len; \ + } \ + } while (0) + +/* XOR two source byte buffers and put the result in a destination buffer */ +#define lw_xor_block_2_src(dest, src1, src2, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ = *_src1++ ^ *_src2++; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time */ +#define lw_xor_block_2_dest(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest2++ = (*_dest++ ^= *_src++); \ + --_len; \ + } \ + } while (0) + +/* XOR two byte buffers and write to a destination which at the same + * time copying the contents of src2 to dest2 */ +#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src2++; \ + *_dest2++ = _temp; \ + *_dest++ = *_src1++ ^ _temp; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time. This version swaps the source value + * into the "dest" buffer */ +#define lw_xor_block_swap(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src++; \ + *_dest2++ = *_dest ^ _temp; \ + *_dest++ = _temp; \ + --_len; \ + } \ + } while (0) + +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + +/* Rotation macros for 32-bit arguments */ + +/* Generic left rotate */ +#define leftRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (32 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (32 - (bits))); \ + })) + +#if !LW_CRYPTO_ROTATE32_COMPOSED + +/* Left rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1(a) (leftRotate((a), 1)) +#define leftRotate2(a) (leftRotate((a), 2)) +#define leftRotate3(a) (leftRotate((a), 3)) +#define leftRotate4(a) (leftRotate((a), 4)) +#define leftRotate5(a) (leftRotate((a), 5)) +#define leftRotate6(a) (leftRotate((a), 6)) +#define leftRotate7(a) (leftRotate((a), 7)) +#define leftRotate8(a) (leftRotate((a), 8)) +#define leftRotate9(a) (leftRotate((a), 9)) +#define leftRotate10(a) (leftRotate((a), 10)) +#define leftRotate11(a) (leftRotate((a), 11)) +#define leftRotate12(a) (leftRotate((a), 12)) +#define leftRotate13(a) (leftRotate((a), 13)) +#define leftRotate14(a) (leftRotate((a), 14)) +#define leftRotate15(a) (leftRotate((a), 15)) +#define leftRotate16(a) (leftRotate((a), 16)) +#define leftRotate17(a) (leftRotate((a), 17)) +#define leftRotate18(a) (leftRotate((a), 18)) +#define leftRotate19(a) (leftRotate((a), 19)) +#define leftRotate20(a) (leftRotate((a), 20)) +#define leftRotate21(a) (leftRotate((a), 21)) +#define leftRotate22(a) (leftRotate((a), 22)) +#define leftRotate23(a) (leftRotate((a), 23)) +#define leftRotate24(a) (leftRotate((a), 24)) +#define leftRotate25(a) (leftRotate((a), 25)) +#define leftRotate26(a) (leftRotate((a), 26)) +#define leftRotate27(a) (leftRotate((a), 27)) +#define leftRotate28(a) (leftRotate((a), 28)) +#define leftRotate29(a) (leftRotate((a), 29)) +#define leftRotate30(a) (leftRotate((a), 30)) +#define leftRotate31(a) (leftRotate((a), 31)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1(a) (rightRotate((a), 1)) +#define rightRotate2(a) (rightRotate((a), 2)) +#define rightRotate3(a) (rightRotate((a), 3)) +#define rightRotate4(a) (rightRotate((a), 4)) +#define rightRotate5(a) (rightRotate((a), 5)) +#define rightRotate6(a) (rightRotate((a), 6)) +#define rightRotate7(a) (rightRotate((a), 7)) +#define rightRotate8(a) (rightRotate((a), 8)) +#define rightRotate9(a) (rightRotate((a), 9)) +#define rightRotate10(a) (rightRotate((a), 10)) +#define rightRotate11(a) (rightRotate((a), 11)) +#define rightRotate12(a) (rightRotate((a), 12)) +#define rightRotate13(a) (rightRotate((a), 13)) +#define rightRotate14(a) (rightRotate((a), 14)) +#define rightRotate15(a) (rightRotate((a), 15)) +#define rightRotate16(a) (rightRotate((a), 16)) +#define rightRotate17(a) (rightRotate((a), 17)) +#define rightRotate18(a) (rightRotate((a), 18)) +#define rightRotate19(a) (rightRotate((a), 19)) +#define rightRotate20(a) (rightRotate((a), 20)) +#define rightRotate21(a) (rightRotate((a), 21)) +#define rightRotate22(a) (rightRotate((a), 22)) +#define rightRotate23(a) (rightRotate((a), 23)) +#define rightRotate24(a) (rightRotate((a), 24)) +#define rightRotate25(a) (rightRotate((a), 25)) +#define rightRotate26(a) (rightRotate((a), 26)) +#define rightRotate27(a) (rightRotate((a), 27)) +#define rightRotate28(a) (rightRotate((a), 28)) +#define rightRotate29(a) (rightRotate((a), 29)) +#define rightRotate30(a) (rightRotate((a), 30)) +#define rightRotate31(a) (rightRotate((a), 31)) + +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ 
+#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Rotation macros for 64-bit arguments */ + +/* Generic left rotate */ +#define leftRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (64 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (64 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_64(a) (leftRotate_64((a), 1)) +#define leftRotate2_64(a) (leftRotate_64((a), 2)) +#define leftRotate3_64(a) (leftRotate_64((a), 3)) +#define leftRotate4_64(a) (leftRotate_64((a), 4)) +#define leftRotate5_64(a) (leftRotate_64((a), 5)) +#define leftRotate6_64(a) (leftRotate_64((a), 6)) +#define leftRotate7_64(a) (leftRotate_64((a), 7)) +#define leftRotate8_64(a) (leftRotate_64((a), 8)) +#define leftRotate9_64(a) (leftRotate_64((a), 9)) +#define leftRotate10_64(a) (leftRotate_64((a), 10)) +#define leftRotate11_64(a) (leftRotate_64((a), 11)) +#define leftRotate12_64(a) (leftRotate_64((a), 12)) +#define leftRotate13_64(a) (leftRotate_64((a), 13)) +#define leftRotate14_64(a) (leftRotate_64((a), 14)) +#define leftRotate15_64(a) (leftRotate_64((a), 15)) +#define leftRotate16_64(a) (leftRotate_64((a), 16)) +#define leftRotate17_64(a) (leftRotate_64((a), 17)) +#define leftRotate18_64(a) (leftRotate_64((a), 18)) +#define leftRotate19_64(a) (leftRotate_64((a), 19)) +#define leftRotate20_64(a) (leftRotate_64((a), 20)) +#define leftRotate21_64(a) (leftRotate_64((a), 21)) +#define leftRotate22_64(a) (leftRotate_64((a), 22)) +#define leftRotate23_64(a) (leftRotate_64((a), 23)) +#define leftRotate24_64(a) (leftRotate_64((a), 24)) +#define leftRotate25_64(a) (leftRotate_64((a), 25)) +#define leftRotate26_64(a) (leftRotate_64((a), 26)) +#define leftRotate27_64(a) (leftRotate_64((a), 27)) +#define leftRotate28_64(a) (leftRotate_64((a), 28)) +#define leftRotate29_64(a) (leftRotate_64((a), 29)) +#define leftRotate30_64(a) (leftRotate_64((a), 30)) +#define leftRotate31_64(a) (leftRotate_64((a), 31)) +#define leftRotate32_64(a) (leftRotate_64((a), 32)) +#define leftRotate33_64(a) (leftRotate_64((a), 33)) +#define leftRotate34_64(a) (leftRotate_64((a), 34)) +#define leftRotate35_64(a) (leftRotate_64((a), 35)) +#define leftRotate36_64(a) (leftRotate_64((a), 36)) +#define leftRotate37_64(a) (leftRotate_64((a), 37)) +#define leftRotate38_64(a) (leftRotate_64((a), 38)) +#define leftRotate39_64(a) (leftRotate_64((a), 39)) +#define leftRotate40_64(a) (leftRotate_64((a), 40)) +#define leftRotate41_64(a) (leftRotate_64((a), 41)) +#define leftRotate42_64(a) (leftRotate_64((a), 42)) +#define leftRotate43_64(a) (leftRotate_64((a), 43)) +#define leftRotate44_64(a) (leftRotate_64((a), 44)) +#define leftRotate45_64(a) (leftRotate_64((a), 45)) +#define leftRotate46_64(a) (leftRotate_64((a), 46)) +#define leftRotate47_64(a) (leftRotate_64((a), 47)) +#define leftRotate48_64(a) (leftRotate_64((a), 48)) +#define leftRotate49_64(a) (leftRotate_64((a), 49)) +#define leftRotate50_64(a) (leftRotate_64((a), 50)) +#define leftRotate51_64(a) (leftRotate_64((a), 51)) +#define leftRotate52_64(a) (leftRotate_64((a), 52)) +#define leftRotate53_64(a) (leftRotate_64((a), 53)) +#define leftRotate54_64(a) (leftRotate_64((a), 54)) +#define leftRotate55_64(a) (leftRotate_64((a), 55)) +#define leftRotate56_64(a) (leftRotate_64((a), 56)) +#define leftRotate57_64(a) (leftRotate_64((a), 57)) +#define leftRotate58_64(a) (leftRotate_64((a), 58)) +#define leftRotate59_64(a) (leftRotate_64((a), 59)) +#define leftRotate60_64(a) (leftRotate_64((a), 60)) +#define leftRotate61_64(a) (leftRotate_64((a), 61)) +#define leftRotate62_64(a) (leftRotate_64((a), 62)) +#define leftRotate63_64(a) (leftRotate_64((a), 63)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_64(a) (rightRotate_64((a), 1)) +#define rightRotate2_64(a) (rightRotate_64((a), 2)) +#define rightRotate3_64(a) (rightRotate_64((a), 3)) +#define rightRotate4_64(a) (rightRotate_64((a), 4)) +#define rightRotate5_64(a) (rightRotate_64((a), 5)) +#define rightRotate6_64(a) (rightRotate_64((a), 6)) +#define rightRotate7_64(a) (rightRotate_64((a), 7)) +#define rightRotate8_64(a) (rightRotate_64((a), 8)) +#define rightRotate9_64(a) (rightRotate_64((a), 9)) +#define rightRotate10_64(a) (rightRotate_64((a), 10)) +#define rightRotate11_64(a) (rightRotate_64((a), 11)) +#define rightRotate12_64(a) (rightRotate_64((a), 12)) +#define rightRotate13_64(a) (rightRotate_64((a), 13)) +#define rightRotate14_64(a) (rightRotate_64((a), 14)) +#define rightRotate15_64(a) (rightRotate_64((a), 15)) +#define rightRotate16_64(a) (rightRotate_64((a), 16)) +#define rightRotate17_64(a) (rightRotate_64((a), 17)) +#define rightRotate18_64(a) (rightRotate_64((a), 18)) +#define rightRotate19_64(a) (rightRotate_64((a), 19)) +#define rightRotate20_64(a) (rightRotate_64((a), 20)) +#define rightRotate21_64(a) (rightRotate_64((a), 21)) +#define rightRotate22_64(a) (rightRotate_64((a), 22)) +#define rightRotate23_64(a) (rightRotate_64((a), 23)) +#define rightRotate24_64(a) (rightRotate_64((a), 24)) +#define rightRotate25_64(a) (rightRotate_64((a), 25)) +#define rightRotate26_64(a) (rightRotate_64((a), 26)) +#define rightRotate27_64(a) (rightRotate_64((a), 27)) +#define rightRotate28_64(a) (rightRotate_64((a), 28)) +#define rightRotate29_64(a) (rightRotate_64((a), 29)) +#define rightRotate30_64(a) (rightRotate_64((a), 30)) +#define rightRotate31_64(a) (rightRotate_64((a), 31)) +#define rightRotate32_64(a) (rightRotate_64((a), 32)) +#define rightRotate33_64(a) (rightRotate_64((a), 33)) +#define rightRotate34_64(a) (rightRotate_64((a), 34)) +#define rightRotate35_64(a) (rightRotate_64((a), 35)) +#define rightRotate36_64(a) (rightRotate_64((a), 36)) +#define rightRotate37_64(a) (rightRotate_64((a), 37)) +#define rightRotate38_64(a) (rightRotate_64((a), 38)) +#define rightRotate39_64(a) (rightRotate_64((a), 39)) +#define rightRotate40_64(a) (rightRotate_64((a), 40)) +#define rightRotate41_64(a) (rightRotate_64((a), 41)) +#define rightRotate42_64(a) (rightRotate_64((a), 42)) +#define rightRotate43_64(a) (rightRotate_64((a), 43)) +#define rightRotate44_64(a) (rightRotate_64((a), 44)) +#define rightRotate45_64(a) (rightRotate_64((a), 45)) +#define rightRotate46_64(a) (rightRotate_64((a), 46)) +#define rightRotate47_64(a) (rightRotate_64((a), 47)) +#define rightRotate48_64(a) (rightRotate_64((a), 48)) +#define rightRotate49_64(a) (rightRotate_64((a), 49)) +#define rightRotate50_64(a) (rightRotate_64((a), 50)) +#define rightRotate51_64(a) (rightRotate_64((a), 51)) +#define rightRotate52_64(a) (rightRotate_64((a), 52)) +#define rightRotate53_64(a) (rightRotate_64((a), 53)) +#define rightRotate54_64(a) (rightRotate_64((a), 54)) +#define rightRotate55_64(a) (rightRotate_64((a), 55)) +#define rightRotate56_64(a) (rightRotate_64((a), 56)) +#define rightRotate57_64(a) (rightRotate_64((a), 57)) +#define rightRotate58_64(a) (rightRotate_64((a), 58)) +#define rightRotate59_64(a) (rightRotate_64((a), 59)) +#define rightRotate60_64(a) (rightRotate_64((a), 60)) +#define rightRotate61_64(a) (rightRotate_64((a), 61)) +#define rightRotate62_64(a) (rightRotate_64((a), 62)) +#define rightRotate63_64(a) (rightRotate_64((a), 63)) + +/* 
Rotate a 16-bit value left by a number of bits */ +#define leftRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (16 - (bits))); \ + })) + +/* Rotate a 16-bit value right by a number of bits */ +#define rightRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (16 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_16(a) (leftRotate_16((a), 1)) +#define leftRotate2_16(a) (leftRotate_16((a), 2)) +#define leftRotate3_16(a) (leftRotate_16((a), 3)) +#define leftRotate4_16(a) (leftRotate_16((a), 4)) +#define leftRotate5_16(a) (leftRotate_16((a), 5)) +#define leftRotate6_16(a) (leftRotate_16((a), 6)) +#define leftRotate7_16(a) (leftRotate_16((a), 7)) +#define leftRotate8_16(a) (leftRotate_16((a), 8)) +#define leftRotate9_16(a) (leftRotate_16((a), 9)) +#define leftRotate10_16(a) (leftRotate_16((a), 10)) +#define leftRotate11_16(a) (leftRotate_16((a), 11)) +#define leftRotate12_16(a) (leftRotate_16((a), 12)) +#define leftRotate13_16(a) (leftRotate_16((a), 13)) +#define leftRotate14_16(a) (leftRotate_16((a), 14)) +#define leftRotate15_16(a) (leftRotate_16((a), 15)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_16(a) (rightRotate_16((a), 1)) +#define rightRotate2_16(a) (rightRotate_16((a), 2)) +#define rightRotate3_16(a) (rightRotate_16((a), 3)) +#define rightRotate4_16(a) (rightRotate_16((a), 4)) +#define rightRotate5_16(a) (rightRotate_16((a), 5)) +#define rightRotate6_16(a) (rightRotate_16((a), 6)) +#define rightRotate7_16(a) (rightRotate_16((a), 7)) +#define rightRotate8_16(a) (rightRotate_16((a), 8)) +#define rightRotate9_16(a) (rightRotate_16((a), 9)) +#define rightRotate10_16(a) (rightRotate_16((a), 10)) +#define rightRotate11_16(a) (rightRotate_16((a), 11)) +#define rightRotate12_16(a) (rightRotate_16((a), 12)) +#define rightRotate13_16(a) (rightRotate_16((a), 13)) +#define rightRotate14_16(a) (rightRotate_16((a), 14)) +#define rightRotate15_16(a) (rightRotate_16((a), 15)) + +/* Rotate an 8-bit value left by a number of bits */ +#define leftRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (8 - (bits))); \ + })) + +/* Rotate an 8-bit value right by a number of bits */ +#define rightRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (8 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_8(a) (leftRotate_8((a), 1)) +#define leftRotate2_8(a) (leftRotate_8((a), 2)) +#define leftRotate3_8(a) (leftRotate_8((a), 3)) +#define leftRotate4_8(a) (leftRotate_8((a), 4)) +#define leftRotate5_8(a) (leftRotate_8((a), 5)) +#define leftRotate6_8(a) (leftRotate_8((a), 6)) +#define leftRotate7_8(a) (leftRotate_8((a), 7)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_8(a) (rightRotate_8((a), 1)) +#define rightRotate2_8(a) (rightRotate_8((a), 2)) +#define rightRotate3_8(a) (rightRotate_8((a), 3)) +#define rightRotate4_8(a) (rightRotate_8((a), 4)) +#define rightRotate5_8(a) (rightRotate_8((a), 5)) +#define rightRotate6_8(a) (rightRotate_8((a), 6)) +#define rightRotate7_8(a) (rightRotate_8((a), 7)) + +#endif diff --git a/sparkle/Implementations/crypto_hash/esch384v1/rhys/sparkle.c b/sparkle/Implementations/crypto_hash/esch384v1/rhys/sparkle.c new file mode 100644 index 0000000..e2aa25a --- /dev/null +++ b/sparkle/Implementations/crypto_hash/esch384v1/rhys/sparkle.c @@ -0,0 +1,1135 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#include "sparkle.h"
+#include "internal-sparkle.h"
+#include <string.h>
+
+aead_cipher_t const schwaemm_256_128_cipher = {
+    "Schwaemm256-128",
+    SCHWAEMM_256_128_KEY_SIZE,
+    SCHWAEMM_256_128_NONCE_SIZE,
+    SCHWAEMM_256_128_TAG_SIZE,
+    AEAD_FLAG_LITTLE_ENDIAN,
+    schwaemm_256_128_aead_encrypt,
+    schwaemm_256_128_aead_decrypt
+};
+
+aead_cipher_t const schwaemm_192_192_cipher = {
+    "Schwaemm192-192",
+    SCHWAEMM_192_192_KEY_SIZE,
+    SCHWAEMM_192_192_NONCE_SIZE,
+    SCHWAEMM_192_192_TAG_SIZE,
+    AEAD_FLAG_LITTLE_ENDIAN,
+    schwaemm_192_192_aead_encrypt,
+    schwaemm_192_192_aead_decrypt
+};
+
+aead_cipher_t const schwaemm_128_128_cipher = {
+    "Schwaemm128-128",
+    SCHWAEMM_128_128_KEY_SIZE,
+    SCHWAEMM_128_128_NONCE_SIZE,
+    SCHWAEMM_128_128_TAG_SIZE,
+    AEAD_FLAG_LITTLE_ENDIAN,
+    schwaemm_128_128_aead_encrypt,
+    schwaemm_128_128_aead_decrypt
+};
+
+aead_cipher_t const schwaemm_256_256_cipher = {
+    "Schwaemm256-256",
+    SCHWAEMM_256_256_KEY_SIZE,
+    SCHWAEMM_256_256_NONCE_SIZE,
+    SCHWAEMM_256_256_TAG_SIZE,
+    AEAD_FLAG_LITTLE_ENDIAN,
+    schwaemm_256_256_aead_encrypt,
+    schwaemm_256_256_aead_decrypt
+};
+
+aead_hash_algorithm_t const esch_256_hash_algorithm = {
+    "Esch256",
+    sizeof(esch_256_hash_state_t),
+    ESCH_256_HASH_SIZE,
+    AEAD_FLAG_LITTLE_ENDIAN,
+    esch_256_hash,
+    (aead_hash_init_t)esch_256_hash_init,
+    (aead_hash_update_t)esch_256_hash_update,
+    (aead_hash_finalize_t)esch_256_hash_finalize,
+    (aead_xof_absorb_t)0,
+    (aead_xof_squeeze_t)0
+};
+
+aead_hash_algorithm_t const esch_384_hash_algorithm = {
+    "Esch384",
+    sizeof(esch_384_hash_state_t),
+    ESCH_384_HASH_SIZE,
+    AEAD_FLAG_LITTLE_ENDIAN,
+    esch_384_hash,
+    (aead_hash_init_t)esch_384_hash_init,
+    (aead_hash_update_t)esch_384_hash_update,
+    (aead_hash_finalize_t)esch_384_hash_finalize,
+    (aead_xof_absorb_t)0,
+    (aead_xof_squeeze_t)0
+};
+
+/**
+ * \def DOMAIN(value)
+ * \brief Build a domain separation value as a 32-bit word.
+ *
+ * \param value The base value.
+ * \return The domain separation value as a 32-bit word.
+ */
+#if defined(LW_UTIL_LITTLE_ENDIAN)
+#define DOMAIN(value) (((uint32_t)(value)) << 24)
+#else
+#define DOMAIN(value) (value)
+#endif
+
+/**
+ * \brief Rate at which bytes are processed by Schwaemm256-128.
+ */
+#define SCHWAEMM_256_128_RATE 32
+
+/**
+ * \brief Pointer to the left of the state for Schwaemm256-128.
+ */
+#define SCHWAEMM_256_128_LEFT(s) ((unsigned char *)&(s[0]))
+
+/**
+ * \brief Pointer to the right of the state for Schwaemm256-128.
+ */
+#define SCHWAEMM_256_128_RIGHT(s) \
+    (SCHWAEMM_256_128_LEFT(s) + SCHWAEMM_256_128_RATE)
+
+/**
+ * \brief Perform the rho1 and rate whitening steps for Schwaemm256-128.
+ *
+ * \param s SPARKLE-384 state.
+ */
+#define schwaemm_256_128_rho(s) \
+    do { \
+        uint32_t t = s[0]; \
+        s[0] = s[4] ^ s[8]; \
+        s[4] ^= t ^ s[8]; \
+        t = s[1]; \
+        s[1] = s[5] ^ s[9]; \
+        s[5] ^= t ^ s[9]; \
+        t = s[2]; \
+        s[2] = s[6] ^ s[10]; \
+        s[6] ^= t ^ s[10]; \
+        t = s[3]; \
+        s[3] = s[7] ^ s[11]; \
+        s[7] ^= t ^ s[11]; \
+    } while (0)
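+
+/*
+ * Illustrative sketch only: one possible way to call the Schwaemm256-128
+ * AEAD entry points declared in sparkle.h.  The key, nonce, message, and
+ * associated data values are placeholders chosen to show the expected
+ * buffer sizes, and SPARKLE_USAGE_EXAMPLE is a hypothetical guard macro
+ * used here so that the sketch is not compiled into the library.
+ */
+#if defined(SPARKLE_USAGE_EXAMPLE)
+static int schwaemm_256_128_example(void)
+{
+    unsigned char key[SCHWAEMM_256_128_KEY_SIZE] = {0};     /* 16-byte key */
+    unsigned char nonce[SCHWAEMM_256_128_NONCE_SIZE] = {0}; /* 32-byte nonce */
+    unsigned char msg[12] = "hello world";
+    unsigned char ad[4] = {1, 2, 3, 4};
+    unsigned char ct[sizeof(msg) + SCHWAEMM_256_128_TAG_SIZE];
+    unsigned char pt[sizeof(msg)];
+    unsigned long long ctlen, ptlen;
+
+    /* Encrypt: the output is the ciphertext followed by the 16-byte tag;
+     * the "nsec" parameter is unused by this algorithm and passed as 0 */
+    schwaemm_256_128_aead_encrypt
+        (ct, &ctlen, msg, sizeof(msg), ad, sizeof(ad), 0, nonce, key);
+
+    /* Decrypt: returns 0 and fills "pt" only if the tag verifies */
+    return schwaemm_256_128_aead_decrypt
+        (pt, &ptlen, 0, ct, ctlen, ad, sizeof(ad), nonce, key);
+}
+#endif /* SPARKLE_USAGE_EXAMPLE */
+
+/**
+ * \brief Authenticates the associated data for Schwaemm256-128.
+ *
+ * \param s SPARKLE-384 state.
+ * \param ad Points to the associated data.
+ * \param adlen Length of the associated data; must be >= 1.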
+ */ +static void schwaemm_256_128_authenticate + (uint32_t s[SPARKLE_384_STATE_SIZE], + const unsigned char *ad, unsigned long long adlen) +{ + while (adlen > SCHWAEMM_256_128_RATE) { + schwaemm_256_128_rho(s); + lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); + sparkle_384(s, 7); + ad += SCHWAEMM_256_128_RATE; + adlen -= SCHWAEMM_256_128_RATE; + } + if (adlen == SCHWAEMM_256_128_RATE) { + s[11] ^= DOMAIN(0x05); + schwaemm_256_128_rho(s); + lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_128_RATE); + } else { + unsigned temp = (unsigned)adlen; + s[11] ^= DOMAIN(0x04); + schwaemm_256_128_rho(s); + lw_xor_block((unsigned char *)s, ad, temp); + ((unsigned char *)s)[temp] ^= 0x80; + } + sparkle_384(s, 11); +} + +int schwaemm_256_128_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + uint32_t s[SPARKLE_384_STATE_SIZE]; + uint8_t block[SCHWAEMM_256_128_RATE]; + (void)nsec; + + /* Set the length of the returned ciphertext */ + *clen = mlen + SCHWAEMM_256_128_TAG_SIZE; + + /* Initialize the state with the nonce and the key */ + memcpy(SCHWAEMM_256_128_LEFT(s), npub, SCHWAEMM_256_128_NONCE_SIZE); + memcpy(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_KEY_SIZE); + sparkle_384(s, 11); + + /* Process the associated data */ + if (adlen > 0) + schwaemm_256_128_authenticate(s, ad, adlen); + + /* Encrypt the plaintext to produce the ciphertext */ + if (mlen > 0) { + while (mlen > SCHWAEMM_256_128_RATE) { + lw_xor_block_2_src + (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); + schwaemm_256_128_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); + sparkle_384(s, 7); + memcpy(c, block, SCHWAEMM_256_128_RATE); + c += SCHWAEMM_256_128_RATE; + m += SCHWAEMM_256_128_RATE; + mlen -= SCHWAEMM_256_128_RATE; + } + if (mlen == SCHWAEMM_256_128_RATE) { + lw_xor_block_2_src + (block, (unsigned char *)s, m, SCHWAEMM_256_128_RATE); + s[11] ^= DOMAIN(0x07); + schwaemm_256_128_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); + memcpy(c, block, SCHWAEMM_256_128_RATE); + } else { + unsigned temp = (unsigned)mlen; + lw_xor_block_2_src(block, (unsigned char *)s, m, temp); + s[11] ^= DOMAIN(0x06); + schwaemm_256_128_rho(s); + lw_xor_block((unsigned char *)s, m, temp); + ((unsigned char *)s)[temp] ^= 0x80; + memcpy(c, block, temp); + } + sparkle_384(s, 11); + c += mlen; + } + + /* Generate the authentication tag */ + lw_xor_block_2_src + (c, SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_TAG_SIZE); + return 0; +} + +int schwaemm_256_128_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + uint32_t s[SPARKLE_384_STATE_SIZE]; + unsigned char *mtemp = m; + (void)nsec; + + /* Validate the ciphertext length and set the return "mlen" value */ + if (clen < SCHWAEMM_256_128_TAG_SIZE) + return -1; + *mlen = clen - SCHWAEMM_256_128_TAG_SIZE; + + /* Initialize the state with the nonce and the key */ + memcpy(SCHWAEMM_256_128_LEFT(s), npub, SCHWAEMM_256_128_NONCE_SIZE); + memcpy(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_KEY_SIZE); + sparkle_384(s, 11); + + /* Process the associated data */ + if (adlen > 0) + schwaemm_256_128_authenticate(s, ad, adlen); + + /* Decrypt the ciphertext to 
produce the plaintext */ + clen -= SCHWAEMM_256_128_TAG_SIZE; + if (clen > 0) { + while (clen > SCHWAEMM_256_128_RATE) { + lw_xor_block_2_src + (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); + schwaemm_256_128_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); + sparkle_384(s, 7); + c += SCHWAEMM_256_128_RATE; + m += SCHWAEMM_256_128_RATE; + clen -= SCHWAEMM_256_128_RATE; + } + if (clen == SCHWAEMM_256_128_RATE) { + lw_xor_block_2_src + (m, (unsigned char *)s, c, SCHWAEMM_256_128_RATE); + s[11] ^= DOMAIN(0x07); + schwaemm_256_128_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_128_RATE); + } else { + unsigned temp = (unsigned)clen; + lw_xor_block_2_src(m, (unsigned char *)s, c, temp); + s[11] ^= DOMAIN(0x06); + schwaemm_256_128_rho(s); + lw_xor_block((unsigned char *)s, m, temp); + ((unsigned char *)s)[temp] ^= 0x80; + } + sparkle_384(s, 11); + c += clen; + } + + /* Check the authentication tag */ + lw_xor_block(SCHWAEMM_256_128_RIGHT(s), k, SCHWAEMM_256_128_TAG_SIZE); + return aead_check_tag + (mtemp, *mlen, SCHWAEMM_256_128_RIGHT(s), c, SCHWAEMM_256_128_TAG_SIZE); +} + +/** + * \brief Rate at which bytes are processed by Schwaemm192-192. + */ +#define SCHWAEMM_192_192_RATE 24 + +/** + * \brief Pointer to the left of the state for Schwaemm192-192. + */ +#define SCHWAEMM_192_192_LEFT(s) ((unsigned char *)&(s[0])) + +/** + * \brief Pointer to the right of the state for Schwaemm192-192. + */ +#define SCHWAEMM_192_192_RIGHT(s) \ + (SCHWAEMM_192_192_LEFT(s) + SCHWAEMM_192_192_RATE) + +/** + * \brief Perform the rho1 and rate whitening steps for Schwaemm192-192. + * + * \param s SPARKLE-384 state. + */ +#define schwaemm_192_192_rho(s) \ + do { \ + uint32_t t = s[0]; \ + s[0] = s[3] ^ s[6]; \ + s[3] ^= t ^ s[9]; \ + t = s[1]; \ + s[1] = s[4] ^ s[7]; \ + s[4] ^= t ^ s[10]; \ + t = s[2]; \ + s[2] = s[5] ^ s[8]; \ + s[5] ^= t ^ s[11]; \ + } while (0) + +/** + * \brief Authenticates the associated data for Schwaemm192-192. + * + * \param s SPARKLE-384 state. + * \param ad Points to the associated data. + * \param adlen Length of the associated data; must be >= 1. 
+ */ +static void schwaemm_192_192_authenticate + (uint32_t s[SPARKLE_384_STATE_SIZE], + const unsigned char *ad, unsigned long long adlen) +{ + while (adlen > SCHWAEMM_192_192_RATE) { + schwaemm_192_192_rho(s); + lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); + sparkle_384(s, 7); + ad += SCHWAEMM_192_192_RATE; + adlen -= SCHWAEMM_192_192_RATE; + } + if (adlen == SCHWAEMM_192_192_RATE) { + s[11] ^= DOMAIN(0x09); + schwaemm_192_192_rho(s); + lw_xor_block((unsigned char *)s, ad, SCHWAEMM_192_192_RATE); + } else { + unsigned temp = (unsigned)adlen; + s[11] ^= DOMAIN(0x08); + schwaemm_192_192_rho(s); + lw_xor_block((unsigned char *)s, ad, temp); + ((unsigned char *)s)[temp] ^= 0x80; + } + sparkle_384(s, 11); +} + +int schwaemm_192_192_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + uint32_t s[SPARKLE_384_STATE_SIZE]; + uint8_t block[SCHWAEMM_192_192_RATE]; + (void)nsec; + + /* Set the length of the returned ciphertext */ + *clen = mlen + SCHWAEMM_192_192_TAG_SIZE; + + /* Initialize the state with the nonce and the key */ + memcpy(SCHWAEMM_192_192_LEFT(s), npub, SCHWAEMM_192_192_NONCE_SIZE); + memcpy(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_KEY_SIZE); + sparkle_384(s, 11); + + /* Process the associated data */ + if (adlen > 0) + schwaemm_192_192_authenticate(s, ad, adlen); + + /* Encrypt the plaintext to produce the ciphertext */ + if (mlen > 0) { + while (mlen > SCHWAEMM_192_192_RATE) { + lw_xor_block_2_src + (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); + schwaemm_192_192_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); + sparkle_384(s, 7); + memcpy(c, block, SCHWAEMM_192_192_RATE); + c += SCHWAEMM_192_192_RATE; + m += SCHWAEMM_192_192_RATE; + mlen -= SCHWAEMM_192_192_RATE; + } + if (mlen == SCHWAEMM_192_192_RATE) { + lw_xor_block_2_src + (block, (unsigned char *)s, m, SCHWAEMM_192_192_RATE); + s[11] ^= DOMAIN(0x0B); + schwaemm_192_192_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); + memcpy(c, block, SCHWAEMM_192_192_RATE); + } else { + unsigned temp = (unsigned)mlen; + lw_xor_block_2_src(block, (unsigned char *)s, m, temp); + s[11] ^= DOMAIN(0x0A); + schwaemm_192_192_rho(s); + lw_xor_block((unsigned char *)s, m, temp); + ((unsigned char *)s)[temp] ^= 0x80; + memcpy(c, block, temp); + } + sparkle_384(s, 11); + c += mlen; + } + + /* Generate the authentication tag */ + lw_xor_block_2_src + (c, SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_TAG_SIZE); + return 0; +} + +int schwaemm_192_192_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + uint32_t s[SPARKLE_384_STATE_SIZE]; + unsigned char *mtemp = m; + (void)nsec; + + /* Validate the ciphertext length and set the return "mlen" value */ + if (clen < SCHWAEMM_192_192_TAG_SIZE) + return -1; + *mlen = clen - SCHWAEMM_192_192_TAG_SIZE; + + /* Initialize the state with the nonce and the key */ + memcpy(SCHWAEMM_192_192_LEFT(s), npub, SCHWAEMM_192_192_NONCE_SIZE); + memcpy(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_KEY_SIZE); + sparkle_384(s, 11); + + /* Process the associated data */ + if (adlen > 0) + schwaemm_192_192_authenticate(s, ad, adlen); + + /* Decrypt the ciphertext to 
produce the plaintext */ + clen -= SCHWAEMM_192_192_TAG_SIZE; + if (clen > 0) { + while (clen > SCHWAEMM_192_192_RATE) { + lw_xor_block_2_src + (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); + schwaemm_192_192_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); + sparkle_384(s, 7); + c += SCHWAEMM_192_192_RATE; + m += SCHWAEMM_192_192_RATE; + clen -= SCHWAEMM_192_192_RATE; + } + if (clen == SCHWAEMM_192_192_RATE) { + lw_xor_block_2_src + (m, (unsigned char *)s, c, SCHWAEMM_192_192_RATE); + s[11] ^= DOMAIN(0x0B); + schwaemm_192_192_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_192_192_RATE); + } else { + unsigned temp = (unsigned)clen; + lw_xor_block_2_src(m, (unsigned char *)s, c, temp); + s[11] ^= DOMAIN(0x0A); + schwaemm_192_192_rho(s); + lw_xor_block((unsigned char *)s, m, temp); + ((unsigned char *)s)[temp] ^= 0x80; + } + sparkle_384(s, 11); + c += clen; + } + + /* Check the authentication tag */ + lw_xor_block(SCHWAEMM_192_192_RIGHT(s), k, SCHWAEMM_192_192_TAG_SIZE); + return aead_check_tag + (mtemp, *mlen, SCHWAEMM_192_192_RIGHT(s), c, SCHWAEMM_192_192_TAG_SIZE); +} + +/** + * \brief Rate at which bytes are processed by Schwaemm128-128. + */ +#define SCHWAEMM_128_128_RATE 16 + +/** + * \brief Pointer to the left of the state for Schwaemm128-128. + */ +#define SCHWAEMM_128_128_LEFT(s) ((unsigned char *)&(s[0])) + +/** + * \brief Pointer to the right of the state for Schwaemm128-128. + */ +#define SCHWAEMM_128_128_RIGHT(s) \ + (SCHWAEMM_128_128_LEFT(s) + SCHWAEMM_128_128_RATE) + +/** + * \brief Perform the rho1 and rate whitening steps for Schwaemm128-128. + * + * \param s SPARKLE-256 state. + */ +#define schwaemm_128_128_rho(s) \ + do { \ + uint32_t t = s[0]; \ + s[0] = s[2] ^ s[4]; \ + s[2] ^= t ^ s[6]; \ + t = s[1]; \ + s[1] = s[3] ^ s[5]; \ + s[3] ^= t ^ s[7]; \ + } while (0) + +/** + * \brief Authenticates the associated data for Schwaemm128-128. + * + * \param s SPARKLE-256 state. + * \param ad Points to the associated data. + * \param adlen Length of the associated data; must be >= 1. 
+ */ +static void schwaemm_128_128_authenticate + (uint32_t s[SPARKLE_256_STATE_SIZE], + const unsigned char *ad, unsigned long long adlen) +{ + while (adlen > SCHWAEMM_128_128_RATE) { + schwaemm_128_128_rho(s); + lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); + sparkle_256(s, 7); + ad += SCHWAEMM_128_128_RATE; + adlen -= SCHWAEMM_128_128_RATE; + } + if (adlen == SCHWAEMM_128_128_RATE) { + s[7] ^= DOMAIN(0x05); + schwaemm_128_128_rho(s); + lw_xor_block((unsigned char *)s, ad, SCHWAEMM_128_128_RATE); + } else { + unsigned temp = (unsigned)adlen; + s[7] ^= DOMAIN(0x04); + schwaemm_128_128_rho(s); + lw_xor_block((unsigned char *)s, ad, temp); + ((unsigned char *)s)[temp] ^= 0x80; + } + sparkle_256(s, 10); +} + +int schwaemm_128_128_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + uint32_t s[SPARKLE_256_STATE_SIZE]; + uint8_t block[SCHWAEMM_128_128_RATE]; + (void)nsec; + + /* Set the length of the returned ciphertext */ + *clen = mlen + SCHWAEMM_128_128_TAG_SIZE; + + /* Initialize the state with the nonce and the key */ + memcpy(SCHWAEMM_128_128_LEFT(s), npub, SCHWAEMM_128_128_NONCE_SIZE); + memcpy(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_KEY_SIZE); + sparkle_256(s, 10); + + /* Process the associated data */ + if (adlen > 0) + schwaemm_128_128_authenticate(s, ad, adlen); + + /* Encrypt the plaintext to produce the ciphertext */ + if (mlen > 0) { + while (mlen > SCHWAEMM_128_128_RATE) { + lw_xor_block_2_src + (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); + schwaemm_128_128_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); + sparkle_256(s, 7); + memcpy(c, block, SCHWAEMM_128_128_RATE); + c += SCHWAEMM_128_128_RATE; + m += SCHWAEMM_128_128_RATE; + mlen -= SCHWAEMM_128_128_RATE; + } + if (mlen == SCHWAEMM_128_128_RATE) { + lw_xor_block_2_src + (block, (unsigned char *)s, m, SCHWAEMM_128_128_RATE); + s[7] ^= DOMAIN(0x07); + schwaemm_128_128_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); + memcpy(c, block, SCHWAEMM_128_128_RATE); + } else { + unsigned temp = (unsigned)mlen; + lw_xor_block_2_src(block, (unsigned char *)s, m, temp); + s[7] ^= DOMAIN(0x06); + schwaemm_128_128_rho(s); + lw_xor_block((unsigned char *)s, m, temp); + ((unsigned char *)s)[temp] ^= 0x80; + memcpy(c, block, temp); + } + sparkle_256(s, 10); + c += mlen; + } + + /* Generate the authentication tag */ + lw_xor_block_2_src + (c, SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_TAG_SIZE); + return 0; +} + +int schwaemm_128_128_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + uint32_t s[SPARKLE_256_STATE_SIZE]; + unsigned char *mtemp = m; + (void)nsec; + + /* Validate the ciphertext length and set the return "mlen" value */ + if (clen < SCHWAEMM_128_128_TAG_SIZE) + return -1; + *mlen = clen - SCHWAEMM_128_128_TAG_SIZE; + + /* Initialize the state with the nonce and the key */ + memcpy(SCHWAEMM_128_128_LEFT(s), npub, SCHWAEMM_128_128_NONCE_SIZE); + memcpy(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_KEY_SIZE); + sparkle_256(s, 10); + + /* Process the associated data */ + if (adlen > 0) + schwaemm_128_128_authenticate(s, ad, adlen); + + /* Decrypt the ciphertext to 
produce the plaintext */ + clen -= SCHWAEMM_128_128_TAG_SIZE; + if (clen > 0) { + while (clen > SCHWAEMM_128_128_RATE) { + lw_xor_block_2_src + (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); + schwaemm_128_128_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); + sparkle_256(s, 7); + c += SCHWAEMM_128_128_RATE; + m += SCHWAEMM_128_128_RATE; + clen -= SCHWAEMM_128_128_RATE; + } + if (clen == SCHWAEMM_128_128_RATE) { + lw_xor_block_2_src + (m, (unsigned char *)s, c, SCHWAEMM_128_128_RATE); + s[7] ^= DOMAIN(0x07); + schwaemm_128_128_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_128_128_RATE); + } else { + unsigned temp = (unsigned)clen; + lw_xor_block_2_src(m, (unsigned char *)s, c, temp); + s[7] ^= DOMAIN(0x06); + schwaemm_128_128_rho(s); + lw_xor_block((unsigned char *)s, m, temp); + ((unsigned char *)s)[temp] ^= 0x80; + } + sparkle_256(s, 10); + c += clen; + } + + /* Check the authentication tag */ + lw_xor_block(SCHWAEMM_128_128_RIGHT(s), k, SCHWAEMM_128_128_TAG_SIZE); + return aead_check_tag + (mtemp, *mlen, SCHWAEMM_128_128_RIGHT(s), c, SCHWAEMM_128_128_TAG_SIZE); +} + +/** + * \brief Rate at which bytes are processed by Schwaemm256-256. + */ +#define SCHWAEMM_256_256_RATE 32 + +/** + * \brief Pointer to the left of the state for Schwaemm256-256. + */ +#define SCHWAEMM_256_256_LEFT(s) ((unsigned char *)&(s[0])) + +/** + * \brief Pointer to the right of the state for Schwaemm256-256. + */ +#define SCHWAEMM_256_256_RIGHT(s) \ + (SCHWAEMM_256_256_LEFT(s) + SCHWAEMM_256_256_RATE) + +/** + * \brief Perform the rho1 and rate whitening steps for Schwaemm256-256. + * + * \param s SPARKLE-512 state. + */ +#define schwaemm_256_256_rho(s) \ + do { \ + uint32_t t = s[0]; \ + s[0] = s[4] ^ s[8]; \ + s[4] ^= t ^ s[12]; \ + t = s[1]; \ + s[1] = s[5] ^ s[9]; \ + s[5] ^= t ^ s[13]; \ + t = s[2]; \ + s[2] = s[6] ^ s[10]; \ + s[6] ^= t ^ s[14]; \ + t = s[3]; \ + s[3] = s[7] ^ s[11]; \ + s[7] ^= t ^ s[15]; \ + } while (0) + +/** + * \brief Authenticates the associated data for Schwaemm256-256. + * + * \param s SPARKLE-512 state. + * \param ad Points to the associated data. + * \param adlen Length of the associated data; must be >= 1. 
+ */ +static void schwaemm_256_256_authenticate + (uint32_t s[SPARKLE_512_STATE_SIZE], + const unsigned char *ad, unsigned long long adlen) +{ + while (adlen > SCHWAEMM_256_256_RATE) { + schwaemm_256_256_rho(s); + lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); + sparkle_512(s, 8); + ad += SCHWAEMM_256_256_RATE; + adlen -= SCHWAEMM_256_256_RATE; + } + if (adlen == SCHWAEMM_256_256_RATE) { + s[15] ^= DOMAIN(0x11); + schwaemm_256_256_rho(s); + lw_xor_block((unsigned char *)s, ad, SCHWAEMM_256_256_RATE); + } else { + unsigned temp = (unsigned)adlen; + s[15] ^= DOMAIN(0x10); + schwaemm_256_256_rho(s); + lw_xor_block((unsigned char *)s, ad, temp); + ((unsigned char *)s)[temp] ^= 0x80; + } + sparkle_512(s, 12); +} + +int schwaemm_256_256_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + uint32_t s[SPARKLE_512_STATE_SIZE]; + uint8_t block[SCHWAEMM_256_256_RATE]; + (void)nsec; + + /* Set the length of the returned ciphertext */ + *clen = mlen + SCHWAEMM_256_256_TAG_SIZE; + + /* Initialize the state with the nonce and the key */ + memcpy(SCHWAEMM_256_256_LEFT(s), npub, SCHWAEMM_256_256_NONCE_SIZE); + memcpy(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_KEY_SIZE); + sparkle_512(s, 12); + + /* Process the associated data */ + if (adlen > 0) + schwaemm_256_256_authenticate(s, ad, adlen); + + /* Encrypt the plaintext to produce the ciphertext */ + if (mlen > 0) { + while (mlen > SCHWAEMM_256_256_RATE) { + lw_xor_block_2_src + (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); + schwaemm_256_256_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); + sparkle_512(s, 8); + memcpy(c, block, SCHWAEMM_256_256_RATE); + c += SCHWAEMM_256_256_RATE; + m += SCHWAEMM_256_256_RATE; + mlen -= SCHWAEMM_256_256_RATE; + } + if (mlen == SCHWAEMM_256_256_RATE) { + lw_xor_block_2_src + (block, (unsigned char *)s, m, SCHWAEMM_256_256_RATE); + s[15] ^= DOMAIN(0x13); + schwaemm_256_256_rho(s); + lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE); + memcpy(c, block, SCHWAEMM_256_256_RATE); + } else { + unsigned temp = (unsigned)mlen; + lw_xor_block_2_src(block, (unsigned char *)s, m, temp); + s[15] ^= DOMAIN(0x12); + schwaemm_256_256_rho(s); + lw_xor_block((unsigned char *)s, m, temp); + ((unsigned char *)s)[temp] ^= 0x80; + memcpy(c, block, temp); + } + sparkle_512(s, 12); + c += mlen; + } + + /* Generate the authentication tag */ + lw_xor_block_2_src + (c, SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_TAG_SIZE); + return 0; +} + +int schwaemm_256_256_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + uint32_t s[SPARKLE_512_STATE_SIZE]; + unsigned char *mtemp = m; + (void)nsec; + + /* Validate the ciphertext length and set the return "mlen" value */ + if (clen < SCHWAEMM_256_256_TAG_SIZE) + return -1; + *mlen = clen - SCHWAEMM_256_256_TAG_SIZE; + + /* Initialize the state with the nonce and the key */ + memcpy(SCHWAEMM_256_256_LEFT(s), npub, SCHWAEMM_256_256_NONCE_SIZE); + memcpy(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_KEY_SIZE); + sparkle_512(s, 12); + + /* Process the associated data */ + if (adlen > 0) + schwaemm_256_256_authenticate(s, ad, adlen); + + /* Decrypt the ciphertext to 
produce the plaintext */
+    clen -= SCHWAEMM_256_256_TAG_SIZE;
+    if (clen > 0) {
+        while (clen > SCHWAEMM_256_256_RATE) {
+            lw_xor_block_2_src
+                (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE);
+            schwaemm_256_256_rho(s);
+            lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE);
+            sparkle_512(s, 8);
+            c += SCHWAEMM_256_256_RATE;
+            m += SCHWAEMM_256_256_RATE;
+            clen -= SCHWAEMM_256_256_RATE;
+        }
+        if (clen == SCHWAEMM_256_256_RATE) {
+            lw_xor_block_2_src
+                (m, (unsigned char *)s, c, SCHWAEMM_256_256_RATE);
+            s[15] ^= DOMAIN(0x13);
+            schwaemm_256_256_rho(s);
+            lw_xor_block((unsigned char *)s, m, SCHWAEMM_256_256_RATE);
+        } else {
+            unsigned temp = (unsigned)clen;
+            lw_xor_block_2_src(m, (unsigned char *)s, c, temp);
+            s[15] ^= DOMAIN(0x12);
+            schwaemm_256_256_rho(s);
+            lw_xor_block((unsigned char *)s, m, temp);
+            ((unsigned char *)s)[temp] ^= 0x80;
+        }
+        sparkle_512(s, 12);
+        c += clen;
+    }
+
+    /* Check the authentication tag */
+    lw_xor_block(SCHWAEMM_256_256_RIGHT(s), k, SCHWAEMM_256_256_TAG_SIZE);
+    return aead_check_tag
+        (mtemp, *mlen, SCHWAEMM_256_256_RIGHT(s), c, SCHWAEMM_256_256_TAG_SIZE);
+}
+
+/**
+ * \brief Rate at which bytes are processed by Esch256.
+ */
+#define ESCH_256_RATE 16
+
+/**
+ * \brief Perform the M3 step for Esch256 to mix the input with the state.
+ *
+ * \param s SPARKLE-384 state.
+ * \param block Block of input data that has been padded to the rate.
+ * \param domain Domain separator for this phase.
+ */
+#define esch_256_m3(s, block, domain) \
+    do { \
+        uint32_t tx = (block)[0] ^ (block)[2]; \
+        uint32_t ty = (block)[1] ^ (block)[3]; \
+        tx = leftRotate16(tx ^ (tx << 16)); \
+        ty = leftRotate16(ty ^ (ty << 16)); \
+        s[0] ^= (block)[0] ^ ty; \
+        s[1] ^= (block)[1] ^ tx; \
+        s[2] ^= (block)[2] ^ ty; \
+        s[3] ^= (block)[3] ^ tx; \
+        if ((domain) != 0) \
+            s[5] ^= DOMAIN(domain); \
+        s[4] ^= ty; \
+        s[5] ^= tx; \
+    } while (0)
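+
+/*
+ * Illustrative sketch only: hashing a message with the one-shot
+ * esch_256_hash() call and with the incremental init/update/finalize
+ * interface declared in sparkle.h.  The message below is a placeholder
+ * and SPARKLE_USAGE_EXAMPLE is a hypothetical guard macro used so that
+ * the sketch is not compiled into the library.
+ */
+#if defined(SPARKLE_USAGE_EXAMPLE)
+static void esch_256_example(void)
+{
+    static const unsigned char msg[] =
+        "The quick brown fox jumps over the lazy dog";
+    unsigned char digest1[ESCH_256_HASH_SIZE];
+    unsigned char digest2[ESCH_256_HASH_SIZE];
+    esch_256_hash_state_t state;
+
+    /* One-shot hashing of the whole message (excluding the trailing NUL) */
+    esch_256_hash(digest1, msg, sizeof(msg) - 1);
+
+    /* Incremental hashing of the same message in two pieces */
+    esch_256_hash_init(&state);
+    esch_256_hash_update(&state, msg, 20);
+    esch_256_hash_update(&state, msg + 20, sizeof(msg) - 1 - 20);
+    esch_256_hash_finalize(&state, digest2);
+
+    /* digest1 and digest2 now hold the same 32-byte Esch256 digest */
+}
+#endif /* SPARKLE_USAGE_EXAMPLE */
+
+/** @cond esch_256 */
+
+/**
+ * \brief Word-based state for the Esch256 incremental hash mode.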
+ */ +typedef union +{ + struct { + uint32_t state[SPARKLE_384_STATE_SIZE]; + uint32_t block[4]; + unsigned char count; + } s; + unsigned long long align; + +} esch_256_hash_state_wt; + +/** @endcond */ + +int esch_256_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + uint32_t s[SPARKLE_384_STATE_SIZE]; + uint32_t block[ESCH_256_RATE / 4]; + memset(s, 0, sizeof(s)); + while (inlen > ESCH_256_RATE) { + memcpy(block, in, ESCH_256_RATE); + esch_256_m3(s, block, 0x00); + sparkle_384(s, 7); + in += ESCH_256_RATE; + inlen -= ESCH_256_RATE; + } + if (inlen == ESCH_256_RATE) { + memcpy(block, in, ESCH_256_RATE); + esch_256_m3(s, block, 0x02); + } else { + unsigned temp = (unsigned)inlen; + memcpy(block, in, temp); + ((unsigned char *)block)[temp] = 0x80; + memset(((unsigned char *)block) + temp + 1, 0, + ESCH_256_RATE - temp - 1); + esch_256_m3(s, block, 0x01); + } + sparkle_384(s, 11); + memcpy(out, s, ESCH_256_RATE); + sparkle_384(s, 7); + memcpy(out + ESCH_256_RATE, s, ESCH_256_RATE); + return 0; +} + +void esch_256_hash_init(esch_256_hash_state_t *state) +{ + memset(state, 0, sizeof(esch_256_hash_state_t)); +} + +void esch_256_hash_update + (esch_256_hash_state_t *state, const unsigned char *in, + unsigned long long inlen) +{ + esch_256_hash_state_wt *st = (esch_256_hash_state_wt *)state; + unsigned temp; + while (inlen > 0) { + if (st->s.count == ESCH_256_RATE) { + esch_256_m3(st->s.state, st->s.block, 0x00); + sparkle_384(st->s.state, 7); + st->s.count = 0; + } + temp = ESCH_256_RATE - st->s.count; + if (temp > inlen) + temp = (unsigned)inlen; + memcpy(((unsigned char *)(st->s.block)) + st->s.count, in, temp); + st->s.count += temp; + in += temp; + inlen -= temp; + } +} + +void esch_256_hash_finalize + (esch_256_hash_state_t *state, unsigned char *out) +{ + esch_256_hash_state_wt *st = (esch_256_hash_state_wt *)state; + + /* Pad and process the last block */ + if (st->s.count == ESCH_256_RATE) { + esch_256_m3(st->s.state, st->s.block, 0x02); + } else { + unsigned temp = st->s.count; + ((unsigned char *)(st->s.block))[temp] = 0x80; + memset(((unsigned char *)(st->s.block)) + temp + 1, 0, + ESCH_256_RATE - temp - 1); + esch_256_m3(st->s.state, st->s.block, 0x01); + } + sparkle_384(st->s.state, 11); + + /* Generate the final hash value */ + memcpy(out, st->s.state, ESCH_256_RATE); + sparkle_384(st->s.state, 7); + memcpy(out + ESCH_256_RATE, st->s.state, ESCH_256_RATE); +} + +/** + * \brief Rate at which bytes are processed by Esch384. + */ +#define ESCH_384_RATE 16 + +/** + * \brief Perform the M4 step for Esch384 to mix the input with the state. + * + * \param s SPARKLE-512 state. + * \param block Block of input data that has been padded to the rate. + * \param domain Domain separator for this phase. + */ +#define esch_384_m4(s, block, domain) \ + do { \ + uint32_t tx = block[0] ^ block[2]; \ + uint32_t ty = block[1] ^ block[3]; \ + tx = leftRotate16(tx ^ (tx << 16)); \ + ty = leftRotate16(ty ^ (ty << 16)); \ + s[0] ^= block[0] ^ ty; \ + s[1] ^= block[1] ^ tx; \ + s[2] ^= block[2] ^ ty; \ + s[3] ^= block[3] ^ tx; \ + if ((domain) != 0) \ + s[7] ^= DOMAIN(domain); \ + s[4] ^= ty; \ + s[5] ^= tx; \ + s[6] ^= ty; \ + s[7] ^= tx; \ + } while (0) + +/** @cond esch_384 */ + +/** + * \brief Word-based state for the Esch384 incremental hash mode. 
+ */ +typedef union +{ + struct { + uint32_t state[SPARKLE_512_STATE_SIZE]; + uint32_t block[4]; + unsigned char count; + } s; + unsigned long long align; + +} esch_384_hash_state_wt; + +/** @endcond */ + +int esch_384_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + uint32_t s[SPARKLE_512_STATE_SIZE]; + uint32_t block[ESCH_256_RATE / 4]; + memset(s, 0, sizeof(s)); + while (inlen > ESCH_384_RATE) { + memcpy(block, in, ESCH_384_RATE); + esch_384_m4(s, block, 0x00); + sparkle_512(s, 8); + in += ESCH_384_RATE; + inlen -= ESCH_384_RATE; + } + if (inlen == ESCH_384_RATE) { + memcpy(block, in, ESCH_384_RATE); + esch_384_m4(s, block, 0x02); + } else { + unsigned temp = (unsigned)inlen; + memcpy(block, in, temp); + ((unsigned char *)block)[temp] = 0x80; + memset(((unsigned char *)block) + temp + 1, 0, + ESCH_384_RATE - temp - 1); + esch_384_m4(s, block, 0x01); + } + sparkle_512(s, 12); + memcpy(out, s, ESCH_384_RATE); + sparkle_512(s, 8); + memcpy(out + ESCH_384_RATE, s, ESCH_384_RATE); + sparkle_512(s, 8); + memcpy(out + ESCH_384_RATE * 2, s, ESCH_384_RATE); + return 0; +} + +void esch_384_hash_init(esch_384_hash_state_t *state) +{ + memset(state, 0, sizeof(esch_384_hash_state_t)); +} + +void esch_384_hash_update + (esch_384_hash_state_t *state, const unsigned char *in, + unsigned long long inlen) +{ + esch_384_hash_state_wt *st = (esch_384_hash_state_wt *)state; + unsigned temp; + while (inlen > 0) { + if (st->s.count == ESCH_384_RATE) { + esch_384_m4(st->s.state, st->s.block, 0x00); + sparkle_512(st->s.state, 8); + st->s.count = 0; + } + temp = ESCH_384_RATE - st->s.count; + if (temp > inlen) + temp = (unsigned)inlen; + memcpy(((unsigned char *)(st->s.block)) + st->s.count, in, temp); + st->s.count += temp; + in += temp; + inlen -= temp; + } +} + +void esch_384_hash_finalize + (esch_384_hash_state_t *state, unsigned char *out) +{ + esch_384_hash_state_wt *st = (esch_384_hash_state_wt *)state; + + /* Pad and process the last block */ + if (st->s.count == ESCH_384_RATE) { + esch_384_m4(st->s.state, st->s.block, 0x02); + } else { + unsigned temp = st->s.count; + ((unsigned char *)(st->s.block))[temp] = 0x80; + memset(((unsigned char *)(st->s.block)) + temp + 1, 0, + ESCH_384_RATE - temp - 1); + esch_384_m4(st->s.state, st->s.block, 0x01); + } + sparkle_512(st->s.state, 12); + + /* Generate the final hash value */ + memcpy(out, st->s.state, ESCH_384_RATE); + sparkle_512(st->s.state, 8); + memcpy(out + ESCH_384_RATE, st->s.state, ESCH_384_RATE); + sparkle_512(st->s.state, 8); + memcpy(out + ESCH_384_RATE * 2, st->s.state, ESCH_384_RATE); +} diff --git a/sparkle/Implementations/crypto_hash/esch384v1/rhys/sparkle.h b/sparkle/Implementations/crypto_hash/esch384v1/rhys/sparkle.h new file mode 100644 index 0000000..dd0999e --- /dev/null +++ b/sparkle/Implementations/crypto_hash/esch384v1/rhys/sparkle.h @@ -0,0 +1,515 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_SPARKLE_H +#define LWCRYPTO_SPARKLE_H + +#include "aead-common.h" + +/** + * \file sparkle.h + * \brief Encryption and hash algorithms based on the SPARKLE permutation. + * + * SPARKLE is a family of encryption and hash algorithms that are based + * around the SPARKLE permutation. There are three versions of the + * permutation with 256-bit, 384-bit, and 512-bit state sizes. + * The algorithms in the family are: + * + * \li Schwaemm256-128 with a 128-bit key, a 256-bit nonce, and a 128-bit tag. + * This is the primary encryption algorithm in the family. + * \li Schwaemm192-192 with a 192-bit key, a 192-bit nonce, and a 192-bit tag. + * \li Schwaemm128-128 with a 128-bit key, a 128-bit nonce, and a 128-bit tag. + * \li Schwaemm256-256 with a 256-bit key, a 256-bit nonce, and a 256-bit tag. + * \li Esch256 hash algorithm with a 256-bit digest output. This is the + * primary hash algorithm in the family. + * \li Esch384 hash algorithm with a 384-bit digest output. + * + * References: https://www.cryptolux.org/index.php/Sparkle + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the key for Schwaemm256-128. + */ +#define SCHWAEMM_256_128_KEY_SIZE 16 + +/** + * \brief Size of the authentication tag for Schwaemm256-128. + */ +#define SCHWAEMM_256_128_TAG_SIZE 16 + +/** + * \brief Size of the nonce for Schwaemm256-128. + */ +#define SCHWAEMM_256_128_NONCE_SIZE 32 + +/** + * \brief Size of the key for Schwaemm192-192. + */ +#define SCHWAEMM_192_192_KEY_SIZE 24 + +/** + * \brief Size of the authentication tag for Schwaemm192-192. + */ +#define SCHWAEMM_192_192_TAG_SIZE 24 + +/** + * \brief Size of the nonce for Schwaemm192-192. + */ +#define SCHWAEMM_192_192_NONCE_SIZE 24 + +/** + * \brief Size of the key for Schwaemm128-128. + */ +#define SCHWAEMM_128_128_KEY_SIZE 16 + +/** + * \brief Size of the authentication tag for Schwaemm128-128. + */ +#define SCHWAEMM_128_128_TAG_SIZE 16 + +/** + * \brief Size of the nonce for Schwaemm128-128. + */ +#define SCHWAEMM_128_128_NONCE_SIZE 16 + +/** + * \brief Size of the key for Schwaemm256-256. + */ +#define SCHWAEMM_256_256_KEY_SIZE 32 + +/** + * \brief Size of the authentication tag for Schwaemm256-256. + */ +#define SCHWAEMM_256_256_TAG_SIZE 32 + +/** + * \brief Size of the nonce for Schwaemm256-256. + */ +#define SCHWAEMM_256_256_NONCE_SIZE 32 + +/** + * \brief Size of the hash output for Esch256. + */ +#define ESCH_256_HASH_SIZE 32 + +/** + * \brief Size of the hash output for Esch384. + */ +#define ESCH_384_HASH_SIZE 48 + +/** + * \brief Meta-information block for the Schwaemm256-128 cipher. + */ +extern aead_cipher_t const schwaemm_256_128_cipher; + +/** + * \brief Meta-information block for the Schwaemm192-192 cipher. + */ +extern aead_cipher_t const schwaemm_192_192_cipher; + +/** + * \brief Meta-information block for the Schwaemm128-128 cipher. + */ +extern aead_cipher_t const schwaemm_128_128_cipher; + +/** + * \brief Meta-information block for the Schwaemm256-256 cipher. 
+ */ +extern aead_cipher_t const schwaemm_256_256_cipher; + +/** + * \brief Meta-information block for the Esch256 hash algorithm. + */ +extern aead_hash_algorithm_t const esch_256_hash_algorithm; + +/** + * \brief Meta-information block for the Esch384 hash algorithm. + */ +extern aead_hash_algorithm_t const esch_384_hash_algorithm; + +/** + * \brief State information for the Esch256 incremental hash mode. + */ +typedef union +{ + struct { + unsigned char state[48]; /**< Current hash state */ + unsigned char block[16]; /**< Partial input data block */ + unsigned char count; /**< Number of bytes in the current block */ + } s; /**< State */ + unsigned long long align; /**< For alignment of this structure */ + +} esch_256_hash_state_t; + +/** + * \brief State information for the Esch384 incremental hash mode. + */ +typedef union +{ + struct { + unsigned char state[64]; /**< Current hash state */ + unsigned char block[16]; /**< Partial input data block */ + unsigned char count; /**< Number of bytes in the current block */ + } s; /**< State */ + unsigned long long align; /**< For alignment of this structure */ + +} esch_384_hash_state_t; + +/** + * \brief Encrypts and authenticates a packet with Schwaemm256-128. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 32 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa schwaemm_256_128_aead_decrypt() + */ +int schwaemm_256_128_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with Schwaemm256-128. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 32 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. 
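The schwaemm_256_128_aead_encrypt() prototype above and the matching schwaemm_256_128_aead_decrypt() declared just below follow the usual NIST AEAD calling convention: the ciphertext buffer must leave room for the 16-byte tag, the nonce is 32 bytes, and decryption returns -1 when the tag does not verify. A hypothetical round-trip sketch, not part of this patch, assuming sparkle.h is available and that a real application would use a proper key and a unique nonce:

#include <string.h>
#include "sparkle.h"

/* Encrypt a short message with Schwaemm256-128 and decrypt it again.
 * Returns 0 when the authentication tag verifies, negative otherwise. */
static int schwaemm_256_128_roundtrip(void)
{
    unsigned char key[SCHWAEMM_256_128_KEY_SIZE] = {0};     /* demo key only */
    unsigned char nonce[SCHWAEMM_256_128_NONCE_SIZE] = {0}; /* demo nonce only */
    unsigned char msg[16] = "hello, schwaemm";
    unsigned char ad[4] = {1, 2, 3, 4};
    unsigned char ct[sizeof(msg) + SCHWAEMM_256_128_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    schwaemm_256_128_aead_encrypt
        (ct, &ctlen, msg, sizeof(msg), ad, sizeof(ad), 0, nonce, key);

    /* -1 means the authentication tag was incorrect. */
    return schwaemm_256_128_aead_decrypt
        (pt, &ptlen, 0, ct, ctlen, ad, sizeof(ad), nonce, key);
}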
+ * + * \sa schwaemm_256_128_aead_encrypt() + */ +int schwaemm_256_128_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with Schwaemm192-192. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 24 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 24 bytes in length. + * \param k Points to the 24 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa schwaemm_192_192_aead_decrypt() + */ +int schwaemm_192_192_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with Schwaemm192-192. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 24 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 24 bytes in length. + * \param k Points to the 24 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa schwaemm_192_192_aead_encrypt() + */ +int schwaemm_192_192_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with Schwaemm128-128. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. 
+ * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa schwaemm_128_128_aead_decrypt() + */ +int schwaemm_128_128_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with Schwaemm128-128. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa schwaemm_128_128_aead_encrypt() + */ +int schwaemm_128_128_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Encrypts and authenticates a packet with Schwaemm256-256. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa schwaemm_256_256_aead_decrypt() + */ +int schwaemm_256_256_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with Schwaemm256-256. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. 
+ * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa schwaemm_256_256_aead_encrypt() + */ +int schwaemm_256_256_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data with Esch256 to generate a hash value. + * + * \param out Buffer to receive the hash output which must be at least + * ESCH_256_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int esch_256_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for an Esch256 hashing operation. + * + * \param state Hash state to be initialized. + * + * \sa esch_256_hash_update(), esch_256_hash_finalize(), esch_256_hash() + */ +void esch_256_hash_init(esch_256_hash_state_t *state); + +/** + * \brief Updates an Esch256 state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + * + * \sa esch_256_hash_init(), esch_256_hash_finalize() + */ +void esch_256_hash_update + (esch_256_hash_state_t *state, const unsigned char *in, + unsigned long long inlen); + +/** + * \brief Returns the final hash value from an Esch256 hashing operation. + * + * \param state Hash state to be finalized. + * \param out Points to the output buffer to receive the 32-byte hash value. + * + * \sa esch_256_hash_init(), esch_256_hash_update() + */ +void esch_256_hash_finalize + (esch_256_hash_state_t *state, unsigned char *out); + +/** + * \brief Hashes a block of input data with Esch384 to generate a hash value. + * + * \param out Buffer to receive the hash output which must be at least + * ESCH_384_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int esch_384_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for an Esch384 hashing operation. + * + * \param state Hash state to be initialized. + * + * \sa esch_384_hash_update(), esch_384_hash_finalize(), esch_384_hash() + */ +void esch_384_hash_init(esch_384_hash_state_t *state); + +/** + * \brief Updates an Esch384 state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. 
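The one-shot esch_384_hash() declared above writes ESCH_384_HASH_SIZE (48) bytes into the caller's buffer and, like its Esch256 counterpart, returns zero for valid parameters. A minimal, hypothetical wrapper (illustration only, not part of this patch):

#include "sparkle.h"

/* Digest an arbitrary buffer with Esch384 in one call. */
static void digest_with_esch_384(const unsigned char *data,
                                 unsigned long long len,
                                 unsigned char digest[ESCH_384_HASH_SIZE])
{
    esch_384_hash(digest, data, len);   /* returns 0 on success */
}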
+ * + * \sa esch_384_hash_init(), esch_384_hash_finalize() + */ +void esch_384_hash_update + (esch_384_hash_state_t *state, const unsigned char *in, + unsigned long long inlen); + +/** + * \brief Returns the final hash value from an Esch384 hashing operation. + * + * \param state Hash state to be finalized. + * \param out Points to the output buffer to receive the 48-byte hash value. + * + * \sa esch_384_hash_init(), esch_384_hash_update() + */ +void esch_384_hash_finalize + (esch_384_hash_state_t *state, unsigned char *out); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/spix/Implementations/crypto_aead/spix128v1/rhys-avr/aead-common.c b/spix/Implementations/crypto_aead/spix128v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/spix/Implementations/crypto_aead/spix128v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/spix/Implementations/crypto_aead/spix128v1/rhys-avr/aead-common.h b/spix/Implementations/crypto_aead/spix128v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/spix/Implementations/crypto_aead/spix128v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
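The aead_check_tag() implementation at the start of the removed aead-common.c above uses a standard constant-time comparison trick: the XOR differences of the two tags are OR-ed into accum, so accum stays zero only when every byte matches; (accum - 1) >> 8 then yields an all-ones mask on a match and zero on a mismatch, which both gates the plaintext (zeroing it on failure) and produces the final 0 / -1 return value. The core of that masking step, restated as a small self-contained illustration (not part of this patch):

#include <assert.h>

/* Same masking logic as aead_check_tag(), without the plaintext gating. */
static int tags_equal_constant_time(const unsigned char *tag1,
                                    const unsigned char *tag2,
                                    unsigned size)
{
    int accum = 0;
    while (size > 0) {
        accum |= (*tag1++ ^ *tag2++); /* non-zero once any byte differs */
        --size;
    }
    accum = (accum - 1) >> 8;         /* -1 (all ones) on match, 0 on mismatch */
    return ~accum;                    /* 0 on match, -1 on mismatch */
}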
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. 
- * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
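The aead_cipher_t meta-information block above lets a test harness or benchmark drive every cipher in the suite through one table of function pointers instead of calling algorithm-specific entry points. A hedged sketch of that generic use, not part of this patch, assuming the retained rhys copy of aead-common.h defines the same structure and that sparkle.h provides the schwaemm_256_128_cipher instance:

#include <stdio.h>
#include "sparkle.h"    /* declares schwaemm_256_128_cipher */

/* Encrypt through the generic meta-information block; associated data and
 * the secret nonce are omitted here for brevity (adlen == 0). */
static int encrypt_with(const aead_cipher_t *cipher,
                        unsigned char *c, unsigned long long *clen,
                        const unsigned char *m, unsigned long long mlen,
                        const unsigned char *npub, const unsigned char *k)
{
    printf("using %s (key=%u, nonce=%u, tag=%u bytes)\n",
           cipher->name, cipher->key_len, cipher->nonce_len, cipher->tag_len);
    return cipher->encrypt(c, clen, m, mlen, 0, 0, 0, npub, k);
}

/* e.g. encrypt_with(&schwaemm_256_128_cipher, ct, &ctlen, msg, msglen, nonce, key); */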
- * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spix/Implementations/crypto_aead/spix128v1/rhys-avr/api.h b/spix/Implementations/crypto_aead/spix128v1/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/spix/Implementations/crypto_aead/spix128v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/spix/Implementations/crypto_aead/spix128v1/rhys-avr/encrypt.c b/spix/Implementations/crypto_aead/spix128v1/rhys-avr/encrypt.c deleted file mode 100644 index facb770..0000000 --- a/spix/Implementations/crypto_aead/spix128v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "spix.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return spix_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return spix_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/spix/Implementations/crypto_aead/spix128v1/rhys-avr/internal-sliscp-256-spix-avr.S b/spix/Implementations/crypto_aead/spix128v1/rhys-avr/internal-sliscp-256-spix-avr.S deleted file mode 100644 index f8cadd9..0000000 --- a/spix/Implementations/crypto_aead/spix128v1/rhys-avr/internal-sliscp-256-spix-avr.S +++ /dev/null @@ -1,1129 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 72 -table_0: - .byte 15 - .byte 71 - .byte 8 - .byte 100 - .byte 4 - .byte 178 - .byte 134 - .byte 107 - .byte 67 - .byte 181 - .byte 226 - .byte 111 - .byte 241 - .byte 55 - .byte 137 - .byte 44 - .byte 68 - .byte 150 - .byte 230 - .byte 221 - .byte 115 - .byte 238 - .byte 202 - .byte 153 - .byte 229 - .byte 76 - .byte 23 - .byte 234 - .byte 11 - .byte 245 - .byte 142 - .byte 15 - .byte 71 - .byte 7 - .byte 100 - .byte 4 - .byte 178 - .byte 130 - .byte 107 - .byte 67 - .byte 181 - .byte 161 - .byte 111 - .byte 241 - .byte 55 - .byte 120 - .byte 44 - .byte 68 - .byte 150 - .byte 162 - .byte 221 - .byte 115 - .byte 238 - .byte 185 - .byte 153 - .byte 229 - .byte 76 - .byte 242 - .byte 234 - .byte 11 - .byte 245 - .byte 133 - .byte 15 - .byte 71 - .byte 7 - .byte 35 - .byte 4 - .byte 178 - .byte 130 - .byte 217 - .byte 67 - .byte 181 - - .text -.global sliscp_light256_permute_spix - .type sliscp_light256_permute_spix, @function -sliscp_light256_permute_spix: - push r28 - push r29 - push r2 - push 
r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 32 - ld r21,Z - ldd r20,Z+1 - ldd r19,Z+2 - ldd r18,Z+3 - ldd r3,Z+4 - ldd r2,Z+5 - ldd r27,Z+6 - ldd r26,Z+7 - ldd r7,Z+16 - ldd r6,Z+17 - ldd r5,Z+18 - ldd r4,Z+19 - ldd r11,Z+20 - ldd r10,Z+21 - ldd r9,Z+22 - ldd r8,Z+23 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - std Y+5,r26 - std Y+6,r27 - std Y+7,r2 - std Y+8,r3 - std Y+9,r4 - std Y+10,r5 - std Y+11,r6 - std Y+12,r7 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - ldd r21,Z+8 - ldd r20,Z+9 - ldd r19,Z+10 - ldd r18,Z+11 - ldd r3,Z+24 - ldd r2,Z+25 - ldd r27,Z+26 - ldd r26,Z+27 - ldd r7,Z+12 - ldd r6,Z+13 - ldd r5,Z+14 - ldd r4,Z+15 - ldd r11,Z+28 - ldd r10,Z+29 - ldd r9,Z+30 - ldd r8,Z+31 - push r31 - push r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r23,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r23 -#endif - mov r30,r1 -52: -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - movw r12,r18 - movw r14,r20 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r18 - and r13,r19 - and r14,r20 - and r15,r21 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - com r27 - com r2 - com r3 - ldi r24,255 - lsr r23 - rol r24 - eor r26,r24 - movw r12,r26 - movw r14,r2 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r26 - and r13,r27 - and r14,r2 - and r15,r3 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - com r19 - com r20 - com r21 - ldi r24,255 - lsr r23 - rol r24 - eor r18,r24 - movw r12,r18 - movw r14,r20 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r18 - and r13,r19 - and r14,r20 - and r15,r21 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - com r27 - com r2 - com r3 - ldi r24,255 - lsr r23 - rol r24 - eor r26,r24 - movw r12,r26 - movw r14,r2 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r26 - and r13,r27 - and r14,r2 - and r15,r3 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - com r19 - com r20 - com r21 - ldi r24,255 - lsr r23 - rol r24 - eor r18,r24 - movw r12,r18 - movw r14,r20 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor 
r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r18 - and r13,r19 - and r14,r20 - and r15,r21 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - com r27 - com r2 - com r3 - ldi r24,255 - lsr r23 - rol r24 - eor r26,r24 - movw r12,r26 - movw r14,r2 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r26 - and r13,r27 - and r14,r2 - and r15,r3 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - com r19 - com r20 - com r21 - ldi r24,255 - lsr r23 - rol r24 - eor r18,r24 - movw r12,r18 - movw r14,r20 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r18 - and r13,r19 - and r14,r20 - and r15,r21 - eor r26,r12 - eor r27,r13 - eor r2,r14 - eor r3,r15 - com r27 - com r2 - com r3 - ldi r24,255 - lsr r23 - rol r24 - eor r26,r24 - movw r12,r26 - movw r14,r2 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r26 - and r13,r27 - and r14,r2 - and r15,r3 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - com r19 - com r20 - com r21 - ldi r24,255 - lsr r23 - rol r24 - eor r18,r24 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - inc r30 - movw r12,r4 - movw r14,r6 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r4 - and r13,r5 - and r14,r6 - and r15,r7 - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - com r9 - com r10 - com r11 - ldi r24,255 - lsr r23 - rol r24 - eor r8,r24 - movw r12,r8 - movw r14,r10 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r8 - and r13,r9 - and r14,r10 - and r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r5 - com r6 - com r7 - ldi r24,255 - lsr r23 - rol r24 - eor r4,r24 - movw r12,r4 - movw r14,r6 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol 
r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r4 - and r13,r5 - and r14,r6 - and r15,r7 - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - com r9 - com r10 - com r11 - ldi r24,255 - lsr r23 - rol r24 - eor r8,r24 - movw r12,r8 - movw r14,r10 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r8 - and r13,r9 - and r14,r10 - and r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r5 - com r6 - com r7 - ldi r24,255 - lsr r23 - rol r24 - eor r4,r24 - movw r12,r4 - movw r14,r6 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r4 - and r13,r5 - and r14,r6 - and r15,r7 - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - com r9 - com r10 - com r11 - ldi r24,255 - lsr r23 - rol r24 - eor r8,r24 - movw r12,r8 - movw r14,r10 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r8 - and r13,r9 - and r14,r10 - and r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r5 - com r6 - com r7 - ldi r24,255 - lsr r23 - rol r24 - eor r4,r24 - movw r12,r4 - movw r14,r6 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r4 - and r13,r5 - and r14,r6 - and r15,r7 - eor r8,r12 - eor r9,r13 - eor r10,r14 - eor r11,r15 - com r9 - com r10 - com r11 - ldi r24,255 - lsr r23 - rol r24 - eor r8,r24 - movw r12,r8 - movw r14,r10 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r8 - and r13,r9 - and r14,r10 - and r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r5 - com r6 - com r7 - ldi r24,255 - lsr r23 - rol r24 - eor r4,r24 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - com r12 - com r13 - com r14 - com r15 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Y+9 - ldd r19,Y+10 - ldd r20,Y+11 - ldd r21,Y+12 - com r18 - com r19 - com r20 - com r21 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - std Y+9,r4 - std Y+10,r5 - std Y+11,r6 - std Y+12,r7 - movw r4,r12 - movw r6,r14 - ldd r12,Y+5 - ldd r13,Y+6 - ldd r14,Y+7 - ldd r15,Y+8 - com r13 - com r14 - com r15 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov 
r23,r0 -#endif - eor r12,r23 - inc r30 - eor r12,r26 - eor r13,r27 - eor r14,r2 - eor r15,r3 - std Y+5,r26 - std Y+6,r27 - std Y+7,r2 - std Y+8,r3 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r2,Y+15 - ldd r3,Y+16 - com r27 - com r2 - com r3 -#if defined(RAMPZ) - elpm r23,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r23,Z -#elif defined(__AVR_TINY__) - ld r23,Z -#else - lpm - mov r23,r0 -#endif - eor r26,r23 - inc r30 - eor r26,r8 - eor r27,r9 - eor r2,r10 - eor r3,r11 - std Y+13,r8 - std Y+14,r9 - std Y+15,r10 - std Y+16,r11 - movw r8,r12 - movw r10,r14 - dec r22 - breq 5866f - rjmp 52b -5866: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - std Z+8,r21 - std Z+9,r20 - std Z+10,r19 - std Z+11,r18 - std Z+24,r3 - std Z+25,r2 - std Z+26,r27 - std Z+27,r26 - std Z+12,r7 - std Z+13,r6 - std Z+14,r5 - std Z+15,r4 - std Z+28,r11 - std Z+29,r10 - std Z+30,r9 - std Z+31,r8 - ldd r18,Y+1 - ldd r19,Y+2 - ldd r20,Y+3 - ldd r21,Y+4 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r2,Y+7 - ldd r3,Y+8 - ldd r4,Y+9 - ldd r5,Y+10 - ldd r6,Y+11 - ldd r7,Y+12 - ldd r8,Y+13 - ldd r9,Y+14 - ldd r10,Y+15 - ldd r11,Y+16 - st Z,r21 - std Z+1,r20 - std Z+2,r19 - std Z+3,r18 - std Z+4,r3 - std Z+5,r2 - std Z+6,r27 - std Z+7,r26 - std Z+16,r7 - std Z+17,r6 - std Z+18,r5 - std Z+19,r4 - std Z+20,r11 - std Z+21,r10 - std Z+22,r9 - std Z+23,r8 - adiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sliscp_light256_permute_spix, .-sliscp_light256_permute_spix - - .text -.global sliscp_light256_swap_spix - .type sliscp_light256_swap_spix, @function -sliscp_light256_swap_spix: - movw r30,r24 -.L__stack_usage = 2 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r26,Z+26 - ldd r27,Z+27 - std Z+24,r18 - std Z+25,r19 - std Z+26,r20 - std Z+27,r21 - std Z+12,r22 - std Z+13,r23 - std Z+14,r26 - std Z+15,r27 - ret - .size sliscp_light256_swap_spix, .-sliscp_light256_swap_spix - -#endif diff --git a/spix/Implementations/crypto_aead/spix128v1/rhys-avr/internal-sliscp-light.c b/spix/Implementations/crypto_aead/spix128v1/rhys-avr/internal-sliscp-light.c deleted file mode 100644 index dd3a688..0000000 --- a/spix/Implementations/crypto_aead/spix128v1/rhys-avr/internal-sliscp-light.c +++ /dev/null @@ -1,413 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-sliscp-light.h" - -#if !defined(__AVR__) - -/** - * \brief Performs one round of the Simeck-64 block cipher. - * - * \param x Left half of the 64-bit block. - * \param y Right half of the 64-bit block. - */ -#define simeck64_round(x, y) \ - do { \ - (y) ^= (leftRotate5((x)) & (x)) ^ leftRotate1((x)) ^ \ - 0xFFFFFFFEU ^ (_rc & 1); \ - _rc >>= 1; \ - } while (0) - -/** - * \brief Encrypts a 64-bit block with the 8 round version of Simeck-64. - * - * \param x Left half of the 64-bit block. - * \param y Right half of the 64-bit block. - * \param rc Round constants for the 8 rounds, 1 bit per round. - * - * It is assumed that the two halves have already been converted from - * big-endian to host byte order before calling this function. The output - * halves will also be in host byte order. - */ -#define simeck64_box(x, y, rc) \ - do { \ - unsigned char _rc = (rc); \ - simeck64_round(x, y); /* Round 1 */ \ - simeck64_round(y, x); /* Round 2 */ \ - simeck64_round(x, y); /* Round 3 */ \ - simeck64_round(y, x); /* Round 4 */ \ - simeck64_round(x, y); /* Round 5 */ \ - simeck64_round(y, x); /* Round 6 */ \ - simeck64_round(x, y); /* Round 7 */ \ - simeck64_round(y, x); /* Round 8 */ \ - } while (0) - -/* Helper macros for 48-bit left rotations */ -#define leftRotate5_48(x) (((x) << 5) | ((x) >> 19)) -#define leftRotate1_48(x) (((x) << 1) | ((x) >> 23)) - -/** - * \brief Performs one round of the Simeck-48 block cipher. - * - * \param x Left half of the 48-bit block. - * \param y Right half of the 48-bit block. - */ -#define simeck48_round(x, y) \ - do { \ - (y) ^= (leftRotate5_48((x)) & (x)) ^ leftRotate1_48((x)) ^ \ - 0x00FFFFFEU ^ (_rc & 1); \ - (y) &= 0x00FFFFFFU; \ - _rc >>= 1; \ - } while (0) - -/** - * \brief Encrypts a 48-bit block with the 6 round version of Simeck-48. - * - * \param x Left half of the 48-bit block. - * \param y Right half of the 48-bit block. - * \param rc Round constants for the 8 rounds, 1 bit per round. - * - * It is assumed that the two halves have already been converted from - * big-endian to host byte order before calling this function. The output - * halves will also be in host byte order. 
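The simeck64_box() macro above applies eight rounds of Simeck-64, alternating which 32-bit half is updated and consuming one bit of the round-constant byte per round. The same structure written out as a plain function may be easier to follow; this is an illustration only (not part of this patch), with the leftRotate1()/leftRotate5() helpers from internal-util.h assumed to be ordinary 32-bit rotations and inlined here for self-containment:

#include <stdint.h>

static uint32_t rotl32(uint32_t x, unsigned n)
{
    return (x << n) | (x >> (32 - n));
}

/* Function form of the 8-round simeck64_box() macro. */
static void simeck64_box_fn(uint32_t *x, uint32_t *y, unsigned char rc)
{
    int round;
    for (round = 0; round < 8; ++round) {
        /* Even rounds update y from x, odd rounds update x from y. */
        uint32_t *a = (round & 1) ? y : x;
        uint32_t *b = (round & 1) ? x : y;
        *b ^= (rotl32(*a, 5) & *a) ^ rotl32(*a, 1) ^ 0xFFFFFFFEU ^ (rc & 1);
        rc >>= 1;
    }
}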
- */ -#define simeck48_box(x, y, rc) \ - do { \ - unsigned char _rc = (rc); \ - simeck48_round(x, y); /* Round 1 */ \ - simeck48_round(y, x); /* Round 2 */ \ - simeck48_round(x, y); /* Round 3 */ \ - simeck48_round(y, x); /* Round 4 */ \ - simeck48_round(x, y); /* Round 5 */ \ - simeck48_round(y, x); /* Round 6 */ \ - } while (0) - -/* Interleaved rc0, rc1, sc0, and sc1 values for each round */ -static unsigned char const sliscp_light256_RC[18 * 4] = { - 0x0f, 0x47, 0x08, 0x64, 0x04, 0xb2, 0x86, 0x6b, - 0x43, 0xb5, 0xe2, 0x6f, 0xf1, 0x37, 0x89, 0x2c, - 0x44, 0x96, 0xe6, 0xdd, 0x73, 0xee, 0xca, 0x99, - 0xe5, 0x4c, 0x17, 0xea, 0x0b, 0xf5, 0x8e, 0x0f, - 0x47, 0x07, 0x64, 0x04, 0xb2, 0x82, 0x6b, 0x43, - 0xb5, 0xa1, 0x6f, 0xf1, 0x37, 0x78, 0x2c, 0x44, - 0x96, 0xa2, 0xdd, 0x73, 0xee, 0xb9, 0x99, 0xe5, - 0x4c, 0xf2, 0xea, 0x0b, 0xf5, 0x85, 0x0f, 0x47, - 0x07, 0x23, 0x04, 0xb2, 0x82, 0xd9, 0x43, 0xb5 -}; - -void sliscp_light256_permute_spix(unsigned char block[32], unsigned rounds) -{ - const unsigned char *rc = sliscp_light256_RC; - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t t0, t1; - - /* Load the block into local state variables */ - x0 = be_load_word32(block); - x1 = be_load_word32(block + 4); - x2 = be_load_word32(block + 8); - x3 = be_load_word32(block + 24); /* Assumes the block is pre-swapped */ - x4 = be_load_word32(block + 16); - x5 = be_load_word32(block + 20); - x6 = be_load_word32(block + 12); - x7 = be_load_word32(block + 28); - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds, rc += 4) { - /* Apply Simeck-64 to two of the 64-bit sub-blocks */ - simeck64_box(x2, x3, rc[0]); - simeck64_box(x6, x7, rc[1]); - - /* Add step constants */ - x0 ^= 0xFFFFFFFFU; - x1 ^= 0xFFFFFF00U ^ rc[2]; - x4 ^= 0xFFFFFFFFU; - x5 ^= 0xFFFFFF00U ^ rc[3]; - - /* Mix the sub-blocks */ - t0 = x0 ^ x2; - t1 = x1 ^ x3; - x0 = x2; - x1 = x3; - x2 = x4 ^ x6; - x3 = x5 ^ x7; - x4 = x6; - x5 = x7; - x6 = t0; - x7 = t1; - } - - /* Store the state back into the block */ - be_store_word32(block, x0); - be_store_word32(block + 4, x1); - be_store_word32(block + 8, x2); - be_store_word32(block + 24, x3); /* Assumes the block is pre-swapped */ - be_store_word32(block + 16, x4); - be_store_word32(block + 20, x5); - be_store_word32(block + 12, x6); - be_store_word32(block + 28, x7); -} - -void sliscp_light256_swap_spix(unsigned char block[32]) -{ - uint32_t t1, t2; - t1 = le_load_word32(block + 12); - t2 = le_load_word32(block + 24); - le_store_word32(block + 24, t1); - le_store_word32(block + 12, t2); -} - -void sliscp_light256_permute_spoc(unsigned char block[32]) -{ - const unsigned char *rc = sliscp_light256_RC; - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t t0, t1; - unsigned round; - - /* Load the block into local state variables */ - x0 = be_load_word32(block); - x1 = be_load_word32(block + 4); - x2 = be_load_word32(block + 16); /* Assumes the block is pre-swapped */ - x3 = be_load_word32(block + 20); - x4 = be_load_word32(block + 8); - x5 = be_load_word32(block + 12); - x6 = be_load_word32(block + 24); - x7 = be_load_word32(block + 28); - - /* Perform all permutation rounds */ - for (round = 0; round < 18; ++round, rc += 4) { - /* Apply Simeck-64 to two of the 64-bit sub-blocks */ - simeck64_box(x2, x3, rc[0]); - simeck64_box(x6, x7, rc[1]); - - /* Add step constants */ - x0 ^= 0xFFFFFFFFU; - x1 ^= 0xFFFFFF00U ^ rc[2]; - x4 ^= 0xFFFFFFFFU; - x5 ^= 0xFFFFFF00U ^ rc[3]; - - /* Mix the sub-blocks */ - t0 = x0 ^ x2; - t1 = x1 ^ x3; - x0 = x2; - x1 = x3; - x2 = x4 ^ x6; - x3 = x5 ^ x7; 
- x4 = x6; - x5 = x7; - x6 = t0; - x7 = t1; - } - - /* Store the state back into the block */ - be_store_word32(block, x0); - be_store_word32(block + 4, x1); - be_store_word32(block + 16, x2); /* Assumes the block is pre-swapped */ - be_store_word32(block + 20, x3); - be_store_word32(block + 8, x4); - be_store_word32(block + 12, x5); - be_store_word32(block + 24, x6); - be_store_word32(block + 28, x7); -} - -void sliscp_light256_swap_spoc(unsigned char block[32]) -{ - uint64_t t1, t2; - t1 = le_load_word64(block + 8); - t2 = le_load_word64(block + 16); - le_store_word64(block + 16, t1); - le_store_word64(block + 8, t2); -} - -/* Load a big-endian 24-bit word from a byte buffer */ -#define be_load_word24(ptr) \ - ((((uint32_t)((ptr)[0])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[2]))) - -/* Store a big-endian 24-bit word into a byte buffer */ -#define be_store_word24(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 16); \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)_x; \ - } while (0) - -void sliscp_light192_permute(unsigned char block[24]) -{ - /* Interleaved rc0, rc1, sc0, and sc1 values for each round */ - static unsigned char const RC[18 * 4] = { - 0x07, 0x27, 0x08, 0x29, 0x04, 0x34, 0x0c, 0x1d, - 0x06, 0x2e, 0x0a, 0x33, 0x25, 0x19, 0x2f, 0x2a, - 0x17, 0x35, 0x38, 0x1f, 0x1c, 0x0f, 0x24, 0x10, - 0x12, 0x08, 0x36, 0x18, 0x3b, 0x0c, 0x0d, 0x14, - 0x26, 0x0a, 0x2b, 0x1e, 0x15, 0x2f, 0x3e, 0x31, - 0x3f, 0x38, 0x01, 0x09, 0x20, 0x24, 0x21, 0x2d, - 0x30, 0x36, 0x11, 0x1b, 0x28, 0x0d, 0x39, 0x16, - 0x3c, 0x2b, 0x05, 0x3d, 0x22, 0x3e, 0x27, 0x03, - 0x13, 0x01, 0x34, 0x02, 0x1a, 0x21, 0x2e, 0x23 - }; - const unsigned char *rc = RC; - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t t0, t1; - unsigned round; - - /* Load the block into local state variables. 
Each 24-bit block is - * placed into a separate 32-bit word which improves efficiency below */ - x0 = be_load_word24(block); - x1 = be_load_word24(block + 3); - x2 = be_load_word24(block + 6); - x3 = be_load_word24(block + 9); - x4 = be_load_word24(block + 12); - x5 = be_load_word24(block + 15); - x6 = be_load_word24(block + 18); - x7 = be_load_word24(block + 21); - - /* Perform all permutation rounds */ - for (round = 0; round < 18; ++round, rc += 4) { - /* Apply Simeck-48 to two of the 48-bit sub-blocks */ - simeck48_box(x2, x3, rc[0]); - simeck48_box(x6, x7, rc[1]); - - /* Add step constants */ - x0 ^= 0x00FFFFFFU; - x1 ^= 0x00FFFF00U ^ rc[2]; - x4 ^= 0x00FFFFFFU; - x5 ^= 0x00FFFF00U ^ rc[3]; - - /* Mix the sub-blocks */ - t0 = x0 ^ x2; - t1 = x1 ^ x3; - x0 = x2; - x1 = x3; - x2 = x4 ^ x6; - x3 = x5 ^ x7; - x4 = x6; - x5 = x7; - x6 = t0; - x7 = t1; - } - - /* Store the state back into the block */ - be_store_word24(block, x0); - be_store_word24(block + 3, x1); - be_store_word24(block + 6, x2); - be_store_word24(block + 9, x3); - be_store_word24(block + 12, x4); - be_store_word24(block + 15, x5); - be_store_word24(block + 18, x6); - be_store_word24(block + 21, x7); -} - -void sliscp_light320_permute(unsigned char block[40]) -{ - /* Interleaved rc0, rc1, rc2, sc0, sc1, and sc2 values for each round */ - static unsigned char const RC[16 * 6] = { - 0x07, 0x53, 0x43, 0x50, 0x28, 0x14, 0x0a, 0x5d, - 0xe4, 0x5c, 0xae, 0x57, 0x9b, 0x49, 0x5e, 0x91, - 0x48, 0x24, 0xe0, 0x7f, 0xcc, 0x8d, 0xc6, 0x63, - 0xd1, 0xbe, 0x32, 0x53, 0xa9, 0x54, 0x1a, 0x1d, - 0x4e, 0x60, 0x30, 0x18, 0x22, 0x28, 0x75, 0x68, - 0x34, 0x9a, 0xf7, 0x6c, 0x25, 0xe1, 0x70, 0x38, - 0x62, 0x82, 0xfd, 0xf6, 0x7b, 0xbd, 0x96, 0x47, - 0xf9, 0x9d, 0xce, 0x67, 0x71, 0x6b, 0x76, 0x40, - 0x20, 0x10, 0xaa, 0x88, 0xa0, 0x4f, 0x27, 0x13, - 0x2b, 0xdc, 0xb0, 0xbe, 0x5f, 0x2f, 0xe9, 0x8b, - 0x09, 0x5b, 0xad, 0xd6, 0xcf, 0x59, 0x1e, 0xe9, - 0x74, 0xba, 0xb7, 0xc6, 0xad, 0x7f, 0x3f, 0x1f - }; - const unsigned char *rc = RC; - uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9; - uint32_t t0, t1; - unsigned round; - - /* Load the block into local state variables */ - x0 = be_load_word32(block); - x1 = be_load_word32(block + 16); /* Assumes the block is pre-swapped */ - x2 = be_load_word32(block + 8); - x3 = be_load_word32(block + 12); - x4 = be_load_word32(block + 4); - x5 = be_load_word32(block + 20); - x6 = be_load_word32(block + 24); - x7 = be_load_word32(block + 28); - x8 = be_load_word32(block + 32); - x9 = be_load_word32(block + 36); - - /* Perform all permutation rounds */ - for (round = 0; round < 16; ++round, rc += 6) { - /* Apply Simeck-64 to three of the 64-bit sub-blocks */ - simeck64_box(x0, x1, rc[0]); - simeck64_box(x4, x5, rc[1]); - simeck64_box(x8, x9, rc[2]); - x6 ^= x8; - x7 ^= x9; - x2 ^= x4; - x3 ^= x5; - x8 ^= x0; - x9 ^= x1; - - /* Add step constants */ - x2 ^= 0xFFFFFFFFU; - x3 ^= 0xFFFFFF00U ^ rc[3]; - x6 ^= 0xFFFFFFFFU; - x7 ^= 0xFFFFFF00U ^ rc[4]; - x8 ^= 0xFFFFFFFFU; - x9 ^= 0xFFFFFF00U ^ rc[5]; - - /* Rotate the sub-blocks */ - t0 = x8; - t1 = x9; - x8 = x2; - x9 = x3; - x2 = x4; - x3 = x5; - x4 = x0; - x5 = x1; - x0 = x6; - x1 = x7; - x6 = t0; - x7 = t1; - } - - /* Store the state back into the block */ - be_store_word32(block, x0); - be_store_word32(block + 16, x1); /* Assumes the block is pre-swapped */ - be_store_word32(block + 8, x2); - be_store_word32(block + 12, x3); - be_store_word32(block + 4, x4); - be_store_word32(block + 20, x5); - be_store_word32(block + 24, x6); - be_store_word32(block + 28, x7); - 
be_store_word32(block + 32, x8); - be_store_word32(block + 36, x9); -} - -void sliscp_light320_swap(unsigned char block[40]) -{ - uint32_t t1, t2; - t1 = le_load_word32(block + 4); - t2 = le_load_word32(block + 16); - le_store_word32(block + 16, t1); - le_store_word32(block + 4, t2); -} - -#endif /* !__AVR__ */ diff --git a/spix/Implementations/crypto_aead/spix128v1/rhys-avr/internal-sliscp-light.h b/spix/Implementations/crypto_aead/spix128v1/rhys-avr/internal-sliscp-light.h deleted file mode 100644 index 8a5e8d5..0000000 --- a/spix/Implementations/crypto_aead/spix128v1/rhys-avr/internal-sliscp-light.h +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SLISCP_LIGHT_H -#define LW_INTERNAL_SLISCP_LIGHT_H - -/** - * \file internal-sliscp-light.h - * \brief sLiSCP-light permutation - * - * There are three variants of sLiSCP-light in use in the NIST submissions: - * - * \li sLiSCP-light-256 with a 256-bit block size, used in SPIX and SpoC. - * \li sLiSCP-light-192 with a 192-bit block size, used in SpoC. - * \li sLiSCP-light-320 with a 320-bit block size, used in ACE. - * - * References: https://uwaterloo.ca/communications-security-lab/lwc/ace, - * https://uwaterloo.ca/communications-security-lab/lwc/spix, - * https://uwaterloo.ca/communications-security-lab/lwc/spoc - */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the state for sLiSCP-light-256. - */ -#define SLISCP_LIGHT256_STATE_SIZE 32 - -/** - * \brief Size of the state for sLiSCP-light-192. - */ -#define SLISCP_LIGHT192_STATE_SIZE 24 - -/** - * \brief Size of the state for sLiSCP-light-320. - */ -#define SLISCP_LIGHT320_STATE_SIZE 40 - -/** - * \brief Performs the sLiSCP-light permutation on a 256-bit block. - * - * \param block Points to the block to be permuted. - * \param rounds Number of rounds to be performed, usually 9 or 18. - * - * The bytes of the block are assumed to be rearranged to match the - * requirements of the SPIX cipher. SPIX places the rate bytes at - * positions 8, 9, 10, 11, 24, 25, 26, and 27. - * - * This function assumes that bytes 24-27 have been pre-swapped with - * bytes 12-15 so that the rate portion of the state is contiguous. - * - * The sliscp_light256_swap_spix() function can be used to switch - * between the canonical order and the pre-swapped order. 
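/* A minimal usage sketch (illustrative only, not part of this patch), assuming
   the canonical layout described above: the swap moves the rate bytes into the
   contiguous positions 8..15, the permutation is applied, and a second swap
   restores the canonical order. The wrapper name below is hypothetical. */
#include "internal-sliscp-light.h"
void example_spix_permute_roundtrip(unsigned char state[32])
{
    sliscp_light256_swap_spix(state);        /* rate bytes now contiguous at 8..15 */
    sliscp_light256_permute_spix(state, 18); /* e.g. 18 rounds, as used at SPIX init */
    sliscp_light256_swap_spix(state);        /* back to the canonical byte order */
}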
- * - * \sa sliscp_light256_swap_spix() - */ -void sliscp_light256_permute_spix(unsigned char block[32], unsigned rounds); - -/** - * \brief Swaps rate bytes in a sLiSCP-light 256-bit block for SPIX. - * - * \param block Points to the block to be rate-swapped. - * - * \sa sliscp_light256_permute_spix() - */ -void sliscp_light256_swap_spix(unsigned char block[32]); - -/** - * \brief Performs the sLiSCP-light permutation on a 256-bit block. - * - * \param block Points to the block to be permuted. - * - * The bytes of the block are assumed to be rearranged to match the - * requirements of the SpoC-128 cipher. SpoC-128 interleaves the - * rate bytes and the mask bytes. This version assumes that the - * rate and mask are in contiguous bytes of the state. - * - * SpoC-128 absorbs bytes using the mask bytes of the state at offsets - * 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, and 31. - * It squeezes bytes using the rate bytes of the state at offsets - * 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, and 23. - * - * This function assumes that bytes 8-15 have been pre-swapped with 16-23 - * so that the rate and mask portions of the state are contiguous. - * - * The sliscp_light256_swap_spoc() function can be used to switch - * between the canonical order and the pre-swapped order. - * - * \sa sliscp_light256_swap_spoc() - */ -void sliscp_light256_permute_spoc(unsigned char block[32]); - -/** - * \brief Swaps rate bytes in a sLiSCP-light 256-bit block for SpoC-128. - * - * \param block Points to the block to be rate-swapped. - * - * \sa sliscp_light256_permute_spoc() - */ -void sliscp_light256_swap_spoc(unsigned char block[32]); - -/** - * \brief Performs the sLiSCP-light permutation on a 192-bit block. - * - * \param block Points to the block to be permuted. - */ -void sliscp_light192_permute(unsigned char block[24]); - -/** - * \brief Performs the sLiSCP-light permutation on a 320-bit block. - * - * \param block Points to the block to be permuted. - * - * The ACE specification refers to this permutation as "ACE" but that - * can be confused with the name of the AEAD mode so we call this - * permutation "sLiSCP-light-320" instead. - * - * ACE absorbs and squeezes data at the rate bytes 0, 1, 2, 3, 16, 17, 18, 19. - * Efficiency can suffer because of the discontinuity in rate byte positions. - * - * To counteract this, we assume that the input to the permutation has been - * pre-swapped: bytes 4, 5, 6, 7 are swapped with bytes 16, 17, 18, 19 so - * that the rate is contiguous at the start of the state. - * - * The sliscp_light320_swap() function can be used to switch between the - * canonical order and the pre-swapped order. - * - * \sa sliscp_light320_swap() - */ -void sliscp_light320_permute(unsigned char block[40]); - -/** - * \brief Swaps rate bytes in a sLiSCP-light 320-bit block. - * - * \param block Points to the block to be rate-swapped. - * - * \sa sliscp_light320_permute() - */ -void sliscp_light320_swap(unsigned char block[40]); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spix/Implementations/crypto_aead/spix128v1/rhys-avr/internal-util.h b/spix/Implementations/crypto_aead/spix128v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/spix/Implementations/crypto_aead/spix128v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) 
| \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = 
(src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/spix/Implementations/crypto_aead/spix128v1/rhys-avr/spix.c b/spix/Implementations/crypto_aead/spix128v1/rhys-avr/spix.c deleted file mode 100644 index 7fc8f6a..0000000 --- a/spix/Implementations/crypto_aead/spix128v1/rhys-avr/spix.c +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "spix.h" -#include "internal-sliscp-light.h" -#include "internal-util.h" -#include - -/** - * \brief Size of the state for the internal sLiSCP-light permutation. - */ -#define SPIX_STATE_SIZE SLISCP_LIGHT256_STATE_SIZE - -/** - * \brief Rate for absorbing data into the sLiSCP-light state and for - * squeezing data out again. - */ -#define SPIX_RATE 8 - -aead_cipher_t const spix_cipher = { - "SPIX", - SPIX_KEY_SIZE, - SPIX_NONCE_SIZE, - SPIX_TAG_SIZE, - AEAD_FLAG_NONE, - spix_aead_encrypt, - spix_aead_decrypt -}; - -/* Indices of where a rate byte is located in the state. We don't - * need this array any more because sliscp_light256_permute_spix() - * operates on byte-swapped states where the rate bytes are contiguous - * in the bytes 8 to 15 */ -/* -static unsigned char const spix_rate_posn[8] = { - 8, 9, 10, 11, 24, 25, 26, 27 -}; -*/ - -/** - * \brief Initializes the SPIX state. - * - * \param state sLiSCP-light-256 permutation state. - * \param k Points to the 128-bit key. - * \param npub Points to the 128-bit nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. 
- */ -static void spix_init - (unsigned char state[SPIX_STATE_SIZE], - const unsigned char *k, const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned temp; - - /* Initialize the state by interleaving the key and nonce */ - memcpy(state, npub, 8); - memcpy(state + 8, k, 8); - memcpy(state + 16, npub + 8, 8); - memcpy(state + 24, k + 8, 8); - sliscp_light256_swap_spix(state); - - /* Run the permutation to scramble the initial state */ - sliscp_light256_permute_spix(state, 18); - - /* Absorb the key in two further permutation operations */ - lw_xor_block(state + 8, k, 8); - sliscp_light256_permute_spix(state, 18); - lw_xor_block(state + 8, k + 8, 8); - sliscp_light256_permute_spix(state, 18); - - /* Absorb the associated data into the state */ - if (adlen != 0) { - while (adlen >= SPIX_RATE) { - lw_xor_block(state + 8, ad, SPIX_RATE); - state[SPIX_STATE_SIZE - 1] ^= 0x01; /* domain separation */ - sliscp_light256_permute_spix(state, 9); - ad += SPIX_RATE; - adlen -= SPIX_RATE; - } - temp = (unsigned)adlen; - lw_xor_block(state + 8, ad, temp); - state[temp + 8] ^= 0x80; /* padding */ - state[SPIX_STATE_SIZE - 1] ^= 0x01; /* domain separation */ - sliscp_light256_permute_spix(state, 9); - } -} - -/** - * \brief Finalizes the SPIX encryption or decryption operation. - * - * \param state sLiSCP-light-256 permutation state. - * \param k Points to the 128-bit key. - * \param tag Points to the 16 byte buffer to receive the computed tag. - */ -static void spix_finalize - (unsigned char state[SPIX_STATE_SIZE], const unsigned char *k, - unsigned char *tag) -{ - /* Absorb the key into the state again */ - lw_xor_block(state + 8, k, 8); - sliscp_light256_permute_spix(state, 18); - lw_xor_block(state + 8, k + 8, 8); - sliscp_light256_permute_spix(state, 18); - - /* Copy out the authentication tag */ - sliscp_light256_swap_spix(state); - memcpy(tag, state + 8, 8); - memcpy(tag + 8, state + 24, 8); -} - -int spix_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[SPIX_STATE_SIZE]; - unsigned temp; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPIX_TAG_SIZE; - - /* Initialize the SPIX state and absorb the associated data */ - spix_init(state, k, npub, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - while (mlen >= SPIX_RATE) { - lw_xor_block_2_dest(c, state + 8, m, SPIX_RATE); - state[SPIX_STATE_SIZE - 1] ^= 0x02; /* domain separation */ - sliscp_light256_permute_spix(state, 9); - c += SPIX_RATE; - m += SPIX_RATE; - mlen -= SPIX_RATE; - } - temp = (unsigned)mlen; - lw_xor_block_2_dest(c, state + 8, m, temp); - state[temp + 8] ^= 0x80; /* padding */ - state[SPIX_STATE_SIZE - 1] ^= 0x02; /* domain separation */ - sliscp_light256_permute_spix(state, 9); - c += mlen; - - /* Generate the authentication tag */ - spix_finalize(state, k, c); - return 0; -} - -int spix_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[SPIX_STATE_SIZE]; - unsigned char *mtemp = m; - unsigned temp; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < 
SPIX_TAG_SIZE) - return -1; - *mlen = clen - SPIX_TAG_SIZE; - - /* Initialize the SPIX state and absorb the associated data */ - spix_init(state, k, npub, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPIX_TAG_SIZE; - while (clen >= SPIX_RATE) { - lw_xor_block_swap(m, state + 8, c, SPIX_RATE); - state[SPIX_STATE_SIZE - 1] ^= 0x02; /* domain separation */ - sliscp_light256_permute_spix(state, 9); - c += SPIX_RATE; - m += SPIX_RATE; - clen -= SPIX_RATE; - } - temp = (unsigned)clen; - lw_xor_block_swap(m, state + 8, c, temp); - state[temp + 8] ^= 0x80; /* padding */ - state[SPIX_STATE_SIZE - 1] ^= 0x02; /* domain separation */ - sliscp_light256_permute_spix(state, 9); - c += clen; - - /* Finalize the SPIX state and compare against the authentication tag */ - spix_finalize(state, k, state); - return aead_check_tag(mtemp, *mlen, state, c, SPIX_TAG_SIZE); -} diff --git a/spix/Implementations/crypto_aead/spix128v1/rhys-avr/spix.h b/spix/Implementations/crypto_aead/spix128v1/rhys-avr/spix.h deleted file mode 100644 index 844c514..0000000 --- a/spix/Implementations/crypto_aead/spix128v1/rhys-avr/spix.h +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SPIX_H -#define LWCRYPTO_SPIX_H - -#include "aead-common.h" - -/** - * \file spix.h - * \brief SPIX authenticated encryption algorithm. - * - * SPIX is an authenticated encryption algorithm with a 128-bit key, - * a 128-bit nonce, and a 128-bit tag. It uses the MonkeyDuplex - * construction on top of the 256-bit sLiSCP-light permutation. - * - * References: https://uwaterloo.ca/communications-security-lab/lwc/spix - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for SPIX. - */ -#define SPIX_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SPIX. - */ -#define SPIX_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SPIX. - */ -#define SPIX_NONCE_SIZE 16 - -/** - * \brief Meta-information block for the SPIX cipher. - */ -extern aead_cipher_t const spix_cipher; - -/** - * \brief Encrypts and authenticates a packet with SPIX. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa spix_aead_decrypt() - */ -int spix_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SPIX. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- * - * \sa spix_aead_encrypt() - */ -int spix_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spix/Implementations/crypto_aead/spix128v1/rhys/internal-sliscp-256-spix-avr.S b/spix/Implementations/crypto_aead/spix128v1/rhys/internal-sliscp-256-spix-avr.S new file mode 100644 index 0000000..f8cadd9 --- /dev/null +++ b/spix/Implementations/crypto_aead/spix128v1/rhys/internal-sliscp-256-spix-avr.S @@ -0,0 +1,1129 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 72 +table_0: + .byte 15 + .byte 71 + .byte 8 + .byte 100 + .byte 4 + .byte 178 + .byte 134 + .byte 107 + .byte 67 + .byte 181 + .byte 226 + .byte 111 + .byte 241 + .byte 55 + .byte 137 + .byte 44 + .byte 68 + .byte 150 + .byte 230 + .byte 221 + .byte 115 + .byte 238 + .byte 202 + .byte 153 + .byte 229 + .byte 76 + .byte 23 + .byte 234 + .byte 11 + .byte 245 + .byte 142 + .byte 15 + .byte 71 + .byte 7 + .byte 100 + .byte 4 + .byte 178 + .byte 130 + .byte 107 + .byte 67 + .byte 181 + .byte 161 + .byte 111 + .byte 241 + .byte 55 + .byte 120 + .byte 44 + .byte 68 + .byte 150 + .byte 162 + .byte 221 + .byte 115 + .byte 238 + .byte 185 + .byte 153 + .byte 229 + .byte 76 + .byte 242 + .byte 234 + .byte 11 + .byte 245 + .byte 133 + .byte 15 + .byte 71 + .byte 7 + .byte 35 + .byte 4 + .byte 178 + .byte 130 + .byte 217 + .byte 67 + .byte 181 + + .text +.global sliscp_light256_permute_spix + .type sliscp_light256_permute_spix, @function +sliscp_light256_permute_spix: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 32 + ld r21,Z + ldd r20,Z+1 + ldd r19,Z+2 + ldd r18,Z+3 + ldd r3,Z+4 + ldd r2,Z+5 + ldd r27,Z+6 + ldd r26,Z+7 + ldd r7,Z+16 + ldd r6,Z+17 + ldd r5,Z+18 + ldd r4,Z+19 + ldd r11,Z+20 + ldd r10,Z+21 + ldd r9,Z+22 + ldd r8,Z+23 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + std Y+5,r26 + std Y+6,r27 + std Y+7,r2 + std Y+8,r3 + std Y+9,r4 + std Y+10,r5 + std Y+11,r6 + std Y+12,r7 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + ldd r21,Z+8 + ldd r20,Z+9 + ldd r19,Z+10 + ldd r18,Z+11 + ldd r3,Z+24 + ldd r2,Z+25 + ldd r27,Z+26 + ldd r26,Z+27 + ldd r7,Z+12 + ldd r6,Z+13 + ldd r5,Z+14 + ldd r4,Z+15 + ldd r11,Z+28 + ldd r10,Z+29 + ldd r9,Z+30 + ldd r8,Z+31 + push r31 + push r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r23,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r23 +#endif + mov r30,r1 +52: +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + movw r12,r18 + movw r14,r20 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r18 + and r13,r19 + and r14,r20 
+ and r15,r21 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + com r27 + com r2 + com r3 + ldi r24,255 + lsr r23 + rol r24 + eor r26,r24 + movw r12,r26 + movw r14,r2 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r26 + and r13,r27 + and r14,r2 + and r15,r3 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + com r19 + com r20 + com r21 + ldi r24,255 + lsr r23 + rol r24 + eor r18,r24 + movw r12,r18 + movw r14,r20 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r18 + and r13,r19 + and r14,r20 + and r15,r21 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + com r27 + com r2 + com r3 + ldi r24,255 + lsr r23 + rol r24 + eor r26,r24 + movw r12,r26 + movw r14,r2 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r26 + and r13,r27 + and r14,r2 + and r15,r3 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + com r19 + com r20 + com r21 + ldi r24,255 + lsr r23 + rol r24 + eor r18,r24 + movw r12,r18 + movw r14,r20 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r18 + and r13,r19 + and r14,r20 + and r15,r21 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + com r27 + com r2 + com r3 + ldi r24,255 + lsr r23 + rol r24 + eor r26,r24 + movw r12,r26 + movw r14,r2 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r26 + and r13,r27 + and r14,r2 + and r15,r3 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + com r19 + com r20 + com r21 + ldi r24,255 + lsr r23 + rol r24 + eor r18,r24 + movw r12,r18 + movw r14,r20 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r18 + and r13,r19 + and r14,r20 + and r15,r21 + eor r26,r12 + eor r27,r13 + eor r2,r14 + eor r3,r15 + com r27 + com r2 + com r3 + ldi r24,255 + lsr r23 + rol r24 + eor r26,r24 + movw r12,r26 + movw r14,r2 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 
+ lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r26 + and r13,r27 + and r14,r2 + and r15,r3 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + com r19 + com r20 + com r21 + ldi r24,255 + lsr r23 + rol r24 + eor r18,r24 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + inc r30 + movw r12,r4 + movw r14,r6 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r4 + and r13,r5 + and r14,r6 + and r15,r7 + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + com r9 + com r10 + com r11 + ldi r24,255 + lsr r23 + rol r24 + eor r8,r24 + movw r12,r8 + movw r14,r10 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r8 + and r13,r9 + and r14,r10 + and r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r5 + com r6 + com r7 + ldi r24,255 + lsr r23 + rol r24 + eor r4,r24 + movw r12,r4 + movw r14,r6 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r4 + and r13,r5 + and r14,r6 + and r15,r7 + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + com r9 + com r10 + com r11 + ldi r24,255 + lsr r23 + rol r24 + eor r8,r24 + movw r12,r8 + movw r14,r10 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r8 + and r13,r9 + and r14,r10 + and r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r5 + com r6 + com r7 + ldi r24,255 + lsr r23 + rol r24 + eor r4,r24 + movw r12,r4 + movw r14,r6 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r4 + and r13,r5 + and r14,r6 + and r15,r7 + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + com r9 + com r10 + com r11 + ldi r24,255 + lsr r23 + rol r24 + eor r8,r24 + movw r12,r8 + movw r14,r10 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r8 + and r13,r9 + and r14,r10 + and r15,r11 + eor 
r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r5 + com r6 + com r7 + ldi r24,255 + lsr r23 + rol r24 + eor r4,r24 + movw r12,r4 + movw r14,r6 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r4 + and r13,r5 + and r14,r6 + and r15,r7 + eor r8,r12 + eor r9,r13 + eor r10,r14 + eor r11,r15 + com r9 + com r10 + com r11 + ldi r24,255 + lsr r23 + rol r24 + eor r8,r24 + movw r12,r8 + movw r14,r10 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r8 + and r13,r9 + and r14,r10 + and r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r5 + com r6 + com r7 + ldi r24,255 + lsr r23 + rol r24 + eor r4,r24 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + com r12 + com r13 + com r14 + com r15 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Y+9 + ldd r19,Y+10 + ldd r20,Y+11 + ldd r21,Y+12 + com r18 + com r19 + com r20 + com r21 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + std Y+9,r4 + std Y+10,r5 + std Y+11,r6 + std Y+12,r7 + movw r4,r12 + movw r6,r14 + ldd r12,Y+5 + ldd r13,Y+6 + ldd r14,Y+7 + ldd r15,Y+8 + com r13 + com r14 + com r15 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + eor r12,r23 + inc r30 + eor r12,r26 + eor r13,r27 + eor r14,r2 + eor r15,r3 + std Y+5,r26 + std Y+6,r27 + std Y+7,r2 + std Y+8,r3 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r2,Y+15 + ldd r3,Y+16 + com r27 + com r2 + com r3 +#if defined(RAMPZ) + elpm r23,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r23,Z +#elif defined(__AVR_TINY__) + ld r23,Z +#else + lpm + mov r23,r0 +#endif + eor r26,r23 + inc r30 + eor r26,r8 + eor r27,r9 + eor r2,r10 + eor r3,r11 + std Y+13,r8 + std Y+14,r9 + std Y+15,r10 + std Y+16,r11 + movw r8,r12 + movw r10,r14 + dec r22 + breq 5866f + rjmp 52b +5866: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + std Z+8,r21 + std Z+9,r20 + std Z+10,r19 + std Z+11,r18 + std Z+24,r3 + std Z+25,r2 + std Z+26,r27 + std Z+27,r26 + std Z+12,r7 + std Z+13,r6 + std Z+14,r5 + std Z+15,r4 + std Z+28,r11 + std Z+29,r10 + std Z+30,r9 + std Z+31,r8 + ldd r18,Y+1 + ldd r19,Y+2 + ldd r20,Y+3 + ldd r21,Y+4 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r2,Y+7 + ldd r3,Y+8 + ldd r4,Y+9 + ldd r5,Y+10 + ldd r6,Y+11 + ldd r7,Y+12 + ldd r8,Y+13 + ldd r9,Y+14 + ldd r10,Y+15 + ldd r11,Y+16 + st Z,r21 + std Z+1,r20 + std Z+2,r19 + std Z+3,r18 + std Z+4,r3 + std Z+5,r2 + std Z+6,r27 + std Z+7,r26 + std Z+16,r7 + std Z+17,r6 + std Z+18,r5 + std Z+19,r4 + std Z+20,r11 + std Z+21,r10 + std Z+22,r9 + std Z+23,r8 + adiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sliscp_light256_permute_spix, .-sliscp_light256_permute_spix + + .text +.global sliscp_light256_swap_spix + .type 
sliscp_light256_swap_spix, @function +sliscp_light256_swap_spix: + movw r30,r24 +.L__stack_usage = 2 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r26,Z+26 + ldd r27,Z+27 + std Z+24,r18 + std Z+25,r19 + std Z+26,r20 + std Z+27,r21 + std Z+12,r22 + std Z+13,r23 + std Z+14,r26 + std Z+15,r27 + ret + .size sliscp_light256_swap_spix, .-sliscp_light256_swap_spix + +#endif diff --git a/spix/Implementations/crypto_aead/spix128v1/rhys/internal-sliscp-light.c b/spix/Implementations/crypto_aead/spix128v1/rhys/internal-sliscp-light.c index 69b4519..dd3a688 100644 --- a/spix/Implementations/crypto_aead/spix128v1/rhys/internal-sliscp-light.c +++ b/spix/Implementations/crypto_aead/spix128v1/rhys/internal-sliscp-light.c @@ -22,6 +22,8 @@ #include "internal-sliscp-light.h" +#if !defined(__AVR__) + /** * \brief Performs one round of the Simeck-64 block cipher. * @@ -173,11 +175,12 @@ void sliscp_light256_swap_spix(unsigned char block[32]) le_store_word32(block + 12, t2); } -void sliscp_light256_permute_spoc(unsigned char block[32], unsigned rounds) +void sliscp_light256_permute_spoc(unsigned char block[32]) { const unsigned char *rc = sliscp_light256_RC; uint32_t x0, x1, x2, x3, x4, x5, x6, x7; uint32_t t0, t1; + unsigned round; /* Load the block into local state variables */ x0 = be_load_word32(block); @@ -190,7 +193,7 @@ void sliscp_light256_permute_spoc(unsigned char block[32], unsigned rounds) x7 = be_load_word32(block + 28); /* Perform all permutation rounds */ - for (; rounds > 0; --rounds, rc += 4) { + for (round = 0; round < 18; ++round, rc += 4) { /* Apply Simeck-64 to two of the 64-bit sub-blocks */ simeck64_box(x2, x3, rc[0]); simeck64_box(x6, x7, rc[1]); @@ -406,3 +409,5 @@ void sliscp_light320_swap(unsigned char block[40]) le_store_word32(block + 16, t1); le_store_word32(block + 4, t2); } + +#endif /* !__AVR__ */ diff --git a/spix/Implementations/crypto_aead/spix128v1/rhys/internal-sliscp-light.h b/spix/Implementations/crypto_aead/spix128v1/rhys/internal-sliscp-light.h index fa6b9ba..8a5e8d5 100644 --- a/spix/Implementations/crypto_aead/spix128v1/rhys/internal-sliscp-light.h +++ b/spix/Implementations/crypto_aead/spix128v1/rhys/internal-sliscp-light.h @@ -92,7 +92,6 @@ void sliscp_light256_swap_spix(unsigned char block[32]); * \brief Performs the sLiSCP-light permutation on a 256-bit block. * * \param block Points to the block to be permuted. - * \param rounds Number of rounds to be performed, usually 9 or 18. * * The bytes of the block are assumed to be rearranged to match the * requirements of the SpoC-128 cipher. SpoC-128 interleaves the @@ -112,7 +111,7 @@ void sliscp_light256_swap_spix(unsigned char block[32]); * * \sa sliscp_light256_swap_spoc() */ -void sliscp_light256_permute_spoc(unsigned char block[32], unsigned rounds); +void sliscp_light256_permute_spoc(unsigned char block[32]); /** * \brief Swaps rate bytes in a sLiSCP-light 256-bit block for SpoC-128. diff --git a/spix/Implementations/crypto_aead/spix128v1/rhys/internal-util.h b/spix/Implementations/crypto_aead/spix128v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/spix/Implementations/crypto_aead/spix128v1/rhys/internal-util.h +++ b/spix/Implementations/crypto_aead/spix128v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. 
+ * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) 
(rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/aead-common.c b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
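/*
 * Minimal self-check sketch for the composed rotation macros above
 * (illustrative only; "rotl32_generic", "rotr32_by1" and "rotl5_composed"
 * are hypothetical helper names, not part of the library).  It assumes only
 * standard C and the identity that a left rotate by 8 followed by three
 * right rotates by 1 equals a left rotate by 5.
 */
#include <stdint.h>
#include <assert.h>

static uint32_t rotl32_generic(uint32_t x, unsigned bits)
{
    /* generic barrel rotate; bits is assumed to be in 1..31 */
    return (x << bits) | (x >> (32u - bits));
}

static uint32_t rotr32_by1(uint32_t x)
{
    return (x >> 1) | (x << 31);
}

static uint32_t rotl5_composed(uint32_t x)
{
    /* cheap-on-AVR form: rotate left by 8, then right by 1 three times */
    return rotr32_by1(rotr32_by1(rotr32_by1(rotl32_generic(x, 8))));
}

/* usage: assert(rotl5_composed(0x80000001u) == rotl32_generic(0x80000001u, 5)); */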
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/aead-common.h b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. 
- */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. 
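/*
 * Usage sketch for the incremental hash typedefs above (hypothetical helper
 * and parameter names; the caller is assumed to supply a state buffer of the
 * algorithm's advertised state size).
 */
static void hash_incremental_example(aead_hash_init_t init,
                                     aead_hash_update_t update,
                                     aead_hash_finalize_t finalize,
                                     void *state,
                                     unsigned char *out,
                                     const unsigned char *in,
                                     unsigned long long inlen)
{
    init(state);              /* establish the initial hash state */
    update(state, in, inlen); /* absorb the message; may be called repeatedly */
    finalize(state, out);     /* write the final digest to "out" */
}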
- */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
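/*
 * Usage sketch for aead_check_tag() (hypothetical function and buffer names):
 * after decryption, the recomputed tag is compared against the received tag
 * in constant time; on mismatch the plaintext is zeroed and -1 is returned.
 */
static int finish_decrypt_example(unsigned char *plaintext,
                                  unsigned long long plaintext_len,
                                  const unsigned char computed_tag[16],
                                  const unsigned char received_tag[16])
{
    /* 16 is the tag length in bytes for this hypothetical cipher */
    return aead_check_tag(plaintext, plaintext_len,
                          computed_tag, received_tag, 16);
}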
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/api.h b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/encrypt.c b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/encrypt.c deleted file mode 100644 index 6856b6f..0000000 --- a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "spoc.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return spoc_128_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return spoc_128_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-sliscp-192-avr.S b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-sliscp-192-avr.S deleted file mode 100644 index 5860b14..0000000 --- a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-sliscp-192-avr.S +++ /dev/null @@ -1,794 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 72 -table_0: - .byte 7 - .byte 39 - .byte 8 - .byte 41 - .byte 4 - .byte 52 - .byte 12 - .byte 29 - .byte 6 - .byte 46 - .byte 10 - .byte 51 - .byte 37 - .byte 25 - .byte 47 - .byte 42 - .byte 23 - .byte 53 - .byte 56 - .byte 31 - .byte 28 - .byte 15 - .byte 36 - .byte 16 - .byte 18 - .byte 8 - .byte 54 - .byte 24 - .byte 59 - .byte 12 - .byte 13 - .byte 20 - .byte 38 - .byte 10 - .byte 43 - .byte 30 - .byte 21 - .byte 47 - .byte 62 - .byte 49 - .byte 63 - .byte 56 - .byte 1 - .byte 9 - .byte 32 - .byte 36 - .byte 33 - .byte 45 - .byte 48 - .byte 54 - .byte 17 - .byte 27 - .byte 40 - .byte 13 - .byte 57 - .byte 22 - .byte 60 - .byte 43 - .byte 5 - .byte 61 - .byte 34 - .byte 62 - .byte 39 - .byte 3 - .byte 19 - .byte 1 - .byte 52 - .byte 2 - .byte 26 - .byte 33 - .byte 46 - .byte 35 - - .text -.global sliscp_light192_permute - .type sliscp_light192_permute, @function -sliscp_light192_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ld r20,Z - ldd r19,Z+1 - ldd r18,Z+2 - ldd r21,Z+3 - ldd r23,Z+4 - ldd r22,Z+5 - ldd r28,Z+6 - ldd r27,Z+7 - ldd r26,Z+8 - ldd r29,Z+9 - ldd 
r3,Z+10 - ldd r2,Z+11 - ldd r6,Z+12 - ldd r5,Z+13 - ldd r4,Z+14 - ldd r7,Z+15 - ldd r9,Z+16 - ldd r8,Z+17 - ldd r12,Z+18 - ldd r11,Z+19 - ldd r10,Z+20 - ldd r13,Z+21 - ldd r15,Z+22 - ldd r14,Z+23 - push r31 - push r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r24,0 -28: - mov r30,r24 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - inc r24 - movw r16,r26 - mov r1,r28 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r2,r16 - eor r3,r17 - eor r29,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r26 - and r17,r27 - and r1,r28 - eor r2,r16 - eor r3,r17 - eor r29,r1 - com r3 - com r29 - ldi r16,255 - lsr r25 - rol r16 - eor r2,r16 - movw r16,r2 - mov r1,r29 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r26,r16 - eor r27,r17 - eor r28,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r2 - and r17,r3 - and r1,r29 - eor r26,r16 - eor r27,r17 - eor r28,r1 - com r27 - com r28 - ldi r16,255 - lsr r25 - rol r16 - eor r26,r16 - movw r16,r26 - mov r1,r28 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r2,r16 - eor r3,r17 - eor r29,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r26 - and r17,r27 - and r1,r28 - eor r2,r16 - eor r3,r17 - eor r29,r1 - com r3 - com r29 - ldi r16,255 - lsr r25 - rol r16 - eor r2,r16 - movw r16,r2 - mov r1,r29 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r26,r16 - eor r27,r17 - eor r28,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r2 - and r17,r3 - and r1,r29 - eor r26,r16 - eor r27,r17 - eor r28,r1 - com r27 - com r28 - ldi r16,255 - lsr r25 - rol r16 - eor r26,r16 - movw r16,r26 - mov r1,r28 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r2,r16 - eor r3,r17 - eor r29,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r26 - and r17,r27 - and r1,r28 - eor r2,r16 - eor r3,r17 - eor r29,r1 - com r3 - com r29 - ldi r16,255 - lsr r25 - rol r16 - eor r2,r16 - movw r16,r2 - mov r1,r29 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r26,r16 - eor r27,r17 - eor r28,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r2 - and r17,r3 - and r1,r29 - eor r26,r16 - eor r27,r17 - eor r28,r1 - com r27 - com r28 - ldi r16,255 - lsr r25 - rol r16 - eor r26,r16 - mov r30,r24 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld 
r25,Z -#else - lpm - mov r25,r0 -#endif - inc r24 - movw r16,r10 - mov r1,r12 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r14,r16 - eor r15,r17 - eor r13,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r10 - and r17,r11 - and r1,r12 - eor r14,r16 - eor r15,r17 - eor r13,r1 - com r15 - com r13 - ldi r16,255 - lsr r25 - rol r16 - eor r14,r16 - movw r16,r14 - mov r1,r13 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r10,r16 - eor r11,r17 - eor r12,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r14 - and r17,r15 - and r1,r13 - eor r10,r16 - eor r11,r17 - eor r12,r1 - com r11 - com r12 - ldi r16,255 - lsr r25 - rol r16 - eor r10,r16 - movw r16,r10 - mov r1,r12 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r14,r16 - eor r15,r17 - eor r13,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r10 - and r17,r11 - and r1,r12 - eor r14,r16 - eor r15,r17 - eor r13,r1 - com r15 - com r13 - ldi r16,255 - lsr r25 - rol r16 - eor r14,r16 - movw r16,r14 - mov r1,r13 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r10,r16 - eor r11,r17 - eor r12,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r14 - and r17,r15 - and r1,r13 - eor r10,r16 - eor r11,r17 - eor r12,r1 - com r11 - com r12 - ldi r16,255 - lsr r25 - rol r16 - eor r10,r16 - movw r16,r10 - mov r1,r12 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r14,r16 - eor r15,r17 - eor r13,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r10 - and r17,r11 - and r1,r12 - eor r14,r16 - eor r15,r17 - eor r13,r1 - com r15 - com r13 - ldi r16,255 - lsr r25 - rol r16 - eor r14,r16 - movw r16,r14 - mov r1,r13 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r10,r16 - eor r11,r17 - eor r12,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r14 - and r17,r15 - and r1,r13 - eor r10,r16 - eor r11,r17 - eor r12,r1 - com r11 - com r12 - ldi r16,255 - lsr r25 - rol r16 - eor r10,r16 - com r18 - com r19 - com r20 - com r23 - com r21 - mov r30,r24 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - eor r22,r25 - inc r24 - com r4 - com r5 - com r6 - com r9 - com r7 - mov r30,r24 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - eor r8,r25 - inc r24 - movw r16,r18 - mov r1,r20 - eor r16,r26 - eor r17,r27 - eor r1,r28 - movw r18,r26 - mov r20,r28 - movw r26,r4 - mov r28,r6 - eor r26,r10 - eor r27,r11 - 
eor r28,r12 - movw r4,r10 - mov r6,r12 - movw r10,r16 - mov r12,r1 - movw r16,r22 - mov r1,r21 - eor r16,r2 - eor r17,r3 - eor r1,r29 - movw r22,r2 - mov r21,r29 - movw r2,r8 - mov r29,r7 - eor r2,r14 - eor r3,r15 - eor r29,r13 - movw r8,r14 - mov r7,r13 - movw r14,r16 - mov r13,r1 - ldi r17,72 - cpse r24,r17 - rjmp 28b -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - st Z,r20 - std Z+1,r19 - std Z+2,r18 - std Z+3,r21 - std Z+4,r23 - std Z+5,r22 - std Z+6,r28 - std Z+7,r27 - std Z+8,r26 - std Z+9,r29 - std Z+10,r3 - std Z+11,r2 - std Z+12,r6 - std Z+13,r5 - std Z+14,r4 - std Z+15,r7 - std Z+16,r9 - std Z+17,r8 - std Z+18,r12 - std Z+19,r11 - std Z+20,r10 - std Z+21,r13 - std Z+22,r15 - std Z+23,r14 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - eor r1,r1 - ret - .size sliscp_light192_permute, .-sliscp_light192_permute - -#endif diff --git a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-sliscp-256-spoc-avr.S b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-sliscp-256-spoc-avr.S deleted file mode 100644 index 84925b4..0000000 --- a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-sliscp-256-spoc-avr.S +++ /dev/null @@ -1,1142 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 72 -table_0: - .byte 15 - .byte 71 - .byte 8 - .byte 100 - .byte 4 - .byte 178 - .byte 134 - .byte 107 - .byte 67 - .byte 181 - .byte 226 - .byte 111 - .byte 241 - .byte 55 - .byte 137 - .byte 44 - .byte 68 - .byte 150 - .byte 230 - .byte 221 - .byte 115 - .byte 238 - .byte 202 - .byte 153 - .byte 229 - .byte 76 - .byte 23 - .byte 234 - .byte 11 - .byte 245 - .byte 142 - .byte 15 - .byte 71 - .byte 7 - .byte 100 - .byte 4 - .byte 178 - .byte 130 - .byte 107 - .byte 67 - .byte 181 - .byte 161 - .byte 111 - .byte 241 - .byte 55 - .byte 120 - .byte 44 - .byte 68 - .byte 150 - .byte 162 - .byte 221 - .byte 115 - .byte 238 - .byte 185 - .byte 153 - .byte 229 - .byte 76 - .byte 242 - .byte 234 - .byte 11 - .byte 245 - .byte 133 - .byte 15 - .byte 71 - .byte 7 - .byte 35 - .byte 4 - .byte 178 - .byte 130 - .byte 217 - .byte 67 - .byte 181 - - .text -.global sliscp_light256_permute_spoc - .type sliscp_light256_permute_spoc, @function -sliscp_light256_permute_spoc: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 31 - ld r21,Z - ldd r20,Z+1 - ldd r19,Z+2 - ldd r18,Z+3 - ldd r27,Z+4 - ldd r26,Z+5 - ldd r23,Z+6 - ldd r22,Z+7 - ldd r5,Z+8 - ldd r4,Z+9 - ldd r3,Z+10 - ldd r2,Z+11 - ldd r9,Z+12 - ldd r8,Z+13 - ldd r7,Z+14 - ldd r6,Z+15 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - std Y+5,r22 - std Y+6,r23 - std Y+7,r26 - std Y+8,r27 - std Y+9,r2 - std Y+10,r3 - std Y+11,r4 - std Y+12,r5 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r21,Z+16 - ldd r20,Z+17 - ldd r19,Z+18 - ldd r18,Z+19 - ldd r27,Z+20 - ldd r26,Z+21 - ldd r23,Z+22 - ldd r22,Z+23 - ldd r5,Z+24 - ldd r4,Z+25 - ldd r3,Z+26 - ldd r2,Z+27 - ldd r9,Z+28 - ldd r8,Z+29 - ldd r7,Z+30 - ldd r6,Z+31 - push r31 - push r30 - ldi 
r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r1 -52: -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - inc r30 - movw r12,r18 - movw r14,r20 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r22,r12 - eor r23,r13 - eor r26,r14 - eor r27,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r18 - and r13,r19 - and r14,r20 - and r15,r21 - eor r22,r12 - eor r23,r13 - eor r26,r14 - eor r27,r15 - com r23 - com r26 - com r27 - ldi r24,255 - lsr r10 - rol r24 - eor r22,r24 - movw r12,r22 - movw r14,r26 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r22 - and r13,r23 - and r14,r26 - and r15,r27 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - com r19 - com r20 - com r21 - ldi r24,255 - lsr r10 - rol r24 - eor r18,r24 - movw r12,r18 - movw r14,r20 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r22,r12 - eor r23,r13 - eor r26,r14 - eor r27,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r18 - and r13,r19 - and r14,r20 - and r15,r21 - eor r22,r12 - eor r23,r13 - eor r26,r14 - eor r27,r15 - com r23 - com r26 - com r27 - ldi r24,255 - lsr r10 - rol r24 - eor r22,r24 - movw r12,r22 - movw r14,r26 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r22 - and r13,r23 - and r14,r26 - and r15,r27 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - com r19 - com r20 - com r21 - ldi r24,255 - lsr r10 - rol r24 - eor r18,r24 - movw r12,r18 - movw r14,r20 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r22,r12 - eor r23,r13 - eor r26,r14 - eor r27,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r18 - and r13,r19 - and r14,r20 - and r15,r21 - eor r22,r12 - eor r23,r13 - eor r26,r14 - eor r27,r15 - com r23 - com r26 - com r27 - ldi r24,255 - lsr r10 - rol r24 - eor r22,r24 - movw r12,r22 - movw r14,r26 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r22 - and r13,r23 - and r14,r26 - and r15,r27 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - com r19 - com r20 - com r21 - ldi r24,255 - 
lsr r10 - rol r24 - eor r18,r24 - movw r12,r18 - movw r14,r20 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r22,r12 - eor r23,r13 - eor r26,r14 - eor r27,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r18 - and r13,r19 - and r14,r20 - and r15,r21 - eor r22,r12 - eor r23,r13 - eor r26,r14 - eor r27,r15 - com r23 - com r26 - com r27 - ldi r24,255 - lsr r10 - rol r24 - eor r22,r24 - movw r12,r22 - movw r14,r26 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r22 - and r13,r23 - and r14,r26 - and r15,r27 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - com r19 - com r20 - com r21 - ldi r24,255 - lsr r10 - rol r24 - eor r18,r24 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - inc r30 - movw r12,r2 - movw r14,r4 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r6,r12 - eor r7,r13 - eor r8,r14 - eor r9,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r2 - and r13,r3 - and r14,r4 - and r15,r5 - eor r6,r12 - eor r7,r13 - eor r8,r14 - eor r9,r15 - com r7 - com r8 - com r9 - ldi r24,255 - lsr r10 - rol r24 - eor r6,r24 - movw r12,r6 - movw r14,r8 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r2,r12 - eor r3,r13 - eor r4,r14 - eor r5,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r6 - and r13,r7 - and r14,r8 - and r15,r9 - eor r2,r12 - eor r3,r13 - eor r4,r14 - eor r5,r15 - com r3 - com r4 - com r5 - ldi r24,255 - lsr r10 - rol r24 - eor r2,r24 - movw r12,r2 - movw r14,r4 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r6,r12 - eor r7,r13 - eor r8,r14 - eor r9,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r2 - and r13,r3 - and r14,r4 - and r15,r5 - eor r6,r12 - eor r7,r13 - eor r8,r14 - eor r9,r15 - com r7 - com r8 - com r9 - ldi r24,255 - lsr r10 - rol r24 - eor r6,r24 - movw r12,r6 - movw r14,r8 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r2,r12 - eor r3,r13 - eor r4,r14 - eor r5,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r6 - and r13,r7 - and r14,r8 - and r15,r9 - eor r2,r12 - eor r3,r13 - eor r4,r14 - eor r5,r15 - com r3 - com r4 - com r5 - ldi r24,255 - lsr r10 - rol r24 - eor r2,r24 - movw r12,r2 - movw r14,r4 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r6,r12 - eor r7,r13 - eor r8,r14 - eor r9,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 
- lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r2 - and r13,r3 - and r14,r4 - and r15,r5 - eor r6,r12 - eor r7,r13 - eor r8,r14 - eor r9,r15 - com r7 - com r8 - com r9 - ldi r24,255 - lsr r10 - rol r24 - eor r6,r24 - movw r12,r6 - movw r14,r8 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r2,r12 - eor r3,r13 - eor r4,r14 - eor r5,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r6 - and r13,r7 - and r14,r8 - and r15,r9 - eor r2,r12 - eor r3,r13 - eor r4,r14 - eor r5,r15 - com r3 - com r4 - com r5 - ldi r24,255 - lsr r10 - rol r24 - eor r2,r24 - movw r12,r2 - movw r14,r4 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r6,r12 - eor r7,r13 - eor r8,r14 - eor r9,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r2 - and r13,r3 - and r14,r4 - and r15,r5 - eor r6,r12 - eor r7,r13 - eor r8,r14 - eor r9,r15 - com r7 - com r8 - com r9 - ldi r24,255 - lsr r10 - rol r24 - eor r6,r24 - movw r12,r6 - movw r14,r8 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r2,r12 - eor r3,r13 - eor r4,r14 - eor r5,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r6 - and r13,r7 - and r14,r8 - and r15,r9 - eor r2,r12 - eor r3,r13 - eor r4,r14 - eor r5,r15 - com r3 - com r4 - com r5 - ldi r24,255 - lsr r10 - rol r24 - eor r2,r24 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - com r12 - com r13 - com r14 - com r15 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Y+9 - ldd r19,Y+10 - ldd r20,Y+11 - ldd r21,Y+12 - com r18 - com r19 - com r20 - com r21 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - std Y+9,r2 - std Y+10,r3 - std Y+11,r4 - std Y+12,r5 - movw r2,r12 - movw r4,r14 - ldd r12,Y+5 - ldd r13,Y+6 - ldd r14,Y+7 - ldd r15,Y+8 - com r13 - com r14 - com r15 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - eor r12,r10 - inc r30 - eor r12,r22 - eor r13,r23 - eor r14,r26 - eor r15,r27 - std Y+5,r22 - std Y+6,r23 - std Y+7,r26 - std Y+8,r27 - ldd r22,Y+13 - ldd r23,Y+14 - ldd r26,Y+15 - ldd r27,Y+16 - com r23 - com r26 - com r27 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - eor r22,r10 - inc r30 - eor r22,r6 - eor r23,r7 - eor r26,r8 - eor r27,r9 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - movw r6,r12 - movw r8,r14 - ldi r25,72 - cpse r30,r25 - rjmp 52b -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - std Z+16,r21 - std Z+17,r20 - std Z+18,r19 - std Z+19,r18 - std Z+20,r27 - std Z+21,r26 - std Z+22,r23 - std Z+23,r22 - std Z+24,r5 - std Z+25,r4 - std Z+26,r3 - std Z+27,r2 - std Z+28,r9 - std Z+29,r8 - std Z+30,r7 - std Z+31,r6 - ldd r18,Y+1 - ldd r19,Y+2 - ldd r20,Y+3 - ldd r21,Y+4 - ldd r22,Y+5 - 
ldd r23,Y+6 - ldd r26,Y+7 - ldd r27,Y+8 - ldd r2,Y+9 - ldd r3,Y+10 - ldd r4,Y+11 - ldd r5,Y+12 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - st Z,r21 - std Z+1,r20 - std Z+2,r19 - std Z+3,r18 - std Z+4,r27 - std Z+5,r26 - std Z+6,r23 - std Z+7,r22 - std Z+8,r5 - std Z+9,r4 - std Z+10,r3 - std Z+11,r2 - std Z+12,r9 - std Z+13,r8 - std Z+14,r7 - std Z+15,r6 - adiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r15 - pop r14 - pop r13 - pop r12 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sliscp_light256_permute_spoc, .-sliscp_light256_permute_spoc - - .text -.global sliscp_light256_swap_spoc - .type sliscp_light256_swap_spoc, @function -sliscp_light256_swap_spoc: - movw r30,r24 -.L__stack_usage = 2 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - std Z+8,r22 - std Z+9,r23 - std Z+10,r26 - std Z+11,r27 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r26,Z+22 - ldd r27,Z+23 - std Z+20,r18 - std Z+21,r19 - std Z+22,r20 - std Z+23,r21 - std Z+12,r22 - std Z+13,r23 - std Z+14,r26 - std Z+15,r27 - ret - .size sliscp_light256_swap_spoc, .-sliscp_light256_swap_spoc - -#endif diff --git a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-sliscp-light.c b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-sliscp-light.c deleted file mode 100644 index dd3a688..0000000 --- a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-sliscp-light.c +++ /dev/null @@ -1,413 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-sliscp-light.h" - -#if !defined(__AVR__) - -/** - * \brief Performs one round of the Simeck-64 block cipher. - * - * \param x Left half of the 64-bit block. - * \param y Right half of the 64-bit block. - */ -#define simeck64_round(x, y) \ - do { \ - (y) ^= (leftRotate5((x)) & (x)) ^ leftRotate1((x)) ^ \ - 0xFFFFFFFEU ^ (_rc & 1); \ - _rc >>= 1; \ - } while (0) - -/** - * \brief Encrypts a 64-bit block with the 8 round version of Simeck-64. - * - * \param x Left half of the 64-bit block. - * \param y Right half of the 64-bit block. - * \param rc Round constants for the 8 rounds, 1 bit per round. 
- * - * It is assumed that the two halves have already been converted from - * big-endian to host byte order before calling this function. The output - * halves will also be in host byte order. - */ -#define simeck64_box(x, y, rc) \ - do { \ - unsigned char _rc = (rc); \ - simeck64_round(x, y); /* Round 1 */ \ - simeck64_round(y, x); /* Round 2 */ \ - simeck64_round(x, y); /* Round 3 */ \ - simeck64_round(y, x); /* Round 4 */ \ - simeck64_round(x, y); /* Round 5 */ \ - simeck64_round(y, x); /* Round 6 */ \ - simeck64_round(x, y); /* Round 7 */ \ - simeck64_round(y, x); /* Round 8 */ \ - } while (0) - -/* Helper macros for 48-bit left rotations */ -#define leftRotate5_48(x) (((x) << 5) | ((x) >> 19)) -#define leftRotate1_48(x) (((x) << 1) | ((x) >> 23)) - -/** - * \brief Performs one round of the Simeck-48 block cipher. - * - * \param x Left half of the 48-bit block. - * \param y Right half of the 48-bit block. - */ -#define simeck48_round(x, y) \ - do { \ - (y) ^= (leftRotate5_48((x)) & (x)) ^ leftRotate1_48((x)) ^ \ - 0x00FFFFFEU ^ (_rc & 1); \ - (y) &= 0x00FFFFFFU; \ - _rc >>= 1; \ - } while (0) - -/** - * \brief Encrypts a 48-bit block with the 6 round version of Simeck-48. - * - * \param x Left half of the 48-bit block. - * \param y Right half of the 48-bit block. - * \param rc Round constants for the 8 rounds, 1 bit per round. - * - * It is assumed that the two halves have already been converted from - * big-endian to host byte order before calling this function. The output - * halves will also be in host byte order. - */ -#define simeck48_box(x, y, rc) \ - do { \ - unsigned char _rc = (rc); \ - simeck48_round(x, y); /* Round 1 */ \ - simeck48_round(y, x); /* Round 2 */ \ - simeck48_round(x, y); /* Round 3 */ \ - simeck48_round(y, x); /* Round 4 */ \ - simeck48_round(x, y); /* Round 5 */ \ - simeck48_round(y, x); /* Round 6 */ \ - } while (0) - -/* Interleaved rc0, rc1, sc0, and sc1 values for each round */ -static unsigned char const sliscp_light256_RC[18 * 4] = { - 0x0f, 0x47, 0x08, 0x64, 0x04, 0xb2, 0x86, 0x6b, - 0x43, 0xb5, 0xe2, 0x6f, 0xf1, 0x37, 0x89, 0x2c, - 0x44, 0x96, 0xe6, 0xdd, 0x73, 0xee, 0xca, 0x99, - 0xe5, 0x4c, 0x17, 0xea, 0x0b, 0xf5, 0x8e, 0x0f, - 0x47, 0x07, 0x64, 0x04, 0xb2, 0x82, 0x6b, 0x43, - 0xb5, 0xa1, 0x6f, 0xf1, 0x37, 0x78, 0x2c, 0x44, - 0x96, 0xa2, 0xdd, 0x73, 0xee, 0xb9, 0x99, 0xe5, - 0x4c, 0xf2, 0xea, 0x0b, 0xf5, 0x85, 0x0f, 0x47, - 0x07, 0x23, 0x04, 0xb2, 0x82, 0xd9, 0x43, 0xb5 -}; - -void sliscp_light256_permute_spix(unsigned char block[32], unsigned rounds) -{ - const unsigned char *rc = sliscp_light256_RC; - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t t0, t1; - - /* Load the block into local state variables */ - x0 = be_load_word32(block); - x1 = be_load_word32(block + 4); - x2 = be_load_word32(block + 8); - x3 = be_load_word32(block + 24); /* Assumes the block is pre-swapped */ - x4 = be_load_word32(block + 16); - x5 = be_load_word32(block + 20); - x6 = be_load_word32(block + 12); - x7 = be_load_word32(block + 28); - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds, rc += 4) { - /* Apply Simeck-64 to two of the 64-bit sub-blocks */ - simeck64_box(x2, x3, rc[0]); - simeck64_box(x6, x7, rc[1]); - - /* Add step constants */ - x0 ^= 0xFFFFFFFFU; - x1 ^= 0xFFFFFF00U ^ rc[2]; - x4 ^= 0xFFFFFFFFU; - x5 ^= 0xFFFFFF00U ^ rc[3]; - - /* Mix the sub-blocks */ - t0 = x0 ^ x2; - t1 = x1 ^ x3; - x0 = x2; - x1 = x3; - x2 = x4 ^ x6; - x3 = x5 ^ x7; - x4 = x6; - x5 = x7; - x6 = t0; - x7 = t1; - } - - /* Store the state back into the block */ - 
be_store_word32(block, x0); - be_store_word32(block + 4, x1); - be_store_word32(block + 8, x2); - be_store_word32(block + 24, x3); /* Assumes the block is pre-swapped */ - be_store_word32(block + 16, x4); - be_store_word32(block + 20, x5); - be_store_word32(block + 12, x6); - be_store_word32(block + 28, x7); -} - -void sliscp_light256_swap_spix(unsigned char block[32]) -{ - uint32_t t1, t2; - t1 = le_load_word32(block + 12); - t2 = le_load_word32(block + 24); - le_store_word32(block + 24, t1); - le_store_word32(block + 12, t2); -} - -void sliscp_light256_permute_spoc(unsigned char block[32]) -{ - const unsigned char *rc = sliscp_light256_RC; - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t t0, t1; - unsigned round; - - /* Load the block into local state variables */ - x0 = be_load_word32(block); - x1 = be_load_word32(block + 4); - x2 = be_load_word32(block + 16); /* Assumes the block is pre-swapped */ - x3 = be_load_word32(block + 20); - x4 = be_load_word32(block + 8); - x5 = be_load_word32(block + 12); - x6 = be_load_word32(block + 24); - x7 = be_load_word32(block + 28); - - /* Perform all permutation rounds */ - for (round = 0; round < 18; ++round, rc += 4) { - /* Apply Simeck-64 to two of the 64-bit sub-blocks */ - simeck64_box(x2, x3, rc[0]); - simeck64_box(x6, x7, rc[1]); - - /* Add step constants */ - x0 ^= 0xFFFFFFFFU; - x1 ^= 0xFFFFFF00U ^ rc[2]; - x4 ^= 0xFFFFFFFFU; - x5 ^= 0xFFFFFF00U ^ rc[3]; - - /* Mix the sub-blocks */ - t0 = x0 ^ x2; - t1 = x1 ^ x3; - x0 = x2; - x1 = x3; - x2 = x4 ^ x6; - x3 = x5 ^ x7; - x4 = x6; - x5 = x7; - x6 = t0; - x7 = t1; - } - - /* Store the state back into the block */ - be_store_word32(block, x0); - be_store_word32(block + 4, x1); - be_store_word32(block + 16, x2); /* Assumes the block is pre-swapped */ - be_store_word32(block + 20, x3); - be_store_word32(block + 8, x4); - be_store_word32(block + 12, x5); - be_store_word32(block + 24, x6); - be_store_word32(block + 28, x7); -} - -void sliscp_light256_swap_spoc(unsigned char block[32]) -{ - uint64_t t1, t2; - t1 = le_load_word64(block + 8); - t2 = le_load_word64(block + 16); - le_store_word64(block + 16, t1); - le_store_word64(block + 8, t2); -} - -/* Load a big-endian 24-bit word from a byte buffer */ -#define be_load_word24(ptr) \ - ((((uint32_t)((ptr)[0])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[2]))) - -/* Store a big-endian 24-bit word into a byte buffer */ -#define be_store_word24(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 16); \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)_x; \ - } while (0) - -void sliscp_light192_permute(unsigned char block[24]) -{ - /* Interleaved rc0, rc1, sc0, and sc1 values for each round */ - static unsigned char const RC[18 * 4] = { - 0x07, 0x27, 0x08, 0x29, 0x04, 0x34, 0x0c, 0x1d, - 0x06, 0x2e, 0x0a, 0x33, 0x25, 0x19, 0x2f, 0x2a, - 0x17, 0x35, 0x38, 0x1f, 0x1c, 0x0f, 0x24, 0x10, - 0x12, 0x08, 0x36, 0x18, 0x3b, 0x0c, 0x0d, 0x14, - 0x26, 0x0a, 0x2b, 0x1e, 0x15, 0x2f, 0x3e, 0x31, - 0x3f, 0x38, 0x01, 0x09, 0x20, 0x24, 0x21, 0x2d, - 0x30, 0x36, 0x11, 0x1b, 0x28, 0x0d, 0x39, 0x16, - 0x3c, 0x2b, 0x05, 0x3d, 0x22, 0x3e, 0x27, 0x03, - 0x13, 0x01, 0x34, 0x02, 0x1a, 0x21, 0x2e, 0x23 - }; - const unsigned char *rc = RC; - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t t0, t1; - unsigned round; - - /* Load the block into local state variables. 
Each 24-bit block is - * placed into a separate 32-bit word which improves efficiency below */ - x0 = be_load_word24(block); - x1 = be_load_word24(block + 3); - x2 = be_load_word24(block + 6); - x3 = be_load_word24(block + 9); - x4 = be_load_word24(block + 12); - x5 = be_load_word24(block + 15); - x6 = be_load_word24(block + 18); - x7 = be_load_word24(block + 21); - - /* Perform all permutation rounds */ - for (round = 0; round < 18; ++round, rc += 4) { - /* Apply Simeck-48 to two of the 48-bit sub-blocks */ - simeck48_box(x2, x3, rc[0]); - simeck48_box(x6, x7, rc[1]); - - /* Add step constants */ - x0 ^= 0x00FFFFFFU; - x1 ^= 0x00FFFF00U ^ rc[2]; - x4 ^= 0x00FFFFFFU; - x5 ^= 0x00FFFF00U ^ rc[3]; - - /* Mix the sub-blocks */ - t0 = x0 ^ x2; - t1 = x1 ^ x3; - x0 = x2; - x1 = x3; - x2 = x4 ^ x6; - x3 = x5 ^ x7; - x4 = x6; - x5 = x7; - x6 = t0; - x7 = t1; - } - - /* Store the state back into the block */ - be_store_word24(block, x0); - be_store_word24(block + 3, x1); - be_store_word24(block + 6, x2); - be_store_word24(block + 9, x3); - be_store_word24(block + 12, x4); - be_store_word24(block + 15, x5); - be_store_word24(block + 18, x6); - be_store_word24(block + 21, x7); -} - -void sliscp_light320_permute(unsigned char block[40]) -{ - /* Interleaved rc0, rc1, rc2, sc0, sc1, and sc2 values for each round */ - static unsigned char const RC[16 * 6] = { - 0x07, 0x53, 0x43, 0x50, 0x28, 0x14, 0x0a, 0x5d, - 0xe4, 0x5c, 0xae, 0x57, 0x9b, 0x49, 0x5e, 0x91, - 0x48, 0x24, 0xe0, 0x7f, 0xcc, 0x8d, 0xc6, 0x63, - 0xd1, 0xbe, 0x32, 0x53, 0xa9, 0x54, 0x1a, 0x1d, - 0x4e, 0x60, 0x30, 0x18, 0x22, 0x28, 0x75, 0x68, - 0x34, 0x9a, 0xf7, 0x6c, 0x25, 0xe1, 0x70, 0x38, - 0x62, 0x82, 0xfd, 0xf6, 0x7b, 0xbd, 0x96, 0x47, - 0xf9, 0x9d, 0xce, 0x67, 0x71, 0x6b, 0x76, 0x40, - 0x20, 0x10, 0xaa, 0x88, 0xa0, 0x4f, 0x27, 0x13, - 0x2b, 0xdc, 0xb0, 0xbe, 0x5f, 0x2f, 0xe9, 0x8b, - 0x09, 0x5b, 0xad, 0xd6, 0xcf, 0x59, 0x1e, 0xe9, - 0x74, 0xba, 0xb7, 0xc6, 0xad, 0x7f, 0x3f, 0x1f - }; - const unsigned char *rc = RC; - uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9; - uint32_t t0, t1; - unsigned round; - - /* Load the block into local state variables */ - x0 = be_load_word32(block); - x1 = be_load_word32(block + 16); /* Assumes the block is pre-swapped */ - x2 = be_load_word32(block + 8); - x3 = be_load_word32(block + 12); - x4 = be_load_word32(block + 4); - x5 = be_load_word32(block + 20); - x6 = be_load_word32(block + 24); - x7 = be_load_word32(block + 28); - x8 = be_load_word32(block + 32); - x9 = be_load_word32(block + 36); - - /* Perform all permutation rounds */ - for (round = 0; round < 16; ++round, rc += 6) { - /* Apply Simeck-64 to three of the 64-bit sub-blocks */ - simeck64_box(x0, x1, rc[0]); - simeck64_box(x4, x5, rc[1]); - simeck64_box(x8, x9, rc[2]); - x6 ^= x8; - x7 ^= x9; - x2 ^= x4; - x3 ^= x5; - x8 ^= x0; - x9 ^= x1; - - /* Add step constants */ - x2 ^= 0xFFFFFFFFU; - x3 ^= 0xFFFFFF00U ^ rc[3]; - x6 ^= 0xFFFFFFFFU; - x7 ^= 0xFFFFFF00U ^ rc[4]; - x8 ^= 0xFFFFFFFFU; - x9 ^= 0xFFFFFF00U ^ rc[5]; - - /* Rotate the sub-blocks */ - t0 = x8; - t1 = x9; - x8 = x2; - x9 = x3; - x2 = x4; - x3 = x5; - x4 = x0; - x5 = x1; - x0 = x6; - x1 = x7; - x6 = t0; - x7 = t1; - } - - /* Store the state back into the block */ - be_store_word32(block, x0); - be_store_word32(block + 16, x1); /* Assumes the block is pre-swapped */ - be_store_word32(block + 8, x2); - be_store_word32(block + 12, x3); - be_store_word32(block + 4, x4); - be_store_word32(block + 20, x5); - be_store_word32(block + 24, x6); - be_store_word32(block + 28, x7); - 
be_store_word32(block + 32, x8); - be_store_word32(block + 36, x9); -} - -void sliscp_light320_swap(unsigned char block[40]) -{ - uint32_t t1, t2; - t1 = le_load_word32(block + 4); - t2 = le_load_word32(block + 16); - le_store_word32(block + 16, t1); - le_store_word32(block + 4, t2); -} - -#endif /* !__AVR__ */ diff --git a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-sliscp-light.h b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-sliscp-light.h deleted file mode 100644 index 8a5e8d5..0000000 --- a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-sliscp-light.h +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SLISCP_LIGHT_H -#define LW_INTERNAL_SLISCP_LIGHT_H - -/** - * \file internal-sliscp-light.h - * \brief sLiSCP-light permutation - * - * There are three variants of sLiSCP-light in use in the NIST submissions: - * - * \li sLiSCP-light-256 with a 256-bit block size, used in SPIX and SpoC. - * \li sLiSCP-light-192 with a 192-bit block size, used in SpoC. - * \li sLiSCP-light-320 with a 320-bit block size, used in ACE. - * - * References: https://uwaterloo.ca/communications-security-lab/lwc/ace, - * https://uwaterloo.ca/communications-security-lab/lwc/spix, - * https://uwaterloo.ca/communications-security-lab/lwc/spoc - */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the state for sLiSCP-light-256. - */ -#define SLISCP_LIGHT256_STATE_SIZE 32 - -/** - * \brief Size of the state for sLiSCP-light-192. - */ -#define SLISCP_LIGHT192_STATE_SIZE 24 - -/** - * \brief Size of the state for sLiSCP-light-320. - */ -#define SLISCP_LIGHT320_STATE_SIZE 40 - -/** - * \brief Performs the sLiSCP-light permutation on a 256-bit block. - * - * \param block Points to the block to be permuted. - * \param rounds Number of rounds to be performed, usually 9 or 18. - * - * The bytes of the block are assumed to be rearranged to match the - * requirements of the SPIX cipher. SPIX places the rate bytes at - * positions 8, 9, 10, 11, 24, 25, 26, and 27. - * - * This function assumes that bytes 24-27 have been pre-swapped with - * bytes 12-15 so that the rate portion of the state is contiguous. - * - * The sliscp_light256_swap_spix() function can be used to switch - * between the canonical order and the pre-swapped order. 
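As an illustration of the pre-swap convention described above, a caller that keeps its 32-byte state in the canonical SPIX byte order could drive the permutation roughly as follows. This is only a sketch against the declarations in this header; the helper name spix_permute_canonical is illustrative and not part of the API.

/* Apply the SPIX permutation to a state held in canonical byte order. */
static void spix_permute_canonical(unsigned char state[32], unsigned rounds)
{
    sliscp_light256_swap_spix(state);            /* canonical -> pre-swapped */
    sliscp_light256_permute_spix(state, rounds); /* rounds is usually 9 or 18 */
    sliscp_light256_swap_spix(state);            /* pre-swapped -> canonical */
}

In practice an implementation would normally swap once when the state is set up and once before producing output, rather than around every permutation call; that is the whole point of keeping the state pre-swapped.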
- * - * \sa sliscp_light256_swap_spix() - */ -void sliscp_light256_permute_spix(unsigned char block[32], unsigned rounds); - -/** - * \brief Swaps rate bytes in a sLiSCP-light 256-bit block for SPIX. - * - * \param block Points to the block to be rate-swapped. - * - * \sa sliscp_light256_permute_spix() - */ -void sliscp_light256_swap_spix(unsigned char block[32]); - -/** - * \brief Performs the sLiSCP-light permutation on a 256-bit block. - * - * \param block Points to the block to be permuted. - * - * The bytes of the block are assumed to be rearranged to match the - * requirements of the SpoC-128 cipher. SpoC-128 interleaves the - * rate bytes and the mask bytes. This version assumes that the - * rate and mask are in contiguous bytes of the state. - * - * SpoC-128 absorbs bytes using the mask bytes of the state at offsets - * 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, and 31. - * It squeezes bytes using the rate bytes of the state at offsets - * 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, and 23. - * - * This function assumes that bytes 8-15 have been pre-swapped with 16-23 - * so that the rate and mask portions of the state are contiguous. - * - * The sliscp_light256_swap_spoc() function can be used to switch - * between the canonical order and the pre-swapped order. - * - * \sa sliscp_light256_swap_spoc() - */ -void sliscp_light256_permute_spoc(unsigned char block[32]); - -/** - * \brief Swaps rate bytes in a sLiSCP-light 256-bit block for SpoC-128. - * - * \param block Points to the block to be rate-swapped. - * - * \sa sliscp_light256_permute_spoc() - */ -void sliscp_light256_swap_spoc(unsigned char block[32]); - -/** - * \brief Performs the sLiSCP-light permutation on a 192-bit block. - * - * \param block Points to the block to be permuted. - */ -void sliscp_light192_permute(unsigned char block[24]); - -/** - * \brief Performs the sLiSCP-light permutation on a 320-bit block. - * - * \param block Points to the block to be permuted. - * - * The ACE specification refers to this permutation as "ACE" but that - * can be confused with the name of the AEAD mode so we call this - * permutation "sLiSCP-light-320" instead. - * - * ACE absorbs and squeezes data at the rate bytes 0, 1, 2, 3, 16, 17, 18, 19. - * Efficiency can suffer because of the discontinuity in rate byte positions. - * - * To counteract this, we assume that the input to the permutation has been - * pre-swapped: bytes 4, 5, 6, 7 are swapped with bytes 16, 17, 18, 19 so - * that the rate is contiguous at the start of the state. - * - * The sliscp_light320_swap() function can be used to switch between the - * canonical order and the pre-swapped order. - * - * \sa sliscp_light320_swap() - */ -void sliscp_light320_permute(unsigned char block[40]); - -/** - * \brief Swaps rate bytes in a sLiSCP-light 320-bit block. - * - * \param block Points to the block to be rate-swapped. - * - * \sa sliscp_light320_permute() - */ -void sliscp_light320_swap(unsigned char block[40]); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-util.h b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) 
| \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = 
(src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/spoc.c b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/spoc.c deleted file mode 100644 index 92ee233..0000000 --- a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/spoc.c +++ /dev/null @@ -1,406 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "spoc.h" -#include "internal-sliscp-light.h" -#include "internal-util.h" -#include - -/** - * \brief Size of the state for the internal sLiSCP-light-256 permutation. - */ -#define SPOC_128_STATE_SIZE SLISCP_LIGHT256_STATE_SIZE - -/** - * \brief Rate for absorbing data into the sLiSCP-light-256 state and for - * squeezing data out again. - */ -#define SPOC_128_RATE 16 - -/** - * \brief Size of the state for the internal sLiSCP-light-192 permutation. - */ -#define SPOC_64_STATE_SIZE SLISCP_LIGHT192_STATE_SIZE - -/** - * \brief Rate for absorbing data into the sLiSCP-light-192 state and for - * squeezing data out again. 
- */ -#define SPOC_64_RATE 8 - -aead_cipher_t const spoc_128_cipher = { - "SpoC-128", - SPOC_KEY_SIZE, - SPOC_NONCE_SIZE, - SPOC_128_TAG_SIZE, - AEAD_FLAG_NONE, - spoc_128_aead_encrypt, - spoc_128_aead_decrypt -}; - -aead_cipher_t const spoc_64_cipher = { - "SpoC-64", - SPOC_KEY_SIZE, - SPOC_NONCE_SIZE, - SPOC_64_TAG_SIZE, - AEAD_FLAG_NONE, - spoc_64_aead_encrypt, - spoc_64_aead_decrypt -}; - -/* Indices of where a rate byte is located to help with padding */ -/* -static unsigned char const spoc_128_rate_posn[16] = { - 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 -}; -static unsigned char const spoc_128_mask_posn[16] = { - 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 -}; -*/ -static unsigned char const spoc_64_rate_posn[8] = { - 0, 1, 2, 3, 12, 13, 14, 15 -}; -static unsigned char const spoc_64_mask_posn[8] = { - 6, 7, 8, 9, 18, 19, 20, 21 -}; - -/** - * \brief Initializes the SpoC-128 state. - * - * \param state sLiSCP-light-256 permutation state. - * \param k Points to the 128-bit key. - * \param npub Points to the 128-bit nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - */ -static void spoc_128_init - (unsigned char state[SPOC_128_STATE_SIZE], - const unsigned char *k, const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned temp; - - /* Initialize the state by combining the key and nonce */ - memcpy(state, npub, 16); - memcpy(state + 16, k, 16); - - /* Absorb the associated data into the state */ - if (adlen != 0) { - while (adlen >= SPOC_128_RATE) { - sliscp_light256_permute_spoc(state); - lw_xor_block(state + 16, ad, SPOC_128_RATE); - state[0] ^= 0x20; /* domain separation */ - ad += SPOC_128_RATE; - adlen -= SPOC_128_RATE; - } - temp = (unsigned)adlen; - if (temp > 0) { - sliscp_light256_permute_spoc(state); - lw_xor_block(state + 16, ad, temp); - state[temp + 16] ^= 0x80; /* padding */ - state[0] ^= 0x30; /* domain separation */ - } - } -} - -/** - * \brief Initializes the SpoC-64 state. - * - * \param state sLiSCP-light-192 permutation state. - * \param k Points to the 128-bit key. - * \param npub Points to the 128-bit nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - */ -static void spoc_64_init - (unsigned char state[SPOC_64_STATE_SIZE], - const unsigned char *k, const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned temp; - - /* Initialize the state by interleaving the key and nonce */ - memcpy(state, npub, 4); - state[4] = k[6]; - state[5] = k[7]; - memcpy(state + 6, k, 6); - memcpy(state + 12, npub + 4, 4); - state[16] = k[14]; - state[17] = k[15]; - memcpy(state + 18, k + 8, 6); - sliscp_light192_permute(state); - lw_xor_block(state + 6, npub + 8, 4); - lw_xor_block(state + 18, npub + 12, 4); - - /* Absorb the associated data into the state */ - if (adlen != 0) { - while (adlen >= SPOC_64_RATE) { - sliscp_light192_permute(state); - lw_xor_block(state + 6, ad, 4); - lw_xor_block(state + 18, ad + 4, 4); - state[0] ^= 0x20; /* domain separation */ - ad += SPOC_64_RATE; - adlen -= SPOC_64_RATE; - } - temp = (unsigned)adlen; - if (temp > 0) { - sliscp_light192_permute(state); - state[spoc_64_mask_posn[temp]] ^= 0x80; /* padding */ - state[0] ^= 0x30; /* domain separation */ - while (temp > 0) { - --temp; - state[spoc_64_mask_posn[temp]] ^= ad[temp]; - } - } - } -} - -/** - * \brief Finalizes the SpoC-128 encryption or decryption operation. 
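The interleaved key/nonce load in spoc_64_init() above can be hard to follow from the memcpy calls alone. Written out, the assignments amount to the following 24-byte layout; this is only a restatement of the code above, not additional behaviour:

state[ 0.. 3] = npub[ 0.. 3]    state[12..15] = npub[ 4.. 7]
state[ 4.. 5] = k[ 6.. 7]       state[16..17] = k[14..15]
state[ 6..11] = k[ 0.. 5]       state[18..23] = k[ 8..13]

The state is then permuted once, after which npub[8..11] and npub[12..15] are XORed into offsets 6..9 and 18..21, the first four mask byte positions of each half.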
- * - * \param state sLiSCP-light-256 permutation state. - * \param tag Points to the 16 byte buffer to receive the computed tag. - */ -static void spoc_128_finalize - (unsigned char state[SPOC_128_STATE_SIZE], unsigned char *tag) -{ - /* Pad and permute the state one more time */ - state[0] ^= 0x80; - sliscp_light256_permute_spoc(state); - - /* Copy out the authentication tag */ - memcpy(tag, state + 16, 16); -} - -/** - * \brief Finalizes the SpoC-64 encryption or decryption operation. - * - * \param state sLiSCP-light-192 permutation state. - * \param tag Points to the 16 byte buffer to receive the computed tag. - */ -static void spoc_64_finalize - (unsigned char state[SPOC_64_STATE_SIZE], unsigned char *tag) -{ - /* Pad and permute the state one more time */ - state[0] ^= 0x80; - sliscp_light192_permute(state); - - /* Copy out the authentication tag */ - memcpy(tag, state + 6, 4); - memcpy(tag + 4, state + 18, 4); -} - -int spoc_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[SPOC_128_STATE_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPOC_128_TAG_SIZE; - - /* Initialize the SpoC-128 state and absorb the associated data */ - spoc_128_init(state, k, npub, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen != 0) { - while (mlen >= SPOC_128_RATE) { - sliscp_light256_permute_spoc(state); - lw_xor_block(state + 16, m, SPOC_128_RATE); - lw_xor_block_2_src(c, m, state, SPOC_128_RATE); - state[0] ^= 0x40; /* domain separation */ - c += SPOC_128_RATE; - m += SPOC_128_RATE; - mlen -= SPOC_128_RATE; - } - if (mlen != 0) { - unsigned temp = (unsigned)mlen; - sliscp_light256_permute_spoc(state); - lw_xor_block(state + 16, m, temp); - lw_xor_block_2_src(c, m, state, temp); - state[temp + 16] ^= 0x80; /* padding */ - state[0] ^= 0x50; /* domain separation */ - c += mlen; - } - } - - /* Finalize and generate the authentication tag */ - spoc_128_finalize(state, c); - return 0; -} - -int spoc_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[SPOC_128_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SPOC_128_TAG_SIZE) - return -1; - *mlen = clen - SPOC_128_TAG_SIZE; - - /* Initialize the Spoc-128 state and absorb the associated data */ - spoc_128_init(state, k, npub, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPOC_128_TAG_SIZE; - if (clen != 0) { - while (clen >= SPOC_128_RATE) { - sliscp_light256_permute_spoc(state); - lw_xor_block_2_src(m, c, state, SPOC_128_RATE); - lw_xor_block(state + 16, m, SPOC_128_RATE); - state[0] ^= 0x40; /* domain separation */ - c += SPOC_128_RATE; - m += SPOC_128_RATE; - clen -= SPOC_128_RATE; - } - if (clen != 0) { - unsigned temp = (unsigned)clen; - sliscp_light256_permute_spoc(state); - lw_xor_block_2_src(m, c, state, temp); - lw_xor_block(state + 16, m, temp); - state[temp + 16] ^= 0x80; /* padding */ - state[0] ^= 0x50; /* domain separation */ - c += clen; - } - } - - /* Finalize and check the authentication tag */ - 
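/* Note: spoc_128_finalize() copies the 16-byte tag from bytes 16..31 of the
   state over bytes 0..15, and aead_check_tag() then compares that value with
   the tag that follows the ciphertext (c points just past the payload here). */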
spoc_128_finalize(state, state); - return aead_check_tag(mtemp, *mlen, state, c, SPOC_128_TAG_SIZE); -} - -int spoc_64_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[SPOC_64_STATE_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPOC_64_TAG_SIZE; - - /* Initialize the SpoC-64 state and absorb the associated data */ - spoc_64_init(state, k, npub, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen != 0) { - while (mlen >= SPOC_64_RATE) { - sliscp_light192_permute(state); - lw_xor_block(state + 6, m, 4); - lw_xor_block(state + 18, m + 4, 4); - lw_xor_block_2_src(c, m, state, 4); - lw_xor_block_2_src(c + 4, m + 4, state + 12, 4); - state[0] ^= 0x40; /* domain separation */ - c += SPOC_64_RATE; - m += SPOC_64_RATE; - mlen -= SPOC_64_RATE; - } - if (mlen != 0) { - unsigned temp = (unsigned)mlen; - sliscp_light192_permute(state); - state[spoc_64_mask_posn[temp]] ^= 0x80; /* padding */ - while (temp > 0) { - --temp; - unsigned char mbyte = m[temp]; - state[spoc_64_mask_posn[temp]] ^= mbyte; - c[temp] = mbyte ^ state[spoc_64_rate_posn[temp]]; - } - state[0] ^= 0x50; /* domain separation */ - c += mlen; - } - } - - /* Finalize and generate the authentication tag */ - spoc_64_finalize(state, c); - return 0; -} - -int spoc_64_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[SPOC_64_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SPOC_64_TAG_SIZE) - return -1; - *mlen = clen - SPOC_64_TAG_SIZE; - - /* Initialize the Spoc-64 state and absorb the associated data */ - spoc_64_init(state, k, npub, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPOC_64_TAG_SIZE; - if (clen != 0) { - while (clen >= SPOC_64_RATE) { - sliscp_light192_permute(state); - lw_xor_block_2_src(m, c, state, 4); - lw_xor_block_2_src(m + 4, c + 4, state + 12, 4); - lw_xor_block(state + 6, m, 4); - lw_xor_block(state + 18, m + 4, 4); - state[0] ^= 0x40; /* domain separation */ - c += SPOC_64_RATE; - m += SPOC_64_RATE; - clen -= SPOC_64_RATE; - } - if (clen != 0) { - unsigned temp = (unsigned)clen; - sliscp_light192_permute(state); - state[spoc_64_mask_posn[temp]] ^= 0x80; /* padding */ - while (temp > 0) { - --temp; - unsigned char mbyte = c[temp] ^ state[spoc_64_rate_posn[temp]]; - state[spoc_64_mask_posn[temp]] ^= mbyte; - m[temp] = mbyte; - } - state[0] ^= 0x50; /* domain separation */ - c += clen; - } - } - - /* Finalize and check the authentication tag */ - spoc_64_finalize(state, state); - return aead_check_tag(mtemp, *mlen, state, c, SPOC_64_TAG_SIZE); -} diff --git a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/spoc.h b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/spoc.h deleted file mode 100644 index 712c2d0..0000000 --- a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys-avr/spoc.h +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SPOC_H -#define LWCRYPTO_SPOC_H - -#include "aead-common.h" - -/** - * \file spoc.h - * \brief SpoC authenticated encryption algorithm. - * - * SpoC is a family of authenticated encryption algorithms with two - * members, SpoC-128 and Spoc-64. The algorithms use a Beetle-like - * sponge construction built on top of the sLiSCP-light permutation. - * - * \li Spoc-128 has a 128-bit key, a 128-bit nonce, and a 128-bit tag. - * It is built around the 256-bit version of the sLiSCP-light permutation. - * This is the primary member of the family. - * \li Spoc-64 has a 128-bit key, a 128-bit nonce, and a 64-bit tag. - * It is built around the 192-bit version of the sLiSCP-light permutation. - * - * Spoc-128 has good performance on small packets (16 bytes or less) - * on 32-bit embedded platforms. - * - * References: https://uwaterloo.ca/communications-security-lab/lwc/spoc - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all SpoC variants. - */ -#define SPOC_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SpoC-128. - */ -#define SPOC_128_TAG_SIZE 16 - -/** - * \brief Size of the authentication tag for SpoC-64. - */ -#define SPOC_64_TAG_SIZE 8 - -/** - * \brief Size of the nonce for all SpoC variants. - */ -#define SPOC_NONCE_SIZE 16 - -/** - * \brief Meta-information block for the SpoC-128 cipher. - */ -extern aead_cipher_t const spoc_128_cipher; - -/** - * \brief Meta-information block for the SpoC-64 cipher. - */ -extern aead_cipher_t const spoc_64_cipher; - -/** - * \brief Encrypts and authenticates a packet with SpoC-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
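A minimal usage sketch for the encryption direction follows; the all-zero key and nonce are placeholders only (a real nonce must never repeat under the same key), and error handling is omitted.

#include "spoc.h"

/* Encrypt a tiny message with SpoC-128; returns 0 on success. */
static int spoc_128_demo(void)
{
    unsigned char key[SPOC_KEY_SIZE] = {0};
    unsigned char nonce[SPOC_NONCE_SIZE] = {0};
    unsigned char msg[4] = { 'd', 'a', 't', 'a' };
    unsigned char ct[sizeof(msg) + SPOC_128_TAG_SIZE];
    unsigned long long ctlen; /* set to sizeof(msg) + 16: ciphertext plus tag */
    return spoc_128_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                                 0, 0, /* no associated data */
                                 0,    /* nsec is unused */
                                 nonce, key);
}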
- * - * \sa spoc_128_aead_decrypt() - */ -int spoc_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SpoC-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa spoc_128_aead_encrypt() - */ -int spoc_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SpoC-64. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa spoc_64_aead_decrypt() - */ -int spoc_64_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SpoC-64. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. 
- * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa spoc_64_aead_encrypt() - */ -int spoc_64_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-sliscp-192-avr.S b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-sliscp-192-avr.S new file mode 100644 index 0000000..5860b14 --- /dev/null +++ b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-sliscp-192-avr.S @@ -0,0 +1,794 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 72 +table_0: + .byte 7 + .byte 39 + .byte 8 + .byte 41 + .byte 4 + .byte 52 + .byte 12 + .byte 29 + .byte 6 + .byte 46 + .byte 10 + .byte 51 + .byte 37 + .byte 25 + .byte 47 + .byte 42 + .byte 23 + .byte 53 + .byte 56 + .byte 31 + .byte 28 + .byte 15 + .byte 36 + .byte 16 + .byte 18 + .byte 8 + .byte 54 + .byte 24 + .byte 59 + .byte 12 + .byte 13 + .byte 20 + .byte 38 + .byte 10 + .byte 43 + .byte 30 + .byte 21 + .byte 47 + .byte 62 + .byte 49 + .byte 63 + .byte 56 + .byte 1 + .byte 9 + .byte 32 + .byte 36 + .byte 33 + .byte 45 + .byte 48 + .byte 54 + .byte 17 + .byte 27 + .byte 40 + .byte 13 + .byte 57 + .byte 22 + .byte 60 + .byte 43 + .byte 5 + .byte 61 + .byte 34 + .byte 62 + .byte 39 + .byte 3 + .byte 19 + .byte 1 + .byte 52 + .byte 2 + .byte 26 + .byte 33 + .byte 46 + .byte 35 + + .text +.global sliscp_light192_permute + .type sliscp_light192_permute, @function +sliscp_light192_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ld r20,Z + ldd r19,Z+1 + ldd r18,Z+2 + ldd r21,Z+3 + ldd r23,Z+4 + ldd r22,Z+5 + ldd r28,Z+6 + ldd r27,Z+7 + ldd r26,Z+8 + ldd r29,Z+9 + ldd r3,Z+10 + ldd r2,Z+11 + ldd r6,Z+12 + ldd r5,Z+13 + ldd r4,Z+14 + ldd r7,Z+15 + ldd r9,Z+16 + ldd r8,Z+17 + ldd r12,Z+18 + ldd r11,Z+19 + ldd r10,Z+20 + ldd r13,Z+21 + ldd r15,Z+22 + ldd r14,Z+23 + push r31 + push r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r24,0 +28: + mov r30,r24 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + inc r24 + movw r16,r26 + mov r1,r28 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r2,r16 + eor r3,r17 + eor r29,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r26 + and r17,r27 + and r1,r28 + eor r2,r16 + eor r3,r17 + eor r29,r1 + com r3 + com r29 + ldi r16,255 + lsr r25 + rol r16 + eor r2,r16 + movw r16,r2 + mov r1,r29 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r26,r16 + eor r27,r17 + eor r28,r1 + bst 
r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r2 + and r17,r3 + and r1,r29 + eor r26,r16 + eor r27,r17 + eor r28,r1 + com r27 + com r28 + ldi r16,255 + lsr r25 + rol r16 + eor r26,r16 + movw r16,r26 + mov r1,r28 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r2,r16 + eor r3,r17 + eor r29,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r26 + and r17,r27 + and r1,r28 + eor r2,r16 + eor r3,r17 + eor r29,r1 + com r3 + com r29 + ldi r16,255 + lsr r25 + rol r16 + eor r2,r16 + movw r16,r2 + mov r1,r29 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r26,r16 + eor r27,r17 + eor r28,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r2 + and r17,r3 + and r1,r29 + eor r26,r16 + eor r27,r17 + eor r28,r1 + com r27 + com r28 + ldi r16,255 + lsr r25 + rol r16 + eor r26,r16 + movw r16,r26 + mov r1,r28 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r2,r16 + eor r3,r17 + eor r29,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r26 + and r17,r27 + and r1,r28 + eor r2,r16 + eor r3,r17 + eor r29,r1 + com r3 + com r29 + ldi r16,255 + lsr r25 + rol r16 + eor r2,r16 + movw r16,r2 + mov r1,r29 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r26,r16 + eor r27,r17 + eor r28,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r2 + and r17,r3 + and r1,r29 + eor r26,r16 + eor r27,r17 + eor r28,r1 + com r27 + com r28 + ldi r16,255 + lsr r25 + rol r16 + eor r26,r16 + mov r30,r24 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + inc r24 + movw r16,r10 + mov r1,r12 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r14,r16 + eor r15,r17 + eor r13,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r10 + and r17,r11 + and r1,r12 + eor r14,r16 + eor r15,r17 + eor r13,r1 + com r15 + com r13 + ldi r16,255 + lsr r25 + rol r16 + eor r14,r16 + movw r16,r14 + mov r1,r13 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r10,r16 + eor r11,r17 + eor r12,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r14 + and r17,r15 + and r1,r13 + eor r10,r16 + eor r11,r17 + eor r12,r1 + com r11 + com r12 + ldi r16,255 + lsr r25 + rol r16 + eor r10,r16 + movw r16,r10 + mov r1,r12 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r14,r16 + eor r15,r17 + eor r13,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld 
r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r10 + and r17,r11 + and r1,r12 + eor r14,r16 + eor r15,r17 + eor r13,r1 + com r15 + com r13 + ldi r16,255 + lsr r25 + rol r16 + eor r14,r16 + movw r16,r14 + mov r1,r13 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r10,r16 + eor r11,r17 + eor r12,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r14 + and r17,r15 + and r1,r13 + eor r10,r16 + eor r11,r17 + eor r12,r1 + com r11 + com r12 + ldi r16,255 + lsr r25 + rol r16 + eor r10,r16 + movw r16,r10 + mov r1,r12 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r14,r16 + eor r15,r17 + eor r13,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r10 + and r17,r11 + and r1,r12 + eor r14,r16 + eor r15,r17 + eor r13,r1 + com r15 + com r13 + ldi r16,255 + lsr r25 + rol r16 + eor r14,r16 + movw r16,r14 + mov r1,r13 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r10,r16 + eor r11,r17 + eor r12,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r14 + and r17,r15 + and r1,r13 + eor r10,r16 + eor r11,r17 + eor r12,r1 + com r11 + com r12 + ldi r16,255 + lsr r25 + rol r16 + eor r10,r16 + com r18 + com r19 + com r20 + com r23 + com r21 + mov r30,r24 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + eor r22,r25 + inc r24 + com r4 + com r5 + com r6 + com r9 + com r7 + mov r30,r24 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + eor r8,r25 + inc r24 + movw r16,r18 + mov r1,r20 + eor r16,r26 + eor r17,r27 + eor r1,r28 + movw r18,r26 + mov r20,r28 + movw r26,r4 + mov r28,r6 + eor r26,r10 + eor r27,r11 + eor r28,r12 + movw r4,r10 + mov r6,r12 + movw r10,r16 + mov r12,r1 + movw r16,r22 + mov r1,r21 + eor r16,r2 + eor r17,r3 + eor r1,r29 + movw r22,r2 + mov r21,r29 + movw r2,r8 + mov r29,r7 + eor r2,r14 + eor r3,r15 + eor r29,r13 + movw r8,r14 + mov r7,r13 + movw r14,r16 + mov r13,r1 + ldi r17,72 + cpse r24,r17 + rjmp 28b +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + st Z,r20 + std Z+1,r19 + std Z+2,r18 + std Z+3,r21 + std Z+4,r23 + std Z+5,r22 + std Z+6,r28 + std Z+7,r27 + std Z+8,r26 + std Z+9,r29 + std Z+10,r3 + std Z+11,r2 + std Z+12,r6 + std Z+13,r5 + std Z+14,r4 + std Z+15,r7 + std Z+16,r9 + std Z+17,r8 + std Z+18,r12 + std Z+19,r11 + std Z+20,r10 + std Z+21,r13 + std Z+22,r15 + std Z+23,r14 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + eor r1,r1 + ret + .size sliscp_light192_permute, .-sliscp_light192_permute + +#endif diff --git a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-sliscp-256-spoc-avr.S 
b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-sliscp-256-spoc-avr.S new file mode 100644 index 0000000..84925b4 --- /dev/null +++ b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-sliscp-256-spoc-avr.S @@ -0,0 +1,1142 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 72 +table_0: + .byte 15 + .byte 71 + .byte 8 + .byte 100 + .byte 4 + .byte 178 + .byte 134 + .byte 107 + .byte 67 + .byte 181 + .byte 226 + .byte 111 + .byte 241 + .byte 55 + .byte 137 + .byte 44 + .byte 68 + .byte 150 + .byte 230 + .byte 221 + .byte 115 + .byte 238 + .byte 202 + .byte 153 + .byte 229 + .byte 76 + .byte 23 + .byte 234 + .byte 11 + .byte 245 + .byte 142 + .byte 15 + .byte 71 + .byte 7 + .byte 100 + .byte 4 + .byte 178 + .byte 130 + .byte 107 + .byte 67 + .byte 181 + .byte 161 + .byte 111 + .byte 241 + .byte 55 + .byte 120 + .byte 44 + .byte 68 + .byte 150 + .byte 162 + .byte 221 + .byte 115 + .byte 238 + .byte 185 + .byte 153 + .byte 229 + .byte 76 + .byte 242 + .byte 234 + .byte 11 + .byte 245 + .byte 133 + .byte 15 + .byte 71 + .byte 7 + .byte 35 + .byte 4 + .byte 178 + .byte 130 + .byte 217 + .byte 67 + .byte 181 + + .text +.global sliscp_light256_permute_spoc + .type sliscp_light256_permute_spoc, @function +sliscp_light256_permute_spoc: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 31 + ld r21,Z + ldd r20,Z+1 + ldd r19,Z+2 + ldd r18,Z+3 + ldd r27,Z+4 + ldd r26,Z+5 + ldd r23,Z+6 + ldd r22,Z+7 + ldd r5,Z+8 + ldd r4,Z+9 + ldd r3,Z+10 + ldd r2,Z+11 + ldd r9,Z+12 + ldd r8,Z+13 + ldd r7,Z+14 + ldd r6,Z+15 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + std Y+5,r22 + std Y+6,r23 + std Y+7,r26 + std Y+8,r27 + std Y+9,r2 + std Y+10,r3 + std Y+11,r4 + std Y+12,r5 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r21,Z+16 + ldd r20,Z+17 + ldd r19,Z+18 + ldd r18,Z+19 + ldd r27,Z+20 + ldd r26,Z+21 + ldd r23,Z+22 + ldd r22,Z+23 + ldd r5,Z+24 + ldd r4,Z+25 + ldd r3,Z+26 + ldd r2,Z+27 + ldd r9,Z+28 + ldd r8,Z+29 + ldd r7,Z+30 + ldd r6,Z+31 + push r31 + push r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r1 +52: +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + inc r30 + movw r12,r18 + movw r14,r20 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r22,r12 + eor r23,r13 + eor r26,r14 + eor r27,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r18 + and r13,r19 + and r14,r20 + and r15,r21 + eor r22,r12 + eor r23,r13 + eor r26,r14 + eor r27,r15 + com r23 + com r26 + com r27 + ldi r24,255 + lsr r10 + rol r24 + eor r22,r24 + movw r12,r22 + movw r14,r26 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + 
rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r22 + and r13,r23 + and r14,r26 + and r15,r27 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + com r19 + com r20 + com r21 + ldi r24,255 + lsr r10 + rol r24 + eor r18,r24 + movw r12,r18 + movw r14,r20 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r22,r12 + eor r23,r13 + eor r26,r14 + eor r27,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r18 + and r13,r19 + and r14,r20 + and r15,r21 + eor r22,r12 + eor r23,r13 + eor r26,r14 + eor r27,r15 + com r23 + com r26 + com r27 + ldi r24,255 + lsr r10 + rol r24 + eor r22,r24 + movw r12,r22 + movw r14,r26 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r22 + and r13,r23 + and r14,r26 + and r15,r27 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + com r19 + com r20 + com r21 + ldi r24,255 + lsr r10 + rol r24 + eor r18,r24 + movw r12,r18 + movw r14,r20 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r22,r12 + eor r23,r13 + eor r26,r14 + eor r27,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r18 + and r13,r19 + and r14,r20 + and r15,r21 + eor r22,r12 + eor r23,r13 + eor r26,r14 + eor r27,r15 + com r23 + com r26 + com r27 + ldi r24,255 + lsr r10 + rol r24 + eor r22,r24 + movw r12,r22 + movw r14,r26 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r22 + and r13,r23 + and r14,r26 + and r15,r27 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + com r19 + com r20 + com r21 + ldi r24,255 + lsr r10 + rol r24 + eor r18,r24 + movw r12,r18 + movw r14,r20 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r22,r12 + eor r23,r13 + eor r26,r14 + eor r27,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r18 + and r13,r19 + and r14,r20 + and r15,r21 + eor r22,r12 + eor r23,r13 + eor r26,r14 + eor r27,r15 + com r23 + com r26 + com r27 + ldi r24,255 + lsr r10 + rol r24 + eor r22,r24 + movw r12,r22 + movw r14,r26 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r22 + and r13,r23 + and r14,r26 + and r15,r27 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + com r19 + com r20 + com r21 + ldi r24,255 + lsr r10 + rol r24 + eor r18,r24 +#if defined(RAMPZ) + 
elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + inc r30 + movw r12,r2 + movw r14,r4 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r6,r12 + eor r7,r13 + eor r8,r14 + eor r9,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r2 + and r13,r3 + and r14,r4 + and r15,r5 + eor r6,r12 + eor r7,r13 + eor r8,r14 + eor r9,r15 + com r7 + com r8 + com r9 + ldi r24,255 + lsr r10 + rol r24 + eor r6,r24 + movw r12,r6 + movw r14,r8 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r2,r12 + eor r3,r13 + eor r4,r14 + eor r5,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r6 + and r13,r7 + and r14,r8 + and r15,r9 + eor r2,r12 + eor r3,r13 + eor r4,r14 + eor r5,r15 + com r3 + com r4 + com r5 + ldi r24,255 + lsr r10 + rol r24 + eor r2,r24 + movw r12,r2 + movw r14,r4 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r6,r12 + eor r7,r13 + eor r8,r14 + eor r9,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r2 + and r13,r3 + and r14,r4 + and r15,r5 + eor r6,r12 + eor r7,r13 + eor r8,r14 + eor r9,r15 + com r7 + com r8 + com r9 + ldi r24,255 + lsr r10 + rol r24 + eor r6,r24 + movw r12,r6 + movw r14,r8 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r2,r12 + eor r3,r13 + eor r4,r14 + eor r5,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r6 + and r13,r7 + and r14,r8 + and r15,r9 + eor r2,r12 + eor r3,r13 + eor r4,r14 + eor r5,r15 + com r3 + com r4 + com r5 + ldi r24,255 + lsr r10 + rol r24 + eor r2,r24 + movw r12,r2 + movw r14,r4 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r6,r12 + eor r7,r13 + eor r8,r14 + eor r9,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r2 + and r13,r3 + and r14,r4 + and r15,r5 + eor r6,r12 + eor r7,r13 + eor r8,r14 + eor r9,r15 + com r7 + com r8 + com r9 + ldi r24,255 + lsr r10 + rol r24 + eor r6,r24 + movw r12,r6 + movw r14,r8 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r2,r12 + eor r3,r13 + eor r4,r14 + eor r5,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r6 + and r13,r7 + and r14,r8 + and r15,r9 + eor r2,r12 + eor r3,r13 + eor r4,r14 + eor r5,r15 + com r3 + com r4 + com r5 + ldi r24,255 + lsr r10 + rol r24 + eor r2,r24 + movw r12,r2 + movw r14,r4 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r6,r12 + eor r7,r13 + eor r8,r14 + eor r9,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol 
r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r2 + and r13,r3 + and r14,r4 + and r15,r5 + eor r6,r12 + eor r7,r13 + eor r8,r14 + eor r9,r15 + com r7 + com r8 + com r9 + ldi r24,255 + lsr r10 + rol r24 + eor r6,r24 + movw r12,r6 + movw r14,r8 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r2,r12 + eor r3,r13 + eor r4,r14 + eor r5,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r6 + and r13,r7 + and r14,r8 + and r15,r9 + eor r2,r12 + eor r3,r13 + eor r4,r14 + eor r5,r15 + com r3 + com r4 + com r5 + ldi r24,255 + lsr r10 + rol r24 + eor r2,r24 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + com r12 + com r13 + com r14 + com r15 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Y+9 + ldd r19,Y+10 + ldd r20,Y+11 + ldd r21,Y+12 + com r18 + com r19 + com r20 + com r21 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + std Y+9,r2 + std Y+10,r3 + std Y+11,r4 + std Y+12,r5 + movw r2,r12 + movw r4,r14 + ldd r12,Y+5 + ldd r13,Y+6 + ldd r14,Y+7 + ldd r15,Y+8 + com r13 + com r14 + com r15 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + eor r12,r10 + inc r30 + eor r12,r22 + eor r13,r23 + eor r14,r26 + eor r15,r27 + std Y+5,r22 + std Y+6,r23 + std Y+7,r26 + std Y+8,r27 + ldd r22,Y+13 + ldd r23,Y+14 + ldd r26,Y+15 + ldd r27,Y+16 + com r23 + com r26 + com r27 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + eor r22,r10 + inc r30 + eor r22,r6 + eor r23,r7 + eor r26,r8 + eor r27,r9 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + movw r6,r12 + movw r8,r14 + ldi r25,72 + cpse r30,r25 + rjmp 52b +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + std Z+16,r21 + std Z+17,r20 + std Z+18,r19 + std Z+19,r18 + std Z+20,r27 + std Z+21,r26 + std Z+22,r23 + std Z+23,r22 + std Z+24,r5 + std Z+25,r4 + std Z+26,r3 + std Z+27,r2 + std Z+28,r9 + std Z+29,r8 + std Z+30,r7 + std Z+31,r6 + ldd r18,Y+1 + ldd r19,Y+2 + ldd r20,Y+3 + ldd r21,Y+4 + ldd r22,Y+5 + ldd r23,Y+6 + ldd r26,Y+7 + ldd r27,Y+8 + ldd r2,Y+9 + ldd r3,Y+10 + ldd r4,Y+11 + ldd r5,Y+12 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + st Z,r21 + std Z+1,r20 + std Z+2,r19 + std Z+3,r18 + std Z+4,r27 + std Z+5,r26 + std Z+6,r23 + std Z+7,r22 + std Z+8,r5 + std Z+9,r4 + std Z+10,r3 + std Z+11,r2 + std Z+12,r9 + std Z+13,r8 + std Z+14,r7 + std Z+15,r6 + adiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r15 + pop r14 + pop r13 + pop r12 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sliscp_light256_permute_spoc, .-sliscp_light256_permute_spoc + + .text +.global sliscp_light256_swap_spoc + .type sliscp_light256_swap_spoc, @function +sliscp_light256_swap_spoc: + movw r30,r24 +.L__stack_usage = 2 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + std Z+8,r22 + std Z+9,r23 + std Z+10,r26 + std Z+11,r27 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + ldd r22,Z+20 + ldd 
r23,Z+21 + ldd r26,Z+22 + ldd r27,Z+23 + std Z+20,r18 + std Z+21,r19 + std Z+22,r20 + std Z+23,r21 + std Z+12,r22 + std Z+13,r23 + std Z+14,r26 + std Z+15,r27 + ret + .size sliscp_light256_swap_spoc, .-sliscp_light256_swap_spoc + +#endif diff --git a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-sliscp-light.c b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-sliscp-light.c index 69b4519..dd3a688 100644 --- a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-sliscp-light.c +++ b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-sliscp-light.c @@ -22,6 +22,8 @@ #include "internal-sliscp-light.h" +#if !defined(__AVR__) + /** * \brief Performs one round of the Simeck-64 block cipher. * @@ -173,11 +175,12 @@ void sliscp_light256_swap_spix(unsigned char block[32]) le_store_word32(block + 12, t2); } -void sliscp_light256_permute_spoc(unsigned char block[32], unsigned rounds) +void sliscp_light256_permute_spoc(unsigned char block[32]) { const unsigned char *rc = sliscp_light256_RC; uint32_t x0, x1, x2, x3, x4, x5, x6, x7; uint32_t t0, t1; + unsigned round; /* Load the block into local state variables */ x0 = be_load_word32(block); @@ -190,7 +193,7 @@ void sliscp_light256_permute_spoc(unsigned char block[32], unsigned rounds) x7 = be_load_word32(block + 28); /* Perform all permutation rounds */ - for (; rounds > 0; --rounds, rc += 4) { + for (round = 0; round < 18; ++round, rc += 4) { /* Apply Simeck-64 to two of the 64-bit sub-blocks */ simeck64_box(x2, x3, rc[0]); simeck64_box(x6, x7, rc[1]); @@ -406,3 +409,5 @@ void sliscp_light320_swap(unsigned char block[40]) le_store_word32(block + 16, t1); le_store_word32(block + 4, t2); } + +#endif /* !__AVR__ */ diff --git a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-sliscp-light.h b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-sliscp-light.h index fa6b9ba..8a5e8d5 100644 --- a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-sliscp-light.h +++ b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-sliscp-light.h @@ -92,7 +92,6 @@ void sliscp_light256_swap_spix(unsigned char block[32]); * \brief Performs the sLiSCP-light permutation on a 256-bit block. * * \param block Points to the block to be permuted. - * \param rounds Number of rounds to be performed, usually 9 or 18. * * The bytes of the block are assumed to be rearranged to match the * requirements of the SpoC-128 cipher. SpoC-128 interleaves the @@ -112,7 +111,7 @@ void sliscp_light256_swap_spix(unsigned char block[32]); * * \sa sliscp_light256_swap_spoc() */ -void sliscp_light256_permute_spoc(unsigned char block[32], unsigned rounds); +void sliscp_light256_permute_spoc(unsigned char block[32]); /** * \brief Swaps rate bytes in a sLiSCP-light 256-bit block for SpoC-128. diff --git a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-util.h b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-util.h +++ b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. 
+ * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) 
(rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/spoc.c b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/spoc.c index 1af7d59..92ee233 100644 --- a/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/spoc.c +++ b/spoc/Implementations/crypto_aead/spoc128sliscplight256v1/rhys/spoc.c @@ -106,7 +106,7 @@ static void spoc_128_init /* Absorb the associated data into the state */ if (adlen != 0) { while (adlen >= SPOC_128_RATE) { - sliscp_light256_permute_spoc(state, 18); + sliscp_light256_permute_spoc(state); lw_xor_block(state + 16, ad, SPOC_128_RATE); state[0] ^= 0x20; /* domain separation */ ad += 
SPOC_128_RATE; @@ -114,7 +114,7 @@ static void spoc_128_init } temp = (unsigned)adlen; if (temp > 0) { - sliscp_light256_permute_spoc(state, 18); + sliscp_light256_permute_spoc(state); lw_xor_block(state + 16, ad, temp); state[temp + 16] ^= 0x80; /* padding */ state[0] ^= 0x30; /* domain separation */ @@ -185,7 +185,7 @@ static void spoc_128_finalize { /* Pad and permute the state one more time */ state[0] ^= 0x80; - sliscp_light256_permute_spoc(state, 18); + sliscp_light256_permute_spoc(state); /* Copy out the authentication tag */ memcpy(tag, state + 16, 16); @@ -229,7 +229,7 @@ int spoc_128_aead_encrypt /* Encrypt the plaintext to produce the ciphertext */ if (mlen != 0) { while (mlen >= SPOC_128_RATE) { - sliscp_light256_permute_spoc(state, 18); + sliscp_light256_permute_spoc(state); lw_xor_block(state + 16, m, SPOC_128_RATE); lw_xor_block_2_src(c, m, state, SPOC_128_RATE); state[0] ^= 0x40; /* domain separation */ @@ -239,7 +239,7 @@ int spoc_128_aead_encrypt } if (mlen != 0) { unsigned temp = (unsigned)mlen; - sliscp_light256_permute_spoc(state, 18); + sliscp_light256_permute_spoc(state); lw_xor_block(state + 16, m, temp); lw_xor_block_2_src(c, m, state, temp); state[temp + 16] ^= 0x80; /* padding */ @@ -277,7 +277,7 @@ int spoc_128_aead_decrypt clen -= SPOC_128_TAG_SIZE; if (clen != 0) { while (clen >= SPOC_128_RATE) { - sliscp_light256_permute_spoc(state, 18); + sliscp_light256_permute_spoc(state); lw_xor_block_2_src(m, c, state, SPOC_128_RATE); lw_xor_block(state + 16, m, SPOC_128_RATE); state[0] ^= 0x40; /* domain separation */ @@ -287,7 +287,7 @@ int spoc_128_aead_decrypt } if (clen != 0) { unsigned temp = (unsigned)clen; - sliscp_light256_permute_spoc(state, 18); + sliscp_light256_permute_spoc(state); lw_xor_block_2_src(m, c, state, temp); lw_xor_block(state + 16, m, temp); state[temp + 16] ^= 0x80; /* padding */ diff --git a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/aead-common.c b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/aead-common.h b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/api.h b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/api.h deleted file mode 100644 index 4bf8f5c..0000000 --- a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 8 -#define CRYPTO_NOOVERLAP 1 diff --git a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/encrypt.c b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/encrypt.c deleted file mode 100644 index f8dd710..0000000 --- a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "spoc.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return spoc_64_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return spoc_64_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-sliscp-192-avr.S b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-sliscp-192-avr.S deleted file mode 100644 index 5860b14..0000000 --- a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-sliscp-192-avr.S +++ /dev/null @@ -1,794 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 72 -table_0: - .byte 7 - .byte 39 - .byte 8 - .byte 41 - .byte 4 - .byte 52 - .byte 12 - .byte 29 - .byte 6 - .byte 46 - .byte 10 - .byte 51 - .byte 37 - .byte 25 - .byte 47 - .byte 42 - .byte 23 - .byte 53 - .byte 56 - .byte 31 - .byte 28 - .byte 15 - .byte 36 - .byte 16 - .byte 18 - .byte 8 - .byte 54 - .byte 24 - .byte 59 - .byte 12 - .byte 13 - .byte 20 - .byte 38 - .byte 10 - .byte 43 - .byte 30 - .byte 21 - .byte 47 - .byte 62 - .byte 49 - .byte 63 - .byte 56 - .byte 1 - .byte 9 - .byte 32 - .byte 36 - .byte 33 - .byte 45 - .byte 48 - .byte 54 - .byte 17 - .byte 27 - .byte 40 - .byte 13 - .byte 57 - .byte 22 - .byte 60 - .byte 43 - .byte 5 - .byte 61 - .byte 34 - .byte 62 - .byte 39 - .byte 3 - .byte 19 - .byte 1 - .byte 52 - 
.byte 2 - .byte 26 - .byte 33 - .byte 46 - .byte 35 - - .text -.global sliscp_light192_permute - .type sliscp_light192_permute, @function -sliscp_light192_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 -.L__stack_usage = 18 - ld r20,Z - ldd r19,Z+1 - ldd r18,Z+2 - ldd r21,Z+3 - ldd r23,Z+4 - ldd r22,Z+5 - ldd r28,Z+6 - ldd r27,Z+7 - ldd r26,Z+8 - ldd r29,Z+9 - ldd r3,Z+10 - ldd r2,Z+11 - ldd r6,Z+12 - ldd r5,Z+13 - ldd r4,Z+14 - ldd r7,Z+15 - ldd r9,Z+16 - ldd r8,Z+17 - ldd r12,Z+18 - ldd r11,Z+19 - ldd r10,Z+20 - ldd r13,Z+21 - ldd r15,Z+22 - ldd r14,Z+23 - push r31 - push r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r24,0 -28: - mov r30,r24 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - inc r24 - movw r16,r26 - mov r1,r28 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r2,r16 - eor r3,r17 - eor r29,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r26 - and r17,r27 - and r1,r28 - eor r2,r16 - eor r3,r17 - eor r29,r1 - com r3 - com r29 - ldi r16,255 - lsr r25 - rol r16 - eor r2,r16 - movw r16,r2 - mov r1,r29 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r26,r16 - eor r27,r17 - eor r28,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r2 - and r17,r3 - and r1,r29 - eor r26,r16 - eor r27,r17 - eor r28,r1 - com r27 - com r28 - ldi r16,255 - lsr r25 - rol r16 - eor r26,r16 - movw r16,r26 - mov r1,r28 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r2,r16 - eor r3,r17 - eor r29,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r26 - and r17,r27 - and r1,r28 - eor r2,r16 - eor r3,r17 - eor r29,r1 - com r3 - com r29 - ldi r16,255 - lsr r25 - rol r16 - eor r2,r16 - movw r16,r2 - mov r1,r29 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r26,r16 - eor r27,r17 - eor r28,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r2 - and r17,r3 - and r1,r29 - eor r26,r16 - eor r27,r17 - eor r28,r1 - com r27 - com r28 - ldi r16,255 - lsr r25 - rol r16 - eor r26,r16 - movw r16,r26 - mov r1,r28 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r2,r16 - eor r3,r17 - eor r29,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r26 - and r17,r27 - and r1,r28 - eor r2,r16 - eor r3,r17 - eor r29,r1 - com r3 - com r29 - ldi r16,255 - lsr r25 - rol r16 - eor r2,r16 - movw r16,r2 - mov r1,r29 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld 
r16,0 - eor r26,r16 - eor r27,r17 - eor r28,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r2 - and r17,r3 - and r1,r29 - eor r26,r16 - eor r27,r17 - eor r28,r1 - com r27 - com r28 - ldi r16,255 - lsr r25 - rol r16 - eor r26,r16 - mov r30,r24 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - inc r24 - movw r16,r10 - mov r1,r12 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r14,r16 - eor r15,r17 - eor r13,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r10 - and r17,r11 - and r1,r12 - eor r14,r16 - eor r15,r17 - eor r13,r1 - com r15 - com r13 - ldi r16,255 - lsr r25 - rol r16 - eor r14,r16 - movw r16,r14 - mov r1,r13 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r10,r16 - eor r11,r17 - eor r12,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r14 - and r17,r15 - and r1,r13 - eor r10,r16 - eor r11,r17 - eor r12,r1 - com r11 - com r12 - ldi r16,255 - lsr r25 - rol r16 - eor r10,r16 - movw r16,r10 - mov r1,r12 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r14,r16 - eor r15,r17 - eor r13,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r10 - and r17,r11 - and r1,r12 - eor r14,r16 - eor r15,r17 - eor r13,r1 - com r15 - com r13 - ldi r16,255 - lsr r25 - rol r16 - eor r14,r16 - movw r16,r14 - mov r1,r13 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r10,r16 - eor r11,r17 - eor r12,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r14 - and r17,r15 - and r1,r13 - eor r10,r16 - eor r11,r17 - eor r12,r1 - com r11 - com r12 - ldi r16,255 - lsr r25 - rol r16 - eor r10,r16 - movw r16,r10 - mov r1,r12 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r14,r16 - eor r15,r17 - eor r13,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r10 - and r17,r11 - and r1,r12 - eor r14,r16 - eor r15,r17 - eor r13,r1 - com r15 - com r13 - ldi r16,255 - lsr r25 - rol r16 - eor r14,r16 - movw r16,r14 - mov r1,r13 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - eor r10,r16 - eor r11,r17 - eor r12,r1 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - bst r1,7 - lsl r16 - rol r17 - rol r1 - bld r16,0 - and r16,r14 - and r17,r15 - and r1,r13 - eor r10,r16 - eor r11,r17 - eor r12,r1 - com r11 - com r12 - ldi r16,255 - lsr r25 - rol r16 - eor r10,r16 - com r18 - com r19 - com r20 - com r23 - com r21 - mov r30,r24 -#if defined(RAMPZ) - elpm r25,Z 
-#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - eor r22,r25 - inc r24 - com r4 - com r5 - com r6 - com r9 - com r7 - mov r30,r24 -#if defined(RAMPZ) - elpm r25,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r25,Z -#elif defined(__AVR_TINY__) - ld r25,Z -#else - lpm - mov r25,r0 -#endif - eor r8,r25 - inc r24 - movw r16,r18 - mov r1,r20 - eor r16,r26 - eor r17,r27 - eor r1,r28 - movw r18,r26 - mov r20,r28 - movw r26,r4 - mov r28,r6 - eor r26,r10 - eor r27,r11 - eor r28,r12 - movw r4,r10 - mov r6,r12 - movw r10,r16 - mov r12,r1 - movw r16,r22 - mov r1,r21 - eor r16,r2 - eor r17,r3 - eor r1,r29 - movw r22,r2 - mov r21,r29 - movw r2,r8 - mov r29,r7 - eor r2,r14 - eor r3,r15 - eor r29,r13 - movw r8,r14 - mov r7,r13 - movw r14,r16 - mov r13,r1 - ldi r17,72 - cpse r24,r17 - rjmp 28b -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - st Z,r20 - std Z+1,r19 - std Z+2,r18 - std Z+3,r21 - std Z+4,r23 - std Z+5,r22 - std Z+6,r28 - std Z+7,r27 - std Z+8,r26 - std Z+9,r29 - std Z+10,r3 - std Z+11,r2 - std Z+12,r6 - std Z+13,r5 - std Z+14,r4 - std Z+15,r7 - std Z+16,r9 - std Z+17,r8 - std Z+18,r12 - std Z+19,r11 - std Z+20,r10 - std Z+21,r13 - std Z+22,r15 - std Z+23,r14 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - eor r1,r1 - ret - .size sliscp_light192_permute, .-sliscp_light192_permute - -#endif diff --git a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-sliscp-256-spoc-avr.S b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-sliscp-256-spoc-avr.S deleted file mode 100644 index 84925b4..0000000 --- a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-sliscp-256-spoc-avr.S +++ /dev/null @@ -1,1142 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 72 -table_0: - .byte 15 - .byte 71 - .byte 8 - .byte 100 - .byte 4 - .byte 178 - .byte 134 - .byte 107 - .byte 67 - .byte 181 - .byte 226 - .byte 111 - .byte 241 - .byte 55 - .byte 137 - .byte 44 - .byte 68 - .byte 150 - .byte 230 - .byte 221 - .byte 115 - .byte 238 - .byte 202 - .byte 153 - .byte 229 - .byte 76 - .byte 23 - .byte 234 - .byte 11 - .byte 245 - .byte 142 - .byte 15 - .byte 71 - .byte 7 - .byte 100 - .byte 4 - .byte 178 - .byte 130 - .byte 107 - .byte 67 - .byte 181 - .byte 161 - .byte 111 - .byte 241 - .byte 55 - .byte 120 - .byte 44 - .byte 68 - .byte 150 - .byte 162 - .byte 221 - .byte 115 - .byte 238 - .byte 185 - .byte 153 - .byte 229 - .byte 76 - .byte 242 - .byte 234 - .byte 11 - .byte 245 - .byte 133 - .byte 15 - .byte 71 - .byte 7 - .byte 35 - .byte 4 - .byte 178 - .byte 130 - .byte 217 - .byte 67 - .byte 181 - - .text -.global sliscp_light256_permute_spoc - .type sliscp_light256_permute_spoc, @function -sliscp_light256_permute_spoc: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 31 - ld r21,Z - ldd r20,Z+1 - ldd r19,Z+2 - ldd r18,Z+3 - ldd r27,Z+4 - ldd r26,Z+5 - ldd r23,Z+6 - ldd r22,Z+7 - ldd r5,Z+8 - ldd r4,Z+9 - ldd r3,Z+10 - ldd r2,Z+11 - ldd r9,Z+12 - 
ldd r8,Z+13 - ldd r7,Z+14 - ldd r6,Z+15 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - std Y+5,r22 - std Y+6,r23 - std Y+7,r26 - std Y+8,r27 - std Y+9,r2 - std Y+10,r3 - std Y+11,r4 - std Y+12,r5 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - ldd r21,Z+16 - ldd r20,Z+17 - ldd r19,Z+18 - ldd r18,Z+19 - ldd r27,Z+20 - ldd r26,Z+21 - ldd r23,Z+22 - ldd r22,Z+23 - ldd r5,Z+24 - ldd r4,Z+25 - ldd r3,Z+26 - ldd r2,Z+27 - ldd r9,Z+28 - ldd r8,Z+29 - ldd r7,Z+30 - ldd r6,Z+31 - push r31 - push r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - mov r30,r1 -52: -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - inc r30 - movw r12,r18 - movw r14,r20 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r22,r12 - eor r23,r13 - eor r26,r14 - eor r27,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r18 - and r13,r19 - and r14,r20 - and r15,r21 - eor r22,r12 - eor r23,r13 - eor r26,r14 - eor r27,r15 - com r23 - com r26 - com r27 - ldi r24,255 - lsr r10 - rol r24 - eor r22,r24 - movw r12,r22 - movw r14,r26 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r22 - and r13,r23 - and r14,r26 - and r15,r27 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - com r19 - com r20 - com r21 - ldi r24,255 - lsr r10 - rol r24 - eor r18,r24 - movw r12,r18 - movw r14,r20 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r22,r12 - eor r23,r13 - eor r26,r14 - eor r27,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r18 - and r13,r19 - and r14,r20 - and r15,r21 - eor r22,r12 - eor r23,r13 - eor r26,r14 - eor r27,r15 - com r23 - com r26 - com r27 - ldi r24,255 - lsr r10 - rol r24 - eor r22,r24 - movw r12,r22 - movw r14,r26 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r22 - and r13,r23 - and r14,r26 - and r15,r27 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - com r19 - com r20 - com r21 - ldi r24,255 - lsr r10 - rol r24 - eor r18,r24 - movw r12,r18 - movw r14,r20 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r22,r12 - eor r23,r13 - eor r26,r14 - eor r27,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r18 - and r13,r19 - and r14,r20 - and r15,r21 - eor r22,r12 - eor r23,r13 - eor r26,r14 - eor r27,r15 - com r23 - com r26 - com r27 - ldi r24,255 - lsr r10 - rol r24 
- eor r22,r24 - movw r12,r22 - movw r14,r26 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r22 - and r13,r23 - and r14,r26 - and r15,r27 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - com r19 - com r20 - com r21 - ldi r24,255 - lsr r10 - rol r24 - eor r18,r24 - movw r12,r18 - movw r14,r20 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r22,r12 - eor r23,r13 - eor r26,r14 - eor r27,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r18 - and r13,r19 - and r14,r20 - and r15,r21 - eor r22,r12 - eor r23,r13 - eor r26,r14 - eor r27,r15 - com r23 - com r26 - com r27 - ldi r24,255 - lsr r10 - rol r24 - eor r22,r24 - movw r12,r22 - movw r14,r26 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r22 - and r13,r23 - and r14,r26 - and r15,r27 - eor r18,r12 - eor r19,r13 - eor r20,r14 - eor r21,r15 - com r19 - com r20 - com r21 - ldi r24,255 - lsr r10 - rol r24 - eor r18,r24 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - inc r30 - movw r12,r2 - movw r14,r4 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r6,r12 - eor r7,r13 - eor r8,r14 - eor r9,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r2 - and r13,r3 - and r14,r4 - and r15,r5 - eor r6,r12 - eor r7,r13 - eor r8,r14 - eor r9,r15 - com r7 - com r8 - com r9 - ldi r24,255 - lsr r10 - rol r24 - eor r6,r24 - movw r12,r6 - movw r14,r8 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r2,r12 - eor r3,r13 - eor r4,r14 - eor r5,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r6 - and r13,r7 - and r14,r8 - and r15,r9 - eor r2,r12 - eor r3,r13 - eor r4,r14 - eor r5,r15 - com r3 - com r4 - com r5 - ldi r24,255 - lsr r10 - rol r24 - eor r2,r24 - movw r12,r2 - movw r14,r4 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r6,r12 - eor r7,r13 - eor r8,r14 - eor r9,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r2 - and r13,r3 - and r14,r4 - and r15,r5 - eor r6,r12 - eor r7,r13 - eor r8,r14 - eor r9,r15 - com r7 - com r8 - com r9 - ldi r24,255 - lsr r10 - rol r24 - eor r6,r24 - movw r12,r6 - movw r14,r8 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r2,r12 - eor r3,r13 - eor r4,r14 - eor r5,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 
- lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r6 - and r13,r7 - and r14,r8 - and r15,r9 - eor r2,r12 - eor r3,r13 - eor r4,r14 - eor r5,r15 - com r3 - com r4 - com r5 - ldi r24,255 - lsr r10 - rol r24 - eor r2,r24 - movw r12,r2 - movw r14,r4 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r6,r12 - eor r7,r13 - eor r8,r14 - eor r9,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r2 - and r13,r3 - and r14,r4 - and r15,r5 - eor r6,r12 - eor r7,r13 - eor r8,r14 - eor r9,r15 - com r7 - com r8 - com r9 - ldi r24,255 - lsr r10 - rol r24 - eor r6,r24 - movw r12,r6 - movw r14,r8 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r2,r12 - eor r3,r13 - eor r4,r14 - eor r5,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r6 - and r13,r7 - and r14,r8 - and r15,r9 - eor r2,r12 - eor r3,r13 - eor r4,r14 - eor r5,r15 - com r3 - com r4 - com r5 - ldi r24,255 - lsr r10 - rol r24 - eor r2,r24 - movw r12,r2 - movw r14,r4 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r6,r12 - eor r7,r13 - eor r8,r14 - eor r9,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r2 - and r13,r3 - and r14,r4 - and r15,r5 - eor r6,r12 - eor r7,r13 - eor r8,r14 - eor r9,r15 - com r7 - com r8 - com r9 - ldi r24,255 - lsr r10 - rol r24 - eor r6,r24 - movw r12,r6 - movw r14,r8 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - eor r2,r12 - eor r3,r13 - eor r4,r14 - eor r5,r15 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - lsl r12 - rol r13 - rol r14 - rol r15 - adc r12,r1 - and r12,r6 - and r13,r7 - and r14,r8 - and r15,r9 - eor r2,r12 - eor r3,r13 - eor r4,r14 - eor r5,r15 - com r3 - com r4 - com r5 - ldi r24,255 - lsr r10 - rol r24 - eor r2,r24 - ldd r12,Y+1 - ldd r13,Y+2 - ldd r14,Y+3 - ldd r15,Y+4 - com r12 - com r13 - com r14 - com r15 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - std Y+1,r18 - std Y+2,r19 - std Y+3,r20 - std Y+4,r21 - ldd r18,Y+9 - ldd r19,Y+10 - ldd r20,Y+11 - ldd r21,Y+12 - com r18 - com r19 - com r20 - com r21 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - std Y+9,r2 - std Y+10,r3 - std Y+11,r4 - std Y+12,r5 - movw r2,r12 - movw r4,r14 - ldd r12,Y+5 - ldd r13,Y+6 - ldd r14,Y+7 - ldd r15,Y+8 - com r13 - com r14 - com r15 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - eor r12,r10 - inc r30 - eor r12,r22 - eor r13,r23 - eor r14,r26 - eor r15,r27 - std Y+5,r22 - std Y+6,r23 - std Y+7,r26 - std Y+8,r27 - ldd r22,Y+13 - ldd r23,Y+14 - ldd r26,Y+15 - ldd r27,Y+16 - com r23 - com r26 - com r27 -#if defined(RAMPZ) - elpm r10,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r10,Z -#elif defined(__AVR_TINY__) - ld r10,Z -#else - lpm - mov r10,r0 -#endif - eor r22,r10 - inc r30 - eor r22,r6 - eor r23,r7 - eor r26,r8 - 
eor r27,r9 - std Y+13,r6 - std Y+14,r7 - std Y+15,r8 - std Y+16,r9 - movw r6,r12 - movw r8,r14 - ldi r25,72 - cpse r30,r25 - rjmp 52b -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - pop r30 - pop r31 - std Z+16,r21 - std Z+17,r20 - std Z+18,r19 - std Z+19,r18 - std Z+20,r27 - std Z+21,r26 - std Z+22,r23 - std Z+23,r22 - std Z+24,r5 - std Z+25,r4 - std Z+26,r3 - std Z+27,r2 - std Z+28,r9 - std Z+29,r8 - std Z+30,r7 - std Z+31,r6 - ldd r18,Y+1 - ldd r19,Y+2 - ldd r20,Y+3 - ldd r21,Y+4 - ldd r22,Y+5 - ldd r23,Y+6 - ldd r26,Y+7 - ldd r27,Y+8 - ldd r2,Y+9 - ldd r3,Y+10 - ldd r4,Y+11 - ldd r5,Y+12 - ldd r6,Y+13 - ldd r7,Y+14 - ldd r8,Y+15 - ldd r9,Y+16 - st Z,r21 - std Z+1,r20 - std Z+2,r19 - std Z+3,r18 - std Z+4,r27 - std Z+5,r26 - std Z+6,r23 - std Z+7,r22 - std Z+8,r5 - std Z+9,r4 - std Z+10,r3 - std Z+11,r2 - std Z+12,r9 - std Z+13,r8 - std Z+14,r7 - std Z+15,r6 - adiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r15 - pop r14 - pop r13 - pop r12 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size sliscp_light256_permute_spoc, .-sliscp_light256_permute_spoc - - .text -.global sliscp_light256_swap_spoc - .type sliscp_light256_swap_spoc, @function -sliscp_light256_swap_spoc: - movw r30,r24 -.L__stack_usage = 2 - ldd r18,Z+8 - ldd r19,Z+9 - ldd r20,Z+10 - ldd r21,Z+11 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r26,Z+18 - ldd r27,Z+19 - std Z+16,r18 - std Z+17,r19 - std Z+18,r20 - std Z+19,r21 - std Z+8,r22 - std Z+9,r23 - std Z+10,r26 - std Z+11,r27 - ldd r18,Z+12 - ldd r19,Z+13 - ldd r20,Z+14 - ldd r21,Z+15 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r26,Z+22 - ldd r27,Z+23 - std Z+20,r18 - std Z+21,r19 - std Z+22,r20 - std Z+23,r21 - std Z+12,r22 - std Z+13,r23 - std Z+14,r26 - std Z+15,r27 - ret - .size sliscp_light256_swap_spoc, .-sliscp_light256_swap_spoc - -#endif diff --git a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-sliscp-light.c b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-sliscp-light.c deleted file mode 100644 index dd3a688..0000000 --- a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-sliscp-light.c +++ /dev/null @@ -1,413 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-sliscp-light.h" - -#if !defined(__AVR__) - -/** - * \brief Performs one round of the Simeck-64 block cipher. 
- * - * \param x Left half of the 64-bit block. - * \param y Right half of the 64-bit block. - */ -#define simeck64_round(x, y) \ - do { \ - (y) ^= (leftRotate5((x)) & (x)) ^ leftRotate1((x)) ^ \ - 0xFFFFFFFEU ^ (_rc & 1); \ - _rc >>= 1; \ - } while (0) - -/** - * \brief Encrypts a 64-bit block with the 8 round version of Simeck-64. - * - * \param x Left half of the 64-bit block. - * \param y Right half of the 64-bit block. - * \param rc Round constants for the 8 rounds, 1 bit per round. - * - * It is assumed that the two halves have already been converted from - * big-endian to host byte order before calling this function. The output - * halves will also be in host byte order. - */ -#define simeck64_box(x, y, rc) \ - do { \ - unsigned char _rc = (rc); \ - simeck64_round(x, y); /* Round 1 */ \ - simeck64_round(y, x); /* Round 2 */ \ - simeck64_round(x, y); /* Round 3 */ \ - simeck64_round(y, x); /* Round 4 */ \ - simeck64_round(x, y); /* Round 5 */ \ - simeck64_round(y, x); /* Round 6 */ \ - simeck64_round(x, y); /* Round 7 */ \ - simeck64_round(y, x); /* Round 8 */ \ - } while (0) - -/* Helper macros for 48-bit left rotations */ -#define leftRotate5_48(x) (((x) << 5) | ((x) >> 19)) -#define leftRotate1_48(x) (((x) << 1) | ((x) >> 23)) - -/** - * \brief Performs one round of the Simeck-48 block cipher. - * - * \param x Left half of the 48-bit block. - * \param y Right half of the 48-bit block. - */ -#define simeck48_round(x, y) \ - do { \ - (y) ^= (leftRotate5_48((x)) & (x)) ^ leftRotate1_48((x)) ^ \ - 0x00FFFFFEU ^ (_rc & 1); \ - (y) &= 0x00FFFFFFU; \ - _rc >>= 1; \ - } while (0) - -/** - * \brief Encrypts a 48-bit block with the 6 round version of Simeck-48. - * - * \param x Left half of the 48-bit block. - * \param y Right half of the 48-bit block. - * \param rc Round constants for the 8 rounds, 1 bit per round. - * - * It is assumed that the two halves have already been converted from - * big-endian to host byte order before calling this function. The output - * halves will also be in host byte order. 
- */ -#define simeck48_box(x, y, rc) \ - do { \ - unsigned char _rc = (rc); \ - simeck48_round(x, y); /* Round 1 */ \ - simeck48_round(y, x); /* Round 2 */ \ - simeck48_round(x, y); /* Round 3 */ \ - simeck48_round(y, x); /* Round 4 */ \ - simeck48_round(x, y); /* Round 5 */ \ - simeck48_round(y, x); /* Round 6 */ \ - } while (0) - -/* Interleaved rc0, rc1, sc0, and sc1 values for each round */ -static unsigned char const sliscp_light256_RC[18 * 4] = { - 0x0f, 0x47, 0x08, 0x64, 0x04, 0xb2, 0x86, 0x6b, - 0x43, 0xb5, 0xe2, 0x6f, 0xf1, 0x37, 0x89, 0x2c, - 0x44, 0x96, 0xe6, 0xdd, 0x73, 0xee, 0xca, 0x99, - 0xe5, 0x4c, 0x17, 0xea, 0x0b, 0xf5, 0x8e, 0x0f, - 0x47, 0x07, 0x64, 0x04, 0xb2, 0x82, 0x6b, 0x43, - 0xb5, 0xa1, 0x6f, 0xf1, 0x37, 0x78, 0x2c, 0x44, - 0x96, 0xa2, 0xdd, 0x73, 0xee, 0xb9, 0x99, 0xe5, - 0x4c, 0xf2, 0xea, 0x0b, 0xf5, 0x85, 0x0f, 0x47, - 0x07, 0x23, 0x04, 0xb2, 0x82, 0xd9, 0x43, 0xb5 -}; - -void sliscp_light256_permute_spix(unsigned char block[32], unsigned rounds) -{ - const unsigned char *rc = sliscp_light256_RC; - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t t0, t1; - - /* Load the block into local state variables */ - x0 = be_load_word32(block); - x1 = be_load_word32(block + 4); - x2 = be_load_word32(block + 8); - x3 = be_load_word32(block + 24); /* Assumes the block is pre-swapped */ - x4 = be_load_word32(block + 16); - x5 = be_load_word32(block + 20); - x6 = be_load_word32(block + 12); - x7 = be_load_word32(block + 28); - - /* Perform all permutation rounds */ - for (; rounds > 0; --rounds, rc += 4) { - /* Apply Simeck-64 to two of the 64-bit sub-blocks */ - simeck64_box(x2, x3, rc[0]); - simeck64_box(x6, x7, rc[1]); - - /* Add step constants */ - x0 ^= 0xFFFFFFFFU; - x1 ^= 0xFFFFFF00U ^ rc[2]; - x4 ^= 0xFFFFFFFFU; - x5 ^= 0xFFFFFF00U ^ rc[3]; - - /* Mix the sub-blocks */ - t0 = x0 ^ x2; - t1 = x1 ^ x3; - x0 = x2; - x1 = x3; - x2 = x4 ^ x6; - x3 = x5 ^ x7; - x4 = x6; - x5 = x7; - x6 = t0; - x7 = t1; - } - - /* Store the state back into the block */ - be_store_word32(block, x0); - be_store_word32(block + 4, x1); - be_store_word32(block + 8, x2); - be_store_word32(block + 24, x3); /* Assumes the block is pre-swapped */ - be_store_word32(block + 16, x4); - be_store_word32(block + 20, x5); - be_store_word32(block + 12, x6); - be_store_word32(block + 28, x7); -} - -void sliscp_light256_swap_spix(unsigned char block[32]) -{ - uint32_t t1, t2; - t1 = le_load_word32(block + 12); - t2 = le_load_word32(block + 24); - le_store_word32(block + 24, t1); - le_store_word32(block + 12, t2); -} - -void sliscp_light256_permute_spoc(unsigned char block[32]) -{ - const unsigned char *rc = sliscp_light256_RC; - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t t0, t1; - unsigned round; - - /* Load the block into local state variables */ - x0 = be_load_word32(block); - x1 = be_load_word32(block + 4); - x2 = be_load_word32(block + 16); /* Assumes the block is pre-swapped */ - x3 = be_load_word32(block + 20); - x4 = be_load_word32(block + 8); - x5 = be_load_word32(block + 12); - x6 = be_load_word32(block + 24); - x7 = be_load_word32(block + 28); - - /* Perform all permutation rounds */ - for (round = 0; round < 18; ++round, rc += 4) { - /* Apply Simeck-64 to two of the 64-bit sub-blocks */ - simeck64_box(x2, x3, rc[0]); - simeck64_box(x6, x7, rc[1]); - - /* Add step constants */ - x0 ^= 0xFFFFFFFFU; - x1 ^= 0xFFFFFF00U ^ rc[2]; - x4 ^= 0xFFFFFFFFU; - x5 ^= 0xFFFFFF00U ^ rc[3]; - - /* Mix the sub-blocks */ - t0 = x0 ^ x2; - t1 = x1 ^ x3; - x0 = x2; - x1 = x3; - x2 = x4 ^ x6; - x3 = x5 ^ x7; 
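/* For reference, a minimal stand-alone C sketch of the eight-round
 * Simeck-64 box that the simeck64_round()/simeck64_box() macros above
 * implement.  rotl32() is a local stand-in for the leftRotate5()/leftRotate1()
 * helpers from internal-util.h, and the caller is assumed to have already
 * converted both halves to host byte order, as the macros require. */
#include <stdint.h>

static inline uint32_t rotl32(uint32_t v, unsigned bits)
{
    return (v << bits) | (v >> (32u - bits));
}

static void simeck64_box_sketch(uint32_t *x, uint32_t *y, unsigned char rc)
{
    unsigned round;
    for (round = 0; round < 8; ++round) {
        /* Odd rounds swap the roles of the two halves, matching the
         * simeck64_round(x, y) / simeck64_round(y, x) alternation above. */
        uint32_t *in  = (round & 1) ? y : x;
        uint32_t *out = (round & 1) ? x : y;
        *out ^= (rotl32(*in, 5) & *in) ^ rotl32(*in, 1) ^
                0xFFFFFFFEU ^ (uint32_t)(rc & 1);
        rc >>= 1;   /* one round-constant bit is consumed per round */
    }
}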
- x4 = x6; - x5 = x7; - x6 = t0; - x7 = t1; - } - - /* Store the state back into the block */ - be_store_word32(block, x0); - be_store_word32(block + 4, x1); - be_store_word32(block + 16, x2); /* Assumes the block is pre-swapped */ - be_store_word32(block + 20, x3); - be_store_word32(block + 8, x4); - be_store_word32(block + 12, x5); - be_store_word32(block + 24, x6); - be_store_word32(block + 28, x7); -} - -void sliscp_light256_swap_spoc(unsigned char block[32]) -{ - uint64_t t1, t2; - t1 = le_load_word64(block + 8); - t2 = le_load_word64(block + 16); - le_store_word64(block + 16, t1); - le_store_word64(block + 8, t2); -} - -/* Load a big-endian 24-bit word from a byte buffer */ -#define be_load_word24(ptr) \ - ((((uint32_t)((ptr)[0])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[2]))) - -/* Store a big-endian 24-bit word into a byte buffer */ -#define be_store_word24(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 16); \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)_x; \ - } while (0) - -void sliscp_light192_permute(unsigned char block[24]) -{ - /* Interleaved rc0, rc1, sc0, and sc1 values for each round */ - static unsigned char const RC[18 * 4] = { - 0x07, 0x27, 0x08, 0x29, 0x04, 0x34, 0x0c, 0x1d, - 0x06, 0x2e, 0x0a, 0x33, 0x25, 0x19, 0x2f, 0x2a, - 0x17, 0x35, 0x38, 0x1f, 0x1c, 0x0f, 0x24, 0x10, - 0x12, 0x08, 0x36, 0x18, 0x3b, 0x0c, 0x0d, 0x14, - 0x26, 0x0a, 0x2b, 0x1e, 0x15, 0x2f, 0x3e, 0x31, - 0x3f, 0x38, 0x01, 0x09, 0x20, 0x24, 0x21, 0x2d, - 0x30, 0x36, 0x11, 0x1b, 0x28, 0x0d, 0x39, 0x16, - 0x3c, 0x2b, 0x05, 0x3d, 0x22, 0x3e, 0x27, 0x03, - 0x13, 0x01, 0x34, 0x02, 0x1a, 0x21, 0x2e, 0x23 - }; - const unsigned char *rc = RC; - uint32_t x0, x1, x2, x3, x4, x5, x6, x7; - uint32_t t0, t1; - unsigned round; - - /* Load the block into local state variables. 
Each 24-bit block is - * placed into a separate 32-bit word which improves efficiency below */ - x0 = be_load_word24(block); - x1 = be_load_word24(block + 3); - x2 = be_load_word24(block + 6); - x3 = be_load_word24(block + 9); - x4 = be_load_word24(block + 12); - x5 = be_load_word24(block + 15); - x6 = be_load_word24(block + 18); - x7 = be_load_word24(block + 21); - - /* Perform all permutation rounds */ - for (round = 0; round < 18; ++round, rc += 4) { - /* Apply Simeck-48 to two of the 48-bit sub-blocks */ - simeck48_box(x2, x3, rc[0]); - simeck48_box(x6, x7, rc[1]); - - /* Add step constants */ - x0 ^= 0x00FFFFFFU; - x1 ^= 0x00FFFF00U ^ rc[2]; - x4 ^= 0x00FFFFFFU; - x5 ^= 0x00FFFF00U ^ rc[3]; - - /* Mix the sub-blocks */ - t0 = x0 ^ x2; - t1 = x1 ^ x3; - x0 = x2; - x1 = x3; - x2 = x4 ^ x6; - x3 = x5 ^ x7; - x4 = x6; - x5 = x7; - x6 = t0; - x7 = t1; - } - - /* Store the state back into the block */ - be_store_word24(block, x0); - be_store_word24(block + 3, x1); - be_store_word24(block + 6, x2); - be_store_word24(block + 9, x3); - be_store_word24(block + 12, x4); - be_store_word24(block + 15, x5); - be_store_word24(block + 18, x6); - be_store_word24(block + 21, x7); -} - -void sliscp_light320_permute(unsigned char block[40]) -{ - /* Interleaved rc0, rc1, rc2, sc0, sc1, and sc2 values for each round */ - static unsigned char const RC[16 * 6] = { - 0x07, 0x53, 0x43, 0x50, 0x28, 0x14, 0x0a, 0x5d, - 0xe4, 0x5c, 0xae, 0x57, 0x9b, 0x49, 0x5e, 0x91, - 0x48, 0x24, 0xe0, 0x7f, 0xcc, 0x8d, 0xc6, 0x63, - 0xd1, 0xbe, 0x32, 0x53, 0xa9, 0x54, 0x1a, 0x1d, - 0x4e, 0x60, 0x30, 0x18, 0x22, 0x28, 0x75, 0x68, - 0x34, 0x9a, 0xf7, 0x6c, 0x25, 0xe1, 0x70, 0x38, - 0x62, 0x82, 0xfd, 0xf6, 0x7b, 0xbd, 0x96, 0x47, - 0xf9, 0x9d, 0xce, 0x67, 0x71, 0x6b, 0x76, 0x40, - 0x20, 0x10, 0xaa, 0x88, 0xa0, 0x4f, 0x27, 0x13, - 0x2b, 0xdc, 0xb0, 0xbe, 0x5f, 0x2f, 0xe9, 0x8b, - 0x09, 0x5b, 0xad, 0xd6, 0xcf, 0x59, 0x1e, 0xe9, - 0x74, 0xba, 0xb7, 0xc6, 0xad, 0x7f, 0x3f, 0x1f - }; - const unsigned char *rc = RC; - uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9; - uint32_t t0, t1; - unsigned round; - - /* Load the block into local state variables */ - x0 = be_load_word32(block); - x1 = be_load_word32(block + 16); /* Assumes the block is pre-swapped */ - x2 = be_load_word32(block + 8); - x3 = be_load_word32(block + 12); - x4 = be_load_word32(block + 4); - x5 = be_load_word32(block + 20); - x6 = be_load_word32(block + 24); - x7 = be_load_word32(block + 28); - x8 = be_load_word32(block + 32); - x9 = be_load_word32(block + 36); - - /* Perform all permutation rounds */ - for (round = 0; round < 16; ++round, rc += 6) { - /* Apply Simeck-64 to three of the 64-bit sub-blocks */ - simeck64_box(x0, x1, rc[0]); - simeck64_box(x4, x5, rc[1]); - simeck64_box(x8, x9, rc[2]); - x6 ^= x8; - x7 ^= x9; - x2 ^= x4; - x3 ^= x5; - x8 ^= x0; - x9 ^= x1; - - /* Add step constants */ - x2 ^= 0xFFFFFFFFU; - x3 ^= 0xFFFFFF00U ^ rc[3]; - x6 ^= 0xFFFFFFFFU; - x7 ^= 0xFFFFFF00U ^ rc[4]; - x8 ^= 0xFFFFFFFFU; - x9 ^= 0xFFFFFF00U ^ rc[5]; - - /* Rotate the sub-blocks */ - t0 = x8; - t1 = x9; - x8 = x2; - x9 = x3; - x2 = x4; - x3 = x5; - x4 = x0; - x5 = x1; - x0 = x6; - x1 = x7; - x6 = t0; - x7 = t1; - } - - /* Store the state back into the block */ - be_store_word32(block, x0); - be_store_word32(block + 16, x1); /* Assumes the block is pre-swapped */ - be_store_word32(block + 8, x2); - be_store_word32(block + 12, x3); - be_store_word32(block + 4, x4); - be_store_word32(block + 20, x5); - be_store_word32(block + 24, x6); - be_store_word32(block + 28, x7); - 
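/* A small illustrative sketch of the 24-bit big-endian handling used by
 * sliscp_light192_permute() above: each 24-bit state word lives in the low
 * bits of a uint32_t.  load24()/store24() mirror the be_load_word24() /
 * be_store_word24() macros, and main() simply demonstrates the round trip
 * on one 3-byte word of the 24-byte state. */
#include <assert.h>
#include <stdint.h>

static uint32_t load24(const unsigned char *p)
{
    return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | (uint32_t)p[2];
}

static void store24(unsigned char *p, uint32_t x)
{
    p[0] = (unsigned char)(x >> 16);
    p[1] = (unsigned char)(x >> 8);
    p[2] = (unsigned char)x;
}

int main(void)
{
    unsigned char word[3] = { 0x12, 0x34, 0x56 };
    uint32_t x = load24(word);      /* 0x123456 in host byte order */
    store24(word, x);               /* writes the same bytes back */
    assert(load24(word) == 0x123456U);
    return 0;
}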
be_store_word32(block + 32, x8); - be_store_word32(block + 36, x9); -} - -void sliscp_light320_swap(unsigned char block[40]) -{ - uint32_t t1, t2; - t1 = le_load_word32(block + 4); - t2 = le_load_word32(block + 16); - le_store_word32(block + 16, t1); - le_store_word32(block + 4, t2); -} - -#endif /* !__AVR__ */ diff --git a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-sliscp-light.h b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-sliscp-light.h deleted file mode 100644 index 8a5e8d5..0000000 --- a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-sliscp-light.h +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SLISCP_LIGHT_H -#define LW_INTERNAL_SLISCP_LIGHT_H - -/** - * \file internal-sliscp-light.h - * \brief sLiSCP-light permutation - * - * There are three variants of sLiSCP-light in use in the NIST submissions: - * - * \li sLiSCP-light-256 with a 256-bit block size, used in SPIX and SpoC. - * \li sLiSCP-light-192 with a 192-bit block size, used in SpoC. - * \li sLiSCP-light-320 with a 320-bit block size, used in ACE. - * - * References: https://uwaterloo.ca/communications-security-lab/lwc/ace, - * https://uwaterloo.ca/communications-security-lab/lwc/spix, - * https://uwaterloo.ca/communications-security-lab/lwc/spoc - */ - -#include "internal-util.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the state for sLiSCP-light-256. - */ -#define SLISCP_LIGHT256_STATE_SIZE 32 - -/** - * \brief Size of the state for sLiSCP-light-192. - */ -#define SLISCP_LIGHT192_STATE_SIZE 24 - -/** - * \brief Size of the state for sLiSCP-light-320. - */ -#define SLISCP_LIGHT320_STATE_SIZE 40 - -/** - * \brief Performs the sLiSCP-light permutation on a 256-bit block. - * - * \param block Points to the block to be permuted. - * \param rounds Number of rounds to be performed, usually 9 or 18. - * - * The bytes of the block are assumed to be rearranged to match the - * requirements of the SPIX cipher. SPIX places the rate bytes at - * positions 8, 9, 10, 11, 24, 25, 26, and 27. - * - * This function assumes that bytes 24-27 have been pre-swapped with - * bytes 12-15 so that the rate portion of the state is contiguous. - * - * The sliscp_light256_swap_spix() function can be used to switch - * between the canonical order and the pre-swapped order. 
- * - * \sa sliscp_light256_swap_spix() - */ -void sliscp_light256_permute_spix(unsigned char block[32], unsigned rounds); - -/** - * \brief Swaps rate bytes in a sLiSCP-light 256-bit block for SPIX. - * - * \param block Points to the block to be rate-swapped. - * - * \sa sliscp_light256_permute_spix() - */ -void sliscp_light256_swap_spix(unsigned char block[32]); - -/** - * \brief Performs the sLiSCP-light permutation on a 256-bit block. - * - * \param block Points to the block to be permuted. - * - * The bytes of the block are assumed to be rearranged to match the - * requirements of the SpoC-128 cipher. SpoC-128 interleaves the - * rate bytes and the mask bytes. This version assumes that the - * rate and mask are in contiguous bytes of the state. - * - * SpoC-128 absorbs bytes using the mask bytes of the state at offsets - * 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, and 31. - * It squeezes bytes using the rate bytes of the state at offsets - * 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, and 23. - * - * This function assumes that bytes 8-15 have been pre-swapped with 16-23 - * so that the rate and mask portions of the state are contiguous. - * - * The sliscp_light256_swap_spoc() function can be used to switch - * between the canonical order and the pre-swapped order. - * - * \sa sliscp_light256_swap_spoc() - */ -void sliscp_light256_permute_spoc(unsigned char block[32]); - -/** - * \brief Swaps rate bytes in a sLiSCP-light 256-bit block for SpoC-128. - * - * \param block Points to the block to be rate-swapped. - * - * \sa sliscp_light256_permute_spoc() - */ -void sliscp_light256_swap_spoc(unsigned char block[32]); - -/** - * \brief Performs the sLiSCP-light permutation on a 192-bit block. - * - * \param block Points to the block to be permuted. - */ -void sliscp_light192_permute(unsigned char block[24]); - -/** - * \brief Performs the sLiSCP-light permutation on a 320-bit block. - * - * \param block Points to the block to be permuted. - * - * The ACE specification refers to this permutation as "ACE" but that - * can be confused with the name of the AEAD mode so we call this - * permutation "sLiSCP-light-320" instead. - * - * ACE absorbs and squeezes data at the rate bytes 0, 1, 2, 3, 16, 17, 18, 19. - * Efficiency can suffer because of the discontinuity in rate byte positions. - * - * To counteract this, we assume that the input to the permutation has been - * pre-swapped: bytes 4, 5, 6, 7 are swapped with bytes 16, 17, 18, 19 so - * that the rate is contiguous at the start of the state. - * - * The sliscp_light320_swap() function can be used to switch between the - * canonical order and the pre-swapped order. - * - * \sa sliscp_light320_swap() - */ -void sliscp_light320_permute(unsigned char block[40]); - -/** - * \brief Swaps rate bytes in a sLiSCP-light 320-bit block. - * - * \param block Points to the block to be rate-swapped. - * - * \sa sliscp_light320_permute() - */ -void sliscp_light320_swap(unsigned char block[40]); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-util.h b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) 
| \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = 
(src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/spoc.c b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/spoc.c deleted file mode 100644 index 92ee233..0000000 --- a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/spoc.c +++ /dev/null @@ -1,406 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "spoc.h" -#include "internal-sliscp-light.h" -#include "internal-util.h" -#include - -/** - * \brief Size of the state for the internal sLiSCP-light-256 permutation. - */ -#define SPOC_128_STATE_SIZE SLISCP_LIGHT256_STATE_SIZE - -/** - * \brief Rate for absorbing data into the sLiSCP-light-256 state and for - * squeezing data out again. - */ -#define SPOC_128_RATE 16 - -/** - * \brief Size of the state for the internal sLiSCP-light-192 permutation. - */ -#define SPOC_64_STATE_SIZE SLISCP_LIGHT192_STATE_SIZE - -/** - * \brief Rate for absorbing data into the sLiSCP-light-192 state and for - * squeezing data out again. 
- */ -#define SPOC_64_RATE 8 - -aead_cipher_t const spoc_128_cipher = { - "SpoC-128", - SPOC_KEY_SIZE, - SPOC_NONCE_SIZE, - SPOC_128_TAG_SIZE, - AEAD_FLAG_NONE, - spoc_128_aead_encrypt, - spoc_128_aead_decrypt -}; - -aead_cipher_t const spoc_64_cipher = { - "SpoC-64", - SPOC_KEY_SIZE, - SPOC_NONCE_SIZE, - SPOC_64_TAG_SIZE, - AEAD_FLAG_NONE, - spoc_64_aead_encrypt, - spoc_64_aead_decrypt -}; - -/* Indices of where a rate byte is located to help with padding */ -/* -static unsigned char const spoc_128_rate_posn[16] = { - 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 -}; -static unsigned char const spoc_128_mask_posn[16] = { - 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 -}; -*/ -static unsigned char const spoc_64_rate_posn[8] = { - 0, 1, 2, 3, 12, 13, 14, 15 -}; -static unsigned char const spoc_64_mask_posn[8] = { - 6, 7, 8, 9, 18, 19, 20, 21 -}; - -/** - * \brief Initializes the SpoC-128 state. - * - * \param state sLiSCP-light-256 permutation state. - * \param k Points to the 128-bit key. - * \param npub Points to the 128-bit nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - */ -static void spoc_128_init - (unsigned char state[SPOC_128_STATE_SIZE], - const unsigned char *k, const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned temp; - - /* Initialize the state by combining the key and nonce */ - memcpy(state, npub, 16); - memcpy(state + 16, k, 16); - - /* Absorb the associated data into the state */ - if (adlen != 0) { - while (adlen >= SPOC_128_RATE) { - sliscp_light256_permute_spoc(state); - lw_xor_block(state + 16, ad, SPOC_128_RATE); - state[0] ^= 0x20; /* domain separation */ - ad += SPOC_128_RATE; - adlen -= SPOC_128_RATE; - } - temp = (unsigned)adlen; - if (temp > 0) { - sliscp_light256_permute_spoc(state); - lw_xor_block(state + 16, ad, temp); - state[temp + 16] ^= 0x80; /* padding */ - state[0] ^= 0x30; /* domain separation */ - } - } -} - -/** - * \brief Initializes the SpoC-64 state. - * - * \param state sLiSCP-light-192 permutation state. - * \param k Points to the 128-bit key. - * \param npub Points to the 128-bit nonce. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - */ -static void spoc_64_init - (unsigned char state[SPOC_64_STATE_SIZE], - const unsigned char *k, const unsigned char *npub, - const unsigned char *ad, unsigned long long adlen) -{ - unsigned temp; - - /* Initialize the state by interleaving the key and nonce */ - memcpy(state, npub, 4); - state[4] = k[6]; - state[5] = k[7]; - memcpy(state + 6, k, 6); - memcpy(state + 12, npub + 4, 4); - state[16] = k[14]; - state[17] = k[15]; - memcpy(state + 18, k + 8, 6); - sliscp_light192_permute(state); - lw_xor_block(state + 6, npub + 8, 4); - lw_xor_block(state + 18, npub + 12, 4); - - /* Absorb the associated data into the state */ - if (adlen != 0) { - while (adlen >= SPOC_64_RATE) { - sliscp_light192_permute(state); - lw_xor_block(state + 6, ad, 4); - lw_xor_block(state + 18, ad + 4, 4); - state[0] ^= 0x20; /* domain separation */ - ad += SPOC_64_RATE; - adlen -= SPOC_64_RATE; - } - temp = (unsigned)adlen; - if (temp > 0) { - sliscp_light192_permute(state); - state[spoc_64_mask_posn[temp]] ^= 0x80; /* padding */ - state[0] ^= 0x30; /* domain separation */ - while (temp > 0) { - --temp; - state[spoc_64_mask_posn[temp]] ^= ad[temp]; - } - } - } -} - -/** - * \brief Finalizes the SpoC-128 encryption or decryption operation. 
- * - * \param state sLiSCP-light-256 permutation state. - * \param tag Points to the 16 byte buffer to receive the computed tag. - */ -static void spoc_128_finalize - (unsigned char state[SPOC_128_STATE_SIZE], unsigned char *tag) -{ - /* Pad and permute the state one more time */ - state[0] ^= 0x80; - sliscp_light256_permute_spoc(state); - - /* Copy out the authentication tag */ - memcpy(tag, state + 16, 16); -} - -/** - * \brief Finalizes the SpoC-64 encryption or decryption operation. - * - * \param state sLiSCP-light-192 permutation state. - * \param tag Points to the 16 byte buffer to receive the computed tag. - */ -static void spoc_64_finalize - (unsigned char state[SPOC_64_STATE_SIZE], unsigned char *tag) -{ - /* Pad and permute the state one more time */ - state[0] ^= 0x80; - sliscp_light192_permute(state); - - /* Copy out the authentication tag */ - memcpy(tag, state + 6, 4); - memcpy(tag + 4, state + 18, 4); -} - -int spoc_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[SPOC_128_STATE_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPOC_128_TAG_SIZE; - - /* Initialize the SpoC-128 state and absorb the associated data */ - spoc_128_init(state, k, npub, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen != 0) { - while (mlen >= SPOC_128_RATE) { - sliscp_light256_permute_spoc(state); - lw_xor_block(state + 16, m, SPOC_128_RATE); - lw_xor_block_2_src(c, m, state, SPOC_128_RATE); - state[0] ^= 0x40; /* domain separation */ - c += SPOC_128_RATE; - m += SPOC_128_RATE; - mlen -= SPOC_128_RATE; - } - if (mlen != 0) { - unsigned temp = (unsigned)mlen; - sliscp_light256_permute_spoc(state); - lw_xor_block(state + 16, m, temp); - lw_xor_block_2_src(c, m, state, temp); - state[temp + 16] ^= 0x80; /* padding */ - state[0] ^= 0x50; /* domain separation */ - c += mlen; - } - } - - /* Finalize and generate the authentication tag */ - spoc_128_finalize(state, c); - return 0; -} - -int spoc_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[SPOC_128_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SPOC_128_TAG_SIZE) - return -1; - *mlen = clen - SPOC_128_TAG_SIZE; - - /* Initialize the Spoc-128 state and absorb the associated data */ - spoc_128_init(state, k, npub, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPOC_128_TAG_SIZE; - if (clen != 0) { - while (clen >= SPOC_128_RATE) { - sliscp_light256_permute_spoc(state); - lw_xor_block_2_src(m, c, state, SPOC_128_RATE); - lw_xor_block(state + 16, m, SPOC_128_RATE); - state[0] ^= 0x40; /* domain separation */ - c += SPOC_128_RATE; - m += SPOC_128_RATE; - clen -= SPOC_128_RATE; - } - if (clen != 0) { - unsigned temp = (unsigned)clen; - sliscp_light256_permute_spoc(state); - lw_xor_block_2_src(m, c, state, temp); - lw_xor_block(state + 16, m, temp); - state[temp + 16] ^= 0x80; /* padding */ - state[0] ^= 0x50; /* domain separation */ - c += clen; - } - } - - /* Finalize and check the authentication tag */ - 
spoc_128_finalize(state, state); - return aead_check_tag(mtemp, *mlen, state, c, SPOC_128_TAG_SIZE); -} - -int spoc_64_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[SPOC_64_STATE_SIZE]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPOC_64_TAG_SIZE; - - /* Initialize the SpoC-64 state and absorb the associated data */ - spoc_64_init(state, k, npub, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen != 0) { - while (mlen >= SPOC_64_RATE) { - sliscp_light192_permute(state); - lw_xor_block(state + 6, m, 4); - lw_xor_block(state + 18, m + 4, 4); - lw_xor_block_2_src(c, m, state, 4); - lw_xor_block_2_src(c + 4, m + 4, state + 12, 4); - state[0] ^= 0x40; /* domain separation */ - c += SPOC_64_RATE; - m += SPOC_64_RATE; - mlen -= SPOC_64_RATE; - } - if (mlen != 0) { - unsigned temp = (unsigned)mlen; - sliscp_light192_permute(state); - state[spoc_64_mask_posn[temp]] ^= 0x80; /* padding */ - while (temp > 0) { - --temp; - unsigned char mbyte = m[temp]; - state[spoc_64_mask_posn[temp]] ^= mbyte; - c[temp] = mbyte ^ state[spoc_64_rate_posn[temp]]; - } - state[0] ^= 0x50; /* domain separation */ - c += mlen; - } - } - - /* Finalize and generate the authentication tag */ - spoc_64_finalize(state, c); - return 0; -} - -int spoc_64_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[SPOC_64_STATE_SIZE]; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SPOC_64_TAG_SIZE) - return -1; - *mlen = clen - SPOC_64_TAG_SIZE; - - /* Initialize the Spoc-64 state and absorb the associated data */ - spoc_64_init(state, k, npub, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPOC_64_TAG_SIZE; - if (clen != 0) { - while (clen >= SPOC_64_RATE) { - sliscp_light192_permute(state); - lw_xor_block_2_src(m, c, state, 4); - lw_xor_block_2_src(m + 4, c + 4, state + 12, 4); - lw_xor_block(state + 6, m, 4); - lw_xor_block(state + 18, m + 4, 4); - state[0] ^= 0x40; /* domain separation */ - c += SPOC_64_RATE; - m += SPOC_64_RATE; - clen -= SPOC_64_RATE; - } - if (clen != 0) { - unsigned temp = (unsigned)clen; - sliscp_light192_permute(state); - state[spoc_64_mask_posn[temp]] ^= 0x80; /* padding */ - while (temp > 0) { - --temp; - unsigned char mbyte = c[temp] ^ state[spoc_64_rate_posn[temp]]; - state[spoc_64_mask_posn[temp]] ^= mbyte; - m[temp] = mbyte; - } - state[0] ^= 0x50; /* domain separation */ - c += clen; - } - } - - /* Finalize and check the authentication tag */ - spoc_64_finalize(state, state); - return aead_check_tag(mtemp, *mlen, state, c, SPOC_64_TAG_SIZE); -} diff --git a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/spoc.h b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/spoc.h deleted file mode 100644 index 712c2d0..0000000 --- a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys-avr/spoc.h +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SPOC_H -#define LWCRYPTO_SPOC_H - -#include "aead-common.h" - -/** - * \file spoc.h - * \brief SpoC authenticated encryption algorithm. - * - * SpoC is a family of authenticated encryption algorithms with two - * members, SpoC-128 and Spoc-64. The algorithms use a Beetle-like - * sponge construction built on top of the sLiSCP-light permutation. - * - * \li Spoc-128 has a 128-bit key, a 128-bit nonce, and a 128-bit tag. - * It is built around the 256-bit version of the sLiSCP-light permutation. - * This is the primary member of the family. - * \li Spoc-64 has a 128-bit key, a 128-bit nonce, and a 64-bit tag. - * It is built around the 192-bit version of the sLiSCP-light permutation. - * - * Spoc-128 has good performance on small packets (16 bytes or less) - * on 32-bit embedded platforms. - * - * References: https://uwaterloo.ca/communications-security-lab/lwc/spoc - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all SpoC variants. - */ -#define SPOC_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for SpoC-128. - */ -#define SPOC_128_TAG_SIZE 16 - -/** - * \brief Size of the authentication tag for SpoC-64. - */ -#define SPOC_64_TAG_SIZE 8 - -/** - * \brief Size of the nonce for all SpoC variants. - */ -#define SPOC_NONCE_SIZE 16 - -/** - * \brief Meta-information block for the SpoC-128 cipher. - */ -extern aead_cipher_t const spoc_128_cipher; - -/** - * \brief Meta-information block for the SpoC-64 cipher. - */ -extern aead_cipher_t const spoc_64_cipher; - -/** - * \brief Encrypts and authenticates a packet with SpoC-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa spoc_128_aead_decrypt() - */ -int spoc_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SpoC-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa spoc_128_aead_encrypt() - */ -int spoc_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SpoC-64. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa spoc_64_aead_decrypt() - */ -int spoc_64_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SpoC-64. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. 
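[Editor's aside, not part of the patch: the SpoC-128 AEAD interface declared in this header follows the usual encrypt/decrypt pair shown above, with a 16-byte key, 16-byte nonce and 16-byte tag. The following minimal sketch is a hypothetical hosted test harness, assuming spoc.c and its dependencies are linked in; it simply round-trips a short message to illustrate how clen and mlen relate to the tag size.]

/* Sketch: round-trip a message through SpoC-128 using the API above. */
#include <stdio.h>
#include <string.h>
#include "spoc.h"

int main(void)
{
    unsigned char key[SPOC_KEY_SIZE] = {0};
    unsigned char nonce[SPOC_NONCE_SIZE] = {0};
    unsigned char msg[4] = {'t', 'e', 's', 't'};
    unsigned char ad[8] = {0};
    unsigned char ct[sizeof(msg) + SPOC_128_TAG_SIZE];
    unsigned char out[sizeof(msg)];
    unsigned long long clen, mlen;

    /* Encrypt: on return, clen == mlen + SPOC_128_TAG_SIZE (16 bytes). */
    spoc_128_aead_encrypt(ct, &clen, msg, sizeof(msg), ad, sizeof(ad),
                          NULL, nonce, key);

    /* Decrypt: returns 0 and recovers the plaintext only if the tag verifies. */
    if (spoc_128_aead_decrypt(out, &mlen, NULL, ct, clen, ad, sizeof(ad),
                              nonce, key) == 0 &&
        mlen == sizeof(msg) && memcmp(out, msg, mlen) == 0) {
        printf("SpoC-128 round trip OK\n");
    }
    return 0;
}

[The nsec parameter is unused by this algorithm, so passing NULL is harmless in this sketch; a NIST/SUPERCOP harness would pass its own buffer.]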
- * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa spoc_64_aead_encrypt() - */ -int spoc_64_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-sliscp-192-avr.S b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-sliscp-192-avr.S new file mode 100644 index 0000000..5860b14 --- /dev/null +++ b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-sliscp-192-avr.S @@ -0,0 +1,794 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 72 +table_0: + .byte 7 + .byte 39 + .byte 8 + .byte 41 + .byte 4 + .byte 52 + .byte 12 + .byte 29 + .byte 6 + .byte 46 + .byte 10 + .byte 51 + .byte 37 + .byte 25 + .byte 47 + .byte 42 + .byte 23 + .byte 53 + .byte 56 + .byte 31 + .byte 28 + .byte 15 + .byte 36 + .byte 16 + .byte 18 + .byte 8 + .byte 54 + .byte 24 + .byte 59 + .byte 12 + .byte 13 + .byte 20 + .byte 38 + .byte 10 + .byte 43 + .byte 30 + .byte 21 + .byte 47 + .byte 62 + .byte 49 + .byte 63 + .byte 56 + .byte 1 + .byte 9 + .byte 32 + .byte 36 + .byte 33 + .byte 45 + .byte 48 + .byte 54 + .byte 17 + .byte 27 + .byte 40 + .byte 13 + .byte 57 + .byte 22 + .byte 60 + .byte 43 + .byte 5 + .byte 61 + .byte 34 + .byte 62 + .byte 39 + .byte 3 + .byte 19 + .byte 1 + .byte 52 + .byte 2 + .byte 26 + .byte 33 + .byte 46 + .byte 35 + + .text +.global sliscp_light192_permute + .type sliscp_light192_permute, @function +sliscp_light192_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 +.L__stack_usage = 18 + ld r20,Z + ldd r19,Z+1 + ldd r18,Z+2 + ldd r21,Z+3 + ldd r23,Z+4 + ldd r22,Z+5 + ldd r28,Z+6 + ldd r27,Z+7 + ldd r26,Z+8 + ldd r29,Z+9 + ldd r3,Z+10 + ldd r2,Z+11 + ldd r6,Z+12 + ldd r5,Z+13 + ldd r4,Z+14 + ldd r7,Z+15 + ldd r9,Z+16 + ldd r8,Z+17 + ldd r12,Z+18 + ldd r11,Z+19 + ldd r10,Z+20 + ldd r13,Z+21 + ldd r15,Z+22 + ldd r14,Z+23 + push r31 + push r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r24,0 +28: + mov r30,r24 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + inc r24 + movw r16,r26 + mov r1,r28 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r2,r16 + eor r3,r17 + eor r29,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r26 + and r17,r27 + and r1,r28 + eor r2,r16 + eor r3,r17 + eor r29,r1 + com r3 + com r29 + ldi r16,255 + lsr r25 + rol r16 + eor r2,r16 + movw r16,r2 + mov r1,r29 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r26,r16 + eor r27,r17 + eor r28,r1 + bst r1,7 + 
lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r2 + and r17,r3 + and r1,r29 + eor r26,r16 + eor r27,r17 + eor r28,r1 + com r27 + com r28 + ldi r16,255 + lsr r25 + rol r16 + eor r26,r16 + movw r16,r26 + mov r1,r28 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r2,r16 + eor r3,r17 + eor r29,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r26 + and r17,r27 + and r1,r28 + eor r2,r16 + eor r3,r17 + eor r29,r1 + com r3 + com r29 + ldi r16,255 + lsr r25 + rol r16 + eor r2,r16 + movw r16,r2 + mov r1,r29 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r26,r16 + eor r27,r17 + eor r28,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r2 + and r17,r3 + and r1,r29 + eor r26,r16 + eor r27,r17 + eor r28,r1 + com r27 + com r28 + ldi r16,255 + lsr r25 + rol r16 + eor r26,r16 + movw r16,r26 + mov r1,r28 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r2,r16 + eor r3,r17 + eor r29,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r26 + and r17,r27 + and r1,r28 + eor r2,r16 + eor r3,r17 + eor r29,r1 + com r3 + com r29 + ldi r16,255 + lsr r25 + rol r16 + eor r2,r16 + movw r16,r2 + mov r1,r29 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r26,r16 + eor r27,r17 + eor r28,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r2 + and r17,r3 + and r1,r29 + eor r26,r16 + eor r27,r17 + eor r28,r1 + com r27 + com r28 + ldi r16,255 + lsr r25 + rol r16 + eor r26,r16 + mov r30,r24 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + inc r24 + movw r16,r10 + mov r1,r12 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r14,r16 + eor r15,r17 + eor r13,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r10 + and r17,r11 + and r1,r12 + eor r14,r16 + eor r15,r17 + eor r13,r1 + com r15 + com r13 + ldi r16,255 + lsr r25 + rol r16 + eor r14,r16 + movw r16,r14 + mov r1,r13 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r10,r16 + eor r11,r17 + eor r12,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r14 + and r17,r15 + and r1,r13 + eor r10,r16 + eor r11,r17 + eor r12,r1 + com r11 + com r12 + ldi r16,255 + lsr r25 + rol r16 + eor r10,r16 + movw r16,r10 + mov r1,r12 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r14,r16 + eor r15,r17 + eor r13,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst 
r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r10 + and r17,r11 + and r1,r12 + eor r14,r16 + eor r15,r17 + eor r13,r1 + com r15 + com r13 + ldi r16,255 + lsr r25 + rol r16 + eor r14,r16 + movw r16,r14 + mov r1,r13 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r10,r16 + eor r11,r17 + eor r12,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r14 + and r17,r15 + and r1,r13 + eor r10,r16 + eor r11,r17 + eor r12,r1 + com r11 + com r12 + ldi r16,255 + lsr r25 + rol r16 + eor r10,r16 + movw r16,r10 + mov r1,r12 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r14,r16 + eor r15,r17 + eor r13,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r10 + and r17,r11 + and r1,r12 + eor r14,r16 + eor r15,r17 + eor r13,r1 + com r15 + com r13 + ldi r16,255 + lsr r25 + rol r16 + eor r14,r16 + movw r16,r14 + mov r1,r13 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + eor r10,r16 + eor r11,r17 + eor r12,r1 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + bst r1,7 + lsl r16 + rol r17 + rol r1 + bld r16,0 + and r16,r14 + and r17,r15 + and r1,r13 + eor r10,r16 + eor r11,r17 + eor r12,r1 + com r11 + com r12 + ldi r16,255 + lsr r25 + rol r16 + eor r10,r16 + com r18 + com r19 + com r20 + com r23 + com r21 + mov r30,r24 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + eor r22,r25 + inc r24 + com r4 + com r5 + com r6 + com r9 + com r7 + mov r30,r24 +#if defined(RAMPZ) + elpm r25,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r25,Z +#elif defined(__AVR_TINY__) + ld r25,Z +#else + lpm + mov r25,r0 +#endif + eor r8,r25 + inc r24 + movw r16,r18 + mov r1,r20 + eor r16,r26 + eor r17,r27 + eor r1,r28 + movw r18,r26 + mov r20,r28 + movw r26,r4 + mov r28,r6 + eor r26,r10 + eor r27,r11 + eor r28,r12 + movw r4,r10 + mov r6,r12 + movw r10,r16 + mov r12,r1 + movw r16,r22 + mov r1,r21 + eor r16,r2 + eor r17,r3 + eor r1,r29 + movw r22,r2 + mov r21,r29 + movw r2,r8 + mov r29,r7 + eor r2,r14 + eor r3,r15 + eor r29,r13 + movw r8,r14 + mov r7,r13 + movw r14,r16 + mov r13,r1 + ldi r17,72 + cpse r24,r17 + rjmp 28b +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + st Z,r20 + std Z+1,r19 + std Z+2,r18 + std Z+3,r21 + std Z+4,r23 + std Z+5,r22 + std Z+6,r28 + std Z+7,r27 + std Z+8,r26 + std Z+9,r29 + std Z+10,r3 + std Z+11,r2 + std Z+12,r6 + std Z+13,r5 + std Z+14,r4 + std Z+15,r7 + std Z+16,r9 + std Z+17,r8 + std Z+18,r12 + std Z+19,r11 + std Z+20,r10 + std Z+21,r13 + std Z+22,r15 + std Z+23,r14 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + eor r1,r1 + ret + .size sliscp_light192_permute, .-sliscp_light192_permute + +#endif diff --git a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-sliscp-256-spoc-avr.S 
b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-sliscp-256-spoc-avr.S new file mode 100644 index 0000000..84925b4 --- /dev/null +++ b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-sliscp-256-spoc-avr.S @@ -0,0 +1,1142 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 72 +table_0: + .byte 15 + .byte 71 + .byte 8 + .byte 100 + .byte 4 + .byte 178 + .byte 134 + .byte 107 + .byte 67 + .byte 181 + .byte 226 + .byte 111 + .byte 241 + .byte 55 + .byte 137 + .byte 44 + .byte 68 + .byte 150 + .byte 230 + .byte 221 + .byte 115 + .byte 238 + .byte 202 + .byte 153 + .byte 229 + .byte 76 + .byte 23 + .byte 234 + .byte 11 + .byte 245 + .byte 142 + .byte 15 + .byte 71 + .byte 7 + .byte 100 + .byte 4 + .byte 178 + .byte 130 + .byte 107 + .byte 67 + .byte 181 + .byte 161 + .byte 111 + .byte 241 + .byte 55 + .byte 120 + .byte 44 + .byte 68 + .byte 150 + .byte 162 + .byte 221 + .byte 115 + .byte 238 + .byte 185 + .byte 153 + .byte 229 + .byte 76 + .byte 242 + .byte 234 + .byte 11 + .byte 245 + .byte 133 + .byte 15 + .byte 71 + .byte 7 + .byte 35 + .byte 4 + .byte 178 + .byte 130 + .byte 217 + .byte 67 + .byte 181 + + .text +.global sliscp_light256_permute_spoc + .type sliscp_light256_permute_spoc, @function +sliscp_light256_permute_spoc: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 31 + ld r21,Z + ldd r20,Z+1 + ldd r19,Z+2 + ldd r18,Z+3 + ldd r27,Z+4 + ldd r26,Z+5 + ldd r23,Z+6 + ldd r22,Z+7 + ldd r5,Z+8 + ldd r4,Z+9 + ldd r3,Z+10 + ldd r2,Z+11 + ldd r9,Z+12 + ldd r8,Z+13 + ldd r7,Z+14 + ldd r6,Z+15 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + std Y+5,r22 + std Y+6,r23 + std Y+7,r26 + std Y+8,r27 + std Y+9,r2 + std Y+10,r3 + std Y+11,r4 + std Y+12,r5 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + ldd r21,Z+16 + ldd r20,Z+17 + ldd r19,Z+18 + ldd r18,Z+19 + ldd r27,Z+20 + ldd r26,Z+21 + ldd r23,Z+22 + ldd r22,Z+23 + ldd r5,Z+24 + ldd r4,Z+25 + ldd r3,Z+26 + ldd r2,Z+27 + ldd r9,Z+28 + ldd r8,Z+29 + ldd r7,Z+30 + ldd r6,Z+31 + push r31 + push r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + mov r30,r1 +52: +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + inc r30 + movw r12,r18 + movw r14,r20 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r22,r12 + eor r23,r13 + eor r26,r14 + eor r27,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r18 + and r13,r19 + and r14,r20 + and r15,r21 + eor r22,r12 + eor r23,r13 + eor r26,r14 + eor r27,r15 + com r23 + com r26 + com r27 + ldi r24,255 + lsr r10 + rol r24 + eor r22,r24 + movw r12,r22 + movw r14,r26 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + 
rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r22 + and r13,r23 + and r14,r26 + and r15,r27 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + com r19 + com r20 + com r21 + ldi r24,255 + lsr r10 + rol r24 + eor r18,r24 + movw r12,r18 + movw r14,r20 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r22,r12 + eor r23,r13 + eor r26,r14 + eor r27,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r18 + and r13,r19 + and r14,r20 + and r15,r21 + eor r22,r12 + eor r23,r13 + eor r26,r14 + eor r27,r15 + com r23 + com r26 + com r27 + ldi r24,255 + lsr r10 + rol r24 + eor r22,r24 + movw r12,r22 + movw r14,r26 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r22 + and r13,r23 + and r14,r26 + and r15,r27 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + com r19 + com r20 + com r21 + ldi r24,255 + lsr r10 + rol r24 + eor r18,r24 + movw r12,r18 + movw r14,r20 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r22,r12 + eor r23,r13 + eor r26,r14 + eor r27,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r18 + and r13,r19 + and r14,r20 + and r15,r21 + eor r22,r12 + eor r23,r13 + eor r26,r14 + eor r27,r15 + com r23 + com r26 + com r27 + ldi r24,255 + lsr r10 + rol r24 + eor r22,r24 + movw r12,r22 + movw r14,r26 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r22 + and r13,r23 + and r14,r26 + and r15,r27 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + com r19 + com r20 + com r21 + ldi r24,255 + lsr r10 + rol r24 + eor r18,r24 + movw r12,r18 + movw r14,r20 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r22,r12 + eor r23,r13 + eor r26,r14 + eor r27,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r18 + and r13,r19 + and r14,r20 + and r15,r21 + eor r22,r12 + eor r23,r13 + eor r26,r14 + eor r27,r15 + com r23 + com r26 + com r27 + ldi r24,255 + lsr r10 + rol r24 + eor r22,r24 + movw r12,r22 + movw r14,r26 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r22 + and r13,r23 + and r14,r26 + and r15,r27 + eor r18,r12 + eor r19,r13 + eor r20,r14 + eor r21,r15 + com r19 + com r20 + com r21 + ldi r24,255 + lsr r10 + rol r24 + eor r18,r24 +#if defined(RAMPZ) + 
elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + inc r30 + movw r12,r2 + movw r14,r4 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r6,r12 + eor r7,r13 + eor r8,r14 + eor r9,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r2 + and r13,r3 + and r14,r4 + and r15,r5 + eor r6,r12 + eor r7,r13 + eor r8,r14 + eor r9,r15 + com r7 + com r8 + com r9 + ldi r24,255 + lsr r10 + rol r24 + eor r6,r24 + movw r12,r6 + movw r14,r8 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r2,r12 + eor r3,r13 + eor r4,r14 + eor r5,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r6 + and r13,r7 + and r14,r8 + and r15,r9 + eor r2,r12 + eor r3,r13 + eor r4,r14 + eor r5,r15 + com r3 + com r4 + com r5 + ldi r24,255 + lsr r10 + rol r24 + eor r2,r24 + movw r12,r2 + movw r14,r4 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r6,r12 + eor r7,r13 + eor r8,r14 + eor r9,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r2 + and r13,r3 + and r14,r4 + and r15,r5 + eor r6,r12 + eor r7,r13 + eor r8,r14 + eor r9,r15 + com r7 + com r8 + com r9 + ldi r24,255 + lsr r10 + rol r24 + eor r6,r24 + movw r12,r6 + movw r14,r8 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r2,r12 + eor r3,r13 + eor r4,r14 + eor r5,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r6 + and r13,r7 + and r14,r8 + and r15,r9 + eor r2,r12 + eor r3,r13 + eor r4,r14 + eor r5,r15 + com r3 + com r4 + com r5 + ldi r24,255 + lsr r10 + rol r24 + eor r2,r24 + movw r12,r2 + movw r14,r4 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r6,r12 + eor r7,r13 + eor r8,r14 + eor r9,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r2 + and r13,r3 + and r14,r4 + and r15,r5 + eor r6,r12 + eor r7,r13 + eor r8,r14 + eor r9,r15 + com r7 + com r8 + com r9 + ldi r24,255 + lsr r10 + rol r24 + eor r6,r24 + movw r12,r6 + movw r14,r8 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r2,r12 + eor r3,r13 + eor r4,r14 + eor r5,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r6 + and r13,r7 + and r14,r8 + and r15,r9 + eor r2,r12 + eor r3,r13 + eor r4,r14 + eor r5,r15 + com r3 + com r4 + com r5 + ldi r24,255 + lsr r10 + rol r24 + eor r2,r24 + movw r12,r2 + movw r14,r4 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r6,r12 + eor r7,r13 + eor r8,r14 + eor r9,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol 
r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r2 + and r13,r3 + and r14,r4 + and r15,r5 + eor r6,r12 + eor r7,r13 + eor r8,r14 + eor r9,r15 + com r7 + com r8 + com r9 + ldi r24,255 + lsr r10 + rol r24 + eor r6,r24 + movw r12,r6 + movw r14,r8 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + eor r2,r12 + eor r3,r13 + eor r4,r14 + eor r5,r15 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + lsl r12 + rol r13 + rol r14 + rol r15 + adc r12,r1 + and r12,r6 + and r13,r7 + and r14,r8 + and r15,r9 + eor r2,r12 + eor r3,r13 + eor r4,r14 + eor r5,r15 + com r3 + com r4 + com r5 + ldi r24,255 + lsr r10 + rol r24 + eor r2,r24 + ldd r12,Y+1 + ldd r13,Y+2 + ldd r14,Y+3 + ldd r15,Y+4 + com r12 + com r13 + com r14 + com r15 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + std Y+1,r18 + std Y+2,r19 + std Y+3,r20 + std Y+4,r21 + ldd r18,Y+9 + ldd r19,Y+10 + ldd r20,Y+11 + ldd r21,Y+12 + com r18 + com r19 + com r20 + com r21 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + std Y+9,r2 + std Y+10,r3 + std Y+11,r4 + std Y+12,r5 + movw r2,r12 + movw r4,r14 + ldd r12,Y+5 + ldd r13,Y+6 + ldd r14,Y+7 + ldd r15,Y+8 + com r13 + com r14 + com r15 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + eor r12,r10 + inc r30 + eor r12,r22 + eor r13,r23 + eor r14,r26 + eor r15,r27 + std Y+5,r22 + std Y+6,r23 + std Y+7,r26 + std Y+8,r27 + ldd r22,Y+13 + ldd r23,Y+14 + ldd r26,Y+15 + ldd r27,Y+16 + com r23 + com r26 + com r27 +#if defined(RAMPZ) + elpm r10,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r10,Z +#elif defined(__AVR_TINY__) + ld r10,Z +#else + lpm + mov r10,r0 +#endif + eor r22,r10 + inc r30 + eor r22,r6 + eor r23,r7 + eor r26,r8 + eor r27,r9 + std Y+13,r6 + std Y+14,r7 + std Y+15,r8 + std Y+16,r9 + movw r6,r12 + movw r8,r14 + ldi r25,72 + cpse r30,r25 + rjmp 52b +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + pop r30 + pop r31 + std Z+16,r21 + std Z+17,r20 + std Z+18,r19 + std Z+19,r18 + std Z+20,r27 + std Z+21,r26 + std Z+22,r23 + std Z+23,r22 + std Z+24,r5 + std Z+25,r4 + std Z+26,r3 + std Z+27,r2 + std Z+28,r9 + std Z+29,r8 + std Z+30,r7 + std Z+31,r6 + ldd r18,Y+1 + ldd r19,Y+2 + ldd r20,Y+3 + ldd r21,Y+4 + ldd r22,Y+5 + ldd r23,Y+6 + ldd r26,Y+7 + ldd r27,Y+8 + ldd r2,Y+9 + ldd r3,Y+10 + ldd r4,Y+11 + ldd r5,Y+12 + ldd r6,Y+13 + ldd r7,Y+14 + ldd r8,Y+15 + ldd r9,Y+16 + st Z,r21 + std Z+1,r20 + std Z+2,r19 + std Z+3,r18 + std Z+4,r27 + std Z+5,r26 + std Z+6,r23 + std Z+7,r22 + std Z+8,r5 + std Z+9,r4 + std Z+10,r3 + std Z+11,r2 + std Z+12,r9 + std Z+13,r8 + std Z+14,r7 + std Z+15,r6 + adiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r15 + pop r14 + pop r13 + pop r12 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size sliscp_light256_permute_spoc, .-sliscp_light256_permute_spoc + + .text +.global sliscp_light256_swap_spoc + .type sliscp_light256_swap_spoc, @function +sliscp_light256_swap_spoc: + movw r30,r24 +.L__stack_usage = 2 + ldd r18,Z+8 + ldd r19,Z+9 + ldd r20,Z+10 + ldd r21,Z+11 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r26,Z+18 + ldd r27,Z+19 + std Z+16,r18 + std Z+17,r19 + std Z+18,r20 + std Z+19,r21 + std Z+8,r22 + std Z+9,r23 + std Z+10,r26 + std Z+11,r27 + ldd r18,Z+12 + ldd r19,Z+13 + ldd r20,Z+14 + ldd r21,Z+15 + ldd r22,Z+20 + ldd 
r23,Z+21 + ldd r26,Z+22 + ldd r27,Z+23 + std Z+20,r18 + std Z+21,r19 + std Z+22,r20 + std Z+23,r21 + std Z+12,r22 + std Z+13,r23 + std Z+14,r26 + std Z+15,r27 + ret + .size sliscp_light256_swap_spoc, .-sliscp_light256_swap_spoc + +#endif diff --git a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-sliscp-light.c b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-sliscp-light.c index 69b4519..dd3a688 100644 --- a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-sliscp-light.c +++ b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-sliscp-light.c @@ -22,6 +22,8 @@ #include "internal-sliscp-light.h" +#if !defined(__AVR__) + /** * \brief Performs one round of the Simeck-64 block cipher. * @@ -173,11 +175,12 @@ void sliscp_light256_swap_spix(unsigned char block[32]) le_store_word32(block + 12, t2); } -void sliscp_light256_permute_spoc(unsigned char block[32], unsigned rounds) +void sliscp_light256_permute_spoc(unsigned char block[32]) { const unsigned char *rc = sliscp_light256_RC; uint32_t x0, x1, x2, x3, x4, x5, x6, x7; uint32_t t0, t1; + unsigned round; /* Load the block into local state variables */ x0 = be_load_word32(block); @@ -190,7 +193,7 @@ void sliscp_light256_permute_spoc(unsigned char block[32], unsigned rounds) x7 = be_load_word32(block + 28); /* Perform all permutation rounds */ - for (; rounds > 0; --rounds, rc += 4) { + for (round = 0; round < 18; ++round, rc += 4) { /* Apply Simeck-64 to two of the 64-bit sub-blocks */ simeck64_box(x2, x3, rc[0]); simeck64_box(x6, x7, rc[1]); @@ -406,3 +409,5 @@ void sliscp_light320_swap(unsigned char block[40]) le_store_word32(block + 16, t1); le_store_word32(block + 4, t2); } + +#endif /* !__AVR__ */ diff --git a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-sliscp-light.h b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-sliscp-light.h index fa6b9ba..8a5e8d5 100644 --- a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-sliscp-light.h +++ b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-sliscp-light.h @@ -92,7 +92,6 @@ void sliscp_light256_swap_spix(unsigned char block[32]); * \brief Performs the sLiSCP-light permutation on a 256-bit block. * * \param block Points to the block to be permuted. - * \param rounds Number of rounds to be performed, usually 9 or 18. * * The bytes of the block are assumed to be rearranged to match the * requirements of the SpoC-128 cipher. SpoC-128 interleaves the @@ -112,7 +111,7 @@ void sliscp_light256_swap_spix(unsigned char block[32]); * * \sa sliscp_light256_swap_spoc() */ -void sliscp_light256_permute_spoc(unsigned char block[32], unsigned rounds); +void sliscp_light256_permute_spoc(unsigned char block[32]); /** * \brief Swaps rate bytes in a sLiSCP-light 256-bit block for SpoC-128. diff --git a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-util.h b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-util.h +++ b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. 
*/ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + 
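[Editor's aside, not part of the patch: the composed rotations introduced here rely on the identity that any 32-bit rotation can be built from one cheap rotation by a multiple of 8 plus at most four single-bit rotations, since those are the only rotations AVR can do without a barrel shifter. A minimal host-side sketch, under the assumption of a standard C toolchain and independent of these macros, checks one such composition: rotate-left-by-5 expressed as rotate-left-by-8 followed by three rotate-right-by-1, which is exactly what leftRotate5() expands to when LW_CRYPTO_ROTATE32_COMPOSED is enabled.]

/* Sketch: verify leftRotate5-style composition against a direct rotate. */
#include <stdint.h>
#include <stdio.h>

static uint32_t rotl32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32 - bits));
}

static uint32_t rotr32(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32 - bits));
}

int main(void)
{
    uint32_t x = 0x12345678;
    uint32_t direct   = rotl32(x, 5);                               /* plain rotate */
    uint32_t composed = rotr32(rotr32(rotr32(rotl32(x, 8), 1), 1), 1); /* 8 left, 3 right */
    printf("direct=%08lx composed=%08lx\n",
           (unsigned long)direct, (unsigned long)composed);
    return direct == composed ? 0 : 1;
}

[On AVR the byte-multiple rotation is a register rename and each single-bit rotation is a short carry chain, which is why the macros below never compose more than four single-bit steps.]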
+/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/spoc.c b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/spoc.c index 1af7d59..92ee233 100644 --- a/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/spoc.c +++ b/spoc/Implementations/crypto_aead/spoc64sliscplight192v1/rhys/spoc.c @@ -106,7 +106,7 @@ static void spoc_128_init /* Absorb the associated data into the state */ if (adlen != 0) { while (adlen >= SPOC_128_RATE) { - sliscp_light256_permute_spoc(state, 18); + sliscp_light256_permute_spoc(state); lw_xor_block(state + 16, ad, SPOC_128_RATE); state[0] ^= 0x20; /* domain separation */ ad += SPOC_128_RATE; @@ -114,7 +114,7 @@ static void spoc_128_init } temp = (unsigned)adlen; if (temp > 0) { - sliscp_light256_permute_spoc(state, 18); + sliscp_light256_permute_spoc(state); lw_xor_block(state + 16, ad, temp); 
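The repeated one-line change in these spoc.c hunks drops the explicit step count from the spoc-specialised permutation call. The matching header change is not reproduced here, but judging from the call sites alone the declarations would have changed roughly as sketched below (the 32-byte state type is an assumption based on the 256-bit sLiSCP-light state):

/* before: the caller passed the number of steps (always 18 for SpoC-128) */
void sliscp_light256_permute_spoc(unsigned char state[32], unsigned steps);

/* after: the step count is presumably fixed inside the function */
void sliscp_light256_permute_spoc(unsigned char state[32]);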
state[temp + 16] ^= 0x80; /* padding */ state[0] ^= 0x30; /* domain separation */ @@ -185,7 +185,7 @@ static void spoc_128_finalize { /* Pad and permute the state one more time */ state[0] ^= 0x80; - sliscp_light256_permute_spoc(state, 18); + sliscp_light256_permute_spoc(state); /* Copy out the authentication tag */ memcpy(tag, state + 16, 16); @@ -229,7 +229,7 @@ int spoc_128_aead_encrypt /* Encrypt the plaintext to produce the ciphertext */ if (mlen != 0) { while (mlen >= SPOC_128_RATE) { - sliscp_light256_permute_spoc(state, 18); + sliscp_light256_permute_spoc(state); lw_xor_block(state + 16, m, SPOC_128_RATE); lw_xor_block_2_src(c, m, state, SPOC_128_RATE); state[0] ^= 0x40; /* domain separation */ @@ -239,7 +239,7 @@ int spoc_128_aead_encrypt } if (mlen != 0) { unsigned temp = (unsigned)mlen; - sliscp_light256_permute_spoc(state, 18); + sliscp_light256_permute_spoc(state); lw_xor_block(state + 16, m, temp); lw_xor_block_2_src(c, m, state, temp); state[temp + 16] ^= 0x80; /* padding */ @@ -277,7 +277,7 @@ int spoc_128_aead_decrypt clen -= SPOC_128_TAG_SIZE; if (clen != 0) { while (clen >= SPOC_128_RATE) { - sliscp_light256_permute_spoc(state, 18); + sliscp_light256_permute_spoc(state); lw_xor_block_2_src(m, c, state, SPOC_128_RATE); lw_xor_block(state + 16, m, SPOC_128_RATE); state[0] ^= 0x40; /* domain separation */ @@ -287,7 +287,7 @@ int spoc_128_aead_decrypt } if (clen != 0) { unsigned temp = (unsigned)clen; - sliscp_light256_permute_spoc(state, 18); + sliscp_light256_permute_spoc(state); lw_xor_block_2_src(m, c, state, temp); lw_xor_block(state + 16, m, temp); state[temp + 16] ^= 0x80; /* padding */ diff --git a/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/aead-common.c b/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/aead-common.h b/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. 
- * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
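The aead_cipher_t structure defined in this header is how an algorithm advertises its parameters and entry points to generic drivers. Purely as an illustration, a hypothetical cipher with a 16-byte key, nonce, and tag could be described as follows (my_aead_encrypt and my_aead_decrypt are placeholders, not functions from this library):

#include "aead-common.h"

/* Placeholders with the aead_cipher_encrypt_t / aead_cipher_decrypt_t
 * signatures declared above. */
extern int my_aead_encrypt
    (unsigned char *c, unsigned long long *clen,
     const unsigned char *m, unsigned long long mlen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *nsec,
     const unsigned char *npub,
     const unsigned char *k);
extern int my_aead_decrypt
    (unsigned char *m, unsigned long long *mlen,
     unsigned char *nsec,
     const unsigned char *c, unsigned long long clen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *npub,
     const unsigned char *k);

static const aead_cipher_t my_cipher = {
    "MyCipher",              /* name */
    16,                      /* key_len */
    16,                      /* nonce_len */
    16,                      /* tag_len */
    AEAD_FLAG_LITTLE_ENDIAN, /* flags */
    my_aead_encrypt,         /* encrypt */
    my_aead_decrypt          /* decrypt */
};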
- * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/api.h b/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/api.h deleted file mode 100644 index fb1dab8..0000000 --- a/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 32 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/encrypt.c b/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/encrypt.c deleted file mode 100644 index df13efc..0000000 --- a/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "spook.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return spook_128_384_mu_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return spook_128_384_mu_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/internal-spook.c b/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/internal-spook.c deleted file mode 100644 index 0e19216..0000000 --- a/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/internal-spook.c +++ /dev/null @@ -1,557 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-spook.h" - -/** - * \brief Number of steps in the Clyde-128 block cipher. - * - * This is also the number of steps in the Shadow-512 and Shadow-384 - * permutations. - */ -#define CLYDE128_STEPS 6 - -/** - * \brief Round constants for the steps of Clyde-128. - */ -static uint8_t const rc[CLYDE128_STEPS][8] = { - {1, 0, 0, 0, 0, 1, 0, 0}, - {0, 0, 1, 0, 0, 0, 0, 1}, - {1, 1, 0, 0, 0, 1, 1, 0}, - {0, 0, 1, 1, 1, 1, 0, 1}, - {1, 0, 1, 0, 0, 1, 0, 1}, - {1, 1, 1, 0, 0, 1, 1, 1} -}; - -void clyde128_encrypt(const unsigned char key[CLYDE128_KEY_SIZE], - const uint32_t tweak[CLYDE128_TWEAK_SIZE / 4], - uint32_t output[CLYDE128_BLOCK_SIZE / 4], - const uint32_t input[CLYDE128_BLOCK_SIZE / 4]) -{ - uint32_t k0, k1, k2, k3; - uint32_t t0, t1, t2, t3; - uint32_t s0, s1, s2, s3; - uint32_t c, d; - int step; - - /* Unpack the key, tweak, and state */ - k0 = le_load_word32(key); - k1 = le_load_word32(key + 4); - k2 = le_load_word32(key + 8); - k3 = le_load_word32(key + 12); -#if defined(LW_UTIL_LITTLE_ENDIAN) - t0 = tweak[0]; - t1 = tweak[1]; - t2 = tweak[2]; - t3 = tweak[3]; - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; -#else - t0 = le_load_word32((const unsigned char *)&(tweak[0])); - t1 = le_load_word32((const unsigned char *)&(tweak[1])); - t2 = le_load_word32((const unsigned char *)&(tweak[2])); - t3 = le_load_word32((const unsigned char *)&(tweak[3])); - s0 = le_load_word32((const unsigned char *)&(input[0])); - s1 = le_load_word32((const unsigned char *)&(input[1])); - s2 = le_load_word32((const unsigned char *)&(input[2])); - s3 = le_load_word32((const unsigned char *)&(input[3])); -#endif - - /* Add the initial tweakey to the state */ - s0 ^= k0 ^ t0; - s1 ^= k1 ^ t1; - s2 ^= k2 ^ t2; - s3 ^= k3 ^ t3; - - /* Perform all rounds in pairs */ - for (step = 0; step < CLYDE128_STEPS; ++step) { - /* Perform the two rounds of this step */ - #define clyde128_sbox(s0, s1, s2, s3) \ - do { \ - c = (s0 & s1) ^ s2; \ - d = (s3 & s0) ^ s1; \ - s2 = (c & d) ^ s3; \ - s3 = (c & s3) ^ s0; \ - s0 = d; \ - s1 = c; \ - } while (0) - #define clyde128_lbox(x, y) \ - do { \ - c = x ^ rightRotate12(x); \ - d = y ^ rightRotate12(y); \ - c ^= rightRotate3(c); \ - d ^= rightRotate3(d); \ - x = c ^ leftRotate15(x); \ - y = d ^ leftRotate15(y); \ - c = x ^ leftRotate1(x); \ - d = y ^ leftRotate1(y); \ - x ^= leftRotate6(d); \ - y ^= leftRotate7(c); \ - x ^= rightRotate15(c); \ - y ^= rightRotate15(d); \ - } while (0) - clyde128_sbox(s0, s1, s2, s3); - clyde128_lbox(s0, s1); - clyde128_lbox(s2, s3); - s0 ^= rc[step][0]; - s1 ^= rc[step][1]; - s2 ^= rc[step][2]; - s3 ^= rc[step][3]; - clyde128_sbox(s0, s1, s2, s3); - clyde128_lbox(s0, s1); - clyde128_lbox(s2, s3); - s0 ^= rc[step][4]; - s1 ^= rc[step][5]; - s2 ^= rc[step][6]; - s3 ^= rc[step][7]; - - /* Update the tweakey on the fly and add it to the state */ - c = t2 ^ t0; - d = t3 ^ t1; - t2 = t0; - t3 = t1; - t0 = c; - t1 = d; - s0 ^= k0 ^ t0; - s1 ^= k1 ^ t1; - s2 ^= k2 ^ t2; - s3 ^= k3 ^ t3; - } - - /* Pack the state into the output buffer */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -#else - le_store_word32((unsigned char *)&(output[0]), s0); - le_store_word32((unsigned char 
*)&(output[1]), s1); - le_store_word32((unsigned char *)&(output[2]), s2); - le_store_word32((unsigned char *)&(output[3]), s3); -#endif -} - -void clyde128_decrypt(const unsigned char key[CLYDE128_KEY_SIZE], - const uint32_t tweak[CLYDE128_TWEAK_SIZE / 4], - uint32_t output[CLYDE128_BLOCK_SIZE / 4], - const unsigned char input[CLYDE128_BLOCK_SIZE]) -{ - uint32_t k0, k1, k2, k3; - uint32_t t0, t1, t2, t3; - uint32_t s0, s1, s2, s3; - uint32_t a, b, d; - int step; - - /* Unpack the key, tweak, and state */ - k0 = le_load_word32(key); - k1 = le_load_word32(key + 4); - k2 = le_load_word32(key + 8); - k3 = le_load_word32(key + 12); -#if defined(LW_UTIL_LITTLE_ENDIAN) - t0 = tweak[0]; - t1 = tweak[1]; - t2 = tweak[2]; - t3 = tweak[3]; -#else - t0 = le_load_word32((const unsigned char *)&(tweak[0])); - t1 = le_load_word32((const unsigned char *)&(tweak[1])); - t2 = le_load_word32((const unsigned char *)&(tweak[2])); - t3 = le_load_word32((const unsigned char *)&(tweak[3])); -#endif - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all rounds in pairs */ - for (step = CLYDE128_STEPS - 1; step >= 0; --step) { - /* Add the tweakey to the state and update the tweakey */ - s0 ^= k0 ^ t0; - s1 ^= k1 ^ t1; - s2 ^= k2 ^ t2; - s3 ^= k3 ^ t3; - a = t2 ^ t0; - b = t3 ^ t1; - t0 = t2; - t1 = t3; - t2 = a; - t3 = b; - - /* Perform the two rounds of this step */ - #define clyde128_inv_sbox(s0, s1, s2, s3) \ - do { \ - d = (s0 & s1) ^ s2; \ - a = (s1 & d) ^ s3; \ - b = (d & a) ^ s0; \ - s2 = (a & b) ^ s1; \ - s0 = a; \ - s1 = b; \ - s3 = d; \ - } while (0) - #define clyde128_inv_lbox(x, y) \ - do { \ - a = x ^ leftRotate7(x); \ - b = y ^ leftRotate7(y); \ - x ^= leftRotate1(a); \ - y ^= leftRotate1(b); \ - x ^= leftRotate12(a); \ - y ^= leftRotate12(b); \ - a = x ^ leftRotate1(x); \ - b = y ^ leftRotate1(y); \ - x ^= leftRotate6(b); \ - y ^= leftRotate7(a); \ - a ^= leftRotate15(x); \ - b ^= leftRotate15(y); \ - x = rightRotate16(a); \ - y = rightRotate16(b); \ - } while (0) - s0 ^= rc[step][4]; - s1 ^= rc[step][5]; - s2 ^= rc[step][6]; - s3 ^= rc[step][7]; - clyde128_inv_lbox(s0, s1); - clyde128_inv_lbox(s2, s3); - clyde128_inv_sbox(s0, s1, s2, s3); - s0 ^= rc[step][0]; - s1 ^= rc[step][1]; - s2 ^= rc[step][2]; - s3 ^= rc[step][3]; - clyde128_inv_lbox(s0, s1); - clyde128_inv_lbox(s2, s3); - clyde128_inv_sbox(s0, s1, s2, s3); - } - - /* Add the tweakey to the state one last time */ - s0 ^= k0 ^ t0; - s1 ^= k1 ^ t1; - s2 ^= k2 ^ t2; - s3 ^= k3 ^ t3; - - /* Pack the state into the output buffer */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -#else - le_store_word32((unsigned char *)&(output[0]), s0); - le_store_word32((unsigned char *)&(output[1]), s1); - le_store_word32((unsigned char *)&(output[2]), s2); - le_store_word32((unsigned char *)&(output[3]), s3); -#endif -} - -void shadow512(shadow512_state_t *state) -{ - uint32_t s00, s01, s02, s03; - uint32_t s10, s11, s12, s13; - uint32_t s20, s21, s22, s23; - uint32_t s30, s31, s32, s33; - uint32_t c, d, w, x, y, z; - int step; - - /* Unpack the state into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s00 = state->W[0]; - s01 = state->W[1]; - s02 = state->W[2]; - s03 = state->W[3]; - s10 = state->W[4]; - s11 = state->W[5]; - s12 = state->W[6]; - s13 = state->W[7]; - s20 = state->W[8]; - s21 = state->W[9]; - s22 = state->W[10]; - s23 = state->W[11]; - s30 = state->W[12]; - s31 = state->W[13]; 
- s32 = state->W[14]; - s33 = state->W[15]; -#else - s00 = le_load_word32(state->B); - s01 = le_load_word32(state->B + 4); - s02 = le_load_word32(state->B + 8); - s03 = le_load_word32(state->B + 12); - s10 = le_load_word32(state->B + 16); - s11 = le_load_word32(state->B + 20); - s12 = le_load_word32(state->B + 24); - s13 = le_load_word32(state->B + 28); - s20 = le_load_word32(state->B + 32); - s21 = le_load_word32(state->B + 36); - s22 = le_load_word32(state->B + 40); - s23 = le_load_word32(state->B + 44); - s30 = le_load_word32(state->B + 48); - s31 = le_load_word32(state->B + 52); - s32 = le_load_word32(state->B + 56); - s33 = le_load_word32(state->B + 60); -#endif - - /* Perform all rounds in pairs */ - for (step = 0; step < CLYDE128_STEPS; ++step) { - /* Apply the S-box and L-box to bundle 0 */ - clyde128_sbox(s00, s01, s02, s03); - clyde128_lbox(s00, s01); - clyde128_lbox(s02, s03); - s00 ^= rc[step][0]; - s01 ^= rc[step][1]; - s02 ^= rc[step][2]; - s03 ^= rc[step][3]; - clyde128_sbox(s00, s01, s02, s03); - - /* Apply the S-box and L-box to bundle 1 */ - clyde128_sbox(s10, s11, s12, s13); - clyde128_lbox(s10, s11); - clyde128_lbox(s12, s13); - s10 ^= rc[step][0] << 1; - s11 ^= rc[step][1] << 1; - s12 ^= rc[step][2] << 1; - s13 ^= rc[step][3] << 1; - clyde128_sbox(s10, s11, s12, s13); - - /* Apply the S-box and L-box to bundle 2 */ - clyde128_sbox(s20, s21, s22, s23); - clyde128_lbox(s20, s21); - clyde128_lbox(s22, s23); - s20 ^= rc[step][0] << 2; - s21 ^= rc[step][1] << 2; - s22 ^= rc[step][2] << 2; - s23 ^= rc[step][3] << 2; - clyde128_sbox(s20, s21, s22, s23); - - /* Apply the S-box and L-box to bundle 3 */ - clyde128_sbox(s30, s31, s32, s33); - clyde128_lbox(s30, s31); - clyde128_lbox(s32, s33); - s30 ^= rc[step][0] << 3; - s31 ^= rc[step][1] << 3; - s32 ^= rc[step][2] << 3; - s33 ^= rc[step][3] << 3; - clyde128_sbox(s30, s31, s32, s33); - - /* Apply the diffusion layer to the rows of the state */ - #define shadow512_diffusion_layer(row) \ - do { \ - w = s0##row; \ - x = s1##row; \ - y = s2##row; \ - z = s3##row; \ - c = w ^ x; \ - d = y ^ z; \ - s0##row = x ^ d; \ - s1##row = w ^ d; \ - s2##row = c ^ z; \ - s3##row = c ^ y; \ - } while (0) - shadow512_diffusion_layer(0); - shadow512_diffusion_layer(1); - shadow512_diffusion_layer(2); - shadow512_diffusion_layer(3); - - /* Add round constants to all bundles again */ - s00 ^= rc[step][4]; - s01 ^= rc[step][5]; - s02 ^= rc[step][6]; - s03 ^= rc[step][7]; - s10 ^= rc[step][4] << 1; - s11 ^= rc[step][5] << 1; - s12 ^= rc[step][6] << 1; - s13 ^= rc[step][7] << 1; - s20 ^= rc[step][4] << 2; - s21 ^= rc[step][5] << 2; - s22 ^= rc[step][6] << 2; - s23 ^= rc[step][7] << 2; - s30 ^= rc[step][4] << 3; - s31 ^= rc[step][5] << 3; - s32 ^= rc[step][6] << 3; - s33 ^= rc[step][7] << 3; - } - - /* Pack the local variables back into the state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->W[0] = s00; - state->W[1] = s01; - state->W[2] = s02; - state->W[3] = s03; - state->W[4] = s10; - state->W[5] = s11; - state->W[6] = s12; - state->W[7] = s13; - state->W[8] = s20; - state->W[9] = s21; - state->W[10] = s22; - state->W[11] = s23; - state->W[12] = s30; - state->W[13] = s31; - state->W[14] = s32; - state->W[15] = s33; -#else - le_store_word32(state->B, s00); - le_store_word32(state->B + 4, s01); - le_store_word32(state->B + 8, s02); - le_store_word32(state->B + 12, s03); - le_store_word32(state->B + 16, s10); - le_store_word32(state->B + 20, s11); - le_store_word32(state->B + 24, s12); - le_store_word32(state->B + 28, s13); - le_store_word32(state->B 
+ 32, s20); - le_store_word32(state->B + 36, s21); - le_store_word32(state->B + 40, s22); - le_store_word32(state->B + 44, s23); - le_store_word32(state->B + 48, s30); - le_store_word32(state->B + 52, s31); - le_store_word32(state->B + 56, s32); - le_store_word32(state->B + 60, s33); -#endif -} - -void shadow384(shadow384_state_t *state) -{ - uint32_t s00, s01, s02, s03; - uint32_t s10, s11, s12, s13; - uint32_t s20, s21, s22, s23; - uint32_t c, d, x, y, z; - int step; - - /* Unpack the state into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s00 = state->W[0]; - s01 = state->W[1]; - s02 = state->W[2]; - s03 = state->W[3]; - s10 = state->W[4]; - s11 = state->W[5]; - s12 = state->W[6]; - s13 = state->W[7]; - s20 = state->W[8]; - s21 = state->W[9]; - s22 = state->W[10]; - s23 = state->W[11]; -#else - s00 = le_load_word32(state->B); - s01 = le_load_word32(state->B + 4); - s02 = le_load_word32(state->B + 8); - s03 = le_load_word32(state->B + 12); - s10 = le_load_word32(state->B + 16); - s11 = le_load_word32(state->B + 20); - s12 = le_load_word32(state->B + 24); - s13 = le_load_word32(state->B + 28); - s20 = le_load_word32(state->B + 32); - s21 = le_load_word32(state->B + 36); - s22 = le_load_word32(state->B + 40); - s23 = le_load_word32(state->B + 44); -#endif - - /* Perform all rounds in pairs */ - for (step = 0; step < CLYDE128_STEPS; ++step) { - /* Apply the S-box and L-box to bundle 0 */ - clyde128_sbox(s00, s01, s02, s03); - clyde128_lbox(s00, s01); - clyde128_lbox(s02, s03); - s00 ^= rc[step][0]; - s01 ^= rc[step][1]; - s02 ^= rc[step][2]; - s03 ^= rc[step][3]; - clyde128_sbox(s00, s01, s02, s03); - - /* Apply the S-box and L-box to bundle 1 */ - clyde128_sbox(s10, s11, s12, s13); - clyde128_lbox(s10, s11); - clyde128_lbox(s12, s13); - s10 ^= rc[step][0] << 1; - s11 ^= rc[step][1] << 1; - s12 ^= rc[step][2] << 1; - s13 ^= rc[step][3] << 1; - clyde128_sbox(s10, s11, s12, s13); - - /* Apply the S-box and L-box to bundle 2 */ - clyde128_sbox(s20, s21, s22, s23); - clyde128_lbox(s20, s21); - clyde128_lbox(s22, s23); - s20 ^= rc[step][0] << 2; - s21 ^= rc[step][1] << 2; - s22 ^= rc[step][2] << 2; - s23 ^= rc[step][3] << 2; - clyde128_sbox(s20, s21, s22, s23); - - /* Apply the diffusion layer to the rows of the state */ - #define shadow384_diffusion_layer(row) \ - do { \ - x = s0##row; \ - y = s1##row; \ - z = s2##row; \ - s0##row = x ^ y ^ z; \ - s1##row = x ^ z; \ - s2##row = x ^ y; \ - } while (0) - shadow384_diffusion_layer(0); - shadow384_diffusion_layer(1); - shadow384_diffusion_layer(2); - shadow384_diffusion_layer(3); - - /* Add round constants to all bundles again */ - s00 ^= rc[step][4]; - s01 ^= rc[step][5]; - s02 ^= rc[step][6]; - s03 ^= rc[step][7]; - s10 ^= rc[step][4] << 1; - s11 ^= rc[step][5] << 1; - s12 ^= rc[step][6] << 1; - s13 ^= rc[step][7] << 1; - s20 ^= rc[step][4] << 2; - s21 ^= rc[step][5] << 2; - s22 ^= rc[step][6] << 2; - s23 ^= rc[step][7] << 2; - } - - /* Pack the local variables back into the state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->W[0] = s00; - state->W[1] = s01; - state->W[2] = s02; - state->W[3] = s03; - state->W[4] = s10; - state->W[5] = s11; - state->W[6] = s12; - state->W[7] = s13; - state->W[8] = s20; - state->W[9] = s21; - state->W[10] = s22; - state->W[11] = s23; -#else - le_store_word32(state->B, s00); - le_store_word32(state->B + 4, s01); - le_store_word32(state->B + 8, s02); - le_store_word32(state->B + 12, s03); - le_store_word32(state->B + 16, s10); - le_store_word32(state->B + 20, s11); - le_store_word32(state->B + 24, 
s12); - le_store_word32(state->B + 28, s13); - le_store_word32(state->B + 32, s20); - le_store_word32(state->B + 36, s21); - le_store_word32(state->B + 40, s22); - le_store_word32(state->B + 44, s23); -#endif -} diff --git a/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/internal-spook.h b/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/internal-spook.h deleted file mode 100644 index b08ce80..0000000 --- a/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/internal-spook.h +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SPOOK_H -#define LW_INTERNAL_SPOOK_H - -#include "internal-util.h" - -/** - * \file internal-spook.h - * \brief Internal implementation details of the Spook AEAD mode. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the block for the Clyde-128 block cipher. - */ -#define CLYDE128_BLOCK_SIZE 16 - -/** - * \brief Size of the key for the Clyde-128 block cipher. - */ -#define CLYDE128_KEY_SIZE 16 - -/** - * \brief Size of the tweak for the Clyde-128 block cipher. - */ -#define CLYDE128_TWEAK_SIZE 16 - -/** - * \brief Size of the state for Shadow-512. - */ -#define SHADOW512_STATE_SIZE 64 - -/** - * \brief Rate to absorb data into or squeeze data out of a Shadow-512 state. - */ -#define SHADOW512_RATE 32 - -/** - * \brief Size of the state for Shadow-384. - */ -#define SHADOW384_STATE_SIZE 48 - -/** - * \brief Rate to absorb data into or squeeze data out of a Shadow-384 state. - */ -#define SHADOW384_RATE 16 - -/** - * \brief Internal state of the Shadow-512 permutation. - */ -typedef union -{ - uint32_t W[SHADOW512_STATE_SIZE / 4]; /**< Words of the state */ - uint8_t B[SHADOW512_STATE_SIZE]; /**< Bytes of the state */ - -} shadow512_state_t; - -/** - * \brief Internal state of the Shadow-384 permutation. - */ -typedef union -{ - uint32_t W[SHADOW384_STATE_SIZE / 4]; /**< Words of the state */ - uint8_t B[SHADOW384_STATE_SIZE]; /**< Bytes of the state */ - -} shadow384_state_t; - -/** - * \brief Encrypts a block with the Clyde-128 block cipher. - * - * \param key Points to the key to encrypt with. - * \param tweak Points to the tweak to encrypt with. - * \param output Output buffer for the ciphertext. - * \param input Input buffer for the plaintext. 
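For orientation, a minimal call of the block cipher declared in this header would look like the following sketch (arbitrary inputs, not official test vectors; the CLYDE128_* constants and the prototype come from this header):

#include "internal-spook.h"

void clyde128_encrypt_example(void)
{
    unsigned char key[CLYDE128_KEY_SIZE] = {0};
    uint32_t tweak[CLYDE128_TWEAK_SIZE / 4] = {0};
    uint32_t pt[CLYDE128_BLOCK_SIZE / 4] = {1, 2, 3, 4};
    uint32_t ct[CLYDE128_BLOCK_SIZE / 4];

    clyde128_encrypt(key, tweak, ct, pt);   /* (key, tweak, output, input) */
}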
- * - * \sa clyde128_decrypt() - */ -void clyde128_encrypt(const unsigned char key[CLYDE128_KEY_SIZE], - const uint32_t tweak[CLYDE128_TWEAK_SIZE / 4], - uint32_t output[CLYDE128_BLOCK_SIZE / 4], - const uint32_t input[CLYDE128_BLOCK_SIZE / 4]); - -/** - * \brief Decrypts a block with the Clyde-128 block cipher. - * - * \param key Points to the key to decrypt with. - * \param tweak Points to the tweak to decrypt with. - * \param output Output buffer for the plaintext. - * \param input Input buffer for the ciphertext. - * - * \sa clyde128_encrypt() - */ -void clyde128_decrypt(const unsigned char key[CLYDE128_KEY_SIZE], - const uint32_t tweak[CLYDE128_TWEAK_SIZE / 4], - uint32_t output[CLYDE128_BLOCK_SIZE / 4], - const unsigned char input[CLYDE128_BLOCK_SIZE]); - -/** - * \brief Performs the Shadow-512 permutation on a state. - * - * \param state The Shadow-512 state which will be in little-endian - * byte order on input and output. - * - * \sa shadow384() - */ -void shadow512(shadow512_state_t *state); - -/** - * \brief Performs the Shadow-384 permutation on a state. - * - * \param state The Shadow-384 state which will be in little-endian - * byte order on input and output. - * - * \sa shadow512() - */ -void shadow384(shadow384_state_t *state); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/internal-util.h b/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/spook.c b/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/spook.c deleted file mode 100644 index d075b33..0000000 --- a/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/spook.c +++ /dev/null @@ -1,552 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "spook.h" -#include "internal-spook.h" -#include "internal-util.h" -#include - -aead_cipher_t const spook_128_512_su_cipher = { - "Spook-128-512-su", - SPOOK_SU_KEY_SIZE, - SPOOK_NONCE_SIZE, - SPOOK_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - spook_128_512_su_aead_encrypt, - spook_128_512_su_aead_decrypt -}; - -aead_cipher_t const spook_128_384_su_cipher = { - "Spook-128-384-su", - SPOOK_SU_KEY_SIZE, - SPOOK_NONCE_SIZE, - SPOOK_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - spook_128_384_su_aead_encrypt, - spook_128_384_su_aead_decrypt -}; - -aead_cipher_t const spook_128_512_mu_cipher = { - "Spook-128-512-mu", - SPOOK_MU_KEY_SIZE, - SPOOK_NONCE_SIZE, - SPOOK_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - spook_128_512_mu_aead_encrypt, - spook_128_512_mu_aead_decrypt -}; - -aead_cipher_t const spook_128_384_mu_cipher = { - "Spook-128-384-mu", - SPOOK_MU_KEY_SIZE, - SPOOK_NONCE_SIZE, - SPOOK_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - spook_128_384_mu_aead_encrypt, - spook_128_384_mu_aead_decrypt -}; - -/** - * \brief Initializes the Shadow-512 sponge state. - * - * \param state The sponge state. - * \param k Points to the key. - * \param klen Length of the key in bytes, either 16 or 32. - * \param npub Public nonce for the state. 
- */ -static void spook_128_512_init - (shadow512_state_t *state, - const unsigned char *k, unsigned klen, - const unsigned char *npub) -{ - memset(state->B, 0, SHADOW512_STATE_SIZE); - if (klen == SPOOK_MU_KEY_SIZE) { - /* The public tweak is 126 bits in size followed by a 1 bit */ - memcpy(state->B, k + CLYDE128_BLOCK_SIZE, CLYDE128_BLOCK_SIZE); - state->B[CLYDE128_BLOCK_SIZE - 1] &= 0x7F; - state->B[CLYDE128_BLOCK_SIZE - 1] |= 0x40; - } - memcpy(state->B + CLYDE128_BLOCK_SIZE, npub, CLYDE128_BLOCK_SIZE); - clyde128_encrypt(k, state->W, state->W + 12, state->W + 4); - shadow512(state); -} - -/** - * \brief Initializes the Shadow-384 sponge state. - * - * \param state The sponge state. - * \param k Points to the key. - * \param klen Length of the key in bytes, either 16 or 32. - * \param npub Public nonce for the state. - */ -static void spook_128_384_init - (shadow384_state_t *state, - const unsigned char *k, unsigned klen, - const unsigned char *npub) -{ - memset(state->B, 0, SHADOW384_STATE_SIZE); - if (klen == SPOOK_MU_KEY_SIZE) { - /* The public tweak is 126 bits in size followed by a 1 bit */ - memcpy(state->B, k + CLYDE128_BLOCK_SIZE, CLYDE128_BLOCK_SIZE); - state->B[CLYDE128_BLOCK_SIZE - 1] &= 0x7F; - state->B[CLYDE128_BLOCK_SIZE - 1] |= 0x40; - } - memcpy(state->B + CLYDE128_BLOCK_SIZE, npub, CLYDE128_BLOCK_SIZE); - clyde128_encrypt(k, state->W, state->W + 8, state->W + 4); - shadow384(state); -} - -/** - * \brief Absorbs associated data into the Shadow-512 sponge state. - * - * \param state The sponge state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes, must be non-zero. - */ -static void spook_128_512_absorb - (shadow512_state_t *state, - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen >= SHADOW512_RATE) { - lw_xor_block(state->B, ad, SHADOW512_RATE); - shadow512(state); - ad += SHADOW512_RATE; - adlen -= SHADOW512_RATE; - } - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(state->B, ad, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW512_RATE] ^= 0x02; - shadow512(state); - } -} - -/** - * \brief Absorbs associated data into the Shadow-384 sponge state. - * - * \param state The sponge state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes, must be non-zero. - */ -static void spook_128_384_absorb - (shadow384_state_t *state, - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen >= SHADOW384_RATE) { - lw_xor_block(state->B, ad, SHADOW384_RATE); - shadow384(state); - ad += SHADOW384_RATE; - adlen -= SHADOW384_RATE; - } - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(state->B, ad, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW384_RATE] ^= 0x02; - shadow384(state); - } -} - -/** - * \brief Encrypts the plaintext with the Shadow-512 sponge state. - * - * \param state The sponge state. - * \param c Points to the ciphertext output buffer. - * \param m Points to the plaintext input buffer. - * \param mlen Number of bytes of plaintext to be encrypted. 
- */ -static void spook_128_512_encrypt - (shadow512_state_t *state, unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - state->B[SHADOW512_RATE] ^= 0x01; - while (mlen >= SHADOW512_RATE) { - lw_xor_block_2_dest(c, state->B, m, SHADOW512_RATE); - shadow512(state); - c += SHADOW512_RATE; - m += SHADOW512_RATE; - mlen -= SHADOW512_RATE; - } - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_dest(c, state->B, m, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW512_RATE] ^= 0x02; - shadow512(state); - } -} - -/** - * \brief Encrypts the plaintext with the Shadow-384 sponge state. - * - * \param state The sponge state. - * \param c Points to the ciphertext output buffer. - * \param m Points to the plaintext input buffer. - * \param mlen Number of bytes of plaintext to be encrypted. - */ -static void spook_128_384_encrypt - (shadow384_state_t *state, unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - state->B[SHADOW384_RATE] ^= 0x01; - while (mlen >= SHADOW384_RATE) { - lw_xor_block_2_dest(c, state->B, m, SHADOW384_RATE); - shadow384(state); - c += SHADOW384_RATE; - m += SHADOW384_RATE; - mlen -= SHADOW384_RATE; - } - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_dest(c, state->B, m, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW384_RATE] ^= 0x02; - shadow384(state); - } -} - -/** - * \brief Decrypts the ciphertext with the Shadow-512 sponge state. - * - * \param state The sponge state. - * \param m Points to the plaintext output buffer. - * \param c Points to the ciphertext input buffer. - * \param clen Number of bytes of ciphertext to be decrypted. - */ -static void spook_128_512_decrypt - (shadow512_state_t *state, unsigned char *m, - const unsigned char *c, unsigned long long clen) -{ - state->B[SHADOW512_RATE] ^= 0x01; - while (clen >= SHADOW512_RATE) { - lw_xor_block_swap(m, state->B, c, SHADOW512_RATE); - shadow512(state); - c += SHADOW512_RATE; - m += SHADOW512_RATE; - clen -= SHADOW512_RATE; - } - if (clen > 0) { - unsigned temp = (unsigned)clen; - lw_xor_block_swap(m, state->B, c, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW512_RATE] ^= 0x02; - shadow512(state); - } -} - -/** - * \brief Decrypts the ciphertext with the Shadow-384 sponge state. - * - * \param state The sponge state. - * \param m Points to the plaintext output buffer. - * \param c Points to the ciphertext input buffer. - * \param clen Number of bytes of ciphertext to be decrypted. 
- */ -static void spook_128_384_decrypt - (shadow384_state_t *state, unsigned char *m, - const unsigned char *c, unsigned long long clen) -{ - state->B[SHADOW384_RATE] ^= 0x01; - while (clen >= SHADOW384_RATE) { - lw_xor_block_swap(m, state->B, c, SHADOW384_RATE); - shadow384(state); - c += SHADOW384_RATE; - m += SHADOW384_RATE; - clen -= SHADOW384_RATE; - } - if (clen > 0) { - unsigned temp = (unsigned)clen; - lw_xor_block_swap(m, state->B, c, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW384_RATE] ^= 0x02; - shadow384(state); - } -} - -int spook_128_512_su_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - shadow512_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPOOK_TAG_SIZE; - - /* Initialize the Shadow-512 sponge state */ - spook_128_512_init(&state, k, SPOOK_SU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_512_absorb(&state, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) - spook_128_512_encrypt(&state, c, m, mlen); - - /* Compute the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_encrypt(k, state.W + 4, state.W, state.W); - memcpy(c + mlen, state.B, SPOOK_TAG_SIZE); - return 0; -} - -int spook_128_512_su_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - shadow512_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SPOOK_TAG_SIZE) - return -1; - *mlen = clen - SPOOK_TAG_SIZE; - - /* Initialize the Shadow-512 sponge state */ - spook_128_512_init(&state, k, SPOOK_SU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_512_absorb(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPOOK_TAG_SIZE; - if (clen > 0) - spook_128_512_decrypt(&state, m, c, clen); - - /* Check the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_decrypt(k, state.W + 4, state.W + 4, c + clen); - return aead_check_tag - (m, clen, state.B, state.B + CLYDE128_BLOCK_SIZE, SPOOK_TAG_SIZE); -} - -int spook_128_384_su_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - shadow384_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPOOK_TAG_SIZE; - - /* Initialize the Shadow-384 sponge state */ - spook_128_384_init(&state, k, SPOOK_SU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_384_absorb(&state, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) - spook_128_384_encrypt(&state, c, m, mlen); - - /* Compute the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_encrypt(k, state.W + 4, state.W, state.W); - memcpy(c + mlen, state.B, SPOOK_TAG_SIZE); - return 0; -} - -int spook_128_384_su_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char 
*nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - shadow384_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SPOOK_TAG_SIZE) - return -1; - *mlen = clen - SPOOK_TAG_SIZE; - - /* Initialize the Shadow-384 sponge state */ - spook_128_384_init(&state, k, SPOOK_SU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_384_absorb(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPOOK_TAG_SIZE; - if (clen > 0) - spook_128_384_decrypt(&state, m, c, clen); - - /* Check the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_decrypt(k, state.W + 4, state.W + 4, c + clen); - return aead_check_tag - (m, clen, state.B, state.B + CLYDE128_BLOCK_SIZE, SPOOK_TAG_SIZE); -} - -int spook_128_512_mu_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - shadow512_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPOOK_TAG_SIZE; - - /* Initialize the Shadow-512 sponge state */ - spook_128_512_init(&state, k, SPOOK_MU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_512_absorb(&state, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) - spook_128_512_encrypt(&state, c, m, mlen); - - /* Compute the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_encrypt(k, state.W + 4, state.W, state.W); - memcpy(c + mlen, state.B, SPOOK_TAG_SIZE); - return 0; -} - -int spook_128_512_mu_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - shadow512_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SPOOK_TAG_SIZE) - return -1; - *mlen = clen - SPOOK_TAG_SIZE; - - /* Initialize the Shadow-512 sponge state */ - spook_128_512_init(&state, k, SPOOK_MU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_512_absorb(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPOOK_TAG_SIZE; - if (clen > 0) - spook_128_512_decrypt(&state, m, c, clen); - - /* Check the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_decrypt(k, state.W + 4, state.W + 4, c + clen); - return aead_check_tag - (m, clen, state.B, state.B + CLYDE128_BLOCK_SIZE, SPOOK_TAG_SIZE); -} - -int spook_128_384_mu_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - shadow384_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPOOK_TAG_SIZE; - - /* Initialize the Shadow-384 sponge state */ - spook_128_384_init(&state, k, SPOOK_MU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_384_absorb(&state, ad, adlen); - - /* Encrypt the 
plaintext to produce the ciphertext */ - if (mlen > 0) - spook_128_384_encrypt(&state, c, m, mlen); - - /* Compute the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_encrypt(k, state.W + 4, state.W, state.W); - memcpy(c + mlen, state.B, SPOOK_TAG_SIZE); - return 0; -} - -int spook_128_384_mu_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - shadow384_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SPOOK_TAG_SIZE) - return -1; - *mlen = clen - SPOOK_TAG_SIZE; - - /* Initialize the Shadow-384 sponge state */ - spook_128_384_init(&state, k, SPOOK_MU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_384_absorb(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPOOK_TAG_SIZE; - if (clen > 0) - spook_128_384_decrypt(&state, m, c, clen); - - /* Check the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_decrypt(k, state.W + 4, state.W + 4, c + clen); - return aead_check_tag - (m, clen, state.B, state.B + CLYDE128_BLOCK_SIZE, SPOOK_TAG_SIZE); -} diff --git a/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/spook.h b/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/spook.h deleted file mode 100644 index 68b6a25..0000000 --- a/spook/Implementations/crypto_aead/spook128mu384v1/rhys-avr/spook.h +++ /dev/null @@ -1,344 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SPOOK_H -#define LWCRYPTO_SPOOK_H - -#include "aead-common.h" - -/** - * \file spook.h - * \brief Spook authenticated encryption algorithm. - * - * Spook is a family of authenticated encryption algorithms that are - * built around a tweakable block cipher and a permutation. If the - * tweakable block cipher is implemented as a masked block cipher, - * then Spook provides protection against power analysis side channels. - * - * There are four members in the Spook family: - * - * \li Spook-128-512-su with a 128-bit key, a 128-bit nonce, and a 128-bit tag. - * Internally the algorithm uses a 512-bit permutation. This is the primary - * member of the family. 
- * \li Spook-128-384-su with a 128-bit key, a 128-bit nonce, and a 128-bit tag. - * Internally the algorithm uses a 384-bit permutation. - * \li Spook-128-512-mu with a 128-bit key, a 128-bit public tweak, a 128-bit - * nonce, and a 128-bit tag. Internally the algorithm uses a 512-bit - * permutation. - * \li Spook-128-384-mu with a 128-bit key, a 128-bit public tweak, a 128-bit - * nonce, and a 128-bit tag. Internally the algorithm uses a 384-bit - * permutation. - * - * In this library, the "mu" (multi-user) variants combine the 128-bit key - * and the 128-bit public tweak into a single 256-bit key value. - * Applications can either view this as a cipher with a 256-bit key, - * or they can split the key value into secret and public halves. - * Even with the use of 256-bit keys, Spook only has 128-bit security. - * - * References: https://www.spook.dev/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for the single-user version of Spook. - */ -#define SPOOK_SU_KEY_SIZE 16 - -/** - * \brief Size of the key for the multi-user version of Spook. - */ -#define SPOOK_MU_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for all Spook family members. - */ -#define SPOOK_TAG_SIZE 16 - -/** - * \brief Size of the nonce for all Spook family members. - */ -#define SPOOK_NONCE_SIZE 16 - -/** - * \brief Meta-information block for the Spook-128-512-su cipher. - */ -extern aead_cipher_t const spook_128_512_su_cipher; - -/** - * \brief Meta-information block for the Spook-128-384-su cipher. - */ -extern aead_cipher_t const spook_128_384_su_cipher; - -/** - * \brief Meta-information block for the Spook-128-512-mu cipher. - */ -extern aead_cipher_t const spook_128_512_mu_cipher; - -/** - * \brief Meta-information block for the Spook-128-384-mu cipher. - */ -extern aead_cipher_t const spook_128_384_mu_cipher; - -/** - * \brief Encrypts and authenticates a packet with Spook-128-512-su. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa spook_128_512_su_aead_decrypt() - */ -int spook_128_512_su_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Spook-128-512-su. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt.
- * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa spook_128_512_su_aead_encrypt() - */ -int spook_128_512_su_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Spook-128-384-su. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa spook_128_384_su_aead_decrypt() - */ -int spook_128_384_su_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Spook-128-384-su. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa spook_128_384_su_aead_encrypt() - */ -int spook_128_384_su_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Spook-128-512-mu. 
- * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa spook_128_512_mu_aead_decrypt() - */ -int spook_128_512_mu_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Spook-128-512-mu. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa spook_128_512_mu_aead_encrypt() - */ -int spook_128_512_mu_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Spook-128-384-mu. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa spook_128_384_mu_aead_decrypt() - */ -int spook_128_384_mu_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Spook-128-384-mu. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa spook_128_384_mu_aead_encrypt() - */ -int spook_128_384_mu_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spook/Implementations/crypto_aead/spook128mu384v1/rhys/internal-util.h b/spook/Implementations/crypto_aead/spook128mu384v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/spook/Implementations/crypto_aead/spook128mu384v1/rhys/internal-util.h +++ b/spook/Implementations/crypto_aead/spook128mu384v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/aead-common.c b/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/aead-common.h b/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Absorbs more input data into an XOF state. - * - * \param state XOF state to be updated.
- * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * provide the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check.
- * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/api.h b/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/api.h deleted file mode 100644 index fb1dab8..0000000 --- a/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 32 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/encrypt.c b/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/encrypt.c deleted file mode 100644 index 52c6ec8..0000000 --- a/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "spook.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return spook_128_512_mu_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return spook_128_512_mu_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/internal-spook.c b/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/internal-spook.c deleted file mode 100644 index 0e19216..0000000 --- a/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/internal-spook.c +++ /dev/null @@ -1,557 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-spook.h" - -/** - * \brief Number of steps in the Clyde-128 block cipher. - * - * This is also the number of steps in the Shadow-512 and Shadow-384 - * permutations. - */ -#define CLYDE128_STEPS 6 - -/** - * \brief Round constants for the steps of Clyde-128. - */ -static uint8_t const rc[CLYDE128_STEPS][8] = { - {1, 0, 0, 0, 0, 1, 0, 0}, - {0, 0, 1, 0, 0, 0, 0, 1}, - {1, 1, 0, 0, 0, 1, 1, 0}, - {0, 0, 1, 1, 1, 1, 0, 1}, - {1, 0, 1, 0, 0, 1, 0, 1}, - {1, 1, 1, 0, 0, 1, 1, 1} -}; - -void clyde128_encrypt(const unsigned char key[CLYDE128_KEY_SIZE], - const uint32_t tweak[CLYDE128_TWEAK_SIZE / 4], - uint32_t output[CLYDE128_BLOCK_SIZE / 4], - const uint32_t input[CLYDE128_BLOCK_SIZE / 4]) -{ - uint32_t k0, k1, k2, k3; - uint32_t t0, t1, t2, t3; - uint32_t s0, s1, s2, s3; - uint32_t c, d; - int step; - - /* Unpack the key, tweak, and state */ - k0 = le_load_word32(key); - k1 = le_load_word32(key + 4); - k2 = le_load_word32(key + 8); - k3 = le_load_word32(key + 12); -#if defined(LW_UTIL_LITTLE_ENDIAN) - t0 = tweak[0]; - t1 = tweak[1]; - t2 = tweak[2]; - t3 = tweak[3]; - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; -#else - t0 = le_load_word32((const unsigned char *)&(tweak[0])); - t1 = le_load_word32((const unsigned char *)&(tweak[1])); - t2 = le_load_word32((const unsigned char *)&(tweak[2])); - t3 = le_load_word32((const unsigned char *)&(tweak[3])); - s0 = le_load_word32((const unsigned char *)&(input[0])); - s1 = le_load_word32((const unsigned char *)&(input[1])); - s2 = le_load_word32((const unsigned char *)&(input[2])); - s3 = le_load_word32((const unsigned char *)&(input[3])); -#endif - - /* Add the initial tweakey to the state */ - s0 ^= k0 ^ t0; - s1 ^= k1 ^ t1; - s2 ^= k2 ^ t2; - s3 ^= k3 ^ t3; - - /* Perform all rounds in pairs */ - for (step = 0; step < CLYDE128_STEPS; ++step) { - /* Perform the two rounds of this step */ - #define clyde128_sbox(s0, s1, s2, s3) \ - do { \ - c = (s0 & s1) ^ s2; \ - d = (s3 & s0) ^ s1; \ - s2 = (c & d) ^ s3; \ - s3 = (c & s3) ^ s0; \ - s0 = d; \ - s1 = c; \ - } while (0) - #define clyde128_lbox(x, y) \ - do { \ - c = x ^ rightRotate12(x); \ - d = y ^ rightRotate12(y); \ - c ^= rightRotate3(c); \ - d ^= rightRotate3(d); \ - x = c ^ leftRotate15(x); \ - y = d ^ leftRotate15(y); \ - c = x ^ leftRotate1(x); \ - d = y ^ leftRotate1(y); \ - x ^= leftRotate6(d); \ - y ^= leftRotate7(c); \ - x ^= rightRotate15(c); \ - y ^= rightRotate15(d); \ - } while (0) - clyde128_sbox(s0, s1, s2, s3); - clyde128_lbox(s0, s1); - clyde128_lbox(s2, s3); - s0 ^= rc[step][0]; - s1 ^= rc[step][1]; - s2 ^= rc[step][2]; - s3 ^= rc[step][3]; - clyde128_sbox(s0, s1, s2, s3); - clyde128_lbox(s0, s1); - clyde128_lbox(s2, s3); - s0 ^= rc[step][4]; - s1 ^= rc[step][5]; - s2 ^= rc[step][6]; - s3 ^= rc[step][7]; - - /* Update the tweakey on the fly and add it to the state */ - c = t2 ^ t0; - d = t3 ^ t1; - t2 = t0; - t3 = t1; - t0 = c; - t1 = d; - s0 ^= k0 ^ t0; - s1 ^= k1 ^ t1; - s2 ^= k2 ^ t2; - s3 ^= k3 ^ t3; - } - - /* Pack the state into the output 
buffer */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -#else - le_store_word32((unsigned char *)&(output[0]), s0); - le_store_word32((unsigned char *)&(output[1]), s1); - le_store_word32((unsigned char *)&(output[2]), s2); - le_store_word32((unsigned char *)&(output[3]), s3); -#endif -} - -void clyde128_decrypt(const unsigned char key[CLYDE128_KEY_SIZE], - const uint32_t tweak[CLYDE128_TWEAK_SIZE / 4], - uint32_t output[CLYDE128_BLOCK_SIZE / 4], - const unsigned char input[CLYDE128_BLOCK_SIZE]) -{ - uint32_t k0, k1, k2, k3; - uint32_t t0, t1, t2, t3; - uint32_t s0, s1, s2, s3; - uint32_t a, b, d; - int step; - - /* Unpack the key, tweak, and state */ - k0 = le_load_word32(key); - k1 = le_load_word32(key + 4); - k2 = le_load_word32(key + 8); - k3 = le_load_word32(key + 12); -#if defined(LW_UTIL_LITTLE_ENDIAN) - t0 = tweak[0]; - t1 = tweak[1]; - t2 = tweak[2]; - t3 = tweak[3]; -#else - t0 = le_load_word32((const unsigned char *)&(tweak[0])); - t1 = le_load_word32((const unsigned char *)&(tweak[1])); - t2 = le_load_word32((const unsigned char *)&(tweak[2])); - t3 = le_load_word32((const unsigned char *)&(tweak[3])); -#endif - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all rounds in pairs */ - for (step = CLYDE128_STEPS - 1; step >= 0; --step) { - /* Add the tweakey to the state and update the tweakey */ - s0 ^= k0 ^ t0; - s1 ^= k1 ^ t1; - s2 ^= k2 ^ t2; - s3 ^= k3 ^ t3; - a = t2 ^ t0; - b = t3 ^ t1; - t0 = t2; - t1 = t3; - t2 = a; - t3 = b; - - /* Perform the two rounds of this step */ - #define clyde128_inv_sbox(s0, s1, s2, s3) \ - do { \ - d = (s0 & s1) ^ s2; \ - a = (s1 & d) ^ s3; \ - b = (d & a) ^ s0; \ - s2 = (a & b) ^ s1; \ - s0 = a; \ - s1 = b; \ - s3 = d; \ - } while (0) - #define clyde128_inv_lbox(x, y) \ - do { \ - a = x ^ leftRotate7(x); \ - b = y ^ leftRotate7(y); \ - x ^= leftRotate1(a); \ - y ^= leftRotate1(b); \ - x ^= leftRotate12(a); \ - y ^= leftRotate12(b); \ - a = x ^ leftRotate1(x); \ - b = y ^ leftRotate1(y); \ - x ^= leftRotate6(b); \ - y ^= leftRotate7(a); \ - a ^= leftRotate15(x); \ - b ^= leftRotate15(y); \ - x = rightRotate16(a); \ - y = rightRotate16(b); \ - } while (0) - s0 ^= rc[step][4]; - s1 ^= rc[step][5]; - s2 ^= rc[step][6]; - s3 ^= rc[step][7]; - clyde128_inv_lbox(s0, s1); - clyde128_inv_lbox(s2, s3); - clyde128_inv_sbox(s0, s1, s2, s3); - s0 ^= rc[step][0]; - s1 ^= rc[step][1]; - s2 ^= rc[step][2]; - s3 ^= rc[step][3]; - clyde128_inv_lbox(s0, s1); - clyde128_inv_lbox(s2, s3); - clyde128_inv_sbox(s0, s1, s2, s3); - } - - /* Add the tweakey to the state one last time */ - s0 ^= k0 ^ t0; - s1 ^= k1 ^ t1; - s2 ^= k2 ^ t2; - s3 ^= k3 ^ t3; - - /* Pack the state into the output buffer */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -#else - le_store_word32((unsigned char *)&(output[0]), s0); - le_store_word32((unsigned char *)&(output[1]), s1); - le_store_word32((unsigned char *)&(output[2]), s2); - le_store_word32((unsigned char *)&(output[3]), s3); -#endif -} - -void shadow512(shadow512_state_t *state) -{ - uint32_t s00, s01, s02, s03; - uint32_t s10, s11, s12, s13; - uint32_t s20, s21, s22, s23; - uint32_t s30, s31, s32, s33; - uint32_t c, d, w, x, y, z; - int step; - - /* Unpack the state into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s00 = state->W[0]; - s01 = state->W[1]; - s02 = state->W[2]; - s03 = state->W[3]; - 
s10 = state->W[4]; - s11 = state->W[5]; - s12 = state->W[6]; - s13 = state->W[7]; - s20 = state->W[8]; - s21 = state->W[9]; - s22 = state->W[10]; - s23 = state->W[11]; - s30 = state->W[12]; - s31 = state->W[13]; - s32 = state->W[14]; - s33 = state->W[15]; -#else - s00 = le_load_word32(state->B); - s01 = le_load_word32(state->B + 4); - s02 = le_load_word32(state->B + 8); - s03 = le_load_word32(state->B + 12); - s10 = le_load_word32(state->B + 16); - s11 = le_load_word32(state->B + 20); - s12 = le_load_word32(state->B + 24); - s13 = le_load_word32(state->B + 28); - s20 = le_load_word32(state->B + 32); - s21 = le_load_word32(state->B + 36); - s22 = le_load_word32(state->B + 40); - s23 = le_load_word32(state->B + 44); - s30 = le_load_word32(state->B + 48); - s31 = le_load_word32(state->B + 52); - s32 = le_load_word32(state->B + 56); - s33 = le_load_word32(state->B + 60); -#endif - - /* Perform all rounds in pairs */ - for (step = 0; step < CLYDE128_STEPS; ++step) { - /* Apply the S-box and L-box to bundle 0 */ - clyde128_sbox(s00, s01, s02, s03); - clyde128_lbox(s00, s01); - clyde128_lbox(s02, s03); - s00 ^= rc[step][0]; - s01 ^= rc[step][1]; - s02 ^= rc[step][2]; - s03 ^= rc[step][3]; - clyde128_sbox(s00, s01, s02, s03); - - /* Apply the S-box and L-box to bundle 1 */ - clyde128_sbox(s10, s11, s12, s13); - clyde128_lbox(s10, s11); - clyde128_lbox(s12, s13); - s10 ^= rc[step][0] << 1; - s11 ^= rc[step][1] << 1; - s12 ^= rc[step][2] << 1; - s13 ^= rc[step][3] << 1; - clyde128_sbox(s10, s11, s12, s13); - - /* Apply the S-box and L-box to bundle 2 */ - clyde128_sbox(s20, s21, s22, s23); - clyde128_lbox(s20, s21); - clyde128_lbox(s22, s23); - s20 ^= rc[step][0] << 2; - s21 ^= rc[step][1] << 2; - s22 ^= rc[step][2] << 2; - s23 ^= rc[step][3] << 2; - clyde128_sbox(s20, s21, s22, s23); - - /* Apply the S-box and L-box to bundle 3 */ - clyde128_sbox(s30, s31, s32, s33); - clyde128_lbox(s30, s31); - clyde128_lbox(s32, s33); - s30 ^= rc[step][0] << 3; - s31 ^= rc[step][1] << 3; - s32 ^= rc[step][2] << 3; - s33 ^= rc[step][3] << 3; - clyde128_sbox(s30, s31, s32, s33); - - /* Apply the diffusion layer to the rows of the state */ - #define shadow512_diffusion_layer(row) \ - do { \ - w = s0##row; \ - x = s1##row; \ - y = s2##row; \ - z = s3##row; \ - c = w ^ x; \ - d = y ^ z; \ - s0##row = x ^ d; \ - s1##row = w ^ d; \ - s2##row = c ^ z; \ - s3##row = c ^ y; \ - } while (0) - shadow512_diffusion_layer(0); - shadow512_diffusion_layer(1); - shadow512_diffusion_layer(2); - shadow512_diffusion_layer(3); - - /* Add round constants to all bundles again */ - s00 ^= rc[step][4]; - s01 ^= rc[step][5]; - s02 ^= rc[step][6]; - s03 ^= rc[step][7]; - s10 ^= rc[step][4] << 1; - s11 ^= rc[step][5] << 1; - s12 ^= rc[step][6] << 1; - s13 ^= rc[step][7] << 1; - s20 ^= rc[step][4] << 2; - s21 ^= rc[step][5] << 2; - s22 ^= rc[step][6] << 2; - s23 ^= rc[step][7] << 2; - s30 ^= rc[step][4] << 3; - s31 ^= rc[step][5] << 3; - s32 ^= rc[step][6] << 3; - s33 ^= rc[step][7] << 3; - } - - /* Pack the local variables back into the state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->W[0] = s00; - state->W[1] = s01; - state->W[2] = s02; - state->W[3] = s03; - state->W[4] = s10; - state->W[5] = s11; - state->W[6] = s12; - state->W[7] = s13; - state->W[8] = s20; - state->W[9] = s21; - state->W[10] = s22; - state->W[11] = s23; - state->W[12] = s30; - state->W[13] = s31; - state->W[14] = s32; - state->W[15] = s33; -#else - le_store_word32(state->B, s00); - le_store_word32(state->B + 4, s01); - le_store_word32(state->B + 8, s02); - 
le_store_word32(state->B + 12, s03); - le_store_word32(state->B + 16, s10); - le_store_word32(state->B + 20, s11); - le_store_word32(state->B + 24, s12); - le_store_word32(state->B + 28, s13); - le_store_word32(state->B + 32, s20); - le_store_word32(state->B + 36, s21); - le_store_word32(state->B + 40, s22); - le_store_word32(state->B + 44, s23); - le_store_word32(state->B + 48, s30); - le_store_word32(state->B + 52, s31); - le_store_word32(state->B + 56, s32); - le_store_word32(state->B + 60, s33); -#endif -} - -void shadow384(shadow384_state_t *state) -{ - uint32_t s00, s01, s02, s03; - uint32_t s10, s11, s12, s13; - uint32_t s20, s21, s22, s23; - uint32_t c, d, x, y, z; - int step; - - /* Unpack the state into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s00 = state->W[0]; - s01 = state->W[1]; - s02 = state->W[2]; - s03 = state->W[3]; - s10 = state->W[4]; - s11 = state->W[5]; - s12 = state->W[6]; - s13 = state->W[7]; - s20 = state->W[8]; - s21 = state->W[9]; - s22 = state->W[10]; - s23 = state->W[11]; -#else - s00 = le_load_word32(state->B); - s01 = le_load_word32(state->B + 4); - s02 = le_load_word32(state->B + 8); - s03 = le_load_word32(state->B + 12); - s10 = le_load_word32(state->B + 16); - s11 = le_load_word32(state->B + 20); - s12 = le_load_word32(state->B + 24); - s13 = le_load_word32(state->B + 28); - s20 = le_load_word32(state->B + 32); - s21 = le_load_word32(state->B + 36); - s22 = le_load_word32(state->B + 40); - s23 = le_load_word32(state->B + 44); -#endif - - /* Perform all rounds in pairs */ - for (step = 0; step < CLYDE128_STEPS; ++step) { - /* Apply the S-box and L-box to bundle 0 */ - clyde128_sbox(s00, s01, s02, s03); - clyde128_lbox(s00, s01); - clyde128_lbox(s02, s03); - s00 ^= rc[step][0]; - s01 ^= rc[step][1]; - s02 ^= rc[step][2]; - s03 ^= rc[step][3]; - clyde128_sbox(s00, s01, s02, s03); - - /* Apply the S-box and L-box to bundle 1 */ - clyde128_sbox(s10, s11, s12, s13); - clyde128_lbox(s10, s11); - clyde128_lbox(s12, s13); - s10 ^= rc[step][0] << 1; - s11 ^= rc[step][1] << 1; - s12 ^= rc[step][2] << 1; - s13 ^= rc[step][3] << 1; - clyde128_sbox(s10, s11, s12, s13); - - /* Apply the S-box and L-box to bundle 2 */ - clyde128_sbox(s20, s21, s22, s23); - clyde128_lbox(s20, s21); - clyde128_lbox(s22, s23); - s20 ^= rc[step][0] << 2; - s21 ^= rc[step][1] << 2; - s22 ^= rc[step][2] << 2; - s23 ^= rc[step][3] << 2; - clyde128_sbox(s20, s21, s22, s23); - - /* Apply the diffusion layer to the rows of the state */ - #define shadow384_diffusion_layer(row) \ - do { \ - x = s0##row; \ - y = s1##row; \ - z = s2##row; \ - s0##row = x ^ y ^ z; \ - s1##row = x ^ z; \ - s2##row = x ^ y; \ - } while (0) - shadow384_diffusion_layer(0); - shadow384_diffusion_layer(1); - shadow384_diffusion_layer(2); - shadow384_diffusion_layer(3); - - /* Add round constants to all bundles again */ - s00 ^= rc[step][4]; - s01 ^= rc[step][5]; - s02 ^= rc[step][6]; - s03 ^= rc[step][7]; - s10 ^= rc[step][4] << 1; - s11 ^= rc[step][5] << 1; - s12 ^= rc[step][6] << 1; - s13 ^= rc[step][7] << 1; - s20 ^= rc[step][4] << 2; - s21 ^= rc[step][5] << 2; - s22 ^= rc[step][6] << 2; - s23 ^= rc[step][7] << 2; - } - - /* Pack the local variables back into the state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->W[0] = s00; - state->W[1] = s01; - state->W[2] = s02; - state->W[3] = s03; - state->W[4] = s10; - state->W[5] = s11; - state->W[6] = s12; - state->W[7] = s13; - state->W[8] = s20; - state->W[9] = s21; - state->W[10] = s22; - state->W[11] = s23; -#else - le_store_word32(state->B, s00); - 
le_store_word32(state->B + 4, s01); - le_store_word32(state->B + 8, s02); - le_store_word32(state->B + 12, s03); - le_store_word32(state->B + 16, s10); - le_store_word32(state->B + 20, s11); - le_store_word32(state->B + 24, s12); - le_store_word32(state->B + 28, s13); - le_store_word32(state->B + 32, s20); - le_store_word32(state->B + 36, s21); - le_store_word32(state->B + 40, s22); - le_store_word32(state->B + 44, s23); -#endif -} diff --git a/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/internal-spook.h b/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/internal-spook.h deleted file mode 100644 index b08ce80..0000000 --- a/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/internal-spook.h +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SPOOK_H -#define LW_INTERNAL_SPOOK_H - -#include "internal-util.h" - -/** - * \file internal-spook.h - * \brief Internal implementation details of the Spook AEAD mode. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the block for the Clyde-128 block cipher. - */ -#define CLYDE128_BLOCK_SIZE 16 - -/** - * \brief Size of the key for the Clyde-128 block cipher. - */ -#define CLYDE128_KEY_SIZE 16 - -/** - * \brief Size of the tweak for the Clyde-128 block cipher. - */ -#define CLYDE128_TWEAK_SIZE 16 - -/** - * \brief Size of the state for Shadow-512. - */ -#define SHADOW512_STATE_SIZE 64 - -/** - * \brief Rate to absorb data into or squeeze data out of a Shadow-512 state. - */ -#define SHADOW512_RATE 32 - -/** - * \brief Size of the state for Shadow-384. - */ -#define SHADOW384_STATE_SIZE 48 - -/** - * \brief Rate to absorb data into or squeeze data out of a Shadow-384 state. - */ -#define SHADOW384_RATE 16 - -/** - * \brief Internal state of the Shadow-512 permutation. - */ -typedef union -{ - uint32_t W[SHADOW512_STATE_SIZE / 4]; /**< Words of the state */ - uint8_t B[SHADOW512_STATE_SIZE]; /**< Bytes of the state */ - -} shadow512_state_t; - -/** - * \brief Internal state of the Shadow-384 permutation. - */ -typedef union -{ - uint32_t W[SHADOW384_STATE_SIZE / 4]; /**< Words of the state */ - uint8_t B[SHADOW384_STATE_SIZE]; /**< Bytes of the state */ - -} shadow384_state_t; - -/** - * \brief Encrypts a block with the Clyde-128 block cipher. - * - * \param key Points to the key to encrypt with. - * \param tweak Points to the tweak to encrypt with. 
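The shadow512_state_t and shadow384_state_t unions above deliberately expose the same storage twice: W[] as 32-bit words for the permutation rounds, and B[] as raw bytes for absorbing input and squeezing output at the sponge rate. A minimal sketch of that dual view, with DEMO_* constants standing in for the real SHADOW512_* values:

    #include <stdint.h>

    #define DEMO_STATE_SIZE 64   /* mirrors SHADOW512_STATE_SIZE above */
    #define DEMO_RATE       32   /* mirrors SHADOW512_RATE above */

    /* Same storage viewed two ways: words for the round functions,
     * bytes for sponge input/output. */
    typedef union {
        uint32_t W[DEMO_STATE_SIZE / 4];
        uint8_t  B[DEMO_STATE_SIZE];
    } demo_state_t;

    /* XOR one rate-sized block into the byte view of the state; a real
     * implementation would follow this with the Shadow permutation,
     * which works on the word view. */
    void demo_absorb_block(demo_state_t *state, const uint8_t block[DEMO_RATE])
    {
        unsigned i;
        for (i = 0; i < DEMO_RATE; ++i)
            state->B[i] ^= block[i];
    }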
- * \param output Output buffer for the ciphertext. - * \param input Input buffer for the plaintext. - * - * \sa clyde128_decrypt() - */ -void clyde128_encrypt(const unsigned char key[CLYDE128_KEY_SIZE], - const uint32_t tweak[CLYDE128_TWEAK_SIZE / 4], - uint32_t output[CLYDE128_BLOCK_SIZE / 4], - const uint32_t input[CLYDE128_BLOCK_SIZE / 4]); - -/** - * \brief Decrypts a block with the Clyde-128 block cipher. - * - * \param key Points to the key to decrypt with. - * \param tweak Points to the tweak to decrypt with. - * \param output Output buffer for the plaintext. - * \param input Input buffer for the ciphertext. - * - * \sa clyde128_encrypt() - */ -void clyde128_decrypt(const unsigned char key[CLYDE128_KEY_SIZE], - const uint32_t tweak[CLYDE128_TWEAK_SIZE / 4], - uint32_t output[CLYDE128_BLOCK_SIZE / 4], - const unsigned char input[CLYDE128_BLOCK_SIZE]); - -/** - * \brief Performs the Shadow-512 permutation on a state. - * - * \param state The Shadow-512 state which will be in little-endian - * byte order on input and output. - * - * \sa shadow384() - */ -void shadow512(shadow512_state_t *state); - -/** - * \brief Performs the Shadow-384 permutation on a state. - * - * \param state The Shadow-384 state which will be in little-endian - * byte order on input and output. - * - * \sa shadow512() - */ -void shadow384(shadow384_state_t *state); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/internal-util.h b/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
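Of the XOR helpers above, lw_xor_block_2_dest() is the one that does double duty in sponge encryption: it XORs the source into the destination in place and writes each XORed byte to a second destination as well (later in this patch, dest is the state's rate bytes, src the message, and dest2 the ciphertext). A small stand-alone equivalent, with hypothetical names and example values:

    #include <stdio.h>

    /* Function form of the lw_xor_block_2_dest() macro: XOR src into dest
     * in place and copy each XORed byte to dest2 as well. */
    static void xor_block_2_dest(unsigned char *dest2, unsigned char *dest,
                                 const unsigned char *src, unsigned len)
    {
        while (len > 0) {
            *dest2++ = (*dest++ ^= *src++);
            --len;
        }
    }

    int main(void)
    {
        unsigned char state[4] = { 0xAA, 0xBB, 0xCC, 0xDD };  /* keystream */
        unsigned char msg[4]   = { 0x01, 0x02, 0x03, 0x04 };
        unsigned char ct[4];

        xor_block_2_dest(ct, state, msg, 4);
        /* ct and state now both hold AB B9 CF D9 */
        printf("%02X %02X %02X %02X\n", ct[0], ct[1], ct[2], ct[3]);
        return 0;
    }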
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
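The composed rotations above rest on the identity that any 32-bit rotation can be built from byte-multiple rotations plus single-bit rotations, which are the cheap cases on AVR. A small self-check of one such composition (rotate left by 16, then right by 1 four times, equals rotate left by 12); rotl and rotr are local stand-ins for the generic macros:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t rotl(uint32_t x, unsigned bits)
    { return (x << bits) | (x >> (32 - bits)); }

    static uint32_t rotr(uint32_t x, unsigned bits)
    { return (x >> bits) | (x << (32 - bits)); }

    int main(void)
    {
        uint32_t x = 0xC0FFEE01U;
        /* leftRotate12 in composed form: left by 16 (byte moves on AVR),
         * then right by 1 four times. */
        assert(rotr(rotr(rotr(rotr(rotl(x, 16), 1), 1), 1), 1) == rotl(x, 12));
        return 0;
    }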
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/spook.c b/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/spook.c deleted file mode 100644 index d075b33..0000000 --- a/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/spook.c +++ /dev/null @@ -1,552 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "spook.h" -#include "internal-spook.h" -#include "internal-util.h" -#include - -aead_cipher_t const spook_128_512_su_cipher = { - "Spook-128-512-su", - SPOOK_SU_KEY_SIZE, - SPOOK_NONCE_SIZE, - SPOOK_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - spook_128_512_su_aead_encrypt, - spook_128_512_su_aead_decrypt -}; - -aead_cipher_t const spook_128_384_su_cipher = { - "Spook-128-384-su", - SPOOK_SU_KEY_SIZE, - SPOOK_NONCE_SIZE, - SPOOK_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - spook_128_384_su_aead_encrypt, - spook_128_384_su_aead_decrypt -}; - -aead_cipher_t const spook_128_512_mu_cipher = { - "Spook-128-512-mu", - SPOOK_MU_KEY_SIZE, - SPOOK_NONCE_SIZE, - SPOOK_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - spook_128_512_mu_aead_encrypt, - spook_128_512_mu_aead_decrypt -}; - -aead_cipher_t const spook_128_384_mu_cipher = { - "Spook-128-384-mu", - SPOOK_MU_KEY_SIZE, - SPOOK_NONCE_SIZE, - SPOOK_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - spook_128_384_mu_aead_encrypt, - spook_128_384_mu_aead_decrypt -}; - -/** - * \brief Initializes the Shadow-512 sponge state. - * - * \param state The sponge state. - * \param k Points to the key. - * \param klen Length of the key in bytes, either 16 or 32. - * \param npub Public nonce for the state. 
- */ -static void spook_128_512_init - (shadow512_state_t *state, - const unsigned char *k, unsigned klen, - const unsigned char *npub) -{ - memset(state->B, 0, SHADOW512_STATE_SIZE); - if (klen == SPOOK_MU_KEY_SIZE) { - /* The public tweak is 126 bits in size followed by a 1 bit */ - memcpy(state->B, k + CLYDE128_BLOCK_SIZE, CLYDE128_BLOCK_SIZE); - state->B[CLYDE128_BLOCK_SIZE - 1] &= 0x7F; - state->B[CLYDE128_BLOCK_SIZE - 1] |= 0x40; - } - memcpy(state->B + CLYDE128_BLOCK_SIZE, npub, CLYDE128_BLOCK_SIZE); - clyde128_encrypt(k, state->W, state->W + 12, state->W + 4); - shadow512(state); -} - -/** - * \brief Initializes the Shadow-384 sponge state. - * - * \param state The sponge state. - * \param k Points to the key. - * \param klen Length of the key in bytes, either 16 or 32. - * \param npub Public nonce for the state. - */ -static void spook_128_384_init - (shadow384_state_t *state, - const unsigned char *k, unsigned klen, - const unsigned char *npub) -{ - memset(state->B, 0, SHADOW384_STATE_SIZE); - if (klen == SPOOK_MU_KEY_SIZE) { - /* The public tweak is 126 bits in size followed by a 1 bit */ - memcpy(state->B, k + CLYDE128_BLOCK_SIZE, CLYDE128_BLOCK_SIZE); - state->B[CLYDE128_BLOCK_SIZE - 1] &= 0x7F; - state->B[CLYDE128_BLOCK_SIZE - 1] |= 0x40; - } - memcpy(state->B + CLYDE128_BLOCK_SIZE, npub, CLYDE128_BLOCK_SIZE); - clyde128_encrypt(k, state->W, state->W + 8, state->W + 4); - shadow384(state); -} - -/** - * \brief Absorbs associated data into the Shadow-512 sponge state. - * - * \param state The sponge state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes, must be non-zero. - */ -static void spook_128_512_absorb - (shadow512_state_t *state, - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen >= SHADOW512_RATE) { - lw_xor_block(state->B, ad, SHADOW512_RATE); - shadow512(state); - ad += SHADOW512_RATE; - adlen -= SHADOW512_RATE; - } - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(state->B, ad, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW512_RATE] ^= 0x02; - shadow512(state); - } -} - -/** - * \brief Absorbs associated data into the Shadow-384 sponge state. - * - * \param state The sponge state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes, must be non-zero. - */ -static void spook_128_384_absorb - (shadow384_state_t *state, - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen >= SHADOW384_RATE) { - lw_xor_block(state->B, ad, SHADOW384_RATE); - shadow384(state); - ad += SHADOW384_RATE; - adlen -= SHADOW384_RATE; - } - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(state->B, ad, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW384_RATE] ^= 0x02; - shadow384(state); - } -} - -/** - * \brief Encrypts the plaintext with the Shadow-512 sponge state. - * - * \param state The sponge state. - * \param c Points to the ciphertext output buffer. - * \param m Points to the plaintext input buffer. - * \param mlen Number of bytes of plaintext to be encrypted. 
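The absorb routines above pad a final partial block by XORing a 0x01 byte directly after the data and flipping a 0x02 domain-separation bit into the first capacity byte before permuting. A stripped-down sketch of just that step, with a no-op permute() standing in for shadow512() and only a final block of 1..RATE-1 bytes handled; all names here are placeholders:

    #include <stdint.h>

    #define RATE 32                            /* stands in for SHADOW512_RATE */

    typedef struct { uint8_t B[64]; } sponge_state_t;   /* byte view only */

    static void permute(sponge_state_t *state) { (void)state; }  /* placeholder */

    /* Mirrors the partial-block handling in spook_128_512_absorb() above. */
    void absorb_last_block(sponge_state_t *state, const uint8_t *ad, unsigned adlen)
    {
        unsigned i;
        for (i = 0; i < adlen; ++i)            /* adlen assumed 1..RATE-1 */
            state->B[i] ^= ad[i];
        state->B[adlen] ^= 0x01;               /* padding byte after the data */
        state->B[RATE]  ^= 0x02;               /* domain bit in the capacity */
        permute(state);
    }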
- */ -static void spook_128_512_encrypt - (shadow512_state_t *state, unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - state->B[SHADOW512_RATE] ^= 0x01; - while (mlen >= SHADOW512_RATE) { - lw_xor_block_2_dest(c, state->B, m, SHADOW512_RATE); - shadow512(state); - c += SHADOW512_RATE; - m += SHADOW512_RATE; - mlen -= SHADOW512_RATE; - } - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_dest(c, state->B, m, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW512_RATE] ^= 0x02; - shadow512(state); - } -} - -/** - * \brief Encrypts the plaintext with the Shadow-384 sponge state. - * - * \param state The sponge state. - * \param c Points to the ciphertext output buffer. - * \param m Points to the plaintext input buffer. - * \param mlen Number of bytes of plaintext to be encrypted. - */ -static void spook_128_384_encrypt - (shadow384_state_t *state, unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - state->B[SHADOW384_RATE] ^= 0x01; - while (mlen >= SHADOW384_RATE) { - lw_xor_block_2_dest(c, state->B, m, SHADOW384_RATE); - shadow384(state); - c += SHADOW384_RATE; - m += SHADOW384_RATE; - mlen -= SHADOW384_RATE; - } - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_dest(c, state->B, m, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW384_RATE] ^= 0x02; - shadow384(state); - } -} - -/** - * \brief Decrypts the ciphertext with the Shadow-512 sponge state. - * - * \param state The sponge state. - * \param m Points to the plaintext output buffer. - * \param c Points to the ciphertext input buffer. - * \param clen Number of bytes of ciphertext to be decrypted. - */ -static void spook_128_512_decrypt - (shadow512_state_t *state, unsigned char *m, - const unsigned char *c, unsigned long long clen) -{ - state->B[SHADOW512_RATE] ^= 0x01; - while (clen >= SHADOW512_RATE) { - lw_xor_block_swap(m, state->B, c, SHADOW512_RATE); - shadow512(state); - c += SHADOW512_RATE; - m += SHADOW512_RATE; - clen -= SHADOW512_RATE; - } - if (clen > 0) { - unsigned temp = (unsigned)clen; - lw_xor_block_swap(m, state->B, c, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW512_RATE] ^= 0x02; - shadow512(state); - } -} - -/** - * \brief Decrypts the ciphertext with the Shadow-384 sponge state. - * - * \param state The sponge state. - * \param m Points to the plaintext output buffer. - * \param c Points to the ciphertext input buffer. - * \param clen Number of bytes of ciphertext to be decrypted. 
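Each full-rate block in the encryption routines above performs a duplex step: the plaintext is XORed into the leading rate bytes of the state, the XOR result is emitted as ciphertext, and the permutation is applied before the next block. A hedged, self-contained sketch of that per-block step, assuming lw_xor_block_2_dest() both updates the state and writes the XOR result to the ciphertext buffer (illustrative only, not part of the patch):

/* One Shadow-512 rate block (SHADOW512_RATE = 32 bytes): absorb plaintext,
 * emit ciphertext; shadow512() would then be applied to the 64-byte state. */
static void duplex_encrypt_block(unsigned char state[64],
                                 unsigned char c[32],
                                 const unsigned char m[32])
{
    int i;
    for (i = 0; i < 32; ++i)
        c[i] = (state[i] ^= m[i]);
}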
- */ -static void spook_128_384_decrypt - (shadow384_state_t *state, unsigned char *m, - const unsigned char *c, unsigned long long clen) -{ - state->B[SHADOW384_RATE] ^= 0x01; - while (clen >= SHADOW384_RATE) { - lw_xor_block_swap(m, state->B, c, SHADOW384_RATE); - shadow384(state); - c += SHADOW384_RATE; - m += SHADOW384_RATE; - clen -= SHADOW384_RATE; - } - if (clen > 0) { - unsigned temp = (unsigned)clen; - lw_xor_block_swap(m, state->B, c, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW384_RATE] ^= 0x02; - shadow384(state); - } -} - -int spook_128_512_su_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - shadow512_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPOOK_TAG_SIZE; - - /* Initialize the Shadow-512 sponge state */ - spook_128_512_init(&state, k, SPOOK_SU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_512_absorb(&state, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) - spook_128_512_encrypt(&state, c, m, mlen); - - /* Compute the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_encrypt(k, state.W + 4, state.W, state.W); - memcpy(c + mlen, state.B, SPOOK_TAG_SIZE); - return 0; -} - -int spook_128_512_su_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - shadow512_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SPOOK_TAG_SIZE) - return -1; - *mlen = clen - SPOOK_TAG_SIZE; - - /* Initialize the Shadow-512 sponge state */ - spook_128_512_init(&state, k, SPOOK_SU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_512_absorb(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPOOK_TAG_SIZE; - if (clen > 0) - spook_128_512_decrypt(&state, m, c, clen); - - /* Check the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_decrypt(k, state.W + 4, state.W + 4, c + clen); - return aead_check_tag - (m, clen, state.B, state.B + CLYDE128_BLOCK_SIZE, SPOOK_TAG_SIZE); -} - -int spook_128_384_su_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - shadow384_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPOOK_TAG_SIZE; - - /* Initialize the Shadow-384 sponge state */ - spook_128_384_init(&state, k, SPOOK_SU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_384_absorb(&state, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) - spook_128_384_encrypt(&state, c, m, mlen); - - /* Compute the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_encrypt(k, state.W + 4, state.W, state.W); - memcpy(c + mlen, state.B, SPOOK_TAG_SIZE); - return 0; -} - -int spook_128_384_su_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char 
*nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - shadow384_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SPOOK_TAG_SIZE) - return -1; - *mlen = clen - SPOOK_TAG_SIZE; - - /* Initialize the Shadow-384 sponge state */ - spook_128_384_init(&state, k, SPOOK_SU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_384_absorb(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPOOK_TAG_SIZE; - if (clen > 0) - spook_128_384_decrypt(&state, m, c, clen); - - /* Check the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_decrypt(k, state.W + 4, state.W + 4, c + clen); - return aead_check_tag - (m, clen, state.B, state.B + CLYDE128_BLOCK_SIZE, SPOOK_TAG_SIZE); -} - -int spook_128_512_mu_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - shadow512_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPOOK_TAG_SIZE; - - /* Initialize the Shadow-512 sponge state */ - spook_128_512_init(&state, k, SPOOK_MU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_512_absorb(&state, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) - spook_128_512_encrypt(&state, c, m, mlen); - - /* Compute the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_encrypt(k, state.W + 4, state.W, state.W); - memcpy(c + mlen, state.B, SPOOK_TAG_SIZE); - return 0; -} - -int spook_128_512_mu_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - shadow512_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SPOOK_TAG_SIZE) - return -1; - *mlen = clen - SPOOK_TAG_SIZE; - - /* Initialize the Shadow-512 sponge state */ - spook_128_512_init(&state, k, SPOOK_MU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_512_absorb(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPOOK_TAG_SIZE; - if (clen > 0) - spook_128_512_decrypt(&state, m, c, clen); - - /* Check the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_decrypt(k, state.W + 4, state.W + 4, c + clen); - return aead_check_tag - (m, clen, state.B, state.B + CLYDE128_BLOCK_SIZE, SPOOK_TAG_SIZE); -} - -int spook_128_384_mu_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - shadow384_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPOOK_TAG_SIZE; - - /* Initialize the Shadow-384 sponge state */ - spook_128_384_init(&state, k, SPOOK_MU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_384_absorb(&state, ad, adlen); - - /* Encrypt the 
plaintext to produce the ciphertext */ - if (mlen > 0) - spook_128_384_encrypt(&state, c, m, mlen); - - /* Compute the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_encrypt(k, state.W + 4, state.W, state.W); - memcpy(c + mlen, state.B, SPOOK_TAG_SIZE); - return 0; -} - -int spook_128_384_mu_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - shadow384_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SPOOK_TAG_SIZE) - return -1; - *mlen = clen - SPOOK_TAG_SIZE; - - /* Initialize the Shadow-384 sponge state */ - spook_128_384_init(&state, k, SPOOK_MU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_384_absorb(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPOOK_TAG_SIZE; - if (clen > 0) - spook_128_384_decrypt(&state, m, c, clen); - - /* Check the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_decrypt(k, state.W + 4, state.W + 4, c + clen); - return aead_check_tag - (m, clen, state.B, state.B + CLYDE128_BLOCK_SIZE, SPOOK_TAG_SIZE); -} diff --git a/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/spook.h b/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/spook.h deleted file mode 100644 index 68b6a25..0000000 --- a/spook/Implementations/crypto_aead/spook128mu512v1/rhys-avr/spook.h +++ /dev/null @@ -1,344 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SPOOK_H -#define LWCRYPTO_SPOOK_H - -#include "aead-common.h" - -/** - * \file spook.h - * \brief Spook authenticated encryption algorithm. - * - * Spook is a family of authenticated encryption algorithms that are - * built around a tweakable block cipher and a permutation. If the - * tweakable block cipher is implemented as a masked block cipher, - * then Spook provides protection against power analysis side channels. - * - * There are four members in the Spook family: - * - * \li Spook-128-512-su with a 128-bit key, a 128-bit nonce, and a 128-bit tag. - * Internally the algorithm uses a 512-bit permutation. This is the primary - * member of the family. 
- * \li Spook-128-384-su with a 128-bit key, a 128-bit nonce, and a 128-bit tag. - * Internally the algorithm uses a 384-bit permutation. - * \li Spook-128-512-mu with a 128-bit key, a 128-bit public tweak, a 128-bit - * nonce, and a 128-bit tag. Internally the algorithm uses a 512-bit - * permutation. - * \li Spook-128-512-mu with a 128-bit key, a 128-bit public tweak, a 128-bit - * nonce, and a 128-bit tag. Internally the algorithm uses a 384-bit - * permutation. - * - * In this library, the "mu" (multi-user) variants combine the 128-bit key - * and the 128-bit public tweak into a single 256-bit key value. - * Applications can either view this as a cipher with a 256-bit key, - * or they can split the key value into secret and public halves. - * Even with the use of 256-bit keys, Spook only has 128-bit security. - * - * References: https://www.spook.dev/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for the single-user version of Spook. - */ -#define SPOOK_SU_KEY_SIZE 16 - -/** - * \brief Size of the key for the multi-user version of Spook. - */ -#define SPOOK_MU_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for all Spook family members. - */ -#define SPOOK_TAG_SIZE 16 - -/** - * \brief Size of the nonce for all Spook family members. - */ -#define SPOOK_NONCE_SIZE 16 - -/** - * \brief Meta-information block for the Spook-128-512-su cipher. - */ -extern aead_cipher_t const spook_128_512_su_cipher; - -/** - * \brief Meta-information block for the Spook-128-384-su cipher. - */ -extern aead_cipher_t const spook_128_384_su_cipher; - -/** - * \brief Meta-information block for the Spook-128-512-mu cipher. - */ -extern aead_cipher_t const spook_128_512_mu_cipher; - -/** - * \brief Meta-information block for the Spook-128-384-mu cipher. - */ -extern aead_cipher_t const spook_128_384_mu_cipher; - -/** - * \brief Encrypts and authenticates a packet with Spook-128-512-su. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa spook_128_512_su_aead_decrypt() - */ -int spook_128_512_su_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Spook-128-512-su. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. 
- * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa spook_128_512_su_aead_encrypt() - */ -int spook_128_512_su_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Spook-128-384-su. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa spook_128_384_su_aead_decrypt() - */ -int spook_128_384_su_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Spook-128-384-su. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa spook_128_384_su_aead_encrypt() - */ -int spook_128_384_su_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Spook-128-512-mu. 
- * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa spook_128_512_mu_aead_decrypt() - */ -int spook_128_512_mu_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Spook-128-512-mu. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa spook_128_512_mu_aead_encrypt() - */ -int spook_128_512_mu_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Spook-128-384-mu. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
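The prototypes above are enough to sketch a round trip through the multi-user variant; buffer sizes follow SPOOK_MU_KEY_SIZE (32), SPOOK_NONCE_SIZE (16) and SPOOK_TAG_SIZE (16). The helper below is hypothetical and illustrative, not part of the patch:

#include <string.h>
#include "spook.h"

static int spook_mu_roundtrip(void)
{
    unsigned char key[32] = {0};        /* 128-bit secret key || 128-bit public tweak */
    unsigned char npub[16] = {0};       /* public nonce */
    unsigned char msg[4] = {'t', 'e', 's', 't'};
    unsigned char ct[sizeof(msg) + 16]; /* ciphertext plus 16-byte tag */
    unsigned char out[sizeof(msg)];
    unsigned long long clen, mlen;

    spook_128_512_mu_aead_encrypt(ct, &clen, msg, sizeof(msg),
                                  NULL, 0, NULL, npub, key);
    if (spook_128_512_mu_aead_decrypt(out, &mlen, NULL, ct, clen,
                                      NULL, 0, npub, key) != 0)
        return -1;                      /* authentication tag did not verify */
    return memcmp(out, msg, (size_t)mlen) == 0 ? 0 : -1;
}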
- * - * \sa spook_128_384_mu_aead_decrypt() - */ -int spook_128_384_mu_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Spook-128-384-mu. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa spook_128_384_mu_aead_encrypt() - */ -int spook_128_384_mu_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spook/Implementations/crypto_aead/spook128mu512v1/rhys/internal-util.h b/spook/Implementations/crypto_aead/spook128mu512v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/spook/Implementations/crypto_aead/spook128mu512v1/rhys/internal-util.h +++ b/spook/Implementations/crypto_aead/spook128mu512v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/aead-common.c b/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
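The composed rotation macros in the internal-util.h hunk above build every 32-bit rotation from byte-aligned rotations (cheap register moves on AVR) plus at most four single-bit rotations. A small self-check, assuming nothing beyond standard C, that the composition used for leftRotate5 matches a direct barrel-shifter rotation (editorial sketch, not part of the patch):

#include <assert.h>
#include <stdint.h>

static uint32_t rotl32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32u - bits));
}

static void check_left_rotate5(uint32_t x)
{
    uint32_t y = rotl32(x, 8);      /* byte-aligned rotation: cheap on AVR */
    y = (y >> 1) | (y << 31);       /* then three single-bit right rotations */
    y = (y >> 1) | (y << 31);
    y = (y >> 1) | (y << 31);
    assert(y == rotl32(x, 5));      /* same result as rotating left by 5 */
}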
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/aead-common.h b/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. 
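The tag comparison in aead_check_tag() above never branches on secret data: the XOR of every pair of tag bytes is OR-accumulated, and `(accum - 1) >> 8` turns a zero accumulator into an all-ones mask (relying, as the library does, on arithmetic right shift of negative ints). A small worked illustration of the mask values (editorial, not part of the patch):

#include <stdio.h>

int main(void)
{
    int accum_match = 0x00; /* every tag byte matched         */
    int accum_diff  = 0x5A; /* at least one tag byte differed */

    /* The mask is -1 (all ones) only when accum == 0, i.e. the tags matched. */
    printf("match mask = %d, mismatch mask = %d\n",
           (accum_match - 1) >> 8, (accum_diff - 1) >> 8);   /* -1 and 0 */

    /* The plaintext is ANDed with the mask (kept or zeroed), and ~mask is the
     * return value: 0 on success, -1 on failure. */
    return 0;
}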
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. 
- * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. 
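The aead_cipher_t structure defined above lets generic driver code treat every family member uniformly through its function pointers; meta-information instances such as spook_128_512_su_cipher in spook.c fill it in. A hedged sketch of such a driver (illustrative, not part of the patch):

#include <stdio.h>
#include "aead-common.h"

/* Print the parameters of a cipher; e.g. describe_cipher(&spook_128_512_su_cipher)
 * after including spook.h. */
static void describe_cipher(const aead_cipher_t *cipher)
{
    printf("%s: key=%u nonce=%u tag=%u %s\n",
           cipher->name, cipher->key_len, cipher->nonce_len, cipher->tag_len,
           (cipher->flags & AEAD_FLAG_LITTLE_ENDIAN) ? "little-endian" : "big-endian");
    /* cipher->encrypt and cipher->decrypt use the NIST AEAD signatures,
     * so one test harness can exercise every member of the family. */
}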
- * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/api.h b/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/encrypt.c b/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/encrypt.c deleted file mode 100644 index e61a44a..0000000 --- a/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "spook.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return spook_128_384_su_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return spook_128_384_su_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/internal-spook.c b/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/internal-spook.c deleted file mode 100644 index 0e19216..0000000 --- a/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/internal-spook.c +++ /dev/null @@ -1,557 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
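The api.h constants above advertise the parameter sizes of the family member selected for this directory (Spook-128-384-su), and encrypt.c simply forwards the NIST-style entry points to it. A hedged compile-time check, assuming a C11 compiler, that the two sets of sizes agree (editorial sketch, not part of the patch):

#include "api.h"
#include "spook.h"

_Static_assert(CRYPTO_KEYBYTES  == SPOOK_SU_KEY_SIZE, "key size mismatch");
_Static_assert(CRYPTO_NPUBBYTES == SPOOK_NONCE_SIZE,  "nonce size mismatch");
_Static_assert(CRYPTO_ABYTES    == SPOOK_TAG_SIZE,    "tag size mismatch");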
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-spook.h" - -/** - * \brief Number of steps in the Clyde-128 block cipher. - * - * This is also the number of steps in the Shadow-512 and Shadow-384 - * permutations. - */ -#define CLYDE128_STEPS 6 - -/** - * \brief Round constants for the steps of Clyde-128. - */ -static uint8_t const rc[CLYDE128_STEPS][8] = { - {1, 0, 0, 0, 0, 1, 0, 0}, - {0, 0, 1, 0, 0, 0, 0, 1}, - {1, 1, 0, 0, 0, 1, 1, 0}, - {0, 0, 1, 1, 1, 1, 0, 1}, - {1, 0, 1, 0, 0, 1, 0, 1}, - {1, 1, 1, 0, 0, 1, 1, 1} -}; - -void clyde128_encrypt(const unsigned char key[CLYDE128_KEY_SIZE], - const uint32_t tweak[CLYDE128_TWEAK_SIZE / 4], - uint32_t output[CLYDE128_BLOCK_SIZE / 4], - const uint32_t input[CLYDE128_BLOCK_SIZE / 4]) -{ - uint32_t k0, k1, k2, k3; - uint32_t t0, t1, t2, t3; - uint32_t s0, s1, s2, s3; - uint32_t c, d; - int step; - - /* Unpack the key, tweak, and state */ - k0 = le_load_word32(key); - k1 = le_load_word32(key + 4); - k2 = le_load_word32(key + 8); - k3 = le_load_word32(key + 12); -#if defined(LW_UTIL_LITTLE_ENDIAN) - t0 = tweak[0]; - t1 = tweak[1]; - t2 = tweak[2]; - t3 = tweak[3]; - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; -#else - t0 = le_load_word32((const unsigned char *)&(tweak[0])); - t1 = le_load_word32((const unsigned char *)&(tweak[1])); - t2 = le_load_word32((const unsigned char *)&(tweak[2])); - t3 = le_load_word32((const unsigned char *)&(tweak[3])); - s0 = le_load_word32((const unsigned char *)&(input[0])); - s1 = le_load_word32((const unsigned char *)&(input[1])); - s2 = le_load_word32((const unsigned char *)&(input[2])); - s3 = le_load_word32((const unsigned char *)&(input[3])); -#endif - - /* Add the initial tweakey to the state */ - s0 ^= k0 ^ t0; - s1 ^= k1 ^ t1; - s2 ^= k2 ^ t2; - s3 ^= k3 ^ t3; - - /* Perform all rounds in pairs */ - for (step = 0; step < CLYDE128_STEPS; ++step) { - /* Perform the two rounds of this step */ - #define clyde128_sbox(s0, s1, s2, s3) \ - do { \ - c = (s0 & s1) ^ s2; \ - d = (s3 & s0) ^ s1; \ - s2 = (c & d) ^ s3; \ - s3 = (c & s3) ^ s0; \ - s0 = d; \ - s1 = c; \ - } while (0) - #define clyde128_lbox(x, y) \ - do { \ - c = x ^ rightRotate12(x); \ - d = y ^ rightRotate12(y); \ - c ^= rightRotate3(c); \ - d ^= rightRotate3(d); \ - x = c ^ leftRotate15(x); \ - y = d ^ leftRotate15(y); \ - c = x ^ leftRotate1(x); \ - d = y ^ leftRotate1(y); \ - x ^= leftRotate6(d); \ - y ^= leftRotate7(c); \ - x ^= rightRotate15(c); \ - y ^= rightRotate15(d); \ - } while (0) - clyde128_sbox(s0, s1, s2, s3); - clyde128_lbox(s0, s1); - clyde128_lbox(s2, s3); - s0 ^= rc[step][0]; - s1 ^= rc[step][1]; - s2 ^= rc[step][2]; - s3 ^= rc[step][3]; - clyde128_sbox(s0, s1, s2, s3); - clyde128_lbox(s0, s1); - clyde128_lbox(s2, s3); - s0 ^= rc[step][4]; - s1 ^= rc[step][5]; - s2 ^= rc[step][6]; - s3 ^= rc[step][7]; - - /* Update the tweakey on the fly and add it to the state */ - c = t2 ^ t0; - d = t3 ^ t1; - t2 = t0; - t3 = t1; - t0 = c; - t1 = d; - s0 ^= k0 ^ t0; - s1 ^= k1 ^ t1; - s2 ^= k2 ^ t2; - s3 ^= k3 ^ t3; - } - - /* Pack the state into the output 
buffer */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -#else - le_store_word32((unsigned char *)&(output[0]), s0); - le_store_word32((unsigned char *)&(output[1]), s1); - le_store_word32((unsigned char *)&(output[2]), s2); - le_store_word32((unsigned char *)&(output[3]), s3); -#endif -} - -void clyde128_decrypt(const unsigned char key[CLYDE128_KEY_SIZE], - const uint32_t tweak[CLYDE128_TWEAK_SIZE / 4], - uint32_t output[CLYDE128_BLOCK_SIZE / 4], - const unsigned char input[CLYDE128_BLOCK_SIZE]) -{ - uint32_t k0, k1, k2, k3; - uint32_t t0, t1, t2, t3; - uint32_t s0, s1, s2, s3; - uint32_t a, b, d; - int step; - - /* Unpack the key, tweak, and state */ - k0 = le_load_word32(key); - k1 = le_load_word32(key + 4); - k2 = le_load_word32(key + 8); - k3 = le_load_word32(key + 12); -#if defined(LW_UTIL_LITTLE_ENDIAN) - t0 = tweak[0]; - t1 = tweak[1]; - t2 = tweak[2]; - t3 = tweak[3]; -#else - t0 = le_load_word32((const unsigned char *)&(tweak[0])); - t1 = le_load_word32((const unsigned char *)&(tweak[1])); - t2 = le_load_word32((const unsigned char *)&(tweak[2])); - t3 = le_load_word32((const unsigned char *)&(tweak[3])); -#endif - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all rounds in pairs */ - for (step = CLYDE128_STEPS - 1; step >= 0; --step) { - /* Add the tweakey to the state and update the tweakey */ - s0 ^= k0 ^ t0; - s1 ^= k1 ^ t1; - s2 ^= k2 ^ t2; - s3 ^= k3 ^ t3; - a = t2 ^ t0; - b = t3 ^ t1; - t0 = t2; - t1 = t3; - t2 = a; - t3 = b; - - /* Perform the two rounds of this step */ - #define clyde128_inv_sbox(s0, s1, s2, s3) \ - do { \ - d = (s0 & s1) ^ s2; \ - a = (s1 & d) ^ s3; \ - b = (d & a) ^ s0; \ - s2 = (a & b) ^ s1; \ - s0 = a; \ - s1 = b; \ - s3 = d; \ - } while (0) - #define clyde128_inv_lbox(x, y) \ - do { \ - a = x ^ leftRotate7(x); \ - b = y ^ leftRotate7(y); \ - x ^= leftRotate1(a); \ - y ^= leftRotate1(b); \ - x ^= leftRotate12(a); \ - y ^= leftRotate12(b); \ - a = x ^ leftRotate1(x); \ - b = y ^ leftRotate1(y); \ - x ^= leftRotate6(b); \ - y ^= leftRotate7(a); \ - a ^= leftRotate15(x); \ - b ^= leftRotate15(y); \ - x = rightRotate16(a); \ - y = rightRotate16(b); \ - } while (0) - s0 ^= rc[step][4]; - s1 ^= rc[step][5]; - s2 ^= rc[step][6]; - s3 ^= rc[step][7]; - clyde128_inv_lbox(s0, s1); - clyde128_inv_lbox(s2, s3); - clyde128_inv_sbox(s0, s1, s2, s3); - s0 ^= rc[step][0]; - s1 ^= rc[step][1]; - s2 ^= rc[step][2]; - s3 ^= rc[step][3]; - clyde128_inv_lbox(s0, s1); - clyde128_inv_lbox(s2, s3); - clyde128_inv_sbox(s0, s1, s2, s3); - } - - /* Add the tweakey to the state one last time */ - s0 ^= k0 ^ t0; - s1 ^= k1 ^ t1; - s2 ^= k2 ^ t2; - s3 ^= k3 ^ t3; - - /* Pack the state into the output buffer */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -#else - le_store_word32((unsigned char *)&(output[0]), s0); - le_store_word32((unsigned char *)&(output[1]), s1); - le_store_word32((unsigned char *)&(output[2]), s2); - le_store_word32((unsigned char *)&(output[3]), s3); -#endif -} - -void shadow512(shadow512_state_t *state) -{ - uint32_t s00, s01, s02, s03; - uint32_t s10, s11, s12, s13; - uint32_t s20, s21, s22, s23; - uint32_t s30, s31, s32, s33; - uint32_t c, d, w, x, y, z; - int step; - - /* Unpack the state into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s00 = state->W[0]; - s01 = state->W[1]; - s02 = state->W[2]; - s03 = state->W[3]; - 
s10 = state->W[4]; - s11 = state->W[5]; - s12 = state->W[6]; - s13 = state->W[7]; - s20 = state->W[8]; - s21 = state->W[9]; - s22 = state->W[10]; - s23 = state->W[11]; - s30 = state->W[12]; - s31 = state->W[13]; - s32 = state->W[14]; - s33 = state->W[15]; -#else - s00 = le_load_word32(state->B); - s01 = le_load_word32(state->B + 4); - s02 = le_load_word32(state->B + 8); - s03 = le_load_word32(state->B + 12); - s10 = le_load_word32(state->B + 16); - s11 = le_load_word32(state->B + 20); - s12 = le_load_word32(state->B + 24); - s13 = le_load_word32(state->B + 28); - s20 = le_load_word32(state->B + 32); - s21 = le_load_word32(state->B + 36); - s22 = le_load_word32(state->B + 40); - s23 = le_load_word32(state->B + 44); - s30 = le_load_word32(state->B + 48); - s31 = le_load_word32(state->B + 52); - s32 = le_load_word32(state->B + 56); - s33 = le_load_word32(state->B + 60); -#endif - - /* Perform all rounds in pairs */ - for (step = 0; step < CLYDE128_STEPS; ++step) { - /* Apply the S-box and L-box to bundle 0 */ - clyde128_sbox(s00, s01, s02, s03); - clyde128_lbox(s00, s01); - clyde128_lbox(s02, s03); - s00 ^= rc[step][0]; - s01 ^= rc[step][1]; - s02 ^= rc[step][2]; - s03 ^= rc[step][3]; - clyde128_sbox(s00, s01, s02, s03); - - /* Apply the S-box and L-box to bundle 1 */ - clyde128_sbox(s10, s11, s12, s13); - clyde128_lbox(s10, s11); - clyde128_lbox(s12, s13); - s10 ^= rc[step][0] << 1; - s11 ^= rc[step][1] << 1; - s12 ^= rc[step][2] << 1; - s13 ^= rc[step][3] << 1; - clyde128_sbox(s10, s11, s12, s13); - - /* Apply the S-box and L-box to bundle 2 */ - clyde128_sbox(s20, s21, s22, s23); - clyde128_lbox(s20, s21); - clyde128_lbox(s22, s23); - s20 ^= rc[step][0] << 2; - s21 ^= rc[step][1] << 2; - s22 ^= rc[step][2] << 2; - s23 ^= rc[step][3] << 2; - clyde128_sbox(s20, s21, s22, s23); - - /* Apply the S-box and L-box to bundle 3 */ - clyde128_sbox(s30, s31, s32, s33); - clyde128_lbox(s30, s31); - clyde128_lbox(s32, s33); - s30 ^= rc[step][0] << 3; - s31 ^= rc[step][1] << 3; - s32 ^= rc[step][2] << 3; - s33 ^= rc[step][3] << 3; - clyde128_sbox(s30, s31, s32, s33); - - /* Apply the diffusion layer to the rows of the state */ - #define shadow512_diffusion_layer(row) \ - do { \ - w = s0##row; \ - x = s1##row; \ - y = s2##row; \ - z = s3##row; \ - c = w ^ x; \ - d = y ^ z; \ - s0##row = x ^ d; \ - s1##row = w ^ d; \ - s2##row = c ^ z; \ - s3##row = c ^ y; \ - } while (0) - shadow512_diffusion_layer(0); - shadow512_diffusion_layer(1); - shadow512_diffusion_layer(2); - shadow512_diffusion_layer(3); - - /* Add round constants to all bundles again */ - s00 ^= rc[step][4]; - s01 ^= rc[step][5]; - s02 ^= rc[step][6]; - s03 ^= rc[step][7]; - s10 ^= rc[step][4] << 1; - s11 ^= rc[step][5] << 1; - s12 ^= rc[step][6] << 1; - s13 ^= rc[step][7] << 1; - s20 ^= rc[step][4] << 2; - s21 ^= rc[step][5] << 2; - s22 ^= rc[step][6] << 2; - s23 ^= rc[step][7] << 2; - s30 ^= rc[step][4] << 3; - s31 ^= rc[step][5] << 3; - s32 ^= rc[step][6] << 3; - s33 ^= rc[step][7] << 3; - } - - /* Pack the local variables back into the state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->W[0] = s00; - state->W[1] = s01; - state->W[2] = s02; - state->W[3] = s03; - state->W[4] = s10; - state->W[5] = s11; - state->W[6] = s12; - state->W[7] = s13; - state->W[8] = s20; - state->W[9] = s21; - state->W[10] = s22; - state->W[11] = s23; - state->W[12] = s30; - state->W[13] = s31; - state->W[14] = s32; - state->W[15] = s33; -#else - le_store_word32(state->B, s00); - le_store_word32(state->B + 4, s01); - le_store_word32(state->B + 8, s02); - 
le_store_word32(state->B + 12, s03); - le_store_word32(state->B + 16, s10); - le_store_word32(state->B + 20, s11); - le_store_word32(state->B + 24, s12); - le_store_word32(state->B + 28, s13); - le_store_word32(state->B + 32, s20); - le_store_word32(state->B + 36, s21); - le_store_word32(state->B + 40, s22); - le_store_word32(state->B + 44, s23); - le_store_word32(state->B + 48, s30); - le_store_word32(state->B + 52, s31); - le_store_word32(state->B + 56, s32); - le_store_word32(state->B + 60, s33); -#endif -} - -void shadow384(shadow384_state_t *state) -{ - uint32_t s00, s01, s02, s03; - uint32_t s10, s11, s12, s13; - uint32_t s20, s21, s22, s23; - uint32_t c, d, x, y, z; - int step; - - /* Unpack the state into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s00 = state->W[0]; - s01 = state->W[1]; - s02 = state->W[2]; - s03 = state->W[3]; - s10 = state->W[4]; - s11 = state->W[5]; - s12 = state->W[6]; - s13 = state->W[7]; - s20 = state->W[8]; - s21 = state->W[9]; - s22 = state->W[10]; - s23 = state->W[11]; -#else - s00 = le_load_word32(state->B); - s01 = le_load_word32(state->B + 4); - s02 = le_load_word32(state->B + 8); - s03 = le_load_word32(state->B + 12); - s10 = le_load_word32(state->B + 16); - s11 = le_load_word32(state->B + 20); - s12 = le_load_word32(state->B + 24); - s13 = le_load_word32(state->B + 28); - s20 = le_load_word32(state->B + 32); - s21 = le_load_word32(state->B + 36); - s22 = le_load_word32(state->B + 40); - s23 = le_load_word32(state->B + 44); -#endif - - /* Perform all rounds in pairs */ - for (step = 0; step < CLYDE128_STEPS; ++step) { - /* Apply the S-box and L-box to bundle 0 */ - clyde128_sbox(s00, s01, s02, s03); - clyde128_lbox(s00, s01); - clyde128_lbox(s02, s03); - s00 ^= rc[step][0]; - s01 ^= rc[step][1]; - s02 ^= rc[step][2]; - s03 ^= rc[step][3]; - clyde128_sbox(s00, s01, s02, s03); - - /* Apply the S-box and L-box to bundle 1 */ - clyde128_sbox(s10, s11, s12, s13); - clyde128_lbox(s10, s11); - clyde128_lbox(s12, s13); - s10 ^= rc[step][0] << 1; - s11 ^= rc[step][1] << 1; - s12 ^= rc[step][2] << 1; - s13 ^= rc[step][3] << 1; - clyde128_sbox(s10, s11, s12, s13); - - /* Apply the S-box and L-box to bundle 2 */ - clyde128_sbox(s20, s21, s22, s23); - clyde128_lbox(s20, s21); - clyde128_lbox(s22, s23); - s20 ^= rc[step][0] << 2; - s21 ^= rc[step][1] << 2; - s22 ^= rc[step][2] << 2; - s23 ^= rc[step][3] << 2; - clyde128_sbox(s20, s21, s22, s23); - - /* Apply the diffusion layer to the rows of the state */ - #define shadow384_diffusion_layer(row) \ - do { \ - x = s0##row; \ - y = s1##row; \ - z = s2##row; \ - s0##row = x ^ y ^ z; \ - s1##row = x ^ z; \ - s2##row = x ^ y; \ - } while (0) - shadow384_diffusion_layer(0); - shadow384_diffusion_layer(1); - shadow384_diffusion_layer(2); - shadow384_diffusion_layer(3); - - /* Add round constants to all bundles again */ - s00 ^= rc[step][4]; - s01 ^= rc[step][5]; - s02 ^= rc[step][6]; - s03 ^= rc[step][7]; - s10 ^= rc[step][4] << 1; - s11 ^= rc[step][5] << 1; - s12 ^= rc[step][6] << 1; - s13 ^= rc[step][7] << 1; - s20 ^= rc[step][4] << 2; - s21 ^= rc[step][5] << 2; - s22 ^= rc[step][6] << 2; - s23 ^= rc[step][7] << 2; - } - - /* Pack the local variables back into the state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->W[0] = s00; - state->W[1] = s01; - state->W[2] = s02; - state->W[3] = s03; - state->W[4] = s10; - state->W[5] = s11; - state->W[6] = s12; - state->W[7] = s13; - state->W[8] = s20; - state->W[9] = s21; - state->W[10] = s22; - state->W[11] = s23; -#else - le_store_word32(state->B, s00); - 
le_store_word32(state->B + 4, s01); - le_store_word32(state->B + 8, s02); - le_store_word32(state->B + 12, s03); - le_store_word32(state->B + 16, s10); - le_store_word32(state->B + 20, s11); - le_store_word32(state->B + 24, s12); - le_store_word32(state->B + 28, s13); - le_store_word32(state->B + 32, s20); - le_store_word32(state->B + 36, s21); - le_store_word32(state->B + 40, s22); - le_store_word32(state->B + 44, s23); -#endif -} diff --git a/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/internal-spook.h b/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/internal-spook.h deleted file mode 100644 index b08ce80..0000000 --- a/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/internal-spook.h +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SPOOK_H -#define LW_INTERNAL_SPOOK_H - -#include "internal-util.h" - -/** - * \file internal-spook.h - * \brief Internal implementation details of the Spook AEAD mode. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the block for the Clyde-128 block cipher. - */ -#define CLYDE128_BLOCK_SIZE 16 - -/** - * \brief Size of the key for the Clyde-128 block cipher. - */ -#define CLYDE128_KEY_SIZE 16 - -/** - * \brief Size of the tweak for the Clyde-128 block cipher. - */ -#define CLYDE128_TWEAK_SIZE 16 - -/** - * \brief Size of the state for Shadow-512. - */ -#define SHADOW512_STATE_SIZE 64 - -/** - * \brief Rate to absorb data into or squeeze data out of a Shadow-512 state. - */ -#define SHADOW512_RATE 32 - -/** - * \brief Size of the state for Shadow-384. - */ -#define SHADOW384_STATE_SIZE 48 - -/** - * \brief Rate to absorb data into or squeeze data out of a Shadow-384 state. - */ -#define SHADOW384_RATE 16 - -/** - * \brief Internal state of the Shadow-512 permutation. - */ -typedef union -{ - uint32_t W[SHADOW512_STATE_SIZE / 4]; /**< Words of the state */ - uint8_t B[SHADOW512_STATE_SIZE]; /**< Bytes of the state */ - -} shadow512_state_t; - -/** - * \brief Internal state of the Shadow-384 permutation. - */ -typedef union -{ - uint32_t W[SHADOW384_STATE_SIZE / 4]; /**< Words of the state */ - uint8_t B[SHADOW384_STATE_SIZE]; /**< Bytes of the state */ - -} shadow384_state_t; - -/** - * \brief Encrypts a block with the Clyde-128 block cipher. - * - * \param key Points to the key to encrypt with. - * \param tweak Points to the tweak to encrypt with. 
- * \param output Output buffer for the ciphertext. - * \param input Input buffer for the plaintext. - * - * \sa clyde128_decrypt() - */ -void clyde128_encrypt(const unsigned char key[CLYDE128_KEY_SIZE], - const uint32_t tweak[CLYDE128_TWEAK_SIZE / 4], - uint32_t output[CLYDE128_BLOCK_SIZE / 4], - const uint32_t input[CLYDE128_BLOCK_SIZE / 4]); - -/** - * \brief Decrypts a block with the Clyde-128 block cipher. - * - * \param key Points to the key to decrypt with. - * \param tweak Points to the tweak to decrypt with. - * \param output Output buffer for the plaintext. - * \param input Input buffer for the ciphertext. - * - * \sa clyde128_encrypt() - */ -void clyde128_decrypt(const unsigned char key[CLYDE128_KEY_SIZE], - const uint32_t tweak[CLYDE128_TWEAK_SIZE / 4], - uint32_t output[CLYDE128_BLOCK_SIZE / 4], - const unsigned char input[CLYDE128_BLOCK_SIZE]); - -/** - * \brief Performs the Shadow-512 permutation on a state. - * - * \param state The Shadow-512 state which will be in little-endian - * byte order on input and output. - * - * \sa shadow384() - */ -void shadow512(shadow512_state_t *state); - -/** - * \brief Performs the Shadow-384 permutation on a state. - * - * \param state The Shadow-384 state which will be in little-endian - * byte order on input and output. - * - * \sa shadow512() - */ -void shadow384(shadow384_state_t *state); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/internal-util.h b/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include <stdint.h> - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/spook.c b/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/spook.c deleted file mode 100644 index d075b33..0000000 --- a/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/spook.c +++ /dev/null @@ -1,552 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "spook.h" -#include "internal-spook.h" -#include "internal-util.h" -#include <string.h> - -aead_cipher_t const spook_128_512_su_cipher = { - "Spook-128-512-su", - SPOOK_SU_KEY_SIZE, - SPOOK_NONCE_SIZE, - SPOOK_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - spook_128_512_su_aead_encrypt, - spook_128_512_su_aead_decrypt -}; - -aead_cipher_t const spook_128_384_su_cipher = { - "Spook-128-384-su", - SPOOK_SU_KEY_SIZE, - SPOOK_NONCE_SIZE, - SPOOK_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - spook_128_384_su_aead_encrypt, - spook_128_384_su_aead_decrypt -}; - -aead_cipher_t const spook_128_512_mu_cipher = { - "Spook-128-512-mu", - SPOOK_MU_KEY_SIZE, - SPOOK_NONCE_SIZE, - SPOOK_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - spook_128_512_mu_aead_encrypt, - spook_128_512_mu_aead_decrypt -}; - -aead_cipher_t const spook_128_384_mu_cipher = { - "Spook-128-384-mu", - SPOOK_MU_KEY_SIZE, - SPOOK_NONCE_SIZE, - SPOOK_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - spook_128_384_mu_aead_encrypt, - spook_128_384_mu_aead_decrypt -}; - -/** - * \brief Initializes the Shadow-512 sponge state. - * - * \param state The sponge state. - * \param k Points to the key. - * \param klen Length of the key in bytes, either 16 or 32. - * \param npub Public nonce for the state. 
- */ -static void spook_128_512_init - (shadow512_state_t *state, - const unsigned char *k, unsigned klen, - const unsigned char *npub) -{ - memset(state->B, 0, SHADOW512_STATE_SIZE); - if (klen == SPOOK_MU_KEY_SIZE) { - /* The public tweak is 126 bits in size followed by a 1 bit */ - memcpy(state->B, k + CLYDE128_BLOCK_SIZE, CLYDE128_BLOCK_SIZE); - state->B[CLYDE128_BLOCK_SIZE - 1] &= 0x7F; - state->B[CLYDE128_BLOCK_SIZE - 1] |= 0x40; - } - memcpy(state->B + CLYDE128_BLOCK_SIZE, npub, CLYDE128_BLOCK_SIZE); - clyde128_encrypt(k, state->W, state->W + 12, state->W + 4); - shadow512(state); -} - -/** - * \brief Initializes the Shadow-384 sponge state. - * - * \param state The sponge state. - * \param k Points to the key. - * \param klen Length of the key in bytes, either 16 or 32. - * \param npub Public nonce for the state. - */ -static void spook_128_384_init - (shadow384_state_t *state, - const unsigned char *k, unsigned klen, - const unsigned char *npub) -{ - memset(state->B, 0, SHADOW384_STATE_SIZE); - if (klen == SPOOK_MU_KEY_SIZE) { - /* The public tweak is 126 bits in size followed by a 1 bit */ - memcpy(state->B, k + CLYDE128_BLOCK_SIZE, CLYDE128_BLOCK_SIZE); - state->B[CLYDE128_BLOCK_SIZE - 1] &= 0x7F; - state->B[CLYDE128_BLOCK_SIZE - 1] |= 0x40; - } - memcpy(state->B + CLYDE128_BLOCK_SIZE, npub, CLYDE128_BLOCK_SIZE); - clyde128_encrypt(k, state->W, state->W + 8, state->W + 4); - shadow384(state); -} - -/** - * \brief Absorbs associated data into the Shadow-512 sponge state. - * - * \param state The sponge state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes, must be non-zero. - */ -static void spook_128_512_absorb - (shadow512_state_t *state, - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen >= SHADOW512_RATE) { - lw_xor_block(state->B, ad, SHADOW512_RATE); - shadow512(state); - ad += SHADOW512_RATE; - adlen -= SHADOW512_RATE; - } - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(state->B, ad, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW512_RATE] ^= 0x02; - shadow512(state); - } -} - -/** - * \brief Absorbs associated data into the Shadow-384 sponge state. - * - * \param state The sponge state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes, must be non-zero. - */ -static void spook_128_384_absorb - (shadow384_state_t *state, - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen >= SHADOW384_RATE) { - lw_xor_block(state->B, ad, SHADOW384_RATE); - shadow384(state); - ad += SHADOW384_RATE; - adlen -= SHADOW384_RATE; - } - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(state->B, ad, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW384_RATE] ^= 0x02; - shadow384(state); - } -} - -/** - * \brief Encrypts the plaintext with the Shadow-512 sponge state. - * - * \param state The sponge state. - * \param c Points to the ciphertext output buffer. - * \param m Points to the plaintext input buffer. - * \param mlen Number of bytes of plaintext to be encrypted. 
- */ -static void spook_128_512_encrypt - (shadow512_state_t *state, unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - state->B[SHADOW512_RATE] ^= 0x01; - while (mlen >= SHADOW512_RATE) { - lw_xor_block_2_dest(c, state->B, m, SHADOW512_RATE); - shadow512(state); - c += SHADOW512_RATE; - m += SHADOW512_RATE; - mlen -= SHADOW512_RATE; - } - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_dest(c, state->B, m, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW512_RATE] ^= 0x02; - shadow512(state); - } -} - -/** - * \brief Encrypts the plaintext with the Shadow-384 sponge state. - * - * \param state The sponge state. - * \param c Points to the ciphertext output buffer. - * \param m Points to the plaintext input buffer. - * \param mlen Number of bytes of plaintext to be encrypted. - */ -static void spook_128_384_encrypt - (shadow384_state_t *state, unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - state->B[SHADOW384_RATE] ^= 0x01; - while (mlen >= SHADOW384_RATE) { - lw_xor_block_2_dest(c, state->B, m, SHADOW384_RATE); - shadow384(state); - c += SHADOW384_RATE; - m += SHADOW384_RATE; - mlen -= SHADOW384_RATE; - } - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_dest(c, state->B, m, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW384_RATE] ^= 0x02; - shadow384(state); - } -} - -/** - * \brief Decrypts the ciphertext with the Shadow-512 sponge state. - * - * \param state The sponge state. - * \param m Points to the plaintext output buffer. - * \param c Points to the ciphertext input buffer. - * \param clen Number of bytes of ciphertext to be decrypted. - */ -static void spook_128_512_decrypt - (shadow512_state_t *state, unsigned char *m, - const unsigned char *c, unsigned long long clen) -{ - state->B[SHADOW512_RATE] ^= 0x01; - while (clen >= SHADOW512_RATE) { - lw_xor_block_swap(m, state->B, c, SHADOW512_RATE); - shadow512(state); - c += SHADOW512_RATE; - m += SHADOW512_RATE; - clen -= SHADOW512_RATE; - } - if (clen > 0) { - unsigned temp = (unsigned)clen; - lw_xor_block_swap(m, state->B, c, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW512_RATE] ^= 0x02; - shadow512(state); - } -} - -/** - * \brief Decrypts the ciphertext with the Shadow-384 sponge state. - * - * \param state The sponge state. - * \param m Points to the plaintext output buffer. - * \param c Points to the ciphertext input buffer. - * \param clen Number of bytes of ciphertext to be decrypted. 
- */ -static void spook_128_384_decrypt - (shadow384_state_t *state, unsigned char *m, - const unsigned char *c, unsigned long long clen) -{ - state->B[SHADOW384_RATE] ^= 0x01; - while (clen >= SHADOW384_RATE) { - lw_xor_block_swap(m, state->B, c, SHADOW384_RATE); - shadow384(state); - c += SHADOW384_RATE; - m += SHADOW384_RATE; - clen -= SHADOW384_RATE; - } - if (clen > 0) { - unsigned temp = (unsigned)clen; - lw_xor_block_swap(m, state->B, c, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW384_RATE] ^= 0x02; - shadow384(state); - } -} - -int spook_128_512_su_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - shadow512_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPOOK_TAG_SIZE; - - /* Initialize the Shadow-512 sponge state */ - spook_128_512_init(&state, k, SPOOK_SU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_512_absorb(&state, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) - spook_128_512_encrypt(&state, c, m, mlen); - - /* Compute the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_encrypt(k, state.W + 4, state.W, state.W); - memcpy(c + mlen, state.B, SPOOK_TAG_SIZE); - return 0; -} - -int spook_128_512_su_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - shadow512_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SPOOK_TAG_SIZE) - return -1; - *mlen = clen - SPOOK_TAG_SIZE; - - /* Initialize the Shadow-512 sponge state */ - spook_128_512_init(&state, k, SPOOK_SU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_512_absorb(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPOOK_TAG_SIZE; - if (clen > 0) - spook_128_512_decrypt(&state, m, c, clen); - - /* Check the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_decrypt(k, state.W + 4, state.W + 4, c + clen); - return aead_check_tag - (m, clen, state.B, state.B + CLYDE128_BLOCK_SIZE, SPOOK_TAG_SIZE); -} - -int spook_128_384_su_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - shadow384_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPOOK_TAG_SIZE; - - /* Initialize the Shadow-384 sponge state */ - spook_128_384_init(&state, k, SPOOK_SU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_384_absorb(&state, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) - spook_128_384_encrypt(&state, c, m, mlen); - - /* Compute the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_encrypt(k, state.W + 4, state.W, state.W); - memcpy(c + mlen, state.B, SPOOK_TAG_SIZE); - return 0; -} - -int spook_128_384_su_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char 
*nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - shadow384_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SPOOK_TAG_SIZE) - return -1; - *mlen = clen - SPOOK_TAG_SIZE; - - /* Initialize the Shadow-384 sponge state */ - spook_128_384_init(&state, k, SPOOK_SU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_384_absorb(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPOOK_TAG_SIZE; - if (clen > 0) - spook_128_384_decrypt(&state, m, c, clen); - - /* Check the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_decrypt(k, state.W + 4, state.W + 4, c + clen); - return aead_check_tag - (m, clen, state.B, state.B + CLYDE128_BLOCK_SIZE, SPOOK_TAG_SIZE); -} - -int spook_128_512_mu_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - shadow512_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPOOK_TAG_SIZE; - - /* Initialize the Shadow-512 sponge state */ - spook_128_512_init(&state, k, SPOOK_MU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_512_absorb(&state, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) - spook_128_512_encrypt(&state, c, m, mlen); - - /* Compute the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_encrypt(k, state.W + 4, state.W, state.W); - memcpy(c + mlen, state.B, SPOOK_TAG_SIZE); - return 0; -} - -int spook_128_512_mu_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - shadow512_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SPOOK_TAG_SIZE) - return -1; - *mlen = clen - SPOOK_TAG_SIZE; - - /* Initialize the Shadow-512 sponge state */ - spook_128_512_init(&state, k, SPOOK_MU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_512_absorb(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPOOK_TAG_SIZE; - if (clen > 0) - spook_128_512_decrypt(&state, m, c, clen); - - /* Check the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_decrypt(k, state.W + 4, state.W + 4, c + clen); - return aead_check_tag - (m, clen, state.B, state.B + CLYDE128_BLOCK_SIZE, SPOOK_TAG_SIZE); -} - -int spook_128_384_mu_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - shadow384_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPOOK_TAG_SIZE; - - /* Initialize the Shadow-384 sponge state */ - spook_128_384_init(&state, k, SPOOK_MU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_384_absorb(&state, ad, adlen); - - /* Encrypt the 
plaintext to produce the ciphertext */ - if (mlen > 0) - spook_128_384_encrypt(&state, c, m, mlen); - - /* Compute the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_encrypt(k, state.W + 4, state.W, state.W); - memcpy(c + mlen, state.B, SPOOK_TAG_SIZE); - return 0; -} - -int spook_128_384_mu_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - shadow384_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SPOOK_TAG_SIZE) - return -1; - *mlen = clen - SPOOK_TAG_SIZE; - - /* Initialize the Shadow-384 sponge state */ - spook_128_384_init(&state, k, SPOOK_MU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_384_absorb(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPOOK_TAG_SIZE; - if (clen > 0) - spook_128_384_decrypt(&state, m, c, clen); - - /* Check the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_decrypt(k, state.W + 4, state.W + 4, c + clen); - return aead_check_tag - (m, clen, state.B, state.B + CLYDE128_BLOCK_SIZE, SPOOK_TAG_SIZE); -} diff --git a/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/spook.h b/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/spook.h deleted file mode 100644 index 68b6a25..0000000 --- a/spook/Implementations/crypto_aead/spook128su384v1/rhys-avr/spook.h +++ /dev/null @@ -1,344 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SPOOK_H -#define LWCRYPTO_SPOOK_H - -#include "aead-common.h" - -/** - * \file spook.h - * \brief Spook authenticated encryption algorithm. - * - * Spook is a family of authenticated encryption algorithms that are - * built around a tweakable block cipher and a permutation. If the - * tweakable block cipher is implemented as a masked block cipher, - * then Spook provides protection against power analysis side channels. - * - * There are four members in the Spook family: - * - * \li Spook-128-512-su with a 128-bit key, a 128-bit nonce, and a 128-bit tag. - * Internally the algorithm uses a 512-bit permutation. This is the primary - * member of the family. 
- * \li Spook-128-384-su with a 128-bit key, a 128-bit nonce, and a 128-bit tag. - * Internally the algorithm uses a 384-bit permutation. - * \li Spook-128-512-mu with a 128-bit key, a 128-bit public tweak, a 128-bit - * nonce, and a 128-bit tag. Internally the algorithm uses a 512-bit - * permutation. - * \li Spook-128-512-mu with a 128-bit key, a 128-bit public tweak, a 128-bit - * nonce, and a 128-bit tag. Internally the algorithm uses a 384-bit - * permutation. - * - * In this library, the "mu" (multi-user) variants combine the 128-bit key - * and the 128-bit public tweak into a single 256-bit key value. - * Applications can either view this as a cipher with a 256-bit key, - * or they can split the key value into secret and public halves. - * Even with the use of 256-bit keys, Spook only has 128-bit security. - * - * References: https://www.spook.dev/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for the single-user version of Spook. - */ -#define SPOOK_SU_KEY_SIZE 16 - -/** - * \brief Size of the key for the multi-user version of Spook. - */ -#define SPOOK_MU_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for all Spook family members. - */ -#define SPOOK_TAG_SIZE 16 - -/** - * \brief Size of the nonce for all Spook family members. - */ -#define SPOOK_NONCE_SIZE 16 - -/** - * \brief Meta-information block for the Spook-128-512-su cipher. - */ -extern aead_cipher_t const spook_128_512_su_cipher; - -/** - * \brief Meta-information block for the Spook-128-384-su cipher. - */ -extern aead_cipher_t const spook_128_384_su_cipher; - -/** - * \brief Meta-information block for the Spook-128-512-mu cipher. - */ -extern aead_cipher_t const spook_128_512_mu_cipher; - -/** - * \brief Meta-information block for the Spook-128-384-mu cipher. - */ -extern aead_cipher_t const spook_128_384_mu_cipher; - -/** - * \brief Encrypts and authenticates a packet with Spook-128-512-su. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa spook_128_512_su_aead_decrypt() - */ -int spook_128_512_su_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Spook-128-512-su. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. 
- * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa spook_128_512_su_aead_encrypt() - */ -int spook_128_512_su_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Spook-128-384-su. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa spook_128_384_su_aead_decrypt() - */ -int spook_128_384_su_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Spook-128-384-su. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa spook_128_384_su_aead_encrypt() - */ -int spook_128_384_su_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Spook-128-512-mu. 
- * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa spook_128_512_mu_aead_decrypt() - */ -int spook_128_512_mu_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Spook-128-512-mu. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa spook_128_512_mu_aead_encrypt() - */ -int spook_128_512_mu_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Spook-128-384-mu. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa spook_128_384_mu_aead_decrypt() - */ -int spook_128_384_mu_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Spook-128-384-mu. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa spook_128_384_mu_aead_encrypt() - */ -int spook_128_384_mu_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spook/Implementations/crypto_aead/spook128su384v1/rhys/internal-util.h b/spook/Implementations/crypto_aead/spook128su384v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/spook/Implementations/crypto_aead/spook128su384v1/rhys/internal-util.h +++ b/spook/Implementations/crypto_aead/spook128su384v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/aead-common.c b/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
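As an illustration of the composed-rotation idea in the hunk above: on AVR there is no multi-bit barrel shifter, so an arbitrary 32-bit rotation is built from rotations by whole bytes (8, 16, 24, which are just register moves) plus at most a few single-bit rotates. The minimal sketch below is an editor's example, not part of the library; the rotl32/rotr32 helpers and the main() harness are assumptions made purely for the demonstration. It checks that the composition used for leftRotate13 in the patch, rotate left by 16 and then right by 1 three times, matches a direct 13-bit rotation.

#include <stdint.h>
#include <stdio.h>

/* Plain 32-bit rotations, used here only as a reference for the composed form. */
static uint32_t rotl32(uint32_t x, unsigned bits) { return (x << bits) | (x >> (32u - bits)); }
static uint32_t rotr32(uint32_t x, unsigned bits) { return (x >> bits) | (x << (32u - bits)); }

int main(void)
{
    uint32_t x = 0x12345678u;
    /* leftRotate13 composed as in the patch: left by 16, then right by 1 three times. */
    uint32_t composed = rotr32(rotr32(rotr32(rotl32(x, 16), 1), 1), 1);
    uint32_t direct = rotl32(x, 13);
    printf("composed=%08lx direct=%08lx\n", (unsigned long)composed, (unsigned long)direct);
    return composed != direct; /* returns 0 when the two forms agree */
}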
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/aead-common.h b/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Absorbs more input data into an XOF state. - * - * \param state XOF state to be updated.
- * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. 
- * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/api.h b/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/encrypt.c b/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/encrypt.c deleted file mode 100644 index 0d3db2e..0000000 --- a/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "spook.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return spook_128_512_su_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return spook_128_512_su_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/internal-spook.c b/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/internal-spook.c deleted file mode 100644 index 0e19216..0000000 --- a/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/internal-spook.c +++ /dev/null @@ -1,557 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-spook.h" - -/** - * \brief Number of steps in the Clyde-128 block cipher. - * - * This is also the number of steps in the Shadow-512 and Shadow-384 - * permutations. - */ -#define CLYDE128_STEPS 6 - -/** - * \brief Round constants for the steps of Clyde-128. - */ -static uint8_t const rc[CLYDE128_STEPS][8] = { - {1, 0, 0, 0, 0, 1, 0, 0}, - {0, 0, 1, 0, 0, 0, 0, 1}, - {1, 1, 0, 0, 0, 1, 1, 0}, - {0, 0, 1, 1, 1, 1, 0, 1}, - {1, 0, 1, 0, 0, 1, 0, 1}, - {1, 1, 1, 0, 0, 1, 1, 1} -}; - -void clyde128_encrypt(const unsigned char key[CLYDE128_KEY_SIZE], - const uint32_t tweak[CLYDE128_TWEAK_SIZE / 4], - uint32_t output[CLYDE128_BLOCK_SIZE / 4], - const uint32_t input[CLYDE128_BLOCK_SIZE / 4]) -{ - uint32_t k0, k1, k2, k3; - uint32_t t0, t1, t2, t3; - uint32_t s0, s1, s2, s3; - uint32_t c, d; - int step; - - /* Unpack the key, tweak, and state */ - k0 = le_load_word32(key); - k1 = le_load_word32(key + 4); - k2 = le_load_word32(key + 8); - k3 = le_load_word32(key + 12); -#if defined(LW_UTIL_LITTLE_ENDIAN) - t0 = tweak[0]; - t1 = tweak[1]; - t2 = tweak[2]; - t3 = tweak[3]; - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; -#else - t0 = le_load_word32((const unsigned char *)&(tweak[0])); - t1 = le_load_word32((const unsigned char *)&(tweak[1])); - t2 = le_load_word32((const unsigned char *)&(tweak[2])); - t3 = le_load_word32((const unsigned char *)&(tweak[3])); - s0 = le_load_word32((const unsigned char *)&(input[0])); - s1 = le_load_word32((const unsigned char *)&(input[1])); - s2 = le_load_word32((const unsigned char *)&(input[2])); - s3 = le_load_word32((const unsigned char *)&(input[3])); -#endif - - /* Add the initial tweakey to the state */ - s0 ^= k0 ^ t0; - s1 ^= k1 ^ t1; - s2 ^= k2 ^ t2; - s3 ^= k3 ^ t3; - - /* Perform all rounds in pairs */ - for (step = 0; step < CLYDE128_STEPS; ++step) { - /* Perform the two rounds of this step */ - #define clyde128_sbox(s0, s1, s2, s3) \ - do { \ - c = (s0 & s1) ^ s2; \ - d = (s3 & s0) ^ s1; \ - s2 = (c & d) ^ s3; \ - s3 = (c & s3) ^ s0; \ - s0 = d; \ - s1 = c; \ - } while (0) - #define clyde128_lbox(x, y) \ - do { \ - c = x ^ rightRotate12(x); \ - d = y ^ rightRotate12(y); \ - c ^= rightRotate3(c); \ - d ^= rightRotate3(d); \ - x = c ^ leftRotate15(x); \ - y = d ^ leftRotate15(y); \ - c = x ^ leftRotate1(x); \ - d = y ^ leftRotate1(y); \ - x ^= leftRotate6(d); \ - y ^= leftRotate7(c); \ - x ^= rightRotate15(c); \ - y ^= rightRotate15(d); \ - } while (0) - clyde128_sbox(s0, s1, s2, s3); - clyde128_lbox(s0, s1); - clyde128_lbox(s2, s3); - s0 ^= rc[step][0]; - s1 ^= rc[step][1]; - s2 ^= rc[step][2]; - s3 ^= rc[step][3]; - clyde128_sbox(s0, s1, s2, s3); - clyde128_lbox(s0, s1); - clyde128_lbox(s2, s3); - s0 ^= rc[step][4]; - s1 ^= rc[step][5]; - s2 ^= rc[step][6]; - s3 ^= rc[step][7]; - - /* Update the tweakey on the fly and add it to the state */ - c = t2 ^ t0; - d = t3 ^ t1; - t2 = t0; - t3 = t1; - t0 = c; - t1 = d; - s0 ^= k0 ^ t0; - s1 ^= k1 ^ t1; - s2 ^= k2 ^ t2; - s3 ^= k3 ^ t3; - } - - /* Pack the state into the output 
buffer */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -#else - le_store_word32((unsigned char *)&(output[0]), s0); - le_store_word32((unsigned char *)&(output[1]), s1); - le_store_word32((unsigned char *)&(output[2]), s2); - le_store_word32((unsigned char *)&(output[3]), s3); -#endif -} - -void clyde128_decrypt(const unsigned char key[CLYDE128_KEY_SIZE], - const uint32_t tweak[CLYDE128_TWEAK_SIZE / 4], - uint32_t output[CLYDE128_BLOCK_SIZE / 4], - const unsigned char input[CLYDE128_BLOCK_SIZE]) -{ - uint32_t k0, k1, k2, k3; - uint32_t t0, t1, t2, t3; - uint32_t s0, s1, s2, s3; - uint32_t a, b, d; - int step; - - /* Unpack the key, tweak, and state */ - k0 = le_load_word32(key); - k1 = le_load_word32(key + 4); - k2 = le_load_word32(key + 8); - k3 = le_load_word32(key + 12); -#if defined(LW_UTIL_LITTLE_ENDIAN) - t0 = tweak[0]; - t1 = tweak[1]; - t2 = tweak[2]; - t3 = tweak[3]; -#else - t0 = le_load_word32((const unsigned char *)&(tweak[0])); - t1 = le_load_word32((const unsigned char *)&(tweak[1])); - t2 = le_load_word32((const unsigned char *)&(tweak[2])); - t3 = le_load_word32((const unsigned char *)&(tweak[3])); -#endif - s0 = le_load_word32(input); - s1 = le_load_word32(input + 4); - s2 = le_load_word32(input + 8); - s3 = le_load_word32(input + 12); - - /* Perform all rounds in pairs */ - for (step = CLYDE128_STEPS - 1; step >= 0; --step) { - /* Add the tweakey to the state and update the tweakey */ - s0 ^= k0 ^ t0; - s1 ^= k1 ^ t1; - s2 ^= k2 ^ t2; - s3 ^= k3 ^ t3; - a = t2 ^ t0; - b = t3 ^ t1; - t0 = t2; - t1 = t3; - t2 = a; - t3 = b; - - /* Perform the two rounds of this step */ - #define clyde128_inv_sbox(s0, s1, s2, s3) \ - do { \ - d = (s0 & s1) ^ s2; \ - a = (s1 & d) ^ s3; \ - b = (d & a) ^ s0; \ - s2 = (a & b) ^ s1; \ - s0 = a; \ - s1 = b; \ - s3 = d; \ - } while (0) - #define clyde128_inv_lbox(x, y) \ - do { \ - a = x ^ leftRotate7(x); \ - b = y ^ leftRotate7(y); \ - x ^= leftRotate1(a); \ - y ^= leftRotate1(b); \ - x ^= leftRotate12(a); \ - y ^= leftRotate12(b); \ - a = x ^ leftRotate1(x); \ - b = y ^ leftRotate1(y); \ - x ^= leftRotate6(b); \ - y ^= leftRotate7(a); \ - a ^= leftRotate15(x); \ - b ^= leftRotate15(y); \ - x = rightRotate16(a); \ - y = rightRotate16(b); \ - } while (0) - s0 ^= rc[step][4]; - s1 ^= rc[step][5]; - s2 ^= rc[step][6]; - s3 ^= rc[step][7]; - clyde128_inv_lbox(s0, s1); - clyde128_inv_lbox(s2, s3); - clyde128_inv_sbox(s0, s1, s2, s3); - s0 ^= rc[step][0]; - s1 ^= rc[step][1]; - s2 ^= rc[step][2]; - s3 ^= rc[step][3]; - clyde128_inv_lbox(s0, s1); - clyde128_inv_lbox(s2, s3); - clyde128_inv_sbox(s0, s1, s2, s3); - } - - /* Add the tweakey to the state one last time */ - s0 ^= k0 ^ t0; - s1 ^= k1 ^ t1; - s2 ^= k2 ^ t2; - s3 ^= k3 ^ t3; - - /* Pack the state into the output buffer */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -#else - le_store_word32((unsigned char *)&(output[0]), s0); - le_store_word32((unsigned char *)&(output[1]), s1); - le_store_word32((unsigned char *)&(output[2]), s2); - le_store_word32((unsigned char *)&(output[3]), s3); -#endif -} - -void shadow512(shadow512_state_t *state) -{ - uint32_t s00, s01, s02, s03; - uint32_t s10, s11, s12, s13; - uint32_t s20, s21, s22, s23; - uint32_t s30, s31, s32, s33; - uint32_t c, d, w, x, y, z; - int step; - - /* Unpack the state into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s00 = state->W[0]; - s01 = state->W[1]; - s02 = state->W[2]; - s03 = state->W[3]; - 
s10 = state->W[4]; - s11 = state->W[5]; - s12 = state->W[6]; - s13 = state->W[7]; - s20 = state->W[8]; - s21 = state->W[9]; - s22 = state->W[10]; - s23 = state->W[11]; - s30 = state->W[12]; - s31 = state->W[13]; - s32 = state->W[14]; - s33 = state->W[15]; -#else - s00 = le_load_word32(state->B); - s01 = le_load_word32(state->B + 4); - s02 = le_load_word32(state->B + 8); - s03 = le_load_word32(state->B + 12); - s10 = le_load_word32(state->B + 16); - s11 = le_load_word32(state->B + 20); - s12 = le_load_word32(state->B + 24); - s13 = le_load_word32(state->B + 28); - s20 = le_load_word32(state->B + 32); - s21 = le_load_word32(state->B + 36); - s22 = le_load_word32(state->B + 40); - s23 = le_load_word32(state->B + 44); - s30 = le_load_word32(state->B + 48); - s31 = le_load_word32(state->B + 52); - s32 = le_load_word32(state->B + 56); - s33 = le_load_word32(state->B + 60); -#endif - - /* Perform all rounds in pairs */ - for (step = 0; step < CLYDE128_STEPS; ++step) { - /* Apply the S-box and L-box to bundle 0 */ - clyde128_sbox(s00, s01, s02, s03); - clyde128_lbox(s00, s01); - clyde128_lbox(s02, s03); - s00 ^= rc[step][0]; - s01 ^= rc[step][1]; - s02 ^= rc[step][2]; - s03 ^= rc[step][3]; - clyde128_sbox(s00, s01, s02, s03); - - /* Apply the S-box and L-box to bundle 1 */ - clyde128_sbox(s10, s11, s12, s13); - clyde128_lbox(s10, s11); - clyde128_lbox(s12, s13); - s10 ^= rc[step][0] << 1; - s11 ^= rc[step][1] << 1; - s12 ^= rc[step][2] << 1; - s13 ^= rc[step][3] << 1; - clyde128_sbox(s10, s11, s12, s13); - - /* Apply the S-box and L-box to bundle 2 */ - clyde128_sbox(s20, s21, s22, s23); - clyde128_lbox(s20, s21); - clyde128_lbox(s22, s23); - s20 ^= rc[step][0] << 2; - s21 ^= rc[step][1] << 2; - s22 ^= rc[step][2] << 2; - s23 ^= rc[step][3] << 2; - clyde128_sbox(s20, s21, s22, s23); - - /* Apply the S-box and L-box to bundle 3 */ - clyde128_sbox(s30, s31, s32, s33); - clyde128_lbox(s30, s31); - clyde128_lbox(s32, s33); - s30 ^= rc[step][0] << 3; - s31 ^= rc[step][1] << 3; - s32 ^= rc[step][2] << 3; - s33 ^= rc[step][3] << 3; - clyde128_sbox(s30, s31, s32, s33); - - /* Apply the diffusion layer to the rows of the state */ - #define shadow512_diffusion_layer(row) \ - do { \ - w = s0##row; \ - x = s1##row; \ - y = s2##row; \ - z = s3##row; \ - c = w ^ x; \ - d = y ^ z; \ - s0##row = x ^ d; \ - s1##row = w ^ d; \ - s2##row = c ^ z; \ - s3##row = c ^ y; \ - } while (0) - shadow512_diffusion_layer(0); - shadow512_diffusion_layer(1); - shadow512_diffusion_layer(2); - shadow512_diffusion_layer(3); - - /* Add round constants to all bundles again */ - s00 ^= rc[step][4]; - s01 ^= rc[step][5]; - s02 ^= rc[step][6]; - s03 ^= rc[step][7]; - s10 ^= rc[step][4] << 1; - s11 ^= rc[step][5] << 1; - s12 ^= rc[step][6] << 1; - s13 ^= rc[step][7] << 1; - s20 ^= rc[step][4] << 2; - s21 ^= rc[step][5] << 2; - s22 ^= rc[step][6] << 2; - s23 ^= rc[step][7] << 2; - s30 ^= rc[step][4] << 3; - s31 ^= rc[step][5] << 3; - s32 ^= rc[step][6] << 3; - s33 ^= rc[step][7] << 3; - } - - /* Pack the local variables back into the state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->W[0] = s00; - state->W[1] = s01; - state->W[2] = s02; - state->W[3] = s03; - state->W[4] = s10; - state->W[5] = s11; - state->W[6] = s12; - state->W[7] = s13; - state->W[8] = s20; - state->W[9] = s21; - state->W[10] = s22; - state->W[11] = s23; - state->W[12] = s30; - state->W[13] = s31; - state->W[14] = s32; - state->W[15] = s33; -#else - le_store_word32(state->B, s00); - le_store_word32(state->B + 4, s01); - le_store_word32(state->B + 8, s02); - 
le_store_word32(state->B + 12, s03); - le_store_word32(state->B + 16, s10); - le_store_word32(state->B + 20, s11); - le_store_word32(state->B + 24, s12); - le_store_word32(state->B + 28, s13); - le_store_word32(state->B + 32, s20); - le_store_word32(state->B + 36, s21); - le_store_word32(state->B + 40, s22); - le_store_word32(state->B + 44, s23); - le_store_word32(state->B + 48, s30); - le_store_word32(state->B + 52, s31); - le_store_word32(state->B + 56, s32); - le_store_word32(state->B + 60, s33); -#endif -} - -void shadow384(shadow384_state_t *state) -{ - uint32_t s00, s01, s02, s03; - uint32_t s10, s11, s12, s13; - uint32_t s20, s21, s22, s23; - uint32_t c, d, x, y, z; - int step; - - /* Unpack the state into local variables */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - s00 = state->W[0]; - s01 = state->W[1]; - s02 = state->W[2]; - s03 = state->W[3]; - s10 = state->W[4]; - s11 = state->W[5]; - s12 = state->W[6]; - s13 = state->W[7]; - s20 = state->W[8]; - s21 = state->W[9]; - s22 = state->W[10]; - s23 = state->W[11]; -#else - s00 = le_load_word32(state->B); - s01 = le_load_word32(state->B + 4); - s02 = le_load_word32(state->B + 8); - s03 = le_load_word32(state->B + 12); - s10 = le_load_word32(state->B + 16); - s11 = le_load_word32(state->B + 20); - s12 = le_load_word32(state->B + 24); - s13 = le_load_word32(state->B + 28); - s20 = le_load_word32(state->B + 32); - s21 = le_load_word32(state->B + 36); - s22 = le_load_word32(state->B + 40); - s23 = le_load_word32(state->B + 44); -#endif - - /* Perform all rounds in pairs */ - for (step = 0; step < CLYDE128_STEPS; ++step) { - /* Apply the S-box and L-box to bundle 0 */ - clyde128_sbox(s00, s01, s02, s03); - clyde128_lbox(s00, s01); - clyde128_lbox(s02, s03); - s00 ^= rc[step][0]; - s01 ^= rc[step][1]; - s02 ^= rc[step][2]; - s03 ^= rc[step][3]; - clyde128_sbox(s00, s01, s02, s03); - - /* Apply the S-box and L-box to bundle 1 */ - clyde128_sbox(s10, s11, s12, s13); - clyde128_lbox(s10, s11); - clyde128_lbox(s12, s13); - s10 ^= rc[step][0] << 1; - s11 ^= rc[step][1] << 1; - s12 ^= rc[step][2] << 1; - s13 ^= rc[step][3] << 1; - clyde128_sbox(s10, s11, s12, s13); - - /* Apply the S-box and L-box to bundle 2 */ - clyde128_sbox(s20, s21, s22, s23); - clyde128_lbox(s20, s21); - clyde128_lbox(s22, s23); - s20 ^= rc[step][0] << 2; - s21 ^= rc[step][1] << 2; - s22 ^= rc[step][2] << 2; - s23 ^= rc[step][3] << 2; - clyde128_sbox(s20, s21, s22, s23); - - /* Apply the diffusion layer to the rows of the state */ - #define shadow384_diffusion_layer(row) \ - do { \ - x = s0##row; \ - y = s1##row; \ - z = s2##row; \ - s0##row = x ^ y ^ z; \ - s1##row = x ^ z; \ - s2##row = x ^ y; \ - } while (0) - shadow384_diffusion_layer(0); - shadow384_diffusion_layer(1); - shadow384_diffusion_layer(2); - shadow384_diffusion_layer(3); - - /* Add round constants to all bundles again */ - s00 ^= rc[step][4]; - s01 ^= rc[step][5]; - s02 ^= rc[step][6]; - s03 ^= rc[step][7]; - s10 ^= rc[step][4] << 1; - s11 ^= rc[step][5] << 1; - s12 ^= rc[step][6] << 1; - s13 ^= rc[step][7] << 1; - s20 ^= rc[step][4] << 2; - s21 ^= rc[step][5] << 2; - s22 ^= rc[step][6] << 2; - s23 ^= rc[step][7] << 2; - } - - /* Pack the local variables back into the state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->W[0] = s00; - state->W[1] = s01; - state->W[2] = s02; - state->W[3] = s03; - state->W[4] = s10; - state->W[5] = s11; - state->W[6] = s12; - state->W[7] = s13; - state->W[8] = s20; - state->W[9] = s21; - state->W[10] = s22; - state->W[11] = s23; -#else - le_store_word32(state->B, s00); - 
le_store_word32(state->B + 4, s01); - le_store_word32(state->B + 8, s02); - le_store_word32(state->B + 12, s03); - le_store_word32(state->B + 16, s10); - le_store_word32(state->B + 20, s11); - le_store_word32(state->B + 24, s12); - le_store_word32(state->B + 28, s13); - le_store_word32(state->B + 32, s20); - le_store_word32(state->B + 36, s21); - le_store_word32(state->B + 40, s22); - le_store_word32(state->B + 44, s23); -#endif -} diff --git a/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/internal-spook.h b/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/internal-spook.h deleted file mode 100644 index b08ce80..0000000 --- a/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/internal-spook.h +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SPOOK_H -#define LW_INTERNAL_SPOOK_H - -#include "internal-util.h" - -/** - * \file internal-spook.h - * \brief Internal implementation details of the Spook AEAD mode. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the block for the Clyde-128 block cipher. - */ -#define CLYDE128_BLOCK_SIZE 16 - -/** - * \brief Size of the key for the Clyde-128 block cipher. - */ -#define CLYDE128_KEY_SIZE 16 - -/** - * \brief Size of the tweak for the Clyde-128 block cipher. - */ -#define CLYDE128_TWEAK_SIZE 16 - -/** - * \brief Size of the state for Shadow-512. - */ -#define SHADOW512_STATE_SIZE 64 - -/** - * \brief Rate to absorb data into or squeeze data out of a Shadow-512 state. - */ -#define SHADOW512_RATE 32 - -/** - * \brief Size of the state for Shadow-384. - */ -#define SHADOW384_STATE_SIZE 48 - -/** - * \brief Rate to absorb data into or squeeze data out of a Shadow-384 state. - */ -#define SHADOW384_RATE 16 - -/** - * \brief Internal state of the Shadow-512 permutation. - */ -typedef union -{ - uint32_t W[SHADOW512_STATE_SIZE / 4]; /**< Words of the state */ - uint8_t B[SHADOW512_STATE_SIZE]; /**< Bytes of the state */ - -} shadow512_state_t; - -/** - * \brief Internal state of the Shadow-384 permutation. - */ -typedef union -{ - uint32_t W[SHADOW384_STATE_SIZE / 4]; /**< Words of the state */ - uint8_t B[SHADOW384_STATE_SIZE]; /**< Bytes of the state */ - -} shadow384_state_t; - -/** - * \brief Encrypts a block with the Clyde-128 block cipher. - * - * \param key Points to the key to encrypt with. - * \param tweak Points to the tweak to encrypt with. 
- * \param output Output buffer for the ciphertext. - * \param input Input buffer for the plaintext. - * - * \sa clyde128_decrypt() - */ -void clyde128_encrypt(const unsigned char key[CLYDE128_KEY_SIZE], - const uint32_t tweak[CLYDE128_TWEAK_SIZE / 4], - uint32_t output[CLYDE128_BLOCK_SIZE / 4], - const uint32_t input[CLYDE128_BLOCK_SIZE / 4]); - -/** - * \brief Decrypts a block with the Clyde-128 block cipher. - * - * \param key Points to the key to decrypt with. - * \param tweak Points to the tweak to decrypt with. - * \param output Output buffer for the plaintext. - * \param input Input buffer for the ciphertext. - * - * \sa clyde128_encrypt() - */ -void clyde128_decrypt(const unsigned char key[CLYDE128_KEY_SIZE], - const uint32_t tweak[CLYDE128_TWEAK_SIZE / 4], - uint32_t output[CLYDE128_BLOCK_SIZE / 4], - const unsigned char input[CLYDE128_BLOCK_SIZE]); - -/** - * \brief Performs the Shadow-512 permutation on a state. - * - * \param state The Shadow-512 state which will be in little-endian - * byte order on input and output. - * - * \sa shadow384() - */ -void shadow512(shadow512_state_t *state); - -/** - * \brief Performs the Shadow-384 permutation on a state. - * - * \param state The Shadow-384 state which will be in little-endian - * byte order on input and output. - * - * \sa shadow512() - */ -void shadow384(shadow384_state_t *state); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/internal-util.h b/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
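A hypothetical sanity check, not part of the patch: whichever branch of LW_CRYPTO_ROTATE32_COMPOSED is active, the fixed-count rotations must agree with the generic leftRotate/rightRotate forms, which a unit test could assert as sketched here.

#include <assert.h>
#include <stdint.h>

static void check_composed_rotations(void)
{
    uint32_t x = 0x12345678;
    assert(leftRotate5(x)  == leftRotate(x, 5));   /* rotl 8 then rotr 3 on AVR */
    assert(leftRotate12(x) == leftRotate(x, 12));  /* rotl 16 then rotr 4 on AVR */
    assert(rightRotate7(x) == rightRotate(x, 7));  /* defined via leftRotate25 on AVR */
}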
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/spook.c b/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/spook.c deleted file mode 100644 index d075b33..0000000 --- a/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/spook.c +++ /dev/null @@ -1,552 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "spook.h" -#include "internal-spook.h" -#include "internal-util.h" -#include - -aead_cipher_t const spook_128_512_su_cipher = { - "Spook-128-512-su", - SPOOK_SU_KEY_SIZE, - SPOOK_NONCE_SIZE, - SPOOK_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - spook_128_512_su_aead_encrypt, - spook_128_512_su_aead_decrypt -}; - -aead_cipher_t const spook_128_384_su_cipher = { - "Spook-128-384-su", - SPOOK_SU_KEY_SIZE, - SPOOK_NONCE_SIZE, - SPOOK_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - spook_128_384_su_aead_encrypt, - spook_128_384_su_aead_decrypt -}; - -aead_cipher_t const spook_128_512_mu_cipher = { - "Spook-128-512-mu", - SPOOK_MU_KEY_SIZE, - SPOOK_NONCE_SIZE, - SPOOK_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - spook_128_512_mu_aead_encrypt, - spook_128_512_mu_aead_decrypt -}; - -aead_cipher_t const spook_128_384_mu_cipher = { - "Spook-128-384-mu", - SPOOK_MU_KEY_SIZE, - SPOOK_NONCE_SIZE, - SPOOK_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - spook_128_384_mu_aead_encrypt, - spook_128_384_mu_aead_decrypt -}; - -/** - * \brief Initializes the Shadow-512 sponge state. - * - * \param state The sponge state. - * \param k Points to the key. - * \param klen Length of the key in bytes, either 16 or 32. - * \param npub Public nonce for the state. 
- */ -static void spook_128_512_init - (shadow512_state_t *state, - const unsigned char *k, unsigned klen, - const unsigned char *npub) -{ - memset(state->B, 0, SHADOW512_STATE_SIZE); - if (klen == SPOOK_MU_KEY_SIZE) { - /* The public tweak is 126 bits in size followed by a 1 bit */ - memcpy(state->B, k + CLYDE128_BLOCK_SIZE, CLYDE128_BLOCK_SIZE); - state->B[CLYDE128_BLOCK_SIZE - 1] &= 0x7F; - state->B[CLYDE128_BLOCK_SIZE - 1] |= 0x40; - } - memcpy(state->B + CLYDE128_BLOCK_SIZE, npub, CLYDE128_BLOCK_SIZE); - clyde128_encrypt(k, state->W, state->W + 12, state->W + 4); - shadow512(state); -} - -/** - * \brief Initializes the Shadow-384 sponge state. - * - * \param state The sponge state. - * \param k Points to the key. - * \param klen Length of the key in bytes, either 16 or 32. - * \param npub Public nonce for the state. - */ -static void spook_128_384_init - (shadow384_state_t *state, - const unsigned char *k, unsigned klen, - const unsigned char *npub) -{ - memset(state->B, 0, SHADOW384_STATE_SIZE); - if (klen == SPOOK_MU_KEY_SIZE) { - /* The public tweak is 126 bits in size followed by a 1 bit */ - memcpy(state->B, k + CLYDE128_BLOCK_SIZE, CLYDE128_BLOCK_SIZE); - state->B[CLYDE128_BLOCK_SIZE - 1] &= 0x7F; - state->B[CLYDE128_BLOCK_SIZE - 1] |= 0x40; - } - memcpy(state->B + CLYDE128_BLOCK_SIZE, npub, CLYDE128_BLOCK_SIZE); - clyde128_encrypt(k, state->W, state->W + 8, state->W + 4); - shadow384(state); -} - -/** - * \brief Absorbs associated data into the Shadow-512 sponge state. - * - * \param state The sponge state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes, must be non-zero. - */ -static void spook_128_512_absorb - (shadow512_state_t *state, - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen >= SHADOW512_RATE) { - lw_xor_block(state->B, ad, SHADOW512_RATE); - shadow512(state); - ad += SHADOW512_RATE; - adlen -= SHADOW512_RATE; - } - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(state->B, ad, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW512_RATE] ^= 0x02; - shadow512(state); - } -} - -/** - * \brief Absorbs associated data into the Shadow-384 sponge state. - * - * \param state The sponge state. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes, must be non-zero. - */ -static void spook_128_384_absorb - (shadow384_state_t *state, - const unsigned char *ad, unsigned long long adlen) -{ - while (adlen >= SHADOW384_RATE) { - lw_xor_block(state->B, ad, SHADOW384_RATE); - shadow384(state); - ad += SHADOW384_RATE; - adlen -= SHADOW384_RATE; - } - if (adlen > 0) { - unsigned temp = (unsigned)adlen; - lw_xor_block(state->B, ad, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW384_RATE] ^= 0x02; - shadow384(state); - } -} - -/** - * \brief Encrypts the plaintext with the Shadow-512 sponge state. - * - * \param state The sponge state. - * \param c Points to the ciphertext output buffer. - * \param m Points to the plaintext input buffer. - * \param mlen Number of bytes of plaintext to be encrypted. 
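To make the padding convention of the absorb routines above concrete, here is a toy sketch (hypothetical names, an 8-byte rate instead of SHADOW512_RATE) of how a short final block is folded in: the bytes are XORed into the rate, a 0x01 marker follows the data, and 0x02 is flipped in the first byte past the rate for domain separation.

#include <stdint.h>

#define TOY_RATE 8
static void toy_absorb_last(uint8_t state[TOY_RATE + 1],
                            const uint8_t *ad, unsigned adlen) /* adlen < TOY_RATE */
{
    unsigned i;
    for (i = 0; i < adlen; ++i)
        state[i] ^= ad[i];     /* partial-block XOR, like lw_xor_block */
    state[adlen] ^= 0x01;      /* padding marker just after the data */
    state[TOY_RATE] ^= 0x02;   /* domain separation in the first capacity byte */
    /* the real code would now run the shadow512() permutation */
}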
- */ -static void spook_128_512_encrypt - (shadow512_state_t *state, unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - state->B[SHADOW512_RATE] ^= 0x01; - while (mlen >= SHADOW512_RATE) { - lw_xor_block_2_dest(c, state->B, m, SHADOW512_RATE); - shadow512(state); - c += SHADOW512_RATE; - m += SHADOW512_RATE; - mlen -= SHADOW512_RATE; - } - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_dest(c, state->B, m, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW512_RATE] ^= 0x02; - shadow512(state); - } -} - -/** - * \brief Encrypts the plaintext with the Shadow-384 sponge state. - * - * \param state The sponge state. - * \param c Points to the ciphertext output buffer. - * \param m Points to the plaintext input buffer. - * \param mlen Number of bytes of plaintext to be encrypted. - */ -static void spook_128_384_encrypt - (shadow384_state_t *state, unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - state->B[SHADOW384_RATE] ^= 0x01; - while (mlen >= SHADOW384_RATE) { - lw_xor_block_2_dest(c, state->B, m, SHADOW384_RATE); - shadow384(state); - c += SHADOW384_RATE; - m += SHADOW384_RATE; - mlen -= SHADOW384_RATE; - } - if (mlen > 0) { - unsigned temp = (unsigned)mlen; - lw_xor_block_2_dest(c, state->B, m, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW384_RATE] ^= 0x02; - shadow384(state); - } -} - -/** - * \brief Decrypts the ciphertext with the Shadow-512 sponge state. - * - * \param state The sponge state. - * \param m Points to the plaintext output buffer. - * \param c Points to the ciphertext input buffer. - * \param clen Number of bytes of ciphertext to be decrypted. - */ -static void spook_128_512_decrypt - (shadow512_state_t *state, unsigned char *m, - const unsigned char *c, unsigned long long clen) -{ - state->B[SHADOW512_RATE] ^= 0x01; - while (clen >= SHADOW512_RATE) { - lw_xor_block_swap(m, state->B, c, SHADOW512_RATE); - shadow512(state); - c += SHADOW512_RATE; - m += SHADOW512_RATE; - clen -= SHADOW512_RATE; - } - if (clen > 0) { - unsigned temp = (unsigned)clen; - lw_xor_block_swap(m, state->B, c, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW512_RATE] ^= 0x02; - shadow512(state); - } -} - -/** - * \brief Decrypts the ciphertext with the Shadow-384 sponge state. - * - * \param state The sponge state. - * \param m Points to the plaintext output buffer. - * \param c Points to the ciphertext input buffer. - * \param clen Number of bytes of ciphertext to be decrypted. 
- */ -static void spook_128_384_decrypt - (shadow384_state_t *state, unsigned char *m, - const unsigned char *c, unsigned long long clen) -{ - state->B[SHADOW384_RATE] ^= 0x01; - while (clen >= SHADOW384_RATE) { - lw_xor_block_swap(m, state->B, c, SHADOW384_RATE); - shadow384(state); - c += SHADOW384_RATE; - m += SHADOW384_RATE; - clen -= SHADOW384_RATE; - } - if (clen > 0) { - unsigned temp = (unsigned)clen; - lw_xor_block_swap(m, state->B, c, temp); - state->B[temp] ^= 0x01; - state->B[SHADOW384_RATE] ^= 0x02; - shadow384(state); - } -} - -int spook_128_512_su_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - shadow512_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPOOK_TAG_SIZE; - - /* Initialize the Shadow-512 sponge state */ - spook_128_512_init(&state, k, SPOOK_SU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_512_absorb(&state, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) - spook_128_512_encrypt(&state, c, m, mlen); - - /* Compute the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_encrypt(k, state.W + 4, state.W, state.W); - memcpy(c + mlen, state.B, SPOOK_TAG_SIZE); - return 0; -} - -int spook_128_512_su_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - shadow512_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SPOOK_TAG_SIZE) - return -1; - *mlen = clen - SPOOK_TAG_SIZE; - - /* Initialize the Shadow-512 sponge state */ - spook_128_512_init(&state, k, SPOOK_SU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_512_absorb(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPOOK_TAG_SIZE; - if (clen > 0) - spook_128_512_decrypt(&state, m, c, clen); - - /* Check the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_decrypt(k, state.W + 4, state.W + 4, c + clen); - return aead_check_tag - (m, clen, state.B, state.B + CLYDE128_BLOCK_SIZE, SPOOK_TAG_SIZE); -} - -int spook_128_384_su_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - shadow384_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPOOK_TAG_SIZE; - - /* Initialize the Shadow-384 sponge state */ - spook_128_384_init(&state, k, SPOOK_SU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_384_absorb(&state, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) - spook_128_384_encrypt(&state, c, m, mlen); - - /* Compute the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_encrypt(k, state.W + 4, state.W, state.W); - memcpy(c + mlen, state.B, SPOOK_TAG_SIZE); - return 0; -} - -int spook_128_384_su_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char 
*nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - shadow384_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SPOOK_TAG_SIZE) - return -1; - *mlen = clen - SPOOK_TAG_SIZE; - - /* Initialize the Shadow-384 sponge state */ - spook_128_384_init(&state, k, SPOOK_SU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_384_absorb(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPOOK_TAG_SIZE; - if (clen > 0) - spook_128_384_decrypt(&state, m, c, clen); - - /* Check the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_decrypt(k, state.W + 4, state.W + 4, c + clen); - return aead_check_tag - (m, clen, state.B, state.B + CLYDE128_BLOCK_SIZE, SPOOK_TAG_SIZE); -} - -int spook_128_512_mu_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - shadow512_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPOOK_TAG_SIZE; - - /* Initialize the Shadow-512 sponge state */ - spook_128_512_init(&state, k, SPOOK_MU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_512_absorb(&state, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - if (mlen > 0) - spook_128_512_encrypt(&state, c, m, mlen); - - /* Compute the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_encrypt(k, state.W + 4, state.W, state.W); - memcpy(c + mlen, state.B, SPOOK_TAG_SIZE); - return 0; -} - -int spook_128_512_mu_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - shadow512_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SPOOK_TAG_SIZE) - return -1; - *mlen = clen - SPOOK_TAG_SIZE; - - /* Initialize the Shadow-512 sponge state */ - spook_128_512_init(&state, k, SPOOK_MU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_512_absorb(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPOOK_TAG_SIZE; - if (clen > 0) - spook_128_512_decrypt(&state, m, c, clen); - - /* Check the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_decrypt(k, state.W + 4, state.W + 4, c + clen); - return aead_check_tag - (m, clen, state.B, state.B + CLYDE128_BLOCK_SIZE, SPOOK_TAG_SIZE); -} - -int spook_128_384_mu_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - shadow384_state_t state; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SPOOK_TAG_SIZE; - - /* Initialize the Shadow-384 sponge state */ - spook_128_384_init(&state, k, SPOOK_MU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_384_absorb(&state, ad, adlen); - - /* Encrypt the 
plaintext to produce the ciphertext */ - if (mlen > 0) - spook_128_384_encrypt(&state, c, m, mlen); - - /* Compute the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_encrypt(k, state.W + 4, state.W, state.W); - memcpy(c + mlen, state.B, SPOOK_TAG_SIZE); - return 0; -} - -int spook_128_384_mu_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - shadow384_state_t state; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SPOOK_TAG_SIZE) - return -1; - *mlen = clen - SPOOK_TAG_SIZE; - - /* Initialize the Shadow-384 sponge state */ - spook_128_384_init(&state, k, SPOOK_MU_KEY_SIZE, npub); - - /* Process the associated data */ - if (adlen > 0) - spook_128_384_absorb(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= SPOOK_TAG_SIZE; - if (clen > 0) - spook_128_384_decrypt(&state, m, c, clen); - - /* Check the authentication tag */ - state.B[CLYDE128_BLOCK_SIZE * 2 - 1] |= 0x80; - clyde128_decrypt(k, state.W + 4, state.W + 4, c + clen); - return aead_check_tag - (m, clen, state.B, state.B + CLYDE128_BLOCK_SIZE, SPOOK_TAG_SIZE); -} diff --git a/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/spook.h b/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/spook.h deleted file mode 100644 index 68b6a25..0000000 --- a/spook/Implementations/crypto_aead/spook128su512v1/rhys-avr/spook.h +++ /dev/null @@ -1,344 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SPOOK_H -#define LWCRYPTO_SPOOK_H - -#include "aead-common.h" - -/** - * \file spook.h - * \brief Spook authenticated encryption algorithm. - * - * Spook is a family of authenticated encryption algorithms that are - * built around a tweakable block cipher and a permutation. If the - * tweakable block cipher is implemented as a masked block cipher, - * then Spook provides protection against power analysis side channels. - * - * There are four members in the Spook family: - * - * \li Spook-128-512-su with a 128-bit key, a 128-bit nonce, and a 128-bit tag. - * Internally the algorithm uses a 512-bit permutation. This is the primary - * member of the family. 
- * \li Spook-128-384-su with a 128-bit key, a 128-bit nonce, and a 128-bit tag. - * Internally the algorithm uses a 384-bit permutation. - * \li Spook-128-512-mu with a 128-bit key, a 128-bit public tweak, a 128-bit - * nonce, and a 128-bit tag. Internally the algorithm uses a 512-bit - * permutation. - * \li Spook-128-384-mu with a 128-bit key, a 128-bit public tweak, a 128-bit - * nonce, and a 128-bit tag. Internally the algorithm uses a 384-bit - * permutation. - * - * In this library, the "mu" (multi-user) variants combine the 128-bit key - * and the 128-bit public tweak into a single 256-bit key value. - * Applications can either view this as a cipher with a 256-bit key, - * or they can split the key value into secret and public halves. - * Even with the use of 256-bit keys, Spook only has 128-bit security. - * - * References: https://www.spook.dev/ - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for the single-user version of Spook. - */ -#define SPOOK_SU_KEY_SIZE 16 - -/** - * \brief Size of the key for the multi-user version of Spook. - */ -#define SPOOK_MU_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for all Spook family members. - */ -#define SPOOK_TAG_SIZE 16 - -/** - * \brief Size of the nonce for all Spook family members. - */ -#define SPOOK_NONCE_SIZE 16 - -/** - * \brief Meta-information block for the Spook-128-512-su cipher. - */ -extern aead_cipher_t const spook_128_512_su_cipher; - -/** - * \brief Meta-information block for the Spook-128-384-su cipher. - */ -extern aead_cipher_t const spook_128_384_su_cipher; - -/** - * \brief Meta-information block for the Spook-128-512-mu cipher. - */ -extern aead_cipher_t const spook_128_512_mu_cipher; - -/** - * \brief Meta-information block for the Spook-128-384-mu cipher. - */ -extern aead_cipher_t const spook_128_384_mu_cipher; - -/** - * \brief Encrypts and authenticates a packet with Spook-128-512-su. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa spook_128_512_su_aead_decrypt() - */ -int spook_128_512_su_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Spook-128-512-su. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. 
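Since the "mu" variants expect the secret key and the public tweak packed into one 256-bit value, an application might assemble that buffer as in the hypothetical helper below; spook_mu_pack_key is not part of the API, and the layout follows spook_128_512_init() earlier in this patch, which reads the public tweak from the second 16 bytes.

#include <string.h>

static void spook_mu_pack_key
    (unsigned char packed[SPOOK_MU_KEY_SIZE],
     const unsigned char secret[16], const unsigned char tweak[16])
{
    memcpy(packed, secret, 16);     /* secret half, used as the Clyde-128 key */
    memcpy(packed + 16, tweak, 16); /* public tweak, absorbed into the sponge state */
}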
- * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa spook_128_512_su_aead_encrypt() - */ -int spook_128_512_su_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Spook-128-384-su. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa spook_128_384_su_aead_decrypt() - */ -int spook_128_384_su_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Spook-128-384-su. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa spook_128_384_su_aead_encrypt() - */ -int spook_128_384_su_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Spook-128-512-mu. 
- * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa spook_128_512_mu_aead_decrypt() - */ -int spook_128_512_mu_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Spook-128-512-mu. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa spook_128_512_mu_aead_encrypt() - */ -int spook_128_512_mu_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with Spook-128-384-mu. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. 
- * - * \sa spook_128_384_mu_aead_decrypt() - */ -int spook_128_384_mu_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Spook-128-384-mu. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa spook_128_384_mu_aead_encrypt() - */ -int spook_128_384_mu_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/spook/Implementations/crypto_aead/spook128su512v1/rhys/internal-util.h b/spook/Implementations/crypto_aead/spook128su512v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/spook/Implementations/crypto_aead/spook128su512v1/rhys/internal-util.h +++ b/spook/Implementations/crypto_aead/spook128su512v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
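A hypothetical round-trip sketch of the Spook API declared above (placeholder key and nonce, no associated data; assumes spook.h is included); it is only meant to show the calling convention, not a complete test.

#include <stddef.h>

static int spook_roundtrip_example(void)
{
    unsigned char key[SPOOK_SU_KEY_SIZE] = {0};
    unsigned char nonce[SPOOK_NONCE_SIZE] = {0};
    unsigned char msg[4] = {'t', 'e', 's', 't'};
    unsigned char ct[sizeof(msg) + SPOOK_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    if (spook_128_512_su_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                                      NULL, 0, NULL, nonce, key) != 0)
        return -1;
    /* returns 0 on success or -1 if the authentication tag does not match */
    return spook_128_512_su_aead_decrypt(pt, &ptlen, NULL,
                                         ct, ctlen, NULL, 0, nonce, key);
}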
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/aead-common.c b/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/aead-common.h b/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. 
- * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. 
- * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. 
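/*
 * Illustrative sketch only: the aead_check_tag() functions above turn a
 * byte-wise XOR accumulator into an all-ones or all-zero mask without
 * branching.  The hypothetical helper below shows the same trick in
 * isolation; like the code above, it assumes arithmetic right shift of
 * negative int values.
 */
#include <stdio.h>

static int tags_equal_ct(const unsigned char *a, const unsigned char *b, unsigned len)
{
    int accum = 0;
    while (len > 0) {
        accum |= *a++ ^ *b++;   /* becomes non-zero as soon as any byte differs */
        --len;
    }
    /* accum is 0 only if every byte matched; then (accum - 1) >> 8 is -1
     * (all ones), otherwise it is 0.  Inverting the mask gives 0 on a
     * match and -1 on a mismatch, without any data-dependent branch. */
    return ~((accum - 1) >> 8);
}

int main(void)
{
    unsigned char t1[4] = {1, 2, 3, 4};
    unsigned char t2[4] = {1, 2, 3, 4};
    unsigned char t3[4] = {1, 2, 3, 5};
    printf("%d %d\n", tags_equal_ct(t1, t2, 4), tags_equal_ct(t1, t3, 4)); /* 0 -1 */
    return 0;
}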
- * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/api.h b/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/encrypt.c b/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/encrypt.c deleted file mode 100644 index 2f166ad..0000000 --- a/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "subterranean.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return subterranean_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return subterranean_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/internal-subterranean.c b/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/internal-subterranean.c deleted file mode 100644 index 1cb64e2..0000000 --- a/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/internal-subterranean.c +++ /dev/null @@ -1,441 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-subterranean.h" -#include - -void subterranean_round(subterranean_state_t *state) -{ - uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8; - uint32_t t0, t1; - - /* Load the state up into local variables */ - x0 = state->x[0]; - x1 = state->x[1]; - x2 = state->x[2]; - x3 = state->x[3]; - x4 = state->x[4]; - x5 = state->x[5]; - x6 = state->x[6]; - x7 = state->x[7]; - x8 = state->x[8]; - - /* Step chi: s[i] = s[i] ^ (~(s[i+1) & s[i+2]) */ - #define CHI(a, b) \ - do { \ - t0 = ((a) >> 1) | ((b) << 31); \ - t1 = ((a) >> 2) | ((b) << 30); \ - (a) ^= (~t0) & t1; \ - } while (0) - x8 ^= (x0 << 1); - CHI(x0, x1); CHI(x1, x2); - CHI(x2, x3); CHI(x3, x4); - CHI(x4, x5); CHI(x5, x6); - CHI(x6, x7); CHI(x7, x8); - x8 ^= (~(x8 >> 1)) & (x8 >> 2); - - /* Step itoa: invert s[0] */ - x0 ^= 1U; - - /* Step theta: s[i] = s[i] ^ s[i + 3] ^ s[i + 8] */ - #define THETA(a, b) \ - do { \ - t0 = ((a) >> 3) | ((b) << 29); \ - t1 = ((a) >> 8) | ((b) << 24); \ - (a) ^= t0 ^ t1; \ - } while (0) - x8 = (x8 & 1U) ^ (x0 << 1); - THETA(x0, x1); THETA(x1, x2); - THETA(x2, x3); THETA(x3, x4); - THETA(x4, x5); THETA(x5, x6); - THETA(x6, x7); THETA(x7, x8); - x8 ^= (x8 >> 3) ^ (x8 >> 8); - - /* Step pi: permute the bits with the rule s[i] = s[(i * 12) % 257]. - * BCP = bit copy, BUP = move bit up, BDN = move bit down */ - #define BCP(x, bit) ((x) & (((uint32_t)1) << (bit))) - #define BUP(x, from, to) \ - (((x) << ((to) - (from))) & (((uint32_t)1) << (to))) - #define BDN(x, from, to) \ - (((x) >> ((from) - (to))) & (((uint32_t)1) << (to))) - state->x[0] = BCP(x0, 0) ^ BDN(x0, 12, 1) ^ BDN(x0, 24, 2) ^ - BDN(x1, 4, 3) ^ BDN(x1, 16, 4) ^ BDN(x1, 28, 5) ^ - BDN(x2, 8, 6) ^ BDN(x2, 20, 7) ^ BUP(x3, 0, 8) ^ - BDN(x3, 12, 9) ^ BDN(x3, 24, 10) ^ BUP(x4, 4, 11) ^ - BDN(x4, 16, 12) ^ BDN(x4, 28, 13) ^ BUP(x5, 8, 14) ^ - BDN(x5, 20, 15) ^ BUP(x6, 0, 16) ^ BUP(x6, 12, 17) ^ - BDN(x6, 24, 18) ^ BUP(x7, 4, 19) ^ BUP(x7, 16, 20) ^ - BDN(x7, 28, 21) ^ BUP(x0, 7, 22) ^ BUP(x0, 19, 23) ^ - BDN(x0, 31, 24) ^ BUP(x1, 11, 25) ^ BUP(x1, 23, 26) ^ - BUP(x2, 3, 27) ^ BUP(x2, 15, 28) ^ BUP(x2, 27, 29) ^ - BUP(x3, 7, 30) ^ BUP(x3, 19, 31); - state->x[1] = BDN(x3, 31, 0) ^ BDN(x4, 11, 1) ^ BDN(x4, 23, 2) ^ - BCP(x5, 3) ^ BDN(x5, 15, 4) ^ BDN(x5, 27, 5) ^ - BDN(x6, 7, 6) ^ BDN(x6, 19, 7) ^ BDN(x6, 31, 8) ^ - BDN(x7, 11, 9) ^ BDN(x7, 23, 10) ^ BUP(x0, 2, 11) ^ - BDN(x0, 14, 12) ^ BDN(x0, 26, 13) ^ BUP(x1, 6, 14) ^ - BDN(x1, 18, 15) ^ BDN(x1, 30, 16) ^ BUP(x2, 10, 17) ^ - BDN(x2, 22, 18) ^ BUP(x3, 2, 19) ^ BUP(x3, 14, 20) ^ - BDN(x3, 26, 21) ^ BUP(x4, 6, 22) ^ BUP(x4, 18, 23) ^ - BDN(x4, 30, 24) ^ BUP(x5, 10, 25) ^ BUP(x5, 22, 26) ^ - BUP(x6, 2, 27) ^ BUP(x6, 14, 28) ^ BUP(x6, 26, 29) ^ - BUP(x7, 6, 30) ^ BUP(x7, 18, 31); - state->x[2] = BDN(x7, 30, 0) ^ BDN(x0, 9, 1) ^ BDN(x0, 21, 2) ^ - BUP(x1, 1, 3) ^ BDN(x1, 13, 4) ^ BDN(x1, 25, 5) ^ - BUP(x2, 5, 6) ^ BDN(x2, 17, 7) ^ BDN(x2, 29, 8) ^ - BCP(x3, 9) ^ BDN(x3, 21, 10) ^ BUP(x4, 1, 11) ^ - BDN(x4, 13, 12) ^ BDN(x4, 25, 13) ^ BUP(x5, 5, 14) ^ - BDN(x5, 17, 15) ^ BDN(x5, 29, 16) ^ BUP(x6, 9, 17) ^ - 
BDN(x6, 21, 18) ^ BUP(x7, 1, 19) ^ BUP(x7, 13, 20) ^ - BDN(x7, 25, 21) ^ BUP(x0, 4, 22) ^ BUP(x0, 16, 23) ^ - BDN(x0, 28, 24) ^ BUP(x1, 8, 25) ^ BUP(x1, 20, 26) ^ - BUP(x2, 0, 27) ^ BUP(x2, 12, 28) ^ BUP(x2, 24, 29) ^ - BUP(x3, 4, 30) ^ BUP(x3, 16, 31); - state->x[3] = BDN(x3, 28, 0) ^ BDN(x4, 8, 1) ^ BDN(x4, 20, 2) ^ - BUP(x5, 0, 3) ^ BDN(x5, 12, 4) ^ BDN(x5, 24, 5) ^ - BUP(x6, 4, 6) ^ BDN(x6, 16, 7) ^ BDN(x6, 28, 8) ^ - BUP(x7, 8, 9) ^ BDN(x7, 20, 10) ^ BUP(x8, 0, 11) ^ - BUP(x0, 11, 12) ^ BDN(x0, 23, 13) ^ BUP(x1, 3, 14) ^ - BCP(x1, 15) ^ BDN(x1, 27, 16) ^ BUP(x2, 7, 17) ^ - BDN(x2, 19, 18) ^ BDN(x2, 31, 19) ^ BUP(x3, 11, 20) ^ - BDN(x3, 23, 21) ^ BUP(x4, 3, 22) ^ BUP(x4, 15, 23) ^ - BDN(x4, 27, 24) ^ BUP(x5, 7, 25) ^ BUP(x5, 19, 26) ^ - BDN(x5, 31, 27) ^ BUP(x6, 11, 28) ^ BUP(x6, 23, 29) ^ - BUP(x7, 3, 30) ^ BUP(x7, 15, 31); - state->x[4] = BDN(x7, 27, 0) ^ BDN(x0, 6, 1) ^ BDN(x0, 18, 2) ^ - BDN(x0, 30, 3) ^ BDN(x1, 10, 4) ^ BDN(x1, 22, 5) ^ - BUP(x2, 2, 6) ^ BDN(x2, 14, 7) ^ BDN(x2, 26, 8) ^ - BUP(x3, 6, 9) ^ BDN(x3, 18, 10) ^ BDN(x3, 30, 11) ^ - BUP(x4, 10, 12) ^ BDN(x4, 22, 13) ^ BUP(x5, 2, 14) ^ - BUP(x5, 14, 15) ^ BDN(x5, 26, 16) ^ BUP(x6, 6, 17) ^ - BCP(x6, 18) ^ BDN(x6, 30, 19) ^ BUP(x7, 10, 20) ^ - BDN(x7, 22, 21) ^ BUP(x0, 1, 22) ^ BUP(x0, 13, 23) ^ - BDN(x0, 25, 24) ^ BUP(x1, 5, 25) ^ BUP(x1, 17, 26) ^ - BDN(x1, 29, 27) ^ BUP(x2, 9, 28) ^ BUP(x2, 21, 29) ^ - BUP(x3, 1, 30) ^ BUP(x3, 13, 31); - state->x[5] = BDN(x3, 25, 0) ^ BDN(x4, 5, 1) ^ BDN(x4, 17, 2) ^ - BDN(x4, 29, 3) ^ BDN(x5, 9, 4) ^ BDN(x5, 21, 5) ^ - BUP(x6, 1, 6) ^ BDN(x6, 13, 7) ^ BDN(x6, 25, 8) ^ - BUP(x7, 5, 9) ^ BDN(x7, 17, 10) ^ BDN(x7, 29, 11) ^ - BUP(x0, 8, 12) ^ BDN(x0, 20, 13) ^ BUP(x1, 0, 14) ^ - BUP(x1, 12, 15) ^ BDN(x1, 24, 16) ^ BUP(x2, 4, 17) ^ - BUP(x2, 16, 18) ^ BDN(x2, 28, 19) ^ BUP(x3, 8, 20) ^ - BUP(x3, 20, 21) ^ BUP(x4, 0, 22) ^ BUP(x4, 12, 23) ^ - BCP(x4, 24) ^ BUP(x5, 4, 25) ^ BUP(x5, 16, 26) ^ - BDN(x5, 28, 27) ^ BUP(x6, 8, 28) ^ BUP(x6, 20, 29) ^ - BUP(x7, 0, 30) ^ BUP(x7, 12, 31); - state->x[6] = BDN(x7, 24, 0) ^ BDN(x0, 3, 1) ^ BDN(x0, 15, 2) ^ - BDN(x0, 27, 3) ^ BDN(x1, 7, 4) ^ BDN(x1, 19, 5) ^ - BDN(x1, 31, 6) ^ BDN(x2, 11, 7) ^ BDN(x2, 23, 8) ^ - BUP(x3, 3, 9) ^ BDN(x3, 15, 10) ^ BDN(x3, 27, 11) ^ - BUP(x4, 7, 12) ^ BDN(x4, 19, 13) ^ BDN(x4, 31, 14) ^ - BUP(x5, 11, 15) ^ BDN(x5, 23, 16) ^ BUP(x6, 3, 17) ^ - BUP(x6, 15, 18) ^ BDN(x6, 27, 19) ^ BUP(x7, 7, 20) ^ - BUP(x7, 19, 21) ^ BDN(x7, 31, 22) ^ BUP(x0, 10, 23) ^ - BUP(x0, 22, 24) ^ BUP(x1, 2, 25) ^ BUP(x1, 14, 26) ^ - BUP(x1, 26, 27) ^ BUP(x2, 6, 28) ^ BUP(x2, 18, 29) ^ - BCP(x2, 30) ^ BUP(x3, 10, 31); - state->x[7] = BDN(x3, 22, 0) ^ BDN(x4, 2, 1) ^ BDN(x4, 14, 2) ^ - BDN(x4, 26, 3) ^ BDN(x5, 6, 4) ^ BDN(x5, 18, 5) ^ - BDN(x5, 30, 6) ^ BDN(x6, 10, 7) ^ BDN(x6, 22, 8) ^ - BUP(x7, 2, 9) ^ BDN(x7, 14, 10) ^ BDN(x7, 26, 11) ^ - BUP(x0, 5, 12) ^ BDN(x0, 17, 13) ^ BDN(x0, 29, 14) ^ - BUP(x1, 9, 15) ^ BDN(x1, 21, 16) ^ BUP(x2, 1, 17) ^ - BUP(x2, 13, 18) ^ BDN(x2, 25, 19) ^ BUP(x3, 5, 20) ^ - BUP(x3, 17, 21) ^ BDN(x3, 29, 22) ^ BUP(x4, 9, 23) ^ - BUP(x4, 21, 24) ^ BUP(x5, 1, 25) ^ BUP(x5, 13, 26) ^ - BUP(x5, 25, 27) ^ BUP(x6, 5, 28) ^ BUP(x6, 17, 29) ^ - BUP(x6, 29, 30) ^ BUP(x7, 9, 31); - state->x[8] = BDN(x7, 21, 0); -} - -void subterranean_blank(subterranean_state_t *state) -{ - unsigned round; - for (round = 0; round < 8; ++round) { - subterranean_round(state); - state->x[0] ^= 0x02; /* padding for an empty block is in state bit 1 */ - } -} - -void subterranean_duplex_0(subterranean_state_t *state) -{ - subterranean_round(state); - 
state->x[0] ^= 0x02; /* padding for an empty block is in state bit 1 */ -} - -void subterranean_duplex_1(subterranean_state_t *state, unsigned char data) -{ - uint32_t x = data; - - /* Perform a single Subterranean round before absorbing the bits */ - subterranean_round(state); - - /* Rearrange the bits and absorb them into the state */ - state->x[0] ^= (x << 1) & 0x00000002U; - state->x[1] ^= x & 0x00000008U; - state->x[2] ^= 0x00000001U; /* 9th padding bit is always 1 */ - state->x[4] ^= ((x << 6) & 0x00000100U) ^ ((x << 1) & 0x00000040U); - state->x[5] ^= (x << 15) & 0x00010000U; - state->x[6] ^= (x >> 1) & 0x00000020U; - state->x[7] ^= ((x << 21) & 0x02000000U) ^ ((x << 3) & 0x00000400U); -} - -void subterranean_duplex_word(subterranean_state_t *state, uint32_t x) -{ - uint32_t y; - - /* Perform a single Subterranean round before absorbing the bits */ - subterranean_round(state); - - /* To absorb the word into the state, we first rearrange the source - * bits to be in the right target bit positions. Then we mask and - * XOR them into the relevant words of the state. - * - * Some of the source bits end up in the same target bit but a different - * word so we have to permute the input word twice to get all the source - * bits into the locations we want for masking and XOR'ing. - * - * Permutations generated with "http://programming.sirrida.de/calcperm.php". - */ - - /* P1 = [1 16 8 3 25 * * 10 0 21 * 24 2 31 15 6 * 11 9 19 * * 29 * 4 * 30 12 * 22 17 5] */ - y = (x & 0x00080008U) - | ((x & 0x00004001U) << 1) - | ((x & 0x00000080U) << 3) - | ((x & 0x04000000U) << 4) - | leftRotate6(x & 0x80000004U) - | ((x & 0x00400000U) << 7) - | leftRotate12(x & 0x01000200U) - | ((x & 0x00000800U) << 13) - | ((x & 0x00000002U) << 15) - | ((x & 0x08000000U) >> 15) - | ((x & 0x00002000U) << 18) - | ((x & 0x40000000U) >> 13) - | ((x & 0x00000010U) << 21) - | ((x & 0x00001000U) >> 10) - | ((x & 0x00048000U) >> 9) - | ((x & 0x00000100U) >> 8) - | ((x & 0x20000000U) >> 7) - | ((x & 0x00020000U) >> 6); - - /* P2 = [* * * * * 6 5 * * * 31 * * * * * 17 * * * 0 9 * 15 * 30 * * 1 * * *] */ - x = ((x & 0x00010020U) << 1) - | leftRotate5(x & 0x12000000U) - | ((x & 0x00100000U) >> 20) - | ((x & 0x00200000U) >> 12) - | ((x & 0x00000400U) << 21) - | ((x & 0x00800000U) >> 8) - | ((x & 0x00000040U) >> 1); - - /* Integrate the rearranged bits into the state */ - state->x[0] ^= (y & 0x40428816U); - state->x[1] ^= (y & 0x00000008U); - state->x[2] ^= (y & 0x80000041U); - state->x[3] ^= (x & 0x00008000U); - state->x[4] ^= (y & 0x00001300U) ^ (x & 0x00000041U); - state->x[5] ^= (y & 0x21010020U) ^ (x & 0x40000200U); - state->x[6] ^= (y & 0x00280000U) ^ (x & 0x80000020U); - state->x[7] ^= (y & 0x02000400U) ^ (x & 0x00020002U); -} - -void subterranean_duplex_n - (subterranean_state_t *state, const unsigned char *data, unsigned len) -{ - switch (len) { - case 0: - subterranean_duplex_0(state); - break; - case 1: - subterranean_duplex_1(state, data[0]); - break; - case 2: - /* Load 16 bits and add the padding bit to the 17th bit */ - subterranean_duplex_word - (state, ((uint32_t)(data[0]) | - (((uint32_t)(data[1])) << 8) | - 0x10000U)); - break; - case 3: - /* Load 24 bits and add the padding bit to the 25th bit */ - subterranean_duplex_word - (state, ((uint32_t)(data[0]) | - (((uint32_t)(data[1])) << 8) | - (((uint32_t)(data[2])) << 16) | - 0x01000000U)); - break; - default: - /* Load 32 bits and add the padding bit to the 33rd bit */ - subterranean_duplex_word(state, le_load_word32(data)); - state->x[8] ^= 0x00000001U; - break; - 
} -} - -uint32_t subterranean_extract(subterranean_state_t *state) -{ - uint32_t x, y; - - /* We need to extract 64 bits from the state, and then XOR the two - * halves together to get the result. - * - * Extract words from the state and permute the bits into the target - * bit order. Then mask off the unnecessary bits and combine. - * - * Permutations generated with "http://programming.sirrida.de/calcperm.php". - */ - - /* P0 = [* 0 12 * 24 * * * 4 * * 17 * * * 14 16 30 * * * * 29 7 * * * * * * 26 *] */ - x = state->x[0]; - x = (x & 0x00010000U) - | ((x & 0x00000800U) << 6) - | ((x & 0x00400000U) << 7) - | ((x & 0x00000004U) << 10) - | ((x & 0x00020000U) << 13) - | ((x & 0x00800000U) >> 16) - | ((x & 0x00000010U) << 20) - | ((x & 0x40000100U) >> 4) - | ((x & 0x00008002U) >> 1); - y = x & 0x65035091U; - - /* P1 = [28 * 10 3 * * * * * * * * 9 * 19 * * * * * * * * * * * * * 6 * * *] */ - x = state->x[1]; - x = (x & 0x00000008U) - | ((x & 0x00004000U) << 5) - | ((x & 0x00000004U) << 8) - | ((x & 0x10000000U) >> 22) - | ((x & 0x00000001U) << 28) - | ((x & 0x00001000U) >> 3); - y ^= x & 0x10080648U; - - /* P2 = [8 * * 25 22 * 15 * * 11 * * * * * * * 1 * * * * * * 21 * * * 31 * * 13] */ - x = state->x[2]; - x = ((x & 0x00000200U) << 2) - | ((x & 0x10000000U) << 3) - | ((x & 0x00000001U) << 8) - | ((x & 0x00000040U) << 9) - | ((x & 0x80000000U) >> 18) - | ((x & 0x00020000U) >> 16) - | ((x & 0x00000010U) << 18) - | ((x & 0x00000008U) << 22) - | ((x & 0x01000000U) >> 3); - y ^= x & 0x8260a902U; - - /* P3 = [* * * * * * * * * * * * * * * 23 * * * * * 27 * * 18 2 * 5 * * * *] */ - x = state->x[3]; - x = ((x & 0x00200000U) << 6) - | ((x & 0x00008000U) << 8) - | ((x & 0x02000000U) >> 23) - | ((x & 0x08000000U) >> 22) - | ((x & 0x01000000U) >> 6); - y ^= x & 0x08840024U; - - /* P4 = [20 20 * * * * 5 * 2 18 * * 27 * * * * * 23 * * * * * * * * * * * * *] */ - x = state->x[4]; - y ^= (x << 20) & 0x00100000U; /* Handle duplicated bit 20 separately */ - x = ((x & 0x00040000U) << 5) - | ((x & 0x00000200U) << 9) - | ((x & 0x00001000U) << 15) - | ((x & 0x00000002U) << 19) - | ((x & 0x00000100U) >> 6) - | ((x & 0x00000040U) >> 1); - y ^= x & 0x08940024U; - - /* P5 = [* * 13 * * 31 * * * 21 * * * * * * 1 * * * * * * * 11 * * 15 * 22 25 *] */ - x = state->x[5]; - x = ((x & 0x00000004U) << 11) - | ((x & 0x00000200U) << 12) - | ((x & 0x00010000U) >> 15) - | ((x & 0x01000000U) >> 13) - | ((x & 0x08000000U) >> 12) - | ((x & 0x20000000U) >> 7) - | ((x & 0x00000020U) << 26) - | ((x & 0x40000000U) >> 5); - y ^= x & 0x8260a802U; - - /* P6 = [* 8 * * * 6 * * * * * * * * * * * * * 19 * 9 * * * * * * * * 3 10] */ - x = state->x[6]; - x = (x & 0x00080000U) - | ((x & 0x00000020U) << 1) - | ((x & 0x40000000U) >> 27) - | ((x & 0x00000002U) << 7) - | ((x & 0x80000000U) >> 21) - | ((x & 0x00200000U) >> 12); - y ^= x & 0x00080748U; - - /* P7 = [* 28 * 26 * * * * * * 7 29 * * * * 30 16 14 * * * 17 * * 4 * * * 24 * 12] */ - x = state->x[7]; - x = ((x & 0x02000000U) >> 21) - | ((x & 0x80000000U) >> 19) - | ((x & 0x00010000U) << 14) - | ((x & 0x00000800U) << 18) - | ((x & 0x00000008U) << 23) - | leftRotate27(x & 0x20400002U) - | ((x & 0x00040000U) >> 4) - | ((x & 0x00000400U) >> 3) - | ((x & 0x00020000U) >> 1); - y ^= x & 0x75035090U; - - /* Word 8 has a single bit - XOR it directly into the result and return */ - return y ^ state->x[8]; -} - -void subterranean_absorb - (subterranean_state_t *state, const unsigned char *data, - unsigned long long len) -{ - while (len >= 4) { - subterranean_duplex_4(state, data); - data += 4; - len -= 
4; - } - subterranean_duplex_n(state, data, (unsigned)len); -} - -void subterranean_squeeze - (subterranean_state_t *state, unsigned char *data, unsigned len) -{ - uint32_t word; - while (len > 4) { - word = subterranean_extract(state); - subterranean_duplex_0(state); - le_store_word32(data, word); - data += 4; - len -= 4; - } - if (len == 4) { - word = subterranean_extract(state); - le_store_word32(data, word); - } else if (len == 1) { - word = subterranean_extract(state); - data[0] = (unsigned char)word; - } else if (len == 2) { - word = subterranean_extract(state); - data[0] = (unsigned char)word; - data[1] = (unsigned char)(word >> 8); - } else if (len == 3) { - word = subterranean_extract(state); - data[0] = (unsigned char)word; - data[1] = (unsigned char)(word >> 8); - data[2] = (unsigned char)(word >> 16); - } -} diff --git a/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/internal-subterranean.h b/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/internal-subterranean.h deleted file mode 100644 index 71cebb2..0000000 --- a/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/internal-subterranean.h +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SUBTERRANEAN_H -#define LW_INTERNAL_SUBTERRANEAN_H - -#include "internal-util.h" - -/** - * \file internal-subterranean.h - * \brief Internal implementation of the Subterranean block operation. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Representation of the 257-bit state of Subterranean. - * - * The 257-bit state is represented as nine 32-bit words with only a single - * bit in the last word. - */ -typedef struct -{ - uint32_t x[9]; /**< State words */ - -} subterranean_state_t; - -/** - * \brief Performs a single Subterranean round. - * - * \param state Subterranean state to be transformed. - */ -void subterranean_round(subterranean_state_t *state); - -/** - * \brief Performs 8 Subterranean rounds with no absorption or squeezing - * of data; i.e. data input and output is "blanked". - * - * \param state Subterranean state to be transformed. - */ -void subterranean_blank(subterranean_state_t *state); - -/** - * \brief Performs a single Subterranean round and absorbs 0 bytes. - * - * \param state Subterranean state to be transformed. 
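/*
 * Illustrative sketch only: the duplex padding convention used by
 * subterranean_duplex_n() above.  A block of fewer than four bytes is
 * packed little-endian into a word with a single 1 bit appended just past
 * the data; a full four-byte block instead sets the extra bit in state
 * word 8.  The helper below is hypothetical and mirrors the short-block
 * case only.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_partial_block(const unsigned char *data, unsigned len) /* len < 4 */
{
    uint32_t word = 0;
    unsigned i;
    for (i = 0; i < len; ++i)
        word |= ((uint32_t)data[i]) << (8 * i);   /* little-endian bytes */
    word |= ((uint32_t)1) << (8 * len);           /* padding bit at position 8*len */
    return word;
}

int main(void)
{
    unsigned char block[3] = {0xAA, 0xBB, 0xCC};
    printf("%08lx\n", (unsigned long)pack_partial_block(block, 2)); /* 0001bbaa */
    printf("%08lx\n", (unsigned long)pack_partial_block(block, 3)); /* 01ccbbaa */
    return 0;
}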
- */ -void subterranean_duplex_0(subterranean_state_t *state); - -/** - * \brief Performs a single Subterranean round and absorbs one byte. - * - * \param state Subterranean state to be transformed. - * \param data The single byte to be absorbed. - */ -void subterranean_duplex_1(subterranean_state_t *state, unsigned char data); - -/** - * \brief Absorbs a 32-bit word into the Subterranean state. - * - * \param state Subterranean state to be transformed. - * \param x The word to absorb into the state. - */ -void subterranean_duplex_word(subterranean_state_t *state, uint32_t x); - -/** - * \brief Performs a single Subterranean round and absorbs four bytes. - * - * \param state Subterranean state to be transformed. - * \param data Points to the four data bytes to be absorbed. - */ -#define subterranean_duplex_4(state, data) \ - do { \ - subterranean_duplex_word((state), le_load_word32((data))); \ - (state)->x[8] ^= 1; \ - } while (0) - -/** - * \brief Performs a single Subterranean round and absorbs between - * zero and four bytes. - * - * \param state Subterranean state to be transformed. - * \param data Points to the data bytes to be absorbed. - * \param len Length of the data to be absorbed. - */ -void subterranean_duplex_n - (subterranean_state_t *state, const unsigned char *data, unsigned len); - -/** - * \brief Extracts 32 bits of output from the Subterranean state. - * - * \param state Subterranean state to extract the output from. - * - * \return Returns the 32-bit word that was extracted. - */ -uint32_t subterranean_extract(subterranean_state_t *state); - -/** - * \brief Absorbs an arbitrary amount of data, four bytes at a time. - * - * \param state Subterranean state to be transformed. - * \param data Points to the bytes to be absorbed. - * \param len Number of bytes to absorb. - */ -void subterranean_absorb - (subterranean_state_t *state, const unsigned char *data, - unsigned long long len); - -/** - * \brief Squeezes an arbitrary amount of data out of a Subterranean state. - * - * \param state Subterranean state to extract the output from. - * \param data Points to the data buffer to receive the output. - * \param len Number of bytes to be extracted. - */ -void subterranean_squeeze - (subterranean_state_t *state, unsigned char *data, unsigned len); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/internal-util.h b/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - 
(ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. 
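/*
 * Illustrative sketch only: function equivalents of the le_load_word32()
 * and le_store_word32() helpers above, showing that the byte buffer is
 * always interpreted little-endian regardless of the host byte order.
 * The function names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t load_le32(const unsigned char *p)
{
    return ((uint32_t)p[0])         |
           (((uint32_t)p[1]) << 8)  |
           (((uint32_t)p[2]) << 16) |
           (((uint32_t)p[3]) << 24);
}

static void store_le32(unsigned char *p, uint32_t x)
{
    p[0] = (unsigned char)x;
    p[1] = (unsigned char)(x >> 8);
    p[2] = (unsigned char)(x >> 16);
    p[3] = (unsigned char)(x >> 24);
}

int main(void)
{
    unsigned char buf[4];
    store_le32(buf, 0x11223344u);
    /* buf now holds {0x44, 0x33, 0x22, 0x11}; loading it back round-trips. */
    printf("%08lx\n", (unsigned long)load_le32(buf));
    return 0;
}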
This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/subterranean.c b/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/subterranean.c deleted file mode 100644 index 1bc9fc4..0000000 --- a/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/subterranean.c +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "subterranean.h" -#include "internal-subterranean.h" -#include - -aead_cipher_t const subterranean_cipher = { - "Subterranean", - SUBTERRANEAN_KEY_SIZE, - SUBTERRANEAN_NONCE_SIZE, - SUBTERRANEAN_TAG_SIZE, - AEAD_FLAG_NONE, - subterranean_aead_encrypt, - subterranean_aead_decrypt -}; - -aead_hash_algorithm_t const subterranean_hash_algorithm = { - "Subterranean-Hash", - sizeof(subterranean_hash_state_t), - SUBTERRANEAN_HASH_SIZE, - AEAD_FLAG_NONE, - subterranean_hash, - (aead_hash_init_t)subterranean_hash_init, - (aead_hash_update_t)subterranean_hash_update, - (aead_hash_finalize_t)subterranean_hash_finalize, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -int subterranean_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - subterranean_state_t state; - uint32_t x1, x2; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SUBTERRANEAN_TAG_SIZE; - - /* Initialize the state and absorb the key and nonce */ - memset(&state, 0, sizeof(state)); - subterranean_absorb(&state, k, SUBTERRANEAN_KEY_SIZE); - subterranean_absorb(&state, npub, SUBTERRANEAN_NONCE_SIZE); - subterranean_blank(&state); - - /* Absorb the associated data into the state */ - subterranean_absorb(&state, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - while (mlen >= 4) { - x1 = le_load_word32(m); - x2 = subterranean_extract(&state) ^ x1; - subterranean_duplex_word(&state, x1); - state.x[8] ^= 1; /* padding for 32-bit blocks */ - le_store_word32(c, x2); - c += 4; - m += 4; - mlen -= 4; - } - switch ((unsigned char)mlen) { - default: - subterranean_duplex_0(&state); - break; - case 1: - x2 = subterranean_extract(&state) ^ m[0]; - subterranean_duplex_n(&state, m, 1); - c[0] = (unsigned char)x2; - break; - case 2: - x2 = subterranean_extract(&state) ^ m[0] ^ (((uint32_t)(m[1])) << 8); - subterranean_duplex_n(&state, m, 2); - c[0] = (unsigned char)x2; - c[1] = (unsigned char)(x2 >> 8); - break; - case 3: - x2 = subterranean_extract(&state) ^ - m[0] ^ (((uint32_t)(m[1])) << 8) ^ (((uint32_t)(m[2])) << 16); - subterranean_duplex_n(&state, m, 3); - c[0] = (unsigned char)x2; - c[1] = (unsigned char)(x2 >> 8); - c[2] = (unsigned char)(x2 >> 16); - break; - } - - /* Generate the authentication tag */ - subterranean_blank(&state); - subterranean_squeeze(&state, c + mlen, SUBTERRANEAN_TAG_SIZE); - return 0; -} - -int subterranean_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - subterranean_state_t state; - unsigned char *mtemp = m; - unsigned char tag[SUBTERRANEAN_TAG_SIZE]; - uint32_t x; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SUBTERRANEAN_TAG_SIZE) - return -1; - *mlen = clen - SUBTERRANEAN_TAG_SIZE; - - /* Initialize the state and absorb the key and nonce */ - memset(&state, 0, sizeof(state)); - subterranean_absorb(&state, k, SUBTERRANEAN_KEY_SIZE); - subterranean_absorb(&state, npub, SUBTERRANEAN_NONCE_SIZE); - subterranean_blank(&state); - - /* Absorb the associated data into the state */ - subterranean_absorb(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= 
SUBTERRANEAN_TAG_SIZE; - while (clen >= 4) { - x = le_load_word32(c); - x ^= subterranean_extract(&state); - subterranean_duplex_word(&state, x); - state.x[8] ^= 1; /* padding for 32-bit blocks */ - le_store_word32(m, x); - c += 4; - m += 4; - clen -= 4; - } - switch ((unsigned char)clen) { - default: - subterranean_duplex_0(&state); - break; - case 1: - m[0] = (unsigned char)(subterranean_extract(&state) ^ c[0]); - subterranean_duplex_1(&state, m[0]); - break; - case 2: - x = subterranean_extract(&state) ^ c[0] ^ (((uint32_t)(c[1])) << 8); - m[0] = (unsigned char)x; - m[1] = (unsigned char)(x >> 8); - subterranean_duplex_word(&state, (x & 0xFFFFU) | 0x10000U); - break; - case 3: - x = subterranean_extract(&state) ^ - c[0] ^ (((uint32_t)(c[1])) << 8) ^ (((uint32_t)(c[2])) << 16); - m[0] = (unsigned char)x; - m[1] = (unsigned char)(x >> 8); - m[2] = (unsigned char)(x >> 16); - subterranean_duplex_word(&state, (x & 0x00FFFFFFU) | 0x01000000U); - break; - } - - /* Check the authentication tag */ - subterranean_blank(&state); - subterranean_squeeze(&state, tag, sizeof(tag)); - return aead_check_tag(mtemp, *mlen, tag, c + clen, SUBTERRANEAN_TAG_SIZE); -} - -int subterranean_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - subterranean_state_t state; - memset(&state, 0, sizeof(state)); - while (inlen > 0) { - subterranean_duplex_1(&state, *in++); - subterranean_duplex_0(&state); - --inlen; - } - subterranean_duplex_0(&state); - subterranean_duplex_0(&state); - subterranean_blank(&state); - subterranean_squeeze(&state, out, SUBTERRANEAN_HASH_SIZE); - return 0; -} - -void subterranean_hash_init(subterranean_hash_state_t *state) -{ - memset(state, 0, sizeof(subterranean_hash_state_t)); -} - -void subterranean_hash_update - (subterranean_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - subterranean_state_t *st = (subterranean_state_t *)state; - while (inlen > 0) { - subterranean_duplex_1(st, *in++); - subterranean_duplex_0(st); - --inlen; - } -} - -void subterranean_hash_finalize - (subterranean_hash_state_t *state, unsigned char *out) -{ - subterranean_state_t *st = (subterranean_state_t *)state; - subterranean_duplex_0(st); - subterranean_duplex_0(st); - subterranean_blank(st); - subterranean_squeeze(st, out, SUBTERRANEAN_HASH_SIZE); -} diff --git a/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/subterranean.h b/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/subterranean.h deleted file mode 100644 index 148e5e8..0000000 --- a/subterranean/Implementations/crypto_aead/subterraneanv1/rhys-avr/subterranean.h +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SUBTERRANEAN_H -#define LWCRYPTO_SUBTERRANEAN_H - -#include "aead-common.h" - -/** - * \file subterranean.h - * \brief Subterranean authenticated encryption algorithm. - * - * Subterranean (technically "Subterranean 2.0") is a family of - * algorithms built around the 257-bit Subterranean permutation: - * - * \li Subterranean is an authenticated encryption algorithm with a 128-bit - * key, a 128-bit nonce, and a 128-bit tag. - * \li Subterranean-Hash is a hash algorithm with a 256-bit output. - * - * The Subterranean permutation is intended for hardware implementation. - * It is not structured for efficient software implementation. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for Subterranean. - */ -#define SUBTERRANEAN_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Subterranean. - */ -#define SUBTERRANEAN_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Subterranean. - */ -#define SUBTERRANEAN_NONCE_SIZE 16 - -/** - * \brief Size of the hash output for Subterranean-Hash. - */ -#define SUBTERRANEAN_HASH_SIZE 32 - -/** - * \brief Meta-information block for the Subterranean cipher. - */ -extern aead_cipher_t const subterranean_cipher; - -/** - * \brief Meta-information block for the SUBTERRANEAN hash algorithm. - */ -extern aead_hash_algorithm_t const subterranean_hash_algorithm; - -/** - * \brief State information for the Subterreaan incremental hash mode. - */ -typedef union -{ - unsigned char state[40]; /**< Current hash state */ - unsigned long long align; /**< For alignment of this structure */ - -} subterranean_hash_state_t; - -/** - * \brief Encrypts and authenticates a packet with Subterranean. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa subterranean_aead_decrypt() - */ -int subterranean_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Subterranean. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. 
- * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa subterranean_aead_encrypt() - */ -int subterranean_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with Subterranean. - * - * \param out Buffer to receive the hash output which must be at least - * SUBTERRANEAN_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - * - * \sa subterranean_hash_init() - */ -int subterranean_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a Subterranean hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa subterranean_hash_update(), subterranean_hash_finalize(), - * subterranean_hash() - */ -void subterranean_hash_init(subterranean_hash_state_t *state); - -/** - * \brief Updates a Subterranean state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa subterranean_hash_init(), subterranean_hash_finalize() - */ -void subterranean_hash_update - (subterranean_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from a Subterranean hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 32-byte hash value. - * - * \sa subterranean_hash_init(), subterranean_hash_update() - */ -void subterranean_hash_finalize - (subterranean_hash_state_t *state, unsigned char *out); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/subterranean/Implementations/crypto_aead/subterraneanv1/rhys/internal-util.h b/subterranean/Implementations/crypto_aead/subterraneanv1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/subterranean/Implementations/crypto_aead/subterraneanv1/rhys/internal-util.h +++ b/subterranean/Implementations/crypto_aead/subterraneanv1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. 
*/ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + 
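
An aside on the composed forms above: each one is intended to be bit-for-bit identical to the corresponding single rotation, just built from the rotate-by-1 and rotate-by-a-multiple-of-8 steps that are cheap on AVR. A small self-contained check (hypothetical helper names, not part of this patch) illustrates the equivalence for one rotation count:

#include <stdint.h>
#include <assert.h>

/* Direct 32-bit left rotate, the same idiom as the generic leftRotate macro. */
static uint32_t rotl32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32U - bits));
}

/* leftRotate13 composed from cheap steps: rotate left by 16,
 * then right by 1 three times (16 - 3 = 13). */
static uint32_t rotl13_composed(uint32_t x)
{
    uint32_t t = rotl32(x, 16);
    t = (t >> 1) | (t << 31);
    t = (t >> 1) | (t << 31);
    t = (t >> 1) | (t << 31);
    return t;
}

int main(void)
{
    uint32_t x = 0x12345678U;
    assert(rotl13_composed(x) == rotl32(x, 13)); /* both forms agree */
    return 0;
}
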
+/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/aead-common.c b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/aead-common.h b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. 
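
For orientation, these typedefs are the building blocks of the aead_cipher_t meta-information blocks such as subterranean_cipher above. A minimal sketch of driving a cipher generically through its block might look as follows; the helper name and the empty associated data are illustrative assumptions, and the declarations from this header are assumed to be in scope:

/* Hypothetical helper: encrypt mlen bytes of m with whichever AEAD cipher
 * is described by alg; npub and k must be alg->nonce_len and alg->key_len
 * bytes long, and c must have room for mlen + alg->tag_len bytes. */
static int encrypt_with(const aead_cipher_t *alg,
                        unsigned char *c, unsigned long long *clen,
                        const unsigned char *m, unsigned long long mlen,
                        const unsigned char *npub, const unsigned char *k)
{
    /* No associated data and no secret nonce in this sketch. */
    return alg->encrypt(c, clen, m, mlen, 0, 0, 0, npub, k);
}

Calling encrypt_with(&subterranean_cipher, ...) would then route through subterranean_aead_encrypt() via the function pointer in the block.
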
- */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. 
- */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
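
Both tag-checking functions rely on the accumulate-then-mask idiom already visible in aead-common.c earlier in this patch. A stand-alone illustration with 4-byte tags and invented names (it mirrors the library's own shift trick, so like the library it assumes an arithmetic right shift of negative values):

#include <stdio.h>

/* Returns 0 when a and b are equal, -1 otherwise, touching every byte
 * regardless of where the first difference occurs. */
static int ct_compare4(const unsigned char *a, const unsigned char *b)
{
    int accum = 0;
    unsigned i;
    for (i = 0; i < 4; ++i)
        accum |= a[i] ^ b[i];   /* stays 0 only if every byte matches */
    accum = (accum - 1) >> 8;   /* 0 -> -1 (all ones), nonzero -> 0 */
    return ~accum;              /* 0 on match, -1 on mismatch */
}

int main(void)
{
    unsigned char t1[4] = {1, 2, 3, 4};
    unsigned char t2[4] = {1, 2, 3, 4};
    unsigned char t3[4] = {1, 2, 9, 4};
    printf("%d %d\n", ct_compare4(t1, t2), ct_compare4(t1, t3)); /* prints "0 -1" */
    return 0;
}
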
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/api.h b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/api.h deleted file mode 100644 index ae8c7f6..0000000 --- a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/api.h +++ /dev/null @@ -1 +0,0 @@ -#define CRYPTO_BYTES 32 diff --git a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/hash.c b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/hash.c deleted file mode 100644 index 250ae68..0000000 --- a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/hash.c +++ /dev/null @@ -1,8 +0,0 @@ - -#include "subterranean.h" - -int crypto_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - return subterranean_hash(out, in, inlen); -} diff --git a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/internal-subterranean.c b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/internal-subterranean.c deleted file mode 100644 index 1cb64e2..0000000 --- a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/internal-subterranean.c +++ /dev/null @@ -1,441 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "internal-subterranean.h" -#include - -void subterranean_round(subterranean_state_t *state) -{ - uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8; - uint32_t t0, t1; - - /* Load the state up into local variables */ - x0 = state->x[0]; - x1 = state->x[1]; - x2 = state->x[2]; - x3 = state->x[3]; - x4 = state->x[4]; - x5 = state->x[5]; - x6 = state->x[6]; - x7 = state->x[7]; - x8 = state->x[8]; - - /* Step chi: s[i] = s[i] ^ (~(s[i+1) & s[i+2]) */ - #define CHI(a, b) \ - do { \ - t0 = ((a) >> 1) | ((b) << 31); \ - t1 = ((a) >> 2) | ((b) << 30); \ - (a) ^= (~t0) & t1; \ - } while (0) - x8 ^= (x0 << 1); - CHI(x0, x1); CHI(x1, x2); - CHI(x2, x3); CHI(x3, x4); - CHI(x4, x5); CHI(x5, x6); - CHI(x6, x7); CHI(x7, x8); - x8 ^= (~(x8 >> 1)) & (x8 >> 2); - - /* Step itoa: invert s[0] */ - x0 ^= 1U; - - /* Step theta: s[i] = s[i] ^ s[i + 3] ^ s[i + 8] */ - #define THETA(a, b) \ - do { \ - t0 = ((a) >> 3) | ((b) << 29); \ - t1 = ((a) >> 8) | ((b) << 24); \ - (a) ^= t0 ^ t1; \ - } while (0) - x8 = (x8 & 1U) ^ (x0 << 1); - THETA(x0, x1); THETA(x1, x2); - THETA(x2, x3); THETA(x3, x4); - THETA(x4, x5); THETA(x5, x6); - THETA(x6, x7); THETA(x7, x8); - x8 ^= (x8 >> 3) ^ (x8 >> 8); - - /* Step pi: permute the bits with the rule s[i] = s[(i * 12) % 257]. - * BCP = bit copy, BUP = move bit up, BDN = move bit down */ - #define BCP(x, bit) ((x) & (((uint32_t)1) << (bit))) - #define BUP(x, from, to) \ - (((x) << ((to) - (from))) & (((uint32_t)1) << (to))) - #define BDN(x, from, to) \ - (((x) >> ((from) - (to))) & (((uint32_t)1) << (to))) - state->x[0] = BCP(x0, 0) ^ BDN(x0, 12, 1) ^ BDN(x0, 24, 2) ^ - BDN(x1, 4, 3) ^ BDN(x1, 16, 4) ^ BDN(x1, 28, 5) ^ - BDN(x2, 8, 6) ^ BDN(x2, 20, 7) ^ BUP(x3, 0, 8) ^ - BDN(x3, 12, 9) ^ BDN(x3, 24, 10) ^ BUP(x4, 4, 11) ^ - BDN(x4, 16, 12) ^ BDN(x4, 28, 13) ^ BUP(x5, 8, 14) ^ - BDN(x5, 20, 15) ^ BUP(x6, 0, 16) ^ BUP(x6, 12, 17) ^ - BDN(x6, 24, 18) ^ BUP(x7, 4, 19) ^ BUP(x7, 16, 20) ^ - BDN(x7, 28, 21) ^ BUP(x0, 7, 22) ^ BUP(x0, 19, 23) ^ - BDN(x0, 31, 24) ^ BUP(x1, 11, 25) ^ BUP(x1, 23, 26) ^ - BUP(x2, 3, 27) ^ BUP(x2, 15, 28) ^ BUP(x2, 27, 29) ^ - BUP(x3, 7, 30) ^ BUP(x3, 19, 31); - state->x[1] = BDN(x3, 31, 0) ^ BDN(x4, 11, 1) ^ BDN(x4, 23, 2) ^ - BCP(x5, 3) ^ BDN(x5, 15, 4) ^ BDN(x5, 27, 5) ^ - BDN(x6, 7, 6) ^ BDN(x6, 19, 7) ^ BDN(x6, 31, 8) ^ - BDN(x7, 11, 9) ^ BDN(x7, 23, 10) ^ BUP(x0, 2, 11) ^ - BDN(x0, 14, 12) ^ BDN(x0, 26, 13) ^ BUP(x1, 6, 14) ^ - BDN(x1, 18, 15) ^ BDN(x1, 30, 16) ^ BUP(x2, 10, 17) ^ - BDN(x2, 22, 18) ^ BUP(x3, 2, 19) ^ BUP(x3, 14, 20) ^ - BDN(x3, 26, 21) ^ BUP(x4, 6, 22) ^ BUP(x4, 18, 23) ^ - BDN(x4, 30, 24) ^ BUP(x5, 10, 25) ^ BUP(x5, 22, 26) ^ - BUP(x6, 2, 27) ^ BUP(x6, 14, 28) ^ BUP(x6, 26, 29) ^ - BUP(x7, 6, 30) ^ BUP(x7, 18, 31); - state->x[2] = BDN(x7, 30, 0) ^ BDN(x0, 9, 1) ^ BDN(x0, 21, 2) ^ - BUP(x1, 1, 3) ^ BDN(x1, 13, 4) ^ BDN(x1, 25, 5) ^ - BUP(x2, 5, 6) ^ BDN(x2, 17, 7) ^ BDN(x2, 29, 8) ^ - BCP(x3, 9) ^ BDN(x3, 21, 10) ^ BUP(x4, 1, 11) ^ - BDN(x4, 13, 12) ^ BDN(x4, 25, 13) ^ BUP(x5, 5, 14) ^ - BDN(x5, 17, 15) ^ BDN(x5, 29, 16) ^ BUP(x6, 9, 17) ^ - BDN(x6, 21, 18) ^ BUP(x7, 1, 19) ^ BUP(x7, 13, 20) ^ - BDN(x7, 25, 21) ^ BUP(x0, 4, 22) ^ BUP(x0, 16, 23) ^ - BDN(x0, 28, 24) ^ BUP(x1, 8, 25) ^ BUP(x1, 20, 26) ^ - BUP(x2, 0, 27) ^ BUP(x2, 12, 28) ^ BUP(x2, 24, 29) ^ - BUP(x3, 4, 30) ^ BUP(x3, 16, 31); - state->x[3] = BDN(x3, 28, 0) ^ BDN(x4, 8, 1) ^ BDN(x4, 20, 2) ^ - BUP(x5, 0, 3) ^ BDN(x5, 12, 4) ^ BDN(x5, 24, 5) ^ - BUP(x6, 4, 6) ^ BDN(x6, 16, 7) ^ BDN(x6, 28, 8) ^ - BUP(x7, 8, 9) ^ BDN(x7, 20, 10) ^ BUP(x8, 0, 11) ^ - BUP(x0, 11, 12) ^ 
BDN(x0, 23, 13) ^ BUP(x1, 3, 14) ^ - BCP(x1, 15) ^ BDN(x1, 27, 16) ^ BUP(x2, 7, 17) ^ - BDN(x2, 19, 18) ^ BDN(x2, 31, 19) ^ BUP(x3, 11, 20) ^ - BDN(x3, 23, 21) ^ BUP(x4, 3, 22) ^ BUP(x4, 15, 23) ^ - BDN(x4, 27, 24) ^ BUP(x5, 7, 25) ^ BUP(x5, 19, 26) ^ - BDN(x5, 31, 27) ^ BUP(x6, 11, 28) ^ BUP(x6, 23, 29) ^ - BUP(x7, 3, 30) ^ BUP(x7, 15, 31); - state->x[4] = BDN(x7, 27, 0) ^ BDN(x0, 6, 1) ^ BDN(x0, 18, 2) ^ - BDN(x0, 30, 3) ^ BDN(x1, 10, 4) ^ BDN(x1, 22, 5) ^ - BUP(x2, 2, 6) ^ BDN(x2, 14, 7) ^ BDN(x2, 26, 8) ^ - BUP(x3, 6, 9) ^ BDN(x3, 18, 10) ^ BDN(x3, 30, 11) ^ - BUP(x4, 10, 12) ^ BDN(x4, 22, 13) ^ BUP(x5, 2, 14) ^ - BUP(x5, 14, 15) ^ BDN(x5, 26, 16) ^ BUP(x6, 6, 17) ^ - BCP(x6, 18) ^ BDN(x6, 30, 19) ^ BUP(x7, 10, 20) ^ - BDN(x7, 22, 21) ^ BUP(x0, 1, 22) ^ BUP(x0, 13, 23) ^ - BDN(x0, 25, 24) ^ BUP(x1, 5, 25) ^ BUP(x1, 17, 26) ^ - BDN(x1, 29, 27) ^ BUP(x2, 9, 28) ^ BUP(x2, 21, 29) ^ - BUP(x3, 1, 30) ^ BUP(x3, 13, 31); - state->x[5] = BDN(x3, 25, 0) ^ BDN(x4, 5, 1) ^ BDN(x4, 17, 2) ^ - BDN(x4, 29, 3) ^ BDN(x5, 9, 4) ^ BDN(x5, 21, 5) ^ - BUP(x6, 1, 6) ^ BDN(x6, 13, 7) ^ BDN(x6, 25, 8) ^ - BUP(x7, 5, 9) ^ BDN(x7, 17, 10) ^ BDN(x7, 29, 11) ^ - BUP(x0, 8, 12) ^ BDN(x0, 20, 13) ^ BUP(x1, 0, 14) ^ - BUP(x1, 12, 15) ^ BDN(x1, 24, 16) ^ BUP(x2, 4, 17) ^ - BUP(x2, 16, 18) ^ BDN(x2, 28, 19) ^ BUP(x3, 8, 20) ^ - BUP(x3, 20, 21) ^ BUP(x4, 0, 22) ^ BUP(x4, 12, 23) ^ - BCP(x4, 24) ^ BUP(x5, 4, 25) ^ BUP(x5, 16, 26) ^ - BDN(x5, 28, 27) ^ BUP(x6, 8, 28) ^ BUP(x6, 20, 29) ^ - BUP(x7, 0, 30) ^ BUP(x7, 12, 31); - state->x[6] = BDN(x7, 24, 0) ^ BDN(x0, 3, 1) ^ BDN(x0, 15, 2) ^ - BDN(x0, 27, 3) ^ BDN(x1, 7, 4) ^ BDN(x1, 19, 5) ^ - BDN(x1, 31, 6) ^ BDN(x2, 11, 7) ^ BDN(x2, 23, 8) ^ - BUP(x3, 3, 9) ^ BDN(x3, 15, 10) ^ BDN(x3, 27, 11) ^ - BUP(x4, 7, 12) ^ BDN(x4, 19, 13) ^ BDN(x4, 31, 14) ^ - BUP(x5, 11, 15) ^ BDN(x5, 23, 16) ^ BUP(x6, 3, 17) ^ - BUP(x6, 15, 18) ^ BDN(x6, 27, 19) ^ BUP(x7, 7, 20) ^ - BUP(x7, 19, 21) ^ BDN(x7, 31, 22) ^ BUP(x0, 10, 23) ^ - BUP(x0, 22, 24) ^ BUP(x1, 2, 25) ^ BUP(x1, 14, 26) ^ - BUP(x1, 26, 27) ^ BUP(x2, 6, 28) ^ BUP(x2, 18, 29) ^ - BCP(x2, 30) ^ BUP(x3, 10, 31); - state->x[7] = BDN(x3, 22, 0) ^ BDN(x4, 2, 1) ^ BDN(x4, 14, 2) ^ - BDN(x4, 26, 3) ^ BDN(x5, 6, 4) ^ BDN(x5, 18, 5) ^ - BDN(x5, 30, 6) ^ BDN(x6, 10, 7) ^ BDN(x6, 22, 8) ^ - BUP(x7, 2, 9) ^ BDN(x7, 14, 10) ^ BDN(x7, 26, 11) ^ - BUP(x0, 5, 12) ^ BDN(x0, 17, 13) ^ BDN(x0, 29, 14) ^ - BUP(x1, 9, 15) ^ BDN(x1, 21, 16) ^ BUP(x2, 1, 17) ^ - BUP(x2, 13, 18) ^ BDN(x2, 25, 19) ^ BUP(x3, 5, 20) ^ - BUP(x3, 17, 21) ^ BDN(x3, 29, 22) ^ BUP(x4, 9, 23) ^ - BUP(x4, 21, 24) ^ BUP(x5, 1, 25) ^ BUP(x5, 13, 26) ^ - BUP(x5, 25, 27) ^ BUP(x6, 5, 28) ^ BUP(x6, 17, 29) ^ - BUP(x6, 29, 30) ^ BUP(x7, 9, 31); - state->x[8] = BDN(x7, 21, 0); -} - -void subterranean_blank(subterranean_state_t *state) -{ - unsigned round; - for (round = 0; round < 8; ++round) { - subterranean_round(state); - state->x[0] ^= 0x02; /* padding for an empty block is in state bit 1 */ - } -} - -void subterranean_duplex_0(subterranean_state_t *state) -{ - subterranean_round(state); - state->x[0] ^= 0x02; /* padding for an empty block is in state bit 1 */ -} - -void subterranean_duplex_1(subterranean_state_t *state, unsigned char data) -{ - uint32_t x = data; - - /* Perform a single Subterranean round before absorbing the bits */ - subterranean_round(state); - - /* Rearrange the bits and absorb them into the state */ - state->x[0] ^= (x << 1) & 0x00000002U; - state->x[1] ^= x & 0x00000008U; - state->x[2] ^= 0x00000001U; /* 9th padding bit is always 1 */ - state->x[4] ^= ((x << 
6) & 0x00000100U) ^ ((x << 1) & 0x00000040U); - state->x[5] ^= (x << 15) & 0x00010000U; - state->x[6] ^= (x >> 1) & 0x00000020U; - state->x[7] ^= ((x << 21) & 0x02000000U) ^ ((x << 3) & 0x00000400U); -} - -void subterranean_duplex_word(subterranean_state_t *state, uint32_t x) -{ - uint32_t y; - - /* Perform a single Subterranean round before absorbing the bits */ - subterranean_round(state); - - /* To absorb the word into the state, we first rearrange the source - * bits to be in the right target bit positions. Then we mask and - * XOR them into the relevant words of the state. - * - * Some of the source bits end up in the same target bit but a different - * word so we have to permute the input word twice to get all the source - * bits into the locations we want for masking and XOR'ing. - * - * Permutations generated with "http://programming.sirrida.de/calcperm.php". - */ - - /* P1 = [1 16 8 3 25 * * 10 0 21 * 24 2 31 15 6 * 11 9 19 * * 29 * 4 * 30 12 * 22 17 5] */ - y = (x & 0x00080008U) - | ((x & 0x00004001U) << 1) - | ((x & 0x00000080U) << 3) - | ((x & 0x04000000U) << 4) - | leftRotate6(x & 0x80000004U) - | ((x & 0x00400000U) << 7) - | leftRotate12(x & 0x01000200U) - | ((x & 0x00000800U) << 13) - | ((x & 0x00000002U) << 15) - | ((x & 0x08000000U) >> 15) - | ((x & 0x00002000U) << 18) - | ((x & 0x40000000U) >> 13) - | ((x & 0x00000010U) << 21) - | ((x & 0x00001000U) >> 10) - | ((x & 0x00048000U) >> 9) - | ((x & 0x00000100U) >> 8) - | ((x & 0x20000000U) >> 7) - | ((x & 0x00020000U) >> 6); - - /* P2 = [* * * * * 6 5 * * * 31 * * * * * 17 * * * 0 9 * 15 * 30 * * 1 * * *] */ - x = ((x & 0x00010020U) << 1) - | leftRotate5(x & 0x12000000U) - | ((x & 0x00100000U) >> 20) - | ((x & 0x00200000U) >> 12) - | ((x & 0x00000400U) << 21) - | ((x & 0x00800000U) >> 8) - | ((x & 0x00000040U) >> 1); - - /* Integrate the rearranged bits into the state */ - state->x[0] ^= (y & 0x40428816U); - state->x[1] ^= (y & 0x00000008U); - state->x[2] ^= (y & 0x80000041U); - state->x[3] ^= (x & 0x00008000U); - state->x[4] ^= (y & 0x00001300U) ^ (x & 0x00000041U); - state->x[5] ^= (y & 0x21010020U) ^ (x & 0x40000200U); - state->x[6] ^= (y & 0x00280000U) ^ (x & 0x80000020U); - state->x[7] ^= (y & 0x02000400U) ^ (x & 0x00020002U); -} - -void subterranean_duplex_n - (subterranean_state_t *state, const unsigned char *data, unsigned len) -{ - switch (len) { - case 0: - subterranean_duplex_0(state); - break; - case 1: - subterranean_duplex_1(state, data[0]); - break; - case 2: - /* Load 16 bits and add the padding bit to the 17th bit */ - subterranean_duplex_word - (state, ((uint32_t)(data[0]) | - (((uint32_t)(data[1])) << 8) | - 0x10000U)); - break; - case 3: - /* Load 24 bits and add the padding bit to the 25th bit */ - subterranean_duplex_word - (state, ((uint32_t)(data[0]) | - (((uint32_t)(data[1])) << 8) | - (((uint32_t)(data[2])) << 16) | - 0x01000000U)); - break; - default: - /* Load 32 bits and add the padding bit to the 33rd bit */ - subterranean_duplex_word(state, le_load_word32(data)); - state->x[8] ^= 0x00000001U; - break; - } -} - -uint32_t subterranean_extract(subterranean_state_t *state) -{ - uint32_t x, y; - - /* We need to extract 64 bits from the state, and then XOR the two - * halves together to get the result. - * - * Extract words from the state and permute the bits into the target - * bit order. Then mask off the unnecessary bits and combine. - * - * Permutations generated with "http://programming.sirrida.de/calcperm.php". 
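
The P0..P7 blocks that follow are long chains of the usual shift-then-mask bit-gather idiom, the same thing the BCP/BUP/BDN macros express in subterranean_round(). A toy fragment with invented names shows the pattern for just two bits:

#include <stdint.h>

/* Gather bit 12 of x into bit 1 and bit 24 of x into bit 2 of the result,
 * i.e. two applications of the BDN(x, from, to) pattern. */
static uint32_t gather_two_bits(uint32_t x)
{
    uint32_t y = 0;
    y |= (x >> 11) & (1U << 1);   /* source bit 12 -> target bit 1 */
    y |= (x >> 22) & (1U << 2);   /* source bit 24 -> target bit 2 */
    return y;
}
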
- */ - - /* P0 = [* 0 12 * 24 * * * 4 * * 17 * * * 14 16 30 * * * * 29 7 * * * * * * 26 *] */ - x = state->x[0]; - x = (x & 0x00010000U) - | ((x & 0x00000800U) << 6) - | ((x & 0x00400000U) << 7) - | ((x & 0x00000004U) << 10) - | ((x & 0x00020000U) << 13) - | ((x & 0x00800000U) >> 16) - | ((x & 0x00000010U) << 20) - | ((x & 0x40000100U) >> 4) - | ((x & 0x00008002U) >> 1); - y = x & 0x65035091U; - - /* P1 = [28 * 10 3 * * * * * * * * 9 * 19 * * * * * * * * * * * * * 6 * * *] */ - x = state->x[1]; - x = (x & 0x00000008U) - | ((x & 0x00004000U) << 5) - | ((x & 0x00000004U) << 8) - | ((x & 0x10000000U) >> 22) - | ((x & 0x00000001U) << 28) - | ((x & 0x00001000U) >> 3); - y ^= x & 0x10080648U; - - /* P2 = [8 * * 25 22 * 15 * * 11 * * * * * * * 1 * * * * * * 21 * * * 31 * * 13] */ - x = state->x[2]; - x = ((x & 0x00000200U) << 2) - | ((x & 0x10000000U) << 3) - | ((x & 0x00000001U) << 8) - | ((x & 0x00000040U) << 9) - | ((x & 0x80000000U) >> 18) - | ((x & 0x00020000U) >> 16) - | ((x & 0x00000010U) << 18) - | ((x & 0x00000008U) << 22) - | ((x & 0x01000000U) >> 3); - y ^= x & 0x8260a902U; - - /* P3 = [* * * * * * * * * * * * * * * 23 * * * * * 27 * * 18 2 * 5 * * * *] */ - x = state->x[3]; - x = ((x & 0x00200000U) << 6) - | ((x & 0x00008000U) << 8) - | ((x & 0x02000000U) >> 23) - | ((x & 0x08000000U) >> 22) - | ((x & 0x01000000U) >> 6); - y ^= x & 0x08840024U; - - /* P4 = [20 20 * * * * 5 * 2 18 * * 27 * * * * * 23 * * * * * * * * * * * * *] */ - x = state->x[4]; - y ^= (x << 20) & 0x00100000U; /* Handle duplicated bit 20 separately */ - x = ((x & 0x00040000U) << 5) - | ((x & 0x00000200U) << 9) - | ((x & 0x00001000U) << 15) - | ((x & 0x00000002U) << 19) - | ((x & 0x00000100U) >> 6) - | ((x & 0x00000040U) >> 1); - y ^= x & 0x08940024U; - - /* P5 = [* * 13 * * 31 * * * 21 * * * * * * 1 * * * * * * * 11 * * 15 * 22 25 *] */ - x = state->x[5]; - x = ((x & 0x00000004U) << 11) - | ((x & 0x00000200U) << 12) - | ((x & 0x00010000U) >> 15) - | ((x & 0x01000000U) >> 13) - | ((x & 0x08000000U) >> 12) - | ((x & 0x20000000U) >> 7) - | ((x & 0x00000020U) << 26) - | ((x & 0x40000000U) >> 5); - y ^= x & 0x8260a802U; - - /* P6 = [* 8 * * * 6 * * * * * * * * * * * * * 19 * 9 * * * * * * * * 3 10] */ - x = state->x[6]; - x = (x & 0x00080000U) - | ((x & 0x00000020U) << 1) - | ((x & 0x40000000U) >> 27) - | ((x & 0x00000002U) << 7) - | ((x & 0x80000000U) >> 21) - | ((x & 0x00200000U) >> 12); - y ^= x & 0x00080748U; - - /* P7 = [* 28 * 26 * * * * * * 7 29 * * * * 30 16 14 * * * 17 * * 4 * * * 24 * 12] */ - x = state->x[7]; - x = ((x & 0x02000000U) >> 21) - | ((x & 0x80000000U) >> 19) - | ((x & 0x00010000U) << 14) - | ((x & 0x00000800U) << 18) - | ((x & 0x00000008U) << 23) - | leftRotate27(x & 0x20400002U) - | ((x & 0x00040000U) >> 4) - | ((x & 0x00000400U) >> 3) - | ((x & 0x00020000U) >> 1); - y ^= x & 0x75035090U; - - /* Word 8 has a single bit - XOR it directly into the result and return */ - return y ^ state->x[8]; -} - -void subterranean_absorb - (subterranean_state_t *state, const unsigned char *data, - unsigned long long len) -{ - while (len >= 4) { - subterranean_duplex_4(state, data); - data += 4; - len -= 4; - } - subterranean_duplex_n(state, data, (unsigned)len); -} - -void subterranean_squeeze - (subterranean_state_t *state, unsigned char *data, unsigned len) -{ - uint32_t word; - while (len > 4) { - word = subterranean_extract(state); - subterranean_duplex_0(state); - le_store_word32(data, word); - data += 4; - len -= 4; - } - if (len == 4) { - word = subterranean_extract(state); - le_store_word32(data, word); - 
} else if (len == 1) { - word = subterranean_extract(state); - data[0] = (unsigned char)word; - } else if (len == 2) { - word = subterranean_extract(state); - data[0] = (unsigned char)word; - data[1] = (unsigned char)(word >> 8); - } else if (len == 3) { - word = subterranean_extract(state); - data[0] = (unsigned char)word; - data[1] = (unsigned char)(word >> 8); - data[2] = (unsigned char)(word >> 16); - } -} diff --git a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/internal-subterranean.h b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/internal-subterranean.h deleted file mode 100644 index 71cebb2..0000000 --- a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/internal-subterranean.h +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_SUBTERRANEAN_H -#define LW_INTERNAL_SUBTERRANEAN_H - -#include "internal-util.h" - -/** - * \file internal-subterranean.h - * \brief Internal implementation of the Subterranean block operation. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Representation of the 257-bit state of Subterranean. - * - * The 257-bit state is represented as nine 32-bit words with only a single - * bit in the last word. - */ -typedef struct -{ - uint32_t x[9]; /**< State words */ - -} subterranean_state_t; - -/** - * \brief Performs a single Subterranean round. - * - * \param state Subterranean state to be transformed. - */ -void subterranean_round(subterranean_state_t *state); - -/** - * \brief Performs 8 Subterranean rounds with no absorption or squeezing - * of data; i.e. data input and output is "blanked". - * - * \param state Subterranean state to be transformed. - */ -void subterranean_blank(subterranean_state_t *state); - -/** - * \brief Performs a single Subterranean round and absorbs 0 bytes. - * - * \param state Subterranean state to be transformed. - */ -void subterranean_duplex_0(subterranean_state_t *state); - -/** - * \brief Performs a single Subterranean round and absorbs one byte. - * - * \param state Subterranean state to be transformed. - * \param data The single byte to be absorbed. - */ -void subterranean_duplex_1(subterranean_state_t *state, unsigned char data); - -/** - * \brief Absorbs a 32-bit word into the Subterranean state. - * - * \param state Subterranean state to be transformed. - * \param x The word to absorb into the state. 
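
Taken together with subterranean_extract() declared further below, the duplex operations give the encryption pattern used in subterranean.c earlier in this patch. A reduced sketch for one aligned 4-byte block, assuming internal-subterranean.h and internal-util.h are included and the state is already initialised (illustrative only, with an invented helper name):

/* Encrypt one 4-byte block in place: the keystream word comes from
 * subterranean_extract(), and the plaintext word is duplexed back into
 * the state, exactly as in the main loop of subterranean_aead_encrypt(). */
static void encrypt_block4(subterranean_state_t *state, unsigned char block[4])
{
    uint32_t pt = le_load_word32(block);             /* plaintext word */
    uint32_t ct = subterranean_extract(state) ^ pt;  /* XOR with keystream */
    subterranean_duplex_word(state, pt);             /* absorb the plaintext */
    state->x[8] ^= 1;                                /* padding bit for a full block */
    le_store_word32(block, ct);                      /* write the ciphertext */
}

The duplex-plus-padding pair in the middle is what the subterranean_duplex_4() macro below packages up for the absorb path.
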
- */ -void subterranean_duplex_word(subterranean_state_t *state, uint32_t x); - -/** - * \brief Performs a single Subterranean round and absorbs four bytes. - * - * \param state Subterranean state to be transformed. - * \param data Points to the four data bytes to be absorbed. - */ -#define subterranean_duplex_4(state, data) \ - do { \ - subterranean_duplex_word((state), le_load_word32((data))); \ - (state)->x[8] ^= 1; \ - } while (0) - -/** - * \brief Performs a single Subterranean round and absorbs between - * zero and four bytes. - * - * \param state Subterranean state to be transformed. - * \param data Points to the data bytes to be absorbed. - * \param len Length of the data to be absorbed. - */ -void subterranean_duplex_n - (subterranean_state_t *state, const unsigned char *data, unsigned len); - -/** - * \brief Extracts 32 bits of output from the Subterranean state. - * - * \param state Subterranean state to extract the output from. - * - * \return Returns the 32-bit word that was extracted. - */ -uint32_t subterranean_extract(subterranean_state_t *state); - -/** - * \brief Absorbs an arbitrary amount of data, four bytes at a time. - * - * \param state Subterranean state to be transformed. - * \param data Points to the bytes to be absorbed. - * \param len Number of bytes to absorb. - */ -void subterranean_absorb - (subterranean_state_t *state, const unsigned char *data, - unsigned long long len); - -/** - * \brief Squeezes an arbitrary amount of data out of a Subterranean state. - * - * \param state Subterranean state to extract the output from. - * \param data Points to the data buffer to receive the output. - * \param len Number of bytes to be extracted. - */ -void subterranean_squeeze - (subterranean_state_t *state, unsigned char *data, unsigned len); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/internal-util.h b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define 
le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. 
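The comment above explains that on AVR only 1-bit and 8-bit rotations are cheap, so all other rotation counts are composed from them. A short editor's sketch of the idea (the rotl32/rotr32 helpers are hypothetical stand-ins for the leftRotate/rightRotate macros): a rotate-left by 5 becomes a rotate-left by 8 followed by three 1-bit rotates right, because 8 - 3 == 5 modulo the word size.

#include <stdint.h>
#include <assert.h>

static uint32_t rotl32(uint32_t x, unsigned bits) { return (x << bits) | (x >> (32 - bits)); }
static uint32_t rotr32(uint32_t x, unsigned bits) { return (x >> bits) | (x << (32 - bits)); }

static void composed_rotation_demo(void)
{
    uint32_t x = 0xDEADBEEFU;
    uint32_t composed = rotr32(rotr32(rotr32(rotl32(x, 8), 1), 1), 1);
    assert(composed == rotl32(x, 5)); /* same value, cheaper instruction mix on AVR */
}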
*/ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/subterranean.c b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/subterranean.c deleted file mode 100644 index 1bc9fc4..0000000 --- a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/subterranean.c +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "subterranean.h" -#include "internal-subterranean.h" -#include - -aead_cipher_t const subterranean_cipher = { - "Subterranean", - SUBTERRANEAN_KEY_SIZE, - SUBTERRANEAN_NONCE_SIZE, - SUBTERRANEAN_TAG_SIZE, - AEAD_FLAG_NONE, - subterranean_aead_encrypt, - subterranean_aead_decrypt -}; - -aead_hash_algorithm_t const subterranean_hash_algorithm = { - "Subterranean-Hash", - sizeof(subterranean_hash_state_t), - SUBTERRANEAN_HASH_SIZE, - AEAD_FLAG_NONE, - subterranean_hash, - (aead_hash_init_t)subterranean_hash_init, - (aead_hash_update_t)subterranean_hash_update, - (aead_hash_finalize_t)subterranean_hash_finalize, - (aead_xof_absorb_t)0, - (aead_xof_squeeze_t)0 -}; - -int subterranean_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - subterranean_state_t state; - uint32_t x1, x2; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + SUBTERRANEAN_TAG_SIZE; - - /* Initialize the state and absorb the key and nonce */ - memset(&state, 0, sizeof(state)); - subterranean_absorb(&state, k, SUBTERRANEAN_KEY_SIZE); - subterranean_absorb(&state, npub, SUBTERRANEAN_NONCE_SIZE); - subterranean_blank(&state); - - /* Absorb the associated data into the state */ - subterranean_absorb(&state, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - while (mlen >= 4) { - x1 = le_load_word32(m); - x2 = subterranean_extract(&state) ^ x1; - subterranean_duplex_word(&state, x1); - state.x[8] ^= 1; /* padding for 32-bit blocks */ - le_store_word32(c, x2); - c += 4; - m += 4; - mlen -= 4; - } - switch ((unsigned char)mlen) { - default: - subterranean_duplex_0(&state); - break; - case 1: - x2 = subterranean_extract(&state) ^ m[0]; - subterranean_duplex_n(&state, m, 1); - c[0] = (unsigned char)x2; - break; - case 2: - x2 = subterranean_extract(&state) ^ m[0] ^ (((uint32_t)(m[1])) << 8); - subterranean_duplex_n(&state, m, 2); - c[0] = (unsigned char)x2; - c[1] = (unsigned char)(x2 >> 8); - break; - case 3: - x2 = subterranean_extract(&state) ^ - m[0] ^ (((uint32_t)(m[1])) << 8) ^ (((uint32_t)(m[2])) << 16); - subterranean_duplex_n(&state, m, 3); - c[0] = (unsigned char)x2; - c[1] = (unsigned char)(x2 >> 8); - c[2] = (unsigned char)(x2 >> 16); - break; - } - - /* Generate the authentication tag */ - subterranean_blank(&state); - subterranean_squeeze(&state, c + mlen, SUBTERRANEAN_TAG_SIZE); - return 0; -} - -int subterranean_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - subterranean_state_t state; - unsigned char *mtemp = m; - unsigned char tag[SUBTERRANEAN_TAG_SIZE]; - uint32_t x; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < SUBTERRANEAN_TAG_SIZE) - return -1; - *mlen = clen - SUBTERRANEAN_TAG_SIZE; - - /* Initialize the state and absorb the key and nonce */ - memset(&state, 0, sizeof(state)); - subterranean_absorb(&state, k, SUBTERRANEAN_KEY_SIZE); - subterranean_absorb(&state, npub, SUBTERRANEAN_NONCE_SIZE); - subterranean_blank(&state); - - /* Absorb the associated data into the state */ - subterranean_absorb(&state, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - clen -= 
SUBTERRANEAN_TAG_SIZE; - while (clen >= 4) { - x = le_load_word32(c); - x ^= subterranean_extract(&state); - subterranean_duplex_word(&state, x); - state.x[8] ^= 1; /* padding for 32-bit blocks */ - le_store_word32(m, x); - c += 4; - m += 4; - clen -= 4; - } - switch ((unsigned char)clen) { - default: - subterranean_duplex_0(&state); - break; - case 1: - m[0] = (unsigned char)(subterranean_extract(&state) ^ c[0]); - subterranean_duplex_1(&state, m[0]); - break; - case 2: - x = subterranean_extract(&state) ^ c[0] ^ (((uint32_t)(c[1])) << 8); - m[0] = (unsigned char)x; - m[1] = (unsigned char)(x >> 8); - subterranean_duplex_word(&state, (x & 0xFFFFU) | 0x10000U); - break; - case 3: - x = subterranean_extract(&state) ^ - c[0] ^ (((uint32_t)(c[1])) << 8) ^ (((uint32_t)(c[2])) << 16); - m[0] = (unsigned char)x; - m[1] = (unsigned char)(x >> 8); - m[2] = (unsigned char)(x >> 16); - subterranean_duplex_word(&state, (x & 0x00FFFFFFU) | 0x01000000U); - break; - } - - /* Check the authentication tag */ - subterranean_blank(&state); - subterranean_squeeze(&state, tag, sizeof(tag)); - return aead_check_tag(mtemp, *mlen, tag, c + clen, SUBTERRANEAN_TAG_SIZE); -} - -int subterranean_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - subterranean_state_t state; - memset(&state, 0, sizeof(state)); - while (inlen > 0) { - subterranean_duplex_1(&state, *in++); - subterranean_duplex_0(&state); - --inlen; - } - subterranean_duplex_0(&state); - subterranean_duplex_0(&state); - subterranean_blank(&state); - subterranean_squeeze(&state, out, SUBTERRANEAN_HASH_SIZE); - return 0; -} - -void subterranean_hash_init(subterranean_hash_state_t *state) -{ - memset(state, 0, sizeof(subterranean_hash_state_t)); -} - -void subterranean_hash_update - (subterranean_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - subterranean_state_t *st = (subterranean_state_t *)state; - while (inlen > 0) { - subterranean_duplex_1(st, *in++); - subterranean_duplex_0(st); - --inlen; - } -} - -void subterranean_hash_finalize - (subterranean_hash_state_t *state, unsigned char *out) -{ - subterranean_state_t *st = (subterranean_state_t *)state; - subterranean_duplex_0(st); - subterranean_duplex_0(st); - subterranean_blank(st); - subterranean_squeeze(st, out, SUBTERRANEAN_HASH_SIZE); -} diff --git a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/subterranean.h b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/subterranean.h deleted file mode 100644 index 148e5e8..0000000 --- a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys-avr/subterranean.h +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
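For orientation, an editor's usage sketch of the AEAD entry points implemented above (not part of the patch): encrypt a short message with its associated data, then decrypt and verify it again. Buffer sizes follow the 16-byte key, nonce and tag documented for Subterranean; the all-zero key and nonce are placeholders for the demo.

#include <string.h>
#include "subterranean.h"

static int subterranean_round_trip(void)
{
    static const unsigned char msg[] = "hello";
    static const unsigned char ad[]  = "header";
    unsigned char key[SUBTERRANEAN_KEY_SIZE] = {0};      /* demo key only */
    unsigned char nonce[SUBTERRANEAN_NONCE_SIZE] = {0};  /* never reuse a nonce in practice */
    unsigned char ct[sizeof(msg) + SUBTERRANEAN_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    subterranean_aead_encrypt(ct, &ctlen, msg, sizeof(msg), ad, sizeof(ad),
                              0, nonce, key);
    if (subterranean_aead_decrypt(pt, &ptlen, 0, ct, ctlen, ad, sizeof(ad),
                                  nonce, key) != 0)
        return -1;                          /* authentication tag mismatch */
    return memcmp(pt, msg, (size_t)ptlen);  /* 0 on success */
}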
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_SUBTERRANEAN_H -#define LWCRYPTO_SUBTERRANEAN_H - -#include "aead-common.h" - -/** - * \file subterranean.h - * \brief Subterranean authenticated encryption algorithm. - * - * Subterranean (technically "Subterranean 2.0") is a family of - * algorithms built around the 257-bit Subterranean permutation: - * - * \li Subterranean is an authenticated encryption algorithm with a 128-bit - * key, a 128-bit nonce, and a 128-bit tag. - * \li Subterranean-Hash is a hash algorithm with a 256-bit output. - * - * The Subterranean permutation is intended for hardware implementation. - * It is not structured for efficient software implementation. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for Subterranean. - */ -#define SUBTERRANEAN_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Subterranean. - */ -#define SUBTERRANEAN_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Subterranean. - */ -#define SUBTERRANEAN_NONCE_SIZE 16 - -/** - * \brief Size of the hash output for Subterranean-Hash. - */ -#define SUBTERRANEAN_HASH_SIZE 32 - -/** - * \brief Meta-information block for the Subterranean cipher. - */ -extern aead_cipher_t const subterranean_cipher; - -/** - * \brief Meta-information block for the SUBTERRANEAN hash algorithm. - */ -extern aead_hash_algorithm_t const subterranean_hash_algorithm; - -/** - * \brief State information for the Subterreaan incremental hash mode. - */ -typedef union -{ - unsigned char state[40]; /**< Current hash state */ - unsigned long long align; /**< For alignment of this structure */ - -} subterranean_hash_state_t; - -/** - * \brief Encrypts and authenticates a packet with Subterranean. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa subterranean_aead_decrypt() - */ -int subterranean_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Subterranean. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. 
- * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa subterranean_aead_encrypt() - */ -int subterranean_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with Subterranean. - * - * \param out Buffer to receive the hash output which must be at least - * SUBTERRANEAN_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - * - * \sa subterranean_hash_init() - */ -int subterranean_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a Subterranean hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa subterranean_hash_update(), subterranean_hash_finalize(), - * subterranean_hash() - */ -void subterranean_hash_init(subterranean_hash_state_t *state); - -/** - * \brief Updates a Subterranean state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - * - * \sa subterranean_hash_init(), subterranean_hash_finalize() - */ -void subterranean_hash_update - (subterranean_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Returns the final hash value from a Subterranean hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the 32-byte hash value. - * - * \sa subterranean_hash_init(), subterranean_hash_update() - */ -void subterranean_hash_finalize - (subterranean_hash_state_t *state, unsigned char *out); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/aead-common.c b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/aead-common.c new file mode 100644 index 0000000..84fc53a --- /dev/null +++ b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/aead-common.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
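An editor's sketch of the incremental hashing API documented above (not part of the patch): feeding the input in two pieces through init/update/finalize is expected to give the same 32-byte digest as the one-shot subterranean_hash() call, since both absorb the message byte by byte.

#include <string.h>
#include "subterranean.h"

static int incremental_matches_oneshot(const unsigned char *msg, unsigned long long len)
{
    unsigned char d1[SUBTERRANEAN_HASH_SIZE], d2[SUBTERRANEAN_HASH_SIZE];
    subterranean_hash_state_t st;

    subterranean_hash(d1, msg, len);                        /* one-shot */

    subterranean_hash_init(&st);                            /* incremental */
    subterranean_hash_update(&st, msg, len / 2);
    subterranean_hash_update(&st, msg + len / 2, len - len / 2);
    subterranean_hash_finalize(&st, d2);

    return memcmp(d1, d2, sizeof(d1));                      /* 0 if they agree */
}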
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "aead-common.h" + +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = (accum - 1) >> 8; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} + +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size, int precheck) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = ((accum - 1) >> 8) & precheck; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} diff --git a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/aead-common.h b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/aead-common.h new file mode 100644 index 0000000..2be95eb --- /dev/null +++ b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/aead-common.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
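A short editor's note on the constant-time mask trick used in aead_check_tag() above, assuming the usual arithmetic right shift of negative int values (the same assumption the code itself makes): when the tags match, accum stays 0 and (0 - 1) >> 8 yields -1 (all bits set), so the plaintext is left intact and ~(-1) == 0 is returned; when they differ, accum ends up in 1..255, (accum - 1) >> 8 yields 0, the plaintext is wiped and ~0 == -1 is returned. No branch ever depends on the secret comparison result.

#include <assert.h>

static void constant_time_mask_demo(void)
{
    int equal = 0, differ = 0x5A;            /* example accumulator values */
    assert(((equal  - 1) >> 8) == -1);       /* mask that preserves the plaintext */
    assert(((differ - 1) >> 8) ==  0);       /* mask that zeroes the plaintext */
}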
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_AEAD_COMMON_H +#define LWCRYPTO_AEAD_COMMON_H + +#include + +/** + * \file aead-common.h + * \brief Definitions that are common across AEAD schemes. + * + * AEAD stands for "Authenticated Encryption with Associated Data". + * It is a standard API pattern for securely encrypting and + * authenticating packets of data. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts and authenticates a packet with an AEAD scheme. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + */ +typedef int (*aead_cipher_encrypt_t) + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with an AEAD scheme. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + */ +typedef int (*aead_cipher_decrypt_t) + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data. + * + * \param out Buffer to receive the hash output. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +typedef int (*aead_hash_t) + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a hashing operation. + * + * \param state Hash state to be initialized. 
+ */ +typedef void (*aead_hash_init_t)(void *state); + +/** + * \brief Updates a hash state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + */ +typedef void (*aead_hash_update_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Returns the final hash value from a hashing operation. + * + * \param Hash state to be finalized. + * \param out Points to the output buffer to receive the hash value. + */ +typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); + +/** + * \brief Aborbs more input data into an XOF state. + * + * \param state XOF state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. + * + * \sa ascon_xof_init(), ascon_xof_squeeze() + */ +typedef void (*aead_xof_absorb_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Squeezes output data from an XOF state. + * + * \param state XOF state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + */ +typedef void (*aead_xof_squeeze_t) + (void *state, unsigned char *out, unsigned long long outlen); + +/** + * \brief No special AEAD features. + */ +#define AEAD_FLAG_NONE 0x0000 + +/** + * \brief The natural byte order of the AEAD cipher is little-endian. + * + * If this flag is not present, then the natural byte order of the + * AEAD cipher should be assumed to be big-endian. + * + * The natural byte order may be useful when formatting packet sequence + * numbers as nonces. The application needs to know whether the sequence + * number should be packed into the leading or trailing bytes of the nonce. + */ +#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 + +/** + * \brief Meta-information about an AEAD cipher. + */ +typedef struct +{ + const char *name; /**< Name of the cipher */ + unsigned key_len; /**< Length of the key in bytes */ + unsigned nonce_len; /**< Length of the nonce in bytes */ + unsigned tag_len; /**< Length of the tag in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ + aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ + +} aead_cipher_t; + +/** + * \brief Meta-information about a hash algorithm that is related to an AEAD. + * + * Regular hash algorithms should provide the "hash", "init", "update", + * and "finalize" functions. Extensible Output Functions (XOF's) should + * proivde the "hash", "init", "absorb", and "squeeze" functions. 
+ */ +typedef struct +{ + const char *name; /**< Name of the hash algorithm */ + size_t state_size; /**< Size of the incremental state structure */ + unsigned hash_len; /**< Length of the hash in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_hash_t hash; /**< All in one hashing function */ + aead_hash_init_t init; /**< Incremental hash/XOF init function */ + aead_hash_update_t update; /**< Incremental hash update function */ + aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ + aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ + aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ + +} aead_hash_algorithm_t; + +/** + * \brief Check an authentication tag in constant time. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + */ +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len); + +/** + * \brief Check an authentication tag in constant time with a previous check. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * \param precheck Set to -1 if previous check succeeded or 0 if it failed. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + * + * This version can be used to incorporate other information about the + * correctness of the plaintext into the final result. 
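An editor's sketch of how the meta-information blocks defined above can be used generically (not part of the patch; print_cipher_info is a hypothetical helper): a driver can size its buffers from the declared lengths and invoke the algorithm through the stored function pointers without knowing which concrete cipher it holds.

#include <stdio.h>
#include "aead-common.h"

static void print_cipher_info(const aead_cipher_t *cipher)
{
    printf("%s: key=%u nonce=%u tag=%u bytes, %s-endian nonce convention\n",
           cipher->name, cipher->key_len, cipher->nonce_len, cipher->tag_len,
           (cipher->flags & AEAD_FLAG_LITTLE_ENDIAN) ? "little" : "big");
    /* cipher->encrypt(...) and cipher->decrypt(...) take the same arguments
     * as the concrete subterranean_aead_encrypt/decrypt functions. */
}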
+ */ +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len, int precheck); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/api.h b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/api.h new file mode 100644 index 0000000..ae8c7f6 --- /dev/null +++ b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/api.h @@ -0,0 +1 @@ +#define CRYPTO_BYTES 32 diff --git a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/hash.c b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/hash.c new file mode 100644 index 0000000..250ae68 --- /dev/null +++ b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/hash.c @@ -0,0 +1,8 @@ + +#include "subterranean.h" + +int crypto_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + return subterranean_hash(out, in, inlen); +} diff --git a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/internal-subterranean.c b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/internal-subterranean.c new file mode 100644 index 0000000..1cb64e2 --- /dev/null +++ b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/internal-subterranean.c @@ -0,0 +1,441 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "internal-subterranean.h" +#include + +void subterranean_round(subterranean_state_t *state) +{ + uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8; + uint32_t t0, t1; + + /* Load the state up into local variables */ + x0 = state->x[0]; + x1 = state->x[1]; + x2 = state->x[2]; + x3 = state->x[3]; + x4 = state->x[4]; + x5 = state->x[5]; + x6 = state->x[6]; + x7 = state->x[7]; + x8 = state->x[8]; + + /* Step chi: s[i] = s[i] ^ (~(s[i+1) & s[i+2]) */ + #define CHI(a, b) \ + do { \ + t0 = ((a) >> 1) | ((b) << 31); \ + t1 = ((a) >> 2) | ((b) << 30); \ + (a) ^= (~t0) & t1; \ + } while (0) + x8 ^= (x0 << 1); + CHI(x0, x1); CHI(x1, x2); + CHI(x2, x3); CHI(x3, x4); + CHI(x4, x5); CHI(x5, x6); + CHI(x6, x7); CHI(x7, x8); + x8 ^= (~(x8 >> 1)) & (x8 >> 2); + + /* Step itoa: invert s[0] */ + x0 ^= 1U; + + /* Step theta: s[i] = s[i] ^ s[i + 3] ^ s[i + 8] */ + #define THETA(a, b) \ + do { \ + t0 = ((a) >> 3) | ((b) << 29); \ + t1 = ((a) >> 8) | ((b) << 24); \ + (a) ^= t0 ^ t1; \ + } while (0) + x8 = (x8 & 1U) ^ (x0 << 1); + THETA(x0, x1); THETA(x1, x2); + THETA(x2, x3); THETA(x3, x4); + THETA(x4, x5); THETA(x5, x6); + THETA(x6, x7); THETA(x7, x8); + x8 ^= (x8 >> 3) ^ (x8 >> 8); + + /* Step pi: permute the bits with the rule s[i] = s[(i * 12) % 257]. + * BCP = bit copy, BUP = move bit up, BDN = move bit down */ + #define BCP(x, bit) ((x) & (((uint32_t)1) << (bit))) + #define BUP(x, from, to) \ + (((x) << ((to) - (from))) & (((uint32_t)1) << (to))) + #define BDN(x, from, to) \ + (((x) >> ((from) - (to))) & (((uint32_t)1) << (to))) + state->x[0] = BCP(x0, 0) ^ BDN(x0, 12, 1) ^ BDN(x0, 24, 2) ^ + BDN(x1, 4, 3) ^ BDN(x1, 16, 4) ^ BDN(x1, 28, 5) ^ + BDN(x2, 8, 6) ^ BDN(x2, 20, 7) ^ BUP(x3, 0, 8) ^ + BDN(x3, 12, 9) ^ BDN(x3, 24, 10) ^ BUP(x4, 4, 11) ^ + BDN(x4, 16, 12) ^ BDN(x4, 28, 13) ^ BUP(x5, 8, 14) ^ + BDN(x5, 20, 15) ^ BUP(x6, 0, 16) ^ BUP(x6, 12, 17) ^ + BDN(x6, 24, 18) ^ BUP(x7, 4, 19) ^ BUP(x7, 16, 20) ^ + BDN(x7, 28, 21) ^ BUP(x0, 7, 22) ^ BUP(x0, 19, 23) ^ + BDN(x0, 31, 24) ^ BUP(x1, 11, 25) ^ BUP(x1, 23, 26) ^ + BUP(x2, 3, 27) ^ BUP(x2, 15, 28) ^ BUP(x2, 27, 29) ^ + BUP(x3, 7, 30) ^ BUP(x3, 19, 31); + state->x[1] = BDN(x3, 31, 0) ^ BDN(x4, 11, 1) ^ BDN(x4, 23, 2) ^ + BCP(x5, 3) ^ BDN(x5, 15, 4) ^ BDN(x5, 27, 5) ^ + BDN(x6, 7, 6) ^ BDN(x6, 19, 7) ^ BDN(x6, 31, 8) ^ + BDN(x7, 11, 9) ^ BDN(x7, 23, 10) ^ BUP(x0, 2, 11) ^ + BDN(x0, 14, 12) ^ BDN(x0, 26, 13) ^ BUP(x1, 6, 14) ^ + BDN(x1, 18, 15) ^ BDN(x1, 30, 16) ^ BUP(x2, 10, 17) ^ + BDN(x2, 22, 18) ^ BUP(x3, 2, 19) ^ BUP(x3, 14, 20) ^ + BDN(x3, 26, 21) ^ BUP(x4, 6, 22) ^ BUP(x4, 18, 23) ^ + BDN(x4, 30, 24) ^ BUP(x5, 10, 25) ^ BUP(x5, 22, 26) ^ + BUP(x6, 2, 27) ^ BUP(x6, 14, 28) ^ BUP(x6, 26, 29) ^ + BUP(x7, 6, 30) ^ BUP(x7, 18, 31); + state->x[2] = BDN(x7, 30, 0) ^ BDN(x0, 9, 1) ^ BDN(x0, 21, 2) ^ + BUP(x1, 1, 3) ^ BDN(x1, 13, 4) ^ BDN(x1, 25, 5) ^ + BUP(x2, 5, 6) ^ BDN(x2, 17, 7) ^ BDN(x2, 29, 8) ^ + BCP(x3, 9) ^ BDN(x3, 21, 10) ^ BUP(x4, 1, 11) ^ + BDN(x4, 13, 12) ^ BDN(x4, 25, 13) ^ BUP(x5, 5, 14) ^ + BDN(x5, 17, 15) ^ BDN(x5, 29, 16) ^ BUP(x6, 9, 17) ^ + BDN(x6, 21, 18) ^ BUP(x7, 1, 19) ^ BUP(x7, 13, 20) ^ + BDN(x7, 25, 21) ^ BUP(x0, 4, 22) ^ BUP(x0, 16, 23) ^ + BDN(x0, 28, 24) ^ BUP(x1, 8, 25) ^ BUP(x1, 20, 26) ^ + BUP(x2, 0, 27) ^ BUP(x2, 12, 28) ^ BUP(x2, 24, 29) ^ + BUP(x3, 4, 30) ^ BUP(x3, 16, 31); + state->x[3] = BDN(x3, 28, 0) ^ BDN(x4, 8, 1) ^ BDN(x4, 20, 2) ^ + BUP(x5, 0, 3) ^ BDN(x5, 12, 4) ^ BDN(x5, 24, 5) ^ + BUP(x6, 4, 6) ^ BDN(x6, 16, 7) ^ BDN(x6, 28, 8) ^ + BUP(x7, 8, 9) ^ BDN(x7, 20, 10) ^ BUP(x8, 0, 11) ^ + BUP(x0, 11, 12) ^ 
BDN(x0, 23, 13) ^ BUP(x1, 3, 14) ^ + BCP(x1, 15) ^ BDN(x1, 27, 16) ^ BUP(x2, 7, 17) ^ + BDN(x2, 19, 18) ^ BDN(x2, 31, 19) ^ BUP(x3, 11, 20) ^ + BDN(x3, 23, 21) ^ BUP(x4, 3, 22) ^ BUP(x4, 15, 23) ^ + BDN(x4, 27, 24) ^ BUP(x5, 7, 25) ^ BUP(x5, 19, 26) ^ + BDN(x5, 31, 27) ^ BUP(x6, 11, 28) ^ BUP(x6, 23, 29) ^ + BUP(x7, 3, 30) ^ BUP(x7, 15, 31); + state->x[4] = BDN(x7, 27, 0) ^ BDN(x0, 6, 1) ^ BDN(x0, 18, 2) ^ + BDN(x0, 30, 3) ^ BDN(x1, 10, 4) ^ BDN(x1, 22, 5) ^ + BUP(x2, 2, 6) ^ BDN(x2, 14, 7) ^ BDN(x2, 26, 8) ^ + BUP(x3, 6, 9) ^ BDN(x3, 18, 10) ^ BDN(x3, 30, 11) ^ + BUP(x4, 10, 12) ^ BDN(x4, 22, 13) ^ BUP(x5, 2, 14) ^ + BUP(x5, 14, 15) ^ BDN(x5, 26, 16) ^ BUP(x6, 6, 17) ^ + BCP(x6, 18) ^ BDN(x6, 30, 19) ^ BUP(x7, 10, 20) ^ + BDN(x7, 22, 21) ^ BUP(x0, 1, 22) ^ BUP(x0, 13, 23) ^ + BDN(x0, 25, 24) ^ BUP(x1, 5, 25) ^ BUP(x1, 17, 26) ^ + BDN(x1, 29, 27) ^ BUP(x2, 9, 28) ^ BUP(x2, 21, 29) ^ + BUP(x3, 1, 30) ^ BUP(x3, 13, 31); + state->x[5] = BDN(x3, 25, 0) ^ BDN(x4, 5, 1) ^ BDN(x4, 17, 2) ^ + BDN(x4, 29, 3) ^ BDN(x5, 9, 4) ^ BDN(x5, 21, 5) ^ + BUP(x6, 1, 6) ^ BDN(x6, 13, 7) ^ BDN(x6, 25, 8) ^ + BUP(x7, 5, 9) ^ BDN(x7, 17, 10) ^ BDN(x7, 29, 11) ^ + BUP(x0, 8, 12) ^ BDN(x0, 20, 13) ^ BUP(x1, 0, 14) ^ + BUP(x1, 12, 15) ^ BDN(x1, 24, 16) ^ BUP(x2, 4, 17) ^ + BUP(x2, 16, 18) ^ BDN(x2, 28, 19) ^ BUP(x3, 8, 20) ^ + BUP(x3, 20, 21) ^ BUP(x4, 0, 22) ^ BUP(x4, 12, 23) ^ + BCP(x4, 24) ^ BUP(x5, 4, 25) ^ BUP(x5, 16, 26) ^ + BDN(x5, 28, 27) ^ BUP(x6, 8, 28) ^ BUP(x6, 20, 29) ^ + BUP(x7, 0, 30) ^ BUP(x7, 12, 31); + state->x[6] = BDN(x7, 24, 0) ^ BDN(x0, 3, 1) ^ BDN(x0, 15, 2) ^ + BDN(x0, 27, 3) ^ BDN(x1, 7, 4) ^ BDN(x1, 19, 5) ^ + BDN(x1, 31, 6) ^ BDN(x2, 11, 7) ^ BDN(x2, 23, 8) ^ + BUP(x3, 3, 9) ^ BDN(x3, 15, 10) ^ BDN(x3, 27, 11) ^ + BUP(x4, 7, 12) ^ BDN(x4, 19, 13) ^ BDN(x4, 31, 14) ^ + BUP(x5, 11, 15) ^ BDN(x5, 23, 16) ^ BUP(x6, 3, 17) ^ + BUP(x6, 15, 18) ^ BDN(x6, 27, 19) ^ BUP(x7, 7, 20) ^ + BUP(x7, 19, 21) ^ BDN(x7, 31, 22) ^ BUP(x0, 10, 23) ^ + BUP(x0, 22, 24) ^ BUP(x1, 2, 25) ^ BUP(x1, 14, 26) ^ + BUP(x1, 26, 27) ^ BUP(x2, 6, 28) ^ BUP(x2, 18, 29) ^ + BCP(x2, 30) ^ BUP(x3, 10, 31); + state->x[7] = BDN(x3, 22, 0) ^ BDN(x4, 2, 1) ^ BDN(x4, 14, 2) ^ + BDN(x4, 26, 3) ^ BDN(x5, 6, 4) ^ BDN(x5, 18, 5) ^ + BDN(x5, 30, 6) ^ BDN(x6, 10, 7) ^ BDN(x6, 22, 8) ^ + BUP(x7, 2, 9) ^ BDN(x7, 14, 10) ^ BDN(x7, 26, 11) ^ + BUP(x0, 5, 12) ^ BDN(x0, 17, 13) ^ BDN(x0, 29, 14) ^ + BUP(x1, 9, 15) ^ BDN(x1, 21, 16) ^ BUP(x2, 1, 17) ^ + BUP(x2, 13, 18) ^ BDN(x2, 25, 19) ^ BUP(x3, 5, 20) ^ + BUP(x3, 17, 21) ^ BDN(x3, 29, 22) ^ BUP(x4, 9, 23) ^ + BUP(x4, 21, 24) ^ BUP(x5, 1, 25) ^ BUP(x5, 13, 26) ^ + BUP(x5, 25, 27) ^ BUP(x6, 5, 28) ^ BUP(x6, 17, 29) ^ + BUP(x6, 29, 30) ^ BUP(x7, 9, 31); + state->x[8] = BDN(x7, 21, 0); +} + +void subterranean_blank(subterranean_state_t *state) +{ + unsigned round; + for (round = 0; round < 8; ++round) { + subterranean_round(state); + state->x[0] ^= 0x02; /* padding for an empty block is in state bit 1 */ + } +} + +void subterranean_duplex_0(subterranean_state_t *state) +{ + subterranean_round(state); + state->x[0] ^= 0x02; /* padding for an empty block is in state bit 1 */ +} + +void subterranean_duplex_1(subterranean_state_t *state, unsigned char data) +{ + uint32_t x = data; + + /* Perform a single Subterranean round before absorbing the bits */ + subterranean_round(state); + + /* Rearrange the bits and absorb them into the state */ + state->x[0] ^= (x << 1) & 0x00000002U; + state->x[1] ^= x & 0x00000008U; + state->x[2] ^= 0x00000001U; /* 9th padding bit is always 1 */ + state->x[4] ^= ((x << 
6) & 0x00000100U) ^ ((x << 1) & 0x00000040U); + state->x[5] ^= (x << 15) & 0x00010000U; + state->x[6] ^= (x >> 1) & 0x00000020U; + state->x[7] ^= ((x << 21) & 0x02000000U) ^ ((x << 3) & 0x00000400U); +} + +void subterranean_duplex_word(subterranean_state_t *state, uint32_t x) +{ + uint32_t y; + + /* Perform a single Subterranean round before absorbing the bits */ + subterranean_round(state); + + /* To absorb the word into the state, we first rearrange the source + * bits to be in the right target bit positions. Then we mask and + * XOR them into the relevant words of the state. + * + * Some of the source bits end up in the same target bit but a different + * word so we have to permute the input word twice to get all the source + * bits into the locations we want for masking and XOR'ing. + * + * Permutations generated with "http://programming.sirrida.de/calcperm.php". + */ + + /* P1 = [1 16 8 3 25 * * 10 0 21 * 24 2 31 15 6 * 11 9 19 * * 29 * 4 * 30 12 * 22 17 5] */ + y = (x & 0x00080008U) + | ((x & 0x00004001U) << 1) + | ((x & 0x00000080U) << 3) + | ((x & 0x04000000U) << 4) + | leftRotate6(x & 0x80000004U) + | ((x & 0x00400000U) << 7) + | leftRotate12(x & 0x01000200U) + | ((x & 0x00000800U) << 13) + | ((x & 0x00000002U) << 15) + | ((x & 0x08000000U) >> 15) + | ((x & 0x00002000U) << 18) + | ((x & 0x40000000U) >> 13) + | ((x & 0x00000010U) << 21) + | ((x & 0x00001000U) >> 10) + | ((x & 0x00048000U) >> 9) + | ((x & 0x00000100U) >> 8) + | ((x & 0x20000000U) >> 7) + | ((x & 0x00020000U) >> 6); + + /* P2 = [* * * * * 6 5 * * * 31 * * * * * 17 * * * 0 9 * 15 * 30 * * 1 * * *] */ + x = ((x & 0x00010020U) << 1) + | leftRotate5(x & 0x12000000U) + | ((x & 0x00100000U) >> 20) + | ((x & 0x00200000U) >> 12) + | ((x & 0x00000400U) << 21) + | ((x & 0x00800000U) >> 8) + | ((x & 0x00000040U) >> 1); + + /* Integrate the rearranged bits into the state */ + state->x[0] ^= (y & 0x40428816U); + state->x[1] ^= (y & 0x00000008U); + state->x[2] ^= (y & 0x80000041U); + state->x[3] ^= (x & 0x00008000U); + state->x[4] ^= (y & 0x00001300U) ^ (x & 0x00000041U); + state->x[5] ^= (y & 0x21010020U) ^ (x & 0x40000200U); + state->x[6] ^= (y & 0x00280000U) ^ (x & 0x80000020U); + state->x[7] ^= (y & 0x02000400U) ^ (x & 0x00020002U); +} + +void subterranean_duplex_n + (subterranean_state_t *state, const unsigned char *data, unsigned len) +{ + switch (len) { + case 0: + subterranean_duplex_0(state); + break; + case 1: + subterranean_duplex_1(state, data[0]); + break; + case 2: + /* Load 16 bits and add the padding bit to the 17th bit */ + subterranean_duplex_word + (state, ((uint32_t)(data[0]) | + (((uint32_t)(data[1])) << 8) | + 0x10000U)); + break; + case 3: + /* Load 24 bits and add the padding bit to the 25th bit */ + subterranean_duplex_word + (state, ((uint32_t)(data[0]) | + (((uint32_t)(data[1])) << 8) | + (((uint32_t)(data[2])) << 16) | + 0x01000000U)); + break; + default: + /* Load 32 bits and add the padding bit to the 33rd bit */ + subterranean_duplex_word(state, le_load_word32(data)); + state->x[8] ^= 0x00000001U; + break; + } +} + +uint32_t subterranean_extract(subterranean_state_t *state) +{ + uint32_t x, y; + + /* We need to extract 64 bits from the state, and then XOR the two + * halves together to get the result. + * + * Extract words from the state and permute the bits into the target + * bit order. Then mask off the unnecessary bits and combine. + * + * Permutations generated with "http://programming.sirrida.de/calcperm.php". 
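/* [Editor's note: illustrative sketch, not part of the original patch.]
 * A slow reference version of the "pi" step quoted in subterranean_round()
 * above, s[i] = s[(i * 12) % 257], operating bit by bit on the nine-word
 * state.  It only exists to make the rule behind the unrolled BCP/BUP/BDN
 * table and the permutation masks explicit; the helper names are
 * hypothetical and <stdint.h> is assumed for uint32_t. */
static unsigned ref_get_bit(const uint32_t s[9], unsigned i)
{
    return (s[i >> 5] >> (i & 31)) & 1U;           /* bit i of the 257-bit state */
}
static void ref_pi(uint32_t out[9], const uint32_t in[9])
{
    unsigned i;
    for (i = 0; i < 9; ++i)
        out[i] = 0;
    for (i = 0; i < 257; ++i) {                    /* new bit i comes from old bit (12 * i) % 257 */
        if (ref_get_bit(in, (i * 12U) % 257U))
            out[i >> 5] |= ((uint32_t)1) << (i & 31);
    }
}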
+ */ + + /* P0 = [* 0 12 * 24 * * * 4 * * 17 * * * 14 16 30 * * * * 29 7 * * * * * * 26 *] */ + x = state->x[0]; + x = (x & 0x00010000U) + | ((x & 0x00000800U) << 6) + | ((x & 0x00400000U) << 7) + | ((x & 0x00000004U) << 10) + | ((x & 0x00020000U) << 13) + | ((x & 0x00800000U) >> 16) + | ((x & 0x00000010U) << 20) + | ((x & 0x40000100U) >> 4) + | ((x & 0x00008002U) >> 1); + y = x & 0x65035091U; + + /* P1 = [28 * 10 3 * * * * * * * * 9 * 19 * * * * * * * * * * * * * 6 * * *] */ + x = state->x[1]; + x = (x & 0x00000008U) + | ((x & 0x00004000U) << 5) + | ((x & 0x00000004U) << 8) + | ((x & 0x10000000U) >> 22) + | ((x & 0x00000001U) << 28) + | ((x & 0x00001000U) >> 3); + y ^= x & 0x10080648U; + + /* P2 = [8 * * 25 22 * 15 * * 11 * * * * * * * 1 * * * * * * 21 * * * 31 * * 13] */ + x = state->x[2]; + x = ((x & 0x00000200U) << 2) + | ((x & 0x10000000U) << 3) + | ((x & 0x00000001U) << 8) + | ((x & 0x00000040U) << 9) + | ((x & 0x80000000U) >> 18) + | ((x & 0x00020000U) >> 16) + | ((x & 0x00000010U) << 18) + | ((x & 0x00000008U) << 22) + | ((x & 0x01000000U) >> 3); + y ^= x & 0x8260a902U; + + /* P3 = [* * * * * * * * * * * * * * * 23 * * * * * 27 * * 18 2 * 5 * * * *] */ + x = state->x[3]; + x = ((x & 0x00200000U) << 6) + | ((x & 0x00008000U) << 8) + | ((x & 0x02000000U) >> 23) + | ((x & 0x08000000U) >> 22) + | ((x & 0x01000000U) >> 6); + y ^= x & 0x08840024U; + + /* P4 = [20 20 * * * * 5 * 2 18 * * 27 * * * * * 23 * * * * * * * * * * * * *] */ + x = state->x[4]; + y ^= (x << 20) & 0x00100000U; /* Handle duplicated bit 20 separately */ + x = ((x & 0x00040000U) << 5) + | ((x & 0x00000200U) << 9) + | ((x & 0x00001000U) << 15) + | ((x & 0x00000002U) << 19) + | ((x & 0x00000100U) >> 6) + | ((x & 0x00000040U) >> 1); + y ^= x & 0x08940024U; + + /* P5 = [* * 13 * * 31 * * * 21 * * * * * * 1 * * * * * * * 11 * * 15 * 22 25 *] */ + x = state->x[5]; + x = ((x & 0x00000004U) << 11) + | ((x & 0x00000200U) << 12) + | ((x & 0x00010000U) >> 15) + | ((x & 0x01000000U) >> 13) + | ((x & 0x08000000U) >> 12) + | ((x & 0x20000000U) >> 7) + | ((x & 0x00000020U) << 26) + | ((x & 0x40000000U) >> 5); + y ^= x & 0x8260a802U; + + /* P6 = [* 8 * * * 6 * * * * * * * * * * * * * 19 * 9 * * * * * * * * 3 10] */ + x = state->x[6]; + x = (x & 0x00080000U) + | ((x & 0x00000020U) << 1) + | ((x & 0x40000000U) >> 27) + | ((x & 0x00000002U) << 7) + | ((x & 0x80000000U) >> 21) + | ((x & 0x00200000U) >> 12); + y ^= x & 0x00080748U; + + /* P7 = [* 28 * 26 * * * * * * 7 29 * * * * 30 16 14 * * * 17 * * 4 * * * 24 * 12] */ + x = state->x[7]; + x = ((x & 0x02000000U) >> 21) + | ((x & 0x80000000U) >> 19) + | ((x & 0x00010000U) << 14) + | ((x & 0x00000800U) << 18) + | ((x & 0x00000008U) << 23) + | leftRotate27(x & 0x20400002U) + | ((x & 0x00040000U) >> 4) + | ((x & 0x00000400U) >> 3) + | ((x & 0x00020000U) >> 1); + y ^= x & 0x75035090U; + + /* Word 8 has a single bit - XOR it directly into the result and return */ + return y ^ state->x[8]; +} + +void subterranean_absorb + (subterranean_state_t *state, const unsigned char *data, + unsigned long long len) +{ + while (len >= 4) { + subterranean_duplex_4(state, data); + data += 4; + len -= 4; + } + subterranean_duplex_n(state, data, (unsigned)len); +} + +void subterranean_squeeze + (subterranean_state_t *state, unsigned char *data, unsigned len) +{ + uint32_t word; + while (len > 4) { + word = subterranean_extract(state); + subterranean_duplex_0(state); + le_store_word32(data, word); + data += 4; + len -= 4; + } + if (len == 4) { + word = subterranean_extract(state); + le_store_word32(data, word); + 
} else if (len == 1) { + word = subterranean_extract(state); + data[0] = (unsigned char)word; + } else if (len == 2) { + word = subterranean_extract(state); + data[0] = (unsigned char)word; + data[1] = (unsigned char)(word >> 8); + } else if (len == 3) { + word = subterranean_extract(state); + data[0] = (unsigned char)word; + data[1] = (unsigned char)(word >> 8); + data[2] = (unsigned char)(word >> 16); + } +} diff --git a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/internal-subterranean.h b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/internal-subterranean.h new file mode 100644 index 0000000..71cebb2 --- /dev/null +++ b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/internal-subterranean.h @@ -0,0 +1,144 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_SUBTERRANEAN_H +#define LW_INTERNAL_SUBTERRANEAN_H + +#include "internal-util.h" + +/** + * \file internal-subterranean.h + * \brief Internal implementation of the Subterranean block operation. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Representation of the 257-bit state of Subterranean. + * + * The 257-bit state is represented as nine 32-bit words with only a single + * bit in the last word. + */ +typedef struct +{ + uint32_t x[9]; /**< State words */ + +} subterranean_state_t; + +/** + * \brief Performs a single Subterranean round. + * + * \param state Subterranean state to be transformed. + */ +void subterranean_round(subterranean_state_t *state); + +/** + * \brief Performs 8 Subterranean rounds with no absorption or squeezing + * of data; i.e. data input and output is "blanked". + * + * \param state Subterranean state to be transformed. + */ +void subterranean_blank(subterranean_state_t *state); + +/** + * \brief Performs a single Subterranean round and absorbs 0 bytes. + * + * \param state Subterranean state to be transformed. + */ +void subterranean_duplex_0(subterranean_state_t *state); + +/** + * \brief Performs a single Subterranean round and absorbs one byte. + * + * \param state Subterranean state to be transformed. + * \param data The single byte to be absorbed. + */ +void subterranean_duplex_1(subterranean_state_t *state, unsigned char data); + +/** + * \brief Absorbs a 32-bit word into the Subterranean state. + * + * \param state Subterranean state to be transformed. + * \param x The word to absorb into the state. 
+ */ +void subterranean_duplex_word(subterranean_state_t *state, uint32_t x); + +/** + * \brief Performs a single Subterranean round and absorbs four bytes. + * + * \param state Subterranean state to be transformed. + * \param data Points to the four data bytes to be absorbed. + */ +#define subterranean_duplex_4(state, data) \ + do { \ + subterranean_duplex_word((state), le_load_word32((data))); \ + (state)->x[8] ^= 1; \ + } while (0) + +/** + * \brief Performs a single Subterranean round and absorbs between + * zero and four bytes. + * + * \param state Subterranean state to be transformed. + * \param data Points to the data bytes to be absorbed. + * \param len Length of the data to be absorbed. + */ +void subterranean_duplex_n + (subterranean_state_t *state, const unsigned char *data, unsigned len); + +/** + * \brief Extracts 32 bits of output from the Subterranean state. + * + * \param state Subterranean state to extract the output from. + * + * \return Returns the 32-bit word that was extracted. + */ +uint32_t subterranean_extract(subterranean_state_t *state); + +/** + * \brief Absorbs an arbitrary amount of data, four bytes at a time. + * + * \param state Subterranean state to be transformed. + * \param data Points to the bytes to be absorbed. + * \param len Number of bytes to absorb. + */ +void subterranean_absorb + (subterranean_state_t *state, const unsigned char *data, + unsigned long long len); + +/** + * \brief Squeezes an arbitrary amount of data out of a Subterranean state. + * + * \param state Subterranean state to extract the output from. + * \param data Points to the data buffer to receive the output. + * \param len Number of bytes to be extracted. + */ +void subterranean_squeeze + (subterranean_state_t *state, unsigned char *data, unsigned len); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/internal-util.h b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/internal-util.h new file mode 100644 index 0000000..e30166d --- /dev/null +++ b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/internal-util.h @@ -0,0 +1,702 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef LW_INTERNAL_UTIL_H +#define LW_INTERNAL_UTIL_H + +#include + +/* Figure out how to inline functions using this C compiler */ +#if defined(__STDC__) && __STDC_VERSION__ >= 199901L +#define STATIC_INLINE static inline +#elif defined(__GNUC__) || defined(__clang__) +#define STATIC_INLINE static __inline__ +#else +#define STATIC_INLINE static +#endif + +/* Try to figure out whether the CPU is little-endian or big-endian. + * May need to modify this to include new compiler-specific defines. + * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your + * compiler flags when you compile this library */ +#if defined(__x86_64) || defined(__x86_64__) || \ + defined(__i386) || defined(__i386__) || \ + defined(__AVR__) || defined(__arm) || defined(__arm__) || \ + defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ + defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ + defined(__LITTLE_ENDIAN__) +#define LW_UTIL_LITTLE_ENDIAN 1 +#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ + defined(__BIG_ENDIAN__) +/* Big endian */ +#else +#error "Cannot determine the endianess of this platform" +#endif + +/* Helper macros to load and store values while converting endian-ness */ + +/* Load a big-endian 32-bit word from a byte buffer */ +#define be_load_word32(ptr) \ + ((((uint32_t)((ptr)[0])) << 24) | \ + (((uint32_t)((ptr)[1])) << 16) | \ + (((uint32_t)((ptr)[2])) << 8) | \ + ((uint32_t)((ptr)[3]))) + +/* Store a big-endian 32-bit word into a byte buffer */ +#define be_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 24); \ + (ptr)[1] = (uint8_t)(_x >> 16); \ + (ptr)[2] = (uint8_t)(_x >> 8); \ + (ptr)[3] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 32-bit word from a byte buffer */ +#define le_load_word32(ptr) \ + ((((uint32_t)((ptr)[3])) << 24) | \ + (((uint32_t)((ptr)[2])) << 16) | \ + (((uint32_t)((ptr)[1])) << 8) | \ + ((uint32_t)((ptr)[0]))) + +/* Store a little-endian 32-bit word into a byte buffer */ +#define le_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + } while (0) + +/* Load a big-endian 64-bit word from a byte buffer */ +#define be_load_word64(ptr) \ + ((((uint64_t)((ptr)[0])) << 56) | \ + (((uint64_t)((ptr)[1])) << 48) | \ + (((uint64_t)((ptr)[2])) << 40) | \ + (((uint64_t)((ptr)[3])) << 32) | \ + (((uint64_t)((ptr)[4])) << 24) | \ + (((uint64_t)((ptr)[5])) << 16) | \ + (((uint64_t)((ptr)[6])) << 8) | \ + ((uint64_t)((ptr)[7]))) + +/* Store a big-endian 64-bit word into a byte buffer */ +#define be_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 56); \ + (ptr)[1] = (uint8_t)(_x >> 48); \ + (ptr)[2] = (uint8_t)(_x >> 40); \ + (ptr)[3] = (uint8_t)(_x >> 32); \ + (ptr)[4] = (uint8_t)(_x >> 24); \ + (ptr)[5] = (uint8_t)(_x >> 16); \ + (ptr)[6] = (uint8_t)(_x >> 8); \ + (ptr)[7] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 64-bit word from a byte buffer */ +#define le_load_word64(ptr) \ + ((((uint64_t)((ptr)[7])) << 56) | \ + (((uint64_t)((ptr)[6])) << 48) | \ + (((uint64_t)((ptr)[5])) << 40) | \ + (((uint64_t)((ptr)[4])) << 32) | \ + (((uint64_t)((ptr)[3])) << 24) | \ + (((uint64_t)((ptr)[2])) << 16) | \ + (((uint64_t)((ptr)[1])) << 8) | \ + ((uint64_t)((ptr)[0]))) + +/* Store a little-endian 64-bit word into a byte buffer */ +#define 
le_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + (ptr)[4] = (uint8_t)(_x >> 32); \ + (ptr)[5] = (uint8_t)(_x >> 40); \ + (ptr)[6] = (uint8_t)(_x >> 48); \ + (ptr)[7] = (uint8_t)(_x >> 56); \ + } while (0) + +/* Load a big-endian 16-bit word from a byte buffer */ +#define be_load_word16(ptr) \ + ((((uint16_t)((ptr)[0])) << 8) | \ + ((uint16_t)((ptr)[1]))) + +/* Store a big-endian 16-bit word into a byte buffer */ +#define be_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 8); \ + (ptr)[1] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 16-bit word from a byte buffer */ +#define le_load_word16(ptr) \ + ((((uint16_t)((ptr)[1])) << 8) | \ + ((uint16_t)((ptr)[0]))) + +/* Store a little-endian 16-bit word into a byte buffer */ +#define le_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + } while (0) + +/* XOR a source byte buffer against a destination */ +#define lw_xor_block(dest, src, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ ^= *_src++; \ + --_len; \ + } \ + } while (0) + +/* XOR two source byte buffers and put the result in a destination buffer */ +#define lw_xor_block_2_src(dest, src1, src2, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ = *_src1++ ^ *_src2++; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time */ +#define lw_xor_block_2_dest(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest2++ = (*_dest++ ^= *_src++); \ + --_len; \ + } \ + } while (0) + +/* XOR two byte buffers and write to a destination which at the same + * time copying the contents of src2 to dest2 */ +#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src2++; \ + *_dest2++ = _temp; \ + *_dest++ = *_src1++ ^ _temp; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time. This version swaps the source value + * into the "dest" buffer */ +#define lw_xor_block_swap(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src++; \ + *_dest2++ = *_dest ^ _temp; \ + *_dest++ = _temp; \ + --_len; \ + } \ + } while (0) + +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. 
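/* [Editor's note: illustrative sketch, not part of the original patch.]
 * The composed AVR rotations described in the comment above rely on
 * identities such as "rotate left by 5 == rotate left by 8, then right by 3".
 * The small self-check below illustrates one of those identities with plain
 * shifts; the helper names are hypothetical and <assert.h>/<stdint.h> are
 * assumed. */
static uint32_t sketch_rotl32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32U - bits));      /* bits must be in 1..31 here */
}
static uint32_t sketch_rotr32(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32U - bits));
}
static void sketch_check_composed_rotation(void)
{
    uint32_t x = 0x12345678U;
    assert(sketch_rotl32(x, 5) ==
           sketch_rotr32(sketch_rotr32(sketch_rotr32(sketch_rotl32(x, 8), 1), 1), 1));
}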
*/ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + +/* Rotation macros for 32-bit arguments */ + +/* Generic left rotate */ +#define leftRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (32 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (32 - (bits))); \ + })) + +#if !LW_CRYPTO_ROTATE32_COMPOSED + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1(a) (leftRotate((a), 1)) +#define leftRotate2(a) (leftRotate((a), 2)) +#define leftRotate3(a) (leftRotate((a), 3)) +#define leftRotate4(a) (leftRotate((a), 4)) +#define leftRotate5(a) (leftRotate((a), 5)) +#define leftRotate6(a) (leftRotate((a), 6)) +#define leftRotate7(a) (leftRotate((a), 7)) +#define leftRotate8(a) (leftRotate((a), 8)) +#define leftRotate9(a) (leftRotate((a), 9)) +#define leftRotate10(a) (leftRotate((a), 10)) +#define leftRotate11(a) (leftRotate((a), 11)) +#define leftRotate12(a) (leftRotate((a), 12)) +#define leftRotate13(a) (leftRotate((a), 13)) +#define leftRotate14(a) (leftRotate((a), 14)) +#define leftRotate15(a) (leftRotate((a), 15)) +#define leftRotate16(a) (leftRotate((a), 16)) +#define leftRotate17(a) (leftRotate((a), 17)) +#define leftRotate18(a) (leftRotate((a), 18)) +#define leftRotate19(a) (leftRotate((a), 19)) +#define leftRotate20(a) (leftRotate((a), 20)) +#define leftRotate21(a) (leftRotate((a), 21)) +#define leftRotate22(a) (leftRotate((a), 22)) +#define leftRotate23(a) (leftRotate((a), 23)) +#define leftRotate24(a) (leftRotate((a), 24)) +#define leftRotate25(a) (leftRotate((a), 25)) +#define leftRotate26(a) (leftRotate((a), 26)) +#define leftRotate27(a) (leftRotate((a), 27)) +#define leftRotate28(a) (leftRotate((a), 28)) +#define leftRotate29(a) (leftRotate((a), 29)) +#define leftRotate30(a) (leftRotate((a), 30)) +#define leftRotate31(a) (leftRotate((a), 31)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1(a) (rightRotate((a), 1)) +#define rightRotate2(a) (rightRotate((a), 2)) +#define rightRotate3(a) (rightRotate((a), 3)) +#define rightRotate4(a) (rightRotate((a), 4)) +#define rightRotate5(a) (rightRotate((a), 5)) +#define rightRotate6(a) (rightRotate((a), 6)) +#define rightRotate7(a) (rightRotate((a), 7)) +#define rightRotate8(a) (rightRotate((a), 8)) +#define rightRotate9(a) (rightRotate((a), 9)) +#define rightRotate10(a) (rightRotate((a), 10)) +#define rightRotate11(a) (rightRotate((a), 11)) +#define rightRotate12(a) (rightRotate((a), 12)) +#define rightRotate13(a) (rightRotate((a), 13)) +#define rightRotate14(a) (rightRotate((a), 14)) +#define rightRotate15(a) (rightRotate((a), 15)) +#define rightRotate16(a) (rightRotate((a), 16)) +#define rightRotate17(a) (rightRotate((a), 17)) +#define rightRotate18(a) (rightRotate((a), 18)) +#define rightRotate19(a) (rightRotate((a), 19)) +#define rightRotate20(a) (rightRotate((a), 20)) +#define rightRotate21(a) (rightRotate((a), 21)) +#define rightRotate22(a) (rightRotate((a), 22)) +#define rightRotate23(a) (rightRotate((a), 23)) +#define rightRotate24(a) (rightRotate((a), 24)) +#define rightRotate25(a) (rightRotate((a), 25)) +#define rightRotate26(a) (rightRotate((a), 26)) +#define rightRotate27(a) (rightRotate((a), 27)) +#define rightRotate28(a) (rightRotate((a), 28)) +#define rightRotate29(a) (rightRotate((a), 29)) +#define rightRotate30(a) (rightRotate((a), 30)) +#define rightRotate31(a) (rightRotate((a), 31)) + +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) 
(leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Rotation macros for 64-bit arguments */ + +/* Generic left rotate */ +#define leftRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (64 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (64 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_64(a) (leftRotate_64((a), 1)) +#define leftRotate2_64(a) (leftRotate_64((a), 2)) +#define leftRotate3_64(a) (leftRotate_64((a), 3)) +#define leftRotate4_64(a) (leftRotate_64((a), 4)) +#define leftRotate5_64(a) (leftRotate_64((a), 5)) +#define leftRotate6_64(a) (leftRotate_64((a), 6)) +#define leftRotate7_64(a) (leftRotate_64((a), 7)) +#define leftRotate8_64(a) (leftRotate_64((a), 8)) +#define leftRotate9_64(a) (leftRotate_64((a), 9)) +#define leftRotate10_64(a) (leftRotate_64((a), 10)) +#define leftRotate11_64(a) (leftRotate_64((a), 11)) +#define leftRotate12_64(a) (leftRotate_64((a), 12)) +#define leftRotate13_64(a) (leftRotate_64((a), 13)) +#define leftRotate14_64(a) (leftRotate_64((a), 14)) +#define leftRotate15_64(a) (leftRotate_64((a), 15)) +#define leftRotate16_64(a) (leftRotate_64((a), 16)) +#define leftRotate17_64(a) (leftRotate_64((a), 17)) +#define leftRotate18_64(a) (leftRotate_64((a), 18)) +#define leftRotate19_64(a) (leftRotate_64((a), 19)) +#define leftRotate20_64(a) (leftRotate_64((a), 20)) +#define leftRotate21_64(a) (leftRotate_64((a), 21)) +#define leftRotate22_64(a) (leftRotate_64((a), 22)) +#define leftRotate23_64(a) (leftRotate_64((a), 23)) +#define leftRotate24_64(a) (leftRotate_64((a), 24)) +#define leftRotate25_64(a) (leftRotate_64((a), 25)) +#define leftRotate26_64(a) (leftRotate_64((a), 26)) +#define leftRotate27_64(a) (leftRotate_64((a), 27)) +#define leftRotate28_64(a) (leftRotate_64((a), 28)) +#define leftRotate29_64(a) (leftRotate_64((a), 29)) +#define leftRotate30_64(a) (leftRotate_64((a), 30)) +#define leftRotate31_64(a) (leftRotate_64((a), 31)) +#define leftRotate32_64(a) (leftRotate_64((a), 32)) +#define leftRotate33_64(a) (leftRotate_64((a), 33)) +#define leftRotate34_64(a) (leftRotate_64((a), 34)) +#define leftRotate35_64(a) (leftRotate_64((a), 35)) +#define leftRotate36_64(a) (leftRotate_64((a), 36)) +#define leftRotate37_64(a) (leftRotate_64((a), 37)) +#define leftRotate38_64(a) (leftRotate_64((a), 38)) +#define leftRotate39_64(a) (leftRotate_64((a), 39)) +#define leftRotate40_64(a) (leftRotate_64((a), 40)) +#define leftRotate41_64(a) (leftRotate_64((a), 41)) +#define leftRotate42_64(a) (leftRotate_64((a), 42)) +#define leftRotate43_64(a) (leftRotate_64((a), 43)) +#define leftRotate44_64(a) (leftRotate_64((a), 44)) +#define leftRotate45_64(a) (leftRotate_64((a), 45)) +#define leftRotate46_64(a) (leftRotate_64((a), 46)) +#define leftRotate47_64(a) (leftRotate_64((a), 47)) +#define leftRotate48_64(a) (leftRotate_64((a), 48)) +#define leftRotate49_64(a) (leftRotate_64((a), 49)) +#define leftRotate50_64(a) (leftRotate_64((a), 50)) +#define leftRotate51_64(a) (leftRotate_64((a), 51)) +#define leftRotate52_64(a) (leftRotate_64((a), 52)) +#define leftRotate53_64(a) (leftRotate_64((a), 53)) +#define leftRotate54_64(a) (leftRotate_64((a), 54)) +#define leftRotate55_64(a) (leftRotate_64((a), 55)) +#define leftRotate56_64(a) (leftRotate_64((a), 56)) +#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) +#define leftRotate58_64(a) (leftRotate_64((a), 58)) +#define leftRotate59_64(a) (leftRotate_64((a), 59)) +#define leftRotate60_64(a) (leftRotate_64((a), 60)) +#define leftRotate61_64(a) (leftRotate_64((a), 61)) +#define leftRotate62_64(a) (leftRotate_64((a), 62)) +#define leftRotate63_64(a) (leftRotate_64((a), 63)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_64(a) (rightRotate_64((a), 1)) +#define rightRotate2_64(a) (rightRotate_64((a), 2)) +#define rightRotate3_64(a) (rightRotate_64((a), 3)) +#define rightRotate4_64(a) (rightRotate_64((a), 4)) +#define rightRotate5_64(a) (rightRotate_64((a), 5)) +#define rightRotate6_64(a) (rightRotate_64((a), 6)) +#define rightRotate7_64(a) (rightRotate_64((a), 7)) +#define rightRotate8_64(a) (rightRotate_64((a), 8)) +#define rightRotate9_64(a) (rightRotate_64((a), 9)) +#define rightRotate10_64(a) (rightRotate_64((a), 10)) +#define rightRotate11_64(a) (rightRotate_64((a), 11)) +#define rightRotate12_64(a) (rightRotate_64((a), 12)) +#define rightRotate13_64(a) (rightRotate_64((a), 13)) +#define rightRotate14_64(a) (rightRotate_64((a), 14)) +#define rightRotate15_64(a) (rightRotate_64((a), 15)) +#define rightRotate16_64(a) (rightRotate_64((a), 16)) +#define rightRotate17_64(a) (rightRotate_64((a), 17)) +#define rightRotate18_64(a) (rightRotate_64((a), 18)) +#define rightRotate19_64(a) (rightRotate_64((a), 19)) +#define rightRotate20_64(a) (rightRotate_64((a), 20)) +#define rightRotate21_64(a) (rightRotate_64((a), 21)) +#define rightRotate22_64(a) (rightRotate_64((a), 22)) +#define rightRotate23_64(a) (rightRotate_64((a), 23)) +#define rightRotate24_64(a) (rightRotate_64((a), 24)) +#define rightRotate25_64(a) (rightRotate_64((a), 25)) +#define rightRotate26_64(a) (rightRotate_64((a), 26)) +#define rightRotate27_64(a) (rightRotate_64((a), 27)) +#define rightRotate28_64(a) (rightRotate_64((a), 28)) +#define rightRotate29_64(a) (rightRotate_64((a), 29)) +#define rightRotate30_64(a) (rightRotate_64((a), 30)) +#define rightRotate31_64(a) (rightRotate_64((a), 31)) +#define rightRotate32_64(a) (rightRotate_64((a), 32)) +#define rightRotate33_64(a) (rightRotate_64((a), 33)) +#define rightRotate34_64(a) (rightRotate_64((a), 34)) +#define rightRotate35_64(a) (rightRotate_64((a), 35)) +#define rightRotate36_64(a) (rightRotate_64((a), 36)) +#define rightRotate37_64(a) (rightRotate_64((a), 37)) +#define rightRotate38_64(a) (rightRotate_64((a), 38)) +#define rightRotate39_64(a) (rightRotate_64((a), 39)) +#define rightRotate40_64(a) (rightRotate_64((a), 40)) +#define rightRotate41_64(a) (rightRotate_64((a), 41)) +#define rightRotate42_64(a) (rightRotate_64((a), 42)) +#define rightRotate43_64(a) (rightRotate_64((a), 43)) +#define rightRotate44_64(a) (rightRotate_64((a), 44)) +#define rightRotate45_64(a) (rightRotate_64((a), 45)) +#define rightRotate46_64(a) (rightRotate_64((a), 46)) +#define rightRotate47_64(a) (rightRotate_64((a), 47)) +#define rightRotate48_64(a) (rightRotate_64((a), 48)) +#define rightRotate49_64(a) (rightRotate_64((a), 49)) +#define rightRotate50_64(a) (rightRotate_64((a), 50)) +#define rightRotate51_64(a) (rightRotate_64((a), 51)) +#define rightRotate52_64(a) (rightRotate_64((a), 52)) +#define rightRotate53_64(a) (rightRotate_64((a), 53)) +#define rightRotate54_64(a) (rightRotate_64((a), 54)) +#define rightRotate55_64(a) (rightRotate_64((a), 55)) +#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) +#define rightRotate57_64(a) (rightRotate_64((a), 57)) +#define rightRotate58_64(a) (rightRotate_64((a), 58)) +#define rightRotate59_64(a) (rightRotate_64((a), 59)) +#define rightRotate60_64(a) (rightRotate_64((a), 60)) +#define rightRotate61_64(a) (rightRotate_64((a), 61)) +#define rightRotate62_64(a) (rightRotate_64((a), 62)) +#define rightRotate63_64(a) (rightRotate_64((a), 63)) + +/* Rotate a 16-bit value left by a number of bits */ +#define leftRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (16 - (bits))); \ + })) + +/* Rotate a 16-bit value right by a number of bits */ +#define rightRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (16 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_16(a) (leftRotate_16((a), 1)) +#define leftRotate2_16(a) (leftRotate_16((a), 2)) +#define leftRotate3_16(a) (leftRotate_16((a), 3)) +#define leftRotate4_16(a) (leftRotate_16((a), 4)) +#define leftRotate5_16(a) (leftRotate_16((a), 5)) +#define leftRotate6_16(a) (leftRotate_16((a), 6)) +#define leftRotate7_16(a) (leftRotate_16((a), 7)) +#define leftRotate8_16(a) (leftRotate_16((a), 8)) +#define leftRotate9_16(a) (leftRotate_16((a), 9)) +#define leftRotate10_16(a) (leftRotate_16((a), 10)) +#define leftRotate11_16(a) (leftRotate_16((a), 11)) +#define leftRotate12_16(a) (leftRotate_16((a), 12)) +#define leftRotate13_16(a) (leftRotate_16((a), 13)) +#define leftRotate14_16(a) (leftRotate_16((a), 14)) +#define leftRotate15_16(a) (leftRotate_16((a), 15)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_16(a) (rightRotate_16((a), 1)) +#define rightRotate2_16(a) (rightRotate_16((a), 2)) +#define rightRotate3_16(a) (rightRotate_16((a), 3)) +#define rightRotate4_16(a) (rightRotate_16((a), 4)) +#define rightRotate5_16(a) (rightRotate_16((a), 5)) +#define rightRotate6_16(a) (rightRotate_16((a), 6)) +#define rightRotate7_16(a) (rightRotate_16((a), 7)) +#define rightRotate8_16(a) (rightRotate_16((a), 8)) +#define rightRotate9_16(a) (rightRotate_16((a), 9)) +#define rightRotate10_16(a) (rightRotate_16((a), 10)) +#define rightRotate11_16(a) (rightRotate_16((a), 11)) +#define rightRotate12_16(a) (rightRotate_16((a), 12)) +#define rightRotate13_16(a) (rightRotate_16((a), 13)) +#define rightRotate14_16(a) (rightRotate_16((a), 14)) +#define rightRotate15_16(a) (rightRotate_16((a), 15)) + +/* Rotate an 8-bit value left by a number of bits */ +#define leftRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (8 - (bits))); \ + })) + +/* Rotate an 8-bit value right by a number of bits */ +#define rightRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (8 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_8(a) (leftRotate_8((a), 1)) +#define leftRotate2_8(a) (leftRotate_8((a), 2)) +#define leftRotate3_8(a) (leftRotate_8((a), 3)) +#define leftRotate4_8(a) (leftRotate_8((a), 4)) +#define leftRotate5_8(a) (leftRotate_8((a), 5)) +#define leftRotate6_8(a) (leftRotate_8((a), 6)) +#define leftRotate7_8(a) (leftRotate_8((a), 7)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_8(a) (rightRotate_8((a), 1)) +#define rightRotate2_8(a) (rightRotate_8((a), 2)) +#define rightRotate3_8(a) (rightRotate_8((a), 3)) +#define rightRotate4_8(a) (rightRotate_8((a), 4)) +#define rightRotate5_8(a) (rightRotate_8((a), 5)) +#define rightRotate6_8(a) (rightRotate_8((a), 6)) +#define rightRotate7_8(a) (rightRotate_8((a), 7)) + +#endif diff --git a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/subterranean.c b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/subterranean.c new file mode 100644 index 0000000..1bc9fc4 --- /dev/null +++ b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/subterranean.c @@ -0,0 +1,228 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "subterranean.h" +#include "internal-subterranean.h" +#include + +aead_cipher_t const subterranean_cipher = { + "Subterranean", + SUBTERRANEAN_KEY_SIZE, + SUBTERRANEAN_NONCE_SIZE, + SUBTERRANEAN_TAG_SIZE, + AEAD_FLAG_NONE, + subterranean_aead_encrypt, + subterranean_aead_decrypt +}; + +aead_hash_algorithm_t const subterranean_hash_algorithm = { + "Subterranean-Hash", + sizeof(subterranean_hash_state_t), + SUBTERRANEAN_HASH_SIZE, + AEAD_FLAG_NONE, + subterranean_hash, + (aead_hash_init_t)subterranean_hash_init, + (aead_hash_update_t)subterranean_hash_update, + (aead_hash_finalize_t)subterranean_hash_finalize, + (aead_xof_absorb_t)0, + (aead_xof_squeeze_t)0 +}; + +int subterranean_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + subterranean_state_t state; + uint32_t x1, x2; + (void)nsec; + + /* Set the length of the returned ciphertext */ + *clen = mlen + SUBTERRANEAN_TAG_SIZE; + + /* Initialize the state and absorb the key and nonce */ + memset(&state, 0, sizeof(state)); + subterranean_absorb(&state, k, SUBTERRANEAN_KEY_SIZE); + subterranean_absorb(&state, npub, SUBTERRANEAN_NONCE_SIZE); + subterranean_blank(&state); + + /* Absorb the associated data into the state */ + subterranean_absorb(&state, ad, adlen); + + /* Encrypt the plaintext to produce the ciphertext */ + while (mlen >= 4) { + x1 = le_load_word32(m); + x2 = subterranean_extract(&state) ^ x1; + subterranean_duplex_word(&state, x1); + state.x[8] ^= 1; /* padding for 32-bit blocks */ + le_store_word32(c, x2); + c += 4; + m += 4; + mlen -= 4; + } + switch ((unsigned char)mlen) { + default: + subterranean_duplex_0(&state); + break; + case 1: + x2 = subterranean_extract(&state) ^ m[0]; + subterranean_duplex_n(&state, m, 1); + c[0] = (unsigned char)x2; + break; + case 2: + x2 = subterranean_extract(&state) ^ m[0] ^ (((uint32_t)(m[1])) << 8); + subterranean_duplex_n(&state, m, 2); + c[0] = (unsigned char)x2; + c[1] = (unsigned char)(x2 >> 8); + break; + case 3: + x2 = subterranean_extract(&state) ^ + m[0] ^ (((uint32_t)(m[1])) << 8) ^ (((uint32_t)(m[2])) << 16); + subterranean_duplex_n(&state, m, 3); + c[0] = (unsigned char)x2; + c[1] = (unsigned char)(x2 >> 8); + c[2] = (unsigned char)(x2 >> 16); + break; + } + + /* Generate the authentication tag */ + subterranean_blank(&state); + subterranean_squeeze(&state, c + mlen, SUBTERRANEAN_TAG_SIZE); + return 0; +} + +int subterranean_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + subterranean_state_t state; + unsigned char *mtemp = m; + unsigned char tag[SUBTERRANEAN_TAG_SIZE]; + uint32_t x; + (void)nsec; + + /* Validate the ciphertext length and set the return "mlen" value */ + if (clen < SUBTERRANEAN_TAG_SIZE) + return -1; + *mlen = clen - SUBTERRANEAN_TAG_SIZE; + + /* Initialize the state and absorb the key and nonce */ + memset(&state, 0, sizeof(state)); + subterranean_absorb(&state, k, SUBTERRANEAN_KEY_SIZE); + subterranean_absorb(&state, npub, SUBTERRANEAN_NONCE_SIZE); + subterranean_blank(&state); + + /* Absorb the associated data into the state */ + subterranean_absorb(&state, ad, adlen); + + /* Decrypt the ciphertext to produce the plaintext */ + clen -= 
SUBTERRANEAN_TAG_SIZE; + while (clen >= 4) { + x = le_load_word32(c); + x ^= subterranean_extract(&state); + subterranean_duplex_word(&state, x); + state.x[8] ^= 1; /* padding for 32-bit blocks */ + le_store_word32(m, x); + c += 4; + m += 4; + clen -= 4; + } + switch ((unsigned char)clen) { + default: + subterranean_duplex_0(&state); + break; + case 1: + m[0] = (unsigned char)(subterranean_extract(&state) ^ c[0]); + subterranean_duplex_1(&state, m[0]); + break; + case 2: + x = subterranean_extract(&state) ^ c[0] ^ (((uint32_t)(c[1])) << 8); + m[0] = (unsigned char)x; + m[1] = (unsigned char)(x >> 8); + subterranean_duplex_word(&state, (x & 0xFFFFU) | 0x10000U); + break; + case 3: + x = subterranean_extract(&state) ^ + c[0] ^ (((uint32_t)(c[1])) << 8) ^ (((uint32_t)(c[2])) << 16); + m[0] = (unsigned char)x; + m[1] = (unsigned char)(x >> 8); + m[2] = (unsigned char)(x >> 16); + subterranean_duplex_word(&state, (x & 0x00FFFFFFU) | 0x01000000U); + break; + } + + /* Check the authentication tag */ + subterranean_blank(&state); + subterranean_squeeze(&state, tag, sizeof(tag)); + return aead_check_tag(mtemp, *mlen, tag, c + clen, SUBTERRANEAN_TAG_SIZE); +} + +int subterranean_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + subterranean_state_t state; + memset(&state, 0, sizeof(state)); + while (inlen > 0) { + subterranean_duplex_1(&state, *in++); + subterranean_duplex_0(&state); + --inlen; + } + subterranean_duplex_0(&state); + subterranean_duplex_0(&state); + subterranean_blank(&state); + subterranean_squeeze(&state, out, SUBTERRANEAN_HASH_SIZE); + return 0; +} + +void subterranean_hash_init(subterranean_hash_state_t *state) +{ + memset(state, 0, sizeof(subterranean_hash_state_t)); +} + +void subterranean_hash_update + (subterranean_hash_state_t *state, const unsigned char *in, + unsigned long long inlen) +{ + subterranean_state_t *st = (subterranean_state_t *)state; + while (inlen > 0) { + subterranean_duplex_1(st, *in++); + subterranean_duplex_0(st); + --inlen; + } +} + +void subterranean_hash_finalize + (subterranean_hash_state_t *state, unsigned char *out) +{ + subterranean_state_t *st = (subterranean_state_t *)state; + subterranean_duplex_0(st); + subterranean_duplex_0(st); + subterranean_blank(st); + subterranean_squeeze(st, out, SUBTERRANEAN_HASH_SIZE); +} diff --git a/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/subterranean.h b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/subterranean.h new file mode 100644 index 0000000..148e5e8 --- /dev/null +++ b/subterranean/Implementations/crypto_hash/subterraneanv1/rhys/subterranean.h @@ -0,0 +1,200 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_SUBTERRANEAN_H +#define LWCRYPTO_SUBTERRANEAN_H + +#include "aead-common.h" + +/** + * \file subterranean.h + * \brief Subterranean authenticated encryption algorithm. + * + * Subterranean (technically "Subterranean 2.0") is a family of + * algorithms built around the 257-bit Subterranean permutation: + * + * \li Subterranean is an authenticated encryption algorithm with a 128-bit + * key, a 128-bit nonce, and a 128-bit tag. + * \li Subterranean-Hash is a hash algorithm with a 256-bit output. + * + * The Subterranean permutation is intended for hardware implementation. + * It is not structured for efficient software implementation. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the key for Subterranean. + */ +#define SUBTERRANEAN_KEY_SIZE 16 + +/** + * \brief Size of the authentication tag for Subterranean. + */ +#define SUBTERRANEAN_TAG_SIZE 16 + +/** + * \brief Size of the nonce for Subterranean. + */ +#define SUBTERRANEAN_NONCE_SIZE 16 + +/** + * \brief Size of the hash output for Subterranean-Hash. + */ +#define SUBTERRANEAN_HASH_SIZE 32 + +/** + * \brief Meta-information block for the Subterranean cipher. + */ +extern aead_cipher_t const subterranean_cipher; + +/** + * \brief Meta-information block for the SUBTERRANEAN hash algorithm. + */ +extern aead_hash_algorithm_t const subterranean_hash_algorithm; + +/** + * \brief State information for the Subterreaan incremental hash mode. + */ +typedef union +{ + unsigned char state[40]; /**< Current hash state */ + unsigned long long align; /**< For alignment of this structure */ + +} subterranean_hash_state_t; + +/** + * \brief Encrypts and authenticates a packet with Subterranean. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa subterranean_aead_decrypt() + */ +int subterranean_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with Subterranean. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. 
+ * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + * + * \sa subterranean_aead_encrypt() + */ +int subterranean_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data with Subterranean. + * + * \param out Buffer to receive the hash output which must be at least + * SUBTERRANEAN_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + * + * \sa subterranean_hash_init() + */ +int subterranean_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a Subterranean hashing operation. + * + * \param state Hash state to be initialized. + * + * \sa subterranean_hash_update(), subterranean_hash_finalize(), + * subterranean_hash() + */ +void subterranean_hash_init(subterranean_hash_state_t *state); + +/** + * \brief Updates a Subterranean state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + * + * \sa subterranean_hash_init(), subterranean_hash_finalize() + */ +void subterranean_hash_update + (subterranean_hash_state_t *state, const unsigned char *in, + unsigned long long inlen); + +/** + * \brief Returns the final hash value from a Subterranean hashing operation. + * + * \param state Hash state to be finalized. + * \param out Points to the output buffer to receive the 32-byte hash value. + * + * \sa subterranean_hash_init(), subterranean_hash_update() + */ +void subterranean_hash_finalize + (subterranean_hash_state_t *state, unsigned char *out); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/aead-common.c b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
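As a usage sketch for the AEAD API declared above (illustrative only, not part of the patch: the all-zero key and nonce, the test main(), and the flat include path are assumptions, and a real caller must use a fresh nonce for every message under the same key), an encrypt/decrypt round trip looks like this:

#include <stdio.h>
#include <string.h>
#include "subterranean.h"

int main(void)
{
    unsigned char key[SUBTERRANEAN_KEY_SIZE] = {0};     /* placeholder key */
    unsigned char nonce[SUBTERRANEAN_NONCE_SIZE] = {0}; /* must be unique per key */
    const unsigned char ad[] = "header";
    const unsigned char msg[] = "hello";
    unsigned char ct[sizeof(msg) - 1 + SUBTERRANEAN_TAG_SIZE];
    unsigned char pt[sizeof(msg) - 1];
    unsigned long long ctlen, ptlen;

    /* Encrypt: the output is the ciphertext followed by the 16-byte tag */
    subterranean_aead_encrypt(ct, &ctlen, msg, sizeof(msg) - 1,
                              ad, sizeof(ad) - 1, NULL, nonce, key);

    /* Decrypt: returns 0 on success, -1 if the tag does not verify */
    if (subterranean_aead_decrypt(pt, &ptlen, NULL, ct, ctlen,
                                  ad, sizeof(ad) - 1, nonce, key) != 0) {
        fprintf(stderr, "tag check failed\n");
        return 1;
    }
    printf("recovered %llu bytes\n", ptlen);
    return 0;
}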
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/aead-common.h b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
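The tag-checking helpers above compare tags in constant time: every byte difference is OR-ed into accum, and (accum - 1) >> 8 then yields an all-ones mask only when accum is zero; the same mask is used to wipe the plaintext when verification fails, so timing does not depend on where the tags differ. A stand-alone illustration of the masking trick (not library code; like the library, it assumes the common arithmetic right shift of negative ints):

#include <stdio.h>

static int demo_check(const unsigned char *tag1, const unsigned char *tag2,
                      unsigned size)
{
    int accum = 0;
    while (size > 0) {
        accum |= (*tag1++ ^ *tag2++);   /* non-zero iff any byte differs */
        --size;
    }
    accum = (accum - 1) >> 8;           /* all ones if equal, zero otherwise */
    return ~accum;                      /* 0 on match, -1 on mismatch */
}

int main(void)
{
    unsigned char a[4] = {1, 2, 3, 4};
    unsigned char b[4] = {1, 2, 3, 4};
    unsigned char c[4] = {1, 2, 3, 5};
    printf("%d %d\n", demo_check(a, b, 4), demo_check(a, c, 4));  /* prints: 0 -1 */
    return 0;
}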
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. 
- */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. 
- */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
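Because aead_cipher_t carries the key, nonce, and tag sizes alongside the function pointers, a caller can drive any cipher in this collection generically. A small sketch of that pattern (the encrypt_with() helper, the buffer sizes, and the choice of subterranean_cipher are illustrative assumptions, not part of the patch):

#include <stdio.h>
#include "subterranean.h"   /* any header that exposes an aead_cipher_t would do */

/* Hypothetical helper: encrypt through the meta-information block so the
 * caller never hard-codes a particular cipher's parameter sizes. */
static int encrypt_with(const aead_cipher_t *cipher,
                        unsigned char *c, unsigned long long *clen,
                        const unsigned char *m, unsigned long long mlen,
                        const unsigned char *key, const unsigned char *nonce)
{
    printf("using %s (key=%u nonce=%u tag=%u)\n",
           cipher->name, cipher->key_len, cipher->nonce_len, cipher->tag_len);
    return (*cipher->encrypt)(c, clen, m, mlen, NULL, 0, NULL, nonce, key);
}

int main(void)
{
    unsigned char key[32] = {0}, nonce[32] = {0};   /* large enough for any cipher here */
    unsigned char out[64];
    unsigned long long outlen;
    const unsigned char msg[] = "hi";
    return encrypt_with(&subterranean_cipher, out, &outlen,
                        msg, sizeof(msg) - 1, key, nonce);
}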
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/api.h b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/api.h deleted file mode 100644 index 4bd426b..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 0 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/encrypt.c b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/encrypt.c deleted file mode 100644 index 50af7fb..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "sundae-gift.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return sundae_gift_0_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return sundae_gift_0_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128-config.h b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128-config.h deleted file mode 100644 index 62131ba..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128-config.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_GIFT128_CONFIG_H -#define LW_INTERNAL_GIFT128_CONFIG_H - -/** - * \file internal-gift128-config.h - * \brief Configures the variant of GIFT-128 to use. - */ - -/** - * \brief Select the full variant of GIFT-128. 
- * - * The full variant requires 320 bytes for the key schedule and uses the - * fixslicing method to implement encryption and decryption. - */ -#define GIFT128_VARIANT_FULL 0 - -/** - * \brief Select the small variant of GIFT-128. - * - * The small variant requires 80 bytes for the key schedule. The rest - * of the key schedule is expanded on the fly during encryption. - * - * The fixslicing method is used to implement encryption and the slower - * bitslicing method is used to implement decryption. The small variant - * is suitable when memory is at a premium, decryption is not needed, - * but encryption performance is still important. - */ -#define GIFT128_VARIANT_SMALL 1 - -/** - * \brief Select the tiny variant of GIFT-128. - * - * The tiny variant requires 16 bytes for the key schedule and uses the - * bitslicing method to implement encryption and decryption. It is suitable - * for use when memory is very tight and performance is not critical. - */ -#define GIFT128_VARIANT_TINY 2 - -/** - * \def GIFT128_VARIANT - * \brief Selects the default variant of GIFT-128 to use on this platform. - */ -/** - * \def GIFT128_VARIANT_ASM - * \brief Defined to 1 if the GIFT-128 implementation has been replaced - * with an assembly code version. - */ -#if defined(__AVR__) && !defined(GIFT128_VARIANT_ASM) -#define GIFT128_VARIANT_ASM 1 -#endif -#if !defined(GIFT128_VARIANT) -#define GIFT128_VARIANT GIFT128_VARIANT_FULL -#endif -#if !defined(GIFT128_VARIANT_ASM) -#define GIFT128_VARIANT_ASM 0 -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128.c b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128.c deleted file mode 100644 index c6ac5ec..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128.c +++ /dev/null @@ -1,1498 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
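The config header above only supplies defaults, so a port can pre-select a variant before internal-gift128-config.h is processed. A build-configuration sketch (the exact include chain is an assumption; defining the macro on the compiler command line works because the variant name is only expanded once the config header has been seen):

/* e.g. on the compiler command line:
 *     cc -DGIFT128_VARIANT=GIFT128_VARIANT_TINY -c internal-gift128.c
 * or in a local configuration header included ahead of the GIFT-128 sources: */
#define GIFT128_VARIANT GIFT128_VARIANT_SMALL   /* 80-byte key schedule, on-the-fly expansion */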
- */ - -#include "internal-gift128.h" -#include "internal-util.h" - -#if !GIFT128_VARIANT_ASM - -#if GIFT128_VARIANT != GIFT128_VARIANT_TINY - -/* Round constants for GIFT-128 in the fixsliced representation */ -static uint32_t const GIFT128_RC_fixsliced[40] = { - 0x10000008, 0x80018000, 0x54000002, 0x01010181, 0x8000001f, 0x10888880, - 0x6001e000, 0x51500002, 0x03030180, 0x8000002f, 0x10088880, 0x60016000, - 0x41500002, 0x03030080, 0x80000027, 0x10008880, 0x4001e000, 0x11500002, - 0x03020180, 0x8000002b, 0x10080880, 0x60014000, 0x01400002, 0x02020080, - 0x80000021, 0x10000080, 0x0001c000, 0x51000002, 0x03010180, 0x8000002e, - 0x10088800, 0x60012000, 0x40500002, 0x01030080, 0x80000006, 0x10008808, - 0xc001a000, 0x14500002, 0x01020181, 0x8000001a -}; - -#endif - -#if GIFT128_VARIANT != GIFT128_VARIANT_FULL - -/* Round constants for GIFT-128 in the bitsliced representation */ -static uint8_t const GIFT128_RC[40] = { - 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, - 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, - 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, - 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38, - 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A -}; - -#endif - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) - -/* - * The permutation below was generated by the online permuation generator at - * "http://programming.sirrida.de/calcperm.php". - * - * All of the permutuations are essentially the same, except that each is - * rotated by 8 bits with respect to the next: - * - * P0: 0 24 16 8 1 25 17 9 2 26 18 10 3 27 19 11 4 28 20 12 5 29 21 13 6 30 22 14 7 31 23 15 - * P1: 8 0 24 16 9 1 25 17 10 2 26 18 11 3 27 19 12 4 28 20 13 5 29 21 14 6 30 22 15 7 31 23 - * P2: 16 8 0 24 17 9 1 25 18 10 2 26 19 11 3 27 20 12 4 28 21 13 5 29 22 14 6 30 23 15 7 31 - * P3: 24 16 8 0 25 17 9 1 26 18 10 2 27 19 11 3 28 20 12 4 29 21 13 5 30 22 14 6 31 23 15 7 - * - * The most efficient permutation from the online generator was P3, so we - * perform it as the core of the others, and then perform a final rotation. - * - * It is possible to do slightly better than "P3 then rotate" on desktop and - * server architectures for the other permutations. But the advantage isn't - * as evident on embedded platforms so we keep things simple. 
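bit_permute_step() is the classic delta swap: for every bit set in mask, it exchanges bit i with bit i + shift of the word. A throwaway demonstration of the same operation on a 4-bit value (not library code):

#include <stdio.h>
#include <stdint.h>

/* Same delta swap as bit_permute_step(): for every bit set in "mask",
 * exchange bit i with bit (i + shift) of y. */
static uint32_t delta_swap(uint32_t y, uint32_t mask, unsigned shift)
{
    uint32_t t = ((y >> shift) ^ y) & mask;
    return (y ^ t) ^ (t << shift);
}

int main(void)
{
    /* 0x6 = 0110b with mask 0101b and shift 1 swaps bits 0<->1 and 2<->3,
     * giving 0x9 = 1001b. */
    printf("0x%lx\n", (unsigned long)delta_swap(0x6, 0x5, 1));
    return 0;
}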
- */ -#define PERM3_INNER(x) \ - do { \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x000000ff, 24); \ - } while (0) -#define PERM0(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate8(_x); \ - } while (0) -#define PERM1(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate16(_x); \ - } while (0) -#define PERM2(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate24(_x); \ - } while (0) -#define PERM3(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) - -#define INV_PERM3_INNER(x) \ - do { \ - bit_permute_step(x, 0x00550055, 9); \ - bit_permute_step(x, 0x00003333, 18); \ - bit_permute_step(x, 0x000f000f, 12); \ - bit_permute_step(x, 0x000000ff, 24); \ - } while (0) -#define INV_PERM0(x) \ - do { \ - uint32_t _x = rightRotate8(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM1(x) \ - do { \ - uint32_t _x = rightRotate16(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM2(x) \ - do { \ - uint32_t _x = rightRotate24(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM3(x) \ - do { \ - uint32_t _x = (x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) - -/** - * \brief Converts the GIFT-128 nibble-based representation into word-based. - * - * \param output Output buffer to write the word-based version to. - * \param input Input buffer to read the nibble-based version from. - * - * The \a input and \a output buffers can be the same buffer. - */ -static void gift128n_to_words - (unsigned char *output, const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Load the input buffer into 32-bit words. We use the nibble order - * from the HYENA submission to NIST which is byte-reversed with respect - * to the nibble order of the original GIFT-128 paper. Nibble zero is in - * the first byte instead of the last, which means little-endian order. */ - s0 = le_load_word32(input + 12); - s1 = le_load_word32(input + 8); - s2 = le_load_word32(input + 4); - s3 = le_load_word32(input); - - /* Rearrange the bits so that bits 0..3 of each nibble are - * scattered to bytes 0..3 of each word. The permutation is: - * - * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 - * - * Generated with "http://programming.sirrida.de/calcperm.php". - */ - #define PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - PERM_WORDS(s0); - PERM_WORDS(s1); - PERM_WORDS(s2); - PERM_WORDS(s3); - - /* Rearrange the bytes and write them to the output buffer */ - output[0] = (uint8_t)s0; - output[1] = (uint8_t)s1; - output[2] = (uint8_t)s2; - output[3] = (uint8_t)s3; - output[4] = (uint8_t)(s0 >> 8); - output[5] = (uint8_t)(s1 >> 8); - output[6] = (uint8_t)(s2 >> 8); - output[7] = (uint8_t)(s3 >> 8); - output[8] = (uint8_t)(s0 >> 16); - output[9] = (uint8_t)(s1 >> 16); - output[10] = (uint8_t)(s2 >> 16); - output[11] = (uint8_t)(s3 >> 16); - output[12] = (uint8_t)(s0 >> 24); - output[13] = (uint8_t)(s1 >> 24); - output[14] = (uint8_t)(s2 >> 24); - output[15] = (uint8_t)(s3 >> 24); -} - -/** - * \brief Converts the GIFT-128 word-based representation into nibble-based. 
- * - * \param output Output buffer to write the nibble-based version to. - * \param input Input buffer to read the word-based version from. - */ -static void gift128n_to_nibbles - (unsigned char *output, const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Load the input bytes and rearrange them so that s0 contains the - * most significant nibbles and s3 contains the least significant */ - s0 = (((uint32_t)(input[12])) << 24) | - (((uint32_t)(input[8])) << 16) | - (((uint32_t)(input[4])) << 8) | - ((uint32_t)(input[0])); - s1 = (((uint32_t)(input[13])) << 24) | - (((uint32_t)(input[9])) << 16) | - (((uint32_t)(input[5])) << 8) | - ((uint32_t)(input[1])); - s2 = (((uint32_t)(input[14])) << 24) | - (((uint32_t)(input[10])) << 16) | - (((uint32_t)(input[6])) << 8) | - ((uint32_t)(input[2])); - s3 = (((uint32_t)(input[15])) << 24) | - (((uint32_t)(input[11])) << 16) | - (((uint32_t)(input[7])) << 8) | - ((uint32_t)(input[3])); - - /* Apply the inverse of PERM_WORDS() from the function above */ - #define INV_PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x00aa00aa, 7); \ - bit_permute_step(x, 0x0000cccc, 14); \ - bit_permute_step(x, 0x00f000f0, 4); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - INV_PERM_WORDS(s0); - INV_PERM_WORDS(s1); - INV_PERM_WORDS(s2); - INV_PERM_WORDS(s3); - - /* Store the result into the output buffer as 32-bit words */ - le_store_word32(output + 12, s0); - le_store_word32(output + 8, s1); - le_store_word32(output + 4, s2); - le_store_word32(output, s3); -} - -void gift128n_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - gift128n_to_words(output, input); - gift128b_encrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} - -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - gift128n_to_words(output, input); - gift128b_decrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} - -#if GIFT128_VARIANT != GIFT128_VARIANT_TINY - -/** - * \brief Swaps bits within two words. - * - * \param a The first word. - * \param b The second word. - * \param mask Mask for the bits to shift. - * \param shift Shift amount in bits. - */ -#define gift128b_swap_move(a, b, mask, shift) \ - do { \ - uint32_t tmp = ((b) ^ ((a) >> (shift))) & (mask); \ - (b) ^= tmp; \ - (a) ^= tmp << (shift); \ - } while (0) - -/** - * \brief Derives the next 10 fixsliced keys in the key schedule. - * - * \param next Points to the buffer to receive the next 10 keys. - * \param prev Points to the buffer holding the previous 10 keys. - * - * The \a next and \a prev buffers are allowed to be the same. 
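gift128n_to_words() and gift128n_to_nibbles() are exact inverses of each other, so the nibble-based encrypt/decrypt pair defined above should round-trip any block. A quick self-check sketch (the test main() and the all-zero key are assumptions; since the conversion helpers are static, the check goes through the public functions):

#include <stdio.h>
#include <string.h>
#include "internal-gift128.h"

int main(void)
{
    gift128n_key_schedule_t ks;
    unsigned char key[16] = {0};
    unsigned char block[16], copy[16];
    int i;

    for (i = 0; i < 16; ++i)
        block[i] = (unsigned char)i;
    memcpy(copy, block, sizeof(copy));

    /* Encrypt converts nibbles to words, runs GIFT-128b, converts back;
     * decrypt undoes all three steps, so the original block should return. */
    gift128n_init(&ks, key);
    gift128n_encrypt(&ks, block, block);
    gift128n_decrypt(&ks, block, block);

    printf("%s\n", memcmp(block, copy, 16) == 0 ? "round trip ok" : "mismatch");
    return 0;
}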
- */ -#define gift128b_derive_keys(next, prev) \ - do { \ - /* Key 0 */ \ - uint32_t s = (prev)[0]; \ - uint32_t t = (prev)[1]; \ - gift128b_swap_move(t, t, 0x00003333U, 16); \ - gift128b_swap_move(t, t, 0x55554444U, 1); \ - (next)[0] = t; \ - /* Key 1 */ \ - s = leftRotate8(s & 0x33333333U) | leftRotate16(s & 0xCCCCCCCCU); \ - gift128b_swap_move(s, s, 0x55551100U, 1); \ - (next)[1] = s; \ - /* Key 2 */ \ - s = (prev)[2]; \ - t = (prev)[3]; \ - (next)[2] = ((t >> 4) & 0x0F000F00U) | ((t & 0x0F000F00U) << 4) | \ - ((t >> 6) & 0x00030003U) | ((t & 0x003F003FU) << 2); \ - /* Key 3 */ \ - (next)[3] = ((s >> 6) & 0x03000300U) | ((s & 0x3F003F00U) << 2) | \ - ((s >> 5) & 0x00070007U) | ((s & 0x001F001FU) << 3); \ - /* Key 4 */ \ - s = (prev)[4]; \ - t = (prev)[5]; \ - (next)[4] = leftRotate8(t & 0xAAAAAAAAU) | \ - leftRotate16(t & 0x55555555U); \ - /* Key 5 */ \ - (next)[5] = leftRotate8(s & 0x55555555U) | \ - leftRotate12(s & 0xAAAAAAAAU); \ - /* Key 6 */ \ - s = (prev)[6]; \ - t = (prev)[7]; \ - (next)[6] = ((t >> 2) & 0x03030303U) | ((t & 0x03030303U) << 2) | \ - ((t >> 1) & 0x70707070U) | ((t & 0x10101010U) << 3); \ - /* Key 7 */ \ - (next)[7] = ((s >> 18) & 0x00003030U) | ((s & 0x01010101U) << 3) | \ - ((s >> 14) & 0x0000C0C0U) | ((s & 0x0000E0E0U) << 15) | \ - ((s >> 1) & 0x07070707U) | ((s & 0x00001010U) << 19); \ - /* Key 8 */ \ - s = (prev)[8]; \ - t = (prev)[9]; \ - (next)[8] = ((t >> 4) & 0x0FFF0000U) | ((t & 0x000F0000U) << 12) | \ - ((t >> 8) & 0x000000FFU) | ((t & 0x000000FFU) << 8); \ - /* Key 9 */ \ - (next)[9] = ((s >> 6) & 0x03FF0000U) | ((s & 0x003F0000U) << 10) | \ - ((s >> 4) & 0x00000FFFU) | ((s & 0x0000000FU) << 12); \ - } while (0) - -/** - * \brief Compute the round keys for GIFT-128 in the fixsliced representation. - * - * \param ks Points to the key schedule to initialize. - * \param k0 First key word. - * \param k1 Second key word. - * \param k2 Third key word. - * \param k3 Fourth key word. 
- */ -static void gift128b_compute_round_keys - (gift128b_key_schedule_t *ks, - uint32_t k0, uint32_t k1, uint32_t k2, uint32_t k3) -{ - unsigned index; - uint32_t temp; - - /* Set the regular key with k0 and k3 pre-swapped for the round function */ - ks->k[0] = k3; - ks->k[1] = k1; - ks->k[2] = k2; - ks->k[3] = k0; - - /* Pre-compute the keys for rounds 3..10 and permute into fixsliced form */ - for (index = 4; index < 20; index += 2) { - ks->k[index] = ks->k[index - 3]; - temp = ks->k[index - 4]; - temp = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - ks->k[index + 1] = temp; - } - for (index = 0; index < 20; index += 10) { - /* Keys 0 and 10 */ - temp = ks->k[index]; - gift128b_swap_move(temp, temp, 0x00550055U, 9); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index] = temp; - - /* Keys 1 and 11 */ - temp = ks->k[index + 1]; - gift128b_swap_move(temp, temp, 0x00550055U, 9); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 1] = temp; - - /* Keys 2 and 12 */ - temp = ks->k[index + 2]; - gift128b_swap_move(temp, temp, 0x11111111U, 3); - gift128b_swap_move(temp, temp, 0x03030303U, 6); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 2] = temp; - - /* Keys 3 and 13 */ - temp = ks->k[index + 3]; - gift128b_swap_move(temp, temp, 0x11111111U, 3); - gift128b_swap_move(temp, temp, 0x03030303U, 6); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 3] = temp; - - /* Keys 4 and 14 */ - temp = ks->k[index + 4]; - gift128b_swap_move(temp, temp, 0x0000AAAAU, 15); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 4] = temp; - - /* Keys 5 and 15 */ - temp = ks->k[index + 5]; - gift128b_swap_move(temp, temp, 0x0000AAAAU, 15); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 5] = temp; - - /* Keys 6 and 16 */ - temp = ks->k[index + 6]; - gift128b_swap_move(temp, temp, 0x0A0A0A0AU, 3); - gift128b_swap_move(temp, temp, 0x00CC00CCU, 6); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 6] = temp; - - /* Keys 7 and 17 */ - temp = ks->k[index + 7]; - gift128b_swap_move(temp, temp, 0x0A0A0A0AU, 3); - gift128b_swap_move(temp, temp, 0x00CC00CCU, 6); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 7] = temp; - - /* Keys 8, 9, 18, and 19 do not need any adjustment */ - } - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - /* Derive the fixsliced keys for the remaining rounds 11..40 */ - for (index = 20; index < 80; index += 10) { - gift128b_derive_keys(ks->k + index, ks->k + index - 20); - } -#endif -} - -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) -{ - gift128b_compute_round_keys - (ks, be_load_word32(key), be_load_word32(key + 4), - be_load_word32(key + 8), be_load_word32(key + 12)); -} - -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) -{ - /* Use 
the little-endian key byte order from the HYENA submission */ - gift128b_compute_round_keys - (ks, le_load_word32(key + 12), le_load_word32(key + 8), - le_load_word32(key + 4), le_load_word32(key)); -} - -/** - * \brief Performs the GIFT-128 S-box on the bit-sliced state. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_sbox(s0, s1, s2, s3) \ - do { \ - s1 ^= s0 & s2; \ - s0 ^= s1 & s3; \ - s2 ^= s0 | s1; \ - s3 ^= s2; \ - s1 ^= s3; \ - s3 ^= 0xFFFFFFFFU; \ - s2 ^= s0 & s1; \ - } while (0) - -/** - * \brief Performs the inverse of the GIFT-128 S-box on the bit-sliced state. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_sbox(s0, s1, s2, s3) \ - do { \ - s2 ^= s3 & s1; \ - s0 ^= 0xFFFFFFFFU; \ - s1 ^= s0; \ - s0 ^= s2; \ - s2 ^= s3 | s1; \ - s3 ^= s1 & s0; \ - s1 ^= s3 & s2; \ - } while (0) - -/** - * \brief Permutes the GIFT-128 state between the 1st and 2nd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_1(s0, s1, s2, s3) \ - do { \ - s1 = ((s1 >> 2) & 0x33333333U) | ((s1 & 0x33333333U) << 2); \ - s2 = ((s2 >> 3) & 0x11111111U) | ((s2 & 0x77777777U) << 1); \ - s3 = ((s3 >> 1) & 0x77777777U) | ((s3 & 0x11111111U) << 3); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 2nd and 3rd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_2(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 4) & 0x0FFF0FFFU) | ((s0 & 0x000F000FU) << 12); \ - s1 = ((s1 >> 8) & 0x00FF00FFU) | ((s1 & 0x00FF00FFU) << 8); \ - s2 = ((s2 >> 12) & 0x000F000FU) | ((s2 & 0x0FFF0FFFU) << 4); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 3rd and 4th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_3(s0, s1, s2, s3) \ - do { \ - gift128b_swap_move(s1, s1, 0x55555555U, 1); \ - s2 = leftRotate16(s2); \ - gift128b_swap_move(s2, s2, 0x00005555U, 1); \ - s3 = leftRotate16(s3); \ - gift128b_swap_move(s3, s3, 0x55550000U, 1); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 4th and 5th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_4(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 6) & 0x03030303U) | ((s0 & 0x3F3F3F3FU) << 2); \ - s1 = ((s1 >> 4) & 0x0F0F0F0FU) | ((s1 & 0x0F0F0F0FU) << 4); \ - s2 = ((s2 >> 2) & 0x3F3F3F3FU) | ((s2 & 0x03030303U) << 6); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 5th and 1st mini-rounds. 
- * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_5(s0, s1, s2, s3) \ - do { \ - s1 = leftRotate16(s1); \ - s2 = rightRotate8(s2); \ - s3 = leftRotate8(s3); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 1st and 2nd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_1(s0, s1, s2, s3) \ - do { \ - s1 = ((s1 >> 2) & 0x33333333U) | ((s1 & 0x33333333U) << 2); \ - s2 = ((s2 >> 1) & 0x77777777U) | ((s2 & 0x11111111U) << 3); \ - s3 = ((s3 >> 3) & 0x11111111U) | ((s3 & 0x77777777U) << 1); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 2nd and 3rd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_2(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 12) & 0x000F000FU) | ((s0 & 0x0FFF0FFFU) << 4); \ - s1 = ((s1 >> 8) & 0x00FF00FFU) | ((s1 & 0x00FF00FFU) << 8); \ - s2 = ((s2 >> 4) & 0x0FFF0FFFU) | ((s2 & 0x000F000FU) << 12); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 3rd and 4th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_3(s0, s1, s2, s3) \ - do { \ - gift128b_swap_move(s1, s1, 0x55555555U, 1); \ - gift128b_swap_move(s2, s2, 0x00005555U, 1); \ - s2 = leftRotate16(s2); \ - gift128b_swap_move(s3, s3, 0x55550000U, 1); \ - s3 = leftRotate16(s3); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 4th and 5th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_4(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 2) & 0x3F3F3F3FU) | ((s0 & 0x03030303U) << 6); \ - s1 = ((s1 >> 4) & 0x0F0F0F0FU) | ((s1 & 0x0F0F0F0FU) << 4); \ - s2 = ((s2 >> 6) & 0x03030303U) | ((s2 & 0x3F3F3F3FU) << 2); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 5th and 1st mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_5(s0, s1, s2, s3) \ - do { \ - s1 = leftRotate16(s1); \ - s2 = leftRotate8(s2); \ - s3 = rightRotate8(s3); \ - } while (0); - -/** - * \brief Performs five fixsliced encryption rounds for GIFT-128. - * - * \param rk Points to the 10 round keys for these rounds. - * \param rc Points to the round constants for these rounds. - * - * We perform all 40 rounds of the fixsliced GIFT-128 five at a time. 
- * - * The permutation is restructured so that one of the words each round - * does not need to be permuted, with the others rotating left, up, right, - * and down to keep the bits in line with their non-moving counterparts. - * This reduces the number of shifts required significantly. - * - * At the end of five rounds, the bit ordering will return to the - * original position. We then repeat the process for the next 5 rounds. - */ -#define gift128b_encrypt_5_rounds(rk, rc) \ - do { \ - /* 1st round - S-box, rotate left, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_1(s0, s1, s2, s3); \ - s1 ^= (rk)[0]; \ - s2 ^= (rk)[1]; \ - s0 ^= (rc)[0]; \ - \ - /* 2nd round - S-box, rotate up, add round key */ \ - gift128b_sbox(s3, s1, s2, s0); \ - gift128b_permute_state_2(s0, s1, s2, s3); \ - s1 ^= (rk)[2]; \ - s2 ^= (rk)[3]; \ - s3 ^= (rc)[1]; \ - \ - /* 3rd round - S-box, swap columns, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_3(s0, s1, s2, s3); \ - s1 ^= (rk)[4]; \ - s2 ^= (rk)[5]; \ - s0 ^= (rc)[2]; \ - \ - /* 4th round - S-box, rotate left and swap rows, add round key */ \ - gift128b_sbox(s3, s1, s2, s0); \ - gift128b_permute_state_4(s0, s1, s2, s3); \ - s1 ^= (rk)[6]; \ - s2 ^= (rk)[7]; \ - s3 ^= (rc)[3]; \ - \ - /* 5th round - S-box, rotate up, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_5(s0, s1, s2, s3); \ - s1 ^= (rk)[8]; \ - s2 ^= (rk)[9]; \ - s0 ^= (rc)[4]; \ - \ - /* Swap s0 and s3 in preparation for the next 1st round */ \ - s0 ^= s3; \ - s3 ^= s0; \ - s0 ^= s3; \ - } while (0) - -/** - * \brief Performs five fixsliced decryption rounds for GIFT-128. - * - * \param rk Points to the 10 round keys for these rounds. - * \param rc Points to the round constants for these rounds. - * - * We perform all 40 rounds of the fixsliced GIFT-128 five at a time. 
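In the decryption rounds, gift128b_inv_sbox() is always invoked with s0 and s3 exchanged relative to the matching gift128b_sbox() call, so the state swap that the forward rounds perform between S-box layers is folded into the argument order. A stand-alone sanity check that the pair really is an inverse (local copies of the two macros are used here, since the originals are private to this file):

#include <stdio.h>
#include <stdint.h>

#define SBOX(s0, s1, s2, s3) \
    do { \
        s1 ^= s0 & s2; s0 ^= s1 & s3; s2 ^= s0 | s1; \
        s3 ^= s2; s1 ^= s3; s3 ^= 0xFFFFFFFFU; s2 ^= s0 & s1; \
    } while (0)
#define INV_SBOX(s0, s1, s2, s3) \
    do { \
        s2 ^= s3 & s1; s0 ^= 0xFFFFFFFFU; s1 ^= s0; s0 ^= s2; \
        s2 ^= s3 | s1; s3 ^= s1 & s0; s1 ^= s3 & s2; \
    } while (0)

int main(void)
{
    uint32_t s0 = 0x01234567U, s1 = 0x89ABCDEFU, s2 = 0xDEADBEEFU, s3 = 0x0F1E2D3CU;
    uint32_t t0 = s0, t1 = s1, t2 = s2, t3 = s3;
    SBOX(s0, s1, s2, s3);
    INV_SBOX(s3, s1, s2, s0);   /* note the exchanged outer arguments */
    printf("%s\n", (s0 == t0 && s1 == t1 && s2 == t2 && s3 == t3)
                       ? "inverse ok" : "mismatch");
    return 0;
}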
- */ -#define gift128b_decrypt_5_rounds(rk, rc) \ - do { \ - /* Swap s0 and s3 in preparation for the next 5th round */ \ - s0 ^= s3; \ - s3 ^= s0; \ - s0 ^= s3; \ - \ - /* 5th round - S-box, rotate down, add round key */ \ - s1 ^= (rk)[8]; \ - s2 ^= (rk)[9]; \ - s0 ^= (rc)[4]; \ - gift128b_inv_permute_state_5(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - \ - /* 4th round - S-box, rotate right and swap rows, add round key */ \ - s1 ^= (rk)[6]; \ - s2 ^= (rk)[7]; \ - s3 ^= (rc)[3]; \ - gift128b_inv_permute_state_4(s0, s1, s2, s3); \ - gift128b_inv_sbox(s0, s1, s2, s3); \ - \ - /* 3rd round - S-box, swap columns, add round key */ \ - s1 ^= (rk)[4]; \ - s2 ^= (rk)[5]; \ - s0 ^= (rc)[2]; \ - gift128b_inv_permute_state_3(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - \ - /* 2nd round - S-box, rotate down, add round key */ \ - s1 ^= (rk)[2]; \ - s2 ^= (rk)[3]; \ - s3 ^= (rc)[1]; \ - gift128b_inv_permute_state_2(s0, s1, s2, s3); \ - gift128b_inv_sbox(s0, s1, s2, s3); \ - \ - /* 1st round - S-box, rotate right, add round key */ \ - s1 ^= (rk)[0]; \ - s2 ^= (rk)[1]; \ - s0 ^= (rc)[0]; \ - gift128b_inv_permute_state_1(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - } while (0) - -#else /* GIFT128_VARIANT_TINY */ - -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) -{ - /* Mirror the fixslicing word order of 3, 1, 2, 0 */ - ks->k[0] = be_load_word32(key + 12); - ks->k[1] = be_load_word32(key + 4); - ks->k[2] = be_load_word32(key + 8); - ks->k[3] = be_load_word32(key); -} - -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) -{ - /* Use the little-endian key byte order from the HYENA submission - * and mirror the fixslicing word order of 3, 1, 2, 0 */ - ks->k[0] = le_load_word32(key); - ks->k[1] = le_load_word32(key + 8); - ks->k[2] = le_load_word32(key + 4); - ks->k[3] = le_load_word32(key + 12); -} - -#endif /* GIFT128_VARIANT_TINY */ - -#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext 
into local variables */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. - * Every 5 rounds except the last we add the tweak value to the state */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#elif GIFT128_VARIANT == GIFT128_VARIANT_FULL - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_encrypt_5_rounds(ks->k 
+ 60, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into local variables */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. 
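gift128b_encrypt_preloaded() runs the same 40 rounds as gift128b_encrypt() but lets the caller keep the state as 32-bit words and skip the byte conversions. A small equivalence sketch (the test main() and all-zero inputs are assumptions; it relies on the be_load_word32()/be_store_word32() helpers from internal-util.h, as used elsewhere in this patch):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include "internal-gift128.h"
#include "internal-util.h"

int main(void)
{
    gift128b_key_schedule_t ks;
    unsigned char key[16] = {0}, in[16] = {0};
    unsigned char out_bytes[16], out_from_words[16];
    uint32_t in_words[4], out_words[4];
    int i;

    gift128b_init(&ks, key);

    /* Byte-oriented entry point: loads and stores big-endian words internally */
    gift128b_encrypt(&ks, out_bytes, in);

    /* Preloaded entry point: the caller does the big-endian packing itself */
    for (i = 0; i < 4; ++i)
        in_words[i] = be_load_word32(in + 4 * i);
    gift128b_encrypt_preloaded(&ks, out_words, in_words);
    for (i = 0; i < 4; ++i)
        be_store_word32(out_from_words + 4 * i, out_words[i]);

    printf("%s\n", memcmp(out_bytes, out_from_words, 16) == 0 ? "same" : "different");
    return 0;
}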
- * Every 5 rounds except the last we add the tweak value to the state */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#else /* GIFT128_VARIANT_TINY */ - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 
0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* AddTweak - XOR in the tweak every 5 rounds except the last */ - if (((round + 1) % 5) == 0 && round < 39) - s0 ^= tweak; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#endif /* GIFT128_VARIANT_TINY */ - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the ciphertext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = 
be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. - * Every 5 rounds except the first we add the tweak value to the state */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - - /* Pack the state into the plaintext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#else /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ - -/* The small variant uses fixslicing for encryption, but we need to change - * to bitslicing for decryption because of the difficulty of fast-forwarding - * the fixsliced key schedule to the end. So the tiny variant is used for - * decryption when the small variant is selected. Since the NIST AEAD modes - * for GIFT-128 only use the block encrypt operation, the inefficiencies - * in decryption don't matter all that much */ - -/** - * \def gift128b_load_and_forward_schedule() - * \brief Generate the decryption key at the end of the last round. - * - * To do that, we run the block operation forward to determine the - * final state of the key schedule after the last round: - * - * w0 = ks->k[0]; - * w1 = ks->k[1]; - * w2 = ks->k[2]; - * w3 = ks->k[3]; - * for (round = 0; round < 40; ++round) { - * temp = w3; - * w3 = w2; - * w2 = w1; - * w1 = w0; - * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - * } - * - * We can short-cut all of the above by noticing that we don't need - * to do the word rotations. Every 4 rounds, the rotation alignment - * returns to the original position and each word has been rotated - * by applying the "2 right and 4 left" bit-rotation step to it. - * We then repeat that 10 times for the full 40 rounds. The overall - * effect is to apply a "20 right and 40 left" bit-rotation to every - * word in the key schedule. That is equivalent to "4 right and 8 left" - * on the 16-bit sub-words. 
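The shortcut described above is easy to check in isolation. The following standalone sketch (hypothetical helper names, not part of the patch) applies the per-round key-word rotation ten times -- once for each time a word returns to its slot over the 40 rounds -- and compares the result against the single "4 right and 8 left" sub-word rotation used by gift128b_load_and_forward_schedule():

#include <stdint.h>
#include <stdio.h>

/* One application of the per-round key-word rotation:
 * upper 16 bits rotate right by 2, lower 16 bits rotate left by 4. */
static uint32_t key_word_rotate(uint32_t w)
{
    return ((w & 0xFFFC0000U) >> 2) | ((w & 0x00030000U) << 14) |
           ((w & 0x00000FFFU) << 4) | ((w & 0x0000F000U) >> 12);
}

/* "4 right and 8 left" on the 16-bit sub-words (the shortcut form). */
static uint32_t key_word_fast_forward(uint32_t w)
{
    return ((w & 0xFFF00000U) >> 4) | ((w & 0x000F0000U) << 12) |
           ((w & 0x000000FFU) << 8) | ((w & 0x0000FF00U) >> 8);
}

int main(void)
{
    uint32_t w = 0x12345678U;       /* arbitrary test word */
    uint32_t slow = w;
    int i;
    for (i = 0; i < 10; ++i)        /* 40 rounds touch each word 10 times */
        slow = key_word_rotate(slow);
    printf("%s\n", slow == key_word_fast_forward(w) ? "match" : "mismatch");
    return 0;
}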
- */ -#if GIFT128_VARIANT != GIFT128_VARIANT_SMALL -#define gift128b_load_and_forward_schedule() \ - do { \ - w0 = ks->k[3]; \ - w1 = ks->k[1]; \ - w2 = ks->k[2]; \ - w3 = ks->k[0]; \ - w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ - ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ - w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ - ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ - w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ - ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ - w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ - ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ - } while (0) -#else -/* The small variant needs to also undo some of the rotations that were - * done to generate the fixsliced version of the key schedule */ -#define gift128b_load_and_forward_schedule() \ - do { \ - w0 = ks->k[3]; \ - w1 = ks->k[1]; \ - w2 = ks->k[2]; \ - w3 = ks->k[0]; \ - gift128b_swap_move(w3, w3, 0x000000FFU, 24); \ - gift128b_swap_move(w3, w3, 0x00003333U, 18); \ - gift128b_swap_move(w3, w3, 0x000F000FU, 12); \ - gift128b_swap_move(w3, w3, 0x00550055U, 9); \ - gift128b_swap_move(w1, w1, 0x000000FFU, 24); \ - gift128b_swap_move(w1, w1, 0x00003333U, 18); \ - gift128b_swap_move(w1, w1, 0x000F000FU, 12); \ - gift128b_swap_move(w1, w1, 0x00550055U, 9); \ - gift128b_swap_move(w2, w2, 0x000000FFU, 24); \ - gift128b_swap_move(w2, w2, 0x000F000FU, 12); \ - gift128b_swap_move(w2, w2, 0x03030303U, 6); \ - gift128b_swap_move(w2, w2, 0x11111111U, 3); \ - gift128b_swap_move(w0, w0, 0x000000FFU, 24); \ - gift128b_swap_move(w0, w0, 0x000F000FU, 12); \ - gift128b_swap_move(w0, w0, 0x03030303U, 6); \ - gift128b_swap_move(w0, w0, 0x11111111U, 3); \ - w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ - ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ - w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ - ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ - w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ - ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ - w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ - ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ - } while (0) -#endif - -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the ciphertext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Generate the decryption key at the end of the last round */ - gift128b_load_and_forward_schedule(); - - /* Perform all 40 rounds */ - for (round = 40; round > 0; --round) { - /* Rotate the key schedule backwards */ - temp = w0; - w0 = w1; - w1 = w2; - w2 = w3; - w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | - ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; - - /* InvPermBits - apply the inverse of the 128-bit permutation */ - INV_PERM0(s0); - INV_PERM1(s1); - INV_PERM2(s2); - INV_PERM3(s3); - - /* InvSubCells - apply the inverse of the S-box */ - temp = s0; - s0 = s3; - s3 = temp; - s2 ^= s0 & s1; - s3 ^= 0xFFFFFFFFU; - s1 ^= s3; - s3 ^= s2; - s2 ^= s0 | s1; - s0 ^= s1 
& s3; - s1 ^= s0 & s2; - } - - /* Pack the state into the plaintext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Generate the decryption key at the end of the last round */ - gift128b_load_and_forward_schedule(); - - /* Perform all 40 rounds */ - for (round = 40; round > 0; --round) { - /* Rotate the key schedule backwards */ - temp = w0; - w0 = w1; - w1 = w2; - w2 = w3; - w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | - ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); - - /* AddTweak - XOR in the tweak every 5 rounds except the last */ - if ((round % 5) == 0 && round < 40) - s0 ^= tweak; - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; - - /* InvPermBits - apply the inverse of the 128-bit permutation */ - INV_PERM0(s0); - INV_PERM1(s1); - INV_PERM2(s2); - INV_PERM3(s3); - - /* InvSubCells - apply the inverse of the S-box */ - temp = s0; - s0 = s3; - s3 = temp; - s2 ^= s0 & s1; - s3 ^= 0xFFFFFFFFU; - s1 ^= s3; - s3 ^= s2; - s2 ^= s0 | s1; - s0 ^= s1 & s3; - s1 ^= s0 & s2; - } - - /* Pack the state into the plaintext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#endif /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ - -#endif /* !GIFT128_VARIANT_ASM */ diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128.h b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128.h deleted file mode 100644 index f57d143..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128.h +++ /dev/null @@ -1,246 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LW_INTERNAL_GIFT128_H -#define LW_INTERNAL_GIFT128_H - -/** - * \file internal-gift128.h - * \brief GIFT-128 block cipher. - * - * There are three versions of GIFT-128 in use within the second round - * submissions to the NIST lightweight cryptography competition. - * - * The most efficient version for 32-bit software implementation is the - * GIFT-128-b bit-sliced version from GIFT-COFB and SUNDAE-GIFT. - * - * The second is the nibble-based version from HYENA. We implement the - * HYENA version as a wrapper around the bit-sliced version. - * - * The third version is a variant on the HYENA nibble-based version that - * includes a 4-bit tweak value for domain separation. It is used by - * the ESTATE submission to NIST. - * - * Technically there is a fourth version of GIFT-128 which is the one that - * appeared in the original GIFT-128 paper. It is almost the same as the - * HYENA version except that the byte ordering is big-endian instead of - * HYENA's little-endian. The original version of GIFT-128 doesn't appear - * in any of the NIST submissions so we don't bother with it in this library. - * - * References: https://eprint.iacr.org/2017/622.pdf, - * https://eprint.iacr.org/2020/412.pdf, - * https://giftcipher.github.io/gift/ - */ - -#include -#include -#include "internal-gift128-config.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of a GIFT-128 block in bytes. - */ -#define GIFT128_BLOCK_SIZE 16 - -/** - * \var GIFT128_ROUND_KEYS - * \brief Number of round keys for the GIFT-128 key schedule. - */ -#if GIFT128_VARIANT == GIFT128_VARIANT_TINY -#define GIFT128_ROUND_KEYS 4 -#elif GIFT128_VARIANT == GIFT128_VARIANT_SMALL -#define GIFT128_ROUND_KEYS 20 -#else -#define GIFT128_ROUND_KEYS 80 -#endif - -/** - * \brief Structure of the key schedule for GIFT-128 (bit-sliced). - */ -typedef struct -{ - /** Pre-computed round keys for bit-sliced GIFT-128 */ - uint32_t k[GIFT128_ROUND_KEYS]; - -} gift128b_key_schedule_t; - -/** - * \brief Initializes the key schedule for GIFT-128 (bit-sliced). - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the 16 bytes of the key data. - */ -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced and pre-loaded). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version assumes that the input has already been pre-loaded from - * big-endian into host byte order in the supplied word array. The output - * is delivered in the same way. - */ -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]); - -/** - * \brief Decrypts a 128-bit block with GIFT-128 (bit-sliced). 
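For reference, a minimal, hypothetical usage sketch of the bit-sliced interface declared in this header (it assumes the corresponding C sources of the rhys implementation are compiled and linked in); encrypting and then decrypting in place returns the original block:

#include <stdio.h>
#include "internal-gift128.h"

int main(void)
{
    static const unsigned char key[16] = {
        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
        0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
    };
    unsigned char block[16] = {0};          /* all-zero test block */
    gift128b_key_schedule_t ks;
    int i;

    gift128b_init(&ks, key);
    gift128b_encrypt(&ks, block, block);    /* in-place encryption */
    gift128b_decrypt(&ks, block, block);    /* in-place decryption */

    for (i = 0; i < 16; ++i)
        printf("%02x", block[i]);           /* prints the original zeros */
    printf("\n");
    return 0;
}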
- * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - */ -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Structure of the key schedule for GIFT-128 (nibble-based). - */ -typedef gift128b_key_schedule_t gift128n_key_schedule_t; - -/** - * \brief Initializes the key schedule for GIFT-128 (nibble-based). - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the 16 bytes of the key data. - */ -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (nibble-based). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void gift128n_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with GIFT-128 (nibble-based). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - */ -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/* 4-bit tweak values expanded to 32-bit for TweGIFT-128 */ -#define GIFT128T_TWEAK_0 0x00000000 /**< TweGIFT-128 tweak value 0 */ -#define GIFT128T_TWEAK_1 0xe1e1e1e1 /**< TweGIFT-128 tweak value 1 */ -#define GIFT128T_TWEAK_2 0xd2d2d2d2 /**< TweGIFT-128 tweak value 2 */ -#define GIFT128T_TWEAK_3 0x33333333 /**< TweGIFT-128 tweak value 3 */ -#define GIFT128T_TWEAK_4 0xb4b4b4b4 /**< TweGIFT-128 tweak value 4 */ -#define GIFT128T_TWEAK_5 0x55555555 /**< TweGIFT-128 tweak value 5 */ -#define GIFT128T_TWEAK_6 0x66666666 /**< TweGIFT-128 tweak value 6 */ -#define GIFT128T_TWEAK_7 0x87878787 /**< TweGIFT-128 tweak value 7 */ -#define GIFT128T_TWEAK_8 0x78787878 /**< TweGIFT-128 tweak value 8 */ -#define GIFT128T_TWEAK_9 0x99999999 /**< TweGIFT-128 tweak value 9 */ -#define GIFT128T_TWEAK_10 0xaaaaaaaa /**< TweGIFT-128 tweak value 10 */ -#define GIFT128T_TWEAK_11 0x4b4b4b4b /**< TweGIFT-128 tweak value 11 */ -#define GIFT128T_TWEAK_12 0xcccccccc /**< TweGIFT-128 tweak value 12 */ -#define GIFT128T_TWEAK_13 0x2d2d2d2d /**< TweGIFT-128 tweak value 13 */ -#define GIFT128T_TWEAK_14 0x1e1e1e1e /**< TweGIFT-128 tweak value 14 */ -#define GIFT128T_TWEAK_15 0xffffffff /**< TweGIFT-128 tweak value 15 */ - -/** - * \brief Encrypts a 128-bit block with TweGIFT-128 (tweakable variant). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value expanded to 32-bit. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. 
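The tweak constants above exist purely for domain separation; as the documentation notes just below, an all-zero tweak degenerates to the plain nibble-based cipher. A small, hypothetical host-side check of that property (again assuming the library sources are linked in):

#include <stdio.h>
#include <string.h>
#include "internal-gift128.h"

int main(void)
{
    static const unsigned char key[16] = {
        0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88,
        0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11, 0x00
    };
    static const unsigned char block[16] = {
        0x00, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70,
        0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0
    };
    unsigned char out_n[16], out_t[16];
    gift128n_key_schedule_t ks;

    gift128n_init(&ks, key);
    gift128n_encrypt(&ks, out_n, block);
    gift128t_encrypt(&ks, out_t, block, GIFT128T_TWEAK_0);

    /* With tweak 0 the tweakable variant must match gift128n_encrypt() */
    printf("%s\n", memcmp(out_n, out_t, 16) == 0 ? "identical" : "different");
    return 0;
}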
- * - * This variant of GIFT-128 is used by the ESTATE submission to the - * NIST Lightweight Cryptography Competition. A 4-bit tweak is added to - * some of the rounds to provide domain separation. If the tweak is - * zero, then this function is identical to gift128n_encrypt(). - */ -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak); - -/** - * \brief Decrypts a 128-bit block with TweGIFT-128 (tweakable variant). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value expanded to 32-bit. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This variant of GIFT-128 is used by the ESTATE submission to the - * NIST Lightweight Cryptography Competition. A 4-bit tweak is added to - * some of the rounds to provide domain separation. If the tweak is - * zero, then this function is identical to gift128n_encrypt(). - */ -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128b-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128b-avr.S deleted file mode 100644 index 641613a..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128b-avr.S +++ /dev/null @@ -1,2104 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 40 -table_0: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - - .text -.global gift128b_init - .type gift128b_init, @function -gift128b_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - std Z+4,r18 - std Z+5,r19 - std Z+6,r20 - std Z+7,r21 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - std Z+12,r18 - std Z+13,r19 - std Z+14,r20 - std Z+15,r21 - ret - .size gift128b_init, .-gift128b_init - - .text -.global gift128b_encrypt - .type gift128b_encrypt, @function -gift128b_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 36 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ld r26,Z - ldd 
r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r17,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - mov r16,r1 -46: - rcall 199f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - rcall 199f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - rcall 199f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - rcall 199f - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - ldi r17,40 - cpse r16,r17 - rjmp 46b - rjmp 548f -199: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - movw r18,r22 - movw r20,r2 - mov r0,r4 - and r0,r18 - eor r8,r0 - mov r0,r5 - and r0,r19 - eor r9,r0 - mov r0,r6 - and r0,r20 - eor r10,r0 - mov r0,r7 - and r0,r21 - eor r11,r0 - movw r22,r12 - movw r2,r14 - movw r12,r18 - movw r14,r20 - bst r22,1 - bld r0,0 - bst r22,4 - bld r22,1 - bst r2,0 - bld r22,4 - bst r22,2 - bld r2,0 - bst r23,0 - bld r22,2 - bst r22,3 - bld r23,0 - bst r23,4 - bld r22,3 - bst r2,3 - bld r23,4 - bst r23,6 - bld r2,3 - bst r3,3 - bld r23,6 - bst r23,5 - bld r3,3 - bst r2,7 - bld r23,5 - bst r3,6 - bld r2,7 - bst r3,1 - bld 
r3,6 - bst r22,5 - bld r3,1 - bst r2,4 - bld r22,5 - bst r2,2 - bld r2,4 - bst r23,2 - bld r2,2 - bst r23,3 - bld r23,2 - bst r23,7 - bld r23,3 - bst r3,7 - bld r23,7 - bst r3,5 - bld r3,7 - bst r2,5 - bld r3,5 - bst r2,6 - bld r2,5 - bst r3,2 - bld r2,6 - bst r23,1 - bld r3,2 - bst r22,7 - bld r23,1 - bst r3,4 - bld r22,7 - bst r2,1 - bld r3,4 - bst r22,6 - bld r2,1 - bst r3,0 - bld r22,6 - bst r0,0 - bld r3,0 - bst r4,0 - bld r0,0 - bst r4,1 - bld r4,0 - bst r4,5 - bld r4,1 - bst r6,5 - bld r4,5 - bst r6,7 - bld r6,5 - bst r7,7 - bld r6,7 - bst r7,6 - bld r7,7 - bst r7,2 - bld r7,6 - bst r5,2 - bld r7,2 - bst r5,0 - bld r5,2 - bst r0,0 - bld r5,0 - bst r4,2 - bld r0,0 - bst r5,1 - bld r4,2 - bst r4,4 - bld r5,1 - bst r6,1 - bld r4,4 - bst r4,7 - bld r6,1 - bst r7,5 - bld r4,7 - bst r6,6 - bld r7,5 - bst r7,3 - bld r6,6 - bst r5,6 - bld r7,3 - bst r7,0 - bld r5,6 - bst r0,0 - bld r7,0 - bst r4,3 - bld r0,0 - bst r5,5 - bld r4,3 - bst r6,4 - bld r5,5 - bst r6,3 - bld r6,4 - bst r5,7 - bld r6,3 - bst r7,4 - bld r5,7 - bst r6,2 - bld r7,4 - bst r5,3 - bld r6,2 - bst r5,4 - bld r5,3 - bst r6,0 - bld r5,4 - bst r0,0 - bld r6,0 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r8,2 - bld r8,0 - bst r9,2 - bld r8,2 - bst r9,1 - bld r9,2 - bst r8,5 - bld r9,1 - bst r10,6 - bld r8,5 - bst r11,0 - bld r10,6 - bst r8,3 - bld r11,0 - bst r9,6 - bld r8,3 - bst r11,1 - bld r9,6 - bst r8,7 - bld r11,1 - bst r11,6 - bld r8,7 - bst r11,3 - bld r11,6 - bst r9,7 - bld r11,3 - bst r11,5 - bld r9,7 - bst r10,7 - bld r11,5 - bst r11,4 - bld r10,7 - bst r10,3 - bld r11,4 - bst r9,4 - bld r10,3 - bst r10,1 - bld r9,4 - bst r8,4 - bld r10,1 - bst r10,2 - bld r8,4 - bst r9,0 - bld r10,2 - bst r8,1 - bld r9,0 - bst r8,6 - bld r8,1 - bst r11,2 - bld r8,6 - bst r9,3 - bld r11,2 - bst r9,5 - bld r9,3 - bst r10,5 - bld r9,5 - bst r10,4 - bld r10,5 - bst r10,0 - bld r10,4 - bst r0,0 - bld r10,0 - bst r12,0 - bld r0,0 - bst r12,3 - bld r12,0 - bst r13,7 - bld r12,3 - bst r15,6 - bld r13,7 - bst r15,0 - bld r15,6 - bst r0,0 - bld r15,0 - bst r12,1 - bld r0,0 - bst r12,7 - bld r12,1 - bst r15,7 - bld r12,7 - bst r15,4 - bld r15,7 - bst r14,0 - bld r15,4 - bst r0,0 - bld r14,0 - bst r12,2 - bld r0,0 - bst r13,3 - bld r12,2 - bst r13,6 - bld r13,3 - bst r15,2 - bld r13,6 - bst r13,0 - bld r15,2 - bst r0,0 - bld r13,0 - bst r12,4 - bld r0,0 - bst r14,3 - bld r12,4 - bst r13,5 - bld r14,3 - bst r14,6 - bld r13,5 - bst r15,1 - bld r14,6 - bst r0,0 - bld r15,1 - bst r12,5 - bld r0,0 - bst r14,7 - bld r12,5 - bst r15,5 - bld r14,7 - bst r14,4 - bld r15,5 - bst r14,1 - bld r14,4 - bst r0,0 - bld r14,1 - bst r12,6 - bld r0,0 - bst r15,3 - bld r12,6 - bst r13,4 - bld r15,3 - bst r14,2 - bld r13,4 - bst r13,1 - bld r14,2 - bst r0,0 - bld r13,1 - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - inc r16 - ret -548: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop 
r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt, .-gift128b_encrypt - - .text -.global gift128b_encrypt_preloaded - .type gift128b_encrypt_preloaded, @function -gift128b_encrypt_preloaded: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 36 - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r17,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - mov r16,r1 -46: - rcall 199f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - rcall 199f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - rcall 199f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - rcall 199f - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - ldi r17,40 - cpse r16,r17 - rjmp 46b - rjmp 548f -199: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or 
r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - movw r18,r22 - movw r20,r2 - mov r0,r4 - and r0,r18 - eor r8,r0 - mov r0,r5 - and r0,r19 - eor r9,r0 - mov r0,r6 - and r0,r20 - eor r10,r0 - mov r0,r7 - and r0,r21 - eor r11,r0 - movw r22,r12 - movw r2,r14 - movw r12,r18 - movw r14,r20 - bst r22,1 - bld r0,0 - bst r22,4 - bld r22,1 - bst r2,0 - bld r22,4 - bst r22,2 - bld r2,0 - bst r23,0 - bld r22,2 - bst r22,3 - bld r23,0 - bst r23,4 - bld r22,3 - bst r2,3 - bld r23,4 - bst r23,6 - bld r2,3 - bst r3,3 - bld r23,6 - bst r23,5 - bld r3,3 - bst r2,7 - bld r23,5 - bst r3,6 - bld r2,7 - bst r3,1 - bld r3,6 - bst r22,5 - bld r3,1 - bst r2,4 - bld r22,5 - bst r2,2 - bld r2,4 - bst r23,2 - bld r2,2 - bst r23,3 - bld r23,2 - bst r23,7 - bld r23,3 - bst r3,7 - bld r23,7 - bst r3,5 - bld r3,7 - bst r2,5 - bld r3,5 - bst r2,6 - bld r2,5 - bst r3,2 - bld r2,6 - bst r23,1 - bld r3,2 - bst r22,7 - bld r23,1 - bst r3,4 - bld r22,7 - bst r2,1 - bld r3,4 - bst r22,6 - bld r2,1 - bst r3,0 - bld r22,6 - bst r0,0 - bld r3,0 - bst r4,0 - bld r0,0 - bst r4,1 - bld r4,0 - bst r4,5 - bld r4,1 - bst r6,5 - bld r4,5 - bst r6,7 - bld r6,5 - bst r7,7 - bld r6,7 - bst r7,6 - bld r7,7 - bst r7,2 - bld r7,6 - bst r5,2 - bld r7,2 - bst r5,0 - bld r5,2 - bst r0,0 - bld r5,0 - bst r4,2 - bld r0,0 - bst r5,1 - bld r4,2 - bst r4,4 - bld r5,1 - bst r6,1 - bld r4,4 - bst r4,7 - bld r6,1 - bst r7,5 - bld r4,7 - bst r6,6 - bld r7,5 - bst r7,3 - bld r6,6 - bst r5,6 - bld r7,3 - bst r7,0 - bld r5,6 - bst r0,0 - bld r7,0 - bst r4,3 - bld r0,0 - bst r5,5 - bld r4,3 - bst r6,4 - bld r5,5 - bst r6,3 - bld r6,4 - bst r5,7 - bld r6,3 - bst r7,4 - bld r5,7 - bst r6,2 - bld r7,4 - bst r5,3 - bld r6,2 - bst r5,4 - bld r5,3 - bst r6,0 - bld r5,4 - bst r0,0 - bld r6,0 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r8,2 - bld r8,0 - bst r9,2 - bld r8,2 - bst r9,1 - bld r9,2 - bst r8,5 - bld r9,1 - bst r10,6 - bld r8,5 - bst r11,0 - bld r10,6 - bst r8,3 - bld r11,0 - bst r9,6 - bld r8,3 - bst r11,1 - bld r9,6 - bst r8,7 - bld r11,1 - bst r11,6 - bld r8,7 - bst r11,3 - bld r11,6 - bst r9,7 - bld r11,3 - bst r11,5 - bld r9,7 - bst r10,7 - bld r11,5 - bst r11,4 - bld r10,7 - bst r10,3 - bld r11,4 - bst r9,4 - bld r10,3 - bst r10,1 - bld r9,4 - bst r8,4 - bld r10,1 - bst r10,2 - bld r8,4 - bst r9,0 - bld r10,2 - bst r8,1 - bld r9,0 - bst r8,6 - bld r8,1 - bst r11,2 - bld r8,6 - bst r9,3 - bld r11,2 - bst r9,5 - bld r9,3 - bst r10,5 - bld r9,5 - bst r10,4 - bld r10,5 - bst r10,0 - bld r10,4 - bst r0,0 - bld r10,0 - bst r12,0 - bld r0,0 - bst r12,3 - bld r12,0 - bst r13,7 - bld r12,3 - bst r15,6 - bld r13,7 - bst r15,0 - bld r15,6 - bst r0,0 - bld r15,0 - bst r12,1 - bld r0,0 - bst r12,7 - bld r12,1 - bst r15,7 - bld r12,7 - bst r15,4 - bld r15,7 - bst r14,0 - bld r15,4 - bst r0,0 - bld r14,0 - bst r12,2 - bld r0,0 - bst r13,3 - bld r12,2 - bst r13,6 - bld r13,3 - bst r15,2 - bld r13,6 - bst r13,0 - bld r15,2 - bst r0,0 - bld r13,0 - bst r12,4 - bld r0,0 - bst r14,3 - bld r12,4 - bst r13,5 - bld r14,3 - bst r14,6 - bld r13,5 - bst r15,1 - bld r14,6 - bst r0,0 - bld r15,1 - bst r12,5 - bld r0,0 - bst r14,7 - bld r12,5 - bst r15,5 - bld r14,7 - bst r14,4 - bld r15,5 - bst r14,1 - bld r14,4 - bst r0,0 - bld r14,1 - bst r12,6 - bld r0,0 - bst r15,3 - bld r12,6 - bst r13,4 - bld r15,3 - bst r14,2 - bld r13,4 - bst r13,1 - 
bld r14,2 - bst r0,0 - bld r13,1 - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - inc r16 - ret -548: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded - - .text -.global gift128b_decrypt - .type gift128b_decrypt, @function -gift128b_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r17,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -114: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - 
lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - cpse r16,r1 - rjmp 114b - rjmp 611f -266: - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - bld r11,3 - bst r8,7 - bld r11,6 - bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst r9,1 - 
bld r8,5 - bst r9,2 - bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -611: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_decrypt, .-gift128b_decrypt - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128b-full-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128b-full-avr.S deleted file mode 100644 index ff11875..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128b-full-avr.S +++ /dev/null @@ -1,5037 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - .byte 3 - .byte 3 - 
.byte 39 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - .byte 2 - .byte 0 - .byte 80 - .byte 20 - .byte 129 - .byte 1 - .byte 2 - .byte 1 - .byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128b_init - .type gift128b_init, @function -gift128b_init: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 18 - ld r13,X+ - ld r12,X+ - ld r11,X+ - ld r10,X+ - ld r5,X+ - ld r4,X+ - ld r3,X+ - ld r2,X+ - ld r9,X+ - ld r8,X+ - ld r7,X+ - ld r6,X+ - ld r29,X+ - ld r28,X+ - ld r23,X+ - ld r22,X+ - st Z+,r22 - st Z+,r23 - st Z+,r28 - st Z+,r29 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - ldi r24,4 -33: - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r29 - ror r28 - ror r0 - lsr r29 - ror r28 - ror r0 - or r29,r0 - st Z+,r22 - st Z+,r23 - st Z+,r28 - st Z+,r29 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r28 - mov r28,r4 - mov r4,r0 - mov r0,r29 - mov r29,r5 - mov r5,r0 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - mov r0,r6 - mov r6,r10 - mov r10,r0 - mov r0,r7 - mov r7,r11 - mov r11,r0 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - dec r24 - breq 5115f - rjmp 33b -5115: - subi r30,80 - sbc r31,r1 - ldi r24,2 -119: - ld r22,Z - ldd r23,Z+1 - ldd r28,Z+2 - ldd r29,Z+3 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - 
eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - st Z,r29 - std Z+1,r23 - std Z+2,r28 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r28,Z+6 - ldd r29,Z+7 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+4,r29 - std Z+5,r23 - std Z+6,r28 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r28,Z+10 - ldd r29,Z+11 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror 
r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+8,r29 - std Z+9,r23 - std Z+10,r28 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r28,Z+14 - ldd r29,Z+15 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+12,r29 - std Z+13,r23 - std Z+14,r28 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r28,Z+18 - ldd r29,Z+19 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - 
rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+16,r29 - std Z+17,r23 - std Z+18,r28 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r28,Z+22 - ldd r29,Z+23 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+20,r29 - std Z+21,r23 - std Z+22,r28 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r28,Z+26 - ldd r29,Z+27 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov 
r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+24,r29 - std Z+25,r23 - std Z+26,r28 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r28,Z+30 - ldd r29,Z+31 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+28,r29 - std Z+29,r23 - std Z+30,r28 - std Z+31,r22 - dec r24 - breq 1268f - adiw r30,40 - rjmp 119b -1268: - adiw r30,40 - movw r26,r30 - subi r26,80 - sbc r27,r1 - ldi r24,6 -1274: - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r2 - eor r19,r3 - andi r18,51 - andi r19,51 - eor r2,r18 - eor r3,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - st Z,r2 - std Z+1,r3 - std Z+2,r4 - std Z+3,r5 - movw r18,r22 - movw r20,r28 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - andi r28,204 - andi r29,204 - or r28,r21 - or r29,r18 - or r22,r19 - or r23,r20 - movw r18,r28 - movw r20,r22 - 
lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r28 - eor r19,r29 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r28,r18 - eor r29,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r28,r18 - eor r29,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r28 - std Z+5,r29 - std Z+6,r22 - std Z+7,r23 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - swap r3 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - swap r5 - std Z+8,r2 - std Z+9,r3 - std Z+10,r4 - std Z+11,r5 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r28 - adc r28,r1 - lsl r28 - adc r28,r1 - lsl r28 - adc r28,r1 - lsl r29 - adc r29,r1 - lsl r29 - adc r29,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r28 - std Z+15,r29 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - ldi r25,85 - and r2,r25 - and r3,r25 - and r4,r25 - and r5,r25 - or r2,r19 - or r3,r20 - or r4,r21 - or r5,r18 - std Z+16,r4 - std Z+17,r5 - std Z+18,r2 - std Z+19,r3 - movw r18,r22 - movw r20,r28 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - andi r28,170 - andi r29,170 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - or r22,r18 - or r23,r19 - or r28,r20 - or r29,r21 - std Z+20,r29 - std Z+21,r22 - std Z+22,r23 - std Z+23,r28 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r14,r18 - movw r16,r20 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - eor r14,r18 - eor r15,r19 - eor r16,r20 - eor r17,r21 - ldi r25,8 - and r14,r25 - and r15,r25 - andi r16,8 - andi r17,8 - eor r18,r14 - eor r19,r15 - eor r20,r16 - eor r21,r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - eor r18,r14 - eor r19,r15 - eor r20,r16 - eor r21,r17 - ldi r17,15 - and r2,r17 - and r3,r17 - and r4,r17 - and r5,r17 - or r2,r18 - or r3,r19 - or r4,r20 - or r5,r21 - std Z+24,r2 - std Z+25,r3 - std Z+26,r4 - std Z+27,r5 - movw r18,r28 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r2,r22 - movw r4,r28 - ldi r16,1 - and r2,r16 - and r3,r16 - and r4,r16 - and r5,r16 - lsl r2 - rol r3 - rol r4 - rol r5 - lsl r2 - rol r3 - rol r4 - rol r5 - lsl r2 - rol r3 - rol r4 - rol r5 - or r2,r18 - or r3,r19 - movw r18,r28 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r2,r18 - or r3,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr 
r19 - ror r18 - or r4,r18 - or r5,r19 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r2,r18 - or r3,r19 - or r4,r20 - or r5,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r4,r22 - or r5,r23 - std Z+28,r2 - std Z+29,r3 - std Z+30,r4 - std Z+31,r5 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - mov r0,r1 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - or r5,r0 - std Z+32,r3 - std Z+33,r2 - std Z+34,r4 - std Z+35,r5 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r28 - mov r28,r29 - mov r29,r0 - lsl r28 - rol r29 - adc r28,r1 - lsl r28 - rol r29 - adc r28,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r28 - std Z+39,r29 - dec r24 - breq 1733f - adiw r30,40 - rjmp 1274b -1733: - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_init, .-gift128b_init - - .text -.global gift128b_encrypt - .type gift128b_encrypt, @function -gift128b_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 19 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - movw r26,r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rjmp 765f -27: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw 
r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and 
r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - 
ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -765: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - pop r0 - pop r0 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size 
gift128b_encrypt, .-gift128b_encrypt - - .text -.global gift128b_encrypt_preloaded - .type gift128b_encrypt_preloaded, @function -gift128b_encrypt_preloaded: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 19 - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - movw r26,r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rjmp 765f -27: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else 
- lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor 
r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - 
mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -765: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - pop r0 - pop r0 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded - - .text -.global gift128b_decrypt - .type gift128b_decrypt, @function -gift128b_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 19 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - movw r26,r30 - subi r26,192 - sbci r27,254 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,160 - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rjmp 768f -30: - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r1 - lsr r22 - ror r0 - lsr r22 - ror r0 - or r22,r0 - mov r0,r1 - lsr r23 - ror r0 - lsr r23 - ror r0 - or r23,r0 - mov r0,r1 - lsr r2 - ror r0 - lsr r2 - ror r0 - or r2,r0 - mov r0,r1 - lsr r3 - ror r0 - lsr r3 - ror r0 - or r3,r0 - swap r4 - swap r5 - swap r6 - swap r7 - lsl r8 - adc r8,r1 - lsl r8 - adc r8,r1 - lsl r9 - adc r9,r1 - lsl r9 - adc r9,r1 - lsl r10 - adc r10,r1 - lsl r10 - adc r10,r1 - lsl r11 - adc r11,r1 - lsl r11 - adc r11,r1 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - com r22 - com r23 - com r2 - com r3 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - 
eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 
-#endif - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - com r22 - com r23 - com r2 - com r3 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,119 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r15 - ror r14 - ror r13 - ror r12 - lsr r15 - ror r14 - ror r13 - ror r12 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,17 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 
- or r13,r19 - or r14,r20 - or r15,r21 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -768: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - pop r0 - pop r0 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_decrypt, .-gift128b_decrypt - -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128b-small-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128b-small-avr.S deleted file mode 100644 index 77ef9fd..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128b-small-avr.S +++ /dev/null @@ -1,6053 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - .byte 3 - .byte 3 - .byte 39 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - 
.byte 2 - .byte 0 - .byte 80 - .byte 20 - .byte 129 - .byte 1 - .byte 2 - .byte 1 - .byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128b_init - .type gift128b_init, @function -gift128b_init: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 16 - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - ldi r24,4 -33: - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - mov r0,r22 - mov r22,r4 - mov r4,r0 - mov r0,r23 - mov r23,r5 - mov r5,r0 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - dec r24 - breq 5115f - rjmp 33b -5115: - subi r30,80 - sbc r31,r1 - ldi r24,2 -119: - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - st Z,r3 - std Z+1,r23 - std Z+2,r2 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov 
r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+4,r3 - std Z+5,r23 - std Z+6,r2 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+8,r3 - std Z+9,r23 - std Z+10,r2 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror 
r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+12,r3 - std Z+13,r23 - std Z+14,r2 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r3 - std Z+17,r23 - std Z+18,r2 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror 
r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+20,r3 - std Z+21,r23 - std Z+22,r2 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+24,r3 - std Z+25,r23 - std Z+26,r2 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - 
eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+28,r3 - std Z+29,r23 - std Z+30,r2 - std Z+31,r22 - dec r24 - breq 1268f - adiw r30,40 - rjmp 119b -1268: - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size gift128b_init, .-gift128b_init - - .text -.global gift128b_encrypt - .type gift128b_encrypt, @function -gift128b_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ldi r24,20 -1: - ld r22,Z+ - ld r23,Z+ - ld r2,Z+ - ld r3,Z+ - std Y+1,r22 - std Y+2,r23 - std Y+3,r2 - std Y+4,r3 - adiw r28,4 - dec r24 - brne 1b - subi r28,80 - sbc r29,r1 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - 
ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 73f - rcall 73f - rjmp 1285f -73: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - 
and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - 
mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 
- mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -811: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl 
r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - 
std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -1285: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt, .-gift128b_encrypt - - .text -.global gift128b_encrypt_preloaded - .type gift128b_encrypt_preloaded, @function -gift128b_encrypt_preloaded: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ldi r24,20 -1: - ld r22,Z+ - ld r23,Z+ - ld r2,Z+ - ld r3,Z+ - std Y+1,r22 - std Y+2,r23 - std Y+3,r2 - std Y+4,r3 - adiw r28,4 - dec r24 - brne 1b - subi r28,80 - sbc r29,r1 - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 
73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 73f - rcall 73f - rjmp 1285f -73: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor 
r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor 
r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov 
r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -811: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi 
r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw 
r26,r30 - ret -1285: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 40 -table_1: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - - .text -.global gift128b_decrypt - .type gift128b_decrypt, @function -gift128b_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - 
rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov 
r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r17,hh8(table_1) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -678: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 830f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror 
r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 830f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 830f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 830f - cpse r16,r1 - rjmp 678b - rjmp 1175f -830: - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - bld r11,3 - bst r8,7 - bld r11,6 - bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst 
r9,1 - bld r8,5 - bst r9,2 - bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -1175: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_decrypt, .-gift128b_decrypt - -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128b-tiny-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128b-tiny-avr.S deleted file mode 100644 index e7a03f1..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-gift128b-tiny-avr.S +++ /dev/null @@ -1,6766 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_TINY - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - 
.byte 3 - .byte 3 - .byte 39 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - .byte 2 - .byte 0 - .byte 80 - .byte 20 - .byte 129 - .byte 1 - .byte 2 - .byte 1 - .byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128b_init - .type gift128b_init, @function -gift128b_init: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 16 - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - st Z,r22 - std Z+1,r23 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size gift128b_init, .-gift128b_init - - .text -.global gift128b_encrypt - .type gift128b_encrypt, @function -gift128b_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - movw r30,r28 - adiw r30,1 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - ldi r24,4 -35: - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - mov r0,r22 - mov r22,r4 - mov r4,r0 - mov r0,r23 - mov r23,r5 - mov r5,r0 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - 
ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - dec r24 - breq 5117f - rjmp 35b -5117: - subi r30,80 - sbc r31,r1 - ldi r24,2 -121: - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - st Z,r3 - std Z+1,r23 - std Z+2,r2 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor 
r23,r19 - eor r2,r20 - eor r3,r21 - std Z+4,r3 - std Z+5,r23 - std Z+6,r2 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+8,r3 - std Z+9,r23 - std Z+10,r2 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 
- lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+12,r3 - std Z+13,r23 - std Z+14,r2 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r3 - std Z+17,r23 - std Z+18,r2 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+20,r3 - std Z+21,r23 - std Z+22,r2 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor 
r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+24,r3 - std Z+25,r23 - std Z+26,r2 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+28,r3 - std Z+29,r23 - std Z+30,r2 - std Z+31,r22 - dec r24 - breq 1270f - adiw r30,40 - rjmp 121b -1270: - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ 
- ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 1329f - rcall 1329f - rjmp 2541f -1329: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - 
rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and 
r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr 
r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -2067: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - 
movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor 
r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -2541: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt, .-gift128b_encrypt - - .text -.global gift128b_encrypt_preloaded - .type gift128b_encrypt_preloaded, @function -gift128b_encrypt_preloaded: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - movw r30,r28 - adiw r30,1 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - ldi r24,4 -35: - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r3 - ror r2 - 
ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - mov r0,r22 - mov r22,r4 - mov r4,r0 - mov r0,r23 - mov r23,r5 - mov r5,r0 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - dec r24 - breq 5117f - rjmp 35b -5117: - subi r30,80 - sbc r31,r1 - ldi r24,2 -121: - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - st Z,r3 - std Z+1,r23 - std Z+2,r2 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor 
r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+4,r3 - std Z+5,r23 - std Z+6,r2 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+8,r3 - std Z+9,r23 - std Z+10,r2 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov 
r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+12,r3 - std Z+13,r23 - std Z+14,r2 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r3 - std Z+17,r23 - std Z+18,r2 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl 
r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+20,r3 - std Z+21,r23 - std Z+22,r2 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+24,r3 - std Z+25,r23 - std Z+26,r2 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - 
mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+28,r3 - std Z+29,r23 - std Z+30,r2 - std Z+31,r22 - dec r24 - breq 1270f - adiw r30,40 - rjmp 121b -1270: - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 1329f - rcall 1329f - rjmp 2541f -1329: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or 
r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z 
-#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - 
eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -2067: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push 
r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror 
r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -2541: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 40 -table_1: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - - .text -.global gift128b_decrypt - .type gift128b_decrypt, @function -gift128b_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - 
push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r17,hh8(table_1) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -114: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - cpse r16,r1 - rjmp 114b - 
rjmp 611f -266: - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - bld r11,3 - bst r8,7 - bld r11,6 - bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst r9,1 - bld r8,5 - bst r9,2 - bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - 
eor r9,r19 - eor r10,r20 - eor r11,r21 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -611: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_decrypt, .-gift128b_decrypt - -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-util.h b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/sundae-gift.c b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/sundae-gift.c deleted file mode 100644 index d192b8e..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/sundae-gift.c +++ /dev/null @@ -1,356 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "sundae-gift.h" -#include "internal-gift128.h" -#include "internal-util.h" -#include <string.h> - -aead_cipher_t const sundae_gift_0_cipher = { - "SUNDAE-GIFT-0", - SUNDAE_GIFT_KEY_SIZE, - SUNDAE_GIFT_0_NONCE_SIZE, - SUNDAE_GIFT_TAG_SIZE, - AEAD_FLAG_NONE, - sundae_gift_0_aead_encrypt, - sundae_gift_0_aead_decrypt -}; - -aead_cipher_t const sundae_gift_64_cipher = { - "SUNDAE-GIFT-64", - SUNDAE_GIFT_KEY_SIZE, - SUNDAE_GIFT_64_NONCE_SIZE, - SUNDAE_GIFT_TAG_SIZE, - AEAD_FLAG_NONE, - sundae_gift_64_aead_encrypt, - sundae_gift_64_aead_decrypt -}; - -aead_cipher_t const sundae_gift_96_cipher = { - "SUNDAE-GIFT-96", - SUNDAE_GIFT_KEY_SIZE, - SUNDAE_GIFT_96_NONCE_SIZE, - SUNDAE_GIFT_TAG_SIZE, - AEAD_FLAG_NONE, - sundae_gift_96_aead_encrypt, - sundae_gift_96_aead_decrypt -}; - -aead_cipher_t const sundae_gift_128_cipher = { - "SUNDAE-GIFT-128", - SUNDAE_GIFT_KEY_SIZE, - SUNDAE_GIFT_128_NONCE_SIZE, - SUNDAE_GIFT_TAG_SIZE, - AEAD_FLAG_NONE, - sundae_gift_128_aead_encrypt, - sundae_gift_128_aead_decrypt -}; - -/* Multiply a block value by 2 in the special byte field */ -STATIC_INLINE void sundae_gift_multiply(unsigned char B[16]) -{ - unsigned char B0 = B[0]; - unsigned index; - for (index = 0; index < 15; ++index) - B[index] = B[index + 1]; - B[15] = B0; - B[10] ^= B0; - B[12] ^= B0; - B[14] ^= B0; -} - -/* Compute a MAC over the concatenation of two data buffers */ -static void sundae_gift_aead_mac - (const gift128b_key_schedule_t *ks, unsigned char V[16], - const unsigned char *data1, unsigned data1len, - const unsigned char *data2, unsigned long data2len) -{ - unsigned len; - - /* Nothing to do if the input is empty */ - if (!data1len && !data2len) - return; - - /* Format the first block. We assume that data1len <= 16 - * as it will be the nonce if it is non-zero in length */ - lw_xor_block(V, data1, data1len); - len = 16 - data1len; - if (len > data2len) - len = (unsigned)data2len; - lw_xor_block(V + data1len, data2, len); - data2 += len; - data2len -= len; - len += data1len; - - /* Process as many full blocks as we can, except the last */ - while (data2len > 0) { - gift128b_encrypt(ks, V, V); - len = 16; - if (len > data2len) - len = (unsigned)data2len; - lw_xor_block(V, data2, len); - data2 += len; - data2len -= len; - } - - /* Pad and process the last block */ - if (len < 16) { - V[len] ^= 0x80; - sundae_gift_multiply(V); - gift128b_encrypt(ks, V, V); - } else { - sundae_gift_multiply(V); - sundae_gift_multiply(V); - gift128b_encrypt(ks, V, V); - } -} - -static int sundae_gift_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, unsigned npublen, - const unsigned char *k, unsigned char domainsep) -{ - gift128b_key_schedule_t ks; - unsigned char V[16]; - unsigned char T[16]; - unsigned char P[16]; - - /* Compute the length of the output ciphertext */ - *clen = mlen + SUNDAE_GIFT_TAG_SIZE; - - /* Set the key schedule */ - gift128b_init(&ks, k); - - /* Format and encrypt the initial domain separation block */ - if (adlen > 0) - domainsep |= 0x80; - if (mlen > 0) - domainsep |= 0x40; - V[0] = domainsep; - memset(V + 1, 0, sizeof(V) - 1); - gift128b_encrypt(&ks, T, V); - - /* Authenticate the nonce and the associated data */ - sundae_gift_aead_mac(&ks, T, npub, npublen, ad, adlen); - - /* Authenticate the plaintext */ - sundae_gift_aead_mac(&ks, T, 0, 0, m, mlen); - - /* Encrypt the plaintext to produce the ciphertext. 
We need to be - * careful how we manage the data because we could be doing in-place - * encryption. In SUNDAE-GIFT, the first 16 bytes of the ciphertext - * is the tag rather than the last 16 bytes in other algorithms. - * We need to swap the plaintext for the current block with the - * ciphertext or tag from the previous block */ - memcpy(V, T, 16); - while (mlen >= 16) { - gift128b_encrypt(&ks, V, V); - lw_xor_block_2_src(P, V, m, 16); - memcpy(c, T, 16); - memcpy(T, P, 16); - c += 16; - m += 16; - mlen -= 16; - } - if (mlen > 0) { - unsigned leftover = (unsigned)mlen; - gift128b_encrypt(&ks, V, V); - lw_xor_block(V, m, leftover); - memcpy(c, T, 16); - memcpy(c + 16, V, leftover); - } else { - memcpy(c, T, 16); - } - return 0; -} - -static int sundae_gift_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, unsigned npublen, - const unsigned char *k, unsigned char domainsep) -{ - gift128b_key_schedule_t ks; - unsigned char V[16]; - unsigned char T[16]; - unsigned char *mtemp; - unsigned long len; - - /* Bail out if the ciphertext is too short */ - if (clen < SUNDAE_GIFT_TAG_SIZE) - return -1; - len = *mlen = clen - SUNDAE_GIFT_TAG_SIZE; - - /* Set the key schedule */ - gift128b_init(&ks, k); - - /* Decrypt the ciphertext to produce the plaintext, using the - * tag as the initialization vector for the decryption process */ - memcpy(T, c, SUNDAE_GIFT_TAG_SIZE); - c += SUNDAE_GIFT_TAG_SIZE; - mtemp = m; - memcpy(V, T, 16); - while (len >= 16) { - gift128b_encrypt(&ks, V, V); - lw_xor_block_2_src(mtemp, c, V, 16); - c += 16; - mtemp += 16; - len -= 16; - } - if (len > 0) { - gift128b_encrypt(&ks, V, V); - lw_xor_block_2_src(mtemp, c, V, (unsigned)len); - } - - /* Format and encrypt the initial domain separation block */ - if (adlen > 0) - domainsep |= 0x80; - if (clen > SUNDAE_GIFT_TAG_SIZE) - domainsep |= 0x40; - V[0] = domainsep; - memset(V + 1, 0, sizeof(V) - 1); - gift128b_encrypt(&ks, V, V); - - /* Authenticate the nonce and the associated data */ - sundae_gift_aead_mac(&ks, V, npub, npublen, ad, adlen); - - /* Authenticate the plaintext */ - sundae_gift_aead_mac(&ks, V, 0, 0, m, *mlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, T, V, 16); -} - -int sundae_gift_0_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - (void)npub; - return sundae_gift_aead_encrypt - (c, clen, m, mlen, ad, adlen, 0, 0, k, 0x00); -} - -int sundae_gift_0_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - (void)npub; - return sundae_gift_aead_decrypt - (m, mlen, c, clen, ad, adlen, 0, 0, k, 0x00); -} - -int sundae_gift_64_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_encrypt - (c, clen, m, mlen, ad, adlen, - npub, SUNDAE_GIFT_64_NONCE_SIZE, k, 0x90); -} - -int sundae_gift_64_aead_decrypt 
- (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_decrypt - (m, mlen, c, clen, ad, adlen, - npub, SUNDAE_GIFT_64_NONCE_SIZE, k, 0x90); -} - -int sundae_gift_96_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_encrypt - (c, clen, m, mlen, ad, adlen, - npub, SUNDAE_GIFT_96_NONCE_SIZE, k, 0xA0); -} - -int sundae_gift_96_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_decrypt - (m, mlen, c, clen, ad, adlen, - npub, SUNDAE_GIFT_96_NONCE_SIZE, k, 0xA0); -} - -int sundae_gift_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_encrypt - (c, clen, m, mlen, ad, adlen, - npub, SUNDAE_GIFT_128_NONCE_SIZE, k, 0xB0); -} - -int sundae_gift_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_decrypt - (m, mlen, c, clen, ad, adlen, - npub, SUNDAE_GIFT_128_NONCE_SIZE, k, 0xB0); -} diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/sundae-gift.h b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/sundae-gift.h deleted file mode 100644 index 9040dd5..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys-avr/sundae-gift.h +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LWCRYPTO_SUNDAE_GIFT_H -#define LWCRYPTO_SUNDAE_GIFT_H - -#include "aead-common.h" - -/** - * \file sundae-gift.h - * \brief SUNDAE-GIFT encryption algorithm family. - * - * The SUNDAE-GIFT family consists of several related algorithms: - * - * \li SUNDAE-GIFT-0 with a 128-bit key, a 0-bit nonce, and 128-bit tag. - * \li SUNDAE-GIFT-64 with a 128-bit key, a 64-bit nonce, and 128-bit tag. - * \li SUNDAE-GIFT-96 with a 128-bit key, a 96-bit nonce, and 128-bit tag. - * This is the primary member of the family. - * \li SUNDAE-GIFT-128 with a 128-bit key, a 128-bit nonce, and 128-bit tag. - * - * SUNDAE-GIFT is resistant against nonce reuse as long as the combination - * of the associated data and plaintext is unique. - * - * If a nonce is reused (or there is no nonce in the case of SUNDAE-GIFT-0), - * then two packets with the same associated data and plaintext will encrypt - * to the same ciphertext. This will leak that the same plaintext has been - * sent for a second time but will not reveal the plaintext itself. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all SUNDAE-GIFT family members. - */ -#define SUNDAE_GIFT_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for all SUNDAE-GIFT family members. - */ -#define SUNDAE_GIFT_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SUNDAE-GIFT-0. - */ -#define SUNDAE_GIFT_0_NONCE_SIZE 0 - -/** - * \brief Size of the nonce for SUNDAE-GIFT-64. - */ -#define SUNDAE_GIFT_64_NONCE_SIZE 8 - -/** - * \brief Size of the nonce for SUNDAE-GIFT-96. - */ -#define SUNDAE_GIFT_96_NONCE_SIZE 12 - -/** - * \brief Size of the nonce for SUNDAE-GIFT-128. - */ -#define SUNDAE_GIFT_128_NONCE_SIZE 16 - -/** - * \brief Meta-information block for the SUNDAE-GIFT-0 cipher. - */ -extern aead_cipher_t const sundae_gift_0_cipher; - -/** - * \brief Meta-information block for the SUNDAE-GIFT-64 cipher. - */ -extern aead_cipher_t const sundae_gift_64_cipher; - -/** - * \brief Meta-information block for the SUNDAE-GIFT-96 cipher. - */ -extern aead_cipher_t const sundae_gift_96_cipher; - -/** - * \brief Meta-information block for the SUNDAE-GIFT-128 cipher. - */ -extern aead_cipher_t const sundae_gift_128_cipher; - -/** - * \brief Encrypts and authenticates a packet with SUNDAE-GIFT-0. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce - not used by this algorithm. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa sundae_gift_0_aead_decrypt() - */ -int sundae_gift_0_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SUNDAE-GIFT-0. - * - * \param m Buffer to receive the plaintext message on output. 
- * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce - not used by this algorithm. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa sundae_gift_0_aead_encrypt() - */ -int sundae_gift_0_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SUNDAE-GIFT-64. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 8 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa sundae_gift_64_aead_decrypt() - */ -int sundae_gift_64_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SUNDAE-GIFT-64. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 8 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
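 *
 * As a rough usage sketch (illustrative only, with hypothetical buffer
 * names and no error handling), a SUNDAE-GIFT-64 round trip with the
 * sizes documented above looks like:
 *
 *     unsigned char key[SUNDAE_GIFT_KEY_SIZE] = {0};
 *     unsigned char nonce[SUNDAE_GIFT_64_NONCE_SIZE] = {0};
 *     unsigned char msg[4] = {'t', 'e', 's', 't'};
 *     unsigned char ct[sizeof(msg) + SUNDAE_GIFT_TAG_SIZE];
 *     unsigned char pt[sizeof(msg)];
 *     unsigned long long clen, mlen;
 *     sundae_gift_64_aead_encrypt(ct, &clen, msg, sizeof(msg),
 *                                 0, 0, 0, nonce, key);
 *     if (sundae_gift_64_aead_decrypt(pt, &mlen, 0, ct, clen,
 *                                     0, 0, nonce, key) != 0) {
 *         // tag check failed; pt must not be used
 *     }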
- * - * \sa sundae_gift_64_aead_encrypt() - */ -int sundae_gift_64_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SUNDAE-GIFT-96. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa sundae_gift_96_aead_decrypt() - */ -int sundae_gift_96_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SUNDAE-GIFT-96. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa sundae_gift_96_aead_encrypt() - */ -int sundae_gift_96_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SUNDAE-GIFT-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. 
- * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa sundae_gift_128_aead_decrypt() - */ -int sundae_gift_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SUNDAE-GIFT-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa sundae_gift_128_aead_encrypt() - */ -int sundae_gift_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128-config.h b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128-config.h new file mode 100644 index 0000000..62131ba --- /dev/null +++ b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128-config.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_GIFT128_CONFIG_H +#define LW_INTERNAL_GIFT128_CONFIG_H ++ +/** + * \file internal-gift128-config.h + * \brief Configures the variant of GIFT-128 to use. 
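 *
 * The selection logic below defaults to the full variant, so a port can
 * normally pick another variant simply by defining GIFT128_VARIANT before
 * this header is seen, for example (illustrative only):
 *
 *     // in the build system: -DGIFT128_VARIANT=GIFT128_VARIANT_TINY
 *     // or in a wrapper header, before including this file:
 *     #define GIFT128_VARIANT GIFT128_VARIANT_SMALL
 *     #include "internal-gift128-config.h"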
+ */ + +/** + * \brief Select the full variant of GIFT-128. + * + * The full variant requires 320 bytes for the key schedule and uses the + * fixslicing method to implement encryption and decryption. + */ +#define GIFT128_VARIANT_FULL 0 + +/** + * \brief Select the small variant of GIFT-128. + * + * The small variant requires 80 bytes for the key schedule. The rest + * of the key schedule is expanded on the fly during encryption. + * + * The fixslicing method is used to implement encryption and the slower + * bitslicing method is used to implement decryption. The small variant + * is suitable when memory is at a premium, decryption is not needed, + * but encryption performance is still important. + */ +#define GIFT128_VARIANT_SMALL 1 + +/** + * \brief Select the tiny variant of GIFT-128. + * + * The tiny variant requires 16 bytes for the key schedule and uses the + * bitslicing method to implement encryption and decryption. It is suitable + * for use when memory is very tight and performance is not critical. + */ +#define GIFT128_VARIANT_TINY 2 + +/** + * \def GIFT128_VARIANT + * \brief Selects the default variant of GIFT-128 to use on this platform. + */ +/** + * \def GIFT128_VARIANT_ASM + * \brief Defined to 1 if the GIFT-128 implementation has been replaced + * with an assembly code version. + */ +#if defined(__AVR__) && !defined(GIFT128_VARIANT_ASM) +#define GIFT128_VARIANT_ASM 1 +#endif +#if !defined(GIFT128_VARIANT) +#define GIFT128_VARIANT GIFT128_VARIANT_FULL +#endif +#if !defined(GIFT128_VARIANT_ASM) +#define GIFT128_VARIANT_ASM 0 +#endif + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128.c b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128.c index 681dbc8..c6ac5ec 100644 --- a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128.c +++ b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128.c @@ -23,8 +23,12 @@ #include "internal-gift128.h" #include "internal-util.h" +#if !GIFT128_VARIANT_ASM + +#if GIFT128_VARIANT != GIFT128_VARIANT_TINY + /* Round constants for GIFT-128 in the fixsliced representation */ -static uint32_t const GIFT128_RC[40] = { +static uint32_t const GIFT128_RC_fixsliced[40] = { 0x10000008, 0x80018000, 0x54000002, 0x01010181, 0x8000001f, 0x10888880, 0x6001e000, 0x51500002, 0x03030180, 0x8000002f, 0x10088880, 0x60016000, 0x41500002, 0x03030080, 0x80000027, 0x10008880, 0x4001e000, 0x11500002, @@ -34,6 +38,246 @@ static uint32_t const GIFT128_RC[40] = { 0xc001a000, 0x14500002, 0x01020181, 0x8000001a }; +#endif + +#if GIFT128_VARIANT != GIFT128_VARIANT_FULL + +/* Round constants for GIFT-128 in the bitsliced representation */ +static uint8_t const GIFT128_RC[40] = { + 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, + 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, + 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, + 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38, + 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A +}; + +#endif + +/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ +#define bit_permute_step(_y, mask, shift) \ + do { \ + uint32_t y = (_y); \ + uint32_t t = ((y >> (shift)) ^ y) & (mask); \ + (_y) = (y ^ t) ^ (t << (shift)); \ + } while (0) + +/* + * The permutation below was generated by the online permuation generator at + * "http://programming.sirrida.de/calcperm.php". 
+ * + * All of the permutuations are essentially the same, except that each is + * rotated by 8 bits with respect to the next: + * + * P0: 0 24 16 8 1 25 17 9 2 26 18 10 3 27 19 11 4 28 20 12 5 29 21 13 6 30 22 14 7 31 23 15 + * P1: 8 0 24 16 9 1 25 17 10 2 26 18 11 3 27 19 12 4 28 20 13 5 29 21 14 6 30 22 15 7 31 23 + * P2: 16 8 0 24 17 9 1 25 18 10 2 26 19 11 3 27 20 12 4 28 21 13 5 29 22 14 6 30 23 15 7 31 + * P3: 24 16 8 0 25 17 9 1 26 18 10 2 27 19 11 3 28 20 12 4 29 21 13 5 30 22 14 6 31 23 15 7 + * + * The most efficient permutation from the online generator was P3, so we + * perform it as the core of the others, and then perform a final rotation. + * + * It is possible to do slightly better than "P3 then rotate" on desktop and + * server architectures for the other permutations. But the advantage isn't + * as evident on embedded platforms so we keep things simple. + */ +#define PERM3_INNER(x) \ + do { \ + bit_permute_step(x, 0x0a0a0a0a, 3); \ + bit_permute_step(x, 0x00cc00cc, 6); \ + bit_permute_step(x, 0x0000f0f0, 12); \ + bit_permute_step(x, 0x000000ff, 24); \ + } while (0) +#define PERM0(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate8(_x); \ + } while (0) +#define PERM1(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate16(_x); \ + } while (0) +#define PERM2(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate24(_x); \ + } while (0) +#define PERM3(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) + +#define INV_PERM3_INNER(x) \ + do { \ + bit_permute_step(x, 0x00550055, 9); \ + bit_permute_step(x, 0x00003333, 18); \ + bit_permute_step(x, 0x000f000f, 12); \ + bit_permute_step(x, 0x000000ff, 24); \ + } while (0) +#define INV_PERM0(x) \ + do { \ + uint32_t _x = rightRotate8(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM1(x) \ + do { \ + uint32_t _x = rightRotate16(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM2(x) \ + do { \ + uint32_t _x = rightRotate24(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM3(x) \ + do { \ + uint32_t _x = (x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) + +/** + * \brief Converts the GIFT-128 nibble-based representation into word-based. + * + * \param output Output buffer to write the word-based version to. + * \param input Input buffer to read the nibble-based version from. + * + * The \a input and \a output buffers can be the same buffer. + */ +static void gift128n_to_words + (unsigned char *output, const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + + /* Load the input buffer into 32-bit words. We use the nibble order + * from the HYENA submission to NIST which is byte-reversed with respect + * to the nibble order of the original GIFT-128 paper. Nibble zero is in + * the first byte instead of the last, which means little-endian order. */ + s0 = le_load_word32(input + 12); + s1 = le_load_word32(input + 8); + s2 = le_load_word32(input + 4); + s3 = le_load_word32(input); + + /* Rearrange the bits so that bits 0..3 of each nibble are + * scattered to bytes 0..3 of each word. The permutation is: + * + * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 + * + * Generated with "http://programming.sirrida.de/calcperm.php". 
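 *
 * Each bit_permute_step(x, mask, shift) used below exchanges, for every
 * bit position p selected by mask, bit p of x with bit p + shift of x
 * (the masks used here never overlap with their shifted copies). A tiny
 * self-check of that building block, assuming a hosted test harness
 * (illustrative only, not part of the cipher):
 *
 *     uint32_t v = 0x00000001;
 *     bit_permute_step(v, 0x000000ff, 24);
 *     // bits 0..7 swapped with bits 24..31, so v == 0x01000000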
+ */ + #define PERM_WORDS(_x) \ + do { \ + uint32_t x = (_x); \ + bit_permute_step(x, 0x0a0a0a0a, 3); \ + bit_permute_step(x, 0x00cc00cc, 6); \ + bit_permute_step(x, 0x0000f0f0, 12); \ + bit_permute_step(x, 0x0000ff00, 8); \ + (_x) = x; \ + } while (0) + PERM_WORDS(s0); + PERM_WORDS(s1); + PERM_WORDS(s2); + PERM_WORDS(s3); + + /* Rearrange the bytes and write them to the output buffer */ + output[0] = (uint8_t)s0; + output[1] = (uint8_t)s1; + output[2] = (uint8_t)s2; + output[3] = (uint8_t)s3; + output[4] = (uint8_t)(s0 >> 8); + output[5] = (uint8_t)(s1 >> 8); + output[6] = (uint8_t)(s2 >> 8); + output[7] = (uint8_t)(s3 >> 8); + output[8] = (uint8_t)(s0 >> 16); + output[9] = (uint8_t)(s1 >> 16); + output[10] = (uint8_t)(s2 >> 16); + output[11] = (uint8_t)(s3 >> 16); + output[12] = (uint8_t)(s0 >> 24); + output[13] = (uint8_t)(s1 >> 24); + output[14] = (uint8_t)(s2 >> 24); + output[15] = (uint8_t)(s3 >> 24); +} + +/** + * \brief Converts the GIFT-128 word-based representation into nibble-based. + * + * \param output Output buffer to write the nibble-based version to. + * \param input Input buffer to read the word-based version from. + */ +static void gift128n_to_nibbles + (unsigned char *output, const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + + /* Load the input bytes and rearrange them so that s0 contains the + * most significant nibbles and s3 contains the least significant */ + s0 = (((uint32_t)(input[12])) << 24) | + (((uint32_t)(input[8])) << 16) | + (((uint32_t)(input[4])) << 8) | + ((uint32_t)(input[0])); + s1 = (((uint32_t)(input[13])) << 24) | + (((uint32_t)(input[9])) << 16) | + (((uint32_t)(input[5])) << 8) | + ((uint32_t)(input[1])); + s2 = (((uint32_t)(input[14])) << 24) | + (((uint32_t)(input[10])) << 16) | + (((uint32_t)(input[6])) << 8) | + ((uint32_t)(input[2])); + s3 = (((uint32_t)(input[15])) << 24) | + (((uint32_t)(input[11])) << 16) | + (((uint32_t)(input[7])) << 8) | + ((uint32_t)(input[3])); + + /* Apply the inverse of PERM_WORDS() from the function above */ + #define INV_PERM_WORDS(_x) \ + do { \ + uint32_t x = (_x); \ + bit_permute_step(x, 0x00aa00aa, 7); \ + bit_permute_step(x, 0x0000cccc, 14); \ + bit_permute_step(x, 0x00f000f0, 4); \ + bit_permute_step(x, 0x0000ff00, 8); \ + (_x) = x; \ + } while (0) + INV_PERM_WORDS(s0); + INV_PERM_WORDS(s1); + INV_PERM_WORDS(s2); + INV_PERM_WORDS(s3); + + /* Store the result into the output buffer as 32-bit words */ + le_store_word32(output + 12, s0); + le_store_word32(output + 8, s1); + le_store_word32(output + 4, s2); + le_store_word32(output, s3); +} + +void gift128n_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + gift128n_to_words(output, input); + gift128b_encrypt(ks, output, output); + gift128n_to_nibbles(output, output); +} + +void gift128n_decrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + gift128n_to_words(output, input); + gift128b_decrypt(ks, output, output); + gift128n_to_nibbles(output, output); +} + +#if GIFT128_VARIANT != GIFT128_VARIANT_TINY + /** * \brief Swaps bits within two words. 
* @@ -202,21 +446,27 @@ static void gift128b_compute_round_keys /* Keys 8, 9, 18, and 19 do not need any adjustment */ } +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL /* Derive the fixsliced keys for the remaining rounds 11..40 */ for (index = 20; index < 80; index += 10) { gift128b_derive_keys(ks->k + index, ks->k + index - 20); } +#endif } -int gift128b_init - (gift128b_key_schedule_t *ks, const unsigned char *key, size_t key_len) +void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) { - if (!ks || !key || key_len != 16) - return 0; gift128b_compute_round_keys (ks, be_load_word32(key), be_load_word32(key + 4), be_load_word32(key + 8), be_load_word32(key + 12)); - return 1; +} + +void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) +{ + /* Use the little-endian key byte order from the HYENA submission */ + gift128b_compute_round_keys + (ks, le_load_word32(key + 12), le_load_word32(key + 8), + le_load_word32(key + 4), le_load_word32(key)); } /** @@ -521,11 +771,37 @@ int gift128b_init gift128b_inv_sbox(s3, s1, s2, s0); \ } while (0) +#else /* GIFT128_VARIANT_TINY */ + +void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) +{ + /* Mirror the fixslicing word order of 3, 1, 2, 0 */ + ks->k[0] = be_load_word32(key + 12); + ks->k[1] = be_load_word32(key + 4); + ks->k[2] = be_load_word32(key + 8); + ks->k[3] = be_load_word32(key); +} + +void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) +{ + /* Use the little-endian key byte order from the HYENA submission + * and mirror the fixslicing word order of 3, 1, 2, 0 */ + ks->k[0] = le_load_word32(key); + ks->k[1] = le_load_word32(key + 8); + ks->k[2] = le_load_word32(key + 4); + ks->k[3] = le_load_word32(key + 12); +} + +#endif /* GIFT128_VARIANT_TINY */ + +#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL + void gift128b_encrypt (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { uint32_t s0, s1, s2, s3; + uint32_t k[20]; /* Copy the plaintext into the state buffer and convert from big endian */ s0 = be_load_word32(input); @@ -534,14 +810,20 @@ void gift128b_encrypt s3 = be_load_word32(input + 12); /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer in big endian */ be_store_word32(output, s0); @@ -555,6 +837,7 @@ void 
gift128b_encrypt_preloaded const uint32_t input[4]) { uint32_t s0, s1, s2, s3; + uint32_t k[20]; /* Copy the plaintext into local variables */ s0 = input[0]; @@ -563,14 +846,20 @@ void gift128b_encrypt_preloaded s3 = input[3]; /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer */ output[0] = s0; @@ -579,7 +868,55 @@ void gift128b_encrypt_preloaded output[3] = s3; } -void gift128b_decrypt +void gift128t_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, uint32_t tweak) +{ + uint32_t s0, s1, s2, s3; + uint32_t k[20]; + + /* Copy the plaintext into the state buffer and convert from nibbles */ + gift128n_to_words(output, input); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* Perform all 40 rounds five at a time using the fixsliced method. 
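 * (Small-variant note: only ks->k[0..19], the round keys for rounds 1..10,
 * are stored in the key schedule, so each later group of ten rounds first
 * re-derives its keys into the local k[] buffer with gift128b_derive_keys,
 * as the calls below show.)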
+ * Every 5 rounds except the last we add the tweak value to the state */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + s0 ^= tweak; + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + s0 ^= tweak; + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + s0 ^= tweak; + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); + gift128n_to_nibbles(output, output); +} + +#elif GIFT128_VARIANT == GIFT128_VARIANT_FULL + +void gift128b_encrypt (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { @@ -592,14 +929,14 @@ void gift128b_decrypt s3 = be_load_word32(input + 12); /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer in big endian */ be_store_word32(output, s0); @@ -608,173 +945,308 @@ void gift128b_decrypt be_store_word32(output + 12, s3); } -int gift128n_init - (gift128n_key_schedule_t *ks, const unsigned char *key, size_t key_len) +void gift128b_encrypt_preloaded + (const gift128b_key_schedule_t *ks, uint32_t output[4], + const uint32_t input[4]) { - /* Use the little-endian key byte order from the HYENA submission */ - if (!ks || !key || key_len != 16) - return 0; - gift128b_compute_round_keys - (ks, le_load_word32(key + 12), le_load_word32(key + 8), - le_load_word32(key + 4), le_load_word32(key)); - return 1; + uint32_t s0, s1, s2, s3; + + /* Copy the plaintext into local variables */ + s0 = input[0]; + s1 = input[1]; + s2 = input[2]; + s3 = input[3]; + + /* Perform all 40 rounds five at a time using the fixsliced method */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(ks->k + 30, 
GIFT128_RC_fixsliced + 15); + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer */ + output[0] = s0; + output[1] = s1; + output[2] = s2; + output[3] = s3; } -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) +void gift128t_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, uint32_t tweak) +{ + uint32_t s0, s1, s2, s3; -/** - * \brief Converts the GIFT-128 nibble-based representation into word-based. - * - * \param output Output buffer to write the word-based version to. - * \param input Input buffer to read the nibble-based version from. - * - * The \a input and \a output buffers can be the same buffer. - */ -static void gift128n_to_words - (unsigned char *output, const unsigned char *input) + /* Copy the plaintext into the state buffer and convert from nibbles */ + gift128n_to_words(output, input); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* Perform all 40 rounds five at a time using the fixsliced method. + * Every 5 rounds except the last we add the tweak value to the state */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); + gift128n_to_nibbles(output, output); +} + +#else /* GIFT128_VARIANT_TINY */ + +void gift128b_encrypt + (const gift128b_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) { uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Load the input buffer into 32-bit words. We use the nibble order - * from the HYENA submission to NIST which is byte-reversed with respect - * to the nibble order of the original GIFT-128 paper. Nibble zero is in - * the first byte instead of the last, which means little-endian order. */ - s0 = le_load_word32(input + 12); - s1 = le_load_word32(input + 8); - s2 = le_load_word32(input + 4); - s3 = le_load_word32(input); + /* Copy the plaintext into the state buffer and convert from big endian */ + s0 = be_load_word32(input); + s1 = be_load_word32(input + 4); + s2 = be_load_word32(input + 8); + s3 = be_load_word32(input + 12); - /* Rearrange the bits so that bits 0..3 of each nibble are - * scattered to bytes 0..3 of each word. 
The permutation is: - * - * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 - * - * Generated with "http://programming.sirrida.de/calcperm.php". - */ - #define PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - PERM_WORDS(s0); - PERM_WORDS(s1); - PERM_WORDS(s2); - PERM_WORDS(s3); + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } - /* Rearrange the bytes and write them to the output buffer */ - output[0] = (uint8_t)s0; - output[1] = (uint8_t)s1; - output[2] = (uint8_t)s2; - output[3] = (uint8_t)s3; - output[4] = (uint8_t)(s0 >> 8); - output[5] = (uint8_t)(s1 >> 8); - output[6] = (uint8_t)(s2 >> 8); - output[7] = (uint8_t)(s3 >> 8); - output[8] = (uint8_t)(s0 >> 16); - output[9] = (uint8_t)(s1 >> 16); - output[10] = (uint8_t)(s2 >> 16); - output[11] = (uint8_t)(s3 >> 16); - output[12] = (uint8_t)(s0 >> 24); - output[13] = (uint8_t)(s1 >> 24); - output[14] = (uint8_t)(s2 >> 24); - output[15] = (uint8_t)(s3 >> 24); + /* Pack the state into the ciphertext buffer in big endian */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); } -/** - * \brief Converts the GIFT-128 word-based representation into nibble-based. - * - * \param output Output buffer to write the nibble-based version to. - * \param input Input buffer to read the word-based version from. 
- */ -static void gift128n_to_nibbles - (unsigned char *output, const unsigned char *input) +void gift128b_encrypt_preloaded + (const gift128b_key_schedule_t *ks, uint32_t output[4], + const uint32_t input[4]) { uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Load the input bytes and rearrange them so that s0 contains the - * most significant nibbles and s3 contains the least significant */ - s0 = (((uint32_t)(input[12])) << 24) | - (((uint32_t)(input[8])) << 16) | - (((uint32_t)(input[4])) << 8) | - ((uint32_t)(input[0])); - s1 = (((uint32_t)(input[13])) << 24) | - (((uint32_t)(input[9])) << 16) | - (((uint32_t)(input[5])) << 8) | - ((uint32_t)(input[1])); - s2 = (((uint32_t)(input[14])) << 24) | - (((uint32_t)(input[10])) << 16) | - (((uint32_t)(input[6])) << 8) | - ((uint32_t)(input[2])); - s3 = (((uint32_t)(input[15])) << 24) | - (((uint32_t)(input[11])) << 16) | - (((uint32_t)(input[7])) << 8) | - ((uint32_t)(input[3])); + /* Copy the plaintext into the state buffer */ + s0 = input[0]; + s1 = input[1]; + s2 = input[2]; + s3 = input[3]; - /* Apply the inverse of PERM_WORDS() from the function above */ - #define INV_PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x00aa00aa, 7); \ - bit_permute_step(x, 0x0000cccc, 14); \ - bit_permute_step(x, 0x00f000f0, 4); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - INV_PERM_WORDS(s0); - INV_PERM_WORDS(s1); - INV_PERM_WORDS(s2); - INV_PERM_WORDS(s3); + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } - /* Store the result into the output buffer as 32-bit words */ - le_store_word32(output + 12, s0); - le_store_word32(output + 8, s1); - le_store_word32(output + 4, s2); - le_store_word32(output, s3); + /* Pack the state into the ciphertext buffer */ + output[0] = s0; + output[1] = s1; + output[2] = s2; + output[3] = s3; } -void gift128n_encrypt +void gift128t_encrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) + const unsigned char *input, uint32_t tweak) { + uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; + + /* Copy the plaintext into the state buffer and convert from nibbles */ gift128n_to_words(output, input); - gift128b_encrypt(ks, output, output); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 
0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* AddTweak - XOR in the tweak every 5 rounds except the last */ + if (((round + 1) % 5) == 0 && round < 39) + s0 ^= tweak; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); gift128n_to_nibbles(output, output); } -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, +#endif /* GIFT128_VARIANT_TINY */ + +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL + +void gift128b_decrypt + (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { - gift128n_to_words(output, input); - gift128b_decrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} + uint32_t s0, s1, s2, s3; -/* 4-bit tweak values expanded to 32-bit */ -static uint32_t const GIFT128_tweaks[16] = { - 0x00000000, 0xe1e1e1e1, 0xd2d2d2d2, 0x33333333, - 0xb4b4b4b4, 0x55555555, 0x66666666, 0x87878787, - 0x78787878, 0x99999999, 0xaaaaaaaa, 0x4b4b4b4b, - 0xcccccccc, 0x2d2d2d2d, 0x1e1e1e1e, 0xffffffff -}; + /* Copy the plaintext into the state buffer and convert from big endian */ + s0 = be_load_word32(input); + s1 = be_load_word32(input + 4); + s2 = be_load_word32(input + 8); + s3 = be_load_word32(input + 12); -void gift128t_encrypt + /* Perform all 40 rounds five at a time using the fixsliced method */ + gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + + /* Pack the state into the ciphertext buffer in big endian */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); +} + +void gift128t_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak) + const unsigned char *input, uint32_t tweak) { - uint32_t s0, s1, s2, s3, tword; + uint32_t s0, s1, s2, s3; - /* Copy the plaintext into the state buffer and convert from nibbles */ + /* Copy the ciphertext into the state buffer and convert from nibbles */ gift128n_to_words(output, input); s0 = be_load_word32(output); s1 = be_load_word32(output + 4); @@ -782,25 +1254,24 @@ void gift128t_encrypt s3 = be_load_word32(output + 12); /* Perform all 40 rounds five at a time using the fixsliced method. 
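Both forms of gift128t_encrypt absorb the tweak at the same points: the bit-sliced loop's condition ((round + 1) % 5) == 0 && round < 39 picks out rounds 5, 10, ..., 35 (1-based), which matches the seven s0 ^= tweak statements between the eight 5-round bundles of the fixsliced version. A small standalone check, assuming a hypothetical main():

#include <assert.h>

int main(void)
{
    int round, count = 0;
    for (round = 0; round < 40; ++round) {
        if (((round + 1) % 5) == 0 && round < 39) {
            /* Tweak is absorbed after rounds 5, 10, ..., 35, never after the last */
            assert((round + 1) % 5 == 0 && (round + 1) <= 35);
            ++count;
        }
    }
    assert(count == 7);   /* seven injections, one between each pair of 5-round bundles */
    return 0;
}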
- * Every 5 rounds except the last we add the tweak value to the state */ - tword = GIFT128_tweaks[tweak]; - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); + * Every 5 rounds except the first we add the tweak value to the state */ + gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - /* Pack the state into the ciphertext buffer in nibble form */ + /* Pack the state into the plaintext buffer in nibble form */ be_store_word32(output, s0); be_store_word32(output + 4, s1); be_store_word32(output + 8, s2); @@ -808,37 +1279,211 @@ void gift128t_encrypt gift128n_to_nibbles(output, output); } +#else /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ + +/* The small variant uses fixslicing for encryption, but we need to change + * to bitslicing for decryption because of the difficulty of fast-forwarding + * the fixsliced key schedule to the end. So the tiny variant is used for + * decryption when the small variant is selected. Since the NIST AEAD modes + * for GIFT-128 only use the block encrypt operation, the inefficiencies + * in decryption don't matter all that much */ + +/** + * \def gift128b_load_and_forward_schedule() + * \brief Generate the decryption key at the end of the last round. + * + * To do that, we run the block operation forward to determine the + * final state of the key schedule after the last round: + * + * w0 = ks->k[0]; + * w1 = ks->k[1]; + * w2 = ks->k[2]; + * w3 = ks->k[3]; + * for (round = 0; round < 40; ++round) { + * temp = w3; + * w3 = w2; + * w2 = w1; + * w1 = w0; + * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + * } + * + * We can short-cut all of the above by noticing that we don't need + * to do the word rotations. Every 4 rounds, the rotation alignment + * returns to the original position and each word has been rotated + * by applying the "2 right and 4 left" bit-rotation step to it. + * We then repeat that 10 times for the full 40 rounds. The overall + * effect is to apply a "20 right and 40 left" bit-rotation to every + * word in the key schedule. That is equivalent to "4 right and 8 left" + * on the 16-bit sub-words. 
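The short-cut described above can be checked directly: applying the per-round rotation ("2 right, 4 left" on the 16-bit halves) ten times to a word gives the same result as the single "4 right, 8 left" closed form used to fast-forward the schedule. A standalone sketch, with a hypothetical main() for the check:

#include <assert.h>
#include <stdint.h>

/* One application of the per-round bit-rotation (2 right on the top half,
 * 4 left on the bottom half) */
static uint32_t rotate_once(uint32_t w)
{
    return ((w & 0xFFFC0000U) >> 2) | ((w & 0x00030000U) << 14) |
           ((w & 0x00000FFFU) << 4) | ((w & 0x0000F000U) >> 12);
}

/* The closed form: 4 right / 8 left on the 16-bit sub-words */
static uint32_t fast_forward(uint32_t w)
{
    return ((w & 0xFFF00000U) >> 4) | ((w & 0x000F0000U) << 12) |
           ((w & 0x000000FFU) << 8) | ((w & 0x0000FF00U) >> 8);
}

int main(void)
{
    uint32_t w = 0xCAFEBABEU;
    uint32_t slow = w;
    int i;
    for (i = 0; i < 10; ++i)    /* each key word is rotated 10 times over 40 rounds */
        slow = rotate_once(slow);
    assert(slow == fast_forward(w));
    return 0;
}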
+ */ +#if GIFT128_VARIANT != GIFT128_VARIANT_SMALL +#define gift128b_load_and_forward_schedule() \ + do { \ + w0 = ks->k[3]; \ + w1 = ks->k[1]; \ + w2 = ks->k[2]; \ + w3 = ks->k[0]; \ + w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ + ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ + w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ + ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ + w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ + ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ + w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ + ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ + } while (0) +#else +/* The small variant needs to also undo some of the rotations that were + * done to generate the fixsliced version of the key schedule */ +#define gift128b_load_and_forward_schedule() \ + do { \ + w0 = ks->k[3]; \ + w1 = ks->k[1]; \ + w2 = ks->k[2]; \ + w3 = ks->k[0]; \ + gift128b_swap_move(w3, w3, 0x000000FFU, 24); \ + gift128b_swap_move(w3, w3, 0x00003333U, 18); \ + gift128b_swap_move(w3, w3, 0x000F000FU, 12); \ + gift128b_swap_move(w3, w3, 0x00550055U, 9); \ + gift128b_swap_move(w1, w1, 0x000000FFU, 24); \ + gift128b_swap_move(w1, w1, 0x00003333U, 18); \ + gift128b_swap_move(w1, w1, 0x000F000FU, 12); \ + gift128b_swap_move(w1, w1, 0x00550055U, 9); \ + gift128b_swap_move(w2, w2, 0x000000FFU, 24); \ + gift128b_swap_move(w2, w2, 0x000F000FU, 12); \ + gift128b_swap_move(w2, w2, 0x03030303U, 6); \ + gift128b_swap_move(w2, w2, 0x11111111U, 3); \ + gift128b_swap_move(w0, w0, 0x000000FFU, 24); \ + gift128b_swap_move(w0, w0, 0x000F000FU, 12); \ + gift128b_swap_move(w0, w0, 0x03030303U, 6); \ + gift128b_swap_move(w0, w0, 0x11111111U, 3); \ + w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ + ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ + w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ + ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ + w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ + ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ + w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ + ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ + } while (0) +#endif + +void gift128b_decrypt + (const gift128b_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; + + /* Copy the ciphertext into the state buffer and convert from big endian */ + s0 = be_load_word32(input); + s1 = be_load_word32(input + 4); + s2 = be_load_word32(input + 8); + s3 = be_load_word32(input + 12); + + /* Generate the decryption key at the end of the last round */ + gift128b_load_and_forward_schedule(); + + /* Perform all 40 rounds */ + for (round = 40; round > 0; --round) { + /* Rotate the key schedule backwards */ + temp = w0; + w0 = w1; + w1 = w2; + w2 = w3; + w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | + ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; + + /* InvPermBits - apply the inverse of the 128-bit permutation */ + INV_PERM0(s0); + INV_PERM1(s1); + INV_PERM2(s2); + INV_PERM3(s3); + + /* InvSubCells - apply the inverse of the S-box */ + temp = s0; + s0 = s3; + s3 = temp; + s2 ^= s0 & s1; + s3 ^= 0xFFFFFFFFU; + s1 ^= s3; + s3 ^= s2; + s2 ^= s0 | s1; + s0 ^= s1 
& s3; + s1 ^= s0 & s2; + } + + /* Pack the state into the plaintext buffer in big endian */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); +} + void gift128t_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak) + const unsigned char *input, uint32_t tweak) { - uint32_t s0, s1, s2, s3, tword; + uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Copy the ciphertext into the state buffer and convert from nibbles */ + /* Copy the plaintext into the state buffer and convert from nibbles */ gift128n_to_words(output, input); s0 = be_load_word32(output); s1 = be_load_word32(output + 4); s2 = be_load_word32(output + 8); s3 = be_load_word32(output + 12); - /* Perform all 40 rounds five at a time using the fixsliced method. - * Every 5 rounds except the first we add the tweak value to the state */ - tword = GIFT128_tweaks[tweak]; - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC); + /* Generate the decryption key at the end of the last round */ + gift128b_load_and_forward_schedule(); + + /* Perform all 40 rounds */ + for (round = 40; round > 0; --round) { + /* Rotate the key schedule backwards */ + temp = w0; + w0 = w1; + w1 = w2; + w2 = w3; + w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | + ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); + + /* AddTweak - XOR in the tweak every 5 rounds except the last */ + if ((round % 5) == 0 && round < 40) + s0 ^= tweak; + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; + + /* InvPermBits - apply the inverse of the 128-bit permutation */ + INV_PERM0(s0); + INV_PERM1(s1); + INV_PERM2(s2); + INV_PERM3(s3); + + /* InvSubCells - apply the inverse of the S-box */ + temp = s0; + s0 = s3; + s3 = temp; + s2 ^= s0 & s1; + s3 ^= 0xFFFFFFFFU; + s1 ^= s3; + s3 ^= s2; + s2 ^= s0 | s1; + s0 ^= s1 & s3; + s1 ^= s0 & s2; + } /* Pack the state into the plaintext buffer in nibble form */ be_store_word32(output, s0); @@ -847,3 +1492,7 @@ void gift128t_decrypt be_store_word32(output + 12, s3); gift128n_to_nibbles(output, output); } + +#endif /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ + +#endif /* !GIFT128_VARIANT_ASM */ diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128.h b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128.h index 1ac40e5..f57d143 100644 --- a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128.h +++ b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128.h @@ -47,11 +47,13 @@ * in any of the NIST submissions so we don't bother with it in this library. 
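The backwards key rotation used when stepping the schedule in reverse is the exact inverse of the forward rotation used on the encrypt path, so running one after the other returns the original word. A minimal round-trip check, assuming a hypothetical main():

#include <assert.h>
#include <stdint.h>

/* Forward per-round key rotation (encryption direction) */
static uint32_t rotate_forward(uint32_t w)
{
    return ((w & 0xFFFC0000U) >> 2) | ((w & 0x00030000U) << 14) |
           ((w & 0x00000FFFU) << 4) | ((w & 0x0000F000U) >> 12);
}

/* Backward per-round key rotation (decryption direction) */
static uint32_t rotate_backward(uint32_t w)
{
    return ((w & 0x3FFF0000U) << 2) | ((w & 0xC0000000U) >> 14) |
           ((w & 0x0000FFF0U) >> 4) | ((w & 0x0000000FU) << 12);
}

int main(void)
{
    uint32_t w = 0x0F1E2D3CU;
    assert(rotate_backward(rotate_forward(w)) == w);
    assert(rotate_forward(rotate_backward(w)) == w);
    return 0;
}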
* * References: https://eprint.iacr.org/2017/622.pdf, + * https://eprint.iacr.org/2020/412.pdf, * https://giftcipher.github.io/gift/ */ #include #include +#include "internal-gift128-config.h" #ifdef __cplusplus extern "C" { @@ -63,16 +65,23 @@ extern "C" { #define GIFT128_BLOCK_SIZE 16 /** - * \brief Number of round keys for the fixsliced representation of GIFT-128. + * \var GIFT128_ROUND_KEYS + * \brief Number of round keys for the GIFT-128 key schedule. */ +#if GIFT128_VARIANT == GIFT128_VARIANT_TINY +#define GIFT128_ROUND_KEYS 4 +#elif GIFT128_VARIANT == GIFT128_VARIANT_SMALL +#define GIFT128_ROUND_KEYS 20 +#else #define GIFT128_ROUND_KEYS 80 +#endif /** * \brief Structure of the key schedule for GIFT-128 (bit-sliced). */ typedef struct { - /** Pre-computed round keys in the fixsliced form */ + /** Pre-computed round keys for bit-sliced GIFT-128 */ uint32_t k[GIFT128_ROUND_KEYS]; } gift128b_key_schedule_t; @@ -81,14 +90,9 @@ typedef struct * \brief Initializes the key schedule for GIFT-128 (bit-sliced). * * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. + * \param key Points to the 16 bytes of the key data. */ -int gift128b_init - (gift128b_key_schedule_t *ks, const unsigned char *key, size_t key_len); +void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key); /** * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced). @@ -145,14 +149,9 @@ typedef gift128b_key_schedule_t gift128n_key_schedule_t; * \brief Initializes the key schedule for GIFT-128 (nibble-based). * * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. + * \param key Points to the 16 bytes of the key data. */ -int gift128n_init - (gift128n_key_schedule_t *ks, const unsigned char *key, size_t key_len); +void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key); /** * \brief Encrypts a 128-bit block with GIFT-128 (nibble-based). 
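The variant selection above trades RAM for speed: the pre-computed schedule occupies 16 bytes (tiny, just the raw key), 80 bytes (small), or 320 bytes (full, two fixsliced words per round). Callers also change slightly, since gift128b_init() and gift128n_init() no longer take a key length or return a status. A hypothetical caller updated for the new prototype, assuming the header is on the include path:

#include "internal-gift128.h"

/* Hypothetical wrapper, not part of the library: the key is always
 * 16 bytes now and there is no return value to check. */
static void setup_schedule
    (gift128b_key_schedule_t *ks, const unsigned char key[16])
{
    gift128b_init(ks, key);    /* was: gift128b_init(ks, key, 16) with an int result */
}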
@@ -182,13 +181,31 @@ void gift128n_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, const unsigned char *input); +/* 4-bit tweak values expanded to 32-bit for TweGIFT-128 */ +#define GIFT128T_TWEAK_0 0x00000000 /**< TweGIFT-128 tweak value 0 */ +#define GIFT128T_TWEAK_1 0xe1e1e1e1 /**< TweGIFT-128 tweak value 1 */ +#define GIFT128T_TWEAK_2 0xd2d2d2d2 /**< TweGIFT-128 tweak value 2 */ +#define GIFT128T_TWEAK_3 0x33333333 /**< TweGIFT-128 tweak value 3 */ +#define GIFT128T_TWEAK_4 0xb4b4b4b4 /**< TweGIFT-128 tweak value 4 */ +#define GIFT128T_TWEAK_5 0x55555555 /**< TweGIFT-128 tweak value 5 */ +#define GIFT128T_TWEAK_6 0x66666666 /**< TweGIFT-128 tweak value 6 */ +#define GIFT128T_TWEAK_7 0x87878787 /**< TweGIFT-128 tweak value 7 */ +#define GIFT128T_TWEAK_8 0x78787878 /**< TweGIFT-128 tweak value 8 */ +#define GIFT128T_TWEAK_9 0x99999999 /**< TweGIFT-128 tweak value 9 */ +#define GIFT128T_TWEAK_10 0xaaaaaaaa /**< TweGIFT-128 tweak value 10 */ +#define GIFT128T_TWEAK_11 0x4b4b4b4b /**< TweGIFT-128 tweak value 11 */ +#define GIFT128T_TWEAK_12 0xcccccccc /**< TweGIFT-128 tweak value 12 */ +#define GIFT128T_TWEAK_13 0x2d2d2d2d /**< TweGIFT-128 tweak value 13 */ +#define GIFT128T_TWEAK_14 0x1e1e1e1e /**< TweGIFT-128 tweak value 14 */ +#define GIFT128T_TWEAK_15 0xffffffff /**< TweGIFT-128 tweak value 15 */ + /** * \brief Encrypts a 128-bit block with TweGIFT-128 (tweakable variant). * * \param ks Points to the GIFT-128 key schedule. * \param output Output buffer which must be at least 16 bytes in length. * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value. + * \param tweak 4-bit tweak value expanded to 32-bit. * * The \a input and \a output buffers can be the same buffer for * in-place encryption. @@ -200,7 +217,7 @@ void gift128n_decrypt */ void gift128t_encrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak); + const unsigned char *input, uint32_t tweak); /** * \brief Decrypts a 128-bit block with TweGIFT-128 (tweakable variant). @@ -208,7 +225,7 @@ void gift128t_encrypt * \param ks Points to the GIFT-128 key schedule. * \param output Output buffer which must be at least 16 bytes in length. * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value. + * \param tweak 4-bit tweak value expanded to 32-bit. * * The \a input and \a output buffers can be the same buffer for * in-place encryption. 
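The sixteen GIFT128T_TWEAK_* constants above follow a regular pattern: each byte carries the 4-bit tweak value in its low nibble, the high nibble is the tweak complemented when the tweak has odd parity (unchanged otherwise), and the byte is replicated into all four byte positions. A standalone sketch that regenerates a few table entries; expand_tweak() and main() are hypothetical helpers, not part of the library:

#include <assert.h>
#include <stdint.h>

/* Hypothetical helper: expand a 4-bit tweak the way the
 * GIFT128T_TWEAK_* constants appear to be built. */
static uint32_t expand_tweak(unsigned t)
{
    unsigned parity = (t ^ (t >> 1) ^ (t >> 2) ^ (t >> 3)) & 1;
    unsigned high = (t ^ (parity ? 0xF : 0x0)) & 0xF;
    uint32_t byte = (uint32_t)((high << 4) | t);
    return byte * 0x01010101U;   /* replicate into all four bytes */
}

int main(void)
{
    assert(expand_tweak(0)  == 0x00000000U);
    assert(expand_tweak(1)  == 0xe1e1e1e1U);
    assert(expand_tweak(7)  == 0x87878787U);
    assert(expand_tweak(11) == 0x4b4b4b4bU);
    assert(expand_tweak(15) == 0xffffffffU);
    return 0;
}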
@@ -220,7 +237,7 @@ void gift128t_encrypt */ void gift128t_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak); + const unsigned char *input, uint32_t tweak); #ifdef __cplusplus } diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128b-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128b-avr.S new file mode 100644 index 0000000..641613a --- /dev/null +++ b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128b-avr.S @@ -0,0 +1,2104 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 40 +table_0: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 11 + .byte 23 + .byte 46 + .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128b_init + .type gift128b_init, @function +gift128b_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + std Z+4,r18 + std Z+5,r19 + std Z+6,r20 + std Z+7,r21 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + std Z+12,r18 + std Z+13,r19 + std Z+14,r20 + std Z+15,r21 + ret + .size gift128b_init, .-gift128b_init + + .text +.global gift128b_encrypt + .type gift128b_encrypt, @function +gift128b_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 36 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r17,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + mov r16,r1 +46: + rcall 199f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + rcall 199f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd 
r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + rcall 199f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + rcall 199f + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + ldi r17,40 + cpse r16,r17 + rjmp 46b + rjmp 548f +199: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + movw r18,r22 + movw r20,r2 + mov r0,r4 + and r0,r18 + eor r8,r0 + mov r0,r5 + and r0,r19 + eor r9,r0 + mov r0,r6 + and r0,r20 + eor r10,r0 + mov r0,r7 + and r0,r21 + eor r11,r0 + movw r22,r12 + movw r2,r14 + movw r12,r18 + movw r14,r20 + bst r22,1 + bld r0,0 + bst r22,4 + bld r22,1 + bst r2,0 + bld r22,4 + bst r22,2 + bld r2,0 + bst r23,0 + bld r22,2 + bst r22,3 + bld r23,0 + bst r23,4 + bld r22,3 + bst r2,3 + bld r23,4 + bst r23,6 + bld r2,3 + bst r3,3 + bld r23,6 + bst r23,5 + bld r3,3 + bst r2,7 + bld r23,5 + bst r3,6 + bld r2,7 + bst r3,1 + bld r3,6 + bst r22,5 + bld r3,1 + bst r2,4 + bld r22,5 + bst r2,2 + bld r2,4 + bst r23,2 + bld r2,2 + bst r23,3 + bld r23,2 + bst r23,7 + bld r23,3 + bst r3,7 + bld r23,7 + bst r3,5 + bld r3,7 + bst r2,5 + bld r3,5 + bst r2,6 + bld r2,5 + bst r3,2 + bld r2,6 + bst r23,1 + bld r3,2 + bst r22,7 + bld r23,1 + bst r3,4 + bld r22,7 + bst r2,1 + bld r3,4 + bst r22,6 + bld r2,1 + bst r3,0 + bld r22,6 + bst r0,0 + bld r3,0 + bst r4,0 + bld r0,0 + bst r4,1 + bld r4,0 + bst r4,5 + bld r4,1 + bst r6,5 + bld r4,5 + bst r6,7 + bld r6,5 + bst r7,7 + bld r6,7 + bst r7,6 + bld r7,7 + bst r7,2 + bld r7,6 + bst r5,2 + bld r7,2 + bst r5,0 + bld r5,2 + bst r0,0 + bld r5,0 + bst r4,2 + bld r0,0 + bst r5,1 + bld r4,2 + bst r4,4 + bld r5,1 + bst r6,1 + bld r4,4 + bst r4,7 + bld r6,1 + bst r7,5 + bld r4,7 + bst r6,6 + bld r7,5 + bst r7,3 + bld r6,6 + bst r5,6 + bld r7,3 + bst r7,0 + bld r5,6 + bst r0,0 + bld r7,0 + bst r4,3 + bld r0,0 + bst r5,5 + bld r4,3 + bst r6,4 + bld r5,5 + bst r6,3 + bld r6,4 + bst r5,7 + bld r6,3 + bst r7,4 + bld r5,7 + bst r6,2 + bld r7,4 + bst r5,3 + bld r6,2 + bst 
r5,4 + bld r5,3 + bst r6,0 + bld r5,4 + bst r0,0 + bld r6,0 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r8,2 + bld r8,0 + bst r9,2 + bld r8,2 + bst r9,1 + bld r9,2 + bst r8,5 + bld r9,1 + bst r10,6 + bld r8,5 + bst r11,0 + bld r10,6 + bst r8,3 + bld r11,0 + bst r9,6 + bld r8,3 + bst r11,1 + bld r9,6 + bst r8,7 + bld r11,1 + bst r11,6 + bld r8,7 + bst r11,3 + bld r11,6 + bst r9,7 + bld r11,3 + bst r11,5 + bld r9,7 + bst r10,7 + bld r11,5 + bst r11,4 + bld r10,7 + bst r10,3 + bld r11,4 + bst r9,4 + bld r10,3 + bst r10,1 + bld r9,4 + bst r8,4 + bld r10,1 + bst r10,2 + bld r8,4 + bst r9,0 + bld r10,2 + bst r8,1 + bld r9,0 + bst r8,6 + bld r8,1 + bst r11,2 + bld r8,6 + bst r9,3 + bld r11,2 + bst r9,5 + bld r9,3 + bst r10,5 + bld r9,5 + bst r10,4 + bld r10,5 + bst r10,0 + bld r10,4 + bst r0,0 + bld r10,0 + bst r12,0 + bld r0,0 + bst r12,3 + bld r12,0 + bst r13,7 + bld r12,3 + bst r15,6 + bld r13,7 + bst r15,0 + bld r15,6 + bst r0,0 + bld r15,0 + bst r12,1 + bld r0,0 + bst r12,7 + bld r12,1 + bst r15,7 + bld r12,7 + bst r15,4 + bld r15,7 + bst r14,0 + bld r15,4 + bst r0,0 + bld r14,0 + bst r12,2 + bld r0,0 + bst r13,3 + bld r12,2 + bst r13,6 + bld r13,3 + bst r15,2 + bld r13,6 + bst r13,0 + bld r15,2 + bst r0,0 + bld r13,0 + bst r12,4 + bld r0,0 + bst r14,3 + bld r12,4 + bst r13,5 + bld r14,3 + bst r14,6 + bld r13,5 + bst r15,1 + bld r14,6 + bst r0,0 + bld r15,1 + bst r12,5 + bld r0,0 + bst r14,7 + bld r12,5 + bst r15,5 + bld r14,7 + bst r14,4 + bld r15,5 + bst r14,1 + bld r14,4 + bst r0,0 + bld r14,1 + bst r12,6 + bld r0,0 + bst r15,3 + bld r12,6 + bst r13,4 + bld r15,3 + bst r14,2 + bld r13,4 + bst r13,1 + bld r14,2 + bst r0,0 + bld r13,1 + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + inc r16 + ret +548: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt, .-gift128b_encrypt + + .text +.global gift128b_encrypt_preloaded + .type gift128b_encrypt_preloaded, @function +gift128b_encrypt_preloaded: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 36 + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + std Y+9,r26 + std 
Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r17,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + mov r16,r1 +46: + rcall 199f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + rcall 199f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + rcall 199f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + rcall 199f + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + ldi r17,40 + cpse r16,r17 + rjmp 46b + rjmp 548f +199: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + movw r18,r22 + movw r20,r2 + mov r0,r4 + and r0,r18 + eor r8,r0 + mov r0,r5 + and r0,r19 + eor r9,r0 + mov r0,r6 + and r0,r20 + eor r10,r0 + mov r0,r7 + and r0,r21 + eor r11,r0 + movw r22,r12 + movw r2,r14 + movw r12,r18 + movw r14,r20 + bst r22,1 + bld r0,0 + bst r22,4 + bld r22,1 + bst r2,0 + bld r22,4 + bst r22,2 + bld r2,0 + bst r23,0 + bld r22,2 + bst r22,3 + bld r23,0 + bst r23,4 + bld r22,3 + bst r2,3 + bld r23,4 + bst r23,6 + bld r2,3 + bst r3,3 + bld r23,6 + bst r23,5 + bld r3,3 + bst r2,7 + bld r23,5 + bst r3,6 + bld r2,7 + bst r3,1 + bld r3,6 + bst r22,5 + bld r3,1 + bst r2,4 + bld r22,5 + bst r2,2 + bld r2,4 + bst r23,2 + bld r2,2 + bst r23,3 + bld r23,2 + bst r23,7 + bld r23,3 + bst r3,7 + bld r23,7 + bst r3,5 + bld r3,7 + bst r2,5 + bld r3,5 + bst r2,6 + bld r2,5 + bst r3,2 + bld r2,6 + bst r23,1 + bld r3,2 + bst 
r22,7 + bld r23,1 + bst r3,4 + bld r22,7 + bst r2,1 + bld r3,4 + bst r22,6 + bld r2,1 + bst r3,0 + bld r22,6 + bst r0,0 + bld r3,0 + bst r4,0 + bld r0,0 + bst r4,1 + bld r4,0 + bst r4,5 + bld r4,1 + bst r6,5 + bld r4,5 + bst r6,7 + bld r6,5 + bst r7,7 + bld r6,7 + bst r7,6 + bld r7,7 + bst r7,2 + bld r7,6 + bst r5,2 + bld r7,2 + bst r5,0 + bld r5,2 + bst r0,0 + bld r5,0 + bst r4,2 + bld r0,0 + bst r5,1 + bld r4,2 + bst r4,4 + bld r5,1 + bst r6,1 + bld r4,4 + bst r4,7 + bld r6,1 + bst r7,5 + bld r4,7 + bst r6,6 + bld r7,5 + bst r7,3 + bld r6,6 + bst r5,6 + bld r7,3 + bst r7,0 + bld r5,6 + bst r0,0 + bld r7,0 + bst r4,3 + bld r0,0 + bst r5,5 + bld r4,3 + bst r6,4 + bld r5,5 + bst r6,3 + bld r6,4 + bst r5,7 + bld r6,3 + bst r7,4 + bld r5,7 + bst r6,2 + bld r7,4 + bst r5,3 + bld r6,2 + bst r5,4 + bld r5,3 + bst r6,0 + bld r5,4 + bst r0,0 + bld r6,0 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r8,2 + bld r8,0 + bst r9,2 + bld r8,2 + bst r9,1 + bld r9,2 + bst r8,5 + bld r9,1 + bst r10,6 + bld r8,5 + bst r11,0 + bld r10,6 + bst r8,3 + bld r11,0 + bst r9,6 + bld r8,3 + bst r11,1 + bld r9,6 + bst r8,7 + bld r11,1 + bst r11,6 + bld r8,7 + bst r11,3 + bld r11,6 + bst r9,7 + bld r11,3 + bst r11,5 + bld r9,7 + bst r10,7 + bld r11,5 + bst r11,4 + bld r10,7 + bst r10,3 + bld r11,4 + bst r9,4 + bld r10,3 + bst r10,1 + bld r9,4 + bst r8,4 + bld r10,1 + bst r10,2 + bld r8,4 + bst r9,0 + bld r10,2 + bst r8,1 + bld r9,0 + bst r8,6 + bld r8,1 + bst r11,2 + bld r8,6 + bst r9,3 + bld r11,2 + bst r9,5 + bld r9,3 + bst r10,5 + bld r9,5 + bst r10,4 + bld r10,5 + bst r10,0 + bld r10,4 + bst r0,0 + bld r10,0 + bst r12,0 + bld r0,0 + bst r12,3 + bld r12,0 + bst r13,7 + bld r12,3 + bst r15,6 + bld r13,7 + bst r15,0 + bld r15,6 + bst r0,0 + bld r15,0 + bst r12,1 + bld r0,0 + bst r12,7 + bld r12,1 + bst r15,7 + bld r12,7 + bst r15,4 + bld r15,7 + bst r14,0 + bld r15,4 + bst r0,0 + bld r14,0 + bst r12,2 + bld r0,0 + bst r13,3 + bld r12,2 + bst r13,6 + bld r13,3 + bst r15,2 + bld r13,6 + bst r13,0 + bld r15,2 + bst r0,0 + bld r13,0 + bst r12,4 + bld r0,0 + bst r14,3 + bld r12,4 + bst r13,5 + bld r14,3 + bst r14,6 + bld r13,5 + bst r15,1 + bld r14,6 + bst r0,0 + bld r15,1 + bst r12,5 + bld r0,0 + bst r14,7 + bld r12,5 + bst r15,5 + bld r14,7 + bst r14,4 + bld r15,5 + bst r14,1 + bld r14,4 + bst r0,0 + bld r14,1 + bst r12,6 + bld r0,0 + bst r15,3 + bld r12,6 + bst r13,4 + bld r15,3 + bst r14,2 + bld r13,4 + bst r13,1 + bld r14,2 + bst r0,0 + bld r13,1 + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + inc r16 + ret +548: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded + + .text +.global gift128b_decrypt + .type gift128b_decrypt, @function +gift128b_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 
+ push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r17,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +114: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + cpse r16,r1 + rjmp 114b + 
rjmp 611f +266: + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 + bst r7,6 + bld r7,2 + bst r7,7 + bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld r11,0 + bst r8,5 + bld r10,6 + bst r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + eor r8,r18 + 
eor r9,r19 + eor r10,r20 + eor r11,r21 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +611: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_decrypt, .-gift128b_decrypt + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128b-full-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128b-full-avr.S new file mode 100644 index 0000000..ff11875 --- /dev/null +++ b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128b-full-avr.S @@ -0,0 +1,5037 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 + .byte 
128 + + .text +.global gift128b_init + .type gift128b_init, @function +gift128b_init: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 18 + ld r13,X+ + ld r12,X+ + ld r11,X+ + ld r10,X+ + ld r5,X+ + ld r4,X+ + ld r3,X+ + ld r2,X+ + ld r9,X+ + ld r8,X+ + ld r7,X+ + ld r6,X+ + ld r29,X+ + ld r28,X+ + ld r23,X+ + ld r22,X+ + st Z+,r22 + st Z+,r23 + st Z+,r28 + st Z+,r29 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + ldi r24,4 +33: + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r29 + ror r28 + ror r0 + lsr r29 + ror r28 + ror r0 + or r29,r0 + st Z+,r22 + st Z+,r23 + st Z+,r28 + st Z+,r29 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r28 + mov r28,r4 + mov r4,r0 + mov r0,r29 + mov r29,r5 + mov r5,r0 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + mov r0,r6 + mov r6,r10 + mov r10,r0 + mov r0,r7 + mov r7,r11 + mov r11,r0 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + dec r24 + breq 5115f + rjmp 33b +5115: + subi r30,80 + sbc r31,r1 + ldi r24,2 +119: + ld r22,Z + ldd r23,Z+1 + ldd r28,Z+2 + ldd r29,Z+3 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + st Z,r29 + std Z+1,r23 + std Z+2,r28 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r28,Z+6 + ldd r29,Z+7 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor 
r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+4,r29 + std Z+5,r23 + std Z+6,r28 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r28,Z+10 + ldd r29,Z+11 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+8,r29 + std Z+9,r23 + std Z+10,r28 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r28,Z+14 + ldd r29,Z+15 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror 
r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+12,r29 + std Z+13,r23 + std Z+14,r28 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r28,Z+18 + ldd r29,Z+19 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+16,r29 + std Z+17,r23 + std Z+18,r28 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r28,Z+22 + ldd r29,Z+23 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 
+ lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+20,r29 + std Z+21,r23 + std Z+22,r28 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r28,Z+26 + ldd r29,Z+27 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+24,r29 + std Z+25,r23 + std Z+26,r28 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r28,Z+30 + ldd r29,Z+31 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl 
r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+28,r29 + std Z+29,r23 + std Z+30,r28 + std Z+31,r22 + dec r24 + breq 1268f + adiw r30,40 + rjmp 119b +1268: + adiw r30,40 + movw r26,r30 + subi r26,80 + sbc r27,r1 + ldi r24,6 +1274: + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r2 + eor r19,r3 + andi r18,51 + andi r19,51 + eor r2,r18 + eor r3,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + movw r18,r2 + movw r20,r4 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + st Z,r2 + std Z+1,r3 + std Z+2,r4 + std Z+3,r5 + movw r18,r22 + movw r20,r28 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + andi r28,204 + andi r29,204 + or r28,r21 + or r29,r18 + or r22,r19 + or r23,r20 + movw r18,r28 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r28 + eor r19,r29 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r28,r18 + eor r29,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r28,r18 + eor r29,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r28 + std Z+5,r29 + std Z+6,r22 + std Z+7,r23 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + swap r3 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + swap r5 + std Z+8,r2 + std Z+9,r3 + std Z+10,r4 + std Z+11,r5 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r28 + adc r28,r1 + lsl r28 + adc r28,r1 + lsl r28 + adc r28,r1 + lsl r29 + adc r29,r1 + lsl r29 + adc r29,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r28 + std Z+15,r29 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + ldi r25,85 + and r2,r25 
+ and r3,r25 + and r4,r25 + and r5,r25 + or r2,r19 + or r3,r20 + or r4,r21 + or r5,r18 + std Z+16,r4 + std Z+17,r5 + std Z+18,r2 + std Z+19,r3 + movw r18,r22 + movw r20,r28 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + andi r28,170 + andi r29,170 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + or r22,r18 + or r23,r19 + or r28,r20 + or r29,r21 + std Z+20,r29 + std Z+21,r22 + std Z+22,r23 + std Z+23,r28 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + movw r18,r2 + movw r20,r4 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r14,r18 + movw r16,r20 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + eor r14,r18 + eor r15,r19 + eor r16,r20 + eor r17,r21 + ldi r25,8 + and r14,r25 + and r15,r25 + andi r16,8 + andi r17,8 + eor r18,r14 + eor r19,r15 + eor r20,r16 + eor r21,r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + eor r18,r14 + eor r19,r15 + eor r20,r16 + eor r21,r17 + ldi r17,15 + and r2,r17 + and r3,r17 + and r4,r17 + and r5,r17 + or r2,r18 + or r3,r19 + or r4,r20 + or r5,r21 + std Z+24,r2 + std Z+25,r3 + std Z+26,r4 + std Z+27,r5 + movw r18,r28 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r2,r22 + movw r4,r28 + ldi r16,1 + and r2,r16 + and r3,r16 + and r4,r16 + and r5,r16 + lsl r2 + rol r3 + rol r4 + rol r5 + lsl r2 + rol r3 + rol r4 + rol r5 + lsl r2 + rol r3 + rol r4 + rol r5 + or r2,r18 + or r3,r19 + movw r18,r28 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r2,r18 + or r3,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r4,r18 + or r5,r19 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r2,r18 + or r3,r19 + or r4,r20 + or r5,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r4,r22 + or r5,r23 + std Z+28,r2 + std Z+29,r3 + std Z+30,r4 + std Z+31,r5 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + mov r0,r1 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + or r5,r0 + std Z+32,r3 + std Z+33,r2 + std Z+34,r4 + std Z+35,r5 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r28 + mov r28,r29 + mov r29,r0 + lsl r28 + rol r29 + adc r28,r1 + lsl r28 + rol r29 + adc r28,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r28 + std Z+39,r29 + dec r24 + breq 1733f + adiw r30,40 + rjmp 1274b +1733: + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop 
r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_init, .-gift128b_init + + .text +.global gift128b_encrypt + .type gift128b_encrypt, @function +gift128b_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 19 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + movw r26,r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rjmp 765f +27: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + 
movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + 
eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +765: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + pop r0 + pop r0 + pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt, .-gift128b_encrypt + + .text +.global gift128b_encrypt_preloaded + .type gift128b_encrypt_preloaded, @function +gift128b_encrypt_preloaded: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 19 + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + movw r26,r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rjmp 765f +27: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and 
r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + 
ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else 
+ lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +765: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + pop r0 + pop r0 + pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded + + .text +.global gift128b_decrypt + .type gift128b_decrypt, @function +gift128b_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 19 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + movw r26,r30 + subi r26,192 + sbci r27,254 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,160 + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rjmp 768f +30: + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r22 + and 
r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r1 + lsr r22 + ror r0 + lsr r22 + ror r0 + or r22,r0 + mov r0,r1 + lsr r23 + ror r0 + lsr r23 + ror r0 + or r23,r0 + mov r0,r1 + lsr r2 + ror r0 + lsr r2 + ror r0 + or r2,r0 + mov r0,r1 + lsr r3 + ror r0 + lsr r3 + ror r0 + or r3,r0 + swap r4 + swap r5 + swap r6 + swap r7 + lsl r8 + adc r8,r1 + lsl r8 + adc r8,r1 + lsl r9 + adc r9,r1 + lsl r9 + adc r9,r1 + lsl r10 + adc r10,r1 + lsl r10 + adc r10,r1 + lsl r11 + adc r11,r1 + lsl r11 + adc r11,r1 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + com r22 + com r23 + com r2 + com r3 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + 
ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + com r22 + com r23 + com r2 + com r3 + eor r4,r22 + eor r5,r23 + eor r6,r2 
+ eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,119 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r15 + ror r14 + ror r13 + ror r12 + lsr r15 + ror r14 + ror r13 + ror r12 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,17 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +768: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + pop r0 + pop r0 + 
pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_decrypt, .-gift128b_decrypt + +#endif + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128b-small-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128b-small-avr.S new file mode 100644 index 0000000..77ef9fd --- /dev/null +++ b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128b-small-avr.S @@ -0,0 +1,6053 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 + .byte 128 + + .text +.global gift128b_init + .type gift128b_init, @function +gift128b_init: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 16 + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + ldi r24,4 +33: + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + mov r0,r22 + 
mov r22,r4 + mov r4,r0 + mov r0,r23 + mov r23,r5 + mov r5,r0 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + dec r24 + breq 5115f + rjmp 33b +5115: + subi r30,80 + sbc r31,r1 + ldi r24,2 +119: + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + st Z,r3 + std Z+1,r23 + std Z+2,r2 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + 
lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+4,r3 + std Z+5,r23 + std Z+6,r2 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+8,r3 + std Z+9,r23 + std Z+10,r2 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr 
r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+12,r3 + std Z+13,r23 + std Z+14,r2 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r3 + std Z+17,r23 + std Z+18,r2 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + 
rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+20,r3 + std Z+21,r23 + std Z+22,r2 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+24,r3 + std Z+25,r23 + std Z+26,r2 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol 
r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+28,r3 + std Z+29,r23 + std Z+30,r2 + std Z+31,r22 + dec r24 + breq 1268f + adiw r30,40 + rjmp 119b +1268: + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size gift128b_init, .-gift128b_init + + .text +.global gift128b_encrypt + .type gift128b_encrypt, @function +gift128b_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ldi r24,20 +1: + ld r22,Z+ + ld r23,Z+ + ld r2,Z+ + ld r3,Z+ + std Y+1,r22 + std Y+2,r23 + std Y+3,r2 + std Y+4,r3 + adiw r28,4 + dec r24 + brne 1b + subi r28,80 + sbc r29,r1 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 73f + rcall 73f + rjmp 1285f +73: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov 
r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + 
lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor 
r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif 
defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +811: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + 
ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +1285: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt, .-gift128b_encrypt + + .text +.global 
gift128b_encrypt_preloaded + .type gift128b_encrypt_preloaded, @function +gift128b_encrypt_preloaded: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ldi r24,20 +1: + ld r22,Z+ + ld r23,Z+ + ld r2,Z+ + ld r3,Z+ + std Y+1,r22 + std Y+2,r23 + std Y+3,r2 + std Y+4,r3 + adiw r28,4 + dec r24 + brne 1b + subi r28,80 + sbc r29,r1 + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 73f + rcall 73f + rjmp 1285f +73: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 
+ rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) 
+ lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or 
r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor 
r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +811: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + 
rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +1285: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 40 +table_1: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 
11 + .byte 23 + .byte 46 + .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128b_decrypt + .type gift128b_decrypt, @function +gift128b_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror 
r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + 
eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r17,hh8(table_1) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +678: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 830f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 830f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 830f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 830f + cpse r16,r1 + rjmp 678b + rjmp 1175f 
+830: + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 + bst r7,6 + bld r7,2 + bst r7,7 + bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld r11,0 + bst r8,5 + bld r10,6 + bst r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + eor r8,r18 + eor r9,r19 
+ eor r10,r20 + eor r11,r21 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +1175: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_decrypt, .-gift128b_decrypt + +#endif + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128b-tiny-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128b-tiny-avr.S new file mode 100644 index 0000000..e7a03f1 --- /dev/null +++ b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-gift128b-tiny-avr.S @@ -0,0 +1,6766 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_TINY + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 + .byte 
128 + + .text +.global gift128b_init + .type gift128b_init, @function +gift128b_init: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 16 + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + st Z,r22 + std Z+1,r23 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size gift128b_init, .-gift128b_init + + .text +.global gift128b_encrypt + .type gift128b_encrypt, @function +gift128b_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + movw r30,r28 + adiw r30,1 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + ldi r24,4 +35: + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + mov r0,r22 + mov r22,r4 + mov r4,r0 + mov r0,r23 + mov r23,r5 + mov r5,r0 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + dec r24 + breq 5117f + rjmp 35b +5117: + subi r30,80 + sbc r31,r1 + ldi r24,2 +121: + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 
+ andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + st Z,r3 + std Z+1,r23 + std Z+2,r2 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+4,r3 + std Z+5,r23 + std Z+6,r2 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + 
mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+8,r3 + std Z+9,r23 + std Z+10,r2 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+12,r3 + std Z+13,r23 + std Z+14,r2 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw 
r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r3 + std Z+17,r23 + std Z+18,r2 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+20,r3 + std Z+21,r23 + std Z+22,r2 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + 
ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+24,r3 + std Z+25,r23 + std Z+26,r2 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+28,r3 + std Z+29,r23 + std Z+30,r2 + std Z+31,r22 + dec r24 + breq 1270f + adiw r30,40 + rjmp 121b +1270: + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + 
adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 1329f + rcall 1329f + rjmp 2541f +1329: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z 
+#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + 
eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 
+ mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +2067: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + 
lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd 
r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +2541: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt, .-gift128b_encrypt + + .text +.global gift128b_encrypt_preloaded + .type gift128b_encrypt_preloaded, @function +gift128b_encrypt_preloaded: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + movw r30,r28 + adiw r30,1 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + ldi r24,4 +35: + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + mov r0,r22 + mov r22,r4 + mov r4,r0 + mov r0,r23 + mov r23,r5 + mov r5,r0 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + dec r24 + breq 5117f + rjmp 35b +5117: + subi r30,80 + sbc r31,r1 + ldi r24,2 +121: + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 
+ mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + st Z,r3 + std Z+1,r23 + std Z+2,r2 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+4,r3 + std Z+5,r23 + std Z+6,r2 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + 
rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+8,r3 + std Z+9,r23 + std Z+10,r2 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+12,r3 + std Z+13,r23 + std Z+14,r2 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw 
r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r3 + std Z+17,r23 + std Z+18,r2 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+20,r3 + std Z+21,r23 + std Z+22,r2 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 
+ mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+24,r3 + std Z+25,r23 + std Z+26,r2 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+28,r3 + std Z+29,r23 + std Z+30,r2 + std Z+31,r22 + dec r24 + breq 1270f + adiw r30,40 + rjmp 121b +1270: + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi 
r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 1329f + rcall 1329f + rjmp 2541f +1329: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if 
defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + 
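The and/or/eor/com sequence at the top of the 1329: subroutine applies a bitsliced 4-bit S-box to the four 32-bit slices held in r22..r3, r4..r7, r8..r11 and r12..r15. A C sketch of the same sequence, with sbox_layer as a placeholder name and the register-to-slice mapping inferred from the instruction order above:

#include <stdint.h>

/* s0..s3 stand for the register quartets r22..r3, r4..r7, r8..r11 and
 * r12..r15 respectively; each statement mirrors one and/or/eor/com group. */
static void sbox_layer(uint32_t *s0, uint32_t *s1, uint32_t *s2, uint32_t *s3)
{
    *s1 ^= *s0 & *s2;
    *s0 ^= *s1 & *s3;
    *s2 ^= *s0 | *s1;
    *s3 ^= *s2;
    *s1 ^= *s3;
    *s3 = ~(*s3);
    *s2 ^= *s0 & *s1;
}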
ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +2067: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor 
r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or 
r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +2541: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 40 +table_1: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 11 + .byte 23 + .byte 46 + .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128b_decrypt + .type gift128b_decrypt, @function +gift128b_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd 
r25,Z+11 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r17,hh8(table_1) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +114: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + cpse r16,r1 + rjmp 114b + rjmp 611f +266: + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 
+ bst r7,6 + bld r7,2 + bst r7,7 + bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld r11,0 + bst r8,5 + bld r10,6 + bst r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +611: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop 
r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_decrypt, .-gift128b_decrypt + +#endif + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-util.h b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-util.h +++ b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 
16), 1))
+
+/* Left rotate by 18: Rotate left by 16, then left by 2 */
+#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1))
+
+/* Left rotate by 19: Rotate left by 16, then left by 3 */
+#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1))
+
+/* Left rotate by 20: Rotate left by 16, then left by 4 */
+#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1))
+
+/* Left rotate by 21: Rotate left by 24, then right by 3 */
+#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1))
+
+/* Left rotate by 22: Rotate left by 24, then right by 2 */
+#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1))
+
+/* Left rotate by 23: Rotate left by 24, then right by 1 */
+#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1))
+
+/* Left rotate by 24 */
+#define leftRotate24(a) (leftRotate((a), 24))
+
+/* Left rotate by 25: Rotate left by 24, then left by 1 */
+#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1))
+
+/* Left rotate by 26: Rotate left by 24, then left by 2 */
+#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1))
+
+/* Left rotate by 27: Rotate left by 24, then left by 3 */
+#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1))
+
+/* Left rotate by 28: Rotate right by 4 */
+#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1))
+
+/* Left rotate by 29: Rotate right by 3 */
+#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1))
+
+/* Left rotate by 30: Rotate right by 2 */
+#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1))
+
+/* Left rotate by 31: Rotate right by 1 */
+#define leftRotate31(a) (rightRotate((a), 1))
+
+/* Define the 32-bit right rotations in terms of left rotations */
+#define rightRotate1(a) (leftRotate31((a)))
+#define rightRotate2(a) (leftRotate30((a)))
+#define rightRotate3(a) (leftRotate29((a)))
+#define rightRotate4(a) (leftRotate28((a)))
+#define rightRotate5(a) (leftRotate27((a)))
+#define rightRotate6(a) (leftRotate26((a)))
+#define rightRotate7(a) (leftRotate25((a)))
+#define rightRotate8(a) (leftRotate24((a)))
+#define rightRotate9(a) (leftRotate23((a)))
+#define rightRotate10(a) (leftRotate22((a)))
+#define rightRotate11(a) (leftRotate21((a)))
+#define rightRotate12(a) (leftRotate20((a)))
+#define rightRotate13(a) (leftRotate19((a)))
+#define rightRotate14(a) (leftRotate18((a)))
+#define rightRotate15(a) (leftRotate17((a)))
+#define rightRotate16(a) (leftRotate16((a)))
+#define rightRotate17(a) (leftRotate15((a)))
+#define rightRotate18(a) (leftRotate14((a)))
+#define rightRotate19(a) (leftRotate13((a)))
+#define rightRotate20(a) (leftRotate12((a)))
+#define rightRotate21(a) (leftRotate11((a)))
+#define rightRotate22(a) (leftRotate10((a)))
+#define rightRotate23(a) (leftRotate9((a)))
+#define rightRotate24(a) (leftRotate8((a)))
+#define rightRotate25(a) (leftRotate7((a)))
+#define rightRotate26(a) (leftRotate6((a)))
+#define rightRotate27(a) (leftRotate5((a)))
+#define rightRotate28(a) (leftRotate4((a)))
+#define rightRotate29(a) (leftRotate3((a)))
+#define rightRotate30(a) (leftRotate2((a)))
+#define rightRotate31(a) (leftRotate1((a)))
+
+#endif /* LW_CRYPTO_ROTATE32_COMPOSED */
+
 /* Rotation macros for 64-bit arguments */

 /* Generic left rotate */
diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/sundae-gift.c
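The composed rotation macros above rely on plain rotation identities: any rotation count can be reached from a rotation by 8, 16 or 24 bits (cheap byte moves on AVR) plus at most four single-bit rotations. A small self-contained check of three of those identities, using local rotl/rotr helpers rather than the macros themselves:

#include <assert.h>
#include <stdint.h>

static uint32_t rotl(uint32_t x, unsigned n) { return (x << n) | (x >> (32 - n)); }
static uint32_t rotr(uint32_t x, unsigned n) { return (x >> n) | (x << (32 - n)); }

int main(void)
{
    uint32_t x = 0x80C12301;
    assert(rotr(rotl(x, 8), 3) == rotl(x, 5));    /* leftRotate5: left 8, then right 3   */
    assert(rotl(rotl(x, 16), 4) == rotl(x, 20));  /* leftRotate20: left 16, then left 4  */
    assert(rotr(rotr(x, 1), 1) == rotl(x, 30));   /* leftRotate30: right 2               */
    return 0;
}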
b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/sundae-gift.c index 984a4db..d192b8e 100644 --- a/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/sundae-gift.c +++ b/sundae-gift/Implementations/crypto_aead/sundaegift0v1/rhys/sundae-gift.c @@ -140,8 +140,7 @@ static int sundae_gift_aead_encrypt *clen = mlen + SUNDAE_GIFT_TAG_SIZE; /* Set the key schedule */ - if (!gift128b_init(&ks, k, SUNDAE_GIFT_KEY_SIZE)) - return -1; + gift128b_init(&ks, k); /* Format and encrypt the initial domain separation block */ if (adlen > 0) @@ -205,8 +204,7 @@ static int sundae_gift_aead_decrypt len = *mlen = clen - SUNDAE_GIFT_TAG_SIZE; /* Set the key schedule */ - if (!gift128b_init(&ks, k, SUNDAE_GIFT_KEY_SIZE)) - return -1; + gift128b_init(&ks, k); /* Decrypt the ciphertext to produce the plaintext, using the * tag as the initialization vector for the decryption process */ diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/aead-common.c b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/aead-common.h b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. 
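The aead_check_tag() helper deleted above turns the accumulated byte differences into a keep-or-wipe mask with (accum - 1) >> 8: accum is the OR of all tag-byte XORs, so it is zero exactly when the tags match. A quick illustration of the two mask values, assuming the arithmetic right-shift behaviour of the two's-complement compilers this library targets:

#include <assert.h>

int main(void)
{
    int equal = 0;      /* accum when every tag byte matched     */
    int differ = 0x5A;  /* accum when at least one byte differed */
    assert((((equal  - 1) >> 8) & 0xFF) == 0xFF);  /* plaintext kept   */
    assert((((differ - 1) >> 8) & 0xFF) == 0x00);  /* plaintext zeroed */
    return 0;
}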
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
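The aead_cipher_t structure above is how each algorithm advertises its key, nonce and tag sizes together with its encrypt/decrypt entry points. A hypothetical instance for illustration only; every identifier beginning with example_ is a placeholder, not a name from this repository, and aead-common.h is assumed to be on the include path:

#include "aead-common.h"

/* Placeholder prototypes matching aead_cipher_encrypt_t and aead_cipher_decrypt_t */
int example_aead_encrypt(unsigned char *c, unsigned long long *clen,
                         const unsigned char *m, unsigned long long mlen,
                         const unsigned char *ad, unsigned long long adlen,
                         const unsigned char *nsec, const unsigned char *npub,
                         const unsigned char *k);
int example_aead_decrypt(unsigned char *m, unsigned long long *mlen,
                         unsigned char *nsec,
                         const unsigned char *c, unsigned long long clen,
                         const unsigned char *ad, unsigned long long adlen,
                         const unsigned char *npub, const unsigned char *k);

static const aead_cipher_t example_cipher = {
    "Example-AEAD",
    16,                        /* key_len   */
    16,                        /* nonce_len */
    16,                        /* tag_len   */
    AEAD_FLAG_LITTLE_ENDIAN,   /* nonce bytes formatted little-endian */
    example_aead_encrypt,
    example_aead_decrypt
};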
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/api.h b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/encrypt.c b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/encrypt.c deleted file mode 100644 index b177c18..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "sundae-gift.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return sundae_gift_128_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return sundae_gift_128_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128-config.h b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128-config.h deleted file mode 100644 index 62131ba..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128-config.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_GIFT128_CONFIG_H -#define LW_INTERNAL_GIFT128_CONFIG_H - -/** - * \file internal-gift128-config.h - * \brief Configures the variant of GIFT-128 to use. - */ - -/** - * \brief Select the full variant of GIFT-128. - * - * The full variant requires 320 bytes for the key schedule and uses the - * fixslicing method to implement encryption and decryption. - */ -#define GIFT128_VARIANT_FULL 0 - -/** - * \brief Select the small variant of GIFT-128. - * - * The small variant requires 80 bytes for the key schedule. The rest - * of the key schedule is expanded on the fly during encryption. - * - * The fixslicing method is used to implement encryption and the slower - * bitslicing method is used to implement decryption. The small variant - * is suitable when memory is at a premium, decryption is not needed, - * but encryption performance is still important. - */ -#define GIFT128_VARIANT_SMALL 1 - -/** - * \brief Select the tiny variant of GIFT-128. - * - * The tiny variant requires 16 bytes for the key schedule and uses the - * bitslicing method to implement encryption and decryption. It is suitable - * for use when memory is very tight and performance is not critical. - */ -#define GIFT128_VARIANT_TINY 2 - -/** - * \def GIFT128_VARIANT - * \brief Selects the default variant of GIFT-128 to use on this platform. - */ -/** - * \def GIFT128_VARIANT_ASM - * \brief Defined to 1 if the GIFT-128 implementation has been replaced - * with an assembly code version. - */ -#if defined(__AVR__) && !defined(GIFT128_VARIANT_ASM) -#define GIFT128_VARIANT_ASM 1 -#endif -#if !defined(GIFT128_VARIANT) -#define GIFT128_VARIANT GIFT128_VARIANT_FULL -#endif -#if !defined(GIFT128_VARIANT_ASM) -#define GIFT128_VARIANT_ASM 0 -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128.c b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128.c deleted file mode 100644 index c6ac5ec..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128.c +++ /dev/null @@ -1,1498 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
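The deleted internal-gift128-config.h above selects between the full (320-byte key schedule), small (80-byte) and tiny (16-byte) GIFT-128 variants, and only falls back to the full variant when GIFT128_VARIANT is not already defined. A hypothetical way for a RAM-constrained build to pick the small variant, equivalent to passing -DGIFT128_VARIANT=GIFT128_VARIANT_SMALL on the compiler command line:

/* Must appear before the configuration header is first included. */
#define GIFT128_VARIANT GIFT128_VARIANT_SMALL
#include "internal-gift128-config.h"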
- */ - -#include "internal-gift128.h" -#include "internal-util.h" - -#if !GIFT128_VARIANT_ASM - -#if GIFT128_VARIANT != GIFT128_VARIANT_TINY - -/* Round constants for GIFT-128 in the fixsliced representation */ -static uint32_t const GIFT128_RC_fixsliced[40] = { - 0x10000008, 0x80018000, 0x54000002, 0x01010181, 0x8000001f, 0x10888880, - 0x6001e000, 0x51500002, 0x03030180, 0x8000002f, 0x10088880, 0x60016000, - 0x41500002, 0x03030080, 0x80000027, 0x10008880, 0x4001e000, 0x11500002, - 0x03020180, 0x8000002b, 0x10080880, 0x60014000, 0x01400002, 0x02020080, - 0x80000021, 0x10000080, 0x0001c000, 0x51000002, 0x03010180, 0x8000002e, - 0x10088800, 0x60012000, 0x40500002, 0x01030080, 0x80000006, 0x10008808, - 0xc001a000, 0x14500002, 0x01020181, 0x8000001a -}; - -#endif - -#if GIFT128_VARIANT != GIFT128_VARIANT_FULL - -/* Round constants for GIFT-128 in the bitsliced representation */ -static uint8_t const GIFT128_RC[40] = { - 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, - 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, - 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, - 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38, - 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A -}; - -#endif - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) - -/* - * The permutation below was generated by the online permuation generator at - * "http://programming.sirrida.de/calcperm.php". - * - * All of the permutuations are essentially the same, except that each is - * rotated by 8 bits with respect to the next: - * - * P0: 0 24 16 8 1 25 17 9 2 26 18 10 3 27 19 11 4 28 20 12 5 29 21 13 6 30 22 14 7 31 23 15 - * P1: 8 0 24 16 9 1 25 17 10 2 26 18 11 3 27 19 12 4 28 20 13 5 29 21 14 6 30 22 15 7 31 23 - * P2: 16 8 0 24 17 9 1 25 18 10 2 26 19 11 3 27 20 12 4 28 21 13 5 29 22 14 6 30 23 15 7 31 - * P3: 24 16 8 0 25 17 9 1 26 18 10 2 27 19 11 3 28 20 12 4 29 21 13 5 30 22 14 6 31 23 15 7 - * - * The most efficient permutation from the online generator was P3, so we - * perform it as the core of the others, and then perform a final rotation. - * - * It is possible to do slightly better than "P3 then rotate" on desktop and - * server architectures for the other permutations. But the advantage isn't - * as evident on embedded platforms so we keep things simple. 
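bit_permute_step(y, mask, shift) exchanges bit i with bit i+shift for every bit i selected by mask, so each of the P0..P3 permutations described above is built from four such exchanges plus a final byte rotation (see the PERM3_INNER and PERM0..PERM3 macros that follow). A small check of the first exchange used by PERM3_INNER, with bps as a local stand-in for the macro:

#include <assert.h>
#include <stdint.h>

static uint32_t bps(uint32_t y, uint32_t mask, unsigned shift)
{
    uint32_t t = ((y >> shift) ^ y) & mask;
    return y ^ t ^ (t << shift);
}

int main(void)
{
    /* mask 0x0a0a0a0a, shift 3: bit 1 <-> bit 4, bit 3 <-> bit 6, and so on */
    assert(bps(1u << 1, 0x0a0a0a0a, 3) == (1u << 4));
    assert(bps(1u << 4, 0x0a0a0a0a, 3) == (1u << 1));
    /* bits that are neither masked nor the partner of a masked bit stay put */
    assert(bps(1u << 0, 0x0a0a0a0a, 3) == (1u << 0));
    return 0;
}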
- */ -#define PERM3_INNER(x) \ - do { \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x000000ff, 24); \ - } while (0) -#define PERM0(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate8(_x); \ - } while (0) -#define PERM1(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate16(_x); \ - } while (0) -#define PERM2(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate24(_x); \ - } while (0) -#define PERM3(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) - -#define INV_PERM3_INNER(x) \ - do { \ - bit_permute_step(x, 0x00550055, 9); \ - bit_permute_step(x, 0x00003333, 18); \ - bit_permute_step(x, 0x000f000f, 12); \ - bit_permute_step(x, 0x000000ff, 24); \ - } while (0) -#define INV_PERM0(x) \ - do { \ - uint32_t _x = rightRotate8(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM1(x) \ - do { \ - uint32_t _x = rightRotate16(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM2(x) \ - do { \ - uint32_t _x = rightRotate24(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM3(x) \ - do { \ - uint32_t _x = (x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) - -/** - * \brief Converts the GIFT-128 nibble-based representation into word-based. - * - * \param output Output buffer to write the word-based version to. - * \param input Input buffer to read the nibble-based version from. - * - * The \a input and \a output buffers can be the same buffer. - */ -static void gift128n_to_words - (unsigned char *output, const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Load the input buffer into 32-bit words. We use the nibble order - * from the HYENA submission to NIST which is byte-reversed with respect - * to the nibble order of the original GIFT-128 paper. Nibble zero is in - * the first byte instead of the last, which means little-endian order. */ - s0 = le_load_word32(input + 12); - s1 = le_load_word32(input + 8); - s2 = le_load_word32(input + 4); - s3 = le_load_word32(input); - - /* Rearrange the bits so that bits 0..3 of each nibble are - * scattered to bytes 0..3 of each word. The permutation is: - * - * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 - * - * Generated with "http://programming.sirrida.de/calcperm.php". - */ - #define PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - PERM_WORDS(s0); - PERM_WORDS(s1); - PERM_WORDS(s2); - PERM_WORDS(s3); - - /* Rearrange the bytes and write them to the output buffer */ - output[0] = (uint8_t)s0; - output[1] = (uint8_t)s1; - output[2] = (uint8_t)s2; - output[3] = (uint8_t)s3; - output[4] = (uint8_t)(s0 >> 8); - output[5] = (uint8_t)(s1 >> 8); - output[6] = (uint8_t)(s2 >> 8); - output[7] = (uint8_t)(s3 >> 8); - output[8] = (uint8_t)(s0 >> 16); - output[9] = (uint8_t)(s1 >> 16); - output[10] = (uint8_t)(s2 >> 16); - output[11] = (uint8_t)(s3 >> 16); - output[12] = (uint8_t)(s0 >> 24); - output[13] = (uint8_t)(s1 >> 24); - output[14] = (uint8_t)(s2 >> 24); - output[15] = (uint8_t)(s3 >> 24); -} - -/** - * \brief Converts the GIFT-128 word-based representation into nibble-based. 
- * - * \param output Output buffer to write the nibble-based version to. - * \param input Input buffer to read the word-based version from. - */ -static void gift128n_to_nibbles - (unsigned char *output, const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Load the input bytes and rearrange them so that s0 contains the - * most significant nibbles and s3 contains the least significant */ - s0 = (((uint32_t)(input[12])) << 24) | - (((uint32_t)(input[8])) << 16) | - (((uint32_t)(input[4])) << 8) | - ((uint32_t)(input[0])); - s1 = (((uint32_t)(input[13])) << 24) | - (((uint32_t)(input[9])) << 16) | - (((uint32_t)(input[5])) << 8) | - ((uint32_t)(input[1])); - s2 = (((uint32_t)(input[14])) << 24) | - (((uint32_t)(input[10])) << 16) | - (((uint32_t)(input[6])) << 8) | - ((uint32_t)(input[2])); - s3 = (((uint32_t)(input[15])) << 24) | - (((uint32_t)(input[11])) << 16) | - (((uint32_t)(input[7])) << 8) | - ((uint32_t)(input[3])); - - /* Apply the inverse of PERM_WORDS() from the function above */ - #define INV_PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x00aa00aa, 7); \ - bit_permute_step(x, 0x0000cccc, 14); \ - bit_permute_step(x, 0x00f000f0, 4); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - INV_PERM_WORDS(s0); - INV_PERM_WORDS(s1); - INV_PERM_WORDS(s2); - INV_PERM_WORDS(s3); - - /* Store the result into the output buffer as 32-bit words */ - le_store_word32(output + 12, s0); - le_store_word32(output + 8, s1); - le_store_word32(output + 4, s2); - le_store_word32(output, s3); -} - -void gift128n_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - gift128n_to_words(output, input); - gift128b_encrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} - -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - gift128n_to_words(output, input); - gift128b_decrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} - -#if GIFT128_VARIANT != GIFT128_VARIANT_TINY - -/** - * \brief Swaps bits within two words. - * - * \param a The first word. - * \param b The second word. - * \param mask Mask for the bits to shift. - * \param shift Shift amount in bits. - */ -#define gift128b_swap_move(a, b, mask, shift) \ - do { \ - uint32_t tmp = ((b) ^ ((a) >> (shift))) & (mask); \ - (b) ^= tmp; \ - (a) ^= tmp << (shift); \ - } while (0) - -/** - * \brief Derives the next 10 fixsliced keys in the key schedule. - * - * \param next Points to the buffer to receive the next 10 keys. - * \param prev Points to the buffer holding the previous 10 keys. - * - * The \a next and \a prev buffers are allowed to be the same. 
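gift128b_swap_move() above is the SWAPMOVE construction that appears throughout bitsliced cipher code. A stand-alone sketch of the same operation as a function (the name swap_move32 is ours, not part of the library):

#include <stdint.h>

/* SWAPMOVE: exchange the bits of *b selected by "mask" with the bits of
 * *a located "shift" positions higher.  Calling it with a == b reduces to
 * the delta swap shown earlier, which is how the key-schedule code below
 * uses it to permute single words in place. */
static void swap_move32(uint32_t *a, uint32_t *b, uint32_t mask, unsigned shift)
{
    uint32_t t = (*b ^ (*a >> shift)) & mask;
    *b ^= t;
    *a ^= t << shift;
}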
- */ -#define gift128b_derive_keys(next, prev) \ - do { \ - /* Key 0 */ \ - uint32_t s = (prev)[0]; \ - uint32_t t = (prev)[1]; \ - gift128b_swap_move(t, t, 0x00003333U, 16); \ - gift128b_swap_move(t, t, 0x55554444U, 1); \ - (next)[0] = t; \ - /* Key 1 */ \ - s = leftRotate8(s & 0x33333333U) | leftRotate16(s & 0xCCCCCCCCU); \ - gift128b_swap_move(s, s, 0x55551100U, 1); \ - (next)[1] = s; \ - /* Key 2 */ \ - s = (prev)[2]; \ - t = (prev)[3]; \ - (next)[2] = ((t >> 4) & 0x0F000F00U) | ((t & 0x0F000F00U) << 4) | \ - ((t >> 6) & 0x00030003U) | ((t & 0x003F003FU) << 2); \ - /* Key 3 */ \ - (next)[3] = ((s >> 6) & 0x03000300U) | ((s & 0x3F003F00U) << 2) | \ - ((s >> 5) & 0x00070007U) | ((s & 0x001F001FU) << 3); \ - /* Key 4 */ \ - s = (prev)[4]; \ - t = (prev)[5]; \ - (next)[4] = leftRotate8(t & 0xAAAAAAAAU) | \ - leftRotate16(t & 0x55555555U); \ - /* Key 5 */ \ - (next)[5] = leftRotate8(s & 0x55555555U) | \ - leftRotate12(s & 0xAAAAAAAAU); \ - /* Key 6 */ \ - s = (prev)[6]; \ - t = (prev)[7]; \ - (next)[6] = ((t >> 2) & 0x03030303U) | ((t & 0x03030303U) << 2) | \ - ((t >> 1) & 0x70707070U) | ((t & 0x10101010U) << 3); \ - /* Key 7 */ \ - (next)[7] = ((s >> 18) & 0x00003030U) | ((s & 0x01010101U) << 3) | \ - ((s >> 14) & 0x0000C0C0U) | ((s & 0x0000E0E0U) << 15) | \ - ((s >> 1) & 0x07070707U) | ((s & 0x00001010U) << 19); \ - /* Key 8 */ \ - s = (prev)[8]; \ - t = (prev)[9]; \ - (next)[8] = ((t >> 4) & 0x0FFF0000U) | ((t & 0x000F0000U) << 12) | \ - ((t >> 8) & 0x000000FFU) | ((t & 0x000000FFU) << 8); \ - /* Key 9 */ \ - (next)[9] = ((s >> 6) & 0x03FF0000U) | ((s & 0x003F0000U) << 10) | \ - ((s >> 4) & 0x00000FFFU) | ((s & 0x0000000FU) << 12); \ - } while (0) - -/** - * \brief Compute the round keys for GIFT-128 in the fixsliced representation. - * - * \param ks Points to the key schedule to initialize. - * \param k0 First key word. - * \param k1 Second key word. - * \param k2 Third key word. - * \param k3 Fourth key word. 
- */ -static void gift128b_compute_round_keys - (gift128b_key_schedule_t *ks, - uint32_t k0, uint32_t k1, uint32_t k2, uint32_t k3) -{ - unsigned index; - uint32_t temp; - - /* Set the regular key with k0 and k3 pre-swapped for the round function */ - ks->k[0] = k3; - ks->k[1] = k1; - ks->k[2] = k2; - ks->k[3] = k0; - - /* Pre-compute the keys for rounds 3..10 and permute into fixsliced form */ - for (index = 4; index < 20; index += 2) { - ks->k[index] = ks->k[index - 3]; - temp = ks->k[index - 4]; - temp = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - ks->k[index + 1] = temp; - } - for (index = 0; index < 20; index += 10) { - /* Keys 0 and 10 */ - temp = ks->k[index]; - gift128b_swap_move(temp, temp, 0x00550055U, 9); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index] = temp; - - /* Keys 1 and 11 */ - temp = ks->k[index + 1]; - gift128b_swap_move(temp, temp, 0x00550055U, 9); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 1] = temp; - - /* Keys 2 and 12 */ - temp = ks->k[index + 2]; - gift128b_swap_move(temp, temp, 0x11111111U, 3); - gift128b_swap_move(temp, temp, 0x03030303U, 6); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 2] = temp; - - /* Keys 3 and 13 */ - temp = ks->k[index + 3]; - gift128b_swap_move(temp, temp, 0x11111111U, 3); - gift128b_swap_move(temp, temp, 0x03030303U, 6); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 3] = temp; - - /* Keys 4 and 14 */ - temp = ks->k[index + 4]; - gift128b_swap_move(temp, temp, 0x0000AAAAU, 15); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 4] = temp; - - /* Keys 5 and 15 */ - temp = ks->k[index + 5]; - gift128b_swap_move(temp, temp, 0x0000AAAAU, 15); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 5] = temp; - - /* Keys 6 and 16 */ - temp = ks->k[index + 6]; - gift128b_swap_move(temp, temp, 0x0A0A0A0AU, 3); - gift128b_swap_move(temp, temp, 0x00CC00CCU, 6); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 6] = temp; - - /* Keys 7 and 17 */ - temp = ks->k[index + 7]; - gift128b_swap_move(temp, temp, 0x0A0A0A0AU, 3); - gift128b_swap_move(temp, temp, 0x00CC00CCU, 6); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 7] = temp; - - /* Keys 8, 9, 18, and 19 do not need any adjustment */ - } - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - /* Derive the fixsliced keys for the remaining rounds 11..40 */ - for (index = 20; index < 80; index += 10) { - gift128b_derive_keys(ks->k + index, ks->k + index - 20); - } -#endif -} - -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) -{ - gift128b_compute_round_keys - (ks, be_load_word32(key), be_load_word32(key + 4), - be_load_word32(key + 8), be_load_word32(key + 12)); -} - -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) -{ - /* Use 
the little-endian key byte order from the HYENA submission */ - gift128b_compute_round_keys - (ks, le_load_word32(key + 12), le_load_word32(key + 8), - le_load_word32(key + 4), le_load_word32(key)); -} - -/** - * \brief Performs the GIFT-128 S-box on the bit-sliced state. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_sbox(s0, s1, s2, s3) \ - do { \ - s1 ^= s0 & s2; \ - s0 ^= s1 & s3; \ - s2 ^= s0 | s1; \ - s3 ^= s2; \ - s1 ^= s3; \ - s3 ^= 0xFFFFFFFFU; \ - s2 ^= s0 & s1; \ - } while (0) - -/** - * \brief Performs the inverse of the GIFT-128 S-box on the bit-sliced state. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_sbox(s0, s1, s2, s3) \ - do { \ - s2 ^= s3 & s1; \ - s0 ^= 0xFFFFFFFFU; \ - s1 ^= s0; \ - s0 ^= s2; \ - s2 ^= s3 | s1; \ - s3 ^= s1 & s0; \ - s1 ^= s3 & s2; \ - } while (0) - -/** - * \brief Permutes the GIFT-128 state between the 1st and 2nd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_1(s0, s1, s2, s3) \ - do { \ - s1 = ((s1 >> 2) & 0x33333333U) | ((s1 & 0x33333333U) << 2); \ - s2 = ((s2 >> 3) & 0x11111111U) | ((s2 & 0x77777777U) << 1); \ - s3 = ((s3 >> 1) & 0x77777777U) | ((s3 & 0x11111111U) << 3); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 2nd and 3rd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_2(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 4) & 0x0FFF0FFFU) | ((s0 & 0x000F000FU) << 12); \ - s1 = ((s1 >> 8) & 0x00FF00FFU) | ((s1 & 0x00FF00FFU) << 8); \ - s2 = ((s2 >> 12) & 0x000F000FU) | ((s2 & 0x0FFF0FFFU) << 4); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 3rd and 4th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_3(s0, s1, s2, s3) \ - do { \ - gift128b_swap_move(s1, s1, 0x55555555U, 1); \ - s2 = leftRotate16(s2); \ - gift128b_swap_move(s2, s2, 0x00005555U, 1); \ - s3 = leftRotate16(s3); \ - gift128b_swap_move(s3, s3, 0x55550000U, 1); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 4th and 5th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_4(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 6) & 0x03030303U) | ((s0 & 0x3F3F3F3FU) << 2); \ - s1 = ((s1 >> 4) & 0x0F0F0F0FU) | ((s1 & 0x0F0F0F0FU) << 4); \ - s2 = ((s2 >> 2) & 0x3F3F3F3FU) | ((s2 & 0x03030303U) << 6); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 5th and 1st mini-rounds. 
- * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_5(s0, s1, s2, s3) \ - do { \ - s1 = leftRotate16(s1); \ - s2 = rightRotate8(s2); \ - s3 = leftRotate8(s3); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 1st and 2nd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_1(s0, s1, s2, s3) \ - do { \ - s1 = ((s1 >> 2) & 0x33333333U) | ((s1 & 0x33333333U) << 2); \ - s2 = ((s2 >> 1) & 0x77777777U) | ((s2 & 0x11111111U) << 3); \ - s3 = ((s3 >> 3) & 0x11111111U) | ((s3 & 0x77777777U) << 1); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 2nd and 3rd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_2(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 12) & 0x000F000FU) | ((s0 & 0x0FFF0FFFU) << 4); \ - s1 = ((s1 >> 8) & 0x00FF00FFU) | ((s1 & 0x00FF00FFU) << 8); \ - s2 = ((s2 >> 4) & 0x0FFF0FFFU) | ((s2 & 0x000F000FU) << 12); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 3rd and 4th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_3(s0, s1, s2, s3) \ - do { \ - gift128b_swap_move(s1, s1, 0x55555555U, 1); \ - gift128b_swap_move(s2, s2, 0x00005555U, 1); \ - s2 = leftRotate16(s2); \ - gift128b_swap_move(s3, s3, 0x55550000U, 1); \ - s3 = leftRotate16(s3); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 4th and 5th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_4(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 2) & 0x3F3F3F3FU) | ((s0 & 0x03030303U) << 6); \ - s1 = ((s1 >> 4) & 0x0F0F0F0FU) | ((s1 & 0x0F0F0F0FU) << 4); \ - s2 = ((s2 >> 6) & 0x03030303U) | ((s2 & 0x3F3F3F3FU) << 2); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 5th and 1st mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_5(s0, s1, s2, s3) \ - do { \ - s1 = leftRotate16(s1); \ - s2 = leftRotate8(s2); \ - s3 = rightRotate8(s3); \ - } while (0); - -/** - * \brief Performs five fixsliced encryption rounds for GIFT-128. - * - * \param rk Points to the 10 round keys for these rounds. - * \param rc Points to the round constants for these rounds. - * - * We perform all 40 rounds of the fixsliced GIFT-128 five at a time. 
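To make the bitsliced S-box concrete: the seven boolean operations in gift128b_sbox() evaluate the GIFT S-box on all 32 four-bit cells of the state at once, one bit of each cell per state word. The sketch below is an illustration only; it applies the identical equations to a single cell held one bit per variable, and the final packing order is arbitrary rather than the library's bit ordering:

#include <stdint.h>

/* gift128b_sbox() restricted to one cell, with each of s0..s3 holding a
 * single bit (0 or 1).  In the real code each variable is a 32-bit slice,
 * so the same instruction sequence processes 32 cells in parallel. */
static unsigned gift128_sbox_cell(unsigned s0, unsigned s1,
                                  unsigned s2, unsigned s3)
{
    s1 ^= s0 & s2;
    s0 ^= s1 & s3;
    s2 ^= s0 | s1;
    s3 ^= s2;
    s1 ^= s3;
    s3 ^= 1;                /* the slice version XORs with 0xFFFFFFFFU */
    s2 ^= s0 & s1;
    return s0 | (s1 << 1) | (s2 << 2) | (s3 << 3);  /* arbitrary packing */
}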
- * - * The permutation is restructured so that one of the words each round - * does not need to be permuted, with the others rotating left, up, right, - * and down to keep the bits in line with their non-moving counterparts. - * This reduces the number of shifts required significantly. - * - * At the end of five rounds, the bit ordering will return to the - * original position. We then repeat the process for the next 5 rounds. - */ -#define gift128b_encrypt_5_rounds(rk, rc) \ - do { \ - /* 1st round - S-box, rotate left, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_1(s0, s1, s2, s3); \ - s1 ^= (rk)[0]; \ - s2 ^= (rk)[1]; \ - s0 ^= (rc)[0]; \ - \ - /* 2nd round - S-box, rotate up, add round key */ \ - gift128b_sbox(s3, s1, s2, s0); \ - gift128b_permute_state_2(s0, s1, s2, s3); \ - s1 ^= (rk)[2]; \ - s2 ^= (rk)[3]; \ - s3 ^= (rc)[1]; \ - \ - /* 3rd round - S-box, swap columns, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_3(s0, s1, s2, s3); \ - s1 ^= (rk)[4]; \ - s2 ^= (rk)[5]; \ - s0 ^= (rc)[2]; \ - \ - /* 4th round - S-box, rotate left and swap rows, add round key */ \ - gift128b_sbox(s3, s1, s2, s0); \ - gift128b_permute_state_4(s0, s1, s2, s3); \ - s1 ^= (rk)[6]; \ - s2 ^= (rk)[7]; \ - s3 ^= (rc)[3]; \ - \ - /* 5th round - S-box, rotate up, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_5(s0, s1, s2, s3); \ - s1 ^= (rk)[8]; \ - s2 ^= (rk)[9]; \ - s0 ^= (rc)[4]; \ - \ - /* Swap s0 and s3 in preparation for the next 1st round */ \ - s0 ^= s3; \ - s3 ^= s0; \ - s0 ^= s3; \ - } while (0) - -/** - * \brief Performs five fixsliced decryption rounds for GIFT-128. - * - * \param rk Points to the 10 round keys for these rounds. - * \param rc Points to the round constants for these rounds. - * - * We perform all 40 rounds of the fixsliced GIFT-128 five at a time. 
- */ -#define gift128b_decrypt_5_rounds(rk, rc) \ - do { \ - /* Swap s0 and s3 in preparation for the next 5th round */ \ - s0 ^= s3; \ - s3 ^= s0; \ - s0 ^= s3; \ - \ - /* 5th round - S-box, rotate down, add round key */ \ - s1 ^= (rk)[8]; \ - s2 ^= (rk)[9]; \ - s0 ^= (rc)[4]; \ - gift128b_inv_permute_state_5(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - \ - /* 4th round - S-box, rotate right and swap rows, add round key */ \ - s1 ^= (rk)[6]; \ - s2 ^= (rk)[7]; \ - s3 ^= (rc)[3]; \ - gift128b_inv_permute_state_4(s0, s1, s2, s3); \ - gift128b_inv_sbox(s0, s1, s2, s3); \ - \ - /* 3rd round - S-box, swap columns, add round key */ \ - s1 ^= (rk)[4]; \ - s2 ^= (rk)[5]; \ - s0 ^= (rc)[2]; \ - gift128b_inv_permute_state_3(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - \ - /* 2nd round - S-box, rotate down, add round key */ \ - s1 ^= (rk)[2]; \ - s2 ^= (rk)[3]; \ - s3 ^= (rc)[1]; \ - gift128b_inv_permute_state_2(s0, s1, s2, s3); \ - gift128b_inv_sbox(s0, s1, s2, s3); \ - \ - /* 1st round - S-box, rotate right, add round key */ \ - s1 ^= (rk)[0]; \ - s2 ^= (rk)[1]; \ - s0 ^= (rc)[0]; \ - gift128b_inv_permute_state_1(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - } while (0) - -#else /* GIFT128_VARIANT_TINY */ - -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) -{ - /* Mirror the fixslicing word order of 3, 1, 2, 0 */ - ks->k[0] = be_load_word32(key + 12); - ks->k[1] = be_load_word32(key + 4); - ks->k[2] = be_load_word32(key + 8); - ks->k[3] = be_load_word32(key); -} - -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) -{ - /* Use the little-endian key byte order from the HYENA submission - * and mirror the fixslicing word order of 3, 1, 2, 0 */ - ks->k[0] = le_load_word32(key); - ks->k[1] = le_load_word32(key + 8); - ks->k[2] = le_load_word32(key + 4); - ks->k[3] = le_load_word32(key + 12); -} - -#endif /* GIFT128_VARIANT_TINY */ - -#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext 
into local variables */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. - * Every 5 rounds except the last we add the tweak value to the state */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#elif GIFT128_VARIANT == GIFT128_VARIANT_FULL - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_encrypt_5_rounds(ks->k 
+ 60, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into local variables */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. 
- * Every 5 rounds except the last we add the tweak value to the state */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#else /* GIFT128_VARIANT_TINY */ - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 
0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* AddTweak - XOR in the tweak every 5 rounds except the last */ - if (((round + 1) % 5) == 0 && round < 39) - s0 ^= tweak; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#endif /* GIFT128_VARIANT_TINY */ - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the ciphertext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = 
be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. - * Every 5 rounds except the first we add the tweak value to the state */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - - /* Pack the state into the plaintext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#else /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ - -/* The small variant uses fixslicing for encryption, but we need to change - * to bitslicing for decryption because of the difficulty of fast-forwarding - * the fixsliced key schedule to the end. So the tiny variant is used for - * decryption when the small variant is selected. Since the NIST AEAD modes - * for GIFT-128 only use the block encrypt operation, the inefficiencies - * in decryption don't matter all that much */ - -/** - * \def gift128b_load_and_forward_schedule() - * \brief Generate the decryption key at the end of the last round. - * - * To do that, we run the block operation forward to determine the - * final state of the key schedule after the last round: - * - * w0 = ks->k[0]; - * w1 = ks->k[1]; - * w2 = ks->k[2]; - * w3 = ks->k[3]; - * for (round = 0; round < 40; ++round) { - * temp = w3; - * w3 = w2; - * w2 = w1; - * w1 = w0; - * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - * } - * - * We can short-cut all of the above by noticing that we don't need - * to do the word rotations. Every 4 rounds, the rotation alignment - * returns to the original position and each word has been rotated - * by applying the "2 right and 4 left" bit-rotation step to it. - * We then repeat that 10 times for the full 40 rounds. The overall - * effect is to apply a "20 right and 40 left" bit-rotation to every - * word in the key schedule. That is equivalent to "4 right and 8 left" - * on the 16-bit sub-words. 
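The short-cut claimed above is easy to verify in isolation. The following self-contained check (our own harness, not part of the library) applies the per-round key-word update ten times, once for every fourth round of the 40, and compares the result with the closed form used by gift128b_load_and_forward_schedule():

#include <stdint.h>
#include <stdio.h>

/* Per-round update from the bitsliced key schedule: rotate the top 16 bits
 * right by 2 and the bottom 16 bits left by 4. */
static uint32_t ks_round(uint32_t w)
{
    return ((w & 0xFFFC0000U) >> 2) | ((w & 0x00030000U) << 14) |
           ((w & 0x00000FFFU) << 4) | ((w & 0x0000F000U) >> 12);
}

/* Closed form: top half rotated right by 4, bottom half rotated left by 8,
 * i.e. the per-round update applied ten times. */
static uint32_t ks_forward_40(uint32_t w)
{
    return ((w & 0xFFF00000U) >> 4) | ((w & 0x000F0000U) << 12) |
           ((w & 0x000000FFU) << 8) | ((w & 0x0000FF00U) >> 8);
}

int main(void)
{
    uint32_t w = 0x12345678U;        /* arbitrary test word */
    uint32_t iter = w;
    for (int i = 0; i < 10; ++i)     /* each word is updated once every 4 of the 40 rounds */
        iter = ks_round(iter);
    printf("%s\n", iter == ks_forward_40(w) ? "match" : "mismatch");
    return 0;
}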
- */ -#if GIFT128_VARIANT != GIFT128_VARIANT_SMALL -#define gift128b_load_and_forward_schedule() \ - do { \ - w0 = ks->k[3]; \ - w1 = ks->k[1]; \ - w2 = ks->k[2]; \ - w3 = ks->k[0]; \ - w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ - ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ - w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ - ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ - w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ - ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ - w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ - ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ - } while (0) -#else -/* The small variant needs to also undo some of the rotations that were - * done to generate the fixsliced version of the key schedule */ -#define gift128b_load_and_forward_schedule() \ - do { \ - w0 = ks->k[3]; \ - w1 = ks->k[1]; \ - w2 = ks->k[2]; \ - w3 = ks->k[0]; \ - gift128b_swap_move(w3, w3, 0x000000FFU, 24); \ - gift128b_swap_move(w3, w3, 0x00003333U, 18); \ - gift128b_swap_move(w3, w3, 0x000F000FU, 12); \ - gift128b_swap_move(w3, w3, 0x00550055U, 9); \ - gift128b_swap_move(w1, w1, 0x000000FFU, 24); \ - gift128b_swap_move(w1, w1, 0x00003333U, 18); \ - gift128b_swap_move(w1, w1, 0x000F000FU, 12); \ - gift128b_swap_move(w1, w1, 0x00550055U, 9); \ - gift128b_swap_move(w2, w2, 0x000000FFU, 24); \ - gift128b_swap_move(w2, w2, 0x000F000FU, 12); \ - gift128b_swap_move(w2, w2, 0x03030303U, 6); \ - gift128b_swap_move(w2, w2, 0x11111111U, 3); \ - gift128b_swap_move(w0, w0, 0x000000FFU, 24); \ - gift128b_swap_move(w0, w0, 0x000F000FU, 12); \ - gift128b_swap_move(w0, w0, 0x03030303U, 6); \ - gift128b_swap_move(w0, w0, 0x11111111U, 3); \ - w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ - ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ - w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ - ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ - w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ - ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ - w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ - ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ - } while (0) -#endif - -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the ciphertext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Generate the decryption key at the end of the last round */ - gift128b_load_and_forward_schedule(); - - /* Perform all 40 rounds */ - for (round = 40; round > 0; --round) { - /* Rotate the key schedule backwards */ - temp = w0; - w0 = w1; - w1 = w2; - w2 = w3; - w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | - ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; - - /* InvPermBits - apply the inverse of the 128-bit permutation */ - INV_PERM0(s0); - INV_PERM1(s1); - INV_PERM2(s2); - INV_PERM3(s3); - - /* InvSubCells - apply the inverse of the S-box */ - temp = s0; - s0 = s3; - s3 = temp; - s2 ^= s0 & s1; - s3 ^= 0xFFFFFFFFU; - s1 ^= s3; - s3 ^= s2; - s2 ^= s0 | s1; - s0 ^= s1 
& s3; - s1 ^= s0 & s2; - } - - /* Pack the state into the plaintext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Generate the decryption key at the end of the last round */ - gift128b_load_and_forward_schedule(); - - /* Perform all 40 rounds */ - for (round = 40; round > 0; --round) { - /* Rotate the key schedule backwards */ - temp = w0; - w0 = w1; - w1 = w2; - w2 = w3; - w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | - ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); - - /* AddTweak - XOR in the tweak every 5 rounds except the last */ - if ((round % 5) == 0 && round < 40) - s0 ^= tweak; - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; - - /* InvPermBits - apply the inverse of the 128-bit permutation */ - INV_PERM0(s0); - INV_PERM1(s1); - INV_PERM2(s2); - INV_PERM3(s3); - - /* InvSubCells - apply the inverse of the S-box */ - temp = s0; - s0 = s3; - s3 = temp; - s2 ^= s0 & s1; - s3 ^= 0xFFFFFFFFU; - s1 ^= s3; - s3 ^= s2; - s2 ^= s0 | s1; - s0 ^= s1 & s3; - s1 ^= s0 & s2; - } - - /* Pack the state into the plaintext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#endif /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ - -#endif /* !GIFT128_VARIANT_ASM */ diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128.h b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128.h deleted file mode 100644 index f57d143..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128.h +++ /dev/null @@ -1,246 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_GIFT128_H -#define LW_INTERNAL_GIFT128_H - -/** - * \file internal-gift128.h - * \brief GIFT-128 block cipher. - * - * There are three versions of GIFT-128 in use within the second round - * submissions to the NIST lightweight cryptography competition. - * - * The most efficient version for 32-bit software implementation is the - * GIFT-128-b bit-sliced version from GIFT-COFB and SUNDAE-GIFT. - * - * The second is the nibble-based version from HYENA. We implement the - * HYENA version as a wrapper around the bit-sliced version. - * - * The third version is a variant on the HYENA nibble-based version that - * includes a 4-bit tweak value for domain separation. It is used by - * the ESTATE submission to NIST. - * - * Technically there is a fourth version of GIFT-128 which is the one that - * appeared in the original GIFT-128 paper. It is almost the same as the - * HYENA version except that the byte ordering is big-endian instead of - * HYENA's little-endian. The original version of GIFT-128 doesn't appear - * in any of the NIST submissions so we don't bother with it in this library. - * - * References: https://eprint.iacr.org/2017/622.pdf, - * https://eprint.iacr.org/2020/412.pdf, - * https://giftcipher.github.io/gift/ - */ - -#include -#include -#include "internal-gift128-config.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of a GIFT-128 block in bytes. - */ -#define GIFT128_BLOCK_SIZE 16 - -/** - * \var GIFT128_ROUND_KEYS - * \brief Number of round keys for the GIFT-128 key schedule. - */ -#if GIFT128_VARIANT == GIFT128_VARIANT_TINY -#define GIFT128_ROUND_KEYS 4 -#elif GIFT128_VARIANT == GIFT128_VARIANT_SMALL -#define GIFT128_ROUND_KEYS 20 -#else -#define GIFT128_ROUND_KEYS 80 -#endif - -/** - * \brief Structure of the key schedule for GIFT-128 (bit-sliced). - */ -typedef struct -{ - /** Pre-computed round keys for bit-sliced GIFT-128 */ - uint32_t k[GIFT128_ROUND_KEYS]; - -} gift128b_key_schedule_t; - -/** - * \brief Initializes the key schedule for GIFT-128 (bit-sliced). - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the 16 bytes of the key data. - */ -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced and pre-loaded). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version assumes that the input has already been pre-loaded from - * big-endian into host byte order in the supplied word array. 
The output - * is delivered in the same way. - */ -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]); - -/** - * \brief Decrypts a 128-bit block with GIFT-128 (bit-sliced). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - */ -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Structure of the key schedule for GIFT-128 (nibble-based). - */ -typedef gift128b_key_schedule_t gift128n_key_schedule_t; - -/** - * \brief Initializes the key schedule for GIFT-128 (nibble-based). - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the 16 bytes of the key data. - */ -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (nibble-based). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void gift128n_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with GIFT-128 (nibble-based). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - */ -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/* 4-bit tweak values expanded to 32-bit for TweGIFT-128 */ -#define GIFT128T_TWEAK_0 0x00000000 /**< TweGIFT-128 tweak value 0 */ -#define GIFT128T_TWEAK_1 0xe1e1e1e1 /**< TweGIFT-128 tweak value 1 */ -#define GIFT128T_TWEAK_2 0xd2d2d2d2 /**< TweGIFT-128 tweak value 2 */ -#define GIFT128T_TWEAK_3 0x33333333 /**< TweGIFT-128 tweak value 3 */ -#define GIFT128T_TWEAK_4 0xb4b4b4b4 /**< TweGIFT-128 tweak value 4 */ -#define GIFT128T_TWEAK_5 0x55555555 /**< TweGIFT-128 tweak value 5 */ -#define GIFT128T_TWEAK_6 0x66666666 /**< TweGIFT-128 tweak value 6 */ -#define GIFT128T_TWEAK_7 0x87878787 /**< TweGIFT-128 tweak value 7 */ -#define GIFT128T_TWEAK_8 0x78787878 /**< TweGIFT-128 tweak value 8 */ -#define GIFT128T_TWEAK_9 0x99999999 /**< TweGIFT-128 tweak value 9 */ -#define GIFT128T_TWEAK_10 0xaaaaaaaa /**< TweGIFT-128 tweak value 10 */ -#define GIFT128T_TWEAK_11 0x4b4b4b4b /**< TweGIFT-128 tweak value 11 */ -#define GIFT128T_TWEAK_12 0xcccccccc /**< TweGIFT-128 tweak value 12 */ -#define GIFT128T_TWEAK_13 0x2d2d2d2d /**< TweGIFT-128 tweak value 13 */ -#define GIFT128T_TWEAK_14 0x1e1e1e1e /**< TweGIFT-128 tweak value 14 */ -#define GIFT128T_TWEAK_15 0xffffffff /**< TweGIFT-128 tweak value 15 */ - -/** - * \brief Encrypts a 128-bit block with TweGIFT-128 (tweakable variant). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. 
- * \param tweak 4-bit tweak value expanded to 32-bit. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This variant of GIFT-128 is used by the ESTATE submission to the - * NIST Lightweight Cryptography Competition. A 4-bit tweak is added to - * some of the rounds to provide domain separation. If the tweak is - * zero, then this function is identical to gift128n_encrypt(). - */ -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak); - -/** - * \brief Decrypts a 128-bit block with TweGIFT-128 (tweakable variant). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value expanded to 32-bit. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This variant of GIFT-128 is used by the ESTATE submission to the - * NIST Lightweight Cryptography Competition. A 4-bit tweak is added to - * some of the rounds to provide domain separation. If the tweak is - * zero, then this function is identical to gift128n_encrypt(). - */ -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128b-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128b-avr.S deleted file mode 100644 index 641613a..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128b-avr.S +++ /dev/null @@ -1,2104 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 40 -table_0: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - - .text -.global gift128b_init - .type gift128b_init, @function -gift128b_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - std Z+4,r18 - std Z+5,r19 - std Z+6,r20 - std Z+7,r21 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - std Z+12,r18 - std Z+13,r19 - std Z+14,r20 - std Z+15,r21 - ret - .size gift128b_init, .-gift128b_init - - .text -.global gift128b_encrypt - .type gift128b_encrypt, @function -gift128b_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 36 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - 
ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r17,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - mov r16,r1 -46: - rcall 199f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - rcall 199f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - rcall 199f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - rcall 199f - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - ldi r17,40 - cpse r16,r17 - rjmp 46b - rjmp 548f -199: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - movw r18,r22 - movw r20,r2 - mov r0,r4 - and r0,r18 - eor r8,r0 - mov r0,r5 - and r0,r19 - eor r9,r0 - mov r0,r6 - and r0,r20 - eor r10,r0 - mov r0,r7 - and r0,r21 - eor r11,r0 - movw r22,r12 - movw r2,r14 - movw r12,r18 - movw r14,r20 - bst r22,1 - bld r0,0 - bst r22,4 - bld r22,1 - bst r2,0 - bld r22,4 - bst r22,2 - bld r2,0 - bst r23,0 - bld r22,2 - bst r22,3 - bld r23,0 - bst r23,4 - bld r22,3 - 
bst r2,3 - bld r23,4 - bst r23,6 - bld r2,3 - bst r3,3 - bld r23,6 - bst r23,5 - bld r3,3 - bst r2,7 - bld r23,5 - bst r3,6 - bld r2,7 - bst r3,1 - bld r3,6 - bst r22,5 - bld r3,1 - bst r2,4 - bld r22,5 - bst r2,2 - bld r2,4 - bst r23,2 - bld r2,2 - bst r23,3 - bld r23,2 - bst r23,7 - bld r23,3 - bst r3,7 - bld r23,7 - bst r3,5 - bld r3,7 - bst r2,5 - bld r3,5 - bst r2,6 - bld r2,5 - bst r3,2 - bld r2,6 - bst r23,1 - bld r3,2 - bst r22,7 - bld r23,1 - bst r3,4 - bld r22,7 - bst r2,1 - bld r3,4 - bst r22,6 - bld r2,1 - bst r3,0 - bld r22,6 - bst r0,0 - bld r3,0 - bst r4,0 - bld r0,0 - bst r4,1 - bld r4,0 - bst r4,5 - bld r4,1 - bst r6,5 - bld r4,5 - bst r6,7 - bld r6,5 - bst r7,7 - bld r6,7 - bst r7,6 - bld r7,7 - bst r7,2 - bld r7,6 - bst r5,2 - bld r7,2 - bst r5,0 - bld r5,2 - bst r0,0 - bld r5,0 - bst r4,2 - bld r0,0 - bst r5,1 - bld r4,2 - bst r4,4 - bld r5,1 - bst r6,1 - bld r4,4 - bst r4,7 - bld r6,1 - bst r7,5 - bld r4,7 - bst r6,6 - bld r7,5 - bst r7,3 - bld r6,6 - bst r5,6 - bld r7,3 - bst r7,0 - bld r5,6 - bst r0,0 - bld r7,0 - bst r4,3 - bld r0,0 - bst r5,5 - bld r4,3 - bst r6,4 - bld r5,5 - bst r6,3 - bld r6,4 - bst r5,7 - bld r6,3 - bst r7,4 - bld r5,7 - bst r6,2 - bld r7,4 - bst r5,3 - bld r6,2 - bst r5,4 - bld r5,3 - bst r6,0 - bld r5,4 - bst r0,0 - bld r6,0 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r8,2 - bld r8,0 - bst r9,2 - bld r8,2 - bst r9,1 - bld r9,2 - bst r8,5 - bld r9,1 - bst r10,6 - bld r8,5 - bst r11,0 - bld r10,6 - bst r8,3 - bld r11,0 - bst r9,6 - bld r8,3 - bst r11,1 - bld r9,6 - bst r8,7 - bld r11,1 - bst r11,6 - bld r8,7 - bst r11,3 - bld r11,6 - bst r9,7 - bld r11,3 - bst r11,5 - bld r9,7 - bst r10,7 - bld r11,5 - bst r11,4 - bld r10,7 - bst r10,3 - bld r11,4 - bst r9,4 - bld r10,3 - bst r10,1 - bld r9,4 - bst r8,4 - bld r10,1 - bst r10,2 - bld r8,4 - bst r9,0 - bld r10,2 - bst r8,1 - bld r9,0 - bst r8,6 - bld r8,1 - bst r11,2 - bld r8,6 - bst r9,3 - bld r11,2 - bst r9,5 - bld r9,3 - bst r10,5 - bld r9,5 - bst r10,4 - bld r10,5 - bst r10,0 - bld r10,4 - bst r0,0 - bld r10,0 - bst r12,0 - bld r0,0 - bst r12,3 - bld r12,0 - bst r13,7 - bld r12,3 - bst r15,6 - bld r13,7 - bst r15,0 - bld r15,6 - bst r0,0 - bld r15,0 - bst r12,1 - bld r0,0 - bst r12,7 - bld r12,1 - bst r15,7 - bld r12,7 - bst r15,4 - bld r15,7 - bst r14,0 - bld r15,4 - bst r0,0 - bld r14,0 - bst r12,2 - bld r0,0 - bst r13,3 - bld r12,2 - bst r13,6 - bld r13,3 - bst r15,2 - bld r13,6 - bst r13,0 - bld r15,2 - bst r0,0 - bld r13,0 - bst r12,4 - bld r0,0 - bst r14,3 - bld r12,4 - bst r13,5 - bld r14,3 - bst r14,6 - bld r13,5 - bst r15,1 - bld r14,6 - bst r0,0 - bld r15,1 - bst r12,5 - bld r0,0 - bst r14,7 - bld r12,5 - bst r15,5 - bld r14,7 - bst r14,4 - bld r15,5 - bst r14,1 - bld r14,4 - bst r0,0 - bld r14,1 - bst r12,6 - bld r0,0 - bst r15,3 - bld r12,6 - bst r13,4 - bld r15,3 - bst r14,2 - bld r13,4 - bst r13,1 - bld r14,2 - bst r0,0 - bld r13,1 - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - inc r16 - ret -548: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - adiw r28,18 - in r0,0x3f - cli - out 
0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt, .-gift128b_encrypt - - .text -.global gift128b_encrypt_preloaded - .type gift128b_encrypt_preloaded, @function -gift128b_encrypt_preloaded: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 36 - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r17,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - mov r16,r1 -46: - rcall 199f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - rcall 199f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - rcall 199f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - rcall 199f - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - ldi r17,40 - cpse r16,r17 - rjmp 46b - rjmp 548f -199: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov 
r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - movw r18,r22 - movw r20,r2 - mov r0,r4 - and r0,r18 - eor r8,r0 - mov r0,r5 - and r0,r19 - eor r9,r0 - mov r0,r6 - and r0,r20 - eor r10,r0 - mov r0,r7 - and r0,r21 - eor r11,r0 - movw r22,r12 - movw r2,r14 - movw r12,r18 - movw r14,r20 - bst r22,1 - bld r0,0 - bst r22,4 - bld r22,1 - bst r2,0 - bld r22,4 - bst r22,2 - bld r2,0 - bst r23,0 - bld r22,2 - bst r22,3 - bld r23,0 - bst r23,4 - bld r22,3 - bst r2,3 - bld r23,4 - bst r23,6 - bld r2,3 - bst r3,3 - bld r23,6 - bst r23,5 - bld r3,3 - bst r2,7 - bld r23,5 - bst r3,6 - bld r2,7 - bst r3,1 - bld r3,6 - bst r22,5 - bld r3,1 - bst r2,4 - bld r22,5 - bst r2,2 - bld r2,4 - bst r23,2 - bld r2,2 - bst r23,3 - bld r23,2 - bst r23,7 - bld r23,3 - bst r3,7 - bld r23,7 - bst r3,5 - bld r3,7 - bst r2,5 - bld r3,5 - bst r2,6 - bld r2,5 - bst r3,2 - bld r2,6 - bst r23,1 - bld r3,2 - bst r22,7 - bld r23,1 - bst r3,4 - bld r22,7 - bst r2,1 - bld r3,4 - bst r22,6 - bld r2,1 - bst r3,0 - bld r22,6 - bst r0,0 - bld r3,0 - bst r4,0 - bld r0,0 - bst r4,1 - bld r4,0 - bst r4,5 - bld r4,1 - bst r6,5 - bld r4,5 - bst r6,7 - bld r6,5 - bst r7,7 - bld r6,7 - bst r7,6 - bld r7,7 - bst r7,2 - bld r7,6 - bst r5,2 - bld r7,2 - bst r5,0 - bld r5,2 - bst r0,0 - bld r5,0 - bst r4,2 - bld r0,0 - bst r5,1 - bld r4,2 - bst r4,4 - bld r5,1 - bst r6,1 - bld r4,4 - bst r4,7 - bld r6,1 - bst r7,5 - bld r4,7 - bst r6,6 - bld r7,5 - bst r7,3 - bld r6,6 - bst r5,6 - bld r7,3 - bst r7,0 - bld r5,6 - bst r0,0 - bld r7,0 - bst r4,3 - bld r0,0 - bst r5,5 - bld r4,3 - bst r6,4 - bld r5,5 - bst r6,3 - bld r6,4 - bst r5,7 - bld r6,3 - bst r7,4 - bld r5,7 - bst r6,2 - bld r7,4 - bst r5,3 - bld r6,2 - bst r5,4 - bld r5,3 - bst r6,0 - bld r5,4 - bst r0,0 - bld r6,0 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r8,2 - bld r8,0 - bst r9,2 - bld r8,2 - bst r9,1 - bld r9,2 - bst r8,5 - bld r9,1 - bst r10,6 - bld r8,5 - bst r11,0 - bld r10,6 - bst r8,3 - bld r11,0 - bst r9,6 - bld r8,3 - bst r11,1 - bld r9,6 - bst r8,7 - bld r11,1 - bst r11,6 - bld r8,7 - bst r11,3 - bld r11,6 - bst r9,7 - bld r11,3 - bst r11,5 - bld r9,7 - bst r10,7 - bld r11,5 - bst r11,4 - bld r10,7 - bst r10,3 - bld r11,4 - bst r9,4 - bld r10,3 - bst r10,1 - bld r9,4 - bst r8,4 - bld r10,1 - bst r10,2 - bld r8,4 - bst r9,0 - bld r10,2 - bst r8,1 - bld r9,0 - bst r8,6 - bld r8,1 - bst r11,2 - bld r8,6 - bst r9,3 - bld r11,2 - bst r9,5 - bld r9,3 - bst r10,5 - bld r9,5 - bst r10,4 - bld r10,5 - bst r10,0 - bld r10,4 - bst r0,0 - bld r10,0 - bst r12,0 - bld r0,0 - bst r12,3 - bld r12,0 - bst r13,7 - bld r12,3 - bst r15,6 - bld r13,7 - bst r15,0 - bld r15,6 - bst r0,0 - bld r15,0 - bst r12,1 - bld r0,0 - bst r12,7 - bld r12,1 - bst r15,7 - bld r12,7 - bst r15,4 - bld r15,7 - bst r14,0 - bld r15,4 - bst r0,0 - bld r14,0 - bst r12,2 - bld r0,0 - bst r13,3 - bld r12,2 - bst r13,6 - bld r13,3 - bst r15,2 - bld r13,6 - bst r13,0 - bld r15,2 - bst r0,0 - bld r13,0 - bst r12,4 - bld r0,0 - bst r14,3 - bld r12,4 - bst r13,5 - bld r14,3 - bst r14,6 - bld r13,5 - bst r15,1 - bld r14,6 - bst r0,0 - bld r15,1 - bst r12,5 - bld r0,0 - bst r14,7 - bld r12,5 - bst r15,5 - bld r14,7 - bst r14,4 - bld 
r15,5 - bst r14,1 - bld r14,4 - bst r0,0 - bld r14,1 - bst r12,6 - bld r0,0 - bst r15,3 - bld r12,6 - bst r13,4 - bld r15,3 - bst r14,2 - bld r13,4 - bst r13,1 - bld r14,2 - bst r0,0 - bld r13,1 - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - inc r16 - ret -548: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded - - .text -.global gift128b_decrypt - .type gift128b_decrypt, @function -gift128b_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r17,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -114: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor 
r11,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - cpse r16,r1 - rjmp 114b - rjmp 611f -266: - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - bld r11,3 - bst r8,7 - 
bld r11,6 - bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst r9,1 - bld r8,5 - bst r9,2 - bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -611: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_decrypt, .-gift128b_decrypt - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128b-full-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128b-full-avr.S deleted file mode 100644 index ff11875..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128b-full-avr.S +++ /dev/null @@ -1,5037 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - 
.byte 128 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - .byte 3 - .byte 3 - .byte 39 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - .byte 2 - .byte 0 - .byte 80 - .byte 20 - .byte 129 - .byte 1 - .byte 2 - .byte 1 - .byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128b_init - .type gift128b_init, @function -gift128b_init: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 18 - ld r13,X+ - ld r12,X+ - ld r11,X+ - ld r10,X+ - ld r5,X+ - ld r4,X+ - ld r3,X+ - ld r2,X+ - ld r9,X+ - ld r8,X+ - ld r7,X+ - ld r6,X+ - ld r29,X+ - ld r28,X+ - ld r23,X+ - ld r22,X+ - st Z+,r22 - st Z+,r23 - st Z+,r28 - st Z+,r29 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - ldi r24,4 -33: - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r29 - ror r28 - ror r0 - lsr r29 - ror r28 - ror r0 - or r29,r0 - st Z+,r22 - st Z+,r23 - st Z+,r28 - st Z+,r29 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r28 - mov r28,r4 - mov r4,r0 - mov r0,r29 - mov r29,r5 - mov r5,r0 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - mov r0,r6 - mov r6,r10 - mov r10,r0 - mov r0,r7 - mov r7,r11 - mov r11,r0 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - dec r24 - breq 5115f - rjmp 33b -5115: - subi r30,80 - sbc r31,r1 - ldi r24,2 -119: - ld r22,Z - ldd r23,Z+1 - ldd r28,Z+2 - ldd r29,Z+3 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr 
r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - st Z,r29 - std Z+1,r23 - std Z+2,r28 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r28,Z+6 - ldd r29,Z+7 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+4,r29 - std Z+5,r23 - std Z+6,r28 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r28,Z+10 - ldd r29,Z+11 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor 
r21,r29 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+8,r29 - std Z+9,r23 - std Z+10,r28 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r28,Z+14 - ldd r29,Z+15 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+12,r29 - std Z+13,r23 - std Z+14,r28 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r28,Z+18 - ldd r29,Z+19 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor 
r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+16,r29 - std Z+17,r23 - std Z+18,r28 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r28,Z+22 - ldd r29,Z+23 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+20,r29 - std Z+21,r23 - std Z+22,r28 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r28,Z+26 - ldd r29,Z+27 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 
- ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+24,r29 - std Z+25,r23 - std Z+26,r28 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r28,Z+30 - ldd r29,Z+31 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+28,r29 - std Z+29,r23 - std Z+30,r28 - std Z+31,r22 - dec r24 - breq 1268f - adiw r30,40 - rjmp 119b -1268: - adiw r30,40 - movw r26,r30 - subi r26,80 - sbc r27,r1 - ldi r24,6 -1274: - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r2 - eor r19,r3 - andi r18,51 - andi r19,51 - eor r2,r18 - eor r3,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - st Z,r2 - std Z+1,r3 - std Z+2,r4 - std Z+3,r5 - movw r18,r22 - movw r20,r28 - andi r18,51 - andi r19,51 - 
andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - andi r28,204 - andi r29,204 - or r28,r21 - or r29,r18 - or r22,r19 - or r23,r20 - movw r18,r28 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r28 - eor r19,r29 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r28,r18 - eor r29,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r28,r18 - eor r29,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r28 - std Z+5,r29 - std Z+6,r22 - std Z+7,r23 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - swap r3 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - swap r5 - std Z+8,r2 - std Z+9,r3 - std Z+10,r4 - std Z+11,r5 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r28 - adc r28,r1 - lsl r28 - adc r28,r1 - lsl r28 - adc r28,r1 - lsl r29 - adc r29,r1 - lsl r29 - adc r29,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r28 - std Z+15,r29 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - ldi r25,85 - and r2,r25 - and r3,r25 - and r4,r25 - and r5,r25 - or r2,r19 - or r3,r20 - or r4,r21 - or r5,r18 - std Z+16,r4 - std Z+17,r5 - std Z+18,r2 - std Z+19,r3 - movw r18,r22 - movw r20,r28 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - andi r28,170 - andi r29,170 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - or r22,r18 - or r23,r19 - or r28,r20 - or r29,r21 - std Z+20,r29 - std Z+21,r22 - std Z+22,r23 - std Z+23,r28 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r14,r18 - movw r16,r20 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - eor r14,r18 - eor r15,r19 - eor r16,r20 - eor r17,r21 - ldi r25,8 - and r14,r25 - and r15,r25 - andi r16,8 - andi r17,8 - eor r18,r14 - eor r19,r15 - eor r20,r16 - eor r21,r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - eor r18,r14 - eor r19,r15 - eor r20,r16 - eor r21,r17 - ldi r17,15 - and r2,r17 - and r3,r17 - and r4,r17 - and r5,r17 - or r2,r18 - or r3,r19 - or r4,r20 - or r5,r21 - std Z+24,r2 - std Z+25,r3 - std Z+26,r4 - std Z+27,r5 - movw r18,r28 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r2,r22 - movw r4,r28 - ldi r16,1 - and r2,r16 - and r3,r16 - and r4,r16 - and r5,r16 - lsl r2 - rol r3 - rol r4 - rol r5 - lsl r2 - rol r3 - rol r4 - rol r5 - lsl r2 - rol r3 - rol r4 - rol r5 - or r2,r18 - or 
r3,r19 - movw r18,r28 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r2,r18 - or r3,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r4,r18 - or r5,r19 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r2,r18 - or r3,r19 - or r4,r20 - or r5,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r4,r22 - or r5,r23 - std Z+28,r2 - std Z+29,r3 - std Z+30,r4 - std Z+31,r5 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - mov r0,r1 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - or r5,r0 - std Z+32,r3 - std Z+33,r2 - std Z+34,r4 - std Z+35,r5 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r28 - mov r28,r29 - mov r29,r0 - lsl r28 - rol r29 - adc r28,r1 - lsl r28 - rol r29 - adc r28,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r28 - std Z+39,r29 - dec r24 - breq 1733f - adiw r30,40 - rjmp 1274b -1733: - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_init, .-gift128b_init - - .text -.global gift128b_encrypt - .type gift128b_encrypt, @function -gift128b_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 19 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - movw r26,r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rjmp 765f -27: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 
- ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) 
- ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc 
r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -765: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - pop r0 - pop r0 - pop r17 - pop 
r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt, .-gift128b_encrypt - - .text -.global gift128b_encrypt_preloaded - .type gift128b_encrypt_preloaded, @function -gift128b_encrypt_preloaded: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 19 - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - movw r26,r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rjmp 765f -27: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - 
lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov 
r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor 
r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -765: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - pop r0 - pop r0 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded - - .text -.global gift128b_decrypt - .type gift128b_decrypt, @function -gift128b_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 19 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - movw r26,r30 - subi r26,192 - sbci r27,254 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,160 - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rjmp 768f -30: - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor 
r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r1 - lsr r22 - ror r0 - lsr r22 - ror r0 - or r22,r0 - mov r0,r1 - lsr r23 - ror r0 - lsr r23 - ror r0 - or r23,r0 - mov r0,r1 - lsr r2 - ror r0 - lsr r2 - ror r0 - or r2,r0 - mov r0,r1 - lsr r3 - ror r0 - lsr r3 - ror r0 - or r3,r0 - swap r4 - swap r5 - swap r6 - swap r7 - lsl r8 - adc r8,r1 - lsl r8 - adc r8,r1 - lsl r9 - adc r9,r1 - lsl r9 - adc r9,r1 - lsl r10 - adc r10,r1 - lsl r10 - adc r10,r1 - lsl r11 - adc r11,r1 - lsl r11 - adc r11,r1 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - com r22 - com r23 - com r2 - com r3 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor 
r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec 
r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - com r22 - com r23 - com r2 - com r3 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,119 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r15 - ror r14 - ror r13 - ror r12 - lsr r15 - 
ror r14 - ror r13 - ror r12 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,17 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -768: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - pop r0 - pop r0 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_decrypt, .-gift128b_decrypt - -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128b-small-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128b-small-avr.S deleted file mode 100644 index 77ef9fd..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128b-small-avr.S +++ /dev/null @@ -1,6053 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - .byte 3 - .byte 3 - .byte 39 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 
0 - .byte 3 - .byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - .byte 2 - .byte 0 - .byte 80 - .byte 20 - .byte 129 - .byte 1 - .byte 2 - .byte 1 - .byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128b_init - .type gift128b_init, @function -gift128b_init: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 16 - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - ldi r24,4 -33: - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - mov r0,r22 - mov r22,r4 - mov r4,r0 - mov r0,r23 - mov r23,r5 - mov r5,r0 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - dec r24 - breq 5115f - rjmp 33b -5115: - subi r30,80 - sbc r31,r1 - ldi r24,2 -119: - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - st Z,r3 - std Z+1,r23 - std 
Z+2,r2 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+4,r3 - std Z+5,r23 - std Z+6,r2 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+8,r3 - std Z+9,r23 - std Z+10,r2 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - 
ldd r2,Z+14 - ldd r3,Z+15 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+12,r3 - std Z+13,r23 - std Z+14,r2 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r3 - std Z+17,r23 - std Z+18,r2 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - 
eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+20,r3 - std Z+21,r23 - std Z+22,r2 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+24,r3 - std Z+25,r23 - std Z+26,r2 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - 
lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+28,r3 - std Z+29,r23 - std Z+30,r2 - std Z+31,r22 - dec r24 - breq 1268f - adiw r30,40 - rjmp 119b -1268: - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size gift128b_init, .-gift128b_init - - .text -.global gift128b_encrypt - .type gift128b_encrypt, @function -gift128b_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ldi r24,20 -1: - ld r22,Z+ - ld r23,Z+ - ld r2,Z+ - ld r3,Z+ - std Y+1,r22 - std Y+2,r23 - std Y+3,r2 - std Y+4,r3 - adiw r28,4 - dec r24 - brne 1b - subi r28,80 - sbc r29,r1 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 73f 
-#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 73f - rcall 73f - rjmp 1285f -73: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc 
r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 
- eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - 
mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -811: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc 
r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr 
r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -1285: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt, .-gift128b_encrypt - - .text -.global gift128b_encrypt_preloaded - .type gift128b_encrypt_preloaded, @function -gift128b_encrypt_preloaded: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ldi r24,20 -1: - ld r22,Z+ - ld r23,Z+ - ld r2,Z+ - ld r3,Z+ - std Y+1,r22 - std Y+2,r23 - std Y+3,r2 - std Y+4,r3 - adiw r28,4 - dec r24 - brne 1b - subi r28,80 - sbc r29,r1 - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) 
-#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 73f - rcall 73f - rjmp 1285f -73: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - 
or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld 
r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - 
com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -811: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 
- ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc 
r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -1285: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 40 -table_1: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - - .text -.global gift128b_decrypt - .type gift128b_decrypt, @function -gift128b_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor 
r20,r24 - eor r21,r25 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - 
eor r24,r20 - eor r25,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r17,hh8(table_1) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -678: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 830f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - std 
Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 830f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 830f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 830f - cpse r16,r1 - rjmp 678b - rjmp 1175f -830: - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - bld r11,3 - bst r8,7 - bld r11,6 - 
bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst r9,1 - bld r8,5 - bst r9,2 - bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -1175: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_decrypt, .-gift128b_decrypt - -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128b-tiny-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128b-tiny-avr.S deleted file mode 100644 index e7a03f1..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-gift128b-tiny-avr.S +++ /dev/null @@ -1,6766 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_TINY - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - .byte 
128 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - .byte 3 - .byte 3 - .byte 39 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - .byte 2 - .byte 0 - .byte 80 - .byte 20 - .byte 129 - .byte 1 - .byte 2 - .byte 1 - .byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128b_init - .type gift128b_init, @function -gift128b_init: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 16 - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - st Z,r22 - std Z+1,r23 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size gift128b_init, .-gift128b_init - - .text -.global gift128b_encrypt - .type gift128b_encrypt, @function -gift128b_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - movw r30,r28 - adiw r30,1 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - ldi r24,4 -35: - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - mov r0,r22 - mov r22,r4 - mov r4,r0 - mov r0,r23 - mov r23,r5 - mov r5,r0 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - lsl r8 
- rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - dec r24 - breq 5117f - rjmp 35b -5117: - subi r30,80 - sbc r31,r1 - ldi r24,2 -121: - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - st Z,r3 - std Z+1,r23 - std Z+2,r2 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - 
movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+4,r3 - std Z+5,r23 - std Z+6,r2 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+8,r3 - std Z+9,r23 - std Z+10,r2 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor 
r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+12,r3 - std Z+13,r23 - std Z+14,r2 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r3 - std Z+17,r23 - std Z+18,r2 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+20,r3 - std Z+21,r23 - std Z+22,r2 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - movw 
r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+24,r3 - std Z+25,r23 - std Z+26,r2 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+28,r3 - std Z+29,r23 - std Z+30,r2 - std Z+31,r22 - dec r24 
- breq 1270f - adiw r30,40 - rjmp 121b -1270: - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 1329f - rcall 1329f - rjmp 2541f -1329: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and 
r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - 
eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - 
swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -2067: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor 
r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 
- rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -2541: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt, .-gift128b_encrypt - - .text -.global gift128b_encrypt_preloaded - .type gift128b_encrypt_preloaded, @function -gift128b_encrypt_preloaded: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - movw r30,r28 - adiw r30,1 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - ldi r24,4 -35: - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - lsl r22 - rol r23 - 
adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - mov r0,r22 - mov r22,r4 - mov r4,r0 - mov r0,r23 - mov r23,r5 - mov r5,r0 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - dec r24 - breq 5117f - rjmp 35b -5117: - subi r30,80 - sbc r31,r1 - ldi r24,2 -121: - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - st Z,r3 - std Z+1,r23 - std Z+2,r2 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol 
r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+4,r3 - std Z+5,r23 - std Z+6,r2 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+8,r3 - std Z+9,r23 - std Z+10,r2 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - 
mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+12,r3 - std Z+13,r23 - std Z+14,r2 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r3 - std Z+17,r23 - std Z+18,r2 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 
- eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+20,r3 - std Z+21,r23 - std Z+22,r2 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+24,r3 - std Z+25,r23 - std Z+26,r2 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - 
eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+28,r3 - std Z+29,r23 - std Z+30,r2 - std Z+31,r22 - dec r24 - breq 1270f - adiw r30,40 - rjmp 121b -1270: - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 1329f - rcall 1329f - rjmp 2541f -1329: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - 
ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor 
r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor 
r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -2067: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - 
ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -2541: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 40 -table_1: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - 
- .text -.global gift128b_decrypt - .type gift128b_decrypt, @function -gift128b_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r17,hh8(table_1) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -114: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror 
r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - cpse r16,r1 - rjmp 114b - rjmp 611f -266: - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - bld r11,3 - bst r8,7 - bld r11,6 - bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst r9,1 - bld r8,5 - bst r9,2 - bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld 
r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -611: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_decrypt, .-gift128b_decrypt - -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-util.h b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
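/*
 * Editor's note: a minimal, hypothetical usage sketch of the endian load/store
 * and XOR helpers defined above; it is not part of the patch.  The function
 * name util_macro_demo and the plain #include "internal-util.h" are assumptions
 * for illustration only.
 */
#include <stdint.h>
#include <string.h>
#include <assert.h>
#include "internal-util.h"

static void util_macro_demo(void)
{
    unsigned char buf[4] = {0x01, 0x02, 0x03, 0x04};
    unsigned char out[4];
    uint32_t le = le_load_word32(buf);   /* 0x04030201: byte 0 is least significant */
    uint32_t be = be_load_word32(buf);   /* 0x01020304: byte 0 is most significant */
    be_store_word32(out, be);            /* writes the bytes back in their original order */
    assert(memcmp(out, buf, 4) == 0);
    lw_xor_block(out, buf, 4);           /* out ^= buf byte by byte, so out becomes all zero */
    assert(out[0] == 0 && le == 0x04030201UL && be == 0x01020304UL);
}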
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
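/*
 * Editor's note: a small hypothetical check, not part of the patch, of the
 * composition idea used by the AVR rotation macros above: a left rotate by 5
 * is performed as a cheap left rotate by 8 followed by three single-bit right
 * rotates.  The helper rotl32 below is an assumption for illustration; the
 * check uses plain shifts so it compiles on any platform.
 */
#include <stdint.h>
#include <assert.h>

static uint32_t rotl32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32 - bits));
}

static void rotate_composition_demo(void)
{
    uint32_t x = 0x80000001UL;
    uint32_t composed = rotl32(x, 8);               /* leftRotate8 */
    composed = (composed >> 1) | (composed << 31);  /* rightRotate1 */
    composed = (composed >> 1) | (composed << 31);  /* rightRotate1 */
    composed = (composed >> 1) | (composed << 31);  /* rightRotate1 */
    assert(composed == rotl32(x, 5));               /* same result as a direct 5-bit rotate */
}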
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/sundae-gift.c b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/sundae-gift.c deleted file mode 100644 index d192b8e..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/sundae-gift.c +++ /dev/null @@ -1,356 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "sundae-gift.h" -#include "internal-gift128.h" -#include "internal-util.h" -#include - -aead_cipher_t const sundae_gift_0_cipher = { - "SUNDAE-GIFT-0", - SUNDAE_GIFT_KEY_SIZE, - SUNDAE_GIFT_0_NONCE_SIZE, - SUNDAE_GIFT_TAG_SIZE, - AEAD_FLAG_NONE, - sundae_gift_0_aead_encrypt, - sundae_gift_0_aead_decrypt -}; - -aead_cipher_t const sundae_gift_64_cipher = { - "SUNDAE-GIFT-64", - SUNDAE_GIFT_KEY_SIZE, - SUNDAE_GIFT_64_NONCE_SIZE, - SUNDAE_GIFT_TAG_SIZE, - AEAD_FLAG_NONE, - sundae_gift_64_aead_encrypt, - sundae_gift_64_aead_decrypt -}; - -aead_cipher_t const sundae_gift_96_cipher = { - "SUNDAE-GIFT-96", - SUNDAE_GIFT_KEY_SIZE, - SUNDAE_GIFT_96_NONCE_SIZE, - SUNDAE_GIFT_TAG_SIZE, - AEAD_FLAG_NONE, - sundae_gift_96_aead_encrypt, - sundae_gift_96_aead_decrypt -}; - -aead_cipher_t const sundae_gift_128_cipher = { - "SUNDAE-GIFT-128", - SUNDAE_GIFT_KEY_SIZE, - SUNDAE_GIFT_128_NONCE_SIZE, - SUNDAE_GIFT_TAG_SIZE, - AEAD_FLAG_NONE, - sundae_gift_128_aead_encrypt, - sundae_gift_128_aead_decrypt -}; - -/* Multiply a block value by 2 in the special byte field */ -STATIC_INLINE void sundae_gift_multiply(unsigned char B[16]) -{ - unsigned char B0 = B[0]; - unsigned index; - for (index = 0; index < 15; ++index) - B[index] = B[index + 1]; - B[15] = B0; - B[10] ^= B0; - B[12] ^= B0; - B[14] ^= B0; -} - -/* Compute a MAC over the concatenation of two data buffers */ -static void sundae_gift_aead_mac - (const gift128b_key_schedule_t *ks, unsigned char V[16], - const unsigned char *data1, unsigned data1len, - const unsigned char *data2, unsigned long data2len) -{ - unsigned len; - - /* Nothing to do if the input is empty */ - if (!data1len && !data2len) - return; - - /* Format the first block. We assume that data1len <= 16 - * as it is will be the nonce if it is non-zero in length */ - lw_xor_block(V, data1, data1len); - len = 16 - data1len; - if (len > data2len) - len = (unsigned)data2len; - lw_xor_block(V + data1len, data2, len); - data2 += len; - data2len -= len; - len += data1len; - - /* Process as many full blocks as we can, except the last */ - while (data2len > 0) { - gift128b_encrypt(ks, V, V); - len = 16; - if (len > data2len) - len = (unsigned)data2len; - lw_xor_block(V, data2, len); - data2 += len; - data2len -= len; - } - - /* Pad and process the last block */ - if (len < 16) { - V[len] ^= 0x80; - sundae_gift_multiply(V); - gift128b_encrypt(ks, V, V); - } else { - sundae_gift_multiply(V); - sundae_gift_multiply(V); - gift128b_encrypt(ks, V, V); - } -} - -static int sundae_gift_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, unsigned npublen, - const unsigned char *k, unsigned char domainsep) -{ - gift128b_key_schedule_t ks; - unsigned char V[16]; - unsigned char T[16]; - unsigned char P[16]; - - /* Compute the length of the output ciphertext */ - *clen = mlen + SUNDAE_GIFT_TAG_SIZE; - - /* Set the key schedule */ - gift128b_init(&ks, k); - - /* Format and encrypt the initial domain separation block */ - if (adlen > 0) - domainsep |= 0x80; - if (mlen > 0) - domainsep |= 0x40; - V[0] = domainsep; - memset(V + 1, 0, sizeof(V) - 1); - gift128b_encrypt(&ks, T, V); - - /* Authenticate the nonce and the associated data */ - sundae_gift_aead_mac(&ks, T, npub, npublen, ad, adlen); - - /* Authenticate the plaintext */ - sundae_gift_aead_mac(&ks, T, 0, 0, m, mlen); - - /* Encrypt the plaintext to produce the ciphertext. 
We need to be - * careful how we manage the data because we could be doing in-place - * encryption. In SUNDAE-GIFT, the first 16 bytes of the ciphertext - * is the tag rather than the last 16 bytes in other algorithms. - * We need to swap the plaintext for the current block with the - * ciphertext or tag from the previous block */ - memcpy(V, T, 16); - while (mlen >= 16) { - gift128b_encrypt(&ks, V, V); - lw_xor_block_2_src(P, V, m, 16); - memcpy(c, T, 16); - memcpy(T, P, 16); - c += 16; - m += 16; - mlen -= 16; - } - if (mlen > 0) { - unsigned leftover = (unsigned)mlen; - gift128b_encrypt(&ks, V, V); - lw_xor_block(V, m, leftover); - memcpy(c, T, 16); - memcpy(c + 16, V, leftover); - } else { - memcpy(c, T, 16); - } - return 0; -} - -static int sundae_gift_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, unsigned npublen, - const unsigned char *k, unsigned char domainsep) -{ - gift128b_key_schedule_t ks; - unsigned char V[16]; - unsigned char T[16]; - unsigned char *mtemp; - unsigned long len; - - /* Bail out if the ciphertext is too short */ - if (clen < SUNDAE_GIFT_TAG_SIZE) - return -1; - len = *mlen = clen - SUNDAE_GIFT_TAG_SIZE; - - /* Set the key schedule */ - gift128b_init(&ks, k); - - /* Decrypt the ciphertext to produce the plaintext, using the - * tag as the initialization vector for the decryption process */ - memcpy(T, c, SUNDAE_GIFT_TAG_SIZE); - c += SUNDAE_GIFT_TAG_SIZE; - mtemp = m; - memcpy(V, T, 16); - while (len >= 16) { - gift128b_encrypt(&ks, V, V); - lw_xor_block_2_src(mtemp, c, V, 16); - c += 16; - mtemp += 16; - len -= 16; - } - if (len > 0) { - gift128b_encrypt(&ks, V, V); - lw_xor_block_2_src(mtemp, c, V, (unsigned)len); - } - - /* Format and encrypt the initial domain separation block */ - if (adlen > 0) - domainsep |= 0x80; - if (clen > SUNDAE_GIFT_TAG_SIZE) - domainsep |= 0x40; - V[0] = domainsep; - memset(V + 1, 0, sizeof(V) - 1); - gift128b_encrypt(&ks, V, V); - - /* Authenticate the nonce and the associated data */ - sundae_gift_aead_mac(&ks, V, npub, npublen, ad, adlen); - - /* Authenticate the plaintext */ - sundae_gift_aead_mac(&ks, V, 0, 0, m, *mlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, T, V, 16); -} - -int sundae_gift_0_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - (void)npub; - return sundae_gift_aead_encrypt - (c, clen, m, mlen, ad, adlen, 0, 0, k, 0x00); -} - -int sundae_gift_0_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - (void)npub; - return sundae_gift_aead_decrypt - (m, mlen, c, clen, ad, adlen, 0, 0, k, 0x00); -} - -int sundae_gift_64_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_encrypt - (c, clen, m, mlen, ad, adlen, - npub, SUNDAE_GIFT_64_NONCE_SIZE, k, 0x90); -} - -int sundae_gift_64_aead_decrypt 
- (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_decrypt - (m, mlen, c, clen, ad, adlen, - npub, SUNDAE_GIFT_64_NONCE_SIZE, k, 0x90); -} - -int sundae_gift_96_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_encrypt - (c, clen, m, mlen, ad, adlen, - npub, SUNDAE_GIFT_96_NONCE_SIZE, k, 0xA0); -} - -int sundae_gift_96_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_decrypt - (m, mlen, c, clen, ad, adlen, - npub, SUNDAE_GIFT_96_NONCE_SIZE, k, 0xA0); -} - -int sundae_gift_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_encrypt - (c, clen, m, mlen, ad, adlen, - npub, SUNDAE_GIFT_128_NONCE_SIZE, k, 0xB0); -} - -int sundae_gift_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_decrypt - (m, mlen, c, clen, ad, adlen, - npub, SUNDAE_GIFT_128_NONCE_SIZE, k, 0xB0); -} diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/sundae-gift.h b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/sundae-gift.h deleted file mode 100644 index 9040dd5..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys-avr/sundae-gift.h +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LWCRYPTO_SUNDAE_GIFT_H -#define LWCRYPTO_SUNDAE_GIFT_H - -#include "aead-common.h" - -/** - * \file sundae-gift.h - * \brief SUNDAE-GIFT encryption algorithm family. - * - * The SUNDAE-GIFT family consists of several related algorithms: - * - * \li SUNDAE-GIFT-0 with a 128-bit key, a 0-bit nonce, and 128-bit tag. - * \li SUNDAE-GIFT-64 with a 128-bit key, a 64-bit nonce, and 128-bit tag. - * \li SUNDAE-GIFT-96 with a 128-bit key, a 96-bit nonce, and 128-bit tag. - * This is the primary member of the family. - * \li SUNDAE-GIFT-128 with a 128-bit key, a 128-bit nonce, and 128-bit tag. - * - * SUNDAE-GIFT is resistant against nonce reuse as long as the combination - * of the associated data and plaintext is unique. - * - * If a nonce is reused (or there is no nonce in the case of SUNDAE-GIFT-0), - * then two packets with the same associated data and plaintext will encrypt - * to the same ciphertext. This will leak that the same plaintext has been - * sent for a second time but will not reveal the plaintext itself. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all SUNDAE-GIFT family members. - */ -#define SUNDAE_GIFT_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for all SUNDAE-GIFT family members. - */ -#define SUNDAE_GIFT_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SUNDAE-GIFT-0. - */ -#define SUNDAE_GIFT_0_NONCE_SIZE 0 - -/** - * \brief Size of the nonce for SUNDAE-GIFT-64. - */ -#define SUNDAE_GIFT_64_NONCE_SIZE 8 - -/** - * \brief Size of the nonce for SUNDAE-GIFT-96. - */ -#define SUNDAE_GIFT_96_NONCE_SIZE 12 - -/** - * \brief Size of the nonce for SUNDAE-GIFT-128. - */ -#define SUNDAE_GIFT_128_NONCE_SIZE 16 - -/** - * \brief Meta-information block for the SUNDAE-GIFT-0 cipher. - */ -extern aead_cipher_t const sundae_gift_0_cipher; - -/** - * \brief Meta-information block for the SUNDAE-GIFT-64 cipher. - */ -extern aead_cipher_t const sundae_gift_64_cipher; - -/** - * \brief Meta-information block for the SUNDAE-GIFT-96 cipher. - */ -extern aead_cipher_t const sundae_gift_96_cipher; - -/** - * \brief Meta-information block for the SUNDAE-GIFT-128 cipher. - */ -extern aead_cipher_t const sundae_gift_128_cipher; - -/** - * \brief Encrypts and authenticates a packet with SUNDAE-GIFT-0. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce - not used by this algorithm. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa sundae_gift_0_aead_decrypt() - */ -int sundae_gift_0_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SUNDAE-GIFT-0. - * - * \param m Buffer to receive the plaintext message on output. 
- * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce - not used by this algorithm. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa sundae_gift_0_aead_encrypt() - */ -int sundae_gift_0_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SUNDAE-GIFT-64. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 8 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa sundae_gift_64_aead_decrypt() - */ -int sundae_gift_64_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SUNDAE-GIFT-64. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 8 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
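/*
 * Editor's note: a hypothetical caller sketch, not part of the patch, for the
 * SUNDAE-GIFT-64 functions documented above.  Buffer sizes follow the
 * SUNDAE_GIFT_* constants from this header; the demo key, nonce and message
 * values are placeholders.
 */
#include <string.h>
#include <assert.h>
#include "sundae-gift.h"

static void sundae_gift_64_demo(void)
{
    static const unsigned char key[SUNDAE_GIFT_KEY_SIZE] = {0};
    static const unsigned char nonce[SUNDAE_GIFT_64_NONCE_SIZE] = {0};
    unsigned char msg[4] = {'t', 'e', 's', 't'};
    unsigned char ct[4 + SUNDAE_GIFT_TAG_SIZE];
    unsigned char pt[4];
    unsigned long long ctlen, ptlen;

    sundae_gift_64_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                                0, 0,           /* no associated data */
                                0, nonce, key); /* nsec is unused */
    assert(ctlen == sizeof(msg) + SUNDAE_GIFT_TAG_SIZE);
    assert(sundae_gift_64_aead_decrypt(pt, &ptlen, 0, ct, ctlen,
                                       0, 0, nonce, key) == 0);
    assert(ptlen == sizeof(msg) && memcmp(pt, msg, sizeof(msg)) == 0);
}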
- * - * \sa sundae_gift_64_aead_encrypt() - */ -int sundae_gift_64_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SUNDAE-GIFT-96. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa sundae_gift_96_aead_decrypt() - */ -int sundae_gift_96_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SUNDAE-GIFT-96. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa sundae_gift_96_aead_encrypt() - */ -int sundae_gift_96_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SUNDAE-GIFT-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. 
- * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa sundae_gift_128_aead_decrypt() - */ -int sundae_gift_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SUNDAE-GIFT-12896. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa sundae_gift_128_aead_encrypt() - */ -int sundae_gift_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128-config.h b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128-config.h new file mode 100644 index 0000000..62131ba --- /dev/null +++ b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128-config.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_GIFT128_CONFIG_H +#define LW_INTERNAL_GIFT128_CONFIG_H + +/** + * \file internal-gift128-config.h + * \brief Configures the variant of GIFT-128 to use. 
+ */ + +/** + * \brief Select the full variant of GIFT-128. + * + * The full variant requires 320 bytes for the key schedule and uses the + * fixslicing method to implement encryption and decryption. + */ +#define GIFT128_VARIANT_FULL 0 + +/** + * \brief Select the small variant of GIFT-128. + * + * The small variant requires 80 bytes for the key schedule. The rest + * of the key schedule is expanded on the fly during encryption. + * + * The fixslicing method is used to implement encryption and the slower + * bitslicing method is used to implement decryption. The small variant + * is suitable when memory is at a premium, decryption is not needed, + * but encryption performance is still important. + */ +#define GIFT128_VARIANT_SMALL 1 + +/** + * \brief Select the tiny variant of GIFT-128. + * + * The tiny variant requires 16 bytes for the key schedule and uses the + * bitslicing method to implement encryption and decryption. It is suitable + * for use when memory is very tight and performance is not critical. + */ +#define GIFT128_VARIANT_TINY 2 + +/** + * \def GIFT128_VARIANT + * \brief Selects the default variant of GIFT-128 to use on this platform. + */ +/** + * \def GIFT128_VARIANT_ASM + * \brief Defined to 1 if the GIFT-128 implementation has been replaced + * with an assembly code version. + */ +#if defined(__AVR__) && !defined(GIFT128_VARIANT_ASM) +#define GIFT128_VARIANT_ASM 1 +#endif +#if !defined(GIFT128_VARIANT) +#define GIFT128_VARIANT GIFT128_VARIANT_FULL +#endif +#if !defined(GIFT128_VARIANT_ASM) +#define GIFT128_VARIANT_ASM 0 +#endif + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128.c b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128.c index 681dbc8..c6ac5ec 100644 --- a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128.c +++ b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128.c @@ -23,8 +23,12 @@ #include "internal-gift128.h" #include "internal-util.h" +#if !GIFT128_VARIANT_ASM + +#if GIFT128_VARIANT != GIFT128_VARIANT_TINY + /* Round constants for GIFT-128 in the fixsliced representation */ -static uint32_t const GIFT128_RC[40] = { +static uint32_t const GIFT128_RC_fixsliced[40] = { 0x10000008, 0x80018000, 0x54000002, 0x01010181, 0x8000001f, 0x10888880, 0x6001e000, 0x51500002, 0x03030180, 0x8000002f, 0x10088880, 0x60016000, 0x41500002, 0x03030080, 0x80000027, 0x10008880, 0x4001e000, 0x11500002, @@ -34,6 +38,246 @@ static uint32_t const GIFT128_RC[40] = { 0xc001a000, 0x14500002, 0x01020181, 0x8000001a }; +#endif + +#if GIFT128_VARIANT != GIFT128_VARIANT_FULL + +/* Round constants for GIFT-128 in the bitsliced representation */ +static uint8_t const GIFT128_RC[40] = { + 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, + 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, + 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, + 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38, + 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A +}; + +#endif + +/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ +#define bit_permute_step(_y, mask, shift) \ + do { \ + uint32_t y = (_y); \ + uint32_t t = ((y >> (shift)) ^ y) & (mask); \ + (_y) = (y ^ t) ^ (t << (shift)); \ + } while (0) + +/* + * The permutation below was generated by the online permuation generator at + * "http://programming.sirrida.de/calcperm.php". 
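/*
 * Editor's note: a hypothetical build-time selection, not part of the patch,
 * of the small GIFT-128 variant described in internal-gift128-config.h above.
 * Any definition visible before the config header is processed works, for
 * example a compiler flag such as -DGIFT128_VARIANT=GIFT128_VARIANT_SMALL,
 * or a define placed ahead of the include as sketched here.
 */
#define GIFT128_VARIANT GIFT128_VARIANT_SMALL   /* 80-byte key schedule; decryption uses bitslicing */
#include "internal-gift128-config.h"

#if GIFT128_VARIANT != GIFT128_VARIANT_SMALL
#error "expected the small GIFT-128 variant to be selected"
#endif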
+ * + * All of the permutuations are essentially the same, except that each is + * rotated by 8 bits with respect to the next: + * + * P0: 0 24 16 8 1 25 17 9 2 26 18 10 3 27 19 11 4 28 20 12 5 29 21 13 6 30 22 14 7 31 23 15 + * P1: 8 0 24 16 9 1 25 17 10 2 26 18 11 3 27 19 12 4 28 20 13 5 29 21 14 6 30 22 15 7 31 23 + * P2: 16 8 0 24 17 9 1 25 18 10 2 26 19 11 3 27 20 12 4 28 21 13 5 29 22 14 6 30 23 15 7 31 + * P3: 24 16 8 0 25 17 9 1 26 18 10 2 27 19 11 3 28 20 12 4 29 21 13 5 30 22 14 6 31 23 15 7 + * + * The most efficient permutation from the online generator was P3, so we + * perform it as the core of the others, and then perform a final rotation. + * + * It is possible to do slightly better than "P3 then rotate" on desktop and + * server architectures for the other permutations. But the advantage isn't + * as evident on embedded platforms so we keep things simple. + */ +#define PERM3_INNER(x) \ + do { \ + bit_permute_step(x, 0x0a0a0a0a, 3); \ + bit_permute_step(x, 0x00cc00cc, 6); \ + bit_permute_step(x, 0x0000f0f0, 12); \ + bit_permute_step(x, 0x000000ff, 24); \ + } while (0) +#define PERM0(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate8(_x); \ + } while (0) +#define PERM1(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate16(_x); \ + } while (0) +#define PERM2(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate24(_x); \ + } while (0) +#define PERM3(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) + +#define INV_PERM3_INNER(x) \ + do { \ + bit_permute_step(x, 0x00550055, 9); \ + bit_permute_step(x, 0x00003333, 18); \ + bit_permute_step(x, 0x000f000f, 12); \ + bit_permute_step(x, 0x000000ff, 24); \ + } while (0) +#define INV_PERM0(x) \ + do { \ + uint32_t _x = rightRotate8(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM1(x) \ + do { \ + uint32_t _x = rightRotate16(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM2(x) \ + do { \ + uint32_t _x = rightRotate24(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM3(x) \ + do { \ + uint32_t _x = (x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) + +/** + * \brief Converts the GIFT-128 nibble-based representation into word-based. + * + * \param output Output buffer to write the word-based version to. + * \param input Input buffer to read the nibble-based version from. + * + * The \a input and \a output buffers can be the same buffer. + */ +static void gift128n_to_words + (unsigned char *output, const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + + /* Load the input buffer into 32-bit words. We use the nibble order + * from the HYENA submission to NIST which is byte-reversed with respect + * to the nibble order of the original GIFT-128 paper. Nibble zero is in + * the first byte instead of the last, which means little-endian order. */ + s0 = le_load_word32(input + 12); + s1 = le_load_word32(input + 8); + s2 = le_load_word32(input + 4); + s3 = le_load_word32(input); + + /* Rearrange the bits so that bits 0..3 of each nibble are + * scattered to bytes 0..3 of each word. The permutation is: + * + * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 + * + * Generated with "http://programming.sirrida.de/calcperm.php". 
+ */ + #define PERM_WORDS(_x) \ + do { \ + uint32_t x = (_x); \ + bit_permute_step(x, 0x0a0a0a0a, 3); \ + bit_permute_step(x, 0x00cc00cc, 6); \ + bit_permute_step(x, 0x0000f0f0, 12); \ + bit_permute_step(x, 0x0000ff00, 8); \ + (_x) = x; \ + } while (0) + PERM_WORDS(s0); + PERM_WORDS(s1); + PERM_WORDS(s2); + PERM_WORDS(s3); + + /* Rearrange the bytes and write them to the output buffer */ + output[0] = (uint8_t)s0; + output[1] = (uint8_t)s1; + output[2] = (uint8_t)s2; + output[3] = (uint8_t)s3; + output[4] = (uint8_t)(s0 >> 8); + output[5] = (uint8_t)(s1 >> 8); + output[6] = (uint8_t)(s2 >> 8); + output[7] = (uint8_t)(s3 >> 8); + output[8] = (uint8_t)(s0 >> 16); + output[9] = (uint8_t)(s1 >> 16); + output[10] = (uint8_t)(s2 >> 16); + output[11] = (uint8_t)(s3 >> 16); + output[12] = (uint8_t)(s0 >> 24); + output[13] = (uint8_t)(s1 >> 24); + output[14] = (uint8_t)(s2 >> 24); + output[15] = (uint8_t)(s3 >> 24); +} + +/** + * \brief Converts the GIFT-128 word-based representation into nibble-based. + * + * \param output Output buffer to write the nibble-based version to. + * \param input Input buffer to read the word-based version from. + */ +static void gift128n_to_nibbles + (unsigned char *output, const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + + /* Load the input bytes and rearrange them so that s0 contains the + * most significant nibbles and s3 contains the least significant */ + s0 = (((uint32_t)(input[12])) << 24) | + (((uint32_t)(input[8])) << 16) | + (((uint32_t)(input[4])) << 8) | + ((uint32_t)(input[0])); + s1 = (((uint32_t)(input[13])) << 24) | + (((uint32_t)(input[9])) << 16) | + (((uint32_t)(input[5])) << 8) | + ((uint32_t)(input[1])); + s2 = (((uint32_t)(input[14])) << 24) | + (((uint32_t)(input[10])) << 16) | + (((uint32_t)(input[6])) << 8) | + ((uint32_t)(input[2])); + s3 = (((uint32_t)(input[15])) << 24) | + (((uint32_t)(input[11])) << 16) | + (((uint32_t)(input[7])) << 8) | + ((uint32_t)(input[3])); + + /* Apply the inverse of PERM_WORDS() from the function above */ + #define INV_PERM_WORDS(_x) \ + do { \ + uint32_t x = (_x); \ + bit_permute_step(x, 0x00aa00aa, 7); \ + bit_permute_step(x, 0x0000cccc, 14); \ + bit_permute_step(x, 0x00f000f0, 4); \ + bit_permute_step(x, 0x0000ff00, 8); \ + (_x) = x; \ + } while (0) + INV_PERM_WORDS(s0); + INV_PERM_WORDS(s1); + INV_PERM_WORDS(s2); + INV_PERM_WORDS(s3); + + /* Store the result into the output buffer as 32-bit words */ + le_store_word32(output + 12, s0); + le_store_word32(output + 8, s1); + le_store_word32(output + 4, s2); + le_store_word32(output, s3); +} + +void gift128n_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + gift128n_to_words(output, input); + gift128b_encrypt(ks, output, output); + gift128n_to_nibbles(output, output); +} + +void gift128n_decrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + gift128n_to_words(output, input); + gift128b_decrypt(ks, output, output); + gift128n_to_nibbles(output, output); +} + +#if GIFT128_VARIANT != GIFT128_VARIANT_TINY + /** * \brief Swaps bits within two words. 
* @@ -202,21 +446,27 @@ static void gift128b_compute_round_keys /* Keys 8, 9, 18, and 19 do not need any adjustment */ } +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL /* Derive the fixsliced keys for the remaining rounds 11..40 */ for (index = 20; index < 80; index += 10) { gift128b_derive_keys(ks->k + index, ks->k + index - 20); } +#endif } -int gift128b_init - (gift128b_key_schedule_t *ks, const unsigned char *key, size_t key_len) +void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) { - if (!ks || !key || key_len != 16) - return 0; gift128b_compute_round_keys (ks, be_load_word32(key), be_load_word32(key + 4), be_load_word32(key + 8), be_load_word32(key + 12)); - return 1; +} + +void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) +{ + /* Use the little-endian key byte order from the HYENA submission */ + gift128b_compute_round_keys + (ks, le_load_word32(key + 12), le_load_word32(key + 8), + le_load_word32(key + 4), le_load_word32(key)); } /** @@ -521,11 +771,37 @@ int gift128b_init gift128b_inv_sbox(s3, s1, s2, s0); \ } while (0) +#else /* GIFT128_VARIANT_TINY */ + +void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) +{ + /* Mirror the fixslicing word order of 3, 1, 2, 0 */ + ks->k[0] = be_load_word32(key + 12); + ks->k[1] = be_load_word32(key + 4); + ks->k[2] = be_load_word32(key + 8); + ks->k[3] = be_load_word32(key); +} + +void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) +{ + /* Use the little-endian key byte order from the HYENA submission + * and mirror the fixslicing word order of 3, 1, 2, 0 */ + ks->k[0] = le_load_word32(key); + ks->k[1] = le_load_word32(key + 8); + ks->k[2] = le_load_word32(key + 4); + ks->k[3] = le_load_word32(key + 12); +} + +#endif /* GIFT128_VARIANT_TINY */ + +#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL + void gift128b_encrypt (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { uint32_t s0, s1, s2, s3; + uint32_t k[20]; /* Copy the plaintext into the state buffer and convert from big endian */ s0 = be_load_word32(input); @@ -534,14 +810,20 @@ void gift128b_encrypt s3 = be_load_word32(input + 12); /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer in big endian */ be_store_word32(output, s0); @@ -555,6 +837,7 @@ void 
gift128b_encrypt_preloaded const uint32_t input[4]) { uint32_t s0, s1, s2, s3; + uint32_t k[20]; /* Copy the plaintext into local variables */ s0 = input[0]; @@ -563,14 +846,20 @@ void gift128b_encrypt_preloaded s3 = input[3]; /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer */ output[0] = s0; @@ -579,7 +868,55 @@ void gift128b_encrypt_preloaded output[3] = s3; } -void gift128b_decrypt +void gift128t_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, uint32_t tweak) +{ + uint32_t s0, s1, s2, s3; + uint32_t k[20]; + + /* Copy the plaintext into the state buffer and convert from nibbles */ + gift128n_to_words(output, input); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* Perform all 40 rounds five at a time using the fixsliced method. 
+ * Every 5 rounds except the last we add the tweak value to the state */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + s0 ^= tweak; + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + s0 ^= tweak; + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + s0 ^= tweak; + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); + gift128n_to_nibbles(output, output); +} + +#elif GIFT128_VARIANT == GIFT128_VARIANT_FULL + +void gift128b_encrypt (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { @@ -592,14 +929,14 @@ void gift128b_decrypt s3 = be_load_word32(input + 12); /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer in big endian */ be_store_word32(output, s0); @@ -608,173 +945,308 @@ void gift128b_decrypt be_store_word32(output + 12, s3); } -int gift128n_init - (gift128n_key_schedule_t *ks, const unsigned char *key, size_t key_len) +void gift128b_encrypt_preloaded + (const gift128b_key_schedule_t *ks, uint32_t output[4], + const uint32_t input[4]) { - /* Use the little-endian key byte order from the HYENA submission */ - if (!ks || !key || key_len != 16) - return 0; - gift128b_compute_round_keys - (ks, le_load_word32(key + 12), le_load_word32(key + 8), - le_load_word32(key + 4), le_load_word32(key)); - return 1; + uint32_t s0, s1, s2, s3; + + /* Copy the plaintext into local variables */ + s0 = input[0]; + s1 = input[1]; + s2 = input[2]; + s3 = input[3]; + + /* Perform all 40 rounds five at a time using the fixsliced method */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(ks->k + 30, 
GIFT128_RC_fixsliced + 15); + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer */ + output[0] = s0; + output[1] = s1; + output[2] = s2; + output[3] = s3; } -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) +void gift128t_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, uint32_t tweak) +{ + uint32_t s0, s1, s2, s3; -/** - * \brief Converts the GIFT-128 nibble-based representation into word-based. - * - * \param output Output buffer to write the word-based version to. - * \param input Input buffer to read the nibble-based version from. - * - * The \a input and \a output buffers can be the same buffer. - */ -static void gift128n_to_words - (unsigned char *output, const unsigned char *input) + /* Copy the plaintext into the state buffer and convert from nibbles */ + gift128n_to_words(output, input); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* Perform all 40 rounds five at a time using the fixsliced method. + * Every 5 rounds except the last we add the tweak value to the state */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); + gift128n_to_nibbles(output, output); +} + +#else /* GIFT128_VARIANT_TINY */ + +void gift128b_encrypt + (const gift128b_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) { uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Load the input buffer into 32-bit words. We use the nibble order - * from the HYENA submission to NIST which is byte-reversed with respect - * to the nibble order of the original GIFT-128 paper. Nibble zero is in - * the first byte instead of the last, which means little-endian order. */ - s0 = le_load_word32(input + 12); - s1 = le_load_word32(input + 8); - s2 = le_load_word32(input + 4); - s3 = le_load_word32(input); + /* Copy the plaintext into the state buffer and convert from big endian */ + s0 = be_load_word32(input); + s1 = be_load_word32(input + 4); + s2 = be_load_word32(input + 8); + s3 = be_load_word32(input + 12); - /* Rearrange the bits so that bits 0..3 of each nibble are - * scattered to bytes 0..3 of each word. 
The permutation is: - * - * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 - * - * Generated with "http://programming.sirrida.de/calcperm.php". - */ - #define PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - PERM_WORDS(s0); - PERM_WORDS(s1); - PERM_WORDS(s2); - PERM_WORDS(s3); + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } - /* Rearrange the bytes and write them to the output buffer */ - output[0] = (uint8_t)s0; - output[1] = (uint8_t)s1; - output[2] = (uint8_t)s2; - output[3] = (uint8_t)s3; - output[4] = (uint8_t)(s0 >> 8); - output[5] = (uint8_t)(s1 >> 8); - output[6] = (uint8_t)(s2 >> 8); - output[7] = (uint8_t)(s3 >> 8); - output[8] = (uint8_t)(s0 >> 16); - output[9] = (uint8_t)(s1 >> 16); - output[10] = (uint8_t)(s2 >> 16); - output[11] = (uint8_t)(s3 >> 16); - output[12] = (uint8_t)(s0 >> 24); - output[13] = (uint8_t)(s1 >> 24); - output[14] = (uint8_t)(s2 >> 24); - output[15] = (uint8_t)(s3 >> 24); + /* Pack the state into the ciphertext buffer in big endian */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); } -/** - * \brief Converts the GIFT-128 word-based representation into nibble-based. - * - * \param output Output buffer to write the nibble-based version to. - * \param input Input buffer to read the word-based version from. 
- */ -static void gift128n_to_nibbles - (unsigned char *output, const unsigned char *input) +void gift128b_encrypt_preloaded + (const gift128b_key_schedule_t *ks, uint32_t output[4], + const uint32_t input[4]) { uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Load the input bytes and rearrange them so that s0 contains the - * most significant nibbles and s3 contains the least significant */ - s0 = (((uint32_t)(input[12])) << 24) | - (((uint32_t)(input[8])) << 16) | - (((uint32_t)(input[4])) << 8) | - ((uint32_t)(input[0])); - s1 = (((uint32_t)(input[13])) << 24) | - (((uint32_t)(input[9])) << 16) | - (((uint32_t)(input[5])) << 8) | - ((uint32_t)(input[1])); - s2 = (((uint32_t)(input[14])) << 24) | - (((uint32_t)(input[10])) << 16) | - (((uint32_t)(input[6])) << 8) | - ((uint32_t)(input[2])); - s3 = (((uint32_t)(input[15])) << 24) | - (((uint32_t)(input[11])) << 16) | - (((uint32_t)(input[7])) << 8) | - ((uint32_t)(input[3])); + /* Copy the plaintext into the state buffer */ + s0 = input[0]; + s1 = input[1]; + s2 = input[2]; + s3 = input[3]; - /* Apply the inverse of PERM_WORDS() from the function above */ - #define INV_PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x00aa00aa, 7); \ - bit_permute_step(x, 0x0000cccc, 14); \ - bit_permute_step(x, 0x00f000f0, 4); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - INV_PERM_WORDS(s0); - INV_PERM_WORDS(s1); - INV_PERM_WORDS(s2); - INV_PERM_WORDS(s3); + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } - /* Store the result into the output buffer as 32-bit words */ - le_store_word32(output + 12, s0); - le_store_word32(output + 8, s1); - le_store_word32(output + 4, s2); - le_store_word32(output, s3); + /* Pack the state into the ciphertext buffer */ + output[0] = s0; + output[1] = s1; + output[2] = s2; + output[3] = s3; } -void gift128n_encrypt +void gift128t_encrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) + const unsigned char *input, uint32_t tweak) { + uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; + + /* Copy the plaintext into the state buffer and convert from nibbles */ gift128n_to_words(output, input); - gift128b_encrypt(ks, output, output); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 
0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* AddTweak - XOR in the tweak every 5 rounds except the last */ + if (((round + 1) % 5) == 0 && round < 39) + s0 ^= tweak; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); gift128n_to_nibbles(output, output); } -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, +#endif /* GIFT128_VARIANT_TINY */ + +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL + +void gift128b_decrypt + (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { - gift128n_to_words(output, input); - gift128b_decrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} + uint32_t s0, s1, s2, s3; -/* 4-bit tweak values expanded to 32-bit */ -static uint32_t const GIFT128_tweaks[16] = { - 0x00000000, 0xe1e1e1e1, 0xd2d2d2d2, 0x33333333, - 0xb4b4b4b4, 0x55555555, 0x66666666, 0x87878787, - 0x78787878, 0x99999999, 0xaaaaaaaa, 0x4b4b4b4b, - 0xcccccccc, 0x2d2d2d2d, 0x1e1e1e1e, 0xffffffff -}; + /* Copy the plaintext into the state buffer and convert from big endian */ + s0 = be_load_word32(input); + s1 = be_load_word32(input + 4); + s2 = be_load_word32(input + 8); + s3 = be_load_word32(input + 12); -void gift128t_encrypt + /* Perform all 40 rounds five at a time using the fixsliced method */ + gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + + /* Pack the state into the ciphertext buffer in big endian */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); +} + +void gift128t_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak) + const unsigned char *input, uint32_t tweak) { - uint32_t s0, s1, s2, s3, tword; + uint32_t s0, s1, s2, s3; - /* Copy the plaintext into the state buffer and convert from nibbles */ + /* Copy the ciphertext into the state buffer and convert from nibbles */ gift128n_to_words(output, input); s0 = be_load_word32(output); s1 = be_load_word32(output + 4); @@ -782,25 +1254,24 @@ void gift128t_encrypt s3 = be_load_word32(output + 12); /* Perform all 40 rounds five at a time using the fixsliced method. 
- * Every 5 rounds except the last we add the tweak value to the state */ - tword = GIFT128_tweaks[tweak]; - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); + * Every 5 rounds except the first we add the tweak value to the state */ + gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - /* Pack the state into the ciphertext buffer in nibble form */ + /* Pack the state into the plaintext buffer in nibble form */ be_store_word32(output, s0); be_store_word32(output + 4, s1); be_store_word32(output + 8, s2); @@ -808,37 +1279,211 @@ void gift128t_encrypt gift128n_to_nibbles(output, output); } +#else /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ + +/* The small variant uses fixslicing for encryption, but we need to change + * to bitslicing for decryption because of the difficulty of fast-forwarding + * the fixsliced key schedule to the end. So the tiny variant is used for + * decryption when the small variant is selected. Since the NIST AEAD modes + * for GIFT-128 only use the block encrypt operation, the inefficiencies + * in decryption don't matter all that much */ + +/** + * \def gift128b_load_and_forward_schedule() + * \brief Generate the decryption key at the end of the last round. + * + * To do that, we run the block operation forward to determine the + * final state of the key schedule after the last round: + * + * w0 = ks->k[0]; + * w1 = ks->k[1]; + * w2 = ks->k[2]; + * w3 = ks->k[3]; + * for (round = 0; round < 40; ++round) { + * temp = w3; + * w3 = w2; + * w2 = w1; + * w1 = w0; + * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + * } + * + * We can short-cut all of the above by noticing that we don't need + * to do the word rotations. Every 4 rounds, the rotation alignment + * returns to the original position and each word has been rotated + * by applying the "2 right and 4 left" bit-rotation step to it. + * We then repeat that 10 times for the full 40 rounds. The overall + * effect is to apply a "20 right and 40 left" bit-rotation to every + * word in the key schedule. That is equivalent to "4 right and 8 left" + * on the 16-bit sub-words. 
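+ *
+ * In other words, each of the 10 groups of 4 rounds contributes one
+ * "2 right and 4 left" step, for 10*2 = 20 and 10*4 = 40 bit positions in
+ * total; reducing those amounts modulo the 16-bit sub-word size gives
+ * 20 mod 16 = 4 and 40 mod 16 = 8, which is the single "4 right and 8 left"
+ * rotation applied by the macro below.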
+ */ +#if GIFT128_VARIANT != GIFT128_VARIANT_SMALL +#define gift128b_load_and_forward_schedule() \ + do { \ + w0 = ks->k[3]; \ + w1 = ks->k[1]; \ + w2 = ks->k[2]; \ + w3 = ks->k[0]; \ + w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ + ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ + w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ + ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ + w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ + ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ + w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ + ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ + } while (0) +#else +/* The small variant needs to also undo some of the rotations that were + * done to generate the fixsliced version of the key schedule */ +#define gift128b_load_and_forward_schedule() \ + do { \ + w0 = ks->k[3]; \ + w1 = ks->k[1]; \ + w2 = ks->k[2]; \ + w3 = ks->k[0]; \ + gift128b_swap_move(w3, w3, 0x000000FFU, 24); \ + gift128b_swap_move(w3, w3, 0x00003333U, 18); \ + gift128b_swap_move(w3, w3, 0x000F000FU, 12); \ + gift128b_swap_move(w3, w3, 0x00550055U, 9); \ + gift128b_swap_move(w1, w1, 0x000000FFU, 24); \ + gift128b_swap_move(w1, w1, 0x00003333U, 18); \ + gift128b_swap_move(w1, w1, 0x000F000FU, 12); \ + gift128b_swap_move(w1, w1, 0x00550055U, 9); \ + gift128b_swap_move(w2, w2, 0x000000FFU, 24); \ + gift128b_swap_move(w2, w2, 0x000F000FU, 12); \ + gift128b_swap_move(w2, w2, 0x03030303U, 6); \ + gift128b_swap_move(w2, w2, 0x11111111U, 3); \ + gift128b_swap_move(w0, w0, 0x000000FFU, 24); \ + gift128b_swap_move(w0, w0, 0x000F000FU, 12); \ + gift128b_swap_move(w0, w0, 0x03030303U, 6); \ + gift128b_swap_move(w0, w0, 0x11111111U, 3); \ + w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ + ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ + w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ + ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ + w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ + ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ + w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ + ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ + } while (0) +#endif + +void gift128b_decrypt + (const gift128b_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; + + /* Copy the ciphertext into the state buffer and convert from big endian */ + s0 = be_load_word32(input); + s1 = be_load_word32(input + 4); + s2 = be_load_word32(input + 8); + s3 = be_load_word32(input + 12); + + /* Generate the decryption key at the end of the last round */ + gift128b_load_and_forward_schedule(); + + /* Perform all 40 rounds */ + for (round = 40; round > 0; --round) { + /* Rotate the key schedule backwards */ + temp = w0; + w0 = w1; + w1 = w2; + w2 = w3; + w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | + ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; + + /* InvPermBits - apply the inverse of the 128-bit permutation */ + INV_PERM0(s0); + INV_PERM1(s1); + INV_PERM2(s2); + INV_PERM3(s3); + + /* InvSubCells - apply the inverse of the S-box */ + temp = s0; + s0 = s3; + s3 = temp; + s2 ^= s0 & s1; + s3 ^= 0xFFFFFFFFU; + s1 ^= s3; + s3 ^= s2; + s2 ^= s0 | s1; + s0 ^= s1 
& s3; + s1 ^= s0 & s2; + } + + /* Pack the state into the plaintext buffer in big endian */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); +} + void gift128t_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak) + const unsigned char *input, uint32_t tweak) { - uint32_t s0, s1, s2, s3, tword; + uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Copy the ciphertext into the state buffer and convert from nibbles */ + /* Copy the plaintext into the state buffer and convert from nibbles */ gift128n_to_words(output, input); s0 = be_load_word32(output); s1 = be_load_word32(output + 4); s2 = be_load_word32(output + 8); s3 = be_load_word32(output + 12); - /* Perform all 40 rounds five at a time using the fixsliced method. - * Every 5 rounds except the first we add the tweak value to the state */ - tword = GIFT128_tweaks[tweak]; - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC); + /* Generate the decryption key at the end of the last round */ + gift128b_load_and_forward_schedule(); + + /* Perform all 40 rounds */ + for (round = 40; round > 0; --round) { + /* Rotate the key schedule backwards */ + temp = w0; + w0 = w1; + w1 = w2; + w2 = w3; + w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | + ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); + + /* AddTweak - XOR in the tweak every 5 rounds except the last */ + if ((round % 5) == 0 && round < 40) + s0 ^= tweak; + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; + + /* InvPermBits - apply the inverse of the 128-bit permutation */ + INV_PERM0(s0); + INV_PERM1(s1); + INV_PERM2(s2); + INV_PERM3(s3); + + /* InvSubCells - apply the inverse of the S-box */ + temp = s0; + s0 = s3; + s3 = temp; + s2 ^= s0 & s1; + s3 ^= 0xFFFFFFFFU; + s1 ^= s3; + s3 ^= s2; + s2 ^= s0 | s1; + s0 ^= s1 & s3; + s1 ^= s0 & s2; + } /* Pack the state into the plaintext buffer in nibble form */ be_store_word32(output, s0); @@ -847,3 +1492,7 @@ void gift128t_decrypt be_store_word32(output + 12, s3); gift128n_to_nibbles(output, output); } + +#endif /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ + +#endif /* !GIFT128_VARIANT_ASM */ diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128.h b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128.h index 1ac40e5..f57d143 100644 --- a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128.h +++ b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128.h @@ -47,11 +47,13 @@ * in any of the NIST submissions so we don't bother with it in this library. 
* * References: https://eprint.iacr.org/2017/622.pdf, + * https://eprint.iacr.org/2020/412.pdf, * https://giftcipher.github.io/gift/ */ #include #include +#include "internal-gift128-config.h" #ifdef __cplusplus extern "C" { @@ -63,16 +65,23 @@ extern "C" { #define GIFT128_BLOCK_SIZE 16 /** - * \brief Number of round keys for the fixsliced representation of GIFT-128. + * \var GIFT128_ROUND_KEYS + * \brief Number of round keys for the GIFT-128 key schedule. */ +#if GIFT128_VARIANT == GIFT128_VARIANT_TINY +#define GIFT128_ROUND_KEYS 4 +#elif GIFT128_VARIANT == GIFT128_VARIANT_SMALL +#define GIFT128_ROUND_KEYS 20 +#else #define GIFT128_ROUND_KEYS 80 +#endif /** * \brief Structure of the key schedule for GIFT-128 (bit-sliced). */ typedef struct { - /** Pre-computed round keys in the fixsliced form */ + /** Pre-computed round keys for bit-sliced GIFT-128 */ uint32_t k[GIFT128_ROUND_KEYS]; } gift128b_key_schedule_t; @@ -81,14 +90,9 @@ typedef struct * \brief Initializes the key schedule for GIFT-128 (bit-sliced). * * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. + * \param key Points to the 16 bytes of the key data. */ -int gift128b_init - (gift128b_key_schedule_t *ks, const unsigned char *key, size_t key_len); +void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key); /** * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced). @@ -145,14 +149,9 @@ typedef gift128b_key_schedule_t gift128n_key_schedule_t; * \brief Initializes the key schedule for GIFT-128 (nibble-based). * * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. + * \param key Points to the 16 bytes of the key data. */ -int gift128n_init - (gift128n_key_schedule_t *ks, const unsigned char *key, size_t key_len); +void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key); /** * \brief Encrypts a 128-bit block with GIFT-128 (nibble-based). 
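With gift128b_init() and gift128n_init() now returning void and always taking a
16-byte key, a minimal usage sketch of the nibble-based interface looks as
follows (the key and block buffers and the wrapper function are illustrative
only, not part of this patch):

    #include "internal-gift128.h"

    static void example_gift128n(const unsigned char key[16],
                                 unsigned char block[GIFT128_BLOCK_SIZE])
    {
        gift128n_key_schedule_t ks;
        gift128n_init(&ks, key);              /* key always points to 16 bytes */
        gift128n_encrypt(&ks, block, block);  /* in-place operation is permitted */
        gift128n_decrypt(&ks, block, block);  /* restores the original block */
    }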
@@ -182,13 +181,31 @@ void gift128n_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, const unsigned char *input); +/* 4-bit tweak values expanded to 32-bit for TweGIFT-128 */ +#define GIFT128T_TWEAK_0 0x00000000 /**< TweGIFT-128 tweak value 0 */ +#define GIFT128T_TWEAK_1 0xe1e1e1e1 /**< TweGIFT-128 tweak value 1 */ +#define GIFT128T_TWEAK_2 0xd2d2d2d2 /**< TweGIFT-128 tweak value 2 */ +#define GIFT128T_TWEAK_3 0x33333333 /**< TweGIFT-128 tweak value 3 */ +#define GIFT128T_TWEAK_4 0xb4b4b4b4 /**< TweGIFT-128 tweak value 4 */ +#define GIFT128T_TWEAK_5 0x55555555 /**< TweGIFT-128 tweak value 5 */ +#define GIFT128T_TWEAK_6 0x66666666 /**< TweGIFT-128 tweak value 6 */ +#define GIFT128T_TWEAK_7 0x87878787 /**< TweGIFT-128 tweak value 7 */ +#define GIFT128T_TWEAK_8 0x78787878 /**< TweGIFT-128 tweak value 8 */ +#define GIFT128T_TWEAK_9 0x99999999 /**< TweGIFT-128 tweak value 9 */ +#define GIFT128T_TWEAK_10 0xaaaaaaaa /**< TweGIFT-128 tweak value 10 */ +#define GIFT128T_TWEAK_11 0x4b4b4b4b /**< TweGIFT-128 tweak value 11 */ +#define GIFT128T_TWEAK_12 0xcccccccc /**< TweGIFT-128 tweak value 12 */ +#define GIFT128T_TWEAK_13 0x2d2d2d2d /**< TweGIFT-128 tweak value 13 */ +#define GIFT128T_TWEAK_14 0x1e1e1e1e /**< TweGIFT-128 tweak value 14 */ +#define GIFT128T_TWEAK_15 0xffffffff /**< TweGIFT-128 tweak value 15 */ + /** * \brief Encrypts a 128-bit block with TweGIFT-128 (tweakable variant). * * \param ks Points to the GIFT-128 key schedule. * \param output Output buffer which must be at least 16 bytes in length. * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value. + * \param tweak 4-bit tweak value expanded to 32-bit. * * The \a input and \a output buffers can be the same buffer for * in-place encryption. @@ -200,7 +217,7 @@ void gift128n_decrypt */ void gift128t_encrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak); + const unsigned char *input, uint32_t tweak); /** * \brief Decrypts a 128-bit block with TweGIFT-128 (tweakable variant). @@ -208,7 +225,7 @@ void gift128t_encrypt * \param ks Points to the GIFT-128 key schedule. * \param output Output buffer which must be at least 16 bytes in length. * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value. + * \param tweak 4-bit tweak value expanded to 32-bit. * * The \a input and \a output buffers can be the same buffer for * in-place encryption. 
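The tweak parameter of gift128t_encrypt() and gift128t_decrypt() is now the
32-bit expanded form, so callers pass one of the GIFT128T_TWEAK_* constants
above rather than a raw 4-bit value. An illustrative pair of calls, reusing
the hypothetical ks and block buffers from the sketch above:

    gift128t_encrypt(&ks, block, block, GIFT128T_TWEAK_1);  /* tweak 1 = 0xe1e1e1e1 */
    gift128t_decrypt(&ks, block, block, GIFT128T_TWEAK_1);  /* same tweak recovers the block */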
@@ -220,7 +237,7 @@ void gift128t_encrypt */ void gift128t_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak); + const unsigned char *input, uint32_t tweak); #ifdef __cplusplus } diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128b-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128b-avr.S new file mode 100644 index 0000000..641613a --- /dev/null +++ b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128b-avr.S @@ -0,0 +1,2104 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 40 +table_0: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 11 + .byte 23 + .byte 46 + .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128b_init + .type gift128b_init, @function +gift128b_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + std Z+4,r18 + std Z+5,r19 + std Z+6,r20 + std Z+7,r21 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + std Z+12,r18 + std Z+13,r19 + std Z+14,r20 + std Z+15,r21 + ret + .size gift128b_init, .-gift128b_init + + .text +.global gift128b_encrypt + .type gift128b_encrypt, @function +gift128b_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 36 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r17,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + mov r16,r1 +46: + rcall 199f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + rcall 199f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + 
ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + rcall 199f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + rcall 199f + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + ldi r17,40 + cpse r16,r17 + rjmp 46b + rjmp 548f +199: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + movw r18,r22 + movw r20,r2 + mov r0,r4 + and r0,r18 + eor r8,r0 + mov r0,r5 + and r0,r19 + eor r9,r0 + mov r0,r6 + and r0,r20 + eor r10,r0 + mov r0,r7 + and r0,r21 + eor r11,r0 + movw r22,r12 + movw r2,r14 + movw r12,r18 + movw r14,r20 + bst r22,1 + bld r0,0 + bst r22,4 + bld r22,1 + bst r2,0 + bld r22,4 + bst r22,2 + bld r2,0 + bst r23,0 + bld r22,2 + bst r22,3 + bld r23,0 + bst r23,4 + bld r22,3 + bst r2,3 + bld r23,4 + bst r23,6 + bld r2,3 + bst r3,3 + bld r23,6 + bst r23,5 + bld r3,3 + bst r2,7 + bld r23,5 + bst r3,6 + bld r2,7 + bst r3,1 + bld r3,6 + bst r22,5 + bld r3,1 + bst r2,4 + bld r22,5 + bst r2,2 + bld r2,4 + bst r23,2 + bld r2,2 + bst r23,3 + bld r23,2 + bst r23,7 + bld r23,3 + bst r3,7 + bld r23,7 + bst r3,5 + bld r3,7 + bst r2,5 + bld r3,5 + bst r2,6 + bld r2,5 + bst r3,2 + bld r2,6 + bst r23,1 + bld r3,2 + bst r22,7 + bld r23,1 + bst r3,4 + bld r22,7 + bst r2,1 + bld r3,4 + bst r22,6 + bld r2,1 + bst r3,0 + bld r22,6 + bst r0,0 + bld r3,0 + bst r4,0 + bld r0,0 + bst r4,1 + bld r4,0 + bst r4,5 + bld r4,1 + bst r6,5 + bld r4,5 + bst r6,7 + bld r6,5 + bst r7,7 + bld r6,7 + bst r7,6 + bld r7,7 + bst r7,2 + bld r7,6 + bst r5,2 + bld r7,2 + bst r5,0 + bld r5,2 + bst r0,0 + bld r5,0 + bst r4,2 + bld r0,0 + bst r5,1 + bld r4,2 + bst r4,4 + bld r5,1 + bst r6,1 + bld r4,4 + bst r4,7 + bld r6,1 + bst r7,5 + bld r4,7 + bst r6,6 + bld r7,5 + bst r7,3 + bld r6,6 + bst r5,6 + bld r7,3 + bst r7,0 + bld r5,6 + bst r0,0 + bld r7,0 + bst r4,3 + bld r0,0 + bst r5,5 + bld r4,3 + bst r6,4 + bld r5,5 + bst r6,3 + bld r6,4 + bst r5,7 + bld r6,3 + bst r7,4 + bld r5,7 + bst r6,2 + bld r7,4 + bst r5,3 + bld r6,2 + bst 
r5,4 + bld r5,3 + bst r6,0 + bld r5,4 + bst r0,0 + bld r6,0 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r8,2 + bld r8,0 + bst r9,2 + bld r8,2 + bst r9,1 + bld r9,2 + bst r8,5 + bld r9,1 + bst r10,6 + bld r8,5 + bst r11,0 + bld r10,6 + bst r8,3 + bld r11,0 + bst r9,6 + bld r8,3 + bst r11,1 + bld r9,6 + bst r8,7 + bld r11,1 + bst r11,6 + bld r8,7 + bst r11,3 + bld r11,6 + bst r9,7 + bld r11,3 + bst r11,5 + bld r9,7 + bst r10,7 + bld r11,5 + bst r11,4 + bld r10,7 + bst r10,3 + bld r11,4 + bst r9,4 + bld r10,3 + bst r10,1 + bld r9,4 + bst r8,4 + bld r10,1 + bst r10,2 + bld r8,4 + bst r9,0 + bld r10,2 + bst r8,1 + bld r9,0 + bst r8,6 + bld r8,1 + bst r11,2 + bld r8,6 + bst r9,3 + bld r11,2 + bst r9,5 + bld r9,3 + bst r10,5 + bld r9,5 + bst r10,4 + bld r10,5 + bst r10,0 + bld r10,4 + bst r0,0 + bld r10,0 + bst r12,0 + bld r0,0 + bst r12,3 + bld r12,0 + bst r13,7 + bld r12,3 + bst r15,6 + bld r13,7 + bst r15,0 + bld r15,6 + bst r0,0 + bld r15,0 + bst r12,1 + bld r0,0 + bst r12,7 + bld r12,1 + bst r15,7 + bld r12,7 + bst r15,4 + bld r15,7 + bst r14,0 + bld r15,4 + bst r0,0 + bld r14,0 + bst r12,2 + bld r0,0 + bst r13,3 + bld r12,2 + bst r13,6 + bld r13,3 + bst r15,2 + bld r13,6 + bst r13,0 + bld r15,2 + bst r0,0 + bld r13,0 + bst r12,4 + bld r0,0 + bst r14,3 + bld r12,4 + bst r13,5 + bld r14,3 + bst r14,6 + bld r13,5 + bst r15,1 + bld r14,6 + bst r0,0 + bld r15,1 + bst r12,5 + bld r0,0 + bst r14,7 + bld r12,5 + bst r15,5 + bld r14,7 + bst r14,4 + bld r15,5 + bst r14,1 + bld r14,4 + bst r0,0 + bld r14,1 + bst r12,6 + bld r0,0 + bst r15,3 + bld r12,6 + bst r13,4 + bld r15,3 + bst r14,2 + bld r13,4 + bst r13,1 + bld r14,2 + bst r0,0 + bld r13,1 + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + inc r16 + ret +548: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt, .-gift128b_encrypt + + .text +.global gift128b_encrypt_preloaded + .type gift128b_encrypt_preloaded, @function +gift128b_encrypt_preloaded: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 36 + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + std Y+9,r26 + std 
Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r17,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + mov r16,r1 +46: + rcall 199f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + rcall 199f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + rcall 199f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + rcall 199f + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + ldi r17,40 + cpse r16,r17 + rjmp 46b + rjmp 548f +199: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + movw r18,r22 + movw r20,r2 + mov r0,r4 + and r0,r18 + eor r8,r0 + mov r0,r5 + and r0,r19 + eor r9,r0 + mov r0,r6 + and r0,r20 + eor r10,r0 + mov r0,r7 + and r0,r21 + eor r11,r0 + movw r22,r12 + movw r2,r14 + movw r12,r18 + movw r14,r20 + bst r22,1 + bld r0,0 + bst r22,4 + bld r22,1 + bst r2,0 + bld r22,4 + bst r22,2 + bld r2,0 + bst r23,0 + bld r22,2 + bst r22,3 + bld r23,0 + bst r23,4 + bld r22,3 + bst r2,3 + bld r23,4 + bst r23,6 + bld r2,3 + bst r3,3 + bld r23,6 + bst r23,5 + bld r3,3 + bst r2,7 + bld r23,5 + bst r3,6 + bld r2,7 + bst r3,1 + bld r3,6 + bst r22,5 + bld r3,1 + bst r2,4 + bld r22,5 + bst r2,2 + bld r2,4 + bst r23,2 + bld r2,2 + bst r23,3 + bld r23,2 + bst r23,7 + bld r23,3 + bst r3,7 + bld r23,7 + bst r3,5 + bld r3,7 + bst r2,5 + bld r3,5 + bst r2,6 + bld r2,5 + bst r3,2 + bld r2,6 + bst r23,1 + bld r3,2 + bst 
r22,7 + bld r23,1 + bst r3,4 + bld r22,7 + bst r2,1 + bld r3,4 + bst r22,6 + bld r2,1 + bst r3,0 + bld r22,6 + bst r0,0 + bld r3,0 + bst r4,0 + bld r0,0 + bst r4,1 + bld r4,0 + bst r4,5 + bld r4,1 + bst r6,5 + bld r4,5 + bst r6,7 + bld r6,5 + bst r7,7 + bld r6,7 + bst r7,6 + bld r7,7 + bst r7,2 + bld r7,6 + bst r5,2 + bld r7,2 + bst r5,0 + bld r5,2 + bst r0,0 + bld r5,0 + bst r4,2 + bld r0,0 + bst r5,1 + bld r4,2 + bst r4,4 + bld r5,1 + bst r6,1 + bld r4,4 + bst r4,7 + bld r6,1 + bst r7,5 + bld r4,7 + bst r6,6 + bld r7,5 + bst r7,3 + bld r6,6 + bst r5,6 + bld r7,3 + bst r7,0 + bld r5,6 + bst r0,0 + bld r7,0 + bst r4,3 + bld r0,0 + bst r5,5 + bld r4,3 + bst r6,4 + bld r5,5 + bst r6,3 + bld r6,4 + bst r5,7 + bld r6,3 + bst r7,4 + bld r5,7 + bst r6,2 + bld r7,4 + bst r5,3 + bld r6,2 + bst r5,4 + bld r5,3 + bst r6,0 + bld r5,4 + bst r0,0 + bld r6,0 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r8,2 + bld r8,0 + bst r9,2 + bld r8,2 + bst r9,1 + bld r9,2 + bst r8,5 + bld r9,1 + bst r10,6 + bld r8,5 + bst r11,0 + bld r10,6 + bst r8,3 + bld r11,0 + bst r9,6 + bld r8,3 + bst r11,1 + bld r9,6 + bst r8,7 + bld r11,1 + bst r11,6 + bld r8,7 + bst r11,3 + bld r11,6 + bst r9,7 + bld r11,3 + bst r11,5 + bld r9,7 + bst r10,7 + bld r11,5 + bst r11,4 + bld r10,7 + bst r10,3 + bld r11,4 + bst r9,4 + bld r10,3 + bst r10,1 + bld r9,4 + bst r8,4 + bld r10,1 + bst r10,2 + bld r8,4 + bst r9,0 + bld r10,2 + bst r8,1 + bld r9,0 + bst r8,6 + bld r8,1 + bst r11,2 + bld r8,6 + bst r9,3 + bld r11,2 + bst r9,5 + bld r9,3 + bst r10,5 + bld r9,5 + bst r10,4 + bld r10,5 + bst r10,0 + bld r10,4 + bst r0,0 + bld r10,0 + bst r12,0 + bld r0,0 + bst r12,3 + bld r12,0 + bst r13,7 + bld r12,3 + bst r15,6 + bld r13,7 + bst r15,0 + bld r15,6 + bst r0,0 + bld r15,0 + bst r12,1 + bld r0,0 + bst r12,7 + bld r12,1 + bst r15,7 + bld r12,7 + bst r15,4 + bld r15,7 + bst r14,0 + bld r15,4 + bst r0,0 + bld r14,0 + bst r12,2 + bld r0,0 + bst r13,3 + bld r12,2 + bst r13,6 + bld r13,3 + bst r15,2 + bld r13,6 + bst r13,0 + bld r15,2 + bst r0,0 + bld r13,0 + bst r12,4 + bld r0,0 + bst r14,3 + bld r12,4 + bst r13,5 + bld r14,3 + bst r14,6 + bld r13,5 + bst r15,1 + bld r14,6 + bst r0,0 + bld r15,1 + bst r12,5 + bld r0,0 + bst r14,7 + bld r12,5 + bst r15,5 + bld r14,7 + bst r14,4 + bld r15,5 + bst r14,1 + bld r14,4 + bst r0,0 + bld r14,1 + bst r12,6 + bld r0,0 + bst r15,3 + bld r12,6 + bst r13,4 + bld r15,3 + bst r14,2 + bld r13,4 + bst r13,1 + bld r14,2 + bst r0,0 + bld r13,1 + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + inc r16 + ret +548: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded + + .text +.global gift128b_decrypt + .type gift128b_decrypt, @function +gift128b_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 
+ push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r17,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +114: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + cpse r16,r1 + rjmp 114b + 
rjmp 611f +266: + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 + bst r7,6 + bld r7,2 + bst r7,7 + bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld r11,0 + bst r8,5 + bld r10,6 + bst r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + eor r8,r18 + 
eor r9,r19 + eor r10,r20 + eor r11,r21 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +611: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_decrypt, .-gift128b_decrypt + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128b-full-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128b-full-avr.S new file mode 100644 index 0000000..ff11875 --- /dev/null +++ b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128b-full-avr.S @@ -0,0 +1,5037 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 + 
.byte 128 + + .text +.global gift128b_init + .type gift128b_init, @function +gift128b_init: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 18 + ld r13,X+ + ld r12,X+ + ld r11,X+ + ld r10,X+ + ld r5,X+ + ld r4,X+ + ld r3,X+ + ld r2,X+ + ld r9,X+ + ld r8,X+ + ld r7,X+ + ld r6,X+ + ld r29,X+ + ld r28,X+ + ld r23,X+ + ld r22,X+ + st Z+,r22 + st Z+,r23 + st Z+,r28 + st Z+,r29 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + ldi r24,4 +33: + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r29 + ror r28 + ror r0 + lsr r29 + ror r28 + ror r0 + or r29,r0 + st Z+,r22 + st Z+,r23 + st Z+,r28 + st Z+,r29 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r28 + mov r28,r4 + mov r4,r0 + mov r0,r29 + mov r29,r5 + mov r5,r0 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + mov r0,r6 + mov r6,r10 + mov r10,r0 + mov r0,r7 + mov r7,r11 + mov r11,r0 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + dec r24 + breq 5115f + rjmp 33b +5115: + subi r30,80 + sbc r31,r1 + ldi r24,2 +119: + ld r22,Z + ldd r23,Z+1 + ldd r28,Z+2 + ldd r29,Z+3 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + st Z,r29 + std Z+1,r23 + std Z+2,r28 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r28,Z+6 + ldd r29,Z+7 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + 
eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+4,r29 + std Z+5,r23 + std Z+6,r28 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r28,Z+10 + ldd r29,Z+11 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+8,r29 + std Z+9,r23 + std Z+10,r28 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r28,Z+14 + ldd r29,Z+15 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + 
ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+12,r29 + std Z+13,r23 + std Z+14,r28 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r28,Z+18 + ldd r29,Z+19 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+16,r29 + std Z+17,r23 + std Z+18,r28 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r28,Z+22 + ldd r29,Z+23 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov 
r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+20,r29 + std Z+21,r23 + std Z+22,r28 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r28,Z+26 + ldd r29,Z+27 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+24,r29 + std Z+25,r23 + std Z+26,r28 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r28,Z+30 + ldd r29,Z+31 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + 
lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+28,r29 + std Z+29,r23 + std Z+30,r28 + std Z+31,r22 + dec r24 + breq 1268f + adiw r30,40 + rjmp 119b +1268: + adiw r30,40 + movw r26,r30 + subi r26,80 + sbc r27,r1 + ldi r24,6 +1274: + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r2 + eor r19,r3 + andi r18,51 + andi r19,51 + eor r2,r18 + eor r3,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + movw r18,r2 + movw r20,r4 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + st Z,r2 + std Z+1,r3 + std Z+2,r4 + std Z+3,r5 + movw r18,r22 + movw r20,r28 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + andi r28,204 + andi r29,204 + or r28,r21 + or r29,r18 + or r22,r19 + or r23,r20 + movw r18,r28 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r28 + eor r19,r29 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r28,r18 + eor r29,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r28,r18 + eor r29,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r28 + std Z+5,r29 + std Z+6,r22 + std Z+7,r23 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + swap r3 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + swap r5 + std Z+8,r2 + std Z+9,r3 + std Z+10,r4 + std Z+11,r5 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r28 + adc r28,r1 + lsl r28 + adc r28,r1 + lsl r28 + adc r28,r1 + lsl r29 + adc r29,r1 + lsl r29 + adc r29,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r28 + std Z+15,r29 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + ldi r25,85 + and 
r2,r25 + and r3,r25 + and r4,r25 + and r5,r25 + or r2,r19 + or r3,r20 + or r4,r21 + or r5,r18 + std Z+16,r4 + std Z+17,r5 + std Z+18,r2 + std Z+19,r3 + movw r18,r22 + movw r20,r28 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + andi r28,170 + andi r29,170 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + or r22,r18 + or r23,r19 + or r28,r20 + or r29,r21 + std Z+20,r29 + std Z+21,r22 + std Z+22,r23 + std Z+23,r28 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + movw r18,r2 + movw r20,r4 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r14,r18 + movw r16,r20 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + eor r14,r18 + eor r15,r19 + eor r16,r20 + eor r17,r21 + ldi r25,8 + and r14,r25 + and r15,r25 + andi r16,8 + andi r17,8 + eor r18,r14 + eor r19,r15 + eor r20,r16 + eor r21,r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + eor r18,r14 + eor r19,r15 + eor r20,r16 + eor r21,r17 + ldi r17,15 + and r2,r17 + and r3,r17 + and r4,r17 + and r5,r17 + or r2,r18 + or r3,r19 + or r4,r20 + or r5,r21 + std Z+24,r2 + std Z+25,r3 + std Z+26,r4 + std Z+27,r5 + movw r18,r28 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r2,r22 + movw r4,r28 + ldi r16,1 + and r2,r16 + and r3,r16 + and r4,r16 + and r5,r16 + lsl r2 + rol r3 + rol r4 + rol r5 + lsl r2 + rol r3 + rol r4 + rol r5 + lsl r2 + rol r3 + rol r4 + rol r5 + or r2,r18 + or r3,r19 + movw r18,r28 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r2,r18 + or r3,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r4,r18 + or r5,r19 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r2,r18 + or r3,r19 + or r4,r20 + or r5,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r4,r22 + or r5,r23 + std Z+28,r2 + std Z+29,r3 + std Z+30,r4 + std Z+31,r5 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + mov r0,r1 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + or r5,r0 + std Z+32,r3 + std Z+33,r2 + std Z+34,r4 + std Z+35,r5 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r28 + mov r28,r29 + mov r29,r0 + lsl r28 + rol r29 + adc r28,r1 + lsl r28 + rol r29 + adc r28,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r28 + std Z+39,r29 + dec r24 + breq 1733f + adiw r30,40 + rjmp 1274b +1733: + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 
+ pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_init, .-gift128b_init + + .text +.global gift128b_encrypt + .type gift128b_encrypt, @function +gift128b_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 19 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + movw r26,r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rjmp 765f +27: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + 
movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + 
eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +765: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + pop r0 + pop r0 + pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt, .-gift128b_encrypt + + .text +.global gift128b_encrypt_preloaded + .type gift128b_encrypt_preloaded, @function +gift128b_encrypt_preloaded: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 19 + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + movw r26,r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rjmp 765f +27: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and 
r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + 
ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else 
+ lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +765: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + pop r0 + pop r0 + pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded + + .text +.global gift128b_decrypt + .type gift128b_decrypt, @function +gift128b_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 19 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + movw r26,r30 + subi r26,192 + sbci r27,254 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,160 + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rjmp 768f +30: + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r22 + and 
r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r1 + lsr r22 + ror r0 + lsr r22 + ror r0 + or r22,r0 + mov r0,r1 + lsr r23 + ror r0 + lsr r23 + ror r0 + or r23,r0 + mov r0,r1 + lsr r2 + ror r0 + lsr r2 + ror r0 + or r2,r0 + mov r0,r1 + lsr r3 + ror r0 + lsr r3 + ror r0 + or r3,r0 + swap r4 + swap r5 + swap r6 + swap r7 + lsl r8 + adc r8,r1 + lsl r8 + adc r8,r1 + lsl r9 + adc r9,r1 + lsl r9 + adc r9,r1 + lsl r10 + adc r10,r1 + lsl r10 + adc r10,r1 + lsl r11 + adc r11,r1 + lsl r11 + adc r11,r1 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + com r22 + com r23 + com r2 + com r3 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + 
ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + com r22 + com r23 + com r2 + com r3 + eor r4,r22 + eor r5,r23 + eor r6,r2 
+ eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,119 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r15 + ror r14 + ror r13 + ror r12 + lsr r15 + ror r14 + ror r13 + ror r12 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,17 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +768: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + pop r0 + pop r0 + 
pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_decrypt, .-gift128b_decrypt + +#endif + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128b-small-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128b-small-avr.S new file mode 100644 index 0000000..77ef9fd --- /dev/null +++ b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128b-small-avr.S @@ -0,0 +1,6053 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 + .byte 128 + + .text +.global gift128b_init + .type gift128b_init, @function +gift128b_init: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 16 + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + ldi r24,4 +33: + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + mov 
r0,r22 + mov r22,r4 + mov r4,r0 + mov r0,r23 + mov r23,r5 + mov r5,r0 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + dec r24 + breq 5115f + rjmp 33b +5115: + subi r30,80 + sbc r31,r1 + ldi r24,2 +119: + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + st Z,r3 + std Z+1,r23 + std Z+2,r2 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov 
r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+4,r3 + std Z+5,r23 + std Z+6,r2 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+8,r3 + std Z+9,r23 + std Z+10,r2 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 
+ lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+12,r3 + std Z+13,r23 + std Z+14,r2 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r3 + std Z+17,r23 + std Z+18,r2 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol 
r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+20,r3 + std Z+21,r23 + std Z+22,r2 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+24,r3 + std Z+25,r23 + std Z+26,r2 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 
+ rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+28,r3 + std Z+29,r23 + std Z+30,r2 + std Z+31,r22 + dec r24 + breq 1268f + adiw r30,40 + rjmp 119b +1268: + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size gift128b_init, .-gift128b_init + + .text +.global gift128b_encrypt + .type gift128b_encrypt, @function +gift128b_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ldi r24,20 +1: + ld r22,Z+ + ld r23,Z+ + ld r2,Z+ + ld r3,Z+ + std Y+1,r22 + std Y+2,r23 + std Y+3,r2 + std Y+4,r3 + adiw r28,4 + dec r24 + brne 1b + subi r28,80 + sbc r29,r1 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 73f + rcall 73f + rjmp 1285f +73: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 
+ mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc 
r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + 
eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif 
defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +811: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + 
ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +1285: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt, .-gift128b_encrypt + + .text +.global 
gift128b_encrypt_preloaded + .type gift128b_encrypt_preloaded, @function +gift128b_encrypt_preloaded: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ldi r24,20 +1: + ld r22,Z+ + ld r23,Z+ + ld r2,Z+ + ld r3,Z+ + std Y+1,r22 + std Y+2,r23 + std Y+3,r2 + std Y+4,r3 + adiw r28,4 + dec r24 + brne 1b + subi r28,80 + sbc r29,r1 + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 73f + rcall 73f + rjmp 1285f +73: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 
+ rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) 
+ lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or 
r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor 
r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +811: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + 
rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +1285: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 40 +table_1: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 
11 + .byte 23 + .byte 46 + .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128b_decrypt + .type gift128b_decrypt, @function +gift128b_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror 
r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + 
eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r17,hh8(table_1) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +678: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 830f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 830f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 830f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 830f + cpse r16,r1 + rjmp 678b + rjmp 1175f 
+830: + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 + bst r7,6 + bld r7,2 + bst r7,7 + bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld r11,0 + bst r8,5 + bld r10,6 + bst r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + eor r8,r18 + eor r9,r19 
+ eor r10,r20 + eor r11,r21 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +1175: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_decrypt, .-gift128b_decrypt + +#endif + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128b-tiny-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128b-tiny-avr.S new file mode 100644 index 0000000..e7a03f1 --- /dev/null +++ b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-gift128b-tiny-avr.S @@ -0,0 +1,6766 @@ +#if defined(__AVR__) +#include <avr/io.h> +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_TINY + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 +
.byte 128 + + .text +.global gift128b_init + .type gift128b_init, @function +gift128b_init: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 16 + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + st Z,r22 + std Z+1,r23 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size gift128b_init, .-gift128b_init + + .text +.global gift128b_encrypt + .type gift128b_encrypt, @function +gift128b_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + movw r30,r28 + adiw r30,1 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + ldi r24,4 +35: + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + mov r0,r22 + mov r22,r4 + mov r4,r0 + mov r0,r23 + mov r23,r5 + mov r5,r0 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + dec r24 + breq 5117f + rjmp 35b +5117: + subi r30,80 + sbc r31,r1 + ldi r24,2 +121: + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor 
r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + st Z,r3 + std Z+1,r23 + std Z+2,r2 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+4,r3 + std Z+5,r23 + std Z+6,r2 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov 
r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+8,r3 + std Z+9,r23 + std Z+10,r2 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+12,r3 + std Z+13,r23 + std Z+14,r2 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw 
r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r3 + std Z+17,r23 + std Z+18,r2 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+20,r3 + std Z+21,r23 + std Z+22,r2 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror 
r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+24,r3 + std Z+25,r23 + std Z+26,r2 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+28,r3 + std Z+29,r23 + std Z+30,r2 + std Z+31,r22 + dec r24 + breq 1270f + adiw r30,40 + rjmp 121b +1270: + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + 
ldi r30,60 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 1329f + rcall 1329f + rjmp 2541f +1329: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + 
ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor 
r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + 
eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +2067: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + 
swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + 
ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +2541: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt, .-gift128b_encrypt + + .text +.global gift128b_encrypt_preloaded + .type gift128b_encrypt_preloaded, @function +gift128b_encrypt_preloaded: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + movw r30,r28 + adiw r30,1 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + ldi r24,4 +35: + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + mov r0,r22 + mov r22,r4 + mov r4,r0 + mov r0,r23 + mov r23,r5 + mov r5,r0 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + dec r24 + breq 5117f + rjmp 35b +5117: + subi r30,80 + sbc r31,r1 + ldi r24,2 +121: + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov 
r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + st Z,r3 + std Z+1,r23 + std Z+2,r2 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+4,r3 + std Z+5,r23 + std Z+6,r2 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + 
rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+8,r3 + std Z+9,r23 + std Z+10,r2 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+12,r3 + std Z+13,r23 + std Z+14,r2 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 
+ movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r3 + std Z+17,r23 + std Z+18,r2 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+20,r3 + std Z+21,r23 + std Z+22,r2 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor 
r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+24,r3 + std Z+25,r23 + std Z+26,r2 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+28,r3 + std Z+29,r23 + std Z+30,r2 + std Z+31,r22 + dec r24 + breq 1270f + adiw r30,40 + rjmp 121b +1270: + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + 
ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 1329f + rcall 1329f + rjmp 2541f +1329: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if 
defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + 
ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +2067: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor 
r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or 
r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +2541: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 40 +table_1: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 11 + .byte 23 + .byte 46 + .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128b_decrypt + .type gift128b_decrypt, @function +gift128b_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd 
r25,Z+11 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r17,hh8(table_1) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +114: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + cpse r16,r1 + rjmp 114b + rjmp 611f +266: + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 
+ bst r7,6 + bld r7,2 + bst r7,7 + bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld r11,0 + bst r8,5 + bld r10,6 + bst r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +611: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop 
r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_decrypt, .-gift128b_decrypt + +#endif + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-util.h b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-util.h +++ b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) 
(leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git 
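The composed rotation macros added to internal-util.h above rely on the property stated in their comment: an AVR core rotates a 32-bit word cheaply only by 1 bit (through the carry flag) or by a multiple of 8 bits (a plain byte permutation), so every other count is built from those two primitives, e.g. a left rotation by 5 becomes a left rotation by 8 followed by three single-bit right rotations. A minimal C sketch of that composition, using illustrative helper names rather than anything defined in this patch:

#include <stdint.h>
#include <assert.h>

/* Generic rotations, the same shape as the leftRotate()/rightRotate()
 * macros in internal-util.h (the helper names here are illustrative only). */
static inline uint32_t rol32(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32u - bits));
}

static inline uint32_t ror32(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32u - bits));
}

/* leftRotate5 in composed form: rotate left by 8 (a cheap byte move on AVR),
 * then back off 3 bits with single-bit right rotations. */
static inline uint32_t rol32_5_composed(uint32_t x)
{
    return ror32(ror32(ror32(rol32(x, 8), 1), 1), 1);
}

int main(void)
{
    uint32_t x = 0x12345678u;
    /* The composed rotation must agree with the direct one. */
    assert(rol32_5_composed(x) == rol32(x, 5));
    return 0;
}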
a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/sundae-gift.c b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/sundae-gift.c index 984a4db..d192b8e 100644 --- a/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/sundae-gift.c +++ b/sundae-gift/Implementations/crypto_aead/sundaegift128v1/rhys/sundae-gift.c @@ -140,8 +140,7 @@ static int sundae_gift_aead_encrypt *clen = mlen + SUNDAE_GIFT_TAG_SIZE; /* Set the key schedule */ - if (!gift128b_init(&ks, k, SUNDAE_GIFT_KEY_SIZE)) - return -1; + gift128b_init(&ks, k); /* Format and encrypt the initial domain separation block */ if (adlen > 0) @@ -205,8 +204,7 @@ static int sundae_gift_aead_decrypt len = *mlen = clen - SUNDAE_GIFT_TAG_SIZE; /* Set the key schedule */ - if (!gift128b_init(&ks, k, SUNDAE_GIFT_KEY_SIZE)) - return -1; + gift128b_init(&ks, k); /* Decrypt the ciphertext to produce the plaintext, using the * tag as the initialization vector for the decryption process */ diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/aead-common.c b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/aead-common.h b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/api.h b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/api.h deleted file mode 100644 index 6656888..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 8 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/encrypt.c b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/encrypt.c deleted file mode 100644 index c6f2a7d..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "sundae-gift.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return sundae_gift_64_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return sundae_gift_64_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128-config.h b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128-config.h deleted file mode 100644 index 62131ba..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128-config.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_GIFT128_CONFIG_H -#define LW_INTERNAL_GIFT128_CONFIG_H - -/** - * \file internal-gift128-config.h - * \brief Configures the variant of GIFT-128 to use. - */ - -/** - * \brief Select the full variant of GIFT-128. - * - * The full variant requires 320 bytes for the key schedule and uses the - * fixslicing method to implement encryption and decryption. - */ -#define GIFT128_VARIANT_FULL 0 - -/** - * \brief Select the small variant of GIFT-128. - * - * The small variant requires 80 bytes for the key schedule. The rest - * of the key schedule is expanded on the fly during encryption. - * - * The fixslicing method is used to implement encryption and the slower - * bitslicing method is used to implement decryption. The small variant - * is suitable when memory is at a premium, decryption is not needed, - * but encryption performance is still important. - */ -#define GIFT128_VARIANT_SMALL 1 - -/** - * \brief Select the tiny variant of GIFT-128. - * - * The tiny variant requires 16 bytes for the key schedule and uses the - * bitslicing method to implement encryption and decryption. It is suitable - * for use when memory is very tight and performance is not critical. - */ -#define GIFT128_VARIANT_TINY 2 - -/** - * \def GIFT128_VARIANT - * \brief Selects the default variant of GIFT-128 to use on this platform. - */ -/** - * \def GIFT128_VARIANT_ASM - * \brief Defined to 1 if the GIFT-128 implementation has been replaced - * with an assembly code version. - */ -#if defined(__AVR__) && !defined(GIFT128_VARIANT_ASM) -#define GIFT128_VARIANT_ASM 1 -#endif -#if !defined(GIFT128_VARIANT) -#define GIFT128_VARIANT GIFT128_VARIANT_FULL -#endif -#if !defined(GIFT128_VARIANT_ASM) -#define GIFT128_VARIANT_ASM 0 -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128.c b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128.c deleted file mode 100644 index c6ac5ec..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128.c +++ /dev/null @@ -1,1498 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "internal-gift128.h" -#include "internal-util.h" - -#if !GIFT128_VARIANT_ASM - -#if GIFT128_VARIANT != GIFT128_VARIANT_TINY - -/* Round constants for GIFT-128 in the fixsliced representation */ -static uint32_t const GIFT128_RC_fixsliced[40] = { - 0x10000008, 0x80018000, 0x54000002, 0x01010181, 0x8000001f, 0x10888880, - 0x6001e000, 0x51500002, 0x03030180, 0x8000002f, 0x10088880, 0x60016000, - 0x41500002, 0x03030080, 0x80000027, 0x10008880, 0x4001e000, 0x11500002, - 0x03020180, 0x8000002b, 0x10080880, 0x60014000, 0x01400002, 0x02020080, - 0x80000021, 0x10000080, 0x0001c000, 0x51000002, 0x03010180, 0x8000002e, - 0x10088800, 0x60012000, 0x40500002, 0x01030080, 0x80000006, 0x10008808, - 0xc001a000, 0x14500002, 0x01020181, 0x8000001a -}; - -#endif - -#if GIFT128_VARIANT != GIFT128_VARIANT_FULL - -/* Round constants for GIFT-128 in the bitsliced representation */ -static uint8_t const GIFT128_RC[40] = { - 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, - 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, - 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, - 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38, - 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A -}; - -#endif - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) - -/* - * The permutation below was generated by the online permuation generator at - * "http://programming.sirrida.de/calcperm.php". - * - * All of the permutuations are essentially the same, except that each is - * rotated by 8 bits with respect to the next: - * - * P0: 0 24 16 8 1 25 17 9 2 26 18 10 3 27 19 11 4 28 20 12 5 29 21 13 6 30 22 14 7 31 23 15 - * P1: 8 0 24 16 9 1 25 17 10 2 26 18 11 3 27 19 12 4 28 20 13 5 29 21 14 6 30 22 15 7 31 23 - * P2: 16 8 0 24 17 9 1 25 18 10 2 26 19 11 3 27 20 12 4 28 21 13 5 29 22 14 6 30 23 15 7 31 - * P3: 24 16 8 0 25 17 9 1 26 18 10 2 27 19 11 3 28 20 12 4 29 21 13 5 30 22 14 6 31 23 15 7 - * - * The most efficient permutation from the online generator was P3, so we - * perform it as the core of the others, and then perform a final rotation. - * - * It is possible to do slightly better than "P3 then rotate" on desktop and - * server architectures for the other permutations. But the advantage isn't - * as evident on embedded platforms so we keep things simple. 
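 *
 * bit_permute_step() above is the classic "delta swap": each bit selected by
 * the mask trades places with the bit "shift" positions above it.  A small
 * standalone illustration (the helper name and test values here are for
 * exposition only, not part of this library):
 *
 *     #include <stdint.h>
 *     #include <stdio.h>
 *
 *     static uint32_t delta_swap(uint32_t y, uint32_t mask, unsigned shift)
 *     {
 *         uint32_t t = ((y >> shift) ^ y) & mask; // 1 where the paired bits differ
 *         return (y ^ t) ^ (t << shift);          // flip both ends of each pair
 *     }
 *
 *     int main(void)
 *     {
 *         // Swap bit 0 with bit 3: 0x1 becomes 0x8
 *         printf("0x%lx\n", (unsigned long)delta_swap(0x1, 0x1, 3));
 *         // Applying the same swap again restores the original value
 *         printf("0x%lx\n", (unsigned long)delta_swap(delta_swap(0x1, 0x1, 3), 0x1, 3));
 *         return 0;
 *     }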
- */ -#define PERM3_INNER(x) \ - do { \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x000000ff, 24); \ - } while (0) -#define PERM0(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate8(_x); \ - } while (0) -#define PERM1(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate16(_x); \ - } while (0) -#define PERM2(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate24(_x); \ - } while (0) -#define PERM3(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) - -#define INV_PERM3_INNER(x) \ - do { \ - bit_permute_step(x, 0x00550055, 9); \ - bit_permute_step(x, 0x00003333, 18); \ - bit_permute_step(x, 0x000f000f, 12); \ - bit_permute_step(x, 0x000000ff, 24); \ - } while (0) -#define INV_PERM0(x) \ - do { \ - uint32_t _x = rightRotate8(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM1(x) \ - do { \ - uint32_t _x = rightRotate16(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM2(x) \ - do { \ - uint32_t _x = rightRotate24(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM3(x) \ - do { \ - uint32_t _x = (x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) - -/** - * \brief Converts the GIFT-128 nibble-based representation into word-based. - * - * \param output Output buffer to write the word-based version to. - * \param input Input buffer to read the nibble-based version from. - * - * The \a input and \a output buffers can be the same buffer. - */ -static void gift128n_to_words - (unsigned char *output, const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Load the input buffer into 32-bit words. We use the nibble order - * from the HYENA submission to NIST which is byte-reversed with respect - * to the nibble order of the original GIFT-128 paper. Nibble zero is in - * the first byte instead of the last, which means little-endian order. */ - s0 = le_load_word32(input + 12); - s1 = le_load_word32(input + 8); - s2 = le_load_word32(input + 4); - s3 = le_load_word32(input); - - /* Rearrange the bits so that bits 0..3 of each nibble are - * scattered to bytes 0..3 of each word. The permutation is: - * - * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 - * - * Generated with "http://programming.sirrida.de/calcperm.php". - */ - #define PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - PERM_WORDS(s0); - PERM_WORDS(s1); - PERM_WORDS(s2); - PERM_WORDS(s3); - - /* Rearrange the bytes and write them to the output buffer */ - output[0] = (uint8_t)s0; - output[1] = (uint8_t)s1; - output[2] = (uint8_t)s2; - output[3] = (uint8_t)s3; - output[4] = (uint8_t)(s0 >> 8); - output[5] = (uint8_t)(s1 >> 8); - output[6] = (uint8_t)(s2 >> 8); - output[7] = (uint8_t)(s3 >> 8); - output[8] = (uint8_t)(s0 >> 16); - output[9] = (uint8_t)(s1 >> 16); - output[10] = (uint8_t)(s2 >> 16); - output[11] = (uint8_t)(s3 >> 16); - output[12] = (uint8_t)(s0 >> 24); - output[13] = (uint8_t)(s1 >> 24); - output[14] = (uint8_t)(s2 >> 24); - output[15] = (uint8_t)(s3 >> 24); -} - -/** - * \brief Converts the GIFT-128 word-based representation into nibble-based. 
- * - * \param output Output buffer to write the nibble-based version to. - * \param input Input buffer to read the word-based version from. - */ -static void gift128n_to_nibbles - (unsigned char *output, const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Load the input bytes and rearrange them so that s0 contains the - * most significant nibbles and s3 contains the least significant */ - s0 = (((uint32_t)(input[12])) << 24) | - (((uint32_t)(input[8])) << 16) | - (((uint32_t)(input[4])) << 8) | - ((uint32_t)(input[0])); - s1 = (((uint32_t)(input[13])) << 24) | - (((uint32_t)(input[9])) << 16) | - (((uint32_t)(input[5])) << 8) | - ((uint32_t)(input[1])); - s2 = (((uint32_t)(input[14])) << 24) | - (((uint32_t)(input[10])) << 16) | - (((uint32_t)(input[6])) << 8) | - ((uint32_t)(input[2])); - s3 = (((uint32_t)(input[15])) << 24) | - (((uint32_t)(input[11])) << 16) | - (((uint32_t)(input[7])) << 8) | - ((uint32_t)(input[3])); - - /* Apply the inverse of PERM_WORDS() from the function above */ - #define INV_PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x00aa00aa, 7); \ - bit_permute_step(x, 0x0000cccc, 14); \ - bit_permute_step(x, 0x00f000f0, 4); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - INV_PERM_WORDS(s0); - INV_PERM_WORDS(s1); - INV_PERM_WORDS(s2); - INV_PERM_WORDS(s3); - - /* Store the result into the output buffer as 32-bit words */ - le_store_word32(output + 12, s0); - le_store_word32(output + 8, s1); - le_store_word32(output + 4, s2); - le_store_word32(output, s3); -} - -void gift128n_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - gift128n_to_words(output, input); - gift128b_encrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} - -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - gift128n_to_words(output, input); - gift128b_decrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} - -#if GIFT128_VARIANT != GIFT128_VARIANT_TINY - -/** - * \brief Swaps bits within two words. - * - * \param a The first word. - * \param b The second word. - * \param mask Mask for the bits to shift. - * \param shift Shift amount in bits. - */ -#define gift128b_swap_move(a, b, mask, shift) \ - do { \ - uint32_t tmp = ((b) ^ ((a) >> (shift))) & (mask); \ - (b) ^= tmp; \ - (a) ^= tmp << (shift); \ - } while (0) - -/** - * \brief Derives the next 10 fixsliced keys in the key schedule. - * - * \param next Points to the buffer to receive the next 10 keys. - * \param prev Points to the buffer holding the previous 10 keys. - * - * The \a next and \a prev buffers are allowed to be the same. 
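 *
 * gift128b_swap_move() above is the two-word form of the same delta swap:
 * the bits of b selected by the mask trade places with the bits of a that
 * sit "shift" positions higher.  A small standalone illustration (the helper
 * name and test values are for exposition only):
 *
 *     #include <stdint.h>
 *     #include <stdio.h>
 *
 *     static void swap_move(uint32_t *a, uint32_t *b, uint32_t mask, unsigned shift)
 *     {
 *         uint32_t tmp = (*b ^ (*a >> shift)) & mask;
 *         *b ^= tmp;
 *         *a ^= tmp << shift;
 *     }
 *
 *     int main(void)
 *     {
 *         uint32_t a = 0xAAAA0000U, b = 0x0000BBBBU;
 *         swap_move(&a, &b, 0x0000FFFFU, 16); // high half of a <-> low half of b
 *         printf("%08lX %08lX\n", (unsigned long)a, (unsigned long)b); // BBBB0000 0000AAAA
 *         return 0;
 *     }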
- */ -#define gift128b_derive_keys(next, prev) \ - do { \ - /* Key 0 */ \ - uint32_t s = (prev)[0]; \ - uint32_t t = (prev)[1]; \ - gift128b_swap_move(t, t, 0x00003333U, 16); \ - gift128b_swap_move(t, t, 0x55554444U, 1); \ - (next)[0] = t; \ - /* Key 1 */ \ - s = leftRotate8(s & 0x33333333U) | leftRotate16(s & 0xCCCCCCCCU); \ - gift128b_swap_move(s, s, 0x55551100U, 1); \ - (next)[1] = s; \ - /* Key 2 */ \ - s = (prev)[2]; \ - t = (prev)[3]; \ - (next)[2] = ((t >> 4) & 0x0F000F00U) | ((t & 0x0F000F00U) << 4) | \ - ((t >> 6) & 0x00030003U) | ((t & 0x003F003FU) << 2); \ - /* Key 3 */ \ - (next)[3] = ((s >> 6) & 0x03000300U) | ((s & 0x3F003F00U) << 2) | \ - ((s >> 5) & 0x00070007U) | ((s & 0x001F001FU) << 3); \ - /* Key 4 */ \ - s = (prev)[4]; \ - t = (prev)[5]; \ - (next)[4] = leftRotate8(t & 0xAAAAAAAAU) | \ - leftRotate16(t & 0x55555555U); \ - /* Key 5 */ \ - (next)[5] = leftRotate8(s & 0x55555555U) | \ - leftRotate12(s & 0xAAAAAAAAU); \ - /* Key 6 */ \ - s = (prev)[6]; \ - t = (prev)[7]; \ - (next)[6] = ((t >> 2) & 0x03030303U) | ((t & 0x03030303U) << 2) | \ - ((t >> 1) & 0x70707070U) | ((t & 0x10101010U) << 3); \ - /* Key 7 */ \ - (next)[7] = ((s >> 18) & 0x00003030U) | ((s & 0x01010101U) << 3) | \ - ((s >> 14) & 0x0000C0C0U) | ((s & 0x0000E0E0U) << 15) | \ - ((s >> 1) & 0x07070707U) | ((s & 0x00001010U) << 19); \ - /* Key 8 */ \ - s = (prev)[8]; \ - t = (prev)[9]; \ - (next)[8] = ((t >> 4) & 0x0FFF0000U) | ((t & 0x000F0000U) << 12) | \ - ((t >> 8) & 0x000000FFU) | ((t & 0x000000FFU) << 8); \ - /* Key 9 */ \ - (next)[9] = ((s >> 6) & 0x03FF0000U) | ((s & 0x003F0000U) << 10) | \ - ((s >> 4) & 0x00000FFFU) | ((s & 0x0000000FU) << 12); \ - } while (0) - -/** - * \brief Compute the round keys for GIFT-128 in the fixsliced representation. - * - * \param ks Points to the key schedule to initialize. - * \param k0 First key word. - * \param k1 Second key word. - * \param k2 Third key word. - * \param k3 Fourth key word. 
- */ -static void gift128b_compute_round_keys - (gift128b_key_schedule_t *ks, - uint32_t k0, uint32_t k1, uint32_t k2, uint32_t k3) -{ - unsigned index; - uint32_t temp; - - /* Set the regular key with k0 and k3 pre-swapped for the round function */ - ks->k[0] = k3; - ks->k[1] = k1; - ks->k[2] = k2; - ks->k[3] = k0; - - /* Pre-compute the keys for rounds 3..10 and permute into fixsliced form */ - for (index = 4; index < 20; index += 2) { - ks->k[index] = ks->k[index - 3]; - temp = ks->k[index - 4]; - temp = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - ks->k[index + 1] = temp; - } - for (index = 0; index < 20; index += 10) { - /* Keys 0 and 10 */ - temp = ks->k[index]; - gift128b_swap_move(temp, temp, 0x00550055U, 9); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index] = temp; - - /* Keys 1 and 11 */ - temp = ks->k[index + 1]; - gift128b_swap_move(temp, temp, 0x00550055U, 9); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 1] = temp; - - /* Keys 2 and 12 */ - temp = ks->k[index + 2]; - gift128b_swap_move(temp, temp, 0x11111111U, 3); - gift128b_swap_move(temp, temp, 0x03030303U, 6); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 2] = temp; - - /* Keys 3 and 13 */ - temp = ks->k[index + 3]; - gift128b_swap_move(temp, temp, 0x11111111U, 3); - gift128b_swap_move(temp, temp, 0x03030303U, 6); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 3] = temp; - - /* Keys 4 and 14 */ - temp = ks->k[index + 4]; - gift128b_swap_move(temp, temp, 0x0000AAAAU, 15); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 4] = temp; - - /* Keys 5 and 15 */ - temp = ks->k[index + 5]; - gift128b_swap_move(temp, temp, 0x0000AAAAU, 15); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 5] = temp; - - /* Keys 6 and 16 */ - temp = ks->k[index + 6]; - gift128b_swap_move(temp, temp, 0x0A0A0A0AU, 3); - gift128b_swap_move(temp, temp, 0x00CC00CCU, 6); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 6] = temp; - - /* Keys 7 and 17 */ - temp = ks->k[index + 7]; - gift128b_swap_move(temp, temp, 0x0A0A0A0AU, 3); - gift128b_swap_move(temp, temp, 0x00CC00CCU, 6); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 7] = temp; - - /* Keys 8, 9, 18, and 19 do not need any adjustment */ - } - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - /* Derive the fixsliced keys for the remaining rounds 11..40 */ - for (index = 20; index < 80; index += 10) { - gift128b_derive_keys(ks->k + index, ks->k + index - 20); - } -#endif -} - -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) -{ - gift128b_compute_round_keys - (ks, be_load_word32(key), be_load_word32(key + 4), - be_load_word32(key + 8), be_load_word32(key + 12)); -} - -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) -{ - /* Use 
the little-endian key byte order from the HYENA submission */ - gift128b_compute_round_keys - (ks, le_load_word32(key + 12), le_load_word32(key + 8), - le_load_word32(key + 4), le_load_word32(key)); -} - -/** - * \brief Performs the GIFT-128 S-box on the bit-sliced state. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_sbox(s0, s1, s2, s3) \ - do { \ - s1 ^= s0 & s2; \ - s0 ^= s1 & s3; \ - s2 ^= s0 | s1; \ - s3 ^= s2; \ - s1 ^= s3; \ - s3 ^= 0xFFFFFFFFU; \ - s2 ^= s0 & s1; \ - } while (0) - -/** - * \brief Performs the inverse of the GIFT-128 S-box on the bit-sliced state. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_sbox(s0, s1, s2, s3) \ - do { \ - s2 ^= s3 & s1; \ - s0 ^= 0xFFFFFFFFU; \ - s1 ^= s0; \ - s0 ^= s2; \ - s2 ^= s3 | s1; \ - s3 ^= s1 & s0; \ - s1 ^= s3 & s2; \ - } while (0) - -/** - * \brief Permutes the GIFT-128 state between the 1st and 2nd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_1(s0, s1, s2, s3) \ - do { \ - s1 = ((s1 >> 2) & 0x33333333U) | ((s1 & 0x33333333U) << 2); \ - s2 = ((s2 >> 3) & 0x11111111U) | ((s2 & 0x77777777U) << 1); \ - s3 = ((s3 >> 1) & 0x77777777U) | ((s3 & 0x11111111U) << 3); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 2nd and 3rd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_2(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 4) & 0x0FFF0FFFU) | ((s0 & 0x000F000FU) << 12); \ - s1 = ((s1 >> 8) & 0x00FF00FFU) | ((s1 & 0x00FF00FFU) << 8); \ - s2 = ((s2 >> 12) & 0x000F000FU) | ((s2 & 0x0FFF0FFFU) << 4); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 3rd and 4th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_3(s0, s1, s2, s3) \ - do { \ - gift128b_swap_move(s1, s1, 0x55555555U, 1); \ - s2 = leftRotate16(s2); \ - gift128b_swap_move(s2, s2, 0x00005555U, 1); \ - s3 = leftRotate16(s3); \ - gift128b_swap_move(s3, s3, 0x55550000U, 1); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 4th and 5th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_4(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 6) & 0x03030303U) | ((s0 & 0x3F3F3F3FU) << 2); \ - s1 = ((s1 >> 4) & 0x0F0F0F0FU) | ((s1 & 0x0F0F0F0FU) << 4); \ - s2 = ((s2 >> 2) & 0x3F3F3F3FU) | ((s2 & 0x03030303U) << 6); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 5th and 1st mini-rounds. 
- * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_5(s0, s1, s2, s3) \ - do { \ - s1 = leftRotate16(s1); \ - s2 = rightRotate8(s2); \ - s3 = leftRotate8(s3); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 1st and 2nd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_1(s0, s1, s2, s3) \ - do { \ - s1 = ((s1 >> 2) & 0x33333333U) | ((s1 & 0x33333333U) << 2); \ - s2 = ((s2 >> 1) & 0x77777777U) | ((s2 & 0x11111111U) << 3); \ - s3 = ((s3 >> 3) & 0x11111111U) | ((s3 & 0x77777777U) << 1); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 2nd and 3rd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_2(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 12) & 0x000F000FU) | ((s0 & 0x0FFF0FFFU) << 4); \ - s1 = ((s1 >> 8) & 0x00FF00FFU) | ((s1 & 0x00FF00FFU) << 8); \ - s2 = ((s2 >> 4) & 0x0FFF0FFFU) | ((s2 & 0x000F000FU) << 12); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 3rd and 4th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_3(s0, s1, s2, s3) \ - do { \ - gift128b_swap_move(s1, s1, 0x55555555U, 1); \ - gift128b_swap_move(s2, s2, 0x00005555U, 1); \ - s2 = leftRotate16(s2); \ - gift128b_swap_move(s3, s3, 0x55550000U, 1); \ - s3 = leftRotate16(s3); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 4th and 5th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_4(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 2) & 0x3F3F3F3FU) | ((s0 & 0x03030303U) << 6); \ - s1 = ((s1 >> 4) & 0x0F0F0F0FU) | ((s1 & 0x0F0F0F0FU) << 4); \ - s2 = ((s2 >> 6) & 0x03030303U) | ((s2 & 0x3F3F3F3FU) << 2); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 5th and 1st mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_5(s0, s1, s2, s3) \ - do { \ - s1 = leftRotate16(s1); \ - s2 = leftRotate8(s2); \ - s3 = rightRotate8(s3); \ - } while (0); - -/** - * \brief Performs five fixsliced encryption rounds for GIFT-128. - * - * \param rk Points to the 10 round keys for these rounds. - * \param rc Points to the round constants for these rounds. - * - * We perform all 40 rounds of the fixsliced GIFT-128 five at a time. 
- * - * The permutation is restructured so that one of the words each round - * does not need to be permuted, with the others rotating left, up, right, - * and down to keep the bits in line with their non-moving counterparts. - * This reduces the number of shifts required significantly. - * - * At the end of five rounds, the bit ordering will return to the - * original position. We then repeat the process for the next 5 rounds. - */ -#define gift128b_encrypt_5_rounds(rk, rc) \ - do { \ - /* 1st round - S-box, rotate left, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_1(s0, s1, s2, s3); \ - s1 ^= (rk)[0]; \ - s2 ^= (rk)[1]; \ - s0 ^= (rc)[0]; \ - \ - /* 2nd round - S-box, rotate up, add round key */ \ - gift128b_sbox(s3, s1, s2, s0); \ - gift128b_permute_state_2(s0, s1, s2, s3); \ - s1 ^= (rk)[2]; \ - s2 ^= (rk)[3]; \ - s3 ^= (rc)[1]; \ - \ - /* 3rd round - S-box, swap columns, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_3(s0, s1, s2, s3); \ - s1 ^= (rk)[4]; \ - s2 ^= (rk)[5]; \ - s0 ^= (rc)[2]; \ - \ - /* 4th round - S-box, rotate left and swap rows, add round key */ \ - gift128b_sbox(s3, s1, s2, s0); \ - gift128b_permute_state_4(s0, s1, s2, s3); \ - s1 ^= (rk)[6]; \ - s2 ^= (rk)[7]; \ - s3 ^= (rc)[3]; \ - \ - /* 5th round - S-box, rotate up, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_5(s0, s1, s2, s3); \ - s1 ^= (rk)[8]; \ - s2 ^= (rk)[9]; \ - s0 ^= (rc)[4]; \ - \ - /* Swap s0 and s3 in preparation for the next 1st round */ \ - s0 ^= s3; \ - s3 ^= s0; \ - s0 ^= s3; \ - } while (0) - -/** - * \brief Performs five fixsliced decryption rounds for GIFT-128. - * - * \param rk Points to the 10 round keys for these rounds. - * \param rc Points to the round constants for these rounds. - * - * We perform all 40 rounds of the fixsliced GIFT-128 five at a time. 
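 *
 * The encryption groups end, and the decryption groups begin, by exchanging
 * s0 and s3 with three XORs so that every group starts from the same word
 * ordering.  The exchange is the usual no-temporary XOR swap, shown here in
 * isolation (the values are arbitrary examples):
 *
 *     uint32_t s0 = 0x01234567U, s3 = 0x89ABCDEFU;
 *     s0 ^= s3;   // s0 = a ^ b
 *     s3 ^= s0;   // s3 = b ^ (a ^ b) = a
 *     s0 ^= s3;   // s0 = (a ^ b) ^ a = b
 *     // now s0 == 0x89ABCDEF and s3 == 0x01234567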
- */ -#define gift128b_decrypt_5_rounds(rk, rc) \ - do { \ - /* Swap s0 and s3 in preparation for the next 5th round */ \ - s0 ^= s3; \ - s3 ^= s0; \ - s0 ^= s3; \ - \ - /* 5th round - S-box, rotate down, add round key */ \ - s1 ^= (rk)[8]; \ - s2 ^= (rk)[9]; \ - s0 ^= (rc)[4]; \ - gift128b_inv_permute_state_5(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - \ - /* 4th round - S-box, rotate right and swap rows, add round key */ \ - s1 ^= (rk)[6]; \ - s2 ^= (rk)[7]; \ - s3 ^= (rc)[3]; \ - gift128b_inv_permute_state_4(s0, s1, s2, s3); \ - gift128b_inv_sbox(s0, s1, s2, s3); \ - \ - /* 3rd round - S-box, swap columns, add round key */ \ - s1 ^= (rk)[4]; \ - s2 ^= (rk)[5]; \ - s0 ^= (rc)[2]; \ - gift128b_inv_permute_state_3(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - \ - /* 2nd round - S-box, rotate down, add round key */ \ - s1 ^= (rk)[2]; \ - s2 ^= (rk)[3]; \ - s3 ^= (rc)[1]; \ - gift128b_inv_permute_state_2(s0, s1, s2, s3); \ - gift128b_inv_sbox(s0, s1, s2, s3); \ - \ - /* 1st round - S-box, rotate right, add round key */ \ - s1 ^= (rk)[0]; \ - s2 ^= (rk)[1]; \ - s0 ^= (rc)[0]; \ - gift128b_inv_permute_state_1(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - } while (0) - -#else /* GIFT128_VARIANT_TINY */ - -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) -{ - /* Mirror the fixslicing word order of 3, 1, 2, 0 */ - ks->k[0] = be_load_word32(key + 12); - ks->k[1] = be_load_word32(key + 4); - ks->k[2] = be_load_word32(key + 8); - ks->k[3] = be_load_word32(key); -} - -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) -{ - /* Use the little-endian key byte order from the HYENA submission - * and mirror the fixslicing word order of 3, 1, 2, 0 */ - ks->k[0] = le_load_word32(key); - ks->k[1] = le_load_word32(key + 8); - ks->k[2] = le_load_word32(key + 4); - ks->k[3] = le_load_word32(key + 12); -} - -#endif /* GIFT128_VARIANT_TINY */ - -#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext 
into local variables */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. - * Every 5 rounds except the last we add the tweak value to the state */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#elif GIFT128_VARIANT == GIFT128_VARIANT_FULL - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_encrypt_5_rounds(ks->k 
+ 60, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into local variables */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. 
- * Every 5 rounds except the last we add the tweak value to the state */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#else /* GIFT128_VARIANT_TINY */ - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 
0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* AddTweak - XOR in the tweak every 5 rounds except the last */ - if (((round + 1) % 5) == 0 && round < 39) - s0 ^= tweak; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#endif /* GIFT128_VARIANT_TINY */ - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the ciphertext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = 
be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. - * Every 5 rounds except the first we add the tweak value to the state */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - - /* Pack the state into the plaintext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#else /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ - -/* The small variant uses fixslicing for encryption, but we need to change - * to bitslicing for decryption because of the difficulty of fast-forwarding - * the fixsliced key schedule to the end. So the tiny variant is used for - * decryption when the small variant is selected. Since the NIST AEAD modes - * for GIFT-128 only use the block encrypt operation, the inefficiencies - * in decryption don't matter all that much */ - -/** - * \def gift128b_load_and_forward_schedule() - * \brief Generate the decryption key at the end of the last round. - * - * To do that, we run the block operation forward to determine the - * final state of the key schedule after the last round: - * - * w0 = ks->k[0]; - * w1 = ks->k[1]; - * w2 = ks->k[2]; - * w3 = ks->k[3]; - * for (round = 0; round < 40; ++round) { - * temp = w3; - * w3 = w2; - * w2 = w1; - * w1 = w0; - * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - * } - * - * We can short-cut all of the above by noticing that we don't need - * to do the word rotations. Every 4 rounds, the rotation alignment - * returns to the original position and each word has been rotated - * by applying the "2 right and 4 left" bit-rotation step to it. - * We then repeat that 10 times for the full 40 rounds. The overall - * effect is to apply a "20 right and 40 left" bit-rotation to every - * word in the key schedule. That is equivalent to "4 right and 8 left" - * on the 16-bit sub-words. 
- */ -#if GIFT128_VARIANT != GIFT128_VARIANT_SMALL -#define gift128b_load_and_forward_schedule() \ - do { \ - w0 = ks->k[3]; \ - w1 = ks->k[1]; \ - w2 = ks->k[2]; \ - w3 = ks->k[0]; \ - w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ - ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ - w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ - ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ - w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ - ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ - w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ - ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ - } while (0) -#else -/* The small variant needs to also undo some of the rotations that were - * done to generate the fixsliced version of the key schedule */ -#define gift128b_load_and_forward_schedule() \ - do { \ - w0 = ks->k[3]; \ - w1 = ks->k[1]; \ - w2 = ks->k[2]; \ - w3 = ks->k[0]; \ - gift128b_swap_move(w3, w3, 0x000000FFU, 24); \ - gift128b_swap_move(w3, w3, 0x00003333U, 18); \ - gift128b_swap_move(w3, w3, 0x000F000FU, 12); \ - gift128b_swap_move(w3, w3, 0x00550055U, 9); \ - gift128b_swap_move(w1, w1, 0x000000FFU, 24); \ - gift128b_swap_move(w1, w1, 0x00003333U, 18); \ - gift128b_swap_move(w1, w1, 0x000F000FU, 12); \ - gift128b_swap_move(w1, w1, 0x00550055U, 9); \ - gift128b_swap_move(w2, w2, 0x000000FFU, 24); \ - gift128b_swap_move(w2, w2, 0x000F000FU, 12); \ - gift128b_swap_move(w2, w2, 0x03030303U, 6); \ - gift128b_swap_move(w2, w2, 0x11111111U, 3); \ - gift128b_swap_move(w0, w0, 0x000000FFU, 24); \ - gift128b_swap_move(w0, w0, 0x000F000FU, 12); \ - gift128b_swap_move(w0, w0, 0x03030303U, 6); \ - gift128b_swap_move(w0, w0, 0x11111111U, 3); \ - w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ - ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ - w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ - ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ - w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ - ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ - w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ - ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ - } while (0) -#endif - -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the ciphertext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Generate the decryption key at the end of the last round */ - gift128b_load_and_forward_schedule(); - - /* Perform all 40 rounds */ - for (round = 40; round > 0; --round) { - /* Rotate the key schedule backwards */ - temp = w0; - w0 = w1; - w1 = w2; - w2 = w3; - w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | - ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; - - /* InvPermBits - apply the inverse of the 128-bit permutation */ - INV_PERM0(s0); - INV_PERM1(s1); - INV_PERM2(s2); - INV_PERM3(s3); - - /* InvSubCells - apply the inverse of the S-box */ - temp = s0; - s0 = s3; - s3 = temp; - s2 ^= s0 & s1; - s3 ^= 0xFFFFFFFFU; - s1 ^= s3; - s3 ^= s2; - s2 ^= s0 | s1; - s0 ^= s1 
& s3; - s1 ^= s0 & s2; - } - - /* Pack the state into the plaintext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Generate the decryption key at the end of the last round */ - gift128b_load_and_forward_schedule(); - - /* Perform all 40 rounds */ - for (round = 40; round > 0; --round) { - /* Rotate the key schedule backwards */ - temp = w0; - w0 = w1; - w1 = w2; - w2 = w3; - w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | - ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); - - /* AddTweak - XOR in the tweak every 5 rounds except the last */ - if ((round % 5) == 0 && round < 40) - s0 ^= tweak; - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; - - /* InvPermBits - apply the inverse of the 128-bit permutation */ - INV_PERM0(s0); - INV_PERM1(s1); - INV_PERM2(s2); - INV_PERM3(s3); - - /* InvSubCells - apply the inverse of the S-box */ - temp = s0; - s0 = s3; - s3 = temp; - s2 ^= s0 & s1; - s3 ^= 0xFFFFFFFFU; - s1 ^= s3; - s3 ^= s2; - s2 ^= s0 | s1; - s0 ^= s1 & s3; - s1 ^= s0 & s2; - } - - /* Pack the state into the plaintext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#endif /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ - -#endif /* !GIFT128_VARIANT_ASM */ diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128.h b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128.h deleted file mode 100644 index f57d143..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128.h +++ /dev/null @@ -1,246 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_GIFT128_H -#define LW_INTERNAL_GIFT128_H - -/** - * \file internal-gift128.h - * \brief GIFT-128 block cipher. - * - * There are three versions of GIFT-128 in use within the second round - * submissions to the NIST lightweight cryptography competition. - * - * The most efficient version for 32-bit software implementation is the - * GIFT-128-b bit-sliced version from GIFT-COFB and SUNDAE-GIFT. - * - * The second is the nibble-based version from HYENA. We implement the - * HYENA version as a wrapper around the bit-sliced version. - * - * The third version is a variant on the HYENA nibble-based version that - * includes a 4-bit tweak value for domain separation. It is used by - * the ESTATE submission to NIST. - * - * Technically there is a fourth version of GIFT-128 which is the one that - * appeared in the original GIFT-128 paper. It is almost the same as the - * HYENA version except that the byte ordering is big-endian instead of - * HYENA's little-endian. The original version of GIFT-128 doesn't appear - * in any of the NIST submissions so we don't bother with it in this library. - * - * References: https://eprint.iacr.org/2017/622.pdf, - * https://eprint.iacr.org/2020/412.pdf, - * https://giftcipher.github.io/gift/ - */ - -#include -#include -#include "internal-gift128-config.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of a GIFT-128 block in bytes. - */ -#define GIFT128_BLOCK_SIZE 16 - -/** - * \var GIFT128_ROUND_KEYS - * \brief Number of round keys for the GIFT-128 key schedule. - */ -#if GIFT128_VARIANT == GIFT128_VARIANT_TINY -#define GIFT128_ROUND_KEYS 4 -#elif GIFT128_VARIANT == GIFT128_VARIANT_SMALL -#define GIFT128_ROUND_KEYS 20 -#else -#define GIFT128_ROUND_KEYS 80 -#endif - -/** - * \brief Structure of the key schedule for GIFT-128 (bit-sliced). - */ -typedef struct -{ - /** Pre-computed round keys for bit-sliced GIFT-128 */ - uint32_t k[GIFT128_ROUND_KEYS]; - -} gift128b_key_schedule_t; - -/** - * \brief Initializes the key schedule for GIFT-128 (bit-sliced). - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the 16 bytes of the key data. - */ -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced and pre-loaded). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version assumes that the input has already been pre-loaded from - * big-endian into host byte order in the supplied word array. 
The output - * is delivered in the same way. - */ -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]); - -/** - * \brief Decrypts a 128-bit block with GIFT-128 (bit-sliced). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - */ -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Structure of the key schedule for GIFT-128 (nibble-based). - */ -typedef gift128b_key_schedule_t gift128n_key_schedule_t; - -/** - * \brief Initializes the key schedule for GIFT-128 (nibble-based). - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the 16 bytes of the key data. - */ -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (nibble-based). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void gift128n_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with GIFT-128 (nibble-based). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - */ -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/* 4-bit tweak values expanded to 32-bit for TweGIFT-128 */ -#define GIFT128T_TWEAK_0 0x00000000 /**< TweGIFT-128 tweak value 0 */ -#define GIFT128T_TWEAK_1 0xe1e1e1e1 /**< TweGIFT-128 tweak value 1 */ -#define GIFT128T_TWEAK_2 0xd2d2d2d2 /**< TweGIFT-128 tweak value 2 */ -#define GIFT128T_TWEAK_3 0x33333333 /**< TweGIFT-128 tweak value 3 */ -#define GIFT128T_TWEAK_4 0xb4b4b4b4 /**< TweGIFT-128 tweak value 4 */ -#define GIFT128T_TWEAK_5 0x55555555 /**< TweGIFT-128 tweak value 5 */ -#define GIFT128T_TWEAK_6 0x66666666 /**< TweGIFT-128 tweak value 6 */ -#define GIFT128T_TWEAK_7 0x87878787 /**< TweGIFT-128 tweak value 7 */ -#define GIFT128T_TWEAK_8 0x78787878 /**< TweGIFT-128 tweak value 8 */ -#define GIFT128T_TWEAK_9 0x99999999 /**< TweGIFT-128 tweak value 9 */ -#define GIFT128T_TWEAK_10 0xaaaaaaaa /**< TweGIFT-128 tweak value 10 */ -#define GIFT128T_TWEAK_11 0x4b4b4b4b /**< TweGIFT-128 tweak value 11 */ -#define GIFT128T_TWEAK_12 0xcccccccc /**< TweGIFT-128 tweak value 12 */ -#define GIFT128T_TWEAK_13 0x2d2d2d2d /**< TweGIFT-128 tweak value 13 */ -#define GIFT128T_TWEAK_14 0x1e1e1e1e /**< TweGIFT-128 tweak value 14 */ -#define GIFT128T_TWEAK_15 0xffffffff /**< TweGIFT-128 tweak value 15 */ - -/** - * \brief Encrypts a 128-bit block with TweGIFT-128 (tweakable variant). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. 
- * \param tweak 4-bit tweak value expanded to 32-bit. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This variant of GIFT-128 is used by the ESTATE submission to the - * NIST Lightweight Cryptography Competition. A 4-bit tweak is added to - * some of the rounds to provide domain separation. If the tweak is - * zero, then this function is identical to gift128n_encrypt(). - */ -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak); - -/** - * \brief Decrypts a 128-bit block with TweGIFT-128 (tweakable variant). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value expanded to 32-bit. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This variant of GIFT-128 is used by the ESTATE submission to the - * NIST Lightweight Cryptography Competition. A 4-bit tweak is added to - * some of the rounds to provide domain separation. If the tweak is - * zero, then this function is identical to gift128n_encrypt(). - */ -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128b-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128b-avr.S deleted file mode 100644 index 641613a..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128b-avr.S +++ /dev/null @@ -1,2104 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 40 -table_0: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - - .text -.global gift128b_init - .type gift128b_init, @function -gift128b_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - std Z+4,r18 - std Z+5,r19 - std Z+6,r20 - std Z+7,r21 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - std Z+12,r18 - std Z+13,r19 - std Z+14,r20 - std Z+15,r21 - ret - .size gift128b_init, .-gift128b_init - - .text -.global gift128b_encrypt - .type gift128b_encrypt, @function -gift128b_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 36 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld 
r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r17,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - mov r16,r1 -46: - rcall 199f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - rcall 199f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - rcall 199f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - rcall 199f - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - ldi r17,40 - cpse r16,r17 - rjmp 46b - rjmp 548f -199: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - movw r18,r22 - movw r20,r2 - mov r0,r4 - and r0,r18 - eor r8,r0 - mov r0,r5 - and r0,r19 - eor r9,r0 - mov r0,r6 - and r0,r20 - eor r10,r0 - mov r0,r7 - and r0,r21 - eor r11,r0 - movw r22,r12 - movw r2,r14 - movw r12,r18 - movw r14,r20 - bst r22,1 - bld r0,0 - bst r22,4 - bld r22,1 - bst r2,0 - bld r22,4 - bst r22,2 - bld r2,0 - bst r23,0 - bld r22,2 - bst r22,3 - bld r23,0 - bst r23,4 - bld r22,3 - bst 
r2,3 - bld r23,4 - bst r23,6 - bld r2,3 - bst r3,3 - bld r23,6 - bst r23,5 - bld r3,3 - bst r2,7 - bld r23,5 - bst r3,6 - bld r2,7 - bst r3,1 - bld r3,6 - bst r22,5 - bld r3,1 - bst r2,4 - bld r22,5 - bst r2,2 - bld r2,4 - bst r23,2 - bld r2,2 - bst r23,3 - bld r23,2 - bst r23,7 - bld r23,3 - bst r3,7 - bld r23,7 - bst r3,5 - bld r3,7 - bst r2,5 - bld r3,5 - bst r2,6 - bld r2,5 - bst r3,2 - bld r2,6 - bst r23,1 - bld r3,2 - bst r22,7 - bld r23,1 - bst r3,4 - bld r22,7 - bst r2,1 - bld r3,4 - bst r22,6 - bld r2,1 - bst r3,0 - bld r22,6 - bst r0,0 - bld r3,0 - bst r4,0 - bld r0,0 - bst r4,1 - bld r4,0 - bst r4,5 - bld r4,1 - bst r6,5 - bld r4,5 - bst r6,7 - bld r6,5 - bst r7,7 - bld r6,7 - bst r7,6 - bld r7,7 - bst r7,2 - bld r7,6 - bst r5,2 - bld r7,2 - bst r5,0 - bld r5,2 - bst r0,0 - bld r5,0 - bst r4,2 - bld r0,0 - bst r5,1 - bld r4,2 - bst r4,4 - bld r5,1 - bst r6,1 - bld r4,4 - bst r4,7 - bld r6,1 - bst r7,5 - bld r4,7 - bst r6,6 - bld r7,5 - bst r7,3 - bld r6,6 - bst r5,6 - bld r7,3 - bst r7,0 - bld r5,6 - bst r0,0 - bld r7,0 - bst r4,3 - bld r0,0 - bst r5,5 - bld r4,3 - bst r6,4 - bld r5,5 - bst r6,3 - bld r6,4 - bst r5,7 - bld r6,3 - bst r7,4 - bld r5,7 - bst r6,2 - bld r7,4 - bst r5,3 - bld r6,2 - bst r5,4 - bld r5,3 - bst r6,0 - bld r5,4 - bst r0,0 - bld r6,0 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r8,2 - bld r8,0 - bst r9,2 - bld r8,2 - bst r9,1 - bld r9,2 - bst r8,5 - bld r9,1 - bst r10,6 - bld r8,5 - bst r11,0 - bld r10,6 - bst r8,3 - bld r11,0 - bst r9,6 - bld r8,3 - bst r11,1 - bld r9,6 - bst r8,7 - bld r11,1 - bst r11,6 - bld r8,7 - bst r11,3 - bld r11,6 - bst r9,7 - bld r11,3 - bst r11,5 - bld r9,7 - bst r10,7 - bld r11,5 - bst r11,4 - bld r10,7 - bst r10,3 - bld r11,4 - bst r9,4 - bld r10,3 - bst r10,1 - bld r9,4 - bst r8,4 - bld r10,1 - bst r10,2 - bld r8,4 - bst r9,0 - bld r10,2 - bst r8,1 - bld r9,0 - bst r8,6 - bld r8,1 - bst r11,2 - bld r8,6 - bst r9,3 - bld r11,2 - bst r9,5 - bld r9,3 - bst r10,5 - bld r9,5 - bst r10,4 - bld r10,5 - bst r10,0 - bld r10,4 - bst r0,0 - bld r10,0 - bst r12,0 - bld r0,0 - bst r12,3 - bld r12,0 - bst r13,7 - bld r12,3 - bst r15,6 - bld r13,7 - bst r15,0 - bld r15,6 - bst r0,0 - bld r15,0 - bst r12,1 - bld r0,0 - bst r12,7 - bld r12,1 - bst r15,7 - bld r12,7 - bst r15,4 - bld r15,7 - bst r14,0 - bld r15,4 - bst r0,0 - bld r14,0 - bst r12,2 - bld r0,0 - bst r13,3 - bld r12,2 - bst r13,6 - bld r13,3 - bst r15,2 - bld r13,6 - bst r13,0 - bld r15,2 - bst r0,0 - bld r13,0 - bst r12,4 - bld r0,0 - bst r14,3 - bld r12,4 - bst r13,5 - bld r14,3 - bst r14,6 - bld r13,5 - bst r15,1 - bld r14,6 - bst r0,0 - bld r15,1 - bst r12,5 - bld r0,0 - bst r14,7 - bld r12,5 - bst r15,5 - bld r14,7 - bst r14,4 - bld r15,5 - bst r14,1 - bld r14,4 - bst r0,0 - bld r14,1 - bst r12,6 - bld r0,0 - bst r15,3 - bld r12,6 - bst r13,4 - bld r15,3 - bst r14,2 - bld r13,4 - bst r13,1 - bld r14,2 - bst r0,0 - bld r13,1 - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - inc r16 - ret -548: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - 
out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt, .-gift128b_encrypt - - .text -.global gift128b_encrypt_preloaded - .type gift128b_encrypt_preloaded, @function -gift128b_encrypt_preloaded: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 36 - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r17,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - mov r16,r1 -46: - rcall 199f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - rcall 199f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - rcall 199f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - rcall 199f - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - ldi r17,40 - cpse r16,r17 - rjmp 46b - rjmp 548f -199: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and 
r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - movw r18,r22 - movw r20,r2 - mov r0,r4 - and r0,r18 - eor r8,r0 - mov r0,r5 - and r0,r19 - eor r9,r0 - mov r0,r6 - and r0,r20 - eor r10,r0 - mov r0,r7 - and r0,r21 - eor r11,r0 - movw r22,r12 - movw r2,r14 - movw r12,r18 - movw r14,r20 - bst r22,1 - bld r0,0 - bst r22,4 - bld r22,1 - bst r2,0 - bld r22,4 - bst r22,2 - bld r2,0 - bst r23,0 - bld r22,2 - bst r22,3 - bld r23,0 - bst r23,4 - bld r22,3 - bst r2,3 - bld r23,4 - bst r23,6 - bld r2,3 - bst r3,3 - bld r23,6 - bst r23,5 - bld r3,3 - bst r2,7 - bld r23,5 - bst r3,6 - bld r2,7 - bst r3,1 - bld r3,6 - bst r22,5 - bld r3,1 - bst r2,4 - bld r22,5 - bst r2,2 - bld r2,4 - bst r23,2 - bld r2,2 - bst r23,3 - bld r23,2 - bst r23,7 - bld r23,3 - bst r3,7 - bld r23,7 - bst r3,5 - bld r3,7 - bst r2,5 - bld r3,5 - bst r2,6 - bld r2,5 - bst r3,2 - bld r2,6 - bst r23,1 - bld r3,2 - bst r22,7 - bld r23,1 - bst r3,4 - bld r22,7 - bst r2,1 - bld r3,4 - bst r22,6 - bld r2,1 - bst r3,0 - bld r22,6 - bst r0,0 - bld r3,0 - bst r4,0 - bld r0,0 - bst r4,1 - bld r4,0 - bst r4,5 - bld r4,1 - bst r6,5 - bld r4,5 - bst r6,7 - bld r6,5 - bst r7,7 - bld r6,7 - bst r7,6 - bld r7,7 - bst r7,2 - bld r7,6 - bst r5,2 - bld r7,2 - bst r5,0 - bld r5,2 - bst r0,0 - bld r5,0 - bst r4,2 - bld r0,0 - bst r5,1 - bld r4,2 - bst r4,4 - bld r5,1 - bst r6,1 - bld r4,4 - bst r4,7 - bld r6,1 - bst r7,5 - bld r4,7 - bst r6,6 - bld r7,5 - bst r7,3 - bld r6,6 - bst r5,6 - bld r7,3 - bst r7,0 - bld r5,6 - bst r0,0 - bld r7,0 - bst r4,3 - bld r0,0 - bst r5,5 - bld r4,3 - bst r6,4 - bld r5,5 - bst r6,3 - bld r6,4 - bst r5,7 - bld r6,3 - bst r7,4 - bld r5,7 - bst r6,2 - bld r7,4 - bst r5,3 - bld r6,2 - bst r5,4 - bld r5,3 - bst r6,0 - bld r5,4 - bst r0,0 - bld r6,0 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r8,2 - bld r8,0 - bst r9,2 - bld r8,2 - bst r9,1 - bld r9,2 - bst r8,5 - bld r9,1 - bst r10,6 - bld r8,5 - bst r11,0 - bld r10,6 - bst r8,3 - bld r11,0 - bst r9,6 - bld r8,3 - bst r11,1 - bld r9,6 - bst r8,7 - bld r11,1 - bst r11,6 - bld r8,7 - bst r11,3 - bld r11,6 - bst r9,7 - bld r11,3 - bst r11,5 - bld r9,7 - bst r10,7 - bld r11,5 - bst r11,4 - bld r10,7 - bst r10,3 - bld r11,4 - bst r9,4 - bld r10,3 - bst r10,1 - bld r9,4 - bst r8,4 - bld r10,1 - bst r10,2 - bld r8,4 - bst r9,0 - bld r10,2 - bst r8,1 - bld r9,0 - bst r8,6 - bld r8,1 - bst r11,2 - bld r8,6 - bst r9,3 - bld r11,2 - bst r9,5 - bld r9,3 - bst r10,5 - bld r9,5 - bst r10,4 - bld r10,5 - bst r10,0 - bld r10,4 - bst r0,0 - bld r10,0 - bst r12,0 - bld r0,0 - bst r12,3 - bld r12,0 - bst r13,7 - bld r12,3 - bst r15,6 - bld r13,7 - bst r15,0 - bld r15,6 - bst r0,0 - bld r15,0 - bst r12,1 - bld r0,0 - bst r12,7 - bld r12,1 - bst r15,7 - bld r12,7 - bst r15,4 - bld r15,7 - bst r14,0 - bld r15,4 - bst r0,0 - bld r14,0 - bst r12,2 - bld r0,0 - bst r13,3 - bld r12,2 - bst r13,6 - bld r13,3 - bst r15,2 - bld r13,6 - bst r13,0 - bld r15,2 - bst r0,0 - bld r13,0 - bst r12,4 - bld r0,0 - bst r14,3 - bld r12,4 - bst r13,5 - bld r14,3 - bst r14,6 - bld r13,5 - bst r15,1 - bld r14,6 - bst r0,0 - bld r15,1 - bst r12,5 - bld r0,0 - bst r14,7 - bld r12,5 - bst r15,5 - bld r14,7 - bst r14,4 - bld r15,5 - bst 
r14,1 - bld r14,4 - bst r0,0 - bld r14,1 - bst r12,6 - bld r0,0 - bst r15,3 - bld r12,6 - bst r13,4 - bld r15,3 - bst r14,2 - bld r13,4 - bst r13,1 - bld r14,2 - bst r0,0 - bld r13,1 - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - inc r16 - ret -548: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded - - .text -.global gift128b_decrypt - .type gift128b_decrypt, @function -gift128b_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r17,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -114: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - std 
Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - cpse r16,r1 - rjmp 114b - rjmp 611f -266: - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - bld r11,3 - bst r8,7 - bld r11,6 - 
bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst r9,1 - bld r8,5 - bst r9,2 - bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -611: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_decrypt, .-gift128b_decrypt - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128b-full-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128b-full-avr.S deleted file mode 100644 index ff11875..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128b-full-avr.S +++ /dev/null @@ -1,5037 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 
136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - .byte 3 - .byte 3 - .byte 39 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - .byte 2 - .byte 0 - .byte 80 - .byte 20 - .byte 129 - .byte 1 - .byte 2 - .byte 1 - .byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128b_init - .type gift128b_init, @function -gift128b_init: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 18 - ld r13,X+ - ld r12,X+ - ld r11,X+ - ld r10,X+ - ld r5,X+ - ld r4,X+ - ld r3,X+ - ld r2,X+ - ld r9,X+ - ld r8,X+ - ld r7,X+ - ld r6,X+ - ld r29,X+ - ld r28,X+ - ld r23,X+ - ld r22,X+ - st Z+,r22 - st Z+,r23 - st Z+,r28 - st Z+,r29 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - ldi r24,4 -33: - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r29 - ror r28 - ror r0 - lsr r29 - ror r28 - ror r0 - or r29,r0 - st Z+,r22 - st Z+,r23 - st Z+,r28 - st Z+,r29 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r28 - mov r28,r4 - mov r4,r0 - mov r0,r29 - mov r29,r5 - mov r5,r0 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - mov r0,r6 - mov r6,r10 - mov r10,r0 - mov r0,r7 - mov r7,r11 - mov r11,r0 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - dec r24 - breq 5115f - rjmp 33b -5115: - subi r30,80 - sbc r31,r1 - ldi r24,2 -119: - ld r22,Z - ldd r23,Z+1 - ldd r28,Z+2 - ldd r29,Z+3 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - 
ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - st Z,r29 - std Z+1,r23 - std Z+2,r28 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r28,Z+6 - ldd r29,Z+7 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+4,r29 - std Z+5,r23 - std Z+6,r28 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r28,Z+10 - ldd r29,Z+11 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi 
r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+8,r29 - std Z+9,r23 - std Z+10,r28 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r28,Z+14 - ldd r29,Z+15 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+12,r29 - std Z+13,r23 - std Z+14,r28 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r28,Z+18 - ldd r29,Z+19 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor 
r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+16,r29 - std Z+17,r23 - std Z+18,r28 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r28,Z+22 - ldd r29,Z+23 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+20,r29 - std Z+21,r23 - std Z+22,r28 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r28,Z+26 - ldd r29,Z+27 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - 
mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+24,r29 - std Z+25,r23 - std Z+26,r28 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r28,Z+30 - ldd r29,Z+31 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+28,r29 - std Z+29,r23 - std Z+30,r28 - std Z+31,r22 - dec r24 - breq 1268f - adiw r30,40 - rjmp 119b -1268: - adiw r30,40 - movw r26,r30 - subi r26,80 - sbc r27,r1 - ldi r24,6 -1274: - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r2 - eor r19,r3 - andi r18,51 - andi r19,51 - eor r2,r18 - eor r3,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - st Z,r2 - std Z+1,r3 - std Z+2,r4 - std Z+3,r5 - movw r18,r22 - movw r20,r28 - andi r18,51 - andi r19,51 - andi r20,51 
- andi r21,51 - andi r22,204 - andi r23,204 - andi r28,204 - andi r29,204 - or r28,r21 - or r29,r18 - or r22,r19 - or r23,r20 - movw r18,r28 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r28 - eor r19,r29 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r28,r18 - eor r29,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r28,r18 - eor r29,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r28 - std Z+5,r29 - std Z+6,r22 - std Z+7,r23 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - swap r3 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - swap r5 - std Z+8,r2 - std Z+9,r3 - std Z+10,r4 - std Z+11,r5 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r28 - adc r28,r1 - lsl r28 - adc r28,r1 - lsl r28 - adc r28,r1 - lsl r29 - adc r29,r1 - lsl r29 - adc r29,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r28 - std Z+15,r29 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - ldi r25,85 - and r2,r25 - and r3,r25 - and r4,r25 - and r5,r25 - or r2,r19 - or r3,r20 - or r4,r21 - or r5,r18 - std Z+16,r4 - std Z+17,r5 - std Z+18,r2 - std Z+19,r3 - movw r18,r22 - movw r20,r28 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - andi r28,170 - andi r29,170 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - or r22,r18 - or r23,r19 - or r28,r20 - or r29,r21 - std Z+20,r29 - std Z+21,r22 - std Z+22,r23 - std Z+23,r28 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r14,r18 - movw r16,r20 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - eor r14,r18 - eor r15,r19 - eor r16,r20 - eor r17,r21 - ldi r25,8 - and r14,r25 - and r15,r25 - andi r16,8 - andi r17,8 - eor r18,r14 - eor r19,r15 - eor r20,r16 - eor r21,r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - eor r18,r14 - eor r19,r15 - eor r20,r16 - eor r21,r17 - ldi r17,15 - and r2,r17 - and r3,r17 - and r4,r17 - and r5,r17 - or r2,r18 - or r3,r19 - or r4,r20 - or r5,r21 - std Z+24,r2 - std Z+25,r3 - std Z+26,r4 - std Z+27,r5 - movw r18,r28 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r2,r22 - movw r4,r28 - ldi r16,1 - and r2,r16 - and r3,r16 - and r4,r16 - and r5,r16 - lsl r2 - rol r3 - rol r4 - rol r5 - lsl r2 - rol r3 - rol r4 - rol r5 - lsl r2 - rol r3 - rol r4 - rol r5 - or r2,r18 - or r3,r19 - 
movw r18,r28 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r2,r18 - or r3,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r4,r18 - or r5,r19 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r2,r18 - or r3,r19 - or r4,r20 - or r5,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r4,r22 - or r5,r23 - std Z+28,r2 - std Z+29,r3 - std Z+30,r4 - std Z+31,r5 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - mov r0,r1 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - or r5,r0 - std Z+32,r3 - std Z+33,r2 - std Z+34,r4 - std Z+35,r5 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r28 - mov r28,r29 - mov r29,r0 - lsl r28 - rol r29 - adc r28,r1 - lsl r28 - rol r29 - adc r28,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r28 - std Z+39,r29 - dec r24 - breq 1733f - adiw r30,40 - rjmp 1274b -1733: - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_init, .-gift128b_init - - .text -.global gift128b_encrypt - .type gift128b_encrypt, @function -gift128b_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 19 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - movw r26,r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rjmp 765f -27: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror 
r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld 
r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - 
lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -765: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - pop r0 - pop r0 - pop r17 - pop r15 - pop 
r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt, .-gift128b_encrypt - - .text -.global gift128b_encrypt_preloaded - .type gift128b_encrypt_preloaded, @function -gift128b_encrypt_preloaded: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 19 - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - movw r26,r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rjmp 765f -27: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov 
r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - 
mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor 
r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -765: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - pop r0 - pop r0 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded - - .text -.global gift128b_decrypt - .type gift128b_decrypt, @function -gift128b_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 19 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - movw r26,r30 - subi r26,192 - sbci r27,254 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,160 - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rjmp 768f -30: - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor 
r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r1 - lsr r22 - ror r0 - lsr r22 - ror r0 - or r22,r0 - mov r0,r1 - lsr r23 - ror r0 - lsr r23 - ror r0 - or r23,r0 - mov r0,r1 - lsr r2 - ror r0 - lsr r2 - ror r0 - or r2,r0 - mov r0,r1 - lsr r3 - ror r0 - lsr r3 - ror r0 - or r3,r0 - swap r4 - swap r5 - swap r6 - swap r7 - lsl r8 - adc r8,r1 - lsl r8 - adc r8,r1 - lsl r9 - adc r9,r1 - lsl r9 - adc r9,r1 - lsl r10 - adc r10,r1 - lsl r10 - adc r10,r1 - lsl r11 - adc r11,r1 - lsl r11 - adc r11,r1 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - com r22 - com r23 - com r2 - com r3 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 
- eor r23,r9 - eor r2,r10 - eor r3,r11 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if 
defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - com r22 - com r23 - com r2 - com r3 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,119 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r15 - ror r14 - ror r13 - ror r12 - lsr r15 - ror r14 - 
ror r13 - ror r12 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,17 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -768: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - pop r0 - pop r0 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_decrypt, .-gift128b_decrypt - -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128b-small-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128b-small-avr.S deleted file mode 100644 index 77ef9fd..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128b-small-avr.S +++ /dev/null @@ -1,6053 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - .byte 3 - .byte 3 - .byte 39 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 0 - .byte 3 - 
.byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - .byte 2 - .byte 0 - .byte 80 - .byte 20 - .byte 129 - .byte 1 - .byte 2 - .byte 1 - .byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128b_init - .type gift128b_init, @function -gift128b_init: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 16 - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - ldi r24,4 -33: - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - mov r0,r22 - mov r22,r4 - mov r4,r0 - mov r0,r23 - mov r23,r5 - mov r5,r0 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - dec r24 - breq 5115f - rjmp 33b -5115: - subi r30,80 - sbc r31,r1 - ldi r24,2 -119: - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - st Z,r3 - std Z+1,r23 - std Z+2,r2 - std 
Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+4,r3 - std Z+5,r23 - std Z+6,r2 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+8,r3 - std Z+9,r23 - std Z+10,r2 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r2,Z+14 - 
ldd r3,Z+15 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+12,r3 - std Z+13,r23 - std Z+14,r2 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r3 - std Z+17,r23 - std Z+18,r2 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - 
eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+20,r3 - std Z+21,r23 - std Z+22,r2 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+24,r3 - std Z+25,r23 - std Z+26,r2 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol 
r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+28,r3 - std Z+29,r23 - std Z+30,r2 - std Z+31,r22 - dec r24 - breq 1268f - adiw r30,40 - rjmp 119b -1268: - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size gift128b_init, .-gift128b_init - - .text -.global gift128b_encrypt - .type gift128b_encrypt, @function -gift128b_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ldi r24,20 -1: - ld r22,Z+ - ld r23,Z+ - ld r2,Z+ - ld r3,Z+ - std Y+1,r22 - std Y+2,r23 - std Y+3,r2 - std Y+4,r3 - adiw r28,4 - dec r24 - brne 1b - subi r28,80 - sbc r29,r1 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 73f -#if 
defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 73f - rcall 73f - rjmp 1285f -73: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - 
eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor 
r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov 
r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -811: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 
- swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror 
r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -1285: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt, .-gift128b_encrypt - - .text -.global gift128b_encrypt_preloaded - .type gift128b_encrypt_preloaded, @function -gift128b_encrypt_preloaded: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ldi r24,20 -1: - ld r22,Z+ - ld r23,Z+ - ld r2,Z+ - ld r3,Z+ - std Y+1,r22 - std Y+2,r23 - std Y+3,r2 - std Y+4,r3 - adiw r28,4 - dec r24 - brne 1b - subi r28,80 - sbc r29,r1 - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if 
defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 73f - rcall 73f - rjmp 1285f -73: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or 
r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld 
r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - 
com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -811: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 
- ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc 
r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -1285: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 40 -table_1: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - - .text -.global gift128b_decrypt - .type gift128b_decrypt, @function -gift128b_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor 
r20,r24 - eor r21,r25 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - 
eor r24,r20 - eor r25,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r17,hh8(table_1) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -678: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 830f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - std 
Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 830f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 830f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 830f - cpse r16,r1 - rjmp 678b - rjmp 1175f -830: - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - bld r11,3 - bst r8,7 - bld r11,6 - 
bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst r9,1 - bld r8,5 - bst r9,2 - bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -1175: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_decrypt, .-gift128b_decrypt - -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128b-tiny-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128b-tiny-avr.S deleted file mode 100644 index e7a03f1..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-gift128b-tiny-avr.S +++ /dev/null @@ -1,6766 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_TINY - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - .byte 
128 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - .byte 3 - .byte 3 - .byte 39 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - .byte 2 - .byte 0 - .byte 80 - .byte 20 - .byte 129 - .byte 1 - .byte 2 - .byte 1 - .byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128b_init - .type gift128b_init, @function -gift128b_init: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 16 - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - st Z,r22 - std Z+1,r23 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size gift128b_init, .-gift128b_init - - .text -.global gift128b_encrypt - .type gift128b_encrypt, @function -gift128b_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - movw r30,r28 - adiw r30,1 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - ldi r24,4 -35: - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - mov r0,r22 - mov r22,r4 - mov r4,r0 - mov r0,r23 - mov r23,r5 - mov r5,r0 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - lsl r8 
- rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - dec r24 - breq 5117f - rjmp 35b -5117: - subi r30,80 - sbc r31,r1 - ldi r24,2 -121: - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - st Z,r3 - std Z+1,r23 - std Z+2,r2 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - 
movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+4,r3 - std Z+5,r23 - std Z+6,r2 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+8,r3 - std Z+9,r23 - std Z+10,r2 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor 
r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+12,r3 - std Z+13,r23 - std Z+14,r2 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r3 - std Z+17,r23 - std Z+18,r2 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+20,r3 - std Z+21,r23 - std Z+22,r2 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - movw 
r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+24,r3 - std Z+25,r23 - std Z+26,r2 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+28,r3 - std Z+29,r23 - std Z+30,r2 - std Z+31,r22 - dec r24 
- breq 1270f - adiw r30,40 - rjmp 121b -1270: - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 1329f - rcall 1329f - rjmp 2541f -1329: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and 
r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - 
eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - 
swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -2067: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor 
r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 
- rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -2541: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt, .-gift128b_encrypt - - .text -.global gift128b_encrypt_preloaded - .type gift128b_encrypt_preloaded, @function -gift128b_encrypt_preloaded: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - movw r30,r28 - adiw r30,1 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - ldi r24,4 -35: - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - lsl r22 - rol r23 - 
adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - mov r0,r22 - mov r22,r4 - mov r4,r0 - mov r0,r23 - mov r23,r5 - mov r5,r0 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - dec r24 - breq 5117f - rjmp 35b -5117: - subi r30,80 - sbc r31,r1 - ldi r24,2 -121: - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - st Z,r3 - std Z+1,r23 - std Z+2,r2 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol 
r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+4,r3 - std Z+5,r23 - std Z+6,r2 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+8,r3 - std Z+9,r23 - std Z+10,r2 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - 
mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+12,r3 - std Z+13,r23 - std Z+14,r2 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r3 - std Z+17,r23 - std Z+18,r2 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 
- eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+20,r3 - std Z+21,r23 - std Z+22,r2 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+24,r3 - std Z+25,r23 - std Z+26,r2 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - 
eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+28,r3 - std Z+29,r23 - std Z+30,r2 - std Z+31,r22 - dec r24 - breq 1270f - adiw r30,40 - rjmp 121b -1270: - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 1329f - rcall 1329f - rjmp 2541f -1329: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - 
ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor 
r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor 
r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -2067: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - 
ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -2541: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 40 -table_1: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - 
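The 40 bytes of table_1 are the GIFT-128 round constants, consumed in reverse order by the gift128b_decrypt routine below. As a minimal sketch (plain C, independent of the patched sources; the function name is illustrative only), the same table can be reproduced from the cipher's 6-bit LFSR, which shifts the state left by one position and feeds back the complemented XOR of the two top bits:

#include <stdint.h>
#include <stdio.h>

/* Regenerate the 40 GIFT-128 round constants stored in table_1.
 * The state is a 6-bit LFSR seeded with 1; each step shifts left and
 * inserts (bit5 XOR bit4 XOR 1) as the new low bit. */
static void gift128_round_constants(uint8_t rc[40])
{
    uint8_t s = 0x01;
    for (int i = 0; i < 40; ++i) {
        rc[i] = s;
        s = ((s << 1) & 0x3E) | ((((s >> 5) ^ (s >> 4)) & 1) ^ 1);
    }
}

int main(void)
{
    uint8_t rc[40];
    gift128_round_constants(rc);
    for (int i = 0; i < 40; ++i)
        printf(".byte %u\n", rc[i]);  /* 1, 3, 7, 15, 31, 62, ..., 26 */
    return 0;
}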
- .text -.global gift128b_decrypt - .type gift128b_decrypt, @function -gift128b_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r17,hh8(table_1) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -114: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror 
r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - cpse r16,r1 - rjmp 114b - rjmp 611f -266: - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - bld r11,3 - bst r8,7 - bld r11,6 - bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst r9,1 - bld r8,5 - bst r9,2 - bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld 
r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -611: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_decrypt, .-gift128b_decrypt - -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-util.h b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/sundae-gift.c b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/sundae-gift.c deleted file mode 100644 index d192b8e..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/sundae-gift.c +++ /dev/null @@ -1,356 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "sundae-gift.h" -#include "internal-gift128.h" -#include "internal-util.h" -#include - -aead_cipher_t const sundae_gift_0_cipher = { - "SUNDAE-GIFT-0", - SUNDAE_GIFT_KEY_SIZE, - SUNDAE_GIFT_0_NONCE_SIZE, - SUNDAE_GIFT_TAG_SIZE, - AEAD_FLAG_NONE, - sundae_gift_0_aead_encrypt, - sundae_gift_0_aead_decrypt -}; - -aead_cipher_t const sundae_gift_64_cipher = { - "SUNDAE-GIFT-64", - SUNDAE_GIFT_KEY_SIZE, - SUNDAE_GIFT_64_NONCE_SIZE, - SUNDAE_GIFT_TAG_SIZE, - AEAD_FLAG_NONE, - sundae_gift_64_aead_encrypt, - sundae_gift_64_aead_decrypt -}; - -aead_cipher_t const sundae_gift_96_cipher = { - "SUNDAE-GIFT-96", - SUNDAE_GIFT_KEY_SIZE, - SUNDAE_GIFT_96_NONCE_SIZE, - SUNDAE_GIFT_TAG_SIZE, - AEAD_FLAG_NONE, - sundae_gift_96_aead_encrypt, - sundae_gift_96_aead_decrypt -}; - -aead_cipher_t const sundae_gift_128_cipher = { - "SUNDAE-GIFT-128", - SUNDAE_GIFT_KEY_SIZE, - SUNDAE_GIFT_128_NONCE_SIZE, - SUNDAE_GIFT_TAG_SIZE, - AEAD_FLAG_NONE, - sundae_gift_128_aead_encrypt, - sundae_gift_128_aead_decrypt -}; - -/* Multiply a block value by 2 in the special byte field */ -STATIC_INLINE void sundae_gift_multiply(unsigned char B[16]) -{ - unsigned char B0 = B[0]; - unsigned index; - for (index = 0; index < 15; ++index) - B[index] = B[index + 1]; - B[15] = B0; - B[10] ^= B0; - B[12] ^= B0; - B[14] ^= B0; -} - -/* Compute a MAC over the concatenation of two data buffers */ -static void sundae_gift_aead_mac - (const gift128b_key_schedule_t *ks, unsigned char V[16], - const unsigned char *data1, unsigned data1len, - const unsigned char *data2, unsigned long data2len) -{ - unsigned len; - - /* Nothing to do if the input is empty */ - if (!data1len && !data2len) - return; - - /* Format the first block. We assume that data1len <= 16 - * as it is will be the nonce if it is non-zero in length */ - lw_xor_block(V, data1, data1len); - len = 16 - data1len; - if (len > data2len) - len = (unsigned)data2len; - lw_xor_block(V + data1len, data2, len); - data2 += len; - data2len -= len; - len += data1len; - - /* Process as many full blocks as we can, except the last */ - while (data2len > 0) { - gift128b_encrypt(ks, V, V); - len = 16; - if (len > data2len) - len = (unsigned)data2len; - lw_xor_block(V, data2, len); - data2 += len; - data2len -= len; - } - - /* Pad and process the last block */ - if (len < 16) { - V[len] ^= 0x80; - sundae_gift_multiply(V); - gift128b_encrypt(ks, V, V); - } else { - sundae_gift_multiply(V); - sundae_gift_multiply(V); - gift128b_encrypt(ks, V, V); - } -} - -static int sundae_gift_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, unsigned npublen, - const unsigned char *k, unsigned char domainsep) -{ - gift128b_key_schedule_t ks; - unsigned char V[16]; - unsigned char T[16]; - unsigned char P[16]; - - /* Compute the length of the output ciphertext */ - *clen = mlen + SUNDAE_GIFT_TAG_SIZE; - - /* Set the key schedule */ - gift128b_init(&ks, k); - - /* Format and encrypt the initial domain separation block */ - if (adlen > 0) - domainsep |= 0x80; - if (mlen > 0) - domainsep |= 0x40; - V[0] = domainsep; - memset(V + 1, 0, sizeof(V) - 1); - gift128b_encrypt(&ks, T, V); - - /* Authenticate the nonce and the associated data */ - sundae_gift_aead_mac(&ks, T, npub, npublen, ad, adlen); - - /* Authenticate the plaintext */ - sundae_gift_aead_mac(&ks, T, 0, 0, m, mlen); - - /* Encrypt the plaintext to produce the ciphertext. 
We need to be - * careful how we manage the data because we could be doing in-place - * encryption. In SUNDAE-GIFT, the first 16 bytes of the ciphertext - * is the tag rather than the last 16 bytes in other algorithms. - * We need to swap the plaintext for the current block with the - * ciphertext or tag from the previous block */ - memcpy(V, T, 16); - while (mlen >= 16) { - gift128b_encrypt(&ks, V, V); - lw_xor_block_2_src(P, V, m, 16); - memcpy(c, T, 16); - memcpy(T, P, 16); - c += 16; - m += 16; - mlen -= 16; - } - if (mlen > 0) { - unsigned leftover = (unsigned)mlen; - gift128b_encrypt(&ks, V, V); - lw_xor_block(V, m, leftover); - memcpy(c, T, 16); - memcpy(c + 16, V, leftover); - } else { - memcpy(c, T, 16); - } - return 0; -} - -static int sundae_gift_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, unsigned npublen, - const unsigned char *k, unsigned char domainsep) -{ - gift128b_key_schedule_t ks; - unsigned char V[16]; - unsigned char T[16]; - unsigned char *mtemp; - unsigned long len; - - /* Bail out if the ciphertext is too short */ - if (clen < SUNDAE_GIFT_TAG_SIZE) - return -1; - len = *mlen = clen - SUNDAE_GIFT_TAG_SIZE; - - /* Set the key schedule */ - gift128b_init(&ks, k); - - /* Decrypt the ciphertext to produce the plaintext, using the - * tag as the initialization vector for the decryption process */ - memcpy(T, c, SUNDAE_GIFT_TAG_SIZE); - c += SUNDAE_GIFT_TAG_SIZE; - mtemp = m; - memcpy(V, T, 16); - while (len >= 16) { - gift128b_encrypt(&ks, V, V); - lw_xor_block_2_src(mtemp, c, V, 16); - c += 16; - mtemp += 16; - len -= 16; - } - if (len > 0) { - gift128b_encrypt(&ks, V, V); - lw_xor_block_2_src(mtemp, c, V, (unsigned)len); - } - - /* Format and encrypt the initial domain separation block */ - if (adlen > 0) - domainsep |= 0x80; - if (clen > SUNDAE_GIFT_TAG_SIZE) - domainsep |= 0x40; - V[0] = domainsep; - memset(V + 1, 0, sizeof(V) - 1); - gift128b_encrypt(&ks, V, V); - - /* Authenticate the nonce and the associated data */ - sundae_gift_aead_mac(&ks, V, npub, npublen, ad, adlen); - - /* Authenticate the plaintext */ - sundae_gift_aead_mac(&ks, V, 0, 0, m, *mlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, T, V, 16); -} - -int sundae_gift_0_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - (void)npub; - return sundae_gift_aead_encrypt - (c, clen, m, mlen, ad, adlen, 0, 0, k, 0x00); -} - -int sundae_gift_0_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - (void)npub; - return sundae_gift_aead_decrypt - (m, mlen, c, clen, ad, adlen, 0, 0, k, 0x00); -} - -int sundae_gift_64_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_encrypt - (c, clen, m, mlen, ad, adlen, - npub, SUNDAE_GIFT_64_NONCE_SIZE, k, 0x90); -} - -int sundae_gift_64_aead_decrypt 
- (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_decrypt - (m, mlen, c, clen, ad, adlen, - npub, SUNDAE_GIFT_64_NONCE_SIZE, k, 0x90); -} - -int sundae_gift_96_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_encrypt - (c, clen, m, mlen, ad, adlen, - npub, SUNDAE_GIFT_96_NONCE_SIZE, k, 0xA0); -} - -int sundae_gift_96_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_decrypt - (m, mlen, c, clen, ad, adlen, - npub, SUNDAE_GIFT_96_NONCE_SIZE, k, 0xA0); -} - -int sundae_gift_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_encrypt - (c, clen, m, mlen, ad, adlen, - npub, SUNDAE_GIFT_128_NONCE_SIZE, k, 0xB0); -} - -int sundae_gift_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_decrypt - (m, mlen, c, clen, ad, adlen, - npub, SUNDAE_GIFT_128_NONCE_SIZE, k, 0xB0); -} diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/sundae-gift.h b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/sundae-gift.h deleted file mode 100644 index 9040dd5..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys-avr/sundae-gift.h +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LWCRYPTO_SUNDAE_GIFT_H -#define LWCRYPTO_SUNDAE_GIFT_H - -#include "aead-common.h" - -/** - * \file sundae-gift.h - * \brief SUNDAE-GIFT encryption algorithm family. - * - * The SUNDAE-GIFT family consists of several related algorithms: - * - * \li SUNDAE-GIFT-0 with a 128-bit key, a 0-bit nonce, and 128-bit tag. - * \li SUNDAE-GIFT-64 with a 128-bit key, a 64-bit nonce, and 128-bit tag. - * \li SUNDAE-GIFT-96 with a 128-bit key, a 96-bit nonce, and 128-bit tag. - * This is the primary member of the family. - * \li SUNDAE-GIFT-128 with a 128-bit key, a 128-bit nonce, and 128-bit tag. - * - * SUNDAE-GIFT is resistant against nonce reuse as long as the combination - * of the associated data and plaintext is unique. - * - * If a nonce is reused (or there is no nonce in the case of SUNDAE-GIFT-0), - * then two packets with the same associated data and plaintext will encrypt - * to the same ciphertext. This will leak that the same plaintext has been - * sent for a second time but will not reveal the plaintext itself. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all SUNDAE-GIFT family members. - */ -#define SUNDAE_GIFT_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for all SUNDAE-GIFT family members. - */ -#define SUNDAE_GIFT_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SUNDAE-GIFT-0. - */ -#define SUNDAE_GIFT_0_NONCE_SIZE 0 - -/** - * \brief Size of the nonce for SUNDAE-GIFT-64. - */ -#define SUNDAE_GIFT_64_NONCE_SIZE 8 - -/** - * \brief Size of the nonce for SUNDAE-GIFT-96. - */ -#define SUNDAE_GIFT_96_NONCE_SIZE 12 - -/** - * \brief Size of the nonce for SUNDAE-GIFT-128. - */ -#define SUNDAE_GIFT_128_NONCE_SIZE 16 - -/** - * \brief Meta-information block for the SUNDAE-GIFT-0 cipher. - */ -extern aead_cipher_t const sundae_gift_0_cipher; - -/** - * \brief Meta-information block for the SUNDAE-GIFT-64 cipher. - */ -extern aead_cipher_t const sundae_gift_64_cipher; - -/** - * \brief Meta-information block for the SUNDAE-GIFT-96 cipher. - */ -extern aead_cipher_t const sundae_gift_96_cipher; - -/** - * \brief Meta-information block for the SUNDAE-GIFT-128 cipher. - */ -extern aead_cipher_t const sundae_gift_128_cipher; - -/** - * \brief Encrypts and authenticates a packet with SUNDAE-GIFT-0. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce - not used by this algorithm. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa sundae_gift_0_aead_decrypt() - */ -int sundae_gift_0_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SUNDAE-GIFT-0. - * - * \param m Buffer to receive the plaintext message on output. 
- * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce - not used by this algorithm. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa sundae_gift_0_aead_encrypt() - */ -int sundae_gift_0_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SUNDAE-GIFT-64. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 8 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa sundae_gift_64_aead_decrypt() - */ -int sundae_gift_64_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SUNDAE-GIFT-64. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 8 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
- * - * \sa sundae_gift_64_aead_encrypt() - */ -int sundae_gift_64_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SUNDAE-GIFT-96. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa sundae_gift_96_aead_decrypt() - */ -int sundae_gift_96_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SUNDAE-GIFT-96. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa sundae_gift_96_aead_encrypt() - */ -int sundae_gift_96_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SUNDAE-GIFT-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. 
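For reference, a minimal calling sketch for the SUNDAE-GIFT-96 entry points documented above, assuming only the prototypes and size macros from this header (the secret-nonce argument is unused by the algorithm, so NULL is passed; error handling is omitted; the demo function name is illustrative only):

    #include "sundae-gift.h"
    #include <stdio.h>

    static void sundae_gift_96_demo(void)
    {
        unsigned char key[SUNDAE_GIFT_KEY_SIZE] = {0};
        unsigned char nonce[SUNDAE_GIFT_96_NONCE_SIZE] = {0};
        unsigned char msg[4] = {'d', 'e', 'm', 'o'};
        unsigned char ad[2] = {0x01, 0x02};
        unsigned char ct[sizeof(msg) + SUNDAE_GIFT_TAG_SIZE];
        unsigned char pt[sizeof(msg)];
        unsigned long long ctlen, ptlen;

        /* Encrypt: the output is the 16-byte tag followed by the ciphertext */
        sundae_gift_96_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                                    ad, sizeof(ad), NULL, nonce, key);

        /* Decrypt: returns 0 only when the authentication tag verifies */
        if (sundae_gift_96_aead_decrypt(pt, &ptlen, NULL, ct, ctlen,
                                        ad, sizeof(ad), nonce, key) == 0)
            printf("tag verified, %llu plaintext bytes recovered\n", ptlen);
    }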
- * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa sundae_gift_128_aead_decrypt() - */ -int sundae_gift_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SUNDAE-GIFT-12896. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa sundae_gift_128_aead_encrypt() - */ -int sundae_gift_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128-config.h b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128-config.h new file mode 100644 index 0000000..62131ba --- /dev/null +++ b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128-config.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_GIFT128_CONFIG_H +#define LW_INTERNAL_GIFT128_CONFIG_H + +/** + * \file internal-gift128-config.h + * \brief Configures the variant of GIFT-128 to use. 
+ */ + +/** + * \brief Select the full variant of GIFT-128. + * + * The full variant requires 320 bytes for the key schedule and uses the + * fixslicing method to implement encryption and decryption. + */ +#define GIFT128_VARIANT_FULL 0 + +/** + * \brief Select the small variant of GIFT-128. + * + * The small variant requires 80 bytes for the key schedule. The rest + * of the key schedule is expanded on the fly during encryption. + * + * The fixslicing method is used to implement encryption and the slower + * bitslicing method is used to implement decryption. The small variant + * is suitable when memory is at a premium, decryption is not needed, + * but encryption performance is still important. + */ +#define GIFT128_VARIANT_SMALL 1 + +/** + * \brief Select the tiny variant of GIFT-128. + * + * The tiny variant requires 16 bytes for the key schedule and uses the + * bitslicing method to implement encryption and decryption. It is suitable + * for use when memory is very tight and performance is not critical. + */ +#define GIFT128_VARIANT_TINY 2 + +/** + * \def GIFT128_VARIANT + * \brief Selects the default variant of GIFT-128 to use on this platform. + */ +/** + * \def GIFT128_VARIANT_ASM + * \brief Defined to 1 if the GIFT-128 implementation has been replaced + * with an assembly code version. + */ +#if defined(__AVR__) && !defined(GIFT128_VARIANT_ASM) +#define GIFT128_VARIANT_ASM 1 +#endif +#if !defined(GIFT128_VARIANT) +#define GIFT128_VARIANT GIFT128_VARIANT_FULL +#endif +#if !defined(GIFT128_VARIANT_ASM) +#define GIFT128_VARIANT_ASM 0 +#endif + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128.c b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128.c index 681dbc8..c6ac5ec 100644 --- a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128.c +++ b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128.c @@ -23,8 +23,12 @@ #include "internal-gift128.h" #include "internal-util.h" +#if !GIFT128_VARIANT_ASM + +#if GIFT128_VARIANT != GIFT128_VARIANT_TINY + /* Round constants for GIFT-128 in the fixsliced representation */ -static uint32_t const GIFT128_RC[40] = { +static uint32_t const GIFT128_RC_fixsliced[40] = { 0x10000008, 0x80018000, 0x54000002, 0x01010181, 0x8000001f, 0x10888880, 0x6001e000, 0x51500002, 0x03030180, 0x8000002f, 0x10088880, 0x60016000, 0x41500002, 0x03030080, 0x80000027, 0x10008880, 0x4001e000, 0x11500002, @@ -34,6 +38,246 @@ static uint32_t const GIFT128_RC[40] = { 0xc001a000, 0x14500002, 0x01020181, 0x8000001a }; +#endif + +#if GIFT128_VARIANT != GIFT128_VARIANT_FULL + +/* Round constants for GIFT-128 in the bitsliced representation */ +static uint8_t const GIFT128_RC[40] = { + 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, + 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, + 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, + 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38, + 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A +}; + +#endif + +/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ +#define bit_permute_step(_y, mask, shift) \ + do { \ + uint32_t y = (_y); \ + uint32_t t = ((y >> (shift)) ^ y) & (mask); \ + (_y) = (y ^ t) ^ (t << (shift)); \ + } while (0) + +/* + * The permutation below was generated by the online permuation generator at + * "http://programming.sirrida.de/calcperm.php". 
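Returning briefly to the configuration header added above: the variant is fixed at compile time, and because the header only defaults GIFT128_VARIANT when it is not already defined, a port can pick a variant either on the compiler command line or before the header is first included. A hypothetical sketch, not part of this patch, selecting the small variant for an encrypt-only, RAM-constrained build:

    /* e.g. in a port-specific config file, or via -DGIFT128_VARIANT=... */
    #define GIFT128_VARIANT GIFT128_VARIANT_SMALL
    #include "internal-gift128-config.h"

    #if GIFT128_VARIANT == GIFT128_VARIANT_SMALL
    /* 20 round keys (80 bytes); fixsliced encryption, bitsliced decryption */
    #endif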
+ * + * All of the permutuations are essentially the same, except that each is + * rotated by 8 bits with respect to the next: + * + * P0: 0 24 16 8 1 25 17 9 2 26 18 10 3 27 19 11 4 28 20 12 5 29 21 13 6 30 22 14 7 31 23 15 + * P1: 8 0 24 16 9 1 25 17 10 2 26 18 11 3 27 19 12 4 28 20 13 5 29 21 14 6 30 22 15 7 31 23 + * P2: 16 8 0 24 17 9 1 25 18 10 2 26 19 11 3 27 20 12 4 28 21 13 5 29 22 14 6 30 23 15 7 31 + * P3: 24 16 8 0 25 17 9 1 26 18 10 2 27 19 11 3 28 20 12 4 29 21 13 5 30 22 14 6 31 23 15 7 + * + * The most efficient permutation from the online generator was P3, so we + * perform it as the core of the others, and then perform a final rotation. + * + * It is possible to do slightly better than "P3 then rotate" on desktop and + * server architectures for the other permutations. But the advantage isn't + * as evident on embedded platforms so we keep things simple. + */ +#define PERM3_INNER(x) \ + do { \ + bit_permute_step(x, 0x0a0a0a0a, 3); \ + bit_permute_step(x, 0x00cc00cc, 6); \ + bit_permute_step(x, 0x0000f0f0, 12); \ + bit_permute_step(x, 0x000000ff, 24); \ + } while (0) +#define PERM0(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate8(_x); \ + } while (0) +#define PERM1(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate16(_x); \ + } while (0) +#define PERM2(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate24(_x); \ + } while (0) +#define PERM3(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) + +#define INV_PERM3_INNER(x) \ + do { \ + bit_permute_step(x, 0x00550055, 9); \ + bit_permute_step(x, 0x00003333, 18); \ + bit_permute_step(x, 0x000f000f, 12); \ + bit_permute_step(x, 0x000000ff, 24); \ + } while (0) +#define INV_PERM0(x) \ + do { \ + uint32_t _x = rightRotate8(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM1(x) \ + do { \ + uint32_t _x = rightRotate16(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM2(x) \ + do { \ + uint32_t _x = rightRotate24(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM3(x) \ + do { \ + uint32_t _x = (x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) + +/** + * \brief Converts the GIFT-128 nibble-based representation into word-based. + * + * \param output Output buffer to write the word-based version to. + * \param input Input buffer to read the nibble-based version from. + * + * The \a input and \a output buffers can be the same buffer. + */ +static void gift128n_to_words + (unsigned char *output, const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + + /* Load the input buffer into 32-bit words. We use the nibble order + * from the HYENA submission to NIST which is byte-reversed with respect + * to the nibble order of the original GIFT-128 paper. Nibble zero is in + * the first byte instead of the last, which means little-endian order. */ + s0 = le_load_word32(input + 12); + s1 = le_load_word32(input + 8); + s2 = le_load_word32(input + 4); + s3 = le_load_word32(input); + + /* Rearrange the bits so that bits 0..3 of each nibble are + * scattered to bytes 0..3 of each word. The permutation is: + * + * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 + * + * Generated with "http://programming.sirrida.de/calcperm.php". 
+ */ + #define PERM_WORDS(_x) \ + do { \ + uint32_t x = (_x); \ + bit_permute_step(x, 0x0a0a0a0a, 3); \ + bit_permute_step(x, 0x00cc00cc, 6); \ + bit_permute_step(x, 0x0000f0f0, 12); \ + bit_permute_step(x, 0x0000ff00, 8); \ + (_x) = x; \ + } while (0) + PERM_WORDS(s0); + PERM_WORDS(s1); + PERM_WORDS(s2); + PERM_WORDS(s3); + + /* Rearrange the bytes and write them to the output buffer */ + output[0] = (uint8_t)s0; + output[1] = (uint8_t)s1; + output[2] = (uint8_t)s2; + output[3] = (uint8_t)s3; + output[4] = (uint8_t)(s0 >> 8); + output[5] = (uint8_t)(s1 >> 8); + output[6] = (uint8_t)(s2 >> 8); + output[7] = (uint8_t)(s3 >> 8); + output[8] = (uint8_t)(s0 >> 16); + output[9] = (uint8_t)(s1 >> 16); + output[10] = (uint8_t)(s2 >> 16); + output[11] = (uint8_t)(s3 >> 16); + output[12] = (uint8_t)(s0 >> 24); + output[13] = (uint8_t)(s1 >> 24); + output[14] = (uint8_t)(s2 >> 24); + output[15] = (uint8_t)(s3 >> 24); +} + +/** + * \brief Converts the GIFT-128 word-based representation into nibble-based. + * + * \param output Output buffer to write the nibble-based version to. + * \param input Input buffer to read the word-based version from. + */ +static void gift128n_to_nibbles + (unsigned char *output, const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + + /* Load the input bytes and rearrange them so that s0 contains the + * most significant nibbles and s3 contains the least significant */ + s0 = (((uint32_t)(input[12])) << 24) | + (((uint32_t)(input[8])) << 16) | + (((uint32_t)(input[4])) << 8) | + ((uint32_t)(input[0])); + s1 = (((uint32_t)(input[13])) << 24) | + (((uint32_t)(input[9])) << 16) | + (((uint32_t)(input[5])) << 8) | + ((uint32_t)(input[1])); + s2 = (((uint32_t)(input[14])) << 24) | + (((uint32_t)(input[10])) << 16) | + (((uint32_t)(input[6])) << 8) | + ((uint32_t)(input[2])); + s3 = (((uint32_t)(input[15])) << 24) | + (((uint32_t)(input[11])) << 16) | + (((uint32_t)(input[7])) << 8) | + ((uint32_t)(input[3])); + + /* Apply the inverse of PERM_WORDS() from the function above */ + #define INV_PERM_WORDS(_x) \ + do { \ + uint32_t x = (_x); \ + bit_permute_step(x, 0x00aa00aa, 7); \ + bit_permute_step(x, 0x0000cccc, 14); \ + bit_permute_step(x, 0x00f000f0, 4); \ + bit_permute_step(x, 0x0000ff00, 8); \ + (_x) = x; \ + } while (0) + INV_PERM_WORDS(s0); + INV_PERM_WORDS(s1); + INV_PERM_WORDS(s2); + INV_PERM_WORDS(s3); + + /* Store the result into the output buffer as 32-bit words */ + le_store_word32(output + 12, s0); + le_store_word32(output + 8, s1); + le_store_word32(output + 4, s2); + le_store_word32(output, s3); +} + +void gift128n_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + gift128n_to_words(output, input); + gift128b_encrypt(ks, output, output); + gift128n_to_nibbles(output, output); +} + +void gift128n_decrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + gift128n_to_words(output, input); + gift128b_decrypt(ks, output, output); + gift128n_to_nibbles(output, output); +} + +#if GIFT128_VARIANT != GIFT128_VARIANT_TINY + /** * \brief Swaps bits within two words. 
* @@ -202,21 +446,27 @@ static void gift128b_compute_round_keys /* Keys 8, 9, 18, and 19 do not need any adjustment */ } +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL /* Derive the fixsliced keys for the remaining rounds 11..40 */ for (index = 20; index < 80; index += 10) { gift128b_derive_keys(ks->k + index, ks->k + index - 20); } +#endif } -int gift128b_init - (gift128b_key_schedule_t *ks, const unsigned char *key, size_t key_len) +void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) { - if (!ks || !key || key_len != 16) - return 0; gift128b_compute_round_keys (ks, be_load_word32(key), be_load_word32(key + 4), be_load_word32(key + 8), be_load_word32(key + 12)); - return 1; +} + +void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) +{ + /* Use the little-endian key byte order from the HYENA submission */ + gift128b_compute_round_keys + (ks, le_load_word32(key + 12), le_load_word32(key + 8), + le_load_word32(key + 4), le_load_word32(key)); } /** @@ -521,11 +771,37 @@ int gift128b_init gift128b_inv_sbox(s3, s1, s2, s0); \ } while (0) +#else /* GIFT128_VARIANT_TINY */ + +void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) +{ + /* Mirror the fixslicing word order of 3, 1, 2, 0 */ + ks->k[0] = be_load_word32(key + 12); + ks->k[1] = be_load_word32(key + 4); + ks->k[2] = be_load_word32(key + 8); + ks->k[3] = be_load_word32(key); +} + +void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) +{ + /* Use the little-endian key byte order from the HYENA submission + * and mirror the fixslicing word order of 3, 1, 2, 0 */ + ks->k[0] = le_load_word32(key); + ks->k[1] = le_load_word32(key + 8); + ks->k[2] = le_load_word32(key + 4); + ks->k[3] = le_load_word32(key + 12); +} + +#endif /* GIFT128_VARIANT_TINY */ + +#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL + void gift128b_encrypt (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { uint32_t s0, s1, s2, s3; + uint32_t k[20]; /* Copy the plaintext into the state buffer and convert from big endian */ s0 = be_load_word32(input); @@ -534,14 +810,20 @@ void gift128b_encrypt s3 = be_load_word32(input + 12); /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer in big endian */ be_store_word32(output, s0); @@ -555,6 +837,7 @@ void 
gift128b_encrypt_preloaded const uint32_t input[4]) { uint32_t s0, s1, s2, s3; + uint32_t k[20]; /* Copy the plaintext into local variables */ s0 = input[0]; @@ -563,14 +846,20 @@ void gift128b_encrypt_preloaded s3 = input[3]; /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer */ output[0] = s0; @@ -579,7 +868,55 @@ void gift128b_encrypt_preloaded output[3] = s3; } -void gift128b_decrypt +void gift128t_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, uint32_t tweak) +{ + uint32_t s0, s1, s2, s3; + uint32_t k[20]; + + /* Copy the plaintext into the state buffer and convert from nibbles */ + gift128n_to_words(output, input); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* Perform all 40 rounds five at a time using the fixsliced method. 
+ * Every 5 rounds except the last we add the tweak value to the state */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + s0 ^= tweak; + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + s0 ^= tweak; + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + s0 ^= tweak; + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); + gift128n_to_nibbles(output, output); +} + +#elif GIFT128_VARIANT == GIFT128_VARIANT_FULL + +void gift128b_encrypt (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { @@ -592,14 +929,14 @@ void gift128b_decrypt s3 = be_load_word32(input + 12); /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer in big endian */ be_store_word32(output, s0); @@ -608,173 +945,308 @@ void gift128b_decrypt be_store_word32(output + 12, s3); } -int gift128n_init - (gift128n_key_schedule_t *ks, const unsigned char *key, size_t key_len) +void gift128b_encrypt_preloaded + (const gift128b_key_schedule_t *ks, uint32_t output[4], + const uint32_t input[4]) { - /* Use the little-endian key byte order from the HYENA submission */ - if (!ks || !key || key_len != 16) - return 0; - gift128b_compute_round_keys - (ks, le_load_word32(key + 12), le_load_word32(key + 8), - le_load_word32(key + 4), le_load_word32(key)); - return 1; + uint32_t s0, s1, s2, s3; + + /* Copy the plaintext into local variables */ + s0 = input[0]; + s1 = input[1]; + s2 = input[2]; + s3 = input[3]; + + /* Perform all 40 rounds five at a time using the fixsliced method */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(ks->k + 30, 
GIFT128_RC_fixsliced + 15); + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer */ + output[0] = s0; + output[1] = s1; + output[2] = s2; + output[3] = s3; } -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) +void gift128t_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, uint32_t tweak) +{ + uint32_t s0, s1, s2, s3; -/** - * \brief Converts the GIFT-128 nibble-based representation into word-based. - * - * \param output Output buffer to write the word-based version to. - * \param input Input buffer to read the nibble-based version from. - * - * The \a input and \a output buffers can be the same buffer. - */ -static void gift128n_to_words - (unsigned char *output, const unsigned char *input) + /* Copy the plaintext into the state buffer and convert from nibbles */ + gift128n_to_words(output, input); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* Perform all 40 rounds five at a time using the fixsliced method. + * Every 5 rounds except the last we add the tweak value to the state */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); + gift128n_to_nibbles(output, output); +} + +#else /* GIFT128_VARIANT_TINY */ + +void gift128b_encrypt + (const gift128b_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) { uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Load the input buffer into 32-bit words. We use the nibble order - * from the HYENA submission to NIST which is byte-reversed with respect - * to the nibble order of the original GIFT-128 paper. Nibble zero is in - * the first byte instead of the last, which means little-endian order. */ - s0 = le_load_word32(input + 12); - s1 = le_load_word32(input + 8); - s2 = le_load_word32(input + 4); - s3 = le_load_word32(input); + /* Copy the plaintext into the state buffer and convert from big endian */ + s0 = be_load_word32(input); + s1 = be_load_word32(input + 4); + s2 = be_load_word32(input + 8); + s3 = be_load_word32(input + 12); - /* Rearrange the bits so that bits 0..3 of each nibble are - * scattered to bytes 0..3 of each word. 
The permutation is: - * - * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 - * - * Generated with "http://programming.sirrida.de/calcperm.php". - */ - #define PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - PERM_WORDS(s0); - PERM_WORDS(s1); - PERM_WORDS(s2); - PERM_WORDS(s3); + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } - /* Rearrange the bytes and write them to the output buffer */ - output[0] = (uint8_t)s0; - output[1] = (uint8_t)s1; - output[2] = (uint8_t)s2; - output[3] = (uint8_t)s3; - output[4] = (uint8_t)(s0 >> 8); - output[5] = (uint8_t)(s1 >> 8); - output[6] = (uint8_t)(s2 >> 8); - output[7] = (uint8_t)(s3 >> 8); - output[8] = (uint8_t)(s0 >> 16); - output[9] = (uint8_t)(s1 >> 16); - output[10] = (uint8_t)(s2 >> 16); - output[11] = (uint8_t)(s3 >> 16); - output[12] = (uint8_t)(s0 >> 24); - output[13] = (uint8_t)(s1 >> 24); - output[14] = (uint8_t)(s2 >> 24); - output[15] = (uint8_t)(s3 >> 24); + /* Pack the state into the ciphertext buffer in big endian */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); } -/** - * \brief Converts the GIFT-128 word-based representation into nibble-based. - * - * \param output Output buffer to write the nibble-based version to. - * \param input Input buffer to read the word-based version from. 
- */ -static void gift128n_to_nibbles - (unsigned char *output, const unsigned char *input) +void gift128b_encrypt_preloaded + (const gift128b_key_schedule_t *ks, uint32_t output[4], + const uint32_t input[4]) { uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Load the input bytes and rearrange them so that s0 contains the - * most significant nibbles and s3 contains the least significant */ - s0 = (((uint32_t)(input[12])) << 24) | - (((uint32_t)(input[8])) << 16) | - (((uint32_t)(input[4])) << 8) | - ((uint32_t)(input[0])); - s1 = (((uint32_t)(input[13])) << 24) | - (((uint32_t)(input[9])) << 16) | - (((uint32_t)(input[5])) << 8) | - ((uint32_t)(input[1])); - s2 = (((uint32_t)(input[14])) << 24) | - (((uint32_t)(input[10])) << 16) | - (((uint32_t)(input[6])) << 8) | - ((uint32_t)(input[2])); - s3 = (((uint32_t)(input[15])) << 24) | - (((uint32_t)(input[11])) << 16) | - (((uint32_t)(input[7])) << 8) | - ((uint32_t)(input[3])); + /* Copy the plaintext into the state buffer */ + s0 = input[0]; + s1 = input[1]; + s2 = input[2]; + s3 = input[3]; - /* Apply the inverse of PERM_WORDS() from the function above */ - #define INV_PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x00aa00aa, 7); \ - bit_permute_step(x, 0x0000cccc, 14); \ - bit_permute_step(x, 0x00f000f0, 4); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - INV_PERM_WORDS(s0); - INV_PERM_WORDS(s1); - INV_PERM_WORDS(s2); - INV_PERM_WORDS(s3); + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } - /* Store the result into the output buffer as 32-bit words */ - le_store_word32(output + 12, s0); - le_store_word32(output + 8, s1); - le_store_word32(output + 4, s2); - le_store_word32(output, s3); + /* Pack the state into the ciphertext buffer */ + output[0] = s0; + output[1] = s1; + output[2] = s2; + output[3] = s3; } -void gift128n_encrypt +void gift128t_encrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) + const unsigned char *input, uint32_t tweak) { + uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; + + /* Copy the plaintext into the state buffer and convert from nibbles */ gift128n_to_words(output, input); - gift128b_encrypt(ks, output, output); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 
0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* AddTweak - XOR in the tweak every 5 rounds except the last */ + if (((round + 1) % 5) == 0 && round < 39) + s0 ^= tweak; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); gift128n_to_nibbles(output, output); } -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, +#endif /* GIFT128_VARIANT_TINY */ + +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL + +void gift128b_decrypt + (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { - gift128n_to_words(output, input); - gift128b_decrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} + uint32_t s0, s1, s2, s3; -/* 4-bit tweak values expanded to 32-bit */ -static uint32_t const GIFT128_tweaks[16] = { - 0x00000000, 0xe1e1e1e1, 0xd2d2d2d2, 0x33333333, - 0xb4b4b4b4, 0x55555555, 0x66666666, 0x87878787, - 0x78787878, 0x99999999, 0xaaaaaaaa, 0x4b4b4b4b, - 0xcccccccc, 0x2d2d2d2d, 0x1e1e1e1e, 0xffffffff -}; + /* Copy the plaintext into the state buffer and convert from big endian */ + s0 = be_load_word32(input); + s1 = be_load_word32(input + 4); + s2 = be_load_word32(input + 8); + s3 = be_load_word32(input + 12); -void gift128t_encrypt + /* Perform all 40 rounds five at a time using the fixsliced method */ + gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + + /* Pack the state into the ciphertext buffer in big endian */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); +} + +void gift128t_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak) + const unsigned char *input, uint32_t tweak) { - uint32_t s0, s1, s2, s3, tword; + uint32_t s0, s1, s2, s3; - /* Copy the plaintext into the state buffer and convert from nibbles */ + /* Copy the ciphertext into the state buffer and convert from nibbles */ gift128n_to_words(output, input); s0 = be_load_word32(output); s1 = be_load_word32(output + 4); @@ -782,25 +1254,24 @@ void gift128t_encrypt s3 = be_load_word32(output + 12); /* Perform all 40 rounds five at a time using the fixsliced method. 
- * Every 5 rounds except the last we add the tweak value to the state */ - tword = GIFT128_tweaks[tweak]; - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); + * Every 5 rounds except the first we add the tweak value to the state */ + gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - /* Pack the state into the ciphertext buffer in nibble form */ + /* Pack the state into the plaintext buffer in nibble form */ be_store_word32(output, s0); be_store_word32(output + 4, s1); be_store_word32(output + 8, s2); @@ -808,37 +1279,211 @@ void gift128t_encrypt gift128n_to_nibbles(output, output); } +#else /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ + +/* The small variant uses fixslicing for encryption, but we need to change + * to bitslicing for decryption because of the difficulty of fast-forwarding + * the fixsliced key schedule to the end. So the tiny variant is used for + * decryption when the small variant is selected. Since the NIST AEAD modes + * for GIFT-128 only use the block encrypt operation, the inefficiencies + * in decryption don't matter all that much */ + +/** + * \def gift128b_load_and_forward_schedule() + * \brief Generate the decryption key at the end of the last round. + * + * To do that, we run the block operation forward to determine the + * final state of the key schedule after the last round: + * + * w0 = ks->k[0]; + * w1 = ks->k[1]; + * w2 = ks->k[2]; + * w3 = ks->k[3]; + * for (round = 0; round < 40; ++round) { + * temp = w3; + * w3 = w2; + * w2 = w1; + * w1 = w0; + * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + * } + * + * We can short-cut all of the above by noticing that we don't need + * to do the word rotations. Every 4 rounds, the rotation alignment + * returns to the original position and each word has been rotated + * by applying the "2 right and 4 left" bit-rotation step to it. + * We then repeat that 10 times for the full 40 rounds. The overall + * effect is to apply a "20 right and 40 left" bit-rotation to every + * word in the key schedule. That is equivalent to "4 right and 8 left" + * on the 16-bit sub-words. 
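The "4 right and 8 left" equivalence claimed above is easy to check in isolation. A small stand-alone sketch (not part of this patch) that applies the per-round key rotation the ten times a given word sees across the 40 rounds and compares the result with the combined rotation used by the macro below:

    #include <stdint.h>
    #include <stdio.h>

    /* One application of the bit-sliced key update: rotate the top 16-bit
     * half right by 2 and the bottom 16-bit half left by 4. */
    static uint32_t key_update(uint32_t w)
    {
        return ((w & 0xFFFC0000U) >> 2) | ((w & 0x00030000U) << 14) |
               ((w & 0x00000FFFU) << 4) | ((w & 0x0000F000U) >> 12);
    }

    /* The short-cut: top half rotated right by 4, bottom half left by 8. */
    static uint32_t fast_forward(uint32_t w)
    {
        return ((w & 0xFFF00000U) >> 4) | ((w & 0x000F0000U) << 12) |
               ((w & 0x000000FFU) << 8) | ((w & 0x0000FF00U) >> 8);
    }

    int main(void)
    {
        uint32_t w = 0x12345678U, step = w;
        int i;
        for (i = 0; i < 10; ++i)  /* each word is updated 10 times in 40 rounds */
            step = key_update(step);
        printf("%s\n", step == fast_forward(w) ? "match" : "mismatch");
        return 0;
    }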
+ */ +#if GIFT128_VARIANT != GIFT128_VARIANT_SMALL +#define gift128b_load_and_forward_schedule() \ + do { \ + w0 = ks->k[3]; \ + w1 = ks->k[1]; \ + w2 = ks->k[2]; \ + w3 = ks->k[0]; \ + w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ + ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ + w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ + ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ + w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ + ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ + w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ + ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ + } while (0) +#else +/* The small variant needs to also undo some of the rotations that were + * done to generate the fixsliced version of the key schedule */ +#define gift128b_load_and_forward_schedule() \ + do { \ + w0 = ks->k[3]; \ + w1 = ks->k[1]; \ + w2 = ks->k[2]; \ + w3 = ks->k[0]; \ + gift128b_swap_move(w3, w3, 0x000000FFU, 24); \ + gift128b_swap_move(w3, w3, 0x00003333U, 18); \ + gift128b_swap_move(w3, w3, 0x000F000FU, 12); \ + gift128b_swap_move(w3, w3, 0x00550055U, 9); \ + gift128b_swap_move(w1, w1, 0x000000FFU, 24); \ + gift128b_swap_move(w1, w1, 0x00003333U, 18); \ + gift128b_swap_move(w1, w1, 0x000F000FU, 12); \ + gift128b_swap_move(w1, w1, 0x00550055U, 9); \ + gift128b_swap_move(w2, w2, 0x000000FFU, 24); \ + gift128b_swap_move(w2, w2, 0x000F000FU, 12); \ + gift128b_swap_move(w2, w2, 0x03030303U, 6); \ + gift128b_swap_move(w2, w2, 0x11111111U, 3); \ + gift128b_swap_move(w0, w0, 0x000000FFU, 24); \ + gift128b_swap_move(w0, w0, 0x000F000FU, 12); \ + gift128b_swap_move(w0, w0, 0x03030303U, 6); \ + gift128b_swap_move(w0, w0, 0x11111111U, 3); \ + w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ + ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ + w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ + ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ + w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ + ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ + w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ + ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ + } while (0) +#endif + +void gift128b_decrypt + (const gift128b_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; + + /* Copy the ciphertext into the state buffer and convert from big endian */ + s0 = be_load_word32(input); + s1 = be_load_word32(input + 4); + s2 = be_load_word32(input + 8); + s3 = be_load_word32(input + 12); + + /* Generate the decryption key at the end of the last round */ + gift128b_load_and_forward_schedule(); + + /* Perform all 40 rounds */ + for (round = 40; round > 0; --round) { + /* Rotate the key schedule backwards */ + temp = w0; + w0 = w1; + w1 = w2; + w2 = w3; + w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | + ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; + + /* InvPermBits - apply the inverse of the 128-bit permutation */ + INV_PERM0(s0); + INV_PERM1(s1); + INV_PERM2(s2); + INV_PERM3(s3); + + /* InvSubCells - apply the inverse of the S-box */ + temp = s0; + s0 = s3; + s3 = temp; + s2 ^= s0 & s1; + s3 ^= 0xFFFFFFFFU; + s1 ^= s3; + s3 ^= s2; + s2 ^= s0 | s1; + s0 ^= s1 
& s3; + s1 ^= s0 & s2; + } + + /* Pack the state into the plaintext buffer in big endian */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); +} + void gift128t_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak) + const unsigned char *input, uint32_t tweak) { - uint32_t s0, s1, s2, s3, tword; + uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Copy the ciphertext into the state buffer and convert from nibbles */ + /* Copy the plaintext into the state buffer and convert from nibbles */ gift128n_to_words(output, input); s0 = be_load_word32(output); s1 = be_load_word32(output + 4); s2 = be_load_word32(output + 8); s3 = be_load_word32(output + 12); - /* Perform all 40 rounds five at a time using the fixsliced method. - * Every 5 rounds except the first we add the tweak value to the state */ - tword = GIFT128_tweaks[tweak]; - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC); + /* Generate the decryption key at the end of the last round */ + gift128b_load_and_forward_schedule(); + + /* Perform all 40 rounds */ + for (round = 40; round > 0; --round) { + /* Rotate the key schedule backwards */ + temp = w0; + w0 = w1; + w1 = w2; + w2 = w3; + w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | + ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); + + /* AddTweak - XOR in the tweak every 5 rounds except the last */ + if ((round % 5) == 0 && round < 40) + s0 ^= tweak; + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; + + /* InvPermBits - apply the inverse of the 128-bit permutation */ + INV_PERM0(s0); + INV_PERM1(s1); + INV_PERM2(s2); + INV_PERM3(s3); + + /* InvSubCells - apply the inverse of the S-box */ + temp = s0; + s0 = s3; + s3 = temp; + s2 ^= s0 & s1; + s3 ^= 0xFFFFFFFFU; + s1 ^= s3; + s3 ^= s2; + s2 ^= s0 | s1; + s0 ^= s1 & s3; + s1 ^= s0 & s2; + } /* Pack the state into the plaintext buffer in nibble form */ be_store_word32(output, s0); @@ -847,3 +1492,7 @@ void gift128t_decrypt be_store_word32(output + 12, s3); gift128n_to_nibbles(output, output); } + +#endif /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ + +#endif /* !GIFT128_VARIANT_ASM */ diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128.h b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128.h index 1ac40e5..f57d143 100644 --- a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128.h +++ b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128.h @@ -47,11 +47,13 @@ * in any of the NIST submissions so we don't bother with it in this library. 
* * References: https://eprint.iacr.org/2017/622.pdf, + * https://eprint.iacr.org/2020/412.pdf, * https://giftcipher.github.io/gift/ */ #include #include +#include "internal-gift128-config.h" #ifdef __cplusplus extern "C" { @@ -63,16 +65,23 @@ extern "C" { #define GIFT128_BLOCK_SIZE 16 /** - * \brief Number of round keys for the fixsliced representation of GIFT-128. + * \var GIFT128_ROUND_KEYS + * \brief Number of round keys for the GIFT-128 key schedule. */ +#if GIFT128_VARIANT == GIFT128_VARIANT_TINY +#define GIFT128_ROUND_KEYS 4 +#elif GIFT128_VARIANT == GIFT128_VARIANT_SMALL +#define GIFT128_ROUND_KEYS 20 +#else #define GIFT128_ROUND_KEYS 80 +#endif /** * \brief Structure of the key schedule for GIFT-128 (bit-sliced). */ typedef struct { - /** Pre-computed round keys in the fixsliced form */ + /** Pre-computed round keys for bit-sliced GIFT-128 */ uint32_t k[GIFT128_ROUND_KEYS]; } gift128b_key_schedule_t; @@ -81,14 +90,9 @@ typedef struct * \brief Initializes the key schedule for GIFT-128 (bit-sliced). * * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. + * \param key Points to the 16 bytes of the key data. */ -int gift128b_init - (gift128b_key_schedule_t *ks, const unsigned char *key, size_t key_len); +void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key); /** * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced). @@ -145,14 +149,9 @@ typedef gift128b_key_schedule_t gift128n_key_schedule_t; * \brief Initializes the key schedule for GIFT-128 (nibble-based). * * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. + * \param key Points to the 16 bytes of the key data. */ -int gift128n_init - (gift128n_key_schedule_t *ks, const unsigned char *key, size_t key_len); +void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key); /** * \brief Encrypts a 128-bit block with GIFT-128 (nibble-based). 
@@ -182,13 +181,31 @@ void gift128n_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, const unsigned char *input); +/* 4-bit tweak values expanded to 32-bit for TweGIFT-128 */ +#define GIFT128T_TWEAK_0 0x00000000 /**< TweGIFT-128 tweak value 0 */ +#define GIFT128T_TWEAK_1 0xe1e1e1e1 /**< TweGIFT-128 tweak value 1 */ +#define GIFT128T_TWEAK_2 0xd2d2d2d2 /**< TweGIFT-128 tweak value 2 */ +#define GIFT128T_TWEAK_3 0x33333333 /**< TweGIFT-128 tweak value 3 */ +#define GIFT128T_TWEAK_4 0xb4b4b4b4 /**< TweGIFT-128 tweak value 4 */ +#define GIFT128T_TWEAK_5 0x55555555 /**< TweGIFT-128 tweak value 5 */ +#define GIFT128T_TWEAK_6 0x66666666 /**< TweGIFT-128 tweak value 6 */ +#define GIFT128T_TWEAK_7 0x87878787 /**< TweGIFT-128 tweak value 7 */ +#define GIFT128T_TWEAK_8 0x78787878 /**< TweGIFT-128 tweak value 8 */ +#define GIFT128T_TWEAK_9 0x99999999 /**< TweGIFT-128 tweak value 9 */ +#define GIFT128T_TWEAK_10 0xaaaaaaaa /**< TweGIFT-128 tweak value 10 */ +#define GIFT128T_TWEAK_11 0x4b4b4b4b /**< TweGIFT-128 tweak value 11 */ +#define GIFT128T_TWEAK_12 0xcccccccc /**< TweGIFT-128 tweak value 12 */ +#define GIFT128T_TWEAK_13 0x2d2d2d2d /**< TweGIFT-128 tweak value 13 */ +#define GIFT128T_TWEAK_14 0x1e1e1e1e /**< TweGIFT-128 tweak value 14 */ +#define GIFT128T_TWEAK_15 0xffffffff /**< TweGIFT-128 tweak value 15 */ + /** * \brief Encrypts a 128-bit block with TweGIFT-128 (tweakable variant). * * \param ks Points to the GIFT-128 key schedule. * \param output Output buffer which must be at least 16 bytes in length. * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value. + * \param tweak 4-bit tweak value expanded to 32-bit. * * The \a input and \a output buffers can be the same buffer for * in-place encryption. @@ -200,7 +217,7 @@ void gift128n_decrypt */ void gift128t_encrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak); + const unsigned char *input, uint32_t tweak); /** * \brief Decrypts a 128-bit block with TweGIFT-128 (tweakable variant). @@ -208,7 +225,7 @@ void gift128t_encrypt * \param ks Points to the GIFT-128 key schedule. * \param output Output buffer which must be at least 16 bytes in length. * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value. + * \param tweak 4-bit tweak value expanded to 32-bit. * * The \a input and \a output buffers can be the same buffer for * in-place encryption. 
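The hunks above change the TweGIFT-128 call interface: gift128b_init/gift128n_init now take exactly 16 key bytes and return void, and the tweak argument becomes one of the pre-expanded 32-bit GIFT128T_TWEAK_* constants instead of a raw 4-bit value. A minimal caller sketch against the updated header follows; the key and buffer contents are placeholders, not test vectors:

#include <string.h>
#include "internal-gift128.h"

/* Illustrative one-block round trip with the updated TweGIFT-128 API. */
static int gift128t_roundtrip_example(void)
{
    gift128n_key_schedule_t ks;
    unsigned char key[16] = {0};                 /* fixed 16-byte key; no length parameter */
    unsigned char pt[GIFT128_BLOCK_SIZE] = {0};
    unsigned char ct[GIFT128_BLOCK_SIZE];
    unsigned char out[GIFT128_BLOCK_SIZE];

    gift128n_init(&ks, key);                     /* void return in the new API */

    /* Pass a pre-expanded tweak constant rather than the bare 4-bit value 1. */
    gift128t_encrypt(&ks, ct, pt, GIFT128T_TWEAK_1);
    gift128t_decrypt(&ks, out, ct, GIFT128T_TWEAK_1);

    return memcmp(out, pt, GIFT128_BLOCK_SIZE) == 0;
}

Callers that previously passed the bare nibble map it through the corresponding GIFT128T_TWEAK_* constant; the per-call GIFT128_tweaks[tweak] table lookup removed earlier in this patch is thereby replaced by a compile-time constant.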
@@ -220,7 +237,7 @@ void gift128t_encrypt */ void gift128t_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak); + const unsigned char *input, uint32_t tweak); #ifdef __cplusplus } diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128b-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128b-avr.S new file mode 100644 index 0000000..641613a --- /dev/null +++ b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128b-avr.S @@ -0,0 +1,2104 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 40 +table_0: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 11 + .byte 23 + .byte 46 + .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128b_init + .type gift128b_init, @function +gift128b_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + std Z+4,r18 + std Z+5,r19 + std Z+6,r20 + std Z+7,r21 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + std Z+12,r18 + std Z+13,r19 + std Z+14,r20 + std Z+15,r21 + ret + .size gift128b_init, .-gift128b_init + + .text +.global gift128b_encrypt + .type gift128b_encrypt, @function +gift128b_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 36 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r17,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + mov r16,r1 +46: + rcall 199f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + rcall 199f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd 
r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + rcall 199f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + rcall 199f + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + ldi r17,40 + cpse r16,r17 + rjmp 46b + rjmp 548f +199: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + movw r18,r22 + movw r20,r2 + mov r0,r4 + and r0,r18 + eor r8,r0 + mov r0,r5 + and r0,r19 + eor r9,r0 + mov r0,r6 + and r0,r20 + eor r10,r0 + mov r0,r7 + and r0,r21 + eor r11,r0 + movw r22,r12 + movw r2,r14 + movw r12,r18 + movw r14,r20 + bst r22,1 + bld r0,0 + bst r22,4 + bld r22,1 + bst r2,0 + bld r22,4 + bst r22,2 + bld r2,0 + bst r23,0 + bld r22,2 + bst r22,3 + bld r23,0 + bst r23,4 + bld r22,3 + bst r2,3 + bld r23,4 + bst r23,6 + bld r2,3 + bst r3,3 + bld r23,6 + bst r23,5 + bld r3,3 + bst r2,7 + bld r23,5 + bst r3,6 + bld r2,7 + bst r3,1 + bld r3,6 + bst r22,5 + bld r3,1 + bst r2,4 + bld r22,5 + bst r2,2 + bld r2,4 + bst r23,2 + bld r2,2 + bst r23,3 + bld r23,2 + bst r23,7 + bld r23,3 + bst r3,7 + bld r23,7 + bst r3,5 + bld r3,7 + bst r2,5 + bld r3,5 + bst r2,6 + bld r2,5 + bst r3,2 + bld r2,6 + bst r23,1 + bld r3,2 + bst r22,7 + bld r23,1 + bst r3,4 + bld r22,7 + bst r2,1 + bld r3,4 + bst r22,6 + bld r2,1 + bst r3,0 + bld r22,6 + bst r0,0 + bld r3,0 + bst r4,0 + bld r0,0 + bst r4,1 + bld r4,0 + bst r4,5 + bld r4,1 + bst r6,5 + bld r4,5 + bst r6,7 + bld r6,5 + bst r7,7 + bld r6,7 + bst r7,6 + bld r7,7 + bst r7,2 + bld r7,6 + bst r5,2 + bld r7,2 + bst r5,0 + bld r5,2 + bst r0,0 + bld r5,0 + bst r4,2 + bld r0,0 + bst r5,1 + bld r4,2 + bst r4,4 + bld r5,1 + bst r6,1 + bld r4,4 + bst r4,7 + bld r6,1 + bst r7,5 + bld r4,7 + bst r6,6 + bld r7,5 + bst r7,3 + bld r6,6 + bst r5,6 + bld r7,3 + bst r7,0 + bld r5,6 + bst r0,0 + bld r7,0 + bst r4,3 + bld r0,0 + bst r5,5 + bld r4,3 + bst r6,4 + bld r5,5 + bst r6,3 + bld r6,4 + bst r5,7 + bld r6,3 + bst r7,4 + bld r5,7 + bst r6,2 + bld r7,4 + bst r5,3 + bld r6,2 + bst 
r5,4 + bld r5,3 + bst r6,0 + bld r5,4 + bst r0,0 + bld r6,0 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r8,2 + bld r8,0 + bst r9,2 + bld r8,2 + bst r9,1 + bld r9,2 + bst r8,5 + bld r9,1 + bst r10,6 + bld r8,5 + bst r11,0 + bld r10,6 + bst r8,3 + bld r11,0 + bst r9,6 + bld r8,3 + bst r11,1 + bld r9,6 + bst r8,7 + bld r11,1 + bst r11,6 + bld r8,7 + bst r11,3 + bld r11,6 + bst r9,7 + bld r11,3 + bst r11,5 + bld r9,7 + bst r10,7 + bld r11,5 + bst r11,4 + bld r10,7 + bst r10,3 + bld r11,4 + bst r9,4 + bld r10,3 + bst r10,1 + bld r9,4 + bst r8,4 + bld r10,1 + bst r10,2 + bld r8,4 + bst r9,0 + bld r10,2 + bst r8,1 + bld r9,0 + bst r8,6 + bld r8,1 + bst r11,2 + bld r8,6 + bst r9,3 + bld r11,2 + bst r9,5 + bld r9,3 + bst r10,5 + bld r9,5 + bst r10,4 + bld r10,5 + bst r10,0 + bld r10,4 + bst r0,0 + bld r10,0 + bst r12,0 + bld r0,0 + bst r12,3 + bld r12,0 + bst r13,7 + bld r12,3 + bst r15,6 + bld r13,7 + bst r15,0 + bld r15,6 + bst r0,0 + bld r15,0 + bst r12,1 + bld r0,0 + bst r12,7 + bld r12,1 + bst r15,7 + bld r12,7 + bst r15,4 + bld r15,7 + bst r14,0 + bld r15,4 + bst r0,0 + bld r14,0 + bst r12,2 + bld r0,0 + bst r13,3 + bld r12,2 + bst r13,6 + bld r13,3 + bst r15,2 + bld r13,6 + bst r13,0 + bld r15,2 + bst r0,0 + bld r13,0 + bst r12,4 + bld r0,0 + bst r14,3 + bld r12,4 + bst r13,5 + bld r14,3 + bst r14,6 + bld r13,5 + bst r15,1 + bld r14,6 + bst r0,0 + bld r15,1 + bst r12,5 + bld r0,0 + bst r14,7 + bld r12,5 + bst r15,5 + bld r14,7 + bst r14,4 + bld r15,5 + bst r14,1 + bld r14,4 + bst r0,0 + bld r14,1 + bst r12,6 + bld r0,0 + bst r15,3 + bld r12,6 + bst r13,4 + bld r15,3 + bst r14,2 + bld r13,4 + bst r13,1 + bld r14,2 + bst r0,0 + bld r13,1 + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + inc r16 + ret +548: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt, .-gift128b_encrypt + + .text +.global gift128b_encrypt_preloaded + .type gift128b_encrypt_preloaded, @function +gift128b_encrypt_preloaded: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 36 + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + std Y+9,r26 + std 
Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r17,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + mov r16,r1 +46: + rcall 199f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + rcall 199f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + rcall 199f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + rcall 199f + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + ldi r17,40 + cpse r16,r17 + rjmp 46b + rjmp 548f +199: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + movw r18,r22 + movw r20,r2 + mov r0,r4 + and r0,r18 + eor r8,r0 + mov r0,r5 + and r0,r19 + eor r9,r0 + mov r0,r6 + and r0,r20 + eor r10,r0 + mov r0,r7 + and r0,r21 + eor r11,r0 + movw r22,r12 + movw r2,r14 + movw r12,r18 + movw r14,r20 + bst r22,1 + bld r0,0 + bst r22,4 + bld r22,1 + bst r2,0 + bld r22,4 + bst r22,2 + bld r2,0 + bst r23,0 + bld r22,2 + bst r22,3 + bld r23,0 + bst r23,4 + bld r22,3 + bst r2,3 + bld r23,4 + bst r23,6 + bld r2,3 + bst r3,3 + bld r23,6 + bst r23,5 + bld r3,3 + bst r2,7 + bld r23,5 + bst r3,6 + bld r2,7 + bst r3,1 + bld r3,6 + bst r22,5 + bld r3,1 + bst r2,4 + bld r22,5 + bst r2,2 + bld r2,4 + bst r23,2 + bld r2,2 + bst r23,3 + bld r23,2 + bst r23,7 + bld r23,3 + bst r3,7 + bld r23,7 + bst r3,5 + bld r3,7 + bst r2,5 + bld r3,5 + bst r2,6 + bld r2,5 + bst r3,2 + bld r2,6 + bst r23,1 + bld r3,2 + bst 
r22,7 + bld r23,1 + bst r3,4 + bld r22,7 + bst r2,1 + bld r3,4 + bst r22,6 + bld r2,1 + bst r3,0 + bld r22,6 + bst r0,0 + bld r3,0 + bst r4,0 + bld r0,0 + bst r4,1 + bld r4,0 + bst r4,5 + bld r4,1 + bst r6,5 + bld r4,5 + bst r6,7 + bld r6,5 + bst r7,7 + bld r6,7 + bst r7,6 + bld r7,7 + bst r7,2 + bld r7,6 + bst r5,2 + bld r7,2 + bst r5,0 + bld r5,2 + bst r0,0 + bld r5,0 + bst r4,2 + bld r0,0 + bst r5,1 + bld r4,2 + bst r4,4 + bld r5,1 + bst r6,1 + bld r4,4 + bst r4,7 + bld r6,1 + bst r7,5 + bld r4,7 + bst r6,6 + bld r7,5 + bst r7,3 + bld r6,6 + bst r5,6 + bld r7,3 + bst r7,0 + bld r5,6 + bst r0,0 + bld r7,0 + bst r4,3 + bld r0,0 + bst r5,5 + bld r4,3 + bst r6,4 + bld r5,5 + bst r6,3 + bld r6,4 + bst r5,7 + bld r6,3 + bst r7,4 + bld r5,7 + bst r6,2 + bld r7,4 + bst r5,3 + bld r6,2 + bst r5,4 + bld r5,3 + bst r6,0 + bld r5,4 + bst r0,0 + bld r6,0 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r8,2 + bld r8,0 + bst r9,2 + bld r8,2 + bst r9,1 + bld r9,2 + bst r8,5 + bld r9,1 + bst r10,6 + bld r8,5 + bst r11,0 + bld r10,6 + bst r8,3 + bld r11,0 + bst r9,6 + bld r8,3 + bst r11,1 + bld r9,6 + bst r8,7 + bld r11,1 + bst r11,6 + bld r8,7 + bst r11,3 + bld r11,6 + bst r9,7 + bld r11,3 + bst r11,5 + bld r9,7 + bst r10,7 + bld r11,5 + bst r11,4 + bld r10,7 + bst r10,3 + bld r11,4 + bst r9,4 + bld r10,3 + bst r10,1 + bld r9,4 + bst r8,4 + bld r10,1 + bst r10,2 + bld r8,4 + bst r9,0 + bld r10,2 + bst r8,1 + bld r9,0 + bst r8,6 + bld r8,1 + bst r11,2 + bld r8,6 + bst r9,3 + bld r11,2 + bst r9,5 + bld r9,3 + bst r10,5 + bld r9,5 + bst r10,4 + bld r10,5 + bst r10,0 + bld r10,4 + bst r0,0 + bld r10,0 + bst r12,0 + bld r0,0 + bst r12,3 + bld r12,0 + bst r13,7 + bld r12,3 + bst r15,6 + bld r13,7 + bst r15,0 + bld r15,6 + bst r0,0 + bld r15,0 + bst r12,1 + bld r0,0 + bst r12,7 + bld r12,1 + bst r15,7 + bld r12,7 + bst r15,4 + bld r15,7 + bst r14,0 + bld r15,4 + bst r0,0 + bld r14,0 + bst r12,2 + bld r0,0 + bst r13,3 + bld r12,2 + bst r13,6 + bld r13,3 + bst r15,2 + bld r13,6 + bst r13,0 + bld r15,2 + bst r0,0 + bld r13,0 + bst r12,4 + bld r0,0 + bst r14,3 + bld r12,4 + bst r13,5 + bld r14,3 + bst r14,6 + bld r13,5 + bst r15,1 + bld r14,6 + bst r0,0 + bld r15,1 + bst r12,5 + bld r0,0 + bst r14,7 + bld r12,5 + bst r15,5 + bld r14,7 + bst r14,4 + bld r15,5 + bst r14,1 + bld r14,4 + bst r0,0 + bld r14,1 + bst r12,6 + bld r0,0 + bst r15,3 + bld r12,6 + bst r13,4 + bld r15,3 + bst r14,2 + bld r13,4 + bst r13,1 + bld r14,2 + bst r0,0 + bld r13,1 + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + inc r16 + ret +548: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded + + .text +.global gift128b_decrypt + .type gift128b_decrypt, @function +gift128b_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 
+ push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r17,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +114: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + cpse r16,r1 + rjmp 114b + 
rjmp 611f +266: + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 + bst r7,6 + bld r7,2 + bst r7,7 + bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld r11,0 + bst r8,5 + bld r10,6 + bst r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + eor r8,r18 + 
eor r9,r19 + eor r10,r20 + eor r11,r21 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +611: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_decrypt, .-gift128b_decrypt + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128b-full-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128b-full-avr.S new file mode 100644 index 0000000..ff11875 --- /dev/null +++ b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128b-full-avr.S @@ -0,0 +1,5037 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 + 
.byte 128 + + .text +.global gift128b_init + .type gift128b_init, @function +gift128b_init: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 18 + ld r13,X+ + ld r12,X+ + ld r11,X+ + ld r10,X+ + ld r5,X+ + ld r4,X+ + ld r3,X+ + ld r2,X+ + ld r9,X+ + ld r8,X+ + ld r7,X+ + ld r6,X+ + ld r29,X+ + ld r28,X+ + ld r23,X+ + ld r22,X+ + st Z+,r22 + st Z+,r23 + st Z+,r28 + st Z+,r29 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + ldi r24,4 +33: + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r29 + ror r28 + ror r0 + lsr r29 + ror r28 + ror r0 + or r29,r0 + st Z+,r22 + st Z+,r23 + st Z+,r28 + st Z+,r29 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r28 + mov r28,r4 + mov r4,r0 + mov r0,r29 + mov r29,r5 + mov r5,r0 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + mov r0,r6 + mov r6,r10 + mov r10,r0 + mov r0,r7 + mov r7,r11 + mov r11,r0 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + dec r24 + breq 5115f + rjmp 33b +5115: + subi r30,80 + sbc r31,r1 + ldi r24,2 +119: + ld r22,Z + ldd r23,Z+1 + ldd r28,Z+2 + ldd r29,Z+3 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + st Z,r29 + std Z+1,r23 + std Z+2,r28 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r28,Z+6 + ldd r29,Z+7 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + 
eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+4,r29 + std Z+5,r23 + std Z+6,r28 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r28,Z+10 + ldd r29,Z+11 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+8,r29 + std Z+9,r23 + std Z+10,r28 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r28,Z+14 + ldd r29,Z+15 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + 
ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+12,r29 + std Z+13,r23 + std Z+14,r28 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r28,Z+18 + ldd r29,Z+19 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+16,r29 + std Z+17,r23 + std Z+18,r28 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r28,Z+22 + ldd r29,Z+23 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov 
r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+20,r29 + std Z+21,r23 + std Z+22,r28 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r28,Z+26 + ldd r29,Z+27 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+24,r29 + std Z+25,r23 + std Z+26,r28 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r28,Z+30 + ldd r29,Z+31 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + 
lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+28,r29 + std Z+29,r23 + std Z+30,r28 + std Z+31,r22 + dec r24 + breq 1268f + adiw r30,40 + rjmp 119b +1268: + adiw r30,40 + movw r26,r30 + subi r26,80 + sbc r27,r1 + ldi r24,6 +1274: + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r2 + eor r19,r3 + andi r18,51 + andi r19,51 + eor r2,r18 + eor r3,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + movw r18,r2 + movw r20,r4 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + st Z,r2 + std Z+1,r3 + std Z+2,r4 + std Z+3,r5 + movw r18,r22 + movw r20,r28 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + andi r28,204 + andi r29,204 + or r28,r21 + or r29,r18 + or r22,r19 + or r23,r20 + movw r18,r28 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r28 + eor r19,r29 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r28,r18 + eor r29,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r28,r18 + eor r29,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r28 + std Z+5,r29 + std Z+6,r22 + std Z+7,r23 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + swap r3 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + swap r5 + std Z+8,r2 + std Z+9,r3 + std Z+10,r4 + std Z+11,r5 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r28 + adc r28,r1 + lsl r28 + adc r28,r1 + lsl r28 + adc r28,r1 + lsl r29 + adc r29,r1 + lsl r29 + adc r29,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r28 + std Z+15,r29 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + ldi r25,85 + and 
r2,r25 + and r3,r25 + and r4,r25 + and r5,r25 + or r2,r19 + or r3,r20 + or r4,r21 + or r5,r18 + std Z+16,r4 + std Z+17,r5 + std Z+18,r2 + std Z+19,r3 + movw r18,r22 + movw r20,r28 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + andi r28,170 + andi r29,170 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + or r22,r18 + or r23,r19 + or r28,r20 + or r29,r21 + std Z+20,r29 + std Z+21,r22 + std Z+22,r23 + std Z+23,r28 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + movw r18,r2 + movw r20,r4 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r14,r18 + movw r16,r20 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + eor r14,r18 + eor r15,r19 + eor r16,r20 + eor r17,r21 + ldi r25,8 + and r14,r25 + and r15,r25 + andi r16,8 + andi r17,8 + eor r18,r14 + eor r19,r15 + eor r20,r16 + eor r21,r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + eor r18,r14 + eor r19,r15 + eor r20,r16 + eor r21,r17 + ldi r17,15 + and r2,r17 + and r3,r17 + and r4,r17 + and r5,r17 + or r2,r18 + or r3,r19 + or r4,r20 + or r5,r21 + std Z+24,r2 + std Z+25,r3 + std Z+26,r4 + std Z+27,r5 + movw r18,r28 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r2,r22 + movw r4,r28 + ldi r16,1 + and r2,r16 + and r3,r16 + and r4,r16 + and r5,r16 + lsl r2 + rol r3 + rol r4 + rol r5 + lsl r2 + rol r3 + rol r4 + rol r5 + lsl r2 + rol r3 + rol r4 + rol r5 + or r2,r18 + or r3,r19 + movw r18,r28 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r2,r18 + or r3,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r4,r18 + or r5,r19 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r2,r18 + or r3,r19 + or r4,r20 + or r5,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r4,r22 + or r5,r23 + std Z+28,r2 + std Z+29,r3 + std Z+30,r4 + std Z+31,r5 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + mov r0,r1 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + or r5,r0 + std Z+32,r3 + std Z+33,r2 + std Z+34,r4 + std Z+35,r5 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r28 + mov r28,r29 + mov r29,r0 + lsl r28 + rol r29 + adc r28,r1 + lsl r28 + rol r29 + adc r28,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r28 + std Z+39,r29 + dec r24 + breq 1733f + adiw r30,40 + rjmp 1274b +1733: + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 
+ pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_init, .-gift128b_init + + .text +.global gift128b_encrypt + .type gift128b_encrypt, @function +gift128b_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 19 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + movw r26,r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rjmp 765f +27: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + 
movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + 
eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +765: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + pop r0 + pop r0 + pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt, .-gift128b_encrypt + + .text +.global gift128b_encrypt_preloaded + .type gift128b_encrypt_preloaded, @function +gift128b_encrypt_preloaded: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 19 + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + movw r26,r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rjmp 765f +27: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and 
r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + 
ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else 
+ lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +765: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + pop r0 + pop r0 + pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded + + .text +.global gift128b_decrypt + .type gift128b_decrypt, @function +gift128b_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 19 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + movw r26,r30 + subi r26,192 + sbci r27,254 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,160 + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rjmp 768f +30: + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r22 + and 
r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r1 + lsr r22 + ror r0 + lsr r22 + ror r0 + or r22,r0 + mov r0,r1 + lsr r23 + ror r0 + lsr r23 + ror r0 + or r23,r0 + mov r0,r1 + lsr r2 + ror r0 + lsr r2 + ror r0 + or r2,r0 + mov r0,r1 + lsr r3 + ror r0 + lsr r3 + ror r0 + or r3,r0 + swap r4 + swap r5 + swap r6 + swap r7 + lsl r8 + adc r8,r1 + lsl r8 + adc r8,r1 + lsl r9 + adc r9,r1 + lsl r9 + adc r9,r1 + lsl r10 + adc r10,r1 + lsl r10 + adc r10,r1 + lsl r11 + adc r11,r1 + lsl r11 + adc r11,r1 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + com r22 + com r23 + com r2 + com r3 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + 
ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + com r22 + com r23 + com r2 + com r3 + eor r4,r22 + eor r5,r23 + eor r6,r2 
+ eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,119 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r15 + ror r14 + ror r13 + ror r12 + lsr r15 + ror r14 + ror r13 + ror r12 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,17 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +768: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + pop r0 + pop r0 + 
pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_decrypt, .-gift128b_decrypt + +#endif + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128b-small-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128b-small-avr.S new file mode 100644 index 0000000..77ef9fd --- /dev/null +++ b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128b-small-avr.S @@ -0,0 +1,6053 @@ +#if defined(__AVR__) +#include <avr/io.h> +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 + .byte 128 + + .text +.global gift128b_init + .type gift128b_init, @function +gift128b_init: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 16 + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + ldi r24,4 +33: + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + mov r0,r22
+ mov r22,r4 + mov r4,r0 + mov r0,r23 + mov r23,r5 + mov r5,r0 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + dec r24 + breq 5115f + rjmp 33b +5115: + subi r30,80 + sbc r31,r1 + ldi r24,2 +119: + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + st Z,r3 + std Z+1,r23 + std Z+2,r2 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 
+ lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+4,r3 + std Z+5,r23 + std Z+6,r2 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+8,r3 + std Z+9,r23 + std Z+10,r2 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr 
r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+12,r3 + std Z+13,r23 + std Z+14,r2 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r3 + std Z+17,r23 + std Z+18,r2 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + 
rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+20,r3 + std Z+21,r23 + std Z+22,r2 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+24,r3 + std Z+25,r23 + std Z+26,r2 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol 
r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+28,r3 + std Z+29,r23 + std Z+30,r2 + std Z+31,r22 + dec r24 + breq 1268f + adiw r30,40 + rjmp 119b +1268: + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size gift128b_init, .-gift128b_init + + .text +.global gift128b_encrypt + .type gift128b_encrypt, @function +gift128b_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ldi r24,20 +1: + ld r22,Z+ + ld r23,Z+ + ld r2,Z+ + ld r3,Z+ + std Y+1,r22 + std Y+2,r23 + std Y+3,r2 + std Y+4,r3 + adiw r28,4 + dec r24 + brne 1b + subi r28,80 + sbc r29,r1 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 73f + rcall 73f + rjmp 1285f +73: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov 
r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + 
lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor 
r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif 
defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +811: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + 
ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +1285: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt, .-gift128b_encrypt + + .text +.global 
gift128b_encrypt_preloaded + .type gift128b_encrypt_preloaded, @function +gift128b_encrypt_preloaded: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ldi r24,20 +1: + ld r22,Z+ + ld r23,Z+ + ld r2,Z+ + ld r3,Z+ + std Y+1,r22 + std Y+2,r23 + std Y+3,r2 + std Y+4,r3 + adiw r28,4 + dec r24 + brne 1b + subi r28,80 + sbc r29,r1 + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 73f + rcall 73f + rjmp 1285f +73: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 
+ rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) 
+ lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or 
r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor 
r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +811: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + 
rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +1285: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 40 +table_1: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 
11 + .byte 23 + .byte 46 + .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128b_decrypt + .type gift128b_decrypt, @function +gift128b_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror 
r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + 
eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r17,hh8(table_1) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +678: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 830f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 830f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 830f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 830f + cpse r16,r1 + rjmp 678b + rjmp 1175f 
+830: + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 + bst r7,6 + bld r7,2 + bst r7,7 + bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld r11,0 + bst r8,5 + bld r10,6 + bst r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + eor r8,r18 + eor r9,r19 
+ eor r10,r20 + eor r11,r21 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +1175: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_decrypt, .-gift128b_decrypt + +#endif + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128b-tiny-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128b-tiny-avr.S new file mode 100644 index 0000000..e7a03f1 --- /dev/null +++ b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-gift128b-tiny-avr.S @@ -0,0 +1,6766 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_TINY + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 + 
.byte 128 + + .text +.global gift128b_init + .type gift128b_init, @function +gift128b_init: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 16 + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + st Z,r22 + std Z+1,r23 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size gift128b_init, .-gift128b_init + + .text +.global gift128b_encrypt + .type gift128b_encrypt, @function +gift128b_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + movw r30,r28 + adiw r30,1 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + ldi r24,4 +35: + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + mov r0,r22 + mov r22,r4 + mov r4,r0 + mov r0,r23 + mov r23,r5 + mov r5,r0 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + dec r24 + breq 5117f + rjmp 35b +5117: + subi r30,80 + sbc r31,r1 + ldi r24,2 +121: + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor 
r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + st Z,r3 + std Z+1,r23 + std Z+2,r2 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+4,r3 + std Z+5,r23 + std Z+6,r2 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov 
r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+8,r3 + std Z+9,r23 + std Z+10,r2 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+12,r3 + std Z+13,r23 + std Z+14,r2 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw 
r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r3 + std Z+17,r23 + std Z+18,r2 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+20,r3 + std Z+21,r23 + std Z+22,r2 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror 
r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+24,r3 + std Z+25,r23 + std Z+26,r2 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+28,r3 + std Z+29,r23 + std Z+30,r2 + std Z+31,r22 + dec r24 + breq 1270f + adiw r30,40 + rjmp 121b +1270: + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + 
ldi r30,60 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 1329f + rcall 1329f + rjmp 2541f +1329: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + 
ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor 
r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + 
eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +2067: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + 
swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + 
ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +2541: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt, .-gift128b_encrypt + + .text +.global gift128b_encrypt_preloaded + .type gift128b_encrypt_preloaded, @function +gift128b_encrypt_preloaded: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + movw r30,r28 + adiw r30,1 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + ldi r24,4 +35: + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + mov r0,r22 + mov r22,r4 + mov r4,r0 + mov r0,r23 + mov r23,r5 + mov r5,r0 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + dec r24 + breq 5117f + rjmp 35b +5117: + subi r30,80 + sbc r31,r1 + ldi r24,2 +121: + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov 
r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + st Z,r3 + std Z+1,r23 + std Z+2,r2 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+4,r3 + std Z+5,r23 + std Z+6,r2 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + 
rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+8,r3 + std Z+9,r23 + std Z+10,r2 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+12,r3 + std Z+13,r23 + std Z+14,r2 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 
+ movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r3 + std Z+17,r23 + std Z+18,r2 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+20,r3 + std Z+21,r23 + std Z+22,r2 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor 
r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+24,r3 + std Z+25,r23 + std Z+26,r2 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+28,r3 + std Z+29,r23 + std Z+30,r2 + std Z+31,r22 + dec r24 + breq 1270f + adiw r30,40 + rjmp 121b +1270: + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + 
ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 1329f + rcall 1329f + rjmp 2541f +1329: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if 
defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + 
ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +2067: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor 
r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or 
r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +2541: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 40 +table_1: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 11 + .byte 23 + .byte 46 + .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128b_decrypt + .type gift128b_decrypt, @function +gift128b_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd 
r25,Z+11 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r17,hh8(table_1) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +114: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + cpse r16,r1 + rjmp 114b + rjmp 611f +266: + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 
+ bst r7,6 + bld r7,2 + bst r7,7 + bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld r11,0 + bst r8,5 + bld r10,6 + bst r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +611: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop 
r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_decrypt, .-gift128b_decrypt + +#endif + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-util.h b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-util.h +++ b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) 
(leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git 
a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/sundae-gift.c b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/sundae-gift.c index 984a4db..d192b8e 100644 --- a/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/sundae-gift.c +++ b/sundae-gift/Implementations/crypto_aead/sundaegift64v1/rhys/sundae-gift.c @@ -140,8 +140,7 @@ static int sundae_gift_aead_encrypt *clen = mlen + SUNDAE_GIFT_TAG_SIZE; /* Set the key schedule */ - if (!gift128b_init(&ks, k, SUNDAE_GIFT_KEY_SIZE)) - return -1; + gift128b_init(&ks, k); /* Format and encrypt the initial domain separation block */ if (adlen > 0) @@ -205,8 +204,7 @@ static int sundae_gift_aead_decrypt len = *mlen = clen - SUNDAE_GIFT_TAG_SIZE; /* Set the key schedule */ - if (!gift128b_init(&ks, k, SUNDAE_GIFT_KEY_SIZE)) - return -1; + gift128b_init(&ks, k); /* Decrypt the ciphertext to produce the plaintext, using the * tag as the initialization vector for the decryption process */ diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/aead-common.c b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/aead-common.h b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. 
- * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. 
- * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. 
- * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/api.h b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/api.h deleted file mode 100644 index c3c0a27..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 12 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/encrypt.c b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/encrypt.c deleted file mode 100644 index a358142..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "sundae-gift.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return sundae_gift_96_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return sundae_gift_96_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128-config.h b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128-config.h deleted file mode 100644 index 62131ba..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128-config.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_GIFT128_CONFIG_H -#define LW_INTERNAL_GIFT128_CONFIG_H - -/** - * \file internal-gift128-config.h - * \brief Configures the variant of GIFT-128 to use. - */ - -/** - * \brief Select the full variant of GIFT-128. - * - * The full variant requires 320 bytes for the key schedule and uses the - * fixslicing method to implement encryption and decryption. - */ -#define GIFT128_VARIANT_FULL 0 - -/** - * \brief Select the small variant of GIFT-128. - * - * The small variant requires 80 bytes for the key schedule. The rest - * of the key schedule is expanded on the fly during encryption. - * - * The fixslicing method is used to implement encryption and the slower - * bitslicing method is used to implement decryption. The small variant - * is suitable when memory is at a premium, decryption is not needed, - * but encryption performance is still important. - */ -#define GIFT128_VARIANT_SMALL 1 - -/** - * \brief Select the tiny variant of GIFT-128. - * - * The tiny variant requires 16 bytes for the key schedule and uses the - * bitslicing method to implement encryption and decryption. It is suitable - * for use when memory is very tight and performance is not critical. - */ -#define GIFT128_VARIANT_TINY 2 - -/** - * \def GIFT128_VARIANT - * \brief Selects the default variant of GIFT-128 to use on this platform. - */ -/** - * \def GIFT128_VARIANT_ASM - * \brief Defined to 1 if the GIFT-128 implementation has been replaced - * with an assembly code version. - */ -#if defined(__AVR__) && !defined(GIFT128_VARIANT_ASM) -#define GIFT128_VARIANT_ASM 1 -#endif -#if !defined(GIFT128_VARIANT) -#define GIFT128_VARIANT GIFT128_VARIANT_FULL -#endif -#if !defined(GIFT128_VARIANT_ASM) -#define GIFT128_VARIANT_ASM 0 -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128.c b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128.c deleted file mode 100644 index c6ac5ec..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128.c +++ /dev/null @@ -1,1498 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "internal-gift128.h" -#include "internal-util.h" - -#if !GIFT128_VARIANT_ASM - -#if GIFT128_VARIANT != GIFT128_VARIANT_TINY - -/* Round constants for GIFT-128 in the fixsliced representation */ -static uint32_t const GIFT128_RC_fixsliced[40] = { - 0x10000008, 0x80018000, 0x54000002, 0x01010181, 0x8000001f, 0x10888880, - 0x6001e000, 0x51500002, 0x03030180, 0x8000002f, 0x10088880, 0x60016000, - 0x41500002, 0x03030080, 0x80000027, 0x10008880, 0x4001e000, 0x11500002, - 0x03020180, 0x8000002b, 0x10080880, 0x60014000, 0x01400002, 0x02020080, - 0x80000021, 0x10000080, 0x0001c000, 0x51000002, 0x03010180, 0x8000002e, - 0x10088800, 0x60012000, 0x40500002, 0x01030080, 0x80000006, 0x10008808, - 0xc001a000, 0x14500002, 0x01020181, 0x8000001a -}; - -#endif - -#if GIFT128_VARIANT != GIFT128_VARIANT_FULL - -/* Round constants for GIFT-128 in the bitsliced representation */ -static uint8_t const GIFT128_RC[40] = { - 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, - 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, - 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, - 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38, - 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A -}; - -#endif - -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) - -/* - * The permutation below was generated by the online permuation generator at - * "http://programming.sirrida.de/calcperm.php". - * - * All of the permutuations are essentially the same, except that each is - * rotated by 8 bits with respect to the next: - * - * P0: 0 24 16 8 1 25 17 9 2 26 18 10 3 27 19 11 4 28 20 12 5 29 21 13 6 30 22 14 7 31 23 15 - * P1: 8 0 24 16 9 1 25 17 10 2 26 18 11 3 27 19 12 4 28 20 13 5 29 21 14 6 30 22 15 7 31 23 - * P2: 16 8 0 24 17 9 1 25 18 10 2 26 19 11 3 27 20 12 4 28 21 13 5 29 22 14 6 30 23 15 7 31 - * P3: 24 16 8 0 25 17 9 1 26 18 10 2 27 19 11 3 28 20 12 4 29 21 13 5 30 22 14 6 31 23 15 7 - * - * The most efficient permutation from the online generator was P3, so we - * perform it as the core of the others, and then perform a final rotation. - * - * It is possible to do slightly better than "P3 then rotate" on desktop and - * server architectures for the other permutations. But the advantage isn't - * as evident on embedded platforms so we keep things simple. 
- */ -#define PERM3_INNER(x) \ - do { \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x000000ff, 24); \ - } while (0) -#define PERM0(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate8(_x); \ - } while (0) -#define PERM1(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate16(_x); \ - } while (0) -#define PERM2(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = leftRotate24(_x); \ - } while (0) -#define PERM3(x) \ - do { \ - uint32_t _x = (x); \ - PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) - -#define INV_PERM3_INNER(x) \ - do { \ - bit_permute_step(x, 0x00550055, 9); \ - bit_permute_step(x, 0x00003333, 18); \ - bit_permute_step(x, 0x000f000f, 12); \ - bit_permute_step(x, 0x000000ff, 24); \ - } while (0) -#define INV_PERM0(x) \ - do { \ - uint32_t _x = rightRotate8(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM1(x) \ - do { \ - uint32_t _x = rightRotate16(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM2(x) \ - do { \ - uint32_t _x = rightRotate24(x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) -#define INV_PERM3(x) \ - do { \ - uint32_t _x = (x); \ - INV_PERM3_INNER(_x); \ - (x) = _x; \ - } while (0) - -/** - * \brief Converts the GIFT-128 nibble-based representation into word-based. - * - * \param output Output buffer to write the word-based version to. - * \param input Input buffer to read the nibble-based version from. - * - * The \a input and \a output buffers can be the same buffer. - */ -static void gift128n_to_words - (unsigned char *output, const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Load the input buffer into 32-bit words. We use the nibble order - * from the HYENA submission to NIST which is byte-reversed with respect - * to the nibble order of the original GIFT-128 paper. Nibble zero is in - * the first byte instead of the last, which means little-endian order. */ - s0 = le_load_word32(input + 12); - s1 = le_load_word32(input + 8); - s2 = le_load_word32(input + 4); - s3 = le_load_word32(input); - - /* Rearrange the bits so that bits 0..3 of each nibble are - * scattered to bytes 0..3 of each word. The permutation is: - * - * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 - * - * Generated with "http://programming.sirrida.de/calcperm.php". - */ - #define PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - PERM_WORDS(s0); - PERM_WORDS(s1); - PERM_WORDS(s2); - PERM_WORDS(s3); - - /* Rearrange the bytes and write them to the output buffer */ - output[0] = (uint8_t)s0; - output[1] = (uint8_t)s1; - output[2] = (uint8_t)s2; - output[3] = (uint8_t)s3; - output[4] = (uint8_t)(s0 >> 8); - output[5] = (uint8_t)(s1 >> 8); - output[6] = (uint8_t)(s2 >> 8); - output[7] = (uint8_t)(s3 >> 8); - output[8] = (uint8_t)(s0 >> 16); - output[9] = (uint8_t)(s1 >> 16); - output[10] = (uint8_t)(s2 >> 16); - output[11] = (uint8_t)(s3 >> 16); - output[12] = (uint8_t)(s0 >> 24); - output[13] = (uint8_t)(s1 >> 24); - output[14] = (uint8_t)(s2 >> 24); - output[15] = (uint8_t)(s3 >> 24); -} - -/** - * \brief Converts the GIFT-128 word-based representation into nibble-based. 
- * - * \param output Output buffer to write the nibble-based version to. - * \param input Input buffer to read the word-based version from. - */ -static void gift128n_to_nibbles - (unsigned char *output, const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Load the input bytes and rearrange them so that s0 contains the - * most significant nibbles and s3 contains the least significant */ - s0 = (((uint32_t)(input[12])) << 24) | - (((uint32_t)(input[8])) << 16) | - (((uint32_t)(input[4])) << 8) | - ((uint32_t)(input[0])); - s1 = (((uint32_t)(input[13])) << 24) | - (((uint32_t)(input[9])) << 16) | - (((uint32_t)(input[5])) << 8) | - ((uint32_t)(input[1])); - s2 = (((uint32_t)(input[14])) << 24) | - (((uint32_t)(input[10])) << 16) | - (((uint32_t)(input[6])) << 8) | - ((uint32_t)(input[2])); - s3 = (((uint32_t)(input[15])) << 24) | - (((uint32_t)(input[11])) << 16) | - (((uint32_t)(input[7])) << 8) | - ((uint32_t)(input[3])); - - /* Apply the inverse of PERM_WORDS() from the function above */ - #define INV_PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x00aa00aa, 7); \ - bit_permute_step(x, 0x0000cccc, 14); \ - bit_permute_step(x, 0x00f000f0, 4); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - INV_PERM_WORDS(s0); - INV_PERM_WORDS(s1); - INV_PERM_WORDS(s2); - INV_PERM_WORDS(s3); - - /* Store the result into the output buffer as 32-bit words */ - le_store_word32(output + 12, s0); - le_store_word32(output + 8, s1); - le_store_word32(output + 4, s2); - le_store_word32(output, s3); -} - -void gift128n_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - gift128n_to_words(output, input); - gift128b_encrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} - -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - gift128n_to_words(output, input); - gift128b_decrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} - -#if GIFT128_VARIANT != GIFT128_VARIANT_TINY - -/** - * \brief Swaps bits within two words. - * - * \param a The first word. - * \param b The second word. - * \param mask Mask for the bits to shift. - * \param shift Shift amount in bits. - */ -#define gift128b_swap_move(a, b, mask, shift) \ - do { \ - uint32_t tmp = ((b) ^ ((a) >> (shift))) & (mask); \ - (b) ^= tmp; \ - (a) ^= tmp << (shift); \ - } while (0) - -/** - * \brief Derives the next 10 fixsliced keys in the key schedule. - * - * \param next Points to the buffer to receive the next 10 keys. - * \param prev Points to the buffer holding the previous 10 keys. - * - * The \a next and \a prev buffers are allowed to be the same. 
- */ -#define gift128b_derive_keys(next, prev) \ - do { \ - /* Key 0 */ \ - uint32_t s = (prev)[0]; \ - uint32_t t = (prev)[1]; \ - gift128b_swap_move(t, t, 0x00003333U, 16); \ - gift128b_swap_move(t, t, 0x55554444U, 1); \ - (next)[0] = t; \ - /* Key 1 */ \ - s = leftRotate8(s & 0x33333333U) | leftRotate16(s & 0xCCCCCCCCU); \ - gift128b_swap_move(s, s, 0x55551100U, 1); \ - (next)[1] = s; \ - /* Key 2 */ \ - s = (prev)[2]; \ - t = (prev)[3]; \ - (next)[2] = ((t >> 4) & 0x0F000F00U) | ((t & 0x0F000F00U) << 4) | \ - ((t >> 6) & 0x00030003U) | ((t & 0x003F003FU) << 2); \ - /* Key 3 */ \ - (next)[3] = ((s >> 6) & 0x03000300U) | ((s & 0x3F003F00U) << 2) | \ - ((s >> 5) & 0x00070007U) | ((s & 0x001F001FU) << 3); \ - /* Key 4 */ \ - s = (prev)[4]; \ - t = (prev)[5]; \ - (next)[4] = leftRotate8(t & 0xAAAAAAAAU) | \ - leftRotate16(t & 0x55555555U); \ - /* Key 5 */ \ - (next)[5] = leftRotate8(s & 0x55555555U) | \ - leftRotate12(s & 0xAAAAAAAAU); \ - /* Key 6 */ \ - s = (prev)[6]; \ - t = (prev)[7]; \ - (next)[6] = ((t >> 2) & 0x03030303U) | ((t & 0x03030303U) << 2) | \ - ((t >> 1) & 0x70707070U) | ((t & 0x10101010U) << 3); \ - /* Key 7 */ \ - (next)[7] = ((s >> 18) & 0x00003030U) | ((s & 0x01010101U) << 3) | \ - ((s >> 14) & 0x0000C0C0U) | ((s & 0x0000E0E0U) << 15) | \ - ((s >> 1) & 0x07070707U) | ((s & 0x00001010U) << 19); \ - /* Key 8 */ \ - s = (prev)[8]; \ - t = (prev)[9]; \ - (next)[8] = ((t >> 4) & 0x0FFF0000U) | ((t & 0x000F0000U) << 12) | \ - ((t >> 8) & 0x000000FFU) | ((t & 0x000000FFU) << 8); \ - /* Key 9 */ \ - (next)[9] = ((s >> 6) & 0x03FF0000U) | ((s & 0x003F0000U) << 10) | \ - ((s >> 4) & 0x00000FFFU) | ((s & 0x0000000FU) << 12); \ - } while (0) - -/** - * \brief Compute the round keys for GIFT-128 in the fixsliced representation. - * - * \param ks Points to the key schedule to initialize. - * \param k0 First key word. - * \param k1 Second key word. - * \param k2 Third key word. - * \param k3 Fourth key word. 
- */ -static void gift128b_compute_round_keys - (gift128b_key_schedule_t *ks, - uint32_t k0, uint32_t k1, uint32_t k2, uint32_t k3) -{ - unsigned index; - uint32_t temp; - - /* Set the regular key with k0 and k3 pre-swapped for the round function */ - ks->k[0] = k3; - ks->k[1] = k1; - ks->k[2] = k2; - ks->k[3] = k0; - - /* Pre-compute the keys for rounds 3..10 and permute into fixsliced form */ - for (index = 4; index < 20; index += 2) { - ks->k[index] = ks->k[index - 3]; - temp = ks->k[index - 4]; - temp = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - ks->k[index + 1] = temp; - } - for (index = 0; index < 20; index += 10) { - /* Keys 0 and 10 */ - temp = ks->k[index]; - gift128b_swap_move(temp, temp, 0x00550055U, 9); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index] = temp; - - /* Keys 1 and 11 */ - temp = ks->k[index + 1]; - gift128b_swap_move(temp, temp, 0x00550055U, 9); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 1] = temp; - - /* Keys 2 and 12 */ - temp = ks->k[index + 2]; - gift128b_swap_move(temp, temp, 0x11111111U, 3); - gift128b_swap_move(temp, temp, 0x03030303U, 6); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 2] = temp; - - /* Keys 3 and 13 */ - temp = ks->k[index + 3]; - gift128b_swap_move(temp, temp, 0x11111111U, 3); - gift128b_swap_move(temp, temp, 0x03030303U, 6); - gift128b_swap_move(temp, temp, 0x000F000FU, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 3] = temp; - - /* Keys 4 and 14 */ - temp = ks->k[index + 4]; - gift128b_swap_move(temp, temp, 0x0000AAAAU, 15); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 4] = temp; - - /* Keys 5 and 15 */ - temp = ks->k[index + 5]; - gift128b_swap_move(temp, temp, 0x0000AAAAU, 15); - gift128b_swap_move(temp, temp, 0x00003333U, 18); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 5] = temp; - - /* Keys 6 and 16 */ - temp = ks->k[index + 6]; - gift128b_swap_move(temp, temp, 0x0A0A0A0AU, 3); - gift128b_swap_move(temp, temp, 0x00CC00CCU, 6); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 6] = temp; - - /* Keys 7 and 17 */ - temp = ks->k[index + 7]; - gift128b_swap_move(temp, temp, 0x0A0A0A0AU, 3); - gift128b_swap_move(temp, temp, 0x00CC00CCU, 6); - gift128b_swap_move(temp, temp, 0x0000F0F0U, 12); - gift128b_swap_move(temp, temp, 0x000000FFU, 24); - ks->k[index + 7] = temp; - - /* Keys 8, 9, 18, and 19 do not need any adjustment */ - } - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - /* Derive the fixsliced keys for the remaining rounds 11..40 */ - for (index = 20; index < 80; index += 10) { - gift128b_derive_keys(ks->k + index, ks->k + index - 20); - } -#endif -} - -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) -{ - gift128b_compute_round_keys - (ks, be_load_word32(key), be_load_word32(key + 4), - be_load_word32(key + 8), be_load_word32(key + 12)); -} - -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) -{ - /* Use 
the little-endian key byte order from the HYENA submission */ - gift128b_compute_round_keys - (ks, le_load_word32(key + 12), le_load_word32(key + 8), - le_load_word32(key + 4), le_load_word32(key)); -} - -/** - * \brief Performs the GIFT-128 S-box on the bit-sliced state. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_sbox(s0, s1, s2, s3) \ - do { \ - s1 ^= s0 & s2; \ - s0 ^= s1 & s3; \ - s2 ^= s0 | s1; \ - s3 ^= s2; \ - s1 ^= s3; \ - s3 ^= 0xFFFFFFFFU; \ - s2 ^= s0 & s1; \ - } while (0) - -/** - * \brief Performs the inverse of the GIFT-128 S-box on the bit-sliced state. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_sbox(s0, s1, s2, s3) \ - do { \ - s2 ^= s3 & s1; \ - s0 ^= 0xFFFFFFFFU; \ - s1 ^= s0; \ - s0 ^= s2; \ - s2 ^= s3 | s1; \ - s3 ^= s1 & s0; \ - s1 ^= s3 & s2; \ - } while (0) - -/** - * \brief Permutes the GIFT-128 state between the 1st and 2nd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_1(s0, s1, s2, s3) \ - do { \ - s1 = ((s1 >> 2) & 0x33333333U) | ((s1 & 0x33333333U) << 2); \ - s2 = ((s2 >> 3) & 0x11111111U) | ((s2 & 0x77777777U) << 1); \ - s3 = ((s3 >> 1) & 0x77777777U) | ((s3 & 0x11111111U) << 3); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 2nd and 3rd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_2(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 4) & 0x0FFF0FFFU) | ((s0 & 0x000F000FU) << 12); \ - s1 = ((s1 >> 8) & 0x00FF00FFU) | ((s1 & 0x00FF00FFU) << 8); \ - s2 = ((s2 >> 12) & 0x000F000FU) | ((s2 & 0x0FFF0FFFU) << 4); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 3rd and 4th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_3(s0, s1, s2, s3) \ - do { \ - gift128b_swap_move(s1, s1, 0x55555555U, 1); \ - s2 = leftRotate16(s2); \ - gift128b_swap_move(s2, s2, 0x00005555U, 1); \ - s3 = leftRotate16(s3); \ - gift128b_swap_move(s3, s3, 0x55550000U, 1); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 4th and 5th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_4(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 6) & 0x03030303U) | ((s0 & 0x3F3F3F3FU) << 2); \ - s1 = ((s1 >> 4) & 0x0F0F0F0FU) | ((s1 & 0x0F0F0F0FU) << 4); \ - s2 = ((s2 >> 2) & 0x3F3F3F3FU) | ((s2 & 0x03030303U) << 6); \ - } while (0); - -/** - * \brief Permutes the GIFT-128 state between the 5th and 1st mini-rounds. 
- * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_permute_state_5(s0, s1, s2, s3) \ - do { \ - s1 = leftRotate16(s1); \ - s2 = rightRotate8(s2); \ - s3 = leftRotate8(s3); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 1st and 2nd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_1(s0, s1, s2, s3) \ - do { \ - s1 = ((s1 >> 2) & 0x33333333U) | ((s1 & 0x33333333U) << 2); \ - s2 = ((s2 >> 1) & 0x77777777U) | ((s2 & 0x11111111U) << 3); \ - s3 = ((s3 >> 3) & 0x11111111U) | ((s3 & 0x77777777U) << 1); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 2nd and 3rd mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_2(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 12) & 0x000F000FU) | ((s0 & 0x0FFF0FFFU) << 4); \ - s1 = ((s1 >> 8) & 0x00FF00FFU) | ((s1 & 0x00FF00FFU) << 8); \ - s2 = ((s2 >> 4) & 0x0FFF0FFFU) | ((s2 & 0x000F000FU) << 12); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 3rd and 4th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_3(s0, s1, s2, s3) \ - do { \ - gift128b_swap_move(s1, s1, 0x55555555U, 1); \ - gift128b_swap_move(s2, s2, 0x00005555U, 1); \ - s2 = leftRotate16(s2); \ - gift128b_swap_move(s3, s3, 0x55550000U, 1); \ - s3 = leftRotate16(s3); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 4th and 5th mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_4(s0, s1, s2, s3) \ - do { \ - s0 = ((s0 >> 2) & 0x3F3F3F3FU) | ((s0 & 0x03030303U) << 6); \ - s1 = ((s1 >> 4) & 0x0F0F0F0FU) | ((s1 & 0x0F0F0F0FU) << 4); \ - s2 = ((s2 >> 6) & 0x03030303U) | ((s2 & 0x3F3F3F3FU) << 2); \ - } while (0); - -/** - * \brief Inverts the GIFT-128 state between the 5th and 1st mini-rounds. - * - * \param s0 First word of the bit-sliced state. - * \param s1 Second word of the bit-sliced state. - * \param s2 Third word of the bit-sliced state. - * \param s3 Fourth word of the bit-sliced state. - */ -#define gift128b_inv_permute_state_5(s0, s1, s2, s3) \ - do { \ - s1 = leftRotate16(s1); \ - s2 = leftRotate8(s2); \ - s3 = rightRotate8(s3); \ - } while (0); - -/** - * \brief Performs five fixsliced encryption rounds for GIFT-128. - * - * \param rk Points to the 10 round keys for these rounds. - * \param rc Points to the round constants for these rounds. - * - * We perform all 40 rounds of the fixsliced GIFT-128 five at a time. 
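 * For example, the full variant applies the eight groups of five rounds
 * as shown later in this file:
 *
 *     gift128b_encrypt_5_rounds(ks->k,      GIFT128_RC_fixsliced);
 *     gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5);
 *     ...
 *     gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35);
 *
 * while the small variant re-derives the later key groups on the fly
 * with gift128b_derive_keys() between calls.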
- * - * The permutation is restructured so that one of the words each round - * does not need to be permuted, with the others rotating left, up, right, - * and down to keep the bits in line with their non-moving counterparts. - * This reduces the number of shifts required significantly. - * - * At the end of five rounds, the bit ordering will return to the - * original position. We then repeat the process for the next 5 rounds. - */ -#define gift128b_encrypt_5_rounds(rk, rc) \ - do { \ - /* 1st round - S-box, rotate left, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_1(s0, s1, s2, s3); \ - s1 ^= (rk)[0]; \ - s2 ^= (rk)[1]; \ - s0 ^= (rc)[0]; \ - \ - /* 2nd round - S-box, rotate up, add round key */ \ - gift128b_sbox(s3, s1, s2, s0); \ - gift128b_permute_state_2(s0, s1, s2, s3); \ - s1 ^= (rk)[2]; \ - s2 ^= (rk)[3]; \ - s3 ^= (rc)[1]; \ - \ - /* 3rd round - S-box, swap columns, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_3(s0, s1, s2, s3); \ - s1 ^= (rk)[4]; \ - s2 ^= (rk)[5]; \ - s0 ^= (rc)[2]; \ - \ - /* 4th round - S-box, rotate left and swap rows, add round key */ \ - gift128b_sbox(s3, s1, s2, s0); \ - gift128b_permute_state_4(s0, s1, s2, s3); \ - s1 ^= (rk)[6]; \ - s2 ^= (rk)[7]; \ - s3 ^= (rc)[3]; \ - \ - /* 5th round - S-box, rotate up, add round key */ \ - gift128b_sbox(s0, s1, s2, s3); \ - gift128b_permute_state_5(s0, s1, s2, s3); \ - s1 ^= (rk)[8]; \ - s2 ^= (rk)[9]; \ - s0 ^= (rc)[4]; \ - \ - /* Swap s0 and s3 in preparation for the next 1st round */ \ - s0 ^= s3; \ - s3 ^= s0; \ - s0 ^= s3; \ - } while (0) - -/** - * \brief Performs five fixsliced decryption rounds for GIFT-128. - * - * \param rk Points to the 10 round keys for these rounds. - * \param rc Points to the round constants for these rounds. - * - * We perform all 40 rounds of the fixsliced GIFT-128 five at a time. 
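 *
 * Decryption consumes the round key and round constant groups in the
 * reverse order of encryption; for example, the full variant runs from
 * the last group back to the first:
 *
 *     gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35);
 *     ...
 *     gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced);
 *
 * Within each group the rounds are likewise undone in reverse, adding
 * the round key before the inverse permutation and inverse S-box.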
- */ -#define gift128b_decrypt_5_rounds(rk, rc) \ - do { \ - /* Swap s0 and s3 in preparation for the next 5th round */ \ - s0 ^= s3; \ - s3 ^= s0; \ - s0 ^= s3; \ - \ - /* 5th round - S-box, rotate down, add round key */ \ - s1 ^= (rk)[8]; \ - s2 ^= (rk)[9]; \ - s0 ^= (rc)[4]; \ - gift128b_inv_permute_state_5(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - \ - /* 4th round - S-box, rotate right and swap rows, add round key */ \ - s1 ^= (rk)[6]; \ - s2 ^= (rk)[7]; \ - s3 ^= (rc)[3]; \ - gift128b_inv_permute_state_4(s0, s1, s2, s3); \ - gift128b_inv_sbox(s0, s1, s2, s3); \ - \ - /* 3rd round - S-box, swap columns, add round key */ \ - s1 ^= (rk)[4]; \ - s2 ^= (rk)[5]; \ - s0 ^= (rc)[2]; \ - gift128b_inv_permute_state_3(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - \ - /* 2nd round - S-box, rotate down, add round key */ \ - s1 ^= (rk)[2]; \ - s2 ^= (rk)[3]; \ - s3 ^= (rc)[1]; \ - gift128b_inv_permute_state_2(s0, s1, s2, s3); \ - gift128b_inv_sbox(s0, s1, s2, s3); \ - \ - /* 1st round - S-box, rotate right, add round key */ \ - s1 ^= (rk)[0]; \ - s2 ^= (rk)[1]; \ - s0 ^= (rc)[0]; \ - gift128b_inv_permute_state_1(s0, s1, s2, s3); \ - gift128b_inv_sbox(s3, s1, s2, s0); \ - } while (0) - -#else /* GIFT128_VARIANT_TINY */ - -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) -{ - /* Mirror the fixslicing word order of 3, 1, 2, 0 */ - ks->k[0] = be_load_word32(key + 12); - ks->k[1] = be_load_word32(key + 4); - ks->k[2] = be_load_word32(key + 8); - ks->k[3] = be_load_word32(key); -} - -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) -{ - /* Use the little-endian key byte order from the HYENA submission - * and mirror the fixslicing word order of 3, 1, 2, 0 */ - ks->k[0] = le_load_word32(key); - ks->k[1] = le_load_word32(key + 8); - ks->k[2] = le_load_word32(key + 4); - ks->k[3] = le_load_word32(key + 12); -} - -#endif /* GIFT128_VARIANT_TINY */ - -#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext 
into local variables */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t k[20]; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. - * Every 5 rounds except the last we add the tweak value to the state */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_derive_keys(k, ks->k); - gift128b_derive_keys(k + 10, ks->k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_derive_keys(k, k); - gift128b_derive_keys(k + 10, k + 10); - gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#elif GIFT128_VARIANT == GIFT128_VARIANT_FULL - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_encrypt_5_rounds(ks->k 
+ 60, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into local variables */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. 
- * Every 5 rounds except the last we add the tweak value to the state */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#else /* GIFT128_VARIANT_TINY */ - -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer */ - s0 = input[0]; - s1 = input[1]; - s2 = input[2]; - s3 = input[3]; - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 
0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer */ - output[0] = s0; - output[1] = s1; - output[2] = s2; - output[3] = s3; -} - -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* The key schedule is initialized with the key itself */ - w0 = ks->k[3]; - w1 = ks->k[1]; - w2 = ks->k[2]; - w3 = ks->k[0]; - - /* Perform all 40 rounds */ - for (round = 0; round < 40; ++round) { - /* SubCells - apply the S-box */ - s1 ^= s0 & s2; - s0 ^= s1 & s3; - s2 ^= s0 | s1; - s3 ^= s2; - s1 ^= s3; - s3 ^= 0xFFFFFFFFU; - s2 ^= s0 & s1; - temp = s0; - s0 = s3; - s3 = temp; - - /* PermBits - apply the 128-bit permutation */ - PERM0(s0); - PERM1(s1); - PERM2(s2); - PERM3(s3); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round]; - - /* AddTweak - XOR in the tweak every 5 rounds except the last */ - if (((round + 1) % 5) == 0 && round < 39) - s0 ^= tweak; - - /* Rotate the key schedule */ - temp = w3; - w3 = w2; - w2 = w1; - w1 = w0; - w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - } - - /* Pack the state into the ciphertext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#endif /* GIFT128_VARIANT_TINY */ - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the plaintext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - - /* Pack the state into the ciphertext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - - /* Copy the ciphertext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = 
be_load_word32(output + 12); - - /* Perform all 40 rounds five at a time using the fixsliced method. - * Every 5 rounds except the first we add the tweak value to the state */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); - s0 ^= tweak; - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - - /* Pack the state into the plaintext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#else /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ - -/* The small variant uses fixslicing for encryption, but we need to change - * to bitslicing for decryption because of the difficulty of fast-forwarding - * the fixsliced key schedule to the end. So the tiny variant is used for - * decryption when the small variant is selected. Since the NIST AEAD modes - * for GIFT-128 only use the block encrypt operation, the inefficiencies - * in decryption don't matter all that much */ - -/** - * \def gift128b_load_and_forward_schedule() - * \brief Generate the decryption key at the end of the last round. - * - * To do that, we run the block operation forward to determine the - * final state of the key schedule after the last round: - * - * w0 = ks->k[0]; - * w1 = ks->k[1]; - * w2 = ks->k[2]; - * w3 = ks->k[3]; - * for (round = 0; round < 40; ++round) { - * temp = w3; - * w3 = w2; - * w2 = w1; - * w1 = w0; - * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | - * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); - * } - * - * We can short-cut all of the above by noticing that we don't need - * to do the word rotations. Every 4 rounds, the rotation alignment - * returns to the original position and each word has been rotated - * by applying the "2 right and 4 left" bit-rotation step to it. - * We then repeat that 10 times for the full 40 rounds. The overall - * effect is to apply a "20 right and 40 left" bit-rotation to every - * word in the key schedule. That is equivalent to "4 right and 8 left" - * on the 16-bit sub-words. 
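 *
 * As a quick check: a 16-bit rotation by 20 is the same as a rotation by
 * 20 mod 16 = 4, and a rotation by 40 is the same as one by 40 mod 16 = 8.
 * The combined expression used in the macros below,
 *
 *     w = ((w & 0xFFF00000U) >> 4) | ((w & 0x000F0000U) << 12) |
 *         ((w & 0x000000FFU) << 8) | ((w & 0x0000FF00U) >> 8);
 *
 * rotates the top 16-bit half right by 4 and the bottom half left by 8
 * in a single step, which is exactly that fast-forwarded rotation.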
- */ -#if GIFT128_VARIANT != GIFT128_VARIANT_SMALL -#define gift128b_load_and_forward_schedule() \ - do { \ - w0 = ks->k[3]; \ - w1 = ks->k[1]; \ - w2 = ks->k[2]; \ - w3 = ks->k[0]; \ - w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ - ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ - w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ - ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ - w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ - ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ - w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ - ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ - } while (0) -#else -/* The small variant needs to also undo some of the rotations that were - * done to generate the fixsliced version of the key schedule */ -#define gift128b_load_and_forward_schedule() \ - do { \ - w0 = ks->k[3]; \ - w1 = ks->k[1]; \ - w2 = ks->k[2]; \ - w3 = ks->k[0]; \ - gift128b_swap_move(w3, w3, 0x000000FFU, 24); \ - gift128b_swap_move(w3, w3, 0x00003333U, 18); \ - gift128b_swap_move(w3, w3, 0x000F000FU, 12); \ - gift128b_swap_move(w3, w3, 0x00550055U, 9); \ - gift128b_swap_move(w1, w1, 0x000000FFU, 24); \ - gift128b_swap_move(w1, w1, 0x00003333U, 18); \ - gift128b_swap_move(w1, w1, 0x000F000FU, 12); \ - gift128b_swap_move(w1, w1, 0x00550055U, 9); \ - gift128b_swap_move(w2, w2, 0x000000FFU, 24); \ - gift128b_swap_move(w2, w2, 0x000F000FU, 12); \ - gift128b_swap_move(w2, w2, 0x03030303U, 6); \ - gift128b_swap_move(w2, w2, 0x11111111U, 3); \ - gift128b_swap_move(w0, w0, 0x000000FFU, 24); \ - gift128b_swap_move(w0, w0, 0x000F000FU, 12); \ - gift128b_swap_move(w0, w0, 0x03030303U, 6); \ - gift128b_swap_move(w0, w0, 0x11111111U, 3); \ - w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ - ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ - w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ - ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ - w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ - ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ - w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ - ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ - } while (0) -#endif - -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the ciphertext into the state buffer and convert from big endian */ - s0 = be_load_word32(input); - s1 = be_load_word32(input + 4); - s2 = be_load_word32(input + 8); - s3 = be_load_word32(input + 12); - - /* Generate the decryption key at the end of the last round */ - gift128b_load_and_forward_schedule(); - - /* Perform all 40 rounds */ - for (round = 40; round > 0; --round) { - /* Rotate the key schedule backwards */ - temp = w0; - w0 = w1; - w1 = w2; - w2 = w3; - w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | - ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; - - /* InvPermBits - apply the inverse of the 128-bit permutation */ - INV_PERM0(s0); - INV_PERM1(s1); - INV_PERM2(s2); - INV_PERM3(s3); - - /* InvSubCells - apply the inverse of the S-box */ - temp = s0; - s0 = s3; - s3 = temp; - s2 ^= s0 & s1; - s3 ^= 0xFFFFFFFFU; - s1 ^= s3; - s3 ^= s2; - s2 ^= s0 | s1; - s0 ^= s1 
& s3; - s1 ^= s0 & s2; - } - - /* Pack the state into the plaintext buffer in big endian */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); -} - -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak) -{ - uint32_t s0, s1, s2, s3; - uint32_t w0, w1, w2, w3; - uint32_t temp; - uint8_t round; - - /* Copy the plaintext into the state buffer and convert from nibbles */ - gift128n_to_words(output, input); - s0 = be_load_word32(output); - s1 = be_load_word32(output + 4); - s2 = be_load_word32(output + 8); - s3 = be_load_word32(output + 12); - - /* Generate the decryption key at the end of the last round */ - gift128b_load_and_forward_schedule(); - - /* Perform all 40 rounds */ - for (round = 40; round > 0; --round) { - /* Rotate the key schedule backwards */ - temp = w0; - w0 = w1; - w1 = w2; - w2 = w3; - w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | - ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); - - /* AddTweak - XOR in the tweak every 5 rounds except the last */ - if ((round % 5) == 0 && round < 40) - s0 ^= tweak; - - /* AddRoundKey - XOR in the key schedule and the round constant */ - s2 ^= w1; - s1 ^= w3; - s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; - - /* InvPermBits - apply the inverse of the 128-bit permutation */ - INV_PERM0(s0); - INV_PERM1(s1); - INV_PERM2(s2); - INV_PERM3(s3); - - /* InvSubCells - apply the inverse of the S-box */ - temp = s0; - s0 = s3; - s3 = temp; - s2 ^= s0 & s1; - s3 ^= 0xFFFFFFFFU; - s1 ^= s3; - s3 ^= s2; - s2 ^= s0 | s1; - s0 ^= s1 & s3; - s1 ^= s0 & s2; - } - - /* Pack the state into the plaintext buffer in nibble form */ - be_store_word32(output, s0); - be_store_word32(output + 4, s1); - be_store_word32(output + 8, s2); - be_store_word32(output + 12, s3); - gift128n_to_nibbles(output, output); -} - -#endif /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ - -#endif /* !GIFT128_VARIANT_ASM */ diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128.h b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128.h deleted file mode 100644 index f57d143..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128.h +++ /dev/null @@ -1,246 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_GIFT128_H -#define LW_INTERNAL_GIFT128_H - -/** - * \file internal-gift128.h - * \brief GIFT-128 block cipher. - * - * There are three versions of GIFT-128 in use within the second round - * submissions to the NIST lightweight cryptography competition. - * - * The most efficient version for 32-bit software implementation is the - * GIFT-128-b bit-sliced version from GIFT-COFB and SUNDAE-GIFT. - * - * The second is the nibble-based version from HYENA. We implement the - * HYENA version as a wrapper around the bit-sliced version. - * - * The third version is a variant on the HYENA nibble-based version that - * includes a 4-bit tweak value for domain separation. It is used by - * the ESTATE submission to NIST. - * - * Technically there is a fourth version of GIFT-128 which is the one that - * appeared in the original GIFT-128 paper. It is almost the same as the - * HYENA version except that the byte ordering is big-endian instead of - * HYENA's little-endian. The original version of GIFT-128 doesn't appear - * in any of the NIST submissions so we don't bother with it in this library. - * - * References: https://eprint.iacr.org/2017/622.pdf, - * https://eprint.iacr.org/2020/412.pdf, - * https://giftcipher.github.io/gift/ - */ - -#include -#include -#include "internal-gift128-config.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of a GIFT-128 block in bytes. - */ -#define GIFT128_BLOCK_SIZE 16 - -/** - * \var GIFT128_ROUND_KEYS - * \brief Number of round keys for the GIFT-128 key schedule. - */ -#if GIFT128_VARIANT == GIFT128_VARIANT_TINY -#define GIFT128_ROUND_KEYS 4 -#elif GIFT128_VARIANT == GIFT128_VARIANT_SMALL -#define GIFT128_ROUND_KEYS 20 -#else -#define GIFT128_ROUND_KEYS 80 -#endif - -/** - * \brief Structure of the key schedule for GIFT-128 (bit-sliced). - */ -typedef struct -{ - /** Pre-computed round keys for bit-sliced GIFT-128 */ - uint32_t k[GIFT128_ROUND_KEYS]; - -} gift128b_key_schedule_t; - -/** - * \brief Initializes the key schedule for GIFT-128 (bit-sliced). - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the 16 bytes of the key data. - */ -void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void gift128b_encrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced and pre-loaded). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This version assumes that the input has already been pre-loaded from - * big-endian into host byte order in the supplied word array. 
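 * As an illustrative sketch only (the names "data" for a 16-byte input
 * buffer and "ks" for an already-initialized key schedule are assumed
 * here, not part of this header; be_load_word32() comes from
 * internal-util.h), a caller might prepare the words as:
 *
 *     uint32_t block[4];
 *     block[0] = be_load_word32(data);
 *     block[1] = be_load_word32(data + 4);
 *     block[2] = be_load_word32(data + 8);
 *     block[3] = be_load_word32(data + 12);
 *     gift128b_encrypt_preloaded(&ks, block, block);
 *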
The output - * is delivered in the same way. - */ -void gift128b_encrypt_preloaded - (const gift128b_key_schedule_t *ks, uint32_t output[4], - const uint32_t input[4]); - -/** - * \brief Decrypts a 128-bit block with GIFT-128 (bit-sliced). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - */ -void gift128b_decrypt - (const gift128b_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Structure of the key schedule for GIFT-128 (nibble-based). - */ -typedef gift128b_key_schedule_t gift128n_key_schedule_t; - -/** - * \brief Initializes the key schedule for GIFT-128 (nibble-based). - * - * \param ks Points to the key schedule to initialize. - * \param key Points to the 16 bytes of the key data. - */ -void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key); - -/** - * \brief Encrypts a 128-bit block with GIFT-128 (nibble-based). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - */ -void gift128n_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/** - * \brief Decrypts a 128-bit block with GIFT-128 (nibble-based). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * - * The \a input and \a output buffers can be the same buffer for - * in-place decryption. - */ -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input); - -/* 4-bit tweak values expanded to 32-bit for TweGIFT-128 */ -#define GIFT128T_TWEAK_0 0x00000000 /**< TweGIFT-128 tweak value 0 */ -#define GIFT128T_TWEAK_1 0xe1e1e1e1 /**< TweGIFT-128 tweak value 1 */ -#define GIFT128T_TWEAK_2 0xd2d2d2d2 /**< TweGIFT-128 tweak value 2 */ -#define GIFT128T_TWEAK_3 0x33333333 /**< TweGIFT-128 tweak value 3 */ -#define GIFT128T_TWEAK_4 0xb4b4b4b4 /**< TweGIFT-128 tweak value 4 */ -#define GIFT128T_TWEAK_5 0x55555555 /**< TweGIFT-128 tweak value 5 */ -#define GIFT128T_TWEAK_6 0x66666666 /**< TweGIFT-128 tweak value 6 */ -#define GIFT128T_TWEAK_7 0x87878787 /**< TweGIFT-128 tweak value 7 */ -#define GIFT128T_TWEAK_8 0x78787878 /**< TweGIFT-128 tweak value 8 */ -#define GIFT128T_TWEAK_9 0x99999999 /**< TweGIFT-128 tweak value 9 */ -#define GIFT128T_TWEAK_10 0xaaaaaaaa /**< TweGIFT-128 tweak value 10 */ -#define GIFT128T_TWEAK_11 0x4b4b4b4b /**< TweGIFT-128 tweak value 11 */ -#define GIFT128T_TWEAK_12 0xcccccccc /**< TweGIFT-128 tweak value 12 */ -#define GIFT128T_TWEAK_13 0x2d2d2d2d /**< TweGIFT-128 tweak value 13 */ -#define GIFT128T_TWEAK_14 0x1e1e1e1e /**< TweGIFT-128 tweak value 14 */ -#define GIFT128T_TWEAK_15 0xffffffff /**< TweGIFT-128 tweak value 15 */ - -/** - * \brief Encrypts a 128-bit block with TweGIFT-128 (tweakable variant). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. 
- * \param tweak 4-bit tweak value expanded to 32-bit. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This variant of GIFT-128 is used by the ESTATE submission to the - * NIST Lightweight Cryptography Competition. A 4-bit tweak is added to - * some of the rounds to provide domain separation. If the tweak is - * zero, then this function is identical to gift128n_encrypt(). - */ -void gift128t_encrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak); - -/** - * \brief Decrypts a 128-bit block with TweGIFT-128 (tweakable variant). - * - * \param ks Points to the GIFT-128 key schedule. - * \param output Output buffer which must be at least 16 bytes in length. - * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value expanded to 32-bit. - * - * The \a input and \a output buffers can be the same buffer for - * in-place encryption. - * - * This variant of GIFT-128 is used by the ESTATE submission to the - * NIST Lightweight Cryptography Competition. A 4-bit tweak is added to - * some of the rounds to provide domain separation. If the tweak is - * zero, then this function is identical to gift128n_encrypt(). - */ -void gift128t_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, uint32_t tweak); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128b-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128b-avr.S deleted file mode 100644 index 641613a..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128b-avr.S +++ /dev/null @@ -1,2104 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 40 -table_0: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - - .text -.global gift128b_init - .type gift128b_init, @function -gift128b_init: - movw r30,r24 - movw r26,r22 -.L__stack_usage = 2 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - st Z,r18 - std Z+1,r19 - std Z+2,r20 - std Z+3,r21 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - std Z+4,r18 - std Z+5,r19 - std Z+6,r20 - std Z+7,r21 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - std Z+8,r18 - std Z+9,r19 - std Z+10,r20 - std Z+11,r21 - ld r21,X+ - ld r20,X+ - ld r19,X+ - ld r18,X+ - std Z+12,r18 - std Z+13,r19 - std Z+14,r20 - std Z+15,r21 - ret - .size gift128b_init, .-gift128b_init - - .text -.global gift128b_encrypt - .type gift128b_encrypt, @function -gift128b_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 36 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld 
r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r17,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - mov r16,r1 -46: - rcall 199f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - rcall 199f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - rcall 199f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - rcall 199f - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - ldi r17,40 - cpse r16,r17 - rjmp 46b - rjmp 548f -199: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - movw r18,r22 - movw r20,r2 - mov r0,r4 - and r0,r18 - eor r8,r0 - mov r0,r5 - and r0,r19 - eor r9,r0 - mov r0,r6 - and r0,r20 - eor r10,r0 - mov r0,r7 - and r0,r21 - eor r11,r0 - movw r22,r12 - movw r2,r14 - movw r12,r18 - movw r14,r20 - bst r22,1 - bld r0,0 - bst r22,4 - bld r22,1 - bst r2,0 - bld r22,4 - bst r22,2 - bld r2,0 - bst r23,0 - bld r22,2 - bst r22,3 - bld r23,0 - bst r23,4 - bld r22,3 - bst 
r2,3 - bld r23,4 - bst r23,6 - bld r2,3 - bst r3,3 - bld r23,6 - bst r23,5 - bld r3,3 - bst r2,7 - bld r23,5 - bst r3,6 - bld r2,7 - bst r3,1 - bld r3,6 - bst r22,5 - bld r3,1 - bst r2,4 - bld r22,5 - bst r2,2 - bld r2,4 - bst r23,2 - bld r2,2 - bst r23,3 - bld r23,2 - bst r23,7 - bld r23,3 - bst r3,7 - bld r23,7 - bst r3,5 - bld r3,7 - bst r2,5 - bld r3,5 - bst r2,6 - bld r2,5 - bst r3,2 - bld r2,6 - bst r23,1 - bld r3,2 - bst r22,7 - bld r23,1 - bst r3,4 - bld r22,7 - bst r2,1 - bld r3,4 - bst r22,6 - bld r2,1 - bst r3,0 - bld r22,6 - bst r0,0 - bld r3,0 - bst r4,0 - bld r0,0 - bst r4,1 - bld r4,0 - bst r4,5 - bld r4,1 - bst r6,5 - bld r4,5 - bst r6,7 - bld r6,5 - bst r7,7 - bld r6,7 - bst r7,6 - bld r7,7 - bst r7,2 - bld r7,6 - bst r5,2 - bld r7,2 - bst r5,0 - bld r5,2 - bst r0,0 - bld r5,0 - bst r4,2 - bld r0,0 - bst r5,1 - bld r4,2 - bst r4,4 - bld r5,1 - bst r6,1 - bld r4,4 - bst r4,7 - bld r6,1 - bst r7,5 - bld r4,7 - bst r6,6 - bld r7,5 - bst r7,3 - bld r6,6 - bst r5,6 - bld r7,3 - bst r7,0 - bld r5,6 - bst r0,0 - bld r7,0 - bst r4,3 - bld r0,0 - bst r5,5 - bld r4,3 - bst r6,4 - bld r5,5 - bst r6,3 - bld r6,4 - bst r5,7 - bld r6,3 - bst r7,4 - bld r5,7 - bst r6,2 - bld r7,4 - bst r5,3 - bld r6,2 - bst r5,4 - bld r5,3 - bst r6,0 - bld r5,4 - bst r0,0 - bld r6,0 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r8,2 - bld r8,0 - bst r9,2 - bld r8,2 - bst r9,1 - bld r9,2 - bst r8,5 - bld r9,1 - bst r10,6 - bld r8,5 - bst r11,0 - bld r10,6 - bst r8,3 - bld r11,0 - bst r9,6 - bld r8,3 - bst r11,1 - bld r9,6 - bst r8,7 - bld r11,1 - bst r11,6 - bld r8,7 - bst r11,3 - bld r11,6 - bst r9,7 - bld r11,3 - bst r11,5 - bld r9,7 - bst r10,7 - bld r11,5 - bst r11,4 - bld r10,7 - bst r10,3 - bld r11,4 - bst r9,4 - bld r10,3 - bst r10,1 - bld r9,4 - bst r8,4 - bld r10,1 - bst r10,2 - bld r8,4 - bst r9,0 - bld r10,2 - bst r8,1 - bld r9,0 - bst r8,6 - bld r8,1 - bst r11,2 - bld r8,6 - bst r9,3 - bld r11,2 - bst r9,5 - bld r9,3 - bst r10,5 - bld r9,5 - bst r10,4 - bld r10,5 - bst r10,0 - bld r10,4 - bst r0,0 - bld r10,0 - bst r12,0 - bld r0,0 - bst r12,3 - bld r12,0 - bst r13,7 - bld r12,3 - bst r15,6 - bld r13,7 - bst r15,0 - bld r15,6 - bst r0,0 - bld r15,0 - bst r12,1 - bld r0,0 - bst r12,7 - bld r12,1 - bst r15,7 - bld r12,7 - bst r15,4 - bld r15,7 - bst r14,0 - bld r15,4 - bst r0,0 - bld r14,0 - bst r12,2 - bld r0,0 - bst r13,3 - bld r12,2 - bst r13,6 - bld r13,3 - bst r15,2 - bld r13,6 - bst r13,0 - bld r15,2 - bst r0,0 - bld r13,0 - bst r12,4 - bld r0,0 - bst r14,3 - bld r12,4 - bst r13,5 - bld r14,3 - bst r14,6 - bld r13,5 - bst r15,1 - bld r14,6 - bst r0,0 - bld r15,1 - bst r12,5 - bld r0,0 - bst r14,7 - bld r12,5 - bst r15,5 - bld r14,7 - bst r14,4 - bld r15,5 - bst r14,1 - bld r14,4 - bst r0,0 - bld r14,1 - bst r12,6 - bld r0,0 - bst r15,3 - bld r12,6 - bst r13,4 - bld r15,3 - bst r14,2 - bld r13,4 - bst r13,1 - bld r14,2 - bst r0,0 - bld r13,1 - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - inc r16 - ret -548: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - 
out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt, .-gift128b_encrypt - - .text -.global gift128b_encrypt_preloaded - .type gift128b_encrypt_preloaded, @function -gift128b_encrypt_preloaded: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 36 - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r17,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - mov r16,r1 -46: - rcall 199f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - rcall 199f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - rcall 199f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - rcall 199f - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - lsl r26 - rol r27 - adc r26,r1 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - ldi r17,40 - cpse r16,r17 - rjmp 46b - rjmp 548f -199: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and 
r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - movw r18,r22 - movw r20,r2 - mov r0,r4 - and r0,r18 - eor r8,r0 - mov r0,r5 - and r0,r19 - eor r9,r0 - mov r0,r6 - and r0,r20 - eor r10,r0 - mov r0,r7 - and r0,r21 - eor r11,r0 - movw r22,r12 - movw r2,r14 - movw r12,r18 - movw r14,r20 - bst r22,1 - bld r0,0 - bst r22,4 - bld r22,1 - bst r2,0 - bld r22,4 - bst r22,2 - bld r2,0 - bst r23,0 - bld r22,2 - bst r22,3 - bld r23,0 - bst r23,4 - bld r22,3 - bst r2,3 - bld r23,4 - bst r23,6 - bld r2,3 - bst r3,3 - bld r23,6 - bst r23,5 - bld r3,3 - bst r2,7 - bld r23,5 - bst r3,6 - bld r2,7 - bst r3,1 - bld r3,6 - bst r22,5 - bld r3,1 - bst r2,4 - bld r22,5 - bst r2,2 - bld r2,4 - bst r23,2 - bld r2,2 - bst r23,3 - bld r23,2 - bst r23,7 - bld r23,3 - bst r3,7 - bld r23,7 - bst r3,5 - bld r3,7 - bst r2,5 - bld r3,5 - bst r2,6 - bld r2,5 - bst r3,2 - bld r2,6 - bst r23,1 - bld r3,2 - bst r22,7 - bld r23,1 - bst r3,4 - bld r22,7 - bst r2,1 - bld r3,4 - bst r22,6 - bld r2,1 - bst r3,0 - bld r22,6 - bst r0,0 - bld r3,0 - bst r4,0 - bld r0,0 - bst r4,1 - bld r4,0 - bst r4,5 - bld r4,1 - bst r6,5 - bld r4,5 - bst r6,7 - bld r6,5 - bst r7,7 - bld r6,7 - bst r7,6 - bld r7,7 - bst r7,2 - bld r7,6 - bst r5,2 - bld r7,2 - bst r5,0 - bld r5,2 - bst r0,0 - bld r5,0 - bst r4,2 - bld r0,0 - bst r5,1 - bld r4,2 - bst r4,4 - bld r5,1 - bst r6,1 - bld r4,4 - bst r4,7 - bld r6,1 - bst r7,5 - bld r4,7 - bst r6,6 - bld r7,5 - bst r7,3 - bld r6,6 - bst r5,6 - bld r7,3 - bst r7,0 - bld r5,6 - bst r0,0 - bld r7,0 - bst r4,3 - bld r0,0 - bst r5,5 - bld r4,3 - bst r6,4 - bld r5,5 - bst r6,3 - bld r6,4 - bst r5,7 - bld r6,3 - bst r7,4 - bld r5,7 - bst r6,2 - bld r7,4 - bst r5,3 - bld r6,2 - bst r5,4 - bld r5,3 - bst r6,0 - bld r5,4 - bst r0,0 - bld r6,0 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r8,2 - bld r8,0 - bst r9,2 - bld r8,2 - bst r9,1 - bld r9,2 - bst r8,5 - bld r9,1 - bst r10,6 - bld r8,5 - bst r11,0 - bld r10,6 - bst r8,3 - bld r11,0 - bst r9,6 - bld r8,3 - bst r11,1 - bld r9,6 - bst r8,7 - bld r11,1 - bst r11,6 - bld r8,7 - bst r11,3 - bld r11,6 - bst r9,7 - bld r11,3 - bst r11,5 - bld r9,7 - bst r10,7 - bld r11,5 - bst r11,4 - bld r10,7 - bst r10,3 - bld r11,4 - bst r9,4 - bld r10,3 - bst r10,1 - bld r9,4 - bst r8,4 - bld r10,1 - bst r10,2 - bld r8,4 - bst r9,0 - bld r10,2 - bst r8,1 - bld r9,0 - bst r8,6 - bld r8,1 - bst r11,2 - bld r8,6 - bst r9,3 - bld r11,2 - bst r9,5 - bld r9,3 - bst r10,5 - bld r9,5 - bst r10,4 - bld r10,5 - bst r10,0 - bld r10,4 - bst r0,0 - bld r10,0 - bst r12,0 - bld r0,0 - bst r12,3 - bld r12,0 - bst r13,7 - bld r12,3 - bst r15,6 - bld r13,7 - bst r15,0 - bld r15,6 - bst r0,0 - bld r15,0 - bst r12,1 - bld r0,0 - bst r12,7 - bld r12,1 - bst r15,7 - bld r12,7 - bst r15,4 - bld r15,7 - bst r14,0 - bld r15,4 - bst r0,0 - bld r14,0 - bst r12,2 - bld r0,0 - bst r13,3 - bld r12,2 - bst r13,6 - bld r13,3 - bst r15,2 - bld r13,6 - bst r13,0 - bld r15,2 - bst r0,0 - bld r13,0 - bst r12,4 - bld r0,0 - bst r14,3 - bld r12,4 - bst r13,5 - bld r14,3 - bst r14,6 - bld r13,5 - bst r15,1 - bld r14,6 - bst r0,0 - bld r15,1 - bst r12,5 - bld r0,0 - bst r14,7 - bld r12,5 - bst r15,5 - bld r14,7 - bst r14,4 - bld r15,5 - bst 
r14,1 - bld r14,4 - bst r0,0 - bld r14,1 - bst r12,6 - bld r0,0 - bst r15,3 - bld r12,6 - bst r13,4 - bld r15,3 - bst r14,2 - bld r13,4 - bst r13,1 - bld r14,2 - bst r0,0 - bld r13,1 - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - inc r16 - ret -548: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded - - .text -.global gift128b_decrypt - .type gift128b_decrypt, @function -gift128b_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r17,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -114: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - std 
Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - cpse r16,r1 - rjmp 114b - rjmp 611f -266: - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - bld r11,3 - bst r8,7 - bld r11,6 - 
bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst r9,1 - bld r8,5 - bst r9,2 - bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -611: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_decrypt, .-gift128b_decrypt - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128b-full-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128b-full-avr.S deleted file mode 100644 index ff11875..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128b-full-avr.S +++ /dev/null @@ -1,5037 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_FULL - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 
136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - .byte 3 - .byte 3 - .byte 39 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - .byte 2 - .byte 0 - .byte 80 - .byte 20 - .byte 129 - .byte 1 - .byte 2 - .byte 1 - .byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128b_init - .type gift128b_init, @function -gift128b_init: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 18 - ld r13,X+ - ld r12,X+ - ld r11,X+ - ld r10,X+ - ld r5,X+ - ld r4,X+ - ld r3,X+ - ld r2,X+ - ld r9,X+ - ld r8,X+ - ld r7,X+ - ld r6,X+ - ld r29,X+ - ld r28,X+ - ld r23,X+ - ld r22,X+ - st Z+,r22 - st Z+,r23 - st Z+,r28 - st Z+,r29 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - ldi r24,4 -33: - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r29 - ror r28 - ror r0 - lsr r29 - ror r28 - ror r0 - or r29,r0 - st Z+,r22 - st Z+,r23 - st Z+,r28 - st Z+,r29 - mov r0,r22 - mov r22,r2 - mov r2,r0 - mov r0,r23 - mov r23,r3 - mov r3,r0 - mov r0,r28 - mov r28,r4 - mov r4,r0 - mov r0,r29 - mov r29,r5 - mov r5,r0 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - lsl r6 - rol r7 - adc r6,r1 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - mov r0,r6 - mov r6,r10 - mov r10,r0 - mov r0,r7 - mov r7,r11 - mov r11,r0 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - dec r24 - breq 5115f - rjmp 33b -5115: - subi r30,80 - sbc r31,r1 - ldi r24,2 -119: - ld r22,Z - ldd r23,Z+1 - ldd r28,Z+2 - ldd r29,Z+3 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - 
ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - st Z,r29 - std Z+1,r23 - std Z+2,r28 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r28,Z+6 - ldd r29,Z+7 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+4,r29 - std Z+5,r23 - std Z+6,r28 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r28,Z+10 - ldd r29,Z+11 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi 
r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+8,r29 - std Z+9,r23 - std Z+10,r28 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r28,Z+14 - ldd r29,Z+15 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+12,r29 - std Z+13,r23 - std Z+14,r28 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r28,Z+18 - ldd r29,Z+19 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor 
r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+16,r29 - std Z+17,r23 - std Z+18,r28 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r28,Z+22 - ldd r29,Z+23 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+20,r29 - std Z+21,r23 - std Z+22,r28 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r28,Z+26 - ldd r29,Z+27 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - 
mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+24,r29 - std Z+25,r23 - std Z+26,r28 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r28,Z+30 - ldd r29,Z+31 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r28 - eor r21,r29 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - movw r18,r22 - movw r20,r28 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r28,r20 - eor r29,r21 - std Z+28,r29 - std Z+29,r23 - std Z+30,r28 - std Z+31,r22 - dec r24 - breq 1268f - adiw r30,40 - rjmp 119b -1268: - adiw r30,40 - movw r26,r30 - subi r26,80 - sbc r27,r1 - ldi r24,6 -1274: - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r2 - eor r19,r3 - andi r18,51 - andi r19,51 - eor r2,r18 - eor r3,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - st Z,r2 - std Z+1,r3 - std Z+2,r4 - std Z+3,r5 - movw r18,r22 - movw r20,r28 - andi r18,51 - andi r19,51 - andi r20,51 
- andi r21,51 - andi r22,204 - andi r23,204 - andi r28,204 - andi r29,204 - or r28,r21 - or r29,r18 - or r22,r19 - or r23,r20 - movw r18,r28 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r28 - eor r19,r29 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r28,r18 - eor r29,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r28,r18 - eor r29,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r28 - std Z+5,r29 - std Z+6,r22 - std Z+7,r23 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - swap r3 - lsl r4 - adc r4,r1 - lsl r4 - adc r4,r1 - swap r5 - std Z+8,r2 - std Z+9,r3 - std Z+10,r4 - std Z+11,r5 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r28 - adc r28,r1 - lsl r28 - adc r28,r1 - lsl r28 - adc r28,r1 - lsl r29 - adc r29,r1 - lsl r29 - adc r29,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r28 - std Z+15,r29 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - ldi r25,85 - and r2,r25 - and r3,r25 - and r4,r25 - and r5,r25 - or r2,r19 - or r3,r20 - or r4,r21 - or r5,r18 - std Z+16,r4 - std Z+17,r5 - std Z+18,r2 - std Z+19,r3 - movw r18,r22 - movw r20,r28 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - andi r28,170 - andi r29,170 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - lsl r22 - rol r23 - rol r28 - rol r29 - adc r22,r1 - or r22,r18 - or r23,r19 - or r28,r20 - or r29,r21 - std Z+20,r29 - std Z+21,r22 - std Z+22,r23 - std Z+23,r28 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r4 - eor r21,r5 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r4,r20 - eor r5,r21 - movw r18,r2 - movw r20,r4 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r14,r18 - movw r16,r20 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - lsr r17 - ror r16 - ror r15 - ror r14 - eor r14,r18 - eor r15,r19 - eor r16,r20 - eor r17,r21 - ldi r25,8 - and r14,r25 - and r15,r25 - andi r16,8 - andi r17,8 - eor r18,r14 - eor r19,r15 - eor r20,r16 - eor r21,r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - lsl r14 - rol r15 - rol r16 - rol r17 - eor r18,r14 - eor r19,r15 - eor r20,r16 - eor r21,r17 - ldi r17,15 - and r2,r17 - and r3,r17 - and r4,r17 - and r5,r17 - or r2,r18 - or r3,r19 - or r4,r20 - or r5,r21 - std Z+24,r2 - std Z+25,r3 - std Z+26,r4 - std Z+27,r5 - movw r18,r28 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r2,r22 - movw r4,r28 - ldi r16,1 - and r2,r16 - and r3,r16 - and r4,r16 - and r5,r16 - lsl r2 - rol r3 - rol r4 - rol r5 - lsl r2 - rol r3 - rol r4 - rol r5 - lsl r2 - rol r3 - rol r4 - rol r5 - or r2,r18 - or r3,r19 - 
movw r18,r28 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r2,r18 - or r3,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r4,r18 - or r5,r19 - movw r18,r22 - movw r20,r28 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r2,r18 - or r3,r19 - or r4,r20 - or r5,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r4,r22 - or r5,r23 - std Z+28,r2 - std Z+29,r3 - std Z+30,r4 - std Z+31,r5 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - mov r0,r1 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - lsr r5 - ror r4 - ror r0 - or r5,r0 - std Z+32,r3 - std Z+33,r2 - std Z+34,r4 - std Z+35,r5 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r28 - mov r28,r29 - mov r29,r0 - lsl r28 - rol r29 - adc r28,r1 - lsl r28 - rol r29 - adc r28,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r28 - std Z+39,r29 - dec r24 - breq 1733f - adiw r30,40 - rjmp 1274b -1733: - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_init, .-gift128b_init - - .text -.global gift128b_encrypt - .type gift128b_encrypt, @function -gift128b_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 19 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - movw r26,r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rjmp 765f -27: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror 
r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld 
r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - 
lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -765: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - pop r0 - pop r0 - pop r17 - pop r15 - pop 
r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt, .-gift128b_encrypt - - .text -.global gift128b_encrypt_preloaded - .type gift128b_encrypt_preloaded, @function -gift128b_encrypt_preloaded: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 19 - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - movw r26,r30 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rcall 27f - rjmp 765f -27: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov 
r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - 
mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor 
r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -765: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - pop r0 - pop r0 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded - - .text -.global gift128b_decrypt - .type gift128b_decrypt, @function -gift128b_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e -.L__stack_usage = 19 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - movw r26,r30 - subi r26,192 - sbci r27,254 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,160 - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rcall 30f - rjmp 768f -30: - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor 
r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r11 - mov r11,r10 - mov r10,r9 - mov r9,r8 - mov r8,r0 - mov r0,r12 - mov r12,r13 - mov r13,r14 - mov r14,r15 - mov r15,r0 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r1 - lsr r22 - ror r0 - lsr r22 - ror r0 - or r22,r0 - mov r0,r1 - lsr r23 - ror r0 - lsr r23 - ror r0 - or r23,r0 - mov r0,r1 - lsr r2 - ror r0 - lsr r2 - ror r0 - or r2,r0 - mov r0,r1 - lsr r3 - ror r0 - lsr r3 - ror r0 - or r3,r0 - swap r4 - swap r5 - swap r6 - swap r7 - lsl r8 - adc r8,r1 - lsl r8 - adc r8,r1 - lsl r9 - adc r9,r1 - lsl r9 - adc r9,r1 - lsl r10 - adc r10,r1 - lsl r10 - adc r10,r1 - lsl r11 - adc r11,r1 - lsl r11 - adc r11,r1 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - com r22 - com r23 - com r2 - com r3 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 
- eor r23,r9 - eor r2,r10 - eor r3,r11 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if 
defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - mov r0,r1 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - lsr r9 - ror r8 - ror r0 - or r9,r0 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - com r22 - com r23 - com r2 - com r3 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - dec r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - dec r30 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - ld r21,-X - ld r20,-X - ld r19,-X - ld r18,-X - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,119 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r15 - ror r14 - ror r13 - ror r12 - lsr r15 - ror r14 - 
ror r13 - ror r12 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,17 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -768: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+1 - ldd r27,Y+2 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - pop r0 - pop r0 - pop r17 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_decrypt, .-gift128b_decrypt - -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128b-small-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128b-small-avr.S deleted file mode 100644 index 77ef9fd..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128b-small-avr.S +++ /dev/null @@ -1,6053 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - .byte 3 - .byte 3 - .byte 39 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 0 - .byte 3 - 
.byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - .byte 2 - .byte 0 - .byte 80 - .byte 20 - .byte 129 - .byte 1 - .byte 2 - .byte 1 - .byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128b_init - .type gift128b_init, @function -gift128b_init: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 16 - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - ldi r24,4 -33: - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - mov r0,r22 - mov r22,r4 - mov r4,r0 - mov r0,r23 - mov r23,r5 - mov r5,r0 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - dec r24 - breq 5115f - rjmp 33b -5115: - subi r30,80 - sbc r31,r1 - ldi r24,2 -119: - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - st Z,r3 - std Z+1,r23 - std Z+2,r2 - std 
Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+4,r3 - std Z+5,r23 - std Z+6,r2 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+8,r3 - std Z+9,r23 - std Z+10,r2 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r2,Z+14 - 
ldd r3,Z+15 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+12,r3 - std Z+13,r23 - std Z+14,r2 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r3 - std Z+17,r23 - std Z+18,r2 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - 
eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+20,r3 - std Z+21,r23 - std Z+22,r2 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+24,r3 - std Z+25,r23 - std Z+26,r2 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol 
r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+28,r3 - std Z+29,r23 - std Z+30,r2 - std Z+31,r22 - dec r24 - breq 1268f - adiw r30,40 - rjmp 119b -1268: - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size gift128b_init, .-gift128b_init - - .text -.global gift128b_encrypt - .type gift128b_encrypt, @function -gift128b_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ldi r24,20 -1: - ld r22,Z+ - ld r23,Z+ - ld r2,Z+ - ld r3,Z+ - std Y+1,r22 - std Y+2,r23 - std Y+3,r2 - std Y+4,r3 - adiw r28,4 - dec r24 - brne 1b - subi r28,80 - sbc r29,r1 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 73f -#if 
defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 73f - rcall 73f - rjmp 1285f -73: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - 
eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor 
r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov 
r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -811: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 
- swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror 
r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -1285: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt, .-gift128b_encrypt - - .text -.global gift128b_encrypt_preloaded - .type gift128b_encrypt_preloaded, @function -gift128b_encrypt_preloaded: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ldi r24,20 -1: - ld r22,Z+ - ld r23,Z+ - ld r2,Z+ - ld r3,Z+ - std Y+1,r22 - std Y+2,r23 - std Y+3,r2 - std Y+4,r3 - adiw r28,4 - dec r24 - brne 1b - subi r28,80 - sbc r29,r1 - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if 
defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 73f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 811f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 73f - rcall 73f - rjmp 1285f -73: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or 
r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld 
r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - 
com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -811: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 
- ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc 
r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -1285: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 40 -table_1: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - - .text -.global gift128b_decrypt - .type gift128b_decrypt, @function -gift128b_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor 
r20,r24 - eor r21,r25 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - 
eor r24,r20 - eor r25,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r25 - mov r25,r26 - mov r26,r0 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r17,hh8(table_1) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -678: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 830f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - std 
Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 830f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 830f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 830f - cpse r16,r1 - rjmp 678b - rjmp 1175f -830: - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - bld r11,3 - bst r8,7 - bld r11,6 - 
bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst r9,1 - bld r8,5 - bst r9,2 - bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -1175: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_decrypt, .-gift128b_decrypt - -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128b-tiny-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128b-tiny-avr.S deleted file mode 100644 index e7a03f1..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-gift128b-tiny-avr.S +++ /dev/null @@ -1,6766 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - -#include "internal-gift128-config.h" - -#if GIFT128_VARIANT == GIFT128_VARIANT_TINY - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_0, @object - .size table_0, 160 -table_0: - .byte 8 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 128 - .byte 1 - .byte 128 - .byte 2 - .byte 0 - .byte 0 - .byte 84 - .byte 129 - .byte 1 - .byte 1 - .byte 1 - .byte 31 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 136 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 81 - .byte 128 - .byte 1 - .byte 3 - .byte 3 - .byte 47 - .byte 0 - .byte 0 - .byte 128 - .byte 
128 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 96 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 65 - .byte 128 - .byte 0 - .byte 3 - .byte 3 - .byte 39 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 224 - .byte 1 - .byte 64 - .byte 2 - .byte 0 - .byte 80 - .byte 17 - .byte 128 - .byte 1 - .byte 2 - .byte 3 - .byte 43 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 8 - .byte 8 - .byte 16 - .byte 0 - .byte 64 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 64 - .byte 1 - .byte 128 - .byte 0 - .byte 2 - .byte 2 - .byte 33 - .byte 0 - .byte 0 - .byte 128 - .byte 128 - .byte 0 - .byte 0 - .byte 16 - .byte 0 - .byte 192 - .byte 1 - .byte 0 - .byte 2 - .byte 0 - .byte 0 - .byte 81 - .byte 128 - .byte 1 - .byte 1 - .byte 3 - .byte 46 - .byte 0 - .byte 0 - .byte 128 - .byte 0 - .byte 136 - .byte 8 - .byte 16 - .byte 0 - .byte 32 - .byte 1 - .byte 96 - .byte 2 - .byte 0 - .byte 80 - .byte 64 - .byte 128 - .byte 0 - .byte 3 - .byte 1 - .byte 6 - .byte 0 - .byte 0 - .byte 128 - .byte 8 - .byte 136 - .byte 0 - .byte 16 - .byte 0 - .byte 160 - .byte 1 - .byte 192 - .byte 2 - .byte 0 - .byte 80 - .byte 20 - .byte 129 - .byte 1 - .byte 2 - .byte 1 - .byte 26 - .byte 0 - .byte 0 - .byte 128 - - .text -.global gift128b_init - .type gift128b_init, @function -gift128b_init: - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 - movw r26,r22 -.L__stack_usage = 16 - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - st Z,r22 - std Z+1,r23 - std Z+2,r2 - std Z+3,r3 - std Z+4,r4 - std Z+5,r5 - std Z+6,r6 - std Z+7,r7 - std Z+8,r8 - std Z+9,r9 - std Z+10,r10 - std Z+11,r11 - std Z+12,r12 - std Z+13,r13 - std Z+14,r14 - std Z+15,r15 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - ret - .size gift128b_init, .-gift128b_init - - .text -.global gift128b_encrypt - .type gift128b_encrypt, @function -gift128b_encrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - movw r30,r28 - adiw r30,1 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - ldi r24,4 -35: - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - mov r0,r22 - mov r22,r4 - mov r4,r0 - mov r0,r23 - mov r23,r5 - mov r5,r0 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - lsl r8 
- rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - dec r24 - breq 5117f - rjmp 35b -5117: - subi r30,80 - sbc r31,r1 - ldi r24,2 -121: - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - st Z,r3 - std Z+1,r23 - std Z+2,r2 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - 
movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+4,r3 - std Z+5,r23 - std Z+6,r2 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+8,r3 - std Z+9,r23 - std Z+10,r2 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor 
r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+12,r3 - std Z+13,r23 - std Z+14,r2 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r3 - std Z+17,r23 - std Z+18,r2 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+20,r3 - std Z+21,r23 - std Z+22,r2 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - movw 
r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+24,r3 - std Z+25,r23 - std Z+26,r2 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+28,r3 - std Z+29,r23 - std Z+30,r2 - std Z+31,r22 - dec r24 
- breq 1270f - adiw r30,40 - rjmp 121b -1270: - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 1329f - rcall 1329f - rjmp 2541f -1329: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and 
r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - 
eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - 
swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -2067: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor 
r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 
- rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -2541: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt, .-gift128b_encrypt - - .text -.global gift128b_encrypt_preloaded - .type gift128b_encrypt_preloaded, @function -gift128b_encrypt_preloaded: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - subi r28,80 - sbci r29,0 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 100 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r4,Z+4 - ldd r5,Z+5 - ldd r6,Z+6 - ldd r7,Z+7 - ldd r8,Z+8 - ldd r9,Z+9 - ldd r10,Z+10 - ldd r11,Z+11 - ldd r12,Z+12 - ldd r13,Z+13 - ldd r14,Z+14 - ldd r15,Z+15 - movw r30,r28 - adiw r30,1 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - ldi r24,4 -35: - st Z+,r4 - st Z+,r5 - st Z+,r6 - st Z+,r7 - lsl r22 - rol r23 - 
adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - lsl r22 - rol r23 - adc r22,r1 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - st Z+,r22 - st Z+,r23 - st Z+,r2 - st Z+,r3 - mov r0,r22 - mov r22,r4 - mov r4,r0 - mov r0,r23 - mov r23,r5 - mov r5,r0 - mov r0,r2 - mov r2,r6 - mov r6,r0 - mov r0,r3 - mov r3,r7 - mov r7,r0 - st Z+,r12 - st Z+,r13 - st Z+,r14 - st Z+,r15 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - mov r0,r1 - lsr r11 - ror r10 - ror r0 - lsr r11 - ror r10 - ror r0 - or r11,r0 - st Z+,r8 - st Z+,r9 - st Z+,r10 - st Z+,r11 - mov r0,r8 - mov r8,r12 - mov r12,r0 - mov r0,r9 - mov r9,r13 - mov r13,r0 - mov r0,r10 - mov r10,r14 - mov r14,r0 - mov r0,r11 - mov r11,r15 - mov r15,r0 - dec r24 - breq 5117f - rjmp 35b -5117: - subi r30,80 - sbc r31,r1 - ldi r24,2 -121: - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - st Z,r3 - std Z+1,r23 - std Z+2,r2 - std Z+3,r22 - ldd r22,Z+4 - ldd r23,Z+5 - ldd r2,Z+6 - ldd r3,Z+7 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,85 - mov r19,r1 - andi r20,85 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol 
r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+4,r3 - std Z+5,r23 - std Z+6,r2 - std Z+7,r22 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+8,r3 - std Z+9,r23 - std Z+10,r2 - std Z+11,r22 - ldd r22,Z+12 - ldd r23,Z+13 - ldd r2,Z+14 - ldd r3,Z+15 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,17 - andi r19,17 - andi r20,17 - andi r21,17 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - 
mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,15 - mov r19,r1 - andi r20,15 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+12,r3 - std Z+13,r23 - std Z+14,r2 - std Z+15,r22 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+16,r3 - std Z+17,r23 - std Z+18,r2 - std Z+19,r22 - ldd r22,Z+20 - ldd r23,Z+21 - ldd r2,Z+22 - ldd r3,Z+23 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r19 - rol r20 - rol r21 - rol r0 - movw r18,r20 - mov r20,r0 - mov r21,r1 - eor r18,r22 - eor r19,r23 - andi r18,170 - andi r19,170 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r0,r1 - lsr r20 - ror r19 - ror r18 - ror r0 - movw r20,r18 - mov r19,r0 - mov r18,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - movw r18,r20 - mov r20,r1 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,51 - andi r19,51 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 
- eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+20,r3 - std Z+21,r23 - std Z+22,r2 - std Z+23,r22 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+24,r3 - std Z+25,r23 - std Z+26,r2 - std Z+27,r22 - ldd r22,Z+28 - ldd r23,Z+29 - ldd r2,Z+30 - ldd r3,Z+31 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,10 - andi r19,10 - andi r20,10 - andi r21,10 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r0,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - lsl r18 - rol r19 - rol r20 - rol r21 - rol r0 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r0 - eor r18,r22 - eor r19,r23 - eor r20,r2 - eor r21,r3 - andi r18,204 - mov r19,r1 - andi r20,204 - mov r21,r1 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - lsr r21 - ror r20 - ror r19 - ror r18 - ror r0 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r0 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - movw r18,r22 - movw r20,r2 - mov r18,r19 - mov r19,r20 - mov r20,r21 - mov r21,r1 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r22 - 
eor r19,r23 - andi r18,240 - andi r19,240 - eor r22,r18 - eor r23,r19 - mov r20,r1 - mov r21,r1 - mov r21,r20 - mov r20,r19 - mov r19,r18 - mov r18,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - std Z+28,r3 - std Z+29,r23 - std Z+30,r2 - std Z+31,r22 - dec r24 - breq 1270f - adiw r30,40 - rjmp 121b -1270: - ld r22,X+ - ld r23,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - ld r14,X+ - ld r15,X+ - movw r26,r28 - adiw r26,1 - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,20 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,40 - sbiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,60 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,80 - sbiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,100 - adiw r26,40 - rcall 1329f -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - rcall 2067f - ldi r30,lo8(table_0) - ldi r31,hi8(table_0) -#if defined(RAMPZ) - ldi r24,hh8(table_0) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r24 -#endif - ldi r30,120 - sbiw r26,40 - rcall 1329f - rcall 1329f - rjmp 2541f -1329: - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,204 - andi r19,204 - andi r20,204 - andi r21,204 - lsr r7 - ror r6 - ror r5 - 
ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - ldi r25,51 - and r4,r25 - and r5,r25 - and r6,r25 - and r7,r25 - or r4,r18 - or r5,r19 - or r6,r20 - or r7,r21 - movw r18,r8 - movw r20,r10 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,238 - andi r19,238 - andi r20,238 - andi r21,238 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - lsr r11 - ror r10 - ror r9 - ror r8 - ldi r24,17 - and r8,r24 - and r9,r24 - and r10,r24 - and r11,r24 - or r8,r18 - or r9,r19 - or r10,r20 - or r11,r21 - movw r18,r12 - movw r20,r14 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - andi r18,136 - andi r19,136 - andi r20,136 - andi r21,136 - lsr r15 - ror r14 - ror r13 - ror r12 - ldi r17,119 - and r12,r17 - and r13,r17 - and r14,r17 - and r15,r17 - or r12,r18 - or r13,r19 - or r14,r20 - or r15,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r1 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - lsr r3 - ror r2 - ror r0 - or r3,r0 - mov r0,r5 - mov r5,r4 - mov r4,r0 - mov r0,r7 - mov r7,r6 - mov r6,r0 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r8 - rol r9 - adc r8,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - lsl r10 - rol r11 - adc r10,r1 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif 
defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - movw r18,r4 - movw r20,r6 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - mov r0,r10 - mov r10,r8 - mov r8,r0 - mov r0,r11 - mov r11,r9 - mov r9,r0 - movw r18,r8 - movw r20,r10 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r8 - eor r19,r9 - andi r18,85 - andi r19,85 - eor r8,r18 - eor r9,r19 - mov r20,r1 - mov r21,r1 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - mov r0,r14 - mov r14,r12 - mov r12,r0 - mov r0,r15 - mov r15,r13 - mov r13,r0 - movw r18,r14 - lsr r19 - ror r18 - eor r18,r14 - eor r19,r15 - andi r18,85 - andi r19,85 - eor r14,r18 - eor r15,r19 - lsl r18 - rol r19 - eor r14,r18 - eor r15,r19 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - mov r0,r12 - and r0,r8 - eor r4,r0 - mov r0,r13 - and r0,r9 - eor r5,r0 - mov r0,r14 - and r0,r10 - eor r6,r0 - mov r0,r15 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r22 - eor r12,r0 - mov r0,r5 - and r0,r23 - eor r13,r0 - mov r0,r6 - and r0,r2 - eor r14,r0 - mov r0,r7 - and r0,r3 - eor r15,r0 - mov r0,r12 - or r0,r4 - eor r8,r0 - mov r0,r13 - or r0,r5 - eor r9,r0 - mov r0,r14 - or r0,r6 - eor r10,r0 - mov r0,r15 - or r0,r7 - eor r11,r0 - eor r22,r8 - eor r23,r9 - eor r2,r10 - eor 
r3,r11 - eor r4,r22 - eor r5,r23 - eor r6,r2 - eor r7,r3 - com r22 - com r23 - com r2 - com r3 - mov r0,r12 - and r0,r4 - eor r8,r0 - mov r0,r13 - and r0,r5 - eor r9,r0 - mov r0,r14 - and r0,r6 - eor r10,r0 - mov r0,r15 - and r0,r7 - eor r11,r0 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - swap r4 - swap r5 - swap r6 - swap r7 - mov r0,r1 - lsr r8 - ror r0 - lsr r8 - ror r0 - or r8,r0 - mov r0,r1 - lsr r9 - ror r0 - lsr r9 - ror r0 - or r9,r0 - mov r0,r1 - lsr r10 - ror r0 - lsr r10 - ror r0 - or r10,r0 - mov r0,r1 - lsr r11 - ror r0 - lsr r11 - ror r0 - or r11,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r12,r18 - eor r13,r19 - eor r14,r20 - eor r15,r21 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - com r12 - com r13 - com r14 - com r15 - mov r0,r22 - and r0,r4 - eor r8,r0 - mov r0,r23 - and r0,r5 - eor r9,r0 - mov r0,r2 - and r0,r6 - eor r10,r0 - mov r0,r3 - and r0,r7 - eor r11,r0 - mov r0,r6 - mov r6,r4 - mov r4,r0 - mov r0,r7 - mov r7,r5 - mov r5,r0 - mov r0,r8 - mov r8,r9 - mov r9,r10 - mov r10,r11 - mov r11,r0 - mov r0,r15 - mov r15,r14 - mov r14,r13 - mov r13,r12 - mov r12,r0 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ld r18,X+ - ld r19,X+ - ld r20,X+ - ld r21,X+ - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r19,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r19,Z -#elif defined(__AVR_TINY__) - ld r19,Z -#else - lpm - mov r19,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r20,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r20,Z -#elif defined(__AVR_TINY__) - ld r20,Z -#else - lpm - mov r20,r0 -#endif - inc r30 -#if defined(RAMPZ) - elpm r21,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r21,Z -#elif defined(__AVR_TINY__) - ld r21,Z -#else - lpm - mov r21,r0 -#endif - inc r30 - eor r22,r18 - eor r23,r19 - eor r2,r20 - eor r3,r21 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - eor r12,r22 - eor r13,r23 - eor 
r14,r2 - eor r15,r3 - eor r22,r12 - eor r23,r13 - eor r2,r14 - eor r3,r15 - ret -2067: - movw r30,r26 - sbiw r30,40 - push r3 - push r2 - push r23 - push r22 - push r7 - push r6 - push r5 - push r4 - ld r22,Z - ldd r23,Z+1 - ldd r2,Z+2 - ldd r3,Z+3 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - movw r18,r26 - movw r20,r24 - movw r18,r20 - mov r20,r1 - mov r21,r1 - eor r18,r26 - eor r19,r27 - andi r18,51 - andi r19,51 - eor r26,r18 - eor r27,r19 - mov r20,r1 - mov r21,r1 - movw r20,r18 - mov r18,r1 - mov r19,r1 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,68 - andi r19,68 - andi r20,85 - andi r21,85 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - st Z,r26 - std Z+1,r27 - std Z+2,r24 - std Z+3,r25 - movw r18,r22 - movw r20,r2 - andi r18,51 - andi r19,51 - andi r20,51 - andi r21,51 - andi r22,204 - andi r23,204 - ldi r17,204 - and r2,r17 - and r3,r17 - or r2,r21 - or r3,r18 - or r22,r19 - or r23,r20 - movw r18,r2 - movw r20,r22 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r2 - eor r19,r3 - eor r20,r22 - eor r21,r23 - mov r18,r1 - andi r19,17 - andi r20,85 - andi r21,85 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r2,r18 - eor r3,r19 - eor r22,r20 - eor r23,r21 - std Z+4,r2 - std Z+5,r3 - std Z+6,r22 - std Z+7,r23 - ldd r22,Z+8 - ldd r23,Z+9 - ldd r2,Z+10 - ldd r3,Z+11 - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - lsl r26 - adc r26,r1 - lsl r26 - adc r26,r1 - swap r27 - lsl r24 - adc r24,r1 - lsl r24 - adc r24,r1 - swap r25 - std Z+8,r26 - std Z+9,r27 - std Z+10,r24 - std Z+11,r25 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r22 - adc r22,r1 - lsl r23 - adc r23,r1 - lsl r23 - adc r23,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r2 - adc r2,r1 - lsl r3 - adc r3,r1 - lsl r3 - adc r3,r1 - std Z+12,r22 - std Z+13,r23 - std Z+14,r2 - std Z+15,r3 - ldd r22,Z+16 - ldd r23,Z+17 - ldd r2,Z+18 - ldd r3,Z+19 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r24,Z+22 - ldd r25,Z+23 - movw r18,r26 - movw r20,r24 - andi r18,170 - andi r19,170 - andi r20,170 - andi r21,170 - andi r26,85 - andi r27,85 - andi r24,85 - andi r25,85 - or r26,r19 - or r27,r20 - or r24,r21 - or r25,r18 - std Z+16,r24 - std Z+17,r25 - std Z+18,r26 - std Z+19,r27 - movw r18,r22 - movw r20,r2 - andi r18,85 - andi r19,85 - andi r20,85 - andi r21,85 - andi r22,170 - andi r23,170 - ldi r16,170 - and r2,r16 - and r3,r16 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - lsl r22 - rol r23 - rol r2 - rol r3 - adc r22,r1 - or r22,r18 - or r23,r19 - or r2,r20 - or r3,r21 - std Z+20,r3 - std Z+21,r22 - std Z+22,r23 - std Z+23,r2 - ldd r22,Z+24 - ldd r23,Z+25 - ldd r2,Z+26 - ldd r3,Z+27 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r24,Z+30 - ldd r25,Z+31 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - ror r18 - lsr r21 - ror r20 - ror r19 - ror r18 - eor r18,r26 - eor r19,r27 - eor r20,r24 - eor r21,r25 - andi r18,3 - andi r19,3 - andi r20,3 - andi r21,3 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - lsl r18 - rol r19 - rol r20 - rol r21 - lsl r18 - rol r19 - rol r20 - rol r21 - eor r26,r18 - eor r27,r19 - eor r24,r20 - eor r25,r21 - movw r18,r26 - movw r20,r24 - lsr r21 - ror r20 - ror r19 - 
ror r18 - andi r18,120 - andi r19,120 - andi r20,120 - andi r21,120 - movw r4,r18 - movw r6,r20 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - lsr r7 - ror r6 - ror r5 - ror r4 - eor r4,r18 - eor r5,r19 - eor r6,r20 - eor r7,r21 - ldi r16,8 - and r4,r16 - and r5,r16 - and r6,r16 - and r7,r16 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - lsl r4 - rol r5 - rol r6 - rol r7 - eor r18,r4 - eor r19,r5 - eor r20,r6 - eor r21,r7 - andi r26,15 - andi r27,15 - andi r24,15 - andi r25,15 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - std Z+24,r26 - std Z+25,r27 - std Z+26,r24 - std Z+27,r25 - movw r18,r2 - lsr r19 - ror r18 - lsr r19 - ror r18 - andi r18,48 - andi r19,48 - movw r26,r22 - movw r24,r2 - andi r26,1 - andi r27,1 - andi r24,1 - andi r25,1 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - lsl r26 - rol r27 - rol r24 - rol r25 - or r26,r18 - or r27,r19 - movw r18,r2 - lsl r18 - rol r19 - lsl r18 - rol r19 - andi r18,192 - andi r19,192 - or r26,r18 - or r27,r19 - movw r18,r22 - andi r18,224 - andi r19,224 - lsr r19 - ror r18 - or r24,r18 - or r25,r19 - movw r18,r22 - movw r20,r2 - lsr r21 - ror r20 - ror r19 - ror r18 - andi r18,7 - andi r19,7 - andi r20,7 - andi r21,7 - or r26,r18 - or r27,r19 - or r24,r20 - or r25,r21 - andi r22,16 - andi r23,16 - lsl r22 - rol r23 - lsl r22 - rol r23 - lsl r22 - rol r23 - or r24,r22 - or r25,r23 - std Z+28,r26 - std Z+29,r27 - std Z+30,r24 - std Z+31,r25 - ldd r22,Z+32 - ldd r23,Z+33 - ldd r2,Z+34 - ldd r3,Z+35 - ldd r26,Z+36 - ldd r27,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Z+32,r27 - std Z+33,r26 - std Z+34,r24 - std Z+35,r25 - mov r0,r1 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - lsr r23 - ror r22 - ror r0 - or r23,r0 - mov r0,r2 - mov r2,r3 - mov r3,r0 - lsl r2 - rol r3 - adc r2,r1 - lsl r2 - rol r3 - adc r2,r1 - std Z+36,r22 - std Z+37,r23 - std Z+38,r2 - std Z+39,r3 - pop r4 - pop r5 - pop r6 - pop r7 - pop r22 - pop r23 - pop r2 - pop r3 - movw r26,r30 - ret -2541: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - subi r28,175 - sbci r29,255 - ld r26,Y+ - ld r27,Y - subi r28,82 - sbc r29,r1 - st X+,r22 - st X+,r23 - st X+,r2 - st X+,r3 - st X+,r4 - st X+,r5 - st X+,r6 - st X+,r7 - st X+,r8 - st X+,r9 - st X+,r10 - st X+,r11 - st X+,r12 - st X+,r13 - st X+,r14 - st X+,r15 - subi r28,174 - sbci r29,255 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded - - .section .progmem.data,"a",@progbits - .p2align 8 - .type table_1, @object - .size table_1, 40 -table_1: - .byte 1 - .byte 3 - .byte 7 - .byte 15 - .byte 31 - .byte 62 - .byte 61 - .byte 59 - .byte 55 - .byte 47 - .byte 30 - .byte 60 - .byte 57 - .byte 51 - .byte 39 - .byte 14 - .byte 29 - .byte 58 - .byte 53 - .byte 43 - .byte 22 - .byte 44 - .byte 24 - .byte 48 - .byte 33 - .byte 2 - .byte 5 - .byte 11 - .byte 23 - .byte 46 - .byte 28 - .byte 56 - .byte 49 - .byte 35 - .byte 6 - .byte 13 - .byte 27 - .byte 54 - .byte 45 - .byte 26 - 
- .text -.global gift128b_decrypt - .type gift128b_decrypt, @function -gift128b_decrypt: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r23 - push r22 - movw r30,r24 - movw r26,r20 - in r28,0x3d - in r29,0x3e - sbiw r28,16 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 -.L__stack_usage = 35 - ld r3,X+ - ld r2,X+ - ld r23,X+ - ld r22,X+ - ld r7,X+ - ld r6,X+ - ld r5,X+ - ld r4,X+ - ld r11,X+ - ld r10,X+ - ld r9,X+ - ld r8,X+ - ld r15,X+ - ld r14,X+ - ld r13,X+ - ld r12,X+ - ldd r26,Z+12 - ldd r27,Z+13 - ldd r24,Z+14 - ldd r25,Z+15 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Z+4 - ldd r27,Z+5 - ldd r24,Z+6 - ldd r25,Z+7 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Z+8 - ldd r27,Z+9 - ldd r24,Z+10 - ldd r25,Z+11 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ld r26,Z - ldd r27,Z+1 - ldd r24,Z+2 - ldd r25,Z+3 - mov r0,r27 - mov r27,r26 - mov r26,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - lsr r25 - ror r24 - ror r0 - or r25,r0 - ldi r30,lo8(table_1) - ldi r31,hi8(table_1) -#if defined(RAMPZ) - ldi r17,hh8(table_1) - in r0,_SFR_IO_ADDR(RAMPZ) - push r0 - out _SFR_IO_ADDR(RAMPZ),r17 -#endif - ldi r16,40 -114: - ldd r0,Y+9 - eor r8,r0 - ldd r0,Y+10 - eor r9,r0 - ldd r0,Y+11 - eor r10,r0 - ldd r0,Y+12 - eor r11,r0 - std Y+13,r26 - std Y+14,r27 - std Y+15,r24 - std Y+16,r25 - ldd r26,Y+1 - ldd r27,Y+2 - ldd r24,Y+3 - ldd r25,Y+4 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+13 - eor r8,r0 - ldd r0,Y+14 - eor r9,r0 - ldd r0,Y+15 - eor r10,r0 - ldd r0,Y+16 - eor r11,r0 - std Y+1,r26 - std Y+2,r27 - std Y+3,r24 - std Y+4,r25 - ldd r26,Y+5 - ldd r27,Y+6 - ldd r24,Y+7 - ldd r25,Y+8 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+1 - eor r8,r0 - ldd r0,Y+2 - eor r9,r0 - ldd r0,Y+3 - eor r10,r0 - ldd r0,Y+4 - eor r11,r0 - std Y+5,r26 - std Y+6,r27 - std Y+7,r24 - std Y+8,r25 - ldd r26,Y+9 - ldd r27,Y+10 - ldd r24,Y+11 - ldd r25,Y+12 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - ldd r0,Y+5 - eor r8,r0 - ldd r0,Y+6 - eor r9,r0 - ldd r0,Y+7 - eor r10,r0 - ldd r0,Y+8 - eor r11,r0 - std Y+9,r26 - std Y+10,r27 - std Y+11,r24 - std Y+12,r25 - ldd r26,Y+13 - ldd r27,Y+14 - ldd r24,Y+15 - ldd r25,Y+16 - mov r0,r1 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror r26 - ror r0 - lsr r27 - ror 
r26 - ror r0 - lsr r27 - ror r26 - ror r0 - or r27,r0 - lsl r24 - rol r25 - adc r24,r1 - lsl r24 - rol r25 - adc r24,r1 - rcall 266f - cpse r16,r1 - rjmp 114b - rjmp 611f -266: - eor r4,r26 - eor r5,r27 - eor r6,r24 - eor r7,r25 - ldi r18,128 - eor r15,r18 - dec r16 - mov r30,r16 -#if defined(RAMPZ) - elpm r18,Z -#elif defined(__AVR_HAVE_LPMX__) - lpm r18,Z -#elif defined(__AVR_TINY__) - ld r18,Z -#else - lpm - mov r18,r0 -#endif - eor r12,r18 - bst r22,1 - bld r0,0 - bst r3,0 - bld r22,1 - bst r22,6 - bld r3,0 - bst r2,1 - bld r22,6 - bst r3,4 - bld r2,1 - bst r22,7 - bld r3,4 - bst r23,1 - bld r22,7 - bst r3,2 - bld r23,1 - bst r2,6 - bld r3,2 - bst r2,5 - bld r2,6 - bst r3,5 - bld r2,5 - bst r3,7 - bld r3,5 - bst r23,7 - bld r3,7 - bst r23,3 - bld r23,7 - bst r23,2 - bld r23,3 - bst r2,2 - bld r23,2 - bst r2,4 - bld r2,2 - bst r22,5 - bld r2,4 - bst r3,1 - bld r22,5 - bst r3,6 - bld r3,1 - bst r2,7 - bld r3,6 - bst r23,5 - bld r2,7 - bst r3,3 - bld r23,5 - bst r23,6 - bld r3,3 - bst r2,3 - bld r23,6 - bst r23,4 - bld r2,3 - bst r22,3 - bld r23,4 - bst r23,0 - bld r22,3 - bst r22,2 - bld r23,0 - bst r2,0 - bld r22,2 - bst r22,4 - bld r2,0 - bst r0,0 - bld r22,4 - bst r4,0 - bld r0,0 - bst r5,0 - bld r4,0 - bst r5,2 - bld r5,0 - bst r7,2 - bld r5,2 - bst r7,6 - bld r7,2 - bst r7,7 - bld r7,6 - bst r6,7 - bld r7,7 - bst r6,5 - bld r6,7 - bst r4,5 - bld r6,5 - bst r4,1 - bld r4,5 - bst r0,0 - bld r4,1 - bst r4,2 - bld r0,0 - bst r7,0 - bld r4,2 - bst r5,6 - bld r7,0 - bst r7,3 - bld r5,6 - bst r6,6 - bld r7,3 - bst r7,5 - bld r6,6 - bst r4,7 - bld r7,5 - bst r6,1 - bld r4,7 - bst r4,4 - bld r6,1 - bst r5,1 - bld r4,4 - bst r0,0 - bld r5,1 - bst r4,3 - bld r0,0 - bst r6,0 - bld r4,3 - bst r5,4 - bld r6,0 - bst r5,3 - bld r5,4 - bst r6,2 - bld r5,3 - bst r7,4 - bld r6,2 - bst r5,7 - bld r7,4 - bst r6,3 - bld r5,7 - bst r6,4 - bld r6,3 - bst r5,5 - bld r6,4 - bst r0,0 - bld r5,5 - bst r4,6 - bld r0,0 - bst r7,1 - bld r4,6 - bst r0,0 - bld r7,1 - bst r8,0 - bld r0,0 - bst r10,0 - bld r8,0 - bst r10,4 - bld r10,0 - bst r10,5 - bld r10,4 - bst r9,5 - bld r10,5 - bst r9,3 - bld r9,5 - bst r11,2 - bld r9,3 - bst r8,6 - bld r11,2 - bst r8,1 - bld r8,6 - bst r9,0 - bld r8,1 - bst r10,2 - bld r9,0 - bst r8,4 - bld r10,2 - bst r10,1 - bld r8,4 - bst r9,4 - bld r10,1 - bst r10,3 - bld r9,4 - bst r11,4 - bld r10,3 - bst r10,7 - bld r11,4 - bst r11,5 - bld r10,7 - bst r9,7 - bld r11,5 - bst r11,3 - bld r9,7 - bst r11,6 - bld r11,3 - bst r8,7 - bld r11,6 - bst r11,1 - bld r8,7 - bst r9,6 - bld r11,1 - bst r8,3 - bld r9,6 - bst r11,0 - bld r8,3 - bst r10,6 - bld r11,0 - bst r8,5 - bld r10,6 - bst r9,1 - bld r8,5 - bst r9,2 - bld r9,1 - bst r8,2 - bld r9,2 - bst r0,0 - bld r8,2 - bst r12,0 - bld r0,0 - bst r15,0 - bld r12,0 - bst r15,6 - bld r15,0 - bst r13,7 - bld r15,6 - bst r12,3 - bld r13,7 - bst r0,0 - bld r12,3 - bst r12,1 - bld r0,0 - bst r14,0 - bld r12,1 - bst r15,4 - bld r14,0 - bst r15,7 - bld r15,4 - bst r12,7 - bld r15,7 - bst r0,0 - bld r12,7 - bst r12,2 - bld r0,0 - bst r13,0 - bld r12,2 - bst r15,2 - bld r13,0 - bst r13,6 - bld r15,2 - bst r13,3 - bld r13,6 - bst r0,0 - bld r13,3 - bst r12,4 - bld r0,0 - bst r15,1 - bld r12,4 - bst r14,6 - bld r15,1 - bst r13,5 - bld r14,6 - bst r14,3 - bld r13,5 - bst r0,0 - bld r14,3 - bst r12,5 - bld r0,0 - bst r14,1 - bld r12,5 - bst r14,4 - bld r14,1 - bst r15,5 - bld r14,4 - bst r14,7 - bld r15,5 - bst r0,0 - bld r14,7 - bst r12,6 - bld r0,0 - bst r13,1 - bld r12,6 - bst r14,2 - bld r13,1 - bst r13,4 - bld r14,2 - bst r15,3 - bld r13,4 - bst r0,0 - bld 
r15,3 - movw r18,r12 - movw r20,r14 - movw r12,r22 - movw r14,r2 - movw r22,r18 - movw r2,r20 - and r18,r4 - and r19,r5 - and r20,r6 - and r21,r7 - eor r8,r18 - eor r9,r19 - eor r10,r20 - eor r11,r21 - com r12 - com r13 - com r14 - com r15 - eor r4,r12 - eor r5,r13 - eor r6,r14 - eor r7,r15 - eor r12,r8 - eor r13,r9 - eor r14,r10 - eor r15,r11 - mov r0,r22 - or r0,r4 - eor r8,r0 - mov r0,r23 - or r0,r5 - eor r9,r0 - mov r0,r2 - or r0,r6 - eor r10,r0 - mov r0,r3 - or r0,r7 - eor r11,r0 - mov r0,r4 - and r0,r12 - eor r22,r0 - mov r0,r5 - and r0,r13 - eor r23,r0 - mov r0,r6 - and r0,r14 - eor r2,r0 - mov r0,r7 - and r0,r15 - eor r3,r0 - mov r0,r22 - and r0,r8 - eor r4,r0 - mov r0,r23 - and r0,r9 - eor r5,r0 - mov r0,r2 - and r0,r10 - eor r6,r0 - mov r0,r3 - and r0,r11 - eor r7,r0 - ret -611: -#if defined(RAMPZ) - pop r0 - out _SFR_IO_ADDR(RAMPZ),r0 -#endif - ldd r26,Y+17 - ldd r27,Y+18 - st X+,r3 - st X+,r2 - st X+,r23 - st X+,r22 - st X+,r7 - st X+,r6 - st X+,r5 - st X+,r4 - st X+,r11 - st X+,r10 - st X+,r9 - st X+,r8 - st X+,r15 - st X+,r14 - st X+,r13 - st X+,r12 - adiw r28,18 - in r0,0x3f - cli - out 0x3e,r29 - out 0x3f,r0 - out 0x3d,r28 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size gift128b_decrypt, .-gift128b_decrypt - -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-util.h b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
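/* Illustrative self-check (not part of the original sources): the composed
 * forms above rely on identities such as "rotate left by 8, then right by 3,
 * equals rotate left by 5".  A quick host-side check of two of them, using
 * the generic macros and a made-up sample value; assumes a compiler that
 * accepts the statement-expression extension used by "internal-util.h". */
#include <assert.h>
#include <stdint.h>
#include "internal-util.h"

int main(void)
{
    uint32_t x = 0x12345678;   /* arbitrary sample value */
    assert(rightRotate(rightRotate(rightRotate(leftRotate(x, 8), 1), 1), 1)
           == leftRotate(x, 5));            /* the leftRotate5 composition */
    assert(leftRotate(leftRotate(x, 16), 1)
           == leftRotate(x, 17));           /* the leftRotate17 composition */
    return 0;
}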
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/sundae-gift.c b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/sundae-gift.c deleted file mode 100644 index d192b8e..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/sundae-gift.c +++ /dev/null @@ -1,356 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "sundae-gift.h" -#include "internal-gift128.h" -#include "internal-util.h" -#include - -aead_cipher_t const sundae_gift_0_cipher = { - "SUNDAE-GIFT-0", - SUNDAE_GIFT_KEY_SIZE, - SUNDAE_GIFT_0_NONCE_SIZE, - SUNDAE_GIFT_TAG_SIZE, - AEAD_FLAG_NONE, - sundae_gift_0_aead_encrypt, - sundae_gift_0_aead_decrypt -}; - -aead_cipher_t const sundae_gift_64_cipher = { - "SUNDAE-GIFT-64", - SUNDAE_GIFT_KEY_SIZE, - SUNDAE_GIFT_64_NONCE_SIZE, - SUNDAE_GIFT_TAG_SIZE, - AEAD_FLAG_NONE, - sundae_gift_64_aead_encrypt, - sundae_gift_64_aead_decrypt -}; - -aead_cipher_t const sundae_gift_96_cipher = { - "SUNDAE-GIFT-96", - SUNDAE_GIFT_KEY_SIZE, - SUNDAE_GIFT_96_NONCE_SIZE, - SUNDAE_GIFT_TAG_SIZE, - AEAD_FLAG_NONE, - sundae_gift_96_aead_encrypt, - sundae_gift_96_aead_decrypt -}; - -aead_cipher_t const sundae_gift_128_cipher = { - "SUNDAE-GIFT-128", - SUNDAE_GIFT_KEY_SIZE, - SUNDAE_GIFT_128_NONCE_SIZE, - SUNDAE_GIFT_TAG_SIZE, - AEAD_FLAG_NONE, - sundae_gift_128_aead_encrypt, - sundae_gift_128_aead_decrypt -}; - -/* Multiply a block value by 2 in the special byte field */ -STATIC_INLINE void sundae_gift_multiply(unsigned char B[16]) -{ - unsigned char B0 = B[0]; - unsigned index; - for (index = 0; index < 15; ++index) - B[index] = B[index + 1]; - B[15] = B0; - B[10] ^= B0; - B[12] ^= B0; - B[14] ^= B0; -} - -/* Compute a MAC over the concatenation of two data buffers */ -static void sundae_gift_aead_mac - (const gift128b_key_schedule_t *ks, unsigned char V[16], - const unsigned char *data1, unsigned data1len, - const unsigned char *data2, unsigned long data2len) -{ - unsigned len; - - /* Nothing to do if the input is empty */ - if (!data1len && !data2len) - return; - - /* Format the first block. We assume that data1len <= 16 - * as it is will be the nonce if it is non-zero in length */ - lw_xor_block(V, data1, data1len); - len = 16 - data1len; - if (len > data2len) - len = (unsigned)data2len; - lw_xor_block(V + data1len, data2, len); - data2 += len; - data2len -= len; - len += data1len; - - /* Process as many full blocks as we can, except the last */ - while (data2len > 0) { - gift128b_encrypt(ks, V, V); - len = 16; - if (len > data2len) - len = (unsigned)data2len; - lw_xor_block(V, data2, len); - data2 += len; - data2len -= len; - } - - /* Pad and process the last block */ - if (len < 16) { - V[len] ^= 0x80; - sundae_gift_multiply(V); - gift128b_encrypt(ks, V, V); - } else { - sundae_gift_multiply(V); - sundae_gift_multiply(V); - gift128b_encrypt(ks, V, V); - } -} - -static int sundae_gift_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, unsigned npublen, - const unsigned char *k, unsigned char domainsep) -{ - gift128b_key_schedule_t ks; - unsigned char V[16]; - unsigned char T[16]; - unsigned char P[16]; - - /* Compute the length of the output ciphertext */ - *clen = mlen + SUNDAE_GIFT_TAG_SIZE; - - /* Set the key schedule */ - gift128b_init(&ks, k); - - /* Format and encrypt the initial domain separation block */ - if (adlen > 0) - domainsep |= 0x80; - if (mlen > 0) - domainsep |= 0x40; - V[0] = domainsep; - memset(V + 1, 0, sizeof(V) - 1); - gift128b_encrypt(&ks, T, V); - - /* Authenticate the nonce and the associated data */ - sundae_gift_aead_mac(&ks, T, npub, npublen, ad, adlen); - - /* Authenticate the plaintext */ - sundae_gift_aead_mac(&ks, T, 0, 0, m, mlen); - - /* Encrypt the plaintext to produce the ciphertext. 
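/* Illustrative sketch (not part of the original sources): how the first
 * domain-separation byte is assembled before the initial block cipher call.
 * "variant" stands for the per-member constant passed by the wrappers below
 * (0x00, 0x90, 0xA0 or 0xB0); the helper name is hypothetical. */
static unsigned char sundae_gift_domain_byte
    (unsigned char variant, unsigned long long adlen, unsigned long long mlen)
{
    unsigned char domainsep = variant;
    if (adlen > 0)
        domainsep |= 0x80;   /* associated data is present */
    if (mlen > 0)
        domainsep |= 0x40;   /* plaintext is present */
    return domainsep;        /* e.g. 0xA0 | 0x80 | 0x40 = 0xE0 for GIFT-96 */
}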
We need to be - * careful how we manage the data because we could be doing in-place - * encryption. In SUNDAE-GIFT, the first 16 bytes of the ciphertext - * is the tag rather than the last 16 bytes in other algorithms. - * We need to swap the plaintext for the current block with the - * ciphertext or tag from the previous block */ - memcpy(V, T, 16); - while (mlen >= 16) { - gift128b_encrypt(&ks, V, V); - lw_xor_block_2_src(P, V, m, 16); - memcpy(c, T, 16); - memcpy(T, P, 16); - c += 16; - m += 16; - mlen -= 16; - } - if (mlen > 0) { - unsigned leftover = (unsigned)mlen; - gift128b_encrypt(&ks, V, V); - lw_xor_block(V, m, leftover); - memcpy(c, T, 16); - memcpy(c + 16, V, leftover); - } else { - memcpy(c, T, 16); - } - return 0; -} - -static int sundae_gift_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, unsigned npublen, - const unsigned char *k, unsigned char domainsep) -{ - gift128b_key_schedule_t ks; - unsigned char V[16]; - unsigned char T[16]; - unsigned char *mtemp; - unsigned long len; - - /* Bail out if the ciphertext is too short */ - if (clen < SUNDAE_GIFT_TAG_SIZE) - return -1; - len = *mlen = clen - SUNDAE_GIFT_TAG_SIZE; - - /* Set the key schedule */ - gift128b_init(&ks, k); - - /* Decrypt the ciphertext to produce the plaintext, using the - * tag as the initialization vector for the decryption process */ - memcpy(T, c, SUNDAE_GIFT_TAG_SIZE); - c += SUNDAE_GIFT_TAG_SIZE; - mtemp = m; - memcpy(V, T, 16); - while (len >= 16) { - gift128b_encrypt(&ks, V, V); - lw_xor_block_2_src(mtemp, c, V, 16); - c += 16; - mtemp += 16; - len -= 16; - } - if (len > 0) { - gift128b_encrypt(&ks, V, V); - lw_xor_block_2_src(mtemp, c, V, (unsigned)len); - } - - /* Format and encrypt the initial domain separation block */ - if (adlen > 0) - domainsep |= 0x80; - if (clen > SUNDAE_GIFT_TAG_SIZE) - domainsep |= 0x40; - V[0] = domainsep; - memset(V + 1, 0, sizeof(V) - 1); - gift128b_encrypt(&ks, V, V); - - /* Authenticate the nonce and the associated data */ - sundae_gift_aead_mac(&ks, V, npub, npublen, ad, adlen); - - /* Authenticate the plaintext */ - sundae_gift_aead_mac(&ks, V, 0, 0, m, *mlen); - - /* Check the authentication tag */ - return aead_check_tag(m, *mlen, T, V, 16); -} - -int sundae_gift_0_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - (void)npub; - return sundae_gift_aead_encrypt - (c, clen, m, mlen, ad, adlen, 0, 0, k, 0x00); -} - -int sundae_gift_0_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - (void)npub; - return sundae_gift_aead_decrypt - (m, mlen, c, clen, ad, adlen, 0, 0, k, 0x00); -} - -int sundae_gift_64_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_encrypt - (c, clen, m, mlen, ad, adlen, - npub, SUNDAE_GIFT_64_NONCE_SIZE, k, 0x90); -} - -int sundae_gift_64_aead_decrypt 
- (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_decrypt - (m, mlen, c, clen, ad, adlen, - npub, SUNDAE_GIFT_64_NONCE_SIZE, k, 0x90); -} - -int sundae_gift_96_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_encrypt - (c, clen, m, mlen, ad, adlen, - npub, SUNDAE_GIFT_96_NONCE_SIZE, k, 0xA0); -} - -int sundae_gift_96_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_decrypt - (m, mlen, c, clen, ad, adlen, - npub, SUNDAE_GIFT_96_NONCE_SIZE, k, 0xA0); -} - -int sundae_gift_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_encrypt - (c, clen, m, mlen, ad, adlen, - npub, SUNDAE_GIFT_128_NONCE_SIZE, k, 0xB0); -} - -int sundae_gift_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - (void)nsec; - return sundae_gift_aead_decrypt - (m, mlen, c, clen, ad, adlen, - npub, SUNDAE_GIFT_128_NONCE_SIZE, k, 0xB0); -} diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/sundae-gift.h b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/sundae-gift.h deleted file mode 100644 index 9040dd5..0000000 --- a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys-avr/sundae-gift.h +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LWCRYPTO_SUNDAE_GIFT_H -#define LWCRYPTO_SUNDAE_GIFT_H - -#include "aead-common.h" - -/** - * \file sundae-gift.h - * \brief SUNDAE-GIFT encryption algorithm family. - * - * The SUNDAE-GIFT family consists of several related algorithms: - * - * \li SUNDAE-GIFT-0 with a 128-bit key, a 0-bit nonce, and 128-bit tag. - * \li SUNDAE-GIFT-64 with a 128-bit key, a 64-bit nonce, and 128-bit tag. - * \li SUNDAE-GIFT-96 with a 128-bit key, a 96-bit nonce, and 128-bit tag. - * This is the primary member of the family. - * \li SUNDAE-GIFT-128 with a 128-bit key, a 128-bit nonce, and 128-bit tag. - * - * SUNDAE-GIFT is resistant against nonce reuse as long as the combination - * of the associated data and plaintext is unique. - * - * If a nonce is reused (or there is no nonce in the case of SUNDAE-GIFT-0), - * then two packets with the same associated data and plaintext will encrypt - * to the same ciphertext. This will leak that the same plaintext has been - * sent for a second time but will not reveal the plaintext itself. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for all SUNDAE-GIFT family members. - */ -#define SUNDAE_GIFT_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for all SUNDAE-GIFT family members. - */ -#define SUNDAE_GIFT_TAG_SIZE 16 - -/** - * \brief Size of the nonce for SUNDAE-GIFT-0. - */ -#define SUNDAE_GIFT_0_NONCE_SIZE 0 - -/** - * \brief Size of the nonce for SUNDAE-GIFT-64. - */ -#define SUNDAE_GIFT_64_NONCE_SIZE 8 - -/** - * \brief Size of the nonce for SUNDAE-GIFT-96. - */ -#define SUNDAE_GIFT_96_NONCE_SIZE 12 - -/** - * \brief Size of the nonce for SUNDAE-GIFT-128. - */ -#define SUNDAE_GIFT_128_NONCE_SIZE 16 - -/** - * \brief Meta-information block for the SUNDAE-GIFT-0 cipher. - */ -extern aead_cipher_t const sundae_gift_0_cipher; - -/** - * \brief Meta-information block for the SUNDAE-GIFT-64 cipher. - */ -extern aead_cipher_t const sundae_gift_64_cipher; - -/** - * \brief Meta-information block for the SUNDAE-GIFT-96 cipher. - */ -extern aead_cipher_t const sundae_gift_96_cipher; - -/** - * \brief Meta-information block for the SUNDAE-GIFT-128 cipher. - */ -extern aead_cipher_t const sundae_gift_128_cipher; - -/** - * \brief Encrypts and authenticates a packet with SUNDAE-GIFT-0. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce - not used by this algorithm. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa sundae_gift_0_aead_decrypt() - */ -int sundae_gift_0_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SUNDAE-GIFT-0. - * - * \param m Buffer to receive the plaintext message on output. 
- * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce - not used by this algorithm. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa sundae_gift_0_aead_encrypt() - */ -int sundae_gift_0_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SUNDAE-GIFT-64. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 8 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa sundae_gift_64_aead_decrypt() - */ -int sundae_gift_64_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SUNDAE-GIFT-64. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 8 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
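/* Illustrative usage sketch (not part of the original header): a round trip
 * with SUNDAE-GIFT-96, the primary family member.  Key, nonce and message
 * values are made up; assumes "sundae-gift.h" is on the include path. */
#include <assert.h>
#include <string.h>
#include "sundae-gift.h"

int main(void)
{
    unsigned char key[SUNDAE_GIFT_KEY_SIZE] = {0};        /* example key */
    unsigned char npub[SUNDAE_GIFT_96_NONCE_SIZE] = {0};  /* example nonce */
    unsigned char msg[13] = "hello, world";
    unsigned char ct[sizeof(msg) + SUNDAE_GIFT_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long clen, plen;

    sundae_gift_96_aead_encrypt(ct, &clen, msg, sizeof(msg),
                                0, 0, 0, npub, key);
    assert(clen == sizeof(msg) + SUNDAE_GIFT_TAG_SIZE);

    /* Returns 0 and fills pt/plen only if the authentication tag verifies */
    assert(sundae_gift_96_aead_decrypt(pt, &plen, 0, ct, clen,
                                       0, 0, npub, key) == 0);
    assert(plen == sizeof(msg) && memcmp(pt, msg, (size_t)plen) == 0);
    return 0;
}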
- * - * \sa sundae_gift_64_aead_encrypt() - */ -int sundae_gift_64_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SUNDAE-GIFT-96. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa sundae_gift_96_aead_decrypt() - */ -int sundae_gift_96_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SUNDAE-GIFT-96. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa sundae_gift_96_aead_encrypt() - */ -int sundae_gift_96_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with SUNDAE-GIFT-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. 
- * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa sundae_gift_128_aead_decrypt() - */ -int sundae_gift_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with SUNDAE-GIFT-12896. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa sundae_gift_128_aead_encrypt() - */ -int sundae_gift_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128-config.h b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128-config.h new file mode 100644 index 0000000..62131ba --- /dev/null +++ b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128-config.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_GIFT128_CONFIG_H +#define LW_INTERNAL_GIFT128_CONFIG_H + +/** + * \file internal-gift128-config.h + * \brief Configures the variant of GIFT-128 to use. 
+ */ + +/** + * \brief Select the full variant of GIFT-128. + * + * The full variant requires 320 bytes for the key schedule and uses the + * fixslicing method to implement encryption and decryption. + */ +#define GIFT128_VARIANT_FULL 0 + +/** + * \brief Select the small variant of GIFT-128. + * + * The small variant requires 80 bytes for the key schedule. The rest + * of the key schedule is expanded on the fly during encryption. + * + * The fixslicing method is used to implement encryption and the slower + * bitslicing method is used to implement decryption. The small variant + * is suitable when memory is at a premium, decryption is not needed, + * but encryption performance is still important. + */ +#define GIFT128_VARIANT_SMALL 1 + +/** + * \brief Select the tiny variant of GIFT-128. + * + * The tiny variant requires 16 bytes for the key schedule and uses the + * bitslicing method to implement encryption and decryption. It is suitable + * for use when memory is very tight and performance is not critical. + */ +#define GIFT128_VARIANT_TINY 2 + +/** + * \def GIFT128_VARIANT + * \brief Selects the default variant of GIFT-128 to use on this platform. + */ +/** + * \def GIFT128_VARIANT_ASM + * \brief Defined to 1 if the GIFT-128 implementation has been replaced + * with an assembly code version. + */ +#if defined(__AVR__) && !defined(GIFT128_VARIANT_ASM) +#define GIFT128_VARIANT_ASM 1 +#endif +#if !defined(GIFT128_VARIANT) +#define GIFT128_VARIANT GIFT128_VARIANT_FULL +#endif +#if !defined(GIFT128_VARIANT_ASM) +#define GIFT128_VARIANT_ASM 0 +#endif + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128.c b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128.c index 681dbc8..c6ac5ec 100644 --- a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128.c +++ b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128.c @@ -23,8 +23,12 @@ #include "internal-gift128.h" #include "internal-util.h" +#if !GIFT128_VARIANT_ASM + +#if GIFT128_VARIANT != GIFT128_VARIANT_TINY + /* Round constants for GIFT-128 in the fixsliced representation */ -static uint32_t const GIFT128_RC[40] = { +static uint32_t const GIFT128_RC_fixsliced[40] = { 0x10000008, 0x80018000, 0x54000002, 0x01010181, 0x8000001f, 0x10888880, 0x6001e000, 0x51500002, 0x03030180, 0x8000002f, 0x10088880, 0x60016000, 0x41500002, 0x03030080, 0x80000027, 0x10008880, 0x4001e000, 0x11500002, @@ -34,6 +38,246 @@ static uint32_t const GIFT128_RC[40] = { 0xc001a000, 0x14500002, 0x01020181, 0x8000001a }; +#endif + +#if GIFT128_VARIANT != GIFT128_VARIANT_FULL + +/* Round constants for GIFT-128 in the bitsliced representation */ +static uint8_t const GIFT128_RC[40] = { + 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, + 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, + 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, + 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38, + 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A +}; + +#endif + +/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ +#define bit_permute_step(_y, mask, shift) \ + do { \ + uint32_t y = (_y); \ + uint32_t t = ((y >> (shift)) ^ y) & (mask); \ + (_y) = (y ^ t) ^ (t << (shift)); \ + } while (0) + +/* + * The permutation below was generated by the online permuation generator at + * "http://programming.sirrida.de/calcperm.php". 
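/* Illustrative sketch (not part of the original sources): the defaults in
 * internal-gift128-config.h above are guarded by #if !defined, so a port
 * that only ever encrypts and is short on RAM could pin the small variant
 * before including the GIFT-128 headers, from its own configuration header
 * or equivalent -D compiler flags: */
#define GIFT128_VARIANT GIFT128_VARIANT_SMALL   /* 80-byte key schedule */
#include "internal-gift128-config.h"
#include "internal-gift128.h"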
+ * + * All of the permutuations are essentially the same, except that each is + * rotated by 8 bits with respect to the next: + * + * P0: 0 24 16 8 1 25 17 9 2 26 18 10 3 27 19 11 4 28 20 12 5 29 21 13 6 30 22 14 7 31 23 15 + * P1: 8 0 24 16 9 1 25 17 10 2 26 18 11 3 27 19 12 4 28 20 13 5 29 21 14 6 30 22 15 7 31 23 + * P2: 16 8 0 24 17 9 1 25 18 10 2 26 19 11 3 27 20 12 4 28 21 13 5 29 22 14 6 30 23 15 7 31 + * P3: 24 16 8 0 25 17 9 1 26 18 10 2 27 19 11 3 28 20 12 4 29 21 13 5 30 22 14 6 31 23 15 7 + * + * The most efficient permutation from the online generator was P3, so we + * perform it as the core of the others, and then perform a final rotation. + * + * It is possible to do slightly better than "P3 then rotate" on desktop and + * server architectures for the other permutations. But the advantage isn't + * as evident on embedded platforms so we keep things simple. + */ +#define PERM3_INNER(x) \ + do { \ + bit_permute_step(x, 0x0a0a0a0a, 3); \ + bit_permute_step(x, 0x00cc00cc, 6); \ + bit_permute_step(x, 0x0000f0f0, 12); \ + bit_permute_step(x, 0x000000ff, 24); \ + } while (0) +#define PERM0(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate8(_x); \ + } while (0) +#define PERM1(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate16(_x); \ + } while (0) +#define PERM2(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = leftRotate24(_x); \ + } while (0) +#define PERM3(x) \ + do { \ + uint32_t _x = (x); \ + PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) + +#define INV_PERM3_INNER(x) \ + do { \ + bit_permute_step(x, 0x00550055, 9); \ + bit_permute_step(x, 0x00003333, 18); \ + bit_permute_step(x, 0x000f000f, 12); \ + bit_permute_step(x, 0x000000ff, 24); \ + } while (0) +#define INV_PERM0(x) \ + do { \ + uint32_t _x = rightRotate8(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM1(x) \ + do { \ + uint32_t _x = rightRotate16(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM2(x) \ + do { \ + uint32_t _x = rightRotate24(x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) +#define INV_PERM3(x) \ + do { \ + uint32_t _x = (x); \ + INV_PERM3_INNER(_x); \ + (x) = _x; \ + } while (0) + +/** + * \brief Converts the GIFT-128 nibble-based representation into word-based. + * + * \param output Output buffer to write the word-based version to. + * \param input Input buffer to read the nibble-based version from. + * + * The \a input and \a output buffers can be the same buffer. + */ +static void gift128n_to_words + (unsigned char *output, const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + + /* Load the input buffer into 32-bit words. We use the nibble order + * from the HYENA submission to NIST which is byte-reversed with respect + * to the nibble order of the original GIFT-128 paper. Nibble zero is in + * the first byte instead of the last, which means little-endian order. */ + s0 = le_load_word32(input + 12); + s1 = le_load_word32(input + 8); + s2 = le_load_word32(input + 4); + s3 = le_load_word32(input); + + /* Rearrange the bits so that bits 0..3 of each nibble are + * scattered to bytes 0..3 of each word. The permutation is: + * + * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 + * + * Generated with "http://programming.sirrida.de/calcperm.php". 
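/* Illustrative sketch (not part of the original sources): bit_permute_step()
 * is the classic "delta swap" -- wherever the mask has a 1 at position i,
 * the bits at positions i and i+shift are exchanged.  The macro is repeated
 * here so the check stands alone. */
#include <assert.h>
#include <stdint.h>

#define bit_permute_step(_y, mask, shift) \
    do { \
        uint32_t y = (_y); \
        uint32_t t = ((y >> (shift)) ^ y) & (mask); \
        (_y) = (y ^ t) ^ (t << (shift)); \
    } while (0)

int main(void)
{
    uint32_t x = 0x00000002;             /* only bit 1 is set */
    bit_permute_step(x, 0x55555555, 1);  /* swap every even/odd bit pair */
    assert(x == 0x00000001);             /* bit 1 moved down to bit 0 */
    return 0;
}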
+ */ + #define PERM_WORDS(_x) \ + do { \ + uint32_t x = (_x); \ + bit_permute_step(x, 0x0a0a0a0a, 3); \ + bit_permute_step(x, 0x00cc00cc, 6); \ + bit_permute_step(x, 0x0000f0f0, 12); \ + bit_permute_step(x, 0x0000ff00, 8); \ + (_x) = x; \ + } while (0) + PERM_WORDS(s0); + PERM_WORDS(s1); + PERM_WORDS(s2); + PERM_WORDS(s3); + + /* Rearrange the bytes and write them to the output buffer */ + output[0] = (uint8_t)s0; + output[1] = (uint8_t)s1; + output[2] = (uint8_t)s2; + output[3] = (uint8_t)s3; + output[4] = (uint8_t)(s0 >> 8); + output[5] = (uint8_t)(s1 >> 8); + output[6] = (uint8_t)(s2 >> 8); + output[7] = (uint8_t)(s3 >> 8); + output[8] = (uint8_t)(s0 >> 16); + output[9] = (uint8_t)(s1 >> 16); + output[10] = (uint8_t)(s2 >> 16); + output[11] = (uint8_t)(s3 >> 16); + output[12] = (uint8_t)(s0 >> 24); + output[13] = (uint8_t)(s1 >> 24); + output[14] = (uint8_t)(s2 >> 24); + output[15] = (uint8_t)(s3 >> 24); +} + +/** + * \brief Converts the GIFT-128 word-based representation into nibble-based. + * + * \param output Output buffer to write the nibble-based version to. + * \param input Input buffer to read the word-based version from. + */ +static void gift128n_to_nibbles + (unsigned char *output, const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + + /* Load the input bytes and rearrange them so that s0 contains the + * most significant nibbles and s3 contains the least significant */ + s0 = (((uint32_t)(input[12])) << 24) | + (((uint32_t)(input[8])) << 16) | + (((uint32_t)(input[4])) << 8) | + ((uint32_t)(input[0])); + s1 = (((uint32_t)(input[13])) << 24) | + (((uint32_t)(input[9])) << 16) | + (((uint32_t)(input[5])) << 8) | + ((uint32_t)(input[1])); + s2 = (((uint32_t)(input[14])) << 24) | + (((uint32_t)(input[10])) << 16) | + (((uint32_t)(input[6])) << 8) | + ((uint32_t)(input[2])); + s3 = (((uint32_t)(input[15])) << 24) | + (((uint32_t)(input[11])) << 16) | + (((uint32_t)(input[7])) << 8) | + ((uint32_t)(input[3])); + + /* Apply the inverse of PERM_WORDS() from the function above */ + #define INV_PERM_WORDS(_x) \ + do { \ + uint32_t x = (_x); \ + bit_permute_step(x, 0x00aa00aa, 7); \ + bit_permute_step(x, 0x0000cccc, 14); \ + bit_permute_step(x, 0x00f000f0, 4); \ + bit_permute_step(x, 0x0000ff00, 8); \ + (_x) = x; \ + } while (0) + INV_PERM_WORDS(s0); + INV_PERM_WORDS(s1); + INV_PERM_WORDS(s2); + INV_PERM_WORDS(s3); + + /* Store the result into the output buffer as 32-bit words */ + le_store_word32(output + 12, s0); + le_store_word32(output + 8, s1); + le_store_word32(output + 4, s2); + le_store_word32(output, s3); +} + +void gift128n_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + gift128n_to_words(output, input); + gift128b_encrypt(ks, output, output); + gift128n_to_nibbles(output, output); +} + +void gift128n_decrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + gift128n_to_words(output, input); + gift128b_decrypt(ks, output, output); + gift128n_to_nibbles(output, output); +} + +#if GIFT128_VARIANT != GIFT128_VARIANT_TINY + /** * \brief Swaps bits within two words. 
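/* Illustrative usage sketch (not part of the original sources): the
 * nibble-based interface converts to the word-based form, runs the core
 * cipher, and converts back, so encryption followed by decryption with the
 * same key schedule round-trips.  Key and plaintext bytes are made up;
 * assumes the declarations from "internal-gift128.h". */
#include <assert.h>
#include <string.h>
#include "internal-gift128.h"

int main(void)
{
    gift128n_key_schedule_t ks;
    unsigned char key[16] = {0};   /* example key */
    unsigned char pt[16]  = {0};   /* example block */
    unsigned char ct[16], out[16];

    gift128n_init(&ks, key);
    gift128n_encrypt(&ks, ct, pt);
    gift128n_decrypt(&ks, out, ct);
    assert(memcmp(out, pt, 16) == 0);
    return 0;
}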
* @@ -202,21 +446,27 @@ static void gift128b_compute_round_keys /* Keys 8, 9, 18, and 19 do not need any adjustment */ } +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL /* Derive the fixsliced keys for the remaining rounds 11..40 */ for (index = 20; index < 80; index += 10) { gift128b_derive_keys(ks->k + index, ks->k + index - 20); } +#endif } -int gift128b_init - (gift128b_key_schedule_t *ks, const unsigned char *key, size_t key_len) +void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) { - if (!ks || !key || key_len != 16) - return 0; gift128b_compute_round_keys (ks, be_load_word32(key), be_load_word32(key + 4), be_load_word32(key + 8), be_load_word32(key + 12)); - return 1; +} + +void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) +{ + /* Use the little-endian key byte order from the HYENA submission */ + gift128b_compute_round_keys + (ks, le_load_word32(key + 12), le_load_word32(key + 8), + le_load_word32(key + 4), le_load_word32(key)); } /** @@ -521,11 +771,37 @@ int gift128b_init gift128b_inv_sbox(s3, s1, s2, s0); \ } while (0) +#else /* GIFT128_VARIANT_TINY */ + +void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key) +{ + /* Mirror the fixslicing word order of 3, 1, 2, 0 */ + ks->k[0] = be_load_word32(key + 12); + ks->k[1] = be_load_word32(key + 4); + ks->k[2] = be_load_word32(key + 8); + ks->k[3] = be_load_word32(key); +} + +void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key) +{ + /* Use the little-endian key byte order from the HYENA submission + * and mirror the fixslicing word order of 3, 1, 2, 0 */ + ks->k[0] = le_load_word32(key); + ks->k[1] = le_load_word32(key + 8); + ks->k[2] = le_load_word32(key + 4); + ks->k[3] = le_load_word32(key + 12); +} + +#endif /* GIFT128_VARIANT_TINY */ + +#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL + void gift128b_encrypt (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { uint32_t s0, s1, s2, s3; + uint32_t k[20]; /* Copy the plaintext into the state buffer and convert from big endian */ s0 = be_load_word32(input); @@ -534,14 +810,20 @@ void gift128b_encrypt s3 = be_load_word32(input + 12); /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer in big endian */ be_store_word32(output, s0); @@ -555,6 +837,7 @@ void 
gift128b_encrypt_preloaded const uint32_t input[4]) { uint32_t s0, s1, s2, s3; + uint32_t k[20]; /* Copy the plaintext into local variables */ s0 = input[0]; @@ -563,14 +846,20 @@ void gift128b_encrypt_preloaded s3 = input[3]; /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC); - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer */ output[0] = s0; @@ -579,7 +868,55 @@ void gift128b_encrypt_preloaded output[3] = s3; } -void gift128b_decrypt +void gift128t_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, uint32_t tweak) +{ + uint32_t s0, s1, s2, s3; + uint32_t k[20]; + + /* Copy the plaintext into the state buffer and convert from nibbles */ + gift128n_to_words(output, input); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* Perform all 40 rounds five at a time using the fixsliced method. 
+ * Every 5 rounds except the last we add the tweak value to the state */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + s0 ^= tweak; + gift128b_derive_keys(k, ks->k); + gift128b_derive_keys(k + 10, ks->k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 10); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 15); + s0 ^= tweak; + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 20); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 25); + s0 ^= tweak; + gift128b_derive_keys(k, k); + gift128b_derive_keys(k + 10, k + 10); + gift128b_encrypt_5_rounds(k, GIFT128_RC_fixsliced + 30); + s0 ^= tweak; + gift128b_encrypt_5_rounds(k + 10, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); + gift128n_to_nibbles(output, output); +} + +#elif GIFT128_VARIANT == GIFT128_VARIANT_FULL + +void gift128b_encrypt (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { @@ -592,14 +929,14 @@ void gift128b_decrypt s3 = be_load_word32(input + 12); /* Perform all 40 rounds five at a time using the fixsliced method */ - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC); + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); /* Pack the state into the ciphertext buffer in big endian */ be_store_word32(output, s0); @@ -608,173 +945,308 @@ void gift128b_decrypt be_store_word32(output + 12, s3); } -int gift128n_init - (gift128n_key_schedule_t *ks, const unsigned char *key, size_t key_len) +void gift128b_encrypt_preloaded + (const gift128b_key_schedule_t *ks, uint32_t output[4], + const uint32_t input[4]) { - /* Use the little-endian key byte order from the HYENA submission */ - if (!ks || !key || key_len != 16) - return 0; - gift128b_compute_round_keys - (ks, le_load_word32(key + 12), le_load_word32(key + 8), - le_load_word32(key + 4), le_load_word32(key)); - return 1; + uint32_t s0, s1, s2, s3; + + /* Copy the plaintext into local variables */ + s0 = input[0]; + s1 = input[1]; + s2 = input[2]; + s3 = input[3]; + + /* Perform all 40 rounds five at a time using the fixsliced method */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_encrypt_5_rounds(ks->k + 30, 
GIFT128_RC_fixsliced + 15); + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer */ + output[0] = s0; + output[1] = s1; + output[2] = s2; + output[3] = s3; } -/* http://programming.sirrida.de/perm_fn.html#bit_permute_step */ -#define bit_permute_step(_y, mask, shift) \ - do { \ - uint32_t y = (_y); \ - uint32_t t = ((y >> (shift)) ^ y) & (mask); \ - (_y) = (y ^ t) ^ (t << (shift)); \ - } while (0) +void gift128t_encrypt + (const gift128n_key_schedule_t *ks, unsigned char *output, + const unsigned char *input, uint32_t tweak) +{ + uint32_t s0, s1, s2, s3; -/** - * \brief Converts the GIFT-128 nibble-based representation into word-based. - * - * \param output Output buffer to write the word-based version to. - * \param input Input buffer to read the nibble-based version from. - * - * The \a input and \a output buffers can be the same buffer. - */ -static void gift128n_to_words - (unsigned char *output, const unsigned char *input) + /* Copy the plaintext into the state buffer and convert from nibbles */ + gift128n_to_words(output, input); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* Perform all 40 rounds five at a time using the fixsliced method. + * Every 5 rounds except the last we add the tweak value to the state */ + gift128b_encrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + s0 ^= tweak; + gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); + gift128n_to_nibbles(output, output); +} + +#else /* GIFT128_VARIANT_TINY */ + +void gift128b_encrypt + (const gift128b_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) { uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Load the input buffer into 32-bit words. We use the nibble order - * from the HYENA submission to NIST which is byte-reversed with respect - * to the nibble order of the original GIFT-128 paper. Nibble zero is in - * the first byte instead of the last, which means little-endian order. */ - s0 = le_load_word32(input + 12); - s1 = le_load_word32(input + 8); - s2 = le_load_word32(input + 4); - s3 = le_load_word32(input); + /* Copy the plaintext into the state buffer and convert from big endian */ + s0 = be_load_word32(input); + s1 = be_load_word32(input + 4); + s2 = be_load_word32(input + 8); + s3 = be_load_word32(input + 12); - /* Rearrange the bits so that bits 0..3 of each nibble are - * scattered to bytes 0..3 of each word. 
The permutation is: - * - * 0 8 16 24 1 9 17 25 2 10 18 26 3 11 19 27 4 12 20 28 5 13 21 29 6 14 22 30 7 15 23 31 - * - * Generated with "http://programming.sirrida.de/calcperm.php". - */ - #define PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x0a0a0a0a, 3); \ - bit_permute_step(x, 0x00cc00cc, 6); \ - bit_permute_step(x, 0x0000f0f0, 12); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - PERM_WORDS(s0); - PERM_WORDS(s1); - PERM_WORDS(s2); - PERM_WORDS(s3); + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } - /* Rearrange the bytes and write them to the output buffer */ - output[0] = (uint8_t)s0; - output[1] = (uint8_t)s1; - output[2] = (uint8_t)s2; - output[3] = (uint8_t)s3; - output[4] = (uint8_t)(s0 >> 8); - output[5] = (uint8_t)(s1 >> 8); - output[6] = (uint8_t)(s2 >> 8); - output[7] = (uint8_t)(s3 >> 8); - output[8] = (uint8_t)(s0 >> 16); - output[9] = (uint8_t)(s1 >> 16); - output[10] = (uint8_t)(s2 >> 16); - output[11] = (uint8_t)(s3 >> 16); - output[12] = (uint8_t)(s0 >> 24); - output[13] = (uint8_t)(s1 >> 24); - output[14] = (uint8_t)(s2 >> 24); - output[15] = (uint8_t)(s3 >> 24); + /* Pack the state into the ciphertext buffer in big endian */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); } -/** - * \brief Converts the GIFT-128 word-based representation into nibble-based. - * - * \param output Output buffer to write the nibble-based version to. - * \param input Input buffer to read the word-based version from. 
- */ -static void gift128n_to_nibbles - (unsigned char *output, const unsigned char *input) +void gift128b_encrypt_preloaded + (const gift128b_key_schedule_t *ks, uint32_t output[4], + const uint32_t input[4]) { uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Load the input bytes and rearrange them so that s0 contains the - * most significant nibbles and s3 contains the least significant */ - s0 = (((uint32_t)(input[12])) << 24) | - (((uint32_t)(input[8])) << 16) | - (((uint32_t)(input[4])) << 8) | - ((uint32_t)(input[0])); - s1 = (((uint32_t)(input[13])) << 24) | - (((uint32_t)(input[9])) << 16) | - (((uint32_t)(input[5])) << 8) | - ((uint32_t)(input[1])); - s2 = (((uint32_t)(input[14])) << 24) | - (((uint32_t)(input[10])) << 16) | - (((uint32_t)(input[6])) << 8) | - ((uint32_t)(input[2])); - s3 = (((uint32_t)(input[15])) << 24) | - (((uint32_t)(input[11])) << 16) | - (((uint32_t)(input[7])) << 8) | - ((uint32_t)(input[3])); + /* Copy the plaintext into the state buffer */ + s0 = input[0]; + s1 = input[1]; + s2 = input[2]; + s3 = input[3]; - /* Apply the inverse of PERM_WORDS() from the function above */ - #define INV_PERM_WORDS(_x) \ - do { \ - uint32_t x = (_x); \ - bit_permute_step(x, 0x00aa00aa, 7); \ - bit_permute_step(x, 0x0000cccc, 14); \ - bit_permute_step(x, 0x00f000f0, 4); \ - bit_permute_step(x, 0x0000ff00, 8); \ - (_x) = x; \ - } while (0) - INV_PERM_WORDS(s0); - INV_PERM_WORDS(s1); - INV_PERM_WORDS(s2); - INV_PERM_WORDS(s3); + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } - /* Store the result into the output buffer as 32-bit words */ - le_store_word32(output + 12, s0); - le_store_word32(output + 8, s1); - le_store_word32(output + 4, s2); - le_store_word32(output, s3); + /* Pack the state into the ciphertext buffer */ + output[0] = s0; + output[1] = s1; + output[2] = s2; + output[3] = s3; } -void gift128n_encrypt +void gift128t_encrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input) + const unsigned char *input, uint32_t tweak) { + uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; + + /* Copy the plaintext into the state buffer and convert from nibbles */ gift128n_to_words(output, input); - gift128b_encrypt(ks, output, output); + s0 = be_load_word32(output); + s1 = be_load_word32(output + 4); + s2 = be_load_word32(output + 8); + s3 = be_load_word32(output + 12); + + /* The key schedule is initialized with the key itself */ + w0 = ks->k[3]; + w1 = ks->k[1]; + w2 = ks->k[2]; + w3 = ks->k[0]; + + /* Perform all 40 rounds */ + for (round = 0; round < 40; ++round) { + /* SubCells - apply the S-box */ + s1 ^= s0 & s2; + s0 ^= s1 & s3; + s2 ^= s0 | s1; + s3 ^= s2; + s1 ^= s3; + s3 ^= 
0xFFFFFFFFU; + s2 ^= s0 & s1; + temp = s0; + s0 = s3; + s3 = temp; + + /* PermBits - apply the 128-bit permutation */ + PERM0(s0); + PERM1(s1); + PERM2(s2); + PERM3(s3); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round]; + + /* AddTweak - XOR in the tweak every 5 rounds except the last */ + if (((round + 1) % 5) == 0 && round < 39) + s0 ^= tweak; + + /* Rotate the key schedule */ + temp = w3; + w3 = w2; + w2 = w1; + w1 = w0; + w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + } + + /* Pack the state into the ciphertext buffer in nibble form */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); gift128n_to_nibbles(output, output); } -void gift128n_decrypt - (const gift128n_key_schedule_t *ks, unsigned char *output, +#endif /* GIFT128_VARIANT_TINY */ + +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL + +void gift128b_decrypt + (const gift128b_key_schedule_t *ks, unsigned char *output, const unsigned char *input) { - gift128n_to_words(output, input); - gift128b_decrypt(ks, output, output); - gift128n_to_nibbles(output, output); -} + uint32_t s0, s1, s2, s3; -/* 4-bit tweak values expanded to 32-bit */ -static uint32_t const GIFT128_tweaks[16] = { - 0x00000000, 0xe1e1e1e1, 0xd2d2d2d2, 0x33333333, - 0xb4b4b4b4, 0x55555555, 0x66666666, 0x87878787, - 0x78787878, 0x99999999, 0xaaaaaaaa, 0x4b4b4b4b, - 0xcccccccc, 0x2d2d2d2d, 0x1e1e1e1e, 0xffffffff -}; + /* Copy the plaintext into the state buffer and convert from big endian */ + s0 = be_load_word32(input); + s1 = be_load_word32(input + 4); + s2 = be_load_word32(input + 8); + s3 = be_load_word32(input + 12); -void gift128t_encrypt + /* Perform all 40 rounds five at a time using the fixsliced method */ + gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); + + /* Pack the state into the ciphertext buffer in big endian */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); +} + +void gift128t_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak) + const unsigned char *input, uint32_t tweak) { - uint32_t s0, s1, s2, s3, tword; + uint32_t s0, s1, s2, s3; - /* Copy the plaintext into the state buffer and convert from nibbles */ + /* Copy the ciphertext into the state buffer and convert from nibbles */ gift128n_to_words(output, input); s0 = be_load_word32(output); s1 = be_load_word32(output + 4); @@ -782,25 +1254,24 @@ void gift128t_encrypt s3 = be_load_word32(output + 12); /* Perform all 40 rounds five at a time using the fixsliced method. 
- * Every 5 rounds except the last we add the tweak value to the state */ - tword = GIFT128_tweaks[tweak]; - gift128b_encrypt_5_rounds(ks->k, GIFT128_RC); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - s0 ^= tword; - gift128b_encrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); + * Every 5 rounds except the first we add the tweak value to the state */ + gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC_fixsliced + 35); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC_fixsliced + 30); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC_fixsliced + 25); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC_fixsliced + 20); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC_fixsliced + 15); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC_fixsliced + 10); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC_fixsliced + 5); + s0 ^= tweak; + gift128b_decrypt_5_rounds(ks->k, GIFT128_RC_fixsliced); - /* Pack the state into the ciphertext buffer in nibble form */ + /* Pack the state into the plaintext buffer in nibble form */ be_store_word32(output, s0); be_store_word32(output + 4, s1); be_store_word32(output + 8, s2); @@ -808,37 +1279,211 @@ void gift128t_encrypt gift128n_to_nibbles(output, output); } +#else /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ + +/* The small variant uses fixslicing for encryption, but we need to change + * to bitslicing for decryption because of the difficulty of fast-forwarding + * the fixsliced key schedule to the end. So the tiny variant is used for + * decryption when the small variant is selected. Since the NIST AEAD modes + * for GIFT-128 only use the block encrypt operation, the inefficiencies + * in decryption don't matter all that much */ + +/** + * \def gift128b_load_and_forward_schedule() + * \brief Generate the decryption key at the end of the last round. + * + * To do that, we run the block operation forward to determine the + * final state of the key schedule after the last round: + * + * w0 = ks->k[0]; + * w1 = ks->k[1]; + * w2 = ks->k[2]; + * w3 = ks->k[3]; + * for (round = 0; round < 40; ++round) { + * temp = w3; + * w3 = w2; + * w2 = w1; + * w1 = w0; + * w0 = ((temp & 0xFFFC0000U) >> 2) | ((temp & 0x00030000U) << 14) | + * ((temp & 0x00000FFFU) << 4) | ((temp & 0x0000F000U) >> 12); + * } + * + * We can short-cut all of the above by noticing that we don't need + * to do the word rotations. Every 4 rounds, the rotation alignment + * returns to the original position and each word has been rotated + * by applying the "2 right and 4 left" bit-rotation step to it. + * We then repeat that 10 times for the full 40 rounds. The overall + * effect is to apply a "20 right and 40 left" bit-rotation to every + * word in the key schedule. That is equivalent to "4 right and 8 left" + * on the 16-bit sub-words. 
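+ *
+ * As an illustrative sanity check only (the helper name below is
+ * hypothetical and not part of this library), the shortcut can be
+ * compared against literally applying the per-4-round rotation ten
+ * times to a single word:
+ *
+ * static uint32_t gift128b_fast_forward_word_ref(uint32_t w)
+ * {
+ *     int i;
+ *     for (i = 0; i < 10; ++i) {
+ *         w = ((w & 0xFFFC0000U) >> 2) | ((w & 0x00030000U) << 14) |
+ *             ((w & 0x00000FFFU) << 4) | ((w & 0x0000F000U) >> 12);
+ *     }
+ *     return w;
+ * }
+ *
+ * which gives the same result as the single "4 right and 8 left"
+ * step applied by gift128b_load_and_forward_schedule() below:
+ *
+ *     ((w & 0xFFF00000U) >> 4) | ((w & 0x000F0000U) << 12) |
+ *     ((w & 0x000000FFU) << 8) | ((w & 0x0000FF00U) >> 8)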
+ */ +#if GIFT128_VARIANT != GIFT128_VARIANT_SMALL +#define gift128b_load_and_forward_schedule() \ + do { \ + w0 = ks->k[3]; \ + w1 = ks->k[1]; \ + w2 = ks->k[2]; \ + w3 = ks->k[0]; \ + w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ + ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ + w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ + ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ + w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ + ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ + w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ + ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ + } while (0) +#else +/* The small variant needs to also undo some of the rotations that were + * done to generate the fixsliced version of the key schedule */ +#define gift128b_load_and_forward_schedule() \ + do { \ + w0 = ks->k[3]; \ + w1 = ks->k[1]; \ + w2 = ks->k[2]; \ + w3 = ks->k[0]; \ + gift128b_swap_move(w3, w3, 0x000000FFU, 24); \ + gift128b_swap_move(w3, w3, 0x00003333U, 18); \ + gift128b_swap_move(w3, w3, 0x000F000FU, 12); \ + gift128b_swap_move(w3, w3, 0x00550055U, 9); \ + gift128b_swap_move(w1, w1, 0x000000FFU, 24); \ + gift128b_swap_move(w1, w1, 0x00003333U, 18); \ + gift128b_swap_move(w1, w1, 0x000F000FU, 12); \ + gift128b_swap_move(w1, w1, 0x00550055U, 9); \ + gift128b_swap_move(w2, w2, 0x000000FFU, 24); \ + gift128b_swap_move(w2, w2, 0x000F000FU, 12); \ + gift128b_swap_move(w2, w2, 0x03030303U, 6); \ + gift128b_swap_move(w2, w2, 0x11111111U, 3); \ + gift128b_swap_move(w0, w0, 0x000000FFU, 24); \ + gift128b_swap_move(w0, w0, 0x000F000FU, 12); \ + gift128b_swap_move(w0, w0, 0x03030303U, 6); \ + gift128b_swap_move(w0, w0, 0x11111111U, 3); \ + w0 = ((w0 & 0xFFF00000U) >> 4) | ((w0 & 0x000F0000U) << 12) | \ + ((w0 & 0x000000FFU) << 8) | ((w0 & 0x0000FF00U) >> 8); \ + w1 = ((w1 & 0xFFF00000U) >> 4) | ((w1 & 0x000F0000U) << 12) | \ + ((w1 & 0x000000FFU) << 8) | ((w1 & 0x0000FF00U) >> 8); \ + w2 = ((w2 & 0xFFF00000U) >> 4) | ((w2 & 0x000F0000U) << 12) | \ + ((w2 & 0x000000FFU) << 8) | ((w2 & 0x0000FF00U) >> 8); \ + w3 = ((w3 & 0xFFF00000U) >> 4) | ((w3 & 0x000F0000U) << 12) | \ + ((w3 & 0x000000FFU) << 8) | ((w3 & 0x0000FF00U) >> 8); \ + } while (0) +#endif + +void gift128b_decrypt + (const gift128b_key_schedule_t *ks, unsigned char *output, + const unsigned char *input) +{ + uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; + + /* Copy the ciphertext into the state buffer and convert from big endian */ + s0 = be_load_word32(input); + s1 = be_load_word32(input + 4); + s2 = be_load_word32(input + 8); + s3 = be_load_word32(input + 12); + + /* Generate the decryption key at the end of the last round */ + gift128b_load_and_forward_schedule(); + + /* Perform all 40 rounds */ + for (round = 40; round > 0; --round) { + /* Rotate the key schedule backwards */ + temp = w0; + w0 = w1; + w1 = w2; + w2 = w3; + w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | + ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; + + /* InvPermBits - apply the inverse of the 128-bit permutation */ + INV_PERM0(s0); + INV_PERM1(s1); + INV_PERM2(s2); + INV_PERM3(s3); + + /* InvSubCells - apply the inverse of the S-box */ + temp = s0; + s0 = s3; + s3 = temp; + s2 ^= s0 & s1; + s3 ^= 0xFFFFFFFFU; + s1 ^= s3; + s3 ^= s2; + s2 ^= s0 | s1; + s0 ^= s1 
& s3; + s1 ^= s0 & s2; + } + + /* Pack the state into the plaintext buffer in big endian */ + be_store_word32(output, s0); + be_store_word32(output + 4, s1); + be_store_word32(output + 8, s2); + be_store_word32(output + 12, s3); +} + void gift128t_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak) + const unsigned char *input, uint32_t tweak) { - uint32_t s0, s1, s2, s3, tword; + uint32_t s0, s1, s2, s3; + uint32_t w0, w1, w2, w3; + uint32_t temp; + uint8_t round; - /* Copy the ciphertext into the state buffer and convert from nibbles */ + /* Copy the plaintext into the state buffer and convert from nibbles */ gift128n_to_words(output, input); s0 = be_load_word32(output); s1 = be_load_word32(output + 4); s2 = be_load_word32(output + 8); s3 = be_load_word32(output + 12); - /* Perform all 40 rounds five at a time using the fixsliced method. - * Every 5 rounds except the first we add the tweak value to the state */ - tword = GIFT128_tweaks[tweak]; - gift128b_decrypt_5_rounds(ks->k + 70, GIFT128_RC + 35); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 60, GIFT128_RC + 30); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 50, GIFT128_RC + 25); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 40, GIFT128_RC + 20); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 30, GIFT128_RC + 15); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 20, GIFT128_RC + 10); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k + 10, GIFT128_RC + 5); - s0 ^= tword; - gift128b_decrypt_5_rounds(ks->k, GIFT128_RC); + /* Generate the decryption key at the end of the last round */ + gift128b_load_and_forward_schedule(); + + /* Perform all 40 rounds */ + for (round = 40; round > 0; --round) { + /* Rotate the key schedule backwards */ + temp = w0; + w0 = w1; + w1 = w2; + w2 = w3; + w3 = ((temp & 0x3FFF0000U) << 2) | ((temp & 0xC0000000U) >> 14) | + ((temp & 0x0000FFF0U) >> 4) | ((temp & 0x0000000FU) << 12); + + /* AddTweak - XOR in the tweak every 5 rounds except the last */ + if ((round % 5) == 0 && round < 40) + s0 ^= tweak; + + /* AddRoundKey - XOR in the key schedule and the round constant */ + s2 ^= w1; + s1 ^= w3; + s3 ^= 0x80000000U ^ GIFT128_RC[round - 1]; + + /* InvPermBits - apply the inverse of the 128-bit permutation */ + INV_PERM0(s0); + INV_PERM1(s1); + INV_PERM2(s2); + INV_PERM3(s3); + + /* InvSubCells - apply the inverse of the S-box */ + temp = s0; + s0 = s3; + s3 = temp; + s2 ^= s0 & s1; + s3 ^= 0xFFFFFFFFU; + s1 ^= s3; + s3 ^= s2; + s2 ^= s0 | s1; + s0 ^= s1 & s3; + s1 ^= s0 & s2; + } /* Pack the state into the plaintext buffer in nibble form */ be_store_word32(output, s0); @@ -847,3 +1492,7 @@ void gift128t_decrypt be_store_word32(output + 12, s3); gift128n_to_nibbles(output, output); } + +#endif /* GIFT128_VARIANT_SMALL || GIFT128_VARIANT_TINY */ + +#endif /* !GIFT128_VARIANT_ASM */ diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128.h b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128.h index 1ac40e5..f57d143 100644 --- a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128.h +++ b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128.h @@ -47,11 +47,13 @@ * in any of the NIST submissions so we don't bother with it in this library. 
* * References: https://eprint.iacr.org/2017/622.pdf, + * https://eprint.iacr.org/2020/412.pdf, * https://giftcipher.github.io/gift/ */ #include #include +#include "internal-gift128-config.h" #ifdef __cplusplus extern "C" { @@ -63,16 +65,23 @@ extern "C" { #define GIFT128_BLOCK_SIZE 16 /** - * \brief Number of round keys for the fixsliced representation of GIFT-128. + * \var GIFT128_ROUND_KEYS + * \brief Number of round keys for the GIFT-128 key schedule. */ +#if GIFT128_VARIANT == GIFT128_VARIANT_TINY +#define GIFT128_ROUND_KEYS 4 +#elif GIFT128_VARIANT == GIFT128_VARIANT_SMALL +#define GIFT128_ROUND_KEYS 20 +#else #define GIFT128_ROUND_KEYS 80 +#endif /** * \brief Structure of the key schedule for GIFT-128 (bit-sliced). */ typedef struct { - /** Pre-computed round keys in the fixsliced form */ + /** Pre-computed round keys for bit-sliced GIFT-128 */ uint32_t k[GIFT128_ROUND_KEYS]; } gift128b_key_schedule_t; @@ -81,14 +90,9 @@ typedef struct * \brief Initializes the key schedule for GIFT-128 (bit-sliced). * * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. + * \param key Points to the 16 bytes of the key data. */ -int gift128b_init - (gift128b_key_schedule_t *ks, const unsigned char *key, size_t key_len); +void gift128b_init(gift128b_key_schedule_t *ks, const unsigned char *key); /** * \brief Encrypts a 128-bit block with GIFT-128 (bit-sliced). @@ -145,14 +149,9 @@ typedef gift128b_key_schedule_t gift128n_key_schedule_t; * \brief Initializes the key schedule for GIFT-128 (nibble-based). * * \param ks Points to the key schedule to initialize. - * \param key Points to the key data. - * \param key_len Length of the key data, which must be 16. - * - * \return Non-zero on success or zero if there is something wrong - * with the parameters. + * \param key Points to the 16 bytes of the key data. */ -int gift128n_init - (gift128n_key_schedule_t *ks, const unsigned char *key, size_t key_len); +void gift128n_init(gift128n_key_schedule_t *ks, const unsigned char *key); /** * \brief Encrypts a 128-bit block with GIFT-128 (nibble-based). 
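The hunk above replaces the old int-returning gift128b_init()/gift128n_init(), which took an explicit key_len, with void versions that always consume 16 bytes of key data. A minimal caller-side sketch of the new API follows; the function name and the all-zero key and block are purely illustrative, not part of the library:

    #include "internal-gift128.h"

    static void gift128b_usage_sketch(void)
    {
        gift128b_key_schedule_t ks;
        unsigned char key[16] = {0};    /* illustrative key material */
        unsigned char block[16] = {0};  /* illustrative plaintext block */

        gift128b_init(&ks, key);             /* no key_len, no return value */
        gift128b_encrypt(&ks, block, block); /* in-place encryption is allowed */
    }
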
@@ -182,13 +181,31 @@ void gift128n_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, const unsigned char *input); +/* 4-bit tweak values expanded to 32-bit for TweGIFT-128 */ +#define GIFT128T_TWEAK_0 0x00000000 /**< TweGIFT-128 tweak value 0 */ +#define GIFT128T_TWEAK_1 0xe1e1e1e1 /**< TweGIFT-128 tweak value 1 */ +#define GIFT128T_TWEAK_2 0xd2d2d2d2 /**< TweGIFT-128 tweak value 2 */ +#define GIFT128T_TWEAK_3 0x33333333 /**< TweGIFT-128 tweak value 3 */ +#define GIFT128T_TWEAK_4 0xb4b4b4b4 /**< TweGIFT-128 tweak value 4 */ +#define GIFT128T_TWEAK_5 0x55555555 /**< TweGIFT-128 tweak value 5 */ +#define GIFT128T_TWEAK_6 0x66666666 /**< TweGIFT-128 tweak value 6 */ +#define GIFT128T_TWEAK_7 0x87878787 /**< TweGIFT-128 tweak value 7 */ +#define GIFT128T_TWEAK_8 0x78787878 /**< TweGIFT-128 tweak value 8 */ +#define GIFT128T_TWEAK_9 0x99999999 /**< TweGIFT-128 tweak value 9 */ +#define GIFT128T_TWEAK_10 0xaaaaaaaa /**< TweGIFT-128 tweak value 10 */ +#define GIFT128T_TWEAK_11 0x4b4b4b4b /**< TweGIFT-128 tweak value 11 */ +#define GIFT128T_TWEAK_12 0xcccccccc /**< TweGIFT-128 tweak value 12 */ +#define GIFT128T_TWEAK_13 0x2d2d2d2d /**< TweGIFT-128 tweak value 13 */ +#define GIFT128T_TWEAK_14 0x1e1e1e1e /**< TweGIFT-128 tweak value 14 */ +#define GIFT128T_TWEAK_15 0xffffffff /**< TweGIFT-128 tweak value 15 */ + /** * \brief Encrypts a 128-bit block with TweGIFT-128 (tweakable variant). * * \param ks Points to the GIFT-128 key schedule. * \param output Output buffer which must be at least 16 bytes in length. * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value. + * \param tweak 4-bit tweak value expanded to 32-bit. * * The \a input and \a output buffers can be the same buffer for * in-place encryption. @@ -200,7 +217,7 @@ void gift128n_decrypt */ void gift128t_encrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak); + const unsigned char *input, uint32_t tweak); /** * \brief Decrypts a 128-bit block with TweGIFT-128 (tweakable variant). @@ -208,7 +225,7 @@ void gift128t_encrypt * \param ks Points to the GIFT-128 key schedule. * \param output Output buffer which must be at least 16 bytes in length. * \param input Input buffer which must be at least 16 bytes in length. - * \param tweak 4-bit tweak value. + * \param tweak 4-bit tweak value expanded to 32-bit. * * The \a input and \a output buffers can be the same buffer for * in-place encryption. 
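The hunk above moves the 4-bit-to-32-bit tweak expansion out of the TweGIFT-128 entry points and into the caller via the GIFT128T_TWEAK_* constants. A hedged before/after sketch, where the tweak value 1 and the buffer names ks, c, m are purely illustrative:

    /* before this change: the raw 4-bit domain-separation value was passed */
    gift128t_encrypt(&ks, c, m, 1);

    /* after this change: the caller passes the pre-expanded 32-bit constant */
    gift128t_encrypt(&ks, c, m, GIFT128T_TWEAK_1);  /* == 0xe1e1e1e1 */
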
@@ -220,7 +237,7 @@ void gift128t_encrypt */ void gift128t_decrypt (const gift128n_key_schedule_t *ks, unsigned char *output, - const unsigned char *input, unsigned char tweak); + const unsigned char *input, uint32_t tweak); #ifdef __cplusplus } diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128b-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128b-avr.S new file mode 100644 index 0000000..641613a --- /dev/null +++ b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128b-avr.S @@ -0,0 +1,2104 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 40 +table_0: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 11 + .byte 23 + .byte 46 + .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128b_init + .type gift128b_init, @function +gift128b_init: + movw r30,r24 + movw r26,r22 +.L__stack_usage = 2 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + st Z,r18 + std Z+1,r19 + std Z+2,r20 + std Z+3,r21 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + std Z+4,r18 + std Z+5,r19 + std Z+6,r20 + std Z+7,r21 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + std Z+8,r18 + std Z+9,r19 + std Z+10,r20 + std Z+11,r21 + ld r21,X+ + ld r20,X+ + ld r19,X+ + ld r18,X+ + std Z+12,r18 + std Z+13,r19 + std Z+14,r20 + std Z+15,r21 + ret + .size gift128b_init, .-gift128b_init + + .text +.global gift128b_encrypt + .type gift128b_encrypt, @function +gift128b_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 36 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r17,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + mov r16,r1 +46: + rcall 199f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + rcall 199f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd 
r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + rcall 199f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + rcall 199f + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + ldi r17,40 + cpse r16,r17 + rjmp 46b + rjmp 548f +199: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + movw r18,r22 + movw r20,r2 + mov r0,r4 + and r0,r18 + eor r8,r0 + mov r0,r5 + and r0,r19 + eor r9,r0 + mov r0,r6 + and r0,r20 + eor r10,r0 + mov r0,r7 + and r0,r21 + eor r11,r0 + movw r22,r12 + movw r2,r14 + movw r12,r18 + movw r14,r20 + bst r22,1 + bld r0,0 + bst r22,4 + bld r22,1 + bst r2,0 + bld r22,4 + bst r22,2 + bld r2,0 + bst r23,0 + bld r22,2 + bst r22,3 + bld r23,0 + bst r23,4 + bld r22,3 + bst r2,3 + bld r23,4 + bst r23,6 + bld r2,3 + bst r3,3 + bld r23,6 + bst r23,5 + bld r3,3 + bst r2,7 + bld r23,5 + bst r3,6 + bld r2,7 + bst r3,1 + bld r3,6 + bst r22,5 + bld r3,1 + bst r2,4 + bld r22,5 + bst r2,2 + bld r2,4 + bst r23,2 + bld r2,2 + bst r23,3 + bld r23,2 + bst r23,7 + bld r23,3 + bst r3,7 + bld r23,7 + bst r3,5 + bld r3,7 + bst r2,5 + bld r3,5 + bst r2,6 + bld r2,5 + bst r3,2 + bld r2,6 + bst r23,1 + bld r3,2 + bst r22,7 + bld r23,1 + bst r3,4 + bld r22,7 + bst r2,1 + bld r3,4 + bst r22,6 + bld r2,1 + bst r3,0 + bld r22,6 + bst r0,0 + bld r3,0 + bst r4,0 + bld r0,0 + bst r4,1 + bld r4,0 + bst r4,5 + bld r4,1 + bst r6,5 + bld r4,5 + bst r6,7 + bld r6,5 + bst r7,7 + bld r6,7 + bst r7,6 + bld r7,7 + bst r7,2 + bld r7,6 + bst r5,2 + bld r7,2 + bst r5,0 + bld r5,2 + bst r0,0 + bld r5,0 + bst r4,2 + bld r0,0 + bst r5,1 + bld r4,2 + bst r4,4 + bld r5,1 + bst r6,1 + bld r4,4 + bst r4,7 + bld r6,1 + bst r7,5 + bld r4,7 + bst r6,6 + bld r7,5 + bst r7,3 + bld r6,6 + bst r5,6 + bld r7,3 + bst r7,0 + bld r5,6 + bst r0,0 + bld r7,0 + bst r4,3 + bld r0,0 + bst r5,5 + bld r4,3 + bst r6,4 + bld r5,5 + bst r6,3 + bld r6,4 + bst r5,7 + bld r6,3 + bst r7,4 + bld r5,7 + bst r6,2 + bld r7,4 + bst r5,3 + bld r6,2 + bst 
r5,4 + bld r5,3 + bst r6,0 + bld r5,4 + bst r0,0 + bld r6,0 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r8,2 + bld r8,0 + bst r9,2 + bld r8,2 + bst r9,1 + bld r9,2 + bst r8,5 + bld r9,1 + bst r10,6 + bld r8,5 + bst r11,0 + bld r10,6 + bst r8,3 + bld r11,0 + bst r9,6 + bld r8,3 + bst r11,1 + bld r9,6 + bst r8,7 + bld r11,1 + bst r11,6 + bld r8,7 + bst r11,3 + bld r11,6 + bst r9,7 + bld r11,3 + bst r11,5 + bld r9,7 + bst r10,7 + bld r11,5 + bst r11,4 + bld r10,7 + bst r10,3 + bld r11,4 + bst r9,4 + bld r10,3 + bst r10,1 + bld r9,4 + bst r8,4 + bld r10,1 + bst r10,2 + bld r8,4 + bst r9,0 + bld r10,2 + bst r8,1 + bld r9,0 + bst r8,6 + bld r8,1 + bst r11,2 + bld r8,6 + bst r9,3 + bld r11,2 + bst r9,5 + bld r9,3 + bst r10,5 + bld r9,5 + bst r10,4 + bld r10,5 + bst r10,0 + bld r10,4 + bst r0,0 + bld r10,0 + bst r12,0 + bld r0,0 + bst r12,3 + bld r12,0 + bst r13,7 + bld r12,3 + bst r15,6 + bld r13,7 + bst r15,0 + bld r15,6 + bst r0,0 + bld r15,0 + bst r12,1 + bld r0,0 + bst r12,7 + bld r12,1 + bst r15,7 + bld r12,7 + bst r15,4 + bld r15,7 + bst r14,0 + bld r15,4 + bst r0,0 + bld r14,0 + bst r12,2 + bld r0,0 + bst r13,3 + bld r12,2 + bst r13,6 + bld r13,3 + bst r15,2 + bld r13,6 + bst r13,0 + bld r15,2 + bst r0,0 + bld r13,0 + bst r12,4 + bld r0,0 + bst r14,3 + bld r12,4 + bst r13,5 + bld r14,3 + bst r14,6 + bld r13,5 + bst r15,1 + bld r14,6 + bst r0,0 + bld r15,1 + bst r12,5 + bld r0,0 + bst r14,7 + bld r12,5 + bst r15,5 + bld r14,7 + bst r14,4 + bld r15,5 + bst r14,1 + bld r14,4 + bst r0,0 + bld r14,1 + bst r12,6 + bld r0,0 + bst r15,3 + bld r12,6 + bst r13,4 + bld r15,3 + bst r14,2 + bld r13,4 + bst r13,1 + bld r14,2 + bst r0,0 + bld r13,1 + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + inc r16 + ret +548: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt, .-gift128b_encrypt + + .text +.global gift128b_encrypt_preloaded + .type gift128b_encrypt_preloaded, @function +gift128b_encrypt_preloaded: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 36 + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + std Y+9,r26 + std 
Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r17,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + mov r16,r1 +46: + rcall 199f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + rcall 199f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + rcall 199f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + rcall 199f + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + lsl r26 + rol r27 + adc r26,r1 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + ldi r17,40 + cpse r16,r17 + rjmp 46b + rjmp 548f +199: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + movw r18,r22 + movw r20,r2 + mov r0,r4 + and r0,r18 + eor r8,r0 + mov r0,r5 + and r0,r19 + eor r9,r0 + mov r0,r6 + and r0,r20 + eor r10,r0 + mov r0,r7 + and r0,r21 + eor r11,r0 + movw r22,r12 + movw r2,r14 + movw r12,r18 + movw r14,r20 + bst r22,1 + bld r0,0 + bst r22,4 + bld r22,1 + bst r2,0 + bld r22,4 + bst r22,2 + bld r2,0 + bst r23,0 + bld r22,2 + bst r22,3 + bld r23,0 + bst r23,4 + bld r22,3 + bst r2,3 + bld r23,4 + bst r23,6 + bld r2,3 + bst r3,3 + bld r23,6 + bst r23,5 + bld r3,3 + bst r2,7 + bld r23,5 + bst r3,6 + bld r2,7 + bst r3,1 + bld r3,6 + bst r22,5 + bld r3,1 + bst r2,4 + bld r22,5 + bst r2,2 + bld r2,4 + bst r23,2 + bld r2,2 + bst r23,3 + bld r23,2 + bst r23,7 + bld r23,3 + bst r3,7 + bld r23,7 + bst r3,5 + bld r3,7 + bst r2,5 + bld r3,5 + bst r2,6 + bld r2,5 + bst r3,2 + bld r2,6 + bst r23,1 + bld r3,2 + bst 
r22,7 + bld r23,1 + bst r3,4 + bld r22,7 + bst r2,1 + bld r3,4 + bst r22,6 + bld r2,1 + bst r3,0 + bld r22,6 + bst r0,0 + bld r3,0 + bst r4,0 + bld r0,0 + bst r4,1 + bld r4,0 + bst r4,5 + bld r4,1 + bst r6,5 + bld r4,5 + bst r6,7 + bld r6,5 + bst r7,7 + bld r6,7 + bst r7,6 + bld r7,7 + bst r7,2 + bld r7,6 + bst r5,2 + bld r7,2 + bst r5,0 + bld r5,2 + bst r0,0 + bld r5,0 + bst r4,2 + bld r0,0 + bst r5,1 + bld r4,2 + bst r4,4 + bld r5,1 + bst r6,1 + bld r4,4 + bst r4,7 + bld r6,1 + bst r7,5 + bld r4,7 + bst r6,6 + bld r7,5 + bst r7,3 + bld r6,6 + bst r5,6 + bld r7,3 + bst r7,0 + bld r5,6 + bst r0,0 + bld r7,0 + bst r4,3 + bld r0,0 + bst r5,5 + bld r4,3 + bst r6,4 + bld r5,5 + bst r6,3 + bld r6,4 + bst r5,7 + bld r6,3 + bst r7,4 + bld r5,7 + bst r6,2 + bld r7,4 + bst r5,3 + bld r6,2 + bst r5,4 + bld r5,3 + bst r6,0 + bld r5,4 + bst r0,0 + bld r6,0 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r8,2 + bld r8,0 + bst r9,2 + bld r8,2 + bst r9,1 + bld r9,2 + bst r8,5 + bld r9,1 + bst r10,6 + bld r8,5 + bst r11,0 + bld r10,6 + bst r8,3 + bld r11,0 + bst r9,6 + bld r8,3 + bst r11,1 + bld r9,6 + bst r8,7 + bld r11,1 + bst r11,6 + bld r8,7 + bst r11,3 + bld r11,6 + bst r9,7 + bld r11,3 + bst r11,5 + bld r9,7 + bst r10,7 + bld r11,5 + bst r11,4 + bld r10,7 + bst r10,3 + bld r11,4 + bst r9,4 + bld r10,3 + bst r10,1 + bld r9,4 + bst r8,4 + bld r10,1 + bst r10,2 + bld r8,4 + bst r9,0 + bld r10,2 + bst r8,1 + bld r9,0 + bst r8,6 + bld r8,1 + bst r11,2 + bld r8,6 + bst r9,3 + bld r11,2 + bst r9,5 + bld r9,3 + bst r10,5 + bld r9,5 + bst r10,4 + bld r10,5 + bst r10,0 + bld r10,4 + bst r0,0 + bld r10,0 + bst r12,0 + bld r0,0 + bst r12,3 + bld r12,0 + bst r13,7 + bld r12,3 + bst r15,6 + bld r13,7 + bst r15,0 + bld r15,6 + bst r0,0 + bld r15,0 + bst r12,1 + bld r0,0 + bst r12,7 + bld r12,1 + bst r15,7 + bld r12,7 + bst r15,4 + bld r15,7 + bst r14,0 + bld r15,4 + bst r0,0 + bld r14,0 + bst r12,2 + bld r0,0 + bst r13,3 + bld r12,2 + bst r13,6 + bld r13,3 + bst r15,2 + bld r13,6 + bst r13,0 + bld r15,2 + bst r0,0 + bld r13,0 + bst r12,4 + bld r0,0 + bst r14,3 + bld r12,4 + bst r13,5 + bld r14,3 + bst r14,6 + bld r13,5 + bst r15,1 + bld r14,6 + bst r0,0 + bld r15,1 + bst r12,5 + bld r0,0 + bst r14,7 + bld r12,5 + bst r15,5 + bld r14,7 + bst r14,4 + bld r15,5 + bst r14,1 + bld r14,4 + bst r0,0 + bld r14,1 + bst r12,6 + bld r0,0 + bst r15,3 + bld r12,6 + bst r13,4 + bld r15,3 + bst r14,2 + bld r13,4 + bst r13,1 + bld r14,2 + bst r0,0 + bld r13,1 + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + inc r16 + ret +548: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded + + .text +.global gift128b_decrypt + .type gift128b_decrypt, @function +gift128b_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 
+ push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r17,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +114: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + cpse r16,r1 + rjmp 114b + 
rjmp 611f +266: + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 + bst r7,6 + bld r7,2 + bst r7,7 + bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld r11,0 + bst r8,5 + bld r10,6 + bst r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + eor r8,r18 + 
eor r9,r19 + eor r10,r20 + eor r11,r21 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +611: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_decrypt, .-gift128b_decrypt + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128b-full-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128b-full-avr.S new file mode 100644 index 0000000..ff11875 --- /dev/null +++ b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128b-full-avr.S @@ -0,0 +1,5037 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_FULL + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 + 
.byte 128 + + .text +.global gift128b_init + .type gift128b_init, @function +gift128b_init: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 18 + ld r13,X+ + ld r12,X+ + ld r11,X+ + ld r10,X+ + ld r5,X+ + ld r4,X+ + ld r3,X+ + ld r2,X+ + ld r9,X+ + ld r8,X+ + ld r7,X+ + ld r6,X+ + ld r29,X+ + ld r28,X+ + ld r23,X+ + ld r22,X+ + st Z+,r22 + st Z+,r23 + st Z+,r28 + st Z+,r29 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + ldi r24,4 +33: + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r29 + ror r28 + ror r0 + lsr r29 + ror r28 + ror r0 + or r29,r0 + st Z+,r22 + st Z+,r23 + st Z+,r28 + st Z+,r29 + mov r0,r22 + mov r22,r2 + mov r2,r0 + mov r0,r23 + mov r23,r3 + mov r3,r0 + mov r0,r28 + mov r28,r4 + mov r4,r0 + mov r0,r29 + mov r29,r5 + mov r5,r0 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + lsl r6 + rol r7 + adc r6,r1 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + mov r0,r6 + mov r6,r10 + mov r10,r0 + mov r0,r7 + mov r7,r11 + mov r11,r0 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + dec r24 + breq 5115f + rjmp 33b +5115: + subi r30,80 + sbc r31,r1 + ldi r24,2 +119: + ld r22,Z + ldd r23,Z+1 + ldd r28,Z+2 + ldd r29,Z+3 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + st Z,r29 + std Z+1,r23 + std Z+2,r28 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r28,Z+6 + ldd r29,Z+7 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + 
eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+4,r29 + std Z+5,r23 + std Z+6,r28 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r28,Z+10 + ldd r29,Z+11 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+8,r29 + std Z+9,r23 + std Z+10,r28 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r28,Z+14 + ldd r29,Z+15 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + 
ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+12,r29 + std Z+13,r23 + std Z+14,r28 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r28,Z+18 + ldd r29,Z+19 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+16,r29 + std Z+17,r23 + std Z+18,r28 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r28,Z+22 + ldd r29,Z+23 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov 
r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+20,r29 + std Z+21,r23 + std Z+22,r28 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r28,Z+26 + ldd r29,Z+27 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+24,r29 + std Z+25,r23 + std Z+26,r28 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r28,Z+30 + ldd r29,Z+31 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + 
lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r28 + eor r21,r29 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + movw r18,r22 + movw r20,r28 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r28,r20 + eor r29,r21 + std Z+28,r29 + std Z+29,r23 + std Z+30,r28 + std Z+31,r22 + dec r24 + breq 1268f + adiw r30,40 + rjmp 119b +1268: + adiw r30,40 + movw r26,r30 + subi r26,80 + sbc r27,r1 + ldi r24,6 +1274: + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r2 + eor r19,r3 + andi r18,51 + andi r19,51 + eor r2,r18 + eor r3,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + movw r18,r2 + movw r20,r4 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + st Z,r2 + std Z+1,r3 + std Z+2,r4 + std Z+3,r5 + movw r18,r22 + movw r20,r28 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + andi r28,204 + andi r29,204 + or r28,r21 + or r29,r18 + or r22,r19 + or r23,r20 + movw r18,r28 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r28 + eor r19,r29 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r28,r18 + eor r29,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r28,r18 + eor r29,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r28 + std Z+5,r29 + std Z+6,r22 + std Z+7,r23 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + swap r3 + lsl r4 + adc r4,r1 + lsl r4 + adc r4,r1 + swap r5 + std Z+8,r2 + std Z+9,r3 + std Z+10,r4 + std Z+11,r5 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r28 + adc r28,r1 + lsl r28 + adc r28,r1 + lsl r28 + adc r28,r1 + lsl r29 + adc r29,r1 + lsl r29 + adc r29,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r28 + std Z+15,r29 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + ldi r25,85 + and 
r2,r25 + and r3,r25 + and r4,r25 + and r5,r25 + or r2,r19 + or r3,r20 + or r4,r21 + or r5,r18 + std Z+16,r4 + std Z+17,r5 + std Z+18,r2 + std Z+19,r3 + movw r18,r22 + movw r20,r28 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + andi r28,170 + andi r29,170 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + lsl r22 + rol r23 + rol r28 + rol r29 + adc r22,r1 + or r22,r18 + or r23,r19 + or r28,r20 + or r29,r21 + std Z+20,r29 + std Z+21,r22 + std Z+22,r23 + std Z+23,r28 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + movw r18,r2 + movw r20,r4 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r4 + eor r21,r5 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r4,r20 + eor r5,r21 + movw r18,r2 + movw r20,r4 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r14,r18 + movw r16,r20 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + lsr r17 + ror r16 + ror r15 + ror r14 + eor r14,r18 + eor r15,r19 + eor r16,r20 + eor r17,r21 + ldi r25,8 + and r14,r25 + and r15,r25 + andi r16,8 + andi r17,8 + eor r18,r14 + eor r19,r15 + eor r20,r16 + eor r21,r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + lsl r14 + rol r15 + rol r16 + rol r17 + eor r18,r14 + eor r19,r15 + eor r20,r16 + eor r21,r17 + ldi r17,15 + and r2,r17 + and r3,r17 + and r4,r17 + and r5,r17 + or r2,r18 + or r3,r19 + or r4,r20 + or r5,r21 + std Z+24,r2 + std Z+25,r3 + std Z+26,r4 + std Z+27,r5 + movw r18,r28 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r2,r22 + movw r4,r28 + ldi r16,1 + and r2,r16 + and r3,r16 + and r4,r16 + and r5,r16 + lsl r2 + rol r3 + rol r4 + rol r5 + lsl r2 + rol r3 + rol r4 + rol r5 + lsl r2 + rol r3 + rol r4 + rol r5 + or r2,r18 + or r3,r19 + movw r18,r28 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r2,r18 + or r3,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r4,r18 + or r5,r19 + movw r18,r22 + movw r20,r28 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r2,r18 + or r3,r19 + or r4,r20 + or r5,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r4,r22 + or r5,r23 + std Z+28,r2 + std Z+29,r3 + std Z+30,r4 + std Z+31,r5 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + mov r0,r1 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + lsr r5 + ror r4 + ror r0 + or r5,r0 + std Z+32,r3 + std Z+33,r2 + std Z+34,r4 + std Z+35,r5 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r28 + mov r28,r29 + mov r29,r0 + lsl r28 + rol r29 + adc r28,r1 + lsl r28 + rol r29 + adc r28,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r28 + std Z+39,r29 + dec r24 + breq 1733f + adiw r30,40 + rjmp 1274b +1733: + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 
+ pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_init, .-gift128b_init + + .text +.global gift128b_encrypt + .type gift128b_encrypt, @function +gift128b_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 19 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + movw r26,r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rjmp 765f +27: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + 
movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + 
eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +765: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + pop r0 + pop r0 + pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt, .-gift128b_encrypt + + .text +.global gift128b_encrypt_preloaded + .type gift128b_encrypt_preloaded, @function +gift128b_encrypt_preloaded: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 19 + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + movw r26,r30 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rcall 27f + rjmp 765f +27: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and 
r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + 
ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else 
+ lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +765: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + pop r0 + pop r0 + pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded + + .text +.global gift128b_decrypt + .type gift128b_decrypt, @function +gift128b_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e +.L__stack_usage = 19 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + movw r26,r30 + subi r26,192 + sbci r27,254 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,160 + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rcall 30f + rjmp 768f +30: + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r11 + mov r11,r10 + mov r10,r9 + mov r9,r8 + mov r8,r0 + mov r0,r12 + mov r12,r13 + mov r13,r14 + mov r14,r15 + mov r15,r0 + mov r0,r22 + and 
r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r1 + lsr r22 + ror r0 + lsr r22 + ror r0 + or r22,r0 + mov r0,r1 + lsr r23 + ror r0 + lsr r23 + ror r0 + or r23,r0 + mov r0,r1 + lsr r2 + ror r0 + lsr r2 + ror r0 + or r2,r0 + mov r0,r1 + lsr r3 + ror r0 + lsr r3 + ror r0 + or r3,r0 + swap r4 + swap r5 + swap r6 + swap r7 + lsl r8 + adc r8,r1 + lsl r8 + adc r8,r1 + lsl r9 + adc r9,r1 + lsl r9 + adc r9,r1 + lsl r10 + adc r10,r1 + lsl r10 + adc r10,r1 + lsl r11 + adc r11,r1 + lsl r11 + adc r11,r1 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + com r22 + com r23 + com r2 + com r3 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + 
ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + mov r0,r1 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + lsr r9 + ror r8 + ror r0 + or r9,r0 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + com r22 + com r23 + com r2 + com r3 + eor r4,r22 + eor r5,r23 + eor r6,r2 
+ eor r7,r3 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + dec r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + dec r30 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + ld r21,-X + ld r20,-X + ld r19,-X + ld r18,-X + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,119 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r15 + ror r14 + ror r13 + ror r12 + lsr r15 + ror r14 + ror r13 + ror r12 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,17 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +768: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+1 + ldd r27,Y+2 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + pop r0 + pop r0 + 
pop r17 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_decrypt, .-gift128b_decrypt + +#endif + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128b-small-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128b-small-avr.S new file mode 100644 index 0000000..77ef9fd --- /dev/null +++ b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128b-small-avr.S @@ -0,0 +1,6053 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_SMALL + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 + .byte 128 + + .text +.global gift128b_init + .type gift128b_init, @function +gift128b_init: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 16 + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + ldi r24,4 +33: + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + mov r0,r22 
+ mov r22,r4 + mov r4,r0 + mov r0,r23 + mov r23,r5 + mov r5,r0 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + dec r24 + breq 5115f + rjmp 33b +5115: + subi r30,80 + sbc r31,r1 + ldi r24,2 +119: + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + st Z,r3 + std Z+1,r23 + std Z+2,r2 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 
+ lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+4,r3 + std Z+5,r23 + std Z+6,r2 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+8,r3 + std Z+9,r23 + std Z+10,r2 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr 
r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+12,r3 + std Z+13,r23 + std Z+14,r2 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r3 + std Z+17,r23 + std Z+18,r2 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + 
rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+20,r3 + std Z+21,r23 + std Z+22,r2 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+24,r3 + std Z+25,r23 + std Z+26,r2 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol 
r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+28,r3 + std Z+29,r23 + std Z+30,r2 + std Z+31,r22 + dec r24 + breq 1268f + adiw r30,40 + rjmp 119b +1268: + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size gift128b_init, .-gift128b_init + + .text +.global gift128b_encrypt + .type gift128b_encrypt, @function +gift128b_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ldi r24,20 +1: + ld r22,Z+ + ld r23,Z+ + ld r2,Z+ + ld r3,Z+ + std Y+1,r22 + std Y+2,r23 + std Y+3,r2 + std Y+4,r3 + adiw r28,4 + dec r24 + brne 1b + subi r28,80 + sbc r29,r1 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 73f + rcall 73f + rjmp 1285f +73: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov 
r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + 
lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor 
r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif 
defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +811: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + 
ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +1285: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt, .-gift128b_encrypt + + .text +.global 
gift128b_encrypt_preloaded + .type gift128b_encrypt_preloaded, @function +gift128b_encrypt_preloaded: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ldi r24,20 +1: + ld r22,Z+ + ld r23,Z+ + ld r2,Z+ + ld r3,Z+ + std Y+1,r22 + std Y+2,r23 + std Y+3,r2 + std Y+4,r3 + adiw r28,4 + dec r24 + brne 1b + subi r28,80 + sbc r29,r1 + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 73f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 811f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 73f + rcall 73f + rjmp 1285f +73: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 
+ rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) 
+ lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or 
r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor 
r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +811: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + 
rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +1285: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 40 +table_1: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 
11 + .byte 23 + .byte 46 + .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128b_decrypt + .type gift128b_decrypt, @function +gift128b_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror 
r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd r25,Z+11 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r25 + mov r25,r26 + mov r26,r0 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + 
eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r17,hh8(table_1) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +678: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 830f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 830f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 830f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 830f + cpse r16,r1 + rjmp 678b + rjmp 1175f 
+830: + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 + bst r7,6 + bld r7,2 + bst r7,7 + bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld r11,0 + bst r8,5 + bld r10,6 + bst r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + eor r8,r18 + eor r9,r19 
+ eor r10,r20 + eor r11,r21 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +1175: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_decrypt, .-gift128b_decrypt + +#endif + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128b-tiny-avr.S b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128b-tiny-avr.S new file mode 100644 index 0000000..e7a03f1 --- /dev/null +++ b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-gift128b-tiny-avr.S @@ -0,0 +1,6766 @@ +#if defined(__AVR__) +#include <avr/io.h> +/* Automatically generated - do not edit */ + +#include "internal-gift128-config.h" + +#if GIFT128_VARIANT == GIFT128_VARIANT_TINY + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_0, @object + .size table_0, 160 +table_0: + .byte 8 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 128 + .byte 1 + .byte 128 + .byte 2 + .byte 0 + .byte 0 + .byte 84 + .byte 129 + .byte 1 + .byte 1 + .byte 1 + .byte 31 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 136 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 81 + .byte 128 + .byte 1 + .byte 3 + .byte 3 + .byte 47 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 96 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 65 + .byte 128 + .byte 0 + .byte 3 + .byte 3 + .byte 39 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 224 + .byte 1 + .byte 64 + .byte 2 + .byte 0 + .byte 80 + .byte 17 + .byte 128 + .byte 1 + .byte 2 + .byte 3 + .byte 43 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 8 + .byte 8 + .byte 16 + .byte 0 + .byte 64 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 64 + .byte 1 + .byte 128 + .byte 0 + .byte 2 + .byte 2 + .byte 33 + .byte 0 + .byte 0 + .byte 128 + .byte 128 + .byte 0 + .byte 0 + .byte 16 + .byte 0 + .byte 192 + .byte 1 + .byte 0 + .byte 2 + .byte 0 + .byte 0 + .byte 81 + .byte 128 + .byte 1 + .byte 1 + .byte 3 + .byte 46 + .byte 0 + .byte 0 + .byte 128 + .byte 0 + .byte 136 + .byte 8 + .byte 16 + .byte 0 + .byte 32 + .byte 1 + .byte 96 + .byte 2 + .byte 0 + .byte 80 + .byte 64 + .byte 128 + .byte 0 + .byte 3 + .byte 1 + .byte 6 + .byte 0 + .byte 0 + .byte 128 + .byte 8 + .byte 136 + .byte 0 + .byte 16 + .byte 0 + .byte 160 + .byte 1 + .byte 192 + .byte 2 + .byte 0 + .byte 80 + .byte 20 + .byte 129 + .byte 1 + .byte 2 + .byte 1 + .byte 26 + .byte 0 + .byte 0 + 
.byte 128 + + .text +.global gift128b_init + .type gift128b_init, @function +gift128b_init: + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 + movw r26,r22 +.L__stack_usage = 16 + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + st Z,r22 + std Z+1,r23 + std Z+2,r2 + std Z+3,r3 + std Z+4,r4 + std Z+5,r5 + std Z+6,r6 + std Z+7,r7 + std Z+8,r8 + std Z+9,r9 + std Z+10,r10 + std Z+11,r11 + std Z+12,r12 + std Z+13,r13 + std Z+14,r14 + std Z+15,r15 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + ret + .size gift128b_init, .-gift128b_init + + .text +.global gift128b_encrypt + .type gift128b_encrypt, @function +gift128b_encrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + movw r30,r28 + adiw r30,1 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + ldi r24,4 +35: + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + mov r0,r22 + mov r22,r4 + mov r4,r0 + mov r0,r23 + mov r23,r5 + mov r5,r0 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + dec r24 + breq 5117f + rjmp 35b +5117: + subi r30,80 + sbc r31,r1 + ldi r24,2 +121: + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor 
r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + st Z,r3 + std Z+1,r23 + std Z+2,r2 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+4,r3 + std Z+5,r23 + std Z+6,r2 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov 
r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+8,r3 + std Z+9,r23 + std Z+10,r2 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+12,r3 + std Z+13,r23 + std Z+14,r2 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw 
r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r3 + std Z+17,r23 + std Z+18,r2 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+20,r3 + std Z+21,r23 + std Z+22,r2 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror 
r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+24,r3 + std Z+25,r23 + std Z+26,r2 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+28,r3 + std Z+29,r23 + std Z+30,r2 + std Z+31,r22 + dec r24 + breq 1270f + adiw r30,40 + rjmp 121b +1270: + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + 
ldi r30,60 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 1329f + rcall 1329f + rjmp 2541f +1329: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + 
ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor 
r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + 
eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +2067: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + 
swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + 
ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +2541: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt, .-gift128b_encrypt + + .text +.global gift128b_encrypt_preloaded + .type gift128b_encrypt_preloaded, @function +gift128b_encrypt_preloaded: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + subi r28,80 + sbci r29,0 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 100 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r4,Z+4 + ldd r5,Z+5 + ldd r6,Z+6 + ldd r7,Z+7 + ldd r8,Z+8 + ldd r9,Z+9 + ldd r10,Z+10 + ldd r11,Z+11 + ldd r12,Z+12 + ldd r13,Z+13 + ldd r14,Z+14 + ldd r15,Z+15 + movw r30,r28 + adiw r30,1 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + ldi r24,4 +35: + st Z+,r4 + st Z+,r5 + st Z+,r6 + st Z+,r7 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + lsl r22 + rol r23 + adc r22,r1 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + st Z+,r22 + st Z+,r23 + st Z+,r2 + st Z+,r3 + mov r0,r22 + mov r22,r4 + mov r4,r0 + mov r0,r23 + mov r23,r5 + mov r5,r0 + mov r0,r2 + mov r2,r6 + mov r6,r0 + mov r0,r3 + mov r3,r7 + mov r7,r0 + st Z+,r12 + st Z+,r13 + st Z+,r14 + st Z+,r15 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + mov r0,r1 + lsr r11 + ror r10 + ror r0 + lsr r11 + ror r10 + ror r0 + or r11,r0 + st Z+,r8 + st Z+,r9 + st Z+,r10 + st Z+,r11 + mov r0,r8 + mov r8,r12 + mov r12,r0 + mov r0,r9 + mov r9,r13 + mov r13,r0 + mov r0,r10 + mov r10,r14 + mov r14,r0 + mov r0,r11 + mov r11,r15 + mov r15,r0 + dec r24 + breq 5117f + rjmp 35b +5117: + subi r30,80 + sbc r31,r1 + ldi r24,2 +121: + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov 
r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + st Z,r3 + std Z+1,r23 + std Z+2,r2 + std Z+3,r22 + ldd r22,Z+4 + ldd r23,Z+5 + ldd r2,Z+6 + ldd r3,Z+7 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,85 + mov r19,r1 + andi r20,85 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+4,r3 + std Z+5,r23 + std Z+6,r2 + std Z+7,r22 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + 
rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+8,r3 + std Z+9,r23 + std Z+10,r2 + std Z+11,r22 + ldd r22,Z+12 + ldd r23,Z+13 + ldd r2,Z+14 + ldd r3,Z+15 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,17 + andi r19,17 + andi r20,17 + andi r21,17 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,15 + mov r19,r1 + andi r20,15 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+12,r3 + std Z+13,r23 + std Z+14,r2 + std Z+15,r22 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 
+ movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+16,r3 + std Z+17,r23 + std Z+18,r2 + std Z+19,r22 + ldd r22,Z+20 + ldd r23,Z+21 + ldd r2,Z+22 + ldd r3,Z+23 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r19 + rol r20 + rol r21 + rol r0 + movw r18,r20 + mov r20,r0 + mov r21,r1 + eor r18,r22 + eor r19,r23 + andi r18,170 + andi r19,170 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r0,r1 + lsr r20 + ror r19 + ror r18 + ror r0 + movw r20,r18 + mov r19,r0 + mov r18,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + movw r18,r20 + mov r20,r1 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,51 + andi r19,51 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+20,r3 + std Z+21,r23 + std Z+22,r2 + std Z+23,r22 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor 
r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+24,r3 + std Z+25,r23 + std Z+26,r2 + std Z+27,r22 + ldd r22,Z+28 + ldd r23,Z+29 + ldd r2,Z+30 + ldd r3,Z+31 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,10 + andi r19,10 + andi r20,10 + andi r21,10 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r0,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + lsl r18 + rol r19 + rol r20 + rol r21 + rol r0 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r0 + eor r18,r22 + eor r19,r23 + eor r20,r2 + eor r21,r3 + andi r18,204 + mov r19,r1 + andi r20,204 + mov r21,r1 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + lsr r21 + ror r20 + ror r19 + ror r18 + ror r0 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r0 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + movw r18,r22 + movw r20,r2 + mov r18,r19 + mov r19,r20 + mov r20,r21 + mov r21,r1 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r22 + eor r19,r23 + andi r18,240 + andi r19,240 + eor r22,r18 + eor r23,r19 + mov r20,r1 + mov r21,r1 + mov r21,r20 + mov r20,r19 + mov r19,r18 + mov r18,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + std Z+28,r3 + std Z+29,r23 + std Z+30,r2 + std Z+31,r22 + dec r24 + breq 1270f + adiw r30,40 + rjmp 121b +1270: + ld r22,X+ + ld r23,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + ld r14,X+ + ld r15,X+ + movw r26,r28 + adiw r26,1 + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,20 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + 
ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,40 + sbiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,60 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,80 + sbiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,100 + adiw r26,40 + rcall 1329f +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + rcall 2067f + ldi r30,lo8(table_0) + ldi r31,hi8(table_0) +#if defined(RAMPZ) + ldi r24,hh8(table_0) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r24 +#endif + ldi r30,120 + sbiw r26,40 + rcall 1329f + rcall 1329f + rjmp 2541f +1329: + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,204 + andi r19,204 + andi r20,204 + andi r21,204 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + ldi r25,51 + and r4,r25 + and r5,r25 + and r6,r25 + and r7,r25 + or r4,r18 + or r5,r19 + or r6,r20 + or r7,r21 + movw r18,r8 + movw r20,r10 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,238 + andi r19,238 + andi r20,238 + andi r21,238 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + lsr r11 + ror r10 + ror r9 + ror r8 + ldi r24,17 + and r8,r24 + and r9,r24 + and r10,r24 + and r11,r24 + or r8,r18 + or r9,r19 + or r10,r20 + or r11,r21 + movw r18,r12 + movw r20,r14 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + andi r18,136 + andi r19,136 + andi r20,136 + andi r21,136 + lsr r15 + ror r14 + ror r13 + ror r12 + ldi r17,119 + and r12,r17 + and r13,r17 + and r14,r17 + and r15,r17 + or r12,r18 + or r13,r19 + or r14,r20 + or r15,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if 
defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r1 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + lsr r3 + ror r2 + ror r0 + or r3,r0 + mov r0,r5 + mov r5,r4 + mov r4,r0 + mov r0,r7 + mov r7,r6 + mov r6,r0 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r8 + rol r9 + adc r8,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + lsl r10 + rol r11 + adc r10,r1 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + movw r18,r4 + movw r20,r6 + lsr r21 + 
ror r20 + ror r19 + ror r18 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + mov r0,r10 + mov r10,r8 + mov r8,r0 + mov r0,r11 + mov r11,r9 + mov r9,r0 + movw r18,r8 + movw r20,r10 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r8 + eor r19,r9 + andi r18,85 + andi r19,85 + eor r8,r18 + eor r9,r19 + mov r20,r1 + mov r21,r1 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + mov r0,r14 + mov r14,r12 + mov r12,r0 + mov r0,r15 + mov r15,r13 + mov r13,r0 + movw r18,r14 + lsr r19 + ror r18 + eor r18,r14 + eor r19,r15 + andi r18,85 + andi r19,85 + eor r14,r18 + eor r15,r19 + lsl r18 + rol r19 + eor r14,r18 + eor r15,r19 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + mov r0,r12 + and r0,r8 + eor r4,r0 + mov r0,r13 + and r0,r9 + eor r5,r0 + mov r0,r14 + and r0,r10 + eor r6,r0 + mov r0,r15 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r22 + eor r12,r0 + mov r0,r5 + and r0,r23 + eor r13,r0 + mov r0,r6 + and r0,r2 + eor r14,r0 + mov r0,r7 + and r0,r3 + eor r15,r0 + mov r0,r12 + or r0,r4 + eor r8,r0 + mov r0,r13 + or r0,r5 + eor r9,r0 + mov r0,r14 + or r0,r6 + eor r10,r0 + mov r0,r15 + or r0,r7 + eor r11,r0 + eor r22,r8 + eor r23,r9 + eor r2,r10 + eor r3,r11 + eor r4,r22 + eor r5,r23 + eor r6,r2 + eor r7,r3 + com r22 + com r23 + com r2 + com r3 + mov r0,r12 + and r0,r4 + eor r8,r0 + mov r0,r13 + and r0,r5 + eor r9,r0 + mov r0,r14 + and r0,r6 + eor r10,r0 + mov r0,r15 + and r0,r7 + eor r11,r0 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + swap r4 + swap r5 + swap r6 + swap r7 + mov r0,r1 + lsr r8 + ror r0 + lsr r8 + ror r0 + or r8,r0 + mov r0,r1 + lsr r9 + ror r0 + lsr r9 + ror r0 + or r9,r0 + mov r0,r1 + lsr r10 + ror r0 + lsr r10 + ror r0 + or r10,r0 + mov r0,r1 + lsr r11 + ror r0 + lsr r11 + ror r0 + or r11,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif 
defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r12,r18 + eor r13,r19 + eor r14,r20 + eor r15,r21 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + com r12 + com r13 + com r14 + com r15 + mov r0,r22 + and r0,r4 + eor r8,r0 + mov r0,r23 + and r0,r5 + eor r9,r0 + mov r0,r2 + and r0,r6 + eor r10,r0 + mov r0,r3 + and r0,r7 + eor r11,r0 + mov r0,r6 + mov r6,r4 + mov r4,r0 + mov r0,r7 + mov r7,r5 + mov r5,r0 + mov r0,r8 + mov r8,r9 + mov r9,r10 + mov r10,r11 + mov r11,r0 + mov r0,r15 + mov r15,r14 + mov r14,r13 + mov r13,r12 + mov r12,r0 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ld r18,X+ + ld r19,X+ + ld r20,X+ + ld r21,X+ + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r19,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r19,Z +#elif defined(__AVR_TINY__) + ld r19,Z +#else + lpm + mov r19,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r20,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r20,Z +#elif defined(__AVR_TINY__) + ld r20,Z +#else + lpm + mov r20,r0 +#endif + inc r30 +#if defined(RAMPZ) + elpm r21,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r21,Z +#elif defined(__AVR_TINY__) + ld r21,Z +#else + lpm + mov r21,r0 +#endif + inc r30 + eor r22,r18 + eor r23,r19 + eor r2,r20 + eor r3,r21 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + eor r12,r22 + eor r13,r23 + eor r14,r2 + eor r15,r3 + eor r22,r12 + eor r23,r13 + eor r2,r14 + eor r3,r15 + ret +2067: + movw r30,r26 + sbiw r30,40 + push r3 + push r2 + push r23 + push r22 + push r7 + push r6 + push r5 + push r4 + ld r22,Z + ldd r23,Z+1 + ldd r2,Z+2 + ldd r3,Z+3 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + movw r18,r26 + movw r20,r24 + movw r18,r20 + mov r20,r1 + mov r21,r1 + eor r18,r26 + eor r19,r27 + andi r18,51 + andi r19,51 + eor r26,r18 + eor r27,r19 + mov r20,r1 + mov r21,r1 + movw r20,r18 + mov r18,r1 + mov r19,r1 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,68 + andi r19,68 + andi r20,85 + andi r21,85 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + st Z,r26 + std Z+1,r27 + std Z+2,r24 + std Z+3,r25 + movw r18,r22 + movw r20,r2 + andi r18,51 + andi r19,51 + andi r20,51 + andi r21,51 + andi r22,204 + andi r23,204 + ldi r17,204 + and r2,r17 + and r3,r17 + or r2,r21 + or r3,r18 + or r22,r19 + or r23,r20 + movw r18,r2 + movw r20,r22 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r2 + eor r19,r3 + eor 
r20,r22 + eor r21,r23 + mov r18,r1 + andi r19,17 + andi r20,85 + andi r21,85 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r2,r18 + eor r3,r19 + eor r22,r20 + eor r23,r21 + std Z+4,r2 + std Z+5,r3 + std Z+6,r22 + std Z+7,r23 + ldd r22,Z+8 + ldd r23,Z+9 + ldd r2,Z+10 + ldd r3,Z+11 + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + lsl r26 + adc r26,r1 + lsl r26 + adc r26,r1 + swap r27 + lsl r24 + adc r24,r1 + lsl r24 + adc r24,r1 + swap r25 + std Z+8,r26 + std Z+9,r27 + std Z+10,r24 + std Z+11,r25 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r22 + adc r22,r1 + lsl r23 + adc r23,r1 + lsl r23 + adc r23,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r2 + adc r2,r1 + lsl r3 + adc r3,r1 + lsl r3 + adc r3,r1 + std Z+12,r22 + std Z+13,r23 + std Z+14,r2 + std Z+15,r3 + ldd r22,Z+16 + ldd r23,Z+17 + ldd r2,Z+18 + ldd r3,Z+19 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r24,Z+22 + ldd r25,Z+23 + movw r18,r26 + movw r20,r24 + andi r18,170 + andi r19,170 + andi r20,170 + andi r21,170 + andi r26,85 + andi r27,85 + andi r24,85 + andi r25,85 + or r26,r19 + or r27,r20 + or r24,r21 + or r25,r18 + std Z+16,r24 + std Z+17,r25 + std Z+18,r26 + std Z+19,r27 + movw r18,r22 + movw r20,r2 + andi r18,85 + andi r19,85 + andi r20,85 + andi r21,85 + andi r22,170 + andi r23,170 + ldi r16,170 + and r2,r16 + and r3,r16 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + lsl r22 + rol r23 + rol r2 + rol r3 + adc r22,r1 + or r22,r18 + or r23,r19 + or r2,r20 + or r3,r21 + std Z+20,r3 + std Z+21,r22 + std Z+22,r23 + std Z+23,r2 + ldd r22,Z+24 + ldd r23,Z+25 + ldd r2,Z+26 + ldd r3,Z+27 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r24,Z+30 + ldd r25,Z+31 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + lsr r21 + ror r20 + ror r19 + ror r18 + eor r18,r26 + eor r19,r27 + eor r20,r24 + eor r21,r25 + andi r18,3 + andi r19,3 + andi r20,3 + andi r21,3 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + lsl r18 + rol r19 + rol r20 + rol r21 + lsl r18 + rol r19 + rol r20 + rol r21 + eor r26,r18 + eor r27,r19 + eor r24,r20 + eor r25,r21 + movw r18,r26 + movw r20,r24 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,120 + andi r19,120 + andi r20,120 + andi r21,120 + movw r4,r18 + movw r6,r20 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + lsr r7 + ror r6 + ror r5 + ror r4 + eor r4,r18 + eor r5,r19 + eor r6,r20 + eor r7,r21 + ldi r16,8 + and r4,r16 + and r5,r16 + and r6,r16 + and r7,r16 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + lsl r4 + rol r5 + rol r6 + rol r7 + eor r18,r4 + eor r19,r5 + eor r20,r6 + eor r21,r7 + andi r26,15 + andi r27,15 + andi r24,15 + andi r25,15 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + std Z+24,r26 + std Z+25,r27 + std Z+26,r24 + std Z+27,r25 + movw r18,r2 + lsr r19 + ror r18 + lsr r19 + ror r18 + andi r18,48 + andi r19,48 + movw r26,r22 + movw r24,r2 + andi r26,1 + andi r27,1 + andi r24,1 + andi r25,1 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + lsl r26 + rol r27 + rol r24 + rol r25 + or r26,r18 + or r27,r19 + movw r18,r2 + lsl r18 + rol r19 + lsl r18 + rol r19 + andi r18,192 + andi r19,192 + or r26,r18 + or r27,r19 + movw r18,r22 + andi r18,224 + andi r19,224 + lsr r19 + ror r18 + or r24,r18 + or 
r25,r19 + movw r18,r22 + movw r20,r2 + lsr r21 + ror r20 + ror r19 + ror r18 + andi r18,7 + andi r19,7 + andi r20,7 + andi r21,7 + or r26,r18 + or r27,r19 + or r24,r20 + or r25,r21 + andi r22,16 + andi r23,16 + lsl r22 + rol r23 + lsl r22 + rol r23 + lsl r22 + rol r23 + or r24,r22 + or r25,r23 + std Z+28,r26 + std Z+29,r27 + std Z+30,r24 + std Z+31,r25 + ldd r22,Z+32 + ldd r23,Z+33 + ldd r2,Z+34 + ldd r3,Z+35 + ldd r26,Z+36 + ldd r27,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Z+32,r27 + std Z+33,r26 + std Z+34,r24 + std Z+35,r25 + mov r0,r1 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + lsr r23 + ror r22 + ror r0 + or r23,r0 + mov r0,r2 + mov r2,r3 + mov r3,r0 + lsl r2 + rol r3 + adc r2,r1 + lsl r2 + rol r3 + adc r2,r1 + std Z+36,r22 + std Z+37,r23 + std Z+38,r2 + std Z+39,r3 + pop r4 + pop r5 + pop r6 + pop r7 + pop r22 + pop r23 + pop r2 + pop r3 + movw r26,r30 + ret +2541: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + subi r28,175 + sbci r29,255 + ld r26,Y+ + ld r27,Y + subi r28,82 + sbc r29,r1 + st X+,r22 + st X+,r23 + st X+,r2 + st X+,r3 + st X+,r4 + st X+,r5 + st X+,r6 + st X+,r7 + st X+,r8 + st X+,r9 + st X+,r10 + st X+,r11 + st X+,r12 + st X+,r13 + st X+,r14 + st X+,r15 + subi r28,174 + sbci r29,255 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_encrypt_preloaded, .-gift128b_encrypt_preloaded + + .section .progmem.data,"a",@progbits + .p2align 8 + .type table_1, @object + .size table_1, 40 +table_1: + .byte 1 + .byte 3 + .byte 7 + .byte 15 + .byte 31 + .byte 62 + .byte 61 + .byte 59 + .byte 55 + .byte 47 + .byte 30 + .byte 60 + .byte 57 + .byte 51 + .byte 39 + .byte 14 + .byte 29 + .byte 58 + .byte 53 + .byte 43 + .byte 22 + .byte 44 + .byte 24 + .byte 48 + .byte 33 + .byte 2 + .byte 5 + .byte 11 + .byte 23 + .byte 46 + .byte 28 + .byte 56 + .byte 49 + .byte 35 + .byte 6 + .byte 13 + .byte 27 + .byte 54 + .byte 45 + .byte 26 + + .text +.global gift128b_decrypt + .type gift128b_decrypt, @function +gift128b_decrypt: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r23 + push r22 + movw r30,r24 + movw r26,r20 + in r28,0x3d + in r29,0x3e + sbiw r28,16 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 +.L__stack_usage = 35 + ld r3,X+ + ld r2,X+ + ld r23,X+ + ld r22,X+ + ld r7,X+ + ld r6,X+ + ld r5,X+ + ld r4,X+ + ld r11,X+ + ld r10,X+ + ld r9,X+ + ld r8,X+ + ld r15,X+ + ld r14,X+ + ld r13,X+ + ld r12,X+ + ldd r26,Z+12 + ldd r27,Z+13 + ldd r24,Z+14 + ldd r25,Z+15 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Z+4 + ldd r27,Z+5 + ldd r24,Z+6 + ldd r25,Z+7 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Z+8 + ldd r27,Z+9 + ldd r24,Z+10 + ldd 
r25,Z+11 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ld r26,Z + ldd r27,Z+1 + ldd r24,Z+2 + ldd r25,Z+3 + mov r0,r27 + mov r27,r26 + mov r26,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + lsr r25 + ror r24 + ror r0 + or r25,r0 + ldi r30,lo8(table_1) + ldi r31,hi8(table_1) +#if defined(RAMPZ) + ldi r17,hh8(table_1) + in r0,_SFR_IO_ADDR(RAMPZ) + push r0 + out _SFR_IO_ADDR(RAMPZ),r17 +#endif + ldi r16,40 +114: + ldd r0,Y+9 + eor r8,r0 + ldd r0,Y+10 + eor r9,r0 + ldd r0,Y+11 + eor r10,r0 + ldd r0,Y+12 + eor r11,r0 + std Y+13,r26 + std Y+14,r27 + std Y+15,r24 + std Y+16,r25 + ldd r26,Y+1 + ldd r27,Y+2 + ldd r24,Y+3 + ldd r25,Y+4 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+13 + eor r8,r0 + ldd r0,Y+14 + eor r9,r0 + ldd r0,Y+15 + eor r10,r0 + ldd r0,Y+16 + eor r11,r0 + std Y+1,r26 + std Y+2,r27 + std Y+3,r24 + std Y+4,r25 + ldd r26,Y+5 + ldd r27,Y+6 + ldd r24,Y+7 + ldd r25,Y+8 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+1 + eor r8,r0 + ldd r0,Y+2 + eor r9,r0 + ldd r0,Y+3 + eor r10,r0 + ldd r0,Y+4 + eor r11,r0 + std Y+5,r26 + std Y+6,r27 + std Y+7,r24 + std Y+8,r25 + ldd r26,Y+9 + ldd r27,Y+10 + ldd r24,Y+11 + ldd r25,Y+12 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + ldd r0,Y+5 + eor r8,r0 + ldd r0,Y+6 + eor r9,r0 + ldd r0,Y+7 + eor r10,r0 + ldd r0,Y+8 + eor r11,r0 + std Y+9,r26 + std Y+10,r27 + std Y+11,r24 + std Y+12,r25 + ldd r26,Y+13 + ldd r27,Y+14 + ldd r24,Y+15 + ldd r25,Y+16 + mov r0,r1 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + lsr r27 + ror r26 + ror r0 + or r27,r0 + lsl r24 + rol r25 + adc r24,r1 + lsl r24 + rol r25 + adc r24,r1 + rcall 266f + cpse r16,r1 + rjmp 114b + rjmp 611f +266: + eor r4,r26 + eor r5,r27 + eor r6,r24 + eor r7,r25 + ldi r18,128 + eor r15,r18 + dec r16 + mov r30,r16 +#if defined(RAMPZ) + elpm r18,Z +#elif defined(__AVR_HAVE_LPMX__) + lpm r18,Z +#elif defined(__AVR_TINY__) + ld r18,Z +#else + lpm + mov r18,r0 +#endif + eor r12,r18 + bst r22,1 + bld r0,0 + bst r3,0 + bld r22,1 + bst r22,6 + bld r3,0 + bst r2,1 + bld r22,6 + bst r3,4 + bld r2,1 + bst r22,7 + bld r3,4 + bst r23,1 + bld r22,7 + bst r3,2 + bld r23,1 + bst r2,6 + bld r3,2 + bst r2,5 + bld r2,6 + bst r3,5 + bld r2,5 + bst r3,7 + bld r3,5 + bst r23,7 + bld r3,7 + bst r23,3 + bld r23,7 + bst r23,2 + bld r23,3 + bst r2,2 + bld r23,2 + bst r2,4 + bld r2,2 + bst r22,5 + bld r2,4 + bst r3,1 + bld r22,5 + bst r3,6 + bld r3,1 + bst r2,7 + bld r3,6 + bst r23,5 + bld r2,7 + bst r3,3 + bld r23,5 + bst r23,6 + bld r3,3 + bst r2,3 + bld r23,6 + bst r23,4 + bld r2,3 + bst r22,3 + bld r23,4 + bst r23,0 + bld r22,3 + bst r22,2 + bld r23,0 + bst r2,0 + bld r22,2 + bst r22,4 + bld r2,0 + bst r0,0 + bld r22,4 + bst r4,0 + bld r0,0 + bst r5,0 + bld r4,0 + bst r5,2 + bld r5,0 + bst r7,2 + bld r5,2 
+ bst r7,6 + bld r7,2 + bst r7,7 + bld r7,6 + bst r6,7 + bld r7,7 + bst r6,5 + bld r6,7 + bst r4,5 + bld r6,5 + bst r4,1 + bld r4,5 + bst r0,0 + bld r4,1 + bst r4,2 + bld r0,0 + bst r7,0 + bld r4,2 + bst r5,6 + bld r7,0 + bst r7,3 + bld r5,6 + bst r6,6 + bld r7,3 + bst r7,5 + bld r6,6 + bst r4,7 + bld r7,5 + bst r6,1 + bld r4,7 + bst r4,4 + bld r6,1 + bst r5,1 + bld r4,4 + bst r0,0 + bld r5,1 + bst r4,3 + bld r0,0 + bst r6,0 + bld r4,3 + bst r5,4 + bld r6,0 + bst r5,3 + bld r5,4 + bst r6,2 + bld r5,3 + bst r7,4 + bld r6,2 + bst r5,7 + bld r7,4 + bst r6,3 + bld r5,7 + bst r6,4 + bld r6,3 + bst r5,5 + bld r6,4 + bst r0,0 + bld r5,5 + bst r4,6 + bld r0,0 + bst r7,1 + bld r4,6 + bst r0,0 + bld r7,1 + bst r8,0 + bld r0,0 + bst r10,0 + bld r8,0 + bst r10,4 + bld r10,0 + bst r10,5 + bld r10,4 + bst r9,5 + bld r10,5 + bst r9,3 + bld r9,5 + bst r11,2 + bld r9,3 + bst r8,6 + bld r11,2 + bst r8,1 + bld r8,6 + bst r9,0 + bld r8,1 + bst r10,2 + bld r9,0 + bst r8,4 + bld r10,2 + bst r10,1 + bld r8,4 + bst r9,4 + bld r10,1 + bst r10,3 + bld r9,4 + bst r11,4 + bld r10,3 + bst r10,7 + bld r11,4 + bst r11,5 + bld r10,7 + bst r9,7 + bld r11,5 + bst r11,3 + bld r9,7 + bst r11,6 + bld r11,3 + bst r8,7 + bld r11,6 + bst r11,1 + bld r8,7 + bst r9,6 + bld r11,1 + bst r8,3 + bld r9,6 + bst r11,0 + bld r8,3 + bst r10,6 + bld r11,0 + bst r8,5 + bld r10,6 + bst r9,1 + bld r8,5 + bst r9,2 + bld r9,1 + bst r8,2 + bld r9,2 + bst r0,0 + bld r8,2 + bst r12,0 + bld r0,0 + bst r15,0 + bld r12,0 + bst r15,6 + bld r15,0 + bst r13,7 + bld r15,6 + bst r12,3 + bld r13,7 + bst r0,0 + bld r12,3 + bst r12,1 + bld r0,0 + bst r14,0 + bld r12,1 + bst r15,4 + bld r14,0 + bst r15,7 + bld r15,4 + bst r12,7 + bld r15,7 + bst r0,0 + bld r12,7 + bst r12,2 + bld r0,0 + bst r13,0 + bld r12,2 + bst r15,2 + bld r13,0 + bst r13,6 + bld r15,2 + bst r13,3 + bld r13,6 + bst r0,0 + bld r13,3 + bst r12,4 + bld r0,0 + bst r15,1 + bld r12,4 + bst r14,6 + bld r15,1 + bst r13,5 + bld r14,6 + bst r14,3 + bld r13,5 + bst r0,0 + bld r14,3 + bst r12,5 + bld r0,0 + bst r14,1 + bld r12,5 + bst r14,4 + bld r14,1 + bst r15,5 + bld r14,4 + bst r14,7 + bld r15,5 + bst r0,0 + bld r14,7 + bst r12,6 + bld r0,0 + bst r13,1 + bld r12,6 + bst r14,2 + bld r13,1 + bst r13,4 + bld r14,2 + bst r15,3 + bld r13,4 + bst r0,0 + bld r15,3 + movw r18,r12 + movw r20,r14 + movw r12,r22 + movw r14,r2 + movw r22,r18 + movw r2,r20 + and r18,r4 + and r19,r5 + and r20,r6 + and r21,r7 + eor r8,r18 + eor r9,r19 + eor r10,r20 + eor r11,r21 + com r12 + com r13 + com r14 + com r15 + eor r4,r12 + eor r5,r13 + eor r6,r14 + eor r7,r15 + eor r12,r8 + eor r13,r9 + eor r14,r10 + eor r15,r11 + mov r0,r22 + or r0,r4 + eor r8,r0 + mov r0,r23 + or r0,r5 + eor r9,r0 + mov r0,r2 + or r0,r6 + eor r10,r0 + mov r0,r3 + or r0,r7 + eor r11,r0 + mov r0,r4 + and r0,r12 + eor r22,r0 + mov r0,r5 + and r0,r13 + eor r23,r0 + mov r0,r6 + and r0,r14 + eor r2,r0 + mov r0,r7 + and r0,r15 + eor r3,r0 + mov r0,r22 + and r0,r8 + eor r4,r0 + mov r0,r23 + and r0,r9 + eor r5,r0 + mov r0,r2 + and r0,r10 + eor r6,r0 + mov r0,r3 + and r0,r11 + eor r7,r0 + ret +611: +#if defined(RAMPZ) + pop r0 + out _SFR_IO_ADDR(RAMPZ),r0 +#endif + ldd r26,Y+17 + ldd r27,Y+18 + st X+,r3 + st X+,r2 + st X+,r23 + st X+,r22 + st X+,r7 + st X+,r6 + st X+,r5 + st X+,r4 + st X+,r11 + st X+,r10 + st X+,r9 + st X+,r8 + st X+,r15 + st X+,r14 + st X+,r13 + st X+,r12 + adiw r28,18 + in r0,0x3f + cli + out 0x3e,r29 + out 0x3f,r0 + out 0x3d,r28 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop 
r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size gift128b_decrypt, .-gift128b_decrypt + +#endif + +#endif diff --git a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-util.h b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-util.h +++ b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) 
(leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git 
a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/sundae-gift.c b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/sundae-gift.c index 984a4db..d192b8e 100644 --- a/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/sundae-gift.c +++ b/sundae-gift/Implementations/crypto_aead/sundaegift96v1/rhys/sundae-gift.c @@ -140,8 +140,7 @@ static int sundae_gift_aead_encrypt *clen = mlen + SUNDAE_GIFT_TAG_SIZE; /* Set the key schedule */ - if (!gift128b_init(&ks, k, SUNDAE_GIFT_KEY_SIZE)) - return -1; + gift128b_init(&ks, k); /* Format and encrypt the initial domain separation block */ if (adlen > 0) @@ -205,8 +204,7 @@ static int sundae_gift_aead_decrypt len = *mlen = clen - SUNDAE_GIFT_TAG_SIZE; /* Set the key schedule */ - if (!gift128b_init(&ks, k, SUNDAE_GIFT_KEY_SIZE)) - return -1; + gift128b_init(&ks, k); /* Decrypt the ciphertext to produce the plaintext, using the * tag as the initialization vector for the decryption process */ diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/aead-common.c b/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/aead-common.h b/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. 
- * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
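The precheck variant is intended for schemes that already hold a constant-time validity result (for example a padding or header check) and want a single combined verdict. A hedged sketch of how a caller might fold such a flag in; finish_decrypt, header_ok and the 8-byte tag length are illustrative only:

#include "aead-common.h"

/* Combine an earlier constant-time check (-1 = passed, 0 = failed) with the
 * tag comparison, so that either failure wipes the plaintext and returns -1. */
static int finish_decrypt(unsigned char *m, unsigned long long mlen,
                          const unsigned char *computed_tag,
                          const unsigned char *received_tag,
                          int header_ok)
{
    return aead_check_tag_precheck(m, mlen, computed_tag, received_tag,
                                   8, header_ok);
}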
- * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/api.h b/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/api.h deleted file mode 100644 index 32c9622..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 12 -#define CRYPTO_ABYTES 8 -#define CRYPTO_NOOVERLAP 1 diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/encrypt.c b/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/encrypt.c deleted file mode 100644 index 832ac67..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "tinyjambu.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return tiny_jambu_128_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return tiny_jambu_128_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/internal-tinyjambu-avr.S b/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/internal-tinyjambu-avr.S deleted file mode 100644 index c7f2d1c..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/internal-tinyjambu-avr.S +++ /dev/null @@ -1,471 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global tiny_jambu_permutation - .type tiny_jambu_permutation, @function -tiny_jambu_permutation: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r26,r24 - movw r30,r22 -.L__stack_usage = 18 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - lsl r20 - lsl r20 - mov r19,r1 -19: - movw r24,r4 - movw r16,r6 - mov r15,r3 - lsl r15 - rol r24 - rol r25 - rol r16 - rol r17 - eor r22,r24 - eor r23,r25 - eor r28,r16 - eor r29,r17 - mov r14,r7 - mov r15,r8 - mov r24,r9 - mov r25,r10 - mov r0,r6 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - mov r16,r9 - mov r0,r8 - mov r17,r10 - mov r21,r11 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol 
r16 - rol r17 - rol r21 - and r14,r16 - and r15,r17 - and r24,r21 - mov r16,r11 - mov r17,r12 - lsl r16 - rol r17 - lsl r16 - rol r17 - lsl r16 - rol r17 - and r25,r17 - com r14 - com r15 - com r24 - com r25 - eor r22,r14 - eor r23,r15 - eor r28,r24 - eor r29,r25 - movw r24,r10 - movw r16,r12 - mov r15,r9 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - eor r22,r15 - eor r23,r24 - eor r28,r25 - eor r29,r16 - ld r14,Z+ - ld r15,Z+ - ld r24,Z+ - ld r25,Z+ - eor r22,r14 - eor r23,r15 - eor r28,r24 - eor r29,r25 - movw r24,r8 - movw r16,r10 - mov r15,r7 - lsl r15 - rol r24 - rol r25 - rol r16 - rol r17 - eor r2,r24 - eor r3,r25 - eor r4,r16 - eor r5,r17 - mov r14,r11 - mov r15,r12 - mov r24,r13 - mov r25,r22 - mov r0,r10 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - mov r16,r13 - mov r0,r12 - mov r17,r22 - mov r21,r23 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - and r14,r16 - and r15,r17 - and r24,r21 - mov r16,r23 - mov r17,r28 - lsl r16 - rol r17 - lsl r16 - rol r17 - lsl r16 - rol r17 - and r25,r17 - com r14 - com r15 - com r24 - com r25 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - movw r24,r22 - movw r16,r28 - mov r15,r13 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - eor r2,r15 - eor r3,r24 - eor r4,r25 - eor r5,r16 - ld r14,Z+ - ld r15,Z+ - ld r24,Z+ - ld r25,Z+ - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - movw r24,r12 - movw r16,r22 - mov r15,r11 - lsl r15 - rol r24 - rol r25 - rol r16 - rol r17 - eor r6,r24 - eor r7,r25 - eor r8,r16 - eor r9,r17 - mov r14,r23 - mov r15,r28 - mov r24,r29 - mov r25,r2 - mov r0,r22 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - mov r16,r29 - mov r0,r28 - mov r17,r2 - mov r21,r3 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - and r14,r16 - and r15,r17 - and r24,r21 - mov r16,r3 - mov r17,r4 - lsl r16 - rol r17 - lsl r16 - rol r17 - lsl r16 - rol r17 - and r25,r17 - com r14 - com r15 - com r24 - com r25 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - movw r24,r2 - movw r16,r4 - mov r15,r29 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - eor r6,r15 - eor r7,r24 - eor r8,r25 - eor r9,r16 - ld r14,Z+ - ld r15,Z+ - ld r24,Z+ - ld r25,Z+ - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - movw r24,r28 - movw r16,r2 - mov r15,r23 - lsl r15 - rol r24 - rol r25 - rol r16 - rol r17 - eor r10,r24 - eor r11,r25 - eor r12,r16 - eor r13,r17 - mov r14,r3 - mov r15,r4 - mov r24,r5 - mov r25,r6 - mov r0,r2 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - mov r16,r5 - mov r0,r4 - mov r17,r6 - mov r21,r7 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - and r14,r16 - and r15,r17 - and r24,r21 - mov r16,r7 - mov r17,r8 - lsl r16 - rol r17 - lsl r16 - rol r17 - lsl r16 - rol r17 - and r25,r17 - com r14 - com r15 - com r24 - com r25 - eor r10,r14 - eor r11,r15 - eor r12,r24 - eor r13,r25 - movw r24,r6 - movw r16,r8 - mov r15,r5 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - 
lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - eor r10,r15 - eor r11,r24 - eor r12,r25 - eor r13,r16 - ld r14,Z+ - ld r15,Z+ - ld r24,Z+ - ld r25,Z+ - eor r10,r14 - eor r11,r15 - eor r12,r24 - eor r13,r25 - dec r18 - breq 401f - subi r19,240 - cp r19,r20 - breq 5396f - rjmp 19b -5396: - sub r30,r20 - sbc r31,r1 - mov r19,r1 - rjmp 19b -401: - st -X,r13 - st -X,r12 - st -X,r11 - st -X,r10 - st -X,r9 - st -X,r8 - st -X,r7 - st -X,r6 - st -X,r5 - st -X,r4 - st -X,r3 - st -X,r2 - st -X,r29 - st -X,r28 - st -X,r23 - st -X,r22 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size tiny_jambu_permutation, .-tiny_jambu_permutation - -#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/internal-tinyjambu.c b/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/internal-tinyjambu.c deleted file mode 100644 index 7f6fcf2..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/internal-tinyjambu.c +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-tinyjambu.h" - -#if !defined(__AVR__) - -void tiny_jambu_permutation - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, unsigned rounds) -{ - uint32_t t1, t2, t3, t4; - unsigned round; - - /* Load the state into local variables */ - uint32_t s0 = state[0]; - uint32_t s1 = state[1]; - uint32_t s2 = state[2]; - uint32_t s3 = state[3]; - - /* Perform all permutation rounds. Each round consists of 128 steps, - * which can be performed 32 at a time plus a rotation. After four - * sets of 32 steps, the rotation order returns to the original position. 
- * So we can hide the rotations by doing 128 steps each round */ - for (round = 0; round < rounds; ++round) { - /* Get the key words to use during this round */ - const uint32_t *k = &(key[(round * 4) % key_words]); - - /* Perform the 128 steps of this round in groups of 32 */ - #define tiny_jambu_steps_32(s0, s1, s2, s3, offset) \ - do { \ - t1 = (s1 >> 15) | (s2 << 17); \ - t2 = (s2 >> 6) | (s3 << 26); \ - t3 = (s2 >> 21) | (s3 << 11); \ - t4 = (s2 >> 27) | (s3 << 5); \ - s0 ^= t1 ^ (~(t2 & t3)) ^ t4 ^ k[offset]; \ - } while (0) - tiny_jambu_steps_32(s0, s1, s2, s3, 0); - tiny_jambu_steps_32(s1, s2, s3, s0, 1); - tiny_jambu_steps_32(s2, s3, s0, s1, 2); - tiny_jambu_steps_32(s3, s0, s1, s2, 3); - } - - /* Store the local variables back to the state */ - state[0] = s0; - state[1] = s1; - state[2] = s2; - state[3] = s3; -} - -#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/internal-tinyjambu.h b/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/internal-tinyjambu.h deleted file mode 100644 index f3bc599..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/internal-tinyjambu.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_TINYJAMBU_H -#define LW_INTERNAL_TINYJAMBU_H - -#include "internal-util.h" - -/** - * \file internal-tinyjambu.h - * \brief Internal implementation of the TinyJAMBU permutation. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the TinyJAMBU state in 32-bit words. - */ -#define TINY_JAMBU_STATE_SIZE 4 - -/** - * \brief Converts a number of steps into a number of rounds, where each - * round consists of 128 steps. - * - * \param steps The number of steps to perform; 384, 1024, 1152, or 1280. - * - * \return The number of rounds corresponding to \a steps. - */ -#define TINYJAMBU_ROUNDS(steps) ((steps) / 128) - -/** - * \brief Perform the TinyJAMBU permutation. - * - * \param state TinyJAMBU state to be permuted. - * \param key Points to the key words. - * \param key_words The number of words in the key. - * \param rounds The number of rounds to perform. - * - * The number of key words should be 4 for TinyJAMBU-128, 12 for TinyJAMBU-192, - * and 8 for TinuJAMBU-256. The TinyJAMBU-192 key is duplicated so that the - * \a key_words parameter is a multiple of 4. 
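The macro above packs the TinyJAMBU non-linear feedback into four shifts over a 128-bit state held as four 32-bit words. A standalone restatement of one 32-step group, with the state bit positions written out (they follow from s0..s3 holding bits 0..31, 32..63, 64..95 and 96..127):

#include <stdint.h>

/* One 32-step group of the TinyJAMBU permutation, mirroring
 * tiny_jambu_steps_32(): new s0 = s0 ^ s47 ^ ~(s70 & s85) ^ s91 ^ key word,
 * computed for 32 bit positions at a time. */
static uint32_t tiny_jambu_feedback32(uint32_t s0, uint32_t s1, uint32_t s2,
                                      uint32_t s3, uint32_t kword)
{
    uint32_t t1 = (s1 >> 15) | (s2 << 17);   /* state bits 47..78  */
    uint32_t t2 = (s2 >> 6)  | (s3 << 26);   /* state bits 70..101 */
    uint32_t t3 = (s2 >> 21) | (s3 << 11);   /* state bits 85..116 */
    uint32_t t4 = (s2 >> 27) | (s3 << 5);    /* state bits 91..122 */
    return s0 ^ t1 ^ (~(t2 & t3)) ^ t4 ^ kword;
}

Four such groups, with the word roles rotated each time, make up one 128-step round, which is why TINYJAMBU_ROUNDS(steps) is simply steps / 128.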
- */ -void tiny_jambu_permutation - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, unsigned rounds); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/internal-util.h b/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
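Detection failures surface as the #error above; when porting to a new compiler it can help to cross-check the compile-time guess against a quick runtime probe. A small sketch, assuming internal-util.h is on the include path:

#include <stdint.h>
#include <stdio.h>
#include "internal-util.h"

int main(void)
{
    const uint32_t probe = 0x01020304u;
    const uint8_t *bytes = (const uint8_t *)&probe;
    int runtime_little = (bytes[0] == 0x04);
#if defined(LW_UTIL_LITTLE_ENDIAN)
    printf("compile-time little-endian, runtime %s\n",
           runtime_little ? "little-endian (consistent)" : "big-endian (MISMATCH)");
#else
    printf("compile-time big-endian, runtime %s\n",
           runtime_little ? "little-endian (MISMATCH)" : "big-endian (consistent)");
#endif
    return 0;
}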
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
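The XOR helpers above are the work-horses for stream-style encryption in the rest of the library. A short sketch of the two most common patterns, assuming internal-util.h is on the include path; the 16-byte block size is illustrative:

#include "internal-util.h"

/* In-place: dest ^= src (e.g. fold a keystream block into a message). */
static void xor_in_place(unsigned char dest[16], const unsigned char src[16])
{
    lw_xor_block(dest, src, 16);
}

/* Three-address: dest = src1 ^ src2, leaving both sources untouched
 * (e.g. ciphertext = plaintext ^ keystream). */
static void xor_two_sources(unsigned char dest[16],
                            const unsigned char src1[16],
                            const unsigned char src2[16])
{
    lw_xor_block_2_src(dest, src1, src2, 16);
}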
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
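Whichever branch is compiled, the specialised 32-bit macros must agree with the generic barrel-shift rotations, so a few spot checks along these lines make a cheap unit test (include path assumed):

#include <assert.h>
#include <stdint.h>
#include "internal-util.h"

static void check_rotate_macros(void)
{
    uint32_t x = 0x12345678u;
    assert(leftRotate5(x)  == leftRotate(x, 5));    /* composed: left 8, right 3  */
    assert(leftRotate12(x) == leftRotate(x, 12));   /* composed: left 16, right 4 */
    assert(rightRotate7(x) == rightRotate(x, 7));   /* composed via leftRotate25  */
    assert(leftRotate31(x) == rightRotate(x, 1));   /* the two directions agree   */
}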
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/tinyjambu.c b/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/tinyjambu.c deleted file mode 100644 index 09fc41d..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/tinyjambu.c +++ /dev/null @@ -1,487 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "tinyjambu.h" -#include "internal-tinyjambu.h" -#include - -aead_cipher_t const tiny_jambu_128_cipher = { - "TinyJAMBU-128", - TINY_JAMBU_128_KEY_SIZE, - TINY_JAMBU_NONCE_SIZE, - TINY_JAMBU_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - tiny_jambu_128_aead_encrypt, - tiny_jambu_128_aead_decrypt -}; - -aead_cipher_t const tiny_jambu_192_cipher = { - "TinyJAMBU-192", - TINY_JAMBU_192_KEY_SIZE, - TINY_JAMBU_NONCE_SIZE, - TINY_JAMBU_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - tiny_jambu_192_aead_encrypt, - tiny_jambu_192_aead_decrypt -}; - -aead_cipher_t const tiny_jambu_256_cipher = { - "TinyJAMBU-256", - TINY_JAMBU_256_KEY_SIZE, - TINY_JAMBU_NONCE_SIZE, - TINY_JAMBU_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - tiny_jambu_256_aead_encrypt, - tiny_jambu_256_aead_decrypt -}; - -/** - * \brief Set up the TinyJAMBU state with the key and the nonce. - * - * \param state TinyJAMBU state to be permuted. - * \param key Points to the key words. - * \param key_words The number of words in the key. - * \param rounds The number of rounds to perform to absorb the key. - * \param nonce Points to the nonce. 
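The three descriptors above let harness code treat the TinyJAMBU variants uniformly. A small lookup sketch, assuming tinyjambu.h declares the descriptors as externs; find_tiny_jambu is an illustrative helper, not part of the library:

#include <string.h>
#include "tinyjambu.h"

/* Find a TinyJAMBU variant by its descriptor name, e.g. "TinyJAMBU-128". */
static const aead_cipher_t *find_tiny_jambu(const char *name)
{
    const aead_cipher_t *const table[] = {
        &tiny_jambu_128_cipher,
        &tiny_jambu_192_cipher,
        &tiny_jambu_256_cipher
    };
    size_t i;
    for (i = 0; i < sizeof(table) / sizeof(table[0]); ++i) {
        if (strcmp(table[i]->name, name) == 0)
            return table[i];
    }
    return 0;   /* not found */
}

A caller can then size its key and nonce buffers from key_len and nonce_len in the descriptor instead of hard-coding them.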
- * - * \sa tiny_jambu_permutation() - */ -static void tiny_jambu_setup - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, unsigned rounds, const unsigned char *nonce) -{ - /* Initialize the state with the key */ - memset(state, 0, TINY_JAMBU_STATE_SIZE * sizeof(uint32_t)); - tiny_jambu_permutation(state, key, key_words, rounds); - - /* Absorb the three 32-bit words of the 96-bit nonce */ - state[1] ^= 0x10; /* Domain separator for the nonce */ - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= le_load_word32(nonce); - state[1] ^= 0x10; - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= le_load_word32(nonce + 4); - state[1] ^= 0x10; - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= le_load_word32(nonce + 8); -} - -/** - * \brief Processes the associated data for TinyJAMBU. - * - * \param state TinyJAMBU state to be permuted. - * \param key Points to the key words. - * \param key_words The number of words in the key. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - */ -static void tiny_jambu_process_ad - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, const unsigned char *ad, unsigned long long adlen) -{ - /* Process as many full 32-bit words as we can */ - while (adlen >= 4) { - state[1] ^= 0x30; /* Domain separator for associated data */ - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= le_load_word32(ad); - ad += 4; - adlen -= 4; - } - - /* Handle the left-over associated data bytes, if any */ - if (adlen == 1) { - state[1] ^= 0x30; - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= ad[0]; - state[1] ^= 0x01; - } else if (adlen == 2) { - state[1] ^= 0x30; - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= le_load_word16(ad); - state[1] ^= 0x02; - } else if (adlen == 3) { - state[1] ^= 0x30; - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= le_load_word16(ad) | (((uint32_t)(ad[2])) << 16); - state[1] ^= 0x03; - } -} - -/** - * \brief Encrypts the plaintext with TinyJAMBU to produce the ciphertext. - * - * \param state TinyJAMBU state to be permuted. - * \param key Points to the key words. - * \param key_words The number of words in the key. - * \param rounds The number of rounds to perform to process the plaintext. - * \param c Points to the ciphertext output buffer. - * \param m Points to the plaintext input buffer. - * \param mlen Length of the plaintext in bytes. 
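The 1-, 2- and 3-byte tails above are absorbed as a little-endian partial word, with the byte count also folded into state[1] as a length marker. The packing itself, pulled out as a hedged standalone helper for clarity:

#include <stdint.h>

/* Pack the trailing 1..3 bytes of a buffer into a little-endian word,
 * matching the tail handling in tiny_jambu_process_ad() above. */
static uint32_t tiny_jambu_pack_tail(const unsigned char *p, unsigned len)
{
    uint32_t word = 0;
    if (len >= 1) word |= (uint32_t)p[0];
    if (len >= 2) word |= ((uint32_t)p[1]) << 8;
    if (len >= 3) word |= ((uint32_t)p[2]) << 16;
    return word;
}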
- */ -static void tiny_jambu_encrypt - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, unsigned rounds, unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - uint32_t data; - - /* Process as many full 32-bit words as we can */ - while (mlen >= 4) { - state[1] ^= 0x50; /* Domain separator for message data */ - tiny_jambu_permutation(state, key, key_words, rounds); - data = le_load_word32(m); - state[3] ^= data; - data ^= state[2]; - le_store_word32(c, data); - c += 4; - m += 4; - mlen -= 4; - } - - /* Handle the left-over plaintext data bytes, if any */ - if (mlen == 1) { - state[1] ^= 0x50; - tiny_jambu_permutation(state, key, key_words, rounds); - data = m[0]; - state[3] ^= data; - state[1] ^= 0x01; - c[0] = (uint8_t)(state[2] ^ data); - } else if (mlen == 2) { - state[1] ^= 0x50; - tiny_jambu_permutation(state, key, key_words, rounds); - data = le_load_word16(m); - state[3] ^= data; - state[1] ^= 0x02; - data ^= state[2]; - c[0] = (uint8_t)data; - c[1] = (uint8_t)(data >> 8); - } else if (mlen == 3) { - state[1] ^= 0x50; - tiny_jambu_permutation(state, key, key_words, rounds); - data = le_load_word16(m) | (((uint32_t)(m[2])) << 16); - state[3] ^= data; - state[1] ^= 0x03; - data ^= state[2]; - c[0] = (uint8_t)data; - c[1] = (uint8_t)(data >> 8); - c[2] = (uint8_t)(data >> 16); - } -} - -/** - * \brief Decrypts the ciphertext with TinyJAMBU to produce the plaintext. - * - * \param state TinyJAMBU state to be permuted. - * \param key Points to the key words. - * \param key_words The number of words in the key. - * \param rounds The number of rounds to perform to process the ciphertext. - * \param m Points to the plaintext output buffer. - * \param c Points to the ciphertext input buffer. - * \param mlen Length of the plaintext in bytes. - */ -static void tiny_jambu_decrypt - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, unsigned rounds, unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - uint32_t data; - - /* Process as many full 32-bit words as we can */ - while (mlen >= 4) { - state[1] ^= 0x50; /* Domain separator for message data */ - tiny_jambu_permutation(state, key, key_words, rounds); - data = le_load_word32(c) ^ state[2]; - state[3] ^= data; - le_store_word32(m, data); - c += 4; - m += 4; - mlen -= 4; - } - - /* Handle the left-over ciphertext data bytes, if any */ - if (mlen == 1) { - state[1] ^= 0x50; - tiny_jambu_permutation(state, key, key_words, rounds); - data = (c[0] ^ state[2]) & 0xFFU; - state[3] ^= data; - state[1] ^= 0x01; - m[0] = (uint8_t)data; - } else if (mlen == 2) { - state[1] ^= 0x50; - tiny_jambu_permutation(state, key, key_words, rounds); - data = (le_load_word16(c) ^ state[2]) & 0xFFFFU; - state[3] ^= data; - state[1] ^= 0x02; - m[0] = (uint8_t)data; - m[1] = (uint8_t)(data >> 8); - } else if (mlen == 3) { - state[1] ^= 0x50; - tiny_jambu_permutation(state, key, key_words, rounds); - data = le_load_word16(c) | (((uint32_t)(c[2])) << 16); - data = (data ^ state[2]) & 0xFFFFFFU; - state[3] ^= data; - state[1] ^= 0x03; - m[0] = (uint8_t)data; - m[1] = (uint8_t)(data >> 8); - m[2] = (uint8_t)(data >> 16); - } -} - -/** - * \brief Generates the final authentication tag for TinyJAMBU. - * - * \param state TinyJAMBU state to be permuted. - * \param key Points to the key words. - * \param key_words The number of words in the key. - * \param rounds The number of rounds to perform to generate the tag. - * \param tag Buffer to receive the tag. 
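At the word level the two directions above are mirror images: state[2] supplies the keystream word, and state[3] always absorbs the plaintext word, which is why decryption can recompute it as ciphertext ^ state[2]. A toy illustration with made-up word values:

#include <assert.h>
#include <stdint.h>

static void word_level_symmetry(void)
{
    uint32_t s2 = 0xA5A5A5A5u;    /* stand-in for the keystream word state[2] */
    uint32_t m  = 0x11223344u;    /* plaintext word                           */
    uint32_t c  = m ^ s2;         /* encrypt: ciphertext word                 */
    uint32_t m2 = c ^ s2;         /* decrypt: recovered plaintext word        */
    assert(m2 == m);              /* and both sides XOR m into state[3]       */
}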
- */ -static void tiny_jambu_generate_tag - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, unsigned rounds, unsigned char *tag) -{ - state[1] ^= 0x70; /* Domain separator for finalization */ - tiny_jambu_permutation(state, key, key_words, rounds); - le_store_word32(tag, state[2]); - state[1] ^= 0x70; - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - le_store_word32(tag + 4, state[2]); -} - -int tiny_jambu_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t state[TINY_JAMBU_STATE_SIZE]; - uint32_t key[4]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + TINY_JAMBU_TAG_SIZE; - - /* Unpack the key */ - key[0] = le_load_word32(k); - key[1] = le_load_word32(k + 4); - key[2] = le_load_word32(k + 8); - key[3] = le_load_word32(k + 12); - - /* Set up the TinyJAMBU state with the key, nonce, and associated data */ - tiny_jambu_setup(state, key, 4, TINYJAMBU_ROUNDS(1024), npub); - tiny_jambu_process_ad(state, key, 4, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - tiny_jambu_encrypt(state, key, 4, TINYJAMBU_ROUNDS(1024), c, m, mlen); - - /* Generate the authentication tag */ - tiny_jambu_generate_tag(state, key, 4, TINYJAMBU_ROUNDS(1024), c + mlen); - return 0; -} - -int tiny_jambu_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t state[TINY_JAMBU_STATE_SIZE]; - uint32_t key[4]; - unsigned char tag[TINY_JAMBU_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < TINY_JAMBU_TAG_SIZE) - return -1; - *mlen = clen - TINY_JAMBU_TAG_SIZE; - - /* Unpack the key */ - key[0] = le_load_word32(k); - key[1] = le_load_word32(k + 4); - key[2] = le_load_word32(k + 8); - key[3] = le_load_word32(k + 12); - - /* Set up the TinyJAMBU state with the key, nonce, and associated data */ - tiny_jambu_setup(state, key, 4, TINYJAMBU_ROUNDS(1024), npub); - tiny_jambu_process_ad(state, key, 4, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - tiny_jambu_decrypt(state, key, 4, TINYJAMBU_ROUNDS(1024), m, c, *mlen); - - /* Check the authentication tag */ - tiny_jambu_generate_tag(state, key, 4, TINYJAMBU_ROUNDS(1024), tag); - return aead_check_tag(m, *mlen, tag, c + *mlen, TINY_JAMBU_TAG_SIZE); -} - -int tiny_jambu_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t state[TINY_JAMBU_STATE_SIZE]; - uint32_t key[12]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + TINY_JAMBU_TAG_SIZE; - - /* Unpack the key and duplicate it to make the length a multiple of 4 */ - key[6] = key[0] = le_load_word32(k); - key[7] = key[1] = le_load_word32(k + 4); - key[8] = key[2] = le_load_word32(k + 8); - key[9] = key[3] = le_load_word32(k + 12); - key[10] = key[4] = le_load_word32(k + 16); - key[11] = key[5] = le_load_word32(k + 20); - - /* Set up the TinyJAMBU state with the key, nonce, and 
associated data */ - tiny_jambu_setup(state, key, 12, TINYJAMBU_ROUNDS(1152), npub); - tiny_jambu_process_ad(state, key, 12, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - tiny_jambu_encrypt(state, key, 12, TINYJAMBU_ROUNDS(1152), c, m, mlen); - - /* Generate the authentication tag */ - tiny_jambu_generate_tag(state, key, 12, TINYJAMBU_ROUNDS(1152), c + mlen); - return 0; -} - -int tiny_jambu_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t state[TINY_JAMBU_STATE_SIZE]; - uint32_t key[12]; - unsigned char tag[TINY_JAMBU_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < TINY_JAMBU_TAG_SIZE) - return -1; - *mlen = clen - TINY_JAMBU_TAG_SIZE; - - /* Unpack the key and duplicate it to make the length a multiple of 4 */ - key[6] = key[0] = le_load_word32(k); - key[7] = key[1] = le_load_word32(k + 4); - key[8] = key[2] = le_load_word32(k + 8); - key[9] = key[3] = le_load_word32(k + 12); - key[10] = key[4] = le_load_word32(k + 16); - key[11] = key[5] = le_load_word32(k + 20); - - /* Set up the TinyJAMBU state with the key, nonce, and associated data */ - tiny_jambu_setup(state, key, 12, TINYJAMBU_ROUNDS(1152), npub); - tiny_jambu_process_ad(state, key, 12, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - tiny_jambu_decrypt(state, key, 12, TINYJAMBU_ROUNDS(1152), m, c, *mlen); - - /* Check the authentication tag */ - tiny_jambu_generate_tag(state, key, 12, TINYJAMBU_ROUNDS(1152), tag); - return aead_check_tag(m, *mlen, tag, c + *mlen, TINY_JAMBU_TAG_SIZE); -} - -int tiny_jambu_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t state[TINY_JAMBU_STATE_SIZE]; - uint32_t key[8]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + TINY_JAMBU_TAG_SIZE; - - /* Unpack the key */ - key[0] = le_load_word32(k); - key[1] = le_load_word32(k + 4); - key[2] = le_load_word32(k + 8); - key[3] = le_load_word32(k + 12); - key[4] = le_load_word32(k + 16); - key[5] = le_load_word32(k + 20); - key[6] = le_load_word32(k + 24); - key[7] = le_load_word32(k + 28); - - /* Set up the TinyJAMBU state with the key, nonce, and associated data */ - tiny_jambu_setup(state, key, 8, TINYJAMBU_ROUNDS(1280), npub); - tiny_jambu_process_ad(state, key, 8, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - tiny_jambu_encrypt(state, key, 8, TINYJAMBU_ROUNDS(1280), c, m, mlen); - - /* Generate the authentication tag */ - tiny_jambu_generate_tag(state, key, 8, TINYJAMBU_ROUNDS(1280), c + mlen); - return 0; -} - -int tiny_jambu_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t state[TINY_JAMBU_STATE_SIZE]; - uint32_t key[8]; - unsigned char tag[TINY_JAMBU_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < TINY_JAMBU_TAG_SIZE) - return -1; - *mlen = clen - TINY_JAMBU_TAG_SIZE; - - /* Unpack the key */ 
- key[0] = le_load_word32(k); - key[1] = le_load_word32(k + 4); - key[2] = le_load_word32(k + 8); - key[3] = le_load_word32(k + 12); - key[4] = le_load_word32(k + 16); - key[5] = le_load_word32(k + 20); - key[6] = le_load_word32(k + 24); - key[7] = le_load_word32(k + 28); - - /* Set up the TinyJAMBU state with the key, nonce, and associated data */ - tiny_jambu_setup(state, key, 8, TINYJAMBU_ROUNDS(1280), npub); - tiny_jambu_process_ad(state, key, 8, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - tiny_jambu_decrypt(state, key, 8, TINYJAMBU_ROUNDS(1280), m, c, *mlen); - - /* Check the authentication tag */ - tiny_jambu_generate_tag(state, key, 8, TINYJAMBU_ROUNDS(1280), tag); - return aead_check_tag(m, *mlen, tag, c + *mlen, TINY_JAMBU_TAG_SIZE); -} diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/tinyjambu.h b/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/tinyjambu.h deleted file mode 100644 index cb304ff..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys-avr/tinyjambu.h +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_TINYJAMBU_H -#define LWCRYPTO_TINYJAMBU_H - -#include "aead-common.h" - -/** - * \file tinyjambu.h - * \brief TinyJAMBU authenticated encryption algorithm. - * - * TinyJAMBU is a family of encryption algorithms that are built around a - * lightweight 128-bit permutation. There are three variants of TinyJAMBU - * with different key sizes: - * - * \li TinyJAMBU-128 with a 128-bit key, a 96-bit nonce, and a 64-bit tag. - * This is the primary member of the family. - * \li TinyJAMBU-192 with a 192-bit key, a 96-bit nonce, and a 64-bit tag. - * \li TinyJAMBU-256 with a 256-bit key, a 96-bit nonce, and a 64-bit tag. - * - * TinyJAMBU has one of the smallest RAM and flash memory footprints - * out of all the algorithms in this library. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for TinyJAMBU-128. - */ -#define TINY_JAMBU_128_KEY_SIZE 16 - -/** - * \brief Size of the key for TinyJAMBU-192. - */ -#define TINY_JAMBU_192_KEY_SIZE 24 - -/** - * \brief Size of the key for TinyJAMBU-256. - */ -#define TINY_JAMBU_256_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for all TinyJAMBU variants. - */ -#define TINY_JAMBU_TAG_SIZE 8 - -/** - * \brief Size of the nonce for all TinyJAMBU variants. 
- */ -#define TINY_JAMBU_NONCE_SIZE 12 - -/** - * \brief Meta-information block for the TinyJAMBU-128 cipher. - */ -extern aead_cipher_t const tiny_jambu_128_cipher; - -/** - * \brief Meta-information block for the TinyJAMBU-192 cipher. - */ -extern aead_cipher_t const tiny_jambu_192_cipher; - -/** - * \brief Meta-information block for the TinyJAMBU-256 cipher. - */ -extern aead_cipher_t const tiny_jambu_256_cipher; - -/** - * \brief Encrypts and authenticates a packet with TinyJAMBU-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa tiny_jambu_128_aead_decrypt() - */ -int tiny_jambu_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with TinyJAMBU-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa tiny_jambu_128_aead_encrypt() - */ -int tiny_jambu_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with TinyJAMBU-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. 
- * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 24 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa tiny_jambu_192_aead_decrypt() - */ -int tiny_jambu_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with TinyJAMBU-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 24 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa tiny_jambu_192_aead_encrypt() - */ -int tiny_jambu_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with TinyJAMBU-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa tiny_jambu_256_aead_decrypt() - */ -int tiny_jambu_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with TinyJAMBU-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. 
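Editorial note: all six prototypes in this header share the same calling convention, so one caller sketch covers them. The fragment below is illustrative only (buffer names and contents are made up); it uses the TinyJAMBU-128 pair together with the size constants declared earlier in the header, and relies on the decrypt function returning non-zero and wiping the plaintext when the tag does not verify.

#include <string.h>
#include "tinyjambu.h"

/* Hypothetical round-trip, for illustration only. */
static int tinyjambu128_round_trip(void)
{
    unsigned char key[TINY_JAMBU_128_KEY_SIZE] = {0};  /* use real key material     */
    unsigned char nonce[TINY_JAMBU_NONCE_SIZE] = {0};  /* must be unique per key    */
    unsigned char ad[4]  = {'h', 'd', 'r', '1'};       /* authenticated only        */
    unsigned char msg[5] = {'h', 'e', 'l', 'l', 'o'};  /* encrypted + authenticated */
    unsigned char ct[sizeof(msg) + TINY_JAMBU_TAG_SIZE];
    unsigned char out[sizeof(msg)];
    unsigned long long ctlen = 0, outlen = 0;

    tiny_jambu_128_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                                ad, sizeof(ad), 0, nonce, key);
    /* ctlen is now sizeof(msg) + 8: the ciphertext followed by the tag. */

    if (tiny_jambu_128_aead_decrypt(out, &outlen, 0, ct, ctlen,
                                    ad, sizeof(ad), nonce, key) != 0)
        return -1;                          /* tag mismatch: "out" was wiped */
    return memcmp(out, msg, sizeof(msg));   /* 0 on success */
}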
- * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa tiny_jambu_256_aead_encrypt() - */ -int tiny_jambu_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys/internal-tinyjambu-avr.S b/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys/internal-tinyjambu-avr.S new file mode 100644 index 0000000..c7f2d1c --- /dev/null +++ b/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys/internal-tinyjambu-avr.S @@ -0,0 +1,471 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global tiny_jambu_permutation + .type tiny_jambu_permutation, @function +tiny_jambu_permutation: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r26,r24 + movw r30,r22 +.L__stack_usage = 18 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + lsl r20 + lsl r20 + mov r19,r1 +19: + movw r24,r4 + movw r16,r6 + mov r15,r3 + lsl r15 + rol r24 + rol r25 + rol r16 + rol r17 + eor r22,r24 + eor r23,r25 + eor r28,r16 + eor r29,r17 + mov r14,r7 + mov r15,r8 + mov r24,r9 + mov r25,r10 + mov r0,r6 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + mov r16,r9 + mov r0,r8 + mov r17,r10 + mov r21,r11 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + and r14,r16 + and r15,r17 + and r24,r21 + mov r16,r11 + mov r17,r12 + lsl r16 + rol r17 + lsl r16 + rol r17 + lsl r16 + rol r17 + and r25,r17 + com r14 + com r15 + com r24 + com r25 + eor r22,r14 + eor r23,r15 + eor r28,r24 + eor r29,r25 + movw r24,r10 + movw r16,r12 + mov r15,r9 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + eor r22,r15 + eor r23,r24 + eor r28,r25 + eor r29,r16 + ld r14,Z+ + ld r15,Z+ + ld r24,Z+ + ld r25,Z+ + eor r22,r14 + eor r23,r15 + eor r28,r24 + eor r29,r25 + movw r24,r8 + movw r16,r10 + mov r15,r7 + lsl r15 + rol r24 + rol r25 + rol r16 + rol r17 + eor r2,r24 + eor r3,r25 + eor r4,r16 + eor r5,r17 + mov r14,r11 + mov r15,r12 + mov r24,r13 + mov r25,r22 + mov r0,r10 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + mov r16,r13 + mov r0,r12 + mov r17,r22 + mov r21,r23 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + and r14,r16 + and 
r15,r17 + and r24,r21 + mov r16,r23 + mov r17,r28 + lsl r16 + rol r17 + lsl r16 + rol r17 + lsl r16 + rol r17 + and r25,r17 + com r14 + com r15 + com r24 + com r25 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + movw r24,r22 + movw r16,r28 + mov r15,r13 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + eor r2,r15 + eor r3,r24 + eor r4,r25 + eor r5,r16 + ld r14,Z+ + ld r15,Z+ + ld r24,Z+ + ld r25,Z+ + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + movw r24,r12 + movw r16,r22 + mov r15,r11 + lsl r15 + rol r24 + rol r25 + rol r16 + rol r17 + eor r6,r24 + eor r7,r25 + eor r8,r16 + eor r9,r17 + mov r14,r23 + mov r15,r28 + mov r24,r29 + mov r25,r2 + mov r0,r22 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + mov r16,r29 + mov r0,r28 + mov r17,r2 + mov r21,r3 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + and r14,r16 + and r15,r17 + and r24,r21 + mov r16,r3 + mov r17,r4 + lsl r16 + rol r17 + lsl r16 + rol r17 + lsl r16 + rol r17 + and r25,r17 + com r14 + com r15 + com r24 + com r25 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + movw r24,r2 + movw r16,r4 + mov r15,r29 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + eor r6,r15 + eor r7,r24 + eor r8,r25 + eor r9,r16 + ld r14,Z+ + ld r15,Z+ + ld r24,Z+ + ld r25,Z+ + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + movw r24,r28 + movw r16,r2 + mov r15,r23 + lsl r15 + rol r24 + rol r25 + rol r16 + rol r17 + eor r10,r24 + eor r11,r25 + eor r12,r16 + eor r13,r17 + mov r14,r3 + mov r15,r4 + mov r24,r5 + mov r25,r6 + mov r0,r2 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + mov r16,r5 + mov r0,r4 + mov r17,r6 + mov r21,r7 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + and r14,r16 + and r15,r17 + and r24,r21 + mov r16,r7 + mov r17,r8 + lsl r16 + rol r17 + lsl r16 + rol r17 + lsl r16 + rol r17 + and r25,r17 + com r14 + com r15 + com r24 + com r25 + eor r10,r14 + eor r11,r15 + eor r12,r24 + eor r13,r25 + movw r24,r6 + movw r16,r8 + mov r15,r5 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + eor r10,r15 + eor r11,r24 + eor r12,r25 + eor r13,r16 + ld r14,Z+ + ld r15,Z+ + ld r24,Z+ + ld r25,Z+ + eor r10,r14 + eor r11,r15 + eor r12,r24 + eor r13,r25 + dec r18 + breq 401f + subi r19,240 + cp r19,r20 + breq 5396f + rjmp 19b +5396: + sub r30,r20 + sbc r31,r1 + mov r19,r1 + rjmp 19b +401: + st -X,r13 + st -X,r12 + st -X,r11 + st -X,r10 + st -X,r9 + st -X,r8 + st -X,r7 + st -X,r6 + st -X,r5 + st -X,r4 + st -X,r3 + st -X,r2 + st -X,r29 + st -X,r28 + st -X,r23 + st -X,r22 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size tiny_jambu_permutation, .-tiny_jambu_permutation + +#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys/internal-tinyjambu.c b/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys/internal-tinyjambu.c index 7308718..7f6fcf2 100644 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys/internal-tinyjambu.c +++ 
b/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys/internal-tinyjambu.c @@ -22,6 +22,8 @@ #include "internal-tinyjambu.h" +#if !defined(__AVR__) + void tiny_jambu_permutation (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, unsigned key_words, unsigned rounds) @@ -64,3 +66,5 @@ void tiny_jambu_permutation state[2] = s2; state[3] = s3; } + +#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys/internal-util.h b/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys/internal-util.h index e79158c..e30166d 100644 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys/internal-util.h +++ b/tinyjambu/Implementations/crypto_aead/tinyjambu128/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define 
leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define 
rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/aead-common.c b/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/aead-common.h b/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
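Editorial note: these function-pointer typedefs (aead_cipher_encrypt_t above, and aead_cipher_decrypt_t declared immediately below) exist so that cipher-agnostic test and benchmark code can drive any AEAD in the suite through the same two pointers; they are also the fields stored in the aead_cipher_t meta-information block later in this header. A minimal sketch, assuming the TinyJAMBU-128 entry points from tinyjambu.h:

#include "aead-common.h"
#include "tinyjambu.h"

/* Hypothetical file-scope pointers, for illustration only: code that only
 * sees "enc" and "dec" no longer needs to know which AEAD is behind them. */
static aead_cipher_encrypt_t enc = tiny_jambu_128_aead_encrypt;
static aead_cipher_decrypt_t dec = tiny_jambu_128_aead_decrypt;
/* Calls then use the usual (c, clen, m, mlen, ad, adlen, nsec, npub, k)
 * and (m, mlen, nsec, c, clen, ad, adlen, npub, k) argument orders. */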
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. 
Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
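Editorial note: aead_check_tag(), declared above, is what every *_aead_decrypt() function in this suite uses to verify the tag without data-dependent branches. The masking is easiest to follow with concrete numbers; the sketch below is illustrative only (check_tag_sketch and its parameters are made-up names) and, like the implementation in aead-common.c, assumes the compiler's right shift of a negative int is arithmetic.

/* Illustrative restatement of the constant-time mask used by aead_check_tag(). */
static int check_tag_sketch(const unsigned char *tag_a, const unsigned char *tag_b)
{
    int accum = 0;
    unsigned i;
    for (i = 0; i < 8; ++i)            /* e.g. 8 bytes = TINY_JAMBU_TAG_SIZE */
        accum |= tag_a[i] ^ tag_b[i];  /* stays 0 only if every byte matches */
    accum = (accum - 1) >> 8;
    /* equal tags:     accum was 0      -> (0 - 1) >> 8     = -1 (all-ones mask)
     * differing tags: accum was 1..255 -> (accum - 1) >> 8 =  0 (all-zeros mask)
     * aead_check_tag() additionally ANDs the mask into every plaintext byte,
     * wiping the output on failure without any secret-dependent branch. */
    return ~accum;                     /* 0 on success, -1 on failure */
}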
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/api.h b/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/api.h deleted file mode 100644 index 1ee99ed..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 24 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 12 -#define CRYPTO_ABYTES 8 -#define CRYPTO_NOOVERLAP 1 diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/encrypt.c b/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/encrypt.c deleted file mode 100644 index 62a5dde..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "tinyjambu.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return tiny_jambu_192_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return tiny_jambu_192_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/internal-tinyjambu-avr.S b/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/internal-tinyjambu-avr.S deleted file mode 100644 index c7f2d1c..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/internal-tinyjambu-avr.S +++ /dev/null @@ -1,471 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global tiny_jambu_permutation - .type tiny_jambu_permutation, @function -tiny_jambu_permutation: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r26,r24 - movw r30,r22 -.L__stack_usage = 18 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - lsl r20 - lsl r20 - mov r19,r1 -19: - movw r24,r4 - movw r16,r6 - mov r15,r3 - lsl r15 - rol r24 - rol r25 - rol r16 - rol r17 - eor r22,r24 - eor r23,r25 - eor r28,r16 - eor r29,r17 - mov r14,r7 - mov r15,r8 - mov r24,r9 - mov r25,r10 - mov r0,r6 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - mov r16,r9 - mov r0,r8 - mov r17,r10 - mov r21,r11 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - and r14,r16 - and r15,r17 - and r24,r21 - mov r16,r11 - mov r17,r12 - lsl r16 - rol r17 - lsl r16 - rol r17 - lsl r16 - rol r17 - and r25,r17 - com r14 - com r15 - com r24 - com r25 - eor r22,r14 - eor r23,r15 - eor r28,r24 - eor r29,r25 - movw r24,r10 - movw r16,r12 - mov r15,r9 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - 
ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - eor r22,r15 - eor r23,r24 - eor r28,r25 - eor r29,r16 - ld r14,Z+ - ld r15,Z+ - ld r24,Z+ - ld r25,Z+ - eor r22,r14 - eor r23,r15 - eor r28,r24 - eor r29,r25 - movw r24,r8 - movw r16,r10 - mov r15,r7 - lsl r15 - rol r24 - rol r25 - rol r16 - rol r17 - eor r2,r24 - eor r3,r25 - eor r4,r16 - eor r5,r17 - mov r14,r11 - mov r15,r12 - mov r24,r13 - mov r25,r22 - mov r0,r10 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - mov r16,r13 - mov r0,r12 - mov r17,r22 - mov r21,r23 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - and r14,r16 - and r15,r17 - and r24,r21 - mov r16,r23 - mov r17,r28 - lsl r16 - rol r17 - lsl r16 - rol r17 - lsl r16 - rol r17 - and r25,r17 - com r14 - com r15 - com r24 - com r25 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - movw r24,r22 - movw r16,r28 - mov r15,r13 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - eor r2,r15 - eor r3,r24 - eor r4,r25 - eor r5,r16 - ld r14,Z+ - ld r15,Z+ - ld r24,Z+ - ld r25,Z+ - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - movw r24,r12 - movw r16,r22 - mov r15,r11 - lsl r15 - rol r24 - rol r25 - rol r16 - rol r17 - eor r6,r24 - eor r7,r25 - eor r8,r16 - eor r9,r17 - mov r14,r23 - mov r15,r28 - mov r24,r29 - mov r25,r2 - mov r0,r22 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - mov r16,r29 - mov r0,r28 - mov r17,r2 - mov r21,r3 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - and r14,r16 - and r15,r17 - and r24,r21 - mov r16,r3 - mov r17,r4 - lsl r16 - rol r17 - lsl r16 - rol r17 - lsl r16 - rol r17 - and r25,r17 - com r14 - com r15 - com r24 - com r25 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - movw r24,r2 - movw r16,r4 - mov r15,r29 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - eor r6,r15 - eor r7,r24 - eor r8,r25 - eor r9,r16 - ld r14,Z+ - ld r15,Z+ - ld r24,Z+ - ld r25,Z+ - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - movw r24,r28 - movw r16,r2 - mov r15,r23 - lsl r15 - rol r24 - rol r25 - rol r16 - rol r17 - eor r10,r24 - eor r11,r25 - eor r12,r16 - eor r13,r17 - mov r14,r3 - mov r15,r4 - mov r24,r5 - mov r25,r6 - mov r0,r2 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - mov r16,r5 - mov r0,r4 - mov r17,r6 - mov r21,r7 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - and r14,r16 - and r15,r17 - and r24,r21 - mov r16,r7 - mov r17,r8 - lsl r16 - rol r17 - lsl r16 - rol r17 - lsl r16 - rol r17 - and r25,r17 - com r14 - com r15 - com r24 - com r25 - eor r10,r14 - eor r11,r15 - eor r12,r24 - eor r13,r25 - movw r24,r6 - movw r16,r8 - mov r15,r5 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - eor r10,r15 - eor r11,r24 - eor r12,r25 - eor r13,r16 - ld r14,Z+ - ld r15,Z+ - ld r24,Z+ - ld r25,Z+ - eor r10,r14 - eor r11,r15 - eor r12,r24 - eor r13,r25 - dec r18 - breq 401f - subi r19,240 - cp r19,r20 - breq 5396f - rjmp 19b -5396: - sub r30,r20 - sbc r31,r1 - mov r19,r1 - rjmp 19b 
-401: - st -X,r13 - st -X,r12 - st -X,r11 - st -X,r10 - st -X,r9 - st -X,r8 - st -X,r7 - st -X,r6 - st -X,r5 - st -X,r4 - st -X,r3 - st -X,r2 - st -X,r29 - st -X,r28 - st -X,r23 - st -X,r22 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size tiny_jambu_permutation, .-tiny_jambu_permutation - -#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/internal-tinyjambu.c b/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/internal-tinyjambu.c deleted file mode 100644 index 7f6fcf2..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/internal-tinyjambu.c +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-tinyjambu.h" - -#if !defined(__AVR__) - -void tiny_jambu_permutation - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, unsigned rounds) -{ - uint32_t t1, t2, t3, t4; - unsigned round; - - /* Load the state into local variables */ - uint32_t s0 = state[0]; - uint32_t s1 = state[1]; - uint32_t s2 = state[2]; - uint32_t s3 = state[3]; - - /* Perform all permutation rounds. Each round consists of 128 steps, - * which can be performed 32 at a time plus a rotation. After four - * sets of 32 steps, the rotation order returns to the original position. 
- * So we can hide the rotations by doing 128 steps each round */ - for (round = 0; round < rounds; ++round) { - /* Get the key words to use during this round */ - const uint32_t *k = &(key[(round * 4) % key_words]); - - /* Perform the 128 steps of this round in groups of 32 */ - #define tiny_jambu_steps_32(s0, s1, s2, s3, offset) \ - do { \ - t1 = (s1 >> 15) | (s2 << 17); \ - t2 = (s2 >> 6) | (s3 << 26); \ - t3 = (s2 >> 21) | (s3 << 11); \ - t4 = (s2 >> 27) | (s3 << 5); \ - s0 ^= t1 ^ (~(t2 & t3)) ^ t4 ^ k[offset]; \ - } while (0) - tiny_jambu_steps_32(s0, s1, s2, s3, 0); - tiny_jambu_steps_32(s1, s2, s3, s0, 1); - tiny_jambu_steps_32(s2, s3, s0, s1, 2); - tiny_jambu_steps_32(s3, s0, s1, s2, 3); - } - - /* Store the local variables back to the state */ - state[0] = s0; - state[1] = s1; - state[2] = s2; - state[3] = s3; -} - -#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/internal-tinyjambu.h b/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/internal-tinyjambu.h deleted file mode 100644 index f3bc599..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/internal-tinyjambu.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_TINYJAMBU_H -#define LW_INTERNAL_TINYJAMBU_H - -#include "internal-util.h" - -/** - * \file internal-tinyjambu.h - * \brief Internal implementation of the TinyJAMBU permutation. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the TinyJAMBU state in 32-bit words. - */ -#define TINY_JAMBU_STATE_SIZE 4 - -/** - * \brief Converts a number of steps into a number of rounds, where each - * round consists of 128 steps. - * - * \param steps The number of steps to perform; 384, 1024, 1152, or 1280. - * - * \return The number of rounds corresponding to \a steps. - */ -#define TINYJAMBU_ROUNDS(steps) ((steps) / 128) - -/** - * \brief Perform the TinyJAMBU permutation. - * - * \param state TinyJAMBU state to be permuted. - * \param key Points to the key words. - * \param key_words The number of words in the key. - * \param rounds The number of rounds to perform. - * - * The number of key words should be 4 for TinyJAMBU-128, 12 for TinyJAMBU-192, - * and 8 for TinuJAMBU-256. The TinyJAMBU-192 key is duplicated so that the - * \a key_words parameter is a multiple of 4. 
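Editorial note: the step counts referred to in this header are easier to follow with the concrete values used elsewhere in this patch; the enum names below are made up purely to show the arithmetic of TINYJAMBU_ROUNDS().

/* Worked values (restating calls made by the AEAD code in this patch): */
enum {
    TJ128_ROUNDS   = 1024 / 128,  /*  8 rounds for TinyJAMBU-128 */
    TJ192_ROUNDS   = 1152 / 128,  /*  9 rounds for TinyJAMBU-192 */
    TJ256_ROUNDS   = 1280 / 128,  /* 10 rounds for TinyJAMBU-256 */
    TJ_TAG2_ROUNDS =  384 / 128   /*  3 rounds for the second tag word */
};
/* Each round reads key[(round * 4) % key_words] through key[... + 3], so
 * key_words must be a multiple of 4 (4, 8 or 12); that is why the 6-word
 * TinyJAMBU-192 key is stored twice to give 12 words. */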
- */ -void tiny_jambu_permutation - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, unsigned rounds); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/internal-util.h b/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/tinyjambu.c b/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/tinyjambu.c deleted file mode 100644 index 09fc41d..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/tinyjambu.c +++ /dev/null @@ -1,487 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "tinyjambu.h" -#include "internal-tinyjambu.h" -#include - -aead_cipher_t const tiny_jambu_128_cipher = { - "TinyJAMBU-128", - TINY_JAMBU_128_KEY_SIZE, - TINY_JAMBU_NONCE_SIZE, - TINY_JAMBU_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - tiny_jambu_128_aead_encrypt, - tiny_jambu_128_aead_decrypt -}; - -aead_cipher_t const tiny_jambu_192_cipher = { - "TinyJAMBU-192", - TINY_JAMBU_192_KEY_SIZE, - TINY_JAMBU_NONCE_SIZE, - TINY_JAMBU_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - tiny_jambu_192_aead_encrypt, - tiny_jambu_192_aead_decrypt -}; - -aead_cipher_t const tiny_jambu_256_cipher = { - "TinyJAMBU-256", - TINY_JAMBU_256_KEY_SIZE, - TINY_JAMBU_NONCE_SIZE, - TINY_JAMBU_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - tiny_jambu_256_aead_encrypt, - tiny_jambu_256_aead_decrypt -}; - -/** - * \brief Set up the TinyJAMBU state with the key and the nonce. - * - * \param state TinyJAMBU state to be permuted. - * \param key Points to the key words. - * \param key_words The number of words in the key. - * \param rounds The number of rounds to perform to absorb the key. - * \param nonce Points to the nonce. 
- * - * \sa tiny_jambu_permutation() - */ -static void tiny_jambu_setup - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, unsigned rounds, const unsigned char *nonce) -{ - /* Initialize the state with the key */ - memset(state, 0, TINY_JAMBU_STATE_SIZE * sizeof(uint32_t)); - tiny_jambu_permutation(state, key, key_words, rounds); - - /* Absorb the three 32-bit words of the 96-bit nonce */ - state[1] ^= 0x10; /* Domain separator for the nonce */ - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= le_load_word32(nonce); - state[1] ^= 0x10; - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= le_load_word32(nonce + 4); - state[1] ^= 0x10; - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= le_load_word32(nonce + 8); -} - -/** - * \brief Processes the associated data for TinyJAMBU. - * - * \param state TinyJAMBU state to be permuted. - * \param key Points to the key words. - * \param key_words The number of words in the key. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - */ -static void tiny_jambu_process_ad - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, const unsigned char *ad, unsigned long long adlen) -{ - /* Process as many full 32-bit words as we can */ - while (adlen >= 4) { - state[1] ^= 0x30; /* Domain separator for associated data */ - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= le_load_word32(ad); - ad += 4; - adlen -= 4; - } - - /* Handle the left-over associated data bytes, if any */ - if (adlen == 1) { - state[1] ^= 0x30; - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= ad[0]; - state[1] ^= 0x01; - } else if (adlen == 2) { - state[1] ^= 0x30; - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= le_load_word16(ad); - state[1] ^= 0x02; - } else if (adlen == 3) { - state[1] ^= 0x30; - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= le_load_word16(ad) | (((uint32_t)(ad[2])) << 16); - state[1] ^= 0x03; - } -} - -/** - * \brief Encrypts the plaintext with TinyJAMBU to produce the ciphertext. - * - * \param state TinyJAMBU state to be permuted. - * \param key Points to the key words. - * \param key_words The number of words in the key. - * \param rounds The number of rounds to perform to process the plaintext. - * \param c Points to the ciphertext output buffer. - * \param m Points to the plaintext input buffer. - * \param mlen Length of the plaintext in bytes. 
- */ -static void tiny_jambu_encrypt - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, unsigned rounds, unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - uint32_t data; - - /* Process as many full 32-bit words as we can */ - while (mlen >= 4) { - state[1] ^= 0x50; /* Domain separator for message data */ - tiny_jambu_permutation(state, key, key_words, rounds); - data = le_load_word32(m); - state[3] ^= data; - data ^= state[2]; - le_store_word32(c, data); - c += 4; - m += 4; - mlen -= 4; - } - - /* Handle the left-over plaintext data bytes, if any */ - if (mlen == 1) { - state[1] ^= 0x50; - tiny_jambu_permutation(state, key, key_words, rounds); - data = m[0]; - state[3] ^= data; - state[1] ^= 0x01; - c[0] = (uint8_t)(state[2] ^ data); - } else if (mlen == 2) { - state[1] ^= 0x50; - tiny_jambu_permutation(state, key, key_words, rounds); - data = le_load_word16(m); - state[3] ^= data; - state[1] ^= 0x02; - data ^= state[2]; - c[0] = (uint8_t)data; - c[1] = (uint8_t)(data >> 8); - } else if (mlen == 3) { - state[1] ^= 0x50; - tiny_jambu_permutation(state, key, key_words, rounds); - data = le_load_word16(m) | (((uint32_t)(m[2])) << 16); - state[3] ^= data; - state[1] ^= 0x03; - data ^= state[2]; - c[0] = (uint8_t)data; - c[1] = (uint8_t)(data >> 8); - c[2] = (uint8_t)(data >> 16); - } -} - -/** - * \brief Decrypts the ciphertext with TinyJAMBU to produce the plaintext. - * - * \param state TinyJAMBU state to be permuted. - * \param key Points to the key words. - * \param key_words The number of words in the key. - * \param rounds The number of rounds to perform to process the ciphertext. - * \param m Points to the plaintext output buffer. - * \param c Points to the ciphertext input buffer. - * \param mlen Length of the plaintext in bytes. - */ -static void tiny_jambu_decrypt - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, unsigned rounds, unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - uint32_t data; - - /* Process as many full 32-bit words as we can */ - while (mlen >= 4) { - state[1] ^= 0x50; /* Domain separator for message data */ - tiny_jambu_permutation(state, key, key_words, rounds); - data = le_load_word32(c) ^ state[2]; - state[3] ^= data; - le_store_word32(m, data); - c += 4; - m += 4; - mlen -= 4; - } - - /* Handle the left-over ciphertext data bytes, if any */ - if (mlen == 1) { - state[1] ^= 0x50; - tiny_jambu_permutation(state, key, key_words, rounds); - data = (c[0] ^ state[2]) & 0xFFU; - state[3] ^= data; - state[1] ^= 0x01; - m[0] = (uint8_t)data; - } else if (mlen == 2) { - state[1] ^= 0x50; - tiny_jambu_permutation(state, key, key_words, rounds); - data = (le_load_word16(c) ^ state[2]) & 0xFFFFU; - state[3] ^= data; - state[1] ^= 0x02; - m[0] = (uint8_t)data; - m[1] = (uint8_t)(data >> 8); - } else if (mlen == 3) { - state[1] ^= 0x50; - tiny_jambu_permutation(state, key, key_words, rounds); - data = le_load_word16(c) | (((uint32_t)(c[2])) << 16); - data = (data ^ state[2]) & 0xFFFFFFU; - state[3] ^= data; - state[1] ^= 0x03; - m[0] = (uint8_t)data; - m[1] = (uint8_t)(data >> 8); - m[2] = (uint8_t)(data >> 16); - } -} - -/** - * \brief Generates the final authentication tag for TinyJAMBU. - * - * \param state TinyJAMBU state to be permuted. - * \param key Points to the key words. - * \param key_words The number of words in the key. - * \param rounds The number of rounds to perform to generate the tag. - * \param tag Buffer to receive the tag. 
- */ -static void tiny_jambu_generate_tag - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, unsigned rounds, unsigned char *tag) -{ - state[1] ^= 0x70; /* Domain separator for finalization */ - tiny_jambu_permutation(state, key, key_words, rounds); - le_store_word32(tag, state[2]); - state[1] ^= 0x70; - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - le_store_word32(tag + 4, state[2]); -} - -int tiny_jambu_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t state[TINY_JAMBU_STATE_SIZE]; - uint32_t key[4]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + TINY_JAMBU_TAG_SIZE; - - /* Unpack the key */ - key[0] = le_load_word32(k); - key[1] = le_load_word32(k + 4); - key[2] = le_load_word32(k + 8); - key[3] = le_load_word32(k + 12); - - /* Set up the TinyJAMBU state with the key, nonce, and associated data */ - tiny_jambu_setup(state, key, 4, TINYJAMBU_ROUNDS(1024), npub); - tiny_jambu_process_ad(state, key, 4, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - tiny_jambu_encrypt(state, key, 4, TINYJAMBU_ROUNDS(1024), c, m, mlen); - - /* Generate the authentication tag */ - tiny_jambu_generate_tag(state, key, 4, TINYJAMBU_ROUNDS(1024), c + mlen); - return 0; -} - -int tiny_jambu_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t state[TINY_JAMBU_STATE_SIZE]; - uint32_t key[4]; - unsigned char tag[TINY_JAMBU_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < TINY_JAMBU_TAG_SIZE) - return -1; - *mlen = clen - TINY_JAMBU_TAG_SIZE; - - /* Unpack the key */ - key[0] = le_load_word32(k); - key[1] = le_load_word32(k + 4); - key[2] = le_load_word32(k + 8); - key[3] = le_load_word32(k + 12); - - /* Set up the TinyJAMBU state with the key, nonce, and associated data */ - tiny_jambu_setup(state, key, 4, TINYJAMBU_ROUNDS(1024), npub); - tiny_jambu_process_ad(state, key, 4, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - tiny_jambu_decrypt(state, key, 4, TINYJAMBU_ROUNDS(1024), m, c, *mlen); - - /* Check the authentication tag */ - tiny_jambu_generate_tag(state, key, 4, TINYJAMBU_ROUNDS(1024), tag); - return aead_check_tag(m, *mlen, tag, c + *mlen, TINY_JAMBU_TAG_SIZE); -} - -int tiny_jambu_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t state[TINY_JAMBU_STATE_SIZE]; - uint32_t key[12]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + TINY_JAMBU_TAG_SIZE; - - /* Unpack the key and duplicate it to make the length a multiple of 4 */ - key[6] = key[0] = le_load_word32(k); - key[7] = key[1] = le_load_word32(k + 4); - key[8] = key[2] = le_load_word32(k + 8); - key[9] = key[3] = le_load_word32(k + 12); - key[10] = key[4] = le_load_word32(k + 16); - key[11] = key[5] = le_load_word32(k + 20); - - /* Set up the TinyJAMBU state with the key, nonce, and 
associated data */ - tiny_jambu_setup(state, key, 12, TINYJAMBU_ROUNDS(1152), npub); - tiny_jambu_process_ad(state, key, 12, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - tiny_jambu_encrypt(state, key, 12, TINYJAMBU_ROUNDS(1152), c, m, mlen); - - /* Generate the authentication tag */ - tiny_jambu_generate_tag(state, key, 12, TINYJAMBU_ROUNDS(1152), c + mlen); - return 0; -} - -int tiny_jambu_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t state[TINY_JAMBU_STATE_SIZE]; - uint32_t key[12]; - unsigned char tag[TINY_JAMBU_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < TINY_JAMBU_TAG_SIZE) - return -1; - *mlen = clen - TINY_JAMBU_TAG_SIZE; - - /* Unpack the key and duplicate it to make the length a multiple of 4 */ - key[6] = key[0] = le_load_word32(k); - key[7] = key[1] = le_load_word32(k + 4); - key[8] = key[2] = le_load_word32(k + 8); - key[9] = key[3] = le_load_word32(k + 12); - key[10] = key[4] = le_load_word32(k + 16); - key[11] = key[5] = le_load_word32(k + 20); - - /* Set up the TinyJAMBU state with the key, nonce, and associated data */ - tiny_jambu_setup(state, key, 12, TINYJAMBU_ROUNDS(1152), npub); - tiny_jambu_process_ad(state, key, 12, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - tiny_jambu_decrypt(state, key, 12, TINYJAMBU_ROUNDS(1152), m, c, *mlen); - - /* Check the authentication tag */ - tiny_jambu_generate_tag(state, key, 12, TINYJAMBU_ROUNDS(1152), tag); - return aead_check_tag(m, *mlen, tag, c + *mlen, TINY_JAMBU_TAG_SIZE); -} - -int tiny_jambu_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t state[TINY_JAMBU_STATE_SIZE]; - uint32_t key[8]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + TINY_JAMBU_TAG_SIZE; - - /* Unpack the key */ - key[0] = le_load_word32(k); - key[1] = le_load_word32(k + 4); - key[2] = le_load_word32(k + 8); - key[3] = le_load_word32(k + 12); - key[4] = le_load_word32(k + 16); - key[5] = le_load_word32(k + 20); - key[6] = le_load_word32(k + 24); - key[7] = le_load_word32(k + 28); - - /* Set up the TinyJAMBU state with the key, nonce, and associated data */ - tiny_jambu_setup(state, key, 8, TINYJAMBU_ROUNDS(1280), npub); - tiny_jambu_process_ad(state, key, 8, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - tiny_jambu_encrypt(state, key, 8, TINYJAMBU_ROUNDS(1280), c, m, mlen); - - /* Generate the authentication tag */ - tiny_jambu_generate_tag(state, key, 8, TINYJAMBU_ROUNDS(1280), c + mlen); - return 0; -} - -int tiny_jambu_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t state[TINY_JAMBU_STATE_SIZE]; - uint32_t key[8]; - unsigned char tag[TINY_JAMBU_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < TINY_JAMBU_TAG_SIZE) - return -1; - *mlen = clen - TINY_JAMBU_TAG_SIZE; - - /* Unpack the key */ 
- key[0] = le_load_word32(k); - key[1] = le_load_word32(k + 4); - key[2] = le_load_word32(k + 8); - key[3] = le_load_word32(k + 12); - key[4] = le_load_word32(k + 16); - key[5] = le_load_word32(k + 20); - key[6] = le_load_word32(k + 24); - key[7] = le_load_word32(k + 28); - - /* Set up the TinyJAMBU state with the key, nonce, and associated data */ - tiny_jambu_setup(state, key, 8, TINYJAMBU_ROUNDS(1280), npub); - tiny_jambu_process_ad(state, key, 8, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - tiny_jambu_decrypt(state, key, 8, TINYJAMBU_ROUNDS(1280), m, c, *mlen); - - /* Check the authentication tag */ - tiny_jambu_generate_tag(state, key, 8, TINYJAMBU_ROUNDS(1280), tag); - return aead_check_tag(m, *mlen, tag, c + *mlen, TINY_JAMBU_TAG_SIZE); -} diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/tinyjambu.h b/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/tinyjambu.h deleted file mode 100644 index cb304ff..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys-avr/tinyjambu.h +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_TINYJAMBU_H -#define LWCRYPTO_TINYJAMBU_H - -#include "aead-common.h" - -/** - * \file tinyjambu.h - * \brief TinyJAMBU authenticated encryption algorithm. - * - * TinyJAMBU is a family of encryption algorithms that are built around a - * lightweight 128-bit permutation. There are three variants of TinyJAMBU - * with different key sizes: - * - * \li TinyJAMBU-128 with a 128-bit key, a 96-bit nonce, and a 64-bit tag. - * This is the primary member of the family. - * \li TinyJAMBU-192 with a 192-bit key, a 96-bit nonce, and a 64-bit tag. - * \li TinyJAMBU-256 with a 256-bit key, a 96-bit nonce, and a 64-bit tag. - * - * TinyJAMBU has one of the smallest RAM and flash memory footprints - * out of all the algorithms in this library. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for TinyJAMBU-128. - */ -#define TINY_JAMBU_128_KEY_SIZE 16 - -/** - * \brief Size of the key for TinyJAMBU-192. - */ -#define TINY_JAMBU_192_KEY_SIZE 24 - -/** - * \brief Size of the key for TinyJAMBU-256. - */ -#define TINY_JAMBU_256_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for all TinyJAMBU variants. - */ -#define TINY_JAMBU_TAG_SIZE 8 - -/** - * \brief Size of the nonce for all TinyJAMBU variants. 
- */ -#define TINY_JAMBU_NONCE_SIZE 12 - -/** - * \brief Meta-information block for the TinyJAMBU-128 cipher. - */ -extern aead_cipher_t const tiny_jambu_128_cipher; - -/** - * \brief Meta-information block for the TinyJAMBU-192 cipher. - */ -extern aead_cipher_t const tiny_jambu_192_cipher; - -/** - * \brief Meta-information block for the TinyJAMBU-256 cipher. - */ -extern aead_cipher_t const tiny_jambu_256_cipher; - -/** - * \brief Encrypts and authenticates a packet with TinyJAMBU-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa tiny_jambu_128_aead_decrypt() - */ -int tiny_jambu_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with TinyJAMBU-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa tiny_jambu_128_aead_encrypt() - */ -int tiny_jambu_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with TinyJAMBU-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. 
- * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 24 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa tiny_jambu_192_aead_decrypt() - */ -int tiny_jambu_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with TinyJAMBU-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 24 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa tiny_jambu_192_aead_encrypt() - */ -int tiny_jambu_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with TinyJAMBU-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa tiny_jambu_256_aead_decrypt() - */ -int tiny_jambu_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with TinyJAMBU-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. 
- * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa tiny_jambu_256_aead_encrypt() - */ -int tiny_jambu_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys/internal-tinyjambu-avr.S b/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys/internal-tinyjambu-avr.S new file mode 100644 index 0000000..c7f2d1c --- /dev/null +++ b/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys/internal-tinyjambu-avr.S @@ -0,0 +1,471 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global tiny_jambu_permutation + .type tiny_jambu_permutation, @function +tiny_jambu_permutation: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r26,r24 + movw r30,r22 +.L__stack_usage = 18 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + lsl r20 + lsl r20 + mov r19,r1 +19: + movw r24,r4 + movw r16,r6 + mov r15,r3 + lsl r15 + rol r24 + rol r25 + rol r16 + rol r17 + eor r22,r24 + eor r23,r25 + eor r28,r16 + eor r29,r17 + mov r14,r7 + mov r15,r8 + mov r24,r9 + mov r25,r10 + mov r0,r6 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + mov r16,r9 + mov r0,r8 + mov r17,r10 + mov r21,r11 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + and r14,r16 + and r15,r17 + and r24,r21 + mov r16,r11 + mov r17,r12 + lsl r16 + rol r17 + lsl r16 + rol r17 + lsl r16 + rol r17 + and r25,r17 + com r14 + com r15 + com r24 + com r25 + eor r22,r14 + eor r23,r15 + eor r28,r24 + eor r29,r25 + movw r24,r10 + movw r16,r12 + mov r15,r9 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + eor r22,r15 + eor r23,r24 + eor r28,r25 + eor r29,r16 + ld r14,Z+ + ld r15,Z+ + ld r24,Z+ + ld r25,Z+ + eor r22,r14 + eor r23,r15 + eor r28,r24 + eor r29,r25 + movw r24,r8 + movw r16,r10 + mov r15,r7 + lsl r15 + rol r24 + rol r25 + rol r16 + rol r17 + eor r2,r24 + eor r3,r25 + eor r4,r16 + eor r5,r17 + mov r14,r11 + mov r15,r12 + mov r24,r13 + mov r25,r22 + mov r0,r10 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + mov r16,r13 + mov r0,r12 + mov r17,r22 + mov r21,r23 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + and r14,r16 + and 
r15,r17 + and r24,r21 + mov r16,r23 + mov r17,r28 + lsl r16 + rol r17 + lsl r16 + rol r17 + lsl r16 + rol r17 + and r25,r17 + com r14 + com r15 + com r24 + com r25 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + movw r24,r22 + movw r16,r28 + mov r15,r13 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + eor r2,r15 + eor r3,r24 + eor r4,r25 + eor r5,r16 + ld r14,Z+ + ld r15,Z+ + ld r24,Z+ + ld r25,Z+ + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + movw r24,r12 + movw r16,r22 + mov r15,r11 + lsl r15 + rol r24 + rol r25 + rol r16 + rol r17 + eor r6,r24 + eor r7,r25 + eor r8,r16 + eor r9,r17 + mov r14,r23 + mov r15,r28 + mov r24,r29 + mov r25,r2 + mov r0,r22 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + mov r16,r29 + mov r0,r28 + mov r17,r2 + mov r21,r3 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + and r14,r16 + and r15,r17 + and r24,r21 + mov r16,r3 + mov r17,r4 + lsl r16 + rol r17 + lsl r16 + rol r17 + lsl r16 + rol r17 + and r25,r17 + com r14 + com r15 + com r24 + com r25 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + movw r24,r2 + movw r16,r4 + mov r15,r29 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + eor r6,r15 + eor r7,r24 + eor r8,r25 + eor r9,r16 + ld r14,Z+ + ld r15,Z+ + ld r24,Z+ + ld r25,Z+ + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + movw r24,r28 + movw r16,r2 + mov r15,r23 + lsl r15 + rol r24 + rol r25 + rol r16 + rol r17 + eor r10,r24 + eor r11,r25 + eor r12,r16 + eor r13,r17 + mov r14,r3 + mov r15,r4 + mov r24,r5 + mov r25,r6 + mov r0,r2 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + mov r16,r5 + mov r0,r4 + mov r17,r6 + mov r21,r7 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + and r14,r16 + and r15,r17 + and r24,r21 + mov r16,r7 + mov r17,r8 + lsl r16 + rol r17 + lsl r16 + rol r17 + lsl r16 + rol r17 + and r25,r17 + com r14 + com r15 + com r24 + com r25 + eor r10,r14 + eor r11,r15 + eor r12,r24 + eor r13,r25 + movw r24,r6 + movw r16,r8 + mov r15,r5 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + eor r10,r15 + eor r11,r24 + eor r12,r25 + eor r13,r16 + ld r14,Z+ + ld r15,Z+ + ld r24,Z+ + ld r25,Z+ + eor r10,r14 + eor r11,r15 + eor r12,r24 + eor r13,r25 + dec r18 + breq 401f + subi r19,240 + cp r19,r20 + breq 5396f + rjmp 19b +5396: + sub r30,r20 + sbc r31,r1 + mov r19,r1 + rjmp 19b +401: + st -X,r13 + st -X,r12 + st -X,r11 + st -X,r10 + st -X,r9 + st -X,r8 + st -X,r7 + st -X,r6 + st -X,r5 + st -X,r4 + st -X,r3 + st -X,r2 + st -X,r29 + st -X,r28 + st -X,r23 + st -X,r22 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size tiny_jambu_permutation, .-tiny_jambu_permutation + +#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys/internal-tinyjambu.c b/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys/internal-tinyjambu.c index 7308718..7f6fcf2 100644 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys/internal-tinyjambu.c +++ 
b/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys/internal-tinyjambu.c @@ -22,6 +22,8 @@ #include "internal-tinyjambu.h" +#if !defined(__AVR__) + void tiny_jambu_permutation (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, unsigned key_words, unsigned rounds) @@ -64,3 +66,5 @@ void tiny_jambu_permutation state[2] = s2; state[3] = s3; } + +#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys/internal-util.h b/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys/internal-util.h index e79158c..e30166d 100644 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys/internal-util.h +++ b/tinyjambu/Implementations/crypto_aead/tinyjambu192/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define 
leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define 
rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/aead-common.c b/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/aead-common.h b/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
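Illustrative sketch (not taken from this patch): the return-code convention above is shared by every cipher wrapper in the repository. Assuming the tinyjambu256 wrappers and the CRYPTO_* sizes from api.h shown elsewhere in this patch, an encrypt/decrypt round trip with placeholder buffers might look like this:

#include <string.h>

/* Sizes from api.h; prototypes as declared in encrypt.c (placeholder example). */
#define CRYPTO_KEYBYTES 32
#define CRYPTO_NPUBBYTES 12
#define CRYPTO_ABYTES 8

int crypto_aead_encrypt(unsigned char *c, unsigned long long *clen,
                        const unsigned char *m, unsigned long long mlen,
                        const unsigned char *ad, unsigned long long adlen,
                        const unsigned char *nsec, const unsigned char *npub,
                        const unsigned char *k);
int crypto_aead_decrypt(unsigned char *m, unsigned long long *mlen,
                        unsigned char *nsec,
                        const unsigned char *c, unsigned long long clen,
                        const unsigned char *ad, unsigned long long adlen,
                        const unsigned char *npub, const unsigned char *k);

int roundtrip_example(void)
{
    unsigned char key[CRYPTO_KEYBYTES] = {0};     /* placeholder key */
    unsigned char nonce[CRYPTO_NPUBBYTES] = {0};  /* placeholder nonce */
    unsigned char msg[16] = "example payload";
    unsigned char ct[sizeof(msg) + CRYPTO_ABYTES]; /* ciphertext || tag */
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen = 0, ptlen = 0;

    if (crypto_aead_encrypt(ct, &ctlen, msg, sizeof(msg),
                            NULL, 0, NULL, nonce, key) != 0)
        return -2;   /* error in the parameters */
    if (crypto_aead_decrypt(pt, &ptlen, NULL, ct, ctlen,
                            NULL, 0, nonce, key) != 0)
        return -1;   /* authentication failed or parameter error */
    return memcmp(pt, msg, (size_t)ptlen) == 0 ? 0 : -1;
}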
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. 
Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
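A self-contained sketch of the constant-time masking trick that aead_check_tag() relies on (the tag and plaintext values below are made up for illustration): the tag bytes are XOR-accumulated, "any difference" is turned into an all-zero mask, and that mask wipes the plaintext before the result is returned.

#include <stdio.h>

int main(void)
{
    unsigned char tag1[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    unsigned char tag2[8] = {1, 2, 3, 4, 5, 6, 7, 9};  /* last byte differs */
    unsigned char plaintext[4] = {'d', 'a', 't', 'a'};
    int accum = 0;
    unsigned i;

    /* XOR-accumulate every tag byte; accum is non-zero iff any byte differs. */
    for (i = 0; i < 8; ++i)
        accum |= tag1[i] ^ tag2[i];

    /* -1 (all ones) if the tags match, 0 if they do not, on the usual
     * two's-complement compilers this code targets. */
    accum = (accum - 1) >> 8;

    /* Wipe the plaintext when the mask is zero (tags differed). */
    for (i = 0; i < 4; ++i)
        plaintext[i] &= (unsigned char)accum;

    printf("result=%d first byte=%d\n", ~accum, plaintext[0]);  /* -1, 0 */
    return 0;
}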
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/api.h b/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/api.h deleted file mode 100644 index fd4ff9f..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 32 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 12 -#define CRYPTO_ABYTES 8 -#define CRYPTO_NOOVERLAP 1 diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/encrypt.c b/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/encrypt.c deleted file mode 100644 index 357b9fe..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "tinyjambu.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return tiny_jambu_256_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return tiny_jambu_256_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/internal-tinyjambu-avr.S b/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/internal-tinyjambu-avr.S deleted file mode 100644 index c7f2d1c..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/internal-tinyjambu-avr.S +++ /dev/null @@ -1,471 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global tiny_jambu_permutation - .type tiny_jambu_permutation, @function -tiny_jambu_permutation: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - push r16 - push r17 - movw r26,r24 - movw r30,r22 -.L__stack_usage = 18 - ld r22,X+ - ld r23,X+ - ld r28,X+ - ld r29,X+ - ld r2,X+ - ld r3,X+ - ld r4,X+ - ld r5,X+ - ld r6,X+ - ld r7,X+ - ld r8,X+ - ld r9,X+ - ld r10,X+ - ld r11,X+ - ld r12,X+ - ld r13,X+ - lsl r20 - lsl r20 - mov r19,r1 -19: - movw r24,r4 - movw r16,r6 - mov r15,r3 - lsl r15 - rol r24 - rol r25 - rol r16 - rol r17 - eor r22,r24 - eor r23,r25 - eor r28,r16 - eor r29,r17 - mov r14,r7 - mov r15,r8 - mov r24,r9 - mov r25,r10 - mov r0,r6 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - mov r16,r9 - mov r0,r8 - mov r17,r10 - mov r21,r11 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - and r14,r16 - and r15,r17 - and r24,r21 - mov r16,r11 - mov r17,r12 - lsl r16 - rol r17 - lsl r16 - rol r17 - lsl r16 - rol r17 - and r25,r17 - com r14 - com r15 - com r24 - com r25 - eor r22,r14 - eor r23,r15 - eor r28,r24 - eor r29,r25 - movw r24,r10 - movw r16,r12 - mov r15,r9 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - 
ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - eor r22,r15 - eor r23,r24 - eor r28,r25 - eor r29,r16 - ld r14,Z+ - ld r15,Z+ - ld r24,Z+ - ld r25,Z+ - eor r22,r14 - eor r23,r15 - eor r28,r24 - eor r29,r25 - movw r24,r8 - movw r16,r10 - mov r15,r7 - lsl r15 - rol r24 - rol r25 - rol r16 - rol r17 - eor r2,r24 - eor r3,r25 - eor r4,r16 - eor r5,r17 - mov r14,r11 - mov r15,r12 - mov r24,r13 - mov r25,r22 - mov r0,r10 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - mov r16,r13 - mov r0,r12 - mov r17,r22 - mov r21,r23 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - and r14,r16 - and r15,r17 - and r24,r21 - mov r16,r23 - mov r17,r28 - lsl r16 - rol r17 - lsl r16 - rol r17 - lsl r16 - rol r17 - and r25,r17 - com r14 - com r15 - com r24 - com r25 - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - movw r24,r22 - movw r16,r28 - mov r15,r13 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - eor r2,r15 - eor r3,r24 - eor r4,r25 - eor r5,r16 - ld r14,Z+ - ld r15,Z+ - ld r24,Z+ - ld r25,Z+ - eor r2,r14 - eor r3,r15 - eor r4,r24 - eor r5,r25 - movw r24,r12 - movw r16,r22 - mov r15,r11 - lsl r15 - rol r24 - rol r25 - rol r16 - rol r17 - eor r6,r24 - eor r7,r25 - eor r8,r16 - eor r9,r17 - mov r14,r23 - mov r15,r28 - mov r24,r29 - mov r25,r2 - mov r0,r22 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - mov r16,r29 - mov r0,r28 - mov r17,r2 - mov r21,r3 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - and r14,r16 - and r15,r17 - and r24,r21 - mov r16,r3 - mov r17,r4 - lsl r16 - rol r17 - lsl r16 - rol r17 - lsl r16 - rol r17 - and r25,r17 - com r14 - com r15 - com r24 - com r25 - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - movw r24,r2 - movw r16,r4 - mov r15,r29 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - eor r6,r15 - eor r7,r24 - eor r8,r25 - eor r9,r16 - ld r14,Z+ - ld r15,Z+ - ld r24,Z+ - ld r25,Z+ - eor r6,r14 - eor r7,r15 - eor r8,r24 - eor r9,r25 - movw r24,r28 - movw r16,r2 - mov r15,r23 - lsl r15 - rol r24 - rol r25 - rol r16 - rol r17 - eor r10,r24 - eor r11,r25 - eor r12,r16 - eor r13,r17 - mov r14,r3 - mov r15,r4 - mov r24,r5 - mov r25,r6 - mov r0,r2 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - lsl r0 - rol r14 - rol r15 - rol r24 - rol r25 - mov r16,r5 - mov r0,r4 - mov r17,r6 - mov r21,r7 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - lsl r0 - rol r16 - rol r17 - rol r21 - and r14,r16 - and r15,r17 - and r24,r21 - mov r16,r7 - mov r17,r8 - lsl r16 - rol r17 - lsl r16 - rol r17 - lsl r16 - rol r17 - and r25,r17 - com r14 - com r15 - com r24 - com r25 - eor r10,r14 - eor r11,r15 - eor r12,r24 - eor r13,r25 - movw r24,r6 - movw r16,r8 - mov r15,r5 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - lsr r17 - ror r16 - ror r25 - ror r24 - ror r15 - eor r10,r15 - eor r11,r24 - eor r12,r25 - eor r13,r16 - ld r14,Z+ - ld r15,Z+ - ld r24,Z+ - ld r25,Z+ - eor r10,r14 - eor r11,r15 - eor r12,r24 - eor r13,r25 - dec r18 - breq 401f - subi r19,240 - cp r19,r20 - breq 5396f - rjmp 19b -5396: - sub r30,r20 - sbc r31,r1 - mov r19,r1 - rjmp 19b 
-401: - st -X,r13 - st -X,r12 - st -X,r11 - st -X,r10 - st -X,r9 - st -X,r8 - st -X,r7 - st -X,r6 - st -X,r5 - st -X,r4 - st -X,r3 - st -X,r2 - st -X,r29 - st -X,r28 - st -X,r23 - st -X,r22 - pop r17 - pop r16 - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size tiny_jambu_permutation, .-tiny_jambu_permutation - -#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/internal-tinyjambu.c b/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/internal-tinyjambu.c deleted file mode 100644 index 7f6fcf2..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/internal-tinyjambu.c +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-tinyjambu.h" - -#if !defined(__AVR__) - -void tiny_jambu_permutation - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, unsigned rounds) -{ - uint32_t t1, t2, t3, t4; - unsigned round; - - /* Load the state into local variables */ - uint32_t s0 = state[0]; - uint32_t s1 = state[1]; - uint32_t s2 = state[2]; - uint32_t s3 = state[3]; - - /* Perform all permutation rounds. Each round consists of 128 steps, - * which can be performed 32 at a time plus a rotation. After four - * sets of 32 steps, the rotation order returns to the original position. 
- * So we can hide the rotations by doing 128 steps each round */ - for (round = 0; round < rounds; ++round) { - /* Get the key words to use during this round */ - const uint32_t *k = &(key[(round * 4) % key_words]); - - /* Perform the 128 steps of this round in groups of 32 */ - #define tiny_jambu_steps_32(s0, s1, s2, s3, offset) \ - do { \ - t1 = (s1 >> 15) | (s2 << 17); \ - t2 = (s2 >> 6) | (s3 << 26); \ - t3 = (s2 >> 21) | (s3 << 11); \ - t4 = (s2 >> 27) | (s3 << 5); \ - s0 ^= t1 ^ (~(t2 & t3)) ^ t4 ^ k[offset]; \ - } while (0) - tiny_jambu_steps_32(s0, s1, s2, s3, 0); - tiny_jambu_steps_32(s1, s2, s3, s0, 1); - tiny_jambu_steps_32(s2, s3, s0, s1, 2); - tiny_jambu_steps_32(s3, s0, s1, s2, 3); - } - - /* Store the local variables back to the state */ - state[0] = s0; - state[1] = s1; - state[2] = s2; - state[3] = s3; -} - -#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/internal-tinyjambu.h b/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/internal-tinyjambu.h deleted file mode 100644 index f3bc599..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/internal-tinyjambu.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_TINYJAMBU_H -#define LW_INTERNAL_TINYJAMBU_H - -#include "internal-util.h" - -/** - * \file internal-tinyjambu.h - * \brief Internal implementation of the TinyJAMBU permutation. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the TinyJAMBU state in 32-bit words. - */ -#define TINY_JAMBU_STATE_SIZE 4 - -/** - * \brief Converts a number of steps into a number of rounds, where each - * round consists of 128 steps. - * - * \param steps The number of steps to perform; 384, 1024, 1152, or 1280. - * - * \return The number of rounds corresponding to \a steps. - */ -#define TINYJAMBU_ROUNDS(steps) ((steps) / 128) - -/** - * \brief Perform the TinyJAMBU permutation. - * - * \param state TinyJAMBU state to be permuted. - * \param key Points to the key words. - * \param key_words The number of words in the key. - * \param rounds The number of rounds to perform. - * - * The number of key words should be 4 for TinyJAMBU-128, 12 for TinyJAMBU-192, - * and 8 for TinuJAMBU-256. The TinyJAMBU-192 key is duplicated so that the - * \a key_words parameter is a multiple of 4. 
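As an illustrative call (not part of the patch; the all-zero state and key are placeholders, while the prototype, TINY_JAMBU_STATE_SIZE, and TINYJAMBU_ROUNDS come from internal-tinyjambu.h as shown here):

#include <stdint.h>
#include "internal-tinyjambu.h"

/* Placeholder call: permute an all-zero TinyJAMBU-128 state for 1024 steps,
 * i.e. TINYJAMBU_ROUNDS(1024) == 8 rounds of 128 steps each. */
void permute_example(void)
{
    uint32_t state[TINY_JAMBU_STATE_SIZE] = {0, 0, 0, 0};
    uint32_t key[4] = {0, 0, 0, 0};   /* 4 key words for TinyJAMBU-128 */
    tiny_jambu_permutation(state, key, 4, TINYJAMBU_ROUNDS(1024));
}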
- */ -void tiny_jambu_permutation - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, unsigned rounds); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/internal-util.h b/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
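To make the byte-order handling concrete, here is a small sketch (arbitrary byte values) using the le_load_word32() and be_load_word32() helpers defined just below; the two macros read the same four bytes with opposite byte orders regardless of what LW_UTIL_LITTLE_ENDIAN detects for the host CPU.

#include <stdint.h>
#include <stdio.h>
#include "internal-util.h"

int main(void)
{
    const uint8_t bytes[4] = {0x01, 0x02, 0x03, 0x04};
    /* Same four bytes, little-endian vs big-endian interpretation. */
    printf("le=%08lx be=%08lx\n",
           (unsigned long)le_load_word32(bytes),
           (unsigned long)be_load_word32(bytes));  /* le=04030201 be=01020304 */
    return 0;
}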
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
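A quick sanity sketch of the composition idea behind these macros (plain C, not from the patch): a 5-bit left rotation equals a left rotation by 8 followed by three 1-bit right rotations, which is exactly how leftRotate5() is composed above.

#include <stdint.h>
#include <stdio.h>

static uint32_t rotl(uint32_t x, unsigned bits)
{
    return (x << bits) | (x >> (32 - bits));
}

static uint32_t rotr(uint32_t x, unsigned bits)
{
    return (x >> bits) | (x << (32 - bits));
}

int main(void)
{
    uint32_t x = 0x12345678;
    uint32_t direct = rotl(x, 5);
    uint32_t composed = rotr(rotr(rotr(rotl(x, 8), 1), 1), 1);
    printf("%s\n", direct == composed ? "match" : "mismatch");  /* match */
    return 0;
}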
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/tinyjambu.c b/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/tinyjambu.c deleted file mode 100644 index 09fc41d..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/tinyjambu.c +++ /dev/null @@ -1,487 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "tinyjambu.h" -#include "internal-tinyjambu.h" -#include <string.h> - -aead_cipher_t const tiny_jambu_128_cipher = { - "TinyJAMBU-128", - TINY_JAMBU_128_KEY_SIZE, - TINY_JAMBU_NONCE_SIZE, - TINY_JAMBU_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - tiny_jambu_128_aead_encrypt, - tiny_jambu_128_aead_decrypt -}; - -aead_cipher_t const tiny_jambu_192_cipher = { - "TinyJAMBU-192", - TINY_JAMBU_192_KEY_SIZE, - TINY_JAMBU_NONCE_SIZE, - TINY_JAMBU_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - tiny_jambu_192_aead_encrypt, - tiny_jambu_192_aead_decrypt -}; - -aead_cipher_t const tiny_jambu_256_cipher = { - "TinyJAMBU-256", - TINY_JAMBU_256_KEY_SIZE, - TINY_JAMBU_NONCE_SIZE, - TINY_JAMBU_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - tiny_jambu_256_aead_encrypt, - tiny_jambu_256_aead_decrypt -}; - -/** - * \brief Set up the TinyJAMBU state with the key and the nonce. - * - * \param state TinyJAMBU state to be permuted. - * \param key Points to the key words. - * \param key_words The number of words in the key. - * \param rounds The number of rounds to perform to absorb the key. - * \param nonce Points to the nonce.
- * - * \sa tiny_jambu_permutation() - */ -static void tiny_jambu_setup - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, unsigned rounds, const unsigned char *nonce) -{ - /* Initialize the state with the key */ - memset(state, 0, TINY_JAMBU_STATE_SIZE * sizeof(uint32_t)); - tiny_jambu_permutation(state, key, key_words, rounds); - - /* Absorb the three 32-bit words of the 96-bit nonce */ - state[1] ^= 0x10; /* Domain separator for the nonce */ - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= le_load_word32(nonce); - state[1] ^= 0x10; - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= le_load_word32(nonce + 4); - state[1] ^= 0x10; - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= le_load_word32(nonce + 8); -} - -/** - * \brief Processes the associated data for TinyJAMBU. - * - * \param state TinyJAMBU state to be permuted. - * \param key Points to the key words. - * \param key_words The number of words in the key. - * \param ad Points to the associated data. - * \param adlen Length of the associated data in bytes. - */ -static void tiny_jambu_process_ad - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, const unsigned char *ad, unsigned long long adlen) -{ - /* Process as many full 32-bit words as we can */ - while (adlen >= 4) { - state[1] ^= 0x30; /* Domain separator for associated data */ - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= le_load_word32(ad); - ad += 4; - adlen -= 4; - } - - /* Handle the left-over associated data bytes, if any */ - if (adlen == 1) { - state[1] ^= 0x30; - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= ad[0]; - state[1] ^= 0x01; - } else if (adlen == 2) { - state[1] ^= 0x30; - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= le_load_word16(ad); - state[1] ^= 0x02; - } else if (adlen == 3) { - state[1] ^= 0x30; - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - state[3] ^= le_load_word16(ad) | (((uint32_t)(ad[2])) << 16); - state[1] ^= 0x03; - } -} - -/** - * \brief Encrypts the plaintext with TinyJAMBU to produce the ciphertext. - * - * \param state TinyJAMBU state to be permuted. - * \param key Points to the key words. - * \param key_words The number of words in the key. - * \param rounds The number of rounds to perform to process the plaintext. - * \param c Points to the ciphertext output buffer. - * \param m Points to the plaintext input buffer. - * \param mlen Length of the plaintext in bytes. 
- */ -static void tiny_jambu_encrypt - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, unsigned rounds, unsigned char *c, - const unsigned char *m, unsigned long long mlen) -{ - uint32_t data; - - /* Process as many full 32-bit words as we can */ - while (mlen >= 4) { - state[1] ^= 0x50; /* Domain separator for message data */ - tiny_jambu_permutation(state, key, key_words, rounds); - data = le_load_word32(m); - state[3] ^= data; - data ^= state[2]; - le_store_word32(c, data); - c += 4; - m += 4; - mlen -= 4; - } - - /* Handle the left-over plaintext data bytes, if any */ - if (mlen == 1) { - state[1] ^= 0x50; - tiny_jambu_permutation(state, key, key_words, rounds); - data = m[0]; - state[3] ^= data; - state[1] ^= 0x01; - c[0] = (uint8_t)(state[2] ^ data); - } else if (mlen == 2) { - state[1] ^= 0x50; - tiny_jambu_permutation(state, key, key_words, rounds); - data = le_load_word16(m); - state[3] ^= data; - state[1] ^= 0x02; - data ^= state[2]; - c[0] = (uint8_t)data; - c[1] = (uint8_t)(data >> 8); - } else if (mlen == 3) { - state[1] ^= 0x50; - tiny_jambu_permutation(state, key, key_words, rounds); - data = le_load_word16(m) | (((uint32_t)(m[2])) << 16); - state[3] ^= data; - state[1] ^= 0x03; - data ^= state[2]; - c[0] = (uint8_t)data; - c[1] = (uint8_t)(data >> 8); - c[2] = (uint8_t)(data >> 16); - } -} - -/** - * \brief Decrypts the ciphertext with TinyJAMBU to produce the plaintext. - * - * \param state TinyJAMBU state to be permuted. - * \param key Points to the key words. - * \param key_words The number of words in the key. - * \param rounds The number of rounds to perform to process the ciphertext. - * \param m Points to the plaintext output buffer. - * \param c Points to the ciphertext input buffer. - * \param mlen Length of the plaintext in bytes. - */ -static void tiny_jambu_decrypt - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, unsigned rounds, unsigned char *m, - const unsigned char *c, unsigned long long mlen) -{ - uint32_t data; - - /* Process as many full 32-bit words as we can */ - while (mlen >= 4) { - state[1] ^= 0x50; /* Domain separator for message data */ - tiny_jambu_permutation(state, key, key_words, rounds); - data = le_load_word32(c) ^ state[2]; - state[3] ^= data; - le_store_word32(m, data); - c += 4; - m += 4; - mlen -= 4; - } - - /* Handle the left-over ciphertext data bytes, if any */ - if (mlen == 1) { - state[1] ^= 0x50; - tiny_jambu_permutation(state, key, key_words, rounds); - data = (c[0] ^ state[2]) & 0xFFU; - state[3] ^= data; - state[1] ^= 0x01; - m[0] = (uint8_t)data; - } else if (mlen == 2) { - state[1] ^= 0x50; - tiny_jambu_permutation(state, key, key_words, rounds); - data = (le_load_word16(c) ^ state[2]) & 0xFFFFU; - state[3] ^= data; - state[1] ^= 0x02; - m[0] = (uint8_t)data; - m[1] = (uint8_t)(data >> 8); - } else if (mlen == 3) { - state[1] ^= 0x50; - tiny_jambu_permutation(state, key, key_words, rounds); - data = le_load_word16(c) | (((uint32_t)(c[2])) << 16); - data = (data ^ state[2]) & 0xFFFFFFU; - state[3] ^= data; - state[1] ^= 0x03; - m[0] = (uint8_t)data; - m[1] = (uint8_t)(data >> 8); - m[2] = (uint8_t)(data >> 16); - } -} - -/** - * \brief Generates the final authentication tag for TinyJAMBU. - * - * \param state TinyJAMBU state to be permuted. - * \param key Points to the key words. - * \param key_words The number of words in the key. - * \param rounds The number of rounds to perform to generate the tag. - * \param tag Buffer to receive the tag. 
- */ -static void tiny_jambu_generate_tag - (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, - unsigned key_words, unsigned rounds, unsigned char *tag) -{ - state[1] ^= 0x70; /* Domain separator for finalization */ - tiny_jambu_permutation(state, key, key_words, rounds); - le_store_word32(tag, state[2]); - state[1] ^= 0x70; - tiny_jambu_permutation(state, key, key_words, TINYJAMBU_ROUNDS(384)); - le_store_word32(tag + 4, state[2]); -} - -int tiny_jambu_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t state[TINY_JAMBU_STATE_SIZE]; - uint32_t key[4]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + TINY_JAMBU_TAG_SIZE; - - /* Unpack the key */ - key[0] = le_load_word32(k); - key[1] = le_load_word32(k + 4); - key[2] = le_load_word32(k + 8); - key[3] = le_load_word32(k + 12); - - /* Set up the TinyJAMBU state with the key, nonce, and associated data */ - tiny_jambu_setup(state, key, 4, TINYJAMBU_ROUNDS(1024), npub); - tiny_jambu_process_ad(state, key, 4, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - tiny_jambu_encrypt(state, key, 4, TINYJAMBU_ROUNDS(1024), c, m, mlen); - - /* Generate the authentication tag */ - tiny_jambu_generate_tag(state, key, 4, TINYJAMBU_ROUNDS(1024), c + mlen); - return 0; -} - -int tiny_jambu_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t state[TINY_JAMBU_STATE_SIZE]; - uint32_t key[4]; - unsigned char tag[TINY_JAMBU_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < TINY_JAMBU_TAG_SIZE) - return -1; - *mlen = clen - TINY_JAMBU_TAG_SIZE; - - /* Unpack the key */ - key[0] = le_load_word32(k); - key[1] = le_load_word32(k + 4); - key[2] = le_load_word32(k + 8); - key[3] = le_load_word32(k + 12); - - /* Set up the TinyJAMBU state with the key, nonce, and associated data */ - tiny_jambu_setup(state, key, 4, TINYJAMBU_ROUNDS(1024), npub); - tiny_jambu_process_ad(state, key, 4, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - tiny_jambu_decrypt(state, key, 4, TINYJAMBU_ROUNDS(1024), m, c, *mlen); - - /* Check the authentication tag */ - tiny_jambu_generate_tag(state, key, 4, TINYJAMBU_ROUNDS(1024), tag); - return aead_check_tag(m, *mlen, tag, c + *mlen, TINY_JAMBU_TAG_SIZE); -} - -int tiny_jambu_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t state[TINY_JAMBU_STATE_SIZE]; - uint32_t key[12]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + TINY_JAMBU_TAG_SIZE; - - /* Unpack the key and duplicate it to make the length a multiple of 4 */ - key[6] = key[0] = le_load_word32(k); - key[7] = key[1] = le_load_word32(k + 4); - key[8] = key[2] = le_load_word32(k + 8); - key[9] = key[3] = le_load_word32(k + 12); - key[10] = key[4] = le_load_word32(k + 16); - key[11] = key[5] = le_load_word32(k + 20); - - /* Set up the TinyJAMBU state with the key, nonce, and 
associated data */ - tiny_jambu_setup(state, key, 12, TINYJAMBU_ROUNDS(1152), npub); - tiny_jambu_process_ad(state, key, 12, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - tiny_jambu_encrypt(state, key, 12, TINYJAMBU_ROUNDS(1152), c, m, mlen); - - /* Generate the authentication tag */ - tiny_jambu_generate_tag(state, key, 12, TINYJAMBU_ROUNDS(1152), c + mlen); - return 0; -} - -int tiny_jambu_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t state[TINY_JAMBU_STATE_SIZE]; - uint32_t key[12]; - unsigned char tag[TINY_JAMBU_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < TINY_JAMBU_TAG_SIZE) - return -1; - *mlen = clen - TINY_JAMBU_TAG_SIZE; - - /* Unpack the key and duplicate it to make the length a multiple of 4 */ - key[6] = key[0] = le_load_word32(k); - key[7] = key[1] = le_load_word32(k + 4); - key[8] = key[2] = le_load_word32(k + 8); - key[9] = key[3] = le_load_word32(k + 12); - key[10] = key[4] = le_load_word32(k + 16); - key[11] = key[5] = le_load_word32(k + 20); - - /* Set up the TinyJAMBU state with the key, nonce, and associated data */ - tiny_jambu_setup(state, key, 12, TINYJAMBU_ROUNDS(1152), npub); - tiny_jambu_process_ad(state, key, 12, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - tiny_jambu_decrypt(state, key, 12, TINYJAMBU_ROUNDS(1152), m, c, *mlen); - - /* Check the authentication tag */ - tiny_jambu_generate_tag(state, key, 12, TINYJAMBU_ROUNDS(1152), tag); - return aead_check_tag(m, *mlen, tag, c + *mlen, TINY_JAMBU_TAG_SIZE); -} - -int tiny_jambu_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t state[TINY_JAMBU_STATE_SIZE]; - uint32_t key[8]; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + TINY_JAMBU_TAG_SIZE; - - /* Unpack the key */ - key[0] = le_load_word32(k); - key[1] = le_load_word32(k + 4); - key[2] = le_load_word32(k + 8); - key[3] = le_load_word32(k + 12); - key[4] = le_load_word32(k + 16); - key[5] = le_load_word32(k + 20); - key[6] = le_load_word32(k + 24); - key[7] = le_load_word32(k + 28); - - /* Set up the TinyJAMBU state with the key, nonce, and associated data */ - tiny_jambu_setup(state, key, 8, TINYJAMBU_ROUNDS(1280), npub); - tiny_jambu_process_ad(state, key, 8, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - tiny_jambu_encrypt(state, key, 8, TINYJAMBU_ROUNDS(1280), c, m, mlen); - - /* Generate the authentication tag */ - tiny_jambu_generate_tag(state, key, 8, TINYJAMBU_ROUNDS(1280), c + mlen); - return 0; -} - -int tiny_jambu_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - uint32_t state[TINY_JAMBU_STATE_SIZE]; - uint32_t key[8]; - unsigned char tag[TINY_JAMBU_TAG_SIZE]; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < TINY_JAMBU_TAG_SIZE) - return -1; - *mlen = clen - TINY_JAMBU_TAG_SIZE; - - /* Unpack the key */ 
- key[0] = le_load_word32(k); - key[1] = le_load_word32(k + 4); - key[2] = le_load_word32(k + 8); - key[3] = le_load_word32(k + 12); - key[4] = le_load_word32(k + 16); - key[5] = le_load_word32(k + 20); - key[6] = le_load_word32(k + 24); - key[7] = le_load_word32(k + 28); - - /* Set up the TinyJAMBU state with the key, nonce, and associated data */ - tiny_jambu_setup(state, key, 8, TINYJAMBU_ROUNDS(1280), npub); - tiny_jambu_process_ad(state, key, 8, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - tiny_jambu_decrypt(state, key, 8, TINYJAMBU_ROUNDS(1280), m, c, *mlen); - - /* Check the authentication tag */ - tiny_jambu_generate_tag(state, key, 8, TINYJAMBU_ROUNDS(1280), tag); - return aead_check_tag(m, *mlen, tag, c + *mlen, TINY_JAMBU_TAG_SIZE); -} diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/tinyjambu.h b/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/tinyjambu.h deleted file mode 100644 index cb304ff..0000000 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys-avr/tinyjambu.h +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_TINYJAMBU_H -#define LWCRYPTO_TINYJAMBU_H - -#include "aead-common.h" - -/** - * \file tinyjambu.h - * \brief TinyJAMBU authenticated encryption algorithm. - * - * TinyJAMBU is a family of encryption algorithms that are built around a - * lightweight 128-bit permutation. There are three variants of TinyJAMBU - * with different key sizes: - * - * \li TinyJAMBU-128 with a 128-bit key, a 96-bit nonce, and a 64-bit tag. - * This is the primary member of the family. - * \li TinyJAMBU-192 with a 192-bit key, a 96-bit nonce, and a 64-bit tag. - * \li TinyJAMBU-256 with a 256-bit key, a 96-bit nonce, and a 64-bit tag. - * - * TinyJAMBU has one of the smallest RAM and flash memory footprints - * out of all the algorithms in this library. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for TinyJAMBU-128. - */ -#define TINY_JAMBU_128_KEY_SIZE 16 - -/** - * \brief Size of the key for TinyJAMBU-192. - */ -#define TINY_JAMBU_192_KEY_SIZE 24 - -/** - * \brief Size of the key for TinyJAMBU-256. - */ -#define TINY_JAMBU_256_KEY_SIZE 32 - -/** - * \brief Size of the authentication tag for all TinyJAMBU variants. - */ -#define TINY_JAMBU_TAG_SIZE 8 - -/** - * \brief Size of the nonce for all TinyJAMBU variants. 
- */ -#define TINY_JAMBU_NONCE_SIZE 12 - -/** - * \brief Meta-information block for the TinyJAMBU-128 cipher. - */ -extern aead_cipher_t const tiny_jambu_128_cipher; - -/** - * \brief Meta-information block for the TinyJAMBU-192 cipher. - */ -extern aead_cipher_t const tiny_jambu_192_cipher; - -/** - * \brief Meta-information block for the TinyJAMBU-256 cipher. - */ -extern aead_cipher_t const tiny_jambu_256_cipher; - -/** - * \brief Encrypts and authenticates a packet with TinyJAMBU-128. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa tiny_jambu_128_aead_decrypt() - */ -int tiny_jambu_128_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with TinyJAMBU-128. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa tiny_jambu_128_aead_encrypt() - */ -int tiny_jambu_128_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with TinyJAMBU-192. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. 
- * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 24 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa tiny_jambu_192_aead_decrypt() - */ -int tiny_jambu_192_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with TinyJAMBU-192. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 24 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa tiny_jambu_192_aead_encrypt() - */ -int tiny_jambu_192_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Encrypts and authenticates a packet with TinyJAMBU-256. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 8 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 32 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa tiny_jambu_256_aead_decrypt() - */ -int tiny_jambu_256_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with TinyJAMBU-256. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. 
- * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 8 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 12 bytes in length. - * \param k Points to the 32 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa tiny_jambu_256_aead_encrypt() - */ -int tiny_jambu_256_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys/internal-tinyjambu-avr.S b/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys/internal-tinyjambu-avr.S new file mode 100644 index 0000000..c7f2d1c --- /dev/null +++ b/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys/internal-tinyjambu-avr.S @@ -0,0 +1,471 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global tiny_jambu_permutation + .type tiny_jambu_permutation, @function +tiny_jambu_permutation: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + push r16 + push r17 + movw r26,r24 + movw r30,r22 +.L__stack_usage = 18 + ld r22,X+ + ld r23,X+ + ld r28,X+ + ld r29,X+ + ld r2,X+ + ld r3,X+ + ld r4,X+ + ld r5,X+ + ld r6,X+ + ld r7,X+ + ld r8,X+ + ld r9,X+ + ld r10,X+ + ld r11,X+ + ld r12,X+ + ld r13,X+ + lsl r20 + lsl r20 + mov r19,r1 +19: + movw r24,r4 + movw r16,r6 + mov r15,r3 + lsl r15 + rol r24 + rol r25 + rol r16 + rol r17 + eor r22,r24 + eor r23,r25 + eor r28,r16 + eor r29,r17 + mov r14,r7 + mov r15,r8 + mov r24,r9 + mov r25,r10 + mov r0,r6 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + mov r16,r9 + mov r0,r8 + mov r17,r10 + mov r21,r11 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + and r14,r16 + and r15,r17 + and r24,r21 + mov r16,r11 + mov r17,r12 + lsl r16 + rol r17 + lsl r16 + rol r17 + lsl r16 + rol r17 + and r25,r17 + com r14 + com r15 + com r24 + com r25 + eor r22,r14 + eor r23,r15 + eor r28,r24 + eor r29,r25 + movw r24,r10 + movw r16,r12 + mov r15,r9 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + eor r22,r15 + eor r23,r24 + eor r28,r25 + eor r29,r16 + ld r14,Z+ + ld r15,Z+ + ld r24,Z+ + ld r25,Z+ + eor r22,r14 + eor r23,r15 + eor r28,r24 + eor r29,r25 + movw r24,r8 + movw r16,r10 + mov r15,r7 + lsl r15 + rol r24 + rol r25 + rol r16 + rol r17 + eor r2,r24 + eor r3,r25 + eor r4,r16 + eor r5,r17 + mov r14,r11 + mov r15,r12 + mov r24,r13 + mov r25,r22 + mov r0,r10 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + mov r16,r13 + mov r0,r12 + mov r17,r22 + mov r21,r23 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + and r14,r16 + and 
r15,r17 + and r24,r21 + mov r16,r23 + mov r17,r28 + lsl r16 + rol r17 + lsl r16 + rol r17 + lsl r16 + rol r17 + and r25,r17 + com r14 + com r15 + com r24 + com r25 + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + movw r24,r22 + movw r16,r28 + mov r15,r13 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + eor r2,r15 + eor r3,r24 + eor r4,r25 + eor r5,r16 + ld r14,Z+ + ld r15,Z+ + ld r24,Z+ + ld r25,Z+ + eor r2,r14 + eor r3,r15 + eor r4,r24 + eor r5,r25 + movw r24,r12 + movw r16,r22 + mov r15,r11 + lsl r15 + rol r24 + rol r25 + rol r16 + rol r17 + eor r6,r24 + eor r7,r25 + eor r8,r16 + eor r9,r17 + mov r14,r23 + mov r15,r28 + mov r24,r29 + mov r25,r2 + mov r0,r22 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + mov r16,r29 + mov r0,r28 + mov r17,r2 + mov r21,r3 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + and r14,r16 + and r15,r17 + and r24,r21 + mov r16,r3 + mov r17,r4 + lsl r16 + rol r17 + lsl r16 + rol r17 + lsl r16 + rol r17 + and r25,r17 + com r14 + com r15 + com r24 + com r25 + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + movw r24,r2 + movw r16,r4 + mov r15,r29 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + eor r6,r15 + eor r7,r24 + eor r8,r25 + eor r9,r16 + ld r14,Z+ + ld r15,Z+ + ld r24,Z+ + ld r25,Z+ + eor r6,r14 + eor r7,r15 + eor r8,r24 + eor r9,r25 + movw r24,r28 + movw r16,r2 + mov r15,r23 + lsl r15 + rol r24 + rol r25 + rol r16 + rol r17 + eor r10,r24 + eor r11,r25 + eor r12,r16 + eor r13,r17 + mov r14,r3 + mov r15,r4 + mov r24,r5 + mov r25,r6 + mov r0,r2 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + lsl r0 + rol r14 + rol r15 + rol r24 + rol r25 + mov r16,r5 + mov r0,r4 + mov r17,r6 + mov r21,r7 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + lsl r0 + rol r16 + rol r17 + rol r21 + and r14,r16 + and r15,r17 + and r24,r21 + mov r16,r7 + mov r17,r8 + lsl r16 + rol r17 + lsl r16 + rol r17 + lsl r16 + rol r17 + and r25,r17 + com r14 + com r15 + com r24 + com r25 + eor r10,r14 + eor r11,r15 + eor r12,r24 + eor r13,r25 + movw r24,r6 + movw r16,r8 + mov r15,r5 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + lsr r17 + ror r16 + ror r25 + ror r24 + ror r15 + eor r10,r15 + eor r11,r24 + eor r12,r25 + eor r13,r16 + ld r14,Z+ + ld r15,Z+ + ld r24,Z+ + ld r25,Z+ + eor r10,r14 + eor r11,r15 + eor r12,r24 + eor r13,r25 + dec r18 + breq 401f + subi r19,240 + cp r19,r20 + breq 5396f + rjmp 19b +5396: + sub r30,r20 + sbc r31,r1 + mov r19,r1 + rjmp 19b +401: + st -X,r13 + st -X,r12 + st -X,r11 + st -X,r10 + st -X,r9 + st -X,r8 + st -X,r7 + st -X,r6 + st -X,r5 + st -X,r4 + st -X,r3 + st -X,r2 + st -X,r29 + st -X,r28 + st -X,r23 + st -X,r22 + pop r17 + pop r16 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size tiny_jambu_permutation, .-tiny_jambu_permutation + +#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys/internal-tinyjambu.c b/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys/internal-tinyjambu.c index 7308718..7f6fcf2 100644 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys/internal-tinyjambu.c +++ 
b/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys/internal-tinyjambu.c @@ -22,6 +22,8 @@ #include "internal-tinyjambu.h" +#if !defined(__AVR__) + void tiny_jambu_permutation (uint32_t state[TINY_JAMBU_STATE_SIZE], const uint32_t *key, unsigned key_words, unsigned rounds) @@ -64,3 +66,5 @@ void tiny_jambu_permutation state[2] = s2; state[3] = s3; } + +#endif diff --git a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys/internal-util.h b/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys/internal-util.h index e79158c..e30166d 100644 --- a/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys/internal-util.h +++ b/tinyjambu/Implementations/crypto_aead/tinyjambu256/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define 
leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define 
rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/aead-common.c b/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/aead-common.h b/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
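For reference alongside the function-pointer types being removed here, the sketch below shows how an application might drive one of these implementations through the aead_cipher_t meta-information block defined further down in this header; example_seal is a hypothetical helper name and the call pattern is illustrative only, not code from the patch.

#include "aead-common.h"

/* Hypothetical helper: seal a message with whichever AEAD the caller picked,
 * e.g. cipher == &tiny_jambu_128_cipher from the tinyjambu.c shown above. */
static int example_seal
    (const aead_cipher_t *cipher,
     unsigned char *c, unsigned long long *clen,
     const unsigned char *m, unsigned long long mlen,
     const unsigned char *npub, const unsigned char *k)
{
    /* nsec is unused by the ciphers in this library, so pass NULL;
     * on success *clen is mlen plus cipher->tag_len bytes of tag */
    return cipher->encrypt(c, clen, m, mlen, NULL, 0, NULL, npub, k);
}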
- */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Absorbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions.
Extensible Output Functions (XOF's) should - * provide the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result.
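To make the precheck parameter documented above concrete, here is a minimal sketch of how a hypothetical mode that also requires a padding byte to be zero might fold that extra condition into the constant-time tag check; finish_decrypt and pad_byte are illustrative names, and only aead_check_tag_precheck() comes from this header.

#include "aead-common.h"

static int finish_decrypt
    (unsigned char *plaintext, unsigned long long plaintext_len,
     const unsigned char *computed_tag, const unsigned char *received_tag,
     unsigned tag_len, unsigned char pad_byte)
{
    /* Constant-time: precheck becomes -1 when pad_byte is zero, 0 otherwise,
     * using the same (x - 1) >> 8 trick as aead_check_tag() itself */
    int precheck = ((int)pad_byte - 1) >> 8;
    return aead_check_tag_precheck
        (plaintext, plaintext_len, computed_tag, received_tag,
         tag_len, precheck);
}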
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/api.h b/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/encrypt.c b/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/encrypt.c deleted file mode 100644 index 0ed30f7..0000000 --- a/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "wage.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return wage_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return wage_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/internal-util.h b/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include <stdint.h> - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian.
- * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - 
((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. 
*/ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/internal-wage.c b/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/internal-wage.c deleted file mode 100644 index e9528c9..0000000 --- a/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/internal-wage.c +++ /dev/null @@ -1,512 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-wage.h" - -/** - * \brief Number of rounds for the WAGE permutation. - */ -#define WAGE_NUM_ROUNDS 111 - -/** - * \brief Define WAGE_64BIT to use the 64-bit version of the WAGE core - * permutation. Undefine to use the 8-bit version instead. - */ -#define WAGE_64BIT 1 - -/** - * \brief RC0 and RC1 round constants for WAGE, interleaved with each other. 
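- *
- * Layout note, inferred from wage_permute() below: the rounds are processed
- * three at a time and the constant pointer advances by six bytes per
- * iteration; within each group of six, rc[0], rc[2] and rc[4] supply the
- * RC0 constants and rc[1], rc[3] and rc[5] supply the RC1 constants.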
- */ -static unsigned char const wage_rc[WAGE_NUM_ROUNDS * 2] = { - 0x7f, 0x3f, 0x1f, 0x0f, 0x07, 0x03, 0x01, 0x40, 0x20, 0x10, 0x08, 0x04, - 0x02, 0x41, 0x60, 0x30, 0x18, 0x0c, 0x06, 0x43, 0x21, 0x50, 0x28, 0x14, - 0x0a, 0x45, 0x62, 0x71, 0x78, 0x3c, 0x1e, 0x4f, 0x27, 0x13, 0x09, 0x44, - 0x22, 0x51, 0x68, 0x34, 0x1a, 0x4d, 0x66, 0x73, 0x39, 0x5c, 0x2e, 0x57, - 0x2b, 0x15, 0x4a, 0x65, 0x72, 0x79, 0x7c, 0x3e, 0x5f, 0x2f, 0x17, 0x0b, - 0x05, 0x42, 0x61, 0x70, 0x38, 0x1c, 0x0e, 0x47, 0x23, 0x11, 0x48, 0x24, - 0x12, 0x49, 0x64, 0x32, 0x59, 0x6c, 0x36, 0x5b, 0x2d, 0x56, 0x6b, 0x35, - 0x5a, 0x6d, 0x76, 0x7b, 0x3d, 0x5e, 0x6f, 0x37, 0x1b, 0x0d, 0x46, 0x63, - 0x31, 0x58, 0x2c, 0x16, 0x4b, 0x25, 0x52, 0x69, 0x74, 0x3a, 0x5d, 0x6e, - 0x77, 0x3b, 0x1d, 0x4e, 0x67, 0x33, 0x19, 0x4c, 0x26, 0x53, 0x29, 0x54, - 0x2a, 0x55, 0x6a, 0x75, 0x7a, 0x7d, 0x7e, 0x7f, 0x3f, 0x1f, 0x0f, 0x07, - 0x03, 0x01, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x41, 0x60, 0x30, 0x18, - 0x0c, 0x06, 0x43, 0x21, 0x50, 0x28, 0x14, 0x0a, 0x45, 0x62, 0x71, 0x78, - 0x3c, 0x1e, 0x4f, 0x27, 0x13, 0x09, 0x44, 0x22, 0x51, 0x68, 0x34, 0x1a, - 0x4d, 0x66, 0x73, 0x39, 0x5c, 0x2e, 0x57, 0x2b, 0x15, 0x4a, 0x65, 0x72, - 0x79, 0x7c, 0x3e, 0x5f, 0x2f, 0x17, 0x0b, 0x05, 0x42, 0x61, 0x70, 0x38, - 0x1c, 0x0e, 0x47, 0x23, 0x11, 0x48, 0x24, 0x12, 0x49, 0x64, 0x32, 0x59, - 0x6c, 0x36, 0x5b, 0x2d, 0x56, 0x6b, 0x35, 0x5a, 0x6d, 0x76, 0x7b, 0x3d, - 0x5e, 0x6f, 0x37, 0x1b, 0x0d, 0x46 -}; - -/** - * \brief Apply the WGP permutation to a 7-bit component. - * - * Warning: This is not constant cache. - */ -static unsigned char const wage_wgp[128] = { - 0x00, 0x12, 0x0a, 0x4b, 0x66, 0x0c, 0x48, 0x73, 0x79, 0x3e, 0x61, 0x51, - 0x01, 0x15, 0x17, 0x0e, 0x7e, 0x33, 0x68, 0x36, 0x42, 0x35, 0x37, 0x5e, - 0x53, 0x4c, 0x3f, 0x54, 0x58, 0x6e, 0x56, 0x2a, 0x1d, 0x25, 0x6d, 0x65, - 0x5b, 0x71, 0x2f, 0x20, 0x06, 0x18, 0x29, 0x3a, 0x0d, 0x7a, 0x6c, 0x1b, - 0x19, 0x43, 0x70, 0x41, 0x49, 0x22, 0x77, 0x60, 0x4f, 0x45, 0x55, 0x02, - 0x63, 0x47, 0x75, 0x2d, 0x40, 0x46, 0x7d, 0x5c, 0x7c, 0x59, 0x26, 0x0b, - 0x09, 0x03, 0x57, 0x5d, 0x27, 0x78, 0x30, 0x2e, 0x44, 0x52, 0x3b, 0x08, - 0x67, 0x2c, 0x05, 0x6b, 0x2b, 0x1a, 0x21, 0x38, 0x07, 0x0f, 0x4a, 0x11, - 0x50, 0x6a, 0x28, 0x31, 0x10, 0x4d, 0x5f, 0x72, 0x39, 0x16, 0x5a, 0x13, - 0x04, 0x3c, 0x34, 0x1f, 0x76, 0x1e, 0x14, 0x23, 0x1c, 0x32, 0x4e, 0x7b, - 0x24, 0x74, 0x7f, 0x3d, 0x69, 0x64, 0x62, 0x6f -}; - -/** - * \brief Evaluate the WAGE S-box three times in parallel. - * - * \param x6 The input values to the S-box. - * \return The output values from the S-box. - * - * This function directly evaluates the S-box in bit-sliced form - * using the algorithm from the specification. 
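- *
- * Packing convention, inferred from the 0x00010101 masks and the callers
- * below: each of the three 7-bit components occupies bits 0..6 of one of
- * the three low bytes of \a x6, for example
- * x6 = s[8] | ((uint32_t)(s[9]) << 8) | ((uint32_t)(s[10]) << 16),
- * and the result is returned in the same one-component-per-byte layout.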
- */ -STATIC_INLINE uint32_t wage_sbox_parallel_3(uint32_t x6) -{ - uint32_t x0 = x6 >> 6; - uint32_t x1 = x6 >> 5; - uint32_t x2 = x6 >> 4; - uint32_t x3 = x6 >> 3; - uint32_t x4 = x6 >> 2; - uint32_t x5 = x6 >> 1; - x0 ^= (x2 & x3); x3 = ~x3; x3 ^= (x5 & x6); x5 = ~x5; x5 ^= (x2 & x4); - x6 ^= (x0 & x4); x4 = ~x4; x4 ^= (x5 & x1); x5 = ~x5; x5 ^= (x0 & x2); - x1 ^= (x6 & x2); x2 = ~x2; x2 ^= (x5 & x3); x5 = ~x5; x5 ^= (x6 & x0); - x3 ^= (x1 & x0); x0 = ~x0; x0 ^= (x5 & x4); x5 = ~x5; x5 ^= (x1 & x6); - x4 ^= (x3 & x6); x6 = ~x6; x6 ^= (x5 & x2); x5 = ~x5; x5 ^= (x3 & x1); - x2 ^= (x4 & x1); x1 = ~x1; x1 ^= (x5 & x0); x5 = ~x5; x5 ^= (x4 & x3); - x2 = ~x2; x4 = ~x4; - return ((x2 & 0x00010101U) << 6) ^ - ((x6 & 0x00010101U) << 5) ^ - ((x4 & 0x00010101U) << 4) ^ - ((x1 & 0x00010101U) << 3) ^ - ((x3 & 0x00010101U) << 2) ^ - ((x5 & 0x00010101U) << 1) ^ - (x0 & 0x00010101U); -} - -void wage_permute(unsigned char s[WAGE_STATE_SIZE]) -{ -#if defined(WAGE_64BIT) - const unsigned char *rc = wage_rc; - unsigned char round; - uint64_t x0, x1, x2, x3, x4; - uint32_t fb, temp; - - /* Load the state into 64-bit words. Each word will have up to eight - * 7-bit components with the MSB of each component fixed at zero. - * - * x0 = s[0] .. s[7] - * x1 = s[8] .. s[15] - * x2 = s[16] .. s[23] - * x3 = s[24] .. s[31] - * x4 = s[32] .. s[36] - */ - x0 = le_load_word64(s); - x1 = le_load_word64(s + 8); - x2 = le_load_word64(s + 16); - x3 = le_load_word64(s + 24); - x4 = le_load_word32(s + 32) | (((uint64_t)(s[36])) << 32); - - /* Perform all rounds 3 at a time to reduce the state rotation overhead */ - for (round = 0; round < (WAGE_NUM_ROUNDS / 3); ++round, rc += 6) { - /* Calculate the feedback value for the LFSR. - * - * fb = omega(s[0]) ^ s[6] ^ s[8] ^ s[12] ^ s[13] ^ s[19] ^ - * s[24] ^ s[26] ^ s[30] ^ s[31] ^ WGP(s[36]) ^ RC1[round] - * - * where omega(x) is (x >> 1) if the low bit of x is zero and - * (x >> 1) ^ 0x78 if the low bit of x is one. 
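- *
- * Equivalently, omega(x) can be computed without a branch as
- * (x >> 1) ^ (0x78 & -(x & 0x01)); this is the form used by the
- * 8-bit code path later in this function.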
- */ - /* fb0 = omega(s[0]), fb1 = omega(s[1]), fb2 = omega(s[2]) */ - temp = (uint32_t)x0; - fb = (temp & 0x00010101U) << 6; - fb ^= (fb >> 1); - fb ^= (fb >> 2); - fb ^= (temp >> 1) & 0x003F3F3FU; - /* fb0 ^= s[6], fb1 ^= s[7], fb2 ^= s[8] */ - fb ^= (uint32_t)(x0 >> 48); - fb ^= ((uint32_t)x1) << 16; - /* fb0 ^= s[8], fb1 ^= s[9], fb2 ^= s[10] */ - fb ^= (uint32_t)x1; - /* fb0 ^= s[12], fb1 ^= s[13], fb2 ^= s[14] */ - fb ^= (uint32_t)(x1 >> 32); - /* fb0 ^= s[13], fb1 ^= s[14], fb2 ^= s[15] */ - fb ^= (uint32_t)(x1 >> 40); - /* fb0 ^= s[19], fb1 ^= s[20], fb2 ^= s[21] */ - fb ^= (uint32_t)(x2 >> 24); - /* fb0 ^= s[24], fb1 ^= s[25], fb2 ^= s[26] */ - fb ^= (uint32_t)x3; - /* fb0 ^= s[26], fb1 ^= s[27], fb2 ^= s[28] */ - fb ^= (uint32_t)(x3 >> 16); - /* fb0 ^= s[30], fb1 ^= s[31], fb2 ^= s[32] */ - fb ^= (uint32_t)(x3 >> 48); - fb ^= ((uint32_t)x4) << 16; - /* fb0 ^= s[31], fb1 ^= s[32], fb2 ^= s[33] */ - fb ^= (uint32_t)(x3 >> 56); - fb ^= ((uint32_t)x4) << 8; - /* fb0,1,2 ^= RC1 */ - temp = rc[1] | (((uint32_t)(rc[3])) << 8) | (((uint32_t)(rc[5])) << 16); - fb ^= temp; - /* fb0 ^= WGP(s[36]) */ - fb ^= wage_wgp[(uint8_t)(x4 >> 32)]; - /* fb1 ^= WGP(fb0) */ - fb ^= ((uint32_t)(wage_wgp[fb & 0xFF])) << 8; - /* fb2 ^= WGP(fb1) */ - fb ^= ((uint32_t)(wage_wgp[(fb >> 8) & 0xFF])) << 16; - - /* Apply the S-box and WGP permutation to certain components */ - /* s[5] ^= sbox[s[8]], s[6] ^= sbox[s[9]], s[7] ^= sbox[s[10]] */ - x0 ^= ((uint64_t)wage_sbox_parallel_3((uint32_t)x1)) << 40; - /* s[11] ^= sbox[s[15]], s[12] ^= sbox[s[16]], s[13] ^= sbox[s[17]] */ - x1 ^= ((uint64_t)wage_sbox_parallel_3 - ((uint32_t)((x1 >> 56) | (x2 << 8)))) << 24; - /* s[24] ^= sbox[s[27]], s[25] ^= sbox[s[28]], s[26] ^= sbox[s[29]] */ - x3 ^= (uint64_t)wage_sbox_parallel_3((uint32_t)(x3 >> 24)); - /* s[30] ^= sbox[s[34]], s[31] ^= sbox[s[35]], s[32] ^= sbox[s[36]] */ - temp = wage_sbox_parallel_3((uint32_t)(x4 >> 16)); - x3 ^= ((uint64_t)temp) << 48; - x4 ^= temp >> 16; - /* s[19] ^= WGP[s[18]] ^ RC0 */ - temp = (uint32_t)(x2 >> 16); /* s[18..21] */ - temp ^= ((uint32_t)(wage_wgp[temp & 0x7F])) << 8; - temp ^= ((uint32_t)(rc[0])) << 8; - /* s[20] ^= WGP[s[19]] ^ RC0 */ - temp ^= ((uint32_t)(wage_wgp[(temp >> 8) & 0x7F])) << 16; - temp ^= ((uint32_t)(rc[2])) << 16; - /* s[21] ^= WGP[s[20]] ^ RC0 */ - temp ^= ((uint32_t)(wage_wgp[(temp >> 16) & 0x7F])) << 24; - temp ^= ((uint32_t)(rc[4])) << 24; - temp &= 0x7F7F7F00U; - x2 = (x2 & 0xFFFF000000FFFFFFULL) | (((uint64_t)temp) << 16); - - /* Rotate the components of the state by 3 positions */ - x0 = (x0 >> 24) | (x1 << 40); - x1 = (x1 >> 24) | (x2 << 40); - x2 = (x2 >> 24) | (x3 << 40); - x3 = (x3 >> 24) | (x4 << 40); - x4 = (x4 >> 24) | (((uint64_t)(fb & 0x00FFFFFFU)) << 16); - } - - /* Save the words back to the state */ - le_store_word64(s, x0); - le_store_word64(s + 8, x1); - le_store_word64(s + 16, x2); - le_store_word64(s + 24, x3); - le_store_word32(s + 32, (uint32_t)x4); - s[36] = (unsigned char)(x4 >> 32); -#else /* 8-bit version of WAGE */ - const unsigned char *rc = wage_rc; - unsigned char round, index; - unsigned char fb0, fb1, fb2; - uint32_t temp; - - /* Perform all rounds 3 at a time to reduce the state rotation overhead */ - for (round = 0; round < (WAGE_NUM_ROUNDS / 3); ++round, rc += 6) { - /* Calculate the feedback value for the LFSR. 
- * - * fb = omega(s[0]) ^ s[6] ^ s[8] ^ s[12] ^ s[13] ^ s[19] ^ - * s[24] ^ s[26] ^ s[30] ^ s[31] ^ WGP(s[36]) ^ RC1[round] - * - * where omega(x) is (x >> 1) if the low bit of x is zero and - * (x >> 1) ^ 0x78 if the low bit of x is one. - */ - fb0 = (s[0] >> 1) ^ (0x78 & -(s[0] & 0x01)); - fb0 ^= s[6] ^ s[8] ^ s[12] ^ s[13] ^ s[19] ^ - s[24] ^ s[26] ^ s[30] ^ s[31] ^ rc[1]; - fb0 ^= wage_wgp[s[36]]; - fb1 = (s[1] >> 1) ^ (0x78 & -(s[1] & 0x01)); - fb1 ^= s[7] ^ s[9] ^ s[13] ^ s[14] ^ s[20] ^ - s[25] ^ s[27] ^ s[31] ^ s[32] ^ rc[3]; - fb1 ^= wage_wgp[fb0]; - fb2 = (s[2] >> 1) ^ (0x78 & -(s[2] & 0x01)); - fb2 ^= s[8] ^ s[10] ^ s[14] ^ s[15] ^ s[21] ^ - s[26] ^ s[28] ^ s[32] ^ s[33] ^ rc[5]; - fb2 ^= wage_wgp[fb1]; - - /* Apply the S-box and WGP permutation to certain components */ - temp = s[8] | (((uint32_t)(s[9])) << 8) | (((uint32_t)(s[10])) << 16); - temp = wage_sbox_parallel_3(temp); - s[5] ^= (unsigned char)temp; - s[6] ^= (unsigned char)(temp >> 8); - s[7] ^= (unsigned char)(temp >> 16); - temp = s[15] | (((uint32_t)(s[16])) << 8) | (((uint32_t)(s[17])) << 16); - temp = wage_sbox_parallel_3(temp); - s[11] ^= (unsigned char)temp; - s[12] ^= (unsigned char)(temp >> 8); - s[13] ^= (unsigned char)(temp >> 16); - s[19] ^= wage_wgp[s[18]] ^ rc[0]; - s[20] ^= wage_wgp[s[19]] ^ rc[2]; - s[21] ^= wage_wgp[s[20]] ^ rc[4]; - temp = s[27] | (((uint32_t)(s[28])) << 8) | (((uint32_t)(s[29])) << 16); - temp = wage_sbox_parallel_3(temp); - s[24] ^= (unsigned char)temp; - s[25] ^= (unsigned char)(temp >> 8); - s[26] ^= (unsigned char)(temp >> 16); - temp = s[34] | (((uint32_t)(s[35])) << 8) | (((uint32_t)(s[36])) << 16); - temp = wage_sbox_parallel_3(temp); - s[30] ^= (unsigned char)temp; - s[31] ^= (unsigned char)(temp >> 8); - s[32] ^= (unsigned char)(temp >> 16); - - /* Rotate the components of the state by 3 positions */ - for (index = 0; index < WAGE_STATE_SIZE - 3; ++index) - s[index] = s[index + 3]; - s[WAGE_STATE_SIZE - 3] = fb0; - s[WAGE_STATE_SIZE - 2] = fb1; - s[WAGE_STATE_SIZE - 1] = fb2; - } -#endif -} - -/* 7-bit components for the rate: 8, 9, 15, 16, 18, 27, 28, 34, 35, 36 */ - -void wage_absorb - (unsigned char s[WAGE_STATE_SIZE], const unsigned char data[8], - unsigned char domain) -{ - uint32_t temp; - temp = be_load_word32(data); - s[8] ^= (unsigned char)(temp >> 25); - s[9] ^= (unsigned char)((temp >> 18) & 0x7F); - s[15] ^= (unsigned char)((temp >> 11) & 0x7F); - s[16] ^= (unsigned char)((temp >> 4) & 0x7F); - s[18] ^= (unsigned char)((temp << 3) & 0x7F); - temp = be_load_word32(data + 4); - s[18] ^= (unsigned char)(temp >> 29); - s[27] ^= (unsigned char)((temp >> 22) & 0x7F); - s[28] ^= (unsigned char)((temp >> 15) & 0x7F); - s[34] ^= (unsigned char)((temp >> 8) & 0x7F); - s[35] ^= (unsigned char)((temp >> 1) & 0x7F); - s[36] ^= (unsigned char)((temp << 6) & 0x7F); - s[0] ^= domain; -} - -void wage_get_rate - (const unsigned char s[WAGE_STATE_SIZE], unsigned char data[8]) -{ - uint32_t temp; - temp = ((uint32_t)(s[8])) << 25; - temp |= ((uint32_t)(s[9])) << 18; - temp |= ((uint32_t)(s[15])) << 11; - temp |= ((uint32_t)(s[16])) << 4; - temp |= ((uint32_t)(s[18])) >> 3; - be_store_word32(data, temp); - temp = ((uint32_t)(s[18])) << 29; - temp |= ((uint32_t)(s[27])) << 22; - temp |= ((uint32_t)(s[28])) << 15; - temp |= ((uint32_t)(s[34])) << 8; - temp |= ((uint32_t)(s[35])) << 1; - temp |= ((uint32_t)(s[36])) >> 6; - be_store_word32(data + 4, temp); -} - -void wage_set_rate - (unsigned char s[WAGE_STATE_SIZE], const unsigned char data[8], - unsigned char domain) -{ - uint32_t 
temp; - temp = be_load_word32(data); - s[8] = (unsigned char)(temp >> 25); - s[9] = (unsigned char)((temp >> 18) & 0x7F); - s[15] = (unsigned char)((temp >> 11) & 0x7F); - s[16] = (unsigned char)((temp >> 4) & 0x7F); - s[18] = (unsigned char)((temp << 3) & 0x7F); - temp = be_load_word32(data + 4); - s[18] ^= (unsigned char)(temp >> 29); - s[27] = (unsigned char)((temp >> 22) & 0x7F); - s[28] = (unsigned char)((temp >> 15) & 0x7F); - s[34] = (unsigned char)((temp >> 8) & 0x7F); - s[35] = (unsigned char)((temp >> 1) & 0x7F); - s[36] = (unsigned char)(((temp << 6) & 0x40) ^ (s[36] & 0x3F)); - s[0] ^= domain; -} - -/** - * \brief Converts a 128-bit value into an array of 7-bit components. - * - * \param out Points to the output array of 7-bit components. - * \param in Points to the 128-bit value to convert. - */ -static void wage_128bit_to_components - (unsigned char out[19], const unsigned char *in) -{ - uint32_t temp; - temp = be_load_word32(in); - out[0] = (unsigned char)(temp >> 25); - out[1] = (unsigned char)((temp >> 18) & 0x7F); - out[2] = (unsigned char)((temp >> 11) & 0x7F); - out[3] = (unsigned char)((temp >> 4) & 0x7F); - out[4] = (unsigned char)((temp << 3) & 0x7F); - temp = be_load_word32(in + 4); - out[4] ^= (unsigned char)(temp >> 29); - out[5] = (unsigned char)((temp >> 22) & 0x7F); - out[6] = (unsigned char)((temp >> 15) & 0x7F); - out[7] = (unsigned char)((temp >> 8) & 0x7F); - out[8] = (unsigned char)((temp >> 1) & 0x7F); - out[18] = (unsigned char)((temp << 6) & 0x7F); - temp = be_load_word32(in + 8); - out[9] = (unsigned char)(temp >> 25); - out[10] = (unsigned char)((temp >> 18) & 0x7F); - out[11] = (unsigned char)((temp >> 11) & 0x7F); - out[12] = (unsigned char)((temp >> 4) & 0x7F); - out[13] = (unsigned char)((temp << 3) & 0x7F); - temp = be_load_word32(in + 12); - out[13] ^= (unsigned char)(temp >> 29); - out[14] = (unsigned char)((temp >> 22) & 0x7F); - out[15] = (unsigned char)((temp >> 15) & 0x7F); - out[16] = (unsigned char)((temp >> 8) & 0x7F); - out[17] = (unsigned char)((temp >> 1) & 0x7F); - out[18] ^= (unsigned char)((temp << 5) & 0x20); -} - -void wage_absorb_key - (unsigned char s[WAGE_STATE_SIZE], const unsigned char *key) -{ - unsigned char components[19]; - wage_128bit_to_components(components, key); - s[8] ^= components[0]; - s[9] ^= components[1]; - s[15] ^= components[2]; - s[16] ^= components[3]; - s[18] ^= components[4]; - s[27] ^= components[5]; - s[28] ^= components[6]; - s[34] ^= components[7]; - s[35] ^= components[8]; - s[36] ^= components[18] & 0x40; - wage_permute(s); - s[8] ^= components[9]; - s[9] ^= components[10]; - s[15] ^= components[11]; - s[16] ^= components[12]; - s[18] ^= components[13]; - s[27] ^= components[14]; - s[28] ^= components[15]; - s[34] ^= components[16]; - s[35] ^= components[17]; - s[36] ^= (components[18] << 1) & 0x40; - wage_permute(s); -} - -void wage_init - (unsigned char s[WAGE_STATE_SIZE], - const unsigned char *key, const unsigned char *nonce) -{ - unsigned char components[19]; - - /* Initialize the state with the key and nonce */ - wage_128bit_to_components(components, key); - s[0] = components[0]; - s[1] = components[2]; - s[2] = components[4]; - s[3] = components[6]; - s[4] = components[8]; - s[5] = components[10]; - s[6] = components[12]; - s[7] = components[14]; - s[8] = components[16]; - s[18] = components[18]; - s[19] = components[1]; - s[20] = components[3]; - s[21] = components[5]; - s[22] = components[7]; - s[23] = components[9]; - s[24] = components[11]; - s[25] = components[13]; - s[26] = 
components[15]; - s[27] = components[17]; - wage_128bit_to_components(components, nonce); - s[9] = components[1]; - s[10] = components[3]; - s[11] = components[5]; - s[12] = components[7]; - s[13] = components[9]; - s[14] = components[11]; - s[15] = components[13]; - s[16] = components[17]; - s[17] = components[15]; - s[18] ^= (components[18] >> 2); - s[28] = components[0]; - s[29] = components[2]; - s[30] = components[4]; - s[31] = components[6]; - s[32] = components[8]; - s[33] = components[10]; - s[34] = components[12]; - s[35] = components[14]; - s[36] = components[16]; - - /* Permute the state to absorb the key and nonce */ - wage_permute(s); - - /* Absorb the key again and permute the state */ - wage_absorb_key(s, key); -} - -void wage_extract_tag - (const unsigned char s[WAGE_STATE_SIZE], unsigned char tag[16]) -{ - unsigned char components[19]; - uint32_t temp; - - /* Extract the 7-bit components that make up the tag */ - for (temp = 0; temp < 9; ++temp) { - components[temp * 2] = s[28 + temp]; - components[temp * 2 + 1] = s[ 9 + temp]; - } - components[18] = (s[18] << 2) & 0x60; - - /* Convert from 7-bit component form back into bytes */ - temp = ((uint32_t)(components[0])) << 25; - temp |= ((uint32_t)(components[1])) << 18; - temp |= ((uint32_t)(components[2])) << 11; - temp |= ((uint32_t)(components[3])) << 4; - temp |= ((uint32_t)(components[4])) >> 3; - be_store_word32(tag, temp); - temp = ((uint32_t)(components[4])) << 29; - temp |= ((uint32_t)(components[5])) << 22; - temp |= ((uint32_t)(components[6])) << 15; - temp |= ((uint32_t)(components[7])) << 8; - temp |= ((uint32_t)(components[8])) << 1; - temp |= ((uint32_t)(components[9])) >> 6; - be_store_word32(tag + 4, temp); - temp = ((uint32_t)(components[9])) << 26; - temp |= ((uint32_t)(components[10])) << 19; - temp |= ((uint32_t)(components[11])) << 12; - temp |= ((uint32_t)(components[12])) << 5; - temp |= ((uint32_t)(components[13])) >> 2; - be_store_word32(tag + 8, temp); - temp = ((uint32_t)(components[13])) << 30; - temp |= ((uint32_t)(components[14])) << 23; - temp |= ((uint32_t)(components[15])) << 16; - temp |= ((uint32_t)(components[16])) << 9; - temp |= ((uint32_t)(components[17])) << 2; - temp |= ((uint32_t)(components[18])) >> 5; - be_store_word32(tag + 12, temp); -} diff --git a/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/internal-wage.h b/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/internal-wage.h deleted file mode 100644 index a0d23d7..0000000 --- a/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/internal-wage.h +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_WAGE_H -#define LW_INTERNAL_WAGE_H - -#include "internal-util.h" - -/** - * \file internal-wage.h - * \brief Internal implementation of the WAGE permutation. - * - * References: https://uwaterloo.ca/communications-security-lab/lwc/wage - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the WAGE state in bytes. - * - * The state is 259 bits, divided into 37 7-bit components, one per byte. - */ -#define WAGE_STATE_SIZE 37 - -/** - * \brief Permutes the WAGE state. - * - * \param s The WAGE state to be permuted. - */ -void wage_permute(unsigned char s[WAGE_STATE_SIZE]); - -/** - * \brief Absorbs 8 bytes into the WAGE state. - * - * \param s The WAGE state to be permuted. - * \param data The data to be absorbed. - * \param domain The domain separator for the absorbed data. - */ -void wage_absorb - (unsigned char s[WAGE_STATE_SIZE], const unsigned char data[8], - unsigned char domain); - -/** - * \brief Gets the 8 bytes of the rate from the WAGE state. - * - * \param s The WAGE state to get the bytes from. - * \param data Points to the buffer to receive the extracted bytes. - */ -void wage_get_rate - (const unsigned char s[WAGE_STATE_SIZE], unsigned char data[8]); - -/** - * \brief Sets the 8 bytes of the rate in the WAGE state. - * - * \param s The WAGE state to set the rate in. - * \param data Points to the bytes to set into the rate. - * \param domain The domain separator for the rate data. - */ -void wage_set_rate - (unsigned char s[WAGE_STATE_SIZE], const unsigned char data[8], - unsigned char domain); - -/** - * \brief Absorbs 16 key bytes into the WAGE state. - * - * \param s The WAGE state to be permuted. - * \param key Points to the key data to be absorbed. - */ -void wage_absorb_key - (unsigned char s[WAGE_STATE_SIZE], const unsigned char *key); - -/** - * \brief Initializes the WAGE state with a key and nonce. - * - * \param s The WAGE state to be initialized. - * \param key Points to the 128-bit key. - * \param nonce Points to the 128-bit nonce. - */ -void wage_init - (unsigned char s[WAGE_STATE_SIZE], - const unsigned char *key, const unsigned char *nonce); - -/** - * \brief Extracts the 128-bit authentication tag from the WAGE state. - * - * \param s The WAGE state to extract the tag from. - * \param tag Points to the buffer to receive the extracted tag. - */ -void wage_extract_tag - (const unsigned char s[WAGE_STATE_SIZE], unsigned char tag[16]); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/wage.c b/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/wage.c deleted file mode 100644 index 374409b..0000000 --- a/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/wage.c +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "wage.h" -#include "internal-wage.h" -#include - -aead_cipher_t const wage_cipher = { - "WAGE", - WAGE_KEY_SIZE, - WAGE_NONCE_SIZE, - WAGE_TAG_SIZE, - AEAD_FLAG_NONE, - wage_aead_encrypt, - wage_aead_decrypt -}; - -/** - * \brief Rate of absorbing data into the WAGE state in sponge mode. - */ -#define WAGE_RATE 8 - -/** - * \brief Processes associated data for WAGE. - * - * \param state Points to the WAGE state. - * \param pad Points to an 8-byte temporary buffer for handling padding. - * \param ad Points to the associated data. - * \param adlen Length of the associated data. - */ -static void wage_process_ad - (unsigned char state[WAGE_STATE_SIZE], unsigned char pad[WAGE_RATE], - const unsigned char *ad, unsigned long long adlen) -{ - unsigned temp; - - /* Process as many full blocks as possible */ - while (adlen >= WAGE_RATE) { - wage_absorb(state, ad, 0x40); - wage_permute(state); - ad += WAGE_RATE; - adlen -= WAGE_RATE; - } - - /* Pad and absorb the final block */ - temp = (unsigned)adlen; - memcpy(pad, ad, temp); - pad[temp] = 0x80; - memset(pad + temp + 1, 0, WAGE_RATE - temp - 1); - wage_absorb(state, pad, 0x40); - wage_permute(state); -} - -int wage_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[WAGE_STATE_SIZE]; - unsigned char block[WAGE_RATE]; - unsigned temp; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + WAGE_TAG_SIZE; - - /* Initialize the state and absorb the associated data */ - wage_init(state, k, npub); - if (adlen != 0) - wage_process_ad(state, block, ad, adlen); - - /* Encrypts the plaintext to produce the ciphertext */ - while (mlen >= WAGE_RATE) { - wage_get_rate(state, block); - lw_xor_block(block, m, WAGE_RATE); - wage_set_rate(state, block, 0x20); - wage_permute(state); - memcpy(c, block, WAGE_RATE); - c += WAGE_RATE; - m += WAGE_RATE; - mlen -= WAGE_RATE; - } - temp = (unsigned)mlen; - wage_get_rate(state, block); - lw_xor_block(block, m, temp); - block[temp] ^= 0x80; - wage_set_rate(state, block, 0x20); - wage_permute(state); - memcpy(c, block, temp); - - /* Generate and extract the authentication tag */ - wage_absorb_key(state, k); - wage_extract_tag(state, c + temp); - return 0; -} - -int wage_aead_decrypt - 
(unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - unsigned char state[WAGE_STATE_SIZE]; - unsigned char block[WAGE_TAG_SIZE]; - unsigned char *mtemp = m; - unsigned temp; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < WAGE_TAG_SIZE) - return -1; - *mlen = clen - WAGE_TAG_SIZE; - - /* Initialize the state and absorb the associated data */ - wage_init(state, k, npub); - if (adlen != 0) - wage_process_ad(state, block, ad, adlen); - - /* Decrypts the ciphertext to produce the plaintext */ - clen -= WAGE_TAG_SIZE; - while (clen >= WAGE_RATE) { - wage_get_rate(state, block); - lw_xor_block(block, c, WAGE_RATE); - wage_set_rate(state, c, 0x20); - wage_permute(state); - memcpy(m, block, WAGE_RATE); - c += WAGE_RATE; - m += WAGE_RATE; - clen -= WAGE_RATE; - } - temp = (unsigned)clen; - wage_get_rate(state, block); - lw_xor_block_2_src(block + 8, block, c, temp); - memcpy(block, c, temp); - block[temp] ^= 0x80; - wage_set_rate(state, block, 0x20); - wage_permute(state); - memcpy(m, block + 8, temp); - - /* Generate and check the authentication tag */ - wage_absorb_key(state, k); - wage_extract_tag(state, block); - return aead_check_tag(mtemp, *mlen, block, c + temp, WAGE_TAG_SIZE); -} diff --git a/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/wage.h b/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/wage.h deleted file mode 100644 index 2a620c4..0000000 --- a/wage/Implementations/crypto_aead/wageae128v1/rhys-avr/wage.h +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_WAGE_H -#define LWCRYPTO_WAGE_H - -#include "aead-common.h" - -/** - * \file wage.h - * \brief WAGE authenticated encryption algorithm. - * - * WAGE is an authenticated encryption algorithm that is built around the - * 259-bit WAGE permutation. The algorithm has a 128-bit key, a 128-bit - * nonce, and a 128-bit authentication tag. It is an evolution of the - * WG series of stream ciphers. - * - * References: https://uwaterloo.ca/communications-security-lab/lwc/wage - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for WAGE. - */ -#define WAGE_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for WAGE. 
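- *
- * Every WAGE ciphertext is exactly this many bytes longer than the
- * corresponding plaintext: wage_aead_encrypt() sets *clen to
- * mlen + WAGE_TAG_SIZE, and wage_aead_decrypt() rejects any input
- * shorter than WAGE_TAG_SIZE bytes. Illustrative buffer-sizing sketch,
- * where msg, msg_len, nonce and key are hypothetical caller variables:
- *
- * \code
- * unsigned char ct[64 + WAGE_TAG_SIZE]; /* up to a 64-byte message plus tag */
- * unsigned long long ctlen;
- * wage_aead_encrypt(ct, &ctlen, msg, msg_len, NULL, 0, NULL, nonce, key);
- * \endcode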
- */ -#define WAGE_TAG_SIZE 16 - -/** - * \brief Size of the nonce for WAGE. - */ -#define WAGE_NONCE_SIZE 16 - -/** - * \brief Meta-information block for the WAGE cipher. - */ -extern aead_cipher_t const wage_cipher; - -/** - * \brief Encrypts and authenticates a packet with WAGE. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa wage_aead_decrypt() - */ -int wage_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with WAGE. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - * - * \sa wage_aead_encrypt() - */ -int wage_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/wage/Implementations/crypto_aead/wageae128v1/rhys/internal-util.h b/wage/Implementations/crypto_aead/wageae128v1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/wage/Implementations/crypto_aead/wageae128v1/rhys/internal-util.h +++ b/wage/Implementations/crypto_aead/wageae128v1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. 
*/ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + 
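
As a quick sanity check of the composition idea behind this hunk, a small C99 snippet (hypothetical helper name, not part of this library or of the patch) can confirm that rotating left by 8 and then right by 3 matches a direct left rotation by 5:

#include <stdint.h>
#include <assert.h>

/* Hypothetical helper: verify that "left by 8, then right by 3" equals a
 * direct left rotation by 5 for a given 32-bit value. On AVR the byte
 * rotation and the single-bit rotations are the cheap primitives. */
static void check_left_rotate5_composition(uint32_t x)
{
    uint32_t by8      = (x << 8) | (x >> 24);     /* left rotate by 8 (byte move)      */
    uint32_t composed = (by8 >> 3) | (by8 << 29); /* then right rotate by 3 (3 x 1-bit) */
    uint32_t direct   = (x << 5) | (x >> 27);     /* direct left rotate by 5            */
    assert(composed == direct);
}

Calling it with a value that wraps around the word boundary, such as 0x80000001u, exercises both paths (each evaluates to 0x00000030u), which is the same argument that justifies the remaining composed macros below.
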
+/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/aead-common.c b/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/aead-common.h b/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. 
- */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. - * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. 
- */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. 
- */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/api.h b/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/api.h deleted file mode 100644 index b2f8a36..0000000 --- a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/api.h +++ /dev/null @@ -1,5 +0,0 @@ -#define CRYPTO_KEYBYTES 16 -#define CRYPTO_NSECBYTES 0 -#define CRYPTO_NPUBBYTES 16 -#define CRYPTO_ABYTES 16 -#define CRYPTO_NOOVERLAP 1 diff --git a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/encrypt.c b/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/encrypt.c deleted file mode 100644 index f7bb1b4..0000000 --- a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/encrypt.c +++ /dev/null @@ -1,26 +0,0 @@ - -#include "xoodyak.h" - -int crypto_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - return xoodyak_aead_encrypt - (c, clen, m, mlen, ad, adlen, nsec, npub, k); -} - -int crypto_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - return xoodyak_aead_decrypt - (m, mlen, nsec, c, clen, ad, adlen, npub, k); -} diff --git a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/internal-util.h b/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. - * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define 
le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. 
*/ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ -#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) 
(leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/internal-xoodoo-avr.S b/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/internal-xoodoo-avr.S deleted file mode 100644 index 629c19d..0000000 --- a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/internal-xoodoo-avr.S +++ /dev/null @@ -1,935 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global xoodoo_permute - .type xoodoo_permute, @function -xoodoo_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 -.L__stack_usage = 16 - ldi r18,88 - mov r19,r1 - rcall 34f - ldi r18,56 - rcall 34f - ldi r18,192 - ldi r19,3 - rcall 34f - ldi r18,208 - mov r19,r1 - rcall 34f - ldi r18,32 - ldi r19,1 - rcall 34f - ldi r18,20 - mov r19,r1 - rcall 34f - ldi r18,96 - rcall 34f - ldi r18,44 - rcall 34f - ldi r18,128 - ldi r19,3 - rcall 34f - ldi r18,240 - mov r19,r1 - rcall 34f - ldi r18,160 - ldi r19,1 - rcall 34f - ldi r18,18 - mov r19,r1 - rcall 34f - rjmp 888f -34: - ldd r6,Z+12 - ldd r7,Z+13 - ldd r8,Z+14 - ldd r9,Z+15 - ldd r0,Z+28 - eor r6,r0 - ldd r0,Z+29 - eor r7,r0 - ldd r0,Z+30 - eor r8,r0 - ldd r0,Z+31 - eor r9,r0 - ldd r0,Z+44 - eor r6,r0 - ldd r0,Z+45 - eor r7,r0 - ldd r0,Z+46 - eor r8,r0 - ldd r0,Z+47 - eor r9,r0 - ld r20,Z - ldd r21,Z+1 - ldd r22,Z+2 - ldd r23,Z+3 - ldd r26,Z+16 - ldd r27,Z+17 - ldd r28,Z+18 - ldd r29,Z+19 - ldd r2,Z+32 - ldd r3,Z+33 - ldd r4,Z+34 - ldd r5,Z+35 - movw r10,r20 - movw r12,r22 - eor r10,r26 - eor r11,r27 - eor r12,r28 - eor r13,r29 - eor r10,r2 - eor r11,r3 - eor r12,r4 - eor r13,r5 - movw r14,r6 - movw r24,r8 - mov r0,r1 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - or r9,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r9,r24 - eor r6,r25 - eor r7,r14 - eor r8,r15 - movw r14,r10 - movw r24,r12 - mov r0,r1 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - or r13,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r13,r24 - eor r10,r25 - eor r11,r14 - eor r12,r15 - eor r20,r9 - eor r21,r6 - eor r22,r7 - eor r23,r8 - eor r26,r9 - eor r27,r6 - eor r28,r7 - eor r29,r8 - eor r2,r9 - eor r3,r6 - eor r4,r7 - eor 
r5,r8 - st Z,r20 - std Z+1,r21 - std Z+2,r22 - std Z+3,r23 - std Z+16,r26 - std Z+17,r27 - std Z+18,r28 - std Z+19,r29 - std Z+32,r2 - std Z+33,r3 - std Z+34,r4 - std Z+35,r5 - ldd r20,Z+4 - ldd r21,Z+5 - ldd r22,Z+6 - ldd r23,Z+7 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r28,Z+22 - ldd r29,Z+23 - ldd r2,Z+36 - ldd r3,Z+37 - ldd r4,Z+38 - ldd r5,Z+39 - movw r6,r20 - movw r8,r22 - eor r6,r26 - eor r7,r27 - eor r8,r28 - eor r9,r29 - eor r6,r2 - eor r7,r3 - eor r8,r4 - eor r9,r5 - movw r14,r6 - movw r24,r8 - mov r0,r1 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - or r9,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r9,r24 - eor r6,r25 - eor r7,r14 - eor r8,r15 - eor r20,r13 - eor r21,r10 - eor r22,r11 - eor r23,r12 - eor r26,r13 - eor r27,r10 - eor r28,r11 - eor r29,r12 - eor r2,r13 - eor r3,r10 - eor r4,r11 - eor r5,r12 - std Z+4,r20 - std Z+5,r21 - std Z+6,r22 - std Z+7,r23 - std Z+20,r26 - std Z+21,r27 - std Z+22,r28 - std Z+23,r29 - std Z+36,r2 - std Z+37,r3 - std Z+38,r4 - std Z+39,r5 - ldd r20,Z+8 - ldd r21,Z+9 - ldd r22,Z+10 - ldd r23,Z+11 - ldd r26,Z+24 - ldd r27,Z+25 - ldd r28,Z+26 - ldd r29,Z+27 - ldd r2,Z+40 - ldd r3,Z+41 - ldd r4,Z+42 - ldd r5,Z+43 - movw r10,r20 - movw r12,r22 - eor r10,r26 - eor r11,r27 - eor r12,r28 - eor r13,r29 - eor r10,r2 - eor r11,r3 - eor r12,r4 - eor r13,r5 - movw r14,r10 - movw r24,r12 - mov r0,r1 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - or r13,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r13,r24 - eor r10,r25 - eor r11,r14 - eor r12,r15 - eor r20,r9 - eor r21,r6 - eor r22,r7 - eor r23,r8 - eor r26,r9 - eor r27,r6 - eor r28,r7 - eor r29,r8 - eor r2,r9 - eor r3,r6 - eor r4,r7 - eor r5,r8 - std Z+8,r20 - std Z+9,r21 - std Z+10,r22 - std Z+11,r23 - std Z+24,r26 - std Z+25,r27 - std Z+26,r28 - std Z+27,r29 - std Z+40,r2 - std Z+41,r3 - std Z+42,r4 - std Z+43,r5 - ldd r0,Z+12 - eor r0,r13 - std Z+12,r0 - ldd r0,Z+13 - eor r0,r10 - std Z+13,r0 - ldd r0,Z+14 - eor r0,r11 - std Z+14,r0 - ldd r0,Z+15 - eor r0,r12 - std Z+15,r0 - ldd r6,Z+28 - ldd r7,Z+29 - ldd r8,Z+30 - ldd r9,Z+31 - eor r6,r13 - eor r7,r10 - eor r8,r11 - eor r9,r12 - ldd r14,Z+44 - ldd r15,Z+45 - ldd r24,Z+46 - ldd r25,Z+47 - eor r14,r13 - eor r15,r10 - eor r24,r11 - eor r25,r12 - ldd r10,Z+24 - ldd r11,Z+25 - ldd r12,Z+26 - ldd r13,Z+27 - std Z+28,r10 - std Z+29,r11 - std Z+30,r12 - std Z+31,r13 - ldd r10,Z+20 - ldd r11,Z+21 - ldd r12,Z+22 - ldd r13,Z+23 - std Z+24,r10 - std Z+25,r11 - std Z+26,r12 - std Z+27,r13 - ldd r10,Z+16 - ldd r11,Z+17 - ldd r12,Z+18 - ldd r13,Z+19 - std Z+20,r10 - std Z+21,r11 - std Z+22,r12 - std Z+23,r13 - std Z+16,r6 - std Z+17,r7 - std Z+18,r8 - std Z+19,r9 - ldd r6,Z+32 - ldd r7,Z+33 - ldd r8,Z+34 - ldd r9,Z+35 - mov r0,r9 - mov r9,r8 - mov r8,r7 - mov r7,r6 - mov r6,r0 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - std Z+32,r6 - std Z+33,r7 - std Z+34,r8 - std Z+35,r9 - ldd r6,Z+36 - ldd r7,Z+37 - ldd r8,Z+38 - ldd r9,Z+39 - mov r0,r9 - mov r9,r8 - mov r8,r7 - mov r7,r6 - mov r6,r0 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol 
r9 - adc r6,r1 - std Z+36,r6 - std Z+37,r7 - std Z+38,r8 - std Z+39,r9 - ldd r6,Z+40 - ldd r7,Z+41 - ldd r8,Z+42 - ldd r9,Z+43 - mov r0,r9 - mov r9,r8 - mov r8,r7 - mov r7,r6 - mov r6,r0 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - std Z+40,r6 - std Z+41,r7 - std Z+42,r8 - std Z+43,r9 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - std Z+44,r14 - std Z+45,r15 - std Z+46,r24 - std Z+47,r25 - ld r20,Z - ldd r21,Z+1 - ldd r22,Z+2 - ldd r23,Z+3 - eor r20,r18 - eor r21,r19 - ldd r26,Z+16 - ldd r27,Z+17 - ldd r28,Z+18 - ldd r29,Z+19 - ldd r2,Z+32 - ldd r3,Z+33 - ldd r4,Z+34 - ldd r5,Z+35 - movw r6,r2 - movw r8,r4 - mov r0,r26 - com r0 - and r6,r0 - mov r0,r27 - com r0 - and r7,r0 - mov r0,r28 - com r0 - and r8,r0 - mov r0,r29 - com r0 - and r9,r0 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - st Z,r20 - std Z+1,r21 - std Z+2,r22 - std Z+3,r23 - movw r6,r20 - movw r8,r22 - mov r0,r2 - com r0 - and r6,r0 - mov r0,r3 - com r0 - and r7,r0 - mov r0,r4 - com r0 - and r8,r0 - mov r0,r5 - com r0 - and r9,r0 - eor r26,r6 - eor r27,r7 - eor r28,r8 - eor r29,r9 - std Z+16,r26 - std Z+17,r27 - std Z+18,r28 - std Z+19,r29 - mov r0,r20 - com r0 - and r26,r0 - mov r0,r21 - com r0 - and r27,r0 - mov r0,r22 - com r0 - and r28,r0 - mov r0,r23 - com r0 - and r29,r0 - eor r2,r26 - eor r3,r27 - eor r4,r28 - eor r5,r29 - std Z+32,r2 - std Z+33,r3 - std Z+34,r4 - std Z+35,r5 - ldd r20,Z+4 - ldd r21,Z+5 - ldd r22,Z+6 - ldd r23,Z+7 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r28,Z+22 - ldd r29,Z+23 - ldd r2,Z+36 - ldd r3,Z+37 - ldd r4,Z+38 - ldd r5,Z+39 - movw r6,r2 - movw r8,r4 - mov r0,r26 - com r0 - and r6,r0 - mov r0,r27 - com r0 - and r7,r0 - mov r0,r28 - com r0 - and r8,r0 - mov r0,r29 - com r0 - and r9,r0 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - std Z+4,r20 - std Z+5,r21 - std Z+6,r22 - std Z+7,r23 - movw r6,r20 - movw r8,r22 - mov r0,r2 - com r0 - and r6,r0 - mov r0,r3 - com r0 - and r7,r0 - mov r0,r4 - com r0 - and r8,r0 - mov r0,r5 - com r0 - and r9,r0 - eor r26,r6 - eor r27,r7 - eor r28,r8 - eor r29,r9 - std Z+20,r26 - std Z+21,r27 - std Z+22,r28 - std Z+23,r29 - mov r0,r20 - com r0 - and r26,r0 - mov r0,r21 - com r0 - and r27,r0 - mov r0,r22 - com r0 - and r28,r0 - mov r0,r23 - com r0 - and r29,r0 - eor r2,r26 - eor r3,r27 - eor r4,r28 - eor r5,r29 - std Z+36,r2 - std Z+37,r3 - std Z+38,r4 - std Z+39,r5 - ldd r20,Z+8 - ldd r21,Z+9 - ldd r22,Z+10 - ldd r23,Z+11 - ldd r26,Z+24 - ldd r27,Z+25 - ldd r28,Z+26 - ldd r29,Z+27 - ldd r2,Z+40 - ldd r3,Z+41 - ldd r4,Z+42 - ldd r5,Z+43 - movw r6,r2 - movw r8,r4 - mov r0,r26 - com r0 - and r6,r0 - mov r0,r27 - com r0 - and r7,r0 - mov r0,r28 - com r0 - and r8,r0 - mov r0,r29 - com r0 - and r9,r0 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - std Z+8,r20 - std Z+9,r21 - std Z+10,r22 - std Z+11,r23 - movw r6,r20 - movw r8,r22 - mov r0,r2 - com r0 - and r6,r0 - mov r0,r3 - com r0 - and r7,r0 - mov r0,r4 - com r0 - and r8,r0 - mov r0,r5 - com r0 - and r9,r0 - eor r26,r6 - eor r27,r7 - eor r28,r8 - eor r29,r9 - std Z+24,r26 - std Z+25,r27 - std Z+26,r28 - std Z+27,r29 - mov r0,r20 - com r0 - and r26,r0 - mov r0,r21 - com r0 - and r27,r0 - mov r0,r22 - com r0 - and r28,r0 - mov r0,r23 - com r0 - and r29,r0 - eor r2,r26 - eor r3,r27 - eor r4,r28 - eor r5,r29 - std Z+40,r2 - std 
Z+41,r3 - std Z+42,r4 - std Z+43,r5 - ldd r20,Z+12 - ldd r21,Z+13 - ldd r22,Z+14 - ldd r23,Z+15 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r28,Z+30 - ldd r29,Z+31 - ldd r2,Z+44 - ldd r3,Z+45 - ldd r4,Z+46 - ldd r5,Z+47 - movw r6,r2 - movw r8,r4 - mov r0,r26 - com r0 - and r6,r0 - mov r0,r27 - com r0 - and r7,r0 - mov r0,r28 - com r0 - and r8,r0 - mov r0,r29 - com r0 - and r9,r0 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - std Z+12,r20 - std Z+13,r21 - std Z+14,r22 - std Z+15,r23 - movw r6,r20 - movw r8,r22 - mov r0,r2 - com r0 - and r6,r0 - mov r0,r3 - com r0 - and r7,r0 - mov r0,r4 - com r0 - and r8,r0 - mov r0,r5 - com r0 - and r9,r0 - eor r26,r6 - eor r27,r7 - eor r28,r8 - eor r29,r9 - std Z+28,r26 - std Z+29,r27 - std Z+30,r28 - std Z+31,r29 - mov r0,r20 - com r0 - and r26,r0 - mov r0,r21 - com r0 - and r27,r0 - mov r0,r22 - com r0 - and r28,r0 - mov r0,r23 - com r0 - and r29,r0 - eor r2,r26 - eor r3,r27 - eor r4,r28 - eor r5,r29 - std Z+44,r2 - std Z+45,r3 - std Z+46,r4 - std Z+47,r5 - ldd r6,Z+16 - ldd r7,Z+17 - ldd r8,Z+18 - ldd r9,Z+19 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - std Z+16,r6 - std Z+17,r7 - std Z+18,r8 - std Z+19,r9 - ldd r6,Z+20 - ldd r7,Z+21 - ldd r8,Z+22 - ldd r9,Z+23 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - std Z+20,r6 - std Z+21,r7 - std Z+22,r8 - std Z+23,r9 - ldd r6,Z+24 - ldd r7,Z+25 - ldd r8,Z+26 - ldd r9,Z+27 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - std Z+24,r6 - std Z+25,r7 - std Z+26,r8 - std Z+27,r9 - ldd r6,Z+28 - ldd r7,Z+29 - ldd r8,Z+30 - ldd r9,Z+31 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - std Z+28,r6 - std Z+29,r7 - std Z+30,r8 - std Z+31,r9 - ldd r6,Z+40 - ldd r7,Z+41 - ldd r8,Z+42 - ldd r9,Z+43 - ldd r10,Z+44 - ldd r11,Z+45 - ldd r12,Z+46 - ldd r13,Z+47 - ldd r14,Z+32 - ldd r15,Z+33 - ldd r24,Z+34 - ldd r25,Z+35 - std Z+40,r25 - std Z+41,r14 - std Z+42,r15 - std Z+43,r24 - ldd r14,Z+36 - ldd r15,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - std Z+44,r25 - std Z+45,r14 - std Z+46,r15 - std Z+47,r24 - std Z+32,r9 - std Z+33,r6 - std Z+34,r7 - std Z+35,r8 - std Z+36,r13 - std Z+37,r10 - std Z+38,r11 - std Z+39,r12 - ret -888: - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size xoodoo_permute, .-xoodoo_permute - -#endif diff --git a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/internal-xoodoo.c b/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/internal-xoodoo.c deleted file mode 100644 index 59bb8bf..0000000 --- a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/internal-xoodoo.c +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "internal-xoodoo.h" - -#if !defined(__AVR__) - -void xoodoo_permute(xoodoo_state_t *state) -{ - static uint16_t const rc[XOODOO_ROUNDS] = { - 0x0058, 0x0038, 0x03C0, 0x00D0, 0x0120, 0x0014, - 0x0060, 0x002C, 0x0380, 0x00F0, 0x01A0, 0x0012 - }; - uint8_t round; - uint32_t x00, x01, x02, x03; - uint32_t x10, x11, x12, x13; - uint32_t x20, x21, x22, x23; - uint32_t t1, t2; - - /* Load the state and convert from little-endian byte order */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x00 = state->S[0][0]; - x01 = state->S[0][1]; - x02 = state->S[0][2]; - x03 = state->S[0][3]; - x10 = state->S[1][0]; - x11 = state->S[1][1]; - x12 = state->S[1][2]; - x13 = state->S[1][3]; - x20 = state->S[2][0]; - x21 = state->S[2][1]; - x22 = state->S[2][2]; - x23 = state->S[2][3]; -#else - x00 = le_load_word32(state->B); - x01 = le_load_word32(state->B + 4); - x02 = le_load_word32(state->B + 8); - x03 = le_load_word32(state->B + 12); - x10 = le_load_word32(state->B + 16); - x11 = le_load_word32(state->B + 20); - x12 = le_load_word32(state->B + 24); - x13 = le_load_word32(state->B + 28); - x20 = le_load_word32(state->B + 32); - x21 = le_load_word32(state->B + 36); - x22 = le_load_word32(state->B + 40); - x23 = le_load_word32(state->B + 44); -#endif - - /* Perform all permutation rounds */ - for (round = 0; round < XOODOO_ROUNDS; ++round) { - /* Optimization ideas from the Xoodoo implementation here: - * https://github.com/XKCP/XKCP/tree/master/lib/low/Xoodoo/Optimized */ - - /* Step theta: Mix column parity */ - t1 = x03 ^ x13 ^ x23; - t2 = x00 ^ x10 ^ x20; - t1 = leftRotate5(t1) ^ leftRotate14(t1); - t2 = leftRotate5(t2) ^ leftRotate14(t2); - x00 ^= t1; - x10 ^= t1; - x20 ^= t1; - t1 = x01 ^ x11 ^ x21; - t1 = leftRotate5(t1) ^ leftRotate14(t1); - x01 ^= t2; - x11 ^= t2; - x21 ^= t2; - t2 = x02 ^ x12 ^ x22; - t2 = leftRotate5(t2) ^ leftRotate14(t2); - x02 ^= t1; - x12 ^= t1; - x22 ^= t1; - x03 ^= t2; - x13 ^= t2; - x23 ^= t2; - - /* Step rho-west: Plane shift */ - t1 = x13; - x13 = x12; - x12 = x11; - x11 = x10; - x10 = t1; - x20 = leftRotate11(x20); - x21 = leftRotate11(x21); - x22 = leftRotate11(x22); - x23 = leftRotate11(x23); - - /* Step iota: Add the round constant to the state */ - x00 ^= rc[round]; - - /* Step chi: Non-linear layer */ - x00 ^= (~x10) & x20; - x10 ^= (~x20) & x00; - x20 ^= (~x00) & x10; - x01 ^= (~x11) & x21; - x11 ^= (~x21) & x01; - x21 ^= (~x01) & x11; - x02 ^= (~x12) & x22; - x12 ^= (~x22) & x02; - x22 ^= (~x02) & x12; - x03 ^= (~x13) & x23; - x13 ^= (~x23) & x03; - x23 ^= (~x03) & x13; - - /* Step rho-east: Plane shift */ - x10 = leftRotate1(x10); - x11 = leftRotate1(x11); - x12 = leftRotate1(x12); - x13 = leftRotate1(x13); - t1 = leftRotate8(x22); - t2 = leftRotate8(x23); - x22 = leftRotate8(x20); - x23 = leftRotate8(x21); - x20 = t1; - x21 = t2; - } - - /* Convert back into little-endian and store to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0][0] = x00; - state->S[0][1] = x01; - state->S[0][2] = x02; - state->S[0][3] = x03; - state->S[1][0] = x10; - state->S[1][1] = x11; - state->S[1][2] = x12; - state->S[1][3] = x13; - state->S[2][0] = x20; - state->S[2][1] = x21; - state->S[2][2] = x22; - state->S[2][3] = x23; -#else - le_store_word32(state->B, x00); - 
le_store_word32(state->B + 4, x01); - le_store_word32(state->B + 8, x02); - le_store_word32(state->B + 12, x03); - le_store_word32(state->B + 16, x10); - le_store_word32(state->B + 20, x11); - le_store_word32(state->B + 24, x12); - le_store_word32(state->B + 28, x13); - le_store_word32(state->B + 32, x20); - le_store_word32(state->B + 36, x21); - le_store_word32(state->B + 40, x22); - le_store_word32(state->B + 44, x23); -#endif -} - -#endif /* !__AVR__ */ diff --git a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/internal-xoodoo.h b/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/internal-xoodoo.h deleted file mode 100644 index f6eddd8..0000000 --- a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/internal-xoodoo.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_XOODOO_H -#define LW_INTERNAL_XOODOO_H - -#include "internal-util.h" - -/** - * \file internal-xoodoo.h - * \brief Internal implementation of the Xoodoo permutation. - * - * References: https://keccak.team/xoodyak.html - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Number of rows in the Xoodoo state. - */ -#define XOODOO_ROWS 3 - -/** - * \brief Number of columns in the Xoodoo state. - */ -#define XOODOO_COLS 4 - -/** - * \brief Number of rounds for the Xoodoo permutation. - */ -#define XOODOO_ROUNDS 12 - -/** - * \brief State information for the Xoodoo permutation. - */ -typedef union -{ - /** Words of the state */ - uint32_t S[XOODOO_ROWS][XOODOO_COLS]; - - /** Bytes of the state */ - uint8_t B[XOODOO_ROWS * XOODOO_COLS * sizeof(uint32_t)]; - -} xoodoo_state_t; - -/** - * \brief Permutes the Xoodoo state. - * - * \param state The Xoodoo state. - * - * The state will be in little-endian before and after the operation. - */ -void xoodoo_permute(xoodoo_state_t *state); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/xoodyak.c b/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/xoodyak.c deleted file mode 100644 index 4ad4fce..0000000 --- a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/xoodyak.c +++ /dev/null @@ -1,321 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "xoodyak.h" -#include "internal-xoodoo.h" -#include - -aead_cipher_t const xoodyak_cipher = { - "Xoodyak", - XOODYAK_KEY_SIZE, - XOODYAK_NONCE_SIZE, - XOODYAK_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - xoodyak_aead_encrypt, - xoodyak_aead_decrypt -}; - -aead_hash_algorithm_t const xoodyak_hash_algorithm = { - "Xoodyak-Hash", - sizeof(xoodyak_hash_state_t), - XOODYAK_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - xoodyak_hash, - (aead_hash_init_t)xoodyak_hash_init, - (aead_hash_update_t)xoodyak_hash_absorb, - (aead_hash_finalize_t)xoodyak_hash_finalize, - (aead_xof_absorb_t)xoodyak_hash_absorb, - (aead_xof_squeeze_t)xoodyak_hash_squeeze -}; - -/** - * \brief Rate for absorbing data into the sponge state. - */ -#define XOODYAK_ABSORB_RATE 44 - -/** - * \brief Rate for squeezing data out of the sponge. - */ -#define XOODYAK_SQUEEZE_RATE 24 - -/** - * \brief Rate for absorbing and squeezing in hashing mode. - */ -#define XOODYAK_HASH_RATE 16 - -/** - * \brief Phase identifier for "up" mode, which indicates that a block - * permutation has just been performed. - */ -#define XOODYAK_PHASE_UP 0 - -/** - * \brief Phase identifier for "down" mode, which indicates that data has - * been absorbed but that a block permutation has not been done yet. - */ -#define XOODYAK_PHASE_DOWN 1 - -/** - * \brief Absorbs data into the Xoodoo permutation state. - * - * \param state Xoodoo permutation state. - * \param phase Points to the current phase, up or down. - * \param data Points to the data to be absorbed. - * \param len Length of the data to be absorbed. 
- */ -static void xoodyak_absorb - (xoodoo_state_t *state, uint8_t *phase, - const unsigned char *data, unsigned long long len) -{ - uint8_t domain = 0x03; - unsigned temp; - while (len > XOODYAK_ABSORB_RATE) { - if (*phase != XOODYAK_PHASE_UP) - xoodoo_permute(state); - lw_xor_block(state->B, data, XOODYAK_ABSORB_RATE); - state->B[XOODYAK_ABSORB_RATE] ^= 0x01; /* Padding */ - state->B[sizeof(state->B) - 1] ^= domain; - data += XOODYAK_ABSORB_RATE; - len -= XOODYAK_ABSORB_RATE; - domain = 0x00; - *phase = XOODYAK_PHASE_DOWN; - } - temp = (unsigned)len; - if (*phase != XOODYAK_PHASE_UP) - xoodoo_permute(state); - lw_xor_block(state->B, data, temp); - state->B[temp] ^= 0x01; /* Padding */ - state->B[sizeof(state->B) - 1] ^= domain; - *phase = XOODYAK_PHASE_DOWN; -} - -int xoodyak_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - xoodoo_state_t state; - uint8_t phase, domain; - unsigned temp; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + XOODYAK_TAG_SIZE; - - /* Initialize the state with the key */ - memcpy(state.B, k, XOODYAK_KEY_SIZE); - memset(state.B + XOODYAK_KEY_SIZE, 0, sizeof(state.B) - XOODYAK_KEY_SIZE); - state.B[XOODYAK_KEY_SIZE + 1] = 0x01; /* Padding */ - state.B[sizeof(state.B) - 1] = 0x02; /* Domain separation */ - phase = XOODYAK_PHASE_DOWN; - - /* Absorb the nonce and associated data */ - xoodyak_absorb(&state, &phase, npub, XOODYAK_NONCE_SIZE); - xoodyak_absorb(&state, &phase, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - domain = 0x80; - while (mlen > XOODYAK_SQUEEZE_RATE) { - state.B[sizeof(state.B) - 1] ^= domain; - xoodoo_permute(&state); - lw_xor_block_2_dest(c, state.B, m, XOODYAK_SQUEEZE_RATE); - state.B[XOODYAK_SQUEEZE_RATE] ^= 0x01; /* Padding */ - c += XOODYAK_SQUEEZE_RATE; - m += XOODYAK_SQUEEZE_RATE; - mlen -= XOODYAK_SQUEEZE_RATE; - domain = 0; - } - state.B[sizeof(state.B) - 1] ^= domain; - xoodoo_permute(&state); - temp = (unsigned)mlen; - lw_xor_block_2_dest(c, state.B, m, temp); - state.B[temp] ^= 0x01; /* Padding */ - c += temp; - - /* Generate the authentication tag */ - state.B[sizeof(state.B) - 1] ^= 0x40; /* Domain separation */ - xoodoo_permute(&state); - memcpy(c, state.B, XOODYAK_TAG_SIZE); - return 0; -} - -int xoodyak_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - xoodoo_state_t state; - uint8_t phase, domain; - unsigned temp; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < XOODYAK_TAG_SIZE) - return -1; - *mlen = clen - XOODYAK_TAG_SIZE; - - /* Initialize the state with the key */ - memcpy(state.B, k, XOODYAK_KEY_SIZE); - memset(state.B + XOODYAK_KEY_SIZE, 0, sizeof(state.B) - XOODYAK_KEY_SIZE); - state.B[XOODYAK_KEY_SIZE + 1] = 0x01; /* Padding */ - state.B[sizeof(state.B) - 1] = 0x02; /* Domain separation */ - phase = XOODYAK_PHASE_DOWN; - - /* Absorb the nonce and associated data */ - xoodyak_absorb(&state, &phase, npub, XOODYAK_NONCE_SIZE); - xoodyak_absorb(&state, &phase, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - domain = 0x80; - clen -= XOODYAK_TAG_SIZE; - while (clen > 
XOODYAK_SQUEEZE_RATE) { - state.B[sizeof(state.B) - 1] ^= domain; - xoodoo_permute(&state); - lw_xor_block_swap(m, state.B, c, XOODYAK_SQUEEZE_RATE); - state.B[XOODYAK_SQUEEZE_RATE] ^= 0x01; /* Padding */ - c += XOODYAK_SQUEEZE_RATE; - m += XOODYAK_SQUEEZE_RATE; - clen -= XOODYAK_SQUEEZE_RATE; - domain = 0; - } - state.B[sizeof(state.B) - 1] ^= domain; - xoodoo_permute(&state); - temp = (unsigned)clen; - lw_xor_block_swap(m, state.B, c, temp); - state.B[temp] ^= 0x01; /* Padding */ - c += temp; - - /* Check the authentication tag */ - state.B[sizeof(state.B) - 1] ^= 0x40; /* Domain separation */ - xoodoo_permute(&state); - return aead_check_tag(mtemp, *mlen, state.B, c, XOODYAK_TAG_SIZE); -} - -int xoodyak_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - xoodyak_hash_state_t state; - xoodyak_hash_init(&state); - xoodyak_hash_absorb(&state, in, inlen); - xoodyak_hash_squeeze(&state, out, XOODYAK_HASH_SIZE); - return 0; -} - -#define XOODYAK_HASH_MODE_INIT_ABSORB 0 -#define XOODYAK_HASH_MODE_ABSORB 1 -#define XOODYAK_HASH_MODE_SQUEEZE 2 - -#define xoodoo_hash_permute(state) \ - xoodoo_permute((xoodoo_state_t *)((state)->s.state)) - -void xoodyak_hash_init(xoodyak_hash_state_t *state) -{ - memset(state, 0, sizeof(xoodyak_hash_state_t)); - state->s.mode = XOODYAK_HASH_MODE_INIT_ABSORB; -} - -void xoodyak_hash_absorb - (xoodyak_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - uint8_t domain; - unsigned temp; - - /* If we were squeezing, then restart the absorb phase */ - if (state->s.mode == XOODYAK_HASH_MODE_SQUEEZE) { - xoodoo_hash_permute(state); - state->s.mode = XOODYAK_HASH_MODE_INIT_ABSORB; - state->s.count = 0; - } - - /* The first block needs a different domain separator to the others */ - domain = (state->s.mode == XOODYAK_HASH_MODE_INIT_ABSORB) ? 0x01 : 0x00; - - /* Absorb the input data into the state */ - while (inlen > 0) { - if (state->s.count >= XOODYAK_HASH_RATE) { - state->s.state[XOODYAK_HASH_RATE] ^= 0x01; /* Padding */ - state->s.state[sizeof(state->s.state) - 1] ^= domain; - xoodoo_hash_permute(state); - state->s.mode = XOODYAK_HASH_MODE_ABSORB; - state->s.count = 0; - domain = 0x00; - } - temp = XOODYAK_HASH_RATE - state->s.count; - if (temp > inlen) - temp = (unsigned)inlen; - lw_xor_block(state->s.state + state->s.count, in, temp); - state->s.count += temp; - in += temp; - inlen -= temp; - } -} - -void xoodyak_hash_squeeze - (xoodyak_hash_state_t *state, unsigned char *out, - unsigned long long outlen) -{ - uint8_t domain; - unsigned temp; - - /* If we were absorbing, then terminate the absorb phase */ - if (state->s.mode != XOODYAK_HASH_MODE_SQUEEZE) { - domain = (state->s.mode == XOODYAK_HASH_MODE_INIT_ABSORB) ? 
0x01 : 0x00; - state->s.state[state->s.count] ^= 0x01; /* Padding */ - state->s.state[sizeof(state->s.state) - 1] ^= domain; - xoodoo_hash_permute(state); - state->s.mode = XOODYAK_HASH_MODE_SQUEEZE; - state->s.count = 0; - } - - /* Squeeze data out of the state */ - while (outlen > 0) { - if (state->s.count >= XOODYAK_HASH_RATE) { - /* Padding is always at index 0 for squeezing subsequent - * blocks because the number of bytes we have absorbed - * since the previous block was squeezed out is zero */ - state->s.state[0] ^= 0x01; - xoodoo_hash_permute(state); - state->s.count = 0; - } - temp = XOODYAK_HASH_RATE - state->s.count; - if (temp > outlen) - temp = (unsigned)outlen; - memcpy(out, state->s.state + state->s.count, temp); - state->s.count += temp; - out += temp; - outlen -= temp; - } -} - -void xoodyak_hash_finalize - (xoodyak_hash_state_t *state, unsigned char *out) -{ - xoodyak_hash_squeeze(state, out, XOODYAK_HASH_SIZE); -} diff --git a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/xoodyak.h b/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/xoodyak.h deleted file mode 100644 index f4777d5..0000000 --- a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys-avr/xoodyak.h +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_XOODYAK_H -#define LWCRYPTO_XOODYAK_H - -#include "aead-common.h" - -/** - * \file xoodyak.h - * \brief Xoodyak authenticated encryption algorithm. - * - * Xoodyak is an authenticated encryption and hash algorithm pair based - * around the 384-bit Xoodoo permutation that is similar in structure to - * Keccak but is more efficient than Keccak on 32-bit embedded devices. - * The Cyclist mode of operation is used to convert the permutation - * into a sponge for the higher-level algorithms. - * - * The Xoodyak encryption mode has a 128-bit key, a 128-bit nonce, - * and a 128-bit authentication tag. The Xoodyak hashing mode has a - * 256-bit fixed hash output and can also be used as an extensible - * output function (XOF). - * - * The Xoodyak specification describes a re-keying mechanism where the - * key for one packet is used to derive the key to use on the next packet. - * This provides some resistance against side channel attacks by making - * the session key a moving target. This library does not currently - * implement re-keying. 
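As an editorial aside, the incremental hashing functions implemented just above compose in the usual init/absorb/squeeze way. The sketch below is a minimal host-side usage example, not part of the original sources; the input string is a placeholder, and only the functions and constants that appear in this patch are used.

#include <stdio.h>
#include "xoodyak.h"

int main(void)
{
    static const unsigned char msg[] = "example input";   /* placeholder data */
    unsigned char digest[XOODYAK_HASH_SIZE];
    unsigned char xof_output[64];
    xoodyak_hash_state_t state;

    /* One-shot hashing. */
    xoodyak_hash(digest, msg, sizeof(msg) - 1);

    /* Incremental hashing: absorb in two pieces, then finalize. */
    xoodyak_hash_init(&state);
    xoodyak_hash_absorb(&state, msg, 7);
    xoodyak_hash_absorb(&state, msg + 7, sizeof(msg) - 1 - 7);
    xoodyak_hash_finalize(&state, digest);

    /* XOF usage: squeeze an arbitrary amount of output instead. */
    xoodyak_hash_init(&state);
    xoodyak_hash_absorb(&state, msg, sizeof(msg) - 1);
    xoodyak_hash_squeeze(&state, xof_output, sizeof(xof_output));

    for (unsigned i = 0; i < sizeof(digest); ++i)
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}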
- * - * References: https://keccak.team/xoodyak.html - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for Xoodyak. - */ -#define XOODYAK_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Xoodyak. - */ -#define XOODYAK_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Xoodyak. - */ -#define XOODYAK_NONCE_SIZE 16 - -/** - * \brief Size of the hash output for Xoodyak. - */ -#define XOODYAK_HASH_SIZE 32 - -/** - * \brief State information for Xoodyak incremental hashing modes. - */ -typedef union -{ - struct { - unsigned char state[48]; /**< Current hash state */ - unsigned char count; /**< Number of bytes in the current block */ - unsigned char mode; /**< Hash mode: absorb or squeeze */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} xoodyak_hash_state_t; - -/** - * \brief Meta-information block for the Xoodyak cipher. - */ -extern aead_cipher_t const xoodyak_cipher; - -/** - * \brief Meta-information block for the Xoodyak hash algorithm. - */ -extern aead_hash_algorithm_t const xoodyak_hash_algorithm; - -/** - * \brief Encrypts and authenticates a packet with Xoodyak. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa xoodyak_aead_decrypt() - */ -int xoodyak_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Xoodyak. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
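Likewise, a minimal round-trip sketch for the two AEAD entry points documented above. The all-zero key and nonce are placeholders for illustration only; a real caller must use a secret key and a nonce that is never reused under the same key.

#include <stdio.h>
#include <string.h>
#include "xoodyak.h"

int main(void)
{
    unsigned char key[XOODYAK_KEY_SIZE] = {0};    /* placeholder key */
    unsigned char npub[XOODYAK_NONCE_SIZE] = {0}; /* placeholder nonce */
    static const unsigned char msg[] = "hello";
    static const unsigned char ad[] = "header";

    unsigned char c[sizeof(msg) - 1 + XOODYAK_TAG_SIZE];
    unsigned char m[sizeof(msg) - 1];
    unsigned long long clen, mlen;

    xoodyak_aead_encrypt(c, &clen, msg, sizeof(msg) - 1,
                         ad, sizeof(ad) - 1, NULL, npub, key);

    if (xoodyak_aead_decrypt(m, &mlen, NULL, c, clen,
                             ad, sizeof(ad) - 1, npub, key) != 0) {
        puts("tag check failed");
        return 1;
    }
    puts(memcmp(m, msg, (size_t)mlen) == 0 ? "round trip ok" : "mismatch");
    return 0;
}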
- * - * \sa xoodyak_aead_encrypt() - */ -int xoodyak_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with Xoodyak to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * XOODYAK_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int xoodyak_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a Xoodyak hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa xoodyak_hash_absorb(), xoodyak_hash_squeeze(), xoodyak_hash() - */ -void xoodyak_hash_init(xoodyak_hash_state_t *state); - -/** - * \brief Aborbs more input data into a Xoodyak hashing state. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa xoodyak_hash_init(), xoodyak_hash_squeeze() - */ -void xoodyak_hash_absorb - (xoodyak_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Squeezes output data from a Xoodyak hashing state. - * - * \param state Hash state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - * - * \sa xoodyak_hash_init(), xoodyak_hash_absorb() - */ -void xoodyak_hash_squeeze - (xoodyak_hash_state_t *state, unsigned char *out, - unsigned long long outlen); - -/** - * \brief Returns the final hash value from a Xoodyak hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - * - * \note This is a wrapper around xoodyak_hash_squeeze() for a fixed length - * of XOODYAK_HASH_SIZE bytes. - * - * \sa xoodyak_hash_init(), xoodyak_hash_absorb() - */ -void xoodyak_hash_finalize - (xoodyak_hash_state_t *state, unsigned char *out); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys/internal-util.h b/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys/internal-util.h index e79158c..e30166d 100644 --- a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys/internal-util.h +++ b/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys/internal-util.h @@ -238,6 +238,17 @@ } \ } while (0) +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. */ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + /* Rotation macros for 32-bit arguments */ /* Generic left rotate */ @@ -254,6 +265,8 @@ (_temp >> (bits)) | (_temp << (32 - (bits))); \ })) +#if !LW_CRYPTO_ROTATE32_COMPOSED + /* Left rotate by a specific number of bits. 
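As a quick illustration of the composition idea introduced in the comment above (rotations by 1 bit or by a multiple of 8 bits are cheap on AVR, and every other count is built from them), the following host-side sketch checks two of the compositions against a direct rotation. The rol32/ror32 helpers are hypothetical stand-ins for the generic leftRotate/rightRotate macros; none of this is part of the library.

#include <assert.h>
#include <stdint.h>

/* Stand-ins for the generic leftRotate/rightRotate macros (bits must be 1..31). */
static uint32_t rol32(uint32_t x, unsigned bits) { return (x << bits) | (x >> (32u - bits)); }
static uint32_t ror32(uint32_t x, unsigned bits) { return (x >> bits) | (x << (32u - bits)); }

int main(void)
{
    uint32_t x = 0x12345678;

    /* Left rotate by 12 composed as "left by 16, then right by 1 four times". */
    uint32_t r12 = ror32(ror32(ror32(ror32(rol32(x, 16), 1), 1), 1), 1);
    assert(r12 == rol32(x, 12));

    /* Left rotate by 5 composed as "left by 8, then right by 1 three times". */
    uint32_t r5 = ror32(ror32(ror32(rol32(x, 8), 1), 1), 1);
    assert(r5 == rol32(x, 5));

    return 0;
}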
These macros may be replaced * with more efficient ones on platforms that lack a barrel shifter */ #define leftRotate1(a) (leftRotate((a), 1)) @@ -322,6 +335,138 @@ #define rightRotate30(a) (rightRotate((a), 30)) #define rightRotate31(a) (rightRotate((a), 31)) +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 
26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) (leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + /* Rotation macros for 64-bit arguments */ /* Generic left rotate */ diff --git a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys/internal-xoodoo-avr.S b/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys/internal-xoodoo-avr.S new file mode 100644 index 0000000..629c19d --- /dev/null +++ b/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys/internal-xoodoo-avr.S @@ -0,0 +1,935 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global xoodoo_permute + .type xoodoo_permute, @function +xoodoo_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 +.L__stack_usage = 16 + ldi r18,88 + mov r19,r1 + rcall 34f + ldi r18,56 + rcall 34f + ldi r18,192 + ldi r19,3 + rcall 34f + ldi r18,208 + mov r19,r1 + rcall 34f + ldi r18,32 + ldi r19,1 + rcall 34f + ldi r18,20 + mov r19,r1 + rcall 34f + ldi r18,96 + rcall 34f + ldi r18,44 + rcall 34f + ldi r18,128 + ldi r19,3 + rcall 34f + ldi r18,240 + mov r19,r1 + rcall 34f + ldi r18,160 + ldi r19,1 + rcall 34f + ldi r18,18 + mov r19,r1 + rcall 34f + rjmp 888f +34: + ldd r6,Z+12 + ldd r7,Z+13 + ldd r8,Z+14 + ldd r9,Z+15 + ldd r0,Z+28 + eor r6,r0 + ldd r0,Z+29 + eor 
r7,r0 + ldd r0,Z+30 + eor r8,r0 + ldd r0,Z+31 + eor r9,r0 + ldd r0,Z+44 + eor r6,r0 + ldd r0,Z+45 + eor r7,r0 + ldd r0,Z+46 + eor r8,r0 + ldd r0,Z+47 + eor r9,r0 + ld r20,Z + ldd r21,Z+1 + ldd r22,Z+2 + ldd r23,Z+3 + ldd r26,Z+16 + ldd r27,Z+17 + ldd r28,Z+18 + ldd r29,Z+19 + ldd r2,Z+32 + ldd r3,Z+33 + ldd r4,Z+34 + ldd r5,Z+35 + movw r10,r20 + movw r12,r22 + eor r10,r26 + eor r11,r27 + eor r12,r28 + eor r13,r29 + eor r10,r2 + eor r11,r3 + eor r12,r4 + eor r13,r5 + movw r14,r6 + movw r24,r8 + mov r0,r1 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + or r9,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r9,r24 + eor r6,r25 + eor r7,r14 + eor r8,r15 + movw r14,r10 + movw r24,r12 + mov r0,r1 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + or r13,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r13,r24 + eor r10,r25 + eor r11,r14 + eor r12,r15 + eor r20,r9 + eor r21,r6 + eor r22,r7 + eor r23,r8 + eor r26,r9 + eor r27,r6 + eor r28,r7 + eor r29,r8 + eor r2,r9 + eor r3,r6 + eor r4,r7 + eor r5,r8 + st Z,r20 + std Z+1,r21 + std Z+2,r22 + std Z+3,r23 + std Z+16,r26 + std Z+17,r27 + std Z+18,r28 + std Z+19,r29 + std Z+32,r2 + std Z+33,r3 + std Z+34,r4 + std Z+35,r5 + ldd r20,Z+4 + ldd r21,Z+5 + ldd r22,Z+6 + ldd r23,Z+7 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r28,Z+22 + ldd r29,Z+23 + ldd r2,Z+36 + ldd r3,Z+37 + ldd r4,Z+38 + ldd r5,Z+39 + movw r6,r20 + movw r8,r22 + eor r6,r26 + eor r7,r27 + eor r8,r28 + eor r9,r29 + eor r6,r2 + eor r7,r3 + eor r8,r4 + eor r9,r5 + movw r14,r6 + movw r24,r8 + mov r0,r1 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + or r9,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r9,r24 + eor r6,r25 + eor r7,r14 + eor r8,r15 + eor r20,r13 + eor r21,r10 + eor r22,r11 + eor r23,r12 + eor r26,r13 + eor r27,r10 + eor r28,r11 + eor r29,r12 + eor r2,r13 + eor r3,r10 + eor r4,r11 + eor r5,r12 + std Z+4,r20 + std Z+5,r21 + std Z+6,r22 + std Z+7,r23 + std Z+20,r26 + std Z+21,r27 + std Z+22,r28 + std Z+23,r29 + std Z+36,r2 + std Z+37,r3 + std Z+38,r4 + std Z+39,r5 + ldd r20,Z+8 + ldd r21,Z+9 + ldd r22,Z+10 + ldd r23,Z+11 + ldd r26,Z+24 + ldd r27,Z+25 + ldd r28,Z+26 + ldd r29,Z+27 + ldd r2,Z+40 + ldd r3,Z+41 + ldd r4,Z+42 + ldd r5,Z+43 + movw r10,r20 + movw r12,r22 + eor r10,r26 + eor r11,r27 + eor r12,r28 + eor r13,r29 + eor r10,r2 + eor r11,r3 + eor r12,r4 + eor r13,r5 + movw r14,r10 + movw r24,r12 + mov r0,r1 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + or r13,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r13,r24 + eor r10,r25 + eor r11,r14 + eor r12,r15 + eor r20,r9 + eor r21,r6 + eor r22,r7 + eor r23,r8 + eor r26,r9 + eor r27,r6 + eor r28,r7 + eor r29,r8 + eor r2,r9 + eor r3,r6 + eor r4,r7 + eor r5,r8 + std Z+8,r20 + std Z+9,r21 + std Z+10,r22 + std Z+11,r23 + std Z+24,r26 + std Z+25,r27 + std Z+26,r28 + std Z+27,r29 + std Z+40,r2 + std Z+41,r3 + std Z+42,r4 + std Z+43,r5 + ldd 
r0,Z+12 + eor r0,r13 + std Z+12,r0 + ldd r0,Z+13 + eor r0,r10 + std Z+13,r0 + ldd r0,Z+14 + eor r0,r11 + std Z+14,r0 + ldd r0,Z+15 + eor r0,r12 + std Z+15,r0 + ldd r6,Z+28 + ldd r7,Z+29 + ldd r8,Z+30 + ldd r9,Z+31 + eor r6,r13 + eor r7,r10 + eor r8,r11 + eor r9,r12 + ldd r14,Z+44 + ldd r15,Z+45 + ldd r24,Z+46 + ldd r25,Z+47 + eor r14,r13 + eor r15,r10 + eor r24,r11 + eor r25,r12 + ldd r10,Z+24 + ldd r11,Z+25 + ldd r12,Z+26 + ldd r13,Z+27 + std Z+28,r10 + std Z+29,r11 + std Z+30,r12 + std Z+31,r13 + ldd r10,Z+20 + ldd r11,Z+21 + ldd r12,Z+22 + ldd r13,Z+23 + std Z+24,r10 + std Z+25,r11 + std Z+26,r12 + std Z+27,r13 + ldd r10,Z+16 + ldd r11,Z+17 + ldd r12,Z+18 + ldd r13,Z+19 + std Z+20,r10 + std Z+21,r11 + std Z+22,r12 + std Z+23,r13 + std Z+16,r6 + std Z+17,r7 + std Z+18,r8 + std Z+19,r9 + ldd r6,Z+32 + ldd r7,Z+33 + ldd r8,Z+34 + ldd r9,Z+35 + mov r0,r9 + mov r9,r8 + mov r8,r7 + mov r7,r6 + mov r6,r0 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + std Z+32,r6 + std Z+33,r7 + std Z+34,r8 + std Z+35,r9 + ldd r6,Z+36 + ldd r7,Z+37 + ldd r8,Z+38 + ldd r9,Z+39 + mov r0,r9 + mov r9,r8 + mov r8,r7 + mov r7,r6 + mov r6,r0 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + std Z+36,r6 + std Z+37,r7 + std Z+38,r8 + std Z+39,r9 + ldd r6,Z+40 + ldd r7,Z+41 + ldd r8,Z+42 + ldd r9,Z+43 + mov r0,r9 + mov r9,r8 + mov r8,r7 + mov r7,r6 + mov r6,r0 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + std Z+40,r6 + std Z+41,r7 + std Z+42,r8 + std Z+43,r9 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + std Z+44,r14 + std Z+45,r15 + std Z+46,r24 + std Z+47,r25 + ld r20,Z + ldd r21,Z+1 + ldd r22,Z+2 + ldd r23,Z+3 + eor r20,r18 + eor r21,r19 + ldd r26,Z+16 + ldd r27,Z+17 + ldd r28,Z+18 + ldd r29,Z+19 + ldd r2,Z+32 + ldd r3,Z+33 + ldd r4,Z+34 + ldd r5,Z+35 + movw r6,r2 + movw r8,r4 + mov r0,r26 + com r0 + and r6,r0 + mov r0,r27 + com r0 + and r7,r0 + mov r0,r28 + com r0 + and r8,r0 + mov r0,r29 + com r0 + and r9,r0 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + st Z,r20 + std Z+1,r21 + std Z+2,r22 + std Z+3,r23 + movw r6,r20 + movw r8,r22 + mov r0,r2 + com r0 + and r6,r0 + mov r0,r3 + com r0 + and r7,r0 + mov r0,r4 + com r0 + and r8,r0 + mov r0,r5 + com r0 + and r9,r0 + eor r26,r6 + eor r27,r7 + eor r28,r8 + eor r29,r9 + std Z+16,r26 + std Z+17,r27 + std Z+18,r28 + std Z+19,r29 + mov r0,r20 + com r0 + and r26,r0 + mov r0,r21 + com r0 + and r27,r0 + mov r0,r22 + com r0 + and r28,r0 + mov r0,r23 + com r0 + and r29,r0 + eor r2,r26 + eor r3,r27 + eor r4,r28 + eor r5,r29 + std Z+32,r2 + std Z+33,r3 + std Z+34,r4 + std Z+35,r5 + ldd r20,Z+4 + ldd r21,Z+5 + ldd r22,Z+6 + ldd r23,Z+7 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r28,Z+22 + ldd r29,Z+23 + ldd r2,Z+36 + ldd r3,Z+37 + ldd r4,Z+38 + ldd r5,Z+39 + movw r6,r2 + movw r8,r4 + mov r0,r26 + com r0 + and r6,r0 + mov r0,r27 + com r0 + and r7,r0 + mov r0,r28 + com r0 + and r8,r0 + mov r0,r29 + com r0 + and r9,r0 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + std Z+4,r20 + std Z+5,r21 + std Z+6,r22 + std Z+7,r23 + movw r6,r20 + movw r8,r22 + mov r0,r2 + com r0 + and r6,r0 + mov r0,r3 + com r0 + and r7,r0 + 
mov r0,r4 + com r0 + and r8,r0 + mov r0,r5 + com r0 + and r9,r0 + eor r26,r6 + eor r27,r7 + eor r28,r8 + eor r29,r9 + std Z+20,r26 + std Z+21,r27 + std Z+22,r28 + std Z+23,r29 + mov r0,r20 + com r0 + and r26,r0 + mov r0,r21 + com r0 + and r27,r0 + mov r0,r22 + com r0 + and r28,r0 + mov r0,r23 + com r0 + and r29,r0 + eor r2,r26 + eor r3,r27 + eor r4,r28 + eor r5,r29 + std Z+36,r2 + std Z+37,r3 + std Z+38,r4 + std Z+39,r5 + ldd r20,Z+8 + ldd r21,Z+9 + ldd r22,Z+10 + ldd r23,Z+11 + ldd r26,Z+24 + ldd r27,Z+25 + ldd r28,Z+26 + ldd r29,Z+27 + ldd r2,Z+40 + ldd r3,Z+41 + ldd r4,Z+42 + ldd r5,Z+43 + movw r6,r2 + movw r8,r4 + mov r0,r26 + com r0 + and r6,r0 + mov r0,r27 + com r0 + and r7,r0 + mov r0,r28 + com r0 + and r8,r0 + mov r0,r29 + com r0 + and r9,r0 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + std Z+8,r20 + std Z+9,r21 + std Z+10,r22 + std Z+11,r23 + movw r6,r20 + movw r8,r22 + mov r0,r2 + com r0 + and r6,r0 + mov r0,r3 + com r0 + and r7,r0 + mov r0,r4 + com r0 + and r8,r0 + mov r0,r5 + com r0 + and r9,r0 + eor r26,r6 + eor r27,r7 + eor r28,r8 + eor r29,r9 + std Z+24,r26 + std Z+25,r27 + std Z+26,r28 + std Z+27,r29 + mov r0,r20 + com r0 + and r26,r0 + mov r0,r21 + com r0 + and r27,r0 + mov r0,r22 + com r0 + and r28,r0 + mov r0,r23 + com r0 + and r29,r0 + eor r2,r26 + eor r3,r27 + eor r4,r28 + eor r5,r29 + std Z+40,r2 + std Z+41,r3 + std Z+42,r4 + std Z+43,r5 + ldd r20,Z+12 + ldd r21,Z+13 + ldd r22,Z+14 + ldd r23,Z+15 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r28,Z+30 + ldd r29,Z+31 + ldd r2,Z+44 + ldd r3,Z+45 + ldd r4,Z+46 + ldd r5,Z+47 + movw r6,r2 + movw r8,r4 + mov r0,r26 + com r0 + and r6,r0 + mov r0,r27 + com r0 + and r7,r0 + mov r0,r28 + com r0 + and r8,r0 + mov r0,r29 + com r0 + and r9,r0 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + std Z+12,r20 + std Z+13,r21 + std Z+14,r22 + std Z+15,r23 + movw r6,r20 + movw r8,r22 + mov r0,r2 + com r0 + and r6,r0 + mov r0,r3 + com r0 + and r7,r0 + mov r0,r4 + com r0 + and r8,r0 + mov r0,r5 + com r0 + and r9,r0 + eor r26,r6 + eor r27,r7 + eor r28,r8 + eor r29,r9 + std Z+28,r26 + std Z+29,r27 + std Z+30,r28 + std Z+31,r29 + mov r0,r20 + com r0 + and r26,r0 + mov r0,r21 + com r0 + and r27,r0 + mov r0,r22 + com r0 + and r28,r0 + mov r0,r23 + com r0 + and r29,r0 + eor r2,r26 + eor r3,r27 + eor r4,r28 + eor r5,r29 + std Z+44,r2 + std Z+45,r3 + std Z+46,r4 + std Z+47,r5 + ldd r6,Z+16 + ldd r7,Z+17 + ldd r8,Z+18 + ldd r9,Z+19 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + std Z+16,r6 + std Z+17,r7 + std Z+18,r8 + std Z+19,r9 + ldd r6,Z+20 + ldd r7,Z+21 + ldd r8,Z+22 + ldd r9,Z+23 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + std Z+20,r6 + std Z+21,r7 + std Z+22,r8 + std Z+23,r9 + ldd r6,Z+24 + ldd r7,Z+25 + ldd r8,Z+26 + ldd r9,Z+27 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + std Z+24,r6 + std Z+25,r7 + std Z+26,r8 + std Z+27,r9 + ldd r6,Z+28 + ldd r7,Z+29 + ldd r8,Z+30 + ldd r9,Z+31 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + std Z+28,r6 + std Z+29,r7 + std Z+30,r8 + std Z+31,r9 + ldd r6,Z+40 + ldd r7,Z+41 + ldd r8,Z+42 + ldd r9,Z+43 + ldd r10,Z+44 + ldd r11,Z+45 + ldd r12,Z+46 + ldd r13,Z+47 + ldd r14,Z+32 + ldd r15,Z+33 + ldd r24,Z+34 + ldd r25,Z+35 + std Z+40,r25 + std Z+41,r14 + std Z+42,r15 + std Z+43,r24 + ldd r14,Z+36 + ldd r15,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + std Z+44,r25 + std Z+45,r14 + std Z+46,r15 + std Z+47,r24 + std Z+32,r9 + std Z+33,r6 + std Z+34,r7 + std Z+35,r8 + std Z+36,r13 + std Z+37,r10 + std Z+38,r11 + std Z+39,r12 + ret +888: + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + 
pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size xoodoo_permute, .-xoodoo_permute + +#endif diff --git a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys/internal-xoodoo.c b/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys/internal-xoodoo.c index f129833..59bb8bf 100644 --- a/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys/internal-xoodoo.c +++ b/xoodyak/Implementations/crypto_aead/xoodyakv1/rhys/internal-xoodoo.c @@ -22,6 +22,8 @@ #include "internal-xoodoo.h" +#if !defined(__AVR__) + void xoodoo_permute(xoodoo_state_t *state) { static uint16_t const rc[XOODOO_ROUNDS] = { @@ -160,3 +162,5 @@ void xoodoo_permute(xoodoo_state_t *state) le_store_word32(state->B + 44, x23); #endif } + +#endif /* !__AVR__ */ diff --git a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/aead-common.c b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/aead-common.c deleted file mode 100644 index 84fc53a..0000000 --- a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/aead-common.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
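The internal-xoodoo hunks above capture the essence of this merge: instead of keeping a parallel rhys-avr tree, the generated AVR assembly is added next to the portable code and the two are selected with an __AVR__ guard. Below is a tiny single-file illustration of that selection; xoodoo_backend is a hypothetical helper used only for this sketch, not part of the library.

#include <stdio.h>

/* In the real tree the first branch lives in internal-xoodoo-avr.S and the
 * second in internal-xoodoo.c; here both are collapsed into one file. */
#if defined(__AVR__)
static const char *xoodoo_backend(void) { return "AVR assembly"; }
#else
static const char *xoodoo_backend(void) { return "portable C"; }
#endif

int main(void)
{
    printf("xoodoo_permute backend: %s\n", xoodoo_backend());
    return 0;
}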
- */ - -#include "aead-common.h" - -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = (accum - 1) >> 8; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} - -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned size, int precheck) -{ - /* Set "accum" to -1 if the tags match, or 0 if they don't match */ - int accum = 0; - while (size > 0) { - accum |= (*tag1++ ^ *tag2++); - --size; - } - accum = ((accum - 1) >> 8) & precheck; - - /* Destroy the plaintext if the tag match failed */ - while (plaintext_len > 0) { - *plaintext++ &= accum; - --plaintext_len; - } - - /* If "accum" is 0, return -1, otherwise return 0 */ - return ~accum; -} diff --git a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/aead-common.h b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/aead-common.h deleted file mode 100644 index 2be95eb..0000000 --- a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/aead-common.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_AEAD_COMMON_H -#define LWCRYPTO_AEAD_COMMON_H - -#include - -/** - * \file aead-common.h - * \brief Definitions that are common across AEAD schemes. - * - * AEAD stands for "Authenticated Encryption with Associated Data". - * It is a standard API pattern for securely encrypting and - * authenticating packets of data. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Encrypts and authenticates a packet with an AEAD scheme. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. 
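A short note on the branch-free comparison in aead_check_tag above: accum accumulates the XOR of every pair of tag bytes, so it is zero only when all bytes match; (accum - 1) >> 8 then becomes -1 (all ones) on a match and 0 on a mismatch, and that mask either preserves or wipes the plaintext. Like the library code, the sketch below assumes arithmetic right shift of negative ints; the tag values are made up for illustration.

#include <assert.h>

/* Mirrors the mask construction in aead_check_tag for two 4-byte tags. */
static int tag_mask(const unsigned char *tag1, const unsigned char *tag2)
{
    int accum = 0;
    for (unsigned i = 0; i < 4; ++i)
        accum |= tag1[i] ^ tag2[i];
    return (accum - 1) >> 8;   /* -1 on a match, 0 on a mismatch */
}

int main(void)
{
    static const unsigned char a[4] = {0xDE, 0xAD, 0xBE, 0xEF};
    static const unsigned char b[4] = {0xDE, 0xAD, 0xBE, 0xEF};
    static const unsigned char c[4] = {0xDE, 0xAD, 0xBE, 0xEE};

    assert(tag_mask(a, b) == -1);  /* equal tags: mask keeps the plaintext */
    assert(tag_mask(a, c) == 0);   /* differing tags: mask wipes the plaintext */
    return 0;
}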
- * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - */ -typedef int (*aead_cipher_encrypt_t) - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with an AEAD scheme. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - normally not used by AEAD schemes. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet. - * \param k Points to the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. - */ -typedef int (*aead_cipher_decrypt_t) - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data. - * - * \param out Buffer to receive the hash output. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -typedef int (*aead_hash_t) - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a hashing operation. - * - * \param state Hash state to be initialized. - */ -typedef void (*aead_hash_init_t)(void *state); - -/** - * \brief Updates a hash state with more input data. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be incorporated into the state. - * \param inlen Length of the input data to be incorporated into the state. - */ -typedef void (*aead_hash_update_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Returns the final hash value from a hashing operation. - * - * \param Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - */ -typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); - -/** - * \brief Aborbs more input data into an XOF state. - * - * \param state XOF state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa ascon_xof_init(), ascon_xof_squeeze() - */ -typedef void (*aead_xof_absorb_t) - (void *state, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Squeezes output data from an XOF state. 
- * - * \param state XOF state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - */ -typedef void (*aead_xof_squeeze_t) - (void *state, unsigned char *out, unsigned long long outlen); - -/** - * \brief No special AEAD features. - */ -#define AEAD_FLAG_NONE 0x0000 - -/** - * \brief The natural byte order of the AEAD cipher is little-endian. - * - * If this flag is not present, then the natural byte order of the - * AEAD cipher should be assumed to be big-endian. - * - * The natural byte order may be useful when formatting packet sequence - * numbers as nonces. The application needs to know whether the sequence - * number should be packed into the leading or trailing bytes of the nonce. - */ -#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 - -/** - * \brief Meta-information about an AEAD cipher. - */ -typedef struct -{ - const char *name; /**< Name of the cipher */ - unsigned key_len; /**< Length of the key in bytes */ - unsigned nonce_len; /**< Length of the nonce in bytes */ - unsigned tag_len; /**< Length of the tag in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ - aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ - -} aead_cipher_t; - -/** - * \brief Meta-information about a hash algorithm that is related to an AEAD. - * - * Regular hash algorithms should provide the "hash", "init", "update", - * and "finalize" functions. Extensible Output Functions (XOF's) should - * proivde the "hash", "init", "absorb", and "squeeze" functions. - */ -typedef struct -{ - const char *name; /**< Name of the hash algorithm */ - size_t state_size; /**< Size of the incremental state structure */ - unsigned hash_len; /**< Length of the hash in bytes */ - unsigned flags; /**< Flags for extra features */ - aead_hash_t hash; /**< All in one hashing function */ - aead_hash_init_t init; /**< Incremental hash/XOF init function */ - aead_hash_update_t update; /**< Incremental hash update function */ - aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ - aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ - aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ - -} aead_hash_algorithm_t; - -/** - * \brief Check an authentication tag in constant time. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - */ -int aead_check_tag - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len); - -/** - * \brief Check an authentication tag in constant time with a previous check. - * - * \param plaintext Points to the plaintext data. - * \param plaintext_len Length of the plaintext in bytes. - * \param tag1 First tag to compare. - * \param tag2 Second tag to compare. - * \param tag_len Length of the tags in bytes. - * \param precheck Set to -1 if previous check succeeded or 0 if it failed. 
- * - * \return Returns -1 if the tag check failed or 0 if the check succeeded. - * - * If the tag check fails, then the \a plaintext will also be zeroed to - * prevent it from being used accidentally by the application when the - * ciphertext was invalid. - * - * This version can be used to incorporate other information about the - * correctness of the plaintext into the final result. - */ -int aead_check_tag_precheck - (unsigned char *plaintext, unsigned long long plaintext_len, - const unsigned char *tag1, const unsigned char *tag2, - unsigned tag_len, int precheck); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/api.h b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/api.h deleted file mode 100644 index ae8c7f6..0000000 --- a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/api.h +++ /dev/null @@ -1 +0,0 @@ -#define CRYPTO_BYTES 32 diff --git a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/hash.c b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/hash.c deleted file mode 100644 index 34d3b1c..0000000 --- a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/hash.c +++ /dev/null @@ -1,8 +0,0 @@ - -#include "xoodyak.h" - -int crypto_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - return xoodyak_hash(out, in, inlen); -} diff --git a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/internal-util.h b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/internal-util.h deleted file mode 100644 index e30166d..0000000 --- a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/internal-util.h +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_UTIL_H -#define LW_INTERNAL_UTIL_H - -#include - -/* Figure out how to inline functions using this C compiler */ -#if defined(__STDC__) && __STDC_VERSION__ >= 199901L -#define STATIC_INLINE static inline -#elif defined(__GNUC__) || defined(__clang__) -#define STATIC_INLINE static __inline__ -#else -#define STATIC_INLINE static -#endif - -/* Try to figure out whether the CPU is little-endian or big-endian. - * May need to modify this to include new compiler-specific defines. 
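/*
 * [Editor's sketch -- not part of the original patch.] The endian detection
 * above only matters where the code wants a fast path (for example the
 * LW_UTIL_LITTLE_ENDIAN word copies in internal-xoodoo.c); the le_/be_ load
 * and store macros defined just below read and write individual bytes and so
 * behave the same on any byte order.  This standalone snippet restates the
 * two 32-bit load expressions so it can be compiled on its own; it is
 * illustration only.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint8_t buf[4] = {0x01, 0x02, 0x03, 0x04};
    /* Same expressions as le_load_word32() and be_load_word32() below */
    uint32_t le = ((uint32_t)buf[3] << 24) | ((uint32_t)buf[2] << 16) |
                  ((uint32_t)buf[1] << 8)  |  (uint32_t)buf[0];
    uint32_t be = ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) |
                  ((uint32_t)buf[2] << 8)  |  (uint32_t)buf[3];
    printf("le_load_word32 -> 0x%08lx\n", (unsigned long)le);  /* 0x04030201 */
    printf("be_load_word32 -> 0x%08lx\n", (unsigned long)be);  /* 0x01020304 */
    return 0;
}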
- * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your - * compiler flags when you compile this library */ -#if defined(__x86_64) || defined(__x86_64__) || \ - defined(__i386) || defined(__i386__) || \ - defined(__AVR__) || defined(__arm) || defined(__arm__) || \ - defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ - defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ - defined(__LITTLE_ENDIAN__) -#define LW_UTIL_LITTLE_ENDIAN 1 -#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ - defined(__BIG_ENDIAN__) -/* Big endian */ -#else -#error "Cannot determine the endianess of this platform" -#endif - -/* Helper macros to load and store values while converting endian-ness */ - -/* Load a big-endian 32-bit word from a byte buffer */ -#define be_load_word32(ptr) \ - ((((uint32_t)((ptr)[0])) << 24) | \ - (((uint32_t)((ptr)[1])) << 16) | \ - (((uint32_t)((ptr)[2])) << 8) | \ - ((uint32_t)((ptr)[3]))) - -/* Store a big-endian 32-bit word into a byte buffer */ -#define be_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 24); \ - (ptr)[1] = (uint8_t)(_x >> 16); \ - (ptr)[2] = (uint8_t)(_x >> 8); \ - (ptr)[3] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 32-bit word from a byte buffer */ -#define le_load_word32(ptr) \ - ((((uint32_t)((ptr)[3])) << 24) | \ - (((uint32_t)((ptr)[2])) << 16) | \ - (((uint32_t)((ptr)[1])) << 8) | \ - ((uint32_t)((ptr)[0]))) - -/* Store a little-endian 32-bit word into a byte buffer */ -#define le_store_word32(ptr, x) \ - do { \ - uint32_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - } while (0) - -/* Load a big-endian 64-bit word from a byte buffer */ -#define be_load_word64(ptr) \ - ((((uint64_t)((ptr)[0])) << 56) | \ - (((uint64_t)((ptr)[1])) << 48) | \ - (((uint64_t)((ptr)[2])) << 40) | \ - (((uint64_t)((ptr)[3])) << 32) | \ - (((uint64_t)((ptr)[4])) << 24) | \ - (((uint64_t)((ptr)[5])) << 16) | \ - (((uint64_t)((ptr)[6])) << 8) | \ - ((uint64_t)((ptr)[7]))) - -/* Store a big-endian 64-bit word into a byte buffer */ -#define be_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 56); \ - (ptr)[1] = (uint8_t)(_x >> 48); \ - (ptr)[2] = (uint8_t)(_x >> 40); \ - (ptr)[3] = (uint8_t)(_x >> 32); \ - (ptr)[4] = (uint8_t)(_x >> 24); \ - (ptr)[5] = (uint8_t)(_x >> 16); \ - (ptr)[6] = (uint8_t)(_x >> 8); \ - (ptr)[7] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 64-bit word from a byte buffer */ -#define le_load_word64(ptr) \ - ((((uint64_t)((ptr)[7])) << 56) | \ - (((uint64_t)((ptr)[6])) << 48) | \ - (((uint64_t)((ptr)[5])) << 40) | \ - (((uint64_t)((ptr)[4])) << 32) | \ - (((uint64_t)((ptr)[3])) << 24) | \ - (((uint64_t)((ptr)[2])) << 16) | \ - (((uint64_t)((ptr)[1])) << 8) | \ - ((uint64_t)((ptr)[0]))) - -/* Store a little-endian 64-bit word into a byte buffer */ -#define le_store_word64(ptr, x) \ - do { \ - uint64_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - (ptr)[2] = (uint8_t)(_x >> 16); \ - (ptr)[3] = (uint8_t)(_x >> 24); \ - (ptr)[4] = (uint8_t)(_x >> 32); \ - (ptr)[5] = (uint8_t)(_x >> 40); \ - (ptr)[6] = (uint8_t)(_x >> 48); \ - (ptr)[7] = (uint8_t)(_x >> 56); \ - } while (0) - -/* Load a big-endian 16-bit word from a byte buffer */ -#define be_load_word16(ptr) \ - ((((uint16_t)((ptr)[0])) << 8) | \ - ((uint16_t)((ptr)[1]))) - -/* Store a 
big-endian 16-bit word into a byte buffer */ -#define be_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)(_x >> 8); \ - (ptr)[1] = (uint8_t)_x; \ - } while (0) - -/* Load a little-endian 16-bit word from a byte buffer */ -#define le_load_word16(ptr) \ - ((((uint16_t)((ptr)[1])) << 8) | \ - ((uint16_t)((ptr)[0]))) - -/* Store a little-endian 16-bit word into a byte buffer */ -#define le_store_word16(ptr, x) \ - do { \ - uint16_t _x = (x); \ - (ptr)[0] = (uint8_t)_x; \ - (ptr)[1] = (uint8_t)(_x >> 8); \ - } while (0) - -/* XOR a source byte buffer against a destination */ -#define lw_xor_block(dest, src, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ ^= *_src++; \ - --_len; \ - } \ - } while (0) - -/* XOR two source byte buffers and put the result in a destination buffer */ -#define lw_xor_block_2_src(dest, src1, src2, len) \ - do { \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest++ = *_src1++ ^ *_src2++; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time */ -#define lw_xor_block_2_dest(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - *_dest2++ = (*_dest++ ^= *_src++); \ - --_len; \ - } \ - } while (0) - -/* XOR two byte buffers and write to a destination which at the same - * time copying the contents of src2 to dest2 */ -#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src1 = (src1); \ - const unsigned char *_src2 = (src2); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src2++; \ - *_dest2++ = _temp; \ - *_dest++ = *_src1++ ^ _temp; \ - --_len; \ - } \ - } while (0) - -/* XOR a source byte buffer against a destination and write to another - * destination at the same time. This version swaps the source value - * into the "dest" buffer */ -#define lw_xor_block_swap(dest2, dest, src, len) \ - do { \ - unsigned char *_dest2 = (dest2); \ - unsigned char *_dest = (dest); \ - const unsigned char *_src = (src); \ - unsigned _len = (len); \ - while (_len > 0) { \ - unsigned char _temp = *_src++; \ - *_dest2++ = *_dest ^ _temp; \ - *_dest++ = _temp; \ - --_len; \ - } \ - } while (0) - -/* Rotation functions need to be optimised for best performance on AVR. - * The most efficient rotations are where the number of bits is 1 or a - * multiple of 8, so we compose the efficient rotations to produce all - * other rotation counts of interest. */ - -#if defined(__AVR__) -#define LW_CRYPTO_ROTATE32_COMPOSED 1 -#else -#define LW_CRYPTO_ROTATE32_COMPOSED 0 -#endif - -/* Rotation macros for 32-bit arguments */ - -/* Generic left rotate */ -#define leftRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (32 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate(a, bits) \ - (__extension__ ({ \ - uint32_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (32 - (bits))); \ - })) - -#if !LW_CRYPTO_ROTATE32_COMPOSED - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1(a) (leftRotate((a), 1)) -#define leftRotate2(a) (leftRotate((a), 2)) -#define leftRotate3(a) (leftRotate((a), 3)) -#define leftRotate4(a) (leftRotate((a), 4)) -#define leftRotate5(a) (leftRotate((a), 5)) -#define leftRotate6(a) (leftRotate((a), 6)) -#define leftRotate7(a) (leftRotate((a), 7)) -#define leftRotate8(a) (leftRotate((a), 8)) -#define leftRotate9(a) (leftRotate((a), 9)) -#define leftRotate10(a) (leftRotate((a), 10)) -#define leftRotate11(a) (leftRotate((a), 11)) -#define leftRotate12(a) (leftRotate((a), 12)) -#define leftRotate13(a) (leftRotate((a), 13)) -#define leftRotate14(a) (leftRotate((a), 14)) -#define leftRotate15(a) (leftRotate((a), 15)) -#define leftRotate16(a) (leftRotate((a), 16)) -#define leftRotate17(a) (leftRotate((a), 17)) -#define leftRotate18(a) (leftRotate((a), 18)) -#define leftRotate19(a) (leftRotate((a), 19)) -#define leftRotate20(a) (leftRotate((a), 20)) -#define leftRotate21(a) (leftRotate((a), 21)) -#define leftRotate22(a) (leftRotate((a), 22)) -#define leftRotate23(a) (leftRotate((a), 23)) -#define leftRotate24(a) (leftRotate((a), 24)) -#define leftRotate25(a) (leftRotate((a), 25)) -#define leftRotate26(a) (leftRotate((a), 26)) -#define leftRotate27(a) (leftRotate((a), 27)) -#define leftRotate28(a) (leftRotate((a), 28)) -#define leftRotate29(a) (leftRotate((a), 29)) -#define leftRotate30(a) (leftRotate((a), 30)) -#define leftRotate31(a) (leftRotate((a), 31)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1(a) (rightRotate((a), 1)) -#define rightRotate2(a) (rightRotate((a), 2)) -#define rightRotate3(a) (rightRotate((a), 3)) -#define rightRotate4(a) (rightRotate((a), 4)) -#define rightRotate5(a) (rightRotate((a), 5)) -#define rightRotate6(a) (rightRotate((a), 6)) -#define rightRotate7(a) (rightRotate((a), 7)) -#define rightRotate8(a) (rightRotate((a), 8)) -#define rightRotate9(a) (rightRotate((a), 9)) -#define rightRotate10(a) (rightRotate((a), 10)) -#define rightRotate11(a) (rightRotate((a), 11)) -#define rightRotate12(a) (rightRotate((a), 12)) -#define rightRotate13(a) (rightRotate((a), 13)) -#define rightRotate14(a) (rightRotate((a), 14)) -#define rightRotate15(a) (rightRotate((a), 15)) -#define rightRotate16(a) (rightRotate((a), 16)) -#define rightRotate17(a) (rightRotate((a), 17)) -#define rightRotate18(a) (rightRotate((a), 18)) -#define rightRotate19(a) (rightRotate((a), 19)) -#define rightRotate20(a) (rightRotate((a), 20)) -#define rightRotate21(a) (rightRotate((a), 21)) -#define rightRotate22(a) (rightRotate((a), 22)) -#define rightRotate23(a) (rightRotate((a), 23)) -#define rightRotate24(a) (rightRotate((a), 24)) -#define rightRotate25(a) (rightRotate((a), 25)) -#define rightRotate26(a) (rightRotate((a), 26)) -#define rightRotate27(a) (rightRotate((a), 27)) -#define rightRotate28(a) (rightRotate((a), 28)) -#define rightRotate29(a) (rightRotate((a), 29)) -#define rightRotate30(a) (rightRotate((a), 30)) -#define rightRotate31(a) (rightRotate((a), 31)) - -#else /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Composed rotation macros where 1 and 8 are fast, but others are slow */ - -/* Left rotate by 1 */ -#define leftRotate1(a) (leftRotate((a), 1)) - -/* Left rotate by 2 */ -#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) - -/* Left rotate by 3 */ -#define leftRotate3(a) 
(leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) - -/* Left rotate by 4 */ -#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 5: Rotate left by 8, then right by 3 */ -#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 6: Rotate left by 8, then right by 2 */ -#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 7: Rotate left by 8, then right by 1 */ -#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 8 */ -#define leftRotate8(a) (leftRotate((a), 8)) - -/* Left rotate by 9: Rotate left by 8, then left by 1 */ -#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) - -/* Left rotate by 10: Rotate left by 8, then left by 2 */ -#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) - -/* Left rotate by 11: Rotate left by 8, then left by 3 */ -#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) - -/* Left rotate by 12: Rotate left by 16, then right by 4 */ -#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 13: Rotate left by 16, then right by 3 */ -#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 14: Rotate left by 16, then right by 2 */ -#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 15: Rotate left by 16, then right by 1 */ -#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 16 */ -#define leftRotate16(a) (leftRotate((a), 16)) - -/* Left rotate by 17: Rotate left by 16, then left by 1 */ -#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) - -/* Left rotate by 18: Rotate left by 16, then left by 2 */ -#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) - -/* Left rotate by 19: Rotate left by 16, then left by 3 */ -#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) - -/* Left rotate by 20: Rotate left by 16, then left by 4 */ -#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) - -/* Left rotate by 21: Rotate left by 24, then right by 3 */ -#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 22: Rotate left by 24, then right by 2 */ -#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 23: Rotate left by 24, then right by 1 */ -#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 24 */ -#define leftRotate24(a) (leftRotate((a), 24)) - -/* Left rotate by 25: Rotate left by 24, then left by 1 */ -#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) - -/* Left rotate by 26: Rotate left by 24, then left by 2 */ -#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) - -/* Left rotate by 27: Rotate left by 24, then left by 3 */ -#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) - -/* Left rotate by 28: Rotate right by 4 */ -#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) - -/* Left rotate by 29: Rotate right by 3 */ -#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) - -/* Left rotate by 30: Rotate right by 2 */ 
-#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) - -/* Left rotate by 31: Rotate right by 1 */ -#define leftRotate31(a) (rightRotate((a), 1)) - -/* Define the 32-bit right rotations in terms of left rotations */ -#define rightRotate1(a) (leftRotate31((a))) -#define rightRotate2(a) (leftRotate30((a))) -#define rightRotate3(a) (leftRotate29((a))) -#define rightRotate4(a) (leftRotate28((a))) -#define rightRotate5(a) (leftRotate27((a))) -#define rightRotate6(a) (leftRotate26((a))) -#define rightRotate7(a) (leftRotate25((a))) -#define rightRotate8(a) (leftRotate24((a))) -#define rightRotate9(a) (leftRotate23((a))) -#define rightRotate10(a) (leftRotate22((a))) -#define rightRotate11(a) (leftRotate21((a))) -#define rightRotate12(a) (leftRotate20((a))) -#define rightRotate13(a) (leftRotate19((a))) -#define rightRotate14(a) (leftRotate18((a))) -#define rightRotate15(a) (leftRotate17((a))) -#define rightRotate16(a) (leftRotate16((a))) -#define rightRotate17(a) (leftRotate15((a))) -#define rightRotate18(a) (leftRotate14((a))) -#define rightRotate19(a) (leftRotate13((a))) -#define rightRotate20(a) (leftRotate12((a))) -#define rightRotate21(a) (leftRotate11((a))) -#define rightRotate22(a) (leftRotate10((a))) -#define rightRotate23(a) (leftRotate9((a))) -#define rightRotate24(a) (leftRotate8((a))) -#define rightRotate25(a) (leftRotate7((a))) -#define rightRotate26(a) (leftRotate6((a))) -#define rightRotate27(a) (leftRotate5((a))) -#define rightRotate28(a) (leftRotate4((a))) -#define rightRotate29(a) (leftRotate3((a))) -#define rightRotate30(a) (leftRotate2((a))) -#define rightRotate31(a) (leftRotate1((a))) - -#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ - -/* Rotation macros for 64-bit arguments */ - -/* Generic left rotate */ -#define leftRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (64 - (bits))); \ - })) - -/* Generic right rotate */ -#define rightRotate_64(a, bits) \ - (__extension__ ({ \ - uint64_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (64 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_64(a) (leftRotate_64((a), 1)) -#define leftRotate2_64(a) (leftRotate_64((a), 2)) -#define leftRotate3_64(a) (leftRotate_64((a), 3)) -#define leftRotate4_64(a) (leftRotate_64((a), 4)) -#define leftRotate5_64(a) (leftRotate_64((a), 5)) -#define leftRotate6_64(a) (leftRotate_64((a), 6)) -#define leftRotate7_64(a) (leftRotate_64((a), 7)) -#define leftRotate8_64(a) (leftRotate_64((a), 8)) -#define leftRotate9_64(a) (leftRotate_64((a), 9)) -#define leftRotate10_64(a) (leftRotate_64((a), 10)) -#define leftRotate11_64(a) (leftRotate_64((a), 11)) -#define leftRotate12_64(a) (leftRotate_64((a), 12)) -#define leftRotate13_64(a) (leftRotate_64((a), 13)) -#define leftRotate14_64(a) (leftRotate_64((a), 14)) -#define leftRotate15_64(a) (leftRotate_64((a), 15)) -#define leftRotate16_64(a) (leftRotate_64((a), 16)) -#define leftRotate17_64(a) (leftRotate_64((a), 17)) -#define leftRotate18_64(a) (leftRotate_64((a), 18)) -#define leftRotate19_64(a) (leftRotate_64((a), 19)) -#define leftRotate20_64(a) (leftRotate_64((a), 20)) -#define leftRotate21_64(a) (leftRotate_64((a), 21)) -#define leftRotate22_64(a) (leftRotate_64((a), 22)) -#define leftRotate23_64(a) (leftRotate_64((a), 23)) -#define leftRotate24_64(a) (leftRotate_64((a), 24)) -#define leftRotate25_64(a) (leftRotate_64((a), 25)) -#define leftRotate26_64(a) (leftRotate_64((a), 26)) -#define leftRotate27_64(a) (leftRotate_64((a), 27)) -#define leftRotate28_64(a) (leftRotate_64((a), 28)) -#define leftRotate29_64(a) (leftRotate_64((a), 29)) -#define leftRotate30_64(a) (leftRotate_64((a), 30)) -#define leftRotate31_64(a) (leftRotate_64((a), 31)) -#define leftRotate32_64(a) (leftRotate_64((a), 32)) -#define leftRotate33_64(a) (leftRotate_64((a), 33)) -#define leftRotate34_64(a) (leftRotate_64((a), 34)) -#define leftRotate35_64(a) (leftRotate_64((a), 35)) -#define leftRotate36_64(a) (leftRotate_64((a), 36)) -#define leftRotate37_64(a) (leftRotate_64((a), 37)) -#define leftRotate38_64(a) (leftRotate_64((a), 38)) -#define leftRotate39_64(a) (leftRotate_64((a), 39)) -#define leftRotate40_64(a) (leftRotate_64((a), 40)) -#define leftRotate41_64(a) (leftRotate_64((a), 41)) -#define leftRotate42_64(a) (leftRotate_64((a), 42)) -#define leftRotate43_64(a) (leftRotate_64((a), 43)) -#define leftRotate44_64(a) (leftRotate_64((a), 44)) -#define leftRotate45_64(a) (leftRotate_64((a), 45)) -#define leftRotate46_64(a) (leftRotate_64((a), 46)) -#define leftRotate47_64(a) (leftRotate_64((a), 47)) -#define leftRotate48_64(a) (leftRotate_64((a), 48)) -#define leftRotate49_64(a) (leftRotate_64((a), 49)) -#define leftRotate50_64(a) (leftRotate_64((a), 50)) -#define leftRotate51_64(a) (leftRotate_64((a), 51)) -#define leftRotate52_64(a) (leftRotate_64((a), 52)) -#define leftRotate53_64(a) (leftRotate_64((a), 53)) -#define leftRotate54_64(a) (leftRotate_64((a), 54)) -#define leftRotate55_64(a) (leftRotate_64((a), 55)) -#define leftRotate56_64(a) (leftRotate_64((a), 56)) -#define leftRotate57_64(a) (leftRotate_64((a), 57)) -#define leftRotate58_64(a) (leftRotate_64((a), 58)) -#define leftRotate59_64(a) (leftRotate_64((a), 59)) -#define leftRotate60_64(a) (leftRotate_64((a), 60)) -#define leftRotate61_64(a) (leftRotate_64((a), 61)) -#define leftRotate62_64(a) (leftRotate_64((a), 62)) -#define leftRotate63_64(a) (leftRotate_64((a), 63)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_64(a) (rightRotate_64((a), 1)) -#define rightRotate2_64(a) (rightRotate_64((a), 2)) -#define rightRotate3_64(a) (rightRotate_64((a), 3)) -#define rightRotate4_64(a) (rightRotate_64((a), 4)) -#define rightRotate5_64(a) (rightRotate_64((a), 5)) -#define rightRotate6_64(a) (rightRotate_64((a), 6)) -#define rightRotate7_64(a) (rightRotate_64((a), 7)) -#define rightRotate8_64(a) (rightRotate_64((a), 8)) -#define rightRotate9_64(a) (rightRotate_64((a), 9)) -#define rightRotate10_64(a) (rightRotate_64((a), 10)) -#define rightRotate11_64(a) (rightRotate_64((a), 11)) -#define rightRotate12_64(a) (rightRotate_64((a), 12)) -#define rightRotate13_64(a) (rightRotate_64((a), 13)) -#define rightRotate14_64(a) (rightRotate_64((a), 14)) -#define rightRotate15_64(a) (rightRotate_64((a), 15)) -#define rightRotate16_64(a) (rightRotate_64((a), 16)) -#define rightRotate17_64(a) (rightRotate_64((a), 17)) -#define rightRotate18_64(a) (rightRotate_64((a), 18)) -#define rightRotate19_64(a) (rightRotate_64((a), 19)) -#define rightRotate20_64(a) (rightRotate_64((a), 20)) -#define rightRotate21_64(a) (rightRotate_64((a), 21)) -#define rightRotate22_64(a) (rightRotate_64((a), 22)) -#define rightRotate23_64(a) (rightRotate_64((a), 23)) -#define rightRotate24_64(a) (rightRotate_64((a), 24)) -#define rightRotate25_64(a) (rightRotate_64((a), 25)) -#define rightRotate26_64(a) (rightRotate_64((a), 26)) -#define rightRotate27_64(a) (rightRotate_64((a), 27)) -#define rightRotate28_64(a) (rightRotate_64((a), 28)) -#define rightRotate29_64(a) (rightRotate_64((a), 29)) -#define rightRotate30_64(a) (rightRotate_64((a), 30)) -#define rightRotate31_64(a) (rightRotate_64((a), 31)) -#define rightRotate32_64(a) (rightRotate_64((a), 32)) -#define rightRotate33_64(a) (rightRotate_64((a), 33)) -#define rightRotate34_64(a) (rightRotate_64((a), 34)) -#define rightRotate35_64(a) (rightRotate_64((a), 35)) -#define rightRotate36_64(a) (rightRotate_64((a), 36)) -#define rightRotate37_64(a) (rightRotate_64((a), 37)) -#define rightRotate38_64(a) (rightRotate_64((a), 38)) -#define rightRotate39_64(a) (rightRotate_64((a), 39)) -#define rightRotate40_64(a) (rightRotate_64((a), 40)) -#define rightRotate41_64(a) (rightRotate_64((a), 41)) -#define rightRotate42_64(a) (rightRotate_64((a), 42)) -#define rightRotate43_64(a) (rightRotate_64((a), 43)) -#define rightRotate44_64(a) (rightRotate_64((a), 44)) -#define rightRotate45_64(a) (rightRotate_64((a), 45)) -#define rightRotate46_64(a) (rightRotate_64((a), 46)) -#define rightRotate47_64(a) (rightRotate_64((a), 47)) -#define rightRotate48_64(a) (rightRotate_64((a), 48)) -#define rightRotate49_64(a) (rightRotate_64((a), 49)) -#define rightRotate50_64(a) (rightRotate_64((a), 50)) -#define rightRotate51_64(a) (rightRotate_64((a), 51)) -#define rightRotate52_64(a) (rightRotate_64((a), 52)) -#define rightRotate53_64(a) (rightRotate_64((a), 53)) -#define rightRotate54_64(a) (rightRotate_64((a), 54)) -#define rightRotate55_64(a) (rightRotate_64((a), 55)) -#define rightRotate56_64(a) (rightRotate_64((a), 56)) -#define rightRotate57_64(a) (rightRotate_64((a), 57)) -#define rightRotate58_64(a) (rightRotate_64((a), 58)) -#define rightRotate59_64(a) (rightRotate_64((a), 59)) -#define rightRotate60_64(a) (rightRotate_64((a), 60)) -#define rightRotate61_64(a) (rightRotate_64((a), 61)) -#define rightRotate62_64(a) (rightRotate_64((a), 62)) -#define rightRotate63_64(a) (rightRotate_64((a), 63)) - -/* 
Rotate a 16-bit value left by a number of bits */ -#define leftRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (16 - (bits))); \ - })) - -/* Rotate a 16-bit value right by a number of bits */ -#define rightRotate_16(a, bits) \ - (__extension__ ({ \ - uint16_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (16 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_16(a) (leftRotate_16((a), 1)) -#define leftRotate2_16(a) (leftRotate_16((a), 2)) -#define leftRotate3_16(a) (leftRotate_16((a), 3)) -#define leftRotate4_16(a) (leftRotate_16((a), 4)) -#define leftRotate5_16(a) (leftRotate_16((a), 5)) -#define leftRotate6_16(a) (leftRotate_16((a), 6)) -#define leftRotate7_16(a) (leftRotate_16((a), 7)) -#define leftRotate8_16(a) (leftRotate_16((a), 8)) -#define leftRotate9_16(a) (leftRotate_16((a), 9)) -#define leftRotate10_16(a) (leftRotate_16((a), 10)) -#define leftRotate11_16(a) (leftRotate_16((a), 11)) -#define leftRotate12_16(a) (leftRotate_16((a), 12)) -#define leftRotate13_16(a) (leftRotate_16((a), 13)) -#define leftRotate14_16(a) (leftRotate_16((a), 14)) -#define leftRotate15_16(a) (leftRotate_16((a), 15)) - -/* Right rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_16(a) (rightRotate_16((a), 1)) -#define rightRotate2_16(a) (rightRotate_16((a), 2)) -#define rightRotate3_16(a) (rightRotate_16((a), 3)) -#define rightRotate4_16(a) (rightRotate_16((a), 4)) -#define rightRotate5_16(a) (rightRotate_16((a), 5)) -#define rightRotate6_16(a) (rightRotate_16((a), 6)) -#define rightRotate7_16(a) (rightRotate_16((a), 7)) -#define rightRotate8_16(a) (rightRotate_16((a), 8)) -#define rightRotate9_16(a) (rightRotate_16((a), 9)) -#define rightRotate10_16(a) (rightRotate_16((a), 10)) -#define rightRotate11_16(a) (rightRotate_16((a), 11)) -#define rightRotate12_16(a) (rightRotate_16((a), 12)) -#define rightRotate13_16(a) (rightRotate_16((a), 13)) -#define rightRotate14_16(a) (rightRotate_16((a), 14)) -#define rightRotate15_16(a) (rightRotate_16((a), 15)) - -/* Rotate an 8-bit value left by a number of bits */ -#define leftRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp << (bits)) | (_temp >> (8 - (bits))); \ - })) - -/* Rotate an 8-bit value right by a number of bits */ -#define rightRotate_8(a, bits) \ - (__extension__ ({ \ - uint8_t _temp = (a); \ - (_temp >> (bits)) | (_temp << (8 - (bits))); \ - })) - -/* Left rotate by a specific number of bits. These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define leftRotate1_8(a) (leftRotate_8((a), 1)) -#define leftRotate2_8(a) (leftRotate_8((a), 2)) -#define leftRotate3_8(a) (leftRotate_8((a), 3)) -#define leftRotate4_8(a) (leftRotate_8((a), 4)) -#define leftRotate5_8(a) (leftRotate_8((a), 5)) -#define leftRotate6_8(a) (leftRotate_8((a), 6)) -#define leftRotate7_8(a) (leftRotate_8((a), 7)) - -/* Right rotate by a specific number of bits. 
These macros may be replaced - * with more efficient ones on platforms that lack a barrel shifter */ -#define rightRotate1_8(a) (rightRotate_8((a), 1)) -#define rightRotate2_8(a) (rightRotate_8((a), 2)) -#define rightRotate3_8(a) (rightRotate_8((a), 3)) -#define rightRotate4_8(a) (rightRotate_8((a), 4)) -#define rightRotate5_8(a) (rightRotate_8((a), 5)) -#define rightRotate6_8(a) (rightRotate_8((a), 6)) -#define rightRotate7_8(a) (rightRotate_8((a), 7)) - -#endif diff --git a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/internal-xoodoo-avr.S b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/internal-xoodoo-avr.S deleted file mode 100644 index 629c19d..0000000 --- a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/internal-xoodoo-avr.S +++ /dev/null @@ -1,935 +0,0 @@ -#if defined(__AVR__) -#include -/* Automatically generated - do not edit */ - - .text -.global xoodoo_permute - .type xoodoo_permute, @function -xoodoo_permute: - push r28 - push r29 - push r2 - push r3 - push r4 - push r5 - push r6 - push r7 - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - movw r30,r24 -.L__stack_usage = 16 - ldi r18,88 - mov r19,r1 - rcall 34f - ldi r18,56 - rcall 34f - ldi r18,192 - ldi r19,3 - rcall 34f - ldi r18,208 - mov r19,r1 - rcall 34f - ldi r18,32 - ldi r19,1 - rcall 34f - ldi r18,20 - mov r19,r1 - rcall 34f - ldi r18,96 - rcall 34f - ldi r18,44 - rcall 34f - ldi r18,128 - ldi r19,3 - rcall 34f - ldi r18,240 - mov r19,r1 - rcall 34f - ldi r18,160 - ldi r19,1 - rcall 34f - ldi r18,18 - mov r19,r1 - rcall 34f - rjmp 888f -34: - ldd r6,Z+12 - ldd r7,Z+13 - ldd r8,Z+14 - ldd r9,Z+15 - ldd r0,Z+28 - eor r6,r0 - ldd r0,Z+29 - eor r7,r0 - ldd r0,Z+30 - eor r8,r0 - ldd r0,Z+31 - eor r9,r0 - ldd r0,Z+44 - eor r6,r0 - ldd r0,Z+45 - eor r7,r0 - ldd r0,Z+46 - eor r8,r0 - ldd r0,Z+47 - eor r9,r0 - ld r20,Z - ldd r21,Z+1 - ldd r22,Z+2 - ldd r23,Z+3 - ldd r26,Z+16 - ldd r27,Z+17 - ldd r28,Z+18 - ldd r29,Z+19 - ldd r2,Z+32 - ldd r3,Z+33 - ldd r4,Z+34 - ldd r5,Z+35 - movw r10,r20 - movw r12,r22 - eor r10,r26 - eor r11,r27 - eor r12,r28 - eor r13,r29 - eor r10,r2 - eor r11,r3 - eor r12,r4 - eor r13,r5 - movw r14,r6 - movw r24,r8 - mov r0,r1 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - or r9,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r9,r24 - eor r6,r25 - eor r7,r14 - eor r8,r15 - movw r14,r10 - movw r24,r12 - mov r0,r1 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - or r13,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r13,r24 - eor r10,r25 - eor r11,r14 - eor r12,r15 - eor r20,r9 - eor r21,r6 - eor r22,r7 - eor r23,r8 - eor r26,r9 - eor r27,r6 - eor r28,r7 - eor r29,r8 - eor r2,r9 - eor r3,r6 - eor r4,r7 - eor r5,r8 - st Z,r20 - std Z+1,r21 - std Z+2,r22 - std Z+3,r23 - std Z+16,r26 - std Z+17,r27 - std Z+18,r28 - std Z+19,r29 - std Z+32,r2 - std Z+33,r3 - std Z+34,r4 - std Z+35,r5 - ldd r20,Z+4 - ldd r21,Z+5 - ldd r22,Z+6 - ldd r23,Z+7 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r28,Z+22 - ldd r29,Z+23 - ldd r2,Z+36 - ldd r3,Z+37 - ldd r4,Z+38 - ldd r5,Z+39 - movw r6,r20 - movw r8,r22 - eor r6,r26 - eor r7,r27 - eor r8,r28 - eor r9,r29 - eor r6,r2 - eor r7,r3 - eor r8,r4 - eor r9,r5 - movw r14,r6 - movw 
r24,r8 - mov r0,r1 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - lsr r9 - ror r8 - ror r7 - ror r6 - ror r0 - or r9,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r9,r24 - eor r6,r25 - eor r7,r14 - eor r8,r15 - eor r20,r13 - eor r21,r10 - eor r22,r11 - eor r23,r12 - eor r26,r13 - eor r27,r10 - eor r28,r11 - eor r29,r12 - eor r2,r13 - eor r3,r10 - eor r4,r11 - eor r5,r12 - std Z+4,r20 - std Z+5,r21 - std Z+6,r22 - std Z+7,r23 - std Z+20,r26 - std Z+21,r27 - std Z+22,r28 - std Z+23,r29 - std Z+36,r2 - std Z+37,r3 - std Z+38,r4 - std Z+39,r5 - ldd r20,Z+8 - ldd r21,Z+9 - ldd r22,Z+10 - ldd r23,Z+11 - ldd r26,Z+24 - ldd r27,Z+25 - ldd r28,Z+26 - ldd r29,Z+27 - ldd r2,Z+40 - ldd r3,Z+41 - ldd r4,Z+42 - ldd r5,Z+43 - movw r10,r20 - movw r12,r22 - eor r10,r26 - eor r11,r27 - eor r12,r28 - eor r13,r29 - eor r10,r2 - eor r11,r3 - eor r12,r4 - eor r13,r5 - movw r14,r10 - movw r24,r12 - mov r0,r1 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - lsr r13 - ror r12 - ror r11 - ror r10 - ror r0 - or r13,r0 - mov r0,r1 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - lsr r25 - ror r24 - ror r15 - ror r14 - ror r0 - or r25,r0 - eor r13,r24 - eor r10,r25 - eor r11,r14 - eor r12,r15 - eor r20,r9 - eor r21,r6 - eor r22,r7 - eor r23,r8 - eor r26,r9 - eor r27,r6 - eor r28,r7 - eor r29,r8 - eor r2,r9 - eor r3,r6 - eor r4,r7 - eor r5,r8 - std Z+8,r20 - std Z+9,r21 - std Z+10,r22 - std Z+11,r23 - std Z+24,r26 - std Z+25,r27 - std Z+26,r28 - std Z+27,r29 - std Z+40,r2 - std Z+41,r3 - std Z+42,r4 - std Z+43,r5 - ldd r0,Z+12 - eor r0,r13 - std Z+12,r0 - ldd r0,Z+13 - eor r0,r10 - std Z+13,r0 - ldd r0,Z+14 - eor r0,r11 - std Z+14,r0 - ldd r0,Z+15 - eor r0,r12 - std Z+15,r0 - ldd r6,Z+28 - ldd r7,Z+29 - ldd r8,Z+30 - ldd r9,Z+31 - eor r6,r13 - eor r7,r10 - eor r8,r11 - eor r9,r12 - ldd r14,Z+44 - ldd r15,Z+45 - ldd r24,Z+46 - ldd r25,Z+47 - eor r14,r13 - eor r15,r10 - eor r24,r11 - eor r25,r12 - ldd r10,Z+24 - ldd r11,Z+25 - ldd r12,Z+26 - ldd r13,Z+27 - std Z+28,r10 - std Z+29,r11 - std Z+30,r12 - std Z+31,r13 - ldd r10,Z+20 - ldd r11,Z+21 - ldd r12,Z+22 - ldd r13,Z+23 - std Z+24,r10 - std Z+25,r11 - std Z+26,r12 - std Z+27,r13 - ldd r10,Z+16 - ldd r11,Z+17 - ldd r12,Z+18 - ldd r13,Z+19 - std Z+20,r10 - std Z+21,r11 - std Z+22,r12 - std Z+23,r13 - std Z+16,r6 - std Z+17,r7 - std Z+18,r8 - std Z+19,r9 - ldd r6,Z+32 - ldd r7,Z+33 - ldd r8,Z+34 - ldd r9,Z+35 - mov r0,r9 - mov r9,r8 - mov r8,r7 - mov r7,r6 - mov r6,r0 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - std Z+32,r6 - std Z+33,r7 - std Z+34,r8 - std Z+35,r9 - ldd r6,Z+36 - ldd r7,Z+37 - ldd r8,Z+38 - ldd r9,Z+39 - mov r0,r9 - mov r9,r8 - mov r8,r7 - mov r7,r6 - mov r6,r0 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - std Z+36,r6 - std Z+37,r7 - std Z+38,r8 - std Z+39,r9 - ldd r6,Z+40 - ldd r7,Z+41 - ldd r8,Z+42 - ldd r9,Z+43 - mov r0,r9 - mov r9,r8 - mov r8,r7 - mov r7,r6 - mov r6,r0 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - std Z+40,r6 - std Z+41,r7 - std Z+42,r8 - std Z+43,r9 - mov r0,r25 - mov r25,r24 - mov r24,r15 - mov r15,r14 - mov r14,r0 - lsl r14 - rol r15 - rol r24 - rol r25 - 
adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - lsl r14 - rol r15 - rol r24 - rol r25 - adc r14,r1 - std Z+44,r14 - std Z+45,r15 - std Z+46,r24 - std Z+47,r25 - ld r20,Z - ldd r21,Z+1 - ldd r22,Z+2 - ldd r23,Z+3 - eor r20,r18 - eor r21,r19 - ldd r26,Z+16 - ldd r27,Z+17 - ldd r28,Z+18 - ldd r29,Z+19 - ldd r2,Z+32 - ldd r3,Z+33 - ldd r4,Z+34 - ldd r5,Z+35 - movw r6,r2 - movw r8,r4 - mov r0,r26 - com r0 - and r6,r0 - mov r0,r27 - com r0 - and r7,r0 - mov r0,r28 - com r0 - and r8,r0 - mov r0,r29 - com r0 - and r9,r0 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - st Z,r20 - std Z+1,r21 - std Z+2,r22 - std Z+3,r23 - movw r6,r20 - movw r8,r22 - mov r0,r2 - com r0 - and r6,r0 - mov r0,r3 - com r0 - and r7,r0 - mov r0,r4 - com r0 - and r8,r0 - mov r0,r5 - com r0 - and r9,r0 - eor r26,r6 - eor r27,r7 - eor r28,r8 - eor r29,r9 - std Z+16,r26 - std Z+17,r27 - std Z+18,r28 - std Z+19,r29 - mov r0,r20 - com r0 - and r26,r0 - mov r0,r21 - com r0 - and r27,r0 - mov r0,r22 - com r0 - and r28,r0 - mov r0,r23 - com r0 - and r29,r0 - eor r2,r26 - eor r3,r27 - eor r4,r28 - eor r5,r29 - std Z+32,r2 - std Z+33,r3 - std Z+34,r4 - std Z+35,r5 - ldd r20,Z+4 - ldd r21,Z+5 - ldd r22,Z+6 - ldd r23,Z+7 - ldd r26,Z+20 - ldd r27,Z+21 - ldd r28,Z+22 - ldd r29,Z+23 - ldd r2,Z+36 - ldd r3,Z+37 - ldd r4,Z+38 - ldd r5,Z+39 - movw r6,r2 - movw r8,r4 - mov r0,r26 - com r0 - and r6,r0 - mov r0,r27 - com r0 - and r7,r0 - mov r0,r28 - com r0 - and r8,r0 - mov r0,r29 - com r0 - and r9,r0 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - std Z+4,r20 - std Z+5,r21 - std Z+6,r22 - std Z+7,r23 - movw r6,r20 - movw r8,r22 - mov r0,r2 - com r0 - and r6,r0 - mov r0,r3 - com r0 - and r7,r0 - mov r0,r4 - com r0 - and r8,r0 - mov r0,r5 - com r0 - and r9,r0 - eor r26,r6 - eor r27,r7 - eor r28,r8 - eor r29,r9 - std Z+20,r26 - std Z+21,r27 - std Z+22,r28 - std Z+23,r29 - mov r0,r20 - com r0 - and r26,r0 - mov r0,r21 - com r0 - and r27,r0 - mov r0,r22 - com r0 - and r28,r0 - mov r0,r23 - com r0 - and r29,r0 - eor r2,r26 - eor r3,r27 - eor r4,r28 - eor r5,r29 - std Z+36,r2 - std Z+37,r3 - std Z+38,r4 - std Z+39,r5 - ldd r20,Z+8 - ldd r21,Z+9 - ldd r22,Z+10 - ldd r23,Z+11 - ldd r26,Z+24 - ldd r27,Z+25 - ldd r28,Z+26 - ldd r29,Z+27 - ldd r2,Z+40 - ldd r3,Z+41 - ldd r4,Z+42 - ldd r5,Z+43 - movw r6,r2 - movw r8,r4 - mov r0,r26 - com r0 - and r6,r0 - mov r0,r27 - com r0 - and r7,r0 - mov r0,r28 - com r0 - and r8,r0 - mov r0,r29 - com r0 - and r9,r0 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - std Z+8,r20 - std Z+9,r21 - std Z+10,r22 - std Z+11,r23 - movw r6,r20 - movw r8,r22 - mov r0,r2 - com r0 - and r6,r0 - mov r0,r3 - com r0 - and r7,r0 - mov r0,r4 - com r0 - and r8,r0 - mov r0,r5 - com r0 - and r9,r0 - eor r26,r6 - eor r27,r7 - eor r28,r8 - eor r29,r9 - std Z+24,r26 - std Z+25,r27 - std Z+26,r28 - std Z+27,r29 - mov r0,r20 - com r0 - and r26,r0 - mov r0,r21 - com r0 - and r27,r0 - mov r0,r22 - com r0 - and r28,r0 - mov r0,r23 - com r0 - and r29,r0 - eor r2,r26 - eor r3,r27 - eor r4,r28 - eor r5,r29 - std Z+40,r2 - std Z+41,r3 - std Z+42,r4 - std Z+43,r5 - ldd r20,Z+12 - ldd r21,Z+13 - ldd r22,Z+14 - ldd r23,Z+15 - ldd r26,Z+28 - ldd r27,Z+29 - ldd r28,Z+30 - ldd r29,Z+31 - ldd r2,Z+44 - ldd r3,Z+45 - ldd r4,Z+46 - ldd r5,Z+47 - movw r6,r2 - movw r8,r4 - mov r0,r26 - com r0 - and r6,r0 - mov r0,r27 - com r0 - and r7,r0 - mov r0,r28 - com r0 - and r8,r0 - mov r0,r29 - com r0 - and r9,r0 - eor r20,r6 - eor r21,r7 - eor r22,r8 - eor r23,r9 - std Z+12,r20 - std Z+13,r21 - std Z+14,r22 - std Z+15,r23 - movw r6,r20 - 
movw r8,r22 - mov r0,r2 - com r0 - and r6,r0 - mov r0,r3 - com r0 - and r7,r0 - mov r0,r4 - com r0 - and r8,r0 - mov r0,r5 - com r0 - and r9,r0 - eor r26,r6 - eor r27,r7 - eor r28,r8 - eor r29,r9 - std Z+28,r26 - std Z+29,r27 - std Z+30,r28 - std Z+31,r29 - mov r0,r20 - com r0 - and r26,r0 - mov r0,r21 - com r0 - and r27,r0 - mov r0,r22 - com r0 - and r28,r0 - mov r0,r23 - com r0 - and r29,r0 - eor r2,r26 - eor r3,r27 - eor r4,r28 - eor r5,r29 - std Z+44,r2 - std Z+45,r3 - std Z+46,r4 - std Z+47,r5 - ldd r6,Z+16 - ldd r7,Z+17 - ldd r8,Z+18 - ldd r9,Z+19 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - std Z+16,r6 - std Z+17,r7 - std Z+18,r8 - std Z+19,r9 - ldd r6,Z+20 - ldd r7,Z+21 - ldd r8,Z+22 - ldd r9,Z+23 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - std Z+20,r6 - std Z+21,r7 - std Z+22,r8 - std Z+23,r9 - ldd r6,Z+24 - ldd r7,Z+25 - ldd r8,Z+26 - ldd r9,Z+27 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - std Z+24,r6 - std Z+25,r7 - std Z+26,r8 - std Z+27,r9 - ldd r6,Z+28 - ldd r7,Z+29 - ldd r8,Z+30 - ldd r9,Z+31 - lsl r6 - rol r7 - rol r8 - rol r9 - adc r6,r1 - std Z+28,r6 - std Z+29,r7 - std Z+30,r8 - std Z+31,r9 - ldd r6,Z+40 - ldd r7,Z+41 - ldd r8,Z+42 - ldd r9,Z+43 - ldd r10,Z+44 - ldd r11,Z+45 - ldd r12,Z+46 - ldd r13,Z+47 - ldd r14,Z+32 - ldd r15,Z+33 - ldd r24,Z+34 - ldd r25,Z+35 - std Z+40,r25 - std Z+41,r14 - std Z+42,r15 - std Z+43,r24 - ldd r14,Z+36 - ldd r15,Z+37 - ldd r24,Z+38 - ldd r25,Z+39 - std Z+44,r25 - std Z+45,r14 - std Z+46,r15 - std Z+47,r24 - std Z+32,r9 - std Z+33,r6 - std Z+34,r7 - std Z+35,r8 - std Z+36,r13 - std Z+37,r10 - std Z+38,r11 - std Z+39,r12 - ret -888: - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop r7 - pop r6 - pop r5 - pop r4 - pop r3 - pop r2 - pop r29 - pop r28 - ret - .size xoodoo_permute, .-xoodoo_permute - -#endif diff --git a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/internal-xoodoo.c b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/internal-xoodoo.c deleted file mode 100644 index 59bb8bf..0000000 --- a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/internal-xoodoo.c +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "internal-xoodoo.h" - -#if !defined(__AVR__) - -void xoodoo_permute(xoodoo_state_t *state) -{ - static uint16_t const rc[XOODOO_ROUNDS] = { - 0x0058, 0x0038, 0x03C0, 0x00D0, 0x0120, 0x0014, - 0x0060, 0x002C, 0x0380, 0x00F0, 0x01A0, 0x0012 - }; - uint8_t round; - uint32_t x00, x01, x02, x03; - uint32_t x10, x11, x12, x13; - uint32_t x20, x21, x22, x23; - uint32_t t1, t2; - - /* Load the state and convert from little-endian byte order */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - x00 = state->S[0][0]; - x01 = state->S[0][1]; - x02 = state->S[0][2]; - x03 = state->S[0][3]; - x10 = state->S[1][0]; - x11 = state->S[1][1]; - x12 = state->S[1][2]; - x13 = state->S[1][3]; - x20 = state->S[2][0]; - x21 = state->S[2][1]; - x22 = state->S[2][2]; - x23 = state->S[2][3]; -#else - x00 = le_load_word32(state->B); - x01 = le_load_word32(state->B + 4); - x02 = le_load_word32(state->B + 8); - x03 = le_load_word32(state->B + 12); - x10 = le_load_word32(state->B + 16); - x11 = le_load_word32(state->B + 20); - x12 = le_load_word32(state->B + 24); - x13 = le_load_word32(state->B + 28); - x20 = le_load_word32(state->B + 32); - x21 = le_load_word32(state->B + 36); - x22 = le_load_word32(state->B + 40); - x23 = le_load_word32(state->B + 44); -#endif - - /* Perform all permutation rounds */ - for (round = 0; round < XOODOO_ROUNDS; ++round) { - /* Optimization ideas from the Xoodoo implementation here: - * https://github.com/XKCP/XKCP/tree/master/lib/low/Xoodoo/Optimized */ - - /* Step theta: Mix column parity */ - t1 = x03 ^ x13 ^ x23; - t2 = x00 ^ x10 ^ x20; - t1 = leftRotate5(t1) ^ leftRotate14(t1); - t2 = leftRotate5(t2) ^ leftRotate14(t2); - x00 ^= t1; - x10 ^= t1; - x20 ^= t1; - t1 = x01 ^ x11 ^ x21; - t1 = leftRotate5(t1) ^ leftRotate14(t1); - x01 ^= t2; - x11 ^= t2; - x21 ^= t2; - t2 = x02 ^ x12 ^ x22; - t2 = leftRotate5(t2) ^ leftRotate14(t2); - x02 ^= t1; - x12 ^= t1; - x22 ^= t1; - x03 ^= t2; - x13 ^= t2; - x23 ^= t2; - - /* Step rho-west: Plane shift */ - t1 = x13; - x13 = x12; - x12 = x11; - x11 = x10; - x10 = t1; - x20 = leftRotate11(x20); - x21 = leftRotate11(x21); - x22 = leftRotate11(x22); - x23 = leftRotate11(x23); - - /* Step iota: Add the round constant to the state */ - x00 ^= rc[round]; - - /* Step chi: Non-linear layer */ - x00 ^= (~x10) & x20; - x10 ^= (~x20) & x00; - x20 ^= (~x00) & x10; - x01 ^= (~x11) & x21; - x11 ^= (~x21) & x01; - x21 ^= (~x01) & x11; - x02 ^= (~x12) & x22; - x12 ^= (~x22) & x02; - x22 ^= (~x02) & x12; - x03 ^= (~x13) & x23; - x13 ^= (~x23) & x03; - x23 ^= (~x03) & x13; - - /* Step rho-east: Plane shift */ - x10 = leftRotate1(x10); - x11 = leftRotate1(x11); - x12 = leftRotate1(x12); - x13 = leftRotate1(x13); - t1 = leftRotate8(x22); - t2 = leftRotate8(x23); - x22 = leftRotate8(x20); - x23 = leftRotate8(x21); - x20 = t1; - x21 = t2; - } - - /* Convert back into little-endian and store to the output state */ -#if defined(LW_UTIL_LITTLE_ENDIAN) - state->S[0][0] = x00; - state->S[0][1] = x01; - state->S[0][2] = x02; - state->S[0][3] = x03; - state->S[1][0] = x10; - state->S[1][1] = x11; - state->S[1][2] = x12; - state->S[1][3] = x13; - state->S[2][0] = x20; - state->S[2][1] = x21; - state->S[2][2] = x22; - state->S[2][3] = x23; -#else - le_store_word32(state->B, x00); - le_store_word32(state->B + 4, x01); - le_store_word32(state->B + 8, x02); - le_store_word32(state->B + 12, x03); - le_store_word32(state->B + 16, x10); - le_store_word32(state->B + 20, x11); - le_store_word32(state->B + 24, x12); - le_store_word32(state->B + 28, x13); - le_store_word32(state->B + 
32, x20); - le_store_word32(state->B + 36, x21); - le_store_word32(state->B + 40, x22); - le_store_word32(state->B + 44, x23); -#endif -} - -#endif /* !__AVR__ */ diff --git a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/internal-xoodoo.h b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/internal-xoodoo.h deleted file mode 100644 index f6eddd8..0000000 --- a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/internal-xoodoo.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LW_INTERNAL_XOODOO_H -#define LW_INTERNAL_XOODOO_H - -#include "internal-util.h" - -/** - * \file internal-xoodoo.h - * \brief Internal implementation of the Xoodoo permutation. - * - * References: https://keccak.team/xoodyak.html - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Number of rows in the Xoodoo state. - */ -#define XOODOO_ROWS 3 - -/** - * \brief Number of columns in the Xoodoo state. - */ -#define XOODOO_COLS 4 - -/** - * \brief Number of rounds for the Xoodoo permutation. - */ -#define XOODOO_ROUNDS 12 - -/** - * \brief State information for the Xoodoo permutation. - */ -typedef union -{ - /** Words of the state */ - uint32_t S[XOODOO_ROWS][XOODOO_COLS]; - - /** Bytes of the state */ - uint8_t B[XOODOO_ROWS * XOODOO_COLS * sizeof(uint32_t)]; - -} xoodoo_state_t; - -/** - * \brief Permutes the Xoodoo state. - * - * \param state The Xoodoo state. - * - * The state will be in little-endian before and after the operation. - */ -void xoodoo_permute(xoodoo_state_t *state); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/xoodyak.c b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/xoodyak.c deleted file mode 100644 index 4ad4fce..0000000 --- a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/xoodyak.c +++ /dev/null @@ -1,321 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "xoodyak.h" -#include "internal-xoodoo.h" -#include - -aead_cipher_t const xoodyak_cipher = { - "Xoodyak", - XOODYAK_KEY_SIZE, - XOODYAK_NONCE_SIZE, - XOODYAK_TAG_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - xoodyak_aead_encrypt, - xoodyak_aead_decrypt -}; - -aead_hash_algorithm_t const xoodyak_hash_algorithm = { - "Xoodyak-Hash", - sizeof(xoodyak_hash_state_t), - XOODYAK_HASH_SIZE, - AEAD_FLAG_LITTLE_ENDIAN, - xoodyak_hash, - (aead_hash_init_t)xoodyak_hash_init, - (aead_hash_update_t)xoodyak_hash_absorb, - (aead_hash_finalize_t)xoodyak_hash_finalize, - (aead_xof_absorb_t)xoodyak_hash_absorb, - (aead_xof_squeeze_t)xoodyak_hash_squeeze -}; - -/** - * \brief Rate for absorbing data into the sponge state. - */ -#define XOODYAK_ABSORB_RATE 44 - -/** - * \brief Rate for squeezing data out of the sponge. - */ -#define XOODYAK_SQUEEZE_RATE 24 - -/** - * \brief Rate for absorbing and squeezing in hashing mode. - */ -#define XOODYAK_HASH_RATE 16 - -/** - * \brief Phase identifier for "up" mode, which indicates that a block - * permutation has just been performed. - */ -#define XOODYAK_PHASE_UP 0 - -/** - * \brief Phase identifier for "down" mode, which indicates that data has - * been absorbed but that a block permutation has not been done yet. - */ -#define XOODYAK_PHASE_DOWN 1 - -/** - * \brief Absorbs data into the Xoodoo permutation state. - * - * \param state Xoodoo permutation state. - * \param phase Points to the current phase, up or down. - * \param data Points to the data to be absorbed. - * \param len Length of the data to be absorbed. 
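/*
 * [Editor's sketch -- not part of the original patch.] The xoodyak_cipher
 * and xoodyak_hash_algorithm descriptors above expose Xoodyak through the
 * generic aead_cipher_t / aead_hash_algorithm_t interfaces from
 * aead-common.h, so a harness can exercise any algorithm through the same
 * function pointers.  demo_roundtrip() below is hypothetical (no such helper
 * exists in the library); it uses an all-zero key and nonce and a short
 * fixed message purely for illustration.
 */
#include <stdio.h>
#include <string.h>
#include "xoodyak.h"

static int demo_roundtrip(const aead_cipher_t *cipher)
{
    unsigned char key[32] = {0}, nonce[32] = {0};
    unsigned char msg[16] = "hello, world!";
    unsigned char ct[16 + 32], pt[16 + 32];
    unsigned long long ctlen, ptlen;

    if (cipher->key_len > sizeof(key) || cipher->nonce_len > sizeof(nonce) ||
        cipher->tag_len > 32)
        return -1;
    if (cipher->encrypt(ct, &ctlen, msg, sizeof(msg), NULL, 0,
                        NULL, nonce, key) != 0)
        return -1;
    if (cipher->decrypt(pt, &ptlen, NULL, ct, ctlen, NULL, 0,
                        nonce, key) != 0)
        return -1;                       /* bad tag or truncated input */
    printf("%s: %llu byte ciphertext, tag verified\n", cipher->name, ctlen);
    return memcmp(pt, msg, sizeof(msg)) == 0 ? 0 : -1;
}

/* Usage, e.g. from a test driver: demo_roundtrip(&xoodyak_cipher); */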
- */ -static void xoodyak_absorb - (xoodoo_state_t *state, uint8_t *phase, - const unsigned char *data, unsigned long long len) -{ - uint8_t domain = 0x03; - unsigned temp; - while (len > XOODYAK_ABSORB_RATE) { - if (*phase != XOODYAK_PHASE_UP) - xoodoo_permute(state); - lw_xor_block(state->B, data, XOODYAK_ABSORB_RATE); - state->B[XOODYAK_ABSORB_RATE] ^= 0x01; /* Padding */ - state->B[sizeof(state->B) - 1] ^= domain; - data += XOODYAK_ABSORB_RATE; - len -= XOODYAK_ABSORB_RATE; - domain = 0x00; - *phase = XOODYAK_PHASE_DOWN; - } - temp = (unsigned)len; - if (*phase != XOODYAK_PHASE_UP) - xoodoo_permute(state); - lw_xor_block(state->B, data, temp); - state->B[temp] ^= 0x01; /* Padding */ - state->B[sizeof(state->B) - 1] ^= domain; - *phase = XOODYAK_PHASE_DOWN; -} - -int xoodyak_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k) -{ - xoodoo_state_t state; - uint8_t phase, domain; - unsigned temp; - (void)nsec; - - /* Set the length of the returned ciphertext */ - *clen = mlen + XOODYAK_TAG_SIZE; - - /* Initialize the state with the key */ - memcpy(state.B, k, XOODYAK_KEY_SIZE); - memset(state.B + XOODYAK_KEY_SIZE, 0, sizeof(state.B) - XOODYAK_KEY_SIZE); - state.B[XOODYAK_KEY_SIZE + 1] = 0x01; /* Padding */ - state.B[sizeof(state.B) - 1] = 0x02; /* Domain separation */ - phase = XOODYAK_PHASE_DOWN; - - /* Absorb the nonce and associated data */ - xoodyak_absorb(&state, &phase, npub, XOODYAK_NONCE_SIZE); - xoodyak_absorb(&state, &phase, ad, adlen); - - /* Encrypt the plaintext to produce the ciphertext */ - domain = 0x80; - while (mlen > XOODYAK_SQUEEZE_RATE) { - state.B[sizeof(state.B) - 1] ^= domain; - xoodoo_permute(&state); - lw_xor_block_2_dest(c, state.B, m, XOODYAK_SQUEEZE_RATE); - state.B[XOODYAK_SQUEEZE_RATE] ^= 0x01; /* Padding */ - c += XOODYAK_SQUEEZE_RATE; - m += XOODYAK_SQUEEZE_RATE; - mlen -= XOODYAK_SQUEEZE_RATE; - domain = 0; - } - state.B[sizeof(state.B) - 1] ^= domain; - xoodoo_permute(&state); - temp = (unsigned)mlen; - lw_xor_block_2_dest(c, state.B, m, temp); - state.B[temp] ^= 0x01; /* Padding */ - c += temp; - - /* Generate the authentication tag */ - state.B[sizeof(state.B) - 1] ^= 0x40; /* Domain separation */ - xoodoo_permute(&state); - memcpy(c, state.B, XOODYAK_TAG_SIZE); - return 0; -} - -int xoodyak_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k) -{ - xoodoo_state_t state; - uint8_t phase, domain; - unsigned temp; - unsigned char *mtemp = m; - (void)nsec; - - /* Validate the ciphertext length and set the return "mlen" value */ - if (clen < XOODYAK_TAG_SIZE) - return -1; - *mlen = clen - XOODYAK_TAG_SIZE; - - /* Initialize the state with the key */ - memcpy(state.B, k, XOODYAK_KEY_SIZE); - memset(state.B + XOODYAK_KEY_SIZE, 0, sizeof(state.B) - XOODYAK_KEY_SIZE); - state.B[XOODYAK_KEY_SIZE + 1] = 0x01; /* Padding */ - state.B[sizeof(state.B) - 1] = 0x02; /* Domain separation */ - phase = XOODYAK_PHASE_DOWN; - - /* Absorb the nonce and associated data */ - xoodyak_absorb(&state, &phase, npub, XOODYAK_NONCE_SIZE); - xoodyak_absorb(&state, &phase, ad, adlen); - - /* Decrypt the ciphertext to produce the plaintext */ - domain = 0x80; - clen -= XOODYAK_TAG_SIZE; - while (clen > 
XOODYAK_SQUEEZE_RATE) { - state.B[sizeof(state.B) - 1] ^= domain; - xoodoo_permute(&state); - lw_xor_block_swap(m, state.B, c, XOODYAK_SQUEEZE_RATE); - state.B[XOODYAK_SQUEEZE_RATE] ^= 0x01; /* Padding */ - c += XOODYAK_SQUEEZE_RATE; - m += XOODYAK_SQUEEZE_RATE; - clen -= XOODYAK_SQUEEZE_RATE; - domain = 0; - } - state.B[sizeof(state.B) - 1] ^= domain; - xoodoo_permute(&state); - temp = (unsigned)clen; - lw_xor_block_swap(m, state.B, c, temp); - state.B[temp] ^= 0x01; /* Padding */ - c += temp; - - /* Check the authentication tag */ - state.B[sizeof(state.B) - 1] ^= 0x40; /* Domain separation */ - xoodoo_permute(&state); - return aead_check_tag(mtemp, *mlen, state.B, c, XOODYAK_TAG_SIZE); -} - -int xoodyak_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen) -{ - xoodyak_hash_state_t state; - xoodyak_hash_init(&state); - xoodyak_hash_absorb(&state, in, inlen); - xoodyak_hash_squeeze(&state, out, XOODYAK_HASH_SIZE); - return 0; -} - -#define XOODYAK_HASH_MODE_INIT_ABSORB 0 -#define XOODYAK_HASH_MODE_ABSORB 1 -#define XOODYAK_HASH_MODE_SQUEEZE 2 - -#define xoodoo_hash_permute(state) \ - xoodoo_permute((xoodoo_state_t *)((state)->s.state)) - -void xoodyak_hash_init(xoodyak_hash_state_t *state) -{ - memset(state, 0, sizeof(xoodyak_hash_state_t)); - state->s.mode = XOODYAK_HASH_MODE_INIT_ABSORB; -} - -void xoodyak_hash_absorb - (xoodyak_hash_state_t *state, const unsigned char *in, - unsigned long long inlen) -{ - uint8_t domain; - unsigned temp; - - /* If we were squeezing, then restart the absorb phase */ - if (state->s.mode == XOODYAK_HASH_MODE_SQUEEZE) { - xoodoo_hash_permute(state); - state->s.mode = XOODYAK_HASH_MODE_INIT_ABSORB; - state->s.count = 0; - } - - /* The first block needs a different domain separator to the others */ - domain = (state->s.mode == XOODYAK_HASH_MODE_INIT_ABSORB) ? 0x01 : 0x00; - - /* Absorb the input data into the state */ - while (inlen > 0) { - if (state->s.count >= XOODYAK_HASH_RATE) { - state->s.state[XOODYAK_HASH_RATE] ^= 0x01; /* Padding */ - state->s.state[sizeof(state->s.state) - 1] ^= domain; - xoodoo_hash_permute(state); - state->s.mode = XOODYAK_HASH_MODE_ABSORB; - state->s.count = 0; - domain = 0x00; - } - temp = XOODYAK_HASH_RATE - state->s.count; - if (temp > inlen) - temp = (unsigned)inlen; - lw_xor_block(state->s.state + state->s.count, in, temp); - state->s.count += temp; - in += temp; - inlen -= temp; - } -} - -void xoodyak_hash_squeeze - (xoodyak_hash_state_t *state, unsigned char *out, - unsigned long long outlen) -{ - uint8_t domain; - unsigned temp; - - /* If we were absorbing, then terminate the absorb phase */ - if (state->s.mode != XOODYAK_HASH_MODE_SQUEEZE) { - domain = (state->s.mode == XOODYAK_HASH_MODE_INIT_ABSORB) ? 
0x01 : 0x00; - state->s.state[state->s.count] ^= 0x01; /* Padding */ - state->s.state[sizeof(state->s.state) - 1] ^= domain; - xoodoo_hash_permute(state); - state->s.mode = XOODYAK_HASH_MODE_SQUEEZE; - state->s.count = 0; - } - - /* Squeeze data out of the state */ - while (outlen > 0) { - if (state->s.count >= XOODYAK_HASH_RATE) { - /* Padding is always at index 0 for squeezing subsequent - * blocks because the number of bytes we have absorbed - * since the previous block was squeezed out is zero */ - state->s.state[0] ^= 0x01; - xoodoo_hash_permute(state); - state->s.count = 0; - } - temp = XOODYAK_HASH_RATE - state->s.count; - if (temp > outlen) - temp = (unsigned)outlen; - memcpy(out, state->s.state + state->s.count, temp); - state->s.count += temp; - out += temp; - outlen -= temp; - } -} - -void xoodyak_hash_finalize - (xoodyak_hash_state_t *state, unsigned char *out) -{ - xoodyak_hash_squeeze(state, out, XOODYAK_HASH_SIZE); -} diff --git a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/xoodyak.h b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/xoodyak.h deleted file mode 100644 index f4777d5..0000000 --- a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys-avr/xoodyak.h +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Copyright (C) 2020 Southern Storm Software, Pty Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef LWCRYPTO_XOODYAK_H -#define LWCRYPTO_XOODYAK_H - -#include "aead-common.h" - -/** - * \file xoodyak.h - * \brief Xoodyak authenticated encryption algorithm. - * - * Xoodyak is an authenticated encryption and hash algorithm pair based - * around the 384-bit Xoodoo permutation that is similar in structure to - * Keccak but is more efficient than Keccak on 32-bit embedded devices. - * The Cyclist mode of operation is used to convert the permutation - * into a sponge for the higher-level algorithms. - * - * The Xoodyak encryption mode has a 128-bit key, a 128-bit nonce, - * and a 128-bit authentication tag. The Xoodyak hashing mode has a - * 256-bit fixed hash output and can also be used as an extensible - * output function (XOF). - * - * The Xoodyak specification describes a re-keying mechanism where the - * key for one packet is used to derive the key to use on the next packet. - * This provides some resistance against side channel attacks by making - * the session key a moving target. This library does not currently - * implement re-keying. 
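The header comment above fixes the Xoodyak parameter sizes: 16-byte key, nonce, and tag, with a 32-byte hash. As a minimal round-trip sketch, assuming the xoodyak.h API shown in this patch (the helper name and the all-zero key/nonce are purely illustrative):

#include "xoodyak.h"

int xoodyak_roundtrip_example(void)
{
    unsigned char key[XOODYAK_KEY_SIZE] = {0};     /* illustration only; use a real key */
    unsigned char nonce[XOODYAK_NONCE_SIZE] = {0}; /* must be unique for every packet */
    unsigned char msg[12] = "hello world";
    unsigned char ad[4] = "hdr";
    unsigned char ct[sizeof(msg) + XOODYAK_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long clen, mlen;

    xoodyak_aead_encrypt(ct, &clen, msg, sizeof(msg), ad, sizeof(ad),
                         0, nonce, key);
    /* Returns 0 and recovers the plaintext only if the trailing
     * 16-byte tag verifies against the same key, nonce and AD */
    return xoodyak_aead_decrypt(pt, &mlen, 0, ct, clen, ad, sizeof(ad),
                                nonce, key);
}

The ciphertext buffer must leave room for XOODYAK_TAG_SIZE extra bytes, since the tag is appended to the encrypted message.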
- * - * References: https://keccak.team/xoodyak.html - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \brief Size of the key for Xoodyak. - */ -#define XOODYAK_KEY_SIZE 16 - -/** - * \brief Size of the authentication tag for Xoodyak. - */ -#define XOODYAK_TAG_SIZE 16 - -/** - * \brief Size of the nonce for Xoodyak. - */ -#define XOODYAK_NONCE_SIZE 16 - -/** - * \brief Size of the hash output for Xoodyak. - */ -#define XOODYAK_HASH_SIZE 32 - -/** - * \brief State information for Xoodyak incremental hashing modes. - */ -typedef union -{ - struct { - unsigned char state[48]; /**< Current hash state */ - unsigned char count; /**< Number of bytes in the current block */ - unsigned char mode; /**< Hash mode: absorb or squeeze */ - } s; /**< State */ - unsigned long long align; /**< For alignment of this structure */ - -} xoodyak_hash_state_t; - -/** - * \brief Meta-information block for the Xoodyak cipher. - */ -extern aead_cipher_t const xoodyak_cipher; - -/** - * \brief Meta-information block for the Xoodyak hash algorithm. - */ -extern aead_hash_algorithm_t const xoodyak_hash_algorithm; - -/** - * \brief Encrypts and authenticates a packet with Xoodyak. - * - * \param c Buffer to receive the output. - * \param clen On exit, set to the length of the output which includes - * the ciphertext and the 16 byte authentication tag. - * \param m Buffer that contains the plaintext message to encrypt. - * \param mlen Length of the plaintext message in bytes. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param nsec Secret nonce - not used by this algorithm. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to encrypt the packet. - * - * \return 0 on success, or a negative value if there was an error in - * the parameters. - * - * \sa xoodyak_aead_decrypt() - */ -int xoodyak_aead_encrypt - (unsigned char *c, unsigned long long *clen, - const unsigned char *m, unsigned long long mlen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *nsec, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Decrypts and authenticates a packet with Xoodyak. - * - * \param m Buffer to receive the plaintext message on output. - * \param mlen Receives the length of the plaintext message on output. - * \param nsec Secret nonce - not used by this algorithm. - * \param c Buffer that contains the ciphertext and authentication - * tag to decrypt. - * \param clen Length of the input data in bytes, which includes the - * ciphertext and the 16 byte authentication tag. - * \param ad Buffer that contains associated data to authenticate - * along with the packet but which does not need to be encrypted. - * \param adlen Length of the associated data in bytes. - * \param npub Points to the public nonce for the packet which must - * be 16 bytes in length. - * \param k Points to the 16 bytes of the key to use to decrypt the packet. - * - * \return 0 on success, -1 if the authentication tag was incorrect, - * or some other negative number if there was an error in the parameters. 
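The hashing declarations above also expose an incremental absorb/squeeze (XOF) interface. A hedged streaming sketch, assuming the xoodyak_hash_init/absorb/squeeze functions declared in this header (the function name below is illustrative, not part of the library):

#include "xoodyak.h"

/* Hash two fragments incrementally, then squeeze 64 bytes of XOF
 * output, which is more than the fixed XOODYAK_HASH_SIZE digest. */
void xoodyak_xof_example(const unsigned char *part1, unsigned long long len1,
                         const unsigned char *part2, unsigned long long len2,
                         unsigned char out[64])
{
    xoodyak_hash_state_t state;
    xoodyak_hash_init(&state);
    xoodyak_hash_absorb(&state, part1, len1);
    xoodyak_hash_absorb(&state, part2, len2);
    xoodyak_hash_squeeze(&state, out, 64);
}

For a fixed 32-byte digest, the one-shot xoodyak_hash() or the xoodyak_hash_finalize() wrapper gives the same result as squeezing XOODYAK_HASH_SIZE bytes from the state.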
- * - * \sa xoodyak_aead_encrypt() - */ -int xoodyak_aead_decrypt - (unsigned char *m, unsigned long long *mlen, - unsigned char *nsec, - const unsigned char *c, unsigned long long clen, - const unsigned char *ad, unsigned long long adlen, - const unsigned char *npub, - const unsigned char *k); - -/** - * \brief Hashes a block of input data with Xoodyak to generate a hash value. - * - * \param out Buffer to receive the hash output which must be at least - * XOODYAK_HASH_SIZE bytes in length. - * \param in Points to the input data to be hashed. - * \param inlen Length of the input data in bytes. - * - * \return Returns zero on success or -1 if there was an error in the - * parameters. - */ -int xoodyak_hash - (unsigned char *out, const unsigned char *in, unsigned long long inlen); - -/** - * \brief Initializes the state for a Xoodyak hashing operation. - * - * \param state Hash state to be initialized. - * - * \sa xoodyak_hash_absorb(), xoodyak_hash_squeeze(), xoodyak_hash() - */ -void xoodyak_hash_init(xoodyak_hash_state_t *state); - -/** - * \brief Aborbs more input data into a Xoodyak hashing state. - * - * \param state Hash state to be updated. - * \param in Points to the input data to be absorbed into the state. - * \param inlen Length of the input data to be absorbed into the state. - * - * \sa xoodyak_hash_init(), xoodyak_hash_squeeze() - */ -void xoodyak_hash_absorb - (xoodyak_hash_state_t *state, const unsigned char *in, - unsigned long long inlen); - -/** - * \brief Squeezes output data from a Xoodyak hashing state. - * - * \param state Hash state to squeeze the output data from. - * \param out Points to the output buffer to receive the squeezed data. - * \param outlen Number of bytes of data to squeeze out of the state. - * - * \sa xoodyak_hash_init(), xoodyak_hash_absorb() - */ -void xoodyak_hash_squeeze - (xoodyak_hash_state_t *state, unsigned char *out, - unsigned long long outlen); - -/** - * \brief Returns the final hash value from a Xoodyak hashing operation. - * - * \param state Hash state to be finalized. - * \param out Points to the output buffer to receive the hash value. - * - * \note This is a wrapper around xoodyak_hash_squeeze() for a fixed length - * of XOODYAK_HASH_SIZE bytes. - * - * \sa xoodyak_hash_init(), xoodyak_hash_absorb() - */ -void xoodyak_hash_finalize - (xoodyak_hash_state_t *state, unsigned char *out); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/aead-common.c b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/aead-common.c new file mode 100644 index 0000000..84fc53a --- /dev/null +++ b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/aead-common.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "aead-common.h" + +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = (accum - 1) >> 8; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} + +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned size, int precheck) +{ + /* Set "accum" to -1 if the tags match, or 0 if they don't match */ + int accum = 0; + while (size > 0) { + accum |= (*tag1++ ^ *tag2++); + --size; + } + accum = ((accum - 1) >> 8) & precheck; + + /* Destroy the plaintext if the tag match failed */ + while (plaintext_len > 0) { + *plaintext++ &= accum; + --plaintext_len; + } + + /* If "accum" is 0, return -1, otherwise return 0 */ + return ~accum; +} diff --git a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/aead-common.h b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/aead-common.h new file mode 100644 index 0000000..2be95eb --- /dev/null +++ b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/aead-common.h @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_AEAD_COMMON_H +#define LWCRYPTO_AEAD_COMMON_H + +#include + +/** + * \file aead-common.h + * \brief Definitions that are common across AEAD schemes. + * + * AEAD stands for "Authenticated Encryption with Associated Data". + * It is a standard API pattern for securely encrypting and + * authenticating packets of data. 
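The aead_check_tag() implementation above compares tags without data-dependent branches: it ORs the XOR of every byte pair into accum and then converts accum into an all-ones or all-zero mask. A stand-alone restatement of that idiom (tags_equal_ct is an illustrative name; like the library code, it assumes arithmetic right shift of a negative int):

/* accum stays 0 only when every byte matches.  (accum - 1) >> 8 then
 * yields -1 (all ones) for a match and 0 for a mismatch, so the caller
 * can mask the plaintext and derive the return code branch-free. */
static int tags_equal_ct(const unsigned char *a, const unsigned char *b,
                         unsigned len)
{
    int accum = 0;
    while (len > 0) {
        accum |= (*a++ ^ *b++);   /* becomes non-zero if any byte differs */
        --len;
    }
    /* match:    accum == 0      -> mask = -1 -> return 0
     * mismatch: accum in 1..255 -> mask =  0 -> return -1 */
    return ~((accum - 1) >> 8);
}

This is why aead_check_tag() can AND the mask over the plaintext: on failure the mask is zero and the recovered plaintext is wiped before the caller ever sees it.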
+ */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Encrypts and authenticates a packet with an AEAD scheme. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + */ +typedef int (*aead_cipher_encrypt_t) + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with an AEAD scheme. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - normally not used by AEAD schemes. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet. + * \param k Points to the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. + */ +typedef int (*aead_cipher_decrypt_t) + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data. + * + * \param out Buffer to receive the hash output. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +typedef int (*aead_hash_t) + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a hashing operation. + * + * \param state Hash state to be initialized. + */ +typedef void (*aead_hash_init_t)(void *state); + +/** + * \brief Updates a hash state with more input data. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be incorporated into the state. + * \param inlen Length of the input data to be incorporated into the state. + */ +typedef void (*aead_hash_update_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Returns the final hash value from a hashing operation. + * + * \param Hash state to be finalized. + * \param out Points to the output buffer to receive the hash value. 
+ */ +typedef void (*aead_hash_finalize_t)(void *state, unsigned char *out); + +/** + * \brief Aborbs more input data into an XOF state. + * + * \param state XOF state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. + * + * \sa ascon_xof_init(), ascon_xof_squeeze() + */ +typedef void (*aead_xof_absorb_t) + (void *state, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Squeezes output data from an XOF state. + * + * \param state XOF state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + */ +typedef void (*aead_xof_squeeze_t) + (void *state, unsigned char *out, unsigned long long outlen); + +/** + * \brief No special AEAD features. + */ +#define AEAD_FLAG_NONE 0x0000 + +/** + * \brief The natural byte order of the AEAD cipher is little-endian. + * + * If this flag is not present, then the natural byte order of the + * AEAD cipher should be assumed to be big-endian. + * + * The natural byte order may be useful when formatting packet sequence + * numbers as nonces. The application needs to know whether the sequence + * number should be packed into the leading or trailing bytes of the nonce. + */ +#define AEAD_FLAG_LITTLE_ENDIAN 0x0001 + +/** + * \brief Meta-information about an AEAD cipher. + */ +typedef struct +{ + const char *name; /**< Name of the cipher */ + unsigned key_len; /**< Length of the key in bytes */ + unsigned nonce_len; /**< Length of the nonce in bytes */ + unsigned tag_len; /**< Length of the tag in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_cipher_encrypt_t encrypt; /**< AEAD encryption function */ + aead_cipher_decrypt_t decrypt; /**< AEAD decryption function */ + +} aead_cipher_t; + +/** + * \brief Meta-information about a hash algorithm that is related to an AEAD. + * + * Regular hash algorithms should provide the "hash", "init", "update", + * and "finalize" functions. Extensible Output Functions (XOF's) should + * proivde the "hash", "init", "absorb", and "squeeze" functions. + */ +typedef struct +{ + const char *name; /**< Name of the hash algorithm */ + size_t state_size; /**< Size of the incremental state structure */ + unsigned hash_len; /**< Length of the hash in bytes */ + unsigned flags; /**< Flags for extra features */ + aead_hash_t hash; /**< All in one hashing function */ + aead_hash_init_t init; /**< Incremental hash/XOF init function */ + aead_hash_update_t update; /**< Incremental hash update function */ + aead_hash_finalize_t finalize; /**< Incremental hash finalize function */ + aead_xof_absorb_t absorb; /**< Incremental XOF absorb function */ + aead_xof_squeeze_t squeeze; /**< Incremental XOF squeeze function */ + +} aead_hash_algorithm_t; + +/** + * \brief Check an authentication tag in constant time. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. 
+ */ +int aead_check_tag + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len); + +/** + * \brief Check an authentication tag in constant time with a previous check. + * + * \param plaintext Points to the plaintext data. + * \param plaintext_len Length of the plaintext in bytes. + * \param tag1 First tag to compare. + * \param tag2 Second tag to compare. + * \param tag_len Length of the tags in bytes. + * \param precheck Set to -1 if previous check succeeded or 0 if it failed. + * + * \return Returns -1 if the tag check failed or 0 if the check succeeded. + * + * If the tag check fails, then the \a plaintext will also be zeroed to + * prevent it from being used accidentally by the application when the + * ciphertext was invalid. + * + * This version can be used to incorporate other information about the + * correctness of the plaintext into the final result. + */ +int aead_check_tag_precheck + (unsigned char *plaintext, unsigned long long plaintext_len, + const unsigned char *tag1, const unsigned char *tag2, + unsigned tag_len, int precheck); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/api.h b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/api.h new file mode 100644 index 0000000..ae8c7f6 --- /dev/null +++ b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/api.h @@ -0,0 +1 @@ +#define CRYPTO_BYTES 32 diff --git a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/hash.c b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/hash.c new file mode 100644 index 0000000..34d3b1c --- /dev/null +++ b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/hash.c @@ -0,0 +1,8 @@ + +#include "xoodyak.h" + +int crypto_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + return xoodyak_hash(out, in, inlen); +} diff --git a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/internal-util.h b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/internal-util.h new file mode 100644 index 0000000..e30166d --- /dev/null +++ b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/internal-util.h @@ -0,0 +1,702 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
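The aead_cipher_t meta-information block defined above lets test and benchmarking code drive any AEAD in the suite through a single code path. A hypothetical sketch (encrypt_with() is an illustrative helper, not part of the library):

#include "aead-common.h"

static int encrypt_with(const aead_cipher_t *cipher,
                        unsigned char *c, unsigned long long *clen,
                        const unsigned char *m, unsigned long long mlen,
                        const unsigned char *npub, /* cipher->nonce_len bytes */
                        const unsigned char *k)    /* cipher->key_len bytes */
{
    /* No associated data in this sketch; nsec is unused by these schemes */
    return cipher->encrypt(c, clen, m, mlen, 0, 0, 0, npub, k);
}

For example, encrypt_with(&xoodyak_cipher, ...) would use the meta block declared in xoodyak.h; the output buffer must allow for cipher->tag_len extra bytes beyond the plaintext length.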
+ */ + +#ifndef LW_INTERNAL_UTIL_H +#define LW_INTERNAL_UTIL_H + +#include + +/* Figure out how to inline functions using this C compiler */ +#if defined(__STDC__) && __STDC_VERSION__ >= 199901L +#define STATIC_INLINE static inline +#elif defined(__GNUC__) || defined(__clang__) +#define STATIC_INLINE static __inline__ +#else +#define STATIC_INLINE static +#endif + +/* Try to figure out whether the CPU is little-endian or big-endian. + * May need to modify this to include new compiler-specific defines. + * Alternatively, define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ in your + * compiler flags when you compile this library */ +#if defined(__x86_64) || defined(__x86_64__) || \ + defined(__i386) || defined(__i386__) || \ + defined(__AVR__) || defined(__arm) || defined(__arm__) || \ + defined(_M_AMD64) || defined(_M_X64) || defined(_M_IX86) || \ + defined(_M_IA64) || defined(_M_ARM) || defined(_M_ARM_FP) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 1234) || \ + defined(__LITTLE_ENDIAN__) +#define LW_UTIL_LITTLE_ENDIAN 1 +#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == 4321) || \ + defined(__BIG_ENDIAN__) +/* Big endian */ +#else +#error "Cannot determine the endianess of this platform" +#endif + +/* Helper macros to load and store values while converting endian-ness */ + +/* Load a big-endian 32-bit word from a byte buffer */ +#define be_load_word32(ptr) \ + ((((uint32_t)((ptr)[0])) << 24) | \ + (((uint32_t)((ptr)[1])) << 16) | \ + (((uint32_t)((ptr)[2])) << 8) | \ + ((uint32_t)((ptr)[3]))) + +/* Store a big-endian 32-bit word into a byte buffer */ +#define be_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 24); \ + (ptr)[1] = (uint8_t)(_x >> 16); \ + (ptr)[2] = (uint8_t)(_x >> 8); \ + (ptr)[3] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 32-bit word from a byte buffer */ +#define le_load_word32(ptr) \ + ((((uint32_t)((ptr)[3])) << 24) | \ + (((uint32_t)((ptr)[2])) << 16) | \ + (((uint32_t)((ptr)[1])) << 8) | \ + ((uint32_t)((ptr)[0]))) + +/* Store a little-endian 32-bit word into a byte buffer */ +#define le_store_word32(ptr, x) \ + do { \ + uint32_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + } while (0) + +/* Load a big-endian 64-bit word from a byte buffer */ +#define be_load_word64(ptr) \ + ((((uint64_t)((ptr)[0])) << 56) | \ + (((uint64_t)((ptr)[1])) << 48) | \ + (((uint64_t)((ptr)[2])) << 40) | \ + (((uint64_t)((ptr)[3])) << 32) | \ + (((uint64_t)((ptr)[4])) << 24) | \ + (((uint64_t)((ptr)[5])) << 16) | \ + (((uint64_t)((ptr)[6])) << 8) | \ + ((uint64_t)((ptr)[7]))) + +/* Store a big-endian 64-bit word into a byte buffer */ +#define be_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 56); \ + (ptr)[1] = (uint8_t)(_x >> 48); \ + (ptr)[2] = (uint8_t)(_x >> 40); \ + (ptr)[3] = (uint8_t)(_x >> 32); \ + (ptr)[4] = (uint8_t)(_x >> 24); \ + (ptr)[5] = (uint8_t)(_x >> 16); \ + (ptr)[6] = (uint8_t)(_x >> 8); \ + (ptr)[7] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 64-bit word from a byte buffer */ +#define le_load_word64(ptr) \ + ((((uint64_t)((ptr)[7])) << 56) | \ + (((uint64_t)((ptr)[6])) << 48) | \ + (((uint64_t)((ptr)[5])) << 40) | \ + (((uint64_t)((ptr)[4])) << 32) | \ + (((uint64_t)((ptr)[3])) << 24) | \ + (((uint64_t)((ptr)[2])) << 16) | \ + (((uint64_t)((ptr)[1])) << 8) | \ + ((uint64_t)((ptr)[0]))) + +/* Store a little-endian 64-bit word into a byte buffer */ +#define 
le_store_word64(ptr, x) \ + do { \ + uint64_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + (ptr)[2] = (uint8_t)(_x >> 16); \ + (ptr)[3] = (uint8_t)(_x >> 24); \ + (ptr)[4] = (uint8_t)(_x >> 32); \ + (ptr)[5] = (uint8_t)(_x >> 40); \ + (ptr)[6] = (uint8_t)(_x >> 48); \ + (ptr)[7] = (uint8_t)(_x >> 56); \ + } while (0) + +/* Load a big-endian 16-bit word from a byte buffer */ +#define be_load_word16(ptr) \ + ((((uint16_t)((ptr)[0])) << 8) | \ + ((uint16_t)((ptr)[1]))) + +/* Store a big-endian 16-bit word into a byte buffer */ +#define be_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)(_x >> 8); \ + (ptr)[1] = (uint8_t)_x; \ + } while (0) + +/* Load a little-endian 16-bit word from a byte buffer */ +#define le_load_word16(ptr) \ + ((((uint16_t)((ptr)[1])) << 8) | \ + ((uint16_t)((ptr)[0]))) + +/* Store a little-endian 16-bit word into a byte buffer */ +#define le_store_word16(ptr, x) \ + do { \ + uint16_t _x = (x); \ + (ptr)[0] = (uint8_t)_x; \ + (ptr)[1] = (uint8_t)(_x >> 8); \ + } while (0) + +/* XOR a source byte buffer against a destination */ +#define lw_xor_block(dest, src, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ ^= *_src++; \ + --_len; \ + } \ + } while (0) + +/* XOR two source byte buffers and put the result in a destination buffer */ +#define lw_xor_block_2_src(dest, src1, src2, len) \ + do { \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest++ = *_src1++ ^ *_src2++; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time */ +#define lw_xor_block_2_dest(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + *_dest2++ = (*_dest++ ^= *_src++); \ + --_len; \ + } \ + } while (0) + +/* XOR two byte buffers and write to a destination which at the same + * time copying the contents of src2 to dest2 */ +#define lw_xor_block_copy_src(dest2, dest, src1, src2, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src1 = (src1); \ + const unsigned char *_src2 = (src2); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src2++; \ + *_dest2++ = _temp; \ + *_dest++ = *_src1++ ^ _temp; \ + --_len; \ + } \ + } while (0) + +/* XOR a source byte buffer against a destination and write to another + * destination at the same time. This version swaps the source value + * into the "dest" buffer */ +#define lw_xor_block_swap(dest2, dest, src, len) \ + do { \ + unsigned char *_dest2 = (dest2); \ + unsigned char *_dest = (dest); \ + const unsigned char *_src = (src); \ + unsigned _len = (len); \ + while (_len > 0) { \ + unsigned char _temp = *_src++; \ + *_dest2++ = *_dest ^ _temp; \ + *_dest++ = _temp; \ + --_len; \ + } \ + } while (0) + +/* Rotation functions need to be optimised for best performance on AVR. + * The most efficient rotations are where the number of bits is 1 or a + * multiple of 8, so we compose the efficient rotations to produce all + * other rotation counts of interest. 
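The composition strategy described above can be sanity-checked on a host build: every composed rotation must agree with a direct shift-based rotate. A small illustrative self-test, assuming a GNU-C compiler (which the __extension__ statement-expression form of the macros below requires):

#include <assert.h>
#include <stdint.h>

/* On AVR, leftRotate5() is composed as a cheap rotate-left-by-8 followed
 * by three rotate-rights-by-1; the result must equal a direct 5-bit
 * rotate.  Uses the generic leftRotate/rightRotate macros defined below. */
static void check_rotate5(uint32_t x)
{
    uint32_t direct   = (x << 5) | (x >> 27);
    uint32_t composed = rightRotate(rightRotate(rightRotate(leftRotate(x, 8), 1), 1), 1);
    assert(direct == composed);
}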
*/ + +#if defined(__AVR__) +#define LW_CRYPTO_ROTATE32_COMPOSED 1 +#else +#define LW_CRYPTO_ROTATE32_COMPOSED 0 +#endif + +/* Rotation macros for 32-bit arguments */ + +/* Generic left rotate */ +#define leftRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (32 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate(a, bits) \ + (__extension__ ({ \ + uint32_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (32 - (bits))); \ + })) + +#if !LW_CRYPTO_ROTATE32_COMPOSED + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1(a) (leftRotate((a), 1)) +#define leftRotate2(a) (leftRotate((a), 2)) +#define leftRotate3(a) (leftRotate((a), 3)) +#define leftRotate4(a) (leftRotate((a), 4)) +#define leftRotate5(a) (leftRotate((a), 5)) +#define leftRotate6(a) (leftRotate((a), 6)) +#define leftRotate7(a) (leftRotate((a), 7)) +#define leftRotate8(a) (leftRotate((a), 8)) +#define leftRotate9(a) (leftRotate((a), 9)) +#define leftRotate10(a) (leftRotate((a), 10)) +#define leftRotate11(a) (leftRotate((a), 11)) +#define leftRotate12(a) (leftRotate((a), 12)) +#define leftRotate13(a) (leftRotate((a), 13)) +#define leftRotate14(a) (leftRotate((a), 14)) +#define leftRotate15(a) (leftRotate((a), 15)) +#define leftRotate16(a) (leftRotate((a), 16)) +#define leftRotate17(a) (leftRotate((a), 17)) +#define leftRotate18(a) (leftRotate((a), 18)) +#define leftRotate19(a) (leftRotate((a), 19)) +#define leftRotate20(a) (leftRotate((a), 20)) +#define leftRotate21(a) (leftRotate((a), 21)) +#define leftRotate22(a) (leftRotate((a), 22)) +#define leftRotate23(a) (leftRotate((a), 23)) +#define leftRotate24(a) (leftRotate((a), 24)) +#define leftRotate25(a) (leftRotate((a), 25)) +#define leftRotate26(a) (leftRotate((a), 26)) +#define leftRotate27(a) (leftRotate((a), 27)) +#define leftRotate28(a) (leftRotate((a), 28)) +#define leftRotate29(a) (leftRotate((a), 29)) +#define leftRotate30(a) (leftRotate((a), 30)) +#define leftRotate31(a) (leftRotate((a), 31)) + +/* Right rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1(a) (rightRotate((a), 1)) +#define rightRotate2(a) (rightRotate((a), 2)) +#define rightRotate3(a) (rightRotate((a), 3)) +#define rightRotate4(a) (rightRotate((a), 4)) +#define rightRotate5(a) (rightRotate((a), 5)) +#define rightRotate6(a) (rightRotate((a), 6)) +#define rightRotate7(a) (rightRotate((a), 7)) +#define rightRotate8(a) (rightRotate((a), 8)) +#define rightRotate9(a) (rightRotate((a), 9)) +#define rightRotate10(a) (rightRotate((a), 10)) +#define rightRotate11(a) (rightRotate((a), 11)) +#define rightRotate12(a) (rightRotate((a), 12)) +#define rightRotate13(a) (rightRotate((a), 13)) +#define rightRotate14(a) (rightRotate((a), 14)) +#define rightRotate15(a) (rightRotate((a), 15)) +#define rightRotate16(a) (rightRotate((a), 16)) +#define rightRotate17(a) (rightRotate((a), 17)) +#define rightRotate18(a) (rightRotate((a), 18)) +#define rightRotate19(a) (rightRotate((a), 19)) +#define rightRotate20(a) (rightRotate((a), 20)) +#define rightRotate21(a) (rightRotate((a), 21)) +#define rightRotate22(a) (rightRotate((a), 22)) +#define rightRotate23(a) (rightRotate((a), 23)) +#define rightRotate24(a) (rightRotate((a), 24)) +#define rightRotate25(a) (rightRotate((a), 25)) +#define rightRotate26(a) (rightRotate((a), 26)) +#define rightRotate27(a) (rightRotate((a), 27)) +#define rightRotate28(a) (rightRotate((a), 28)) +#define rightRotate29(a) (rightRotate((a), 29)) +#define rightRotate30(a) (rightRotate((a), 30)) +#define rightRotate31(a) (rightRotate((a), 31)) + +#else /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Composed rotation macros where 1 and 8 are fast, but others are slow */ + +/* Left rotate by 1 */ +#define leftRotate1(a) (leftRotate((a), 1)) + +/* Left rotate by 2 */ +#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1)) + +/* Left rotate by 3 */ +#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1)) + +/* Left rotate by 4 */ +#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 5: Rotate left by 8, then right by 3 */ +#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 6: Rotate left by 8, then right by 2 */ +#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 7: Rotate left by 8, then right by 1 */ +#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 8 */ +#define leftRotate8(a) (leftRotate((a), 8)) + +/* Left rotate by 9: Rotate left by 8, then left by 1 */ +#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1)) + +/* Left rotate by 10: Rotate left by 8, then left by 2 */ +#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1)) + +/* Left rotate by 11: Rotate left by 8, then left by 3 */ +#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1)) + +/* Left rotate by 12: Rotate left by 16, then right by 4 */ +#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 13: Rotate left by 16, then right by 3 */ +#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 14: Rotate left by 16, then right by 2 */ +#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 15: Rotate left by 16, then right by 1 */ +#define leftRotate15(a) 
(rightRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 16 */ +#define leftRotate16(a) (leftRotate((a), 16)) + +/* Left rotate by 17: Rotate left by 16, then left by 1 */ +#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1)) + +/* Left rotate by 18: Rotate left by 16, then left by 2 */ +#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1)) + +/* Left rotate by 19: Rotate left by 16, then left by 3 */ +#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1)) + +/* Left rotate by 20: Rotate left by 16, then left by 4 */ +#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1)) + +/* Left rotate by 21: Rotate left by 24, then right by 3 */ +#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 22: Rotate left by 24, then right by 2 */ +#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 23: Rotate left by 24, then right by 1 */ +#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 24 */ +#define leftRotate24(a) (leftRotate((a), 24)) + +/* Left rotate by 25: Rotate left by 24, then left by 1 */ +#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1)) + +/* Left rotate by 26: Rotate left by 24, then left by 2 */ +#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1)) + +/* Left rotate by 27: Rotate left by 24, then left by 3 */ +#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1)) + +/* Left rotate by 28: Rotate right by 4 */ +#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1)) + +/* Left rotate by 29: Rotate right by 3 */ +#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1)) + +/* Left rotate by 30: Rotate right by 2 */ +#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1)) + +/* Left rotate by 31: Rotate right by 1 */ +#define leftRotate31(a) (rightRotate((a), 1)) + +/* Define the 32-bit right rotations in terms of left rotations */ +#define rightRotate1(a) (leftRotate31((a))) +#define rightRotate2(a) (leftRotate30((a))) +#define rightRotate3(a) (leftRotate29((a))) +#define rightRotate4(a) (leftRotate28((a))) +#define rightRotate5(a) (leftRotate27((a))) +#define rightRotate6(a) (leftRotate26((a))) +#define rightRotate7(a) (leftRotate25((a))) +#define rightRotate8(a) (leftRotate24((a))) +#define rightRotate9(a) (leftRotate23((a))) +#define rightRotate10(a) (leftRotate22((a))) +#define rightRotate11(a) (leftRotate21((a))) +#define rightRotate12(a) (leftRotate20((a))) +#define rightRotate13(a) (leftRotate19((a))) +#define rightRotate14(a) (leftRotate18((a))) +#define rightRotate15(a) (leftRotate17((a))) +#define rightRotate16(a) (leftRotate16((a))) +#define rightRotate17(a) (leftRotate15((a))) +#define rightRotate18(a) (leftRotate14((a))) +#define rightRotate19(a) (leftRotate13((a))) +#define rightRotate20(a) (leftRotate12((a))) +#define rightRotate21(a) (leftRotate11((a))) +#define rightRotate22(a) (leftRotate10((a))) +#define rightRotate23(a) (leftRotate9((a))) +#define rightRotate24(a) (leftRotate8((a))) +#define rightRotate25(a) (leftRotate7((a))) +#define rightRotate26(a) (leftRotate6((a))) +#define rightRotate27(a) (leftRotate5((a))) +#define rightRotate28(a) (leftRotate4((a))) +#define rightRotate29(a) (leftRotate3((a))) +#define rightRotate30(a) (leftRotate2((a))) +#define rightRotate31(a) 
(leftRotate1((a))) + +#endif /* LW_CRYPTO_ROTATE32_COMPOSED */ + +/* Rotation macros for 64-bit arguments */ + +/* Generic left rotate */ +#define leftRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (64 - (bits))); \ + })) + +/* Generic right rotate */ +#define rightRotate_64(a, bits) \ + (__extension__ ({ \ + uint64_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (64 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_64(a) (leftRotate_64((a), 1)) +#define leftRotate2_64(a) (leftRotate_64((a), 2)) +#define leftRotate3_64(a) (leftRotate_64((a), 3)) +#define leftRotate4_64(a) (leftRotate_64((a), 4)) +#define leftRotate5_64(a) (leftRotate_64((a), 5)) +#define leftRotate6_64(a) (leftRotate_64((a), 6)) +#define leftRotate7_64(a) (leftRotate_64((a), 7)) +#define leftRotate8_64(a) (leftRotate_64((a), 8)) +#define leftRotate9_64(a) (leftRotate_64((a), 9)) +#define leftRotate10_64(a) (leftRotate_64((a), 10)) +#define leftRotate11_64(a) (leftRotate_64((a), 11)) +#define leftRotate12_64(a) (leftRotate_64((a), 12)) +#define leftRotate13_64(a) (leftRotate_64((a), 13)) +#define leftRotate14_64(a) (leftRotate_64((a), 14)) +#define leftRotate15_64(a) (leftRotate_64((a), 15)) +#define leftRotate16_64(a) (leftRotate_64((a), 16)) +#define leftRotate17_64(a) (leftRotate_64((a), 17)) +#define leftRotate18_64(a) (leftRotate_64((a), 18)) +#define leftRotate19_64(a) (leftRotate_64((a), 19)) +#define leftRotate20_64(a) (leftRotate_64((a), 20)) +#define leftRotate21_64(a) (leftRotate_64((a), 21)) +#define leftRotate22_64(a) (leftRotate_64((a), 22)) +#define leftRotate23_64(a) (leftRotate_64((a), 23)) +#define leftRotate24_64(a) (leftRotate_64((a), 24)) +#define leftRotate25_64(a) (leftRotate_64((a), 25)) +#define leftRotate26_64(a) (leftRotate_64((a), 26)) +#define leftRotate27_64(a) (leftRotate_64((a), 27)) +#define leftRotate28_64(a) (leftRotate_64((a), 28)) +#define leftRotate29_64(a) (leftRotate_64((a), 29)) +#define leftRotate30_64(a) (leftRotate_64((a), 30)) +#define leftRotate31_64(a) (leftRotate_64((a), 31)) +#define leftRotate32_64(a) (leftRotate_64((a), 32)) +#define leftRotate33_64(a) (leftRotate_64((a), 33)) +#define leftRotate34_64(a) (leftRotate_64((a), 34)) +#define leftRotate35_64(a) (leftRotate_64((a), 35)) +#define leftRotate36_64(a) (leftRotate_64((a), 36)) +#define leftRotate37_64(a) (leftRotate_64((a), 37)) +#define leftRotate38_64(a) (leftRotate_64((a), 38)) +#define leftRotate39_64(a) (leftRotate_64((a), 39)) +#define leftRotate40_64(a) (leftRotate_64((a), 40)) +#define leftRotate41_64(a) (leftRotate_64((a), 41)) +#define leftRotate42_64(a) (leftRotate_64((a), 42)) +#define leftRotate43_64(a) (leftRotate_64((a), 43)) +#define leftRotate44_64(a) (leftRotate_64((a), 44)) +#define leftRotate45_64(a) (leftRotate_64((a), 45)) +#define leftRotate46_64(a) (leftRotate_64((a), 46)) +#define leftRotate47_64(a) (leftRotate_64((a), 47)) +#define leftRotate48_64(a) (leftRotate_64((a), 48)) +#define leftRotate49_64(a) (leftRotate_64((a), 49)) +#define leftRotate50_64(a) (leftRotate_64((a), 50)) +#define leftRotate51_64(a) (leftRotate_64((a), 51)) +#define leftRotate52_64(a) (leftRotate_64((a), 52)) +#define leftRotate53_64(a) (leftRotate_64((a), 53)) +#define leftRotate54_64(a) (leftRotate_64((a), 54)) +#define leftRotate55_64(a) (leftRotate_64((a), 55)) +#define leftRotate56_64(a) (leftRotate_64((a), 56)) +#define 
leftRotate57_64(a) (leftRotate_64((a), 57)) +#define leftRotate58_64(a) (leftRotate_64((a), 58)) +#define leftRotate59_64(a) (leftRotate_64((a), 59)) +#define leftRotate60_64(a) (leftRotate_64((a), 60)) +#define leftRotate61_64(a) (leftRotate_64((a), 61)) +#define leftRotate62_64(a) (leftRotate_64((a), 62)) +#define leftRotate63_64(a) (leftRotate_64((a), 63)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_64(a) (rightRotate_64((a), 1)) +#define rightRotate2_64(a) (rightRotate_64((a), 2)) +#define rightRotate3_64(a) (rightRotate_64((a), 3)) +#define rightRotate4_64(a) (rightRotate_64((a), 4)) +#define rightRotate5_64(a) (rightRotate_64((a), 5)) +#define rightRotate6_64(a) (rightRotate_64((a), 6)) +#define rightRotate7_64(a) (rightRotate_64((a), 7)) +#define rightRotate8_64(a) (rightRotate_64((a), 8)) +#define rightRotate9_64(a) (rightRotate_64((a), 9)) +#define rightRotate10_64(a) (rightRotate_64((a), 10)) +#define rightRotate11_64(a) (rightRotate_64((a), 11)) +#define rightRotate12_64(a) (rightRotate_64((a), 12)) +#define rightRotate13_64(a) (rightRotate_64((a), 13)) +#define rightRotate14_64(a) (rightRotate_64((a), 14)) +#define rightRotate15_64(a) (rightRotate_64((a), 15)) +#define rightRotate16_64(a) (rightRotate_64((a), 16)) +#define rightRotate17_64(a) (rightRotate_64((a), 17)) +#define rightRotate18_64(a) (rightRotate_64((a), 18)) +#define rightRotate19_64(a) (rightRotate_64((a), 19)) +#define rightRotate20_64(a) (rightRotate_64((a), 20)) +#define rightRotate21_64(a) (rightRotate_64((a), 21)) +#define rightRotate22_64(a) (rightRotate_64((a), 22)) +#define rightRotate23_64(a) (rightRotate_64((a), 23)) +#define rightRotate24_64(a) (rightRotate_64((a), 24)) +#define rightRotate25_64(a) (rightRotate_64((a), 25)) +#define rightRotate26_64(a) (rightRotate_64((a), 26)) +#define rightRotate27_64(a) (rightRotate_64((a), 27)) +#define rightRotate28_64(a) (rightRotate_64((a), 28)) +#define rightRotate29_64(a) (rightRotate_64((a), 29)) +#define rightRotate30_64(a) (rightRotate_64((a), 30)) +#define rightRotate31_64(a) (rightRotate_64((a), 31)) +#define rightRotate32_64(a) (rightRotate_64((a), 32)) +#define rightRotate33_64(a) (rightRotate_64((a), 33)) +#define rightRotate34_64(a) (rightRotate_64((a), 34)) +#define rightRotate35_64(a) (rightRotate_64((a), 35)) +#define rightRotate36_64(a) (rightRotate_64((a), 36)) +#define rightRotate37_64(a) (rightRotate_64((a), 37)) +#define rightRotate38_64(a) (rightRotate_64((a), 38)) +#define rightRotate39_64(a) (rightRotate_64((a), 39)) +#define rightRotate40_64(a) (rightRotate_64((a), 40)) +#define rightRotate41_64(a) (rightRotate_64((a), 41)) +#define rightRotate42_64(a) (rightRotate_64((a), 42)) +#define rightRotate43_64(a) (rightRotate_64((a), 43)) +#define rightRotate44_64(a) (rightRotate_64((a), 44)) +#define rightRotate45_64(a) (rightRotate_64((a), 45)) +#define rightRotate46_64(a) (rightRotate_64((a), 46)) +#define rightRotate47_64(a) (rightRotate_64((a), 47)) +#define rightRotate48_64(a) (rightRotate_64((a), 48)) +#define rightRotate49_64(a) (rightRotate_64((a), 49)) +#define rightRotate50_64(a) (rightRotate_64((a), 50)) +#define rightRotate51_64(a) (rightRotate_64((a), 51)) +#define rightRotate52_64(a) (rightRotate_64((a), 52)) +#define rightRotate53_64(a) (rightRotate_64((a), 53)) +#define rightRotate54_64(a) (rightRotate_64((a), 54)) +#define rightRotate55_64(a) (rightRotate_64((a), 55)) +#define rightRotate56_64(a) 
(rightRotate_64((a), 56)) +#define rightRotate57_64(a) (rightRotate_64((a), 57)) +#define rightRotate58_64(a) (rightRotate_64((a), 58)) +#define rightRotate59_64(a) (rightRotate_64((a), 59)) +#define rightRotate60_64(a) (rightRotate_64((a), 60)) +#define rightRotate61_64(a) (rightRotate_64((a), 61)) +#define rightRotate62_64(a) (rightRotate_64((a), 62)) +#define rightRotate63_64(a) (rightRotate_64((a), 63)) + +/* Rotate a 16-bit value left by a number of bits */ +#define leftRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (16 - (bits))); \ + })) + +/* Rotate a 16-bit value right by a number of bits */ +#define rightRotate_16(a, bits) \ + (__extension__ ({ \ + uint16_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (16 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_16(a) (leftRotate_16((a), 1)) +#define leftRotate2_16(a) (leftRotate_16((a), 2)) +#define leftRotate3_16(a) (leftRotate_16((a), 3)) +#define leftRotate4_16(a) (leftRotate_16((a), 4)) +#define leftRotate5_16(a) (leftRotate_16((a), 5)) +#define leftRotate6_16(a) (leftRotate_16((a), 6)) +#define leftRotate7_16(a) (leftRotate_16((a), 7)) +#define leftRotate8_16(a) (leftRotate_16((a), 8)) +#define leftRotate9_16(a) (leftRotate_16((a), 9)) +#define leftRotate10_16(a) (leftRotate_16((a), 10)) +#define leftRotate11_16(a) (leftRotate_16((a), 11)) +#define leftRotate12_16(a) (leftRotate_16((a), 12)) +#define leftRotate13_16(a) (leftRotate_16((a), 13)) +#define leftRotate14_16(a) (leftRotate_16((a), 14)) +#define leftRotate15_16(a) (leftRotate_16((a), 15)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_16(a) (rightRotate_16((a), 1)) +#define rightRotate2_16(a) (rightRotate_16((a), 2)) +#define rightRotate3_16(a) (rightRotate_16((a), 3)) +#define rightRotate4_16(a) (rightRotate_16((a), 4)) +#define rightRotate5_16(a) (rightRotate_16((a), 5)) +#define rightRotate6_16(a) (rightRotate_16((a), 6)) +#define rightRotate7_16(a) (rightRotate_16((a), 7)) +#define rightRotate8_16(a) (rightRotate_16((a), 8)) +#define rightRotate9_16(a) (rightRotate_16((a), 9)) +#define rightRotate10_16(a) (rightRotate_16((a), 10)) +#define rightRotate11_16(a) (rightRotate_16((a), 11)) +#define rightRotate12_16(a) (rightRotate_16((a), 12)) +#define rightRotate13_16(a) (rightRotate_16((a), 13)) +#define rightRotate14_16(a) (rightRotate_16((a), 14)) +#define rightRotate15_16(a) (rightRotate_16((a), 15)) + +/* Rotate an 8-bit value left by a number of bits */ +#define leftRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp << (bits)) | (_temp >> (8 - (bits))); \ + })) + +/* Rotate an 8-bit value right by a number of bits */ +#define rightRotate_8(a, bits) \ + (__extension__ ({ \ + uint8_t _temp = (a); \ + (_temp >> (bits)) | (_temp << (8 - (bits))); \ + })) + +/* Left rotate by a specific number of bits. 
These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define leftRotate1_8(a) (leftRotate_8((a), 1)) +#define leftRotate2_8(a) (leftRotate_8((a), 2)) +#define leftRotate3_8(a) (leftRotate_8((a), 3)) +#define leftRotate4_8(a) (leftRotate_8((a), 4)) +#define leftRotate5_8(a) (leftRotate_8((a), 5)) +#define leftRotate6_8(a) (leftRotate_8((a), 6)) +#define leftRotate7_8(a) (leftRotate_8((a), 7)) + +/* Right rotate by a specific number of bits. These macros may be replaced + * with more efficient ones on platforms that lack a barrel shifter */ +#define rightRotate1_8(a) (rightRotate_8((a), 1)) +#define rightRotate2_8(a) (rightRotate_8((a), 2)) +#define rightRotate3_8(a) (rightRotate_8((a), 3)) +#define rightRotate4_8(a) (rightRotate_8((a), 4)) +#define rightRotate5_8(a) (rightRotate_8((a), 5)) +#define rightRotate6_8(a) (rightRotate_8((a), 6)) +#define rightRotate7_8(a) (rightRotate_8((a), 7)) + +#endif diff --git a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/internal-xoodoo-avr.S b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/internal-xoodoo-avr.S new file mode 100644 index 0000000..629c19d --- /dev/null +++ b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/internal-xoodoo-avr.S @@ -0,0 +1,935 @@ +#if defined(__AVR__) +#include +/* Automatically generated - do not edit */ + + .text +.global xoodoo_permute + .type xoodoo_permute, @function +xoodoo_permute: + push r28 + push r29 + push r2 + push r3 + push r4 + push r5 + push r6 + push r7 + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + movw r30,r24 +.L__stack_usage = 16 + ldi r18,88 + mov r19,r1 + rcall 34f + ldi r18,56 + rcall 34f + ldi r18,192 + ldi r19,3 + rcall 34f + ldi r18,208 + mov r19,r1 + rcall 34f + ldi r18,32 + ldi r19,1 + rcall 34f + ldi r18,20 + mov r19,r1 + rcall 34f + ldi r18,96 + rcall 34f + ldi r18,44 + rcall 34f + ldi r18,128 + ldi r19,3 + rcall 34f + ldi r18,240 + mov r19,r1 + rcall 34f + ldi r18,160 + ldi r19,1 + rcall 34f + ldi r18,18 + mov r19,r1 + rcall 34f + rjmp 888f +34: + ldd r6,Z+12 + ldd r7,Z+13 + ldd r8,Z+14 + ldd r9,Z+15 + ldd r0,Z+28 + eor r6,r0 + ldd r0,Z+29 + eor r7,r0 + ldd r0,Z+30 + eor r8,r0 + ldd r0,Z+31 + eor r9,r0 + ldd r0,Z+44 + eor r6,r0 + ldd r0,Z+45 + eor r7,r0 + ldd r0,Z+46 + eor r8,r0 + ldd r0,Z+47 + eor r9,r0 + ld r20,Z + ldd r21,Z+1 + ldd r22,Z+2 + ldd r23,Z+3 + ldd r26,Z+16 + ldd r27,Z+17 + ldd r28,Z+18 + ldd r29,Z+19 + ldd r2,Z+32 + ldd r3,Z+33 + ldd r4,Z+34 + ldd r5,Z+35 + movw r10,r20 + movw r12,r22 + eor r10,r26 + eor r11,r27 + eor r12,r28 + eor r13,r29 + eor r10,r2 + eor r11,r3 + eor r12,r4 + eor r13,r5 + movw r14,r6 + movw r24,r8 + mov r0,r1 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + or r9,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r9,r24 + eor r6,r25 + eor r7,r14 + eor r8,r15 + movw r14,r10 + movw r24,r12 + mov r0,r1 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + or r13,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r13,r24 + eor r10,r25 + eor r11,r14 + eor r12,r15 + eor r20,r9 + eor r21,r6 + eor r22,r7 + eor r23,r8 + eor r26,r9 + eor r27,r6 + eor r28,r7 + eor r29,r8 + eor r2,r9 + eor r3,r6 + eor r4,r7 + eor r5,r8 + st Z,r20 + 
std Z+1,r21 + std Z+2,r22 + std Z+3,r23 + std Z+16,r26 + std Z+17,r27 + std Z+18,r28 + std Z+19,r29 + std Z+32,r2 + std Z+33,r3 + std Z+34,r4 + std Z+35,r5 + ldd r20,Z+4 + ldd r21,Z+5 + ldd r22,Z+6 + ldd r23,Z+7 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r28,Z+22 + ldd r29,Z+23 + ldd r2,Z+36 + ldd r3,Z+37 + ldd r4,Z+38 + ldd r5,Z+39 + movw r6,r20 + movw r8,r22 + eor r6,r26 + eor r7,r27 + eor r8,r28 + eor r9,r29 + eor r6,r2 + eor r7,r3 + eor r8,r4 + eor r9,r5 + movw r14,r6 + movw r24,r8 + mov r0,r1 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + lsr r9 + ror r8 + ror r7 + ror r6 + ror r0 + or r9,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r9,r24 + eor r6,r25 + eor r7,r14 + eor r8,r15 + eor r20,r13 + eor r21,r10 + eor r22,r11 + eor r23,r12 + eor r26,r13 + eor r27,r10 + eor r28,r11 + eor r29,r12 + eor r2,r13 + eor r3,r10 + eor r4,r11 + eor r5,r12 + std Z+4,r20 + std Z+5,r21 + std Z+6,r22 + std Z+7,r23 + std Z+20,r26 + std Z+21,r27 + std Z+22,r28 + std Z+23,r29 + std Z+36,r2 + std Z+37,r3 + std Z+38,r4 + std Z+39,r5 + ldd r20,Z+8 + ldd r21,Z+9 + ldd r22,Z+10 + ldd r23,Z+11 + ldd r26,Z+24 + ldd r27,Z+25 + ldd r28,Z+26 + ldd r29,Z+27 + ldd r2,Z+40 + ldd r3,Z+41 + ldd r4,Z+42 + ldd r5,Z+43 + movw r10,r20 + movw r12,r22 + eor r10,r26 + eor r11,r27 + eor r12,r28 + eor r13,r29 + eor r10,r2 + eor r11,r3 + eor r12,r4 + eor r13,r5 + movw r14,r10 + movw r24,r12 + mov r0,r1 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + lsr r13 + ror r12 + ror r11 + ror r10 + ror r0 + or r13,r0 + mov r0,r1 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + lsr r25 + ror r24 + ror r15 + ror r14 + ror r0 + or r25,r0 + eor r13,r24 + eor r10,r25 + eor r11,r14 + eor r12,r15 + eor r20,r9 + eor r21,r6 + eor r22,r7 + eor r23,r8 + eor r26,r9 + eor r27,r6 + eor r28,r7 + eor r29,r8 + eor r2,r9 + eor r3,r6 + eor r4,r7 + eor r5,r8 + std Z+8,r20 + std Z+9,r21 + std Z+10,r22 + std Z+11,r23 + std Z+24,r26 + std Z+25,r27 + std Z+26,r28 + std Z+27,r29 + std Z+40,r2 + std Z+41,r3 + std Z+42,r4 + std Z+43,r5 + ldd r0,Z+12 + eor r0,r13 + std Z+12,r0 + ldd r0,Z+13 + eor r0,r10 + std Z+13,r0 + ldd r0,Z+14 + eor r0,r11 + std Z+14,r0 + ldd r0,Z+15 + eor r0,r12 + std Z+15,r0 + ldd r6,Z+28 + ldd r7,Z+29 + ldd r8,Z+30 + ldd r9,Z+31 + eor r6,r13 + eor r7,r10 + eor r8,r11 + eor r9,r12 + ldd r14,Z+44 + ldd r15,Z+45 + ldd r24,Z+46 + ldd r25,Z+47 + eor r14,r13 + eor r15,r10 + eor r24,r11 + eor r25,r12 + ldd r10,Z+24 + ldd r11,Z+25 + ldd r12,Z+26 + ldd r13,Z+27 + std Z+28,r10 + std Z+29,r11 + std Z+30,r12 + std Z+31,r13 + ldd r10,Z+20 + ldd r11,Z+21 + ldd r12,Z+22 + ldd r13,Z+23 + std Z+24,r10 + std Z+25,r11 + std Z+26,r12 + std Z+27,r13 + ldd r10,Z+16 + ldd r11,Z+17 + ldd r12,Z+18 + ldd r13,Z+19 + std Z+20,r10 + std Z+21,r11 + std Z+22,r12 + std Z+23,r13 + std Z+16,r6 + std Z+17,r7 + std Z+18,r8 + std Z+19,r9 + ldd r6,Z+32 + ldd r7,Z+33 + ldd r8,Z+34 + ldd r9,Z+35 + mov r0,r9 + mov r9,r8 + mov r8,r7 + mov r7,r6 + mov r6,r0 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + std Z+32,r6 + std Z+33,r7 + std Z+34,r8 + std Z+35,r9 + ldd r6,Z+36 + ldd r7,Z+37 + ldd r8,Z+38 + ldd r9,Z+39 + mov r0,r9 + mov r9,r8 + mov r8,r7 + mov r7,r6 + mov r6,r0 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + std 
Z+36,r6 + std Z+37,r7 + std Z+38,r8 + std Z+39,r9 + ldd r6,Z+40 + ldd r7,Z+41 + ldd r8,Z+42 + ldd r9,Z+43 + mov r0,r9 + mov r9,r8 + mov r8,r7 + mov r7,r6 + mov r6,r0 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + std Z+40,r6 + std Z+41,r7 + std Z+42,r8 + std Z+43,r9 + mov r0,r25 + mov r25,r24 + mov r24,r15 + mov r15,r14 + mov r14,r0 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + lsl r14 + rol r15 + rol r24 + rol r25 + adc r14,r1 + std Z+44,r14 + std Z+45,r15 + std Z+46,r24 + std Z+47,r25 + ld r20,Z + ldd r21,Z+1 + ldd r22,Z+2 + ldd r23,Z+3 + eor r20,r18 + eor r21,r19 + ldd r26,Z+16 + ldd r27,Z+17 + ldd r28,Z+18 + ldd r29,Z+19 + ldd r2,Z+32 + ldd r3,Z+33 + ldd r4,Z+34 + ldd r5,Z+35 + movw r6,r2 + movw r8,r4 + mov r0,r26 + com r0 + and r6,r0 + mov r0,r27 + com r0 + and r7,r0 + mov r0,r28 + com r0 + and r8,r0 + mov r0,r29 + com r0 + and r9,r0 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + st Z,r20 + std Z+1,r21 + std Z+2,r22 + std Z+3,r23 + movw r6,r20 + movw r8,r22 + mov r0,r2 + com r0 + and r6,r0 + mov r0,r3 + com r0 + and r7,r0 + mov r0,r4 + com r0 + and r8,r0 + mov r0,r5 + com r0 + and r9,r0 + eor r26,r6 + eor r27,r7 + eor r28,r8 + eor r29,r9 + std Z+16,r26 + std Z+17,r27 + std Z+18,r28 + std Z+19,r29 + mov r0,r20 + com r0 + and r26,r0 + mov r0,r21 + com r0 + and r27,r0 + mov r0,r22 + com r0 + and r28,r0 + mov r0,r23 + com r0 + and r29,r0 + eor r2,r26 + eor r3,r27 + eor r4,r28 + eor r5,r29 + std Z+32,r2 + std Z+33,r3 + std Z+34,r4 + std Z+35,r5 + ldd r20,Z+4 + ldd r21,Z+5 + ldd r22,Z+6 + ldd r23,Z+7 + ldd r26,Z+20 + ldd r27,Z+21 + ldd r28,Z+22 + ldd r29,Z+23 + ldd r2,Z+36 + ldd r3,Z+37 + ldd r4,Z+38 + ldd r5,Z+39 + movw r6,r2 + movw r8,r4 + mov r0,r26 + com r0 + and r6,r0 + mov r0,r27 + com r0 + and r7,r0 + mov r0,r28 + com r0 + and r8,r0 + mov r0,r29 + com r0 + and r9,r0 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + std Z+4,r20 + std Z+5,r21 + std Z+6,r22 + std Z+7,r23 + movw r6,r20 + movw r8,r22 + mov r0,r2 + com r0 + and r6,r0 + mov r0,r3 + com r0 + and r7,r0 + mov r0,r4 + com r0 + and r8,r0 + mov r0,r5 + com r0 + and r9,r0 + eor r26,r6 + eor r27,r7 + eor r28,r8 + eor r29,r9 + std Z+20,r26 + std Z+21,r27 + std Z+22,r28 + std Z+23,r29 + mov r0,r20 + com r0 + and r26,r0 + mov r0,r21 + com r0 + and r27,r0 + mov r0,r22 + com r0 + and r28,r0 + mov r0,r23 + com r0 + and r29,r0 + eor r2,r26 + eor r3,r27 + eor r4,r28 + eor r5,r29 + std Z+36,r2 + std Z+37,r3 + std Z+38,r4 + std Z+39,r5 + ldd r20,Z+8 + ldd r21,Z+9 + ldd r22,Z+10 + ldd r23,Z+11 + ldd r26,Z+24 + ldd r27,Z+25 + ldd r28,Z+26 + ldd r29,Z+27 + ldd r2,Z+40 + ldd r3,Z+41 + ldd r4,Z+42 + ldd r5,Z+43 + movw r6,r2 + movw r8,r4 + mov r0,r26 + com r0 + and r6,r0 + mov r0,r27 + com r0 + and r7,r0 + mov r0,r28 + com r0 + and r8,r0 + mov r0,r29 + com r0 + and r9,r0 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + std Z+8,r20 + std Z+9,r21 + std Z+10,r22 + std Z+11,r23 + movw r6,r20 + movw r8,r22 + mov r0,r2 + com r0 + and r6,r0 + mov r0,r3 + com r0 + and r7,r0 + mov r0,r4 + com r0 + and r8,r0 + mov r0,r5 + com r0 + and r9,r0 + eor r26,r6 + eor r27,r7 + eor r28,r8 + eor r29,r9 + std Z+24,r26 + std Z+25,r27 + std Z+26,r28 + std Z+27,r29 + mov r0,r20 + com r0 + and r26,r0 + mov r0,r21 + com r0 + and r27,r0 + mov r0,r22 + com r0 + and r28,r0 + mov r0,r23 + com r0 + and r29,r0 + eor r2,r26 + eor r3,r27 + eor r4,r28 + eor r5,r29 + std Z+40,r2 + std Z+41,r3 + std Z+42,r4 + 
std Z+43,r5 + ldd r20,Z+12 + ldd r21,Z+13 + ldd r22,Z+14 + ldd r23,Z+15 + ldd r26,Z+28 + ldd r27,Z+29 + ldd r28,Z+30 + ldd r29,Z+31 + ldd r2,Z+44 + ldd r3,Z+45 + ldd r4,Z+46 + ldd r5,Z+47 + movw r6,r2 + movw r8,r4 + mov r0,r26 + com r0 + and r6,r0 + mov r0,r27 + com r0 + and r7,r0 + mov r0,r28 + com r0 + and r8,r0 + mov r0,r29 + com r0 + and r9,r0 + eor r20,r6 + eor r21,r7 + eor r22,r8 + eor r23,r9 + std Z+12,r20 + std Z+13,r21 + std Z+14,r22 + std Z+15,r23 + movw r6,r20 + movw r8,r22 + mov r0,r2 + com r0 + and r6,r0 + mov r0,r3 + com r0 + and r7,r0 + mov r0,r4 + com r0 + and r8,r0 + mov r0,r5 + com r0 + and r9,r0 + eor r26,r6 + eor r27,r7 + eor r28,r8 + eor r29,r9 + std Z+28,r26 + std Z+29,r27 + std Z+30,r28 + std Z+31,r29 + mov r0,r20 + com r0 + and r26,r0 + mov r0,r21 + com r0 + and r27,r0 + mov r0,r22 + com r0 + and r28,r0 + mov r0,r23 + com r0 + and r29,r0 + eor r2,r26 + eor r3,r27 + eor r4,r28 + eor r5,r29 + std Z+44,r2 + std Z+45,r3 + std Z+46,r4 + std Z+47,r5 + ldd r6,Z+16 + ldd r7,Z+17 + ldd r8,Z+18 + ldd r9,Z+19 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + std Z+16,r6 + std Z+17,r7 + std Z+18,r8 + std Z+19,r9 + ldd r6,Z+20 + ldd r7,Z+21 + ldd r8,Z+22 + ldd r9,Z+23 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + std Z+20,r6 + std Z+21,r7 + std Z+22,r8 + std Z+23,r9 + ldd r6,Z+24 + ldd r7,Z+25 + ldd r8,Z+26 + ldd r9,Z+27 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + std Z+24,r6 + std Z+25,r7 + std Z+26,r8 + std Z+27,r9 + ldd r6,Z+28 + ldd r7,Z+29 + ldd r8,Z+30 + ldd r9,Z+31 + lsl r6 + rol r7 + rol r8 + rol r9 + adc r6,r1 + std Z+28,r6 + std Z+29,r7 + std Z+30,r8 + std Z+31,r9 + ldd r6,Z+40 + ldd r7,Z+41 + ldd r8,Z+42 + ldd r9,Z+43 + ldd r10,Z+44 + ldd r11,Z+45 + ldd r12,Z+46 + ldd r13,Z+47 + ldd r14,Z+32 + ldd r15,Z+33 + ldd r24,Z+34 + ldd r25,Z+35 + std Z+40,r25 + std Z+41,r14 + std Z+42,r15 + std Z+43,r24 + ldd r14,Z+36 + ldd r15,Z+37 + ldd r24,Z+38 + ldd r25,Z+39 + std Z+44,r25 + std Z+45,r14 + std Z+46,r15 + std Z+47,r24 + std Z+32,r9 + std Z+33,r6 + std Z+34,r7 + std Z+35,r8 + std Z+36,r13 + std Z+37,r10 + std Z+38,r11 + std Z+39,r12 + ret +888: + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop r7 + pop r6 + pop r5 + pop r4 + pop r3 + pop r2 + pop r29 + pop r28 + ret + .size xoodoo_permute, .-xoodoo_permute + +#endif diff --git a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/internal-xoodoo.c b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/internal-xoodoo.c new file mode 100644 index 0000000..59bb8bf --- /dev/null +++ b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/internal-xoodoo.c @@ -0,0 +1,166 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "internal-xoodoo.h" + +#if !defined(__AVR__) + +void xoodoo_permute(xoodoo_state_t *state) +{ + static uint16_t const rc[XOODOO_ROUNDS] = { + 0x0058, 0x0038, 0x03C0, 0x00D0, 0x0120, 0x0014, + 0x0060, 0x002C, 0x0380, 0x00F0, 0x01A0, 0x0012 + }; + uint8_t round; + uint32_t x00, x01, x02, x03; + uint32_t x10, x11, x12, x13; + uint32_t x20, x21, x22, x23; + uint32_t t1, t2; + + /* Load the state and convert from little-endian byte order */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + x00 = state->S[0][0]; + x01 = state->S[0][1]; + x02 = state->S[0][2]; + x03 = state->S[0][3]; + x10 = state->S[1][0]; + x11 = state->S[1][1]; + x12 = state->S[1][2]; + x13 = state->S[1][3]; + x20 = state->S[2][0]; + x21 = state->S[2][1]; + x22 = state->S[2][2]; + x23 = state->S[2][3]; +#else + x00 = le_load_word32(state->B); + x01 = le_load_word32(state->B + 4); + x02 = le_load_word32(state->B + 8); + x03 = le_load_word32(state->B + 12); + x10 = le_load_word32(state->B + 16); + x11 = le_load_word32(state->B + 20); + x12 = le_load_word32(state->B + 24); + x13 = le_load_word32(state->B + 28); + x20 = le_load_word32(state->B + 32); + x21 = le_load_word32(state->B + 36); + x22 = le_load_word32(state->B + 40); + x23 = le_load_word32(state->B + 44); +#endif + + /* Perform all permutation rounds */ + for (round = 0; round < XOODOO_ROUNDS; ++round) { + /* Optimization ideas from the Xoodoo implementation here: + * https://github.com/XKCP/XKCP/tree/master/lib/low/Xoodoo/Optimized */ + + /* Step theta: Mix column parity */ + t1 = x03 ^ x13 ^ x23; + t2 = x00 ^ x10 ^ x20; + t1 = leftRotate5(t1) ^ leftRotate14(t1); + t2 = leftRotate5(t2) ^ leftRotate14(t2); + x00 ^= t1; + x10 ^= t1; + x20 ^= t1; + t1 = x01 ^ x11 ^ x21; + t1 = leftRotate5(t1) ^ leftRotate14(t1); + x01 ^= t2; + x11 ^= t2; + x21 ^= t2; + t2 = x02 ^ x12 ^ x22; + t2 = leftRotate5(t2) ^ leftRotate14(t2); + x02 ^= t1; + x12 ^= t1; + x22 ^= t1; + x03 ^= t2; + x13 ^= t2; + x23 ^= t2; + + /* Step rho-west: Plane shift */ + t1 = x13; + x13 = x12; + x12 = x11; + x11 = x10; + x10 = t1; + x20 = leftRotate11(x20); + x21 = leftRotate11(x21); + x22 = leftRotate11(x22); + x23 = leftRotate11(x23); + + /* Step iota: Add the round constant to the state */ + x00 ^= rc[round]; + + /* Step chi: Non-linear layer */ + x00 ^= (~x10) & x20; + x10 ^= (~x20) & x00; + x20 ^= (~x00) & x10; + x01 ^= (~x11) & x21; + x11 ^= (~x21) & x01; + x21 ^= (~x01) & x11; + x02 ^= (~x12) & x22; + x12 ^= (~x22) & x02; + x22 ^= (~x02) & x12; + x03 ^= (~x13) & x23; + x13 ^= (~x23) & x03; + x23 ^= (~x03) & x13; + + /* Step rho-east: Plane shift */ + x10 = leftRotate1(x10); + x11 = leftRotate1(x11); + x12 = leftRotate1(x12); + x13 = leftRotate1(x13); + t1 = leftRotate8(x22); + t2 = leftRotate8(x23); + x22 = leftRotate8(x20); + x23 = leftRotate8(x21); + x20 = t1; + x21 = t2; + } + + /* Convert back into little-endian and store to the output state */ +#if defined(LW_UTIL_LITTLE_ENDIAN) + state->S[0][0] = x00; + state->S[0][1] = x01; + state->S[0][2] = x02; + state->S[0][3] = x03; + state->S[1][0] = x10; + state->S[1][1] = x11; + state->S[1][2] = x12; + state->S[1][3] = x13; + state->S[2][0] = x20; + state->S[2][1] = x21; + state->S[2][2] = x22; + state->S[2][3] = x23; +#else + le_store_word32(state->B, x00); + 
le_store_word32(state->B + 4, x01); + le_store_word32(state->B + 8, x02); + le_store_word32(state->B + 12, x03); + le_store_word32(state->B + 16, x10); + le_store_word32(state->B + 20, x11); + le_store_word32(state->B + 24, x12); + le_store_word32(state->B + 28, x13); + le_store_word32(state->B + 32, x20); + le_store_word32(state->B + 36, x21); + le_store_word32(state->B + 40, x22); + le_store_word32(state->B + 44, x23); +#endif +} + +#endif /* !__AVR__ */ diff --git a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/internal-xoodoo.h b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/internal-xoodoo.h new file mode 100644 index 0000000..f6eddd8 --- /dev/null +++ b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/internal-xoodoo.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LW_INTERNAL_XOODOO_H +#define LW_INTERNAL_XOODOO_H + +#include "internal-util.h" + +/** + * \file internal-xoodoo.h + * \brief Internal implementation of the Xoodoo permutation. + * + * References: https://keccak.team/xoodyak.html + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Number of rows in the Xoodoo state. + */ +#define XOODOO_ROWS 3 + +/** + * \brief Number of columns in the Xoodoo state. + */ +#define XOODOO_COLS 4 + +/** + * \brief Number of rounds for the Xoodoo permutation. + */ +#define XOODOO_ROUNDS 12 + +/** + * \brief State information for the Xoodoo permutation. + */ +typedef union +{ + /** Words of the state */ + uint32_t S[XOODOO_ROWS][XOODOO_COLS]; + + /** Bytes of the state */ + uint8_t B[XOODOO_ROWS * XOODOO_COLS * sizeof(uint32_t)]; + +} xoodoo_state_t; + +/** + * \brief Permutes the Xoodoo state. + * + * \param state The Xoodoo state. + * + * The state will be in little-endian before and after the operation. + */ +void xoodoo_permute(xoodoo_state_t *state); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/xoodyak.c b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/xoodyak.c new file mode 100644 index 0000000..4ad4fce --- /dev/null +++ b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/xoodyak.c @@ -0,0 +1,321 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "xoodyak.h" +#include "internal-xoodoo.h" +#include <string.h> + +aead_cipher_t const xoodyak_cipher = { + "Xoodyak", + XOODYAK_KEY_SIZE, + XOODYAK_NONCE_SIZE, + XOODYAK_TAG_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + xoodyak_aead_encrypt, + xoodyak_aead_decrypt +}; + +aead_hash_algorithm_t const xoodyak_hash_algorithm = { + "Xoodyak-Hash", + sizeof(xoodyak_hash_state_t), + XOODYAK_HASH_SIZE, + AEAD_FLAG_LITTLE_ENDIAN, + xoodyak_hash, + (aead_hash_init_t)xoodyak_hash_init, + (aead_hash_update_t)xoodyak_hash_absorb, + (aead_hash_finalize_t)xoodyak_hash_finalize, + (aead_xof_absorb_t)xoodyak_hash_absorb, + (aead_xof_squeeze_t)xoodyak_hash_squeeze +}; + +/** + * \brief Rate for absorbing data into the sponge state. + */ +#define XOODYAK_ABSORB_RATE 44 + +/** + * \brief Rate for squeezing data out of the sponge. + */ +#define XOODYAK_SQUEEZE_RATE 24 + +/** + * \brief Rate for absorbing and squeezing in hashing mode. + */ +#define XOODYAK_HASH_RATE 16 + +/** + * \brief Phase identifier for "up" mode, which indicates that a block + * permutation has just been performed. + */ +#define XOODYAK_PHASE_UP 0 + +/** + * \brief Phase identifier for "down" mode, which indicates that data has + * been absorbed but that a block permutation has not been done yet. + */ +#define XOODYAK_PHASE_DOWN 1 + +/** + * \brief Absorbs data into the Xoodoo permutation state. + * + * \param state Xoodoo permutation state. + * \param phase Points to the current phase, up or down. + * \param data Points to the data to be absorbed. + * \param len Length of the data to be absorbed. 
+ */ +static void xoodyak_absorb + (xoodoo_state_t *state, uint8_t *phase, + const unsigned char *data, unsigned long long len) +{ + uint8_t domain = 0x03; + unsigned temp; + while (len > XOODYAK_ABSORB_RATE) { + if (*phase != XOODYAK_PHASE_UP) + xoodoo_permute(state); + lw_xor_block(state->B, data, XOODYAK_ABSORB_RATE); + state->B[XOODYAK_ABSORB_RATE] ^= 0x01; /* Padding */ + state->B[sizeof(state->B) - 1] ^= domain; + data += XOODYAK_ABSORB_RATE; + len -= XOODYAK_ABSORB_RATE; + domain = 0x00; + *phase = XOODYAK_PHASE_DOWN; + } + temp = (unsigned)len; + if (*phase != XOODYAK_PHASE_UP) + xoodoo_permute(state); + lw_xor_block(state->B, data, temp); + state->B[temp] ^= 0x01; /* Padding */ + state->B[sizeof(state->B) - 1] ^= domain; + *phase = XOODYAK_PHASE_DOWN; +} + +int xoodyak_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k) +{ + xoodoo_state_t state; + uint8_t phase, domain; + unsigned temp; + (void)nsec; + + /* Set the length of the returned ciphertext */ + *clen = mlen + XOODYAK_TAG_SIZE; + + /* Initialize the state with the key */ + memcpy(state.B, k, XOODYAK_KEY_SIZE); + memset(state.B + XOODYAK_KEY_SIZE, 0, sizeof(state.B) - XOODYAK_KEY_SIZE); + state.B[XOODYAK_KEY_SIZE + 1] = 0x01; /* Padding */ + state.B[sizeof(state.B) - 1] = 0x02; /* Domain separation */ + phase = XOODYAK_PHASE_DOWN; + + /* Absorb the nonce and associated data */ + xoodyak_absorb(&state, &phase, npub, XOODYAK_NONCE_SIZE); + xoodyak_absorb(&state, &phase, ad, adlen); + + /* Encrypt the plaintext to produce the ciphertext */ + domain = 0x80; + while (mlen > XOODYAK_SQUEEZE_RATE) { + state.B[sizeof(state.B) - 1] ^= domain; + xoodoo_permute(&state); + lw_xor_block_2_dest(c, state.B, m, XOODYAK_SQUEEZE_RATE); + state.B[XOODYAK_SQUEEZE_RATE] ^= 0x01; /* Padding */ + c += XOODYAK_SQUEEZE_RATE; + m += XOODYAK_SQUEEZE_RATE; + mlen -= XOODYAK_SQUEEZE_RATE; + domain = 0; + } + state.B[sizeof(state.B) - 1] ^= domain; + xoodoo_permute(&state); + temp = (unsigned)mlen; + lw_xor_block_2_dest(c, state.B, m, temp); + state.B[temp] ^= 0x01; /* Padding */ + c += temp; + + /* Generate the authentication tag */ + state.B[sizeof(state.B) - 1] ^= 0x40; /* Domain separation */ + xoodoo_permute(&state); + memcpy(c, state.B, XOODYAK_TAG_SIZE); + return 0; +} + +int xoodyak_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k) +{ + xoodoo_state_t state; + uint8_t phase, domain; + unsigned temp; + unsigned char *mtemp = m; + (void)nsec; + + /* Validate the ciphertext length and set the return "mlen" value */ + if (clen < XOODYAK_TAG_SIZE) + return -1; + *mlen = clen - XOODYAK_TAG_SIZE; + + /* Initialize the state with the key */ + memcpy(state.B, k, XOODYAK_KEY_SIZE); + memset(state.B + XOODYAK_KEY_SIZE, 0, sizeof(state.B) - XOODYAK_KEY_SIZE); + state.B[XOODYAK_KEY_SIZE + 1] = 0x01; /* Padding */ + state.B[sizeof(state.B) - 1] = 0x02; /* Domain separation */ + phase = XOODYAK_PHASE_DOWN; + + /* Absorb the nonce and associated data */ + xoodyak_absorb(&state, &phase, npub, XOODYAK_NONCE_SIZE); + xoodyak_absorb(&state, &phase, ad, adlen); + + /* Decrypt the ciphertext to produce the plaintext */ + domain = 0x80; + clen -= XOODYAK_TAG_SIZE; + while (clen > 
XOODYAK_SQUEEZE_RATE) { + state.B[sizeof(state.B) - 1] ^= domain; + xoodoo_permute(&state); + lw_xor_block_swap(m, state.B, c, XOODYAK_SQUEEZE_RATE); + state.B[XOODYAK_SQUEEZE_RATE] ^= 0x01; /* Padding */ + c += XOODYAK_SQUEEZE_RATE; + m += XOODYAK_SQUEEZE_RATE; + clen -= XOODYAK_SQUEEZE_RATE; + domain = 0; + } + state.B[sizeof(state.B) - 1] ^= domain; + xoodoo_permute(&state); + temp = (unsigned)clen; + lw_xor_block_swap(m, state.B, c, temp); + state.B[temp] ^= 0x01; /* Padding */ + c += temp; + + /* Check the authentication tag */ + state.B[sizeof(state.B) - 1] ^= 0x40; /* Domain separation */ + xoodoo_permute(&state); + return aead_check_tag(mtemp, *mlen, state.B, c, XOODYAK_TAG_SIZE); +} + +int xoodyak_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen) +{ + xoodyak_hash_state_t state; + xoodyak_hash_init(&state); + xoodyak_hash_absorb(&state, in, inlen); + xoodyak_hash_squeeze(&state, out, XOODYAK_HASH_SIZE); + return 0; +} + +#define XOODYAK_HASH_MODE_INIT_ABSORB 0 +#define XOODYAK_HASH_MODE_ABSORB 1 +#define XOODYAK_HASH_MODE_SQUEEZE 2 + +#define xoodoo_hash_permute(state) \ + xoodoo_permute((xoodoo_state_t *)((state)->s.state)) + +void xoodyak_hash_init(xoodyak_hash_state_t *state) +{ + memset(state, 0, sizeof(xoodyak_hash_state_t)); + state->s.mode = XOODYAK_HASH_MODE_INIT_ABSORB; +} + +void xoodyak_hash_absorb + (xoodyak_hash_state_t *state, const unsigned char *in, + unsigned long long inlen) +{ + uint8_t domain; + unsigned temp; + + /* If we were squeezing, then restart the absorb phase */ + if (state->s.mode == XOODYAK_HASH_MODE_SQUEEZE) { + xoodoo_hash_permute(state); + state->s.mode = XOODYAK_HASH_MODE_INIT_ABSORB; + state->s.count = 0; + } + + /* The first block needs a different domain separator to the others */ + domain = (state->s.mode == XOODYAK_HASH_MODE_INIT_ABSORB) ? 0x01 : 0x00; + + /* Absorb the input data into the state */ + while (inlen > 0) { + if (state->s.count >= XOODYAK_HASH_RATE) { + state->s.state[XOODYAK_HASH_RATE] ^= 0x01; /* Padding */ + state->s.state[sizeof(state->s.state) - 1] ^= domain; + xoodoo_hash_permute(state); + state->s.mode = XOODYAK_HASH_MODE_ABSORB; + state->s.count = 0; + domain = 0x00; + } + temp = XOODYAK_HASH_RATE - state->s.count; + if (temp > inlen) + temp = (unsigned)inlen; + lw_xor_block(state->s.state + state->s.count, in, temp); + state->s.count += temp; + in += temp; + inlen -= temp; + } +} + +void xoodyak_hash_squeeze + (xoodyak_hash_state_t *state, unsigned char *out, + unsigned long long outlen) +{ + uint8_t domain; + unsigned temp; + + /* If we were absorbing, then terminate the absorb phase */ + if (state->s.mode != XOODYAK_HASH_MODE_SQUEEZE) { + domain = (state->s.mode == XOODYAK_HASH_MODE_INIT_ABSORB) ? 
0x01 : 0x00; + state->s.state[state->s.count] ^= 0x01; /* Padding */ + state->s.state[sizeof(state->s.state) - 1] ^= domain; + xoodoo_hash_permute(state); + state->s.mode = XOODYAK_HASH_MODE_SQUEEZE; + state->s.count = 0; + } + + /* Squeeze data out of the state */ + while (outlen > 0) { + if (state->s.count >= XOODYAK_HASH_RATE) { + /* Padding is always at index 0 for squeezing subsequent + * blocks because the number of bytes we have absorbed + * since the previous block was squeezed out is zero */ + state->s.state[0] ^= 0x01; + xoodoo_hash_permute(state); + state->s.count = 0; + } + temp = XOODYAK_HASH_RATE - state->s.count; + if (temp > outlen) + temp = (unsigned)outlen; + memcpy(out, state->s.state + state->s.count, temp); + state->s.count += temp; + out += temp; + outlen -= temp; + } +} + +void xoodyak_hash_finalize + (xoodyak_hash_state_t *state, unsigned char *out) +{ + xoodyak_hash_squeeze(state, out, XOODYAK_HASH_SIZE); +} diff --git a/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/xoodyak.h b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/xoodyak.h new file mode 100644 index 0000000..f4777d5 --- /dev/null +++ b/xoodyak/Implementations/crypto_hash/xoodyakv1/rhys/xoodyak.h @@ -0,0 +1,226 @@ +/* + * Copyright (C) 2020 Southern Storm Software, Pty Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LWCRYPTO_XOODYAK_H +#define LWCRYPTO_XOODYAK_H + +#include "aead-common.h" + +/** + * \file xoodyak.h + * \brief Xoodyak authenticated encryption algorithm. + * + * Xoodyak is an authenticated encryption and hash algorithm pair based + * around the 384-bit Xoodoo permutation that is similar in structure to + * Keccak but is more efficient than Keccak on 32-bit embedded devices. + * The Cyclist mode of operation is used to convert the permutation + * into a sponge for the higher-level algorithms. + * + * The Xoodyak encryption mode has a 128-bit key, a 128-bit nonce, + * and a 128-bit authentication tag. The Xoodyak hashing mode has a + * 256-bit fixed hash output and can also be used as an extensible + * output function (XOF). + * + * The Xoodyak specification describes a re-keying mechanism where the + * key for one packet is used to derive the key to use on the next packet. + * This provides some resistance against side channel attacks by making + * the session key a moving target. This library does not currently + * implement re-keying. 
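+ * + * As a purely illustrative sketch (msg, msg_len, and digest below are + * hypothetical placeholders; the functions and constants are the ones + * declared later in this header), the incremental hashing interface is + * intended to be used along these lines: + * + * \code + * xoodyak_hash_state_t st; + * unsigned char digest[XOODYAK_HASH_SIZE]; + * xoodyak_hash_init(&st); + * xoodyak_hash_absorb(&st, msg, msg_len); + * xoodyak_hash_squeeze(&st, digest, XOODYAK_HASH_SIZE); + * \endcode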
+ * + * References: https://keccak.team/xoodyak.html + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Size of the key for Xoodyak. + */ +#define XOODYAK_KEY_SIZE 16 + +/** + * \brief Size of the authentication tag for Xoodyak. + */ +#define XOODYAK_TAG_SIZE 16 + +/** + * \brief Size of the nonce for Xoodyak. + */ +#define XOODYAK_NONCE_SIZE 16 + +/** + * \brief Size of the hash output for Xoodyak. + */ +#define XOODYAK_HASH_SIZE 32 + +/** + * \brief State information for Xoodyak incremental hashing modes. + */ +typedef union +{ + struct { + unsigned char state[48]; /**< Current hash state */ + unsigned char count; /**< Number of bytes in the current block */ + unsigned char mode; /**< Hash mode: absorb or squeeze */ + } s; /**< State */ + unsigned long long align; /**< For alignment of this structure */ + +} xoodyak_hash_state_t; + +/** + * \brief Meta-information block for the Xoodyak cipher. + */ +extern aead_cipher_t const xoodyak_cipher; + +/** + * \brief Meta-information block for the Xoodyak hash algorithm. + */ +extern aead_hash_algorithm_t const xoodyak_hash_algorithm; + +/** + * \brief Encrypts and authenticates a packet with Xoodyak. + * + * \param c Buffer to receive the output. + * \param clen On exit, set to the length of the output which includes + * the ciphertext and the 16 byte authentication tag. + * \param m Buffer that contains the plaintext message to encrypt. + * \param mlen Length of the plaintext message in bytes. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param nsec Secret nonce - not used by this algorithm. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to encrypt the packet. + * + * \return 0 on success, or a negative value if there was an error in + * the parameters. + * + * \sa xoodyak_aead_decrypt() + */ +int xoodyak_aead_encrypt + (unsigned char *c, unsigned long long *clen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *nsec, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Decrypts and authenticates a packet with Xoodyak. + * + * \param m Buffer to receive the plaintext message on output. + * \param mlen Receives the length of the plaintext message on output. + * \param nsec Secret nonce - not used by this algorithm. + * \param c Buffer that contains the ciphertext and authentication + * tag to decrypt. + * \param clen Length of the input data in bytes, which includes the + * ciphertext and the 16 byte authentication tag. + * \param ad Buffer that contains associated data to authenticate + * along with the packet but which does not need to be encrypted. + * \param adlen Length of the associated data in bytes. + * \param npub Points to the public nonce for the packet which must + * be 16 bytes in length. + * \param k Points to the 16 bytes of the key to use to decrypt the packet. + * + * \return 0 on success, -1 if the authentication tag was incorrect, + * or some other negative number if there was an error in the parameters. 
+ * + * \sa xoodyak_aead_encrypt() + */ +int xoodyak_aead_decrypt + (unsigned char *m, unsigned long long *mlen, + unsigned char *nsec, + const unsigned char *c, unsigned long long clen, + const unsigned char *ad, unsigned long long adlen, + const unsigned char *npub, + const unsigned char *k); + +/** + * \brief Hashes a block of input data with Xoodyak to generate a hash value. + * + * \param out Buffer to receive the hash output which must be at least + * XOODYAK_HASH_SIZE bytes in length. + * \param in Points to the input data to be hashed. + * \param inlen Length of the input data in bytes. + * + * \return Returns zero on success or -1 if there was an error in the + * parameters. + */ +int xoodyak_hash + (unsigned char *out, const unsigned char *in, unsigned long long inlen); + +/** + * \brief Initializes the state for a Xoodyak hashing operation. + * + * \param state Hash state to be initialized. + * + * \sa xoodyak_hash_absorb(), xoodyak_hash_squeeze(), xoodyak_hash() + */ +void xoodyak_hash_init(xoodyak_hash_state_t *state); + +/** + * \brief Absorbs more input data into a Xoodyak hashing state. + * + * \param state Hash state to be updated. + * \param in Points to the input data to be absorbed into the state. + * \param inlen Length of the input data to be absorbed into the state. + * + * \sa xoodyak_hash_init(), xoodyak_hash_squeeze() + */ +void xoodyak_hash_absorb + (xoodyak_hash_state_t *state, const unsigned char *in, + unsigned long long inlen); + +/** + * \brief Squeezes output data from a Xoodyak hashing state. + * + * \param state Hash state to squeeze the output data from. + * \param out Points to the output buffer to receive the squeezed data. + * \param outlen Number of bytes of data to squeeze out of the state. + * + * \sa xoodyak_hash_init(), xoodyak_hash_absorb() + */ +void xoodyak_hash_squeeze + (xoodyak_hash_state_t *state, unsigned char *out, + unsigned long long outlen); + +/** + * \brief Returns the final hash value from a Xoodyak hashing operation. + * + * \param state Hash state to be finalized. + * \param out Points to the output buffer to receive the hash value. + * + * \note This is a wrapper around xoodyak_hash_squeeze() for a fixed length + * of XOODYAK_HASH_SIZE bytes. + * + * \sa xoodyak_hash_init(), xoodyak_hash_absorb() + */ +void xoodyak_hash_finalize + (xoodyak_hash_state_t *state, unsigned char *out); + +#ifdef __cplusplus +} +#endif + +#endif -- libgit2 0.26.0
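
For reviewers who want to exercise the AEAD interface added by this patch, the sketch below is a minimal round-trip check. It is not part of the patch: the key, nonce, message, and associated-data values are placeholders, and it assumes only the xoodyak.h header and the functions and size macros declared in the files above.

#include <stdio.h>
#include <string.h>
#include "xoodyak.h"

/* Hypothetical round-trip check for xoodyak_aead_encrypt()/decrypt(). */
int main(void)
{
    unsigned char key[XOODYAK_KEY_SIZE] = {0};      /* placeholder key */
    unsigned char nonce[XOODYAK_NONCE_SIZE] = {0};  /* placeholder nonce */
    const unsigned char msg[] = "hello";
    const unsigned char ad[] = "header";
    unsigned char ct[sizeof(msg) + XOODYAK_TAG_SIZE];
    unsigned char pt[sizeof(msg)];
    unsigned long long ctlen, ptlen;

    /* Encrypt: the output length is the plaintext length plus the 16-byte tag. */
    if (xoodyak_aead_encrypt(ct, &ctlen, msg, sizeof(msg), ad, sizeof(ad),
                             NULL, nonce, key) != 0)
        return 1;

    /* Decrypt: returns 0 only if the authentication tag verifies. */
    if (xoodyak_aead_decrypt(pt, &ptlen, NULL, ct, ctlen, ad, sizeof(ad),
                             nonce, key) != 0)
        return 1;

    printf("round trip: %s\n", memcmp(pt, msg, sizeof(msg)) == 0 ? "match" : "mismatch");
    return 0;
}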